author     José Pekkarinen <jose.pekkarinen@nokia.com>    2016-04-11 10:41:07 +0300
committer  José Pekkarinen <jose.pekkarinen@nokia.com>    2016-04-13 08:17:18 +0300
commit     e09b41010ba33a20a87472ee821fa407a5b8da36 (patch)
tree       d10dc367189862e7ca5c592f033dc3726e1df4e3 /kernel/include/linux/dmaengine.h
parent     f93b97fd65072de626c074dbe099a1fff05ce060 (diff)
These changes are the raw update to linux-4.4.6-rt14. Kernel sources
are taken from kernel.org, and the rt patch from the rt wiki download page. During the rebase, the following patch collided: Force tick interrupt and get rid of softirq magic (I70131fb85). The colliding changes were dropped because their logic was already present in the source.

Change-Id: I7f57a4081d9deaa0d9ccfc41a6c8daccdee3b769
Signed-off-by: José Pekkarinen <jose.pekkarinen@nokia.com>
Diffstat (limited to 'kernel/include/linux/dmaengine.h')
-rw-r--r--  kernel/include/linux/dmaengine.h  153
1 file changed, 148 insertions(+), 5 deletions(-)
diff --git a/kernel/include/linux/dmaengine.h b/kernel/include/linux/dmaengine.h
index ad4197572..c47c68e53 100644
--- a/kernel/include/linux/dmaengine.h
+++ b/kernel/include/linux/dmaengine.h
@@ -65,6 +65,8 @@ enum dma_transaction_type {
DMA_PQ,
DMA_XOR_VAL,
DMA_PQ_VAL,
+ DMA_MEMSET,
+ DMA_MEMSET_SG,
DMA_INTERRUPT,
DMA_SG,
DMA_PRIVATE,
@@ -122,10 +124,18 @@ enum dma_transfer_direction {
* chunk and before first src/dst address for next chunk.
* Ignored for dst(assumed 0), if dst_inc is true and dst_sgl is false.
* Ignored for src(assumed 0), if src_inc is true and src_sgl is false.
+ * @dst_icg: Number of bytes to jump after last dst address of this
+ * chunk and before the first dst address for next chunk.
+ * Ignored if dst_inc is true and dst_sgl is false.
+ * @src_icg: Number of bytes to jump after last src address of this
+ * chunk and before the first src address for next chunk.
+ * Ignored if src_inc is true and src_sgl is false.
*/
struct data_chunk {
size_t size;
size_t icg;
+ size_t dst_icg;
+ size_t src_icg;
};
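[Annotation, not part of the patch: with the new dst_icg/src_icg fields a client can describe different inter-chunk gaps on each side of an interleaved transfer. A minimal sketch, assuming a kernel client that already includes <linux/dmaengine.h>; all numbers are hypothetical.]

	/* Hypothetical chunk: 64 bytes of data, destination skips 192 bytes
	 * before the next chunk, source stays packed. */
	struct data_chunk chunk = {
		.size    = 64,		/* bytes of real data in this chunk */
		.icg     = 0,		/* legacy shared gap, unused here */
		.dst_icg = 192,		/* destination inter-chunk gap */
		.src_icg = 0,		/* source inter-chunk gap */
	};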
/**
@@ -174,6 +184,8 @@ struct dma_interleaved_template {
* operation it continues the calculation with new sources
* @DMA_PREP_FENCE - tell the driver that subsequent operations depend
* on the result of this operation
+ * @DMA_CTRL_REUSE: client can reuse the descriptor and submit again till
+ * cleared or freed
*/
enum dma_ctrl_flags {
DMA_PREP_INTERRUPT = (1 << 0),
@@ -182,6 +194,7 @@ enum dma_ctrl_flags {
DMA_PREP_PQ_DISABLE_Q = (1 << 3),
DMA_PREP_CONTINUE = (1 << 4),
DMA_PREP_FENCE = (1 << 5),
+ DMA_CTRL_REUSE = (1 << 6),
};
/**
@@ -222,6 +235,16 @@ struct dma_chan_percpu {
};
/**
+ * struct dma_router - DMA router structure
+ * @dev: pointer to the DMA router device
+ * @route_free: function to be called when the route can be disconnected
+ */
+struct dma_router {
+ struct device *dev;
+ void (*route_free)(struct device *dev, void *route_data);
+};
+
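[Annotation, not part of the patch: a rough sketch of how a DMA router (e.g. a crossbar) driver might hook a channel up to the new fields. All identifiers except struct dma_router, struct dma_chan and the router/route_data members are made up for illustration.]

	static void xbar_route_free(struct device *dev, void *route_data)
	{
		/* a real router driver would release the crossbar line
		 * described by route_data here */
	}

	static struct dma_router xbar_router = {
		/* .dev is assumed to be set to the router's own struct device */
		.route_free = xbar_route_free,
	};

	static void xbar_attach(struct dma_chan *chan, void *route)
	{
		chan->router = &xbar_router;	/* consulted when the channel is released */
		chan->route_data = route;	/* hypothetical per-route cookie */
	}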
+/**
* struct dma_chan - devices supply DMA channels, clients use them
* @device: ptr to the dma device who supplies this channel, always !%NULL
* @cookie: last cookie value returned to client
@@ -232,6 +255,8 @@ struct dma_chan_percpu {
* @local: per-cpu pointer to a struct dma_chan_percpu
* @client_count: how many clients are using this channel
* @table_count: number of appearances in the mem-to-mem allocation table
+ * @router: pointer to the DMA router structure
+ * @route_data: channel specific data for the router
* @private: private data for certain client-channel associations
*/
struct dma_chan {
@@ -247,6 +272,11 @@ struct dma_chan {
struct dma_chan_percpu __percpu *local;
int client_count;
int table_count;
+
+ /* DMA router */
+ struct dma_router *router;
+ void *route_data;
+
void *private;
};
@@ -374,6 +404,8 @@ enum dma_residue_granularity {
* @cmd_pause: true, if pause and thereby resume is supported
* @cmd_terminate: true, if terminate cmd is supported
* @residue_granularity: granularity of the reported transfer residue
+ * @descriptor_reuse: if a descriptor can be reused by client and
+ * resubmitted multiple times
*/
struct dma_slave_caps {
u32 src_addr_widths;
@@ -382,6 +414,7 @@ struct dma_slave_caps {
bool cmd_pause;
bool cmd_terminate;
enum dma_residue_granularity residue_granularity;
+ bool descriptor_reuse;
};
static inline const char *dma_chan_name(struct dma_chan *chan)
@@ -441,6 +474,7 @@ struct dma_async_tx_descriptor {
dma_addr_t phys;
struct dma_chan *chan;
dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
+ int (*desc_free)(struct dma_async_tx_descriptor *tx);
dma_async_tx_callback callback;
void *callback_param;
struct dmaengine_unmap_data *unmap;
@@ -559,6 +593,20 @@ struct dma_tx_state {
};
/**
+ * enum dmaengine_alignment - defines alignment of the DMA async tx
+ * buffers
+ */
+enum dmaengine_alignment {
+ DMAENGINE_ALIGN_1_BYTE = 0,
+ DMAENGINE_ALIGN_2_BYTES = 1,
+ DMAENGINE_ALIGN_4_BYTES = 2,
+ DMAENGINE_ALIGN_8_BYTES = 3,
+ DMAENGINE_ALIGN_16_BYTES = 4,
+ DMAENGINE_ALIGN_32_BYTES = 5,
+ DMAENGINE_ALIGN_64_BYTES = 6,
+};
+
+/**
* struct dma_device - info on the entity supplying DMA services
* @chancnt: how many DMA channels are supported
* @privatecnt: how many DMA channels are requested by dma_request_channel
@@ -570,6 +618,7 @@ struct dma_tx_state {
* @copy_align: alignment shift for memcpy operations
* @xor_align: alignment shift for xor operations
* @pq_align: alignment shift for pq operations
+ * @fill_align: alignment shift for memset operations
* @dev_id: unique device ID
* @dev: struct device reference for dma mapping api
* @src_addr_widths: bit mask of src addr widths the device supports
@@ -588,12 +637,15 @@ struct dma_tx_state {
* @device_prep_dma_xor_val: prepares a xor validation operation
* @device_prep_dma_pq: prepares a pq operation
* @device_prep_dma_pq_val: prepares a pqzero_sum operation
+ * @device_prep_dma_memset: prepares a memset operation
+ * @device_prep_dma_memset_sg: prepares a memset operation over a scatter list
* @device_prep_dma_interrupt: prepares an end of chain interrupt operation
* @device_prep_slave_sg: prepares a slave dma operation
* @device_prep_dma_cyclic: prepare a cyclic dma operation suitable for audio.
* The function takes a buffer of size buf_len. The callback function will
* be called after period_len bytes have been transferred.
* @device_prep_interleaved_dma: Transfer expression in a generic way.
+ * @device_prep_dma_imm_data: DMA's 8 byte immediate data to the dst address
* @device_config: Pushes a new configuration to a channel, return 0 or an error
* code
* @device_pause: Pauses any transfer happening on a channel. Returns
@@ -617,9 +669,10 @@ struct dma_device {
dma_cap_mask_t cap_mask;
unsigned short max_xor;
unsigned short max_pq;
- u8 copy_align;
- u8 xor_align;
- u8 pq_align;
+ enum dmaengine_alignment copy_align;
+ enum dmaengine_alignment xor_align;
+ enum dmaengine_alignment pq_align;
+ enum dmaengine_alignment fill_align;
#define DMA_HAS_PQ_CONTINUE (1 << 15)
int dev_id;
@@ -650,6 +703,12 @@ struct dma_device {
struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
unsigned int src_cnt, const unsigned char *scf, size_t len,
enum sum_check_flags *pqres, unsigned long flags);
+ struct dma_async_tx_descriptor *(*device_prep_dma_memset)(
+ struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
+ unsigned long flags);
+ struct dma_async_tx_descriptor *(*device_prep_dma_memset_sg)(
+ struct dma_chan *chan, struct scatterlist *sg,
+ unsigned int nents, int value, unsigned long flags);
struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
struct dma_chan *chan, unsigned long flags);
struct dma_async_tx_descriptor *(*device_prep_dma_sg)(
@@ -669,6 +728,9 @@ struct dma_device {
struct dma_async_tx_descriptor *(*device_prep_interleaved_dma)(
struct dma_chan *chan, struct dma_interleaved_template *xt,
unsigned long flags);
+ struct dma_async_tx_descriptor *(*device_prep_dma_imm_data)(
+ struct dma_chan *chan, dma_addr_t dst, u64 data,
+ unsigned long flags);
int (*device_config)(struct dma_chan *chan,
struct dma_slave_config *config);
@@ -745,6 +807,17 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma(
return chan->device->device_prep_interleaved_dma(chan, xt, flags);
}
+static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memset(
+ struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
+ unsigned long flags)
+{
+ if (!chan || !chan->device)
+ return NULL;
+
+ return chan->device->device_prep_dma_memset(chan, dest, value,
+ len, flags);
+}
+
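[Annotation, not part of the patch: a hedged client-side sketch of the new memset primitive, assuming a channel that advertises DMA_MEMSET and a buffer that is already DMA mapped. The helper name fill_zero is made up.]

	static int fill_zero(struct dma_chan *chan, dma_addr_t dest, size_t len)
	{
		struct dma_async_tx_descriptor *tx;

		tx = dmaengine_prep_dma_memset(chan, dest, 0, len,
					       DMA_PREP_INTERRUPT);
		if (!tx)
			return -ENOMEM;

		dmaengine_submit(tx);
		dma_async_issue_pending(chan);
		return 0;
	}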
static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_sg(
struct dma_chan *chan,
struct scatterlist *dst_sg, unsigned int dst_nents,
@@ -790,7 +863,8 @@ static inline dma_cookie_t dmaengine_submit(struct dma_async_tx_descriptor *desc
return desc->tx_submit(desc);
}
-static inline bool dmaengine_check_align(u8 align, size_t off1, size_t off2, size_t len)
+static inline bool dmaengine_check_align(enum dmaengine_alignment align,
+ size_t off1, size_t off2, size_t len)
{
size_t mask;
@@ -820,6 +894,12 @@ static inline bool is_dma_pq_aligned(struct dma_device *dev, size_t off1,
return dmaengine_check_align(dev->pq_align, off1, off2, len);
}
+static inline bool is_dma_fill_aligned(struct dma_device *dev, size_t off1,
+ size_t off2, size_t len)
+{
+ return dmaengine_check_align(dev->fill_align, off1, off2, len);
+}
+
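[Annotation, not part of the patch: the alignment fields are now expressed as the enum above, where the value is a power-of-two shift, e.g. DMAENGINE_ALIGN_4_BYTES (= 2) requires offsets and length to be multiples of 4. A small sketch of a capability check; the wrapper name is made up.]

	static bool can_dma_fill(struct dma_device *dev, size_t dest_off, size_t len)
	{
		/* true only if dest_off and len satisfy dev->fill_align */
		return is_dma_fill_aligned(dev, dest_off, 0, len);
	}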
static inline void
dma_set_maxpq(struct dma_device *dma, int maxpq, int has_pq_continue)
{
@@ -874,6 +954,33 @@ static inline int dma_maxpq(struct dma_device *dma, enum dma_ctrl_flags flags)
BUG();
}
+static inline size_t dmaengine_get_icg(bool inc, bool sgl, size_t icg,
+ size_t dir_icg)
+{
+ if (inc) {
+ if (dir_icg)
+ return dir_icg;
+ else if (sgl)
+ return icg;
+ }
+
+ return 0;
+}
+
+static inline size_t dmaengine_get_dst_icg(struct dma_interleaved_template *xt,
+ struct data_chunk *chunk)
+{
+ return dmaengine_get_icg(xt->dst_inc, xt->dst_sgl,
+ chunk->icg, chunk->dst_icg);
+}
+
+static inline size_t dmaengine_get_src_icg(struct dma_interleaved_template *xt,
+ struct data_chunk *chunk)
+{
+ return dmaengine_get_icg(xt->src_inc, xt->src_sgl,
+ chunk->icg, chunk->src_icg);
+}
+
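[Annotation, not part of the patch: a sketch of how a driver walking a dma_interleaved_template might use the new helpers instead of reading icg directly; the helper name is made up.]

	static size_t chunk_dst_advance(struct dma_interleaved_template *xt,
					struct data_chunk *chunk)
	{
		/* data itself plus the destination inter-chunk gap */
		return chunk->size + dmaengine_get_dst_icg(xt, chunk);
	}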
/* --- public DMA engine API --- */
#ifdef CONFIG_DMA_ENGINE
@@ -1079,6 +1186,39 @@ static inline int dma_get_slave_caps(struct dma_chan *chan,
}
#endif
+static inline int dmaengine_desc_set_reuse(struct dma_async_tx_descriptor *tx)
+{
+ struct dma_slave_caps caps;
+
+ dma_get_slave_caps(tx->chan, &caps);
+
+ if (caps.descriptor_reuse) {
+ tx->flags |= DMA_CTRL_REUSE;
+ return 0;
+ } else {
+ return -EPERM;
+ }
+}
+
+static inline void dmaengine_desc_clear_reuse(struct dma_async_tx_descriptor *tx)
+{
+ tx->flags &= ~DMA_CTRL_REUSE;
+}
+
+static inline bool dmaengine_desc_test_reuse(struct dma_async_tx_descriptor *tx)
+{
+ return (tx->flags & DMA_CTRL_REUSE) == DMA_CTRL_REUSE;
+}
+
+static inline int dmaengine_desc_free(struct dma_async_tx_descriptor *desc)
+{
+ /* this is supported for reusable desc, so check that */
+ if (dmaengine_desc_test_reuse(desc))
+ return desc->desc_free(desc);
+ else
+ return -EPERM;
+}
+
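[Annotation, not part of the patch: a hedged client sketch of the new descriptor-reuse API. It assumes the channel reports descriptor_reuse in its slave caps and that its driver implements desc_free; completion handling is elided.]

	static int submit_twice(struct dma_chan *chan,
				struct dma_async_tx_descriptor *tx)
	{
		int ret;

		ret = dmaengine_desc_set_reuse(tx);	/* -EPERM if unsupported */
		if (ret)
			return ret;

		dmaengine_submit(tx);
		dma_async_issue_pending(chan);
		/* ... wait for completion, then reuse the same descriptor ... */
		dmaengine_submit(tx);
		dma_async_issue_pending(chan);
		/* ... wait again ... */

		return dmaengine_desc_free(tx);	/* explicit release of a reusable desc */
	}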
/* --- DMA device --- */
int dma_async_device_register(struct dma_device *device);
@@ -1093,7 +1233,7 @@ struct dma_chan *dma_get_any_slave_channel(struct dma_device *device);
static inline struct dma_chan
*__dma_request_slave_channel_compat(const dma_cap_mask_t *mask,
dma_filter_fn fn, void *fn_param,
- struct device *dev, char *name)
+ struct device *dev, const char *name)
{
struct dma_chan *chan;
@@ -1101,6 +1241,9 @@ static inline struct dma_chan
if (chan)
return chan;
+ if (!fn || !fn_param)
+ return NULL;
+
return __dma_request_channel(mask, fn, fn_param);
}
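[Annotation, not part of the patch: a sketch of the caller side, assuming the usual dma_request_slave_channel_compat() wrapper macro and a platform driver context; the function name and the "rx" channel name are hypothetical. With this change, passing a NULL filter simply skips the legacy fallback path.]

	static struct dma_chan *get_rx_chan(struct platform_device *pdev,
					    dma_filter_fn filter, void *arg)
	{
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		/* DT/ACPI lookup of "rx" first, legacy filter as fallback */
		return dma_request_slave_channel_compat(mask, filter, arg,
							&pdev->dev, "rx");
	}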
#endif /* DMAENGINE_H */