Diffstat (limited to 'kernel/drivers/dma')
-rw-r--r-- | kernel/drivers/dma/at_xdmac.c    | 108
-rw-r--r-- | kernel/drivers/dma/dw/core.c     |  34
-rw-r--r-- | kernel/drivers/dma/hsu/hsu.c     |   2
-rw-r--r-- | kernel/drivers/dma/hsu/hsu.h     |   3
-rw-r--r-- | kernel/drivers/dma/ipu/ipu_irq.c |   9
-rw-r--r-- | kernel/drivers/dma/pl330.c       |  11
-rw-r--r-- | kernel/drivers/dma/pxa_dma.c     |  39
-rw-r--r-- | kernel/drivers/dma/sh/usb-dmac.c |  19
8 files changed, 153 insertions, 72 deletions
diff --git a/kernel/drivers/dma/at_xdmac.c b/kernel/drivers/dma/at_xdmac.c
index 02f9aa4eb..66c073fc8 100644
--- a/kernel/drivers/dma/at_xdmac.c
+++ b/kernel/drivers/dma/at_xdmac.c
@@ -242,7 +242,7 @@ struct at_xdmac_lld {
 	u32		mbr_dus;	/* Destination Microblock Stride Register */
 };
 
-
+/* 64-bit alignment needed to update CNDA and CUBC registers in an atomic way. */
 struct at_xdmac_desc {
 	struct at_xdmac_lld		lld;
 	enum dma_transfer_direction	direction;
@@ -253,7 +253,7 @@ struct at_xdmac_desc {
 	unsigned int			xfer_size;
 	struct list_head		descs_list;
 	struct list_head		xfer_node;
-};
+} __aligned(sizeof(u64));
 
 static inline void __iomem *at_xdmac_chan_reg_base(struct at_xdmac *atxdmac, unsigned int chan_nb)
 {
@@ -864,8 +864,12 @@ at_xdmac_interleaved_queue_desc(struct dma_chan *chan,
 	 * access. Hopefully we can access DDR through both ports (at least on
 	 * SAMA5D4x), so we can use the same interface for source and dest,
 	 * that solves the fact we don't know the direction.
+	 * ERRATA: Even though it is unused for memory transfers, the PERID
+	 * must not match that of another channel; otherwise it could lead to
+	 * spurious flag status.
 	 */
-	u32			chan_cc = AT_XDMAC_CC_DIF(0)
+	u32			chan_cc = AT_XDMAC_CC_PERID(0x3f)
+					| AT_XDMAC_CC_DIF(0)
 					| AT_XDMAC_CC_SIF(0)
 					| AT_XDMAC_CC_MBSIZE_SIXTEEN
 					| AT_XDMAC_CC_TYPE_MEM_TRAN;
@@ -1042,8 +1046,12 @@ at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src
 	 * access DDR through both ports (at least on SAMA5D4x), so we can use
 	 * the same interface for source and dest, that solves the fact we
 	 * don't know the direction.
+	 * ERRATA: Even though it is unused for memory transfers, the PERID
+	 * must not match that of another channel; otherwise it could lead to
+	 * spurious flag status.
 	 */
-	u32		chan_cc = AT_XDMAC_CC_DAM_INCREMENTED_AM
+	u32		chan_cc = AT_XDMAC_CC_PERID(0x3f)
+					| AT_XDMAC_CC_DAM_INCREMENTED_AM
 					| AT_XDMAC_CC_SAM_INCREMENTED_AM
 					| AT_XDMAC_CC_DIF(0)
 					| AT_XDMAC_CC_SIF(0)
@@ -1144,8 +1152,12 @@ static struct at_xdmac_desc *at_xdmac_memset_create_desc(struct dma_chan *chan,
 	 * access. Hopefully we can access DDR through both ports (at least on
 	 * SAMA5D4x), so we can use the same interface for source and dest,
 	 * that solves the fact we don't know the direction.
+	 * ERRATA: Even though it is unused for memory transfers, the PERID
+	 * must not match that of another channel; otherwise it could lead to
+	 * spurious flag status.
 	 */
-	u32			chan_cc = AT_XDMAC_CC_DAM_UBS_AM
+	u32			chan_cc = AT_XDMAC_CC_PERID(0x3f)
+					| AT_XDMAC_CC_DAM_UBS_AM
 					| AT_XDMAC_CC_SAM_INCREMENTED_AM
 					| AT_XDMAC_CC_DIF(0)
 					| AT_XDMAC_CC_SIF(0)
@@ -1183,8 +1195,8 @@ static struct at_xdmac_desc *at_xdmac_memset_create_desc(struct dma_chan *chan,
 	desc->lld.mbr_cfg = chan_cc;
 
 	dev_dbg(chan2dev(chan),
-		"%s: lld: mbr_da=%pad, mbr_ds=%pad, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
-		__func__, &desc->lld.mbr_da, &desc->lld.mbr_ds, desc->lld.mbr_ubc,
+		"%s: lld: mbr_da=%pad, mbr_ds=0x%08x, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
+		__func__, &desc->lld.mbr_da, desc->lld.mbr_ds, desc->lld.mbr_ubc,
 		desc->lld.mbr_cfg);
 
 	return desc;
@@ -1388,6 +1400,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 	u32			cur_nda, check_nda, cur_ubc, mask, value;
 	u8			dwidth = 0;
 	unsigned long		flags;
+	bool			initd;
 
 	ret = dma_cookie_status(chan, cookie, txstate);
 	if (ret == DMA_COMPLETE)
@@ -1412,7 +1425,16 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 	residue = desc->xfer_size;
 	/*
 	 * Flush FIFO: only relevant when the transfer is source peripheral
-	 * synchronized.
+	 * synchronized. Flush is needed before reading CUBC because data in
+	 * the FIFO are not reported by CUBC. Reporting a residue of the
+	 * transfer length while data remain in the FIFO can cause problems.
+	 * Use case: the Atmel USART signals a timeout when characters have
+	 * been received but no new character has arrived for a while. On
+	 * timeout, it requests the residue. If the data are still in the DMA
+	 * FIFO, we would return a residue of the transfer length, meaning no
+	 * data were received. An application waiting for these data would
+	 * then hang, since we won't get another USART timeout without
+	 * receiving new data.
 	 */
 	mask = AT_XDMAC_CC_TYPE | AT_XDMAC_CC_DSYNC;
 	value = AT_XDMAC_CC_TYPE_PER_TRAN | AT_XDMAC_CC_DSYNC_PER2MEM;
@@ -1423,34 +1445,43 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 	}
 
 	/*
-	 * When processing the residue, we need to read two registers but we
-	 * can't do it in an atomic way. AT_XDMAC_CNDA is used to find where
-	 * we stand in the descriptor list and AT_XDMAC_CUBC is used
-	 * to know how many data are remaining for the current descriptor.
-	 * Since the dma channel is not paused to not loose data, between the
-	 * AT_XDMAC_CNDA and AT_XDMAC_CUBC read, we may have change of
-	 * descriptor.
-	 * For that reason, after reading AT_XDMAC_CUBC, we check if we are
-	 * still using the same descriptor by reading a second time
-	 * AT_XDMAC_CNDA. If AT_XDMAC_CNDA has changed, it means we have to
-	 * read again AT_XDMAC_CUBC.
+	 * The easiest way to compute the residue would be to pause the DMA,
+	 * but doing so can lead to missed data, as some devices don't have
+	 * a FIFO.
+	 * We need to read several registers because:
+	 * - the DMA is running, so a descriptor change is possible while
+	 * reading these registers
+	 * - when a block transfer is done, the CUBC register is set to its
+	 * initial value until the fetch of the next descriptor. This value
+	 * would corrupt the residue calculation, so we have to skip it.
+	 *
+	 * INITD --------                    ------------
+	 *              |____________________|
+	 *       _______________________  _______________
+	 * NDA       @desc2             \/   @desc3
+	 *       _______________________/\_______________
+	 *       __________  ___________  _______________
+	 * CUBC       0    \/ MAX desc1 \/  MAX desc2
+	 *       __________/\___________/\_______________
+	 *
+	 * Since descriptors are aligned on 64 bits, we can assume that
+	 * the update of NDA and CUBC is atomic.
 	 * Memory barriers are used to ensure the read order of the registers.
-	 * A max number of retries is set because unlikely it can never ends if
-	 * we are transferring a lot of data with small buffers.
+	 * A maximum number of retries is set because, although unlikely, the
+	 * loop could otherwise never end.
 	 */
-	cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
-	rmb();
-	cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
 	for (retry = 0; retry < AT_XDMAC_RESIDUE_MAX_RETRIES; retry++) {
-		rmb();
 		check_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
-
-		if (likely(cur_nda == check_nda))
-			break;
-
-		cur_nda = check_nda;
+		rmb();
+		initd = !!(at_xdmac_chan_read(atchan, AT_XDMAC_CC) & AT_XDMAC_CC_INITD);
 		rmb();
 		cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
+		rmb();
+		cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
+		rmb();
+
+		if ((check_nda == cur_nda) && initd)
+			break;
 	}
 
 	if (unlikely(retry >= AT_XDMAC_RESIDUE_MAX_RETRIES)) {
@@ -1459,6 +1490,19 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 	}
 
 	/*
+	 * Flush FIFO: only relevant when the transfer is source peripheral
+	 * synchronized. Another flush is needed here because CUBC is updated
+	 * when the controller sends the data write command. It could
+	 * otherwise report data that are not yet written to memory or to the
+	 * device. The FIFO flush ensures that data have really been written.
+	 */
+	if ((desc->lld.mbr_cfg & mask) == value) {
+		at_xdmac_write(atxdmac, AT_XDMAC_GSWF, atchan->mask);
+		while (!(at_xdmac_chan_read(atchan, AT_XDMAC_CIS) & AT_XDMAC_CIS_FIS))
+			cpu_relax();
+	}
+
+	/*
 	 * Remove size of all microblocks already transferred and the current
 	 * one. Then add the remaining size to transfer of the current
 	 * microblock.
@@ -2023,7 +2067,7 @@ err_dma_unregister:
 err_clk_disable:
	clk_disable_unprepare(atxdmac->clk);
 err_free_irq:
-	free_irq(atxdmac->irq, atxdmac->dma.dev);
+	free_irq(atxdmac->irq, atxdmac);
 	return ret;
 }
@@ -2039,7 +2083,7 @@ static int at_xdmac_remove(struct platform_device *pdev)
 
 	synchronize_irq(atxdmac->irq);
 
-	free_irq(atxdmac->irq, atxdmac->dma.dev);
+	free_irq(atxdmac->irq, atxdmac);
 	for (i = 0; i < atxdmac->dma.chancnt; i++) {
 		struct at_xdmac_chan *atchan = &atxdmac->chan[i];
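A note on the retry loop above: it only works because descriptors are now 64-bit aligned, so a single NDA read cannot observe a torn pointer, and a snapshot is accepted only when NDA reads identically before and after the CC/CUBC reads with INITD set. Below is a minimal user-space sketch of that sampling discipline, not driver code: the C11 atomic loads stand in for the driver's MMIO reads and rmb() barriers, and every name (sample_residue, the fake registers) is invented for the illustration.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define RESIDUE_MAX_RETRIES 5

/* Hypothetical stand-ins for the controller's NDA/CUBC/CC registers. */
static _Atomic unsigned int reg_nda;	/* next descriptor address */
static _Atomic unsigned int reg_cubc;	/* remaining microblock length */
static _Atomic bool reg_initd = true;	/* false while a descriptor is being fetched */

/* Take a coherent (nda, ubc) snapshot while the "channel" keeps running. */
static int sample_residue(unsigned int *nda, unsigned int *ubc)
{
	unsigned int check_nda, cur_nda, cur_ubc;
	bool initd;
	int retry;

	for (retry = 0; retry < RESIDUE_MAX_RETRIES; retry++) {
		check_nda = atomic_load(&reg_nda);
		initd = atomic_load(&reg_initd);	/* rmb() between reads in the driver */
		cur_ubc = atomic_load(&reg_cubc);
		cur_nda = atomic_load(&reg_nda);

		/*
		 * Keep the snapshot only if no descriptor switch happened
		 * between the two NDA reads and CUBC held a live value
		 * (INITD low means CUBC was being reloaded).
		 */
		if (check_nda == cur_nda && initd) {
			*nda = cur_nda;
			*ubc = cur_ubc;
			return 0;
		}
	}
	return -1;	/* give up, as the driver does after MAX_RETRIES */
}

int main(void)
{
	unsigned int nda, ubc;

	atomic_store(&reg_nda, 0x1000);
	atomic_store(&reg_cubc, 512);

	if (sample_residue(&nda, &ubc) == 0)
		printf("nda=%#x remaining=%u\n", nda, ubc);
	return 0;
}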
diff --git a/kernel/drivers/dma/dw/core.c b/kernel/drivers/dma/dw/core.c
index 4f099ea29..c66133b5e 100644
--- a/kernel/drivers/dma/dw/core.c
+++ b/kernel/drivers/dma/dw/core.c
@@ -130,26 +130,14 @@ static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
 static void dwc_initialize(struct dw_dma_chan *dwc)
 {
 	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
-	struct dw_dma_slave *dws = dwc->chan.private;
 	u32 cfghi = DWC_CFGH_FIFO_MODE;
 	u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);
 
 	if (dwc->initialized == true)
 		return;
 
-	if (dws) {
-		/*
-		 * We need controller-specific data to set up slave
-		 * transfers.
-		 */
-		BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);
-
-		cfghi |= DWC_CFGH_DST_PER(dws->dst_id);
-		cfghi |= DWC_CFGH_SRC_PER(dws->src_id);
-	} else {
-		cfghi |= DWC_CFGH_DST_PER(dwc->dst_id);
-		cfghi |= DWC_CFGH_SRC_PER(dwc->src_id);
-	}
+	cfghi |= DWC_CFGH_DST_PER(dwc->dst_id);
+	cfghi |= DWC_CFGH_SRC_PER(dwc->src_id);
 
 	channel_writel(dwc, CFG_LO, cfglo);
 	channel_writel(dwc, CFG_HI, cfghi);
@@ -936,7 +924,7 @@ bool dw_dma_filter(struct dma_chan *chan, void *param)
 	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
 	struct dw_dma_slave *dws = param;
 
-	if (!dws || dws->dma_dev != chan->device->dev)
+	if (dws->dma_dev != chan->device->dev)
 		return false;
 
 	/* We have to copy data since dws can be temporary storage */
@@ -1160,6 +1148,14 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
 	 * doesn't mean what you think it means), and status writeback.
 	 */
 
+	/*
+	 * We need controller-specific data to set up slave transfers.
+	 */
+	if (chan->private && !dw_dma_filter(chan, chan->private)) {
+		dev_warn(chan2dev(chan), "Wrong controller-specific data\n");
+		return -EINVAL;
+	}
+
 	/* Enable controller here if needed */
 	if (!dw->in_use)
 		dw_dma_on(dw);
@@ -1221,6 +1217,14 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
 	spin_lock_irqsave(&dwc->lock, flags);
 	list_splice_init(&dwc->free_list, &list);
 	dwc->descs_allocated = 0;
+
+	/* Clear custom channel configuration */
+	dwc->src_id = 0;
+	dwc->dst_id = 0;
+
+	dwc->src_master = 0;
+	dwc->dst_master = 0;
+
 	dwc->initialized = false;
 
 	/* Disable interrupts */
diff --git a/kernel/drivers/dma/hsu/hsu.c b/kernel/drivers/dma/hsu/hsu.c
index 823ad728a..efc02b98e 100644
--- a/kernel/drivers/dma/hsu/hsu.c
+++ b/kernel/drivers/dma/hsu/hsu.c
@@ -135,7 +135,7 @@ static u32 hsu_dma_chan_get_sr(struct hsu_dma_chan *hsuc)
 	sr = hsu_chan_readl(hsuc, HSU_CH_SR);
 	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
 
-	return sr;
+	return sr & ~(HSU_CH_SR_DESCE_ANY | HSU_CH_SR_CDESC_ANY);
 }
 
 irqreturn_t hsu_dma_irq(struct hsu_dma_chip *chip, unsigned short nr)
diff --git a/kernel/drivers/dma/hsu/hsu.h b/kernel/drivers/dma/hsu/hsu.h
index f06579c6d..26da2865b 100644
--- a/kernel/drivers/dma/hsu/hsu.h
+++ b/kernel/drivers/dma/hsu/hsu.h
@@ -41,6 +41,9 @@
 #define HSU_CH_SR_DESCTO(x)	BIT(8 + (x))
 #define HSU_CH_SR_DESCTO_ANY	(BIT(11) | BIT(10) | BIT(9) | BIT(8))
 #define HSU_CH_SR_CHE		BIT(15)
+#define HSU_CH_SR_DESCE(x)	BIT(16 + (x))
+#define HSU_CH_SR_DESCE_ANY	(BIT(19) | BIT(18) | BIT(17) | BIT(16))
+#define HSU_CH_SR_CDESC_ANY	(BIT(31) | BIT(30))
 
 /* Bits in HSU_CH_CR */
 #define HSU_CH_CR_CHA		BIT(0)
diff --git a/kernel/drivers/dma/ipu/ipu_irq.c b/kernel/drivers/dma/ipu/ipu_irq.c
index 2bf37e68a..dd184b50e 100644
--- a/kernel/drivers/dma/ipu/ipu_irq.c
+++ b/kernel/drivers/dma/ipu/ipu_irq.c
@@ -286,22 +286,21 @@ static void ipu_irq_handler(struct irq_desc *desc)
 		raw_spin_unlock(&bank_lock);
 		while ((line = ffs(status))) {
 			struct ipu_irq_map *map;
-			unsigned int irq = NO_IRQ;
+			unsigned int irq;
 
 			line--;
 			status &= ~(1UL << line);
 
 			raw_spin_lock(&bank_lock);
 			map = src2map(32 * i + line);
-			if (map)
-				irq = map->irq;
-			raw_spin_unlock(&bank_lock);
-
 			if (!map) {
+				raw_spin_unlock(&bank_lock);
 				pr_err("IPU: Interrupt on unmapped source %u bank %d\n",
 				       line, i);
 				continue;
 			}
+			irq = map->irq;
+			raw_spin_unlock(&bank_lock);
 			generic_handle_irq(irq);
 		}
 	}
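With dwc_initialize() no longer reading chan->private, slave data reaches the driver either through dma_request_channel() with dw_dma_filter(), or through chan->private, which dwc_alloc_chan_resources() now validates with that same filter. A hedged sketch of the first path follows, assuming a 4.4-era tree where struct dw_dma_slave lives in linux/platform_data/dma-dw.h; the request-line and master numbers are example values, and the extern declaration is only there to keep the sketch self-contained.

#include <linux/dmaengine.h>
#include <linux/platform_data/dma-dw.h>

/* Defined (non-static) in drivers/dma/dw/core.c; declared here for the sketch. */
extern bool dw_dma_filter(struct dma_chan *chan, void *param);

static struct dma_chan *request_dw_channel(struct device *dmac_dev)
{
	struct dw_dma_slave dws = {
		.dma_dev    = dmac_dev,	/* must match chan->device->dev */
		.src_id     = 0,	/* example hardware request lines */
		.dst_id     = 1,
		.src_master = 0,	/* example master interfaces */
		.dst_master = 1,
	};
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* dw_dma_filter() copies dws into the channel, so it may live on the stack. */
	return dma_request_channel(mask, dw_dma_filter, &dws);
}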
diff --git a/kernel/drivers/dma/pl330.c b/kernel/drivers/dma/pl330.c
index 17ee758b4..8250950aa 100644
--- a/kernel/drivers/dma/pl330.c
+++ b/kernel/drivers/dma/pl330.c
@@ -445,6 +445,9 @@ struct dma_pl330_chan {
 
 	/* for cyclic capability */
 	bool cyclic;
+
+	/* for runtime pm tracking */
+	bool active;
 };
 
 struct pl330_dmac {
@@ -1994,6 +1997,7 @@ static void pl330_tasklet(unsigned long data)
 			_stop(pch->thread);
 			spin_unlock(&pch->thread->dmac->lock);
 			power_down = true;
+			pch->active = false;
 		} else {
 			/* Make sure the PL330 Channel thread is active */
 			spin_lock(&pch->thread->dmac->lock);
@@ -2015,6 +2019,7 @@ static void pl330_tasklet(unsigned long data)
 			desc->status = PREP;
 			list_move_tail(&desc->node, &pch->work_list);
 			if (power_down) {
+				pch->active = true;
 				spin_lock(&pch->thread->dmac->lock);
 				_start(pch->thread);
 				spin_unlock(&pch->thread->dmac->lock);
@@ -2129,6 +2134,7 @@ static int pl330_terminate_all(struct dma_chan *chan)
 	unsigned long flags;
 	struct pl330_dmac *pl330 = pch->dmac;
 	LIST_HEAD(list);
+	bool power_down = false;
 
 	pm_runtime_get_sync(pl330->ddma.dev);
 	spin_lock_irqsave(&pch->lock, flags);
@@ -2139,6 +2145,8 @@ static int pl330_terminate_all(struct dma_chan *chan)
 	pch->thread->req[0].desc = NULL;
 	pch->thread->req[1].desc = NULL;
 	pch->thread->req_running = -1;
+	power_down = pch->active;
+	pch->active = false;
 
 	/* Mark all desc done */
 	list_for_each_entry(desc, &pch->submitted_list, node) {
@@ -2156,6 +2164,8 @@ static int pl330_terminate_all(struct dma_chan *chan)
 	list_splice_tail_init(&pch->completed_list, &pl330->desc_pool);
 	spin_unlock_irqrestore(&pch->lock, flags);
 	pm_runtime_mark_last_busy(pl330->ddma.dev);
+	if (power_down)
+		pm_runtime_put_autosuspend(pl330->ddma.dev);
 	pm_runtime_put_autosuspend(pl330->ddma.dev);
 
 	return 0;
@@ -2302,6 +2312,7 @@ static void pl330_issue_pending(struct dma_chan *chan)
 		 * updated on work_list emptiness status.
 		 */
 		WARN_ON(list_empty(&pch->submitted_list));
+		pch->active = true;
 		pm_runtime_get_sync(pch->dmac->ddma.dev);
 	}
 	list_splice_tail_init(&pch->submitted_list, &pch->work_list);
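The pl330 hunks all serve one invariant: the pm_runtime_get_sync() taken in pl330_issue_pending() must be matched by exactly one put, whether the queue later drains in the tasklet or pl330_terminate_all() kills the transfer first. Below is a simplified user-space model of that bookkeeping, with a plain counter standing in for the device's runtime-PM usage count; all names are invented for the demo and the real driver's locking and its own get/put pair in terminate_all are omitted.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct chan {
	bool active;	/* channel currently holds a runtime-PM reference */
	int pm_refs;	/* stands in for the device usage counter */
};

static void issue_pending(struct chan *c)
{
	if (!c->active) {		/* work list was empty */
		c->active = true;
		c->pm_refs++;		/* pm_runtime_get_sync() */
	}
}

static void tasklet_drained(struct chan *c)
{
	if (c->active) {		/* queue ran dry: power down */
		c->active = false;
		c->pm_refs--;		/* pm_runtime_put_autosuspend() */
	}
}

static void terminate_all(struct chan *c)
{
	bool power_down = c->active;	/* the fix: remember the pending ref */

	c->active = false;
	if (power_down)
		c->pm_refs--;		/* extra put for the killed transfer */
}

int main(void)
{
	struct chan c = { 0 };

	issue_pending(&c);
	terminate_all(&c);		/* no tasklet ran in between */
	assert(c.pm_refs == 0);		/* balanced: no reference leak */

	issue_pending(&c);
	tasklet_drained(&c);
	terminate_all(&c);
	assert(c.pm_refs == 0);		/* and no double put either */
	printf("pm refs balanced\n");
	return 0;
}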
diff --git a/kernel/drivers/dma/pxa_dma.c b/kernel/drivers/dma/pxa_dma.c
index a59061e42..55f5d33f6 100644
--- a/kernel/drivers/dma/pxa_dma.c
+++ b/kernel/drivers/dma/pxa_dma.c
@@ -122,6 +122,7 @@ struct pxad_chan {
 struct pxad_device {
 	struct dma_device		slave;
 	int				nr_chans;
+	int				nr_requestors;
 	void __iomem			*base;
 	struct pxad_phy			*phys;
 	spinlock_t			phy_lock;	/* Phy association */
@@ -473,7 +474,7 @@ static void pxad_free_phy(struct pxad_chan *chan)
 		return;
 
 	/* clear the channel mapping in DRCMR */
-	if (chan->drcmr <= DRCMR_CHLNUM) {
+	if (chan->drcmr <= pdev->nr_requestors) {
 		reg = pxad_drcmr(chan->drcmr);
 		writel_relaxed(0, chan->phy->base + reg);
 	}
@@ -509,6 +510,7 @@ static bool is_running_chan_misaligned(struct pxad_chan *chan)
 
 static void phy_enable(struct pxad_phy *phy, bool misaligned)
 {
+	struct pxad_device *pdev;
 	u32 reg, dalgn;
 
 	if (!phy->vchan)
@@ -518,7 +520,8 @@ static void phy_enable(struct pxad_phy *phy, bool misaligned)
 		"%s(); phy=%p(%d) misaligned=%d\n", __func__,
 		phy, phy->idx, misaligned);
 
-	if (phy->vchan->drcmr <= DRCMR_CHLNUM) {
+	pdev = to_pxad_dev(phy->vchan->vc.chan.device);
+	if (phy->vchan->drcmr <= pdev->nr_requestors) {
 		reg = pxad_drcmr(phy->vchan->drcmr);
 		writel_relaxed(DRCMR_MAPVLD | phy->idx, phy->base + reg);
 	}
@@ -914,6 +917,7 @@ static void pxad_get_config(struct pxad_chan *chan,
 {
 	u32 maxburst = 0, dev_addr = 0;
 	enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
+	struct pxad_device *pdev = to_pxad_dev(chan->vc.chan.device);
 
 	*dcmd = 0;
 	if (dir == DMA_DEV_TO_MEM) {
@@ -922,7 +926,7 @@ static void pxad_get_config(struct pxad_chan *chan,
 		dev_addr = chan->cfg.src_addr;
 		*dev_src = dev_addr;
 		*dcmd |= PXA_DCMD_INCTRGADDR;
-		if (chan->drcmr <= DRCMR_CHLNUM)
+		if (chan->drcmr <= pdev->nr_requestors)
 			*dcmd |= PXA_DCMD_FLOWSRC;
 	}
 	if (dir == DMA_MEM_TO_DEV) {
@@ -931,7 +935,7 @@ static void pxad_get_config(struct pxad_chan *chan,
 		dev_addr = chan->cfg.dst_addr;
 		*dev_dst = dev_addr;
 		*dcmd |= PXA_DCMD_INCSRCADDR;
-		if (chan->drcmr <= DRCMR_CHLNUM)
+		if (chan->drcmr <= pdev->nr_requestors)
 			*dcmd |= PXA_DCMD_FLOWTRG;
 	}
 	if (dir == DMA_MEM_TO_MEM)
@@ -1341,13 +1345,15 @@ static struct dma_chan *pxad_dma_xlate(struct of_phandle_args *dma_spec,
 
 static int pxad_init_dmadev(struct platform_device *op,
 			    struct pxad_device *pdev,
-			    unsigned int nr_phy_chans)
+			    unsigned int nr_phy_chans,
+			    unsigned int nr_requestors)
 {
 	int ret;
 	unsigned int i;
 	struct pxad_chan *c;
 
 	pdev->nr_chans = nr_phy_chans;
+	pdev->nr_requestors = nr_requestors;
 	INIT_LIST_HEAD(&pdev->slave.channels);
 	pdev->slave.device_alloc_chan_resources = pxad_alloc_chan_resources;
 	pdev->slave.device_free_chan_resources = pxad_free_chan_resources;
@@ -1382,7 +1388,7 @@ static int pxad_probe(struct platform_device *op)
 	const struct of_device_id *of_id;
 	struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev);
 	struct resource *iores;
-	int ret, dma_channels = 0;
+	int ret, dma_channels = 0, nb_requestors = 0;
 	const enum dma_slave_buswidth widths =
 		DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES |
 		DMA_SLAVE_BUSWIDTH_4_BYTES;
@@ -1399,13 +1405,23 @@ static int pxad_probe(struct platform_device *op)
 		return PTR_ERR(pdev->base);
 
 	of_id = of_match_device(pxad_dt_ids, &op->dev);
-	if (of_id)
+	if (of_id) {
 		of_property_read_u32(op->dev.of_node, "#dma-channels",
 				     &dma_channels);
-	else if (pdata && pdata->dma_channels)
+		ret = of_property_read_u32(op->dev.of_node, "#dma-requests",
					   &nb_requestors);
+		if (ret) {
+			dev_warn(pdev->slave.dev,
+				 "#dma-requests set to default 32 as missing in OF: %d",
+				 ret);
+			nb_requestors = 32;
+		}
+	} else if (pdata && pdata->dma_channels) {
 		dma_channels = pdata->dma_channels;
-	else
+		nb_requestors = pdata->nb_requestors;
+	} else {
 		dma_channels = 32;	/* default 32 channel */
+	}
 
 	dma_cap_set(DMA_SLAVE, pdev->slave.cap_mask);
 	dma_cap_set(DMA_MEMCPY, pdev->slave.cap_mask);
@@ -1422,7 +1438,7 @@ static int pxad_probe(struct platform_device *op)
 	pdev->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
 
 	pdev->slave.dev = &op->dev;
-	ret = pxad_init_dmadev(op, pdev, dma_channels);
+	ret = pxad_init_dmadev(op, pdev, dma_channels, nb_requestors);
 	if (ret) {
 		dev_err(pdev->slave.dev, "unable to register\n");
 		return ret;
@@ -1441,7 +1457,8 @@ static int pxad_probe(struct platform_device *op)
 	platform_set_drvdata(op, pdev);
 	pxad_init_debugfs(pdev);
 
-	dev_info(pdev->slave.dev, "initialized %d channels\n", dma_channels);
+	dev_info(pdev->slave.dev, "initialized %d channels on %d requestors\n",
+		 dma_channels, nb_requestors);
 	return 0;
 }
diff --git a/kernel/drivers/dma/sh/usb-dmac.c b/kernel/drivers/dma/sh/usb-dmac.c
index f1bcc2a16..b1bc945f0 100644
--- a/kernel/drivers/dma/sh/usb-dmac.c
+++ b/kernel/drivers/dma/sh/usb-dmac.c
@@ -600,27 +600,30 @@ static irqreturn_t usb_dmac_isr_channel(int irq, void *dev)
 {
 	struct usb_dmac_chan *chan = dev;
 	irqreturn_t ret = IRQ_NONE;
-	u32 mask = USB_DMACHCR_TE;
-	u32 check_bits = USB_DMACHCR_TE | USB_DMACHCR_SP;
+	u32 mask = 0;
 	u32 chcr;
+	bool xfer_end = false;
 
 	spin_lock(&chan->vc.lock);
 
 	chcr = usb_dmac_chan_read(chan, USB_DMACHCR);
-	if (chcr & check_bits)
-		mask |= USB_DMACHCR_DE | check_bits;
+	if (chcr & (USB_DMACHCR_TE | USB_DMACHCR_SP)) {
+		mask |= USB_DMACHCR_DE | USB_DMACHCR_TE | USB_DMACHCR_SP;
+		if (chcr & USB_DMACHCR_DE)
+			xfer_end = true;
+		ret |= IRQ_HANDLED;
+	}
 	if (chcr & USB_DMACHCR_NULL) {
 		/* An interruption of TE will happen after we set FTE */
 		mask |= USB_DMACHCR_NULL;
 		chcr |= USB_DMACHCR_FTE;
 		ret |= IRQ_HANDLED;
 	}
-	usb_dmac_chan_write(chan, USB_DMACHCR, chcr & ~mask);
+	if (mask)
+		usb_dmac_chan_write(chan, USB_DMACHCR, chcr & ~mask);
 
-	if (chcr & check_bits) {
+	if (xfer_end)
 		usb_dmac_isr_transfer_end(chan);
-		ret |= IRQ_HANDLED;
-	}
 
 	spin_unlock(&chan->vc.lock);
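The usb-dmac change hinges on one decision: TE/SP only mean "transfer end" if the channel was still enabled (CHCR.DE set) when the interrupt fired; a leftover TE seen after DE was cleared must be acknowledged but not completed. Here is a pure-function model of that decision with illustrative bit positions (the real USB_DMACHCR_* values may differ); everything in it is invented for the demo.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CHCR_DE (1u << 0)	/* channel enable */
#define CHCR_TE (1u << 1)	/* transfer end */
#define CHCR_SP (1u << 2)	/* transfer stopped (suspended) */

/* Returns true when the handler should run the transfer-end path. */
static bool isr_xfer_end(uint32_t chcr, uint32_t *clear_mask)
{
	*clear_mask = 0;
	if (chcr & (CHCR_TE | CHCR_SP)) {
		*clear_mask = CHCR_DE | CHCR_TE | CHCR_SP;
		return chcr & CHCR_DE;	/* the fix: require DE as well */
	}
	return false;
}

int main(void)
{
	uint32_t mask;

	/* Normal completion: DE still set when TE arrives. */
	assert(isr_xfer_end(CHCR_DE | CHCR_TE, &mask));

	/* Stale TE after the channel was already disabled: ignore it... */
	assert(!isr_xfer_end(CHCR_TE, &mask));
	/* ...but the bits are still acknowledged (cleared) in hardware. */
	assert(mask == (CHCR_DE | CHCR_TE | CHCR_SP));

	puts("usb-dmac isr model ok");
	return 0;
}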