Diffstat (limited to 'kernel/arch/arc/mm/dma.c')
-rw-r--r--	kernel/arch/arc/mm/dma.c	46
1 file changed, 36 insertions(+), 10 deletions(-)
diff --git a/kernel/arch/arc/mm/dma.c b/kernel/arch/arc/mm/dma.c
index 12cc6485b..29a46bb19 100644
--- a/kernel/arch/arc/mm/dma.c
+++ b/kernel/arch/arc/mm/dma.c
@@ -14,13 +14,12 @@
  * Cache bit off in the TLB entry.
  *
  * The default DMA address == Phy address which is 0x8000_0000 based.
- * A platform/device can make it zero based, by over-riding
- * plat_{dma,kernel}_addr_to_{kernel,dma}
  */
 
 #include <linux/dma-mapping.h>
 #include <linux/dma-debug.h>
 #include <linux/export.h>
+#include <asm/cache.h>
 #include <asm/cacheflush.h>
 
 /*
@@ -37,7 +36,7 @@ void *dma_alloc_noncoherent(struct device *dev, size_t size,
 		return NULL;
 
 	/* This is bus address, platform dependent */
-	*dma_handle = plat_kernel_addr_to_dma(dev, paddr);
+	*dma_handle = (dma_addr_t)paddr;
 
 	return paddr;
 }
@@ -46,8 +45,7 @@ EXPORT_SYMBOL(dma_alloc_noncoherent);
 
 void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
 			  dma_addr_t dma_handle)
 {
-	free_pages_exact((void *)plat_dma_addr_to_kernel(dev, dma_handle),
-			 size);
+	free_pages_exact((void *)dma_handle, size);
 }
 EXPORT_SYMBOL(dma_free_noncoherent);
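
With the plat_* hooks gone, a buffer's bus address and its kernel linear (physical-based) address coincide, so the conversion in each direction is just a cast. A minimal sketch of the identity mapping the patch now assumes (variable names are illustrative, not from the commit):

	void *paddr;			/* kernel linear address, 0x8000_0000 based */
	dma_addr_t handle;

	handle = (dma_addr_t)paddr;	/* kernel -> bus, as in dma_alloc_noncoherent() */
	paddr  = (void *)handle;	/* bus -> kernel, as in dma_free_noncoherent() */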
@@ -56,6 +54,20 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
 {
 	void *paddr, *kvaddr;
 
+	/*
+	 * IOC relies on all data (even coherent DMA data) being in cache.
+	 * Thus allocate normal cached memory.
+	 *
+	 * The gains with IOC are two pronged:
+	 * -For streaming data, it elides the need for cache maintenance,
+	 *  saving cycles in flush code and bus bandwidth, since otherwise
+	 *  all the lines of a buffer would need to be flushed out to memory
+	 * -For coherent data, Reads/Writes to buffers terminate early in
+	 *  cache (vs. always going to memory) and are thus faster
+	 */
+	if (is_isa_arcv2() && ioc_exists)
+		return dma_alloc_noncoherent(dev, size, dma_handle, gfp);
+
 	/* This is linear addr (0x8000_0000 based) */
 	paddr = alloc_pages_exact(size, gfp);
 	if (!paddr)
@@ -63,11 +75,23 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
 
 	/* This is kernel Virtual address (0x7000_0000 based) */
 	kvaddr = ioremap_nocache((unsigned long)paddr, size);
-	if (kvaddr != NULL)
-		memset(kvaddr, 0, size);
+	if (kvaddr == NULL)
+		return NULL;
 
 	/* This is bus address, platform dependent */
-	*dma_handle = plat_kernel_addr_to_dma(dev, paddr);
+	*dma_handle = (dma_addr_t)paddr;
+
+	/*
+	 * Evict any existing L1 and/or L2 lines for the backing page in
+	 * case it was used earlier as a normal "cached" page.
+	 * Yeah this bit us - STAR 9000898266
+	 *
+	 * Although the core does call flush_cache_vmap(), it gets the
+	 * kvaddr, hence can't be used to efficiently flush L1 and/or L2,
+	 * which need the paddr. Currently flush_cache_vmap() nukes the L1
+	 * cache completely; that will be optimized in a separate commit.
+	 */
+	dma_cache_wback_inv((unsigned long)paddr, size);
 
 	return kvaddr;
 }
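
To make the STAR reference above concrete: the hazard being closed is an alias between the page's earlier life as normal cached memory and its new uncached ioremap mapping. A simplified sketch of the failure mode, reusing the calls named in the patch (illustrative only, not code from the commit):

	#include <linux/mm.h>		/* alloc_pages_exact / free_pages_exact */
	#include <linux/io.h>		/* ioremap_nocache / iounmap */
	#include <linux/string.h>	/* memset */
	#include <asm/cacheflush.h>	/* dma_cache_wback_inv (ARC) */

	static void stale_alias_sketch(void)
	{
		void *paddr;
		void __iomem *kvaddr;

		/* Page once used as ordinary cached memory: dirty L1/L2
		 * lines stay behind, tagged with its physical address */
		paddr = alloc_pages_exact(PAGE_SIZE, GFP_KERNEL);
		if (!paddr)
			return;
		memset(paddr, 0xaa, PAGE_SIZE);

		/* Same page re-purposed as "coherent" DMA memory via an
		 * uncached alias: CPU accesses bypass those stale lines */
		kvaddr = ioremap_nocache((unsigned long)paddr, PAGE_SIZE);
		if (!kvaddr) {
			free_pages_exact(paddr, PAGE_SIZE);
			return;
		}

		/* A device DMAs straight to memory; if the stale dirty lines
		 * were evicted later, they would silently clobber that data.
		 * The up-front writeback+invalidate added by the patch closes
		 * the window */
		dma_cache_wback_inv((unsigned long)paddr, PAGE_SIZE);

		iounmap(kvaddr);
		free_pages_exact(paddr, PAGE_SIZE);
	}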
@@ -76,10 +100,12 @@ EXPORT_SYMBOL(dma_alloc_coherent);
 
 void dma_free_coherent(struct device *dev, size_t size, void *kvaddr,
 		       dma_addr_t dma_handle)
 {
+	if (is_isa_arcv2() && ioc_exists)
+		return dma_free_noncoherent(dev, size, kvaddr, dma_handle);
+
 	iounmap((void __force __iomem *)kvaddr);
 
-	free_pages_exact((void *)plat_dma_addr_to_kernel(dev, dma_handle),
-			 size);
+	free_pages_exact((void *)dma_handle, size);
 }
 EXPORT_SYMBOL(dma_free_coherent);
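
Note that the driver-facing contract is unchanged by all of the above: the CPU uses the returned pointer, the device is programmed with *dma_handle, and both are passed back at free time. A minimal consumer sketch (RING_BYTES, my_setup_ring and the commented register write are hypothetical, not part of this commit):

	#include <linux/dma-mapping.h>

	#define RING_BYTES	4096	/* hypothetical descriptor ring size */

	static void *ring;		/* CPU view: uncached kvaddr, or plain
					 * cached memory on IOC-enabled ARCv2 */
	static dma_addr_t ring_dma;	/* device/bus view: == paddr on ARC */

	static int my_setup_ring(struct device *dev)
	{
		ring = dma_alloc_coherent(dev, RING_BYTES, &ring_dma,
					  GFP_KERNEL);
		if (!ring)
			return -ENOMEM;

		/* Program the device with the bus address, never the CPU
		 * pointer, e.g.:
		 * writel(ring_dma, base + RING_BASE);	(hypothetical) */
		return 0;
	}

	static void my_teardown_ring(struct device *dev)
	{
		dma_free_coherent(dev, RING_BYTES, ring, ring_dma);
	}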