Diffstat (limited to 'kernel/arch/mips/mm/dma-default.c')
-rw-r--r--    kernel/arch/mips/mm/dma-default.c    90
1 file changed, 69 insertions(+), 21 deletions(-)
diff --git a/kernel/arch/mips/mm/dma-default.c b/kernel/arch/mips/mm/dma-default.c
index 609d1241b..730d394ce 100644
--- a/kernel/arch/mips/mm/dma-default.c
+++ b/kernel/arch/mips/mm/dma-default.c
@@ -100,7 +100,7 @@ static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
else
#endif
#if defined(CONFIG_ZONE_DMA) && !defined(CONFIG_ZONE_DMA32)
- if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
+ if (dev->coherent_dma_mask < DMA_BIT_MASK(sizeof(phys_addr_t) * 8))
dma_flag = __GFP_DMA;
else
#endif
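
A side note, not part of the patch: a worked comparison of the old and new ZONE_DMA bound, using the standard DMA_BIT_MASK() macro from <linux/dma-mapping.h>.

/*
 * #define DMA_BIT_MASK(n)  (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))
 *
 * On a configuration where phys_addr_t is 32 bits wide:
 *
 *   old bound: DMA_BIT_MASK(64)                      == 0xffffffffffffffff
 *   new bound: DMA_BIT_MASK(sizeof(phys_addr_t) * 8) == 0x00000000ffffffff
 *
 * A device with coherent_dma_mask == DMA_BIT_MASK(32) can already reach
 * every physical address here, so it no longer takes the __GFP_DMA branch.
 */
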
@@ -112,7 +112,7 @@ static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
return gfp | dma_flag;
}
-void *dma_alloc_noncoherent(struct device *dev, size_t size,
+static void *mips_dma_alloc_noncoherent(struct device *dev, size_t size,
dma_addr_t * dma_handle, gfp_t gfp)
{
void *ret;
@@ -128,7 +128,6 @@ void *dma_alloc_noncoherent(struct device *dev, size_t size,
return ret;
}
-EXPORT_SYMBOL(dma_alloc_noncoherent);
static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t * dma_handle, gfp_t gfp, struct dma_attrs *attrs)
@@ -137,12 +136,16 @@ static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
struct page *page = NULL;
unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
- if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
- return ret;
+ /*
+ * XXX: seems like the coherent and non-coherent implementations could
+ * be consolidated.
+ */
+ if (dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs))
+ return mips_dma_alloc_noncoherent(dev, size, dma_handle, gfp);
gfp = massage_gfp_flags(dev, gfp);
- if (IS_ENABLED(CONFIG_DMA_CMA) && !(gfp & GFP_ATOMIC))
+ if (IS_ENABLED(CONFIG_DMA_CMA) && gfpflags_allow_blocking(gfp))
page = dma_alloc_from_contiguous(dev,
count, get_order(size));
if (!page)
@@ -164,24 +167,24 @@ static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
}
-void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle)
+static void mips_dma_free_noncoherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle)
{
plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);
free_pages((unsigned long) vaddr, get_order(size));
}
-EXPORT_SYMBOL(dma_free_noncoherent);
static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_handle, struct dma_attrs *attrs)
{
unsigned long addr = (unsigned long) vaddr;
- int order = get_order(size);
unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
struct page *page = NULL;
- if (dma_release_from_coherent(dev, order, vaddr))
+ if (dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs)) {
+ mips_dma_free_noncoherent(dev, size, vaddr, dma_handle);
return;
+ }
plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);
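
For context, a minimal driver-side sketch (not part of the patch) of how the DMA_ATTR_NON_CONSISTENT path above is reached through the struct dma_attrs API of this kernel era; the function names and sizes are placeholders:

#include <linux/dma-mapping.h>
#include <linux/dma-attrs.h>

static void *example_alloc_noncoherent(struct device *dev, size_t size,
                                        dma_addr_t *handle)
{
        DEFINE_DMA_ATTRS(attrs);
        void *vaddr;

        /* Routes to the DMA_ATTR_NON_CONSISTENT checks in the ops above. */
        dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);

        vaddr = dma_alloc_attrs(dev, size, handle, GFP_KERNEL, &attrs);
        if (!vaddr)
                return NULL;

        /*
         * Non-consistent memory leaves cache maintenance to the caller,
         * e.g. dma_cache_sync(dev, vaddr, size, dir) around device access.
         */
        return vaddr;
}

static void example_free_noncoherent(struct device *dev, size_t size,
                                     void *vaddr, dma_addr_t handle)
{
        DEFINE_DMA_ATTRS(attrs);

        dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
        dma_free_attrs(dev, size, vaddr, handle, &attrs);
}
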
@@ -194,6 +197,40 @@ static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
__free_pages(page, get_order(size));
}
+static int mips_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ struct dma_attrs *attrs)
+{
+ unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+ unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ unsigned long addr = (unsigned long)cpu_addr;
+ unsigned long off = vma->vm_pgoff;
+ unsigned long pfn;
+ int ret = -ENXIO;
+
+ if (!plat_device_is_coherent(dev) && !hw_coherentio)
+ addr = CAC_ADDR(addr);
+
+ pfn = page_to_pfn(virt_to_page((void *)addr));
+
+ if (dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs))
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+ else
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+ return ret;
+
+ if (off < count && user_count <= (count - off)) {
+ ret = remap_pfn_range(vma, vma->vm_start,
+ pfn + off,
+ user_count << PAGE_SHIFT,
+ vma->vm_page_prot);
+ }
+
+ return ret;
+}
+
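
A hedged sketch (not part of the patch) of how a driver reaches the new .mmap handler: dma_mmap_coherent() dispatches through dma_map_ops.mmap, i.e. mips_dma_mmap() above; the my_buf structure and file_operations hookup are hypothetical:

#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/mm.h>

struct my_buf {
        struct device   *dev;
        void            *cpu_addr;      /* from dma_alloc_coherent() */
        dma_addr_t      dma_handle;
        size_t          size;
};

static int my_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct my_buf *buf = file->private_data;

        /*
         * Ends up in mips_dma_mmap(), which remaps the (possibly uncached)
         * kernel buffer into the calling process with the right pgprot.
         */
        return dma_mmap_coherent(buf->dev, vma, buf->cpu_addr,
                                 buf->dma_handle, buf->size);
}
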
static inline void __dma_sync_virtual(void *addr, size_t size,
enum dma_data_direction direction)
{
@@ -262,12 +299,13 @@ static void mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
plat_unmap_dma_mem(dev, dma_addr, size, direction);
}
-static int mips_dma_map_sg(struct device *dev, struct scatterlist *sg,
+static int mips_dma_map_sg(struct device *dev, struct scatterlist *sglist,
int nents, enum dma_data_direction direction, struct dma_attrs *attrs)
{
int i;
+ struct scatterlist *sg;
- for (i = 0; i < nents; i++, sg++) {
+ for_each_sg(sglist, sg, nents, i) {
if (!plat_device_is_coherent(dev))
__dma_sync(sg_page(sg), sg->offset, sg->length,
direction);
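
For reference, a driver-side sketch (not part of the patch) of the scatterlist usage that motivates the for_each_sg() conversion: tables built with sg_alloc_table() may be chained across pages, so a plain sg++ walk is not guaranteed to visit every entry; names and sizes are placeholders:

#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>

static int example_map_pages(struct device *dev, struct page **pages,
                             int npages, enum dma_data_direction dir)
{
        struct sg_table sgt;
        struct scatterlist *sg;
        int i, nents;

        /* For large npages this may build a chained scatterlist. */
        if (sg_alloc_table(&sgt, npages, GFP_KERNEL))
                return -ENOMEM;

        for_each_sg(sgt.sgl, sg, npages, i)
                sg_set_page(sg, pages[i], PAGE_SIZE, 0);

        /* Ends up in mips_dma_map_sg(), which now also uses for_each_sg(). */
        nents = dma_map_sg(dev, sgt.sgl, sgt.nents, dir);
        if (!nents) {
                sg_free_table(&sgt);
                return -EIO;
        }

        /* ... hand (sgt.sgl, nents) to the device, wait for completion ... */

        dma_unmap_sg(dev, sgt.sgl, sgt.nents, dir);
        sg_free_table(&sgt);
        return 0;
}
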
@@ -291,13 +329,14 @@ static dma_addr_t mips_dma_map_page(struct device *dev, struct page *page,
return plat_map_dma_mem_page(dev, page) + offset;
}
-static void mips_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+static void mips_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
int nhwentries, enum dma_data_direction direction,
struct dma_attrs *attrs)
{
int i;
+ struct scatterlist *sg;
- for (i = 0; i < nhwentries; i++, sg++) {
+ for_each_sg(sglist, sg, nhwentries, i) {
if (!plat_device_is_coherent(dev) &&
direction != DMA_TO_DEVICE)
__dma_sync(sg_page(sg), sg->offset, sg->length,
@@ -324,26 +363,34 @@ static void mips_dma_sync_single_for_device(struct device *dev,
}
static void mips_dma_sync_sg_for_cpu(struct device *dev,
- struct scatterlist *sg, int nelems, enum dma_data_direction direction)
+ struct scatterlist *sglist, int nelems,
+ enum dma_data_direction direction)
{
int i;
+ struct scatterlist *sg;
- if (cpu_needs_post_dma_flush(dev))
- for (i = 0; i < nelems; i++, sg++)
+ if (cpu_needs_post_dma_flush(dev)) {
+ for_each_sg(sglist, sg, nelems, i) {
__dma_sync(sg_page(sg), sg->offset, sg->length,
direction);
+ }
+ }
plat_post_dma_flush(dev);
}
static void mips_dma_sync_sg_for_device(struct device *dev,
- struct scatterlist *sg, int nelems, enum dma_data_direction direction)
+ struct scatterlist *sglist, int nelems,
+ enum dma_data_direction direction)
{
int i;
+ struct scatterlist *sg;
- if (!plat_device_is_coherent(dev))
- for (i = 0; i < nelems; i++, sg++)
+ if (!plat_device_is_coherent(dev)) {
+ for_each_sg(sglist, sg, nelems, i) {
__dma_sync(sg_page(sg), sg->offset, sg->length,
direction);
+ }
+ }
}
int mips_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
@@ -370,6 +417,7 @@ EXPORT_SYMBOL(dma_cache_sync);
static struct dma_map_ops mips_default_dma_map_ops = {
.alloc = mips_dma_alloc_coherent,
.free = mips_dma_free_coherent,
+ .mmap = mips_dma_mmap,
.map_page = mips_dma_map_page,
.unmap_page = mips_dma_unmap_page,
.map_sg = mips_dma_map_sg,