Diffstat (limited to 'kernel/drivers/media/v4l2-core/videobuf2-dma-sg.c')
-rw-r--r--  kernel/drivers/media/v4l2-core/videobuf2-dma-sg.c | 120
1 file changed, 32 insertions(+), 88 deletions(-)
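
In short, the diff below does three things: it switches the driver from the monolithic <media/videobuf2-core.h> header to <media/videobuf2-v4l2.h>; it replaces the hand-rolled vma lookup, vma_is_io() special-casing and get_user_pages() pinning in vb2_dma_sg_get_userptr() with the frame vector helpers vb2_create_framevec() and vb2_destroy_framevec(); and it makes every dma_map/unmap/sync call pass sgt->orig_nents, caching the segment count returned by the map call in sgt->nents.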
diff --git a/kernel/drivers/media/v4l2-core/videobuf2-dma-sg.c b/kernel/drivers/media/v4l2-core/videobuf2-dma-sg.c
index 45c708e46..9985c89f0 100644
--- a/kernel/drivers/media/v4l2-core/videobuf2-dma-sg.c
+++ b/kernel/drivers/media/v4l2-core/videobuf2-dma-sg.c
@@ -17,7 +17,7 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
-#include <media/videobuf2-core.h>
+#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-memops.h>
#include <media/videobuf2-dma-sg.h>
@@ -38,6 +38,7 @@ struct vb2_dma_sg_buf {
struct device *dev;
void *vaddr;
struct page **pages;
+ struct frame_vector *vec;
int offset;
enum dma_data_direction dma_dir;
struct sg_table sg_table;
@@ -51,7 +52,6 @@ struct vb2_dma_sg_buf {
unsigned int num_pages;
atomic_t refcount;
struct vb2_vmarea_handler handler;
- struct vm_area_struct *vma;
struct dma_buf_attachment *db_attach;
};
@@ -147,8 +147,9 @@ static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size,
* No need to sync to the device, this will happen later when the
* prepare() memop is called.
*/
- if (dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->nents,
- buf->dma_dir, &attrs) == 0)
+ sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
+ buf->dma_dir, &attrs);
+ if (!sgt->nents)
goto fail_map;
buf->handler.refcount = &buf->refcount;
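
A note on the pattern this hunk introduces, which the unmap and sync hunks below mirror: dma_map_sg_attrs() may coalesce scatterlist entries behind an IOMMU, so the count it returns can be smaller than the number of entries built on the CPU side. A minimal sketch of the convention, against this tree's struct dma_attrs API; the helper name is invented for illustration:

    #include <linux/dma-mapping.h>
    #include <linux/scatterlist.h>

    /* Hypothetical helper showing the nents/orig_nents convention. */
    static int example_map_sgtable(struct device *dev, struct sg_table *sgt,
                                   enum dma_data_direction dir,
                                   struct dma_attrs *attrs)
    {
            /* Map the full CPU-built list: always pass orig_nents here. */
            sgt->nents = dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents,
                                          dir, attrs);
            if (!sgt->nents)        /* a return of 0 means mapping failed */
                    return -EIO;
            /*
             * sgt->nents now holds the number of DMA segments, possibly
             * fewer than orig_nents.  dma_unmap_sg_attrs() and the
             * dma_sync_sg_*() calls still take orig_nents, as the
             * following hunks show.
             */
            return 0;
    }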
@@ -187,7 +188,7 @@ static void vb2_dma_sg_put(void *buf_priv)
dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
dprintk(1, "%s: Freeing buffer of %d pages\n", __func__,
buf->num_pages);
- dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->nents,
+ dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
buf->dma_dir, &attrs);
if (buf->vaddr)
vm_unmap_ram(buf->vaddr, buf->num_pages);
@@ -209,7 +210,8 @@ static void vb2_dma_sg_prepare(void *buf_priv)
if (buf->db_attach)
return;
- dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
+ dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->orig_nents,
+ buf->dma_dir);
}
static void vb2_dma_sg_finish(void *buf_priv)
@@ -221,12 +223,7 @@ static void vb2_dma_sg_finish(void *buf_priv)
if (buf->db_attach)
return;
- dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
-}
-
-static inline int vma_is_io(struct vm_area_struct *vma)
-{
- return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
+ dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
}
static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
@@ -235,14 +232,11 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
{
struct vb2_dma_sg_conf *conf = alloc_ctx;
struct vb2_dma_sg_buf *buf;
- unsigned long first, last;
- int num_pages_from_user;
- struct vm_area_struct *vma;
struct sg_table *sgt;
DEFINE_DMA_ATTRS(attrs);
+ struct frame_vector *vec;
dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
-
buf = kzalloc(sizeof *buf, GFP_KERNEL);
if (!buf)
return NULL;
@@ -253,85 +247,37 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
buf->offset = vaddr & ~PAGE_MASK;
buf->size = size;
buf->dma_sgt = &buf->sg_table;
+ vec = vb2_create_framevec(vaddr, size, buf->dma_dir == DMA_FROM_DEVICE);
+ if (IS_ERR(vec))
+ goto userptr_fail_pfnvec;
+ buf->vec = vec;
- first = (vaddr & PAGE_MASK) >> PAGE_SHIFT;
- last = ((vaddr + size - 1) & PAGE_MASK) >> PAGE_SHIFT;
- buf->num_pages = last - first + 1;
-
- buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),
- GFP_KERNEL);
- if (!buf->pages)
- goto userptr_fail_alloc_pages;
-
- vma = find_vma(current->mm, vaddr);
- if (!vma) {
- dprintk(1, "no vma for address %lu\n", vaddr);
- goto userptr_fail_find_vma;
- }
-
- if (vma->vm_end < vaddr + size) {
- dprintk(1, "vma at %lu is too small for %lu bytes\n",
- vaddr, size);
- goto userptr_fail_find_vma;
- }
-
- buf->vma = vb2_get_vma(vma);
- if (!buf->vma) {
- dprintk(1, "failed to copy vma\n");
- goto userptr_fail_find_vma;
- }
-
- if (vma_is_io(buf->vma)) {
- for (num_pages_from_user = 0;
- num_pages_from_user < buf->num_pages;
- ++num_pages_from_user, vaddr += PAGE_SIZE) {
- unsigned long pfn;
-
- if (follow_pfn(vma, vaddr, &pfn)) {
- dprintk(1, "no page for address %lu\n", vaddr);
- break;
- }
- buf->pages[num_pages_from_user] = pfn_to_page(pfn);
- }
- } else
- num_pages_from_user = get_user_pages(current, current->mm,
- vaddr & PAGE_MASK,
- buf->num_pages,
- buf->dma_dir == DMA_FROM_DEVICE,
- 1, /* force */
- buf->pages,
- NULL);
-
- if (num_pages_from_user != buf->num_pages)
- goto userptr_fail_get_user_pages;
+ buf->pages = frame_vector_pages(vec);
+ if (IS_ERR(buf->pages))
+ goto userptr_fail_sgtable;
+ buf->num_pages = frame_vector_count(vec);
if (sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
buf->num_pages, buf->offset, size, 0))
- goto userptr_fail_alloc_table_from_pages;
+ goto userptr_fail_sgtable;
sgt = &buf->sg_table;
/*
* No need to sync to the device, this will happen later when the
* prepare() memop is called.
*/
- if (dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->nents,
- buf->dma_dir, &attrs) == 0)
+ sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
+ buf->dma_dir, &attrs);
+ if (!sgt->nents)
goto userptr_fail_map;
+
return buf;
userptr_fail_map:
sg_free_table(&buf->sg_table);
-userptr_fail_alloc_table_from_pages:
-userptr_fail_get_user_pages:
- dprintk(1, "get_user_pages requested/got: %d/%d]\n",
- buf->num_pages, num_pages_from_user);
- if (!vma_is_io(buf->vma))
- while (--num_pages_from_user >= 0)
- put_page(buf->pages[num_pages_from_user]);
- vb2_put_vma(buf->vma);
-userptr_fail_find_vma:
- kfree(buf->pages);
-userptr_fail_alloc_pages:
+userptr_fail_sgtable:
+ vb2_destroy_framevec(vec);
+userptr_fail_pfnvec:
kfree(buf);
return NULL;
}
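
The rewritten function above reduces to a standard frame-vector acquire pattern. A hedged sketch of just the pinning step, reusing this file's struct vb2_dma_sg_buf; the function name is made up:

    /* Illustrative only: pin a userspace range for a userptr buffer. */
    static int example_pin_userptr(struct vb2_dma_sg_buf *buf,
                                   unsigned long vaddr, unsigned long size)
    {
            struct frame_vector *vec;

            /* 'write' must be true when the device fills the buffer */
            vec = vb2_create_framevec(vaddr, size,
                                      buf->dma_dir == DMA_FROM_DEVICE);
            if (IS_ERR(vec))
                    return PTR_ERR(vec);

            buf->pages = frame_vector_pages(vec);
            if (IS_ERR(buf->pages)) {     /* e.g. a PFN-only (VM_PFNMAP) range */
                    vb2_destroy_framevec(vec);
                    return PTR_ERR(buf->pages);
            }
            buf->num_pages = frame_vector_count(vec);
            buf->vec = vec;
            return 0;
    }

The write flag mirrors the old get_user_pages() call it replaces: pages need write access exactly when the device writes into them (DMA_FROM_DEVICE).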
@@ -351,18 +297,16 @@ static void vb2_dma_sg_put_userptr(void *buf_priv)
dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
__func__, buf->num_pages);
- dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir, &attrs);
+ dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir,
+ &attrs);
if (buf->vaddr)
vm_unmap_ram(buf->vaddr, buf->num_pages);
sg_free_table(buf->dma_sgt);
while (--i >= 0) {
if (buf->dma_dir == DMA_FROM_DEVICE)
set_page_dirty_lock(buf->pages[i]);
- if (!vma_is_io(buf->vma))
- put_page(buf->pages[i]);
}
- kfree(buf->pages);
- vb2_put_vma(buf->vma);
+ vb2_destroy_framevec(buf->vec);
kfree(buf);
}
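
The explicit put_page() loop disappears because releasing the pinned pages is now vb2_destroy_framevec()'s job. Assuming the videobuf2-memops implementation of this kernel generation, that helper amounts to:

    /* Assumed shape of the helper (see videobuf2-memops.c). */
    void vb2_destroy_framevec(struct frame_vector *vec)
    {
            put_vaddr_frames(vec);          /* drop references on pinned pages */
            frame_vector_destroy(vec);      /* free the vector itself */
    }

Marking pages the device wrote as dirty (the set_page_dirty_lock() call in the surviving loop) remains the caller's responsibility.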
@@ -502,7 +446,6 @@ static struct sg_table *vb2_dma_sg_dmabuf_ops_map(
/* stealing dmabuf mutex to serialize map/unmap operations */
struct mutex *lock = &db_attach->dmabuf->lock;
struct sg_table *sgt;
- int ret;
mutex_lock(lock);
@@ -521,8 +464,9 @@ static struct sg_table *vb2_dma_sg_dmabuf_ops_map(
}
/* mapping to the client with new direction */
- ret = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, dma_dir);
- if (ret <= 0) {
+ sgt->nents = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
+ dma_dir);
+ if (!sgt->nents) {
pr_err("failed to map scatterlist\n");
mutex_unlock(lock);
return ERR_PTR(-EIO);
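
For completeness, the caching logic around this final hunk sits partly outside the shown context. A condensed sketch of the whole map op, with the attachment struct name (vb2_dma_sg_attachment) assumed rather than shown above:

    /* Condensed sketch; 'attach' caches the mapping per dmabuf attachment. */
    struct vb2_dma_sg_attachment *attach = db_attach->priv;  /* assumed name */
    struct sg_table *sgt = &attach->sgt;

    mutex_lock(lock);
    if (attach->dma_dir == dma_dir) {       /* cached mapping still valid */
            mutex_unlock(lock);
            return sgt;
    }
    if (attach->dma_dir != DMA_NONE) {      /* drop the stale mapping first */
            dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
                         attach->dma_dir);
            attach->dma_dir = DMA_NONE;
    }
    /* remap with the new direction, caching the returned segment count */
    sgt->nents = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, dma_dir);
    if (!sgt->nents) {
            pr_err("failed to map scatterlist\n");
            mutex_unlock(lock);
            return ERR_PTR(-EIO);
    }
    attach->dma_dir = dma_dir;
    mutex_unlock(lock);
    return sgt;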