Diffstat (limited to 'drivers/media/v4l2-core/videobuf2-vmalloc.c')
-rw-r--r--  drivers/media/v4l2-core/videobuf2-vmalloc.c | 194
1 file changed, 184 insertions(+), 10 deletions(-)
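
This diff combines two related changes to the vmalloc backend: the ambiguous int write flag stored in struct vb2_vmalloc_buf and passed to the alloc, get_userptr and attach_dmabuf callbacks is replaced by an explicit enum dma_data_direction, and a set of DMA-BUF exporter ops plus a .get_dmabuf memop are added so vmalloc buffers can be exported to other drivers.

For orientation, the affected vb2_mem_ops hooks end up with the signatures below. This is inferred from the function definitions in the diff; the authoritative declaration lives in include/media/videobuf2-core.h:

	void *(*alloc)(void *alloc_ctx, unsigned long size,
		       enum dma_data_direction dma_dir, gfp_t gfp_flags);
	void *(*get_userptr)(void *alloc_ctx, unsigned long vaddr,
			     unsigned long size, enum dma_data_direction dma_dir);
	void *(*attach_dmabuf)(void *alloc_ctx, struct dma_buf *dbuf,
			       unsigned long size, enum dma_data_direction dma_dir);
	struct dma_buf *(*get_dmabuf)(void *buf_priv, unsigned long flags);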
diff --git a/drivers/media/v4l2-core/videobuf2-vmalloc.c b/drivers/media/v4l2-core/videobuf2-vmalloc.c
index 313d9771b2bc..fba944e50227 100644
--- a/drivers/media/v4l2-core/videobuf2-vmalloc.c
+++ b/drivers/media/v4l2-core/videobuf2-vmalloc.c
@@ -25,7 +25,7 @@ struct vb2_vmalloc_buf {
void *vaddr;
struct page **pages;
struct vm_area_struct *vma;
- int write;
+ enum dma_data_direction dma_dir;
unsigned long size;
unsigned int n_pages;
atomic_t refcount;
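
For context (an illustrative sketch, not part of this patch): the direction stored in dma_dir would typically be derived from the V4L2 queue type by the caller. The helper name below is hypothetical:

	/* Hypothetical helper: OUTPUT queues send data to the device,
	 * CAPTURE queues receive data from it.
	 */
	static enum dma_data_direction queue_dir(enum v4l2_buf_type type)
	{
		return V4L2_TYPE_IS_OUTPUT(type) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
	}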
@@ -35,7 +35,8 @@ struct vb2_vmalloc_buf {
static void vb2_vmalloc_put(void *buf_priv);
-static void *vb2_vmalloc_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_flags)
+static void *vb2_vmalloc_alloc(void *alloc_ctx, unsigned long size,
+ enum dma_data_direction dma_dir, gfp_t gfp_flags)
{
struct vb2_vmalloc_buf *buf;
@@ -45,6 +46,7 @@ static void *vb2_vmalloc_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_fl
buf->size = size;
buf->vaddr = vmalloc_user(buf->size);
+ buf->dma_dir = dma_dir;
buf->handler.refcount = &buf->refcount;
buf->handler.put = vb2_vmalloc_put;
buf->handler.arg = buf;
@@ -70,7 +72,8 @@ static void vb2_vmalloc_put(void *buf_priv)
}
static void *vb2_vmalloc_get_userptr(void *alloc_ctx, unsigned long vaddr,
- unsigned long size, int write)
+ unsigned long size,
+ enum dma_data_direction dma_dir)
{
struct vb2_vmalloc_buf *buf;
unsigned long first, last;
@@ -82,7 +85,7 @@ static void *vb2_vmalloc_get_userptr(void *alloc_ctx, unsigned long vaddr,
if (!buf)
return NULL;
- buf->write = write;
+ buf->dma_dir = dma_dir;
offset = vaddr & ~PAGE_MASK;
buf->size = size;
@@ -107,7 +110,8 @@ static void *vb2_vmalloc_get_userptr(void *alloc_ctx, unsigned long vaddr,
/* current->mm->mmap_sem is taken by videobuf2 core */
n_pages = get_user_pages(current, current->mm,
vaddr & PAGE_MASK, buf->n_pages,
- write, 1, /* force */
+ dma_dir == DMA_FROM_DEVICE,
+ 1, /* force */
buf->pages, NULL);
if (n_pages != buf->n_pages)
goto fail_get_user_pages;
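
A note on the hunk above: the write argument of get_user_pages() requests pages the kernel side may write into, which is only needed when the device fills the buffer. Expressed as a one-line equivalence (illustrative only):

	/* the old 'write' flag under the new API: the device writes into
	 * the pages only in the DMA_FROM_DEVICE (capture) case
	 */
	int write = (dma_dir == DMA_FROM_DEVICE);

The same test governs set_page_dirty_lock() in put_userptr below.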
@@ -144,14 +148,13 @@ static void vb2_vmalloc_put_userptr(void *buf_priv)
if (vaddr)
vm_unmap_ram((void *)vaddr, buf->n_pages);
for (i = 0; i < buf->n_pages; ++i) {
- if (buf->write)
+ if (buf->dma_dir == DMA_FROM_DEVICE)
set_page_dirty_lock(buf->pages[i]);
put_page(buf->pages[i]);
}
kfree(buf->pages);
} else {
- if (buf->vma)
- vb2_put_vma(buf->vma);
+ vb2_put_vma(buf->vma);
iounmap(buf->vaddr);
}
kfree(buf);
@@ -209,6 +212,176 @@ static int vb2_vmalloc_mmap(void *buf_priv, struct vm_area_struct *vma)
}
/*********************************************/
+/* DMABUF ops for exporters */
+/*********************************************/
+
+struct vb2_vmalloc_attachment {
+ struct sg_table sgt;
+ enum dma_data_direction dma_dir;
+};
+
+static int vb2_vmalloc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
+ struct dma_buf_attachment *dbuf_attach)
+{
+ struct vb2_vmalloc_attachment *attach;
+ struct vb2_vmalloc_buf *buf = dbuf->priv;
+ int num_pages = PAGE_ALIGN(buf->size) / PAGE_SIZE;
+ struct sg_table *sgt;
+ struct scatterlist *sg;
+ void *vaddr = buf->vaddr;
+ int ret;
+ int i;
+
+ attach = kzalloc(sizeof(*attach), GFP_KERNEL);
+ if (!attach)
+ return -ENOMEM;
+
+ sgt = &attach->sgt;
+ ret = sg_alloc_table(sgt, num_pages, GFP_KERNEL);
+ if (ret) {
+ kfree(attach);
+ return ret;
+ }
+ for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+ struct page *page = vmalloc_to_page(vaddr);
+
+ if (!page) {
+ sg_free_table(sgt);
+ kfree(attach);
+ return -ENOMEM;
+ }
+ sg_set_page(sg, page, PAGE_SIZE, 0);
+ vaddr += PAGE_SIZE;
+ }
+
+ attach->dma_dir = DMA_NONE;
+ dbuf_attach->priv = attach;
+ return 0;
+}
+
+static void vb2_vmalloc_dmabuf_ops_detach(struct dma_buf *dbuf,
+ struct dma_buf_attachment *db_attach)
+{
+ struct vb2_vmalloc_attachment *attach = db_attach->priv;
+ struct sg_table *sgt;
+
+ if (!attach)
+ return;
+
+ sgt = &attach->sgt;
+
+ /* release the scatterlist cache */
+ if (attach->dma_dir != DMA_NONE)
+ dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
+ attach->dma_dir);
+ sg_free_table(sgt);
+ kfree(attach);
+ db_attach->priv = NULL;
+}
+
+static struct sg_table *vb2_vmalloc_dmabuf_ops_map(
+ struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
+{
+ struct vb2_vmalloc_attachment *attach = db_attach->priv;
+ /* stealing dmabuf mutex to serialize map/unmap operations */
+ struct mutex *lock = &db_attach->dmabuf->lock;
+ struct sg_table *sgt;
+ int ret;
+
+ mutex_lock(lock);
+
+ sgt = &attach->sgt;
+ /* return previously mapped sg table */
+ if (attach->dma_dir == dma_dir) {
+ mutex_unlock(lock);
+ return sgt;
+ }
+
+ /* release any previous cache */
+ if (attach->dma_dir != DMA_NONE) {
+ dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
+ attach->dma_dir);
+ attach->dma_dir = DMA_NONE;
+ }
+
+ /* mapping to the client with new direction */
+ ret = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, dma_dir);
+ if (ret <= 0) {
+ pr_err("failed to map scatterlist\n");
+ mutex_unlock(lock);
+ return ERR_PTR(-EIO);
+ }
+
+ attach->dma_dir = dma_dir;
+
+ mutex_unlock(lock);
+
+ return sgt;
+}
+
+static void vb2_vmalloc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
+ struct sg_table *sgt, enum dma_data_direction dma_dir)
+{
+ /* nothing to be done here */
+}
+
+static void vb2_vmalloc_dmabuf_ops_release(struct dma_buf *dbuf)
+{
+ /* drop reference obtained in vb2_vmalloc_get_dmabuf */
+ vb2_vmalloc_put(dbuf->priv);
+}
+
+static void *vb2_vmalloc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
+{
+ struct vb2_vmalloc_buf *buf = dbuf->priv;
+
+ return buf->vaddr + pgnum * PAGE_SIZE;
+}
+
+static void *vb2_vmalloc_dmabuf_ops_vmap(struct dma_buf *dbuf)
+{
+ struct vb2_vmalloc_buf *buf = dbuf->priv;
+
+ return buf->vaddr;
+}
+
+static int vb2_vmalloc_dmabuf_ops_mmap(struct dma_buf *dbuf,
+ struct vm_area_struct *vma)
+{
+ return vb2_vmalloc_mmap(dbuf->priv, vma);
+}
+
+static struct dma_buf_ops vb2_vmalloc_dmabuf_ops = {
+ .attach = vb2_vmalloc_dmabuf_ops_attach,
+ .detach = vb2_vmalloc_dmabuf_ops_detach,
+ .map_dma_buf = vb2_vmalloc_dmabuf_ops_map,
+ .unmap_dma_buf = vb2_vmalloc_dmabuf_ops_unmap,
+ .kmap = vb2_vmalloc_dmabuf_ops_kmap,
+ .kmap_atomic = vb2_vmalloc_dmabuf_ops_kmap,
+ .vmap = vb2_vmalloc_dmabuf_ops_vmap,
+ .mmap = vb2_vmalloc_dmabuf_ops_mmap,
+ .release = vb2_vmalloc_dmabuf_ops_release,
+};
+
+static struct dma_buf *vb2_vmalloc_get_dmabuf(void *buf_priv, unsigned long flags)
+{
+ struct vb2_vmalloc_buf *buf = buf_priv;
+ struct dma_buf *dbuf;
+
+ if (WARN_ON(!buf->vaddr))
+ return NULL;
+
+ dbuf = dma_buf_export(buf, &vb2_vmalloc_dmabuf_ops, buf->size, flags, NULL);
+ if (IS_ERR(dbuf))
+ return NULL;
+
+ /* dmabuf keeps reference to vb2 buffer */
+ atomic_inc(&buf->refcount);
+
+ return dbuf;
+}
+
+/*********************************************/
/* callbacks for DMABUF buffers */
/*********************************************/
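
To exercise the exporter ops added above, an importer would attach to the buffer and map it. A minimal sketch against the dma-buf API of this era, with a hypothetical function name and abbreviated error handling; fd would come from the exporting side, e.g. via VIDIOC_EXPBUF:

	#include <linux/dma-buf.h>

	/* Illustrative importer: dma_buf_attach() lands in
	 * vb2_vmalloc_dmabuf_ops_attach() above, dma_buf_map_attachment()
	 * in vb2_vmalloc_dmabuf_ops_map(), which hands back the cached
	 * sg table and only remaps when the direction changes.
	 */
	static struct sg_table *import_sketch(struct device *dev, int fd)
	{
		struct dma_buf *dbuf = dma_buf_get(fd);
		struct dma_buf_attachment *att;

		if (IS_ERR(dbuf))
			return ERR_CAST(dbuf);
		att = dma_buf_attach(dbuf, dev);
		if (IS_ERR(att)) {
			dma_buf_put(dbuf);
			return ERR_CAST(att);
		}
		return dma_buf_map_attachment(att, DMA_FROM_DEVICE);
	}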
@@ -240,7 +413,7 @@ static void vb2_vmalloc_detach_dmabuf(void *mem_priv)
}
static void *vb2_vmalloc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
- unsigned long size, int write)
+ unsigned long size, enum dma_data_direction dma_dir)
{
struct vb2_vmalloc_buf *buf;
@@ -252,7 +425,7 @@ static void *vb2_vmalloc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
return ERR_PTR(-ENOMEM);
buf->dbuf = dbuf;
- buf->write = write;
+ buf->dma_dir = dma_dir;
buf->size = size;
return buf;
@@ -264,6 +437,7 @@ const struct vb2_mem_ops vb2_vmalloc_memops = {
.put = vb2_vmalloc_put,
.get_userptr = vb2_vmalloc_get_userptr,
.put_userptr = vb2_vmalloc_put_userptr,
+ .get_dmabuf = vb2_vmalloc_get_dmabuf,
.map_dmabuf = vb2_vmalloc_map_dmabuf,
.unmap_dmabuf = vb2_vmalloc_unmap_dmabuf,
.attach_dmabuf = vb2_vmalloc_attach_dmabuf,
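
With .get_dmabuf wired into vb2_vmalloc_memops, a driver using this allocator gains DMABUF export (VIDIOC_EXPBUF) without further work. A hedged sketch of the queue setup such a driver might use; the my_ names are placeholders and the vb2_ops callbacks are not shown:

	#include <media/videobuf2-core.h>
	#include <media/videobuf2-vmalloc.h>

	/* Placeholder ops; vb2_queue_init() requires at least
	 * queue_setup and buf_queue to be filled in by a real driver.
	 */
	static const struct vb2_ops my_qops;

	static int my_init_queue(struct vb2_queue *q, struct mutex *lock)
	{
		q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		/* VB2_DMABUF is what exposes the new export path */
		q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
		q->buf_struct_size = sizeof(struct vb2_buffer);
		q->ops = &my_qops;
		q->mem_ops = &vb2_vmalloc_memops;
		q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
		q->lock = lock;
		return vb2_queue_init(q);
	}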