Diffstat (limited to 'drivers/staging/android/ion/ion.c')
-rw-r--r--	drivers/staging/android/ion/ion.c	162
1 file changed, 152 insertions(+), 10 deletions(-)
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index dc7174d96b55..bc9e922bfbf8 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -34,7 +34,6 @@
 #include "ion.h"
 #include "ion_priv.h"
 
-#define DEBUG
 
 /**
  * struct ion_device - the metadata of the ion device node
@@ -127,6 +126,8 @@ static void ion_buffer_add(struct ion_device *dev,
 	rb_insert_color(&buffer->node, &dev->buffers);
 }
 
+static int ion_buffer_alloc_dirty(struct ion_buffer *buffer);
+
 /* this function should only be called while dev->lock is held */
 static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
 				     struct ion_device *dev,
@@ -154,15 +155,38 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
 
 	buffer->dev = dev;
 	buffer->size = len;
+	buffer->flags = flags;
 
-	table = buffer->heap->ops->map_dma(buffer->heap, buffer);
+	table = heap->ops->map_dma(heap, buffer);
 	if (IS_ERR_OR_NULL(table)) {
 		heap->ops->free(buffer);
 		kfree(buffer);
 		return ERR_PTR(PTR_ERR(table));
 	}
 	buffer->sg_table = table;
+	if (buffer->flags & ION_FLAG_CACHED)
+		for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents,
+			    i) {
+			if (sg_dma_len(sg) == PAGE_SIZE)
+				continue;
+			pr_err("%s: cached mappings must have pagewise "
+			       "sg_lists\n", __func__);
+			heap->ops->unmap_dma(heap, buffer);
+			kfree(buffer);
+			return ERR_PTR(-EINVAL);
+		}
+
+	ret = ion_buffer_alloc_dirty(buffer);
+	if (ret) {
+		heap->ops->unmap_dma(heap, buffer);
+		heap->ops->free(buffer);
+		kfree(buffer);
+		return ERR_PTR(ret);
+	}
+
+	buffer->dev = dev;
+	buffer->size = len;
+	INIT_LIST_HEAD(&buffer->vmas);
 	mutex_init(&buffer->lock);
 	/* this will set up dma addresses for the sglist -- it is not
 	   technically correct as per the dma api -- a specific
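Note on the pagewise check above: the per-page dirty tracking introduced later in this patch keeps one bit per sg entry, so cached buffers must be mapped with page-sized entries or the bitmap and the pages would fall out of step. The sizing in ion_buffer_alloc_dirty() is the open-coded form of the kernel's BITS_TO_LONGS() round-up; a minimal standalone sketch of the arithmetic (the buffer sizes are made up for illustration):

	#include <stdio.h>

	#define BITS_PER_LONG (8 * sizeof(unsigned long))

	/* Longs needed for one dirty bit per page -- the same
	 * round-up that ion_buffer_alloc_dirty() performs. */
	static unsigned long dirty_longs(unsigned long pages)
	{
		return (pages + BITS_PER_LONG - 1) / BITS_PER_LONG;
	}

	int main(void)
	{
		/* e.g. a 1 MiB buffer in 4 KiB pages -> 256 sg entries */
		printf("%lu\n", dirty_longs(256)); /* 4 on a 64-bit system */
		printf("%lu\n", dirty_longs(1));   /* still needs 1 long */
		return 0;
	}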
@@ -313,13 +337,16 @@ static void ion_handle_add(struct ion_client *client, struct ion_handle *handle)
 }
 
 struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
-			     size_t align, unsigned int flags)
+			     size_t align, unsigned int heap_mask,
+			     unsigned int flags)
 {
 	struct rb_node *n;
 	struct ion_handle *handle;
 	struct ion_device *dev = client->dev;
 	struct ion_buffer *buffer = NULL;
 
+	pr_debug("%s: len %d align %d heap_mask %u flags %x\n", __func__, len,
+		 align, heap_mask, flags);
 	/*
 	 * traverse the list of heaps available in this system in priority
 	 * order.  If the heap type is supported by the client, and matches the
@@ -338,7 +365,7 @@ struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
 		if (!((1 << heap->type) & client->heap_mask))
 			continue;
 		/* if the caller didn't specify this heap type */
-		if (!((1 << heap->id) & flags))
+		if (!((1 << heap->id) & heap_mask))
 			continue;
 		buffer = ion_buffer_create(heap, dev, len, align, flags);
 		if (!IS_ERR_OR_NULL(buffer))
@@ -647,12 +674,18 @@ struct sg_table *ion_sg_table(struct ion_client *client,
 	return table;
 }
 
+static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
+				       struct device *dev,
+				       enum dma_data_direction direction);
+
 static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
					enum dma_data_direction direction)
 {
 	struct dma_buf *dmabuf = attachment->dmabuf;
 	struct ion_buffer *buffer = dmabuf->priv;
 
+	if (buffer->flags & ION_FLAG_CACHED)
+		ion_buffer_sync_for_device(buffer, attachment->dev, direction);
 	return buffer->sg_table;
 }
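With heap_mask split out of flags in the hunks above, heap selection and mapping attributes become orthogonal: bit N of heap_mask selects the heap registered with id N, while flags such as ION_FLAG_CACHED choose the new faulting, cached mapping path. A hypothetical in-kernel caller under the new signature; MY_HEAP_ID is a made-up platform heap id, not something defined by this patch:

	/* Sketch only: allocate a page-aligned, CPU-cached buffer from
	 * one specific heap.  The handle is still released via ion_free(). */
	static struct ion_handle *alloc_cached(struct ion_client *client,
					       size_t len)
	{
		return ion_alloc(client, len, PAGE_SIZE,
				 1 << MY_HEAP_ID,    /* heap_mask: where */
				 ION_FLAG_CACHED);   /* flags: how */
	}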
@@ -662,10 +695,112 @@ static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
 {
 }
 
+static int ion_buffer_alloc_dirty(struct ion_buffer *buffer)
+{
+	unsigned long pages = buffer->sg_table->nents;
+	unsigned long length = (pages + BITS_PER_LONG - 1)/BITS_PER_LONG;
+
+	buffer->dirty = kzalloc(length * sizeof(unsigned long), GFP_KERNEL);
+	if (!buffer->dirty)
+		return -ENOMEM;
+	return 0;
+}
+
+struct ion_vma_list {
+	struct list_head list;
+	struct vm_area_struct *vma;
+};
+
+static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
+				       struct device *dev,
+				       enum dma_data_direction dir)
+{
+	struct scatterlist *sg;
+	int i;
+	struct ion_vma_list *vma_list;
+
+	pr_debug("%s: syncing for device %s\n", __func__,
+		 dev ? dev_name(dev) : "null");
+	mutex_lock(&buffer->lock);
+	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
+		if (!test_bit(i, buffer->dirty))
+			continue;
+		dma_sync_sg_for_device(dev, sg, 1, dir);
+		clear_bit(i, buffer->dirty);
+	}
+	list_for_each_entry(vma_list, &buffer->vmas, list) {
+		struct vm_area_struct *vma = vma_list->vma;
+
+		zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
+			       NULL);
+	}
+	mutex_unlock(&buffer->lock);
+}
+
+int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	struct ion_buffer *buffer = vma->vm_private_data;
+	struct scatterlist *sg;
+	int i;
+
+	mutex_lock(&buffer->lock);
+	set_bit(vmf->pgoff, buffer->dirty);
+
+	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
+		if (i != vmf->pgoff)
+			continue;
+		dma_sync_sg_for_cpu(NULL, sg, 1, DMA_BIDIRECTIONAL);
+		vm_insert_page(vma, (unsigned long)vmf->virtual_address,
+			       sg_page(sg));
+		break;
+	}
+	mutex_unlock(&buffer->lock);
+	return VM_FAULT_NOPAGE;
+}
+
+static void ion_vm_open(struct vm_area_struct *vma)
+{
+	struct ion_buffer *buffer = vma->vm_private_data;
+	struct ion_vma_list *vma_list;
+
+	vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
+	if (!vma_list)
+		return;
+	vma_list->vma = vma;
+	mutex_lock(&buffer->lock);
+	list_add(&vma_list->list, &buffer->vmas);
+	mutex_unlock(&buffer->lock);
+	pr_debug("%s: adding %p\n", __func__, vma);
+}
+
+static void ion_vm_close(struct vm_area_struct *vma)
+{
+	struct ion_buffer *buffer = vma->vm_private_data;
+	struct ion_vma_list *vma_list, *tmp;
+
+	pr_debug("%s\n", __func__);
+	mutex_lock(&buffer->lock);
+	list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
+		if (vma_list->vma != vma)
+			continue;
+		list_del(&vma_list->list);
+		kfree(vma_list);
+		pr_debug("%s: deleting %p\n", __func__, vma);
+		break;
+	}
+	mutex_unlock(&buffer->lock);
+}
+
+struct vm_operations_struct ion_vma_ops = {
+	.open = ion_vm_open,
+	.close = ion_vm_close,
+	.fault = ion_vm_fault,
+};
+
 static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
 {
 	struct ion_buffer *buffer = dmabuf->priv;
-	int ret;
+	int ret = 0;
 
 	if (!buffer->heap->ops->map_user) {
 		pr_err("%s: this heap does not define a method for mapping "
@@ -673,10 +808,17 @@ static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
 		return -EINVAL;
 	}
 
-	mutex_lock(&buffer->lock);
-	/* now map it to userspace */
-	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
-	mutex_unlock(&buffer->lock);
+	if (buffer->flags & ION_FLAG_CACHED) {
+		vma->vm_private_data = buffer;
+		vma->vm_ops = &ion_vma_ops;
+		ion_vm_open(vma);
+	} else {
+		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+		mutex_lock(&buffer->lock);
+		/* now map it to userspace */
+		ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
+		mutex_unlock(&buffer->lock);
+	}
 
 	if (ret)
 		pr_err("%s: failure mapping buffer to userspace\n",
@@ -828,7 +970,7 @@ static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
 			return -EFAULT;
 		data.handle = ion_alloc(client, data.len, data.align,
-					data.flags);
+					data.heap_mask, data.flags);
 		if (IS_ERR(data.handle))
 			return PTR_ERR(data.handle);
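Taken together, the fault path works like this: mmap() of a cached buffer installs ion_vma_ops instead of mapping everything up front; the first CPU touch of each page runs ion_vm_fault(), which syncs that page for the CPU, inserts it, and sets its dirty bit; the next time a device maps the dma-buf, ion_buffer_sync_for_device() flushes exactly the dirty pages and zaps all CPU mappings so the cycle can repeat. A userspace sketch of that lifecycle, assuming buf_fd was obtained through the usual ION_IOC_SHARE path (not shown in this patch):

	#include <stdint.h>
	#include <string.h>
	#include <sys/mman.h>

	/* Touch every page of a cached ION buffer.  Each page written
	 * here faults once: ion_vm_fault() syncs it for the CPU and
	 * marks it dirty, so a later device attach flushes only the
	 * pages actually used and zaps this mapping again. */
	static int fill_buffer(int buf_fd, size_t len)
	{
		uint8_t *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
				  MAP_SHARED, buf_fd, 0);
		if (p == MAP_FAILED)
			return -1;
		memset(p, 0xa5, len);
		return munmap(p, len);
	}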