path: root/drivers/android/binder_alloc.c
author    Todd Kjos    2019-02-08 19:35:15 +0100
committer Greg Kroah-Hartman    2019-02-12 10:43:57 +0100
commit    8ced0c6231ead26eca8cb416dcb7cc1c2cdd41d8 (patch)
tree      367bf43c0df5e7bee2ca62a7939062d581f465ff /drivers/android/binder_alloc.c
parent    binder: create userspace-to-binder-buffer copy function (diff)
binder: add functions to copy to/from binder buffers
Avoid vm_area when copying to or from binder buffers. Instead, new copy functions are added that copy from kernel space to binder buffer space. These use kmap_atomic() and kunmap_atomic() to create temporary mappings and then memcpy() is used to copy within that page.

Also, kmap_atomic() / kunmap_atomic() use the appropriate cache flushing to support VIVT cache architectures. Allow binder to build if CPU_CACHE_VIVT is defined.

Several uses of the new functions are added here. More to follow in subsequent patches.

Signed-off-by: Todd Kjos <tkjos@google.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
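For readers skimming past the diff below, here is a minimal sketch of the per-page copy pattern the commit message describes, reduced to a single page. The helper name copy_into_page and its parameters are illustrative, not part of the patch:

	#include <linux/highmem.h>
	#include <linux/string.h>

	/* Illustrative helper: copy 'size' bytes into 'page' at offset
	 * 'pgoff'. kmap_atomic() creates a short-lived kernel mapping of
	 * the page, memcpy() stays within that single page, and
	 * kunmap_atomic() drops the mapping, flushing the cache on VIVT
	 * architectures.
	 */
	static void copy_into_page(struct page *page, size_t pgoff,
				   const void *src, size_t size)
	{
		void *base = kmap_atomic(page);

		memcpy(base + pgoff, src, size);
		kunmap_atomic(base);
	}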
Diffstat (limited to 'drivers/android/binder_alloc.c')
-rw-r--r--    drivers/android/binder_alloc.c    59
1 file changed, 59 insertions(+), 0 deletions(-)
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 94c0d85c4e75..2eebff4be83e 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -1166,3 +1166,62 @@ binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc,
}
return 0;
}
+
+static void binder_alloc_do_buffer_copy(struct binder_alloc *alloc,
+ bool to_buffer,
+ struct binder_buffer *buffer,
+ binder_size_t buffer_offset,
+ void *ptr,
+ size_t bytes)
+{
+ /* All copies must be 32-bit aligned and 32-bit size */
+ BUG_ON(!check_buffer(alloc, buffer, buffer_offset, bytes));
+
+ while (bytes) {
+ unsigned long size;
+ struct page *page;
+ pgoff_t pgoff;
+ void *tmpptr;
+ void *base_ptr;
+
+ page = binder_alloc_get_page(alloc, buffer,
+ buffer_offset, &pgoff);
+ size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
+ base_ptr = kmap_atomic(page);
+ tmpptr = base_ptr + pgoff;
+ if (to_buffer)
+ memcpy(tmpptr, ptr, size);
+ else
+ memcpy(ptr, tmpptr, size);
+ /*
+ * kunmap_atomic() takes care of flushing the cache
+ * if this device has VIVT cache arch
+ */
+ kunmap_atomic(base_ptr);
+ bytes -= size;
+ pgoff = 0;
+ ptr = ptr + size;
+ buffer_offset += size;
+ }
+}
+
+void binder_alloc_copy_to_buffer(struct binder_alloc *alloc,
+ struct binder_buffer *buffer,
+ binder_size_t buffer_offset,
+ void *src,
+ size_t bytes)
+{
+ binder_alloc_do_buffer_copy(alloc, true, buffer, buffer_offset,
+ src, bytes);
+}
+
+void binder_alloc_copy_from_buffer(struct binder_alloc *alloc,
+ void *dest,
+ struct binder_buffer *buffer,
+ binder_size_t buffer_offset,
+ size_t bytes)
+{
+ binder_alloc_do_buffer_copy(alloc, false, buffer, buffer_offset,
+ dest, bytes);
+}
+
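As a rough illustration of how callers are expected to use the new helpers: the call site below is hypothetical (target_proc, t, and object_offset are assumed to exist in the caller, patterned on binder.c), since the real uses land in binder.c in this and subsequent patches.

	/* Hypothetical call site: read an object header out of a
	 * transaction buffer, fix it up, and write it back.
	 */
	struct binder_object_header hdr;

	binder_alloc_copy_from_buffer(&target_proc->alloc, &hdr,
				      t->buffer, object_offset,
				      sizeof(hdr));
	/* ... adjust hdr ... */
	binder_alloc_copy_to_buffer(&target_proc->alloc, t->buffer,
				    object_offset, &hdr, sizeof(hdr));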