Diffstat (limited to 'mm/slab.c')
 mm/slab.c | 36 +++++++++++++++++++++++++++---------
 1 file changed, 27 insertions(+), 9 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index 226906294183..cd86f15071ad 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1281,7 +1281,7 @@ void __init kmem_cache_init(void)
 	create_boot_cache(kmem_cache, "kmem_cache",
 			offsetof(struct kmem_cache, node) +
 			nr_node_ids * sizeof(struct kmem_cache_node *),
-			SLAB_HWCACHE_ALIGN);
+			SLAB_HWCACHE_ALIGN, 0, 0);
 	list_add(&kmem_cache->list, &slab_caches);
 	slab_state = PARTIAL;
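
The two new trailing arguments to create_boot_cache() are the byte offset and size of the cache's usercopy whitelist region; the boot kmem_cache passes 0, 0, so none of its objects may be copied to or from userspace. Callers that do want to expose a single field can use the companion kmem_cache_create_usercopy() interface from this series, which takes the same useroffset/usersize pair. A minimal sketch; struct conn, its fields, and conn_cache_init() are hypothetical, not part of this patch:

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>

/* Hypothetical object: only "inbuf" is ever copied to/from userspace. */
struct conn {
	spinlock_t lock;	/* kernel-internal; outside the whitelist */
	char inbuf[64];		/* the whitelisted usercopy region */
};

static struct kmem_cache *conn_cachep;

static int __init conn_cache_init(void)
{
	conn_cachep = kmem_cache_create_usercopy("conn", sizeof(struct conn),
			0, SLAB_HWCACHE_ALIGN,
			offsetof(struct conn, inbuf),		/* useroffset */
			sizeof(((struct conn *)0)->inbuf),	/* usersize */
			NULL);
	return conn_cachep ? 0 : -ENOMEM;
}

With such a cache, a copy confined to "inbuf" passes the check below, while one overlapping "lock" aborts (or warns, under the fallback).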
@@ -1291,7 +1291,8 @@ void __init kmem_cache_init(void)
 	 */
 	kmalloc_caches[INDEX_NODE] = create_kmalloc_cache(
			kmalloc_info[INDEX_NODE].name,
-			kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS);
+			kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS,
+			0, kmalloc_size(INDEX_NODE));
 	slab_state = PARTIAL_NODE;
 	setup_kmalloc_cache_index_table();
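
The kmalloc caches, by contrast, are whitelisted in full: useroffset 0 and usersize equal to the whole allocation, because the allocator cannot know which parts of a kmalloc() buffer a caller intends for userspace. The practical effect, sketched with a hypothetical helper (demo_copy_out() is illustrative, not from this patch), is that any in-bounds copy out of a kmalloc object still passes the hardened check:

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

/* Hypothetical helper: copy a freshly zeroed kmalloc buffer to userspace.
 * Because kmalloc caches carry a whole-object whitelist, the hardened
 * usercopy check accepts this for any in-bounds offset and length. */
static int demo_copy_out(void __user *to, size_t len)
{
	char *buf = kzalloc(128, GFP_KERNEL);
	int ret = 0;

	if (!buf)
		return -ENOMEM;
	if (copy_to_user(to, buf, min_t(size_t, len, 128)))
		ret = -EFAULT;
	kfree(buf);
	return ret;
}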
@@ -4385,13 +4386,15 @@ module_init(slab_proc_init);
 #ifdef CONFIG_HARDENED_USERCOPY
 /*
- * Rejects objects that are incorrectly sized.
+ * Rejects incorrectly sized objects and objects that are to be copied
+ * to/from userspace but do not fall entirely within the containing slab
+ * cache's usercopy region.
  *
- * Returns NULL if check passes, otherwise const char * to name of cache
- * to indicate an error.
+ * Returns when the check passes; otherwise reports the failed check via
+ * usercopy_warn() or usercopy_abort().
  */
-const char *__check_heap_object(const void *ptr, unsigned long n,
-				struct page *page)
+void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
+			 bool to_user)
 {
 	struct kmem_cache *cachep;
 	unsigned int objnr;
@@ -4405,11 +4408,26 @@ const char *__check_heap_object(const void *ptr, unsigned long n,
 	/* Find offset within object. */
 	offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep);
 
-	/* Allow address range falling entirely within object size. */
-	if (offset <= cachep->object_size && n <= cachep->object_size - offset)
-		return NULL;
+	/* Allow address range falling entirely within usercopy region. */
+	if (offset >= cachep->useroffset &&
+	    offset - cachep->useroffset <= cachep->usersize &&
+	    n <= cachep->useroffset - offset + cachep->usersize)
+		return;
+
+	/*
+	 * If the copy is still within the allocated object, produce
+	 * a warning instead of rejecting the copy. This is intended
+	 * to be a temporary method to find any missing usercopy
+	 * whitelists.
+	 */
+	if (usercopy_fallback &&
+	    offset <= cachep->object_size &&
+	    n <= cachep->object_size - offset) {
+		usercopy_warn("SLAB object", cachep->name, to_user, offset, n);
+		return;
+	}
 
-	return cachep->name;
+	usercopy_abort("SLAB object", cachep->name, to_user, offset, n);
 }
 #endif /* CONFIG_HARDENED_USERCOPY */
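
The acceptance test added above is pure offset arithmetic, so it can be exercised in isolation. A userspace sketch of the same three comparisons follows; the function name and test values are illustrative, and the kernel reads useroffset/usersize from the kmem_cache rather than taking them as parameters:

#include <assert.h>
#include <stdbool.h>

/* Mirror of the whitelist test: the copy [offset, offset + n) must fall
 * inside the region [useroffset, useroffset + usersize) of one object.
 * All values are byte offsets/lengths within a single slab object. */
static bool range_in_usercopy_region(unsigned long offset, unsigned long n,
				     unsigned long useroffset,
				     unsigned long usersize)
{
	return offset >= useroffset &&
	       offset - useroffset <= usersize &&
	       n <= useroffset - offset + usersize;
}

int main(void)
{
	/* A 64-byte region starting 16 bytes into the object. */
	assert(range_in_usercopy_region(16, 64, 16, 64));	/* exact fit */
	assert(range_in_usercopy_region(32, 8, 16, 64));	/* interior */
	assert(!range_in_usercopy_region(8, 8, 16, 64));	/* starts early */
	assert(!range_in_usercopy_region(72, 16, 16, 64));	/* overruns end */
	return 0;
}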