Diffstat (limited to 'mm/slab.c')
-rw-r--r--  mm/slab.c  48
1 file changed, 6 insertions, 42 deletions
diff --git a/mm/slab.c b/mm/slab.c
index 9142ee992493..284ab737faee 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1467,53 +1467,17 @@ static bool is_debug_pagealloc_cache(struct kmem_cache *cachep)
}
#ifdef CONFIG_DEBUG_PAGEALLOC
-static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
-			    unsigned long caller)
-{
-	int size = cachep->object_size;
-
-	addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)];
-
-	if (size < 5 * sizeof(unsigned long))
-		return;
-
-	*addr++ = 0x12345678;
-	*addr++ = caller;
-	*addr++ = smp_processor_id();
-	size -= 3 * sizeof(unsigned long);
-	{
-		unsigned long *sptr = &caller;
-		unsigned long svalue;
-
-		while (!kstack_end(sptr)) {
-			svalue = *sptr++;
-			if (kernel_text_address(svalue)) {
-				*addr++ = svalue;
-				size -= sizeof(unsigned long);
-				if (size <= sizeof(unsigned long))
-					break;
-			}
-		}
-
-	}
-	*addr++ = 0x87654321;
-}
-
-static void slab_kernel_map(struct kmem_cache *cachep, void *objp,
-			    int map, unsigned long caller)
+static void slab_kernel_map(struct kmem_cache *cachep, void *objp, int map)
{
	if (!is_debug_pagealloc_cache(cachep))
		return;
-	if (caller)
-		store_stackinfo(cachep, objp, caller);
-
	kernel_map_pages(virt_to_page(objp), cachep->size / PAGE_SIZE, map);
}
#else
static inline void slab_kernel_map(struct kmem_cache *cachep, void *objp,
-				   int map, unsigned long caller) {}
+				   int map) {}
#endif
@@ -1661,7 +1625,7 @@ static void slab_destroy_debugcheck(struct kmem_cache *cachep,
		if (cachep->flags & SLAB_POISON) {
			check_poison_obj(cachep, objp);
-			slab_kernel_map(cachep, objp, 1, 0);
+			slab_kernel_map(cachep, objp, 1);
		}
		if (cachep->flags & SLAB_RED_ZONE) {
			if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
@@ -2433,7 +2397,7 @@ static void cache_init_objs_debug(struct kmem_cache *cachep, struct page *page)
		/* need to poison the objs? */
		if (cachep->flags & SLAB_POISON) {
			poison_obj(cachep, objp, POISON_FREE);
-			slab_kernel_map(cachep, objp, 0, 0);
+			slab_kernel_map(cachep, objp, 0);
		}
	}
#endif
@@ -2812,7 +2776,7 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
	if (cachep->flags & SLAB_POISON) {
		poison_obj(cachep, objp, POISON_FREE);
-		slab_kernel_map(cachep, objp, 0, caller);
+		slab_kernel_map(cachep, objp, 0);
	}
	return objp;
}
@@ -3076,7 +3040,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
		return objp;
	if (cachep->flags & SLAB_POISON) {
		check_poison_obj(cachep, objp);
-		slab_kernel_map(cachep, objp, 1, 0);
+		slab_kernel_map(cachep, objp, 1);
		poison_obj(cachep, objp, POISON_INUSE);
	}
	if (cachep->flags & SLAB_STORE_USER)
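
For reference, a sketch of the debug helper as it reads after this change, reconstructed only from the hunks above; it relies on the surrounding mm/slab.c context (is_debug_pagealloc_cache(), kernel_map_pages(), virt_to_page()) and the CONFIG_DEBUG_PAGEALLOC guard shown in the first hunk:

#ifdef CONFIG_DEBUG_PAGEALLOC
/* With store_stackinfo() gone, the helper only maps or unmaps the
 * object's pages via kernel_map_pages(); no stack trace is written
 * into the object itself anymore. */
static void slab_kernel_map(struct kmem_cache *cachep, void *objp, int map)
{
	if (!is_debug_pagealloc_cache(cachep))
		return;
	kernel_map_pages(virt_to_page(objp), cachep->size / PAGE_SIZE, map);
}
#else
static inline void slab_kernel_map(struct kmem_cache *cachep, void *objp,
				   int map) {}
#endif

Every caller now passes only the map flag: slab_kernel_map(cachep, objp, 1) next to check_poison_obj() and slab_kernel_map(cachep, objp, 0) next to poison_obj(), as in the four call-site hunks above.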