Diffstat (limited to 'mm/slub.c')
 mm/slub.c | 49 +++++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 47 insertions(+), 2 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 3ca89ef9b7b0..30e7dedec664 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2750,16 +2750,61 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
}
EXPORT_SYMBOL(kmem_cache_free);
+/* Note that interrupts must be enabled when calling this function. */
void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
{
	__kmem_cache_free_bulk(s, size, p);
}
EXPORT_SYMBOL(kmem_cache_free_bulk);
+/* Note that interrupts must be enabled when calling this function. */
bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
- void **p)
+ void **p)
{
-	return __kmem_cache_alloc_bulk(s, flags, size, p);
+	struct kmem_cache_cpu *c;
+	int i;
+
+	/* Debugging fallback to generic bulk */
+	if (kmem_cache_debug(s))
+		return __kmem_cache_alloc_bulk(s, flags, size, p);
+
+	/*
+	 * Drain objects in the per cpu slab, while disabling local
+	 * IRQs, which protects against PREEMPT and interrupt
+	 * handlers invoking the normal fastpath.
+	 */
+	local_irq_disable();
+	c = this_cpu_ptr(s->cpu_slab);
+
+	for (i = 0; i < size; i++) {
+		void *object = c->freelist;
+
+		if (!object)
+			break;
+
+		c->freelist = get_freepointer(s, object);
+		p[i] = object;
+	}
+	c->tid = next_tid(c->tid);
+	local_irq_enable();
+
+	/* Clear memory outside IRQ disabled fastpath loop */
+	if (unlikely(flags & __GFP_ZERO)) {
+		int j;
+
+		for (j = 0; j < i; j++)
+			memset(p[j], 0, s->object_size);
+	}
+
+	/* Fallback to single elem alloc */
+	for (; i < size; i++) {
+		void *x = p[i] = kmem_cache_alloc(s, flags);
+		if (unlikely(!x)) {
+			__kmem_cache_free_bulk(s, i, p);
+			return false;
+		}
+	}
+	return true;
}
EXPORT_SYMBOL(kmem_cache_alloc_bulk);
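
For context, a minimal sketch of how a caller might use the bulk API touched by this patch. This is not part of the diff: the cache pointer "example_cache", the EXAMPLE_BATCH size, and the error handling are illustrative assumptions. At this point in the tree kmem_cache_alloc_bulk() returns a bool and is all-or-nothing (on failure it frees whatever it already grabbed), and both bulk calls must be made with local interrupts enabled, per the comments added above.

/*
 * Illustrative caller, not part of the patch: batch-allocate a few
 * objects from a hypothetical cache and release them via the bulk
 * free path. "example_cache" and EXAMPLE_BATCH are assumptions.
 */
#include <linux/slab.h>
#include <linux/errno.h>

#define EXAMPLE_BATCH 16

static int example_use_bulk(struct kmem_cache *example_cache)
{
	void *objs[EXAMPLE_BATCH];
	size_t i;

	/* Must be called with local interrupts enabled. */
	if (!kmem_cache_alloc_bulk(example_cache, GFP_KERNEL, EXAMPLE_BATCH, objs))
		return -ENOMEM;	/* all-or-nothing: nothing to clean up */

	for (i = 0; i < EXAMPLE_BATCH; i++) {
		/* ... use objs[i] ... */
	}

	kmem_cache_free_bulk(example_cache, EXAMPLE_BATCH, objs);
	return 0;
}

Note how the patch keeps the IRQ-disabled window tight: only the per-cpu freelist walk runs with interrupts off, while the __GFP_ZERO memset and the kmem_cache_alloc() fallback (which may sleep for GFP_KERNEL allocations) happen after local_irq_enable().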