path: root/mm/slab.c
author      Joonsoo Kim     2014-08-07 01:04:27 +0200
committer   Linus Torvalds  2014-08-07 03:01:14 +0200
commit      1fe00d50a9e81150de5000490b87ed227525cf09 (patch)
tree        fb9390e694eb4ed8cfba463c6136d0fdf74e0da7 /mm/slab.c
parent      slab: defer slab_destroy in free_block() (diff)
slab: factor out initialization of array cache
Factor out initialization of array cache to use it in following patch.

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Acked-by: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/slab.c')
-rw-r--r--    mm/slab.c    33
1 file changed, 19 insertions, 14 deletions
diff --git a/mm/slab.c b/mm/slab.c
index f6ad8d335be7..8d9a0fff160d 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -791,13 +791,8 @@ static void start_cpu_timer(int cpu)
 	}
 }
 
-static struct array_cache *alloc_arraycache(int node, int entries,
-					    int batchcount, gfp_t gfp)
+static void init_arraycache(struct array_cache *ac, int limit, int batch)
 {
-	int memsize = sizeof(void *) * entries + sizeof(struct array_cache);
-	struct array_cache *nc = NULL;
-
-	nc = kmalloc_node(memsize, gfp, node);
 	/*
 	 * The array_cache structures contain pointers to free object.
 	 * However, when such objects are allocated or transferred to another
@@ -805,15 +800,25 @@ static struct array_cache *alloc_arraycache(int node, int entries,
 	 * valid references during a kmemleak scan. Therefore, kmemleak must
 	 * not scan such objects.
 	 */
-	kmemleak_no_scan(nc);
-	if (nc) {
-		nc->avail = 0;
-		nc->limit = entries;
-		nc->batchcount = batchcount;
-		nc->touched = 0;
-		spin_lock_init(&nc->lock);
+	kmemleak_no_scan(ac);
+	if (ac) {
+		ac->avail = 0;
+		ac->limit = limit;
+		ac->batchcount = batch;
+		ac->touched = 0;
+		spin_lock_init(&ac->lock);
 	}
-	return nc;
+}
+
+static struct array_cache *alloc_arraycache(int node, int entries,
+					    int batchcount, gfp_t gfp)
+{
+	int memsize = sizeof(void *) * entries + sizeof(struct array_cache);
+	struct array_cache *ac = NULL;
+
+	ac = kmalloc_node(memsize, gfp, node);
+	init_arraycache(ac, entries, batchcount);
+	return ac;
 }
 
 static inline bool is_slab_pfmemalloc(struct page *page)
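
The refactor in the diff above is mechanical: the field setup that used to live inside alloc_arraycache() now sits in init_arraycache(), so a later patch can initialize an array_cache that it did not kmalloc itself, for example one embedded in a larger structure. Below is a minimal user-space sketch of that pattern with simplified types; locking and kmemleak handling are omitted, and the embedding caller (struct alien_cache / alloc_alien_cache() here) is an assumed illustration of how a follow-up patch might reuse the helper, not code from this commit.

#include <stdlib.h>

/* Simplified stand-in for the kernel's struct array_cache. */
struct array_cache {
	unsigned int avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int touched;
	void *entry[];	/* free-object pointers stored right after the header */
};

/* The factored-out helper: only initializes fields, does not allocate. */
static void init_arraycache(struct array_cache *ac, int limit, int batch)
{
	if (ac) {
		ac->avail = 0;
		ac->limit = limit;
		ac->batchcount = batch;
		ac->touched = 0;
	}
}

/* Original caller: allocates header plus entry array, then initializes. */
static struct array_cache *alloc_arraycache(int entries, int batchcount)
{
	size_t memsize = sizeof(void *) * entries + sizeof(struct array_cache);
	struct array_cache *ac = malloc(memsize);

	init_arraycache(ac, entries, batchcount);
	return ac;
}

/*
 * Assumed later caller: the array_cache is embedded in a bigger object,
 * so it could not have gone through alloc_arraycache() directly.
 */
struct alien_cache {
	struct array_cache ac;
};

static struct alien_cache *alloc_alien_cache(int entries, int batch)
{
	size_t memsize = sizeof(void *) * entries + sizeof(struct alien_cache);
	struct alien_cache *alc = malloc(memsize);

	if (alc)
		init_arraycache(&alc->ac, entries, batch);
	return alc;
}

int main(void)
{
	struct array_cache *ac = alloc_arraycache(16, 8);
	struct alien_cache *alc = alloc_alien_cache(16, 8);

	free(ac);
	free(alc);
	return 0;
}

Both callers share the same initialization path, which is the whole point of the commit: only the allocation strategy differs.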