Diffstat (limited to 'mm/slab.c')
 mm/slab.c | 37 +++++++++++++++++++------------------
 1 file changed, 19 insertions(+), 18 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index 453efcb1c980..18e3164de09a 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -759,6 +759,7 @@ static enum {
 	NONE,
 	PARTIAL_AC,
 	PARTIAL_L3,
+	EARLY,
 	FULL
 } g_cpucache_up;
 
@@ -767,7 +768,7 @@ static enum {
  */
 int slab_is_available(void)
 {
-	return g_cpucache_up == FULL;
+	return g_cpucache_up >= EARLY;
 }
 
 static DEFINE_PER_CPU(struct delayed_work, reap_work);
@@ -1631,19 +1632,27 @@ void __init kmem_cache_init(void)
 		}
 	}
 
-	/* 6) resize the head arrays to their final sizes */
-	{
-		struct kmem_cache *cachep;
-		mutex_lock(&cache_chain_mutex);
-		list_for_each_entry(cachep, &cache_chain, next)
-			if (enable_cpucache(cachep, GFP_NOWAIT))
-				BUG();
-		mutex_unlock(&cache_chain_mutex);
-	}
+	g_cpucache_up = EARLY;
 
 	/* Annotate slab for lockdep -- annotate the malloc caches */
 	init_lock_keys();
+}
+
+void __init kmem_cache_init_late(void)
+{
+	struct kmem_cache *cachep;
+
+	/*
+	 * Interrupts are enabled now so all GFP allocations are safe.
+	 */
+	slab_gfp_mask = __GFP_BITS_MASK;
+
+	/* 6) resize the head arrays to their final sizes */
+	mutex_lock(&cache_chain_mutex);
+	list_for_each_entry(cachep, &cache_chain, next)
+		if (enable_cpucache(cachep, GFP_NOWAIT))
+			BUG();
+	mutex_unlock(&cache_chain_mutex);
 
 	/* Done! */
 	g_cpucache_up = FULL;
 
@@ -1660,14 +1669,6 @@ void __init kmem_cache_init(void)
  */
 }
 
-void __init kmem_cache_init_late(void)
-{
-	/*
-	 * Interrupts are enabled now so all GFP allocations are safe.
-	 */
-	slab_gfp_mask = __GFP_BITS_MASK;
-}
-
 static int __init cpucache_init(void)
 {
 	int cpu;
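
The practical effect of the new EARLY state: slab_is_available() now returns true as soon as kmem_cache_init() finishes, so boot code that runs before kmem_cache_init_late() may already use the slab allocator. A minimal sketch of such a caller follows; setup_foo() and struct foo are hypothetical, for illustration only, and are not part of this patch:

/*
 * Hypothetical example, not part of the patch: an __init function that
 * runs after kmem_cache_init() but before kmem_cache_init_late().
 * With g_cpucache_up == EARLY, slab_is_available() already returns
 * true, so the kmalloc() below no longer has to be deferred.
 */
struct foo {				/* illustrative structure */
	int bar;
};

static void __init setup_foo(void)
{
	struct foo *f;

	if (!slab_is_available())
		return;			/* still too early in boot */

	/* GFP_NOWAIT: interrupts may still be disabled this early */
	f = kmalloc(sizeof(*f), GFP_NOWAIT);
	if (!f)
		panic("setup_foo: out of memory");
	f->bar = 0;
}

GFP_NOWAIT is used here because interrupts may still be disabled at this point in boot, mirroring the GFP_NOWAIT usage in the patch itself.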