author    Christoph Lameter  2008-01-08 08:20:26 +0100
committer Christoph Lameter  2008-02-04 19:56:02 +0100
commit    9824601ead957a29e35d539e43266c003f7b085b (patch)
tree      13df23987102e39fce77d64f60e499401444a905 /mm/slub.c
parent    Move count_partial before kmem_cache_shrink (diff)
SLUB: rename defrag to remote_node_defrag_ratio
The NUMA defrag works by allocating objects from partial slabs on remote nodes. Rename defrag_ratio to remote_node_defrag_ratio to make this clear.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
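For context while reading the hunks below: the field being renamed gates whether get_any_partial() scans partial slabs on remote NUMA nodes at all. The following is a minimal standalone sketch of that gating logic only; the struct and function names here are illustrative, not the kernel's.

#include <stdio.h>

/* Illustrative stand-in for the renamed field on struct kmem_cache. */
struct cache_sketch {
	unsigned int remote_node_defrag_ratio;	/* compared against a value in 0..1023 */
};

/*
 * Mirrors the check renamed in get_any_partial() below: a cheap
 * pseudo-random value (get_cycles() % 1024 in the kernel) is compared
 * against the ratio, so a larger ratio makes a scan of remote nodes'
 * partial lists more likely and 0 disables it entirely.
 */
static int should_scan_remote_nodes(const struct cache_sketch *s,
				    unsigned long cycles)
{
	if (!s->remote_node_defrag_ratio ||
	    cycles % 1024 > s->remote_node_defrag_ratio)
		return 0;	/* stay local: fall back to a fresh slab */
	return 1;		/* walk remote nodes' partial lists */
}

int main(void)
{
	/* Default of 100, as set in kmem_cache_open() in this patch. */
	struct cache_sketch s = { .remote_node_defrag_ratio = 100 };

	printf("%d\n", should_scan_remote_nodes(&s, 57));	/* 57 <= 100 -> 1 */
	printf("%d\n", should_scan_remote_nodes(&s, 500));	/* 500 > 100 -> 0 */
	return 0;
}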
Diffstat (limited to 'mm/slub.c')
-rw-r--r--  mm/slub.c  17
1 file changed, 9 insertions(+), 8 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 9aa12b54ad1b..5146e2779c11 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1295,7 +1295,8 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
* expensive if we do it every time we are trying to find a slab
* with available objects.
*/
- if (!s->defrag_ratio || get_cycles() % 1024 > s->defrag_ratio)
+ if (!s->remote_node_defrag_ratio ||
+ get_cycles() % 1024 > s->remote_node_defrag_ratio)
return NULL;
zonelist = &NODE_DATA(slab_node(current->mempolicy))
@@ -2209,7 +2210,7 @@ static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
s->refcount = 1;
#ifdef CONFIG_NUMA
- s->defrag_ratio = 100;
+ s->remote_node_defrag_ratio = 100;
#endif
if (!init_kmem_cache_nodes(s, gfpflags & ~SLUB_DMA))
goto error;
@@ -3847,21 +3848,21 @@ static ssize_t free_calls_show(struct kmem_cache *s, char *buf)
SLAB_ATTR_RO(free_calls);
#ifdef CONFIG_NUMA
-static ssize_t defrag_ratio_show(struct kmem_cache *s, char *buf)
+static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf)
{
- return sprintf(buf, "%d\n", s->defrag_ratio / 10);
+ return sprintf(buf, "%d\n", s->remote_node_defrag_ratio / 10);
}
-static ssize_t defrag_ratio_store(struct kmem_cache *s,
+static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s,
const char *buf, size_t length)
{
int n = simple_strtoul(buf, NULL, 10);
if (n < 100)
- s->defrag_ratio = n * 10;
+ s->remote_node_defrag_ratio = n * 10;
return length;
}
-SLAB_ATTR(defrag_ratio);
+SLAB_ATTR(remote_node_defrag_ratio);
#endif
static struct attribute * slab_attrs[] = {
@@ -3892,7 +3893,7 @@ static struct attribute * slab_attrs[] = {
&cache_dma_attr.attr,
#endif
#ifdef CONFIG_NUMA
- &defrag_ratio_attr.attr,
+ &remote_node_defrag_ratio_attr.attr,
#endif
NULL
};
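After this rename, the tunable is exposed under the new name in the SLUB sysfs tree. Below is a hypothetical userspace snippet showing how the renamed attribute might be read and updated; the sysfs path and the "kmalloc-64" cache name are assumptions for illustration, not part of this patch.

#include <stdio.h>

int main(void)
{
	/*
	 * Hypothetical tuning example, assuming a NUMA-enabled SLUB kernel
	 * with sysfs mounted at /sys; "kmalloc-64" is only an example cache
	 * name. The store hook above multiplies the written value by 10 and
	 * the show hook divides by 10, so the file speaks in 0..99 while the
	 * internal comparison range is 0..1023.
	 */
	const char *path =
		"/sys/kernel/slab/kmalloc-64/remote_node_defrag_ratio";
	FILE *f;
	int val;

	f = fopen(path, "r");
	if (!f) {
		perror("fopen");	/* e.g. attribute absent on a non-NUMA kernel */
		return 1;
	}
	if (fscanf(f, "%d", &val) == 1)
		printf("current remote_node_defrag_ratio: %d\n", val);
	fclose(f);

	f = fopen(path, "w");
	if (f) {
		fprintf(f, "50\n");	/* stored as 500: remote defrag roughly half the time */
		fclose(f);
	}
	return 0;
}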