From 19af27aff901e401a5b79e5c974e881e4701162c Mon Sep 17 00:00:00 2001
From: Alexey Dobriyan
Date: Thu, 5 Apr 2018 16:21:39 -0700
Subject: slub: make struct kmem_cache_order_objects::x unsigned int

struct kmem_cache_order_objects is for mixing order and number of
objects, and orders aren't big enough to warrant 64-bit width.

Propagate unsignedness down so that everything fits.

!!! Patch assumes that "PAGE_SIZE << order" doesn't overflow. !!!

Link: http://lkml.kernel.org/r/20180305200730.15812-23-adobriyan@gmail.com
Signed-off-by: Alexey Dobriyan
Cc: Christoph Lameter
Cc: Pekka Enberg
Cc: David Rientjes
Cc: Joonsoo Kim
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/slub.c | 74 +++++++++++++++++++++++++++++++++------------------------------
 1 file changed, 39 insertions(+), 35 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index 5c5b9aed10a1..21e9de8f8d3a 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -316,13 +316,13 @@ static inline unsigned int slab_index(void *p, struct kmem_cache *s, void *addr)
 	return (p - addr) / s->size;
 }
 
-static inline int order_objects(int order, unsigned long size, int reserved)
+static inline unsigned int order_objects(unsigned int order, unsigned int size, unsigned int reserved)
 {
-	return ((PAGE_SIZE << order) - reserved) / size;
+	return (((unsigned int)PAGE_SIZE << order) - reserved) / size;
 }
 
-static inline struct kmem_cache_order_objects oo_make(int order,
-		unsigned long size, int reserved)
+static inline struct kmem_cache_order_objects oo_make(unsigned int order,
+		unsigned int size, unsigned int reserved)
 {
 	struct kmem_cache_order_objects x = {
 		(order << OO_SHIFT) + order_objects(order, size, reserved)
@@ -331,12 +331,12 @@ static inline struct kmem_cache_order_objects oo_make(int order,
 	return x;
 }
 
-static inline int oo_order(struct kmem_cache_order_objects x)
+static inline unsigned int oo_order(struct kmem_cache_order_objects x)
 {
 	return x.x >> OO_SHIFT;
 }
 
-static inline int oo_objects(struct kmem_cache_order_objects x)
+static inline unsigned int oo_objects(struct kmem_cache_order_objects x)
 {
 	return x.x & OO_MASK;
 }
@@ -1436,7 +1436,7 @@ static inline struct page *alloc_slab_page(struct kmem_cache *s,
 		gfp_t flags, int node, struct kmem_cache_order_objects oo)
 {
 	struct page *page;
-	int order = oo_order(oo);
+	unsigned int order = oo_order(oo);
 
 	if (node == NUMA_NO_NODE)
 		page = alloc_pages(flags, order);
@@ -1455,8 +1455,8 @@ static inline struct page *alloc_slab_page(struct kmem_cache *s,
 /* Pre-initialize the random sequence cache */
 static int init_cache_random_seq(struct kmem_cache *s)
 {
+	unsigned int count = oo_objects(s->oo);
 	int err;
-	unsigned long i, count = oo_objects(s->oo);
 
 	/* Bailout if already initialised */
 	if (s->random_seq)
@@ -1471,6 +1471,8 @@ static int init_cache_random_seq(struct kmem_cache *s)
 
 	/* Transform to an offset on the set of pages */
 	if (s->random_seq) {
+		unsigned int i;
+
 		for (i = 0; i < count; i++)
 			s->random_seq[i] *= s->size;
 	}
@@ -2399,7 +2401,7 @@ slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
 
 	pr_warn("SLUB: Unable to allocate memory on node %d, gfp=%#x(%pGg)\n",
 		nid, gfpflags, &gfpflags);
-	pr_warn("  cache: %s, object size: %u, buffer size: %u, default order: %d, min order: %d\n",
+	pr_warn("  cache: %s, object size: %u, buffer size: %u, default order: %u, min order: %u\n",
 		s->name, s->object_size, s->size, oo_order(s->oo),
 		oo_order(s->min));
 
@@ -3182,9 +3184,9 @@ EXPORT_SYMBOL(kmem_cache_alloc_bulk);
  * and increases the number of allocations possible without having to
  * take the list_lock.
  */
-static int slub_min_order;
-static int slub_max_order = PAGE_ALLOC_COSTLY_ORDER;
-static int slub_min_objects;
+static unsigned int slub_min_order;
+static unsigned int slub_max_order = PAGE_ALLOC_COSTLY_ORDER;
+static unsigned int slub_min_objects;
 
 /*
  * Calculate the order of allocation given an slab object size.
@@ -3211,20 +3213,21 @@ static int slub_min_objects;
  * requested a higher mininum order then we start with that one instead of
  * the smallest order which will fit the object.
  */
-static inline int slab_order(int size, int min_objects,
-				int max_order, int fract_leftover, int reserved)
+static inline unsigned int slab_order(unsigned int size,
+		unsigned int min_objects, unsigned int max_order,
+		unsigned int fract_leftover, unsigned int reserved)
 {
-	int order;
-	int rem;
-	int min_order = slub_min_order;
+	unsigned int min_order = slub_min_order;
+	unsigned int order;
 
 	if (order_objects(min_order, size, reserved) > MAX_OBJS_PER_PAGE)
 		return get_order(size * MAX_OBJS_PER_PAGE) - 1;
 
-	for (order = max(min_order, get_order(min_objects * size + reserved));
+	for (order = max(min_order, (unsigned int)get_order(min_objects * size + reserved));
 			order <= max_order; order++) {
 
-		unsigned long slab_size = PAGE_SIZE << order;
+		unsigned int slab_size = (unsigned int)PAGE_SIZE << order;
+		unsigned int rem;
 
 		rem = (slab_size - reserved) % size;
 
@@ -3235,12 +3238,11 @@ static inline int slab_order(int size, int min_objects,
 	return order;
 }
 
-static inline int calculate_order(int size, int reserved)
+static inline int calculate_order(unsigned int size, unsigned int reserved)
 {
-	int order;
-	int min_objects;
-	int fraction;
-	int max_objects;
+	unsigned int order;
+	unsigned int min_objects;
+	unsigned int max_objects;
 
 	/*
 	 * Attempt to find best configuration for a slab. This
@@ -3257,6 +3259,8 @@ static inline int calculate_order(int size, int reserved)
 	min_objects = min(min_objects, max_objects);
 
 	while (min_objects > 1) {
+		unsigned int fraction;
+
 		fraction = 16;
 		while (fraction >= 4) {
 			order = slab_order(size, min_objects,
@@ -3459,7 +3463,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
 {
 	slab_flags_t flags = s->flags;
 	unsigned int size = s->object_size;
-	int order;
+	unsigned int order;
 
 	/*
 	 * Round up object size to the next word boundary. We can only
@@ -3549,7 +3553,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
 	else
 		order = calculate_order(size, s->reserved);
 
-	if (order < 0)
+	if ((int)order < 0)
 		return 0;
 
 	s->allocflags = 0;
@@ -3717,7 +3721,7 @@ int __kmem_cache_shutdown(struct kmem_cache *s)
 
 static int __init setup_slub_min_order(char *str)
 {
-	get_option(&str, &slub_min_order);
+	get_option(&str, (int *)&slub_min_order);
 
 	return 1;
 }
@@ -3726,8 +3730,8 @@ __setup("slub_min_order=", setup_slub_min_order);
 
 static int __init setup_slub_max_order(char *str)
 {
-	get_option(&str, &slub_max_order);
-	slub_max_order = min(slub_max_order, MAX_ORDER - 1);
+	get_option(&str, (int *)&slub_max_order);
+	slub_max_order = min(slub_max_order, (unsigned int)MAX_ORDER - 1);
 
 	return 1;
 }
@@ -3736,7 +3740,7 @@ __setup("slub_max_order=", setup_slub_max_order);
 
 static int __init setup_slub_min_objects(char *str)
 {
-	get_option(&str, &slub_min_objects);
+	get_option(&str, (int *)&slub_min_objects);
 
 	return 1;
 }
@@ -4231,7 +4235,7 @@ void __init kmem_cache_init(void)
 	cpuhp_setup_state_nocalls(CPUHP_SLUB_DEAD, "slub:dead", NULL,
 				  slub_cpu_dead);
 
-	pr_info("SLUB: HWalign=%d, Order=%d-%d, MinObjects=%d, CPUs=%u, Nodes=%d\n",
+	pr_info("SLUB: HWalign=%d, Order=%u-%u, MinObjects=%u, CPUs=%u, Nodes=%d\n",
 		cache_line_size(),
 		slub_min_order, slub_max_order, slub_min_objects,
 		nr_cpu_ids, nr_node_ids);
@@ -4907,17 +4911,17 @@ SLAB_ATTR_RO(object_size);
 
 static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf)
 {
-	return sprintf(buf, "%d\n", oo_objects(s->oo));
+	return sprintf(buf, "%u\n", oo_objects(s->oo));
 }
 SLAB_ATTR_RO(objs_per_slab);
 
 static ssize_t order_store(struct kmem_cache *s,
 				const char *buf, size_t length)
 {
-	unsigned long order;
+	unsigned int order;
 	int err;
 
-	err = kstrtoul(buf, 10, &order);
+	err = kstrtouint(buf, 10, &order);
 	if (err)
 		return err;
 
@@ -4930,7 +4934,7 @@ static ssize_t order_store(struct kmem_cache *s,
 
 static ssize_t order_show(struct kmem_cache *s, char *buf)
 {
-	return sprintf(buf, "%d\n", oo_order(s->oo));
+	return sprintf(buf, "%u\n", oo_order(s->oo));
}
 SLAB_ATTR(order);
--
cgit v1.2.3-55-g7522
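
For readers unfamiliar with the packing this patch narrows: struct kmem_cache_order_objects stores the slab's page order in the bits above OO_SHIFT and the per-slab object count in the low OO_MASK bits, which is why 32 bits are plenty. Below is a minimal user-space sketch of that scheme, not kernel code; it assumes 4 KiB pages and the OO_SHIFT/OO_MASK values mm/slub.c used at the time (OO_SHIFT = 16), and compiles with any C99 compiler.

	/* Stand-alone sketch of the order/objects packing in mm/slub.c. */
	#include <stdio.h>

	#define PAGE_SIZE	4096u	/* assumption: 4 KiB pages; in the kernel
					 * PAGE_SIZE is unsigned long, hence the
					 * (unsigned int) casts in the patch */
	#define OO_SHIFT	16
	#define OO_MASK		((1 << OO_SHIFT) - 1)

	struct kmem_cache_order_objects {
		unsigned int x;
	};

	/* How many objects of 'size' bytes fit in a 2^order-page slab. */
	static unsigned int order_objects(unsigned int order, unsigned int size,
					  unsigned int reserved)
	{
		return ((PAGE_SIZE << order) - reserved) / size;
	}

	/* Pack the order into the high bits, the object count into the low bits. */
	static struct kmem_cache_order_objects oo_make(unsigned int order,
						       unsigned int size,
						       unsigned int reserved)
	{
		struct kmem_cache_order_objects x = {
			(order << OO_SHIFT) + order_objects(order, size, reserved)
		};
		return x;
	}

	static unsigned int oo_order(struct kmem_cache_order_objects x)
	{
		return x.x >> OO_SHIFT;
	}

	static unsigned int oo_objects(struct kmem_cache_order_objects x)
	{
		return x.x & OO_MASK;
	}

	int main(void)
	{
		/* A hypothetical cache of 256-byte objects in order-1 (8 KiB) slabs. */
		struct kmem_cache_order_objects oo = oo_make(1, 256, 0);

		printf("order=%u objects=%u\n", oo_order(oo), oo_objects(oo));
		return 0;	/* prints: order=1 objects=32 */
	}

The sketch also illustrates why the commit's stated assumption is safe in practice: with 4 KiB pages, "PAGE_SIZE << order" only overflows a 32-bit unsigned int at order 20, far above the MAX_ORDER-bounded orders SLUB ever uses.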