From 7b7c1df2883dd4393592859758c3e76207da8b1d Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 16 Jul 2019 16:25:54 -0700 Subject: lib/mpi/longlong.h: fix building with 32-bit x86 The mpi library contains some rather old inline assembly statements that produce a lot of warnings for 32-bit x86, such as: lib/mpi/mpih-div.c:76:16: error: invalid use of a cast in a inline asm context requiring an l-value: remove the cast or build with -fheinous-gnu-extensions udiv_qrnnd(qp[i], n1, n1, np[i], d); ~~~~~~~~~~~^~~~~~~~~~~~~~~~~~~~~~~~ lib/mpi/longlong.h:423:20: note: expanded from macro 'udiv_qrnnd' : "=a" ((USItype)(q)), \ ~~~~~~~~~~^~ There is no point in doing a type cast for the output of an inline assembler statement, so just remove the cast here, as we have done for other architectures in the past. See also dea632cadd12 ("lib/mpi: fix build with clang"). Link: http://lkml.kernel.org/r/20190712090740.340186-1-arnd@arndb.de Signed-off-by: Arnd Bergmann Reviewed-by: Nick Desaulniers Cc: Stefan Agner Cc: Dmitry Kasatkin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/mpi/longlong.h | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) (limited to 'lib') diff --git a/lib/mpi/longlong.h b/lib/mpi/longlong.h index 08c60d10747f..3bb6260d8f42 100644 --- a/lib/mpi/longlong.h +++ b/lib/mpi/longlong.h @@ -397,8 +397,8 @@ do { \ #define add_ssaaaa(sh, sl, ah, al, bh, bl) \ __asm__ ("addl %5,%1\n" \ "adcl %3,%0" \ - : "=r" ((USItype)(sh)), \ - "=&r" ((USItype)(sl)) \ + : "=r" (sh), \ + "=&r" (sl) \ : "%0" ((USItype)(ah)), \ "g" ((USItype)(bh)), \ "%1" ((USItype)(al)), \ @@ -406,22 +406,22 @@ do { \ #define sub_ddmmss(sh, sl, ah, al, bh, bl) \ __asm__ ("subl %5,%1\n" \ "sbbl %3,%0" \ - : "=r" ((USItype)(sh)), \ - "=&r" ((USItype)(sl)) \ + : "=r" (sh), \ + "=&r" (sl) \ : "0" ((USItype)(ah)), \ "g" ((USItype)(bh)), \ "1" ((USItype)(al)), \ "g" ((USItype)(bl))) #define umul_ppmm(w1, w0, u, v) \ __asm__ ("mull %3" \ - : "=a" ((USItype)(w0)), \ - "=d" ((USItype)(w1)) \ + : "=a" (w0), \ + "=d" (w1) \ : "%0" ((USItype)(u)), \ "rm" ((USItype)(v))) #define udiv_qrnnd(q, r, n1, n0, d) \ __asm__ ("divl %4" \ - : "=a" ((USItype)(q)), \ - "=d" ((USItype)(r)) \ + : "=a" (q), \ + "=d" (r) \ : "0" ((USItype)(n0)), \ "1" ((USItype)(n1)), \ "rm" ((USItype)(d))) -- cgit v1.2.3-55-g7522 From b09757104e433447226a95eff4b92583acc0b0fb Mon Sep 17 00:00:00 2001 From: Peter Rosin Date: Tue, 16 Jul 2019 16:27:15 -0700 Subject: lib/string.c: allow searching for NUL with strnchr Patch series "lib/string: search for NUL with strchr/strnchr". I noticed an inconsistency where strchr and strnchr do not behave the same with respect to the trailing NUL. strchr is standardised and the kernel function conforms, and the kernel relies on the behavior. So, naturally strchr stays as-is and strnchr is what I change. While writing a few tests to verify that my new strnchr loop was sane, I noticed that the tests for memset16/32/64 had a problem. Since it's all about the lib/string.c file I made a short series of it all... This patch (of 3): strchr considers the terminating NUL to be part of the string, and NUL can thus be searched for with that function. For consistency, do the same with strnchr. 
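As a quick illustration (a standalone userspace sketch, not part of the patch; the _old/_new names are invented for the comparison), the behavioral difference looks like this, with both loop bodies lifted from the diff below:

	#include <stddef.h>
	#include <stdio.h>

	/* Pre-patch loop: stops before the NUL, so NUL itself can never be found. */
	static char *strnchr_old(const char *s, size_t count, int c)
	{
		for (; count-- && *s != '\0'; ++s)
			if (*s == (char)c)
				return (char *)s;
		return NULL;
	}

	/* Post-patch loop: the terminating NUL is searchable, as with strchr(). */
	static char *strnchr_new(const char *s, size_t count, int c)
	{
		while (count--) {
			if (*s == (char)c)
				return (char *)s;
			if (*s++ == '\0')
				break;
		}
		return NULL;
	}

	int main(void)
	{
		const char *s = "abc";

		printf("%d\n", strnchr_old(s, 4, '\0') == NULL);   /* 1: old misses the NUL */
		printf("%d\n", strnchr_new(s, 4, '\0') == s + 3);  /* 1: new finds it */
		return 0;
	}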
Link: http://lkml.kernel.org/r/20190506124634.6807-2-peda@axentia.se Signed-off-by: Peter Rosin Cc: Matthew Wilcox Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/string.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) (limited to 'lib') diff --git a/lib/string.c b/lib/string.c index 6016eb3ac73d..461fb620f85f 100644 --- a/lib/string.c +++ b/lib/string.c @@ -400,6 +400,9 @@ EXPORT_SYMBOL(strncmp); * strchr - Find the first occurrence of a character in a string * @s: The string to be searched * @c: The character to search for + * + * Note that the %NUL-terminator is considered part of the string, and can + * be searched for. */ char *strchr(const char *s, int c) { @@ -453,12 +456,18 @@ EXPORT_SYMBOL(strrchr); * @s: The string to be searched * @count: The number of characters to be searched * @c: The character to search for + * + * Note that the %NUL-terminator is considered part of the string, and can + * be searched for. */ char *strnchr(const char *s, size_t count, int c) { - for (; count-- && *s != '\0'; ++s) + while (count--) { if (*s == (char)c) return (char *)s; + if (*s++ == '\0') + break; + } return NULL; } EXPORT_SYMBOL(strnchr); -- cgit v1.2.3-55-g7522 From 33d6e0ff68af74be0c846c8e042e84a9a1a0561e Mon Sep 17 00:00:00 2001 From: Peter Rosin Date: Tue, 16 Jul 2019 16:27:18 -0700 Subject: lib/test_string.c: avoid masking memset16/32/64 failures If a memsetXX implementation is completely broken and fails in the first iteration, when i, j, and k are all zero, the failure is masked as zero is returned. Failing in the first iteration is perhaps the most likely failure, so this makes the tests pretty much useless. Avoid the situation by always setting a random unused bit in the result on failure. Link: http://lkml.kernel.org/r/20190506124634.6807-3-peda@axentia.se Fixes: 03270c13c5ff ("lib/string.c: add testcases for memset16/32/64") Signed-off-by: Peter Rosin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/test_string.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'lib') diff --git a/lib/test_string.c b/lib/test_string.c index bf8def01ed20..b5117ae59693 100644 --- a/lib/test_string.c +++ b/lib/test_string.c @@ -36,7 +36,7 @@ static __init int memset16_selftest(void) fail: kfree(p); if (i < 256) - return (i << 24) | (j << 16) | k; + return (i << 24) | (j << 16) | k | 0x8000; return 0; } @@ -72,7 +72,7 @@ static __init int memset32_selftest(void) fail: kfree(p); if (i < 256) - return (i << 24) | (j << 16) | k; + return (i << 24) | (j << 16) | k | 0x8000; return 0; } @@ -108,7 +108,7 @@ static __init int memset64_selftest(void) fail: kfree(p); if (i < 256) - return (i << 24) | (j << 16) | k; + return (i << 24) | (j << 16) | k | 0x8000; return 0; } -- cgit v1.2.3-55-g7522 From d1a5dc5e6accbeaabe59e3d55b47f15a8b19c2bd Mon Sep 17 00:00:00 2001 From: Peter Rosin Date: Tue, 16 Jul 2019 16:27:21 -0700 Subject: lib/test_string.c: add some testcases for strchr and strnchr Make sure that the trailing NUL is considered part of the string and can be found. 
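For reference, the invariants these tests lock down, as a minimal assert-style sketch (userspace, using the standard strchr(); strnchr() is kernel-only, so its expected results are noted in comments):

	#include <assert.h>
	#include <string.h>

	int main(void)
	{
		const char *s = "abcdefghijkl";

		assert(strchr(s, '\0') == s + strlen(s));	/* NUL is findable */
		assert(strchr("", 'a') == NULL);
		/*
		 * With the previous patch applied, kernel strnchr() should
		 * agree whenever count covers the terminator:
		 *   strnchr(s, strlen(s) + 1, '\0') == s + strlen(s)
		 *   strnchr(s, strlen(s), '\0') == NULL
		 */
		return 0;
	}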
Link: http://lkml.kernel.org/r/20190506124634.6807-4-peda@axentia.se Signed-off-by: Peter Rosin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/test_string.c | 77 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 77 insertions(+) (limited to 'lib') diff --git a/lib/test_string.c b/lib/test_string.c index b5117ae59693..7b31f4a505bf 100644 --- a/lib/test_string.c +++ b/lib/test_string.c @@ -112,6 +112,73 @@ fail: return 0; } +static __init int strchr_selftest(void) +{ + const char *test_string = "abcdefghijkl"; + const char *empty_string = ""; + char *result; + int i; + + for (i = 0; i < strlen(test_string) + 1; i++) { + result = strchr(test_string, test_string[i]); + if (result - test_string != i) + return i + 'a'; + } + + result = strchr(empty_string, '\0'); + if (result != empty_string) + return 0x101; + + result = strchr(empty_string, 'a'); + if (result) + return 0x102; + + result = strchr(test_string, 'z'); + if (result) + return 0x103; + + return 0; +} + +static __init int strnchr_selftest(void) +{ + const char *test_string = "abcdefghijkl"; + const char *empty_string = ""; + char *result; + int i, j; + + for (i = 0; i < strlen(test_string) + 1; i++) { + for (j = 0; j < strlen(test_string) + 2; j++) { + result = strnchr(test_string, j, test_string[i]); + if (j <= i) { + if (!result) + continue; + return ((i + 'a') << 8) | j; + } + if (result - test_string != i) + return ((i + 'a') << 8) | j; + } + } + + result = strnchr(empty_string, 0, '\0'); + if (result) + return 0x10001; + + result = strnchr(empty_string, 1, '\0'); + if (result != empty_string) + return 0x10002; + + result = strnchr(empty_string, 1, 'a'); + if (result) + return 0x10003; + + result = strnchr(NULL, 0, '\0'); + if (result) + return 0x10004; + + return 0; +} + static __init int string_selftest_init(void) { int test, subtest; @@ -131,6 +198,16 @@ static __init int string_selftest_init(void) if (subtest) goto fail; + test = 4; + subtest = strchr_selftest(); + if (subtest) + goto fail; + + test = 5; + subtest = strnchr_selftest(); + if (subtest) + goto fail; + pr_info("String selftests succeeded\n"); return 0; fail: -- cgit v1.2.3-55-g7522 From 8e060c21ae2c265a2b596e9e7f9f97ec274151a4 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Tue, 16 Jul 2019 16:27:24 -0700 Subject: lib/test_overflow.c: avoid tainting the kernel and fix wrap size This adds __GFP_NOWARN to the kmalloc()-portions of the overflow test to avoid tainting the kernel. Additionally fixes up the math on wrap size to be architecture and page size agnostic. Link: http://lkml.kernel.org/r/201905282012.0A8767E24@keescook Fixes: ca90800a91ba ("test_overflow: Add memory allocation overflow tests") Signed-off-by: Kees Cook Reported-by: Randy Dunlap Suggested-by: Rasmus Villemoes Cc: Joe Perches Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/test_overflow.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) (limited to 'lib') diff --git a/lib/test_overflow.c b/lib/test_overflow.c index fc680562d8b6..7a4b6f6c5473 100644 --- a/lib/test_overflow.c +++ b/lib/test_overflow.c @@ -486,16 +486,17 @@ static int __init test_overflow_shift(void) * Deal with the various forms of allocator arguments. See comments above * the DEFINE_TEST_ALLOC() instances for mapping of the "bits". 
*/ -#define alloc010(alloc, arg, sz) alloc(sz, GFP_KERNEL) -#define alloc011(alloc, arg, sz) alloc(sz, GFP_KERNEL, NUMA_NO_NODE) +#define alloc_GFP (GFP_KERNEL | __GFP_NOWARN) +#define alloc010(alloc, arg, sz) alloc(sz, alloc_GFP) +#define alloc011(alloc, arg, sz) alloc(sz, alloc_GFP, NUMA_NO_NODE) #define alloc000(alloc, arg, sz) alloc(sz) #define alloc001(alloc, arg, sz) alloc(sz, NUMA_NO_NODE) -#define alloc110(alloc, arg, sz) alloc(arg, sz, GFP_KERNEL) +#define alloc110(alloc, arg, sz) alloc(arg, sz, alloc_GFP) #define free0(free, arg, ptr) free(ptr) #define free1(free, arg, ptr) free(arg, ptr) -/* Wrap around to 8K */ -#define TEST_SIZE (9 << PAGE_SHIFT) +/* Wrap around to 16K */ +#define TEST_SIZE (5 * 4096) #define DEFINE_TEST_ALLOC(func, free_func, want_arg, want_gfp, want_node)\ static int __init test_ ## func (void *arg) \ -- cgit v1.2.3-55-g7522 From 5015a300a522c8fb542dc993140e4c360cf4cf5f Mon Sep 17 00:00:00 2001 From: Alexander Potapenko Date: Tue, 16 Jul 2019 16:27:27 -0700 Subject: lib: introduce test_meminit module Add tests for heap and pagealloc initialization. These can be used to check init_on_alloc and init_on_free implementations as well as other approaches to initialization. Expected test output in the case the kernel provides heap initialization (e.g. when running with either init_on_alloc=1 or init_on_free=1): test_meminit: all 10 tests in test_pages passed test_meminit: all 40 tests in test_kvmalloc passed test_meminit: all 60 tests in test_kmemcache passed test_meminit: all 10 tests in test_rcu_persistent passed test_meminit: all 120 tests passed! Link: http://lkml.kernel.org/r/20190529123812.43089-4-glider@google.com Signed-off-by: Alexander Potapenko Acked-by: Kees Cook Cc: Christoph Lameter Cc: Nick Desaulniers Cc: Kostya Serebryany Cc: Dmitry Vyukov Cc: Sandeep Patil Cc: Laura Abbott Cc: Jann Horn Cc: Marco Elver Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/Kconfig.debug | 8 ++ lib/Makefile | 1 + lib/test_meminit.c | 362 +++++++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 371 insertions(+) create mode 100644 lib/test_meminit.c (limited to 'lib') diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 4ac4ca21a30a..c6ee805202bd 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -2076,6 +2076,14 @@ config TEST_STACKINIT If unsure, say N. +config TEST_MEMINIT + tristate "Test heap/page initialization" + help + Test if the kernel is zero-initializing heap and page allocations. + This can be useful to test init_on_alloc and init_on_free features. + + If unsure, say N. + endif # RUNTIME_TESTING_MENU config MEMTEST diff --git a/lib/Makefile b/lib/Makefile index fdd56bc219b8..59067f51f3ab 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -92,6 +92,7 @@ obj-$(CONFIG_TEST_MEMCAT_P) += test_memcat_p.o obj-$(CONFIG_TEST_OBJAGG) += test_objagg.o obj-$(CONFIG_TEST_STACKINIT) += test_stackinit.o obj-$(CONFIG_TEST_BLACKHOLE_DEV) += test_blackhole_dev.o +obj-$(CONFIG_TEST_MEMINIT) += test_meminit.o obj-$(CONFIG_TEST_LIVEPATCH) += livepatch/ diff --git a/lib/test_meminit.c b/lib/test_meminit.c new file mode 100644 index 000000000000..ed7efec1387b --- /dev/null +++ b/lib/test_meminit.c @@ -0,0 +1,362 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Test cases for SL[AOU]B/page initialization at alloc/free time. 
+ */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include + +#define GARBAGE_INT (0x09A7BA9E) +#define GARBAGE_BYTE (0x9E) + +#define REPORT_FAILURES_IN_FN() \ + do { \ + if (failures) \ + pr_info("%s failed %d out of %d times\n", \ + __func__, failures, num_tests); \ + else \ + pr_info("all %d tests in %s passed\n", \ + num_tests, __func__); \ + } while (0) + +/* Calculate the number of uninitialized bytes in the buffer. */ +static int __init count_nonzero_bytes(void *ptr, size_t size) +{ + int i, ret = 0; + unsigned char *p = (unsigned char *)ptr; + + for (i = 0; i < size; i++) + if (p[i]) + ret++; + return ret; +} + +/* Fill a buffer with garbage, skipping |skip| first bytes. */ +static void __init fill_with_garbage_skip(void *ptr, size_t size, size_t skip) +{ + unsigned int *p = (unsigned int *)ptr; + int i = 0; + + if (skip) { + WARN_ON(skip > size); + p += skip; + } + while (size >= sizeof(*p)) { + p[i] = GARBAGE_INT; + i++; + size -= sizeof(*p); + } + if (size) + memset(&p[i], GARBAGE_BYTE, size); +} + +static void __init fill_with_garbage(void *ptr, size_t size) +{ + fill_with_garbage_skip(ptr, size, 0); +} + +static int __init do_alloc_pages_order(int order, int *total_failures) +{ + struct page *page; + void *buf; + size_t size = PAGE_SIZE << order; + + page = alloc_pages(GFP_KERNEL, order); + buf = page_address(page); + fill_with_garbage(buf, size); + __free_pages(page, order); + + page = alloc_pages(GFP_KERNEL, order); + buf = page_address(page); + if (count_nonzero_bytes(buf, size)) + (*total_failures)++; + fill_with_garbage(buf, size); + __free_pages(page, order); + return 1; +} + +/* Test the page allocator by calling alloc_pages with different orders. */ +static int __init test_pages(int *total_failures) +{ + int failures = 0, num_tests = 0; + int i; + + for (i = 0; i < 10; i++) + num_tests += do_alloc_pages_order(i, &failures); + + REPORT_FAILURES_IN_FN(); + *total_failures += failures; + return num_tests; +} + +/* Test kmalloc() with given parameters. */ +static int __init do_kmalloc_size(size_t size, int *total_failures) +{ + void *buf; + + buf = kmalloc(size, GFP_KERNEL); + fill_with_garbage(buf, size); + kfree(buf); + + buf = kmalloc(size, GFP_KERNEL); + if (count_nonzero_bytes(buf, size)) + (*total_failures)++; + fill_with_garbage(buf, size); + kfree(buf); + return 1; +} + +/* Test vmalloc() with given parameters. */ +static int __init do_vmalloc_size(size_t size, int *total_failures) +{ + void *buf; + + buf = vmalloc(size); + fill_with_garbage(buf, size); + vfree(buf); + + buf = vmalloc(size); + if (count_nonzero_bytes(buf, size)) + (*total_failures)++; + fill_with_garbage(buf, size); + vfree(buf); + return 1; +} + +/* Test kmalloc()/vmalloc() by allocating objects of different sizes. */ +static int __init test_kvmalloc(int *total_failures) +{ + int failures = 0, num_tests = 0; + int i, size; + + for (i = 0; i < 20; i++) { + size = 1 << i; + num_tests += do_kmalloc_size(size, &failures); + num_tests += do_vmalloc_size(size, &failures); + } + + REPORT_FAILURES_IN_FN(); + *total_failures += failures; + return num_tests; +} + +#define CTOR_BYTES (sizeof(unsigned int)) +#define CTOR_PATTERN (0x41414141) +/* Initialize the first 4 bytes of the object. */ +static void test_ctor(void *obj) +{ + *(unsigned int *)obj = CTOR_PATTERN; +} + +/* + * Check the invariants for the buffer allocated from a slab cache. 
+ * If the cache has a test constructor, the first 4 bytes of the object must + * always remain equal to CTOR_PATTERN. + * If the cache isn't an RCU-typesafe one, or if the allocation is done with + * __GFP_ZERO, then the object contents must be zeroed after allocation. + * If the cache is an RCU-typesafe one, the object contents must never be + * zeroed after the first use. This is checked by memcmp() in + * do_kmem_cache_size(). + */ +static bool __init check_buf(void *buf, int size, bool want_ctor, + bool want_rcu, bool want_zero) +{ + int bytes; + bool fail = false; + + bytes = count_nonzero_bytes(buf, size); + WARN_ON(want_ctor && want_zero); + if (want_zero) + return bytes; + if (want_ctor) { + if (*(unsigned int *)buf != CTOR_PATTERN) + fail = 1; + } else { + if (bytes) + fail = !want_rcu; + } + return fail; +} + +/* + * Test kmem_cache with given parameters: + * want_ctor - use a constructor; + * want_rcu - use SLAB_TYPESAFE_BY_RCU; + * want_zero - use __GFP_ZERO. + */ +static int __init do_kmem_cache_size(size_t size, bool want_ctor, + bool want_rcu, bool want_zero, + int *total_failures) +{ + struct kmem_cache *c; + int iter; + bool fail = false; + gfp_t alloc_mask = GFP_KERNEL | (want_zero ? __GFP_ZERO : 0); + void *buf, *buf_copy; + + c = kmem_cache_create("test_cache", size, 1, + want_rcu ? SLAB_TYPESAFE_BY_RCU : 0, + want_ctor ? test_ctor : NULL); + for (iter = 0; iter < 10; iter++) { + buf = kmem_cache_alloc(c, alloc_mask); + /* Check that buf is zeroed, if it must be. */ + fail = check_buf(buf, size, want_ctor, want_rcu, want_zero); + fill_with_garbage_skip(buf, size, want_ctor ? CTOR_BYTES : 0); + /* + * If this is an RCU cache, use a critical section to ensure we + * can touch objects after they're freed. + */ + if (want_rcu) { + rcu_read_lock(); + /* + * Copy the buffer to check that it's not wiped on + * free(). + */ + buf_copy = kmalloc(size, GFP_KERNEL); + if (buf_copy) + memcpy(buf_copy, buf, size); + } + kmem_cache_free(c, buf); + if (want_rcu) { + /* + * Check that |buf| is intact after kmem_cache_free(). + * |want_zero| is false, because we wrote garbage to + * the buffer already. + */ + fail |= check_buf(buf, size, want_ctor, want_rcu, + false); + if (buf_copy) { + fail |= (bool)memcmp(buf, buf_copy, size); + kfree(buf_copy); + } + rcu_read_unlock(); + } + } + kmem_cache_destroy(c); + + *total_failures += fail; + return 1; +} + +/* + * Check that the data written to an RCU-allocated object survives + * reallocation. + */ +static int __init do_kmem_cache_rcu_persistent(int size, int *total_failures) +{ + struct kmem_cache *c; + void *buf, *buf_contents, *saved_ptr; + void **used_objects; + int i, iter, maxiter = 1024; + bool fail = false; + + c = kmem_cache_create("test_cache", size, size, SLAB_TYPESAFE_BY_RCU, + NULL); + buf = kmem_cache_alloc(c, GFP_KERNEL); + saved_ptr = buf; + fill_with_garbage(buf, size); + buf_contents = kmalloc(size, GFP_KERNEL); + if (!buf_contents) + goto out; + used_objects = kmalloc_array(maxiter, sizeof(void *), GFP_KERNEL); + if (!used_objects) { + kfree(buf_contents); + goto out; + } + memcpy(buf_contents, buf, size); + kmem_cache_free(c, buf); + /* + * Run for a fixed number of iterations. If we never hit saved_ptr, + * assume the test passes. 
+ */ + for (iter = 0; iter < maxiter; iter++) { + buf = kmem_cache_alloc(c, GFP_KERNEL); + used_objects[iter] = buf; + if (buf == saved_ptr) { + fail = memcmp(buf_contents, buf, size); + for (i = 0; i <= iter; i++) + kmem_cache_free(c, used_objects[i]); + goto free_out; + } + } + +free_out: + kmem_cache_destroy(c); + kfree(buf_contents); + kfree(used_objects); +out: + *total_failures += fail; + return 1; +} + +/* + * Test kmem_cache allocation by creating caches of different sizes, with and + * without constructors, with and without SLAB_TYPESAFE_BY_RCU. + */ +static int __init test_kmemcache(int *total_failures) +{ + int failures = 0, num_tests = 0; + int i, flags, size; + bool ctor, rcu, zero; + + for (i = 0; i < 10; i++) { + size = 8 << i; + for (flags = 0; flags < 8; flags++) { + ctor = flags & 1; + rcu = flags & 2; + zero = flags & 4; + if (ctor & zero) + continue; + num_tests += do_kmem_cache_size(size, ctor, rcu, zero, + &failures); + } + } + REPORT_FAILURES_IN_FN(); + *total_failures += failures; + return num_tests; +} + +/* Test the behavior of SLAB_TYPESAFE_BY_RCU caches of different sizes. */ +static int __init test_rcu_persistent(int *total_failures) +{ + int failures = 0, num_tests = 0; + int i, size; + + for (i = 0; i < 10; i++) { + size = 8 << i; + num_tests += do_kmem_cache_rcu_persistent(size, &failures); + } + REPORT_FAILURES_IN_FN(); + *total_failures += failures; + return num_tests; +} + +/* + * Run the tests. Each test function returns the number of executed tests and + * updates |failures| with the number of failed tests. + */ +static int __init test_meminit_init(void) +{ + int failures = 0, num_tests = 0; + + num_tests += test_pages(&failures); + num_tests += test_kvmalloc(&failures); + num_tests += test_kmemcache(&failures); + num_tests += test_rcu_persistent(&failures); + + if (failures == 0) + pr_info("all %d tests passed!\n", num_tests); + else + pr_info("failures: %d out of %d\n", failures, num_tests); + + return failures ? -EINVAL : 0; +} +module_init(test_meminit_init); + +MODULE_LICENSE("GPL"); -- cgit v1.2.3-55-g7522 From 6b95ab4218bfa59bc315105127ffe03aef3b5742 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Tue, 16 Jul 2019 16:27:30 -0700 Subject: mm/ioremap: check virtual address alignment while creating huge mappings Virtual address alignment is essential in ensuring correct clearing for all intermediate level pgtable entries and freeing associated pgtable pages. An unaligned address can end up randomly freeing pgtable page that potentially still contains valid mappings. Hence also check it's alignment along with existing phys_addr check. 
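Condensed, the guard at each level now reads as below (a sketch assuming kernel headers; pmd_huge_mapping_ok() is a hypothetical helper name, while the patch open-codes these checks in ioremap_try_huge_{pmd,pud,p4d}()):

	#include <linux/kernel.h>	/* IS_ALIGNED(): ((x) & ((a) - 1)) == 0 */
	#include <linux/types.h>	/* phys_addr_t */
	#include <asm/pgtable.h>	/* PMD_SIZE */

	static bool pmd_huge_mapping_ok(unsigned long addr, unsigned long end,
					phys_addr_t phys_addr)
	{
		if ((end - addr) != PMD_SIZE)
			return false;
		if (!IS_ALIGNED(addr, PMD_SIZE))	/* new: virtual alignment */
			return false;
		if (!IS_ALIGNED(phys_addr, PMD_SIZE))	/* existing physical check */
			return false;
		return true;
	}

The PUD and P4D levels are identical with PUD_SIZE/P4D_SIZE substituted.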
Signed-off-by: Anshuman Khandual Reviewed-by: Catalin Marinas Cc: Toshi Kani Cc: Will Deacon Cc: Chintan Pandya Cc: Thomas Gleixner Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/ioremap.c | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'lib') diff --git a/lib/ioremap.c b/lib/ioremap.c index 063213685563..a95161d9c883 100644 --- a/lib/ioremap.c +++ b/lib/ioremap.c @@ -86,6 +86,9 @@ static int ioremap_try_huge_pmd(pmd_t *pmd, unsigned long addr, if ((end - addr) != PMD_SIZE) return 0; + if (!IS_ALIGNED(addr, PMD_SIZE)) + return 0; + if (!IS_ALIGNED(phys_addr, PMD_SIZE)) return 0; @@ -126,6 +129,9 @@ static int ioremap_try_huge_pud(pud_t *pud, unsigned long addr, if ((end - addr) != PUD_SIZE) return 0; + if (!IS_ALIGNED(addr, PUD_SIZE)) + return 0; + if (!IS_ALIGNED(phys_addr, PUD_SIZE)) return 0; @@ -166,6 +172,9 @@ static int ioremap_try_huge_p4d(p4d_t *p4d, unsigned long addr, if ((end - addr) != P4D_SIZE) return 0; + if (!IS_ALIGNED(addr, P4D_SIZE)) + return 0; + if (!IS_ALIGNED(phys_addr, P4D_SIZE)) return 0; -- cgit v1.2.3-55-g7522 From 0f472d04f59ff89d15b2a1c4eafde7317ddd67a2 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Tue, 16 Jul 2019 16:27:33 -0700 Subject: mm/ioremap: probe platform for p4d huge map support Finish up what commit c2febafc6773 ("mm: convert generic code to 5-level paging") started while levelling up P4D huge mapping support at par with PUD and PMD. A new arch call back arch_ioremap_p4d_supported() is added which just maintains status quo (P4D huge map not supported) on x86, arm64 and powerpc. When HAVE_ARCH_HUGE_VMAP is enabled its just a simple check from the arch about the support, hence runtime effects are minimal. Link: http://lkml.kernel.org/r/1561699231-20991-1-git-send-email-anshuman.khandual@arm.com Signed-off-by: Anshuman Khandual Acked-by: Thomas Gleixner Acked-by: Michael Ellerman (powerpc) Cc: Catalin Marinas Cc: Will Deacon Cc: Dave Hansen Cc: Andy Lutomirski Cc: Peter Zijlstra Cc: Ingo Molnar Cc: Kirill A. 
Shutemov Cc: Michal Hocko Cc: Stephen Rothwell Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/arm64/mm/mmu.c | 5 +++++ arch/powerpc/mm/book3s64/radix_pgtable.c | 5 +++++ arch/x86/mm/ioremap.c | 5 +++++ include/linux/io.h | 1 + lib/ioremap.c | 2 ++ 5 files changed, 18 insertions(+) (limited to 'lib') diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 1b49c08dfa2b..e661469cabdd 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -942,6 +942,11 @@ void *__init fixmap_remap_fdt(phys_addr_t dt_phys) return dt_virt; } +int __init arch_ioremap_p4d_supported(void) +{ + return 0; +} + int __init arch_ioremap_pud_supported(void) { /* diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c index 65c2ba1e1783..b4ca9e95e678 100644 --- a/arch/powerpc/mm/book3s64/radix_pgtable.c +++ b/arch/powerpc/mm/book3s64/radix_pgtable.c @@ -1237,3 +1237,8 @@ int radix__ioremap_range(unsigned long ea, phys_addr_t pa, unsigned long size, return 0; } } + +int __init arch_ioremap_p4d_supported(void) +{ + return 0; +} diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index e500f1df1140..63e99f15d7cf 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c @@ -459,6 +459,11 @@ void iounmap(volatile void __iomem *addr) } EXPORT_SYMBOL(iounmap); +int __init arch_ioremap_p4d_supported(void) +{ + return 0; +} + int __init arch_ioremap_pud_supported(void) { #ifdef CONFIG_X86_64 diff --git a/include/linux/io.h b/include/linux/io.h index 9876e5801a9d..accac822336a 100644 --- a/include/linux/io.h +++ b/include/linux/io.h @@ -33,6 +33,7 @@ static inline int ioremap_page_range(unsigned long addr, unsigned long end, #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP void __init ioremap_huge_init(void); +int arch_ioremap_p4d_supported(void); int arch_ioremap_pud_supported(void); int arch_ioremap_pmd_supported(void); #else diff --git a/lib/ioremap.c b/lib/ioremap.c index a95161d9c883..0a2ffadc6d71 100644 --- a/lib/ioremap.c +++ b/lib/ioremap.c @@ -30,6 +30,8 @@ early_param("nohugeiomap", set_nohugeiomap); void __init ioremap_huge_init(void) { if (!ioremap_huge_disabled) { + if (arch_ioremap_p4d_supported()) + ioremap_p4d_capable = 1; if (arch_ioremap_pud_supported()) ioremap_pud_capable = 1; if (arch_ioremap_pmd_supported()) -- cgit v1.2.3-55-g7522 From b4658cdd8cab49c978334dc5db9070d0d881e3dd Mon Sep 17 00:00:00 2001 From: Jonathan Corbet Date: Tue, 16 Jul 2019 16:27:36 -0700 Subject: lib/string_helpers: fix some kerneldoc warnings Due to some sad limitations in how kerneldoc comments are parsed, the documentation in lib/string_helpers.c generates these warnings: lib/string_helpers.c:236: WARNING: Unexpected indentation. lib/string_helpers.c:241: WARNING: Block quote ends without a blank line; unexpected unindent. lib/string_helpers.c:446: WARNING: Unexpected indentation. lib/string_helpers.c:451: WARNING: Block quote ends without a blank line; unexpected unindent. lib/string_helpers.c:474: WARNING: Unexpected indentation. Rework the comments to obtain something like the desired result. 
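The shape of the rework, shown on a made-up function (a sketch of the convention only; the real changes are in the diff below): the nested flag lists move out of the @flags parameter description into the Description block and are introduced with "::", so Sphinx renders them as a literal block instead of trying to parse the indentation:

	/**
	 * example_unescape - sketch of the reworked comment layout
	 * @flags: combination of the flags.
	 *
	 * Description:
	 * Free-form prose comes first. The supported flags are::
	 *
	 *	UNESCAPE_SPACE:
	 *		'\n' - new line
	 *	UNESCAPE_OCTAL:
	 *		'\NNN' - byte with octal value NNN (1 to 3 digits)
	 */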
Link: http://lkml.kernel.org/r/20190607110952.409011ba@lwn.net Signed-off-by: Jonathan Corbet Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/string_helpers.c | 77 +++++++++++++++++++++++++++------------------------- 1 file changed, 40 insertions(+), 37 deletions(-) (limited to 'lib') diff --git a/lib/string_helpers.c b/lib/string_helpers.c index 3a90a9e2b94a..963050c0283e 100644 --- a/lib/string_helpers.c +++ b/lib/string_helpers.c @@ -231,35 +231,36 @@ static bool unescape_special(char **src, char **dst) * @src: source buffer (escaped) * @dst: destination buffer (unescaped) * @size: size of the destination buffer (0 to unlimit) - * @flags: combination of the flags (bitwise OR): - * %UNESCAPE_SPACE: + * @flags: combination of the flags. + * + * Description: + * The function unquotes characters in the given string. + * + * Because the size of the output will be the same as or less than the size of + * the input, the transformation may be performed in place. + * + * Caller must provide valid source and destination pointers. Be aware that + * destination buffer will always be NULL-terminated. Source string must be + * NULL-terminated as well. The supported flags are:: + * + * UNESCAPE_SPACE: * '\f' - form feed * '\n' - new line * '\r' - carriage return * '\t' - horizontal tab * '\v' - vertical tab - * %UNESCAPE_OCTAL: + * UNESCAPE_OCTAL: * '\NNN' - byte with octal value NNN (1 to 3 digits) - * %UNESCAPE_HEX: + * UNESCAPE_HEX: * '\xHH' - byte with hexadecimal value HH (1 to 2 digits) - * %UNESCAPE_SPECIAL: + * UNESCAPE_SPECIAL: * '\"' - double quote * '\\' - backslash * '\a' - alert (BEL) * '\e' - escape - * %UNESCAPE_ANY: + * UNESCAPE_ANY: * all previous together * - * Description: - * The function unquotes characters in the given string. - * - * Because the size of the output will be the same as or less than the size of - * the input, the transformation may be performed in place. - * - * Caller must provide valid source and destination pointers. Be aware that - * destination buffer will always be NULL-terminated. Source string must be - * NULL-terminated as well. - * * Return: * The amount of the characters processed to the destination buffer excluding * trailing '\0' is returned. @@ -441,7 +442,29 @@ static bool escape_hex(unsigned char c, char **dst, char *end) * @isz: source buffer size * @dst: destination buffer (escaped) * @osz: destination buffer size - * @flags: combination of the flags (bitwise OR): + * @flags: combination of the flags + * @only: NULL-terminated string containing characters used to limit + * the selected escape class. If characters are included in @only + * that would not normally be escaped by the classes selected + * in @flags, they will be copied to @dst unescaped. + * + * Description: + * The process of escaping byte buffer includes several parts. They are applied + * in the following sequence. + * + * 1. The character is matched to the printable class, if asked, and in + * case of match it passes through to the output. + * 2. The character is not matched to the one from @only string and thus + * must go as-is to the output. + * 3. The character is checked if it falls into the class given by @flags. + * %ESCAPE_OCTAL and %ESCAPE_HEX are going last since they cover any + * character. Note that they actually can't go together, otherwise + * %ESCAPE_HEX will be ignored. + * + * Caller must provide valid source and destination pointers. Be aware that + * destination buffer will not be NULL-terminated, thus caller have to append + * it if needs. 
The supported flags are:: + * * %ESCAPE_SPACE: (special white space, not space itself) * '\f' - form feed * '\n' - new line @@ -464,26 +487,6 @@ static bool escape_hex(unsigned char c, char **dst, char *end) * all previous together * %ESCAPE_HEX: * '\xHH' - byte with hexadecimal value HH (2 digits) - * @only: NULL-terminated string containing characters used to limit - * the selected escape class. If characters are included in @only - * that would not normally be escaped by the classes selected - * in @flags, they will be copied to @dst unescaped. - * - * Description: - * The process of escaping byte buffer includes several parts. They are applied - * in the following sequence. - * 1. The character is matched to the printable class, if asked, and in - * case of match it passes through to the output. - * 2. The character is not matched to the one from @only string and thus - * must go as-is to the output. - * 3. The character is checked if it falls into the class given by @flags. - * %ESCAPE_OCTAL and %ESCAPE_HEX are going last since they cover any - * character. Note that they actually can't go together, otherwise - * %ESCAPE_HEX will be ignored. - * - * Caller must provide valid source and destination pointers. Be aware that - * destination buffer will not be NULL-terminated, thus caller have to append - * it if needs. * * Return: * The total size of the escaped output that would be generated for -- cgit v1.2.3-55-g7522 From d3a811617ae629d7c0c5b7f0b7b0a72715ae3407 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 16 Jul 2019 16:27:39 -0700 Subject: lib/test_meminit.c: fix -Wmaybe-uninitialized false positive The conditional logic is too complicated for the compiler to fully comprehend: lib/test_meminit.c: In function 'test_meminit_init': lib/test_meminit.c:236:5: error: 'buf_copy' may be used uninitialized in this function [-Werror=maybe-uninitialized] kfree(buf_copy); ^~~~~~~~~~~~~~~ lib/test_meminit.c:201:14: note: 'buf_copy' was declared here Simplify it by splitting out the non-rcu section. Link: http://lkml.kernel.org/r/20190617131210.2190280-1-arnd@arndb.de Fixes: af734ee6ec85 ("lib: introduce test_meminit module") Signed-off-by: Arnd Bergmann Acked-by: Alexander Potapenko Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/test_meminit.c | 50 ++++++++++++++++++++++++++------------------------ 1 file changed, 26 insertions(+), 24 deletions(-) (limited to 'lib') diff --git a/lib/test_meminit.c b/lib/test_meminit.c index ed7efec1387b..7ae2183ff1f4 100644 --- a/lib/test_meminit.c +++ b/lib/test_meminit.c @@ -208,35 +208,37 @@ static int __init do_kmem_cache_size(size_t size, bool want_ctor, /* Check that buf is zeroed, if it must be. */ fail = check_buf(buf, size, want_ctor, want_rcu, want_zero); fill_with_garbage_skip(buf, size, want_ctor ? CTOR_BYTES : 0); + + if (!want_rcu) { + kmem_cache_free(c, buf); + continue; + } + /* * If this is an RCU cache, use a critical section to ensure we * can touch objects after they're freed. */ - if (want_rcu) { - rcu_read_lock(); - /* - * Copy the buffer to check that it's not wiped on - * free(). - */ - buf_copy = kmalloc(size, GFP_KERNEL); - if (buf_copy) - memcpy(buf_copy, buf, size); - } - kmem_cache_free(c, buf); - if (want_rcu) { - /* - * Check that |buf| is intact after kmem_cache_free(). - * |want_zero| is false, because we wrote garbage to - * the buffer already. 
- */ - fail |= check_buf(buf, size, want_ctor, want_rcu, - false); - if (buf_copy) { - fail |= (bool)memcmp(buf, buf_copy, size); - kfree(buf_copy); - } - rcu_read_unlock(); + rcu_read_lock(); + /* + * Copy the buffer to check that it's not wiped on + * free(). + */ + buf_copy = kmalloc(size, GFP_KERNEL); + if (buf_copy) + memcpy(buf_copy, buf, size); + + /* + * Check that |buf| is intact after kmem_cache_free(). + * |want_zero| is false, because we wrote garbage to + * the buffer already. + */ + fail |= check_buf(buf, size, want_ctor, want_rcu, + false); + if (buf_copy) { + fail |= (bool)memcmp(buf, buf_copy, size); + kfree(buf_copy); } + rcu_read_unlock(); } kmem_cache_destroy(c); -- cgit v1.2.3-55-g7522 From 4ab7ace465466d25c12cee9854e7140077e208cb Mon Sep 17 00:00:00 2001 From: Alexander Potapenko Date: Tue, 16 Jul 2019 16:27:42 -0700 Subject: lib/test_meminit.c: minor test fixes Fix the following issues in test_meminit.c: - |size| in fill_with_garbage_skip() should be signed so that it doesn't overflow if it's not aligned on sizeof(*p); - fill_with_garbage_skip() should actually skip |skip| bytes; - do_kmem_cache_size() should deallocate memory in the RCU case. Link: http://lkml.kernel.org/r/20190626133135.217355-1-glider@google.com Fixes: 7e659650cbda ("lib: introduce test_meminit module") Fixes: 94e8988d91c7 ("lib/test_meminit.c: fix -Wmaybe-uninitialized false positive") Signed-off-by: Alexander Potapenko Cc: Arnd Bergmann Cc: Kees Cook Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/test_meminit.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'lib') diff --git a/lib/test_meminit.c b/lib/test_meminit.c index 7ae2183ff1f4..62d19f270cad 100644 --- a/lib/test_meminit.c +++ b/lib/test_meminit.c @@ -38,15 +38,14 @@ static int __init count_nonzero_bytes(void *ptr, size_t size) } /* Fill a buffer with garbage, skipping |skip| first bytes. */ -static void __init fill_with_garbage_skip(void *ptr, size_t size, size_t skip) +static void __init fill_with_garbage_skip(void *ptr, int size, size_t skip) { - unsigned int *p = (unsigned int *)ptr; + unsigned int *p = (unsigned int *)((char *)ptr + skip); int i = 0; - if (skip) { - WARN_ON(skip > size); - p += skip; - } + WARN_ON(skip > size); + size -= skip; + while (size >= sizeof(*p)) { p[i] = GARBAGE_INT; i++; @@ -227,6 +226,7 @@ static int __init do_kmem_cache_size(size_t size, bool want_ctor, if (buf_copy) memcpy(buf_copy, buf, size); + kmem_cache_free(c, buf); /* * Check that |buf| is intact after kmem_cache_free(). * |want_zero| is false, because we wrote garbage to -- cgit v1.2.3-55-g7522 From 9f973cb38088e0cf42e0bae97ff140813e623f13 Mon Sep 17 00:00:00 2001 From: Michel Lespinasse Date: Tue, 16 Jul 2019 16:27:45 -0700 Subject: lib/rbtree: avoid generating code twice for the cached versions As was already noted in rbtree.h, the logic to cache rb_first (or rb_last) can easily be implemented externally to the core rbtree api. Change the implementation to do just that. Previously the update of rb_leftmost was wired deeper into the implmentation, but there were some disadvantages to that - mostly, lib/rbtree.c had separate instantiations for rb_insert_color() vs rb_insert_color_cached(), as well as rb_erase() vs rb_erase_cached(), which were doing exactly the same thing save for the rb_leftmost update at the start of either function. 
text data bss dec hex filename 5405 120 0 5525 1595 lib/rbtree.o-vanilla 3827 96 0 3923 f53 lib/rbtree.o-patch [dave@stgolabs.net: changelog addition] Link: http://lkml.kernel.org/r/20190628171416.by5gdizl3rcxk5h5@linux-r8p5 [akpm@linux-foundation.org: coding-style fixes] Link: http://lkml.kernel.org/r/20190628045008.39926-1-walken@google.com Signed-off-by: Michel Lespinasse Acked-by: Davidlohr Bueso Acked-by: Peter Zijlstra (Intel) Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/rbtree.h | 70 ++++++++++++++++++++++++++-------------- include/linux/rbtree_augmented.h | 27 ++++++---------- lib/rbtree.c | 40 ++--------------------- 3 files changed, 59 insertions(+), 78 deletions(-) (limited to 'lib') diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h index e6337fce08f2..1fd61a9af45c 100644 --- a/include/linux/rbtree.h +++ b/include/linux/rbtree.h @@ -32,25 +32,9 @@ struct rb_root { struct rb_node *rb_node; }; -/* - * Leftmost-cached rbtrees. - * - * We do not cache the rightmost node based on footprint - * size vs number of potential users that could benefit - * from O(1) rb_last(). Just not worth it, users that want - * this feature can always implement the logic explicitly. - * Furthermore, users that want to cache both pointers may - * find it a bit asymmetric, but that's ok. - */ -struct rb_root_cached { - struct rb_root rb_root; - struct rb_node *rb_leftmost; -}; - #define rb_parent(r) ((struct rb_node *)((r)->__rb_parent_color & ~3)) #define RB_ROOT (struct rb_root) { NULL, } -#define RB_ROOT_CACHED (struct rb_root_cached) { {NULL, }, NULL } #define rb_entry(ptr, type, member) container_of(ptr, type, member) #define RB_EMPTY_ROOT(root) (READ_ONCE((root)->rb_node) == NULL) @@ -72,12 +56,6 @@ extern struct rb_node *rb_prev(const struct rb_node *); extern struct rb_node *rb_first(const struct rb_root *); extern struct rb_node *rb_last(const struct rb_root *); -extern void rb_insert_color_cached(struct rb_node *, - struct rb_root_cached *, bool); -extern void rb_erase_cached(struct rb_node *node, struct rb_root_cached *); -/* Same as rb_first(), but O(1) */ -#define rb_first_cached(root) (root)->rb_leftmost - /* Postorder iteration - always visit the parent after its children */ extern struct rb_node *rb_first_postorder(const struct rb_root *); extern struct rb_node *rb_next_postorder(const struct rb_node *); @@ -87,8 +65,6 @@ extern void rb_replace_node(struct rb_node *victim, struct rb_node *new, struct rb_root *root); extern void rb_replace_node_rcu(struct rb_node *victim, struct rb_node *new, struct rb_root *root); -extern void rb_replace_node_cached(struct rb_node *victim, struct rb_node *new, - struct rb_root_cached *root); static inline void rb_link_node(struct rb_node *node, struct rb_node *parent, struct rb_node **rb_link) @@ -136,4 +112,50 @@ static inline void rb_link_node_rcu(struct rb_node *node, struct rb_node *parent typeof(*pos), field); 1; }); \ pos = n) +/* + * Leftmost-cached rbtrees. + * + * We do not cache the rightmost node based on footprint + * size vs number of potential users that could benefit + * from O(1) rb_last(). Just not worth it, users that want + * this feature can always implement the logic explicitly. + * Furthermore, users that want to cache both pointers may + * find it a bit asymmetric, but that's ok. 
+ */ +struct rb_root_cached { + struct rb_root rb_root; + struct rb_node *rb_leftmost; +}; + +#define RB_ROOT_CACHED (struct rb_root_cached) { {NULL, }, NULL } + +/* Same as rb_first(), but O(1) */ +#define rb_first_cached(root) (root)->rb_leftmost + +static inline void rb_insert_color_cached(struct rb_node *node, + struct rb_root_cached *root, + bool leftmost) +{ + if (leftmost) + root->rb_leftmost = node; + rb_insert_color(node, &root->rb_root); +} + +static inline void rb_erase_cached(struct rb_node *node, + struct rb_root_cached *root) +{ + if (root->rb_leftmost == node) + root->rb_leftmost = rb_next(node); + rb_erase(node, &root->rb_root); +} + +static inline void rb_replace_node_cached(struct rb_node *victim, + struct rb_node *new, + struct rb_root_cached *root) +{ + if (root->rb_leftmost == victim) + root->rb_leftmost = new; + rb_replace_node(victim, new, &root->rb_root); +} + #endif /* _LINUX_RBTREE_H */ diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h index 0f902ccb48b0..179faab29f52 100644 --- a/include/linux/rbtree_augmented.h +++ b/include/linux/rbtree_augmented.h @@ -30,10 +30,9 @@ struct rb_augment_callbacks { void (*rotate)(struct rb_node *old, struct rb_node *new); }; -extern void __rb_insert_augmented(struct rb_node *node, - struct rb_root *root, - bool newleft, struct rb_node **leftmost, +extern void __rb_insert_augmented(struct rb_node *node, struct rb_root *root, void (*augment_rotate)(struct rb_node *old, struct rb_node *new)); + /* * Fixup the rbtree and update the augmented information when rebalancing. * @@ -48,7 +47,7 @@ static inline void rb_insert_augmented(struct rb_node *node, struct rb_root *root, const struct rb_augment_callbacks *augment) { - __rb_insert_augmented(node, root, false, NULL, augment->rotate); + __rb_insert_augmented(node, root, augment->rotate); } static inline void @@ -56,8 +55,9 @@ rb_insert_augmented_cached(struct rb_node *node, struct rb_root_cached *root, bool newleft, const struct rb_augment_callbacks *augment) { - __rb_insert_augmented(node, &root->rb_root, - newleft, &root->rb_leftmost, augment->rotate); + if (newleft) + root->rb_leftmost = node; + rb_insert_augmented(node, &root->rb_root, augment); } #define RB_DECLARE_CALLBACKS(rbstatic, rbname, rbstruct, rbfield, \ @@ -150,7 +150,6 @@ extern void __rb_erase_color(struct rb_node *parent, struct rb_root *root, static __always_inline struct rb_node * __rb_erase_augmented(struct rb_node *node, struct rb_root *root, - struct rb_node **leftmost, const struct rb_augment_callbacks *augment) { struct rb_node *child = node->rb_right; @@ -158,9 +157,6 @@ __rb_erase_augmented(struct rb_node *node, struct rb_root *root, struct rb_node *parent, *rebalance; unsigned long pc; - if (leftmost && node == *leftmost) - *leftmost = rb_next(node); - if (!tmp) { /* * Case 1: node to erase has no more than 1 child (easy!) 
@@ -260,8 +256,7 @@ static __always_inline void rb_erase_augmented(struct rb_node *node, struct rb_root *root, const struct rb_augment_callbacks *augment) { - struct rb_node *rebalance = __rb_erase_augmented(node, root, - NULL, augment); + struct rb_node *rebalance = __rb_erase_augmented(node, root, augment); if (rebalance) __rb_erase_color(rebalance, root, augment->rotate); } @@ -270,11 +265,9 @@ static __always_inline void rb_erase_augmented_cached(struct rb_node *node, struct rb_root_cached *root, const struct rb_augment_callbacks *augment) { - struct rb_node *rebalance = __rb_erase_augmented(node, &root->rb_root, - &root->rb_leftmost, - augment); - if (rebalance) - __rb_erase_color(rebalance, &root->rb_root, augment->rotate); + if (root->rb_leftmost == node) + root->rb_leftmost = rb_next(node); + rb_erase_augmented(node, &root->rb_root, augment); } #endif /* _LINUX_RBTREE_AUGMENTED_H */ diff --git a/lib/rbtree.c b/lib/rbtree.c index 1ef6e25d031c..abc86c6a3177 100644 --- a/lib/rbtree.c +++ b/lib/rbtree.c @@ -83,14 +83,10 @@ __rb_rotate_set_parents(struct rb_node *old, struct rb_node *new, static __always_inline void __rb_insert(struct rb_node *node, struct rb_root *root, - bool newleft, struct rb_node **leftmost, void (*augment_rotate)(struct rb_node *old, struct rb_node *new)) { struct rb_node *parent = rb_red_parent(node), *gparent, *tmp; - if (newleft) - *leftmost = node; - while (true) { /* * Loop invariant: node is red. @@ -437,38 +433,19 @@ static const struct rb_augment_callbacks dummy_callbacks = { void rb_insert_color(struct rb_node *node, struct rb_root *root) { - __rb_insert(node, root, false, NULL, dummy_rotate); + __rb_insert(node, root, dummy_rotate); } EXPORT_SYMBOL(rb_insert_color); void rb_erase(struct rb_node *node, struct rb_root *root) { struct rb_node *rebalance; - rebalance = __rb_erase_augmented(node, root, - NULL, &dummy_callbacks); + rebalance = __rb_erase_augmented(node, root, &dummy_callbacks); if (rebalance) ____rb_erase_color(rebalance, root, dummy_rotate); } EXPORT_SYMBOL(rb_erase); -void rb_insert_color_cached(struct rb_node *node, - struct rb_root_cached *root, bool leftmost) -{ - __rb_insert(node, &root->rb_root, leftmost, - &root->rb_leftmost, dummy_rotate); -} -EXPORT_SYMBOL(rb_insert_color_cached); - -void rb_erase_cached(struct rb_node *node, struct rb_root_cached *root) -{ - struct rb_node *rebalance; - rebalance = __rb_erase_augmented(node, &root->rb_root, - &root->rb_leftmost, &dummy_callbacks); - if (rebalance) - ____rb_erase_color(rebalance, &root->rb_root, dummy_rotate); -} -EXPORT_SYMBOL(rb_erase_cached); - /* * Augmented rbtree manipulation functions. * @@ -477,10 +454,9 @@ EXPORT_SYMBOL(rb_erase_cached); */ void __rb_insert_augmented(struct rb_node *node, struct rb_root *root, - bool newleft, struct rb_node **leftmost, void (*augment_rotate)(struct rb_node *old, struct rb_node *new)) { - __rb_insert(node, root, newleft, leftmost, augment_rotate); + __rb_insert(node, root, augment_rotate); } EXPORT_SYMBOL(__rb_insert_augmented); @@ -591,16 +567,6 @@ void rb_replace_node(struct rb_node *victim, struct rb_node *new, } EXPORT_SYMBOL(rb_replace_node); -void rb_replace_node_cached(struct rb_node *victim, struct rb_node *new, - struct rb_root_cached *root) -{ - rb_replace_node(victim, new, &root->rb_root); - - if (root->rb_leftmost == victim) - root->rb_leftmost = new; -} -EXPORT_SYMBOL(rb_replace_node_cached); - void rb_replace_node_rcu(struct rb_node *victim, struct rb_node *new, struct rb_root *root) { -- cgit v1.2.3-55-g7522
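To make the premise of the last commit concrete (that leftmost caching layers cleanly on top of the core rbtree API), here is a hedged usage sketch of the cached interface; kernel context is assumed, and struct item with its key comparison is illustrative, not from the patch:

	#include <linux/rbtree.h>
	#include <linux/types.h>

	struct item {
		struct rb_node node;
		unsigned long key;
	};

	static struct rb_root_cached tree = RB_ROOT_CACHED;

	static void item_insert(struct item *new)
	{
		struct rb_node **link = &tree.rb_root.rb_node, *parent = NULL;
		bool leftmost = true;

		while (*link) {
			struct item *cur = rb_entry(*link, struct item, node);

			parent = *link;
			if (new->key < cur->key) {
				link = &cur->node.rb_left;
			} else {
				link = &cur->node.rb_right;
				leftmost = false;	/* went right at least once */
			}
		}
		rb_link_node(&new->node, parent, link);
		/*
		 * After this commit, a thin inline wrapper: cache the node if
		 * it is the new leftmost, then call plain rb_insert_color().
		 */
		rb_insert_color_cached(&new->node, &tree, leftmost);
	}

	/* O(1) minimum lookup via the cached leftmost pointer. */
	static struct item *item_first(void)
	{
		struct rb_node *n = rb_first_cached(&tree);

		return n ? rb_entry(n, struct item, node) : NULL;
	}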