author     Linus Torvalds  2018-12-29 01:55:46 +0100
committer  Linus Torvalds  2018-12-29 01:55:46 +0100
commit     f346b0becb1bc62e45495f9cdbae3eef35d0b635 (patch)
tree       ae79f3dfb8e031da51d38f0f095f89d7d23f3643 /mm/slab_common.c
parent     Merge tag 'mmc-v4.21' of git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/mmc (diff)
parent     kernel/fork.c: mark 'stack_vm_area' with __maybe_unused (diff)
Merge branch 'akpm' (patches from Andrew)
Merge misc updates from Andrew Morton:
- large KASAN update to use arm's "software tag-based mode"
- a few misc things
- sh updates
- ocfs2 updates
- just about all of MM
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (167 commits)
kernel/fork.c: mark 'stack_vm_area' with __maybe_unused
memcg, oom: notify on oom killer invocation from the charge path
mm, swap: fix swapoff with KSM pages
include/linux/gfp.h: fix typo
mm/hmm: fix memremap.h, move dev_page_fault_t callback to hmm
hugetlbfs: Use i_mmap_rwsem to fix page fault/truncate race
hugetlbfs: use i_mmap_rwsem for more pmd sharing synchronization
memory_hotplug: add missing newlines to debugging output
mm: remove __hugepage_set_anon_rmap()
include/linux/vmstat.h: remove unused page state adjustment macro
mm/page_alloc.c: allow error injection
mm: migrate: drop unused argument of migrate_page_move_mapping()
blkdev: avoid migration stalls for blkdev pages
mm: migrate: provide buffer_migrate_page_norefs()
mm: migrate: move migrate_page_lock_buffers()
mm: migrate: lock buffers before migrate_page_move_mapping()
mm: migration: factor out code to compute expected number of page references
mm, page_alloc: enable pcpu_drain with zone capability
kmemleak: add config to select auto scan
mm/page_alloc.c: don't call kasan_free_pages() at deferred mem init
...
Diffstat (limited to 'mm/slab_common.c')
-rw-r--r--  mm/slab_common.c  10
1 file changed, 4 insertions, 6 deletions
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 9c11e8a937d2..70b0cc85db67 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -1029,10 +1029,8 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
 
 		index = size_index[size_index_elem(size)];
 	} else {
-		if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
-			WARN_ON(1);
+		if (WARN_ON_ONCE(size > KMALLOC_MAX_CACHE_SIZE))
 			return NULL;
-		}
 		index = fls(size - 1);
 	}
 
@@ -1204,7 +1202,7 @@ void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
 	page = alloc_pages(flags, order);
 	ret = page ? page_address(page) : NULL;
 	kmemleak_alloc(ret, size, 1, flags);
-	kasan_kmalloc_large(ret, size, flags);
+	ret = kasan_kmalloc_large(ret, size, flags);
 	return ret;
 }
 EXPORT_SYMBOL(kmalloc_order);
@@ -1482,7 +1480,7 @@ static __always_inline void *__do_krealloc(const void *p, size_t new_size,
 		ks = ksize(p);
 
 	if (ks >= new_size) {
-		kasan_krealloc((void *)p, new_size, flags);
+		p = kasan_krealloc((void *)p, new_size, flags);
 		return (void *)p;
 	}
 
@@ -1534,7 +1532,7 @@ void *krealloc(const void *p, size_t new_size, gfp_t flags)
 	}
 
 	ret = __do_krealloc(p, new_size, flags);
-	if (ret && p != ret)
+	if (ret && kasan_reset_tag(p) != kasan_reset_tag(ret))
 		kfree(p);
 
 	return ret;
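Note on the hunks above: with the software tag-based KASAN mode merged here, the KASAN hooks hand back the pointer the caller should use (its top byte may carry a new tag), so their return values are now assigned back, and krealloc() compares the old and new pointers only after stripping the tags with kasan_reset_tag(). The sketch below is a minimal, userspace-only illustration of that pointer-tagging idea, not kernel code; tag_pointer(), reset_tag(), TAG_SHIFT and the example tag values are hypothetical stand-ins chosen for this demo.

/*
 * Userspace sketch: two pointers to the same object can differ bit-for-bit
 * once a tag lives in the top byte, so "same allocation?" checks must
 * compare tag-stripped addresses. Assumes 64-bit pointers.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define TAG_SHIFT 56                       /* top byte of a 64-bit pointer */
#define TAG_MASK  (0xffULL << TAG_SHIFT)

/* Embed an 8-bit tag in the (otherwise unused) top byte. */
static void *tag_pointer(void *ptr, uint8_t tag)
{
	return (void *)(((uintptr_t)ptr & ~(uintptr_t)TAG_MASK) |
			((uintptr_t)tag << TAG_SHIFT));
}

/* Strip the tag so only the real address remains. */
static void *reset_tag(const void *ptr)
{
	return (void *)((uintptr_t)ptr & ~(uintptr_t)TAG_MASK);
}

int main(void)
{
	void *obj = malloc(32);
	/* Same object, two different tags (as successive hook calls might assign). */
	void *p   = tag_pointer(obj, 0x2a);
	void *ret = tag_pointer(obj, 0x17);

	/* Raw comparison is misled by the differing tags... */
	printf("raw      : %s\n", p == ret ? "same" : "different");
	/* ...while the tag-stripped comparison sees one allocation. */
	printf("untagged : %s\n",
	       reset_tag(p) == reset_tag(ret) ? "same" : "different");

	free(obj);
	return 0;
}

Built as plain C, the raw comparison prints "different" while the tag-stripped one prints "same", which is the situation the kasan_reset_tag() comparison in krealloc() guards against: without it, a retagged pointer to the same block would look like a new allocation and the block would be freed out from under the caller.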