author     Linus Torvalds  2018-12-29 01:55:46 +0100
committer  Linus Torvalds  2018-12-29 01:55:46 +0100
commit     f346b0becb1bc62e45495f9cdbae3eef35d0b635 (patch)
tree       ae79f3dfb8e031da51d38f0f095f89d7d23f3643 /arch/x86/mm/dump_pagetables.c
parent     Merge tag 'mmc-v4.21' of git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/mmc (diff)
parent     kernel/fork.c: mark 'stack_vm_area' with __maybe_unused (diff)
download   kernel-qcow2-linux-f346b0becb1bc62e45495f9cdbae3eef35d0b635.tar.gz
           kernel-qcow2-linux-f346b0becb1bc62e45495f9cdbae3eef35d0b635.tar.xz
           kernel-qcow2-linux-f346b0becb1bc62e45495f9cdbae3eef35d0b635.zip
Merge branch 'akpm' (patches from Andrew)
Merge misc updates from Andrew Morton:

 - large KASAN update to use arm's "software tag-based mode"

 - a few misc things

 - sh updates

 - ocfs2 updates

 - just about all of MM

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (167 commits)
  kernel/fork.c: mark 'stack_vm_area' with __maybe_unused
  memcg, oom: notify on oom killer invocation from the charge path
  mm, swap: fix swapoff with KSM pages
  include/linux/gfp.h: fix typo
  mm/hmm: fix memremap.h, move dev_page_fault_t callback to hmm
  hugetlbfs: Use i_mmap_rwsem to fix page fault/truncate race
  hugetlbfs: use i_mmap_rwsem for more pmd sharing synchronization
  memory_hotplug: add missing newlines to debugging output
  mm: remove __hugepage_set_anon_rmap()
  include/linux/vmstat.h: remove unused page state adjustment macro
  mm/page_alloc.c: allow error injection
  mm: migrate: drop unused argument of migrate_page_move_mapping()
  blkdev: avoid migration stalls for blkdev pages
  mm: migrate: provide buffer_migrate_page_norefs()
  mm: migrate: move migrate_page_lock_buffers()
  mm: migrate: lock buffers before migrate_page_move_mapping()
  mm: migration: factor out code to compute expected number of page references
  mm, page_alloc: enable pcpu_drain with zone capability
  kmemleak: add config to select auto scan
  mm/page_alloc.c: don't call kasan_free_pages() at deferred mem init
  ...
Diffstat (limited to 'arch/x86/mm/dump_pagetables.c')
-rw-r--r--  arch/x86/mm/dump_pagetables.c  11
1 file changed, 6 insertions(+), 5 deletions(-)
diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
index abcb8d00b014..e3cdc85ce5b6 100644
--- a/arch/x86/mm/dump_pagetables.c
+++ b/arch/x86/mm/dump_pagetables.c
@@ -377,7 +377,7 @@ static void walk_pte_level(struct seq_file *m, struct pg_state *st, pmd_t addr,
 /*
  * This is an optimization for KASAN=y case. Since all kasan page tables
- * eventually point to the kasan_zero_page we could call note_page()
+ * eventually point to the kasan_early_shadow_page we could call note_page()
  * right away without walking through lower level page tables. This saves
  * us dozens of seconds (minutes for 5-level config) while checking for
  * W+X mapping or reading kernel_page_tables debugfs file.
@@ -385,10 +385,11 @@ static void walk_pte_level(struct seq_file *m, struct pg_state *st, pmd_t addr,
 static inline bool kasan_page_table(struct seq_file *m, struct pg_state *st,
 				void *pt)
 {
-	if (__pa(pt) == __pa(kasan_zero_pmd) ||
-	    (pgtable_l5_enabled() && __pa(pt) == __pa(kasan_zero_p4d)) ||
-	    __pa(pt) == __pa(kasan_zero_pud)) {
-		pgprotval_t prot = pte_flags(kasan_zero_pte[0]);
+	if (__pa(pt) == __pa(kasan_early_shadow_pmd) ||
+	    (pgtable_l5_enabled() &&
+	     __pa(pt) == __pa(kasan_early_shadow_p4d)) ||
+	    __pa(pt) == __pa(kasan_early_shadow_pud)) {
+		pgprotval_t prot = pte_flags(kasan_early_shadow_pte[0]);
 		note_page(m, st, __pgprot(prot), 0, 5);
 		return true;
 	}
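
The renamed helpers keep working the same way: KASAN backs its entire shadow
region with one shared set of early page tables, so a dumper that is about to
descend into a table can first compare that table's physical address against
the shared ones and emit a single note_page() record for the whole range. Below
is a minimal userspace sketch of the same short-circuit idea; the toy two-level
table and names such as shared_shadow are illustrative only, not the kernel API.

#include <stdio.h>
#include <stdbool.h>

#define ENTRIES 8

/* One shared lower-level table, standing in for kasan_early_shadow_pte:
 * every "shadow" slot in the toy top level points at this same table. */
static const int shared_shadow[ENTRIES];	/* all zero */

/* Analogue of kasan_page_table(): the kernel compares __pa() values;
 * plain pointer identity is enough for this toy model. */
static bool is_shared_shadow(const int *pt)
{
	return pt == shared_shadow;
}

static void walk(const int *top[], int n)
{
	for (int i = 0; i < n; i++) {
		if (is_shared_shadow(top[i])) {
			/* one aggregate note instead of ENTRIES leaf visits */
			printf("slot %d: shared shadow table, walk skipped\n", i);
			continue;
		}
		for (int j = 0; j < ENTRIES; j++)
			printf("slot %d.%d = %d\n", i, j, top[i][j]);
	}
}

int main(void)
{
	static const int real_table[ENTRIES] = { 1, 2, 3 };
	const int *top[4] = { shared_shadow, shared_shadow,
			      real_table, shared_shadow };

	walk(top, 4);	/* only slot 2 is walked entry by entry */
	return 0;
}

The real function applies this check at the pmd, pud and p4d levels, which is
why it compares against three shared tables; the payoff cited in the comment
(dozens of seconds, minutes on 5-level configs) comes from never touching the
huge number of identical lower-level entries.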