author     Yinghai Lu        2013-01-24 21:20:05 +0100
committer  H. Peter Anvin    2013-01-30 00:26:35 +0100
commit     0e691cf824f76adefb4498fe39c300aba2c2575a (patch)
tree       6e61f59e7f100eac02a9aa7e064eaaa34e4e1904
parent     x86, kexec: Replace ident_mapping_init and init_level4_page (diff)
x86, kexec, 64bit: Only set ident mapping for ram.
We should set mappings only for usable memory ranges under max_pfn,
otherwise we hit the same problem that is fixed by

	x86, mm: Only direct map addresses that are marked as E820_RAM

This patch exposes the pfn_mapped array, and only sets up the ident
mapping for ranges in that array.

This patch relies on the new kernel_ident_mapping_init, which can handle
existing pgd/pud entries between different calls.

Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Link: http://lkml.kernel.org/r/1359058816-7615-25-git-send-email-yinghai@kernel.org
Cc: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
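As a reading aid (not part of the commit): a minimal sketch of the new
approach, assuming struct range from <linux/range.h> holds a start/end pfn
pair, that struct x86_mapping_info is the mapping-info type used by
kernel_ident_mapping_init(), and that the helper name ident_map_ram_only()
is purely hypothetical. Instead of identity-mapping everything below
max_pfn, only the RAM ranges recorded in pfn_mapped[] are walked, and
pgd/pud entries created by earlier calls are reused.

	/*
	 * Illustrative sketch only, not from the patch: identity-map just
	 * the RAM ranges recorded in pfn_mapped[] instead of the whole
	 * 0..max_pfn span.
	 */
	static int ident_map_ram_only(struct x86_mapping_info *info,
				      pgd_t *level4p)
	{
		unsigned long mstart, mend;
		int result, i;

		for (i = 0; i < nr_pfn_mapped; i++) {
			mstart = pfn_mapped[i].start << PAGE_SHIFT; /* pfn -> phys */
			mend   = pfn_mapped[i].end   << PAGE_SHIFT;

			/* Maps [mstart, mend); existing upper-level entries
			 * are reused across iterations. */
			result = kernel_ident_mapping_init(info, level4p,
							   mstart, mend);
			if (result)
				return result;
		}
		return 0;
	}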
-rw-r--r--  arch/x86/include/asm/page.h        |  4
-rw-r--r--  arch/x86/kernel/machine_kexec_64.c | 13
-rw-r--r--  arch/x86/mm/init.c                 |  4
3 files changed, 15 insertions(+), 6 deletions(-)
diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h
index 8ca82839288a..100a20c7b98d 100644
--- a/arch/x86/include/asm/page.h
+++ b/arch/x86/include/asm/page.h
@@ -17,6 +17,10 @@
 
 struct page;
 
+#include <linux/range.h>
+extern struct range pfn_mapped[];
+extern int nr_pfn_mapped;
+
 static inline void clear_user_page(void *page, unsigned long vaddr,
 				   struct page *pg)
 {
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
index d2d7e023a8c8..4eabc160696f 100644
--- a/arch/x86/kernel/machine_kexec_64.c
+++ b/arch/x86/kernel/machine_kexec_64.c
@@ -100,10 +100,15 @@ static int init_pgtable(struct kimage *image, unsigned long start_pgtable)
 	level4p = (pgd_t *)__va(start_pgtable);
 	clear_page(level4p);
-	result = kernel_ident_mapping_init(&info, level4p,
-					0, max_pfn << PAGE_SHIFT);
-	if (result)
-		return result;
+	for (i = 0; i < nr_pfn_mapped; i++) {
+		mstart = pfn_mapped[i].start << PAGE_SHIFT;
+		mend = pfn_mapped[i].end << PAGE_SHIFT;
+
+		result = kernel_ident_mapping_init(&info,
+						   level4p, mstart, mend);
+		if (result)
+			return result;
+	}
 
 	/*
 	 * segments's mem ranges could be outside 0 ~ max_pfn,
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 3364a7643a4c..d41815265a0b 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -302,8 +302,8 @@ static int __meminit split_mem_range(struct map_range *mr, int nr_range,
 	return nr_range;
 }
 
-static struct range pfn_mapped[E820_X_MAX];
-static int nr_pfn_mapped;
+struct range pfn_mapped[E820_X_MAX];
+int nr_pfn_mapped;
 
 static void add_pfn_range_mapped(unsigned long start_pfn, unsigned long end_pfn)
 {
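For context only (not part of this diff, and reconstructed from memory of
this kernel era, so treat the details as assumptions): pfn_mapped[] and
nr_pfn_mapped, now exported above, are filled by the add_pfn_range_mapped()
helper whose signature is visible in the hunk, each time a pfn range is
direct-mapped during init. A rough sketch:

	/*
	 * Rough sketch of how pfn_mapped[] is populated elsewhere in init.c
	 * (illustration only; the real helper also updates max_pfn_mapped
	 * and max_low_pfn_mapped).  add_range_with_merge() is assumed to
	 * append the [start_pfn, end_pfn) range and merge adjacent entries.
	 */
	static void add_pfn_range_mapped(unsigned long start_pfn,
					 unsigned long end_pfn)
	{
		nr_pfn_mapped = add_range_with_merge(pfn_mapped, E820_X_MAX,
						     nr_pfn_mapped,
						     start_pfn, end_pfn);
	}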