path: root/arch
author    Ingo Molnar    2008-01-30 13:33:58 +0100
committer Ingo Molnar    2008-01-30 13:33:58 +0100
commit    12d6f21eacc21d84a809829543f2fe45c7e37319 (patch)
tree      6985f2370ad238fb2a568547a5049751d7c95a69 /arch
parent    x86: cpa: simplify locking (diff)
x86: do not PSE on CONFIG_DEBUG_PAGEALLOC=y
get more testing of the c_p_a() code done by not turning off PSE on
DEBUG_PAGEALLOC. this simplifies the early pagetable setup code, and
tests the largepage-splitup code quite heavily.

In the end, all the largepages will be split up pretty quickly, so
there's no difference to how DEBUG_PAGEALLOC worked before.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
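As a rough illustration of the largepage-splitup path this change exercises, here is a stand-alone C sketch (not kernel code; all identifiers below are made up for illustration) of what split_large_page() conceptually does: replace one 4 MB PSE mapping with 1024 individual 4 KB entries carrying the same protection bits.

/*
 * Stand-alone model (not kernel code) of the large-page split-up:
 * one 4 MB PSE mapping becomes 1024 4 KB entries covering the same
 * range with the same protection bits.  fake_pte_t, PTRS_PER_PTE_32
 * and the prot value are illustrative assumptions.
 */
#include <stdio.h>
#include <stdlib.h>

#define PTRS_PER_PTE_32 1024          /* entries per page table, 32-bit non-PAE */

typedef struct {
    unsigned long pfn;                /* page frame number */
    unsigned long prot;               /* protection bits inherited from the large page */
} fake_pte_t;

/* Build the 4 KB entries that replace one 4 MB mapping starting at base_pfn. */
static fake_pte_t *split_large_mapping(unsigned long base_pfn, unsigned long prot)
{
    fake_pte_t *pbase = calloc(PTRS_PER_PTE_32, sizeof(*pbase));
    if (!pbase)
        return NULL;                  /* mirrors the -ENOMEM path below */

    for (int i = 0; i < PTRS_PER_PTE_32; i++) {
        pbase[i].pfn  = base_pfn + i; /* consecutive 4 KB frames */
        pbase[i].prot = prot;         /* ref_prot: large-page prot minus the PSE bit */
    }
    return pbase;
}

int main(void)
{
    fake_pte_t *pt = split_large_mapping(0x100000, 0x63 /* e.g. present|rw|dirty */);
    if (!pt)
        return 1;
    printf("first pfn %#lx, last pfn %#lx, prot %#lx\n",
           pt[0].pfn, pt[PTRS_PER_PTE_32 - 1].pfn, pt[0].prot);
    free(pt);
    return 0;
}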
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/kernel/cpu/common.c |  7
-rw-r--r--  arch/x86/mm/pageattr_32.c    | 12
2 files changed, 11 insertions(+), 8 deletions(-)
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index bba850b05d0e..db28aa9e2f69 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -641,13 +641,6 @@ void __init early_cpu_init(void)
nexgen_init_cpu();
umc_init_cpu();
early_cpu_detect();
-
-#ifdef CONFIG_DEBUG_PAGEALLOC
- /* pse is not compatible with on-the-fly unmapping,
- * disable it even if the cpus claim to support it.
- */
- setup_clear_cpu_cap(X86_FEATURE_PSE);
-#endif
}
/* Make sure %fs is initialized properly in idle threads */
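For context, a minimal stand-alone sketch of what the removed setup_clear_cpu_cap(X86_FEATURE_PSE) call achieved: clearing a bit in a feature bitmap so the kernel behaves as if the CPU lacked the capability even when the hardware advertises it. The names below are illustrative, not the kernel's.

/*
 * Stand-alone model (not kernel code) of clearing a CPU capability bit.
 * MODEL_FEATURE_PSE and the helpers are illustrative assumptions.
 */
#include <stdbool.h>
#include <stdio.h>

#define MODEL_FEATURE_PSE 3            /* PSE bit position in CPUID leaf 1 EDX */

static unsigned long cpu_caps = 0xffffffffUL;   /* pretend the CPU has everything */

static void model_clear_cpu_cap(int bit)
{
    cpu_caps &= ~(1UL << bit);         /* kernel will no longer use the feature */
}

static bool model_cpu_has(int bit)
{
    return cpu_caps & (1UL << bit);
}

int main(void)
{
    /* Before this patch, DEBUG_PAGEALLOC kernels did the equivalent of: */
    model_clear_cpu_cap(MODEL_FEATURE_PSE);
    printf("PSE usable: %s\n", model_cpu_has(MODEL_FEATURE_PSE) ? "yes" : "no");
    return 0;
}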
diff --git a/arch/x86/mm/pageattr_32.c b/arch/x86/mm/pageattr_32.c
index 9cf2fea54eb5..dd49b16b3a0e 100644
--- a/arch/x86/mm/pageattr_32.c
+++ b/arch/x86/mm/pageattr_32.c
@@ -61,13 +61,17 @@ static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
static int split_large_page(pte_t *kpte, unsigned long address)
{
pgprot_t ref_prot = pte_pgprot(pte_clrhuge(*kpte));
+ gfp_t gfp_flags = GFP_KERNEL;
unsigned long flags;
unsigned long addr;
pte_t *pbase, *tmp;
struct page *base;
int i, level;
- base = alloc_pages(GFP_KERNEL, 0);
+#ifdef CONFIG_DEBUG_PAGEALLOC
+ gfp_flags = GFP_ATOMIC;
+#endif
+ base = alloc_pages(gfp_flags, 0);
if (!base)
return -ENOMEM;
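A minimal stand-alone sketch of the allocation-flag choice in the hunk above, assuming (as the change implies) that with CONFIG_DEBUG_PAGEALLOC split_large_page() can be reached from a context where a sleeping GFP_KERNEL allocation is not safe; the enum and function names are illustrative.

/*
 * Stand-alone model (not kernel code) of the gfp_flags override above.
 */
#include <stdio.h>

enum model_gfp { MODEL_GFP_KERNEL, MODEL_GFP_ATOMIC };

static enum model_gfp pick_split_gfp(int debug_pagealloc)
{
    /* Mirrors the #ifdef CONFIG_DEBUG_PAGEALLOC override of gfp_flags. */
    return debug_pagealloc ? MODEL_GFP_ATOMIC : MODEL_GFP_KERNEL;
}

int main(void)
{
    printf("without DEBUG_PAGEALLOC: %s\n",
           pick_split_gfp(0) == MODEL_GFP_KERNEL ? "GFP_KERNEL" : "GFP_ATOMIC");
    printf("with    DEBUG_PAGEALLOC: %s\n",
           pick_split_gfp(1) == MODEL_GFP_KERNEL ? "GFP_KERNEL" : "GFP_ATOMIC");
    return 0;
}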
@@ -219,6 +223,12 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
}
/*
+ * If page allocator is not up yet then do not call c_p_a():
+ */
+ if (!debug_pagealloc_enabled)
+ return;
+
+ /*
* the return value is ignored - the calls cannot fail,
* large pages are disabled at boot time.
*/
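A minimal stand-alone sketch of the guard added in the hunk above, modelling how kernel_map_pages() simply returns until the page allocator is up instead of calling c_p_a(); debug_pagealloc_enabled is the real variable, the other names are illustrative.

/*
 * Stand-alone model (not kernel code) of the early-return guard.
 */
#include <stdbool.h>
#include <stdio.h>

static bool model_debug_pagealloc_enabled;   /* set once the allocator is up */

static void model_change_page_attr(int numpages, bool enable)
{
    printf("%smapping %d page(s) via c_p_a()\n", enable ? "" : "un", numpages);
}

static void model_kernel_map_pages(int numpages, bool enable)
{
    /* If page allocator is not up yet then do not call c_p_a(). */
    if (!model_debug_pagealloc_enabled)
        return;
    model_change_page_attr(numpages, enable);
}

int main(void)
{
    model_kernel_map_pages(1, true);          /* early boot: silently skipped */
    model_debug_pagealloc_enabled = true;
    model_kernel_map_pages(1, false);         /* later: reaches c_p_a() */
    return 0;
}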