summaryrefslogtreecommitdiffstats
path: root/arch/powerpc/mm
diff options
context:
space:
mode:
authorAneesh Kumar K.V2019-05-28 07:36:25 +0200
committerMichael Ellerman2019-07-04 16:48:00 +0200
commit5d49275a27310233964fc3edc8dd097a094ce338 (patch)
tree4b792dec2affcd9e58e2bf9b63afbe0f7209accd /arch/powerpc/mm
parentpowerpc/mm: Handle page table allocation failures (diff)
downloadkernel-qcow2-linux-5d49275a27310233964fc3edc8dd097a094ce338.tar.gz
kernel-qcow2-linux-5d49275a27310233964fc3edc8dd097a094ce338.tar.xz
kernel-qcow2-linux-5d49275a27310233964fc3edc8dd097a094ce338.zip
powerpc/mm/hugetlb: Fix kernel crash if we fail to allocate page table caches
We only check for hugetlb allocations, because with hugetlb we do conditional registration. For PGD/PUD/PMD levels we register them always in pgtable_cache_init. Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com> Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--arch/powerpc/mm/hugetlbpage.c7
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 1de0f43a68e5..f55dc110f2ad 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -61,12 +61,17 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
num_hugepd = 1;
}
+ if (!cachep) {
+ WARN_ONCE(1, "No page table cache created for hugetlb tables");
+ return -ENOMEM;
+ }
+
new = kmem_cache_alloc(cachep, pgtable_gfp_flags(mm, GFP_KERNEL));
BUG_ON(pshift > HUGEPD_SHIFT_MASK);
BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK);
- if (! new)
+ if (!new)
return -ENOMEM;
/*