author    Linus Torvalds  2019-07-14 01:08:36 +0200
committer Linus Torvalds  2019-07-14 01:08:36 +0200
commit    192f0f8e9db7efe4ac98d47f5fa4334e43c1204d
tree      9d1f274bf91e30bafc1296ca0520f7ef49c5d15a  /arch/powerpc/mm
parent    Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc
parent    powerpc/powernv/idle: Fix restore of SPRN_LDBAR for POWER9 stop state.
Merge tag 'powerpc-5.3-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux
Pull powerpc updates from Michael Ellerman:
 "Notable changes:

   - Removal of the NPU DMA code, used by the out-of-tree Nvidia
     driver, as well as some other functions only used by drivers that
     haven't (yet?) made it upstream.

   - A fix for a bug in our handling of hardware watchpoints (eg. perf
     record -e mem: ...) which could lead to register corruption and
     kernel crashes.

   - Enable HAVE_ARCH_HUGE_VMAP, which allows us to use large pages for
     vmalloc when using the Radix MMU.

   - A large but incremental rewrite of our exception handling code to
     use gas macros rather than multiple levels of nested CPP macros.

  And the usual small fixes, cleanups and improvements.

  Thanks to: Alastair D'Silva, Alexey Kardashevskiy, Andreas Schwab,
  Aneesh Kumar K.V, Anju T Sudhakar, Anton Blanchard, Arnd Bergmann,
  Athira Rajeev, Cédric Le Goater, Christian Lamparter, Christophe
  Leroy, Christophe Lombard, Christoph Hellwig, Daniel Axtens, Denis
  Efremov, Enrico Weigelt, Frederic Barrat, Gautham R. Shenoy, Geert
  Uytterhoeven, Geliang Tang, Gen Zhang, Greg Kroah-Hartman, Greg Kurz,
  Gustavo Romero, Krzysztof Kozlowski, Madhavan Srinivasan, Masahiro
  Yamada, Mathieu Malaterre, Michael Neuling, Nathan Lynch, Naveen N.
  Rao, Nicholas Piggin, Nishad Kamdar, Oliver O'Halloran, Qian Cai,
  Ravi Bangoria, Sachin Sant, Sam Bobroff, Satheesh Rajendran, Segher
  Boessenkool, Shaokun Zhang, Shawn Anastasio, Stewart Smith, Suraj
  Jitindar Singh, Thiago Jung Bauermann, YueHaibing"

* tag 'powerpc-5.3-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux: (163 commits)
  powerpc/powernv/idle: Fix restore of SPRN_LDBAR for POWER9 stop state.
  powerpc/eeh: Handle hugepages in ioremap space
  ocxl: Update for AFU descriptor template version 1.1
  powerpc/boot: pass CONFIG options in a simpler and more robust way
  powerpc/boot: add {get, put}_unaligned_be32 to xz_config.h
  powerpc/irq: Don't WARN continuously in arch_local_irq_restore()
  powerpc/module64: Use symbolic instructions names.
  powerpc/module32: Use symbolic instructions names.
  powerpc: Move PPC_HA() PPC_HI() and PPC_LO() to ppc-opcode.h
  powerpc/module64: Fix comment in R_PPC64_ENTRY handling
  powerpc/boot: Add lzo support for uImage
  powerpc/boot: Add lzma support for uImage
  powerpc/boot: don't force gzipped uImage
  powerpc/8xx: Add microcode patch to move SMC parameter RAM.
  powerpc/8xx: Use IO accessors in microcode programming.
  powerpc/8xx: replace #ifdefs by IS_ENABLED() in microcode.c
  powerpc/8xx: refactor programming of microcode CPM params.
  powerpc/8xx: refactor printing of microcode patch name.
  powerpc/8xx: Refactor microcode write
  powerpc/8xx: refactor writing of CPM microcode arrays
  ...
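The HAVE_ARCH_HUGE_VMAP change called out above works by letting the generic vmalloc/ioremap mapper ask the architecture whether a single large leaf entry may replace a run of 4K PTEs; the hooks it consults (arch_ioremap_pmd_supported(), pmd_set_huge(), pmd_clear_huge()) are the ones added to radix_pgtable.c in the diff below. A minimal user-space model of that decision, assuming a 2 MiB PMD and purely illustrative model_* names:

    /* Toy model of the huge-mapping decision behind HAVE_ARCH_HUGE_VMAP.
     * A 2M leaf is installed only when the arch opted in (radix_enabled()
     * in this merge) and the range is large enough and PMD-aligned.
     * Every name here is illustrative, not kernel API. */
    #include <stdbool.h>
    #include <stdio.h>

    #define MODEL_PMD_SIZE (2UL << 20)

    static bool model_arch_pmd_supported = true; /* arch_ioremap_pmd_supported() */

    static bool model_try_huge_pmd(unsigned long ea, unsigned long end,
                                   unsigned long pa)
    {
            if (!model_arch_pmd_supported)
                    return false;
            if (end - ea < MODEL_PMD_SIZE)
                    return false;
            if ((ea | pa) & (MODEL_PMD_SIZE - 1))
                    return false;
            printf("2M leaf maps 0x%lx -> 0x%lx\n", ea, pa);
            return true;
    }

    int main(void)
    {
            if (!model_try_huge_pmd(0x200000UL, 0x600000UL, 0x40000000UL))
                    puts("fall back to 4K PTEs");
            return 0;
    }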
Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--  arch/powerpc/mm/book3s64/Makefile         |   1
-rw-r--r--  arch/powerpc/mm/book3s64/hash_native.c    |   6
-rw-r--r--  arch/powerpc/mm/book3s64/hash_utils.c     |   6
-rw-r--r--  arch/powerpc/mm/book3s64/mmu_context.c    |   1
-rw-r--r--  arch/powerpc/mm/book3s64/pgtable.c        |  23
-rw-r--r--  arch/powerpc/mm/book3s64/radix_pgtable.c  | 149
-rw-r--r--  arch/powerpc/mm/book3s64/radix_tlb.c      |  40
-rw-r--r--  arch/powerpc/mm/book3s64/vphn.c           |  73
-rw-r--r--  arch/powerpc/mm/book3s64/vphn.h           |  16
-rw-r--r--  arch/powerpc/mm/hugetlbpage.c             |  25
-rw-r--r--  arch/powerpc/mm/init_64.c                 |   5
-rw-r--r--  arch/powerpc/mm/mem.c                     |   4
-rw-r--r--  arch/powerpc/mm/numa.c                    |  61
-rw-r--r--  arch/powerpc/mm/pgtable.c                 |  16
-rw-r--r--  arch/powerpc/mm/pgtable_32.c              |   2
-rw-r--r--  arch/powerpc/mm/pgtable_64.c              |  39
-rw-r--r--  arch/powerpc/mm/ptdump/ptdump.c           |   6
17 files changed, 285 insertions, 188 deletions
diff --git a/arch/powerpc/mm/book3s64/Makefile b/arch/powerpc/mm/book3s64/Makefile
index 974b4fc19f4f..fd393b8be14f 100644
--- a/arch/powerpc/mm/book3s64/Makefile
+++ b/arch/powerpc/mm/book3s64/Makefile
@@ -10,7 +10,6 @@ obj-$(CONFIG_PPC_NATIVE) += hash_native.o
obj-$(CONFIG_PPC_RADIX_MMU) += radix_pgtable.o radix_tlb.o
obj-$(CONFIG_PPC_4K_PAGES) += hash_4k.o
obj-$(CONFIG_PPC_64K_PAGES) += hash_64k.o
-obj-$(CONFIG_PPC_SPLPAR) += vphn.o
obj-$(CONFIG_HUGETLB_PAGE) += hash_hugetlbpage.o
ifdef CONFIG_HUGETLB_PAGE
obj-$(CONFIG_PPC_RADIX_MMU) += radix_hugetlbpage.o
diff --git a/arch/powerpc/mm/book3s64/hash_native.c b/arch/powerpc/mm/book3s64/hash_native.c
index 30d62ffe3310..90ab4f31e2b3 100644
--- a/arch/powerpc/mm/book3s64/hash_native.c
+++ b/arch/powerpc/mm/book3s64/hash_native.c
@@ -41,7 +41,7 @@
#define HPTE_LOCK_BIT (56+3)
#endif
-DEFINE_RAW_SPINLOCK(native_tlbie_lock);
+static DEFINE_RAW_SPINLOCK(native_tlbie_lock);
static inline void tlbiel_hash_set_isa206(unsigned int set, unsigned int is)
{
@@ -56,7 +56,7 @@ static inline void tlbiel_hash_set_isa206(unsigned int set, unsigned int is)
* tlbiel instruction for hash, set invalidation
* i.e., r=1 and is=01 or is=10 or is=11
*/
-static inline void tlbiel_hash_set_isa300(unsigned int set, unsigned int is,
+static __always_inline void tlbiel_hash_set_isa300(unsigned int set, unsigned int is,
unsigned int pid,
unsigned int ric, unsigned int prs)
{
@@ -112,7 +112,7 @@ static void tlbiel_all_isa300(unsigned int num_sets, unsigned int is)
asm volatile("ptesync": : :"memory");
- asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory");
+ asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT "; isync" : : :"memory");
}
void hash__tlbiel_all(unsigned int action)
diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index 28ced26f2a00..9a5963e07a82 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -684,10 +684,8 @@ static void __init htab_init_page_sizes(void)
if (mmu_psize_defs[MMU_PAGE_16M].shift &&
memblock_phys_mem_size() >= 0x40000000)
mmu_vmemmap_psize = MMU_PAGE_16M;
- else if (mmu_psize_defs[MMU_PAGE_64K].shift)
- mmu_vmemmap_psize = MMU_PAGE_64K;
else
- mmu_vmemmap_psize = MMU_PAGE_4K;
+ mmu_vmemmap_psize = mmu_virtual_psize;
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
printk(KERN_DEBUG "Page orders: linear mapping = %d, "
@@ -981,7 +979,7 @@ void __init hash__early_init_devtree(void)
htab_scan_page_sizes();
}
-struct hash_mm_context init_hash_mm_context;
+static struct hash_mm_context init_hash_mm_context;
void __init hash__early_init_mmu(void)
{
#ifndef CONFIG_PPC_64K_PAGES
diff --git a/arch/powerpc/mm/book3s64/mmu_context.c b/arch/powerpc/mm/book3s64/mmu_context.c
index 794404d50a85..2d0cb5ba9a47 100644
--- a/arch/powerpc/mm/book3s64/mmu_context.c
+++ b/arch/powerpc/mm/book3s64/mmu_context.c
@@ -174,7 +174,6 @@ static int radix__init_new_context(struct mm_struct *mm)
*/
asm volatile("ptesync;isync" : : : "memory");
- mm->context.npu_context = NULL;
mm->context.hash_context = NULL;
return index;
diff --git a/arch/powerpc/mm/book3s64/pgtable.c b/arch/powerpc/mm/book3s64/pgtable.c
index 01bc9663360d..7d0e0d0d22c4 100644
--- a/arch/powerpc/mm/book3s64/pgtable.c
+++ b/arch/powerpc/mm/book3s64/pgtable.c
@@ -72,7 +72,7 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
WARN_ON(pte_hw_valid(pmd_pte(*pmdp)) && !pte_protnone(pmd_pte(*pmdp)));
assert_spin_locked(pmd_lockptr(mm, pmdp));
- WARN_ON(!(pmd_large(pmd) || pmd_devmap(pmd)));
+ WARN_ON(!(pmd_large(pmd)));
#endif
trace_hugepage_set_pmd(addr, pmd_val(pmd));
return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
@@ -446,3 +446,24 @@ int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
return true;
}
+
+int ioremap_range(unsigned long ea, phys_addr_t pa, unsigned long size, pgprot_t prot, int nid)
+{
+ unsigned long i;
+
+ if (radix_enabled())
+ return radix__ioremap_range(ea, pa, size, prot, nid);
+
+ for (i = 0; i < size; i += PAGE_SIZE) {
+ int err = map_kernel_page(ea + i, pa + i, prot);
+ if (err) {
+ if (slab_is_available())
+ unmap_kernel_range(ea, size);
+ else
+ WARN_ON_ONCE(1); /* Should clean up */
+ return err;
+ }
+ }
+
+ return 0;
+}
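The ioremap_range() helper added above maps the range one page at a time; on failure it can only tear the partial mapping back down once the slab allocator is up, so early-boot failures just warn. A compilable sketch of that error-handling shape, with hypothetical fake_* stand-ins for map_kernel_page() and unmap_kernel_range():

    #include <stdbool.h>
    #include <stdio.h>

    #define MODEL_PAGE_SIZE 4096UL

    static bool model_slab_available = true; /* slab_is_available() */

    static int fake_map_page(unsigned long ea, unsigned long pa)
    {
            if (ea == 3 * MODEL_PAGE_SIZE) /* inject a failure */
                    return -1;
            printf("map 0x%lx -> 0x%lx\n", ea, pa);
            return 0;
    }

    static void fake_unmap_range(unsigned long ea, unsigned long size)
    {
            printf("unmap [0x%lx, 0x%lx)\n", ea, ea + size);
    }

    static int model_ioremap_range(unsigned long ea, unsigned long pa,
                                   unsigned long size)
    {
            for (unsigned long i = 0; i < size; i += MODEL_PAGE_SIZE) {
                    int err = fake_map_page(ea + i, pa + i);

                    if (err) {
                            /* Clean up only when the allocator is up,
                             * as the real code does. */
                            if (model_slab_available)
                                    fake_unmap_range(ea, size);
                            return err;
                    }
            }
            return 0;
    }

    int main(void)
    {
            return model_ioremap_range(0, 0x10000000UL,
                                       4 * MODEL_PAGE_SIZE) ? 1 : 0;
    }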
diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
index 273ae66a9a45..65c2ba1e1783 100644
--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
+++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
@@ -7,6 +7,7 @@
#define pr_fmt(fmt) "radix-mmu: " fmt
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/sched/mm.h>
#include <linux/memblock.h>
@@ -198,14 +199,14 @@ void radix__change_memory_range(unsigned long start, unsigned long end,
pudp = pud_alloc(&init_mm, pgdp, idx);
if (!pudp)
continue;
- if (pud_huge(*pudp)) {
+ if (pud_is_leaf(*pudp)) {
ptep = (pte_t *)pudp;
goto update_the_pte;
}
pmdp = pmd_alloc(&init_mm, pudp, idx);
if (!pmdp)
continue;
- if (pmd_huge(*pmdp)) {
+ if (pmd_is_leaf(*pmdp)) {
ptep = pmdp_ptep(pmdp);
goto update_the_pte;
}
@@ -319,7 +320,7 @@ static int __meminit create_physical_mapping(unsigned long start,
return 0;
}
-void __init radix_init_pgtable(void)
+static void __init radix_init_pgtable(void)
{
unsigned long rts_field;
struct memblock_region *reg;
@@ -515,14 +516,6 @@ void __init radix__early_init_devtree(void)
mmu_psize_defs[MMU_PAGE_64K].shift = 16;
mmu_psize_defs[MMU_PAGE_64K].ap = 0x5;
found:
-#ifdef CONFIG_SPARSEMEM_VMEMMAP
- if (mmu_psize_defs[MMU_PAGE_2M].shift) {
- /*
- * map vmemmap using 2M if available
- */
- mmu_vmemmap_psize = MMU_PAGE_2M;
- }
-#endif /* CONFIG_SPARSEMEM_VMEMMAP */
return;
}
@@ -587,7 +580,13 @@ void __init radix__early_init_mmu(void)
#ifdef CONFIG_SPARSEMEM_VMEMMAP
/* vmemmap mapping */
- mmu_vmemmap_psize = mmu_virtual_psize;
+ if (mmu_psize_defs[MMU_PAGE_2M].shift) {
+ /*
+ * map vmemmap using 2M if available
+ */
+ mmu_vmemmap_psize = MMU_PAGE_2M;
+ } else
+ mmu_vmemmap_psize = mmu_virtual_psize;
#endif
/*
* initialize page table size
@@ -832,7 +831,7 @@ static void remove_pmd_table(pmd_t *pmd_start, unsigned long addr,
if (!pmd_present(*pmd))
continue;
- if (pmd_huge(*pmd)) {
+ if (pmd_is_leaf(*pmd)) {
split_kernel_mapping(addr, end, PMD_SIZE, (pte_t *)pmd);
continue;
}
@@ -857,7 +856,7 @@ static void remove_pud_table(pud_t *pud_start, unsigned long addr,
if (!pud_present(*pud))
continue;
- if (pud_huge(*pud)) {
+ if (pud_is_leaf(*pud)) {
split_kernel_mapping(addr, end, PUD_SIZE, (pte_t *)pud);
continue;
}
@@ -883,7 +882,7 @@ static void __meminit remove_pagetable(unsigned long start, unsigned long end)
if (!pgd_present(*pgd))
continue;
- if (pgd_huge(*pgd)) {
+ if (pgd_is_leaf(*pgd)) {
split_kernel_mapping(addr, end, PGDIR_SIZE, (pte_t *)pgd);
continue;
}
@@ -1118,3 +1117,123 @@ void radix__ptep_modify_prot_commit(struct vm_area_struct *vma,
set_pte_at(mm, addr, ptep, pte);
}
+
+int __init arch_ioremap_pud_supported(void)
+{
+ /* HPT does not cope with large pages in the vmalloc area */
+ return radix_enabled();
+}
+
+int __init arch_ioremap_pmd_supported(void)
+{
+ return radix_enabled();
+}
+
+int p4d_free_pud_page(p4d_t *p4d, unsigned long addr)
+{
+ return 0;
+}
+
+int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
+{
+ pte_t *ptep = (pte_t *)pud;
+ pte_t new_pud = pfn_pte(__phys_to_pfn(addr), prot);
+
+ if (!radix_enabled())
+ return 0;
+
+ set_pte_at(&init_mm, 0 /* radix unused */, ptep, new_pud);
+
+ return 1;
+}
+
+int pud_clear_huge(pud_t *pud)
+{
+ if (pud_huge(*pud)) {
+ pud_clear(pud);
+ return 1;
+ }
+
+ return 0;
+}
+
+int pud_free_pmd_page(pud_t *pud, unsigned long addr)
+{
+ pmd_t *pmd;
+ int i;
+
+ pmd = (pmd_t *)pud_page_vaddr(*pud);
+ pud_clear(pud);
+
+ flush_tlb_kernel_range(addr, addr + PUD_SIZE);
+
+ for (i = 0; i < PTRS_PER_PMD; i++) {
+ if (!pmd_none(pmd[i])) {
+ pte_t *pte;
+ pte = (pte_t *)pmd_page_vaddr(pmd[i]);
+
+ pte_free_kernel(&init_mm, pte);
+ }
+ }
+
+ pmd_free(&init_mm, pmd);
+
+ return 1;
+}
+
+int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
+{
+ pte_t *ptep = (pte_t *)pmd;
+ pte_t new_pmd = pfn_pte(__phys_to_pfn(addr), prot);
+
+ if (!radix_enabled())
+ return 0;
+
+ set_pte_at(&init_mm, 0 /* radix unused */, ptep, new_pmd);
+
+ return 1;
+}
+
+int pmd_clear_huge(pmd_t *pmd)
+{
+ if (pmd_huge(*pmd)) {
+ pmd_clear(pmd);
+ return 1;
+ }
+
+ return 0;
+}
+
+int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
+{
+ pte_t *pte;
+
+ pte = (pte_t *)pmd_page_vaddr(*pmd);
+ pmd_clear(pmd);
+
+ flush_tlb_kernel_range(addr, addr + PMD_SIZE);
+
+ pte_free_kernel(&init_mm, pte);
+
+ return 1;
+}
+
+int radix__ioremap_range(unsigned long ea, phys_addr_t pa, unsigned long size,
+ pgprot_t prot, int nid)
+{
+ if (likely(slab_is_available())) {
+ int err = ioremap_page_range(ea, ea + size, pa, prot);
+ if (err)
+ unmap_kernel_range(ea, size);
+ return err;
+ } else {
+ unsigned long i;
+
+ for (i = 0; i < size; i += PAGE_SIZE) {
+ int err = map_kernel_page(ea + i, pa + i, prot);
+ if (WARN_ON_ONCE(err)) /* Should clean up */
+ return err;
+ }
+ return 0;
+ }
+}
diff --git a/arch/powerpc/mm/book3s64/radix_tlb.c b/arch/powerpc/mm/book3s64/radix_tlb.c
index ce8a77fae6a7..71f7fede2fa4 100644
--- a/arch/powerpc/mm/book3s64/radix_tlb.c
+++ b/arch/powerpc/mm/book3s64/radix_tlb.c
@@ -25,7 +25,7 @@
* tlbiel instruction for radix, set invalidation
* i.e., r=1 and is=01 or is=10 or is=11
*/
-static inline void tlbiel_radix_set_isa300(unsigned int set, unsigned int is,
+static __always_inline void tlbiel_radix_set_isa300(unsigned int set, unsigned int is,
unsigned int pid,
unsigned int ric, unsigned int prs)
{
@@ -83,7 +83,7 @@ void radix__tlbiel_all(unsigned int action)
else
WARN(1, "%s called on pre-POWER9 CPU\n", __func__);
- asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory");
+ asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT "; isync" : : :"memory");
}
static __always_inline void __tlbiel_pid(unsigned long pid, int set,
@@ -146,8 +146,8 @@ static __always_inline void __tlbie_lpid(unsigned long lpid, unsigned long ric)
trace_tlbie(lpid, 0, rb, rs, ric, prs, r);
}
-static inline void __tlbiel_lpid_guest(unsigned long lpid, int set,
- unsigned long ric)
+static __always_inline void __tlbiel_lpid_guest(unsigned long lpid, int set,
+ unsigned long ric)
{
unsigned long rb,rs,prs,r;
@@ -163,8 +163,8 @@ static inline void __tlbiel_lpid_guest(unsigned long lpid, int set,
}
-static inline void __tlbiel_va(unsigned long va, unsigned long pid,
- unsigned long ap, unsigned long ric)
+static __always_inline void __tlbiel_va(unsigned long va, unsigned long pid,
+ unsigned long ap, unsigned long ric)
{
unsigned long rb,rs,prs,r;
@@ -179,8 +179,8 @@ static inline void __tlbiel_va(unsigned long va, unsigned long pid,
trace_tlbie(0, 1, rb, rs, ric, prs, r);
}
-static inline void __tlbie_va(unsigned long va, unsigned long pid,
- unsigned long ap, unsigned long ric)
+static __always_inline void __tlbie_va(unsigned long va, unsigned long pid,
+ unsigned long ap, unsigned long ric)
{
unsigned long rb,rs,prs,r;
@@ -195,8 +195,8 @@ static inline void __tlbie_va(unsigned long va, unsigned long pid,
trace_tlbie(0, 0, rb, rs, ric, prs, r);
}
-static inline void __tlbie_lpid_va(unsigned long va, unsigned long lpid,
- unsigned long ap, unsigned long ric)
+static __always_inline void __tlbie_lpid_va(unsigned long va, unsigned long lpid,
+ unsigned long ap, unsigned long ric)
{
unsigned long rb,rs,prs,r;
@@ -235,7 +235,7 @@ static inline void fixup_tlbie_lpid(unsigned long lpid)
/*
* We use 128 set in radix mode and 256 set in hpt mode.
*/
-static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
+static __always_inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
{
int set;
@@ -258,7 +258,7 @@ static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
__tlbiel_pid(pid, set, RIC_FLUSH_TLB);
asm volatile("ptesync": : :"memory");
- asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory");
+ asm volatile(PPC_RADIX_INVALIDATE_ERAT_USER "; isync" : : :"memory");
}
static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
@@ -310,7 +310,7 @@ static inline void _tlbiel_lpid(unsigned long lpid, unsigned long ric)
__tlbiel_lpid(lpid, set, RIC_FLUSH_TLB);
asm volatile("ptesync": : :"memory");
- asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory");
+ asm volatile(PPC_RADIX_INVALIDATE_ERAT_GUEST "; isync" : : :"memory");
}
static inline void _tlbie_lpid(unsigned long lpid, unsigned long ric)
@@ -337,7 +337,7 @@ static inline void _tlbie_lpid(unsigned long lpid, unsigned long ric)
asm volatile("eieio; tlbsync; ptesync": : :"memory");
}
-static inline void _tlbiel_lpid_guest(unsigned long lpid, unsigned long ric)
+static __always_inline void _tlbiel_lpid_guest(unsigned long lpid, unsigned long ric)
{
int set;
@@ -362,7 +362,7 @@ static inline void _tlbiel_lpid_guest(unsigned long lpid, unsigned long ric)
__tlbiel_lpid_guest(lpid, set, RIC_FLUSH_TLB);
asm volatile("ptesync": : :"memory");
- asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
+ asm volatile(PPC_RADIX_INVALIDATE_ERAT_GUEST : : :"memory");
}
@@ -377,8 +377,8 @@ static inline void __tlbiel_va_range(unsigned long start, unsigned long end,
__tlbiel_va(addr, pid, ap, RIC_FLUSH_TLB);
}
-static inline void _tlbiel_va(unsigned long va, unsigned long pid,
- unsigned long psize, unsigned long ric)
+static __always_inline void _tlbiel_va(unsigned long va, unsigned long pid,
+ unsigned long psize, unsigned long ric)
{
unsigned long ap = mmu_get_ap(psize);
@@ -409,8 +409,8 @@ static inline void __tlbie_va_range(unsigned long start, unsigned long end,
__tlbie_va(addr, pid, ap, RIC_FLUSH_TLB);
}
-static inline void _tlbie_va(unsigned long va, unsigned long pid,
- unsigned long psize, unsigned long ric)
+static __always_inline void _tlbie_va(unsigned long va, unsigned long pid,
+ unsigned long psize, unsigned long ric)
{
unsigned long ap = mmu_get_ap(psize);
@@ -420,7 +420,7 @@ static inline void _tlbie_va(unsigned long va, unsigned long pid,
asm volatile("eieio; tlbsync; ptesync": : :"memory");
}
-static inline void _tlbie_lpid_va(unsigned long va, unsigned long lpid,
+static __always_inline void _tlbie_lpid_va(unsigned long va, unsigned long lpid,
unsigned long psize, unsigned long ric)
{
unsigned long ap = mmu_get_ap(psize);
diff --git a/arch/powerpc/mm/book3s64/vphn.c b/arch/powerpc/mm/book3s64/vphn.c
deleted file mode 100644
index 0ee7734afb50..000000000000
--- a/arch/powerpc/mm/book3s64/vphn.c
+++ /dev/null
@@ -1,73 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <asm/byteorder.h>
-#include "vphn.h"
-
-/*
- * The associativity domain numbers are returned from the hypervisor as a
- * stream of mixed 16-bit and 32-bit fields. The stream is terminated by the
- * special value of "all ones" (aka. 0xffff) and its size may not exceed 48
- * bytes.
- *
- * --- 16-bit fields -->
- * _________________________
- * | 0 | 1 | 2 | 3 | be_packed[0]
- * ------+-----+-----+------
- * _________________________
- * | 4 | 5 | 6 | 7 | be_packed[1]
- * -------------------------
- * ...
- * _________________________
- * | 20 | 21 | 22 | 23 | be_packed[5]
- * -------------------------
- *
- * Convert to the sequence they would appear in the ibm,associativity property.
- */
-int vphn_unpack_associativity(const long *packed, __be32 *unpacked)
-{
- __be64 be_packed[VPHN_REGISTER_COUNT];
- int i, nr_assoc_doms = 0;
- const __be16 *field = (const __be16 *) be_packed;
- u16 last = 0;
- bool is_32bit = false;
-
-#define VPHN_FIELD_UNUSED (0xffff)
-#define VPHN_FIELD_MSB (0x8000)
-#define VPHN_FIELD_MASK (~VPHN_FIELD_MSB)
-
- /* Let's fix the values returned by plpar_hcall9() */
- for (i = 0; i < VPHN_REGISTER_COUNT; i++)
- be_packed[i] = cpu_to_be64(packed[i]);
-
- for (i = 1; i < VPHN_ASSOC_BUFSIZE; i++) {
- u16 new = be16_to_cpup(field++);
-
- if (is_32bit) {
- /*
- * Let's concatenate the 16 bits of this field to the
- * 15 lower bits of the previous field
- */
- unpacked[++nr_assoc_doms] =
- cpu_to_be32(last << 16 | new);
- is_32bit = false;
- } else if (new == VPHN_FIELD_UNUSED)
- /* This is the list terminator */
- break;
- else if (new & VPHN_FIELD_MSB) {
- /* Data is in the lower 15 bits of this field */
- unpacked[++nr_assoc_doms] =
- cpu_to_be32(new & VPHN_FIELD_MASK);
- } else {
- /*
- * Data is in the lower 15 bits of this field
- * concatenated with the next 16 bit field
- */
- last = new;
- is_32bit = true;
- }
- }
-
- /* The first cell contains the length of the property */
- unpacked[0] = cpu_to_be32(nr_assoc_doms);
-
- return nr_assoc_doms;
-}
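To make the encoding the deleted vphn.c documents concrete: a 16-bit field with the MSB set carries a 15-bit domain number on its own, a field with the MSB clear holds the upper half of a 32-bit domain completed by the following 16-bit field, and 0xffff terminates the stream. A small worked example with made-up field values:

    #include <stdint.h>
    #include <stdio.h>

    #define VPHN_FIELD_MSB  0x8000
    #define VPHN_FIELD_MASK (~VPHN_FIELD_MSB & 0xffff)

    int main(void)
    {
            uint16_t f = 0x8005;               /* MSB set: 15-bit domain */
            uint16_t hi = 0x0001, lo = 0x2345; /* MSB clear: 32-bit pair */

            printf("15-bit field 0x%04x -> domain %u\n",
                   f, f & VPHN_FIELD_MASK);
            printf("32-bit pair 0x%04x,0x%04x -> domain %u\n",
                   hi, lo, ((uint32_t)hi << 16) | lo);
            return 0;
    }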
diff --git a/arch/powerpc/mm/book3s64/vphn.h b/arch/powerpc/mm/book3s64/vphn.h
deleted file mode 100644
index f0b93c2dd578..000000000000
--- a/arch/powerpc/mm/book3s64/vphn.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ARCH_POWERPC_MM_VPHN_H_
-#define _ARCH_POWERPC_MM_VPHN_H_
-
-/* The H_HOME_NODE_ASSOCIATIVITY h_call returns 6 64-bit registers. */
-#define VPHN_REGISTER_COUNT 6
-
-/*
- * 6 64-bit registers unpacked into up to 24 be32 associativity values. To
- * form the complete property we have to add the length in the first cell.
- */
-#define VPHN_ASSOC_BUFSIZE (VPHN_REGISTER_COUNT*sizeof(u64)/sizeof(u16) + 1)
-
-extern int vphn_unpack_associativity(const long *packed, __be32 *unpacked);
-
-#endif
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 51716c11d0fb..a8953f108808 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -61,12 +61,17 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
num_hugepd = 1;
}
+ if (!cachep) {
+ WARN_ONCE(1, "No page table cache created for hugetlb tables");
+ return -ENOMEM;
+ }
+
new = kmem_cache_alloc(cachep, pgtable_gfp_flags(mm, GFP_KERNEL));
BUG_ON(pshift > HUGEPD_SHIFT_MASK);
BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK);
- if (! new)
+ if (!new)
return -ENOMEM;
/*
@@ -130,6 +135,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz
} else {
pdshift = PUD_SHIFT;
pu = pud_alloc(mm, pg, addr);
+ if (!pu)
+ return NULL;
if (pshift == PUD_SHIFT)
return (pte_t *)pu;
else if (pshift > PMD_SHIFT) {
@@ -138,6 +145,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz
} else {
pdshift = PMD_SHIFT;
pm = pmd_alloc(mm, pu, addr);
+ if (!pm)
+ return NULL;
if (pshift == PMD_SHIFT)
/* 16MB hugepage */
return (pte_t *)pm;
@@ -154,12 +163,16 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz
} else {
pdshift = PUD_SHIFT;
pu = pud_alloc(mm, pg, addr);
+ if (!pu)
+ return NULL;
if (pshift >= PUD_SHIFT) {
ptl = pud_lockptr(mm, pu);
hpdp = (hugepd_t *)pu;
} else {
pdshift = PMD_SHIFT;
pm = pmd_alloc(mm, pu, addr);
+ if (!pm)
+ return NULL;
ptl = pmd_lockptr(mm, pm);
hpdp = (hugepd_t *)pm;
}
@@ -581,6 +594,7 @@ __setup("hugepagesz=", hugepage_setup_sz);
static int __init hugetlbpage_init(void)
{
+ bool configured = false;
int psize;
if (hugetlb_disabled) {
@@ -631,10 +645,15 @@ static int __init hugetlbpage_init(void)
pgtable_cache_add(pdshift - shift);
else if (IS_ENABLED(CONFIG_PPC_FSL_BOOK3E) || IS_ENABLED(CONFIG_PPC_8xx))
pgtable_cache_add(PTE_T_ORDER);
+
+ configured = true;
}
- if (IS_ENABLED(CONFIG_HUGETLB_PAGE_SIZE_VARIABLE))
- hugetlbpage_init_default();
+ if (configured) {
+ if (IS_ENABLED(CONFIG_HUGETLB_PAGE_SIZE_VARIABLE))
+ hugetlbpage_init_default();
+ } else
+ pr_info("Failed to initialize. Disabling HugeTLB");
return 0;
}
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index a4e17a979e45..a44f6281ca3a 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -194,8 +194,11 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
* fail due to alignment issues when using 16MB hugepages, so
* fall back to system memory if the altmap allocation fail.
*/
- if (altmap)
+ if (altmap) {
p = altmap_alloc_block_buf(page_size, altmap);
+ if (!p)
+ pr_debug("altmap block allocation failed, falling back to system memory");
+ }
if (!p)
p = vmemmap_alloc_block_buf(page_size, node);
if (!p)
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 2540d3b2588c..6d5f0fc76666 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -120,7 +120,7 @@ int __ref arch_add_memory(int nid, u64 start, u64 size,
start, start + size, rc);
return -EFAULT;
}
- flush_inval_dcache_range(start, start + size);
+ flush_dcache_range(start, start + size);
return __add_pages(nid, start_pfn, nr_pages, restrictions);
}
@@ -146,7 +146,7 @@ void __ref arch_remove_memory(int nid, u64 start, u64 size,
/* Remove htab bolted mappings for this section of memory */
start = (unsigned long)__va(start);
- flush_inval_dcache_range(start, start + size);
+ flush_dcache_range(start, start + size);
ret = remove_section_mapping(start, start + size);
WARN_ON_ONCE(ret);
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 917904d2fe97..50d68d21ddcc 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -163,6 +163,22 @@ static void unmap_cpu_from_node(unsigned long cpu)
}
#endif /* CONFIG_HOTPLUG_CPU || CONFIG_PPC_SPLPAR */
+int cpu_distance(__be32 *cpu1_assoc, __be32 *cpu2_assoc)
+{
+ int dist = 0;
+
+ int i, index;
+
+ for (i = 0; i < distance_ref_points_depth; i++) {
+ index = be32_to_cpu(distance_ref_points[i]);
+ if (cpu1_assoc[index] == cpu2_assoc[index])
+ break;
+ dist++;
+ }
+
+ return dist;
+}
+
/* must hold reference to node during call */
static const __be32 *of_get_associativity(struct device_node *dev)
{
@@ -212,7 +228,7 @@ static int associativity_to_nid(const __be32 *associativity)
{
int nid = NUMA_NO_NODE;
- if (min_common_depth == -1)
+ if (!numa_enabled)
goto out;
if (of_read_number(associativity, 1) >= min_common_depth)
@@ -416,17 +432,19 @@ static int of_get_assoc_arrays(struct assoc_arrays *aa)
static int of_drconf_to_nid_single(struct drmem_lmb *lmb)
{
struct assoc_arrays aa = { .arrays = NULL };
- int default_nid = 0;
+ int default_nid = NUMA_NO_NODE;
int nid = default_nid;
int rc, index;
+ if ((min_common_depth < 0) || !numa_enabled)
+ return default_nid;
+
rc = of_get_assoc_arrays(&aa);
if (rc)
return default_nid;
- if (min_common_depth > 0 && min_common_depth <= aa.array_sz &&
- !(lmb->flags & DRCONF_MEM_AI_INVALID) &&
- lmb->aa_index < aa.n_arrays) {
+ if (min_common_depth <= aa.array_sz &&
+ !(lmb->flags & DRCONF_MEM_AI_INVALID) && lmb->aa_index < aa.n_arrays) {
index = lmb->aa_index * aa.array_sz + min_common_depth - 1;
nid = of_read_number(&aa.arrays[index], 1);
@@ -626,8 +644,14 @@ static int __init parse_numa_properties(void)
min_common_depth = find_min_common_depth();
- if (min_common_depth < 0)
+ if (min_common_depth < 0) {
+ /*
+ * if we fail to parse min_common_depth from device tree
+ * mark the numa disabled, boot with numa disabled.
+ */
+ numa_enabled = false;
return min_common_depth;
+ }
dbg("NUMA associativity depth for CPU/Memory: %d\n", min_common_depth);
@@ -743,7 +767,7 @@ void __init dump_numa_cpu_topology(void)
unsigned int node;
unsigned int cpu, count;
- if (min_common_depth == -1 || !numa_enabled)
+ if (!numa_enabled)
return;
for_each_online_node(node) {
@@ -808,7 +832,7 @@ static void __init find_possible_nodes(void)
struct device_node *rtas;
u32 numnodes, i;
- if (min_common_depth <= 0)
+ if (!numa_enabled)
return;
rtas = of_find_node_by_path("/rtas");
@@ -1010,7 +1034,7 @@ int hot_add_scn_to_nid(unsigned long scn_addr)
struct device_node *memory = NULL;
int nid;
- if (!numa_enabled || (min_common_depth < 0))
+ if (!numa_enabled)
return first_online_node;
memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
@@ -1063,9 +1087,6 @@ u64 memory_hotplug_max(void)
/* Virtual Processor Home Node (VPHN) support */
#ifdef CONFIG_PPC_SPLPAR
-
-#include "book3s64/vphn.h"
-
struct topology_update_data {
struct topology_update_data *next;
unsigned int cpu;
@@ -1161,25 +1182,13 @@ static int update_cpu_associativity_changes_mask(void)
* Retrieve the new associativity information for a virtual processor's
* home node.
*/
-static long hcall_vphn(unsigned long cpu, __be32 *associativity)
-{
- long rc;
- long retbuf[PLPAR_HCALL9_BUFSIZE] = {0};
- u64 flags = 1;
- int hwcpu = get_hard_smp_processor_id(cpu);
-
- rc = plpar_hcall9(H_HOME_NODE_ASSOCIATIVITY, retbuf, flags, hwcpu);
- vphn_unpack_associativity(retbuf, associativity);
-
- return rc;
-}
-
static long vphn_get_associativity(unsigned long cpu,
__be32 *associativity)
{
long rc;
- rc = hcall_vphn(cpu, associativity);
+ rc = hcall_vphn(get_hard_smp_processor_id(cpu),
+ VPHN_FLAG_VCPU, associativity);
switch (rc) {
case H_FUNCTION:
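A worked example of the cpu_distance() helper added to numa.c earlier in this diff: the distance between two CPUs is the number of leading reference-point levels at which their associativity arrays disagree. A user-space model with made-up associativity data:

    #include <stdio.h>

    static int model_cpu_distance(const unsigned int *c1, const unsigned int *c2,
                                  const unsigned int *ref_points, int depth)
    {
            int dist = 0;

            for (int i = 0; i < depth; i++) {
                    unsigned int idx = ref_points[i];

                    if (c1[idx] == c2[idx])
                            break;
                    dist++;
            }
            return dist;
    }

    int main(void)
    {
            unsigned int ref[] = { 4, 2 };            /* distance_ref_points */
            unsigned int cpu1[] = { 0, 0, 7, 0, 42 };
            unsigned int cpu2[] = { 0, 0, 7, 0, 43 }; /* differs at index 4 */

            /* Arrays differ at index 4, match at index 2: distance 1. */
            printf("distance = %d\n", model_cpu_distance(cpu1, cpu2, ref, 2));
            return 0;
    }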
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index fc10c0c24f51..e3759b69f81b 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -336,10 +336,11 @@ pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
if (pgd_none(pgd))
return NULL;
- if (pgd_huge(pgd)) {
+ if (pgd_is_leaf(pgd)) {
ret_pte = (pte_t *)pgdp;
goto out;
}
+
if (is_hugepd(__hugepd(pgd_val(pgd)))) {
hpdp = (hugepd_t *)&pgd;
goto out_huge;
@@ -357,14 +358,16 @@ pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
if (pud_none(pud))
return NULL;
- if (pud_huge(pud)) {
+ if (pud_is_leaf(pud)) {
ret_pte = (pte_t *)pudp;
goto out;
}
+
if (is_hugepd(__hugepd(pud_val(pud)))) {
hpdp = (hugepd_t *)&pud;
goto out_huge;
}
+
pdshift = PMD_SHIFT;
pmdp = pmd_offset(&pud, ea);
pmd = READ_ONCE(*pmdp);
@@ -393,15 +396,12 @@ pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
ret_pte = (pte_t *)pmdp;
goto out;
}
- /*
- * pmd_large check below will handle the swap pmd pte
- * we need to do both the check because they are config
- * dependent.
- */
- if (pmd_huge(pmd) || pmd_large(pmd)) {
+
+ if (pmd_is_leaf(pmd)) {
ret_pte = (pte_t *)pmdp;
goto out;
}
+
if (is_hugepd(__hugepd(pmd_val(pmd)))) {
hpdp = (hugepd_t *)&pmd;
goto out_huge;
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index d53188dee18f..35cb96cfc258 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -360,7 +360,7 @@ void mark_initmem_nx(void)
unsigned long numpages = PFN_UP((unsigned long)_einittext) -
PFN_DOWN((unsigned long)_sinittext);
- if (v_block_mapped((unsigned long)_stext) + 1)
+ if (v_block_mapped((unsigned long)_stext + 1))
mmu_mark_initmem_nx();
else
change_page_attr(page, numpages, PAGE_KERNEL);
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 12d5e083942d..9ad59b733984 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -103,14 +103,30 @@ unsigned long ioremap_bot;
unsigned long ioremap_bot = IOREMAP_BASE;
#endif
+int __weak ioremap_range(unsigned long ea, phys_addr_t pa, unsigned long size, pgprot_t prot, int nid)
+{
+ unsigned long i;
+
+ for (i = 0; i < size; i += PAGE_SIZE) {
+ int err = map_kernel_page(ea + i, pa + i, prot);
+ if (err) {
+ if (slab_is_available())
+ unmap_kernel_range(ea, size);
+ else
+ WARN_ON_ONCE(1); /* Should clean up */
+ return err;
+ }
+ }
+
+ return 0;
+}
+
/**
* __ioremap_at - Low level function to establish the page tables
* for an IO mapping
*/
void __iomem *__ioremap_at(phys_addr_t pa, void *ea, unsigned long size, pgprot_t prot)
{
- unsigned long i;
-
/* We don't support the 4K PFN hack with ioremap */
if (pgprot_val(prot) & H_PAGE_4K_PFN)
return NULL;
@@ -124,9 +140,8 @@ void __iomem *__ioremap_at(phys_addr_t pa, void *ea, unsigned long size, pgprot_
WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
WARN_ON(size & ~PAGE_MASK);
- for (i = 0; i < size; i += PAGE_SIZE)
- if (map_kernel_page((unsigned long)ea + i, pa + i, prot))
- return NULL;
+ if (ioremap_range((unsigned long)ea, pa, size, prot, NUMA_NO_NODE))
+ return NULL;
return (void __iomem *)ea;
}
@@ -177,8 +192,6 @@ void __iomem * __ioremap_caller(phys_addr_t addr, unsigned long size,
area->phys_addr = paligned;
ret = __ioremap_at(paligned, area->addr, size, prot);
- if (!ret)
- vunmap(area->addr);
} else {
ret = __ioremap_at(paligned, (void *)ioremap_bot, size, prot);
if (ret)
@@ -291,16 +304,20 @@ EXPORT_SYMBOL(__iounmap_at);
/* 4 level page table */
struct page *pgd_page(pgd_t pgd)
{
- if (pgd_huge(pgd))
+ if (pgd_is_leaf(pgd)) {
+ VM_WARN_ON(!pgd_huge(pgd));
return pte_page(pgd_pte(pgd));
+ }
return virt_to_page(pgd_page_vaddr(pgd));
}
#endif
struct page *pud_page(pud_t pud)
{
- if (pud_huge(pud))
+ if (pud_is_leaf(pud)) {
+ VM_WARN_ON(!pud_huge(pud));
return pte_page(pud_pte(pud));
+ }
return virt_to_page(pud_page_vaddr(pud));
}
@@ -310,8 +327,10 @@ struct page *pud_page(pud_t pud)
*/
struct page *pmd_page(pmd_t pmd)
{
- if (pmd_large(pmd) || pmd_huge(pmd) || pmd_devmap(pmd))
+ if (pmd_is_leaf(pmd)) {
+ VM_WARN_ON(!(pmd_large(pmd) || pmd_huge(pmd)));
return pte_page(pmd_pte(pmd));
+ }
return virt_to_page(pmd_page_vaddr(pmd));
}
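Several files in this merge replace pmd_huge()/pmd_large()-style checks with the new pmd_is_leaf() family, where a leaf entry maps memory directly (a large page) instead of pointing at a lower-level table. A toy model of the distinction; the 1UL << 62 bit mirrors book3s64's _PAGE_PTE, but the helpers here are illustrative only:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MODEL_PAGE_PTE (1UL << 62) /* stand-in for the radix leaf bit */

    static bool model_pmd_is_leaf(uint64_t pmd_val)
    {
            return pmd_val & MODEL_PAGE_PTE;
    }

    int main(void)
    {
            uint64_t huge  = MODEL_PAGE_PTE | 0x200000; /* 2M leaf mapping */
            uint64_t table = 0x1234000;                 /* next-level table */

            printf("huge is leaf: %d, table is leaf: %d\n",
                   model_pmd_is_leaf(huge), model_pmd_is_leaf(table));
            return 0;
    }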
diff --git a/arch/powerpc/mm/ptdump/ptdump.c b/arch/powerpc/mm/ptdump/ptdump.c
index 39bf1e2cba13..6a88a9f585d4 100644
--- a/arch/powerpc/mm/ptdump/ptdump.c
+++ b/arch/powerpc/mm/ptdump/ptdump.c
@@ -273,7 +273,7 @@ static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
for (i = 0; i < PTRS_PER_PMD; i++, pmd++) {
addr = start + i * PMD_SIZE;
- if (!pmd_none(*pmd) && !pmd_huge(*pmd))
+ if (!pmd_none(*pmd) && !pmd_is_leaf(*pmd))
/* pmd exists */
walk_pte(st, pmd, addr);
else
@@ -289,7 +289,7 @@ static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start)
for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
addr = start + i * PUD_SIZE;
- if (!pud_none(*pud) && !pud_huge(*pud))
+ if (!pud_none(*pud) && !pud_is_leaf(*pud))
/* pud exists */
walk_pmd(st, pud, addr);
else
@@ -310,7 +310,7 @@ static void walk_pagetables(struct pg_state *st)
* the hash pagetable.
*/
for (i = 0; i < PTRS_PER_PGD; i++, pgd++, addr += PGDIR_SIZE) {
- if (!pgd_none(*pgd) && !pgd_huge(*pgd))
+ if (!pgd_none(*pgd) && !pgd_is_leaf(*pgd))
/* pgd exists */
walk_pud(st, pgd, addr);
else