Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/Kconfig | 139
-rw-r--r--  arch/powerpc/Makefile | 11
-rw-r--r--  arch/powerpc/boot/zImage.lds.S | 1
-rw-r--r--  arch/powerpc/crypto/crc32c-vpmsum_glue.c | 2
-rw-r--r--  arch/powerpc/include/asm/bitops.h | 4
-rw-r--r--  arch/powerpc/include/asm/book3s/32/pgtable.h | 1
-rw-r--r--  arch/powerpc/include/asm/book3s/64/pgtable.h | 88
-rw-r--r--  arch/powerpc/include/asm/checksum.h | 2
-rw-r--r--  arch/powerpc/include/asm/cpuidle.h | 4
-rw-r--r--  arch/powerpc/include/asm/elf.h | 4
-rw-r--r--  arch/powerpc/include/asm/extable.h | 29
-rw-r--r--  arch/powerpc/include/asm/mce.h | 108
-rw-r--r--  arch/powerpc/include/asm/nohash/32/pgtable.h | 1
-rw-r--r--  arch/powerpc/include/asm/nohash/64/pgtable-4k.h | 3
-rw-r--r--  arch/powerpc/include/asm/nohash/64/pgtable-64k.h | 1
-rw-r--r--  arch/powerpc/include/asm/nohash/pgtable.h | 2
-rw-r--r--  arch/powerpc/include/asm/ppc-opcode.h | 7
-rw-r--r--  arch/powerpc/include/asm/prom.h | 18
-rw-r--r--  arch/powerpc/include/asm/systbl.h | 1
-rw-r--r--  arch/powerpc/include/asm/uaccess.h | 85
-rw-r--r--  arch/powerpc/include/asm/unistd.h | 2
-rw-r--r--  arch/powerpc/include/uapi/asm/unistd.h | 1
-rw-r--r--  arch/powerpc/kernel/cputable.c | 3
-rw-r--r--  arch/powerpc/kernel/idle_book3s.S | 30
-rw-r--r--  arch/powerpc/kernel/mce.c | 88
-rw-r--r--  arch/powerpc/kernel/mce_power.c | 237
-rw-r--r--  arch/powerpc/kernel/prom_init.c | 120
-rw-r--r--  arch/powerpc/kernel/setup_64.c | 5
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_hv.c | 2
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rm_mmu.c | 2
-rw-r--r--  arch/powerpc/lib/Makefile | 3
-rw-r--r--  arch/powerpc/lib/copy_32.S | 14
-rw-r--r--  arch/powerpc/lib/copyuser_64.S | 35
-rw-r--r--  arch/powerpc/lib/sstep.c | 20
-rw-r--r--  arch/powerpc/lib/test_emulate_step.c | 434
-rw-r--r--  arch/powerpc/lib/usercopy_64.c | 41
-rw-r--r--  arch/powerpc/mm/init_64.c | 39
-rw-r--r--  arch/powerpc/mm/pgtable-radix.c | 4
-rw-r--r--  arch/powerpc/perf/core-book3s.c | 2
-rw-r--r--  arch/powerpc/perf/isa207-common.c | 43
-rw-r--r--  arch/powerpc/perf/isa207-common.h | 1
-rw-r--r--  arch/powerpc/platforms/powernv/opal-wrappers.S | 4
-rw-r--r--  arch/powerpc/platforms/powernv/opal.c | 21
-rw-r--r--  arch/powerpc/platforms/powernv/pci-ioda.c | 20
-rw-r--r--  arch/powerpc/platforms/pseries/lpar.c | 4
-rw-r--r--  arch/powerpc/purgatory/trampoline.S | 12
-rw-r--r--  arch/powerpc/sysdev/axonram.c | 5
-rw-r--r--  arch/powerpc/sysdev/xics/icp-opal.c | 10
-rw-r--r--  arch/powerpc/sysdev/xics/xics-common.c | 17
49 files changed, 1368 insertions(+), 362 deletions(-)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 494091762bd7..839f08887269 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -80,93 +80,100 @@ config ARCH_HAS_DMA_SET_COHERENT_MASK
config PPC
bool
default y
- select BUILDTIME_EXTABLE_SORT
+ #
+ # Please keep this list sorted alphabetically.
+ #
+ select ARCH_HAS_DEVMEM_IS_ALLOWED
+ select ARCH_HAS_DMA_SET_COHERENT_MASK
+ select ARCH_HAS_ELF_RANDOMIZE
+ select ARCH_HAS_GCOV_PROFILE_ALL
+ select ARCH_HAS_RAW_COPY_USER
+ select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE
+ select ARCH_HAS_SG_CHAIN
+ select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
+ select ARCH_HAS_UBSAN_SANITIZE_ALL
+ select ARCH_HAVE_NMI_SAFE_CMPXCHG
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_MIGHT_HAVE_PC_SERIO
+ select ARCH_SUPPORTS_ATOMIC_RMW
+ select ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT
+ select ARCH_USE_BUILTIN_BSWAP
+ select ARCH_USE_CMPXCHG_LOCKREF if PPC64
+ select ARCH_WANT_IPC_PARSE_VERSION
select BINFMT_ELF
- select ARCH_HAS_ELF_RANDOMIZE
- select OF
- select OF_EARLY_FLATTREE
- select OF_RESERVED_MEM
- select HAVE_FTRACE_MCOUNT_RECORD
+ select BUILDTIME_EXTABLE_SORT
+ select CLONE_BACKWARDS
+ select DCACHE_WORD_ACCESS if PPC64 && CPU_LITTLE_ENDIAN
+ select EDAC_ATOMIC_SCRUB
+ select EDAC_SUPPORT
+ select GENERIC_ATOMIC64 if PPC32
+ select GENERIC_CLOCKEVENTS
+ select GENERIC_CLOCKEVENTS_BROADCAST if SMP
+ select GENERIC_CMOS_UPDATE
+ select GENERIC_CPU_AUTOPROBE
+ select GENERIC_IRQ_SHOW
+ select GENERIC_IRQ_SHOW_LEVEL
+ select GENERIC_SMP_IDLE_THREAD
+ select GENERIC_STRNCPY_FROM_USER
+ select GENERIC_STRNLEN_USER
+ select GENERIC_TIME_VSYSCALL_OLD
+ select HAVE_ARCH_AUDITSYSCALL
+ select HAVE_ARCH_HARDENED_USERCOPY
+ select HAVE_ARCH_JUMP_LABEL
+ select HAVE_ARCH_KGDB
+ select HAVE_ARCH_SECCOMP_FILTER
+ select HAVE_ARCH_TRACEHOOK
+ select HAVE_CBPF_JIT if !PPC64
+ select HAVE_CONTEXT_TRACKING if PPC64
+ select HAVE_DEBUG_KMEMLEAK
+ select HAVE_DEBUG_STACKOVERFLOW
+ select HAVE_DMA_API_DEBUG
select HAVE_DYNAMIC_FTRACE
- select HAVE_DYNAMIC_FTRACE_WITH_REGS if MPROFILE_KERNEL
- select HAVE_FUNCTION_TRACER
+ select HAVE_DYNAMIC_FTRACE_WITH_REGS if MPROFILE_KERNEL
+ select HAVE_EBPF_JIT if PPC64
+ select HAVE_EFFICIENT_UNALIGNED_ACCESS if !(CPU_LITTLE_ENDIAN && POWER7_CPU)
+ select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_FUNCTION_GRAPH_TRACER
+ select HAVE_FUNCTION_TRACER
select HAVE_GCC_PLUGINS
- select SYSCTL_EXCEPTION_TRACE
- select VIRT_TO_BUS if !PPC64
+ select HAVE_GENERIC_RCU_GUP
+ select HAVE_HW_BREAKPOINT if PERF_EVENTS && (PPC_BOOK3S || PPC_8xx)
select HAVE_IDE
select HAVE_IOREMAP_PROT
- select HAVE_EFFICIENT_UNALIGNED_ACCESS if !(CPU_LITTLE_ENDIAN && POWER7_CPU)
+ select HAVE_IRQ_EXIT_ON_IRQ_STACK
+ select HAVE_KERNEL_GZIP
select HAVE_KPROBES
- select HAVE_OPTPROBES if PPC64
- select HAVE_ARCH_KGDB
select HAVE_KRETPROBES
- select HAVE_ARCH_TRACEHOOK
+ select HAVE_LIVEPATCH if HAVE_DYNAMIC_FTRACE_WITH_REGS
select HAVE_MEMBLOCK
select HAVE_MEMBLOCK_NODE_MAP
- select HAVE_DMA_API_DEBUG
+ select HAVE_MOD_ARCH_SPECIFIC
+ select HAVE_NMI if PERF_EVENTS
select HAVE_OPROFILE
- select HAVE_DEBUG_KMEMLEAK
- select ARCH_HAS_SG_CHAIN
- select GENERIC_ATOMIC64 if PPC32
+ select HAVE_OPTPROBES if PPC64
select HAVE_PERF_EVENTS
+ select HAVE_PERF_EVENTS_NMI if PPC64
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
+ select HAVE_RCU_TABLE_FREE if SMP
select HAVE_REGS_AND_STACK_ACCESS_API
- select HAVE_HW_BREAKPOINT if PERF_EVENTS && (PPC_BOOK3S || PPC_8xx)
- select ARCH_WANT_IPC_PARSE_VERSION
- select SPARSE_IRQ
+ select HAVE_SYSCALL_TRACEPOINTS
+ select HAVE_VIRT_CPU_ACCOUNTING
select IRQ_DOMAIN
- select GENERIC_IRQ_SHOW
- select GENERIC_IRQ_SHOW_LEVEL
select IRQ_FORCED_THREADING
- select HAVE_RCU_TABLE_FREE if SMP
- select HAVE_SYSCALL_TRACEPOINTS
- select HAVE_CBPF_JIT if !PPC64
- select HAVE_EBPF_JIT if PPC64
- select HAVE_ARCH_JUMP_LABEL
- select ARCH_HAVE_NMI_SAFE_CMPXCHG
- select ARCH_HAS_GCOV_PROFILE_ALL
- select GENERIC_SMP_IDLE_THREAD
- select GENERIC_CMOS_UPDATE
- select GENERIC_TIME_VSYSCALL_OLD
- select GENERIC_CLOCKEVENTS
- select GENERIC_CLOCKEVENTS_BROADCAST if SMP
- select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
- select GENERIC_STRNCPY_FROM_USER
- select GENERIC_STRNLEN_USER
- select HAVE_MOD_ARCH_SPECIFIC
select MODULES_USE_ELF_RELA
- select CLONE_BACKWARDS
- select ARCH_USE_BUILTIN_BSWAP
- select OLD_SIGSUSPEND
- select OLD_SIGACTION if PPC32
- select HAVE_DEBUG_STACKOVERFLOW
- select HAVE_IRQ_EXIT_ON_IRQ_STACK
- select ARCH_USE_CMPXCHG_LOCKREF if PPC64
- select HAVE_ARCH_AUDITSYSCALL
- select ARCH_SUPPORTS_ATOMIC_RMW
- select DCACHE_WORD_ACCESS if PPC64 && CPU_LITTLE_ENDIAN
select NO_BOOTMEM
- select HAVE_GENERIC_RCU_GUP
- select HAVE_PERF_EVENTS_NMI if PPC64
- select HAVE_NMI if PERF_EVENTS
- select EDAC_SUPPORT
- select EDAC_ATOMIC_SCRUB
- select ARCH_HAS_DMA_SET_COHERENT_MASK
- select ARCH_HAS_DEVMEM_IS_ALLOWED
- select HAVE_ARCH_SECCOMP_FILTER
- select ARCH_HAS_UBSAN_SANITIZE_ALL
- select ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT
- select HAVE_LIVEPATCH if HAVE_DYNAMIC_FTRACE_WITH_REGS
- select GENERIC_CPU_AUTOPROBE
- select HAVE_VIRT_CPU_ACCOUNTING
- select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE
- select HAVE_ARCH_HARDENED_USERCOPY
- select HAVE_KERNEL_GZIP
- select HAVE_CONTEXT_TRACKING if PPC64
+ select OF
+ select OF_EARLY_FLATTREE
+ select OF_RESERVED_MEM
+ select OLD_SIGACTION if PPC32
+ select OLD_SIGSUSPEND
+ select SPARSE_IRQ
+ select SYSCTL_EXCEPTION_TRACE
+ select VIRT_TO_BUS if !PPC64
+ #
+ # Please keep this list sorted alphabetically.
+ #
config GENERIC_CSUM
def_bool n
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 31286fa7873c..19b0d1a81959 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -72,8 +72,15 @@ GNUTARGET := powerpc
MULTIPLEWORD := -mmultiple
endif
-cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mbig-endian)
+ifdef CONFIG_PPC64
+cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mabi=elfv1)
+cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mcall-aixdesc)
+aflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mabi=elfv1)
+aflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mabi=elfv2
+endif
+
cflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mlittle-endian
+cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mbig-endian)
ifneq ($(cc-name),clang)
cflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mno-strict-align
endif
@@ -113,7 +120,9 @@ ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y)
CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv2,$(call cc-option,-mcall-aixdesc))
AFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv2)
else
+CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv1)
CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mcall-aixdesc)
+AFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv1)
endif
CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mcmodel=medium,$(call cc-option,-mminimal-toc))
CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mno-pointers-to-nested-functions)
diff --git a/arch/powerpc/boot/zImage.lds.S b/arch/powerpc/boot/zImage.lds.S
index 861e72109df2..f080abfc2f83 100644
--- a/arch/powerpc/boot/zImage.lds.S
+++ b/arch/powerpc/boot/zImage.lds.S
@@ -68,6 +68,7 @@ SECTIONS
}
#ifdef CONFIG_PPC64_BOOT_WRAPPER
+ . = ALIGN(256);
.got :
{
__toc_start = .;
diff --git a/arch/powerpc/crypto/crc32c-vpmsum_glue.c b/arch/powerpc/crypto/crc32c-vpmsum_glue.c
index 9fa046d56eba..411994551afc 100644
--- a/arch/powerpc/crypto/crc32c-vpmsum_glue.c
+++ b/arch/powerpc/crypto/crc32c-vpmsum_glue.c
@@ -52,7 +52,7 @@ static int crc32c_vpmsum_cra_init(struct crypto_tfm *tfm)
{
u32 *key = crypto_tfm_ctx(tfm);
- *key = 0;
+ *key = ~0;
return 0;
}
diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h
index 73eb794d6163..bc5fdfd22788 100644
--- a/arch/powerpc/include/asm/bitops.h
+++ b/arch/powerpc/include/asm/bitops.h
@@ -51,6 +51,10 @@
#define PPC_BIT(bit) (1UL << PPC_BITLSHIFT(bit))
#define PPC_BITMASK(bs, be) ((PPC_BIT(bs) - PPC_BIT(be)) | PPC_BIT(bs))
+/* Put a PPC bit into a "normal" bit position */
+#define PPC_BITEXTRACT(bits, ppc_bit, dst_bit) \
+ ((((bits) >> PPC_BITLSHIFT(ppc_bit)) & 1) << (dst_bit))
+
#include <asm/barrier.h>
/* Macro for generating the ***_bits() functions */
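PPC_BITLSHIFT() converts IBM MSB-0 bit numbering (bit 0 is the most significant of 64) into a left-shift count, and the new PPC_BITEXTRACT() uses it to drop an IBM-numbered bit into a conventional LSB-0 position. A standalone sketch of the arithmetic, reimplemented outside the kernel purely for illustration:

#include <stdint.h>
#include <stdio.h>

/* Mirrors PPC_BITLSHIFT()/PPC_BITEXTRACT() for 64-bit values;
 * standalone reimplementation, not the kernel headers. */
#define BITLSHIFT(b)              (63 - (b))
#define BITEXTRACT(bits, pb, db)  ((((bits) >> BITLSHIFT(pb)) & 1ULL) << (db))

int main(void)
{
	uint64_t srr1 = 1ULL << BITLSHIFT(45);	/* set IBM bit 45 */

	/* IBM bit 45 is machine bit 18; it lands at LSB-0 position 0 */
	printf("%llu\n", (unsigned long long)BITEXTRACT(srr1, 45, 0));
	return 0;
}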
diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
index 012223638815..26ed228d4dc6 100644
--- a/arch/powerpc/include/asm/book3s/32/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
@@ -1,6 +1,7 @@
#ifndef _ASM_POWERPC_BOOK3S_32_PGTABLE_H
#define _ASM_POWERPC_BOOK3S_32_PGTABLE_H
+#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
#include <asm/book3s/32/hash.h>
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 1eeeb72c7015..8f4d41936e5a 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -1,9 +1,12 @@
#ifndef _ASM_POWERPC_BOOK3S_64_PGTABLE_H_
#define _ASM_POWERPC_BOOK3S_64_PGTABLE_H_
+#include <asm-generic/5level-fixup.h>
+
#ifndef __ASSEMBLY__
#include <linux/mmdebug.h>
#endif
+
/*
* Common bits between hash and Radix page table
*/
@@ -347,23 +350,58 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
__r; \
})
+static inline int __pte_write(pte_t pte)
+{
+ return !!(pte_raw(pte) & cpu_to_be64(_PAGE_WRITE));
+}
+
+#ifdef CONFIG_NUMA_BALANCING
+#define pte_savedwrite pte_savedwrite
+static inline bool pte_savedwrite(pte_t pte)
+{
+ /*
+ * Saved write ptes are prot none ptes that don't have the
+ * privileged bit set. We mark prot none as one which has the
+ * present and privileged bits set and RWX cleared. To mark
+ * protnone which used to have _PAGE_WRITE set we clear
+ * the privileged bit.
+ */
+ return !(pte_raw(pte) & cpu_to_be64(_PAGE_RWX | _PAGE_PRIVILEGED));
+}
+#else
+#define pte_savedwrite pte_savedwrite
+static inline bool pte_savedwrite(pte_t pte)
+{
+ return false;
+}
+#endif
+
+static inline int pte_write(pte_t pte)
+{
+ return __pte_write(pte) || pte_savedwrite(pte);
+}
+
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
{
- if ((pte_raw(*ptep) & cpu_to_be64(_PAGE_WRITE)) == 0)
- return;
-
- pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 0);
+ if (__pte_write(*ptep))
+ pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 0);
+ else if (unlikely(pte_savedwrite(*ptep)))
+ pte_update(mm, addr, ptep, 0, _PAGE_PRIVILEGED, 0);
}
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
- if ((pte_raw(*ptep) & cpu_to_be64(_PAGE_WRITE)) == 0)
- return;
-
- pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 1);
+ /*
+ * We should not find protnone for hugetlb, but this completes the
+ * interface.
+ */
+ if (__pte_write(*ptep))
+ pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 1);
+ else if (unlikely(pte_savedwrite(*ptep)))
+ pte_update(mm, addr, ptep, 0, _PAGE_PRIVILEGED, 1);
}
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
@@ -397,11 +435,6 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
pte_update(mm, addr, ptep, ~0UL, 0, 0);
}
-static inline int pte_write(pte_t pte)
-{
- return !!(pte_raw(pte) & cpu_to_be64(_PAGE_WRITE));
-}
-
static inline int pte_dirty(pte_t pte)
{
return !!(pte_raw(pte) & cpu_to_be64(_PAGE_DIRTY));
@@ -465,19 +498,12 @@ static inline pte_t pte_clear_savedwrite(pte_t pte)
VM_BUG_ON(!pte_protnone(pte));
return __pte(pte_val(pte) | _PAGE_PRIVILEGED);
}
-
-#define pte_savedwrite pte_savedwrite
-static inline bool pte_savedwrite(pte_t pte)
+#else
+#define pte_clear_savedwrite pte_clear_savedwrite
+static inline pte_t pte_clear_savedwrite(pte_t pte)
{
- /*
- * Saved write ptes are prot none ptes that doesn't have
- * privileged bit sit. We mark prot none as one which has
- * present and pviliged bit set and RWX cleared. To mark
- * protnone which used to have _PAGE_WRITE set we clear
- * the privileged bit.
- */
- VM_BUG_ON(!pte_protnone(pte));
- return !(pte_raw(pte) & cpu_to_be64(_PAGE_RWX | _PAGE_PRIVILEGED));
+ VM_WARN_ON(1);
+ return __pte(pte_val(pte) & ~_PAGE_WRITE);
}
#endif /* CONFIG_NUMA_BALANCING */
@@ -506,6 +532,8 @@ static inline unsigned long pte_pfn(pte_t pte)
/* Generic modifiers for PTE bits */
static inline pte_t pte_wrprotect(pte_t pte)
{
+ if (unlikely(pte_savedwrite(pte)))
+ return pte_clear_savedwrite(pte);
return __pte(pte_val(pte) & ~_PAGE_WRITE);
}
@@ -926,6 +954,7 @@ static inline int pmd_protnone(pmd_t pmd)
#define __HAVE_ARCH_PMD_WRITE
#define pmd_write(pmd) pte_write(pmd_pte(pmd))
+#define __pmd_write(pmd) __pte_write(pmd_pte(pmd))
#define pmd_savedwrite(pmd) pte_savedwrite(pmd_pte(pmd))
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -982,11 +1011,10 @@ static inline int __pmdp_test_and_clear_young(struct mm_struct *mm,
static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp)
{
-
- if ((pmd_raw(*pmdp) & cpu_to_be64(_PAGE_WRITE)) == 0)
- return;
-
- pmd_hugepage_update(mm, addr, pmdp, _PAGE_WRITE, 0);
+ if (__pmd_write((*pmdp)))
+ pmd_hugepage_update(mm, addr, pmdp, _PAGE_WRITE, 0);
+ else if (unlikely(pmd_savedwrite(*pmdp)))
+ pmd_hugepage_update(mm, addr, pmdp, 0, _PAGE_PRIVILEGED);
}
static inline int pmd_trans_huge(pmd_t pmd)
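The savedwrite scheme in the hunk above encodes "was writable before PROT_NONE" by clearing _PAGE_PRIVILEGED on an entry whose RWX bits are already clear, so pte_write() must accept either encoding. A minimal model of the two predicates, using illustrative bit values rather than the kernel's real _PAGE_* layout or big-endian raw accessors:

#define _PAGE_READ		0x1UL
#define _PAGE_WRITE		0x2UL
#define _PAGE_EXEC		0x4UL
#define _PAGE_RWX		(_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC)
#define _PAGE_PRIVILEGED	0x8UL

static int model_pte_write(unsigned long pte)
{
	int plain = !!(pte & _PAGE_WRITE);			/* __pte_write() */
	int saved = !(pte & (_PAGE_RWX | _PAGE_PRIVILEGED));	/* pte_savedwrite() */

	return plain || saved;	/* pte_write() accepts either encoding */
}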
diff --git a/arch/powerpc/include/asm/checksum.h b/arch/powerpc/include/asm/checksum.h
index 4e63787dc3be..842124b199b5 100644
--- a/arch/powerpc/include/asm/checksum.h
+++ b/arch/powerpc/include/asm/checksum.h
@@ -112,7 +112,7 @@ static inline __wsum csum_add(__wsum csum, __wsum addend)
#ifdef __powerpc64__
res += (__force u64)addend;
- return (__force __wsum)((u32)res + (res >> 32));
+ return (__force __wsum) from64to32(res);
#else
asm("addc %0,%0,%1;"
"addze %0,%0;"
diff --git a/arch/powerpc/include/asm/cpuidle.h b/arch/powerpc/include/asm/cpuidle.h
index fd321eb423cb..155731557c9b 100644
--- a/arch/powerpc/include/asm/cpuidle.h
+++ b/arch/powerpc/include/asm/cpuidle.h
@@ -70,8 +70,8 @@ static inline void report_invalid_psscr_val(u64 psscr_val, int err)
std r0,0(r1); \
ptesync; \
ld r0,0(r1); \
-1: cmpd cr0,r0,r0; \
- bne 1b; \
+236: cmpd cr0,r0,r0; \
+ bne 236b; \
IDLE_INST; \
#define IDLE_STATE_ENTER_SEQ_NORET(IDLE_INST) \
diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
index 93b9b84568e8..09bde6e34f5d 100644
--- a/arch/powerpc/include/asm/elf.h
+++ b/arch/powerpc/include/asm/elf.h
@@ -144,8 +144,8 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
#define ARCH_DLINFO_CACHE_GEOMETRY \
NEW_AUX_ENT(AT_L1I_CACHESIZE, ppc64_caches.l1i.size); \
NEW_AUX_ENT(AT_L1I_CACHEGEOMETRY, get_cache_geometry(l1i)); \
- NEW_AUX_ENT(AT_L1D_CACHESIZE, ppc64_caches.l1i.size); \
- NEW_AUX_ENT(AT_L1D_CACHEGEOMETRY, get_cache_geometry(l1i)); \
+ NEW_AUX_ENT(AT_L1D_CACHESIZE, ppc64_caches.l1d.size); \
+ NEW_AUX_ENT(AT_L1D_CACHEGEOMETRY, get_cache_geometry(l1d)); \
NEW_AUX_ENT(AT_L2_CACHESIZE, ppc64_caches.l2.size); \
NEW_AUX_ENT(AT_L2_CACHEGEOMETRY, get_cache_geometry(l2)); \
NEW_AUX_ENT(AT_L3_CACHESIZE, ppc64_caches.l3.size); \
diff --git a/arch/powerpc/include/asm/extable.h b/arch/powerpc/include/asm/extable.h
new file mode 100644
index 000000000000..07cc45cd86d9
--- /dev/null
+++ b/arch/powerpc/include/asm/extable.h
@@ -0,0 +1,29 @@
+#ifndef _ARCH_POWERPC_EXTABLE_H
+#define _ARCH_POWERPC_EXTABLE_H
+
+/*
+ * The exception table consists of pairs of relative addresses: the first is
+ * the address of an instruction that is allowed to fault, and the second is
+ * the address at which the program should continue. No registers are
+ * modified, so it is entirely up to the continuation code to figure out what
+ * to do.
+ *
+ * All the routines below use bits of fixup code that are out of line with the
+ * main instruction path. This means when everything is well, we don't even
+ * have to jump over them. Further, they do not intrude on our cache or tlb
+ * entries.
+ */
+
+#define ARCH_HAS_RELATIVE_EXTABLE
+
+struct exception_table_entry {
+ int insn;
+ int fixup;
+};
+
+static inline unsigned long extable_fixup(const struct exception_table_entry *x)
+{
+ return (unsigned long)&x->fixup + x->fixup;
+}
+
+#endif
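With ARCH_HAS_RELATIVE_EXTABLE each entry stores two 32-bit self-relative offsets instead of two 64-bit absolute addresses, halving the table on 64-bit and keeping it position-independent. extable_fixup() above recovers the fixup address; the matching instruction-side accessor (the generic search code's ex_to_insn() does the same arithmetic) would look like this sketch, which reuses the struct defined above:

static inline unsigned long extable_insn(const struct exception_table_entry *x)
{
	return (unsigned long)&x->insn + x->insn;
}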
diff --git a/arch/powerpc/include/asm/mce.h b/arch/powerpc/include/asm/mce.h
index f97d8cb6bdf6..ed62efe01e49 100644
--- a/arch/powerpc/include/asm/mce.h
+++ b/arch/powerpc/include/asm/mce.h
@@ -66,6 +66,55 @@
#define P8_DSISR_MC_SLB_ERRORS (P7_DSISR_MC_SLB_ERRORS | \
P8_DSISR_MC_ERAT_MULTIHIT_SEC)
+
+/*
+ * Machine Check bits on power9
+ */
+#define P9_SRR1_MC_LOADSTORE(srr1) (((srr1) >> PPC_BITLSHIFT(42)) & 1)
+
+#define P9_SRR1_MC_IFETCH(srr1) ( \
+ PPC_BITEXTRACT(srr1, 45, 0) | \
+ PPC_BITEXTRACT(srr1, 44, 1) | \
+ PPC_BITEXTRACT(srr1, 43, 2) | \
+ PPC_BITEXTRACT(srr1, 36, 3) )
+
+/* 0 is reserved */
+#define P9_SRR1_MC_IFETCH_UE 1
+#define P9_SRR1_MC_IFETCH_SLB_PARITY 2
+#define P9_SRR1_MC_IFETCH_SLB_MULTIHIT 3
+#define P9_SRR1_MC_IFETCH_ERAT_MULTIHIT 4
+#define P9_SRR1_MC_IFETCH_TLB_MULTIHIT 5
+#define P9_SRR1_MC_IFETCH_UE_TLB_RELOAD 6
+/* 7 is reserved */
+#define P9_SRR1_MC_IFETCH_LINK_TIMEOUT 8
+#define P9_SRR1_MC_IFETCH_LINK_TABLEWALK_TIMEOUT 9
+/* 10 ? */
+#define P9_SRR1_MC_IFETCH_RA 11
+#define P9_SRR1_MC_IFETCH_RA_TABLEWALK 12
+#define P9_SRR1_MC_IFETCH_RA_ASYNC_STORE 13
+#define P9_SRR1_MC_IFETCH_LINK_ASYNC_STORE_TIMEOUT 14
+#define P9_SRR1_MC_IFETCH_RA_TABLEWALK_FOREIGN 15
+
+/* DSISR bits for machine check (On Power9) */
+#define P9_DSISR_MC_UE (PPC_BIT(48))
+#define P9_DSISR_MC_UE_TABLEWALK (PPC_BIT(49))
+#define P9_DSISR_MC_LINK_LOAD_TIMEOUT (PPC_BIT(50))
+#define P9_DSISR_MC_LINK_TABLEWALK_TIMEOUT (PPC_BIT(51))
+#define P9_DSISR_MC_ERAT_MULTIHIT (PPC_BIT(52))
+#define P9_DSISR_MC_TLB_MULTIHIT_MFTLB (PPC_BIT(53))
+#define P9_DSISR_MC_USER_TLBIE (PPC_BIT(54))
+#define P9_DSISR_MC_SLB_PARITY_MFSLB (PPC_BIT(55))
+#define P9_DSISR_MC_SLB_MULTIHIT_MFSLB (PPC_BIT(56))
+#define P9_DSISR_MC_RA_LOAD (PPC_BIT(57))
+#define P9_DSISR_MC_RA_TABLEWALK (PPC_BIT(58))
+#define P9_DSISR_MC_RA_TABLEWALK_FOREIGN (PPC_BIT(59))
+#define P9_DSISR_MC_RA_FOREIGN (PPC_BIT(60))
+
+/* SLB error bits */
+#define P9_DSISR_MC_SLB_ERRORS (P9_DSISR_MC_ERAT_MULTIHIT | \
+ P9_DSISR_MC_SLB_PARITY_MFSLB | \
+ P9_DSISR_MC_SLB_MULTIHIT_MFSLB)
+
enum MCE_Version {
MCE_V1 = 1,
};
@@ -93,6 +142,9 @@ enum MCE_ErrorType {
MCE_ERROR_TYPE_SLB = 2,
MCE_ERROR_TYPE_ERAT = 3,
MCE_ERROR_TYPE_TLB = 4,
+ MCE_ERROR_TYPE_USER = 5,
+ MCE_ERROR_TYPE_RA = 6,
+ MCE_ERROR_TYPE_LINK = 7,
};
enum MCE_UeErrorType {
@@ -121,6 +173,32 @@ enum MCE_TlbErrorType {
MCE_TLB_ERROR_MULTIHIT = 2,
};
+enum MCE_UserErrorType {
+ MCE_USER_ERROR_INDETERMINATE = 0,
+ MCE_USER_ERROR_TLBIE = 1,
+};
+
+enum MCE_RaErrorType {
+ MCE_RA_ERROR_INDETERMINATE = 0,
+ MCE_RA_ERROR_IFETCH = 1,
+ MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH = 2,
+ MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH_FOREIGN = 3,
+ MCE_RA_ERROR_LOAD = 4,
+ MCE_RA_ERROR_STORE = 5,
+ MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE = 6,
+ MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE_FOREIGN = 7,
+ MCE_RA_ERROR_LOAD_STORE_FOREIGN = 8,
+};
+
+enum MCE_LinkErrorType {
+ MCE_LINK_ERROR_INDETERMINATE = 0,
+ MCE_LINK_ERROR_IFETCH_TIMEOUT = 1,
+ MCE_LINK_ERROR_PAGE_TABLE_WALK_IFETCH_TIMEOUT = 2,
+ MCE_LINK_ERROR_LOAD_TIMEOUT = 3,
+ MCE_LINK_ERROR_STORE_TIMEOUT = 4,
+ MCE_LINK_ERROR_PAGE_TABLE_WALK_LOAD_STORE_TIMEOUT = 5,
+};
+
struct machine_check_event {
enum MCE_Version version:8; /* 0x00 */
uint8_t in_use; /* 0x01 */
@@ -166,6 +244,30 @@ struct machine_check_event {
uint64_t effective_address;
uint8_t reserved_2[16];
} tlb_error;
+
+ struct {
+ enum MCE_UserErrorType user_error_type:8;
+ uint8_t effective_address_provided;
+ uint8_t reserved_1[6];
+ uint64_t effective_address;
+ uint8_t reserved_2[16];
+ } user_error;
+
+ struct {
+ enum MCE_RaErrorType ra_error_type:8;
+ uint8_t effective_address_provided;
+ uint8_t reserved_1[6];
+ uint64_t effective_address;
+ uint8_t reserved_2[16];
+ } ra_error;
+
+ struct {
+ enum MCE_LinkErrorType link_error_type:8;
+ uint8_t effective_address_provided;
+ uint8_t reserved_1[6];
+ uint64_t effective_address;
+ uint8_t reserved_2[16];
+ } link_error;
} u;
};
@@ -176,8 +278,12 @@ struct mce_error_info {
enum MCE_SlbErrorType slb_error_type:8;
enum MCE_EratErrorType erat_error_type:8;
enum MCE_TlbErrorType tlb_error_type:8;
+ enum MCE_UserErrorType user_error_type:8;
+ enum MCE_RaErrorType ra_error_type:8;
+ enum MCE_LinkErrorType link_error_type:8;
} u;
- uint8_t reserved[2];
+ enum MCE_Severity severity:8;
+ enum MCE_Initiator initiator:8;
};
#define MAX_MC_EVT 100
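P9_SRR1_MC_IFETCH() above gathers four scattered SRR1 bits (IBM bits 45, 44, 43, 36) into a 4-bit code, with bit 36 as the most significant. A standalone sketch of that decode, with an illustrative input:

#include <stdint.h>

static unsigned int p9_ifetch_code(uint64_t srr1)
{
	return (unsigned int)((((srr1 >> (63 - 45)) & 1) << 0) |
			      (((srr1 >> (63 - 44)) & 1) << 1) |
			      (((srr1 >> (63 - 43)) & 1) << 2) |
			      (((srr1 >> (63 - 36)) & 1) << 3));
}

/* e.g. SRR1 with only IBM bit 44 set decodes to 2,
 * i.e. P9_SRR1_MC_IFETCH_SLB_PARITY. */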
diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h
index ba9921bf202e..5134ade2e850 100644
--- a/arch/powerpc/include/asm/nohash/32/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
@@ -1,6 +1,7 @@
#ifndef _ASM_POWERPC_NOHASH_32_PGTABLE_H
#define _ASM_POWERPC_NOHASH_32_PGTABLE_H
+#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
#ifndef __ASSEMBLY__
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable-4k.h b/arch/powerpc/include/asm/nohash/64/pgtable-4k.h
index d0db98793dd8..9f4de0a1035e 100644
--- a/arch/powerpc/include/asm/nohash/64/pgtable-4k.h
+++ b/arch/powerpc/include/asm/nohash/64/pgtable-4k.h
@@ -1,5 +1,8 @@
#ifndef _ASM_POWERPC_NOHASH_64_PGTABLE_4K_H
#define _ASM_POWERPC_NOHASH_64_PGTABLE_4K_H
+
+#include <asm-generic/5level-fixup.h>
+
/*
* Entries per page directory level. The PTE level must use a 64b record
* for each page table entry. The PMD and PGD level use a 32b record for
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable-64k.h b/arch/powerpc/include/asm/nohash/64/pgtable-64k.h
index 55b28ef3409a..1facb584dd29 100644
--- a/arch/powerpc/include/asm/nohash/64/pgtable-64k.h
+++ b/arch/powerpc/include/asm/nohash/64/pgtable-64k.h
@@ -1,6 +1,7 @@
#ifndef _ASM_POWERPC_NOHASH_64_PGTABLE_64K_H
#define _ASM_POWERPC_NOHASH_64_PGTABLE_64K_H
+#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopud.h>
diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h
index 0cd8a3852763..e5805ad78e12 100644
--- a/arch/powerpc/include/asm/nohash/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/pgtable.h
@@ -230,7 +230,7 @@ static inline int hugepd_ok(hugepd_t hpd)
return ((hpd_val(hpd) & 0x4) != 0);
#else
/* We clear the top bit to indicate hugepd */
- return ((hpd_val(hpd) & PD_HUGE) == 0);
+ return (hpd_val(hpd) && (hpd_val(hpd) & PD_HUGE) == 0);
#endif
}
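The added hpd_val(hpd) test closes a real hole: an empty (all-zero) page-directory entry also has PD_HUGE clear, so the old predicate misreported empty entries as hugepd pointers. A minimal model, assuming PD_HUGE is the top bit as on these 64-bit nohash configs:

#define PD_HUGE	0x8000000000000000UL	/* top bit, as on 64-bit nohash */

static int model_hugepd_ok(unsigned long hpd)
{
	/* must be a non-empty entry AND have the top bit clear */
	return hpd && (hpd & PD_HUGE) == 0;
}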
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index d99bd442aacb..e7d6d86563ee 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -284,6 +284,13 @@
#define PPC_INST_BRANCH_COND 0x40800000
#define PPC_INST_LBZCIX 0x7c0006aa
#define PPC_INST_STBCIX 0x7c0007aa
+#define PPC_INST_LWZX 0x7c00002e
+#define PPC_INST_LFSX 0x7c00042e
+#define PPC_INST_STFSX 0x7c00052e
+#define PPC_INST_LFDX 0x7c0004ae
+#define PPC_INST_STFDX 0x7c0005ae
+#define PPC_INST_LVX 0x7c0000ce
+#define PPC_INST_STVX 0x7c0001ce
/* macros to insert fields into opcodes */
#define ___PPC_RA(a) (((a) & 0x1f) << 16)
diff --git a/arch/powerpc/include/asm/prom.h b/arch/powerpc/include/asm/prom.h
index 4a90634e8322..35c00d7a0cf8 100644
--- a/arch/powerpc/include/asm/prom.h
+++ b/arch/powerpc/include/asm/prom.h
@@ -160,12 +160,18 @@ struct of_drconf_cell {
#define OV5_PFO_HW_ENCR 0x1120 /* PFO Encryption Accelerator */
#define OV5_SUB_PROCESSORS 0x1501 /* 1,2,or 4 Sub-Processors supported */
#define OV5_XIVE_EXPLOIT 0x1701 /* XIVE exploitation supported */
-#define OV5_MMU_RADIX_300 0x1880 /* ISA v3.00 radix MMU supported */
-#define OV5_MMU_HASH_300 0x1840 /* ISA v3.00 hash MMU supported */
-#define OV5_MMU_SEGM_RADIX 0x1820 /* radix mode (no segmentation) */
-#define OV5_MMU_PROC_TBL 0x1810 /* hcall selects SLB or proc table */
-#define OV5_MMU_SLB 0x1800 /* always use SLB */
-#define OV5_MMU_GTSE 0x1808 /* Guest translation shootdown */
+/* MMU Base Architecture */
+#define OV5_MMU_SUPPORT 0x18C0 /* MMU Mode Support Mask */
+#define OV5_MMU_HASH 0x1800 /* Hash MMU Only */
+#define OV5_MMU_RADIX 0x1840 /* Radix MMU Only */
+#define OV5_MMU_EITHER 0x1880 /* Hash or Radix Supported */
+#define OV5_MMU_DYNAMIC 0x18C0 /* Hash or Radix Can Switch Later */
+#define OV5_NMMU 0x1820 /* Nest MMU Available */
+/* Hash Table Extensions */
+#define OV5_HASH_SEG_TBL 0x1980 /* In Memory Segment Tables Available */
+#define OV5_HASH_GTSE 0x1940 /* Guest Translation Shoot Down Avail */
+/* Radix Table Extensions */
+#define OV5_RADIX_GTSE 0x1A40 /* Guest Translation Shoot Down Avail */
/* Option Vector 6: IBM PAPR hints */
#define OV6_LINUX 0x02 /* Linux is our OS */
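These OV5 constants pack two things into one value: the high byte is the byte index within option vector 5 and the low byte is a bit mask within that byte, split by the existing OV5_INDX()/OV5_FEAT() helpers (which this patch leaves unchanged). For example:

#define OV5_INDX(x)	((x) >> 8)	/* which byte of vector 5 */
#define OV5_FEAT(x)	((x) & 0xff)	/* which bits within that byte */

/* OV5_MMU_SUPPORT = 0x18C0: byte 0x18, mask 0xC0 -- the top two bits
 * of byte 0x18 carry the MMU model (hash / radix / either / dynamic).
 * OV5_RADIX_GTSE = 0x1A40: byte 0x1A, bit mask 0x40. */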
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index 4b369d83fe9c..1c9470881c4a 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -387,3 +387,4 @@ SYSCALL(copy_file_range)
COMPAT_SYS_SPU(preadv2)
COMPAT_SYS_SPU(pwritev2)
SYSCALL(kexec_file_load)
+SYSCALL(statx)
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index 3904040a3542..5c0d8a8cdae5 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -5,6 +5,7 @@
#include <asm/ppc_asm.h>
#include <asm/processor.h>
#include <asm/page.h>
+#include <asm/extable.h>
/*
* The fs value determines whether argument validity checking should be
@@ -56,31 +57,6 @@
__access_ok((__force unsigned long)(addr), (size), get_fs()))
/*
- * The exception table consists of pairs of relative addresses: the first is
- * the address of an instruction that is allowed to fault, and the second is
- * the address at which the program should continue. No registers are
- * modified, so it is entirely up to the continuation code to figure out what
- * to do.
- *
- * All the routines below use bits of fixup code that are out of line with the
- * main instruction path. This means when everything is well, we don't even
- * have to jump over them. Further, they do not intrude on our cache or tlb
- * entries.
- */
-
-#define ARCH_HAS_RELATIVE_EXTABLE
-
-struct exception_table_entry {
- int insn;
- int fixup;
-};
-
-static inline unsigned long extable_fixup(const struct exception_table_entry *x)
-{
- return (unsigned long)&x->fixup + x->fixup;
-}
-
-/*
* These are the main single-value transfer routines. They automatically
* use the right size if we just have the right pointer type.
*
@@ -293,42 +269,19 @@ extern unsigned long __copy_tofrom_user(void __user *to,
#ifndef __powerpc64__
-static inline unsigned long copy_from_user(void *to,
- const void __user *from, unsigned long n)
-{
- if (likely(access_ok(VERIFY_READ, from, n))) {
- check_object_size(to, n, false);
- return __copy_tofrom_user((__force void __user *)to, from, n);
- }
- memset(to, 0, n);
- return n;
-}
-
-static inline unsigned long copy_to_user(void __user *to,
- const void *from, unsigned long n)
-{
- if (access_ok(VERIFY_WRITE, to, n)) {
- check_object_size(from, n, true);
- return __copy_tofrom_user(to, (__force void __user *)from, n);
- }
- return n;
-}
+#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_TO_USER
#else /* __powerpc64__ */
-#define __copy_in_user(to, from, size) \
- __copy_tofrom_user((to), (from), (size))
-
-extern unsigned long copy_from_user(void *to, const void __user *from,
- unsigned long n);
-extern unsigned long copy_to_user(void __user *to, const void *from,
- unsigned long n);
-extern unsigned long copy_in_user(void __user *to, const void __user *from,
- unsigned long n);
-
+static inline unsigned long
+raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
+{
+ return __copy_tofrom_user(to, from, n);
+}
#endif /* __powerpc64__ */
-static inline unsigned long __copy_from_user_inatomic(void *to,
+static inline unsigned long raw_copy_from_user(void *to,
const void __user *from, unsigned long n)
{
if (__builtin_constant_p(n) && (n <= 8)) {
@@ -352,12 +305,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
return 0;
}
- check_object_size(to, n, false);
-
return __copy_tofrom_user((__force void __user *)to, from, n);
}
-static inline unsigned long __copy_to_user_inatomic(void __user *to,
+static inline unsigned long raw_copy_to_user(void __user *to,
const void *from, unsigned long n)
{
if (__builtin_constant_p(n) && (n <= 8)) {
@@ -381,25 +332,9 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
return 0;
}
- check_object_size(from, n, true);
-
return __copy_tofrom_user(to, (__force const void __user *)from, n);
}
-static inline unsigned long __copy_from_user(void *to,
- const void __user *from, unsigned long size)
-{
- might_fault();
- return __copy_from_user_inatomic(to, from, size);
-}
-
-static inline unsigned long __copy_to_user(void __user *to,
- const void *from, unsigned long size)
-{
- might_fault();
- return __copy_to_user_inatomic(to, from, size);
-}
-
extern unsigned long __clear_user(void __user *addr, unsigned long size);
static inline unsigned long clear_user(void __user *addr, unsigned long size)
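After this change the arch supplies only the raw_copy_*() primitives, each returning the number of bytes not copied; access_ok(), might_fault(), the hardened-usercopy object-size check, and the zero-fill of the uncopied tail all move into the generic linux/uaccess.h wrappers, which is why they disappear from this file. A sketch of the caller-visible contract (read_helper() is a hypothetical example, not kernel code):

#include <linux/errno.h>
#include <linux/uaccess.h>

static long read_helper(void *dst, const void __user *src, unsigned long len)
{
	unsigned long left = copy_from_user(dst, src, len);

	if (left)		/* 'left' trailing bytes were zero-filled */
		return -EFAULT;
	return len;
}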
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index eb1acee91a20..9ba11dbcaca9 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -12,7 +12,7 @@
#include <uapi/asm/unistd.h>
-#define NR_syscalls 383
+#define NR_syscalls 384
#define __NR__exit __NR_exit
diff --git a/arch/powerpc/include/uapi/asm/unistd.h b/arch/powerpc/include/uapi/asm/unistd.h
index 2f26335a3c42..b85f14228857 100644
--- a/arch/powerpc/include/uapi/asm/unistd.h
+++ b/arch/powerpc/include/uapi/asm/unistd.h
@@ -393,5 +393,6 @@
#define __NR_preadv2 380
#define __NR_pwritev2 381
#define __NR_kexec_file_load 382
+#define __NR_statx 383
#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index bb7a1890aeb7..e79b9daa873c 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -77,6 +77,7 @@ extern void __flush_tlb_power8(unsigned int action);
extern void __flush_tlb_power9(unsigned int action);
extern long __machine_check_early_realmode_p7(struct pt_regs *regs);
extern long __machine_check_early_realmode_p8(struct pt_regs *regs);
+extern long __machine_check_early_realmode_p9(struct pt_regs *regs);
#endif /* CONFIG_PPC64 */
#if defined(CONFIG_E500)
extern void __setup_cpu_e5500(unsigned long offset, struct cpu_spec* spec);
@@ -540,6 +541,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_setup = __setup_cpu_power9,
.cpu_restore = __restore_cpu_power9,
.flush_tlb = __flush_tlb_power9,
+ .machine_check_early = __machine_check_early_realmode_p9,
.platform = "power9",
},
{ /* Power9 */
@@ -559,6 +561,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_setup = __setup_cpu_power9,
.cpu_restore = __restore_cpu_power9,
.flush_tlb = __flush_tlb_power9,
+ .machine_check_early = __machine_check_early_realmode_p9,
.platform = "power9",
},
{ /* Cell Broadband Engine */
diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S
index 5f61cc0349c0..6fd08219248d 100644
--- a/arch/powerpc/kernel/idle_book3s.S
+++ b/arch/powerpc/kernel/idle_book3s.S
@@ -276,19 +276,21 @@ power_enter_stop:
*/
andis. r4,r3,PSSCR_EC_ESL_MASK_SHIFTED
clrldi r3,r3,60 /* r3 = Bits[60:63] = Requested Level (RL) */
- bne 1f
+ bne .Lhandle_esl_ec_set
IDLE_STATE_ENTER_SEQ(PPC_STOP)
li r3,0 /* Since we didn't lose state, return 0 */
b pnv_wakeup_noloss
+
+.Lhandle_esl_ec_set:
/*
* Check if the requested state is a deep idle state.
*/
-1: LOAD_REG_ADDRBASE(r5,pnv_first_deep_stop_state)
+ LOAD_REG_ADDRBASE(r5,pnv_first_deep_stop_state)
ld r4,ADDROFF(pnv_first_deep_stop_state)(r5)
cmpd r3,r4
- bge 2f
+ bge .Lhandle_deep_stop
IDLE_STATE_ENTER_SEQ_NORET(PPC_STOP)
-2:
+.Lhandle_deep_stop:
/*
* Entering deep idle state.
* Clear thread bit in PACA_CORE_IDLE_STATE, save SPRs to
@@ -447,9 +449,23 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
_GLOBAL(pnv_wakeup_tb_loss)
ld r1,PACAR1(r13)
/*
- * Before entering any idle state, the NVGPRs are saved in the stack
- * and they are restored before switching to the process context. Hence
- * until they are restored, they are free to be used.
+ * Before entering any idle state, the NVGPRs are saved in the stack.
+ * If there was a state loss, or PACA_NAPSTATELOST was set, then the
+ * NVGPRs are restored. If we are here, state is likely lost, but
+ * that is not guaranteed -- the tests used to reach this point
+ * (the PACA_THREAD_IDLE_STATE test on ISA207, the PSSCR test on
+ * ISA300) are not the same as the SRR1 test used to decide whether
+ * to restore the NVGPRs.
+ *
+ * We are about to clobber NVGPRs now, so set NAPSTATELOST to
+ * guarantee they will always be restored. This might be tightened
+ * with careful reading of specs (particularly for ISA300) but this
+ * is already a slow wakeup path and it's simpler to be safe.
+ */
+ li r0,1
+ stb r0,PACA_NAPSTATELOST(r13)
+
+ /*
*
* Save SRR1 and LR in NVGPRs as they might be clobbered in
* opal_call() (called in CHECK_HMI_INTERRUPT). SRR1 is required
diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c
index c6923ff45131..a1475e6aef3a 100644
--- a/arch/powerpc/kernel/mce.c
+++ b/arch/powerpc/kernel/mce.c
@@ -58,6 +58,15 @@ static void mce_set_error_info(struct machine_check_event *mce,
case MCE_ERROR_TYPE_TLB:
mce->u.tlb_error.tlb_error_type = mce_err->u.tlb_error_type;
break;
+ case MCE_ERROR_TYPE_USER:
+ mce->u.user_error.user_error_type = mce_err->u.user_error_type;
+ break;
+ case MCE_ERROR_TYPE_RA:
+ mce->u.ra_error.ra_error_type = mce_err->u.ra_error_type;
+ break;
+ case MCE_ERROR_TYPE_LINK:
+ mce->u.link_error.link_error_type = mce_err->u.link_error_type;
+ break;
case MCE_ERROR_TYPE_UNKNOWN:
default:
break;
@@ -90,13 +99,14 @@ void save_mce_event(struct pt_regs *regs, long handled,
mce->gpr3 = regs->gpr[3];
mce->in_use = 1;
- mce->initiator = MCE_INITIATOR_CPU;
/* Mark it recovered if we have handled it and MSR(RI=1). */
if (handled && (regs->msr & MSR_RI))
mce->disposition = MCE_DISPOSITION_RECOVERED;
else
mce->disposition = MCE_DISPOSITION_NOT_RECOVERED;
- mce->severity = MCE_SEV_ERROR_SYNC;
+
+ mce->initiator = mce_err->initiator;
+ mce->severity = mce_err->severity;
/*
* Populate the mce error_type and type-specific error_type.
@@ -115,6 +125,15 @@ void save_mce_event(struct pt_regs *regs, long handled,
} else if (mce->error_type == MCE_ERROR_TYPE_ERAT) {
mce->u.erat_error.effective_address_provided = true;
mce->u.erat_error.effective_address = addr;
+ } else if (mce->error_type == MCE_ERROR_TYPE_USER) {
+ mce->u.user_error.effective_address_provided = true;
+ mce->u.user_error.effective_address = addr;
+ } else if (mce->error_type == MCE_ERROR_TYPE_RA) {
+ mce->u.ra_error.effective_address_provided = true;
+ mce->u.ra_error.effective_address = addr;
+ } else if (mce->error_type == MCE_ERROR_TYPE_LINK) {
+ mce->u.link_error.effective_address_provided = true;
+ mce->u.link_error.effective_address = addr;
} else if (mce->error_type == MCE_ERROR_TYPE_UE) {
mce->u.ue_error.effective_address_provided = true;
mce->u.ue_error.effective_address = addr;
@@ -239,6 +258,29 @@ void machine_check_print_event_info(struct machine_check_event *evt)
"Parity",
"Multihit",
};
+ static const char *mc_user_types[] = {
+ "Indeterminate",
+ "tlbie(l) invalid",
+ };
+ static const char *mc_ra_types[] = {
+ "Indeterminate",
+ "Instruction fetch (bad)",
+ "Page table walk ifetch (bad)",
+ "Page table walk ifetch (foreign)",
+ "Load (bad)",
+ "Store (bad)",
+ "Page table walk Load/Store (bad)",
+ "Page table walk Load/Store (foreign)",
+ "Load/Store (foreign)",
+ };
+ static const char *mc_link_types[] = {
+ "Indeterminate",
+ "Instruction fetch (timeout)",
+ "Page table walk ifetch (timeout)",
+ "Load (timeout)",
+ "Store (timeout)",
+ "Page table walk Load/Store (timeout)",
+ };
/* Print things out */
if (evt->version != MCE_V1) {
@@ -315,6 +357,36 @@ void machine_check_print_event_info(struct machine_check_event *evt)
printk("%s Effective address: %016llx\n",
level, evt->u.tlb_error.effective_address);
break;
+ case MCE_ERROR_TYPE_USER:
+ subtype = evt->u.user_error.user_error_type <
+ ARRAY_SIZE(mc_user_types) ?
+ mc_user_types[evt->u.user_error.user_error_type]
+ : "Unknown";
+ printk("%s Error type: User [%s]\n", level, subtype);
+ if (evt->u.user_error.effective_address_provided)
+ printk("%s Effective address: %016llx\n",
+ level, evt->u.user_error.effective_address);
+ break;
+ case MCE_ERROR_TYPE_RA:
+ subtype = evt->u.ra_error.ra_error_type <
+ ARRAY_SIZE(mc_ra_types) ?
+ mc_ra_types[evt->u.ra_error.ra_error_type]
+ : "Unknown";
+ printk("%s Error type: Real address [%s]\n", level, subtype);
+ if (evt->u.ra_error.effective_address_provided)
+ printk("%s Effective address: %016llx\n",
+ level, evt->u.ra_error.effective_address);
+ break;
+ case MCE_ERROR_TYPE_LINK:
+ subtype = evt->u.link_error.link_error_type <
+ ARRAY_SIZE(mc_link_types) ?
+ mc_link_types[evt->u.link_error.link_error_type]
+ : "Unknown";
+ printk("%s Error type: Link [%s]\n", level, subtype);
+ if (evt->u.link_error.effective_address_provided)
+ printk("%s Effective address: %016llx\n",
+ level, evt->u.link_error.effective_address);
+ break;
default:
case MCE_ERROR_TYPE_UNKNOWN:
printk("%s Error type: Unknown\n", level);
@@ -341,6 +413,18 @@ uint64_t get_mce_fault_addr(struct machine_check_event *evt)
if (evt->u.tlb_error.effective_address_provided)
return evt->u.tlb_error.effective_address;
break;
+ case MCE_ERROR_TYPE_USER:
+ if (evt->u.user_error.effective_address_provided)
+ return evt->u.user_error.effective_address;
+ break;
+ case MCE_ERROR_TYPE_RA:
+ if (evt->u.ra_error.effective_address_provided)
+ return evt->u.ra_error.effective_address;
+ break;
+ case MCE_ERROR_TYPE_LINK:
+ if (evt->u.link_error.effective_address_provided)
+ return evt->u.link_error.effective_address;
+ break;
default:
case MCE_ERROR_TYPE_UNKNOWN:
break;
diff --git a/arch/powerpc/kernel/mce_power.c b/arch/powerpc/kernel/mce_power.c
index 7353991c4ece..763d6f58caa8 100644
--- a/arch/powerpc/kernel/mce_power.c
+++ b/arch/powerpc/kernel/mce_power.c
@@ -116,6 +116,51 @@ static void flush_and_reload_slb(void)
}
#endif
+static void flush_erat(void)
+{
+ asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
+}
+
+#define MCE_FLUSH_SLB 1
+#define MCE_FLUSH_TLB 2
+#define MCE_FLUSH_ERAT 3
+
+static int mce_flush(int what)
+{
+#ifdef CONFIG_PPC_STD_MMU_64
+ if (what == MCE_FLUSH_SLB) {
+ flush_and_reload_slb();
+ return 1;
+ }
+#endif
+ if (what == MCE_FLUSH_ERAT) {
+ flush_erat();
+ return 1;
+ }
+ if (what == MCE_FLUSH_TLB) {
+ if (cur_cpu_spec && cur_cpu_spec->flush_tlb) {
+ cur_cpu_spec->flush_tlb(TLB_INVAL_SCOPE_GLOBAL);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+static int mce_handle_flush_derrors(uint64_t dsisr, uint64_t slb, uint64_t tlb, uint64_t erat)
+{
+ if ((dsisr & slb) && mce_flush(MCE_FLUSH_SLB))
+ dsisr &= ~slb;
+ if ((dsisr & erat) && mce_flush(MCE_FLUSH_ERAT))
+ dsisr &= ~erat;
+ if ((dsisr & tlb) && mce_flush(MCE_FLUSH_TLB))
+ dsisr &= ~tlb;
+ /* Any other errors we don't understand? */
+ if (dsisr)
+ return 0;
+ return 1;
+}
+
static long mce_handle_derror(uint64_t dsisr, uint64_t slb_error_bits)
{
long handled = 1;
@@ -281,6 +326,9 @@ long __machine_check_early_realmode_p7(struct pt_regs *regs)
long handled = 1;
struct mce_error_info mce_error_info = { 0 };
+ mce_error_info.severity = MCE_SEV_ERROR_SYNC;
+ mce_error_info.initiator = MCE_INITIATOR_CPU;
+
srr1 = regs->msr;
nip = regs->nip;
@@ -352,6 +400,9 @@ long __machine_check_early_realmode_p8(struct pt_regs *regs)
long handled = 1;
struct mce_error_info mce_error_info = { 0 };
+ mce_error_info.severity = MCE_SEV_ERROR_SYNC;
+ mce_error_info.initiator = MCE_INITIATOR_CPU;
+
srr1 = regs->msr;
nip = regs->nip;
@@ -372,3 +423,189 @@ long __machine_check_early_realmode_p8(struct pt_regs *regs)
save_mce_event(regs, handled, &mce_error_info, nip, addr);
return handled;
}
+
+static int mce_handle_derror_p9(struct pt_regs *regs)
+{
+ uint64_t dsisr = regs->dsisr;
+
+ return mce_handle_flush_derrors(dsisr,
+ P9_DSISR_MC_SLB_PARITY_MFSLB |
+ P9_DSISR_MC_SLB_MULTIHIT_MFSLB,
+
+ P9_DSISR_MC_TLB_MULTIHIT_MFTLB,
+
+ P9_DSISR_MC_ERAT_MULTIHIT);
+}
+
+static int mce_handle_ierror_p9(struct pt_regs *regs)
+{
+ uint64_t srr1 = regs->msr;
+
+ switch (P9_SRR1_MC_IFETCH(srr1)) {
+ case P9_SRR1_MC_IFETCH_SLB_PARITY:
+ case P9_SRR1_MC_IFETCH_SLB_MULTIHIT:
+ return mce_flush(MCE_FLUSH_SLB);
+ case P9_SRR1_MC_IFETCH_TLB_MULTIHIT:
+ return mce_flush(MCE_FLUSH_TLB);
+ case P9_SRR1_MC_IFETCH_ERAT_MULTIHIT:
+ return mce_flush(MCE_FLUSH_ERAT);
+ default:
+ return 0;
+ }
+}
+
+static void mce_get_derror_p9(struct pt_regs *regs,
+ struct mce_error_info *mce_err, uint64_t *addr)
+{
+ uint64_t dsisr = regs->dsisr;
+
+ mce_err->severity = MCE_SEV_ERROR_SYNC;
+ mce_err->initiator = MCE_INITIATOR_CPU;
+
+ if (dsisr & P9_DSISR_MC_USER_TLBIE)
+ *addr = regs->nip;
+ else
+ *addr = regs->dar;
+
+ if (dsisr & P9_DSISR_MC_UE) {
+ mce_err->error_type = MCE_ERROR_TYPE_UE;
+ mce_err->u.ue_error_type = MCE_UE_ERROR_LOAD_STORE;
+ } else if (dsisr & P9_DSISR_MC_UE_TABLEWALK) {
+ mce_err->error_type = MCE_ERROR_TYPE_UE;
+ mce_err->u.ue_error_type = MCE_UE_ERROR_PAGE_TABLE_WALK_LOAD_STORE;
+ } else if (dsisr & P9_DSISR_MC_LINK_LOAD_TIMEOUT) {
+ mce_err->error_type = MCE_ERROR_TYPE_LINK;
+ mce_err->u.link_error_type = MCE_LINK_ERROR_LOAD_TIMEOUT;
+ } else if (dsisr & P9_DSISR_MC_LINK_TABLEWALK_TIMEOUT) {
+ mce_err->error_type = MCE_ERROR_TYPE_LINK;
+ mce_err->u.link_error_type = MCE_LINK_ERROR_PAGE_TABLE_WALK_LOAD_STORE_TIMEOUT;
+ } else if (dsisr & P9_DSISR_MC_ERAT_MULTIHIT) {
+ mce_err->error_type = MCE_ERROR_TYPE_ERAT;
+ mce_err->u.erat_error_type = MCE_ERAT_ERROR_MULTIHIT;
+ } else if (dsisr & P9_DSISR_MC_TLB_MULTIHIT_MFTLB) {
+ mce_err->error_type = MCE_ERROR_TYPE_TLB;
+ mce_err->u.tlb_error_type = MCE_TLB_ERROR_MULTIHIT;
+ } else if (dsisr & P9_DSISR_MC_USER_TLBIE) {
+ mce_err->error_type = MCE_ERROR_TYPE_USER;
+ mce_err->u.user_error_type = MCE_USER_ERROR_TLBIE;
+ } else if (dsisr & P9_DSISR_MC_SLB_PARITY_MFSLB) {
+ mce_err->error_type = MCE_ERROR_TYPE_SLB;
+ mce_err->u.slb_error_type = MCE_SLB_ERROR_PARITY;
+ } else if (dsisr & P9_DSISR_MC_SLB_MULTIHIT_MFSLB) {
+ mce_err->error_type = MCE_ERROR_TYPE_SLB;
+ mce_err->u.slb_error_type = MCE_SLB_ERROR_MULTIHIT;
+ } else if (dsisr & P9_DSISR_MC_RA_LOAD) {
+ mce_err->error_type = MCE_ERROR_TYPE_RA;
+ mce_err->u.ra_error_type = MCE_RA_ERROR_LOAD;
+ } else if (dsisr & P9_DSISR_MC_RA_TABLEWALK) {
+ mce_err->error_type = MCE_ERROR_TYPE_RA;
+ mce_err->u.ra_error_type = MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE;
+ } else if (dsisr & P9_DSISR_MC_RA_TABLEWALK_FOREIGN) {
+ mce_err->error_type = MCE_ERROR_TYPE_RA;
+ mce_err->u.ra_error_type = MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE_FOREIGN;
+ } else if (dsisr & P9_DSISR_MC_RA_FOREIGN) {
+ mce_err->error_type = MCE_ERROR_TYPE_RA;
+ mce_err->u.ra_error_type = MCE_RA_ERROR_LOAD_STORE_FOREIGN;
+ }
+}
+
+static void mce_get_ierror_p9(struct pt_regs *regs,
+ struct mce_error_info *mce_err, uint64_t *addr)
+{
+ uint64_t srr1 = regs->msr;
+
+ switch (P9_SRR1_MC_IFETCH(srr1)) {
+ case P9_SRR1_MC_IFETCH_RA_ASYNC_STORE:
+ case P9_SRR1_MC_IFETCH_LINK_ASYNC_STORE_TIMEOUT:
+ mce_err->severity = MCE_SEV_FATAL;
+ break;
+ default:
+ mce_err->severity = MCE_SEV_ERROR_SYNC;
+ break;
+ }
+
+ mce_err->initiator = MCE_INITIATOR_CPU;
+
+ *addr = regs->nip;
+
+ switch (P9_SRR1_MC_IFETCH(srr1)) {
+ case P9_SRR1_MC_IFETCH_UE:
+ mce_err->error_type = MCE_ERROR_TYPE_UE;
+ mce_err->u.ue_error_type = MCE_UE_ERROR_IFETCH;
+ break;
+ case P9_SRR1_MC_IFETCH_SLB_PARITY:
+ mce_err->error_type = MCE_ERROR_TYPE_SLB;
+ mce_err->u.slb_error_type = MCE_SLB_ERROR_PARITY;
+ break;
+ case P9_SRR1_MC_IFETCH_SLB_MULTIHIT:
+ mce_err->error_type = MCE_ERROR_TYPE_SLB;
+ mce_err->u.slb_error_type = MCE_SLB_ERROR_MULTIHIT;
+ break;
+ case P9_SRR1_MC_IFETCH_ERAT_MULTIHIT:
+ mce_err->error_type = MCE_ERROR_TYPE_ERAT;
+ mce_err->u.erat_error_type = MCE_ERAT_ERROR_MULTIHIT;
+ break;
+ case P9_SRR1_MC_IFETCH_TLB_MULTIHIT:
+ mce_err->error_type = MCE_ERROR_TYPE_TLB;
+ mce_err->u.tlb_error_type = MCE_TLB_ERROR_MULTIHIT;
+ break;
+ case P9_SRR1_MC_IFETCH_UE_TLB_RELOAD:
+ mce_err->error_type = MCE_ERROR_TYPE_UE;
+ mce_err->u.ue_error_type = MCE_UE_ERROR_PAGE_TABLE_WALK_IFETCH;
+ break;
+ case P9_SRR1_MC_IFETCH_LINK_TIMEOUT:
+ mce_err->error_type = MCE_ERROR_TYPE_LINK;
+ mce_err->u.link_error_type = MCE_LINK_ERROR_IFETCH_TIMEOUT;
+ break;
+ case P9_SRR1_MC_IFETCH_LINK_TABLEWALK_TIMEOUT:
+ mce_err->error_type = MCE_ERROR_TYPE_LINK;
+ mce_err->u.link_error_type = MCE_LINK_ERROR_PAGE_TABLE_WALK_IFETCH_TIMEOUT;
+ break;
+ case P9_SRR1_MC_IFETCH_RA:
+ mce_err->error_type = MCE_ERROR_TYPE_RA;
+ mce_err->u.ra_error_type = MCE_RA_ERROR_IFETCH;
+ break;
+ case P9_SRR1_MC_IFETCH_RA_TABLEWALK:
+ mce_err->error_type = MCE_ERROR_TYPE_RA;
+ mce_err->u.ra_error_type = MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH;
+ break;
+ case P9_SRR1_MC_IFETCH_RA_ASYNC_STORE:
+ mce_err->error_type = MCE_ERROR_TYPE_RA;
+ mce_err->u.ra_error_type = MCE_RA_ERROR_STORE;
+ break;
+ case P9_SRR1_MC_IFETCH_LINK_ASYNC_STORE_TIMEOUT:
+ mce_err->error_type = MCE_ERROR_TYPE_LINK;
+ mce_err->u.link_error_type = MCE_LINK_ERROR_STORE_TIMEOUT;
+ break;
+ case P9_SRR1_MC_IFETCH_RA_TABLEWALK_FOREIGN:
+ mce_err->error_type = MCE_ERROR_TYPE_RA;
+ mce_err->u.ra_error_type = MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH_FOREIGN;
+ break;
+ default:
+ break;
+ }
+}
+
+long __machine_check_early_realmode_p9(struct pt_regs *regs)
+{
+ uint64_t nip, addr;
+ long handled;
+ struct mce_error_info mce_error_info = { 0 };
+
+ nip = regs->nip;
+
+ if (P9_SRR1_MC_LOADSTORE(regs->msr)) {
+ handled = mce_handle_derror_p9(regs);
+ mce_get_derror_p9(regs, &mce_error_info, &addr);
+ } else {
+ handled = mce_handle_ierror_p9(regs);
+ mce_get_ierror_p9(regs, &mce_error_info, &addr);
+ }
+
+ /* Handle UE error. */
+ if (mce_error_info.error_type == MCE_ERROR_TYPE_UE)
+ handled = mce_handle_ue_error(regs);
+
+ save_mce_event(regs, handled, &mce_error_info, nip, addr);
+ return handled;
+}
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index a3944540fe0d..1c1b44ec7642 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -168,6 +168,14 @@ static unsigned long __initdata prom_tce_alloc_start;
static unsigned long __initdata prom_tce_alloc_end;
#endif
+static bool __initdata prom_radix_disable;
+
+struct platform_support {
+ bool hash_mmu;
+ bool radix_mmu;
+ bool radix_gtse;
+};
+
/* Platforms codes are now obsolete in the kernel. Now only used within this
* file and ultimately gone too. Feel free to change them if you need, they
* are not shared with anything outside of this file anymore
@@ -626,6 +634,12 @@ static void __init early_cmdline_parse(void)
prom_memory_limit = ALIGN(prom_memory_limit, 0x1000000);
#endif
}
+
+ opt = strstr(prom_cmd_line, "disable_radix");
+ if (opt) {
+ prom_debug("Radix disabled from cmdline\n");
+ prom_radix_disable = true;
+ }
}
#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
@@ -695,6 +709,8 @@ struct option_vector5 {
u8 byte22;
u8 intarch;
u8 mmu;
+ u8 hash_ext;
+ u8 radix_ext;
} __packed;
struct option_vector6 {
@@ -850,8 +866,9 @@ struct ibm_arch_vec __cacheline_aligned ibm_architecture_vec = {
.reserved3 = 0,
.subprocessors = 1,
.intarch = 0,
- .mmu = OV5_FEAT(OV5_MMU_RADIX_300) | OV5_FEAT(OV5_MMU_HASH_300) |
- OV5_FEAT(OV5_MMU_PROC_TBL) | OV5_FEAT(OV5_MMU_GTSE),
+ .mmu = 0,
+ .hash_ext = 0,
+ .radix_ext = 0,
},
/* option vector 6: IBM PAPR hints */
@@ -990,6 +1007,92 @@ static int __init prom_count_smt_threads(void)
}
+static void __init prom_parse_mmu_model(u8 val,
+ struct platform_support *support)
+{
+ switch (val) {
+ case OV5_FEAT(OV5_MMU_DYNAMIC):
+ case OV5_FEAT(OV5_MMU_EITHER): /* Either Available */
+ prom_debug("MMU - either supported\n");
+ support->radix_mmu = !prom_radix_disable;
+ support->hash_mmu = true;
+ break;
+ case OV5_FEAT(OV5_MMU_RADIX): /* Only Radix */
+ prom_debug("MMU - radix only\n");
+ if (prom_radix_disable) {
+ /*
+ * If we __have__ to do radix, we're better off ignoring
+ * the command line rather than not booting.
+ */
+ prom_printf("WARNING: Ignoring cmdline option disable_radix\n");
+ }
+ support->radix_mmu = true;
+ break;
+ case OV5_FEAT(OV5_MMU_HASH):
+ prom_debug("MMU - hash only\n");
+ support->hash_mmu = true;
+ break;
+ default:
+ prom_debug("Unknown mmu support option: 0x%x\n", val);
+ break;
+ }
+}
+
+static void __init prom_parse_platform_support(u8 index, u8 val,
+ struct platform_support *support)
+{
+ switch (index) {
+ case OV5_INDX(OV5_MMU_SUPPORT): /* MMU Model */
+ prom_parse_mmu_model(val & OV5_FEAT(OV5_MMU_SUPPORT), support);
+ break;
+ case OV5_INDX(OV5_RADIX_GTSE): /* Radix Extensions */
+ if (val & OV5_FEAT(OV5_RADIX_GTSE)) {
+ prom_debug("Radix - GTSE supported\n");
+ support->radix_gtse = true;
+ }
+ break;
+ }
+}
+
+static void __init prom_check_platform_support(void)
+{
+ struct platform_support supported = {
+ .hash_mmu = false,
+ .radix_mmu = false,
+ .radix_gtse = false
+ };
+ int prop_len = prom_getproplen(prom.chosen,
+ "ibm,arch-vec-5-platform-support");
+ if (prop_len > 1) {
+ int i;
+ u8 vec[prop_len];
+ prom_debug("Found ibm,arch-vec-5-platform-support, len: %d\n",
+ prop_len);
+ prom_getprop(prom.chosen, "ibm,arch-vec-5-platform-support",
+ &vec, sizeof(vec));
+ for (i = 0; i < prop_len; i += 2) {
+ prom_debug("%d: index = 0x%x val = 0x%x\n", i / 2
+ , vec[i]
+ , vec[i + 1]);
+ prom_parse_platform_support(vec[i], vec[i + 1],
+ &supported);
+ }
+ }
+
+ if (supported.radix_mmu && supported.radix_gtse) {
+ /* Radix preferred - but we require GTSE for now */
+ prom_debug("Asking for radix with GTSE\n");
+ ibm_architecture_vec.vec5.mmu = OV5_FEAT(OV5_MMU_RADIX);
+ ibm_architecture_vec.vec5.radix_ext = OV5_FEAT(OV5_RADIX_GTSE);
+ } else if (supported.hash_mmu) {
+ /* Default to hash mmu (if we can) */
+ prom_debug("Asking for hash\n");
+ ibm_architecture_vec.vec5.mmu = OV5_FEAT(OV5_MMU_HASH);
+ } else {
+ /* We're probably on a legacy hypervisor */
+ prom_debug("Assuming legacy hash support\n");
+ }
+}
static void __init prom_send_capabilities(void)
{
@@ -997,6 +1100,9 @@ static void __init prom_send_capabilities(void)
prom_arg_t ret;
u32 cores;
+ /* Check ibm,arch-vec-5-platform-support and fixup vec5 if required */
+ prom_check_platform_support();
+
root = call_prom("open", 1, 1, ADDR("/"));
if (root != 0) {
/* We need to tell the FW about the number of cores we support.
@@ -2993,6 +3099,11 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
*/
prom_check_initrd(r3, r4);
+ /*
+ * Do early parsing of command line
+ */
+ early_cmdline_parse();
+
#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
/*
* On pSeries, inform the firmware about our capabilities
@@ -3009,11 +3120,6 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
copy_and_flush(0, kbase, 0x100, 0);
/*
- * Do early parsing of command line
- */
- early_cmdline_parse();
-
- /*
* Initialize memory management within prom_init
*/
prom_init_mem();
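prom_check_platform_support() above walks ibm,arch-vec-5-platform-support as a flat array of (index, value) byte pairs. A sketch of what such a property might contain (contents illustrative, not taken from any real firmware):

static const unsigned char example_prop[] = {
	0x18, 0x40,	/* OV5_INDX(OV5_MMU_SUPPORT): radix-only bits */
	0x1a, 0x40,	/* OV5_INDX(OV5_RADIX_GTSE):  GTSE available  */
};
/* parsed two bytes at a time: vec[i] is the index, vec[i+1] the value */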
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index adf2084f214b..9cfaa8b69b5f 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -408,7 +408,10 @@ static void init_cache_info(struct ppc_cache_info *info, u32 size, u32 lsize,
info->line_size = lsize;
info->block_size = bsize;
info->log_block_size = __ilog2(bsize);
- info->blocks_per_page = PAGE_SIZE / bsize;
+ if (bsize)
+ info->blocks_per_page = PAGE_SIZE / bsize;
+ else
+ info->blocks_per_page = 0;
if (sets == 0)
info->assoc = 0xffff;
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index f3158fb16de3..8c68145ba1bd 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -601,7 +601,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
hva, NULL, NULL);
if (ptep) {
pte = kvmppc_read_update_linux_pte(ptep, 1);
- if (pte_write(pte))
+ if (__pte_write(pte))
write_ok = 1;
}
local_irq_restore(flags);
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 6fca970373ee..ce6f2121fffe 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -256,7 +256,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
}
pte = kvmppc_read_update_linux_pte(ptep, writing);
if (pte_present(pte) && !pte_protnone(pte)) {
- if (writing && !pte_write(pte))
+ if (writing && !__pte_write(pte))
/* make the actual HPTE be read-only */
ptel = hpte_make_readonly(ptel);
is_ci = pte_ci(pte);
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
index 0e649d72fe8d..ed7dfce331e0 100644
--- a/arch/powerpc/lib/Makefile
+++ b/arch/powerpc/lib/Makefile
@@ -14,12 +14,13 @@ obj-y += string.o alloc.o crtsavres.o code-patching.o \
obj-$(CONFIG_PPC32) += div64.o copy_32.o
-obj64-y += copypage_64.o copyuser_64.o usercopy_64.o mem_64.o hweight_64.o \
+obj64-y += copypage_64.o copyuser_64.o mem_64.o hweight_64.o \
copyuser_power7.o string_64.o copypage_power7.o memcpy_power7.o \
memcpy_64.o memcmp_64.o
obj64-$(CONFIG_SMP) += locks.o
obj64-$(CONFIG_ALTIVEC) += vmx-helper.o
+obj64-$(CONFIG_KPROBES_SANITY_TEST) += test_emulate_step.o
obj-y += checksum_$(BITS).o checksum_wrappers.o
diff --git a/arch/powerpc/lib/copy_32.S b/arch/powerpc/lib/copy_32.S
index ff0d894d7ff9..8aedbb5f4b86 100644
--- a/arch/powerpc/lib/copy_32.S
+++ b/arch/powerpc/lib/copy_32.S
@@ -477,18 +477,6 @@ _GLOBAL(__copy_tofrom_user)
bdnz 130b
/* then clear out the destination: r3 bytes starting at 4(r6) */
132: mfctr r3
- srwi. r0,r3,2
- li r9,0
- mtctr r0
- beq 113f
-112: stwu r9,4(r6)
- bdnz 112b
-113: andi. r0,r3,3
- mtctr r0
- beq 120f
-114: stb r9,4(r6)
- addi r6,r6,1
- bdnz 114b
120: blr
EX_TABLE(30b,108b)
@@ -497,7 +485,5 @@ _GLOBAL(__copy_tofrom_user)
EX_TABLE(41b,111b)
EX_TABLE(130b,132b)
EX_TABLE(131b,120b)
- EX_TABLE(112b,120b)
- EX_TABLE(114b,120b)
EXPORT_SYMBOL(__copy_tofrom_user)
diff --git a/arch/powerpc/lib/copyuser_64.S b/arch/powerpc/lib/copyuser_64.S
index aee6e24e81ab..08da06e1bd72 100644
--- a/arch/powerpc/lib/copyuser_64.S
+++ b/arch/powerpc/lib/copyuser_64.S
@@ -319,32 +319,9 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
blr
/*
- * here we have trapped again, need to clear ctr bytes starting at r3
+ * here we have trapped again, amount remaining is in ctr.
*/
-143: mfctr r5
- li r0,0
- mr r4,r3
- mr r3,r5 /* return the number of bytes not copied */
-1: andi. r9,r4,7
- beq 3f
-90: stb r0,0(r4)
- addic. r5,r5,-1
- addi r4,r4,1
- bne 1b
- blr
-3: cmpldi cr1,r5,8
- srdi r9,r5,3
- andi. r5,r5,7
- blt cr1,93f
- mtctr r9
-91: std r0,0(r4)
- addi r4,r4,8
- bdnz 91b
-93: beqlr
- mtctr r5
-92: stb r0,0(r4)
- addi r4,r4,1
- bdnz 92b
+143: mfctr r3
blr
/*
@@ -389,10 +366,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
ld r5,-8(r1)
add r6,r6,r5
subf r3,r3,r6 /* #bytes not copied */
-190:
-191:
-192:
- blr /* #bytes not copied in r3 */
+ blr
EX_TABLE(20b,120b)
EX_TABLE(220b,320b)
@@ -451,9 +425,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
EX_TABLE(88b,188b)
EX_TABLE(43b,143b)
EX_TABLE(89b,189b)
- EX_TABLE(90b,190b)
- EX_TABLE(91b,191b)
- EX_TABLE(92b,192b)
/*
* Routine to copy a whole page of data, optimized for POWER4.
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
index 846dba2c6360..9c542ec70c5b 100644
--- a/arch/powerpc/lib/sstep.c
+++ b/arch/powerpc/lib/sstep.c
@@ -1799,8 +1799,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
goto instr_done;
case LARX:
- if (regs->msr & MSR_LE)
- return 0;
if (op.ea & (size - 1))
break; /* can't handle misaligned */
if (!address_ok(regs, op.ea, size))
@@ -1823,8 +1821,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
goto ldst_done;
case STCX:
- if (regs->msr & MSR_LE)
- return 0;
if (op.ea & (size - 1))
break; /* can't handle misaligned */
if (!address_ok(regs, op.ea, size))
@@ -1849,8 +1845,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
goto ldst_done;
case LOAD:
- if (regs->msr & MSR_LE)
- return 0;
err = read_mem(&regs->gpr[op.reg], op.ea, size, regs);
if (!err) {
if (op.type & SIGNEXT)
@@ -1862,8 +1856,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
#ifdef CONFIG_PPC_FPU
case LOAD_FP:
- if (regs->msr & MSR_LE)
- return 0;
if (size == 4)
err = do_fp_load(op.reg, do_lfs, op.ea, size, regs);
else
@@ -1872,15 +1864,11 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
#endif
#ifdef CONFIG_ALTIVEC
case LOAD_VMX:
- if (regs->msr & MSR_LE)
- return 0;
err = do_vec_load(op.reg, do_lvx, op.ea & ~0xfUL, regs);
goto ldst_done;
#endif
#ifdef CONFIG_VSX
case LOAD_VSX:
- if (regs->msr & MSR_LE)
- return 0;
err = do_vsx_load(op.reg, do_lxvd2x, op.ea, regs);
goto ldst_done;
#endif
@@ -1903,8 +1891,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
goto instr_done;
case STORE:
- if (regs->msr & MSR_LE)
- return 0;
if ((op.type & UPDATE) && size == sizeof(long) &&
op.reg == 1 && op.update_reg == 1 &&
!(regs->msr & MSR_PR) &&
@@ -1917,8 +1903,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
#ifdef CONFIG_PPC_FPU
case STORE_FP:
- if (regs->msr & MSR_LE)
- return 0;
if (size == 4)
err = do_fp_store(op.reg, do_stfs, op.ea, size, regs);
else
@@ -1927,15 +1911,11 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
#endif
#ifdef CONFIG_ALTIVEC
case STORE_VMX:
- if (regs->msr & MSR_LE)
- return 0;
err = do_vec_store(op.reg, do_stvx, op.ea & ~0xfUL, regs);
goto ldst_done;
#endif
#ifdef CONFIG_VSX
case STORE_VSX:
- if (regs->msr & MSR_LE)
- return 0;
err = do_vsx_store(op.reg, do_stxvd2x, op.ea, regs);
goto ldst_done;
#endif
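
With the MSR_LE bail-outs removed, emulate_step() no longer refuses to emulate loads and stores on little-endian kernels. A hedged sketch of the call pattern a user such as a probe handler follows, assuming the signature in asm/sstep.h of this series (a return of 1 means the instruction was emulated and regs->nip advanced; anything else means fall back to hardware single-stepping):

#include <linux/errno.h>
#include <linux/ptrace.h>
#include <asm/sstep.h>

/* Sketch only: try emulation first, else request a hardware step. */
static int try_emulate(struct pt_regs *regs, unsigned int instr)
{
	int stepped = emulate_step(regs, instr);

	if (stepped == 1)
		return 0;	/* emulated; nip already advanced */
	return -EAGAIN;		/* caller must single-step in hardware */
}
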
diff --git a/arch/powerpc/lib/test_emulate_step.c b/arch/powerpc/lib/test_emulate_step.c
new file mode 100644
index 000000000000..2534c1447554
--- /dev/null
+++ b/arch/powerpc/lib/test_emulate_step.c
@@ -0,0 +1,434 @@
+/*
+ * Simple sanity test for emulate_step load/store instructions.
+ *
+ * Copyright IBM Corp. 2016
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#define pr_fmt(fmt) "emulate_step_test: " fmt
+
+#include <linux/ptrace.h>
+#include <asm/sstep.h>
+#include <asm/ppc-opcode.h>
+
+#define IMM_L(i) ((uintptr_t)(i) & 0xffff)
+
+/*
+ * Defined with TEST_ prefix so it does not conflict with other
+ * definitions.
+ */
+#define TEST_LD(r, base, i) (PPC_INST_LD | ___PPC_RT(r) | \
+ ___PPC_RA(base) | IMM_L(i))
+#define TEST_LWZ(r, base, i) (PPC_INST_LWZ | ___PPC_RT(r) | \
+ ___PPC_RA(base) | IMM_L(i))
+#define TEST_LWZX(t, a, b) (PPC_INST_LWZX | ___PPC_RT(t) | \
+ ___PPC_RA(a) | ___PPC_RB(b))
+#define TEST_STD(r, base, i) (PPC_INST_STD | ___PPC_RS(r) | \
+ ___PPC_RA(base) | ((i) & 0xfffc))
+#define TEST_LDARX(t, a, b, eh) (PPC_INST_LDARX | ___PPC_RT(t) | \
+ ___PPC_RA(a) | ___PPC_RB(b) | \
+ __PPC_EH(eh))
+#define TEST_STDCX(s, a, b) (PPC_INST_STDCX | ___PPC_RS(s) | \
+ ___PPC_RA(a) | ___PPC_RB(b))
+#define TEST_LFSX(t, a, b) (PPC_INST_LFSX | ___PPC_RT(t) | \
+ ___PPC_RA(a) | ___PPC_RB(b))
+#define TEST_STFSX(s, a, b) (PPC_INST_STFSX | ___PPC_RS(s) | \
+ ___PPC_RA(a) | ___PPC_RB(b))
+#define TEST_LFDX(t, a, b) (PPC_INST_LFDX | ___PPC_RT(t) | \
+ ___PPC_RA(a) | ___PPC_RB(b))
+#define TEST_STFDX(s, a, b) (PPC_INST_STFDX | ___PPC_RS(s) | \
+ ___PPC_RA(a) | ___PPC_RB(b))
+#define TEST_LVX(t, a, b) (PPC_INST_LVX | ___PPC_RT(t) | \
+ ___PPC_RA(a) | ___PPC_RB(b))
+#define TEST_STVX(s, a, b) (PPC_INST_STVX | ___PPC_RS(s) | \
+ ___PPC_RA(a) | ___PPC_RB(b))
+#define TEST_LXVD2X(s, a, b) (PPC_INST_LXVD2X | VSX_XX1((s), R##a, R##b))
+#define TEST_STXVD2X(s, a, b) (PPC_INST_STXVD2X | VSX_XX1((s), R##a, R##b))
+
+
+static void __init init_pt_regs(struct pt_regs *regs)
+{
+ static unsigned long msr;
+ static bool msr_cached;
+
+ memset(regs, 0, sizeof(struct pt_regs));
+
+ if (likely(msr_cached)) {
+ regs->msr = msr;
+ return;
+ }
+
+ asm volatile("mfmsr %0" : "=r"(regs->msr));
+
+ regs->msr |= MSR_FP;
+ regs->msr |= MSR_VEC;
+ regs->msr |= MSR_VSX;
+
+ msr = regs->msr;
+ msr_cached = true;
+}
+
+static void __init show_result(char *ins, char *result)
+{
+ pr_info("%-14s : %s\n", ins, result);
+}
+
+static void __init test_ld(void)
+{
+ struct pt_regs regs;
+ unsigned long a = 0x23;
+ int stepped = -1;
+
+ init_pt_regs(&regs);
+ regs.gpr[3] = (unsigned long) &a;
+
+ /* ld r5, 0(r3) */
+ stepped = emulate_step(&regs, TEST_LD(5, 3, 0));
+
+ if (stepped == 1 && regs.gpr[5] == a)
+ show_result("ld", "PASS");
+ else
+ show_result("ld", "FAIL");
+}
+
+static void __init test_lwz(void)
+{
+ struct pt_regs regs;
+ unsigned int a = 0x4545;
+ int stepped = -1;
+
+ init_pt_regs(&regs);
+ regs.gpr[3] = (unsigned long) &a;
+
+ /* lwz r5, 0(r3) */
+ stepped = emulate_step(&regs, TEST_LWZ(5, 3, 0));
+
+ if (stepped == 1 && regs.gpr[5] == a)
+ show_result("lwz", "PASS");
+ else
+ show_result("lwz", "FAIL");
+}
+
+static void __init test_lwzx(void)
+{
+ struct pt_regs regs;
+ unsigned int a[3] = {0x0, 0x0, 0x1234};
+ int stepped = -1;
+
+ init_pt_regs(&regs);
+ regs.gpr[3] = (unsigned long) a;
+ regs.gpr[4] = 8;
+ regs.gpr[5] = 0x8765;
+
+ /* lwzx r5, r3, r4 */
+ stepped = emulate_step(&regs, TEST_LWZX(5, 3, 4));
+ if (stepped == 1 && regs.gpr[5] == a[2])
+ show_result("lwzx", "PASS");
+ else
+ show_result("lwzx", "FAIL");
+}
+
+static void __init test_std(void)
+{
+ struct pt_regs regs;
+ unsigned long a = 0x1234;
+ int stepped = -1;
+
+ init_pt_regs(&regs);
+ regs.gpr[3] = (unsigned long) &a;
+ regs.gpr[5] = 0x5678;
+
+ /* std r5, 0(r3) */
+ stepped = emulate_step(&regs, TEST_STD(5, 3, 0));
+	if (stepped == 1 && regs.gpr[5] == a)
+ show_result("std", "PASS");
+ else
+ show_result("std", "FAIL");
+}
+
+static void __init test_ldarx_stdcx(void)
+{
+ struct pt_regs regs;
+ unsigned long a = 0x1234;
+ int stepped = -1;
+ unsigned long cr0_eq = 0x1 << 29; /* eq bit of CR0 */
+
+ init_pt_regs(&regs);
+ asm volatile("mfcr %0" : "=r"(regs.ccr));
+
+
+ /*** ldarx ***/
+
+ regs.gpr[3] = (unsigned long) &a;
+ regs.gpr[4] = 0;
+ regs.gpr[5] = 0x5678;
+
+ /* ldarx r5, r3, r4, 0 */
+ stepped = emulate_step(&regs, TEST_LDARX(5, 3, 4, 0));
+
+	/*
+	 * Don't touch 'a' here. Touching 'a' can cause a load/store
+	 * of 'a', which would break the reservation and make the
+	 * subsequent stdcx. fail. Instead, compare against a
+	 * hardcoded value.
+	 */
+ if (stepped <= 0 || regs.gpr[5] != 0x1234) {
+ show_result("ldarx / stdcx.", "FAIL (ldarx)");
+ return;
+ }
+
+
+ /*** stdcx. ***/
+
+ regs.gpr[5] = 0x9ABC;
+
+ /* stdcx. r5, r3, r4 */
+ stepped = emulate_step(&regs, TEST_STDCX(5, 3, 4));
+
+	/*
+	 * Two possible scenarios indicate successful emulation
+	 * of stdcx.:
+	 * 1. Reservation is active and the store is performed. In
+	 *    this case the cr0.eq bit will be set to 1.
+	 * 2. Reservation is not active and the store is not
+	 *    performed. In this case the cr0.eq bit will be set to 0.
+	 */
+ if (stepped == 1 && ((regs.gpr[5] == a && (regs.ccr & cr0_eq))
+ || (regs.gpr[5] != a && !(regs.ccr & cr0_eq))))
+ show_result("ldarx / stdcx.", "PASS");
+ else
+ show_result("ldarx / stdcx.", "FAIL (stdcx.)");
+}
+
+#ifdef CONFIG_PPC_FPU
+static void __init test_lfsx_stfsx(void)
+{
+ struct pt_regs regs;
+ union {
+ float a;
+ int b;
+ } c;
+ int cached_b;
+ int stepped = -1;
+
+ init_pt_regs(&regs);
+
+
+ /*** lfsx ***/
+
+ c.a = 123.45;
+ cached_b = c.b;
+
+ regs.gpr[3] = (unsigned long) &c.a;
+ regs.gpr[4] = 0;
+
+ /* lfsx frt10, r3, r4 */
+ stepped = emulate_step(&regs, TEST_LFSX(10, 3, 4));
+
+ if (stepped == 1)
+ show_result("lfsx", "PASS");
+ else
+ show_result("lfsx", "FAIL");
+
+
+ /*** stfsx ***/
+
+ c.a = 678.91;
+
+ /* stfsx frs10, r3, r4 */
+ stepped = emulate_step(&regs, TEST_STFSX(10, 3, 4));
+
+ if (stepped == 1 && c.b == cached_b)
+ show_result("stfsx", "PASS");
+ else
+ show_result("stfsx", "FAIL");
+}
+
+static void __init test_lfdx_stfdx(void)
+{
+ struct pt_regs regs;
+ union {
+ double a;
+ long b;
+ } c;
+ long cached_b;
+ int stepped = -1;
+
+ init_pt_regs(&regs);
+
+
+ /*** lfdx ***/
+
+ c.a = 123456.78;
+ cached_b = c.b;
+
+ regs.gpr[3] = (unsigned long) &c.a;
+ regs.gpr[4] = 0;
+
+ /* lfdx frt10, r3, r4 */
+ stepped = emulate_step(&regs, TEST_LFDX(10, 3, 4));
+
+ if (stepped == 1)
+ show_result("lfdx", "PASS");
+ else
+ show_result("lfdx", "FAIL");
+
+
+ /*** stfdx ***/
+
+ c.a = 987654.32;
+
+ /* stfdx frs10, r3, r4 */
+ stepped = emulate_step(&regs, TEST_STFDX(10, 3, 4));
+
+ if (stepped == 1 && c.b == cached_b)
+ show_result("stfdx", "PASS");
+ else
+ show_result("stfdx", "FAIL");
+}
+#else
+static void __init test_lfsx_stfsx(void)
+{
+ show_result("lfsx", "SKIP (CONFIG_PPC_FPU is not set)");
+ show_result("stfsx", "SKIP (CONFIG_PPC_FPU is not set)");
+}
+
+static void __init test_lfdx_stfdx(void)
+{
+ show_result("lfdx", "SKIP (CONFIG_PPC_FPU is not set)");
+ show_result("stfdx", "SKIP (CONFIG_PPC_FPU is not set)");
+}
+#endif /* CONFIG_PPC_FPU */
+
+#ifdef CONFIG_ALTIVEC
+static void __init test_lvx_stvx(void)
+{
+ struct pt_regs regs;
+ union {
+ vector128 a;
+ u32 b[4];
+ } c;
+ u32 cached_b[4];
+ int stepped = -1;
+
+ init_pt_regs(&regs);
+
+
+ /*** lvx ***/
+
+ cached_b[0] = c.b[0] = 923745;
+ cached_b[1] = c.b[1] = 2139478;
+ cached_b[2] = c.b[2] = 9012;
+ cached_b[3] = c.b[3] = 982134;
+
+ regs.gpr[3] = (unsigned long) &c.a;
+ regs.gpr[4] = 0;
+
+ /* lvx vrt10, r3, r4 */
+ stepped = emulate_step(&regs, TEST_LVX(10, 3, 4));
+
+ if (stepped == 1)
+ show_result("lvx", "PASS");
+ else
+ show_result("lvx", "FAIL");
+
+
+ /*** stvx ***/
+
+ c.b[0] = 4987513;
+ c.b[1] = 84313948;
+ c.b[2] = 71;
+ c.b[3] = 498532;
+
+ /* stvx vrs10, r3, r4 */
+ stepped = emulate_step(&regs, TEST_STVX(10, 3, 4));
+
+ if (stepped == 1 && cached_b[0] == c.b[0] && cached_b[1] == c.b[1] &&
+ cached_b[2] == c.b[2] && cached_b[3] == c.b[3])
+ show_result("stvx", "PASS");
+ else
+ show_result("stvx", "FAIL");
+}
+#else
+static void __init test_lvx_stvx(void)
+{
+ show_result("lvx", "SKIP (CONFIG_ALTIVEC is not set)");
+ show_result("stvx", "SKIP (CONFIG_ALTIVEC is not set)");
+}
+#endif /* CONFIG_ALTIVEC */
+
+#ifdef CONFIG_VSX
+static void __init test_lxvd2x_stxvd2x(void)
+{
+ struct pt_regs regs;
+ union {
+ vector128 a;
+ u32 b[4];
+ } c;
+ u32 cached_b[4];
+ int stepped = -1;
+
+ init_pt_regs(&regs);
+
+
+ /*** lxvd2x ***/
+
+ cached_b[0] = c.b[0] = 18233;
+ cached_b[1] = c.b[1] = 34863571;
+ cached_b[2] = c.b[2] = 834;
+ cached_b[3] = c.b[3] = 6138911;
+
+ regs.gpr[3] = (unsigned long) &c.a;
+ regs.gpr[4] = 0;
+
+ /* lxvd2x vsr39, r3, r4 */
+ stepped = emulate_step(&regs, TEST_LXVD2X(39, 3, 4));
+
+ if (stepped == 1)
+ show_result("lxvd2x", "PASS");
+ else
+ show_result("lxvd2x", "FAIL");
+
+
+ /*** stxvd2x ***/
+
+ c.b[0] = 21379463;
+ c.b[1] = 87;
+ c.b[2] = 374234;
+ c.b[3] = 4;
+
+ /* stxvd2x vsr39, r3, r4 */
+ stepped = emulate_step(&regs, TEST_STXVD2X(39, 3, 4));
+
+ if (stepped == 1 && cached_b[0] == c.b[0] && cached_b[1] == c.b[1] &&
+ cached_b[2] == c.b[2] && cached_b[3] == c.b[3])
+ show_result("stxvd2x", "PASS");
+ else
+ show_result("stxvd2x", "FAIL");
+}
+#else
+static void __init test_lxvd2x_stxvd2x(void)
+{
+ show_result("lxvd2x", "SKIP (CONFIG_VSX is not set)");
+ show_result("stxvd2x", "SKIP (CONFIG_VSX is not set)");
+}
+#endif /* CONFIG_VSX */
+
+static int __init test_emulate_step(void)
+{
+ test_ld();
+ test_lwz();
+ test_lwzx();
+ test_std();
+ test_ldarx_stdcx();
+ test_lfsx_stfsx();
+ test_lfdx_stfdx();
+ test_lvx_stvx();
+ test_lxvd2x_stxvd2x();
+
+ return 0;
+}
+late_initcall(test_emulate_step);
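
When the test runs at late_initcall time, each show_result() call emits one line through pr_info() with the "emulate_step_test: " prefix and the instruction name padded to 14 characters. On a machine where all the optional features are present, the dmesg output should therefore look roughly like this (illustrative, derived from the format strings above):

emulate_step_test: ld             : PASS
emulate_step_test: lwz            : PASS
emulate_step_test: lwzx           : PASS
emulate_step_test: std            : PASS
emulate_step_test: ldarx / stdcx. : PASS
...
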
diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
deleted file mode 100644
index 9bd3a3dad78d..000000000000
--- a/arch/powerpc/lib/usercopy_64.c
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Functions which are too large to be inlined.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-#include <linux/module.h>
-#include <linux/uaccess.h>
-
-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
-{
- if (likely(access_ok(VERIFY_READ, from, n)))
- n = __copy_from_user(to, from, n);
- else
- memset(to, 0, n);
- return n;
-}
-
-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
-{
- if (likely(access_ok(VERIFY_WRITE, to, n)))
- n = __copy_to_user(to, from, n);
- return n;
-}
-
-unsigned long copy_in_user(void __user *to, const void __user *from,
- unsigned long n)
-{
- might_sleep();
- if (likely(access_ok(VERIFY_READ, from, n) &&
- access_ok(VERIFY_WRITE, to, n)))
- n =__copy_tofrom_user(to, from, n);
- return n;
-}
-
-EXPORT_SYMBOL(copy_from_user);
-EXPORT_SYMBOL(copy_to_user);
-EXPORT_SYMBOL(copy_in_user);
-
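
Deleting these out-of-line wrappers is safe because the architecture now selects ARCH_HAS_RAW_COPY_USER: the generic copy_from_user() performs the access_ok() check and zeroes the uncopied tail itself, so the arch only supplies raw_copy_from_user()/raw_copy_to_user(). A simplified sketch of the generic shape this relies on (modelled on include/linux/uaccess.h of the same era; not a verbatim copy):

#include <linux/string.h>
#include <linux/uaccess.h>

static inline unsigned long
sketch_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long res = n;		/* bytes not copied */

	if (likely(access_ok(VERIFY_READ, from, n)))
		res = raw_copy_from_user(to, from, n);
	if (unlikely(res))
		memset(to + (n - res), 0, res);	/* zero the uncopied tail */
	return res;
}
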
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 6aa3b76aa0d6..c22f207aa656 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -356,25 +356,48 @@ static void early_check_vec5(void)
unsigned long root, chosen;
int size;
const u8 *vec5;
+ u8 mmu_supported;
root = of_get_flat_dt_root();
chosen = of_get_flat_dt_subnode_by_name(root, "chosen");
- if (chosen == -FDT_ERR_NOTFOUND)
+ if (chosen == -FDT_ERR_NOTFOUND) {
+ cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
return;
+ }
vec5 = of_get_flat_dt_prop(chosen, "ibm,architecture-vec-5", &size);
- if (!vec5)
+ if (!vec5) {
+ cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
return;
- if (size <= OV5_INDX(OV5_MMU_RADIX_300) ||
- !(vec5[OV5_INDX(OV5_MMU_RADIX_300)] & OV5_FEAT(OV5_MMU_RADIX_300)))
- /* Hypervisor doesn't support radix */
+ }
+ if (size <= OV5_INDX(OV5_MMU_SUPPORT)) {
cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
+ return;
+ }
+
+ /* Check for supported configuration */
+ mmu_supported = vec5[OV5_INDX(OV5_MMU_SUPPORT)] &
+ OV5_FEAT(OV5_MMU_SUPPORT);
+ if (mmu_supported == OV5_FEAT(OV5_MMU_RADIX)) {
+ /* Hypervisor only supports radix - check enabled && GTSE */
+ if (!early_radix_enabled()) {
+ pr_warn("WARNING: Ignoring cmdline option disable_radix\n");
+ }
+ if (!(vec5[OV5_INDX(OV5_RADIX_GTSE)] &
+ OV5_FEAT(OV5_RADIX_GTSE))) {
+ pr_warn("WARNING: Hypervisor doesn't support RADIX with GTSE\n");
+ }
+ /* Do radix anyway - the hypervisor said we had to */
+ cur_cpu_spec->mmu_features |= MMU_FTR_TYPE_RADIX;
+ } else if (mmu_supported == OV5_FEAT(OV5_MMU_HASH)) {
+ /* Hypervisor only supports hash - disable radix */
+ cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
+ }
}
void __init mmu_early_init_devtree(void)
{
/* Disable radix mode based on kernel command line. */
- /* We don't yet have the machinery to do radix as a guest. */
- if (disable_radix || !(mfmsr() & MSR_HV))
+ if (disable_radix)
cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
/*
@@ -383,7 +406,7 @@ void __init mmu_early_init_devtree(void)
* even though the ibm,architecture-vec-5 property created by
* skiboot doesn't have the necessary bits set.
*/
- if (early_radix_enabled() && !(mfmsr() & MSR_HV))
+ if (!(mfmsr() & MSR_HV))
early_check_vec5();
if (early_radix_enabled())
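
The early_check_vec5() logic depends on how option-vector-5 constants are encoded. The assumed convention (from arch/powerpc/include/asm/prom.h): each OV5_* constant packs the byte index into the vector in its high bits and the feature mask in its low byte, so OV5_INDX() and OV5_FEAT() split it apart. A sketch under that assumption:

#define SKETCH_OV5_INDX(x)	((x) >> 8)	/* byte offset into vec5 */
#define SKETCH_OV5_FEAT(x)	((x) & 0xff)	/* bit mask within that byte */

/* e.g. testing one feature, bounds-checked as early_check_vec5() does: */
static int sketch_ov5_has(const unsigned char *vec5, int size, unsigned int f)
{
	if (size <= SKETCH_OV5_INDX(f))
		return 0;
	return (vec5[SKETCH_OV5_INDX(f)] & SKETCH_OV5_FEAT(f)) != 0;
}
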
diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
index 2a590a98e652..c28165d8970b 100644
--- a/arch/powerpc/mm/pgtable-radix.c
+++ b/arch/powerpc/mm/pgtable-radix.c
@@ -186,6 +186,10 @@ static void __init radix_init_pgtable(void)
*/
register_process_table(__pa(process_tb), 0, PRTB_SIZE_SHIFT - 12);
pr_info("Process table %p and radix root for kernel: %p\n", process_tb, init_mm.pgd);
+ asm volatile("ptesync" : : : "memory");
+ asm volatile(PPC_TLBIE_5(%0,%1,2,1,1) : :
+ "r" (TLBIEL_INVAL_SET_LPID), "r" (0));
+ asm volatile("eieio; tlbsync; ptesync" : : : "memory");
}
static void __init radix_init_partition_table(void)
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 595dd718ea87..2ff13249f87a 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -188,6 +188,8 @@ static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp)
sdsync = POWER7P_MMCRA_SDAR_VALID;
else if (ppmu->flags & PPMU_ALT_SIPR)
sdsync = POWER6_MMCRA_SDSYNC;
+ else if (ppmu->flags & PPMU_NO_SIAR)
+ sdsync = MMCRA_SAMPLE_ENABLE;
else
sdsync = MMCRA_SDSYNC;
diff --git a/arch/powerpc/perf/isa207-common.c b/arch/powerpc/perf/isa207-common.c
index e79fb5fb817d..cd951fd231c4 100644
--- a/arch/powerpc/perf/isa207-common.c
+++ b/arch/powerpc/perf/isa207-common.c
@@ -65,12 +65,41 @@ static bool is_event_valid(u64 event)
return !(event & ~valid_mask);
}
-static u64 mmcra_sdar_mode(u64 event)
+static inline bool is_event_marked(u64 event)
{
- if (cpu_has_feature(CPU_FTR_ARCH_300) && !cpu_has_feature(CPU_FTR_POWER9_DD1))
- return p9_SDAR_MODE(event) << MMCRA_SDAR_MODE_SHIFT;
+ if (event & EVENT_IS_MARKED)
+ return true;
+
+ return false;
+}
- return MMCRA_SDAR_MODE_TLB;
+static void mmcra_sdar_mode(u64 event, unsigned long *mmcra)
+{
+	/*
+	 * MMCRA[SDAR_MODE] specifies how the SDAR should be updated in
+	 * continuous sampling mode.
+	 *
+	 * In the case of Power8:
+	 * MMCRA[SDAR_MODE] will be programmed as "0b01" for continuous
+	 * sampling mode and will be unchanged when setting MMCRA[63]
+	 * (marked events).
+	 *
+	 * In the case of Power9:
+	 * Marked event: MMCRA[SDAR_MODE] will be set to 0b00 ('No Updates'),
+	 * likewise if the group already has any marked events.
+	 * Non-marked events (for DD1):
+	 * MMCRA[SDAR_MODE] will be set to 0b01.
+	 * Otherwise:
+	 * MMCRA[SDAR_MODE] will be set from the event code.
+	 */
+ if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+ if (is_event_marked(event) || (*mmcra & MMCRA_SAMPLE_ENABLE))
+ *mmcra &= MMCRA_SDAR_MODE_NO_UPDATES;
+ else if (!cpu_has_feature(CPU_FTR_POWER9_DD1))
+ *mmcra |= p9_SDAR_MODE(event) << MMCRA_SDAR_MODE_SHIFT;
+ else if (cpu_has_feature(CPU_FTR_POWER9_DD1))
+ *mmcra |= MMCRA_SDAR_MODE_TLB;
+ } else
+ *mmcra |= MMCRA_SDAR_MODE_TLB;
}
static u64 thresh_cmp_val(u64 value)
@@ -180,7 +209,7 @@ int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)
value |= CNST_L1_QUAL_VAL(cache);
}
- if (event & EVENT_IS_MARKED) {
+ if (is_event_marked(event)) {
mask |= CNST_SAMPLE_MASK;
value |= CNST_SAMPLE_VAL(event >> EVENT_SAMPLE_SHIFT);
}
@@ -276,7 +305,7 @@ int isa207_compute_mmcr(u64 event[], int n_ev,
}
 	/* Program the SDAR_MODE field of MMCRA for this event */
- mmcra |= mmcra_sdar_mode(event[i]);
+ mmcra_sdar_mode(event[i], &mmcra);
if (event[i] & EVENT_IS_L1) {
cache = event[i] >> EVENT_CACHE_SEL_SHIFT;
@@ -285,7 +314,7 @@ int isa207_compute_mmcr(u64 event[], int n_ev,
mmcr1 |= (cache & 1) << MMCR1_DC_QUAL_SHIFT;
}
- if (event[i] & EVENT_IS_MARKED) {
+ if (is_event_marked(event[i])) {
mmcra |= MMCRA_SAMPLE_ENABLE;
val = (event[i] >> EVENT_SAMPLE_SHIFT) & EVENT_SAMPLE_MASK;
diff --git a/arch/powerpc/perf/isa207-common.h b/arch/powerpc/perf/isa207-common.h
index cf9bd8990159..899210f14ee4 100644
--- a/arch/powerpc/perf/isa207-common.h
+++ b/arch/powerpc/perf/isa207-common.h
@@ -246,6 +246,7 @@
#define MMCRA_THR_CMP_SHIFT 32
#define MMCRA_SDAR_MODE_SHIFT 42
#define MMCRA_SDAR_MODE_TLB (1ull << MMCRA_SDAR_MODE_SHIFT)
+#define MMCRA_SDAR_MODE_NO_UPDATES ~(0x3ull << MMCRA_SDAR_MODE_SHIFT)
#define MMCRA_IFM_SHIFT 30
/* MMCR1 Threshold Compare bit constant for power9 */
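
MMCRA[SDAR_MODE] is a two-bit field at bit 42, so the constants above are applied differently: MMCRA_SDAR_MODE_TLB is OR-ed in to select mode 0b01, while MMCRA_SDAR_MODE_NO_UPDATES is an AND-mask that clears the field to 0b00, matching its use via &= in mmcra_sdar_mode(). A small sketch of the two operations (local copies of the defines, values taken from above):

#define SDAR_MODE_SHIFT		42
#define SDAR_MODE_TLB		(1ull << SDAR_MODE_SHIFT)
#define SDAR_MODE_NO_UPDATES	~(0x3ull << SDAR_MODE_SHIFT)

static unsigned long sketch_sdar_mode(unsigned long mmcra, int marked)
{
	if (marked)
		return mmcra & SDAR_MODE_NO_UPDATES;	/* field -> 0b00 */
	return mmcra | SDAR_MODE_TLB;			/* field -> 0b01 */
}
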
diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S
index 6693f75e93d1..da8a0f7a035c 100644
--- a/arch/powerpc/platforms/powernv/opal-wrappers.S
+++ b/arch/powerpc/platforms/powernv/opal-wrappers.S
@@ -39,8 +39,8 @@ opal_tracepoint_refcount:
BEGIN_FTR_SECTION; \
b 1f; \
END_FTR_SECTION(0, 1); \
- ld r12,opal_tracepoint_refcount@toc(r2); \
- cmpdi r12,0; \
+ ld r11,opal_tracepoint_refcount@toc(r2); \
+ cmpdi r11,0; \
bne- LABEL; \
1:
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index 86d9fde93c17..e0f856bfbfe8 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -395,7 +395,6 @@ static int opal_recover_mce(struct pt_regs *regs,
struct machine_check_event *evt)
{
int recovered = 0;
- uint64_t ea = get_mce_fault_addr(evt);
if (!(regs->msr & MSR_RI)) {
/* If MSR_RI isn't set, we cannot recover */
@@ -404,26 +403,18 @@ static int opal_recover_mce(struct pt_regs *regs,
} else if (evt->disposition == MCE_DISPOSITION_RECOVERED) {
/* Platform corrected itself */
recovered = 1;
- } else if (ea && !is_kernel_addr(ea)) {
+ } else if (evt->severity == MCE_SEV_FATAL) {
+ /* Fatal machine check */
+ pr_err("Machine check interrupt is fatal\n");
+ recovered = 0;
+ } else if ((evt->severity == MCE_SEV_ERROR_SYNC) &&
+ (user_mode(regs) && !is_global_init(current))) {
/*
- * Faulting address is not in kernel text. We should be fine.
- * We need to find which process uses this address.
 		 * For now, kill the task if we have received an exception
 		 * when in userspace.
 		 *
 		 * TODO: Queue up this address for hwpoisoning later.
*/
- if (user_mode(regs) && !is_global_init(current)) {
- _exception(SIGBUS, regs, BUS_MCEERR_AR, regs->nip);
- recovered = 1;
- } else
- recovered = 0;
- } else if (user_mode(regs) && !is_global_init(current) &&
- evt->severity == MCE_SEV_ERROR_SYNC) {
- /*
- * If we have received a synchronous error when in userspace
- * kill the task.
- */
_exception(SIGBUS, regs, BUS_MCEERR_AR, regs->nip);
recovered = 1;
}
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 6901a06da2f9..e36738291c32 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -1775,17 +1775,20 @@ static u64 pnv_pci_ioda_dma_get_required_mask(struct pci_dev *pdev)
}
static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe,
- struct pci_bus *bus)
+ struct pci_bus *bus,
+ bool add_to_group)
{
struct pci_dev *dev;
list_for_each_entry(dev, &bus->devices, bus_list) {
set_iommu_table_base(&dev->dev, pe->table_group.tables[0]);
set_dma_offset(&dev->dev, pe->tce_bypass_base);
- iommu_add_device(&dev->dev);
+ if (add_to_group)
+ iommu_add_device(&dev->dev);
if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate)
- pnv_ioda_setup_bus_dma(pe, dev->subordinate);
+ pnv_ioda_setup_bus_dma(pe, dev->subordinate,
+ add_to_group);
}
}
@@ -2191,7 +2194,7 @@ found:
set_iommu_table_base(&pe->pdev->dev, tbl);
iommu_add_device(&pe->pdev->dev);
} else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))
- pnv_ioda_setup_bus_dma(pe, pe->pbus);
+ pnv_ioda_setup_bus_dma(pe, pe->pbus, true);
return;
fail:
@@ -2426,6 +2429,8 @@ static void pnv_ioda2_take_ownership(struct iommu_table_group *table_group)
pnv_pci_ioda2_set_bypass(pe, false);
pnv_pci_ioda2_unset_window(&pe->table_group, 0);
+ if (pe->pbus)
+ pnv_ioda_setup_bus_dma(pe, pe->pbus, false);
pnv_ioda2_table_free(tbl);
}
@@ -2435,6 +2440,8 @@ static void pnv_ioda2_release_ownership(struct iommu_table_group *table_group)
table_group);
pnv_pci_ioda2_setup_default_config(pe);
+ if (pe->pbus)
+ pnv_ioda_setup_bus_dma(pe, pe->pbus, false);
}
static struct iommu_table_group_ops pnv_pci_ioda2_ops = {
@@ -2624,6 +2631,9 @@ static long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
level_shift = entries_shift + 3;
level_shift = max_t(unsigned, level_shift, PAGE_SHIFT);
+ if ((level_shift - 3) * levels + page_shift >= 60)
+ return -EINVAL;
+
/* Allocate TCE table */
addr = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift,
levels, tce_table_size, &offset, &total_allocated);
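
The bound added above limits how many address bits a multi-level TCE table can consume. Each level is a page of 8-byte TCEs, so it indexes (level_shift - 3) bits, plus page_shift bits within the final page; the sum must stay below 60, presumably the addressable-bit limit the window supports. For example, with 64K table levels (level_shift = 16, i.e. 13 bits per level) and 64K pages, five levels give 5 * 13 + 16 = 81 and are refused, while two levels give 2 * 13 + 16 = 42 and are accepted. The check in isolation:

/* Sketch of the bound being enforced (names illustrative). */
static int sketch_tce_bits_ok(unsigned int level_shift,
			      unsigned int levels,
			      unsigned int page_shift)
{
	/* each level is one page of 8-byte TCEs: (level_shift - 3) bits */
	return (level_shift - 3) * levels + page_shift < 60;
}
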
@@ -2728,7 +2738,7 @@ static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
if (pe->flags & PNV_IODA_PE_DEV)
iommu_add_device(&pe->pdev->dev);
else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))
- pnv_ioda_setup_bus_dma(pe, pe->pbus);
+ pnv_ioda_setup_bus_dma(pe, pe->pbus, true);
}
#ifdef CONFIG_PCI_MSI
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 251060cf1713..8b1fe895daa3 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -751,7 +751,9 @@ void __init hpte_init_pseries(void)
mmu_hash_ops.flush_hash_range = pSeries_lpar_flush_hash_range;
mmu_hash_ops.hpte_clear_all = pseries_hpte_clear_all;
mmu_hash_ops.hugepage_invalidate = pSeries_lpar_hugepage_invalidate;
- mmu_hash_ops.resize_hpt = pseries_lpar_resize_hpt;
+
+ if (firmware_has_feature(FW_FEATURE_HPT_RESIZE))
+ mmu_hash_ops.resize_hpt = pseries_lpar_resize_hpt;
}
void radix_init_pseries(void)
diff --git a/arch/powerpc/purgatory/trampoline.S b/arch/powerpc/purgatory/trampoline.S
index f9760ccf4032..3696ea6c4826 100644
--- a/arch/powerpc/purgatory/trampoline.S
+++ b/arch/powerpc/purgatory/trampoline.S
@@ -116,13 +116,13 @@ dt_offset:
.data
.balign 8
-.globl sha256_digest
-sha256_digest:
+.globl purgatory_sha256_digest
+purgatory_sha256_digest:
.skip 32
- .size sha256_digest, . - sha256_digest
+ .size purgatory_sha256_digest, . - purgatory_sha256_digest
.balign 8
-.globl sha_regions
-sha_regions:
+.globl purgatory_sha_regions
+purgatory_sha_regions:
.skip 8 * 2 * 16
- .size sha_regions, . - sha_regions
+ .size purgatory_sha_regions, . - purgatory_sha_regions
diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c
index ada29eaed6e2..f523ac883150 100644
--- a/arch/powerpc/sysdev/axonram.c
+++ b/arch/powerpc/sysdev/axonram.c
@@ -274,7 +274,9 @@ failed:
if (bank->disk->major > 0)
unregister_blkdev(bank->disk->major,
bank->disk->disk_name);
- del_gendisk(bank->disk);
+ if (bank->disk->flags & GENHD_FL_UP)
+ del_gendisk(bank->disk);
+ put_disk(bank->disk);
}
device->dev.platform_data = NULL;
if (bank->io_addr != 0)
@@ -299,6 +301,7 @@ axon_ram_remove(struct platform_device *device)
device_remove_file(&device->dev, &dev_attr_ecc);
free_irq(bank->irq_id, device);
del_gendisk(bank->disk);
+ put_disk(bank->disk);
iounmap((void __iomem *) bank->io_addr);
kfree(bank);
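
Both axonram hunks follow the usual gendisk teardown rules: del_gendisk() is only legal once add_disk() has run (signalled here by GENHD_FL_UP), and put_disk() must always be called to drop the reference taken by alloc_disk(), or the gendisk leaks. The pattern in isolation (kernel context, error handling elided):

#include <linux/genhd.h>

static void sketch_disk_teardown(struct gendisk *disk)
{
	if (disk->flags & GENHD_FL_UP)	/* add_disk() already ran */
		del_gendisk(disk);
	put_disk(disk);			/* balances alloc_disk() */
}
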
diff --git a/arch/powerpc/sysdev/xics/icp-opal.c b/arch/powerpc/sysdev/xics/icp-opal.c
index f9670eabfcfa..b53f80f0b4d8 100644
--- a/arch/powerpc/sysdev/xics/icp-opal.c
+++ b/arch/powerpc/sysdev/xics/icp-opal.c
@@ -91,6 +91,16 @@ static unsigned int icp_opal_get_irq(void)
static void icp_opal_set_cpu_priority(unsigned char cppr)
{
+	/*
+	 * Here be dragons. The caller has asked to allow only IPIs and not
+	 * external interrupts. But OPAL XIVE doesn't support that. So
+	 * instead of allowing no interrupts, allow all. That's still not
+	 * right, but currently the only caller who does this is
+	 * xics_migrate_irqs_away() and it works in that case.
+	 */
+ if (cppr >= DEFAULT_PRIORITY)
+ cppr = LOWEST_PRIORITY;
+
xics_set_base_cppr(cppr);
opal_int_set_cppr(cppr);
iosync();
diff --git a/arch/powerpc/sysdev/xics/xics-common.c b/arch/powerpc/sysdev/xics/xics-common.c
index 69d858e51ac7..23efe4e42172 100644
--- a/arch/powerpc/sysdev/xics/xics-common.c
+++ b/arch/powerpc/sysdev/xics/xics-common.c
@@ -20,6 +20,7 @@
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/delay.h>
#include <asm/prom.h>
#include <asm/io.h>
@@ -198,9 +199,6 @@ void xics_migrate_irqs_away(void)
/* Remove ourselves from the global interrupt queue */
xics_set_cpu_giq(xics_default_distrib_server, 0);
- /* Allow IPIs again... */
- icp_ops->set_priority(DEFAULT_PRIORITY);
-
for_each_irq_desc(virq, desc) {
struct irq_chip *chip;
long server;
@@ -255,6 +253,19 @@ void xics_migrate_irqs_away(void)
unlock:
raw_spin_unlock_irqrestore(&desc->lock, flags);
}
+
+	/* Allow "sufficient" time to drop any in-flight IRQs */
+ mdelay(5);
+
+	/*
+	 * Allow IPIs again. This is done at the very end, after migrating
+	 * all interrupts; the expectation is that we'll only be woken up
+	 * by an IPI beyond this point, but leave externals masked just to
+	 * be safe. If we're using icp-opal this may actually allow all
+	 * interrupts anyway, but that should be OK.
+	 */
+ icp_ops->set_priority(DEFAULT_PRIORITY);
+
}
#endif /* CONFIG_HOTPLUG_CPU */