Diffstat (limited to 'arch/powerpc/mm/fault.c')
 arch/powerpc/mm/fault.c | 17 ++++++++++++++++-
 1 file changed, 16 insertions(+), 1 deletion(-)
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index d0b137d96df1..6fd30ac7d14a 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -391,6 +391,20 @@ good_area:
 	if (is_exec) {
 		/*
+		 * An execution fault + no execute ?
+		 *
+		 * On CPUs that don't have CPU_FTR_COHERENT_ICACHE we
+		 * deliberately create NX mappings, and use the fault to do the
+		 * cache flush. This is usually handled in hash_page_do_lazy_icache()
+		 * but we could end up here if that races with a concurrent PTE
+		 * update. In that case we need to fall through here to the VMA
+		 * check below.
+		 */
+		if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE) &&
+		    (regs->msr & SRR1_ISI_N_OR_G))
+			goto bad_area;
+
+		/*
 		 * Allow execution from readable areas if the MMU does not
 		 * provide separate controls over reading and executing.
 		 *
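
Note on the comment above: on CPUs without CPU_FTR_COHERENT_ICACHE the kernel deliberately hashes executable pages in as no-execute and uses the resulting instruction-storage fault as the point where it flushes the caches before granting execute permission. The sketch below only illustrates that mechanism; it is not the code in hash_page_do_lazy_icache(). The helper name lazy_icache_fixup() is invented for illustration, and it assumes the usual powerpc hash-MMU symbols (PG_arch_1, HPTE_R_N, flush_dcache_icache_page()).

/*
 * Illustrative sketch only, not the kernel's actual code: the
 * "lazy icache flush via NX mappings" idea referred to above.
 */
#include <linux/page-flags.h>	/* PG_arch_1, test_bit(), set_bit() */
#include <asm/cacheflush.h>	/* flush_dcache_icache_page() */
/* HPTE_R_N (the HPTE no-execute bit) comes from the hash-MMU headers. */

static unsigned int lazy_icache_fixup(unsigned int pp, struct page *page,
				      int is_exec_fault)
{
	/* PG_arch_1 records "icache already flushed for this page" */
	if (!test_bit(PG_arch_1, &page->flags)) {
		if (is_exec_fault) {
			/* First execution fault: flush, then allow exec */
			flush_dcache_icache_page(page);
			set_bit(PG_arch_1, &page->flags);
		} else {
			/* Plain data access: keep the mapping no-execute */
			pp |= HPTE_R_N;
		}
	}
	return pp;
}

On coherent-icache CPUs no such NX-for-flush mappings are ever created, so an ISI with SRR1_ISI_N_OR_G set can only be a genuine no-execute violation; that is why the new check above sends those straight to bad_area instead of falling through to the VMA check.
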
@@ -404,6 +418,7 @@ good_area:
 		    (cpu_has_feature(CPU_FTR_NOEXECUTE) ||
 		     !(vma->vm_flags & (VM_READ | VM_WRITE))))
 			goto bad_area;
+
 #ifdef CONFIG_PPC_STD_MMU
 		/*
 		 * protfault should only happen due to us
@@ -512,7 +527,7 @@ void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
 	/* Are we prepared to handle this fault? */
 	if ((entry = search_exception_tables(regs->nip)) != NULL) {
-		regs->nip = entry->fixup;
+		regs->nip = extable_fixup(entry);
 		return;
 	}
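
The last hunk stops reading entry->fixup directly and goes through extable_fixup() instead, which accompanies the switch to relative exception tables: each entry stores a 32-bit offset from its own address rather than an absolute pointer, halving the table on 64-bit and avoiding load-time relocations. A minimal sketch of that layout and helper, assuming the relative-entry format (not copied from the tree):

/* Sketch, assuming relative exception-table entries: each field holds a
 * 32-bit offset measured from the address of the field itself. */
struct exception_table_entry {
	int insn;	/* offset to the instruction that may fault */
	int fixup;	/* offset to the fixup code to branch to */
};

static inline unsigned long extable_fixup(const struct exception_table_entry *x)
{
	/* Turn the self-relative offset back into an absolute address */
	return (unsigned long)&x->fixup + x->fixup;
}

With a helper like this in place, bad_page_fault() no longer needs to know whether the table stores absolute pointers or relative offsets.
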