path: root/arch/powerpc/kernel/smp.c
author		Michael Ellerman <mpe@ellerman.id.au>	2018-05-02 15:07:27 +0200
committer	Michael Ellerman <mpe@ellerman.id.au>	2018-06-03 12:43:43 +0200
commit	6ba55716a24f5f399ad4d37685e4bb721f8e6dd5 (patch)
tree	2197f84b380924e2e5190734c3de32d9ad146d01 /arch/powerpc/kernel/smp.c
parent	powerpc/64: Save stack pointer when we hard disable interrupts (diff)
powerpc/nmi: Add an API for sending "safe" NMIs
Currently the options we have for sending NMIs are not necessarily safe; that is, they can potentially interrupt a CPU in a non-recoverable region of code, meaning the kernel must then panic().

But we'd like to use smp_send_nmi_ipi() to do cross-CPU calls in situations where we don't want to risk a panic(), because it doesn't have the requirement that interrupts must be enabled like smp_call_function().

So add an API for the caller to indicate that it wants to use the NMI infrastructure but doesn't want to do anything "unsafe". Currently that is implemented by not actually calling cause_nmi_ipi(), instead falling back to an ordinary IPI. In future we can pass the safe parameter down to cause_nmi_ipi() and the individual backends can take it into account before deciding what to do.

Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Reviewed-by: Nicholas Piggin <npiggin@gmail.com>
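As a rough usage sketch (hypothetical, not part of this patch): a caller that wants to run a function on another CPU without risking a panic() could use the new smp_send_safe_nmi_ipi() along these lines. The handler and caller names are illustrative only, and the return-value convention (nonzero on success) is assumed from the existing smp_send_nmi_ipi() implementation.

/*
 * Hypothetical usage sketch, not part of this patch: dump a remote
 * CPU's registers via the new "safe" variant, which currently falls
 * back to an ordinary IPI rather than calling cause_nmi_ipi(), so the
 * target is never interrupted in a non-recoverable region of code.
 */
#include <linux/smp.h>
#include <linux/printk.h>
#include <linux/sched/debug.h>	/* show_regs() */

static void dump_regs_nmi_handler(struct pt_regs *regs)
{
	show_regs(regs);	/* runs on the target CPU */
}

static void dump_remote_cpu_regs(int cpu)
{
	/*
	 * delay_us = 1000000: give the target up to 1s to enter the
	 * handler. The return convention (nonzero on success) is
	 * assumed from the existing smp_send_nmi_ipi() code.
	 */
	if (!smp_send_safe_nmi_ipi(cpu, dump_regs_nmi_handler, 1000000))
		pr_warn("CPU %d did not enter the NMI handler\n", cpu);
}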
Diffstat (limited to 'arch/powerpc/kernel/smp.c')
-rw-r--r--	arch/powerpc/kernel/smp.c	20
1 file changed, 15 insertions(+), 5 deletions(-)
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index b009a562c76b..5eadfffabe35 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -430,9 +430,9 @@ out:
 	return ret;
 }
 
-static void do_smp_send_nmi_ipi(int cpu)
+static void do_smp_send_nmi_ipi(int cpu, bool safe)
 {
-	if (smp_ops->cause_nmi_ipi && smp_ops->cause_nmi_ipi(cpu))
+	if (!safe && smp_ops->cause_nmi_ipi && smp_ops->cause_nmi_ipi(cpu))
 		return;
 
 	if (cpu >= 0) {
@@ -472,7 +472,7 @@ void smp_flush_nmi_ipi(u64 delay_us)
  * - delay_us > 0 is the delay before giving up waiting for targets to
  *   enter the handler, == 0 specifies indefinite delay.
  */
-int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
+int __smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us, bool safe)
 {
 	unsigned long flags;
 	int me = raw_smp_processor_id();
@@ -505,7 +505,7 @@ int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
 	nmi_ipi_busy_count++;
 	nmi_ipi_unlock();
 
-	do_smp_send_nmi_ipi(cpu);
+	do_smp_send_nmi_ipi(cpu, safe);
 
 	while (!cpumask_empty(&nmi_ipi_pending_mask)) {
 		udelay(1);
@@ -527,6 +527,16 @@ int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
 
 	return ret;
 }
+
+int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
+{
+	return __smp_send_nmi_ipi(cpu, fn, delay_us, false);
+}
+
+int smp_send_safe_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
+{
+	return __smp_send_nmi_ipi(cpu, fn, delay_us, true);
+}
 #endif /* CONFIG_NMI_IPI */
 
 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
@@ -570,7 +580,7 @@ void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
 			 * entire NMI dance and waiting for
 			 * cpus to clear pending mask, etc.
 			 */
-			do_smp_send_nmi_ipi(cpu);
+			do_smp_send_nmi_ipi(cpu, false);
 		}
 	}
 }