Diffstat (limited to 'linux-user/signal.c')
-rw-r--r--  linux-user/signal.c  129
1 file changed, 110 insertions, 19 deletions
diff --git a/linux-user/signal.c b/linux-user/signal.c
index 14d8fdfde1..81c45bfce9 100644
--- a/linux-user/signal.c
+++ b/linux-user/signal.c
@@ -19,6 +19,7 @@
#include "qemu/osdep.h"
#include "qemu/bitops.h"
#include "exec/gdbstub.h"
+#include "hw/core/tcg-cpu-ops.h"
#include <sys/ucontext.h>
#include <sys/resource.h>
@@ -29,6 +30,7 @@
#include "loader.h"
#include "trace.h"
#include "signal-common.h"
+#include "host-signal.h"
static struct target_sigaction sigact_table[TARGET_NSIG];
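The new "host-signal.h" include resolves to a per-host header that wraps the host's ucontext layout; host_signal_pc() and host_signal_write() from it are used in host_signal_handler() below. As a rough sketch of what such a header supplies, assuming an x86_64 Linux host (illustrative, not the verbatim QEMU header; <sys/ucontext.h> is already included by signal.c):

/* Sketch of per-host signal-context accessors, x86_64 Linux assumed. */
static inline uintptr_t host_signal_pc(ucontext_t *uc)
{
    return uc->uc_mcontext.gregs[REG_RIP];
}

static inline bool host_signal_write(siginfo_t *info, ucontext_t *uc)
{
    /* On x86, bit 1 of the page-fault error code is set for writes. */
    return uc->uc_mcontext.gregs[REG_ERR] & 0x2;
}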
@@ -686,9 +688,38 @@ void force_sigsegv(int oldsig)
     }
     force_sig(TARGET_SIGSEGV);
 }
-
 #endif

+void cpu_loop_exit_sigsegv(CPUState *cpu, target_ulong addr,
+                           MMUAccessType access_type, bool maperr, uintptr_t ra)
+{
+    const struct TCGCPUOps *tcg_ops = CPU_GET_CLASS(cpu)->tcg_ops;
+
+    if (tcg_ops->record_sigsegv) {
+        tcg_ops->record_sigsegv(cpu, addr, access_type, maperr, ra);
+    }
+
+    force_sig_fault(TARGET_SIGSEGV,
+                    maperr ? TARGET_SEGV_MAPERR : TARGET_SEGV_ACCERR,
+                    addr);
+    cpu->exception_index = EXCP_INTERRUPT;
+    cpu_loop_exit_restore(cpu, ra);
+}
+
+void cpu_loop_exit_sigbus(CPUState *cpu, target_ulong addr,
+                          MMUAccessType access_type, uintptr_t ra)
+{
+    const struct TCGCPUOps *tcg_ops = CPU_GET_CLASS(cpu)->tcg_ops;
+
+    if (tcg_ops->record_sigbus) {
+        tcg_ops->record_sigbus(cpu, addr, access_type, ra);
+    }
+
+    force_sig_fault(TARGET_SIGBUS, TARGET_BUS_ADRALN, addr);
+    cpu->exception_index = EXCP_INTERRUPT;
+    cpu_loop_exit_restore(cpu, ra);
+}
+
 /* abort execution with signal */
 static void QEMU_NORETURN dump_core_and_abort(int target_sig)
 {
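The two new entry points give the target front end a chance, via the optional TCGCPUOps hooks, to record architectural fault state before the guest signal is queued and the CPU loop is exited. As a rough illustration of how a target might wire the hook (not part of this patch; the "Demo" names and env fields are placeholders):

#include "hw/core/tcg-cpu-ops.h"

/* Hypothetical target hook: stash fault details for later signal delivery. */
static void demo_cpu_record_sigsegv(CPUState *cs, vaddr addr,
                                    MMUAccessType access_type,
                                    bool maperr, uintptr_t ra)
{
    DemoCPU *cpu = DEMO_CPU(cs);                 /* placeholder cast macro */

    cpu->env.fault_address = addr;               /* placeholder env fields */
    cpu->env.fault_is_write = (access_type == MMU_DATA_STORE);
    /* cpu_loop_exit_sigsegv() raises TARGET_SIGSEGV after this returns. */
}

static const struct TCGCPUOps demo_tcg_ops = {
    /* ...translation hooks elided... */
    .record_sigsegv = demo_cpu_record_sigsegv,
};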
@@ -769,41 +800,101 @@ static inline void rewind_if_in_safe_syscall(void *puc)
 }
 #endif

-static void host_signal_handler(int host_signum, siginfo_t *info,
-                                void *puc)
+static void host_signal_handler(int host_sig, siginfo_t *info, void *puc)
 {
     CPUArchState *env = thread_cpu->env_ptr;
     CPUState *cpu = env_cpu(env);
     TaskState *ts = cpu->opaque;
-
-    int sig;
     target_siginfo_t tinfo;
     ucontext_t *uc = puc;
     struct emulated_sigtable *k;
+    int guest_sig;
+    uintptr_t pc = 0;
+    bool sync_sig = false;
+
+    /*
+     * Non-spoofed SIGSEGV and SIGBUS are synchronous, and need special
+     * handling wrt signal blocking and unwinding.
+     */
+    if ((host_sig == SIGSEGV || host_sig == SIGBUS) && info->si_code > 0) {
+        MMUAccessType access_type;
+        uintptr_t host_addr;
+        abi_ptr guest_addr;
+        bool is_write;
+
+        host_addr = (uintptr_t)info->si_addr;
+
+        /*
+         * Convert forcefully to guest address space: addresses outside
+         * reserved_va are still valid to report via SEGV_MAPERR.
+         */
+        guest_addr = h2g_nocheck(host_addr);

-    /* the CPU emulator uses some host signals to detect exceptions,
-       we forward to it some signals */
-    if ((host_signum == SIGSEGV || host_signum == SIGBUS)
-        && info->si_code > 0) {
-        if (cpu_signal_handler(host_signum, info, puc))
-            return;
+        pc = host_signal_pc(uc);
+        is_write = host_signal_write(info, uc);
+        access_type = adjust_signal_pc(&pc, is_write);
+
+        if (host_sig == SIGSEGV) {
+            bool maperr = true;
+
+            if (info->si_code == SEGV_ACCERR && h2g_valid(host_addr)) {
+                /* If this was a write to a TB protected page, restart. */
+                if (is_write &&
+                    handle_sigsegv_accerr_write(cpu, &uc->uc_sigmask,
+                                                pc, guest_addr)) {
+                    return;
+                }
+
+                /*
+                 * With reserved_va, the whole address space is PROT_NONE,
+                 * which means that we may get ACCERR when we want MAPERR.
+                 */
+                if (page_get_flags(guest_addr) & PAGE_VALID) {
+                    maperr = false;
+                } else {
+                    info->si_code = SEGV_MAPERR;
+                }
+            }
+
+            sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
+            cpu_loop_exit_sigsegv(cpu, guest_addr, access_type, maperr, pc);
+        } else {
+            sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
+            if (info->si_code == BUS_ADRALN) {
+                cpu_loop_exit_sigbus(cpu, guest_addr, access_type, pc);
+            }
+        }
+
+        sync_sig = true;
     }

     /* get target signal number */
-    sig = host_to_target_signal(host_signum);
-    if (sig < 1 || sig > TARGET_NSIG)
+    guest_sig = host_to_target_signal(host_sig);
+    if (guest_sig < 1 || guest_sig > TARGET_NSIG) {
         return;
-    trace_user_host_signal(env, host_signum, sig);
-
-    rewind_if_in_safe_syscall(puc);
+    }
+    trace_user_host_signal(env, host_sig, guest_sig);

     host_to_target_siginfo_noswap(&tinfo, info);

-    k = &ts->sigtab[sig - 1];
+    k = &ts->sigtab[guest_sig - 1];
     k->info = tinfo;
-    k->pending = sig;
+    k->pending = guest_sig;
     ts->signal_pending = 1;

-    /* Block host signals until target signal handler entered. We
+    /*
+     * For synchronous signals, unwind the cpu state to the faulting
+     * insn and then exit back to the main loop so that the signal
+     * is delivered immediately.
+     */
+    if (sync_sig) {
+        cpu->exception_index = EXCP_INTERRUPT;
+        cpu_loop_exit_restore(cpu, pc);
+    }
+
+    rewind_if_in_safe_syscall(puc);
+
+    /*
+     * Block host signals until target signal handler entered. We
      * can't block SIGSEGV or SIGBUS while we're executing guest
      * code in case the guest code provokes one in the window between
      * now and it getting out to the main loop. Signals will be