author     Richard Henderson    2022-07-08 17:15:35 +0200
committer  Peter Maydell        2022-07-11 14:43:52 +0200
commit     78fd56ba13a472d20975d4bca1633b3dccd70954 (patch)
tree       f50ad668e79d96bd585daa14d8f2d17d80b28448 /linux-user
parent     linux-user/aarch64: Move sve record checks into restore (diff)
linux-user/aarch64: Implement SME signal handling
Set the SM bit in the SVE record on signal delivery, create the ZA record.
Restore SM and ZA state according to the records present on return.
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-41-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
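The records emitted here follow the same layout as the kernel's AArch64 signal ABI, so, purely as an illustration (not part of this patch), a guest program could inspect them as below. This sketch assumes Linux/glibc headers recent enough to define struct _aarch64_ctx, struct za_context and ZA_MAGIC in <asm/sigcontext.h>, and for brevity it walks only the standard __reserved area of the mcontext without following a possible extra_context record.

#include <signal.h>
#include <stdio.h>
#include <ucontext.h>
#include <asm/sigcontext.h>

static void handler(int sig, siginfo_t *si, void *ucp)
{
    ucontext_t *uc = ucp;
    struct _aarch64_ctx *ctx = (struct _aarch64_ctx *)uc->uc_mcontext.__reserved;

    /* The record list is terminated by a header with magic == 0. */
    while (ctx->magic != 0) {
        if (ctx->magic == ZA_MAGIC) {
            struct za_context *za = (struct za_context *)ctx;
            /* size > sizeof(*za) means PSTATE.ZA was set and ZA data follows. */
            printf("ZA record: vl=%d, size=%u\n", za->vl, za->head.size);
        }
        ctx = (struct _aarch64_ctx *)((char *)ctx + ctx->size);
    }
}

int main(void)
{
    struct sigaction sa = { .sa_sigaction = handler, .sa_flags = SA_SIGINFO };
    sigaction(SIGUSR1, &sa, NULL);
    raise(SIGUSR1);
    return 0;
}

With PSTATE.ZA clear at delivery the ZA record is header-only; when ZA is active, the full vl x vl bytes of ZA data follow the header, as the setup code in the diff below shows.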
Diffstat (limited to 'linux-user')
-rw-r--r--   linux-user/aarch64/signal.c   167
1 file changed, 154 insertions(+), 13 deletions(-)
diff --git a/linux-user/aarch64/signal.c b/linux-user/aarch64/signal.c
index 22d0b8b4ec..6a2c6e06d2 100644
--- a/linux-user/aarch64/signal.c
+++ b/linux-user/aarch64/signal.c
@@ -104,6 +104,22 @@ struct target_sve_context {
 
 #define TARGET_SVE_SIG_FLAG_SM  1
 
+#define TARGET_ZA_MAGIC        0x54366345
+
+struct target_za_context {
+    struct target_aarch64_ctx head;
+    uint16_t vl;
+    uint16_t reserved[3];
+    /* The actual ZA data immediately follows. */
+};
+
+#define TARGET_ZA_SIG_REGS_OFFSET \
+    QEMU_ALIGN_UP(sizeof(struct target_za_context), TARGET_SVE_VQ_BYTES)
+#define TARGET_ZA_SIG_ZAV_OFFSET(VQ, N) \
+    (TARGET_ZA_SIG_REGS_OFFSET + (VQ) * TARGET_SVE_VQ_BYTES * (N))
+#define TARGET_ZA_SIG_CONTEXT_SIZE(VQ) \
+    TARGET_ZA_SIG_ZAV_OFFSET(VQ, VQ * TARGET_SVE_VQ_BYTES)
+
 struct target_rt_sigframe {
     struct target_siginfo info;
     struct target_ucontext uc;
@@ -176,9 +192,9 @@ static void target_setup_end_record(struct target_aarch64_ctx *end)
 }
 
 static void target_setup_sve_record(struct target_sve_context *sve,
-                                    CPUARMState *env, int vq, int size)
+                                    CPUARMState *env, int size)
 {
-    int i, j;
+    int i, j, vq = sve_vq(env);
 
     memset(sve, 0, sizeof(*sve));
     __put_user(TARGET_SVE_MAGIC, &sve->head.magic);
@@ -207,6 +223,35 @@ static void target_setup_sve_record(struct target_sve_context *sve,
     }
 }
 
+static void target_setup_za_record(struct target_za_context *za,
+                                   CPUARMState *env, int size)
+{
+    int vq = sme_vq(env);
+    int vl = vq * TARGET_SVE_VQ_BYTES;
+    int i, j;
+
+    memset(za, 0, sizeof(*za));
+    __put_user(TARGET_ZA_MAGIC, &za->head.magic);
+    __put_user(size, &za->head.size);
+    __put_user(vl, &za->vl);
+
+    if (size == TARGET_ZA_SIG_CONTEXT_SIZE(0)) {
+        return;
+    }
+    assert(size == TARGET_ZA_SIG_CONTEXT_SIZE(vq));
+
+    /*
+     * Note that ZA vectors are stored as a byte stream,
+     * with each byte element at a subsequent address.
+     */
+    for (i = 0; i < vl; ++i) {
+        uint64_t *z = (void *)za + TARGET_ZA_SIG_ZAV_OFFSET(vq, i);
+        for (j = 0; j < vq * 2; ++j) {
+            __put_user_e(env->zarray[i].d[j], z + j, le);
+        }
+    }
+}
+
 static void target_restore_general_frame(CPUARMState *env,
                                          struct target_rt_sigframe *sf)
 {
@@ -252,16 +297,28 @@ static void target_restore_fpsimd_record(CPUARMState *env,
 
 static bool target_restore_sve_record(CPUARMState *env,
                                       struct target_sve_context *sve,
-                                      int size)
+                                      int size, int *svcr)
 {
-    int i, j, vl, vq;
+    int i, j, vl, vq, flags;
+    bool sm;
+
+    __get_user(vl, &sve->vl);
+    __get_user(flags, &sve->flags);
 
-    if (!cpu_isar_feature(aa64_sve, env_archcpu(env))) {
+    sm = flags & TARGET_SVE_SIG_FLAG_SM;
+
+    /* The cpu must support Streaming or Non-streaming SVE. */
+    if (sm
+        ? !cpu_isar_feature(aa64_sme, env_archcpu(env))
+        : !cpu_isar_feature(aa64_sve, env_archcpu(env))) {
         return false;
     }
 
-    __get_user(vl, &sve->vl);
-    vq = sve_vq(env);
+    /*
+     * Note that we cannot use sve_vq() because that depends on the
+     * current setting of PSTATE.SM, not the state to be restored.
+     */
+    vq = sve_vqm1_for_el_sm(env, 0, sm) + 1;
 
     /* Reject mismatched VL. */
     if (vl != vq * TARGET_SVE_VQ_BYTES) {
@@ -278,6 +335,8 @@ static bool target_restore_sve_record(CPUARMState *env,
         return false;
     }
 
+    *svcr = FIELD_DP64(*svcr, SVCR, SM, sm);
+
     /*
      * Note that SVE regs are stored as a byte stream, with each byte element
      * at a subsequent address.  This corresponds to a little-endian load
@@ -304,15 +363,57 @@ static bool target_restore_sve_record(CPUARMState *env,
     return true;
 }
 
+static bool target_restore_za_record(CPUARMState *env,
+                                     struct target_za_context *za,
+                                     int size, int *svcr)
+{
+    int i, j, vl, vq;
+
+    if (!cpu_isar_feature(aa64_sme, env_archcpu(env))) {
+        return false;
+    }
+
+    __get_user(vl, &za->vl);
+    vq = sme_vq(env);
+
+    /* Reject mismatched VL. */
+    if (vl != vq * TARGET_SVE_VQ_BYTES) {
+        return false;
+    }
+
+    /* Accept empty record -- used to clear PSTATE.ZA. */
+    if (size <= TARGET_ZA_SIG_CONTEXT_SIZE(0)) {
+        return true;
+    }
+
+    /* Reject non-empty but incomplete record. */
+    if (size < TARGET_ZA_SIG_CONTEXT_SIZE(vq)) {
+        return false;
+    }
+
+    *svcr = FIELD_DP64(*svcr, SVCR, ZA, 1);
+
+    for (i = 0; i < vl; ++i) {
+        uint64_t *z = (void *)za + TARGET_ZA_SIG_ZAV_OFFSET(vq, i);
+        for (j = 0; j < vq * 2; ++j) {
+            __get_user_e(env->zarray[i].d[j], z + j, le);
+        }
+    }
+    return true;
+}
+
 static int target_restore_sigframe(CPUARMState *env,
                                    struct target_rt_sigframe *sf)
 {
     struct target_aarch64_ctx *ctx, *extra = NULL;
     struct target_fpsimd_context *fpsimd = NULL;
     struct target_sve_context *sve = NULL;
+    struct target_za_context *za = NULL;
     uint64_t extra_datap = 0;
     bool used_extra = false;
     int sve_size = 0;
+    int za_size = 0;
+    int svcr = 0;
 
     target_restore_general_frame(env, sf);
 
@@ -350,6 +451,14 @@ static int target_restore_sigframe(CPUARMState *env,
             sve_size = size;
             break;
 
+        case TARGET_ZA_MAGIC:
+            if (za || size < sizeof(struct target_za_context)) {
+                goto err;
+            }
+            za = (struct target_za_context *)ctx;
+            za_size = size;
+            break;
+
         case TARGET_EXTRA_MAGIC:
             if (extra || size != sizeof(struct target_extra_context)) {
                 goto err;
            }
@@ -381,9 +490,16 @@
     }
 
     /* SVE data, if present, overwrites FPSIMD data. */
-    if (sve && !target_restore_sve_record(env, sve, sve_size)) {
+    if (sve && !target_restore_sve_record(env, sve, sve_size, &svcr)) {
         goto err;
     }
+    if (za && !target_restore_za_record(env, za, za_size, &svcr)) {
+        goto err;
+    }
+    if (env->svcr != svcr) {
+        env->svcr = svcr;
+        arm_rebuild_hflags(env);
+    }
 
     unlock_user(extra, extra_datap, 0);
     return 0;
@@ -451,7 +567,8 @@ static void target_setup_frame(int usig, struct target_sigaction *ka,
         .total_size = offsetof(struct target_rt_sigframe,
                                uc.tuc_mcontext.__reserved),
     };
-    int fpsimd_ofs, fr_ofs, sve_ofs = 0, vq = 0, sve_size = 0;
+    int fpsimd_ofs, fr_ofs, sve_ofs = 0, za_ofs = 0;
+    int sve_size = 0, za_size = 0;
     struct target_rt_sigframe *frame;
     struct target_rt_frame_record *fr;
     abi_ulong frame_addr, return_addr;
@@ -461,11 +578,20 @@ static void target_setup_frame(int usig, struct target_sigaction *ka,
                                       &layout);
 
     /* SVE state needs saving only if it exists. */
-    if (cpu_isar_feature(aa64_sve, env_archcpu(env))) {
-        vq = sve_vq(env);
-        sve_size = QEMU_ALIGN_UP(TARGET_SVE_SIG_CONTEXT_SIZE(vq), 16);
+    if (cpu_isar_feature(aa64_sve, env_archcpu(env)) ||
+        cpu_isar_feature(aa64_sme, env_archcpu(env))) {
+        sve_size = QEMU_ALIGN_UP(TARGET_SVE_SIG_CONTEXT_SIZE(sve_vq(env)), 16);
         sve_ofs = alloc_sigframe_space(sve_size, &layout);
     }
+    if (cpu_isar_feature(aa64_sme, env_archcpu(env))) {
+        /* ZA state needs saving only if it is enabled. */
+        if (FIELD_EX64(env->svcr, SVCR, ZA)) {
+            za_size = TARGET_ZA_SIG_CONTEXT_SIZE(sme_vq(env));
+        } else {
+            za_size = TARGET_ZA_SIG_CONTEXT_SIZE(0);
+        }
+        za_ofs = alloc_sigframe_space(za_size, &layout);
+    }
 
     if (layout.extra_ofs) {
         /* Reserve space for the extra end marker.  The standard end marker
@@ -512,7 +638,10 @@ static void target_setup_frame(int usig, struct target_sigaction *ka,
         target_setup_end_record((void *)frame + layout.extra_end_ofs);
     }
     if (sve_ofs) {
-        target_setup_sve_record((void *)frame + sve_ofs, env, vq, sve_size);
+        target_setup_sve_record((void *)frame + sve_ofs, env, sve_size);
+    }
+    if (za_ofs) {
+        target_setup_za_record((void *)frame + za_ofs, env, za_size);
     }
 
     /* Set up the stack frame for unwinding. */
@@ -536,6 +665,18 @@ static void target_setup_frame(int usig, struct target_sigaction *ka,
         env->btype = 2;
     }
 
+    /*
+     * Invoke the signal handler with both SM and ZA disabled.
+     * When clearing SM, ResetSVEState, per SMSTOP.
+     */
+    if (FIELD_EX64(env->svcr, SVCR, SM)) {
+        arm_reset_sve_state(env);
+    }
+    if (env->svcr) {
+        env->svcr = 0;
+        arm_rebuild_hflags(env);
+    }
+
     if (info) {
         tswap_siginfo(&frame->info, info);
         env->xregs[1] = frame_addr + offsetof(struct target_rt_sigframe, info);
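For a sense of the sizes involved, here is a small standalone sketch (hypothetical, not from the patch) that mirrors the TARGET_ZA_SIG_* macros above with simplified local names, replacing QEMU_ALIGN_UP with a local ALIGN_UP. It shows how the ZA record grows quadratically with the streaming vector length: each of the vl rows of ZA is vl bytes long.

#include <stdio.h>
#include <stdint.h>

#define TARGET_SVE_VQ_BYTES 16

struct target_aarch64_ctx { uint32_t magic; uint32_t size; };

struct target_za_context {
    struct target_aarch64_ctx head;
    uint16_t vl;
    uint16_t reserved[3];
};

#define ALIGN_UP(x, a)   (((x) + (a) - 1) & ~((size_t)(a) - 1))
#define ZA_SIG_REGS_OFFSET \
    ALIGN_UP(sizeof(struct target_za_context), TARGET_SVE_VQ_BYTES)
#define ZA_SIG_ZAV_OFFSET(vq, n) \
    (ZA_SIG_REGS_OFFSET + (size_t)(vq) * TARGET_SVE_VQ_BYTES * (n))
#define ZA_SIG_CONTEXT_SIZE(vq) \
    ZA_SIG_ZAV_OFFSET(vq, (size_t)(vq) * TARGET_SVE_VQ_BYTES)

int main(void)
{
    for (int vq = 1; vq <= 16; vq *= 2) {
        size_t vl = (size_t)vq * TARGET_SVE_VQ_BYTES;   /* bytes per ZA row */
        printf("vq=%2d  vl=%3zu  ZA data=%6zu  record=%6zu bytes\n",
               vq, vl, vl * vl, ZA_SIG_CONTEXT_SIZE(vq));
    }
    return 0;
}

For example, with vq = 4 (a 64-byte streaming vector length) the ZA data alone is 64 x 64 = 4096 bytes, which is why the frame-layout code above allocates the full TARGET_ZA_SIG_CONTEXT_SIZE(vq) only when PSTATE.ZA is set and otherwise emits a header-only record of TARGET_ZA_SIG_CONTEXT_SIZE(0).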