Diffstat (limited to 'target/s390x')
 target/s390x/cpu.c              |   5
 target/s390x/cpu.h              |   9
 target/s390x/cpu_features.c     |   3
 target/s390x/cpu_features_def.h |   3
 target/s390x/cpu_models.c       |   2
 target/s390x/excp_helper.c      |  80
 target/s390x/fpu_helper.c       |  13
 target/s390x/gen-features.c     |   3
 target/s390x/helper.h           |   1
 target/s390x/insn-data.def      | 395
 target/s390x/interrupt.c        |  15
 target/s390x/kvm.c              |  29
 target/s390x/mem_helper.c       | 196
 target/s390x/tcg-stub.c         |  10
 target/s390x/tcg_s390x.h        |   4
 target/s390x/translate.c        | 248
 16 files changed, 521 insertions(+), 495 deletions(-)
diff --git a/target/s390x/cpu.c b/target/s390x/cpu.c index 8ed4823d6e..18ba7f85a5 100644 --- a/target/s390x/cpu.c +++ b/target/s390x/cpu.c @@ -145,6 +145,11 @@ static void s390_cpu_full_reset(CPUState *s) env->cregs[0] = CR0_RESET; env->cregs[14] = CR14_RESET; +#if defined(CONFIG_USER_ONLY) + /* user mode should always be allowed to use the full FPU */ + env->cregs[0] |= CR0_AFP; +#endif + /* architectured initial value for Breaking-Event-Address register */ env->gbea = 1; diff --git a/target/s390x/cpu.h b/target/s390x/cpu.h index 6f8861e554..8c2320e882 100644 --- a/target/s390x/cpu.h +++ b/target/s390x/cpu.h @@ -255,6 +255,7 @@ extern const struct VMStateDescription vmstate_s390_cpu; /* PSW defines */ #undef PSW_MASK_PER +#undef PSW_MASK_UNUSED_2 #undef PSW_MASK_DAT #undef PSW_MASK_IO #undef PSW_MASK_EXT @@ -273,6 +274,7 @@ extern const struct VMStateDescription vmstate_s390_cpu; #undef PSW_MASK_ESA_ADDR #define PSW_MASK_PER 0x4000000000000000ULL +#define PSW_MASK_UNUSED_2 0x2000000000000000ULL #define PSW_MASK_DAT 0x0400000000000000ULL #define PSW_MASK_IO 0x0200000000000000ULL #define PSW_MASK_EXT 0x0100000000000000ULL @@ -318,10 +320,14 @@ extern const struct VMStateDescription vmstate_s390_cpu; #define FLAG_MASK_PSW (FLAG_MASK_PER | FLAG_MASK_DAT | FLAG_MASK_PSTATE \ | FLAG_MASK_ASC | FLAG_MASK_64 | FLAG_MASK_32) +/* we'll use some unused PSW positions to store CR flags in tb flags */ +#define FLAG_MASK_AFP (PSW_MASK_UNUSED_2 >> FLAG_MASK_PSW_SHIFT) + /* Control register 0 bits */ #define CR0_LOWPROT 0x0000000010000000ULL #define CR0_SECONDARY 0x0000000004000000ULL #define CR0_EDAT 0x0000000000800000ULL +#define CR0_AFP 0x0000000000040000ULL #define CR0_EMERGENCY_SIGNAL_SC 0x0000000000004000ULL #define CR0_EXTERNAL_CALL_SC 0x0000000000002000ULL #define CR0_CKC_SC 0x0000000000000800ULL @@ -363,6 +369,9 @@ static inline void cpu_get_tb_cpu_state(CPUS390XState* env, target_ulong *pc, *pc = env->psw.addr; *cs_base = env->ex_value; *flags = (env->psw.mask >> FLAG_MASK_PSW_SHIFT) & FLAG_MASK_PSW; + if (env->cregs[0] & CR0_AFP) { + *flags |= FLAG_MASK_AFP; + } } /* PER bits from control register 9 */ diff --git a/target/s390x/cpu_features.c b/target/s390x/cpu_features.c index 172fb18df7..60cfeba48f 100644 --- a/target/s390x/cpu_features.c +++ b/target/s390x/cpu_features.c @@ -39,8 +39,10 @@ static const S390FeatDef s390_features[] = { FEAT_INIT("srs", S390_FEAT_TYPE_STFL, 9, "Sense-running-status facility"), FEAT_INIT("csske", S390_FEAT_TYPE_STFL, 10, "Conditional-SSKE facility"), FEAT_INIT("ctop", S390_FEAT_TYPE_STFL, 11, "Configuration-topology facility"), + FEAT_INIT("apqci", S390_FEAT_TYPE_STFL, 12, "Query AP Configuration Information facility"), FEAT_INIT("ipter", S390_FEAT_TYPE_STFL, 13, "IPTE-range facility"), FEAT_INIT("nonqks", S390_FEAT_TYPE_STFL, 14, "Nonquiescing key-setting facility"), + FEAT_INIT("apft", S390_FEAT_TYPE_STFL, 15, "AP Facilities Test facility"), FEAT_INIT("etf2", S390_FEAT_TYPE_STFL, 16, "Extended-translation facility 2"), FEAT_INIT("msa-base", S390_FEAT_TYPE_STFL, 17, "Message-security-assist facility (excluding subfunctions)"), FEAT_INIT("ldisp", S390_FEAT_TYPE_STFL, 18, "Long-displacement facility"), @@ -129,6 +131,7 @@ static const S390FeatDef s390_features[] = { FEAT_INIT_MISC("dateh2", "DAT-enhancement facility 2"), FEAT_INIT_MISC("cmm", "Collaborative-memory-management facility"), + FEAT_INIT_MISC("ap", "AP instructions installed"), FEAT_INIT("plo-cl", S390_FEAT_TYPE_PLO, 0, "PLO Compare and load (32 bit in general registers)"), FEAT_INIT("plo-clg", 
S390_FEAT_TYPE_PLO, 1, "PLO Compare and load (64 bit in parameter list)"), diff --git a/target/s390x/cpu_features_def.h b/target/s390x/cpu_features_def.h index ac2c947f30..5fc7e7bf01 100644 --- a/target/s390x/cpu_features_def.h +++ b/target/s390x/cpu_features_def.h @@ -27,8 +27,10 @@ typedef enum { S390_FEAT_SENSE_RUNNING_STATUS, S390_FEAT_CONDITIONAL_SSKE, S390_FEAT_CONFIGURATION_TOPOLOGY, + S390_FEAT_AP_QUERY_CONFIG_INFO, S390_FEAT_IPTE_RANGE, S390_FEAT_NONQ_KEY_SETTING, + S390_FEAT_AP_FACILITIES_TEST, S390_FEAT_EXTENDED_TRANSLATION_2, S390_FEAT_MSA, S390_FEAT_LONG_DISPLACEMENT, @@ -119,6 +121,7 @@ typedef enum { /* Misc */ S390_FEAT_DAT_ENH_2, S390_FEAT_CMM, + S390_FEAT_AP, /* PLO */ S390_FEAT_PLO_CL, diff --git a/target/s390x/cpu_models.c b/target/s390x/cpu_models.c index 265d25c937..7c253ff308 100644 --- a/target/s390x/cpu_models.c +++ b/target/s390x/cpu_models.c @@ -786,6 +786,8 @@ static void check_consistency(const S390CPUModel *model) { S390_FEAT_PRNO_TRNG_QRTCR, S390_FEAT_MSA_EXT_5 }, { S390_FEAT_PRNO_TRNG, S390_FEAT_MSA_EXT_5 }, { S390_FEAT_SIE_KSS, S390_FEAT_SIE_F2 }, + { S390_FEAT_AP_QUERY_CONFIG_INFO, S390_FEAT_AP }, + { S390_FEAT_AP_FACILITIES_TEST, S390_FEAT_AP }, }; int i; diff --git a/target/s390x/excp_helper.c b/target/s390x/excp_helper.c index f0ce60cff2..2a33222f7e 100644 --- a/target/s390x/excp_helper.c +++ b/target/s390x/excp_helper.c @@ -21,33 +21,52 @@ #include "qemu/osdep.h" #include "cpu.h" #include "internal.h" +#include "exec/helper-proto.h" #include "qemu/timer.h" #include "exec/exec-all.h" #include "exec/cpu_ldst.h" #include "hw/s390x/ioinst.h" #include "exec/address-spaces.h" +#include "tcg_s390x.h" #ifndef CONFIG_USER_ONLY #include "sysemu/sysemu.h" #include "hw/s390x/s390_flic.h" #endif -/* #define DEBUG_S390 */ -/* #define DEBUG_S390_STDOUT */ - -#ifdef DEBUG_S390 -#ifdef DEBUG_S390_STDOUT -#define DPRINTF(fmt, ...) \ - do { fprintf(stderr, fmt, ## __VA_ARGS__); \ - if (qemu_log_separate()) { qemu_log(fmt, ##__VA_ARGS__); } } while (0) -#else -#define DPRINTF(fmt, ...) \ - do { qemu_log(fmt, ## __VA_ARGS__); } while (0) -#endif -#else -#define DPRINTF(fmt, ...) 
\ - do { } while (0) +void QEMU_NORETURN tcg_s390_program_interrupt(CPUS390XState *env, uint32_t code, + int ilen, uintptr_t ra) +{ + CPUState *cs = CPU(s390_env_get_cpu(env)); + + cpu_restore_state(cs, ra, true); + qemu_log_mask(CPU_LOG_INT, "program interrupt at %#" PRIx64 "\n", + env->psw.addr); + trigger_pgm_exception(env, code, ilen); + cpu_loop_exit(cs); +} + +void QEMU_NORETURN tcg_s390_data_exception(CPUS390XState *env, uint32_t dxc, + uintptr_t ra) +{ + g_assert(dxc <= 0xff); +#if !defined(CONFIG_USER_ONLY) + /* Store the DXC into the lowcore */ + stl_phys(CPU(s390_env_get_cpu(env))->as, + env->psa + offsetof(LowCore, data_exc_code), dxc); #endif + /* Store the DXC into the FPC if AFP is enabled */ + if (env->cregs[0] & CR0_AFP) { + env->fpc = deposit32(env->fpc, 8, 8, dxc); + } + tcg_s390_program_interrupt(env, PGM_DATA, ILEN_AUTO, ra); +} + +void HELPER(data_exception)(CPUS390XState *env, uint32_t dxc) +{ + tcg_s390_data_exception(env, dxc, GETPC()); +} + #if defined(CONFIG_USER_ONLY) void s390_cpu_do_interrupt(CPUState *cs) @@ -92,8 +111,8 @@ int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr orig_vaddr, int size, uint64_t asc; int prot; - DPRINTF("%s: address 0x%" VADDR_PRIx " rw %d mmu_idx %d\n", - __func__, orig_vaddr, rw, mmu_idx); + qemu_log_mask(CPU_LOG_MMU, "%s: addr 0x%" VADDR_PRIx " rw %d mmu_idx %d\n", + __func__, orig_vaddr, rw, mmu_idx); vaddr = orig_vaddr; @@ -122,8 +141,9 @@ int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr orig_vaddr, int size, if (!address_space_access_valid(&address_space_memory, raddr, TARGET_PAGE_SIZE, rw, MEMTXATTRS_UNSPECIFIED)) { - DPRINTF("%s: raddr %" PRIx64 " > ram_size %" PRIx64 "\n", __func__, - (uint64_t)raddr, (uint64_t)ram_size); + qemu_log_mask(CPU_LOG_MMU, + "%s: raddr %" PRIx64 " > ram_size %" PRIx64 "\n", + __func__, (uint64_t)raddr, (uint64_t)ram_size); trigger_pgm_exception(env, PGM_ADDRESSING, ILEN_AUTO); return 1; } @@ -181,8 +201,10 @@ static void do_program_interrupt(CPUS390XState *env) break; } - qemu_log_mask(CPU_LOG_INT, "%s: code=0x%x ilen=%d\n", - __func__, env->int_pgm_code, ilen); + qemu_log_mask(CPU_LOG_INT, + "%s: code=0x%x ilen=%d psw: %" PRIx64 " %" PRIx64 "\n", + __func__, env->int_pgm_code, ilen, env->psw.mask, + env->psw.addr); lowcore = cpu_map_lowcore(env); @@ -204,10 +226,6 @@ static void do_program_interrupt(CPUS390XState *env) cpu_unmap_lowcore(lowcore); - DPRINTF("%s: %x %x %" PRIx64 " %" PRIx64 "\n", __func__, - env->int_pgm_code, ilen, env->psw.mask, - env->psw.addr); - load_psw(env, mask, addr); } @@ -298,9 +316,6 @@ static void do_ext_interrupt(CPUS390XState *env) cpu_unmap_lowcore(lowcore); - DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__, - env->psw.mask, env->psw.addr); - load_psw(env, mask, addr); } @@ -329,8 +344,6 @@ static void do_io_interrupt(CPUS390XState *env) cpu_unmap_lowcore(lowcore); g_free(io); - DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__, env->psw.mask, - env->psw.addr); load_psw(env, mask, addr); } @@ -372,9 +385,6 @@ static void do_mchk_interrupt(CPUS390XState *env) cpu_unmap_lowcore(lowcore); - DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__, - env->psw.mask, env->psw.addr); - load_psw(env, mask, addr); } @@ -385,8 +395,8 @@ void s390_cpu_do_interrupt(CPUState *cs) CPUS390XState *env = &cpu->env; bool stopped = false; - qemu_log_mask(CPU_LOG_INT, "%s: %d at pc=%" PRIx64 "\n", - __func__, cs->exception_index, env->psw.addr); + qemu_log_mask(CPU_LOG_INT, "%s: %d at psw=%" PRIx64 ":%" PRIx64 "\n", + __func__, cs->exception_index, env->psw.mask, env->psw.addr); 
try_deliver: /* handle machine checks */ diff --git a/target/s390x/fpu_helper.c b/target/s390x/fpu_helper.c index 5c5b451b3b..1b662d2520 100644 --- a/target/s390x/fpu_helper.c +++ b/target/s390x/fpu_helper.c @@ -21,6 +21,7 @@ #include "qemu/osdep.h" #include "cpu.h" #include "internal.h" +#include "tcg_s390x.h" #include "exec/exec-all.h" #include "exec/cpu_ldst.h" #include "exec/helper-proto.h" @@ -40,14 +41,6 @@ ? (mask / (from / to)) & to \ : (mask & from) * (to / from)) -static void ieee_exception(CPUS390XState *env, uint32_t dxc, uintptr_t retaddr) -{ - /* Install the DXC code. */ - env->fpc = (env->fpc & ~0xff00) | (dxc << 8); - /* Trap. */ - s390_program_interrupt(env, PGM_DATA, ILEN_AUTO, retaddr); -} - /* Should be called after any operation that may raise IEEE exceptions. */ static void handle_exceptions(CPUS390XState *env, uintptr_t retaddr) { @@ -75,7 +68,7 @@ static void handle_exceptions(CPUS390XState *env, uintptr_t retaddr) /* Send signals for enabled exceptions. */ s390_exc &= env->fpc >> 24; if (s390_exc) { - ieee_exception(env, s390_exc, retaddr); + tcg_s390_data_exception(env, s390_exc, retaddr); } } @@ -773,6 +766,6 @@ void HELPER(sfas)(CPUS390XState *env, uint64_t val) is also 1, a simulated-iee-exception trap occurs. */ s390_exc = (signalling >> 16) & (source >> 24); if (s390_exc) { - ieee_exception(env, s390_exc | 3, GETPC()); + tcg_s390_data_exception(env, s390_exc | 3, GETPC()); } } diff --git a/target/s390x/gen-features.c b/target/s390x/gen-features.c index 384b61cd67..70015eaaf5 100644 --- a/target/s390x/gen-features.c +++ b/target/s390x/gen-features.c @@ -447,6 +447,9 @@ static uint16_t full_GEN12_GA1[] = { S390_FEAT_ADAPTER_INT_SUPPRESSION, S390_FEAT_EDAT_2, S390_FEAT_SIDE_EFFECT_ACCESS_ESOP2, + S390_FEAT_AP_QUERY_CONFIG_INFO, + S390_FEAT_AP_FACILITIES_TEST, + S390_FEAT_AP, }; static uint16_t full_GEN12_GA2[] = { diff --git a/target/s390x/helper.h b/target/s390x/helper.h index 97c60ca7bc..018e9dd414 100644 --- a/target/s390x/helper.h +++ b/target/s390x/helper.h @@ -1,4 +1,5 @@ DEF_HELPER_2(exception, noreturn, env, i32) +DEF_HELPER_2(data_exception, noreturn, env, i32) DEF_HELPER_FLAGS_4(nc, TCG_CALL_NO_WG, i32, env, i32, i64, i64) DEF_HELPER_FLAGS_4(oc, TCG_CALL_NO_WG, i32, env, i32, i64, i64) DEF_HELPER_FLAGS_4(xc, TCG_CALL_NO_WG, i32, env, i32, i64, i64) diff --git a/target/s390x/insn-data.def b/target/s390x/insn-data.def index 9c7b434fca..54e39df831 100644 --- a/target/s390x/insn-data.def +++ b/target/s390x/insn-data.def @@ -3,6 +3,8 @@ * * C(OPC, NAME, FMT, FAC, I1, I2, P, W, OP, CC) * D(OPC, NAME, FMT, FAC, I1, I2, P, W, OP, CC, DATA) + * E(OPC, NAME, FMT, FAC, I1, I2, P, W, OP, CC, DATA, FLAGS) + * F(OPC, NAME, FMT, FAC, I1, I2, P, W, OP, CC, FLAGS) * * OPC = (op << 8) | op2 where op is the major, op2 the minor opcode * NAME = name of the opcode, used internally @@ -15,6 +17,7 @@ * OP = func op_xx does the bulk of the operation * CC = func cout_xx defines how cc should get set * DATA = immediate argument to op_xx function + * FLAGS = categorize the type of instruction (e.g. 
for advanced checks) * * The helpers get called in order: I1, I2, P, OP, W, CC */ @@ -29,11 +32,11 @@ C(0xb9e8, AGRK, RRF_a, DO, r2, r3, r1, 0, add, adds64) C(0xe308, AG, RXY_a, Z, r1, m2_64, r1, 0, add, adds64) C(0xe318, AGF, RXY_a, Z, r1, m2_32s, r1, 0, add, adds64) - C(0xb30a, AEBR, RRE, Z, e1, e2, new, e1, aeb, f32) - C(0xb31a, ADBR, RRE, Z, f1_o, f2_o, f1, 0, adb, f64) - C(0xb34a, AXBR, RRE, Z, 0, x2_o, x1, 0, axb, f128) - C(0xed0a, AEB, RXE, Z, e1, m2_32u, new, e1, aeb, f32) - C(0xed1a, ADB, RXE, Z, f1_o, m2_64, f1, 0, adb, f64) + F(0xb30a, AEBR, RRE, Z, e1, e2, new, e1, aeb, f32, IF_BFP) + F(0xb31a, ADBR, RRE, Z, f1_o, f2_o, f1, 0, adb, f64, IF_BFP) + F(0xb34a, AXBR, RRE, Z, 0, x2_o, x1, 0, axb, f128, IF_BFP) + F(0xed0a, AEB, RXE, Z, e1, m2_32u, new, e1, aeb, f32, IF_BFP) + F(0xed1a, ADB, RXE, Z, f1_o, m2_64, f1, 0, adb, f64, IF_BFP) /* ADD HIGH */ C(0xb9c8, AHHHR, RRF_a, HW, r2_sr32, r3_sr32, new, r1_32h, add, adds32) C(0xb9d8, AHHLR, RRF_a, HW, r2_sr32, r3, new, r1_32h, add, adds32) @@ -151,7 +154,7 @@ C(0xb241, CKSM, RRE, Z, r1_o, ra2, new, r1_32, cksm, 0) /* COPY SIGN */ - C(0xb372, CPSDR, RRF_b, FPSSH, f3_o, f2_o, f1, 0, cps, 0) + F(0xb372, CPSDR, RRF_b, FPSSH, f3_o, f2_o, f1, 0, cps, 0, IF_AFP1 | IF_AFP2 | IF_AFP3) /* COMPARE */ C(0x1900, CR, RR_a, Z, r1_o, r2_o, 0, 0, 0, cmps32) @@ -161,17 +164,17 @@ C(0xb930, CGFR, RRE, Z, r1_o, r2_32s, 0, 0, 0, cmps64) C(0xe320, CG, RXY_a, Z, r1_o, m2_64, 0, 0, 0, cmps64) C(0xe330, CGF, RXY_a, Z, r1_o, m2_32s, 0, 0, 0, cmps64) - C(0xb309, CEBR, RRE, Z, e1, e2, 0, 0, ceb, 0) - C(0xb319, CDBR, RRE, Z, f1_o, f2_o, 0, 0, cdb, 0) - C(0xb349, CXBR, RRE, Z, x1_o, x2_o, 0, 0, cxb, 0) - C(0xed09, CEB, RXE, Z, e1, m2_32u, 0, 0, ceb, 0) - C(0xed19, CDB, RXE, Z, f1_o, m2_64, 0, 0, cdb, 0) + F(0xb309, CEBR, RRE, Z, e1, e2, 0, 0, ceb, 0, IF_BFP) + F(0xb319, CDBR, RRE, Z, f1_o, f2_o, 0, 0, cdb, 0, IF_BFP) + F(0xb349, CXBR, RRE, Z, x1_o, x2_o, 0, 0, cxb, 0, IF_BFP) + F(0xed09, CEB, RXE, Z, e1, m2_32u, 0, 0, ceb, 0, IF_BFP) + F(0xed19, CDB, RXE, Z, f1_o, m2_64, 0, 0, cdb, 0, IF_BFP) /* COMPARE AND SIGNAL */ - C(0xb308, KEBR, RRE, Z, e1, e2, 0, 0, keb, 0) - C(0xb318, KDBR, RRE, Z, f1_o, f2_o, 0, 0, kdb, 0) - C(0xb348, KXBR, RRE, Z, x1_o, x2_o, 0, 0, kxb, 0) - C(0xed08, KEB, RXE, Z, e1, m2_32u, 0, 0, keb, 0) - C(0xed18, KDB, RXE, Z, f1_o, m2_64, 0, 0, kdb, 0) + F(0xb308, KEBR, RRE, Z, e1, e2, 0, 0, keb, 0, IF_BFP) + F(0xb318, KDBR, RRE, Z, f1_o, f2_o, 0, 0, kdb, 0, IF_BFP) + F(0xb348, KXBR, RRE, Z, x1_o, x2_o, 0, 0, kxb, 0, IF_BFP) + F(0xed08, KEB, RXE, Z, e1, m2_32u, 0, 0, keb, 0, IF_BFP) + F(0xed18, KDB, RXE, Z, f1_o, m2_64, 0, 0, kdb, 0, IF_BFP) /* COMPARE IMMEDIATE */ C(0xc20d, CFI, RIL_a, EI, r1, i2, 0, 0, 0, cmps32) C(0xc20c, CGFI, RIL_a, EI, r1, i2, 0, 0, 0, cmps64) @@ -288,33 +291,33 @@ C(0x4e00, CVD, RX_a, Z, r1_o, a2, 0, 0, cvd, 0) C(0xe326, CVDY, RXY_a, LD, r1_o, a2, 0, 0, cvd, 0) /* CONVERT TO FIXED */ - C(0xb398, CFEBR, RRF_e, Z, 0, e2, new, r1_32, cfeb, 0) - C(0xb399, CFDBR, RRF_e, Z, 0, f2_o, new, r1_32, cfdb, 0) - C(0xb39a, CFXBR, RRF_e, Z, 0, x2_o, new, r1_32, cfxb, 0) - C(0xb3a8, CGEBR, RRF_e, Z, 0, e2, r1, 0, cgeb, 0) - C(0xb3a9, CGDBR, RRF_e, Z, 0, f2_o, r1, 0, cgdb, 0) - C(0xb3aa, CGXBR, RRF_e, Z, 0, x2_o, r1, 0, cgxb, 0) + F(0xb398, CFEBR, RRF_e, Z, 0, e2, new, r1_32, cfeb, 0, IF_BFP) + F(0xb399, CFDBR, RRF_e, Z, 0, f2_o, new, r1_32, cfdb, 0, IF_BFP) + F(0xb39a, CFXBR, RRF_e, Z, 0, x2_o, new, r1_32, cfxb, 0, IF_BFP) + F(0xb3a8, CGEBR, RRF_e, Z, 0, e2, r1, 0, cgeb, 0, IF_BFP) + F(0xb3a9, CGDBR, RRF_e, Z, 0, f2_o, r1, 0, cgdb, 0, IF_BFP) + 
F(0xb3aa, CGXBR, RRF_e, Z, 0, x2_o, r1, 0, cgxb, 0, IF_BFP) /* CONVERT FROM FIXED */ - C(0xb394, CEFBR, RRF_e, Z, 0, r2_32s, new, e1, cegb, 0) - C(0xb395, CDFBR, RRF_e, Z, 0, r2_32s, f1, 0, cdgb, 0) - C(0xb396, CXFBR, RRF_e, Z, 0, r2_32s, x1, 0, cxgb, 0) - C(0xb3a4, CEGBR, RRF_e, Z, 0, r2_o, new, e1, cegb, 0) - C(0xb3a5, CDGBR, RRF_e, Z, 0, r2_o, f1, 0, cdgb, 0) - C(0xb3a6, CXGBR, RRF_e, Z, 0, r2_o, x1, 0, cxgb, 0) + F(0xb394, CEFBR, RRF_e, Z, 0, r2_32s, new, e1, cegb, 0, IF_BFP) + F(0xb395, CDFBR, RRF_e, Z, 0, r2_32s, f1, 0, cdgb, 0, IF_BFP) + F(0xb396, CXFBR, RRF_e, Z, 0, r2_32s, x1, 0, cxgb, 0, IF_BFP) + F(0xb3a4, CEGBR, RRF_e, Z, 0, r2_o, new, e1, cegb, 0, IF_BFP) + F(0xb3a5, CDGBR, RRF_e, Z, 0, r2_o, f1, 0, cdgb, 0, IF_BFP) + F(0xb3a6, CXGBR, RRF_e, Z, 0, r2_o, x1, 0, cxgb, 0, IF_BFP) /* CONVERT TO LOGICAL */ - C(0xb39c, CLFEBR, RRF_e, FPE, 0, e2, new, r1_32, clfeb, 0) - C(0xb39d, CLFDBR, RRF_e, FPE, 0, f2_o, new, r1_32, clfdb, 0) - C(0xb39e, CLFXBR, RRF_e, FPE, 0, x2_o, new, r1_32, clfxb, 0) - C(0xb3ac, CLGEBR, RRF_e, FPE, 0, e2, r1, 0, clgeb, 0) - C(0xb3ad, CLGDBR, RRF_e, FPE, 0, f2_o, r1, 0, clgdb, 0) - C(0xb3ae, CLGXBR, RRF_e, FPE, 0, x2_o, r1, 0, clgxb, 0) + F(0xb39c, CLFEBR, RRF_e, FPE, 0, e2, new, r1_32, clfeb, 0, IF_BFP) + F(0xb39d, CLFDBR, RRF_e, FPE, 0, f2_o, new, r1_32, clfdb, 0, IF_BFP) + F(0xb39e, CLFXBR, RRF_e, FPE, 0, x2_o, new, r1_32, clfxb, 0, IF_BFP) + F(0xb3ac, CLGEBR, RRF_e, FPE, 0, e2, r1, 0, clgeb, 0, IF_BFP) + F(0xb3ad, CLGDBR, RRF_e, FPE, 0, f2_o, r1, 0, clgdb, 0, IF_BFP) + F(0xb3ae, CLGXBR, RRF_e, FPE, 0, x2_o, r1, 0, clgxb, 0, IF_BFP) /* CONVERT FROM LOGICAL */ - C(0xb390, CELFBR, RRF_e, FPE, 0, r2_32u, new, e1, celgb, 0) - C(0xb391, CDLFBR, RRF_e, FPE, 0, r2_32u, f1, 0, cdlgb, 0) - C(0xb392, CXLFBR, RRF_e, FPE, 0, r2_32u, x1, 0, cxlgb, 0) - C(0xb3a0, CELGBR, RRF_e, FPE, 0, r2_o, new, e1, celgb, 0) - C(0xb3a1, CDLGBR, RRF_e, FPE, 0, r2_o, f1, 0, cdlgb, 0) - C(0xb3a2, CXLGBR, RRF_e, FPE, 0, r2_o, x1, 0, cxlgb, 0) + F(0xb390, CELFBR, RRF_e, FPE, 0, r2_32u, new, e1, celgb, 0, IF_BFP) + F(0xb391, CDLFBR, RRF_e, FPE, 0, r2_32u, f1, 0, cdlgb, 0, IF_BFP) + F(0xb392, CXLFBR, RRF_e, FPE, 0, r2_32u, x1, 0, cxlgb, 0, IF_BFP) + F(0xb3a0, CELGBR, RRF_e, FPE, 0, r2_o, new, e1, celgb, 0, IF_BFP) + F(0xb3a1, CDLGBR, RRF_e, FPE, 0, r2_o, f1, 0, cdlgb, 0, IF_BFP) + F(0xb3a2, CXLGBR, RRF_e, FPE, 0, r2_o, x1, 0, cxlgb, 0, IF_BFP) /* CONVERT UTF-8 TO UTF-16 */ D(0xb2a7, CU12, RRF_c, Z, 0, 0, 0, 0, cuXX, 0, 12) @@ -332,11 +335,11 @@ /* DIVIDE */ C(0x1d00, DR, RR_a, Z, r1_D32, r2_32s, new_P, r1_P32, divs32, 0) C(0x5d00, D, RX_a, Z, r1_D32, m2_32s, new_P, r1_P32, divs32, 0) - C(0xb30d, DEBR, RRE, Z, e1, e2, new, e1, deb, 0) - C(0xb31d, DDBR, RRE, Z, f1_o, f2_o, f1, 0, ddb, 0) - C(0xb34d, DXBR, RRE, Z, 0, x2_o, x1, 0, dxb, 0) - C(0xed0d, DEB, RXE, Z, e1, m2_32u, new, e1, deb, 0) - C(0xed1d, DDB, RXE, Z, f1_o, m2_64, f1, 0, ddb, 0) + F(0xb30d, DEBR, RRE, Z, e1, e2, new, e1, deb, 0, IF_BFP) + F(0xb31d, DDBR, RRE, Z, f1_o, f2_o, f1, 0, ddb, 0, IF_BFP) + F(0xb34d, DXBR, RRE, Z, 0, x2_o, x1, 0, dxb, 0, IF_BFP) + F(0xed0d, DEB, RXE, Z, e1, m2_32u, new, e1, deb, 0, IF_BFP) + F(0xed1d, DDB, RXE, Z, f1_o, m2_64, f1, 0, ddb, 0, IF_BFP) /* DIVIDE LOGICAL */ C(0xb997, DLR, RRE, Z, r1_D32, r2_32u, new_P, r1_P32, divu32, 0) C(0xe397, DL, RXY_a, Z, r1_D32, m2_32u, new_P, r1_P32, divu32, 0) @@ -375,7 +378,7 @@ /* EXTRACT CPU TIME */ C(0xc801, ECTG, SSF, ECT, 0, 0, 0, 0, ectg, 0) /* EXTRACT FPC */ - C(0xb38c, EFPC, RRE, Z, 0, 0, new, r1_32, efpc, 0) + F(0xb38c, EFPC, RRE, Z, 0, 0, new, r1_32, efpc, 
0, IF_BFP) /* EXTRACT PSW */ C(0xb98d, EPSW, RRE, Z, 0, 0, 0, 0, epsw, 0) @@ -407,13 +410,13 @@ C(0xb914, LGFR, RRE, Z, 0, r2_32s, 0, r1, mov2, 0) C(0xe304, LG, RXY_a, Z, 0, a2, r1, 0, ld64, 0) C(0xe314, LGF, RXY_a, Z, 0, a2, r1, 0, ld32s, 0) - C(0x2800, LDR, RR_a, Z, 0, f2_o, 0, f1, mov2, 0) - C(0x6800, LD, RX_a, Z, 0, m2_64, 0, f1, mov2, 0) - C(0xed65, LDY, RXY_a, LD, 0, m2_64, 0, f1, mov2, 0) - C(0x3800, LER, RR_a, Z, 0, e2, 0, cond_e1e2, mov2, 0) - C(0x7800, LE, RX_a, Z, 0, m2_32u, 0, e1, mov2, 0) - C(0xed64, LEY, RXY_a, LD, 0, m2_32u, 0, e1, mov2, 0) - C(0xb365, LXR, RRE, Z, 0, x2_o, 0, x1, movx, 0) + F(0x2800, LDR, RR_a, Z, 0, f2_o, 0, f1, mov2, 0, IF_AFP1 | IF_AFP2) + F(0x6800, LD, RX_a, Z, 0, m2_64, 0, f1, mov2, 0, IF_AFP1) + F(0xed65, LDY, RXY_a, LD, 0, m2_64, 0, f1, mov2, 0, IF_AFP1) + F(0x3800, LER, RR_a, Z, 0, e2, 0, cond_e1e2, mov2, 0, IF_AFP1 | IF_AFP2) + F(0x7800, LE, RX_a, Z, 0, m2_32u, 0, e1, mov2, 0, IF_AFP1) + F(0xed64, LEY, RXY_a, LD, 0, m2_32u, 0, e1, mov2, 0, IF_AFP1) + F(0xb365, LXR, RRE, Z, 0, x2_o, 0, x1, movx, 0, IF_AFP1) /* LOAD IMMEDIATE */ C(0xc001, LGFI, RIL_a, EI, 0, i2, 0, r1, mov2, 0) /* LOAD RELATIVE LONG */ @@ -450,9 +453,9 @@ C(0xe312, LT, RXY_a, EI, 0, a2, new, r1_32, ld32s, s64) C(0xe302, LTG, RXY_a, EI, 0, a2, r1, 0, ld64, s64) C(0xe332, LTGF, RXY_a, GIE, 0, a2, r1, 0, ld32s, s64) - C(0xb302, LTEBR, RRE, Z, 0, e2, 0, cond_e1e2, mov2, f32) - C(0xb312, LTDBR, RRE, Z, 0, f2_o, 0, f1, mov2, f64) - C(0xb342, LTXBR, RRE, Z, 0, x2_o, 0, x1, movx, f128) + F(0xb302, LTEBR, RRE, Z, 0, e2, 0, cond_e1e2, mov2, f32, IF_BFP) + F(0xb312, LTDBR, RRE, Z, 0, f2_o, 0, f1, mov2, f64, IF_BFP) + F(0xb342, LTXBR, RRE, Z, 0, x2_o, 0, x1, movx, f128, IF_BFP) /* LOAD AND TRAP */ C(0xe39f, LAT, RXY_a, LAT, 0, m2_32u, r1, 0, lat, 0) C(0xe385, LGAT, RXY_a, LAT, 0, a2, r1, 0, lgat, 0) @@ -472,10 +475,10 @@ C(0x1300, LCR, RR_a, Z, 0, r2, new, r1_32, neg, neg32) C(0xb903, LCGR, RRE, Z, 0, r2, r1, 0, neg, neg64) C(0xb913, LCGFR, RRE, Z, 0, r2_32s, r1, 0, neg, neg64) - C(0xb303, LCEBR, RRE, Z, 0, e2, new, e1, negf32, f32) - C(0xb313, LCDBR, RRE, Z, 0, f2_o, f1, 0, negf64, f64) - C(0xb343, LCXBR, RRE, Z, 0, x2_o, x1, 0, negf128, f128) - C(0xb373, LCDFR, RRE, FPSSH, 0, f2_o, f1, 0, negf64, 0) + F(0xb303, LCEBR, RRE, Z, 0, e2, new, e1, negf32, f32, IF_BFP) + F(0xb313, LCDBR, RRE, Z, 0, f2_o, f1, 0, negf64, f64, IF_BFP) + F(0xb343, LCXBR, RRE, Z, 0, x2_o, x1, 0, negf128, f128, IF_BFP) + F(0xb373, LCDFR, RRE, FPSSH, 0, f2_o, f1, 0, negf64, 0, IF_AFP1 | IF_AFP2) /* LOAD HALFWORD */ C(0xb927, LHR, RRE, EI, 0, r2_16s, 0, r1_32, mov2, 0) C(0xb907, LGHR, RRE, EI, 0, r2_16s, 0, r1, mov2, 0) @@ -532,17 +535,17 @@ C(0xe39c, LLGTAT, RXY_a, LAT, 0, m2_32u, r1, 0, llgtat, 0) /* LOAD FPR FROM GR */ - C(0xb3c1, LDGR, RRE, FPRGR, 0, r2_o, 0, f1, mov2, 0) + F(0xb3c1, LDGR, RRE, FPRGR, 0, r2_o, 0, f1, mov2, 0, IF_AFP1) /* LOAD GR FROM FPR */ - C(0xb3cd, LGDR, RRE, FPRGR, 0, f2_o, 0, r1, mov2, 0) + F(0xb3cd, LGDR, RRE, FPRGR, 0, f2_o, 0, r1, mov2, 0, IF_AFP2) /* LOAD NEGATIVE */ C(0x1100, LNR, RR_a, Z, 0, r2_32s, new, r1_32, nabs, nabs32) C(0xb901, LNGR, RRE, Z, 0, r2, r1, 0, nabs, nabs64) C(0xb911, LNGFR, RRE, Z, 0, r2_32s, r1, 0, nabs, nabs64) - C(0xb301, LNEBR, RRE, Z, 0, e2, new, e1, nabsf32, f32) - C(0xb311, LNDBR, RRE, Z, 0, f2_o, f1, 0, nabsf64, f64) - C(0xb341, LNXBR, RRE, Z, 0, x2_o, x1, 0, nabsf128, f128) - C(0xb371, LNDFR, RRE, FPSSH, 0, f2_o, f1, 0, nabsf64, 0) + F(0xb301, LNEBR, RRE, Z, 0, e2, new, e1, nabsf32, f32, IF_BFP) + F(0xb311, LNDBR, RRE, Z, 0, f2_o, f1, 0, nabsf64, f64, IF_BFP) + 
F(0xb341, LNXBR, RRE, Z, 0, x2_o, x1, 0, nabsf128, f128, IF_BFP) + F(0xb371, LNDFR, RRE, FPSSH, 0, f2_o, f1, 0, nabsf64, 0, IF_AFP1 | IF_AFP2) /* LOAD ON CONDITION */ C(0xb9f2, LOCR, RRF_c, LOC, r1, r2, new, r1_32, loc, 0) C(0xb9e2, LOCGR, RRF_c, LOC, r1, r2, r1, 0, loc, 0) @@ -564,10 +567,10 @@ C(0x1000, LPR, RR_a, Z, 0, r2_32s, new, r1_32, abs, abs32) C(0xb900, LPGR, RRE, Z, 0, r2, r1, 0, abs, abs64) C(0xb910, LPGFR, RRE, Z, 0, r2_32s, r1, 0, abs, abs64) - C(0xb300, LPEBR, RRE, Z, 0, e2, new, e1, absf32, f32) - C(0xb310, LPDBR, RRE, Z, 0, f2_o, f1, 0, absf64, f64) - C(0xb340, LPXBR, RRE, Z, 0, x2_o, x1, 0, absf128, f128) - C(0xb370, LPDFR, RRE, FPSSH, 0, f2_o, f1, 0, absf64, 0) + F(0xb300, LPEBR, RRE, Z, 0, e2, new, e1, absf32, f32, IF_BFP) + F(0xb310, LPDBR, RRE, Z, 0, f2_o, f1, 0, absf64, f64, IF_BFP) + F(0xb340, LPXBR, RRE, Z, 0, x2_o, x1, 0, absf128, f128, IF_BFP) + F(0xb370, LPDFR, RRE, FPSSH, 0, f2_o, f1, 0, absf64, 0, IF_AFP1 | IF_AFP2) /* LOAD REVERSED */ C(0xb91f, LRVR, RRE, Z, 0, r2_32u, new, r1_32, rev32, 0) C(0xb90f, LRVGR, RRE, Z, 0, r2_o, r1, 0, rev64, 0) @@ -575,30 +578,30 @@ C(0xe31e, LRV, RXY_a, Z, 0, m2_32u, new, r1_32, rev32, 0) C(0xe30f, LRVG, RXY_a, Z, 0, m2_64, r1, 0, rev64, 0) /* LOAD ZERO */ - C(0xb374, LZER, RRE, Z, 0, 0, 0, e1, zero, 0) - C(0xb375, LZDR, RRE, Z, 0, 0, 0, f1, zero, 0) - C(0xb376, LZXR, RRE, Z, 0, 0, 0, x1, zero2, 0) + F(0xb374, LZER, RRE, Z, 0, 0, 0, e1, zero, 0, IF_AFP1) + F(0xb375, LZDR, RRE, Z, 0, 0, 0, f1, zero, 0, IF_AFP1) + F(0xb376, LZXR, RRE, Z, 0, 0, 0, x1, zero2, 0, IF_AFP1) /* LOAD FPC */ - C(0xb29d, LFPC, S, Z, 0, m2_32u, 0, 0, sfpc, 0) + F(0xb29d, LFPC, S, Z, 0, m2_32u, 0, 0, sfpc, 0, IF_BFP) /* LOAD FPC AND SIGNAL */ - C(0xb2bd, LFAS, S, IEEEE_SIM, 0, m2_32u, 0, 0, sfas, 0) + F(0xb2bd, LFAS, S, IEEEE_SIM, 0, m2_32u, 0, 0, sfas, 0, IF_DFP) /* LOAD FP INTEGER */ - C(0xb357, FIEBR, RRF_e, Z, 0, e2, new, e1, fieb, 0) - C(0xb35f, FIDBR, RRF_e, Z, 0, f2_o, f1, 0, fidb, 0) - C(0xb347, FIXBR, RRF_e, Z, 0, x2_o, x1, 0, fixb, 0) + F(0xb357, FIEBR, RRF_e, Z, 0, e2, new, e1, fieb, 0, IF_BFP) + F(0xb35f, FIDBR, RRF_e, Z, 0, f2_o, f1, 0, fidb, 0, IF_BFP) + F(0xb347, FIXBR, RRF_e, Z, 0, x2_o, x1, 0, fixb, 0, IF_BFP) /* LOAD LENGTHENED */ - C(0xb304, LDEBR, RRE, Z, 0, e2, f1, 0, ldeb, 0) - C(0xb305, LXDBR, RRE, Z, 0, f2_o, x1, 0, lxdb, 0) - C(0xb306, LXEBR, RRE, Z, 0, e2, x1, 0, lxeb, 0) - C(0xed04, LDEB, RXE, Z, 0, m2_32u, f1, 0, ldeb, 0) - C(0xed05, LXDB, RXE, Z, 0, m2_64, x1, 0, lxdb, 0) - C(0xed06, LXEB, RXE, Z, 0, m2_32u, x1, 0, lxeb, 0) + F(0xb304, LDEBR, RRE, Z, 0, e2, f1, 0, ldeb, 0, IF_BFP) + F(0xb305, LXDBR, RRE, Z, 0, f2_o, x1, 0, lxdb, 0, IF_BFP) + F(0xb306, LXEBR, RRE, Z, 0, e2, x1, 0, lxeb, 0, IF_BFP) + F(0xed04, LDEB, RXE, Z, 0, m2_32u, f1, 0, ldeb, 0, IF_BFP) + F(0xed05, LXDB, RXE, Z, 0, m2_64, x1, 0, lxdb, 0, IF_BFP) + F(0xed06, LXEB, RXE, Z, 0, m2_32u, x1, 0, lxeb, 0, IF_BFP) /* LOAD ROUNDED */ - C(0xb344, LEDBR, RRE, Z, 0, f2_o, new, e1, ledb, 0) - C(0xb345, LDXBR, RRE, Z, 0, x2_o, f1, 0, ldxb, 0) - C(0xb346, LEXBR, RRE, Z, 0, x2_o, new, e1, lexb, 0) + F(0xb344, LEDBR, RRE, Z, 0, f2_o, new, e1, ledb, 0, IF_BFP) + F(0xb345, LDXBR, RRE, Z, 0, x2_o, f1, 0, ldxb, 0, IF_BFP) + F(0xb346, LEXBR, RRE, Z, 0, x2_o, new, e1, lexb, 0, IF_BFP) /* LOAD MULTIPLE */ C(0x9800, LM, RS_a, Z, 0, a2, 0, 0, lm32, 0) @@ -644,15 +647,15 @@ C(0x1c00, MR, RR_a, Z, r1p1_32s, r2_32s, new, r1_D32, mul, 0) C(0x5c00, M, RX_a, Z, r1p1_32s, m2_32s, new, r1_D32, mul, 0) C(0xe35c, MFY, RXY_a, GIE, r1p1_32s, m2_32s, new, r1_D32, mul, 0) - C(0xb317, MEEBR, RRE, Z, 
e1, e2, new, e1, meeb, 0) - C(0xb31c, MDBR, RRE, Z, f1_o, f2_o, f1, 0, mdb, 0) - C(0xb34c, MXBR, RRE, Z, 0, x2_o, x1, 0, mxb, 0) - C(0xb30c, MDEBR, RRE, Z, f1_o, e2, f1, 0, mdeb, 0) - C(0xb307, MXDBR, RRE, Z, 0, f2_o, x1, 0, mxdb, 0) - C(0xed17, MEEB, RXE, Z, e1, m2_32u, new, e1, meeb, 0) - C(0xed1c, MDB, RXE, Z, f1_o, m2_64, f1, 0, mdb, 0) - C(0xed0c, MDEB, RXE, Z, f1_o, m2_32u, f1, 0, mdeb, 0) - C(0xed07, MXDB, RXE, Z, 0, m2_64, x1, 0, mxdb, 0) + F(0xb317, MEEBR, RRE, Z, e1, e2, new, e1, meeb, 0, IF_BFP) + F(0xb31c, MDBR, RRE, Z, f1_o, f2_o, f1, 0, mdb, 0, IF_BFP) + F(0xb34c, MXBR, RRE, Z, 0, x2_o, x1, 0, mxb, 0, IF_BFP) + F(0xb30c, MDEBR, RRE, Z, f1_o, e2, f1, 0, mdeb, 0, IF_BFP) + F(0xb307, MXDBR, RRE, Z, 0, f2_o, x1, 0, mxdb, 0, IF_BFP) + F(0xed17, MEEB, RXE, Z, e1, m2_32u, new, e1, meeb, 0, IF_BFP) + F(0xed1c, MDB, RXE, Z, f1_o, m2_64, f1, 0, mdb, 0, IF_BFP) + F(0xed0c, MDEB, RXE, Z, f1_o, m2_32u, f1, 0, mdeb, 0, IF_BFP) + F(0xed07, MXDB, RXE, Z, 0, m2_64, x1, 0, mxdb, 0, IF_BFP) /* MULTIPLY HALFWORD */ C(0x4c00, MH, RX_a, Z, r1_o, m2_16s, new, r1_32, mul, 0) C(0xe37c, MHY, RXY_a, GIE, r1_o, m2_16s, new, r1_32, mul, 0) @@ -677,15 +680,15 @@ C(0xc200, MSGFI, RIL_a, GIE, r1_o, i2, r1, 0, mul, 0) /* MULTIPLY AND ADD */ - C(0xb30e, MAEBR, RRD, Z, e1, e2, new, e1, maeb, 0) - C(0xb31e, MADBR, RRD, Z, f1_o, f2_o, f1, 0, madb, 0) - C(0xed0e, MAEB, RXF, Z, e1, m2_32u, new, e1, maeb, 0) - C(0xed1e, MADB, RXF, Z, f1_o, m2_64, f1, 0, madb, 0) + F(0xb30e, MAEBR, RRD, Z, e1, e2, new, e1, maeb, 0, IF_BFP) + F(0xb31e, MADBR, RRD, Z, f1_o, f2_o, f1, 0, madb, 0, IF_BFP) + F(0xed0e, MAEB, RXF, Z, e1, m2_32u, new, e1, maeb, 0, IF_BFP) + F(0xed1e, MADB, RXF, Z, f1_o, m2_64, f1, 0, madb, 0, IF_BFP) /* MULTIPLY AND SUBTRACT */ - C(0xb30f, MSEBR, RRD, Z, e1, e2, new, e1, mseb, 0) - C(0xb31f, MSDBR, RRD, Z, f1_o, f2_o, f1, 0, msdb, 0) - C(0xed0f, MSEB, RXF, Z, e1, m2_32u, new, e1, mseb, 0) - C(0xed1f, MSDB, RXF, Z, f1_o, m2_64, f1, 0, msdb, 0) + F(0xb30f, MSEBR, RRD, Z, e1, e2, new, e1, mseb, 0, IF_BFP) + F(0xb31f, MSDBR, RRD, Z, f1_o, f2_o, f1, 0, msdb, 0, IF_BFP) + F(0xed0f, MSEB, RXF, Z, e1, m2_32u, new, e1, mseb, 0, IF_BFP) + F(0xed1f, MSDB, RXF, Z, f1_o, m2_64, f1, 0, msdb, 0, IF_BFP) /* OR */ C(0x1600, OR, RR_a, Z, r1, r2, new, r1_32, or, nz32) @@ -752,14 +755,14 @@ D(0x010d, SAM31, E, Z, 0, 0, 0, 0, sam, 0, 1) D(0x010e, SAM64, E, Z, 0, 0, 0, 0, sam, 0, 3) /* SET FPC */ - C(0xb384, SFPC, RRE, Z, 0, r1_o, 0, 0, sfpc, 0) + F(0xb384, SFPC, RRE, Z, 0, r1_o, 0, 0, sfpc, 0, IF_BFP) /* SET FPC AND SIGNAL */ - C(0xb385, SFASR, RRE, IEEEE_SIM, 0, r1_o, 0, 0, sfas, 0) + F(0xb385, SFASR, RRE, IEEEE_SIM, 0, r1_o, 0, 0, sfas, 0, IF_DFP) /* SET BFP ROUNDING MODE */ - C(0xb299, SRNM, S, Z, 0, 0, 0, 0, srnm, 0) - C(0xb2b8, SRNMB, S, FPE, 0, 0, 0, 0, srnm, 0) + F(0xb299, SRNM, S, Z, 0, 0, 0, 0, srnm, 0, IF_BFP) + F(0xb2b8, SRNMB, S, FPE, 0, 0, 0, 0, srnm, 0, IF_BFP) /* SET DFP ROUNDING MODE */ - C(0xb2b9, SRNMT, S, DFPR, 0, 0, 0, 0, srnm, 0) + F(0xb2b9, SRNMT, S, DFPR, 0, 0, 0, 0, srnm, 0, IF_DFP) /* SET PROGRAM MASK */ C(0x0400, SPM, RR_a, Z, r1, 0, 0, 0, spm, 0) @@ -789,20 +792,20 @@ C(0x8c00, SRDL, RS_a, Z, r1_D32, sh64, new, r1_D32, srl, 0) /* SQUARE ROOT */ - C(0xb314, SQEBR, RRE, Z, 0, e2, new, e1, sqeb, 0) - C(0xb315, SQDBR, RRE, Z, 0, f2_o, f1, 0, sqdb, 0) - C(0xb316, SQXBR, RRE, Z, 0, x2_o, x1, 0, sqxb, 0) - C(0xed14, SQEB, RXE, Z, 0, m2_32u, new, e1, sqeb, 0) - C(0xed15, SQDB, RXE, Z, 0, m2_64, f1, 0, sqdb, 0) + F(0xb314, SQEBR, RRE, Z, 0, e2, new, e1, sqeb, 0, IF_BFP) + F(0xb315, SQDBR, RRE, Z, 0, f2_o, f1, 
0, sqdb, 0, IF_BFP) + F(0xb316, SQXBR, RRE, Z, 0, x2_o, x1, 0, sqxb, 0, IF_BFP) + F(0xed14, SQEB, RXE, Z, 0, m2_32u, new, e1, sqeb, 0, IF_BFP) + F(0xed15, SQDB, RXE, Z, 0, m2_64, f1, 0, sqdb, 0, IF_BFP) /* STORE */ C(0x5000, ST, RX_a, Z, r1_o, a2, 0, 0, st32, 0) C(0xe350, STY, RXY_a, LD, r1_o, a2, 0, 0, st32, 0) C(0xe324, STG, RXY_a, Z, r1_o, a2, 0, 0, st64, 0) - C(0x6000, STD, RX_a, Z, f1_o, a2, 0, 0, st64, 0) - C(0xed67, STDY, RXY_a, LD, f1_o, a2, 0, 0, st64, 0) - C(0x7000, STE, RX_a, Z, e1, a2, 0, 0, st32, 0) - C(0xed66, STEY, RXY_a, LD, e1, a2, 0, 0, st32, 0) + F(0x6000, STD, RX_a, Z, f1_o, a2, 0, 0, st64, 0, IF_AFP1) + F(0xed67, STDY, RXY_a, LD, f1_o, a2, 0, 0, st64, 0, IF_AFP1) + F(0x7000, STE, RX_a, Z, e1, a2, 0, 0, st32, 0, IF_AFP1) + F(0xed66, STEY, RXY_a, LD, e1, a2, 0, 0, st32, 0, IF_AFP1) /* STORE RELATIVE LONG */ C(0xc40f, STRL, RIL_b, GIE, r1_o, ri2, 0, 0, st32, 0) C(0xc40b, STGRL, RIL_b, GIE, r1_o, ri2, 0, 0, st64, 0) @@ -837,7 +840,7 @@ /* STORE FACILITY LIST EXTENDED */ C(0xb2b0, STFLE, S, SFLE, 0, a2, 0, 0, stfle, 0) /* STORE FPC */ - C(0xb29c, STFPC, S, Z, 0, a2, new, m2_32, efpc, 0) + F(0xb29c, STFPC, S, Z, 0, a2, new, m2_32, efpc, 0, IF_BFP) /* STORE MULTIPLE */ D(0x9000, STM, RS_a, Z, 0, a2, 0, 0, stm, 0, 4) @@ -861,11 +864,11 @@ C(0xb9e9, SGRK, RRF_a, DO, r2, r3, r1, 0, sub, subs64) C(0xe309, SG, RXY_a, Z, r1, m2_64, r1, 0, sub, subs64) C(0xe319, SGF, RXY_a, Z, r1, m2_32s, r1, 0, sub, subs64) - C(0xb30b, SEBR, RRE, Z, e1, e2, new, e1, seb, f32) - C(0xb31b, SDBR, RRE, Z, f1_o, f2_o, f1, 0, sdb, f64) - C(0xb34b, SXBR, RRE, Z, 0, x2_o, x1, 0, sxb, f128) - C(0xed0b, SEB, RXE, Z, e1, m2_32u, new, e1, seb, f32) - C(0xed1b, SDB, RXE, Z, f1_o, m2_64, f1, 0, sdb, f64) + F(0xb30b, SEBR, RRE, Z, e1, e2, new, e1, seb, f32, IF_BFP) + F(0xb31b, SDBR, RRE, Z, f1_o, f2_o, f1, 0, sdb, f64, IF_BFP) + F(0xb34b, SXBR, RRE, Z, 0, x2_o, x1, 0, sxb, f128, IF_BFP) + F(0xed0b, SEB, RXE, Z, e1, m2_32u, new, e1, seb, f32, IF_BFP) + F(0xed1b, SDB, RXE, Z, f1_o, m2_64, f1, 0, sdb, f64, IF_BFP) /* SUBTRACT HALFWORD */ C(0x4b00, SH, RX_a, Z, r1, m2_16s, new, r1_32, sub, subs32) C(0xe37b, SHY, RXY_a, LD, r1, m2_16s, new, r1_32, sub, subs32) @@ -904,9 +907,9 @@ C(0x9300, TS, S, Z, 0, a2, 0, 0, ts, 0) /* TEST DATA CLASS */ - C(0xed10, TCEB, RXE, Z, e1, a2, 0, 0, tceb, 0) - C(0xed11, TCDB, RXE, Z, f1_o, a2, 0, 0, tcdb, 0) - C(0xed12, TCXB, RXE, Z, x1_o, a2, 0, 0, tcxb, 0) + F(0xed10, TCEB, RXE, Z, e1, a2, 0, 0, tceb, 0, IF_BFP) + F(0xed11, TCDB, RXE, Z, f1_o, a2, 0, 0, tcdb, 0, IF_BFP) + F(0xed12, TCXB, RXE, Z, x1_o, a2, 0, 0, tcxb, 0, IF_BFP) /* TEST DECIMAL */ C(0xebc0, TP, RSL, E2, la1, 0, 0, 0, tp, 0) @@ -961,126 +964,126 @@ #ifndef CONFIG_USER_ONLY /* COMPARE AND SWAP AND PURGE */ - D(0xb250, CSP, RRE, Z, r1_32u, ra2, r1_P, 0, csp, 0, MO_TEUL) - D(0xb98a, CSPG, RRE, DAT_ENH, r1_o, ra2, r1_P, 0, csp, 0, MO_TEQ) + E(0xb250, CSP, RRE, Z, r1_32u, ra2, r1_P, 0, csp, 0, MO_TEUL, IF_PRIV) + E(0xb98a, CSPG, RRE, DAT_ENH, r1_o, ra2, r1_P, 0, csp, 0, MO_TEQ, IF_PRIV) /* DIAGNOSE (KVM hypercall) */ - C(0x8300, DIAG, RSI, Z, 0, 0, 0, 0, diag, 0) + F(0x8300, DIAG, RSI, Z, 0, 0, 0, 0, diag, 0, IF_PRIV) /* INSERT STORAGE KEY EXTENDED */ - C(0xb229, ISKE, RRE, Z, 0, r2_o, new, r1_8, iske, 0) + F(0xb229, ISKE, RRE, Z, 0, r2_o, new, r1_8, iske, 0, IF_PRIV) /* INVALIDATE DAT TABLE ENTRY */ - C(0xb98e, IPDE, RRF_b, Z, r1_o, r2_o, 0, 0, idte, 0) + F(0xb98e, IPDE, RRF_b, Z, r1_o, r2_o, 0, 0, idte, 0, IF_PRIV) /* INVALIDATE PAGE TABLE ENTRY */ - C(0xb221, IPTE, RRF_a, Z, r1_o, r2_o, 0, 0, ipte, 0) + F(0xb221, IPTE, 
RRF_a, Z, r1_o, r2_o, 0, 0, ipte, 0, IF_PRIV) /* LOAD CONTROL */ - C(0xb700, LCTL, RS_a, Z, 0, a2, 0, 0, lctl, 0) - C(0xeb2f, LCTLG, RSY_a, Z, 0, a2, 0, 0, lctlg, 0) + F(0xb700, LCTL, RS_a, Z, 0, a2, 0, 0, lctl, 0, IF_PRIV) + F(0xeb2f, LCTLG, RSY_a, Z, 0, a2, 0, 0, lctlg, 0, IF_PRIV) /* LOAD PROGRAM PARAMETER */ - C(0xb280, LPP, S, LPP, 0, m2_64, 0, 0, lpp, 0) + F(0xb280, LPP, S, LPP, 0, m2_64, 0, 0, lpp, 0, IF_PRIV) /* LOAD PSW */ - C(0x8200, LPSW, S, Z, 0, a2, 0, 0, lpsw, 0) + F(0x8200, LPSW, S, Z, 0, a2, 0, 0, lpsw, 0, IF_PRIV) /* LOAD PSW EXTENDED */ - C(0xb2b2, LPSWE, S, Z, 0, a2, 0, 0, lpswe, 0) + F(0xb2b2, LPSWE, S, Z, 0, a2, 0, 0, lpswe, 0, IF_PRIV) /* LOAD REAL ADDRESS */ - C(0xb100, LRA, RX_a, Z, 0, a2, r1, 0, lra, 0) - C(0xe313, LRAY, RXY_a, LD, 0, a2, r1, 0, lra, 0) - C(0xe303, LRAG, RXY_a, Z, 0, a2, r1, 0, lra, 0) + F(0xb100, LRA, RX_a, Z, 0, a2, r1, 0, lra, 0, IF_PRIV) + F(0xe313, LRAY, RXY_a, LD, 0, a2, r1, 0, lra, 0, IF_PRIV) + F(0xe303, LRAG, RXY_a, Z, 0, a2, r1, 0, lra, 0, IF_PRIV) /* LOAD USING REAL ADDRESS */ - C(0xb24b, LURA, RRE, Z, 0, r2, new, r1_32, lura, 0) - C(0xb905, LURAG, RRE, Z, 0, r2, r1, 0, lurag, 0) + F(0xb24b, LURA, RRE, Z, 0, r2, new, r1_32, lura, 0, IF_PRIV) + F(0xb905, LURAG, RRE, Z, 0, r2, r1, 0, lurag, 0, IF_PRIV) /* MOVE TO PRIMARY */ - C(0xda00, MVCP, SS_d, Z, la1, a2, 0, 0, mvcp, 0) + F(0xda00, MVCP, SS_d, Z, la1, a2, 0, 0, mvcp, 0, IF_PRIV) /* MOVE TO SECONDARY */ - C(0xdb00, MVCS, SS_d, Z, la1, a2, 0, 0, mvcs, 0) + F(0xdb00, MVCS, SS_d, Z, la1, a2, 0, 0, mvcs, 0, IF_PRIV) /* PURGE TLB */ - C(0xb20d, PTLB, S, Z, 0, 0, 0, 0, ptlb, 0) + F(0xb20d, PTLB, S, Z, 0, 0, 0, 0, ptlb, 0, IF_PRIV) /* RESET REFERENCE BIT EXTENDED */ - C(0xb22a, RRBE, RRE, Z, 0, r2_o, 0, 0, rrbe, 0) + F(0xb22a, RRBE, RRE, Z, 0, r2_o, 0, 0, rrbe, 0, IF_PRIV) /* SERVICE CALL LOGICAL PROCESSOR (PV hypercall) */ - C(0xb220, SERVC, RRE, Z, r1_o, r2_o, 0, 0, servc, 0) + F(0xb220, SERVC, RRE, Z, r1_o, r2_o, 0, 0, servc, 0, IF_PRIV) /* SET ADDRESS SPACE CONTROL FAST */ - C(0xb279, SACF, S, Z, 0, a2, 0, 0, sacf, 0) + F(0xb279, SACF, S, Z, 0, a2, 0, 0, sacf, 0, IF_PRIV) /* SET CLOCK */ - C(0xb204, SCK, S, Z, la2, 0, 0, 0, sck, 0) + F(0xb204, SCK, S, Z, la2, 0, 0, 0, sck, 0, IF_PRIV) /* SET CLOCK COMPARATOR */ - C(0xb206, SCKC, S, Z, 0, m2_64a, 0, 0, sckc, 0) + F(0xb206, SCKC, S, Z, 0, m2_64a, 0, 0, sckc, 0, IF_PRIV) /* SET CLOCK PROGRAMMABLE FIELD */ - C(0x0107, SCKPF, E, Z, 0, 0, 0, 0, sckpf, 0) + F(0x0107, SCKPF, E, Z, 0, 0, 0, 0, sckpf, 0, IF_PRIV) /* SET CPU TIMER */ - C(0xb208, SPT, S, Z, 0, m2_64a, 0, 0, spt, 0) + F(0xb208, SPT, S, Z, 0, m2_64a, 0, 0, spt, 0, IF_PRIV) /* SET PREFIX */ - C(0xb210, SPX, S, Z, 0, m2_32ua, 0, 0, spx, 0) + F(0xb210, SPX, S, Z, 0, m2_32ua, 0, 0, spx, 0, IF_PRIV) /* SET PSW KEY FROM ADDRESS */ - C(0xb20a, SPKA, S, Z, 0, a2, 0, 0, spka, 0) + F(0xb20a, SPKA, S, Z, 0, a2, 0, 0, spka, 0, IF_PRIV) /* SET STORAGE KEY EXTENDED */ - C(0xb22b, SSKE, RRF_c, Z, r1_o, r2_o, 0, 0, sske, 0) + F(0xb22b, SSKE, RRF_c, Z, r1_o, r2_o, 0, 0, sske, 0, IF_PRIV) /* SET SYSTEM MASK */ - C(0x8000, SSM, S, Z, 0, m2_8u, 0, 0, ssm, 0) + F(0x8000, SSM, S, Z, 0, m2_8u, 0, 0, ssm, 0, IF_PRIV) /* SIGNAL PROCESSOR */ - C(0xae00, SIGP, RS_a, Z, 0, a2, 0, 0, sigp, 0) + F(0xae00, SIGP, RS_a, Z, 0, a2, 0, 0, sigp, 0, IF_PRIV) /* STORE CLOCK */ C(0xb205, STCK, S, Z, la2, 0, new, m1_64, stck, 0) C(0xb27c, STCKF, S, SCF, la2, 0, new, m1_64, stck, 0) /* STORE CLOCK EXTENDED */ C(0xb278, STCKE, S, Z, 0, a2, 0, 0, stcke, 0) /* STORE CLOCK COMPARATOR */ - C(0xb207, STCKC, S, Z, la2, 0, new, m1_64a, 
stckc, 0) + F(0xb207, STCKC, S, Z, la2, 0, new, m1_64a, stckc, 0, IF_PRIV) /* STORE CONTROL */ - C(0xb600, STCTL, RS_a, Z, 0, a2, 0, 0, stctl, 0) - C(0xeb25, STCTG, RSY_a, Z, 0, a2, 0, 0, stctg, 0) + F(0xb600, STCTL, RS_a, Z, 0, a2, 0, 0, stctl, 0, IF_PRIV) + F(0xeb25, STCTG, RSY_a, Z, 0, a2, 0, 0, stctg, 0, IF_PRIV) /* STORE CPU ADDRESS */ - C(0xb212, STAP, S, Z, la2, 0, new, m1_16a, stap, 0) + F(0xb212, STAP, S, Z, la2, 0, new, m1_16a, stap, 0, IF_PRIV) /* STORE CPU ID */ - C(0xb202, STIDP, S, Z, la2, 0, new, m1_64a, stidp, 0) + F(0xb202, STIDP, S, Z, la2, 0, new, m1_64a, stidp, 0, IF_PRIV) /* STORE CPU TIMER */ - C(0xb209, STPT, S, Z, la2, 0, new, m1_64a, stpt, 0) + F(0xb209, STPT, S, Z, la2, 0, new, m1_64a, stpt, 0, IF_PRIV) /* STORE FACILITY LIST */ - C(0xb2b1, STFL, S, Z, 0, 0, 0, 0, stfl, 0) + F(0xb2b1, STFL, S, Z, 0, 0, 0, 0, stfl, 0, IF_PRIV) /* STORE PREFIX */ - C(0xb211, STPX, S, Z, la2, 0, new, m1_32a, stpx, 0) + F(0xb211, STPX, S, Z, la2, 0, new, m1_32a, stpx, 0, IF_PRIV) /* STORE SYSTEM INFORMATION */ - C(0xb27d, STSI, S, Z, 0, a2, 0, 0, stsi, 0) + F(0xb27d, STSI, S, Z, 0, a2, 0, 0, stsi, 0, IF_PRIV) /* STORE THEN AND SYSTEM MASK */ - C(0xac00, STNSM, SI, Z, la1, 0, 0, 0, stnosm, 0) + F(0xac00, STNSM, SI, Z, la1, 0, 0, 0, stnosm, 0, IF_PRIV) /* STORE THEN OR SYSTEM MASK */ - C(0xad00, STOSM, SI, Z, la1, 0, 0, 0, stnosm, 0) + F(0xad00, STOSM, SI, Z, la1, 0, 0, 0, stnosm, 0, IF_PRIV) /* STORE USING REAL ADDRESS */ - C(0xb246, STURA, RRE, Z, r1_o, r2_o, 0, 0, stura, 0) - C(0xb925, STURG, RRE, Z, r1_o, r2_o, 0, 0, sturg, 0) + F(0xb246, STURA, RRE, Z, r1_o, r2_o, 0, 0, stura, 0, IF_PRIV) + F(0xb925, STURG, RRE, Z, r1_o, r2_o, 0, 0, sturg, 0, IF_PRIV) /* TEST BLOCK */ - C(0xb22c, TB, RRE, Z, 0, r2_o, 0, 0, testblock, 0) + F(0xb22c, TB, RRE, Z, 0, r2_o, 0, 0, testblock, 0, IF_PRIV) /* TEST PROTECTION */ C(0xe501, TPROT, SSE, Z, la1, a2, 0, 0, tprot, 0) /* CCW I/O Instructions */ - C(0xb276, XSCH, S, Z, 0, 0, 0, 0, xsch, 0) - C(0xb230, CSCH, S, Z, 0, 0, 0, 0, csch, 0) - C(0xb231, HSCH, S, Z, 0, 0, 0, 0, hsch, 0) - C(0xb232, MSCH, S, Z, 0, insn, 0, 0, msch, 0) - C(0xb23b, RCHP, S, Z, 0, 0, 0, 0, rchp, 0) - C(0xb238, RSCH, S, Z, 0, 0, 0, 0, rsch, 0) - C(0xb237, SAL, S, Z, 0, 0, 0, 0, sal, 0) - C(0xb23c, SCHM, S, Z, 0, insn, 0, 0, schm, 0) - C(0xb274, SIGA, S, Z, 0, 0, 0, 0, siga, 0) - C(0xb23a, STCPS, S, Z, 0, 0, 0, 0, stcps, 0) - C(0xb233, SSCH, S, Z, 0, insn, 0, 0, ssch, 0) - C(0xb239, STCRW, S, Z, 0, insn, 0, 0, stcrw, 0) - C(0xb234, STSCH, S, Z, 0, insn, 0, 0, stsch, 0) - C(0xb236, TPI , S, Z, la2, 0, 0, 0, tpi, 0) - C(0xb235, TSCH, S, Z, 0, insn, 0, 0, tsch, 0) + F(0xb276, XSCH, S, Z, 0, 0, 0, 0, xsch, 0, IF_PRIV) + F(0xb230, CSCH, S, Z, 0, 0, 0, 0, csch, 0, IF_PRIV) + F(0xb231, HSCH, S, Z, 0, 0, 0, 0, hsch, 0, IF_PRIV) + F(0xb232, MSCH, S, Z, 0, insn, 0, 0, msch, 0, IF_PRIV) + F(0xb23b, RCHP, S, Z, 0, 0, 0, 0, rchp, 0, IF_PRIV) + F(0xb238, RSCH, S, Z, 0, 0, 0, 0, rsch, 0, IF_PRIV) + F(0xb237, SAL, S, Z, 0, 0, 0, 0, sal, 0, IF_PRIV) + F(0xb23c, SCHM, S, Z, 0, insn, 0, 0, schm, 0, IF_PRIV) + F(0xb274, SIGA, S, Z, 0, 0, 0, 0, siga, 0, IF_PRIV) + F(0xb23a, STCPS, S, Z, 0, 0, 0, 0, stcps, 0, IF_PRIV) + F(0xb233, SSCH, S, Z, 0, insn, 0, 0, ssch, 0, IF_PRIV) + F(0xb239, STCRW, S, Z, 0, insn, 0, 0, stcrw, 0, IF_PRIV) + F(0xb234, STSCH, S, Z, 0, insn, 0, 0, stsch, 0, IF_PRIV) + F(0xb236, TPI , S, Z, la2, 0, 0, 0, tpi, 0, IF_PRIV) + F(0xb235, TSCH, S, Z, 0, insn, 0, 0, tsch, 0, IF_PRIV) /* ??? 
Not listed in PoO ninth edition, but there's a linux driver that uses it: "A CHSC subchannel is usually present on LPAR only." */ - C(0xb25f, CHSC, RRE, Z, 0, insn, 0, 0, chsc, 0) + F(0xb25f, CHSC, RRE, Z, 0, insn, 0, 0, chsc, 0, IF_PRIV) /* zPCI Instructions */ /* None of these instructions are documented in the PoP, so this is all based upon target/s390x/kvm.c and Linux code and likely incomplete */ - C(0xebd0, PCISTB, RSY_a, PCI, la2, 0, 0, 0, pcistb, 0) - C(0xebd1, SIC, RSY_a, AIS, r1, r3, 0, 0, sic, 0) - C(0xb9a0, CLP, RRF_c, PCI, 0, 0, 0, 0, clp, 0) - C(0xb9d0, PCISTG, RRE, PCI, 0, 0, 0, 0, pcistg, 0) - C(0xb9d2, PCILG, RRE, PCI, 0, 0, 0, 0, pcilg, 0) - C(0xb9d3, RPCIT, RRE, PCI, 0, 0, 0, 0, rpcit, 0) - C(0xe3d0, MPCIFC, RXY_a, PCI, la2, 0, 0, 0, mpcifc, 0) - C(0xe3d4, STPCIFC, RXY_a, PCI, la2, 0, 0, 0, stpcifc, 0) + F(0xebd0, PCISTB, RSY_a, PCI, la2, 0, 0, 0, pcistb, 0, IF_PRIV) + F(0xebd1, SIC, RSY_a, AIS, r1, r3, 0, 0, sic, 0, IF_PRIV) + F(0xb9a0, CLP, RRF_c, PCI, 0, 0, 0, 0, clp, 0, IF_PRIV) + F(0xb9d0, PCISTG, RRE, PCI, 0, 0, 0, 0, pcistg, 0, IF_PRIV) + F(0xb9d2, PCILG, RRE, PCI, 0, 0, 0, 0, pcilg, 0, IF_PRIV) + F(0xb9d3, RPCIT, RRE, PCI, 0, 0, 0, 0, rpcit, 0, IF_PRIV) + F(0xe3d0, MPCIFC, RXY_a, PCI, la2, 0, 0, 0, mpcifc, 0, IF_PRIV) + F(0xe3d4, STPCIFC, RXY_a, PCI, la2, 0, 0, 0, stpcifc, 0, IF_PRIV) #endif /* CONFIG_USER_ONLY */ diff --git a/target/s390x/interrupt.c b/target/s390x/interrupt.c index 25cfb3eef8..a17eff5ebc 100644 --- a/target/s390x/interrupt.c +++ b/target/s390x/interrupt.c @@ -15,6 +15,7 @@ #include "exec/exec-all.h" #include "sysemu/kvm.h" #include "hw/s390x/ioinst.h" +#include "tcg_s390x.h" #if !defined(CONFIG_USER_ONLY) #include "hw/s390x/s390_flic.h" #endif @@ -29,25 +30,11 @@ void trigger_pgm_exception(CPUS390XState *env, uint32_t code, uint32_t ilen) env->int_pgm_ilen = ilen; } -static void tcg_s390_program_interrupt(CPUS390XState *env, uint32_t code, - int ilen, uintptr_t ra) -{ -#ifdef CONFIG_TCG - trigger_pgm_exception(env, code, ilen); - cpu_loop_exit_restore(CPU(s390_env_get_cpu(env)), ra); -#else - g_assert_not_reached(); -#endif -} - void s390_program_interrupt(CPUS390XState *env, uint32_t code, int ilen, uintptr_t ra) { S390CPU *cpu = s390_env_get_cpu(env); - qemu_log_mask(CPU_LOG_INT, "program interrupt at %#" PRIx64 "\n", - env->psw.addr); - if (kvm_enabled()) { kvm_s390_program_interrupt(cpu, code); } else if (tcg_enabled()) { diff --git a/target/s390x/kvm.c b/target/s390x/kvm.c index 348e8cc546..2ebf26adfe 100644 --- a/target/s390x/kvm.c +++ b/target/s390x/kvm.c @@ -36,6 +36,7 @@ #include "qemu/timer.h" #include "qemu/units.h" #include "qemu/mmap-alloc.h" +#include "qemu/log.h" #include "sysemu/sysemu.h" #include "sysemu/hw_accel.h" #include "hw/hw.h" @@ -292,6 +293,12 @@ static int kvm_s390_configure_mempath_backing(KVMState *s) return 0; } + if (!hpage_1m_allowed()) { + error_report("This QEMU machine does not support huge page " + "mappings"); + return -EINVAL; + } + if (path_psize != 1 * MiB) { error_report("Memory backing with 2G pages was specified, " "but KVM does not support this memory backing"); @@ -1109,7 +1116,8 @@ void kvm_s390_program_interrupt(S390CPU *cpu, uint16_t code) .type = KVM_S390_PROGRAM_INT, .u.pgm.code = code, }; - + qemu_log_mask(CPU_LOG_INT, "program interrupt at %#" PRIx64 "\n", + cpu->env.psw.addr); kvm_s390_vcpu_interrupt(cpu, &irq); } @@ -2291,11 +2299,26 @@ void kvm_s390_get_host_cpu_model(S390CPUModel *model, Error **errp) error_setg(errp, "KVM: host CPU model could not be identified"); return; } + /* for now, 
we can only provide the AP feature with HW support */ + if (kvm_vm_check_attr(kvm_state, KVM_S390_VM_CRYPTO, + KVM_S390_VM_CRYPTO_ENABLE_APIE)) { + set_bit(S390_FEAT_AP, model->features); + } /* strip of features that are not part of the maximum model */ bitmap_and(model->features, model->features, model->def->full_feat, S390_FEAT_MAX); } +static void kvm_s390_configure_apie(bool interpret) +{ + uint64_t attr = interpret ? KVM_S390_VM_CRYPTO_ENABLE_APIE : + KVM_S390_VM_CRYPTO_DISABLE_APIE; + + if (kvm_vm_check_attr(kvm_state, KVM_S390_VM_CRYPTO, attr)) { + kvm_s390_set_attr(attr); + } +} + void kvm_s390_apply_cpu_model(const S390CPUModel *model, Error **errp) { struct kvm_s390_vm_cpu_processor prop = { @@ -2345,6 +2368,10 @@ void kvm_s390_apply_cpu_model(const S390CPUModel *model, Error **errp) if (test_bit(S390_FEAT_CMM, model->features)) { kvm_s390_enable_cmma(); } + + if (test_bit(S390_FEAT_AP, model->features)) { + kvm_s390_configure_apie(true); + } } void kvm_s390_restart_interrupt(S390CPU *cpu) diff --git a/target/s390x/mem_helper.c b/target/s390x/mem_helper.c index bacae4f503..490c43e6e6 100644 --- a/target/s390x/mem_helper.c +++ b/target/s390x/mem_helper.c @@ -25,6 +25,7 @@ #include "exec/exec-all.h" #include "exec/cpu_ldst.h" #include "qemu/int128.h" +#include "qemu/atomic128.h" #if !defined(CONFIG_USER_ONLY) #include "hw/s390x/storage-keys.h" @@ -1379,65 +1380,62 @@ uint32_t HELPER(trXX)(CPUS390XState *env, uint32_t r1, uint32_t r2, return cc; } -static void do_cdsg(CPUS390XState *env, uint64_t addr, - uint32_t r1, uint32_t r3, bool parallel) +void HELPER(cdsg)(CPUS390XState *env, uint64_t addr, + uint32_t r1, uint32_t r3) { uintptr_t ra = GETPC(); Int128 cmpv = int128_make128(env->regs[r1 + 1], env->regs[r1]); Int128 newv = int128_make128(env->regs[r3 + 1], env->regs[r3]); Int128 oldv; + uint64_t oldh, oldl; bool fail; - if (parallel) { -#ifndef CONFIG_ATOMIC128 - cpu_loop_exit_atomic(ENV_GET_CPU(env), ra); -#else - int mem_idx = cpu_mmu_index(env, false); - TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx); - oldv = helper_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv, oi, ra); - fail = !int128_eq(oldv, cmpv); -#endif - } else { - uint64_t oldh, oldl; + check_alignment(env, addr, 16, ra); - check_alignment(env, addr, 16, ra); + oldh = cpu_ldq_data_ra(env, addr + 0, ra); + oldl = cpu_ldq_data_ra(env, addr + 8, ra); - oldh = cpu_ldq_data_ra(env, addr + 0, ra); - oldl = cpu_ldq_data_ra(env, addr + 8, ra); - - oldv = int128_make128(oldl, oldh); - fail = !int128_eq(oldv, cmpv); - if (fail) { - newv = oldv; - } - - cpu_stq_data_ra(env, addr + 0, int128_gethi(newv), ra); - cpu_stq_data_ra(env, addr + 8, int128_getlo(newv), ra); + oldv = int128_make128(oldl, oldh); + fail = !int128_eq(oldv, cmpv); + if (fail) { + newv = oldv; } + cpu_stq_data_ra(env, addr + 0, int128_gethi(newv), ra); + cpu_stq_data_ra(env, addr + 8, int128_getlo(newv), ra); + env->cc_op = fail; env->regs[r1] = int128_gethi(oldv); env->regs[r1 + 1] = int128_getlo(oldv); } -void HELPER(cdsg)(CPUS390XState *env, uint64_t addr, - uint32_t r1, uint32_t r3) -{ - do_cdsg(env, addr, r1, r3, false); -} - void HELPER(cdsg_parallel)(CPUS390XState *env, uint64_t addr, uint32_t r1, uint32_t r3) { - do_cdsg(env, addr, r1, r3, true); + uintptr_t ra = GETPC(); + Int128 cmpv = int128_make128(env->regs[r1 + 1], env->regs[r1]); + Int128 newv = int128_make128(env->regs[r3 + 1], env->regs[r3]); + int mem_idx; + TCGMemOpIdx oi; + Int128 oldv; + bool fail; + + assert(HAVE_CMPXCHG128); + + mem_idx = cpu_mmu_index(env, false); + 
oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx); + oldv = helper_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv, oi, ra); + fail = !int128_eq(oldv, cmpv); + + env->cc_op = fail; + env->regs[r1] = int128_gethi(oldv); + env->regs[r1 + 1] = int128_getlo(oldv); } static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1, uint64_t a2, bool parallel) { -#if !defined(CONFIG_USER_ONLY) || defined(CONFIG_ATOMIC128) uint32_t mem_idx = cpu_mmu_index(env, false); -#endif uintptr_t ra = GETPC(); uint32_t fc = extract32(env->regs[0], 0, 8); uint32_t sc = extract32(env->regs[0], 8, 8); @@ -1465,18 +1463,20 @@ static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1, probe_write(env, a2, 0, mem_idx, ra); #endif - /* Note that the compare-and-swap is atomic, and the store is atomic, but - the complete operation is not. Therefore we do not need to assert serial - context in order to implement this. That said, restart early if we can't - support either operation that is supposed to be atomic. */ + /* + * Note that the compare-and-swap is atomic, and the store is atomic, + * but the complete operation is not. Therefore we do not need to + * assert serial context in order to implement this. That said, + * restart early if we can't support either operation that is supposed + * to be atomic. + */ if (parallel) { - int mask = 0; -#if !defined(CONFIG_ATOMIC64) - mask = -8; -#elif !defined(CONFIG_ATOMIC128) - mask = -16; + uint32_t max = 2; +#ifdef CONFIG_ATOMIC64 + max = 3; #endif - if (((4 << fc) | (1 << sc)) & mask) { + if ((HAVE_CMPXCHG128 ? 0 : fc + 2 > max) || + (HAVE_ATOMIC128 ? 0 : sc > max)) { cpu_loop_exit_atomic(ENV_GET_CPU(env), ra); } } @@ -1546,16 +1546,7 @@ static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1, Int128 cv = int128_make128(env->regs[r3 + 1], env->regs[r3]); Int128 ov; - if (parallel) { -#ifdef CONFIG_ATOMIC128 - TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx); - ov = helper_atomic_cmpxchgo_be_mmu(env, a1, cv, nv, oi, ra); - cc = !int128_eq(ov, cv); -#else - /* Note that we asserted !parallel above. */ - g_assert_not_reached(); -#endif - } else { + if (!parallel) { uint64_t oh = cpu_ldq_data_ra(env, a1 + 0, ra); uint64_t ol = cpu_ldq_data_ra(env, a1 + 8, ra); @@ -1567,6 +1558,13 @@ static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1, cpu_stq_data_ra(env, a1 + 0, int128_gethi(nv), ra); cpu_stq_data_ra(env, a1 + 8, int128_getlo(nv), ra); + } else if (HAVE_CMPXCHG128) { + TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx); + ov = helper_atomic_cmpxchgo_be_mmu(env, a1, cv, nv, oi, ra); + cc = !int128_eq(ov, cv); + } else { + /* Note that we asserted !parallel above. */ + g_assert_not_reached(); } env->regs[r3 + 0] = int128_gethi(ov); @@ -1596,18 +1594,16 @@ static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1, cpu_stq_data_ra(env, a2, svh, ra); break; case 4: - if (parallel) { -#ifdef CONFIG_ATOMIC128 + if (!parallel) { + cpu_stq_data_ra(env, a2 + 0, svh, ra); + cpu_stq_data_ra(env, a2 + 8, svl, ra); + } else if (HAVE_ATOMIC128) { TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx); Int128 sv = int128_make128(svl, svh); helper_atomic_sto_be_mmu(env, a2, sv, oi, ra); -#else + } else { /* Note that we asserted !parallel above. 
*/ g_assert_not_reached(); -#endif - } else { - cpu_stq_data_ra(env, a2 + 0, svh, ra); - cpu_stq_data_ra(env, a2 + 8, svl, ra); } break; default: @@ -2100,76 +2096,64 @@ uint64_t HELPER(lra)(CPUS390XState *env, uint64_t addr) #endif /* load pair from quadword */ -static uint64_t do_lpq(CPUS390XState *env, uint64_t addr, bool parallel) +uint64_t HELPER(lpq)(CPUS390XState *env, uint64_t addr) { uintptr_t ra = GETPC(); uint64_t hi, lo; - if (parallel) { -#ifndef CONFIG_ATOMIC128 - cpu_loop_exit_atomic(ENV_GET_CPU(env), ra); -#else - int mem_idx = cpu_mmu_index(env, false); - TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx); - Int128 v = helper_atomic_ldo_be_mmu(env, addr, oi, ra); - hi = int128_gethi(v); - lo = int128_getlo(v); -#endif - } else { - check_alignment(env, addr, 16, ra); - - hi = cpu_ldq_data_ra(env, addr + 0, ra); - lo = cpu_ldq_data_ra(env, addr + 8, ra); - } + check_alignment(env, addr, 16, ra); + hi = cpu_ldq_data_ra(env, addr + 0, ra); + lo = cpu_ldq_data_ra(env, addr + 8, ra); env->retxl = lo; return hi; } -uint64_t HELPER(lpq)(CPUS390XState *env, uint64_t addr) -{ - return do_lpq(env, addr, false); -} - uint64_t HELPER(lpq_parallel)(CPUS390XState *env, uint64_t addr) { - return do_lpq(env, addr, true); -} - -/* store pair to quadword */ -static void do_stpq(CPUS390XState *env, uint64_t addr, - uint64_t low, uint64_t high, bool parallel) -{ uintptr_t ra = GETPC(); + uint64_t hi, lo; + int mem_idx; + TCGMemOpIdx oi; + Int128 v; - if (parallel) { -#ifndef CONFIG_ATOMIC128 - cpu_loop_exit_atomic(ENV_GET_CPU(env), ra); -#else - int mem_idx = cpu_mmu_index(env, false); - TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx); + assert(HAVE_ATOMIC128); - Int128 v = int128_make128(low, high); - helper_atomic_sto_be_mmu(env, addr, v, oi, ra); -#endif - } else { - check_alignment(env, addr, 16, ra); + mem_idx = cpu_mmu_index(env, false); + oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx); + v = helper_atomic_ldo_be_mmu(env, addr, oi, ra); + hi = int128_gethi(v); + lo = int128_getlo(v); - cpu_stq_data_ra(env, addr + 0, high, ra); - cpu_stq_data_ra(env, addr + 8, low, ra); - } + env->retxl = lo; + return hi; } +/* store pair to quadword */ void HELPER(stpq)(CPUS390XState *env, uint64_t addr, uint64_t low, uint64_t high) { - do_stpq(env, addr, low, high, false); + uintptr_t ra = GETPC(); + + check_alignment(env, addr, 16, ra); + cpu_stq_data_ra(env, addr + 0, high, ra); + cpu_stq_data_ra(env, addr + 8, low, ra); } void HELPER(stpq_parallel)(CPUS390XState *env, uint64_t addr, uint64_t low, uint64_t high) { - do_stpq(env, addr, low, high, true); + uintptr_t ra = GETPC(); + int mem_idx; + TCGMemOpIdx oi; + Int128 v; + + assert(HAVE_ATOMIC128); + + mem_idx = cpu_mmu_index(env, false); + oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx); + v = int128_make128(low, high); + helper_atomic_sto_be_mmu(env, addr, v, oi, ra); } /* Execute instruction. 
This instruction executes an insn modified with diff --git a/target/s390x/tcg-stub.c b/target/s390x/tcg-stub.c index c93501db0b..32adb7276a 100644 --- a/target/s390x/tcg-stub.c +++ b/target/s390x/tcg-stub.c @@ -18,3 +18,13 @@ void tcg_s390_tod_updated(CPUState *cs, run_on_cpu_data opaque) { } +void QEMU_NORETURN tcg_s390_program_interrupt(CPUS390XState *env, uint32_t code, + int ilen, uintptr_t ra) +{ + g_assert_not_reached(); +} +void QEMU_NORETURN tcg_s390_data_exception(CPUS390XState *env, uint32_t dxc, + uintptr_t ra) +{ + g_assert_not_reached(); +} diff --git a/target/s390x/tcg_s390x.h b/target/s390x/tcg_s390x.h index 4e308aa0ce..ab2c4ba703 100644 --- a/target/s390x/tcg_s390x.h +++ b/target/s390x/tcg_s390x.h @@ -14,5 +14,9 @@ #define TCG_S390X_H void tcg_s390_tod_updated(CPUState *cs, run_on_cpu_data opaque); +void QEMU_NORETURN tcg_s390_program_interrupt(CPUS390XState *env, uint32_t code, + int ilen, uintptr_t ra); +void QEMU_NORETURN tcg_s390_data_exception(CPUS390XState *env, uint32_t dxc, + uintptr_t ra); #endif /* TCG_S390X_H */ diff --git a/target/s390x/translate.c b/target/s390x/translate.c index 7363aabf3a..b5bd56b7ee 100644 --- a/target/s390x/translate.c +++ b/target/s390x/translate.c @@ -44,6 +44,7 @@ #include "trace-tcg.h" #include "exec/translator.h" #include "exec/log.h" +#include "qemu/atomic128.h" /* Information that (most) every instruction needs to manipulate. */ @@ -314,28 +315,18 @@ static inline void gen_illegal_opcode(DisasContext *s) gen_program_exception(s, PGM_OPERATION); } -static inline void gen_trap(DisasContext *s) +static inline void gen_data_exception(uint8_t dxc) { - TCGv_i32 t; - - /* Set DXC to 0xff. */ - t = tcg_temp_new_i32(); - tcg_gen_ld_i32(t, cpu_env, offsetof(CPUS390XState, fpc)); - tcg_gen_ori_i32(t, t, 0xff00); - tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, fpc)); - tcg_temp_free_i32(t); - - gen_program_exception(s, PGM_DATA); + TCGv_i32 tmp = tcg_const_i32(dxc); + gen_helper_data_exception(cpu_env, tmp); + tcg_temp_free_i32(tmp); } -#ifndef CONFIG_USER_ONLY -static void check_privileged(DisasContext *s) +static inline void gen_trap(DisasContext *s) { - if (s->base.tb->flags & FLAG_MASK_PSTATE) { - gen_program_exception(s, PGM_PRIVILEGED); - } + /* Set DXC to 0xff */ + gen_data_exception(0xff); } -#endif static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2) { @@ -1120,19 +1111,37 @@ typedef struct { /* We are exiting the TB to the main loop. */ #define DISAS_PC_STALE_NOCHAIN DISAS_TARGET_4 + +/* Instruction flags */ +#define IF_AFP1 0x0001 /* r1 is a fp reg for HFP/FPS instructions */ +#define IF_AFP2 0x0002 /* r2 is a fp reg for HFP/FPS instructions */ +#define IF_AFP3 0x0004 /* r3 is a fp reg for HFP/FPS instructions */ +#define IF_BFP 0x0008 /* binary floating point instruction */ +#define IF_DFP 0x0010 /* decimal floating point instruction */ +#define IF_PRIV 0x0020 /* privileged instruction */ + struct DisasInsn { unsigned opc:16; + unsigned flags:16; DisasFormat fmt:8; unsigned fac:8; unsigned spec:8; const char *name; + /* Pre-process arguments before HELP_OP. */ void (*help_in1)(DisasContext *, DisasFields *, DisasOps *); void (*help_in2)(DisasContext *, DisasFields *, DisasOps *); void (*help_prep)(DisasContext *, DisasFields *, DisasOps *); + + /* + * Post-process output after HELP_OP. + * Note that these are not called if HELP_OP returns DISAS_NORETURN. + */ void (*help_wout)(DisasContext *, DisasFields *, DisasOps *); void (*help_cout)(DisasContext *, DisasOps *); + + /* Implement the operation itself. 
*/ DisasJumpType (*help_op)(DisasContext *, DisasOps *); uint64_t data; @@ -2032,6 +2041,7 @@ static DisasJumpType op_cdsg(DisasContext *s, DisasOps *o) int r3 = get_field(s->fields, r3); int d2 = get_field(s->fields, d2); int b2 = get_field(s->fields, b2); + DisasJumpType ret = DISAS_NEXT; TCGv_i64 addr; TCGv_i32 t_r1, t_r3; @@ -2039,17 +2049,20 @@ static DisasJumpType op_cdsg(DisasContext *s, DisasOps *o) addr = get_address(s, 0, b2, d2); t_r1 = tcg_const_i32(r1); t_r3 = tcg_const_i32(r3); - if (tb_cflags(s->base.tb) & CF_PARALLEL) { + if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) { + gen_helper_cdsg(cpu_env, addr, t_r1, t_r3); + } else if (HAVE_CMPXCHG128) { gen_helper_cdsg_parallel(cpu_env, addr, t_r1, t_r3); } else { - gen_helper_cdsg(cpu_env, addr, t_r1, t_r3); + gen_helper_exit_atomic(cpu_env); + ret = DISAS_NORETURN; } tcg_temp_free_i64(addr); tcg_temp_free_i32(t_r1); tcg_temp_free_i32(t_r3); set_cc_static(s); - return DISAS_NEXT; + return ret; } static DisasJumpType op_csst(DisasContext *s, DisasOps *o) @@ -2078,7 +2091,6 @@ static DisasJumpType op_csp(DisasContext *s, DisasOps *o) /* Note that in1 = R1 (zero-extended expected value), out = R1 (original reg), out2 = R1+1 (new value). */ - check_privileged(s); addr = tcg_temp_new_i64(); old = tcg_temp_new_i64(); tcg_gen_andi_i64(addr, o->in2, -1ULL << (mop & MO_SIZE)); @@ -2202,7 +2214,6 @@ static DisasJumpType op_diag(DisasContext *s, DisasOps *o) TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3)); TCGv_i32 func_code = tcg_const_i32(get_field(s->fields, i2)); - check_privileged(s); gen_helper_diag(cpu_env, r1, r3, func_code); tcg_temp_free_i32(func_code); @@ -2463,7 +2474,6 @@ static DisasJumpType op_idte(DisasContext *s, DisasOps *o) { TCGv_i32 m4; - check_privileged(s); if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) { m4 = tcg_const_i32(get_field(s->fields, m4)); } else { @@ -2478,7 +2488,6 @@ static DisasJumpType op_ipte(DisasContext *s, DisasOps *o) { TCGv_i32 m4; - check_privileged(s); if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) { m4 = tcg_const_i32(get_field(s->fields, m4)); } else { @@ -2491,7 +2500,6 @@ static DisasJumpType op_ipte(DisasContext *s, DisasOps *o) static DisasJumpType op_iske(DisasContext *s, DisasOps *o) { - check_privileged(s); gen_helper_iske(o->out, cpu_env, o->in2); return DISAS_NEXT; } @@ -2790,7 +2798,6 @@ static DisasJumpType op_lctl(DisasContext *s, DisasOps *o) { TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1)); TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3)); - check_privileged(s); gen_helper_lctl(cpu_env, r1, o->in2, r3); tcg_temp_free_i32(r1); tcg_temp_free_i32(r3); @@ -2802,7 +2809,6 @@ static DisasJumpType op_lctlg(DisasContext *s, DisasOps *o) { TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1)); TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3)); - check_privileged(s); gen_helper_lctlg(cpu_env, r1, o->in2, r3); tcg_temp_free_i32(r1); tcg_temp_free_i32(r3); @@ -2812,7 +2818,6 @@ static DisasJumpType op_lctlg(DisasContext *s, DisasOps *o) static DisasJumpType op_lra(DisasContext *s, DisasOps *o) { - check_privileged(s); gen_helper_lra(o->out, cpu_env, o->in2); set_cc_static(s); return DISAS_NEXT; @@ -2820,8 +2825,6 @@ static DisasJumpType op_lra(DisasContext *s, DisasOps *o) static DisasJumpType op_lpp(DisasContext *s, DisasOps *o) { - check_privileged(s); - tcg_gen_st_i64(o->in2, cpu_env, offsetof(CPUS390XState, pp)); return DISAS_NEXT; } @@ -2830,12 +2833,12 @@ static DisasJumpType op_lpsw(DisasContext *s, DisasOps *o) { TCGv_i64 t1, t2; - check_privileged(s); 
per_breaking_event(s); t1 = tcg_temp_new_i64(); t2 = tcg_temp_new_i64(); - tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s)); + tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), + MO_TEUL | MO_ALIGN_8); tcg_gen_addi_i64(o->in2, o->in2, 4); tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s)); /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */ @@ -2850,12 +2853,12 @@ static DisasJumpType op_lpswe(DisasContext *s, DisasOps *o) { TCGv_i64 t1, t2; - check_privileged(s); per_breaking_event(s); t1 = tcg_temp_new_i64(); t2 = tcg_temp_new_i64(); - tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s)); + tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s), + MO_TEQ | MO_ALIGN_8); tcg_gen_addi_i64(o->in2, o->in2, 8); tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s)); gen_helper_load_psw(cpu_env, t1, t2); @@ -3036,10 +3039,13 @@ static DisasJumpType op_lpd(DisasContext *s, DisasOps *o) static DisasJumpType op_lpq(DisasContext *s, DisasOps *o) { - if (tb_cflags(s->base.tb) & CF_PARALLEL) { + if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) { + gen_helper_lpq(o->out, cpu_env, o->in2); + } else if (HAVE_ATOMIC128) { gen_helper_lpq_parallel(o->out, cpu_env, o->in2); } else { - gen_helper_lpq(o->out, cpu_env, o->in2); + gen_helper_exit_atomic(cpu_env); + return DISAS_NORETURN; } return_low128(o->out2); return DISAS_NEXT; @@ -3048,14 +3054,12 @@ static DisasJumpType op_lpq(DisasContext *s, DisasOps *o) #ifndef CONFIG_USER_ONLY static DisasJumpType op_lura(DisasContext *s, DisasOps *o) { - check_privileged(s); gen_helper_lura(o->out, cpu_env, o->in2); return DISAS_NEXT; } static DisasJumpType op_lurag(DisasContext *s, DisasOps *o) { - check_privileged(s); gen_helper_lurag(o->out, cpu_env, o->in2); return DISAS_NEXT; } @@ -3214,7 +3218,6 @@ static DisasJumpType op_mvcos(DisasContext *s, DisasOps *o) static DisasJumpType op_mvcp(DisasContext *s, DisasOps *o) { int r1 = get_field(s->fields, l1); - check_privileged(s); gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2); set_cc_static(s); return DISAS_NEXT; @@ -3223,7 +3226,6 @@ static DisasJumpType op_mvcp(DisasContext *s, DisasOps *o) static DisasJumpType op_mvcs(DisasContext *s, DisasOps *o) { int r1 = get_field(s->fields, l1); - check_privileged(s); gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2); set_cc_static(s); return DISAS_NEXT; @@ -3509,7 +3511,6 @@ static DisasJumpType op_popcnt(DisasContext *s, DisasOps *o) #ifndef CONFIG_USER_ONLY static DisasJumpType op_ptlb(DisasContext *s, DisasOps *o) { - check_privileged(s); gen_helper_ptlb(cpu_env); return DISAS_NEXT; } @@ -3700,7 +3701,6 @@ static DisasJumpType op_rll64(DisasContext *s, DisasOps *o) #ifndef CONFIG_USER_ONLY static DisasJumpType op_rrbe(DisasContext *s, DisasOps *o) { - check_privileged(s); gen_helper_rrbe(cc_op, cpu_env, o->in2); set_cc_static(s); return DISAS_NEXT; @@ -3708,7 +3708,6 @@ static DisasJumpType op_rrbe(DisasContext *s, DisasOps *o) static DisasJumpType op_sacf(DisasContext *s, DisasOps *o) { - check_privileged(s); gen_helper_sacf(cpu_env, o->in2); /* Addressing mode has changed, so end the block. 
*/ return DISAS_PC_STALE; @@ -3798,7 +3797,6 @@ static DisasJumpType op_sqxb(DisasContext *s, DisasOps *o) #ifndef CONFIG_USER_ONLY static DisasJumpType op_servc(DisasContext *s, DisasOps *o) { - check_privileged(s); gen_helper_servc(cc_op, cpu_env, o->in2, o->in1); set_cc_static(s); return DISAS_NEXT; @@ -3808,7 +3806,6 @@ static DisasJumpType op_sigp(DisasContext *s, DisasOps *o) { TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1)); TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3)); - check_privileged(s); gen_helper_sigp(cc_op, cpu_env, o->in2, r1, r3); set_cc_static(s); tcg_temp_free_i32(r1); @@ -3990,7 +3987,6 @@ static DisasJumpType op_ectg(DisasContext *s, DisasOps *o) #ifndef CONFIG_USER_ONLY static DisasJumpType op_spka(DisasContext *s, DisasOps *o) { - check_privileged(s); tcg_gen_shri_i64(o->in2, o->in2, 4); tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY, 4); return DISAS_NEXT; @@ -3998,14 +3994,12 @@ static DisasJumpType op_spka(DisasContext *s, DisasOps *o) static DisasJumpType op_sske(DisasContext *s, DisasOps *o) { - check_privileged(s); gen_helper_sske(cpu_env, o->in1, o->in2); return DISAS_NEXT; } static DisasJumpType op_ssm(DisasContext *s, DisasOps *o) { - check_privileged(s); tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8); /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */ return DISAS_PC_STALE_NOCHAIN; @@ -4013,7 +4007,6 @@ static DisasJumpType op_ssm(DisasContext *s, DisasOps *o) static DisasJumpType op_stap(DisasContext *s, DisasOps *o) { - check_privileged(s); tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, core_id)); return DISAS_NEXT; } @@ -4055,7 +4048,6 @@ static DisasJumpType op_stcke(DisasContext *s, DisasOps *o) static DisasJumpType op_sck(DisasContext *s, DisasOps *o) { - check_privileged(s); tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN); gen_helper_sck(cc_op, cpu_env, o->in1); set_cc_static(s); @@ -4064,21 +4056,18 @@ static DisasJumpType op_sck(DisasContext *s, DisasOps *o) static DisasJumpType op_sckc(DisasContext *s, DisasOps *o) { - check_privileged(s); gen_helper_sckc(cpu_env, o->in2); return DISAS_NEXT; } static DisasJumpType op_sckpf(DisasContext *s, DisasOps *o) { - check_privileged(s); gen_helper_sckpf(cpu_env, regs[0]); return DISAS_NEXT; } static DisasJumpType op_stckc(DisasContext *s, DisasOps *o) { - check_privileged(s); gen_helper_stckc(o->out, cpu_env); return DISAS_NEXT; } @@ -4087,7 +4076,6 @@ static DisasJumpType op_stctg(DisasContext *s, DisasOps *o) { TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1)); TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3)); - check_privileged(s); gen_helper_stctg(cpu_env, r1, o->in2, r3); tcg_temp_free_i32(r1); tcg_temp_free_i32(r3); @@ -4098,7 +4086,6 @@ static DisasJumpType op_stctl(DisasContext *s, DisasOps *o) { TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1)); TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3)); - check_privileged(s); gen_helper_stctl(cpu_env, r1, o->in2, r3); tcg_temp_free_i32(r1); tcg_temp_free_i32(r3); @@ -4107,35 +4094,30 @@ static DisasJumpType op_stctl(DisasContext *s, DisasOps *o) static DisasJumpType op_stidp(DisasContext *s, DisasOps *o) { - check_privileged(s); tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, cpuid)); return DISAS_NEXT; } static DisasJumpType op_spt(DisasContext *s, DisasOps *o) { - check_privileged(s); gen_helper_spt(cpu_env, o->in2); return DISAS_NEXT; } static DisasJumpType op_stfl(DisasContext *s, DisasOps *o) { - check_privileged(s); 
gen_helper_stfl(cpu_env); return DISAS_NEXT; } static DisasJumpType op_stpt(DisasContext *s, DisasOps *o) { - check_privileged(s); gen_helper_stpt(o->out, cpu_env); return DISAS_NEXT; } static DisasJumpType op_stsi(DisasContext *s, DisasOps *o) { - check_privileged(s); gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]); set_cc_static(s); return DISAS_NEXT; @@ -4143,14 +4125,12 @@ static DisasJumpType op_stsi(DisasContext *s, DisasOps *o) static DisasJumpType op_spx(DisasContext *s, DisasOps *o) { - check_privileged(s); gen_helper_spx(cpu_env, o->in2); return DISAS_NEXT; } static DisasJumpType op_xsch(DisasContext *s, DisasOps *o) { - check_privileged(s); gen_helper_xsch(cpu_env, regs[1]); set_cc_static(s); return DISAS_NEXT; @@ -4158,7 +4138,6 @@ static DisasJumpType op_xsch(DisasContext *s, DisasOps *o) static DisasJumpType op_csch(DisasContext *s, DisasOps *o) { - check_privileged(s); gen_helper_csch(cpu_env, regs[1]); set_cc_static(s); return DISAS_NEXT; @@ -4166,7 +4145,6 @@ static DisasJumpType op_csch(DisasContext *s, DisasOps *o) static DisasJumpType op_hsch(DisasContext *s, DisasOps *o) { - check_privileged(s); gen_helper_hsch(cpu_env, regs[1]); set_cc_static(s); return DISAS_NEXT; @@ -4174,7 +4152,6 @@ static DisasJumpType op_hsch(DisasContext *s, DisasOps *o) static DisasJumpType op_msch(DisasContext *s, DisasOps *o) { - check_privileged(s); gen_helper_msch(cpu_env, regs[1], o->in2); set_cc_static(s); return DISAS_NEXT; @@ -4182,7 +4159,6 @@ static DisasJumpType op_msch(DisasContext *s, DisasOps *o) static DisasJumpType op_rchp(DisasContext *s, DisasOps *o) { - check_privileged(s); gen_helper_rchp(cpu_env, regs[1]); set_cc_static(s); return DISAS_NEXT; @@ -4190,7 +4166,6 @@ static DisasJumpType op_rchp(DisasContext *s, DisasOps *o) static DisasJumpType op_rsch(DisasContext *s, DisasOps *o) { - check_privileged(s); gen_helper_rsch(cpu_env, regs[1]); set_cc_static(s); return DISAS_NEXT; @@ -4198,21 +4173,18 @@ static DisasJumpType op_rsch(DisasContext *s, DisasOps *o) static DisasJumpType op_sal(DisasContext *s, DisasOps *o) { - check_privileged(s); gen_helper_sal(cpu_env, regs[1]); return DISAS_NEXT; } static DisasJumpType op_schm(DisasContext *s, DisasOps *o) { - check_privileged(s); gen_helper_schm(cpu_env, regs[1], regs[2], o->in2); return DISAS_NEXT; } static DisasJumpType op_siga(DisasContext *s, DisasOps *o) { - check_privileged(s); /* From KVM code: Not provided, set CC = 3 for subchannel not operational */ gen_op_movi_cc(s, 3); return DISAS_NEXT; @@ -4220,14 +4192,12 @@ static DisasJumpType op_siga(DisasContext *s, DisasOps *o) static DisasJumpType op_stcps(DisasContext *s, DisasOps *o) { - check_privileged(s); /* The instruction is suppressed if not provided. 
*/ return DISAS_NEXT; } static DisasJumpType op_ssch(DisasContext *s, DisasOps *o) { - check_privileged(s); gen_helper_ssch(cpu_env, regs[1], o->in2); set_cc_static(s); return DISAS_NEXT; @@ -4235,7 +4205,6 @@ static DisasJumpType op_ssch(DisasContext *s, DisasOps *o) static DisasJumpType op_stsch(DisasContext *s, DisasOps *o) { - check_privileged(s); gen_helper_stsch(cpu_env, regs[1], o->in2); set_cc_static(s); return DISAS_NEXT; @@ -4243,7 +4212,6 @@ static DisasJumpType op_stsch(DisasContext *s, DisasOps *o) static DisasJumpType op_stcrw(DisasContext *s, DisasOps *o) { - check_privileged(s); gen_helper_stcrw(cpu_env, o->in2); set_cc_static(s); return DISAS_NEXT; @@ -4251,7 +4219,6 @@ static DisasJumpType op_stcrw(DisasContext *s, DisasOps *o) static DisasJumpType op_tpi(DisasContext *s, DisasOps *o) { - check_privileged(s); gen_helper_tpi(cc_op, cpu_env, o->addr1); set_cc_static(s); return DISAS_NEXT; @@ -4259,7 +4226,6 @@ static DisasJumpType op_tpi(DisasContext *s, DisasOps *o) static DisasJumpType op_tsch(DisasContext *s, DisasOps *o) { - check_privileged(s); gen_helper_tsch(cpu_env, regs[1], o->in2); set_cc_static(s); return DISAS_NEXT; @@ -4267,7 +4233,6 @@ static DisasJumpType op_tsch(DisasContext *s, DisasOps *o) static DisasJumpType op_chsc(DisasContext *s, DisasOps *o) { - check_privileged(s); gen_helper_chsc(cpu_env, o->in2); set_cc_static(s); return DISAS_NEXT; @@ -4275,7 +4240,6 @@ static DisasJumpType op_chsc(DisasContext *s, DisasOps *o) static DisasJumpType op_stpx(DisasContext *s, DisasOps *o) { - check_privileged(s); tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa)); tcg_gen_andi_i64(o->out, o->out, 0x7fffe000); return DISAS_NEXT; @@ -4286,8 +4250,6 @@ static DisasJumpType op_stnosm(DisasContext *s, DisasOps *o) uint64_t i2 = get_field(s->fields, i2); TCGv_i64 t; - check_privileged(s); - /* It is important to do what the instruction name says: STORE THEN. If we let the output hook perform the store then if we fault and restart, we'll have the wrong SYSTEM MASK in place. 
*/ @@ -4309,14 +4271,12 @@ static DisasJumpType op_stnosm(DisasContext *s, DisasOps *o) static DisasJumpType op_stura(DisasContext *s, DisasOps *o) { - check_privileged(s); gen_helper_stura(cpu_env, o->in2, o->in1); return DISAS_NEXT; } static DisasJumpType op_sturg(DisasContext *s, DisasOps *o) { - check_privileged(s); gen_helper_sturg(cpu_env, o->in2, o->in1); return DISAS_NEXT; } @@ -4462,10 +4422,13 @@ static DisasJumpType op_stmh(DisasContext *s, DisasOps *o) static DisasJumpType op_stpq(DisasContext *s, DisasOps *o) { - if (tb_cflags(s->base.tb) & CF_PARALLEL) { + if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) { + gen_helper_stpq(cpu_env, o->in2, o->out2, o->out); + } else if (HAVE_ATOMIC128) { gen_helper_stpq_parallel(cpu_env, o->in2, o->out2, o->out); } else { - gen_helper_stpq(cpu_env, o->in2, o->out2, o->out); + gen_helper_exit_atomic(cpu_env); + return DISAS_NORETURN; } return DISAS_NEXT; } @@ -4582,7 +4545,6 @@ static DisasJumpType op_tcxb(DisasContext *s, DisasOps *o) static DisasJumpType op_testblock(DisasContext *s, DisasOps *o) { - check_privileged(s); gen_helper_testblock(cc_op, cpu_env, o->in2); set_cc_static(s); return DISAS_NEXT; @@ -4840,7 +4802,6 @@ static DisasJumpType op_clp(DisasContext *s, DisasOps *o) { TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2)); - check_privileged(s); gen_helper_clp(cpu_env, r2); tcg_temp_free_i32(r2); set_cc_static(s); @@ -4852,7 +4813,6 @@ static DisasJumpType op_pcilg(DisasContext *s, DisasOps *o) TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1)); TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2)); - check_privileged(s); gen_helper_pcilg(cpu_env, r1, r2); tcg_temp_free_i32(r1); tcg_temp_free_i32(r2); @@ -4865,7 +4825,6 @@ static DisasJumpType op_pcistg(DisasContext *s, DisasOps *o) TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1)); TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2)); - check_privileged(s); gen_helper_pcistg(cpu_env, r1, r2); tcg_temp_free_i32(r1); tcg_temp_free_i32(r2); @@ -4878,7 +4837,6 @@ static DisasJumpType op_stpcifc(DisasContext *s, DisasOps *o) TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1)); TCGv_i32 ar = tcg_const_i32(get_field(s->fields, b2)); - check_privileged(s); gen_helper_stpcifc(cpu_env, r1, o->addr1, ar); tcg_temp_free_i32(ar); tcg_temp_free_i32(r1); @@ -4888,7 +4846,6 @@ static DisasJumpType op_stpcifc(DisasContext *s, DisasOps *o) static DisasJumpType op_sic(DisasContext *s, DisasOps *o) { - check_privileged(s); gen_helper_sic(cpu_env, o->in1, o->in2); return DISAS_NEXT; } @@ -4898,7 +4855,6 @@ static DisasJumpType op_rpcit(DisasContext *s, DisasOps *o) TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1)); TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2)); - check_privileged(s); gen_helper_rpcit(cpu_env, r1, r2); tcg_temp_free_i32(r1); tcg_temp_free_i32(r2); @@ -4912,7 +4868,6 @@ static DisasJumpType op_pcistb(DisasContext *s, DisasOps *o) TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3)); TCGv_i32 ar = tcg_const_i32(get_field(s->fields, b2)); - check_privileged(s); gen_helper_pcistb(cpu_env, r1, r3, o->addr1, ar); tcg_temp_free_i32(ar); tcg_temp_free_i32(r1); @@ -4926,7 +4881,6 @@ static DisasJumpType op_mpcifc(DisasContext *s, DisasOps *o) TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1)); TCGv_i32 ar = tcg_const_i32(get_field(s->fields, b2)); - check_privileged(s); gen_helper_mpcifc(cpu_env, r1, o->addr1, ar); tcg_temp_free_i32(ar); tcg_temp_free_i32(r1); @@ -5834,17 +5788,24 @@ static void in2_insn(DisasContext *s, DisasFields *f, DisasOps *o) search tree, rather 
than us having to post-process the table. */ #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \ - D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0) + E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, 0) + +#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \ + E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, 0) -#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM, +#define F(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, FL) \ + E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, FL) + +#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) insn_ ## NM, enum DisasInsnEnum { #include "insn-data.def" }; -#undef D -#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \ +#undef E +#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) { \ .opc = OPC, \ + .flags = FL, \ .fmt = FMT_##FT, \ .fac = FAC_##FC, \ .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W, \ @@ -5915,8 +5876,8 @@ static const DisasInsn insn_info[] = { #include "insn-data.def" }; -#undef D -#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \ +#undef E +#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) \ case OPC: return &insn_info[insn_ ## NM]; static const DisasInsn *lookup_opc(uint16_t opc) @@ -5928,6 +5889,8 @@ static const DisasInsn *lookup_opc(uint16_t opc) } } +#undef F +#undef E #undef D #undef C @@ -6075,6 +6038,17 @@ static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s, return info; } +static bool is_afp_reg(int reg) +{ + return reg % 2 || reg > 6; +} + +static bool is_fp_pair(int reg) +{ + /* 0,1,4,5,8,9,12,13: to exclude the others, check for single bit */ + return !(reg & 0x2); +} + static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s) { const DisasInsn *insn; @@ -6101,42 +6075,48 @@ static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s) } #endif - /* Check for insn specification exceptions. */ - if (insn->spec) { - int spec = insn->spec, excp = 0, r; + /* process flags */ + if (insn->flags) { + /* privileged instruction */ + if ((s->base.tb->flags & FLAG_MASK_PSTATE) && (insn->flags & IF_PRIV)) { + gen_program_exception(s, PGM_PRIVILEGED); + return DISAS_NORETURN; + } - if (spec & SPEC_r1_even) { - r = get_field(&f, r1); - if (r & 1) { - excp = PGM_SPECIFICATION; + /* if AFP is not enabled, instructions and registers are forbidden */ + if (!(s->base.tb->flags & FLAG_MASK_AFP)) { + uint8_t dxc = 0; + + if ((insn->flags & IF_AFP1) && is_afp_reg(get_field(&f, r1))) { + dxc = 1; } - } - if (spec & SPEC_r2_even) { - r = get_field(&f, r2); - if (r & 1) { - excp = PGM_SPECIFICATION; + if ((insn->flags & IF_AFP2) && is_afp_reg(get_field(&f, r2))) { + dxc = 1; } - } - if (spec & SPEC_r3_even) { - r = get_field(&f, r3); - if (r & 1) { - excp = PGM_SPECIFICATION; + if ((insn->flags & IF_AFP3) && is_afp_reg(get_field(&f, r3))) { + dxc = 1; } - } - if (spec & SPEC_r1_f128) { - r = get_field(&f, r1); - if (r > 13) { - excp = PGM_SPECIFICATION; + if (insn->flags & IF_BFP) { + dxc = 2; } - } - if (spec & SPEC_r2_f128) { - r = get_field(&f, r2); - if (r > 13) { - excp = PGM_SPECIFICATION; + if (insn->flags & IF_DFP) { + dxc = 3; + } + if (dxc) { + gen_data_exception(dxc); + return DISAS_NORETURN; } } - if (excp) { - gen_program_exception(s, excp); + } + + /* Check for insn specification exceptions. 
*/ + if (insn->spec) { + if ((insn->spec & SPEC_r1_even && get_field(&f, r1) & 1) || + (insn->spec & SPEC_r2_even && get_field(&f, r2) & 1) || + (insn->spec & SPEC_r3_even && get_field(&f, r3) & 1) || + (insn->spec & SPEC_r1_f128 && !is_fp_pair(get_field(&f, r1))) || + (insn->spec & SPEC_r2_f128 && !is_fp_pair(get_field(&f, r2)))) { + gen_program_exception(s, PGM_SPECIFICATION); return DISAS_NORETURN; } } @@ -6164,11 +6144,13 @@ static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s) if (insn->help_op) { ret = insn->help_op(s, &o); } - if (insn->help_wout) { - insn->help_wout(s, &f, &o); - } - if (insn->help_cout) { - insn->help_cout(s, &o); + if (ret != DISAS_NORETURN) { + if (insn->help_wout) { + insn->help_wout(s, &f, &o); + } + if (insn->help_cout) { + insn->help_cout(s, &o); + } } /* Free any temporaries created by the helpers. */ |
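
Note on the translate.c hunks above: the scattered check_privileged() calls are dropped because privilege is now expressed as an IF_PRIV instruction flag and tested once in translate_one(), next to the new AFP handling that raises a data exception (DXC 1, 2 or 3) when CR0.AFP is clear. Below is a simplified, self-contained sketch of that centralized check; the struct and function names (struct insn, check_insn_flags, the pseudo return codes) are illustrative only and not part of the QEMU code.

/*
 * Simplified sketch of the flag-driven checks that the patch moves into
 * translate_one(): one per-instruction flags word replaces per-opcode
 * check_privileged() calls, and AFP/BFP/DFP use with CR0.AFP clear maps
 * to the DXC values seen in the diff (1, 2, 3).
 */
#include <stdbool.h>
#include <stdio.h>

#define IF_AFP1 0x0001
#define IF_AFP2 0x0002
#define IF_BFP  0x0008
#define IF_DFP  0x0010
#define IF_PRIV 0x0020

struct insn {
    const char *name;
    unsigned flags;
    int r1, r2;
};

/* Mirrors is_afp_reg() from the diff: f0/f2/f4/f6 are always allowed. */
static bool is_afp_reg(int reg)
{
    return reg % 2 || reg > 6;
}

/* Returns 0 if the insn may run, -1 for "privileged", or a DXC value. */
static int check_insn_flags(const struct insn *insn, bool pstate, bool afp)
{
    if (pstate && (insn->flags & IF_PRIV)) {
        return -1;                      /* would raise PGM_PRIVILEGED */
    }
    if (!afp) {
        int dxc = 0;
        if ((insn->flags & IF_AFP1) && is_afp_reg(insn->r1)) {
            dxc = 1;
        }
        if ((insn->flags & IF_AFP2) && is_afp_reg(insn->r2)) {
            dxc = 1;
        }
        if (insn->flags & IF_BFP) {
            dxc = 2;
        }
        if (insn->flags & IF_DFP) {
            dxc = 3;
        }
        if (dxc) {
            return dxc;                 /* would raise a data exception */
        }
    }
    return 0;
}

int main(void)
{
    struct insn adbr = { "adbr", IF_BFP, 1, 3 };
    struct insn lpsw = { "lpsw", IF_PRIV, 0, 0 };

    printf("%s with AFP disabled: %d\n", adbr.name,
           check_insn_flags(&adbr, true, false));
    printf("%s in problem state: %d\n", lpsw.name,
           check_insn_flags(&lpsw, true, true));
    return 0;
}

Keeping the decision in one place is what lets the diff delete every check_privileged() body shown above without changing behaviour: the flag travels with the opcode entry via the new E() macro column in insn-data.def.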

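Note on the LPQ/STPQ/CDSG hunks: the runtime parallel/serial switch inside the helpers is replaced by a translate-time three-way dispatch gated on HAVE_ATOMIC128 / HAVE_CMPXCHG128. The sketch below shows only that dispatch shape, assuming hypothetical emit_*() stand-ins for the gen_helper_* calls; it is not the QEMU implementation.

/*
 * Minimal sketch of the translate-time dispatch used for LPQ, STPQ and
 * CDSG in this patch: serial TBs take the plain helper, parallel TBs
 * take the *_parallel helper only when the host provides 128-bit
 * atomics, and otherwise the TB ends with DISAS_NORETURN so the access
 * can be restarted on the serial (exclusive) path.
 */
#include <stdbool.h>
#include <stdio.h>

enum disas_jump { DISAS_NEXT, DISAS_NORETURN };

/* Stand-ins for (tb_cflags(tb) & CF_PARALLEL) and HAVE_ATOMIC128. */
static bool tb_is_parallel;
static bool host_has_atomic128;

static void emit_lpq_serial(void)   { puts("call helper_lpq"); }
static void emit_lpq_parallel(void) { puts("call helper_lpq_parallel"); }
static void emit_exit_atomic(void)  { puts("call helper_exit_atomic"); }

static enum disas_jump op_lpq_sketch(void)
{
    if (!tb_is_parallel) {
        emit_lpq_serial();      /* alignment-checked pair of 64-bit loads */
    } else if (host_has_atomic128) {
        emit_lpq_parallel();    /* single 128-bit atomic load */
    } else {
        emit_exit_atomic();     /* re-execute outside parallel execution */
        return DISAS_NORETURN;
    }
    return DISAS_NEXT;
}

int main(void)
{
    tb_is_parallel = true;
    host_has_atomic128 = false;
    return op_lpq_sketch() == DISAS_NORETURN ? 0 : 1;
}

Because the decision is made while translating, the *_parallel helpers can simply assert(HAVE_ATOMIC128) instead of carrying a cpu_loop_exit_atomic() fallback, which is exactly what the mem_helper.c hunks above do.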