-rwxr-xr-x  configure | 11
-rw-r--r--  crypto/xts.c | 200
-rw-r--r--  default-configs/alpha-softmmu.mak | 1
-rw-r--r--  hw/arm/boot.c | 18
-rw-r--r--  hw/core/Makefile.objs | 3
-rw-r--r--  hw/core/null-machine.c | 2
-rw-r--r--  hw/intc/armv7m_nvic.c | 12
-rw-r--r--  hw/isa/Makefile.objs | 3
-rw-r--r--  hw/net/cadence_gem.c | 9
-rw-r--r--  hw/riscv/sifive_clint.c | 8
-rw-r--r--  hw/riscv/sifive_plic.c | 4
-rw-r--r--  hw/riscv/sifive_u.c | 4
-rw-r--r--  hw/riscv/spike.c | 6
-rw-r--r--  hw/riscv/virt.c | 6
-rw-r--r--  hw/sd/ssi-sd.c | 2
-rw-r--r--  linux-user/aarch64/signal.c | 4
-rw-r--r--  linux-user/elfload.c | 58
-rw-r--r--  linux-user/mips/target_elf.h | 3
-rw-r--r--  linux-user/syscall.c | 10
-rw-r--r--  po/Makefile | 2
-rw-r--r--  target/arm/cpu.c | 238
-rw-r--r--  target/arm/cpu.h | 221
-rw-r--r--  target/arm/cpu64.c | 148
-rw-r--r--  target/arm/helper.c | 389
-rw-r--r--  target/arm/internals.h | 45
-rw-r--r--  target/arm/kvm.c | 60
-rw-r--r--  target/arm/kvm32.c | 13
-rw-r--r--  target/arm/kvm64.c | 15
-rw-r--r--  target/arm/kvm_arm.h | 24
-rw-r--r--  target/arm/machine.c | 25
-rw-r--r--  target/arm/op_helper.c | 2
-rw-r--r--  target/arm/translate-a64.c | 715
-rw-r--r--  target/arm/translate.c | 1445
-rw-r--r--  target/arm/translate.h | 21
-rw-r--r--  target/mips/mips-defs.h | 3
-rw-r--r--  target/mips/translate.c | 865
-rw-r--r--  target/mips/translate_init.inc.c | 59
-rw-r--r--  target/riscv/Makefile.objs | 2
-rw-r--r--  target/riscv/cpu.c | 6
-rw-r--r--  target/riscv/cpu.h | 22
-rw-r--r--  target/riscv/cpu_bits.h | 683
-rw-r--r--  target/riscv/cpu_helper.c (renamed from target/riscv/helper.c) | 35
-rw-r--r--  target/riscv/op_helper.c | 34
-rw-r--r--  tests/benchmark-crypto-cipher.c | 149
-rwxr-xr-x  tests/docker/dockerfiles/debian-bootstrap.pre | 4
-rw-r--r--  tests/migration-test.c | 16
-rw-r--r--  tests/tcg/mips/mipsr5900/Makefile | 30
-rw-r--r--  tests/tcg/mips/mipsr5900/div1.c | 73
-rw-r--r--  tests/tcg/mips/mipsr5900/divu1.c | 48
-rw-r--r--  tests/tcg/mips/mipsr5900/mflohi1.c | 35
-rw-r--r--  tests/tcg/mips/mipsr5900/mtlohi1.c | 40
-rw-r--r--  tests/tcg/mips/mipsr5900/mult.c | 76
-rw-r--r--  tests/tcg/mips/mipsr5900/multu.c | 68
-rw-r--r--  tests/test-crypto-xts.c | 226
54 files changed, 4161 insertions, 2040 deletions
diff --git a/configure b/configure
index e39f63d01d..1ee09bd112 100755
--- a/configure
+++ b/configure
@@ -428,7 +428,7 @@ usb_redir=""
opengl=""
opengl_dmabuf="no"
cpuid_h="no"
-avx2_opt="no"
+avx2_opt=""
zlib="yes"
capstone=""
lzo=""
@@ -1329,6 +1329,10 @@ for opt do
;;
--disable-glusterfs) glusterfs="no"
;;
+ --disable-avx2) avx2_opt="no"
+ ;;
+ --enable-avx2) avx2_opt="yes"
+ ;;
--enable-glusterfs) glusterfs="yes"
;;
--disable-virtio-blk-data-plane|--enable-virtio-blk-data-plane)
@@ -1703,6 +1707,7 @@ disabled with --disable-FEATURE, default is enabled if available:
libxml2 for Parallels image format
tcmalloc tcmalloc support
jemalloc jemalloc support
+ avx2 AVX2 optimization support
replication replication support
vhost-vsock virtio sockets device support
opengl opengl support
@@ -5032,7 +5037,7 @@ fi
# There is no point enabling this if cpuid.h is not usable,
# since we won't be able to select the new routines.
-if test $cpuid_h = yes; then
+if test "$cpuid_h" = "yes" -a "$avx2_opt" != "no"; then
cat > $TMPC << EOF
#pragma GCC push_options
#pragma GCC target("avx2")
@@ -5046,6 +5051,8 @@ int main(int argc, char *argv[]) { return bar(argv[0]); }
EOF
if compile_object "" ; then
avx2_opt="yes"
+ else
+ avx2_opt="no"
fi
fi
diff --git a/crypto/xts.c b/crypto/xts.c
index 95212341f6..4277ad40de 100644
--- a/crypto/xts.c
+++ b/crypto/xts.c
@@ -24,52 +24,75 @@
*/
#include "qemu/osdep.h"
+#include "qemu/bswap.h"
#include "crypto/xts.h"
-static void xts_mult_x(uint8_t *I)
+typedef union {
+ uint8_t b[XTS_BLOCK_SIZE];
+ uint64_t u[2];
+} xts_uint128;
+
+static inline void xts_uint128_xor(xts_uint128 *D,
+ const xts_uint128 *S1,
+ const xts_uint128 *S2)
{
- int x;
- uint8_t t, tt;
+ D->u[0] = S1->u[0] ^ S2->u[0];
+ D->u[1] = S1->u[1] ^ S2->u[1];
+}
- for (x = t = 0; x < 16; x++) {
- tt = I[x] >> 7;
- I[x] = ((I[x] << 1) | t) & 0xFF;
- t = tt;
- }
- if (tt) {
- I[0] ^= 0x87;
+static inline void xts_uint128_cpu_to_les(xts_uint128 *v)
+{
+ cpu_to_le64s(&v->u[0]);
+ cpu_to_le64s(&v->u[1]);
+}
+
+static inline void xts_uint128_le_to_cpus(xts_uint128 *v)
+{
+ le64_to_cpus(&v->u[0]);
+ le64_to_cpus(&v->u[1]);
+}
+
+static void xts_mult_x(xts_uint128 *I)
+{
+ uint64_t tt;
+
+ xts_uint128_le_to_cpus(I);
+
+ tt = I->u[0] >> 63;
+ I->u[0] <<= 1;
+
+ if (I->u[1] >> 63) {
+ I->u[0] ^= 0x87;
}
+ I->u[1] <<= 1;
+ I->u[1] |= tt;
+
+ xts_uint128_cpu_to_les(I);
}
/**
- * xts_tweak_uncrypt:
+ * xts_tweak_encdec:
* @param ctxt: the cipher context
* @param func: the cipher function
- * @src: buffer providing the cipher text of XTS_BLOCK_SIZE bytes
- * @dst: buffer to output the plain text of XTS_BLOCK_SIZE bytes
+ * @src: buffer providing the input text of XTS_BLOCK_SIZE bytes
+ * @dst: buffer to output the output text of XTS_BLOCK_SIZE bytes
* @iv: the initialization vector tweak of XTS_BLOCK_SIZE bytes
*
- * Decrypt data with a tweak
+ * Encrypt/decrypt data with a tweak
*/
-static void xts_tweak_decrypt(const void *ctx,
- xts_cipher_func *func,
- const uint8_t *src,
- uint8_t *dst,
- uint8_t *iv)
+static inline void xts_tweak_encdec(const void *ctx,
+ xts_cipher_func *func,
+ const xts_uint128 *src,
+ xts_uint128 *dst,
+ xts_uint128 *iv)
{
- unsigned long x;
-
/* tweak encrypt block i */
- for (x = 0; x < XTS_BLOCK_SIZE; x++) {
- dst[x] = src[x] ^ iv[x];
- }
+ xts_uint128_xor(dst, src, iv);
- func(ctx, XTS_BLOCK_SIZE, dst, dst);
+ func(ctx, XTS_BLOCK_SIZE, dst->b, dst->b);
- for (x = 0; x < XTS_BLOCK_SIZE; x++) {
- dst[x] = dst[x] ^ iv[x];
- }
+ xts_uint128_xor(dst, dst, iv);
/* LFSR the tweak */
xts_mult_x(iv);
@@ -85,7 +108,7 @@ void xts_decrypt(const void *datactx,
uint8_t *dst,
const uint8_t *src)
{
- uint8_t PP[XTS_BLOCK_SIZE], CC[XTS_BLOCK_SIZE], T[XTS_BLOCK_SIZE];
+ xts_uint128 PP, CC, T;
unsigned long i, m, mo, lim;
/* get number of blocks */
@@ -102,72 +125,53 @@ void xts_decrypt(const void *datactx,
}
/* encrypt the iv */
- encfunc(tweakctx, XTS_BLOCK_SIZE, T, iv);
-
- for (i = 0; i < lim; i++) {
- xts_tweak_decrypt(datactx, decfunc, src, dst, T);
-
- src += XTS_BLOCK_SIZE;
- dst += XTS_BLOCK_SIZE;
+ encfunc(tweakctx, XTS_BLOCK_SIZE, T.b, iv);
+
+ if (QEMU_PTR_IS_ALIGNED(src, sizeof(uint64_t)) &&
+ QEMU_PTR_IS_ALIGNED(dst, sizeof(uint64_t))) {
+ xts_uint128 *S = (xts_uint128 *)src;
+ xts_uint128 *D = (xts_uint128 *)dst;
+ for (i = 0; i < lim; i++, S++, D++) {
+ xts_tweak_encdec(datactx, decfunc, S, D, &T);
+ }
+ } else {
+ xts_uint128 D;
+
+ for (i = 0; i < lim; i++) {
+ memcpy(&D, src, XTS_BLOCK_SIZE);
+ xts_tweak_encdec(datactx, decfunc, &D, &D, &T);
+ memcpy(dst, &D, XTS_BLOCK_SIZE);
+ src += XTS_BLOCK_SIZE;
+ dst += XTS_BLOCK_SIZE;
+ }
}
/* if length is not a multiple of XTS_BLOCK_SIZE then */
if (mo > 0) {
- memcpy(CC, T, XTS_BLOCK_SIZE);
- xts_mult_x(CC);
+ xts_uint128 S, D;
+ memcpy(&CC, &T, XTS_BLOCK_SIZE);
+ xts_mult_x(&CC);
/* PP = tweak decrypt block m-1 */
- xts_tweak_decrypt(datactx, decfunc, src, PP, CC);
+ memcpy(&S, src, XTS_BLOCK_SIZE);
+ xts_tweak_encdec(datactx, decfunc, &S, &PP, &CC);
/* Pm = first length % XTS_BLOCK_SIZE bytes of PP */
for (i = 0; i < mo; i++) {
- CC[i] = src[XTS_BLOCK_SIZE + i];
- dst[XTS_BLOCK_SIZE + i] = PP[i];
+ CC.b[i] = src[XTS_BLOCK_SIZE + i];
+ dst[XTS_BLOCK_SIZE + i] = PP.b[i];
}
for (; i < XTS_BLOCK_SIZE; i++) {
- CC[i] = PP[i];
+ CC.b[i] = PP.b[i];
}
/* Pm-1 = Tweak uncrypt CC */
- xts_tweak_decrypt(datactx, decfunc, CC, dst, T);
+ xts_tweak_encdec(datactx, decfunc, &CC, &D, &T);
+ memcpy(dst, &D, XTS_BLOCK_SIZE);
}
/* Decrypt the iv back */
- decfunc(tweakctx, XTS_BLOCK_SIZE, iv, T);
-}
-
-
-/**
- * xts_tweak_crypt:
- * @param ctxt: the cipher context
- * @param func: the cipher function
- * @src: buffer providing the plain text of XTS_BLOCK_SIZE bytes
- * @dst: buffer to output the cipher text of XTS_BLOCK_SIZE bytes
- * @iv: the initialization vector tweak of XTS_BLOCK_SIZE bytes
- *
- * Encrypt data with a tweak
- */
-static void xts_tweak_encrypt(const void *ctx,
- xts_cipher_func *func,
- const uint8_t *src,
- uint8_t *dst,
- uint8_t *iv)
-{
- unsigned long x;
-
- /* tweak encrypt block i */
- for (x = 0; x < XTS_BLOCK_SIZE; x++) {
- dst[x] = src[x] ^ iv[x];
- }
-
- func(ctx, XTS_BLOCK_SIZE, dst, dst);
-
- for (x = 0; x < XTS_BLOCK_SIZE; x++) {
- dst[x] = dst[x] ^ iv[x];
- }
-
- /* LFSR the tweak */
- xts_mult_x(iv);
+ decfunc(tweakctx, XTS_BLOCK_SIZE, iv, T.b);
}
@@ -180,7 +184,7 @@ void xts_encrypt(const void *datactx,
uint8_t *dst,
const uint8_t *src)
{
- uint8_t PP[XTS_BLOCK_SIZE], CC[XTS_BLOCK_SIZE], T[XTS_BLOCK_SIZE];
+ xts_uint128 PP, CC, T;
unsigned long i, m, mo, lim;
/* get number of blocks */
@@ -197,34 +201,50 @@ void xts_encrypt(const void *datactx,
}
/* encrypt the iv */
- encfunc(tweakctx, XTS_BLOCK_SIZE, T, iv);
+ encfunc(tweakctx, XTS_BLOCK_SIZE, T.b, iv);
+
+ if (QEMU_PTR_IS_ALIGNED(src, sizeof(uint64_t)) &&
+ QEMU_PTR_IS_ALIGNED(dst, sizeof(uint64_t))) {
+ xts_uint128 *S = (xts_uint128 *)src;
+ xts_uint128 *D = (xts_uint128 *)dst;
+ for (i = 0; i < lim; i++, S++, D++) {
+ xts_tweak_encdec(datactx, encfunc, S, D, &T);
+ }
+ } else {
+ xts_uint128 D;
- for (i = 0; i < lim; i++) {
- xts_tweak_encrypt(datactx, encfunc, src, dst, T);
+ for (i = 0; i < lim; i++) {
+ memcpy(&D, src, XTS_BLOCK_SIZE);
+ xts_tweak_encdec(datactx, encfunc, &D, &D, &T);
+ memcpy(dst, &D, XTS_BLOCK_SIZE);
- dst += XTS_BLOCK_SIZE;
- src += XTS_BLOCK_SIZE;
+ dst += XTS_BLOCK_SIZE;
+ src += XTS_BLOCK_SIZE;
+ }
}
/* if length is not a multiple of XTS_BLOCK_SIZE then */
if (mo > 0) {
+ xts_uint128 S, D;
/* CC = tweak encrypt block m-1 */
- xts_tweak_encrypt(datactx, encfunc, src, CC, T);
+ memcpy(&S, src, XTS_BLOCK_SIZE);
+ xts_tweak_encdec(datactx, encfunc, &S, &CC, &T);
/* Cm = first length % XTS_BLOCK_SIZE bytes of CC */
for (i = 0; i < mo; i++) {
- PP[i] = src[XTS_BLOCK_SIZE + i];
- dst[XTS_BLOCK_SIZE + i] = CC[i];
+ PP.b[i] = src[XTS_BLOCK_SIZE + i];
+ dst[XTS_BLOCK_SIZE + i] = CC.b[i];
}
for (; i < XTS_BLOCK_SIZE; i++) {
- PP[i] = CC[i];
+ PP.b[i] = CC.b[i];
}
/* Cm-1 = Tweak encrypt PP */
- xts_tweak_encrypt(datactx, encfunc, PP, dst, T);
+ xts_tweak_encdec(datactx, encfunc, &PP, &D, &T);
+ memcpy(dst, &D, XTS_BLOCK_SIZE);
}
/* Decrypt the iv back */
- decfunc(tweakctx, XTS_BLOCK_SIZE, iv, T);
+ decfunc(tweakctx, XTS_BLOCK_SIZE, iv, T.b);
}
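
The xts.c rewrite above replaces byte-at-a-time tweak handling with 64-bit word operations: the tweak lives in the new xts_uint128 union, the per-block XORs become two 64-bit XORs, and xts_mult_x() performs the GF(2^128) doubling with two shifts. As a minimal standalone sketch of that doubling (illustrative only, not QEMU code; it assumes the 16-byte tweak is held as two 64-bit halves already byte-swapped to host order, lo = bytes 0..7, hi = bytes 8..15):

    #include <stdint.h>

    /* Multiply the XTS tweak by x in GF(2^128) using the reduction
     * polynomial x^128 + x^7 + x^2 + x + 1 (constant 0x87) -- the same
     * operation xts_mult_x() performs above, two shifts instead of a
     * 16-iteration byte loop. */
    static void xts_tweak_double(uint64_t *lo, uint64_t *hi)
    {
        uint64_t carry = *lo >> 63;     /* bit 63 moves into the high half */
        int overflow = *hi >> 63;       /* bit 127 falls off the top */

        *lo <<= 1;
        *hi = (*hi << 1) | carry;
        if (overflow) {
            *lo ^= 0x87;                /* reduce modulo the polynomial */
        }
    }

The aligned fast path added to xts_encrypt()/xts_decrypt() then casts src/dst to xts_uint128 directly, falling back to a memcpy into a local block when the buffers are not 8-byte aligned.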
diff --git a/default-configs/alpha-softmmu.mak b/default-configs/alpha-softmmu.mak
index bbe361f01a..eb58b40254 100644
--- a/default-configs/alpha-softmmu.mak
+++ b/default-configs/alpha-softmmu.mak
@@ -19,3 +19,4 @@ CONFIG_IDE_CMD646=y
CONFIG_I8259=y
CONFIG_MC146818RTC=y
CONFIG_ISA_TESTDEV=y
+CONFIG_SMC37C669=y
diff --git a/hw/arm/boot.c b/hw/arm/boot.c
index 20c71d7d96..586baa9b64 100644
--- a/hw/arm/boot.c
+++ b/hw/arm/boot.c
@@ -24,6 +24,7 @@
#include "qemu/config-file.h"
#include "qemu/option.h"
#include "exec/address-spaces.h"
+#include "qemu/units.h"
/* Kernel boot protocol is specified in the kernel docs
* Documentation/arm/Booting and Documentation/arm64/booting.txt
@@ -36,6 +37,8 @@
#define ARM64_TEXT_OFFSET_OFFSET 8
#define ARM64_MAGIC_OFFSET 56
+#define BOOTLOADER_MAX_SIZE (4 * KiB)
+
AddressSpace *arm_boot_address_space(ARMCPU *cpu,
const struct arm_boot_info *info)
{
@@ -184,6 +187,8 @@ static void write_bootloader(const char *name, hwaddr addr,
code[i] = tswap32(insn);
}
+ assert((len * sizeof(uint32_t)) < BOOTLOADER_MAX_SIZE);
+
rom_add_blob_fixed_as(name, code, len * sizeof(uint32_t), addr, as);
g_free(code);
@@ -919,6 +924,19 @@ static uint64_t load_aarch64_image(const char *filename, hwaddr mem_base,
memcpy(&hdrvals, buffer + ARM64_TEXT_OFFSET_OFFSET, sizeof(hdrvals));
if (hdrvals[1] != 0) {
kernel_load_offset = le64_to_cpu(hdrvals[0]);
+
+ /*
+ * We write our startup "bootloader" at the very bottom of RAM,
+ * so that bit can't be used for the image. Luckily the Image
+ * format specification is that the image requests only an offset
+ * from a 2MB boundary, not an absolute load address. So if the
+ * image requests an offset that might mean it overlaps with the
+ * bootloader, we can just load it starting at 2MB+offset rather
+ * than 0MB + offset.
+ */
+ if (kernel_load_offset < BOOTLOADER_MAX_SIZE) {
+ kernel_load_offset += 2 * MiB;
+ }
}
}
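
To make the new boot.c check concrete: BOOTLOADER_MAX_SIZE is 4 KiB, so an Image whose header requests the common text_offset of 0x80000 (512 KiB) loads unchanged, while an offset below 4 KiB would overlap the stub bootloader at the bottom of RAM and is bumped by 2 MiB. A tiny sketch of that decision (hypothetical helper, offsets made up for illustration):

    #include <stdint.h>

    #define BOOTLOADER_MAX_SIZE (4 * 1024)

    static uint64_t adjust_kernel_load_offset(uint64_t text_offset)
    {
        if (text_offset < BOOTLOADER_MAX_SIZE) {
            /* Still a legal placement: the Image format only requests an
             * offset from some 2 MiB boundary, not an absolute address. */
            text_offset += 2 * 1024 * 1024;
        }
        return text_offset;
    }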
diff --git a/hw/core/Makefile.objs b/hw/core/Makefile.objs
index b736ce223a..a799c83815 100644
--- a/hw/core/Makefile.objs
+++ b/hw/core/Makefile.objs
@@ -21,5 +21,4 @@ common-obj-$(CONFIG_SOFTMMU) += or-irq.o
common-obj-$(CONFIG_SOFTMMU) += split-irq.o
common-obj-$(CONFIG_PLATFORM_BUS) += platform-bus.o
common-obj-$(CONFIG_SOFTMMU) += generic-loader.o
-
-obj-$(CONFIG_SOFTMMU) += null-machine.o
+common-obj-$(CONFIG_SOFTMMU) += null-machine.o
diff --git a/hw/core/null-machine.c b/hw/core/null-machine.c
index cde4d3eb57..76d3f8e39c 100644
--- a/hw/core/null-machine.c
+++ b/hw/core/null-machine.c
@@ -18,7 +18,7 @@
#include "hw/boards.h"
#include "sysemu/sysemu.h"
#include "exec/address-spaces.h"
-#include "cpu.h"
+#include "qom/cpu.h"
static void machine_none_init(MachineState *mch)
{
diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c
index 0d816fdd2c..0beefb05d4 100644
--- a/hw/intc/armv7m_nvic.c
+++ b/hw/intc/armv7m_nvic.c
@@ -1055,17 +1055,17 @@ static uint32_t nvic_readl(NVICState *s, uint32_t offset, MemTxAttrs attrs)
case 0xd5c: /* MMFR3. */
return cpu->id_mmfr3;
case 0xd60: /* ISAR0. */
- return cpu->id_isar0;
+ return cpu->isar.id_isar0;
case 0xd64: /* ISAR1. */
- return cpu->id_isar1;
+ return cpu->isar.id_isar1;
case 0xd68: /* ISAR2. */
- return cpu->id_isar2;
+ return cpu->isar.id_isar2;
case 0xd6c: /* ISAR3. */
- return cpu->id_isar3;
+ return cpu->isar.id_isar3;
case 0xd70: /* ISAR4. */
- return cpu->id_isar4;
+ return cpu->isar.id_isar4;
case 0xd74: /* ISAR5. */
- return cpu->id_isar5;
+ return cpu->isar.id_isar5;
case 0xd78: /* CLIDR */
return cpu->clidr;
case 0xd7c: /* CTR */
diff --git a/hw/isa/Makefile.objs b/hw/isa/Makefile.objs
index 83e06f6c04..9e106df186 100644
--- a/hw/isa/Makefile.objs
+++ b/hw/isa/Makefile.objs
@@ -1,9 +1,10 @@
common-obj-$(CONFIG_ISA_BUS) += isa-bus.o
-common-obj-$(CONFIG_ISA_BUS) += isa-superio.o smc37c669-superio.o
+common-obj-$(CONFIG_ISA_BUS) += isa-superio.o
common-obj-$(CONFIG_APM) += apm.o
common-obj-$(CONFIG_I82378) += i82378.o
common-obj-$(CONFIG_PC87312) += pc87312.o
common-obj-$(CONFIG_PIIX4) += piix4.o
common-obj-$(CONFIG_VT82C686) += vt82c686.o
+common-obj-$(CONFIG_SMC37C669) += smc37c669-superio.o
obj-$(CONFIG_LPC_ICH9) += lpc_ich9.o
diff --git a/hw/net/cadence_gem.c b/hw/net/cadence_gem.c
index 1795998928..d95cc27f58 100644
--- a/hw/net/cadence_gem.c
+++ b/hw/net/cadence_gem.c
@@ -142,6 +142,7 @@
#define GEM_DESCONF4 (0x0000028C/4)
#define GEM_DESCONF5 (0x00000290/4)
#define GEM_DESCONF6 (0x00000294/4)
+#define GEM_DESCONF6_64B_MASK (1U << 23)
#define GEM_DESCONF7 (0x00000298/4)
#define GEM_INT_Q1_STATUS (0x00000400 / 4)
@@ -1283,6 +1284,7 @@ static void gem_reset(DeviceState *d)
int i;
CadenceGEMState *s = CADENCE_GEM(d);
const uint8_t *a;
+ uint32_t queues_mask = 0;
DB_PRINT("\n");
@@ -1299,7 +1301,12 @@ static void gem_reset(DeviceState *d)
s->regs[GEM_DESCONF] = 0x02500111;
s->regs[GEM_DESCONF2] = 0x2ab13fff;
s->regs[GEM_DESCONF5] = 0x002f2045;
- s->regs[GEM_DESCONF6] = 0x00000200;
+ s->regs[GEM_DESCONF6] = GEM_DESCONF6_64B_MASK;
+
+ if (s->num_priority_queues > 1) {
+ queues_mask = MAKE_64BIT_MASK(1, s->num_priority_queues - 1);
+ s->regs[GEM_DESCONF6] |= queues_mask;
+ }
/* Set MAC address */
a = &s->conf.macaddr.a[0];
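
With this change GEM_DESCONF6 advertises 64-bit descriptor support plus one bit per priority queue beyond queue 0 (MAKE_64BIT_MASK(shift, length) produces `length` consecutive one-bits starting at bit `shift`). A small sketch of the resulting reset value (illustrative helper, not the device code):

    #include <stdint.h>

    /* e.g. num_priority_queues = 4 -> queue bits 0b1110, so the register
     * reads (1 << 23) | 0xe: 64-bit descriptors plus queues 1..3 on top
     * of the always-present queue 0. */
    static uint32_t gem_desconf6(unsigned num_priority_queues)
    {
        uint32_t val = 1u << 23;    /* GEM_DESCONF6_64B_MASK */

        if (num_priority_queues > 1) {
            val |= ((1u << (num_priority_queues - 1)) - 1) << 1;
        }
        return val;
    }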
diff --git a/hw/riscv/sifive_clint.c b/hw/riscv/sifive_clint.c
index 7cc606e065..0d2fd52487 100644
--- a/hw/riscv/sifive_clint.c
+++ b/hw/riscv/sifive_clint.c
@@ -47,12 +47,12 @@ static void sifive_clint_write_timecmp(RISCVCPU *cpu, uint64_t value)
if (cpu->env.timecmp <= rtc_r) {
/* if we're setting an MTIMECMP value in the "past",
immediately raise the timer interrupt */
- riscv_set_local_interrupt(cpu, MIP_MTIP, 1);
+ riscv_cpu_update_mip(cpu, MIP_MTIP, BOOL_TO_MASK(1));
return;
}
/* otherwise, set up the future timer interrupt */
- riscv_set_local_interrupt(cpu, MIP_MTIP, 0);
+ riscv_cpu_update_mip(cpu, MIP_MTIP, BOOL_TO_MASK(0));
diff = cpu->env.timecmp - rtc_r;
/* back to ns (note args switched in muldiv64) */
next = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
@@ -67,7 +67,7 @@ static void sifive_clint_write_timecmp(RISCVCPU *cpu, uint64_t value)
static void sifive_clint_timer_cb(void *opaque)
{
RISCVCPU *cpu = opaque;
- riscv_set_local_interrupt(cpu, MIP_MTIP, 1);
+ riscv_cpu_update_mip(cpu, MIP_MTIP, BOOL_TO_MASK(1));
}
/* CPU wants to read rtc or timecmp register */
@@ -132,7 +132,7 @@ static void sifive_clint_write(void *opaque, hwaddr addr, uint64_t value,
if (!env) {
error_report("clint: invalid timecmp hartid: %zu", hartid);
} else if ((addr & 0x3) == 0) {
- riscv_set_local_interrupt(RISCV_CPU(cpu), MIP_MSIP, value != 0);
+ riscv_cpu_update_mip(RISCV_CPU(cpu), MIP_MSIP, BOOL_TO_MASK(value));
} else {
error_report("clint: invalid sip write: %08x", (uint32_t)addr);
}
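
The old riscv_set_local_interrupt(cpu, bit, level) took a boolean level; riscv_cpu_update_mip(cpu, mask, value) instead takes the new bit values under a mask, so callers turn the boolean into an all-ones/all-zeros value with BOOL_TO_MASK. A hedged sketch of the idea (the exact QEMU macro may differ slightly; assume it yields all ones for true and zero for false):

    #include <stdint.h>

    #define BOOL_TO_MASK(x) (-(uint64_t)!!(x))   /* 0 -> 0, non-zero -> all ones */

    /* Only the bits selected by "mask" change; everything else in the
     * pending-interrupt register is preserved. */
    static uint64_t update_mip(uint64_t mip, uint64_t mask, uint64_t value)
    {
        return (mip & ~mask) | (value & mask);
    }

    /* update_mip(mip, MIP_MTIP, BOOL_TO_MASK(1)) sets MTIP;
     * update_mip(mip, MIP_MTIP, BOOL_TO_MASK(0)) clears it. */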
diff --git a/hw/riscv/sifive_plic.c b/hw/riscv/sifive_plic.c
index f635e6ff67..9cf9a1f986 100644
--- a/hw/riscv/sifive_plic.c
+++ b/hw/riscv/sifive_plic.c
@@ -142,10 +142,10 @@ static void sifive_plic_update(SiFivePLICState *plic)
int level = sifive_plic_irqs_pending(plic, addrid);
switch (mode) {
case PLICMode_M:
- riscv_set_local_interrupt(RISCV_CPU(cpu), MIP_MEIP, level);
+ riscv_cpu_update_mip(RISCV_CPU(cpu), MIP_MEIP, BOOL_TO_MASK(level));
break;
case PLICMode_S:
- riscv_set_local_interrupt(RISCV_CPU(cpu), MIP_SEIP, level);
+ riscv_cpu_update_mip(RISCV_CPU(cpu), MIP_SEIP, BOOL_TO_MASK(level));
break;
default:
break;
diff --git a/hw/riscv/sifive_u.c b/hw/riscv/sifive_u.c
index 862f8ff5f7..ef07df2442 100644
--- a/hw/riscv/sifive_u.c
+++ b/hw/riscv/sifive_u.c
@@ -230,7 +230,9 @@ static void create_fdt(SiFiveUState *s, const struct MemmapEntry *memmap,
qemu_fdt_add_subnode(fdt, "/chosen");
qemu_fdt_setprop_string(fdt, "/chosen", "stdout-path", nodename);
- qemu_fdt_setprop_string(fdt, "/chosen", "bootargs", cmdline);
+ if (cmdline) {
+ qemu_fdt_setprop_string(fdt, "/chosen", "bootargs", cmdline);
+ }
g_free(nodename);
}
diff --git a/hw/riscv/spike.c b/hw/riscv/spike.c
index be5ef85e81..8a712ed490 100644
--- a/hw/riscv/spike.c
+++ b/hw/riscv/spike.c
@@ -156,8 +156,10 @@ static void create_fdt(SpikeState *s, const struct MemmapEntry *memmap,
g_free(cells);
g_free(nodename);
- qemu_fdt_add_subnode(fdt, "/chosen");
- qemu_fdt_setprop_string(fdt, "/chosen", "bootargs", cmdline);
+ if (cmdline) {
+ qemu_fdt_add_subnode(fdt, "/chosen");
+ qemu_fdt_setprop_string(fdt, "/chosen", "bootargs", cmdline);
+ }
}
static void spike_v1_10_0_board_init(MachineState *machine)
diff --git a/hw/riscv/virt.c b/hw/riscv/virt.c
index 005169eabc..4a137a503c 100644
--- a/hw/riscv/virt.c
+++ b/hw/riscv/virt.c
@@ -254,7 +254,9 @@ static void *create_fdt(RISCVVirtState *s, const struct MemmapEntry *memmap,
qemu_fdt_add_subnode(fdt, "/chosen");
qemu_fdt_setprop_string(fdt, "/chosen", "stdout-path", nodename);
- qemu_fdt_setprop_string(fdt, "/chosen", "bootargs", cmdline);
+ if (cmdline) {
+ qemu_fdt_setprop_string(fdt, "/chosen", "bootargs", cmdline);
+ }
g_free(nodename);
return fdt;
@@ -385,6 +387,8 @@ static void riscv_virt_board_init(MachineState *machine)
serial_mm_init(system_memory, memmap[VIRT_UART0].base,
0, qdev_get_gpio_in(DEVICE(s->plic), UART0_IRQ), 399193,
serial_hd(0), DEVICE_LITTLE_ENDIAN);
+
+ g_free(plic_hart_config);
}
static void riscv_virt_board_machine_init(MachineClass *mc)
diff --git a/hw/sd/ssi-sd.c b/hw/sd/ssi-sd.c
index 95a143bfba..623d0333e8 100644
--- a/hw/sd/ssi-sd.c
+++ b/hw/sd/ssi-sd.c
@@ -284,6 +284,8 @@ static void ssi_sd_class_init(ObjectClass *klass, void *data)
k->cs_polarity = SSI_CS_LOW;
dc->vmsd = &vmstate_ssi_sd;
dc->reset = ssi_sd_reset;
+ /* Reason: init() method uses drive_get_next() */
+ dc->user_creatable = false;
}
static const TypeInfo ssi_sd_info = {
diff --git a/linux-user/aarch64/signal.c b/linux-user/aarch64/signal.c
index 07fedfc33c..f84a9cf28a 100644
--- a/linux-user/aarch64/signal.c
+++ b/linux-user/aarch64/signal.c
@@ -314,7 +314,7 @@ static int target_restore_sigframe(CPUARMState *env,
break;
case TARGET_SVE_MAGIC:
- if (arm_feature(env, ARM_FEATURE_SVE)) {
+ if (cpu_isar_feature(aa64_sve, arm_env_get_cpu(env))) {
vq = (env->vfp.zcr_el[1] & 0xf) + 1;
sve_size = QEMU_ALIGN_UP(TARGET_SVE_SIG_CONTEXT_SIZE(vq), 16);
if (!sve && size == sve_size) {
@@ -433,7 +433,7 @@ static void target_setup_frame(int usig, struct target_sigaction *ka,
&layout);
/* SVE state needs saving only if it exists. */
- if (arm_feature(env, ARM_FEATURE_SVE)) {
+ if (cpu_isar_feature(aa64_sve, arm_env_get_cpu(env))) {
vq = (env->vfp.zcr_el[1] & 0xf) + 1;
sve_size = QEMU_ALIGN_UP(TARGET_SVE_SIG_CONTEXT_SIZE(vq), 16);
sve_ofs = alloc_sigframe_space(sve_size, &layout);
diff --git a/linux-user/elfload.c b/linux-user/elfload.c
index 10bca65b99..055f6a95ab 100644
--- a/linux-user/elfload.c
+++ b/linux-user/elfload.c
@@ -458,6 +458,10 @@ static uint32_t get_elf_hwcap(void)
/* probe for the extra features */
#define GET_FEATURE(feat, hwcap) \
do { if (arm_feature(&cpu->env, feat)) { hwcaps |= hwcap; } } while (0)
+
+#define GET_FEATURE_ID(feat, hwcap) \
+ do { if (cpu_isar_feature(feat, cpu)) { hwcaps |= hwcap; } } while (0)
+
/* EDSP is in v5TE and above, but all our v5 CPUs are v5TE */
GET_FEATURE(ARM_FEATURE_V5, ARM_HWCAP_ARM_EDSP);
GET_FEATURE(ARM_FEATURE_VFP, ARM_HWCAP_ARM_VFP);
@@ -467,8 +471,8 @@ static uint32_t get_elf_hwcap(void)
GET_FEATURE(ARM_FEATURE_VFP3, ARM_HWCAP_ARM_VFPv3);
GET_FEATURE(ARM_FEATURE_V6K, ARM_HWCAP_ARM_TLS);
GET_FEATURE(ARM_FEATURE_VFP4, ARM_HWCAP_ARM_VFPv4);
- GET_FEATURE(ARM_FEATURE_ARM_DIV, ARM_HWCAP_ARM_IDIVA);
- GET_FEATURE(ARM_FEATURE_THUMB_DIV, ARM_HWCAP_ARM_IDIVT);
+ GET_FEATURE_ID(arm_div, ARM_HWCAP_ARM_IDIVA);
+ GET_FEATURE_ID(thumb_div, ARM_HWCAP_ARM_IDIVT);
/* All QEMU's VFPv3 CPUs have 32 registers, see VFP_DREG in translate.c.
* Note that the ARM_HWCAP_ARM_VFPv3D16 bit is always the inverse of
* ARM_HWCAP_ARM_VFPD32 (and so always clear for QEMU); it is unrelated
@@ -485,15 +489,16 @@ static uint32_t get_elf_hwcap2(void)
ARMCPU *cpu = ARM_CPU(thread_cpu);
uint32_t hwcaps = 0;
- GET_FEATURE(ARM_FEATURE_V8_AES, ARM_HWCAP2_ARM_AES);
- GET_FEATURE(ARM_FEATURE_V8_PMULL, ARM_HWCAP2_ARM_PMULL);
- GET_FEATURE(ARM_FEATURE_V8_SHA1, ARM_HWCAP2_ARM_SHA1);
- GET_FEATURE(ARM_FEATURE_V8_SHA256, ARM_HWCAP2_ARM_SHA2);
- GET_FEATURE(ARM_FEATURE_CRC, ARM_HWCAP2_ARM_CRC32);
+ GET_FEATURE_ID(aa32_aes, ARM_HWCAP2_ARM_AES);
+ GET_FEATURE_ID(aa32_pmull, ARM_HWCAP2_ARM_PMULL);
+ GET_FEATURE_ID(aa32_sha1, ARM_HWCAP2_ARM_SHA1);
+ GET_FEATURE_ID(aa32_sha2, ARM_HWCAP2_ARM_SHA2);
+ GET_FEATURE_ID(aa32_crc32, ARM_HWCAP2_ARM_CRC32);
return hwcaps;
}
#undef GET_FEATURE
+#undef GET_FEATURE_ID
#else
/* 64 bit ARM definitions */
@@ -568,25 +573,26 @@ static uint32_t get_elf_hwcap(void)
hwcaps |= ARM_HWCAP_A64_ASIMD;
/* probe for the extra features */
-#define GET_FEATURE(feat, hwcap) \
- do { if (arm_feature(&cpu->env, feat)) { hwcaps |= hwcap; } } while (0)
- GET_FEATURE(ARM_FEATURE_V8_AES, ARM_HWCAP_A64_AES);
- GET_FEATURE(ARM_FEATURE_V8_PMULL, ARM_HWCAP_A64_PMULL);
- GET_FEATURE(ARM_FEATURE_V8_SHA1, ARM_HWCAP_A64_SHA1);
- GET_FEATURE(ARM_FEATURE_V8_SHA256, ARM_HWCAP_A64_SHA2);
- GET_FEATURE(ARM_FEATURE_CRC, ARM_HWCAP_A64_CRC32);
- GET_FEATURE(ARM_FEATURE_V8_SHA3, ARM_HWCAP_A64_SHA3);
- GET_FEATURE(ARM_FEATURE_V8_SM3, ARM_HWCAP_A64_SM3);
- GET_FEATURE(ARM_FEATURE_V8_SM4, ARM_HWCAP_A64_SM4);
- GET_FEATURE(ARM_FEATURE_V8_SHA512, ARM_HWCAP_A64_SHA512);
- GET_FEATURE(ARM_FEATURE_V8_FP16,
- ARM_HWCAP_A64_FPHP | ARM_HWCAP_A64_ASIMDHP);
- GET_FEATURE(ARM_FEATURE_V8_ATOMICS, ARM_HWCAP_A64_ATOMICS);
- GET_FEATURE(ARM_FEATURE_V8_RDM, ARM_HWCAP_A64_ASIMDRDM);
- GET_FEATURE(ARM_FEATURE_V8_DOTPROD, ARM_HWCAP_A64_ASIMDDP);
- GET_FEATURE(ARM_FEATURE_V8_FCMA, ARM_HWCAP_A64_FCMA);
- GET_FEATURE(ARM_FEATURE_SVE, ARM_HWCAP_A64_SVE);
-#undef GET_FEATURE
+#define GET_FEATURE_ID(feat, hwcap) \
+ do { if (cpu_isar_feature(feat, cpu)) { hwcaps |= hwcap; } } while (0)
+
+ GET_FEATURE_ID(aa64_aes, ARM_HWCAP_A64_AES);
+ GET_FEATURE_ID(aa64_pmull, ARM_HWCAP_A64_PMULL);
+ GET_FEATURE_ID(aa64_sha1, ARM_HWCAP_A64_SHA1);
+ GET_FEATURE_ID(aa64_sha256, ARM_HWCAP_A64_SHA2);
+ GET_FEATURE_ID(aa64_sha512, ARM_HWCAP_A64_SHA512);
+ GET_FEATURE_ID(aa64_crc32, ARM_HWCAP_A64_CRC32);
+ GET_FEATURE_ID(aa64_sha3, ARM_HWCAP_A64_SHA3);
+ GET_FEATURE_ID(aa64_sm3, ARM_HWCAP_A64_SM3);
+ GET_FEATURE_ID(aa64_sm4, ARM_HWCAP_A64_SM4);
+ GET_FEATURE_ID(aa64_fp16, ARM_HWCAP_A64_FPHP | ARM_HWCAP_A64_ASIMDHP);
+ GET_FEATURE_ID(aa64_atomics, ARM_HWCAP_A64_ATOMICS);
+ GET_FEATURE_ID(aa64_rdm, ARM_HWCAP_A64_ASIMDRDM);
+ GET_FEATURE_ID(aa64_dp, ARM_HWCAP_A64_ASIMDDP);
+ GET_FEATURE_ID(aa64_fcma, ARM_HWCAP_A64_FCMA);
+ GET_FEATURE_ID(aa64_sve, ARM_HWCAP_A64_SVE);
+
+#undef GET_FEATURE_ID
return hwcaps;
}
diff --git a/linux-user/mips/target_elf.h b/linux-user/mips/target_elf.h
index fa5d30bf99..a98c9bd6ad 100644
--- a/linux-user/mips/target_elf.h
+++ b/linux-user/mips/target_elf.h
@@ -12,6 +12,9 @@ static inline const char *cpu_get_model(uint32_t eflags)
if ((eflags & EF_MIPS_ARCH) == EF_MIPS_ARCH_32R6) {
return "mips32r6-generic";
}
+ if ((eflags & EF_MIPS_MACH) == EF_MIPS_MACH_5900) {
+ return "R5900";
+ }
return "24Kf";
}
#endif
diff --git a/linux-user/syscall.c b/linux-user/syscall.c
index cf4511b0e4..15b03e17b9 100644
--- a/linux-user/syscall.c
+++ b/linux-user/syscall.c
@@ -9544,7 +9544,7 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
* even though the current architectural maximum is VQ=16.
*/
ret = -TARGET_EINVAL;
- if (arm_feature(cpu_env, ARM_FEATURE_SVE)
+ if (cpu_isar_feature(aa64_sve, arm_env_get_cpu(cpu_env))
&& arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
CPUARMState *env = cpu_env;
ARMCPU *cpu = arm_env_get_cpu(env);
@@ -9563,9 +9563,11 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
return ret;
case TARGET_PR_SVE_GET_VL:
ret = -TARGET_EINVAL;
- if (arm_feature(cpu_env, ARM_FEATURE_SVE)) {
- CPUARMState *env = cpu_env;
- ret = ((env->vfp.zcr_el[1] & 0xf) + 1) * 16;
+ {
+ ARMCPU *cpu = arm_env_get_cpu(cpu_env);
+ if (cpu_isar_feature(aa64_sve, cpu)) {
+ ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
+ }
}
return ret;
#endif /* AARCH64 */
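
The PR_SVE_GET_VL arithmetic above follows from ZCR_EL1.LEN encoding the vector length in 128-bit granules minus one, so the result is reported in bytes. A minimal worked sketch (illustrative only):

    #include <stdint.h>

    /* LEN = 0 means a 128-bit (16-byte) vector; LEN = 15 means the
     * architectural maximum of 2048 bits (256 bytes), i.e. VQ = 16. */
    static unsigned sve_vl_bytes(uint64_t zcr_el1)
    {
        return ((zcr_el1 & 0xf) + 1) * 16;
    }

    /* sve_vl_bytes(0) == 16, sve_vl_bytes(0xf) == 256 */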
diff --git a/po/Makefile b/po/Makefile
index e47e262ee6..c041f4c858 100644
--- a/po/Makefile
+++ b/po/Makefile
@@ -36,7 +36,7 @@ clean:
install: $(OBJS)
for obj in $(OBJS); do \
- base=`basename $$obj .mo`; \
+ base=$$(basename $$obj .mo); \
$(INSTALL) -d $(DESTDIR)$(prefix)/share/locale/$$base/LC_MESSAGES; \
$(INSTALL) -m644 $$obj $(DESTDIR)$(prefix)/share/locale/$$base/LC_MESSAGES/qemu.mo; \
done
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index cd48ad42d8..8f16e96b6c 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -144,9 +144,9 @@ static void arm_cpu_reset(CPUState *s)
g_hash_table_foreach(cpu->cp_regs, cp_reg_check_reset, cpu);
env->vfp.xregs[ARM_VFP_FPSID] = cpu->reset_fpsid;
- env->vfp.xregs[ARM_VFP_MVFR0] = cpu->mvfr0;
- env->vfp.xregs[ARM_VFP_MVFR1] = cpu->mvfr1;
- env->vfp.xregs[ARM_VFP_MVFR2] = cpu->mvfr2;
+ env->vfp.xregs[ARM_VFP_MVFR0] = cpu->isar.mvfr0;
+ env->vfp.xregs[ARM_VFP_MVFR1] = cpu->isar.mvfr1;
+ env->vfp.xregs[ARM_VFP_MVFR2] = cpu->isar.mvfr2;
cpu->power_state = cpu->start_powered_off ? PSCI_OFF : PSCI_ON;
s->halted = cpu->start_powered_off;
@@ -814,7 +814,11 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
/* Some features automatically imply others: */
if (arm_feature(env, ARM_FEATURE_V8)) {
- set_feature(env, ARM_FEATURE_V7VE);
+ if (arm_feature(env, ARM_FEATURE_M)) {
+ set_feature(env, ARM_FEATURE_V7);
+ } else {
+ set_feature(env, ARM_FEATURE_V7VE);
+ }
}
if (arm_feature(env, ARM_FEATURE_V7VE)) {
/* v7 Virtualization Extensions. In real hardware this implies
@@ -825,7 +829,7 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
* Presence of EL2 itself is ARM_FEATURE_EL2, and of the
* Security Extensions is ARM_FEATURE_EL3.
*/
- set_feature(env, ARM_FEATURE_ARM_DIV);
+ assert(cpu_isar_feature(arm_div, cpu));
set_feature(env, ARM_FEATURE_LPAE);
set_feature(env, ARM_FEATURE_V7);
}
@@ -850,20 +854,14 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
}
if (arm_feature(env, ARM_FEATURE_V6)) {
set_feature(env, ARM_FEATURE_V5);
- set_feature(env, ARM_FEATURE_JAZELLE);
if (!arm_feature(env, ARM_FEATURE_M)) {
+ assert(cpu_isar_feature(jazelle, cpu));
set_feature(env, ARM_FEATURE_AUXCR);
}
}
if (arm_feature(env, ARM_FEATURE_V5)) {
set_feature(env, ARM_FEATURE_V4T);
}
- if (arm_feature(env, ARM_FEATURE_M)) {
- set_feature(env, ARM_FEATURE_THUMB_DIV);
- }
- if (arm_feature(env, ARM_FEATURE_ARM_DIV)) {
- set_feature(env, ARM_FEATURE_THUMB_DIV);
- }
if (arm_feature(env, ARM_FEATURE_VFP4)) {
set_feature(env, ARM_FEATURE_VFP3);
set_feature(env, ARM_FEATURE_VFP_FP16);
@@ -938,7 +936,7 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
* registers as well. These are id_pfr1[7:4] and id_aa64pfr0[15:12].
*/
cpu->id_pfr1 &= ~0xf0;
- cpu->id_aa64pfr0 &= ~0xf000;
+ cpu->isar.id_aa64pfr0 &= ~0xf000;
}
if (!cpu->has_el2) {
@@ -955,7 +953,7 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
* registers if we don't have EL2. These are id_pfr1[15:12] and
* id_aa64pfr0_el1[11:8].
*/
- cpu->id_aa64pfr0 &= ~0xf00;
+ cpu->isar.id_aa64pfr0 &= ~0xf00;
cpu->id_pfr1 &= ~0xf000;
}
@@ -1084,11 +1082,16 @@ static void arm926_initfn(Object *obj)
set_feature(&cpu->env, ARM_FEATURE_VFP);
set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
set_feature(&cpu->env, ARM_FEATURE_CACHE_TEST_CLEAN);
- set_feature(&cpu->env, ARM_FEATURE_JAZELLE);
cpu->midr = 0x41069265;
cpu->reset_fpsid = 0x41011090;
cpu->ctr = 0x1dd20d2;
cpu->reset_sctlr = 0x00090078;
+
+ /*
+ * ARMv5 does not have the ID_ISAR registers, but we can still
+ * set the field to indicate Jazelle support within QEMU.
+ */
+ cpu->isar.id_isar1 = FIELD_DP32(cpu->isar.id_isar1, ID_ISAR1, JAZELLE, 1);
}
static void arm946_initfn(Object *obj)
@@ -1114,12 +1117,18 @@ static void arm1026_initfn(Object *obj)
set_feature(&cpu->env, ARM_FEATURE_AUXCR);
set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
set_feature(&cpu->env, ARM_FEATURE_CACHE_TEST_CLEAN);
- set_feature(&cpu->env, ARM_FEATURE_JAZELLE);
cpu->midr = 0x4106a262;
cpu->reset_fpsid = 0x410110a0;
cpu->ctr = 0x1dd20d2;
cpu->reset_sctlr = 0x00090078;
cpu->reset_auxcr = 1;
+
+ /*
+ * ARMv5 does not have the ID_ISAR registers, but we can still
+ * set the field to indicate Jazelle support within QEMU.
+ */
+ cpu->isar.id_isar1 = FIELD_DP32(cpu->isar.id_isar1, ID_ISAR1, JAZELLE, 1);
+
{
/* The 1026 had an IFAR at c6,c0,0,1 rather than the ARMv6 c6,c0,0,2 */
ARMCPRegInfo ifar = {
@@ -1151,8 +1160,8 @@ static void arm1136_r2_initfn(Object *obj)
set_feature(&cpu->env, ARM_FEATURE_CACHE_BLOCK_OPS);
cpu->midr = 0x4107b362;
cpu->reset_fpsid = 0x410120b4;
- cpu->mvfr0 = 0x11111111;
- cpu->mvfr1 = 0x00000000;
+ cpu->isar.mvfr0 = 0x11111111;
+ cpu->isar.mvfr1 = 0x00000000;
cpu->ctr = 0x1dd20d2;
cpu->reset_sctlr = 0x00050078;
cpu->id_pfr0 = 0x111;
@@ -1162,11 +1171,11 @@ static void arm1136_r2_initfn(Object *obj)
cpu->id_mmfr0 = 0x01130003;
cpu->id_mmfr1 = 0x10030302;
cpu->id_mmfr2 = 0x01222110;
- cpu->id_isar0 = 0x00140011;
- cpu->id_isar1 = 0x12002111;
- cpu->id_isar2 = 0x11231111;
- cpu->id_isar3 = 0x01102131;
- cpu->id_isar4 = 0x141;
+ cpu->isar.id_isar0 = 0x00140011;
+ cpu->isar.id_isar1 = 0x12002111;
+ cpu->isar.id_isar2 = 0x11231111;
+ cpu->isar.id_isar3 = 0x01102131;
+ cpu->isar.id_isar4 = 0x141;
cpu->reset_auxcr = 7;
}
@@ -1183,8 +1192,8 @@ static void arm1136_initfn(Object *obj)
set_feature(&cpu->env, ARM_FEATURE_CACHE_BLOCK_OPS);
cpu->midr = 0x4117b363;
cpu->reset_fpsid = 0x410120b4;
- cpu->mvfr0 = 0x11111111;
- cpu->mvfr1 = 0x00000000;
+ cpu->isar.mvfr0 = 0x11111111;
+ cpu->isar.mvfr1 = 0x00000000;
cpu->ctr = 0x1dd20d2;
cpu->reset_sctlr = 0x00050078;
cpu->id_pfr0 = 0x111;
@@ -1194,11 +1203,11 @@ static void arm1136_initfn(Object *obj)
cpu->id_mmfr0 = 0x01130003;
cpu->id_mmfr1 = 0x10030302;
cpu->id_mmfr2 = 0x01222110;
- cpu->id_isar0 = 0x00140011;
- cpu->id_isar1 = 0x12002111;
- cpu->id_isar2 = 0x11231111;
- cpu->id_isar3 = 0x01102131;
- cpu->id_isar4 = 0x141;
+ cpu->isar.id_isar0 = 0x00140011;
+ cpu->isar.id_isar1 = 0x12002111;
+ cpu->isar.id_isar2 = 0x11231111;
+ cpu->isar.id_isar3 = 0x01102131;
+ cpu->isar.id_isar4 = 0x141;
cpu->reset_auxcr = 7;
}
@@ -1216,8 +1225,8 @@ static void arm1176_initfn(Object *obj)
set_feature(&cpu->env, ARM_FEATURE_EL3);
cpu->midr = 0x410fb767;
cpu->reset_fpsid = 0x410120b5;
- cpu->mvfr0 = 0x11111111;
- cpu->mvfr1 = 0x00000000;
+ cpu->isar.mvfr0 = 0x11111111;
+ cpu->isar.mvfr1 = 0x00000000;
cpu->ctr = 0x1dd20d2;
cpu->reset_sctlr = 0x00050078;
cpu->id_pfr0 = 0x111;
@@ -1227,11 +1236,11 @@ static void arm1176_initfn(Object *obj)
cpu->id_mmfr0 = 0x01130003;
cpu->id_mmfr1 = 0x10030302;
cpu->id_mmfr2 = 0x01222100;
- cpu->id_isar0 = 0x0140011;
- cpu->id_isar1 = 0x12002111;
- cpu->id_isar2 = 0x11231121;
- cpu->id_isar3 = 0x01102131;
- cpu->id_isar4 = 0x01141;
+ cpu->isar.id_isar0 = 0x0140011;
+ cpu->isar.id_isar1 = 0x12002111;
+ cpu->isar.id_isar2 = 0x11231121;
+ cpu->isar.id_isar3 = 0x01102131;
+ cpu->isar.id_isar4 = 0x01141;
cpu->reset_auxcr = 7;
}
@@ -1247,8 +1256,8 @@ static void arm11mpcore_initfn(Object *obj)
set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
cpu->midr = 0x410fb022;
cpu->reset_fpsid = 0x410120b4;
- cpu->mvfr0 = 0x11111111;
- cpu->mvfr1 = 0x00000000;
+ cpu->isar.mvfr0 = 0x11111111;
+ cpu->isar.mvfr1 = 0x00000000;
cpu->ctr = 0x1d192992; /* 32K icache 32K dcache */
cpu->id_pfr0 = 0x111;
cpu->id_pfr1 = 0x1;
@@ -1257,11 +1266,11 @@ static void arm11mpcore_initfn(Object *obj)
cpu->id_mmfr0 = 0x01100103;
cpu->id_mmfr1 = 0x10020302;
cpu->id_mmfr2 = 0x01222000;
- cpu->id_isar0 = 0x00100011;
- cpu->id_isar1 = 0x12002111;
- cpu->id_isar2 = 0x11221011;
- cpu->id_isar3 = 0x01102131;
- cpu->id_isar4 = 0x141;
+ cpu->isar.id_isar0 = 0x00100011;
+ cpu->isar.id_isar1 = 0x12002111;
+ cpu->isar.id_isar2 = 0x11221011;
+ cpu->isar.id_isar3 = 0x01102131;
+ cpu->isar.id_isar4 = 0x141;
cpu->reset_auxcr = 1;
}
@@ -1290,13 +1299,13 @@ static void cortex_m3_initfn(Object *obj)
cpu->id_mmfr1 = 0x00000000;
cpu->id_mmfr2 = 0x00000000;
cpu->id_mmfr3 = 0x00000000;
- cpu->id_isar0 = 0x01141110;
- cpu->id_isar1 = 0x02111000;
- cpu->id_isar2 = 0x21112231;
- cpu->id_isar3 = 0x01111110;
- cpu->id_isar4 = 0x01310102;
- cpu->id_isar5 = 0x00000000;
- cpu->id_isar6 = 0x00000000;
+ cpu->isar.id_isar0 = 0x01141110;
+ cpu->isar.id_isar1 = 0x02111000;
+ cpu->isar.id_isar2 = 0x21112231;
+ cpu->isar.id_isar3 = 0x01111110;
+ cpu->isar.id_isar4 = 0x01310102;
+ cpu->isar.id_isar5 = 0x00000000;
+ cpu->isar.id_isar6 = 0x00000000;
}
static void cortex_m4_initfn(Object *obj)
@@ -1317,13 +1326,13 @@ static void cortex_m4_initfn(Object *obj)
cpu->id_mmfr1 = 0x00000000;
cpu->id_mmfr2 = 0x00000000;
cpu->id_mmfr3 = 0x00000000;
- cpu->id_isar0 = 0x01141110;
- cpu->id_isar1 = 0x02111000;
- cpu->id_isar2 = 0x21112231;
- cpu->id_isar3 = 0x01111110;
- cpu->id_isar4 = 0x01310102;
- cpu->id_isar5 = 0x00000000;
- cpu->id_isar6 = 0x00000000;
+ cpu->isar.id_isar0 = 0x01141110;
+ cpu->isar.id_isar1 = 0x02111000;
+ cpu->isar.id_isar2 = 0x21112231;
+ cpu->isar.id_isar3 = 0x01111110;
+ cpu->isar.id_isar4 = 0x01310102;
+ cpu->isar.id_isar5 = 0x00000000;
+ cpu->isar.id_isar6 = 0x00000000;
}
static void cortex_m33_initfn(Object *obj)
@@ -1346,13 +1355,13 @@ static void cortex_m33_initfn(Object *obj)
cpu->id_mmfr1 = 0x00000000;
cpu->id_mmfr2 = 0x01000000;
cpu->id_mmfr3 = 0x00000000;
- cpu->id_isar0 = 0x01101110;
- cpu->id_isar1 = 0x02212000;
- cpu->id_isar2 = 0x20232232;
- cpu->id_isar3 = 0x01111131;
- cpu->id_isar4 = 0x01310132;
- cpu->id_isar5 = 0x00000000;
- cpu->id_isar6 = 0x00000000;
+ cpu->isar.id_isar0 = 0x01101110;
+ cpu->isar.id_isar1 = 0x02212000;
+ cpu->isar.id_isar2 = 0x20232232;
+ cpu->isar.id_isar3 = 0x01111131;
+ cpu->isar.id_isar4 = 0x01310132;
+ cpu->isar.id_isar5 = 0x00000000;
+ cpu->isar.id_isar6 = 0x00000000;
cpu->clidr = 0x00000000;
cpu->ctr = 0x8000c000;
}
@@ -1384,8 +1393,6 @@ static void cortex_r5_initfn(Object *obj)
ARMCPU *cpu = ARM_CPU(obj);
set_feature(&cpu->env, ARM_FEATURE_V7);
- set_feature(&cpu->env, ARM_FEATURE_THUMB_DIV);
- set_feature(&cpu->env, ARM_FEATURE_ARM_DIV);
set_feature(&cpu->env, ARM_FEATURE_V7MP);
set_feature(&cpu->env, ARM_FEATURE_PMSA);
cpu->midr = 0x411fc153; /* r1p3 */
@@ -1397,13 +1404,13 @@ static void cortex_r5_initfn(Object *obj)
cpu->id_mmfr1 = 0x00000000;
cpu->id_mmfr2 = 0x01200000;
cpu->id_mmfr3 = 0x0211;
- cpu->id_isar0 = 0x02101111;
- cpu->id_isar1 = 0x13112111;
- cpu->id_isar2 = 0x21232141;
- cpu->id_isar3 = 0x01112131;
- cpu->id_isar4 = 0x0010142;
- cpu->id_isar5 = 0x0;
- cpu->id_isar6 = 0x0;
+ cpu->isar.id_isar0 = 0x02101111;
+ cpu->isar.id_isar1 = 0x13112111;
+ cpu->isar.id_isar2 = 0x21232141;
+ cpu->isar.id_isar3 = 0x01112131;
+ cpu->isar.id_isar4 = 0x0010142;
+ cpu->isar.id_isar5 = 0x0;
+ cpu->isar.id_isar6 = 0x0;
cpu->mp_is_up = true;
cpu->pmsav7_dregion = 16;
define_arm_cp_regs(cpu, cortexr5_cp_reginfo);
@@ -1438,8 +1445,8 @@ static void cortex_a8_initfn(Object *obj)
set_feature(&cpu->env, ARM_FEATURE_EL3);
cpu->midr = 0x410fc080;
cpu->reset_fpsid = 0x410330c0;
- cpu->mvfr0 = 0x11110222;
- cpu->mvfr1 = 0x00011111;
+ cpu->isar.mvfr0 = 0x11110222;
+ cpu->isar.mvfr1 = 0x00011111;
cpu->ctr = 0x82048004;
cpu->reset_sctlr = 0x00c50078;
cpu->id_pfr0 = 0x1031;
@@ -1450,11 +1457,11 @@ static void cortex_a8_initfn(Object *obj)
cpu->id_mmfr1 = 0x20000000;
cpu->id_mmfr2 = 0x01202000;
cpu->id_mmfr3 = 0x11;
- cpu->id_isar0 = 0x00101111;
- cpu->id_isar1 = 0x12112111;
- cpu->id_isar2 = 0x21232031;
- cpu->id_isar3 = 0x11112131;
- cpu->id_isar4 = 0x00111142;
+ cpu->isar.id_isar0 = 0x00101111;
+ cpu->isar.id_isar1 = 0x12112111;
+ cpu->isar.id_isar2 = 0x21232031;
+ cpu->isar.id_isar3 = 0x11112131;
+ cpu->isar.id_isar4 = 0x00111142;
cpu->dbgdidr = 0x15141000;
cpu->clidr = (1 << 27) | (2 << 24) | 3;
cpu->ccsidr[0] = 0xe007e01a; /* 16k L1 dcache. */
@@ -1512,8 +1519,8 @@ static void cortex_a9_initfn(Object *obj)
set_feature(&cpu->env, ARM_FEATURE_CBAR);
cpu->midr = 0x410fc090;
cpu->reset_fpsid = 0x41033090;
- cpu->mvfr0 = 0x11110222;
- cpu->mvfr1 = 0x01111111;
+ cpu->isar.mvfr0 = 0x11110222;
+ cpu->isar.mvfr1 = 0x01111111;
cpu->ctr = 0x80038003;
cpu->reset_sctlr = 0x00c50078;
cpu->id_pfr0 = 0x1031;
@@ -1524,11 +1531,11 @@ static void cortex_a9_initfn(Object *obj)
cpu->id_mmfr1 = 0x20000000;
cpu->id_mmfr2 = 0x01230000;
cpu->id_mmfr3 = 0x00002111;
- cpu->id_isar0 = 0x00101111;
- cpu->id_isar1 = 0x13112111;
- cpu->id_isar2 = 0x21232041;
- cpu->id_isar3 = 0x11112131;
- cpu->id_isar4 = 0x00111142;
+ cpu->isar.id_isar0 = 0x00101111;
+ cpu->isar.id_isar1 = 0x13112111;
+ cpu->isar.id_isar2 = 0x21232041;
+ cpu->isar.id_isar3 = 0x11112131;
+ cpu->isar.id_isar4 = 0x00111142;
cpu->dbgdidr = 0x35141000;
cpu->clidr = (1 << 27) | (1 << 24) | 3;
cpu->ccsidr[0] = 0xe00fe019; /* 16k L1 dcache. */
@@ -1573,8 +1580,8 @@ static void cortex_a7_initfn(Object *obj)
cpu->kvm_target = QEMU_KVM_ARM_TARGET_CORTEX_A7;
cpu->midr = 0x410fc075;
cpu->reset_fpsid = 0x41023075;
- cpu->mvfr0 = 0x10110222;
- cpu->mvfr1 = 0x11111111;
+ cpu->isar.mvfr0 = 0x10110222;
+ cpu->isar.mvfr1 = 0x11111111;
cpu->ctr = 0x84448003;
cpu->reset_sctlr = 0x00c50078;
cpu->id_pfr0 = 0x00001131;
@@ -1590,11 +1597,11 @@ static void cortex_a7_initfn(Object *obj)
/* a7_mpcore_r0p5_trm, page 4-4 gives 0x01101110; but
* table 4-41 gives 0x02101110, which includes the arm div insns.
*/
- cpu->id_isar0 = 0x02101110;
- cpu->id_isar1 = 0x13112111;
- cpu->id_isar2 = 0x21232041;
- cpu->id_isar3 = 0x11112131;
- cpu->id_isar4 = 0x10011142;
+ cpu->isar.id_isar0 = 0x02101110;
+ cpu->isar.id_isar1 = 0x13112111;
+ cpu->isar.id_isar2 = 0x21232041;
+ cpu->isar.id_isar3 = 0x11112131;
+ cpu->isar.id_isar4 = 0x10011142;
cpu->dbgdidr = 0x3515f005;
cpu->clidr = 0x0a200023;
cpu->ccsidr[0] = 0x701fe00a; /* 32K L1 dcache */
@@ -1619,8 +1626,8 @@ static void cortex_a15_initfn(Object *obj)
cpu->kvm_target = QEMU_KVM_ARM_TARGET_CORTEX_A15;
cpu->midr = 0x412fc0f1;
cpu->reset_fpsid = 0x410430f0;
- cpu->mvfr0 = 0x10110222;
- cpu->mvfr1 = 0x11111111;
+ cpu->isar.mvfr0 = 0x10110222;
+ cpu->isar.mvfr1 = 0x11111111;
cpu->ctr = 0x8444c004;
cpu->reset_sctlr = 0x00c50078;
cpu->id_pfr0 = 0x00001131;
@@ -1633,11 +1640,11 @@ static void cortex_a15_initfn(Object *obj)
cpu->id_mmfr1 = 0x20000000;
cpu->id_mmfr2 = 0x01240000;
cpu->id_mmfr3 = 0x02102211;
- cpu->id_isar0 = 0x02101110;
- cpu->id_isar1 = 0x13112111;
- cpu->id_isar2 = 0x21232041;
- cpu->id_isar3 = 0x11112131;
- cpu->id_isar4 = 0x10011142;
+ cpu->isar.id_isar0 = 0x02101110;
+ cpu->isar.id_isar1 = 0x13112111;
+ cpu->isar.id_isar2 = 0x21232041;
+ cpu->isar.id_isar3 = 0x11112131;
+ cpu->isar.id_isar4 = 0x10011142;
cpu->dbgdidr = 0x3515f021;
cpu->clidr = 0x0a200023;
cpu->ccsidr[0] = 0x701fe00a; /* 32K L1 dcache */
@@ -1830,17 +1837,26 @@ static void arm_max_initfn(Object *obj)
cortex_a15_initfn(obj);
#ifdef CONFIG_USER_ONLY
/* We don't set these in system emulation mode for the moment,
- * since we don't correctly set the ID registers to advertise them,
+ * since we don't correctly set (all of) the ID registers to
+ * advertise them.
*/
set_feature(&cpu->env, ARM_FEATURE_V8);
- set_feature(&cpu->env, ARM_FEATURE_V8_AES);
- set_feature(&cpu->env, ARM_FEATURE_V8_SHA1);
- set_feature(&cpu->env, ARM_FEATURE_V8_SHA256);
- set_feature(&cpu->env, ARM_FEATURE_V8_PMULL);
- set_feature(&cpu->env, ARM_FEATURE_CRC);
- set_feature(&cpu->env, ARM_FEATURE_V8_RDM);
- set_feature(&cpu->env, ARM_FEATURE_V8_DOTPROD);
- set_feature(&cpu->env, ARM_FEATURE_V8_FCMA);
+ {
+ uint32_t t;
+
+ t = cpu->isar.id_isar5;
+ t = FIELD_DP32(t, ID_ISAR5, AES, 2);
+ t = FIELD_DP32(t, ID_ISAR5, SHA1, 1);
+ t = FIELD_DP32(t, ID_ISAR5, SHA2, 1);
+ t = FIELD_DP32(t, ID_ISAR5, CRC32, 1);
+ t = FIELD_DP32(t, ID_ISAR5, RDM, 1);
+ t = FIELD_DP32(t, ID_ISAR5, VCMA, 1);
+ cpu->isar.id_isar5 = t;
+
+ t = cpu->isar.id_isar6;
+ t = FIELD_DP32(t, ID_ISAR6, DP, 1);
+ cpu->isar.id_isar6 = t;
+ }
#endif
}
}
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index f00c0444c4..8e6779936e 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -531,6 +531,13 @@ typedef struct CPUARMState {
*/
} exception;
+ /* Information associated with an SError */
+ struct {
+ uint8_t pending;
+ uint8_t has_esr;
+ uint64_t esr;
+ } serror;
+
/* Thumb-2 EE state. */
uint32_t teecr;
uint32_t teehbr;
@@ -669,6 +676,8 @@ typedef enum ARMPSCIState {
PSCI_ON_PENDING = 2
} ARMPSCIState;
+typedef struct ARMISARegisters ARMISARegisters;
+
/**
* ARMCPU:
* @env: #CPUARMState
@@ -788,13 +797,28 @@ struct ARMCPU {
* ARMv7AR ARM Architecture Reference Manual. A reset_ prefix
* is used for reset values of non-constant registers; no reset_
* prefix means a constant register.
+ * Some of these registers are split out into a substructure that
+ * is shared with the translators to control the ISA.
*/
+ struct ARMISARegisters {
+ uint32_t id_isar0;
+ uint32_t id_isar1;
+ uint32_t id_isar2;
+ uint32_t id_isar3;
+ uint32_t id_isar4;
+ uint32_t id_isar5;
+ uint32_t id_isar6;
+ uint32_t mvfr0;
+ uint32_t mvfr1;
+ uint32_t mvfr2;
+ uint64_t id_aa64isar0;
+ uint64_t id_aa64isar1;
+ uint64_t id_aa64pfr0;
+ uint64_t id_aa64pfr1;
+ } isar;
uint32_t midr;
uint32_t revidr;
uint32_t reset_fpsid;
- uint32_t mvfr0;
- uint32_t mvfr1;
- uint32_t mvfr2;
uint32_t ctr;
uint32_t reset_sctlr;
uint32_t id_pfr0;
@@ -808,21 +832,10 @@ struct ARMCPU {
uint32_t id_mmfr2;
uint32_t id_mmfr3;
uint32_t id_mmfr4;
- uint32_t id_isar0;
- uint32_t id_isar1;
- uint32_t id_isar2;
- uint32_t id_isar3;
- uint32_t id_isar4;
- uint32_t id_isar5;
- uint32_t id_isar6;
- uint64_t id_aa64pfr0;
- uint64_t id_aa64pfr1;
uint64_t id_aa64dfr0;
uint64_t id_aa64dfr1;
uint64_t id_aa64afr0;
uint64_t id_aa64afr1;
- uint64_t id_aa64isar0;
- uint64_t id_aa64isar1;
uint64_t id_aa64mmfr0;
uint64_t id_aa64mmfr1;
uint32_t dbgdidr;
@@ -1531,6 +1544,16 @@ FIELD(ID_AA64ISAR1, FRINTTS, 32, 4)
FIELD(ID_AA64ISAR1, SB, 36, 4)
FIELD(ID_AA64ISAR1, SPECRES, 40, 4)
+FIELD(ID_AA64PFR0, EL0, 0, 4)
+FIELD(ID_AA64PFR0, EL1, 4, 4)
+FIELD(ID_AA64PFR0, EL2, 8, 4)
+FIELD(ID_AA64PFR0, EL3, 12, 4)
+FIELD(ID_AA64PFR0, FP, 16, 4)
+FIELD(ID_AA64PFR0, ADVSIMD, 20, 4)
+FIELD(ID_AA64PFR0, GIC, 24, 4)
+FIELD(ID_AA64PFR0, RAS, 28, 4)
+FIELD(ID_AA64PFR0, SVE, 32, 4)
+
QEMU_BUILD_BUG_ON(ARRAY_SIZE(((ARMCPU *)0)->ccsidr) <= R_V7M_CSSELR_INDEX_MASK);
/* If adding a feature bit which corresponds to a Linux ELF
@@ -1550,7 +1573,6 @@ enum arm_features {
ARM_FEATURE_VFP3,
ARM_FEATURE_VFP_FP16,
ARM_FEATURE_NEON,
- ARM_FEATURE_THUMB_DIV, /* divide supported in Thumb encoding */
ARM_FEATURE_M, /* Microcontroller profile. */
ARM_FEATURE_OMAPCP, /* OMAP specific CP15 ops handling. */
ARM_FEATURE_THUMB2EE,
@@ -1560,7 +1582,6 @@ enum arm_features {
ARM_FEATURE_V5,
ARM_FEATURE_STRONGARM,
ARM_FEATURE_VAPA, /* cp15 VA to PA lookups */
- ARM_FEATURE_ARM_DIV, /* divide supported in ARM encoding */
ARM_FEATURE_VFP4, /* VFPv4 (implies that NEON is v2) */
ARM_FEATURE_GENERIC_TIMER,
ARM_FEATURE_MVFR, /* Media and VFP Feature Registers 0 and 1 */
@@ -1573,30 +1594,15 @@ enum arm_features {
ARM_FEATURE_LPAE, /* has Large Physical Address Extension */
ARM_FEATURE_V8,
ARM_FEATURE_AARCH64, /* supports 64 bit mode */
- ARM_FEATURE_V8_AES, /* implements AES part of v8 Crypto Extensions */
ARM_FEATURE_CBAR, /* has cp15 CBAR */
ARM_FEATURE_CRC, /* ARMv8 CRC instructions */
ARM_FEATURE_CBAR_RO, /* has cp15 CBAR and it is read-only */
ARM_FEATURE_EL2, /* has EL2 Virtualization support */
ARM_FEATURE_EL3, /* has EL3 Secure monitor support */
- ARM_FEATURE_V8_SHA1, /* implements SHA1 part of v8 Crypto Extensions */
- ARM_FEATURE_V8_SHA256, /* implements SHA256 part of v8 Crypto Extensions */
- ARM_FEATURE_V8_PMULL, /* implements PMULL part of v8 Crypto Extensions */
ARM_FEATURE_THUMB_DSP, /* DSP insns supported in the Thumb encodings */
ARM_FEATURE_PMU, /* has PMU support */
ARM_FEATURE_VBAR, /* has cp15 VBAR */
ARM_FEATURE_M_SECURITY, /* M profile Security Extension */
- ARM_FEATURE_JAZELLE, /* has (trivial) Jazelle implementation */
- ARM_FEATURE_SVE, /* has Scalable Vector Extension */
- ARM_FEATURE_V8_SHA512, /* implements SHA512 part of v8 Crypto Extensions */
- ARM_FEATURE_V8_SHA3, /* implements SHA3 part of v8 Crypto Extensions */
- ARM_FEATURE_V8_SM3, /* implements SM3 part of v8 Crypto Extensions */
- ARM_FEATURE_V8_SM4, /* implements SM4 part of v8 Crypto Extensions */
- ARM_FEATURE_V8_ATOMICS, /* ARMv8.1-Atomics feature */
- ARM_FEATURE_V8_RDM, /* implements v8.1 simd round multiply */
- ARM_FEATURE_V8_DOTPROD, /* implements v8.2 simd dot product */
- ARM_FEATURE_V8_FP16, /* implements v8.2 half-precision float */
- ARM_FEATURE_V8_FCMA, /* has complex number part of v8.3 extensions. */
ARM_FEATURE_M_MAIN, /* M profile Main Extension */
};
@@ -3148,4 +3154,157 @@ static inline uint64_t *aa64_vfp_qreg(CPUARMState *env, unsigned regno)
/* Shared between translate-sve.c and sve_helper.c. */
extern const uint64_t pred_esz_masks[4];
+/*
+ * 32-bit feature tests via id registers.
+ */
+static inline bool isar_feature_thumb_div(const ARMISARegisters *id)
+{
+ return FIELD_EX32(id->id_isar0, ID_ISAR0, DIVIDE) != 0;
+}
+
+static inline bool isar_feature_arm_div(const ARMISARegisters *id)
+{
+ return FIELD_EX32(id->id_isar0, ID_ISAR0, DIVIDE) > 1;
+}
+
+static inline bool isar_feature_jazelle(const ARMISARegisters *id)
+{
+ return FIELD_EX32(id->id_isar1, ID_ISAR1, JAZELLE) != 0;
+}
+
+static inline bool isar_feature_aa32_aes(const ARMISARegisters *id)
+{
+ return FIELD_EX32(id->id_isar5, ID_ISAR5, AES) != 0;
+}
+
+static inline bool isar_feature_aa32_pmull(const ARMISARegisters *id)
+{
+ return FIELD_EX32(id->id_isar5, ID_ISAR5, AES) > 1;
+}
+
+static inline bool isar_feature_aa32_sha1(const ARMISARegisters *id)
+{
+ return FIELD_EX32(id->id_isar5, ID_ISAR5, SHA1) != 0;
+}
+
+static inline bool isar_feature_aa32_sha2(const ARMISARegisters *id)
+{
+ return FIELD_EX32(id->id_isar5, ID_ISAR5, SHA2) != 0;
+}
+
+static inline bool isar_feature_aa32_crc32(const ARMISARegisters *id)
+{
+ return FIELD_EX32(id->id_isar5, ID_ISAR5, CRC32) != 0;
+}
+
+static inline bool isar_feature_aa32_rdm(const ARMISARegisters *id)
+{
+ return FIELD_EX32(id->id_isar5, ID_ISAR5, RDM) != 0;
+}
+
+static inline bool isar_feature_aa32_vcma(const ARMISARegisters *id)
+{
+ return FIELD_EX32(id->id_isar5, ID_ISAR5, VCMA) != 0;
+}
+
+static inline bool isar_feature_aa32_dp(const ARMISARegisters *id)
+{
+ return FIELD_EX32(id->id_isar6, ID_ISAR6, DP) != 0;
+}
+
+static inline bool isar_feature_aa32_fp16_arith(const ARMISARegisters *id)
+{
+ /*
+ * This is a placeholder for use by VCMA until the rest of
+ * the ARMv8.2-FP16 extension is implemented for aa32 mode.
+ * At which point we can properly set and check MVFR1.FPHP.
+ */
+ return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, FP) == 1;
+}
+
+/*
+ * 64-bit feature tests via id registers.
+ */
+static inline bool isar_feature_aa64_aes(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, AES) != 0;
+}
+
+static inline bool isar_feature_aa64_pmull(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, AES) > 1;
+}
+
+static inline bool isar_feature_aa64_sha1(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SHA1) != 0;
+}
+
+static inline bool isar_feature_aa64_sha256(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SHA2) != 0;
+}
+
+static inline bool isar_feature_aa64_sha512(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SHA2) > 1;
+}
+
+static inline bool isar_feature_aa64_crc32(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, CRC32) != 0;
+}
+
+static inline bool isar_feature_aa64_atomics(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, ATOMIC) != 0;
+}
+
+static inline bool isar_feature_aa64_rdm(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, RDM) != 0;
+}
+
+static inline bool isar_feature_aa64_sha3(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SHA3) != 0;
+}
+
+static inline bool isar_feature_aa64_sm3(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SM3) != 0;
+}
+
+static inline bool isar_feature_aa64_sm4(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SM4) != 0;
+}
+
+static inline bool isar_feature_aa64_dp(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, DP) != 0;
+}
+
+static inline bool isar_feature_aa64_fcma(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, FCMA) != 0;
+}
+
+static inline bool isar_feature_aa64_fp16(const ARMISARegisters *id)
+{
+ /* We always set the AdvSIMD and FP fields identically wrt FP16. */
+ return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, FP) == 1;
+}
+
+static inline bool isar_feature_aa64_sve(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, SVE) != 0;
+}
+
+/*
+ * Forward to the above feature tests given an ARMCPU pointer.
+ */
+#define cpu_isar_feature(name, cpu) \
+ ({ ARMCPU *cpu_ = (cpu); isar_feature_##name(&cpu_->isar); })
+
#endif
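
All of the new isar_feature_* predicates follow one pattern: extract a 4-bit ID register field with FIELD_EX32/FIELD_EX64 and compare it against a threshold (non-zero for "present", greater than 1 for a higher level such as PMULL or SHA512). A standalone sketch of that pattern (the field position used here is believed to match ID_ISAR0.DIVIDE but is illustrative; the FIELD() definitions in cpu.h are authoritative):

    #include <stdint.h>
    #include <stdbool.h>

    /* What FIELD_EX32(reg, REG, FIELD) boils down to: pull "len" bits
     * starting at bit "start" out of a 32-bit ID register value. */
    static inline uint32_t field_ex32(uint32_t reg, int start, int len)
    {
        return (reg >> start) & ((1u << len) - 1);
    }

    /* ID_ISAR0.DIVIDE (bits [27:24]): 0 = no divide, 1 = Thumb UDIV/SDIV
     * only, 2 = ARM encoding as well -- mirroring isar_feature_thumb_div()
     * and isar_feature_arm_div() above. */
    static inline bool thumb_div(uint32_t id_isar0)
    {
        return field_ex32(id_isar0, 24, 4) != 0;
    }

    static inline bool arm_div(uint32_t id_isar0)
    {
        return field_ex32(id_isar0, 24, 4) > 1;
    }

The cpu_isar_feature() macro at the end simply forwards an ARMCPU pointer to these helpers, which is what the callers elsewhere in the series (elfload.c, syscall.c, the NVIC and translators) use instead of the removed ARM_FEATURE_* bits.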
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
index 44fdf0f6fa..873f059bf2 100644
--- a/target/arm/cpu64.c
+++ b/target/arm/cpu64.c
@@ -109,11 +109,6 @@ static void aarch64_a57_initfn(Object *obj)
set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
set_feature(&cpu->env, ARM_FEATURE_AARCH64);
set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
- set_feature(&cpu->env, ARM_FEATURE_V8_AES);
- set_feature(&cpu->env, ARM_FEATURE_V8_SHA1);
- set_feature(&cpu->env, ARM_FEATURE_V8_SHA256);
- set_feature(&cpu->env, ARM_FEATURE_V8_PMULL);
- set_feature(&cpu->env, ARM_FEATURE_CRC);
set_feature(&cpu->env, ARM_FEATURE_EL2);
set_feature(&cpu->env, ARM_FEATURE_EL3);
set_feature(&cpu->env, ARM_FEATURE_PMU);
@@ -121,9 +116,9 @@ static void aarch64_a57_initfn(Object *obj)
cpu->midr = 0x411fd070;
cpu->revidr = 0x00000000;
cpu->reset_fpsid = 0x41034070;
- cpu->mvfr0 = 0x10110222;
- cpu->mvfr1 = 0x12111111;
- cpu->mvfr2 = 0x00000043;
+ cpu->isar.mvfr0 = 0x10110222;
+ cpu->isar.mvfr1 = 0x12111111;
+ cpu->isar.mvfr2 = 0x00000043;
cpu->ctr = 0x8444c004;
cpu->reset_sctlr = 0x00c50838;
cpu->id_pfr0 = 0x00000131;
@@ -134,18 +129,18 @@ static void aarch64_a57_initfn(Object *obj)
cpu->id_mmfr1 = 0x40000000;
cpu->id_mmfr2 = 0x01260000;
cpu->id_mmfr3 = 0x02102211;
- cpu->id_isar0 = 0x02101110;
- cpu->id_isar1 = 0x13112111;
- cpu->id_isar2 = 0x21232042;
- cpu->id_isar3 = 0x01112131;
- cpu->id_isar4 = 0x00011142;
- cpu->id_isar5 = 0x00011121;
- cpu->id_isar6 = 0;
- cpu->id_aa64pfr0 = 0x00002222;
+ cpu->isar.id_isar0 = 0x02101110;
+ cpu->isar.id_isar1 = 0x13112111;
+ cpu->isar.id_isar2 = 0x21232042;
+ cpu->isar.id_isar3 = 0x01112131;
+ cpu->isar.id_isar4 = 0x00011142;
+ cpu->isar.id_isar5 = 0x00011121;
+ cpu->isar.id_isar6 = 0;
+ cpu->isar.id_aa64pfr0 = 0x00002222;
cpu->id_aa64dfr0 = 0x10305106;
cpu->pmceid0 = 0x00000000;
cpu->pmceid1 = 0x00000000;
- cpu->id_aa64isar0 = 0x00011120;
+ cpu->isar.id_aa64isar0 = 0x00011120;
cpu->id_aa64mmfr0 = 0x00001124;
cpu->dbgdidr = 0x3516d000;
cpu->clidr = 0x0a200023;
@@ -170,11 +165,6 @@ static void aarch64_a53_initfn(Object *obj)
set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
set_feature(&cpu->env, ARM_FEATURE_AARCH64);
set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
- set_feature(&cpu->env, ARM_FEATURE_V8_AES);
- set_feature(&cpu->env, ARM_FEATURE_V8_SHA1);
- set_feature(&cpu->env, ARM_FEATURE_V8_SHA256);
- set_feature(&cpu->env, ARM_FEATURE_V8_PMULL);
- set_feature(&cpu->env, ARM_FEATURE_CRC);
set_feature(&cpu->env, ARM_FEATURE_EL2);
set_feature(&cpu->env, ARM_FEATURE_EL3);
set_feature(&cpu->env, ARM_FEATURE_PMU);
@@ -182,9 +172,9 @@ static void aarch64_a53_initfn(Object *obj)
cpu->midr = 0x410fd034;
cpu->revidr = 0x00000000;
cpu->reset_fpsid = 0x41034070;
- cpu->mvfr0 = 0x10110222;
- cpu->mvfr1 = 0x12111111;
- cpu->mvfr2 = 0x00000043;
+ cpu->isar.mvfr0 = 0x10110222;
+ cpu->isar.mvfr1 = 0x12111111;
+ cpu->isar.mvfr2 = 0x00000043;
cpu->ctr = 0x84448004; /* L1Ip = VIPT */
cpu->reset_sctlr = 0x00c50838;
cpu->id_pfr0 = 0x00000131;
@@ -195,16 +185,16 @@ static void aarch64_a53_initfn(Object *obj)
cpu->id_mmfr1 = 0x40000000;
cpu->id_mmfr2 = 0x01260000;
cpu->id_mmfr3 = 0x02102211;
- cpu->id_isar0 = 0x02101110;
- cpu->id_isar1 = 0x13112111;
- cpu->id_isar2 = 0x21232042;
- cpu->id_isar3 = 0x01112131;
- cpu->id_isar4 = 0x00011142;
- cpu->id_isar5 = 0x00011121;
- cpu->id_isar6 = 0;
- cpu->id_aa64pfr0 = 0x00002222;
+ cpu->isar.id_isar0 = 0x02101110;
+ cpu->isar.id_isar1 = 0x13112111;
+ cpu->isar.id_isar2 = 0x21232042;
+ cpu->isar.id_isar3 = 0x01112131;
+ cpu->isar.id_isar4 = 0x00011142;
+ cpu->isar.id_isar5 = 0x00011121;
+ cpu->isar.id_isar6 = 0;
+ cpu->isar.id_aa64pfr0 = 0x00002222;
cpu->id_aa64dfr0 = 0x10305106;
- cpu->id_aa64isar0 = 0x00011120;
+ cpu->isar.id_aa64isar0 = 0x00011120;
cpu->id_aa64mmfr0 = 0x00001122; /* 40 bit physical addr */
cpu->dbgdidr = 0x3516d000;
cpu->clidr = 0x0a200023;
@@ -229,20 +219,15 @@ static void aarch64_a72_initfn(Object *obj)
set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
set_feature(&cpu->env, ARM_FEATURE_AARCH64);
set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
- set_feature(&cpu->env, ARM_FEATURE_V8_AES);
- set_feature(&cpu->env, ARM_FEATURE_V8_SHA1);
- set_feature(&cpu->env, ARM_FEATURE_V8_SHA256);
- set_feature(&cpu->env, ARM_FEATURE_V8_PMULL);
- set_feature(&cpu->env, ARM_FEATURE_CRC);
set_feature(&cpu->env, ARM_FEATURE_EL2);
set_feature(&cpu->env, ARM_FEATURE_EL3);
set_feature(&cpu->env, ARM_FEATURE_PMU);
cpu->midr = 0x410fd083;
cpu->revidr = 0x00000000;
cpu->reset_fpsid = 0x41034080;
- cpu->mvfr0 = 0x10110222;
- cpu->mvfr1 = 0x12111111;
- cpu->mvfr2 = 0x00000043;
+ cpu->isar.mvfr0 = 0x10110222;
+ cpu->isar.mvfr1 = 0x12111111;
+ cpu->isar.mvfr2 = 0x00000043;
cpu->ctr = 0x8444c004;
cpu->reset_sctlr = 0x00c50838;
cpu->id_pfr0 = 0x00000131;
@@ -253,17 +238,17 @@ static void aarch64_a72_initfn(Object *obj)
cpu->id_mmfr1 = 0x40000000;
cpu->id_mmfr2 = 0x01260000;
cpu->id_mmfr3 = 0x02102211;
- cpu->id_isar0 = 0x02101110;
- cpu->id_isar1 = 0x13112111;
- cpu->id_isar2 = 0x21232042;
- cpu->id_isar3 = 0x01112131;
- cpu->id_isar4 = 0x00011142;
- cpu->id_isar5 = 0x00011121;
- cpu->id_aa64pfr0 = 0x00002222;
+ cpu->isar.id_isar0 = 0x02101110;
+ cpu->isar.id_isar1 = 0x13112111;
+ cpu->isar.id_isar2 = 0x21232042;
+ cpu->isar.id_isar3 = 0x01112131;
+ cpu->isar.id_isar4 = 0x00011142;
+ cpu->isar.id_isar5 = 0x00011121;
+ cpu->isar.id_aa64pfr0 = 0x00002222;
cpu->id_aa64dfr0 = 0x10305106;
cpu->pmceid0 = 0x00000000;
cpu->pmceid1 = 0x00000000;
- cpu->id_aa64isar0 = 0x00011120;
+ cpu->isar.id_aa64isar0 = 0x00011120;
cpu->id_aa64mmfr0 = 0x00001124;
cpu->dbgdidr = 0x3516d000;
cpu->clidr = 0x0a200023;
@@ -312,24 +297,55 @@ static void aarch64_max_initfn(Object *obj)
if (kvm_enabled()) {
kvm_arm_set_cpu_features_from_host(cpu);
} else {
+ uint64_t t;
+ uint32_t u;
aarch64_a57_initfn(obj);
-#ifdef CONFIG_USER_ONLY
- /* We don't set these in system emulation mode for the moment,
- * since we don't correctly set the ID registers to advertise them,
- * and in some cases they're only available in AArch64 and not AArch32,
- * whereas the architecture requires them to be present in both if
- * present in either.
+
+ t = cpu->isar.id_aa64isar0;
+ t = FIELD_DP64(t, ID_AA64ISAR0, AES, 2); /* AES + PMULL */
+ t = FIELD_DP64(t, ID_AA64ISAR0, SHA1, 1);
+ t = FIELD_DP64(t, ID_AA64ISAR0, SHA2, 2); /* SHA512 */
+ t = FIELD_DP64(t, ID_AA64ISAR0, CRC32, 1);
+ t = FIELD_DP64(t, ID_AA64ISAR0, ATOMIC, 2);
+ t = FIELD_DP64(t, ID_AA64ISAR0, RDM, 1);
+ t = FIELD_DP64(t, ID_AA64ISAR0, SHA3, 1);
+ t = FIELD_DP64(t, ID_AA64ISAR0, SM3, 1);
+ t = FIELD_DP64(t, ID_AA64ISAR0, SM4, 1);
+ t = FIELD_DP64(t, ID_AA64ISAR0, DP, 1);
+ cpu->isar.id_aa64isar0 = t;
+
+ t = cpu->isar.id_aa64isar1;
+ t = FIELD_DP64(t, ID_AA64ISAR1, FCMA, 1);
+ cpu->isar.id_aa64isar1 = t;
+
+ t = cpu->isar.id_aa64pfr0;
+ t = FIELD_DP64(t, ID_AA64PFR0, SVE, 1);
+ t = FIELD_DP64(t, ID_AA64PFR0, FP, 1);
+ t = FIELD_DP64(t, ID_AA64PFR0, ADVSIMD, 1);
+ cpu->isar.id_aa64pfr0 = t;
+
+ /* Replicate the same data to the 32-bit id registers. */
+ u = cpu->isar.id_isar5;
+ u = FIELD_DP32(u, ID_ISAR5, AES, 2); /* AES + PMULL */
+ u = FIELD_DP32(u, ID_ISAR5, SHA1, 1);
+ u = FIELD_DP32(u, ID_ISAR5, SHA2, 1);
+ u = FIELD_DP32(u, ID_ISAR5, CRC32, 1);
+ u = FIELD_DP32(u, ID_ISAR5, RDM, 1);
+ u = FIELD_DP32(u, ID_ISAR5, VCMA, 1);
+ cpu->isar.id_isar5 = u;
+
+ u = cpu->isar.id_isar6;
+ u = FIELD_DP32(u, ID_ISAR6, DP, 1);
+ cpu->isar.id_isar6 = u;
+
+ /*
+ * FIXME: We do not yet support ARMv8.2-fp16 for AArch32,
+ * so do not set MVFR1.FPHP. Strictly speaking this is not legal,
+ * but it is also not legal to enable SVE without support for FP16,
+ * and enabling SVE in system mode is more useful in the short term.
*/
- set_feature(&cpu->env, ARM_FEATURE_V8_SHA512);
- set_feature(&cpu->env, ARM_FEATURE_V8_SHA3);
- set_feature(&cpu->env, ARM_FEATURE_V8_SM3);
- set_feature(&cpu->env, ARM_FEATURE_V8_SM4);
- set_feature(&cpu->env, ARM_FEATURE_V8_ATOMICS);
- set_feature(&cpu->env, ARM_FEATURE_V8_RDM);
- set_feature(&cpu->env, ARM_FEATURE_V8_DOTPROD);
- set_feature(&cpu->env, ARM_FEATURE_V8_FP16);
- set_feature(&cpu->env, ARM_FEATURE_V8_FCMA);
- set_feature(&cpu->env, ARM_FEATURE_SVE);
+
+#ifdef CONFIG_USER_ONLY
/* For usermode -cpu max we can use a larger and more efficient DCZ
* blocksize since we don't have to follow what the hardware does.
*/
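/*
 * Sketch of what the FIELD_DP64()/FIELD_DP32() calls above do: each call
 * deposits one small feature field into the ID register value.  This is a
 * standalone model (deposit64() is re-implemented here and the 0x00011120
 * starting value is the A53/A72 reset value shown above); it is not QEMU's
 * hw/registerfields.h implementation.
 */
#include <assert.h>
#include <inttypes.h>
#include <stdio.h>

static uint64_t deposit64(uint64_t value, int start, int length, uint64_t field)
{
    uint64_t mask = (~0ULL >> (64 - length)) << start;

    return (value & ~mask) | ((field << start) & mask);
}

int main(void)
{
    uint64_t isar0 = 0x00011120;

    /* ID_AA64ISAR0.Atomic is bits [23:20]; the value 2 advertises the LSE
     * atomics, mirroring FIELD_DP64(t, ID_AA64ISAR0, ATOMIC, 2) above. */
    isar0 = deposit64(isar0, 20, 4, 2);

    assert(((isar0 >> 20) & 0xf) == 2);
    printf("id_aa64isar0 = 0x%016" PRIx64 "\n", isar0); /* 0x0000000000211120 */
    return 0;
}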
diff --git a/target/arm/helper.c b/target/arm/helper.c
index e3946562aa..0ea95b0815 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -56,6 +56,8 @@ static void v8m_security_lookup(CPUARMState *env, uint32_t address,
V8M_SAttributes *sattrs);
#endif
+static void switch_mode(CPUARMState *env, int mode);
+
static int vfp_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
{
int nregs;
@@ -552,12 +554,61 @@ static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
raw_write(env, ri, value);
}
+/* IS variants of TLB operations must affect all cores */
+static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *cs = ENV_GET_CPU(env);
+
+ tlb_flush_all_cpus_synced(cs);
+}
+
+static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *cs = ENV_GET_CPU(env);
+
+ tlb_flush_all_cpus_synced(cs);
+}
+
+static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *cs = ENV_GET_CPU(env);
+
+ tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
+}
+
+static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *cs = ENV_GET_CPU(env);
+
+ tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
+}
+
+/*
+ * Non-IS variants of TLB operations are upgraded to
+ * IS versions if we are at NS EL1 and HCR_EL2.FB is set to
+ * force broadcast of these operations.
+ */
+static bool tlb_force_broadcast(CPUARMState *env)
+{
+ return (env->cp15.hcr_el2 & HCR_FB) &&
+ arm_current_el(env) == 1 && !arm_is_secure_below_el3(env);
+}
+
static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
/* Invalidate all (TLBIALL) */
ARMCPU *cpu = arm_env_get_cpu(env);
+ if (tlb_force_broadcast(env)) {
+ tlbiall_is_write(env, NULL, value);
+ return;
+ }
+
tlb_flush(CPU(cpu));
}
@@ -567,6 +618,11 @@ static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
/* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
ARMCPU *cpu = arm_env_get_cpu(env);
+ if (tlb_force_broadcast(env)) {
+ tlbimva_is_write(env, NULL, value);
+ return;
+ }
+
tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
}
@@ -576,6 +632,11 @@ static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
/* Invalidate by ASID (TLBIASID) */
ARMCPU *cpu = arm_env_get_cpu(env);
+ if (tlb_force_broadcast(env)) {
+ tlbiasid_is_write(env, NULL, value);
+ return;
+ }
+
tlb_flush(CPU(cpu));
}
@@ -585,40 +646,12 @@ static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
/* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
ARMCPU *cpu = arm_env_get_cpu(env);
- tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
-}
-
-/* IS variants of TLB operations must affect all cores */
-static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- CPUState *cs = ENV_GET_CPU(env);
-
- tlb_flush_all_cpus_synced(cs);
-}
-
-static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- CPUState *cs = ENV_GET_CPU(env);
-
- tlb_flush_all_cpus_synced(cs);
-}
-
-static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- CPUState *cs = ENV_GET_CPU(env);
-
- tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
-}
-
-static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- CPUState *cs = ENV_GET_CPU(env);
+ if (tlb_force_broadcast(env)) {
+ tlbimvaa_is_write(env, NULL, value);
+ return;
+ }
- tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
+ tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
}
static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -1296,12 +1329,26 @@ static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
CPUState *cs = ENV_GET_CPU(env);
uint64_t ret = 0;
- if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
- ret |= CPSR_I;
+ if (arm_hcr_el2_imo(env)) {
+ if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
+ ret |= CPSR_I;
+ }
+ } else {
+ if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
+ ret |= CPSR_I;
+ }
}
- if (cs->interrupt_request & CPU_INTERRUPT_FIQ) {
- ret |= CPSR_F;
+
+ if (arm_hcr_el2_fmo(env)) {
+ if (cs->interrupt_request & CPU_INTERRUPT_VFIQ) {
+ ret |= CPSR_F;
+ }
+ } else {
+ if (cs->interrupt_request & CPU_INTERRUPT_FIQ) {
+ ret |= CPSR_F;
+ }
}
+
/* External aborts are not possible in QEMU so A bit is always clear */
return ret;
}
@@ -2270,13 +2317,15 @@ static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
* * The Non-secure TTBCR.EAE bit is set to 1
* * The implementation includes EL2, and the value of HCR.VM is 1
*
+ * (Note that HCR.DC makes HCR.VM behave as if it is 1.)
+ *
* ATS1Hx always uses the 64bit format (not supported yet).
*/
format64 = arm_s1_regime_using_lpae_format(env, mmu_idx);
if (arm_feature(env, ARM_FEATURE_EL2)) {
if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
- format64 |= env->cp15.hcr_el2 & HCR_VM;
+ format64 |= env->cp15.hcr_el2 & (HCR_VM | HCR_DC);
} else {
format64 |= arm_current_el(env) == 2;
}
@@ -2709,12 +2758,10 @@ static void vmsa_tcr_el1_write(CPUARMState *env, const ARMCPRegInfo *ri,
static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
- /* 64 bit accesses to the TTBRs can change the ASID and so we
- * must flush the TLB.
- */
- if (cpreg_field_is_64bit(ri)) {
+ /* If the ASID changes (with a 64-bit write), we must flush the TLB. */
+ if (cpreg_field_is_64bit(ri) &&
+ extract64(raw_read(env, ri) ^ value, 48, 16) != 0) {
ARMCPU *cpu = arm_env_get_cpu(env);
-
tlb_flush(CPU(cpu));
}
raw_write(env, ri, value);
@@ -3083,22 +3130,6 @@ static CPAccessResult aa64_cacheop_access(CPUARMState *env,
* Page D4-1736 (DDI0487A.b)
*/
-static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- CPUState *cs = ENV_GET_CPU(env);
-
- if (arm_is_secure_below_el3(env)) {
- tlb_flush_by_mmuidx(cs,
- ARMMMUIdxBit_S1SE1 |
- ARMMMUIdxBit_S1SE0);
- } else {
- tlb_flush_by_mmuidx(cs,
- ARMMMUIdxBit_S12NSE1 |
- ARMMMUIdxBit_S12NSE0);
- }
-}
-
static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
@@ -3116,6 +3147,27 @@ static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
}
}
+static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *cs = ENV_GET_CPU(env);
+
+ if (tlb_force_broadcast(env)) {
+ tlbi_aa64_vmalle1is_write(env, NULL, value);
+ return;
+ }
+
+ if (arm_is_secure_below_el3(env)) {
+ tlb_flush_by_mmuidx(cs,
+ ARMMMUIdxBit_S1SE1 |
+ ARMMMUIdxBit_S1SE0);
+ } else {
+ tlb_flush_by_mmuidx(cs,
+ ARMMMUIdxBit_S12NSE1 |
+ ARMMMUIdxBit_S12NSE0);
+ }
+}
+
static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
@@ -3205,29 +3257,6 @@ static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E3);
}
-static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- /* Invalidate by VA, EL1&0 (AArch64 version).
- * Currently handles all of VAE1, VAAE1, VAALE1 and VALE1,
- * since we don't support flush-for-specific-ASID-only or
- * flush-last-level-only.
- */
- ARMCPU *cpu = arm_env_get_cpu(env);
- CPUState *cs = CPU(cpu);
- uint64_t pageaddr = sextract64(value << 12, 0, 56);
-
- if (arm_is_secure_below_el3(env)) {
- tlb_flush_page_by_mmuidx(cs, pageaddr,
- ARMMMUIdxBit_S1SE1 |
- ARMMMUIdxBit_S1SE0);
- } else {
- tlb_flush_page_by_mmuidx(cs, pageaddr,
- ARMMMUIdxBit_S12NSE1 |
- ARMMMUIdxBit_S12NSE0);
- }
-}
-
static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
@@ -3275,6 +3304,34 @@ static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
}
}
+static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /* Invalidate by VA, EL1&0 (AArch64 version).
+ * Currently handles all of VAE1, VAAE1, VAALE1 and VALE1,
+ * since we don't support flush-for-specific-ASID-only or
+ * flush-last-level-only.
+ */
+ ARMCPU *cpu = arm_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
+ uint64_t pageaddr = sextract64(value << 12, 0, 56);
+
+ if (tlb_force_broadcast(env)) {
+ tlbi_aa64_vae1is_write(env, NULL, value);
+ return;
+ }
+
+ if (arm_is_secure_below_el3(env)) {
+ tlb_flush_page_by_mmuidx(cs, pageaddr,
+ ARMMMUIdxBit_S1SE1 |
+ ARMMMUIdxBit_S1SE0);
+ } else {
+ tlb_flush_page_by_mmuidx(cs, pageaddr,
+ ARMMMUIdxBit_S12NSE1 |
+ ARMMMUIdxBit_S12NSE0);
+ }
+}
+
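/*
 * Worked example of the "sextract64(value << 12, 0, 56)" page-address
 * computation used by the VA-based TLBI handlers above.  The Xt value for
 * TLBI VAE1 carries VA[55:12] in bits [43:0] (and the ASID in [63:48]);
 * shifting left by 12 and sign-extending from bit 55 recovers the page
 * address, including the all-ones upper bits of a high (kernel) VA.
 * Standalone sketch: sextract64() is re-implemented and the Xt value is
 * made up for illustration.
 */
#include <inttypes.h>
#include <stdio.h>

static int64_t sextract64(uint64_t value, int start, int length)
{
    return ((int64_t)(value << (64 - length - start))) >> (64 - length);
}

int main(void)
{
    uint64_t xt = 0xfffff800123ULL;    /* VA[55:12]; ASID field left at 0 */
    uint64_t pageaddr = sextract64(xt << 12, 0, 56);

    printf("pageaddr = 0x%016" PRIx64 "\n", pageaddr); /* 0xfffffff800123000 */
    return 0;
}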
static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
@@ -3872,6 +3929,7 @@ static const ARMCPRegInfo el3_no_el2_v8_cp_reginfo[] = {
static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
ARMCPU *cpu = arm_env_get_cpu(env);
+ CPUState *cs = ENV_GET_CPU(env);
uint64_t valid_mask = HCR_MASK;
if (arm_feature(env, ARM_FEATURE_EL3)) {
@@ -3890,6 +3948,28 @@ static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
/* Clear RES0 bits. */
value &= valid_mask;
+ /*
+ * VI and VF are kept in cs->interrupt_request. Modifying that
+ * requires that we have the iothread lock, which is done by
+ * marking the reginfo structs as ARM_CP_IO.
+ * Note that if a write to HCR pends a VIRQ or VFIQ it is never
+ * possible for it to be taken immediately, because VIRQ and
+ * VFIQ are masked unless running at EL0 or EL1, and HCR
+ * can only be written at EL2.
+ */
+ g_assert(qemu_mutex_iothread_locked());
+ if (value & HCR_VI) {
+ cs->interrupt_request |= CPU_INTERRUPT_VIRQ;
+ } else {
+ cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
+ }
+ if (value & HCR_VF) {
+ cs->interrupt_request |= CPU_INTERRUPT_VFIQ;
+ } else {
+ cs->interrupt_request &= ~CPU_INTERRUPT_VFIQ;
+ }
+ value &= ~(HCR_VI | HCR_VF);
+
/* These bits change the MMU setup:
* HCR_VM enables stage 2 translation
* HCR_PTW forbids certain page-table setups
@@ -3917,16 +3997,32 @@ static void hcr_writelow(CPUARMState *env, const ARMCPRegInfo *ri,
hcr_write(env, NULL, value);
}
+static uint64_t hcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ /* The VI and VF bits live in cs->interrupt_request */
+ uint64_t ret = env->cp15.hcr_el2 & ~(HCR_VI | HCR_VF);
+ CPUState *cs = ENV_GET_CPU(env);
+
+ if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
+ ret |= HCR_VI;
+ }
+ if (cs->interrupt_request & CPU_INTERRUPT_VFIQ) {
+ ret |= HCR_VF;
+ }
+ return ret;
+}
+
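/*
 * Standalone sketch of the VI/VF handling above: the two virtual-interrupt
 * bits are never stored in the hcr_el2 backing field; a write latches them
 * into a separate pending-interrupt word (cs->interrupt_request in QEMU),
 * and a read reconstructs them from it.  The bit values below are
 * illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

#define HCR_VI   (1u << 7)
#define HCR_VF   (1u << 6)
#define IRQ_VIRQ (1u << 0)
#define IRQ_VFIQ (1u << 1)

static uint64_t hcr_el2;            /* stored value, VI/VF masked out */
static uint32_t interrupt_request;  /* stands in for cs->interrupt_request */

static void model_hcr_write(uint64_t value)
{
    interrupt_request &= ~(IRQ_VIRQ | IRQ_VFIQ);
    if (value & HCR_VI) {
        interrupt_request |= IRQ_VIRQ;
    }
    if (value & HCR_VF) {
        interrupt_request |= IRQ_VFIQ;
    }
    hcr_el2 = value & ~(uint64_t)(HCR_VI | HCR_VF);
}

static uint64_t model_hcr_read(void)
{
    uint64_t ret = hcr_el2;

    if (interrupt_request & IRQ_VIRQ) {
        ret |= HCR_VI;
    }
    if (interrupt_request & IRQ_VFIQ) {
        ret |= HCR_VF;
    }
    return ret;
}

int main(void)
{
    model_hcr_write(HCR_VI);                                     /* pend a VIRQ */
    printf("read-back VI: %d\n", !!(model_hcr_read() & HCR_VI)); /* 1 */
    printf("stored VI:    %d\n", !!(hcr_el2 & HCR_VI));          /* 0 */
    return 0;
}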
static const ARMCPRegInfo el2_cp_reginfo[] = {
{ .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
+ .type = ARM_CP_IO,
.opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
.access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
- .writefn = hcr_write },
+ .writefn = hcr_write, .readfn = hcr_read },
{ .name = "HCR", .state = ARM_CP_STATE_AA32,
- .type = ARM_CP_ALIAS,
+ .type = ARM_CP_ALIAS | ARM_CP_IO,
.cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
.access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
- .writefn = hcr_writelow },
+ .writefn = hcr_writelow, .readfn = hcr_read },
{ .name = "ELR_EL2", .state = ARM_CP_STATE_AA64,
.type = ARM_CP_ALIAS,
.opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1,
@@ -4163,7 +4259,7 @@ static const ARMCPRegInfo el2_cp_reginfo[] = {
static const ARMCPRegInfo el2_v8_cp_reginfo[] = {
{ .name = "HCR2", .state = ARM_CP_STATE_AA32,
- .type = ARM_CP_ALIAS,
+ .type = ARM_CP_ALIAS | ARM_CP_IO,
.cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
.access = PL2_RW,
.fieldoffset = offsetofhigh32(CPUARMState, cp15.hcr_el2),
@@ -4214,7 +4310,7 @@ static const ARMCPRegInfo el3_cp_reginfo[] = {
.fieldoffset = offsetof(CPUARMState, cp15.mvbar) },
{ .name = "TTBR0_EL3", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 0,
- .access = PL3_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
+ .access = PL3_RW, .resetvalue = 0,
.fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[3]) },
{ .name = "TCR_EL3", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 2,
@@ -4873,7 +4969,7 @@ static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri)
static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
ARMCPU *cpu = arm_env_get_cpu(env);
- uint64_t pfr0 = cpu->id_aa64pfr0;
+ uint64_t pfr0 = cpu->isar.id_aa64pfr0;
if (env->gicv3state) {
pfr0 |= 1 << 24;
@@ -4940,27 +5036,27 @@ void register_cp_regs_for_features(ARMCPU *cpu)
{ .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
.access = PL1_R, .type = ARM_CP_CONST,
- .resetvalue = cpu->id_isar0 },
+ .resetvalue = cpu->isar.id_isar0 },
{ .name = "ID_ISAR1", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 1,
.access = PL1_R, .type = ARM_CP_CONST,
- .resetvalue = cpu->id_isar1 },
+ .resetvalue = cpu->isar.id_isar1 },
{ .name = "ID_ISAR2", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
.access = PL1_R, .type = ARM_CP_CONST,
- .resetvalue = cpu->id_isar2 },
+ .resetvalue = cpu->isar.id_isar2 },
{ .name = "ID_ISAR3", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 3,
.access = PL1_R, .type = ARM_CP_CONST,
- .resetvalue = cpu->id_isar3 },
+ .resetvalue = cpu->isar.id_isar3 },
{ .name = "ID_ISAR4", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 4,
.access = PL1_R, .type = ARM_CP_CONST,
- .resetvalue = cpu->id_isar4 },
+ .resetvalue = cpu->isar.id_isar4 },
{ .name = "ID_ISAR5", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5,
.access = PL1_R, .type = ARM_CP_CONST,
- .resetvalue = cpu->id_isar5 },
+ .resetvalue = cpu->isar.id_isar5 },
{ .name = "ID_MMFR4", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 6,
.access = PL1_R, .type = ARM_CP_CONST,
@@ -4968,7 +5064,7 @@ void register_cp_regs_for_features(ARMCPU *cpu)
{ .name = "ID_ISAR6", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 7,
.access = PL1_R, .type = ARM_CP_CONST,
- .resetvalue = cpu->id_isar6 },
+ .resetvalue = cpu->isar.id_isar6 },
REGINFO_SENTINEL
};
define_arm_cp_regs(cpu, v6_idregs);
@@ -5039,7 +5135,7 @@ void register_cp_regs_for_features(ARMCPU *cpu)
{ .name = "ID_AA64PFR1_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1,
.access = PL1_R, .type = ARM_CP_CONST,
- .resetvalue = cpu->id_aa64pfr1},
+ .resetvalue = cpu->isar.id_aa64pfr1},
{ .name = "ID_AA64PFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 2,
.access = PL1_R, .type = ARM_CP_CONST,
@@ -5100,11 +5196,11 @@ void register_cp_regs_for_features(ARMCPU *cpu)
{ .name = "ID_AA64ISAR0_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0,
.access = PL1_R, .type = ARM_CP_CONST,
- .resetvalue = cpu->id_aa64isar0 },
+ .resetvalue = cpu->isar.id_aa64isar0 },
{ .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1,
.access = PL1_R, .type = ARM_CP_CONST,
- .resetvalue = cpu->id_aa64isar1 },
+ .resetvalue = cpu->isar.id_aa64isar1 },
{ .name = "ID_AA64ISAR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2,
.access = PL1_R, .type = ARM_CP_CONST,
@@ -5164,15 +5260,15 @@ void register_cp_regs_for_features(ARMCPU *cpu)
{ .name = "MVFR0_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0,
.access = PL1_R, .type = ARM_CP_CONST,
- .resetvalue = cpu->mvfr0 },
+ .resetvalue = cpu->isar.mvfr0 },
{ .name = "MVFR1_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1,
.access = PL1_R, .type = ARM_CP_CONST,
- .resetvalue = cpu->mvfr1 },
+ .resetvalue = cpu->isar.mvfr1 },
{ .name = "MVFR2_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2,
.access = PL1_R, .type = ARM_CP_CONST,
- .resetvalue = cpu->mvfr2 },
+ .resetvalue = cpu->isar.mvfr2 },
{ .name = "MVFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 3,
.access = PL1_R, .type = ARM_CP_CONST,
@@ -5618,7 +5714,7 @@ void register_cp_regs_for_features(ARMCPU *cpu)
define_one_arm_cp_reg(cpu, &sctlr);
}
- if (arm_feature(env, ARM_FEATURE_SVE)) {
+ if (cpu_isar_feature(aa64_sve, cpu)) {
define_one_arm_cp_reg(cpu, &zcr_el1_reginfo);
if (arm_feature(env, ARM_FEATURE_EL2)) {
define_one_arm_cp_reg(cpu, &zcr_el2_reginfo);
@@ -6208,7 +6304,17 @@ void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask,
mask |= CPSR_IL;
val |= CPSR_IL;
}
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "Illegal AArch32 mode switch attempt from %s to %s\n",
+ aarch32_mode_name(env->uncached_cpsr),
+ aarch32_mode_name(val));
} else {
+ qemu_log_mask(CPU_LOG_INT, "%s %s to %s PC 0x%" PRIx32 "\n",
+ write_type == CPSRWriteExceptionReturn ?
+ "Exception return from AArch32" :
+ "AArch32 mode switch from",
+ aarch32_mode_name(env->uncached_cpsr),
+ aarch32_mode_name(val), env->regs[15]);
switch_mode(env, val & CPSR_M);
}
}
@@ -6306,7 +6412,7 @@ uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
return 0;
}
-void switch_mode(CPUARMState *env, int mode)
+static void switch_mode(CPUARMState *env, int mode)
{
ARMCPU *cpu = arm_env_get_cpu(env);
@@ -6328,7 +6434,7 @@ void aarch64_sync_64_to_32(CPUARMState *env)
#else
-void switch_mode(CPUARMState *env, int mode)
+static void switch_mode(CPUARMState *env, int mode)
{
int old_mode;
int i;
@@ -8194,6 +8300,19 @@ static void arm_cpu_do_interrupt_aarch32_hyp(CPUState *cs)
}
if (cs->exception_index != EXCP_IRQ && cs->exception_index != EXCP_FIQ) {
+ if (!arm_feature(env, ARM_FEATURE_V8)) {
+ /*
+ * QEMU syndrome values are v8-style. v7 has the IL bit
+ * UNK/SBZP for "field not valid" cases, where v8 uses RES1.
+ * If this is a v7 CPU, squash the IL bit in those cases.
+ */
+ if (cs->exception_index == EXCP_PREFETCH_ABORT ||
+ (cs->exception_index == EXCP_DATA_ABORT &&
+ !(env->exception.syndrome & ARM_EL_ISV)) ||
+ syn_get_ec(env->exception.syndrome) == EC_UNCATEGORIZED) {
+ env->exception.syndrome &= ~ARM_EL_IL;
+ }
+ }
env->cp15.esr_el[2] = env->exception.syndrome;
}
@@ -8228,7 +8347,7 @@ static void arm_cpu_do_interrupt_aarch32(CPUState *cs)
uint32_t moe;
/* If this is a debug exception we must update the DBGDSCR.MOE bits */
- switch (env->exception.syndrome >> ARM_EL_EC_SHIFT) {
+ switch (syn_get_ec(env->exception.syndrome)) {
case EC_BREAKPOINT:
case EC_BREAKPOINT_SAME_EL:
moe = 1;
@@ -8425,6 +8544,15 @@ static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
case EXCP_HVC:
case EXCP_HYP_TRAP:
case EXCP_SMC:
+ if (syn_get_ec(env->exception.syndrome) == EC_ADVSIMDFPACCESSTRAP) {
+ /*
+ * QEMU internal FP/SIMD syndromes from AArch32 include the
+ * TA and coproc fields which are only exposed if the exception
+ * is taken to AArch32 Hyp mode. Mask them out to get a valid
+ * AArch64 format syndrome.
+ */
+ env->exception.syndrome &= ~MAKE_64BIT_MASK(0, 20);
+ }
env->cp15.esr_el[new_el] = env->exception.syndrome;
break;
case EXCP_IRQ:
@@ -8568,7 +8696,7 @@ void arm_cpu_do_interrupt(CPUState *cs)
if (qemu_loglevel_mask(CPU_LOG_INT)
&& !excp_is_internal(cs->exception_index)) {
qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%x/0x%" PRIx32 "\n",
- env->exception.syndrome >> ARM_EL_EC_SHIFT,
+ syn_get_ec(env->exception.syndrome),
env->exception.syndrome);
}
@@ -8665,7 +8793,8 @@ static inline bool regime_translation_disabled(CPUARMState *env,
}
if (mmu_idx == ARMMMUIdx_S2NS) {
- return (env->cp15.hcr_el2 & HCR_VM) == 0;
+ /* HCR.DC means HCR.VM behaves as 1 */
+ return (env->cp15.hcr_el2 & (HCR_DC | HCR_VM)) == 0;
}
if (env->cp15.hcr_el2 & HCR_TGE) {
@@ -8675,6 +8804,12 @@ static inline bool regime_translation_disabled(CPUARMState *env,
}
}
+ if ((env->cp15.hcr_el2 & HCR_DC) &&
+ (mmu_idx == ARMMMUIdx_S1NSE0 || mmu_idx == ARMMMUIdx_S1NSE1)) {
+ /* HCR.DC means SCTLR_EL1.M behaves as 0 */
+ return true;
+ }
+
return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0;
}
@@ -9026,9 +9161,20 @@ static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
hwaddr s2pa;
int s2prot;
int ret;
+ ARMCacheAttrs cacheattrs = {};
+ ARMCacheAttrs *pcacheattrs = NULL;
+
+ if (env->cp15.hcr_el2 & HCR_PTW) {
+ /*
+ * PTW means we must fault if this S1 walk touches S2 Device
+ * memory; otherwise we don't care about the attributes and can
+ * save the S2 translation the effort of computing them.
+ */
+ pcacheattrs = &cacheattrs;
+ }
ret = get_phys_addr_lpae(env, addr, 0, ARMMMUIdx_S2NS, &s2pa,
- &txattrs, &s2prot, &s2size, fi, NULL);
+ &txattrs, &s2prot, &s2size, fi, pcacheattrs);
if (ret) {
assert(fi->type != ARMFault_None);
fi->s2addr = addr;
@@ -9036,6 +9182,14 @@ static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
fi->s1ptw = true;
return ~0;
}
+ if (pcacheattrs && (pcacheattrs->attrs & 0xf0) == 0) {
+ /* Access was to Device memory: generate Permission fault */
+ fi->type = ARMFault_Permission;
+ fi->s2addr = addr;
+ fi->stage2 = true;
+ fi->s1ptw = true;
+ return ~0;
+ }
addr = s2pa;
}
return addr;
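/*
 * Sketch of the attribute test above.  cacheattrs.attrs holds a MAIR-style
 * attribute byte: an upper nibble of 0 means Device memory, anything else
 * is Normal memory, and 0xff is Normal Inner/Outer Write-Back
 * Read/Write-Allocate (the value HCR.DC forces further down).  Standalone
 * illustration only.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool is_device_memory(uint8_t mair_attr)
{
    return (mair_attr & 0xf0) == 0;
}

int main(void)
{
    printf("0x00: %d\n", is_device_memory(0x00)); /* 1: Device-nGnRnE */
    printf("0x04: %d\n", is_device_memory(0x04)); /* 1: Device-nGnRE */
    printf("0xff: %d\n", is_device_memory(0xff)); /* 0: Normal WB RAWA */
    return 0;
}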
@@ -10655,6 +10809,16 @@ static bool get_phys_addr(CPUARMState *env, target_ulong address,
/* Combine the S1 and S2 cache attributes, if needed */
if (!ret && cacheattrs != NULL) {
+ if (env->cp15.hcr_el2 & HCR_DC) {
+ /*
+ * HCR.DC forces the first stage attributes to
+ * Normal Non-Shareable,
+ * Inner Write-Back Read-Allocate Write-Allocate,
+ * Outer Write-Back Read-Allocate Write-Allocate.
+ */
+ cacheattrs->attrs = 0xff;
+ cacheattrs->shareability = 0;
+ }
*cacheattrs = combine_cacheattrs(*cacheattrs, cacheattrs2);
}
@@ -11612,7 +11776,7 @@ void HELPER(vfp_set_fpscr)(CPUARMState *env, uint32_t val)
uint32_t changed;
/* When ARMv8.2-FP16 is not supported, FZ16 is RES0. */
- if (!arm_feature(env, ARM_FEATURE_V8_FP16)) {
+ if (!cpu_isar_feature(aa64_fp16, arm_env_get_cpu(env))) {
val &= ~FPCR_FZ16;
}
@@ -12671,13 +12835,15 @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
uint32_t flags;
if (is_a64(env)) {
+ ARMCPU *cpu = arm_env_get_cpu(env);
+
*pc = env->pc;
flags = ARM_TBFLAG_AARCH64_STATE_MASK;
/* Get control bits for tagged addresses */
flags |= (arm_regime_tbi0(env, mmu_idx) << ARM_TBFLAG_TBI0_SHIFT);
flags |= (arm_regime_tbi1(env, mmu_idx) << ARM_TBFLAG_TBI1_SHIFT);
- if (arm_feature(env, ARM_FEATURE_SVE)) {
+ if (cpu_isar_feature(aa64_sve, cpu)) {
int sve_el = sve_exception_el(env, current_el);
uint32_t zcr_len;
@@ -12801,11 +12967,12 @@ void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq)
void aarch64_sve_change_el(CPUARMState *env, int old_el,
int new_el, bool el0_a64)
{
+ ARMCPU *cpu = arm_env_get_cpu(env);
int old_len, new_len;
bool old_a64, new_a64;
/* Nothing to do if no SVE. */
- if (!arm_feature(env, ARM_FEATURE_SVE)) {
+ if (!cpu_isar_feature(aa64_sve, cpu)) {
return;
}
diff --git a/target/arm/internals.h b/target/arm/internals.h
index a4fc709bcc..6c2bb2deeb 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -145,7 +145,6 @@ static inline int bank_number(int mode)
g_assert_not_reached();
}
-void switch_mode(CPUARMState *, int);
void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu);
void arm_translate_init(void);
@@ -279,14 +278,19 @@ enum arm_exception_class {
#define ARM_EL_IL (1 << ARM_EL_IL_SHIFT)
#define ARM_EL_ISV (1 << ARM_EL_ISV_SHIFT)
+static inline uint32_t syn_get_ec(uint32_t syn)
+{
+ return syn >> ARM_EL_EC_SHIFT;
+}
+
/* Utility functions for constructing various kinds of syndrome value.
* Note that in general we follow the AArch64 syndrome values; in a
* few cases the value in HSR for exceptions taken to AArch32 Hyp
- * mode differs slightly, so if we ever implemented Hyp mode then the
- * syndrome value would need some massaging on exception entry.
- * (One example of this is that AArch64 defaults to IL bit set for
- * exceptions which don't specifically indicate information about the
- * trapping instruction, whereas AArch32 defaults to IL bit clear.)
+ * mode differs slightly, and we fix this up when populating HSR in
+ * arm_cpu_do_interrupt_aarch32_hyp().
+ * The exception is FP/SIMD access traps -- these report extra information
+ * when taking an exception to AArch32. For those we include the extra coproc
+ * and TA fields, and mask them out when taking the exception to AArch64.
*/
static inline uint32_t syn_uncategorized(void)
{
@@ -386,9 +390,18 @@ static inline uint32_t syn_cp15_rrt_trap(int cv, int cond, int opc1, int crm,
static inline uint32_t syn_fp_access_trap(int cv, int cond, bool is_16bit)
{
+ /* AArch32 FP trap or any AArch64 FP/SIMD trap: TA == 0 coproc == 0xa */
+ return (EC_ADVSIMDFPACCESSTRAP << ARM_EL_EC_SHIFT)
+ | (is_16bit ? 0 : ARM_EL_IL)
+ | (cv << 24) | (cond << 20) | 0xa;
+}
+
+static inline uint32_t syn_simd_access_trap(int cv, int cond, bool is_16bit)
+{
+ /* AArch32 SIMD trap: TA == 1 coproc == 0 */
return (EC_ADVSIMDFPACCESSTRAP << ARM_EL_EC_SHIFT)
| (is_16bit ? 0 : ARM_EL_IL)
- | (cv << 24) | (cond << 20);
+ | (cv << 24) | (cond << 20) | (1 << 5);
}
static inline uint32_t syn_sve_access_trap(void)
@@ -840,4 +853,22 @@ static inline uint32_t v7m_sp_limit(CPUARMState *env)
}
}
+/**
+ * aarch32_mode_name(): Return name of the AArch32 CPU mode
+ * @psr: Program Status Register indicating CPU mode
+ *
+ * Returns, for debug logging purposes, a printable representation
+ * of the AArch32 CPU mode ("svc", "usr", etc) as indicated by
+ * the low bits of the specified PSR.
+ */
+static inline const char *aarch32_mode_name(uint32_t psr)
+{
+ static const char cpu_mode_names[16][4] = {
+ "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
+ "???", "???", "hyp", "und", "???", "???", "???", "sys"
+ };
+
+ return cpu_mode_names[psr & 0xf];
+}
+
#endif
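/*
 * Usage example for the aarch32_mode_name() helper added above: the low four
 * bits of the PSR select the mode name, so CPSR.M == 0x13 (Supervisor) maps
 * to entry 3.  The table is copied here only to make the snippet standalone.
 */
#include <stdint.h>
#include <stdio.h>

static const char *aarch32_mode_name(uint32_t psr)
{
    static const char cpu_mode_names[16][4] = {
        "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
        "???", "???", "hyp", "und", "???", "???", "???", "sys"
    };

    return cpu_mode_names[psr & 0xf];
}

int main(void)
{
    printf("%s\n", aarch32_mode_name(0x13)); /* svc */
    printf("%s\n", aarch32_mode_name(0x1a)); /* hyp */
    return 0;
}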
diff --git a/target/arm/kvm.c b/target/arm/kvm.c
index 54ef5f711b..09a86e2820 100644
--- a/target/arm/kvm.c
+++ b/target/arm/kvm.c
@@ -34,6 +34,7 @@ const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
};
static bool cap_has_mp_state;
+static bool cap_has_inject_serror_esr;
static ARMHostCPUFeatures arm_host_cpu_features;
@@ -48,6 +49,12 @@ int kvm_arm_vcpu_init(CPUState *cs)
return kvm_vcpu_ioctl(cs, KVM_ARM_VCPU_INIT, &init);
}
+void kvm_arm_init_serror_injection(CPUState *cs)
+{
+ cap_has_inject_serror_esr = kvm_check_extension(cs->kvm_state,
+ KVM_CAP_ARM_INJECT_SERROR_ESR);
+}
+
bool kvm_arm_create_scratch_host_vcpu(const uint32_t *cpus_to_try,
int *fdarray,
struct kvm_vcpu_init *init)
@@ -522,6 +529,59 @@ int kvm_arm_sync_mpstate_to_qemu(ARMCPU *cpu)
return 0;
}
+int kvm_put_vcpu_events(ARMCPU *cpu)
+{
+ CPUARMState *env = &cpu->env;
+ struct kvm_vcpu_events events;
+ int ret;
+
+ if (!kvm_has_vcpu_events()) {
+ return 0;
+ }
+
+ memset(&events, 0, sizeof(events));
+ events.exception.serror_pending = env->serror.pending;
+
+ /* Inject SError to guest with specified syndrome if host kernel
+ * supports it, otherwise inject SError without syndrome.
+ */
+ if (cap_has_inject_serror_esr) {
+ events.exception.serror_has_esr = env->serror.has_esr;
+ events.exception.serror_esr = env->serror.esr;
+ }
+
+ ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_VCPU_EVENTS, &events);
+ if (ret) {
+ error_report("failed to put vcpu events");
+ }
+
+ return ret;
+}
+
+int kvm_get_vcpu_events(ARMCPU *cpu)
+{
+ CPUARMState *env = &cpu->env;
+ struct kvm_vcpu_events events;
+ int ret;
+
+ if (!kvm_has_vcpu_events()) {
+ return 0;
+ }
+
+ memset(&events, 0, sizeof(events));
+ ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_VCPU_EVENTS, &events);
+ if (ret) {
+ error_report("failed to get vcpu events");
+ return ret;
+ }
+
+ env->serror.pending = events.exception.serror_pending;
+ env->serror.has_esr = events.exception.serror_has_esr;
+ env->serror.esr = events.exception.serror_esr;
+
+ return 0;
+}
+
void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
{
}
diff --git a/target/arm/kvm32.c b/target/arm/kvm32.c
index 4e91c11796..0f1e94c7b5 100644
--- a/target/arm/kvm32.c
+++ b/target/arm/kvm32.c
@@ -217,6 +217,9 @@ int kvm_arch_init_vcpu(CPUState *cs)
}
cpu->mp_affinity = mpidr & ARM32_AFFINITY_MASK;
+ /* Check whether userspace can specify guest syndrome value */
+ kvm_arm_init_serror_injection(cs);
+
return kvm_arm_init_cpreg_list(cpu);
}
@@ -358,6 +361,11 @@ int kvm_arch_put_registers(CPUState *cs, int level)
return ret;
}
+ ret = kvm_put_vcpu_events(cpu);
+ if (ret) {
+ return ret;
+ }
+
/* Note that we do not call write_cpustate_to_list()
* here, so we are only writing the tuple list back to
* KVM. This is safe because nothing can change the
@@ -445,6 +453,11 @@ int kvm_arch_get_registers(CPUState *cs)
}
vfp_set_fpscr(env, fpscr);
+ ret = kvm_get_vcpu_events(cpu);
+ if (ret) {
+ return ret;
+ }
+
if (!write_kvmstate_to_list(cpu)) {
return EINVAL;
}
diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c
index e0b8246283..5de8ff0ac5 100644
--- a/target/arm/kvm64.c
+++ b/target/arm/kvm64.c
@@ -546,6 +546,9 @@ int kvm_arch_init_vcpu(CPUState *cs)
kvm_arm_init_debug(cs);
+ /* Check whether user space can specify guest syndrome value */
+ kvm_arm_init_serror_injection(cs);
+
return kvm_arm_init_cpreg_list(cpu);
}
@@ -727,6 +730,11 @@ int kvm_arch_put_registers(CPUState *cs, int level)
return ret;
}
+ ret = kvm_put_vcpu_events(cpu);
+ if (ret) {
+ return ret;
+ }
+
if (!write_list_to_kvmstate(cpu, level)) {
return EINVAL;
}
@@ -863,6 +871,11 @@ int kvm_arch_get_registers(CPUState *cs)
}
vfp_set_fpcr(env, fpr);
+ ret = kvm_get_vcpu_events(cpu);
+ if (ret) {
+ return ret;
+ }
+
if (!write_kvmstate_to_list(cpu)) {
return EINVAL;
}
@@ -920,7 +933,7 @@ int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
bool kvm_arm_handle_debug(CPUState *cs, struct kvm_debug_exit_arch *debug_exit)
{
- int hsr_ec = debug_exit->hsr >> ARM_EL_EC_SHIFT;
+ int hsr_ec = syn_get_ec(debug_exit->hsr);
ARMCPU *cpu = ARM_CPU(cs);
CPUClass *cc = CPU_GET_CLASS(cs);
CPUARMState *env = &cpu->env;
diff --git a/target/arm/kvm_arm.h b/target/arm/kvm_arm.h
index 5948e8b560..21c0129da2 100644
--- a/target/arm/kvm_arm.h
+++ b/target/arm/kvm_arm.h
@@ -121,6 +121,30 @@ bool write_kvmstate_to_list(ARMCPU *cpu);
*/
void kvm_arm_reset_vcpu(ARMCPU *cpu);
+/**
+ * kvm_arm_init_serror_injection:
+ * @cs: CPUState
+ *
+ * Check whether KVM can set guest SError syndrome.
+ */
+void kvm_arm_init_serror_injection(CPUState *cs);
+
+/**
+ * kvm_get_vcpu_events:
+ * @cpu: ARMCPU
+ *
+ * Get VCPU related state from kvm.
+ */
+int kvm_get_vcpu_events(ARMCPU *cpu);
+
+/**
+ * kvm_put_vcpu_events:
+ * @cpu: ARMCPU
+ *
+ * Put VCPU related state to kvm.
+ */
+int kvm_put_vcpu_events(ARMCPU *cpu);
+
#ifdef CONFIG_KVM
/**
* kvm_arm_create_scratch_host_vcpu:
diff --git a/target/arm/machine.c b/target/arm/machine.c
index ff4ec22bf7..239fe4e84d 100644
--- a/target/arm/machine.c
+++ b/target/arm/machine.c
@@ -131,9 +131,8 @@ static const VMStateDescription vmstate_iwmmxt = {
static bool sve_needed(void *opaque)
{
ARMCPU *cpu = opaque;
- CPUARMState *env = &cpu->env;
- return arm_feature(env, ARM_FEATURE_SVE);
+ return cpu_isar_feature(aa64_sve, cpu);
}
/* The first two words of each Zreg is stored in VFP state. */
@@ -172,6 +171,27 @@ static const VMStateDescription vmstate_sve = {
};
#endif /* AARCH64 */
+static bool serror_needed(void *opaque)
+{
+ ARMCPU *cpu = opaque;
+ CPUARMState *env = &cpu->env;
+
+ return env->serror.pending != 0;
+}
+
+static const VMStateDescription vmstate_serror = {
+ .name = "cpu/serror",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = serror_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT8(env.serror.pending, ARMCPU),
+ VMSTATE_UINT8(env.serror.has_esr, ARMCPU),
+ VMSTATE_UINT64(env.serror.esr, ARMCPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
static bool m_needed(void *opaque)
{
ARMCPU *cpu = opaque;
@@ -726,6 +746,7 @@ const VMStateDescription vmstate_arm_cpu = {
#ifdef TARGET_AARCH64
&vmstate_sve,
#endif
+ &vmstate_serror,
NULL
}
};
diff --git a/target/arm/op_helper.c b/target/arm/op_helper.c
index d915579712..90741f6331 100644
--- a/target/arm/op_helper.c
+++ b/target/arm/op_helper.c
@@ -42,7 +42,7 @@ void raise_exception(CPUARMState *env, uint32_t excp,
* (see DDI0478C.a D1.10.4)
*/
target_el = 2;
- if (syndrome >> ARM_EL_EC_SHIFT == EC_ADVSIMDFPACCESSTRAP) {
+ if (syn_get_ec(syndrome) == EC_ADVSIMDFPACCESSTRAP) {
syndrome = syn_uncategorized();
}
}
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index bb9c4d8ac7..88195ab949 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -174,7 +174,7 @@ void aarch64_cpu_dump_state(CPUState *cs, FILE *f,
cpu_fprintf(f, " FPCR=%08x FPSR=%08x\n",
vfp_get_fpcr(env), vfp_get_fpsr(env));
- if (arm_feature(env, ARM_FEATURE_SVE) && sve_exception_el(env, el) == 0) {
+ if (cpu_isar_feature(aa64_sve, cpu) && sve_exception_el(env, el) == 0) {
int j, zcr_len = sve_zcr_len_for_el(env, el);
for (i = 0; i <= FFR_PRED_NUM; i++) {
@@ -1201,25 +1201,23 @@ static void write_vec_element_i32(DisasContext *s, TCGv_i32 tcg_src,
/* Store from vector register to memory */
static void do_vec_st(DisasContext *s, int srcidx, int element,
- TCGv_i64 tcg_addr, int size)
+ TCGv_i64 tcg_addr, int size, TCGMemOp endian)
{
- TCGMemOp memop = s->be_data + size;
TCGv_i64 tcg_tmp = tcg_temp_new_i64();
read_vec_element(s, tcg_tmp, srcidx, element, size);
- tcg_gen_qemu_st_i64(tcg_tmp, tcg_addr, get_mem_index(s), memop);
+ tcg_gen_qemu_st_i64(tcg_tmp, tcg_addr, get_mem_index(s), endian | size);
tcg_temp_free_i64(tcg_tmp);
}
/* Load from memory to vector register */
static void do_vec_ld(DisasContext *s, int destidx, int element,
- TCGv_i64 tcg_addr, int size)
+ TCGv_i64 tcg_addr, int size, TCGMemOp endian)
{
- TCGMemOp memop = s->be_data + size;
TCGv_i64 tcg_tmp = tcg_temp_new_i64();
- tcg_gen_qemu_ld_i64(tcg_tmp, tcg_addr, get_mem_index(s), memop);
+ tcg_gen_qemu_ld_i64(tcg_tmp, tcg_addr, get_mem_index(s), endian | size);
write_vec_element(s, tcg_tmp, destidx, element, size);
tcg_temp_free_i64(tcg_tmp);
@@ -2328,7 +2326,7 @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
}
if (rt2 == 31
&& ((rt | rs) & 1) == 0
- && arm_dc_feature(s, ARM_FEATURE_V8_ATOMICS)) {
+ && dc_isar_feature(aa64_atomics, s)) {
/* CASP / CASPL */
gen_compare_and_swap_pair(s, rs, rt, rn, size | 2);
return;
@@ -2350,7 +2348,7 @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
}
if (rt2 == 31
&& ((rt | rs) & 1) == 0
- && arm_dc_feature(s, ARM_FEATURE_V8_ATOMICS)) {
+ && dc_isar_feature(aa64_atomics, s)) {
/* CASPA / CASPAL */
gen_compare_and_swap_pair(s, rs, rt, rn, size | 2);
return;
@@ -2361,7 +2359,7 @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
case 0xb: /* CASL */
case 0xe: /* CASA */
case 0xf: /* CASAL */
- if (rt2 == 31 && arm_dc_feature(s, ARM_FEATURE_V8_ATOMICS)) {
+ if (rt2 == 31 && dc_isar_feature(aa64_atomics, s)) {
gen_compare_and_swap(s, rs, rt, rn, size);
return;
}
@@ -2900,11 +2898,10 @@ static void disas_ldst_atomic(DisasContext *s, uint32_t insn,
int rs = extract32(insn, 16, 5);
int rn = extract32(insn, 5, 5);
int o3_opc = extract32(insn, 12, 4);
- int feature = ARM_FEATURE_V8_ATOMICS;
TCGv_i64 tcg_rn, tcg_rs;
AtomicThreeOpFn *fn;
- if (is_vector) {
+ if (is_vector || !dc_isar_feature(aa64_atomics, s)) {
unallocated_encoding(s);
return;
}
@@ -2940,10 +2937,6 @@ static void disas_ldst_atomic(DisasContext *s, uint32_t insn,
unallocated_encoding(s);
return;
}
- if (!arm_dc_feature(s, feature)) {
- unallocated_encoding(s);
- return;
- }
if (rn == 31) {
gen_check_sp_alignment(s);
@@ -3023,10 +3016,11 @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
bool is_store = !extract32(insn, 22, 1);
bool is_postidx = extract32(insn, 23, 1);
bool is_q = extract32(insn, 30, 1);
- TCGv_i64 tcg_addr, tcg_rn;
+ TCGv_i64 tcg_addr, tcg_rn, tcg_ebytes;
+ TCGMemOp endian = s->be_data;
- int ebytes = 1 << size;
- int elements = (is_q ? 128 : 64) / (8 << size);
+ int ebytes; /* bytes per element */
+ int elements; /* elements per vector */
int rpt; /* num iterations */
int selem; /* structure elements */
int r;
@@ -3085,39 +3079,55 @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
gen_check_sp_alignment(s);
}
+ /* For our purposes, bytes are always little-endian. */
+ if (size == 0) {
+ endian = MO_LE;
+ }
+
+ /* Consecutive little-endian elements from a single register
+ * can be promoted to a larger little-endian operation.
+ */
+ if (selem == 1 && endian == MO_LE) {
+ size = 3;
+ }
+ ebytes = 1 << size;
+ elements = (is_q ? 16 : 8) / ebytes;
+
tcg_rn = cpu_reg_sp(s, rn);
tcg_addr = tcg_temp_new_i64();
tcg_gen_mov_i64(tcg_addr, tcg_rn);
+ tcg_ebytes = tcg_const_i64(ebytes);
for (r = 0; r < rpt; r++) {
int e;
for (e = 0; e < elements; e++) {
- int tt = (rt + r) % 32;
int xs;
for (xs = 0; xs < selem; xs++) {
+ int tt = (rt + r + xs) % 32;
if (is_store) {
- do_vec_st(s, tt, e, tcg_addr, size);
+ do_vec_st(s, tt, e, tcg_addr, size, endian);
} else {
- do_vec_ld(s, tt, e, tcg_addr, size);
-
- /* For non-quad operations, setting a slice of the low
- * 64 bits of the register clears the high 64 bits (in
- * the ARM ARM pseudocode this is implicit in the fact
- * that 'rval' is a 64 bit wide variable).
- * For quad operations, we might still need to zero the
- * high bits of SVE. We optimize by noticing that we only
- * need to do this the first time we touch a register.
- */
- if (e == 0 && (r == 0 || xs == selem - 1)) {
- clear_vec_high(s, is_q, tt);
- }
+ do_vec_ld(s, tt, e, tcg_addr, size, endian);
}
- tcg_gen_addi_i64(tcg_addr, tcg_addr, ebytes);
- tt = (tt + 1) % 32;
+ tcg_gen_add_i64(tcg_addr, tcg_addr, tcg_ebytes);
}
}
}
+ if (!is_store) {
+ /* For non-quad operations, setting a slice of the low
+ * 64 bits of the register clears the high 64 bits (in
+ * the ARM ARM pseudocode this is implicit in the fact
+ * that 'rval' is a 64 bit wide variable).
+ * For quad operations, we might still need to zero the
+ * high bits of SVE.
+ */
+ for (r = 0; r < rpt * selem; r++) {
+ int tt = (rt + r) % 32;
+ clear_vec_high(s, is_q, tt);
+ }
+ }
+
if (is_postidx) {
int rm = extract32(insn, 16, 5);
if (rm == 31) {
@@ -3126,6 +3136,7 @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, rm));
}
}
+ tcg_temp_free_i64(tcg_ebytes);
tcg_temp_free_i64(tcg_addr);
}
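/*
 * Worked example of the element-promotion arithmetic introduced above.
 * Standalone model: the big_endian flag stands in for s->be_data, and the
 * size/selem/is_q values are two sample LD1/LD4 encodings.
 */
#include <stdbool.h>
#include <stdio.h>

static void show(const char *what, int size, int selem, bool is_q, bool big_endian)
{
    bool little = (size == 0) ? true : !big_endian; /* bytes are always LE */

    if (selem == 1 && little) {
        size = 3;                   /* promote to whole 64-bit accesses */
    }
    int ebytes = 1 << size;                  /* bytes per element */
    int elements = (is_q ? 16 : 8) / ebytes; /* elements per vector */

    printf("%-20s -> %2d element(s) of %d byte(s)\n", what, elements, ebytes);
}

int main(void)
{
    show("LD1 {v0.16b}", 0, 1, true, false);        /*  2 elements of 8 bytes */
    show("LD4 {v0.16b-v3.16b}", 0, 4, true, false); /* 16 elements of 1 byte  */
    return 0;
}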
@@ -3168,7 +3179,7 @@ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
bool replicate = false;
int index = is_q << 3 | S << 2 | size;
int ebytes, xs;
- TCGv_i64 tcg_addr, tcg_rn;
+ TCGv_i64 tcg_addr, tcg_rn, tcg_ebytes;
switch (scale) {
case 3:
@@ -3221,49 +3232,28 @@ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
tcg_rn = cpu_reg_sp(s, rn);
tcg_addr = tcg_temp_new_i64();
tcg_gen_mov_i64(tcg_addr, tcg_rn);
+ tcg_ebytes = tcg_const_i64(ebytes);
for (xs = 0; xs < selem; xs++) {
if (replicate) {
/* Load and replicate to all elements */
- uint64_t mulconst;
TCGv_i64 tcg_tmp = tcg_temp_new_i64();
tcg_gen_qemu_ld_i64(tcg_tmp, tcg_addr,
get_mem_index(s), s->be_data + scale);
- switch (scale) {
- case 0:
- mulconst = 0x0101010101010101ULL;
- break;
- case 1:
- mulconst = 0x0001000100010001ULL;
- break;
- case 2:
- mulconst = 0x0000000100000001ULL;
- break;
- case 3:
- mulconst = 0;
- break;
- default:
- g_assert_not_reached();
- }
- if (mulconst) {
- tcg_gen_muli_i64(tcg_tmp, tcg_tmp, mulconst);
- }
- write_vec_element(s, tcg_tmp, rt, 0, MO_64);
- if (is_q) {
- write_vec_element(s, tcg_tmp, rt, 1, MO_64);
- }
+ tcg_gen_gvec_dup_i64(scale, vec_full_reg_offset(s, rt),
+ (is_q + 1) * 8, vec_full_reg_size(s),
+ tcg_tmp);
tcg_temp_free_i64(tcg_tmp);
- clear_vec_high(s, is_q, rt);
} else {
/* Load/store one element per register */
if (is_load) {
- do_vec_ld(s, rt, index, tcg_addr, scale);
+ do_vec_ld(s, rt, index, tcg_addr, scale, s->be_data);
} else {
- do_vec_st(s, rt, index, tcg_addr, scale);
+ do_vec_st(s, rt, index, tcg_addr, scale, s->be_data);
}
}
- tcg_gen_addi_i64(tcg_addr, tcg_addr, ebytes);
+ tcg_gen_add_i64(tcg_addr, tcg_addr, tcg_ebytes);
rt = (rt + 1) % 32;
}
@@ -3275,6 +3265,7 @@ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, rm));
}
}
+ tcg_temp_free_i64(tcg_ebytes);
tcg_temp_free_i64(tcg_addr);
}
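/*
 * The deleted code above replicated a loaded element across a 64-bit lane by
 * multiplying with a "mulconst" pattern; tcg_gen_gvec_dup_i64() now performs
 * the replication.  A quick standalone check of the arithmetic the old code
 * relied on:
 */
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
    uint64_t byte = 0xab, half = 0x1234;

    printf("0x%016" PRIx64 "\n", byte * 0x0101010101010101ULL); /* abababababababab */
    printf("0x%016" PRIx64 "\n", half * 0x0001000100010001ULL); /* 1234123412341234 */
    return 0;
}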
@@ -4574,7 +4565,7 @@ static void handle_crc32(DisasContext *s,
TCGv_i64 tcg_acc, tcg_val;
TCGv_i32 tcg_bytes;
- if (!arm_dc_feature(s, ARM_FEATURE_CRC)
+ if (!dc_isar_feature(aa64_crc32, s)
|| (sf == 1 && sz != 3)
|| (sf == 0 && sz == 3)) {
unallocated_encoding(s);
@@ -4816,7 +4807,7 @@ static void disas_fp_compare(DisasContext *s, uint32_t insn)
break;
case 3:
size = MO_16;
- if (arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
+ if (dc_isar_feature(aa64_fp16, s)) {
break;
}
/* fallthru */
@@ -4867,7 +4858,7 @@ static void disas_fp_ccomp(DisasContext *s, uint32_t insn)
break;
case 3:
size = MO_16;
- if (arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
+ if (dc_isar_feature(aa64_fp16, s)) {
break;
}
/* fallthru */
@@ -4933,7 +4924,7 @@ static void disas_fp_csel(DisasContext *s, uint32_t insn)
break;
case 3:
sz = MO_16;
- if (arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
+ if (dc_isar_feature(aa64_fp16, s)) {
break;
}
/* fallthru */
@@ -5266,7 +5257,7 @@ static void disas_fp_1src(DisasContext *s, uint32_t insn)
handle_fp_1src_double(s, opcode, rd, rn);
break;
case 3:
- if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
+ if (!dc_isar_feature(aa64_fp16, s)) {
unallocated_encoding(s);
return;
}
@@ -5481,7 +5472,7 @@ static void disas_fp_2src(DisasContext *s, uint32_t insn)
handle_fp_2src_double(s, opcode, rd, rn, rm);
break;
case 3:
- if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
+ if (!dc_isar_feature(aa64_fp16, s)) {
unallocated_encoding(s);
return;
}
@@ -5639,7 +5630,7 @@ static void disas_fp_3src(DisasContext *s, uint32_t insn)
handle_fp_3src_double(s, o0, o1, rd, rn, rm, ra);
break;
case 3:
- if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
+ if (!dc_isar_feature(aa64_fp16, s)) {
unallocated_encoding(s);
return;
}
@@ -5709,7 +5700,7 @@ static void disas_fp_imm(DisasContext *s, uint32_t insn)
break;
case 3:
sz = MO_16;
- if (arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
+ if (dc_isar_feature(aa64_fp16, s)) {
break;
}
/* fallthru */
@@ -5934,7 +5925,7 @@ static void disas_fp_fixed_conv(DisasContext *s, uint32_t insn)
case 1: /* float64 */
break;
case 3: /* float16 */
- if (arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
+ if (dc_isar_feature(aa64_fp16, s)) {
break;
}
/* fallthru */
@@ -6064,7 +6055,7 @@ static void disas_fp_int_conv(DisasContext *s, uint32_t insn)
break;
case 0x6: /* 16-bit float, 32-bit int */
case 0xe: /* 16-bit float, 64-bit int */
- if (arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
+ if (dc_isar_feature(aa64_fp16, s)) {
break;
}
/* fallthru */
@@ -6091,7 +6082,7 @@ static void disas_fp_int_conv(DisasContext *s, uint32_t insn)
case 1: /* float64 */
break;
case 3: /* float16 */
- if (arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
+ if (dc_isar_feature(aa64_fp16, s)) {
break;
}
/* fallthru */
@@ -6528,7 +6519,7 @@ static void disas_simd_across_lanes(DisasContext *s, uint32_t insn)
*/
is_min = extract32(size, 1, 1);
is_fp = true;
- if (!is_u && arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
+ if (!is_u && dc_isar_feature(aa64_fp16, s)) {
size = 1;
} else if (!is_u || !is_q || extract32(size, 0, 1)) {
unallocated_encoding(s);
@@ -6924,7 +6915,7 @@ static void disas_simd_mod_imm(DisasContext *s, uint32_t insn)
if (o2 != 0 || ((cmode == 0xf) && is_neg && !is_q)) {
/* Check for FMOV (vector, immediate) - half-precision */
- if (!(arm_dc_feature(s, ARM_FEATURE_V8_FP16) && o2 && cmode == 0xf)) {
+ if (!(dc_isar_feature(aa64_fp16, s) && o2 && cmode == 0xf)) {
unallocated_encoding(s);
return;
}
@@ -7091,7 +7082,7 @@ static void disas_simd_scalar_pairwise(DisasContext *s, uint32_t insn)
case 0x2f: /* FMINP */
/* FP op, size[0] is 32 or 64 bit*/
if (!u) {
- if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
+ if (!dc_isar_feature(aa64_fp16, s)) {
unallocated_encoding(s);
return;
} else {
@@ -7736,7 +7727,7 @@ static void handle_simd_shift_intfp_conv(DisasContext *s, bool is_scalar,
size = MO_32;
} else if (immh & 2) {
size = MO_16;
- if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
+ if (!dc_isar_feature(aa64_fp16, s)) {
unallocated_encoding(s);
return;
}
@@ -7781,7 +7772,7 @@ static void handle_simd_shift_fpint_conv(DisasContext *s, bool is_scalar,
size = MO_32;
} else if (immh & 0x2) {
size = MO_16;
- if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
+ if (!dc_isar_feature(aa64_fp16, s)) {
unallocated_encoding(s);
return;
}
@@ -8046,28 +8037,6 @@ static void disas_simd_scalar_three_reg_diff(DisasContext *s, uint32_t insn)
}
}
-/* CMTST : test is "if (X & Y != 0)". */
-static void gen_cmtst_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
-{
- tcg_gen_and_i32(d, a, b);
- tcg_gen_setcondi_i32(TCG_COND_NE, d, d, 0);
- tcg_gen_neg_i32(d, d);
-}
-
-static void gen_cmtst_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
-{
- tcg_gen_and_i64(d, a, b);
- tcg_gen_setcondi_i64(TCG_COND_NE, d, d, 0);
- tcg_gen_neg_i64(d, d);
-}
-
-static void gen_cmtst_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
-{
- tcg_gen_and_vec(vece, d, a, b);
- tcg_gen_dupi_vec(vece, a, 0);
- tcg_gen_cmp_vec(TCG_COND_NE, vece, d, d, a);
-}
-
static void handle_3same_64(DisasContext *s, int opcode, bool u,
TCGv_i64 tcg_rd, TCGv_i64 tcg_rn, TCGv_i64 tcg_rm)
{
@@ -8545,7 +8514,7 @@ static void disas_simd_scalar_three_reg_same_fp16(DisasContext *s,
return;
}
- if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
+ if (!dc_isar_feature(aa64_fp16, s)) {
unallocated_encoding(s);
}
@@ -8618,7 +8587,7 @@ static void disas_simd_scalar_three_reg_same_extra(DisasContext *s,
bool u = extract32(insn, 29, 1);
TCGv_i32 ele1, ele2, ele3;
TCGv_i64 res;
- int feature;
+ bool feature;
switch (u * 16 + opcode) {
case 0x10: /* SQRDMLAH (vector) */
@@ -8627,13 +8596,13 @@ static void disas_simd_scalar_three_reg_same_extra(DisasContext *s,
unallocated_encoding(s);
return;
}
- feature = ARM_FEATURE_V8_RDM;
+ feature = dc_isar_feature(aa64_rdm, s);
break;
default:
unallocated_encoding(s);
return;
}
- if (!arm_dc_feature(s, feature)) {
+ if (!feature) {
unallocated_encoding(s);
return;
}
@@ -9407,191 +9376,10 @@ static void disas_simd_scalar_two_reg_misc(DisasContext *s, uint32_t insn)
}
}
-static void gen_ssra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
-{
- tcg_gen_vec_sar8i_i64(a, a, shift);
- tcg_gen_vec_add8_i64(d, d, a);
-}
-
-static void gen_ssra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
-{
- tcg_gen_vec_sar16i_i64(a, a, shift);
- tcg_gen_vec_add16_i64(d, d, a);
-}
-
-static void gen_ssra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
-{
- tcg_gen_sari_i32(a, a, shift);
- tcg_gen_add_i32(d, d, a);
-}
-
-static void gen_ssra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
-{
- tcg_gen_sari_i64(a, a, shift);
- tcg_gen_add_i64(d, d, a);
-}
-
-static void gen_ssra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
-{
- tcg_gen_sari_vec(vece, a, a, sh);
- tcg_gen_add_vec(vece, d, d, a);
-}
-
-static void gen_usra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
-{
- tcg_gen_vec_shr8i_i64(a, a, shift);
- tcg_gen_vec_add8_i64(d, d, a);
-}
-
-static void gen_usra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
-{
- tcg_gen_vec_shr16i_i64(a, a, shift);
- tcg_gen_vec_add16_i64(d, d, a);
-}
-
-static void gen_usra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
-{
- tcg_gen_shri_i32(a, a, shift);
- tcg_gen_add_i32(d, d, a);
-}
-
-static void gen_usra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
-{
- tcg_gen_shri_i64(a, a, shift);
- tcg_gen_add_i64(d, d, a);
-}
-
-static void gen_usra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
-{
- tcg_gen_shri_vec(vece, a, a, sh);
- tcg_gen_add_vec(vece, d, d, a);
-}
-
-static void gen_shr8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
-{
- uint64_t mask = dup_const(MO_8, 0xff >> shift);
- TCGv_i64 t = tcg_temp_new_i64();
-
- tcg_gen_shri_i64(t, a, shift);
- tcg_gen_andi_i64(t, t, mask);
- tcg_gen_andi_i64(d, d, ~mask);
- tcg_gen_or_i64(d, d, t);
- tcg_temp_free_i64(t);
-}
-
-static void gen_shr16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
-{
- uint64_t mask = dup_const(MO_16, 0xffff >> shift);
- TCGv_i64 t = tcg_temp_new_i64();
-
- tcg_gen_shri_i64(t, a, shift);
- tcg_gen_andi_i64(t, t, mask);
- tcg_gen_andi_i64(d, d, ~mask);
- tcg_gen_or_i64(d, d, t);
- tcg_temp_free_i64(t);
-}
-
-static void gen_shr32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
-{
- tcg_gen_shri_i32(a, a, shift);
- tcg_gen_deposit_i32(d, d, a, 0, 32 - shift);
-}
-
-static void gen_shr64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
-{
- tcg_gen_shri_i64(a, a, shift);
- tcg_gen_deposit_i64(d, d, a, 0, 64 - shift);
-}
-
-static void gen_shr_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
-{
- uint64_t mask = (2ull << ((8 << vece) - 1)) - 1;
- TCGv_vec t = tcg_temp_new_vec_matching(d);
- TCGv_vec m = tcg_temp_new_vec_matching(d);
-
- tcg_gen_dupi_vec(vece, m, mask ^ (mask >> sh));
- tcg_gen_shri_vec(vece, t, a, sh);
- tcg_gen_and_vec(vece, d, d, m);
- tcg_gen_or_vec(vece, d, d, t);
-
- tcg_temp_free_vec(t);
- tcg_temp_free_vec(m);
-}
-
/* SSHR[RA]/USHR[RA] - Vector shift right (optional rounding/accumulate) */
static void handle_vec_simd_shri(DisasContext *s, bool is_q, bool is_u,
int immh, int immb, int opcode, int rn, int rd)
{
- static const GVecGen2i ssra_op[4] = {
- { .fni8 = gen_ssra8_i64,
- .fniv = gen_ssra_vec,
- .load_dest = true,
- .opc = INDEX_op_sari_vec,
- .vece = MO_8 },
- { .fni8 = gen_ssra16_i64,
- .fniv = gen_ssra_vec,
- .load_dest = true,
- .opc = INDEX_op_sari_vec,
- .vece = MO_16 },
- { .fni4 = gen_ssra32_i32,
- .fniv = gen_ssra_vec,
- .load_dest = true,
- .opc = INDEX_op_sari_vec,
- .vece = MO_32 },
- { .fni8 = gen_ssra64_i64,
- .fniv = gen_ssra_vec,
- .prefer_i64 = TCG_TARGET_REG_BITS == 64,
- .load_dest = true,
- .opc = INDEX_op_sari_vec,
- .vece = MO_64 },
- };
- static const GVecGen2i usra_op[4] = {
- { .fni8 = gen_usra8_i64,
- .fniv = gen_usra_vec,
- .load_dest = true,
- .opc = INDEX_op_shri_vec,
- .vece = MO_8, },
- { .fni8 = gen_usra16_i64,
- .fniv = gen_usra_vec,
- .load_dest = true,
- .opc = INDEX_op_shri_vec,
- .vece = MO_16, },
- { .fni4 = gen_usra32_i32,
- .fniv = gen_usra_vec,
- .load_dest = true,
- .opc = INDEX_op_shri_vec,
- .vece = MO_32, },
- { .fni8 = gen_usra64_i64,
- .fniv = gen_usra_vec,
- .prefer_i64 = TCG_TARGET_REG_BITS == 64,
- .load_dest = true,
- .opc = INDEX_op_shri_vec,
- .vece = MO_64, },
- };
- static const GVecGen2i sri_op[4] = {
- { .fni8 = gen_shr8_ins_i64,
- .fniv = gen_shr_ins_vec,
- .load_dest = true,
- .opc = INDEX_op_shri_vec,
- .vece = MO_8 },
- { .fni8 = gen_shr16_ins_i64,
- .fniv = gen_shr_ins_vec,
- .load_dest = true,
- .opc = INDEX_op_shri_vec,
- .vece = MO_16 },
- { .fni4 = gen_shr32_ins_i32,
- .fniv = gen_shr_ins_vec,
- .load_dest = true,
- .opc = INDEX_op_shri_vec,
- .vece = MO_32 },
- { .fni8 = gen_shr64_ins_i64,
- .fniv = gen_shr_ins_vec,
- .prefer_i64 = TCG_TARGET_REG_BITS == 64,
- .load_dest = true,
- .opc = INDEX_op_shri_vec,
- .vece = MO_64 },
- };
-
int size = 32 - clz32(immh) - 1;
int immhb = immh << 3 | immb;
int shift = 2 * (8 << size) - immhb;
@@ -9687,85 +9475,10 @@ static void handle_vec_simd_shri(DisasContext *s, bool is_q, bool is_u,
clear_vec_high(s, is_q, rd);
}
-static void gen_shl8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
-{
- uint64_t mask = dup_const(MO_8, 0xff << shift);
- TCGv_i64 t = tcg_temp_new_i64();
-
- tcg_gen_shli_i64(t, a, shift);
- tcg_gen_andi_i64(t, t, mask);
- tcg_gen_andi_i64(d, d, ~mask);
- tcg_gen_or_i64(d, d, t);
- tcg_temp_free_i64(t);
-}
-
-static void gen_shl16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
-{
- uint64_t mask = dup_const(MO_16, 0xffff << shift);
- TCGv_i64 t = tcg_temp_new_i64();
-
- tcg_gen_shli_i64(t, a, shift);
- tcg_gen_andi_i64(t, t, mask);
- tcg_gen_andi_i64(d, d, ~mask);
- tcg_gen_or_i64(d, d, t);
- tcg_temp_free_i64(t);
-}
-
-static void gen_shl32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
-{
- tcg_gen_deposit_i32(d, d, a, shift, 32 - shift);
-}
-
-static void gen_shl64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
-{
- tcg_gen_deposit_i64(d, d, a, shift, 64 - shift);
-}
-
-static void gen_shl_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
-{
- uint64_t mask = (1ull << sh) - 1;
- TCGv_vec t = tcg_temp_new_vec_matching(d);
- TCGv_vec m = tcg_temp_new_vec_matching(d);
-
- tcg_gen_dupi_vec(vece, m, mask);
- tcg_gen_shli_vec(vece, t, a, sh);
- tcg_gen_and_vec(vece, d, d, m);
- tcg_gen_or_vec(vece, d, d, t);
-
- tcg_temp_free_vec(t);
- tcg_temp_free_vec(m);
-}
-
/* SHL/SLI - Vector shift left */
static void handle_vec_simd_shli(DisasContext *s, bool is_q, bool insert,
int immh, int immb, int opcode, int rn, int rd)
{
- static const GVecGen2i shi_op[4] = {
- { .fni8 = gen_shl8_ins_i64,
- .fniv = gen_shl_ins_vec,
- .opc = INDEX_op_shli_vec,
- .prefer_i64 = TCG_TARGET_REG_BITS == 64,
- .load_dest = true,
- .vece = MO_8 },
- { .fni8 = gen_shl16_ins_i64,
- .fniv = gen_shl_ins_vec,
- .opc = INDEX_op_shli_vec,
- .prefer_i64 = TCG_TARGET_REG_BITS == 64,
- .load_dest = true,
- .vece = MO_16 },
- { .fni4 = gen_shl32_ins_i32,
- .fniv = gen_shl_ins_vec,
- .opc = INDEX_op_shli_vec,
- .prefer_i64 = TCG_TARGET_REG_BITS == 64,
- .load_dest = true,
- .vece = MO_32 },
- { .fni8 = gen_shl64_ins_i64,
- .fniv = gen_shl_ins_vec,
- .opc = INDEX_op_shli_vec,
- .prefer_i64 = TCG_TARGET_REG_BITS == 64,
- .load_dest = true,
- .vece = MO_64 },
- };
int size = 32 - clz32(immh) - 1;
int immhb = immh << 3 | immb;
int shift = immhb - (8 << size);
@@ -9785,7 +9498,7 @@ static void handle_vec_simd_shli(DisasContext *s, bool is_q, bool insert,
}
if (insert) {
- gen_gvec_op2i(s, is_q, rd, rn, shift, &shi_op[size]);
+ gen_gvec_op2i(s, is_q, rd, rn, shift, &sli_op[size]);
} else {
gen_gvec_fn2i(s, is_q, rd, rn, shift, tcg_gen_gvec_shli, size);
}
@@ -10362,7 +10075,7 @@ static void disas_simd_three_reg_diff(DisasContext *s, uint32_t insn)
return;
}
if (size == 3) {
- if (!arm_dc_feature(s, ARM_FEATURE_V8_PMULL)) {
+ if (!dc_isar_feature(aa64_pmull, s)) {
unallocated_encoding(s);
return;
}
@@ -10407,70 +10120,9 @@ static void disas_simd_three_reg_diff(DisasContext *s, uint32_t insn)
}
}
-static void gen_bsl_i64(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
-{
- tcg_gen_xor_i64(rn, rn, rm);
- tcg_gen_and_i64(rn, rn, rd);
- tcg_gen_xor_i64(rd, rm, rn);
-}
-
-static void gen_bit_i64(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
-{
- tcg_gen_xor_i64(rn, rn, rd);
- tcg_gen_and_i64(rn, rn, rm);
- tcg_gen_xor_i64(rd, rd, rn);
-}
-
-static void gen_bif_i64(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
-{
- tcg_gen_xor_i64(rn, rn, rd);
- tcg_gen_andc_i64(rn, rn, rm);
- tcg_gen_xor_i64(rd, rd, rn);
-}
-
-static void gen_bsl_vec(unsigned vece, TCGv_vec rd, TCGv_vec rn, TCGv_vec rm)
-{
- tcg_gen_xor_vec(vece, rn, rn, rm);
- tcg_gen_and_vec(vece, rn, rn, rd);
- tcg_gen_xor_vec(vece, rd, rm, rn);
-}
-
-static void gen_bit_vec(unsigned vece, TCGv_vec rd, TCGv_vec rn, TCGv_vec rm)
-{
- tcg_gen_xor_vec(vece, rn, rn, rd);
- tcg_gen_and_vec(vece, rn, rn, rm);
- tcg_gen_xor_vec(vece, rd, rd, rn);
-}
-
-static void gen_bif_vec(unsigned vece, TCGv_vec rd, TCGv_vec rn, TCGv_vec rm)
-{
- tcg_gen_xor_vec(vece, rn, rn, rd);
- tcg_gen_andc_vec(vece, rn, rn, rm);
- tcg_gen_xor_vec(vece, rd, rd, rn);
-}
-
/* Logic op (opcode == 3) subgroup of C3.6.16. */
static void disas_simd_3same_logic(DisasContext *s, uint32_t insn)
{
- static const GVecGen3 bsl_op = {
- .fni8 = gen_bsl_i64,
- .fniv = gen_bsl_vec,
- .prefer_i64 = TCG_TARGET_REG_BITS == 64,
- .load_dest = true
- };
- static const GVecGen3 bit_op = {
- .fni8 = gen_bit_i64,
- .fniv = gen_bit_vec,
- .prefer_i64 = TCG_TARGET_REG_BITS == 64,
- .load_dest = true
- };
- static const GVecGen3 bif_op = {
- .fni8 = gen_bif_i64,
- .fniv = gen_bif_vec,
- .prefer_i64 = TCG_TARGET_REG_BITS == 64,
- .load_dest = true
- };
-
int rd = extract32(insn, 0, 5);
int rn = extract32(insn, 5, 5);
int rm = extract32(insn, 16, 5);
@@ -10742,131 +10394,9 @@ static void disas_simd_3same_float(DisasContext *s, uint32_t insn)
}
}
-static void gen_mla8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
-{
- gen_helper_neon_mul_u8(a, a, b);
- gen_helper_neon_add_u8(d, d, a);
-}
-
-static void gen_mla16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
-{
- gen_helper_neon_mul_u16(a, a, b);
- gen_helper_neon_add_u16(d, d, a);
-}
-
-static void gen_mla32_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
-{
- tcg_gen_mul_i32(a, a, b);
- tcg_gen_add_i32(d, d, a);
-}
-
-static void gen_mla64_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
-{
- tcg_gen_mul_i64(a, a, b);
- tcg_gen_add_i64(d, d, a);
-}
-
-static void gen_mla_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
-{
- tcg_gen_mul_vec(vece, a, a, b);
- tcg_gen_add_vec(vece, d, d, a);
-}
-
-static void gen_mls8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
-{
- gen_helper_neon_mul_u8(a, a, b);
- gen_helper_neon_sub_u8(d, d, a);
-}
-
-static void gen_mls16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
-{
- gen_helper_neon_mul_u16(a, a, b);
- gen_helper_neon_sub_u16(d, d, a);
-}
-
-static void gen_mls32_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
-{
- tcg_gen_mul_i32(a, a, b);
- tcg_gen_sub_i32(d, d, a);
-}
-
-static void gen_mls64_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
-{
- tcg_gen_mul_i64(a, a, b);
- tcg_gen_sub_i64(d, d, a);
-}
-
-static void gen_mls_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
-{
- tcg_gen_mul_vec(vece, a, a, b);
- tcg_gen_sub_vec(vece, d, d, a);
-}
-
/* Integer op subgroup of C3.6.16. */
static void disas_simd_3same_int(DisasContext *s, uint32_t insn)
{
- static const GVecGen3 cmtst_op[4] = {
- { .fni4 = gen_helper_neon_tst_u8,
- .fniv = gen_cmtst_vec,
- .vece = MO_8 },
- { .fni4 = gen_helper_neon_tst_u16,
- .fniv = gen_cmtst_vec,
- .vece = MO_16 },
- { .fni4 = gen_cmtst_i32,
- .fniv = gen_cmtst_vec,
- .vece = MO_32 },
- { .fni8 = gen_cmtst_i64,
- .fniv = gen_cmtst_vec,
- .prefer_i64 = TCG_TARGET_REG_BITS == 64,
- .vece = MO_64 },
- };
- static const GVecGen3 mla_op[4] = {
- { .fni4 = gen_mla8_i32,
- .fniv = gen_mla_vec,
- .opc = INDEX_op_mul_vec,
- .load_dest = true,
- .vece = MO_8 },
- { .fni4 = gen_mla16_i32,
- .fniv = gen_mla_vec,
- .opc = INDEX_op_mul_vec,
- .load_dest = true,
- .vece = MO_16 },
- { .fni4 = gen_mla32_i32,
- .fniv = gen_mla_vec,
- .opc = INDEX_op_mul_vec,
- .load_dest = true,
- .vece = MO_32 },
- { .fni8 = gen_mla64_i64,
- .fniv = gen_mla_vec,
- .opc = INDEX_op_mul_vec,
- .prefer_i64 = TCG_TARGET_REG_BITS == 64,
- .load_dest = true,
- .vece = MO_64 },
- };
- static const GVecGen3 mls_op[4] = {
- { .fni4 = gen_mls8_i32,
- .fniv = gen_mls_vec,
- .opc = INDEX_op_mul_vec,
- .load_dest = true,
- .vece = MO_8 },
- { .fni4 = gen_mls16_i32,
- .fniv = gen_mls_vec,
- .opc = INDEX_op_mul_vec,
- .load_dest = true,
- .vece = MO_16 },
- { .fni4 = gen_mls32_i32,
- .fniv = gen_mls_vec,
- .opc = INDEX_op_mul_vec,
- .load_dest = true,
- .vece = MO_32 },
- { .fni8 = gen_mls64_i64,
- .fniv = gen_mls_vec,
- .opc = INDEX_op_mul_vec,
- .prefer_i64 = TCG_TARGET_REG_BITS == 64,
- .load_dest = true,
- .vece = MO_64 },
- };
-
int is_q = extract32(insn, 30, 1);
int u = extract32(insn, 29, 1);
int size = extract32(insn, 22, 2);
@@ -11226,7 +10756,7 @@ static void disas_simd_three_reg_same_fp16(DisasContext *s, uint32_t insn)
TCGv_ptr fpst;
bool pairwise = false;
- if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
+ if (!dc_isar_feature(aa64_fp16, s)) {
unallocated_encoding(s);
return;
}
@@ -11414,7 +10944,8 @@ static void disas_simd_three_reg_same_extra(DisasContext *s, uint32_t insn)
int size = extract32(insn, 22, 2);
bool u = extract32(insn, 29, 1);
bool is_q = extract32(insn, 30, 1);
- int feature, rot;
+ bool feature;
+ int rot;
switch (u * 16 + opcode) {
case 0x10: /* SQRDMLAH (vector) */
@@ -11423,7 +10954,7 @@ static void disas_simd_three_reg_same_extra(DisasContext *s, uint32_t insn)
unallocated_encoding(s);
return;
}
- feature = ARM_FEATURE_V8_RDM;
+ feature = dc_isar_feature(aa64_rdm, s);
break;
case 0x02: /* SDOT (vector) */
case 0x12: /* UDOT (vector) */
@@ -11431,7 +10962,7 @@ static void disas_simd_three_reg_same_extra(DisasContext *s, uint32_t insn)
unallocated_encoding(s);
return;
}
- feature = ARM_FEATURE_V8_DOTPROD;
+ feature = dc_isar_feature(aa64_dp, s);
break;
case 0x18: /* FCMLA, #0 */
case 0x19: /* FCMLA, #90 */
@@ -11440,18 +10971,18 @@ static void disas_simd_three_reg_same_extra(DisasContext *s, uint32_t insn)
case 0x1c: /* FCADD, #90 */
case 0x1e: /* FCADD, #270 */
if (size == 0
- || (size == 1 && !arm_dc_feature(s, ARM_FEATURE_V8_FP16))
+ || (size == 1 && !dc_isar_feature(aa64_fp16, s))
|| (size == 3 && !is_q)) {
unallocated_encoding(s);
return;
}
- feature = ARM_FEATURE_V8_FCMA;
+ feature = dc_isar_feature(aa64_fcma, s);
break;
default:
unallocated_encoding(s);
return;
}
- if (!arm_dc_feature(s, feature)) {
+ if (!feature) {
unallocated_encoding(s);
return;
}
@@ -12320,7 +11851,7 @@ static void disas_simd_two_reg_misc_fp16(DisasContext *s, uint32_t insn)
bool need_fpst = true;
int rmode;
- if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
+ if (!dc_isar_feature(aa64_fp16, s)) {
unallocated_encoding(s);
return;
}
@@ -12665,14 +12196,14 @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn)
break;
case 0x1d: /* SQRDMLAH */
case 0x1f: /* SQRDMLSH */
- if (!arm_dc_feature(s, ARM_FEATURE_V8_RDM)) {
+ if (!dc_isar_feature(aa64_rdm, s)) {
unallocated_encoding(s);
return;
}
break;
case 0x0e: /* SDOT */
case 0x1e: /* UDOT */
- if (size != MO_32 || !arm_dc_feature(s, ARM_FEATURE_V8_DOTPROD)) {
+ if (size != MO_32 || !dc_isar_feature(aa64_dp, s)) {
unallocated_encoding(s);
return;
}
@@ -12681,7 +12212,7 @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn)
case 0x13: /* FCMLA #90 */
case 0x15: /* FCMLA #180 */
case 0x17: /* FCMLA #270 */
- if (!arm_dc_feature(s, ARM_FEATURE_V8_FCMA)) {
+ if (!dc_isar_feature(aa64_fcma, s)) {
unallocated_encoding(s);
return;
}
@@ -12737,7 +12268,7 @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn)
}
break;
}
- if (is_fp16 && !arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
+ if (is_fp16 && !dc_isar_feature(aa64_fp16, s)) {
unallocated_encoding(s);
return;
}
@@ -13208,8 +12739,7 @@ static void disas_crypto_aes(DisasContext *s, uint32_t insn)
TCGv_i32 tcg_decrypt;
CryptoThreeOpIntFn *genfn;
- if (!arm_dc_feature(s, ARM_FEATURE_V8_AES)
- || size != 0) {
+ if (!dc_isar_feature(aa64_aes, s) || size != 0) {
unallocated_encoding(s);
return;
}
@@ -13266,7 +12796,7 @@ static void disas_crypto_three_reg_sha(DisasContext *s, uint32_t insn)
int rd = extract32(insn, 0, 5);
CryptoThreeOpFn *genfn;
TCGv_ptr tcg_rd_ptr, tcg_rn_ptr, tcg_rm_ptr;
- int feature = ARM_FEATURE_V8_SHA256;
+ bool feature;
if (size != 0) {
unallocated_encoding(s);
@@ -13279,23 +12809,26 @@ static void disas_crypto_three_reg_sha(DisasContext *s, uint32_t insn)
case 2: /* SHA1M */
case 3: /* SHA1SU0 */
genfn = NULL;
- feature = ARM_FEATURE_V8_SHA1;
+ feature = dc_isar_feature(aa64_sha1, s);
break;
case 4: /* SHA256H */
genfn = gen_helper_crypto_sha256h;
+ feature = dc_isar_feature(aa64_sha256, s);
break;
case 5: /* SHA256H2 */
genfn = gen_helper_crypto_sha256h2;
+ feature = dc_isar_feature(aa64_sha256, s);
break;
case 6: /* SHA256SU1 */
genfn = gen_helper_crypto_sha256su1;
+ feature = dc_isar_feature(aa64_sha256, s);
break;
default:
unallocated_encoding(s);
return;
}
- if (!arm_dc_feature(s, feature)) {
+ if (!feature) {
unallocated_encoding(s);
return;
}
@@ -13336,7 +12869,7 @@ static void disas_crypto_two_reg_sha(DisasContext *s, uint32_t insn)
int rn = extract32(insn, 5, 5);
int rd = extract32(insn, 0, 5);
CryptoTwoOpFn *genfn;
- int feature;
+ bool feature;
TCGv_ptr tcg_rd_ptr, tcg_rn_ptr;
if (size != 0) {
@@ -13346,15 +12879,15 @@ static void disas_crypto_two_reg_sha(DisasContext *s, uint32_t insn)
switch (opcode) {
case 0: /* SHA1H */
- feature = ARM_FEATURE_V8_SHA1;
+ feature = dc_isar_feature(aa64_sha1, s);
genfn = gen_helper_crypto_sha1h;
break;
case 1: /* SHA1SU1 */
- feature = ARM_FEATURE_V8_SHA1;
+ feature = dc_isar_feature(aa64_sha1, s);
genfn = gen_helper_crypto_sha1su1;
break;
case 2: /* SHA256SU0 */
- feature = ARM_FEATURE_V8_SHA256;
+ feature = dc_isar_feature(aa64_sha256, s);
genfn = gen_helper_crypto_sha256su0;
break;
default:
@@ -13362,7 +12895,7 @@ static void disas_crypto_two_reg_sha(DisasContext *s, uint32_t insn)
return;
}
- if (!arm_dc_feature(s, feature)) {
+ if (!feature) {
unallocated_encoding(s);
return;
}
@@ -13393,40 +12926,40 @@ static void disas_crypto_three_reg_sha512(DisasContext *s, uint32_t insn)
int rm = extract32(insn, 16, 5);
int rn = extract32(insn, 5, 5);
int rd = extract32(insn, 0, 5);
- int feature;
+ bool feature;
CryptoThreeOpFn *genfn;
if (o == 0) {
switch (opcode) {
case 0: /* SHA512H */
- feature = ARM_FEATURE_V8_SHA512;
+ feature = dc_isar_feature(aa64_sha512, s);
genfn = gen_helper_crypto_sha512h;
break;
case 1: /* SHA512H2 */
- feature = ARM_FEATURE_V8_SHA512;
+ feature = dc_isar_feature(aa64_sha512, s);
genfn = gen_helper_crypto_sha512h2;
break;
case 2: /* SHA512SU1 */
- feature = ARM_FEATURE_V8_SHA512;
+ feature = dc_isar_feature(aa64_sha512, s);
genfn = gen_helper_crypto_sha512su1;
break;
case 3: /* RAX1 */
- feature = ARM_FEATURE_V8_SHA3;
+ feature = dc_isar_feature(aa64_sha3, s);
genfn = NULL;
break;
}
} else {
switch (opcode) {
case 0: /* SM3PARTW1 */
- feature = ARM_FEATURE_V8_SM3;
+ feature = dc_isar_feature(aa64_sm3, s);
genfn = gen_helper_crypto_sm3partw1;
break;
case 1: /* SM3PARTW2 */
- feature = ARM_FEATURE_V8_SM3;
+ feature = dc_isar_feature(aa64_sm3, s);
genfn = gen_helper_crypto_sm3partw2;
break;
case 2: /* SM4EKEY */
- feature = ARM_FEATURE_V8_SM4;
+ feature = dc_isar_feature(aa64_sm4, s);
genfn = gen_helper_crypto_sm4ekey;
break;
default:
@@ -13435,7 +12968,7 @@ static void disas_crypto_three_reg_sha512(DisasContext *s, uint32_t insn)
}
}
- if (!arm_dc_feature(s, feature)) {
+ if (!feature) {
unallocated_encoding(s);
return;
}
@@ -13494,16 +13027,16 @@ static void disas_crypto_two_reg_sha512(DisasContext *s, uint32_t insn)
int rn = extract32(insn, 5, 5);
int rd = extract32(insn, 0, 5);
TCGv_ptr tcg_rd_ptr, tcg_rn_ptr;
- int feature;
+ bool feature;
CryptoTwoOpFn *genfn;
switch (opcode) {
case 0: /* SHA512SU0 */
- feature = ARM_FEATURE_V8_SHA512;
+ feature = dc_isar_feature(aa64_sha512, s);
genfn = gen_helper_crypto_sha512su0;
break;
case 1: /* SM4E */
- feature = ARM_FEATURE_V8_SM4;
+ feature = dc_isar_feature(aa64_sm4, s);
genfn = gen_helper_crypto_sm4e;
break;
default:
@@ -13511,7 +13044,7 @@ static void disas_crypto_two_reg_sha512(DisasContext *s, uint32_t insn)
return;
}
- if (!arm_dc_feature(s, feature)) {
+ if (!feature) {
unallocated_encoding(s);
return;
}
@@ -13542,22 +13075,22 @@ static void disas_crypto_four_reg(DisasContext *s, uint32_t insn)
int ra = extract32(insn, 10, 5);
int rn = extract32(insn, 5, 5);
int rd = extract32(insn, 0, 5);
- int feature;
+ bool feature;
switch (op0) {
case 0: /* EOR3 */
case 1: /* BCAX */
- feature = ARM_FEATURE_V8_SHA3;
+ feature = dc_isar_feature(aa64_sha3, s);
break;
case 2: /* SM3SS1 */
- feature = ARM_FEATURE_V8_SM3;
+ feature = dc_isar_feature(aa64_sm3, s);
break;
default:
unallocated_encoding(s);
return;
}
- if (!arm_dc_feature(s, feature)) {
+ if (!feature) {
unallocated_encoding(s);
return;
}
@@ -13644,7 +13177,7 @@ static void disas_crypto_xar(DisasContext *s, uint32_t insn)
TCGv_i64 tcg_op1, tcg_op2, tcg_res[2];
int pass;
- if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA3)) {
+ if (!dc_isar_feature(aa64_sha3, s)) {
unallocated_encoding(s);
return;
}
@@ -13690,7 +13223,7 @@ static void disas_crypto_three_reg_imm2(DisasContext *s, uint32_t insn)
TCGv_ptr tcg_rd_ptr, tcg_rn_ptr, tcg_rm_ptr;
TCGv_i32 tcg_imm2, tcg_opcode;
- if (!arm_dc_feature(s, ARM_FEATURE_V8_SM3)) {
+ if (!dc_isar_feature(aa64_sm3, s)) {
unallocated_encoding(s);
return;
}
@@ -13798,7 +13331,7 @@ static void disas_a64_insn(CPUARMState *env, DisasContext *s)
unallocated_encoding(s);
break;
case 0x2:
- if (!arm_dc_feature(s, ARM_FEATURE_SVE) || !disas_sve(s, insn)) {
+ if (!dc_isar_feature(aa64_sve, s) || !disas_sve(s, insn)) {
unallocated_encoding(s);
}
break;
@@ -13839,6 +13372,7 @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
ARMCPU *arm_cpu = arm_env_get_cpu(env);
int bound;
+ dc->isar = &arm_cpu->isar;
dc->pc = dc->base.pc_first;
dc->condjmp = 0;
@@ -13902,7 +13436,6 @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
static void aarch64_tr_tb_start(DisasContextBase *db, CPUState *cpu)
{
- tcg_clear_temp_count();
}
static void aarch64_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
diff --git a/target/arm/translate.c b/target/arm/translate.c
index 1b4bacb522..7c4675ffd8 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -42,7 +42,7 @@
#define ENABLE_ARCH_5 arm_dc_feature(s, ARM_FEATURE_V5)
/* currently all emulated v5 cores are also v5TE, so don't bother */
#define ENABLE_ARCH_5TE arm_dc_feature(s, ARM_FEATURE_V5)
-#define ENABLE_ARCH_5J arm_dc_feature(s, ARM_FEATURE_JAZELLE)
+#define ENABLE_ARCH_5J dc_isar_feature(jazelle, s)
#define ENABLE_ARCH_6 arm_dc_feature(s, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K arm_dc_feature(s, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2 arm_dc_feature(s, ARM_FEATURE_THUMB2)
@@ -72,7 +72,7 @@ static TCGv_i64 cpu_F0d, cpu_F1d;
#include "exec/gen-icount.h"
-static const char *regnames[] =
+static const char * const regnames[] =
{ "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
"r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
@@ -1585,6 +1585,25 @@ neon_reg_offset (int reg, int n)
return vfp_reg_offset(0, sreg);
}
+/* Return the offset of a 2**SIZE piece of a NEON register, at index ELE,
+ * where 0 is the least significant end of the register.
+ */
+static inline long
+neon_element_offset(int reg, int element, TCGMemOp size)
+{
+ int element_size = 1 << size;
+ int ofs = element * element_size;
+#ifdef HOST_WORDS_BIGENDIAN
+ /* Calculate the offset assuming fully little-endian,
+ * then XOR to account for the order of the 8-byte units.
+ */
+ if (element_size < 8) {
+ ofs ^= 8 - element_size;
+ }
+#endif
+ return neon_reg_offset(reg, 0) + ofs;
+}
+
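The neon_element_offset() helper added above computes an offset assuming a fully little-endian layout and then XORs in a correction on big-endian hosts, where the sub-units of each 8-byte word are stored in reverse order. A minimal standalone sketch of that address arithmetic (hypothetical helper name, not QEMU code):

#include <assert.h>
#include <stdio.h>

/* Offset of element 'ele' of (1 << size) bytes inside a register stored
 * as 8-byte host words; mirrors the XOR correction used above. */
static long element_offset(int ele, int size, int big_endian)
{
    int element_size = 1 << size;
    int ofs = ele * element_size;

    if (big_endian && element_size < 8) {
        ofs ^= 8 - element_size;
    }
    return ofs;
}

int main(void)
{
    /* 16-bit element 0 sits at byte 0 on a little-endian host but at
     * byte 6 of the 8-byte unit on a big-endian one. */
    assert(element_offset(0, 1, 0) == 0);
    assert(element_offset(0, 1, 1) == 6);
    assert(element_offset(3, 1, 1) == 0);
    printf("ok\n");
    return 0;
}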
static TCGv_i32 neon_load_reg(int reg, int pass)
{
TCGv_i32 tmp = tcg_temp_new_i32();
@@ -1592,12 +1611,94 @@ static TCGv_i32 neon_load_reg(int reg, int pass)
return tmp;
}
+static void neon_load_element(TCGv_i32 var, int reg, int ele, TCGMemOp mop)
+{
+ long offset = neon_element_offset(reg, ele, mop & MO_SIZE);
+
+ switch (mop) {
+ case MO_UB:
+ tcg_gen_ld8u_i32(var, cpu_env, offset);
+ break;
+ case MO_UW:
+ tcg_gen_ld16u_i32(var, cpu_env, offset);
+ break;
+ case MO_UL:
+ tcg_gen_ld_i32(var, cpu_env, offset);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static void neon_load_element64(TCGv_i64 var, int reg, int ele, TCGMemOp mop)
+{
+ long offset = neon_element_offset(reg, ele, mop & MO_SIZE);
+
+ switch (mop) {
+ case MO_UB:
+ tcg_gen_ld8u_i64(var, cpu_env, offset);
+ break;
+ case MO_UW:
+ tcg_gen_ld16u_i64(var, cpu_env, offset);
+ break;
+ case MO_UL:
+ tcg_gen_ld32u_i64(var, cpu_env, offset);
+ break;
+ case MO_Q:
+ tcg_gen_ld_i64(var, cpu_env, offset);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
+
static void neon_store_reg(int reg, int pass, TCGv_i32 var)
{
tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
tcg_temp_free_i32(var);
}
+static void neon_store_element(int reg, int ele, TCGMemOp size, TCGv_i32 var)
+{
+ long offset = neon_element_offset(reg, ele, size);
+
+ switch (size) {
+ case MO_8:
+ tcg_gen_st8_i32(var, cpu_env, offset);
+ break;
+ case MO_16:
+ tcg_gen_st16_i32(var, cpu_env, offset);
+ break;
+ case MO_32:
+ tcg_gen_st_i32(var, cpu_env, offset);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static void neon_store_element64(int reg, int ele, TCGMemOp size, TCGv_i64 var)
+{
+ long offset = neon_element_offset(reg, ele, size);
+
+ switch (size) {
+ case MO_8:
+ tcg_gen_st8_i64(var, cpu_env, offset);
+ break;
+ case MO_16:
+ tcg_gen_st16_i64(var, cpu_env, offset);
+ break;
+ case MO_32:
+ tcg_gen_st32_i64(var, cpu_env, offset);
+ break;
+ case MO_64:
+ tcg_gen_st_i64(var, cpu_env, offset);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
+
static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
@@ -2974,19 +3075,6 @@ static void gen_vfp_msr(TCGv_i32 tmp)
tcg_temp_free_i32(tmp);
}
-static void gen_neon_dup_u8(TCGv_i32 var, int shift)
-{
- TCGv_i32 tmp = tcg_temp_new_i32();
- if (shift)
- tcg_gen_shri_i32(var, var, shift);
- tcg_gen_ext8u_i32(var, var);
- tcg_gen_shli_i32(tmp, var, 8);
- tcg_gen_or_i32(var, var, tmp);
- tcg_gen_shli_i32(tmp, var, 16);
- tcg_gen_or_i32(var, var, tmp);
- tcg_temp_free_i32(tmp);
-}
-
static void gen_neon_dup_low16(TCGv_i32 var)
{
TCGv_i32 tmp = tcg_temp_new_i32();
@@ -3005,28 +3093,6 @@ static void gen_neon_dup_high16(TCGv_i32 var)
tcg_temp_free_i32(tmp);
}
-static TCGv_i32 gen_load_and_replicate(DisasContext *s, TCGv_i32 addr, int size)
-{
- /* Load a single Neon element and replicate into a 32 bit TCG reg */
- TCGv_i32 tmp = tcg_temp_new_i32();
- switch (size) {
- case 0:
- gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
- gen_neon_dup_u8(tmp, 0);
- break;
- case 1:
- gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
- gen_neon_dup_low16(tmp);
- break;
- case 2:
- gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
- break;
- default: /* Avoid compiler warnings. */
- abort();
- }
- return tmp;
-}
-
static int handle_vsel(uint32_t insn, uint32_t rd, uint32_t rn, uint32_t rm,
uint32_t dp)
{
@@ -3432,17 +3498,10 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
tmp = load_reg(s, rd);
if (insn & (1 << 23)) {
/* VDUP */
- if (size == 0) {
- gen_neon_dup_u8(tmp, 0);
- } else if (size == 1) {
- gen_neon_dup_low16(tmp);
- }
- for (n = 0; n <= pass * 2; n++) {
- tmp2 = tcg_temp_new_i32();
- tcg_gen_mov_i32(tmp2, tmp);
- neon_store_reg(rn, n, tmp2);
- }
- neon_store_reg(rn, n, tmp);
+ int vec_size = pass ? 16 : 8;
+ tcg_gen_gvec_dup_i32(size, neon_reg_offset(rn, 0),
+ vec_size, vec_size, tmp);
+ tcg_temp_free_i32(tmp);
} else {
/* VMOV */
switch (size) {
@@ -4907,17 +4966,17 @@ static struct {
int nregs;
int interleave;
int spacing;
-} neon_ls_element_type[11] = {
- {4, 4, 1},
- {4, 4, 2},
+} const neon_ls_element_type[11] = {
+ {1, 4, 1},
+ {1, 4, 2},
{4, 1, 1},
- {4, 2, 1},
- {3, 3, 1},
- {3, 3, 2},
+ {2, 2, 2},
+ {1, 3, 1},
+ {1, 3, 2},
{3, 1, 1},
{1, 1, 1},
- {2, 2, 1},
- {2, 2, 2},
+ {1, 2, 1},
+ {1, 2, 2},
{2, 1, 1}
};
@@ -4933,10 +4992,11 @@ static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
int stride;
int size;
int reg;
- int pass;
int load;
- int shift;
int n;
+ int vec_size;
+ int mmu_idx;
+ TCGMemOp endian;
TCGv_i32 addr;
TCGv_i32 tmp;
TCGv_i32 tmp2;
@@ -4948,7 +5008,7 @@ static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
*/
if (s->fp_excp_el) {
gen_exception_insn(s, 4, EXCP_UDEF,
- syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
+ syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
return 0;
}
@@ -4958,6 +5018,8 @@ static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
rn = (insn >> 16) & 0xf;
rm = insn & 0xf;
load = (insn & (1 << 21)) != 0;
+ endian = s->be_data;
+ mmu_idx = get_mem_index(s);
if ((insn & (1 << 23)) == 0) {
/* Load store all elements. */
op = (insn >> 8) & 0xf;
@@ -4982,104 +5044,44 @@ static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
nregs = neon_ls_element_type[op].nregs;
interleave = neon_ls_element_type[op].interleave;
spacing = neon_ls_element_type[op].spacing;
- if (size == 3 && (interleave | spacing) != 1)
+ if (size == 3 && (interleave | spacing) != 1) {
return 1;
+ }
+ /* For our purposes, bytes are always little-endian. */
+ if (size == 0) {
+ endian = MO_LE;
+ }
+ /* Consecutive little-endian elements from a single register
+ * can be promoted to a larger little-endian operation.
+ */
+ if (interleave == 1 && endian == MO_LE) {
+ size = 3;
+ }
+ tmp64 = tcg_temp_new_i64();
addr = tcg_temp_new_i32();
+ tmp2 = tcg_const_i32(1 << size);
load_reg_var(s, addr, rn);
- stride = (1 << size) * interleave;
for (reg = 0; reg < nregs; reg++) {
- if (interleave > 2 || (interleave == 2 && nregs == 2)) {
- load_reg_var(s, addr, rn);
- tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
- } else if (interleave == 2 && nregs == 4 && reg == 2) {
- load_reg_var(s, addr, rn);
- tcg_gen_addi_i32(addr, addr, 1 << size);
- }
- if (size == 3) {
- tmp64 = tcg_temp_new_i64();
- if (load) {
- gen_aa32_ld64(s, tmp64, addr, get_mem_index(s));
- neon_store_reg64(tmp64, rd);
- } else {
- neon_load_reg64(tmp64, rd);
- gen_aa32_st64(s, tmp64, addr, get_mem_index(s));
- }
- tcg_temp_free_i64(tmp64);
- tcg_gen_addi_i32(addr, addr, stride);
- } else {
- for (pass = 0; pass < 2; pass++) {
- if (size == 2) {
- if (load) {
- tmp = tcg_temp_new_i32();
- gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
- neon_store_reg(rd, pass, tmp);
- } else {
- tmp = neon_load_reg(rd, pass);
- gen_aa32_st32(s, tmp, addr, get_mem_index(s));
- tcg_temp_free_i32(tmp);
- }
- tcg_gen_addi_i32(addr, addr, stride);
- } else if (size == 1) {
- if (load) {
- tmp = tcg_temp_new_i32();
- gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
- tcg_gen_addi_i32(addr, addr, stride);
- tmp2 = tcg_temp_new_i32();
- gen_aa32_ld16u(s, tmp2, addr, get_mem_index(s));
- tcg_gen_addi_i32(addr, addr, stride);
- tcg_gen_shli_i32(tmp2, tmp2, 16);
- tcg_gen_or_i32(tmp, tmp, tmp2);
- tcg_temp_free_i32(tmp2);
- neon_store_reg(rd, pass, tmp);
- } else {
- tmp = neon_load_reg(rd, pass);
- tmp2 = tcg_temp_new_i32();
- tcg_gen_shri_i32(tmp2, tmp, 16);
- gen_aa32_st16(s, tmp, addr, get_mem_index(s));
- tcg_temp_free_i32(tmp);
- tcg_gen_addi_i32(addr, addr, stride);
- gen_aa32_st16(s, tmp2, addr, get_mem_index(s));
- tcg_temp_free_i32(tmp2);
- tcg_gen_addi_i32(addr, addr, stride);
- }
- } else /* size == 0 */ {
- if (load) {
- tmp2 = NULL;
- for (n = 0; n < 4; n++) {
- tmp = tcg_temp_new_i32();
- gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
- tcg_gen_addi_i32(addr, addr, stride);
- if (n == 0) {
- tmp2 = tmp;
- } else {
- tcg_gen_shli_i32(tmp, tmp, n * 8);
- tcg_gen_or_i32(tmp2, tmp2, tmp);
- tcg_temp_free_i32(tmp);
- }
- }
- neon_store_reg(rd, pass, tmp2);
- } else {
- tmp2 = neon_load_reg(rd, pass);
- for (n = 0; n < 4; n++) {
- tmp = tcg_temp_new_i32();
- if (n == 0) {
- tcg_gen_mov_i32(tmp, tmp2);
- } else {
- tcg_gen_shri_i32(tmp, tmp2, n * 8);
- }
- gen_aa32_st8(s, tmp, addr, get_mem_index(s));
- tcg_temp_free_i32(tmp);
- tcg_gen_addi_i32(addr, addr, stride);
- }
- tcg_temp_free_i32(tmp2);
- }
+ for (n = 0; n < 8 >> size; n++) {
+ int xs;
+ for (xs = 0; xs < interleave; xs++) {
+ int tt = rd + reg + spacing * xs;
+
+ if (load) {
+ gen_aa32_ld_i64(s, tmp64, addr, mmu_idx, endian | size);
+ neon_store_element64(tt, n, size, tmp64);
+ } else {
+ neon_load_element64(tmp64, tt, n, size);
+ gen_aa32_st_i64(s, tmp64, addr, mmu_idx, endian | size);
}
+ tcg_gen_add_i32(addr, addr, tmp2);
}
}
- rd += spacing;
}
tcg_temp_free_i32(addr);
- stride = nregs * 8;
+ tcg_temp_free_i32(tmp2);
+ tcg_temp_free_i64(tmp64);
+ stride = nregs * interleave * 8;
} else {
size = (insn >> 10) & 3;
if (size == 3) {
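The rewritten VLDn/VSTn "all elements" loop above walks memory in order and sends consecutive elements to register rd + reg + spacing * xs, cycling xs over the interleave factor taken from neon_ls_element_type. A small illustrative sketch (plain C, not QEMU code) of the resulting element-to-register mapping for a two-way interleaved 16-bit access such as VLD2.16 {d0, d1}:

#include <stdio.h>

int main(void)
{
    /* One {nregs, interleave, spacing} table entry with interleave == 2. */
    int nregs = 1, interleave = 2, spacing = 1, size = 1;
    int rd = 0, offset = 0;
    int reg, n, xs;

    for (reg = 0; reg < nregs; reg++) {
        for (n = 0; n < 8 >> size; n++) {
            for (xs = 0; xs < interleave; xs++) {
                /* Even memory elements land in d0, odd ones in d1. */
                printf("mem+%-2d -> d%d[%d]\n",
                       offset, rd + reg + spacing * xs, n);
                offset += 1 << size;
            }
        }
    }
    return 0;
}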
@@ -5106,45 +5108,50 @@ static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
}
addr = tcg_temp_new_i32();
load_reg_var(s, addr, rn);
- if (nregs == 1) {
- /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
- tmp = gen_load_and_replicate(s, addr, size);
- tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
- tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
- if (insn & (1 << 5)) {
- tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
- tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
- }
- tcg_temp_free_i32(tmp);
- } else {
- /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
- stride = (insn & (1 << 5)) ? 2 : 1;
- for (reg = 0; reg < nregs; reg++) {
- tmp = gen_load_and_replicate(s, addr, size);
- tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
- tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
- tcg_temp_free_i32(tmp);
- tcg_gen_addi_i32(addr, addr, 1 << size);
- rd += stride;
+
+ /* VLD1 to all lanes: bit 5 indicates how many Dregs to write.
+ * VLD2/3/4 to all lanes: bit 5 indicates register stride.
+ */
+ stride = (insn & (1 << 5)) ? 2 : 1;
+ vec_size = nregs == 1 ? stride * 8 : 8;
+
+ tmp = tcg_temp_new_i32();
+ for (reg = 0; reg < nregs; reg++) {
+ gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s),
+ s->be_data | size);
+ if ((rd & 1) && vec_size == 16) {
+ /* We cannot write 16 bytes at once because the
+ * destination is unaligned.
+ */
+ tcg_gen_gvec_dup_i32(size, neon_reg_offset(rd, 0),
+ 8, 8, tmp);
+ tcg_gen_gvec_mov(0, neon_reg_offset(rd + 1, 0),
+ neon_reg_offset(rd, 0), 8, 8);
+ } else {
+ tcg_gen_gvec_dup_i32(size, neon_reg_offset(rd, 0),
+ vec_size, vec_size, tmp);
}
+ tcg_gen_addi_i32(addr, addr, 1 << size);
+ rd += stride;
}
+ tcg_temp_free_i32(tmp);
tcg_temp_free_i32(addr);
stride = (1 << size) * nregs;
} else {
/* Single element. */
int idx = (insn >> 4) & 0xf;
- pass = (insn >> 7) & 1;
+ int reg_idx;
switch (size) {
case 0:
- shift = ((insn >> 5) & 3) * 8;
+ reg_idx = (insn >> 5) & 7;
stride = 1;
break;
case 1:
- shift = ((insn >> 6) & 1) * 16;
+ reg_idx = (insn >> 6) & 3;
stride = (insn & (1 << 5)) ? 2 : 1;
break;
case 2:
- shift = 0;
+ reg_idx = (insn >> 7) & 1;
stride = (insn & (1 << 6)) ? 2 : 1;
break;
default:
@@ -5184,52 +5191,24 @@ static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
*/
return 1;
}
+ tmp = tcg_temp_new_i32();
addr = tcg_temp_new_i32();
load_reg_var(s, addr, rn);
for (reg = 0; reg < nregs; reg++) {
if (load) {
- tmp = tcg_temp_new_i32();
- switch (size) {
- case 0:
- gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
- break;
- case 1:
- gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
- break;
- case 2:
- gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
- break;
- default: /* Avoid compiler warnings. */
- abort();
- }
- if (size != 2) {
- tmp2 = neon_load_reg(rd, pass);
- tcg_gen_deposit_i32(tmp, tmp2, tmp,
- shift, size ? 16 : 8);
- tcg_temp_free_i32(tmp2);
- }
- neon_store_reg(rd, pass, tmp);
+ gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s),
+ s->be_data | size);
+ neon_store_element(rd, reg_idx, size, tmp);
} else { /* Store */
- tmp = neon_load_reg(rd, pass);
- if (shift)
- tcg_gen_shri_i32(tmp, tmp, shift);
- switch (size) {
- case 0:
- gen_aa32_st8(s, tmp, addr, get_mem_index(s));
- break;
- case 1:
- gen_aa32_st16(s, tmp, addr, get_mem_index(s));
- break;
- case 2:
- gen_aa32_st32(s, tmp, addr, get_mem_index(s));
- break;
- }
- tcg_temp_free_i32(tmp);
+ neon_load_element(tmp, rd, reg_idx, size);
+ gen_aa32_st_i32(s, tmp, addr, get_mem_index(s),
+ s->be_data | size);
}
rd += stride;
tcg_gen_addi_i32(addr, addr, 1 << size);
}
tcg_temp_free_i32(addr);
+ tcg_temp_free_i32(tmp);
stride = nregs * (1 << size);
}
}
@@ -5250,14 +5229,6 @@ static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
return 0;
}
-/* Bitwise select. dest = c ? t : f. Clobbers T and F. */
-static void gen_neon_bsl(TCGv_i32 dest, TCGv_i32 t, TCGv_i32 f, TCGv_i32 c)
-{
- tcg_gen_and_i32(t, t, c);
- tcg_gen_andc_i32(f, f, c);
- tcg_gen_or_i32(dest, t, f);
-}
-
static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
{
switch (size) {
@@ -5464,7 +5435,7 @@ static void gen_neon_narrow_op(int op, int u, int size,
#define NEON_3R_VABA 15
#define NEON_3R_VADD_VSUB 16
#define NEON_3R_VTST_VCEQ 17
-#define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
+#define NEON_3R_VML 18 /* VMLA, VMLS */
#define NEON_3R_VMUL 19
#define NEON_3R_VPMAX 20
#define NEON_3R_VPMIN 21
@@ -5689,7 +5660,7 @@ static const uint8_t neon_2rm_sizes[] = {
static int do_v81_helper(DisasContext *s, gen_helper_gvec_3_ptr *fn,
int q, int rd, int rn, int rm)
{
- if (arm_dc_feature(s, ARM_FEATURE_V8_RDM)) {
+ if (dc_isar_feature(aa32_rdm, s)) {
int opr_sz = (1 + q) * 8;
tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd),
vfp_reg_offset(1, rn),
@@ -5700,6 +5671,483 @@ static int do_v81_helper(DisasContext *s, gen_helper_gvec_3_ptr *fn,
return 1;
}
+/*
+ * Expanders for VBitOps_VBIF, VBIT, VBSL.
+ */
+static void gen_bsl_i64(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
+{
+ tcg_gen_xor_i64(rn, rn, rm);
+ tcg_gen_and_i64(rn, rn, rd);
+ tcg_gen_xor_i64(rd, rm, rn);
+}
+
+static void gen_bit_i64(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
+{
+ tcg_gen_xor_i64(rn, rn, rd);
+ tcg_gen_and_i64(rn, rn, rm);
+ tcg_gen_xor_i64(rd, rd, rn);
+}
+
+static void gen_bif_i64(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
+{
+ tcg_gen_xor_i64(rn, rn, rd);
+ tcg_gen_andc_i64(rn, rn, rm);
+ tcg_gen_xor_i64(rd, rd, rn);
+}
+
+static void gen_bsl_vec(unsigned vece, TCGv_vec rd, TCGv_vec rn, TCGv_vec rm)
+{
+ tcg_gen_xor_vec(vece, rn, rn, rm);
+ tcg_gen_and_vec(vece, rn, rn, rd);
+ tcg_gen_xor_vec(vece, rd, rm, rn);
+}
+
+static void gen_bit_vec(unsigned vece, TCGv_vec rd, TCGv_vec rn, TCGv_vec rm)
+{
+ tcg_gen_xor_vec(vece, rn, rn, rd);
+ tcg_gen_and_vec(vece, rn, rn, rm);
+ tcg_gen_xor_vec(vece, rd, rd, rn);
+}
+
+static void gen_bif_vec(unsigned vece, TCGv_vec rd, TCGv_vec rn, TCGv_vec rm)
+{
+ tcg_gen_xor_vec(vece, rn, rn, rd);
+ tcg_gen_andc_vec(vece, rn, rn, rm);
+ tcg_gen_xor_vec(vece, rd, rd, rn);
+}
+
+const GVecGen3 bsl_op = {
+ .fni8 = gen_bsl_i64,
+ .fniv = gen_bsl_vec,
+ .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+ .load_dest = true
+};
+
+const GVecGen3 bit_op = {
+ .fni8 = gen_bit_i64,
+ .fniv = gen_bit_vec,
+ .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+ .load_dest = true
+};
+
+const GVecGen3 bif_op = {
+ .fni8 = gen_bif_i64,
+ .fniv = gen_bif_vec,
+ .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+ .load_dest = true
+};
+
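The three expanders above use the xor/and/xor form of bitwise select, which avoids an extra temporary. A quick standalone check (not QEMU code) that the VBSL sequence matches the naive definition dest = (rd & rn) | (~rd & rm):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* xor/and/xor form used by gen_bsl_i64 above. */
static uint64_t bsl_xor(uint64_t rd, uint64_t rn, uint64_t rm)
{
    rn ^= rm;
    rn &= rd;
    return rm ^ rn;
}

/* Naive VBSL: take rn bits where rd is set, rm bits elsewhere. */
static uint64_t bsl_ref(uint64_t rd, uint64_t rn, uint64_t rm)
{
    return (rd & rn) | (~rd & rm);
}

int main(void)
{
    uint64_t d = 0xff00ff00ff00ff00ull;
    uint64_t n = 0x0123456789abcdefull;
    uint64_t m = 0xfedcba9876543210ull;

    assert(bsl_xor(d, n, m) == bsl_ref(d, n, m));
    printf("ok\n");
    return 0;
}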
+static void gen_ssra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
+{
+ tcg_gen_vec_sar8i_i64(a, a, shift);
+ tcg_gen_vec_add8_i64(d, d, a);
+}
+
+static void gen_ssra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
+{
+ tcg_gen_vec_sar16i_i64(a, a, shift);
+ tcg_gen_vec_add16_i64(d, d, a);
+}
+
+static void gen_ssra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
+{
+ tcg_gen_sari_i32(a, a, shift);
+ tcg_gen_add_i32(d, d, a);
+}
+
+static void gen_ssra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
+{
+ tcg_gen_sari_i64(a, a, shift);
+ tcg_gen_add_i64(d, d, a);
+}
+
+static void gen_ssra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
+{
+ tcg_gen_sari_vec(vece, a, a, sh);
+ tcg_gen_add_vec(vece, d, d, a);
+}
+
+const GVecGen2i ssra_op[4] = {
+ { .fni8 = gen_ssra8_i64,
+ .fniv = gen_ssra_vec,
+ .load_dest = true,
+ .opc = INDEX_op_sari_vec,
+ .vece = MO_8 },
+ { .fni8 = gen_ssra16_i64,
+ .fniv = gen_ssra_vec,
+ .load_dest = true,
+ .opc = INDEX_op_sari_vec,
+ .vece = MO_16 },
+ { .fni4 = gen_ssra32_i32,
+ .fniv = gen_ssra_vec,
+ .load_dest = true,
+ .opc = INDEX_op_sari_vec,
+ .vece = MO_32 },
+ { .fni8 = gen_ssra64_i64,
+ .fniv = gen_ssra_vec,
+ .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+ .load_dest = true,
+ .opc = INDEX_op_sari_vec,
+ .vece = MO_64 },
+};
+
+static void gen_usra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
+{
+ tcg_gen_vec_shr8i_i64(a, a, shift);
+ tcg_gen_vec_add8_i64(d, d, a);
+}
+
+static void gen_usra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
+{
+ tcg_gen_vec_shr16i_i64(a, a, shift);
+ tcg_gen_vec_add16_i64(d, d, a);
+}
+
+static void gen_usra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
+{
+ tcg_gen_shri_i32(a, a, shift);
+ tcg_gen_add_i32(d, d, a);
+}
+
+static void gen_usra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
+{
+ tcg_gen_shri_i64(a, a, shift);
+ tcg_gen_add_i64(d, d, a);
+}
+
+static void gen_usra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
+{
+ tcg_gen_shri_vec(vece, a, a, sh);
+ tcg_gen_add_vec(vece, d, d, a);
+}
+
+const GVecGen2i usra_op[4] = {
+ { .fni8 = gen_usra8_i64,
+ .fniv = gen_usra_vec,
+ .load_dest = true,
+ .opc = INDEX_op_shri_vec,
+ .vece = MO_8, },
+ { .fni8 = gen_usra16_i64,
+ .fniv = gen_usra_vec,
+ .load_dest = true,
+ .opc = INDEX_op_shri_vec,
+ .vece = MO_16, },
+ { .fni4 = gen_usra32_i32,
+ .fniv = gen_usra_vec,
+ .load_dest = true,
+ .opc = INDEX_op_shri_vec,
+ .vece = MO_32, },
+ { .fni8 = gen_usra64_i64,
+ .fniv = gen_usra_vec,
+ .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+ .load_dest = true,
+ .opc = INDEX_op_shri_vec,
+ .vece = MO_64, },
+};
+
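Per element, SSRA and USRA as wired up here are simply "shift the source right, then accumulate into the destination". A scalar sketch of the 8-bit case (hypothetical helpers; an arithmetic right shift of negative values is assumed, as on QEMU's supported hosts):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Signed shift-right-accumulate on one byte lane. */
static int8_t ssra8(int8_t d, int8_t a, int sh)
{
    return (int8_t)(d + (a >> sh));
}

/* Unsigned shift-right-accumulate on one byte lane. */
static uint8_t usra8(uint8_t d, uint8_t a, int sh)
{
    return (uint8_t)(d + (a >> sh));
}

int main(void)
{
    assert(ssra8(1, -32, 3) == (int8_t)-3);   /* 1 + (-4)  */
    assert(usra8(1, 0xe0, 3) == 0x1d);        /* 1 + 0x1c  */
    printf("ok\n");
    return 0;
}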
+static void gen_shr8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
+{
+ uint64_t mask = dup_const(MO_8, 0xff >> shift);
+ TCGv_i64 t = tcg_temp_new_i64();
+
+ tcg_gen_shri_i64(t, a, shift);
+ tcg_gen_andi_i64(t, t, mask);
+ tcg_gen_andi_i64(d, d, ~mask);
+ tcg_gen_or_i64(d, d, t);
+ tcg_temp_free_i64(t);
+}
+
+static void gen_shr16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
+{
+ uint64_t mask = dup_const(MO_16, 0xffff >> shift);
+ TCGv_i64 t = tcg_temp_new_i64();
+
+ tcg_gen_shri_i64(t, a, shift);
+ tcg_gen_andi_i64(t, t, mask);
+ tcg_gen_andi_i64(d, d, ~mask);
+ tcg_gen_or_i64(d, d, t);
+ tcg_temp_free_i64(t);
+}
+
+static void gen_shr32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
+{
+ tcg_gen_shri_i32(a, a, shift);
+ tcg_gen_deposit_i32(d, d, a, 0, 32 - shift);
+}
+
+static void gen_shr64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
+{
+ tcg_gen_shri_i64(a, a, shift);
+ tcg_gen_deposit_i64(d, d, a, 0, 64 - shift);
+}
+
+static void gen_shr_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
+{
+ if (sh == 0) {
+ tcg_gen_mov_vec(d, a);
+ } else {
+ TCGv_vec t = tcg_temp_new_vec_matching(d);
+ TCGv_vec m = tcg_temp_new_vec_matching(d);
+
+ tcg_gen_dupi_vec(vece, m, MAKE_64BIT_MASK((8 << vece) - sh, sh));
+ tcg_gen_shri_vec(vece, t, a, sh);
+ tcg_gen_and_vec(vece, d, d, m);
+ tcg_gen_or_vec(vece, d, d, t);
+
+ tcg_temp_free_vec(t);
+ tcg_temp_free_vec(m);
+ }
+}
+
+const GVecGen2i sri_op[4] = {
+ { .fni8 = gen_shr8_ins_i64,
+ .fniv = gen_shr_ins_vec,
+ .load_dest = true,
+ .opc = INDEX_op_shri_vec,
+ .vece = MO_8 },
+ { .fni8 = gen_shr16_ins_i64,
+ .fniv = gen_shr_ins_vec,
+ .load_dest = true,
+ .opc = INDEX_op_shri_vec,
+ .vece = MO_16 },
+ { .fni4 = gen_shr32_ins_i32,
+ .fniv = gen_shr_ins_vec,
+ .load_dest = true,
+ .opc = INDEX_op_shri_vec,
+ .vece = MO_32 },
+ { .fni8 = gen_shr64_ins_i64,
+ .fniv = gen_shr_ins_vec,
+ .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+ .load_dest = true,
+ .opc = INDEX_op_shri_vec,
+ .vece = MO_64 },
+};
+
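gen_shr8_ins_i64() and friends build the SRI insert mask by replicating a per-element pattern such as 0xff >> shift across a 64-bit word with dup_const. A stand-in with the assumed replication semantics (dup_mask is a hypothetical name, not the QEMU helper):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Replicate an 8- or 16-bit pattern across a 64-bit word, matching how
 * dup_const is used above (MO_8 == 0, MO_16 == 1). */
static uint64_t dup_mask(int vece, uint64_t x)
{
    switch (vece) {
    case 0: /* MO_8 */
        x &= 0xff;
        return x * 0x0101010101010101ull;
    case 1: /* MO_16 */
        x &= 0xffff;
        return x * 0x0001000100010001ull;
    default:
        return x;
    }
}

int main(void)
{
    int shift = 3;
    /* SRI #3 on bytes keeps the low 5 bits of each shifted byte. */
    assert(dup_mask(0, 0xff >> shift) == 0x1f1f1f1f1f1f1f1full);
    printf("ok\n");
    return 0;
}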
+static void gen_shl8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
+{
+ uint64_t mask = dup_const(MO_8, 0xff << shift);
+ TCGv_i64 t = tcg_temp_new_i64();
+
+ tcg_gen_shli_i64(t, a, shift);
+ tcg_gen_andi_i64(t, t, mask);
+ tcg_gen_andi_i64(d, d, ~mask);
+ tcg_gen_or_i64(d, d, t);
+ tcg_temp_free_i64(t);
+}
+
+static void gen_shl16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
+{
+ uint64_t mask = dup_const(MO_16, 0xffff << shift);
+ TCGv_i64 t = tcg_temp_new_i64();
+
+ tcg_gen_shli_i64(t, a, shift);
+ tcg_gen_andi_i64(t, t, mask);
+ tcg_gen_andi_i64(d, d, ~mask);
+ tcg_gen_or_i64(d, d, t);
+ tcg_temp_free_i64(t);
+}
+
+static void gen_shl32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
+{
+ tcg_gen_deposit_i32(d, d, a, shift, 32 - shift);
+}
+
+static void gen_shl64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
+{
+ tcg_gen_deposit_i64(d, d, a, shift, 64 - shift);
+}
+
+static void gen_shl_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
+{
+ if (sh == 0) {
+ tcg_gen_mov_vec(d, a);
+ } else {
+ TCGv_vec t = tcg_temp_new_vec_matching(d);
+ TCGv_vec m = tcg_temp_new_vec_matching(d);
+
+ tcg_gen_dupi_vec(vece, m, MAKE_64BIT_MASK(0, sh));
+ tcg_gen_shli_vec(vece, t, a, sh);
+ tcg_gen_and_vec(vece, d, d, m);
+ tcg_gen_or_vec(vece, d, d, t);
+
+ tcg_temp_free_vec(t);
+ tcg_temp_free_vec(m);
+ }
+}
+
+const GVecGen2i sli_op[4] = {
+ { .fni8 = gen_shl8_ins_i64,
+ .fniv = gen_shl_ins_vec,
+ .load_dest = true,
+ .opc = INDEX_op_shli_vec,
+ .vece = MO_8 },
+ { .fni8 = gen_shl16_ins_i64,
+ .fniv = gen_shl_ins_vec,
+ .load_dest = true,
+ .opc = INDEX_op_shli_vec,
+ .vece = MO_16 },
+ { .fni4 = gen_shl32_ins_i32,
+ .fniv = gen_shl_ins_vec,
+ .load_dest = true,
+ .opc = INDEX_op_shli_vec,
+ .vece = MO_32 },
+ { .fni8 = gen_shl64_ins_i64,
+ .fniv = gen_shl_ins_vec,
+ .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+ .load_dest = true,
+ .opc = INDEX_op_shli_vec,
+ .vece = MO_64 },
+};
+
+static void gen_mla8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
+{
+ gen_helper_neon_mul_u8(a, a, b);
+ gen_helper_neon_add_u8(d, d, a);
+}
+
+static void gen_mls8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
+{
+ gen_helper_neon_mul_u8(a, a, b);
+ gen_helper_neon_sub_u8(d, d, a);
+}
+
+static void gen_mla16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
+{
+ gen_helper_neon_mul_u16(a, a, b);
+ gen_helper_neon_add_u16(d, d, a);
+}
+
+static void gen_mls16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
+{
+ gen_helper_neon_mul_u16(a, a, b);
+ gen_helper_neon_sub_u16(d, d, a);
+}
+
+static void gen_mla32_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
+{
+ tcg_gen_mul_i32(a, a, b);
+ tcg_gen_add_i32(d, d, a);
+}
+
+static void gen_mls32_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
+{
+ tcg_gen_mul_i32(a, a, b);
+ tcg_gen_sub_i32(d, d, a);
+}
+
+static void gen_mla64_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
+{
+ tcg_gen_mul_i64(a, a, b);
+ tcg_gen_add_i64(d, d, a);
+}
+
+static void gen_mls64_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
+{
+ tcg_gen_mul_i64(a, a, b);
+ tcg_gen_sub_i64(d, d, a);
+}
+
+static void gen_mla_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
+{
+ tcg_gen_mul_vec(vece, a, a, b);
+ tcg_gen_add_vec(vece, d, d, a);
+}
+
+static void gen_mls_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
+{
+ tcg_gen_mul_vec(vece, a, a, b);
+ tcg_gen_sub_vec(vece, d, d, a);
+}
+
+/* Note that while NEON does not support VMLA and VMLS as 64-bit ops,
+ * these tables are shared with AArch64 which does support them.
+ */
+const GVecGen3 mla_op[4] = {
+ { .fni4 = gen_mla8_i32,
+ .fniv = gen_mla_vec,
+ .opc = INDEX_op_mul_vec,
+ .load_dest = true,
+ .vece = MO_8 },
+ { .fni4 = gen_mla16_i32,
+ .fniv = gen_mla_vec,
+ .opc = INDEX_op_mul_vec,
+ .load_dest = true,
+ .vece = MO_16 },
+ { .fni4 = gen_mla32_i32,
+ .fniv = gen_mla_vec,
+ .opc = INDEX_op_mul_vec,
+ .load_dest = true,
+ .vece = MO_32 },
+ { .fni8 = gen_mla64_i64,
+ .fniv = gen_mla_vec,
+ .opc = INDEX_op_mul_vec,
+ .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+ .load_dest = true,
+ .vece = MO_64 },
+};
+
+const GVecGen3 mls_op[4] = {
+ { .fni4 = gen_mls8_i32,
+ .fniv = gen_mls_vec,
+ .opc = INDEX_op_mul_vec,
+ .load_dest = true,
+ .vece = MO_8 },
+ { .fni4 = gen_mls16_i32,
+ .fniv = gen_mls_vec,
+ .opc = INDEX_op_mul_vec,
+ .load_dest = true,
+ .vece = MO_16 },
+ { .fni4 = gen_mls32_i32,
+ .fniv = gen_mls_vec,
+ .opc = INDEX_op_mul_vec,
+ .load_dest = true,
+ .vece = MO_32 },
+ { .fni8 = gen_mls64_i64,
+ .fniv = gen_mls_vec,
+ .opc = INDEX_op_mul_vec,
+ .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+ .load_dest = true,
+ .vece = MO_64 },
+};
+
+/* CMTST : test is "if (X & Y != 0)". */
+static void gen_cmtst_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
+{
+ tcg_gen_and_i32(d, a, b);
+ tcg_gen_setcondi_i32(TCG_COND_NE, d, d, 0);
+ tcg_gen_neg_i32(d, d);
+}
+
+void gen_cmtst_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
+{
+ tcg_gen_and_i64(d, a, b);
+ tcg_gen_setcondi_i64(TCG_COND_NE, d, d, 0);
+ tcg_gen_neg_i64(d, d);
+}
+
+static void gen_cmtst_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
+{
+ tcg_gen_and_vec(vece, d, a, b);
+ tcg_gen_dupi_vec(vece, a, 0);
+ tcg_gen_cmp_vec(TCG_COND_NE, vece, d, d, a);
+}
+
+const GVecGen3 cmtst_op[4] = {
+ { .fni4 = gen_helper_neon_tst_u8,
+ .fniv = gen_cmtst_vec,
+ .vece = MO_8 },
+ { .fni4 = gen_helper_neon_tst_u16,
+ .fniv = gen_cmtst_vec,
+ .vece = MO_16 },
+ { .fni4 = gen_cmtst_i32,
+ .fniv = gen_cmtst_vec,
+ .vece = MO_32 },
+ { .fni8 = gen_cmtst_i64,
+ .fniv = gen_cmtst_vec,
+ .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+ .vece = MO_64 },
+};
+
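CMTST sets an element to all ones when the AND of its operands is non-zero, which is exactly what the setcond + neg pair in gen_cmtst_i32() produces. A one-line scalar equivalent for reference (plain C, illustrative only):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* All-ones if (a & b) != 0, else zero. */
static uint32_t cmtst32(uint32_t a, uint32_t b)
{
    return -(uint32_t)((a & b) != 0);
}

int main(void)
{
    assert(cmtst32(0x10, 0x30) == 0xffffffffu);
    assert(cmtst32(0x10, 0x01) == 0);
    printf("ok\n");
    return 0;
}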
/* Translate a NEON data processing instruction. Return nonzero if the
instruction is invalid.
We process data in a mixture of 32-bit and 64-bit chunks.
@@ -5709,14 +6157,15 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
{
int op;
int q;
- int rd, rn, rm;
+ int rd, rn, rm, rd_ofs, rn_ofs, rm_ofs;
int size;
int shift;
int pass;
int count;
int pairwise;
int u;
- uint32_t imm, mask;
+ int vec_size;
+ uint32_t imm;
TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
TCGv_ptr ptr1, ptr2, ptr3;
TCGv_i64 tmp64;
@@ -5727,7 +6176,7 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
*/
if (s->fp_excp_el) {
gen_exception_insn(s, 4, EXCP_UDEF,
- syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
+ syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
return 0;
}
@@ -5739,6 +6188,11 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
VFP_DREG_N(rn, insn);
VFP_DREG_M(rm, insn);
size = (insn >> 20) & 3;
+ vec_size = q ? 16 : 8;
+ rd_ofs = neon_reg_offset(rd, 0);
+ rn_ofs = neon_reg_offset(rn, 0);
+ rm_ofs = neon_reg_offset(rm, 0);
+
if ((insn & (1 << 23)) == 0) {
/* Three register same length. */
op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
@@ -5763,7 +6217,7 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
return 1;
}
if (!u) { /* SHA-1 */
- if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
+ if (!dc_isar_feature(aa32_sha1, s)) {
return 1;
}
ptr1 = vfp_reg_ptr(true, rd);
@@ -5773,7 +6227,7 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
gen_helper_crypto_sha1_3reg(ptr1, ptr2, ptr3, tmp4);
tcg_temp_free_i32(tmp4);
} else { /* SHA-256 */
- if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256) || size == 3) {
+ if (!dc_isar_feature(aa32_sha2, s) || size == 3) {
return 1;
}
ptr1 = vfp_reg_ptr(true, rd);
@@ -5829,8 +6283,100 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
q, rd, rn, rm);
}
return 1;
+
+ case NEON_3R_LOGIC: /* Logic ops. */
+ switch ((u << 2) | size) {
+ case 0: /* VAND */
+ tcg_gen_gvec_and(0, rd_ofs, rn_ofs, rm_ofs,
+ vec_size, vec_size);
+ break;
+ case 1: /* VBIC */
+ tcg_gen_gvec_andc(0, rd_ofs, rn_ofs, rm_ofs,
+ vec_size, vec_size);
+ break;
+ case 2:
+ if (rn == rm) {
+ /* VMOV */
+ tcg_gen_gvec_mov(0, rd_ofs, rn_ofs, vec_size, vec_size);
+ } else {
+ /* VORR */
+ tcg_gen_gvec_or(0, rd_ofs, rn_ofs, rm_ofs,
+ vec_size, vec_size);
+ }
+ break;
+ case 3: /* VORN */
+ tcg_gen_gvec_orc(0, rd_ofs, rn_ofs, rm_ofs,
+ vec_size, vec_size);
+ break;
+ case 4: /* VEOR */
+ tcg_gen_gvec_xor(0, rd_ofs, rn_ofs, rm_ofs,
+ vec_size, vec_size);
+ break;
+ case 5: /* VBSL */
+ tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs,
+ vec_size, vec_size, &bsl_op);
+ break;
+ case 6: /* VBIT */
+ tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs,
+ vec_size, vec_size, &bit_op);
+ break;
+ case 7: /* VBIF */
+ tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs,
+ vec_size, vec_size, &bif_op);
+ break;
+ }
+ return 0;
+
+ case NEON_3R_VADD_VSUB:
+ if (u) {
+ tcg_gen_gvec_sub(size, rd_ofs, rn_ofs, rm_ofs,
+ vec_size, vec_size);
+ } else {
+ tcg_gen_gvec_add(size, rd_ofs, rn_ofs, rm_ofs,
+ vec_size, vec_size);
+ }
+ return 0;
+
+ case NEON_3R_VMUL: /* VMUL */
+ if (u) {
+ /* Polynomial case allows only P8 and is handled below. */
+ if (size != 0) {
+ return 1;
+ }
+ } else {
+ tcg_gen_gvec_mul(size, rd_ofs, rn_ofs, rm_ofs,
+ vec_size, vec_size);
+ return 0;
+ }
+ break;
+
+ case NEON_3R_VML: /* VMLA, VMLS */
+ tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size,
+ u ? &mls_op[size] : &mla_op[size]);
+ return 0;
+
+ case NEON_3R_VTST_VCEQ:
+ if (u) { /* VCEQ */
+ tcg_gen_gvec_cmp(TCG_COND_EQ, size, rd_ofs, rn_ofs, rm_ofs,
+ vec_size, vec_size);
+ } else { /* VTST */
+ tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs,
+ vec_size, vec_size, &cmtst_op[size]);
+ }
+ return 0;
+
+ case NEON_3R_VCGT:
+ tcg_gen_gvec_cmp(u ? TCG_COND_GTU : TCG_COND_GT, size,
+ rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size);
+ return 0;
+
+ case NEON_3R_VCGE:
+ tcg_gen_gvec_cmp(u ? TCG_COND_GEU : TCG_COND_GE, size,
+ rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size);
+ return 0;
}
- if (size == 3 && op != NEON_3R_LOGIC) {
+
+ if (size == 3) {
/* 64-bit element instructions. */
for (pass = 0; pass < (q ? 2 : 1); pass++) {
neon_load_reg64(cpu_V0, rn + pass);
@@ -5886,13 +6432,6 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
cpu_V1, cpu_V0);
}
break;
- case NEON_3R_VADD_VSUB:
- if (u) {
- tcg_gen_sub_i64(CPU_V001);
- } else {
- tcg_gen_add_i64(CPU_V001);
- }
- break;
default:
abort();
}
@@ -5942,12 +6481,6 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
return 1;
}
break;
- case NEON_3R_VMUL:
- if (u && (size != 0)) {
- /* UNDEF on invalid size for polynomial subcase */
- return 1;
- }
- break;
case NEON_3R_VFM_VQRDMLSH:
if (!arm_dc_feature(s, ARM_FEATURE_VFP4)) {
return 1;
@@ -5988,52 +6521,12 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
case NEON_3R_VRHADD:
GEN_NEON_INTEGER_OP(rhadd);
break;
- case NEON_3R_LOGIC: /* Logic ops. */
- switch ((u << 2) | size) {
- case 0: /* VAND */
- tcg_gen_and_i32(tmp, tmp, tmp2);
- break;
- case 1: /* BIC */
- tcg_gen_andc_i32(tmp, tmp, tmp2);
- break;
- case 2: /* VORR */
- tcg_gen_or_i32(tmp, tmp, tmp2);
- break;
- case 3: /* VORN */
- tcg_gen_orc_i32(tmp, tmp, tmp2);
- break;
- case 4: /* VEOR */
- tcg_gen_xor_i32(tmp, tmp, tmp2);
- break;
- case 5: /* VBSL */
- tmp3 = neon_load_reg(rd, pass);
- gen_neon_bsl(tmp, tmp, tmp2, tmp3);
- tcg_temp_free_i32(tmp3);
- break;
- case 6: /* VBIT */
- tmp3 = neon_load_reg(rd, pass);
- gen_neon_bsl(tmp, tmp, tmp3, tmp2);
- tcg_temp_free_i32(tmp3);
- break;
- case 7: /* VBIF */
- tmp3 = neon_load_reg(rd, pass);
- gen_neon_bsl(tmp, tmp3, tmp, tmp2);
- tcg_temp_free_i32(tmp3);
- break;
- }
- break;
case NEON_3R_VHSUB:
GEN_NEON_INTEGER_OP(hsub);
break;
case NEON_3R_VQSUB:
GEN_NEON_INTEGER_OP_ENV(qsub);
break;
- case NEON_3R_VCGT:
- GEN_NEON_INTEGER_OP(cgt);
- break;
- case NEON_3R_VCGE:
- GEN_NEON_INTEGER_OP(cge);
- break;
case NEON_3R_VSHL:
GEN_NEON_INTEGER_OP(shl);
break;
@@ -6061,61 +6554,9 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
tmp2 = neon_load_reg(rd, pass);
gen_neon_add(size, tmp, tmp2);
break;
- case NEON_3R_VADD_VSUB:
- if (!u) { /* VADD */
- gen_neon_add(size, tmp, tmp2);
- } else { /* VSUB */
- switch (size) {
- case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
- case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
- case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
- default: abort();
- }
- }
- break;
- case NEON_3R_VTST_VCEQ:
- if (!u) { /* VTST */
- switch (size) {
- case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
- case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
- case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
- default: abort();
- }
- } else { /* VCEQ */
- switch (size) {
- case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
- case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
- case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
- default: abort();
- }
- }
- break;
- case NEON_3R_VML: /* VMLA, VMLAL, VMLS,VMLSL */
- switch (size) {
- case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
- case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
- case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
- default: abort();
- }
- tcg_temp_free_i32(tmp2);
- tmp2 = neon_load_reg(rd, pass);
- if (u) { /* VMLS */
- gen_neon_rsb(size, tmp, tmp2);
- } else { /* VMLA */
- gen_neon_add(size, tmp, tmp2);
- }
- break;
case NEON_3R_VMUL:
- if (u) { /* polynomial */
- gen_helper_neon_mul_p8(tmp, tmp, tmp2);
- } else { /* Integer */
- switch (size) {
- case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
- case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
- case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
- default: abort();
- }
- }
+ /* VMUL.P8; other cases already eliminated. */
+ gen_helper_neon_mul_p8(tmp, tmp, tmp2);
break;
case NEON_3R_VPMAX:
GEN_NEON_INTEGER_OP(pmax);
@@ -6297,8 +6738,6 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
size--;
}
shift = (insn >> 16) & ((1 << (3 + size)) - 1);
- /* To avoid excessive duplication of ops we implement shift
- by immediate using the variable shift operations. */
if (op < 8) {
/* Shift by immediate:
VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
@@ -6310,43 +6749,99 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
}
/* Right shifts are encoded as N - shift, where N is the
element size in bits. */
- if (op <= 4)
+ if (op <= 4) {
shift = shift - (1 << (size + 3));
+ }
+
+ switch (op) {
+ case 0: /* VSHR */
+ /* Right shift comes here negative. */
+ shift = -shift;
+ /* Shifts larger than the element size are architecturally
+ * valid. Unsigned results in all zeros; signed results
+ * in all sign bits.
+ */
+ if (!u) {
+ tcg_gen_gvec_sari(size, rd_ofs, rm_ofs,
+ MIN(shift, (8 << size) - 1),
+ vec_size, vec_size);
+ } else if (shift >= 8 << size) {
+ tcg_gen_gvec_dup8i(rd_ofs, vec_size, vec_size, 0);
+ } else {
+ tcg_gen_gvec_shri(size, rd_ofs, rm_ofs, shift,
+ vec_size, vec_size);
+ }
+ return 0;
+
+ case 1: /* VSRA */
+ /* Right shift comes here negative. */
+ shift = -shift;
+ /* Shifts larger than the element size are architecturally
+ * valid. Unsigned results in all zeros; signed results
+ * in all sign bits.
+ */
+ if (!u) {
+ tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size, vec_size,
+ MIN(shift, (8 << size) - 1),
+ &ssra_op[size]);
+ } else if (shift >= 8 << size) {
+ /* rd += 0 */
+ } else {
+ tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size, vec_size,
+ shift, &usra_op[size]);
+ }
+ return 0;
+
+ case 4: /* VSRI */
+ if (!u) {
+ return 1;
+ }
+ /* Right shift comes here negative. */
+ shift = -shift;
+ /* Shift out of range leaves destination unchanged. */
+ if (shift < 8 << size) {
+ tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size, vec_size,
+ shift, &sri_op[size]);
+ }
+ return 0;
+
+ case 5: /* VSHL, VSLI */
+ if (u) { /* VSLI */
+ /* Shift out of range leaves destination unchanged. */
+ if (shift < 8 << size) {
+ tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size,
+ vec_size, shift, &sli_op[size]);
+ }
+ } else { /* VSHL */
+ /* Shifts larger than the element size are
+                         * architecturally valid and result in zero.
+ */
+ if (shift >= 8 << size) {
+ tcg_gen_gvec_dup8i(rd_ofs, vec_size, vec_size, 0);
+ } else {
+ tcg_gen_gvec_shli(size, rd_ofs, rm_ofs, shift,
+ vec_size, vec_size);
+ }
+ }
+ return 0;
+ }
+
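In this path a right shift arrives as a negative value, because the immediate field encodes esize - shift and the earlier "shift -= 1 << (size + 3)" adjustment has already been applied; the signed VSHR case additionally clamps to esize - 1 so over-wide shifts still yield all sign bits. A small decode sketch under those assumptions (not QEMU code):

#include <assert.h>
#include <stdio.h>

/* Recover the architectural right-shift amount from the raw field value
 * and the element size index, mirroring the adjustment above. */
static int decode_right_shift(int field, int size)
{
    int esize = 8 << size;       /* element size in bits */
    int shift = field - esize;   /* negative for right shifts */
    return -shift;               /* actual shift amount */
}

int main(void)
{
    /* For 16-bit elements, a field value of 9 means "shift right by 7". */
    assert(decode_right_shift(9, 1) == 7);
    printf("ok\n");
    return 0;
}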
if (size == 3) {
count = q + 1;
} else {
count = q ? 4: 2;
}
- switch (size) {
- case 0:
- imm = (uint8_t) shift;
- imm |= imm << 8;
- imm |= imm << 16;
- break;
- case 1:
- imm = (uint16_t) shift;
- imm |= imm << 16;
- break;
- case 2:
- case 3:
- imm = shift;
- break;
- default:
- abort();
- }
+
+ /* To avoid excessive duplication of ops we implement shift
+ * by immediate using the variable shift operations.
+ */
+ imm = dup_const(size, shift);
for (pass = 0; pass < count; pass++) {
if (size == 3) {
neon_load_reg64(cpu_V0, rm + pass);
tcg_gen_movi_i64(cpu_V1, imm);
switch (op) {
- case 0: /* VSHR */
- case 1: /* VSRA */
- if (u)
- gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
- else
- gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
- break;
case 2: /* VRSHR */
case 3: /* VRSRA */
if (u)
@@ -6354,10 +6849,6 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
else
gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
break;
- case 4: /* VSRI */
- case 5: /* VSHL, VSLI */
- gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
- break;
case 6: /* VQSHLU */
gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
cpu_V0, cpu_V1);
@@ -6371,26 +6862,13 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
cpu_V0, cpu_V1);
}
break;
+ default:
+ g_assert_not_reached();
}
- if (op == 1 || op == 3) {
+ if (op == 3) {
/* Accumulate. */
neon_load_reg64(cpu_V1, rd + pass);
tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
- } else if (op == 4 || (op == 5 && u)) {
- /* Insert */
- neon_load_reg64(cpu_V1, rd + pass);
- uint64_t mask;
- if (shift < -63 || shift > 63) {
- mask = 0;
- } else {
- if (op == 4) {
- mask = 0xffffffffffffffffull >> -shift;
- } else {
- mask = 0xffffffffffffffffull << shift;
- }
- }
- tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
- tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
}
neon_store_reg64(cpu_V0, rd + pass);
} else { /* size < 3 */
@@ -6399,23 +6877,10 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
tmp2 = tcg_temp_new_i32();
tcg_gen_movi_i32(tmp2, imm);
switch (op) {
- case 0: /* VSHR */
- case 1: /* VSRA */
- GEN_NEON_INTEGER_OP(shl);
- break;
case 2: /* VRSHR */
case 3: /* VRSRA */
GEN_NEON_INTEGER_OP(rshl);
break;
- case 4: /* VSRI */
- case 5: /* VSHL, VSLI */
- switch (size) {
- case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
- case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
- case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
- default: abort();
- }
- break;
case 6: /* VQSHLU */
switch (size) {
case 0:
@@ -6437,50 +6902,16 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
case 7: /* VQSHL */
GEN_NEON_INTEGER_OP_ENV(qshl);
break;
+ default:
+ g_assert_not_reached();
}
tcg_temp_free_i32(tmp2);
- if (op == 1 || op == 3) {
+ if (op == 3) {
/* Accumulate. */
tmp2 = neon_load_reg(rd, pass);
gen_neon_add(size, tmp, tmp2);
tcg_temp_free_i32(tmp2);
- } else if (op == 4 || (op == 5 && u)) {
- /* Insert */
- switch (size) {
- case 0:
- if (op == 4)
- mask = 0xff >> -shift;
- else
- mask = (uint8_t)(0xff << shift);
- mask |= mask << 8;
- mask |= mask << 16;
- break;
- case 1:
- if (op == 4)
- mask = 0xffff >> -shift;
- else
- mask = (uint16_t)(0xffff << shift);
- mask |= mask << 16;
- break;
- case 2:
- if (shift < -31 || shift > 31) {
- mask = 0;
- } else {
- if (op == 4)
- mask = 0xffffffffu >> -shift;
- else
- mask = 0xffffffffu << shift;
- }
- break;
- default:
- abort();
- }
- tmp2 = neon_load_reg(rd, pass);
- tcg_gen_andi_i32(tmp, tmp, mask);
- tcg_gen_andi_i32(tmp2, tmp2, ~mask);
- tcg_gen_or_i32(tmp, tmp, tmp2);
- tcg_temp_free_i32(tmp2);
}
neon_store_reg(rd, pass, tmp);
}
@@ -6629,7 +7060,8 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
return 1;
}
} else { /* (insn & 0x00380080) == 0 */
- int invert;
+ int invert, reg_ofs, vec_size;
+
if (q && (rd & 1)) {
return 1;
}
@@ -6669,8 +7101,9 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
break;
case 14:
imm |= (imm << 8) | (imm << 16) | (imm << 24);
- if (invert)
+ if (invert) {
imm = ~imm;
+ }
break;
case 15:
if (invert) {
@@ -6680,36 +7113,45 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
| ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
break;
}
- if (invert)
+ if (invert) {
imm = ~imm;
+ }
- for (pass = 0; pass < (q ? 4 : 2); pass++) {
- if (op & 1 && op < 12) {
- tmp = neon_load_reg(rd, pass);
- if (invert) {
- /* The immediate value has already been inverted, so
- BIC becomes AND. */
- tcg_gen_andi_i32(tmp, tmp, imm);
- } else {
- tcg_gen_ori_i32(tmp, tmp, imm);
- }
+ reg_ofs = neon_reg_offset(rd, 0);
+ vec_size = q ? 16 : 8;
+
+ if (op & 1 && op < 12) {
+ if (invert) {
+ /* The immediate value has already been inverted,
+ * so BIC becomes AND.
+ */
+ tcg_gen_gvec_andi(MO_32, reg_ofs, reg_ofs, imm,
+ vec_size, vec_size);
} else {
- /* VMOV, VMVN. */
- tmp = tcg_temp_new_i32();
- if (op == 14 && invert) {
+ tcg_gen_gvec_ori(MO_32, reg_ofs, reg_ofs, imm,
+ vec_size, vec_size);
+ }
+ } else {
+ /* VMOV, VMVN. */
+ if (op == 14 && invert) {
+ TCGv_i64 t64 = tcg_temp_new_i64();
+
+ for (pass = 0; pass <= q; ++pass) {
+ uint64_t val = 0;
int n;
- uint32_t val;
- val = 0;
- for (n = 0; n < 4; n++) {
- if (imm & (1 << (n + (pass & 1) * 4)))
- val |= 0xff << (n * 8);
+
+ for (n = 0; n < 8; n++) {
+ if (imm & (1 << (n + pass * 8))) {
+ val |= 0xffull << (n * 8);
+ }
}
- tcg_gen_movi_i32(tmp, val);
- } else {
- tcg_gen_movi_i32(tmp, imm);
+ tcg_gen_movi_i64(t64, val);
+ neon_store_reg64(t64, rd + pass);
}
+ tcg_temp_free_i64(t64);
+ } else {
+ tcg_gen_gvec_dup32i(reg_ofs, vec_size, vec_size, imm);
}
- neon_store_reg(rd, pass, tmp);
}
}
} else { /* (insn & 0x00800010 == 0x00800000) */
@@ -6768,7 +7210,7 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
if (op == 14 && size == 2) {
TCGv_i64 tcg_rn, tcg_rm, tcg_rd;
- if (!arm_dc_feature(s, ARM_FEATURE_V8_PMULL)) {
+ if (!dc_isar_feature(aa32_pmull, s)) {
return 1;
}
tcg_rn = tcg_temp_new_i64();
@@ -7085,7 +7527,7 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
{
NeonGenThreeOpEnvFn *fn;
- if (!arm_dc_feature(s, ARM_FEATURE_V8_RDM)) {
+ if (!dc_isar_feature(aa32_rdm, s)) {
return 1;
}
if (u && ((rd | rn) & 1)) {
@@ -7359,8 +7801,7 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
break;
}
case NEON_2RM_AESE: case NEON_2RM_AESMC:
- if (!arm_dc_feature(s, ARM_FEATURE_V8_AES)
- || ((rm | rd) & 1)) {
+ if (!dc_isar_feature(aa32_aes, s) || ((rm | rd) & 1)) {
return 1;
}
ptr1 = vfp_reg_ptr(true, rd);
@@ -7381,8 +7822,7 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
tcg_temp_free_i32(tmp3);
break;
case NEON_2RM_SHA1H:
- if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)
- || ((rm | rd) & 1)) {
+ if (!dc_isar_feature(aa32_sha1, s) || ((rm | rd) & 1)) {
return 1;
}
ptr1 = vfp_reg_ptr(true, rd);
@@ -7399,10 +7839,10 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
}
/* bit 6 (q): set -> SHA256SU0, cleared -> SHA1SU1 */
if (q) {
- if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256)) {
+ if (!dc_isar_feature(aa32_sha2, s)) {
return 1;
}
- } else if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
+ } else if (!dc_isar_feature(aa32_sha1, s)) {
return 1;
}
ptr1 = vfp_reg_ptr(true, rd);
@@ -7415,6 +7855,14 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
tcg_temp_free_ptr(ptr1);
tcg_temp_free_ptr(ptr2);
break;
+
+ case NEON_2RM_VMVN:
+ tcg_gen_gvec_not(0, rd_ofs, rm_ofs, vec_size, vec_size);
+ break;
+ case NEON_2RM_VNEG:
+ tcg_gen_gvec_neg(size, rd_ofs, rm_ofs, vec_size, vec_size);
+ break;
+
default:
elementwise:
for (pass = 0; pass < (q ? 4 : 2); pass++) {
@@ -7455,9 +7903,6 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
case NEON_2RM_VCNT:
gen_helper_neon_cnt_u8(tmp, tmp);
break;
- case NEON_2RM_VMVN:
- tcg_gen_not_i32(tmp, tmp);
- break;
case NEON_2RM_VQABS:
switch (size) {
case 0:
@@ -7530,11 +7975,6 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
default: abort();
}
break;
- case NEON_2RM_VNEG:
- tmp2 = tcg_const_i32(0);
- gen_neon_rsb(size, tmp, tmp2);
- tcg_temp_free_i32(tmp2);
- break;
case NEON_2RM_VCGT0_F:
{
TCGv_ptr fpstatus = get_fpstatus_ptr(1);
@@ -7757,28 +8197,25 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
tcg_temp_free_i32(tmp);
} else if ((insn & 0x380) == 0) {
/* VDUP */
+ int element;
+ TCGMemOp size;
+
if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
return 1;
}
- if (insn & (1 << 19)) {
- tmp = neon_load_reg(rm, 1);
- } else {
- tmp = neon_load_reg(rm, 0);
- }
if (insn & (1 << 16)) {
- gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
+ size = MO_8;
+ element = (insn >> 17) & 7;
} else if (insn & (1 << 17)) {
- if ((insn >> 18) & 1)
- gen_neon_dup_high16(tmp);
- else
- gen_neon_dup_low16(tmp);
- }
- for (pass = 0; pass < (q ? 4 : 2); pass++) {
- tmp2 = tcg_temp_new_i32();
- tcg_gen_mov_i32(tmp2, tmp);
- neon_store_reg(rd, pass, tmp2);
+ size = MO_16;
+ element = (insn >> 18) & 3;
+ } else {
+ size = MO_32;
+ element = (insn >> 19) & 1;
}
- tcg_temp_free_i32(tmp);
+ tcg_gen_gvec_dup_mem(size, neon_reg_offset(rd, 0),
+ neon_element_offset(rm, element, size),
+ q ? 16 : 8, q ? 16 : 8);
} else {
return 1;
}
@@ -7813,8 +8250,8 @@ static int disas_neon_insn_3same_ext(DisasContext *s, uint32_t insn)
/* VCMLA -- 1111 110R R.1S .... .... 1000 ...0 .... */
int size = extract32(insn, 20, 1);
data = extract32(insn, 23, 2); /* rot */
- if (!arm_dc_feature(s, ARM_FEATURE_V8_FCMA)
- || (!size && !arm_dc_feature(s, ARM_FEATURE_V8_FP16))) {
+ if (!dc_isar_feature(aa32_vcma, s)
+ || (!size && !dc_isar_feature(aa32_fp16_arith, s))) {
return 1;
}
fn_gvec_ptr = size ? gen_helper_gvec_fcmlas : gen_helper_gvec_fcmlah;
@@ -7822,15 +8259,15 @@ static int disas_neon_insn_3same_ext(DisasContext *s, uint32_t insn)
/* VCADD -- 1111 110R 1.0S .... .... 1000 ...0 .... */
int size = extract32(insn, 20, 1);
data = extract32(insn, 24, 1); /* rot */
- if (!arm_dc_feature(s, ARM_FEATURE_V8_FCMA)
- || (!size && !arm_dc_feature(s, ARM_FEATURE_V8_FP16))) {
+ if (!dc_isar_feature(aa32_vcma, s)
+ || (!size && !dc_isar_feature(aa32_fp16_arith, s))) {
return 1;
}
fn_gvec_ptr = size ? gen_helper_gvec_fcadds : gen_helper_gvec_fcaddh;
} else if ((insn & 0xfeb00f00) == 0xfc200d00) {
/* V[US]DOT -- 1111 1100 0.10 .... .... 1101 .Q.U .... */
bool u = extract32(insn, 4, 1);
- if (!arm_dc_feature(s, ARM_FEATURE_V8_DOTPROD)) {
+ if (!dc_isar_feature(aa32_dp, s)) {
return 1;
}
fn_gvec = u ? gen_helper_gvec_udot_b : gen_helper_gvec_sdot_b;
@@ -7840,7 +8277,7 @@ static int disas_neon_insn_3same_ext(DisasContext *s, uint32_t insn)
if (s->fp_excp_el) {
gen_exception_insn(s, 4, EXCP_UDEF,
- syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
+ syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
return 0;
}
if (!s->vfp_enabled) {
@@ -7892,11 +8329,11 @@ static int disas_neon_insn_2reg_scalar_ext(DisasContext *s, uint32_t insn)
int size = extract32(insn, 23, 1);
int index;
- if (!arm_dc_feature(s, ARM_FEATURE_V8_FCMA)) {
+ if (!dc_isar_feature(aa32_vcma, s)) {
return 1;
}
if (size == 0) {
- if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) {
+ if (!dc_isar_feature(aa32_fp16_arith, s)) {
return 1;
}
/* For fp16, rm is just Vm, and index is M. */
@@ -7913,7 +8350,7 @@ static int disas_neon_insn_2reg_scalar_ext(DisasContext *s, uint32_t insn)
} else if ((insn & 0xffb00f00) == 0xfe200d00) {
/* V[US]DOT -- 1111 1110 0.10 .... .... 1101 .Q.U .... */
int u = extract32(insn, 4, 1);
- if (!arm_dc_feature(s, ARM_FEATURE_V8_DOTPROD)) {
+ if (!dc_isar_feature(aa32_dp, s)) {
return 1;
}
fn_gvec = u ? gen_helper_gvec_udot_idx_b : gen_helper_gvec_sdot_idx_b;
@@ -7926,7 +8363,7 @@ static int disas_neon_insn_2reg_scalar_ext(DisasContext *s, uint32_t insn)
if (s->fp_excp_el) {
gen_exception_insn(s, 4, EXCP_UDEF,
- syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
+ syn_simd_access_trap(1, 0xe, false), s->fp_excp_el);
return 0;
}
if (!s->vfp_enabled) {
@@ -8889,8 +9326,7 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
* op1 == 3 is UNPREDICTABLE but handle as UNDEFINED.
* Bits 8, 10 and 11 should be zero.
*/
- if (!arm_dc_feature(s, ARM_FEATURE_CRC) || op1 == 0x3 ||
- (c & 0xd) != 0) {
+ if (!dc_isar_feature(aa32_crc32, s) || op1 == 0x3 || (c & 0xd) != 0) {
goto illegal_op;
}
@@ -9758,7 +10194,7 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
case 1:
case 3:
/* SDIV, UDIV */
- if (!arm_dc_feature(s, ARM_FEATURE_ARM_DIV)) {
+ if (!dc_isar_feature(arm_div, s)) {
goto illegal_op;
}
if (((insn >> 5) & 7) || (rd != 15)) {
@@ -10785,7 +11221,7 @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
case 0x28:
case 0x29:
case 0x2a:
- if (!arm_dc_feature(s, ARM_FEATURE_CRC)) {
+ if (!dc_isar_feature(aa32_crc32, s)) {
goto illegal_op;
}
break;
@@ -10966,7 +11402,7 @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
tmp2 = load_reg(s, rm);
if ((op & 0x50) == 0x10) {
/* sdiv, udiv */
- if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DIV)) {
+ if (!dc_isar_feature(thumb_div, s)) {
goto illegal_op;
}
if (op & 0x20)
@@ -12586,6 +13022,7 @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
CPUARMState *env = cs->env_ptr;
ARMCPU *cpu = arm_env_get_cpu(env);
+ dc->isar = &cpu->isar;
dc->pc = dc->base.pc_first;
dc->condjmp = 0;
@@ -12703,7 +13140,6 @@ static void arm_tr_tb_start(DisasContextBase *dcbase, CPUState *cpu)
tcg_gen_movi_i32(tmp, 0);
store_cpu_field(tmp, condexec_bits);
}
- tcg_clear_temp_count();
}
static void arm_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
@@ -13092,11 +13528,6 @@ void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb)
translator_loop(ops, &dc.base, cpu, tb);
}
-static const char *cpu_mode_names[16] = {
- "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
- "???", "???", "hyp", "und", "???", "???", "???", "sys"
-};
-
void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
int flags)
{
@@ -13162,7 +13593,7 @@ void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
psr & CPSR_V ? 'V' : '-',
psr & CPSR_T ? 'T' : 'A',
ns_status,
- cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
+ aarch32_mode_name(psr), (psr & 0x10) ? 32 : 26);
}
if (flags & CPU_DUMP_FPU) {
diff --git a/target/arm/translate.h b/target/arm/translate.h
index c1b65f3efb..1550aa8bc7 100644
--- a/target/arm/translate.h
+++ b/target/arm/translate.h
@@ -7,6 +7,7 @@
/* internal defines */
typedef struct DisasContext {
DisasContextBase base;
+ const ARMISARegisters *isar;
target_ulong pc;
target_ulong page_start;
@@ -190,4 +191,24 @@ static inline TCGv_i32 get_ahp_flag(void)
return ret;
}
+
+/* Vector operations shared between ARM and AArch64. */
+extern const GVecGen3 bsl_op;
+extern const GVecGen3 bit_op;
+extern const GVecGen3 bif_op;
+extern const GVecGen3 mla_op[4];
+extern const GVecGen3 mls_op[4];
+extern const GVecGen3 cmtst_op[4];
+extern const GVecGen2i ssra_op[4];
+extern const GVecGen2i usra_op[4];
+extern const GVecGen2i sri_op[4];
+extern const GVecGen2i sli_op[4];
+void gen_cmtst_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b);
+
+/*
+ * Forward to the isar_feature_* tests given a DisasContext pointer.
+ */
+#define dc_isar_feature(name, ctx) \
+ ({ DisasContext *ctx_ = (ctx); isar_feature_##name(ctx_->isar); })
+
#endif /* TARGET_ARM_TRANSLATE_H */
diff --git a/target/mips/mips-defs.h b/target/mips/mips-defs.h
index 71ea4ef892..5177618615 100644
--- a/target/mips/mips-defs.h
+++ b/target/mips/mips-defs.h
@@ -64,9 +64,11 @@
#define INSN_LOONGSON2E 0x0001000000000000ULL
#define INSN_LOONGSON2F 0x0002000000000000ULL
#define INSN_VR54XX 0x0004000000000000ULL
+#define INSN_R5900 0x0008000000000000ULL
/*
* bits 56-63: vendor-specific ASEs
*/
+#define ASE_MMI 0x0100000000000000ULL
/* MIPS CPU defines. */
#define CPU_MIPS1 (ISA_MIPS1)
@@ -74,6 +76,7 @@
#define CPU_MIPS3 (CPU_MIPS2 | ISA_MIPS3)
#define CPU_MIPS4 (CPU_MIPS3 | ISA_MIPS4)
#define CPU_VR54XX (CPU_MIPS4 | INSN_VR54XX)
+#define CPU_R5900 (CPU_MIPS3 | INSN_R5900)
#define CPU_LOONGSON2E (CPU_MIPS3 | INSN_LOONGSON2E)
#define CPU_LOONGSON2F (CPU_MIPS3 | INSN_LOONGSON2F)
diff --git a/target/mips/translate.c b/target/mips/translate.c
index 3a0bdd55c8..c44a751be9 100644
--- a/target/mips/translate.c
+++ b/target/mips/translate.c
@@ -1,5 +1,5 @@
/*
- * MIPS32 emulation for qemu: main translation routines.
+ * MIPS emulation for QEMU - main translation routines
*
* Copyright (c) 2004-2005 Jocelyn Mayer
* Copyright (c) 2006 Marius Groeger (FPU operations)
@@ -463,8 +463,10 @@ enum {
OPC_WSBH = (0x02 << 6) | OPC_BSHFL,
OPC_SEB = (0x10 << 6) | OPC_BSHFL,
OPC_SEH = (0x18 << 6) | OPC_BSHFL,
- OPC_ALIGN = (0x08 << 6) | OPC_BSHFL, /* 010.bp */
- OPC_ALIGN_END = (0x0B << 6) | OPC_BSHFL, /* 010.00 to 010.11 */
+ OPC_ALIGN = (0x08 << 6) | OPC_BSHFL, /* 010.bp (010.00 to 010.11) */
+ OPC_ALIGN_1 = (0x09 << 6) | OPC_BSHFL,
+ OPC_ALIGN_2 = (0x0A << 6) | OPC_BSHFL,
+ OPC_ALIGN_3 = (0x0B << 6) | OPC_BSHFL,
OPC_BITSWAP = (0x00 << 6) | OPC_BSHFL /* 00000 */
};
@@ -474,8 +476,14 @@ enum {
enum {
OPC_DSBH = (0x02 << 6) | OPC_DBSHFL,
OPC_DSHD = (0x05 << 6) | OPC_DBSHFL,
- OPC_DALIGN = (0x08 << 6) | OPC_DBSHFL, /* 01.bp */
- OPC_DALIGN_END = (0x0F << 6) | OPC_DBSHFL, /* 01.000 to 01.111 */
+ OPC_DALIGN = (0x08 << 6) | OPC_DBSHFL, /* 01.bp (01.000 to 01.111) */
+ OPC_DALIGN_1 = (0x09 << 6) | OPC_DBSHFL,
+ OPC_DALIGN_2 = (0x0A << 6) | OPC_DBSHFL,
+ OPC_DALIGN_3 = (0x0B << 6) | OPC_DBSHFL,
+ OPC_DALIGN_4 = (0x0C << 6) | OPC_DBSHFL,
+ OPC_DALIGN_5 = (0x0D << 6) | OPC_DBSHFL,
+ OPC_DALIGN_6 = (0x0E << 6) | OPC_DBSHFL,
+ OPC_DALIGN_7 = (0x0F << 6) | OPC_DBSHFL,
OPC_DBITSWAP = (0x00 << 6) | OPC_DBSHFL, /* 00000 */
};
@@ -1927,6 +1935,440 @@ enum {
OPC_MXU_Q8MACSU = 0x01,
};
+/*
+ * Overview of the TX79-specific instruction set
+ * =============================================
+ *
+ * The R5900 and the C790 have 128-bit wide GPRs, where the upper 64 bits
+ * are only used by the specific quadword (128-bit) LQ/SQ load/store
+ * instructions and certain multimedia instructions (MMIs). These MMIs
+ * configure the 128-bit data path as two 64-bit, four 32-bit, eight 16-bit
+ * or sixteen 8-bit paths.
+ *
+ * Reference:
+ *
+ * The Toshiba TX System RISC TX79 Core Architecture manual,
+ * https://wiki.qemu.org/File:C790.pdf
+ *
+ * Three-Operand Multiply and Multiply-Add (4 instructions)
+ * --------------------------------------------------------
+ * MADD [rd,] rs, rt Multiply/Add
+ * MADDU [rd,] rs, rt Multiply/Add Unsigned
+ * MULT [rd,] rs, rt Multiply (3-operand)
+ * MULTU [rd,] rs, rt Multiply Unsigned (3-operand)
+ *
+ * Multiply Instructions for Pipeline 1 (10 instructions)
+ * ------------------------------------------------------
+ * MULT1 [rd,] rs, rt Multiply Pipeline 1
+ * MULTU1 [rd,] rs, rt Multiply Unsigned Pipeline 1
+ * DIV1 rs, rt Divide Pipeline 1
+ * DIVU1 rs, rt Divide Unsigned Pipeline 1
+ * MADD1 [rd,] rs, rt Multiply-Add Pipeline 1
+ * MADDU1 [rd,] rs, rt Multiply-Add Unsigned Pipeline 1
+ * MFHI1 rd Move From HI1 Register
+ * MFLO1 rd Move From LO1 Register
+ * MTHI1 rs Move To HI1 Register
+ * MTLO1 rs Move To LO1 Register
+ *
+ * Arithmetic (19 instructions)
+ * ----------------------------
+ * PADDB rd, rs, rt Parallel Add Byte
+ * PSUBB rd, rs, rt Parallel Subtract Byte
+ * PADDH rd, rs, rt Parallel Add Halfword
+ * PSUBH rd, rs, rt Parallel Subtract Halfword
+ * PADDW rd, rs, rt Parallel Add Word
+ * PSUBW rd, rs, rt Parallel Subtract Word
+ * PADSBH rd, rs, rt Parallel Add/Subtract Halfword
+ * PADDSB rd, rs, rt Parallel Add with Signed Saturation Byte
+ * PSUBSB rd, rs, rt Parallel Subtract with Signed Saturation Byte
+ * PADDSH rd, rs, rt Parallel Add with Signed Saturation Halfword
+ * PSUBSH rd, rs, rt Parallel Subtract with Signed Saturation Halfword
+ * PADDSW rd, rs, rt Parallel Add with Signed Saturation Word
+ * PSUBSW rd, rs, rt Parallel Subtract with Signed Saturation Word
+ * PADDUB rd, rs, rt Parallel Add with Unsigned saturation Byte
+ * PSUBUB rd, rs, rt Parallel Subtract with Unsigned saturation Byte
+ * PADDUH rd, rs, rt Parallel Add with Unsigned saturation Halfword
+ * PSUBUH rd, rs, rt Parallel Subtract with Unsigned saturation Halfword
+ * PADDUW rd, rs, rt Parallel Add with Unsigned saturation Word
+ * PSUBUW rd, rs, rt Parallel Subtract with Unsigned saturation Word
+ *
+ * Min/Max (4 instructions)
+ * ------------------------
+ * PMAXH rd, rs, rt Parallel Maximum Halfword
+ * PMINH rd, rs, rt Parallel Minimum Halfword
+ * PMAXW rd, rs, rt Parallel Maximum Word
+ * PMINW rd, rs, rt Parallel Minimum Word
+ *
+ * Absolute (2 instructions)
+ * -------------------------
+ * PABSH rd, rt Parallel Absolute Halfword
+ * PABSW rd, rt Parallel Absolute Word
+ *
+ * Logical (4 instructions)
+ * ------------------------
+ * PAND rd, rs, rt Parallel AND
+ * POR rd, rs, rt Parallel OR
+ * PXOR rd, rs, rt Parallel XOR
+ * PNOR rd, rs, rt Parallel NOR
+ *
+ * Shift (9 instructions)
+ * ----------------------
+ * PSLLH rd, rt, sa Parallel Shift Left Logical Halfword
+ * PSRLH rd, rt, sa Parallel Shift Right Logical Halfword
+ * PSRAH rd, rt, sa Parallel Shift Right Arithmetic Halfword
+ * PSLLW rd, rt, sa Parallel Shift Left Logical Word
+ * PSRLW rd, rt, sa Parallel Shift Right Logical Word
+ * PSRAW rd, rt, sa Parallel Shift Right Arithmetic Word
+ * PSLLVW rd, rt, rs Parallel Shift Left Logical Variable Word
+ * PSRLVW rd, rt, rs Parallel Shift Right Logical Variable Word
+ * PSRAVW rd, rt, rs Parallel Shift Right Arithmetic Variable Word
+ *
+ * Compare (6 instructions)
+ * ------------------------
+ * PCGTB rd, rs, rt Parallel Compare for Greater Than Byte
+ * PCEQB rd, rs, rt Parallel Compare for Equal Byte
+ * PCGTH rd, rs, rt Parallel Compare for Greater Than Halfword
+ * PCEQH rd, rs, rt Parallel Compare for Equal Halfword
+ * PCGTW rd, rs, rt Parallel Compare for Greater Than Word
+ * PCEQW rd, rs, rt Parallel Compare for Equal Word
+ *
+ * LZC (1 instruction)
+ * -------------------
+ * PLZCW rd, rs Parallel Leading Zero or One Count Word
+ *
+ * Quadword Load and Store (2 instructions)
+ * ----------------------------------------
+ * LQ rt, offset(base) Load Quadword
+ * SQ rt, offset(base) Store Quadword
+ *
+ * Multiply and Divide (19 instructions)
+ * -------------------------------------
+ * PMULTW rd, rs, rt Parallel Multiply Word
+ * PMULTUW rd, rs, rt Parallel Multiply Unsigned Word
+ * PDIVW rs, rt Parallel Divide Word
+ * PDIVUW rs, rt Parallel Divide Unsigned Word
+ * PMADDW rd, rs, rt Parallel Multiply-Add Word
+ * PMADDUW rd, rs, rt Parallel Multiply-Add Unsigned Word
+ * PMSUBW rd, rs, rt Parallel Multiply-Subtract Word
+ * PMULTH rd, rs, rt Parallel Multiply Halfword
+ * PMADDH rd, rs, rt Parallel Multiply-Add Halfword
+ * PMSUBH rd, rs, rt Parallel Multiply-Subtract Halfword
+ * PHMADH rd, rs, rt Parallel Horizontal Multiply-Add Halfword
+ * PHMSBH rd, rs, rt Parallel Horizontal Multiply-Subtract Halfword
+ * PDIVBW rs, rt Parallel Divide Broadcast Word
+ * PMFHI rd Parallel Move From HI Register
+ * PMFLO rd Parallel Move From LO Register
+ * PMTHI rs Parallel Move To HI Register
+ * PMTLO rs Parallel Move To LO Register
+ * PMFHL rd Parallel Move From HI/LO Register
+ * PMTHL rs Parallel Move To HI/LO Register
+ *
+ * Pack/Extend (11 instructions)
+ * -----------------------------
+ * PPAC5 rd, rt Parallel Pack to 5 bits
+ * PPACB rd, rs, rt Parallel Pack to Byte
+ * PPACH rd, rs, rt Parallel Pack to Halfword
+ * PPACW rd, rs, rt Parallel Pack to Word
+ * PEXT5 rd, rt Parallel Extend Upper from 5 bits
+ * PEXTUB rd, rs, rt Parallel Extend Upper from Byte
+ * PEXTLB rd, rs, rt Parallel Extend Lower from Byte
+ * PEXTUH rd, rs, rt Parallel Extend Upper from Halfword
+ * PEXTLH rd, rs, rt Parallel Extend Lower from Halfword
+ * PEXTUW rd, rs, rt Parallel Extend Upper from Word
+ * PEXTLW rd, rs, rt Parallel Extend Lower from Word
+ *
+ * Others (16 instructions)
+ * ------------------------
+ * PCPYH rd, rt Parallel Copy Halfword
+ * PCPYLD rd, rs, rt Parallel Copy Lower Doubleword
+ * PCPYUD rd, rs, rt Parallel Copy Upper Doubleword
+ * PREVH rd, rt Parallel Reverse Halfword
+ * PINTH rd, rs, rt Parallel Interleave Halfword
+ * PINTEH rd, rs, rt Parallel Interleave Even Halfword
+ * PEXEH rd, rt Parallel Exchange Even Halfword
+ * PEXCH rd, rt Parallel Exchange Center Halfword
+ * PEXEW rd, rt Parallel Exchange Even Word
+ * PEXCW rd, rt Parallel Exchange Center Word
+ * QFSRV rd, rs, rt Quadword Funnel Shift Right Variable
+ * MFSA rd Move from Shift Amount Register
+ * MTSA rs Move to Shift Amount Register
+ * MTSAB rs, immediate Move Byte Count to Shift Amount Register
+ * MTSAH rs, immediate Move Halfword Count to Shift Amount Register
+ * PROT3W rd, rt Parallel Rotate 3 Words
+ *
+ * The TX79-specific Multimedia Instruction encodings
+ * ==================================================
+ *
+ * TX79 Multimedia Instruction encoding table keys:
+ *
+ * * This code is reserved for future use. An attempt to execute it
+ * causes a Reserved Instruction exception.
+ * % This code indicates an instruction class. The instruction word
+ * must be further decoded by examining additional tables that show
+ * the values for other instruction fields.
+ * # This code is reserved for the unsupported instructions DMULT,
+ * DMULTU, DDIV, DDIVU, LL, LLD, SC, SCD, LWC2 and SWC2. An attempt
+ * to execute it causes a Reserved Instruction exception.
+ *
+ * TX79 Multimedia Instructions encoded by opcode field (MMI, LQ, SQ):
+ *
+ * 31 26 0
+ * +--------+----------------------------------------+
+ * | opcode | |
+ * +--------+----------------------------------------+
+ *
+ * opcode bits 28..26
+ * bits | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7
+ * 31..29 | 000 | 001 | 010 | 011 | 100 | 101 | 110 | 111
+ * -------+-------+-------+-------+-------+-------+-------+-------+-------
+ * 0 000 |SPECIAL| REGIMM| J | JAL | BEQ | BNE | BLEZ | BGTZ
+ * 1 001 | ADDI | ADDIU | SLTI | SLTIU | ANDI | ORI | XORI | LUI
+ * 2 010 | COP0 | COP1 | * | * | BEQL | BNEL | BLEZL | BGTZL
+ * 3 011 | DADDI | DADDIU| LDL | LDR | MMI% | * | LQ | SQ
+ * 4 100 | LB | LH | LWL | LW | LBU | LHU | LWR | LWU
+ * 5 101 | SB | SH | SWL | SW | SDL | SDR | SWR | CACHE
+ * 6 110 | # | LWC1 | # | PREF | # | LDC1 | # | LD
+ * 7 111 | # | SWC1 | # | * | # | SDC1 | # | SD
+ */
+
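[Editorial aside, not part of the patch] The data-path description above (one 128-bit register split into two 64-bit, four 32-bit, eight 16-bit or sixteen 8-bit lanes) can be illustrated with a small, self-contained C model of PADDH; the function name and byte-array representation of the 128-bit GPR are purely illustrative.

#include <stdint.h>
#include <string.h>

/* PADDH rd, rs, rt modelled as eight independent 16-bit additions,
 * each lane wrapping on overflow (the saturating variant is PADDSH). */
static void paddh_model(uint8_t rd[16], const uint8_t rs[16],
                        const uint8_t rt[16])
{
    for (int i = 0; i < 8; i++) {
        uint16_t a, b, r;

        memcpy(&a, rs + 2 * i, 2);
        memcpy(&b, rt + 2 * i, 2);
        r = (uint16_t)(a + b);
        memcpy(rd + 2 * i, &r, 2);
    }
}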
+enum {
+ TX79_CLASS_MMI = 0x1C << 26, /* Same as OPC_SPECIAL2 */
+ TX79_LQ = 0x1E << 26, /* Same as OPC_MSA */
+ TX79_SQ = 0x1F << 26, /* Same as OPC_SPECIAL3 */
+};
+
+/*
+ * TX79 Multimedia Instructions with opcode field = MMI:
+ *
+ * 31 26 5 0
+ * +--------+-------------------------------+--------+
+ * | MMI | |function|
+ * +--------+-------------------------------+--------+
+ *
+ * function bits 2..0
+ * bits | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7
+ * 5..3 | 000 | 001 | 010 | 011 | 100 | 101 | 110 | 111
+ * -------+-------+-------+-------+-------+-------+-------+-------+-------
+ * 0 000 | MADD | MADDU | * | * | PLZCW | * | * | *
+ * 1 001 | MMI0% | MMI2% | * | * | * | * | * | *
+ * 2 010 | MFHI1 | MTHI1 | MFLO1 | MTLO1 | * | * | * | *
+ * 3 011 | MULT1 | MULTU1| DIV1 | DIVU1 | * | * | * | *
+ * 4 100 | MADD1 | MADDU1| * | * | * | * | * | *
+ * 5 101 | MMI1% | MMI3% | * | * | * | * | * | *
+ * 6 110 | PMFHL | PMTHL | * | * | PSLLH | * | PSRLH | PSRAH
+ * 7 111 | * | * | * | * | PSLLW | * | PSRLW | PSRAW
+ */
+
+#define MASK_TX79_MMI(op) (MASK_OP_MAJOR(op) | ((op) & 0x3F))
+enum {
+ TX79_MMI_MADD = 0x00 | TX79_CLASS_MMI, /* Same as OPC_MADD */
+ TX79_MMI_MADDU = 0x01 | TX79_CLASS_MMI, /* Same as OPC_MADDU */
+ TX79_MMI_PLZCW = 0x04 | TX79_CLASS_MMI,
+ TX79_MMI_CLASS_MMI0 = 0x08 | TX79_CLASS_MMI,
+ TX79_MMI_CLASS_MMI2 = 0x09 | TX79_CLASS_MMI,
+ TX79_MMI_MFHI1 = 0x10 | TX79_CLASS_MMI, /* Same minor as OPC_MFHI */
+ TX79_MMI_MTHI1 = 0x11 | TX79_CLASS_MMI, /* Same minor as OPC_MTHI */
+ TX79_MMI_MFLO1 = 0x12 | TX79_CLASS_MMI, /* Same minor as OPC_MFLO */
+ TX79_MMI_MTLO1 = 0x13 | TX79_CLASS_MMI, /* Same minor as OPC_MTLO */
+ TX79_MMI_MULT1 = 0x18 | TX79_CLASS_MMI, /* Same minor as OPC_MULT */
+ TX79_MMI_MULTU1 = 0x19 | TX79_CLASS_MMI, /* Same minor as OPC_MULTU */
+ TX79_MMI_DIV1 = 0x1A | TX79_CLASS_MMI, /* Same minor as OPC_DIV */
+ TX79_MMI_DIVU1 = 0x1B | TX79_CLASS_MMI, /* Same minor as OPC_DIVU */
+ TX79_MMI_MADD1 = 0x20 | TX79_CLASS_MMI,
+ TX79_MMI_MADDU1 = 0x21 | TX79_CLASS_MMI,
+ TX79_MMI_CLASS_MMI1 = 0x28 | TX79_CLASS_MMI,
+ TX79_MMI_CLASS_MMI3 = 0x29 | TX79_CLASS_MMI,
+ TX79_MMI_PMFHL = 0x30 | TX79_CLASS_MMI,
+ TX79_MMI_PMTHL = 0x31 | TX79_CLASS_MMI,
+ TX79_MMI_PSLLH = 0x34 | TX79_CLASS_MMI,
+ TX79_MMI_PSRLH = 0x36 | TX79_CLASS_MMI,
+ TX79_MMI_PSRAH = 0x37 | TX79_CLASS_MMI,
+ TX79_MMI_PSLLW = 0x3C | TX79_CLASS_MMI,
+ TX79_MMI_PSRLW = 0x3E | TX79_CLASS_MMI,
+ TX79_MMI_PSRAW = 0x3F | TX79_CLASS_MMI,
+};
+
+/*
+ * TX79 Multimedia Instructions with opcode field = MMI and bits 5..0 = MMI0:
+ *
+ * 31 26 10 6 5 0
+ * +--------+----------------------+--------+--------+
+ * | MMI | |function| MMI0 |
+ * +--------+----------------------+--------+--------+
+ *
+ * function bits 7..6
+ * bits | 0 | 1 | 2 | 3
+ * 10..8 | 00 | 01 | 10 | 11
+ * -------+-------+-------+-------+-------
+ * 0 000 | PADDW | PSUBW | PCGTW | PMAXW
+ * 1 001 | PADDH | PSUBH | PCGTH | PMAXH
+ * 2 010 | PADDB | PSUBB | PCGTB | *
+ * 3 011 | * | * | * | *
+ * 4 100 | PADDSW| PSUBSW| PEXTLW| PPACW
+ * 5 101 | PADDSH| PSUBSH| PEXTLH| PPACH
+ * 6 110 | PADDSB| PSUBSB| PEXTLB| PPACB
+ * 7 111 | * | * | PEXT5 | PPAC5
+ */
+
+#define MASK_TX79_MMI0(op) (MASK_OP_MAJOR(op) | ((op) & 0x7FF))
+enum {
+ TX79_MMI0_PADDW = (0x00 << 6) | TX79_MMI_CLASS_MMI0,
+ TX79_MMI0_PSUBW = (0x01 << 6) | TX79_MMI_CLASS_MMI0,
+ TX79_MMI0_PCGTW = (0x02 << 6) | TX79_MMI_CLASS_MMI0,
+ TX79_MMI0_PMAXW = (0x03 << 6) | TX79_MMI_CLASS_MMI0,
+ TX79_MMI0_PADDH = (0x04 << 6) | TX79_MMI_CLASS_MMI0,
+ TX79_MMI0_PSUBH = (0x05 << 6) | TX79_MMI_CLASS_MMI0,
+ TX79_MMI0_PCGTH = (0x06 << 6) | TX79_MMI_CLASS_MMI0,
+ TX79_MMI0_PMAXH = (0x07 << 6) | TX79_MMI_CLASS_MMI0,
+ TX79_MMI0_PADDB = (0x08 << 6) | TX79_MMI_CLASS_MMI0,
+ TX79_MMI0_PSUBB = (0x09 << 6) | TX79_MMI_CLASS_MMI0,
+ TX79_MMI0_PCGTB = (0x0A << 6) | TX79_MMI_CLASS_MMI0,
+ TX79_MMI0_PADDSW = (0x10 << 6) | TX79_MMI_CLASS_MMI0,
+ TX79_MMI0_PSUBSW = (0x11 << 6) | TX79_MMI_CLASS_MMI0,
+ TX79_MMI0_PEXTLW = (0x12 << 6) | TX79_MMI_CLASS_MMI0,
+ TX79_MMI0_PPACW = (0x13 << 6) | TX79_MMI_CLASS_MMI0,
+ TX79_MMI0_PADDSH = (0x14 << 6) | TX79_MMI_CLASS_MMI0,
+ TX79_MMI0_PSUBSH = (0x15 << 6) | TX79_MMI_CLASS_MMI0,
+ TX79_MMI0_PEXTLH = (0x16 << 6) | TX79_MMI_CLASS_MMI0,
+ TX79_MMI0_PPACH = (0x17 << 6) | TX79_MMI_CLASS_MMI0,
+ TX79_MMI0_PADDSB = (0x18 << 6) | TX79_MMI_CLASS_MMI0,
+ TX79_MMI0_PSUBSB = (0x19 << 6) | TX79_MMI_CLASS_MMI0,
+ TX79_MMI0_PEXTLB = (0x1A << 6) | TX79_MMI_CLASS_MMI0,
+ TX79_MMI0_PPACB = (0x1B << 6) | TX79_MMI_CLASS_MMI0,
+ TX79_MMI0_PEXT5 = (0x1E << 6) | TX79_MMI_CLASS_MMI0,
+ TX79_MMI0_PPAC5 = (0x1F << 6) | TX79_MMI_CLASS_MMI0,
+};
+
+/*
+ * TX79 Multimedia Instructions with opcode field = MMI and bits 5..0 = MMI1:
+ *
+ * 31 26 10 6 5 0
+ * +--------+----------------------+--------+--------+
+ * | MMI | |function| MMI1 |
+ * +--------+----------------------+--------+--------+
+ *
+ * function bits 7..6
+ * bits | 0 | 1 | 2 | 3
+ * 10..8 | 00 | 01 | 10 | 11
+ * -------+-------+-------+-------+-------
+ * 0 000 | * | PABSW | PCEQW | PMINW
+ * 1 001 | PADSBH| PABSH | PCEQH | PMINH
+ * 2 010 | * | * | PCEQB | *
+ * 3 011 | * | * | * | *
+ * 4 100 | PADDUW| PSUBUW| PEXTUW| *
+ * 5 101 | PADDUH| PSUBUH| PEXTUH| *
+ * 6 110 | PADDUB| PSUBUB| PEXTUB| QFSRV
+ * 7 111 | * | * | * | *
+ */
+
+#define MASK_TX79_MMI1(op) (MASK_OP_MAJOR(op) | ((op) & 0x7FF))
+enum {
+ TX79_MMI1_PABSW = (0x01 << 6) | TX79_MMI_CLASS_MMI1,
+ TX79_MMI1_PCEQW = (0x02 << 6) | TX79_MMI_CLASS_MMI1,
+ TX79_MMI1_PMINW = (0x03 << 6) | TX79_MMI_CLASS_MMI1,
+ TX79_MMI1_PADSBH = (0x04 << 6) | TX79_MMI_CLASS_MMI1,
+ TX79_MMI1_PABSH = (0x05 << 6) | TX79_MMI_CLASS_MMI1,
+ TX79_MMI1_PCEQH = (0x06 << 6) | TX79_MMI_CLASS_MMI1,
+ TX79_MMI1_PMINH = (0x07 << 6) | TX79_MMI_CLASS_MMI1,
+ TX79_MMI1_PCEQB = (0x0A << 6) | TX79_MMI_CLASS_MMI1,
+ TX79_MMI1_PADDUW = (0x10 << 6) | TX79_MMI_CLASS_MMI1,
+ TX79_MMI1_PSUBUW = (0x11 << 6) | TX79_MMI_CLASS_MMI1,
+ TX79_MMI1_PEXTUW = (0x12 << 6) | TX79_MMI_CLASS_MMI1,
+ TX79_MMI1_PADDUH = (0x14 << 6) | TX79_MMI_CLASS_MMI1,
+ TX79_MMI1_PSUBUH = (0x15 << 6) | TX79_MMI_CLASS_MMI1,
+ TX79_MMI1_PEXTUH = (0x16 << 6) | TX79_MMI_CLASS_MMI1,
+ TX79_MMI1_PADDUB = (0x18 << 6) | TX79_MMI_CLASS_MMI1,
+ TX79_MMI1_PSUBUB = (0x19 << 6) | TX79_MMI_CLASS_MMI1,
+ TX79_MMI1_PEXTUB = (0x1A << 6) | TX79_MMI_CLASS_MMI1,
+ TX79_MMI1_QFSRV = (0x1B << 6) | TX79_MMI_CLASS_MMI1,
+};
+
+/*
+ * TX79 Multimedia Instructions with opcode field = MMI and bits 5..0 = MMI2:
+ *
+ * 31 26 10 6 5 0
+ * +--------+----------------------+--------+--------+
+ * | MMI | |function| MMI2 |
+ * +--------+----------------------+--------+--------+
+ *
+ * function bits 7..6
+ * bits | 0 | 1 | 2 | 3
+ * 10..8 | 00 | 01 | 10 | 11
+ * -------+-------+-------+-------+-------
+ * 0 000 | PMADDW| * | PSLLVW| PSRLVW
+ * 1 001 | PMSUBW| * | * | *
+ * 2 010 | PMFHI | PMFLO | PINTH | *
+ * 3 011 | PMULTW| PDIVW | PCPYLD| *
+ * 4 100 | PMADDH| PHMADH| PAND | PXOR
+ * 5 101 | PMSUBH| PHMSBH| * | *
+ * 6 110 | * | * | PEXEH | PREVH
+ * 7 111 | PMULTH| PDIVBW| PEXEW | PROT3W
+ */
+
+#define MASK_TX79_MMI2(op) (MASK_OP_MAJOR(op) | ((op) & 0x7FF))
+enum {
+ TX79_MMI2_PMADDW = (0x00 << 6) | TX79_MMI_CLASS_MMI2,
+ TX79_MMI2_PSLLVW = (0x02 << 6) | TX79_MMI_CLASS_MMI2,
+ TX79_MMI2_PSRLVW = (0x03 << 6) | TX79_MMI_CLASS_MMI2,
+ TX79_MMI2_PMSUBW = (0x04 << 6) | TX79_MMI_CLASS_MMI2,
+ TX79_MMI2_PMFHI = (0x08 << 6) | TX79_MMI_CLASS_MMI2,
+ TX79_MMI2_PMFLO = (0x09 << 6) | TX79_MMI_CLASS_MMI2,
+ TX79_MMI2_PINTH = (0x0A << 6) | TX79_MMI_CLASS_MMI2,
+ TX79_MMI2_PMULTW = (0x0C << 6) | TX79_MMI_CLASS_MMI2,
+ TX79_MMI2_PDIVW = (0x0D << 6) | TX79_MMI_CLASS_MMI2,
+ TX79_MMI2_PCPYLD = (0x0E << 6) | TX79_MMI_CLASS_MMI2,
+ TX79_MMI2_PMADDH = (0x10 << 6) | TX79_MMI_CLASS_MMI2,
+ TX79_MMI2_PHMADH = (0x11 << 6) | TX79_MMI_CLASS_MMI2,
+ TX79_MMI2_PAND = (0x12 << 6) | TX79_MMI_CLASS_MMI2,
+ TX79_MMI2_PXOR = (0x13 << 6) | TX79_MMI_CLASS_MMI2,
+ TX79_MMI2_PMSUBH = (0x14 << 6) | TX79_MMI_CLASS_MMI2,
+ TX79_MMI2_PHMSBH = (0x15 << 6) | TX79_MMI_CLASS_MMI2,
+ TX79_MMI2_PEXEH = (0x1A << 6) | TX79_MMI_CLASS_MMI2,
+ TX79_MMI2_PREVH = (0x1B << 6) | TX79_MMI_CLASS_MMI2,
+ TX79_MMI2_PMULTH = (0x1C << 6) | TX79_MMI_CLASS_MMI2,
+ TX79_MMI2_PDIVBW = (0x1D << 6) | TX79_MMI_CLASS_MMI2,
+ TX79_MMI2_PEXEW = (0x1E << 6) | TX79_MMI_CLASS_MMI2,
+ TX79_MMI2_PROT3W = (0x1F << 6) | TX79_MMI_CLASS_MMI2,
+};
+
+/*
+ * TX79 Multimedia Instructions with opcode field = MMI and bits 5..0 = MMI3:
+ *
+ * 31 26 10 6 5 0
+ * +--------+----------------------+--------+--------+
+ * | MMI | |function| MMI3 |
+ * +--------+----------------------+--------+--------+
+ *
+ * function bits 7..6
+ * bits | 0 | 1 | 2 | 3
+ * 10..8 | 00 | 01 | 10 | 11
+ * -------+-------+-------+-------+-------
+ * 0 000 |PMADDUW| * | * | PSRAVW
+ * 1 001 | * | * | * | *
+ * 2 010 | PMTHI | PMTLO | PINTEH| *
+ * 3 011 |PMULTUW| PDIVUW| PCPYUD| *
+ * 4 100 | * | * | POR | PNOR
+ * 5 101 | * | * | * | *
+ * 6 110 | * | * | PEXCH | PCPYH
+ * 7 111 | * | * | PEXCW | *
+ */
+
+#define MASK_TX79_MMI3(op) (MASK_OP_MAJOR(op) | ((op) & 0x7FF))
+enum {
+ TX79_MMI3_PMADDUW = (0x00 << 6) | TX79_MMI_CLASS_MMI3,
+ TX79_MMI3_PSRAVW = (0x03 << 6) | TX79_MMI_CLASS_MMI3,
+ TX79_MMI3_PMTHI = (0x08 << 6) | TX79_MMI_CLASS_MMI3,
+ TX79_MMI3_PMTLO = (0x09 << 6) | TX79_MMI_CLASS_MMI3,
+ TX79_MMI3_PINTEH = (0x0A << 6) | TX79_MMI_CLASS_MMI3,
+ TX79_MMI3_PMULTUW = (0x0C << 6) | TX79_MMI_CLASS_MMI3,
+ TX79_MMI3_PDIVUW = (0x0D << 6) | TX79_MMI_CLASS_MMI3,
+ TX79_MMI3_PCPYUD = (0x0E << 6) | TX79_MMI_CLASS_MMI3,
+ TX79_MMI3_POR = (0x12 << 6) | TX79_MMI_CLASS_MMI3,
+ TX79_MMI3_PNOR = (0x13 << 6) | TX79_MMI_CLASS_MMI3,
+ TX79_MMI3_PEXCH = (0x1A << 6) | TX79_MMI_CLASS_MMI3,
+ TX79_MMI3_PCPYH = (0x1B << 6) | TX79_MMI_CLASS_MMI3,
+ TX79_MMI3_PEXCW = (0x1E << 6) | TX79_MMI_CLASS_MMI3,
+};
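[Editorial aside, not part of the patch] The MASK_TX79_MMI*() keys above combine the major opcode in bits 31..26 with the function field in the low bits. A stand-alone sketch of that reduction (deliberately not using QEMU's own MASK_OP_MAJOR helper) might look like this; the encoded DIV1 example word is an assumption chosen to match TX79_MMI_DIV1 above.

#include <stdint.h>
#include <stdio.h>

static uint32_t mask_tx79_mmi(uint32_t insn)
{
    uint32_t major = insn & (0x3Fu << 26); /* bits 31..26: MMI class   */
    uint32_t func  = insn & 0x3Fu;         /* bits  5..0: MMI function */

    return major | func;
}

int main(void)
{
    /* DIV1 $4, $5: opcode MMI (0x1C), rs = 4, rt = 5, function 0x1A. */
    uint32_t insn = (0x1Cu << 26) | (4u << 21) | (5u << 16) | 0x1Au;

    printf("key = 0x%08x\n", mask_tx79_mmi(insn)); /* equals TX79_MMI_DIV1 */
    return 0;
}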
/* global register indices */
static TCGv cpu_gpr[32], cpu_PC;
@@ -2438,6 +2880,21 @@ static inline void check_insn_opc_removed(DisasContext *ctx, uint64_t flags)
}
}
+/*
+ * The Linux kernel traps certain reserved instruction exceptions to
+ * emulate the corresponding instructions. In user mode, QEMU acts as the
+ * kernel, so those traps are emulated by accepting the instructions.
+ *
+ * A reserved instruction exception is generated for flagged CPUs if
+ * QEMU runs in system mode.
+ */
+static inline void check_insn_opc_user_only(DisasContext *ctx, uint64_t flags)
+{
+#ifndef CONFIG_USER_ONLY
+ check_insn_opc_removed(ctx, flags);
+#endif
+}
+
/* This code generates a "reserved instruction" exception if the
CPU does not support 64-bit paired-single (PS) floating point data type */
static inline void check_ps(DisasContext *ctx)
@@ -3795,17 +4252,21 @@ static void gen_shift(DisasContext *ctx, uint32_t opc,
/* Arithmetic on HI/LO registers */
static void gen_HILO(DisasContext *ctx, uint32_t opc, int acc, int reg)
{
- if (reg == 0 && (opc == OPC_MFHI || opc == OPC_MFLO)) {
+ if (reg == 0 && (opc == OPC_MFHI || opc == TX79_MMI_MFHI1 ||
+ opc == OPC_MFLO || opc == TX79_MMI_MFLO1)) {
/* Treat as NOP. */
return;
}
if (acc != 0) {
- check_dsp(ctx);
+ if (!(ctx->insn_flags & INSN_R5900)) {
+ check_dsp(ctx);
+ }
}
switch (opc) {
case OPC_MFHI:
+ case TX79_MMI_MFHI1:
#if defined(TARGET_MIPS64)
if (acc != 0) {
tcg_gen_ext32s_tl(cpu_gpr[reg], cpu_HI[acc]);
@@ -3816,6 +4277,7 @@ static void gen_HILO(DisasContext *ctx, uint32_t opc, int acc, int reg)
}
break;
case OPC_MFLO:
+ case TX79_MMI_MFLO1:
#if defined(TARGET_MIPS64)
if (acc != 0) {
tcg_gen_ext32s_tl(cpu_gpr[reg], cpu_LO[acc]);
@@ -3826,6 +4288,7 @@ static void gen_HILO(DisasContext *ctx, uint32_t opc, int acc, int reg)
}
break;
case OPC_MTHI:
+ case TX79_MMI_MTHI1:
if (reg != 0) {
#if defined(TARGET_MIPS64)
if (acc != 0) {
@@ -3840,6 +4303,7 @@ static void gen_HILO(DisasContext *ctx, uint32_t opc, int acc, int reg)
}
break;
case OPC_MTLO:
+ case TX79_MMI_MTLO1:
if (reg != 0) {
#if defined(TARGET_MIPS64)
if (acc != 0) {
@@ -4152,11 +4616,14 @@ static void gen_muldiv(DisasContext *ctx, uint32_t opc,
gen_load_gpr(t1, rt);
if (acc != 0) {
- check_dsp(ctx);
+ if (!(ctx->insn_flags & INSN_R5900)) {
+ check_dsp(ctx);
+ }
}
switch (opc) {
case OPC_DIV:
+ case TX79_MMI_DIV1:
{
TCGv t2 = tcg_temp_new();
TCGv t3 = tcg_temp_new();
@@ -4178,6 +4645,7 @@ static void gen_muldiv(DisasContext *ctx, uint32_t opc,
}
break;
case OPC_DIVU:
+ case TX79_MMI_DIVU1:
{
TCGv t2 = tcg_const_tl(0);
TCGv t3 = tcg_const_tl(1);
@@ -4332,6 +4800,84 @@ static void gen_muldiv(DisasContext *ctx, uint32_t opc,
tcg_temp_free(t1);
}
+/*
+ * The MULT and MULTU instructions implemented in, for example, the
+ * Toshiba/Sony R5900 and the Toshiba TX19, TX39 and TX79 core
+ * architectures are special three-operand variants with the syntax
+ *
+ * MULT[U][1] rd, rs, rt
+ *
+ * such that
+ *
+ * (rd, LO, HI) <- rs * rt
+ *
+ * where the low-order 32 bits of the result are placed into both the
+ * GPR rd and the special register LO. The high-order 32 bits of the
+ * result are placed into the special register HI.
+ *
+ * If the GPR rd is omitted in assembly language, it is taken to be 0,
+ * which is the zero register that always reads as 0.
+ */
+static void gen_mul_txx9(DisasContext *ctx, uint32_t opc,
+ int rd, int rs, int rt)
+{
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ int acc = 0;
+
+ gen_load_gpr(t0, rs);
+ gen_load_gpr(t1, rt);
+
+ switch (opc) {
+ case TX79_MMI_MULT1:
+ acc = 1;
+ /* Fall through */
+ case OPC_MULT:
+ {
+ TCGv_i32 t2 = tcg_temp_new_i32();
+ TCGv_i32 t3 = tcg_temp_new_i32();
+ tcg_gen_trunc_tl_i32(t2, t0);
+ tcg_gen_trunc_tl_i32(t3, t1);
+ tcg_gen_muls2_i32(t2, t3, t2, t3);
+ if (rd) {
+ tcg_gen_ext_i32_tl(cpu_gpr[rd], t2);
+ }
+ tcg_gen_ext_i32_tl(cpu_LO[acc], t2);
+ tcg_gen_ext_i32_tl(cpu_HI[acc], t3);
+ tcg_temp_free_i32(t2);
+ tcg_temp_free_i32(t3);
+ }
+ break;
+ case TX79_MMI_MULTU1:
+ acc = 1;
+ /* Fall through */
+ case OPC_MULTU:
+ {
+ TCGv_i32 t2 = tcg_temp_new_i32();
+ TCGv_i32 t3 = tcg_temp_new_i32();
+ tcg_gen_trunc_tl_i32(t2, t0);
+ tcg_gen_trunc_tl_i32(t3, t1);
+ tcg_gen_mulu2_i32(t2, t3, t2, t3);
+ if (rd) {
+ tcg_gen_ext_i32_tl(cpu_gpr[rd], t2);
+ }
+ tcg_gen_ext_i32_tl(cpu_LO[acc], t2);
+ tcg_gen_ext_i32_tl(cpu_HI[acc], t3);
+ tcg_temp_free_i32(t2);
+ tcg_temp_free_i32(t3);
+ }
+ break;
+ default:
+ MIPS_INVAL("mul TXx9");
+ generate_exception_end(ctx, EXCP_RI);
+ goto out;
+ }
+
+ out:
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+}
+
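[Editorial aside, not part of the patch] The (rd, LO, HI) <- rs * rt semantics documented above can be checked with a tiny host-side C example; the variable names mirror the register names and are purely illustrative.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    int32_t rs = -3, rt = 100000;
    int64_t prod = (int64_t)rs * rt;
    int32_t lo = (int32_t)prod;         /* low 32 bits -> GPR rd and LO */
    int32_t hi = (int32_t)(prod >> 32); /* high 32 bits -> HI           */

    printf("rd = LO = %d, HI = %d\n", lo, hi);
    return 0;
}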
static void gen_mul_vr54xx (DisasContext *ctx, uint32_t opc,
int rd, int rs, int rt)
{
@@ -23029,7 +23575,7 @@ static void decode_opc_special_legacy(CPUMIPSState *env, DisasContext *ctx)
case OPC_MOVN: /* Conditional move */
case OPC_MOVZ:
check_insn(ctx, ISA_MIPS4 | ISA_MIPS32 |
- INSN_LOONGSON2E | INSN_LOONGSON2F);
+ INSN_LOONGSON2E | INSN_LOONGSON2F | INSN_R5900);
gen_cond_move(ctx, op1, rd, rs, rt);
break;
case OPC_MFHI: /* Move from HI/LO */
@@ -23056,6 +23602,8 @@ static void decode_opc_special_legacy(CPUMIPSState *env, DisasContext *ctx)
check_insn(ctx, INSN_VR54XX);
op1 = MASK_MUL_VR54XX(ctx->opcode);
gen_mul_vr54xx(ctx, op1, rd, rs, rt);
+ } else if (ctx->insn_flags & INSN_R5900) {
+ gen_mul_txx9(ctx, op1, rd, rs, rt);
} else {
gen_muldiv(ctx, op1, rd & 3, rs, rt);
}
@@ -23070,6 +23618,7 @@ static void decode_opc_special_legacy(CPUMIPSState *env, DisasContext *ctx)
case OPC_DDIV:
case OPC_DDIVU:
check_insn(ctx, ISA_MIPS3);
+ check_insn_opc_user_only(ctx, INSN_R5900);
check_mips_64(ctx);
gen_muldiv(ctx, op1, 0, rs, rt);
break;
@@ -23416,7 +23965,9 @@ static void decode_opc_special3_r6(CPUMIPSState *env, DisasContext *ctx)
op2 = MASK_BSHFL(ctx->opcode);
switch (op2) {
case OPC_ALIGN:
- case OPC_ALIGN_END:
+ case OPC_ALIGN_1:
+ case OPC_ALIGN_2:
+ case OPC_ALIGN_3:
gen_align(ctx, 32, rd, rs, rt, sa & 3);
break;
case OPC_BITSWAP:
@@ -23442,7 +23993,13 @@ static void decode_opc_special3_r6(CPUMIPSState *env, DisasContext *ctx)
op2 = MASK_DBSHFL(ctx->opcode);
switch (op2) {
case OPC_DALIGN:
- case OPC_DALIGN_END:
+ case OPC_DALIGN_1:
+ case OPC_DALIGN_2:
+ case OPC_DALIGN_3:
+ case OPC_DALIGN_4:
+ case OPC_DALIGN_5:
+ case OPC_DALIGN_6:
+ case OPC_DALIGN_7:
gen_align(ctx, 64, rd, rs, rt, sa & 7);
break;
case OPC_DBITSWAP:
@@ -23986,6 +24543,250 @@ static void decode_opc_special3_legacy(CPUMIPSState *env, DisasContext *ctx)
}
}
+static void decode_tx79_mmi0(CPUMIPSState *env, DisasContext *ctx)
+{
+ uint32_t opc = MASK_TX79_MMI0(ctx->opcode);
+
+ switch (opc) {
+ case TX79_MMI0_PADDW: /* TODO: TX79_MMI0_PADDW */
+ case TX79_MMI0_PSUBW: /* TODO: TX79_MMI0_PSUBW */
+ case TX79_MMI0_PCGTW: /* TODO: TX79_MMI0_PCGTW */
+ case TX79_MMI0_PMAXW: /* TODO: TX79_MMI0_PMAXW */
+ case TX79_MMI0_PADDH: /* TODO: TX79_MMI0_PADDH */
+ case TX79_MMI0_PSUBH: /* TODO: TX79_MMI0_PSUBH */
+ case TX79_MMI0_PCGTH: /* TODO: TX79_MMI0_PCGTH */
+ case TX79_MMI0_PMAXH: /* TODO: TX79_MMI0_PMAXH */
+ case TX79_MMI0_PADDB: /* TODO: TX79_MMI0_PADDB */
+ case TX79_MMI0_PSUBB: /* TODO: TX79_MMI0_PSUBB */
+ case TX79_MMI0_PCGTB: /* TODO: TX79_MMI0_PCGTB */
+ case TX79_MMI0_PADDSW: /* TODO: TX79_MMI0_PADDSW */
+ case TX79_MMI0_PSUBSW: /* TODO: TX79_MMI0_PSUBSW */
+ case TX79_MMI0_PEXTLW: /* TODO: TX79_MMI0_PEXTLW */
+ case TX79_MMI0_PPACW: /* TODO: TX79_MMI0_PPACW */
+ case TX79_MMI0_PADDSH: /* TODO: TX79_MMI0_PADDSH */
+ case TX79_MMI0_PSUBSH: /* TODO: TX79_MMI0_PSUBSH */
+ case TX79_MMI0_PEXTLH: /* TODO: TX79_MMI0_PEXTLH */
+ case TX79_MMI0_PPACH: /* TODO: TX79_MMI0_PPACH */
+ case TX79_MMI0_PADDSB: /* TODO: TX79_MMI0_PADDSB */
+ case TX79_MMI0_PSUBSB: /* TODO: TX79_MMI0_PSUBSB */
+ case TX79_MMI0_PEXTLB: /* TODO: TX79_MMI0_PEXTLB */
+ case TX79_MMI0_PPACB: /* TODO: TX79_MMI0_PPACB */
+ case TX79_MMI0_PEXT5: /* TODO: TX79_MMI0_PEXT5 */
+ case TX79_MMI0_PPAC5: /* TODO: TX79_MMI0_PPAC5 */
+ generate_exception_end(ctx, EXCP_RI); /* TODO: TX79_MMI_CLASS_MMI0 */
+ break;
+ default:
+ MIPS_INVAL("TX79 MMI class MMI0");
+ generate_exception_end(ctx, EXCP_RI);
+ break;
+ }
+}
+
+static void decode_tx79_mmi1(CPUMIPSState *env, DisasContext *ctx)
+{
+ uint32_t opc = MASK_TX79_MMI1(ctx->opcode);
+
+ switch (opc) {
+ case TX79_MMI1_PABSW: /* TODO: TX79_MMI1_PABSW */
+ case TX79_MMI1_PCEQW: /* TODO: TX79_MMI1_PCEQW */
+ case TX79_MMI1_PMINW: /* TODO: TX79_MMI1_PMINW */
+ case TX79_MMI1_PADSBH: /* TODO: TX79_MMI1_PADSBH */
+ case TX79_MMI1_PABSH: /* TODO: TX79_MMI1_PABSH */
+ case TX79_MMI1_PCEQH: /* TODO: TX79_MMI1_PCEQH */
+ case TX79_MMI1_PMINH: /* TODO: TX79_MMI1_PMINH */
+ case TX79_MMI1_PCEQB: /* TODO: TX79_MMI1_PCEQB */
+ case TX79_MMI1_PADDUW: /* TODO: TX79_MMI1_PADDUW */
+ case TX79_MMI1_PSUBUW: /* TODO: TX79_MMI1_PSUBUW */
+ case TX79_MMI1_PEXTUW: /* TODO: TX79_MMI1_PEXTUW */
+ case TX79_MMI1_PADDUH: /* TODO: TX79_MMI1_PADDUH */
+ case TX79_MMI1_PSUBUH: /* TODO: TX79_MMI1_PSUBUH */
+ case TX79_MMI1_PEXTUH: /* TODO: TX79_MMI1_PEXTUH */
+ case TX79_MMI1_PADDUB: /* TODO: TX79_MMI1_PADDUB */
+ case TX79_MMI1_PSUBUB: /* TODO: TX79_MMI1_PSUBUB */
+ case TX79_MMI1_PEXTUB: /* TODO: TX79_MMI1_PEXTUB */
+ case TX79_MMI1_QFSRV: /* TODO: TX79_MMI1_QFSRV */
+ generate_exception_end(ctx, EXCP_RI); /* TODO: TX79_MMI_CLASS_MMI1 */
+ break;
+ default:
+ MIPS_INVAL("TX79 MMI class MMI1");
+ generate_exception_end(ctx, EXCP_RI);
+ break;
+ }
+}
+
+static void decode_tx79_mmi2(CPUMIPSState *env, DisasContext *ctx)
+{
+ uint32_t opc = MASK_TX79_MMI2(ctx->opcode);
+
+ switch (opc) {
+ case TX79_MMI2_PMADDW: /* TODO: TX79_MMI2_PMADDW */
+ case TX79_MMI2_PSLLVW: /* TODO: TX79_MMI2_PSLLVW */
+ case TX79_MMI2_PSRLVW: /* TODO: TX79_MMI2_PSRLVW */
+ case TX79_MMI2_PMSUBW: /* TODO: TX79_MMI2_PMSUBW */
+ case TX79_MMI2_PMFHI: /* TODO: TX79_MMI2_PMFHI */
+ case TX79_MMI2_PMFLO: /* TODO: TX79_MMI2_PMFLO */
+ case TX79_MMI2_PINTH: /* TODO: TX79_MMI2_PINTH */
+ case TX79_MMI2_PMULTW: /* TODO: TX79_MMI2_PMULTW */
+ case TX79_MMI2_PDIVW: /* TODO: TX79_MMI2_PDIVW */
+ case TX79_MMI2_PCPYLD: /* TODO: TX79_MMI2_PCPYLD */
+ case TX79_MMI2_PMADDH: /* TODO: TX79_MMI2_PMADDH */
+ case TX79_MMI2_PHMADH: /* TODO: TX79_MMI2_PHMADH */
+ case TX79_MMI2_PAND: /* TODO: TX79_MMI2_PAND */
+ case TX79_MMI2_PXOR: /* TODO: TX79_MMI2_PXOR */
+ case TX79_MMI2_PMSUBH: /* TODO: TX79_MMI2_PMSUBH */
+ case TX79_MMI2_PHMSBH: /* TODO: TX79_MMI2_PHMSBH */
+ case TX79_MMI2_PEXEH: /* TODO: TX79_MMI2_PEXEH */
+ case TX79_MMI2_PREVH: /* TODO: TX79_MMI2_PREVH */
+ case TX79_MMI2_PMULTH: /* TODO: TX79_MMI2_PMULTH */
+ case TX79_MMI2_PDIVBW: /* TODO: TX79_MMI2_PDIVBW */
+ case TX79_MMI2_PEXEW: /* TODO: TX79_MMI2_PEXEW */
+ case TX79_MMI2_PROT3W: /* TODO: TX79_MMI2_PROT3W */
+ generate_exception_end(ctx, EXCP_RI); /* TODO: TX79_MMI_CLASS_MMI2 */
+ break;
+ default:
+ MIPS_INVAL("TX79 MMI class MMI2");
+ generate_exception_end(ctx, EXCP_RI);
+ break;
+ }
+}
+
+static void decode_tx79_mmi3(CPUMIPSState *env, DisasContext *ctx)
+{
+ uint32_t opc = MASK_TX79_MMI3(ctx->opcode);
+
+ switch (opc) {
+ case TX79_MMI3_PMADDUW: /* TODO: TX79_MMI3_PMADDUW */
+ case TX79_MMI3_PSRAVW: /* TODO: TX79_MMI3_PSRAVW */
+ case TX79_MMI3_PMTHI: /* TODO: TX79_MMI3_PMTHI */
+ case TX79_MMI3_PMTLO: /* TODO: TX79_MMI3_PMTLO */
+ case TX79_MMI3_PINTEH: /* TODO: TX79_MMI3_PINTEH */
+ case TX79_MMI3_PMULTUW: /* TODO: TX79_MMI3_PMULTUW */
+ case TX79_MMI3_PDIVUW: /* TODO: TX79_MMI3_PDIVUW */
+ case TX79_MMI3_PCPYUD: /* TODO: TX79_MMI3_PCPYUD */
+ case TX79_MMI3_POR: /* TODO: TX79_MMI3_POR */
+ case TX79_MMI3_PNOR: /* TODO: TX79_MMI3_PNOR */
+ case TX79_MMI3_PEXCH: /* TODO: TX79_MMI3_PEXCH */
+ case TX79_MMI3_PCPYH: /* TODO: TX79_MMI3_PCPYH */
+ case TX79_MMI3_PEXCW: /* TODO: TX79_MMI3_PEXCW */
+ generate_exception_end(ctx, EXCP_RI); /* TODO: TX79_MMI_CLASS_MMI3 */
+ break;
+ default:
+ MIPS_INVAL("TX79 MMI class MMI3");
+ generate_exception_end(ctx, EXCP_RI);
+ break;
+ }
+}
+
+static void decode_tx79_mmi(CPUMIPSState *env, DisasContext *ctx)
+{
+ uint32_t opc = MASK_TX79_MMI(ctx->opcode);
+ int rs = extract32(ctx->opcode, 21, 5);
+ int rt = extract32(ctx->opcode, 16, 5);
+ int rd = extract32(ctx->opcode, 11, 5);
+
+ switch (opc) {
+ case TX79_MMI_CLASS_MMI0:
+ decode_tx79_mmi0(env, ctx);
+ break;
+ case TX79_MMI_CLASS_MMI1:
+ decode_tx79_mmi1(env, ctx);
+ break;
+ case TX79_MMI_CLASS_MMI2:
+ decode_tx79_mmi2(env, ctx);
+ break;
+ case TX79_MMI_CLASS_MMI3:
+ decode_tx79_mmi3(env, ctx);
+ break;
+ case TX79_MMI_MULT1:
+ case TX79_MMI_MULTU1:
+ gen_mul_txx9(ctx, opc, rd, rs, rt);
+ break;
+ case TX79_MMI_DIV1:
+ case TX79_MMI_DIVU1:
+ gen_muldiv(ctx, opc, 1, rs, rt);
+ break;
+ case TX79_MMI_MTLO1:
+ case TX79_MMI_MTHI1:
+ gen_HILO(ctx, opc, 1, rs);
+ break;
+ case TX79_MMI_MFLO1:
+ case TX79_MMI_MFHI1:
+ gen_HILO(ctx, opc, 1, rd);
+ break;
+ case TX79_MMI_MADD: /* TODO: TX79_MMI_MADD */
+ case TX79_MMI_MADDU: /* TODO: TX79_MMI_MADDU */
+ case TX79_MMI_PLZCW: /* TODO: TX79_MMI_PLZCW */
+ case TX79_MMI_MADD1: /* TODO: TX79_MMI_MADD1 */
+ case TX79_MMI_MADDU1: /* TODO: TX79_MMI_MADDU1 */
+ case TX79_MMI_PMFHL: /* TODO: TX79_MMI_PMFHL */
+ case TX79_MMI_PMTHL: /* TODO: TX79_MMI_PMTHL */
+ case TX79_MMI_PSLLH: /* TODO: TX79_MMI_PSLLH */
+ case TX79_MMI_PSRLH: /* TODO: TX79_MMI_PSRLH */
+ case TX79_MMI_PSRAH: /* TODO: TX79_MMI_PSRAH */
+ case TX79_MMI_PSLLW: /* TODO: TX79_MMI_PSLLW */
+ case TX79_MMI_PSRLW: /* TODO: TX79_MMI_PSRLW */
+ case TX79_MMI_PSRAW: /* TODO: TX79_MMI_PSRAW */
+ generate_exception_end(ctx, EXCP_RI); /* TODO: TX79_CLASS_MMI */
+ break;
+ default:
+ MIPS_INVAL("TX79 MMI class");
+ generate_exception_end(ctx, EXCP_RI);
+ break;
+ }
+}
+
+static void decode_tx79_lq(CPUMIPSState *env, DisasContext *ctx)
+{
+ generate_exception_end(ctx, EXCP_RI); /* TODO: TX79_LQ */
+}
+
+static void gen_tx79_sq(DisasContext *ctx, int base, int rt, int offset)
+{
+ generate_exception_end(ctx, EXCP_RI); /* TODO: TX79_SQ */
+}
+
+/*
+ * The TX79-specific instruction Store Quadword
+ *
+ * +--------+-------+-------+------------------------+
+ * | 011111 | base | rt | offset | SQ
+ * +--------+-------+-------+------------------------+
+ * 6 5 5 16
+ *
+ * has the same opcode as the Read Hardware Register instruction
+ *
+ * +--------+-------+-------+-------+-------+--------+
+ * | 011111 | 00000 | rt | rd | 00000 | 111011 | RDHWR
+ * +--------+-------+-------+-------+-------+--------+
+ * 6 5 5 5 5 6
+ *
+ * that is required, trapped and emulated by the Linux kernel. However, all
+ * RDHWR encodings yield address error exceptions on the TX79 since the SQ
+ * offset is odd. Therefore all valid SQ instructions can execute normally.
+ * In user mode, QEMU must verify the upper and lower 11 bits to distinguish
+ * between SQ and RDHWR, as the Linux kernel does.
+ */
+static void decode_tx79_sq(CPUMIPSState *env, DisasContext *ctx)
+{
+ int base = extract32(ctx->opcode, 21, 5);
+ int rt = extract32(ctx->opcode, 16, 5);
+ int offset = extract32(ctx->opcode, 0, 16);
+
+#ifdef CONFIG_USER_ONLY
+ uint32_t op1 = MASK_SPECIAL3(ctx->opcode);
+ uint32_t op2 = extract32(ctx->opcode, 6, 5);
+
+ if (base == 0 && op2 == 0 && op1 == OPC_RDHWR) {
+ int rd = extract32(ctx->opcode, 11, 5);
+
+ gen_rdhwr(ctx, rt, rd, 0);
+ return;
+ }
+#endif
+
+ gen_tx79_sq(ctx, base, rt, offset);
+}
+
static void decode_opc_special3(CPUMIPSState *env, DisasContext *ctx)
{
int rs, rt, rd, sa;
@@ -24058,7 +24859,9 @@ static void decode_opc_special3(CPUMIPSState *env, DisasContext *ctx)
op2 = MASK_BSHFL(ctx->opcode);
switch (op2) {
case OPC_ALIGN:
- case OPC_ALIGN_END:
+ case OPC_ALIGN_1:
+ case OPC_ALIGN_2:
+ case OPC_ALIGN_3:
case OPC_BITSWAP:
check_insn(ctx, ISA_MIPS32R6);
decode_opc_special3_r6(env, ctx);
@@ -24084,7 +24887,13 @@ static void decode_opc_special3(CPUMIPSState *env, DisasContext *ctx)
op2 = MASK_DBSHFL(ctx->opcode);
switch (op2) {
case OPC_DALIGN:
- case OPC_DALIGN_END:
+ case OPC_DALIGN_1:
+ case OPC_DALIGN_2:
+ case OPC_DALIGN_3:
+ case OPC_DALIGN_4:
+ case OPC_DALIGN_5:
+ case OPC_DALIGN_6:
+ case OPC_DALIGN_7:
case OPC_DBITSWAP:
check_insn(ctx, ISA_MIPS32R6);
decode_opc_special3_r6(env, ctx);
@@ -25283,10 +26092,18 @@ static void decode_opc(CPUMIPSState *env, DisasContext *ctx)
decode_opc_special(env, ctx);
break;
case OPC_SPECIAL2:
- decode_opc_special2_legacy(env, ctx);
+ if ((ctx->insn_flags & INSN_R5900) && (ctx->insn_flags & ASE_MMI)) {
+ decode_tx79_mmi(env, ctx);
+ } else {
+ decode_opc_special2_legacy(env, ctx);
+ }
break;
case OPC_SPECIAL3:
- decode_opc_special3(env, ctx);
+ if (ctx->insn_flags & INSN_R5900) {
+ decode_tx79_sq(env, ctx); /* TX79_SQ */
+ } else {
+ decode_opc_special3(env, ctx);
+ }
break;
case OPC_REGIMM:
op1 = MASK_REGIMM(ctx->opcode);
@@ -25573,6 +26390,7 @@ static void decode_opc(CPUMIPSState *env, DisasContext *ctx)
break;
case OPC_LL: /* Load and stores */
check_insn(ctx, ISA_MIPS2);
+ check_insn_opc_user_only(ctx, INSN_R5900);
/* Fallthrough */
case OPC_LWL:
case OPC_LWR:
@@ -25598,6 +26416,7 @@ static void decode_opc(CPUMIPSState *env, DisasContext *ctx)
case OPC_SC:
check_insn(ctx, ISA_MIPS2);
check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ check_insn_opc_user_only(ctx, INSN_R5900);
gen_st_cond(ctx, op, rt, rs, imm);
break;
case OPC_CACHE:
@@ -25611,7 +26430,8 @@ static void decode_opc(CPUMIPSState *env, DisasContext *ctx)
break;
case OPC_PREF:
check_insn_opc_removed(ctx, ISA_MIPS32R6);
- check_insn(ctx, ISA_MIPS4 | ISA_MIPS32);
+ check_insn(ctx, ISA_MIPS4 | ISA_MIPS32 |
+ INSN_R5900);
/* Treat as NOP. */
break;
@@ -25863,9 +26683,11 @@ static void decode_opc(CPUMIPSState *env, DisasContext *ctx)
#if defined(TARGET_MIPS64)
/* MIPS64 opcodes */
+ case OPC_LLD:
+ check_insn_opc_user_only(ctx, INSN_R5900);
+ /* fall through */
case OPC_LDL:
case OPC_LDR:
- case OPC_LLD:
check_insn_opc_removed(ctx, ISA_MIPS32R6);
/* fall through */
case OPC_LWU:
@@ -25886,6 +26708,7 @@ static void decode_opc(CPUMIPSState *env, DisasContext *ctx)
case OPC_SCD:
check_insn_opc_removed(ctx, ISA_MIPS32R6);
check_insn(ctx, ISA_MIPS3);
+ check_insn_opc_user_only(ctx, INSN_R5900);
check_mips_64(ctx);
gen_st_cond(ctx, op, rt, rs, imm);
break;
@@ -25940,8 +26763,12 @@ static void decode_opc(CPUMIPSState *env, DisasContext *ctx)
}
break;
case OPC_MSA: /* OPC_MDMX */
- /* MDMX: Not implemented. */
- gen_msa(env, ctx);
+ if (ctx->insn_flags & INSN_R5900) {
+ decode_tx79_lq(env, ctx); /* TX79_LQ */
+ } else {
+ /* MDMX: Not implemented. */
+ gen_msa(env, ctx);
+ }
break;
case OPC_PCREL:
check_insn(ctx, ISA_MIPS32R6);
diff --git a/target/mips/translate_init.inc.c b/target/mips/translate_init.inc.c
index acab097820..85da4a269c 100644
--- a/target/mips/translate_init.inc.c
+++ b/target/mips/translate_init.inc.c
@@ -411,6 +411,65 @@ const mips_def_t mips_defs[] =
.mmu_type = MMU_TYPE_R4000,
},
{
+ /*
+ * The Toshiba TX System RISC TX79 Core Architecture manual
+ *
+ * https://wiki.qemu.org/File:C790.pdf
+ *
+ * describes the C790 processor that is a follow-up to the R5900.
+ * There are a few notable differences in that the R5900 FPU
+ *
+ * - is not IEEE 754-1985 compliant,
+ * - does not implement double format, and
+ * - its machine code is nonstandard.
+ */
+ .name = "R5900",
+ .CP0_PRid = 0x00002E00,
+ /* No L2 cache, icache size 32k, dcache size 32k, uncached coherency. */
+ .CP0_Config0 = (0x3 << 9) | (0x3 << 6) | (0x2 << CP0C0_K0),
+ .CP0_Status_rw_bitmask = 0xF4C79C1F,
+#ifdef CONFIG_USER_ONLY
+ /*
+ * R5900 hardware traps to the Linux kernel for IEEE 754-1985 and LL/SC
+ * emulation. In user-only mode, QEMU acts as the kernel, so we emulate
+ * the traps by simply emulating the instructions directly.
+ *
+ * Note: Config1 is only used internally; the R5900 has only Config0.
+ */
+ .CP0_Config1 = (1 << CP0C1_FP) | (47 << CP0C1_MMU),
+ .CP0_LLAddr_rw_bitmask = 0xFFFFFFFF,
+ .CP0_LLAddr_shift = 4,
+ .CP1_fcr0 = (0x38 << FCR0_PRID) | (0x0 << FCR0_REV),
+ .CP1_fcr31 = 0,
+ .CP1_fcr31_rw_bitmask = 0x0183FFFF,
+#else
+ /*
+ * The R5900 COP1 FPU implements single-precision floating-point
+ * operations but is not entirely IEEE 754-1985 compatible. In
+ * particular,
+ *
+ * - NaN (not a number) and +/- infinities are not supported;
+ * - exception mechanisms are not fully supported;
+ * - denormalized numbers are not supported;
+ * - rounding towards nearest and +/- infinities are not supported;
+ * - computed results usually differ in the least significant bit;
+ * - saturated results can differ by more than the least significant bit.
+ *
+ * Since only rounding towards zero is supported, the two least
+ * significant bits of FCR31 are hardwired to 01.
+ *
+ * FPU emulation is disabled here until it is implemented.
+ *
+ * Note: Config1 is only used internally; the R5900 has only Config0.
+ */
+ .CP0_Config1 = (47 << CP0C1_MMU),
+#endif /* !CONFIG_USER_ONLY */
+ .SEGBITS = 32,
+ .PABITS = 32,
+ .insn_flags = CPU_R5900 | ASE_MMI,
+ .mmu_type = MMU_TYPE_R4000,
+ },
+ {
/* A generic CPU supporting MIPS32 Release 6 ISA.
FIXME: Support IEEE 754-2008 FP.
Eventually this should be replaced by a real CPU model. */
diff --git a/target/riscv/Makefile.objs b/target/riscv/Makefile.objs
index abd0a7cde3..fcc5d34c1f 100644
--- a/target/riscv/Makefile.objs
+++ b/target/riscv/Makefile.objs
@@ -1 +1 @@
-obj-y += translate.o op_helper.o helper.o cpu.o fpu_helper.o gdbstub.o pmp.o
+obj-y += translate.o op_helper.o cpu_helper.o cpu.o fpu_helper.o gdbstub.o pmp.o
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
index d630e8fd6c..a025a0a3ba 100644
--- a/target/riscv/cpu.c
+++ b/target/riscv/cpu.c
@@ -74,8 +74,10 @@ const char * const riscv_intr_names[] = {
"s_external",
"h_external",
"m_external",
- "coprocessor",
- "host"
+ "reserved",
+ "reserved",
+ "reserved",
+ "reserved"
};
typedef struct RISCVCPUInfo {
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
index d4f36295f0..4ee09b9cff 100644
--- a/target/riscv/cpu.h
+++ b/target/riscv/cpu.h
@@ -126,13 +126,18 @@ struct CPURISCVState {
target_ulong mhartid;
target_ulong mstatus;
+
/*
 * CAUTION! Unlike the rest of this struct, mip is accessed asynchronously
- * by I/O threads and other vCPUs, so hold the iothread mutex before
- * operating on it. CPU_INTERRUPT_HARD should be in effect iff this is
- * non-zero. Use riscv_cpu_set_local_interrupt.
+ * by I/O threads. It should be read with atomic_read. It should be updated
+ * using riscv_cpu_update_mip with the iothread mutex held. The iothread
+ * mutex must be held because mip must be consistent with the CPU interrupt
+ * state. riscv_cpu_update_mip calls cpu_interrupt or cpu_reset_interrupt
+ * with the invariant that CPU_INTERRUPT_HARD is set iff mip is non-zero.
+ * mip is 32 bits wide to allow atomic_read on 32-bit hosts.
*/
- uint32_t mip; /* allow atomic_read for >= 32-bit hosts */
+ uint32_t mip;
+
target_ulong mie;
target_ulong mideleg;
@@ -247,7 +252,6 @@ void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
uintptr_t retaddr);
int riscv_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size,
int rw, int mmu_idx);
-
char *riscv_isa_string(RISCVCPU *cpu);
void riscv_cpu_list(FILE *f, fprintf_function cpu_fprintf);
@@ -255,6 +259,10 @@ void riscv_cpu_list(FILE *f, fprintf_function cpu_fprintf);
#define cpu_list riscv_cpu_list
#define cpu_mmu_index riscv_cpu_mmu_index
+#ifndef CONFIG_USER_ONLY
+uint32_t riscv_cpu_update_mip(RISCVCPU *cpu, uint32_t mask, uint32_t value);
+#define BOOL_TO_MASK(x) (-!!(x)) /* helper for riscv_cpu_update_mip value */
+#endif
void riscv_set_mode(CPURISCVState *env, target_ulong newpriv);
void riscv_translate_init(void);
@@ -285,10 +293,6 @@ void csr_write_helper(CPURISCVState *env, target_ulong val_to_write,
target_ulong csrno);
target_ulong csr_read_helper(CPURISCVState *env, target_ulong csrno);
-#ifndef CONFIG_USER_ONLY
-void riscv_set_local_interrupt(RISCVCPU *cpu, target_ulong mask, int value);
-#endif
-
#include "exec/cpu-all.h"
#endif /* RISCV_CPU_H */
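[Editorial aside, not part of the patch] A minimal sketch of the update pattern the new mip comment describes: a device model raising or clearing a pending bit through riscv_cpu_update_mip(), with BOOL_TO_MASK() turning a boolean level into an all-ones or all-zeros value mask. The helper name, the MIP_MTIP bit (from cpu_bits.h) and the assumption that the caller already holds the iothread mutex are illustrative, loosely modelled on the CLINT changes listed in the diffstat.

static void clint_set_timer_irq(RISCVCPU *cpu, bool level)
{
    /* Raise or clear the machine timer interrupt pending bit. */
    riscv_cpu_update_mip(cpu, MIP_MTIP, BOOL_TO_MASK(level));
}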
diff --git a/target/riscv/cpu_bits.h b/target/riscv/cpu_bits.h
index 12b4757088..5439f4719e 100644
--- a/target/riscv/cpu_bits.h
+++ b/target/riscv/cpu_bits.h
@@ -6,242 +6,283 @@
(((target_ulong)(val) * ((mask) & ~((mask) << 1))) & \
(target_ulong)(mask)))
-#define PGSHIFT 12
-
-#define FSR_RD_SHIFT 5
-#define FSR_RD (0x7 << FSR_RD_SHIFT)
-
-#define FPEXC_NX 0x01
-#define FPEXC_UF 0x02
-#define FPEXC_OF 0x04
-#define FPEXC_DZ 0x08
-#define FPEXC_NV 0x10
-
-#define FSR_AEXC_SHIFT 0
-#define FSR_NVA (FPEXC_NV << FSR_AEXC_SHIFT)
-#define FSR_OFA (FPEXC_OF << FSR_AEXC_SHIFT)
-#define FSR_UFA (FPEXC_UF << FSR_AEXC_SHIFT)
-#define FSR_DZA (FPEXC_DZ << FSR_AEXC_SHIFT)
-#define FSR_NXA (FPEXC_NX << FSR_AEXC_SHIFT)
-#define FSR_AEXC (FSR_NVA | FSR_OFA | FSR_UFA | FSR_DZA | FSR_NXA)
-
-/* CSR numbers */
-#define CSR_FFLAGS 0x1
-#define CSR_FRM 0x2
-#define CSR_FCSR 0x3
-#define CSR_CYCLE 0xc00
-#define CSR_TIME 0xc01
-#define CSR_INSTRET 0xc02
-#define CSR_HPMCOUNTER3 0xc03
-#define CSR_HPMCOUNTER4 0xc04
-#define CSR_HPMCOUNTER5 0xc05
-#define CSR_HPMCOUNTER6 0xc06
-#define CSR_HPMCOUNTER7 0xc07
-#define CSR_HPMCOUNTER8 0xc08
-#define CSR_HPMCOUNTER9 0xc09
-#define CSR_HPMCOUNTER10 0xc0a
-#define CSR_HPMCOUNTER11 0xc0b
-#define CSR_HPMCOUNTER12 0xc0c
-#define CSR_HPMCOUNTER13 0xc0d
-#define CSR_HPMCOUNTER14 0xc0e
-#define CSR_HPMCOUNTER15 0xc0f
-#define CSR_HPMCOUNTER16 0xc10
-#define CSR_HPMCOUNTER17 0xc11
-#define CSR_HPMCOUNTER18 0xc12
-#define CSR_HPMCOUNTER19 0xc13
-#define CSR_HPMCOUNTER20 0xc14
-#define CSR_HPMCOUNTER21 0xc15
-#define CSR_HPMCOUNTER22 0xc16
-#define CSR_HPMCOUNTER23 0xc17
-#define CSR_HPMCOUNTER24 0xc18
-#define CSR_HPMCOUNTER25 0xc19
-#define CSR_HPMCOUNTER26 0xc1a
-#define CSR_HPMCOUNTER27 0xc1b
-#define CSR_HPMCOUNTER28 0xc1c
-#define CSR_HPMCOUNTER29 0xc1d
-#define CSR_HPMCOUNTER30 0xc1e
-#define CSR_HPMCOUNTER31 0xc1f
-#define CSR_SSTATUS 0x100
-#define CSR_SIE 0x104
-#define CSR_STVEC 0x105
-#define CSR_SCOUNTEREN 0x106
-#define CSR_SSCRATCH 0x140
-#define CSR_SEPC 0x141
-#define CSR_SCAUSE 0x142
-#define CSR_SBADADDR 0x143
-#define CSR_SIP 0x144
-#define CSR_SPTBR 0x180
-#define CSR_SATP 0x180
-#define CSR_MSTATUS 0x300
-#define CSR_MISA 0x301
-#define CSR_MEDELEG 0x302
-#define CSR_MIDELEG 0x303
-#define CSR_MIE 0x304
-#define CSR_MTVEC 0x305
-#define CSR_MCOUNTEREN 0x306
-#define CSR_MSCRATCH 0x340
-#define CSR_MEPC 0x341
-#define CSR_MCAUSE 0x342
-#define CSR_MBADADDR 0x343
-#define CSR_MIP 0x344
-#define CSR_PMPCFG0 0x3a0
-#define CSR_PMPCFG1 0x3a1
-#define CSR_PMPCFG2 0x3a2
-#define CSR_PMPCFG3 0x3a3
-#define CSR_PMPADDR0 0x3b0
-#define CSR_PMPADDR1 0x3b1
-#define CSR_PMPADDR2 0x3b2
-#define CSR_PMPADDR3 0x3b3
-#define CSR_PMPADDR4 0x3b4
-#define CSR_PMPADDR5 0x3b5
-#define CSR_PMPADDR6 0x3b6
-#define CSR_PMPADDR7 0x3b7
-#define CSR_PMPADDR8 0x3b8
-#define CSR_PMPADDR9 0x3b9
-#define CSR_PMPADDR10 0x3ba
-#define CSR_PMPADDR11 0x3bb
-#define CSR_PMPADDR12 0x3bc
-#define CSR_PMPADDR13 0x3bd
-#define CSR_PMPADDR14 0x3be
-#define CSR_PMPADDR15 0x3bf
-#define CSR_TSELECT 0x7a0
-#define CSR_TDATA1 0x7a1
-#define CSR_TDATA2 0x7a2
-#define CSR_TDATA3 0x7a3
-#define CSR_DCSR 0x7b0
-#define CSR_DPC 0x7b1
-#define CSR_DSCRATCH 0x7b2
-#define CSR_MCYCLE 0xb00
-#define CSR_MINSTRET 0xb02
-#define CSR_MHPMCOUNTER3 0xb03
-#define CSR_MHPMCOUNTER4 0xb04
-#define CSR_MHPMCOUNTER5 0xb05
-#define CSR_MHPMCOUNTER6 0xb06
-#define CSR_MHPMCOUNTER7 0xb07
-#define CSR_MHPMCOUNTER8 0xb08
-#define CSR_MHPMCOUNTER9 0xb09
-#define CSR_MHPMCOUNTER10 0xb0a
-#define CSR_MHPMCOUNTER11 0xb0b
-#define CSR_MHPMCOUNTER12 0xb0c
-#define CSR_MHPMCOUNTER13 0xb0d
-#define CSR_MHPMCOUNTER14 0xb0e
-#define CSR_MHPMCOUNTER15 0xb0f
-#define CSR_MHPMCOUNTER16 0xb10
-#define CSR_MHPMCOUNTER17 0xb11
-#define CSR_MHPMCOUNTER18 0xb12
-#define CSR_MHPMCOUNTER19 0xb13
-#define CSR_MHPMCOUNTER20 0xb14
-#define CSR_MHPMCOUNTER21 0xb15
-#define CSR_MHPMCOUNTER22 0xb16
-#define CSR_MHPMCOUNTER23 0xb17
-#define CSR_MHPMCOUNTER24 0xb18
-#define CSR_MHPMCOUNTER25 0xb19
-#define CSR_MHPMCOUNTER26 0xb1a
-#define CSR_MHPMCOUNTER27 0xb1b
-#define CSR_MHPMCOUNTER28 0xb1c
-#define CSR_MHPMCOUNTER29 0xb1d
-#define CSR_MHPMCOUNTER30 0xb1e
-#define CSR_MHPMCOUNTER31 0xb1f
-#define CSR_MUCOUNTEREN 0x320
-#define CSR_MSCOUNTEREN 0x321
-#define CSR_MHPMEVENT3 0x323
-#define CSR_MHPMEVENT4 0x324
-#define CSR_MHPMEVENT5 0x325
-#define CSR_MHPMEVENT6 0x326
-#define CSR_MHPMEVENT7 0x327
-#define CSR_MHPMEVENT8 0x328
-#define CSR_MHPMEVENT9 0x329
-#define CSR_MHPMEVENT10 0x32a
-#define CSR_MHPMEVENT11 0x32b
-#define CSR_MHPMEVENT12 0x32c
-#define CSR_MHPMEVENT13 0x32d
-#define CSR_MHPMEVENT14 0x32e
-#define CSR_MHPMEVENT15 0x32f
-#define CSR_MHPMEVENT16 0x330
-#define CSR_MHPMEVENT17 0x331
-#define CSR_MHPMEVENT18 0x332
-#define CSR_MHPMEVENT19 0x333
-#define CSR_MHPMEVENT20 0x334
-#define CSR_MHPMEVENT21 0x335
-#define CSR_MHPMEVENT22 0x336
-#define CSR_MHPMEVENT23 0x337
-#define CSR_MHPMEVENT24 0x338
-#define CSR_MHPMEVENT25 0x339
-#define CSR_MHPMEVENT26 0x33a
-#define CSR_MHPMEVENT27 0x33b
-#define CSR_MHPMEVENT28 0x33c
-#define CSR_MHPMEVENT29 0x33d
-#define CSR_MHPMEVENT30 0x33e
-#define CSR_MHPMEVENT31 0x33f
-#define CSR_MVENDORID 0xf11
-#define CSR_MARCHID 0xf12
-#define CSR_MIMPID 0xf13
-#define CSR_MHARTID 0xf14
-#define CSR_CYCLEH 0xc80
-#define CSR_TIMEH 0xc81
-#define CSR_INSTRETH 0xc82
-#define CSR_HPMCOUNTER3H 0xc83
-#define CSR_HPMCOUNTER4H 0xc84
-#define CSR_HPMCOUNTER5H 0xc85
-#define CSR_HPMCOUNTER6H 0xc86
-#define CSR_HPMCOUNTER7H 0xc87
-#define CSR_HPMCOUNTER8H 0xc88
-#define CSR_HPMCOUNTER9H 0xc89
-#define CSR_HPMCOUNTER10H 0xc8a
-#define CSR_HPMCOUNTER11H 0xc8b
-#define CSR_HPMCOUNTER12H 0xc8c
-#define CSR_HPMCOUNTER13H 0xc8d
-#define CSR_HPMCOUNTER14H 0xc8e
-#define CSR_HPMCOUNTER15H 0xc8f
-#define CSR_HPMCOUNTER16H 0xc90
-#define CSR_HPMCOUNTER17H 0xc91
-#define CSR_HPMCOUNTER18H 0xc92
-#define CSR_HPMCOUNTER19H 0xc93
-#define CSR_HPMCOUNTER20H 0xc94
-#define CSR_HPMCOUNTER21H 0xc95
-#define CSR_HPMCOUNTER22H 0xc96
-#define CSR_HPMCOUNTER23H 0xc97
-#define CSR_HPMCOUNTER24H 0xc98
-#define CSR_HPMCOUNTER25H 0xc99
-#define CSR_HPMCOUNTER26H 0xc9a
-#define CSR_HPMCOUNTER27H 0xc9b
-#define CSR_HPMCOUNTER28H 0xc9c
-#define CSR_HPMCOUNTER29H 0xc9d
-#define CSR_HPMCOUNTER30H 0xc9e
-#define CSR_HPMCOUNTER31H 0xc9f
-#define CSR_MCYCLEH 0xb80
-#define CSR_MINSTRETH 0xb82
-#define CSR_MHPMCOUNTER3H 0xb83
-#define CSR_MHPMCOUNTER4H 0xb84
-#define CSR_MHPMCOUNTER5H 0xb85
-#define CSR_MHPMCOUNTER6H 0xb86
-#define CSR_MHPMCOUNTER7H 0xb87
-#define CSR_MHPMCOUNTER8H 0xb88
-#define CSR_MHPMCOUNTER9H 0xb89
-#define CSR_MHPMCOUNTER10H 0xb8a
-#define CSR_MHPMCOUNTER11H 0xb8b
-#define CSR_MHPMCOUNTER12H 0xb8c
-#define CSR_MHPMCOUNTER13H 0xb8d
-#define CSR_MHPMCOUNTER14H 0xb8e
-#define CSR_MHPMCOUNTER15H 0xb8f
-#define CSR_MHPMCOUNTER16H 0xb90
-#define CSR_MHPMCOUNTER17H 0xb91
-#define CSR_MHPMCOUNTER18H 0xb92
-#define CSR_MHPMCOUNTER19H 0xb93
-#define CSR_MHPMCOUNTER20H 0xb94
-#define CSR_MHPMCOUNTER21H 0xb95
-#define CSR_MHPMCOUNTER22H 0xb96
-#define CSR_MHPMCOUNTER23H 0xb97
-#define CSR_MHPMCOUNTER24H 0xb98
-#define CSR_MHPMCOUNTER25H 0xb99
-#define CSR_MHPMCOUNTER26H 0xb9a
-#define CSR_MHPMCOUNTER27H 0xb9b
-#define CSR_MHPMCOUNTER28H 0xb9c
-#define CSR_MHPMCOUNTER29H 0xb9d
-#define CSR_MHPMCOUNTER30H 0xb9e
-#define CSR_MHPMCOUNTER31H 0xb9f
-
-/* mstatus bits */
+/* Floating point round mode */
+#define FSR_RD_SHIFT 5
+#define FSR_RD (0x7 << FSR_RD_SHIFT)
+
+/* Floating point accrued exception flags */
+#define FPEXC_NX 0x01
+#define FPEXC_UF 0x02
+#define FPEXC_OF 0x04
+#define FPEXC_DZ 0x08
+#define FPEXC_NV 0x10
+
+/* Floating point status register bits */
+#define FSR_AEXC_SHIFT 0
+#define FSR_NVA (FPEXC_NV << FSR_AEXC_SHIFT)
+#define FSR_OFA (FPEXC_OF << FSR_AEXC_SHIFT)
+#define FSR_UFA (FPEXC_UF << FSR_AEXC_SHIFT)
+#define FSR_DZA (FPEXC_DZ << FSR_AEXC_SHIFT)
+#define FSR_NXA (FPEXC_NX << FSR_AEXC_SHIFT)
+#define FSR_AEXC (FSR_NVA | FSR_OFA | FSR_UFA | FSR_DZA | FSR_NXA)
+
+/* Control and Status Registers */
+
+/* User Trap Setup */
+#define CSR_USTATUS 0x000
+#define CSR_UIE 0x004
+#define CSR_UTVEC 0x005
+
+/* User Trap Handling */
+#define CSR_USCRATCH 0x040
+#define CSR_UEPC 0x041
+#define CSR_UCAUSE 0x042
+#define CSR_UTVAL 0x043
+#define CSR_UIP 0x044
+
+/* User Floating-Point CSRs */
+#define CSR_FFLAGS 0x001
+#define CSR_FRM 0x002
+#define CSR_FCSR 0x003
+
+/* User Timers and Counters */
+#define CSR_CYCLE 0xc00
+#define CSR_TIME 0xc01
+#define CSR_INSTRET 0xc02
+#define CSR_HPMCOUNTER3 0xc03
+#define CSR_HPMCOUNTER4 0xc04
+#define CSR_HPMCOUNTER5 0xc05
+#define CSR_HPMCOUNTER6 0xc06
+#define CSR_HPMCOUNTER7 0xc07
+#define CSR_HPMCOUNTER8 0xc08
+#define CSR_HPMCOUNTER9 0xc09
+#define CSR_HPMCOUNTER10 0xc0a
+#define CSR_HPMCOUNTER11 0xc0b
+#define CSR_HPMCOUNTER12 0xc0c
+#define CSR_HPMCOUNTER13 0xc0d
+#define CSR_HPMCOUNTER14 0xc0e
+#define CSR_HPMCOUNTER15 0xc0f
+#define CSR_HPMCOUNTER16 0xc10
+#define CSR_HPMCOUNTER17 0xc11
+#define CSR_HPMCOUNTER18 0xc12
+#define CSR_HPMCOUNTER19 0xc13
+#define CSR_HPMCOUNTER20 0xc14
+#define CSR_HPMCOUNTER21 0xc15
+#define CSR_HPMCOUNTER22 0xc16
+#define CSR_HPMCOUNTER23 0xc17
+#define CSR_HPMCOUNTER24 0xc18
+#define CSR_HPMCOUNTER25 0xc19
+#define CSR_HPMCOUNTER26 0xc1a
+#define CSR_HPMCOUNTER27 0xc1b
+#define CSR_HPMCOUNTER28 0xc1c
+#define CSR_HPMCOUNTER29 0xc1d
+#define CSR_HPMCOUNTER30 0xc1e
+#define CSR_HPMCOUNTER31 0xc1f
+#define CSR_CYCLEH 0xc80
+#define CSR_TIMEH 0xc81
+#define CSR_INSTRETH 0xc82
+#define CSR_HPMCOUNTER3H 0xc83
+#define CSR_HPMCOUNTER4H 0xc84
+#define CSR_HPMCOUNTER5H 0xc85
+#define CSR_HPMCOUNTER6H 0xc86
+#define CSR_HPMCOUNTER7H 0xc87
+#define CSR_HPMCOUNTER8H 0xc88
+#define CSR_HPMCOUNTER9H 0xc89
+#define CSR_HPMCOUNTER10H 0xc8a
+#define CSR_HPMCOUNTER11H 0xc8b
+#define CSR_HPMCOUNTER12H 0xc8c
+#define CSR_HPMCOUNTER13H 0xc8d
+#define CSR_HPMCOUNTER14H 0xc8e
+#define CSR_HPMCOUNTER15H 0xc8f
+#define CSR_HPMCOUNTER16H 0xc90
+#define CSR_HPMCOUNTER17H 0xc91
+#define CSR_HPMCOUNTER18H 0xc92
+#define CSR_HPMCOUNTER19H 0xc93
+#define CSR_HPMCOUNTER20H 0xc94
+#define CSR_HPMCOUNTER21H 0xc95
+#define CSR_HPMCOUNTER22H 0xc96
+#define CSR_HPMCOUNTER23H 0xc97
+#define CSR_HPMCOUNTER24H 0xc98
+#define CSR_HPMCOUNTER25H 0xc99
+#define CSR_HPMCOUNTER26H 0xc9a
+#define CSR_HPMCOUNTER27H 0xc9b
+#define CSR_HPMCOUNTER28H 0xc9c
+#define CSR_HPMCOUNTER29H 0xc9d
+#define CSR_HPMCOUNTER30H 0xc9e
+#define CSR_HPMCOUNTER31H 0xc9f
+
+/* Machine Timers and Counters */
+#define CSR_MCYCLE 0xb00
+#define CSR_MINSTRET 0xb02
+#define CSR_MCYCLEH 0xb80
+#define CSR_MINSTRETH 0xb82
+
+/* Machine Information Registers */
+#define CSR_MVENDORID 0xf11
+#define CSR_MARCHID 0xf12
+#define CSR_MIMPID 0xf13
+#define CSR_MHARTID 0xf14
+
+/* Machine Trap Setup */
+#define CSR_MSTATUS 0x300
+#define CSR_MISA 0x301
+#define CSR_MEDELEG 0x302
+#define CSR_MIDELEG 0x303
+#define CSR_MIE 0x304
+#define CSR_MTVEC 0x305
+#define CSR_MCOUNTEREN 0x306
+
+/* Legacy Counter Setup (priv v1.9.1) */
+#define CSR_MUCOUNTEREN 0x320
+#define CSR_MSCOUNTEREN 0x321
+
+/* Machine Trap Handling */
+#define CSR_MSCRATCH 0x340
+#define CSR_MEPC 0x341
+#define CSR_MCAUSE 0x342
+#define CSR_MBADADDR 0x343
+#define CSR_MIP 0x344
+
+/* Supervisor Trap Setup */
+#define CSR_SSTATUS 0x100
+#define CSR_SIE 0x104
+#define CSR_STVEC 0x105
+#define CSR_SCOUNTEREN 0x106
+
+/* Supervisor Trap Handling */
+#define CSR_SSCRATCH 0x140
+#define CSR_SEPC 0x141
+#define CSR_SCAUSE 0x142
+#define CSR_SBADADDR 0x143
+#define CSR_SIP 0x144
+
+/* Supervisor Protection and Translation */
+#define CSR_SPTBR 0x180
+#define CSR_SATP 0x180
+
+/* Physical Memory Protection */
+#define CSR_PMPCFG0 0x3a0
+#define CSR_PMPCFG1 0x3a1
+#define CSR_PMPCFG2 0x3a2
+#define CSR_PMPCFG3 0x3a3
+#define CSR_PMPADDR0 0x3b0
+#define CSR_PMPADDR1 0x3b1
+#define CSR_PMPADDR2 0x3b2
+#define CSR_PMPADDR3 0x3b3
+#define CSR_PMPADDR4 0x3b4
+#define CSR_PMPADDR5 0x3b5
+#define CSR_PMPADDR6 0x3b6
+#define CSR_PMPADDR7 0x3b7
+#define CSR_PMPADDR8 0x3b8
+#define CSR_PMPADDR9 0x3b9
+#define CSR_PMPADDR10 0x3ba
+#define CSR_PMPADDR11 0x3bb
+#define CSR_PMPADDR12 0x3bc
+#define CSR_PMPADDR13 0x3bd
+#define CSR_PMPADDR14 0x3be
+#define CSR_PMPADDR15 0x3bf
+
+/* Debug/Trace Registers (shared with Debug Mode) */
+#define CSR_TSELECT 0x7a0
+#define CSR_TDATA1 0x7a1
+#define CSR_TDATA2 0x7a2
+#define CSR_TDATA3 0x7a3
+
+/* Debug Mode Registers */
+#define CSR_DCSR 0x7b0
+#define CSR_DPC 0x7b1
+#define CSR_DSCRATCH 0x7b2
+
+/* Performance Counters */
+#define CSR_MHPMCOUNTER3 0xb03
+#define CSR_MHPMCOUNTER4 0xb04
+#define CSR_MHPMCOUNTER5 0xb05
+#define CSR_MHPMCOUNTER6 0xb06
+#define CSR_MHPMCOUNTER7 0xb07
+#define CSR_MHPMCOUNTER8 0xb08
+#define CSR_MHPMCOUNTER9 0xb09
+#define CSR_MHPMCOUNTER10 0xb0a
+#define CSR_MHPMCOUNTER11 0xb0b
+#define CSR_MHPMCOUNTER12 0xb0c
+#define CSR_MHPMCOUNTER13 0xb0d
+#define CSR_MHPMCOUNTER14 0xb0e
+#define CSR_MHPMCOUNTER15 0xb0f
+#define CSR_MHPMCOUNTER16 0xb10
+#define CSR_MHPMCOUNTER17 0xb11
+#define CSR_MHPMCOUNTER18 0xb12
+#define CSR_MHPMCOUNTER19 0xb13
+#define CSR_MHPMCOUNTER20 0xb14
+#define CSR_MHPMCOUNTER21 0xb15
+#define CSR_MHPMCOUNTER22 0xb16
+#define CSR_MHPMCOUNTER23 0xb17
+#define CSR_MHPMCOUNTER24 0xb18
+#define CSR_MHPMCOUNTER25 0xb19
+#define CSR_MHPMCOUNTER26 0xb1a
+#define CSR_MHPMCOUNTER27 0xb1b
+#define CSR_MHPMCOUNTER28 0xb1c
+#define CSR_MHPMCOUNTER29 0xb1d
+#define CSR_MHPMCOUNTER30 0xb1e
+#define CSR_MHPMCOUNTER31 0xb1f
+#define CSR_MHPMEVENT3 0x323
+#define CSR_MHPMEVENT4 0x324
+#define CSR_MHPMEVENT5 0x325
+#define CSR_MHPMEVENT6 0x326
+#define CSR_MHPMEVENT7 0x327
+#define CSR_MHPMEVENT8 0x328
+#define CSR_MHPMEVENT9 0x329
+#define CSR_MHPMEVENT10 0x32a
+#define CSR_MHPMEVENT11 0x32b
+#define CSR_MHPMEVENT12 0x32c
+#define CSR_MHPMEVENT13 0x32d
+#define CSR_MHPMEVENT14 0x32e
+#define CSR_MHPMEVENT15 0x32f
+#define CSR_MHPMEVENT16 0x330
+#define CSR_MHPMEVENT17 0x331
+#define CSR_MHPMEVENT18 0x332
+#define CSR_MHPMEVENT19 0x333
+#define CSR_MHPMEVENT20 0x334
+#define CSR_MHPMEVENT21 0x335
+#define CSR_MHPMEVENT22 0x336
+#define CSR_MHPMEVENT23 0x337
+#define CSR_MHPMEVENT24 0x338
+#define CSR_MHPMEVENT25 0x339
+#define CSR_MHPMEVENT26 0x33a
+#define CSR_MHPMEVENT27 0x33b
+#define CSR_MHPMEVENT28 0x33c
+#define CSR_MHPMEVENT29 0x33d
+#define CSR_MHPMEVENT30 0x33e
+#define CSR_MHPMEVENT31 0x33f
+#define CSR_MHPMCOUNTER3H 0xb83
+#define CSR_MHPMCOUNTER4H 0xb84
+#define CSR_MHPMCOUNTER5H 0xb85
+#define CSR_MHPMCOUNTER6H 0xb86
+#define CSR_MHPMCOUNTER7H 0xb87
+#define CSR_MHPMCOUNTER8H 0xb88
+#define CSR_MHPMCOUNTER9H 0xb89
+#define CSR_MHPMCOUNTER10H 0xb8a
+#define CSR_MHPMCOUNTER11H 0xb8b
+#define CSR_MHPMCOUNTER12H 0xb8c
+#define CSR_MHPMCOUNTER13H 0xb8d
+#define CSR_MHPMCOUNTER14H 0xb8e
+#define CSR_MHPMCOUNTER15H 0xb8f
+#define CSR_MHPMCOUNTER16H 0xb90
+#define CSR_MHPMCOUNTER17H 0xb91
+#define CSR_MHPMCOUNTER18H 0xb92
+#define CSR_MHPMCOUNTER19H 0xb93
+#define CSR_MHPMCOUNTER20H 0xb94
+#define CSR_MHPMCOUNTER21H 0xb95
+#define CSR_MHPMCOUNTER22H 0xb96
+#define CSR_MHPMCOUNTER23H 0xb97
+#define CSR_MHPMCOUNTER24H 0xb98
+#define CSR_MHPMCOUNTER25H 0xb99
+#define CSR_MHPMCOUNTER26H 0xb9a
+#define CSR_MHPMCOUNTER27H 0xb9b
+#define CSR_MHPMCOUNTER28H 0xb9c
+#define CSR_MHPMCOUNTER29H 0xb9d
+#define CSR_MHPMCOUNTER30H 0xb9e
+#define CSR_MHPMCOUNTER31H 0xb9f
+
+/* mstatus CSR bits */
#define MSTATUS_UIE 0x00000001
#define MSTATUS_SIE 0x00000002
#define MSTATUS_HIE 0x00000004
@@ -276,7 +317,7 @@
#define MSTATUS_SD MSTATUS64_SD
#endif
-/* sstatus bits */
+/* sstatus CSR bits */
#define SSTATUS_UIE 0x00000001
#define SSTATUS_SIE 0x00000002
#define SSTATUS_UPIE 0x00000010
@@ -297,83 +338,71 @@
#define SSTATUS_SD SSTATUS64_SD
#endif
-/* irqs */
-#define MIP_SSIP (1 << IRQ_S_SOFT)
-#define MIP_HSIP (1 << IRQ_H_SOFT)
-#define MIP_MSIP (1 << IRQ_M_SOFT)
-#define MIP_STIP (1 << IRQ_S_TIMER)
-#define MIP_HTIP (1 << IRQ_H_TIMER)
-#define MIP_MTIP (1 << IRQ_M_TIMER)
-#define MIP_SEIP (1 << IRQ_S_EXT)
-#define MIP_HEIP (1 << IRQ_H_EXT)
-#define MIP_MEIP (1 << IRQ_M_EXT)
-
-#define SIP_SSIP MIP_SSIP
-#define SIP_STIP MIP_STIP
-#define SIP_SEIP MIP_SEIP
-
+/* Privilege modes */
#define PRV_U 0
#define PRV_S 1
#define PRV_H 2
#define PRV_M 3
-/* privileged ISA 1.9.1 VM modes (mstatus.vm) */
-#define VM_1_09_MBARE 0
-#define VM_1_09_MBB 1
-#define VM_1_09_MBBID 2
-#define VM_1_09_SV32 8
-#define VM_1_09_SV39 9
-#define VM_1_09_SV48 10
-
-/* privileged ISA 1.10.0 VM modes (satp.mode) */
-#define VM_1_10_MBARE 0
-#define VM_1_10_SV32 1
-#define VM_1_10_SV39 8
-#define VM_1_10_SV48 9
-#define VM_1_10_SV57 10
-#define VM_1_10_SV64 11
-
-/* privileged ISA interrupt causes */
-#define IRQ_U_SOFT 0 /* since: priv-1.10 */
-#define IRQ_S_SOFT 1
-#define IRQ_H_SOFT 2 /* until: priv-1.9.1 */
-#define IRQ_M_SOFT 3 /* until: priv-1.9.1 */
-#define IRQ_U_TIMER 4 /* since: priv-1.10 */
-#define IRQ_S_TIMER 5
-#define IRQ_H_TIMER 6 /* until: priv-1.9.1 */
-#define IRQ_M_TIMER 7 /* until: priv-1.9.1 */
-#define IRQ_U_EXT 8 /* since: priv-1.10 */
-#define IRQ_S_EXT 9
-#define IRQ_H_EXT 10 /* until: priv-1.9.1 */
-#define IRQ_M_EXT 11 /* until: priv-1.9.1 */
-#define IRQ_X_COP 12 /* non-standard */
-
-/* Default addresses */
-#define DEFAULT_RSTVEC 0x00001000
-
-/* RV32 satp field masks */
-#define SATP32_MODE 0x80000000
-#define SATP32_ASID 0x7fc00000
-#define SATP32_PPN 0x003fffff
-
-/* RV64 satp field masks */
-#define SATP64_MODE 0xF000000000000000ULL
-#define SATP64_ASID 0x0FFFF00000000000ULL
-#define SATP64_PPN 0x00000FFFFFFFFFFFULL
+/* RV32 satp CSR field masks */
+#define SATP32_MODE 0x80000000
+#define SATP32_ASID 0x7fc00000
+#define SATP32_PPN 0x003fffff
+
+/* RV64 satp CSR field masks */
+#define SATP64_MODE 0xF000000000000000ULL
+#define SATP64_ASID 0x0FFFF00000000000ULL
+#define SATP64_PPN 0x00000FFFFFFFFFFFULL
#if defined(TARGET_RISCV32)
-#define SATP_MODE SATP32_MODE
-#define SATP_ASID SATP32_ASID
-#define SATP_PPN SATP32_PPN
+#define SATP_MODE SATP32_MODE
+#define SATP_ASID SATP32_ASID
+#define SATP_PPN SATP32_PPN
#endif
#if defined(TARGET_RISCV64)
-#define SATP_MODE SATP64_MODE
-#define SATP_ASID SATP64_ASID
-#define SATP_PPN SATP64_PPN
+#define SATP_MODE SATP64_MODE
+#define SATP_ASID SATP64_ASID
+#define SATP_PPN SATP64_PPN
#endif
-/* RISCV Exception Codes */
-#define EXCP_NONE -1 /* not a real RISCV exception code */
+/* VM modes (mstatus.vm) privileged ISA 1.9.1 */
+#define VM_1_09_MBARE 0
+#define VM_1_09_MBB 1
+#define VM_1_09_MBBID 2
+#define VM_1_09_SV32 8
+#define VM_1_09_SV39 9
+#define VM_1_09_SV48 10
+
+/* VM modes (satp.mode) privileged ISA 1.10 */
+#define VM_1_10_MBARE 0
+#define VM_1_10_SV32 1
+#define VM_1_10_SV39 8
+#define VM_1_10_SV48 9
+#define VM_1_10_SV57 10
+#define VM_1_10_SV64 11
+
+/* Page table entry (PTE) fields */
+#define PTE_V 0x001 /* Valid */
+#define PTE_R 0x002 /* Read */
+#define PTE_W 0x004 /* Write */
+#define PTE_X 0x008 /* Execute */
+#define PTE_U 0x010 /* User */
+#define PTE_G 0x020 /* Global */
+#define PTE_A 0x040 /* Accessed */
+#define PTE_D 0x080 /* Dirty */
+#define PTE_SOFT 0x300 /* Reserved for Software */
+
+/* Page table PPN shift amount */
+#define PTE_PPN_SHIFT 10
+
+/* Leaf page shift amount */
+#define PGSHIFT 12
+
+/* Default Reset Vector address */
+#define DEFAULT_RSTVEC 0x1000
+
+/* Exception causes */
+#define EXCP_NONE -1 /* sentinel value */
#define RISCV_EXCP_INST_ADDR_MIS 0x0
#define RISCV_EXCP_INST_ACCESS_FAULT 0x1
#define RISCV_EXCP_ILLEGAL_INST 0x2
@@ -382,9 +411,7 @@
#define RISCV_EXCP_LOAD_ACCESS_FAULT 0x5
#define RISCV_EXCP_STORE_AMO_ADDR_MIS 0x6
#define RISCV_EXCP_STORE_AMO_ACCESS_FAULT 0x7
-#define RISCV_EXCP_U_ECALL 0x8 /* for convenience, report all
- ECALLs as this, handler
- fixes */
+#define RISCV_EXCP_U_ECALL 0x8
#define RISCV_EXCP_S_ECALL 0x9
#define RISCV_EXCP_H_ECALL 0xa
#define RISCV_EXCP_M_ECALL 0xb
@@ -395,15 +422,35 @@
#define RISCV_EXCP_INT_FLAG 0x80000000
#define RISCV_EXCP_INT_MASK 0x7fffffff
-/* page table entry (PTE) fields */
-#define PTE_V 0x001 /* Valid */
-#define PTE_R 0x002 /* Read */
-#define PTE_W 0x004 /* Write */
-#define PTE_X 0x008 /* Execute */
-#define PTE_U 0x010 /* User */
-#define PTE_G 0x020 /* Global */
-#define PTE_A 0x040 /* Accessed */
-#define PTE_D 0x080 /* Dirty */
-#define PTE_SOFT 0x300 /* Reserved for Software */
-
-#define PTE_PPN_SHIFT 10
+/* Interrupt causes */
+#define IRQ_U_SOFT 0
+#define IRQ_S_SOFT 1
+#define IRQ_H_SOFT 2 /* reserved */
+#define IRQ_M_SOFT 3
+#define IRQ_U_TIMER 4
+#define IRQ_S_TIMER 5
+#define IRQ_H_TIMER 6 /* reserved */
+#define IRQ_M_TIMER 7
+#define IRQ_U_EXT 8
+#define IRQ_S_EXT 9
+#define IRQ_H_EXT 10 /* reserved */
+#define IRQ_M_EXT 11
+
+/* mip masks */
+#define MIP_USIP (1 << IRQ_U_SOFT)
+#define MIP_SSIP (1 << IRQ_S_SOFT)
+#define MIP_HSIP (1 << IRQ_H_SOFT)
+#define MIP_MSIP (1 << IRQ_M_SOFT)
+#define MIP_UTIP (1 << IRQ_U_TIMER)
+#define MIP_STIP (1 << IRQ_S_TIMER)
+#define MIP_HTIP (1 << IRQ_H_TIMER)
+#define MIP_MTIP (1 << IRQ_M_TIMER)
+#define MIP_UEIP (1 << IRQ_U_EXT)
+#define MIP_SEIP (1 << IRQ_S_EXT)
+#define MIP_HEIP (1 << IRQ_H_EXT)
+#define MIP_MEIP (1 << IRQ_M_EXT)
+
+/* sip masks */
+#define SIP_SSIP MIP_SSIP
+#define SIP_STIP MIP_STIP
+#define SIP_SEIP MIP_SEIP
diff --git a/target/riscv/helper.c b/target/riscv/cpu_helper.c
index 63b3386b76..86f9f4730c 100644
--- a/target/riscv/helper.c
+++ b/target/riscv/cpu_helper.c
@@ -1,5 +1,5 @@
/*
- * RISC-V emulation helpers for qemu.
+ * RISC-V CPU helpers for qemu.
*
* Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
* Copyright (c) 2017-2018 SiFive, Inc.
@@ -72,6 +72,39 @@ bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
#if !defined(CONFIG_USER_ONLY)
+/* iothread_mutex must be held */
+uint32_t riscv_cpu_update_mip(RISCVCPU *cpu, uint32_t mask, uint32_t value)
+{
+ CPURISCVState *env = &cpu->env;
+ uint32_t old, new, cmp = atomic_read(&env->mip);
+
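+    /* atomically update the masked bits of mip, retrying on races */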
+ do {
+ old = cmp;
+ new = (old & ~mask) | (value & mask);
+ cmp = atomic_cmpxchg(&env->mip, old, new);
+ } while (old != cmp);
+
+ if (new && !old) {
+ cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
+ } else if (!new && old) {
+ cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
+ }
+
+ return old;
+}
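+
+/*
+ * Callers typically pass BOOL_TO_MASK(level) as the value argument, e.g.
+ * riscv_cpu_update_mip(cpu, MIP_MTIP, BOOL_TO_MASK(level)) to raise or
+ * clear the machine timer interrupt pending bit.
+ */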
+
+void riscv_set_mode(CPURISCVState *env, target_ulong newpriv)
+{
+ if (newpriv > PRV_M) {
+ g_assert_not_reached();
+ }
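+    /* hypervisor (H) mode is not supported, so fold PRV_H down to PRV_U */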
+ if (newpriv == PRV_H) {
+ newpriv = PRV_U;
+ }
+ /* tlb_flush is unnecessary as mode is contained in mmu_idx */
+ env->priv = newpriv;
+}
+
/* get_physical_address - get the physical address for this virtual address
*
* Do a page table walk to obtain the physical address corresponding to a
diff --git a/target/riscv/op_helper.c b/target/riscv/op_helper.c
index aec7558e1b..3726299d4a 100644
--- a/target/riscv/op_helper.c
+++ b/target/riscv/op_helper.c
@@ -90,7 +90,7 @@ void csr_write_helper(CPURISCVState *env, target_ulong val_to_write,
target_ulong csrno)
{
#ifndef CONFIG_USER_ONLY
- uint64_t delegable_ints = MIP_SSIP | MIP_STIP | MIP_SEIP | (1 << IRQ_X_COP);
+ uint64_t delegable_ints = MIP_SSIP | MIP_STIP | MIP_SEIP;
uint64_t all_ints = delegable_ints | MIP_MSIP | MIP_MTIP;
#endif
@@ -171,10 +171,8 @@ void csr_write_helper(CPURISCVState *env, target_ulong val_to_write,
*/
qemu_mutex_lock_iothread();
RISCVCPU *cpu = riscv_env_get_cpu(env);
- riscv_set_local_interrupt(cpu, MIP_SSIP,
- (val_to_write & MIP_SSIP) != 0);
- riscv_set_local_interrupt(cpu, MIP_STIP,
- (val_to_write & MIP_STIP) != 0);
+ riscv_cpu_update_mip(cpu, MIP_SSIP | MIP_STIP,
+ (val_to_write & (MIP_SSIP | MIP_STIP)));
/*
* csrs, csrc on mip.SEIP is not decomposable into separate read and
* write steps, so a different implementation is needed
@@ -656,31 +654,6 @@ target_ulong helper_csrrc(CPURISCVState *env, target_ulong src,
#ifndef CONFIG_USER_ONLY
-/* iothread_mutex must be held */
-void riscv_set_local_interrupt(RISCVCPU *cpu, target_ulong mask, int value)
-{
- target_ulong old_mip = cpu->env.mip;
- cpu->env.mip = (old_mip & ~mask) | (value ? mask : 0);
-
- if (cpu->env.mip && !old_mip) {
- cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
- } else if (!cpu->env.mip && old_mip) {
- cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
- }
-}
-
-void riscv_set_mode(CPURISCVState *env, target_ulong newpriv)
-{
- if (newpriv > PRV_M) {
- g_assert_not_reached();
- }
- if (newpriv == PRV_H) {
- newpriv = PRV_U;
- }
- /* tlb_flush is unnecessary as mode is contained in mmu_idx */
- env->priv = newpriv;
-}
-
target_ulong helper_sret(CPURISCVState *env, target_ulong cpu_pc_deb)
{
if (!(env->priv >= PRV_S)) {
@@ -731,7 +704,6 @@ target_ulong helper_mret(CPURISCVState *env, target_ulong cpu_pc_deb)
return retpc;
}
-
void helper_wfi(CPURISCVState *env)
{
CPUState *cs = CPU(riscv_env_get_cpu(env));
diff --git a/tests/benchmark-crypto-cipher.c b/tests/benchmark-crypto-cipher.c
index f5a0d0bc32..67fdf8c31d 100644
--- a/tests/benchmark-crypto-cipher.c
+++ b/tests/benchmark-crypto-cipher.c
@@ -15,17 +15,27 @@
#include "crypto/init.h"
#include "crypto/cipher.h"
-static void test_cipher_speed(const void *opaque)
+static void test_cipher_speed(size_t chunk_size,
+ QCryptoCipherMode mode,
+ QCryptoCipherAlgorithm alg)
{
QCryptoCipher *cipher;
Error *err = NULL;
double total = 0.0;
- size_t chunk_size = (size_t)opaque;
uint8_t *key = NULL, *iv = NULL;
uint8_t *plaintext = NULL, *ciphertext = NULL;
- size_t nkey = qcrypto_cipher_get_key_len(QCRYPTO_CIPHER_ALG_AES_128);
- size_t niv = qcrypto_cipher_get_iv_len(QCRYPTO_CIPHER_ALG_AES_128,
- QCRYPTO_CIPHER_MODE_CBC);
+ size_t nkey;
+ size_t niv;
+
+ if (!qcrypto_cipher_supports(alg, mode)) {
+ return;
+ }
+
+ nkey = qcrypto_cipher_get_key_len(alg);
+ niv = qcrypto_cipher_get_iv_len(alg, mode);
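+    /* XTS uses two keys of the base cipher's size: one for data, one for the tweak */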
+ if (mode == QCRYPTO_CIPHER_MODE_XTS) {
+ nkey *= 2;
+ }
key = g_new0(uint8_t, nkey);
memset(key, g_test_rand_int(), nkey);
@@ -38,14 +48,14 @@ static void test_cipher_speed(const void *opaque)
plaintext = g_new0(uint8_t, chunk_size);
memset(plaintext, g_test_rand_int(), chunk_size);
- cipher = qcrypto_cipher_new(QCRYPTO_CIPHER_ALG_AES_128,
- QCRYPTO_CIPHER_MODE_CBC,
+ cipher = qcrypto_cipher_new(alg, mode,
key, nkey, &err);
g_assert(cipher != NULL);
- g_assert(qcrypto_cipher_setiv(cipher,
- iv, niv,
- &err) == 0);
+    if (mode != QCRYPTO_CIPHER_MODE_ECB) {
+        g_assert(qcrypto_cipher_setiv(cipher,
+                                      iv, niv,
+                                      &err) == 0);
+    }
g_test_timer_start();
do {
@@ -55,13 +65,26 @@ static void test_cipher_speed(const void *opaque)
chunk_size,
&err) == 0);
total += chunk_size;
- } while (g_test_timer_elapsed() < 5.0);
+ } while (g_test_timer_elapsed() < 1.0);
total /= MiB;
- g_print("cbc(aes128): ");
- g_print("Testing chunk_size %zu bytes ", chunk_size);
- g_print("done: %.2f MB in %.2f secs: ", total, g_test_timer_last());
- g_print("%.2f MB/sec\n", total / g_test_timer_last());
+ g_print("Enc chunk %zu bytes ", chunk_size);
+ g_print("%.2f MB/sec ", total / g_test_timer_last());
+
+ total = 0.0;
+ g_test_timer_start();
+ do {
+ g_assert(qcrypto_cipher_decrypt(cipher,
+ plaintext,
+ ciphertext,
+ chunk_size,
+ &err) == 0);
+ total += chunk_size;
+ } while (g_test_timer_elapsed() < 1.0);
+
+ total /= MiB;
+ g_print("Dec chunk %zu bytes ", chunk_size);
+ g_print("%.2f MB/sec ", total / g_test_timer_last());
qcrypto_cipher_free(cipher);
g_free(plaintext);
@@ -70,19 +93,99 @@ static void test_cipher_speed(const void *opaque)
g_free(key);
}
-int main(int argc, char **argv)
+
+static void test_cipher_speed_ecb_aes_128(const void *opaque)
+{
+ size_t chunk_size = (size_t)opaque;
+ test_cipher_speed(chunk_size,
+ QCRYPTO_CIPHER_MODE_ECB,
+ QCRYPTO_CIPHER_ALG_AES_128);
+}
+
+static void test_cipher_speed_ecb_aes_256(const void *opaque)
{
- size_t i;
- char name[64];
+ size_t chunk_size = (size_t)opaque;
+ test_cipher_speed(chunk_size,
+ QCRYPTO_CIPHER_MODE_ECB,
+ QCRYPTO_CIPHER_ALG_AES_256);
+}
+
+static void test_cipher_speed_cbc_aes_128(const void *opaque)
+{
+ size_t chunk_size = (size_t)opaque;
+ test_cipher_speed(chunk_size,
+ QCRYPTO_CIPHER_MODE_CBC,
+ QCRYPTO_CIPHER_ALG_AES_128);
+}
+static void test_cipher_speed_cbc_aes_256(const void *opaque)
+{
+ size_t chunk_size = (size_t)opaque;
+ test_cipher_speed(chunk_size,
+ QCRYPTO_CIPHER_MODE_CBC,
+ QCRYPTO_CIPHER_ALG_AES_256);
+}
+
+static void test_cipher_speed_ctr_aes_128(const void *opaque)
+{
+ size_t chunk_size = (size_t)opaque;
+ test_cipher_speed(chunk_size,
+ QCRYPTO_CIPHER_MODE_CTR,
+ QCRYPTO_CIPHER_ALG_AES_128);
+}
+
+static void test_cipher_speed_ctr_aes_256(const void *opaque)
+{
+ size_t chunk_size = (size_t)opaque;
+ test_cipher_speed(chunk_size,
+ QCRYPTO_CIPHER_MODE_CTR,
+ QCRYPTO_CIPHER_ALG_AES_256);
+}
+
+static void test_cipher_speed_xts_aes_128(const void *opaque)
+{
+ size_t chunk_size = (size_t)opaque;
+ test_cipher_speed(chunk_size,
+ QCRYPTO_CIPHER_MODE_XTS,
+ QCRYPTO_CIPHER_ALG_AES_128);
+}
+
+static void test_cipher_speed_xts_aes_256(const void *opaque)
+{
+ size_t chunk_size = (size_t)opaque;
+ test_cipher_speed(chunk_size,
+ QCRYPTO_CIPHER_MODE_XTS,
+ QCRYPTO_CIPHER_ALG_AES_256);
+}
+
+
+int main(int argc, char **argv)
+{
g_test_init(&argc, &argv, NULL);
g_assert(qcrypto_init(NULL) == 0);
- for (i = 512; i <= 64 * KiB; i *= 2) {
- memset(name, 0 , sizeof(name));
- snprintf(name, sizeof(name), "/crypto/cipher/speed-%zu", i);
- g_test_add_data_func(name, (void *)i, test_cipher_speed);
- }
+#define ADD_TEST(mode, cipher, keysize, chunk) \
+ g_test_add_data_func( \
+ "/crypto/cipher/" #mode "-" #cipher "-" #keysize "/chunk-" #chunk, \
+ (void *)chunk, \
+ test_cipher_speed_ ## mode ## _ ## cipher ## _ ## keysize)
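+/* e.g. ADD_TEST(xts, aes, 256, 512) registers "/crypto/cipher/xts-aes-256/chunk-512" */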
+
+#define ADD_TESTS(chunk) \
+ do { \
+ ADD_TEST(ecb, aes, 128, chunk); \
+ ADD_TEST(ecb, aes, 256, chunk); \
+ ADD_TEST(cbc, aes, 128, chunk); \
+ ADD_TEST(cbc, aes, 256, chunk); \
+ ADD_TEST(ctr, aes, 128, chunk); \
+ ADD_TEST(ctr, aes, 256, chunk); \
+ ADD_TEST(xts, aes, 128, chunk); \
+ ADD_TEST(xts, aes, 256, chunk); \
+ } while (0)
+
+ ADD_TESTS(512);
+ ADD_TESTS(4096);
+ ADD_TESTS(16384);
+ ADD_TESTS(65536);
return g_test_run();
}
diff --git a/tests/docker/dockerfiles/debian-bootstrap.pre b/tests/docker/dockerfiles/debian-bootstrap.pre
index 3b0ef95374..c164778c30 100755
--- a/tests/docker/dockerfiles/debian-bootstrap.pre
+++ b/tests/docker/dockerfiles/debian-bootstrap.pre
@@ -2,7 +2,7 @@
#
# Simple wrapper for debootstrap, run in the docker build context
#
-FAKEROOT=`which fakeroot 2> /dev/null`
+FAKEROOT=$(which fakeroot 2> /dev/null)
# debootstrap < 1.0.67 generates empty sources.list, see Debian#732255
MIN_DEBOOTSTRAP_VERSION=1.0.67
@@ -52,7 +52,7 @@ fi
if [ -z $DEBOOTSTRAP_DIR ]; then
NEED_DEBOOTSTRAP=false
- DEBOOTSTRAP=`which debootstrap 2> /dev/null`
+ DEBOOTSTRAP=$(which debootstrap 2> /dev/null)
if [ -z $DEBOOTSTRAP ]; then
echo "No debootstrap installed, attempting to install from SCM"
NEED_DEBOOTSTRAP=true
diff --git a/tests/migration-test.c b/tests/migration-test.c
index b7920255c5..06ca5068d8 100644
--- a/tests/migration-test.c
+++ b/tests/migration-test.c
@@ -803,6 +803,22 @@ int main(int argc, char **argv)
return 0;
}
+ /*
+ * Similar to ppc64, s390x seems to be touchy with TCG, so disable it
+ * there until the problems are resolved
+ */
+ if (g_str_equal(qtest_get_arch(), "s390x")) {
+#if defined(HOST_S390X)
+ if (access("/dev/kvm", R_OK | W_OK)) {
+ g_test_message("Skipping test: kvm not available");
+ return 0;
+ }
+#else
+ g_test_message("Skipping test: Need s390x host to work properly");
+ return 0;
+#endif
+ }
+
tmpfs = mkdtemp(template);
if (!tmpfs) {
g_test_message("mkdtemp on path (%s): %s\n", template, strerror(errno));
diff --git a/tests/tcg/mips/mipsr5900/Makefile b/tests/tcg/mips/mipsr5900/Makefile
new file mode 100644
index 0000000000..a1c388bc3c
--- /dev/null
+++ b/tests/tcg/mips/mipsr5900/Makefile
@@ -0,0 +1,30 @@
+-include ../../config-host.mak
+
+CROSS=mipsr5900el-unknown-linux-gnu-
+
+SIM=qemu-mipsel
+SIM_FLAGS=-cpu R5900
+
+CC = $(CROSS)gcc
+CFLAGS = -Wall -mabi=32 -march=r5900 -static
+
+TESTCASES = div1.tst
+TESTCASES += divu1.tst
+TESTCASES += mflohi1.tst
+TESTCASES += mtlohi1.tst
+TESTCASES += mult.tst
+TESTCASES += multu.tst
+
+all: $(TESTCASES)
+
+%.tst: %.c
+ $(CC) $(CFLAGS) $< -o $@
+
+check: $(TESTCASES)
+ @for case in $(TESTCASES); do \
+ echo $(SIM) $(SIM_FLAGS) ./$$case;\
+ $(SIM) $(SIM_FLAGS) ./$$case; \
+ done
+
+clean:
+ $(RM) -rf $(TESTCASES)
diff --git a/tests/tcg/mips/mipsr5900/div1.c b/tests/tcg/mips/mipsr5900/div1.c
new file mode 100644
index 0000000000..83dafa018b
--- /dev/null
+++ b/tests/tcg/mips/mipsr5900/div1.c
@@ -0,0 +1,73 @@
+/*
+ * Test R5900-specific DIV1.
+ */
+
+#include <stdio.h>
+#include <inttypes.h>
+#include <assert.h>
+
+struct quotient_remainder { int32_t quotient, remainder; };
+
+static struct quotient_remainder div1(int32_t rs, int32_t rt)
+{
+ int32_t lo, hi;
+
+ __asm__ __volatile__ (
+ " div1 $0, %2, %3\n"
+ " mflo1 %0\n"
+ " mfhi1 %1\n"
+ : "=r" (lo), "=r" (hi)
+ : "r" (rs), "r" (rt));
+
+ assert(rs / rt == lo);
+ assert(rs % rt == hi);
+
+ return (struct quotient_remainder) { .quotient = lo, .remainder = hi };
+}
+
+static void verify_div1(int32_t rs, int32_t rt,
+ int32_t expected_quotient,
+ int32_t expected_remainder)
+{
+ struct quotient_remainder qr = div1(rs, rt);
+
+ assert(qr.quotient == expected_quotient);
+ assert(qr.remainder == expected_remainder);
+}
+
+static void verify_div1_negations(int32_t rs, int32_t rt,
+ int32_t expected_quotient,
+ int32_t expected_remainder)
+{
+ verify_div1(rs, rt, expected_quotient, expected_remainder);
+ verify_div1(rs, -rt, -expected_quotient, expected_remainder);
+ verify_div1(-rs, rt, -expected_quotient, -expected_remainder);
+ verify_div1(-rs, -rt, expected_quotient, -expected_remainder);
+}
+
+int main()
+{
+ verify_div1_negations(0, 1, 0, 0);
+ verify_div1_negations(1, 1, 1, 0);
+ verify_div1_negations(1, 2, 0, 1);
+ verify_div1_negations(17, 19, 0, 17);
+ verify_div1_negations(19, 17, 1, 2);
+ verify_div1_negations(77773, 101, 770, 3);
+
+ verify_div1(-0x80000000, 1, -0x80000000, 0);
+
+ /*
+ * Supplementary explanation from the Toshiba TX System RISC TX79 Core
+ * Architecture manual, A-38 and B-7, https://wiki.qemu.org/File:C790.pdf
+ *
+     * Normally, when 0x80000000 (-2147483648), the signed minimum value, is
+     * divided by 0xFFFFFFFF (-1), the operation will result in an overflow.
+ * However, in this instruction an overflow exception doesn't occur and
+ * the result will be as follows:
+ *
+ * Quotient is 0x80000000 (-2147483648), and remainder is 0x00000000 (0).
+ */
+ verify_div1(-0x80000000, -1, -0x80000000, 0);
+
+ return 0;
+}
diff --git a/tests/tcg/mips/mipsr5900/divu1.c b/tests/tcg/mips/mipsr5900/divu1.c
new file mode 100644
index 0000000000..72aeed31de
--- /dev/null
+++ b/tests/tcg/mips/mipsr5900/divu1.c
@@ -0,0 +1,48 @@
+/*
+ * Test R5900-specific DIVU1.
+ */
+
+#include <stdio.h>
+#include <inttypes.h>
+#include <assert.h>
+
+struct quotient_remainder { uint32_t quotient, remainder; };
+
+static struct quotient_remainder divu1(uint32_t rs, uint32_t rt)
+{
+ uint32_t lo, hi;
+
+ __asm__ __volatile__ (
+ " divu1 $0, %2, %3\n"
+ " mflo1 %0\n"
+ " mfhi1 %1\n"
+ : "=r" (lo), "=r" (hi)
+ : "r" (rs), "r" (rt));
+
+ assert(rs / rt == lo);
+ assert(rs % rt == hi);
+
+ return (struct quotient_remainder) { .quotient = lo, .remainder = hi };
+}
+
+static void verify_divu1(uint32_t rs, uint32_t rt,
+ uint32_t expected_quotient,
+ uint32_t expected_remainder)
+{
+ struct quotient_remainder qr = divu1(rs, rt);
+
+ assert(qr.quotient == expected_quotient);
+ assert(qr.remainder == expected_remainder);
+}
+
+int main()
+{
+ verify_divu1(0, 1, 0, 0);
+ verify_divu1(1, 1, 1, 0);
+ verify_divu1(1, 2, 0, 1);
+ verify_divu1(17, 19, 0, 17);
+ verify_divu1(19, 17, 1, 2);
+ verify_divu1(77773, 101, 770, 3);
+
+ return 0;
+}
diff --git a/tests/tcg/mips/mipsr5900/mflohi1.c b/tests/tcg/mips/mipsr5900/mflohi1.c
new file mode 100644
index 0000000000..eed3683dc5
--- /dev/null
+++ b/tests/tcg/mips/mipsr5900/mflohi1.c
@@ -0,0 +1,35 @@
+/*
+ * Test R5900-specific MFLO1 and MFHI1.
+ */
+
+#include <stdio.h>
+#include <inttypes.h>
+#include <assert.h>
+
+int main()
+{
+ int32_t rs = 12207031, rt = 305175781;
+ int32_t rs1 = 32452867, rt1 = 49979687;
+ int64_t lo, hi, lo1, hi1;
+ int64_t r, r1;
+
+ /* Test both LO/HI and LO1/HI1 to verify separation. */
+ __asm__ __volatile__ (
+ " mult $0, %4, %5\n"
+ " mult1 $0, %6, %7\n"
+ " mflo %0\n"
+ " mfhi %1\n"
+ " mflo1 %2\n"
+ " mfhi1 %3\n"
+ : "=r" (lo), "=r" (hi),
+ "=r" (lo1), "=r" (hi1)
+ : "r" (rs), "r" (rt),
+ "r" (rs1), "r" (rt1));
+ r = ((int64_t)hi << 32) | (uint32_t)lo;
+ r1 = ((int64_t)hi1 << 32) | (uint32_t)lo1;
+
+ assert(r == 3725290219116211);
+ assert(r1 == 1621984134912629);
+
+ return 0;
+}
diff --git a/tests/tcg/mips/mipsr5900/mtlohi1.c b/tests/tcg/mips/mipsr5900/mtlohi1.c
new file mode 100644
index 0000000000..7f3e72835a
--- /dev/null
+++ b/tests/tcg/mips/mipsr5900/mtlohi1.c
@@ -0,0 +1,40 @@
+/*
+ * Test R5900-specific MTLO1 and MTHI1.
+ */
+
+#include <stdio.h>
+#include <inttypes.h>
+#include <assert.h>
+
+int main()
+{
+ int32_t tlo = 12207031, thi = 305175781;
+ int32_t tlo1 = 32452867, thi1 = 49979687;
+ int32_t flo, fhi, flo1, fhi1;
+
+ /* Test both LO/HI and LO1/HI1 to verify separation. */
+ __asm__ __volatile__ (
+ " mtlo %4\n"
+ " mthi %5\n"
+ " mtlo1 %6\n"
+ " mthi1 %7\n"
+ " move %0, $0\n"
+ " move %1, $0\n"
+ " move %2, $0\n"
+ " move %3, $0\n"
+ " mflo %0\n"
+ " mfhi %1\n"
+ " mflo1 %2\n"
+ " mfhi1 %3\n"
+ : "=r" (flo), "=r" (fhi),
+ "=r" (flo1), "=r" (fhi1)
+ : "r" (tlo), "r" (thi),
+ "r" (tlo1), "r" (thi1));
+
+ assert(flo == 12207031);
+ assert(fhi == 305175781);
+ assert(flo1 == 32452867);
+ assert(fhi1 == 49979687);
+
+ return 0;
+}
diff --git a/tests/tcg/mips/mipsr5900/mult.c b/tests/tcg/mips/mipsr5900/mult.c
new file mode 100644
index 0000000000..5710b395e6
--- /dev/null
+++ b/tests/tcg/mips/mipsr5900/mult.c
@@ -0,0 +1,76 @@
+/*
+ * Test R5900-specific three-operand MULT and MULT1.
+ */
+
+#include <stdio.h>
+#include <inttypes.h>
+#include <assert.h>
+
+static int64_t mult(int32_t rs, int32_t rt)
+{
+ int32_t rd, lo, hi;
+ int64_t r;
+
+ __asm__ __volatile__ (
+ " mult %0, %3, %4\n"
+ " mflo %1\n"
+ " mfhi %2\n"
+ : "=r" (rd), "=r" (lo), "=r" (hi)
+ : "r" (rs), "r" (rt));
+ r = ((int64_t)hi << 32) | (uint32_t)lo;
+
+ assert((int64_t)rs * rt == r);
+ assert(rd == lo);
+
+ return r;
+}
+
+static int64_t mult1(int32_t rs, int32_t rt)
+{
+ int32_t rd, lo, hi;
+ int64_t r;
+
+ __asm__ __volatile__ (
+ " mult1 %0, %3, %4\n"
+ " mflo1 %1\n"
+ " mfhi1 %2\n"
+ : "=r" (rd), "=r" (lo), "=r" (hi)
+ : "r" (rs), "r" (rt));
+ r = ((int64_t)hi << 32) | (uint32_t)lo;
+
+ assert((int64_t)rs * rt == r);
+ assert(rd == lo);
+
+ return r;
+}
+
+static int64_t mult_variants(int32_t rs, int32_t rt)
+{
+ int64_t rd = mult(rs, rt);
+ int64_t rd1 = mult1(rs, rt);
+
+ assert(rd == rd1);
+
+ return rd;
+}
+
+static void verify_mult_negations(int32_t rs, int32_t rt, int64_t expected)
+{
+ assert(mult_variants(rs, rt) == expected);
+ assert(mult_variants(-rs, rt) == -expected);
+ assert(mult_variants(rs, -rt) == -expected);
+ assert(mult_variants(-rs, -rt) == expected);
+}
+
+int main()
+{
+ verify_mult_negations(17, 19, 323);
+ verify_mult_negations(77773, 99991, 7776600043);
+ verify_mult_negations(12207031, 305175781, 3725290219116211);
+
+ assert(mult_variants(-0x80000000, 0x7FFFFFFF) == -0x3FFFFFFF80000000);
+ assert(mult_variants(-0x80000000, -0x7FFFFFFF) == 0x3FFFFFFF80000000);
+ assert(mult_variants(-0x80000000, -0x80000000) == 0x4000000000000000);
+
+ return 0;
+}
diff --git a/tests/tcg/mips/mipsr5900/multu.c b/tests/tcg/mips/mipsr5900/multu.c
new file mode 100644
index 0000000000..f043904d69
--- /dev/null
+++ b/tests/tcg/mips/mipsr5900/multu.c
@@ -0,0 +1,68 @@
+/*
+ * Test R5900-specific three-operand MULTU and MULTU1.
+ */
+
+#include <stdio.h>
+#include <inttypes.h>
+#include <assert.h>
+
+static uint64_t multu(uint32_t rs, uint32_t rt)
+{
+ uint32_t rd, lo, hi;
+ uint64_t r;
+
+ __asm__ __volatile__ (
+ " multu %0, %3, %4\n"
+ " mflo %1\n"
+ " mfhi %2\n"
+ : "=r" (rd), "=r" (lo), "=r" (hi)
+ : "r" (rs), "r" (rt));
+ r = ((uint64_t)hi << 32) | (uint32_t)lo;
+
+ assert((uint64_t)rs * rt == r);
+ assert(rd == lo);
+
+ return r;
+}
+
+static uint64_t multu1(uint32_t rs, uint32_t rt)
+{
+ uint32_t rd, lo, hi;
+ uint64_t r;
+
+ __asm__ __volatile__ (
+ " multu1 %0, %3, %4\n"
+ " mflo1 %1\n"
+ " mfhi1 %2\n"
+ : "=r" (rd), "=r" (lo), "=r" (hi)
+ : "r" (rs), "r" (rt));
+ r = ((uint64_t)hi << 32) | (uint32_t)lo;
+
+ assert((uint64_t)rs * rt == r);
+ assert(rd == lo);
+
+ return r;
+}
+
+static uint64_t multu_variants(uint32_t rs, uint32_t rt)
+{
+ uint64_t rd = multu(rs, rt);
+ uint64_t rd1 = multu1(rs, rt);
+
+ assert(rd == rd1);
+
+ return rd;
+}
+
+int main()
+{
+ assert(multu_variants(17, 19) == 323);
+ assert(multu_variants(77773, 99991) == 7776600043);
+ assert(multu_variants(12207031, 305175781) == 3725290219116211);
+
+ assert(multu_variants(0x80000000U, 0x7FFFFFFF) == 0x3FFFFFFF80000000);
+ assert(multu_variants(0x80000000U, 0x80000000U) == 0x4000000000000000);
+ assert(multu_variants(0xFFFFFFFFU, 0xFFFFFFFFU) == 0xFFFFFFFE00000001U);
+
+ return 0;
+}
diff --git a/tests/test-crypto-xts.c b/tests/test-crypto-xts.c
index 1f1412c45a..6fb61cf635 100644
--- a/tests/test-crypto-xts.c
+++ b/tests/test-crypto-xts.c
@@ -1,7 +1,7 @@
/*
* QEMU Crypto XTS cipher mode
*
- * Copyright (c) 2015-2016 Red Hat, Inc.
+ * Copyright (c) 2015-2018 Red Hat, Inc.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
@@ -340,70 +340,161 @@ static void test_xts_aes_decrypt(const void *ctx,
static void test_xts(const void *opaque)
{
const QCryptoXTSTestData *data = opaque;
- unsigned char out[512], Torg[16], T[16];
+ uint8_t out[512], Torg[16], T[16];
uint64_t seq;
- int j;
- unsigned long len;
struct TestAES aesdata;
struct TestAES aestweak;
- for (j = 0; j < 2; j++) {
- /* skip the cases where
- * the length is smaller than 2*blocklen
- * or the length is not a multiple of 32
- */
- if ((j == 1) && ((data->PTLEN < 32) || (data->PTLEN % 32))) {
- continue;
- }
- len = data->PTLEN / 2;
-
- AES_set_encrypt_key(data->key1, data->keylen / 2 * 8, &aesdata.enc);
- AES_set_decrypt_key(data->key1, data->keylen / 2 * 8, &aesdata.dec);
- AES_set_encrypt_key(data->key2, data->keylen / 2 * 8, &aestweak.enc);
- AES_set_decrypt_key(data->key2, data->keylen / 2 * 8, &aestweak.dec);
-
- seq = data->seqnum;
- STORE64L(seq, Torg);
- memset(Torg + 8, 0, 8);
-
- memcpy(T, Torg, sizeof(T));
- if (j == 0) {
- xts_encrypt(&aesdata, &aestweak,
- test_xts_aes_encrypt,
- test_xts_aes_decrypt,
- T, data->PTLEN, out, data->PTX);
- } else {
- xts_encrypt(&aesdata, &aestweak,
- test_xts_aes_encrypt,
- test_xts_aes_decrypt,
- T, len, out, data->PTX);
- xts_encrypt(&aesdata, &aestweak,
- test_xts_aes_encrypt,
- test_xts_aes_decrypt,
- T, len, &out[len], &data->PTX[len]);
- }
+ AES_set_encrypt_key(data->key1, data->keylen / 2 * 8, &aesdata.enc);
+ AES_set_decrypt_key(data->key1, data->keylen / 2 * 8, &aesdata.dec);
+ AES_set_encrypt_key(data->key2, data->keylen / 2 * 8, &aestweak.enc);
+ AES_set_decrypt_key(data->key2, data->keylen / 2 * 8, &aestweak.dec);
- g_assert(memcmp(out, data->CTX, data->PTLEN) == 0);
-
- memcpy(T, Torg, sizeof(T));
- if (j == 0) {
- xts_decrypt(&aesdata, &aestweak,
- test_xts_aes_encrypt,
- test_xts_aes_decrypt,
- T, data->PTLEN, out, data->CTX);
- } else {
- xts_decrypt(&aesdata, &aestweak,
- test_xts_aes_encrypt,
- test_xts_aes_decrypt,
- T, len, out, data->CTX);
- xts_decrypt(&aesdata, &aestweak,
- test_xts_aes_encrypt,
- test_xts_aes_decrypt,
- T, len, &out[len], &data->CTX[len]);
- }
+ seq = data->seqnum;
+ STORE64L(seq, Torg);
+ memset(Torg + 8, 0, 8);
- g_assert(memcmp(out, data->PTX, data->PTLEN) == 0);
- }
+ memcpy(T, Torg, sizeof(T));
+ xts_encrypt(&aesdata, &aestweak,
+ test_xts_aes_encrypt,
+ test_xts_aes_decrypt,
+ T, data->PTLEN, out, data->PTX);
+
+ g_assert(memcmp(out, data->CTX, data->PTLEN) == 0);
+
+ memcpy(T, Torg, sizeof(T));
+ xts_decrypt(&aesdata, &aestweak,
+ test_xts_aes_encrypt,
+ test_xts_aes_decrypt,
+ T, data->PTLEN, out, data->CTX);
+
+ g_assert(memcmp(out, data->PTX, data->PTLEN) == 0);
+}
+
+
+static void test_xts_split(const void *opaque)
+{
+ const QCryptoXTSTestData *data = opaque;
+ uint8_t out[512], Torg[16], T[16];
+ uint64_t seq;
+ unsigned long len = data->PTLEN / 2;
+ struct TestAES aesdata;
+ struct TestAES aestweak;
+
+ AES_set_encrypt_key(data->key1, data->keylen / 2 * 8, &aesdata.enc);
+ AES_set_decrypt_key(data->key1, data->keylen / 2 * 8, &aesdata.dec);
+ AES_set_encrypt_key(data->key2, data->keylen / 2 * 8, &aestweak.enc);
+ AES_set_decrypt_key(data->key2, data->keylen / 2 * 8, &aestweak.dec);
+
+ seq = data->seqnum;
+ STORE64L(seq, Torg);
+ memset(Torg + 8, 0, 8);
+
+ memcpy(T, Torg, sizeof(T));
+ xts_encrypt(&aesdata, &aestweak,
+ test_xts_aes_encrypt,
+ test_xts_aes_decrypt,
+ T, len, out, data->PTX);
+ xts_encrypt(&aesdata, &aestweak,
+ test_xts_aes_encrypt,
+ test_xts_aes_decrypt,
+ T, len, &out[len], &data->PTX[len]);
+
+ g_assert(memcmp(out, data->CTX, data->PTLEN) == 0);
+
+ memcpy(T, Torg, sizeof(T));
+ xts_decrypt(&aesdata, &aestweak,
+ test_xts_aes_encrypt,
+ test_xts_aes_decrypt,
+ T, len, out, data->CTX);
+ xts_decrypt(&aesdata, &aestweak,
+ test_xts_aes_encrypt,
+ test_xts_aes_decrypt,
+ T, len, &out[len], &data->CTX[len]);
+
+ g_assert(memcmp(out, data->PTX, data->PTLEN) == 0);
+}
+
+
+static void test_xts_unaligned(const void *opaque)
+{
+#define BAD_ALIGN 3
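+    /* misalign the IV, plaintext and ciphertext buffers by 3 bytes on purpose */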
+ const QCryptoXTSTestData *data = opaque;
+ uint8_t in[512 + BAD_ALIGN], out[512 + BAD_ALIGN];
+ uint8_t Torg[16], T[16 + BAD_ALIGN];
+ uint64_t seq;
+ struct TestAES aesdata;
+ struct TestAES aestweak;
+
+ AES_set_encrypt_key(data->key1, data->keylen / 2 * 8, &aesdata.enc);
+ AES_set_decrypt_key(data->key1, data->keylen / 2 * 8, &aesdata.dec);
+ AES_set_encrypt_key(data->key2, data->keylen / 2 * 8, &aestweak.enc);
+ AES_set_decrypt_key(data->key2, data->keylen / 2 * 8, &aestweak.dec);
+
+ seq = data->seqnum;
+ STORE64L(seq, Torg);
+ memset(Torg + 8, 0, 8);
+
+ /* IV not aligned */
+ memcpy(T + BAD_ALIGN, Torg, 16);
+ memcpy(in, data->PTX, data->PTLEN);
+ xts_encrypt(&aesdata, &aestweak,
+ test_xts_aes_encrypt,
+ test_xts_aes_decrypt,
+ T + BAD_ALIGN, data->PTLEN, out, in);
+
+ g_assert(memcmp(out, data->CTX, data->PTLEN) == 0);
+
+ /* plain text not aligned */
+ memcpy(T, Torg, 16);
+ memcpy(in + BAD_ALIGN, data->PTX, data->PTLEN);
+ xts_encrypt(&aesdata, &aestweak,
+ test_xts_aes_encrypt,
+ test_xts_aes_decrypt,
+ T, data->PTLEN, out, in + BAD_ALIGN);
+
+ g_assert(memcmp(out, data->CTX, data->PTLEN) == 0);
+
+ /* cipher text not aligned */
+ memcpy(T, Torg, 16);
+ memcpy(in, data->PTX, data->PTLEN);
+ xts_encrypt(&aesdata, &aestweak,
+ test_xts_aes_encrypt,
+ test_xts_aes_decrypt,
+ T, data->PTLEN, out + BAD_ALIGN, in);
+
+ g_assert(memcmp(out + BAD_ALIGN, data->CTX, data->PTLEN) == 0);
+
+
+ /* IV not aligned */
+ memcpy(T + BAD_ALIGN, Torg, 16);
+ memcpy(in, data->CTX, data->PTLEN);
+ xts_decrypt(&aesdata, &aestweak,
+ test_xts_aes_encrypt,
+ test_xts_aes_decrypt,
+ T + BAD_ALIGN, data->PTLEN, out, in);
+
+ g_assert(memcmp(out, data->PTX, data->PTLEN) == 0);
+
+ /* cipher text not aligned */
+ memcpy(T, Torg, 16);
+ memcpy(in + BAD_ALIGN, data->CTX, data->PTLEN);
+ xts_decrypt(&aesdata, &aestweak,
+ test_xts_aes_encrypt,
+ test_xts_aes_decrypt,
+ T, data->PTLEN, out, in + BAD_ALIGN);
+
+ g_assert(memcmp(out, data->PTX, data->PTLEN) == 0);
+
+ /* plain text not aligned */
+ memcpy(T, Torg, 16);
+ memcpy(in, data->CTX, data->PTLEN);
+ xts_decrypt(&aesdata, &aestweak,
+ test_xts_aes_encrypt,
+ test_xts_aes_decrypt,
+ T, data->PTLEN, out + BAD_ALIGN, in);
+
+ g_assert(memcmp(out + BAD_ALIGN, data->PTX, data->PTLEN) == 0);
}
@@ -416,7 +507,22 @@ int main(int argc, char **argv)
g_assert(qcrypto_init(NULL) == 0);
for (i = 0; i < G_N_ELEMENTS(test_data); i++) {
- g_test_add_data_func(test_data[i].path, &test_data[i], test_xts);
+ gchar *path = g_strdup_printf("%s/basic", test_data[i].path);
+ g_test_add_data_func(path, &test_data[i], test_xts);
+ g_free(path);
+
+ /* skip the cases where the length is smaller than 2*blocklen
+ * or the length is not a multiple of 32
+ */
+ if ((test_data[i].PTLEN >= 32) && !(test_data[i].PTLEN % 32)) {
+ path = g_strdup_printf("%s/split", test_data[i].path);
+ g_test_add_data_func(path, &test_data[i], test_xts_split);
+ g_free(path);
+ }
+
+ path = g_strdup_printf("%s/unaligned", test_data[i].path);
+ g_test_add_data_func(path, &test_data[i], test_xts_unaligned);
+ g_free(path);
}
return g_test_run();