92 files changed, 4564 insertions, 1402 deletions
diff --git a/.gitlab-ci.d/cirrus/freebsd-12.vars b/.gitlab-ci.d/cirrus/freebsd-12.vars index b4842271b2..f59263731f 100644 --- a/.gitlab-ci.d/cirrus/freebsd-12.vars +++ b/.gitlab-ci.d/cirrus/freebsd-12.vars @@ -1,4 +1,5 @@ # THIS FILE WAS AUTO-GENERATED +# ... and then edited to fix py39, pending proper lcitool update. # # $ lcitool variables freebsd-12 qemu # @@ -11,6 +12,6 @@ MAKE='/usr/local/bin/gmake' NINJA='/usr/local/bin/ninja' PACKAGING_COMMAND='pkg' PIP3='/usr/local/bin/pip-3.8' -PKGS='alsa-lib bash bzip2 ca_root_nss capstone4 ccache cdrkit-genisoimage ctags curl cyrus-sasl dbus diffutils dtc fusefs-libs3 gettext git glib gmake gnutls gsed gtk3 libepoxy libffi libgcrypt libjpeg-turbo libnfs libspice-server libssh libtasn1 llvm lzo2 meson ncurses nettle ninja opencv perl5 pixman pkgconf png py38-numpy py38-pillow py38-pip py38-sphinx py38-sphinx_rtd_theme py38-virtualenv py38-yaml python3 rpm2cpio sdl2 sdl2_image snappy spice-protocol tesseract texinfo usbredir virglrenderer vte3 zstd' +PKGS='alsa-lib bash bzip2 ca_root_nss capstone4 ccache cdrkit-genisoimage ctags curl cyrus-sasl dbus diffutils dtc fusefs-libs3 gettext git glib gmake gnutls gsed gtk3 libepoxy libffi libgcrypt libjpeg-turbo libnfs libspice-server libssh libtasn1 llvm lzo2 meson ncurses nettle ninja opencv perl5 pixman pkgconf png py39-numpy py39-pillow py39-pip py39-sphinx py39-sphinx_rtd_theme py39-virtualenv py39-yaml python3 rpm2cpio sdl2 sdl2_image snappy spice-protocol tesseract texinfo usbredir virglrenderer vte3 zstd' PYPI_PKGS='' PYTHON='/usr/local/bin/python3' diff --git a/.gitlab-ci.d/cirrus/freebsd-13.vars b/.gitlab-ci.d/cirrus/freebsd-13.vars index 546a82dd75..40fc961398 100644 --- a/.gitlab-ci.d/cirrus/freebsd-13.vars +++ b/.gitlab-ci.d/cirrus/freebsd-13.vars @@ -1,4 +1,5 @@ # THIS FILE WAS AUTO-GENERATED +# ... and then edited to fix py39, pending proper lcitool update. 
# # $ lcitool variables freebsd-13 qemu # @@ -11,6 +12,6 @@ MAKE='/usr/local/bin/gmake' NINJA='/usr/local/bin/ninja' PACKAGING_COMMAND='pkg' PIP3='/usr/local/bin/pip-3.8' -PKGS='alsa-lib bash bzip2 ca_root_nss capstone4 ccache cdrkit-genisoimage ctags curl cyrus-sasl dbus diffutils dtc fusefs-libs3 gettext git glib gmake gnutls gsed gtk3 libepoxy libffi libgcrypt libjpeg-turbo libnfs libspice-server libssh libtasn1 llvm lzo2 meson ncurses nettle ninja opencv perl5 pixman pkgconf png py38-numpy py38-pillow py38-pip py38-sphinx py38-sphinx_rtd_theme py38-virtualenv py38-yaml python3 rpm2cpio sdl2 sdl2_image snappy spice-protocol tesseract texinfo usbredir virglrenderer vte3 zstd' +PKGS='alsa-lib bash bzip2 ca_root_nss capstone4 ccache cdrkit-genisoimage ctags curl cyrus-sasl dbus diffutils dtc fusefs-libs3 gettext git glib gmake gnutls gsed gtk3 libepoxy libffi libgcrypt libjpeg-turbo libnfs libspice-server libssh libtasn1 llvm lzo2 meson ncurses nettle ninja opencv perl5 pixman pkgconf png py39-numpy py39-pillow py39-pip py39-sphinx py39-sphinx_rtd_theme py39-virtualenv py39-yaml python3 rpm2cpio sdl2 sdl2_image snappy spice-protocol tesseract texinfo usbredir virglrenderer vte3 zstd' PYPI_PKGS='' PYTHON='/usr/local/bin/python3' diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c index f90f4312ea..a46f3a654d 100644 --- a/accel/tcg/cputlb.c +++ b/accel/tcg/cputlb.c @@ -2248,7 +2248,7 @@ store_helper_unaligned(CPUArchState *env, target_ulong addr, uint64_t val, const size_t tlb_off = offsetof(CPUTLBEntry, addr_write); uintptr_t index, index2; CPUTLBEntry *entry, *entry2; - target_ulong page2, tlb_addr, tlb_addr2; + target_ulong page1, page2, tlb_addr, tlb_addr2; MemOpIdx oi; size_t size2; int i; @@ -2256,15 +2256,17 @@ store_helper_unaligned(CPUArchState *env, target_ulong addr, uint64_t val, /* * Ensure the second page is in the TLB. Note that the first page * is already guaranteed to be filled, and that the second page - * cannot evict the first. + * cannot evict the first. An exception to this rule is PAGE_WRITE_INV + * handling: the first page could have evicted itself. */ + page1 = addr & TARGET_PAGE_MASK; page2 = (addr + size) & TARGET_PAGE_MASK; size2 = (addr + size) & ~TARGET_PAGE_MASK; index2 = tlb_index(env, mmu_idx, page2); entry2 = tlb_entry(env, mmu_idx, page2); tlb_addr2 = tlb_addr_write(entry2); - if (!tlb_hit_page(tlb_addr2, page2)) { + if (page1 != page2 && !tlb_hit_page(tlb_addr2, page2)) { if (!victim_tlb_hit(env, mmu_idx, index2, tlb_off, page2)) { tlb_fill(env_cpu(env), page2, size2, MMU_DATA_STORE, mmu_idx, retaddr); diff --git a/block/io_uring.c b/block/io_uring.c index d48e472e74..f8a19fd97f 100644 --- a/block/io_uring.c +++ b/block/io_uring.c @@ -73,12 +73,8 @@ static void luring_resubmit(LuringState *s, LuringAIOCB *luringcb) /** * luring_resubmit_short_read: * - * Before Linux commit 9d93a3f5a0c ("io_uring: punt short reads to async - * context") a buffered I/O request with the start of the file range in the - * page cache could result in a short read. Applications need to resubmit the - * remaining read request. - * - * This is a slow path but recent kernels never take it. + * Short reads are rare but may occur. The remaining read request needs to be + * resubmitted. 
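As a rough illustration of that resubmission bookkeeping (invented names, not QEMU code), both the running total and the next submission offset have to be advanced cumulatively on every short read, which is what the hunk below changes from plain assignment to "+=":

#include <stddef.h>
#include <stdint.h>

/* Illustrative sketch: account for one short read of 'nread' bytes. */
struct short_read_state {
    uint64_t off;        /* file offset for the next submission */
    size_t total_read;   /* bytes completed so far */
    size_t size;         /* total bytes requested */
};

static void account_short_read(struct short_read_state *s, size_t nread)
{
    s->total_read += nread;   /* accumulate; overwriting loses earlier reads */
    s->off += nread;          /* resume exactly where this read stopped */
    /* remaining = s->size - s->total_read; shorten the iovec to 'remaining'
     * and resubmit the request from s->off. */
}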
*/ static void luring_resubmit_short_read(LuringState *s, LuringAIOCB *luringcb, int nread) @@ -89,7 +85,7 @@ static void luring_resubmit_short_read(LuringState *s, LuringAIOCB *luringcb, trace_luring_resubmit_short_read(s, luringcb, nread); /* Update read position */ - luringcb->total_read = nread; + luringcb->total_read += nread; remaining = luringcb->qiov->size - luringcb->total_read; /* Shorten qiov */ @@ -103,7 +99,7 @@ static void luring_resubmit_short_read(LuringState *s, LuringAIOCB *luringcb, remaining); /* Update sqe */ - luringcb->sqeq.off = nread; + luringcb->sqeq.off += nread; luringcb->sqeq.addr = (__u64)(uintptr_t)luringcb->resubmit_qiov.iov; luringcb->sqeq.len = luringcb->resubmit_qiov.niov; diff --git a/docs/about/deprecated.rst b/docs/about/deprecated.rst index 19a91b575f..7ee26626d5 100644 --- a/docs/about/deprecated.rst +++ b/docs/about/deprecated.rst @@ -225,6 +225,14 @@ Use the more generic event ``DEVICE_UNPLUG_GUEST_ERROR`` instead. System emulator machines ------------------------ +Arm ``virt`` machine ``dtb-kaslr-seed`` property +'''''''''''''''''''''''''''''''''''''''''''''''' + +The ``dtb-kaslr-seed`` property on the ``virt`` board has been +deprecated; use the new name ``dtb-randomness`` instead. The new name +better reflects the way this property affects all random data within +the device tree blob, not just the ``kaslr-seed`` node. + PPC 405 ``taihu`` machine (since 7.0) ''''''''''''''''''''''''''''''''''''' diff --git a/docs/system/arm/emulation.rst b/docs/system/arm/emulation.rst index 83b4410065..8e494c8bea 100644 --- a/docs/system/arm/emulation.rst +++ b/docs/system/arm/emulation.rst @@ -65,6 +65,10 @@ the following architecture extensions: - FEAT_SHA512 (Advanced SIMD SHA512 instructions) - FEAT_SM3 (Advanced SIMD SM3 instructions) - FEAT_SM4 (Advanced SIMD SM4 instructions) +- FEAT_SME (Scalable Matrix Extension) +- FEAT_SME_FA64 (Full A64 instruction set in Streaming SVE mode) +- FEAT_SME_F64F64 (Double-precision floating-point outer product instructions) +- FEAT_SME_I16I64 (16-bit to 64-bit integer widening outer product instructions) - FEAT_SPECRES (Speculation restriction instructions) - FEAT_SSBS (Speculative Store Bypass Safe) - FEAT_TLBIOS (TLB invalidate instructions in Outer Shareable domain) diff --git a/docs/system/arm/virt.rst b/docs/system/arm/virt.rst index 3d1058a80c..3b6ba69a9a 100644 --- a/docs/system/arm/virt.rst +++ b/docs/system/arm/virt.rst @@ -126,13 +126,18 @@ ras Set ``on``/``off`` to enable/disable reporting host memory errors to a guest using ACPI and guest external abort exceptions. The default is off. +dtb-randomness + Set ``on``/``off`` to pass random seeds via the guest DTB + rng-seed and kaslr-seed nodes (in both "/chosen" and + "/secure-chosen") to use for features like the random number + generator and address space randomisation. The default is + ``on``. You will want to disable it if your trusted boot chain + will verify the DTB it is passed, since this option causes the + DTB to be non-deterministic. It would be the responsibility of + the firmware to come up with a seed and pass it on if it wants to. + dtb-kaslr-seed - Set ``on``/``off`` to pass a random seed via the guest dtb - kaslr-seed node (in both "/chosen" and /secure-chosen) to use - for features like address space randomisation. The default is - ``on``. You will want to disable it if your trusted boot chain will - verify the DTB it is passed. It would be the responsibility of the - firmware to come up with a seed and pass it on if it wants to. 
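For illustration only (an invocation made up for this note, not taken from the patch): a trusted-boot setup that verifies the DTB it is handed would start the guest with the property disabled, e.g. "qemu-system-aarch64 -M virt,dtb-randomness=off ...", while the default of "on" keeps populating both the rng-seed and kaslr-seed properties under /chosen (and /secure-chosen when the secure world is enabled).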
+ A deprecated synonym for dtb-randomness. Linux guest kernel configuration """""""""""""""""""""""""""""""" diff --git a/hw/arm/virt.c b/hw/arm/virt.c index 5502aa60c8..9633f822f3 100644 --- a/hw/arm/virt.c +++ b/hw/arm/virt.c @@ -221,14 +221,18 @@ static bool cpu_type_valid(const char *cpu) return false; } -static void create_kaslr_seed(MachineState *ms, const char *node) +static void create_randomness(MachineState *ms, const char *node) { - uint64_t seed; + struct { + uint64_t kaslr; + uint8_t rng[32]; + } seed; if (qemu_guest_getrandom(&seed, sizeof(seed), NULL)) { return; } - qemu_fdt_setprop_u64(ms->fdt, node, "kaslr-seed", seed); + qemu_fdt_setprop_u64(ms->fdt, node, "kaslr-seed", seed.kaslr); + qemu_fdt_setprop(ms->fdt, node, "rng-seed", seed.rng, sizeof(seed.rng)); } static void create_fdt(VirtMachineState *vms) @@ -251,14 +255,14 @@ static void create_fdt(VirtMachineState *vms) /* /chosen must exist for load_dtb to fill in necessary properties later */ qemu_fdt_add_subnode(fdt, "/chosen"); - if (vms->dtb_kaslr_seed) { - create_kaslr_seed(ms, "/chosen"); + if (vms->dtb_randomness) { + create_randomness(ms, "/chosen"); } if (vms->secure) { qemu_fdt_add_subnode(fdt, "/secure-chosen"); - if (vms->dtb_kaslr_seed) { - create_kaslr_seed(ms, "/secure-chosen"); + if (vms->dtb_randomness) { + create_randomness(ms, "/secure-chosen"); } } @@ -2340,18 +2344,18 @@ static void virt_set_its(Object *obj, bool value, Error **errp) vms->its = value; } -static bool virt_get_dtb_kaslr_seed(Object *obj, Error **errp) +static bool virt_get_dtb_randomness(Object *obj, Error **errp) { VirtMachineState *vms = VIRT_MACHINE(obj); - return vms->dtb_kaslr_seed; + return vms->dtb_randomness; } -static void virt_set_dtb_kaslr_seed(Object *obj, bool value, Error **errp) +static void virt_set_dtb_randomness(Object *obj, bool value, Error **errp) { VirtMachineState *vms = VIRT_MACHINE(obj); - vms->dtb_kaslr_seed = value; + vms->dtb_randomness = value; } static char *virt_get_oem_id(Object *obj, Error **errp) @@ -2980,12 +2984,18 @@ static void virt_machine_class_init(ObjectClass *oc, void *data) "Set on/off to enable/disable " "ITS instantiation"); + object_class_property_add_bool(oc, "dtb-randomness", + virt_get_dtb_randomness, + virt_set_dtb_randomness); + object_class_property_set_description(oc, "dtb-randomness", + "Set off to disable passing random or " + "non-deterministic dtb nodes to guest"); + object_class_property_add_bool(oc, "dtb-kaslr-seed", - virt_get_dtb_kaslr_seed, - virt_set_dtb_kaslr_seed); + virt_get_dtb_randomness, + virt_set_dtb_randomness); object_class_property_set_description(oc, "dtb-kaslr-seed", - "Set off to disable passing of kaslr-seed " - "dtb node to guest"); + "Deprecated synonym of dtb-randomness"); object_class_property_add_str(oc, "x-oem-id", virt_get_oem_id, @@ -3053,8 +3063,8 @@ static void virt_instance_init(Object *obj) /* MTE is disabled by default. 
*/ vms->mte = false; - /* Supply a kaslr-seed by default */ - vms->dtb_kaslr_seed = true; + /* Supply kaslr-seed and rng-seed by default */ + vms->dtb_randomness = true; vms->irqmap = a15irqmap; diff --git a/hw/intc/pnv_xive.c b/hw/intc/pnv_xive.c index 1ce1d7b07d..c7b75ed12e 100644 --- a/hw/intc/pnv_xive.c +++ b/hw/intc/pnv_xive.c @@ -67,26 +67,6 @@ static const XiveVstInfo vst_infos[] = { (xive)->chip->chip_id, ## __VA_ARGS__); /* - * QEMU version of the GETFIELD/SETFIELD macros - * - * TODO: It might be better to use the existing extract64() and - * deposit64() but this means that all the register definitions will - * change and become incompatible with the ones found in skiboot. - * - * Keep it as it is for now until we find a common ground. - */ -static inline uint64_t GETFIELD(uint64_t mask, uint64_t word) -{ - return (word & mask) >> ctz64(mask); -} - -static inline uint64_t SETFIELD(uint64_t mask, uint64_t word, - uint64_t value) -{ - return (word & ~mask) | ((value << ctz64(mask)) & mask); -} - -/* * When PC_TCTXT_CHIPID_OVERRIDE is configured, the PC_TCTXT_CHIPID * field overrides the hardwired chip ID in the Powerbus operations * and for CAM compares diff --git a/hw/intc/pnv_xive2.c b/hw/intc/pnv_xive2.c index f31c53c28d..f22ce5ca59 100644 --- a/hw/intc/pnv_xive2.c +++ b/hw/intc/pnv_xive2.c @@ -76,26 +76,6 @@ static const XiveVstInfo vst_infos[] = { (xive)->chip->chip_id, ## __VA_ARGS__); /* - * QEMU version of the GETFIELD/SETFIELD macros - * - * TODO: It might be better to use the existing extract64() and - * deposit64() but this means that all the register definitions will - * change and become incompatible with the ones found in skiboot. - * - * Keep it as it is for now until we find a common ground. - */ -static inline uint64_t GETFIELD(uint64_t mask, uint64_t word) -{ - return (word & mask) >> ctz64(mask); -} - -static inline uint64_t SETFIELD(uint64_t mask, uint64_t word, - uint64_t value) -{ - return (word & ~mask) | ((value << ctz64(mask)) & mask); -} - -/* * TODO: Document block id override */ static uint32_t pnv_xive2_block_id(PnvXive2 *xive) diff --git a/hw/m68k/bootinfo.h b/hw/m68k/bootinfo.h index adbf0c5521..bd8b212fd3 100644 --- a/hw/m68k/bootinfo.h +++ b/hw/m68k/bootinfo.h @@ -54,6 +54,22 @@ stb_phys(as, base++, string[i]); \ } \ stb_phys(as, base++, 0); \ - base = (parameters_base + 1) & ~1; \ + base = (base + 1) & ~1; \ + } while (0) + +#define BOOTINFODATA(as, base, id, data, len) \ + do { \ + int i; \ + stw_phys(as, base, id); \ + base += 2; \ + stw_phys(as, base, \ + (sizeof(struct bi_record) + len + 3) & ~1); \ + base += 2; \ + stw_phys(as, base, len); \ + base += 2; \ + for (i = 0; i < len; ++i) { \ + stb_phys(as, base++, data[i]); \ + } \ + base = (base + 1) & ~1; \ } while (0) #endif diff --git a/hw/m68k/virt.c b/hw/m68k/virt.c index e215aa3d42..0aa383fa6b 100644 --- a/hw/m68k/virt.c +++ b/hw/m68k/virt.c @@ -9,6 +9,7 @@ #include "qemu/osdep.h" #include "qemu/units.h" +#include "qemu/guest-random.h" #include "sysemu/sysemu.h" #include "cpu.h" #include "hw/boards.h" @@ -120,6 +121,7 @@ static void virt_init(MachineState *machine) hwaddr io_base; int i; ResetInfo *reset_info; + uint8_t rng_seed[32]; if (ram_size > 3399672 * KiB) { /* @@ -245,6 +247,11 @@ static void virt_init(MachineState *machine) kernel_cmdline); } + /* Pass seed to RNG. 
*/ + qemu_guest_getrandom_nofail(rng_seed, sizeof(rng_seed)); + BOOTINFODATA(cs->as, parameters_base, BI_VIRT_RNG_SEED, + rng_seed, sizeof(rng_seed)); + /* load initrd */ if (initrd_filename) { initrd_size = get_image_size(initrd_filename); diff --git a/hw/pci-host/pnv_phb3.c b/hw/pci-host/pnv_phb3.c index 26ac9b7123..d58d3c1701 100644 --- a/hw/pci-host/pnv_phb3.c +++ b/hw/pci-host/pnv_phb3.c @@ -1052,7 +1052,8 @@ static void pnv_phb3_realize(DeviceState *dev, Error **errp) pci_setup_iommu(pci->bus, pnv_phb3_dma_iommu, phb); - pnv_phb_attach_root_port(PCI_HOST_BRIDGE(phb), TYPE_PNV_PHB3_ROOT_PORT); + pnv_phb_attach_root_port(pci, TYPE_PNV_PHB3_ROOT_PORT, + phb->phb_id, phb->chip_id); } void pnv_phb3_update_regions(PnvPHB3 *phb) @@ -1129,33 +1130,14 @@ static const TypeInfo pnv_phb3_root_bus_info = { .name = TYPE_PNV_PHB3_ROOT_BUS, .parent = TYPE_PCIE_BUS, .class_init = pnv_phb3_root_bus_class_init, - .interfaces = (InterfaceInfo[]) { - { INTERFACE_PCIE_DEVICE }, - { } - }, }; static void pnv_phb3_root_port_realize(DeviceState *dev, Error **errp) { PCIERootPortClass *rpc = PCIE_ROOT_PORT_GET_CLASS(dev); PCIDevice *pci = PCI_DEVICE(dev); - PCIBus *bus = pci_get_bus(pci); - PnvPHB3 *phb = NULL; Error *local_err = NULL; - phb = (PnvPHB3 *) object_dynamic_cast(OBJECT(bus->qbus.parent), - TYPE_PNV_PHB3); - - if (!phb) { - error_setg(errp, -"pnv_phb3_root_port devices must be connected to pnv-phb3 buses"); - return; - } - - /* Set unique chassis/slot values for the root port */ - qdev_prop_set_uint8(&pci->qdev, "chassis", phb->chip_id); - qdev_prop_set_uint16(&pci->qdev, "slot", phb->phb_id); - rpc->parent_realize(dev, &local_err); if (local_err) { error_propagate(errp, local_err); diff --git a/hw/pci-host/pnv_phb4.c b/hw/pci-host/pnv_phb4.c index 6594016121..67ddde4a6e 100644 --- a/hw/pci-host/pnv_phb4.c +++ b/hw/pci-host/pnv_phb4.c @@ -31,22 +31,6 @@ qemu_log_mask(LOG_GUEST_ERROR, "phb4_pec[%d:%d]: " fmt "\n", \ (pec)->chip_id, (pec)->index, ## __VA_ARGS__) -/* - * QEMU version of the GETFIELD/SETFIELD macros - * - * These are common with the PnvXive model. 
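For reference, a sketch of what the duplicated helpers removed here (and from the pnv_xive models above) boil down to, assuming the mask is a single contiguous run of bits — the TODO in the deleted comment alludes to exactly this equivalence with the generic bitops:

#include "qemu/bitops.h"      /* extract64(), deposit64() */
#include "qemu/host-utils.h"  /* ctz64(), ctpop64() */

/* Equivalent to GETFIELD(mask, word) for a contiguous mask. */
static inline uint64_t getfield_generic(uint64_t mask, uint64_t word)
{
    return extract64(word, ctz64(mask), ctpop64(mask));
}

/* Equivalent to SETFIELD(mask, word, value) for a contiguous mask. */
static inline uint64_t setfield_generic(uint64_t mask, uint64_t word,
                                        uint64_t value)
{
    return deposit64(word, ctz64(mask), ctpop64(mask), value);
}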
- */ -static inline uint64_t GETFIELD(uint64_t mask, uint64_t word) -{ - return (word & mask) >> ctz64(mask); -} - -static inline uint64_t SETFIELD(uint64_t mask, uint64_t word, - uint64_t value) -{ - return (word & ~mask) | ((value << ctz64(mask)) & mask); -} - static PCIDevice *pnv_phb4_find_cfg_dev(PnvPHB4 *phb) { PCIHostState *pci = PCI_HOST_BRIDGE(phb); @@ -1547,6 +1531,7 @@ static void pnv_phb4_instance_init(Object *obj) static void pnv_phb4_realize(DeviceState *dev, Error **errp) { PnvPHB4 *phb = PNV_PHB4(dev); + PnvPhb4PecClass *pecc = PNV_PHB4_PEC_GET_CLASS(phb->pec); PCIHostState *pci = PCI_HOST_BRIDGE(dev); XiveSource *xsrc = &phb->xsrc; int nr_irqs; @@ -1583,6 +1568,10 @@ static void pnv_phb4_realize(DeviceState *dev, Error **errp) pci_setup_iommu(pci->bus, pnv_phb4_dma_iommu, phb); pci->bus->flags |= PCI_BUS_EXTENDED_CONFIG_SPACE; + /* Add a single Root port if running with defaults */ + pnv_phb_attach_root_port(pci, pecc->rp_model, + phb->phb_id, phb->chip_id); + /* Setup XIVE Source */ if (phb->big_phb) { nr_irqs = PNV_PHB4_MAX_INTs; @@ -1747,10 +1736,6 @@ static const TypeInfo pnv_phb4_root_bus_info = { .name = TYPE_PNV_PHB4_ROOT_BUS, .parent = TYPE_PCIE_BUS, .class_init = pnv_phb4_root_bus_class_init, - .interfaces = (InterfaceInfo[]) { - { INTERFACE_PCIE_DEVICE }, - { } - }, }; static void pnv_phb4_root_port_reset(DeviceState *dev) @@ -1777,23 +1762,8 @@ static void pnv_phb4_root_port_reset(DeviceState *dev) static void pnv_phb4_root_port_realize(DeviceState *dev, Error **errp) { PCIERootPortClass *rpc = PCIE_ROOT_PORT_GET_CLASS(dev); - PCIDevice *pci = PCI_DEVICE(dev); - PCIBus *bus = pci_get_bus(pci); - PnvPHB4 *phb = NULL; Error *local_err = NULL; - phb = (PnvPHB4 *) object_dynamic_cast(OBJECT(bus->qbus.parent), - TYPE_PNV_PHB4); - - if (!phb) { - error_setg(errp, "%s must be connected to pnv-phb4 buses", dev->id); - return; - } - - /* Set unique chassis/slot values for the root port */ - qdev_prop_set_uint8(&pci->qdev, "chassis", phb->chip_id); - qdev_prop_set_uint16(&pci->qdev, "slot", phb->phb_id); - rpc->parent_realize(dev, &local_err); if (local_err) { error_propagate(errp, local_err); diff --git a/hw/pci-host/pnv_phb4_pec.c b/hw/pci-host/pnv_phb4_pec.c index 8b7e823fa5..c9aaf1c28e 100644 --- a/hw/pci-host/pnv_phb4_pec.c +++ b/hw/pci-host/pnv_phb4_pec.c @@ -130,9 +130,6 @@ static void pnv_pec_default_phb_realize(PnvPhb4PecState *pec, if (!sysbus_realize(SYS_BUS_DEVICE(phb), errp)) { return; } - - /* Add a single Root port if running with defaults */ - pnv_phb_attach_root_port(PCI_HOST_BRIDGE(phb), pecc->rp_model); } static void pnv_pec_realize(DeviceState *dev, Error **errp) diff --git a/hw/ppc/pnv.c b/hw/ppc/pnv.c index 7c08a78d6c..d3f77c8367 100644 --- a/hw/ppc/pnv.c +++ b/hw/ppc/pnv.c @@ -652,25 +652,19 @@ static ISABus *pnv_isa_create(PnvChip *chip, Error **errp) return PNV_CHIP_GET_CLASS(chip)->isa_create(chip, errp); } -static int pnv_chip_power8_pic_print_info_child(Object *child, void *opaque) -{ - Monitor *mon = opaque; - PnvPHB3 *phb3 = (PnvPHB3 *) object_dynamic_cast(child, TYPE_PNV_PHB3); - - if (phb3) { - pnv_phb3_msi_pic_print_info(&phb3->msis, mon); - ics_pic_print_info(&phb3->lsis, mon); - } - return 0; -} - static void pnv_chip_power8_pic_print_info(PnvChip *chip, Monitor *mon) { Pnv8Chip *chip8 = PNV8_CHIP(chip); + int i; ics_pic_print_info(&chip8->psi.ics, mon); - object_child_foreach(OBJECT(chip), - pnv_chip_power8_pic_print_info_child, mon); + + for (i = 0; i < chip8->num_phbs; i++) { + PnvPHB3 *phb3 = &chip8->phbs[i]; + + 
pnv_phb3_msi_pic_print_info(&phb3->msis, mon); + ics_pic_print_info(&phb3->lsis, mon); + } } static int pnv_chip_power9_pic_print_info_child(Object *child, void *opaque) @@ -1189,10 +1183,26 @@ static void pnv_chip_icp_realize(Pnv8Chip *chip8, Error **errp) } } -/* Attach a root port device */ -void pnv_phb_attach_root_port(PCIHostState *pci, const char *name) +/* + * Attach a root port device. + * + * 'index' will be used both as a PCIE slot value and to calculate + * QOM id. 'chip_id' is going to be used as PCIE chassis for the + * root port. + */ +void pnv_phb_attach_root_port(PCIHostState *pci, const char *name, + int index, int chip_id) { PCIDevice *root = pci_new(PCI_DEVFN(0, 0), name); + g_autofree char *default_id = g_strdup_printf("%s[%d]", name, index); + const char *dev_id = DEVICE(root)->id; + + object_property_add_child(OBJECT(pci->bus), dev_id ? dev_id : default_id, + OBJECT(root)); + + /* Set unique chassis/slot values for the root port */ + qdev_prop_set_uint8(DEVICE(root), "chassis", chip_id); + qdev_prop_set_uint16(DEVICE(root), "slot", index); pci_realize_and_unref(root, pci->bus, &error_fatal); } @@ -1934,44 +1944,28 @@ PowerPCCPU *pnv_chip_find_cpu(PnvChip *chip, uint32_t pir) return NULL; } -typedef struct ForeachPhb3Args { - int irq; - ICSState *ics; -} ForeachPhb3Args; - -static int pnv_ics_get_child(Object *child, void *opaque) -{ - ForeachPhb3Args *args = opaque; - PnvPHB3 *phb3 = (PnvPHB3 *) object_dynamic_cast(child, TYPE_PNV_PHB3); - - if (phb3) { - if (ics_valid_irq(&phb3->lsis, args->irq)) { - args->ics = &phb3->lsis; - } - if (ics_valid_irq(ICS(&phb3->msis), args->irq)) { - args->ics = ICS(&phb3->msis); - } - } - return args->ics ? 1 : 0; -} - static ICSState *pnv_ics_get(XICSFabric *xi, int irq) { PnvMachineState *pnv = PNV_MACHINE(xi); - ForeachPhb3Args args = { irq, NULL }; - int i; + int i, j; for (i = 0; i < pnv->num_chips; i++) { - PnvChip *chip = pnv->chips[i]; Pnv8Chip *chip8 = PNV8_CHIP(pnv->chips[i]); if (ics_valid_irq(&chip8->psi.ics, irq)) { return &chip8->psi.ics; } - object_child_foreach(OBJECT(chip), pnv_ics_get_child, &args); - if (args.ics) { - return args.ics; + for (j = 0; j < chip8->num_phbs; j++) { + PnvPHB3 *phb3 = &chip8->phbs[j]; + + if (ics_valid_irq(&phb3->lsis, irq)) { + return &phb3->lsis; + } + + if (ics_valid_irq(ICS(&phb3->msis), irq)) { + return ICS(&phb3->msis); + } } } return NULL; @@ -1990,28 +1984,22 @@ PnvChip *pnv_get_chip(PnvMachineState *pnv, uint32_t chip_id) return NULL; } -static int pnv_ics_resend_child(Object *child, void *opaque) -{ - PnvPHB3 *phb3 = (PnvPHB3 *) object_dynamic_cast(child, TYPE_PNV_PHB3); - - if (phb3) { - ics_resend(&phb3->lsis); - ics_resend(ICS(&phb3->msis)); - } - return 0; -} - static void pnv_ics_resend(XICSFabric *xi) { PnvMachineState *pnv = PNV_MACHINE(xi); - int i; + int i, j; for (i = 0; i < pnv->num_chips; i++) { - PnvChip *chip = pnv->chips[i]; Pnv8Chip *chip8 = PNV8_CHIP(pnv->chips[i]); ics_resend(&chip8->psi.ics); - object_child_foreach(OBJECT(chip), pnv_ics_resend_child, NULL); + + for (j = 0; j < chip8->num_phbs; j++) { + PnvPHB3 *phb3 = &chip8->phbs[j]; + + ics_resend(&phb3->lsis); + ics_resend(ICS(&phb3->msis)); + } } } diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c index fd4942e881..9a5382d527 100644 --- a/hw/ppc/spapr.c +++ b/hw/ppc/spapr.c @@ -898,6 +898,8 @@ static void spapr_dt_rtas(SpaprMachineState *spapr, void *fdt) add_str(hypertas, "hcall-hpt-resize"); } + add_str(hypertas, "hcall-watchdog"); + _FDT(fdt_setprop(fdt, rtas, "ibm,hypertas-functions", hypertas->str, 
hypertas->len)); g_string_free(hypertas, TRUE); @@ -3051,6 +3053,8 @@ static void spapr_machine_init(MachineState *machine) spapr->vof->fw_size = fw_size; /* for claim() on itself */ spapr_register_hypercall(KVMPPC_H_VOF_CLIENT, spapr_h_vof_client); } + + spapr_watchdog_init(spapr); } #define DEFAULT_KVM_TYPE "auto" diff --git a/hw/ppc/spapr_iommu.c b/hw/ppc/spapr_iommu.c index 81e5a1aea3..63e34d457a 100644 --- a/hw/ppc/spapr_iommu.c +++ b/hw/ppc/spapr_iommu.c @@ -279,7 +279,7 @@ static const VMStateDescription vmstate_spapr_tce_table_ex = { static const VMStateDescription vmstate_spapr_tce_table = { .name = "spapr_iommu", - .version_id = 2, + .version_id = 3, .minimum_version_id = 2, .pre_save = spapr_tce_table_pre_save, .post_load = spapr_tce_table_post_load, @@ -292,6 +292,7 @@ static const VMStateDescription vmstate_spapr_tce_table = { VMSTATE_BOOL(bypass, SpaprTceTable), VMSTATE_VARRAY_UINT32_ALLOC(mig_table, SpaprTceTable, mig_nb_table, 0, vmstate_info_uint64, uint64_t), + VMSTATE_BOOL_V(def_win, SpaprTceTable, 3), VMSTATE_END_OF_LIST() }, diff --git a/hw/ppc/spapr_pci.c b/hw/ppc/spapr_pci.c index b2f5fbef0c..67e9d468aa 100644 --- a/hw/ppc/spapr_pci.c +++ b/hw/ppc/spapr_pci.c @@ -2067,6 +2067,7 @@ void spapr_phb_dma_reset(SpaprPhbState *sphb) tcet = spapr_tce_find_by_liobn(sphb->dma_liobn[0]); spapr_tce_table_enable(tcet, SPAPR_TCE_PAGE_SHIFT, sphb->dma_win_addr, sphb->dma_win_size >> SPAPR_TCE_PAGE_SHIFT); + tcet->def_win = true; } static void spapr_phb_reset(DeviceState *qdev) @@ -2359,8 +2360,9 @@ int spapr_dt_phb(SpaprMachineState *spapr, SpaprPhbState *phb, cpu_to_be32(RTAS_IBM_REMOVE_PE_DMA_WINDOW) }; uint32_t ddw_extensions[] = { - cpu_to_be32(1), - cpu_to_be32(RTAS_IBM_RESET_PE_DMA_WINDOW) + cpu_to_be32(2), + cpu_to_be32(RTAS_IBM_RESET_PE_DMA_WINDOW), + cpu_to_be32(1), /* 1: ibm,query-pe-dma-window 6 outputs, PAPR 2.8 */ }; SpaprTceTable *tcet; SpaprDrc *drc; diff --git a/hw/ppc/spapr_rtas_ddw.c b/hw/ppc/spapr_rtas_ddw.c index 13d339c807..7ba11382bc 100644 --- a/hw/ppc/spapr_rtas_ddw.c +++ b/hw/ppc/spapr_rtas_ddw.c @@ -100,7 +100,7 @@ static void rtas_ibm_query_pe_dma_window(PowerPCCPU *cpu, uint64_t buid; uint32_t avail, addr, pgmask = 0; - if ((nargs != 3) || (nret != 5)) { + if ((nargs != 3) || ((nret != 5) && (nret != 6))) { goto param_error_exit; } @@ -118,9 +118,20 @@ static void rtas_ibm_query_pe_dma_window(PowerPCCPU *cpu, rtas_st(rets, 0, RTAS_OUT_SUCCESS); rtas_st(rets, 1, avail); - rtas_st(rets, 2, 0x80000000); /* The largest window we can possibly have */ - rtas_st(rets, 3, pgmask); - rtas_st(rets, 4, 0); /* DMA migration mask, not supported */ + if (nret == 6) { + /* + * Set the Max TCE number as 1<<(58-21) = 0x20.0000.0000 + * 1<<59 is the huge window start and 21 is 2M page shift. 
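Spelling out the arithmetic in that comment: 58 - 21 = 37 and 1 << 37 = 0x20_0000_0000 TCEs, which the 6-output form returns as two 32-bit cells — high word 0x00000020 followed by low word 0x00000000, i.e. the two rtas_st() stores immediately below.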
+ */ + rtas_st(rets, 2, 0x00000020); + rtas_st(rets, 3, 0x00000000); + rtas_st(rets, 4, pgmask); + rtas_st(rets, 5, 0); /* DMA migration mask, not supported */ + } else { + rtas_st(rets, 2, 0x80000000); + rtas_st(rets, 3, pgmask); + rtas_st(rets, 4, 0); /* DMA migration mask, not supported */ + } trace_spapr_iommu_ddw_query(buid, addr, avail, 0x80000000, pgmask); return; @@ -215,6 +226,7 @@ static void rtas_ibm_remove_pe_dma_window(PowerPCCPU *cpu, SpaprPhbState *sphb; SpaprTceTable *tcet; uint32_t liobn; + bool def_win_removed; if ((nargs != 1) || (nret != 1)) { goto param_error_exit; @@ -231,9 +243,23 @@ static void rtas_ibm_remove_pe_dma_window(PowerPCCPU *cpu, goto param_error_exit; } + def_win_removed = tcet->def_win; spapr_tce_table_disable(tcet); trace_spapr_iommu_ddw_remove(liobn); + /* + * PAPR+/LoPAPR says: + * The platform must restore the default DMA window for the PE on a call + * to the ibm,remove-pe-dma-window RTAS call when all of the following + * are true: + * a. The call removes the last DMA window remaining for the PE. + * b. The DMA window being removed is not the default window + */ + if (spapr_phb_get_active_win_num(sphb) == 0 && !def_win_removed) { + spapr_phb_dma_reset(sphb); + trace_spapr_iommu_ddw_reset(sphb->buid, 0); + } + rtas_st(rets, 0, RTAS_OUT_SUCCESS); return; diff --git a/hw/watchdog/meson.build b/hw/watchdog/meson.build index 054c403dea..8974b5cf4c 100644 --- a/hw/watchdog/meson.build +++ b/hw/watchdog/meson.build @@ -6,3 +6,4 @@ softmmu_ss.add(when: 'CONFIG_WDT_DIAG288', if_true: files('wdt_diag288.c')) softmmu_ss.add(when: 'CONFIG_ASPEED_SOC', if_true: files('wdt_aspeed.c')) softmmu_ss.add(when: 'CONFIG_WDT_IMX2', if_true: files('wdt_imx2.c')) softmmu_ss.add(when: 'CONFIG_WDT_SBSA', if_true: files('sbsa_gwdt.c')) +specific_ss.add(when: 'CONFIG_PSERIES', if_true: files('spapr_watchdog.c')) diff --git a/hw/watchdog/spapr_watchdog.c b/hw/watchdog/spapr_watchdog.c new file mode 100644 index 0000000000..55ff1f03c1 --- /dev/null +++ b/hw/watchdog/spapr_watchdog.c @@ -0,0 +1,274 @@ +/* + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. 
+ */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "target/ppc/cpu.h" +#include "migration/vmstate.h" +#include "trace.h" + +#include "hw/ppc/spapr.h" + +#define FIELD_BE(reg, field, start, len) \ + FIELD(reg, field, 64 - (start + len), len) + +/* + * Bits 47: "leaveOtherWatchdogsRunningOnTimeout", specified on + * the "Start watchdog" operation, + * 0 - stop out-standing watchdogs on timeout, + * 1 - leave outstanding watchdogs running on timeout + */ +FIELD_BE(PSERIES_WDTF, LEAVE_OTHER, 47, 1) + +/* Bits 48-55: "operation" */ +FIELD_BE(PSERIES_WDTF, OP, 48, 8) +#define PSERIES_WDTF_OP_START 0x1 +#define PSERIES_WDTF_OP_STOP 0x2 +#define PSERIES_WDTF_OP_QUERY 0x3 +#define PSERIES_WDTF_OP_QUERY_LPM 0x4 + +/* Bits 56-63: "timeoutAction" */ +FIELD_BE(PSERIES_WDTF, ACTION, 56, 8) +#define PSERIES_WDTF_ACTION_HARD_POWER_OFF 0x1 +#define PSERIES_WDTF_ACTION_HARD_RESTART 0x2 +#define PSERIES_WDTF_ACTION_DUMP_RESTART 0x3 + +FIELD_BE(PSERIES_WDTF, RESERVED, 0, 47) + +/* Special watchdogNumber for the "stop all watchdogs" operation */ +#define PSERIES_WDT_STOP_ALL ((uint64_t)~0) + +/* + * For the "Query watchdog capabilities" operation, a uint64 structure + * defined as: + * Bits 0-15: The minimum supported timeout in milliseconds + * Bits 16-31: The number of watchdogs supported + * Bits 32-63: Reserved + */ +FIELD_BE(PSERIES_WDTQ, MIN_TIMEOUT, 0, 16) +FIELD_BE(PSERIES_WDTQ, NUM, 16, 16) + +/* + * For the "Query watchdog LPM requirement" operation: + * 1 = The given "watchdogNumber" must be stopped prior to suspending + * 2 = The given "watchdogNumber" does not have to be stopped prior to + * suspending + */ +#define PSERIES_WDTQL_STOPPED 1 +#define PSERIES_WDTQL_QUERY_NOT_STOPPED 2 + +#define WDT_MIN_TIMEOUT 1 /* 1ms */ + +static target_ulong watchdog_stop(unsigned watchdogNumber, SpaprWatchdog *w) +{ + target_ulong ret = H_NOOP; + + if (timer_pending(&w->timer)) { + timer_del(&w->timer); + ret = H_SUCCESS; + } + trace_spapr_watchdog_stop(watchdogNumber, ret); + + return ret; +} + +static target_ulong watchdog_stop_all(SpaprMachineState *spapr) +{ + target_ulong ret = H_NOOP; + int i; + + for (i = 1; i <= ARRAY_SIZE(spapr->wds); ++i) { + target_ulong r = watchdog_stop(i, &spapr->wds[i - 1]); + + if (r != H_NOOP && r != H_SUCCESS) { + ret = r; + } + } + + return ret; +} + +static void watchdog_expired(void *pw) +{ + SpaprWatchdog *w = pw; + CPUState *cs; + SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine()); + unsigned num = w - spapr->wds; + + g_assert(num < ARRAY_SIZE(spapr->wds)); + trace_spapr_watchdog_expired(num, w->action); + switch (w->action) { + case PSERIES_WDTF_ACTION_HARD_POWER_OFF: + qemu_system_vmstop_request(RUN_STATE_SHUTDOWN); + break; + case PSERIES_WDTF_ACTION_HARD_RESTART: + qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); + break; + case PSERIES_WDTF_ACTION_DUMP_RESTART: + CPU_FOREACH(cs) { + async_run_on_cpu(cs, spapr_do_system_reset_on_cpu, RUN_ON_CPU_NULL); + } + break; + } + if (!w->leave_others) { + watchdog_stop_all(spapr); + } +} + +static target_ulong h_watchdog(PowerPCCPU *cpu, + SpaprMachineState *spapr, + target_ulong opcode, target_ulong *args) +{ + target_ulong ret = H_SUCCESS; + target_ulong flags = args[0]; + target_ulong watchdogNumber = args[1]; /* 1-Based per PAPR */ + target_ulong timeoutInMs = args[2]; + unsigned operation = FIELD_EX64(flags, PSERIES_WDTF, OP); + unsigned timeoutAction = FIELD_EX64(flags, PSERIES_WDTF, ACTION); + SpaprWatchdog *w; + + if (FIELD_EX64(flags, PSERIES_WDTF, RESERVED)) { + return H_PARAMETER; + } 
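The FIELD_BE() wrapper defined above translates PAPR's big-endian (MSB-0) bit numbering into the LSB-0 positions expected by QEMU's FIELD()/FIELD_EX64() register-field helpers. Concretely, for the flags word used here:

    LEAVE_OTHER  PAPR bit  47, len 1  ->  FIELD(..., 64 - (47 + 1) = 16, 1)
    OP           PAPR bits 48-55      ->  FIELD(..., 64 - (48 + 8) =  8, 8)
    ACTION       PAPR bits 56-63      ->  FIELD(..., 64 - (56 + 8) =  0, 8)

so the timeoutAction ends up in the least-significant byte of the hcall's flags argument.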
+ + switch (operation) { + case PSERIES_WDTF_OP_START: + if (watchdogNumber > ARRAY_SIZE(spapr->wds)) { + return H_P2; + } + if (timeoutInMs <= WDT_MIN_TIMEOUT) { + return H_P3; + } + + w = &spapr->wds[watchdogNumber - 1]; + switch (timeoutAction) { + case PSERIES_WDTF_ACTION_HARD_POWER_OFF: + case PSERIES_WDTF_ACTION_HARD_RESTART: + case PSERIES_WDTF_ACTION_DUMP_RESTART: + w->action = timeoutAction; + break; + default: + return H_PARAMETER; + } + w->leave_others = FIELD_EX64(flags, PSERIES_WDTF, LEAVE_OTHER); + timer_mod(&w->timer, + qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + timeoutInMs); + trace_spapr_watchdog_start(flags, watchdogNumber, timeoutInMs); + break; + case PSERIES_WDTF_OP_STOP: + if (watchdogNumber == PSERIES_WDT_STOP_ALL) { + ret = watchdog_stop_all(spapr); + } else if (watchdogNumber <= ARRAY_SIZE(spapr->wds)) { + ret = watchdog_stop(watchdogNumber, + &spapr->wds[watchdogNumber - 1]); + } else { + return H_P2; + } + break; + case PSERIES_WDTF_OP_QUERY: + args[0] = FIELD_DP64(0, PSERIES_WDTQ, MIN_TIMEOUT, WDT_MIN_TIMEOUT); + args[0] = FIELD_DP64(args[0], PSERIES_WDTQ, NUM, + ARRAY_SIZE(spapr->wds)); + trace_spapr_watchdog_query(args[0]); + break; + case PSERIES_WDTF_OP_QUERY_LPM: + if (watchdogNumber > ARRAY_SIZE(spapr->wds)) { + return H_P2; + } + args[0] = PSERIES_WDTQL_QUERY_NOT_STOPPED; + trace_spapr_watchdog_query_lpm(args[0]); + break; + default: + return H_PARAMETER; + } + + return ret; +} + +void spapr_watchdog_init(SpaprMachineState *spapr) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(spapr->wds); ++i) { + char name[16]; + SpaprWatchdog *w = &spapr->wds[i]; + + snprintf(name, sizeof(name) - 1, "wdt%d", i + 1); + object_initialize_child_with_props(OBJECT(spapr), name, w, + sizeof(SpaprWatchdog), + TYPE_SPAPR_WDT, + &error_fatal, NULL); + qdev_realize(DEVICE(w), NULL, &error_fatal); + } +} + +static bool watchdog_needed(void *opaque) +{ + SpaprWatchdog *w = opaque; + + return timer_pending(&w->timer); +} + +static const VMStateDescription vmstate_wdt = { + .name = "spapr_watchdog", + .version_id = 1, + .minimum_version_id = 1, + .needed = watchdog_needed, + .fields = (VMStateField[]) { + VMSTATE_TIMER(timer, SpaprWatchdog), + VMSTATE_UINT8(action, SpaprWatchdog), + VMSTATE_UINT8(leave_others, SpaprWatchdog), + VMSTATE_END_OF_LIST() + } +}; + +static void spapr_wdt_realize(DeviceState *dev, Error **errp) +{ + SpaprWatchdog *w = SPAPR_WDT(dev); + Object *o = OBJECT(dev); + + timer_init_ms(&w->timer, QEMU_CLOCK_VIRTUAL, watchdog_expired, w); + + object_property_add_uint64_ptr(o, "expire", + (uint64_t *)&w->timer.expire_time, + OBJ_PROP_FLAG_READ); + object_property_add_uint8_ptr(o, "action", &w->action, OBJ_PROP_FLAG_READ); + object_property_add_uint8_ptr(o, "leaveOtherWatchdogsRunningOnTimeout", + &w->leave_others, OBJ_PROP_FLAG_READ); +} + +static void spapr_wdt_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + dc->realize = spapr_wdt_realize; + dc->vmsd = &vmstate_wdt; + dc->user_creatable = false; +} + +static const TypeInfo spapr_wdt_info = { + .name = TYPE_SPAPR_WDT, + .parent = TYPE_DEVICE, + .instance_size = sizeof(SpaprWatchdog), + .class_init = spapr_wdt_class_init, +}; + +static void spapr_watchdog_register_types(void) +{ + spapr_register_hypercall(H_WATCHDOG, h_watchdog); + type_register_static(&spapr_wdt_info); +} + +type_init(spapr_watchdog_register_types) diff --git a/hw/watchdog/trace-events b/hw/watchdog/trace-events index e7523e22aa..89ccbcfdfd 100644 --- a/hw/watchdog/trace-events +++ b/hw/watchdog/trace-events @@ -9,3 
+9,10 @@ cmsdk_apb_watchdog_lock(uint32_t lock) "CMSDK APB watchdog: lock %" PRIu32 # wdt-aspeed.c aspeed_wdt_read(uint64_t addr, uint32_t size) "@0x%" PRIx64 " size=%d" aspeed_wdt_write(uint64_t addr, uint32_t size, uint64_t data) "@0x%" PRIx64 " size=%d value=0x%"PRIx64 + +# spapr_watchdog.c +spapr_watchdog_start(uint64_t flags, uint64_t num, uint64_t timeout) "Flags 0x%" PRIx64 " num=%" PRId64 " %" PRIu64 "ms" +spapr_watchdog_stop(uint64_t num, uint64_t ret) "num=%" PRIu64 " ret=%" PRId64 +spapr_watchdog_query(uint64_t caps) "caps=0x%" PRIx64 +spapr_watchdog_query_lpm(uint64_t caps) "caps=0x%" PRIx64 +spapr_watchdog_expired(uint64_t num, unsigned action) "num=%" PRIu64 " action=%u" diff --git a/include/hw/arm/virt.h b/include/hw/arm/virt.h index 15feabac63..6ec479ca2b 100644 --- a/include/hw/arm/virt.h +++ b/include/hw/arm/virt.h @@ -152,7 +152,7 @@ struct VirtMachineState { bool virt; bool ras; bool mte; - bool dtb_kaslr_seed; + bool dtb_randomness; OnOffAuto acpi; VirtGICType gic_version; VirtIOMMUType iommu; diff --git a/include/hw/pci-host/pnv_phb3_regs.h b/include/hw/pci-host/pnv_phb3_regs.h index a174ef1f70..38f8ce9d74 100644 --- a/include/hw/pci-host/pnv_phb3_regs.h +++ b/include/hw/pci-host/pnv_phb3_regs.h @@ -13,22 +13,6 @@ #include "qemu/host-utils.h" /* - * QEMU version of the GETFIELD/SETFIELD macros - * - * These are common with the PnvXive model. - */ -static inline uint64_t GETFIELD(uint64_t mask, uint64_t word) -{ - return (word & mask) >> ctz64(mask); -} - -static inline uint64_t SETFIELD(uint64_t mask, uint64_t word, - uint64_t value) -{ - return (word & ~mask) | ((value << ctz64(mask)) & mask); -} - -/* * PBCQ XSCOM registers */ diff --git a/include/hw/ppc/pnv.h b/include/hw/ppc/pnv.h index 86cb7d7f97..b991194223 100644 --- a/include/hw/ppc/pnv.h +++ b/include/hw/ppc/pnv.h @@ -189,7 +189,8 @@ DECLARE_INSTANCE_CHECKER(PnvChip, PNV_CHIP_POWER10, TYPE_PNV_CHIP_POWER10) PowerPCCPU *pnv_chip_find_cpu(PnvChip *chip, uint32_t pir); -void pnv_phb_attach_root_port(PCIHostState *pci, const char *name); +void pnv_phb_attach_root_port(PCIHostState *pci, const char *name, + int index, int chip_id); #define TYPE_PNV_MACHINE MACHINE_TYPE_NAME("powernv") typedef struct PnvMachineClass PnvMachineClass; diff --git a/include/hw/ppc/spapr.h b/include/hw/ppc/spapr.h index 072dda2c72..530d739b1d 100644 --- a/include/hw/ppc/spapr.h +++ b/include/hw/ppc/spapr.h @@ -164,6 +164,21 @@ struct SpaprMachineClass { SpaprIrq *irq; }; +#define WDT_MAX_WATCHDOGS 4 /* Maximum number of watchdog devices */ + +#define TYPE_SPAPR_WDT "spapr-wdt" +OBJECT_DECLARE_SIMPLE_TYPE(SpaprWatchdog, SPAPR_WDT) + +typedef struct SpaprWatchdog { + /*< private >*/ + DeviceState parent_obj; + /*< public >*/ + + QEMUTimer timer; + uint8_t action; /* One of PSERIES_WDTF_ACTION_xxx */ + uint8_t leave_others; /* leaveOtherWatchdogsRunningOnTimeout */ +} SpaprWatchdog; + /** * SpaprMachineState: */ @@ -264,6 +279,8 @@ struct SpaprMachineState { uint32_t FORM2_assoc_array[NUMA_NODES_MAX_NUM][FORM2_NUMA_ASSOC_SIZE]; Error *fwnmi_migration_blocker; + + SpaprWatchdog wds[WDT_MAX_WATCHDOGS]; }; #define H_SUCCESS 0 @@ -344,6 +361,7 @@ struct SpaprMachineState { #define H_P7 -60 #define H_P8 -61 #define H_P9 -62 +#define H_NOOP -63 #define H_UNSUPPORTED -67 #define H_OVERLAP -68 #define H_UNSUPPORTED_FLAG -256 @@ -564,8 +582,9 @@ struct SpaprMachineState { #define H_SCM_HEALTH 0x400 #define H_RPT_INVALIDATE 0x448 #define H_SCM_FLUSH 0x44C +#define H_WATCHDOG 0x45C -#define MAX_HCALL_OPCODE H_SCM_FLUSH +#define MAX_HCALL_OPCODE 
H_WATCHDOG /* The hcalls above are standardized in PAPR and implemented by pHyp * as well. @@ -902,6 +921,7 @@ struct SpaprTceTable { bool bypass; bool need_vfio; bool skipping_replay; + bool def_win; int fd; MemoryRegion root; IOMMUMemoryRegion iommu; @@ -1027,6 +1047,7 @@ extern const VMStateDescription vmstate_spapr_cap_large_decr; extern const VMStateDescription vmstate_spapr_cap_ccf_assist; extern const VMStateDescription vmstate_spapr_cap_fwnmi; extern const VMStateDescription vmstate_spapr_cap_rpt_invalidate; +extern const VMStateDescription vmstate_spapr_wdt; static inline uint8_t spapr_get_cap(SpaprMachineState *spapr, int cap) { @@ -1063,4 +1084,7 @@ target_ulong spapr_vof_client_architecture_support(MachineState *ms, target_ulong ovec_addr); void spapr_vof_client_dt_finalize(SpaprMachineState *spapr, void *fdt); +/* H_WATCHDOG */ +void spapr_watchdog_init(SpaprMachineState *spapr); + #endif /* HW_SPAPR_H */ diff --git a/include/standard-headers/asm-m68k/bootinfo-virt.h b/include/standard-headers/asm-m68k/bootinfo-virt.h index 81be1e0924..1b1ffd4705 100644 --- a/include/standard-headers/asm-m68k/bootinfo-virt.h +++ b/include/standard-headers/asm-m68k/bootinfo-virt.h @@ -12,6 +12,7 @@ #define BI_VIRT_GF_TTY_BASE 0x8003 #define BI_VIRT_VIRTIO_BASE 0x8004 #define BI_VIRT_CTRL_BASE 0x8005 +#define BI_VIRT_RNG_SEED 0x8006 #define VIRT_BOOTI_VERSION MK_BI_VERSION(2, 0) diff --git a/linux-user/aarch64/cpu_loop.c b/linux-user/aarch64/cpu_loop.c index f7ef36cd9f..9875d609a9 100644 --- a/linux-user/aarch64/cpu_loop.c +++ b/linux-user/aarch64/cpu_loop.c @@ -89,6 +89,15 @@ void cpu_loop(CPUARMState *env) switch (trapnr) { case EXCP_SWI: + /* + * On syscall, PSTATE.ZA is preserved, along with the ZA matrix. + * PSTATE.SM is cleared, per SMSTOP, which does ResetSVEState. + */ + if (FIELD_EX64(env->svcr, SVCR, SM)) { + env->svcr = FIELD_DP64(env->svcr, SVCR, SM, 0); + arm_rebuild_hflags(env); + arm_reset_sve_state(env); + } ret = do_syscall(env, env->xregs[8], env->xregs[0], diff --git a/linux-user/aarch64/signal.c b/linux-user/aarch64/signal.c index 7da0e36c6d..6a2c6e06d2 100644 --- a/linux-user/aarch64/signal.c +++ b/linux-user/aarch64/signal.c @@ -78,7 +78,8 @@ struct target_extra_context { struct target_sve_context { struct target_aarch64_ctx head; uint16_t vl; - uint16_t reserved[3]; + uint16_t flags; + uint16_t reserved[2]; /* The actual SVE data immediately follows. It is laid out * according to TARGET_SVE_SIG_{Z,P}REG_OFFSET, based off of * the original struct pointer. @@ -101,6 +102,24 @@ struct target_sve_context { #define TARGET_SVE_SIG_CONTEXT_SIZE(VQ) \ (TARGET_SVE_SIG_PREG_OFFSET(VQ, 17)) +#define TARGET_SVE_SIG_FLAG_SM 1 + +#define TARGET_ZA_MAGIC 0x54366345 + +struct target_za_context { + struct target_aarch64_ctx head; + uint16_t vl; + uint16_t reserved[3]; + /* The actual ZA data immediately follows. 
*/ +}; + +#define TARGET_ZA_SIG_REGS_OFFSET \ + QEMU_ALIGN_UP(sizeof(struct target_za_context), TARGET_SVE_VQ_BYTES) +#define TARGET_ZA_SIG_ZAV_OFFSET(VQ, N) \ + (TARGET_ZA_SIG_REGS_OFFSET + (VQ) * TARGET_SVE_VQ_BYTES * (N)) +#define TARGET_ZA_SIG_CONTEXT_SIZE(VQ) \ + TARGET_ZA_SIG_ZAV_OFFSET(VQ, VQ * TARGET_SVE_VQ_BYTES) + struct target_rt_sigframe { struct target_siginfo info; struct target_ucontext uc; @@ -173,13 +192,17 @@ static void target_setup_end_record(struct target_aarch64_ctx *end) } static void target_setup_sve_record(struct target_sve_context *sve, - CPUARMState *env, int vq, int size) + CPUARMState *env, int size) { - int i, j; + int i, j, vq = sve_vq(env); + memset(sve, 0, sizeof(*sve)); __put_user(TARGET_SVE_MAGIC, &sve->head.magic); __put_user(size, &sve->head.size); __put_user(vq * TARGET_SVE_VQ_BYTES, &sve->vl); + if (FIELD_EX64(env->svcr, SVCR, SM)) { + __put_user(TARGET_SVE_SIG_FLAG_SM, &sve->flags); + } /* Note that SVE regs are stored as a byte stream, with each byte element * at a subsequent address. This corresponds to a little-endian store @@ -200,6 +223,35 @@ static void target_setup_sve_record(struct target_sve_context *sve, } } +static void target_setup_za_record(struct target_za_context *za, + CPUARMState *env, int size) +{ + int vq = sme_vq(env); + int vl = vq * TARGET_SVE_VQ_BYTES; + int i, j; + + memset(za, 0, sizeof(*za)); + __put_user(TARGET_ZA_MAGIC, &za->head.magic); + __put_user(size, &za->head.size); + __put_user(vl, &za->vl); + + if (size == TARGET_ZA_SIG_CONTEXT_SIZE(0)) { + return; + } + assert(size == TARGET_ZA_SIG_CONTEXT_SIZE(vq)); + + /* + * Note that ZA vectors are stored as a byte stream, + * with each byte element at a subsequent address. + */ + for (i = 0; i < vl; ++i) { + uint64_t *z = (void *)za + TARGET_ZA_SIG_ZAV_OFFSET(vq, i); + for (j = 0; j < vq * 2; ++j) { + __put_user_e(env->zarray[i].d[j], z + j, le); + } + } +} + static void target_restore_general_frame(CPUARMState *env, struct target_rt_sigframe *sf) { @@ -243,12 +295,50 @@ static void target_restore_fpsimd_record(CPUARMState *env, } } -static void target_restore_sve_record(CPUARMState *env, - struct target_sve_context *sve, int vq) +static bool target_restore_sve_record(CPUARMState *env, + struct target_sve_context *sve, + int size, int *svcr) { - int i, j; + int i, j, vl, vq, flags; + bool sm; - /* Note that SVE regs are stored as a byte stream, with each byte element + __get_user(vl, &sve->vl); + __get_user(flags, &sve->flags); + + sm = flags & TARGET_SVE_SIG_FLAG_SM; + + /* The cpu must support Streaming or Non-streaming SVE. */ + if (sm + ? !cpu_isar_feature(aa64_sme, env_archcpu(env)) + : !cpu_isar_feature(aa64_sve, env_archcpu(env))) { + return false; + } + + /* + * Note that we cannot use sve_vq() because that depends on the + * current setting of PSTATE.SM, not the state to be restored. + */ + vq = sve_vqm1_for_el_sm(env, 0, sm) + 1; + + /* Reject mismatched VL. */ + if (vl != vq * TARGET_SVE_VQ_BYTES) { + return false; + } + + /* Accept empty record -- used to clear PSTATE.SM. */ + if (size <= sizeof(*sve)) { + return true; + } + + /* Reject non-empty but incomplete record. */ + if (size < TARGET_SVE_SIG_CONTEXT_SIZE(vq)) { + return false; + } + + *svcr = FIELD_DP64(*svcr, SVCR, SM, sm); + + /* + * Note that SVE regs are stored as a byte stream, with each byte element * at a subsequent address. This corresponds to a little-endian load * of our 64-bit hunks. 
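The same byte-stream layout applies to the new ZA record. Working its size out from the macros above: TARGET_ZA_SIG_CONTEXT_SIZE(vq) is the 16-byte-aligned header plus a (vq * 16) by (vq * 16) byte matrix, so e.g. vq = 4 (a 512-bit streaming vector length) carries 64 * 64 = 4096 bytes of ZA payload, while TARGET_ZA_SIG_CONTEXT_SIZE(0) is just the bare header, which target_setup_za_record() emits when PSTATE.ZA is off.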
*/ @@ -270,6 +360,46 @@ static void target_restore_sve_record(CPUARMState *env, } } } + return true; +} + +static bool target_restore_za_record(CPUARMState *env, + struct target_za_context *za, + int size, int *svcr) +{ + int i, j, vl, vq; + + if (!cpu_isar_feature(aa64_sme, env_archcpu(env))) { + return false; + } + + __get_user(vl, &za->vl); + vq = sme_vq(env); + + /* Reject mismatched VL. */ + if (vl != vq * TARGET_SVE_VQ_BYTES) { + return false; + } + + /* Accept empty record -- used to clear PSTATE.ZA. */ + if (size <= TARGET_ZA_SIG_CONTEXT_SIZE(0)) { + return true; + } + + /* Reject non-empty but incomplete record. */ + if (size < TARGET_ZA_SIG_CONTEXT_SIZE(vq)) { + return false; + } + + *svcr = FIELD_DP64(*svcr, SVCR, ZA, 1); + + for (i = 0; i < vl; ++i) { + uint64_t *z = (void *)za + TARGET_ZA_SIG_ZAV_OFFSET(vq, i); + for (j = 0; j < vq * 2; ++j) { + __get_user_e(env->zarray[i].d[j], z + j, le); + } + } + return true; } static int target_restore_sigframe(CPUARMState *env, @@ -278,10 +408,12 @@ static int target_restore_sigframe(CPUARMState *env, struct target_aarch64_ctx *ctx, *extra = NULL; struct target_fpsimd_context *fpsimd = NULL; struct target_sve_context *sve = NULL; + struct target_za_context *za = NULL; uint64_t extra_datap = 0; bool used_extra = false; - bool err = false; - int vq = 0, sve_size = 0; + int sve_size = 0; + int za_size = 0; + int svcr = 0; target_restore_general_frame(env, sf); @@ -294,8 +426,7 @@ static int target_restore_sigframe(CPUARMState *env, switch (magic) { case 0: if (size != 0) { - err = true; - goto exit; + goto err; } if (used_extra) { ctx = NULL; @@ -307,42 +438,46 @@ static int target_restore_sigframe(CPUARMState *env, case TARGET_FPSIMD_MAGIC: if (fpsimd || size != sizeof(struct target_fpsimd_context)) { - err = true; - goto exit; + goto err; } fpsimd = (struct target_fpsimd_context *)ctx; break; case TARGET_SVE_MAGIC: - if (cpu_isar_feature(aa64_sve, env_archcpu(env))) { - vq = sve_vq(env); - sve_size = QEMU_ALIGN_UP(TARGET_SVE_SIG_CONTEXT_SIZE(vq), 16); - if (!sve && size == sve_size) { - sve = (struct target_sve_context *)ctx; - break; - } + if (sve || size < sizeof(struct target_sve_context)) { + goto err; } - err = true; - goto exit; + sve = (struct target_sve_context *)ctx; + sve_size = size; + break; + + case TARGET_ZA_MAGIC: + if (za || size < sizeof(struct target_za_context)) { + goto err; + } + za = (struct target_za_context *)ctx; + za_size = size; + break; case TARGET_EXTRA_MAGIC: if (extra || size != sizeof(struct target_extra_context)) { - err = true; - goto exit; + goto err; } __get_user(extra_datap, &((struct target_extra_context *)ctx)->datap); __get_user(extra_size, &((struct target_extra_context *)ctx)->size); extra = lock_user(VERIFY_READ, extra_datap, extra_size, 0); + if (!extra) { + return 1; + } break; default: /* Unknown record -- we certainly didn't generate it. * Did we in fact get out of sync? */ - err = true; - goto exit; + goto err; } ctx = (void *)ctx + size; } @@ -351,17 +486,26 @@ static int target_restore_sigframe(CPUARMState *env, if (fpsimd) { target_restore_fpsimd_record(env, fpsimd); } else { - err = true; + goto err; } /* SVE data, if present, overwrites FPSIMD data. 
*/ - if (sve) { - target_restore_sve_record(env, sve, vq); + if (sve && !target_restore_sve_record(env, sve, sve_size, &svcr)) { + goto err; + } + if (za && !target_restore_za_record(env, za, za_size, &svcr)) { + goto err; + } + if (env->svcr != svcr) { + env->svcr = svcr; + arm_rebuild_hflags(env); } + unlock_user(extra, extra_datap, 0); + return 0; - exit: + err: unlock_user(extra, extra_datap, 0); - return err; + return 1; } static abi_ulong get_sigframe(struct target_sigaction *ka, @@ -423,7 +567,8 @@ static void target_setup_frame(int usig, struct target_sigaction *ka, .total_size = offsetof(struct target_rt_sigframe, uc.tuc_mcontext.__reserved), }; - int fpsimd_ofs, fr_ofs, sve_ofs = 0, vq = 0, sve_size = 0; + int fpsimd_ofs, fr_ofs, sve_ofs = 0, za_ofs = 0; + int sve_size = 0, za_size = 0; struct target_rt_sigframe *frame; struct target_rt_frame_record *fr; abi_ulong frame_addr, return_addr; @@ -433,11 +578,20 @@ static void target_setup_frame(int usig, struct target_sigaction *ka, &layout); /* SVE state needs saving only if it exists. */ - if (cpu_isar_feature(aa64_sve, env_archcpu(env))) { - vq = sve_vq(env); - sve_size = QEMU_ALIGN_UP(TARGET_SVE_SIG_CONTEXT_SIZE(vq), 16); + if (cpu_isar_feature(aa64_sve, env_archcpu(env)) || + cpu_isar_feature(aa64_sme, env_archcpu(env))) { + sve_size = QEMU_ALIGN_UP(TARGET_SVE_SIG_CONTEXT_SIZE(sve_vq(env)), 16); sve_ofs = alloc_sigframe_space(sve_size, &layout); } + if (cpu_isar_feature(aa64_sme, env_archcpu(env))) { + /* ZA state needs saving only if it is enabled. */ + if (FIELD_EX64(env->svcr, SVCR, ZA)) { + za_size = TARGET_ZA_SIG_CONTEXT_SIZE(sme_vq(env)); + } else { + za_size = TARGET_ZA_SIG_CONTEXT_SIZE(0); + } + za_ofs = alloc_sigframe_space(za_size, &layout); + } if (layout.extra_ofs) { /* Reserve space for the extra end marker. The standard end marker @@ -484,7 +638,10 @@ static void target_setup_frame(int usig, struct target_sigaction *ka, target_setup_end_record((void *)frame + layout.extra_end_ofs); } if (sve_ofs) { - target_setup_sve_record((void *)frame + sve_ofs, env, vq, sve_size); + target_setup_sve_record((void *)frame + sve_ofs, env, sve_size); + } + if (za_ofs) { + target_setup_za_record((void *)frame + za_ofs, env, za_size); } /* Set up the stack frame for unwinding. */ @@ -508,6 +665,18 @@ static void target_setup_frame(int usig, struct target_sigaction *ka, env->btype = 2; } + /* + * Invoke the signal handler with both SM and ZA disabled. + * When clearing SM, ResetSVEState, per SMSTOP. + */ + if (FIELD_EX64(env->svcr, SVCR, SM)) { + arm_reset_sve_state(env); + } + if (env->svcr) { + env->svcr = 0; + arm_rebuild_hflags(env); + } + if (info) { tswap_siginfo(&frame->info, info); env->xregs[1] = frame_addr + offsetof(struct target_rt_sigframe, info); diff --git a/linux-user/aarch64/target_cpu.h b/linux-user/aarch64/target_cpu.h index 97a477bd3e..f90359faf2 100644 --- a/linux-user/aarch64/target_cpu.h +++ b/linux-user/aarch64/target_cpu.h @@ -34,10 +34,13 @@ static inline void cpu_clone_regs_parent(CPUARMState *env, unsigned flags) static inline void cpu_set_tls(CPUARMState *env, target_ulong newtls) { - /* Note that AArch64 Linux keeps the TLS pointer in TPIDR; this is + /* + * Note that AArch64 Linux keeps the TLS pointer in TPIDR; this is * different from AArch32 Linux, which uses TPIDRRO. */ env->cp15.tpidr_el[0] = newtls; + /* TPIDR2_EL0 is cleared with CLONE_SETTLS. 
*/ + env->cp15.tpidr2_el0 = 0; } static inline abi_ulong get_sp_from_cpustate(CPUARMState *state) diff --git a/linux-user/aarch64/target_prctl.h b/linux-user/aarch64/target_prctl.h index 1d440ffbea..907c314146 100644 --- a/linux-user/aarch64/target_prctl.h +++ b/linux-user/aarch64/target_prctl.h @@ -6,17 +6,18 @@ #ifndef AARCH64_TARGET_PRCTL_H #define AARCH64_TARGET_PRCTL_H -static abi_long do_prctl_get_vl(CPUArchState *env) +static abi_long do_prctl_sve_get_vl(CPUArchState *env) { ARMCPU *cpu = env_archcpu(env); if (cpu_isar_feature(aa64_sve, cpu)) { + /* PSTATE.SM is always unset on syscall entry. */ return sve_vq(env) * 16; } return -TARGET_EINVAL; } -#define do_prctl_get_vl do_prctl_get_vl +#define do_prctl_sve_get_vl do_prctl_sve_get_vl -static abi_long do_prctl_set_vl(CPUArchState *env, abi_long arg2) +static abi_long do_prctl_sve_set_vl(CPUArchState *env, abi_long arg2) { /* * We cannot support either PR_SVE_SET_VL_ONEXEC or PR_SVE_VL_INHERIT. @@ -27,6 +28,7 @@ static abi_long do_prctl_set_vl(CPUArchState *env, abi_long arg2) && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) { uint32_t vq, old_vq; + /* PSTATE.SM is always unset on syscall entry. */ old_vq = sve_vq(env); /* @@ -47,7 +49,59 @@ static abi_long do_prctl_set_vl(CPUArchState *env, abi_long arg2) } return -TARGET_EINVAL; } -#define do_prctl_set_vl do_prctl_set_vl +#define do_prctl_sve_set_vl do_prctl_sve_set_vl + +static abi_long do_prctl_sme_get_vl(CPUArchState *env) +{ + ARMCPU *cpu = env_archcpu(env); + if (cpu_isar_feature(aa64_sme, cpu)) { + return sme_vq(env) * 16; + } + return -TARGET_EINVAL; +} +#define do_prctl_sme_get_vl do_prctl_sme_get_vl + +static abi_long do_prctl_sme_set_vl(CPUArchState *env, abi_long arg2) +{ + /* + * We cannot support either PR_SME_SET_VL_ONEXEC or PR_SME_VL_INHERIT. + * Note the kernel definition of sve_vl_valid allows for VQ=512, + * i.e. VL=8192, even though the architectural maximum is VQ=16. + */ + if (cpu_isar_feature(aa64_sme, env_archcpu(env)) + && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) { + int vq, old_vq; + + old_vq = sme_vq(env); + + /* + * Bound the value of vq, so that we know that it fits into + * the 4-bit field in SMCR_EL1. Because PSTATE.SM is cleared + * on syscall entry, we are not modifying the current SVE + * vector length. + */ + vq = MAX(arg2 / 16, 1); + vq = MIN(vq, 16); + env->vfp.smcr_el[1] = + FIELD_DP64(env->vfp.smcr_el[1], SMCR, LEN, vq - 1); + + /* Delay rebuilding hflags until we know if ZA must change. */ + vq = sve_vqm1_for_el_sm(env, 0, true) + 1; + + if (vq != old_vq) { + /* + * PSTATE.ZA state is cleared on any change to SVL. + * We need not call arm_rebuild_hflags because PSTATE.SM was + * cleared on syscall entry, so this hasn't changed VL. 
+ */ + env->svcr = FIELD_DP64(env->svcr, SVCR, ZA, 0); + arm_rebuild_hflags(env); + } + return vq * 16; + } + return -TARGET_EINVAL; +} +#define do_prctl_sme_set_vl do_prctl_sme_set_vl static abi_long do_prctl_reset_keys(CPUArchState *env, abi_long arg2) { diff --git a/linux-user/elfload.c b/linux-user/elfload.c index 1de77c7959..ce902dbd56 100644 --- a/linux-user/elfload.c +++ b/linux-user/elfload.c @@ -605,6 +605,18 @@ enum { ARM_HWCAP2_A64_RNG = 1 << 16, ARM_HWCAP2_A64_BTI = 1 << 17, ARM_HWCAP2_A64_MTE = 1 << 18, + ARM_HWCAP2_A64_ECV = 1 << 19, + ARM_HWCAP2_A64_AFP = 1 << 20, + ARM_HWCAP2_A64_RPRES = 1 << 21, + ARM_HWCAP2_A64_MTE3 = 1 << 22, + ARM_HWCAP2_A64_SME = 1 << 23, + ARM_HWCAP2_A64_SME_I16I64 = 1 << 24, + ARM_HWCAP2_A64_SME_F64F64 = 1 << 25, + ARM_HWCAP2_A64_SME_I8I32 = 1 << 26, + ARM_HWCAP2_A64_SME_F16F32 = 1 << 27, + ARM_HWCAP2_A64_SME_B16F32 = 1 << 28, + ARM_HWCAP2_A64_SME_F32F32 = 1 << 29, + ARM_HWCAP2_A64_SME_FA64 = 1 << 30, }; #define ELF_HWCAP get_elf_hwcap() @@ -674,6 +686,14 @@ static uint32_t get_elf_hwcap2(void) GET_FEATURE_ID(aa64_rndr, ARM_HWCAP2_A64_RNG); GET_FEATURE_ID(aa64_bti, ARM_HWCAP2_A64_BTI); GET_FEATURE_ID(aa64_mte, ARM_HWCAP2_A64_MTE); + GET_FEATURE_ID(aa64_sme, (ARM_HWCAP2_A64_SME | + ARM_HWCAP2_A64_SME_F32F32 | + ARM_HWCAP2_A64_SME_B16F32 | + ARM_HWCAP2_A64_SME_F16F32 | + ARM_HWCAP2_A64_SME_I8I32)); + GET_FEATURE_ID(aa64_sme_f64f64, ARM_HWCAP2_A64_SME_F64F64); + GET_FEATURE_ID(aa64_sme_i16i64, ARM_HWCAP2_A64_SME_I16I64); + GET_FEATURE_ID(aa64_sme_fa64, ARM_HWCAP2_A64_SME_FA64); return hwcaps; } diff --git a/linux-user/syscall.c b/linux-user/syscall.c index 669add74c1..991b85e6b4 100644 --- a/linux-user/syscall.c +++ b/linux-user/syscall.c @@ -6343,6 +6343,12 @@ abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr) #ifndef PR_SET_SYSCALL_USER_DISPATCH # define PR_SET_SYSCALL_USER_DISPATCH 59 #endif +#ifndef PR_SME_SET_VL +# define PR_SME_SET_VL 63 +# define PR_SME_GET_VL 64 +# define PR_SME_VL_LEN_MASK 0xffff +# define PR_SME_VL_INHERIT (1 << 17) +#endif #include "target_prctl.h" @@ -6362,11 +6368,11 @@ static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2) #ifndef do_prctl_set_fp_mode #define do_prctl_set_fp_mode do_prctl_inval1 #endif -#ifndef do_prctl_get_vl -#define do_prctl_get_vl do_prctl_inval0 +#ifndef do_prctl_sve_get_vl +#define do_prctl_sve_get_vl do_prctl_inval0 #endif -#ifndef do_prctl_set_vl -#define do_prctl_set_vl do_prctl_inval1 +#ifndef do_prctl_sve_set_vl +#define do_prctl_sve_set_vl do_prctl_inval1 #endif #ifndef do_prctl_reset_keys #define do_prctl_reset_keys do_prctl_inval1 @@ -6383,6 +6389,12 @@ static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2) #ifndef do_prctl_set_unalign #define do_prctl_set_unalign do_prctl_inval1 #endif +#ifndef do_prctl_sme_get_vl +#define do_prctl_sme_get_vl do_prctl_inval0 +#endif +#ifndef do_prctl_sme_set_vl +#define do_prctl_sme_set_vl do_prctl_inval1 +#endif static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) @@ -6431,9 +6443,13 @@ static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2, case PR_SET_FP_MODE: return do_prctl_set_fp_mode(env, arg2); case PR_SVE_GET_VL: - return do_prctl_get_vl(env); + return do_prctl_sve_get_vl(env); case PR_SVE_SET_VL: - return do_prctl_set_vl(env, arg2); + return do_prctl_sve_set_vl(env, arg2); + case PR_SME_GET_VL: + return do_prctl_sme_get_vl(env); + case PR_SME_SET_VL: + return do_prctl_sme_set_vl(env, arg2); case PR_PAC_RESET_KEYS: if (arg3 
|| arg4 || arg5) { return -TARGET_EINVAL; diff --git a/pc-bios/s390-ccw.img b/pc-bios/s390-ccw.img Binary files differindex a560c1272f..39f9680a0e 100644 --- a/pc-bios/s390-ccw.img +++ b/pc-bios/s390-ccw.img diff --git a/pc-bios/s390-ccw/bootmap.c b/pc-bios/s390-ccw/bootmap.c index 56411ab3b6..994e59c0b0 100644 --- a/pc-bios/s390-ccw/bootmap.c +++ b/pc-bios/s390-ccw/bootmap.c @@ -780,18 +780,37 @@ static void ipl_iso_el_torito(void) } } +/** + * Detect whether we're trying to boot from an .ISO image. + * These always have a signature string "CD001" at offset 0x8001. + */ +static bool has_iso_signature(void) +{ + int blksize = virtio_get_block_size(); + + if (!blksize || virtio_read(0x8000 / blksize, sec)) { + return false; + } + + return !memcmp("CD001", &sec[1], 5); +} + /*********************************************************************** * Bus specific IPL sequences */ static void zipl_load_vblk(void) { - if (virtio_guessed_disk_nature()) { - virtio_assume_iso9660(); + int blksize = virtio_get_block_size(); + + if (blksize == VIRTIO_ISO_BLOCK_SIZE || has_iso_signature()) { + if (blksize != VIRTIO_ISO_BLOCK_SIZE) { + virtio_assume_iso9660(); + } + ipl_iso_el_torito(); } - ipl_iso_el_torito(); - if (virtio_guessed_disk_nature()) { + if (blksize != VIRTIO_DASD_DEFAULT_BLOCK_SIZE) { sclp_print("Using guessed DASD geometry.\n"); virtio_assume_eckd(); } diff --git a/pc-bios/s390-ccw/main.c b/pc-bios/s390-ccw/main.c index 5d2b7ba94d..a2def83e82 100644 --- a/pc-bios/s390-ccw/main.c +++ b/pc-bios/s390-ccw/main.c @@ -14,6 +14,7 @@ #include "s390-ccw.h" #include "cio.h" #include "virtio.h" +#include "virtio-scsi.h" #include "dasd-ipl.h" char stack[PAGE_SIZE * 8] __attribute__((__aligned__(PAGE_SIZE))); @@ -218,6 +219,7 @@ static int virtio_setup(void) { VDev *vdev = virtio_get_device(); QemuIplParameters *early_qipl = (QemuIplParameters *)QIPL_ADDRESS; + int ret; memcpy(&qipl, early_qipl, sizeof(QemuIplParameters)); @@ -225,18 +227,26 @@ static int virtio_setup(void) menu_setup(); } - if (virtio_get_device_type() == VIRTIO_ID_NET) { + switch (vdev->senseid.cu_model) { + case VIRTIO_ID_NET: sclp_print("Network boot device detected\n"); vdev->netboot_start_addr = qipl.netboot_start_addr; - } else { - int ret = virtio_blk_setup_device(blk_schid); - if (ret) { - return ret; - } + return 0; + case VIRTIO_ID_BLOCK: + ret = virtio_blk_setup_device(blk_schid); + break; + case VIRTIO_ID_SCSI: + ret = virtio_scsi_setup_device(blk_schid); + break; + default: + panic("\n! 
No IPL device available !\n"); + } + + if (!ret) { IPL_assert(virtio_ipl_disk_is_valid(), "No valid IPL device detected"); } - return 0; + return ret; } static void ipl_boot_device(void) @@ -281,7 +291,7 @@ static void probe_boot_device(void) sclp_print("Could not find a suitable boot device (none specified)\n"); } -int main(void) +void main(void) { sclp_setup(); css_setup(); @@ -294,5 +304,4 @@ int main(void) } panic("Failed to load OS from hard disk\n"); - return 0; /* make compiler happy */ } diff --git a/pc-bios/s390-ccw/netboot.mak b/pc-bios/s390-ccw/netboot.mak index ee59a5f4de..7639da194c 100644 --- a/pc-bios/s390-ccw/netboot.mak +++ b/pc-bios/s390-ccw/netboot.mak @@ -16,9 +16,12 @@ s390-netboot.elf: $(NETOBJS) libnet.a libc.a s390-netboot.img: s390-netboot.elf $(call quiet-command,$(STRIP) --strip-unneeded $< -o $@,"STRIP","$(TARGET_DIR)$@") +# SLOF is GCC-only, so ignore warnings about GNU extensions with Clang here +NO_GNU_WARN := $(call cc-option,-Werror $(QEMU_CFLAGS),-Wno-gnu) + # libc files: -LIBC_CFLAGS = $(EXTRA_CFLAGS) $(CFLAGS) $(LIBC_INC) $(LIBNET_INC) \ +LIBC_CFLAGS = $(EXTRA_CFLAGS) $(CFLAGS) $(NO_GNU_WARN) $(LIBC_INC) $(LIBNET_INC) \ -MMD -MP -MT $@ -MF $(@:%.o=%.d) CTYPE_OBJS = isdigit.o isxdigit.o toupper.o @@ -52,7 +55,7 @@ libc.a: $(LIBCOBJS) LIBNETOBJS := args.o dhcp.o dns.o icmpv6.o ipv6.o tcp.o udp.o bootp.o \ dhcpv6.o ethernet.o ipv4.o ndp.o tftp.o pxelinux.o -LIBNETCFLAGS = $(EXTRA_CFLAGS) $(CFLAGS) $(LIBC_INC) $(LIBNET_INC) \ +LIBNETCFLAGS = $(EXTRA_CFLAGS) $(CFLAGS) $(NO_GNU_WARN) $(LIBC_INC) $(LIBNET_INC) \ -DDHCPARCH=0x1F -MMD -MP -MT $@ -MF $(@:%.o=%.d) %.o : $(SLOF_DIR)/lib/libnet/%.c diff --git a/pc-bios/s390-ccw/s390-ccw.h b/pc-bios/s390-ccw/s390-ccw.h index 79db69ff54..b88e0550ab 100644 --- a/pc-bios/s390-ccw/s390-ccw.h +++ b/pc-bios/s390-ccw/s390-ccw.h @@ -57,6 +57,7 @@ void write_subsystem_identification(void); void write_iplb_location(void); extern char stack[PAGE_SIZE * 8] __attribute__((__aligned__(PAGE_SIZE))); unsigned int get_loadparm_index(void); +void main(void); /* sclp.c */ void sclp_print(const char *string); diff --git a/pc-bios/s390-ccw/virtio-blkdev.c b/pc-bios/s390-ccw/virtio-blkdev.c index 7d35050292..8271c47296 100644 --- a/pc-bios/s390-ccw/virtio-blkdev.c +++ b/pc-bios/s390-ccw/virtio-blkdev.c @@ -13,6 +13,9 @@ #include "virtio.h" #include "virtio-scsi.h" +#define VIRTIO_BLK_F_GEOMETRY (1 << 4) +#define VIRTIO_BLK_F_BLK_SIZE (1 << 6) + static int virtio_blk_read_many(VDev *vdev, ulong sector, void *load_addr, int sec_num) { @@ -112,23 +115,6 @@ VirtioGDN virtio_guessed_disk_nature(void) return virtio_get_device()->guessed_disk_nature; } -void virtio_assume_scsi(void) -{ - VDev *vdev = virtio_get_device(); - - switch (vdev->senseid.cu_model) { - case VIRTIO_ID_BLOCK: - vdev->guessed_disk_nature = VIRTIO_GDN_SCSI; - vdev->config.blk.blk_size = VIRTIO_SCSI_BLOCK_SIZE; - vdev->config.blk.physical_block_exp = 0; - vdev->blk_factor = 1; - break; - case VIRTIO_ID_SCSI: - vdev->scsi_block_size = VIRTIO_SCSI_BLOCK_SIZE; - break; - } -} - void virtio_assume_iso9660(void) { VDev *vdev = virtio_get_device(); @@ -155,7 +141,7 @@ void virtio_assume_eckd(void) vdev->config.blk.physical_block_exp = 0; switch (vdev->senseid.cu_model) { case VIRTIO_ID_BLOCK: - vdev->config.blk.blk_size = 4096; + vdev->config.blk.blk_size = VIRTIO_DASD_DEFAULT_BLOCK_SIZE; break; case VIRTIO_ID_SCSI: vdev->config.blk.blk_size = vdev->scsi_block_size; @@ -166,46 +152,19 @@ void virtio_assume_eckd(void) virtio_eckd_sectors_for_block_size(vdev->config.blk.blk_size); } 
-bool virtio_disk_is_scsi(void) -{ - VDev *vdev = virtio_get_device(); - - if (vdev->guessed_disk_nature == VIRTIO_GDN_SCSI) { - return true; - } - switch (vdev->senseid.cu_model) { - case VIRTIO_ID_BLOCK: - return (vdev->config.blk.geometry.heads == 255) - && (vdev->config.blk.geometry.sectors == 63) - && (virtio_get_block_size() == VIRTIO_SCSI_BLOCK_SIZE); - case VIRTIO_ID_SCSI: - return true; - } - return false; -} - -bool virtio_disk_is_eckd(void) +bool virtio_ipl_disk_is_valid(void) { + int blksize = virtio_get_block_size(); VDev *vdev = virtio_get_device(); - const int block_size = virtio_get_block_size(); - if (vdev->guessed_disk_nature == VIRTIO_GDN_DASD) { + if (vdev->guessed_disk_nature == VIRTIO_GDN_SCSI || + vdev->guessed_disk_nature == VIRTIO_GDN_DASD) { return true; } - switch (vdev->senseid.cu_model) { - case VIRTIO_ID_BLOCK: - return (vdev->config.blk.geometry.heads == 15) - && (vdev->config.blk.geometry.sectors == - virtio_eckd_sectors_for_block_size(block_size)); - case VIRTIO_ID_SCSI: - return false; - } - return false; -} -bool virtio_ipl_disk_is_valid(void) -{ - return virtio_disk_is_scsi() || virtio_disk_is_eckd(); + return (vdev->senseid.cu_model == VIRTIO_ID_BLOCK || + vdev->senseid.cu_model == VIRTIO_ID_SCSI) && + blksize >= 512 && blksize <= 4096; } int virtio_get_block_size(void) @@ -266,34 +225,12 @@ uint64_t virtio_get_blocks(void) int virtio_blk_setup_device(SubChannelId schid) { VDev *vdev = virtio_get_device(); - int ret = 0; + vdev->guest_features[0] = VIRTIO_BLK_F_GEOMETRY | VIRTIO_BLK_F_BLK_SIZE; vdev->schid = schid; virtio_setup_ccw(vdev); - switch (vdev->senseid.cu_model) { - case VIRTIO_ID_BLOCK: - sclp_print("Using virtio-blk.\n"); - if (!virtio_ipl_disk_is_valid()) { - /* make sure all getters but blocksize return 0 for - * invalid IPL disk - */ - memset(&vdev->config.blk, 0, sizeof(vdev->config.blk)); - virtio_assume_scsi(); - } - break; - case VIRTIO_ID_SCSI: - IPL_assert(vdev->config.scsi.sense_size == VIRTIO_SCSI_SENSE_SIZE, - "Config: sense size mismatch"); - IPL_assert(vdev->config.scsi.cdb_size == VIRTIO_SCSI_CDB_SIZE, - "Config: CDB size mismatch"); - - sclp_print("Using virtio-scsi.\n"); - ret = virtio_scsi_setup(vdev); - break; - default: - panic("\n! 
No IPL device available !\n"); - } + sclp_print("Using virtio-blk.\n"); - return ret; + return 0; } diff --git a/pc-bios/s390-ccw/virtio-scsi.c b/pc-bios/s390-ccw/virtio-scsi.c index 2c8d0f3097..3b7069270c 100644 --- a/pc-bios/s390-ccw/virtio-scsi.c +++ b/pc-bios/s390-ccw/virtio-scsi.c @@ -329,7 +329,7 @@ static void scsi_parse_capacity_report(void *data, } } -int virtio_scsi_setup(VDev *vdev) +static int virtio_scsi_setup(VDev *vdev) { int retry_test_unit_ready = 3; uint8_t data[256]; @@ -430,3 +430,20 @@ int virtio_scsi_setup(VDev *vdev) return 0; } + +int virtio_scsi_setup_device(SubChannelId schid) +{ + VDev *vdev = virtio_get_device(); + + vdev->schid = schid; + virtio_setup_ccw(vdev); + + IPL_assert(vdev->config.scsi.sense_size == VIRTIO_SCSI_SENSE_SIZE, + "Config: sense size mismatch"); + IPL_assert(vdev->config.scsi.cdb_size == VIRTIO_SCSI_CDB_SIZE, + "Config: CDB size mismatch"); + + sclp_print("Using virtio-scsi.\n"); + + return virtio_scsi_setup(vdev); +} diff --git a/pc-bios/s390-ccw/virtio-scsi.h b/pc-bios/s390-ccw/virtio-scsi.h index 4b14c2c2f9..e6b6cd4815 100644 --- a/pc-bios/s390-ccw/virtio-scsi.h +++ b/pc-bios/s390-ccw/virtio-scsi.h @@ -67,8 +67,8 @@ static inline bool virtio_scsi_response_ok(const VirtioScsiCmdResp *r) return r->response == VIRTIO_SCSI_S_OK && r->status == CDB_STATUS_GOOD; } -int virtio_scsi_setup(VDev *vdev); int virtio_scsi_read_many(VDev *vdev, ulong sector, void *load_addr, int sec_num); +int virtio_scsi_setup_device(SubChannelId schid); #endif /* VIRTIO_SCSI_H */ diff --git a/pc-bios/s390-ccw/virtio.c b/pc-bios/s390-ccw/virtio.c index 5d2c6e3381..f37510f312 100644 --- a/pc-bios/s390-ccw/virtio.c +++ b/pc-bios/s390-ccw/virtio.c @@ -220,7 +220,7 @@ int virtio_run(VDev *vdev, int vqid, VirtioCmd *cmd) void virtio_setup_ccw(VDev *vdev) { int i, rc, cfg_size = 0; - unsigned char status = VIRTIO_CONFIG_S_DRIVER_OK; + uint8_t status; struct VirtioFeatureDesc { uint32_t features; uint8_t index; @@ -234,6 +234,10 @@ void virtio_setup_ccw(VDev *vdev) run_ccw(vdev, CCW_CMD_VDEV_RESET, NULL, 0, false); + status = VIRTIO_CONFIG_S_ACKNOWLEDGE; + rc = run_ccw(vdev, CCW_CMD_WRITE_STATUS, &status, sizeof(status), false); + IPL_assert(rc == 0, "Could not write ACKNOWLEDGE status to host"); + switch (vdev->senseid.cu_model) { case VIRTIO_ID_NET: vdev->nr_vqs = 2; @@ -253,9 +257,10 @@ void virtio_setup_ccw(VDev *vdev) default: panic("Unsupported virtio device\n"); } - IPL_assert( - run_ccw(vdev, CCW_CMD_READ_CONF, &vdev->config, cfg_size, false) == 0, - "Could not get block device configuration"); + + status |= VIRTIO_CONFIG_S_DRIVER; + rc = run_ccw(vdev, CCW_CMD_WRITE_STATUS, &status, sizeof(status), false); + IPL_assert(rc == 0, "Could not write DRIVER status to host"); /* Feature negotiation */ for (i = 0; i < ARRAY_SIZE(vdev->guest_features); i++) { @@ -269,6 +274,9 @@ void virtio_setup_ccw(VDev *vdev) IPL_assert(rc == 0, "Could not set features bits"); } + rc = run_ccw(vdev, CCW_CMD_READ_CONF, &vdev->config, cfg_size, false); + IPL_assert(rc == 0, "Could not get virtio device configuration"); + for (i = 0; i < vdev->nr_vqs; i++) { VqInfo info = { .queue = (unsigned long long) ring_area + (i * VIRTIO_RING_SIZE), @@ -281,9 +289,8 @@ void virtio_setup_ccw(VDev *vdev) .num = 0, }; - IPL_assert( - run_ccw(vdev, CCW_CMD_READ_VQ_CONF, &config, sizeof(config), false) == 0, - "Could not get block device VQ configuration"); + rc = run_ccw(vdev, CCW_CMD_READ_VQ_CONF, &config, sizeof(config), false); + IPL_assert(rc == 0, "Could not get virtio device VQ configuration"); 
info.num = config.num; vring_init(&vdev->vrings[i], &info); vdev->vrings[i].schid = vdev->schid; @@ -291,9 +298,10 @@ void virtio_setup_ccw(VDev *vdev) run_ccw(vdev, CCW_CMD_SET_VQ, &info, sizeof(info), false) == 0, "Cannot set VQ info"); } - IPL_assert( - run_ccw(vdev, CCW_CMD_WRITE_STATUS, &status, sizeof(status), false) == 0, - "Could not write status to host"); + + status |= VIRTIO_CONFIG_S_DRIVER_OK; + rc = run_ccw(vdev, CCW_CMD_WRITE_STATUS, &status, sizeof(status), false); + IPL_assert(rc == 0, "Could not write DRIVER_OK status to host"); } bool virtio_is_supported(SubChannelId schid) diff --git a/pc-bios/s390-ccw/virtio.h b/pc-bios/s390-ccw/virtio.h index 19fceb6495..e657d381ec 100644 --- a/pc-bios/s390-ccw/virtio.h +++ b/pc-bios/s390-ccw/virtio.h @@ -182,22 +182,20 @@ enum guessed_disk_nature_type { typedef enum guessed_disk_nature_type VirtioGDN; VirtioGDN virtio_guessed_disk_nature(void); -void virtio_assume_scsi(void); void virtio_assume_eckd(void); void virtio_assume_iso9660(void); -extern bool virtio_disk_is_scsi(void); -extern bool virtio_disk_is_eckd(void); -extern bool virtio_ipl_disk_is_valid(void); -extern int virtio_get_block_size(void); -extern uint8_t virtio_get_heads(void); -extern uint8_t virtio_get_sectors(void); -extern uint64_t virtio_get_blocks(void); -extern int virtio_read_many(ulong sector, void *load_addr, int sec_num); +bool virtio_ipl_disk_is_valid(void); +int virtio_get_block_size(void); +uint8_t virtio_get_heads(void); +uint8_t virtio_get_sectors(void); +uint64_t virtio_get_blocks(void); +int virtio_read_many(ulong sector, void *load_addr, int sec_num); #define VIRTIO_SECTOR_SIZE 512 #define VIRTIO_ISO_BLOCK_SIZE 2048 #define VIRTIO_SCSI_BLOCK_SIZE 512 +#define VIRTIO_DASD_DEFAULT_BLOCK_SIZE 4096 static inline ulong virtio_sector_adjust(ulong sector) { diff --git a/pc-bios/s390-netboot.img b/pc-bios/s390-netboot.img Binary files differindex bc34af8a28..682da24a05 100644 --- a/pc-bios/s390-netboot.img +++ b/pc-bios/s390-netboot.img diff --git a/target/arm/cpregs.h b/target/arm/cpregs.h index d30758ee71..7e78c2c05c 100644 --- a/target/arm/cpregs.h +++ b/target/arm/cpregs.h @@ -442,6 +442,9 @@ void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri, /* CPReadFn that can be used for read-as-zero behaviour */ uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri); +/* CPWriteFn that just writes the value to ri->fieldoffset */ +void raw_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value); + /* * CPResetFn that does nothing, for use if no reset is required even * if fieldoffset is non zero. 
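The virtio_setup_ccw() hunk earlier in this patch makes the s390-ccw bios follow the status handshake order the virtio specification expects: reset, ACKNOWLEDGE, DRIVER, feature negotiation, config/virtqueue setup, and only then DRIVER_OK, instead of writing a single final status byte. The following standalone sketch shows just that accumulation order; write_status() and main() are illustrative stand-ins for the run_ccw(CCW_CMD_WRITE_STATUS, ...) calls and are not part of the patch, while the VIRTIO_CONFIG_S_* values are the ones defined by the virtio spec.

#include <stdint.h>
#include <stdio.h>

/* Device status bits as defined by the virtio specification. */
#define VIRTIO_CONFIG_S_ACKNOWLEDGE  1u
#define VIRTIO_CONFIG_S_DRIVER       2u
#define VIRTIO_CONFIG_S_DRIVER_OK    4u

/* Illustrative stand-in for run_ccw(vdev, CCW_CMD_WRITE_STATUS, ...). */
static int write_status(uint8_t status)
{
    printf("device status now 0x%02x\n", (unsigned)status);
    return 0; /* 0 == success, as the IPL_assert() calls in the patch expect */
}

int main(void)
{
    uint8_t status = 0;

    /* 1. Guest has noticed the device. */
    status |= VIRTIO_CONFIG_S_ACKNOWLEDGE;
    if (write_status(status)) {
        return 1;
    }

    /* 2. Guest has a driver for the device. */
    status |= VIRTIO_CONFIG_S_DRIVER;
    if (write_status(status)) {
        return 1;
    }

    /* ... feature negotiation, config read and virtqueue setup go here ... */

    /* 3. Driver is fully set up; the device may now be used. */
    status |= VIRTIO_CONFIG_S_DRIVER_OK;
    return write_status(status);
}

Keeping each status bit set by its own WRITE_STATUS command, in this order, is what allows the patch to read the device configuration only after DRIVER has been acknowledged, rather than announcing DRIVER_OK in a single write at the end.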
diff --git a/target/arm/cpu.c b/target/arm/cpu.c index ae6dca2f01..5de7e097e9 100644 --- a/target/arm/cpu.c +++ b/target/arm/cpu.c @@ -204,13 +204,23 @@ static void arm_cpu_reset(DeviceState *dev) /* and to the FP/Neon instructions */ env->cp15.cpacr_el1 = FIELD_DP64(env->cp15.cpacr_el1, CPACR_EL1, FPEN, 3); - /* and to the SVE instructions */ - env->cp15.cpacr_el1 = FIELD_DP64(env->cp15.cpacr_el1, - CPACR_EL1, ZEN, 3); - /* with reasonable vector length */ + /* and to the SVE instructions, with default vector length */ if (cpu_isar_feature(aa64_sve, cpu)) { + env->cp15.cpacr_el1 = FIELD_DP64(env->cp15.cpacr_el1, + CPACR_EL1, ZEN, 3); env->vfp.zcr_el[1] = cpu->sve_default_vq - 1; } + /* and for SME instructions, with default vector length, and TPIDR2 */ + if (cpu_isar_feature(aa64_sme, cpu)) { + env->cp15.sctlr_el[1] |= SCTLR_EnTP2; + env->cp15.cpacr_el1 = FIELD_DP64(env->cp15.cpacr_el1, + CPACR_EL1, SMEN, 3); + env->vfp.smcr_el[1] = cpu->sme_default_vq - 1; + if (cpu_isar_feature(aa64_sme_fa64, cpu)) { + env->vfp.smcr_el[1] = FIELD_DP64(env->vfp.smcr_el[1], + SMCR, FA64, 1); + } + } /* * Enable 48-bit address space (TODO: take reserved_va into account). * Enable TBI0 but not TBI1. @@ -878,6 +888,7 @@ static void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags) int i; int el = arm_current_el(env); const char *ns_status; + bool sve; qemu_fprintf(f, " PC=%016" PRIx64 " ", env->pc); for (i = 0; i < 32; i++) { @@ -904,6 +915,12 @@ static void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags) el, psr & PSTATE_SP ? 'h' : 't'); + if (cpu_isar_feature(aa64_sme, cpu)) { + qemu_fprintf(f, " SVCR=%08" PRIx64 " %c%c", + env->svcr, + (FIELD_EX64(env->svcr, SVCR, ZA) ? 'Z' : '-'), + (FIELD_EX64(env->svcr, SVCR, SM) ? 'S' : '-')); + } if (cpu_isar_feature(aa64_bti, cpu)) { qemu_fprintf(f, " BTYPE=%d", (psr & PSTATE_BTYPE) >> 10); } @@ -918,7 +935,15 @@ static void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags) qemu_fprintf(f, " FPCR=%08x FPSR=%08x\n", vfp_get_fpcr(env), vfp_get_fpsr(env)); - if (cpu_isar_feature(aa64_sve, cpu) && sve_exception_el(env, el) == 0) { + if (cpu_isar_feature(aa64_sme, cpu) && FIELD_EX64(env->svcr, SVCR, SM)) { + sve = sme_exception_el(env, el) == 0; + } else if (cpu_isar_feature(aa64_sve, cpu)) { + sve = sve_exception_el(env, el) == 0; + } else { + sve = false; + } + + if (sve) { int j, zcr_len = sve_vqm1_for_el(env, el); for (i = 0; i <= FFR_PRED_NUM; i++) { diff --git a/target/arm/cpu.h b/target/arm/cpu.h index 4a4342f262..1e36a839ee 100644 --- a/target/arm/cpu.h +++ b/target/arm/cpu.h @@ -500,6 +500,7 @@ typedef struct CPUArchState { uint64_t dbgwcr[16]; /* watchpoint control registers */ uint64_t mdscr_el1; uint64_t oslsr_el1; /* OS Lock Status */ + uint64_t osdlr_el1; /* OS DoubleLock status */ uint64_t mdcr_el2; uint64_t mdcr_el3; /* Stores the architectural value of the counter *the last time it was @@ -988,6 +989,8 @@ struct ArchCPU { uint32_t mvfr2; uint32_t id_dfr0; uint32_t dbgdidr; + uint32_t dbgdevid; + uint32_t dbgdevid1; uint64_t id_aa64isar0; uint64_t id_aa64isar1; uint64_t id_aa64pfr0; @@ -2251,6 +2254,15 @@ FIELD(DBGDIDR, CTX_CMPS, 20, 4) FIELD(DBGDIDR, BRPS, 24, 4) FIELD(DBGDIDR, WRPS, 28, 4) +FIELD(DBGDEVID, PCSAMPLE, 0, 4) +FIELD(DBGDEVID, WPADDRMASK, 4, 4) +FIELD(DBGDEVID, BPADDRMASK, 8, 4) +FIELD(DBGDEVID, VECTORCATCH, 12, 4) +FIELD(DBGDEVID, VIRTEXTNS, 16, 4) +FIELD(DBGDEVID, DOUBLELOCK, 20, 4) +FIELD(DBGDEVID, AUXREGS, 24, 4) +FIELD(DBGDEVID, CIDMASK, 28, 4) + FIELD(MVFR0, SIMDREG, 0, 4) FIELD(MVFR0, FPSP, 4, 4) FIELD(MVFR0, 
FPDP, 8, 4) @@ -3146,6 +3158,11 @@ FIELD(TBFLAG_A32, HSTR_ACTIVE, 9, 1) * the same thing as the current security state of the processor! */ FIELD(TBFLAG_A32, NS, 10, 1) +/* + * Indicates that SME Streaming mode is active, and SMCR_ELx.FA64 is not. + * This requires an SME trap from AArch32 mode when using NEON. + */ +FIELD(TBFLAG_A32, SME_TRAP_NONSTREAMING, 11, 1) /* * Bit usage when in AArch32 state, for M-profile only. @@ -3183,6 +3200,8 @@ FIELD(TBFLAG_A64, SMEEXC_EL, 20, 2) FIELD(TBFLAG_A64, PSTATE_SM, 22, 1) FIELD(TBFLAG_A64, PSTATE_ZA, 23, 1) FIELD(TBFLAG_A64, SVL, 24, 4) +/* Indicates that SME Streaming mode is active, and SMCR_ELx.FA64 is not. */ +FIELD(TBFLAG_A64, SME_TRAP_NONSTREAMING, 28, 1) /* * Helpers for using the above. @@ -3719,11 +3738,21 @@ static inline bool isar_feature_aa32_ssbs(const ARMISARegisters *id) return FIELD_EX32(id->id_pfr2, ID_PFR2, SSBS) != 0; } +static inline bool isar_feature_aa32_debugv7p1(const ARMISARegisters *id) +{ + return FIELD_EX32(id->id_dfr0, ID_DFR0, COPDBG) >= 5; +} + static inline bool isar_feature_aa32_debugv8p2(const ARMISARegisters *id) { return FIELD_EX32(id->id_dfr0, ID_DFR0, COPDBG) >= 8; } +static inline bool isar_feature_aa32_doublelock(const ARMISARegisters *id) +{ + return FIELD_EX32(id->dbgdevid, DBGDEVID, DOUBLELOCK) > 0; +} + /* * 64-bit feature tests via id registers. */ @@ -4148,6 +4177,11 @@ static inline bool isar_feature_aa64_sme_fa64(const ARMISARegisters *id) return FIELD_EX64(id->id_aa64smfr0, ID_AA64SMFR0, FA64); } +static inline bool isar_feature_aa64_doublelock(const ARMISARegisters *id) +{ + return FIELD_SEX64(id->id_aa64dfr0, ID_AA64DFR0, DOUBLELOCK) >= 0; +} + /* * Feature tests for "does this exist in either 32-bit or 64-bit?" */ diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c index 19188d6cc2..78e27f778a 100644 --- a/target/arm/cpu64.c +++ b/target/arm/cpu64.c @@ -79,6 +79,8 @@ static void aarch64_a57_initfn(Object *obj) cpu->isar.id_aa64isar0 = 0x00011120; cpu->isar.id_aa64mmfr0 = 0x00001124; cpu->isar.dbgdidr = 0x3516d000; + cpu->isar.dbgdevid = 0x01110f13; + cpu->isar.dbgdevid1 = 0x2; cpu->isar.reset_pmcr_el0 = 0x41013000; cpu->clidr = 0x0a200023; cpu->ccsidr[0] = 0x701fe00a; /* 32KB L1 dcache */ @@ -134,6 +136,8 @@ static void aarch64_a53_initfn(Object *obj) cpu->isar.id_aa64isar0 = 0x00011120; cpu->isar.id_aa64mmfr0 = 0x00001122; /* 40 bit physical addr */ cpu->isar.dbgdidr = 0x3516d000; + cpu->isar.dbgdevid = 0x00110f13; + cpu->isar.dbgdevid1 = 0x1; cpu->isar.reset_pmcr_el0 = 0x41033000; cpu->clidr = 0x0a200023; cpu->ccsidr[0] = 0x700fe01a; /* 32KB L1 dcache */ @@ -187,6 +191,8 @@ static void aarch64_a72_initfn(Object *obj) cpu->isar.id_aa64isar0 = 0x00011120; cpu->isar.id_aa64mmfr0 = 0x00001124; cpu->isar.dbgdidr = 0x3516d000; + cpu->isar.dbgdevid = 0x01110f13; + cpu->isar.dbgdevid1 = 0x2; cpu->isar.reset_pmcr_el0 = 0x41023000; cpu->clidr = 0x0a200023; cpu->ccsidr[0] = 0x701fe00a; /* 32KB L1 dcache */ @@ -1018,6 +1024,7 @@ static void aarch64_max_initfn(Object *obj) */ t = FIELD_DP64(t, ID_AA64PFR1, MTE, 3); /* FEAT_MTE3 */ t = FIELD_DP64(t, ID_AA64PFR1, RAS_FRAC, 0); /* FEAT_RASv1p1 + FEAT_DoubleFault */ + t = FIELD_DP64(t, ID_AA64PFR1, SME, 1); /* FEAT_SME */ t = FIELD_DP64(t, ID_AA64PFR1, CSV2_FRAC, 0); /* FEAT_CSV2_2 */ cpu->isar.id_aa64pfr1 = t; @@ -1068,6 +1075,16 @@ static void aarch64_max_initfn(Object *obj) t = FIELD_DP64(t, ID_AA64DFR0, PMUVER, 5); /* FEAT_PMUv3p4 */ cpu->isar.id_aa64dfr0 = t; + t = cpu->isar.id_aa64smfr0; + t = FIELD_DP64(t, ID_AA64SMFR0, F32F32, 1); /* FEAT_SME */ + t = 
FIELD_DP64(t, ID_AA64SMFR0, B16F32, 1); /* FEAT_SME */ + t = FIELD_DP64(t, ID_AA64SMFR0, F16F32, 1); /* FEAT_SME */ + t = FIELD_DP64(t, ID_AA64SMFR0, I8I32, 0xf); /* FEAT_SME */ + t = FIELD_DP64(t, ID_AA64SMFR0, F64F64, 1); /* FEAT_SME_F64F64 */ + t = FIELD_DP64(t, ID_AA64SMFR0, I16I64, 0xf); /* FEAT_SME_I16I64 */ + t = FIELD_DP64(t, ID_AA64SMFR0, FA64, 1); /* FEAT_SME_FA64 */ + cpu->isar.id_aa64smfr0 = t; + /* Replicate the same data to the 32-bit id registers. */ aa32_max_features(cpu); diff --git a/target/arm/cpu_tcg.c b/target/arm/cpu_tcg.c index b751a19c8a..3099b38e32 100644 --- a/target/arm/cpu_tcg.c +++ b/target/arm/cpu_tcg.c @@ -563,6 +563,8 @@ static void cortex_a7_initfn(Object *obj) cpu->isar.id_isar3 = 0x11112131; cpu->isar.id_isar4 = 0x10011142; cpu->isar.dbgdidr = 0x3515f005; + cpu->isar.dbgdevid = 0x01110f13; + cpu->isar.dbgdevid1 = 0x1; cpu->clidr = 0x0a200023; cpu->ccsidr[0] = 0x701fe00a; /* 32K L1 dcache */ cpu->ccsidr[1] = 0x201fe00a; /* 32K L1 icache */ @@ -606,6 +608,8 @@ static void cortex_a15_initfn(Object *obj) cpu->isar.id_isar3 = 0x11112131; cpu->isar.id_isar4 = 0x10011142; cpu->isar.dbgdidr = 0x3515f021; + cpu->isar.dbgdevid = 0x01110f13; + cpu->isar.dbgdevid1 = 0x0; cpu->clidr = 0x0a200023; cpu->ccsidr[0] = 0x701fe00a; /* 32K L1 dcache */ cpu->ccsidr[1] = 0x201fe00a; /* 32K L1 icache */ @@ -1098,6 +1102,8 @@ static void arm_max_initfn(Object *obj) cpu->isar.id_isar5 = 0x00011121; cpu->isar.id_isar6 = 0; cpu->isar.dbgdidr = 0x3516d000; + cpu->isar.dbgdevid = 0x00110f13; + cpu->isar.dbgdevid1 = 0x2; cpu->isar.reset_pmcr_el0 = 0x41013000; cpu->clidr = 0x0a200023; cpu->ccsidr[0] = 0x701fe00a; /* 32KB L1 dcache */ diff --git a/target/arm/debug_helper.c b/target/arm/debug_helper.c index b18a6bd3a2..d09fccb0a4 100644 --- a/target/arm/debug_helper.c +++ b/target/arm/debug_helper.c @@ -6,8 +6,10 @@ * SPDX-License-Identifier: GPL-2.0-or-later */ #include "qemu/osdep.h" +#include "qemu/log.h" #include "cpu.h" #include "internals.h" +#include "cpregs.h" #include "exec/exec-all.h" #include "exec/helper-proto.h" @@ -140,6 +142,9 @@ static bool aa32_generate_debug_exceptions(CPUARMState *env) */ bool arm_generate_debug_exceptions(CPUARMState *env) { + if ((env->cp15.oslsr_el1 & 1) || (env->cp15.osdlr_el1 & 1)) { + return false; + } if (is_a64(env)) { return aa64_generate_debug_exceptions(env); } else { @@ -528,6 +533,581 @@ void HELPER(exception_swstep)(CPUARMState *env, uint32_t syndrome) raise_exception_debug(env, EXCP_UDEF, syndrome); } +/* + * Check for traps to "powerdown debug" registers, which are controlled + * by MDCR.TDOSA + */ +static CPAccessResult access_tdosa(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) +{ + int el = arm_current_el(env); + uint64_t mdcr_el2 = arm_mdcr_el2_eff(env); + bool mdcr_el2_tdosa = (mdcr_el2 & MDCR_TDOSA) || (mdcr_el2 & MDCR_TDE) || + (arm_hcr_el2_eff(env) & HCR_TGE); + + if (el < 2 && mdcr_el2_tdosa) { + return CP_ACCESS_TRAP_EL2; + } + if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDOSA)) { + return CP_ACCESS_TRAP_EL3; + } + return CP_ACCESS_OK; +} + +/* + * Check for traps to "debug ROM" registers, which are controlled + * by MDCR_EL2.TDRA for EL2 but by the more general MDCR_EL3.TDA for EL3. 
+ */ +static CPAccessResult access_tdra(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) +{ + int el = arm_current_el(env); + uint64_t mdcr_el2 = arm_mdcr_el2_eff(env); + bool mdcr_el2_tdra = (mdcr_el2 & MDCR_TDRA) || (mdcr_el2 & MDCR_TDE) || + (arm_hcr_el2_eff(env) & HCR_TGE); + + if (el < 2 && mdcr_el2_tdra) { + return CP_ACCESS_TRAP_EL2; + } + if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) { + return CP_ACCESS_TRAP_EL3; + } + return CP_ACCESS_OK; +} + +/* + * Check for traps to general debug registers, which are controlled + * by MDCR_EL2.TDA for EL2 and MDCR_EL3.TDA for EL3. + */ +static CPAccessResult access_tda(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) +{ + int el = arm_current_el(env); + uint64_t mdcr_el2 = arm_mdcr_el2_eff(env); + bool mdcr_el2_tda = (mdcr_el2 & MDCR_TDA) || (mdcr_el2 & MDCR_TDE) || + (arm_hcr_el2_eff(env) & HCR_TGE); + + if (el < 2 && mdcr_el2_tda) { + return CP_ACCESS_TRAP_EL2; + } + if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) { + return CP_ACCESS_TRAP_EL3; + } + return CP_ACCESS_OK; +} + +static void oslar_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + /* + * Writes to OSLAR_EL1 may update the OS lock status, which can be + * read via a bit in OSLSR_EL1. + */ + int oslock; + + if (ri->state == ARM_CP_STATE_AA32) { + oslock = (value == 0xC5ACCE55); + } else { + oslock = value & 1; + } + + env->cp15.oslsr_el1 = deposit32(env->cp15.oslsr_el1, 1, 1, oslock); +} + +static void osdlr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + ARMCPU *cpu = env_archcpu(env); + /* + * Only defined bit is bit 0 (DLK); if Feat_DoubleLock is not + * implemented this is RAZ/WI. + */ + if(arm_feature(env, ARM_FEATURE_AARCH64) + ? cpu_isar_feature(aa64_doublelock, cpu) + : cpu_isar_feature(aa32_doublelock, cpu)) { + env->cp15.osdlr_el1 = value & 1; + } +} + +static const ARMCPRegInfo debug_cp_reginfo[] = { + /* + * DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped + * debug components. The AArch64 version of DBGDRAR is named MDRAR_EL1; + * unlike DBGDRAR it is never accessible from EL0. + * DBGDSAR is deprecated and must RAZ from v8 anyway, so it has no AArch64 + * accessor. + */ + { .name = "DBGDRAR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0, + .access = PL0_R, .accessfn = access_tdra, + .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "MDRAR_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0, + .access = PL1_R, .accessfn = access_tdra, + .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "DBGDSAR", .cp = 14, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0, + .access = PL0_R, .accessfn = access_tdra, + .type = ARM_CP_CONST, .resetvalue = 0 }, + /* Monitor debug system control register; the 32-bit alias is DBGDSCRext. */ + { .name = "MDSCR_EL1", .state = ARM_CP_STATE_BOTH, + .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2, + .access = PL1_RW, .accessfn = access_tda, + .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1), + .resetvalue = 0 }, + /* + * MDCCSR_EL0[30:29] map to EDSCR[30:29]. Simply RAZ as the external + * Debug Communication Channel is not implemented. + */ + { .name = "MDCCSR_EL0", .state = ARM_CP_STATE_AA64, + .opc0 = 2, .opc1 = 3, .crn = 0, .crm = 1, .opc2 = 0, + .access = PL0_R, .accessfn = access_tda, + .type = ARM_CP_CONST, .resetvalue = 0 }, + /* + * DBGDSCRint[15,12,5:2] map to MDSCR_EL1[15,12,5:2]. Map all bits as + * it is unlikely a guest will care. + * We don't implement the configurable EL0 access. 
+ */ + { .name = "DBGDSCRint", .state = ARM_CP_STATE_AA32, + .cp = 14, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0, + .type = ARM_CP_ALIAS, + .access = PL1_R, .accessfn = access_tda, + .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1), }, + { .name = "OSLAR_EL1", .state = ARM_CP_STATE_BOTH, + .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 4, + .access = PL1_W, .type = ARM_CP_NO_RAW, + .accessfn = access_tdosa, + .writefn = oslar_write }, + { .name = "OSLSR_EL1", .state = ARM_CP_STATE_BOTH, + .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 4, + .access = PL1_R, .resetvalue = 10, + .accessfn = access_tdosa, + .fieldoffset = offsetof(CPUARMState, cp15.oslsr_el1) }, + /* Dummy OSDLR_EL1: 32-bit Linux will read this */ + { .name = "OSDLR_EL1", .state = ARM_CP_STATE_BOTH, + .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 4, + .access = PL1_RW, .accessfn = access_tdosa, + .writefn = osdlr_write, + .fieldoffset = offsetof(CPUARMState, cp15.osdlr_el1) }, + /* + * Dummy DBGVCR: Linux wants to clear this on startup, but we don't + * implement vector catch debug events yet. + */ + { .name = "DBGVCR", + .cp = 14, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0, + .access = PL1_RW, .accessfn = access_tda, + .type = ARM_CP_NOP }, + /* + * Dummy DBGVCR32_EL2 (which is only for a 64-bit hypervisor + * to save and restore a 32-bit guest's DBGVCR) + */ + { .name = "DBGVCR32_EL2", .state = ARM_CP_STATE_AA64, + .opc0 = 2, .opc1 = 4, .crn = 0, .crm = 7, .opc2 = 0, + .access = PL2_RW, .accessfn = access_tda, + .type = ARM_CP_NOP | ARM_CP_EL3_NO_EL2_KEEP }, + /* + * Dummy MDCCINT_EL1, since we don't implement the Debug Communications + * Channel but Linux may try to access this register. The 32-bit + * alias is DBGDCCINT. + */ + { .name = "MDCCINT_EL1", .state = ARM_CP_STATE_BOTH, + .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0, + .access = PL1_RW, .accessfn = access_tda, + .type = ARM_CP_NOP }, +}; + +static const ARMCPRegInfo debug_lpae_cp_reginfo[] = { + /* 64 bit access versions of the (dummy) debug registers */ + { .name = "DBGDRAR", .cp = 14, .crm = 1, .opc1 = 0, + .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 }, + { .name = "DBGDSAR", .cp = 14, .crm = 2, .opc1 = 0, + .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 }, +}; + +void hw_watchpoint_update(ARMCPU *cpu, int n) +{ + CPUARMState *env = &cpu->env; + vaddr len = 0; + vaddr wvr = env->cp15.dbgwvr[n]; + uint64_t wcr = env->cp15.dbgwcr[n]; + int mask; + int flags = BP_CPU | BP_STOP_BEFORE_ACCESS; + + if (env->cpu_watchpoint[n]) { + cpu_watchpoint_remove_by_ref(CPU(cpu), env->cpu_watchpoint[n]); + env->cpu_watchpoint[n] = NULL; + } + + if (!FIELD_EX64(wcr, DBGWCR, E)) { + /* E bit clear : watchpoint disabled */ + return; + } + + switch (FIELD_EX64(wcr, DBGWCR, LSC)) { + case 0: + /* LSC 00 is reserved and must behave as if the wp is disabled */ + return; + case 1: + flags |= BP_MEM_READ; + break; + case 2: + flags |= BP_MEM_WRITE; + break; + case 3: + flags |= BP_MEM_ACCESS; + break; + } + + /* + * Attempts to use both MASK and BAS fields simultaneously are + * CONSTRAINED UNPREDICTABLE; we opt to ignore BAS in this case, + * thus generating a watchpoint for every byte in the masked region. + */ + mask = FIELD_EX64(wcr, DBGWCR, MASK); + if (mask == 1 || mask == 2) { + /* + * Reserved values of MASK; we must act as if the mask value was + * some non-reserved value, or as if the watchpoint were disabled. + * We choose the latter. 
+ */ + return; + } else if (mask) { + /* Watchpoint covers an aligned area up to 2GB in size */ + len = 1ULL << mask; + /* + * If masked bits in WVR are not zero it's CONSTRAINED UNPREDICTABLE + * whether the watchpoint fires when the unmasked bits match; we opt + * to generate the exceptions. + */ + wvr &= ~(len - 1); + } else { + /* Watchpoint covers bytes defined by the byte address select bits */ + int bas = FIELD_EX64(wcr, DBGWCR, BAS); + int basstart; + + if (extract64(wvr, 2, 1)) { + /* + * Deprecated case of an only 4-aligned address. BAS[7:4] are + * ignored, and BAS[3:0] define which bytes to watch. + */ + bas &= 0xf; + } + + if (bas == 0) { + /* This must act as if the watchpoint is disabled */ + return; + } + + /* + * The BAS bits are supposed to be programmed to indicate a contiguous + * range of bytes. Otherwise it is CONSTRAINED UNPREDICTABLE whether + * we fire for each byte in the word/doubleword addressed by the WVR. + * We choose to ignore any non-zero bits after the first range of 1s. + */ + basstart = ctz32(bas); + len = cto32(bas >> basstart); + wvr += basstart; + } + + cpu_watchpoint_insert(CPU(cpu), wvr, len, flags, + &env->cpu_watchpoint[n]); +} + +void hw_watchpoint_update_all(ARMCPU *cpu) +{ + int i; + CPUARMState *env = &cpu->env; + + /* + * Completely clear out existing QEMU watchpoints and our array, to + * avoid possible stale entries following migration load. + */ + cpu_watchpoint_remove_all(CPU(cpu), BP_CPU); + memset(env->cpu_watchpoint, 0, sizeof(env->cpu_watchpoint)); + + for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_watchpoint); i++) { + hw_watchpoint_update(cpu, i); + } +} + +static void dbgwvr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + ARMCPU *cpu = env_archcpu(env); + int i = ri->crm; + + /* + * Bits [1:0] are RES0. + * + * It is IMPLEMENTATION DEFINED whether [63:49] ([63:53] with FEAT_LVA) + * are hardwired to the value of bit [48] ([52] with FEAT_LVA), or if + * they contain the value written. It is CONSTRAINED UNPREDICTABLE + * whether the RESS bits are ignored when comparing an address. + * + * Therefore we are allowed to compare the entire register, which lets + * us avoid considering whether or not FEAT_LVA is actually enabled. + */ + value &= ~3ULL; + + raw_write(env, ri, value); + hw_watchpoint_update(cpu, i); +} + +static void dbgwcr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + ARMCPU *cpu = env_archcpu(env); + int i = ri->crm; + + raw_write(env, ri, value); + hw_watchpoint_update(cpu, i); +} + +void hw_breakpoint_update(ARMCPU *cpu, int n) +{ + CPUARMState *env = &cpu->env; + uint64_t bvr = env->cp15.dbgbvr[n]; + uint64_t bcr = env->cp15.dbgbcr[n]; + vaddr addr; + int bt; + int flags = BP_CPU; + + if (env->cpu_breakpoint[n]) { + cpu_breakpoint_remove_by_ref(CPU(cpu), env->cpu_breakpoint[n]); + env->cpu_breakpoint[n] = NULL; + } + + if (!extract64(bcr, 0, 1)) { + /* E bit clear : watchpoint disabled */ + return; + } + + bt = extract64(bcr, 20, 4); + + switch (bt) { + case 4: /* unlinked address mismatch (reserved if AArch64) */ + case 5: /* linked address mismatch (reserved if AArch64) */ + qemu_log_mask(LOG_UNIMP, + "arm: address mismatch breakpoint types not implemented\n"); + return; + case 0: /* unlinked address match */ + case 1: /* linked address match */ + { + /* + * Bits [1:0] are RES0. 
+ * + * It is IMPLEMENTATION DEFINED whether bits [63:49] + * ([63:53] for FEAT_LVA) are hardwired to a copy of the sign bit + * of the VA field ([48] or [52] for FEAT_LVA), or whether the + * value is read as written. It is CONSTRAINED UNPREDICTABLE + * whether the RESS bits are ignored when comparing an address. + * Therefore we are allowed to compare the entire register, which + * lets us avoid considering whether FEAT_LVA is actually enabled. + * + * The BAS field is used to allow setting breakpoints on 16-bit + * wide instructions; it is CONSTRAINED UNPREDICTABLE whether + * a bp will fire if the addresses covered by the bp and the addresses + * covered by the insn overlap but the insn doesn't start at the + * start of the bp address range. We choose to require the insn and + * the bp to have the same address. The constraints on writing to + * BAS enforced in dbgbcr_write mean we have only four cases: + * 0b0000 => no breakpoint + * 0b0011 => breakpoint on addr + * 0b1100 => breakpoint on addr + 2 + * 0b1111 => breakpoint on addr + * See also figure D2-3 in the v8 ARM ARM (DDI0487A.c). + */ + int bas = extract64(bcr, 5, 4); + addr = bvr & ~3ULL; + if (bas == 0) { + return; + } + if (bas == 0xc) { + addr += 2; + } + break; + } + case 2: /* unlinked context ID match */ + case 8: /* unlinked VMID match (reserved if no EL2) */ + case 10: /* unlinked context ID and VMID match (reserved if no EL2) */ + qemu_log_mask(LOG_UNIMP, + "arm: unlinked context breakpoint types not implemented\n"); + return; + case 9: /* linked VMID match (reserved if no EL2) */ + case 11: /* linked context ID and VMID match (reserved if no EL2) */ + case 3: /* linked context ID match */ + default: + /* + * We must generate no events for Linked context matches (unless + * they are linked to by some other bp/wp, which is handled in + * updates for the linking bp/wp). We choose to also generate no events + * for reserved values. + */ + return; + } + + cpu_breakpoint_insert(CPU(cpu), addr, flags, &env->cpu_breakpoint[n]); +} + +void hw_breakpoint_update_all(ARMCPU *cpu) +{ + int i; + CPUARMState *env = &cpu->env; + + /* + * Completely clear out existing QEMU breakpoints and our array, to + * avoid possible stale entries following migration load. + */ + cpu_breakpoint_remove_all(CPU(cpu), BP_CPU); + memset(env->cpu_breakpoint, 0, sizeof(env->cpu_breakpoint)); + + for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_breakpoint); i++) { + hw_breakpoint_update(cpu, i); + } +} + +static void dbgbvr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + ARMCPU *cpu = env_archcpu(env); + int i = ri->crm; + + raw_write(env, ri, value); + hw_breakpoint_update(cpu, i); +} + +static void dbgbcr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + ARMCPU *cpu = env_archcpu(env); + int i = ri->crm; + + /* + * BAS[3] is a read-only copy of BAS[2], and BAS[1] a read-only + * copy of BAS[0]. + */ + value = deposit64(value, 6, 1, extract64(value, 5, 1)); + value = deposit64(value, 8, 1, extract64(value, 7, 1)); + + raw_write(env, ri, value); + hw_breakpoint_update(cpu, i); +} + +void define_debug_regs(ARMCPU *cpu) +{ + /* + * Define v7 and v8 architectural debug registers. + * These are just dummy implementations for now. + */ + int i; + int wrps, brps, ctx_cmps; + + /* + * The Arm ARM says DBGDIDR is optional and deprecated if EL1 cannot + * use AArch32. Given that bit 15 is RES1, if the value is 0 then + * the register must not exist for this cpu. 
+ */ + if (cpu->isar.dbgdidr != 0) { + ARMCPRegInfo dbgdidr = { + .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, + .opc1 = 0, .opc2 = 0, + .access = PL0_R, .accessfn = access_tda, + .type = ARM_CP_CONST, .resetvalue = cpu->isar.dbgdidr, + }; + define_one_arm_cp_reg(cpu, &dbgdidr); + } + + /* + * DBGDEVID is present in the v7 debug architecture if + * DBGDIDR.DEVID_imp is 1 (bit 15); from v7.1 and on it is + * mandatory (and bit 15 is RES1). DBGDEVID1 and DBGDEVID2 exist + * from v7.1 of the debug architecture. Because no fields have yet + * been defined in DBGDEVID2 (and quite possibly none will ever + * be) we don't define an ARMISARegisters field for it. + * These registers exist only if EL1 can use AArch32, but that + * happens naturally because they are only PL1 accessible anyway. + */ + if (extract32(cpu->isar.dbgdidr, 15, 1)) { + ARMCPRegInfo dbgdevid = { + .name = "DBGDEVID", + .cp = 14, .opc1 = 0, .crn = 7, .opc2 = 2, .crn = 7, + .access = PL1_R, .accessfn = access_tda, + .type = ARM_CP_CONST, .resetvalue = cpu->isar.dbgdevid, + }; + define_one_arm_cp_reg(cpu, &dbgdevid); + } + if (cpu_isar_feature(aa32_debugv7p1, cpu)) { + ARMCPRegInfo dbgdevid12[] = { + { + .name = "DBGDEVID1", + .cp = 14, .opc1 = 0, .crn = 7, .opc2 = 1, .crn = 7, + .access = PL1_R, .accessfn = access_tda, + .type = ARM_CP_CONST, .resetvalue = cpu->isar.dbgdevid1, + }, { + .name = "DBGDEVID2", + .cp = 14, .opc1 = 0, .crn = 7, .opc2 = 0, .crn = 7, + .access = PL1_R, .accessfn = access_tda, + .type = ARM_CP_CONST, .resetvalue = 0, + }, + }; + define_arm_cp_regs(cpu, dbgdevid12); + } + + brps = arm_num_brps(cpu); + wrps = arm_num_wrps(cpu); + ctx_cmps = arm_num_ctx_cmps(cpu); + + assert(ctx_cmps <= brps); + + define_arm_cp_regs(cpu, debug_cp_reginfo); + + if (arm_feature(&cpu->env, ARM_FEATURE_LPAE)) { + define_arm_cp_regs(cpu, debug_lpae_cp_reginfo); + } + + for (i = 0; i < brps; i++) { + char *dbgbvr_el1_name = g_strdup_printf("DBGBVR%d_EL1", i); + char *dbgbcr_el1_name = g_strdup_printf("DBGBCR%d_EL1", i); + ARMCPRegInfo dbgregs[] = { + { .name = dbgbvr_el1_name, .state = ARM_CP_STATE_BOTH, + .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 4, + .access = PL1_RW, .accessfn = access_tda, + .fieldoffset = offsetof(CPUARMState, cp15.dbgbvr[i]), + .writefn = dbgbvr_write, .raw_writefn = raw_write + }, + { .name = dbgbcr_el1_name, .state = ARM_CP_STATE_BOTH, + .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 5, + .access = PL1_RW, .accessfn = access_tda, + .fieldoffset = offsetof(CPUARMState, cp15.dbgbcr[i]), + .writefn = dbgbcr_write, .raw_writefn = raw_write + }, + }; + define_arm_cp_regs(cpu, dbgregs); + g_free(dbgbvr_el1_name); + g_free(dbgbcr_el1_name); + } + + for (i = 0; i < wrps; i++) { + char *dbgwvr_el1_name = g_strdup_printf("DBGWVR%d_EL1", i); + char *dbgwcr_el1_name = g_strdup_printf("DBGWCR%d_EL1", i); + ARMCPRegInfo dbgregs[] = { + { .name = dbgwvr_el1_name, .state = ARM_CP_STATE_BOTH, + .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 6, + .access = PL1_RW, .accessfn = access_tda, + .fieldoffset = offsetof(CPUARMState, cp15.dbgwvr[i]), + .writefn = dbgwvr_write, .raw_writefn = raw_write + }, + { .name = dbgwcr_el1_name, .state = ARM_CP_STATE_BOTH, + .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 7, + .access = PL1_RW, .accessfn = access_tda, + .fieldoffset = offsetof(CPUARMState, cp15.dbgwcr[i]), + .writefn = dbgwcr_write, .raw_writefn = raw_write + }, + }; + define_arm_cp_regs(cpu, dbgregs); + g_free(dbgwvr_el1_name); + g_free(dbgwcr_el1_name); + } +} + 
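The hw_watchpoint_update() code moved into debug_helper.c above derives the watched byte range from either the DBGWCR MASK field (a power-of-two aligned region) or the BAS byte-select bits. The sketch below mirrors that address/length computation so it can be exercised in isolation; it is only an illustration with hypothetical names (wp_range, cto32 reimplemented via GCC/Clang builtins), assuming the same CONSTRAINED UNPREDICTABLE choices the patch makes: reserved MASK values disable the watchpoint, MASK takes precedence over BAS, and only the first run of 1s in BAS is honoured.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Count trailing ones, e.g. cto32(0b0111) == 3. */
static int cto32(uint32_t x)
{
    return x == UINT32_MAX ? 32 : __builtin_ctz(~x);
}

/* Compute the byte range watched by one (WVR, MASK, BAS) triple. */
static bool wp_range(uint64_t wvr, unsigned mask, unsigned bas,
                     uint64_t *addr, uint64_t *len)
{
    if (mask == 1 || mask == 2) {
        return false;               /* reserved MASK value: treat as disabled */
    }
    if (mask) {
        *len = 1ULL << mask;        /* aligned region of up to 2GB */
        *addr = wvr & ~(*len - 1);
        return true;
    }
    if (wvr & 4) {
        bas &= 0xf;                 /* 4-aligned address: BAS[7:4] ignored */
    }
    if (bas == 0) {
        return false;               /* no bytes selected */
    }
    int basstart = __builtin_ctz(bas);
    *len = cto32(bas >> basstart);  /* length of the first run of 1s */
    *addr = wvr + basstart;
    return true;
}

int main(void)
{
    uint64_t addr, len;

    /* BAS = 0b1100 on a word-aligned WVR watches the two bytes at WVR + 2. */
    if (wp_range(0x1000, 0, 0x0c, &addr, &len)) {
        printf("watch %llu byte(s) at 0x%llx\n",
               (unsigned long long)len, (unsigned long long)addr);
    }
    return 0;
}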
#if !defined(CONFIG_USER_ONLY) vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len) diff --git a/target/arm/helper-sme.h b/target/arm/helper-sme.h index 3bd48c235f..d2d544a696 100644 --- a/target/arm/helper-sme.h +++ b/target/arm/helper-sme.h @@ -19,3 +19,129 @@ DEF_HELPER_FLAGS_2(set_pstate_sm, TCG_CALL_NO_RWG, void, env, i32) DEF_HELPER_FLAGS_2(set_pstate_za, TCG_CALL_NO_RWG, void, env, i32) + +DEF_HELPER_FLAGS_3(sme_zero, TCG_CALL_NO_RWG, void, env, i32, i32) + +/* Move to/from vertical array slices, i.e. columns, so 'c'. */ +DEF_HELPER_FLAGS_4(sme_mova_cz_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sme_mova_zc_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sme_mova_cz_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sme_mova_zc_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sme_mova_cz_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sme_mova_zc_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sme_mova_cz_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sme_mova_zc_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sme_mova_cz_q, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sme_mova_zc_q, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(sme_ld1b_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_ld1b_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_ld1b_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_ld1b_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) + +DEF_HELPER_FLAGS_5(sme_ld1h_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_ld1h_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_ld1h_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_ld1h_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_ld1h_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_ld1h_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_ld1h_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_ld1h_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) + +DEF_HELPER_FLAGS_5(sme_ld1s_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_ld1s_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_ld1s_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_ld1s_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_ld1s_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_ld1s_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_ld1s_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_ld1s_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) + +DEF_HELPER_FLAGS_5(sme_ld1d_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_ld1d_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_ld1d_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_ld1d_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_ld1d_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_ld1d_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_ld1d_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, 
ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_ld1d_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) + +DEF_HELPER_FLAGS_5(sme_ld1q_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_ld1q_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_ld1q_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_ld1q_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_ld1q_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_ld1q_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_ld1q_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_ld1q_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) + +DEF_HELPER_FLAGS_5(sme_st1b_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_st1b_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_st1b_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_st1b_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) + +DEF_HELPER_FLAGS_5(sme_st1h_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_st1h_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_st1h_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_st1h_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_st1h_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_st1h_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_st1h_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_st1h_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) + +DEF_HELPER_FLAGS_5(sme_st1s_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_st1s_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_st1s_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_st1s_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_st1s_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_st1s_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_st1s_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_st1s_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) + +DEF_HELPER_FLAGS_5(sme_st1d_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_st1d_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_st1d_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_st1d_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_st1d_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_st1d_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_st1d_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_st1d_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) + +DEF_HELPER_FLAGS_5(sme_st1q_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_st1q_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_st1q_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_st1q_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_st1q_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_st1q_le_h_mte, TCG_CALL_NO_WG, 
void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_st1q_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_st1q_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) + +DEF_HELPER_FLAGS_5(sme_addha_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sme_addva_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sme_addha_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sme_addva_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_7(sme_fmopa_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_7(sme_fmopa_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_7(sme_fmopa_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_6(sme_bfmopa, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_6(sme_smopa_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_6(sme_umopa_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_6(sme_sumopa_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_6(sme_usmopa_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_6(sme_smopa_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_6(sme_umopa_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_6(sme_sumopa_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_6(sme_usmopa_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, i32) diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h index dc629f851a..cc4e1d8948 100644 --- a/target/arm/helper-sve.h +++ b/target/arm/helper-sve.h @@ -325,6 +325,8 @@ DEF_HELPER_FLAGS_5(sve_sel_zpzz_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve_sel_zpzz_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(sve_sel_zpzz_q, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(sve2_addp_zpzz_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) @@ -717,6 +719,8 @@ DEF_HELPER_FLAGS_4(sve_revh_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_revw_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(sme_revd_q, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + DEF_HELPER_FLAGS_4(sve_rbit_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_rbit_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve_rbit_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) diff --git a/target/arm/helper.c b/target/arm/helper.c index f6dcb1a115..cfcad97ce0 100644 --- a/target/arm/helper.c +++ b/target/arm/helper.c @@ -51,8 +51,7 @@ static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri) } } -static void raw_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) +void raw_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { assert(ri->fieldoffset); if (cpreg_field_is_64bit(ri)) { @@ -302,71 +301,6 @@ static CPAccessResult access_trap_aa32s_el1(CPUARMState *env, return CP_ACCESS_TRAP_UNCATEGORIZED; } -static uint64_t arm_mdcr_el2_eff(CPUARMState *env) -{ - return arm_is_el2_enabled(env) ? 
env->cp15.mdcr_el2 : 0; -} - -/* Check for traps to "powerdown debug" registers, which are controlled - * by MDCR.TDOSA - */ -static CPAccessResult access_tdosa(CPUARMState *env, const ARMCPRegInfo *ri, - bool isread) -{ - int el = arm_current_el(env); - uint64_t mdcr_el2 = arm_mdcr_el2_eff(env); - bool mdcr_el2_tdosa = (mdcr_el2 & MDCR_TDOSA) || (mdcr_el2 & MDCR_TDE) || - (arm_hcr_el2_eff(env) & HCR_TGE); - - if (el < 2 && mdcr_el2_tdosa) { - return CP_ACCESS_TRAP_EL2; - } - if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDOSA)) { - return CP_ACCESS_TRAP_EL3; - } - return CP_ACCESS_OK; -} - -/* Check for traps to "debug ROM" registers, which are controlled - * by MDCR_EL2.TDRA for EL2 but by the more general MDCR_EL3.TDA for EL3. - */ -static CPAccessResult access_tdra(CPUARMState *env, const ARMCPRegInfo *ri, - bool isread) -{ - int el = arm_current_el(env); - uint64_t mdcr_el2 = arm_mdcr_el2_eff(env); - bool mdcr_el2_tdra = (mdcr_el2 & MDCR_TDRA) || (mdcr_el2 & MDCR_TDE) || - (arm_hcr_el2_eff(env) & HCR_TGE); - - if (el < 2 && mdcr_el2_tdra) { - return CP_ACCESS_TRAP_EL2; - } - if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) { - return CP_ACCESS_TRAP_EL3; - } - return CP_ACCESS_OK; -} - -/* Check for traps to general debug registers, which are controlled - * by MDCR_EL2.TDA for EL2 and MDCR_EL3.TDA for EL3. - */ -static CPAccessResult access_tda(CPUARMState *env, const ARMCPRegInfo *ri, - bool isread) -{ - int el = arm_current_el(env); - uint64_t mdcr_el2 = arm_mdcr_el2_eff(env); - bool mdcr_el2_tda = (mdcr_el2 & MDCR_TDA) || (mdcr_el2 & MDCR_TDE) || - (arm_hcr_el2_eff(env) & HCR_TGE); - - if (el < 2 && mdcr_el2_tda) { - return CP_ACCESS_TRAP_EL2; - } - if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) { - return CP_ACCESS_TRAP_EL3; - } - return CP_ACCESS_OK; -} - /* Check for traps to performance monitor registers, which are controlled * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3. */ @@ -5979,111 +5913,6 @@ static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri, return CP_ACCESS_OK; } -static void oslar_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - /* Writes to OSLAR_EL1 may update the OS lock status, which can be - * read via a bit in OSLSR_EL1. - */ - int oslock; - - if (ri->state == ARM_CP_STATE_AA32) { - oslock = (value == 0xC5ACCE55); - } else { - oslock = value & 1; - } - - env->cp15.oslsr_el1 = deposit32(env->cp15.oslsr_el1, 1, 1, oslock); -} - -static const ARMCPRegInfo debug_cp_reginfo[] = { - /* DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped - * debug components. The AArch64 version of DBGDRAR is named MDRAR_EL1; - * unlike DBGDRAR it is never accessible from EL0. - * DBGDSAR is deprecated and must RAZ from v8 anyway, so it has no AArch64 - * accessor. - */ - { .name = "DBGDRAR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0, - .access = PL0_R, .accessfn = access_tdra, - .type = ARM_CP_CONST, .resetvalue = 0 }, - { .name = "MDRAR_EL1", .state = ARM_CP_STATE_AA64, - .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0, - .access = PL1_R, .accessfn = access_tdra, - .type = ARM_CP_CONST, .resetvalue = 0 }, - { .name = "DBGDSAR", .cp = 14, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0, - .access = PL0_R, .accessfn = access_tdra, - .type = ARM_CP_CONST, .resetvalue = 0 }, - /* Monitor debug system control register; the 32-bit alias is DBGDSCRext. 
*/ - { .name = "MDSCR_EL1", .state = ARM_CP_STATE_BOTH, - .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2, - .access = PL1_RW, .accessfn = access_tda, - .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1), - .resetvalue = 0 }, - /* - * MDCCSR_EL0[30:29] map to EDSCR[30:29]. Simply RAZ as the external - * Debug Communication Channel is not implemented. - */ - { .name = "MDCCSR_EL0", .state = ARM_CP_STATE_AA64, - .opc0 = 2, .opc1 = 3, .crn = 0, .crm = 1, .opc2 = 0, - .access = PL0_R, .accessfn = access_tda, - .type = ARM_CP_CONST, .resetvalue = 0 }, - /* - * DBGDSCRint[15,12,5:2] map to MDSCR_EL1[15,12,5:2]. Map all bits as - * it is unlikely a guest will care. - * We don't implement the configurable EL0 access. - */ - { .name = "DBGDSCRint", .state = ARM_CP_STATE_AA32, - .cp = 14, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0, - .type = ARM_CP_ALIAS, - .access = PL1_R, .accessfn = access_tda, - .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1), }, - { .name = "OSLAR_EL1", .state = ARM_CP_STATE_BOTH, - .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 4, - .access = PL1_W, .type = ARM_CP_NO_RAW, - .accessfn = access_tdosa, - .writefn = oslar_write }, - { .name = "OSLSR_EL1", .state = ARM_CP_STATE_BOTH, - .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 4, - .access = PL1_R, .resetvalue = 10, - .accessfn = access_tdosa, - .fieldoffset = offsetof(CPUARMState, cp15.oslsr_el1) }, - /* Dummy OSDLR_EL1: 32-bit Linux will read this */ - { .name = "OSDLR_EL1", .state = ARM_CP_STATE_BOTH, - .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 4, - .access = PL1_RW, .accessfn = access_tdosa, - .type = ARM_CP_NOP }, - /* Dummy DBGVCR: Linux wants to clear this on startup, but we don't - * implement vector catch debug events yet. - */ - { .name = "DBGVCR", - .cp = 14, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0, - .access = PL1_RW, .accessfn = access_tda, - .type = ARM_CP_NOP }, - /* Dummy DBGVCR32_EL2 (which is only for a 64-bit hypervisor - * to save and restore a 32-bit guest's DBGVCR) - */ - { .name = "DBGVCR32_EL2", .state = ARM_CP_STATE_AA64, - .opc0 = 2, .opc1 = 4, .crn = 0, .crm = 7, .opc2 = 0, - .access = PL2_RW, .accessfn = access_tda, - .type = ARM_CP_NOP | ARM_CP_EL3_NO_EL2_KEEP }, - /* Dummy MDCCINT_EL1, since we don't implement the Debug Communications - * Channel but Linux may try to access this register. The 32-bit - * alias is DBGDCCINT. - */ - { .name = "MDCCINT_EL1", .state = ARM_CP_STATE_BOTH, - .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0, - .access = PL1_RW, .accessfn = access_tda, - .type = ARM_CP_NOP }, -}; - -static const ARMCPRegInfo debug_lpae_cp_reginfo[] = { - /* 64 bit access versions of the (dummy) debug registers */ - { .name = "DBGDRAR", .cp = 14, .crm = 1, .opc1 = 0, - .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 }, - { .name = "DBGDSAR", .cp = 14, .crm = 2, .opc1 = 0, - .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 }, -}; - /* * Check for traps to RAS registers, which are controlled * by HCR_EL2.TERR and SCR_EL3.TERR. @@ -6269,6 +6098,32 @@ int sme_exception_el(CPUARMState *env, int el) return 0; } +/* This corresponds to the ARM pseudocode function IsFullA64Enabled(). 
*/ +static bool sme_fa64(CPUARMState *env, int el) +{ + if (!cpu_isar_feature(aa64_sme_fa64, env_archcpu(env))) { + return false; + } + + if (el <= 1 && !el_is_in_host(env, el)) { + if (!FIELD_EX64(env->vfp.smcr_el[1], SMCR, FA64)) { + return false; + } + } + if (el <= 2 && arm_is_el2_enabled(env)) { + if (!FIELD_EX64(env->vfp.smcr_el[2], SMCR, FA64)) { + return false; + } + } + if (arm_feature(env, ARM_FEATURE_EL3)) { + if (!FIELD_EX64(env->vfp.smcr_el[3], SMCR, FA64)) { + return false; + } + } + + return true; +} + /* * Given that SVE is enabled, return the vector length for EL. */ @@ -6462,346 +6317,6 @@ static const ARMCPRegInfo sme_reginfo[] = { }; #endif /* TARGET_AARCH64 */ -void hw_watchpoint_update(ARMCPU *cpu, int n) -{ - CPUARMState *env = &cpu->env; - vaddr len = 0; - vaddr wvr = env->cp15.dbgwvr[n]; - uint64_t wcr = env->cp15.dbgwcr[n]; - int mask; - int flags = BP_CPU | BP_STOP_BEFORE_ACCESS; - - if (env->cpu_watchpoint[n]) { - cpu_watchpoint_remove_by_ref(CPU(cpu), env->cpu_watchpoint[n]); - env->cpu_watchpoint[n] = NULL; - } - - if (!FIELD_EX64(wcr, DBGWCR, E)) { - /* E bit clear : watchpoint disabled */ - return; - } - - switch (FIELD_EX64(wcr, DBGWCR, LSC)) { - case 0: - /* LSC 00 is reserved and must behave as if the wp is disabled */ - return; - case 1: - flags |= BP_MEM_READ; - break; - case 2: - flags |= BP_MEM_WRITE; - break; - case 3: - flags |= BP_MEM_ACCESS; - break; - } - - /* Attempts to use both MASK and BAS fields simultaneously are - * CONSTRAINED UNPREDICTABLE; we opt to ignore BAS in this case, - * thus generating a watchpoint for every byte in the masked region. - */ - mask = FIELD_EX64(wcr, DBGWCR, MASK); - if (mask == 1 || mask == 2) { - /* Reserved values of MASK; we must act as if the mask value was - * some non-reserved value, or as if the watchpoint were disabled. - * We choose the latter. - */ - return; - } else if (mask) { - /* Watchpoint covers an aligned area up to 2GB in size */ - len = 1ULL << mask; - /* If masked bits in WVR are not zero it's CONSTRAINED UNPREDICTABLE - * whether the watchpoint fires when the unmasked bits match; we opt - * to generate the exceptions. - */ - wvr &= ~(len - 1); - } else { - /* Watchpoint covers bytes defined by the byte address select bits */ - int bas = FIELD_EX64(wcr, DBGWCR, BAS); - int basstart; - - if (extract64(wvr, 2, 1)) { - /* Deprecated case of an only 4-aligned address. BAS[7:4] are - * ignored, and BAS[3:0] define which bytes to watch. - */ - bas &= 0xf; - } - - if (bas == 0) { - /* This must act as if the watchpoint is disabled */ - return; - } - - /* The BAS bits are supposed to be programmed to indicate a contiguous - * range of bytes. Otherwise it is CONSTRAINED UNPREDICTABLE whether - * we fire for each byte in the word/doubleword addressed by the WVR. - * We choose to ignore any non-zero bits after the first range of 1s. - */ - basstart = ctz32(bas); - len = cto32(bas >> basstart); - wvr += basstart; - } - - cpu_watchpoint_insert(CPU(cpu), wvr, len, flags, - &env->cpu_watchpoint[n]); -} - -void hw_watchpoint_update_all(ARMCPU *cpu) -{ - int i; - CPUARMState *env = &cpu->env; - - /* Completely clear out existing QEMU watchpoints and our array, to - * avoid possible stale entries following migration load. 
- */ - cpu_watchpoint_remove_all(CPU(cpu), BP_CPU); - memset(env->cpu_watchpoint, 0, sizeof(env->cpu_watchpoint)); - - for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_watchpoint); i++) { - hw_watchpoint_update(cpu, i); - } -} - -static void dbgwvr_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - ARMCPU *cpu = env_archcpu(env); - int i = ri->crm; - - /* - * Bits [1:0] are RES0. - * - * It is IMPLEMENTATION DEFINED whether [63:49] ([63:53] with FEAT_LVA) - * are hardwired to the value of bit [48] ([52] with FEAT_LVA), or if - * they contain the value written. It is CONSTRAINED UNPREDICTABLE - * whether the RESS bits are ignored when comparing an address. - * - * Therefore we are allowed to compare the entire register, which lets - * us avoid considering whether or not FEAT_LVA is actually enabled. - */ - value &= ~3ULL; - - raw_write(env, ri, value); - hw_watchpoint_update(cpu, i); -} - -static void dbgwcr_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - ARMCPU *cpu = env_archcpu(env); - int i = ri->crm; - - raw_write(env, ri, value); - hw_watchpoint_update(cpu, i); -} - -void hw_breakpoint_update(ARMCPU *cpu, int n) -{ - CPUARMState *env = &cpu->env; - uint64_t bvr = env->cp15.dbgbvr[n]; - uint64_t bcr = env->cp15.dbgbcr[n]; - vaddr addr; - int bt; - int flags = BP_CPU; - - if (env->cpu_breakpoint[n]) { - cpu_breakpoint_remove_by_ref(CPU(cpu), env->cpu_breakpoint[n]); - env->cpu_breakpoint[n] = NULL; - } - - if (!extract64(bcr, 0, 1)) { - /* E bit clear : watchpoint disabled */ - return; - } - - bt = extract64(bcr, 20, 4); - - switch (bt) { - case 4: /* unlinked address mismatch (reserved if AArch64) */ - case 5: /* linked address mismatch (reserved if AArch64) */ - qemu_log_mask(LOG_UNIMP, - "arm: address mismatch breakpoint types not implemented\n"); - return; - case 0: /* unlinked address match */ - case 1: /* linked address match */ - { - /* - * Bits [1:0] are RES0. - * - * It is IMPLEMENTATION DEFINED whether bits [63:49] - * ([63:53] for FEAT_LVA) are hardwired to a copy of the sign bit - * of the VA field ([48] or [52] for FEAT_LVA), or whether the - * value is read as written. It is CONSTRAINED UNPREDICTABLE - * whether the RESS bits are ignored when comparing an address. - * Therefore we are allowed to compare the entire register, which - * lets us avoid considering whether FEAT_LVA is actually enabled. - * - * The BAS field is used to allow setting breakpoints on 16-bit - * wide instructions; it is CONSTRAINED UNPREDICTABLE whether - * a bp will fire if the addresses covered by the bp and the addresses - * covered by the insn overlap but the insn doesn't start at the - * start of the bp address range. We choose to require the insn and - * the bp to have the same address. The constraints on writing to - * BAS enforced in dbgbcr_write mean we have only four cases: - * 0b0000 => no breakpoint - * 0b0011 => breakpoint on addr - * 0b1100 => breakpoint on addr + 2 - * 0b1111 => breakpoint on addr - * See also figure D2-3 in the v8 ARM ARM (DDI0487A.c). 
- */ - int bas = extract64(bcr, 5, 4); - addr = bvr & ~3ULL; - if (bas == 0) { - return; - } - if (bas == 0xc) { - addr += 2; - } - break; - } - case 2: /* unlinked context ID match */ - case 8: /* unlinked VMID match (reserved if no EL2) */ - case 10: /* unlinked context ID and VMID match (reserved if no EL2) */ - qemu_log_mask(LOG_UNIMP, - "arm: unlinked context breakpoint types not implemented\n"); - return; - case 9: /* linked VMID match (reserved if no EL2) */ - case 11: /* linked context ID and VMID match (reserved if no EL2) */ - case 3: /* linked context ID match */ - default: - /* We must generate no events for Linked context matches (unless - * they are linked to by some other bp/wp, which is handled in - * updates for the linking bp/wp). We choose to also generate no events - * for reserved values. - */ - return; - } - - cpu_breakpoint_insert(CPU(cpu), addr, flags, &env->cpu_breakpoint[n]); -} - -void hw_breakpoint_update_all(ARMCPU *cpu) -{ - int i; - CPUARMState *env = &cpu->env; - - /* Completely clear out existing QEMU breakpoints and our array, to - * avoid possible stale entries following migration load. - */ - cpu_breakpoint_remove_all(CPU(cpu), BP_CPU); - memset(env->cpu_breakpoint, 0, sizeof(env->cpu_breakpoint)); - - for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_breakpoint); i++) { - hw_breakpoint_update(cpu, i); - } -} - -static void dbgbvr_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - ARMCPU *cpu = env_archcpu(env); - int i = ri->crm; - - raw_write(env, ri, value); - hw_breakpoint_update(cpu, i); -} - -static void dbgbcr_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - ARMCPU *cpu = env_archcpu(env); - int i = ri->crm; - - /* BAS[3] is a read-only copy of BAS[2], and BAS[1] a read-only - * copy of BAS[0]. - */ - value = deposit64(value, 6, 1, extract64(value, 5, 1)); - value = deposit64(value, 8, 1, extract64(value, 7, 1)); - - raw_write(env, ri, value); - hw_breakpoint_update(cpu, i); -} - -static void define_debug_regs(ARMCPU *cpu) -{ - /* Define v7 and v8 architectural debug registers. - * These are just dummy implementations for now. - */ - int i; - int wrps, brps, ctx_cmps; - - /* - * The Arm ARM says DBGDIDR is optional and deprecated if EL1 cannot - * use AArch32. Given that bit 15 is RES1, if the value is 0 then - * the register must not exist for this cpu. 
- */ - if (cpu->isar.dbgdidr != 0) { - ARMCPRegInfo dbgdidr = { - .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, - .opc1 = 0, .opc2 = 0, - .access = PL0_R, .accessfn = access_tda, - .type = ARM_CP_CONST, .resetvalue = cpu->isar.dbgdidr, - }; - define_one_arm_cp_reg(cpu, &dbgdidr); - } - - brps = arm_num_brps(cpu); - wrps = arm_num_wrps(cpu); - ctx_cmps = arm_num_ctx_cmps(cpu); - - assert(ctx_cmps <= brps); - - define_arm_cp_regs(cpu, debug_cp_reginfo); - - if (arm_feature(&cpu->env, ARM_FEATURE_LPAE)) { - define_arm_cp_regs(cpu, debug_lpae_cp_reginfo); - } - - for (i = 0; i < brps; i++) { - char *dbgbvr_el1_name = g_strdup_printf("DBGBVR%d_EL1", i); - char *dbgbcr_el1_name = g_strdup_printf("DBGBCR%d_EL1", i); - ARMCPRegInfo dbgregs[] = { - { .name = dbgbvr_el1_name, .state = ARM_CP_STATE_BOTH, - .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 4, - .access = PL1_RW, .accessfn = access_tda, - .fieldoffset = offsetof(CPUARMState, cp15.dbgbvr[i]), - .writefn = dbgbvr_write, .raw_writefn = raw_write - }, - { .name = dbgbcr_el1_name, .state = ARM_CP_STATE_BOTH, - .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 5, - .access = PL1_RW, .accessfn = access_tda, - .fieldoffset = offsetof(CPUARMState, cp15.dbgbcr[i]), - .writefn = dbgbcr_write, .raw_writefn = raw_write - }, - }; - define_arm_cp_regs(cpu, dbgregs); - g_free(dbgbvr_el1_name); - g_free(dbgbcr_el1_name); - } - - for (i = 0; i < wrps; i++) { - char *dbgwvr_el1_name = g_strdup_printf("DBGWVR%d_EL1", i); - char *dbgwcr_el1_name = g_strdup_printf("DBGWCR%d_EL1", i); - ARMCPRegInfo dbgregs[] = { - { .name = dbgwvr_el1_name, .state = ARM_CP_STATE_BOTH, - .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 6, - .access = PL1_RW, .accessfn = access_tda, - .fieldoffset = offsetof(CPUARMState, cp15.dbgwvr[i]), - .writefn = dbgwvr_write, .raw_writefn = raw_write - }, - { .name = dbgwcr_el1_name, .state = ARM_CP_STATE_BOTH, - .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 7, - .access = PL1_RW, .accessfn = access_tda, - .fieldoffset = offsetof(CPUARMState, cp15.dbgwcr[i]), - .writefn = dbgwcr_write, .raw_writefn = raw_write - }, - }; - define_arm_cp_regs(cpu, dbgregs); - g_free(dbgwvr_el1_name); - g_free(dbgwcr_el1_name); - } -} - static void define_pmu_regs(ARMCPU *cpu) { /* @@ -11312,6 +10827,20 @@ static CPUARMTBFlags rebuild_hflags_a32(CPUARMState *env, int fp_el, DP_TBFLAG_ANY(flags, PSTATE__IL, 1); } + /* + * The SME exception we are testing for is raised via + * AArch64.CheckFPAdvSIMDEnabled(), as called from + * AArch32.CheckAdvSIMDOrFPEnabled(). + */ + if (el == 0 + && FIELD_EX64(env->svcr, SVCR, SM) + && (!arm_is_el2_enabled(env) + || (arm_el_is_aa64(env, 2) && !(env->cp15.hcr_el2 & HCR_TGE))) + && arm_el_is_aa64(env, 1) + && !sme_fa64(env, el)) { + DP_TBFLAG_A32(flags, SME_TRAP_NONSTREAMING, 1); + } + return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags); } @@ -11361,6 +10890,7 @@ static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el, } if (FIELD_EX64(env->svcr, SVCR, SM)) { DP_TBFLAG_A64(flags, PSTATE_SM, 1); + DP_TBFLAG_A64(flags, SME_TRAP_NONSTREAMING, !sme_fa64(env, el)); } DP_TBFLAG_A64(flags, PSTATE_ZA, FIELD_EX64(env->svcr, SVCR, ZA)); } @@ -11712,6 +11242,19 @@ void aarch64_sve_change_el(CPUARMState *env, int old_el, return; } + old_a64 = old_el ? arm_el_is_aa64(env, old_el) : el0_a64; + new_a64 = new_el ? 
arm_el_is_aa64(env, new_el) : el0_a64; + + /* + * Both AArch64.TakeException and AArch64.ExceptionReturn + * invoke ResetSVEState when taking an exception from, or + * returning to, AArch32 state when PSTATE.SM is enabled. + */ + if (old_a64 != new_a64 && FIELD_EX64(env->svcr, SVCR, SM)) { + arm_reset_sve_state(env); + return; + } + /* * DDI0584A.d sec 3.2: "If SVE instructions are disabled or trapped * at ELx, or not available because the EL is in AArch32 state, then @@ -11724,10 +11267,8 @@ void aarch64_sve_change_el(CPUARMState *env, int old_el, * we already have the correct register contents when encountering the * vq0->vq0 transition between EL0->EL1. */ - old_a64 = old_el ? arm_el_is_aa64(env, old_el) : el0_a64; old_len = (old_a64 && !sve_exception_el(env, old_el) ? sve_vqm1_for_el(env, old_el) : 0); - new_a64 = new_el ? arm_el_is_aa64(env, new_el) : el0_a64; new_len = (new_a64 && !sve_exception_el(env, new_el) ? sve_vqm1_for_el(env, new_el) : 0); diff --git a/target/arm/helper.h b/target/arm/helper.h index 3a8ce42ab0..92f36d9dbb 100644 --- a/target/arm/helper.h +++ b/target/arm/helper.h @@ -1019,6 +1019,24 @@ DEF_HELPER_FLAGS_6(gvec_bfmlal, TCG_CALL_NO_RWG, DEF_HELPER_FLAGS_6(gvec_bfmlal_idx, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_sclamp_b, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_sclamp_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_sclamp_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_sclamp_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_5(gvec_uclamp_b, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_uclamp_h, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_uclamp_s, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_uclamp_d, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) + #ifdef TARGET_AARCH64 #include "helper-a64.h" #include "helper-sve.h" diff --git a/target/arm/internals.h b/target/arm/internals.h index c66f74a0db..00e2e710f6 100644 --- a/target/arm/internals.h +++ b/target/arm/internals.h @@ -1307,6 +1307,15 @@ int exception_target_el(CPUARMState *env); bool arm_singlestep_active(CPUARMState *env); bool arm_generate_debug_exceptions(CPUARMState *env); +/* Add the cpreg definitions for debug related system registers */ +void define_debug_regs(ARMCPU *cpu); + +/* Effective value of MDCR_EL2 */ +static inline uint64_t arm_mdcr_el2_eff(CPUARMState *env) +{ + return arm_is_el2_enabled(env) ? env->cp15.mdcr_el2 : 0; +} + /* Powers of 2 for sve_vq_map et al. 
*/ #define SVE_VQ_POW2_MAP \ ((1 << (1 - 1)) | (1 << (2 - 1)) | \ diff --git a/target/arm/meson.build b/target/arm/meson.build index 43dc600547..87e911b27f 100644 --- a/target/arm/meson.build +++ b/target/arm/meson.build @@ -1,5 +1,7 @@ gen = [ decodetree.process('sve.decode', extra_args: '--decode=disas_sve'), + decodetree.process('sme.decode', extra_args: '--decode=disas_sme'), + decodetree.process('sme-fa64.decode', extra_args: '--static-decode=disas_sme_fa64'), decodetree.process('neon-shared.decode', extra_args: '--decode=disas_neon_shared'), decodetree.process('neon-dp.decode', extra_args: '--decode=disas_neon_dp'), decodetree.process('neon-ls.decode', extra_args: '--decode=disas_neon_ls'), @@ -50,6 +52,7 @@ arm_ss.add(when: 'TARGET_AARCH64', if_true: files( 'sme_helper.c', 'translate-a64.c', 'translate-sve.c', + 'translate-sme.c', )) arm_softmmu_ss = ss.source_set() diff --git a/target/arm/ptw.c b/target/arm/ptw.c index da478104f0..e71fc1f429 100644 --- a/target/arm/ptw.c +++ b/target/arm/ptw.c @@ -1257,7 +1257,7 @@ static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address, * clear the lower bits here before ORing in the low vaddr bits. */ page_size = (1ULL << ((stride * (4 - level)) + 3)); - descaddr &= ~(page_size - 1); + descaddr &= ~(hwaddr)(page_size - 1); descaddr |= (address & (page_size - 1)); /* Extract attributes from the descriptor */ attrs = extract64(descriptor, 2, 10) diff --git a/target/arm/sme-fa64.decode b/target/arm/sme-fa64.decode new file mode 100644 index 0000000000..47708ccc8d --- /dev/null +++ b/target/arm/sme-fa64.decode @@ -0,0 +1,60 @@ +# AArch64 SME allowed instruction decoding +# +# Copyright (c) 2022 Linaro, Ltd +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, see <http://www.gnu.org/licenses/>. 
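+# Each OK pattern below names an encoding that remains legal in streaming
+# SVE mode even without FEAT_SME_FA64; each FAIL pattern marks a range of
+# encodings that must instead raise the SME "illegal in streaming mode" trap.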
+ +# +# This file is processed by scripts/decodetree.py +# + +# These patterns are taken from Appendix E1.1 of DDI0616 A.a, +# Arm Architecture Reference Manual Supplement, +# The Scalable Matrix Extension (SME), for Armv9-A + +{ + [ + OK 0-00 1110 0000 0001 0010 11-- ---- ---- # SMOV W|Xd,Vn.B[0] + OK 0-00 1110 0000 0010 0010 11-- ---- ---- # SMOV W|Xd,Vn.H[0] + OK 0100 1110 0000 0100 0010 11-- ---- ---- # SMOV Xd,Vn.S[0] + OK 0000 1110 0000 0001 0011 11-- ---- ---- # UMOV Wd,Vn.B[0] + OK 0000 1110 0000 0010 0011 11-- ---- ---- # UMOV Wd,Vn.H[0] + OK 0000 1110 0000 0100 0011 11-- ---- ---- # UMOV Wd,Vn.S[0] + OK 0100 1110 0000 1000 0011 11-- ---- ---- # UMOV Xd,Vn.D[0] + ] + FAIL 0--0 111- ---- ---- ---- ---- ---- ---- # Advanced SIMD vector operations +} + +{ + [ + OK 0101 1110 --1- ---- 11-1 11-- ---- ---- # FMULX/FRECPS/FRSQRTS (scalar) + OK 0101 1110 -10- ---- 00-1 11-- ---- ---- # FMULX/FRECPS/FRSQRTS (scalar, FP16) + OK 01-1 1110 1-10 0001 11-1 10-- ---- ---- # FRECPE/FRSQRTE/FRECPX (scalar) + OK 01-1 1110 1111 1001 11-1 10-- ---- ---- # FRECPE/FRSQRTE/FRECPX (scalar, FP16) + ] + FAIL 01-1 111- ---- ---- ---- ---- ---- ---- # Advanced SIMD single-element operations +} + +FAIL 0-00 110- ---- ---- ---- ---- ---- ---- # Advanced SIMD structure load/store +FAIL 1100 1110 ---- ---- ---- ---- ---- ---- # Advanced SIMD cryptography extensions +FAIL 0001 1110 0111 1110 0000 00-- ---- ---- # FJCVTZS + +# These are the "avoidance of doubt" final table of Illegal Advanced SIMD instructions +# We don't actually need to include these, as the default is OK. +# -001 111- ---- ---- ---- ---- ---- ---- # Scalar floating-point operations +# --10 110- ---- ---- ---- ---- ---- ---- # Load/store pair of FP registers +# --01 1100 ---- ---- ---- ---- ---- ---- # Load FP register (PC-relative literal) +# --11 1100 --0- ---- ---- ---- ---- ---- # Load/store FP register (unscaled imm) +# --11 1100 --1- ---- ---- ---- ---- --10 # Load/store FP register (register offset) +# --11 1101 ---- ---- ---- ---- ---- ---- # Load/store FP register (scaled imm) diff --git a/target/arm/sme.decode b/target/arm/sme.decode new file mode 100644 index 0000000000..628804e37a --- /dev/null +++ b/target/arm/sme.decode @@ -0,0 +1,88 @@ +# AArch64 SME instruction descriptions +# +# Copyright (c) 2022 Linaro, Ltd +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, see <http://www.gnu.org/licenses/>. + +# +# This file is processed by scripts/decodetree.py +# + +### SME Misc + +ZERO 11000000 00 001 00000000000 imm:8 + +### SME Move into/from Array + +%mova_rs 13:2 !function=plus_12 +&mova esz rs pg zr za_imm v:bool to_vec:bool + +MOVA 11000000 esz:2 00000 0 v:1 .. pg:3 zr:5 0 za_imm:4 \ + &mova to_vec=0 rs=%mova_rs +MOVA 11000000 11 00000 1 v:1 .. pg:3 zr:5 0 za_imm:4 \ + &mova to_vec=0 rs=%mova_rs esz=4 + +MOVA 11000000 esz:2 00001 0 v:1 .. pg:3 0 za_imm:4 zr:5 \ + &mova to_vec=1 rs=%mova_rs +MOVA 11000000 11 00001 1 v:1 .. 
pg:3 0 za_imm:4 zr:5 \ + &mova to_vec=1 rs=%mova_rs esz=4 + +### SME Memory + +&ldst esz rs pg rn rm za_imm v:bool st:bool + +LDST1 1110000 0 esz:2 st:1 rm:5 v:1 .. pg:3 rn:5 0 za_imm:4 \ + &ldst rs=%mova_rs +LDST1 1110000 111 st:1 rm:5 v:1 .. pg:3 rn:5 0 za_imm:4 \ + &ldst esz=4 rs=%mova_rs + +&ldstr rv rn imm +@ldstr ....... ... . ...... .. ... rn:5 . imm:4 \ + &ldstr rv=%mova_rs + +LDR 1110000 100 0 000000 .. 000 ..... 0 .... @ldstr +STR 1110000 100 1 000000 .. 000 ..... 0 .... @ldstr + +### SME Add Vector to Array + +&adda zad zn pm pn +@adda_32 ........ .. ..... . pm:3 pn:3 zn:5 ... zad:2 &adda +@adda_64 ........ .. ..... . pm:3 pn:3 zn:5 .. zad:3 &adda + +ADDHA_s 11000000 10 01000 0 ... ... ..... 000 .. @adda_32 +ADDVA_s 11000000 10 01000 1 ... ... ..... 000 .. @adda_32 +ADDHA_d 11000000 11 01000 0 ... ... ..... 00 ... @adda_64 +ADDVA_d 11000000 11 01000 1 ... ... ..... 00 ... @adda_64 + +### SME Outer Product + +&op zad zn zm pm pn sub:bool +@op_32 ........ ... zm:5 pm:3 pn:3 zn:5 sub:1 .. zad:2 &op +@op_64 ........ ... zm:5 pm:3 pn:3 zn:5 sub:1 . zad:3 &op + +FMOPA_s 10000000 100 ..... ... ... ..... . 00 .. @op_32 +FMOPA_d 10000000 110 ..... ... ... ..... . 0 ... @op_64 + +BFMOPA 10000001 100 ..... ... ... ..... . 00 .. @op_32 +FMOPA_h 10000001 101 ..... ... ... ..... . 00 .. @op_32 + +SMOPA_s 1010000 0 10 0 ..... ... ... ..... . 00 .. @op_32 +SUMOPA_s 1010000 0 10 1 ..... ... ... ..... . 00 .. @op_32 +USMOPA_s 1010000 1 10 0 ..... ... ... ..... . 00 .. @op_32 +UMOPA_s 1010000 1 10 1 ..... ... ... ..... . 00 .. @op_32 + +SMOPA_d 1010000 0 11 0 ..... ... ... ..... . 0 ... @op_64 +SUMOPA_d 1010000 0 11 1 ..... ... ... ..... . 0 ... @op_64 +USMOPA_d 1010000 1 11 0 ..... ... ... ..... . 0 ... @op_64 +UMOPA_d 1010000 1 11 1 ..... ... ... ..... . 0 ... @op_64 diff --git a/target/arm/sme_helper.c b/target/arm/sme_helper.c index b215725594..f891306bb9 100644 --- a/target/arm/sme_helper.c +++ b/target/arm/sme_helper.c @@ -20,7 +20,14 @@ #include "qemu/osdep.h" #include "cpu.h" #include "internals.h" +#include "tcg/tcg-gvec-desc.h" #include "exec/helper-proto.h" +#include "exec/cpu_ldst.h" +#include "exec/exec-all.h" +#include "qemu/int128.h" +#include "fpu/softfloat.h" +#include "vec_internal.h" +#include "sve_ldst_internal.h" /* ResetSVEState */ void arm_reset_sve_state(CPUARMState *env) @@ -59,3 +66,1136 @@ void helper_set_pstate_za(CPUARMState *env, uint32_t i) memset(env->zarray, 0, sizeof(env->zarray)); } } + +void helper_sme_zero(CPUARMState *env, uint32_t imm, uint32_t svl) +{ + uint32_t i; + + /* + * Special case clearing the entire ZA space. + * This falls into the CONSTRAINED UNPREDICTABLE zeroing of any + * parts of the ZA storage outside of SVL. + */ + if (imm == 0xff) { + memset(env->zarray, 0, sizeof(env->zarray)); + return; + } + + /* + * Recall that ZAnH.D[m] is spread across ZA[n+8*m], + * so each row is discontiguous within ZA[]. + */ + for (i = 0; i < svl; i++) { + if (imm & (1 << (i % 8))) { + memset(&env->zarray[i], 0, svl); + } + } +} + + +/* + * When considering the ZA storage as an array of elements of + * type T, the index within that array of the Nth element of + * a vertical slice of a tile can be calculated like this, + * regardless of the size of type T. This is because the tiles + * are interleaved, so if type T is size N bytes then row 1 of + * the tile is N rows away from row 0. 
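+ * For example, with 4-byte elements (N == 4), element 1 of a vertical slice
+ * lives in row 1 of the tile, which is 4 ZA rows below row 0: a byte offset
+ * of 4 * sizeof(ARMVectorReg), i.e. an index of sizeof(ARMVectorReg) when
+ * the storage is viewed as an array of 4-byte elements.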
The division by N to + * convert a byte offset into an array index and the multiplication + * by N to convert from vslice-index-within-the-tile to + * the index within the ZA storage cancel out. + */ +#define tile_vslice_index(i) ((i) * sizeof(ARMVectorReg)) + +/* + * When doing byte arithmetic on the ZA storage, the element + * byteoff bytes away in a tile vertical slice is always this + * many bytes away in the ZA storage, regardless of the + * size of the tile element, assuming that byteoff is a multiple + * of the element size. Again this is because of the interleaving + * of the tiles. For instance if we have 1 byte per element then + * each row of the ZA storage has one byte of the vslice data, + * and (counting from 0) byte 8 goes in row 8 of the storage + * at offset (8 * row-size-in-bytes). + * If we have 8 bytes per element then each row of the ZA storage + * has 8 bytes of the data, but there are 8 interleaved tiles and + * so byte 8 of the data goes into row 1 of the tile, + * which is again row 8 of the storage, so the offset is still + * (8 * row-size-in-bytes). Similarly for other element sizes. + */ +#define tile_vslice_offset(byteoff) ((byteoff) * sizeof(ARMVectorReg)) + + +/* + * Move Zreg vector to ZArray column. + */ +#define DO_MOVA_C(NAME, TYPE, H) \ +void HELPER(NAME)(void *za, void *vn, void *vg, uint32_t desc) \ +{ \ + int i, oprsz = simd_oprsz(desc); \ + for (i = 0; i < oprsz; ) { \ + uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3)); \ + do { \ + if (pg & 1) { \ + *(TYPE *)(za + tile_vslice_offset(i)) = *(TYPE *)(vn + H(i)); \ + } \ + i += sizeof(TYPE); \ + pg >>= sizeof(TYPE); \ + } while (i & 15); \ + } \ +} + +DO_MOVA_C(sme_mova_cz_b, uint8_t, H1) +DO_MOVA_C(sme_mova_cz_h, uint16_t, H1_2) +DO_MOVA_C(sme_mova_cz_s, uint32_t, H1_4) + +void HELPER(sme_mova_cz_d)(void *za, void *vn, void *vg, uint32_t desc) +{ + int i, oprsz = simd_oprsz(desc) / 8; + uint8_t *pg = vg; + uint64_t *n = vn; + uint64_t *a = za; + + for (i = 0; i < oprsz; i++) { + if (pg[H1(i)] & 1) { + a[tile_vslice_index(i)] = n[i]; + } + } +} + +void HELPER(sme_mova_cz_q)(void *za, void *vn, void *vg, uint32_t desc) +{ + int i, oprsz = simd_oprsz(desc) / 16; + uint16_t *pg = vg; + Int128 *n = vn; + Int128 *a = za; + + /* + * Int128 is used here simply to copy 16 bytes, and to simplify + * the address arithmetic. + */ + for (i = 0; i < oprsz; i++) { + if (pg[H2(i)] & 1) { + a[tile_vslice_index(i)] = n[i]; + } + } +} + +#undef DO_MOVA_C + +/* + * Move ZArray column to Zreg vector. 
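+ * As in DO_MOVA_C above, the predicate is read 16 bits at a time, one bit
+ * per byte of vector data, so each inner loop walks one 16-byte granule,
+ * testing the least significant predicate bit of each element.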
+ */ +#define DO_MOVA_Z(NAME, TYPE, H) \ +void HELPER(NAME)(void *vd, void *za, void *vg, uint32_t desc) \ +{ \ + int i, oprsz = simd_oprsz(desc); \ + for (i = 0; i < oprsz; ) { \ + uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3)); \ + do { \ + if (pg & 1) { \ + *(TYPE *)(vd + H(i)) = *(TYPE *)(za + tile_vslice_offset(i)); \ + } \ + i += sizeof(TYPE); \ + pg >>= sizeof(TYPE); \ + } while (i & 15); \ + } \ +} + +DO_MOVA_Z(sme_mova_zc_b, uint8_t, H1) +DO_MOVA_Z(sme_mova_zc_h, uint16_t, H1_2) +DO_MOVA_Z(sme_mova_zc_s, uint32_t, H1_4) + +void HELPER(sme_mova_zc_d)(void *vd, void *za, void *vg, uint32_t desc) +{ + int i, oprsz = simd_oprsz(desc) / 8; + uint8_t *pg = vg; + uint64_t *d = vd; + uint64_t *a = za; + + for (i = 0; i < oprsz; i++) { + if (pg[H1(i)] & 1) { + d[i] = a[tile_vslice_index(i)]; + } + } +} + +void HELPER(sme_mova_zc_q)(void *vd, void *za, void *vg, uint32_t desc) +{ + int i, oprsz = simd_oprsz(desc) / 16; + uint16_t *pg = vg; + Int128 *d = vd; + Int128 *a = za; + + /* + * Int128 is used here simply to copy 16 bytes, and to simplify + * the address arithmetic. + */ + for (i = 0; i < oprsz; i++, za += sizeof(ARMVectorReg)) { + if (pg[H2(i)] & 1) { + d[i] = a[tile_vslice_index(i)]; + } + } +} + +#undef DO_MOVA_Z + +/* + * Clear elements in a tile slice comprising len bytes. + */ + +typedef void ClearFn(void *ptr, size_t off, size_t len); + +static void clear_horizontal(void *ptr, size_t off, size_t len) +{ + memset(ptr + off, 0, len); +} + +static void clear_vertical_b(void *vptr, size_t off, size_t len) +{ + for (size_t i = 0; i < len; ++i) { + *(uint8_t *)(vptr + tile_vslice_offset(i + off)) = 0; + } +} + +static void clear_vertical_h(void *vptr, size_t off, size_t len) +{ + for (size_t i = 0; i < len; i += 2) { + *(uint16_t *)(vptr + tile_vslice_offset(i + off)) = 0; + } +} + +static void clear_vertical_s(void *vptr, size_t off, size_t len) +{ + for (size_t i = 0; i < len; i += 4) { + *(uint32_t *)(vptr + tile_vslice_offset(i + off)) = 0; + } +} + +static void clear_vertical_d(void *vptr, size_t off, size_t len) +{ + for (size_t i = 0; i < len; i += 8) { + *(uint64_t *)(vptr + tile_vslice_offset(i + off)) = 0; + } +} + +static void clear_vertical_q(void *vptr, size_t off, size_t len) +{ + for (size_t i = 0; i < len; i += 16) { + memset(vptr + tile_vslice_offset(i + off), 0, 16); + } +} + +/* + * Copy elements from an array into a tile slice comprising len bytes. 
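+ * These are used by sme_ld1() below: when a load has to take the slow MMIO
+ * path it is first performed into a scratch vector, and the result is then
+ * copied into ZA in a single step.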
+ */ + +typedef void CopyFn(void *dst, const void *src, size_t len); + +static void copy_horizontal(void *dst, const void *src, size_t len) +{ + memcpy(dst, src, len); +} + +static void copy_vertical_b(void *vdst, const void *vsrc, size_t len) +{ + const uint8_t *src = vsrc; + uint8_t *dst = vdst; + size_t i; + + for (i = 0; i < len; ++i) { + dst[tile_vslice_index(i)] = src[i]; + } +} + +static void copy_vertical_h(void *vdst, const void *vsrc, size_t len) +{ + const uint16_t *src = vsrc; + uint16_t *dst = vdst; + size_t i; + + for (i = 0; i < len / 2; ++i) { + dst[tile_vslice_index(i)] = src[i]; + } +} + +static void copy_vertical_s(void *vdst, const void *vsrc, size_t len) +{ + const uint32_t *src = vsrc; + uint32_t *dst = vdst; + size_t i; + + for (i = 0; i < len / 4; ++i) { + dst[tile_vslice_index(i)] = src[i]; + } +} + +static void copy_vertical_d(void *vdst, const void *vsrc, size_t len) +{ + const uint64_t *src = vsrc; + uint64_t *dst = vdst; + size_t i; + + for (i = 0; i < len / 8; ++i) { + dst[tile_vslice_index(i)] = src[i]; + } +} + +static void copy_vertical_q(void *vdst, const void *vsrc, size_t len) +{ + for (size_t i = 0; i < len; i += 16) { + memcpy(vdst + tile_vslice_offset(i), vsrc + i, 16); + } +} + +/* + * Host and TLB primitives for vertical tile slice addressing. + */ + +#define DO_LD(NAME, TYPE, HOST, TLB) \ +static inline void sme_##NAME##_v_host(void *za, intptr_t off, void *host) \ +{ \ + TYPE val = HOST(host); \ + *(TYPE *)(za + tile_vslice_offset(off)) = val; \ +} \ +static inline void sme_##NAME##_v_tlb(CPUARMState *env, void *za, \ + intptr_t off, target_ulong addr, uintptr_t ra) \ +{ \ + TYPE val = TLB(env, useronly_clean_ptr(addr), ra); \ + *(TYPE *)(za + tile_vslice_offset(off)) = val; \ +} + +#define DO_ST(NAME, TYPE, HOST, TLB) \ +static inline void sme_##NAME##_v_host(void *za, intptr_t off, void *host) \ +{ \ + TYPE val = *(TYPE *)(za + tile_vslice_offset(off)); \ + HOST(host, val); \ +} \ +static inline void sme_##NAME##_v_tlb(CPUARMState *env, void *za, \ + intptr_t off, target_ulong addr, uintptr_t ra) \ +{ \ + TYPE val = *(TYPE *)(za + tile_vslice_offset(off)); \ + TLB(env, useronly_clean_ptr(addr), val, ra); \ +} + +/* + * The ARMVectorReg elements are stored in host-endian 64-bit units. + * For 128-bit quantities, the sequence defined by the Elem[] pseudocode + * corresponds to storing the two 64-bit pieces in little-endian order. + */ +#define DO_LDQ(HNAME, VNAME, BE, HOST, TLB) \ +static inline void HNAME##_host(void *za, intptr_t off, void *host) \ +{ \ + uint64_t val0 = HOST(host), val1 = HOST(host + 8); \ + uint64_t *ptr = za + off; \ + ptr[0] = BE ? val1 : val0, ptr[1] = BE ? val0 : val1; \ +} \ +static inline void VNAME##_v_host(void *za, intptr_t off, void *host) \ +{ \ + HNAME##_host(za, tile_vslice_offset(off), host); \ +} \ +static inline void HNAME##_tlb(CPUARMState *env, void *za, intptr_t off, \ + target_ulong addr, uintptr_t ra) \ +{ \ + uint64_t val0 = TLB(env, useronly_clean_ptr(addr), ra); \ + uint64_t val1 = TLB(env, useronly_clean_ptr(addr + 8), ra); \ + uint64_t *ptr = za + off; \ + ptr[0] = BE ? val1 : val0, ptr[1] = BE ? 
val0 : val1; \ +} \ +static inline void VNAME##_v_tlb(CPUARMState *env, void *za, intptr_t off, \ + target_ulong addr, uintptr_t ra) \ +{ \ + HNAME##_tlb(env, za, tile_vslice_offset(off), addr, ra); \ +} + +#define DO_STQ(HNAME, VNAME, BE, HOST, TLB) \ +static inline void HNAME##_host(void *za, intptr_t off, void *host) \ +{ \ + uint64_t *ptr = za + off; \ + HOST(host, ptr[BE]); \ + HOST(host + 1, ptr[!BE]); \ +} \ +static inline void VNAME##_v_host(void *za, intptr_t off, void *host) \ +{ \ + HNAME##_host(za, tile_vslice_offset(off), host); \ +} \ +static inline void HNAME##_tlb(CPUARMState *env, void *za, intptr_t off, \ + target_ulong addr, uintptr_t ra) \ +{ \ + uint64_t *ptr = za + off; \ + TLB(env, useronly_clean_ptr(addr), ptr[BE], ra); \ + TLB(env, useronly_clean_ptr(addr + 8), ptr[!BE], ra); \ +} \ +static inline void VNAME##_v_tlb(CPUARMState *env, void *za, intptr_t off, \ + target_ulong addr, uintptr_t ra) \ +{ \ + HNAME##_tlb(env, za, tile_vslice_offset(off), addr, ra); \ +} + +DO_LD(ld1b, uint8_t, ldub_p, cpu_ldub_data_ra) +DO_LD(ld1h_be, uint16_t, lduw_be_p, cpu_lduw_be_data_ra) +DO_LD(ld1h_le, uint16_t, lduw_le_p, cpu_lduw_le_data_ra) +DO_LD(ld1s_be, uint32_t, ldl_be_p, cpu_ldl_be_data_ra) +DO_LD(ld1s_le, uint32_t, ldl_le_p, cpu_ldl_le_data_ra) +DO_LD(ld1d_be, uint64_t, ldq_be_p, cpu_ldq_be_data_ra) +DO_LD(ld1d_le, uint64_t, ldq_le_p, cpu_ldq_le_data_ra) + +DO_LDQ(sve_ld1qq_be, sme_ld1q_be, 1, ldq_be_p, cpu_ldq_be_data_ra) +DO_LDQ(sve_ld1qq_le, sme_ld1q_le, 0, ldq_le_p, cpu_ldq_le_data_ra) + +DO_ST(st1b, uint8_t, stb_p, cpu_stb_data_ra) +DO_ST(st1h_be, uint16_t, stw_be_p, cpu_stw_be_data_ra) +DO_ST(st1h_le, uint16_t, stw_le_p, cpu_stw_le_data_ra) +DO_ST(st1s_be, uint32_t, stl_be_p, cpu_stl_be_data_ra) +DO_ST(st1s_le, uint32_t, stl_le_p, cpu_stl_le_data_ra) +DO_ST(st1d_be, uint64_t, stq_be_p, cpu_stq_be_data_ra) +DO_ST(st1d_le, uint64_t, stq_le_p, cpu_stq_le_data_ra) + +DO_STQ(sve_st1qq_be, sme_st1q_be, 1, stq_be_p, cpu_stq_be_data_ra) +DO_STQ(sve_st1qq_le, sme_st1q_le, 0, stq_le_p, cpu_stq_le_data_ra) + +#undef DO_LD +#undef DO_ST +#undef DO_LDQ +#undef DO_STQ + +/* + * Common helper for all contiguous predicated loads. + */ + +static inline QEMU_ALWAYS_INLINE +void sme_ld1(CPUARMState *env, void *za, uint64_t *vg, + const target_ulong addr, uint32_t desc, const uintptr_t ra, + const int esz, uint32_t mtedesc, bool vertical, + sve_ldst1_host_fn *host_fn, + sve_ldst1_tlb_fn *tlb_fn, + ClearFn *clr_fn, + CopyFn *cpy_fn) +{ + const intptr_t reg_max = simd_oprsz(desc); + const intptr_t esize = 1 << esz; + intptr_t reg_off, reg_last; + SVEContLdSt info; + void *host; + int flags; + + /* Find the active elements. */ + if (!sve_cont_ldst_elements(&info, addr, vg, reg_max, esz, esize)) { + /* The entire predicate was false; no load occurs. */ + clr_fn(za, 0, reg_max); + return; + } + + /* Probe the page(s). Exit with exception for any invalid page. */ + sve_cont_ldst_pages(&info, FAULT_ALL, env, addr, MMU_DATA_LOAD, ra); + + /* Handle watchpoints for all active elements. */ + sve_cont_ldst_watchpoints(&info, env, vg, addr, esize, esize, + BP_MEM_READ, ra); + + /* + * Handle mte checks for all active elements. + * Since TBI must be set for MTE, !mtedesc => !mte_active. + */ + if (mtedesc) { + sve_cont_ldst_mte_check(&info, env, vg, addr, esize, esize, + mtedesc, ra); + } + + flags = info.page[0].flags | info.page[1].flags; + if (unlikely(flags != 0)) { +#ifdef CONFIG_USER_ONLY + g_assert_not_reached(); +#else + /* + * At least one page includes MMIO. 
+ * Any bus operation can fail with cpu_transaction_failed, + * which for ARM will raise SyncExternal. Perform the load + * into scratch memory to preserve register state until the end. + */ + ARMVectorReg scratch = { }; + + reg_off = info.reg_off_first[0]; + reg_last = info.reg_off_last[1]; + if (reg_last < 0) { + reg_last = info.reg_off_split; + if (reg_last < 0) { + reg_last = info.reg_off_last[0]; + } + } + + do { + uint64_t pg = vg[reg_off >> 6]; + do { + if ((pg >> (reg_off & 63)) & 1) { + tlb_fn(env, &scratch, reg_off, addr + reg_off, ra); + } + reg_off += esize; + } while (reg_off & 63); + } while (reg_off <= reg_last); + + cpy_fn(za, &scratch, reg_max); + return; +#endif + } + + /* The entire operation is in RAM, on valid pages. */ + + reg_off = info.reg_off_first[0]; + reg_last = info.reg_off_last[0]; + host = info.page[0].host; + + if (!vertical) { + memset(za, 0, reg_max); + } else if (reg_off) { + clr_fn(za, 0, reg_off); + } + + while (reg_off <= reg_last) { + uint64_t pg = vg[reg_off >> 6]; + do { + if ((pg >> (reg_off & 63)) & 1) { + host_fn(za, reg_off, host + reg_off); + } else if (vertical) { + clr_fn(za, reg_off, esize); + } + reg_off += esize; + } while (reg_off <= reg_last && (reg_off & 63)); + } + + /* + * Use the slow path to manage the cross-page misalignment. + * But we know this is RAM and cannot trap. + */ + reg_off = info.reg_off_split; + if (unlikely(reg_off >= 0)) { + tlb_fn(env, za, reg_off, addr + reg_off, ra); + } + + reg_off = info.reg_off_first[1]; + if (unlikely(reg_off >= 0)) { + reg_last = info.reg_off_last[1]; + host = info.page[1].host; + + do { + uint64_t pg = vg[reg_off >> 6]; + do { + if ((pg >> (reg_off & 63)) & 1) { + host_fn(za, reg_off, host + reg_off); + } else if (vertical) { + clr_fn(za, reg_off, esize); + } + reg_off += esize; + } while (reg_off & 63); + } while (reg_off <= reg_last); + } +} + +static inline QEMU_ALWAYS_INLINE +void sme_ld1_mte(CPUARMState *env, void *za, uint64_t *vg, + target_ulong addr, uint32_t desc, uintptr_t ra, + const int esz, bool vertical, + sve_ldst1_host_fn *host_fn, + sve_ldst1_tlb_fn *tlb_fn, + ClearFn *clr_fn, + CopyFn *cpy_fn) +{ + uint32_t mtedesc = desc >> (SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT); + int bit55 = extract64(addr, 55, 1); + + /* Remove mtedesc from the normal sve descriptor. */ + desc = extract32(desc, 0, SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT); + + /* Perform gross MTE suppression early. 
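+ * If TBI is not enabled for this half of the address space, or TCMA applies
+ * and the pointer carries the unchecked tag, no tag check can fire, so clear
+ * mtedesc and take the ordinary non-MTE path.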
*/ + if (!tbi_check(desc, bit55) || + tcma_check(desc, bit55, allocation_tag_from_addr(addr))) { + mtedesc = 0; + } + + sme_ld1(env, za, vg, addr, desc, ra, esz, mtedesc, vertical, + host_fn, tlb_fn, clr_fn, cpy_fn); +} + +#define DO_LD(L, END, ESZ) \ +void HELPER(sme_ld1##L##END##_h)(CPUARMState *env, void *za, void *vg, \ + target_ulong addr, uint32_t desc) \ +{ \ + sme_ld1(env, za, vg, addr, desc, GETPC(), ESZ, 0, false, \ + sve_ld1##L##L##END##_host, sve_ld1##L##L##END##_tlb, \ + clear_horizontal, copy_horizontal); \ +} \ +void HELPER(sme_ld1##L##END##_v)(CPUARMState *env, void *za, void *vg, \ + target_ulong addr, uint32_t desc) \ +{ \ + sme_ld1(env, za, vg, addr, desc, GETPC(), ESZ, 0, true, \ + sme_ld1##L##END##_v_host, sme_ld1##L##END##_v_tlb, \ + clear_vertical_##L, copy_vertical_##L); \ +} \ +void HELPER(sme_ld1##L##END##_h_mte)(CPUARMState *env, void *za, void *vg, \ + target_ulong addr, uint32_t desc) \ +{ \ + sme_ld1_mte(env, za, vg, addr, desc, GETPC(), ESZ, false, \ + sve_ld1##L##L##END##_host, sve_ld1##L##L##END##_tlb, \ + clear_horizontal, copy_horizontal); \ +} \ +void HELPER(sme_ld1##L##END##_v_mte)(CPUARMState *env, void *za, void *vg, \ + target_ulong addr, uint32_t desc) \ +{ \ + sme_ld1_mte(env, za, vg, addr, desc, GETPC(), ESZ, true, \ + sme_ld1##L##END##_v_host, sme_ld1##L##END##_v_tlb, \ + clear_vertical_##L, copy_vertical_##L); \ +} + +DO_LD(b, , MO_8) +DO_LD(h, _be, MO_16) +DO_LD(h, _le, MO_16) +DO_LD(s, _be, MO_32) +DO_LD(s, _le, MO_32) +DO_LD(d, _be, MO_64) +DO_LD(d, _le, MO_64) +DO_LD(q, _be, MO_128) +DO_LD(q, _le, MO_128) + +#undef DO_LD + +/* + * Common helper for all contiguous predicated stores. + */ + +static inline QEMU_ALWAYS_INLINE +void sme_st1(CPUARMState *env, void *za, uint64_t *vg, + const target_ulong addr, uint32_t desc, const uintptr_t ra, + const int esz, uint32_t mtedesc, bool vertical, + sve_ldst1_host_fn *host_fn, + sve_ldst1_tlb_fn *tlb_fn) +{ + const intptr_t reg_max = simd_oprsz(desc); + const intptr_t esize = 1 << esz; + intptr_t reg_off, reg_last; + SVEContLdSt info; + void *host; + int flags; + + /* Find the active elements. */ + if (!sve_cont_ldst_elements(&info, addr, vg, reg_max, esz, esize)) { + /* The entire predicate was false; no store occurs. */ + return; + } + + /* Probe the page(s). Exit with exception for any invalid page. */ + sve_cont_ldst_pages(&info, FAULT_ALL, env, addr, MMU_DATA_STORE, ra); + + /* Handle watchpoints for all active elements. */ + sve_cont_ldst_watchpoints(&info, env, vg, addr, esize, esize, + BP_MEM_WRITE, ra); + + /* + * Handle mte checks for all active elements. + * Since TBI must be set for MTE, !mtedesc => !mte_active. + */ + if (mtedesc) { + sve_cont_ldst_mte_check(&info, env, vg, addr, esize, esize, + mtedesc, ra); + } + + flags = info.page[0].flags | info.page[1].flags; + if (unlikely(flags != 0)) { +#ifdef CONFIG_USER_ONLY + g_assert_not_reached(); +#else + /* + * At least one page includes MMIO. + * Any bus operation can fail with cpu_transaction_failed, + * which for ARM will raise SyncExternal. We cannot avoid + * this fault and will leave with the store incomplete. 
+ */ + reg_off = info.reg_off_first[0]; + reg_last = info.reg_off_last[1]; + if (reg_last < 0) { + reg_last = info.reg_off_split; + if (reg_last < 0) { + reg_last = info.reg_off_last[0]; + } + } + + do { + uint64_t pg = vg[reg_off >> 6]; + do { + if ((pg >> (reg_off & 63)) & 1) { + tlb_fn(env, za, reg_off, addr + reg_off, ra); + } + reg_off += esize; + } while (reg_off & 63); + } while (reg_off <= reg_last); + return; +#endif + } + + reg_off = info.reg_off_first[0]; + reg_last = info.reg_off_last[0]; + host = info.page[0].host; + + while (reg_off <= reg_last) { + uint64_t pg = vg[reg_off >> 6]; + do { + if ((pg >> (reg_off & 63)) & 1) { + host_fn(za, reg_off, host + reg_off); + } + reg_off += 1 << esz; + } while (reg_off <= reg_last && (reg_off & 63)); + } + + /* + * Use the slow path to manage the cross-page misalignment. + * But we know this is RAM and cannot trap. + */ + reg_off = info.reg_off_split; + if (unlikely(reg_off >= 0)) { + tlb_fn(env, za, reg_off, addr + reg_off, ra); + } + + reg_off = info.reg_off_first[1]; + if (unlikely(reg_off >= 0)) { + reg_last = info.reg_off_last[1]; + host = info.page[1].host; + + do { + uint64_t pg = vg[reg_off >> 6]; + do { + if ((pg >> (reg_off & 63)) & 1) { + host_fn(za, reg_off, host + reg_off); + } + reg_off += 1 << esz; + } while (reg_off & 63); + } while (reg_off <= reg_last); + } +} + +static inline QEMU_ALWAYS_INLINE +void sme_st1_mte(CPUARMState *env, void *za, uint64_t *vg, target_ulong addr, + uint32_t desc, uintptr_t ra, int esz, bool vertical, + sve_ldst1_host_fn *host_fn, + sve_ldst1_tlb_fn *tlb_fn) +{ + uint32_t mtedesc = desc >> (SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT); + int bit55 = extract64(addr, 55, 1); + + /* Remove mtedesc from the normal sve descriptor. */ + desc = extract32(desc, 0, SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT); + + /* Perform gross MTE suppression early. 
*/ + if (!tbi_check(desc, bit55) || + tcma_check(desc, bit55, allocation_tag_from_addr(addr))) { + mtedesc = 0; + } + + sme_st1(env, za, vg, addr, desc, ra, esz, mtedesc, + vertical, host_fn, tlb_fn); +} + +#define DO_ST(L, END, ESZ) \ +void HELPER(sme_st1##L##END##_h)(CPUARMState *env, void *za, void *vg, \ + target_ulong addr, uint32_t desc) \ +{ \ + sme_st1(env, za, vg, addr, desc, GETPC(), ESZ, 0, false, \ + sve_st1##L##L##END##_host, sve_st1##L##L##END##_tlb); \ +} \ +void HELPER(sme_st1##L##END##_v)(CPUARMState *env, void *za, void *vg, \ + target_ulong addr, uint32_t desc) \ +{ \ + sme_st1(env, za, vg, addr, desc, GETPC(), ESZ, 0, true, \ + sme_st1##L##END##_v_host, sme_st1##L##END##_v_tlb); \ +} \ +void HELPER(sme_st1##L##END##_h_mte)(CPUARMState *env, void *za, void *vg, \ + target_ulong addr, uint32_t desc) \ +{ \ + sme_st1_mte(env, za, vg, addr, desc, GETPC(), ESZ, false, \ + sve_st1##L##L##END##_host, sve_st1##L##L##END##_tlb); \ +} \ +void HELPER(sme_st1##L##END##_v_mte)(CPUARMState *env, void *za, void *vg, \ + target_ulong addr, uint32_t desc) \ +{ \ + sme_st1_mte(env, za, vg, addr, desc, GETPC(), ESZ, true, \ + sme_st1##L##END##_v_host, sme_st1##L##END##_v_tlb); \ +} + +DO_ST(b, , MO_8) +DO_ST(h, _be, MO_16) +DO_ST(h, _le, MO_16) +DO_ST(s, _be, MO_32) +DO_ST(s, _le, MO_32) +DO_ST(d, _be, MO_64) +DO_ST(d, _le, MO_64) +DO_ST(q, _be, MO_128) +DO_ST(q, _le, MO_128) + +#undef DO_ST + +void HELPER(sme_addha_s)(void *vzda, void *vzn, void *vpn, + void *vpm, uint32_t desc) +{ + intptr_t row, col, oprsz = simd_oprsz(desc) / 4; + uint64_t *pn = vpn, *pm = vpm; + uint32_t *zda = vzda, *zn = vzn; + + for (row = 0; row < oprsz; ) { + uint64_t pa = pn[row >> 4]; + do { + if (pa & 1) { + for (col = 0; col < oprsz; ) { + uint64_t pb = pm[col >> 4]; + do { + if (pb & 1) { + zda[tile_vslice_index(row) + H4(col)] += zn[H4(col)]; + } + pb >>= 4; + } while (++col & 15); + } + } + pa >>= 4; + } while (++row & 15); + } +} + +void HELPER(sme_addha_d)(void *vzda, void *vzn, void *vpn, + void *vpm, uint32_t desc) +{ + intptr_t row, col, oprsz = simd_oprsz(desc) / 8; + uint8_t *pn = vpn, *pm = vpm; + uint64_t *zda = vzda, *zn = vzn; + + for (row = 0; row < oprsz; ++row) { + if (pn[H1(row)] & 1) { + for (col = 0; col < oprsz; ++col) { + if (pm[H1(col)] & 1) { + zda[tile_vslice_index(row) + col] += zn[col]; + } + } + } + } +} + +void HELPER(sme_addva_s)(void *vzda, void *vzn, void *vpn, + void *vpm, uint32_t desc) +{ + intptr_t row, col, oprsz = simd_oprsz(desc) / 4; + uint64_t *pn = vpn, *pm = vpm; + uint32_t *zda = vzda, *zn = vzn; + + for (row = 0; row < oprsz; ) { + uint64_t pa = pn[row >> 4]; + do { + if (pa & 1) { + uint32_t zn_row = zn[H4(row)]; + for (col = 0; col < oprsz; ) { + uint64_t pb = pm[col >> 4]; + do { + if (pb & 1) { + zda[tile_vslice_index(row) + H4(col)] += zn_row; + } + pb >>= 4; + } while (++col & 15); + } + } + pa >>= 4; + } while (++row & 15); + } +} + +void HELPER(sme_addva_d)(void *vzda, void *vzn, void *vpn, + void *vpm, uint32_t desc) +{ + intptr_t row, col, oprsz = simd_oprsz(desc) / 8; + uint8_t *pn = vpn, *pm = vpm; + uint64_t *zda = vzda, *zn = vzn; + + for (row = 0; row < oprsz; ++row) { + if (pn[H1(row)] & 1) { + uint64_t zn_row = zn[row]; + for (col = 0; col < oprsz; ++col) { + if (pm[H1(col)] & 1) { + zda[tile_vslice_index(row) + col] += zn_row; + } + } + } + } +} + +void HELPER(sme_fmopa_s)(void *vza, void *vzn, void *vzm, void *vpn, + void *vpm, void *vst, uint32_t desc) +{ + intptr_t row, col, oprsz = simd_maxsz(desc); + uint32_t neg = simd_data(desc) << 31; + 
uint16_t *pn = vpn, *pm = vpm; + float_status fpst; + + /* + * Make a copy of float_status because this operation does not + * update the cumulative fp exception status. It also produces + * default nans. + */ + fpst = *(float_status *)vst; + set_default_nan_mode(true, &fpst); + + for (row = 0; row < oprsz; ) { + uint16_t pa = pn[H2(row >> 4)]; + do { + if (pa & 1) { + void *vza_row = vza + tile_vslice_offset(row); + uint32_t n = *(uint32_t *)(vzn + H1_4(row)) ^ neg; + + for (col = 0; col < oprsz; ) { + uint16_t pb = pm[H2(col >> 4)]; + do { + if (pb & 1) { + uint32_t *a = vza_row + H1_4(col); + uint32_t *m = vzm + H1_4(col); + *a = float32_muladd(n, *m, *a, 0, vst); + } + col += 4; + pb >>= 4; + } while (col & 15); + } + } + row += 4; + pa >>= 4; + } while (row & 15); + } +} + +void HELPER(sme_fmopa_d)(void *vza, void *vzn, void *vzm, void *vpn, + void *vpm, void *vst, uint32_t desc) +{ + intptr_t row, col, oprsz = simd_oprsz(desc) / 8; + uint64_t neg = (uint64_t)simd_data(desc) << 63; + uint64_t *za = vza, *zn = vzn, *zm = vzm; + uint8_t *pn = vpn, *pm = vpm; + float_status fpst = *(float_status *)vst; + + set_default_nan_mode(true, &fpst); + + for (row = 0; row < oprsz; ++row) { + if (pn[H1(row)] & 1) { + uint64_t *za_row = &za[tile_vslice_index(row)]; + uint64_t n = zn[row] ^ neg; + + for (col = 0; col < oprsz; ++col) { + if (pm[H1(col)] & 1) { + uint64_t *a = &za_row[col]; + *a = float64_muladd(n, zm[col], *a, 0, &fpst); + } + } + } + } +} + +/* + * Alter PAIR as needed for controlling predicates being false, + * and for NEG on an enabled row element. + */ +static inline uint32_t f16mop_adj_pair(uint32_t pair, uint32_t pg, uint32_t neg) +{ + /* + * The pseudocode uses a conditional negate after the conditional zero. + * It is simpler here to unconditionally negate before conditional zero. + */ + pair ^= neg; + if (!(pg & 1)) { + pair &= 0xffff0000u; + } + if (!(pg & 4)) { + pair &= 0x0000ffffu; + } + return pair; +} + +static float32 f16_dotadd(float32 sum, uint32_t e1, uint32_t e2, + float_status *s_std, float_status *s_odd) +{ + float64 e1r = float16_to_float64(e1 & 0xffff, true, s_std); + float64 e1c = float16_to_float64(e1 >> 16, true, s_std); + float64 e2r = float16_to_float64(e2 & 0xffff, true, s_std); + float64 e2c = float16_to_float64(e2 >> 16, true, s_std); + float64 t64; + float32 t32; + + /* + * The ARM pseudocode function FPDot performs both multiplies + * and the add with a single rounding operation. Emulate this + * by performing the first multiply in round-to-odd, then doing + * the second multiply as fused multiply-add, and rounding to + * float32 all in one step. + */ + t64 = float64_mul(e1r, e2r, s_odd); + t64 = float64r32_muladd(e1c, e2c, t64, 0, s_std); + + /* This conversion is exact, because we've already rounded. */ + t32 = float64_to_float32(t64, s_std); + + /* The final accumulation step is not fused. */ + return float32_add(sum, t32, s_std); +} + +void HELPER(sme_fmopa_h)(void *vza, void *vzn, void *vzm, void *vpn, + void *vpm, void *vst, uint32_t desc) +{ + intptr_t row, col, oprsz = simd_maxsz(desc); + uint32_t neg = simd_data(desc) * 0x80008000u; + uint16_t *pn = vpn, *pm = vpm; + float_status fpst_odd, fpst_std; + + /* + * Make a copy of float_status because this operation does not + * update the cumulative fp exception status. It also produces + * default nans. Make a second copy with round-to-odd -- see above. 
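+ * (The round-to-odd status is used for the first of the two half-precision
+ * products in f16_dotadd above; rounding that product to odd preserves the
+ * sticky information so that the single final rounding to float32 still
+ * matches the architected FPDot behaviour.)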
+ */ + fpst_std = *(float_status *)vst; + set_default_nan_mode(true, &fpst_std); + fpst_odd = fpst_std; + set_float_rounding_mode(float_round_to_odd, &fpst_odd); + + for (row = 0; row < oprsz; ) { + uint16_t prow = pn[H2(row >> 4)]; + do { + void *vza_row = vza + tile_vslice_offset(row); + uint32_t n = *(uint32_t *)(vzn + H1_4(row)); + + n = f16mop_adj_pair(n, prow, neg); + + for (col = 0; col < oprsz; ) { + uint16_t pcol = pm[H2(col >> 4)]; + do { + if (prow & pcol & 0b0101) { + uint32_t *a = vza_row + H1_4(col); + uint32_t m = *(uint32_t *)(vzm + H1_4(col)); + + m = f16mop_adj_pair(m, pcol, 0); + *a = f16_dotadd(*a, n, m, &fpst_std, &fpst_odd); + + col += 4; + pcol >>= 4; + } + } while (col & 15); + } + row += 4; + prow >>= 4; + } while (row & 15); + } +} + +void HELPER(sme_bfmopa)(void *vza, void *vzn, void *vzm, void *vpn, + void *vpm, uint32_t desc) +{ + intptr_t row, col, oprsz = simd_maxsz(desc); + uint32_t neg = simd_data(desc) * 0x80008000u; + uint16_t *pn = vpn, *pm = vpm; + + for (row = 0; row < oprsz; ) { + uint16_t prow = pn[H2(row >> 4)]; + do { + void *vza_row = vza + tile_vslice_offset(row); + uint32_t n = *(uint32_t *)(vzn + H1_4(row)); + + n = f16mop_adj_pair(n, prow, neg); + + for (col = 0; col < oprsz; ) { + uint16_t pcol = pm[H2(col >> 4)]; + do { + if (prow & pcol & 0b0101) { + uint32_t *a = vza_row + H1_4(col); + uint32_t m = *(uint32_t *)(vzm + H1_4(col)); + + m = f16mop_adj_pair(m, pcol, 0); + *a = bfdotadd(*a, n, m); + + col += 4; + pcol >>= 4; + } + } while (col & 15); + } + row += 4; + prow >>= 4; + } while (row & 15); + } +} + +typedef uint64_t IMOPFn(uint64_t, uint64_t, uint64_t, uint8_t, bool); + +static inline void do_imopa(uint64_t *za, uint64_t *zn, uint64_t *zm, + uint8_t *pn, uint8_t *pm, + uint32_t desc, IMOPFn *fn) +{ + intptr_t row, col, oprsz = simd_oprsz(desc) / 8; + bool neg = simd_data(desc); + + for (row = 0; row < oprsz; ++row) { + uint8_t pa = pn[H1(row)]; + uint64_t *za_row = &za[tile_vslice_index(row)]; + uint64_t n = zn[row]; + + for (col = 0; col < oprsz; ++col) { + uint8_t pb = pm[H1(col)]; + uint64_t *a = &za_row[col]; + + *a = fn(n, zm[col], *a, pa & pb, neg); + } + } +} + +#define DEF_IMOP_32(NAME, NTYPE, MTYPE) \ +static uint64_t NAME(uint64_t n, uint64_t m, uint64_t a, uint8_t p, bool neg) \ +{ \ + uint32_t sum0 = 0, sum1 = 0; \ + /* Apply P to N as a mask, making the inactive elements 0. */ \ + n &= expand_pred_b(p); \ + sum0 += (NTYPE)(n >> 0) * (MTYPE)(m >> 0); \ + sum0 += (NTYPE)(n >> 8) * (MTYPE)(m >> 8); \ + sum0 += (NTYPE)(n >> 16) * (MTYPE)(m >> 16); \ + sum0 += (NTYPE)(n >> 24) * (MTYPE)(m >> 24); \ + sum1 += (NTYPE)(n >> 32) * (MTYPE)(m >> 32); \ + sum1 += (NTYPE)(n >> 40) * (MTYPE)(m >> 40); \ + sum1 += (NTYPE)(n >> 48) * (MTYPE)(m >> 48); \ + sum1 += (NTYPE)(n >> 56) * (MTYPE)(m >> 56); \ + if (neg) { \ + sum0 = (uint32_t)a - sum0, sum1 = (uint32_t)(a >> 32) - sum1; \ + } else { \ + sum0 = (uint32_t)a + sum0, sum1 = (uint32_t)(a >> 32) + sum1; \ + } \ + return ((uint64_t)sum1 << 32) | sum0; \ +} + +#define DEF_IMOP_64(NAME, NTYPE, MTYPE) \ +static uint64_t NAME(uint64_t n, uint64_t m, uint64_t a, uint8_t p, bool neg) \ +{ \ + uint64_t sum = 0; \ + /* Apply P to N as a mask, making the inactive elements 0. */ \ + n &= expand_pred_h(p); \ + sum += (NTYPE)(n >> 0) * (MTYPE)(m >> 0); \ + sum += (NTYPE)(n >> 16) * (MTYPE)(m >> 16); \ + sum += (NTYPE)(n >> 32) * (MTYPE)(m >> 32); \ + sum += (NTYPE)(n >> 48) * (MTYPE)(m >> 48); \ + return neg ? 
a - sum : a + sum; \ +} + +DEF_IMOP_32(smopa_s, int8_t, int8_t) +DEF_IMOP_32(umopa_s, uint8_t, uint8_t) +DEF_IMOP_32(sumopa_s, int8_t, uint8_t) +DEF_IMOP_32(usmopa_s, uint8_t, int8_t) + +DEF_IMOP_64(smopa_d, int16_t, int16_t) +DEF_IMOP_64(umopa_d, uint16_t, uint16_t) +DEF_IMOP_64(sumopa_d, int16_t, uint16_t) +DEF_IMOP_64(usmopa_d, uint16_t, int16_t) + +#define DEF_IMOPH(NAME) \ + void HELPER(sme_##NAME)(void *vza, void *vzn, void *vzm, void *vpn, \ + void *vpm, uint32_t desc) \ + { do_imopa(vza, vzn, vzm, vpn, vpm, desc, NAME); } + +DEF_IMOPH(smopa_s) +DEF_IMOPH(umopa_s) +DEF_IMOPH(sumopa_s) +DEF_IMOPH(usmopa_s) +DEF_IMOPH(smopa_d) +DEF_IMOPH(umopa_d) +DEF_IMOPH(sumopa_d) +DEF_IMOPH(usmopa_d) diff --git a/target/arm/sve.decode b/target/arm/sve.decode index a54feb2f61..14b3a69c36 100644 --- a/target/arm/sve.decode +++ b/target/arm/sve.decode @@ -449,14 +449,17 @@ INDEX_ri 00000100 esz:2 1 imm:s5 010001 rn:5 rd:5 # SVE index generation (register start, register increment) INDEX_rr 00000100 .. 1 ..... 010011 ..... ..... @rd_rn_rm -### SVE Stack Allocation Group +### SVE / Streaming SVE Stack Allocation Group # SVE stack frame adjustment ADDVL 00000100 001 ..... 01010 ...... ..... @rd_rn_i6 +ADDSVL 00000100 001 ..... 01011 ...... ..... @rd_rn_i6 ADDPL 00000100 011 ..... 01010 ...... ..... @rd_rn_i6 +ADDSPL 00000100 011 ..... 01011 ...... ..... @rd_rn_i6 # SVE stack frame size RDVL 00000100 101 11111 01010 imm:s6 rd:5 +RDSVL 00000100 101 11111 01011 imm:s6 rd:5 ### SVE Bitwise Shift - Unpredicated Group @@ -649,6 +652,7 @@ REVB 00000101 .. 1001 00 100 ... ..... ..... @rd_pg_rn REVH 00000101 .. 1001 01 100 ... ..... ..... @rd_pg_rn REVW 00000101 .. 1001 10 100 ... ..... ..... @rd_pg_rn RBIT 00000101 .. 1001 11 100 ... ..... ..... @rd_pg_rn +REVD 00000101 00 1011 10 100 ... ..... ..... @rd_pg_rn_e0 # SVE vector splice (predicated, destructive) SPLICE 00000101 .. 101 100 100 ... ..... ..... @rdn_pg_rm @@ -1183,10 +1187,10 @@ LD1RO_zpri 1010010 .. 01 0.... 001 ... ..... ..... \ @rpri_load_msz nreg=0 # SVE 32-bit gather prefetch (scalar plus 32-bit scaled offsets) -PRF 1000010 00 -1 ----- 0-- --- ----- 0 ---- +PRF_ns 1000010 00 -1 ----- 0-- --- ----- 0 ---- # SVE 32-bit gather prefetch (vector plus immediate) -PRF 1000010 -- 00 ----- 111 --- ----- 0 ---- +PRF_ns 1000010 -- 00 ----- 111 --- ----- 0 ---- # SVE contiguous prefetch (scalar plus immediate) PRF 1000010 11 1- ----- 0-- --- ----- 0 ---- @@ -1223,13 +1227,13 @@ LD1_zpiz 1100010 .. 01 ..... 1.. ... ..... ..... \ @rpri_g_load esz=3 # SVE 64-bit gather prefetch (scalar plus 64-bit scaled offsets) -PRF 1100010 00 11 ----- 1-- --- ----- 0 ---- +PRF_ns 1100010 00 11 ----- 1-- --- ----- 0 ---- # SVE 64-bit gather prefetch (scalar plus unpacked 32-bit scaled offsets) -PRF 1100010 00 -1 ----- 0-- --- ----- 0 ---- +PRF_ns 1100010 00 -1 ----- 0-- --- ----- 0 ---- # SVE 64-bit gather prefetch (vector plus immediate) -PRF 1100010 -- 00 ----- 111 --- ----- 0 ---- +PRF_ns 1100010 -- 00 ----- 111 --- ----- 0 ---- ### SVE Memory Store Group @@ -1671,3 +1675,28 @@ BFMLALT_zzxw 01100100 11 1 ..... 0100.1 ..... ..... @rrxr_3a esz=2 ### SVE2 floating-point bfloat16 dot-product (indexed) BFDOT_zzxz 01100100 01 1 ..... 010000 ..... ..... @rrxr_2 esz=2 + +### SVE broadcast predicate element + +&psel esz pd pn pm rv imm +%psel_rv 16:2 !function=plus_12 +%psel_imm_b 22:2 19:2 +%psel_imm_h 22:2 20:1 +%psel_imm_s 22:2 +%psel_imm_d 23:1 +@psel ........ .. . ... .. .. pn:4 . pm:4 . pd:4 \ + &psel rv=%psel_rv + +PSEL 00100101 .. 1 ..1 .. 01 .... 0 .... 0 .... 
\ + @psel esz=0 imm=%psel_imm_b +PSEL 00100101 .. 1 .10 .. 01 .... 0 .... 0 .... \ + @psel esz=1 imm=%psel_imm_h +PSEL 00100101 .. 1 100 .. 01 .... 0 .... 0 .... \ + @psel esz=2 imm=%psel_imm_s +PSEL 00100101 .1 1 000 .. 01 .... 0 .... 0 .... \ + @psel esz=3 imm=%psel_imm_d + +### SVE clamp + +SCLAMP 01000100 .. 0 ..... 110000 ..... ..... @rda_rn_rm +UCLAMP 01000100 .. 0 ..... 110001 ..... ..... @rda_rn_rm diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c index 1654c0bbf9..d6f7ef94fe 100644 --- a/target/arm/sve_helper.c +++ b/target/arm/sve_helper.c @@ -931,6 +931,22 @@ DO_ZPZ_D(sve_revh_d, uint64_t, hswap64) DO_ZPZ_D(sve_revw_d, uint64_t, wswap64) +void HELPER(sme_revd_q)(void *vd, void *vn, void *vg, uint32_t desc) +{ + intptr_t i, opr_sz = simd_oprsz(desc) / 8; + uint64_t *d = vd, *n = vn; + uint8_t *pg = vg; + + for (i = 0; i < opr_sz; i += 2) { + if (pg[H1(i)] & 1) { + uint64_t n0 = n[i + 0]; + uint64_t n1 = n[i + 1]; + d[i + 0] = n1; + d[i + 1] = n0; + } + } +} + DO_ZPZ(sve_rbit_b, uint8_t, H1, revbit8) DO_ZPZ(sve_rbit_h, uint16_t, H1_2, revbit16) DO_ZPZ(sve_rbit_s, uint32_t, H1_4, revbit32) @@ -3565,6 +3581,18 @@ void HELPER(sve_sel_zpzz_d)(void *vd, void *vn, void *vm, } } +void HELPER(sve_sel_zpzz_q)(void *vd, void *vn, void *vm, + void *vg, uint32_t desc) +{ + intptr_t i, opr_sz = simd_oprsz(desc) / 16; + Int128 *d = vd, *n = vn, *m = vm; + uint16_t *pg = vg; + + for (i = 0; i < opr_sz; i += 1) { + d[i] = (pg[H2(i)] & 1 ? n : m)[i]; + } +} + /* Two operand comparison controlled by a predicate. * ??? It is very tempting to want to be able to expand this inline * with x86 instructions, e.g. @@ -5337,6 +5365,9 @@ bool sve_probe_page(SVEHostPage *info, bool nofault, CPUARMState *env, #ifdef CONFIG_USER_ONLY memset(&info->attrs, 0, sizeof(info->attrs)); + /* Require both MAP_ANON and PROT_MTE -- see allocation_tag_mem. */ + arm_tlb_mte_tagged(&info->attrs) = + (flags & PAGE_ANON) && (flags & PAGE_MTE); #else /* * Find the iotlbentry for addr and return the transaction attributes. @@ -5986,7 +6017,7 @@ void sve_ldnfff1_r(CPUARMState *env, void *vg, const target_ulong addr, * Disable MTE checking if the Tagged bit is not set. Since TBI must * be set within MTEDESC for MTE, !mtedesc => !mte_active. */ - if (arm_tlb_mte_tagged(&info.page[0].attrs)) { + if (!arm_tlb_mte_tagged(&info.page[0].attrs)) { mtedesc = 0; } diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c index c86b97b1d4..b7b64f7358 100644 --- a/target/arm/translate-a64.c +++ b/target/arm/translate-a64.c @@ -1155,7 +1155,7 @@ static void do_vec_ld(DisasContext *s, int destidx, int element, * unallocated-encoding checks (otherwise the syndrome information * for the resulting exception will be incorrect). */ -static bool fp_access_check(DisasContext *s) +static bool fp_access_check_only(DisasContext *s) { if (s->fp_excp_el) { assert(!s->fp_access_checked); @@ -1170,21 +1170,44 @@ static bool fp_access_check(DisasContext *s) return true; } -/* Check that SVE access is enabled. If it is, return true. +static bool fp_access_check(DisasContext *s) +{ + if (!fp_access_check_only(s)) { + return false; + } + if (s->sme_trap_nonstreaming && s->is_nonstreaming) { + gen_exception_insn(s, s->pc_curr, EXCP_UDEF, + syn_smetrap(SME_ET_Streaming, false)); + return false; + } + return true; +} + +/* + * Check that SVE access is enabled. If it is, return true. * If not, emit code to generate an appropriate exception and return false. + * This function corresponds to CheckSVEEnabled(). 
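+ * Roughly: when PSTATE.SM is set, or when only SME provides the SVE
+ * instructions, the check is delegated to sme_sm_enabled_check();
+ * otherwise the usual trap to sve_excp_el applies.  On success we
+ * still fall through to fp_access_check() in either case.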
*/ bool sve_access_check(DisasContext *s) { - if (s->sve_excp_el) { - assert(!s->sve_access_checked); - s->sve_access_checked = true; - + if (s->pstate_sm || !dc_isar_feature(aa64_sve, s)) { + assert(dc_isar_feature(aa64_sme, s)); + if (!sme_sm_enabled_check(s)) { + goto fail_exit; + } + } else if (s->sve_excp_el) { gen_exception_insn_el(s, s->pc_curr, EXCP_UDEF, syn_sve_access_trap(), s->sve_excp_el); - return false; + goto fail_exit; } s->sve_access_checked = true; return fp_access_check(s); + + fail_exit: + /* Assert that we only raise one exception per instruction. */ + assert(!s->sve_access_checked); + s->sve_access_checked = true; + return false; } /* @@ -1203,6 +1226,40 @@ static bool sme_access_check(DisasContext *s) return true; } +/* This function corresponds to CheckSMEEnabled. */ +bool sme_enabled_check(DisasContext *s) +{ + /* + * Note that unlike sve_excp_el, we have not constrained sme_excp_el + * to be zero when fp_excp_el has priority. This is because we need + * sme_excp_el by itself for cpregs access checks. + */ + if (!s->fp_excp_el || s->sme_excp_el < s->fp_excp_el) { + s->fp_access_checked = true; + return sme_access_check(s); + } + return fp_access_check_only(s); +} + +/* Common subroutine for CheckSMEAnd*Enabled. */ +bool sme_enabled_check_with_svcr(DisasContext *s, unsigned req) +{ + if (!sme_enabled_check(s)) { + return false; + } + if (FIELD_EX64(req, SVCR, SM) && !s->pstate_sm) { + gen_exception_insn(s, s->pc_curr, EXCP_UDEF, + syn_smetrap(SME_ET_NotStreaming, false)); + return false; + } + if (FIELD_EX64(req, SVCR, ZA) && !s->pstate_za) { + gen_exception_insn(s, s->pc_curr, EXCP_UDEF, + syn_smetrap(SME_ET_InactiveZA, false)); + return false; + } + return true; +} + /* * This utility function is for doing register extension with an * optional shift. You will likely want to pass a temporary for the @@ -1994,7 +2051,7 @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread, default: g_assert_not_reached(); } - if ((ri->type & ARM_CP_FPU) && !fp_access_check(s)) { + if ((ri->type & ARM_CP_FPU) && !fp_access_check_only(s)) { return; } else if ((ri->type & ARM_CP_SVE) && !sve_access_check(s)) { return; @@ -14530,6 +14587,23 @@ static void disas_data_proc_simd_fp(DisasContext *s, uint32_t insn) } } +/* + * Include the generated SME FA64 decoder. 
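+ * In effect (a sketch of the generated decoder, which is not shown
+ * here): insns that are illegal in streaming mode without FA64 decode
+ * to trans_FAIL below, which merely marks them non-streaming so that
+ * fp_access_check() can raise the SME trap; everything else decodes
+ * to trans_OK and is left untouched.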
+ */ + +#include "decode-sme-fa64.c.inc" + +static bool trans_OK(DisasContext *s, arg_OK *a) +{ + return true; +} + +static bool trans_FAIL(DisasContext *s, arg_OK *a) +{ + s->is_nonstreaming = true; + return true; +} + /** * is_guarded_page: * @env: The cpu environment @@ -14657,6 +14731,7 @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase, dc->mte_active[1] = EX_TBFLAG_A64(tb_flags, MTE0_ACTIVE); dc->pstate_sm = EX_TBFLAG_A64(tb_flags, PSTATE_SM); dc->pstate_za = EX_TBFLAG_A64(tb_flags, PSTATE_ZA); + dc->sme_trap_nonstreaming = EX_TBFLAG_A64(tb_flags, SME_TRAP_NONSTREAMING); dc->vec_len = 0; dc->vec_stride = 0; dc->cp_regs = arm_cpu->cp_regs; @@ -14805,8 +14880,18 @@ static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) } } + s->is_nonstreaming = false; + if (s->sme_trap_nonstreaming) { + disas_sme_fa64(s, insn); + } + switch (extract32(insn, 25, 4)) { - case 0x0: case 0x1: case 0x3: /* UNALLOCATED */ + case 0x0: + if (!extract32(insn, 31, 1) || !disas_sme(s, insn)) { + unallocated_encoding(s); + } + break; + case 0x1: case 0x3: /* UNALLOCATED */ unallocated_encoding(s); break; case 0x2: diff --git a/target/arm/translate-a64.h b/target/arm/translate-a64.h index f0970c6b8c..ad3762d1ac 100644 --- a/target/arm/translate-a64.h +++ b/target/arm/translate-a64.h @@ -29,6 +29,27 @@ void write_fp_dreg(DisasContext *s, int reg, TCGv_i64 v); bool logic_imm_decode_wmask(uint64_t *result, unsigned int immn, unsigned int imms, unsigned int immr); bool sve_access_check(DisasContext *s); +bool sme_enabled_check(DisasContext *s); +bool sme_enabled_check_with_svcr(DisasContext *s, unsigned); + +/* This function corresponds to CheckStreamingSVEEnabled. */ +static inline bool sme_sm_enabled_check(DisasContext *s) +{ + return sme_enabled_check_with_svcr(s, R_SVCR_SM_MASK); +} + +/* This function corresponds to CheckSMEAndZAEnabled. */ +static inline bool sme_za_enabled_check(DisasContext *s) +{ + return sme_enabled_check_with_svcr(s, R_SVCR_ZA_MASK); +} + +/* Note that this function corresponds to CheckStreamingSVEAndZAEnabled. */ +static inline bool sme_smza_enabled_check(DisasContext *s) +{ + return sme_enabled_check_with_svcr(s, R_SVCR_SM_MASK | R_SVCR_ZA_MASK); +} + TCGv_i64 clean_data_tbi(DisasContext *s, TCGv_i64 addr); TCGv_i64 gen_mte_check1(DisasContext *s, TCGv_i64 addr, bool is_write, bool tag_checked, int log2_size); @@ -107,6 +128,12 @@ static inline int vec_full_reg_size(DisasContext *s) return s->vl; } +/* Return the byte size of the vector register, SVL / 8. */ +static inline int streaming_vec_reg_size(DisasContext *s) +{ + return s->svl; +} + /* * Return the offset info CPUARMState of the predicate vector register Pn. * Note for this purpose, FFR is P16. @@ -122,6 +149,12 @@ static inline int pred_full_reg_size(DisasContext *s) return s->vl >> 3; } +/* Return the byte size of the predicate register, SVL / 64. */ +static inline int streaming_pred_reg_size(DisasContext *s) +{ + return s->svl >> 3; +} + /* * Round up the size of a register to a size allowed by * the tcg vector infrastructure. Any operation which uses this @@ -145,7 +178,16 @@ static inline int pred_gvec_reg_size(DisasContext *s) return size_for_gvec(pred_full_reg_size(s)); } +/* Return a newly allocated pointer to the predicate register. 
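+ * This mirrors vec_full_reg_ptr(); the caller is expected to release
+ * the temporary with tcg_temp_free_ptr() once the helper call has
+ * been emitted, as the trans_* functions in translate-sme.c do.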
*/ +static inline TCGv_ptr pred_full_reg_ptr(DisasContext *s, int regno) +{ + TCGv_ptr ret = tcg_temp_new_ptr(); + tcg_gen_addi_ptr(ret, cpu_env, pred_full_reg_offset(s, regno)); + return ret; +} + bool disas_sve(DisasContext *, uint32_t); +bool disas_sme(DisasContext *, uint32_t); void gen_gvec_rax1(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz); @@ -153,4 +195,7 @@ void gen_gvec_xar(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, uint32_t rm_ofs, int64_t shift, uint32_t opr_sz, uint32_t max_sz); +void gen_sve_ldr(DisasContext *s, TCGv_ptr, int vofs, int len, int rn, int imm); +void gen_sve_str(DisasContext *s, TCGv_ptr, int vofs, int len, int rn, int imm); + #endif /* TARGET_ARM_TRANSLATE_A64_H */ diff --git a/target/arm/translate-sme.c b/target/arm/translate-sme.c new file mode 100644 index 0000000000..7b87a9df63 --- /dev/null +++ b/target/arm/translate-sme.c @@ -0,0 +1,373 @@ +/* + * AArch64 SME translation + * + * Copyright (c) 2022 Linaro, Ltd + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "tcg/tcg-op.h" +#include "tcg/tcg-op-gvec.h" +#include "tcg/tcg-gvec-desc.h" +#include "translate.h" +#include "exec/helper-gen.h" +#include "translate-a64.h" +#include "fpu/softfloat.h" + + +/* + * Include the generated decoder. + */ + +#include "decode-sme.c.inc" + + +/* + * Resolve tile.size[index] to a host pointer, where tile and index + * are always decoded together, dependent on the element size. + */ +static TCGv_ptr get_tile_rowcol(DisasContext *s, int esz, int rs, + int tile_index, bool vertical) +{ + int tile = tile_index >> (4 - esz); + int index = esz == MO_128 ? 0 : extract32(tile_index, 0, 4 - esz); + int pos, len, offset; + TCGv_i32 tmp; + TCGv_ptr addr; + + /* Compute the final index, which is Rs+imm. */ + tmp = tcg_temp_new_i32(); + tcg_gen_trunc_tl_i32(tmp, cpu_reg(s, rs)); + tcg_gen_addi_i32(tmp, tmp, index); + + /* Prepare a power-of-two modulo via extraction of @len bits. */ + len = ctz32(streaming_vec_reg_size(s)) - esz; + + if (vertical) { + /* + * Compute the byte offset of the index within the tile: + * (index % (svl / size)) * size + * = (index % (svl >> esz)) << esz + * Perform the power-of-two modulo via extraction of the low @len bits. + * Perform the multiply by shifting left by @pos bits. + * Perform these operations simultaneously via deposit into zero. + */ + pos = esz; + tcg_gen_deposit_z_i32(tmp, tmp, pos, len); + + /* + * For big-endian, adjust the indexed column byte offset within + * the uint64_t host words that make up env->zarray[]. 
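+         * For example, with esz == MO_8 the xor constant is
+         * 8 - 1 = 7: byte column 0 maps to host offset 7 within its
+         * uint64_t, column 1 to offset 6, and so on; MO_64 columns
+         * need no adjustment.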
+ */ + if (HOST_BIG_ENDIAN && esz < MO_64) { + tcg_gen_xori_i32(tmp, tmp, 8 - (1 << esz)); + } + } else { + /* + * Compute the byte offset of the index within the tile: + * (index % (svl / size)) * (size * sizeof(row)) + * = (index % (svl >> esz)) << (esz + log2(sizeof(row))) + */ + pos = esz + ctz32(sizeof(ARMVectorReg)); + tcg_gen_deposit_z_i32(tmp, tmp, pos, len); + + /* Row slices are always aligned and need no endian adjustment. */ + } + + /* The tile byte offset within env->zarray is the row. */ + offset = tile * sizeof(ARMVectorReg); + + /* Include the byte offset of zarray to make this relative to env. */ + offset += offsetof(CPUARMState, zarray); + tcg_gen_addi_i32(tmp, tmp, offset); + + /* Add the byte offset to env to produce the final pointer. */ + addr = tcg_temp_new_ptr(); + tcg_gen_ext_i32_ptr(addr, tmp); + tcg_temp_free_i32(tmp); + tcg_gen_add_ptr(addr, addr, cpu_env); + + return addr; +} + +static bool trans_ZERO(DisasContext *s, arg_ZERO *a) +{ + if (!dc_isar_feature(aa64_sme, s)) { + return false; + } + if (sme_za_enabled_check(s)) { + gen_helper_sme_zero(cpu_env, tcg_constant_i32(a->imm), + tcg_constant_i32(streaming_vec_reg_size(s))); + } + return true; +} + +static bool trans_MOVA(DisasContext *s, arg_MOVA *a) +{ + static gen_helper_gvec_4 * const h_fns[5] = { + gen_helper_sve_sel_zpzz_b, gen_helper_sve_sel_zpzz_h, + gen_helper_sve_sel_zpzz_s, gen_helper_sve_sel_zpzz_d, + gen_helper_sve_sel_zpzz_q + }; + static gen_helper_gvec_3 * const cz_fns[5] = { + gen_helper_sme_mova_cz_b, gen_helper_sme_mova_cz_h, + gen_helper_sme_mova_cz_s, gen_helper_sme_mova_cz_d, + gen_helper_sme_mova_cz_q, + }; + static gen_helper_gvec_3 * const zc_fns[5] = { + gen_helper_sme_mova_zc_b, gen_helper_sme_mova_zc_h, + gen_helper_sme_mova_zc_s, gen_helper_sme_mova_zc_d, + gen_helper_sme_mova_zc_q, + }; + + TCGv_ptr t_za, t_zr, t_pg; + TCGv_i32 t_desc; + int svl; + + if (!dc_isar_feature(aa64_sme, s)) { + return false; + } + if (!sme_smza_enabled_check(s)) { + return true; + } + + t_za = get_tile_rowcol(s, a->esz, a->rs, a->za_imm, a->v); + t_zr = vec_full_reg_ptr(s, a->zr); + t_pg = pred_full_reg_ptr(s, a->pg); + + svl = streaming_vec_reg_size(s); + t_desc = tcg_constant_i32(simd_desc(svl, svl, 0)); + + if (a->v) { + /* Vertical slice -- use sme mova helpers. */ + if (a->to_vec) { + zc_fns[a->esz](t_zr, t_za, t_pg, t_desc); + } else { + cz_fns[a->esz](t_za, t_zr, t_pg, t_desc); + } + } else { + /* Horizontal slice -- reuse sve sel helpers. */ + if (a->to_vec) { + h_fns[a->esz](t_zr, t_za, t_zr, t_pg, t_desc); + } else { + h_fns[a->esz](t_za, t_zr, t_za, t_pg, t_desc); + } + } + + tcg_temp_free_ptr(t_za); + tcg_temp_free_ptr(t_zr); + tcg_temp_free_ptr(t_pg); + + return true; +} + +static bool trans_LDST1(DisasContext *s, arg_LDST1 *a) +{ + typedef void GenLdSt1(TCGv_env, TCGv_ptr, TCGv_ptr, TCGv, TCGv_i32); + + /* + * Indexed by [esz][be][v][mte][st], which is (except for load/store) + * also the order in which the elements appear in the function names, + * and so how we must concatenate the pieces. 
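+     * For example, fns[MO_16][1][0][1][0] -- big-endian, horizontal,
+     * mte, load -- concatenates to gen_helper_sme_ld1h_be_h_mte via
+     * the FN_* macros below.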
+ */ + +#define FN_LS(F) { gen_helper_sme_ld1##F, gen_helper_sme_st1##F } +#define FN_MTE(F) { FN_LS(F), FN_LS(F##_mte) } +#define FN_HV(F) { FN_MTE(F##_h), FN_MTE(F##_v) } +#define FN_END(L, B) { FN_HV(L), FN_HV(B) } + + static GenLdSt1 * const fns[5][2][2][2][2] = { + FN_END(b, b), + FN_END(h_le, h_be), + FN_END(s_le, s_be), + FN_END(d_le, d_be), + FN_END(q_le, q_be), + }; + +#undef FN_LS +#undef FN_MTE +#undef FN_HV +#undef FN_END + + TCGv_ptr t_za, t_pg; + TCGv_i64 addr; + int svl, desc = 0; + bool be = s->be_data == MO_BE; + bool mte = s->mte_active[0]; + + if (!dc_isar_feature(aa64_sme, s)) { + return false; + } + if (!sme_smza_enabled_check(s)) { + return true; + } + + t_za = get_tile_rowcol(s, a->esz, a->rs, a->za_imm, a->v); + t_pg = pred_full_reg_ptr(s, a->pg); + addr = tcg_temp_new_i64(); + + tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), a->esz); + tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, a->rn)); + + if (mte) { + desc = FIELD_DP32(desc, MTEDESC, MIDX, get_mem_index(s)); + desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid); + desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma); + desc = FIELD_DP32(desc, MTEDESC, WRITE, a->st); + desc = FIELD_DP32(desc, MTEDESC, SIZEM1, (1 << a->esz) - 1); + desc <<= SVE_MTEDESC_SHIFT; + } else { + addr = clean_data_tbi(s, addr); + } + svl = streaming_vec_reg_size(s); + desc = simd_desc(svl, svl, desc); + + fns[a->esz][be][a->v][mte][a->st](cpu_env, t_za, t_pg, addr, + tcg_constant_i32(desc)); + + tcg_temp_free_ptr(t_za); + tcg_temp_free_ptr(t_pg); + tcg_temp_free_i64(addr); + return true; +} + +typedef void GenLdStR(DisasContext *, TCGv_ptr, int, int, int, int); + +static bool do_ldst_r(DisasContext *s, arg_ldstr *a, GenLdStR *fn) +{ + int svl = streaming_vec_reg_size(s); + int imm = a->imm; + TCGv_ptr base; + + if (!sme_za_enabled_check(s)) { + return true; + } + + /* ZA[n] equates to ZA0H.B[n]. */ + base = get_tile_rowcol(s, MO_8, a->rv, imm, false); + + fn(s, base, 0, svl, a->rn, imm * svl); + + tcg_temp_free_ptr(base); + return true; +} + +TRANS_FEAT(LDR, aa64_sme, do_ldst_r, a, gen_sve_ldr) +TRANS_FEAT(STR, aa64_sme, do_ldst_r, a, gen_sve_str) + +static bool do_adda(DisasContext *s, arg_adda *a, MemOp esz, + gen_helper_gvec_4 *fn) +{ + int svl = streaming_vec_reg_size(s); + uint32_t desc = simd_desc(svl, svl, 0); + TCGv_ptr za, zn, pn, pm; + + if (!sme_smza_enabled_check(s)) { + return true; + } + + /* Sum XZR+zad to find ZAd. */ + za = get_tile_rowcol(s, esz, 31, a->zad, false); + zn = vec_full_reg_ptr(s, a->zn); + pn = pred_full_reg_ptr(s, a->pn); + pm = pred_full_reg_ptr(s, a->pm); + + fn(za, zn, pn, pm, tcg_constant_i32(desc)); + + tcg_temp_free_ptr(za); + tcg_temp_free_ptr(zn); + tcg_temp_free_ptr(pn); + tcg_temp_free_ptr(pm); + return true; +} + +TRANS_FEAT(ADDHA_s, aa64_sme, do_adda, a, MO_32, gen_helper_sme_addha_s) +TRANS_FEAT(ADDVA_s, aa64_sme, do_adda, a, MO_32, gen_helper_sme_addva_s) +TRANS_FEAT(ADDHA_d, aa64_sme_i16i64, do_adda, a, MO_64, gen_helper_sme_addha_d) +TRANS_FEAT(ADDVA_d, aa64_sme_i16i64, do_adda, a, MO_64, gen_helper_sme_addva_d) + +static bool do_outprod(DisasContext *s, arg_op *a, MemOp esz, + gen_helper_gvec_5 *fn) +{ + int svl = streaming_vec_reg_size(s); + uint32_t desc = simd_desc(svl, svl, a->sub); + TCGv_ptr za, zn, zm, pn, pm; + + if (!sme_smza_enabled_check(s)) { + return true; + } + + /* Sum XZR+zad to find ZAd. 
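+     * With Rs = 31, cpu_reg() reads XZR (constant zero) inside
+     * get_tile_rowcol, so the computed slice index is simply zad.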
*/ + za = get_tile_rowcol(s, esz, 31, a->zad, false); + zn = vec_full_reg_ptr(s, a->zn); + zm = vec_full_reg_ptr(s, a->zm); + pn = pred_full_reg_ptr(s, a->pn); + pm = pred_full_reg_ptr(s, a->pm); + + fn(za, zn, zm, pn, pm, tcg_constant_i32(desc)); + + tcg_temp_free_ptr(za); + tcg_temp_free_ptr(zn); + tcg_temp_free_ptr(pn); + tcg_temp_free_ptr(pm); + return true; +} + +static bool do_outprod_fpst(DisasContext *s, arg_op *a, MemOp esz, + gen_helper_gvec_5_ptr *fn) +{ + int svl = streaming_vec_reg_size(s); + uint32_t desc = simd_desc(svl, svl, a->sub); + TCGv_ptr za, zn, zm, pn, pm, fpst; + + if (!sme_smza_enabled_check(s)) { + return true; + } + + /* Sum XZR+zad to find ZAd. */ + za = get_tile_rowcol(s, esz, 31, a->zad, false); + zn = vec_full_reg_ptr(s, a->zn); + zm = vec_full_reg_ptr(s, a->zm); + pn = pred_full_reg_ptr(s, a->pn); + pm = pred_full_reg_ptr(s, a->pm); + fpst = fpstatus_ptr(FPST_FPCR); + + fn(za, zn, zm, pn, pm, fpst, tcg_constant_i32(desc)); + + tcg_temp_free_ptr(za); + tcg_temp_free_ptr(zn); + tcg_temp_free_ptr(pn); + tcg_temp_free_ptr(pm); + tcg_temp_free_ptr(fpst); + return true; +} + +TRANS_FEAT(FMOPA_h, aa64_sme, do_outprod_fpst, a, MO_32, gen_helper_sme_fmopa_h) +TRANS_FEAT(FMOPA_s, aa64_sme, do_outprod_fpst, a, MO_32, gen_helper_sme_fmopa_s) +TRANS_FEAT(FMOPA_d, aa64_sme_f64f64, do_outprod_fpst, a, MO_64, gen_helper_sme_fmopa_d) + +/* TODO: FEAT_EBF16 */ +TRANS_FEAT(BFMOPA, aa64_sme, do_outprod, a, MO_32, gen_helper_sme_bfmopa) + +TRANS_FEAT(SMOPA_s, aa64_sme, do_outprod, a, MO_32, gen_helper_sme_smopa_s) +TRANS_FEAT(UMOPA_s, aa64_sme, do_outprod, a, MO_32, gen_helper_sme_umopa_s) +TRANS_FEAT(SUMOPA_s, aa64_sme, do_outprod, a, MO_32, gen_helper_sme_sumopa_s) +TRANS_FEAT(USMOPA_s, aa64_sme, do_outprod, a, MO_32, gen_helper_sme_usmopa_s) + +TRANS_FEAT(SMOPA_d, aa64_sme_i16i64, do_outprod, a, MO_64, gen_helper_sme_smopa_d) +TRANS_FEAT(UMOPA_d, aa64_sme_i16i64, do_outprod, a, MO_64, gen_helper_sme_umopa_d) +TRANS_FEAT(SUMOPA_d, aa64_sme_i16i64, do_outprod, a, MO_64, gen_helper_sme_sumopa_d) +TRANS_FEAT(USMOPA_d, aa64_sme_i16i64, do_outprod, a, MO_64, gen_helper_sme_usmopa_d) diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c index 62b5f3040c..41f8b12259 100644 --- a/target/arm/translate-sve.c +++ b/target/arm/translate-sve.c @@ -1286,6 +1286,19 @@ static bool trans_ADDVL(DisasContext *s, arg_ADDVL *a) return true; } +static bool trans_ADDSVL(DisasContext *s, arg_ADDSVL *a) +{ + if (!dc_isar_feature(aa64_sme, s)) { + return false; + } + if (sme_enabled_check(s)) { + TCGv_i64 rd = cpu_reg_sp(s, a->rd); + TCGv_i64 rn = cpu_reg_sp(s, a->rn); + tcg_gen_addi_i64(rd, rn, a->imm * streaming_vec_reg_size(s)); + } + return true; +} + static bool trans_ADDPL(DisasContext *s, arg_ADDPL *a) { if (!dc_isar_feature(aa64_sve, s)) { @@ -1299,6 +1312,19 @@ static bool trans_ADDPL(DisasContext *s, arg_ADDPL *a) return true; } +static bool trans_ADDSPL(DisasContext *s, arg_ADDSPL *a) +{ + if (!dc_isar_feature(aa64_sme, s)) { + return false; + } + if (sme_enabled_check(s)) { + TCGv_i64 rd = cpu_reg_sp(s, a->rd); + TCGv_i64 rn = cpu_reg_sp(s, a->rn); + tcg_gen_addi_i64(rd, rn, a->imm * streaming_pred_reg_size(s)); + } + return true; +} + static bool trans_RDVL(DisasContext *s, arg_RDVL *a) { if (!dc_isar_feature(aa64_sve, s)) { @@ -1311,6 +1337,18 @@ static bool trans_RDVL(DisasContext *s, arg_RDVL *a) return true; } +static bool trans_RDSVL(DisasContext *s, arg_RDSVL *a) +{ + if (!dc_isar_feature(aa64_sme, s)) { + return false; + } + if (sme_enabled_check(s)) { + TCGv_i64 reg 
= cpu_reg(s, a->rd); + tcg_gen_movi_i64(reg, a->imm * streaming_vec_reg_size(s)); + } + return true; +} + /* *** SVE Compute Vector Address Group */ @@ -1320,10 +1358,10 @@ static bool do_adr(DisasContext *s, arg_rrri *a, gen_helper_gvec_3 *fn) return gen_gvec_ool_zzz(s, fn, a->rd, a->rn, a->rm, a->imm); } -TRANS_FEAT(ADR_p32, aa64_sve, do_adr, a, gen_helper_sve_adr_p32) -TRANS_FEAT(ADR_p64, aa64_sve, do_adr, a, gen_helper_sve_adr_p64) -TRANS_FEAT(ADR_s32, aa64_sve, do_adr, a, gen_helper_sve_adr_s32) -TRANS_FEAT(ADR_u32, aa64_sve, do_adr, a, gen_helper_sve_adr_u32) +TRANS_FEAT_NONSTREAMING(ADR_p32, aa64_sve, do_adr, a, gen_helper_sve_adr_p32) +TRANS_FEAT_NONSTREAMING(ADR_p64, aa64_sve, do_adr, a, gen_helper_sve_adr_p64) +TRANS_FEAT_NONSTREAMING(ADR_s32, aa64_sve, do_adr, a, gen_helper_sve_adr_s32) +TRANS_FEAT_NONSTREAMING(ADR_u32, aa64_sve, do_adr, a, gen_helper_sve_adr_u32) /* *** SVE Integer Misc - Unpredicated Group @@ -1333,14 +1371,15 @@ static gen_helper_gvec_2 * const fexpa_fns[4] = { NULL, gen_helper_sve_fexpa_h, gen_helper_sve_fexpa_s, gen_helper_sve_fexpa_d, }; -TRANS_FEAT(FEXPA, aa64_sve, gen_gvec_ool_zz, - fexpa_fns[a->esz], a->rd, a->rn, 0) +TRANS_FEAT_NONSTREAMING(FEXPA, aa64_sve, gen_gvec_ool_zz, + fexpa_fns[a->esz], a->rd, a->rn, 0) static gen_helper_gvec_3 * const ftssel_fns[4] = { NULL, gen_helper_sve_ftssel_h, gen_helper_sve_ftssel_s, gen_helper_sve_ftssel_d, }; -TRANS_FEAT(FTSSEL, aa64_sve, gen_gvec_ool_arg_zzz, ftssel_fns[a->esz], a, 0) +TRANS_FEAT_NONSTREAMING(FTSSEL, aa64_sve, gen_gvec_ool_arg_zzz, + ftssel_fns[a->esz], a, 0) /* *** SVE Predicate Logical Operations Group @@ -1785,7 +1824,8 @@ static bool do_predset(DisasContext *s, int esz, int rd, int pat, bool setflag) TRANS_FEAT(PTRUE, aa64_sve, do_predset, a->esz, a->rd, a->pat, a->s) /* Note pat == 31 is #all, to set all elements. */ -TRANS_FEAT(SETFFR, aa64_sve, do_predset, 0, FFR_PRED_NUM, 31, false) +TRANS_FEAT_NONSTREAMING(SETFFR, aa64_sve, + do_predset, 0, FFR_PRED_NUM, 31, false) /* Note pat == 32 is #unimp, to set no elements. */ TRANS_FEAT(PFALSE, aa64_sve, do_predset, 0, a->rd, 32, false) @@ -1799,11 +1839,13 @@ static bool trans_RDFFR_p(DisasContext *s, arg_RDFFR_p *a) .rd = a->rd, .pg = a->pg, .s = a->s, .rn = FFR_PRED_NUM, .rm = FFR_PRED_NUM, }; + + s->is_nonstreaming = true; return trans_AND_pppp(s, &alt_a); } -TRANS_FEAT(RDFFR, aa64_sve, do_mov_p, a->rd, FFR_PRED_NUM) -TRANS_FEAT(WRFFR, aa64_sve, do_mov_p, FFR_PRED_NUM, a->rn) +TRANS_FEAT_NONSTREAMING(RDFFR, aa64_sve, do_mov_p, a->rd, FFR_PRED_NUM) +TRANS_FEAT_NONSTREAMING(WRFFR, aa64_sve, do_mov_p, FFR_PRED_NUM, a->rn) static bool do_pfirst_pnext(DisasContext *s, arg_rr_esz *a, void (*gen_fn)(TCGv_i32, TCGv_ptr, @@ -2533,7 +2575,8 @@ TRANS_FEAT(TRN2_q, aa64_sve_f64mm, gen_gvec_ool_arg_zzz, static gen_helper_gvec_3 * const compact_fns[4] = { NULL, NULL, gen_helper_sve_compact_s, gen_helper_sve_compact_d }; -TRANS_FEAT(COMPACT, aa64_sve, gen_gvec_ool_arg_zpz, compact_fns[a->esz], a, 0) +TRANS_FEAT_NONSTREAMING(COMPACT, aa64_sve, gen_gvec_ool_arg_zpz, + compact_fns[a->esz], a, 0) /* Call the helper that computes the ARM LastActiveElement pseudocode * function, scaled by the element size. This includes the not found @@ -2858,6 +2901,8 @@ TRANS_FEAT(REVH, aa64_sve, gen_gvec_ool_arg_zpz, revh_fns[a->esz], a, 0) TRANS_FEAT(REVW, aa64_sve, gen_gvec_ool_arg_zpz, a->esz == 3 ? 
gen_helper_sve_revw_d : NULL, a, 0) +TRANS_FEAT(REVD, aa64_sme, gen_gvec_ool_arg_zpz, gen_helper_sme_revd_q, a, 0) + TRANS_FEAT(SPLICE, aa64_sve, gen_gvec_ool_arg_zpzz, gen_helper_sve_splice, a, a->esz) @@ -3856,9 +3901,9 @@ static gen_helper_gvec_3_ptr * const ftmad_fns[4] = { NULL, gen_helper_sve_ftmad_h, gen_helper_sve_ftmad_s, gen_helper_sve_ftmad_d, }; -TRANS_FEAT(FTMAD, aa64_sve, gen_gvec_fpst_zzz, - ftmad_fns[a->esz], a->rd, a->rn, a->rm, a->imm, - a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR) +TRANS_FEAT_NONSTREAMING(FTMAD, aa64_sve, gen_gvec_fpst_zzz, + ftmad_fns[a->esz], a->rd, a->rn, a->rm, a->imm, + a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR) /* *** SVE Floating Point Accumulating Reduction Group @@ -3881,6 +3926,7 @@ static bool trans_FADDA(DisasContext *s, arg_rprr_esz *a) if (a->esz == 0 || !dc_isar_feature(aa64_sve, s)) { return false; } + s->is_nonstreaming = true; if (!sve_access_check(s)) { return true; } @@ -3918,12 +3964,18 @@ static bool trans_FADDA(DisasContext *s, arg_rprr_esz *a) DO_FP3(FADD_zzz, fadd) DO_FP3(FSUB_zzz, fsub) DO_FP3(FMUL_zzz, fmul) -DO_FP3(FTSMUL, ftsmul) DO_FP3(FRECPS, recps) DO_FP3(FRSQRTS, rsqrts) #undef DO_FP3 +static gen_helper_gvec_3_ptr * const ftsmul_fns[4] = { + NULL, gen_helper_gvec_ftsmul_h, + gen_helper_gvec_ftsmul_s, gen_helper_gvec_ftsmul_d +}; +TRANS_FEAT_NONSTREAMING(FTSMUL, aa64_sve, gen_gvec_fpst_arg_zzz, + ftsmul_fns[a->esz], a, 0) + /* *** SVE Floating Point Arithmetic - Predicated Group */ @@ -4256,7 +4308,8 @@ TRANS_FEAT(UCVTF_dd, aa64_sve, gen_gvec_fpst_arg_zpz, * The load should begin at the address Rn + IMM. */ -static void do_ldr(DisasContext *s, uint32_t vofs, int len, int rn, int imm) +void gen_sve_ldr(DisasContext *s, TCGv_ptr base, int vofs, + int len, int rn, int imm) { int len_align = QEMU_ALIGN_DOWN(len, 8); int len_remain = len % 8; @@ -4282,7 +4335,7 @@ static void do_ldr(DisasContext *s, uint32_t vofs, int len, int rn, int imm) t0 = tcg_temp_new_i64(); for (i = 0; i < len_align; i += 8) { tcg_gen_qemu_ld_i64(t0, clean_addr, midx, MO_LEUQ); - tcg_gen_st_i64(t0, cpu_env, vofs + i); + tcg_gen_st_i64(t0, base, vofs + i); tcg_gen_addi_i64(clean_addr, clean_addr, 8); } tcg_temp_free_i64(t0); @@ -4295,6 +4348,12 @@ static void do_ldr(DisasContext *s, uint32_t vofs, int len, int rn, int imm) clean_addr = new_tmp_a64_local(s); tcg_gen_mov_i64(clean_addr, t0); + if (base != cpu_env) { + TCGv_ptr b = tcg_temp_local_new_ptr(); + tcg_gen_mov_ptr(b, base); + base = b; + } + gen_set_label(loop); t0 = tcg_temp_new_i64(); @@ -4302,7 +4361,7 @@ static void do_ldr(DisasContext *s, uint32_t vofs, int len, int rn, int imm) tcg_gen_addi_i64(clean_addr, clean_addr, 8); tp = tcg_temp_new_ptr(); - tcg_gen_add_ptr(tp, cpu_env, i); + tcg_gen_add_ptr(tp, base, i); tcg_gen_addi_ptr(i, i, 8); tcg_gen_st_i64(t0, tp, vofs); tcg_temp_free_ptr(tp); @@ -4310,6 +4369,11 @@ static void do_ldr(DisasContext *s, uint32_t vofs, int len, int rn, int imm) tcg_gen_brcondi_ptr(TCG_COND_LTU, i, len_align, loop); tcg_temp_free_ptr(i); + + if (base != cpu_env) { + tcg_temp_free_ptr(base); + assert(len_remain == 0); + } } /* @@ -4338,13 +4402,14 @@ static void do_ldr(DisasContext *s, uint32_t vofs, int len, int rn, int imm) default: g_assert_not_reached(); } - tcg_gen_st_i64(t0, cpu_env, vofs + len_align); + tcg_gen_st_i64(t0, base, vofs + len_align); tcg_temp_free_i64(t0); } } /* Similarly for stores. 
*/ -static void do_str(DisasContext *s, uint32_t vofs, int len, int rn, int imm) +void gen_sve_str(DisasContext *s, TCGv_ptr base, int vofs, + int len, int rn, int imm) { int len_align = QEMU_ALIGN_DOWN(len, 8); int len_remain = len % 8; @@ -4370,7 +4435,7 @@ static void do_str(DisasContext *s, uint32_t vofs, int len, int rn, int imm) t0 = tcg_temp_new_i64(); for (i = 0; i < len_align; i += 8) { - tcg_gen_ld_i64(t0, cpu_env, vofs + i); + tcg_gen_ld_i64(t0, base, vofs + i); tcg_gen_qemu_st_i64(t0, clean_addr, midx, MO_LEUQ); tcg_gen_addi_i64(clean_addr, clean_addr, 8); } @@ -4384,11 +4449,17 @@ static void do_str(DisasContext *s, uint32_t vofs, int len, int rn, int imm) clean_addr = new_tmp_a64_local(s); tcg_gen_mov_i64(clean_addr, t0); + if (base != cpu_env) { + TCGv_ptr b = tcg_temp_local_new_ptr(); + tcg_gen_mov_ptr(b, base); + base = b; + } + gen_set_label(loop); t0 = tcg_temp_new_i64(); tp = tcg_temp_new_ptr(); - tcg_gen_add_ptr(tp, cpu_env, i); + tcg_gen_add_ptr(tp, base, i); tcg_gen_ld_i64(t0, tp, vofs); tcg_gen_addi_ptr(i, i, 8); tcg_temp_free_ptr(tp); @@ -4399,12 +4470,17 @@ static void do_str(DisasContext *s, uint32_t vofs, int len, int rn, int imm) tcg_gen_brcondi_ptr(TCG_COND_LTU, i, len_align, loop); tcg_temp_free_ptr(i); + + if (base != cpu_env) { + tcg_temp_free_ptr(base); + assert(len_remain == 0); + } } /* Predicate register stores can be any multiple of 2. */ if (len_remain) { t0 = tcg_temp_new_i64(); - tcg_gen_ld_i64(t0, cpu_env, vofs + len_align); + tcg_gen_ld_i64(t0, base, vofs + len_align); switch (len_remain) { case 2: @@ -4436,7 +4512,7 @@ static bool trans_LDR_zri(DisasContext *s, arg_rri *a) if (sve_access_check(s)) { int size = vec_full_reg_size(s); int off = vec_full_reg_offset(s, a->rd); - do_ldr(s, off, size, a->rn, a->imm * size); + gen_sve_ldr(s, cpu_env, off, size, a->rn, a->imm * size); } return true; } @@ -4449,7 +4525,7 @@ static bool trans_LDR_pri(DisasContext *s, arg_rri *a) if (sve_access_check(s)) { int size = pred_full_reg_size(s); int off = pred_full_reg_offset(s, a->rd); - do_ldr(s, off, size, a->rn, a->imm * size); + gen_sve_ldr(s, cpu_env, off, size, a->rn, a->imm * size); } return true; } @@ -4462,7 +4538,7 @@ static bool trans_STR_zri(DisasContext *s, arg_rri *a) if (sve_access_check(s)) { int size = vec_full_reg_size(s); int off = vec_full_reg_offset(s, a->rd); - do_str(s, off, size, a->rn, a->imm * size); + gen_sve_str(s, cpu_env, off, size, a->rn, a->imm * size); } return true; } @@ -4475,7 +4551,7 @@ static bool trans_STR_pri(DisasContext *s, arg_rri *a) if (sve_access_check(s)) { int size = pred_full_reg_size(s); int off = pred_full_reg_offset(s, a->rd); - do_str(s, off, size, a->rn, a->imm * size); + gen_sve_str(s, cpu_env, off, size, a->rn, a->imm * size); } return true; } @@ -4793,6 +4869,7 @@ static bool trans_LDFF1_zprr(DisasContext *s, arg_rprr_load *a) if (!dc_isar_feature(aa64_sve, s)) { return false; } + s->is_nonstreaming = true; if (sve_access_check(s)) { TCGv_i64 addr = new_tmp_a64(s); tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), dtype_msz(a->dtype)); @@ -4894,6 +4971,7 @@ static bool trans_LDNF1_zpri(DisasContext *s, arg_rpri_load *a) if (!dc_isar_feature(aa64_sve, s)) { return false; } + s->is_nonstreaming = true; if (sve_access_check(s)) { int vsz = vec_full_reg_size(s); int elements = vsz >> dtype_esz[a->dtype]; @@ -5048,6 +5126,7 @@ static bool trans_LD1RO_zprr(DisasContext *s, arg_rprr_load *a) if (a->rm == 31) { return false; } + s->is_nonstreaming = true; if (sve_access_check(s)) { TCGv_i64 addr = new_tmp_a64(s); 
tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), dtype_msz(a->dtype)); @@ -5062,6 +5141,7 @@ static bool trans_LD1RO_zpri(DisasContext *s, arg_rpri_load *a) if (!dc_isar_feature(aa64_sve_f64mm, s)) { return false; } + s->is_nonstreaming = true; if (sve_access_check(s)) { TCGv_i64 addr = new_tmp_a64(s); tcg_gen_addi_i64(addr, cpu_reg_sp(s, a->rn), a->imm * 32); @@ -5657,6 +5737,7 @@ static bool trans_LD1_zprz(DisasContext *s, arg_LD1_zprz *a) if (!dc_isar_feature(aa64_sve, s)) { return false; } + s->is_nonstreaming = true; if (!sve_access_check(s)) { return true; } @@ -5688,6 +5769,7 @@ static bool trans_LD1_zpiz(DisasContext *s, arg_LD1_zpiz *a) if (!dc_isar_feature(aa64_sve, s)) { return false; } + s->is_nonstreaming = true; if (!sve_access_check(s)) { return true; } @@ -5722,6 +5804,7 @@ static bool trans_LDNT1_zprz(DisasContext *s, arg_LD1_zprz *a) if (!dc_isar_feature(aa64_sve2, s)) { return false; } + s->is_nonstreaming = true; if (!sve_access_check(s)) { return true; } @@ -5845,6 +5928,7 @@ static bool trans_ST1_zprz(DisasContext *s, arg_ST1_zprz *a) if (!dc_isar_feature(aa64_sve, s)) { return false; } + s->is_nonstreaming = true; if (!sve_access_check(s)) { return true; } @@ -5875,6 +5959,7 @@ static bool trans_ST1_zpiz(DisasContext *s, arg_ST1_zpiz *a) if (!dc_isar_feature(aa64_sve, s)) { return false; } + s->is_nonstreaming = true; if (!sve_access_check(s)) { return true; } @@ -5909,6 +5994,7 @@ static bool trans_STNT1_zprz(DisasContext *s, arg_ST1_zprz *a) if (!dc_isar_feature(aa64_sve2, s)) { return false; } + s->is_nonstreaming = true; if (!sve_access_check(s)) { return true; } @@ -5953,6 +6039,17 @@ static bool trans_PRF_rr(DisasContext *s, arg_PRF_rr *a) return true; } +static bool trans_PRF_ns(DisasContext *s, arg_PRF_ns *a) +{ + if (!dc_isar_feature(aa64_sve, s)) { + return false; + } + /* Prefetch is a nop within QEMU. */ + s->is_nonstreaming = true; + (void)sve_access_check(s); + return true; +} + /* * Move Prefix * @@ -6181,9 +6278,13 @@ static bool do_trans_pmull(DisasContext *s, arg_rrr_esz *a, bool sel) gen_helper_gvec_pmull_q, gen_helper_sve2_pmull_h, NULL, gen_helper_sve2_pmull_d, }; - if (a->esz == 0 - ? 
!dc_isar_feature(aa64_sve2_pmull128, s) - : !dc_isar_feature(aa64_sve, s)) { + + if (a->esz == 0) { + if (!dc_isar_feature(aa64_sve2_pmull128, s)) { + return false; + } + s->is_nonstreaming = true; + } else if (!dc_isar_feature(aa64_sve, s)) { return false; } return gen_gvec_ool_arg_zzz(s, fns[a->esz], a, sel); @@ -6371,22 +6472,22 @@ static gen_helper_gvec_3 * const bext_fns[4] = { gen_helper_sve2_bext_b, gen_helper_sve2_bext_h, gen_helper_sve2_bext_s, gen_helper_sve2_bext_d, }; -TRANS_FEAT(BEXT, aa64_sve2_bitperm, gen_gvec_ool_arg_zzz, - bext_fns[a->esz], a, 0) +TRANS_FEAT_NONSTREAMING(BEXT, aa64_sve2_bitperm, gen_gvec_ool_arg_zzz, + bext_fns[a->esz], a, 0) static gen_helper_gvec_3 * const bdep_fns[4] = { gen_helper_sve2_bdep_b, gen_helper_sve2_bdep_h, gen_helper_sve2_bdep_s, gen_helper_sve2_bdep_d, }; -TRANS_FEAT(BDEP, aa64_sve2_bitperm, gen_gvec_ool_arg_zzz, - bdep_fns[a->esz], a, 0) +TRANS_FEAT_NONSTREAMING(BDEP, aa64_sve2_bitperm, gen_gvec_ool_arg_zzz, + bdep_fns[a->esz], a, 0) static gen_helper_gvec_3 * const bgrp_fns[4] = { gen_helper_sve2_bgrp_b, gen_helper_sve2_bgrp_h, gen_helper_sve2_bgrp_s, gen_helper_sve2_bgrp_d, }; -TRANS_FEAT(BGRP, aa64_sve2_bitperm, gen_gvec_ool_arg_zzz, - bgrp_fns[a->esz], a, 0) +TRANS_FEAT_NONSTREAMING(BGRP, aa64_sve2_bitperm, gen_gvec_ool_arg_zzz, + bgrp_fns[a->esz], a, 0) static gen_helper_gvec_3 * const cadd_fns[4] = { gen_helper_sve2_cadd_b, gen_helper_sve2_cadd_h, @@ -7094,21 +7195,21 @@ DO_SVE2_ZZZ_NARROW(RSUBHNT, rsubhnt) static gen_helper_gvec_flags_4 * const match_fns[4] = { gen_helper_sve2_match_ppzz_b, gen_helper_sve2_match_ppzz_h, NULL, NULL }; -TRANS_FEAT(MATCH, aa64_sve2, do_ppzz_flags, a, match_fns[a->esz]) +TRANS_FEAT_NONSTREAMING(MATCH, aa64_sve2, do_ppzz_flags, a, match_fns[a->esz]) static gen_helper_gvec_flags_4 * const nmatch_fns[4] = { gen_helper_sve2_nmatch_ppzz_b, gen_helper_sve2_nmatch_ppzz_h, NULL, NULL }; -TRANS_FEAT(NMATCH, aa64_sve2, do_ppzz_flags, a, nmatch_fns[a->esz]) +TRANS_FEAT_NONSTREAMING(NMATCH, aa64_sve2, do_ppzz_flags, a, nmatch_fns[a->esz]) static gen_helper_gvec_4 * const histcnt_fns[4] = { NULL, NULL, gen_helper_sve2_histcnt_s, gen_helper_sve2_histcnt_d }; -TRANS_FEAT(HISTCNT, aa64_sve2, gen_gvec_ool_arg_zpzz, - histcnt_fns[a->esz], a, 0) +TRANS_FEAT_NONSTREAMING(HISTCNT, aa64_sve2, gen_gvec_ool_arg_zpzz, + histcnt_fns[a->esz], a, 0) -TRANS_FEAT(HISTSEG, aa64_sve2, gen_gvec_ool_arg_zzz, - a->esz == 0 ? gen_helper_sve2_histseg : NULL, a, 0) +TRANS_FEAT_NONSTREAMING(HISTSEG, aa64_sve2, gen_gvec_ool_arg_zzz, + a->esz == 0 ? gen_helper_sve2_histseg : NULL, a, 0) DO_ZPZZ_FP(FADDP, aa64_sve2, sve2_faddp_zpzz) DO_ZPZZ_FP(FMAXNMP, aa64_sve2, sve2_fmaxnmp_zpzz) @@ -7120,10 +7221,12 @@ DO_ZPZZ_FP(FMINP, aa64_sve2, sve2_fminp_zpzz) * SVE Integer Multiply-Add (unpredicated) */ -TRANS_FEAT(FMMLA_s, aa64_sve_f32mm, gen_gvec_fpst_zzzz, gen_helper_fmmla_s, - a->rd, a->rn, a->rm, a->ra, 0, FPST_FPCR) -TRANS_FEAT(FMMLA_d, aa64_sve_f64mm, gen_gvec_fpst_zzzz, gen_helper_fmmla_d, - a->rd, a->rn, a->rm, a->ra, 0, FPST_FPCR) +TRANS_FEAT_NONSTREAMING(FMMLA_s, aa64_sve_f32mm, gen_gvec_fpst_zzzz, + gen_helper_fmmla_s, a->rd, a->rn, a->rm, a->ra, + 0, FPST_FPCR) +TRANS_FEAT_NONSTREAMING(FMMLA_d, aa64_sve_f64mm, gen_gvec_fpst_zzzz, + gen_helper_fmmla_d, a->rd, a->rn, a->rm, a->ra, + 0, FPST_FPCR) static gen_helper_gvec_4 * const sqdmlal_zzzw_fns[] = { NULL, gen_helper_sve2_sqdmlal_zzzw_h, @@ -7220,20 +7323,21 @@ TRANS_FEAT(SQRDCMLAH_zzzz, aa64_sve2, gen_gvec_ool_zzzz, TRANS_FEAT(USDOT_zzzz, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz, a->esz == 2 ? 
gen_helper_gvec_usdot_b : NULL, a, 0) -TRANS_FEAT(AESMC, aa64_sve2_aes, gen_gvec_ool_zz, - gen_helper_crypto_aesmc, a->rd, a->rd, a->decrypt) +TRANS_FEAT_NONSTREAMING(AESMC, aa64_sve2_aes, gen_gvec_ool_zz, + gen_helper_crypto_aesmc, a->rd, a->rd, a->decrypt) -TRANS_FEAT(AESE, aa64_sve2_aes, gen_gvec_ool_arg_zzz, - gen_helper_crypto_aese, a, false) -TRANS_FEAT(AESD, aa64_sve2_aes, gen_gvec_ool_arg_zzz, - gen_helper_crypto_aese, a, true) +TRANS_FEAT_NONSTREAMING(AESE, aa64_sve2_aes, gen_gvec_ool_arg_zzz, + gen_helper_crypto_aese, a, false) +TRANS_FEAT_NONSTREAMING(AESD, aa64_sve2_aes, gen_gvec_ool_arg_zzz, + gen_helper_crypto_aese, a, true) -TRANS_FEAT(SM4E, aa64_sve2_sm4, gen_gvec_ool_arg_zzz, - gen_helper_crypto_sm4e, a, 0) -TRANS_FEAT(SM4EKEY, aa64_sve2_sm4, gen_gvec_ool_arg_zzz, - gen_helper_crypto_sm4ekey, a, 0) +TRANS_FEAT_NONSTREAMING(SM4E, aa64_sve2_sm4, gen_gvec_ool_arg_zzz, + gen_helper_crypto_sm4e, a, 0) +TRANS_FEAT_NONSTREAMING(SM4EKEY, aa64_sve2_sm4, gen_gvec_ool_arg_zzz, + gen_helper_crypto_sm4ekey, a, 0) -TRANS_FEAT(RAX1, aa64_sve2_sha3, gen_gvec_fn_arg_zzz, gen_gvec_rax1, a) +TRANS_FEAT_NONSTREAMING(RAX1, aa64_sve2_sha3, gen_gvec_fn_arg_zzz, + gen_gvec_rax1, a) TRANS_FEAT(FCVTNT_sh, aa64_sve2, gen_gvec_fpst_arg_zpz, gen_helper_sve2_fcvtnt_sh, a, 0, FPST_FPCR) @@ -7284,20 +7388,20 @@ TRANS_FEAT(FMLALT_zzxw, aa64_sve2, do_FMLAL_zzxw, a, false, true) TRANS_FEAT(FMLSLB_zzxw, aa64_sve2, do_FMLAL_zzxw, a, true, false) TRANS_FEAT(FMLSLT_zzxw, aa64_sve2, do_FMLAL_zzxw, a, true, true) -TRANS_FEAT(SMMLA, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz, - gen_helper_gvec_smmla_b, a, 0) -TRANS_FEAT(USMMLA, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz, - gen_helper_gvec_usmmla_b, a, 0) -TRANS_FEAT(UMMLA, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz, - gen_helper_gvec_ummla_b, a, 0) +TRANS_FEAT_NONSTREAMING(SMMLA, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz, + gen_helper_gvec_smmla_b, a, 0) +TRANS_FEAT_NONSTREAMING(USMMLA, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz, + gen_helper_gvec_usmmla_b, a, 0) +TRANS_FEAT_NONSTREAMING(UMMLA, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz, + gen_helper_gvec_ummla_b, a, 0) TRANS_FEAT(BFDOT_zzzz, aa64_sve_bf16, gen_gvec_ool_arg_zzzz, gen_helper_gvec_bfdot, a, 0) TRANS_FEAT(BFDOT_zzxz, aa64_sve_bf16, gen_gvec_ool_arg_zzxz, gen_helper_gvec_bfdot_idx, a) -TRANS_FEAT(BFMMLA, aa64_sve_bf16, gen_gvec_ool_arg_zzzz, - gen_helper_gvec_bfmmla, a, 0) +TRANS_FEAT_NONSTREAMING(BFMMLA, aa64_sve_bf16, gen_gvec_ool_arg_zzzz, + gen_helper_gvec_bfmmla, a, 0) static bool do_BFMLAL_zzzw(DisasContext *s, arg_rrrr_esz *a, bool sel) { @@ -7317,3 +7421,162 @@ static bool do_BFMLAL_zzxw(DisasContext *s, arg_rrxr_esz *a, bool sel) TRANS_FEAT(BFMLALB_zzxw, aa64_sve_bf16, do_BFMLAL_zzxw, a, false) TRANS_FEAT(BFMLALT_zzxw, aa64_sve_bf16, do_BFMLAL_zzxw, a, true) + +static bool trans_PSEL(DisasContext *s, arg_psel *a) +{ + int vl = vec_full_reg_size(s); + int pl = pred_gvec_reg_size(s); + int elements = vl >> a->esz; + TCGv_i64 tmp, didx, dbit; + TCGv_ptr ptr; + + if (!dc_isar_feature(aa64_sme, s)) { + return false; + } + if (!sve_access_check(s)) { + return true; + } + + tmp = tcg_temp_new_i64(); + dbit = tcg_temp_new_i64(); + didx = tcg_temp_new_i64(); + ptr = tcg_temp_new_ptr(); + + /* Compute the predicate element. */ + tcg_gen_addi_i64(tmp, cpu_reg(s, a->rv), a->imm); + if (is_power_of_2(elements)) { + tcg_gen_andi_i64(tmp, tmp, elements - 1); + } else { + tcg_gen_remu_i64(tmp, tmp, tcg_constant_i64(elements)); + } + + /* Extract the predicate byte and bit indices. 
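+     * For example, with esz == MO_32 and element index 5, tmp becomes
+     * 5 << 2 = 20: the flag is bit 4 (20 & 7) of predicate byte 2
+     * (20 >> 3).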
*/ + tcg_gen_shli_i64(tmp, tmp, a->esz); + tcg_gen_andi_i64(dbit, tmp, 7); + tcg_gen_shri_i64(didx, tmp, 3); + if (HOST_BIG_ENDIAN) { + tcg_gen_xori_i64(didx, didx, 7); + } + + /* Load the predicate word. */ + tcg_gen_trunc_i64_ptr(ptr, didx); + tcg_gen_add_ptr(ptr, ptr, cpu_env); + tcg_gen_ld8u_i64(tmp, ptr, pred_full_reg_offset(s, a->pm)); + + /* Extract the predicate bit and replicate to MO_64. */ + tcg_gen_shr_i64(tmp, tmp, dbit); + tcg_gen_andi_i64(tmp, tmp, 1); + tcg_gen_neg_i64(tmp, tmp); + + /* Apply to either copy the source, or write zeros. */ + tcg_gen_gvec_ands(MO_64, pred_full_reg_offset(s, a->pd), + pred_full_reg_offset(s, a->pn), tmp, pl, pl); + + tcg_temp_free_i64(tmp); + tcg_temp_free_i64(dbit); + tcg_temp_free_i64(didx); + tcg_temp_free_ptr(ptr); + return true; +} + +static void gen_sclamp_i32(TCGv_i32 d, TCGv_i32 n, TCGv_i32 m, TCGv_i32 a) +{ + tcg_gen_smax_i32(d, a, n); + tcg_gen_smin_i32(d, d, m); +} + +static void gen_sclamp_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 a) +{ + tcg_gen_smax_i64(d, a, n); + tcg_gen_smin_i64(d, d, m); +} + +static void gen_sclamp_vec(unsigned vece, TCGv_vec d, TCGv_vec n, + TCGv_vec m, TCGv_vec a) +{ + tcg_gen_smax_vec(vece, d, a, n); + tcg_gen_smin_vec(vece, d, d, m); +} + +static void gen_sclamp(unsigned vece, uint32_t d, uint32_t n, uint32_t m, + uint32_t a, uint32_t oprsz, uint32_t maxsz) +{ + static const TCGOpcode vecop[] = { + INDEX_op_smin_vec, INDEX_op_smax_vec, 0 + }; + static const GVecGen4 ops[4] = { + { .fniv = gen_sclamp_vec, + .fno = gen_helper_gvec_sclamp_b, + .opt_opc = vecop, + .vece = MO_8 }, + { .fniv = gen_sclamp_vec, + .fno = gen_helper_gvec_sclamp_h, + .opt_opc = vecop, + .vece = MO_16 }, + { .fni4 = gen_sclamp_i32, + .fniv = gen_sclamp_vec, + .fno = gen_helper_gvec_sclamp_s, + .opt_opc = vecop, + .vece = MO_32 }, + { .fni8 = gen_sclamp_i64, + .fniv = gen_sclamp_vec, + .fno = gen_helper_gvec_sclamp_d, + .opt_opc = vecop, + .vece = MO_64, + .prefer_i64 = TCG_TARGET_REG_BITS == 64 } + }; + tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &ops[vece]); +} + +TRANS_FEAT(SCLAMP, aa64_sme, gen_gvec_fn_arg_zzzz, gen_sclamp, a) + +static void gen_uclamp_i32(TCGv_i32 d, TCGv_i32 n, TCGv_i32 m, TCGv_i32 a) +{ + tcg_gen_umax_i32(d, a, n); + tcg_gen_umin_i32(d, d, m); +} + +static void gen_uclamp_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 a) +{ + tcg_gen_umax_i64(d, a, n); + tcg_gen_umin_i64(d, d, m); +} + +static void gen_uclamp_vec(unsigned vece, TCGv_vec d, TCGv_vec n, + TCGv_vec m, TCGv_vec a) +{ + tcg_gen_umax_vec(vece, d, a, n); + tcg_gen_umin_vec(vece, d, d, m); +} + +static void gen_uclamp(unsigned vece, uint32_t d, uint32_t n, uint32_t m, + uint32_t a, uint32_t oprsz, uint32_t maxsz) +{ + static const TCGOpcode vecop[] = { + INDEX_op_umin_vec, INDEX_op_umax_vec, 0 + }; + static const GVecGen4 ops[4] = { + { .fniv = gen_uclamp_vec, + .fno = gen_helper_gvec_uclamp_b, + .opt_opc = vecop, + .vece = MO_8 }, + { .fniv = gen_uclamp_vec, + .fno = gen_helper_gvec_uclamp_h, + .opt_opc = vecop, + .vece = MO_16 }, + { .fni4 = gen_uclamp_i32, + .fniv = gen_uclamp_vec, + .fno = gen_helper_gvec_uclamp_s, + .opt_opc = vecop, + .vece = MO_32 }, + { .fni8 = gen_uclamp_i64, + .fniv = gen_uclamp_vec, + .fno = gen_helper_gvec_uclamp_d, + .opt_opc = vecop, + .vece = MO_64, + .prefer_i64 = TCG_TARGET_REG_BITS == 64 } + }; + tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &ops[vece]); +} + +TRANS_FEAT(UCLAMP, aa64_sme, gen_gvec_fn_arg_zzzz, gen_uclamp, a) diff --git a/target/arm/translate-vfp.c b/target/arm/translate-vfp.c index 
82fdbcae53..bd5ae27d09 100644 --- a/target/arm/translate-vfp.c +++ b/target/arm/translate-vfp.c @@ -234,6 +234,18 @@ static bool vfp_access_check_a(DisasContext *s, bool ignore_vfp_enabled) return false; } + /* + * Note that rebuild_hflags_a32 has already accounted for being in EL0 + * and the higher EL in A64 mode, etc. Unlike A64 mode, there do not + * appear to be any insns which touch VFP which are allowed. + */ + if (s->sme_trap_nonstreaming) { + gen_exception_insn(s, s->pc_curr, EXCP_UDEF, + syn_smetrap(SME_ET_Streaming, + s->base.pc_next - s->pc_curr == 2)); + return false; + } + if (!s->vfp_enabled && !ignore_vfp_enabled) { assert(!arm_dc_feature(s, ARM_FEATURE_M)); unallocated_encoding(s); diff --git a/target/arm/translate.c b/target/arm/translate.c index 6617de775f..4ffb095c73 100644 --- a/target/arm/translate.c +++ b/target/arm/translate.c @@ -9378,6 +9378,8 @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) dc->vec_len = EX_TBFLAG_A32(tb_flags, VECLEN); dc->vec_stride = EX_TBFLAG_A32(tb_flags, VECSTRIDE); } + dc->sme_trap_nonstreaming = + EX_TBFLAG_A32(tb_flags, SME_TRAP_NONSTREAMING); } dc->cp_regs = cpu->cp_regs; dc->features = env->features; diff --git a/target/arm/translate.h b/target/arm/translate.h index 22fd882368..af5d4a7086 100644 --- a/target/arm/translate.h +++ b/target/arm/translate.h @@ -102,6 +102,10 @@ typedef struct DisasContext { bool pstate_sm; /* True if PSTATE.ZA is set. */ bool pstate_za; + /* True if non-streaming insns should raise an SME Streaming exception. */ + bool sme_trap_nonstreaming; + /* True if the current instruction is non-streaming. */ + bool is_nonstreaming; /* True if MVE insns are definitely not predicated by VPR or LTPSIZE */ bool mve_no_pred; /* @@ -152,6 +156,11 @@ static inline int plus_2(DisasContext *s, int x) return x + 2; } +static inline int plus_12(DisasContext *s, int x) +{ + return x + 12; +} + static inline int times_2(DisasContext *s, int x) { return x * 2; @@ -562,4 +571,11 @@ uint64_t asimd_imm_const(uint32_t imm, int cmode, int op); static bool trans_##NAME(DisasContext *s, arg_##NAME *a) \ { return dc_isar_feature(FEAT, s) && FUNC(s, __VA_ARGS__); } +#define TRANS_FEAT_NONSTREAMING(NAME, FEAT, FUNC, ...) 
\ + static bool trans_##NAME(DisasContext *s, arg_##NAME *a) \ + { \ + s->is_nonstreaming = true; \ + return dc_isar_feature(FEAT, s) && FUNC(s, __VA_ARGS__); \ + } + #endif /* TARGET_ARM_TRANSLATE_H */ diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c index 9a9c034e36..f59d3b26ea 100644 --- a/target/arm/vec_helper.c +++ b/target/arm/vec_helper.c @@ -2690,3 +2690,27 @@ void HELPER(gvec_bfmlal_idx)(void *vd, void *vn, void *vm, } clear_tail(d, opr_sz, simd_maxsz(desc)); } + +#define DO_CLAMP(NAME, TYPE) \ +void HELPER(NAME)(void *d, void *n, void *m, void *a, uint32_t desc) \ +{ \ + intptr_t i, opr_sz = simd_oprsz(desc); \ + for (i = 0; i < opr_sz; i += sizeof(TYPE)) { \ + TYPE aa = *(TYPE *)(a + i); \ + TYPE nn = *(TYPE *)(n + i); \ + TYPE mm = *(TYPE *)(m + i); \ + TYPE dd = MIN(MAX(aa, nn), mm); \ + *(TYPE *)(d + i) = dd; \ + } \ + clear_tail(d, opr_sz, simd_maxsz(desc)); \ +} + +DO_CLAMP(gvec_sclamp_b, int8_t) +DO_CLAMP(gvec_sclamp_h, int16_t) +DO_CLAMP(gvec_sclamp_s, int32_t) +DO_CLAMP(gvec_sclamp_d, int64_t) + +DO_CLAMP(gvec_uclamp_b, uint8_t) +DO_CLAMP(gvec_uclamp_h, uint16_t) +DO_CLAMP(gvec_uclamp_s, uint32_t) +DO_CLAMP(gvec_uclamp_d, uint64_t) diff --git a/target/ppc/cpu-models.c b/target/ppc/cpu-models.c index 976be5e0d1..912b037c63 100644 --- a/target/ppc/cpu-models.c +++ b/target/ppc/cpu-models.c @@ -385,19 +385,19 @@ POWERPC_DEF_SVR("mpc8548e_v21", "MPC8548E v2.1", CPU_POWERPC_MPC8548E_v21, POWERPC_SVR_8548E_v21, e500v2) POWERPC_DEF_SVR("mpc8555_v10", "MPC8555 v1.0", - CPU_POWERPC_MPC8555_v10, POWERPC_SVR_8555_v10, e500v2) + CPU_POWERPC_MPC8555_v10, POWERPC_SVR_8555_v10, e500v1) POWERPC_DEF_SVR("mpc8555_v11", "MPC8555 v1.1", - CPU_POWERPC_MPC8555_v11, POWERPC_SVR_8555_v11, e500v2) + CPU_POWERPC_MPC8555_v11, POWERPC_SVR_8555_v11, e500v1) POWERPC_DEF_SVR("mpc8555e_v10", "MPC8555E v1.0", - CPU_POWERPC_MPC8555E_v10, POWERPC_SVR_8555E_v10, e500v2) + CPU_POWERPC_MPC8555E_v10, POWERPC_SVR_8555E_v10, e500v1) POWERPC_DEF_SVR("mpc8555e_v11", "MPC8555E v1.1", - CPU_POWERPC_MPC8555E_v11, POWERPC_SVR_8555E_v11, e500v2) + CPU_POWERPC_MPC8555E_v11, POWERPC_SVR_8555E_v11, e500v1) POWERPC_DEF_SVR("mpc8560_v10", "MPC8560 v1.0", - CPU_POWERPC_MPC8560_v10, POWERPC_SVR_8560_v10, e500v2) + CPU_POWERPC_MPC8560_v10, POWERPC_SVR_8560_v10, e500v1) POWERPC_DEF_SVR("mpc8560_v20", "MPC8560 v2.0", - CPU_POWERPC_MPC8560_v20, POWERPC_SVR_8560_v20, e500v2) + CPU_POWERPC_MPC8560_v20, POWERPC_SVR_8560_v20, e500v1) POWERPC_DEF_SVR("mpc8560_v21", "MPC8560 v2.1", - CPU_POWERPC_MPC8560_v21, POWERPC_SVR_8560_v21, e500v2) + CPU_POWERPC_MPC8560_v21, POWERPC_SVR_8560_v21, e500v1) POWERPC_DEF_SVR("mpc8567", "MPC8567", CPU_POWERPC_MPC8567, POWERPC_SVR_8567, e500v2) POWERPC_DEF_SVR("mpc8567e", "MPC8567E", @@ -879,7 +879,6 @@ PowerPCCPUAlias ppc_cpu_aliases[] = { { "755", "755_v2.8" }, { "goldfinger", "755_v2.8" }, { "7400", "7400_v2.9" }, - { "max", "7400_v2.9" }, { "g4", "7400_v2.9" }, { "7410", "7410_v1.4" }, { "nitro", "7410_v1.4" }, @@ -918,6 +917,6 @@ PowerPCCPUAlias ppc_cpu_aliases[] = { #endif { "ppc32", "604" }, { "ppc", "604" }, - { "default", "604" }, + { NULL, NULL } }; diff --git a/target/ppc/cpu-models.h b/target/ppc/cpu-models.h index 76775a74a9..1326493a9a 100644 --- a/target/ppc/cpu-models.h +++ b/target/ppc/cpu-models.h @@ -184,13 +184,13 @@ enum { #define CPU_POWERPC_MPC8548E_v11 CPU_POWERPC_e500v2_v11 #define CPU_POWERPC_MPC8548E_v20 CPU_POWERPC_e500v2_v20 #define CPU_POWERPC_MPC8548E_v21 CPU_POWERPC_e500v2_v21 -#define CPU_POWERPC_MPC8555_v10 CPU_POWERPC_e500v2_v10 -#define 
CPU_POWERPC_MPC8555_v11 CPU_POWERPC_e500v2_v11 -#define CPU_POWERPC_MPC8555E_v10 CPU_POWERPC_e500v2_v10 -#define CPU_POWERPC_MPC8555E_v11 CPU_POWERPC_e500v2_v11 -#define CPU_POWERPC_MPC8560_v10 CPU_POWERPC_e500v2_v10 -#define CPU_POWERPC_MPC8560_v20 CPU_POWERPC_e500v2_v20 -#define CPU_POWERPC_MPC8560_v21 CPU_POWERPC_e500v2_v21 +#define CPU_POWERPC_MPC8555_v10 CPU_POWERPC_e500v1_v20 +#define CPU_POWERPC_MPC8555_v11 CPU_POWERPC_e500v1_v20 +#define CPU_POWERPC_MPC8555E_v10 CPU_POWERPC_e500v1_v20 +#define CPU_POWERPC_MPC8555E_v11 CPU_POWERPC_e500v1_v20 +#define CPU_POWERPC_MPC8560_v10 CPU_POWERPC_e500v1_v10 +#define CPU_POWERPC_MPC8560_v20 CPU_POWERPC_e500v1_v20 +#define CPU_POWERPC_MPC8560_v21 CPU_POWERPC_e500v1_v20 #define CPU_POWERPC_MPC8567 CPU_POWERPC_e500v2_v22 #define CPU_POWERPC_MPC8567E CPU_POWERPC_e500v2_v22 #define CPU_POWERPC_MPC8568 CPU_POWERPC_e500v2_v22 diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h index 6d78078f37..7aaff9dcc5 100644 --- a/target/ppc/cpu.h +++ b/target/ppc/cpu.h @@ -47,6 +47,18 @@ PPC_BIT32(bs)) #define PPC_BITMASK8(bs, be) ((PPC_BIT8(bs) - PPC_BIT8(be)) | PPC_BIT8(bs)) +/* + * QEMU version of the GETFIELD/SETFIELD macros from skiboot + * + * It might be better to use the existing extract64() and + * deposit64() but this means that all the register definitions will + * change and become incompatible with the ones found in skiboot. + */ +#define MASK_TO_LSH(m) (__builtin_ffsll(m) - 1) +#define GETFIELD(m, v) (((v) & (m)) >> MASK_TO_LSH(m)) +#define SETFIELD(m, v, val) \ + (((v) & ~(m)) | ((((typeof(v))(val)) << MASK_TO_LSH(m)) & (m))) + /*****************************************************************************/ /* Exception vectors definitions */ enum { @@ -694,42 +706,42 @@ enum { /*****************************************************************************/ /* Floating point status and control register */ -#define FPSCR_DRN2 34 /* Decimal Floating-Point rounding control */ -#define FPSCR_DRN1 33 /* Decimal Floating-Point rounding control */ -#define FPSCR_DRN0 32 /* Decimal Floating-Point rounding control */ -#define FPSCR_FX 31 /* Floating-point exception summary */ -#define FPSCR_FEX 30 /* Floating-point enabled exception summary */ -#define FPSCR_VX 29 /* Floating-point invalid operation exception summ. 
*/ -#define FPSCR_OX 28 /* Floating-point overflow exception */ -#define FPSCR_UX 27 /* Floating-point underflow exception */ -#define FPSCR_ZX 26 /* Floating-point zero divide exception */ -#define FPSCR_XX 25 /* Floating-point inexact exception */ -#define FPSCR_VXSNAN 24 /* Floating-point invalid operation exception (sNan) */ -#define FPSCR_VXISI 23 /* Floating-point invalid operation exception (inf) */ -#define FPSCR_VXIDI 22 /* Floating-point invalid operation exception (inf) */ -#define FPSCR_VXZDZ 21 /* Floating-point invalid operation exception (zero) */ -#define FPSCR_VXIMZ 20 /* Floating-point invalid operation exception (inf) */ -#define FPSCR_VXVC 19 /* Floating-point invalid operation exception (comp) */ -#define FPSCR_FR 18 /* Floating-point fraction rounded */ -#define FPSCR_FI 17 /* Floating-point fraction inexact */ -#define FPSCR_C 16 /* Floating-point result class descriptor */ -#define FPSCR_FL 15 /* Floating-point less than or negative */ -#define FPSCR_FG 14 /* Floating-point greater than or negative */ -#define FPSCR_FE 13 /* Floating-point equal or zero */ -#define FPSCR_FU 12 /* Floating-point unordered or NaN */ -#define FPSCR_FPCC 12 /* Floating-point condition code */ -#define FPSCR_FPRF 12 /* Floating-point result flags */ -#define FPSCR_VXSOFT 10 /* Floating-point invalid operation exception (soft) */ -#define FPSCR_VXSQRT 9 /* Floating-point invalid operation exception (sqrt) */ -#define FPSCR_VXCVI 8 /* Floating-point invalid operation exception (int) */ -#define FPSCR_VE 7 /* Floating-point invalid operation exception enable */ -#define FPSCR_OE 6 /* Floating-point overflow exception enable */ -#define FPSCR_UE 5 /* Floating-point underflow exception enable */ -#define FPSCR_ZE 4 /* Floating-point zero divide exception enable */ -#define FPSCR_XE 3 /* Floating-point inexact exception enable */ -#define FPSCR_NI 2 /* Floating-point non-IEEE mode */ -#define FPSCR_RN1 1 -#define FPSCR_RN0 0 /* Floating-point rounding control */ +#define FPSCR_DRN2 PPC_BIT_NR(29) /* Decimal Floating-Point rounding ctrl. */ +#define FPSCR_DRN1 PPC_BIT_NR(30) /* Decimal Floating-Point rounding ctrl. */ +#define FPSCR_DRN0 PPC_BIT_NR(31) /* Decimal Floating-Point rounding ctrl. */ +#define FPSCR_FX PPC_BIT_NR(32) /* Floating-point exception summary */ +#define FPSCR_FEX PPC_BIT_NR(33) /* Floating-point enabled exception summ.*/ +#define FPSCR_VX PPC_BIT_NR(34) /* Floating-point invalid op. excp. summ.*/ +#define FPSCR_OX PPC_BIT_NR(35) /* Floating-point overflow exception */ +#define FPSCR_UX PPC_BIT_NR(36) /* Floating-point underflow exception */ +#define FPSCR_ZX PPC_BIT_NR(37) /* Floating-point zero divide exception */ +#define FPSCR_XX PPC_BIT_NR(38) /* Floating-point inexact exception */ +#define FPSCR_VXSNAN PPC_BIT_NR(39) /* Floating-point invalid op. excp (sNan)*/ +#define FPSCR_VXISI PPC_BIT_NR(40) /* Floating-point invalid op. excp (inf) */ +#define FPSCR_VXIDI PPC_BIT_NR(41) /* Floating-point invalid op. excp (inf) */ +#define FPSCR_VXZDZ PPC_BIT_NR(42) /* Floating-point invalid op. excp (zero)*/ +#define FPSCR_VXIMZ PPC_BIT_NR(43) /* Floating-point invalid op. excp (inf) */ +#define FPSCR_VXVC PPC_BIT_NR(44) /* Floating-point invalid op. 
excp (comp)*/ +#define FPSCR_FR PPC_BIT_NR(45) /* Floating-point fraction rounded */ +#define FPSCR_FI PPC_BIT_NR(46) /* Floating-point fraction inexact */ +#define FPSCR_C PPC_BIT_NR(47) /* Floating-point result class descriptor*/ +#define FPSCR_FL PPC_BIT_NR(48) /* Floating-point less than or negative */ +#define FPSCR_FG PPC_BIT_NR(49) /* Floating-point greater than or neg. */ +#define FPSCR_FE PPC_BIT_NR(50) /* Floating-point equal or zero */ +#define FPSCR_FU PPC_BIT_NR(51) /* Floating-point unordered or NaN */ +#define FPSCR_FPCC PPC_BIT_NR(51) /* Floating-point condition code */ +#define FPSCR_FPRF PPC_BIT_NR(51) /* Floating-point result flags */ +#define FPSCR_VXSOFT PPC_BIT_NR(53) /* Floating-point invalid op. excp (soft)*/ +#define FPSCR_VXSQRT PPC_BIT_NR(54) /* Floating-point invalid op. excp (sqrt)*/ +#define FPSCR_VXCVI PPC_BIT_NR(55) /* Floating-point invalid op. excp (int) */ +#define FPSCR_VE PPC_BIT_NR(56) /* Floating-point invalid op. excp enable*/ +#define FPSCR_OE PPC_BIT_NR(57) /* Floating-point overflow excp. enable */ +#define FPSCR_UE PPC_BIT_NR(58) /* Floating-point underflow excp. enable */ +#define FPSCR_ZE PPC_BIT_NR(59) /* Floating-point zero divide excp enable*/ +#define FPSCR_XE PPC_BIT_NR(60) /* Floating-point inexact excp. enable */ +#define FPSCR_NI PPC_BIT_NR(61) /* Floating-point non-IEEE mode */ +#define FPSCR_RN1 PPC_BIT_NR(62) +#define FPSCR_RN0 PPC_BIT_NR(63) /* Floating-point rounding control */ /* Invalid operation exception summary */ #define FPSCR_IX ((1 << FPSCR_VXSNAN) | (1 << FPSCR_VXISI) | \ (1 << FPSCR_VXIDI) | (1 << FPSCR_VXZDZ) | \ @@ -2277,6 +2289,8 @@ enum { PPC2_ISA310 = 0x0000000000100000ULL, /* lwsync instruction */ PPC2_MEM_LWSYNC = 0x0000000000200000ULL, + /* ISA 2.06 BCD assist instructions */ + PPC2_BCDA_ISA206 = 0x0000000000400000ULL, #define PPC_TCG_INSNS2 (PPC2_BOOKE206 | PPC2_VSX | PPC2_PRCNTL | PPC2_DBRX | \ PPC2_ISA205 | PPC2_VSX207 | PPC2_PERM_ISA206 | \ @@ -2285,7 +2299,8 @@ enum { PPC2_BCTAR_ISA207 | PPC2_LSQ_ISA207 | \ PPC2_ALTIVEC_207 | PPC2_ISA207S | PPC2_DFP | \ PPC2_FP_CVT_S64 | PPC2_TM | PPC2_PM_ISA206 | \ - PPC2_ISA300 | PPC2_ISA310 | PPC2_MEM_LWSYNC) + PPC2_ISA300 | PPC2_ISA310 | PPC2_MEM_LWSYNC | \ + PPC2_BCDA_ISA206) }; /*****************************************************************************/ diff --git a/target/ppc/cpu_init.c b/target/ppc/cpu_init.c index c16cb8dbe7..86ad28466a 100644 --- a/target/ppc/cpu_init.c +++ b/target/ppc/cpu_init.c @@ -47,6 +47,10 @@ #include "spr_common.h" #include "power8-pmu.h" +#ifndef CONFIG_USER_ONLY +#include "hw/boards.h" +#endif + /* #define PPC_DEBUG_SPR */ /* #define USE_APPLE_GDB */ @@ -5985,7 +5989,7 @@ POWERPC_FAMILY(POWER7)(ObjectClass *oc, void *data) PPC2_PERM_ISA206 | PPC2_DIVE_ISA206 | PPC2_ATOMIC_ISA206 | PPC2_FP_CVT_ISA206 | PPC2_FP_TST_ISA206 | PPC2_FP_CVT_S64 | - PPC2_PM_ISA206 | PPC2_MEM_LWSYNC; + PPC2_PM_ISA206 | PPC2_MEM_LWSYNC | PPC2_BCDA_ISA206; pcc->msr_mask = (1ull << MSR_SF) | (1ull << MSR_VR) | (1ull << MSR_VSX) | @@ -6159,7 +6163,8 @@ POWERPC_FAMILY(POWER8)(ObjectClass *oc, void *data) PPC2_FP_TST_ISA206 | PPC2_BCTAR_ISA207 | PPC2_LSQ_ISA207 | PPC2_ALTIVEC_207 | PPC2_ISA205 | PPC2_ISA207S | PPC2_FP_CVT_S64 | - PPC2_TM | PPC2_PM_ISA206 | PPC2_MEM_LWSYNC; + PPC2_TM | PPC2_PM_ISA206 | PPC2_MEM_LWSYNC | + PPC2_BCDA_ISA206; pcc->msr_mask = (1ull << MSR_SF) | (1ull << MSR_HV) | (1ull << MSR_TM) | @@ -6379,7 +6384,8 @@ POWERPC_FAMILY(POWER9)(ObjectClass *oc, void *data) PPC2_FP_TST_ISA206 | PPC2_BCTAR_ISA207 | PPC2_LSQ_ISA207 | PPC2_ALTIVEC_207 | 
PPC2_ISA205 | PPC2_ISA207S | PPC2_FP_CVT_S64 | - PPC2_TM | PPC2_ISA300 | PPC2_PRCNTL | PPC2_MEM_LWSYNC; + PPC2_TM | PPC2_ISA300 | PPC2_PRCNTL | PPC2_MEM_LWSYNC | + PPC2_BCDA_ISA206; pcc->msr_mask = (1ull << MSR_SF) | (1ull << MSR_HV) | (1ull << MSR_TM) | @@ -6597,7 +6603,7 @@ POWERPC_FAMILY(POWER10)(ObjectClass *oc, void *data) PPC2_LSQ_ISA207 | PPC2_ALTIVEC_207 | PPC2_ISA205 | PPC2_ISA207S | PPC2_FP_CVT_S64 | PPC2_TM | PPC2_ISA300 | PPC2_PRCNTL | PPC2_ISA310 | - PPC2_MEM_LWSYNC; + PPC2_MEM_LWSYNC | PPC2_BCDA_ISA206; pcc->msr_mask = (1ull << MSR_SF) | (1ull << MSR_HV) | (1ull << MSR_TM) | @@ -6963,6 +6969,21 @@ static ObjectClass *ppc_cpu_class_by_name(const char *name) } } + /* + * All ppc CPUs represent hardware that exists in the real world, i.e.: we + * do not have a "max" CPU with all possible emulated features enabled. + * Return the default CPU type for the machine because that has greater + * chance of being useful as the "max" CPU. + */ +#if !defined(CONFIG_USER_ONLY) + if (strcmp(name, "max") == 0) { + MachineClass *mc = MACHINE_GET_CLASS(qdev_get_machine()); + if (mc) { + return object_class_by_name(mc->default_cpu_type); + } + } +#endif + cpu_model = g_ascii_strdown(name, -1); p = ppc_cpu_lookup_alias(cpu_model); if (p) { diff --git a/target/ppc/dfp_helper.c b/target/ppc/dfp_helper.c index 0d01ac3de0..5ba74b2124 100644 --- a/target/ppc/dfp_helper.c +++ b/target/ppc/dfp_helper.c @@ -1391,3 +1391,68 @@ DFP_HELPER_SHIFT(DSCLI, 64, 1) DFP_HELPER_SHIFT(DSCLIQ, 128, 1) DFP_HELPER_SHIFT(DSCRI, 64, 0) DFP_HELPER_SHIFT(DSCRIQ, 128, 0) + +target_ulong helper_CDTBCD(target_ulong s) +{ + uint64_t res = 0; + uint32_t dec32, declets; + uint8_t bcd[6]; + int i, w, sh; + decNumber a; + + for (w = 1; w >= 0; w--) { + res <<= 32; + declets = extract64(s, 32 * w, 20); + if (declets) { + /* decimal32 with zero exponent and word "w" declets */ + dec32 = (0x225ULL << 20) | declets; + decimal32ToNumber((decimal32 *)&dec32, &a); + decNumberGetBCD(&a, bcd); + for (i = 0; i < a.digits; i++) { + sh = 4 * (a.digits - 1 - i); + res |= (uint64_t)bcd[i] << sh; + } + } + } + + return res; +} + +target_ulong helper_CBCDTD(target_ulong s) +{ + uint64_t res = 0; + uint32_t dec32; + uint8_t bcd[6]; + int w, i, offs; + decNumber a; + decContext context; + + decContextDefault(&context, DEC_INIT_DECIMAL32); + + for (w = 1; w >= 0; w--) { + res <<= 32; + decNumberZero(&a); + /* Extract each BCD field of word "w" */ + for (i = 5; i >= 0; i--) { + offs = 4 * (5 - i) + 32 * w; + bcd[i] = extract64(s, offs, 4); + if (bcd[i] > 9) { + /* + * If the field value is greater than 9, the results are + * undefined. We could use a fixed value like 0 or 9, but + * an and with 9 seems to better match the hardware behavior. 
+ */ + bcd[i] &= 9; + } + } + + /* Create a decNumber with the BCD values and convert to decimal32 */ + decNumberSetBCD(&a, bcd, 6); + decimal32FromNumber((decimal32 *)&dec32, &a, &context); + + /* Extract the two declets from the decimal32 value */ + res |= dec32 & 0xfffff; + } + + return res; +} diff --git a/target/ppc/helper.h b/target/ppc/helper.h index d627cfe6ed..ed0641a234 100644 --- a/target/ppc/helper.h +++ b/target/ppc/helper.h @@ -54,6 +54,8 @@ DEF_HELPER_3(sraw, tl, env, tl, tl) DEF_HELPER_FLAGS_2(CFUGED, TCG_CALL_NO_RWG_SE, i64, i64, i64) DEF_HELPER_FLAGS_2(PDEPD, TCG_CALL_NO_RWG_SE, i64, i64, i64) DEF_HELPER_FLAGS_2(PEXTD, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_1(CDTBCD, TCG_CALL_NO_RWG_SE, tl, tl) +DEF_HELPER_FLAGS_1(CBCDTD, TCG_CALL_NO_RWG_SE, tl, tl) #if defined(TARGET_PPC64) DEF_HELPER_FLAGS_2(cmpeqb, TCG_CALL_NO_RWG_SE, i32, tl, tl) DEF_HELPER_FLAGS_1(popcntw, TCG_CALL_NO_RWG_SE, tl, tl) @@ -204,14 +206,14 @@ DEF_HELPER_FLAGS_5(vadduws, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32) DEF_HELPER_FLAGS_5(vsububs, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32) DEF_HELPER_FLAGS_5(vsubuhs, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32) DEF_HELPER_FLAGS_5(vsubuws, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32) -DEF_HELPER_FLAGS_3(vadduqm, TCG_CALL_NO_RWG, void, avr, avr, avr) -DEF_HELPER_FLAGS_4(vaddecuq, TCG_CALL_NO_RWG, void, avr, avr, avr, avr) -DEF_HELPER_FLAGS_4(vaddeuqm, TCG_CALL_NO_RWG, void, avr, avr, avr, avr) -DEF_HELPER_FLAGS_3(vaddcuq, TCG_CALL_NO_RWG, void, avr, avr, avr) -DEF_HELPER_FLAGS_3(vsubuqm, TCG_CALL_NO_RWG, void, avr, avr, avr) -DEF_HELPER_FLAGS_4(vsubecuq, TCG_CALL_NO_RWG, void, avr, avr, avr, avr) -DEF_HELPER_FLAGS_4(vsubeuqm, TCG_CALL_NO_RWG, void, avr, avr, avr, avr) -DEF_HELPER_FLAGS_3(vsubcuq, TCG_CALL_NO_RWG, void, avr, avr, avr) +DEF_HELPER_FLAGS_3(VADDUQM, TCG_CALL_NO_RWG, void, avr, avr, avr) +DEF_HELPER_FLAGS_4(VADDECUQ, TCG_CALL_NO_RWG, void, avr, avr, avr, avr) +DEF_HELPER_FLAGS_4(VADDEUQM, TCG_CALL_NO_RWG, void, avr, avr, avr, avr) +DEF_HELPER_FLAGS_3(VADDCUQ, TCG_CALL_NO_RWG, void, avr, avr, avr) +DEF_HELPER_FLAGS_3(VSUBUQM, TCG_CALL_NO_RWG, void, avr, avr, avr) +DEF_HELPER_FLAGS_4(VSUBECUQ, TCG_CALL_NO_RWG, void, avr, avr, avr, avr) +DEF_HELPER_FLAGS_4(VSUBEUQM, TCG_CALL_NO_RWG, void, avr, avr, avr, avr) +DEF_HELPER_FLAGS_3(VSUBCUQ, TCG_CALL_NO_RWG, void, avr, avr, avr) DEF_HELPER_FLAGS_4(vsldoi, TCG_CALL_NO_RWG, void, avr, avr, avr, i32) DEF_HELPER_FLAGS_3(vextractub, TCG_CALL_NO_RWG, void, avr, avr, i32) DEF_HELPER_FLAGS_3(vextractuh, TCG_CALL_NO_RWG, void, avr, avr, i32) @@ -318,7 +320,7 @@ DEF_HELPER_FLAGS_3(vbpermq, TCG_CALL_NO_RWG, void, avr, avr, avr) DEF_HELPER_FLAGS_3(vpmsumb, TCG_CALL_NO_RWG, void, avr, avr, avr) DEF_HELPER_FLAGS_3(vpmsumh, TCG_CALL_NO_RWG, void, avr, avr, avr) DEF_HELPER_FLAGS_3(vpmsumw, TCG_CALL_NO_RWG, void, avr, avr, avr) -DEF_HELPER_FLAGS_3(vpmsumd, TCG_CALL_NO_RWG, void, avr, avr, avr) +DEF_HELPER_FLAGS_3(VPMSUMD, TCG_CALL_NO_RWG, void, avr, avr, avr) DEF_HELPER_FLAGS_2(vextublx, TCG_CALL_NO_RWG, tl, tl, avr) DEF_HELPER_FLAGS_2(vextuhlx, TCG_CALL_NO_RWG, tl, tl, avr) DEF_HELPER_FLAGS_2(vextuwlx, TCG_CALL_NO_RWG, tl, tl, avr) diff --git a/target/ppc/insn32.decode b/target/ppc/insn32.decode index 6ea48d5163..f7653ef9d5 100644 --- a/target/ppc/insn32.decode +++ b/target/ppc/insn32.decode @@ -21,11 +21,11 @@ @A ...... frt:5 fra:5 frb:5 frc:5 ..... rc:1 &A &D rt ra si:int64_t -@D ...... rt:5 ra:5 si:s16 &D +@D ...... rt:5 ra:5 si:s16 &D &D_bf bf l:bool ra imm -@D_bfs ...... 
bf:3 - l:1 ra:5 imm:s16 &D_bf -@D_bfu ...... bf:3 - l:1 ra:5 imm:16 &D_bf +@D_bfs ...... bf:3 . l:1 ra:5 imm:s16 &D_bf +@D_bfu ...... bf:3 . l:1 ra:5 imm:16 &D_bf %dq_si 4:s12 !function=times_16 %dq_rtp 22:4 !function=times_2 @@ -38,7 +38,7 @@ @DQ_TSXP ...... ..... ra:5 ............ .... &D si=%dq_si rt=%rt_tsxp %ds_si 2:s14 !function=times_4 -@DS ...... rt:5 ra:5 .............. .. &D si=%ds_si +@DS ...... rt:5 ra:5 .............. .. &D si=%ds_si %ds_rtp 22:4 !function=times_2 @DS_rtp ...... ....0 ra:5 .............. .. &D rt=%ds_rtp si=%ds_si @@ -49,10 +49,10 @@ &DX rt d %dx_d 6:s10 16:5 0:1 -@DX ...... rt:5 ..... .......... ..... . &DX d=%dx_d +@DX ...... rt:5 ..... .......... ..... . &DX d=%dx_d &VA vrt vra vrb rc -@VA ...... vrt:5 vra:5 vrb:5 rc:5 ...... &VA +@VA ...... vrt:5 vra:5 vrb:5 rc:5 ...... &VA &VC vrt vra vrb rc:bool @VC ...... vrt:5 vra:5 vrb:5 rc:1 .......... &VC @@ -61,7 +61,7 @@ @VN ...... vrt:5 vra:5 vrb:5 .. sh:3 ...... &VN &VX vrt vra vrb -@VX ...... vrt:5 vra:5 vrb:5 .......... . &VX +@VX ...... vrt:5 vra:5 vrb:5 .......... . &VX &VX_bf bf vra vrb @VX_bf ...... bf:3 .. vra:5 vrb:5 ........... &VX_bf @@ -76,17 +76,20 @@ @VX_tb_rc ...... vrt:5 ..... vrb:5 rc:1 .......... &VX_tb_rc &VX_uim4 vrt uim vrb -@VX_uim4 ...... vrt:5 . uim:4 vrb:5 ........... &VX_uim4 +@VX_uim4 ...... vrt:5 . uim:4 vrb:5 ........... &VX_uim4 &VX_tb vrt vrb -@VX_tb ...... vrt:5 ..... vrb:5 ........... &VX_tb +@VX_tb ...... vrt:5 ..... vrb:5 ........... &VX_tb &X rt ra rb -@X ...... rt:5 ra:5 rb:5 .......... . &X +@X ...... rt:5 ra:5 rb:5 .......... . &X &X_rc rt ra rb rc:bool @X_rc ...... rt:5 ra:5 rb:5 .......... rc:1 &X_rc +&X_sa rs ra +@X_sa ...... rs:5 ra:5 ..... .......... . &X_sa + %x_frtp 22:4 !function=times_2 %x_frap 17:4 !function=times_2 %x_frbp 12:4 !function=times_2 @@ -94,9 +97,15 @@ @X_tp_a_bp_rc ...... ....0 ra:5 ....0 .......... rc:1 &X_rc rt=%x_frtp rb=%x_frbp +&X_t rt +@X_t ...... rt:5 ..... ..... .......... . &X_t + &X_tb rt rb @X_tb ...... rt:5 ..... rb:5 .......... . &X_tb +&X_t_rc rt rc:bool +@X_t_rc ...... rt:5 ..... ..... .......... rc:1 &X_t_rc + &X_tb_rc rt rb rc:bool @X_tb_rc ...... rt:5 ..... rb:5 .......... rc:1 &X_tb_rc @@ -107,7 +116,7 @@ @X_t_bp_rc ...... rt:5 ..... ....0 .......... rc:1 &X_tb_rc rb=%x_frbp &X_bi rt bi -@X_bi ...... rt:5 bi:5 ----- .......... - &X_bi +@X_bi ...... rt:5 bi:5 ..... .......... . &X_bi &X_bf bf ra rb @X_bf ...... bf:3 .. ra:5 rb:5 .......... . &X_bf @@ -122,7 +131,13 @@ @X_bf_uim_bp ...... bf:3 . uim:6 ....0 .......... . &X_bf_uim rb=%x_frbp &X_bfl bf l:bool ra rb -@X_bfl ...... bf:3 - l:1 ra:5 rb:5 ..........- &X_bfl +@X_bfl ...... bf:3 . l:1 ra:5 rb:5 .......... . &X_bfl + +&X_imm2 rt imm +@X_imm2 ...... rt:5 ..... ... imm:2 .......... . &X_imm2 + +&X_imm3 rt imm +@X_imm3 ...... rt:5 ..... .. imm:3 .......... . &X_imm3 %x_xt 0:1 21:5 &X_imm5 xt imm:uint8_t vrb @@ -299,6 +314,12 @@ CNTTZDM 011111 ..... ..... ..... 1000111011 - @X PDEPD 011111 ..... ..... ..... 0010011100 - @X PEXTD 011111 ..... ..... ..... 0010111100 - @X +## BCD Assist + +ADDG6S 011111 ..... ..... ..... - 001001010 - @X +CDTBCD 011111 ..... ..... ----- 0100011010 - @X_sa +CBCDTD 011111 ..... ..... ----- 0100111010 - @X_sa + ### Float-Point Load Instructions LFS 110000 ..... ..... ................ @D @@ -334,6 +355,16 @@ SETBCR 011111 ..... ..... ----- 0110100000 - @X_bi SETNBC 011111 ..... ..... ----- 0111000000 - @X_bi SETNBCR 011111 ..... ..... ----- 0111100000 - @X_bi +### Move To/From FPSCR + +MFFS 111111 ..... 00000 ----- 1001000111 . 
@X_t_rc +MFFSCE 111111 ..... 00001 ----- 1001000111 - @X_t +MFFSCRN 111111 ..... 10110 ..... 1001000111 - @X_tb +MFFSCDRN 111111 ..... 10100 ..... 1001000111 - @X_tb +MFFSCRNI 111111 ..... 10111 ---.. 1001000111 - @X_imm2 +MFFSCDRNI 111111 ..... 10101 --... 1001000111 - @X_imm3 +MFFSL 111111 ..... 11000 ----- 1001000111 - @X_t + ### Decimal Floating-Point Arithmetic Instructions DADD 111011 ..... ..... ..... 0000000010 . @X_rc @@ -426,6 +457,10 @@ DSCLIQ 111111 ..... ..... ...... 001000010 . @Z22_tap_sh_rc DSCRI 111011 ..... ..... ...... 001100010 . @Z22_ta_sh_rc DSCRIQ 111111 ..... ..... ...... 001100010 . @Z22_tap_sh_rc +## Vector Exclusive-OR-based Instructions + +VPMSUMD 000100 ..... ..... ..... 10011001000 @VX + ## Vector Integer Instructions VCMPEQUB 000100 ..... ..... ..... . 0000000110 @VC @@ -546,6 +581,18 @@ VRLQNM 000100 ..... ..... ..... 00101000101 @VX ## Vector Integer Arithmetic Instructions +VADDCUQ 000100 ..... ..... ..... 00101000000 @VX +VADDUQM 000100 ..... ..... ..... 00100000000 @VX + +VADDEUQM 000100 ..... ..... ..... ..... 111100 @VA +VADDECUQ 000100 ..... ..... ..... ..... 111101 @VA + +VSUBCUQ 000100 ..... ..... ..... 10101000000 @VX +VSUBUQM 000100 ..... ..... ..... 10100000000 @VX + +VSUBECUQ 000100 ..... ..... ..... ..... 111111 @VA +VSUBEUQM 000100 ..... ..... ..... ..... 111110 @VA + VEXTSB2W 000100 ..... 10000 ..... 11000000010 @VX_tb VEXTSH2W 000100 ..... 10001 ..... 11000000010 @VX_tb VEXTSB2D 000100 ..... 11000 ..... 11000000010 @VX_tb diff --git a/target/ppc/int_helper.c b/target/ppc/int_helper.c index 3ae03f73d3..d905f07d02 100644 --- a/target/ppc/int_helper.c +++ b/target/ppc/int_helper.c @@ -1484,52 +1484,24 @@ PMSUM(vpmsumb, u8, u16, uint16_t) PMSUM(vpmsumh, u16, u32, uint32_t) PMSUM(vpmsumw, u32, u64, uint64_t) -void helper_vpmsumd(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) +void helper_VPMSUMD(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) { - -#ifdef CONFIG_INT128 - int i, j; - __uint128_t prod[2]; - - VECTOR_FOR_INORDER_I(i, u64) { - prod[i] = 0; - for (j = 0; j < 64; j++) { - if (a->u64[i] & (1ull << j)) { - prod[i] ^= (((__uint128_t)b->u64[i]) << j); - } - } - } - - r->u128 = prod[0] ^ prod[1]; - -#else int i, j; - ppc_avr_t prod[2]; - - VECTOR_FOR_INORDER_I(i, u64) { - prod[i].VsrD(1) = prod[i].VsrD(0) = 0; - for (j = 0; j < 64; j++) { - if (a->u64[i] & (1ull << j)) { - ppc_avr_t bshift; - if (j == 0) { - bshift.VsrD(0) = 0; - bshift.VsrD(1) = b->u64[i]; - } else { - bshift.VsrD(0) = b->u64[i] >> (64 - j); - bshift.VsrD(1) = b->u64[i] << j; - } - prod[i].VsrD(1) ^= bshift.VsrD(1); - prod[i].VsrD(0) ^= bshift.VsrD(0); + Int128 tmp, prod[2] = {int128_zero(), int128_zero()}; + + for (j = 0; j < 64; j++) { + for (i = 0; i < ARRAY_SIZE(r->u64); i++) { + if (a->VsrD(i) & (1ull << j)) { + tmp = int128_make64(b->VsrD(i)); + tmp = int128_lshift(tmp, j); + prod[i] = int128_xor(prod[i], tmp); } } } - r->VsrD(1) = prod[0].VsrD(1) ^ prod[1].VsrD(1); - r->VsrD(0) = prod[0].VsrD(0) ^ prod[1].VsrD(0); -#endif + r->s128 = int128_xor(prod[0], prod[1]); } - #if HOST_BIG_ENDIAN #define PKBIG 1 #else @@ -2204,189 +2176,66 @@ VGENERIC_DO(popcntd, u64) #undef VGENERIC_DO -#if HOST_BIG_ENDIAN -#define QW_ONE { .u64 = { 0, 1 } } -#else -#define QW_ONE { .u64 = { 1, 0 } } -#endif - -#ifndef CONFIG_INT128 - -static inline void avr_qw_not(ppc_avr_t *t, ppc_avr_t a) -{ - t->u64[0] = ~a.u64[0]; - t->u64[1] = ~a.u64[1]; -} - -static int avr_qw_cmpu(ppc_avr_t a, ppc_avr_t b) -{ - if (a.VsrD(0) < b.VsrD(0)) { - return -1; - } else if (a.VsrD(0) > b.VsrD(0)) { - return 1; - } else 
if (a.VsrD(1) < b.VsrD(1)) { - return -1; - } else if (a.VsrD(1) > b.VsrD(1)) { - return 1; - } else { - return 0; - } -} - -static void avr_qw_add(ppc_avr_t *t, ppc_avr_t a, ppc_avr_t b) -{ - t->VsrD(1) = a.VsrD(1) + b.VsrD(1); - t->VsrD(0) = a.VsrD(0) + b.VsrD(0) + - (~a.VsrD(1) < b.VsrD(1)); -} - -static int avr_qw_addc(ppc_avr_t *t, ppc_avr_t a, ppc_avr_t b) -{ - ppc_avr_t not_a; - t->VsrD(1) = a.VsrD(1) + b.VsrD(1); - t->VsrD(0) = a.VsrD(0) + b.VsrD(0) + - (~a.VsrD(1) < b.VsrD(1)); - avr_qw_not(¬_a, a); - return avr_qw_cmpu(not_a, b) < 0; -} - -#endif - -void helper_vadduqm(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) +void helper_VADDUQM(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) { -#ifdef CONFIG_INT128 - r->u128 = a->u128 + b->u128; -#else - avr_qw_add(r, *a, *b); -#endif + r->s128 = int128_add(a->s128, b->s128); } -void helper_vaddeuqm(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c) +void helper_VADDEUQM(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c) { -#ifdef CONFIG_INT128 - r->u128 = a->u128 + b->u128 + (c->u128 & 1); -#else - - if (c->VsrD(1) & 1) { - ppc_avr_t tmp; - - tmp.VsrD(0) = 0; - tmp.VsrD(1) = c->VsrD(1) & 1; - avr_qw_add(&tmp, *a, tmp); - avr_qw_add(r, tmp, *b); - } else { - avr_qw_add(r, *a, *b); - } -#endif + r->s128 = int128_add(int128_add(a->s128, b->s128), + int128_make64(int128_getlo(c->s128) & 1)); } -void helper_vaddcuq(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) +void helper_VADDCUQ(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) { -#ifdef CONFIG_INT128 - r->u128 = (~a->u128 < b->u128); -#else - ppc_avr_t not_a; - - avr_qw_not(¬_a, *a); - + r->VsrD(1) = int128_ult(int128_not(a->s128), b->s128); r->VsrD(0) = 0; - r->VsrD(1) = (avr_qw_cmpu(not_a, *b) < 0); -#endif } -void helper_vaddecuq(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c) +void helper_VADDECUQ(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c) { -#ifdef CONFIG_INT128 - int carry_out = (~a->u128 < b->u128); - if (!carry_out && (c->u128 & 1)) { - carry_out = ((a->u128 + b->u128 + 1) == 0) && - ((a->u128 != 0) || (b->u128 != 0)); - } - r->u128 = carry_out; -#else - - int carry_in = c->VsrD(1) & 1; - int carry_out = 0; - ppc_avr_t tmp; - - carry_out = avr_qw_addc(&tmp, *a, *b); + bool carry_out = int128_ult(int128_not(a->s128), b->s128), + carry_in = int128_getlo(c->s128) & 1; if (!carry_out && carry_in) { - ppc_avr_t one = QW_ONE; - carry_out = avr_qw_addc(&tmp, tmp, one); + carry_out = (int128_nz(a->s128) || int128_nz(b->s128)) && + int128_eq(int128_add(a->s128, b->s128), int128_makes64(-1)); } + r->VsrD(0) = 0; r->VsrD(1) = carry_out; -#endif } -void helper_vsubuqm(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) +void helper_VSUBUQM(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) { -#ifdef CONFIG_INT128 - r->u128 = a->u128 - b->u128; -#else - ppc_avr_t tmp; - ppc_avr_t one = QW_ONE; - - avr_qw_not(&tmp, *b); - avr_qw_add(&tmp, *a, tmp); - avr_qw_add(r, tmp, one); -#endif + r->s128 = int128_sub(a->s128, b->s128); } -void helper_vsubeuqm(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c) +void helper_VSUBEUQM(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c) { -#ifdef CONFIG_INT128 - r->u128 = a->u128 + ~b->u128 + (c->u128 & 1); -#else - ppc_avr_t tmp, sum; - - avr_qw_not(&tmp, *b); - avr_qw_add(&sum, *a, tmp); - - tmp.VsrD(0) = 0; - tmp.VsrD(1) = c->VsrD(1) & 1; - avr_qw_add(r, sum, tmp); -#endif + r->s128 = int128_add(int128_add(a->s128, int128_not(b->s128)), + int128_make64(int128_getlo(c->s128) & 1)); } -void helper_vsubcuq(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) +void 
helper_VSUBCUQ(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) { -#ifdef CONFIG_INT128 - r->u128 = (~a->u128 < ~b->u128) || - (a->u128 + ~b->u128 == (__uint128_t)-1); -#else - int carry = (avr_qw_cmpu(*a, *b) > 0); - if (!carry) { - ppc_avr_t tmp; - avr_qw_not(&tmp, *b); - avr_qw_add(&tmp, *a, tmp); - carry = ((tmp.VsrSD(0) == -1ull) && (tmp.VsrSD(1) == -1ull)); - } + Int128 tmp = int128_not(b->s128); + + r->VsrD(1) = int128_ult(int128_not(a->s128), tmp) || + int128_eq(int128_add(a->s128, tmp), int128_makes64(-1)); r->VsrD(0) = 0; - r->VsrD(1) = carry; -#endif } -void helper_vsubecuq(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c) +void helper_VSUBECUQ(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c) { -#ifdef CONFIG_INT128 - r->u128 = - (~a->u128 < ~b->u128) || - ((c->u128 & 1) && (a->u128 + ~b->u128 == (__uint128_t)-1)); -#else - int carry_in = c->VsrD(1) & 1; - int carry_out = (avr_qw_cmpu(*a, *b) > 0); - if (!carry_out && carry_in) { - ppc_avr_t tmp; - avr_qw_not(&tmp, *b); - avr_qw_add(&tmp, *a, tmp); - carry_out = ((tmp.VsrD(0) == -1ull) && (tmp.VsrD(1) == -1ull)); - } + Int128 tmp = int128_not(b->s128); + bool carry_out = int128_ult(int128_not(a->s128), tmp), + carry_in = int128_getlo(c->s128) & 1; + r->VsrD(1) = carry_out || (carry_in && int128_eq(int128_add(a->s128, tmp), + int128_makes64(-1))); r->VsrD(0) = 0; - r->VsrD(1) = carry_out; -#endif } #define BCD_PLUS_PREF_1 0xC diff --git a/target/ppc/internal.h b/target/ppc/internal.h index 2add128cd1..467f3046c8 100644 --- a/target/ppc/internal.h +++ b/target/ppc/internal.h @@ -159,9 +159,6 @@ EXTRACT_HELPER(FPL, 25, 1); EXTRACT_HELPER(FPFLM, 17, 8); EXTRACT_HELPER(FPW, 16, 1); -/* mffscrni */ -EXTRACT_HELPER(RM, 11, 2); - /* addpcis */ EXTRACT_HELPER_SPLIT_3(DX, 10, 6, 6, 5, 16, 1, 1, 0, 0) #if defined(TARGET_PPC64) diff --git a/target/ppc/translate/fixedpoint-impl.c.inc b/target/ppc/translate/fixedpoint-impl.c.inc index 1aab32be03..cb0097bedb 100644 --- a/target/ppc/translate/fixedpoint-impl.c.inc +++ b/target/ppc/translate/fixedpoint-impl.c.inc @@ -492,3 +492,54 @@ static bool trans_PEXTD(DisasContext *ctx, arg_X *a) #endif return true; } + +static bool trans_ADDG6S(DisasContext *ctx, arg_X *a) +{ + const uint64_t carry_bits = 0x1111111111111111ULL; + TCGv t0, t1, carry, zero = tcg_constant_tl(0); + + REQUIRE_INSNS_FLAGS2(ctx, BCDA_ISA206); + + t0 = tcg_temp_new(); + t1 = tcg_const_tl(0); + carry = tcg_const_tl(0); + + for (int i = 0; i < 16; i++) { + tcg_gen_shri_tl(t0, cpu_gpr[a->ra], i * 4); + tcg_gen_andi_tl(t0, t0, 0xf); + tcg_gen_add_tl(t1, t1, t0); + + tcg_gen_shri_tl(t0, cpu_gpr[a->rb], i * 4); + tcg_gen_andi_tl(t0, t0, 0xf); + tcg_gen_add_tl(t1, t1, t0); + + tcg_gen_andi_tl(t1, t1, 0x10); + tcg_gen_setcond_tl(TCG_COND_NE, t1, t1, zero); + + tcg_gen_shli_tl(t0, t1, i * 4); + tcg_gen_or_tl(carry, carry, t0); + } + + tcg_gen_xori_tl(carry, carry, (target_long)carry_bits); + tcg_gen_muli_tl(cpu_gpr[a->rt], carry, 6); + + tcg_temp_free(t0); + tcg_temp_free(t1); + tcg_temp_free(carry); + + return true; +} + +static bool trans_CDTBCD(DisasContext *ctx, arg_X_sa *a) +{ + REQUIRE_INSNS_FLAGS2(ctx, BCDA_ISA206); + gen_helper_CDTBCD(cpu_gpr[a->ra], cpu_gpr[a->rs]); + return true; +} + +static bool trans_CBCDTD(DisasContext *ctx, arg_X_sa *a) +{ + REQUIRE_INSNS_FLAGS2(ctx, BCDA_ISA206); + gen_helper_CBCDTD(cpu_gpr[a->ra], cpu_gpr[a->rs]); + return true; +} diff --git a/target/ppc/translate/fp-impl.c.inc b/target/ppc/translate/fp-impl.c.inc index f9b58b844e..319513d001 100644 --- a/target/ppc/translate/fp-impl.c.inc +++ 
b/target/ppc/translate/fp-impl.c.inc @@ -615,141 +615,162 @@ static void gen_mcrfs(DisasContext *ctx) tcg_temp_free_i64(tnew_fpscr); } -/* mffs */ -static void gen_mffs(DisasContext *ctx) +static TCGv_i64 place_from_fpscr(int rt, uint64_t mask) { - TCGv_i64 t0; - if (unlikely(!ctx->fpu_enabled)) { - gen_exception(ctx, POWERPC_EXCP_FPU); - return; - } - t0 = tcg_temp_new_i64(); + TCGv_i64 fpscr = tcg_temp_new_i64(); + TCGv_i64 fpscr_masked = tcg_temp_new_i64(); + + tcg_gen_extu_tl_i64(fpscr, cpu_fpscr); + tcg_gen_andi_i64(fpscr_masked, fpscr, mask); + set_fpr(rt, fpscr_masked); + + tcg_temp_free_i64(fpscr_masked); + + return fpscr; +} + +static void store_fpscr_masked(TCGv_i64 fpscr, uint64_t clear_mask, + TCGv_i64 set_mask, uint32_t store_mask) +{ + TCGv_i64 fpscr_masked = tcg_temp_new_i64(); + TCGv_i32 st_mask = tcg_constant_i32(store_mask); + + tcg_gen_andi_i64(fpscr_masked, fpscr, ~clear_mask); + tcg_gen_or_i64(fpscr_masked, fpscr_masked, set_mask); + gen_helper_store_fpscr(cpu_env, fpscr_masked, st_mask); + + tcg_temp_free_i64(fpscr_masked); +} + +static bool trans_MFFS(DisasContext *ctx, arg_X_t_rc *a) +{ + TCGv_i64 fpscr; + + REQUIRE_FPU(ctx); + gen_reset_fpstatus(); - tcg_gen_extu_tl_i64(t0, cpu_fpscr); - set_fpr(rD(ctx->opcode), t0); - if (unlikely(Rc(ctx->opcode))) { + fpscr = place_from_fpscr(a->rt, UINT64_MAX); + if (a->rc) { gen_set_cr1_from_fpscr(ctx); } - tcg_temp_free_i64(t0); + + tcg_temp_free_i64(fpscr); + + return true; } -/* mffsl */ -static void gen_mffsl(DisasContext *ctx) +static bool trans_MFFSCE(DisasContext *ctx, arg_X_t *a) { - TCGv_i64 t0; + TCGv_i64 fpscr; - if (unlikely(!(ctx->insns_flags2 & PPC2_ISA300))) { - return gen_mffs(ctx); - } + REQUIRE_INSNS_FLAGS2(ctx, ISA300); + REQUIRE_FPU(ctx); - if (unlikely(!ctx->fpu_enabled)) { - gen_exception(ctx, POWERPC_EXCP_FPU); - return; - } - t0 = tcg_temp_new_i64(); gen_reset_fpstatus(); - tcg_gen_extu_tl_i64(t0, cpu_fpscr); - /* Mask everything except mode, status, and enables. */ - tcg_gen_andi_i64(t0, t0, FP_DRN | FP_STATUS | FP_ENABLES | FP_RN); - set_fpr(rD(ctx->opcode), t0); - tcg_temp_free_i64(t0); + fpscr = place_from_fpscr(a->rt, UINT64_MAX); + store_fpscr_masked(fpscr, FP_ENABLES, tcg_constant_i64(0), 0x0003); + + tcg_temp_free_i64(fpscr); + + return true; } -/* mffsce */ -static void gen_mffsce(DisasContext *ctx) +static bool trans_MFFSCRN(DisasContext *ctx, arg_X_tb *a) { - TCGv_i64 t0; - TCGv_i32 mask; - - if (unlikely(!(ctx->insns_flags2 & PPC2_ISA300))) { - return gen_mffs(ctx); - } + TCGv_i64 t1, fpscr; - if (unlikely(!ctx->fpu_enabled)) { - gen_exception(ctx, POWERPC_EXCP_FPU); - return; - } + REQUIRE_INSNS_FLAGS2(ctx, ISA300); + REQUIRE_FPU(ctx); - t0 = tcg_temp_new_i64(); + t1 = tcg_temp_new_i64(); + get_fpr(t1, a->rb); + tcg_gen_andi_i64(t1, t1, FP_RN); gen_reset_fpstatus(); - tcg_gen_extu_tl_i64(t0, cpu_fpscr); - set_fpr(rD(ctx->opcode), t0); + fpscr = place_from_fpscr(a->rt, FP_DRN | FP_ENABLES | FP_NI | FP_RN); + store_fpscr_masked(fpscr, FP_RN, t1, 0x0001); - /* Clear exception enable bits in the FPSCR. 
*/ - tcg_gen_andi_i64(t0, t0, ~FP_ENABLES); - mask = tcg_const_i32(0x0003); - gen_helper_store_fpscr(cpu_env, t0, mask); + tcg_temp_free_i64(t1); + tcg_temp_free_i64(fpscr); - tcg_temp_free_i32(mask); - tcg_temp_free_i64(t0); + return true; } -static void gen_helper_mffscrn(DisasContext *ctx, TCGv_i64 t1) +static bool trans_MFFSCDRN(DisasContext *ctx, arg_X_tb *a) { - TCGv_i64 t0 = tcg_temp_new_i64(); - TCGv_i32 mask = tcg_const_i32(0x0001); + TCGv_i64 t1, fpscr; - gen_reset_fpstatus(); - tcg_gen_extu_tl_i64(t0, cpu_fpscr); - tcg_gen_andi_i64(t0, t0, FP_DRN | FP_ENABLES | FP_RN); - set_fpr(rD(ctx->opcode), t0); + REQUIRE_INSNS_FLAGS2(ctx, ISA300); + REQUIRE_FPU(ctx); - /* Mask FPSCR value to clear RN. */ - tcg_gen_andi_i64(t0, t0, ~FP_RN); + t1 = tcg_temp_new_i64(); + get_fpr(t1, a->rb); + tcg_gen_andi_i64(t1, t1, FP_DRN); - /* Merge RN into FPSCR value. */ - tcg_gen_or_i64(t0, t0, t1); + gen_reset_fpstatus(); + fpscr = place_from_fpscr(a->rt, FP_DRN | FP_ENABLES | FP_NI | FP_RN); + store_fpscr_masked(fpscr, FP_DRN, t1, 0x0100); - gen_helper_store_fpscr(cpu_env, t0, mask); + tcg_temp_free_i64(t1); + tcg_temp_free_i64(fpscr); - tcg_temp_free_i32(mask); - tcg_temp_free_i64(t0); + return true; } -/* mffscrn */ -static void gen_mffscrn(DisasContext *ctx) +static bool trans_MFFSCRNI(DisasContext *ctx, arg_X_imm2 *a) { - TCGv_i64 t1; - - if (unlikely(!(ctx->insns_flags2 & PPC2_ISA300))) { - return gen_mffs(ctx); - } + TCGv_i64 t1, fpscr; - if (unlikely(!ctx->fpu_enabled)) { - gen_exception(ctx, POWERPC_EXCP_FPU); - return; - } + REQUIRE_INSNS_FLAGS2(ctx, ISA300); + REQUIRE_FPU(ctx); t1 = tcg_temp_new_i64(); - get_fpr(t1, rB(ctx->opcode)); - /* Mask FRB to get just RN. */ - tcg_gen_andi_i64(t1, t1, FP_RN); + tcg_gen_movi_i64(t1, a->imm); - gen_helper_mffscrn(ctx, t1); + gen_reset_fpstatus(); + fpscr = place_from_fpscr(a->rt, FP_DRN | FP_ENABLES | FP_NI | FP_RN); + store_fpscr_masked(fpscr, FP_RN, t1, 0x0001); tcg_temp_free_i64(t1); + tcg_temp_free_i64(fpscr); + + return true; } -/* mffscrni */ -static void gen_mffscrni(DisasContext *ctx) +static bool trans_MFFSCDRNI(DisasContext *ctx, arg_X_imm3 *a) { - TCGv_i64 t1; + TCGv_i64 t1, fpscr; - if (unlikely(!(ctx->insns_flags2 & PPC2_ISA300))) { - return gen_mffs(ctx); - } - - if (unlikely(!ctx->fpu_enabled)) { - gen_exception(ctx, POWERPC_EXCP_FPU); - return; - } + REQUIRE_INSNS_FLAGS2(ctx, ISA300); + REQUIRE_FPU(ctx); - t1 = tcg_const_i64((uint64_t)RM(ctx->opcode)); + t1 = tcg_temp_new_i64(); + tcg_gen_movi_i64(t1, (uint64_t)a->imm << FPSCR_DRN0); - gen_helper_mffscrn(ctx, t1); + gen_reset_fpstatus(); + fpscr = place_from_fpscr(a->rt, FP_DRN | FP_ENABLES | FP_NI | FP_RN); + store_fpscr_masked(fpscr, FP_DRN, t1, 0x0100); tcg_temp_free_i64(t1); + tcg_temp_free_i64(fpscr); + + return true; +} + +static bool trans_MFFSL(DisasContext *ctx, arg_X_t *a) +{ + TCGv_i64 fpscr; + + REQUIRE_INSNS_FLAGS2(ctx, ISA300); + REQUIRE_FPU(ctx); + + gen_reset_fpstatus(); + fpscr = place_from_fpscr(a->rt, + FP_DRN | FP_STATUS | FP_ENABLES | FP_NI | FP_RN); + + tcg_temp_free_i64(fpscr); + + return true; } /* mtfsb0 */ diff --git a/target/ppc/translate/fp-ops.c.inc b/target/ppc/translate/fp-ops.c.inc index 0538ab2d2d..1b65f5ab73 100644 --- a/target/ppc/translate/fp-ops.c.inc +++ b/target/ppc/translate/fp-ops.c.inc @@ -74,15 +74,6 @@ GEN_HANDLER_E(fcpsgn, 0x3F, 0x08, 0x00, 0x00000000, PPC_NONE, PPC2_ISA205), GEN_HANDLER_E(fmrgew, 0x3F, 0x06, 0x1E, 0x00000001, PPC_NONE, PPC2_VSX207), GEN_HANDLER_E(fmrgow, 0x3F, 0x06, 0x1A, 0x00000001, PPC_NONE, PPC2_VSX207), 
GEN_HANDLER(mcrfs, 0x3F, 0x00, 0x02, 0x0063F801, PPC_FLOAT), -GEN_HANDLER_E_2(mffs, 0x3F, 0x07, 0x12, 0x00, 0x00000000, PPC_FLOAT, PPC_NONE), -GEN_HANDLER_E_2(mffsce, 0x3F, 0x07, 0x12, 0x01, 0x00000000, PPC_FLOAT, - PPC2_ISA300), -GEN_HANDLER_E_2(mffsl, 0x3F, 0x07, 0x12, 0x18, 0x00000000, PPC_FLOAT, - PPC2_ISA300), -GEN_HANDLER_E_2(mffscrn, 0x3F, 0x07, 0x12, 0x16, 0x00000000, PPC_FLOAT, - PPC_NONE), -GEN_HANDLER_E_2(mffscrni, 0x3F, 0x07, 0x12, 0x17, 0x00000000, PPC_FLOAT, - PPC_NONE), GEN_HANDLER(mtfsb0, 0x3F, 0x06, 0x02, 0x001FF800, PPC_FLOAT), GEN_HANDLER(mtfsb1, 0x3F, 0x06, 0x01, 0x001FF800, PPC_FLOAT), GEN_HANDLER(mtfsf, 0x3F, 0x07, 0x16, 0x00000000, PPC_FLOAT), diff --git a/target/ppc/translate/vmx-impl.c.inc b/target/ppc/translate/vmx-impl.c.inc index 0b563bed37..e644ad3236 100644 --- a/target/ppc/translate/vmx-impl.c.inc +++ b/target/ppc/translate/vmx-impl.c.inc @@ -1234,18 +1234,6 @@ GEN_VXFORM_SAT(vsubuws, MO_32, sub, ussub, 0, 26); GEN_VXFORM_SAT(vsubsbs, MO_8, sub, sssub, 0, 28); GEN_VXFORM_SAT(vsubshs, MO_16, sub, sssub, 0, 29); GEN_VXFORM_SAT(vsubsws, MO_32, sub, sssub, 0, 30); -GEN_VXFORM(vadduqm, 0, 4); -GEN_VXFORM(vaddcuq, 0, 5); -GEN_VXFORM3(vaddeuqm, 30, 0); -GEN_VXFORM3(vaddecuq, 30, 0); -GEN_VXFORM_DUAL(vaddeuqm, PPC_NONE, PPC2_ALTIVEC_207, \ - vaddecuq, PPC_NONE, PPC2_ALTIVEC_207) -GEN_VXFORM(vsubuqm, 0, 20); -GEN_VXFORM(vsubcuq, 0, 21); -GEN_VXFORM3(vsubeuqm, 31, 0); -GEN_VXFORM3(vsubecuq, 31, 0); -GEN_VXFORM_DUAL(vsubeuqm, PPC_NONE, PPC2_ALTIVEC_207, \ - vsubecuq, PPC_NONE, PPC2_ALTIVEC_207) GEN_VXFORM_TRANS(vsl, 2, 7); GEN_VXFORM_TRANS(vsr, 2, 11); GEN_VXFORM_ENV(vpkuhum, 7, 0); @@ -2572,6 +2560,12 @@ static bool do_va_helper(DisasContext *ctx, arg_VA *a, return true; } +TRANS_FLAGS2(ALTIVEC_207, VADDECUQ, do_va_helper, gen_helper_VADDECUQ) +TRANS_FLAGS2(ALTIVEC_207, VADDEUQM, do_va_helper, gen_helper_VADDEUQM) + +TRANS_FLAGS2(ALTIVEC_207, VSUBEUQM, do_va_helper, gen_helper_VSUBEUQM) +TRANS_FLAGS2(ALTIVEC_207, VSUBECUQ, do_va_helper, gen_helper_VSUBECUQ) + TRANS_FLAGS(ALTIVEC, VPERM, do_va_helper, gen_helper_VPERM) TRANS_FLAGS2(ISA300, VPERMR, do_va_helper, gen_helper_VPERMR) @@ -2717,7 +2711,6 @@ GEN_VXFORM_TRANS(vgbbd, 6, 20); GEN_VXFORM(vpmsumb, 4, 16) GEN_VXFORM(vpmsumh, 4, 17) GEN_VXFORM(vpmsumw, 4, 18) -GEN_VXFORM(vpmsumd, 4, 19) #define GEN_BCD(op) \ static void gen_##op(DisasContext *ctx) \ @@ -2862,11 +2855,6 @@ GEN_VXFORM_DUAL(vsubuwm, PPC_ALTIVEC, PPC_NONE, \ bcdus, PPC_NONE, PPC2_ISA300) GEN_VXFORM_DUAL(vsubsbs, PPC_ALTIVEC, PPC_NONE, \ bcdtrunc, PPC_NONE, PPC2_ISA300) -GEN_VXFORM_DUAL(vsubuqm, PPC2_ALTIVEC_207, PPC_NONE, \ - bcdtrunc, PPC_NONE, PPC2_ISA300) -GEN_VXFORM_DUAL(vsubcuq, PPC2_ALTIVEC_207, PPC_NONE, \ - bcdutrunc, PPC_NONE, PPC2_ISA300) - static void gen_vsbox(DisasContext *ctx) { @@ -3101,6 +3089,14 @@ static bool do_vx_helper(DisasContext *ctx, arg_VX *a, return true; } +TRANS_FLAGS2(ALTIVEC_207, VADDCUQ, do_vx_helper, gen_helper_VADDCUQ) +TRANS_FLAGS2(ALTIVEC_207, VADDUQM, do_vx_helper, gen_helper_VADDUQM) + +TRANS_FLAGS2(ALTIVEC_207, VPMSUMD, do_vx_helper, gen_helper_VPMSUMD) + +TRANS_FLAGS2(ALTIVEC_207, VSUBCUQ, do_vx_helper, gen_helper_VSUBCUQ) +TRANS_FLAGS2(ALTIVEC_207, VSUBUQM, do_vx_helper, gen_helper_VSUBUQM) + static bool do_vx_vmuleo(DisasContext *ctx, arg_VX *a, bool even, void (*gen_mul)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64)) { diff --git a/target/ppc/translate/vmx-ops.c.inc b/target/ppc/translate/vmx-ops.c.inc index d7cc57868e..a3a0fd0650 100644 --- a/target/ppc/translate/vmx-ops.c.inc +++ b/target/ppc/translate/vmx-ops.c.inc 
@@ -126,12 +126,8 @@ GEN_VXFORM(vsubuws, 0, 26), GEN_VXFORM_DUAL(vsubsbs, bcdtrunc, 0, 28, PPC_ALTIVEC, PPC2_ISA300), GEN_VXFORM(vsubshs, 0, 29), GEN_VXFORM_DUAL(vsubsws, xpnd04_2, 0, 30, PPC_ALTIVEC, PPC_NONE), -GEN_VXFORM_207(vadduqm, 0, 4), -GEN_VXFORM_207(vaddcuq, 0, 5), -GEN_VXFORM_DUAL(vaddeuqm, vaddecuq, 30, 0xFF, PPC_NONE, PPC2_ALTIVEC_207), -GEN_VXFORM_DUAL(vsubuqm, bcdtrunc, 0, 20, PPC2_ALTIVEC_207, PPC2_ISA300), -GEN_VXFORM_DUAL(vsubcuq, bcdutrunc, 0, 21, PPC2_ALTIVEC_207, PPC2_ISA300), -GEN_VXFORM_DUAL(vsubeuqm, vsubecuq, 31, 0xFF, PPC_NONE, PPC2_ALTIVEC_207), +GEN_VXFORM_300(bcdtrunc, 0, 20), +GEN_VXFORM_300(bcdutrunc, 0, 21), GEN_VXFORM(vsl, 2, 7), GEN_VXFORM(vsr, 2, 11), GEN_VXFORM(vpkuhum, 7, 0), @@ -237,7 +233,6 @@ GEN_VXFORM_207(vgbbd, 6, 20), GEN_VXFORM_207(vpmsumb, 4, 16), GEN_VXFORM_207(vpmsumh, 4, 17), GEN_VXFORM_207(vpmsumw, 4, 18), -GEN_VXFORM_207(vpmsumd, 4, 19), GEN_VXFORM_207(vsbox, 4, 23), diff --git a/target/s390x/tcg/misc_helper.c b/target/s390x/tcg/misc_helper.c index aab9c47747..10dadb002a 100644 --- a/target/s390x/tcg/misc_helper.c +++ b/target/s390x/tcg/misc_helper.c @@ -158,6 +158,13 @@ void HELPER(spx)(CPUS390XState *env, uint64_t a1) if (prefix == old_prefix) { return; } + /* + * Since prefix got aligned to 8k and memory increments are a multiple of + * 8k checking the first page is sufficient + */ + if (!mmu_absolute_addr_valid(prefix, true)) { + tcg_s390_program_interrupt(env, PGM_ADDRESSING, GETPC()); + } env->psa = prefix; HELPER_LOG("prefix: %#x\n", prefix); diff --git a/target/s390x/tcg/translate.c b/target/s390x/tcg/translate.c index fd2433d625..e2ee005671 100644 --- a/target/s390x/tcg/translate.c +++ b/target/s390x/tcg/translate.c @@ -149,6 +149,7 @@ struct DisasContext { uint64_t pc_tmp; uint32_t ilen; enum cc_op cc_op; + bool exit_to_mainloop; }; /* Information carried about a condition to be evaluated. */ @@ -1123,19 +1124,9 @@ typedef struct { exiting the TB. */ #define DISAS_PC_UPDATED DISAS_TARGET_0 -/* We have emitted one or more goto_tb. No fixup required. */ -#define DISAS_GOTO_TB DISAS_TARGET_1 - /* We have updated the PC and CC values. */ #define DISAS_PC_CC_UPDATED DISAS_TARGET_2 -/* We are exiting the TB, but have neither emitted a goto_tb, nor - updated the PC for the next instruction to be executed. */ -#define DISAS_PC_STALE DISAS_TARGET_3 - -/* We are exiting the TB to the main loop. */ -#define DISAS_PC_STALE_NOCHAIN DISAS_TARGET_4 - /* Instruction flags */ #define IF_AFP1 0x0001 /* r1 is a fp reg for HFP/FPS instructions */ @@ -1189,7 +1180,7 @@ static DisasJumpType help_goto_direct(DisasContext *s, uint64_t dest) tcg_gen_goto_tb(0); tcg_gen_movi_i64(psw_addr, dest); tcg_gen_exit_tb(s->base.tb, 0); - return DISAS_GOTO_TB; + return DISAS_NORETURN; } else { tcg_gen_movi_i64(psw_addr, dest); per_branch(s, false); @@ -1258,7 +1249,7 @@ static DisasJumpType help_branch(DisasContext *s, DisasCompare *c, tcg_gen_movi_i64(psw_addr, dest); tcg_gen_exit_tb(s->base.tb, 1); - ret = DISAS_GOTO_TB; + ret = DISAS_NORETURN; } else { /* Fallthru can use goto_tb, but taken branch cannot. */ /* Store taken branch destination before the brcond. This @@ -3029,7 +3020,8 @@ static DisasJumpType op_lctl(DisasContext *s, DisasOps *o) tcg_temp_free_i32(r1); tcg_temp_free_i32(r3); /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. 
*/ - return DISAS_PC_STALE_NOCHAIN; + s->exit_to_mainloop = true; + return DISAS_TOO_MANY; } static DisasJumpType op_lctlg(DisasContext *s, DisasOps *o) @@ -3040,7 +3032,8 @@ static DisasJumpType op_lctlg(DisasContext *s, DisasOps *o) tcg_temp_free_i32(r1); tcg_temp_free_i32(r3); /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */ - return DISAS_PC_STALE_NOCHAIN; + s->exit_to_mainloop = true; + return DISAS_TOO_MANY; } static DisasJumpType op_lra(DisasContext *s, DisasOps *o) @@ -3996,7 +3989,7 @@ static DisasJumpType op_sacf(DisasContext *s, DisasOps *o) { gen_helper_sacf(cpu_env, o->in2); /* Addressing mode has changed, so end the block. */ - return DISAS_PC_STALE; + return DISAS_TOO_MANY; } #endif @@ -4032,7 +4025,7 @@ static DisasJumpType op_sam(DisasContext *s, DisasOps *o) tcg_temp_free_i64(tsam); /* Always exit the TB, since we (may have) changed execution mode. */ - return DISAS_PC_STALE; + return DISAS_TOO_MANY; } static DisasJumpType op_sar(DisasContext *s, DisasOps *o) @@ -4290,7 +4283,8 @@ static DisasJumpType op_ssm(DisasContext *s, DisasOps *o) { tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8); /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */ - return DISAS_PC_STALE_NOCHAIN; + s->exit_to_mainloop = true; + return DISAS_TOO_MANY; } static DisasJumpType op_stap(DisasContext *s, DisasOps *o) @@ -4555,7 +4549,8 @@ static DisasJumpType op_stnosm(DisasContext *s, DisasOps *o) } /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */ - return DISAS_PC_STALE_NOCHAIN; + s->exit_to_mainloop = true; + return DISAS_TOO_MANY; } static DisasJumpType op_stura(DisasContext *s, DisasOps *o) @@ -6565,13 +6560,13 @@ static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s) /* io should be the last instruction in tb when icount is enabled */ if (unlikely(icount && ret == DISAS_NEXT)) { - ret = DISAS_PC_STALE; + ret = DISAS_TOO_MANY; } #ifndef CONFIG_USER_ONLY if (s->base.tb->flags & FLAG_MASK_PER) { /* An exception might be triggered, save PSW if not already done. */ - if (ret == DISAS_NEXT || ret == DISAS_PC_STALE) { + if (ret == DISAS_NEXT || ret == DISAS_TOO_MANY) { tcg_gen_movi_i64(psw_addr, s->pc_tmp); } @@ -6598,6 +6593,7 @@ static void s390x_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) dc->cc_op = CC_OP_DYNAMIC; dc->ex_value = dc->base.tb->cs_base; + dc->exit_to_mainloop = (dc->base.tb->flags & FLAG_MASK_PER) || dc->ex_value; } static void s390x_tr_tb_start(DisasContextBase *db, CPUState *cs) @@ -6634,12 +6630,9 @@ static void s390x_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs) DisasContext *dc = container_of(dcbase, DisasContext, base); switch (dc->base.is_jmp) { - case DISAS_GOTO_TB: case DISAS_NORETURN: break; case DISAS_TOO_MANY: - case DISAS_PC_STALE: - case DISAS_PC_STALE_NOCHAIN: update_psw_addr(dc); /* FALLTHRU */ case DISAS_PC_UPDATED: @@ -6649,8 +6642,7 @@ static void s390x_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs) /* FALLTHRU */ case DISAS_PC_CC_UPDATED: /* Exit the TB, either by raising a debug exception or by return. 
*/ - if ((dc->base.tb->flags & FLAG_MASK_PER) || - dc->base.is_jmp == DISAS_PC_STALE_NOCHAIN) { + if (dc->exit_to_mainloop) { tcg_gen_exit_tb(NULL, 0); } else { tcg_gen_lookup_and_goto_ptr(); diff --git a/tcg/region.c b/tcg/region.c index 71ea81d671..88d6bb273f 100644 --- a/tcg/region.c +++ b/tcg/region.c @@ -548,7 +548,7 @@ static int alloc_code_gen_buffer_anon(size_t size, int prot, #ifdef CONFIG_POSIX #include "qemu/memfd.h" -static bool alloc_code_gen_buffer_splitwx_memfd(size_t size, Error **errp) +static int alloc_code_gen_buffer_splitwx_memfd(size_t size, Error **errp) { void *buf_rw = NULL, *buf_rx = MAP_FAILED; int fd = -1; diff --git a/tests/qemu-iotests/tests/copy-before-write b/tests/qemu-iotests/tests/copy-before-write index 16efebbf8f..2ffe092b31 100755 --- a/tests/qemu-iotests/tests/copy-before-write +++ b/tests/qemu-iotests/tests/copy-before-write @@ -192,6 +192,11 @@ read 1048576/1048576 bytes at offset 0 def test_timeout_break_guest(self): log = self.do_cbw_timeout('break-guest-write') + # macOS and FreeBSD tend to represent ETIMEDOUT as + # "Operation timed out", when Linux prefer + # "Connection timed out" + log = log.replace('Operation timed out', + 'Connection timed out') self.assertEqual(log, """\ wrote 524288/524288 bytes at offset 0 512 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) @@ -213,4 +218,5 @@ read failed: Permission denied if __name__ == '__main__': iotests.main(supported_fmts=['qcow2'], - supported_protocols=['file']) + supported_protocols=['file'], + required_fmts=['copy-before-write']) diff --git a/tests/tcg/ppc64/Makefile.target b/tests/tcg/ppc64/Makefile.target index babd209573..331fae628e 100644 --- a/tests/tcg/ppc64/Makefile.target +++ b/tests/tcg/ppc64/Makefile.target @@ -11,6 +11,7 @@ endif $(PPC64_TESTS): CFLAGS += -mpower8-vector PPC64_TESTS += mtfsf +PPC64_TESTS += mffsce ifneq ($(CROSS_CC_HAS_POWER10),) PPC64_TESTS += byte_reverse sha512-vector diff --git a/tests/tcg/ppc64le/Makefile.target b/tests/tcg/ppc64le/Makefile.target index 5b0eb5e870..6ca3003f02 100644 --- a/tests/tcg/ppc64le/Makefile.target +++ b/tests/tcg/ppc64le/Makefile.target @@ -24,6 +24,7 @@ run-sha512-vector: QEMU_OPTS+=-cpu POWER10 run-plugin-sha512-vector-with-%: QEMU_OPTS+=-cpu POWER10 PPC64LE_TESTS += mtfsf +PPC64LE_TESTS += mffsce PPC64LE_TESTS += signal_save_restore_xer PPC64LE_TESTS += xxspltw diff --git a/tests/tcg/ppc64le/mffsce.c b/tests/tcg/ppc64le/mffsce.c new file mode 100644 index 0000000000..20d882cb45 --- /dev/null +++ b/tests/tcg/ppc64le/mffsce.c @@ -0,0 +1,37 @@ +#include <stdlib.h> +#include <stdint.h> +#include <assert.h> + +#define MTFSF(FLM, FRB) asm volatile ("mtfsf %0, %1" :: "i" (FLM), "f" (FRB)) +#define MFFS(FRT) asm("mffs %0" : "=f" (FRT)) +#define MFFSCE(FRT) asm("mffsce %0" : "=f" (FRT)) + +#define PPC_BIT_NR(nr) (63 - (nr)) + +#define FP_VE (1ull << PPC_BIT_NR(56)) +#define FP_UE (1ull << PPC_BIT_NR(58)) +#define FP_ZE (1ull << PPC_BIT_NR(59)) +#define FP_XE (1ull << PPC_BIT_NR(60)) +#define FP_NI (1ull << PPC_BIT_NR(61)) +#define FP_RN1 (1ull << PPC_BIT_NR(63)) + +int main(void) +{ + uint64_t frt, fpscr; + uint64_t test_value = FP_VE | FP_UE | FP_ZE | + FP_XE | FP_NI | FP_RN1; + MTFSF(0b11111111, test_value); /* set test value to cpu fpscr */ + MFFSCE(frt); + MFFS(fpscr); /* read the value that mffsce stored to cpu fpscr */ + + /* the returned value should be as the cpu fpscr was before */ + assert((frt & 0xff) == test_value); + + /* + * the cpu fpscr last 3 bits should be unchanged + * and enable bits should be unset + */ + assert((fpscr 
& 0xff) == (test_value & 0x7)); + + return 0; +}
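
As a minimal illustration of what the trans_ADDG6S translation above computes, here is a standalone C sketch (not part of this series; addg6s_ref is only an illustrative name). It mirrors the TCG loop: the per-nibble carry is chained through all sixteen digit positions, and a 6 is placed in every nibble whose addition produced no carry-out.

#include <assert.h>
#include <stdint.h>

/*
 * Illustrative reference model of the ADDG6S nibble-carry computation,
 * mirroring the loop emitted by trans_ADDG6S. Not part of the patch.
 */
static uint64_t addg6s_ref(uint64_t ra, uint64_t rb)
{
    uint64_t carries = 0;
    unsigned carry = 0;
    int i;

    for (i = 0; i < 16; i++) {
        unsigned sum = ((ra >> (4 * i)) & 0xf) + ((rb >> (4 * i)) & 0xf) + carry;
        carry = (sum & 0x10) != 0;             /* carry out of nibble i */
        carries |= (uint64_t)carry << (4 * i);
    }
    /* a 6 goes into every nibble that produced no carry-out */
    return (carries ^ 0x1111111111111111ULL) * 6;
}

int main(void)
{
    /* 0xf + 0x1 carries out of the low nibble only: 0 there, 6 elsewhere */
    assert(addg6s_ref(0xf, 0x1) == 0x6666666666666660ULL);
    /* no nibble carries at all: every digit position gets a 6 */
    assert(addg6s_ref(0x12, 0x34) == 0x6666666666666666ULL);
    return 0;
}

Similarly, the helper_VADDCUQ and helper_VADDECUQ rewrites above rely on the identity that an unsigned addition a + b carries out exactly when b exceeds the bitwise complement of a. A tiny sketch with 64-bit values (the helpers apply the same identity to Int128 via int128_ult and int128_not):

#include <assert.h>
#include <stdint.h>

/* carry out of a 64-bit unsigned addition, computed without a wider type */
static int carry_out_u64(uint64_t a, uint64_t b)
{
    /* a + b > UINT64_MAX  <=>  b > UINT64_MAX - a  <=>  ~a < b */
    return ~a < b;
}

int main(void)
{
    assert(carry_out_u64(UINT64_MAX, 1) == 1);
    assert(carry_out_u64(UINT64_MAX, 0) == 0);
    assert(carry_out_u64(1, UINT64_MAX) == 1);
    return 0;
}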