-rw-r--r--  MAINTAINERS | 13
-rwxr-xr-x  configure | 4
-rw-r--r--  default-configs/aarch64-softmmu.mak | 6
-rw-r--r--  default-configs/arm-softmmu.mak | 5
-rw-r--r--  device_tree.c | 64
-rw-r--r--  exec.c | 44
-rw-r--r--  gdb-xml/aarch64-fpu.xml | 86
-rw-r--r--  hw/arm/Makefile.objs | 3
-rw-r--r--  hw/arm/allwinner-a10.c | 103
-rw-r--r--  hw/arm/boot.c | 217
-rw-r--r--  hw/arm/cubieboard.c | 69
-rw-r--r--  hw/arm/digic.c | 115
-rw-r--r--  hw/arm/digic_boards.c | 162
-rw-r--r--  hw/arm/highbank.c | 33
-rw-r--r--  hw/arm/vexpress.c | 62
-rw-r--r--  hw/arm/virt.c | 106
-rw-r--r--  hw/arm/xilinx_zynq.c | 21
-rw-r--r--  hw/block/pflash_cfi01.c | 260
-rw-r--r--  hw/char/Makefile.objs | 1
-rw-r--r--  hw/char/digic-uart.c | 195
-rw-r--r--  hw/core/loader.c | 7
-rw-r--r--  hw/intc/Makefile.objs | 1
-rw-r--r--  hw/intc/allwinner-a10-pic.c | 200
-rw-r--r--  hw/intc/xics.c | 24
-rw-r--r--  hw/microblaze/boot.c | 12
-rw-r--r--  hw/nvram/spapr_nvram.c | 16
-rw-r--r--  hw/pci-host/grackle.c | 2
-rw-r--r--  hw/pci-host/uninorth.c | 4
-rw-r--r--  hw/ppc/e500.c | 213
-rw-r--r--  hw/ppc/e500plat.c | 6
-rw-r--r--  hw/ppc/mpc8544ds.c | 6
-rw-r--r--  hw/ppc/ppc440_bamboo.c | 24
-rw-r--r--  hw/ppc/spapr.c | 49
-rw-r--r--  hw/ppc/spapr_events.c | 6
-rw-r--r--  hw/ppc/spapr_pci.c | 42
-rw-r--r--  hw/ppc/spapr_rtas.c | 97
-rw-r--r--  hw/ppc/spapr_vio.c | 12
-rw-r--r--  hw/ppc/virtex_ml507.c | 2
-rw-r--r--  hw/timer/Makefile.objs | 3
-rw-r--r--  hw/timer/allwinner-a10-pit.c | 254
-rw-r--r--  hw/timer/digic-timer.c | 163
-rw-r--r--  include/exec/cpu-common.h | 1
-rw-r--r--  include/hw/arm/allwinner-a10.h | 35
-rw-r--r--  include/hw/arm/digic.h | 43
-rw-r--r--  include/hw/char/digic-uart.h | 47
-rw-r--r--  include/hw/intc/allwinner-a10-pic.h | 40
-rw-r--r--  include/hw/ppc/spapr.h | 9
-rw-r--r--  include/hw/ptimer.h | 4
-rw-r--r--  include/hw/timer/allwinner-a10-pit.h | 58
-rw-r--r--  include/hw/timer/digic-timer.h | 46
-rw-r--r--  include/migration/vmstate.h | 10
-rw-r--r--  include/qemu/host-utils.h | 32
-rw-r--r--  include/sysemu/device_tree.h | 80
-rw-r--r--  linux-user/signal.c | 6
-rw-r--r--  target-arm/Makefile.objs | 5
-rw-r--r--  target-arm/cpu-qom.h | 1
-rw-r--r--  target-arm/cpu.c | 54
-rw-r--r--  target-arm/cpu.h | 100
-rw-r--r--  target-arm/cpu64.c | 11
-rw-r--r--  target-arm/crypto_helper.c | 281
-rw-r--r--  target-arm/gdbstub64.c | 4
-rw-r--r--  target-arm/helper-a64.c | 79
-rw-r--r--  target-arm/helper-a64.h | 24
-rw-r--r--  target-arm/helper.c | 60
-rw-r--r--  target-arm/helper.h | 7
-rw-r--r--  target-arm/kvm.c | 495
-rw-r--r--  target-arm/kvm32.c | 515
-rw-r--r--  target-arm/kvm64.c | 204
-rw-r--r--  target-arm/translate-a64.c | 1699
-rw-r--r--  target-arm/translate.c | 102
-rw-r--r--  target-arm/translate.h | 25
-rw-r--r--  target-mips/translate.c | 119
-rw-r--r--  target-ppc/cpu-models.c | 3
-rw-r--r--  target-ppc/cpu-models.h | 7
-rw-r--r--  target-ppc/cpu-qom.h | 2
-rw-r--r--  target-ppc/cpu.h | 9
-rw-r--r--  target-ppc/excp_helper.c | 5
-rw-r--r--  target-ppc/helper_regs.h | 2
-rw-r--r--  target-ppc/kvm.c | 32
-rw-r--r--  target-ppc/translate.c | 605
-rw-r--r--  target-ppc/translate_init.c | 91
-rw-r--r--  target-sh4/translate.c | 167
-rw-r--r--  tcg/i386/tcg-target.c | 2
-rw-r--r--  tests/qom-test.c | 2
84 files changed, 6609 insertions(+), 1226 deletions(-)
diff --git a/MAINTAINERS b/MAINTAINERS
index 7f45d1d696..a5ab8f8cea 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -219,6 +219,13 @@ F: *win32*
ARM Machines
------------
+Allwinner-a10
+M: Li Guang <lig.fnst@cn.fujitsu.com>
+S: Maintained
+F: hw/*/allwinner-a10*
+F: include/hw/*/allwinner-a10*
+F: hw/arm/cubieboard.c
+
Exynos
M: Evgeny Voevodin <e.voevodin@samsung.com>
M: Maksim Kozlov <m.kozlov@samsung.com>
@@ -233,6 +240,12 @@ S: Supported
F: hw/arm/highbank.c
F: hw/net/xgmac.c
+Canon DIGIC
+M: Antony Pavlov <antonynpavlov@gmail.com>
+S: Maintained
+F: include/hw/arm/digic.h
+F: hw/*/digic*
+
Gumstix
M: qemu-devel@nongnu.org
S: Orphan
diff --git a/configure b/configure
index edfea9591c..07b6be34ff 100755
--- a/configure
+++ b/configure
@@ -4438,7 +4438,7 @@ case "$target_name" in
aarch64)
TARGET_BASE_ARCH=arm
bflt="yes"
- gdb_xml_files="aarch64-core.xml"
+ gdb_xml_files="aarch64-core.xml aarch64-fpu.xml"
;;
cris)
;;
@@ -4550,7 +4550,7 @@ case "$target_name" in
*)
esac
case "$target_name" in
- arm|i386|x86_64|ppcemb|ppc|ppc64|s390x)
+ aarch64|arm|i386|x86_64|ppcemb|ppc|ppc64|s390x)
# Make sure the target and host cpus are compatible
if test "$kvm" = "yes" -a "$target_softmmu" = "yes" -a \
\( "$target_name" = "$cpu" -o \
diff --git a/default-configs/aarch64-softmmu.mak b/default-configs/aarch64-softmmu.mak
new file mode 100644
index 0000000000..6d3b5c7a46
--- /dev/null
+++ b/default-configs/aarch64-softmmu.mak
@@ -0,0 +1,6 @@
+# Default configuration for aarch64-softmmu
+
+# We support all the 32 bit boards so need all their config
+include arm-softmmu.mak
+
+# Currently no 64-bit specific config requirements
diff --git a/default-configs/arm-softmmu.mak b/default-configs/arm-softmmu.mak
index e48f102af6..ce1d620842 100644
--- a/default-configs/arm-softmmu.mak
+++ b/default-configs/arm-softmmu.mak
@@ -64,6 +64,7 @@ CONFIG_XILINX_SPIPS=y
CONFIG_ARM11SCU=y
CONFIG_A9SCU=y
+CONFIG_DIGIC=y
CONFIG_MARVELL_88W8618=y
CONFIG_OMAP=y
CONFIG_TSC210X=y
@@ -82,3 +83,7 @@ CONFIG_VERSATILE_I2C=y
CONFIG_SDHCI=y
CONFIG_INTEGRATOR_DEBUG=y
+
+CONFIG_ALLWINNER_A10_PIT=y
+CONFIG_ALLWINNER_A10_PIC=y
+CONFIG_ALLWINNER_A10=y
diff --git a/device_tree.c b/device_tree.c
index 391da8c45e..ca83504819 100644
--- a/device_tree.c
+++ b/device_tree.c
@@ -131,12 +131,12 @@ static int findnode_nofail(void *fdt, const char *node_path)
return offset;
}
-int qemu_devtree_setprop(void *fdt, const char *node_path,
- const char *property, const void *val_array, int size)
+int qemu_fdt_setprop(void *fdt, const char *node_path,
+ const char *property, const void *val, int size)
{
int r;
- r = fdt_setprop(fdt, findnode_nofail(fdt, node_path), property, val_array, size);
+ r = fdt_setprop(fdt, findnode_nofail(fdt, node_path), property, val, size);
if (r < 0) {
fprintf(stderr, "%s: Couldn't set %s/%s: %s\n", __func__, node_path,
property, fdt_strerror(r));
@@ -146,8 +146,8 @@ int qemu_devtree_setprop(void *fdt, const char *node_path,
return r;
}
-int qemu_devtree_setprop_cell(void *fdt, const char *node_path,
- const char *property, uint32_t val)
+int qemu_fdt_setprop_cell(void *fdt, const char *node_path,
+ const char *property, uint32_t val)
{
int r;
@@ -161,15 +161,15 @@ int qemu_devtree_setprop_cell(void *fdt, const char *node_path,
return r;
}
-int qemu_devtree_setprop_u64(void *fdt, const char *node_path,
- const char *property, uint64_t val)
+int qemu_fdt_setprop_u64(void *fdt, const char *node_path,
+ const char *property, uint64_t val)
{
val = cpu_to_be64(val);
- return qemu_devtree_setprop(fdt, node_path, property, &val, sizeof(val));
+ return qemu_fdt_setprop(fdt, node_path, property, &val, sizeof(val));
}
-int qemu_devtree_setprop_string(void *fdt, const char *node_path,
- const char *property, const char *string)
+int qemu_fdt_setprop_string(void *fdt, const char *node_path,
+ const char *property, const char *string)
{
int r;
@@ -183,8 +183,8 @@ int qemu_devtree_setprop_string(void *fdt, const char *node_path,
return r;
}
-const void *qemu_devtree_getprop(void *fdt, const char *node_path,
- const char *property, int *lenp)
+const void *qemu_fdt_getprop(void *fdt, const char *node_path,
+ const char *property, int *lenp)
{
int len;
const void *r;
@@ -200,11 +200,11 @@ const void *qemu_devtree_getprop(void *fdt, const char *node_path,
return r;
}
-uint32_t qemu_devtree_getprop_cell(void *fdt, const char *node_path,
- const char *property)
+uint32_t qemu_fdt_getprop_cell(void *fdt, const char *node_path,
+ const char *property)
{
int len;
- const uint32_t *p = qemu_devtree_getprop(fdt, node_path, property, &len);
+ const uint32_t *p = qemu_fdt_getprop(fdt, node_path, property, &len);
if (len != 4) {
fprintf(stderr, "%s: %s/%s not 4 bytes long (not a cell?)\n",
__func__, node_path, property);
@@ -213,7 +213,7 @@ uint32_t qemu_devtree_getprop_cell(void *fdt, const char *node_path,
return be32_to_cpu(*p);
}
-uint32_t qemu_devtree_get_phandle(void *fdt, const char *path)
+uint32_t qemu_fdt_get_phandle(void *fdt, const char *path)
{
uint32_t r;
@@ -227,15 +227,15 @@ uint32_t qemu_devtree_get_phandle(void *fdt, const char *path)
return r;
}
-int qemu_devtree_setprop_phandle(void *fdt, const char *node_path,
- const char *property,
- const char *target_node_path)
+int qemu_fdt_setprop_phandle(void *fdt, const char *node_path,
+ const char *property,
+ const char *target_node_path)
{
- uint32_t phandle = qemu_devtree_get_phandle(fdt, target_node_path);
- return qemu_devtree_setprop_cell(fdt, node_path, property, phandle);
+ uint32_t phandle = qemu_fdt_get_phandle(fdt, target_node_path);
+ return qemu_fdt_setprop_cell(fdt, node_path, property, phandle);
}
-uint32_t qemu_devtree_alloc_phandle(void *fdt)
+uint32_t qemu_fdt_alloc_phandle(void *fdt)
{
static int phandle = 0x0;
@@ -259,7 +259,7 @@ uint32_t qemu_devtree_alloc_phandle(void *fdt)
return phandle++;
}
-int qemu_devtree_nop_node(void *fdt, const char *node_path)
+int qemu_fdt_nop_node(void *fdt, const char *node_path)
{
int r;
@@ -273,7 +273,7 @@ int qemu_devtree_nop_node(void *fdt, const char *node_path)
return r;
}
-int qemu_devtree_add_subnode(void *fdt, const char *name)
+int qemu_fdt_add_subnode(void *fdt, const char *name)
{
char *dupname = g_strdup(name);
char *basename = strrchr(dupname, '/');
@@ -303,7 +303,7 @@ int qemu_devtree_add_subnode(void *fdt, const char *name)
return retval;
}
-void qemu_devtree_dumpdtb(void *fdt, int size)
+void qemu_fdt_dumpdtb(void *fdt, int size)
{
const char *dumpdtb = qemu_opt_get(qemu_get_machine_opts(), "dumpdtb");
@@ -313,11 +313,11 @@ void qemu_devtree_dumpdtb(void *fdt, int size)
}
}
-int qemu_devtree_setprop_sized_cells_from_array(void *fdt,
- const char *node_path,
- const char *property,
- int numvalues,
- uint64_t *values)
+int qemu_fdt_setprop_sized_cells_from_array(void *fdt,
+ const char *node_path,
+ const char *property,
+ int numvalues,
+ uint64_t *values)
{
uint32_t *propcells;
uint64_t value;
@@ -342,6 +342,6 @@ int qemu_devtree_setprop_sized_cells_from_array(void *fdt,
propcells[cellnum++] = cpu_to_be32(value);
}
- return qemu_devtree_setprop(fdt, node_path, property, propcells,
- cellnum * sizeof(uint32_t));
+ return qemu_fdt_setprop(fdt, node_path, property, propcells,
+ cellnum * sizeof(uint32_t));
}
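
The hunks above rename the qemu_devtree_* helpers to qemu_fdt_* without changing their behaviour. For reference, a minimal sketch (not part of this patch) of building a /chosen node with the renamed API; it assumes create_device_tree() elsewhere in device_tree.c and the prototypes in include/sysemu/device_tree.h:

#include "sysemu/device_tree.h"

/* Sketch only: allocate an empty FDT and fill in a /chosen node
 * using the renamed qemu_fdt_* helpers shown above. */
static void *build_example_fdt(const char *cmdline)
{
    int size;
    void *fdt = create_device_tree(&size);  /* assumed helper, not in this hunk */

    if (!fdt) {
        return NULL;
    }
    qemu_fdt_setprop_cell(fdt, "/", "#address-cells", 0x2);
    qemu_fdt_add_subnode(fdt, "/chosen");
    qemu_fdt_setprop_string(fdt, "/chosen", "bootargs", cmdline);
    return fdt;
}
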
diff --git a/exec.c b/exec.c
index 00526d18c0..7e49e8e555 100644
--- a/exec.c
+++ b/exec.c
@@ -50,6 +50,7 @@
#include "translate-all.h"
#include "exec/memory-internal.h"
+#include "qemu/cache-utils.h"
#include "qemu/range.h"
@@ -2070,9 +2071,13 @@ void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
address_space_rw(&address_space_memory, addr, buf, len, is_write);
}
-/* used for ROM loading : can write in RAM and ROM */
-void cpu_physical_memory_write_rom(hwaddr addr,
- const uint8_t *buf, int len)
+enum write_rom_type {
+ WRITE_DATA,
+ FLUSH_CACHE,
+};
+
+static inline void cpu_physical_memory_write_rom_internal(
+ hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
{
hwaddr l;
uint8_t *ptr;
@@ -2091,8 +2096,15 @@ void cpu_physical_memory_write_rom(hwaddr addr,
addr1 += memory_region_get_ram_addr(mr);
/* ROM/RAM case */
ptr = qemu_get_ram_ptr(addr1);
- memcpy(ptr, buf, l);
- invalidate_and_set_dirty(addr1, l);
+ switch (type) {
+ case WRITE_DATA:
+ memcpy(ptr, buf, l);
+ invalidate_and_set_dirty(addr1, l);
+ break;
+ case FLUSH_CACHE:
+ flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
+ break;
+ }
}
len -= l;
buf += l;
@@ -2100,6 +2112,28 @@ void cpu_physical_memory_write_rom(hwaddr addr,
}
}
+/* used for ROM loading : can write in RAM and ROM */
+void cpu_physical_memory_write_rom(hwaddr addr,
+ const uint8_t *buf, int len)
+{
+ cpu_physical_memory_write_rom_internal(addr, buf, len, WRITE_DATA);
+}
+
+void cpu_flush_icache_range(hwaddr start, int len)
+{
+ /*
+ * This function should do the same thing as an icache flush that was
+ * triggered from within the guest. For TCG we are always cache coherent,
+ * so there is no need to flush anything. For KVM / Xen we need to flush
+ * the host's instruction cache at least.
+ */
+ if (tcg_enabled()) {
+ return;
+ }
+
+ cpu_physical_memory_write_rom_internal(start, NULL, len, FLUSH_CACHE);
+}
+
typedef struct {
MemoryRegion *mr;
void *buffer;
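
The exec.c change factors cpu_physical_memory_write_rom() through a common helper and adds cpu_flush_icache_range(), which is a no-op under TCG but flushes the host instruction cache for KVM/Xen. A minimal sketch (not part of this patch, prototypes assumed to come from exec/cpu-common.h, the header touched by this series) of the intended call pattern when a loader writes executable code into guest memory:

#include "exec/cpu-common.h"

/* Sketch only: copy a code blob into guest memory, then make sure the
 * host instruction cache sees it (the flush is a no-op for TCG). */
static void install_boot_blob(hwaddr addr, const uint8_t *blob, int len)
{
    cpu_physical_memory_write_rom(addr, blob, len);
    cpu_flush_icache_range(addr, len);
}
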
diff --git a/gdb-xml/aarch64-fpu.xml b/gdb-xml/aarch64-fpu.xml
new file mode 100644
index 0000000000..997197e5e5
--- /dev/null
+++ b/gdb-xml/aarch64-fpu.xml
@@ -0,0 +1,86 @@
+<?xml version="1.0"?>
+<!-- Copyright (C) 2009-2012 Free Software Foundation, Inc.
+ Contributed by ARM Ltd.
+
+ Copying and distribution of this file, with or without modification,
+ are permitted in any medium without royalty provided the copyright
+ notice and this notice are preserved. -->
+
+<!DOCTYPE feature SYSTEM "gdb-target.dtd">
+<feature name="org.gnu.gdb.aarch64.fpu">
+ <vector id="v2d" type="ieee_double" count="2"/>
+ <vector id="v2u" type="uint64" count="2"/>
+ <vector id="v2i" type="int64" count="2"/>
+ <vector id="v4f" type="ieee_single" count="4"/>
+ <vector id="v4u" type="uint32" count="4"/>
+ <vector id="v4i" type="int32" count="4"/>
+ <vector id="v8u" type="uint16" count="8"/>
+ <vector id="v8i" type="int16" count="8"/>
+ <vector id="v16u" type="uint8" count="16"/>
+ <vector id="v16i" type="int8" count="16"/>
+ <vector id="v1u" type="uint128" count="1"/>
+ <vector id="v1i" type="int128" count="1"/>
+ <union id="vnd">
+ <field name="f" type="v2d"/>
+ <field name="u" type="v2u"/>
+ <field name="s" type="v2i"/>
+ </union>
+ <union id="vns">
+ <field name="f" type="v4f"/>
+ <field name="u" type="v4u"/>
+ <field name="s" type="v4i"/>
+ </union>
+ <union id="vnh">
+ <field name="u" type="v8u"/>
+ <field name="s" type="v8i"/>
+ </union>
+ <union id="vnb">
+ <field name="u" type="v16u"/>
+ <field name="s" type="v16i"/>
+ </union>
+ <union id="vnq">
+ <field name="u" type="v1u"/>
+ <field name="s" type="v1i"/>
+ </union>
+ <union id="aarch64v">
+ <field name="d" type="vnd"/>
+ <field name="s" type="vns"/>
+ <field name="h" type="vnh"/>
+ <field name="b" type="vnb"/>
+ <field name="q" type="vnq"/>
+ </union>
+ <reg name="v0" bitsize="128" type="aarch64v" regnum="34"/>
+ <reg name="v1" bitsize="128" type="aarch64v" />
+ <reg name="v2" bitsize="128" type="aarch64v" />
+ <reg name="v3" bitsize="128" type="aarch64v" />
+ <reg name="v4" bitsize="128" type="aarch64v" />
+ <reg name="v5" bitsize="128" type="aarch64v" />
+ <reg name="v6" bitsize="128" type="aarch64v" />
+ <reg name="v7" bitsize="128" type="aarch64v" />
+ <reg name="v8" bitsize="128" type="aarch64v" />
+ <reg name="v9" bitsize="128" type="aarch64v" />
+ <reg name="v10" bitsize="128" type="aarch64v"/>
+ <reg name="v11" bitsize="128" type="aarch64v"/>
+ <reg name="v12" bitsize="128" type="aarch64v"/>
+ <reg name="v13" bitsize="128" type="aarch64v"/>
+ <reg name="v14" bitsize="128" type="aarch64v"/>
+ <reg name="v15" bitsize="128" type="aarch64v"/>
+ <reg name="v16" bitsize="128" type="aarch64v"/>
+ <reg name="v17" bitsize="128" type="aarch64v"/>
+ <reg name="v18" bitsize="128" type="aarch64v"/>
+ <reg name="v19" bitsize="128" type="aarch64v"/>
+ <reg name="v20" bitsize="128" type="aarch64v"/>
+ <reg name="v21" bitsize="128" type="aarch64v"/>
+ <reg name="v22" bitsize="128" type="aarch64v"/>
+ <reg name="v23" bitsize="128" type="aarch64v"/>
+ <reg name="v24" bitsize="128" type="aarch64v"/>
+ <reg name="v25" bitsize="128" type="aarch64v"/>
+ <reg name="v26" bitsize="128" type="aarch64v"/>
+ <reg name="v27" bitsize="128" type="aarch64v"/>
+ <reg name="v28" bitsize="128" type="aarch64v"/>
+ <reg name="v29" bitsize="128" type="aarch64v"/>
+ <reg name="v30" bitsize="128" type="aarch64v"/>
+ <reg name="v31" bitsize="128" type="aarch64v"/>
+ <reg name="fpsr" bitsize="32"/>
+ <reg name="fpcr" bitsize="32"/>
+</feature>
diff --git a/hw/arm/Makefile.objs b/hw/arm/Makefile.objs
index 78b56149b6..6088e53653 100644
--- a/hw/arm/Makefile.objs
+++ b/hw/arm/Makefile.objs
@@ -1,7 +1,10 @@
obj-y += boot.o collie.o exynos4_boards.o gumstix.o highbank.o
+obj-$(CONFIG_DIGIC) += digic_boards.o
obj-y += integratorcp.o kzm.o mainstone.o musicpal.o nseries.o
obj-y += omap_sx1.o palm.o realview.o spitz.o stellaris.o
obj-y += tosa.o versatilepb.o vexpress.o virt.o xilinx_zynq.o z2.o
obj-y += armv7m.o exynos4210.o pxa2xx.o pxa2xx_gpio.o pxa2xx_pic.o
+obj-$(CONFIG_DIGIC) += digic.o
obj-y += omap1.o omap2.o strongarm.o
+obj-$(CONFIG_ALLWINNER_A10) += allwinner-a10.o cubieboard.o
diff --git a/hw/arm/allwinner-a10.c b/hw/arm/allwinner-a10.c
new file mode 100644
index 0000000000..4658e19504
--- /dev/null
+++ b/hw/arm/allwinner-a10.c
@@ -0,0 +1,103 @@
+/*
+ * Allwinner A10 SoC emulation
+ *
+ * Copyright (C) 2013 Li Guang
+ * Written by Li Guang <lig.fnst@cn.fujitsu.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include "hw/sysbus.h"
+#include "hw/devices.h"
+#include "hw/arm/allwinner-a10.h"
+
+static void aw_a10_init(Object *obj)
+{
+ AwA10State *s = AW_A10(obj);
+
+ object_initialize(&s->cpu, sizeof(s->cpu), "cortex-a8-" TYPE_ARM_CPU);
+ object_property_add_child(obj, "cpu", OBJECT(&s->cpu), NULL);
+
+ object_initialize(&s->intc, sizeof(s->intc), TYPE_AW_A10_PIC);
+ qdev_set_parent_bus(DEVICE(&s->intc), sysbus_get_default());
+
+ object_initialize(&s->timer, sizeof(s->timer), TYPE_AW_A10_PIT);
+ qdev_set_parent_bus(DEVICE(&s->timer), sysbus_get_default());
+}
+
+static void aw_a10_realize(DeviceState *dev, Error **errp)
+{
+ AwA10State *s = AW_A10(dev);
+ SysBusDevice *sysbusdev;
+ uint8_t i;
+ qemu_irq fiq, irq;
+ Error *err = NULL;
+
+ object_property_set_bool(OBJECT(&s->cpu), true, "realized", &err);
+ if (err != NULL) {
+ error_propagate(errp, err);
+ return;
+ }
+ irq = qdev_get_gpio_in(DEVICE(&s->cpu), ARM_CPU_IRQ);
+ fiq = qdev_get_gpio_in(DEVICE(&s->cpu), ARM_CPU_FIQ);
+
+ object_property_set_bool(OBJECT(&s->intc), true, "realized", &err);
+ if (err != NULL) {
+ error_propagate(errp, err);
+ return;
+ }
+ sysbusdev = SYS_BUS_DEVICE(&s->intc);
+ sysbus_mmio_map(sysbusdev, 0, AW_A10_PIC_REG_BASE);
+ sysbus_connect_irq(sysbusdev, 0, irq);
+ sysbus_connect_irq(sysbusdev, 1, fiq);
+ for (i = 0; i < AW_A10_PIC_INT_NR; i++) {
+ s->irq[i] = qdev_get_gpio_in(DEVICE(&s->intc), i);
+ }
+
+ object_property_set_bool(OBJECT(&s->timer), true, "realized", &err);
+ if (err != NULL) {
+ error_propagate(errp, err);
+ return;
+ }
+ sysbusdev = SYS_BUS_DEVICE(&s->timer);
+ sysbus_mmio_map(sysbusdev, 0, AW_A10_PIT_REG_BASE);
+ sysbus_connect_irq(sysbusdev, 0, s->irq[22]);
+ sysbus_connect_irq(sysbusdev, 1, s->irq[23]);
+ sysbus_connect_irq(sysbusdev, 2, s->irq[24]);
+ sysbus_connect_irq(sysbusdev, 3, s->irq[25]);
+ sysbus_connect_irq(sysbusdev, 4, s->irq[67]);
+ sysbus_connect_irq(sysbusdev, 5, s->irq[68]);
+
+ serial_mm_init(get_system_memory(), AW_A10_UART0_REG_BASE, 2, s->irq[1],
+ 115200, serial_hds[0], DEVICE_NATIVE_ENDIAN);
+}
+
+static void aw_a10_class_init(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+
+ dc->realize = aw_a10_realize;
+}
+
+static const TypeInfo aw_a10_type_info = {
+ .name = TYPE_AW_A10,
+ .parent = TYPE_DEVICE,
+ .instance_size = sizeof(AwA10State),
+ .instance_init = aw_a10_init,
+ .class_init = aw_a10_class_init,
+};
+
+static void aw_a10_register_types(void)
+{
+ type_register_static(&aw_a10_type_info);
+}
+
+type_init(aw_a10_register_types)
diff --git a/hw/arm/boot.c b/hw/arm/boot.c
index 55d552f3a8..1c1b0e5258 100644
--- a/hw/arm/boot.c
+++ b/hw/arm/boot.c
@@ -17,18 +17,55 @@
#include "sysemu/device_tree.h"
#include "qemu/config-file.h"
+/* Kernel boot protocol is specified in the kernel docs
+ * Documentation/arm/Booting and Documentation/arm64/booting.txt
+ * They have different preferred image load offsets from system RAM base.
+ */
#define KERNEL_ARGS_ADDR 0x100
#define KERNEL_LOAD_ADDR 0x00010000
+#define KERNEL64_LOAD_ADDR 0x00080000
+
+typedef enum {
+ FIXUP_NONE = 0, /* do nothing */
+ FIXUP_TERMINATOR, /* end of insns */
+ FIXUP_BOARDID, /* overwrite with board ID number */
+ FIXUP_ARGPTR, /* overwrite with pointer to kernel args */
+ FIXUP_ENTRYPOINT, /* overwrite with kernel entry point */
+ FIXUP_GIC_CPU_IF, /* overwrite with GIC CPU interface address */
+ FIXUP_BOOTREG, /* overwrite with boot register address */
+ FIXUP_DSB, /* overwrite with correct DSB insn for cpu */
+ FIXUP_MAX,
+} FixupType;
+
+typedef struct ARMInsnFixup {
+ uint32_t insn;
+ FixupType fixup;
+} ARMInsnFixup;
+
+static const ARMInsnFixup bootloader_aarch64[] = {
+ { 0x580000c0 }, /* ldr x0, arg ; Load the lower 32-bits of DTB */
+ { 0xaa1f03e1 }, /* mov x1, xzr */
+ { 0xaa1f03e2 }, /* mov x2, xzr */
+ { 0xaa1f03e3 }, /* mov x3, xzr */
+ { 0x58000084 }, /* ldr x4, entry ; Load the lower 32-bits of kernel entry */
+ { 0xd61f0080 }, /* br x4 ; Jump to the kernel entry point */
+ { 0, FIXUP_ARGPTR }, /* arg: .word @DTB Lower 32-bits */
+ { 0 }, /* .word @DTB Higher 32-bits */
+ { 0, FIXUP_ENTRYPOINT }, /* entry: .word @Kernel Entry Lower 32-bits */
+ { 0 }, /* .word @Kernel Entry Higher 32-bits */
+ { 0, FIXUP_TERMINATOR }
+};
/* The worlds second smallest bootloader. Set r0-r2, then jump to kernel. */
-static uint32_t bootloader[] = {
- 0xe3a00000, /* mov r0, #0 */
- 0xe59f1004, /* ldr r1, [pc, #4] */
- 0xe59f2004, /* ldr r2, [pc, #4] */
- 0xe59ff004, /* ldr pc, [pc, #4] */
- 0, /* Board ID */
- 0, /* Address of kernel args. Set by integratorcp_init. */
- 0 /* Kernel entry point. Set by integratorcp_init. */
+static const ARMInsnFixup bootloader[] = {
+ { 0xe3a00000 }, /* mov r0, #0 */
+ { 0xe59f1004 }, /* ldr r1, [pc, #4] */
+ { 0xe59f2004 }, /* ldr r2, [pc, #4] */
+ { 0xe59ff004 }, /* ldr pc, [pc, #4] */
+ { 0, FIXUP_BOARDID },
+ { 0, FIXUP_ARGPTR },
+ { 0, FIXUP_ENTRYPOINT },
+ { 0, FIXUP_TERMINATOR }
};
/* Handling for secondary CPU boot in a multicore system.
@@ -48,39 +85,83 @@ static uint32_t bootloader[] = {
#define DSB_INSN 0xf57ff04f
#define CP15_DSB_INSN 0xee070f9a /* mcr cp15, 0, r0, c7, c10, 4 */
-static uint32_t smpboot[] = {
- 0xe59f2028, /* ldr r2, gic_cpu_if */
- 0xe59f0028, /* ldr r0, startaddr */
- 0xe3a01001, /* mov r1, #1 */
- 0xe5821000, /* str r1, [r2] - set GICC_CTLR.Enable */
- 0xe3a010ff, /* mov r1, #0xff */
- 0xe5821004, /* str r1, [r2, 4] - set GIC_PMR.Priority to 0xff */
- DSB_INSN, /* dsb */
- 0xe320f003, /* wfi */
- 0xe5901000, /* ldr r1, [r0] */
- 0xe1110001, /* tst r1, r1 */
- 0x0afffffb, /* beq <wfi> */
- 0xe12fff11, /* bx r1 */
- 0, /* gic_cpu_if: base address of GIC CPU interface */
- 0 /* bootreg: Boot register address is held here */
+static const ARMInsnFixup smpboot[] = {
+ { 0xe59f2028 }, /* ldr r2, gic_cpu_if */
+ { 0xe59f0028 }, /* ldr r0, bootreg_addr */
+ { 0xe3a01001 }, /* mov r1, #1 */
+ { 0xe5821000 }, /* str r1, [r2] - set GICC_CTLR.Enable */
+ { 0xe3a010ff }, /* mov r1, #0xff */
+ { 0xe5821004 }, /* str r1, [r2, 4] - set GIC_PMR.Priority to 0xff */
+ { 0, FIXUP_DSB }, /* dsb */
+ { 0xe320f003 }, /* wfi */
+ { 0xe5901000 }, /* ldr r1, [r0] */
+ { 0xe1110001 }, /* tst r1, r1 */
+ { 0x0afffffb }, /* beq <wfi> */
+ { 0xe12fff11 }, /* bx r1 */
+ { 0, FIXUP_GIC_CPU_IF }, /* gic_cpu_if: .word 0x.... */
+ { 0, FIXUP_BOOTREG }, /* bootreg_addr: .word 0x.... */
+ { 0, FIXUP_TERMINATOR }
};
+static void write_bootloader(const char *name, hwaddr addr,
+ const ARMInsnFixup *insns, uint32_t *fixupcontext)
+{
+ /* Fix up the specified bootloader fragment and write it into
+ * guest memory using rom_add_blob_fixed(). fixupcontext is
+ * an array giving the values to write in for the fixup types
+ * which write a value into the code array.
+ */
+ int i, len;
+ uint32_t *code;
+
+ len = 0;
+ while (insns[len].fixup != FIXUP_TERMINATOR) {
+ len++;
+ }
+
+ code = g_new0(uint32_t, len);
+
+ for (i = 0; i < len; i++) {
+ uint32_t insn = insns[i].insn;
+ FixupType fixup = insns[i].fixup;
+
+ switch (fixup) {
+ case FIXUP_NONE:
+ break;
+ case FIXUP_BOARDID:
+ case FIXUP_ARGPTR:
+ case FIXUP_ENTRYPOINT:
+ case FIXUP_GIC_CPU_IF:
+ case FIXUP_BOOTREG:
+ case FIXUP_DSB:
+ insn = fixupcontext[fixup];
+ break;
+ default:
+ abort();
+ }
+ code[i] = tswap32(insn);
+ }
+
+ rom_add_blob_fixed(name, code, len * sizeof(uint32_t), addr);
+
+ g_free(code);
+}
+
static void default_write_secondary(ARMCPU *cpu,
const struct arm_boot_info *info)
{
- int n;
- smpboot[ARRAY_SIZE(smpboot) - 1] = info->smp_bootreg_addr;
- smpboot[ARRAY_SIZE(smpboot) - 2] = info->gic_cpu_if_addr;
- for (n = 0; n < ARRAY_SIZE(smpboot); n++) {
- /* Replace DSB with the pre-v7 DSB if necessary. */
- if (!arm_feature(&cpu->env, ARM_FEATURE_V7) &&
- smpboot[n] == DSB_INSN) {
- smpboot[n] = CP15_DSB_INSN;
- }
- smpboot[n] = tswap32(smpboot[n]);
+ uint32_t fixupcontext[FIXUP_MAX];
+
+ fixupcontext[FIXUP_GIC_CPU_IF] = info->gic_cpu_if_addr;
+ fixupcontext[FIXUP_BOOTREG] = info->smp_bootreg_addr;
+ if (arm_feature(&cpu->env, ARM_FEATURE_V7)) {
+ fixupcontext[FIXUP_DSB] = DSB_INSN;
+ } else {
+ fixupcontext[FIXUP_DSB] = CP15_DSB_INSN;
}
- rom_add_blob_fixed("smpboot", smpboot, sizeof(smpboot),
- info->smp_loader_start);
+
+ write_bootloader("smpboot", info->smp_loader_start,
+ smpboot, fixupcontext);
}
static void default_reset_secondary(ARMCPU *cpu,
@@ -254,8 +335,8 @@ static int load_dtb(hwaddr addr, const struct arm_boot_info *binfo)
}
}
- acells = qemu_devtree_getprop_cell(fdt, "/", "#address-cells");
- scells = qemu_devtree_getprop_cell(fdt, "/", "#size-cells");
+ acells = qemu_fdt_getprop_cell(fdt, "/", "#address-cells");
+ scells = qemu_fdt_getprop_cell(fdt, "/", "#size-cells");
if (acells == 0 || scells == 0) {
fprintf(stderr, "dtb file invalid (#address-cells or #size-cells 0)\n");
goto fail;
@@ -270,17 +351,17 @@ static int load_dtb(hwaddr addr, const struct arm_boot_info *binfo)
goto fail;
}
- rc = qemu_devtree_setprop_sized_cells(fdt, "/memory", "reg",
- acells, binfo->loader_start,
- scells, binfo->ram_size);
+ rc = qemu_fdt_setprop_sized_cells(fdt, "/memory", "reg",
+ acells, binfo->loader_start,
+ scells, binfo->ram_size);
if (rc < 0) {
fprintf(stderr, "couldn't set /memory/reg\n");
goto fail;
}
if (binfo->kernel_cmdline && *binfo->kernel_cmdline) {
- rc = qemu_devtree_setprop_string(fdt, "/chosen", "bootargs",
- binfo->kernel_cmdline);
+ rc = qemu_fdt_setprop_string(fdt, "/chosen", "bootargs",
+ binfo->kernel_cmdline);
if (rc < 0) {
fprintf(stderr, "couldn't set /chosen/bootargs\n");
goto fail;
@@ -288,15 +369,15 @@ static int load_dtb(hwaddr addr, const struct arm_boot_info *binfo)
}
if (binfo->initrd_size) {
- rc = qemu_devtree_setprop_cell(fdt, "/chosen", "linux,initrd-start",
- binfo->initrd_start);
+ rc = qemu_fdt_setprop_cell(fdt, "/chosen", "linux,initrd-start",
+ binfo->initrd_start);
if (rc < 0) {
fprintf(stderr, "couldn't set /chosen/linux,initrd-start\n");
goto fail;
}
- rc = qemu_devtree_setprop_cell(fdt, "/chosen", "linux,initrd-end",
- binfo->initrd_start + binfo->initrd_size);
+ rc = qemu_fdt_setprop_cell(fdt, "/chosen", "linux,initrd-end",
+ binfo->initrd_start + binfo->initrd_size);
if (rc < 0) {
fprintf(stderr, "couldn't set /chosen/linux,initrd-end\n");
goto fail;
@@ -307,7 +388,7 @@ static int load_dtb(hwaddr addr, const struct arm_boot_info *binfo)
binfo->modify_dtb(binfo, fdt);
}
- qemu_devtree_dumpdtb(fdt, size);
+ qemu_fdt_dumpdtb(fdt, size);
cpu_physical_memory_write(addr, fdt, size);
@@ -334,7 +415,12 @@ static void do_cpu_reset(void *opaque)
env->thumb = info->entry & 1;
} else {
if (CPU(cpu) == first_cpu) {
- env->regs[15] = info->loader_start;
+ if (env->aarch64) {
+ env->pc = info->loader_start;
+ } else {
+ env->regs[15] = info->loader_start;
+ }
+
if (!info->dtb_filename) {
if (old_param) {
set_kernel_args_old(info);
@@ -354,11 +440,11 @@ void arm_load_kernel(ARMCPU *cpu, struct arm_boot_info *info)
CPUState *cs = CPU(cpu);
int kernel_size;
int initrd_size;
- int n;
int is_linux = 0;
uint64_t elf_entry;
- hwaddr entry;
+ hwaddr entry, kernel_load_offset;
int big_endian;
+ static const ARMInsnFixup *primary_loader;
/* Load the kernel. */
if (!info->kernel_filename) {
@@ -368,6 +454,14 @@ void arm_load_kernel(ARMCPU *cpu, struct arm_boot_info *info)
return;
}
+ if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
+ primary_loader = bootloader_aarch64;
+ kernel_load_offset = KERNEL64_LOAD_ADDR;
+ } else {
+ primary_loader = bootloader;
+ kernel_load_offset = KERNEL_LOAD_ADDR;
+ }
+
info->dtb_filename = qemu_opt_get(qemu_get_machine_opts(), "dtb");
if (!info->secondary_cpu_reset_hook) {
@@ -408,9 +502,9 @@ void arm_load_kernel(ARMCPU *cpu, struct arm_boot_info *info)
&is_linux);
}
if (kernel_size < 0) {
- entry = info->loader_start + KERNEL_LOAD_ADDR;
+ entry = info->loader_start + kernel_load_offset;
kernel_size = load_image_targphys(info->kernel_filename, entry,
- info->ram_size - KERNEL_LOAD_ADDR);
+ info->ram_size - kernel_load_offset);
is_linux = 1;
}
if (kernel_size < 0) {
@@ -420,6 +514,8 @@ void arm_load_kernel(ARMCPU *cpu, struct arm_boot_info *info)
}
info->entry = entry;
if (is_linux) {
+ uint32_t fixupcontext[FIXUP_MAX];
+
if (info->initrd_filename) {
initrd_size = load_ramdisk(info->initrd_filename,
info->initrd_start,
@@ -441,7 +537,7 @@ void arm_load_kernel(ARMCPU *cpu, struct arm_boot_info *info)
}
info->initrd_size = initrd_size;
- bootloader[4] = info->board_id;
+ fixupcontext[FIXUP_BOARDID] = info->board_id;
/* for device tree boot, we pass the DTB directly in r2. Otherwise
* we point to the kernel args.
@@ -456,9 +552,9 @@ void arm_load_kernel(ARMCPU *cpu, struct arm_boot_info *info)
if (load_dtb(dtb_start, info)) {
exit(1);
}
- bootloader[5] = dtb_start;
+ fixupcontext[FIXUP_ARGPTR] = dtb_start;
} else {
- bootloader[5] = info->loader_start + KERNEL_ARGS_ADDR;
+ fixupcontext[FIXUP_ARGPTR] = info->loader_start + KERNEL_ARGS_ADDR;
if (info->ram_size >= (1ULL << 32)) {
fprintf(stderr, "qemu: RAM size must be less than 4GB to boot"
" Linux kernel using ATAGS (try passing a device tree"
@@ -466,12 +562,11 @@ void arm_load_kernel(ARMCPU *cpu, struct arm_boot_info *info)
exit(1);
}
}
- bootloader[6] = entry;
- for (n = 0; n < sizeof(bootloader) / 4; n++) {
- bootloader[n] = tswap32(bootloader[n]);
- }
- rom_add_blob_fixed("bootloader", bootloader, sizeof(bootloader),
- info->loader_start);
+ fixupcontext[FIXUP_ENTRYPOINT] = entry;
+
+ write_bootloader("bootloader", info->loader_start,
+ primary_loader, fixupcontext);
+
if (info->nb_cpus > 1) {
info->write_secondary_boot(cpu, info);
}
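
The boot.c rework above replaces in-place patching of mutable bootloader arrays with const tables of (instruction, fixup type) pairs that write_bootloader() resolves through a fixupcontext[] array. A standalone sketch of the same table-plus-fixup pattern (hypothetical names, not QEMU code):

#include <stdint.h>
#include <stdio.h>

/* Placeholder slots in a const instruction table are filled from a
 * per-boot context array instead of patching the table in place. */
enum fixup { FIX_NONE, FIX_ARGPTR, FIX_ENTRY, FIX_END, FIX_MAX };

struct insn_fixup {
    uint32_t insn;
    enum fixup fixup;
};

static const struct insn_fixup loader[] = {
    { 0xe59f0000 },       /* illustrative encoding: load args pointer */
    { 0xe59ff000 },       /* illustrative encoding: jump to entry point */
    { 0, FIX_ARGPTR },    /* args:  filled from context at load time */
    { 0, FIX_ENTRY },     /* entry: filled from context at load time */
    { 0, FIX_END },
};

int main(void)
{
    uint32_t ctx[FIX_MAX] = { 0 };
    ctx[FIX_ARGPTR] = 0x00000100;
    ctx[FIX_ENTRY] = 0x00010000;

    for (int i = 0; loader[i].fixup != FIX_END; i++) {
        uint32_t word = (loader[i].fixup == FIX_NONE) ? loader[i].insn
                                                      : ctx[loader[i].fixup];
        printf("word %d: 0x%08x\n", i, word);  /* a real loader writes these
                                                * into guest memory */
    }
    return 0;
}
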
diff --git a/hw/arm/cubieboard.c b/hw/arm/cubieboard.c
new file mode 100644
index 0000000000..3fcb6d22f5
--- /dev/null
+++ b/hw/arm/cubieboard.c
@@ -0,0 +1,69 @@
+/*
+ * cubieboard emulation
+ *
+ * Copyright (C) 2013 Li Guang
+ * Written by Li Guang <lig.fnst@cn.fujitsu.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include "hw/sysbus.h"
+#include "hw/devices.h"
+#include "hw/boards.h"
+#include "hw/arm/allwinner-a10.h"
+
+static struct arm_boot_info cubieboard_binfo = {
+ .loader_start = AW_A10_SDRAM_BASE,
+ .board_id = 0x1008,
+};
+
+typedef struct CubieBoardState {
+ AwA10State *a10;
+ MemoryRegion sdram;
+} CubieBoardState;
+
+static void cubieboard_init(QEMUMachineInitArgs *args)
+{
+ CubieBoardState *s = g_new(CubieBoardState, 1);
+ Error *err = NULL;
+
+ s->a10 = AW_A10(object_new(TYPE_AW_A10));
+ object_property_set_bool(OBJECT(s->a10), true, "realized", &err);
+ if (err != NULL) {
+ error_report("Couldn't realize Allwinner A10: %s\n",
+ error_get_pretty(err));
+ exit(1);
+ }
+
+ memory_region_init_ram(&s->sdram, NULL, "cubieboard.ram", args->ram_size);
+ vmstate_register_ram_global(&s->sdram);
+ memory_region_add_subregion(get_system_memory(), AW_A10_SDRAM_BASE,
+ &s->sdram);
+
+ cubieboard_binfo.ram_size = args->ram_size;
+ cubieboard_binfo.kernel_filename = args->kernel_filename;
+ cubieboard_binfo.kernel_cmdline = args->kernel_cmdline;
+ arm_load_kernel(&s->a10->cpu, &cubieboard_binfo);
+}
+
+static QEMUMachine cubieboard_machine = {
+ .name = "cubieboard",
+ .desc = "cubietech cubieboard",
+ .init = cubieboard_init,
+};
+
+
+static void cubieboard_machine_init(void)
+{
+ qemu_register_machine(&cubieboard_machine);
+}
+
+machine_init(cubieboard_machine_init)
diff --git a/hw/arm/digic.c b/hw/arm/digic.c
new file mode 100644
index 0000000000..ec8c330602
--- /dev/null
+++ b/hw/arm/digic.c
@@ -0,0 +1,115 @@
+/*
+ * QEMU model of the Canon DIGIC SoC.
+ *
+ * Copyright (C) 2013 Antony Pavlov <antonynpavlov@gmail.com>
+ *
+ * This model is based on reverse engineering efforts
+ * made by CHDK (http://chdk.wikia.com) and
+ * Magic Lantern (http://www.magiclantern.fm) projects
+ * contributors.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "hw/arm/digic.h"
+
+#define DIGIC4_TIMER_BASE(n) (0xc0210000 + (n) * 0x100)
+
+#define DIGIC_UART_BASE 0xc0800000
+
+static void digic_init(Object *obj)
+{
+ DigicState *s = DIGIC(obj);
+ DeviceState *dev;
+ int i;
+
+ object_initialize(&s->cpu, sizeof(s->cpu), "arm946-" TYPE_ARM_CPU);
+ object_property_add_child(obj, "cpu", OBJECT(&s->cpu), NULL);
+
+ for (i = 0; i < DIGIC4_NB_TIMERS; i++) {
+#define DIGIC_TIMER_NAME_MLEN 11
+ char name[DIGIC_TIMER_NAME_MLEN];
+
+ object_initialize(&s->timer[i], sizeof(s->timer[i]), TYPE_DIGIC_TIMER);
+ dev = DEVICE(&s->timer[i]);
+ qdev_set_parent_bus(dev, sysbus_get_default());
+ snprintf(name, DIGIC_TIMER_NAME_MLEN, "timer[%d]", i);
+ object_property_add_child(obj, name, OBJECT(&s->timer[i]), NULL);
+ }
+
+ object_initialize(&s->uart, sizeof(s->uart), TYPE_DIGIC_UART);
+ dev = DEVICE(&s->uart);
+ qdev_set_parent_bus(dev, sysbus_get_default());
+ object_property_add_child(obj, "uart", OBJECT(&s->uart), NULL);
+}
+
+static void digic_realize(DeviceState *dev, Error **errp)
+{
+ DigicState *s = DIGIC(dev);
+ Error *err = NULL;
+ SysBusDevice *sbd;
+ int i;
+
+ object_property_set_bool(OBJECT(&s->cpu), true, "reset-hivecs", &err);
+ if (err != NULL) {
+ error_propagate(errp, err);
+ return;
+ }
+
+ object_property_set_bool(OBJECT(&s->cpu), true, "realized", &err);
+ if (err != NULL) {
+ error_propagate(errp, err);
+ return;
+ }
+
+ for (i = 0; i < DIGIC4_NB_TIMERS; i++) {
+ object_property_set_bool(OBJECT(&s->timer[i]), true, "realized", &err);
+ if (err != NULL) {
+ error_propagate(errp, err);
+ return;
+ }
+
+ sbd = SYS_BUS_DEVICE(&s->timer[i]);
+ sysbus_mmio_map(sbd, 0, DIGIC4_TIMER_BASE(i));
+ }
+
+ object_property_set_bool(OBJECT(&s->uart), true, "realized", &err);
+ if (err != NULL) {
+ error_propagate(errp, err);
+ return;
+ }
+
+ sbd = SYS_BUS_DEVICE(&s->uart);
+ sysbus_mmio_map(sbd, 0, DIGIC_UART_BASE);
+}
+
+static void digic_class_init(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+
+ dc->realize = digic_realize;
+}
+
+static const TypeInfo digic_type_info = {
+ .name = TYPE_DIGIC,
+ .parent = TYPE_DEVICE,
+ .instance_size = sizeof(DigicState),
+ .instance_init = digic_init,
+ .class_init = digic_class_init,
+};
+
+static void digic_register_types(void)
+{
+ type_register_static(&digic_type_info);
+}
+
+type_init(digic_register_types)
diff --git a/hw/arm/digic_boards.c b/hw/arm/digic_boards.c
new file mode 100644
index 0000000000..32fc30a69d
--- /dev/null
+++ b/hw/arm/digic_boards.c
@@ -0,0 +1,162 @@
+/*
+ * QEMU model of the Canon DIGIC boards (cameras indeed :).
+ *
+ * Copyright (C) 2013 Antony Pavlov <antonynpavlov@gmail.com>
+ *
+ * This model is based on reverse engineering efforts
+ * made by CHDK (http://chdk.wikia.com) and
+ * Magic Lantern (http://www.magiclantern.fm) projects
+ * contributors.
+ *
+ * See docs here:
+ * http://magiclantern.wikia.com/wiki/Register_Map
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "hw/boards.h"
+#include "exec/address-spaces.h"
+#include "qemu/error-report.h"
+#include "hw/arm/digic.h"
+#include "hw/block/flash.h"
+#include "hw/loader.h"
+#include "sysemu/sysemu.h"
+#include "sysemu/qtest.h"
+
+#define DIGIC4_ROM0_BASE 0xf0000000
+#define DIGIC4_ROM1_BASE 0xf8000000
+#define DIGIC4_ROM_MAX_SIZE 0x08000000
+
+typedef struct DigicBoardState {
+ DigicState *digic;
+ MemoryRegion ram;
+} DigicBoardState;
+
+typedef struct DigicBoard {
+ hwaddr ram_size;
+ void (*add_rom0)(DigicBoardState *, hwaddr, const char *);
+ const char *rom0_def_filename;
+ void (*add_rom1)(DigicBoardState *, hwaddr, const char *);
+ const char *rom1_def_filename;
+} DigicBoard;
+
+static void digic4_board_setup_ram(DigicBoardState *s, hwaddr ram_size)
+{
+ memory_region_init_ram(&s->ram, NULL, "ram", ram_size);
+ memory_region_add_subregion(get_system_memory(), 0, &s->ram);
+ vmstate_register_ram_global(&s->ram);
+}
+
+static void digic4_board_init(DigicBoard *board)
+{
+ Error *err = NULL;
+
+ DigicBoardState *s = g_new(DigicBoardState, 1);
+
+ s->digic = DIGIC(object_new(TYPE_DIGIC));
+ object_property_set_bool(OBJECT(s->digic), true, "realized", &err);
+ if (err != NULL) {
+ error_report("Couldn't realize DIGIC SoC: %s\n",
+ error_get_pretty(err));
+ exit(1);
+ }
+
+ digic4_board_setup_ram(s, board->ram_size);
+
+ if (board->add_rom0) {
+ board->add_rom0(s, DIGIC4_ROM0_BASE, board->rom0_def_filename);
+ }
+
+ if (board->add_rom1) {
+ board->add_rom1(s, DIGIC4_ROM1_BASE, board->rom1_def_filename);
+ }
+}
+
+static void digic_load_rom(DigicBoardState *s, hwaddr addr,
+ hwaddr max_size, const char *def_filename)
+{
+ target_long rom_size;
+ const char *filename;
+
+ if (qtest_enabled()) {
+ /* qtest runs no code so don't attempt a ROM load which
+ * could fail and result in a spurious test failure.
+ */
+ return;
+ }
+
+ if (bios_name) {
+ filename = bios_name;
+ } else {
+ filename = def_filename;
+ }
+
+ if (filename) {
+ char *fn = qemu_find_file(QEMU_FILE_TYPE_BIOS, filename);
+
+ if (!fn) {
+ error_report("Couldn't find rom image '%s'.\n", filename);
+ exit(1);
+ }
+
+ rom_size = load_image_targphys(fn, addr, max_size);
+ if (rom_size < 0 || rom_size > max_size) {
+ error_report("Couldn't load rom image '%s'.\n", filename);
+ exit(1);
+ }
+ }
+}
+
+/*
+ * Samsung K8P3215UQB
+ * 64M Bit (4Mx16) Page Mode / Multi-Bank NOR Flash Memory
+ */
+static void digic4_add_k8p3215uqb_rom(DigicBoardState *s, hwaddr addr,
+ const char *def_filename)
+{
+#define FLASH_K8P3215UQB_SIZE (4 * 1024 * 1024)
+#define FLASH_K8P3215UQB_SECTOR_SIZE (64 * 1024)
+
+ pflash_cfi02_register(addr, NULL, "pflash", FLASH_K8P3215UQB_SIZE,
+ NULL, FLASH_K8P3215UQB_SECTOR_SIZE,
+ FLASH_K8P3215UQB_SIZE / FLASH_K8P3215UQB_SECTOR_SIZE,
+ DIGIC4_ROM_MAX_SIZE / FLASH_K8P3215UQB_SIZE,
+ 4,
+ 0x00EC, 0x007E, 0x0003, 0x0001,
+ 0x0555, 0x2aa, 0);
+
+ digic_load_rom(s, addr, FLASH_K8P3215UQB_SIZE, def_filename);
+}
+
+static DigicBoard digic4_board_canon_a1100 = {
+ .ram_size = 64 * 1024 * 1024,
+ .add_rom1 = digic4_add_k8p3215uqb_rom,
+ .rom1_def_filename = "canon-a1100-rom1.bin",
+};
+
+static void canon_a1100_init(QEMUMachineInitArgs *args)
+{
+ digic4_board_init(&digic4_board_canon_a1100);
+}
+
+static QEMUMachine canon_a1100 = {
+ .name = "canon-a1100",
+ .desc = "Canon PowerShot A1100 IS",
+ .init = &canon_a1100_init,
+};
+
+static void digic_register_machines(void)
+{
+ qemu_register_machine(&canon_a1100);
+}
+
+machine_init(digic_register_machines)
diff --git a/hw/arm/highbank.c b/hw/arm/highbank.c
index fe98ef10cb..c75b425c01 100644
--- a/hw/arm/highbank.c
+++ b/hw/arm/highbank.c
@@ -26,12 +26,13 @@
#include "hw/boards.h"
#include "sysemu/blockdev.h"
#include "exec/address-spaces.h"
+#include "qemu/error-report.h"
-#define SMP_BOOT_ADDR 0x100
-#define SMP_BOOT_REG 0x40
-#define GIC_BASE_ADDR 0xfff10000
+#define SMP_BOOT_ADDR 0x100
+#define SMP_BOOT_REG 0x40
+#define MPCORE_PERIPHBASE 0xfff10000
-#define NIRQ_GIC 160
+#define NIRQ_GIC 160
/* Board init. */
@@ -54,7 +55,7 @@ static void hb_write_secondary(ARMCPU *cpu, const struct arm_boot_info *info)
0xe1110001, /* tst r1, r1 */
0x0afffffb, /* beq <wfi> */
0xe12fff11, /* bx r1 */
- GIC_BASE_ADDR /* privbase: gic address. */
+ MPCORE_PERIPHBASE /* privbase: MPCore peripheral base address. */
};
for (n = 0; n < ARRAY_SIZE(smpboot); n++) {
smpboot[n] = tswap32(smpboot[n]);
@@ -229,15 +230,23 @@ static void calxeda_init(QEMUMachineInitArgs *args, enum cxmachines machine)
}
for (n = 0; n < smp_cpus; n++) {
+ ObjectClass *oc = cpu_class_by_name(TYPE_ARM_CPU, cpu_model);
ARMCPU *cpu;
- cpu = cpu_arm_init(cpu_model);
- if (cpu == NULL) {
- fprintf(stderr, "Unable to find CPU definition\n");
+ Error *err = NULL;
+
+ cpu = ARM_CPU(object_new(object_class_get_name(oc)));
+
+ object_property_set_int(OBJECT(cpu), MPCORE_PERIPHBASE, "reset-cbar",
+ &err);
+ if (err) {
+ error_report("%s", error_get_pretty(err));
+ exit(1);
+ }
+ object_property_set_bool(OBJECT(cpu), true, "realized", &err);
+ if (err) {
+ error_report("%s", error_get_pretty(err));
exit(1);
}
-
- /* This will become a QOM property eventually */
- cpu->reset_cbar = GIC_BASE_ADDR;
cpu_irq[n] = qdev_get_gpio_in(DEVICE(cpu), ARM_CPU_IRQ);
}
@@ -279,7 +288,7 @@ static void calxeda_init(QEMUMachineInitArgs *args, enum cxmachines machine)
qdev_prop_set_uint32(dev, "num-irq", NIRQ_GIC);
qdev_init_nofail(dev);
busdev = SYS_BUS_DEVICE(dev);
- sysbus_mmio_map(busdev, 0, GIC_BASE_ADDR);
+ sysbus_mmio_map(busdev, 0, MPCORE_PERIPHBASE);
for (n = 0; n < smp_cpus; n++) {
sysbus_connect_irq(busdev, n, cpu_irq[n]);
}
diff --git a/hw/arm/vexpress.c b/hw/arm/vexpress.c
index f48de00a1a..ef1707aef0 100644
--- a/hw/arm/vexpress.c
+++ b/hw/arm/vexpress.c
@@ -419,13 +419,13 @@ static int add_virtio_mmio_node(void *fdt, uint32_t acells, uint32_t scells,
int rc;
char *nodename = g_strdup_printf("/virtio_mmio@%" PRIx64, addr);
- rc = qemu_devtree_add_subnode(fdt, nodename);
- rc |= qemu_devtree_setprop_string(fdt, nodename,
- "compatible", "virtio,mmio");
- rc |= qemu_devtree_setprop_sized_cells(fdt, nodename, "reg",
- acells, addr, scells, size);
- qemu_devtree_setprop_cells(fdt, nodename, "interrupt-parent", intc);
- qemu_devtree_setprop_cells(fdt, nodename, "interrupts", 0, irq, 1);
+ rc = qemu_fdt_add_subnode(fdt, nodename);
+ rc |= qemu_fdt_setprop_string(fdt, nodename,
+ "compatible", "virtio,mmio");
+ rc |= qemu_fdt_setprop_sized_cells(fdt, nodename, "reg",
+ acells, addr, scells, size);
+ qemu_fdt_setprop_cells(fdt, nodename, "interrupt-parent", intc);
+ qemu_fdt_setprop_cells(fdt, nodename, "interrupts", 0, irq, 1);
g_free(nodename);
if (rc) {
return -1;
@@ -456,8 +456,8 @@ static void vexpress_modify_dtb(const struct arm_boot_info *info, void *fdt)
uint32_t acells, scells, intc;
const VEDBoardInfo *daughterboard = (const VEDBoardInfo *)info;
- acells = qemu_devtree_getprop_cell(fdt, "/", "#address-cells");
- scells = qemu_devtree_getprop_cell(fdt, "/", "#size-cells");
+ acells = qemu_fdt_getprop_cell(fdt, "/", "#address-cells");
+ scells = qemu_fdt_getprop_cell(fdt, "/", "#size-cells");
intc = find_int_controller(fdt);
if (!intc) {
/* Not fatal, we just won't provide virtio. This will
@@ -480,6 +480,36 @@ static void vexpress_modify_dtb(const struct arm_boot_info *info, void *fdt)
}
}
+
+/* Open code a private version of pflash registration since we
+ * need to set non-default device width for VExpress platform.
+ */
+static pflash_t *ve_pflash_cfi01_register(hwaddr base, const char *name,
+ DriveInfo *di)
+{
+ DeviceState *dev = qdev_create(NULL, "cfi.pflash01");
+
+ if (di && qdev_prop_set_drive(dev, "drive", di->bdrv)) {
+ abort();
+ }
+
+ qdev_prop_set_uint32(dev, "num-blocks",
+ VEXPRESS_FLASH_SIZE / VEXPRESS_FLASH_SECT_SIZE);
+ qdev_prop_set_uint64(dev, "sector-length", VEXPRESS_FLASH_SECT_SIZE);
+ qdev_prop_set_uint8(dev, "width", 4);
+ qdev_prop_set_uint8(dev, "device-width", 2);
+ qdev_prop_set_uint8(dev, "big-endian", 0);
+ qdev_prop_set_uint16(dev, "id0", 0x89);
+ qdev_prop_set_uint16(dev, "id1", 0x18);
+ qdev_prop_set_uint16(dev, "id2", 0x00);
+ qdev_prop_set_uint16(dev, "id3", 0x00);
+ qdev_prop_set_string(dev, "name", name);
+ qdev_init_nofail(dev);
+
+ sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base);
+ return OBJECT_CHECK(pflash_t, (dev), "cfi.pflash01");
+}
+
static void vexpress_common_init(VEDBoardInfo *daughterboard,
QEMUMachineInitArgs *args)
{
@@ -561,11 +591,8 @@ static void vexpress_common_init(VEDBoardInfo *daughterboard,
sysbus_create_simple("pl111", map[VE_CLCD], pic[14]);
dinfo = drive_get_next(IF_PFLASH);
- pflash0 = pflash_cfi01_register(map[VE_NORFLASH0], NULL, "vexpress.flash0",
- VEXPRESS_FLASH_SIZE, dinfo ? dinfo->bdrv : NULL,
- VEXPRESS_FLASH_SECT_SIZE,
- VEXPRESS_FLASH_SIZE / VEXPRESS_FLASH_SECT_SIZE, 4,
- 0x00, 0x89, 0x00, 0x18, 0);
+ pflash0 = ve_pflash_cfi01_register(map[VE_NORFLASH0], "vexpress.flash0",
+ dinfo);
if (!pflash0) {
fprintf(stderr, "vexpress: error registering flash 0.\n");
exit(1);
@@ -580,11 +607,8 @@ static void vexpress_common_init(VEDBoardInfo *daughterboard,
}
dinfo = drive_get_next(IF_PFLASH);
- if (!pflash_cfi01_register(map[VE_NORFLASH1], NULL, "vexpress.flash1",
- VEXPRESS_FLASH_SIZE, dinfo ? dinfo->bdrv : NULL,
- VEXPRESS_FLASH_SECT_SIZE,
- VEXPRESS_FLASH_SIZE / VEXPRESS_FLASH_SECT_SIZE, 4,
- 0x00, 0x89, 0x00, 0x18, 0)) {
+ if (!ve_pflash_cfi01_register(map[VE_NORFLASH1], "vexpress.flash1",
+ dinfo)) {
fprintf(stderr, "vexpress: error registering flash 1.\n");
exit(1);
}
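
The vexpress flash banks are now registered with width 4 and device-width 2, i.e. two x16 devices on a 32-bit bank; pflash_cfi_query() later in this patch replicates each device's CFI response across the bank with deposit32(). A standalone sketch of that replication arithmetic (simplified deposit32, not QEMU code):

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for QEMU's deposit32(): insert the low 'len' bits
 * of 'field' into 'val' starting at bit 'start'. */
static uint32_t deposit32(uint32_t val, int start, int len, uint32_t field)
{
    uint32_t mask = (~0U >> (32 - len)) << start;
    return (val & ~mask) | ((field << start) & mask);
}

int main(void)
{
    int device_width = 2, bank_width = 4;  /* two x16 chips on a 32-bit bank */
    uint32_t resp = 0x51;                  /* single-device CFI byte, e.g. 'Q' */

    /* Same loop shape as the per-device replication in pflash_cfi_query(). */
    for (int i = device_width; i < bank_width; i += device_width) {
        resp = deposit32(resp, 8 * i, 8 * device_width, resp);
    }
    printf("0x%08x\n", resp);              /* prints 0x00510051 */
    return 0;
}
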
diff --git a/hw/arm/virt.c b/hw/arm/virt.c
index 9531b5a574..517f2fe30f 100644
--- a/hw/arm/virt.c
+++ b/hw/arm/virt.c
@@ -156,42 +156,42 @@ static void create_fdt(VirtBoardInfo *vbi)
vbi->fdt = fdt;
/* Header */
- qemu_devtree_setprop_string(fdt, "/", "compatible", "linux,dummy-virt");
- qemu_devtree_setprop_cell(fdt, "/", "#address-cells", 0x2);
- qemu_devtree_setprop_cell(fdt, "/", "#size-cells", 0x2);
+ qemu_fdt_setprop_string(fdt, "/", "compatible", "linux,dummy-virt");
+ qemu_fdt_setprop_cell(fdt, "/", "#address-cells", 0x2);
+ qemu_fdt_setprop_cell(fdt, "/", "#size-cells", 0x2);
/*
* /chosen and /memory nodes must exist for load_dtb
* to fill in necessary properties later
*/
- qemu_devtree_add_subnode(fdt, "/chosen");
- qemu_devtree_add_subnode(fdt, "/memory");
- qemu_devtree_setprop_string(fdt, "/memory", "device_type", "memory");
+ qemu_fdt_add_subnode(fdt, "/chosen");
+ qemu_fdt_add_subnode(fdt, "/memory");
+ qemu_fdt_setprop_string(fdt, "/memory", "device_type", "memory");
/* Clock node, for the benefit of the UART. The kernel device tree
* binding documentation claims the PL011 node clock properties are
* optional but in practice if you omit them the kernel refuses to
* probe for the device.
*/
- vbi->clock_phandle = qemu_devtree_alloc_phandle(fdt);
- qemu_devtree_add_subnode(fdt, "/apb-pclk");
- qemu_devtree_setprop_string(fdt, "/apb-pclk", "compatible", "fixed-clock");
- qemu_devtree_setprop_cell(fdt, "/apb-pclk", "#clock-cells", 0x0);
- qemu_devtree_setprop_cell(fdt, "/apb-pclk", "clock-frequency", 24000000);
- qemu_devtree_setprop_string(fdt, "/apb-pclk", "clock-output-names",
+ vbi->clock_phandle = qemu_fdt_alloc_phandle(fdt);
+ qemu_fdt_add_subnode(fdt, "/apb-pclk");
+ qemu_fdt_setprop_string(fdt, "/apb-pclk", "compatible", "fixed-clock");
+ qemu_fdt_setprop_cell(fdt, "/apb-pclk", "#clock-cells", 0x0);
+ qemu_fdt_setprop_cell(fdt, "/apb-pclk", "clock-frequency", 24000000);
+ qemu_fdt_setprop_string(fdt, "/apb-pclk", "clock-output-names",
"clk24mhz");
- qemu_devtree_setprop_cell(fdt, "/apb-pclk", "phandle", vbi->clock_phandle);
+ qemu_fdt_setprop_cell(fdt, "/apb-pclk", "phandle", vbi->clock_phandle);
/* No PSCI for TCG yet */
if (kvm_enabled()) {
- qemu_devtree_add_subnode(fdt, "/psci");
- qemu_devtree_setprop_string(fdt, "/psci", "compatible", "arm,psci");
- qemu_devtree_setprop_string(fdt, "/psci", "method", "hvc");
- qemu_devtree_setprop_cell(fdt, "/psci", "cpu_suspend",
+ qemu_fdt_add_subnode(fdt, "/psci");
+ qemu_fdt_setprop_string(fdt, "/psci", "compatible", "arm,psci");
+ qemu_fdt_setprop_string(fdt, "/psci", "method", "hvc");
+ qemu_fdt_setprop_cell(fdt, "/psci", "cpu_suspend",
PSCI_FN_CPU_SUSPEND);
- qemu_devtree_setprop_cell(fdt, "/psci", "cpu_off", PSCI_FN_CPU_OFF);
- qemu_devtree_setprop_cell(fdt, "/psci", "cpu_on", PSCI_FN_CPU_ON);
- qemu_devtree_setprop_cell(fdt, "/psci", "migrate", PSCI_FN_MIGRATE);
+ qemu_fdt_setprop_cell(fdt, "/psci", "cpu_off", PSCI_FN_CPU_OFF);
+ qemu_fdt_setprop_cell(fdt, "/psci", "cpu_on", PSCI_FN_CPU_ON);
+ qemu_fdt_setprop_cell(fdt, "/psci", "migrate", PSCI_FN_MIGRATE);
}
}
@@ -206,10 +206,10 @@ static void fdt_add_timer_nodes(const VirtBoardInfo *vbi)
irqflags = deposit32(irqflags, GIC_FDT_IRQ_PPI_CPU_START,
GIC_FDT_IRQ_PPI_CPU_WIDTH, (1 << vbi->smp_cpus) - 1);
- qemu_devtree_add_subnode(vbi->fdt, "/timer");
- qemu_devtree_setprop_string(vbi->fdt, "/timer",
+ qemu_fdt_add_subnode(vbi->fdt, "/timer");
+ qemu_fdt_setprop_string(vbi->fdt, "/timer",
"compatible", "arm,armv7-timer");
- qemu_devtree_setprop_cells(vbi->fdt, "/timer", "interrupts",
+ qemu_fdt_setprop_cells(vbi->fdt, "/timer", "interrupts",
GIC_FDT_IRQ_TYPE_PPI, 13, irqflags,
GIC_FDT_IRQ_TYPE_PPI, 14, irqflags,
GIC_FDT_IRQ_TYPE_PPI, 11, irqflags,
@@ -220,25 +220,25 @@ static void fdt_add_cpu_nodes(const VirtBoardInfo *vbi)
{
int cpu;
- qemu_devtree_add_subnode(vbi->fdt, "/cpus");
- qemu_devtree_setprop_cell(vbi->fdt, "/cpus", "#address-cells", 0x1);
- qemu_devtree_setprop_cell(vbi->fdt, "/cpus", "#size-cells", 0x0);
+ qemu_fdt_add_subnode(vbi->fdt, "/cpus");
+ qemu_fdt_setprop_cell(vbi->fdt, "/cpus", "#address-cells", 0x1);
+ qemu_fdt_setprop_cell(vbi->fdt, "/cpus", "#size-cells", 0x0);
for (cpu = vbi->smp_cpus - 1; cpu >= 0; cpu--) {
char *nodename = g_strdup_printf("/cpus/cpu@%d", cpu);
ARMCPU *armcpu = ARM_CPU(qemu_get_cpu(cpu));
- qemu_devtree_add_subnode(vbi->fdt, nodename);
- qemu_devtree_setprop_string(vbi->fdt, nodename, "device_type", "cpu");
- qemu_devtree_setprop_string(vbi->fdt, nodename, "compatible",
+ qemu_fdt_add_subnode(vbi->fdt, nodename);
+ qemu_fdt_setprop_string(vbi->fdt, nodename, "device_type", "cpu");
+ qemu_fdt_setprop_string(vbi->fdt, nodename, "compatible",
armcpu->dtb_compatible);
if (vbi->smp_cpus > 1) {
- qemu_devtree_setprop_string(vbi->fdt, nodename,
+ qemu_fdt_setprop_string(vbi->fdt, nodename,
"enable-method", "psci");
}
- qemu_devtree_setprop_cell(vbi->fdt, nodename, "reg", cpu);
+ qemu_fdt_setprop_cell(vbi->fdt, nodename, "reg", cpu);
g_free(nodename);
}
}
@@ -247,20 +247,20 @@ static void fdt_add_gic_node(const VirtBoardInfo *vbi)
{
uint32_t gic_phandle;
- gic_phandle = qemu_devtree_alloc_phandle(vbi->fdt);
- qemu_devtree_setprop_cell(vbi->fdt, "/", "interrupt-parent", gic_phandle);
+ gic_phandle = qemu_fdt_alloc_phandle(vbi->fdt);
+ qemu_fdt_setprop_cell(vbi->fdt, "/", "interrupt-parent", gic_phandle);
- qemu_devtree_add_subnode(vbi->fdt, "/intc");
- qemu_devtree_setprop_string(vbi->fdt, "/intc", "compatible",
+ qemu_fdt_add_subnode(vbi->fdt, "/intc");
+ qemu_fdt_setprop_string(vbi->fdt, "/intc", "compatible",
vbi->gic_compatible);
- qemu_devtree_setprop_cell(vbi->fdt, "/intc", "#interrupt-cells", 3);
- qemu_devtree_setprop(vbi->fdt, "/intc", "interrupt-controller", NULL, 0);
- qemu_devtree_setprop_sized_cells(vbi->fdt, "/intc", "reg",
+ qemu_fdt_setprop_cell(vbi->fdt, "/intc", "#interrupt-cells", 3);
+ qemu_fdt_setprop(vbi->fdt, "/intc", "interrupt-controller", NULL, 0);
+ qemu_fdt_setprop_sized_cells(vbi->fdt, "/intc", "reg",
2, vbi->memmap[VIRT_GIC_DIST].base,
2, vbi->memmap[VIRT_GIC_DIST].size,
2, vbi->memmap[VIRT_GIC_CPU].base,
2, vbi->memmap[VIRT_GIC_CPU].size);
- qemu_devtree_setprop_cell(vbi->fdt, "/intc", "phandle", gic_phandle);
+ qemu_fdt_setprop_cell(vbi->fdt, "/intc", "phandle", gic_phandle);
}
static void create_uart(const VirtBoardInfo *vbi, qemu_irq *pic)
@@ -275,18 +275,18 @@ static void create_uart(const VirtBoardInfo *vbi, qemu_irq *pic)
sysbus_create_simple("pl011", base, pic[irq]);
nodename = g_strdup_printf("/pl011@%" PRIx64, base);
- qemu_devtree_add_subnode(vbi->fdt, nodename);
+ qemu_fdt_add_subnode(vbi->fdt, nodename);
/* Note that we can't use setprop_string because of the embedded NUL */
- qemu_devtree_setprop(vbi->fdt, nodename, "compatible",
+ qemu_fdt_setprop(vbi->fdt, nodename, "compatible",
compat, sizeof(compat));
- qemu_devtree_setprop_sized_cells(vbi->fdt, nodename, "reg",
+ qemu_fdt_setprop_sized_cells(vbi->fdt, nodename, "reg",
2, base, 2, size);
- qemu_devtree_setprop_cells(vbi->fdt, nodename, "interrupts",
+ qemu_fdt_setprop_cells(vbi->fdt, nodename, "interrupts",
GIC_FDT_IRQ_TYPE_SPI, irq,
GIC_FDT_IRQ_FLAGS_EDGE_LO_HI);
- qemu_devtree_setprop_cells(vbi->fdt, nodename, "clocks",
+ qemu_fdt_setprop_cells(vbi->fdt, nodename, "clocks",
vbi->clock_phandle, vbi->clock_phandle);
- qemu_devtree_setprop(vbi->fdt, nodename, "clock-names",
+ qemu_fdt_setprop(vbi->fdt, nodename, "clock-names",
clocknames, sizeof(clocknames));
g_free(nodename);
}
@@ -314,14 +314,14 @@ static void create_virtio_devices(const VirtBoardInfo *vbi, qemu_irq *pic)
hwaddr base = vbi->memmap[VIRT_MMIO].base + i * size;
nodename = g_strdup_printf("/virtio_mmio@%" PRIx64, base);
- qemu_devtree_add_subnode(vbi->fdt, nodename);
- qemu_devtree_setprop_string(vbi->fdt, nodename,
- "compatible", "virtio,mmio");
- qemu_devtree_setprop_sized_cells(vbi->fdt, nodename, "reg",
- 2, base, 2, size);
- qemu_devtree_setprop_cells(vbi->fdt, nodename, "interrupts",
- GIC_FDT_IRQ_TYPE_SPI, irq,
- GIC_FDT_IRQ_FLAGS_EDGE_LO_HI);
+ qemu_fdt_add_subnode(vbi->fdt, nodename);
+ qemu_fdt_setprop_string(vbi->fdt, nodename,
+ "compatible", "virtio,mmio");
+ qemu_fdt_setprop_sized_cells(vbi->fdt, nodename, "reg",
+ 2, base, 2, size);
+ qemu_fdt_setprop_cells(vbi->fdt, nodename, "interrupts",
+ GIC_FDT_IRQ_TYPE_SPI, irq,
+ GIC_FDT_IRQ_FLAGS_EDGE_LO_HI);
g_free(nodename);
}
}
diff --git a/hw/arm/xilinx_zynq.c b/hw/arm/xilinx_zynq.c
index 46924a0391..17251c7a65 100644
--- a/hw/arm/xilinx_zynq.c
+++ b/hw/arm/xilinx_zynq.c
@@ -25,6 +25,7 @@
#include "sysemu/blockdev.h"
#include "hw/loader.h"
#include "hw/ssi.h"
+#include "qemu/error-report.h"
#define NUM_SPI_FLASHES 4
#define NUM_QSPI_FLASHES 2
@@ -35,6 +36,8 @@
#define IRQ_OFFSET 32 /* pic interrupts start from index 32 */
+#define MPCORE_PERIPHBASE 0xF8F00000
+
static const int dma_irqs[8] = {
46, 47, 48, 49, 72, 73, 74, 75
};
@@ -102,6 +105,7 @@ static void zynq_init(QEMUMachineInitArgs *args)
const char *kernel_filename = args->kernel_filename;
const char *kernel_cmdline = args->kernel_cmdline;
const char *initrd_filename = args->initrd_filename;
+ ObjectClass *cpu_oc;
ARMCPU *cpu;
MemoryRegion *address_space_mem = get_system_memory();
MemoryRegion *ext_ram = g_new(MemoryRegion, 1);
@@ -110,15 +114,24 @@ static void zynq_init(QEMUMachineInitArgs *args)
SysBusDevice *busdev;
qemu_irq pic[64];
NICInfo *nd;
+ Error *err = NULL;
int n;
if (!cpu_model) {
cpu_model = "cortex-a9";
}
+ cpu_oc = cpu_class_by_name(TYPE_ARM_CPU, cpu_model);
+
+ cpu = ARM_CPU(object_new(object_class_get_name(cpu_oc)));
- cpu = cpu_arm_init(cpu_model);
- if (!cpu) {
- fprintf(stderr, "Unable to find CPU definition\n");
+ object_property_set_int(OBJECT(cpu), MPCORE_PERIPHBASE, "reset-cbar", &err);
+ if (err) {
+ error_report("%s", error_get_pretty(err));
+ exit(1);
+ }
+ object_property_set_bool(OBJECT(cpu), true, "realized", &err);
+ if (err) {
+ error_report("%s", error_get_pretty(err));
exit(1);
}
@@ -154,7 +167,7 @@ static void zynq_init(QEMUMachineInitArgs *args)
qdev_prop_set_uint32(dev, "num-cpu", 1);
qdev_init_nofail(dev);
busdev = SYS_BUS_DEVICE(dev);
- sysbus_mmio_map(busdev, 0, 0xF8F00000);
+ sysbus_mmio_map(busdev, 0, MPCORE_PERIPHBASE);
sysbus_connect_irq(busdev, 0,
qdev_get_gpio_in(DEVICE(cpu), ARM_CPU_IRQ));
diff --git a/hw/block/pflash_cfi01.c b/hw/block/pflash_cfi01.c
index 018a9677ba..0c95d53dca 100644
--- a/hw/block/pflash_cfi01.c
+++ b/hw/block/pflash_cfi01.c
@@ -40,6 +40,7 @@
#include "hw/block/flash.h"
#include "block/block.h"
#include "qemu/timer.h"
+#include "qemu/bitops.h"
#include "exec/address-spaces.h"
#include "qemu/host-utils.h"
#include "hw/sysbus.h"
@@ -71,7 +72,9 @@ struct pflash_t {
BlockDriverState *bs;
uint32_t nb_blocs;
uint64_t sector_len;
- uint8_t width;
+ uint8_t bank_width;
+ uint8_t device_width; /* If 0, device width not specified. */
+ uint8_t max_device_width; /* max device width in bytes */
uint8_t be;
uint8_t wcycle; /* if 0, the flash is read normally */
int ro;
@@ -116,6 +119,119 @@ static void pflash_timer (void *opaque)
pfl->cmd = 0;
}
+/* Perform a CFI query based on the bank width of the flash.
+ * If this code is called we know we have a device_width set for
+ * this flash.
+ */
+static uint32_t pflash_cfi_query(pflash_t *pfl, hwaddr offset)
+{
+ int i;
+ uint32_t resp = 0;
+ hwaddr boff;
+
+ /* Adjust incoming offset to match expected device-width
+ * addressing. CFI query addresses are always specified in terms of
+ * the maximum supported width of the device. This means that x8
+ * devices and x8/x16 devices in x8 mode behave differently. For
+ * devices that are not used at their max width, we will be
+ * provided with addresses that use higher address bits than
+ * expected (based on the max width), so we will shift them lower
+ * so that they will match the addresses used when
+ * device_width==max_device_width.
+ */
+ boff = offset >> (ctz32(pfl->bank_width) +
+ ctz32(pfl->max_device_width) - ctz32(pfl->device_width));
+
+ if (boff > pfl->cfi_len) {
+ return 0;
+ }
+ /* Now we will construct the CFI response generated by a single
+ * device, then replicate that for all devices that make up the
+ * bus. For wide parts used in x8 mode, CFI query responses
+     * differ from those of native byte-wide parts.
+ */
+ resp = pfl->cfi_table[boff];
+ if (pfl->device_width != pfl->max_device_width) {
+ /* The only case currently supported is x8 mode for a
+ * wider part.
+ */
+ if (pfl->device_width != 1 || pfl->bank_width > 4) {
+ DPRINTF("%s: Unsupported device configuration: "
+ "device_width=%d, max_device_width=%d\n",
+ __func__, pfl->device_width,
+ pfl->max_device_width);
+ return 0;
+ }
+ /* CFI query data is repeated, rather than zero padded for
+ * wide devices used in x8 mode.
+ */
+ for (i = 1; i < pfl->max_device_width; i++) {
+ resp = deposit32(resp, 8 * i, 8, pfl->cfi_table[boff]);
+ }
+ }
+ /* Replicate responses for each device in bank. */
+ if (pfl->device_width < pfl->bank_width) {
+ for (i = pfl->device_width;
+ i < pfl->bank_width; i += pfl->device_width) {
+ resp = deposit32(resp, 8 * i, 8 * pfl->device_width, resp);
+ }
+ }
+
+ return resp;
+}
+
+
+
+/* Perform a device id query based on the bank width of the flash. */
+static uint32_t pflash_devid_query(pflash_t *pfl, hwaddr offset)
+{
+ int i;
+ uint32_t resp;
+ hwaddr boff;
+
+ /* Adjust incoming offset to match expected device-width
+ * addressing. Device ID read addresses are always specified in
+ * terms of the maximum supported width of the device. This means
+ * that x8 devices and x8/x16 devices in x8 mode behave
+ * differently. For devices that are not used at their max width,
+ * we will be provided with addresses that use higher address bits
+ * than expected (based on the max width), so we will shift them
+ * lower so that they will match the addresses used when
+ * device_width==max_device_width.
+ */
+ boff = offset >> (ctz32(pfl->bank_width) +
+ ctz32(pfl->max_device_width) - ctz32(pfl->device_width));
+
+    /* Mask off upper bits which may be used to query block
+     * or sector lock status at other addresses.
+     * Offsets 2/3 are block lock status, which is not emulated.
+ */
+ switch (boff & 0xFF) {
+ case 0:
+ resp = pfl->ident0;
+        DPRINTF("%s: Manufacturer Code %04x\n", __func__, resp);
+ break;
+ case 1:
+ resp = pfl->ident1;
+        DPRINTF("%s: Device ID Code %04x\n", __func__, resp);
+ break;
+ default:
+ DPRINTF("%s: Read Device Information offset=%x\n", __func__,
+ (unsigned)offset);
+ return 0;
+ break;
+ }
+ /* Replicate responses for each device in bank. */
+ if (pfl->device_width < pfl->bank_width) {
+ for (i = pfl->device_width;
+ i < pfl->bank_width; i += pfl->device_width) {
+ resp = deposit32(resp, 8 * i, 8 * pfl->device_width, resp);
+ }
+ }
+
+ return resp;
+}
+
static uint32_t pflash_read (pflash_t *pfl, hwaddr offset,
int width, int be)
{
@@ -124,12 +240,6 @@ static uint32_t pflash_read (pflash_t *pfl, hwaddr offset,
uint8_t *p;
ret = -1;
- boff = offset & 0xFF; /* why this here ?? */
-
- if (pfl->width == 2)
- boff = boff >> 1;
- else if (pfl->width == 4)
- boff = boff >> 2;
#if 0
DPRINTF("%s: reading offset " TARGET_FMT_plx " under cmd %02x width %d\n",
@@ -190,35 +300,88 @@ static uint32_t pflash_read (pflash_t *pfl, hwaddr offset,
case 0x60: /* Block (un)lock */
case 0x70: /* Status Register */
case 0xe8: /* Write block */
- /* Status register read */
+ /* Status register read. Return status from each device in
+ * bank.
+ */
ret = pfl->status;
- if (width > 2) {
+ if (pfl->device_width && width > pfl->device_width) {
+ int shift = pfl->device_width * 8;
+ while (shift + pfl->device_width * 8 <= width * 8) {
+ ret |= pfl->status << shift;
+ shift += pfl->device_width * 8;
+ }
+ } else if (!pfl->device_width && width > 2) {
+ /* Handle 32 bit flash cases where device width is not
+ * set. (Existing behavior before device width added.)
+ */
ret |= pfl->status << 16;
}
DPRINTF("%s: status %x\n", __func__, ret);
break;
case 0x90:
- switch (boff) {
- case 0:
- ret = pfl->ident0 << 8 | pfl->ident1;
- DPRINTF("%s: Manufacturer Code %04x\n", __func__, ret);
- break;
- case 1:
- ret = pfl->ident2 << 8 | pfl->ident3;
- DPRINTF("%s: Device ID Code %04x\n", __func__, ret);
- break;
- default:
- DPRINTF("%s: Read Device Information boff=%x\n", __func__,
- (unsigned)boff);
- ret = 0;
- break;
+ if (!pfl->device_width) {
+ /* Preserve old behavior if device width not specified */
+ boff = offset & 0xFF;
+ if (pfl->bank_width == 2) {
+ boff = boff >> 1;
+ } else if (pfl->bank_width == 4) {
+ boff = boff >> 2;
+ }
+
+ switch (boff) {
+ case 0:
+ ret = pfl->ident0 << 8 | pfl->ident1;
+ DPRINTF("%s: Manufacturer Code %04x\n", __func__, ret);
+ break;
+ case 1:
+ ret = pfl->ident2 << 8 | pfl->ident3;
+ DPRINTF("%s: Device ID Code %04x\n", __func__, ret);
+ break;
+ default:
+ DPRINTF("%s: Read Device Information boff=%x\n", __func__,
+ (unsigned)boff);
+ ret = 0;
+ break;
+ }
+ } else {
+ /* If we have a read larger than the bank_width, combine multiple
+ * manufacturer/device ID queries into a single response.
+ */
+ int i;
+ for (i = 0; i < width; i += pfl->bank_width) {
+ ret = deposit32(ret, i * 8, pfl->bank_width * 8,
+ pflash_devid_query(pfl,
+ offset + i * pfl->bank_width));
+ }
}
break;
case 0x98: /* Query mode */
- if (boff > pfl->cfi_len)
- ret = 0;
- else
- ret = pfl->cfi_table[boff];
+ if (!pfl->device_width) {
+ /* Preserve old behavior if device width not specified */
+ boff = offset & 0xFF;
+ if (pfl->bank_width == 2) {
+ boff = boff >> 1;
+ } else if (pfl->bank_width == 4) {
+ boff = boff >> 2;
+ }
+
+ if (boff > pfl->cfi_len) {
+ ret = 0;
+ } else {
+ ret = pfl->cfi_table[boff];
+ }
+ } else {
+ /* If we have a read larger than the bank_width, combine multiple
+ * CFI queries into a single response.
+ */
+ int i;
+ for (i = 0; i < width; i += pfl->bank_width) {
+ ret = deposit32(ret, i * 8, pfl->bank_width * 8,
+ pflash_cfi_query(pfl,
+ offset + i * pfl->bank_width));
+ }
+ }
+
break;
}
return ret;
@@ -378,6 +541,14 @@ static void pflash_write(pflash_t *pfl, hwaddr offset,
break;
case 0xe8:
+ /* Mask writeblock size based on device width, or bank width if
+ * device width not specified.
+ */
+ if (pfl->device_width) {
+ value = extract32(value, 0, pfl->device_width * 8);
+ } else {
+ value = extract32(value, 0, pfl->bank_width * 8);
+ }
DPRINTF("%s: block write of %x bytes\n", __func__, value);
pfl->counter = value;
pfl->wcycle++;
@@ -613,6 +784,13 @@ static void pflash_cfi01_realize(DeviceState *dev, Error **errp)
pfl->ro = 0;
}
+ /* Default to devices being used at their maximum device width. This was
+ * assumed before the device_width support was added.
+ */
+ if (!pfl->max_device_width) {
+ pfl->max_device_width = pfl->device_width;
+ }
+
pfl->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, pflash_timer, pfl);
pfl->wcycle = 0;
pfl->cmd = 0;
@@ -665,7 +843,7 @@ static void pflash_cfi01_realize(DeviceState *dev, Error **errp)
pfl->cfi_table[0x28] = 0x02;
pfl->cfi_table[0x29] = 0x00;
/* Max number of bytes in multi-bytes write */
- if (pfl->width == 1) {
+ if (pfl->bank_width == 1) {
pfl->cfi_table[0x2A] = 0x08;
} else {
pfl->cfi_table[0x2A] = 0x0B;
@@ -706,7 +884,25 @@ static Property pflash_cfi01_properties[] = {
DEFINE_PROP_DRIVE("drive", struct pflash_t, bs),
DEFINE_PROP_UINT32("num-blocks", struct pflash_t, nb_blocs, 0),
DEFINE_PROP_UINT64("sector-length", struct pflash_t, sector_len, 0),
- DEFINE_PROP_UINT8("width", struct pflash_t, width, 0),
+ /* width here is the overall width of this QEMU device in bytes.
+ * The QEMU device may be emulating a number of flash devices
+ * wired up in parallel; the width of each individual flash
+ * device should be specified via device-width. If the individual
+ * devices have a maximum width which is greater than the width
+ * they are being used for, this maximum width should be set via
+ * max-device-width (which otherwise defaults to device-width).
+ * So for instance a 32-bit wide QEMU flash device made from four
+ * 16-bit flash devices used in 8-bit wide mode would be configured
+ * with width = 4, device-width = 1, max-device-width = 2.
+ *
+ * If device-width is not specified we default to backwards
+ * compatible behaviour which is a bad emulation of two
+ * 16 bit devices making up a 32 bit wide QEMU device. This
+ * is deprecated for new uses of this device.
+ */
+ DEFINE_PROP_UINT8("width", struct pflash_t, bank_width, 0),
+ DEFINE_PROP_UINT8("device-width", struct pflash_t, device_width, 0),
+ DEFINE_PROP_UINT8("max-device-width", struct pflash_t, max_device_width, 0),
DEFINE_PROP_UINT8("big-endian", struct pflash_t, be, 0),
DEFINE_PROP_UINT16("id0", struct pflash_t, ident0, 0),
DEFINE_PROP_UINT16("id1", struct pflash_t, ident1, 0),
@@ -745,8 +941,8 @@ pflash_t *pflash_cfi01_register(hwaddr base,
DeviceState *qdev, const char *name,
hwaddr size,
BlockDriverState *bs,
- uint32_t sector_len, int nb_blocs, int width,
- uint16_t id0, uint16_t id1,
+ uint32_t sector_len, int nb_blocs,
+ int bank_width, uint16_t id0, uint16_t id1,
uint16_t id2, uint16_t id3, int be)
{
DeviceState *dev = qdev_create(NULL, TYPE_CFI_PFLASH01);
@@ -756,7 +952,7 @@ pflash_t *pflash_cfi01_register(hwaddr base,
}
qdev_prop_set_uint32(dev, "num-blocks", nb_blocs);
qdev_prop_set_uint64(dev, "sector-length", sector_len);
- qdev_prop_set_uint8(dev, "width", width);
+ qdev_prop_set_uint8(dev, "width", bank_width);
qdev_prop_set_uint8(dev, "big-endian", !!be);
qdev_prop_set_uint16(dev, "id0", id0);
qdev_prop_set_uint16(dev, "id1", id1);
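
The new query helpers above build a single device's 8-bit answer and then copy it into every device lane of the bank. Below is a standalone sketch of that replication step (not part of the patch; deposit32() is re-implemented locally so the example compiles on its own), using the width = 4, device-width = 1 configuration described in the property comment, i.e. four chips driven in x8 mode on a 32-bit bus.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Local stand-in for QEMU's deposit32() from qemu/bitops.h. */
static uint32_t deposit32(uint32_t value, int start, int length,
                          uint32_t fieldval)
{
    uint32_t mask = (~0U >> (32 - length)) << start;
    return (value & ~mask) | ((fieldval << start) & mask);
}

int main(void)
{
    const int bank_width = 4;   /* 32-bit wide QEMU flash device        */
    const int device_width = 1; /* each chip is driven in x8 mode       */
    uint32_t resp = 0x51;       /* e.g. the CFI 'Q' byte from one chip  */
    int i;

    /* Same replication loop as pflash_cfi_query()/pflash_devid_query(). */
    for (i = device_width; i < bank_width; i += device_width) {
        resp = deposit32(resp, 8 * i, 8 * device_width, resp);
    }
    printf("bank response: 0x%08" PRIx32 "\n", resp); /* 0x51515151 */
    return 0;
}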
diff --git a/hw/char/Makefile.objs b/hw/char/Makefile.objs
index cbd6a006f4..be2a7d953a 100644
--- a/hw/char/Makefile.objs
+++ b/hw/char/Makefile.objs
@@ -14,6 +14,7 @@ obj-$(CONFIG_COLDFIRE) += mcf_uart.o
obj-$(CONFIG_OMAP) += omap_uart.o
obj-$(CONFIG_SH4) += sh_serial.o
obj-$(CONFIG_PSERIES) += spapr_vty.o
+obj-$(CONFIG_DIGIC) += digic-uart.o
common-obj-$(CONFIG_ETRAXFS) += etraxfs_ser.o
common-obj-$(CONFIG_ISA_DEBUG) += debugcon.o
diff --git a/hw/char/digic-uart.c b/hw/char/digic-uart.c
new file mode 100644
index 0000000000..fd8e07713d
--- /dev/null
+++ b/hw/char/digic-uart.c
@@ -0,0 +1,195 @@
+/*
+ * QEMU model of the Canon DIGIC UART block.
+ *
+ * Copyright (C) 2013 Antony Pavlov <antonynpavlov@gmail.com>
+ *
+ * This model is based on reverse engineering efforts
+ * made by CHDK (http://chdk.wikia.com) and
+ * Magic Lantern (http://www.magiclantern.fm) projects
+ * contributors.
+ *
+ * See "Serial terminal" docs here:
+ * http://magiclantern.wikia.com/wiki/Register_Map#Misc_Registers
+ *
+ * The QEMU model of the Milkymist UART block by Michael Walle
+ * is used as a template.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "hw/hw.h"
+#include "hw/sysbus.h"
+#include "sysemu/char.h"
+
+#include "hw/char/digic-uart.h"
+
+enum {
+ ST_RX_RDY = (1 << 0),
+ ST_TX_RDY = (1 << 1),
+};
+
+static uint64_t digic_uart_read(void *opaque, hwaddr addr,
+ unsigned size)
+{
+ DigicUartState *s = opaque;
+ uint64_t ret = 0;
+
+ addr >>= 2;
+
+ switch (addr) {
+ case R_RX:
+ s->reg_st &= ~(ST_RX_RDY);
+ ret = s->reg_rx;
+ break;
+
+ case R_ST:
+ ret = s->reg_st;
+ break;
+
+ default:
+ qemu_log_mask(LOG_UNIMP,
+ "digic-uart: read access to unknown register 0x"
+ TARGET_FMT_plx, addr << 2);
+ }
+
+ return ret;
+}
+
+static void digic_uart_write(void *opaque, hwaddr addr, uint64_t value,
+ unsigned size)
+{
+ DigicUartState *s = opaque;
+ unsigned char ch = value;
+
+ addr >>= 2;
+
+ switch (addr) {
+ case R_TX:
+ if (s->chr) {
+ qemu_chr_fe_write_all(s->chr, &ch, 1);
+ }
+ break;
+
+ case R_ST:
+ /*
+ * Ignore write to R_ST.
+ *
+ * The point is that this register is actively used
+     * while receiving and transmitting characters,
+     * but we don't know the function of most of its bits.
+ *
+ * Ignoring writes to R_ST is only a simplification
+ * of the model. It has no perceptible side effects
+ * for existing guests.
+ */
+ break;
+
+ default:
+ qemu_log_mask(LOG_UNIMP,
+ "digic-uart: write access to unknown register 0x"
+ TARGET_FMT_plx, addr << 2);
+ }
+}
+
+static const MemoryRegionOps uart_mmio_ops = {
+ .read = digic_uart_read,
+ .write = digic_uart_write,
+ .valid = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ },
+ .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+static int uart_can_rx(void *opaque)
+{
+ DigicUartState *s = opaque;
+
+ return !(s->reg_st & ST_RX_RDY);
+}
+
+static void uart_rx(void *opaque, const uint8_t *buf, int size)
+{
+ DigicUartState *s = opaque;
+
+ assert(uart_can_rx(opaque));
+
+ s->reg_st |= ST_RX_RDY;
+ s->reg_rx = *buf;
+}
+
+static void uart_event(void *opaque, int event)
+{
+}
+
+static void digic_uart_reset(DeviceState *d)
+{
+ DigicUartState *s = DIGIC_UART(d);
+
+ s->reg_rx = 0;
+ s->reg_st = ST_TX_RDY;
+}
+
+static void digic_uart_realize(DeviceState *dev, Error **errp)
+{
+ DigicUartState *s = DIGIC_UART(dev);
+
+ s->chr = qemu_char_get_next_serial();
+ if (s->chr) {
+ qemu_chr_add_handlers(s->chr, uart_can_rx, uart_rx, uart_event, s);
+ }
+}
+
+static void digic_uart_init(Object *obj)
+{
+ DigicUartState *s = DIGIC_UART(obj);
+
+ memory_region_init_io(&s->regs_region, OBJECT(s), &uart_mmio_ops, s,
+ TYPE_DIGIC_UART, 0x18);
+ sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->regs_region);
+}
+
+static const VMStateDescription vmstate_digic_uart = {
+ .name = "digic-uart",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .minimum_version_id_old = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(reg_rx, DigicUartState),
+ VMSTATE_UINT32(reg_st, DigicUartState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static void digic_uart_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->realize = digic_uart_realize;
+ dc->reset = digic_uart_reset;
+ dc->vmsd = &vmstate_digic_uart;
+}
+
+static const TypeInfo digic_uart_info = {
+ .name = TYPE_DIGIC_UART,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(DigicUartState),
+ .instance_init = digic_uart_init,
+ .class_init = digic_uart_class_init,
+};
+
+static void digic_uart_register_types(void)
+{
+ type_register_static(&digic_uart_info);
+}
+
+type_init(digic_uart_register_types)
diff --git a/hw/core/loader.c b/hw/core/loader.c
index 60d2ebd4ac..0634bee20c 100644
--- a/hw/core/loader.c
+++ b/hw/core/loader.c
@@ -785,6 +785,13 @@ static void rom_reset(void *unused)
g_free(rom->data);
rom->data = NULL;
}
+ /*
+ * The rom loader is really on the same level as firmware in the guest
+ * shadowing a ROM into RAM. Such a shadowing mechanism needs to ensure
+ * that the instruction cache for that new region is clear, so that the
+ * CPU definitely fetches its instructions from the just written data.
+ */
+ cpu_flush_icache_range(rom->addr, rom->datasize);
}
}
diff --git a/hw/intc/Makefile.objs b/hw/intc/Makefile.objs
index 47ac44264c..60eb936e0d 100644
--- a/hw/intc/Makefile.objs
+++ b/hw/intc/Makefile.objs
@@ -24,3 +24,4 @@ obj-$(CONFIG_OPENPIC_KVM) += openpic_kvm.o
obj-$(CONFIG_SH4) += sh_intc.o
obj-$(CONFIG_XICS) += xics.o
obj-$(CONFIG_XICS_KVM) += xics_kvm.o
+obj-$(CONFIG_ALLWINNER_A10_PIC) += allwinner-a10-pic.o
diff --git a/hw/intc/allwinner-a10-pic.c b/hw/intc/allwinner-a10-pic.c
new file mode 100644
index 0000000000..407d563514
--- /dev/null
+++ b/hw/intc/allwinner-a10-pic.c
@@ -0,0 +1,200 @@
+/*
+ * Allwinner A10 interrupt controller device emulation
+ *
+ * Copyright (C) 2013 Li Guang
+ * Written by Li Guang <lig.fnst@cn.fujitsu.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include "hw/sysbus.h"
+#include "hw/devices.h"
+#include "sysemu/sysemu.h"
+#include "hw/intc/allwinner-a10-pic.h"
+
+static void aw_a10_pic_update(AwA10PICState *s)
+{
+ uint8_t i;
+ int irq = 0, fiq = 0;
+
+ for (i = 0; i < AW_A10_PIC_REG_NUM; i++) {
+ irq |= s->irq_pending[i] & ~s->mask[i];
+ fiq |= s->select[i] & s->irq_pending[i] & ~s->mask[i];
+ }
+
+ qemu_set_irq(s->parent_irq, !!irq);
+ qemu_set_irq(s->parent_fiq, !!fiq);
+}
+
+static void aw_a10_pic_set_irq(void *opaque, int irq, int level)
+{
+ AwA10PICState *s = opaque;
+
+ if (level) {
+ set_bit(irq % 32, (void *)&s->irq_pending[irq / 32]);
+ }
+ aw_a10_pic_update(s);
+}
+
+static uint64_t aw_a10_pic_read(void *opaque, hwaddr offset, unsigned size)
+{
+ AwA10PICState *s = opaque;
+ uint8_t index = (offset & 0xc) / 4;
+
+ switch (offset) {
+ case AW_A10_PIC_VECTOR:
+ return s->vector;
+ case AW_A10_PIC_BASE_ADDR:
+ return s->base_addr;
+ case AW_A10_PIC_PROTECT:
+ return s->protect;
+ case AW_A10_PIC_NMI:
+ return s->nmi;
+ case AW_A10_PIC_IRQ_PENDING ... AW_A10_PIC_IRQ_PENDING + 8:
+ return s->irq_pending[index];
+ case AW_A10_PIC_FIQ_PENDING ... AW_A10_PIC_FIQ_PENDING + 8:
+ return s->fiq_pending[index];
+ case AW_A10_PIC_SELECT ... AW_A10_PIC_SELECT + 8:
+ return s->select[index];
+ case AW_A10_PIC_ENABLE ... AW_A10_PIC_ENABLE + 8:
+ return s->enable[index];
+ case AW_A10_PIC_MASK ... AW_A10_PIC_MASK + 8:
+ return s->mask[index];
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: Bad offset 0x%x\n", __func__, (int)offset);
+ break;
+ }
+
+ return 0;
+}
+
+static void aw_a10_pic_write(void *opaque, hwaddr offset, uint64_t value,
+ unsigned size)
+{
+ AwA10PICState *s = opaque;
+ uint8_t index = (offset & 0xc) / 4;
+
+ switch (offset) {
+ case AW_A10_PIC_VECTOR:
+ s->vector = value & ~0x3;
+ break;
+ case AW_A10_PIC_BASE_ADDR:
+        s->base_addr = value & ~0x3;
+        break;
+ case AW_A10_PIC_PROTECT:
+ s->protect = value;
+ break;
+ case AW_A10_PIC_NMI:
+ s->nmi = value;
+ break;
+ case AW_A10_PIC_IRQ_PENDING ... AW_A10_PIC_IRQ_PENDING + 8:
+ s->irq_pending[index] &= ~value;
+ break;
+ case AW_A10_PIC_FIQ_PENDING ... AW_A10_PIC_FIQ_PENDING + 8:
+ s->fiq_pending[index] &= ~value;
+ break;
+ case AW_A10_PIC_SELECT ... AW_A10_PIC_SELECT + 8:
+ s->select[index] = value;
+ break;
+ case AW_A10_PIC_ENABLE ... AW_A10_PIC_ENABLE + 8:
+ s->enable[index] = value;
+ break;
+ case AW_A10_PIC_MASK ... AW_A10_PIC_MASK + 8:
+ s->mask[index] = value;
+ break;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: Bad offset 0x%x\n", __func__, (int)offset);
+ break;
+ }
+
+ aw_a10_pic_update(s);
+}
+
+static const MemoryRegionOps aw_a10_pic_ops = {
+ .read = aw_a10_pic_read,
+ .write = aw_a10_pic_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+static const VMStateDescription vmstate_aw_a10_pic = {
+ .name = "a10.pic",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .minimum_version_id_old = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(vector, AwA10PICState),
+ VMSTATE_UINT32(base_addr, AwA10PICState),
+ VMSTATE_UINT32(protect, AwA10PICState),
+ VMSTATE_UINT32(nmi, AwA10PICState),
+ VMSTATE_UINT32_ARRAY(irq_pending, AwA10PICState, AW_A10_PIC_REG_NUM),
+ VMSTATE_UINT32_ARRAY(fiq_pending, AwA10PICState, AW_A10_PIC_REG_NUM),
+ VMSTATE_UINT32_ARRAY(enable, AwA10PICState, AW_A10_PIC_REG_NUM),
+ VMSTATE_UINT32_ARRAY(select, AwA10PICState, AW_A10_PIC_REG_NUM),
+ VMSTATE_UINT32_ARRAY(mask, AwA10PICState, AW_A10_PIC_REG_NUM),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static void aw_a10_pic_init(Object *obj)
+{
+ AwA10PICState *s = AW_A10_PIC(obj);
+ SysBusDevice *dev = SYS_BUS_DEVICE(obj);
+
+ qdev_init_gpio_in(DEVICE(dev), aw_a10_pic_set_irq, AW_A10_PIC_INT_NR);
+ sysbus_init_irq(dev, &s->parent_irq);
+ sysbus_init_irq(dev, &s->parent_fiq);
+ memory_region_init_io(&s->iomem, OBJECT(s), &aw_a10_pic_ops, s,
+ TYPE_AW_A10_PIC, 0x400);
+ sysbus_init_mmio(dev, &s->iomem);
+}
+
+static void aw_a10_pic_reset(DeviceState *d)
+{
+ AwA10PICState *s = AW_A10_PIC(d);
+ uint8_t i;
+
+ s->base_addr = 0;
+ s->protect = 0;
+ s->nmi = 0;
+ s->vector = 0;
+ for (i = 0; i < AW_A10_PIC_REG_NUM; i++) {
+ s->irq_pending[i] = 0;
+ s->fiq_pending[i] = 0;
+ s->select[i] = 0;
+ s->enable[i] = 0;
+ s->mask[i] = 0;
+ }
+}
+
+static void aw_a10_pic_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->reset = aw_a10_pic_reset;
+ dc->desc = "allwinner a10 pic";
+ dc->vmsd = &vmstate_aw_a10_pic;
+}
+
+static const TypeInfo aw_a10_pic_info = {
+ .name = TYPE_AW_A10_PIC,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(AwA10PICState),
+ .instance_init = aw_a10_pic_init,
+ .class_init = aw_a10_pic_class_init,
+};
+
+static void aw_a10_register_types(void)
+{
+ type_register_static(&aw_a10_pic_info);
+}
+
+type_init(aw_a10_register_types);
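
In aw_a10_pic_update() above, IRQ is asserted when any source is pending and unmasked, and FIQ when such a source is additionally selected. A tiny worked example with made-up register values (a standalone sketch, not part of the model):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t pending = 0x00000012; /* sources 1 and 4 pending         */
    uint32_t mask    = 0x00000010; /* source 4 masked                 */
    uint32_t select  = 0x00000002; /* source 1 steered to FIQ         */

    int irq = !!(pending & ~mask);          /* 1: source 1 is unmasked */
    int fiq = !!(select & pending & ~mask); /* 1: source 1 is selected */

    printf("irq=%d fiq=%d\n", irq, fiq);
    return 0;
}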
diff --git a/hw/intc/xics.c b/hw/intc/xics.c
index a333305d3d..b437563fb9 100644
--- a/hw/intc/xics.c
+++ b/hw/intc/xics.c
@@ -723,7 +723,7 @@ static void rtas_set_xive(PowerPCCPU *cpu, sPAPREnvironment *spapr,
uint32_t nr, server, priority;
if ((nargs != 3) || (nret != 1)) {
- rtas_st(rets, 0, -3);
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
return;
}
@@ -733,13 +733,13 @@ static void rtas_set_xive(PowerPCCPU *cpu, sPAPREnvironment *spapr,
if (!ics_valid_irq(ics, nr) || (server >= ics->icp->nr_servers)
|| (priority > 0xff)) {
- rtas_st(rets, 0, -3);
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
return;
}
ics_write_xive(ics, nr, server, priority, priority);
- rtas_st(rets, 0, 0); /* Success */
+ rtas_st(rets, 0, RTAS_OUT_SUCCESS);
}
static void rtas_get_xive(PowerPCCPU *cpu, sPAPREnvironment *spapr,
@@ -751,18 +751,18 @@ static void rtas_get_xive(PowerPCCPU *cpu, sPAPREnvironment *spapr,
uint32_t nr;
if ((nargs != 1) || (nret != 3)) {
- rtas_st(rets, 0, -3);
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
return;
}
nr = rtas_ld(args, 0);
if (!ics_valid_irq(ics, nr)) {
- rtas_st(rets, 0, -3);
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
return;
}
- rtas_st(rets, 0, 0); /* Success */
+ rtas_st(rets, 0, RTAS_OUT_SUCCESS);
rtas_st(rets, 1, ics->irqs[nr - ics->offset].server);
rtas_st(rets, 2, ics->irqs[nr - ics->offset].priority);
}
@@ -776,21 +776,21 @@ static void rtas_int_off(PowerPCCPU *cpu, sPAPREnvironment *spapr,
uint32_t nr;
if ((nargs != 1) || (nret != 1)) {
- rtas_st(rets, 0, -3);
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
return;
}
nr = rtas_ld(args, 0);
if (!ics_valid_irq(ics, nr)) {
- rtas_st(rets, 0, -3);
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
return;
}
ics_write_xive(ics, nr, ics->irqs[nr - ics->offset].server, 0xff,
ics->irqs[nr - ics->offset].priority);
- rtas_st(rets, 0, 0); /* Success */
+ rtas_st(rets, 0, RTAS_OUT_SUCCESS);
}
static void rtas_int_on(PowerPCCPU *cpu, sPAPREnvironment *spapr,
@@ -802,14 +802,14 @@ static void rtas_int_on(PowerPCCPU *cpu, sPAPREnvironment *spapr,
uint32_t nr;
if ((nargs != 1) || (nret != 1)) {
- rtas_st(rets, 0, -3);
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
return;
}
nr = rtas_ld(args, 0);
if (!ics_valid_irq(ics, nr)) {
- rtas_st(rets, 0, -3);
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
return;
}
@@ -817,7 +817,7 @@ static void rtas_int_on(PowerPCCPU *cpu, sPAPREnvironment *spapr,
ics->irqs[nr - ics->offset].saved_priority,
ics->irqs[nr - ics->offset].saved_priority);
- rtas_st(rets, 0, 0); /* Success */
+ rtas_st(rets, 0, RTAS_OUT_SUCCESS);
}
/*
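
The numeric RTAS return codes replaced here and in the following sPAPR hunks correspond to named constants (the authoritative definitions live in include/hw/ppc/spapr.h, which this series touches). Judging purely from the substitutions visible in these hunks, the mapping is:

/* Mapping implied by the substitutions in this series. */
#define RTAS_OUT_SUCCESS            0
#define RTAS_OUT_NO_ERRORS_FOUND    1
#define RTAS_OUT_HW_ERROR          -1
#define RTAS_OUT_PARAM_ERROR       -3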
diff --git a/hw/microblaze/boot.c b/hw/microblaze/boot.c
index 2a7ea5c0f9..48d9e7afa4 100644
--- a/hw/microblaze/boot.c
+++ b/hw/microblaze/boot.c
@@ -79,19 +79,19 @@ static int microblaze_load_dtb(hwaddr addr,
}
if (kernel_cmdline) {
- r = qemu_devtree_setprop_string(fdt, "/chosen", "bootargs",
- kernel_cmdline);
+ r = qemu_fdt_setprop_string(fdt, "/chosen", "bootargs",
+ kernel_cmdline);
if (r < 0) {
fprintf(stderr, "couldn't set /chosen/bootargs\n");
}
}
if (initrd_start) {
- qemu_devtree_setprop_cell(fdt, "/chosen", "linux,initrd-start",
- initrd_start);
+ qemu_fdt_setprop_cell(fdt, "/chosen", "linux,initrd-start",
+ initrd_start);
- qemu_devtree_setprop_cell(fdt, "/chosen", "linux,initrd-end",
- initrd_end);
+ qemu_fdt_setprop_cell(fdt, "/chosen", "linux,initrd-end",
+ initrd_end);
}
cpu_physical_memory_write(addr, fdt, fdt_size);
diff --git a/hw/nvram/spapr_nvram.c b/hw/nvram/spapr_nvram.c
index beaad682ac..635713e766 100644
--- a/hw/nvram/spapr_nvram.c
+++ b/hw/nvram/spapr_nvram.c
@@ -55,12 +55,12 @@ static void rtas_nvram_fetch(PowerPCCPU *cpu, sPAPREnvironment *spapr,
void *membuf;
if ((nargs != 3) || (nret != 2)) {
- rtas_st(rets, 0, -3);
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
return;
}
if (!nvram) {
- rtas_st(rets, 0, -1);
+ rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
rtas_st(rets, 1, 0);
return;
}
@@ -71,7 +71,7 @@ static void rtas_nvram_fetch(PowerPCCPU *cpu, sPAPREnvironment *spapr,
if (((offset + len) < offset)
|| ((offset + len) > nvram->size)) {
- rtas_st(rets, 0, -3);
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
rtas_st(rets, 1, 0);
return;
}
@@ -87,7 +87,7 @@ static void rtas_nvram_fetch(PowerPCCPU *cpu, sPAPREnvironment *spapr,
}
cpu_physical_memory_unmap(membuf, len, 1, len);
- rtas_st(rets, 0, (alen < len) ? -1 : 0);
+ rtas_st(rets, 0, (alen < len) ? RTAS_OUT_HW_ERROR : RTAS_OUT_SUCCESS);
rtas_st(rets, 1, (alen < 0) ? 0 : alen);
}
@@ -102,12 +102,12 @@ static void rtas_nvram_store(PowerPCCPU *cpu, sPAPREnvironment *spapr,
void *membuf;
if ((nargs != 3) || (nret != 2)) {
- rtas_st(rets, 0, -3);
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
return;
}
if (!nvram) {
- rtas_st(rets, 0, -1);
+ rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
return;
}
@@ -117,7 +117,7 @@ static void rtas_nvram_store(PowerPCCPU *cpu, sPAPREnvironment *spapr,
if (((offset + len) < offset)
|| ((offset + len) > nvram->size)) {
- rtas_st(rets, 0, -3);
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
return;
}
@@ -132,7 +132,7 @@ static void rtas_nvram_store(PowerPCCPU *cpu, sPAPREnvironment *spapr,
}
cpu_physical_memory_unmap(membuf, len, 0, len);
- rtas_st(rets, 0, (alen < len) ? -1 : 0);
+ rtas_st(rets, 0, (alen < len) ? RTAS_OUT_HW_ERROR : RTAS_OUT_SUCCESS);
rtas_st(rets, 1, (alen < 0) ? 0 : alen);
}
diff --git a/hw/pci-host/grackle.c b/hw/pci-host/grackle.c
index 4991ec44b0..75b60d36ac 100644
--- a/hw/pci-host/grackle.c
+++ b/hw/pci-host/grackle.c
@@ -82,7 +82,7 @@ PCIBus *pci_grackle_init(uint32_t base, qemu_irq *pic,
memory_region_add_subregion(address_space_mem, 0x80000000ULL,
&d->pci_hole);
- phb->bus = pci_register_bus(dev, "pci",
+ phb->bus = pci_register_bus(dev, NULL,
pci_grackle_set_irq,
pci_grackle_map_irq,
pic,
diff --git a/hw/pci-host/uninorth.c b/hw/pci-host/uninorth.c
index 91530cdd04..adc1d89010 100644
--- a/hw/pci-host/uninorth.c
+++ b/hw/pci-host/uninorth.c
@@ -234,7 +234,7 @@ PCIBus *pci_pmac_init(qemu_irq *pic,
memory_region_add_subregion(address_space_mem, 0x80000000ULL,
&d->pci_hole);
- h->bus = pci_register_bus(dev, "pci",
+ h->bus = pci_register_bus(dev, NULL,
pci_unin_set_irq, pci_unin_map_irq,
pic,
&d->pci_mmio,
@@ -300,7 +300,7 @@ PCIBus *pci_pmac_u3_init(qemu_irq *pic,
memory_region_add_subregion(address_space_mem, 0x80000000ULL,
&d->pci_hole);
- h->bus = pci_register_bus(dev, "pci",
+ h->bus = pci_register_bus(dev, NULL,
pci_unin_set_irq, pci_unin_map_irq,
pic,
&d->pci_mmio,
diff --git a/hw/ppc/e500.c b/hw/ppc/e500.c
index cfdd84b969..b37ce9d633 100644
--- a/hw/ppc/e500.c
+++ b/hw/ppc/e500.c
@@ -108,18 +108,18 @@ static void dt_serial_create(void *fdt, unsigned long long offset,
char ser[128];
snprintf(ser, sizeof(ser), "%s/serial@%llx", soc, offset);
- qemu_devtree_add_subnode(fdt, ser);
- qemu_devtree_setprop_string(fdt, ser, "device_type", "serial");
- qemu_devtree_setprop_string(fdt, ser, "compatible", "ns16550");
- qemu_devtree_setprop_cells(fdt, ser, "reg", offset, 0x100);
- qemu_devtree_setprop_cell(fdt, ser, "cell-index", idx);
- qemu_devtree_setprop_cell(fdt, ser, "clock-frequency", 0);
- qemu_devtree_setprop_cells(fdt, ser, "interrupts", 42, 2);
- qemu_devtree_setprop_phandle(fdt, ser, "interrupt-parent", mpic);
- qemu_devtree_setprop_string(fdt, "/aliases", alias, ser);
+ qemu_fdt_add_subnode(fdt, ser);
+ qemu_fdt_setprop_string(fdt, ser, "device_type", "serial");
+ qemu_fdt_setprop_string(fdt, ser, "compatible", "ns16550");
+ qemu_fdt_setprop_cells(fdt, ser, "reg", offset, 0x100);
+ qemu_fdt_setprop_cell(fdt, ser, "cell-index", idx);
+ qemu_fdt_setprop_cell(fdt, ser, "clock-frequency", 0);
+ qemu_fdt_setprop_cells(fdt, ser, "interrupts", 42, 2);
+ qemu_fdt_setprop_phandle(fdt, ser, "interrupt-parent", mpic);
+ qemu_fdt_setprop_string(fdt, "/aliases", alias, ser);
if (defcon) {
- qemu_devtree_setprop_string(fdt, "/chosen", "linux,stdout-path", ser);
+ qemu_fdt_setprop_string(fdt, "/chosen", "linux,stdout-path", ser);
}
}
@@ -183,30 +183,30 @@ static int ppce500_load_device_tree(QEMUMachineInitArgs *args,
}
/* Manipulate device tree in memory. */
- qemu_devtree_setprop_cell(fdt, "/", "#address-cells", 2);
- qemu_devtree_setprop_cell(fdt, "/", "#size-cells", 2);
+ qemu_fdt_setprop_cell(fdt, "/", "#address-cells", 2);
+ qemu_fdt_setprop_cell(fdt, "/", "#size-cells", 2);
- qemu_devtree_add_subnode(fdt, "/memory");
- qemu_devtree_setprop_string(fdt, "/memory", "device_type", "memory");
- qemu_devtree_setprop(fdt, "/memory", "reg", mem_reg_property,
- sizeof(mem_reg_property));
+ qemu_fdt_add_subnode(fdt, "/memory");
+ qemu_fdt_setprop_string(fdt, "/memory", "device_type", "memory");
+ qemu_fdt_setprop(fdt, "/memory", "reg", mem_reg_property,
+ sizeof(mem_reg_property));
- qemu_devtree_add_subnode(fdt, "/chosen");
+ qemu_fdt_add_subnode(fdt, "/chosen");
if (initrd_size) {
- ret = qemu_devtree_setprop_cell(fdt, "/chosen", "linux,initrd-start",
- initrd_base);
+ ret = qemu_fdt_setprop_cell(fdt, "/chosen", "linux,initrd-start",
+ initrd_base);
if (ret < 0) {
fprintf(stderr, "couldn't set /chosen/linux,initrd-start\n");
}
- ret = qemu_devtree_setprop_cell(fdt, "/chosen", "linux,initrd-end",
- (initrd_base + initrd_size));
+ ret = qemu_fdt_setprop_cell(fdt, "/chosen", "linux,initrd-end",
+ (initrd_base + initrd_size));
if (ret < 0) {
fprintf(stderr, "couldn't set /chosen/linux,initrd-end\n");
}
}
- ret = qemu_devtree_setprop_string(fdt, "/chosen", "bootargs",
+ ret = qemu_fdt_setprop_string(fdt, "/chosen", "bootargs",
args->kernel_cmdline);
if (ret < 0)
fprintf(stderr, "couldn't set /chosen/bootargs\n");
@@ -217,22 +217,22 @@ static int ppce500_load_device_tree(QEMUMachineInitArgs *args,
tb_freq = kvmppc_get_tbfreq();
/* indicate KVM hypercall interface */
- qemu_devtree_add_subnode(fdt, "/hypervisor");
- qemu_devtree_setprop_string(fdt, "/hypervisor", "compatible",
- "linux,kvm");
+ qemu_fdt_add_subnode(fdt, "/hypervisor");
+ qemu_fdt_setprop_string(fdt, "/hypervisor", "compatible",
+ "linux,kvm");
kvmppc_get_hypercall(env, hypercall, sizeof(hypercall));
- qemu_devtree_setprop(fdt, "/hypervisor", "hcall-instructions",
- hypercall, sizeof(hypercall));
+ qemu_fdt_setprop(fdt, "/hypervisor", "hcall-instructions",
+ hypercall, sizeof(hypercall));
/* if KVM supports the idle hcall, set property indicating this */
if (kvmppc_get_hasidle(env)) {
- qemu_devtree_setprop(fdt, "/hypervisor", "has-idle", NULL, 0);
+ qemu_fdt_setprop(fdt, "/hypervisor", "has-idle", NULL, 0);
}
}
/* Create CPU nodes */
- qemu_devtree_add_subnode(fdt, "/cpus");
- qemu_devtree_setprop_cell(fdt, "/cpus", "#address-cells", 1);
- qemu_devtree_setprop_cell(fdt, "/cpus", "#size-cells", 0);
+ qemu_fdt_add_subnode(fdt, "/cpus");
+ qemu_fdt_setprop_cell(fdt, "/cpus", "#address-cells", 1);
+ qemu_fdt_setprop_cell(fdt, "/cpus", "#size-cells", 0);
/* We need to generate the cpu nodes in reverse order, so Linux can pick
the first node as boot node and be happy */
@@ -249,55 +249,56 @@ static int ppce500_load_device_tree(QEMUMachineInitArgs *args,
snprintf(cpu_name, sizeof(cpu_name), "/cpus/PowerPC,8544@%x",
cpu->cpu_index);
- qemu_devtree_add_subnode(fdt, cpu_name);
- qemu_devtree_setprop_cell(fdt, cpu_name, "clock-frequency", clock_freq);
- qemu_devtree_setprop_cell(fdt, cpu_name, "timebase-frequency", tb_freq);
- qemu_devtree_setprop_string(fdt, cpu_name, "device_type", "cpu");
- qemu_devtree_setprop_cell(fdt, cpu_name, "reg", cpu->cpu_index);
- qemu_devtree_setprop_cell(fdt, cpu_name, "d-cache-line-size",
- env->dcache_line_size);
- qemu_devtree_setprop_cell(fdt, cpu_name, "i-cache-line-size",
- env->icache_line_size);
- qemu_devtree_setprop_cell(fdt, cpu_name, "d-cache-size", 0x8000);
- qemu_devtree_setprop_cell(fdt, cpu_name, "i-cache-size", 0x8000);
- qemu_devtree_setprop_cell(fdt, cpu_name, "bus-frequency", 0);
+ qemu_fdt_add_subnode(fdt, cpu_name);
+ qemu_fdt_setprop_cell(fdt, cpu_name, "clock-frequency", clock_freq);
+ qemu_fdt_setprop_cell(fdt, cpu_name, "timebase-frequency", tb_freq);
+ qemu_fdt_setprop_string(fdt, cpu_name, "device_type", "cpu");
+ qemu_fdt_setprop_cell(fdt, cpu_name, "reg", cpu->cpu_index);
+ qemu_fdt_setprop_cell(fdt, cpu_name, "d-cache-line-size",
+ env->dcache_line_size);
+ qemu_fdt_setprop_cell(fdt, cpu_name, "i-cache-line-size",
+ env->icache_line_size);
+ qemu_fdt_setprop_cell(fdt, cpu_name, "d-cache-size", 0x8000);
+ qemu_fdt_setprop_cell(fdt, cpu_name, "i-cache-size", 0x8000);
+ qemu_fdt_setprop_cell(fdt, cpu_name, "bus-frequency", 0);
if (cpu->cpu_index) {
- qemu_devtree_setprop_string(fdt, cpu_name, "status", "disabled");
- qemu_devtree_setprop_string(fdt, cpu_name, "enable-method", "spin-table");
- qemu_devtree_setprop_u64(fdt, cpu_name, "cpu-release-addr",
- cpu_release_addr);
+ qemu_fdt_setprop_string(fdt, cpu_name, "status", "disabled");
+ qemu_fdt_setprop_string(fdt, cpu_name, "enable-method",
+ "spin-table");
+ qemu_fdt_setprop_u64(fdt, cpu_name, "cpu-release-addr",
+ cpu_release_addr);
} else {
- qemu_devtree_setprop_string(fdt, cpu_name, "status", "okay");
+ qemu_fdt_setprop_string(fdt, cpu_name, "status", "okay");
}
}
- qemu_devtree_add_subnode(fdt, "/aliases");
+ qemu_fdt_add_subnode(fdt, "/aliases");
/* XXX These should go into their respective devices' code */
snprintf(soc, sizeof(soc), "/soc@%llx", MPC8544_CCSRBAR_BASE);
- qemu_devtree_add_subnode(fdt, soc);
- qemu_devtree_setprop_string(fdt, soc, "device_type", "soc");
- qemu_devtree_setprop(fdt, soc, "compatible", compatible_sb,
- sizeof(compatible_sb));
- qemu_devtree_setprop_cell(fdt, soc, "#address-cells", 1);
- qemu_devtree_setprop_cell(fdt, soc, "#size-cells", 1);
- qemu_devtree_setprop_cells(fdt, soc, "ranges", 0x0,
- MPC8544_CCSRBAR_BASE >> 32, MPC8544_CCSRBAR_BASE,
- MPC8544_CCSRBAR_SIZE);
+ qemu_fdt_add_subnode(fdt, soc);
+ qemu_fdt_setprop_string(fdt, soc, "device_type", "soc");
+ qemu_fdt_setprop(fdt, soc, "compatible", compatible_sb,
+ sizeof(compatible_sb));
+ qemu_fdt_setprop_cell(fdt, soc, "#address-cells", 1);
+ qemu_fdt_setprop_cell(fdt, soc, "#size-cells", 1);
+ qemu_fdt_setprop_cells(fdt, soc, "ranges", 0x0,
+ MPC8544_CCSRBAR_BASE >> 32, MPC8544_CCSRBAR_BASE,
+ MPC8544_CCSRBAR_SIZE);
/* XXX should contain a reasonable value */
- qemu_devtree_setprop_cell(fdt, soc, "bus-frequency", 0);
+ qemu_fdt_setprop_cell(fdt, soc, "bus-frequency", 0);
snprintf(mpic, sizeof(mpic), "%s/pic@%llx", soc, MPC8544_MPIC_REGS_OFFSET);
- qemu_devtree_add_subnode(fdt, mpic);
- qemu_devtree_setprop_string(fdt, mpic, "device_type", "open-pic");
- qemu_devtree_setprop_string(fdt, mpic, "compatible", "fsl,mpic");
- qemu_devtree_setprop_cells(fdt, mpic, "reg", MPC8544_MPIC_REGS_OFFSET,
- 0x40000);
- qemu_devtree_setprop_cell(fdt, mpic, "#address-cells", 0);
- qemu_devtree_setprop_cell(fdt, mpic, "#interrupt-cells", 2);
- mpic_ph = qemu_devtree_alloc_phandle(fdt);
- qemu_devtree_setprop_cell(fdt, mpic, "phandle", mpic_ph);
- qemu_devtree_setprop_cell(fdt, mpic, "linux,phandle", mpic_ph);
- qemu_devtree_setprop(fdt, mpic, "interrupt-controller", NULL, 0);
+ qemu_fdt_add_subnode(fdt, mpic);
+ qemu_fdt_setprop_string(fdt, mpic, "device_type", "open-pic");
+ qemu_fdt_setprop_string(fdt, mpic, "compatible", "fsl,mpic");
+ qemu_fdt_setprop_cells(fdt, mpic, "reg", MPC8544_MPIC_REGS_OFFSET,
+ 0x40000);
+ qemu_fdt_setprop_cell(fdt, mpic, "#address-cells", 0);
+ qemu_fdt_setprop_cell(fdt, mpic, "#interrupt-cells", 2);
+ mpic_ph = qemu_fdt_alloc_phandle(fdt);
+ qemu_fdt_setprop_cell(fdt, mpic, "phandle", mpic_ph);
+ qemu_fdt_setprop_cell(fdt, mpic, "linux,phandle", mpic_ph);
+ qemu_fdt_setprop(fdt, mpic, "interrupt-controller", NULL, 0);
/*
* We have to generate ser1 first, because Linux takes the first
@@ -311,19 +312,19 @@ static int ppce500_load_device_tree(QEMUMachineInitArgs *args,
snprintf(gutil, sizeof(gutil), "%s/global-utilities@%llx", soc,
MPC8544_UTIL_OFFSET);
- qemu_devtree_add_subnode(fdt, gutil);
- qemu_devtree_setprop_string(fdt, gutil, "compatible", "fsl,mpc8544-guts");
- qemu_devtree_setprop_cells(fdt, gutil, "reg", MPC8544_UTIL_OFFSET, 0x1000);
- qemu_devtree_setprop(fdt, gutil, "fsl,has-rstcr", NULL, 0);
+ qemu_fdt_add_subnode(fdt, gutil);
+ qemu_fdt_setprop_string(fdt, gutil, "compatible", "fsl,mpc8544-guts");
+ qemu_fdt_setprop_cells(fdt, gutil, "reg", MPC8544_UTIL_OFFSET, 0x1000);
+ qemu_fdt_setprop(fdt, gutil, "fsl,has-rstcr", NULL, 0);
snprintf(msi, sizeof(msi), "/%s/msi@%llx", soc, MPC8544_MSI_REGS_OFFSET);
- qemu_devtree_add_subnode(fdt, msi);
- qemu_devtree_setprop_string(fdt, msi, "compatible", "fsl,mpic-msi");
- qemu_devtree_setprop_cells(fdt, msi, "reg", MPC8544_MSI_REGS_OFFSET, 0x200);
- msi_ph = qemu_devtree_alloc_phandle(fdt);
- qemu_devtree_setprop_cells(fdt, msi, "msi-available-ranges", 0x0, 0x100);
- qemu_devtree_setprop_phandle(fdt, msi, "interrupt-parent", mpic);
- qemu_devtree_setprop_cells(fdt, msi, "interrupts",
+ qemu_fdt_add_subnode(fdt, msi);
+ qemu_fdt_setprop_string(fdt, msi, "compatible", "fsl,mpic-msi");
+ qemu_fdt_setprop_cells(fdt, msi, "reg", MPC8544_MSI_REGS_OFFSET, 0x200);
+ msi_ph = qemu_fdt_alloc_phandle(fdt);
+ qemu_fdt_setprop_cells(fdt, msi, "msi-available-ranges", 0x0, 0x100);
+ qemu_fdt_setprop_phandle(fdt, msi, "interrupt-parent", mpic);
+ qemu_fdt_setprop_cells(fdt, msi, "interrupts",
0xe0, 0x0,
0xe1, 0x0,
0xe2, 0x0,
@@ -332,46 +333,46 @@ static int ppce500_load_device_tree(QEMUMachineInitArgs *args,
0xe5, 0x0,
0xe6, 0x0,
0xe7, 0x0);
- qemu_devtree_setprop_cell(fdt, msi, "phandle", msi_ph);
- qemu_devtree_setprop_cell(fdt, msi, "linux,phandle", msi_ph);
+ qemu_fdt_setprop_cell(fdt, msi, "phandle", msi_ph);
+ qemu_fdt_setprop_cell(fdt, msi, "linux,phandle", msi_ph);
snprintf(pci, sizeof(pci), "/pci@%llx", MPC8544_PCI_REGS_BASE);
- qemu_devtree_add_subnode(fdt, pci);
- qemu_devtree_setprop_cell(fdt, pci, "cell-index", 0);
- qemu_devtree_setprop_string(fdt, pci, "compatible", "fsl,mpc8540-pci");
- qemu_devtree_setprop_string(fdt, pci, "device_type", "pci");
- qemu_devtree_setprop_cells(fdt, pci, "interrupt-map-mask", 0xf800, 0x0,
- 0x0, 0x7);
- pci_map = pci_map_create(fdt, qemu_devtree_get_phandle(fdt, mpic),
+ qemu_fdt_add_subnode(fdt, pci);
+ qemu_fdt_setprop_cell(fdt, pci, "cell-index", 0);
+ qemu_fdt_setprop_string(fdt, pci, "compatible", "fsl,mpc8540-pci");
+ qemu_fdt_setprop_string(fdt, pci, "device_type", "pci");
+ qemu_fdt_setprop_cells(fdt, pci, "interrupt-map-mask", 0xf800, 0x0,
+ 0x0, 0x7);
+ pci_map = pci_map_create(fdt, qemu_fdt_get_phandle(fdt, mpic),
params->pci_first_slot, params->pci_nr_slots,
&len);
- qemu_devtree_setprop(fdt, pci, "interrupt-map", pci_map, len);
- qemu_devtree_setprop_phandle(fdt, pci, "interrupt-parent", mpic);
- qemu_devtree_setprop_cells(fdt, pci, "interrupts", 24, 2);
- qemu_devtree_setprop_cells(fdt, pci, "bus-range", 0, 255);
+ qemu_fdt_setprop(fdt, pci, "interrupt-map", pci_map, len);
+ qemu_fdt_setprop_phandle(fdt, pci, "interrupt-parent", mpic);
+ qemu_fdt_setprop_cells(fdt, pci, "interrupts", 24, 2);
+ qemu_fdt_setprop_cells(fdt, pci, "bus-range", 0, 255);
for (i = 0; i < 14; i++) {
pci_ranges[i] = cpu_to_be32(pci_ranges[i]);
}
- qemu_devtree_setprop_cell(fdt, pci, "fsl,msi", msi_ph);
- qemu_devtree_setprop(fdt, pci, "ranges", pci_ranges, sizeof(pci_ranges));
- qemu_devtree_setprop_cells(fdt, pci, "reg", MPC8544_PCI_REGS_BASE >> 32,
- MPC8544_PCI_REGS_BASE, 0, 0x1000);
- qemu_devtree_setprop_cell(fdt, pci, "clock-frequency", 66666666);
- qemu_devtree_setprop_cell(fdt, pci, "#interrupt-cells", 1);
- qemu_devtree_setprop_cell(fdt, pci, "#size-cells", 2);
- qemu_devtree_setprop_cell(fdt, pci, "#address-cells", 3);
- qemu_devtree_setprop_string(fdt, "/aliases", "pci0", pci);
+ qemu_fdt_setprop_cell(fdt, pci, "fsl,msi", msi_ph);
+ qemu_fdt_setprop(fdt, pci, "ranges", pci_ranges, sizeof(pci_ranges));
+ qemu_fdt_setprop_cells(fdt, pci, "reg", MPC8544_PCI_REGS_BASE >> 32,
+ MPC8544_PCI_REGS_BASE, 0, 0x1000);
+ qemu_fdt_setprop_cell(fdt, pci, "clock-frequency", 66666666);
+ qemu_fdt_setprop_cell(fdt, pci, "#interrupt-cells", 1);
+ qemu_fdt_setprop_cell(fdt, pci, "#size-cells", 2);
+ qemu_fdt_setprop_cell(fdt, pci, "#address-cells", 3);
+ qemu_fdt_setprop_string(fdt, "/aliases", "pci0", pci);
params->fixup_devtree(params, fdt);
if (toplevel_compat) {
- qemu_devtree_setprop(fdt, "/", "compatible", toplevel_compat,
- strlen(toplevel_compat) + 1);
+ qemu_fdt_setprop(fdt, "/", "compatible", toplevel_compat,
+ strlen(toplevel_compat) + 1);
}
done:
if (!dry_run) {
- qemu_devtree_dumpdtb(fdt, fdt_size);
+ qemu_fdt_dumpdtb(fdt, fdt_size);
cpu_physical_memory_write(addr, fdt, fdt_size);
}
ret = fdt_size;
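
The phandle helpers renamed above pair up as follows: the interrupt controller node is given a freshly allocated phandle, and client nodes then reference it by path. Condensed sketch only; "mpic" and "ser" stand for the node paths built with snprintf() in the hunk.

/* Condensed sketch of the phandle pairing used in the hunk above. */
uint32_t ph = qemu_fdt_alloc_phandle(fdt);
qemu_fdt_setprop_cell(fdt, mpic, "phandle", ph);
qemu_fdt_setprop_cell(fdt, mpic, "linux,phandle", ph);
/* Clients reference the controller by its node path: */
qemu_fdt_setprop_phandle(fdt, ser, "interrupt-parent", mpic);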
diff --git a/hw/ppc/e500plat.c b/hw/ppc/e500plat.c
index 2e964b2474..7d5357e83b 100644
--- a/hw/ppc/e500plat.c
+++ b/hw/ppc/e500plat.c
@@ -23,9 +23,9 @@ static void e500plat_fixup_devtree(PPCE500Params *params, void *fdt)
const char model[] = "QEMU ppce500";
const char compatible[] = "fsl,qemu-e500";
- qemu_devtree_setprop(fdt, "/", "model", model, sizeof(model));
- qemu_devtree_setprop(fdt, "/", "compatible", compatible,
- sizeof(compatible));
+ qemu_fdt_setprop(fdt, "/", "model", model, sizeof(model));
+ qemu_fdt_setprop(fdt, "/", "compatible", compatible,
+ sizeof(compatible));
}
static void e500plat_init(QEMUMachineInitArgs *args)
diff --git a/hw/ppc/mpc8544ds.c b/hw/ppc/mpc8544ds.c
index edcc0be5f7..292c70953b 100644
--- a/hw/ppc/mpc8544ds.c
+++ b/hw/ppc/mpc8544ds.c
@@ -21,9 +21,9 @@ static void mpc8544ds_fixup_devtree(PPCE500Params *params, void *fdt)
const char model[] = "MPC8544DS";
const char compatible[] = "MPC8544DS\0MPC85xxDS";
- qemu_devtree_setprop(fdt, "/", "model", model, sizeof(model));
- qemu_devtree_setprop(fdt, "/", "compatible", compatible,
- sizeof(compatible));
+ qemu_fdt_setprop(fdt, "/", "model", model, sizeof(model));
+ qemu_fdt_setprop(fdt, "/", "compatible", compatible,
+ sizeof(compatible));
}
static void mpc8544ds_init(QEMUMachineInitArgs *args)
diff --git a/hw/ppc/ppc440_bamboo.c b/hw/ppc/ppc440_bamboo.c
index 67597dfb88..ec15bab0b5 100644
--- a/hw/ppc/ppc440_bamboo.c
+++ b/hw/ppc/ppc440_bamboo.c
@@ -77,23 +77,23 @@ static int bamboo_load_device_tree(hwaddr addr,
/* Manipulate device tree in memory. */
- ret = qemu_devtree_setprop(fdt, "/memory", "reg", mem_reg_property,
- sizeof(mem_reg_property));
+ ret = qemu_fdt_setprop(fdt, "/memory", "reg", mem_reg_property,
+ sizeof(mem_reg_property));
if (ret < 0)
fprintf(stderr, "couldn't set /memory/reg\n");
- ret = qemu_devtree_setprop_cell(fdt, "/chosen", "linux,initrd-start",
- initrd_base);
+ ret = qemu_fdt_setprop_cell(fdt, "/chosen", "linux,initrd-start",
+ initrd_base);
if (ret < 0)
fprintf(stderr, "couldn't set /chosen/linux,initrd-start\n");
- ret = qemu_devtree_setprop_cell(fdt, "/chosen", "linux,initrd-end",
- (initrd_base + initrd_size));
+ ret = qemu_fdt_setprop_cell(fdt, "/chosen", "linux,initrd-end",
+ (initrd_base + initrd_size));
if (ret < 0)
fprintf(stderr, "couldn't set /chosen/linux,initrd-end\n");
- ret = qemu_devtree_setprop_string(fdt, "/chosen", "bootargs",
- kernel_cmdline);
+ ret = qemu_fdt_setprop_string(fdt, "/chosen", "bootargs",
+ kernel_cmdline);
if (ret < 0)
fprintf(stderr, "couldn't set /chosen/bootargs\n");
@@ -105,10 +105,10 @@ static int bamboo_load_device_tree(hwaddr addr,
clock_freq = kvmppc_get_clockfreq();
}
- qemu_devtree_setprop_cell(fdt, "/cpus/cpu@0", "clock-frequency",
- clock_freq);
- qemu_devtree_setprop_cell(fdt, "/cpus/cpu@0", "timebase-frequency",
- tb_freq);
+ qemu_fdt_setprop_cell(fdt, "/cpus/cpu@0", "clock-frequency",
+ clock_freq);
+ qemu_fdt_setprop_cell(fdt, "/cpus/cpu@0", "timebase-frequency",
+ tb_freq);
rom_add_blob_fixed(BINARY_DEVICE_TREE_FILE, fdt, fdt_size, addr);
g_free(fdt);
diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
index 7e53a5f977..93d02c1e50 100644
--- a/hw/ppc/spapr.c
+++ b/hw/ppc/spapr.c
@@ -526,14 +526,15 @@ static int spapr_populate_memory(sPAPREnvironment *spapr, void *fdt)
cpu_to_be32(0x0), cpu_to_be32(0x0),
cpu_to_be32(0x0)};
char mem_name[32];
- hwaddr node0_size, mem_start;
+ hwaddr node0_size, mem_start, node_size;
uint64_t mem_reg_property[2];
int i, off;
/* memory node(s) */
- node0_size = (nb_numa_nodes > 1) ? node_mem[0] : ram_size;
- if (spapr->rma_size > node0_size) {
- spapr->rma_size = node0_size;
+ if (nb_numa_nodes > 1 && node_mem[0] < ram_size) {
+ node0_size = node_mem[0];
+ } else {
+ node0_size = ram_size;
}
/* RMA */
@@ -566,7 +567,15 @@ static int spapr_populate_memory(sPAPREnvironment *spapr, void *fdt)
mem_start = node0_size;
for (i = 1; i < nb_numa_nodes; i++) {
mem_reg_property[0] = cpu_to_be64(mem_start);
- mem_reg_property[1] = cpu_to_be64(node_mem[i]);
+ if (mem_start >= ram_size) {
+ node_size = 0;
+ } else {
+ node_size = node_mem[i];
+ if (node_size > ram_size - mem_start) {
+ node_size = ram_size - mem_start;
+ }
+ }
+ mem_reg_property[1] = cpu_to_be64(node_size);
associativity[3] = associativity[4] = cpu_to_be32(i);
sprintf(mem_name, "memory@" TARGET_FMT_lx, mem_start);
off = fdt_add_subnode(fdt, 0, mem_name);
@@ -576,7 +585,7 @@ static int spapr_populate_memory(sPAPREnvironment *spapr, void *fdt)
sizeof(mem_reg_property))));
_FDT((fdt_setprop(fdt, off, "ibm,associativity", associativity,
sizeof(associativity))));
- mem_start += node_mem[i];
+ mem_start += node_size;
}
return 0;
@@ -688,7 +697,8 @@ static void spapr_reset_htab(sPAPREnvironment *spapr)
/* Update the RMA size if necessary */
if (spapr->vrma_adjust) {
- spapr->rma_size = kvmppc_rma_size(ram_size, spapr->htab_shift);
+ hwaddr node0_size = (nb_numa_nodes > 1) ? node_mem[0] : ram_size;
+ spapr->rma_size = kvmppc_rma_size(node0_size, spapr->htab_shift);
}
}
@@ -739,18 +749,10 @@ static void spapr_cpu_reset(void *opaque)
static void spapr_create_nvram(sPAPREnvironment *spapr)
{
DeviceState *dev = qdev_create(&spapr->vio_bus->bus, "spapr-nvram");
- const char *drivename = qemu_opt_get(qemu_get_machine_opts(), "nvram");
-
- if (drivename) {
- BlockDriverState *bs;
+ DriveInfo *dinfo = drive_get(IF_PFLASH, 0, 0);
- bs = bdrv_find(drivename);
- if (!bs) {
- fprintf(stderr, "No such block device \"%s\" for nvram\n",
- drivename);
- exit(1);
- }
- qdev_prop_set_drive_nofail(dev, "drive", bs);
+ if (dinfo) {
+ qdev_prop_set_drive_nofail(dev, "drive", dinfo->bdrv);
}
qdev_init_nofail(dev);
@@ -1113,6 +1115,7 @@ static void ppc_spapr_init(QEMUMachineInitArgs *args)
MemoryRegion *sysmem = get_system_memory();
MemoryRegion *ram = g_new(MemoryRegion, 1);
hwaddr rma_alloc_size;
+ hwaddr node0_size = (nb_numa_nodes > 1) ? node_mem[0] : ram_size;
uint32_t initrd_base = 0;
long kernel_size = 0, initrd_size = 0;
long load_limit, rtas_limit, fw_size;
@@ -1134,10 +1137,10 @@ static void ppc_spapr_init(QEMUMachineInitArgs *args)
exit(1);
}
- if (rma_alloc_size && (rma_alloc_size < ram_size)) {
+ if (rma_alloc_size && (rma_alloc_size < node0_size)) {
spapr->rma_size = rma_alloc_size;
} else {
- spapr->rma_size = ram_size;
+ spapr->rma_size = node0_size;
/* With KVM, we don't actually know whether KVM supports an
* unbounded RMA (PR KVM) or is limited by the hash table size
@@ -1154,6 +1157,12 @@ static void ppc_spapr_init(QEMUMachineInitArgs *args)
}
}
+ if (spapr->rma_size > node0_size) {
+ fprintf(stderr, "Error: Numa node 0 has to span the RMA (%#08"HWADDR_PRIx")\n",
+ spapr->rma_size);
+ exit(1);
+ }
+
/* We place the device tree and RTAS just below either the top of the RMA,
* or just below 2GB, whichever is lower, so that it can be
* processed with 32-bit real mode code if necessary */
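
The spapr_populate_memory() change above clamps each NUMA node's published size so the total never exceeds ram_size, emitting empty memory@ nodes once RAM runs out. A standalone worked example of that clamping (the values are made up: three requested 1 GiB nodes but only 2 GiB of RAM):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    const uint64_t GiB = 1ULL << 30;
    uint64_t ram_size = 2 * GiB;               /* actual RAM              */
    uint64_t node_mem[] = { GiB, GiB, GiB };   /* requested per-node size */
    uint64_t mem_start = node_mem[0];          /* node0_size              */
    int i;

    for (i = 1; i < 3; i++) {
        uint64_t node_size;
        if (mem_start >= ram_size) {
            node_size = 0;                     /* RAM already exhausted   */
        } else {
            node_size = node_mem[i];
            if (node_size > ram_size - mem_start) {
                node_size = ram_size - mem_start;
            }
        }
        printf("node %d: %" PRIu64 " MiB\n", i, node_size >> 20);
        mem_start += node_size;
    }
    return 0; /* prints: node 1: 1024 MiB, node 2: 0 MiB */
}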
diff --git a/hw/ppc/spapr_events.c b/hw/ppc/spapr_events.c
index a69390e54e..16fa49e886 100644
--- a/hw/ppc/spapr_events.c
+++ b/hw/ppc/spapr_events.c
@@ -286,7 +286,7 @@ static void check_exception(PowerPCCPU *cpu, sPAPREnvironment *spapr,
uint64_t xinfo;
if ((nargs < 6) || (nargs > 7) || nret != 1) {
- rtas_st(rets, 0, -3);
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
return;
}
@@ -306,9 +306,9 @@ static void check_exception(PowerPCCPU *cpu, sPAPREnvironment *spapr,
cpu_physical_memory_write(buf, pending_epow, len);
g_free(pending_epow);
pending_epow = NULL;
- rtas_st(rets, 0, 0);
+ rtas_st(rets, 0, RTAS_OUT_SUCCESS);
} else {
- rtas_st(rets, 0, 1);
+ rtas_st(rets, 0, RTAS_OUT_NO_ERRORS_FOUND);
}
}
diff --git a/hw/ppc/spapr_pci.c b/hw/ppc/spapr_pci.c
index 2beedd45e9..ec00300884 100644
--- a/hw/ppc/spapr_pci.c
+++ b/hw/ppc/spapr_pci.c
@@ -90,7 +90,7 @@ static void finish_read_pci_config(sPAPREnvironment *spapr, uint64_t buid,
if ((size != 1) && (size != 2) && (size != 4)) {
/* access must be 1, 2 or 4 bytes */
- rtas_st(rets, 0, -1);
+ rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
return;
}
@@ -100,14 +100,14 @@ static void finish_read_pci_config(sPAPREnvironment *spapr, uint64_t buid,
if (!pci_dev || (addr % size) || (addr >= pci_config_size(pci_dev))) {
/* Access must be to a valid device, within bounds and
* naturally aligned */
- rtas_st(rets, 0, -1);
+ rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
return;
}
val = pci_host_config_read_common(pci_dev, addr,
pci_config_size(pci_dev), size);
- rtas_st(rets, 0, 0);
+ rtas_st(rets, 0, RTAS_OUT_SUCCESS);
rtas_st(rets, 1, val);
}
@@ -120,7 +120,7 @@ static void rtas_ibm_read_pci_config(PowerPCCPU *cpu, sPAPREnvironment *spapr,
uint32_t size, addr;
if ((nargs != 4) || (nret != 2)) {
- rtas_st(rets, 0, -1);
+ rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
return;
}
@@ -139,7 +139,7 @@ static void rtas_read_pci_config(PowerPCCPU *cpu, sPAPREnvironment *spapr,
uint32_t size, addr;
if ((nargs != 2) || (nret != 2)) {
- rtas_st(rets, 0, -1);
+ rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
return;
}
@@ -157,7 +157,7 @@ static void finish_write_pci_config(sPAPREnvironment *spapr, uint64_t buid,
if ((size != 1) && (size != 2) && (size != 4)) {
/* access must be 1, 2 or 4 bytes */
- rtas_st(rets, 0, -1);
+ rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
return;
}
@@ -167,14 +167,14 @@ static void finish_write_pci_config(sPAPREnvironment *spapr, uint64_t buid,
if (!pci_dev || (addr % size) || (addr >= pci_config_size(pci_dev))) {
/* Access must be to a valid device, within bounds and
* naturally aligned */
- rtas_st(rets, 0, -1);
+ rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
return;
}
pci_host_config_write_common(pci_dev, addr, pci_config_size(pci_dev),
val, size);
- rtas_st(rets, 0, 0);
+ rtas_st(rets, 0, RTAS_OUT_SUCCESS);
}
static void rtas_ibm_write_pci_config(PowerPCCPU *cpu, sPAPREnvironment *spapr,
@@ -186,7 +186,7 @@ static void rtas_ibm_write_pci_config(PowerPCCPU *cpu, sPAPREnvironment *spapr,
uint32_t val, size, addr;
if ((nargs != 5) || (nret != 1)) {
- rtas_st(rets, 0, -1);
+ rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
return;
}
@@ -206,7 +206,7 @@ static void rtas_write_pci_config(PowerPCCPU *cpu, sPAPREnvironment *spapr,
uint32_t val, size, addr;
if ((nargs != 3) || (nret != 1)) {
- rtas_st(rets, 0, -1);
+ rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
return;
}
@@ -293,7 +293,7 @@ static void rtas_ibm_change_msi(PowerPCCPU *cpu, sPAPREnvironment *spapr,
break;
default:
fprintf(stderr, "rtas_ibm_change_msi(%u) is not implemented\n", func);
- rtas_st(rets, 0, -3); /* Parameter error */
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
return;
}
@@ -303,7 +303,7 @@ static void rtas_ibm_change_msi(PowerPCCPU *cpu, sPAPREnvironment *spapr,
pdev = find_dev(spapr, buid, config_addr);
}
if (!phb || !pdev) {
- rtas_st(rets, 0, -3); /* Parameter error */
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
return;
}
@@ -312,11 +312,11 @@ static void rtas_ibm_change_msi(PowerPCCPU *cpu, sPAPREnvironment *spapr,
ndev = spapr_msicfg_find(phb, config_addr, false);
if (ndev < 0) {
trace_spapr_pci_msi("MSI has not been enabled", -1, config_addr);
- rtas_st(rets, 0, -1); /* Hardware error */
+ rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
return;
}
trace_spapr_pci_msi("Released MSIs", ndev, config_addr);
- rtas_st(rets, 0, 0);
+ rtas_st(rets, 0, RTAS_OUT_SUCCESS);
rtas_st(rets, 1, 0);
return;
}
@@ -327,7 +327,7 @@ static void rtas_ibm_change_msi(PowerPCCPU *cpu, sPAPREnvironment *spapr,
ndev = spapr_msicfg_find(phb, config_addr, true);
if (ndev >= SPAPR_MSIX_MAX_DEVS || ndev < 0) {
fprintf(stderr, "No free entry for a new MSI device\n");
- rtas_st(rets, 0, -1); /* Hardware error */
+ rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
return;
}
trace_spapr_pci_msi("Configuring MSI", ndev, config_addr);
@@ -336,7 +336,7 @@ static void rtas_ibm_change_msi(PowerPCCPU *cpu, sPAPREnvironment *spapr,
if (phb->msi_table[ndev].nvec && (req_num != phb->msi_table[ndev].nvec)) {
/* Unexpected behaviour */
fprintf(stderr, "Cannot reuse MSI config for device#%d", ndev);
- rtas_st(rets, 0, -1); /* Hardware error */
+ rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
return;
}
@@ -346,7 +346,7 @@ static void rtas_ibm_change_msi(PowerPCCPU *cpu, sPAPREnvironment *spapr,
ret_intr_type == RTAS_TYPE_MSI);
if (irq < 0) {
fprintf(stderr, "Cannot allocate MSIs for device#%d", ndev);
- rtas_st(rets, 0, -1); /* Hardware error */
+ rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
return;
}
phb->msi_table[ndev].irq = irq;
@@ -358,7 +358,7 @@ static void rtas_ibm_change_msi(PowerPCCPU *cpu, sPAPREnvironment *spapr,
spapr_msi_setmsg(pdev, spapr->msi_win_addr, ret_intr_type == RTAS_TYPE_MSIX,
phb->msi_table[ndev].irq, req_num);
- rtas_st(rets, 0, 0);
+ rtas_st(rets, 0, RTAS_OUT_SUCCESS);
rtas_st(rets, 1, req_num);
rtas_st(rets, 2, ++seq_num);
rtas_st(rets, 3, ret_intr_type);
@@ -383,7 +383,7 @@ static void rtas_ibm_query_interrupt_source_number(PowerPCCPU *cpu,
/* Find sPAPRPHBState */
phb = find_phb(spapr, buid);
if (!phb) {
- rtas_st(rets, 0, -3); /* Parameter error */
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
return;
}
@@ -391,7 +391,7 @@ static void rtas_ibm_query_interrupt_source_number(PowerPCCPU *cpu,
ndev = spapr_msicfg_find(phb, config_addr, false);
if (ndev < 0) {
trace_spapr_pci_msi("MSI has not been enabled", -1, config_addr);
- rtas_st(rets, 0, -1); /* Hardware error */
+ rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
return;
}
@@ -399,7 +399,7 @@ static void rtas_ibm_query_interrupt_source_number(PowerPCCPU *cpu,
trace_spapr_pci_rtas_ibm_query_interrupt_source_number(ioa_intr_num,
intr_src_num);
- rtas_st(rets, 0, 0);
+ rtas_st(rets, 0, RTAS_OUT_SUCCESS);
rtas_st(rets, 1, intr_src_num);
rtas_st(rets, 2, 1);/* 0 == level; 1 == edge */
}
diff --git a/hw/ppc/spapr_rtas.c b/hw/ppc/spapr_rtas.c
index eb542f218a..1cb276de05 100644
--- a/hw/ppc/spapr_rtas.c
+++ b/hw/ppc/spapr_rtas.c
@@ -47,10 +47,10 @@ static void rtas_display_character(PowerPCCPU *cpu, sPAPREnvironment *spapr,
VIOsPAPRDevice *sdev = vty_lookup(spapr, 0);
if (!sdev) {
- rtas_st(rets, 0, -1);
+ rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
} else {
vty_putchars(sdev, &c, sizeof(c));
- rtas_st(rets, 0, 0);
+ rtas_st(rets, 0, RTAS_OUT_SUCCESS);
}
}
@@ -62,13 +62,13 @@ static void rtas_get_time_of_day(PowerPCCPU *cpu, sPAPREnvironment *spapr,
struct tm tm;
if (nret != 8) {
- rtas_st(rets, 0, -3);
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
return;
}
qemu_get_timedate(&tm, spapr->rtc_offset);
- rtas_st(rets, 0, 0); /* Success */
+ rtas_st(rets, 0, RTAS_OUT_SUCCESS);
rtas_st(rets, 1, tm.tm_year + 1900);
rtas_st(rets, 2, tm.tm_mon + 1);
rtas_st(rets, 3, tm.tm_mday);
@@ -96,7 +96,7 @@ static void rtas_set_time_of_day(PowerPCCPU *cpu, sPAPREnvironment *spapr,
rtc_change_mon_event(&tm);
spapr->rtc_offset = qemu_timedate_diff(&tm);
- rtas_st(rets, 0, 0); /* Success */
+ rtas_st(rets, 0, RTAS_OUT_SUCCESS);
}
static void rtas_power_off(PowerPCCPU *cpu, sPAPREnvironment *spapr,
@@ -104,11 +104,11 @@ static void rtas_power_off(PowerPCCPU *cpu, sPAPREnvironment *spapr,
uint32_t nret, target_ulong rets)
{
if (nargs != 2 || nret != 1) {
- rtas_st(rets, 0, -3);
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
return;
}
qemu_system_shutdown_request();
- rtas_st(rets, 0, 0);
+ rtas_st(rets, 0, RTAS_OUT_SUCCESS);
}
static void rtas_system_reboot(PowerPCCPU *cpu, sPAPREnvironment *spapr,
@@ -117,11 +117,11 @@ static void rtas_system_reboot(PowerPCCPU *cpu, sPAPREnvironment *spapr,
uint32_t nret, target_ulong rets)
{
if (nargs != 0 || nret != 1) {
- rtas_st(rets, 0, -3);
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
return;
}
qemu_system_reset_request();
- rtas_st(rets, 0, 0);
+ rtas_st(rets, 0, RTAS_OUT_SUCCESS);
}
static void rtas_query_cpu_stopped_state(PowerPCCPU *cpu_,
@@ -134,7 +134,7 @@ static void rtas_query_cpu_stopped_state(PowerPCCPU *cpu_,
CPUState *cpu;
if (nargs != 1 || nret != 2) {
- rtas_st(rets, 0, -3);
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
return;
}
@@ -147,12 +147,12 @@ static void rtas_query_cpu_stopped_state(PowerPCCPU *cpu_,
rtas_st(rets, 1, 2);
}
- rtas_st(rets, 0, 0);
+ rtas_st(rets, 0, RTAS_OUT_SUCCESS);
return;
}
/* Didn't find a matching cpu */
- rtas_st(rets, 0, -3);
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
}
static void rtas_start_cpu(PowerPCCPU *cpu_, sPAPREnvironment *spapr,
@@ -164,7 +164,7 @@ static void rtas_start_cpu(PowerPCCPU *cpu_, sPAPREnvironment *spapr,
CPUState *cs;
if (nargs != 3 || nret != 1) {
- rtas_st(rets, 0, -3);
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
return;
}
@@ -178,7 +178,7 @@ static void rtas_start_cpu(PowerPCCPU *cpu_, sPAPREnvironment *spapr,
CPUPPCState *env = &cpu->env;
if (!cs->halted) {
- rtas_st(rets, 0, -1);
+ rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
return;
}
@@ -194,12 +194,12 @@ static void rtas_start_cpu(PowerPCCPU *cpu_, sPAPREnvironment *spapr,
qemu_cpu_kick(cs);
- rtas_st(rets, 0, 0);
+ rtas_st(rets, 0, RTAS_OUT_SUCCESS);
return;
}
/* Didn't find a matching cpu */
- rtas_st(rets, 0, -3);
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
}
static void rtas_stop_self(PowerPCCPU *cpu, sPAPREnvironment *spapr,
@@ -224,6 +224,49 @@ static void rtas_stop_self(PowerPCCPU *cpu, sPAPREnvironment *spapr,
env->msr = 0;
}
+#define DIAGNOSTICS_RUN_MODE 42
+
+static void rtas_ibm_get_system_parameter(PowerPCCPU *cpu,
+ sPAPREnvironment *spapr,
+ uint32_t token, uint32_t nargs,
+ target_ulong args,
+ uint32_t nret, target_ulong rets)
+{
+ target_ulong parameter = rtas_ld(args, 0);
+ target_ulong buffer = rtas_ld(args, 1);
+ target_ulong length = rtas_ld(args, 2);
+ target_ulong ret = RTAS_OUT_NOT_SUPPORTED;
+
+ switch (parameter) {
+ case DIAGNOSTICS_RUN_MODE:
+ if (length == 1) {
+ rtas_st(buffer, 0, 0);
+ ret = RTAS_OUT_SUCCESS;
+ }
+ break;
+ }
+
+ rtas_st(rets, 0, ret);
+}
+
+static void rtas_ibm_set_system_parameter(PowerPCCPU *cpu,
+ sPAPREnvironment *spapr,
+ uint32_t token, uint32_t nargs,
+ target_ulong args,
+ uint32_t nret, target_ulong rets)
+{
+ target_ulong parameter = rtas_ld(args, 0);
+ target_ulong ret = RTAS_OUT_NOT_SUPPORTED;
+
+ switch (parameter) {
+ case DIAGNOSTICS_RUN_MODE:
+ ret = RTAS_OUT_NOT_AUTHORIZED;
+ break;
+ }
+
+ rtas_st(rets, 0, ret);
+}
+
static struct rtas_call {
const char *name;
spapr_rtas_fn fn;
@@ -255,7 +298,7 @@ target_ulong spapr_rtas_call(PowerPCCPU *cpu, sPAPREnvironment *spapr,
}
hcall_dprintf("Unknown RTAS token 0x%x\n", token);
- rtas_st(rets, 0, -3);
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
return H_PARAMETER;
}
@@ -291,24 +334,24 @@ int spapr_rtas_device_tree_setup(void *fdt, hwaddr rtas_addr,
return ret;
}
- ret = qemu_devtree_setprop_cell(fdt, "/rtas", "linux,rtas-base",
- rtas_addr);
+ ret = qemu_fdt_setprop_cell(fdt, "/rtas", "linux,rtas-base",
+ rtas_addr);
if (ret < 0) {
fprintf(stderr, "Couldn't add linux,rtas-base property: %s\n",
fdt_strerror(ret));
return ret;
}
- ret = qemu_devtree_setprop_cell(fdt, "/rtas", "linux,rtas-entry",
- rtas_addr);
+ ret = qemu_fdt_setprop_cell(fdt, "/rtas", "linux,rtas-entry",
+ rtas_addr);
if (ret < 0) {
fprintf(stderr, "Couldn't add linux,rtas-entry property: %s\n",
fdt_strerror(ret));
return ret;
}
- ret = qemu_devtree_setprop_cell(fdt, "/rtas", "rtas-size",
- rtas_size);
+ ret = qemu_fdt_setprop_cell(fdt, "/rtas", "rtas-size",
+ rtas_size);
if (ret < 0) {
fprintf(stderr, "Couldn't add rtas-size property: %s\n",
fdt_strerror(ret));
@@ -322,8 +365,8 @@ int spapr_rtas_device_tree_setup(void *fdt, hwaddr rtas_addr,
continue;
}
- ret = qemu_devtree_setprop_cell(fdt, "/rtas", call->name,
- i + TOKEN_BASE);
+ ret = qemu_fdt_setprop_cell(fdt, "/rtas", call->name,
+ i + TOKEN_BASE);
if (ret < 0) {
fprintf(stderr, "Couldn't add rtas token for %s: %s\n",
call->name, fdt_strerror(ret));
@@ -345,6 +388,10 @@ static void core_rtas_register_types(void)
rtas_query_cpu_stopped_state);
spapr_rtas_register("start-cpu", rtas_start_cpu);
spapr_rtas_register("stop-self", rtas_stop_self);
+ spapr_rtas_register("ibm,get-system-parameter",
+ rtas_ibm_get_system_parameter);
+ spapr_rtas_register("ibm,set-system-parameter",
+ rtas_ibm_set_system_parameter);
}
type_init(core_rtas_register_types)
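
For context on how the two new ibm,*-system-parameter calls become visible to the guest: spapr_rtas_register() adds them to the rtas_table, and spapr_rtas_device_tree_setup() above exports each registered name as a token property under /rtas via qemu_fdt_setprop_cell(). A hedged sketch of the same pattern for a made-up call ("example-call" is illustrative, not a real PAPR name):

static void rtas_example(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                         uint32_t token, uint32_t nargs, target_ulong args,
                         uint32_t nret, target_ulong rets)
{
    rtas_st(rets, 0, RTAS_OUT_NOT_SUPPORTED);
}

static void example_rtas_register_types(void)
{
    /* The guest later finds this call's token in /rtas/example-call. */
    spapr_rtas_register("example-call", rtas_example);
}

type_init(example_rtas_register_types)
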
diff --git a/hw/ppc/spapr_vio.c b/hw/ppc/spapr_vio.c
index a6a0a5113c..fee6195f95 100644
--- a/hw/ppc/spapr_vio.c
+++ b/hw/ppc/spapr_vio.c
@@ -331,25 +331,25 @@ static void rtas_set_tce_bypass(PowerPCCPU *cpu, sPAPREnvironment *spapr,
uint32_t unit, enable;
if (nargs != 2) {
- rtas_st(rets, 0, -3);
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
return;
}
unit = rtas_ld(args, 0);
enable = rtas_ld(args, 1);
dev = spapr_vio_find_by_reg(bus, unit);
if (!dev) {
- rtas_st(rets, 0, -3);
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
return;
}
if (!dev->tcet) {
- rtas_st(rets, 0, -3);
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
return;
}
spapr_tce_set_bypass(dev->tcet, !!enable);
- rtas_st(rets, 0, 0);
+ rtas_st(rets, 0, RTAS_OUT_SUCCESS);
}
static void rtas_quiesce(PowerPCCPU *cpu, sPAPREnvironment *spapr,
@@ -362,7 +362,7 @@ static void rtas_quiesce(PowerPCCPU *cpu, sPAPREnvironment *spapr,
VIOsPAPRDevice *dev = NULL;
if (nargs != 0) {
- rtas_st(rets, 0, -3);
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
return;
}
@@ -371,7 +371,7 @@ static void rtas_quiesce(PowerPCCPU *cpu, sPAPREnvironment *spapr,
spapr_vio_quiesce_one(dev);
}
- rtas_st(rets, 0, 0);
+ rtas_st(rets, 0, RTAS_OUT_SUCCESS);
}
static VIOsPAPRDevice *reg_conflict(VIOsPAPRDevice *dev)
diff --git a/hw/ppc/virtex_ml507.c b/hw/ppc/virtex_ml507.c
index fcfa678344..bdb057e36c 100644
--- a/hw/ppc/virtex_ml507.c
+++ b/hw/ppc/virtex_ml507.c
@@ -166,7 +166,7 @@ static int xilinx_load_device_tree(hwaddr addr,
if (!fdt) {
return 0;
}
- r = qemu_devtree_setprop_string(fdt, "/chosen", "bootargs", kernel_cmdline);
+ r = qemu_fdt_setprop_string(fdt, "/chosen", "bootargs", kernel_cmdline);
if (r < 0)
fprintf(stderr, "couldn't set /chosen/bootargs\n");
cpu_physical_memory_write(addr, fdt, fdt_size);
diff --git a/hw/timer/Makefile.objs b/hw/timer/Makefile.objs
index 3ae091c95e..2c86c3d412 100644
--- a/hw/timer/Makefile.objs
+++ b/hw/timer/Makefile.objs
@@ -26,5 +26,8 @@ obj-$(CONFIG_OMAP) += omap_synctimer.o
obj-$(CONFIG_PXA2XX) += pxa2xx_timer.o
obj-$(CONFIG_SH4) += sh_timer.o
obj-$(CONFIG_TUSB6010) += tusb6010.o
+obj-$(CONFIG_DIGIC) += digic-timer.o
obj-$(CONFIG_MC146818RTC) += mc146818rtc.o
+
+obj-$(CONFIG_ALLWINNER_A10_PIT) += allwinner-a10-pit.o
diff --git a/hw/timer/allwinner-a10-pit.c b/hw/timer/allwinner-a10-pit.c
new file mode 100644
index 0000000000..b27fce8cd2
--- /dev/null
+++ b/hw/timer/allwinner-a10-pit.c
@@ -0,0 +1,254 @@
+/*
+ * Allwinner A10 timer device emulation
+ *
+ * Copyright (C) 2013 Li Guang
+ * Written by Li Guang <lig.fnst@cn.fujitsu.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include "hw/sysbus.h"
+#include "sysemu/sysemu.h"
+#include "hw/timer/allwinner-a10-pit.h"
+
+static uint64_t a10_pit_read(void *opaque, hwaddr offset, unsigned size)
+{
+ AwA10PITState *s = AW_A10_PIT(opaque);
+ uint8_t index;
+
+ switch (offset) {
+ case AW_A10_PIT_TIMER_IRQ_EN:
+ return s->irq_enable;
+ case AW_A10_PIT_TIMER_IRQ_ST:
+ return s->irq_status;
+ case AW_A10_PIT_TIMER_BASE ... AW_A10_PIT_TIMER_BASE_END:
+ index = offset & 0xf0;
+ index >>= 4;
+ index -= 1;
+ switch (offset & 0x0f) {
+ case AW_A10_PIT_TIMER_CONTROL:
+ return s->control[index];
+ case AW_A10_PIT_TIMER_INTERVAL:
+ return s->interval[index];
+ case AW_A10_PIT_TIMER_COUNT:
+ s->count[index] = ptimer_get_count(s->timer[index]);
+ return s->count[index];
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: Bad offset 0x%x\n", __func__, (int)offset);
+ break;
+ }
+ case AW_A10_PIT_WDOG_CONTROL:
+ break;
+ case AW_A10_PIT_WDOG_MODE:
+ break;
+ case AW_A10_PIT_COUNT_LO:
+ return s->count_lo;
+ case AW_A10_PIT_COUNT_HI:
+ return s->count_hi;
+ case AW_A10_PIT_COUNT_CTL:
+ return s->count_ctl;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: Bad offset 0x%x\n", __func__, (int)offset);
+ break;
+ }
+
+ return 0;
+}
+
+static void a10_pit_write(void *opaque, hwaddr offset, uint64_t value,
+ unsigned size)
+{
+ AwA10PITState *s = AW_A10_PIT(opaque);
+ uint8_t index;
+
+ switch (offset) {
+ case AW_A10_PIT_TIMER_IRQ_EN:
+ s->irq_enable = value;
+ break;
+ case AW_A10_PIT_TIMER_IRQ_ST:
+ s->irq_status &= ~value;
+ break;
+ case AW_A10_PIT_TIMER_BASE ... AW_A10_PIT_TIMER_BASE_END:
+ index = offset & 0xf0;
+ index >>= 4;
+ index -= 1;
+ switch (offset & 0x0f) {
+ case AW_A10_PIT_TIMER_CONTROL:
+ s->control[index] = value;
+ if (s->control[index] & AW_A10_PIT_TIMER_RELOAD) {
+ ptimer_set_count(s->timer[index], s->interval[index]);
+ }
+ if (s->control[index] & AW_A10_PIT_TIMER_EN) {
+ int oneshot = 0;
+ if (s->control[index] & AW_A10_PIT_TIMER_MODE) {
+ oneshot = 1;
+ }
+ ptimer_run(s->timer[index], oneshot);
+ } else {
+ ptimer_stop(s->timer[index]);
+ }
+ break;
+ case AW_A10_PIT_TIMER_INTERVAL:
+ s->interval[index] = value;
+ ptimer_set_limit(s->timer[index], s->interval[index], 1);
+ break;
+ case AW_A10_PIT_TIMER_COUNT:
+ s->count[index] = value;
+ break;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: Bad offset 0x%x\n", __func__, (int)offset);
+ }
+ break;
+ case AW_A10_PIT_WDOG_CONTROL:
+ s->watch_dog_control = value;
+ break;
+ case AW_A10_PIT_WDOG_MODE:
+ s->watch_dog_mode = value;
+ break;
+ case AW_A10_PIT_COUNT_LO:
+ s->count_lo = value;
+ break;
+ case AW_A10_PIT_COUNT_HI:
+ s->count_hi = value;
+ break;
+ case AW_A10_PIT_COUNT_CTL:
+ s->count_ctl = value;
+ if (s->count_ctl & AW_A10_PIT_COUNT_RL_EN) {
+ uint64_t tmp_count = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+
+ s->count_lo = tmp_count;
+ s->count_hi = tmp_count >> 32;
+ s->count_ctl &= ~AW_A10_PIT_COUNT_RL_EN;
+ }
+ if (s->count_ctl & AW_A10_PIT_COUNT_CLR_EN) {
+ s->count_lo = 0;
+ s->count_hi = 0;
+ s->count_ctl &= ~AW_A10_PIT_COUNT_CLR_EN;
+ }
+ break;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: Bad offset 0x%x\n", __func__, (int)offset);
+ break;
+ }
+}
+
+static const MemoryRegionOps a10_pit_ops = {
+ .read = a10_pit_read,
+ .write = a10_pit_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+static const VMStateDescription vmstate_a10_pit = {
+ .name = "a10.pit",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .minimum_version_id_old = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(irq_enable, AwA10PITState),
+ VMSTATE_UINT32(irq_status, AwA10PITState),
+ VMSTATE_UINT32_ARRAY(control, AwA10PITState, AW_A10_PIT_TIMER_NR),
+ VMSTATE_UINT32_ARRAY(interval, AwA10PITState, AW_A10_PIT_TIMER_NR),
+ VMSTATE_UINT32_ARRAY(count, AwA10PITState, AW_A10_PIT_TIMER_NR),
+ VMSTATE_UINT32(watch_dog_mode, AwA10PITState),
+ VMSTATE_UINT32(watch_dog_control, AwA10PITState),
+ VMSTATE_UINT32(count_lo, AwA10PITState),
+ VMSTATE_UINT32(count_hi, AwA10PITState),
+ VMSTATE_UINT32(count_ctl, AwA10PITState),
+ VMSTATE_PTIMER_ARRAY(timer, AwA10PITState, AW_A10_PIT_TIMER_NR),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static void a10_pit_reset(DeviceState *dev)
+{
+ AwA10PITState *s = AW_A10_PIT(dev);
+ uint8_t i;
+
+ s->irq_enable = 0;
+ s->irq_status = 0;
+    for (i = 0; i < AW_A10_PIT_TIMER_NR; i++) {
+ s->control[i] = AW_A10_PIT_DEFAULT_CLOCK;
+ s->interval[i] = 0;
+ s->count[i] = 0;
+ ptimer_stop(s->timer[i]);
+ }
+ s->watch_dog_mode = 0;
+ s->watch_dog_control = 0;
+ s->count_lo = 0;
+ s->count_hi = 0;
+ s->count_ctl = 0;
+}
+
+static void a10_pit_timer_cb(void *opaque)
+{
+ AwA10PITState *s = AW_A10_PIT(opaque);
+ uint8_t i;
+
+ for (i = 0; i < AW_A10_PIT_TIMER_NR; i++) {
+ if (s->control[i] & AW_A10_PIT_TIMER_EN) {
+ s->irq_status |= 1 << i;
+ if (s->control[i] & AW_A10_PIT_TIMER_MODE) {
+ ptimer_stop(s->timer[i]);
+ s->control[i] &= ~AW_A10_PIT_TIMER_EN;
+ }
+ qemu_irq_pulse(s->irq[i]);
+ }
+ }
+}
+
+static void a10_pit_init(Object *obj)
+{
+ AwA10PITState *s = AW_A10_PIT(obj);
+ SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
+    QEMUBH *bh[AW_A10_PIT_TIMER_NR];
+ uint8_t i;
+
+ for (i = 0; i < AW_A10_PIT_TIMER_NR; i++) {
+ sysbus_init_irq(sbd, &s->irq[i]);
+ }
+ memory_region_init_io(&s->iomem, OBJECT(s), &a10_pit_ops, s,
+ TYPE_AW_A10_PIT, 0x400);
+ sysbus_init_mmio(sbd, &s->iomem);
+
+ for (i = 0; i < AW_A10_PIT_TIMER_NR; i++) {
+ bh[i] = qemu_bh_new(a10_pit_timer_cb, s);
+ s->timer[i] = ptimer_init(bh[i]);
+ ptimer_set_freq(s->timer[i], 240000);
+ }
+}
+
+static void a10_pit_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->reset = a10_pit_reset;
+ dc->desc = "allwinner a10 timer";
+ dc->vmsd = &vmstate_a10_pit;
+}
+
+static const TypeInfo a10_pit_info = {
+ .name = TYPE_AW_A10_PIT,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(AwA10PITState),
+ .instance_init = a10_pit_init,
+ .class_init = a10_pit_class_init,
+};
+
+static void a10_register_types(void)
+{
+ type_register_static(&a10_pit_info);
+}
+
+type_init(a10_register_types)
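
The per-timer register decode in a10_pit_read()/a10_pit_write() above maps an offset inside the 0x10..0x68 window to a timer index and a register within that timer. A stand-alone worked example of the same arithmetic (nothing here is QEMU-specific):

#include <assert.h>

int main(void)
{
    /* Timer n's registers live at 0x10 * (n + 1), so offset 0x24 is
     * timer 1's interval register (offset 0x4 within the timer block).
     */
    unsigned offset = 0x24;
    unsigned index = ((offset & 0xf0) >> 4) - 1;
    unsigned reg = offset & 0x0f;

    assert(index == 1);
    assert(reg == 0x4);    /* AW_A10_PIT_TIMER_INTERVAL */
    return 0;
}
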
diff --git a/hw/timer/digic-timer.c b/hw/timer/digic-timer.c
new file mode 100644
index 0000000000..1fde22c67f
--- /dev/null
+++ b/hw/timer/digic-timer.c
@@ -0,0 +1,163 @@
+/*
+ * QEMU model of the Canon DIGIC timer block.
+ *
+ * Copyright (C) 2013 Antony Pavlov <antonynpavlov@gmail.com>
+ *
+ * This model is based on reverse engineering efforts
+ * made by CHDK (http://chdk.wikia.com) and
+ * Magic Lantern (http://www.magiclantern.fm) projects
+ * contributors.
+ *
+ * See "Timer/Clock Module" docs here:
+ * http://magiclantern.wikia.com/wiki/Register_Map
+ *
+ * The QEMU model of the OSTimer in PKUnity SoC by Guan Xuetao
+ * is used as a template.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "hw/sysbus.h"
+#include "hw/ptimer.h"
+#include "qemu/main-loop.h"
+
+#include "hw/timer/digic-timer.h"
+
+static const VMStateDescription vmstate_digic_timer = {
+ .name = "digic.timer",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .minimum_version_id_old = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_PTIMER(ptimer, DigicTimerState),
+ VMSTATE_UINT32(control, DigicTimerState),
+ VMSTATE_UINT32(relvalue, DigicTimerState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static void digic_timer_reset(DeviceState *dev)
+{
+ DigicTimerState *s = DIGIC_TIMER(dev);
+
+ ptimer_stop(s->ptimer);
+ s->control = 0;
+ s->relvalue = 0;
+}
+
+static uint64_t digic_timer_read(void *opaque, hwaddr offset, unsigned size)
+{
+ DigicTimerState *s = opaque;
+ uint64_t ret = 0;
+
+ switch (offset) {
+ case DIGIC_TIMER_CONTROL:
+ ret = s->control;
+ break;
+ case DIGIC_TIMER_RELVALUE:
+ ret = s->relvalue;
+ break;
+ case DIGIC_TIMER_VALUE:
+ ret = ptimer_get_count(s->ptimer) & 0xffff;
+ break;
+ default:
+ qemu_log_mask(LOG_UNIMP,
+ "digic-timer: read access to unknown register 0x"
+ TARGET_FMT_plx, offset);
+ }
+
+ return ret;
+}
+
+static void digic_timer_write(void *opaque, hwaddr offset,
+ uint64_t value, unsigned size)
+{
+ DigicTimerState *s = opaque;
+
+ switch (offset) {
+ case DIGIC_TIMER_CONTROL:
+ if (value & DIGIC_TIMER_CONTROL_RST) {
+ digic_timer_reset((DeviceState *)s);
+ break;
+ }
+
+ if (value & DIGIC_TIMER_CONTROL_EN) {
+ ptimer_run(s->ptimer, 0);
+ }
+
+ s->control = (uint32_t)value;
+ break;
+
+ case DIGIC_TIMER_RELVALUE:
+ s->relvalue = extract32(value, 0, 16);
+ ptimer_set_limit(s->ptimer, s->relvalue, 1);
+ break;
+
+ case DIGIC_TIMER_VALUE:
+ break;
+
+ default:
+        qemu_log_mask(LOG_UNIMP,
+                      "digic-timer: write access to unknown register 0x"
+                      TARGET_FMT_plx, offset);
+ }
+}
+
+static const MemoryRegionOps digic_timer_ops = {
+ .read = digic_timer_read,
+ .write = digic_timer_write,
+ .impl = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ },
+ .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+static void digic_timer_init(Object *obj)
+{
+ DigicTimerState *s = DIGIC_TIMER(obj);
+
+ s->ptimer = ptimer_init(NULL);
+
+ /*
+     * FIXME: there is no documentation on the DIGIC timer
+     * frequency setup, so let it always run at 1 MHz.
+ */
+ ptimer_set_freq(s->ptimer, 1 * 1000 * 1000);
+
+ memory_region_init_io(&s->iomem, OBJECT(s), &digic_timer_ops, s,
+ TYPE_DIGIC_TIMER, 0x100);
+ sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->iomem);
+}
+
+static void digic_timer_class_init(ObjectClass *klass, void *class_data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->reset = digic_timer_reset;
+ dc->vmsd = &vmstate_digic_timer;
+}
+
+static const TypeInfo digic_timer_info = {
+ .name = TYPE_DIGIC_TIMER,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(DigicTimerState),
+ .instance_init = digic_timer_init,
+ .class_init = digic_timer_class_init,
+};
+
+static void digic_timer_register_type(void)
+{
+ type_register_static(&digic_timer_info);
+}
+
+type_init(digic_timer_register_type)
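
Both new timer models drive the generic ptimer the same way: create it (with a bottom-half callback, or NULL as the DIGIC model above does), set its frequency, program a reload limit, then run it periodically or one-shot. A minimal sketch of that pattern, assuming a hypothetical MyTimerState and my_timer_tick():

#include "hw/ptimer.h"
#include "qemu/main-loop.h"

typedef struct MyTimerState {          /* hypothetical device state */
    ptimer_state *ptimer;
} MyTimerState;

static void my_timer_tick(void *opaque)
{
    /* raise an interrupt, set a status bit, ... */
}

static void my_timer_setup(MyTimerState *s)
{
    QEMUBH *bh = qemu_bh_new(my_timer_tick, s);

    s->ptimer = ptimer_init(bh);            /* DIGIC passes NULL here */
    ptimer_set_freq(s->ptimer, 1000000);    /* count at 1 MHz */
    ptimer_set_limit(s->ptimer, 0xffff, 1); /* reload value; 1 = reset count */
    ptimer_run(s->ptimer, 0);               /* 0 = periodic, 1 = one-shot */
}
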
diff --git a/include/exec/cpu-common.h b/include/exec/cpu-common.h
index e4996e19c3..8f33122c9f 100644
--- a/include/exec/cpu-common.h
+++ b/include/exec/cpu-common.h
@@ -110,6 +110,7 @@ void stq_phys(hwaddr addr, uint64_t val);
void cpu_physical_memory_write_rom(hwaddr addr,
const uint8_t *buf, int len);
+void cpu_flush_icache_range(hwaddr start, int len);
extern struct MemoryRegion io_mem_rom;
extern struct MemoryRegion io_mem_notdirty;
diff --git a/include/hw/arm/allwinner-a10.h b/include/hw/arm/allwinner-a10.h
new file mode 100644
index 0000000000..da36647f32
--- /dev/null
+++ b/include/hw/arm/allwinner-a10.h
@@ -0,0 +1,35 @@
+#ifndef ALLWINNER_H_
+#define ALLWINNER_H_
+
+#include "qemu-common.h"
+#include "qemu/error-report.h"
+#include "hw/char/serial.h"
+#include "hw/arm/arm.h"
+#include "hw/timer/allwinner-a10-pit.h"
+#include "hw/intc/allwinner-a10-pic.h"
+
+#include "sysemu/sysemu.h"
+#include "exec/address-spaces.h"
+
+
+#define AW_A10_PIC_REG_BASE 0x01c20400
+#define AW_A10_PIT_REG_BASE 0x01c20c00
+#define AW_A10_UART0_REG_BASE 0x01c28000
+
+#define AW_A10_SDRAM_BASE 0x40000000
+
+#define TYPE_AW_A10 "allwinner-a10"
+#define AW_A10(obj) OBJECT_CHECK(AwA10State, (obj), TYPE_AW_A10)
+
+typedef struct AwA10State {
+ /*< private >*/
+ DeviceState parent_obj;
+ /*< public >*/
+
+ ARMCPU cpu;
+ qemu_irq irq[AW_A10_PIC_INT_NR];
+ AwA10PITState timer;
+ AwA10PICState intc;
+} AwA10State;
+
+#endif
diff --git a/include/hw/arm/digic.h b/include/hw/arm/digic.h
new file mode 100644
index 0000000000..a739d6ae65
--- /dev/null
+++ b/include/hw/arm/digic.h
@@ -0,0 +1,43 @@
+/*
+ * Misc Canon DIGIC declarations.
+ *
+ * Copyright (C) 2013 Antony Pavlov <antonynpavlov@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef HW_ARM_DIGIC_H
+#define HW_ARM_DIGIC_H
+
+#include "cpu.h"
+
+#include "hw/timer/digic-timer.h"
+#include "hw/char/digic-uart.h"
+
+#define TYPE_DIGIC "digic"
+
+#define DIGIC(obj) OBJECT_CHECK(DigicState, (obj), TYPE_DIGIC)
+
+#define DIGIC4_NB_TIMERS 3
+
+typedef struct DigicState {
+ /*< private >*/
+ DeviceState parent_obj;
+ /*< public >*/
+
+ ARMCPU cpu;
+
+ DigicTimerState timer[DIGIC4_NB_TIMERS];
+ DigicUartState uart;
+} DigicState;
+
+#endif /* HW_ARM_DIGIC_H */
diff --git a/include/hw/char/digic-uart.h b/include/hw/char/digic-uart.h
new file mode 100644
index 0000000000..ef83a3059c
--- /dev/null
+++ b/include/hw/char/digic-uart.h
@@ -0,0 +1,47 @@
+/*
+ * Canon DIGIC UART block declarations.
+ *
+ * Copyright (C) 2013 Antony Pavlov <antonynpavlov@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef HW_CHAR_DIGIC_UART_H
+#define HW_CHAR_DIGIC_UART_H
+
+#include "hw/sysbus.h"
+#include "qemu/typedefs.h"
+
+#define TYPE_DIGIC_UART "digic-uart"
+#define DIGIC_UART(obj) \
+ OBJECT_CHECK(DigicUartState, (obj), TYPE_DIGIC_UART)
+
+enum {
+ R_TX = 0x00,
+ R_RX,
+ R_ST = (0x14 >> 2),
+ R_MAX
+};
+
+typedef struct DigicUartState {
+ /*< private >*/
+ SysBusDevice parent_obj;
+ /*< public >*/
+
+ MemoryRegion regs_region;
+ CharDriverState *chr;
+
+ uint32_t reg_rx;
+ uint32_t reg_st;
+} DigicUartState;
+
+#endif /* HW_CHAR_DIGIC_UART_H */
diff --git a/include/hw/intc/allwinner-a10-pic.h b/include/hw/intc/allwinner-a10-pic.h
new file mode 100644
index 0000000000..5721b2e6b6
--- /dev/null
+++ b/include/hw/intc/allwinner-a10-pic.h
@@ -0,0 +1,40 @@
+#ifndef AW_A10_PIC_H
+#define AW_A10_PIC_H
+
+#define TYPE_AW_A10_PIC "allwinner-a10-pic"
+#define AW_A10_PIC(obj) OBJECT_CHECK(AwA10PICState, (obj), TYPE_AW_A10_PIC)
+
+#define AW_A10_PIC_VECTOR 0
+#define AW_A10_PIC_BASE_ADDR 4
+#define AW_A10_PIC_PROTECT 8
+#define AW_A10_PIC_NMI 0xc
+#define AW_A10_PIC_IRQ_PENDING 0x10
+#define AW_A10_PIC_FIQ_PENDING 0x20
+#define AW_A10_PIC_SELECT 0x30
+#define AW_A10_PIC_ENABLE 0x40
+#define AW_A10_PIC_MASK 0x50
+
+#define AW_A10_PIC_INT_NR 95
+#define AW_A10_PIC_REG_NUM DIV_ROUND_UP(AW_A10_PIC_INT_NR, 32)
+
+typedef struct AwA10PICState {
+ /*< private >*/
+ SysBusDevice parent_obj;
+ /*< public >*/
+ MemoryRegion iomem;
+ qemu_irq parent_fiq;
+ qemu_irq parent_irq;
+
+ uint32_t vector;
+ uint32_t base_addr;
+ uint32_t protect;
+ uint32_t nmi;
+ uint32_t irq_pending[AW_A10_PIC_REG_NUM];
+ uint32_t fiq_pending[AW_A10_PIC_REG_NUM];
+ uint32_t select[AW_A10_PIC_REG_NUM];
+ uint32_t enable[AW_A10_PIC_REG_NUM];
+ uint32_t mask[AW_A10_PIC_REG_NUM];
+    /* priority setting here */
+} AwA10PICState;
+
+#endif
diff --git a/include/hw/ppc/spapr.h b/include/hw/ppc/spapr.h
index fdaab2de52..b2f11e9a2c 100644
--- a/include/hw/ppc/spapr.h
+++ b/include/hw/ppc/spapr.h
@@ -332,6 +332,15 @@ static inline int spapr_allocate_lsi(int hint)
return spapr_allocate_irq(hint, true);
}
+/* RTAS return codes */
+#define RTAS_OUT_SUCCESS 0
+#define RTAS_OUT_NO_ERRORS_FOUND 1
+#define RTAS_OUT_HW_ERROR -1
+#define RTAS_OUT_BUSY -2
+#define RTAS_OUT_PARAM_ERROR -3
+#define RTAS_OUT_NOT_SUPPORTED -3
+#define RTAS_OUT_NOT_AUTHORIZED -9002
+
static inline uint64_t ppc64_phys_to_real(uint64_t addr)
{
return addr & ~0xF000000000000000ULL;
diff --git a/include/hw/ptimer.h b/include/hw/ptimer.h
index 28fcaf17f8..a33edf4b0c 100644
--- a/include/hw/ptimer.h
+++ b/include/hw/ptimer.h
@@ -36,4 +36,8 @@ extern const VMStateDescription vmstate_ptimer;
.offset = vmstate_offset_pointer(_state, _field, ptimer_state), \
}
+#define VMSTATE_PTIMER_ARRAY(_f, _s, _n) \
+ VMSTATE_ARRAY_OF_POINTER_TO_STRUCT(_f, _s, _n, 0, \
+ vmstate_ptimer, ptimer_state)
+
#endif
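
For reference, the VMSTATE_PTIMER_ARRAY entry used by vmstate_a10_pit above expands, via the VMSTATE_ARRAY_OF_POINTER_TO_STRUCT helper added to include/migration/vmstate.h below, into roughly the following field description (expanded by hand from the two macros):

/* VMSTATE_PTIMER_ARRAY(timer, AwA10PITState, AW_A10_PIT_TIMER_NR) ~= */
{
    .name = "timer",
    .version_id = 0,
    .num = AW_A10_PIT_TIMER_NR,
    .vmsd = &vmstate_ptimer,
    .size = sizeof(ptimer_state *),
    .flags = VMS_ARRAY | VMS_STRUCT | VMS_ARRAY_OF_POINTER,
    .offset = vmstate_offset_array(AwA10PITState, timer,
                                   ptimer_state *, AW_A10_PIT_TIMER_NR),
}
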
diff --git a/include/hw/timer/allwinner-a10-pit.h b/include/hw/timer/allwinner-a10-pit.h
new file mode 100644
index 0000000000..15efab8b5f
--- /dev/null
+++ b/include/hw/timer/allwinner-a10-pit.h
@@ -0,0 +1,58 @@
+#ifndef AW_A10_PIT_H
+#define AW_A10_PIT_H
+
+#include "hw/ptimer.h"
+
+#define TYPE_AW_A10_PIT "allwinner-A10-timer"
+#define AW_A10_PIT(obj) OBJECT_CHECK(AwA10PITState, (obj), TYPE_AW_A10_PIT)
+
+#define AW_A10_PIT_TIMER_NR 6
+#define AW_A10_PIT_TIMER_IRQ 0x1
+#define AW_A10_PIT_WDOG_IRQ 0x100
+
+#define AW_A10_PIT_TIMER_IRQ_EN 0
+#define AW_A10_PIT_TIMER_IRQ_ST 0x4
+
+#define AW_A10_PIT_TIMER_CONTROL 0x0
+#define AW_A10_PIT_TIMER_EN 0x1
+#define AW_A10_PIT_TIMER_RELOAD 0x2
+#define AW_A10_PIT_TIMER_MODE 0x80
+
+#define AW_A10_PIT_TIMER_INTERVAL 0x4
+#define AW_A10_PIT_TIMER_COUNT 0x8
+#define AW_A10_PIT_WDOG_CONTROL 0x90
+#define AW_A10_PIT_WDOG_MODE 0x94
+
+#define AW_A10_PIT_COUNT_CTL 0xa0
+#define AW_A10_PIT_COUNT_RL_EN 0x2
+#define AW_A10_PIT_COUNT_CLR_EN 0x1
+#define AW_A10_PIT_COUNT_LO 0xa4
+#define AW_A10_PIT_COUNT_HI 0xa8
+
+#define AW_A10_PIT_TIMER_BASE 0x10
+#define AW_A10_PIT_TIMER_BASE_END \
+ (AW_A10_PIT_TIMER_BASE * 6 + AW_A10_PIT_TIMER_COUNT)
+
+#define AW_A10_PIT_DEFAULT_CLOCK 0x4
+
+typedef struct AwA10PITState {
+ /*< private >*/
+ SysBusDevice parent_obj;
+ /*< public >*/
+ qemu_irq irq[AW_A10_PIT_TIMER_NR];
+    ptimer_state *timer[AW_A10_PIT_TIMER_NR];
+ MemoryRegion iomem;
+
+ uint32_t irq_enable;
+ uint32_t irq_status;
+ uint32_t control[AW_A10_PIT_TIMER_NR];
+ uint32_t interval[AW_A10_PIT_TIMER_NR];
+ uint32_t count[AW_A10_PIT_TIMER_NR];
+ uint32_t watch_dog_mode;
+ uint32_t watch_dog_control;
+ uint32_t count_lo;
+ uint32_t count_hi;
+ uint32_t count_ctl;
+} AwA10PITState;
+
+#endif
diff --git a/include/hw/timer/digic-timer.h b/include/hw/timer/digic-timer.h
new file mode 100644
index 0000000000..ae913482c6
--- /dev/null
+++ b/include/hw/timer/digic-timer.h
@@ -0,0 +1,46 @@
+/*
+ * Canon DIGIC timer block declarations.
+ *
+ * Copyright (C) 2013 Antony Pavlov <antonynpavlov@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef HW_TIMER_DIGIC_TIMER_H
+#define HW_TIMER_DIGIC_TIMER_H
+
+#include "hw/sysbus.h"
+#include "qemu/typedefs.h"
+#include "hw/ptimer.h"
+
+#define TYPE_DIGIC_TIMER "digic-timer"
+#define DIGIC_TIMER(obj) OBJECT_CHECK(DigicTimerState, (obj), TYPE_DIGIC_TIMER)
+
+#define DIGIC_TIMER_CONTROL 0x00
+#define DIGIC_TIMER_CONTROL_RST 0x80000000
+#define DIGIC_TIMER_CONTROL_EN 0x00000001
+#define DIGIC_TIMER_RELVALUE 0x08
+#define DIGIC_TIMER_VALUE 0x0c
+
+typedef struct DigicTimerState {
+ /*< private >*/
+ SysBusDevice parent_obj;
+ /*< public >*/
+
+ MemoryRegion iomem;
+ ptimer_state *ptimer;
+
+ uint32_t control;
+ uint32_t relvalue;
+} DigicTimerState;
+
+#endif /* HW_TIMER_DIGIC_TIMER_H */
diff --git a/include/migration/vmstate.h b/include/migration/vmstate.h
index 9d09e60419..be193baba1 100644
--- a/include/migration/vmstate.h
+++ b/include/migration/vmstate.h
@@ -339,6 +339,16 @@ extern const VMStateInfo vmstate_info_bitmap;
.offset = vmstate_offset_array(_state, _field, _type, _num), \
}
+#define VMSTATE_ARRAY_OF_POINTER_TO_STRUCT(_f, _s, _n, _v, _vmsd, _type) { \
+ .name = (stringify(_f)), \
+ .version_id = (_v), \
+ .num = (_n), \
+ .vmsd = &(_vmsd), \
+ .size = sizeof(_type *), \
+ .flags = VMS_ARRAY|VMS_STRUCT|VMS_ARRAY_OF_POINTER, \
+ .offset = vmstate_offset_array(_s, _f, _type*, _n), \
+}
+
#define VMSTATE_STRUCT_ARRAY_TEST(_field, _state, _num, _test, _version, _vmsd, _type) { \
.name = (stringify(_field)), \
.num = (_num), \
diff --git a/include/qemu/host-utils.h b/include/qemu/host-utils.h
index 0f688c1c00..de85d282d0 100644
--- a/include/qemu/host-utils.h
+++ b/include/qemu/host-utils.h
@@ -228,6 +228,38 @@ static inline int cto64(uint64_t val)
}
/**
+ * clrsb32 - count leading redundant sign bits in a 32-bit value.
+ * @val: The value to search
+ *
+ * Returns the number of bits following the sign bit that are equal to it.
+ * No special cases; output range is [0-31].
+ */
+static inline int clrsb32(uint32_t val)
+{
+#if QEMU_GNUC_PREREQ(4, 7)
+ return __builtin_clrsb(val);
+#else
+ return clz32(val ^ ((int32_t)val >> 1)) - 1;
+#endif
+}
+
+/**
+ * clrsb64 - count leading redundant sign bits in a 64-bit value.
+ * @val: The value to search
+ *
+ * Returns the number of bits following the sign bit that are equal to it.
+ * No special cases; output range is [0-63].
+ */
+static inline int clrsb64(uint64_t val)
+{
+#if QEMU_GNUC_PREREQ(4, 7)
+ return __builtin_clrsbll(val);
+#else
+ return clz64(val ^ ((int64_t)val >> 1)) - 1;
+#endif
+}
+
+/**
* ctpop8 - count the population of one bits in an 8-bit value.
* @val: The value to search
*/
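
The fallback formula used when __builtin_clrsb is unavailable can be sanity-checked in isolation. The reference clz below exists only for this stand-alone example (QEMU's real clz32 lives elsewhere in host-utils.h):

#include <assert.h>
#include <stdint.h>

static int clz32_ref(uint32_t v)       /* reference count-leading-zeros */
{
    int n = 0;

    if (v == 0) {
        return 32;
    }
    while (!(v & 0x80000000u)) {
        v <<= 1;
        n++;
    }
    return n;
}

static int clrsb32_ref(uint32_t val)
{
    return clz32_ref(val ^ ((int32_t)val >> 1)) - 1;
}

int main(void)
{
    assert(clrsb32_ref(0x00000000u) == 31);  /* every bit copies the sign bit */
    assert(clrsb32_ref(0xffffffffu) == 31);
    assert(clrsb32_ref(0x00000001u) == 30);
    assert(clrsb32_ref(0x7fffffffu) == 0);   /* bit 30 already differs */
    return 0;
}
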
diff --git a/include/sysemu/device_tree.h b/include/sysemu/device_tree.h
index 2b58baf8b1..899f05c138 100644
--- a/include/sysemu/device_tree.h
+++ b/include/sysemu/device_tree.h
@@ -17,27 +17,27 @@
void *create_device_tree(int *sizep);
void *load_device_tree(const char *filename_path, int *sizep);
-int qemu_devtree_setprop(void *fdt, const char *node_path,
- const char *property, const void *val_array, int size);
-int qemu_devtree_setprop_cell(void *fdt, const char *node_path,
- const char *property, uint32_t val);
-int qemu_devtree_setprop_u64(void *fdt, const char *node_path,
- const char *property, uint64_t val);
-int qemu_devtree_setprop_string(void *fdt, const char *node_path,
- const char *property, const char *string);
-int qemu_devtree_setprop_phandle(void *fdt, const char *node_path,
- const char *property,
- const char *target_node_path);
-const void *qemu_devtree_getprop(void *fdt, const char *node_path,
- const char *property, int *lenp);
-uint32_t qemu_devtree_getprop_cell(void *fdt, const char *node_path,
- const char *property);
-uint32_t qemu_devtree_get_phandle(void *fdt, const char *path);
-uint32_t qemu_devtree_alloc_phandle(void *fdt);
-int qemu_devtree_nop_node(void *fdt, const char *node_path);
-int qemu_devtree_add_subnode(void *fdt, const char *name);
+int qemu_fdt_setprop(void *fdt, const char *node_path,
+ const char *property, const void *val, int size);
+int qemu_fdt_setprop_cell(void *fdt, const char *node_path,
+ const char *property, uint32_t val);
+int qemu_fdt_setprop_u64(void *fdt, const char *node_path,
+ const char *property, uint64_t val);
+int qemu_fdt_setprop_string(void *fdt, const char *node_path,
+ const char *property, const char *string);
+int qemu_fdt_setprop_phandle(void *fdt, const char *node_path,
+ const char *property,
+ const char *target_node_path);
+const void *qemu_fdt_getprop(void *fdt, const char *node_path,
+ const char *property, int *lenp);
+uint32_t qemu_fdt_getprop_cell(void *fdt, const char *node_path,
+ const char *property);
+uint32_t qemu_fdt_get_phandle(void *fdt, const char *path);
+uint32_t qemu_fdt_alloc_phandle(void *fdt);
+int qemu_fdt_nop_node(void *fdt, const char *node_path);
+int qemu_fdt_add_subnode(void *fdt, const char *name);
-#define qemu_devtree_setprop_cells(fdt, node_path, property, ...) \
+#define qemu_fdt_setprop_cells(fdt, node_path, property, ...) \
do { \
uint32_t qdt_tmp[] = { __VA_ARGS__ }; \
int i; \
@@ -45,14 +45,14 @@ int qemu_devtree_add_subnode(void *fdt, const char *name);
for (i = 0; i < ARRAY_SIZE(qdt_tmp); i++) { \
qdt_tmp[i] = cpu_to_be32(qdt_tmp[i]); \
} \
- qemu_devtree_setprop(fdt, node_path, property, qdt_tmp, \
- sizeof(qdt_tmp)); \
+ qemu_fdt_setprop(fdt, node_path, property, qdt_tmp, \
+ sizeof(qdt_tmp)); \
} while (0)
-void qemu_devtree_dumpdtb(void *fdt, int size);
+void qemu_fdt_dumpdtb(void *fdt, int size);
/**
- * qemu_devtree_setprop_sized_cells_from_array:
+ * qemu_fdt_setprop_sized_cells_from_array:
* @fdt: device tree blob
* @node_path: node to set property on
* @property: property to set
@@ -72,20 +72,20 @@ void qemu_devtree_dumpdtb(void *fdt, int size);
* the number of cells used for each element vary depending on the
* #address-cells and #size-cells properties of their parent node.
* If you know all your cell elements are one cell wide you can use the
- * simpler qemu_devtree_setprop_cells(). If you're not setting up the
- * array programmatically, qemu_devtree_setprop_sized_cells may be more
+ * simpler qemu_fdt_setprop_cells(). If you're not setting up the
+ * array programmatically, qemu_fdt_setprop_sized_cells may be more
* convenient.
*
* Return value: 0 on success, <0 on error.
*/
-int qemu_devtree_setprop_sized_cells_from_array(void *fdt,
- const char *node_path,
- const char *property,
- int numvalues,
- uint64_t *values);
+int qemu_fdt_setprop_sized_cells_from_array(void *fdt,
+ const char *node_path,
+ const char *property,
+ int numvalues,
+ uint64_t *values);
/**
- * qemu_devtree_setprop_sized_cells:
+ * qemu_fdt_setprop_sized_cells:
* @fdt: device tree blob
* @node_path: node to set property on
* @property: property to set
@@ -97,17 +97,17 @@ int qemu_devtree_setprop_sized_cells_from_array(void *fdt,
* used by this value" and "value".
*
* This is a convenience wrapper for the function
- * qemu_devtree_setprop_sized_cells_from_array().
+ * qemu_fdt_setprop_sized_cells_from_array().
*
* Return value: 0 on success, <0 on error.
*/
-#define qemu_devtree_setprop_sized_cells(fdt, node_path, property, ...) \
- ({ \
- uint64_t qdt_tmp[] = { __VA_ARGS__ }; \
- qemu_devtree_setprop_sized_cells_from_array(fdt, node_path, \
- property, \
- ARRAY_SIZE(qdt_tmp) / 2, \
- qdt_tmp); \
+#define qemu_fdt_setprop_sized_cells(fdt, node_path, property, ...) \
+ ({ \
+ uint64_t qdt_tmp[] = { __VA_ARGS__ }; \
+ qemu_fdt_setprop_sized_cells_from_array(fdt, node_path, \
+ property, \
+ ARRAY_SIZE(qdt_tmp) / 2, \
+ qdt_tmp); \
})
#endif /* __DEVICE_TREE_H__ */
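
The qemu_devtree_* to qemu_fdt_* rename is purely mechanical: the signatures are unchanged, so callers such as spapr_rtas.c and virtex_ml507.c above only pick up the new names. A hedged sketch with illustrative node and property names:

static int example_fill_chosen(void *fdt, const char *cmdline)
{
    int ret;

    ret = qemu_fdt_setprop_string(fdt, "/chosen", "bootargs", cmdline);
    if (ret < 0) {
        return ret;
    }
    /* "example-cell" is an illustrative property name */
    return qemu_fdt_setprop_cell(fdt, "/chosen", "example-cell", 42);
}
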
diff --git a/linux-user/signal.c b/linux-user/signal.c
index 7751c47ef1..4e7148a2d6 100644
--- a/linux-user/signal.c
+++ b/linux-user/signal.c
@@ -1171,7 +1171,7 @@ static int target_setup_sigframe(struct target_rt_sigframe *sf,
}
__put_user(env->xregs[31], &sf->uc.tuc_mcontext.sp);
__put_user(env->pc, &sf->uc.tuc_mcontext.pc);
- __put_user(env->pstate, &sf->uc.tuc_mcontext.pstate);
+ __put_user(pstate_read(env), &sf->uc.tuc_mcontext.pstate);
__put_user(/*current->thread.fault_address*/ 0,
&sf->uc.tuc_mcontext.fault_address);
@@ -1210,6 +1210,7 @@ static int target_restore_sigframe(CPUARMState *env,
struct target_aux_context *aux =
(struct target_aux_context *)sf->uc.tuc_mcontext.__reserved;
uint32_t magic, size;
+ uint64_t pstate;
target_to_host_sigset(&set, &sf->uc.tuc_sigmask);
sigprocmask(SIG_SETMASK, &set, NULL);
@@ -1220,7 +1221,8 @@ static int target_restore_sigframe(CPUARMState *env,
__get_user(env->xregs[31], &sf->uc.tuc_mcontext.sp);
__get_user(env->pc, &sf->uc.tuc_mcontext.pc);
- __get_user(env->pstate, &sf->uc.tuc_mcontext.pstate);
+ __get_user(pstate, &sf->uc.tuc_mcontext.pstate);
+ pstate_write(env, pstate);
__get_user(magic, &aux->fpsimd.head.magic);
__get_user(size, &aux->fpsimd.head.size);
diff --git a/target-arm/Makefile.objs b/target-arm/Makefile.objs
index 356fbfcdfd..dcd167e0d8 100644
--- a/target-arm/Makefile.objs
+++ b/target-arm/Makefile.objs
@@ -1,8 +1,11 @@
obj-y += arm-semi.o
obj-$(CONFIG_SOFTMMU) += machine.o
obj-$(CONFIG_KVM) += kvm.o
+obj-$(call land,$(CONFIG_KVM),$(call lnot,$(TARGET_AARCH64))) += kvm32.o
+obj-$(call land,$(CONFIG_KVM),$(TARGET_AARCH64)) += kvm64.o
obj-$(call lnot,$(CONFIG_KVM)) += kvm-stub.o
obj-y += translate.o op_helper.o helper.o cpu.o
obj-y += neon_helper.o iwmmxt_helper.o
obj-y += gdbstub.o
-obj-$(TARGET_AARCH64) += cpu64.o translate-a64.o gdbstub64.o
+obj-$(TARGET_AARCH64) += cpu64.o translate-a64.o helper-a64.o gdbstub64.o
+obj-y += crypto_helper.o
diff --git a/target-arm/cpu-qom.h b/target-arm/cpu-qom.h
index f32178a9db..afbd4222c5 100644
--- a/target-arm/cpu-qom.h
+++ b/target-arm/cpu-qom.h
@@ -139,6 +139,7 @@ typedef struct ARMCPU {
uint32_t ccsidr[16];
uint32_t reset_cbar;
uint32_t reset_auxcr;
+ bool reset_hivecs;
} ARMCPU;
#define TYPE_AARCH64_CPU "aarch64-cpu"
diff --git a/target-arm/cpu.c b/target-arm/cpu.c
index 0635e78ec2..408d207865 100644
--- a/target-arm/cpu.c
+++ b/target-arm/cpu.c
@@ -21,6 +21,7 @@
#include "cpu.h"
#include "qemu-common.h"
#include "hw/qdev-properties.h"
+#include "qapi/qmp/qerror.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/loader.h"
#endif
@@ -88,6 +89,12 @@ static void arm_cpu_reset(CPUState *s)
if (arm_feature(env, ARM_FEATURE_AARCH64)) {
/* 64 bit CPUs always start in 64 bit mode */
env->aarch64 = 1;
+#if defined(CONFIG_USER_ONLY)
+ env->pstate = PSTATE_MODE_EL0t;
+#else
+ env->pstate = PSTATE_D | PSTATE_A | PSTATE_I | PSTATE_F
+ | PSTATE_MODE_EL1h;
+#endif
}
#if defined(CONFIG_USER_ONLY)
@@ -120,6 +127,11 @@ static void arm_cpu_reset(CPUState *s)
env->regs[15] = pc & ~1;
}
}
+
+ if (env->cp15.c1_sys & (1 << 13)) {
+ env->regs[15] = 0xFFFF0000;
+ }
+
env->vfp.xregs[ARM_VFP_FPEXC] = 0;
#endif
set_flush_to_zero(1, &env->vfp.standard_fp_status);
@@ -231,6 +243,30 @@ static void arm_cpu_initfn(Object *obj)
}
}
+static Property arm_cpu_reset_cbar_property =
+ DEFINE_PROP_UINT32("reset-cbar", ARMCPU, reset_cbar, 0);
+
+static Property arm_cpu_reset_hivecs_property =
+ DEFINE_PROP_BOOL("reset-hivecs", ARMCPU, reset_hivecs, false);
+
+static void arm_cpu_post_init(Object *obj)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+ Error *err = NULL;
+
+ if (arm_feature(&cpu->env, ARM_FEATURE_CBAR)) {
+ qdev_property_add_static(DEVICE(obj), &arm_cpu_reset_cbar_property,
+ &err);
+ assert_no_error(err);
+ }
+
+ if (!arm_feature(&cpu->env, ARM_FEATURE_M)) {
+ qdev_property_add_static(DEVICE(obj), &arm_cpu_reset_hivecs_property,
+ &err);
+ assert_no_error(err);
+ }
+}
+
static void arm_cpu_finalizefn(Object *obj)
{
ARMCPU *cpu = ARM_CPU(obj);
@@ -249,6 +285,7 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
set_feature(env, ARM_FEATURE_V7);
set_feature(env, ARM_FEATURE_ARM_DIV);
set_feature(env, ARM_FEATURE_LPAE);
+ set_feature(env, ARM_FEATURE_V8_AES);
}
if (arm_feature(env, ARM_FEATURE_V7)) {
set_feature(env, ARM_FEATURE_VAPA);
@@ -290,6 +327,10 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
set_feature(env, ARM_FEATURE_PXN);
}
+ if (cpu->reset_hivecs) {
+ cpu->reset_sctlr |= (1 << 13);
+ }
+
register_cp_regs_for_features(cpu);
arm_cpu_register_gdb_regs_for_features(cpu);
@@ -616,6 +657,7 @@ static void cortex_a9_initfn(Object *obj)
* and valid configurations; we don't model A9UP).
*/
set_feature(&cpu->env, ARM_FEATURE_V7MP);
+ set_feature(&cpu->env, ARM_FEATURE_CBAR);
cpu->midr = 0x410fc090;
cpu->reset_fpsid = 0x41033090;
cpu->mvfr0 = 0x11110222;
@@ -638,15 +680,7 @@ static void cortex_a9_initfn(Object *obj)
cpu->clidr = (1 << 27) | (1 << 24) | 3;
cpu->ccsidr[0] = 0xe00fe015; /* 16k L1 dcache. */
cpu->ccsidr[1] = 0x200fe015; /* 16k L1 icache. */
- {
- ARMCPRegInfo cbar = {
- .name = "CBAR", .cp = 15, .crn = 15, .crm = 0, .opc1 = 4,
- .opc2 = 0, .access = PL1_R|PL3_W, .resetvalue = cpu->reset_cbar,
- .fieldoffset = offsetof(CPUARMState, cp15.c15_config_base_address)
- };
- define_one_arm_cp_reg(cpu, &cbar);
- define_arm_cp_regs(cpu, cortexa9_cp_reginfo);
- }
+ define_arm_cp_regs(cpu, cortexa9_cp_reginfo);
}
#ifndef CONFIG_USER_ONLY
@@ -685,6 +719,7 @@ static void cortex_a15_initfn(Object *obj)
set_feature(&cpu->env, ARM_FEATURE_ARM_DIV);
set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
+ set_feature(&cpu->env, ARM_FEATURE_CBAR);
set_feature(&cpu->env, ARM_FEATURE_LPAE);
cpu->kvm_target = QEMU_KVM_ARM_TARGET_CORTEX_A15;
cpu->midr = 0x412fc0f1;
@@ -999,6 +1034,7 @@ static const TypeInfo arm_cpu_type_info = {
.parent = TYPE_CPU,
.instance_size = sizeof(ARMCPU),
.instance_init = arm_cpu_initfn,
+ .instance_post_init = arm_cpu_post_init,
.instance_finalize = arm_cpu_finalizefn,
.abstract = true,
.class_size = sizeof(ARMCPUClass),
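
With arm_cpu_post_init() exposing "reset-cbar" (only when the core has ARM_FEATURE_CBAR) and "reset-hivecs" (for non-M-profile cores) as qdev properties, board code can configure them before realizing the CPU. A sketch under those assumptions; the CPU type name and the address value are illustrative, not taken from the patch:

Error *err = NULL;
ARMCPU *cpu = ARM_CPU(object_new("cortex-a15-" TYPE_ARM_CPU));

qdev_prop_set_uint32(DEVICE(cpu), "reset-cbar", 0x2c000000); /* periph base */
qdev_prop_set_bit(DEVICE(cpu), "reset-hivecs", true);  /* vectors at 0xffff0000 */

object_property_set_bool(OBJECT(cpu), true, "realized", &err);
assert_no_error(err);
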
diff --git a/target-arm/cpu.h b/target-arm/cpu.h
index c3f007fc53..56ed591164 100644
--- a/target-arm/cpu.h
+++ b/target-arm/cpu.h
@@ -113,8 +113,15 @@ typedef struct CPUARMState {
/* Regs for A64 mode. */
uint64_t xregs[32];
uint64_t pc;
- /* TODO: pstate doesn't correspond to an architectural register;
- * it would be better modelled as the underlying fields.
+ /* PSTATE isn't an architectural register for ARMv8. However, it is
+ * convenient for us to assemble the underlying state into a 32 bit format
+ * identical to the architectural format used for the SPSR. (This is also
+ * what the Linux kernel's 'pstate' field in signal handlers and KVM's
+ * 'pstate' register are.) Of the PSTATE bits:
+     * NZCV are kept in the split out env->CF/VF/NF/ZF (which have the same
+ * semantics as for AArch32, as described in the comments on each field)
+ * nRW (also known as M[4]) is kept, inverted, in env->aarch64
+ * all other bits are stored in their correct places in env->pstate
*/
uint32_t pstate;
uint32_t aarch64; /* 1 if CPU is in aarch64 state; inverse of PSTATE.nRW */
@@ -309,15 +316,6 @@ static inline bool is_a64(CPUARMState *env)
return env->aarch64;
}
-#define PSTATE_N_SHIFT 3
-#define PSTATE_N (1 << PSTATE_N_SHIFT)
-#define PSTATE_Z_SHIFT 2
-#define PSTATE_Z (1 << PSTATE_Z_SHIFT)
-#define PSTATE_C_SHIFT 1
-#define PSTATE_C (1 << PSTATE_C_SHIFT)
-#define PSTATE_V_SHIFT 0
-#define PSTATE_V (1 << PSTATE_V_SHIFT)
-
/* you can call this signal handler from your SIGBUS and SIGSEGV
signal handlers to inform the virtual CPU of exceptions. non zero
is returned if the signal was handled by the virtual CPU. */
@@ -352,6 +350,56 @@ int cpu_arm_handle_mmu_fault (CPUARMState *env, target_ulong address, int rw,
/* Execution state bits. MRS read as zero, MSR writes ignored. */
#define CPSR_EXEC (CPSR_T | CPSR_IT | CPSR_J)
+/* Bit definitions for ARMv8 SPSR (PSTATE) format.
+ * Only these are valid when in AArch64 mode; in
+ * AArch32 mode SPSRs are basically CPSR-format.
+ */
+#define PSTATE_M (0xFU)
+#define PSTATE_nRW (1U << 4)
+#define PSTATE_F (1U << 6)
+#define PSTATE_I (1U << 7)
+#define PSTATE_A (1U << 8)
+#define PSTATE_D (1U << 9)
+#define PSTATE_IL (1U << 20)
+#define PSTATE_SS (1U << 21)
+#define PSTATE_V (1U << 28)
+#define PSTATE_C (1U << 29)
+#define PSTATE_Z (1U << 30)
+#define PSTATE_N (1U << 31)
+#define PSTATE_NZCV (PSTATE_N | PSTATE_Z | PSTATE_C | PSTATE_V)
+#define CACHED_PSTATE_BITS (PSTATE_NZCV)
+/* Mode values for AArch64 */
+#define PSTATE_MODE_EL3h 13
+#define PSTATE_MODE_EL3t 12
+#define PSTATE_MODE_EL2h 9
+#define PSTATE_MODE_EL2t 8
+#define PSTATE_MODE_EL1h 5
+#define PSTATE_MODE_EL1t 4
+#define PSTATE_MODE_EL0t 0
+
+/* Return the current PSTATE value. For the moment we don't support 32<->64 bit
+ * interprocessing, so we don't attempt to sync with the cpsr state used by
+ * the 32 bit decoder.
+ */
+static inline uint32_t pstate_read(CPUARMState *env)
+{
+ int ZF;
+
+ ZF = (env->ZF == 0);
+ return (env->NF & 0x80000000) | (ZF << 30)
+ | (env->CF << 29) | ((env->VF & 0x80000000) >> 3)
+ | env->pstate;
+}
+
+static inline void pstate_write(CPUARMState *env, uint32_t val)
+{
+ env->ZF = (~val) & PSTATE_Z;
+ env->NF = val;
+ env->CF = (val >> 29) & 1;
+ env->VF = (val << 3) & 0x80000000;
+ env->pstate = val & ~CACHED_PSTATE_BITS;
+}
+
/* Return the current CPSR value. */
uint32_t cpsr_read(CPUARMState *env);
/* Set the CPSR. Note that some bits of mask must be all-set or all-clear. */
@@ -399,6 +447,34 @@ static inline void xpsr_write(CPUARMState *env, uint32_t val, uint32_t mask)
uint32_t vfp_get_fpscr(CPUARMState *env);
void vfp_set_fpscr(CPUARMState *env, uint32_t val);
+/* For A64 the FPSCR is split into two logically distinct registers,
+ * FPCR and FPSR. However since they still use non-overlapping bits
+ * we store the underlying state in fpscr and just mask on read/write.
+ */
+#define FPSR_MASK 0xf800009f
+#define FPCR_MASK 0x07f79f00
+static inline uint32_t vfp_get_fpsr(CPUARMState *env)
+{
+ return vfp_get_fpscr(env) & FPSR_MASK;
+}
+
+static inline void vfp_set_fpsr(CPUARMState *env, uint32_t val)
+{
+ uint32_t new_fpscr = (vfp_get_fpscr(env) & ~FPSR_MASK) | (val & FPSR_MASK);
+ vfp_set_fpscr(env, new_fpscr);
+}
+
+static inline uint32_t vfp_get_fpcr(CPUARMState *env)
+{
+ return vfp_get_fpscr(env) & FPCR_MASK;
+}
+
+static inline void vfp_set_fpcr(CPUARMState *env, uint32_t val)
+{
+ uint32_t new_fpscr = (vfp_get_fpscr(env) & ~FPCR_MASK) | (val & FPCR_MASK);
+ vfp_set_fpscr(env, new_fpscr);
+}
+
enum arm_cpu_mode {
ARM_CPU_MODE_USR = 0x10,
ARM_CPU_MODE_FIQ = 0x11,
@@ -467,6 +543,8 @@ enum arm_features {
ARM_FEATURE_LPAE, /* has Large Physical Address Extension */
ARM_FEATURE_V8,
ARM_FEATURE_AARCH64, /* supports 64 bit mode */
+ ARM_FEATURE_V8_AES, /* implements AES part of v8 Crypto Extensions */
+ ARM_FEATURE_CBAR, /* has cp15 CBAR */
};
static inline int arm_feature(CPUARMState *env, int feature)
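
As an illustration of the new accessors (assumes the cpu.h context above; the function name is not in the patch): pstate_write() scatters NZCV into env->NF/ZF/CF/VF and keeps everything else in env->pstate, and pstate_read() reassembles the same 32-bit value.

static void pstate_roundtrip_example(CPUARMState *env)
{
    uint32_t spsr = PSTATE_N | PSTATE_C | PSTATE_F | PSTATE_I
                    | PSTATE_MODE_EL1h;

    pstate_write(env, spsr);
    /* only the non-NZCV bits stay in env->pstate */
    assert(env->pstate == (PSTATE_F | PSTATE_I | PSTATE_MODE_EL1h));
    /* NZCV comes back from the cached flag fields */
    assert(pstate_read(env) == spsr);
}
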
diff --git a/target-arm/cpu64.c b/target-arm/cpu64.c
index 3e99c2140a..04ce87951c 100644
--- a/target-arm/cpu64.c
+++ b/target-arm/cpu64.c
@@ -68,11 +68,22 @@ static void aarch64_cpu_finalizefn(Object *obj)
{
}
+static void aarch64_cpu_set_pc(CPUState *cs, vaddr value)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ /*
+ * TODO: this will need updating for system emulation,
+ * when the core may be in AArch32 mode.
+ */
+ cpu->env.pc = value;
+}
+
static void aarch64_cpu_class_init(ObjectClass *oc, void *data)
{
CPUClass *cc = CPU_CLASS(oc);
cc->dump_state = aarch64_cpu_dump_state;
+ cc->set_pc = aarch64_cpu_set_pc;
cc->gdb_read_register = aarch64_cpu_gdb_read_register;
cc->gdb_write_register = aarch64_cpu_gdb_write_register;
cc->gdb_num_core_regs = 34;
diff --git a/target-arm/crypto_helper.c b/target-arm/crypto_helper.c
new file mode 100644
index 0000000000..f94be69ac5
--- /dev/null
+++ b/target-arm/crypto_helper.c
@@ -0,0 +1,281 @@
+/*
+ * crypto_helper.c - emulate v8 Crypto Extensions instructions
+ *
+ * Copyright (C) 2013 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ */
+
+#include <stdlib.h>
+
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "helper.h"
+
+union AES_STATE {
+ uint8_t bytes[16];
+ uint32_t cols[4];
+ uint64_t l[2];
+};
+
+void HELPER(crypto_aese)(CPUARMState *env, uint32_t rd, uint32_t rm,
+ uint32_t decrypt)
+{
+ static uint8_t const sbox[][256] = { {
+ /* S-box for encryption */
+ 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5,
+ 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76,
+ 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0,
+ 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0,
+ 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc,
+ 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,
+ 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a,
+ 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75,
+ 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0,
+ 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84,
+ 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b,
+ 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf,
+ 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85,
+ 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8,
+ 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5,
+ 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2,
+ 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17,
+ 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73,
+ 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88,
+ 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb,
+ 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c,
+ 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79,
+ 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9,
+ 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08,
+ 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6,
+ 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a,
+ 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e,
+ 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e,
+ 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94,
+ 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf,
+ 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68,
+ 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16
+ }, {
+ /* S-box for decryption */
+ 0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38,
+ 0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb,
+ 0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87,
+ 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb,
+ 0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d,
+ 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e,
+ 0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2,
+ 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25,
+ 0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16,
+ 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92,
+ 0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda,
+ 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84,
+ 0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a,
+ 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06,
+ 0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02,
+ 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b,
+ 0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea,
+ 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73,
+ 0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85,
+ 0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e,
+ 0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89,
+ 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b,
+ 0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20,
+ 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4,
+ 0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31,
+ 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f,
+ 0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d,
+ 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef,
+ 0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0,
+ 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61,
+ 0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26,
+ 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d
+ } };
+ static uint8_t const shift[][16] = {
+ /* ShiftRows permutation vector for encryption */
+ { 0, 5, 10, 15, 4, 9, 14, 3, 8, 13, 2, 7, 12, 1, 6, 11 },
+ /* ShiftRows permutation vector for decryption */
+ { 0, 13, 10, 7, 4, 1, 14, 11, 8, 5, 2, 15, 12, 9, 6, 3 },
+ };
+ union AES_STATE rk = { .l = {
+ float64_val(env->vfp.regs[rm]),
+ float64_val(env->vfp.regs[rm + 1])
+ } };
+ union AES_STATE st = { .l = {
+ float64_val(env->vfp.regs[rd]),
+ float64_val(env->vfp.regs[rd + 1])
+ } };
+ int i;
+
+ assert(decrypt < 2);
+
+ /* xor state vector with round key */
+ rk.l[0] ^= st.l[0];
+ rk.l[1] ^= st.l[1];
+
+ /* combine ShiftRows operation and sbox substitution */
+ for (i = 0; i < 16; i++) {
+ st.bytes[i] = sbox[decrypt][rk.bytes[shift[decrypt][i]]];
+ }
+
+ env->vfp.regs[rd] = make_float64(st.l[0]);
+ env->vfp.regs[rd + 1] = make_float64(st.l[1]);
+}
+
+void HELPER(crypto_aesmc)(CPUARMState *env, uint32_t rd, uint32_t rm,
+ uint32_t decrypt)
+{
+ static uint32_t const mc[][256] = { {
+ /* MixColumns lookup table */
+ 0x00000000, 0x03010102, 0x06020204, 0x05030306,
+ 0x0c040408, 0x0f05050a, 0x0a06060c, 0x0907070e,
+ 0x18080810, 0x1b090912, 0x1e0a0a14, 0x1d0b0b16,
+ 0x140c0c18, 0x170d0d1a, 0x120e0e1c, 0x110f0f1e,
+ 0x30101020, 0x33111122, 0x36121224, 0x35131326,
+ 0x3c141428, 0x3f15152a, 0x3a16162c, 0x3917172e,
+ 0x28181830, 0x2b191932, 0x2e1a1a34, 0x2d1b1b36,
+ 0x241c1c38, 0x271d1d3a, 0x221e1e3c, 0x211f1f3e,
+ 0x60202040, 0x63212142, 0x66222244, 0x65232346,
+ 0x6c242448, 0x6f25254a, 0x6a26264c, 0x6927274e,
+ 0x78282850, 0x7b292952, 0x7e2a2a54, 0x7d2b2b56,
+ 0x742c2c58, 0x772d2d5a, 0x722e2e5c, 0x712f2f5e,
+ 0x50303060, 0x53313162, 0x56323264, 0x55333366,
+ 0x5c343468, 0x5f35356a, 0x5a36366c, 0x5937376e,
+ 0x48383870, 0x4b393972, 0x4e3a3a74, 0x4d3b3b76,
+ 0x443c3c78, 0x473d3d7a, 0x423e3e7c, 0x413f3f7e,
+ 0xc0404080, 0xc3414182, 0xc6424284, 0xc5434386,
+ 0xcc444488, 0xcf45458a, 0xca46468c, 0xc947478e,
+ 0xd8484890, 0xdb494992, 0xde4a4a94, 0xdd4b4b96,
+ 0xd44c4c98, 0xd74d4d9a, 0xd24e4e9c, 0xd14f4f9e,
+ 0xf05050a0, 0xf35151a2, 0xf65252a4, 0xf55353a6,
+ 0xfc5454a8, 0xff5555aa, 0xfa5656ac, 0xf95757ae,
+ 0xe85858b0, 0xeb5959b2, 0xee5a5ab4, 0xed5b5bb6,
+ 0xe45c5cb8, 0xe75d5dba, 0xe25e5ebc, 0xe15f5fbe,
+ 0xa06060c0, 0xa36161c2, 0xa66262c4, 0xa56363c6,
+ 0xac6464c8, 0xaf6565ca, 0xaa6666cc, 0xa96767ce,
+ 0xb86868d0, 0xbb6969d2, 0xbe6a6ad4, 0xbd6b6bd6,
+ 0xb46c6cd8, 0xb76d6dda, 0xb26e6edc, 0xb16f6fde,
+ 0x907070e0, 0x937171e2, 0x967272e4, 0x957373e6,
+ 0x9c7474e8, 0x9f7575ea, 0x9a7676ec, 0x997777ee,
+ 0x887878f0, 0x8b7979f2, 0x8e7a7af4, 0x8d7b7bf6,
+ 0x847c7cf8, 0x877d7dfa, 0x827e7efc, 0x817f7ffe,
+ 0x9b80801b, 0x98818119, 0x9d82821f, 0x9e83831d,
+ 0x97848413, 0x94858511, 0x91868617, 0x92878715,
+ 0x8388880b, 0x80898909, 0x858a8a0f, 0x868b8b0d,
+ 0x8f8c8c03, 0x8c8d8d01, 0x898e8e07, 0x8a8f8f05,
+ 0xab90903b, 0xa8919139, 0xad92923f, 0xae93933d,
+ 0xa7949433, 0xa4959531, 0xa1969637, 0xa2979735,
+ 0xb398982b, 0xb0999929, 0xb59a9a2f, 0xb69b9b2d,
+ 0xbf9c9c23, 0xbc9d9d21, 0xb99e9e27, 0xba9f9f25,
+ 0xfba0a05b, 0xf8a1a159, 0xfda2a25f, 0xfea3a35d,
+ 0xf7a4a453, 0xf4a5a551, 0xf1a6a657, 0xf2a7a755,
+ 0xe3a8a84b, 0xe0a9a949, 0xe5aaaa4f, 0xe6abab4d,
+ 0xefacac43, 0xecadad41, 0xe9aeae47, 0xeaafaf45,
+ 0xcbb0b07b, 0xc8b1b179, 0xcdb2b27f, 0xceb3b37d,
+ 0xc7b4b473, 0xc4b5b571, 0xc1b6b677, 0xc2b7b775,
+ 0xd3b8b86b, 0xd0b9b969, 0xd5baba6f, 0xd6bbbb6d,
+ 0xdfbcbc63, 0xdcbdbd61, 0xd9bebe67, 0xdabfbf65,
+ 0x5bc0c09b, 0x58c1c199, 0x5dc2c29f, 0x5ec3c39d,
+ 0x57c4c493, 0x54c5c591, 0x51c6c697, 0x52c7c795,
+ 0x43c8c88b, 0x40c9c989, 0x45caca8f, 0x46cbcb8d,
+ 0x4fcccc83, 0x4ccdcd81, 0x49cece87, 0x4acfcf85,
+ 0x6bd0d0bb, 0x68d1d1b9, 0x6dd2d2bf, 0x6ed3d3bd,
+ 0x67d4d4b3, 0x64d5d5b1, 0x61d6d6b7, 0x62d7d7b5,
+ 0x73d8d8ab, 0x70d9d9a9, 0x75dadaaf, 0x76dbdbad,
+ 0x7fdcdca3, 0x7cdddda1, 0x79dedea7, 0x7adfdfa5,
+ 0x3be0e0db, 0x38e1e1d9, 0x3de2e2df, 0x3ee3e3dd,
+ 0x37e4e4d3, 0x34e5e5d1, 0x31e6e6d7, 0x32e7e7d5,
+ 0x23e8e8cb, 0x20e9e9c9, 0x25eaeacf, 0x26ebebcd,
+ 0x2fececc3, 0x2cededc1, 0x29eeeec7, 0x2aefefc5,
+ 0x0bf0f0fb, 0x08f1f1f9, 0x0df2f2ff, 0x0ef3f3fd,
+ 0x07f4f4f3, 0x04f5f5f1, 0x01f6f6f7, 0x02f7f7f5,
+ 0x13f8f8eb, 0x10f9f9e9, 0x15fafaef, 0x16fbfbed,
+ 0x1ffcfce3, 0x1cfdfde1, 0x19fefee7, 0x1affffe5,
+ }, {
+ /* Inverse MixColumns lookup table */
+ 0x00000000, 0x0b0d090e, 0x161a121c, 0x1d171b12,
+ 0x2c342438, 0x27392d36, 0x3a2e3624, 0x31233f2a,
+ 0x58684870, 0x5365417e, 0x4e725a6c, 0x457f5362,
+ 0x745c6c48, 0x7f516546, 0x62467e54, 0x694b775a,
+ 0xb0d090e0, 0xbbdd99ee, 0xa6ca82fc, 0xadc78bf2,
+ 0x9ce4b4d8, 0x97e9bdd6, 0x8afea6c4, 0x81f3afca,
+ 0xe8b8d890, 0xe3b5d19e, 0xfea2ca8c, 0xf5afc382,
+ 0xc48cfca8, 0xcf81f5a6, 0xd296eeb4, 0xd99be7ba,
+ 0x7bbb3bdb, 0x70b632d5, 0x6da129c7, 0x66ac20c9,
+ 0x578f1fe3, 0x5c8216ed, 0x41950dff, 0x4a9804f1,
+ 0x23d373ab, 0x28de7aa5, 0x35c961b7, 0x3ec468b9,
+ 0x0fe75793, 0x04ea5e9d, 0x19fd458f, 0x12f04c81,
+ 0xcb6bab3b, 0xc066a235, 0xdd71b927, 0xd67cb029,
+ 0xe75f8f03, 0xec52860d, 0xf1459d1f, 0xfa489411,
+ 0x9303e34b, 0x980eea45, 0x8519f157, 0x8e14f859,
+ 0xbf37c773, 0xb43ace7d, 0xa92dd56f, 0xa220dc61,
+ 0xf66d76ad, 0xfd607fa3, 0xe07764b1, 0xeb7a6dbf,
+ 0xda595295, 0xd1545b9b, 0xcc434089, 0xc74e4987,
+ 0xae053edd, 0xa50837d3, 0xb81f2cc1, 0xb31225cf,
+ 0x82311ae5, 0x893c13eb, 0x942b08f9, 0x9f2601f7,
+ 0x46bde64d, 0x4db0ef43, 0x50a7f451, 0x5baafd5f,
+ 0x6a89c275, 0x6184cb7b, 0x7c93d069, 0x779ed967,
+ 0x1ed5ae3d, 0x15d8a733, 0x08cfbc21, 0x03c2b52f,
+ 0x32e18a05, 0x39ec830b, 0x24fb9819, 0x2ff69117,
+ 0x8dd64d76, 0x86db4478, 0x9bcc5f6a, 0x90c15664,
+ 0xa1e2694e, 0xaaef6040, 0xb7f87b52, 0xbcf5725c,
+ 0xd5be0506, 0xdeb30c08, 0xc3a4171a, 0xc8a91e14,
+ 0xf98a213e, 0xf2872830, 0xef903322, 0xe49d3a2c,
+ 0x3d06dd96, 0x360bd498, 0x2b1ccf8a, 0x2011c684,
+ 0x1132f9ae, 0x1a3ff0a0, 0x0728ebb2, 0x0c25e2bc,
+ 0x656e95e6, 0x6e639ce8, 0x737487fa, 0x78798ef4,
+ 0x495ab1de, 0x4257b8d0, 0x5f40a3c2, 0x544daacc,
+ 0xf7daec41, 0xfcd7e54f, 0xe1c0fe5d, 0xeacdf753,
+ 0xdbeec879, 0xd0e3c177, 0xcdf4da65, 0xc6f9d36b,
+ 0xafb2a431, 0xa4bfad3f, 0xb9a8b62d, 0xb2a5bf23,
+ 0x83868009, 0x888b8907, 0x959c9215, 0x9e919b1b,
+ 0x470a7ca1, 0x4c0775af, 0x51106ebd, 0x5a1d67b3,
+ 0x6b3e5899, 0x60335197, 0x7d244a85, 0x7629438b,
+ 0x1f6234d1, 0x146f3ddf, 0x097826cd, 0x02752fc3,
+ 0x335610e9, 0x385b19e7, 0x254c02f5, 0x2e410bfb,
+ 0x8c61d79a, 0x876cde94, 0x9a7bc586, 0x9176cc88,
+ 0xa055f3a2, 0xab58faac, 0xb64fe1be, 0xbd42e8b0,
+ 0xd4099fea, 0xdf0496e4, 0xc2138df6, 0xc91e84f8,
+ 0xf83dbbd2, 0xf330b2dc, 0xee27a9ce, 0xe52aa0c0,
+ 0x3cb1477a, 0x37bc4e74, 0x2aab5566, 0x21a65c68,
+ 0x10856342, 0x1b886a4c, 0x069f715e, 0x0d927850,
+ 0x64d90f0a, 0x6fd40604, 0x72c31d16, 0x79ce1418,
+ 0x48ed2b32, 0x43e0223c, 0x5ef7392e, 0x55fa3020,
+ 0x01b79aec, 0x0aba93e2, 0x17ad88f0, 0x1ca081fe,
+ 0x2d83bed4, 0x268eb7da, 0x3b99acc8, 0x3094a5c6,
+ 0x59dfd29c, 0x52d2db92, 0x4fc5c080, 0x44c8c98e,
+ 0x75ebf6a4, 0x7ee6ffaa, 0x63f1e4b8, 0x68fcedb6,
+ 0xb1670a0c, 0xba6a0302, 0xa77d1810, 0xac70111e,
+ 0x9d532e34, 0x965e273a, 0x8b493c28, 0x80443526,
+ 0xe90f427c, 0xe2024b72, 0xff155060, 0xf418596e,
+ 0xc53b6644, 0xce366f4a, 0xd3217458, 0xd82c7d56,
+ 0x7a0ca137, 0x7101a839, 0x6c16b32b, 0x671bba25,
+ 0x5638850f, 0x5d358c01, 0x40229713, 0x4b2f9e1d,
+ 0x2264e947, 0x2969e049, 0x347efb5b, 0x3f73f255,
+ 0x0e50cd7f, 0x055dc471, 0x184adf63, 0x1347d66d,
+ 0xcadc31d7, 0xc1d138d9, 0xdcc623cb, 0xd7cb2ac5,
+ 0xe6e815ef, 0xede51ce1, 0xf0f207f3, 0xfbff0efd,
+ 0x92b479a7, 0x99b970a9, 0x84ae6bbb, 0x8fa362b5,
+ 0xbe805d9f, 0xb58d5491, 0xa89a4f83, 0xa397468d,
+ } };
+ union AES_STATE st = { .l = {
+ float64_val(env->vfp.regs[rm]),
+ float64_val(env->vfp.regs[rm + 1])
+ } };
+ int i;
+
+ assert(decrypt < 2);
+
+ for (i = 0; i < 16; i += 4) {
+ st.cols[i >> 2] = cpu_to_le32(
+ mc[decrypt][st.bytes[i]] ^
+ rol32(mc[decrypt][st.bytes[i + 1]], 8) ^
+ rol32(mc[decrypt][st.bytes[i + 2]], 16) ^
+ rol32(mc[decrypt][st.bytes[i + 3]], 24));
+ }
+
+ env->vfp.regs[rd] = make_float64(st.l[0]);
+ env->vfp.regs[rd + 1] = make_float64(st.l[1]);
+}
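The forward table above packs, for each input byte x, the GF(2^8) products 02·x, 01·x, 01·x and 03·x into the four byte lanes of one 32-bit word (entry 0x40 is 0xc0404080, for example), so one output column of MixColumns becomes four table lookups combined with rotates. A minimal standalone sketch, not part of the patch, that computes the same transform directly with xtime() and checks it against the FIPS-197 example column:

#include <stdint.h>
#include <stdio.h>

/* multiply by 2 in GF(2^8) with the AES reduction polynomial 0x11b */
static uint8_t xtime(uint8_t b)
{
    return (uint8_t)((b << 1) ^ ((b & 0x80) ? 0x1b : 0x00));
}

/* forward MixColumns on one 4-byte column, in place */
static void mix_column(uint8_t c[4])
{
    uint8_t a0 = c[0], a1 = c[1], a2 = c[2], a3 = c[3];

    c[0] = xtime(a0) ^ (xtime(a1) ^ a1) ^ a2 ^ a3;  /* 2*a0 ^ 3*a1 ^ a2 ^ a3 */
    c[1] = a0 ^ xtime(a1) ^ (xtime(a2) ^ a2) ^ a3;
    c[2] = a0 ^ a1 ^ xtime(a2) ^ (xtime(a3) ^ a3);
    c[3] = (xtime(a0) ^ a0) ^ a1 ^ a2 ^ xtime(a3);
}

int main(void)
{
    uint8_t col[4] = { 0xdb, 0x13, 0x53, 0x45 };    /* FIPS-197 test column */

    mix_column(col);
    printf("%02x %02x %02x %02x\n", col[0], col[1], col[2], col[3]);
    /* prints: 8e 4d a1 bc */
    return 0;
}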
diff --git a/target-arm/gdbstub64.c b/target-arm/gdbstub64.c
index 7cb6a7c0e0..e8a82952a4 100644
--- a/target-arm/gdbstub64.c
+++ b/target-arm/gdbstub64.c
@@ -37,7 +37,7 @@ int aarch64_cpu_gdb_read_register(CPUState *cs, uint8_t *mem_buf, int n)
return gdb_get_reg64(mem_buf, env->pc);
break;
case 33:
- return gdb_get_reg32(mem_buf, env->pstate);
+ return gdb_get_reg32(mem_buf, pstate_read(env));
}
/* Unknown register. */
return 0;
@@ -65,7 +65,7 @@ int aarch64_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
return 8;
case 33:
/* CPSR */
- env->pstate = tmp;
+ pstate_write(env, tmp);
return 4;
}
/* Unknown register. */
diff --git a/target-arm/helper-a64.c b/target-arm/helper-a64.c
new file mode 100644
index 0000000000..d3f706754f
--- /dev/null
+++ b/target-arm/helper-a64.c
@@ -0,0 +1,79 @@
+/*
+ * AArch64 specific helpers
+ *
+ * Copyright (c) 2013 Alexander Graf <agraf@suse.de>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "cpu.h"
+#include "exec/gdbstub.h"
+#include "helper.h"
+#include "qemu/host-utils.h"
+#include "sysemu/sysemu.h"
+#include "qemu/bitops.h"
+
+/* C2.4.7 Multiply and divide */
+/* special cases for 0 and LLONG_MIN are mandated by the standard */
+uint64_t HELPER(udiv64)(uint64_t num, uint64_t den)
+{
+ if (den == 0) {
+ return 0;
+ }
+ return num / den;
+}
+
+int64_t HELPER(sdiv64)(int64_t num, int64_t den)
+{
+ if (den == 0) {
+ return 0;
+ }
+ if (num == LLONG_MIN && den == -1) {
+ return LLONG_MIN;
+ }
+ return num / den;
+}
+
+uint64_t HELPER(clz64)(uint64_t x)
+{
+ return clz64(x);
+}
+
+uint64_t HELPER(cls64)(uint64_t x)
+{
+ return clrsb64(x);
+}
+
+uint32_t HELPER(cls32)(uint32_t x)
+{
+ return clrsb32(x);
+}
+
+uint64_t HELPER(rbit64)(uint64_t x)
+{
+ /* assign the correct byte position */
+ x = bswap64(x);
+
+ /* assign the correct nibble position */
+ x = ((x & 0xf0f0f0f0f0f0f0f0ULL) >> 4)
+ | ((x & 0x0f0f0f0f0f0f0f0fULL) << 4);
+
+ /* assign the correct bit position */
+ x = ((x & 0x8888888888888888ULL) >> 3)
+ | ((x & 0x4444444444444444ULL) >> 1)
+ | ((x & 0x2222222222222222ULL) << 1)
+ | ((x & 0x1111111111111111ULL) << 3);
+
+ return x;
+}
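rbit64() above reverses the bit order of a 64-bit value in three strides: a byte swap, a nibble swap inside each byte, then a bit reversal inside each nibble. A standalone cross-check against a naive per-bit loop, not part of the patch, with a local byte swap standing in for QEMU's bswap64():

#include <assert.h>
#include <stdint.h>

static uint64_t byteswap64(uint64_t x)
{
    uint64_t r = 0;
    int i;

    for (i = 0; i < 8; i++) {
        r = (r << 8) | ((x >> (i * 8)) & 0xff);
    }
    return r;
}

/* same three strides as the helper above */
static uint64_t rbit64_swaps(uint64_t x)
{
    x = byteswap64(x);
    x = ((x & 0xf0f0f0f0f0f0f0f0ULL) >> 4) | ((x & 0x0f0f0f0f0f0f0f0fULL) << 4);
    x = ((x & 0x8888888888888888ULL) >> 3) | ((x & 0x4444444444444444ULL) >> 1)
      | ((x & 0x2222222222222222ULL) << 1) | ((x & 0x1111111111111111ULL) << 3);
    return x;
}

static uint64_t rbit64_naive(uint64_t x)
{
    uint64_t r = 0;
    int i;

    for (i = 0; i < 64; i++) {
        r = (r << 1) | ((x >> i) & 1);
    }
    return r;
}

int main(void)
{
    const uint64_t v[] = { 0, 1, 0x8000000000000000ULL, 0x123456789abcdef0ULL };
    int i;

    for (i = 0; i < 4; i++) {
        assert(rbit64_swaps(v[i]) == rbit64_naive(v[i]));
    }
    return 0;
}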
diff --git a/target-arm/helper-a64.h b/target-arm/helper-a64.h
new file mode 100644
index 0000000000..a163a94322
--- /dev/null
+++ b/target-arm/helper-a64.h
@@ -0,0 +1,24 @@
+/*
+ * AArch64 specific helper definitions
+ *
+ * Copyright (c) 2013 Alexander Graf <agraf@suse.de>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+DEF_HELPER_FLAGS_2(udiv64, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(sdiv64, TCG_CALL_NO_RWG_SE, s64, s64, s64)
+DEF_HELPER_FLAGS_1(clz64, TCG_CALL_NO_RWG_SE, i64, i64)
+DEF_HELPER_FLAGS_1(cls64, TCG_CALL_NO_RWG_SE, i64, i64)
+DEF_HELPER_FLAGS_1(cls32, TCG_CALL_NO_RWG_SE, i32, i32)
+DEF_HELPER_FLAGS_1(rbit64, TCG_CALL_NO_RWG_SE, i64, i64)
diff --git a/target-arm/helper.c b/target-arm/helper.c
index 5e5e5aad2b..6ebd7dc7bc 100644
--- a/target-arm/helper.c
+++ b/target-arm/helper.c
@@ -65,6 +65,48 @@ static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
return 0;
}
+static int aarch64_fpu_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
+{
+ switch (reg) {
+ case 0 ... 31:
+ /* 128 bit FP register */
+ stfq_le_p(buf, env->vfp.regs[reg * 2]);
+ stfq_le_p(buf + 8, env->vfp.regs[reg * 2 + 1]);
+ return 16;
+ case 32:
+ /* FPSR */
+ stl_p(buf, vfp_get_fpsr(env));
+ return 4;
+ case 33:
+ /* FPCR */
+ stl_p(buf, vfp_get_fpcr(env));
+ return 4;
+ default:
+ return 0;
+ }
+}
+
+static int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
+{
+ switch (reg) {
+ case 0 ... 31:
+ /* 128 bit FP register */
+ env->vfp.regs[reg * 2] = ldfq_le_p(buf);
+ env->vfp.regs[reg * 2 + 1] = ldfq_le_p(buf + 8);
+ return 16;
+ case 32:
+ /* FPSR */
+ vfp_set_fpsr(env, ldl_p(buf));
+ return 4;
+ case 33:
+ /* FPCR */
+ vfp_set_fpcr(env, ldl_p(buf));
+ return 4;
+ default:
+ return 0;
+ }
+}
+
static int raw_read(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t *value)
{
@@ -1338,7 +1380,8 @@ static const ARMCPRegInfo dummy_c15_cp_reginfo[] = {
*/
{ .name = "C15_IMPDEF", .cp = 15, .crn = 15,
.crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
- .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_NO_MIGRATE,
+ .access = PL1_RW,
+ .type = ARM_CP_CONST | ARM_CP_NO_MIGRATE | ARM_CP_OVERRIDE,
.resetvalue = 0 },
REGINFO_SENTINEL
};
@@ -1744,6 +1787,15 @@ void register_cp_regs_for_features(ARMCPU *cpu)
define_one_arm_cp_reg(cpu, &auxcr);
}
+ if (arm_feature(env, ARM_FEATURE_CBAR)) {
+ ARMCPRegInfo cbar = {
+ .name = "CBAR", .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
+ .access = PL1_R|PL3_W, .resetvalue = cpu->reset_cbar,
+ .fieldoffset = offsetof(CPUARMState, cp15.c15_config_base_address)
+ };
+ define_one_arm_cp_reg(cpu, &cbar);
+ }
+
/* Generic registers whose values depend on the implementation */
{
ARMCPRegInfo sctlr = {
@@ -1785,7 +1837,11 @@ void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu)
CPUState *cs = CPU(cpu);
CPUARMState *env = &cpu->env;
- if (arm_feature(env, ARM_FEATURE_NEON)) {
+ if (arm_feature(env, ARM_FEATURE_AARCH64)) {
+ gdb_register_coprocessor(cs, aarch64_fpu_gdb_get_reg,
+ aarch64_fpu_gdb_set_reg,
+ 34, "aarch64-fpu.xml", 0);
+ } else if (arm_feature(env, ARM_FEATURE_NEON)) {
gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
51, "arm-neon.xml", 0);
} else if (arm_feature(env, ARM_FEATURE_VFP3)) {
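aarch64_fpu_gdb_get_reg() and aarch64_fpu_gdb_set_reg() above move each 128-bit vector register through the gdb buffer as two little-endian 64-bit halves, the low half (vfp.regs[2n]) at buf and the high half (vfp.regs[2n + 1]) at buf + 8, matching the 34-register layout (32 vector registers plus FPSR and FPCR) advertised to gdb. A standalone sketch of that byte layout, not part of the patch, with stq_le() standing in for QEMU's stfq_le_p():

#include <stdint.h>
#include <stdio.h>

/* little-endian store of a 64-bit value */
static void stq_le(uint8_t *p, uint64_t v)
{
    int i;

    for (i = 0; i < 8; i++) {
        p[i] = (uint8_t)(v >> (i * 8));
    }
}

int main(void)
{
    /* one 128-bit register modelled as two 64-bit halves, as in vfp.regs[] */
    uint64_t lo = 0x0011223344556677ULL;
    uint64_t hi = 0x8899aabbccddeeffULL;
    uint8_t buf[16];
    int i;

    stq_le(buf, lo);        /* bytes 0..7:  low half, least significant first */
    stq_le(buf + 8, hi);    /* bytes 8..15: high half */

    for (i = 0; i < 16; i++) {
        printf("%02x ", buf[i]);
    }
    printf("\n");           /* 77 66 55 44 33 22 11 00 ff ee dd cc bb aa 99 88 */
    return 0;
}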
diff --git a/target-arm/helper.h b/target-arm/helper.h
index d459a39e46..73d67dcc17 100644
--- a/target-arm/helper.h
+++ b/target-arm/helper.h
@@ -463,4 +463,11 @@ DEF_HELPER_3(neon_qzip8, void, env, i32, i32)
DEF_HELPER_3(neon_qzip16, void, env, i32, i32)
DEF_HELPER_3(neon_qzip32, void, env, i32, i32)
+DEF_HELPER_4(crypto_aese, void, env, i32, i32, i32)
+DEF_HELPER_4(crypto_aesmc, void, env, i32, i32, i32)
+
+#ifdef TARGET_AARCH64
+#include "helper-a64.h"
+#endif
+
#include "exec/def-helper.h"
diff --git a/target-arm/kvm.c b/target-arm/kvm.c
index f865dac871..1d2688dda7 100644
--- a/target-arm/kvm.c
+++ b/target-arm/kvm.c
@@ -100,120 +100,6 @@ void kvm_arm_destroy_scratch_host_vcpu(int *fdarray)
}
}
-static inline void set_feature(uint64_t *features, int feature)
-{
- *features |= 1ULL << feature;
-}
-
-bool kvm_arm_get_host_cpu_features(ARMHostCPUClass *ahcc)
-{
- /* Identify the feature bits corresponding to the host CPU, and
- * fill out the ARMHostCPUClass fields accordingly. To do this
- * we have to create a scratch VM, create a single CPU inside it,
- * and then query that CPU for the relevant ID registers.
- */
- int i, ret, fdarray[3];
- uint32_t midr, id_pfr0, id_isar0, mvfr1;
- uint64_t features = 0;
- /* Old kernels may not know about the PREFERRED_TARGET ioctl: however
- * we know these will only support creating one kind of guest CPU,
- * which is its preferred CPU type.
- */
- static const uint32_t cpus_to_try[] = {
- QEMU_KVM_ARM_TARGET_CORTEX_A15,
- QEMU_KVM_ARM_TARGET_NONE
- };
- struct kvm_vcpu_init init;
- struct kvm_one_reg idregs[] = {
- {
- .id = KVM_REG_ARM | KVM_REG_SIZE_U32
- | ENCODE_CP_REG(15, 0, 0, 0, 0, 0),
- .addr = (uintptr_t)&midr,
- },
- {
- .id = KVM_REG_ARM | KVM_REG_SIZE_U32
- | ENCODE_CP_REG(15, 0, 0, 1, 0, 0),
- .addr = (uintptr_t)&id_pfr0,
- },
- {
- .id = KVM_REG_ARM | KVM_REG_SIZE_U32
- | ENCODE_CP_REG(15, 0, 0, 2, 0, 0),
- .addr = (uintptr_t)&id_isar0,
- },
- {
- .id = KVM_REG_ARM | KVM_REG_SIZE_U32
- | KVM_REG_ARM_VFP | KVM_REG_ARM_VFP_MVFR1,
- .addr = (uintptr_t)&mvfr1,
- },
- };
-
- if (!kvm_arm_create_scratch_host_vcpu(cpus_to_try, fdarray, &init)) {
- return false;
- }
-
- ahcc->target = init.target;
-
- /* This is not strictly blessed by the device tree binding docs yet,
- * but in practice the kernel does not care about this string so
- * there is no point maintaining an KVM_ARM_TARGET_* -> string table.
- */
- ahcc->dtb_compatible = "arm,arm-v7";
-
- for (i = 0; i < ARRAY_SIZE(idregs); i++) {
- ret = ioctl(fdarray[2], KVM_GET_ONE_REG, &idregs[i]);
- if (ret) {
- break;
- }
- }
-
- kvm_arm_destroy_scratch_host_vcpu(fdarray);
-
- if (ret) {
- return false;
- }
-
- /* Now we've retrieved all the register information we can
- * set the feature bits based on the ID register fields.
- * We can assume any KVM supporting CPU is at least a v7
- * with VFPv3, LPAE and the generic timers; this in turn implies
- * most of the other feature bits, but a few must be tested.
- */
- set_feature(&features, ARM_FEATURE_V7);
- set_feature(&features, ARM_FEATURE_VFP3);
- set_feature(&features, ARM_FEATURE_LPAE);
- set_feature(&features, ARM_FEATURE_GENERIC_TIMER);
-
- switch (extract32(id_isar0, 24, 4)) {
- case 1:
- set_feature(&features, ARM_FEATURE_THUMB_DIV);
- break;
- case 2:
- set_feature(&features, ARM_FEATURE_ARM_DIV);
- set_feature(&features, ARM_FEATURE_THUMB_DIV);
- break;
- default:
- break;
- }
-
- if (extract32(id_pfr0, 12, 4) == 1) {
- set_feature(&features, ARM_FEATURE_THUMB2EE);
- }
- if (extract32(mvfr1, 20, 4) == 1) {
- set_feature(&features, ARM_FEATURE_VFP_FP16);
- }
- if (extract32(mvfr1, 12, 4) == 1) {
- set_feature(&features, ARM_FEATURE_NEON);
- }
- if (extract32(mvfr1, 28, 4) == 1) {
- /* FMAC support implies VFPv4 */
- set_feature(&features, ARM_FEATURE_VFP4);
- }
-
- ahcc->features = features;
-
- return true;
-}
-
static void kvm_arm_host_cpu_class_init(ObjectClass *oc, void *data)
{
ARMHostCPUClass *ahcc = ARM_HOST_CPU_CLASS(oc);
@@ -242,7 +128,11 @@ static void kvm_arm_host_cpu_initfn(Object *obj)
static const TypeInfo host_arm_cpu_type_info = {
.name = TYPE_ARM_HOST_CPU,
+#ifdef TARGET_AARCH64
+ .parent = TYPE_AARCH64_CPU,
+#else
.parent = TYPE_ARM_CPU,
+#endif
.instance_init = kvm_arm_host_cpu_initfn,
.class_init = kvm_arm_host_cpu_class_init,
.class_size = sizeof(ARMHostCPUClass),
@@ -265,144 +155,6 @@ unsigned long kvm_arch_vcpu_id(CPUState *cpu)
return cpu->cpu_index;
}
-static bool reg_syncs_via_tuple_list(uint64_t regidx)
-{
- /* Return true if the regidx is a register we should synchronize
- * via the cpreg_tuples array (ie is not a core reg we sync by
- * hand in kvm_arch_get/put_registers())
- */
- switch (regidx & KVM_REG_ARM_COPROC_MASK) {
- case KVM_REG_ARM_CORE:
- case KVM_REG_ARM_VFP:
- return false;
- default:
- return true;
- }
-}
-
-static int compare_u64(const void *a, const void *b)
-{
- if (*(uint64_t *)a > *(uint64_t *)b) {
- return 1;
- }
- if (*(uint64_t *)a < *(uint64_t *)b) {
- return -1;
- }
- return 0;
-}
-
-int kvm_arch_init_vcpu(CPUState *cs)
-{
- struct kvm_vcpu_init init;
- int i, ret, arraylen;
- uint64_t v;
- struct kvm_one_reg r;
- struct kvm_reg_list rl;
- struct kvm_reg_list *rlp;
- ARMCPU *cpu = ARM_CPU(cs);
-
- if (cpu->kvm_target == QEMU_KVM_ARM_TARGET_NONE) {
- fprintf(stderr, "KVM is not supported for this guest CPU type\n");
- return -EINVAL;
- }
-
- init.target = cpu->kvm_target;
- memset(init.features, 0, sizeof(init.features));
- if (cpu->start_powered_off) {
- init.features[0] = 1 << KVM_ARM_VCPU_POWER_OFF;
- }
- ret = kvm_vcpu_ioctl(cs, KVM_ARM_VCPU_INIT, &init);
- if (ret) {
- return ret;
- }
- /* Query the kernel to make sure it supports 32 VFP
- * registers: QEMU's "cortex-a15" CPU is always a
- * VFP-D32 core. The simplest way to do this is just
- * to attempt to read register d31.
- */
- r.id = KVM_REG_ARM | KVM_REG_SIZE_U64 | KVM_REG_ARM_VFP | 31;
- r.addr = (uintptr_t)(&v);
- ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
- if (ret == -ENOENT) {
- return -EINVAL;
- }
-
- /* Populate the cpreg list based on the kernel's idea
- * of what registers exist (and throw away the TCG-created list).
- */
- rl.n = 0;
- ret = kvm_vcpu_ioctl(cs, KVM_GET_REG_LIST, &rl);
- if (ret != -E2BIG) {
- return ret;
- }
- rlp = g_malloc(sizeof(struct kvm_reg_list) + rl.n * sizeof(uint64_t));
- rlp->n = rl.n;
- ret = kvm_vcpu_ioctl(cs, KVM_GET_REG_LIST, rlp);
- if (ret) {
- goto out;
- }
- /* Sort the list we get back from the kernel, since cpreg_tuples
- * must be in strictly ascending order.
- */
- qsort(&rlp->reg, rlp->n, sizeof(rlp->reg[0]), compare_u64);
-
- for (i = 0, arraylen = 0; i < rlp->n; i++) {
- if (!reg_syncs_via_tuple_list(rlp->reg[i])) {
- continue;
- }
- switch (rlp->reg[i] & KVM_REG_SIZE_MASK) {
- case KVM_REG_SIZE_U32:
- case KVM_REG_SIZE_U64:
- break;
- default:
- fprintf(stderr, "Can't handle size of register in kernel list\n");
- ret = -EINVAL;
- goto out;
- }
-
- arraylen++;
- }
-
- cpu->cpreg_indexes = g_renew(uint64_t, cpu->cpreg_indexes, arraylen);
- cpu->cpreg_values = g_renew(uint64_t, cpu->cpreg_values, arraylen);
- cpu->cpreg_vmstate_indexes = g_renew(uint64_t, cpu->cpreg_vmstate_indexes,
- arraylen);
- cpu->cpreg_vmstate_values = g_renew(uint64_t, cpu->cpreg_vmstate_values,
- arraylen);
- cpu->cpreg_array_len = arraylen;
- cpu->cpreg_vmstate_array_len = arraylen;
-
- for (i = 0, arraylen = 0; i < rlp->n; i++) {
- uint64_t regidx = rlp->reg[i];
- if (!reg_syncs_via_tuple_list(regidx)) {
- continue;
- }
- cpu->cpreg_indexes[arraylen] = regidx;
- arraylen++;
- }
- assert(cpu->cpreg_array_len == arraylen);
-
- if (!write_kvmstate_to_list(cpu)) {
- /* Shouldn't happen unless kernel is inconsistent about
- * what registers exist.
- */
- fprintf(stderr, "Initial read of kernel register state failed\n");
- ret = -EINVAL;
- goto out;
- }
-
- /* Save a copy of the initial register values so that we can
- * feed it back to the kernel on VCPU reset.
- */
- cpu->cpreg_reset_values = g_memdup(cpu->cpreg_values,
- cpu->cpreg_array_len *
- sizeof(cpu->cpreg_values[0]));
-
-out:
- g_free(rlp);
- return ret;
-}
-
/* We track all the KVM devices which need their memory addresses
* passing to the kernel in a list of these structures.
* When board init is complete we run through the list and
@@ -563,232 +315,6 @@ bool write_list_to_kvmstate(ARMCPU *cpu)
return ok;
}
-typedef struct Reg {
- uint64_t id;
- int offset;
-} Reg;
-
-#define COREREG(KERNELNAME, QEMUFIELD) \
- { \
- KVM_REG_ARM | KVM_REG_SIZE_U32 | \
- KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(KERNELNAME), \
- offsetof(CPUARMState, QEMUFIELD) \
- }
-
-#define VFPSYSREG(R) \
- { \
- KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_VFP | \
- KVM_REG_ARM_VFP_##R, \
- offsetof(CPUARMState, vfp.xregs[ARM_VFP_##R]) \
- }
-
-static const Reg regs[] = {
- /* R0_usr .. R14_usr */
- COREREG(usr_regs.uregs[0], regs[0]),
- COREREG(usr_regs.uregs[1], regs[1]),
- COREREG(usr_regs.uregs[2], regs[2]),
- COREREG(usr_regs.uregs[3], regs[3]),
- COREREG(usr_regs.uregs[4], regs[4]),
- COREREG(usr_regs.uregs[5], regs[5]),
- COREREG(usr_regs.uregs[6], regs[6]),
- COREREG(usr_regs.uregs[7], regs[7]),
- COREREG(usr_regs.uregs[8], usr_regs[0]),
- COREREG(usr_regs.uregs[9], usr_regs[1]),
- COREREG(usr_regs.uregs[10], usr_regs[2]),
- COREREG(usr_regs.uregs[11], usr_regs[3]),
- COREREG(usr_regs.uregs[12], usr_regs[4]),
- COREREG(usr_regs.uregs[13], banked_r13[0]),
- COREREG(usr_regs.uregs[14], banked_r14[0]),
- /* R13, R14, SPSR for SVC, ABT, UND, IRQ banks */
- COREREG(svc_regs[0], banked_r13[1]),
- COREREG(svc_regs[1], banked_r14[1]),
- COREREG(svc_regs[2], banked_spsr[1]),
- COREREG(abt_regs[0], banked_r13[2]),
- COREREG(abt_regs[1], banked_r14[2]),
- COREREG(abt_regs[2], banked_spsr[2]),
- COREREG(und_regs[0], banked_r13[3]),
- COREREG(und_regs[1], banked_r14[3]),
- COREREG(und_regs[2], banked_spsr[3]),
- COREREG(irq_regs[0], banked_r13[4]),
- COREREG(irq_regs[1], banked_r14[4]),
- COREREG(irq_regs[2], banked_spsr[4]),
- /* R8_fiq .. R14_fiq and SPSR_fiq */
- COREREG(fiq_regs[0], fiq_regs[0]),
- COREREG(fiq_regs[1], fiq_regs[1]),
- COREREG(fiq_regs[2], fiq_regs[2]),
- COREREG(fiq_regs[3], fiq_regs[3]),
- COREREG(fiq_regs[4], fiq_regs[4]),
- COREREG(fiq_regs[5], banked_r13[5]),
- COREREG(fiq_regs[6], banked_r14[5]),
- COREREG(fiq_regs[7], banked_spsr[5]),
- /* R15 */
- COREREG(usr_regs.uregs[15], regs[15]),
- /* VFP system registers */
- VFPSYSREG(FPSID),
- VFPSYSREG(MVFR1),
- VFPSYSREG(MVFR0),
- VFPSYSREG(FPEXC),
- VFPSYSREG(FPINST),
- VFPSYSREG(FPINST2),
-};
-
-int kvm_arch_put_registers(CPUState *cs, int level)
-{
- ARMCPU *cpu = ARM_CPU(cs);
- CPUARMState *env = &cpu->env;
- struct kvm_one_reg r;
- int mode, bn;
- int ret, i;
- uint32_t cpsr, fpscr;
-
- /* Make sure the banked regs are properly set */
- mode = env->uncached_cpsr & CPSR_M;
- bn = bank_number(mode);
- if (mode == ARM_CPU_MODE_FIQ) {
- memcpy(env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
- } else {
- memcpy(env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
- }
- env->banked_r13[bn] = env->regs[13];
- env->banked_r14[bn] = env->regs[14];
- env->banked_spsr[bn] = env->spsr;
-
- /* Now we can safely copy stuff down to the kernel */
- for (i = 0; i < ARRAY_SIZE(regs); i++) {
- r.id = regs[i].id;
- r.addr = (uintptr_t)(env) + regs[i].offset;
- ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &r);
- if (ret) {
- return ret;
- }
- }
-
- /* Special cases which aren't a single CPUARMState field */
- cpsr = cpsr_read(env);
- r.id = KVM_REG_ARM | KVM_REG_SIZE_U32 |
- KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(usr_regs.ARM_cpsr);
- r.addr = (uintptr_t)(&cpsr);
- ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &r);
- if (ret) {
- return ret;
- }
-
- /* VFP registers */
- r.id = KVM_REG_ARM | KVM_REG_SIZE_U64 | KVM_REG_ARM_VFP;
- for (i = 0; i < 32; i++) {
- r.addr = (uintptr_t)(&env->vfp.regs[i]);
- ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &r);
- if (ret) {
- return ret;
- }
- r.id++;
- }
-
- r.id = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_VFP |
- KVM_REG_ARM_VFP_FPSCR;
- fpscr = vfp_get_fpscr(env);
- r.addr = (uintptr_t)&fpscr;
- ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &r);
- if (ret) {
- return ret;
- }
-
- /* Note that we do not call write_cpustate_to_list()
- * here, so we are only writing the tuple list back to
- * KVM. This is safe because nothing can change the
- * CPUARMState cp15 fields (in particular gdb accesses cannot)
- * and so there are no changes to sync. In fact syncing would
- * be wrong at this point: for a constant register where TCG and
- * KVM disagree about its value, the preceding write_list_to_cpustate()
- * would not have had any effect on the CPUARMState value (since the
- * register is read-only), and a write_cpustate_to_list() here would
- * then try to write the TCG value back into KVM -- this would either
- * fail or incorrectly change the value the guest sees.
- *
- * If we ever want to allow the user to modify cp15 registers via
- * the gdb stub, we would need to be more clever here (for instance
- * tracking the set of registers kvm_arch_get_registers() successfully
- * managed to update the CPUARMState with, and only allowing those
- * to be written back up into the kernel).
- */
- if (!write_list_to_kvmstate(cpu)) {
- return EINVAL;
- }
-
- return ret;
-}
-
-int kvm_arch_get_registers(CPUState *cs)
-{
- ARMCPU *cpu = ARM_CPU(cs);
- CPUARMState *env = &cpu->env;
- struct kvm_one_reg r;
- int mode, bn;
- int ret, i;
- uint32_t cpsr, fpscr;
-
- for (i = 0; i < ARRAY_SIZE(regs); i++) {
- r.id = regs[i].id;
- r.addr = (uintptr_t)(env) + regs[i].offset;
- ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
- if (ret) {
- return ret;
- }
- }
-
- /* Special cases which aren't a single CPUARMState field */
- r.id = KVM_REG_ARM | KVM_REG_SIZE_U32 |
- KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(usr_regs.ARM_cpsr);
- r.addr = (uintptr_t)(&cpsr);
- ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
- if (ret) {
- return ret;
- }
- cpsr_write(env, cpsr, 0xffffffff);
-
- /* Make sure the current mode regs are properly set */
- mode = env->uncached_cpsr & CPSR_M;
- bn = bank_number(mode);
- if (mode == ARM_CPU_MODE_FIQ) {
- memcpy(env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
- } else {
- memcpy(env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
- }
- env->regs[13] = env->banked_r13[bn];
- env->regs[14] = env->banked_r14[bn];
- env->spsr = env->banked_spsr[bn];
-
- /* VFP registers */
- r.id = KVM_REG_ARM | KVM_REG_SIZE_U64 | KVM_REG_ARM_VFP;
- for (i = 0; i < 32; i++) {
- r.addr = (uintptr_t)(&env->vfp.regs[i]);
- ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
- if (ret) {
- return ret;
- }
- r.id++;
- }
-
- r.id = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_VFP |
- KVM_REG_ARM_VFP_FPSCR;
- r.addr = (uintptr_t)&fpscr;
- ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
- if (ret) {
- return ret;
- }
- vfp_set_fpscr(env, fpscr);
-
- if (!write_kvmstate_to_list(cpu)) {
- return EINVAL;
- }
- /* Note that it's OK to have registers which aren't in CPUState,
- * so we can ignore a failure return here.
- */
- write_list_to_cpustate(cpu);
-
- return 0;
-}
-
void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
{
}
@@ -802,19 +328,6 @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
return 0;
}
-void kvm_arch_reset_vcpu(CPUState *cs)
-{
- /* Feed the kernel back its initial register state */
- ARMCPU *cpu = ARM_CPU(cs);
-
- memmove(cpu->cpreg_values, cpu->cpreg_reset_values,
- cpu->cpreg_array_len * sizeof(cpu->cpreg_values[0]));
-
- if (!write_list_to_kvmstate(cpu)) {
- abort();
- }
-}
-
bool kvm_arch_stop_on_emulation_error(CPUState *cs)
{
return true;
diff --git a/target-arm/kvm32.c b/target-arm/kvm32.c
new file mode 100644
index 0000000000..a4fde07969
--- /dev/null
+++ b/target-arm/kvm32.c
@@ -0,0 +1,515 @@
+/*
+ * ARM implementation of KVM hooks, 32 bit specific code.
+ *
+ * Copyright Christoffer Dall 2009-2010
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include <stdio.h>
+#include <sys/types.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+
+#include <linux/kvm.h>
+
+#include "qemu-common.h"
+#include "qemu/timer.h"
+#include "sysemu/sysemu.h"
+#include "sysemu/kvm.h"
+#include "kvm_arm.h"
+#include "cpu.h"
+#include "hw/arm/arm.h"
+
+static inline void set_feature(uint64_t *features, int feature)
+{
+ *features |= 1ULL << feature;
+}
+
+bool kvm_arm_get_host_cpu_features(ARMHostCPUClass *ahcc)
+{
+ /* Identify the feature bits corresponding to the host CPU, and
+ * fill out the ARMHostCPUClass fields accordingly. To do this
+ * we have to create a scratch VM, create a single CPU inside it,
+ * and then query that CPU for the relevant ID registers.
+ */
+ int i, ret, fdarray[3];
+ uint32_t midr, id_pfr0, id_isar0, mvfr1;
+ uint64_t features = 0;
+ /* Old kernels may not know about the PREFERRED_TARGET ioctl: however
+ * we know these will only support creating one kind of guest CPU,
+ * which is its preferred CPU type.
+ */
+ static const uint32_t cpus_to_try[] = {
+ QEMU_KVM_ARM_TARGET_CORTEX_A15,
+ QEMU_KVM_ARM_TARGET_NONE
+ };
+ struct kvm_vcpu_init init;
+ struct kvm_one_reg idregs[] = {
+ {
+ .id = KVM_REG_ARM | KVM_REG_SIZE_U32
+ | ENCODE_CP_REG(15, 0, 0, 0, 0, 0),
+ .addr = (uintptr_t)&midr,
+ },
+ {
+ .id = KVM_REG_ARM | KVM_REG_SIZE_U32
+ | ENCODE_CP_REG(15, 0, 0, 1, 0, 0),
+ .addr = (uintptr_t)&id_pfr0,
+ },
+ {
+ .id = KVM_REG_ARM | KVM_REG_SIZE_U32
+ | ENCODE_CP_REG(15, 0, 0, 2, 0, 0),
+ .addr = (uintptr_t)&id_isar0,
+ },
+ {
+ .id = KVM_REG_ARM | KVM_REG_SIZE_U32
+ | KVM_REG_ARM_VFP | KVM_REG_ARM_VFP_MVFR1,
+ .addr = (uintptr_t)&mvfr1,
+ },
+ };
+
+ if (!kvm_arm_create_scratch_host_vcpu(cpus_to_try, fdarray, &init)) {
+ return false;
+ }
+
+ ahcc->target = init.target;
+
+ /* This is not strictly blessed by the device tree binding docs yet,
+ * but in practice the kernel does not care about this string so
+ * there is no point maintaining an KVM_ARM_TARGET_* -> string table.
+ */
+ ahcc->dtb_compatible = "arm,arm-v7";
+
+ for (i = 0; i < ARRAY_SIZE(idregs); i++) {
+ ret = ioctl(fdarray[2], KVM_GET_ONE_REG, &idregs[i]);
+ if (ret) {
+ break;
+ }
+ }
+
+ kvm_arm_destroy_scratch_host_vcpu(fdarray);
+
+ if (ret) {
+ return false;
+ }
+
+ /* Now we've retrieved all the register information we can
+ * set the feature bits based on the ID register fields.
+ * We can assume any KVM supporting CPU is at least a v7
+ * with VFPv3, LPAE and the generic timers; this in turn implies
+ * most of the other feature bits, but a few must be tested.
+ */
+ set_feature(&features, ARM_FEATURE_V7);
+ set_feature(&features, ARM_FEATURE_VFP3);
+ set_feature(&features, ARM_FEATURE_LPAE);
+ set_feature(&features, ARM_FEATURE_GENERIC_TIMER);
+
+ switch (extract32(id_isar0, 24, 4)) {
+ case 1:
+ set_feature(&features, ARM_FEATURE_THUMB_DIV);
+ break;
+ case 2:
+ set_feature(&features, ARM_FEATURE_ARM_DIV);
+ set_feature(&features, ARM_FEATURE_THUMB_DIV);
+ break;
+ default:
+ break;
+ }
+
+ if (extract32(id_pfr0, 12, 4) == 1) {
+ set_feature(&features, ARM_FEATURE_THUMB2EE);
+ }
+ if (extract32(mvfr1, 20, 4) == 1) {
+ set_feature(&features, ARM_FEATURE_VFP_FP16);
+ }
+ if (extract32(mvfr1, 12, 4) == 1) {
+ set_feature(&features, ARM_FEATURE_NEON);
+ }
+ if (extract32(mvfr1, 28, 4) == 1) {
+ /* FMAC support implies VFPv4 */
+ set_feature(&features, ARM_FEATURE_VFP4);
+ }
+
+ ahcc->features = features;
+
+ return true;
+}
+
+static bool reg_syncs_via_tuple_list(uint64_t regidx)
+{
+ /* Return true if the regidx is a register we should synchronize
+ * via the cpreg_tuples array (ie is not a core reg we sync by
+ * hand in kvm_arch_get/put_registers())
+ */
+ switch (regidx & KVM_REG_ARM_COPROC_MASK) {
+ case KVM_REG_ARM_CORE:
+ case KVM_REG_ARM_VFP:
+ return false;
+ default:
+ return true;
+ }
+}
+
+static int compare_u64(const void *a, const void *b)
+{
+ if (*(uint64_t *)a > *(uint64_t *)b) {
+ return 1;
+ }
+ if (*(uint64_t *)a < *(uint64_t *)b) {
+ return -1;
+ }
+ return 0;
+}
+
+int kvm_arch_init_vcpu(CPUState *cs)
+{
+ struct kvm_vcpu_init init;
+ int i, ret, arraylen;
+ uint64_t v;
+ struct kvm_one_reg r;
+ struct kvm_reg_list rl;
+ struct kvm_reg_list *rlp;
+ ARMCPU *cpu = ARM_CPU(cs);
+
+ if (cpu->kvm_target == QEMU_KVM_ARM_TARGET_NONE) {
+ fprintf(stderr, "KVM is not supported for this guest CPU type\n");
+ return -EINVAL;
+ }
+
+ init.target = cpu->kvm_target;
+ memset(init.features, 0, sizeof(init.features));
+ if (cpu->start_powered_off) {
+ init.features[0] = 1 << KVM_ARM_VCPU_POWER_OFF;
+ }
+ ret = kvm_vcpu_ioctl(cs, KVM_ARM_VCPU_INIT, &init);
+ if (ret) {
+ return ret;
+ }
+ /* Query the kernel to make sure it supports 32 VFP
+ * registers: QEMU's "cortex-a15" CPU is always a
+ * VFP-D32 core. The simplest way to do this is just
+ * to attempt to read register d31.
+ */
+ r.id = KVM_REG_ARM | KVM_REG_SIZE_U64 | KVM_REG_ARM_VFP | 31;
+ r.addr = (uintptr_t)(&v);
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
+ if (ret == -ENOENT) {
+ return -EINVAL;
+ }
+
+ /* Populate the cpreg list based on the kernel's idea
+ * of what registers exist (and throw away the TCG-created list).
+ */
+ rl.n = 0;
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_REG_LIST, &rl);
+ if (ret != -E2BIG) {
+ return ret;
+ }
+ rlp = g_malloc(sizeof(struct kvm_reg_list) + rl.n * sizeof(uint64_t));
+ rlp->n = rl.n;
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_REG_LIST, rlp);
+ if (ret) {
+ goto out;
+ }
+ /* Sort the list we get back from the kernel, since cpreg_tuples
+ * must be in strictly ascending order.
+ */
+ qsort(&rlp->reg, rlp->n, sizeof(rlp->reg[0]), compare_u64);
+
+ for (i = 0, arraylen = 0; i < rlp->n; i++) {
+ if (!reg_syncs_via_tuple_list(rlp->reg[i])) {
+ continue;
+ }
+ switch (rlp->reg[i] & KVM_REG_SIZE_MASK) {
+ case KVM_REG_SIZE_U32:
+ case KVM_REG_SIZE_U64:
+ break;
+ default:
+ fprintf(stderr, "Can't handle size of register in kernel list\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ arraylen++;
+ }
+
+ cpu->cpreg_indexes = g_renew(uint64_t, cpu->cpreg_indexes, arraylen);
+ cpu->cpreg_values = g_renew(uint64_t, cpu->cpreg_values, arraylen);
+ cpu->cpreg_vmstate_indexes = g_renew(uint64_t, cpu->cpreg_vmstate_indexes,
+ arraylen);
+ cpu->cpreg_vmstate_values = g_renew(uint64_t, cpu->cpreg_vmstate_values,
+ arraylen);
+ cpu->cpreg_array_len = arraylen;
+ cpu->cpreg_vmstate_array_len = arraylen;
+
+ for (i = 0, arraylen = 0; i < rlp->n; i++) {
+ uint64_t regidx = rlp->reg[i];
+ if (!reg_syncs_via_tuple_list(regidx)) {
+ continue;
+ }
+ cpu->cpreg_indexes[arraylen] = regidx;
+ arraylen++;
+ }
+ assert(cpu->cpreg_array_len == arraylen);
+
+ if (!write_kvmstate_to_list(cpu)) {
+ /* Shouldn't happen unless kernel is inconsistent about
+ * what registers exist.
+ */
+ fprintf(stderr, "Initial read of kernel register state failed\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Save a copy of the initial register values so that we can
+ * feed it back to the kernel on VCPU reset.
+ */
+ cpu->cpreg_reset_values = g_memdup(cpu->cpreg_values,
+ cpu->cpreg_array_len *
+ sizeof(cpu->cpreg_values[0]));
+
+out:
+ g_free(rlp);
+ return ret;
+}
+
+typedef struct Reg {
+ uint64_t id;
+ int offset;
+} Reg;
+
+#define COREREG(KERNELNAME, QEMUFIELD) \
+ { \
+ KVM_REG_ARM | KVM_REG_SIZE_U32 | \
+ KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(KERNELNAME), \
+ offsetof(CPUARMState, QEMUFIELD) \
+ }
+
+#define VFPSYSREG(R) \
+ { \
+ KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_VFP | \
+ KVM_REG_ARM_VFP_##R, \
+ offsetof(CPUARMState, vfp.xregs[ARM_VFP_##R]) \
+ }
+
+static const Reg regs[] = {
+ /* R0_usr .. R14_usr */
+ COREREG(usr_regs.uregs[0], regs[0]),
+ COREREG(usr_regs.uregs[1], regs[1]),
+ COREREG(usr_regs.uregs[2], regs[2]),
+ COREREG(usr_regs.uregs[3], regs[3]),
+ COREREG(usr_regs.uregs[4], regs[4]),
+ COREREG(usr_regs.uregs[5], regs[5]),
+ COREREG(usr_regs.uregs[6], regs[6]),
+ COREREG(usr_regs.uregs[7], regs[7]),
+ COREREG(usr_regs.uregs[8], usr_regs[0]),
+ COREREG(usr_regs.uregs[9], usr_regs[1]),
+ COREREG(usr_regs.uregs[10], usr_regs[2]),
+ COREREG(usr_regs.uregs[11], usr_regs[3]),
+ COREREG(usr_regs.uregs[12], usr_regs[4]),
+ COREREG(usr_regs.uregs[13], banked_r13[0]),
+ COREREG(usr_regs.uregs[14], banked_r14[0]),
+ /* R13, R14, SPSR for SVC, ABT, UND, IRQ banks */
+ COREREG(svc_regs[0], banked_r13[1]),
+ COREREG(svc_regs[1], banked_r14[1]),
+ COREREG(svc_regs[2], banked_spsr[1]),
+ COREREG(abt_regs[0], banked_r13[2]),
+ COREREG(abt_regs[1], banked_r14[2]),
+ COREREG(abt_regs[2], banked_spsr[2]),
+ COREREG(und_regs[0], banked_r13[3]),
+ COREREG(und_regs[1], banked_r14[3]),
+ COREREG(und_regs[2], banked_spsr[3]),
+ COREREG(irq_regs[0], banked_r13[4]),
+ COREREG(irq_regs[1], banked_r14[4]),
+ COREREG(irq_regs[2], banked_spsr[4]),
+ /* R8_fiq .. R14_fiq and SPSR_fiq */
+ COREREG(fiq_regs[0], fiq_regs[0]),
+ COREREG(fiq_regs[1], fiq_regs[1]),
+ COREREG(fiq_regs[2], fiq_regs[2]),
+ COREREG(fiq_regs[3], fiq_regs[3]),
+ COREREG(fiq_regs[4], fiq_regs[4]),
+ COREREG(fiq_regs[5], banked_r13[5]),
+ COREREG(fiq_regs[6], banked_r14[5]),
+ COREREG(fiq_regs[7], banked_spsr[5]),
+ /* R15 */
+ COREREG(usr_regs.uregs[15], regs[15]),
+ /* VFP system registers */
+ VFPSYSREG(FPSID),
+ VFPSYSREG(MVFR1),
+ VFPSYSREG(MVFR0),
+ VFPSYSREG(FPEXC),
+ VFPSYSREG(FPINST),
+ VFPSYSREG(FPINST2),
+};
+
+int kvm_arch_put_registers(CPUState *cs, int level)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+ struct kvm_one_reg r;
+ int mode, bn;
+ int ret, i;
+ uint32_t cpsr, fpscr;
+
+ /* Make sure the banked regs are properly set */
+ mode = env->uncached_cpsr & CPSR_M;
+ bn = bank_number(mode);
+ if (mode == ARM_CPU_MODE_FIQ) {
+ memcpy(env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
+ } else {
+ memcpy(env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
+ }
+ env->banked_r13[bn] = env->regs[13];
+ env->banked_r14[bn] = env->regs[14];
+ env->banked_spsr[bn] = env->spsr;
+
+ /* Now we can safely copy stuff down to the kernel */
+ for (i = 0; i < ARRAY_SIZE(regs); i++) {
+ r.id = regs[i].id;
+ r.addr = (uintptr_t)(env) + regs[i].offset;
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &r);
+ if (ret) {
+ return ret;
+ }
+ }
+
+ /* Special cases which aren't a single CPUARMState field */
+ cpsr = cpsr_read(env);
+ r.id = KVM_REG_ARM | KVM_REG_SIZE_U32 |
+ KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(usr_regs.ARM_cpsr);
+ r.addr = (uintptr_t)(&cpsr);
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &r);
+ if (ret) {
+ return ret;
+ }
+
+ /* VFP registers */
+ r.id = KVM_REG_ARM | KVM_REG_SIZE_U64 | KVM_REG_ARM_VFP;
+ for (i = 0; i < 32; i++) {
+ r.addr = (uintptr_t)(&env->vfp.regs[i]);
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &r);
+ if (ret) {
+ return ret;
+ }
+ r.id++;
+ }
+
+ r.id = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_VFP |
+ KVM_REG_ARM_VFP_FPSCR;
+ fpscr = vfp_get_fpscr(env);
+ r.addr = (uintptr_t)&fpscr;
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &r);
+ if (ret) {
+ return ret;
+ }
+
+ /* Note that we do not call write_cpustate_to_list()
+ * here, so we are only writing the tuple list back to
+ * KVM. This is safe because nothing can change the
+ * CPUARMState cp15 fields (in particular gdb accesses cannot)
+ * and so there are no changes to sync. In fact syncing would
+ * be wrong at this point: for a constant register where TCG and
+ * KVM disagree about its value, the preceding write_list_to_cpustate()
+ * would not have had any effect on the CPUARMState value (since the
+ * register is read-only), and a write_cpustate_to_list() here would
+ * then try to write the TCG value back into KVM -- this would either
+ * fail or incorrectly change the value the guest sees.
+ *
+ * If we ever want to allow the user to modify cp15 registers via
+ * the gdb stub, we would need to be more clever here (for instance
+ * tracking the set of registers kvm_arch_get_registers() successfully
+ * managed to update the CPUARMState with, and only allowing those
+ * to be written back up into the kernel).
+ */
+ if (!write_list_to_kvmstate(cpu)) {
+ return EINVAL;
+ }
+
+ return ret;
+}
+
+int kvm_arch_get_registers(CPUState *cs)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+ struct kvm_one_reg r;
+ int mode, bn;
+ int ret, i;
+ uint32_t cpsr, fpscr;
+
+ for (i = 0; i < ARRAY_SIZE(regs); i++) {
+ r.id = regs[i].id;
+ r.addr = (uintptr_t)(env) + regs[i].offset;
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
+ if (ret) {
+ return ret;
+ }
+ }
+
+ /* Special cases which aren't a single CPUARMState field */
+ r.id = KVM_REG_ARM | KVM_REG_SIZE_U32 |
+ KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(usr_regs.ARM_cpsr);
+ r.addr = (uintptr_t)(&cpsr);
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
+ if (ret) {
+ return ret;
+ }
+ cpsr_write(env, cpsr, 0xffffffff);
+
+ /* Make sure the current mode regs are properly set */
+ mode = env->uncached_cpsr & CPSR_M;
+ bn = bank_number(mode);
+ if (mode == ARM_CPU_MODE_FIQ) {
+ memcpy(env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
+ } else {
+ memcpy(env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
+ }
+ env->regs[13] = env->banked_r13[bn];
+ env->regs[14] = env->banked_r14[bn];
+ env->spsr = env->banked_spsr[bn];
+
+ /* VFP registers */
+ r.id = KVM_REG_ARM | KVM_REG_SIZE_U64 | KVM_REG_ARM_VFP;
+ for (i = 0; i < 32; i++) {
+ r.addr = (uintptr_t)(&env->vfp.regs[i]);
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
+ if (ret) {
+ return ret;
+ }
+ r.id++;
+ }
+
+ r.id = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_VFP |
+ KVM_REG_ARM_VFP_FPSCR;
+ r.addr = (uintptr_t)&fpscr;
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
+ if (ret) {
+ return ret;
+ }
+ vfp_set_fpscr(env, fpscr);
+
+ if (!write_kvmstate_to_list(cpu)) {
+ return EINVAL;
+ }
+ /* Note that it's OK to have registers which aren't in CPUState,
+ * so we can ignore a failure return here.
+ */
+ write_list_to_cpustate(cpu);
+
+ return 0;
+}
+
+void kvm_arch_reset_vcpu(CPUState *cs)
+{
+ /* Feed the kernel back its initial register state */
+ ARMCPU *cpu = ARM_CPU(cs);
+
+ memmove(cpu->cpreg_values, cpu->cpreg_reset_values,
+ cpu->cpreg_array_len * sizeof(cpu->cpreg_values[0]));
+
+ if (!write_list_to_kvmstate(cpu)) {
+ abort();
+ }
+}
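compare_u64() above returns -1/0/1 through explicit comparisons rather than the usual "return a - b;" comparator shortcut: the difference of two uint64_t keys does not fit in qsort()'s int return value, and truncating it can report the wrong ordering, which would break the strictly ascending cpreg_indexes list the code relies on. A standalone illustration, not part of the patch:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* same shape as compare_u64() in kvm32.c: safe for any pair of uint64_t keys */
static int compare_u64(const void *a, const void *b)
{
    if (*(const uint64_t *)a > *(const uint64_t *)b) {
        return 1;
    }
    if (*(const uint64_t *)a < *(const uint64_t *)b) {
        return -1;
    }
    return 0;
}

/* the tempting subtraction-based version truncates the difference to int */
static int compare_u64_broken(const void *a, const void *b)
{
    return (int)(*(const uint64_t *)a - *(const uint64_t *)b);
}

int main(void)
{
    uint64_t x = 0x100000000ULL;    /* 2^32 */
    uint64_t y = 1;

    assert(compare_u64(&x, &y) > 0);    /* correct: x sorts after y */
    /* with 32-bit int, the difference 0xffffffff typically truncates to -1 */
    printf("broken comparator says %d\n", compare_u64_broken(&x, &y));
    return 0;
}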
diff --git a/target-arm/kvm64.c b/target-arm/kvm64.c
new file mode 100644
index 0000000000..1b7ca90374
--- /dev/null
+++ b/target-arm/kvm64.c
@@ -0,0 +1,204 @@
+/*
+ * ARM implementation of KVM hooks, 64 bit specific code
+ *
+ * Copyright Mian-M. Hamayun 2013, Virtual Open Systems
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include <stdio.h>
+#include <sys/types.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+
+#include <linux/kvm.h>
+
+#include "qemu-common.h"
+#include "qemu/timer.h"
+#include "sysemu/sysemu.h"
+#include "sysemu/kvm.h"
+#include "kvm_arm.h"
+#include "cpu.h"
+#include "hw/arm/arm.h"
+
+static inline void set_feature(uint64_t *features, int feature)
+{
+ *features |= 1ULL << feature;
+}
+
+bool kvm_arm_get_host_cpu_features(ARMHostCPUClass *ahcc)
+{
+ /* Identify the feature bits corresponding to the host CPU, and
+ * fill out the ARMHostCPUClass fields accordingly. To do this
+ * we have to create a scratch VM, create a single CPU inside it,
+ * and then query that CPU for the relevant ID registers.
+ * For AArch64 we currently don't care about ID registers at
+ * all; we just want to know the CPU type.
+ */
+ int fdarray[3];
+ uint64_t features = 0;
+ /* Old kernels may not know about the PREFERRED_TARGET ioctl: however
+ * we know these will only support creating one kind of guest CPU,
+ * which is its preferred CPU type. Fortunately these old kernels
+ * support only a very limited number of CPUs.
+ */
+ static const uint32_t cpus_to_try[] = {
+ KVM_ARM_TARGET_AEM_V8,
+ KVM_ARM_TARGET_FOUNDATION_V8,
+ KVM_ARM_TARGET_CORTEX_A57,
+ QEMU_KVM_ARM_TARGET_NONE
+ };
+ struct kvm_vcpu_init init;
+
+ if (!kvm_arm_create_scratch_host_vcpu(cpus_to_try, fdarray, &init)) {
+ return false;
+ }
+
+ ahcc->target = init.target;
+ ahcc->dtb_compatible = "arm,arm-v8";
+
+ kvm_arm_destroy_scratch_host_vcpu(fdarray);
+
+ /* We can assume any KVM supporting CPU is at least a v8
+ * with VFPv4+Neon; this in turn implies most of the other
+ * feature bits.
+ */
+ set_feature(&features, ARM_FEATURE_V8);
+ set_feature(&features, ARM_FEATURE_VFP4);
+ set_feature(&features, ARM_FEATURE_NEON);
+ set_feature(&features, ARM_FEATURE_AARCH64);
+
+ ahcc->features = features;
+
+ return true;
+}
+
+int kvm_arch_init_vcpu(CPUState *cs)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ struct kvm_vcpu_init init;
+ int ret;
+
+ if (cpu->kvm_target == QEMU_KVM_ARM_TARGET_NONE ||
+ !arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
+ fprintf(stderr, "KVM is not supported for this guest CPU type\n");
+ return -EINVAL;
+ }
+
+ init.target = cpu->kvm_target;
+ memset(init.features, 0, sizeof(init.features));
+ if (cpu->start_powered_off) {
+ init.features[0] = 1 << KVM_ARM_VCPU_POWER_OFF;
+ }
+ ret = kvm_vcpu_ioctl(cs, KVM_ARM_VCPU_INIT, &init);
+
+ /* TODO : support for save/restore/reset of system regs via tuple list */
+
+ return ret;
+}
+
+#define AARCH64_CORE_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
+ KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))
+
+int kvm_arch_put_registers(CPUState *cs, int level)
+{
+ struct kvm_one_reg reg;
+ uint64_t val;
+ int i;
+ int ret;
+
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+
+ for (i = 0; i < 31; i++) {
+ reg.id = AARCH64_CORE_REG(regs.regs[i]);
+ reg.addr = (uintptr_t) &env->xregs[i];
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+ if (ret) {
+ return ret;
+ }
+ }
+
+ reg.id = AARCH64_CORE_REG(regs.sp);
+ reg.addr = (uintptr_t) &env->xregs[31];
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+ if (ret) {
+ return ret;
+ }
+
+ /* Note that KVM thinks pstate is 64 bit but we use a uint32_t */
+ val = pstate_read(env);
+ reg.id = AARCH64_CORE_REG(regs.pstate);
+ reg.addr = (uintptr_t) &val;
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+ if (ret) {
+ return ret;
+ }
+
+ reg.id = AARCH64_CORE_REG(regs.pc);
+ reg.addr = (uintptr_t) &env->pc;
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+ if (ret) {
+ return ret;
+ }
+
+ /* TODO:
+ * SP_EL1
+ * ELR_EL1
+ * SPSR[]
+ * FP state
+ * system registers
+ */
+ return ret;
+}
+
+int kvm_arch_get_registers(CPUState *cs)
+{
+ struct kvm_one_reg reg;
+ uint64_t val;
+ int i;
+ int ret;
+
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+
+ for (i = 0; i < 31; i++) {
+ reg.id = AARCH64_CORE_REG(regs.regs[i]);
+ reg.addr = (uintptr_t) &env->xregs[i];
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+ if (ret) {
+ return ret;
+ }
+ }
+
+ reg.id = AARCH64_CORE_REG(regs.sp);
+ reg.addr = (uintptr_t) &env->xregs[31];
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+ if (ret) {
+ return ret;
+ }
+
+ reg.id = AARCH64_CORE_REG(regs.pstate);
+ reg.addr = (uintptr_t) &val;
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+ if (ret) {
+ return ret;
+ }
+ pstate_write(env, val);
+
+ reg.id = AARCH64_CORE_REG(regs.pc);
+ reg.addr = (uintptr_t) &env->pc;
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+ if (ret) {
+ return ret;
+ }
+
+ /* TODO: other registers */
+ return ret;
+}
+
+void kvm_arch_reset_vcpu(CPUState *cs)
+{
+}
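AARCH64_CORE_REG() above turns a field of the kernel's struct kvm_regs into a KVM register ID: KVM_REG_ARM_CORE_REG(x) in the kernel UAPI headers is an offsetof()-based 32-bit-word index, which is why expressions such as regs.regs[i], regs.sp and regs.pstate can be passed straight through as macro arguments. A sketch of the indexing idea against a simplified mock of the core-register block (the struct and the printed indices below are illustrative, not the kernel's definitions):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* simplified, flat mock of an AArch64 core-register block */
struct mock_core_regs {
    uint64_t regs[31];
    uint64_t sp;
    uint64_t pc;
    uint64_t pstate;
};

/* word (32-bit) index of a field, mirroring the KVM_REG_ARM_CORE_REG() idea */
#define CORE_REG_INDEX(field) \
    (offsetof(struct mock_core_regs, field) / sizeof(uint32_t))

int main(void)
{
    printf("regs[0]  -> index %zu\n", CORE_REG_INDEX(regs[0]));     /* 0  */
    printf("regs[30] -> index %zu\n", CORE_REG_INDEX(regs[30]));    /* 60 */
    printf("sp       -> index %zu\n", CORE_REG_INDEX(sp));          /* 62 */
    printf("pc       -> index %zu\n", CORE_REG_INDEX(pc));          /* 64 */
    printf("pstate   -> index %zu\n", CORE_REG_INDEX(pstate));      /* 66 */
    return 0;
}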
diff --git a/target-arm/translate-a64.c b/target-arm/translate-a64.c
index f120088607..0a76130bb2 100644
--- a/target-arm/translate-a64.c
+++ b/target-arm/translate-a64.c
@@ -28,13 +28,15 @@
#include "translate.h"
#include "qemu/host-utils.h"
+#include "exec/gen-icount.h"
+
#include "helper.h"
#define GEN_HELPER 1
#include "helper.h"
static TCGv_i64 cpu_X[32];
static TCGv_i64 cpu_pc;
-static TCGv_i32 pstate;
+static TCGv_i32 cpu_NF, cpu_ZF, cpu_CF, cpu_VF;
static const char *regnames[] = {
"x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",
@@ -43,6 +45,13 @@ static const char *regnames[] = {
"x24", "x25", "x26", "x27", "x28", "x29", "lr", "sp"
};
+enum a64_shift_type {
+ A64_SHIFT_TYPE_LSL = 0,
+ A64_SHIFT_TYPE_LSR = 1,
+ A64_SHIFT_TYPE_ASR = 2,
+ A64_SHIFT_TYPE_ROR = 3
+};
+
/* initialize TCG globals. */
void a64_translate_init(void)
{
@@ -57,9 +66,10 @@ void a64_translate_init(void)
regnames[i]);
}
- pstate = tcg_global_mem_new_i32(TCG_AREG0,
- offsetof(CPUARMState, pstate),
- "pstate");
+ cpu_NF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, NF), "NF");
+ cpu_ZF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, ZF), "ZF");
+ cpu_CF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, CF), "CF");
+ cpu_VF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, VF), "VF");
}
void aarch64_cpu_dump_state(CPUState *cs, FILE *f,
@@ -67,6 +77,7 @@ void aarch64_cpu_dump_state(CPUState *cs, FILE *f,
{
ARMCPU *cpu = ARM_CPU(cs);
CPUARMState *env = &cpu->env;
+ uint32_t psr = pstate_read(env);
int i;
cpu_fprintf(f, "PC=%016"PRIx64" SP=%016"PRIx64"\n",
@@ -79,11 +90,12 @@ void aarch64_cpu_dump_state(CPUState *cs, FILE *f,
cpu_fprintf(f, " ");
}
}
- cpu_fprintf(f, "PSTATE=%c%c%c%c\n",
- env->pstate & PSTATE_N ? 'n' : '.',
- env->pstate & PSTATE_Z ? 'z' : '.',
- env->pstate & PSTATE_C ? 'c' : '.',
- env->pstate & PSTATE_V ? 'v' : '.');
+ cpu_fprintf(f, "PSTATE=%08x (flags %c%c%c%c)\n",
+ psr,
+ psr & PSTATE_N ? 'N' : '-',
+ psr & PSTATE_Z ? 'Z' : '-',
+ psr & PSTATE_C ? 'C' : '-',
+ psr & PSTATE_V ? 'V' : '-');
cpu_fprintf(f, "\n");
}
@@ -104,21 +116,1474 @@ static void gen_exception_insn(DisasContext *s, int offset, int excp)
{
gen_a64_set_pc_im(s->pc - offset);
gen_exception(excp);
- s->is_jmp = DISAS_JUMP;
+ s->is_jmp = DISAS_EXC;
+}
+
+static inline bool use_goto_tb(DisasContext *s, int n, uint64_t dest)
+{
+ /* No direct tb linking with singlestep or deterministic io */
+ if (s->singlestep_enabled || (s->tb->cflags & CF_LAST_IO)) {
+ return false;
+ }
+
+ /* Only link tbs from inside the same guest page */
+ if ((s->tb->pc & TARGET_PAGE_MASK) != (dest & TARGET_PAGE_MASK)) {
+ return false;
+ }
+
+ return true;
+}
+
+static inline void gen_goto_tb(DisasContext *s, int n, uint64_t dest)
+{
+ TranslationBlock *tb;
+
+ tb = s->tb;
+ if (use_goto_tb(s, n, dest)) {
+ tcg_gen_goto_tb(n);
+ gen_a64_set_pc_im(dest);
+ tcg_gen_exit_tb((tcg_target_long)tb + n);
+ s->is_jmp = DISAS_TB_JUMP;
+ } else {
+ gen_a64_set_pc_im(dest);
+ if (s->singlestep_enabled) {
+ gen_exception(EXCP_DEBUG);
+ }
+ tcg_gen_exit_tb(0);
+ s->is_jmp = DISAS_JUMP;
+ }
}
-static void real_unallocated_encoding(DisasContext *s)
+static void unallocated_encoding(DisasContext *s)
{
- fprintf(stderr, "Unknown instruction: %#x\n", s->insn);
gen_exception_insn(s, 4, EXCP_UDEF);
}
-#define unallocated_encoding(s) do { \
- fprintf(stderr, "unallocated encoding at line: %d\n", __LINE__); \
- real_unallocated_encoding(s); \
- } while (0)
+#define unsupported_encoding(s, insn) \
+ do { \
+ qemu_log_mask(LOG_UNIMP, \
+ "%s:%d: unsupported instruction encoding 0x%08x " \
+ "at pc=%016" PRIx64 "\n", \
+ __FILE__, __LINE__, insn, s->pc - 4); \
+ unallocated_encoding(s); \
+ } while (0);
+
+static void init_tmp_a64_array(DisasContext *s)
+{
+#ifdef CONFIG_DEBUG_TCG
+ int i;
+ for (i = 0; i < ARRAY_SIZE(s->tmp_a64); i++) {
+ TCGV_UNUSED_I64(s->tmp_a64[i]);
+ }
+#endif
+ s->tmp_a64_count = 0;
+}
+
+static void free_tmp_a64(DisasContext *s)
+{
+ int i;
+ for (i = 0; i < s->tmp_a64_count; i++) {
+ tcg_temp_free_i64(s->tmp_a64[i]);
+ }
+ init_tmp_a64_array(s);
+}
+
+static TCGv_i64 new_tmp_a64(DisasContext *s)
+{
+ assert(s->tmp_a64_count < TMP_A64_MAX);
+ return s->tmp_a64[s->tmp_a64_count++] = tcg_temp_new_i64();
+}
+
+static TCGv_i64 new_tmp_a64_zero(DisasContext *s)
+{
+ TCGv_i64 t = new_tmp_a64(s);
+ tcg_gen_movi_i64(t, 0);
+ return t;
+}
+
+/*
+ * Register access functions
+ *
+ * These functions are used for directly accessing a register in where
+ * changes to the final register value are likely to be made. If you
+ * need to use a register for temporary calculation (e.g. index type
+ * operations) use the read_* form.
+ *
+ * B1.2.1 Register mappings
+ *
+ * In instruction register encoding 31 can refer to ZR (zero register) or
+ * the SP (stack pointer) depending on context. In QEMU's case we map SP
+ * to cpu_X[31] and ZR accesses to a temporary which can be discarded.
+ * This is the point of the _sp forms.
+ */
+static TCGv_i64 cpu_reg(DisasContext *s, int reg)
+{
+ if (reg == 31) {
+ return new_tmp_a64_zero(s);
+ } else {
+ return cpu_X[reg];
+ }
+}
+
+/* register access for when 31 == SP */
+static TCGv_i64 cpu_reg_sp(DisasContext *s, int reg)
+{
+ return cpu_X[reg];
+}
+
+/* read a cpu register in 32bit/64bit mode. Returns a TCGv_i64
+ * representing the register contents. This TCGv is an auto-freed
+ * temporary so it need not be explicitly freed, and may be modified.
+ */
+static TCGv_i64 read_cpu_reg(DisasContext *s, int reg, int sf)
+{
+ TCGv_i64 v = new_tmp_a64(s);
+ if (reg != 31) {
+ if (sf) {
+ tcg_gen_mov_i64(v, cpu_X[reg]);
+ } else {
+ tcg_gen_ext32u_i64(v, cpu_X[reg]);
+ }
+ } else {
+ tcg_gen_movi_i64(v, 0);
+ }
+ return v;
+}
+
+/* Set ZF and NF based on a 64 bit result. This is alas fiddlier
+ * than the 32 bit equivalent.
+ */
+static inline void gen_set_NZ64(TCGv_i64 result)
+{
+ TCGv_i64 flag = tcg_temp_new_i64();
+
+ tcg_gen_setcondi_i64(TCG_COND_NE, flag, result, 0);
+ tcg_gen_trunc_i64_i32(cpu_ZF, flag);
+ tcg_gen_shri_i64(flag, result, 32);
+ tcg_gen_trunc_i64_i32(cpu_NF, flag);
+ tcg_temp_free_i64(flag);
+}
+
+/* Set NZCV as for a logical operation: NZ as per result, CV cleared. */
+static inline void gen_logic_CC(int sf, TCGv_i64 result)
+{
+ if (sf) {
+ gen_set_NZ64(result);
+ } else {
+ tcg_gen_trunc_i64_i32(cpu_ZF, result);
+ tcg_gen_trunc_i64_i32(cpu_NF, result);
+ }
+ tcg_gen_movi_i32(cpu_CF, 0);
+ tcg_gen_movi_i32(cpu_VF, 0);
+}
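/*
 * Aside, not part of the patch: gen_set_NZ64() and gen_logic_CC() above keep
 * the flags in the lazy form the 32-bit ARM translator already uses: cpu_ZF
 * holds a value that is zero exactly when Z is set, and the N flag is bit 31
 * of cpu_NF. A standalone sketch of how N and Z are recovered from a 64-bit
 * result stored that way:
 */
#include <assert.h>
#include <stdint.h>

struct nz_flags {
    uint32_t NF;    /* bit 31 is the N flag */
    uint32_t ZF;    /* zero iff the Z flag is set */
};

static struct nz_flags set_nz64(uint64_t result)
{
    struct nz_flags f;

    f.ZF = (result != 0);               /* like setcond + trunc above */
    f.NF = (uint32_t)(result >> 32);    /* bit 63 of the result lands in bit 31 */
    return f;
}

static int flag_n(struct nz_flags f) { return (f.NF >> 31) & 1; }
static int flag_z(struct nz_flags f) { return f.ZF == 0; }

int main(void)
{
    assert(flag_z(set_nz64(0)) == 1 && flag_n(set_nz64(0)) == 0);
    assert(flag_z(set_nz64(5)) == 0 && flag_n(set_nz64(5)) == 0);
    assert(flag_n(set_nz64(0x8000000000000000ULL)) == 1);
    assert(flag_z(set_nz64(0x8000000000000000ULL)) == 0);
    return 0;
}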
+
+/*
+ * the instruction disassembly implemented here matches
+ * the instruction encoding classifications in chapter 3 (C3)
+ * of the ARM Architecture Reference Manual (DDI0487A_a)
+ */
+
+/* C3.2.7 Unconditional branch (immediate)
+ * 31 30 26 25 0
+ * +----+-----------+-------------------------------------+
+ * | op | 0 0 1 0 1 | imm26 |
+ * +----+-----------+-------------------------------------+
+ */
+static void disas_uncond_b_imm(DisasContext *s, uint32_t insn)
+{
+ uint64_t addr = s->pc + sextract32(insn, 0, 26) * 4 - 4;
+
+ if (insn & (1 << 31)) {
+ /* C5.6.26 BL Branch with link */
+ tcg_gen_movi_i64(cpu_reg(s, 30), s->pc);
+ }
+
+ /* C5.6.20 B Branch / C5.6.26 BL Branch with link */
+ gen_goto_tb(s, 0, addr);
+}
+
+/* C3.2.1 Compare & branch (immediate)
+ * 31 30 25 24 23 5 4 0
+ * +----+-------------+----+---------------------+--------+
+ * | sf | 0 1 1 0 1 0 | op | imm19 | Rt |
+ * +----+-------------+----+---------------------+--------+
+ */
+static void disas_comp_b_imm(DisasContext *s, uint32_t insn)
+{
+ unsigned int sf, op, rt;
+ uint64_t addr;
+ int label_match;
+ TCGv_i64 tcg_cmp;
+
+ sf = extract32(insn, 31, 1);
+ op = extract32(insn, 24, 1); /* 0: CBZ; 1: CBNZ */
+ rt = extract32(insn, 0, 5);
+ addr = s->pc + sextract32(insn, 5, 19) * 4 - 4;
+
+ tcg_cmp = read_cpu_reg(s, rt, sf);
+ label_match = gen_new_label();
+
+ tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
+ tcg_cmp, 0, label_match);
+
+ gen_goto_tb(s, 0, s->pc);
+ gen_set_label(label_match);
+ gen_goto_tb(s, 1, addr);
+}
+
+/* C3.2.5 Test & branch (immediate)
+ * 31 30 25 24 23 19 18 5 4 0
+ * +----+-------------+----+-------+-------------+------+
+ * | b5 | 0 1 1 0 1 1 | op | b40 | imm14 | Rt |
+ * +----+-------------+----+-------+-------------+------+
+ */
+static void disas_test_b_imm(DisasContext *s, uint32_t insn)
+{
+ unsigned int bit_pos, op, rt;
+ uint64_t addr;
+ int label_match;
+ TCGv_i64 tcg_cmp;
+
+ bit_pos = (extract32(insn, 31, 1) << 5) | extract32(insn, 19, 5);
+ op = extract32(insn, 24, 1); /* 0: TBZ; 1: TBNZ */
+ addr = s->pc + sextract32(insn, 5, 14) * 4 - 4;
+ rt = extract32(insn, 0, 5);
+
+ tcg_cmp = tcg_temp_new_i64();
+ tcg_gen_andi_i64(tcg_cmp, cpu_reg(s, rt), (1ULL << bit_pos));
+ label_match = gen_new_label();
+ tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
+ tcg_cmp, 0, label_match);
+ tcg_temp_free_i64(tcg_cmp);
+ gen_goto_tb(s, 0, s->pc);
+ gen_set_label(label_match);
+ gen_goto_tb(s, 1, addr);
+}
+
+/* C3.2.2 / C5.6.19 Conditional branch (immediate)
+ * 31 25 24 23 5 4 3 0
+ * +---------------+----+---------------------+----+------+
+ * | 0 1 0 1 0 1 0 | o1 | imm19 | o0 | cond |
+ * +---------------+----+---------------------+----+------+
+ */
+static void disas_cond_b_imm(DisasContext *s, uint32_t insn)
+{
+ unsigned int cond;
+ uint64_t addr;
+
+ if ((insn & (1 << 4)) || (insn & (1 << 24))) {
+ unallocated_encoding(s);
+ return;
+ }
+ addr = s->pc + sextract32(insn, 5, 19) * 4 - 4;
+ cond = extract32(insn, 0, 4);
+
+ if (cond < 0x0e) {
+ /* genuinely conditional branches */
+ int label_match = gen_new_label();
+ arm_gen_test_cc(cond, label_match);
+ gen_goto_tb(s, 0, s->pc);
+ gen_set_label(label_match);
+ gen_goto_tb(s, 1, addr);
+ } else {
+ /* 0xe and 0xf are both "always" conditions */
+ gen_goto_tb(s, 0, addr);
+ }
+}
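/*
 * Aside, not part of the patch: in the four branch decoders above, the
 * "* 4 - 4" pattern reflects that imm26/imm19/imm14 are signed word offsets
 * relative to the branch instruction itself, while s->pc has already been
 * advanced past the 4-byte instruction when the field is decoded. A
 * standalone sketch of the arithmetic, with sextract32() reimplemented
 * locally:
 */
#include <assert.h>
#include <stdint.h>

static int64_t sextract32_local(uint32_t value, int start, int length)
{
    uint32_t field = (value >> start) & (uint32_t)(((uint64_t)1 << length) - 1);
    uint32_t sign = 1u << (length - 1);

    return (int64_t)(int32_t)((field ^ sign) - sign);   /* sign-extend */
}

int main(void)
{
    uint32_t insn = 0x14000000;         /* "b ." : imm26 == 0, branch to self */
    uint64_t pc_of_insn = 0x1000;
    uint64_t pc_after = pc_of_insn + 4; /* like s->pc at decode time */
    uint64_t target;

    target = pc_after + sextract32_local(insn, 0, 26) * 4 - 4;
    assert(target == pc_of_insn);

    insn |= 0x3ffffff;                  /* imm26 == -1: one instruction back */
    target = pc_after + sextract32_local(insn, 0, 26) * 4 - 4;
    assert(target == pc_of_insn - 4);
    return 0;
}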
+
+/* C5.6.68 HINT */
+static void handle_hint(DisasContext *s, uint32_t insn,
+ unsigned int op1, unsigned int op2, unsigned int crm)
+{
+ unsigned int selector = crm << 3 | op2;
+
+ if (op1 != 3) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ switch (selector) {
+ case 0: /* NOP */
+ return;
+ case 1: /* YIELD */
+ case 2: /* WFE */
+ case 3: /* WFI */
+ case 4: /* SEV */
+ case 5: /* SEVL */
+ /* we treat all as NOP at least for now */
+ return;
+ default:
+ /* default specified as NOP equivalent */
+ return;
+ }
+}
+
+/* CLREX, DSB, DMB, ISB */
+static void handle_sync(DisasContext *s, uint32_t insn,
+ unsigned int op1, unsigned int op2, unsigned int crm)
+{
+ if (op1 != 3) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ switch (op2) {
+ case 2: /* CLREX */
+ unsupported_encoding(s, insn);
+ return;
+ case 4: /* DSB */
+ case 5: /* DMB */
+ case 6: /* ISB */
+ /* We don't emulate caches so barriers are no-ops */
+ return;
+ default:
+ unallocated_encoding(s);
+ return;
+ }
+}
+
+/* C5.6.130 MSR (immediate) - move immediate to processor state field */
+static void handle_msr_i(DisasContext *s, uint32_t insn,
+ unsigned int op1, unsigned int op2, unsigned int crm)
+{
+ unsupported_encoding(s, insn);
+}
+
+/* C5.6.204 SYS */
+static void handle_sys(DisasContext *s, uint32_t insn, unsigned int l,
+ unsigned int op1, unsigned int op2,
+ unsigned int crn, unsigned int crm, unsigned int rt)
+{
+ unsupported_encoding(s, insn);
+}
+
+/* C5.6.129 MRS - move from system register */
+static void handle_mrs(DisasContext *s, uint32_t insn, unsigned int op0,
+ unsigned int op1, unsigned int op2,
+ unsigned int crn, unsigned int crm, unsigned int rt)
+{
+ unsupported_encoding(s, insn);
+}
+
+/* C5.6.131 MSR (register) - move to system register */
+static void handle_msr(DisasContext *s, uint32_t insn, unsigned int op0,
+ unsigned int op1, unsigned int op2,
+ unsigned int crn, unsigned int crm, unsigned int rt)
+{
+ unsupported_encoding(s, insn);
+}
+
+/* C3.2.4 System
+ * 31 22 21 20 19 18 16 15 12 11 8 7 5 4 0
+ * +---------------------+---+-----+-----+-------+-------+-----+------+
+ * | 1 1 0 1 0 1 0 1 0 0 | L | op0 | op1 | CRn | CRm | op2 | Rt |
+ * +---------------------+---+-----+-----+-------+-------+-----+------+
+ */
+static void disas_system(DisasContext *s, uint32_t insn)
+{
+ unsigned int l, op0, op1, crn, crm, op2, rt;
+ l = extract32(insn, 21, 1);
+ op0 = extract32(insn, 19, 2);
+ op1 = extract32(insn, 16, 3);
+ crn = extract32(insn, 12, 4);
+ crm = extract32(insn, 8, 4);
+ op2 = extract32(insn, 5, 3);
+ rt = extract32(insn, 0, 5);
+
+ if (op0 == 0) {
+ if (l || rt != 31) {
+ unallocated_encoding(s);
+ return;
+ }
+ switch (crn) {
+ case 2: /* C5.6.68 HINT */
+ handle_hint(s, insn, op1, op2, crm);
+ break;
+ case 3: /* CLREX, DSB, DMB, ISB */
+ handle_sync(s, insn, op1, op2, crm);
+ break;
+ case 4: /* C5.6.130 MSR (immediate) */
+ handle_msr_i(s, insn, op1, op2, crm);
+ break;
+ default:
+ unallocated_encoding(s);
+ break;
+ }
+ return;
+ }
+
+ if (op0 == 1) {
+ /* C5.6.204 SYS */
+ handle_sys(s, insn, l, op1, op2, crn, crm, rt);
+ } else if (l) { /* op0 > 1 */
+ /* C5.6.129 MRS - move from system register */
+ handle_mrs(s, insn, op0, op1, op2, crn, crm, rt);
+ } else {
+ /* C5.6.131 MSR (register) - move to system register */
+ handle_msr(s, insn, op0, op1, op2, crn, crm, rt);
+ }
+}
+
+/* Exception generation */
+static void disas_exc(DisasContext *s, uint32_t insn)
+{
+ unsupported_encoding(s, insn);
+}
+
+/* C3.2.7 Unconditional branch (register)
+ * 31 25 24 21 20 16 15 10 9 5 4 0
+ * +---------------+-------+-------+-------+------+-------+
+ * | 1 1 0 1 0 1 1 | opc | op2 | op3 | Rn | op4 |
+ * +---------------+-------+-------+-------+------+-------+
+ */
+static void disas_uncond_b_reg(DisasContext *s, uint32_t insn)
+{
+ unsigned int opc, op2, op3, rn, op4;
+
+ opc = extract32(insn, 21, 4);
+ op2 = extract32(insn, 16, 5);
+ op3 = extract32(insn, 10, 6);
+ rn = extract32(insn, 5, 5);
+ op4 = extract32(insn, 0, 5);
+
+ if (op4 != 0x0 || op3 != 0x0 || op2 != 0x1f) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ switch (opc) {
+ case 0: /* BR */
+ case 2: /* RET */
+ break;
+ case 1: /* BLR */
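+ /* The link register is set to s->pc, which already points at the
+  * following instruction, i.e. the return address.
+  */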
+ tcg_gen_movi_i64(cpu_reg(s, 30), s->pc);
+ break;
+ case 4: /* ERET */
+ case 5: /* DRPS */
+ if (rn != 0x1f) {
+ unallocated_encoding(s);
+ } else {
+ unsupported_encoding(s, insn);
+ }
+ return;
+ default:
+ unallocated_encoding(s);
+ return;
+ }
+
+ tcg_gen_mov_i64(cpu_pc, cpu_reg(s, rn));
+ s->is_jmp = DISAS_JUMP;
+}
+
+/* C3.2 Branches, exception generating and system instructions */
+static void disas_b_exc_sys(DisasContext *s, uint32_t insn)
+{
+ switch (extract32(insn, 25, 7)) {
+ case 0x0a: case 0x0b:
+ case 0x4a: case 0x4b: /* Unconditional branch (immediate) */
+ disas_uncond_b_imm(s, insn);
+ break;
+ case 0x1a: case 0x5a: /* Compare & branch (immediate) */
+ disas_comp_b_imm(s, insn);
+ break;
+ case 0x1b: case 0x5b: /* Test & branch (immediate) */
+ disas_test_b_imm(s, insn);
+ break;
+ case 0x2a: /* Conditional branch (immediate) */
+ disas_cond_b_imm(s, insn);
+ break;
+ case 0x6a: /* Exception generation / System */
+ if (insn & (1 << 24)) {
+ disas_system(s, insn);
+ } else {
+ disas_exc(s, insn);
+ }
+ break;
+ case 0x6b: /* Unconditional branch (register) */
+ disas_uncond_b_reg(s, insn);
+ break;
+ default:
+ unallocated_encoding(s);
+ break;
+ }
+}
+
+/* Load/store exclusive */
+static void disas_ldst_excl(DisasContext *s, uint32_t insn)
+{
+ unsupported_encoding(s, insn);
+}
+
+/* Load register (literal) */
+static void disas_ld_lit(DisasContext *s, uint32_t insn)
+{
+ unsupported_encoding(s, insn);
+}
+
+/* Load/store pair (all forms) */
+static void disas_ldst_pair(DisasContext *s, uint32_t insn)
+{
+ unsupported_encoding(s, insn);
+}
+
+/* Load/store register (all forms) */
+static void disas_ldst_reg(DisasContext *s, uint32_t insn)
+{
+ unsupported_encoding(s, insn);
+}
+
+/* AdvSIMD load/store multiple structures */
+static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
+{
+ unsupported_encoding(s, insn);
+}
+
+/* AdvSIMD load/store single structure */
+static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
+{
+ unsupported_encoding(s, insn);
+}
+
+/* C3.3 Loads and stores */
+static void disas_ldst(DisasContext *s, uint32_t insn)
+{
+ switch (extract32(insn, 24, 6)) {
+ case 0x08: /* Load/store exclusive */
+ disas_ldst_excl(s, insn);
+ break;
+ case 0x18: case 0x1c: /* Load register (literal) */
+ disas_ld_lit(s, insn);
+ break;
+ case 0x28: case 0x29:
+ case 0x2c: case 0x2d: /* Load/store pair (all forms) */
+ disas_ldst_pair(s, insn);
+ break;
+ case 0x38: case 0x39:
+ case 0x3c: case 0x3d: /* Load/store register (all forms) */
+ disas_ldst_reg(s, insn);
+ break;
+ case 0x0c: /* AdvSIMD load/store multiple structures */
+ disas_ldst_multiple_struct(s, insn);
+ break;
+ case 0x0d: /* AdvSIMD load/store single structure */
+ disas_ldst_single_struct(s, insn);
+ break;
+ default:
+ unallocated_encoding(s);
+ break;
+ }
+}
+
+/* C3.4.6 PC-rel. addressing
+ * 31 30 29 28 24 23 5 4 0
+ * +----+-------+-----------+-------------------+------+
+ * | op | immlo | 1 0 0 0 0 | immhi | Rd |
+ * +----+-------+-----------+-------------------+------+
+ */
+static void disas_pc_rel_adr(DisasContext *s, uint32_t insn)
+{
+ unsigned int page, rd;
+ uint64_t base;
+ int64_t offset;
+
+ page = extract32(insn, 31, 1);
+ /* SignExtend(immhi:immlo) -> offset */
+ offset = ((int64_t)sextract32(insn, 5, 19) << 2) | extract32(insn, 29, 2);
+ rd = extract32(insn, 0, 5);
+ base = s->pc - 4;
+
+ if (page) {
+ /* ADRP (page based) */
+ base &= ~0xfff;
+ offset <<= 12;
+ }
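+ /* Worked example (for illustration): ADRP with immhi:immlo == 1 executed
+  * at PC 0x00401234 gives base = 0x00401000 and offset = 0x1000, so Xd is
+  * set to 0x00402000 below.
+  */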
+
+ tcg_gen_movi_i64(cpu_reg(s, rd), base + offset);
+}
+
+/* Add/subtract (immediate) */
+static void disas_add_sub_imm(DisasContext *s, uint32_t insn)
+{
+ unsupported_encoding(s, insn);
+}
+
+/* The input should be a value in the bottom e bits (with higher
+ * bits zero); returns that value replicated into every element
+ * of size e in a 64 bit integer.
+ */
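+/* For example, bitfield_replicate(0xff, 16) returns 0x00ff00ff00ff00ffULL. */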
+static uint64_t bitfield_replicate(uint64_t mask, unsigned int e)
+{
+ assert(e != 0);
+ while (e < 64) {
+ mask |= mask << e;
+ e *= 2;
+ }
+ return mask;
+}
+
+/* Return a value with the bottom len bits set (where 0 < len <= 64) */
+static inline uint64_t bitmask64(unsigned int length)
+{
+ assert(length > 0 && length <= 64);
+ return ~0ULL >> (64 - length);
+}
+
+/* Simplified variant of pseudocode DecodeBitMasks() for the case where we
+ * only require the wmask. Returns false if the imms/immr/immn are a reserved
+ * value (ie should cause a guest UNDEF exception), and true if they are
+ * valid, in which case the decoded bit pattern is written to result.
+ */
+static bool logic_imm_decode_wmask(uint64_t *result, unsigned int immn,
+ unsigned int imms, unsigned int immr)
+{
+ uint64_t mask;
+ unsigned e, levels, s, r;
+ int len;
+
+ assert(immn < 2 && imms < 64 && immr < 64);
+
+ /* The bit patterns we create here are 64 bit patterns which
+ * are vectors of identical elements of size e = 2, 4, 8, 16, 32 or
+ * 64 bits each. Each element contains the same value: a run
+ * of between 1 and e-1 non-zero bits, rotated within the
+ * element by between 0 and e-1 bits.
+ *
+ * The element size and run length are encoded into immn (1 bit)
+ * and imms (6 bits) as follows:
+ * 64 bit elements: immn = 1, imms = <length of run - 1>
+ * 32 bit elements: immn = 0, imms = 0 : <length of run - 1>
+ * 16 bit elements: immn = 0, imms = 10 : <length of run - 1>
+ * 8 bit elements: immn = 0, imms = 110 : <length of run - 1>
+ * 4 bit elements: immn = 0, imms = 1110 : <length of run - 1>
+ * 2 bit elements: immn = 0, imms = 11110 : <length of run - 1>
+ * Notice that immn = 0, imms = 11111x is the only combination
+ * not covered by one of the above options; this is reserved.
+ * Further, <length of run - 1> all-ones is a reserved pattern.
+ *
+ * In all cases the rotation is by immr % e (and immr is 6 bits).
+ */
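+ /* Worked example (for illustration): immn = 0, imms = 0b111100, immr = 0
+  * selects e = 2 with a run of one set bit, so the decoded wmask is
+  * 0x5555555555555555.
+  */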
+
+ /* First determine the element size */
+ len = 31 - clz32((immn << 6) | (~imms & 0x3f));
+ if (len < 1) {
+ /* This is the immn == 0, imms == 11111x case */
+ return false;
+ }
+ e = 1 << len;
+
+ levels = e - 1;
+ s = imms & levels;
+ r = immr & levels;
+
+ if (s == levels) {
+ /* <length of run - 1> mustn't be all-ones. */
+ return false;
+ }
+
+ /* Create the value of one element: s+1 set bits rotated
+ * by r within the element (which is e bits wide)...
+ */
+ mask = bitmask64(s + 1);
+ mask = (mask >> r) | (mask << (e - r));
+ /* ...then replicate the element over the whole 64 bit value */
+ mask = bitfield_replicate(mask, e);
+ *result = mask;
+ return true;
+}
+
+/* C3.4.4 Logical (immediate)
+ * 31 30 29 28 23 22 21 16 15 10 9 5 4 0
+ * +----+-----+-------------+---+------+------+------+------+
+ * | sf | opc | 1 0 0 1 0 0 | N | immr | imms | Rn | Rd |
+ * +----+-----+-------------+---+------+------+------+------+
+ */
+static void disas_logic_imm(DisasContext *s, uint32_t insn)
+{
+ unsigned int sf, opc, is_n, immr, imms, rn, rd;
+ TCGv_i64 tcg_rd, tcg_rn;
+ uint64_t wmask;
+ bool is_and = false;
+
+ sf = extract32(insn, 31, 1);
+ opc = extract32(insn, 29, 2);
+ is_n = extract32(insn, 22, 1);
+ immr = extract32(insn, 16, 6);
+ imms = extract32(insn, 10, 6);
+ rn = extract32(insn, 5, 5);
+ rd = extract32(insn, 0, 5);
+
+ if (!sf && is_n) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (opc == 0x3) { /* ANDS */
+ tcg_rd = cpu_reg(s, rd);
+ } else {
+ tcg_rd = cpu_reg_sp(s, rd);
+ }
+ tcg_rn = cpu_reg(s, rn);
+
+ if (!logic_imm_decode_wmask(&wmask, is_n, imms, immr)) {
+ /* some immediate field values are reserved */
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (!sf) {
+ wmask &= 0xffffffff;
+ }
+
+ switch (opc) {
+ case 0x3: /* ANDS */
+ case 0x0: /* AND */
+ tcg_gen_andi_i64(tcg_rd, tcg_rn, wmask);
+ is_and = true;
+ break;
+ case 0x1: /* ORR */
+ tcg_gen_ori_i64(tcg_rd, tcg_rn, wmask);
+ break;
+ case 0x2: /* EOR */
+ tcg_gen_xori_i64(tcg_rd, tcg_rn, wmask);
+ break;
+ default:
+ assert(FALSE); /* must handle all above */
+ break;
+ }
+
+ if (!sf && !is_and) {
+ /* zero extend final result; we know we can skip this for AND
+ * since the immediate had the high 32 bits clear.
+ */
+ tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
+ }
-void disas_a64_insn(CPUARMState *env, DisasContext *s)
+ if (opc == 3) { /* ANDS */
+ gen_logic_CC(sf, tcg_rd);
+ }
+}
+
+/* Move wide (immediate) */
+static void disas_movw_imm(DisasContext *s, uint32_t insn)
+{
+ unsupported_encoding(s, insn);
+}
+
+/* C3.4.2 Bitfield
+ * 31 30 29 28 23 22 21 16 15 10 9 5 4 0
+ * +----+-----+-------------+---+------+------+------+------+
+ * | sf | opc | 1 0 0 1 1 0 | N | immr | imms | Rn | Rd |
+ * +----+-----+-------------+---+------+------+------+------+
+ */
+static void disas_bitfield(DisasContext *s, uint32_t insn)
+{
+ unsigned int sf, n, opc, ri, si, rn, rd, bitsize, pos, len;
+ TCGv_i64 tcg_rd, tcg_tmp;
+
+ sf = extract32(insn, 31, 1);
+ opc = extract32(insn, 29, 2);
+ n = extract32(insn, 22, 1);
+ ri = extract32(insn, 16, 6);
+ si = extract32(insn, 10, 6);
+ rn = extract32(insn, 5, 5);
+ rd = extract32(insn, 0, 5);
+ bitsize = sf ? 64 : 32;
+
+ if (sf != n || ri >= bitsize || si >= bitsize || opc > 2) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ tcg_rd = cpu_reg(s, rd);
+ tcg_tmp = read_cpu_reg(s, rn, sf);
+
+ /* OPTME: probably worth recognizing common cases of ext{8,16,32}{u,s} */
+
+ if (opc != 1) { /* SBFM or UBFM */
+ tcg_gen_movi_i64(tcg_rd, 0);
+ }
+
+ /* do the bit move operation */
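+ /* e.g. (illustration) UBFM with immr = 8, imms = 15 takes the si >= ri
+  * branch below with pos = 0, len = 8, i.e. UBFX of bits <15:8> of the
+  * source into bits <7:0> of the destination.
+  */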
+ if (si >= ri) {
+ /* Wd<s-r:0> = Wn<s:r> */
+ tcg_gen_shri_i64(tcg_tmp, tcg_tmp, ri);
+ pos = 0;
+ len = (si - ri) + 1;
+ } else {
+ /* Wd<bitsize+s-r:bitsize-r> = Wn<s:0> */
+ pos = bitsize - ri;
+ len = si + 1;
+ }
+
+ tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_tmp, pos, len);
+
+ if (opc == 0) { /* SBFM - sign extend the destination field */
+ tcg_gen_shli_i64(tcg_rd, tcg_rd, 64 - (pos + len));
+ tcg_gen_sari_i64(tcg_rd, tcg_rd, 64 - (pos + len));
+ }
+
+ if (!sf) { /* zero extend final result */
+ tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
+ }
+}
+
+/* C3.4.3 Extract
+ * 31 30 29 28 23 22 21 20 16 15 10 9 5 4 0
+ * +----+------+-------------+---+----+------+--------+------+------+
+ * | sf | op21 | 1 0 0 1 1 1 | N | o0 | Rm | imms | Rn | Rd |
+ * +----+------+-------------+---+----+------+--------+------+------+
+ */
+static void disas_extract(DisasContext *s, uint32_t insn)
+{
+ unsigned int sf, n, rm, imm, rn, rd, bitsize, op21, op0;
+
+ sf = extract32(insn, 31, 1);
+ n = extract32(insn, 22, 1);
+ rm = extract32(insn, 16, 5);
+ imm = extract32(insn, 10, 6);
+ rn = extract32(insn, 5, 5);
+ rd = extract32(insn, 0, 5);
+ op21 = extract32(insn, 29, 2);
+ op0 = extract32(insn, 21, 1);
+ bitsize = sf ? 64 : 32;
+
+ if (sf != n || op21 || op0 || imm >= bitsize) {
+ unallocated_encoding(s);
+ } else {
+ TCGv_i64 tcg_rd, tcg_rm, tcg_rn;
+
+ tcg_rd = cpu_reg(s, rd);
+
+ if (imm) {
+ /* OPTME: we can special case rm==rn as a rotate */
+ tcg_rm = read_cpu_reg(s, rm, sf);
+ tcg_rn = read_cpu_reg(s, rn, sf);
+ tcg_gen_shri_i64(tcg_rm, tcg_rm, imm);
+ tcg_gen_shli_i64(tcg_rn, tcg_rn, bitsize - imm);
+ tcg_gen_or_i64(tcg_rd, tcg_rm, tcg_rn);
+ if (!sf) {
+ tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
+ }
+ } else {
+ /* tcg shl_i32/shl_i64 is undefined for 32/64 bit shifts,
+ * so an extract from bit 0 is a special case.
+ */
+ if (sf) {
+ tcg_gen_mov_i64(tcg_rd, cpu_reg(s, rm));
+ } else {
+ tcg_gen_ext32u_i64(tcg_rd, cpu_reg(s, rm));
+ }
+ }
+
+ }
+}
+
+/* C3.4 Data processing - immediate */
+static void disas_data_proc_imm(DisasContext *s, uint32_t insn)
+{
+ switch (extract32(insn, 23, 6)) {
+ case 0x20: case 0x21: /* PC-rel. addressing */
+ disas_pc_rel_adr(s, insn);
+ break;
+ case 0x22: case 0x23: /* Add/subtract (immediate) */
+ disas_add_sub_imm(s, insn);
+ break;
+ case 0x24: /* Logical (immediate) */
+ disas_logic_imm(s, insn);
+ break;
+ case 0x25: /* Move wide (immediate) */
+ disas_movw_imm(s, insn);
+ break;
+ case 0x26: /* Bitfield */
+ disas_bitfield(s, insn);
+ break;
+ case 0x27: /* Extract */
+ disas_extract(s, insn);
+ break;
+ default:
+ unallocated_encoding(s);
+ break;
+ }
+}
+
+/* Shift a TCGv src by TCGv shift_amount, put result in dst.
+ * Note that it is the caller's responsibility to ensure that the
+ * shift amount is in range (ie 0..31 or 0..63) and provide the ARM
+ * mandated semantics for out of range shifts.
+ */
+static void shift_reg(TCGv_i64 dst, TCGv_i64 src, int sf,
+ enum a64_shift_type shift_type, TCGv_i64 shift_amount)
+{
+ switch (shift_type) {
+ case A64_SHIFT_TYPE_LSL:
+ tcg_gen_shl_i64(dst, src, shift_amount);
+ break;
+ case A64_SHIFT_TYPE_LSR:
+ tcg_gen_shr_i64(dst, src, shift_amount);
+ break;
+ case A64_SHIFT_TYPE_ASR:
+ if (!sf) {
+ tcg_gen_ext32s_i64(dst, src);
+ }
+ tcg_gen_sar_i64(dst, sf ? src : dst, shift_amount);
+ break;
+ case A64_SHIFT_TYPE_ROR:
+ if (sf) {
+ tcg_gen_rotr_i64(dst, src, shift_amount);
+ } else {
+ TCGv_i32 t0, t1;
+ t0 = tcg_temp_new_i32();
+ t1 = tcg_temp_new_i32();
+ tcg_gen_trunc_i64_i32(t0, src);
+ tcg_gen_trunc_i64_i32(t1, shift_amount);
+ tcg_gen_rotr_i32(t0, t0, t1);
+ tcg_gen_extu_i32_i64(dst, t0);
+ tcg_temp_free_i32(t0);
+ tcg_temp_free_i32(t1);
+ }
+ break;
+ default:
+ assert(FALSE); /* all shift types should be handled */
+ break;
+ }
+
+ if (!sf) { /* zero extend final result */
+ tcg_gen_ext32u_i64(dst, dst);
+ }
+}
+
+/* Shift a TCGv src by immediate, put result in dst.
+ * The shift amount must be in range (this should always be true as the
+ * relevant instructions will UNDEF on bad shift immediates).
+ */
+static void shift_reg_imm(TCGv_i64 dst, TCGv_i64 src, int sf,
+ enum a64_shift_type shift_type, unsigned int shift_i)
+{
+ assert(shift_i < (sf ? 64 : 32));
+
+ if (shift_i == 0) {
+ tcg_gen_mov_i64(dst, src);
+ } else {
+ TCGv_i64 shift_const;
+
+ shift_const = tcg_const_i64(shift_i);
+ shift_reg(dst, src, sf, shift_type, shift_const);
+ tcg_temp_free_i64(shift_const);
+ }
+}
+
+/* C3.5.10 Logical (shifted register)
+ * 31 30 29 28 24 23 22 21 20 16 15 10 9 5 4 0
+ * +----+-----+-----------+-------+---+------+--------+------+------+
+ * | sf | opc | 0 1 0 1 0 | shift | N | Rm | imm6 | Rn | Rd |
+ * +----+-----+-----------+-------+---+------+--------+------+------+
+ */
+static void disas_logic_reg(DisasContext *s, uint32_t insn)
+{
+ TCGv_i64 tcg_rd, tcg_rn, tcg_rm;
+ unsigned int sf, opc, shift_type, invert, rm, shift_amount, rn, rd;
+
+ sf = extract32(insn, 31, 1);
+ opc = extract32(insn, 29, 2);
+ shift_type = extract32(insn, 22, 2);
+ invert = extract32(insn, 21, 1);
+ rm = extract32(insn, 16, 5);
+ shift_amount = extract32(insn, 10, 6);
+ rn = extract32(insn, 5, 5);
+ rd = extract32(insn, 0, 5);
+
+ if (!sf && (shift_amount & (1 << 5))) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ tcg_rd = cpu_reg(s, rd);
+
+ if (opc == 1 && shift_amount == 0 && shift_type == 0 && rn == 31) {
+ /* Unshifted ORR and ORN with WZR/XZR is the standard encoding for
+ * register-register MOV and MVN, so it is worth special casing.
+ */
+ tcg_rm = cpu_reg(s, rm);
+ if (invert) {
+ tcg_gen_not_i64(tcg_rd, tcg_rm);
+ if (!sf) {
+ tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
+ }
+ } else {
+ if (sf) {
+ tcg_gen_mov_i64(tcg_rd, tcg_rm);
+ } else {
+ tcg_gen_ext32u_i64(tcg_rd, tcg_rm);
+ }
+ }
+ return;
+ }
+
+ tcg_rm = read_cpu_reg(s, rm, sf);
+
+ if (shift_amount) {
+ shift_reg_imm(tcg_rm, tcg_rm, sf, shift_type, shift_amount);
+ }
+
+ tcg_rn = cpu_reg(s, rn);
+
+ switch (opc | (invert << 2)) {
+ case 0: /* AND */
+ case 3: /* ANDS */
+ tcg_gen_and_i64(tcg_rd, tcg_rn, tcg_rm);
+ break;
+ case 1: /* ORR */
+ tcg_gen_or_i64(tcg_rd, tcg_rn, tcg_rm);
+ break;
+ case 2: /* EOR */
+ tcg_gen_xor_i64(tcg_rd, tcg_rn, tcg_rm);
+ break;
+ case 4: /* BIC */
+ case 7: /* BICS */
+ tcg_gen_andc_i64(tcg_rd, tcg_rn, tcg_rm);
+ break;
+ case 5: /* ORN */
+ tcg_gen_orc_i64(tcg_rd, tcg_rn, tcg_rm);
+ break;
+ case 6: /* EON */
+ tcg_gen_eqv_i64(tcg_rd, tcg_rn, tcg_rm);
+ break;
+ default:
+ assert(FALSE);
+ break;
+ }
+
+ if (!sf) {
+ tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
+ }
+
+ if (opc == 3) {
+ gen_logic_CC(sf, tcg_rd);
+ }
+}
+
+/* Add/subtract (extended register) */
+static void disas_add_sub_ext_reg(DisasContext *s, uint32_t insn)
+{
+ unsupported_encoding(s, insn);
+}
+
+/* Add/subtract (shifted register) */
+static void disas_add_sub_reg(DisasContext *s, uint32_t insn)
+{
+ unsupported_encoding(s, insn);
+}
+
+/* Data-processing (3 source) */
+static void disas_data_proc_3src(DisasContext *s, uint32_t insn)
+{
+ unsupported_encoding(s, insn);
+}
+
+/* Add/subtract (with carry) */
+static void disas_adc_sbc(DisasContext *s, uint32_t insn)
+{
+ unsupported_encoding(s, insn);
+}
+
+/* Conditional compare (immediate) */
+static void disas_cc_imm(DisasContext *s, uint32_t insn)
+{
+ unsupported_encoding(s, insn);
+}
+
+/* Conditional compare (register) */
+static void disas_cc_reg(DisasContext *s, uint32_t insn)
+{
+ unsupported_encoding(s, insn);
+}
+
+/* C3.5.6 Conditional select
+ * 31 30 29 28 21 20 16 15 12 11 10 9 5 4 0
+ * +----+----+---+-----------------+------+------+-----+------+------+
+ * | sf | op | S | 1 1 0 1 0 1 0 0 | Rm | cond | op2 | Rn | Rd |
+ * +----+----+---+-----------------+------+------+-----+------+------+
+ */
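+/* The op bit selects inversion and op2<0> selects increment of the "else"
+ * value, giving CSEL (neither), CSINC, CSINV and CSNEG (both) respectively.
+ */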
+static void disas_cond_select(DisasContext *s, uint32_t insn)
+{
+ unsigned int sf, else_inv, rm, cond, else_inc, rn, rd;
+ TCGv_i64 tcg_rd, tcg_src;
+
+ if (extract32(insn, 29, 1) || extract32(insn, 11, 1)) {
+ /* S == 1 or op2<1> == 1 */
+ unallocated_encoding(s);
+ return;
+ }
+ sf = extract32(insn, 31, 1);
+ else_inv = extract32(insn, 30, 1);
+ rm = extract32(insn, 16, 5);
+ cond = extract32(insn, 12, 4);
+ else_inc = extract32(insn, 10, 1);
+ rn = extract32(insn, 5, 5);
+ rd = extract32(insn, 0, 5);
+
+ if (rd == 31) {
+ /* silly no-op write; until we use movcond we must special-case
+ * this to avoid a dead temporary across basic blocks.
+ */
+ return;
+ }
+
+ tcg_rd = cpu_reg(s, rd);
+
+ if (cond >= 0x0e) { /* condition "always" */
+ tcg_src = read_cpu_reg(s, rn, sf);
+ tcg_gen_mov_i64(tcg_rd, tcg_src);
+ } else {
+ /* OPTME: we could use movcond here, at the cost of duplicating
+ * a lot of the arm_gen_test_cc() logic.
+ */
+ int label_match = gen_new_label();
+ int label_continue = gen_new_label();
+
+ arm_gen_test_cc(cond, label_match);
+ /* nomatch: */
+ tcg_src = cpu_reg(s, rm);
+
+ if (else_inv && else_inc) {
+ tcg_gen_neg_i64(tcg_rd, tcg_src);
+ } else if (else_inv) {
+ tcg_gen_not_i64(tcg_rd, tcg_src);
+ } else if (else_inc) {
+ tcg_gen_addi_i64(tcg_rd, tcg_src, 1);
+ } else {
+ tcg_gen_mov_i64(tcg_rd, tcg_src);
+ }
+ if (!sf) {
+ tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
+ }
+ tcg_gen_br(label_continue);
+ /* match: */
+ gen_set_label(label_match);
+ tcg_src = read_cpu_reg(s, rn, sf);
+ tcg_gen_mov_i64(tcg_rd, tcg_src);
+ /* continue: */
+ gen_set_label(label_continue);
+ }
+}
+
+static void handle_clz(DisasContext *s, unsigned int sf,
+ unsigned int rn, unsigned int rd)
+{
+ TCGv_i64 tcg_rd, tcg_rn;
+ tcg_rd = cpu_reg(s, rd);
+ tcg_rn = cpu_reg(s, rn);
+
+ if (sf) {
+ gen_helper_clz64(tcg_rd, tcg_rn);
+ } else {
+ TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
+ tcg_gen_trunc_i64_i32(tcg_tmp32, tcg_rn);
+ gen_helper_clz(tcg_tmp32, tcg_tmp32);
+ tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
+ tcg_temp_free_i32(tcg_tmp32);
+ }
+}
+
+static void handle_cls(DisasContext *s, unsigned int sf,
+ unsigned int rn, unsigned int rd)
+{
+ TCGv_i64 tcg_rd, tcg_rn;
+ tcg_rd = cpu_reg(s, rd);
+ tcg_rn = cpu_reg(s, rn);
+
+ if (sf) {
+ gen_helper_cls64(tcg_rd, tcg_rn);
+ } else {
+ TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
+ tcg_gen_trunc_i64_i32(tcg_tmp32, tcg_rn);
+ gen_helper_cls32(tcg_tmp32, tcg_tmp32);
+ tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
+ tcg_temp_free_i32(tcg_tmp32);
+ }
+}
+
+static void handle_rbit(DisasContext *s, unsigned int sf,
+ unsigned int rn, unsigned int rd)
+{
+ TCGv_i64 tcg_rd, tcg_rn;
+ tcg_rd = cpu_reg(s, rd);
+ tcg_rn = cpu_reg(s, rn);
+
+ if (sf) {
+ gen_helper_rbit64(tcg_rd, tcg_rn);
+ } else {
+ TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
+ tcg_gen_trunc_i64_i32(tcg_tmp32, tcg_rn);
+ gen_helper_rbit(tcg_tmp32, tcg_tmp32);
+ tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
+ tcg_temp_free_i32(tcg_tmp32);
+ }
+}
+
+/* C5.6.149 REV with sf==1, opcode==3 ("REV64") */
+static void handle_rev64(DisasContext *s, unsigned int sf,
+ unsigned int rn, unsigned int rd)
+{
+ if (!sf) {
+ unallocated_encoding(s);
+ return;
+ }
+ tcg_gen_bswap64_i64(cpu_reg(s, rd), cpu_reg(s, rn));
+}
+
+/* C5.6.149 REV with sf==0, opcode==2
+ * C5.6.151 REV32 (sf==1, opcode==2)
+ */
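+/* e.g. 64-bit REV32 of 0x1122334455667788 gives 0x4433221188776655 */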
+static void handle_rev32(DisasContext *s, unsigned int sf,
+ unsigned int rn, unsigned int rd)
+{
+ TCGv_i64 tcg_rd = cpu_reg(s, rd);
+
+ if (sf) {
+ TCGv_i64 tcg_tmp = tcg_temp_new_i64();
+ TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);
+
+ /* bswap32_i64 requires zero high word */
+ tcg_gen_ext32u_i64(tcg_tmp, tcg_rn);
+ tcg_gen_bswap32_i64(tcg_rd, tcg_tmp);
+ tcg_gen_shri_i64(tcg_tmp, tcg_rn, 32);
+ tcg_gen_bswap32_i64(tcg_tmp, tcg_tmp);
+ tcg_gen_concat32_i64(tcg_rd, tcg_rd, tcg_tmp);
+
+ tcg_temp_free_i64(tcg_tmp);
+ } else {
+ tcg_gen_ext32u_i64(tcg_rd, cpu_reg(s, rn));
+ tcg_gen_bswap32_i64(tcg_rd, tcg_rd);
+ }
+}
+
+/* C5.6.150 REV16 (opcode==1) */
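+/* e.g. 64-bit REV16 of 0x1122334455667788 gives 0x2211443366558877 */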
+static void handle_rev16(DisasContext *s, unsigned int sf,
+ unsigned int rn, unsigned int rd)
+{
+ TCGv_i64 tcg_rd = cpu_reg(s, rd);
+ TCGv_i64 tcg_tmp = tcg_temp_new_i64();
+ TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);
+
+ tcg_gen_andi_i64(tcg_tmp, tcg_rn, 0xffff);
+ tcg_gen_bswap16_i64(tcg_rd, tcg_tmp);
+
+ tcg_gen_shri_i64(tcg_tmp, tcg_rn, 16);
+ tcg_gen_andi_i64(tcg_tmp, tcg_tmp, 0xffff);
+ tcg_gen_bswap16_i64(tcg_tmp, tcg_tmp);
+ tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_tmp, 16, 16);
+
+ if (sf) {
+ tcg_gen_shri_i64(tcg_tmp, tcg_rn, 32);
+ tcg_gen_andi_i64(tcg_tmp, tcg_tmp, 0xffff);
+ tcg_gen_bswap16_i64(tcg_tmp, tcg_tmp);
+ tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_tmp, 32, 16);
+
+ tcg_gen_shri_i64(tcg_tmp, tcg_rn, 48);
+ tcg_gen_bswap16_i64(tcg_tmp, tcg_tmp);
+ tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_tmp, 48, 16);
+ }
+
+ tcg_temp_free_i64(tcg_tmp);
+}
+
+/* C3.5.7 Data-processing (1 source)
+ * 31 30 29 28 21 20 16 15 10 9 5 4 0
+ * +----+---+---+-----------------+---------+--------+------+------+
+ * | sf | 1 | S | 1 1 0 1 0 1 1 0 | opcode2 | opcode | Rn | Rd |
+ * +----+---+---+-----------------+---------+--------+------+------+
+ */
+static void disas_data_proc_1src(DisasContext *s, uint32_t insn)
+{
+ unsigned int sf, opcode, rn, rd;
+
+ if (extract32(insn, 29, 1) || extract32(insn, 16, 5)) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ sf = extract32(insn, 31, 1);
+ opcode = extract32(insn, 10, 6);
+ rn = extract32(insn, 5, 5);
+ rd = extract32(insn, 0, 5);
+
+ switch (opcode) {
+ case 0: /* RBIT */
+ handle_rbit(s, sf, rn, rd);
+ break;
+ case 1: /* REV16 */
+ handle_rev16(s, sf, rn, rd);
+ break;
+ case 2: /* REV32 */
+ handle_rev32(s, sf, rn, rd);
+ break;
+ case 3: /* REV64 */
+ handle_rev64(s, sf, rn, rd);
+ break;
+ case 4: /* CLZ */
+ handle_clz(s, sf, rn, rd);
+ break;
+ case 5: /* CLS */
+ handle_cls(s, sf, rn, rd);
+ break;
+ default:
+ unallocated_encoding(s);
+ break;
+ }
+}
+
+static void handle_div(DisasContext *s, bool is_signed, unsigned int sf,
+ unsigned int rm, unsigned int rn, unsigned int rd)
+{
+ TCGv_i64 tcg_n, tcg_m, tcg_rd;
+ tcg_rd = cpu_reg(s, rd);
+
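+ /* For a 32-bit signed divide, sign-extend the operands into fresh
+  * 64-bit temporaries so the 64-bit helper computes the correct result.
+  */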
+ if (!sf && is_signed) {
+ tcg_n = new_tmp_a64(s);
+ tcg_m = new_tmp_a64(s);
+ tcg_gen_ext32s_i64(tcg_n, cpu_reg(s, rn));
+ tcg_gen_ext32s_i64(tcg_m, cpu_reg(s, rm));
+ } else {
+ tcg_n = read_cpu_reg(s, rn, sf);
+ tcg_m = read_cpu_reg(s, rm, sf);
+ }
+
+ if (is_signed) {
+ gen_helper_sdiv64(tcg_rd, tcg_n, tcg_m);
+ } else {
+ gen_helper_udiv64(tcg_rd, tcg_n, tcg_m);
+ }
+
+ if (!sf) { /* zero extend final result */
+ tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
+ }
+}
+
+/* C5.6.115 LSLV, C5.6.118 LSRV, C5.6.17 ASRV, C5.6.154 RORV */
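+/* The shift amount is taken modulo the register width (the andi below),
+ * which also keeps it within the range shift_reg() requires.
+ */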
+static void handle_shift_reg(DisasContext *s,
+ enum a64_shift_type shift_type, unsigned int sf,
+ unsigned int rm, unsigned int rn, unsigned int rd)
+{
+ TCGv_i64 tcg_shift = tcg_temp_new_i64();
+ TCGv_i64 tcg_rd = cpu_reg(s, rd);
+ TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);
+
+ tcg_gen_andi_i64(tcg_shift, cpu_reg(s, rm), sf ? 63 : 31);
+ shift_reg(tcg_rd, tcg_rn, sf, shift_type, tcg_shift);
+ tcg_temp_free_i64(tcg_shift);
+}
+
+/* C3.5.8 Data-processing (2 source)
+ * 31 30 29 28 21 20 16 15 10 9 5 4 0
+ * +----+---+---+-----------------+------+--------+------+------+
+ * | sf | 0 | S | 1 1 0 1 0 1 1 0 | Rm | opcode | Rn | Rd |
+ * +----+---+---+-----------------+------+--------+------+------+
+ */
+static void disas_data_proc_2src(DisasContext *s, uint32_t insn)
+{
+ unsigned int sf, rm, opcode, rn, rd;
+ sf = extract32(insn, 31, 1);
+ rm = extract32(insn, 16, 5);
+ opcode = extract32(insn, 10, 6);
+ rn = extract32(insn, 5, 5);
+ rd = extract32(insn, 0, 5);
+
+ if (extract32(insn, 29, 1)) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ switch (opcode) {
+ case 2: /* UDIV */
+ handle_div(s, false, sf, rm, rn, rd);
+ break;
+ case 3: /* SDIV */
+ handle_div(s, true, sf, rm, rn, rd);
+ break;
+ case 8: /* LSLV */
+ handle_shift_reg(s, A64_SHIFT_TYPE_LSL, sf, rm, rn, rd);
+ break;
+ case 9: /* LSRV */
+ handle_shift_reg(s, A64_SHIFT_TYPE_LSR, sf, rm, rn, rd);
+ break;
+ case 10: /* ASRV */
+ handle_shift_reg(s, A64_SHIFT_TYPE_ASR, sf, rm, rn, rd);
+ break;
+ case 11: /* RORV */
+ handle_shift_reg(s, A64_SHIFT_TYPE_ROR, sf, rm, rn, rd);
+ break;
+ case 16:
+ case 17:
+ case 18:
+ case 19:
+ case 20:
+ case 21:
+ case 22:
+ case 23: /* CRC32 */
+ unsupported_encoding(s, insn);
+ break;
+ default:
+ unallocated_encoding(s);
+ break;
+ }
+}
+
+/* C3.5 Data processing - register */
+static void disas_data_proc_reg(DisasContext *s, uint32_t insn)
+{
+ switch (extract32(insn, 24, 5)) {
+ case 0x0a: /* Logical (shifted register) */
+ disas_logic_reg(s, insn);
+ break;
+ case 0x0b: /* Add/subtract */
+ if (insn & (1 << 21)) { /* (extended register) */
+ disas_add_sub_ext_reg(s, insn);
+ } else {
+ disas_add_sub_reg(s, insn);
+ }
+ break;
+ case 0x1b: /* Data-processing (3 source) */
+ disas_data_proc_3src(s, insn);
+ break;
+ case 0x1a:
+ switch (extract32(insn, 21, 3)) {
+ case 0x0: /* Add/subtract (with carry) */
+ disas_adc_sbc(s, insn);
+ break;
+ case 0x2: /* Conditional compare */
+ if (insn & (1 << 11)) { /* (immediate) */
+ disas_cc_imm(s, insn);
+ } else { /* (register) */
+ disas_cc_reg(s, insn);
+ }
+ break;
+ case 0x4: /* Conditional select */
+ disas_cond_select(s, insn);
+ break;
+ case 0x6: /* Data-processing */
+ if (insn & (1 << 30)) { /* (1 source) */
+ disas_data_proc_1src(s, insn);
+ } else { /* (2 source) */
+ disas_data_proc_2src(s, insn);
+ }
+ break;
+ default:
+ unallocated_encoding(s);
+ break;
+ }
+ break;
+ default:
+ unallocated_encoding(s);
+ break;
+ }
+}
+
+/* C3.6 Data processing - SIMD and floating point */
+static void disas_data_proc_simd_fp(DisasContext *s, uint32_t insn)
+{
+ unsupported_encoding(s, insn);
+}
+
+/* C3.1 A64 instruction index by encoding */
+static void disas_a64_insn(CPUARMState *env, DisasContext *s)
{
uint32_t insn;
@@ -126,14 +1591,204 @@ void disas_a64_insn(CPUARMState *env, DisasContext *s)
s->insn = insn;
s->pc += 4;
- switch ((insn >> 24) & 0x1f) {
- default:
+ switch (extract32(insn, 25, 4)) {
+ case 0x0: case 0x1: case 0x2: case 0x3: /* UNALLOCATED */
unallocated_encoding(s);
break;
+ case 0x8: case 0x9: /* Data processing - immediate */
+ disas_data_proc_imm(s, insn);
+ break;
+ case 0xa: case 0xb: /* Branch, exception generation and system insns */
+ disas_b_exc_sys(s, insn);
+ break;
+ case 0x4:
+ case 0x6:
+ case 0xc:
+ case 0xe: /* Loads and stores */
+ disas_ldst(s, insn);
+ break;
+ case 0x5:
+ case 0xd: /* Data processing - register */
+ disas_data_proc_reg(s, insn);
+ break;
+ case 0x7:
+ case 0xf: /* Data processing - SIMD and floating point */
+ disas_data_proc_simd_fp(s, insn);
+ break;
+ default:
+ assert(FALSE); /* all 16 cases should be handled above */
+ break;
}
- if (unlikely(s->singlestep_enabled) && (s->is_jmp == DISAS_TB_JUMP)) {
- /* go through the main loop for single step */
- s->is_jmp = DISAS_JUMP;
+ /* if we allocated any temporaries, free them here */
+ free_tmp_a64(s);
+}
+
+void gen_intermediate_code_internal_a64(ARMCPU *cpu,
+ TranslationBlock *tb,
+ bool search_pc)
+{
+ CPUState *cs = CPU(cpu);
+ CPUARMState *env = &cpu->env;
+ DisasContext dc1, *dc = &dc1;
+ CPUBreakpoint *bp;
+ uint16_t *gen_opc_end;
+ int j, lj;
+ target_ulong pc_start;
+ target_ulong next_page_start;
+ int num_insns;
+ int max_insns;
+
+ pc_start = tb->pc;
+
+ dc->tb = tb;
+
+ gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
+
+ dc->is_jmp = DISAS_NEXT;
+ dc->pc = pc_start;
+ dc->singlestep_enabled = cs->singlestep_enabled;
+ dc->condjmp = 0;
+
+ dc->aarch64 = 1;
+ dc->thumb = 0;
+ dc->bswap_code = 0;
+ dc->condexec_mask = 0;
+ dc->condexec_cond = 0;
+#if !defined(CONFIG_USER_ONLY)
+ dc->user = 0;
+#endif
+ dc->vfp_enabled = 0;
+ dc->vec_len = 0;
+ dc->vec_stride = 0;
+
+ init_tmp_a64_array(dc);
+
+ next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
+ lj = -1;
+ num_insns = 0;
+ max_insns = tb->cflags & CF_COUNT_MASK;
+ if (max_insns == 0) {
+ max_insns = CF_COUNT_MASK;
+ }
+
+ gen_tb_start();
+
+ tcg_clear_temp_count();
+
+ do {
+ if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
+ QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
+ if (bp->pc == dc->pc) {
+ gen_exception_insn(dc, 0, EXCP_DEBUG);
+ /* Advance PC so that clearing the breakpoint will
+ invalidate this TB. */
+ dc->pc += 2;
+ goto done_generating;
+ }
+ }
+ }
+
+ if (search_pc) {
+ j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
+ if (lj < j) {
+ lj++;
+ while (lj < j) {
+ tcg_ctx.gen_opc_instr_start[lj++] = 0;
+ }
+ }
+ tcg_ctx.gen_opc_pc[lj] = dc->pc;
+ tcg_ctx.gen_opc_instr_start[lj] = 1;
+ tcg_ctx.gen_opc_icount[lj] = num_insns;
+ }
+
+ if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) {
+ gen_io_start();
+ }
+
+ if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
+ tcg_gen_debug_insn_start(dc->pc);
+ }
+
+ disas_a64_insn(env, dc);
+
+ if (tcg_check_temp_count()) {
+ fprintf(stderr, "TCG temporary leak before "TARGET_FMT_lx"\n",
+ dc->pc);
+ }
+
+ /* Translation stops when a conditional branch is encountered.
+ * Otherwise the subsequent code could get translated several times.
+ * Also stop translation when a page boundary is reached. This
+ * ensures prefetch aborts occur at the right place.
+ */
+ num_insns++;
+ } while (!dc->is_jmp && tcg_ctx.gen_opc_ptr < gen_opc_end &&
+ !cs->singlestep_enabled &&
+ !singlestep &&
+ dc->pc < next_page_start &&
+ num_insns < max_insns);
+
+ if (tb->cflags & CF_LAST_IO) {
+ gen_io_end();
+ }
+
+ if (unlikely(cs->singlestep_enabled) && dc->is_jmp != DISAS_EXC) {
+ /* Note that this means single stepping WFI doesn't halt the CPU.
+ * For conditional branch insns this is harmless unreachable code as
+ * gen_goto_tb() has already handled emitting the debug exception
+ * (and thus a tb-jump is not possible when singlestepping).
+ */
+ assert(dc->is_jmp != DISAS_TB_JUMP);
+ if (dc->is_jmp != DISAS_JUMP) {
+ gen_a64_set_pc_im(dc->pc);
+ }
+ gen_exception(EXCP_DEBUG);
+ } else {
+ switch (dc->is_jmp) {
+ case DISAS_NEXT:
+ gen_goto_tb(dc, 1, dc->pc);
+ break;
+ default:
+ case DISAS_JUMP:
+ case DISAS_UPDATE:
+ /* indicate that the hash table must be used to find the next TB */
+ tcg_gen_exit_tb(0);
+ break;
+ case DISAS_TB_JUMP:
+ case DISAS_EXC:
+ case DISAS_SWI:
+ break;
+ case DISAS_WFI:
+ /* This is a special case because we don't want to just halt the CPU
+ * if trying to debug across a WFI.
+ */
+ gen_helper_wfi(cpu_env);
+ break;
+ }
+ }
+
+done_generating:
+ gen_tb_end(tb, num_insns);
+ *tcg_ctx.gen_opc_ptr = INDEX_op_end;
+
+#ifdef DEBUG_DISAS
+ if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
+ qemu_log("----------------\n");
+ qemu_log("IN: %s\n", lookup_symbol(pc_start));
+ log_target_disas(env, pc_start, dc->pc - pc_start,
+ dc->thumb | (dc->bswap_code << 1));
+ qemu_log("\n");
+ }
+#endif
+ if (search_pc) {
+ j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
+ lj++;
+ while (lj <= j) {
+ tcg_ctx.gen_opc_instr_start[lj++] = 0;
+ }
+ } else {
+ tb->size = dc->pc - pc_start;
+ tb->icount = num_insns;
}
}
diff --git a/target-arm/translate.c b/target-arm/translate.c
index 8c479ff9a8..1403ecf216 100644
--- a/target-arm/translate.c
+++ b/target-arm/translate.c
@@ -56,11 +56,6 @@ static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];
#define IS_USER(s) (s->user)
#endif
-/* These instructions trap after executing, so defer them until after the
- conditional execution state has been updated. */
-#define DISAS_WFI 4
-#define DISAS_SWI 5
-
TCGv_ptr cpu_env;
/* We reuse the same 64-bit temporaries for efficiency. */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
@@ -676,7 +671,11 @@ static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
}
#undef PAS_OP
-static void gen_test_cc(int cc, int label)
+/*
+ * Generate a conditional branch based on ARM condition code cc.
+ * This is common between ARM and AArch64 targets.
+ */
+void arm_gen_test_cc(int cc, int label)
{
TCGv_i32 tmp;
int inv;
@@ -900,11 +899,7 @@ DO_GEN_ST(32, MO_TEUL)
static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
{
- if (s->aarch64) {
- gen_a64_set_pc_im(val);
- } else {
- tcg_gen_movi_i32(cpu_R[15], val);
- }
+ tcg_gen_movi_i32(cpu_R[15], val);
}
/* Force a TB lookup after an instruction that changes the CPU state. */
@@ -4592,6 +4587,8 @@ static const uint8_t neon_3r_sizes[] = {
#define NEON_2RM_VREV16 2
#define NEON_2RM_VPADDL 4
#define NEON_2RM_VPADDL_U 5
+#define NEON_2RM_AESE 6 /* Includes AESD */
+#define NEON_2RM_AESMC 7 /* Includes AESIMC */
#define NEON_2RM_VCLS 8
#define NEON_2RM_VCLZ 9
#define NEON_2RM_VCNT 10
@@ -4649,6 +4646,8 @@ static const uint8_t neon_2rm_sizes[] = {
[NEON_2RM_VREV16] = 0x1,
[NEON_2RM_VPADDL] = 0x7,
[NEON_2RM_VPADDL_U] = 0x7,
+ [NEON_2RM_AESE] = 0x1,
+ [NEON_2RM_AESMC] = 0x1,
[NEON_2RM_VCLS] = 0x7,
[NEON_2RM_VCLZ] = 0x7,
[NEON_2RM_VCNT] = 0x1,
@@ -6184,6 +6183,28 @@ static int disas_neon_data_insn(CPUARMState * env, DisasContext *s, uint32_t ins
tcg_temp_free_i32(tmp2);
tcg_temp_free_i32(tmp3);
break;
+ case NEON_2RM_AESE: case NEON_2RM_AESMC:
+ if (!arm_feature(env, ARM_FEATURE_V8_AES)
+ || ((rm | rd) & 1)) {
+ return 1;
+ }
+ tmp = tcg_const_i32(rd);
+ tmp2 = tcg_const_i32(rm);
+
+ /* Bit 6 is the lowest opcode bit; it distinguishes between
+ * encryption (AESE/AESMC) and decryption (AESD/AESIMC)
+ */
+ tmp3 = tcg_const_i32(extract32(insn, 6, 1));
+
+ if (op == NEON_2RM_AESE) {
+ gen_helper_crypto_aese(cpu_env, tmp, tmp2, tmp3);
+ } else {
+ gen_helper_crypto_aesmc(cpu_env, tmp, tmp2, tmp3);
+ }
+ tcg_temp_free_i32(tmp);
+ tcg_temp_free_i32(tmp2);
+ tcg_temp_free_i32(tmp3);
+ break;
default:
elementwise:
for (pass = 0; pass < (q ? 4 : 2); pass++) {
@@ -7114,7 +7135,7 @@ static void disas_arm_insn(CPUARMState * env, DisasContext *s)
/* if not always execute, we generate a conditional jump to
next instruction */
s->condlabel = gen_new_label();
- gen_test_cc(cond ^ 1, s->condlabel);
+ arm_gen_test_cc(cond ^ 1, s->condlabel);
s->condjmp = 1;
}
if ((insn & 0x0f900000) == 0x03000000) {
@@ -9131,7 +9152,7 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw
op = (insn >> 22) & 0xf;
/* Generate a conditional jump to next instruction. */
s->condlabel = gen_new_label();
- gen_test_cc(op ^ 1, s->condlabel);
+ arm_gen_test_cc(op ^ 1, s->condlabel);
s->condjmp = 1;
/* offset[11:1] = insn[10:0] */
@@ -9488,7 +9509,7 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
cond = s->condexec_cond;
if (cond != 0x0e) { /* Skip conditional when condition is AL. */
s->condlabel = gen_new_label();
- gen_test_cc(cond ^ 1, s->condlabel);
+ arm_gen_test_cc(cond ^ 1, s->condlabel);
s->condjmp = 1;
}
}
@@ -10161,7 +10182,7 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
}
/* generate a conditional jump to next instruction */
s->condlabel = gen_new_label();
- gen_test_cc(cond ^ 1, s->condlabel);
+ arm_gen_test_cc(cond ^ 1, s->condlabel);
s->condjmp = 1;
/* jump to the offset */
@@ -10217,6 +10238,15 @@ static inline void gen_intermediate_code_internal(ARMCPU *cpu,
int max_insns;
/* generate intermediate code */
+
+ /* The A64 decoder has its own top level loop, because it doesn't need
+ * the A32/T32 complexity to do with conditional execution/IT blocks/etc.
+ */
+ if (ARM_TBFLAG_AARCH64_STATE(tb->flags)) {
+ gen_intermediate_code_internal_a64(cpu, tb, search_pc);
+ return;
+ }
+
pc_start = tb->pc;
dc->tb = tb;
@@ -10228,31 +10258,18 @@ static inline void gen_intermediate_code_internal(ARMCPU *cpu,
dc->singlestep_enabled = cs->singlestep_enabled;
dc->condjmp = 0;
- if (ARM_TBFLAG_AARCH64_STATE(tb->flags)) {
- dc->aarch64 = 1;
- dc->thumb = 0;
- dc->bswap_code = 0;
- dc->condexec_mask = 0;
- dc->condexec_cond = 0;
-#if !defined(CONFIG_USER_ONLY)
- dc->user = 0;
-#endif
- dc->vfp_enabled = 0;
- dc->vec_len = 0;
- dc->vec_stride = 0;
- } else {
- dc->aarch64 = 0;
- dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
- dc->bswap_code = ARM_TBFLAG_BSWAP_CODE(tb->flags);
- dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
- dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
+ dc->aarch64 = 0;
+ dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
+ dc->bswap_code = ARM_TBFLAG_BSWAP_CODE(tb->flags);
+ dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
+ dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
#if !defined(CONFIG_USER_ONLY)
- dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
+ dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
#endif
- dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
- dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
- dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
- }
+ dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
+ dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
+ dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
+
cpu_F0s = tcg_temp_new_i32();
cpu_F1s = tcg_temp_new_i32();
cpu_F0d = tcg_temp_new_i64();
@@ -10314,7 +10331,7 @@ static inline void gen_intermediate_code_internal(ARMCPU *cpu,
do {
#ifdef CONFIG_USER_ONLY
/* Intercept jump to the magic kernel page. */
- if (!dc->aarch64 && dc->pc >= 0xffff0000) {
+ if (dc->pc >= 0xffff0000) {
/* We always get here via a jump, so know we are not in a
conditional execution block. */
gen_exception(EXCP_KERNEL_TRAP);
@@ -10362,9 +10379,7 @@ static inline void gen_intermediate_code_internal(ARMCPU *cpu,
tcg_gen_debug_insn_start(dc->pc);
}
- if (dc->aarch64) {
- disas_a64_insn(env, dc);
- } else if (dc->thumb) {
+ if (dc->thumb) {
disas_thumb_insn(env, dc);
if (dc->condexec_mask) {
dc->condexec_cond = (dc->condexec_cond & 0xe)
@@ -10559,8 +10574,9 @@ void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb, int pc_pos)
{
if (is_a64(env)) {
env->pc = tcg_ctx.gen_opc_pc[pc_pos];
+ env->condexec_bits = 0;
} else {
env->regs[15] = tcg_ctx.gen_opc_pc[pc_pos];
+ env->condexec_bits = gen_opc_condexec_bits[pc_pos];
}
- env->condexec_bits = gen_opc_condexec_bits[pc_pos];
}
diff --git a/target-arm/translate.h b/target-arm/translate.h
index 67c776053b..a6f6b3e699 100644
--- a/target-arm/translate.h
+++ b/target-arm/translate.h
@@ -24,20 +24,39 @@ typedef struct DisasContext {
int vec_len;
int vec_stride;
int aarch64;
+#define TMP_A64_MAX 16
+ int tmp_a64_count;
+ TCGv_i64 tmp_a64[TMP_A64_MAX];
} DisasContext;
extern TCGv_ptr cpu_env;
+/* target-specific extra values for is_jmp */
+/* These instructions trap after executing, so the A32/T32 decoder must
+ * defer them until after the conditional execution state has been updated.
+ * WFI also needs special handling when single-stepping.
+ */
+#define DISAS_WFI 4
+#define DISAS_SWI 5
+/* For instructions which unconditionally cause an exception we can skip
+ * emitting unreachable code at the end of the TB in the A64 decoder
+ */
+#define DISAS_EXC 6
+
#ifdef TARGET_AARCH64
void a64_translate_init(void);
-void disas_a64_insn(CPUARMState *env, DisasContext *s);
+void gen_intermediate_code_internal_a64(ARMCPU *cpu,
+ TranslationBlock *tb,
+ bool search_pc);
void gen_a64_set_pc_im(uint64_t val);
#else
static inline void a64_translate_init(void)
{
}
-static inline void disas_a64_insn(CPUARMState *env, DisasContext *s)
+static inline void gen_intermediate_code_internal_a64(ARMCPU *cpu,
+ TranslationBlock *tb,
+ bool search_pc)
{
}
@@ -46,4 +65,6 @@ static inline void gen_a64_set_pc_im(uint64_t val)
}
#endif
+void arm_gen_test_cc(int cc, int label);
+
#endif /* TARGET_ARM_TRANSLATE_H */
diff --git a/target-mips/translate.c b/target-mips/translate.c
index e30273438a..ef0a2c36b0 100644
--- a/target-mips/translate.c
+++ b/target-mips/translate.c
@@ -1606,12 +1606,12 @@ static void gen_ld(DisasContext *ctx, uint32_t opc,
switch (opc) {
#if defined(TARGET_MIPS64)
case OPC_LWU:
- tcg_gen_qemu_ld32u(t0, t0, ctx->mem_idx);
+ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEUL);
gen_store_gpr(t0, rt);
opn = "lwu";
break;
case OPC_LD:
- tcg_gen_qemu_ld64(t0, t0, ctx->mem_idx);
+ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEQ);
gen_store_gpr(t0, rt);
opn = "ld";
break;
@@ -1629,7 +1629,7 @@ static void gen_ld(DisasContext *ctx, uint32_t opc,
#endif
tcg_gen_shli_tl(t1, t1, 3);
tcg_gen_andi_tl(t0, t0, ~7);
- tcg_gen_qemu_ld64(t0, t0, ctx->mem_idx);
+ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEQ);
tcg_gen_shl_tl(t0, t0, t1);
tcg_gen_xori_tl(t1, t1, 63);
t2 = tcg_const_tl(0x7fffffffffffffffull);
@@ -1650,7 +1650,7 @@ static void gen_ld(DisasContext *ctx, uint32_t opc,
#endif
tcg_gen_shli_tl(t1, t1, 3);
tcg_gen_andi_tl(t0, t0, ~7);
- tcg_gen_qemu_ld64(t0, t0, ctx->mem_idx);
+ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEQ);
tcg_gen_shr_tl(t0, t0, t1);
tcg_gen_xori_tl(t1, t1, 63);
t2 = tcg_const_tl(0xfffffffffffffffeull);
@@ -1667,7 +1667,7 @@ static void gen_ld(DisasContext *ctx, uint32_t opc,
t1 = tcg_const_tl(pc_relative_pc(ctx));
gen_op_addr_add(ctx, t0, t0, t1);
tcg_temp_free(t1);
- tcg_gen_qemu_ld64(t0, t0, ctx->mem_idx);
+ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEQ);
gen_store_gpr(t0, rt);
opn = "ldpc";
break;
@@ -1676,32 +1676,32 @@ static void gen_ld(DisasContext *ctx, uint32_t opc,
t1 = tcg_const_tl(pc_relative_pc(ctx));
gen_op_addr_add(ctx, t0, t0, t1);
tcg_temp_free(t1);
- tcg_gen_qemu_ld32s(t0, t0, ctx->mem_idx);
+ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TESL);
gen_store_gpr(t0, rt);
opn = "lwpc";
break;
case OPC_LW:
- tcg_gen_qemu_ld32s(t0, t0, ctx->mem_idx);
+ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TESL);
gen_store_gpr(t0, rt);
opn = "lw";
break;
case OPC_LH:
- tcg_gen_qemu_ld16s(t0, t0, ctx->mem_idx);
+ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TESW);
gen_store_gpr(t0, rt);
opn = "lh";
break;
case OPC_LHU:
- tcg_gen_qemu_ld16u(t0, t0, ctx->mem_idx);
+ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEUW);
gen_store_gpr(t0, rt);
opn = "lhu";
break;
case OPC_LB:
- tcg_gen_qemu_ld8s(t0, t0, ctx->mem_idx);
+ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_SB);
gen_store_gpr(t0, rt);
opn = "lb";
break;
case OPC_LBU:
- tcg_gen_qemu_ld8u(t0, t0, ctx->mem_idx);
+ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_UB);
gen_store_gpr(t0, rt);
opn = "lbu";
break;
@@ -1713,7 +1713,7 @@ static void gen_ld(DisasContext *ctx, uint32_t opc,
#endif
tcg_gen_shli_tl(t1, t1, 3);
tcg_gen_andi_tl(t0, t0, ~3);
- tcg_gen_qemu_ld32u(t0, t0, ctx->mem_idx);
+ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEUL);
tcg_gen_shl_tl(t0, t0, t1);
tcg_gen_xori_tl(t1, t1, 31);
t2 = tcg_const_tl(0x7fffffffull);
@@ -1735,7 +1735,7 @@ static void gen_ld(DisasContext *ctx, uint32_t opc,
#endif
tcg_gen_shli_tl(t1, t1, 3);
tcg_gen_andi_tl(t0, t0, ~3);
- tcg_gen_qemu_ld32u(t0, t0, ctx->mem_idx);
+ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEUL);
tcg_gen_shr_tl(t0, t0, t1);
tcg_gen_xori_tl(t1, t1, 31);
t2 = tcg_const_tl(0xfffffffeull);
@@ -1774,7 +1774,7 @@ static void gen_st (DisasContext *ctx, uint32_t opc, int rt,
switch (opc) {
#if defined(TARGET_MIPS64)
case OPC_SD:
- tcg_gen_qemu_st64(t1, t0, ctx->mem_idx);
+ tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEQ);
opn = "sd";
break;
case OPC_SDL:
@@ -1789,15 +1789,15 @@ static void gen_st (DisasContext *ctx, uint32_t opc, int rt,
break;
#endif
case OPC_SW:
- tcg_gen_qemu_st32(t1, t0, ctx->mem_idx);
+ tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL);
opn = "sw";
break;
case OPC_SH:
- tcg_gen_qemu_st16(t1, t0, ctx->mem_idx);
+ tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUW);
opn = "sh";
break;
case OPC_SB:
- tcg_gen_qemu_st8(t1, t0, ctx->mem_idx);
+ tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_8);
opn = "sb";
break;
case OPC_SWL:
@@ -1868,9 +1868,7 @@ static void gen_flt_ldst (DisasContext *ctx, uint32_t opc, int ft,
case OPC_LWC1:
{
TCGv_i32 fp0 = tcg_temp_new_i32();
-
- tcg_gen_qemu_ld32s(t0, t0, ctx->mem_idx);
- tcg_gen_trunc_tl_i32(fp0, t0);
+ tcg_gen_qemu_ld_i32(fp0, t0, ctx->mem_idx, MO_TESL);
gen_store_fpr32(fp0, ft);
tcg_temp_free_i32(fp0);
}
@@ -1879,12 +1877,8 @@ static void gen_flt_ldst (DisasContext *ctx, uint32_t opc, int ft,
case OPC_SWC1:
{
TCGv_i32 fp0 = tcg_temp_new_i32();
- TCGv t1 = tcg_temp_new();
-
gen_load_fpr32(fp0, ft);
- tcg_gen_extu_i32_tl(t1, fp0);
- tcg_gen_qemu_st32(t1, t0, ctx->mem_idx);
- tcg_temp_free(t1);
+ tcg_gen_qemu_st_i32(fp0, t0, ctx->mem_idx, MO_TEUL);
tcg_temp_free_i32(fp0);
}
opn = "swc1";
@@ -1892,8 +1886,7 @@ static void gen_flt_ldst (DisasContext *ctx, uint32_t opc, int ft,
case OPC_LDC1:
{
TCGv_i64 fp0 = tcg_temp_new_i64();
-
- tcg_gen_qemu_ld64(fp0, t0, ctx->mem_idx);
+ tcg_gen_qemu_ld_i64(fp0, t0, ctx->mem_idx, MO_TEQ);
gen_store_fpr64(ctx, fp0, ft);
tcg_temp_free_i64(fp0);
}
@@ -1902,9 +1895,8 @@ static void gen_flt_ldst (DisasContext *ctx, uint32_t opc, int ft,
case OPC_SDC1:
{
TCGv_i64 fp0 = tcg_temp_new_i64();
-
gen_load_fpr64(ctx, fp0, ft);
- tcg_gen_qemu_st64(fp0, t0, ctx->mem_idx);
+ tcg_gen_qemu_st_i64(fp0, t0, ctx->mem_idx, MO_TEQ);
tcg_temp_free_i64(fp0);
}
opn = "sdc1";
@@ -8652,7 +8644,7 @@ static void gen_flt3_ldst (DisasContext *ctx, uint32_t opc,
{
TCGv_i32 fp0 = tcg_temp_new_i32();
- tcg_gen_qemu_ld32s(t0, t0, ctx->mem_idx);
+ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TESL);
tcg_gen_trunc_tl_i32(fp0, t0);
gen_store_fpr32(fp0, fd);
tcg_temp_free_i32(fp0);
@@ -8664,8 +8656,7 @@ static void gen_flt3_ldst (DisasContext *ctx, uint32_t opc,
check_cp1_registers(ctx, fd);
{
TCGv_i64 fp0 = tcg_temp_new_i64();
-
- tcg_gen_qemu_ld64(fp0, t0, ctx->mem_idx);
+ tcg_gen_qemu_ld_i64(fp0, t0, ctx->mem_idx, MO_TEQ);
gen_store_fpr64(ctx, fp0, fd);
tcg_temp_free_i64(fp0);
}
@@ -8677,7 +8668,7 @@ static void gen_flt3_ldst (DisasContext *ctx, uint32_t opc,
{
TCGv_i64 fp0 = tcg_temp_new_i64();
- tcg_gen_qemu_ld64(fp0, t0, ctx->mem_idx);
+ tcg_gen_qemu_ld_i64(fp0, t0, ctx->mem_idx, MO_TEQ);
gen_store_fpr64(ctx, fp0, fd);
tcg_temp_free_i64(fp0);
}
@@ -8687,13 +8678,9 @@ static void gen_flt3_ldst (DisasContext *ctx, uint32_t opc,
check_cop1x(ctx);
{
TCGv_i32 fp0 = tcg_temp_new_i32();
- TCGv t1 = tcg_temp_new();
-
gen_load_fpr32(fp0, fs);
- tcg_gen_extu_i32_tl(t1, fp0);
- tcg_gen_qemu_st32(t1, t0, ctx->mem_idx);
+ tcg_gen_qemu_st_i32(fp0, t0, ctx->mem_idx, MO_TEUL);
tcg_temp_free_i32(fp0);
- tcg_temp_free(t1);
}
opn = "swxc1";
store = 1;
@@ -8703,9 +8690,8 @@ static void gen_flt3_ldst (DisasContext *ctx, uint32_t opc,
check_cp1_registers(ctx, fs);
{
TCGv_i64 fp0 = tcg_temp_new_i64();
-
gen_load_fpr64(ctx, fp0, fs);
- tcg_gen_qemu_st64(fp0, t0, ctx->mem_idx);
+ tcg_gen_qemu_st_i64(fp0, t0, ctx->mem_idx, MO_TEQ);
tcg_temp_free_i64(fp0);
}
opn = "sdxc1";
@@ -8716,9 +8702,8 @@ static void gen_flt3_ldst (DisasContext *ctx, uint32_t opc,
tcg_gen_andi_tl(t0, t0, ~0x7);
{
TCGv_i64 fp0 = tcg_temp_new_i64();
-
gen_load_fpr64(ctx, fp0, fs);
- tcg_gen_qemu_st64(fp0, t0, ctx->mem_idx);
+ tcg_gen_qemu_st_i64(fp0, t0, ctx->mem_idx, MO_TEQ);
tcg_temp_free_i64(fp0);
}
opn = "suxc1";
@@ -9286,30 +9271,30 @@ static void gen_mips16_save (DisasContext *ctx,
case 4:
gen_base_offset_addr(ctx, t0, 29, 12);
gen_load_gpr(t1, 7);
- tcg_gen_qemu_st32(t1, t0, ctx->mem_idx);
+ tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL);
/* Fall through */
case 3:
gen_base_offset_addr(ctx, t0, 29, 8);
gen_load_gpr(t1, 6);
- tcg_gen_qemu_st32(t1, t0, ctx->mem_idx);
+ tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL);
/* Fall through */
case 2:
gen_base_offset_addr(ctx, t0, 29, 4);
gen_load_gpr(t1, 5);
- tcg_gen_qemu_st32(t1, t0, ctx->mem_idx);
+ tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL);
/* Fall through */
case 1:
gen_base_offset_addr(ctx, t0, 29, 0);
gen_load_gpr(t1, 4);
- tcg_gen_qemu_st32(t1, t0, ctx->mem_idx);
+ tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL);
}
gen_load_gpr(t0, 29);
-#define DECR_AND_STORE(reg) do { \
- tcg_gen_subi_tl(t0, t0, 4); \
- gen_load_gpr(t1, reg); \
- tcg_gen_qemu_st32(t1, t0, ctx->mem_idx); \
+#define DECR_AND_STORE(reg) do { \
+ tcg_gen_subi_tl(t0, t0, 4); \
+ gen_load_gpr(t1, reg); \
+ tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL); \
} while (0)
if (do_ra) {
@@ -9407,10 +9392,10 @@ static void gen_mips16_restore (DisasContext *ctx,
tcg_gen_addi_tl(t0, cpu_gpr[29], framesize);
-#define DECR_AND_LOAD(reg) do { \
- tcg_gen_subi_tl(t0, t0, 4); \
- tcg_gen_qemu_ld32s(t1, t0, ctx->mem_idx); \
- gen_store_gpr(t1, reg); \
+#define DECR_AND_LOAD(reg) do { \
+ tcg_gen_subi_tl(t0, t0, 4); \
+ tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TESL); \
+ gen_store_gpr(t1, reg); \
} while (0)
if (do_ra) {
@@ -10935,7 +10920,7 @@ static void gen_ldxs (DisasContext *ctx, int base, int index, int rd)
gen_op_addr_add(ctx, t0, t1, t0);
}
- tcg_gen_qemu_ld32s(t1, t0, ctx->mem_idx);
+ tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TESL);
gen_store_gpr(t1, rd);
tcg_temp_free(t0);
@@ -10964,21 +10949,21 @@ static void gen_ldst_pair (DisasContext *ctx, uint32_t opc, int rd,
generate_exception(ctx, EXCP_RI);
return;
}
- tcg_gen_qemu_ld32s(t1, t0, ctx->mem_idx);
+ tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TESL);
gen_store_gpr(t1, rd);
tcg_gen_movi_tl(t1, 4);
gen_op_addr_add(ctx, t0, t0, t1);
- tcg_gen_qemu_ld32s(t1, t0, ctx->mem_idx);
+ tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TESL);
gen_store_gpr(t1, rd+1);
opn = "lwp";
break;
case SWP:
gen_load_gpr(t1, rd);
- tcg_gen_qemu_st32(t1, t0, ctx->mem_idx);
+ tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL);
tcg_gen_movi_tl(t1, 4);
gen_op_addr_add(ctx, t0, t0, t1);
gen_load_gpr(t1, rd+1);
- tcg_gen_qemu_st32(t1, t0, ctx->mem_idx);
+ tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL);
opn = "swp";
break;
#ifdef TARGET_MIPS64
@@ -10987,21 +10972,21 @@ static void gen_ldst_pair (DisasContext *ctx, uint32_t opc, int rd,
generate_exception(ctx, EXCP_RI);
return;
}
- tcg_gen_qemu_ld64(t1, t0, ctx->mem_idx);
+ tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TEQ);
gen_store_gpr(t1, rd);
tcg_gen_movi_tl(t1, 8);
gen_op_addr_add(ctx, t0, t0, t1);
- tcg_gen_qemu_ld64(t1, t0, ctx->mem_idx);
+ tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TEQ);
gen_store_gpr(t1, rd+1);
opn = "ldp";
break;
case SDP:
gen_load_gpr(t1, rd);
- tcg_gen_qemu_st64(t1, t0, ctx->mem_idx);
+ tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEQ);
tcg_gen_movi_tl(t1, 8);
gen_op_addr_add(ctx, t0, t0, t1);
gen_load_gpr(t1, rd+1);
- tcg_gen_qemu_st64(t1, t0, ctx->mem_idx);
+ tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEQ);
opn = "sdp";
break;
#endif
@@ -12672,23 +12657,23 @@ static void gen_mipsdsp_ld(DisasContext *ctx, uint32_t opc,
switch (opc) {
case OPC_LBUX:
- tcg_gen_qemu_ld8u(t0, t0, ctx->mem_idx);
+ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_UB);
gen_store_gpr(t0, rd);
opn = "lbux";
break;
case OPC_LHX:
- tcg_gen_qemu_ld16s(t0, t0, ctx->mem_idx);
+ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TESW);
gen_store_gpr(t0, rd);
opn = "lhx";
break;
case OPC_LWX:
- tcg_gen_qemu_ld32s(t0, t0, ctx->mem_idx);
+ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TESL);
gen_store_gpr(t0, rd);
opn = "lwx";
break;
#if defined(TARGET_MIPS64)
case OPC_LDX:
- tcg_gen_qemu_ld64(t0, t0, ctx->mem_idx);
+ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEQ);
gen_store_gpr(t0, rd);
opn = "ldx";
break;
diff --git a/target-ppc/cpu-models.c b/target-ppc/cpu-models.c
index 8dea560383..7c9466fc07 100644
--- a/target-ppc/cpu-models.c
+++ b/target-ppc/cpu-models.c
@@ -44,6 +44,7 @@
PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); \
\
pcc->pvr = _pvr; \
+ pcc->pvr_mask = CPU_POWERPC_DEFAULT_MASK; \
pcc->svr = _svr; \
dc->desc = _desc; \
} \
@@ -1139,7 +1140,7 @@
"POWER7 v2.1")
POWERPC_DEF("POWER7_v2.3", CPU_POWERPC_POWER7_v23, POWER7,
"POWER7 v2.3")
- POWERPC_DEF("POWER7+_v2.1", CPU_POWERPC_POWER7P_v21, POWER7,
+ POWERPC_DEF("POWER7+_v2.1", CPU_POWERPC_POWER7P_v21, POWER7P,
"POWER7+ v2.1")
POWERPC_DEF("POWER8_v1.0", CPU_POWERPC_POWER8_v10, POWER8,
"POWER8 v1.0")
diff --git a/target-ppc/cpu-models.h b/target-ppc/cpu-models.h
index d9145d147f..49ba4a4522 100644
--- a/target-ppc/cpu-models.h
+++ b/target-ppc/cpu-models.h
@@ -39,6 +39,7 @@ extern PowerPCCPUAlias ppc_cpu_aliases[];
/*****************************************************************************/
/* PVR definitions for most known PowerPC */
enum {
+ CPU_POWERPC_DEFAULT_MASK = 0xFFFFFFFF,
/* PowerPC 401 family */
/* Generic PowerPC 401 */
#define CPU_POWERPC_401 CPU_POWERPC_401G2
@@ -552,10 +553,16 @@ enum {
CPU_POWERPC_POWER6 = 0x003E0000,
CPU_POWERPC_POWER6_5 = 0x0F000001, /* POWER6 in POWER5 mode */
CPU_POWERPC_POWER6A = 0x0F000002,
+ CPU_POWERPC_POWER7_BASE = 0x003F0000,
+ CPU_POWERPC_POWER7_MASK = 0xFFFF0000,
CPU_POWERPC_POWER7_v20 = 0x003F0200,
CPU_POWERPC_POWER7_v21 = 0x003F0201,
CPU_POWERPC_POWER7_v23 = 0x003F0203,
+ CPU_POWERPC_POWER7P_BASE = 0x004A0000,
+ CPU_POWERPC_POWER7P_MASK = 0xFFFF0000,
CPU_POWERPC_POWER7P_v21 = 0x004A0201,
+ CPU_POWERPC_POWER8_BASE = 0x004B0000,
+ CPU_POWERPC_POWER8_MASK = 0xFFFF0000,
CPU_POWERPC_POWER8_v10 = 0x004B0100,
CPU_POWERPC_970 = 0x00390202,
CPU_POWERPC_970FX_v10 = 0x00391100,
diff --git a/target-ppc/cpu-qom.h b/target-ppc/cpu-qom.h
index 827e5dd0e1..72b22329b0 100644
--- a/target-ppc/cpu-qom.h
+++ b/target-ppc/cpu-qom.h
@@ -54,6 +54,7 @@ typedef struct PowerPCCPUClass {
void (*parent_reset)(CPUState *cpu);
uint32_t pvr;
+ uint32_t pvr_mask;
uint32_t svr;
uint64_t insns_flags;
uint64_t insns_flags2;
@@ -99,6 +100,7 @@ static inline PowerPCCPU *ppc_env_get_cpu(CPUPPCState *env)
#define ENV_OFFSET offsetof(PowerPCCPU, env)
PowerPCCPUClass *ppc_cpu_class_by_pvr(uint32_t pvr);
+PowerPCCPUClass *ppc_cpu_class_by_pvr_mask(uint32_t pvr);
void ppc_cpu_do_interrupt(CPUState *cpu);
void ppc_cpu_dump_state(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
diff --git a/target-ppc/cpu.h b/target-ppc/cpu.h
index 26acdba847..bb847676a5 100644
--- a/target-ppc/cpu.h
+++ b/target-ppc/cpu.h
@@ -236,6 +236,8 @@ enum {
POWERPC_EXCP_NMEXTBR = 91, /* Non maskable external breakpoint */
POWERPC_EXCP_ITLBE = 92, /* Instruction TLB error */
POWERPC_EXCP_DTLBE = 93, /* Data TLB error */
+ /* VSX Unavailable (Power ISA 2.06 and later) */
+ POWERPC_EXCP_VSXU = 94, /* VSX Unavailable */
/* EOL */
POWERPC_EXCP_NB = 96,
/* QEMU exceptions: used internally during code translation */
@@ -427,6 +429,7 @@ struct ppc_slb_t {
#define MSR_VR 25 /* altivec available x hflags */
#define MSR_SPE 25 /* SPE enable for BookE x hflags */
#define MSR_AP 23 /* Access privilege state on 602 hflags */
+#define MSR_VSX 23 /* Vector Scalar Extension (ISA 2.06 and later) x hflags */
#define MSR_SA 22 /* Supervisor access mode on 602 hflags */
#define MSR_KEY 19 /* key bit on 603e */
#define MSR_POW 18 /* Power management */
@@ -467,6 +470,7 @@ struct ppc_slb_t {
#define msr_vr ((env->msr >> MSR_VR) & 1)
#define msr_spe ((env->msr >> MSR_SPE) & 1)
#define msr_ap ((env->msr >> MSR_AP) & 1)
+#define msr_vsx ((env->msr >> MSR_VSX) & 1)
#define msr_sa ((env->msr >> MSR_SA) & 1)
#define msr_key ((env->msr >> MSR_KEY) & 1)
#define msr_pow ((env->msr >> MSR_POW) & 1)
@@ -549,6 +553,8 @@ enum {
POWERPC_FLAG_BUS_CLK = 0x00020000,
/* Has CFAR */
POWERPC_FLAG_CFAR = 0x00040000,
+ /* Has VSX */
+ POWERPC_FLAG_VSX = 0x00080000,
};
/*****************************************************************************/
@@ -1870,7 +1876,8 @@ enum {
/* Book I 2.05 PowerPC specification */
PPC2_ISA205 = 0x0000000000000020ULL,
-#define PPC_TCG_INSNS2 (PPC2_BOOKE206 | PPC2_PRCNTL | PPC2_DBRX | PPC2_ISA205)
+#define PPC_TCG_INSNS2 (PPC2_BOOKE206 | PPC2_VSX | PPC2_PRCNTL | PPC2_DBRX | \
+ PPC2_ISA205)
};
/*****************************************************************************/
diff --git a/target-ppc/excp_helper.c b/target-ppc/excp_helper.c
index c959460f70..26c57d9a34 100644
--- a/target-ppc/excp_helper.c
+++ b/target-ppc/excp_helper.c
@@ -390,6 +390,11 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
new_msr |= (target_ulong)MSR_HVB;
}
goto store_current;
+ case POWERPC_EXCP_VSXU: /* VSX unavailable exception */
+ if (lpes1 == 0) {
+ new_msr |= (target_ulong)MSR_HVB;
+ }
+ goto store_current;
case POWERPC_EXCP_PIT: /* Programmable interval timer interrupt */
LOG_EXCP("PIT exception\n");
goto store_next;
diff --git a/target-ppc/helper_regs.h b/target-ppc/helper_regs.h
index a6d5e2fe2f..c02e8da4e4 100644
--- a/target-ppc/helper_regs.h
+++ b/target-ppc/helper_regs.h
@@ -56,7 +56,7 @@ static inline void hreg_compute_hflags(CPUPPCState *env)
/* We 'forget' FE0 & FE1: we'll never generate imprecise exceptions */
hflags_mask = (1 << MSR_VR) | (1 << MSR_AP) | (1 << MSR_SA) |
(1 << MSR_PR) | (1 << MSR_FP) | (1 << MSR_SE) | (1 << MSR_BE) |
- (1 << MSR_LE);
+ (1 << MSR_LE) | (1 << MSR_VSX);
hflags_mask |= (1ULL << MSR_CM) | (1ULL << MSR_SF) | MSR_HVB;
hreg_compute_mem_idx(env);
env->hflags = env->msr & hflags_mask;
diff --git a/target-ppc/kvm.c b/target-ppc/kvm.c
index b77ce5e94c..781b72f1ea 100644
--- a/target-ppc/kvm.c
+++ b/target-ppc/kvm.c
@@ -1745,6 +1745,7 @@ static void kvmppc_host_cpu_class_init(ObjectClass *oc, void *data)
uint32_t icache_size = kvmppc_read_int_cpu_dt("i-cache-size");
/* Now fix up the class with information we can query from the host */
+ pcc->pvr = mfpvr();
if (vmx != -1) {
/* Only override when we know what the host supports */
@@ -1795,6 +1796,9 @@ static int kvm_ppc_register_host_cpu_type(void)
pvr_pcc = ppc_cpu_class_by_pvr(host_pvr);
if (pvr_pcc == NULL) {
+ pvr_pcc = ppc_cpu_class_by_pvr_mask(host_pvr);
+ }
+ if (pvr_pcc == NULL) {
return -1;
}
type_info.parent = object_class_get_name(OBJECT_CLASS(pvr_pcc));
@@ -1902,3 +1906,31 @@ int kvm_arch_on_sigbus(int code, void *addr)
void kvm_arch_init_irq_routing(KVMState *s)
{
}
+
+int kvm_arch_insert_sw_breakpoint(CPUState *cpu, struct kvm_sw_breakpoint *bp)
+{
+ return -EINVAL;
+}
+
+int kvm_arch_remove_sw_breakpoint(CPUState *cpu, struct kvm_sw_breakpoint *bp)
+{
+ return -EINVAL;
+}
+
+int kvm_arch_insert_hw_breakpoint(target_ulong addr, target_ulong len, int type)
+{
+ return -EINVAL;
+}
+
+int kvm_arch_remove_hw_breakpoint(target_ulong addr, target_ulong len, int type)
+{
+ return -EINVAL;
+}
+
+void kvm_arch_remove_all_hw_breakpoints(void)
+{
+}
+
+void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg)
+{
+}
diff --git a/target-ppc/translate.c b/target-ppc/translate.c
index 66c777174c..ea58dc90d5 100644
--- a/target-ppc/translate.c
+++ b/target-ppc/translate.c
@@ -51,6 +51,7 @@ static char cpu_reg_names[10*3 + 22*4 /* GPR */
#endif
+ 10*4 + 22*5 /* FPR */
+ 2*(10*6 + 22*7) /* AVRh, AVRl */
+ + 10*5 + 22*6 /* VSR */
+ 8*5 /* CRF */];
static TCGv cpu_gpr[32];
#if !defined(TARGET_PPC64)
@@ -58,6 +59,7 @@ static TCGv cpu_gprh[32];
#endif
static TCGv_i64 cpu_fpr[32];
static TCGv_i64 cpu_avrh[32], cpu_avrl[32];
+static TCGv_i64 cpu_vsr[32];
static TCGv_i32 cpu_crf[8];
static TCGv cpu_nip;
static TCGv cpu_msr;
@@ -137,6 +139,11 @@ void ppc_translate_init(void)
#endif
p += (i < 10) ? 6 : 7;
cpu_reg_names_size -= (i < 10) ? 6 : 7;
+ snprintf(p, cpu_reg_names_size, "vsr%d", i);
+ cpu_vsr[i] = tcg_global_mem_new_i64(TCG_AREG0,
+ offsetof(CPUPPCState, vsr[i]), p);
+ p += (i < 10) ? 5 : 6;
+ cpu_reg_names_size -= (i < 10) ? 5 : 6;
}
cpu_nip = tcg_global_mem_new(TCG_AREG0,
@@ -195,6 +202,7 @@ typedef struct DisasContext {
#endif
int fpu_enabled;
int altivec_enabled;
+ int vsx_enabled;
int spe_enabled;
ppc_spr_t *spr_cb; /* Needed to check rights for mfspr/mtspr */
int singlestep_enabled;
@@ -365,6 +373,12 @@ static inline int32_t name(uint32_t opcode) \
return (int16_t)((opcode >> (shift)) & ((1 << (nb)) - 1)); \
}
+#define EXTRACT_HELPER_SPLIT(name, shift1, nb1, shift2, nb2) \
+static inline uint32_t name(uint32_t opcode) \
+{ \
+ return (((opcode >> (shift1)) & ((1 << (nb1)) - 1)) << nb2) | \
+ ((opcode >> (shift2)) & ((1 << (nb2)) - 1)); \
+}
/* Opcode part 1 */
EXTRACT_HELPER(opc1, 26, 6);
/* Opcode part 2 */
@@ -479,6 +493,14 @@ static inline target_ulong MASK(uint32_t start, uint32_t end)
return ret;
}
+EXTRACT_HELPER_SPLIT(xT, 0, 1, 21, 5);
+EXTRACT_HELPER_SPLIT(xS, 0, 1, 21, 5);
+EXTRACT_HELPER_SPLIT(xA, 2, 1, 16, 5);
+EXTRACT_HELPER_SPLIT(xB, 1, 1, 11, 5);
+EXTRACT_HELPER_SPLIT(xC, 3, 1, 6, 5);
+EXTRACT_HELPER(DM, 8, 2);
+EXTRACT_HELPER(UIM, 16, 2);
+EXTRACT_HELPER(SHW, 8, 2);
/*****************************************************************************/
/* PowerPC instructions table */
@@ -6964,10 +6986,473 @@ GEN_VAFORM_PAIRED(vmsumshm, vmsumshs, 20)
GEN_VAFORM_PAIRED(vsel, vperm, 21)
GEN_VAFORM_PAIRED(vmaddfp, vnmsubfp, 23)
+/*** VSX extension ***/
+
+static inline TCGv_i64 cpu_vsrh(int n)
+{
+ if (n < 32) {
+ return cpu_fpr[n];
+ } else {
+ return cpu_avrh[n-32];
+ }
+}
+
+static inline TCGv_i64 cpu_vsrl(int n)
+{
+ if (n < 32) {
+ return cpu_vsr[n];
+ } else {
+ return cpu_avrl[n-32];
+ }
+}
+
+static void gen_lxsdx(DisasContext *ctx)
+{
+ TCGv EA;
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+ gen_set_access_type(ctx, ACCESS_INT);
+ EA = tcg_temp_new();
+ gen_addr_reg_index(ctx, EA);
+ gen_qemu_ld64(ctx, cpu_vsrh(xT(ctx->opcode)), EA);
+ /* NOTE: cpu_vsrl is undefined */
+ tcg_temp_free(EA);
+}
+
+static void gen_lxvd2x(DisasContext *ctx)
+{
+ TCGv EA;
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+ gen_set_access_type(ctx, ACCESS_INT);
+ EA = tcg_temp_new();
+ gen_addr_reg_index(ctx, EA);
+ gen_qemu_ld64(ctx, cpu_vsrh(xT(ctx->opcode)), EA);
+ tcg_gen_addi_tl(EA, EA, 8);
+ gen_qemu_ld64(ctx, cpu_vsrl(xT(ctx->opcode)), EA);
+ tcg_temp_free(EA);
+}
+
+static void gen_lxvdsx(DisasContext *ctx)
+{
+ TCGv EA;
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+ gen_set_access_type(ctx, ACCESS_INT);
+ EA = tcg_temp_new();
+ gen_addr_reg_index(ctx, EA);
+ gen_qemu_ld64(ctx, cpu_vsrh(xT(ctx->opcode)), EA);
+ tcg_gen_mov_tl(cpu_vsrl(xT(ctx->opcode)), cpu_vsrh(xT(ctx->opcode)));
+ tcg_temp_free(EA);
+}
+
+static void gen_lxvw4x(DisasContext *ctx)
+{
+ TCGv EA, tmp;
+ TCGv_i64 xth = cpu_vsrh(xT(ctx->opcode));
+ TCGv_i64 xtl = cpu_vsrl(xT(ctx->opcode));
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+ gen_set_access_type(ctx, ACCESS_INT);
+ EA = tcg_temp_new();
+ tmp = tcg_temp_new();
+ gen_addr_reg_index(ctx, EA);
+ gen_qemu_ld32u(ctx, tmp, EA);
+ tcg_gen_addi_tl(EA, EA, 4);
+ gen_qemu_ld32u(ctx, xth, EA);
+ tcg_gen_deposit_i64(xth, xth, tmp, 32, 32);
+
+ tcg_gen_addi_tl(EA, EA, 4);
+ gen_qemu_ld32u(ctx, tmp, EA);
+ tcg_gen_addi_tl(EA, EA, 4);
+ gen_qemu_ld32u(ctx, xtl, EA);
+ tcg_gen_deposit_i64(xtl, xtl, tmp, 32, 32);
+
+ tcg_temp_free(EA);
+ tcg_temp_free(tmp);
+}
+
+static void gen_stxsdx(DisasContext *ctx)
+{
+ TCGv EA;
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+ gen_set_access_type(ctx, ACCESS_INT);
+ EA = tcg_temp_new();
+ gen_addr_reg_index(ctx, EA);
+ gen_qemu_st64(ctx, cpu_vsrh(xS(ctx->opcode)), EA);
+ tcg_temp_free(EA);
+}
+
+static void gen_stxvd2x(DisasContext *ctx)
+{
+ TCGv EA;
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+ gen_set_access_type(ctx, ACCESS_INT);
+ EA = tcg_temp_new();
+ gen_addr_reg_index(ctx, EA);
+ gen_qemu_st64(ctx, cpu_vsrh(xS(ctx->opcode)), EA);
+ tcg_gen_addi_tl(EA, EA, 8);
+ gen_qemu_st64(ctx, cpu_vsrl(xS(ctx->opcode)), EA);
+ tcg_temp_free(EA);
+}
+
+static void gen_stxvw4x(DisasContext *ctx)
+{
+ TCGv EA, tmp;
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+ gen_set_access_type(ctx, ACCESS_INT);
+ EA = tcg_temp_new();
+ gen_addr_reg_index(ctx, EA);
+ tmp = tcg_temp_new();
+
+ tcg_gen_shri_i64(tmp, cpu_vsrh(xS(ctx->opcode)), 32);
+ gen_qemu_st32(ctx, tmp, EA);
+ tcg_gen_addi_tl(EA, EA, 4);
+ gen_qemu_st32(ctx, cpu_vsrh(xS(ctx->opcode)), EA);
+
+ tcg_gen_shri_i64(tmp, cpu_vsrl(xS(ctx->opcode)), 32);
+ tcg_gen_addi_tl(EA, EA, 4);
+ gen_qemu_st32(ctx, tmp, EA);
+ tcg_gen_addi_tl(EA, EA, 4);
+ gen_qemu_st32(ctx, cpu_vsrl(xS(ctx->opcode)), EA);
+
+ tcg_temp_free(EA);
+ tcg_temp_free(tmp);
+}
+
+static void gen_xxpermdi(DisasContext *ctx)
+{
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+
+ if ((DM(ctx->opcode) & 2) == 0) {
+ tcg_gen_mov_i64(cpu_vsrh(xT(ctx->opcode)), cpu_vsrh(xA(ctx->opcode)));
+ } else {
+ tcg_gen_mov_i64(cpu_vsrh(xT(ctx->opcode)), cpu_vsrl(xA(ctx->opcode)));
+ }
+ if ((DM(ctx->opcode) & 1) == 0) {
+ tcg_gen_mov_i64(cpu_vsrl(xT(ctx->opcode)), cpu_vsrh(xB(ctx->opcode)));
+ } else {
+ tcg_gen_mov_i64(cpu_vsrl(xT(ctx->opcode)), cpu_vsrl(xB(ctx->opcode)));
+ }
+}
+
+#define OP_ABS 1
+#define OP_NABS 2
+#define OP_NEG 3
+#define OP_CPSGN 4
+#define SGN_MASK_DP 0x8000000000000000ul
+#define SGN_MASK_SP 0x8000000080000000ul
+
+#define VSX_SCALAR_MOVE(name, op, sgn_mask) \
+static void glue(gen_, name)(DisasContext * ctx) \
+ { \
+ TCGv_i64 xb, sgm; \
+ if (unlikely(!ctx->vsx_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VSXU); \
+ return; \
+ } \
+ xb = tcg_temp_new(); \
+ sgm = tcg_temp_new(); \
+ tcg_gen_mov_i64(xb, cpu_vsrh(xB(ctx->opcode))); \
+ tcg_gen_movi_i64(sgm, sgn_mask); \
+ switch (op) { \
+ case OP_ABS: { \
+ tcg_gen_andc_i64(xb, xb, sgm); \
+ break; \
+ } \
+ case OP_NABS: { \
+ tcg_gen_or_i64(xb, xb, sgm); \
+ break; \
+ } \
+ case OP_NEG: { \
+ tcg_gen_xor_i64(xb, xb, sgm); \
+ break; \
+ } \
+ case OP_CPSGN: { \
+ TCGv_i64 xa = tcg_temp_new(); \
+ tcg_gen_mov_i64(xa, cpu_vsrh(xA(ctx->opcode))); \
+ tcg_gen_and_i64(xa, xa, sgm); \
+ tcg_gen_andc_i64(xb, xb, sgm); \
+ tcg_gen_or_i64(xb, xb, xa); \
+ tcg_temp_free(xa); \
+ break; \
+ } \
+ } \
+ tcg_gen_mov_i64(cpu_vsrh(xT(ctx->opcode)), xb); \
+ tcg_temp_free(xb); \
+ tcg_temp_free(sgm); \
+ }
+
+VSX_SCALAR_MOVE(xsabsdp, OP_ABS, SGN_MASK_DP)
+VSX_SCALAR_MOVE(xsnabsdp, OP_NABS, SGN_MASK_DP)
+VSX_SCALAR_MOVE(xsnegdp, OP_NEG, SGN_MASK_DP)
+VSX_SCALAR_MOVE(xscpsgndp, OP_CPSGN, SGN_MASK_DP)
+
+#define VSX_VECTOR_MOVE(name, op, sgn_mask) \
+static void glue(gen_, name)(DisasContext * ctx) \
+ { \
+ TCGv_i64 xbh, xbl, sgm; \
+ if (unlikely(!ctx->vsx_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VSXU); \
+ return; \
+ } \
+ xbh = tcg_temp_new(); \
+ xbl = tcg_temp_new(); \
+ sgm = tcg_temp_new(); \
+ tcg_gen_mov_i64(xbh, cpu_vsrh(xB(ctx->opcode))); \
+ tcg_gen_mov_i64(xbl, cpu_vsrl(xB(ctx->opcode))); \
+ tcg_gen_movi_i64(sgm, sgn_mask); \
+ switch (op) { \
+ case OP_ABS: { \
+ tcg_gen_andc_i64(xbh, xbh, sgm); \
+ tcg_gen_andc_i64(xbl, xbl, sgm); \
+ break; \
+ } \
+ case OP_NABS: { \
+ tcg_gen_or_i64(xbh, xbh, sgm); \
+ tcg_gen_or_i64(xbl, xbl, sgm); \
+ break; \
+ } \
+ case OP_NEG: { \
+ tcg_gen_xor_i64(xbh, xbh, sgm); \
+ tcg_gen_xor_i64(xbl, xbl, sgm); \
+ break; \
+ } \
+ case OP_CPSGN: { \
+ TCGv_i64 xah = tcg_temp_new(); \
+ TCGv_i64 xal = tcg_temp_new(); \
+ tcg_gen_mov_i64(xah, cpu_vsrh(xA(ctx->opcode))); \
+ tcg_gen_mov_i64(xal, cpu_vsrl(xA(ctx->opcode))); \
+ tcg_gen_and_i64(xah, xah, sgm); \
+ tcg_gen_and_i64(xal, xal, sgm); \
+ tcg_gen_andc_i64(xbh, xbh, sgm); \
+ tcg_gen_andc_i64(xbl, xbl, sgm); \
+ tcg_gen_or_i64(xbh, xbh, xah); \
+ tcg_gen_or_i64(xbl, xbl, xal); \
+ tcg_temp_free(xah); \
+ tcg_temp_free(xal); \
+ break; \
+ } \
+ } \
+ tcg_gen_mov_i64(cpu_vsrh(xT(ctx->opcode)), xbh); \
+ tcg_gen_mov_i64(cpu_vsrl(xT(ctx->opcode)), xbl); \
+ tcg_temp_free(xbh); \
+ tcg_temp_free(xbl); \
+ tcg_temp_free(sgm); \
+ }
+
+VSX_VECTOR_MOVE(xvabsdp, OP_ABS, SGN_MASK_DP)
+VSX_VECTOR_MOVE(xvnabsdp, OP_NABS, SGN_MASK_DP)
+VSX_VECTOR_MOVE(xvnegdp, OP_NEG, SGN_MASK_DP)
+VSX_VECTOR_MOVE(xvcpsgndp, OP_CPSGN, SGN_MASK_DP)
+VSX_VECTOR_MOVE(xvabssp, OP_ABS, SGN_MASK_SP)
+VSX_VECTOR_MOVE(xvnabssp, OP_NABS, SGN_MASK_SP)
+VSX_VECTOR_MOVE(xvnegsp, OP_NEG, SGN_MASK_SP)
+VSX_VECTOR_MOVE(xvcpsgnsp, OP_CPSGN, SGN_MASK_SP)
+
+
+#define VSX_LOGICAL(name, tcg_op) \
+static void glue(gen_, name)(DisasContext * ctx) \
+ { \
+ if (unlikely(!ctx->vsx_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VSXU); \
+ return; \
+ } \
+ tcg_op(cpu_vsrh(xT(ctx->opcode)), cpu_vsrh(xA(ctx->opcode)), \
+ cpu_vsrh(xB(ctx->opcode))); \
+ tcg_op(cpu_vsrl(xT(ctx->opcode)), cpu_vsrl(xA(ctx->opcode)), \
+ cpu_vsrl(xB(ctx->opcode))); \
+ }
+
+VSX_LOGICAL(xxland, tcg_gen_and_tl)
+VSX_LOGICAL(xxlandc, tcg_gen_andc_tl)
+VSX_LOGICAL(xxlor, tcg_gen_or_tl)
+VSX_LOGICAL(xxlxor, tcg_gen_xor_tl)
+VSX_LOGICAL(xxlnor, tcg_gen_nor_tl)
+
+#define VSX_XXMRG(name, high) \
+static void glue(gen_, name)(DisasContext * ctx) \
+ { \
+ TCGv_i64 a0, a1, b0, b1; \
+ if (unlikely(!ctx->vsx_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VSXU); \
+ return; \
+ } \
+ a0 = tcg_temp_new(); \
+ a1 = tcg_temp_new(); \
+ b0 = tcg_temp_new(); \
+ b1 = tcg_temp_new(); \
+ if (high) { \
+ tcg_gen_mov_i64(a0, cpu_vsrh(xA(ctx->opcode))); \
+ tcg_gen_mov_i64(a1, cpu_vsrh(xA(ctx->opcode))); \
+ tcg_gen_mov_i64(b0, cpu_vsrh(xB(ctx->opcode))); \
+ tcg_gen_mov_i64(b1, cpu_vsrh(xB(ctx->opcode))); \
+ } else { \
+ tcg_gen_mov_i64(a0, cpu_vsrl(xA(ctx->opcode))); \
+ tcg_gen_mov_i64(a1, cpu_vsrl(xA(ctx->opcode))); \
+ tcg_gen_mov_i64(b0, cpu_vsrl(xB(ctx->opcode))); \
+ tcg_gen_mov_i64(b1, cpu_vsrl(xB(ctx->opcode))); \
+ } \
+ tcg_gen_shri_i64(a0, a0, 32); \
+ tcg_gen_shri_i64(b0, b0, 32); \
+ tcg_gen_deposit_i64(cpu_vsrh(xT(ctx->opcode)), \
+ b0, a0, 32, 32); \
+ tcg_gen_deposit_i64(cpu_vsrl(xT(ctx->opcode)), \
+ b1, a1, 32, 32); \
+ tcg_temp_free(a0); \
+ tcg_temp_free(a1); \
+ tcg_temp_free(b0); \
+ tcg_temp_free(b1); \
+ }
+
+VSX_XXMRG(xxmrghw, 1)
+VSX_XXMRG(xxmrglw, 0)
+
+static void gen_xxsel(DisasContext * ctx)
+{
+ TCGv_i64 a, b, c;
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+ a = tcg_temp_new();
+ b = tcg_temp_new();
+ c = tcg_temp_new();
+
+ tcg_gen_mov_i64(a, cpu_vsrh(xA(ctx->opcode)));
+ tcg_gen_mov_i64(b, cpu_vsrh(xB(ctx->opcode)));
+ tcg_gen_mov_i64(c, cpu_vsrh(xC(ctx->opcode)));
+
+ tcg_gen_and_i64(b, b, c);
+ tcg_gen_andc_i64(a, a, c);
+ tcg_gen_or_i64(cpu_vsrh(xT(ctx->opcode)), a, b);
+
+ tcg_gen_mov_i64(a, cpu_vsrl(xA(ctx->opcode)));
+ tcg_gen_mov_i64(b, cpu_vsrl(xB(ctx->opcode)));
+ tcg_gen_mov_i64(c, cpu_vsrl(xC(ctx->opcode)));
+
+ tcg_gen_and_i64(b, b, c);
+ tcg_gen_andc_i64(a, a, c);
+ tcg_gen_or_i64(cpu_vsrl(xT(ctx->opcode)), a, b);
+
+ tcg_temp_free(a);
+ tcg_temp_free(b);
+ tcg_temp_free(c);
+}
+
+static void gen_xxspltw(DisasContext *ctx)
+{
+ TCGv_i64 b, b2;
+ TCGv_i64 vsr = (UIM(ctx->opcode) & 2) ?
+ cpu_vsrl(xB(ctx->opcode)) :
+ cpu_vsrh(xB(ctx->opcode));
+
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+
+ b = tcg_temp_new();
+ b2 = tcg_temp_new();
+
+ if (UIM(ctx->opcode) & 1) {
+ tcg_gen_ext32u_i64(b, vsr);
+ } else {
+ tcg_gen_shri_i64(b, vsr, 32);
+ }
+
+ tcg_gen_shli_i64(b2, b, 32);
+ tcg_gen_or_i64(cpu_vsrh(xT(ctx->opcode)), b, b2);
+ tcg_gen_mov_i64(cpu_vsrl(xT(ctx->opcode)), cpu_vsrh(xT(ctx->opcode)));
+
+ tcg_temp_free(b);
+ tcg_temp_free(b2);
+}
+
+static void gen_xxsldwi(DisasContext *ctx)
+{
+ TCGv_i64 xth, xtl;
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+ xth = tcg_temp_new();
+ xtl = tcg_temp_new();
+
+ switch (SHW(ctx->opcode)) {
+ case 0: {
+ tcg_gen_mov_i64(xth, cpu_vsrh(xA(ctx->opcode)));
+ tcg_gen_mov_i64(xtl, cpu_vsrl(xA(ctx->opcode)));
+ break;
+ }
+ case 1: {
+ TCGv_i64 t0 = tcg_temp_new();
+ tcg_gen_mov_i64(xth, cpu_vsrh(xA(ctx->opcode)));
+ tcg_gen_shli_i64(xth, xth, 32);
+ tcg_gen_mov_i64(t0, cpu_vsrl(xA(ctx->opcode)));
+ tcg_gen_shri_i64(t0, t0, 32);
+ tcg_gen_or_i64(xth, xth, t0);
+ tcg_gen_mov_i64(xtl, cpu_vsrl(xA(ctx->opcode)));
+ tcg_gen_shli_i64(xtl, xtl, 32);
+ tcg_gen_mov_i64(t0, cpu_vsrh(xB(ctx->opcode)));
+ tcg_gen_shri_i64(t0, t0, 32);
+ tcg_gen_or_i64(xtl, xtl, t0);
+ tcg_temp_free(t0);
+ break;
+ }
+ case 2: {
+ tcg_gen_mov_i64(xth, cpu_vsrl(xA(ctx->opcode)));
+ tcg_gen_mov_i64(xtl, cpu_vsrh(xB(ctx->opcode)));
+ break;
+ }
+ case 3: {
+ TCGv_i64 t0 = tcg_temp_new();
+ tcg_gen_mov_i64(xth, cpu_vsrl(xA(ctx->opcode)));
+ tcg_gen_shli_i64(xth, xth, 32);
+ tcg_gen_mov_i64(t0, cpu_vsrh(xB(ctx->opcode)));
+ tcg_gen_shri_i64(t0, t0, 32);
+ tcg_gen_or_i64(xth, xth, t0);
+ tcg_gen_mov_i64(xtl, cpu_vsrh(xB(ctx->opcode)));
+ tcg_gen_shli_i64(xtl, xtl, 32);
+ tcg_gen_mov_i64(t0, cpu_vsrl(xB(ctx->opcode)));
+ tcg_gen_shri_i64(t0, t0, 32);
+ tcg_gen_or_i64(xtl, xtl, t0);
+ tcg_temp_free(t0);
+ break;
+ }
+ }
+
+ tcg_gen_mov_i64(cpu_vsrh(xT(ctx->opcode)), xth);
+ tcg_gen_mov_i64(cpu_vsrl(xT(ctx->opcode)), xtl);
+
+ tcg_temp_free(xth);
+ tcg_temp_free(xtl);
+}
+
+
/*** SPE extension ***/
/* Register moves */
-
static inline void gen_evmra(DisasContext *ctx)
{
@@ -9413,6 +9898,119 @@ GEN_VAFORM_PAIRED(vmsumshm, vmsumshs, 20),
GEN_VAFORM_PAIRED(vsel, vperm, 21),
GEN_VAFORM_PAIRED(vmaddfp, vnmsubfp, 23),
+GEN_HANDLER_E(lxsdx, 0x1F, 0x0C, 0x12, 0, PPC_NONE, PPC2_VSX),
+GEN_HANDLER_E(lxvd2x, 0x1F, 0x0C, 0x1A, 0, PPC_NONE, PPC2_VSX),
+GEN_HANDLER_E(lxvdsx, 0x1F, 0x0C, 0x0A, 0, PPC_NONE, PPC2_VSX),
+GEN_HANDLER_E(lxvw4x, 0x1F, 0x0C, 0x18, 0, PPC_NONE, PPC2_VSX),
+
+GEN_HANDLER_E(stxsdx, 0x1F, 0xC, 0x16, 0, PPC_NONE, PPC2_VSX),
+GEN_HANDLER_E(stxvd2x, 0x1F, 0xC, 0x1E, 0, PPC_NONE, PPC2_VSX),
+GEN_HANDLER_E(stxvw4x, 0x1F, 0xC, 0x1C, 0, PPC_NONE, PPC2_VSX),
+
+#undef GEN_XX2FORM
+#define GEN_XX2FORM(name, opc2, opc3, fl2) \
+GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0, opc3, 0, PPC_NONE, fl2), \
+GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 1, opc3, 0, PPC_NONE, fl2)
+
+#undef GEN_XX3FORM
+#define GEN_XX3FORM(name, opc2, opc3, fl2) \
+GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0, opc3, 0, PPC_NONE, fl2), \
+GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 1, opc3, 0, PPC_NONE, fl2), \
+GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 2, opc3, 0, PPC_NONE, fl2), \
+GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 3, opc3, 0, PPC_NONE, fl2)
+
+#undef GEN_XX3FORM_DM
+#define GEN_XX3FORM_DM(name, opc2, opc3) \
+GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x00, opc3|0x00, 0, PPC_NONE, PPC2_VSX),\
+GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x01, opc3|0x00, 0, PPC_NONE, PPC2_VSX),\
+GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x02, opc3|0x00, 0, PPC_NONE, PPC2_VSX),\
+GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x03, opc3|0x00, 0, PPC_NONE, PPC2_VSX),\
+GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x00, opc3|0x04, 0, PPC_NONE, PPC2_VSX),\
+GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x01, opc3|0x04, 0, PPC_NONE, PPC2_VSX),\
+GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x02, opc3|0x04, 0, PPC_NONE, PPC2_VSX),\
+GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x03, opc3|0x04, 0, PPC_NONE, PPC2_VSX),\
+GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x00, opc3|0x08, 0, PPC_NONE, PPC2_VSX),\
+GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x01, opc3|0x08, 0, PPC_NONE, PPC2_VSX),\
+GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x02, opc3|0x08, 0, PPC_NONE, PPC2_VSX),\
+GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x03, opc3|0x08, 0, PPC_NONE, PPC2_VSX),\
+GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x00, opc3|0x0C, 0, PPC_NONE, PPC2_VSX),\
+GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x01, opc3|0x0C, 0, PPC_NONE, PPC2_VSX),\
+GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x02, opc3|0x0C, 0, PPC_NONE, PPC2_VSX),\
+GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x03, opc3|0x0C, 0, PPC_NONE, PPC2_VSX)
+
+GEN_XX2FORM(xsabsdp, 0x12, 0x15, PPC2_VSX),
+GEN_XX2FORM(xsnabsdp, 0x12, 0x16, PPC2_VSX),
+GEN_XX2FORM(xsnegdp, 0x12, 0x17, PPC2_VSX),
+GEN_XX3FORM(xscpsgndp, 0x00, 0x16, PPC2_VSX),
+
+GEN_XX2FORM(xvabsdp, 0x12, 0x1D, PPC2_VSX),
+GEN_XX2FORM(xvnabsdp, 0x12, 0x1E, PPC2_VSX),
+GEN_XX2FORM(xvnegdp, 0x12, 0x1F, PPC2_VSX),
+GEN_XX3FORM(xvcpsgndp, 0x00, 0x1E, PPC2_VSX),
+GEN_XX2FORM(xvabssp, 0x12, 0x19, PPC2_VSX),
+GEN_XX2FORM(xvnabssp, 0x12, 0x1A, PPC2_VSX),
+GEN_XX2FORM(xvnegsp, 0x12, 0x1B, PPC2_VSX),
+GEN_XX3FORM(xvcpsgnsp, 0x00, 0x1A, PPC2_VSX),
+
+#undef VSX_LOGICAL
+#define VSX_LOGICAL(name, opc2, opc3, fl2) \
+GEN_XX3FORM(name, opc2, opc3, fl2)
+
+VSX_LOGICAL(xxland, 0x8, 0x10, PPC2_VSX),
+VSX_LOGICAL(xxlandc, 0x8, 0x11, PPC2_VSX),
+VSX_LOGICAL(xxlor, 0x8, 0x12, PPC2_VSX),
+VSX_LOGICAL(xxlxor, 0x8, 0x13, PPC2_VSX),
+VSX_LOGICAL(xxlnor, 0x8, 0x14, PPC2_VSX),
+GEN_XX3FORM(xxmrghw, 0x08, 0x02, PPC2_VSX),
+GEN_XX3FORM(xxmrglw, 0x08, 0x06, PPC2_VSX),
+GEN_XX2FORM(xxspltw, 0x08, 0x0A, PPC2_VSX),
+GEN_XX3FORM_DM(xxsldwi, 0x08, 0x00),
+
+#define GEN_XXSEL_ROW(opc3) \
+GEN_HANDLER2_E(xxsel, "xxsel", 0x3C, 0x18, opc3, 0, PPC_NONE, PPC2_VSX), \
+GEN_HANDLER2_E(xxsel, "xxsel", 0x3C, 0x19, opc3, 0, PPC_NONE, PPC2_VSX), \
+GEN_HANDLER2_E(xxsel, "xxsel", 0x3C, 0x1A, opc3, 0, PPC_NONE, PPC2_VSX), \
+GEN_HANDLER2_E(xxsel, "xxsel", 0x3C, 0x1B, opc3, 0, PPC_NONE, PPC2_VSX), \
+GEN_HANDLER2_E(xxsel, "xxsel", 0x3C, 0x1C, opc3, 0, PPC_NONE, PPC2_VSX), \
+GEN_HANDLER2_E(xxsel, "xxsel", 0x3C, 0x1D, opc3, 0, PPC_NONE, PPC2_VSX), \
+GEN_HANDLER2_E(xxsel, "xxsel", 0x3C, 0x1E, opc3, 0, PPC_NONE, PPC2_VSX), \
+GEN_HANDLER2_E(xxsel, "xxsel", 0x3C, 0x1F, opc3, 0, PPC_NONE, PPC2_VSX), \
+
+GEN_XXSEL_ROW(0x00)
+GEN_XXSEL_ROW(0x01)
+GEN_XXSEL_ROW(0x02)
+GEN_XXSEL_ROW(0x03)
+GEN_XXSEL_ROW(0x04)
+GEN_XXSEL_ROW(0x05)
+GEN_XXSEL_ROW(0x06)
+GEN_XXSEL_ROW(0x07)
+GEN_XXSEL_ROW(0x08)
+GEN_XXSEL_ROW(0x09)
+GEN_XXSEL_ROW(0x0A)
+GEN_XXSEL_ROW(0x0B)
+GEN_XXSEL_ROW(0x0C)
+GEN_XXSEL_ROW(0x0D)
+GEN_XXSEL_ROW(0x0E)
+GEN_XXSEL_ROW(0x0F)
+GEN_XXSEL_ROW(0x10)
+GEN_XXSEL_ROW(0x11)
+GEN_XXSEL_ROW(0x12)
+GEN_XXSEL_ROW(0x13)
+GEN_XXSEL_ROW(0x14)
+GEN_XXSEL_ROW(0x15)
+GEN_XXSEL_ROW(0x16)
+GEN_XXSEL_ROW(0x17)
+GEN_XXSEL_ROW(0x18)
+GEN_XXSEL_ROW(0x19)
+GEN_XXSEL_ROW(0x1A)
+GEN_XXSEL_ROW(0x1B)
+GEN_XXSEL_ROW(0x1C)
+GEN_XXSEL_ROW(0x1D)
+GEN_XXSEL_ROW(0x1E)
+GEN_XXSEL_ROW(0x1F)
+
+GEN_XX3FORM_DM(xxpermdi, 0x08, 0x01),
+
#undef GEN_SPE
#define GEN_SPE(name0, name1, opc2, opc3, inval0, inval1, type) \
GEN_OPCODE_DUAL(name0##_##name1, 0x04, opc2, opc3, inval0, inval1, type, PPC_NONE)
@@ -9759,6 +10357,11 @@ static inline void gen_intermediate_code_internal(PowerPCCPU *cpu,
ctx.altivec_enabled = msr_vr;
else
ctx.altivec_enabled = 0;
+ if ((env->flags & POWERPC_FLAG_VSX) && msr_vsx) {
+ ctx.vsx_enabled = msr_vsx;
+ } else {
+ ctx.vsx_enabled = 0;
+ }
if ((env->flags & POWERPC_FLAG_SE) && msr_se)
ctx.singlestep_enabled = CPU_SINGLE_STEP;
else
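(Illustrative aside, not part of the patch: the EXTRACT_HELPER_SPLIT fields above combine a low opcode bit with the regular 5-bit register field so that all 64 VSX registers can be addressed; VSRs 0-31 then alias the FPRs for the high doubleword plus the new cpu_vsr array for the low doubleword, while VSRs 32-63 alias the Altivec registers via cpu_avrh/cpu_avrl. A standalone sketch mirroring EXTRACT_HELPER_SPLIT(xT, 0, 1, 21, 5):)

#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as EXTRACT_HELPER_SPLIT(xT, 0, 1, 21, 5). */
static uint32_t demo_xT(uint32_t opcode)
{
    return (((opcode >> 0) & 0x1) << 5) | ((opcode >> 21) & 0x1f);
}

int main(void)
{
    /* T field = 3 (bits 21..25), TX bit = 1 (bit 0): selects VSR 35,
     * which cpu_vsrh()/cpu_vsrl() map onto cpu_avrh[3]/cpu_avrl[3]. */
    uint32_t opcode = (3u << 21) | 1u;
    printf("xT = %u\n", demo_xT(opcode));   /* prints 35 */
    return 0;
}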
diff --git a/target-ppc/translate_init.c b/target-ppc/translate_init.c
index 47825ac543..c030a2032a 100644
--- a/target-ppc/translate_init.c
+++ b/target-ppc/translate_init.c
@@ -3061,6 +3061,7 @@ static void init_excp_POWER7 (CPUPPCState *env)
env->excp_vectors[POWERPC_EXCP_TRACE] = 0x00000D00;
env->excp_vectors[POWERPC_EXCP_PERFM] = 0x00000F00;
env->excp_vectors[POWERPC_EXCP_VPU] = 0x00000F20;
+ env->excp_vectors[POWERPC_EXCP_VSXU] = 0x00000F40;
env->excp_vectors[POWERPC_EXCP_IABR] = 0x00001300;
env->excp_vectors[POWERPC_EXCP_MAINT] = 0x00001600;
env->excp_vectors[POWERPC_EXCP_VPUA] = 0x00001700;
@@ -7221,6 +7222,46 @@ POWERPC_FAMILY(POWER7)(ObjectClass *oc, void *data)
dc->fw_name = "PowerPC,POWER7";
dc->desc = "POWER7";
+ pcc->pvr = CPU_POWERPC_POWER7_BASE;
+ pcc->pvr_mask = CPU_POWERPC_POWER7_MASK;
+ pcc->init_proc = init_proc_POWER7;
+ pcc->check_pow = check_pow_nocheck;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_ISEL | PPC_STRING | PPC_MFTB |
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES |
+ PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE |
+ PPC_FLOAT_STFIWX |
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ |
+ PPC_MEM_SYNC | PPC_MEM_EIEIO |
+ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC |
+ PPC_64B | PPC_ALTIVEC |
+ PPC_SEGMENT_64B | PPC_SLBI |
+ PPC_POPCNTB | PPC_POPCNTWD;
+ pcc->insns_flags2 = PPC2_VSX | PPC2_DFP | PPC2_DBRX | PPC2_ISA205;
+ pcc->msr_mask = 0x800000000284FF37ULL;
+ pcc->mmu_model = POWERPC_MMU_2_06;
+#if defined(CONFIG_SOFTMMU)
+ pcc->handle_mmu_fault = ppc_hash64_handle_mmu_fault;
+#endif
+ pcc->excp_model = POWERPC_EXCP_POWER7;
+ pcc->bus_model = PPC_FLAGS_INPUT_POWER7;
+ pcc->bfd_mach = bfd_mach_ppc64;
+ pcc->flags = POWERPC_FLAG_VRE | POWERPC_FLAG_SE |
+ POWERPC_FLAG_BE | POWERPC_FLAG_PMM |
+ POWERPC_FLAG_BUS_CLK | POWERPC_FLAG_CFAR |
+ POWERPC_FLAG_VSX;
+ pcc->l1_dcache_size = 0x8000;
+ pcc->l1_icache_size = 0x8000;
+}
+
+POWERPC_FAMILY(POWER7P)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->fw_name = "PowerPC,POWER7+";
+ dc->desc = "POWER7+";
+ pcc->pvr = CPU_POWERPC_POWER7P_BASE;
+ pcc->pvr_mask = CPU_POWERPC_POWER7P_MASK;
pcc->init_proc = init_proc_POWER7;
pcc->check_pow = check_pow_nocheck;
pcc->insns_flags = PPC_INSNS_BASE | PPC_ISEL | PPC_STRING | PPC_MFTB |
@@ -7244,7 +7285,8 @@ POWERPC_FAMILY(POWER7)(ObjectClass *oc, void *data)
pcc->bfd_mach = bfd_mach_ppc64;
pcc->flags = POWERPC_FLAG_VRE | POWERPC_FLAG_SE |
POWERPC_FLAG_BE | POWERPC_FLAG_PMM |
- POWERPC_FLAG_BUS_CLK | POWERPC_FLAG_CFAR;
+ POWERPC_FLAG_BUS_CLK | POWERPC_FLAG_CFAR |
+ POWERPC_FLAG_VSX;
pcc->l1_dcache_size = 0x8000;
pcc->l1_icache_size = 0x8000;
}
@@ -7256,6 +7298,8 @@ POWERPC_FAMILY(POWER8)(ObjectClass *oc, void *data)
dc->fw_name = "PowerPC,POWER8";
dc->desc = "POWER8";
+ pcc->pvr = CPU_POWERPC_POWER8_BASE;
+ pcc->pvr_mask = CPU_POWERPC_POWER8_MASK;
pcc->init_proc = init_proc_POWER7;
pcc->check_pow = check_pow_nocheck;
pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB |
@@ -7269,7 +7313,7 @@ POWERPC_FAMILY(POWER8)(ObjectClass *oc, void *data)
PPC_SEGMENT_64B | PPC_SLBI |
PPC_POPCNTB | PPC_POPCNTWD;
pcc->insns_flags2 = PPC2_VSX | PPC2_DFP | PPC2_DBRX;
- pcc->msr_mask = 0x800000000204FF36ULL;
+ pcc->msr_mask = 0x800000000284FF36ULL;
pcc->mmu_model = POWERPC_MMU_2_06;
#if defined(CONFIG_SOFTMMU)
pcc->handle_mmu_fault = ppc_hash64_handle_mmu_fault;
@@ -7279,7 +7323,8 @@ POWERPC_FAMILY(POWER8)(ObjectClass *oc, void *data)
pcc->bfd_mach = bfd_mach_ppc64;
pcc->flags = POWERPC_FLAG_VRE | POWERPC_FLAG_SE |
POWERPC_FLAG_BE | POWERPC_FLAG_PMM |
- POWERPC_FLAG_BUS_CLK | POWERPC_FLAG_CFAR;
+ POWERPC_FLAG_BUS_CLK | POWERPC_FLAG_CFAR |
+ POWERPC_FLAG_VSX;
pcc->l1_dcache_size = 0x8000;
pcc->l1_icache_size = 0x8000;
}
@@ -8188,6 +8233,44 @@ PowerPCCPUClass *ppc_cpu_class_by_pvr(uint32_t pvr)
return pcc;
}
+static gint ppc_cpu_compare_class_pvr_mask(gconstpointer a, gconstpointer b)
+{
+ ObjectClass *oc = (ObjectClass *)a;
+ uint32_t pvr = *(uint32_t *)b;
+ PowerPCCPUClass *pcc = (PowerPCCPUClass *)a;
+ gint ret;
+
+ /* -cpu host does a PVR lookup during construction */
+ if (unlikely(strcmp(object_class_get_name(oc),
+ TYPE_HOST_POWERPC_CPU) == 0)) {
+ return -1;
+ }
+
+#if defined(TARGET_PPCEMB)
+ if (pcc->mmu_model != POWERPC_MMU_BOOKE) {
+ return -1;
+ }
+#endif
+ ret = (((pcc->pvr & pcc->pvr_mask) == (pvr & pcc->pvr_mask)) ? 0 : -1);
+
+ return ret;
+}
+
+PowerPCCPUClass *ppc_cpu_class_by_pvr_mask(uint32_t pvr)
+{
+ GSList *list, *item;
+ PowerPCCPUClass *pcc = NULL;
+
+ list = object_class_get_list(TYPE_POWERPC_CPU, true);
+ item = g_slist_find_custom(list, &pvr, ppc_cpu_compare_class_pvr_mask);
+ if (item != NULL) {
+ pcc = POWERPC_CPU_CLASS(item->data);
+ }
+ g_slist_free(list);
+
+ return pcc;
+}
+
static gint ppc_cpu_compare_class_name(gconstpointer a, gconstpointer b)
{
ObjectClass *oc = (ObjectClass *)a;
@@ -8559,6 +8642,8 @@ static void ppc_cpu_class_init(ObjectClass *oc, void *data)
DeviceClass *dc = DEVICE_CLASS(oc);
pcc->parent_realize = dc->realize;
+ pcc->pvr = CPU_POWERPC_DEFAULT_MASK;
+ pcc->pvr_mask = CPU_POWERPC_DEFAULT_MASK;
dc->realize = ppc_cpu_realizefn;
dc->unrealize = ppc_cpu_unrealizefn;
diff --git a/target-sh4/translate.c b/target-sh4/translate.c
index 2272eb0beb..661fc6c887 100644
--- a/target-sh4/translate.c
+++ b/target-sh4/translate.c
@@ -464,7 +464,7 @@ static void _decode_opc(DisasContext * ctx)
{
TCGv addr = tcg_temp_new();
tcg_gen_addi_i32(addr, REG(B11_8), B3_0 * 4);
- tcg_gen_qemu_st32(REG(B7_4), addr, ctx->memidx);
+ tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
tcg_temp_free(addr);
}
return;
@@ -472,7 +472,7 @@ static void _decode_opc(DisasContext * ctx)
{
TCGv addr = tcg_temp_new();
tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 4);
- tcg_gen_qemu_ld32s(REG(B11_8), addr, ctx->memidx);
+ tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
tcg_temp_free(addr);
}
return;
@@ -482,14 +482,14 @@ static void _decode_opc(DisasContext * ctx)
case 0x9000: /* mov.w @(disp,PC),Rn */
{
TCGv addr = tcg_const_i32(ctx->pc + 4 + B7_0 * 2);
- tcg_gen_qemu_ld16s(REG(B11_8), addr, ctx->memidx);
+ tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESW);
tcg_temp_free(addr);
}
return;
case 0xd000: /* mov.l @(disp,PC),Rn */
{
TCGv addr = tcg_const_i32((ctx->pc + 4 + B7_0 * 4) & ~3);
- tcg_gen_qemu_ld32s(REG(B11_8), addr, ctx->memidx);
+ tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
tcg_temp_free(addr);
}
return;
@@ -516,28 +516,29 @@ static void _decode_opc(DisasContext * ctx)
tcg_gen_mov_i32(REG(B11_8), REG(B7_4));
return;
case 0x2000: /* mov.b Rm,@Rn */
- tcg_gen_qemu_st8(REG(B7_4), REG(B11_8), ctx->memidx);
+ tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_UB);
return;
case 0x2001: /* mov.w Rm,@Rn */
- tcg_gen_qemu_st16(REG(B7_4), REG(B11_8), ctx->memidx);
+ tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_TEUW);
return;
case 0x2002: /* mov.l Rm,@Rn */
- tcg_gen_qemu_st32(REG(B7_4), REG(B11_8), ctx->memidx);
+ tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_TEUL);
return;
case 0x6000: /* mov.b @Rm,Rn */
- tcg_gen_qemu_ld8s(REG(B11_8), REG(B7_4), ctx->memidx);
+ tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_SB);
return;
case 0x6001: /* mov.w @Rm,Rn */
- tcg_gen_qemu_ld16s(REG(B11_8), REG(B7_4), ctx->memidx);
+ tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESW);
return;
case 0x6002: /* mov.l @Rm,Rn */
- tcg_gen_qemu_ld32s(REG(B11_8), REG(B7_4), ctx->memidx);
+ tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESL);
return;
case 0x2004: /* mov.b Rm,@-Rn */
{
TCGv addr = tcg_temp_new();
tcg_gen_subi_i32(addr, REG(B11_8), 1);
- tcg_gen_qemu_st8(REG(B7_4), addr, ctx->memidx); /* might cause re-execution */
+ /* might cause re-execution */
+ tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_UB);
tcg_gen_mov_i32(REG(B11_8), addr); /* modify register status */
tcg_temp_free(addr);
}
@@ -546,7 +547,7 @@ static void _decode_opc(DisasContext * ctx)
{
TCGv addr = tcg_temp_new();
tcg_gen_subi_i32(addr, REG(B11_8), 2);
- tcg_gen_qemu_st16(REG(B7_4), addr, ctx->memidx);
+ tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUW);
tcg_gen_mov_i32(REG(B11_8), addr);
tcg_temp_free(addr);
}
@@ -555,22 +556,22 @@ static void _decode_opc(DisasContext * ctx)
{
TCGv addr = tcg_temp_new();
tcg_gen_subi_i32(addr, REG(B11_8), 4);
- tcg_gen_qemu_st32(REG(B7_4), addr, ctx->memidx);
+ tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
tcg_gen_mov_i32(REG(B11_8), addr);
}
return;
case 0x6004: /* mov.b @Rm+,Rn */
- tcg_gen_qemu_ld8s(REG(B11_8), REG(B7_4), ctx->memidx);
+ tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_SB);
if ( B11_8 != B7_4 )
tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 1);
return;
case 0x6005: /* mov.w @Rm+,Rn */
- tcg_gen_qemu_ld16s(REG(B11_8), REG(B7_4), ctx->memidx);
+ tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESW);
if ( B11_8 != B7_4 )
tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
return;
case 0x6006: /* mov.l @Rm+,Rn */
- tcg_gen_qemu_ld32s(REG(B11_8), REG(B7_4), ctx->memidx);
+ tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESL);
if ( B11_8 != B7_4 )
tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
return;
@@ -578,7 +579,7 @@ static void _decode_opc(DisasContext * ctx)
{
TCGv addr = tcg_temp_new();
tcg_gen_add_i32(addr, REG(B11_8), REG(0));
- tcg_gen_qemu_st8(REG(B7_4), addr, ctx->memidx);
+ tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_UB);
tcg_temp_free(addr);
}
return;
@@ -586,7 +587,7 @@ static void _decode_opc(DisasContext * ctx)
{
TCGv addr = tcg_temp_new();
tcg_gen_add_i32(addr, REG(B11_8), REG(0));
- tcg_gen_qemu_st16(REG(B7_4), addr, ctx->memidx);
+ tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUW);
tcg_temp_free(addr);
}
return;
@@ -594,7 +595,7 @@ static void _decode_opc(DisasContext * ctx)
{
TCGv addr = tcg_temp_new();
tcg_gen_add_i32(addr, REG(B11_8), REG(0));
- tcg_gen_qemu_st32(REG(B7_4), addr, ctx->memidx);
+ tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
tcg_temp_free(addr);
}
return;
@@ -602,7 +603,7 @@ static void _decode_opc(DisasContext * ctx)
{
TCGv addr = tcg_temp_new();
tcg_gen_add_i32(addr, REG(B7_4), REG(0));
- tcg_gen_qemu_ld8s(REG(B11_8), addr, ctx->memidx);
+ tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_SB);
tcg_temp_free(addr);
}
return;
@@ -610,7 +611,7 @@ static void _decode_opc(DisasContext * ctx)
{
TCGv addr = tcg_temp_new();
tcg_gen_add_i32(addr, REG(B7_4), REG(0));
- tcg_gen_qemu_ld16s(REG(B11_8), addr, ctx->memidx);
+ tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESW);
tcg_temp_free(addr);
}
return;
@@ -618,7 +619,7 @@ static void _decode_opc(DisasContext * ctx)
{
TCGv addr = tcg_temp_new();
tcg_gen_add_i32(addr, REG(B7_4), REG(0));
- tcg_gen_qemu_ld32s(REG(B11_8), addr, ctx->memidx);
+ tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
tcg_temp_free(addr);
}
return;
@@ -767,9 +768,9 @@ static void _decode_opc(DisasContext * ctx)
{
TCGv arg0, arg1;
arg0 = tcg_temp_new();
- tcg_gen_qemu_ld32s(arg0, REG(B7_4), ctx->memidx);
+ tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx, MO_TESL);
arg1 = tcg_temp_new();
- tcg_gen_qemu_ld32s(arg1, REG(B11_8), ctx->memidx);
+ tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx, MO_TESL);
gen_helper_macl(cpu_env, arg0, arg1);
tcg_temp_free(arg1);
tcg_temp_free(arg0);
@@ -781,9 +782,9 @@ static void _decode_opc(DisasContext * ctx)
{
TCGv arg0, arg1;
arg0 = tcg_temp_new();
- tcg_gen_qemu_ld32s(arg0, REG(B7_4), ctx->memidx);
+ tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx, MO_TESL);
arg1 = tcg_temp_new();
- tcg_gen_qemu_ld32s(arg1, REG(B11_8), ctx->memidx);
+ tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx, MO_TESL);
gen_helper_macw(cpu_env, arg0, arg1);
tcg_temp_free(arg1);
tcg_temp_free(arg0);
@@ -979,11 +980,14 @@ static void _decode_opc(DisasContext * ctx)
TCGv addr_hi = tcg_temp_new();
int fr = XREG(B7_4);
tcg_gen_addi_i32(addr_hi, REG(B11_8), 4);
- tcg_gen_qemu_st32(cpu_fregs[fr ], REG(B11_8), ctx->memidx);
- tcg_gen_qemu_st32(cpu_fregs[fr+1], addr_hi, ctx->memidx);
+ tcg_gen_qemu_st_i32(cpu_fregs[fr], REG(B11_8),
+ ctx->memidx, MO_TEUL);
+ tcg_gen_qemu_st_i32(cpu_fregs[fr+1], addr_hi,
+ ctx->memidx, MO_TEUL);
tcg_temp_free(addr_hi);
} else {
- tcg_gen_qemu_st32(cpu_fregs[FREG(B7_4)], REG(B11_8), ctx->memidx);
+ tcg_gen_qemu_st_i32(cpu_fregs[FREG(B7_4)], REG(B11_8),
+ ctx->memidx, MO_TEUL);
}
return;
case 0xf008: /* fmov @Rm,{F,D,X}Rn - FPSCR: Nothing */
@@ -992,11 +996,12 @@ static void _decode_opc(DisasContext * ctx)
TCGv addr_hi = tcg_temp_new();
int fr = XREG(B11_8);
tcg_gen_addi_i32(addr_hi, REG(B7_4), 4);
- tcg_gen_qemu_ld32u(cpu_fregs[fr ], REG(B7_4), ctx->memidx);
- tcg_gen_qemu_ld32u(cpu_fregs[fr+1], addr_hi, ctx->memidx);
+ tcg_gen_qemu_ld_i32(cpu_fregs[fr], REG(B7_4), ctx->memidx, MO_TEUL);
+ tcg_gen_qemu_ld_i32(cpu_fregs[fr+1], addr_hi, ctx->memidx, MO_TEUL);
tcg_temp_free(addr_hi);
} else {
- tcg_gen_qemu_ld32u(cpu_fregs[FREG(B11_8)], REG(B7_4), ctx->memidx);
+ tcg_gen_qemu_ld_i32(cpu_fregs[FREG(B11_8)], REG(B7_4),
+ ctx->memidx, MO_TEUL);
}
return;
case 0xf009: /* fmov @Rm+,{F,D,X}Rn - FPSCR: Nothing */
@@ -1005,12 +1010,13 @@ static void _decode_opc(DisasContext * ctx)
TCGv addr_hi = tcg_temp_new();
int fr = XREG(B11_8);
tcg_gen_addi_i32(addr_hi, REG(B7_4), 4);
- tcg_gen_qemu_ld32u(cpu_fregs[fr ], REG(B7_4), ctx->memidx);
- tcg_gen_qemu_ld32u(cpu_fregs[fr+1], addr_hi, ctx->memidx);
+ tcg_gen_qemu_ld_i32(cpu_fregs[fr], REG(B7_4), ctx->memidx, MO_TEUL);
+ tcg_gen_qemu_ld_i32(cpu_fregs[fr+1], addr_hi, ctx->memidx, MO_TEUL);
tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 8);
tcg_temp_free(addr_hi);
} else {
- tcg_gen_qemu_ld32u(cpu_fregs[FREG(B11_8)], REG(B7_4), ctx->memidx);
+ tcg_gen_qemu_ld_i32(cpu_fregs[FREG(B11_8)], REG(B7_4),
+ ctx->memidx, MO_TEUL);
tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
}
return;
@@ -1020,16 +1026,17 @@ static void _decode_opc(DisasContext * ctx)
TCGv addr = tcg_temp_new_i32();
int fr = XREG(B7_4);
tcg_gen_subi_i32(addr, REG(B11_8), 4);
- tcg_gen_qemu_st32(cpu_fregs[fr+1], addr, ctx->memidx);
+ tcg_gen_qemu_st_i32(cpu_fregs[fr+1], addr, ctx->memidx, MO_TEUL);
tcg_gen_subi_i32(addr, addr, 4);
- tcg_gen_qemu_st32(cpu_fregs[fr ], addr, ctx->memidx);
+ tcg_gen_qemu_st_i32(cpu_fregs[fr], addr, ctx->memidx, MO_TEUL);
tcg_gen_mov_i32(REG(B11_8), addr);
tcg_temp_free(addr);
} else {
TCGv addr;
addr = tcg_temp_new_i32();
tcg_gen_subi_i32(addr, REG(B11_8), 4);
- tcg_gen_qemu_st32(cpu_fregs[FREG(B7_4)], addr, ctx->memidx);
+ tcg_gen_qemu_st_i32(cpu_fregs[FREG(B7_4)], addr,
+ ctx->memidx, MO_TEUL);
tcg_gen_mov_i32(REG(B11_8), addr);
tcg_temp_free(addr);
}
@@ -1041,11 +1048,14 @@ static void _decode_opc(DisasContext * ctx)
tcg_gen_add_i32(addr, REG(B7_4), REG(0));
if (ctx->flags & FPSCR_SZ) {
int fr = XREG(B11_8);
- tcg_gen_qemu_ld32u(cpu_fregs[fr ], addr, ctx->memidx);
+ tcg_gen_qemu_ld_i32(cpu_fregs[fr], addr,
+ ctx->memidx, MO_TEUL);
tcg_gen_addi_i32(addr, addr, 4);
- tcg_gen_qemu_ld32u(cpu_fregs[fr+1], addr, ctx->memidx);
+ tcg_gen_qemu_ld_i32(cpu_fregs[fr+1], addr,
+ ctx->memidx, MO_TEUL);
} else {
- tcg_gen_qemu_ld32u(cpu_fregs[FREG(B11_8)], addr, ctx->memidx);
+ tcg_gen_qemu_ld_i32(cpu_fregs[FREG(B11_8)], addr,
+ ctx->memidx, MO_TEUL);
}
tcg_temp_free(addr);
}
@@ -1057,11 +1067,14 @@ static void _decode_opc(DisasContext * ctx)
tcg_gen_add_i32(addr, REG(B11_8), REG(0));
if (ctx->flags & FPSCR_SZ) {
int fr = XREG(B7_4);
- tcg_gen_qemu_ld32u(cpu_fregs[fr ], addr, ctx->memidx);
+ tcg_gen_qemu_ld_i32(cpu_fregs[fr], addr,
+ ctx->memidx, MO_TEUL);
tcg_gen_addi_i32(addr, addr, 4);
- tcg_gen_qemu_ld32u(cpu_fregs[fr+1], addr, ctx->memidx);
+ tcg_gen_qemu_ld_i32(cpu_fregs[fr+1], addr,
+ ctx->memidx, MO_TEUL);
} else {
- tcg_gen_qemu_st32(cpu_fregs[FREG(B7_4)], addr, ctx->memidx);
+ tcg_gen_qemu_st_i32(cpu_fregs[FREG(B7_4)], addr,
+ ctx->memidx, MO_TEUL);
}
tcg_temp_free(addr);
}
@@ -1164,9 +1177,9 @@ static void _decode_opc(DisasContext * ctx)
addr = tcg_temp_new();
tcg_gen_add_i32(addr, REG(0), cpu_gbr);
val = tcg_temp_new();
- tcg_gen_qemu_ld8u(val, addr, ctx->memidx);
+ tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
tcg_gen_andi_i32(val, val, B7_0);
- tcg_gen_qemu_st8(val, addr, ctx->memidx);
+ tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
tcg_temp_free(val);
tcg_temp_free(addr);
}
@@ -1200,7 +1213,7 @@ static void _decode_opc(DisasContext * ctx)
{
TCGv addr = tcg_temp_new();
tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
- tcg_gen_qemu_ld8s(REG(0), addr, ctx->memidx);
+ tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_SB);
tcg_temp_free(addr);
}
return;
@@ -1208,7 +1221,7 @@ static void _decode_opc(DisasContext * ctx)
{
TCGv addr = tcg_temp_new();
tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
- tcg_gen_qemu_ld16s(REG(0), addr, ctx->memidx);
+ tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESW);
tcg_temp_free(addr);
}
return;
@@ -1216,7 +1229,7 @@ static void _decode_opc(DisasContext * ctx)
{
TCGv addr = tcg_temp_new();
tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
- tcg_gen_qemu_ld32s(REG(0), addr, ctx->memidx);
+ tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESL);
tcg_temp_free(addr);
}
return;
@@ -1224,7 +1237,7 @@ static void _decode_opc(DisasContext * ctx)
{
TCGv addr = tcg_temp_new();
tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
- tcg_gen_qemu_st8(REG(0), addr, ctx->memidx);
+ tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_UB);
tcg_temp_free(addr);
}
return;
@@ -1232,7 +1245,7 @@ static void _decode_opc(DisasContext * ctx)
{
TCGv addr = tcg_temp_new();
tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
- tcg_gen_qemu_st16(REG(0), addr, ctx->memidx);
+ tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUW);
tcg_temp_free(addr);
}
return;
@@ -1240,7 +1253,7 @@ static void _decode_opc(DisasContext * ctx)
{
TCGv addr = tcg_temp_new();
tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
- tcg_gen_qemu_st32(REG(0), addr, ctx->memidx);
+ tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUL);
tcg_temp_free(addr);
}
return;
@@ -1248,7 +1261,7 @@ static void _decode_opc(DisasContext * ctx)
{
TCGv addr = tcg_temp_new();
tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
- tcg_gen_qemu_st8(REG(0), addr, ctx->memidx);
+ tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_UB);
tcg_temp_free(addr);
}
return;
@@ -1256,7 +1269,7 @@ static void _decode_opc(DisasContext * ctx)
{
TCGv addr = tcg_temp_new();
tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
- tcg_gen_qemu_st16(REG(0), addr, ctx->memidx);
+ tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUW);
tcg_temp_free(addr);
}
return;
@@ -1264,7 +1277,7 @@ static void _decode_opc(DisasContext * ctx)
{
TCGv addr = tcg_temp_new();
tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
- tcg_gen_qemu_ld8s(REG(0), addr, ctx->memidx);
+ tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_SB);
tcg_temp_free(addr);
}
return;
@@ -1272,7 +1285,7 @@ static void _decode_opc(DisasContext * ctx)
{
TCGv addr = tcg_temp_new();
tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
- tcg_gen_qemu_ld16s(REG(0), addr, ctx->memidx);
+ tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESW);
tcg_temp_free(addr);
}
return;
@@ -1288,9 +1301,9 @@ static void _decode_opc(DisasContext * ctx)
addr = tcg_temp_new();
tcg_gen_add_i32(addr, REG(0), cpu_gbr);
val = tcg_temp_new();
- tcg_gen_qemu_ld8u(val, addr, ctx->memidx);
+ tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
tcg_gen_ori_i32(val, val, B7_0);
- tcg_gen_qemu_st8(val, addr, ctx->memidx);
+ tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
tcg_temp_free(val);
tcg_temp_free(addr);
}
@@ -1318,7 +1331,7 @@ static void _decode_opc(DisasContext * ctx)
{
TCGv val = tcg_temp_new();
tcg_gen_add_i32(val, REG(0), cpu_gbr);
- tcg_gen_qemu_ld8u(val, val, ctx->memidx);
+ tcg_gen_qemu_ld_i32(val, val, ctx->memidx, MO_UB);
tcg_gen_andi_i32(val, val, B7_0);
gen_cmp_imm(TCG_COND_EQ, val, 0);
tcg_temp_free(val);
@@ -1333,9 +1346,9 @@ static void _decode_opc(DisasContext * ctx)
addr = tcg_temp_new();
tcg_gen_add_i32(addr, REG(0), cpu_gbr);
val = tcg_temp_new();
- tcg_gen_qemu_ld8u(val, addr, ctx->memidx);
+ tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
tcg_gen_xori_i32(val, val, B7_0);
- tcg_gen_qemu_st8(val, addr, ctx->memidx);
+ tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
tcg_temp_free(val);
tcg_temp_free(addr);
}
@@ -1349,7 +1362,7 @@ static void _decode_opc(DisasContext * ctx)
return;
case 0x4087: /* ldc.l @Rm+,Rn_BANK */
CHECK_PRIVILEGED
- tcg_gen_qemu_ld32s(ALTREG(B6_4), REG(B11_8), ctx->memidx);
+ tcg_gen_qemu_ld_i32(ALTREG(B6_4), REG(B11_8), ctx->memidx, MO_TESL);
tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
return;
case 0x0082: /* stc Rm_BANK,Rn */
@@ -1361,7 +1374,7 @@ static void _decode_opc(DisasContext * ctx)
{
TCGv addr = tcg_temp_new();
tcg_gen_subi_i32(addr, REG(B11_8), 4);
- tcg_gen_qemu_st32(ALTREG(B6_4), addr, ctx->memidx);
+ tcg_gen_qemu_st_i32(ALTREG(B6_4), addr, ctx->memidx, MO_TEUL);
tcg_gen_mov_i32(REG(B11_8), addr);
tcg_temp_free(addr);
}
@@ -1414,7 +1427,7 @@ static void _decode_opc(DisasContext * ctx)
CHECK_PRIVILEGED
{
TCGv val = tcg_temp_new();
- tcg_gen_qemu_ld32s(val, REG(B11_8), ctx->memidx);
+ tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx, MO_TESL);
tcg_gen_andi_i32(cpu_sr, val, 0x700083f3);
tcg_temp_free(val);
tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
@@ -1430,7 +1443,7 @@ static void _decode_opc(DisasContext * ctx)
{
TCGv addr = tcg_temp_new();
tcg_gen_subi_i32(addr, REG(B11_8), 4);
- tcg_gen_qemu_st32(cpu_sr, addr, ctx->memidx);
+ tcg_gen_qemu_st_i32(cpu_sr, addr, ctx->memidx, MO_TEUL);
tcg_gen_mov_i32(REG(B11_8), addr);
tcg_temp_free(addr);
}
@@ -1442,7 +1455,7 @@ static void _decode_opc(DisasContext * ctx)
return; \
case ldpnum: \
prechk \
- tcg_gen_qemu_ld32s (cpu_##reg, REG(B11_8), ctx->memidx); \
+ tcg_gen_qemu_ld_i32(cpu_##reg, REG(B11_8), ctx->memidx, MO_TESL); \
tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4); \
return;
#define ST(reg,stnum,stpnum,prechk) \
@@ -1455,7 +1468,7 @@ static void _decode_opc(DisasContext * ctx)
{ \
TCGv addr = tcg_temp_new(); \
tcg_gen_subi_i32(addr, REG(B11_8), 4); \
- tcg_gen_qemu_st32 (cpu_##reg, addr, ctx->memidx); \
+ tcg_gen_qemu_st_i32(cpu_##reg, addr, ctx->memidx, MO_TEUL); \
tcg_gen_mov_i32(REG(B11_8), addr); \
tcg_temp_free(addr); \
} \
@@ -1483,7 +1496,7 @@ static void _decode_opc(DisasContext * ctx)
CHECK_FPU_ENABLED
{
TCGv addr = tcg_temp_new();
- tcg_gen_qemu_ld32s(addr, REG(B11_8), ctx->memidx);
+ tcg_gen_qemu_ld_i32(addr, REG(B11_8), ctx->memidx, MO_TESL);
tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
gen_helper_ld_fpscr(cpu_env, addr);
tcg_temp_free(addr);
@@ -1502,7 +1515,7 @@ static void _decode_opc(DisasContext * ctx)
tcg_gen_andi_i32(val, cpu_fpscr, 0x003fffff);
addr = tcg_temp_new();
tcg_gen_subi_i32(addr, REG(B11_8), 4);
- tcg_gen_qemu_st32(val, addr, ctx->memidx);
+ tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_TEUL);
tcg_gen_mov_i32(REG(B11_8), addr);
tcg_temp_free(addr);
tcg_temp_free(val);
@@ -1511,21 +1524,21 @@ static void _decode_opc(DisasContext * ctx)
case 0x00c3: /* movca.l R0,@Rm */
{
TCGv val = tcg_temp_new();
- tcg_gen_qemu_ld32u(val, REG(B11_8), ctx->memidx);
+ tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx, MO_TEUL);
gen_helper_movcal(cpu_env, REG(B11_8), val);
- tcg_gen_qemu_st32(REG(0), REG(B11_8), ctx->memidx);
+ tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
}
ctx->has_movcal = 1;
return;
case 0x40a9:
/* MOVUA.L @Rm,R0 (Rm) -> R0
Load non-boundary-aligned data */
- tcg_gen_qemu_ld32u(REG(0), REG(B11_8), ctx->memidx);
+ tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
return;
case 0x40e9:
/* MOVUA.L @Rm+,R0 (Rm) -> R0, Rm + 4 -> Rm
Load non-boundary-aligned data */
- tcg_gen_qemu_ld32u(REG(0), REG(B11_8), ctx->memidx);
+ tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
return;
case 0x0029: /* movt Rn */
@@ -1542,7 +1555,7 @@ static void _decode_opc(DisasContext * ctx)
tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
tcg_gen_or_i32(cpu_sr, cpu_sr, cpu_ldst);
tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ldst, 0, label);
- tcg_gen_qemu_st32(REG(0), REG(B11_8), ctx->memidx);
+ tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
gen_set_label(label);
tcg_gen_movi_i32(cpu_ldst, 0);
return;
@@ -1557,7 +1570,7 @@ static void _decode_opc(DisasContext * ctx)
*/
if (ctx->features & SH_FEATURE_SH4A) {
tcg_gen_movi_i32(cpu_ldst, 0);
- tcg_gen_qemu_ld32s(REG(0), REG(B11_8), ctx->memidx);
+ tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, MO_TESL);
tcg_gen_movi_i32(cpu_ldst, 1);
return;
} else
@@ -1655,10 +1668,10 @@ static void _decode_opc(DisasContext * ctx)
addr = tcg_temp_local_new();
tcg_gen_mov_i32(addr, REG(B11_8));
val = tcg_temp_local_new();
- tcg_gen_qemu_ld8u(val, addr, ctx->memidx);
+ tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
gen_cmp_imm(TCG_COND_EQ, val, 0);
tcg_gen_ori_i32(val, val, 0x80);
- tcg_gen_qemu_st8(val, addr, ctx->memidx);
+ tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
tcg_temp_free(val);
tcg_temp_free(addr);
}
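(Illustrative aside, not part of the patch: the target-sh4 hunks above replace the legacy size-specific helpers such as tcg_gen_qemu_ld16s and tcg_gen_qemu_st32 with the generic tcg_gen_qemu_ld_i32/tcg_gen_qemu_st_i32 calls, where a MemOp constant now encodes size, signedness and target endianness (MO_UB, MO_TESW, MO_TEUL, ...). A minimal standalone sketch, not QEMU code, of the semantics selected by MO_TESW on a little-endian target:)

#include <stdint.h>
#include <stdio.h>

/* 16-bit, little-endian, sign-extending load. */
static int32_t ld16s_le(const uint8_t *p)
{
    uint16_t v = (uint16_t)p[0] | ((uint16_t)p[1] << 8);
    return (int16_t)v;                 /* sign-extend to 32 bits */
}

int main(void)
{
    uint8_t mem[2] = { 0xFE, 0xFF };   /* 0xFFFE == -2 as int16_t */
    printf("%d\n", ld16s_le(mem));     /* prints -2 */
    return 0;
}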
diff --git a/tcg/i386/tcg-target.c b/tcg/i386/tcg-target.c
index 7ac8e45485..495b901080 100644
--- a/tcg/i386/tcg-target.c
+++ b/tcg/i386/tcg-target.c
@@ -1486,7 +1486,7 @@ static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
switch (memop & MO_SIZE) {
case MO_8:
- /* In 32-bit mode, 8-byte stores can only happen from [abcd]x.
+ /* In 32-bit mode, 8-bit stores can only happen from [abcd]x.
Use the scratch register if necessary. */
if (TCG_TARGET_REG_BITS == 32 && datalo >= 4) {
tcg_out_mov(s, TCG_TYPE_I32, scratch, datalo);
diff --git a/tests/qom-test.c b/tests/qom-test.c
index 499be40261..5e5af7a50f 100644
--- a/tests/qom-test.c
+++ b/tests/qom-test.c
@@ -70,6 +70,8 @@ static const char *arm_machines[] = {
"xilinx-zynq-a9",
"highbank",
"midway",
+ "canon-a1100",
+ "cubieboard",
};
static const char *cris_machines[] = {