-rw-r--r--.gitignore19
-rw-r--r--.travis.yml53
-rw-r--r--CODING_STYLE7
-rw-r--r--MAINTAINERS17
-rw-r--r--Makefile8
-rw-r--r--Makefile.objs1
-rw-r--r--VERSION2
-rw-r--r--async.c18
-rw-r--r--backends/baum.c8
-rw-r--r--block-migration.c30
-rw-r--r--block.c343
-rw-r--r--block/blkverify.c17
-rw-r--r--block/bochs.c119
-rw-r--r--block/cloop.c81
-rw-r--r--block/commit.c2
-rw-r--r--block/curl.c7
-rw-r--r--block/dmg.c275
-rw-r--r--block/gluster.c2
-rw-r--r--block/iscsi.c25
-rw-r--r--block/mirror.c44
-rw-r--r--block/nbd-client.c33
-rw-r--r--block/nbd.c2
-rw-r--r--block/nfs.c5
-rw-r--r--block/parallels.c14
-rw-r--r--block/qcow.c2
-rw-r--r--block/qcow2-cluster.c16
-rw-r--r--block/qcow2-refcount.c150
-rw-r--r--block/qcow2-snapshot.c50
-rw-r--r--block/qcow2.c207
-rw-r--r--block/qcow2.h52
-rw-r--r--block/qed.c38
-rw-r--r--block/quorum.c16
-rw-r--r--block/raw-posix.c47
-rw-r--r--block/raw-win32.c10
-rw-r--r--block/sheepdog.c8
-rw-r--r--block/vdi.c39
-rw-r--r--block/vhdx-log.c2
-rw-r--r--block/vhdx.c12
-rw-r--r--block/vhdx.h6
-rw-r--r--block/vmdk.c23
-rw-r--r--block/vpc.c32
-rw-r--r--block/vvfat.c4
-rw-r--r--blockdev.c35
-rw-r--r--blockjob.c2
-rw-r--r--bsd-user/main.c2
-rwxr-xr-xconfigure122
-rw-r--r--cpu-exec.c114
-rw-r--r--cpus.c27
-rw-r--r--cputlb.c24
-rw-r--r--default-configs/usb.mak1
-rw-r--r--device-hotplug.c4
-rw-r--r--dma-helpers.c1
-rw-r--r--docs/writing-qmp-commands.txt6
-rw-r--r--exec.c120
-rw-r--r--fpu/softfloat.c24
-rw-r--r--fsdev/virtfs-proxy-helper.c1
-rw-r--r--gdbstub.c48
-rw-r--r--hmp-commands.hx36
-rw-r--r--hmp.c25
-rw-r--r--hmp.h5
-rw-r--r--hw/9pfs/virtio-9p.c5
-rw-r--r--hw/acpi/cpu_hotplug.c1
-rw-r--r--hw/arm/boot.c5
-rw-r--r--hw/arm/cubieboard.c13
-rw-r--r--hw/arm/exynos4210.c16
-rw-r--r--hw/arm/highbank.c19
-rw-r--r--hw/arm/integratorcp.c1
-rw-r--r--hw/arm/realview.c39
-rw-r--r--hw/arm/spitz.c13
-rw-r--r--hw/arm/vexpress.c122
-rw-r--r--hw/arm/virt.c6
-rw-r--r--hw/audio/fmopl.c6
-rw-r--r--hw/block/dataplane/virtio-blk.c89
-rw-r--r--hw/block/m25p80.c9
-rw-r--r--hw/block/nvme.c4
-rw-r--r--hw/block/xen_disk.c7
-rw-r--r--hw/char/pl011.c24
-rw-r--r--hw/char/sclpconsole-lm.c27
-rw-r--r--hw/char/sclpconsole.c30
-rw-r--r--hw/char/serial.c6
-rw-r--r--hw/char/virtio-console.c69
-rw-r--r--hw/char/virtio-serial-bus.c51
-rw-r--r--hw/core/Makefile.objs3
-rw-r--r--hw/core/fw-path-provider.c52
-rw-r--r--hw/core/machine.c28
-rw-r--r--hw/core/qdev-properties-system.c70
-rw-r--r--hw/core/qdev-properties.c21
-rw-r--r--hw/core/qdev.c115
-rw-r--r--hw/display/ads7846.c7
-rw-r--r--hw/display/ssd0323.c11
-rw-r--r--hw/display/vmware_vga.c3
-rw-r--r--hw/dma/xilinx_axidma.c20
-rw-r--r--hw/i2c/smbus_eeprom.c2
-rw-r--r--hw/i386/acpi-build.c70
-rw-r--r--hw/i386/acpi-defs.h14
-rw-r--r--hw/i386/acpi-dsdt-cpu-hotplug.dsl2
-rw-r--r--hw/i386/acpi-dsdt.hex.generated25
-rw-r--r--hw/i386/kvmvapic.c37
-rw-r--r--hw/i386/pc.c16
-rw-r--r--hw/i386/pc_piix.c7
-rw-r--r--hw/i386/pc_q35.c1
-rw-r--r--hw/i386/q35-acpi-dsdt.hex.generated25
-rw-r--r--hw/ide/ahci.c17
-rw-r--r--hw/ide/core.c2
-rw-r--r--hw/ide/pci.c2
-rw-r--r--hw/intc/allwinner-a10-pic.c22
-rw-r--r--hw/intc/apic.c10
-rw-r--r--hw/intc/apic_common.c7
-rw-r--r--hw/intc/arm_gic_kvm.c2
-rw-r--r--hw/intc/openpic.c14
-rw-r--r--hw/intc/openpic_kvm.c12
-rw-r--r--hw/intc/slavio_intctl.c2
-rw-r--r--hw/intc/xics_kvm.c11
-rw-r--r--hw/intc/xilinx_intc.c3
-rw-r--r--hw/misc/ivshmem.c4
-rw-r--r--hw/misc/max111x.c54
-rw-r--r--hw/misc/tmp105.c8
-rw-r--r--hw/misc/vfio.c33
-rw-r--r--hw/misc/zynq_slcr.c697
-rw-r--r--hw/net/allwinner_emac.c6
-rw-r--r--hw/net/cadence_gem.c4
-rw-r--r--hw/net/fsl_etsec/rings.c8
-rw-r--r--hw/net/pcnet.c1
-rw-r--r--hw/net/spapr_llan.c3
-rw-r--r--hw/net/virtio-net.c50
-rw-r--r--hw/net/vmxnet3.c58
-rw-r--r--hw/net/xilinx_axienet.c25
-rw-r--r--hw/nvram/fw_cfg.c2
-rw-r--r--hw/pci-host/apb.c10
-rw-r--r--hw/pci-host/prep.c233
-rw-r--r--hw/pci/pci.c55
-rw-r--r--hw/pci/pci_host.c3
-rw-r--r--hw/pcmcia/pxa2xx.c4
-rw-r--r--hw/ppc/e500.c5
-rw-r--r--hw/ppc/ppc.c94
-rw-r--r--hw/ppc/ppc405_uc.c2
-rw-r--r--hw/ppc/ppc440_bamboo.c4
-rw-r--r--hw/ppc/ppc4xx_devs.c2
-rw-r--r--hw/ppc/ppc_booke.c24
-rw-r--r--hw/ppc/ppce500_spin.c8
-rw-r--r--hw/ppc/prep.c149
-rw-r--r--hw/ppc/spapr.c111
-rw-r--r--hw/ppc/spapr_hcall.c60
-rw-r--r--hw/ppc/spapr_pci.c75
-rw-r--r--hw/ppc/spapr_vio.c3
-rw-r--r--hw/ppc/virtex_ml507.c4
-rw-r--r--hw/s390x/ipl.c2
-rw-r--r--hw/s390x/s390-virtio-bus.c4
-rw-r--r--hw/s390x/s390-virtio.c8
-rw-r--r--hw/s390x/sclpcpu.c6
-rw-r--r--hw/s390x/virtio-ccw.c4
-rw-r--r--hw/scsi/scsi-bus.c31
-rw-r--r--hw/scsi/spapr_vscsi.c6
-rw-r--r--hw/scsi/virtio-scsi.c4
-rw-r--r--hw/scsi/vmw_pvscsi.c3
-rw-r--r--hw/sd/ssi-sd.c7
-rw-r--r--hw/sh4/sh7750.c2
-rw-r--r--hw/ssi/ssi.c11
-rw-r--r--hw/timer/allwinner-a10-pit.c66
-rw-r--r--hw/timer/cadence_ttc.c2
-rw-r--r--hw/timer/grlib_gptimer.c5
-rw-r--r--hw/timer/hpet.c3
-rw-r--r--hw/usb/Makefile.objs4
-rw-r--r--hw/usb/desc-msos.c6
-rw-r--r--hw/usb/desc.h1
-rw-r--r--hw/usb/dev-mtp.c1103
-rw-r--r--hw/usb/hcd-ohci.c32
-rw-r--r--hw/virtio/virtio-pci.c4
-rw-r--r--hw/virtio/virtio-rng.c7
-rw-r--r--include/block/aio.h18
-rw-r--r--include/block/block.h19
-rw-r--r--include/block/block_int.h10
-rw-r--r--include/exec/cpu-all.h24
-rw-r--r--include/exec/cpu-defs.h61
-rw-r--r--include/exec/cputlb.h6
-rw-r--r--include/exec/exec-all.h45
-rw-r--r--include/exec/gen-icount.h10
-rw-r--r--include/exec/softmmu_exec.h52
-rw-r--r--include/exec/softmmu_template.h24
-rw-r--r--include/fpu/softfloat.h8
-rw-r--r--include/hw/acpi/cpu_hotplug_defs.h8
-rw-r--r--include/hw/boards.h57
-rw-r--r--include/hw/fw-path-provider.h48
-rw-r--r--include/hw/net/allwinner_emac.h1
-rw-r--r--include/hw/ppc/ppc.h3
-rw-r--r--include/hw/ppc/spapr.h9
-rw-r--r--include/hw/qdev-core.h6
-rw-r--r--include/hw/qdev-properties.h14
-rw-r--r--include/hw/scsi/scsi.h1
-rw-r--r--include/hw/ssi.h3
-rw-r--r--include/hw/timer/allwinner-a10-pit.h13
-rw-r--r--include/hw/virtio/virtio-blk.h8
-rw-r--r--include/hw/virtio/virtio-serial.h8
-rw-r--r--include/hw/xen/xen.h1
-rw-r--r--include/migration/vmstate.h3
-rw-r--r--include/monitor/monitor.h1
-rw-r--r--include/qapi/qmp/qerror.h79
-rw-r--r--include/qemu-common.h2
-rw-r--r--include/qemu-io.h2
-rw-r--r--include/qemu/bswap.h2
-rw-r--r--include/qemu/config-file.h2
-rw-r--r--include/qemu/error-report.h1
-rw-r--r--include/qemu/int128.h4
-rw-r--r--include/qemu/rfifolock.h54
-rw-r--r--include/qemu/typedefs.h1
-rw-r--r--include/qom/cpu.h127
-rw-r--r--include/qom/object.h37
-rw-r--r--include/sysemu/iothread.h40
-rw-r--r--include/sysemu/kvm.h1
-rw-r--r--include/sysemu/qemumachine.h16
-rw-r--r--include/sysemu/qtest.h1
-rw-r--r--include/sysemu/sysemu.h11
-rw-r--r--include/ui/console.h2
-rw-r--r--iothread.c167
-rw-r--r--kvm-stub.c1
-rw-r--r--linux-headers/asm-s390/kvm.h24
-rw-r--r--linux-headers/linux/kvm.h17
-rw-r--r--linux-headers/linux/vfio.h6
-rw-r--r--linux-user/elfload.c17
-rw-r--r--linux-user/linuxload.c3
-rw-r--r--linux-user/m68k-sim.c3
-rw-r--r--linux-user/m68k/target_cpu.h4
-rw-r--r--linux-user/main.c158
-rw-r--r--linux-user/qemu.h2
-rw-r--r--linux-user/signal.c146
-rw-r--r--linux-user/syscall.c163
-rw-r--r--linux-user/syscall_defs.h18
-rw-r--r--linux-user/vm86.c27
-rw-r--r--main-loop.c3
-rw-r--r--migration.c38
-rw-r--r--monitor.c85
-rw-r--r--net/net.c14
-rw-r--r--net/netmap.c4
-rw-r--r--net/slirp.c2
-rw-r--r--net/tap.c14
-rw-r--r--os-posix.c4
-rw-r--r--pc-bios/README2
-rw-r--r--pc-bios/ppc_rom.binbin524288 -> 1048576 bytes
-rw-r--r--pc-bios/qemu_logo.svg1010
-rw-r--r--pc-bios/slof.binbin873920 -> 921720 bytes
-rw-r--r--po/Makefile4
-rw-r--r--po/de_DE.po2
-rw-r--r--po/fr_FR.po2
-rw-r--r--po/hu.po2
-rw-r--r--po/it.po2
-rw-r--r--po/tr.po2
-rw-r--r--qapi-schema.json40
-rw-r--r--qapi/qmp-dispatch.c3
-rw-r--r--qapi/qmp-input-visitor.c2
-rw-r--r--qdev-monitor.c27
-rw-r--r--qemu-char.c17
-rw-r--r--qemu-doc.texi13
-rw-r--r--qemu-file.c2
-rw-r--r--qemu-img.c131
-rw-r--r--qemu-io-cmds.c4
-rw-r--r--qemu-io.c18
-rw-r--r--qemu-nbd.c20
-rw-r--r--qemu-options.hx34
-rw-r--r--qemu.sasl4
-rw-r--r--qga/commands-posix.c2
-rw-r--r--qga/qapi-schema.json14
-rw-r--r--qga/vss-win32/install.cpp3
-rw-r--r--qmp-commands.hx41
-rw-r--r--qmp.c30
-rw-r--r--qobject/json-parser.c2
-rw-r--r--qom/cpu.c85
-rw-r--r--qom/object.c205
m---------roms/SLOF0
m---------roms/openhackware0
-rw-r--r--rules.mak4
-rw-r--r--savevm.c11
-rw-r--r--scripts/coverity-model.c183
-rwxr-xr-xscripts/make-release3
-rw-r--r--scripts/qemu-binfmt-conf.sh3
-rw-r--r--slirp/misc.c13
-rw-r--r--slirp/slirp.c8
-rw-r--r--slirp/slirp.h2
-rw-r--r--slirp/tftp.c2
-rw-r--r--stubs/Makefile.objs3
-rw-r--r--stubs/arch-query-cpu-def.c2
-rw-r--r--stubs/mon-print-filename.c6
-rw-r--r--stubs/qtest.c14
-rw-r--r--stubs/runstate-check.c6
-rw-r--r--target-alpha/cpu.c22
-rw-r--r--target-alpha/cpu.h20
-rw-r--r--target-alpha/fpu_helper.c7
-rw-r--r--target-alpha/helper.c40
-rw-r--r--target-alpha/helper.h1
-rw-r--r--target-alpha/mem_helper.c32
-rw-r--r--target-alpha/sys_helper.c4
-rw-r--r--target-alpha/translate.c2366
-rw-r--r--target-arm/arm-semi.c9
-rw-r--r--target-arm/cpu-qom.h10
-rw-r--r--target-arm/cpu.c45
-rw-r--r--target-arm/cpu.h92
-rw-r--r--target-arm/cpu64.c115
-rw-r--r--target-arm/gdbstub64.c2
-rw-r--r--target-arm/helper-a64.c254
-rw-r--r--target-arm/helper-a64.h10
-rw-r--r--target-arm/helper.c1294
-rw-r--r--target-arm/helper.h51
-rw-r--r--target-arm/internals.h267
-rw-r--r--target-arm/kvm32.c19
-rw-r--r--target-arm/kvm64.c71
-rw-r--r--target-arm/machine.c15
-rw-r--r--target-arm/neon_helper.c187
-rw-r--r--target-arm/op_helper.c131
-rw-r--r--target-arm/translate-a64.c1958
-rw-r--r--target-arm/translate.c260
-rw-r--r--target-arm/translate.h29
-rw-r--r--target-cris/cpu.c27
-rw-r--r--target-cris/cpu.h12
-rw-r--r--target-cris/helper.c48
-rw-r--r--target-cris/mmu.c3
-rw-r--r--target-cris/op_helper.c28
-rw-r--r--target-cris/translate.c21
-rw-r--r--target-cris/translate_v10.c16
-rw-r--r--target-i386/cpu-qom.h15
-rw-r--r--target-i386/cpu.c309
-rw-r--r--target-i386/cpu.h360
-rw-r--r--target-i386/excp_helper.c6
-rw-r--r--target-i386/helper.c88
-rw-r--r--target-i386/kvm.c10
-rw-r--r--target-i386/machine.c9
-rw-r--r--target-i386/mem_helper.c16
-rw-r--r--target-i386/misc_helper.c20
-rw-r--r--target-i386/seg_helper.c20
-rw-r--r--target-i386/smm_helper.c2
-rw-r--r--target-i386/svm_helper.c41
-rw-r--r--target-i386/translate.c4
-rw-r--r--target-lm32/cpu.c14
-rw-r--r--target-lm32/cpu.h13
-rw-r--r--target-lm32/helper.c64
-rw-r--r--target-lm32/op_helper.c23
-rw-r--r--target-lm32/translate.c5
-rw-r--r--target-m68k/cpu.c14
-rw-r--r--target-m68k/cpu.h9
-rw-r--r--target-m68k/helper.c23
-rw-r--r--target-m68k/m68k-semi.c5
-rw-r--r--target-m68k/op_helper.c34
-rw-r--r--target-m68k/qregs.def1
-rw-r--r--target-m68k/translate.c47
-rw-r--r--target-microblaze/cpu.c14
-rw-r--r--target-microblaze/cpu.h8
-rw-r--r--target-microblaze/helper.c34
-rw-r--r--target-microblaze/mmu.c6
-rw-r--r--target-microblaze/op_helper.c19
-rw-r--r--target-microblaze/translate.c102
-rw-r--r--target-mips/cpu.c38
-rw-r--r--target-mips/cpu.h36
-rw-r--r--target-mips/helper.c64
-rw-r--r--target-mips/machine.c3
-rw-r--r--target-mips/op_helper.c36
-rw-r--r--target-mips/translate.c87
-rw-r--r--target-mips/translate_init.c26
-rw-r--r--target-moxie/cpu.c27
-rw-r--r--target-moxie/cpu.h7
-rw-r--r--target-moxie/helper.c51
-rw-r--r--target-moxie/translate.c4
-rw-r--r--target-openrisc/cpu.c34
-rw-r--r--target-openrisc/cpu.h10
-rw-r--r--target-openrisc/exception.c6
-rw-r--r--target-openrisc/interrupt.c14
-rw-r--r--target-openrisc/interrupt_helper.c2
-rw-r--r--target-openrisc/mmu.c17
-rw-r--r--target-openrisc/mmu_helper.c8
-rw-r--r--target-openrisc/sys_helper.c6
-rw-r--r--target-openrisc/translate.c5
-rw-r--r--target-ppc/arch_dump.c6
-rw-r--r--target-ppc/cpu-qom.h8
-rw-r--r--target-ppc/cpu.h18
-rw-r--r--target-ppc/excp_helper.c78
-rw-r--r--target-ppc/fpu_helper.c520
-rw-r--r--target-ppc/helper_regs.h4
-rw-r--r--target-ppc/int_helper.c4
-rw-r--r--target-ppc/kvm.c8
-rw-r--r--target-ppc/machine.c4
-rw-r--r--target-ppc/misc_helper.c4
-rw-r--r--target-ppc/mmu-hash32.c35
-rw-r--r--target-ppc/mmu-hash32.h14
-rw-r--r--target-ppc/mmu-hash64.c30
-rw-r--r--target-ppc/mmu-hash64.h8
-rw-r--r--target-ppc/mmu_helper.c208
-rw-r--r--target-ppc/translate.c4
-rw-r--r--target-ppc/translate_init.c82
-rw-r--r--target-ppc/user_only_helper.c8
-rw-r--r--target-s390x/arch_dump.c6
-rw-r--r--target-s390x/cc_helper.c5
-rw-r--r--target-s390x/cpu.c22
-rw-r--r--target-s390x/cpu.h17
-rw-r--r--target-s390x/fpu_helper.c4
-rw-r--r--target-s390x/helper.c88
-rw-r--r--target-s390x/int_helper.c3
-rw-r--r--target-s390x/kvm.c175
-rw-r--r--target-s390x/mem_helper.c58
-rw-r--r--target-s390x/misc_helper.c45
-rw-r--r--target-s390x/translate.c4
-rw-r--r--target-sh4/cpu.c27
-rw-r--r--target-sh4/cpu.h11
-rw-r--r--target-sh4/helper.c108
-rw-r--r--target-sh4/op_helper.c20
-rw-r--r--target-sh4/translate.c4
-rw-r--r--target-sparc/cpu.c114
-rw-r--r--target-sparc/cpu.h13
-rw-r--r--target-sparc/helper.c53
-rw-r--r--target-sparc/int32_helper.c10
-rw-r--r--target-sparc/int64_helper.c8
-rw-r--r--target-sparc/ldst_helper.c53
-rw-r--r--target-sparc/machine.c3
-rw-r--r--target-sparc/mmu_helper.c42
-rw-r--r--target-sparc/translate.c4
-rw-r--r--target-unicore32/cpu.c13
-rw-r--r--target-unicore32/cpu.h11
-rw-r--r--target-unicore32/helper.c30
-rw-r--r--target-unicore32/op_helper.c14
-rw-r--r--target-unicore32/softmmu.c29
-rw-r--r--target-unicore32/translate.c32
-rw-r--r--target-unicore32/ucf64_helper.c3
-rw-r--r--target-xtensa/cpu.c8
-rw-r--r--target-xtensa/cpu.h13
-rw-r--r--target-xtensa/helper.c32
-rw-r--r--target-xtensa/op_helper.c51
-rw-r--r--target-xtensa/translate.c5
-rw-r--r--tcg/README18
-rw-r--r--tcg/aarch64/tcg-target.c1802
-rw-r--r--tcg/aarch64/tcg-target.h83
-rw-r--r--tcg/arm/tcg-target.c29
-rw-r--r--tcg/arm/tcg-target.h2
-rw-r--r--tcg/i386/tcg-target.c8
-rw-r--r--tcg/i386/tcg-target.h2
-rw-r--r--tcg/ia64/tcg-target.c500
-rw-r--r--tcg/ia64/tcg-target.h2
-rw-r--r--tcg/mips/tcg-target.c14
-rw-r--r--tcg/mips/tcg-target.h5
-rw-r--r--tcg/optimize.c45
-rw-r--r--tcg/ppc/tcg-target.c8
-rw-r--r--tcg/ppc/tcg-target.h2
-rw-r--r--tcg/ppc64/tcg-target.c12
-rw-r--r--tcg/ppc64/tcg-target.h1
-rw-r--r--tcg/s390/tcg-target.c45
-rw-r--r--tcg/s390/tcg-target.h2
-rw-r--r--tcg/sparc/tcg-target.c609
-rw-r--r--tcg/sparc/tcg-target.h8
-rw-r--r--tcg/tcg-be-ldst.h2
-rw-r--r--tcg/tcg-op.h48
-rw-r--r--tcg/tcg.c14
-rw-r--r--tcg/tcg.h8
-rw-r--r--tcg/tci/tcg-target.c2
-rw-r--r--tcg/tci/tcg-target.h8
-rw-r--r--tci.c20
-rw-r--r--tests/.gitignore12
-rw-r--r--tests/Makefile55
-rw-r--r--tests/acpi-test-data/pc/DSDTbin4485 -> 4480 bytes
-rw-r--r--tests/acpi-test-data/pc/SSDTbin2363 -> 2269 bytes
-rw-r--r--tests/acpi-test-data/q35/DSDTbin7383 -> 7378 bytes
-rw-r--r--tests/acpi-test-data/q35/SSDTbin652 -> 550 bytes
-rw-r--r--tests/acpi-test.c70
-rw-r--r--tests/i82801b11-test.c33
-rw-r--r--tests/libqos/pci-pc.c12
-rw-r--r--tests/libqtest.c95
-rw-r--r--tests/libqtest.h34
-rw-r--r--tests/nvme-test.c34
-rw-r--r--tests/pvpanic-test.c47
-rw-r--r--tests/qdev-monitor-test.c8
-rw-r--r--tests/qemu-iotests/026.out6
-rwxr-xr-xtests/qemu-iotests/02940
-rw-r--r--tests/qemu-iotests/029.out17
-rwxr-xr-xtests/qemu-iotests/03050
-rwxr-xr-xtests/qemu-iotests/03920
-rw-r--r--tests/qemu-iotests/039.out11
-rw-r--r--tests/qemu-iotests/044.out2
-rwxr-xr-xtests/qemu-iotests/05112
-rw-r--r--tests/qemu-iotests/051.out49
-rwxr-xr-xtests/qemu-iotests/0569
-rwxr-xr-xtests/qemu-iotests/06026
-rw-r--r--tests/qemu-iotests/060.out15
-rwxr-xr-xtests/qemu-iotests/075106
-rw-r--r--tests/qemu-iotests/075.out38
-rwxr-xr-xtests/qemu-iotests/07676
-rw-r--r--tests/qemu-iotests/076.out18
-rwxr-xr-xtests/qemu-iotests/07891
-rw-r--r--tests/qemu-iotests/078.out28
-rwxr-xr-xtests/qemu-iotests/080180
-rw-r--r--tests/qemu-iotests/080.out83
-rwxr-xr-xtests/qemu-iotests/083129
-rw-r--r--tests/qemu-iotests/083.out163
-rwxr-xr-xtests/qemu-iotests/084104
-rw-r--r--tests/qemu-iotests/084.out30
-rwxr-xr-xtests/qemu-iotests/087102
-rw-r--r--tests/qemu-iotests/087.out29
-rwxr-xr-xtests/qemu-iotests/08864
-rw-r--r--tests/qemu-iotests/088.out17
-rw-r--r--tests/qemu-iotests/common21
-rw-r--r--tests/qemu-iotests/common.rc3
-rw-r--r--tests/qemu-iotests/group11
-rw-r--r--tests/qemu-iotests/iotests.py5
-rwxr-xr-xtests/qemu-iotests/nbd-fault-injector.py264
-rw-r--r--tests/qemu-iotests/sample_images/empty.bochs.bz2bin0 -> 118 bytes
-rw-r--r--tests/qemu-iotests/sample_images/fake.parallels.bz2bin0 -> 141 bytes
-rw-r--r--tests/qemu-iotests/sample_images/simple-pattern.cloop.bz2bin0 -> 488 bytes
-rw-r--r--tests/qom-test.c39
-rw-r--r--tests/spapr-phb-test.c35
-rw-r--r--tests/tcg/test_path.c13
-rw-r--r--tests/test-aio.c75
-rw-r--r--tests/test-qmp-input-strict.c8
-rw-r--r--tests/test-rfifolock.c91
-rw-r--r--tests/tmp105-test.c136
-rw-r--r--tests/virtio-9p-test.c46
-rw-r--r--tests/virtio-balloon-test.c33
-rw-r--r--tests/virtio-blk-test.c34
-rw-r--r--tests/virtio-console-test.c41
-rw-r--r--tests/virtio-rng-test.c33
-rw-r--r--tests/virtio-scsi-test.c35
-rw-r--r--tests/virtio-serial-test.c33
-rw-r--r--trace-events45
-rw-r--r--trace/simple.c9
-rw-r--r--translate-all.c99
-rw-r--r--translate-all.h2
-rw-r--r--ui/console.c5
-rw-r--r--ui/gtk.c120
-rw-r--r--ui/input-legacy.c1
-rw-r--r--ui/input.c19
-rw-r--r--ui/sdl2.c21
-rw-r--r--ui/spice-display.c4
-rw-r--r--ui/spice-input.c2
-rw-r--r--ui/vnc.c14
-rw-r--r--user-exec.c53
-rw-r--r--util/Makefile.objs1
-rw-r--r--util/cutils.c14
-rw-r--r--util/error.c5
-rw-r--r--util/module.c2
-rw-r--r--util/osdep.c18
-rw-r--r--util/oslib-posix.c4
-rw-r--r--util/path.c4
-rw-r--r--util/qemu-config.c16
-rw-r--r--util/qemu-error.c4
-rw-r--r--util/qemu-option.c2
-rw-r--r--util/qemu-thread-posix.c21
-rw-r--r--util/qemu-thread-win32.c2
-rw-r--r--util/rfifolock.c78
-rw-r--r--vl.c333
-rw-r--r--vmstate.c2
-rw-r--r--xbzrle.c8
543 files changed, 20295 insertions, 8750 deletions
diff --git a/.gitignore b/.gitignore
index ef7019f35d..8a5270973e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -18,9 +18,10 @@
/*-darwin-user
/*-linux-user
/*-bsd-user
-libdis*
-libuser
+/libdis*
+/libuser
/linux-headers/asm
+/qga/qapi-generated
/qapi-generated
/qapi-types.[ch]
/qapi-visit.[ch]
@@ -48,19 +49,9 @@ libuser
/qemu-monitor.texi
/qmp-commands.txt
/vscclient
-/test-bitops
-/test-coroutine
-/test-int128
-/test-opts-visitor
-/test-qmp-input-visitor
-/test-qmp-output-visitor
-/test-string-input-visitor
-/test-string-output-visitor
-/test-visitor-serialization
/fsdev/virtfs-proxy-helper
/fsdev/virtfs-proxy-helper.1
/fsdev/virtfs-proxy-helper.pod
-/.gdbinit
*.a
*.aux
*.cp
@@ -89,12 +80,8 @@ libuser
*.pc
.libs
.sdk
-*.swp
-*.orig
-.pc
*.gcda
*.gcno
-patches
/pc-bios/bios-pq/status
/pc-bios/vgabios-pq/status
/pc-bios/optionrom/linuxboot.asm
diff --git a/.travis.yml b/.travis.yml
index c7ff4da29c..04da973632 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -4,6 +4,12 @@ python:
compiler:
- gcc
- clang
+notifications:
+ irc:
+ channels:
+ - "irc.oftc.net#qemu"
+ on_success: change
+ on_failure: always
env:
global:
- TEST_CMD="make check"
@@ -14,23 +20,23 @@ env:
- GUI_PKGS="libgtk-3-dev libvte-2.90-dev libsdl1.2-dev libpng12-dev libpixman-1-dev"
- EXTRA_PKGS=""
matrix:
- - TARGETS=alpha-softmmu,alpha-linux-user
- - TARGETS=arm-softmmu,arm-linux-user
- - TARGETS=aarch64-softmmu,aarch64-linux-user
- - TARGETS=cris-softmmu
- - TARGETS=i386-softmmu,x86_64-softmmu
- - TARGETS=lm32-softmmu
- - TARGETS=m68k-softmmu
- - TARGETS=microblaze-softmmu,microblazeel-softmmu
- - TARGETS=mips-softmmu,mips64-softmmu,mips64el-softmmu,mipsel-softmmu
- - TARGETS=moxie-softmmu
- - TARGETS=or32-softmmu,
- - TARGETS=ppc-softmmu,ppc64-softmmu,ppcemb-softmmu
- - TARGETS=s390x-softmmu
- - TARGETS=sh4-softmmu,sh4eb-softmmu
- - TARGETS=sparc-softmmu,sparc64-softmmu
- - TARGETS=unicore32-softmmu
- - TARGETS=xtensa-softmmu,xtensaeb-softmmu
+ - TARGETS=alpha-softmmu,alpha-linux-user
+ - TARGETS=arm-softmmu,arm-linux-user
+ - TARGETS=aarch64-softmmu,aarch64-linux-user
+ - TARGETS=cris-softmmu
+ - TARGETS=i386-softmmu,x86_64-softmmu
+ - TARGETS=lm32-softmmu
+ - TARGETS=m68k-softmmu
+ - TARGETS=microblaze-softmmu,microblazeel-softmmu
+ - TARGETS=mips-softmmu,mips64-softmmu,mips64el-softmmu,mipsel-softmmu
+ - TARGETS=moxie-softmmu
+ - TARGETS=or32-softmmu,
+ - TARGETS=ppc-softmmu,ppc64-softmmu,ppcemb-softmmu
+ - TARGETS=s390x-softmmu
+ - TARGETS=sh4-softmmu,sh4eb-softmmu
+ - TARGETS=sparc-softmmu,sparc64-softmmu
+ - TARGETS=unicore32-softmmu
+ - TARGETS=xtensa-softmmu,xtensaeb-softmmu
before_install:
- git submodule update --init --recursive
- sudo apt-get update -qq
@@ -46,6 +52,10 @@ matrix:
- env: TARGETS=i386-softmmu,x86_64-softmmu
EXTRA_CONFIG="--enable-debug --enable-tcg-interpreter"
compiler: gcc
+ # All the extra -dev packages
+ - env: TARGETS=i386-softmmu,x86_64-softmmu
+ EXTRA_PKGS="libaio-dev libcap-ng-dev libattr1-dev libbrlapi-dev uuid-dev libusb-1.0.0-dev"
+ compiler: gcc
# Currently configure doesn't force --disable-pie
- env: TARGETS=i386-softmmu,x86_64-softmmu
EXTRA_CONFIG="--enable-gprof --enable-gcov --disable-pie"
@@ -65,8 +75,7 @@ matrix:
EXTRA_CONFIG="--enable-trace-backend=ftrace"
TEST_CMD=""
compiler: gcc
- # This disabled make check for the ftrace backend which needs more setting up
- # Currently broken on 12.04 due to mis-packaged liburcu and changed API, will be pulled.
- #- env: TARGETS=i386-softmmu,x86_64-softmmu
- # EXTRA_PKGS="liblttng-ust-dev liburcu-dev"
- # EXTRA_CONFIG="--enable-trace-backend=ust"
+ - env: TARGETS=i386-softmmu,x86_64-softmmu
+ EXTRA_PKGS="liblttng-ust-dev liburcu-dev"
+ EXTRA_CONFIG="--enable-trace-backend=ust"
+ compiler: gcc
diff --git a/CODING_STYLE b/CODING_STYLE
index dcbce28a27..4280945ff0 100644
--- a/CODING_STYLE
+++ b/CODING_STYLE
@@ -84,3 +84,10 @@ and clarity it comes on a line by itself:
Rationale: a consistent (except for functions...) bracing style reduces
ambiguity and avoids needless churn when lines are added or removed.
Furthermore, it is the QEMU coding style.
+
+5. Declarations
+
+Mixed declarations (interleaving statements and declarations within blocks)
+are not allowed; declarations should be at the beginning of blocks. In other
+words, the code should not generate warnings if using GCC's
+-Wdeclaration-after-statement option.
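
As a minimal sketch of the rule added above (a hypothetical function, not code
from the tree), all declarations come before the first statement of the block:

    static int count_positives(const int *values, size_t len)
    {
        size_t i;
        int count = 0;    /* declared before any statement */

        for (i = 0; i < len; i++) {
            if (values[i] > 0) {
                count++;
            }
        }
        return count;
    }

Introducing a new declaration after the block's first statement (for example,
declaring count only once the loop has started) is what triggers GCC's
-Wdeclaration-after-statement warning and is the pattern this rule forbids.
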
diff --git a/MAINTAINERS b/MAINTAINERS
index 7d17f83868..b287ef8939 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -304,7 +304,7 @@ S: Maintained
F: hw/*/versatile*
Xilinx Zynq
-M: Peter Crosthwaite <peter.crosthwaite@petalogix.com>
+M: Peter Crosthwaite <peter.crosthwaite@xilinx.com>
S: Maintained
F: hw/arm/xilinx_zynq.c
F: hw/misc/zynq_slcr.c
@@ -353,7 +353,7 @@ S: Maintained
F: hw/microblaze/petalogix_s3adsp1800_mmu.c
petalogix_ml605
-M: Peter Crosthwaite <peter.crosthwaite@petalogix.com>
+M: Peter Crosthwaite <peter.crosthwaite@xilinx.com>
S: Maintained
F: hw/microblaze/petalogix_ml605_mmu.c
@@ -592,7 +592,7 @@ S: Orphan
F: hw/scsi/lsi53c895a.c
SSI
-M: Peter Crosthwaite <peter.crosthwaite@petalogix.com>
+M: Peter Crosthwaite <peter.crosthwaite@xilinx.com>
S: Maintained
F: hw/ssi/*
F: hw/block/m25p80.c
@@ -623,6 +623,7 @@ M: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
S: Supported
F: hw/9pfs/
F: fsdev/
+F: tests/virtio-9p-test.c
T: git git://github.com/kvaneesh/QEMU.git
virtio-blk
@@ -648,9 +649,10 @@ nvme
M: Keith Busch <keith.busch@intel.com>
S: Supported
F: hw/block/nvme*
+F: tests/nvme-test.c
Xilinx EDK
-M: Peter Crosthwaite <peter.crosthwaite@petalogix.com>
+M: Peter Crosthwaite <peter.crosthwaite@xilinx.com>
M: Edgar E. Iglesias <edgar.iglesias@gmail.com>
S: Maintained
F: hw/*/xilinx_*
@@ -672,6 +674,8 @@ S: Supported
F: block*
F: block/
F: hw/block/
+F: qemu-img*
+F: qemu-io*
T: git git://repo.or.cz/qemu/kevin.git block
T: git git://github.com/stefanha/qemu.git block
@@ -694,7 +698,7 @@ F: include/hw/cpu/icc_bus.h
F: hw/cpu/icc_bus.c
Device Tree
-M: Peter Crosthwaite <peter.crosthwaite@petalogix.com>
+M: Peter Crosthwaite <peter.crosthwaite@xilinx.com>
M: Alexander Graf <agraf@suse.de>
S: Maintained
F: device_tree.[ch]
@@ -715,7 +719,8 @@ F: hw/display/qxl*
Graphics
M: Anthony Liguori <aliguori@amazon.com>
-S: Maintained
+M: Gerd Hoffmann <kraxel@redhat.com>
+S: Odd Fixes
F: ui/
Cocoa graphics
diff --git a/Makefile b/Makefile
index bd9cd4fceb..423e373fed 100644
--- a/Makefile
+++ b/Makefile
@@ -133,6 +133,7 @@ dummy := $(call unnest-vars,, \
stub-obj-y \
util-obj-y \
qga-obj-y \
+ qga-vss-dll-obj-y \
block-obj-y \
block-obj-m \
common-obj-y \
@@ -265,10 +266,7 @@ clean:
# avoid old build problems by removing potentially incorrect old files
rm -f config.mak op-i386.h opc-i386.h gen-op-i386.h op-arm.h opc-arm.h gen-op-arm.h
rm -f qemu-options.def
- find . -name '*.[oda]' -type f -exec rm -f {} +
- find . -name '*.l[oa]' -type f -exec rm -f {} +
- find . -name '*$(DSOSUF)' -type f -exec rm -f {} +
- find . -name '*.mo' -type f -exec rm -f {} +
+ find . \( -name '*.l[oa]' -o -name '*.so' -o -name '*.dll' -o -name '*.mo' -o -name '*.[oda]' \) -type f -exec rm {} +
rm -f $(filter-out %.tlb,$(TOOLS)) $(HELPERS-y) qemu-ga TAGS cscope.* *.pod *~ */*~
rm -f fsdev/*.pod
rm -rf .libs */.libs
@@ -379,7 +377,7 @@ endif
ifneq ($(CONFIG_MODULES),)
$(INSTALL_DIR) "$(DESTDIR)$(qemu_moddir)"
for s in $(patsubst %.mo,%$(DSOSUF),$(modules-m)); do \
- $(INSTALL_PROG) $(STRIP_OPT) $$s "$(DESTDIR)$(qemu_moddir)/$${s//\//-}"; \
+ $(INSTALL_PROG) $(STRIP_OPT) $$s "$(DESTDIR)$(qemu_moddir)/$$(echo $$s | tr / -)"; \
done
endif
ifneq ($(HELPERS-y),)
diff --git a/Makefile.objs b/Makefile.objs
index 5cd3d816ff..a6e0e2aacc 100644
--- a/Makefile.objs
+++ b/Makefile.objs
@@ -39,6 +39,7 @@ libcacard-y += libcacard/vcardt.o
ifeq ($(CONFIG_SOFTMMU),y)
common-obj-y = blockdev.o blockdev-nbd.o block/
+common-obj-y += iothread.o
common-obj-y += net/
common-obj-y += qdev-monitor.o device-hotplug.o
common-obj-$(CONFIG_WIN32) += os-win32.o
diff --git a/VERSION b/VERSION
index 536bc469eb..52be4db49f 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-1.7.50
+2.0.50
diff --git a/async.c b/async.c
index 5fb3fa61df..6930185e64 100644
--- a/async.c
+++ b/async.c
@@ -214,6 +214,7 @@ aio_ctx_finalize(GSource *source)
thread_pool_free(ctx->thread_pool);
aio_set_event_notifier(ctx, &ctx->notifier, NULL);
event_notifier_cleanup(&ctx->notifier);
+ rfifolock_destroy(&ctx->lock);
qemu_mutex_destroy(&ctx->bh_lock);
g_array_free(ctx->pollfds, TRUE);
timerlistgroup_deinit(&ctx->tlg);
@@ -250,6 +251,12 @@ static void aio_timerlist_notify(void *opaque)
aio_notify(opaque);
}
+static void aio_rfifolock_cb(void *opaque)
+{
+ /* Kick owner thread in case they are blocked in aio_poll() */
+ aio_notify(opaque);
+}
+
AioContext *aio_context_new(void)
{
AioContext *ctx;
@@ -257,6 +264,7 @@ AioContext *aio_context_new(void)
ctx->pollfds = g_array_new(FALSE, FALSE, sizeof(GPollFD));
ctx->thread_pool = NULL;
qemu_mutex_init(&ctx->bh_lock);
+ rfifolock_init(&ctx->lock, aio_rfifolock_cb, ctx);
event_notifier_init(&ctx->notifier, false);
aio_set_event_notifier(ctx, &ctx->notifier,
(EventNotifierHandler *)
@@ -275,3 +283,13 @@ void aio_context_unref(AioContext *ctx)
{
g_source_unref(&ctx->source);
}
+
+void aio_context_acquire(AioContext *ctx)
+{
+ rfifolock_lock(&ctx->lock);
+}
+
+void aio_context_release(AioContext *ctx)
+{
+ rfifolock_unlock(&ctx->lock);
+}
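
A caller-side sketch of the new acquire/release API (assuming an AioContext
obtained from aio_context_new() as declared above; an illustration, not part
of the patch):

    AioContext *ctx = aio_context_new();

    /* Take the recursive FIFO lock before driving the context */
    aio_context_acquire(ctx);
    while (aio_poll(ctx, true)) {
        /* handlers run while this thread owns the context */
    }
    aio_context_release(ctx);

If another thread calls aio_context_acquire() meanwhile, it blocks in
rfifolock_lock() and the aio_rfifolock_cb() callback above fires aio_notify(),
so an owner blocked in aio_poll() wakes up and can release the lock.
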
diff --git a/backends/baum.c b/backends/baum.c
index 665107fa9c..759003f623 100644
--- a/backends/baum.c
+++ b/backends/baum.c
@@ -566,9 +566,11 @@ CharDriverState *chr_baum_init(void)
BaumDriverState *baum;
CharDriverState *chr;
brlapi_handle_t *handle;
-#if defined(CONFIG_SDL) && SDL_COMPILEDVERSION < SDL_VERSIONNUM(2, 0, 0)
+#if defined(CONFIG_SDL)
+#if SDL_COMPILEDVERSION < SDL_VERSIONNUM(2, 0, 0)
SDL_SysWMinfo info;
#endif
+#endif
int tty;
baum = g_malloc0(sizeof(BaumDriverState));
@@ -595,13 +597,15 @@ CharDriverState *chr_baum_init(void)
goto fail;
}
-#if defined(CONFIG_SDL) && SDL_COMPILEDVERSION < SDL_VERSIONNUM(2, 0, 0)
+#if defined(CONFIG_SDL)
+#if SDL_COMPILEDVERSION < SDL_VERSIONNUM(2, 0, 0)
memset(&info, 0, sizeof(info));
SDL_VERSION(&info.version);
if (SDL_GetWMInfo(&info))
tty = info.info.x11.wmwindow;
else
#endif
+#endif
tty = BRLAPI_TTY_DEFAULT;
if (brlapi__enterTtyMode(handle, tty, NULL) == -1) {
diff --git a/block-migration.c b/block-migration.c
index 897fdbabb5..56951e09ae 100644
--- a/block-migration.c
+++ b/block-migration.c
@@ -310,13 +310,28 @@ static int mig_save_device_bulk(QEMUFile *f, BlkMigDevState *bmds)
/* Called with iothread lock taken. */
-static void set_dirty_tracking(void)
+static int set_dirty_tracking(void)
{
BlkMigDevState *bmds;
+ int ret;
QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
- bmds->dirty_bitmap = bdrv_create_dirty_bitmap(bmds->bs, BLOCK_SIZE);
+ bmds->dirty_bitmap = bdrv_create_dirty_bitmap(bmds->bs, BLOCK_SIZE,
+ NULL);
+ if (!bmds->dirty_bitmap) {
+ ret = -errno;
+ goto fail;
+ }
}
+ return 0;
+
+fail:
+ QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
+ if (bmds->dirty_bitmap) {
+ bdrv_release_dirty_bitmap(bmds->bs, bmds->dirty_bitmap);
+ }
+ }
+ return ret;
}
static void unset_dirty_tracking(void)
@@ -611,10 +626,17 @@ static int block_save_setup(QEMUFile *f, void *opaque)
block_mig_state.submitted, block_mig_state.transferred);
qemu_mutex_lock_iothread();
- init_blk_migration(f);
/* start track dirty blocks */
- set_dirty_tracking();
+ ret = set_dirty_tracking();
+
+ if (ret) {
+ qemu_mutex_unlock_iothread();
+ return ret;
+ }
+
+ init_blk_migration(f);
+
qemu_mutex_unlock_iothread();
ret = flush_blks(f);
diff --git a/block.c b/block.c
index f1ef4b0109..4745712d22 100644
--- a/block.c
+++ b/block.c
@@ -332,10 +332,21 @@ void bdrv_register(BlockDriver *bdrv)
}
/* create a new block device (by default it is empty) */
-BlockDriverState *bdrv_new(const char *device_name)
+BlockDriverState *bdrv_new(const char *device_name, Error **errp)
{
BlockDriverState *bs;
+ if (bdrv_find(device_name)) {
+ error_setg(errp, "Device with id '%s' already exists",
+ device_name);
+ return NULL;
+ }
+ if (bdrv_find_node(device_name)) {
+ error_setg(errp, "Device with node-name '%s' already exists",
+ device_name);
+ return NULL;
+ }
+
bs = g_malloc0(sizeof(BlockDriverState));
QLIST_INIT(&bs->dirty_bitmaps);
pstrcpy(bs->device_name, sizeof(bs->device_name), device_name);
@@ -767,6 +778,11 @@ static int bdrv_open_flags(BlockDriverState *bs, int flags)
{
int open_flags = flags | BDRV_O_CACHE_WB;
+ /* The backing file of a temporary snapshot is read-only */
+ if (flags & BDRV_O_SNAPSHOT) {
+ open_flags &= ~BDRV_O_RDWR;
+ }
+
/*
* Clear flags that are internal to the block layer before opening the
* image.
@@ -783,38 +799,36 @@ static int bdrv_open_flags(BlockDriverState *bs, int flags)
return open_flags;
}
-static int bdrv_assign_node_name(BlockDriverState *bs,
- const char *node_name,
- Error **errp)
+static void bdrv_assign_node_name(BlockDriverState *bs,
+ const char *node_name,
+ Error **errp)
{
if (!node_name) {
- return 0;
+ return;
}
/* empty string node name is invalid */
if (node_name[0] == '\0') {
error_setg(errp, "Empty node name");
- return -EINVAL;
+ return;
}
/* takes care of avoiding namespaces collisions */
if (bdrv_find(node_name)) {
error_setg(errp, "node-name=%s is conflicting with a device id",
node_name);
- return -EINVAL;
+ return;
}
/* takes care of avoiding duplicates node names */
if (bdrv_find_node(node_name)) {
error_setg(errp, "Duplicate node name");
- return -EINVAL;
+ return;
}
/* copy node name into the bs and insert it into the graph list */
pstrcpy(bs->node_name, sizeof(bs->node_name), node_name);
QTAILQ_INSERT_TAIL(&graph_bdrv_states, bs, node_list);
-
- return 0;
}
/*
@@ -849,9 +863,10 @@ static int bdrv_open_common(BlockDriverState *bs, BlockDriverState *file,
trace_bdrv_open_common(bs, filename ?: "", flags, drv->format_name);
node_name = qdict_get_try_str(options, "node-name");
- ret = bdrv_assign_node_name(bs, node_name, errp);
- if (ret < 0) {
- return ret;
+ bdrv_assign_node_name(bs, node_name, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ return -EINVAL;
}
qdict_del(options, "node-name");
@@ -968,7 +983,7 @@ static int bdrv_file_open(BlockDriverState *bs, const char *filename,
{
BlockDriver *drv;
const char *drvname;
- bool allow_protocol_prefix = false;
+ bool parse_filename = false;
Error *local_err = NULL;
int ret;
@@ -977,7 +992,7 @@ static int bdrv_file_open(BlockDriverState *bs, const char *filename,
filename = qdict_get_try_str(*options, "filename");
} else if (filename && !qdict_haskey(*options, "filename")) {
qdict_put(*options, "filename", qstring_from_str(filename));
- allow_protocol_prefix = true;
+ parse_filename = true;
} else {
error_setg(errp, "Can't specify 'file' and 'filename' options at the "
"same time");
@@ -994,7 +1009,7 @@ static int bdrv_file_open(BlockDriverState *bs, const char *filename,
}
qdict_del(*options, "driver");
} else if (filename) {
- drv = bdrv_find_protocol(filename, allow_protocol_prefix);
+ drv = bdrv_find_protocol(filename, parse_filename);
if (!drv) {
error_setg(errp, "Unknown protocol");
}
@@ -1010,7 +1025,7 @@ static int bdrv_file_open(BlockDriverState *bs, const char *filename,
}
/* Parse the filename and open it */
- if (drv->bdrv_parse_filename && filename) {
+ if (drv->bdrv_parse_filename && parse_filename) {
drv->bdrv_parse_filename(filename, *options, &local_err);
if (local_err) {
error_propagate(errp, local_err);
@@ -1053,14 +1068,14 @@ fail:
*/
int bdrv_open_backing_file(BlockDriverState *bs, QDict *options, Error **errp)
{
- char backing_filename[PATH_MAX];
- int back_flags, ret;
+ char *backing_filename = g_malloc0(PATH_MAX);
+ int back_flags, ret = 0;
BlockDriver *back_drv = NULL;
Error *local_err = NULL;
if (bs->backing_hd != NULL) {
QDECREF(options);
- return 0;
+ goto free_exit;
}
/* NULL means an empty set of options */
@@ -1073,10 +1088,9 @@ int bdrv_open_backing_file(BlockDriverState *bs, QDict *options, Error **errp)
backing_filename[0] = '\0';
} else if (bs->backing_file[0] == '\0' && qdict_size(options) == 0) {
QDECREF(options);
- return 0;
+ goto free_exit;
} else {
- bdrv_get_full_backing_filename(bs, backing_filename,
- sizeof(backing_filename));
+ bdrv_get_full_backing_filename(bs, backing_filename, PATH_MAX);
}
if (bs->backing_format[0] != '\0') {
@@ -1097,7 +1111,7 @@ int bdrv_open_backing_file(BlockDriverState *bs, QDict *options, Error **errp)
error_setg(errp, "Could not open backing file: %s",
error_get_pretty(local_err));
error_free(local_err);
- return ret;
+ goto free_exit;
}
if (bs->backing_hd->file) {
@@ -1108,7 +1122,9 @@ int bdrv_open_backing_file(BlockDriverState *bs, QDict *options, Error **errp)
/* Recalculate the BlockLimits with the backing file */
bdrv_refresh_limits(bs);
- return 0;
+free_exit:
+ g_free(backing_filename);
+ return ret;
}
/*
@@ -1162,6 +1178,75 @@ done:
return ret;
}
+void bdrv_append_temp_snapshot(BlockDriverState *bs, Error **errp)
+{
+ /* TODO: extra byte is a hack to ensure MAX_PATH space on Windows. */
+ char *tmp_filename = g_malloc0(PATH_MAX + 1);
+ int64_t total_size;
+ BlockDriver *bdrv_qcow2;
+ QEMUOptionParameter *create_options;
+ QDict *snapshot_options;
+ BlockDriverState *bs_snapshot;
+ Error *local_err;
+ int ret;
+
+ /* if snapshot, we create a temporary backing file and open it
+ instead of opening 'filename' directly */
+
+ /* Get the required size from the image */
+ total_size = bdrv_getlength(bs);
+ if (total_size < 0) {
+ error_setg_errno(errp, -total_size, "Could not get image size");
+ goto out;
+ }
+ total_size &= BDRV_SECTOR_MASK;
+
+ /* Create the temporary image */
+ ret = get_tmp_filename(tmp_filename, PATH_MAX + 1);
+ if (ret < 0) {
+ error_setg_errno(errp, -ret, "Could not get temporary filename");
+ goto out;
+ }
+
+ bdrv_qcow2 = bdrv_find_format("qcow2");
+ create_options = parse_option_parameters("", bdrv_qcow2->create_options,
+ NULL);
+
+ set_option_parameter_int(create_options, BLOCK_OPT_SIZE, total_size);
+
+ ret = bdrv_create(bdrv_qcow2, tmp_filename, create_options, &local_err);
+ free_option_parameters(create_options);
+ if (ret < 0) {
+ error_setg_errno(errp, -ret, "Could not create temporary overlay "
+ "'%s': %s", tmp_filename,
+ error_get_pretty(local_err));
+ error_free(local_err);
+ goto out;
+ }
+
+ /* Prepare a new options QDict for the temporary file */
+ snapshot_options = qdict_new();
+ qdict_put(snapshot_options, "file.driver",
+ qstring_from_str("file"));
+ qdict_put(snapshot_options, "file.filename",
+ qstring_from_str(tmp_filename));
+
+ bs_snapshot = bdrv_new("", &error_abort);
+ bs_snapshot->is_temporary = 1;
+
+ ret = bdrv_open(&bs_snapshot, NULL, NULL, snapshot_options,
+ bs->open_flags & ~BDRV_O_SNAPSHOT, bdrv_qcow2, &local_err);
+ if (ret < 0) {
+ error_propagate(errp, local_err);
+ goto out;
+ }
+
+ bdrv_append(bs_snapshot, bs);
+
+out:
+ g_free(tmp_filename);
+}
+
/*
* Opens a disk image (raw, qcow2, vmdk, ...)
*
@@ -1182,8 +1267,6 @@ int bdrv_open(BlockDriverState **pbs, const char *filename,
BlockDriver *drv, Error **errp)
{
int ret;
- /* TODO: extra byte is a hack to ensure MAX_PATH space on Windows. */
- char tmp_filename[PATH_MAX + 1];
BlockDriverState *file = NULL, *bs;
const char *drvname;
Error *local_err = NULL;
@@ -1218,7 +1301,7 @@ int bdrv_open(BlockDriverState **pbs, const char *filename,
if (*pbs) {
bs = *pbs;
} else {
- bs = bdrv_new("");
+ bs = bdrv_new("", &error_abort);
}
/* NULL means an empty set of options */
@@ -1243,74 +1326,6 @@ int bdrv_open(BlockDriverState **pbs, const char *filename,
}
}
- /* For snapshot=on, create a temporary qcow2 overlay */
- if (flags & BDRV_O_SNAPSHOT) {
- BlockDriverState *bs1;
- int64_t total_size;
- BlockDriver *bdrv_qcow2;
- QEMUOptionParameter *create_options;
- QDict *snapshot_options;
-
- /* if snapshot, we create a temporary backing file and open it
- instead of opening 'filename' directly */
-
- /* Get the required size from the image */
- QINCREF(options);
- bs1 = NULL;
- ret = bdrv_open(&bs1, filename, NULL, options, BDRV_O_NO_BACKING,
- drv, &local_err);
- if (ret < 0) {
- goto fail;
- }
- total_size = bdrv_getlength(bs1) & BDRV_SECTOR_MASK;
-
- bdrv_unref(bs1);
-
- /* Create the temporary image */
- ret = get_tmp_filename(tmp_filename, sizeof(tmp_filename));
- if (ret < 0) {
- error_setg_errno(errp, -ret, "Could not get temporary filename");
- goto fail;
- }
-
- bdrv_qcow2 = bdrv_find_format("qcow2");
- create_options = parse_option_parameters("", bdrv_qcow2->create_options,
- NULL);
-
- set_option_parameter_int(create_options, BLOCK_OPT_SIZE, total_size);
-
- ret = bdrv_create(bdrv_qcow2, tmp_filename, create_options, &local_err);
- free_option_parameters(create_options);
- if (ret < 0) {
- error_setg_errno(errp, -ret, "Could not create temporary overlay "
- "'%s': %s", tmp_filename,
- error_get_pretty(local_err));
- error_free(local_err);
- local_err = NULL;
- goto fail;
- }
-
- /* Prepare a new options QDict for the temporary file, where user
- * options refer to the backing file */
- if (filename) {
- qdict_put(options, "file.filename", qstring_from_str(filename));
- }
- if (drv) {
- qdict_put(options, "driver", qstring_from_str(drv->format_name));
- }
-
- snapshot_options = qdict_new();
- qdict_put(snapshot_options, "backing", options);
- qdict_flatten(snapshot_options);
-
- bs->options = snapshot_options;
- options = qdict_clone_shallow(bs->options);
-
- filename = tmp_filename;
- drv = bdrv_qcow2;
- bs->is_temporary = 1;
- }
-
/* Open image file without format layer */
if (flags & BDRV_O_RDWR) {
flags |= BDRV_O_ALLOW_RDWR;
@@ -1321,7 +1336,7 @@ int bdrv_open(BlockDriverState **pbs, const char *filename,
bdrv_open_flags(bs, flags | BDRV_O_UNMAP) |
BDRV_O_PROTOCOL, true, &local_err);
if (ret < 0) {
- goto fail;
+ goto unlink_and_fail;
}
/* Find the right image format driver */
@@ -1372,6 +1387,17 @@ int bdrv_open(BlockDriverState **pbs, const char *filename,
}
}
+ /* For snapshot=on, create a temporary qcow2 overlay. bs points to the
+ * temporary snapshot afterwards. */
+ if (flags & BDRV_O_SNAPSHOT) {
+ bdrv_append_temp_snapshot(bs, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ goto close_and_fail;
+ }
+ }
+
+
done:
/* Check if any unknown options were used */
if (options && (qdict_size(options) != 0)) {
@@ -1388,12 +1414,19 @@ done:
ret = -EINVAL;
goto close_and_fail;
}
- QDECREF(options);
if (!bdrv_key_required(bs)) {
bdrv_dev_change_media_cb(bs, true);
+ } else if (!runstate_check(RUN_STATE_PRELAUNCH)
+ && !runstate_check(RUN_STATE_INMIGRATE)
+ && !runstate_check(RUN_STATE_PAUSED)) { /* HACK */
+ error_setg(errp,
+ "Guest must be stopped for opening of encrypted image");
+ ret = -EBUSY;
+ goto close_and_fail;
}
+ QDECREF(options);
*pbs = bs;
return 0;
@@ -2561,6 +2594,10 @@ static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
{
int64_t len;
+ if (size > INT_MAX) {
+ return -EIO;
+ }
+
if (!bdrv_is_inserted(bs))
return -ENOMEDIUM;
@@ -2581,6 +2618,10 @@ static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
static int bdrv_check_request(BlockDriverState *bs, int64_t sector_num,
int nb_sectors)
{
+ if (nb_sectors < 0 || nb_sectors > INT_MAX / BDRV_SECTOR_SIZE) {
+ return -EIO;
+ }
+
return bdrv_check_byte_request(bs, sector_num * BDRV_SECTOR_SIZE,
nb_sectors * BDRV_SECTOR_SIZE);
}
@@ -2662,6 +2703,10 @@ static int bdrv_rw_co(BlockDriverState *bs, int64_t sector_num, uint8_t *buf,
.iov_len = nb_sectors * BDRV_SECTOR_SIZE,
};
+ if (nb_sectors < 0 || nb_sectors > INT_MAX / BDRV_SECTOR_SIZE) {
+ return -EINVAL;
+ }
+
qemu_iovec_init_external(&qiov, &iov, 1);
return bdrv_prwv_co(bs, sector_num << BDRV_SECTOR_BITS,
&qiov, is_write, flags);
@@ -2717,10 +2762,16 @@ int bdrv_write_zeroes(BlockDriverState *bs, int64_t sector_num,
*/
int bdrv_make_zero(BlockDriverState *bs, BdrvRequestFlags flags)
{
- int64_t target_size = bdrv_getlength(bs) / BDRV_SECTOR_SIZE;
+ int64_t target_size;
int64_t ret, nb_sectors, sector_num = 0;
int n;
+ target_size = bdrv_getlength(bs);
+ if (target_size < 0) {
+ return target_size;
+ }
+ target_size /= BDRV_SECTOR_SIZE;
+
for (;;) {
nb_sectors = target_size - sector_num;
if (nb_sectors <= 0) {
@@ -4055,7 +4106,7 @@ int bdrv_debug_remove_breakpoint(BlockDriverState *bs, const char *tag)
int bdrv_debug_resume(BlockDriverState *bs, const char *tag)
{
- while (bs && bs->drv && !bs->drv->bdrv_debug_resume) {
+ while (bs && (!bs->drv || !bs->drv->bdrv_debug_resume)) {
bs = bs->file;
}
@@ -4774,19 +4825,43 @@ flush_parent:
return bdrv_co_flush(bs->file);
}
-void bdrv_invalidate_cache(BlockDriverState *bs)
+void bdrv_invalidate_cache(BlockDriverState *bs, Error **errp)
{
- if (bs->drv && bs->drv->bdrv_invalidate_cache) {
- bs->drv->bdrv_invalidate_cache(bs);
+ Error *local_err = NULL;
+ int ret;
+
+ if (!bs->drv) {
+ return;
+ }
+
+ if (bs->drv->bdrv_invalidate_cache) {
+ bs->drv->bdrv_invalidate_cache(bs, &local_err);
+ } else if (bs->file) {
+ bdrv_invalidate_cache(bs->file, &local_err);
+ }
+ if (local_err) {
+ error_propagate(errp, local_err);
+ return;
+ }
+
+ ret = refresh_total_sectors(bs, bs->total_sectors);
+ if (ret < 0) {
+ error_setg_errno(errp, -ret, "Could not refresh total sector count");
+ return;
}
}
-void bdrv_invalidate_cache_all(void)
+void bdrv_invalidate_cache_all(Error **errp)
{
BlockDriverState *bs;
+ Error *local_err = NULL;
QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
- bdrv_invalidate_cache(bs);
+ bdrv_invalidate_cache(bs, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ return;
+ }
}
}
@@ -5048,7 +5123,8 @@ bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov)
return true;
}
-BdrvDirtyBitmap *bdrv_create_dirty_bitmap(BlockDriverState *bs, int granularity)
+BdrvDirtyBitmap *bdrv_create_dirty_bitmap(BlockDriverState *bs, int granularity,
+ Error **errp)
{
int64_t bitmap_size;
BdrvDirtyBitmap *bitmap;
@@ -5057,7 +5133,13 @@ BdrvDirtyBitmap *bdrv_create_dirty_bitmap(BlockDriverState *bs, int granularity)
granularity >>= BDRV_SECTOR_BITS;
assert(granularity);
- bitmap_size = (bdrv_getlength(bs) >> BDRV_SECTOR_BITS);
+ bitmap_size = bdrv_getlength(bs);
+ if (bitmap_size < 0) {
+ error_setg_errno(errp, -bitmap_size, "could not get length of device");
+ errno = -bitmap_size;
+ return NULL;
+ }
+ bitmap_size >>= BDRV_SECTOR_BITS;
bitmap = g_malloc0(sizeof(BdrvDirtyBitmap));
bitmap->bitmap = hbitmap_alloc(bitmap_size, ffs(granularity) - 1);
QLIST_INSERT_HEAD(&bs->dirty_bitmaps, bitmap, list);
@@ -5390,43 +5472,37 @@ int bdrv_amend_options(BlockDriverState *bs, QEMUOptionParameter *options)
return bs->drv->bdrv_amend_options(bs, options);
}
-/* Used to recurse on single child block filters.
- * Single child block filter will store their child in bs->file.
+/* This function will be called by the bdrv_recurse_is_first_non_filter method
+ * of block filter and by bdrv_is_first_non_filter.
+ * It is used to test if the given bs is the candidate or recurse more in the
+ * node graph.
*/
-bool bdrv_generic_is_first_non_filter(BlockDriverState *bs,
+bool bdrv_recurse_is_first_non_filter(BlockDriverState *bs,
BlockDriverState *candidate)
{
- if (!bs->drv) {
- return false;
- }
-
- if (!bs->drv->authorizations[BS_IS_A_FILTER]) {
- if (bs == candidate) {
- return true;
- } else {
- return false;
- }
- }
-
- if (!bs->drv->authorizations[BS_FILTER_PASS_DOWN]) {
+ /* return false if basic checks fails */
+ if (!bs || !bs->drv) {
return false;
}
- if (!bs->file) {
- return false;
+ /* the code reached a non block filter driver -> check if the bs is
+ * the same as the candidate. It's the recursion termination condition.
+ */
+ if (!bs->drv->is_filter) {
+ return bs == candidate;
}
+ /* Down this path the driver is a block filter driver */
- return bdrv_recurse_is_first_non_filter(bs->file, candidate);
-}
-
-bool bdrv_recurse_is_first_non_filter(BlockDriverState *bs,
- BlockDriverState *candidate)
-{
- if (bs->drv && bs->drv->bdrv_recurse_is_first_non_filter) {
+ /* If the block filter recursion method is defined use it to recurse down
+ * the node graph.
+ */
+ if (bs->drv->bdrv_recurse_is_first_non_filter) {
return bs->drv->bdrv_recurse_is_first_non_filter(bs, candidate);
}
- return bdrv_generic_is_first_non_filter(bs, candidate);
+ /* the driver is a block filter but don't allow to recurse -> return false
+ */
+ return false;
}
/* This function checks if the candidate is the first non filter bs down it's
@@ -5441,6 +5517,7 @@ bool bdrv_is_first_non_filter(BlockDriverState *candidate)
QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
bool perm;
+ /* try to recurse in this top level bs */
perm = bdrv_recurse_is_first_non_filter(bs, candidate);
/* candidate is the first non filter */
diff --git a/block/blkverify.c b/block/blkverify.c
index b98b08bedf..e1c31171c3 100644
--- a/block/blkverify.c
+++ b/block/blkverify.c
@@ -288,6 +288,20 @@ static BlockDriverAIOCB *blkverify_aio_flush(BlockDriverState *bs,
return bdrv_aio_flush(s->test_file, cb, opaque);
}
+static bool blkverify_recurse_is_first_non_filter(BlockDriverState *bs,
+ BlockDriverState *candidate)
+{
+ BDRVBlkverifyState *s = bs->opaque;
+
+ bool perm = bdrv_recurse_is_first_non_filter(bs->file, candidate);
+
+ if (perm) {
+ return true;
+ }
+
+ return bdrv_recurse_is_first_non_filter(s->test_file, candidate);
+}
+
static BlockDriver bdrv_blkverify = {
.format_name = "blkverify",
.protocol_name = "blkverify",
@@ -302,7 +316,8 @@ static BlockDriver bdrv_blkverify = {
.bdrv_aio_writev = blkverify_aio_writev,
.bdrv_aio_flush = blkverify_aio_flush,
- .authorizations = { true, false },
+ .is_filter = true,
+ .bdrv_recurse_is_first_non_filter = blkverify_recurse_is_first_non_filter,
};
static void bdrv_blkverify_init(void)
diff --git a/block/bochs.c b/block/bochs.c
index 4d6403f904..eacf956e7d 100644
--- a/block/bochs.c
+++ b/block/bochs.c
@@ -39,56 +39,41 @@
// not allocated: 0xffffffff
// always little-endian
-struct bochs_header_v1 {
- char magic[32]; // "Bochs Virtual HD Image"
- char type[16]; // "Redolog"
- char subtype[16]; // "Undoable" / "Volatile" / "Growing"
- uint32_t version;
- uint32_t header; // size of header
-
- union {
- struct {
- uint32_t catalog; // num of entries
- uint32_t bitmap; // bitmap size
- uint32_t extent; // extent size
- uint64_t disk; // disk size
- char padding[HEADER_SIZE - 64 - 8 - 20];
- } redolog;
- char padding[HEADER_SIZE - 64 - 8];
- } extra;
-};
-
-// always little-endian
struct bochs_header {
- char magic[32]; // "Bochs Virtual HD Image"
- char type[16]; // "Redolog"
- char subtype[16]; // "Undoable" / "Volatile" / "Growing"
+ char magic[32]; /* "Bochs Virtual HD Image" */
+ char type[16]; /* "Redolog" */
+ char subtype[16]; /* "Undoable" / "Volatile" / "Growing" */
uint32_t version;
- uint32_t header; // size of header
+ uint32_t header; /* size of header */
+
+ uint32_t catalog; /* num of entries */
+ uint32_t bitmap; /* bitmap size */
+ uint32_t extent; /* extent size */
union {
- struct {
- uint32_t catalog; // num of entries
- uint32_t bitmap; // bitmap size
- uint32_t extent; // extent size
- uint32_t reserved; // for ???
- uint64_t disk; // disk size
- char padding[HEADER_SIZE - 64 - 8 - 24];
- } redolog;
- char padding[HEADER_SIZE - 64 - 8];
+ struct {
+ uint32_t reserved; /* for ??? */
+ uint64_t disk; /* disk size */
+ char padding[HEADER_SIZE - 64 - 20 - 12];
+ } QEMU_PACKED redolog;
+ struct {
+ uint64_t disk; /* disk size */
+ char padding[HEADER_SIZE - 64 - 20 - 8];
+ } QEMU_PACKED redolog_v1;
+ char padding[HEADER_SIZE - 64 - 20];
} extra;
-};
+} QEMU_PACKED;
typedef struct BDRVBochsState {
CoMutex lock;
uint32_t *catalog_bitmap;
- int catalog_size;
+ uint32_t catalog_size;
- int data_offset;
+ uint32_t data_offset;
- int bitmap_blocks;
- int extent_blocks;
- int extent_size;
+ uint32_t bitmap_blocks;
+ uint32_t extent_blocks;
+ uint32_t extent_size;
} BDRVBochsState;
static int bochs_probe(const uint8_t *buf, int buf_size, const char *filename)
@@ -112,9 +97,8 @@ static int bochs_open(BlockDriverState *bs, QDict *options, int flags,
Error **errp)
{
BDRVBochsState *s = bs->opaque;
- int i;
+ uint32_t i;
struct bochs_header bochs;
- struct bochs_header_v1 header_v1;
int ret;
bs->read_only = 1; // no write support yet
@@ -134,13 +118,19 @@ static int bochs_open(BlockDriverState *bs, QDict *options, int flags,
}
if (le32_to_cpu(bochs.version) == HEADER_V1) {
- memcpy(&header_v1, &bochs, sizeof(bochs));
- bs->total_sectors = le64_to_cpu(header_v1.extra.redolog.disk) / 512;
+ bs->total_sectors = le64_to_cpu(bochs.extra.redolog_v1.disk) / 512;
} else {
- bs->total_sectors = le64_to_cpu(bochs.extra.redolog.disk) / 512;
+ bs->total_sectors = le64_to_cpu(bochs.extra.redolog.disk) / 512;
+ }
+
+ /* Limit to 1M entries to avoid unbounded allocation. This is what is
+ * needed for the largest image that bximage can create (~8 TB). */
+ s->catalog_size = le32_to_cpu(bochs.catalog);
+ if (s->catalog_size > 0x100000) {
+ error_setg(errp, "Catalog size is too large");
+ return -EFBIG;
}
- s->catalog_size = le32_to_cpu(bochs.extra.redolog.catalog);
s->catalog_bitmap = g_malloc(s->catalog_size * 4);
ret = bdrv_pread(bs->file, le32_to_cpu(bochs.header), s->catalog_bitmap,
@@ -154,10 +144,34 @@ static int bochs_open(BlockDriverState *bs, QDict *options, int flags,
s->data_offset = le32_to_cpu(bochs.header) + (s->catalog_size * 4);
- s->bitmap_blocks = 1 + (le32_to_cpu(bochs.extra.redolog.bitmap) - 1) / 512;
- s->extent_blocks = 1 + (le32_to_cpu(bochs.extra.redolog.extent) - 1) / 512;
+ s->bitmap_blocks = 1 + (le32_to_cpu(bochs.bitmap) - 1) / 512;
+ s->extent_blocks = 1 + (le32_to_cpu(bochs.extent) - 1) / 512;
- s->extent_size = le32_to_cpu(bochs.extra.redolog.extent);
+ s->extent_size = le32_to_cpu(bochs.extent);
+ if (s->extent_size < BDRV_SECTOR_SIZE) {
+ /* bximage actually never creates extents smaller than 4k */
+ error_setg(errp, "Extent size must be at least 512");
+ ret = -EINVAL;
+ goto fail;
+ } else if (!is_power_of_2(s->extent_size)) {
+ error_setg(errp, "Extent size %" PRIu32 " is not a power of two",
+ s->extent_size);
+ ret = -EINVAL;
+ goto fail;
+ } else if (s->extent_size > 0x800000) {
+ error_setg(errp, "Extent size %" PRIu32 " is too large",
+ s->extent_size);
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ if (s->catalog_size < DIV_ROUND_UP(bs->total_sectors,
+ s->extent_size / BDRV_SECTOR_SIZE))
+ {
+ error_setg(errp, "Catalog size is too small for this disk size");
+ ret = -EINVAL;
+ goto fail;
+ }
qemu_co_mutex_init(&s->lock);
return 0;
@@ -170,8 +184,8 @@ fail:
static int64_t seek_to_sector(BlockDriverState *bs, int64_t sector_num)
{
BDRVBochsState *s = bs->opaque;
- int64_t offset = sector_num * 512;
- int64_t extent_index, extent_offset, bitmap_offset;
+ uint64_t offset = sector_num * 512;
+ uint64_t extent_index, extent_offset, bitmap_offset;
char bitmap_entry;
// seek to sector
@@ -182,8 +196,9 @@ static int64_t seek_to_sector(BlockDriverState *bs, int64_t sector_num)
return -1; /* not allocated */
}
- bitmap_offset = s->data_offset + (512 * s->catalog_bitmap[extent_index] *
- (s->extent_blocks + s->bitmap_blocks));
+ bitmap_offset = s->data_offset +
+ (512 * (uint64_t) s->catalog_bitmap[extent_index] *
+ (s->extent_blocks + s->bitmap_blocks));
/* read in bitmap for current extent */
if (bdrv_pread(bs->file, bitmap_offset + (extent_offset / 8),
diff --git a/block/cloop.c b/block/cloop.c
index b907023e10..8457737922 100644
--- a/block/cloop.c
+++ b/block/cloop.c
@@ -26,6 +26,9 @@
#include "qemu/module.h"
#include <zlib.h>
+/* Maximum compressed block size */
+#define MAX_BLOCK_SIZE (64 * 1024 * 1024)
+
typedef struct BDRVCloopState {
CoMutex lock;
uint32_t block_size;
@@ -68,6 +71,26 @@ static int cloop_open(BlockDriverState *bs, QDict *options, int flags,
return ret;
}
s->block_size = be32_to_cpu(s->block_size);
+ if (s->block_size % 512) {
+ error_setg(errp, "block_size %" PRIu32 " must be a multiple of 512",
+ s->block_size);
+ return -EINVAL;
+ }
+ if (s->block_size == 0) {
+ error_setg(errp, "block_size cannot be zero");
+ return -EINVAL;
+ }
+
+ /* cloop's create_compressed_fs.c warns about block sizes beyond 256 KB but
+ * we can accept more. Prevent ridiculous values like 4 GB - 1 since we
+ * need a buffer this big.
+ */
+ if (s->block_size > MAX_BLOCK_SIZE) {
+ error_setg(errp, "block_size %" PRIu32 " must be %u MB or less",
+ s->block_size,
+ MAX_BLOCK_SIZE / (1024 * 1024));
+ return -EINVAL;
+ }
ret = bdrv_pread(bs->file, 128 + 4, &s->n_blocks, 4);
if (ret < 0) {
@@ -76,7 +99,23 @@ static int cloop_open(BlockDriverState *bs, QDict *options, int flags,
s->n_blocks = be32_to_cpu(s->n_blocks);
/* read offsets */
- offsets_size = s->n_blocks * sizeof(uint64_t);
+ if (s->n_blocks > (UINT32_MAX - 1) / sizeof(uint64_t)) {
+ /* Prevent integer overflow */
+ error_setg(errp, "n_blocks %" PRIu32 " must be %zu or less",
+ s->n_blocks,
+ (UINT32_MAX - 1) / sizeof(uint64_t));
+ return -EINVAL;
+ }
+ offsets_size = (s->n_blocks + 1) * sizeof(uint64_t);
+ if (offsets_size > 512 * 1024 * 1024) {
+ /* Prevent ridiculous offsets_size which causes memory allocation to
+ * fail or overflows bdrv_pread() size. In practice the 512 MB
+ * offsets[] limit supports 16 TB images at 256 KB block size.
+ */
+ error_setg(errp, "image requires too many offsets, "
+ "try increasing block size");
+ return -EINVAL;
+ }
s->offsets = g_malloc(offsets_size);
ret = bdrv_pread(bs->file, 128 + 4 + 4, s->offsets, offsets_size);
@@ -84,13 +123,37 @@ static int cloop_open(BlockDriverState *bs, QDict *options, int flags,
goto fail;
}
- for(i=0;i<s->n_blocks;i++) {
+ for (i = 0; i < s->n_blocks + 1; i++) {
+ uint64_t size;
+
s->offsets[i] = be64_to_cpu(s->offsets[i]);
- if (i > 0) {
- uint32_t size = s->offsets[i] - s->offsets[i - 1];
- if (size > max_compressed_block_size) {
- max_compressed_block_size = size;
- }
+ if (i == 0) {
+ continue;
+ }
+
+ if (s->offsets[i] < s->offsets[i - 1]) {
+ error_setg(errp, "offsets not monotonically increasing at "
+ "index %" PRIu32 ", image file is corrupt", i);
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ size = s->offsets[i] - s->offsets[i - 1];
+
+ /* Compressed blocks should be smaller than the uncompressed block size
+ * but maybe compression performed poorly so the compressed block is
+ * actually bigger. Clamp down on unrealistic values to prevent
+ * ridiculous s->compressed_block allocation.
+ */
+ if (size > 2 * MAX_BLOCK_SIZE) {
+ error_setg(errp, "invalid compressed block size at index %" PRIu32
+ ", image file is corrupt", i);
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ if (size > max_compressed_block_size) {
+ max_compressed_block_size = size;
}
}
@@ -180,9 +243,7 @@ static coroutine_fn int cloop_co_read(BlockDriverState *bs, int64_t sector_num,
static void cloop_close(BlockDriverState *bs)
{
BDRVCloopState *s = bs->opaque;
- if (s->n_blocks > 0) {
- g_free(s->offsets);
- }
+ g_free(s->offsets);
g_free(s->compressed_block);
g_free(s->uncompressed_block);
inflateEnd(&s->zstream);
diff --git a/block/commit.c b/block/commit.c
index acec4ac5a8..5c09f4444e 100644
--- a/block/commit.c
+++ b/block/commit.c
@@ -194,7 +194,7 @@ void commit_start(BlockDriverState *bs, BlockDriverState *base,
if ((on_error == BLOCKDEV_ON_ERROR_STOP ||
on_error == BLOCKDEV_ON_ERROR_ENOSPC) &&
!bdrv_iostatus_is_enabled(bs)) {
- error_set(errp, QERR_INVALID_PARAMETER_COMBINATION);
+ error_setg(errp, "Invalid parameter combination");
return;
}
diff --git a/block/curl.c b/block/curl.c
index 3494c6d662..6731d2891f 100644
--- a/block/curl.c
+++ b/block/curl.c
@@ -157,6 +157,11 @@ static size_t curl_read_cb(void *ptr, size_t size, size_t nmemb, void *opaque)
if (!s || !s->orig_buf)
goto read_end;
+ if (s->buf_off >= s->buf_len) {
+ /* buffer full, read nothing */
+ return 0;
+ }
+ realsize = MIN(realsize, s->buf_len - s->buf_off);
memcpy(s->orig_buf + s->buf_off, ptr, realsize);
s->buf_off += realsize;
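
The clamp added to curl_read_cb() is the usual pattern for appending into a fixed-size staging buffer: refuse data when the buffer is already full, otherwise copy at most the remaining space. A self-contained illustration with generic names (not the libcurl or QEMU API):

#include <stddef.h>
#include <string.h>
#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* Copy as much of 'src' as still fits into a partially filled buffer;
 * returns the number of bytes actually consumed. */
static size_t buf_append(char *buf, size_t buf_len, size_t *buf_off,
                         const char *src, size_t src_len)
{
    size_t n;

    if (*buf_off >= buf_len) {
        return 0;               /* buffer full, consume nothing */
    }
    n = MIN(src_len, buf_len - *buf_off);
    memcpy(buf + *buf_off, src, n);
    *buf_off += n;
    return n;
}

int main(void)
{
    char buf[8];
    size_t off = 0;

    printf("%zu\n", buf_append(buf, sizeof(buf), &off, "hello", 5));  /* 5 */
    printf("%zu\n", buf_append(buf, sizeof(buf), &off, "world", 5));  /* 3 */
    printf("%zu\n", buf_append(buf, sizeof(buf), &off, "!", 1));      /* 0 */
    return 0;
}
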
@@ -538,7 +543,7 @@ static int curl_open(BlockDriverState *bs, QDict *options, int flags,
return 0;
out:
- fprintf(stderr, "CURL: Error opening file: %s\n", state->errmsg);
+ error_setg(errp, "CURL: Error opening file: %s", state->errmsg);
curl_easy_cleanup(state->curl);
state->curl = NULL;
out_noclean:
diff --git a/block/dmg.c b/block/dmg.c
index d5e9b1ff01..856402e1f2 100644
--- a/block/dmg.c
+++ b/block/dmg.c
@@ -27,6 +27,14 @@
#include "qemu/module.h"
#include <zlib.h>
+enum {
+ /* Limit chunk sizes to prevent unreasonable amounts of memory being used
+ * or truncating when converting to 32-bit types
+ */
+ DMG_LENGTHS_MAX = 64 * 1024 * 1024, /* 64 MB */
+ DMG_SECTORCOUNTS_MAX = DMG_LENGTHS_MAX / 512,
+};
+
typedef struct BDRVDMGState {
CoMutex lock;
/* each chunk contains a certain number of sectors,
@@ -92,13 +100,44 @@ static int read_uint32(BlockDriverState *bs, int64_t offset, uint32_t *result)
return 0;
}
+/* Increase max chunk sizes, if necessary. This function is used to calculate
+ * the buffer sizes needed for compressed/uncompressed chunk I/O.
+ */
+static void update_max_chunk_size(BDRVDMGState *s, uint32_t chunk,
+ uint32_t *max_compressed_size,
+ uint32_t *max_sectors_per_chunk)
+{
+ uint32_t compressed_size = 0;
+ uint32_t uncompressed_sectors = 0;
+
+ switch (s->types[chunk]) {
+ case 0x80000005: /* zlib compressed */
+ compressed_size = s->lengths[chunk];
+ uncompressed_sectors = s->sectorcounts[chunk];
+ break;
+ case 1: /* copy */
+ uncompressed_sectors = (s->lengths[chunk] + 511) / 512;
+ break;
+ case 2: /* zero */
+ uncompressed_sectors = s->sectorcounts[chunk];
+ break;
+ }
+
+ if (compressed_size > *max_compressed_size) {
+ *max_compressed_size = compressed_size;
+ }
+ if (uncompressed_sectors > *max_sectors_per_chunk) {
+ *max_sectors_per_chunk = uncompressed_sectors;
+ }
+}
+
static int dmg_open(BlockDriverState *bs, QDict *options, int flags,
Error **errp)
{
BDRVDMGState *s = bs->opaque;
- uint64_t info_begin,info_end,last_in_offset,last_out_offset;
+ uint64_t info_begin, info_end, last_in_offset, last_out_offset;
uint32_t count, tmp;
- uint32_t max_compressed_size=1,max_sectors_per_chunk=1,i;
+ uint32_t max_compressed_size = 1, max_sectors_per_chunk = 1, i;
int64_t offset;
int ret;
@@ -160,37 +199,40 @@ static int dmg_open(BlockDriverState *bs, QDict *options, int flags,
goto fail;
}
- if (type == 0x6d697368 && count >= 244) {
- int new_size, chunk_count;
+ if (type == 0x6d697368 && count >= 244) {
+ size_t new_size;
+ uint32_t chunk_count;
offset += 4;
offset += 200;
- chunk_count = (count-204)/40;
- new_size = sizeof(uint64_t) * (s->n_chunks + chunk_count);
- s->types = g_realloc(s->types, new_size/2);
- s->offsets = g_realloc(s->offsets, new_size);
- s->lengths = g_realloc(s->lengths, new_size);
- s->sectors = g_realloc(s->sectors, new_size);
- s->sectorcounts = g_realloc(s->sectorcounts, new_size);
+ chunk_count = (count - 204) / 40;
+ new_size = sizeof(uint64_t) * (s->n_chunks + chunk_count);
+ s->types = g_realloc(s->types, new_size / 2);
+ s->offsets = g_realloc(s->offsets, new_size);
+ s->lengths = g_realloc(s->lengths, new_size);
+ s->sectors = g_realloc(s->sectors, new_size);
+ s->sectorcounts = g_realloc(s->sectorcounts, new_size);
for (i = s->n_chunks; i < s->n_chunks + chunk_count; i++) {
ret = read_uint32(bs, offset, &s->types[i]);
if (ret < 0) {
goto fail;
}
- offset += 4;
- if(s->types[i]!=0x80000005 && s->types[i]!=1 && s->types[i]!=2) {
- if(s->types[i]==0xffffffff) {
- last_in_offset = s->offsets[i-1]+s->lengths[i-1];
- last_out_offset = s->sectors[i-1]+s->sectorcounts[i-1];
- }
- chunk_count--;
- i--;
- offset += 36;
- continue;
- }
- offset += 4;
+ offset += 4;
+ if (s->types[i] != 0x80000005 && s->types[i] != 1 &&
+ s->types[i] != 2) {
+ if (s->types[i] == 0xffffffff && i > 0) {
+ last_in_offset = s->offsets[i - 1] + s->lengths[i - 1];
+ last_out_offset = s->sectors[i - 1] +
+ s->sectorcounts[i - 1];
+ }
+ chunk_count--;
+ i--;
+ offset += 36;
+ continue;
+ }
+ offset += 4;
ret = read_uint64(bs, offset, &s->sectors[i]);
if (ret < 0) {
@@ -205,6 +247,14 @@ static int dmg_open(BlockDriverState *bs, QDict *options, int flags,
}
offset += 8;
+ if (s->sectorcounts[i] > DMG_SECTORCOUNTS_MAX) {
+ error_report("sector count %" PRIu64 " for chunk %u is "
+ "larger than max (%u)",
+ s->sectorcounts[i], i, DMG_SECTORCOUNTS_MAX);
+ ret = -EINVAL;
+ goto fail;
+ }
+
ret = read_uint64(bs, offset, &s->offsets[i]);
if (ret < 0) {
goto fail;
@@ -218,19 +268,25 @@ static int dmg_open(BlockDriverState *bs, QDict *options, int flags,
}
offset += 8;
- if(s->lengths[i]>max_compressed_size)
- max_compressed_size = s->lengths[i];
- if(s->sectorcounts[i]>max_sectors_per_chunk)
- max_sectors_per_chunk = s->sectorcounts[i];
- }
- s->n_chunks+=chunk_count;
- }
+ if (s->lengths[i] > DMG_LENGTHS_MAX) {
+ error_report("length %" PRIu64 " for chunk %u is larger "
+ "than max (%u)",
+ s->lengths[i], i, DMG_LENGTHS_MAX);
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ update_max_chunk_size(s, i, &max_compressed_size,
+ &max_sectors_per_chunk);
+ }
+ s->n_chunks += chunk_count;
+ }
}
/* initialize zlib engine */
- s->compressed_chunk = g_malloc(max_compressed_size+1);
- s->uncompressed_chunk = g_malloc(512*max_sectors_per_chunk);
- if(inflateInit(&s->zstream) != Z_OK) {
+ s->compressed_chunk = g_malloc(max_compressed_size + 1);
+ s->uncompressed_chunk = g_malloc(512 * max_sectors_per_chunk);
+ if (inflateInit(&s->zstream) != Z_OK) {
ret = -EINVAL;
goto fail;
}
@@ -252,83 +308,82 @@ fail:
}
static inline int is_sector_in_chunk(BDRVDMGState* s,
- uint32_t chunk_num,int sector_num)
+ uint32_t chunk_num, uint64_t sector_num)
{
- if(chunk_num>=s->n_chunks || s->sectors[chunk_num]>sector_num ||
- s->sectors[chunk_num]+s->sectorcounts[chunk_num]<=sector_num)
- return 0;
- else
- return -1;
+ if (chunk_num >= s->n_chunks || s->sectors[chunk_num] > sector_num ||
+ s->sectors[chunk_num] + s->sectorcounts[chunk_num] <= sector_num) {
+ return 0;
+ } else {
+ return -1;
+ }
}
-static inline uint32_t search_chunk(BDRVDMGState* s,int sector_num)
+static inline uint32_t search_chunk(BDRVDMGState *s, uint64_t sector_num)
{
/* binary search */
- uint32_t chunk1=0,chunk2=s->n_chunks,chunk3;
- while(chunk1!=chunk2) {
- chunk3 = (chunk1+chunk2)/2;
- if(s->sectors[chunk3]>sector_num)
- chunk2 = chunk3;
- else if(s->sectors[chunk3]+s->sectorcounts[chunk3]>sector_num)
- return chunk3;
- else
- chunk1 = chunk3;
+ uint32_t chunk1 = 0, chunk2 = s->n_chunks, chunk3;
+ while (chunk1 != chunk2) {
+ chunk3 = (chunk1 + chunk2) / 2;
+ if (s->sectors[chunk3] > sector_num) {
+ chunk2 = chunk3;
+ } else if (s->sectors[chunk3] + s->sectorcounts[chunk3] > sector_num) {
+ return chunk3;
+ } else {
+ chunk1 = chunk3;
+ }
}
return s->n_chunks; /* error */
}
-static inline int dmg_read_chunk(BlockDriverState *bs, int sector_num)
+static inline int dmg_read_chunk(BlockDriverState *bs, uint64_t sector_num)
{
BDRVDMGState *s = bs->opaque;
- if(!is_sector_in_chunk(s,s->current_chunk,sector_num)) {
- int ret;
- uint32_t chunk = search_chunk(s,sector_num);
-
- if(chunk>=s->n_chunks)
- return -1;
-
- s->current_chunk = s->n_chunks;
- switch(s->types[chunk]) {
- case 0x80000005: { /* zlib compressed */
- int i;
-
- /* we need to buffer, because only the chunk as whole can be
- * inflated. */
- i=0;
- do {
- ret = bdrv_pread(bs->file, s->offsets[chunk] + i,
- s->compressed_chunk+i, s->lengths[chunk]-i);
- if(ret<0 && errno==EINTR)
- ret=0;
- i+=ret;
- } while(ret>=0 && ret+i<s->lengths[chunk]);
-
- if (ret != s->lengths[chunk])
- return -1;
-
- s->zstream.next_in = s->compressed_chunk;
- s->zstream.avail_in = s->lengths[chunk];
- s->zstream.next_out = s->uncompressed_chunk;
- s->zstream.avail_out = 512*s->sectorcounts[chunk];
- ret = inflateReset(&s->zstream);
- if(ret != Z_OK)
- return -1;
- ret = inflate(&s->zstream, Z_FINISH);
- if(ret != Z_STREAM_END || s->zstream.total_out != 512*s->sectorcounts[chunk])
- return -1;
- break; }
- case 1: /* copy */
- ret = bdrv_pread(bs->file, s->offsets[chunk],
+ if (!is_sector_in_chunk(s, s->current_chunk, sector_num)) {
+ int ret;
+ uint32_t chunk = search_chunk(s, sector_num);
+
+ if (chunk >= s->n_chunks) {
+ return -1;
+ }
+
+ s->current_chunk = s->n_chunks;
+ switch (s->types[chunk]) {
+ case 0x80000005: { /* zlib compressed */
+ /* we need to buffer, because only the chunk as a whole can be
+ * inflated. */
+ ret = bdrv_pread(bs->file, s->offsets[chunk],
+ s->compressed_chunk, s->lengths[chunk]);
+ if (ret != s->lengths[chunk]) {
+ return -1;
+ }
+
+ s->zstream.next_in = s->compressed_chunk;
+ s->zstream.avail_in = s->lengths[chunk];
+ s->zstream.next_out = s->uncompressed_chunk;
+ s->zstream.avail_out = 512 * s->sectorcounts[chunk];
+ ret = inflateReset(&s->zstream);
+ if (ret != Z_OK) {
+ return -1;
+ }
+ ret = inflate(&s->zstream, Z_FINISH);
+ if (ret != Z_STREAM_END ||
+ s->zstream.total_out != 512 * s->sectorcounts[chunk]) {
+ return -1;
+ }
+ break; }
+ case 1: /* copy */
+ ret = bdrv_pread(bs->file, s->offsets[chunk],
s->uncompressed_chunk, s->lengths[chunk]);
- if (ret != s->lengths[chunk])
- return -1;
- break;
- case 2: /* zero */
- memset(s->uncompressed_chunk, 0, 512*s->sectorcounts[chunk]);
- break;
- }
- s->current_chunk = chunk;
+ if (ret != s->lengths[chunk]) {
+ return -1;
+ }
+ break;
+ case 2: /* zero */
+ memset(s->uncompressed_chunk, 0, 512 * s->sectorcounts[chunk]);
+ break;
+ }
+ s->current_chunk = chunk;
}
return 0;
}
@@ -339,12 +394,14 @@ static int dmg_read(BlockDriverState *bs, int64_t sector_num,
BDRVDMGState *s = bs->opaque;
int i;
- for(i=0;i<nb_sectors;i++) {
- uint32_t sector_offset_in_chunk;
- if(dmg_read_chunk(bs, sector_num+i) != 0)
- return -1;
- sector_offset_in_chunk = sector_num+i-s->sectors[s->current_chunk];
- memcpy(buf+i*512,s->uncompressed_chunk+sector_offset_in_chunk*512,512);
+ for (i = 0; i < nb_sectors; i++) {
+ uint32_t sector_offset_in_chunk;
+ if (dmg_read_chunk(bs, sector_num + i) != 0) {
+ return -1;
+ }
+ sector_offset_in_chunk = sector_num + i - s->sectors[s->current_chunk];
+ memcpy(buf + i * 512,
+ s->uncompressed_chunk + sector_offset_in_chunk * 512, 512);
}
return 0;
}
@@ -376,12 +433,12 @@ static void dmg_close(BlockDriverState *bs)
}
static BlockDriver bdrv_dmg = {
- .format_name = "dmg",
- .instance_size = sizeof(BDRVDMGState),
- .bdrv_probe = dmg_probe,
- .bdrv_open = dmg_open,
- .bdrv_read = dmg_co_read,
- .bdrv_close = dmg_close,
+ .format_name = "dmg",
+ .instance_size = sizeof(BDRVDMGState),
+ .bdrv_probe = dmg_probe,
+ .bdrv_open = dmg_open,
+ .bdrv_read = dmg_co_read,
+ .bdrv_close = dmg_close,
};
static void bdrv_dmg_init(void)
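
The cleaned-up search_chunk() is a binary search over a table of (first sector, sector count) pairs sorted by starting sector. A self-contained sketch of that kind of lookup with made-up chunk data; the loop below uses the conventional half-open narrowing, so it is a sketch of the idea rather than a copy of the driver loop:

#include <stdint.h>
#include <stdio.h>

/* Chunks sorted by starting sector; each covers [sector, sector + count). */
static const uint64_t sectors[]      = {0, 100, 300, 1000};
static const uint64_t sectorcounts[] = {100, 200, 700, 24};
static const uint32_t n_chunks = 4;

static uint32_t find_chunk(uint64_t sector_num)
{
    uint32_t lo = 0, hi = n_chunks, mid;

    while (lo != hi) {
        mid = (lo + hi) / 2;
        if (sectors[mid] > sector_num) {
            hi = mid;
        } else if (sectors[mid] + sectorcounts[mid] > sector_num) {
            return mid;                 /* sector_num falls inside this chunk */
        } else {
            lo = mid + 1;
        }
    }
    return n_chunks;                    /* not covered by any chunk */
}

int main(void)
{
    printf("%u %u %u\n", find_chunk(50), find_chunk(999), find_chunk(1024));
    /* expected: 0 2 4 (the last lookup is past the image and reports an error) */
    return 0;
}
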
diff --git a/block/gluster.c b/block/gluster.c
index a44d612923..8836085646 100644
--- a/block/gluster.c
+++ b/block/gluster.c
@@ -80,7 +80,7 @@ static int parse_volume_options(GlusterConf *gconf, char *path)
* 'server' specifies the server where the volume file specification for
* the given volume resides. This can be either hostname, ipv4 address
* or ipv6 address. ipv6 address needs to be within square brackets [ ].
- * If transport type is 'unix', then 'server' field should not be specifed.
+ * If transport type is 'unix', then 'server' field should not be specified.
* The 'socket' field needs to be populated with the path to unix domain
* socket.
*
diff --git a/block/iscsi.c b/block/iscsi.c
index b490e98c05..a30202b4fe 100644
--- a/block/iscsi.c
+++ b/block/iscsi.c
@@ -417,6 +417,10 @@ static int coroutine_fn iscsi_co_flush(BlockDriverState *bs)
IscsiLun *iscsilun = bs->opaque;
struct IscsiTask iTask;
+ if (bs->sg) {
+ return 0;
+ }
+
iscsi_co_init_iscsitask(iscsilun, &iTask);
retry:
@@ -838,7 +842,8 @@ retry:
if (iTask.status == SCSI_STATUS_CHECK_CONDITION &&
iTask.task->sense.key == SCSI_SENSE_ILLEGAL_REQUEST &&
- iTask.task->sense.ascq == SCSI_SENSE_ASCQ_INVALID_OPERATION_CODE) {
+ (iTask.task->sense.ascq == SCSI_SENSE_ASCQ_INVALID_OPERATION_CODE ||
+ iTask.task->sense.ascq == SCSI_SENSE_ASCQ_INVALID_FIELD_IN_CDB)) {
/* WRITE SAME is not supported by the target */
iscsilun->has_write_same = false;
scsi_free_scsi_task(iTask.task);
@@ -1090,7 +1095,7 @@ static struct scsi_task *iscsi_do_inquiry(struct iscsi_context *iscsi, int lun,
*inq = scsi_datain_unmarshall(task);
if (*inq == NULL) {
error_setg(errp, "iSCSI: failed to unmarshall inquiry datain blob");
- goto fail;
+ goto fail_with_err;
}
return task;
@@ -1098,6 +1103,7 @@ static struct scsi_task *iscsi_do_inquiry(struct iscsi_context *iscsi, int lun,
fail:
error_setg(errp, "iSCSI: Inquiry command failed : %s",
iscsi_get_error(iscsi));
+fail_with_err:
if (task != NULL) {
scsi_free_scsi_task(task);
}
@@ -1226,6 +1232,7 @@ static int iscsi_open(BlockDriverState *bs, QDict *options, int flags,
iscsi_readcapacity_sync(iscsilun, &local_err);
if (local_err != NULL) {
error_propagate(errp, local_err);
+ ret = -EINVAL;
goto out;
}
bs->total_sectors = sector_lun2qemu(iscsilun->num_blocks, iscsilun);
@@ -1330,18 +1337,20 @@ static int iscsi_refresh_limits(BlockDriverState *bs)
/* We don't actually refresh here, but just return data queried in
* iscsi_open(): iscsi targets don't change their limits. */
- if (iscsilun->lbp.lbpu || iscsilun->lbp.lbpws) {
+ if (iscsilun->lbp.lbpu) {
if (iscsilun->bl.max_unmap < 0xffffffff) {
bs->bl.max_discard = sector_lun2qemu(iscsilun->bl.max_unmap,
iscsilun);
}
bs->bl.discard_alignment = sector_lun2qemu(iscsilun->bl.opt_unmap_gran,
iscsilun);
+ }
- if (iscsilun->bl.max_ws_len < 0xffffffff) {
- bs->bl.max_write_zeroes = sector_lun2qemu(iscsilun->bl.max_ws_len,
- iscsilun);
- }
+ if (iscsilun->bl.max_ws_len < 0xffffffff) {
+ bs->bl.max_write_zeroes = sector_lun2qemu(iscsilun->bl.max_ws_len,
+ iscsilun);
+ }
+ if (iscsilun->lbp.lbpws) {
bs->bl.write_zeroes_alignment = sector_lun2qemu(iscsilun->bl.opt_unmap_gran,
iscsilun);
}
@@ -1391,7 +1400,7 @@ static int iscsi_create(const char *filename, QEMUOptionParameter *options,
IscsiLun *iscsilun = NULL;
QDict *bs_options;
- bs = bdrv_new("");
+ bs = bdrv_new("", &error_abort);
/* Read out options */
while (options && options->name) {
diff --git a/block/mirror.c b/block/mirror.c
index dd5ee056b4..36f4f8e8bd 100644
--- a/block/mirror.c
+++ b/block/mirror.c
@@ -98,7 +98,14 @@ static void mirror_iteration_done(MirrorOp *op, int ret)
qemu_iovec_destroy(&op->qiov);
g_slice_free(MirrorOp, op);
- qemu_coroutine_enter(s->common.co, NULL);
+
+ /* Enter coroutine when it is not sleeping. The coroutine sleeps to
+ * rate-limit itself. The coroutine will eventually resume since there is
+ * a sleep timeout, so don't wake it early.
+ */
+ if (s->common.busy) {
+ qemu_coroutine_enter(s->common.co, NULL);
+ }
}
static void mirror_write_complete(void *opaque, int ret)
@@ -139,11 +146,12 @@ static void mirror_read_complete(void *opaque, int ret)
mirror_write_complete, op);
}
-static void coroutine_fn mirror_iteration(MirrorBlockJob *s)
+static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
{
BlockDriverState *source = s->common.bs;
int nb_sectors, sectors_per_chunk, nb_chunks;
int64_t end, sector_num, next_chunk, next_sector, hbitmap_next_sector;
+ uint64_t delay_ns;
MirrorOp *op;
s->sector_num = hbitmap_iter_next(&s->hbi);
@@ -231,7 +239,12 @@ static void coroutine_fn mirror_iteration(MirrorBlockJob *s)
nb_chunks += added_chunks;
next_sector += added_sectors;
next_chunk += added_chunks;
- } while (next_sector < end);
+ if (!s->synced && s->common.speed) {
+ delay_ns = ratelimit_calculate_delay(&s->limit, added_sectors);
+ } else {
+ delay_ns = 0;
+ }
+ } while (delay_ns == 0 && next_sector < end);
/* Allocate a MirrorOp that is used as an AIO callback. */
op = g_slice_new(MirrorOp);
@@ -268,6 +281,7 @@ static void coroutine_fn mirror_iteration(MirrorBlockJob *s)
trace_mirror_one_iteration(s, sector_num, nb_sectors);
bdrv_aio_readv(source, sector_num, &op->qiov, nb_sectors,
mirror_read_complete, op);
+ return delay_ns;
}
static void mirror_free_init(MirrorBlockJob *s)
@@ -362,7 +376,7 @@ static void coroutine_fn mirror_run(void *opaque)
bdrv_dirty_iter_init(bs, s->dirty_bitmap, &s->hbi);
last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
for (;;) {
- uint64_t delay_ns;
+ uint64_t delay_ns = 0;
int64_t cnt;
bool should_complete;
@@ -386,8 +400,10 @@ static void coroutine_fn mirror_run(void *opaque)
qemu_coroutine_yield();
continue;
} else if (cnt != 0) {
- mirror_iteration(s);
- continue;
+ delay_ns = mirror_iteration(s);
+ if (delay_ns == 0) {
+ continue;
+ }
}
}
@@ -432,17 +448,10 @@ static void coroutine_fn mirror_run(void *opaque)
}
ret = 0;
- trace_mirror_before_sleep(s, cnt, s->synced);
+ trace_mirror_before_sleep(s, cnt, s->synced, delay_ns);
if (!s->synced) {
/* Publish progress */
s->common.offset = (end - cnt) * BDRV_SECTOR_SIZE;
-
- if (s->common.speed) {
- delay_ns = ratelimit_calculate_delay(&s->limit, sectors_per_chunk);
- } else {
- delay_ns = 0;
- }
-
block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
if (block_job_is_cancelled(&s->common)) {
break;
@@ -596,7 +605,10 @@ static void mirror_start_job(BlockDriverState *bs, BlockDriverState *target,
s->granularity = granularity;
s->buf_size = MAX(buf_size, granularity);
- s->dirty_bitmap = bdrv_create_dirty_bitmap(bs, granularity);
+ s->dirty_bitmap = bdrv_create_dirty_bitmap(bs, granularity, errp);
+ if (!s->dirty_bitmap) {
+ return;
+ }
bdrv_set_enable_write_cache(s->target, true);
bdrv_set_on_error(s->target, on_target_error, on_target_error);
bdrv_iostatus_enable(s->target);
@@ -668,7 +680,7 @@ void commit_active_start(BlockDriverState *bs, BlockDriverState *base,
mirror_start_job(bs, base, speed, 0, 0,
on_error, on_error, cb, opaque, &local_err,
&commit_active_job_driver, false, base);
- if (error_is_set(&local_err)) {
+ if (local_err) {
error_propagate(errp, local_err);
goto error_restore_flags;
}
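
The mirror changes move rate limiting into mirror_iteration(), which keeps queueing chunks until the limiter asks for a pause and then returns that delay for mirror_run() to sleep on. A rough generic sketch of the shape, using a toy limiter rather than QEMU's RateLimit helpers:

#include <stdint.h>
#include <stdio.h>

/* Toy limiter: allow 'quota' units, then report how long to wait. */
typedef struct {
    uint64_t quota;
    uint64_t used;
    uint64_t period_ns;
} Limiter;

static uint64_t limiter_account(Limiter *l, uint64_t n)
{
    l->used += n;
    if (l->used < l->quota) {
        return 0;                       /* under the limit, no delay */
    }
    l->used = 0;
    return l->period_ns;                /* caller should sleep this long */
}

/* One "iteration": queue work until either the data runs out or the limiter
 * requests a pause; return the requested delay (0 means "call me again"). */
static uint64_t do_iteration(Limiter *l, uint64_t *remaining, uint64_t chunk)
{
    uint64_t delay_ns = 0;

    do {
        uint64_t n = *remaining < chunk ? *remaining : chunk;
        *remaining -= n;
        delay_ns = limiter_account(l, n);
    } while (delay_ns == 0 && *remaining > 0);

    return delay_ns;
}

int main(void)
{
    Limiter l = { .quota = 1024, .used = 0, .period_ns = 100000000 };
    uint64_t remaining = 4096;

    while (remaining > 0) {
        uint64_t delay_ns = do_iteration(&l, &remaining, 256);
        printf("%llu bytes remaining, sleep %llu ns\n",
               (unsigned long long)remaining, (unsigned long long)delay_ns);
        /* a real block job would sleep for delay_ns here */
    }
    return 0;
}

The point of the pattern is that only the producer knows how much work it actually queued, so it is the right place to account against the limiter; the outer loop merely honours the delay it is handed back.
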
diff --git a/block/nbd-client.c b/block/nbd-client.c
index 0922b78292..7d698cb619 100644
--- a/block/nbd-client.c
+++ b/block/nbd-client.c
@@ -43,6 +43,17 @@ static void nbd_recv_coroutines_enter_all(NbdClientSession *s)
}
}
+static void nbd_teardown_connection(NbdClientSession *client)
+{
+ /* finish any pending coroutines */
+ shutdown(client->sock, 2);
+ nbd_recv_coroutines_enter_all(client);
+
+ qemu_aio_set_fd_handler(client->sock, NULL, NULL, NULL);
+ closesocket(client->sock);
+ client->sock = -1;
+}
+
static void nbd_reply_ready(void *opaque)
{
NbdClientSession *s = opaque;
@@ -78,7 +89,7 @@ static void nbd_reply_ready(void *opaque)
}
fail:
- nbd_recv_coroutines_enter_all(s);
+ nbd_teardown_connection(s);
}
static void nbd_restart_write(void *opaque)
@@ -324,7 +335,7 @@ int nbd_client_session_co_discard(NbdClientSession *client, int64_t sector_num,
}
-static void nbd_teardown_connection(NbdClientSession *client)
+void nbd_client_session_close(NbdClientSession *client)
{
struct nbd_request request = {
.type = NBD_CMD_DISC,
@@ -332,22 +343,14 @@ static void nbd_teardown_connection(NbdClientSession *client)
.len = 0
};
- nbd_send_request(client->sock, &request);
-
- /* finish any pending coroutines */
- shutdown(client->sock, 2);
- nbd_recv_coroutines_enter_all(client);
-
- qemu_aio_set_fd_handler(client->sock, NULL, NULL, NULL);
- closesocket(client->sock);
- client->sock = -1;
-}
-
-void nbd_client_session_close(NbdClientSession *client)
-{
if (!client->bs) {
return;
}
+ if (client->sock == -1) {
+ return;
+ }
+
+ nbd_send_request(client->sock, &request);
nbd_teardown_connection(client);
client->bs = NULL;
diff --git a/block/nbd.c b/block/nbd.c
index 55124239df..613f2581ae 100644
--- a/block/nbd.c
+++ b/block/nbd.c
@@ -175,7 +175,7 @@ static void nbd_parse_filename(const char *filename, QDict *options,
InetSocketAddress *addr = NULL;
addr = inet_parse(host_spec, errp);
- if (error_is_set(errp)) {
+ if (!addr) {
goto out;
}
diff --git a/block/nfs.c b/block/nfs.c
index ef731f04e3..9fa831f160 100644
--- a/block/nfs.c
+++ b/block/nfs.c
@@ -112,6 +112,9 @@ nfs_co_generic_cb(int ret, struct nfs_context *nfs, void *data,
if (task->ret == 0 && task->st) {
memcpy(task->st, data, sizeof(struct stat));
}
+ if (task->ret < 0) {
+ error_report("NFS Error: %s", nfs_get_error(nfs));
+ }
if (task->co) {
task->bh = qemu_bh_new(nfs_co_generic_bh_cb, task);
qemu_bh_schedule(task->bh);
@@ -340,7 +343,7 @@ static int nfs_file_open(BlockDriverState *bs, QDict *options, int flags,
opts = qemu_opts_create(&runtime_opts, NULL, 0, &error_abort);
qemu_opts_absorb_qdict(opts, options, &local_err);
- if (error_is_set(&local_err)) {
+ if (local_err) {
error_propagate(errp, local_err);
return -EINVAL;
}
diff --git a/block/parallels.c b/block/parallels.c
index 3f588f58dc..1a5bd350b3 100644
--- a/block/parallels.c
+++ b/block/parallels.c
@@ -49,9 +49,9 @@ typedef struct BDRVParallelsState {
CoMutex lock;
uint32_t *catalog_bitmap;
- int catalog_size;
+ unsigned int catalog_size;
- int tracks;
+ unsigned int tracks;
} BDRVParallelsState;
static int parallels_probe(const uint8_t *buf, int buf_size, const char *filename)
@@ -93,8 +93,18 @@ static int parallels_open(BlockDriverState *bs, QDict *options, int flags,
bs->total_sectors = le32_to_cpu(ph.nb_sectors);
s->tracks = le32_to_cpu(ph.tracks);
+ if (s->tracks == 0) {
+ error_setg(errp, "Invalid image: Zero sectors per track");
+ ret = -EINVAL;
+ goto fail;
+ }
s->catalog_size = le32_to_cpu(ph.catalog_entries);
+ if (s->catalog_size > INT_MAX / 4) {
+ error_setg(errp, "Catalog too large");
+ ret = -EFBIG;
+ goto fail;
+ }
s->catalog_bitmap = g_malloc(s->catalog_size * 4);
ret = bdrv_pread(bs->file, 64, s->catalog_bitmap, s->catalog_size * 4);
diff --git a/block/qcow.c b/block/qcow.c
index 1e128becf0..d5a7d5fd1e 100644
--- a/block/qcow.c
+++ b/block/qcow.c
@@ -723,7 +723,7 @@ static int qcow_create(const char *filename, QEMUOptionParameter *options,
backing_file = NULL;
}
header.cluster_bits = 9; /* 512 byte cluster to avoid copying
- unmodifyed sectors */
+ unmodified sectors */
header.l2_bits = 12; /* 32 KB L2 tables */
} else {
header.cluster_bits = 12; /* 4 KB clusters */
diff --git a/block/qcow2-cluster.c b/block/qcow2-cluster.c
index 36c1bed350..331ab08022 100644
--- a/block/qcow2-cluster.c
+++ b/block/qcow2-cluster.c
@@ -55,7 +55,7 @@ int qcow2_grow_l1_table(BlockDriverState *bs, uint64_t min_size,
}
}
- if (new_l1_size > INT_MAX) {
+ if (new_l1_size > INT_MAX / sizeof(uint64_t)) {
return -EFBIG;
}
@@ -359,15 +359,6 @@ static int coroutine_fn copy_sectors(BlockDriverState *bs,
struct iovec iov;
int n, ret;
- /*
- * If this is the last cluster and it is only partially used, we must only
- * copy until the end of the image, or bdrv_check_request will fail for the
- * bdrv_read/write calls below.
- */
- if (start_sect + n_end > bs->total_sectors) {
- n_end = bs->total_sectors - start_sect;
- }
-
n = n_end - n_start;
if (n <= 0) {
return 0;
@@ -380,6 +371,10 @@ static int coroutine_fn copy_sectors(BlockDriverState *bs,
BLKDBG_EVENT(bs->file, BLKDBG_COW_READ);
+ if (!bs->drv) {
+ return -ENOMEDIUM;
+ }
+
/* Call .bdrv_co_readv() directly instead of using the public block-layer
* interface. This avoids double I/O throttling and request tracking,
* which can lead to deadlock when block layer copy-on-read is enabled.
@@ -496,6 +491,7 @@ int qcow2_get_cluster_offset(BlockDriverState *bs, uint64_t offset,
break;
case QCOW2_CLUSTER_ZERO:
if (s->qcow_version < 3) {
+ qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
return -EIO;
}
c = count_contiguous_clusters(nb_clusters, s->cluster_size,
diff --git a/block/qcow2-refcount.c b/block/qcow2-refcount.c
index 8712d8bd54..a37ee45016 100644
--- a/block/qcow2-refcount.c
+++ b/block/qcow2-refcount.c
@@ -28,7 +28,7 @@
#include "qemu/range.h"
#include "qapi/qmp/types.h"
-static int64_t alloc_clusters_noref(BlockDriverState *bs, int64_t size);
+static int64_t alloc_clusters_noref(BlockDriverState *bs, uint64_t size);
static int QEMU_WARN_UNUSED_RESULT update_refcount(BlockDriverState *bs,
int64_t offset, int64_t length,
int addend, enum qcow2_discard_type type);
@@ -40,8 +40,10 @@ static int QEMU_WARN_UNUSED_RESULT update_refcount(BlockDriverState *bs,
int qcow2_refcount_init(BlockDriverState *bs)
{
BDRVQcowState *s = bs->opaque;
- int ret, refcount_table_size2, i;
+ unsigned int refcount_table_size2, i;
+ int ret;
+ assert(s->refcount_table_size <= INT_MAX / sizeof(uint64_t));
refcount_table_size2 = s->refcount_table_size * sizeof(uint64_t);
s->refcount_table = g_malloc(refcount_table_size2);
if (s->refcount_table_size > 0) {
@@ -87,7 +89,7 @@ static int load_refcount_block(BlockDriverState *bs,
static int get_refcount(BlockDriverState *bs, int64_t cluster_index)
{
BDRVQcowState *s = bs->opaque;
- int refcount_table_index, block_index;
+ uint64_t refcount_table_index, block_index;
int64_t refcount_block_offset;
int ret;
uint16_t *refcount_block;
@@ -96,7 +98,8 @@ static int get_refcount(BlockDriverState *bs, int64_t cluster_index)
refcount_table_index = cluster_index >> (s->cluster_bits - REFCOUNT_SHIFT);
if (refcount_table_index >= s->refcount_table_size)
return 0;
- refcount_block_offset = s->refcount_table[refcount_table_index];
+ refcount_block_offset =
+ s->refcount_table[refcount_table_index] & REFT_OFFSET_MASK;
if (!refcount_block_offset)
return 0;
@@ -191,10 +194,11 @@ static int alloc_refcount_block(BlockDriverState *bs,
* they can describe them themselves.
*
* - We need to consider that at this point we are inside update_refcounts
- * and doing the initial refcount increase. This means that some clusters
- * have already been allocated by the caller, but their refcount isn't
- * accurate yet. free_cluster_index tells us where this allocation ends
- * as long as we don't overwrite it by freeing clusters.
+ * and potentially doing an initial refcount increase. This means that
+ * some clusters have already been allocated by the caller, but their
+ * refcount isn't accurate yet. If we allocate clusters for metadata, we
+ * need to return -EAGAIN to signal the caller that it needs to restart
+ * the search for free clusters.
*
* - alloc_clusters_noref and qcow2_free_clusters may load a different
* refcount block into the cache
@@ -279,7 +283,10 @@ static int alloc_refcount_block(BlockDriverState *bs,
}
s->refcount_table[refcount_table_index] = new_block;
- return 0;
+
+ /* The new refcount block may be where the caller intended to put its
+ * data, so let it restart the search. */
+ return -EAGAIN;
}
ret = qcow2_cache_put(bs, s->refcount_block_cache, (void**) refcount_block);
@@ -302,8 +309,11 @@ static int alloc_refcount_block(BlockDriverState *bs,
/* Calculate the number of refcount blocks needed so far */
uint64_t refcount_block_clusters = 1 << (s->cluster_bits - REFCOUNT_SHIFT);
- uint64_t blocks_used = (s->free_cluster_index +
- refcount_block_clusters - 1) / refcount_block_clusters;
+ uint64_t blocks_used = DIV_ROUND_UP(cluster_index, refcount_block_clusters);
+
+ if (blocks_used > QCOW_MAX_REFTABLE_SIZE / sizeof(uint64_t)) {
+ return -EFBIG;
+ }
/* And now we need at least one block more for the new metadata */
uint64_t table_size = next_refcount_table_size(s, blocks_used + 1);
@@ -336,8 +346,6 @@ static int alloc_refcount_block(BlockDriverState *bs,
uint16_t *new_blocks = g_malloc0(blocks_clusters * s->cluster_size);
uint64_t *new_table = g_malloc0(table_size * sizeof(uint64_t));
- assert(meta_offset >= (s->free_cluster_index * s->cluster_size));
-
/* Fill the new refcount table */
memcpy(new_table, s->refcount_table,
s->refcount_table_size * sizeof(uint64_t));
@@ -400,18 +408,19 @@ static int alloc_refcount_block(BlockDriverState *bs,
s->refcount_table_size = table_size;
s->refcount_table_offset = table_offset;
- /* Free old table. Remember, we must not change free_cluster_index */
- uint64_t old_free_cluster_index = s->free_cluster_index;
+ /* Free old table. */
qcow2_free_clusters(bs, old_table_offset, old_table_size * sizeof(uint64_t),
QCOW2_DISCARD_OTHER);
- s->free_cluster_index = old_free_cluster_index;
ret = load_refcount_block(bs, new_block, (void**) refcount_block);
if (ret < 0) {
return ret;
}
- return 0;
+ /* If we were trying to do the initial refcount update for some cluster
+ * allocation, we might have used the same clusters to store newly
+ * allocated metadata. Make the caller search some new space. */
+ return -EAGAIN;
fail_table:
g_free(new_table);
@@ -626,15 +635,16 @@ int qcow2_update_cluster_refcount(BlockDriverState *bs,
/* return < 0 if error */
-static int64_t alloc_clusters_noref(BlockDriverState *bs, int64_t size)
+static int64_t alloc_clusters_noref(BlockDriverState *bs, uint64_t size)
{
BDRVQcowState *s = bs->opaque;
- int i, nb_clusters, refcount;
+ uint64_t i, nb_clusters;
+ int refcount;
nb_clusters = size_to_clusters(s, size);
retry:
for(i = 0; i < nb_clusters; i++) {
- int64_t next_cluster_index = s->free_cluster_index++;
+ uint64_t next_cluster_index = s->free_cluster_index++;
refcount = get_refcount(bs, next_cluster_index);
if (refcount < 0) {
@@ -651,18 +661,21 @@ retry:
return (s->free_cluster_index - nb_clusters) << s->cluster_bits;
}
-int64_t qcow2_alloc_clusters(BlockDriverState *bs, int64_t size)
+int64_t qcow2_alloc_clusters(BlockDriverState *bs, uint64_t size)
{
int64_t offset;
int ret;
BLKDBG_EVENT(bs->file, BLKDBG_CLUSTER_ALLOC);
- offset = alloc_clusters_noref(bs, size);
- if (offset < 0) {
- return offset;
- }
+ do {
+ offset = alloc_clusters_noref(bs, size);
+ if (offset < 0) {
+ return offset;
+ }
+
+ ret = update_refcount(bs, offset, size, 1, QCOW2_DISCARD_NEVER);
+ } while (ret == -EAGAIN);
- ret = update_refcount(bs, offset, size, 1, QCOW2_DISCARD_NEVER);
if (ret < 0) {
return ret;
}
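
With alloc_refcount_block() now returning -EAGAIN when growing the refcount metadata may have consumed the clusters the caller was about to take, the allocators simply restart their search. A stripped-down sketch of that retry loop, with placeholder helpers standing in for alloc_clusters_noref() and update_refcount():

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Placeholders standing in for the real search/reserve steps. */
static int64_t find_free_clusters(uint64_t size);
static int reserve_clusters(int64_t offset, uint64_t size);

static int64_t alloc_clusters(uint64_t size)
{
    int64_t offset;
    int ret;

    do {
        offset = find_free_clusters(size);
        if (offset < 0) {
            return offset;              /* propagate search errors */
        }
        /* May return -EAGAIN if reserving required new metadata that
         * landed exactly where we wanted to allocate; just search again. */
        ret = reserve_clusters(offset, size);
    } while (ret == -EAGAIN);

    return ret < 0 ? ret : offset;
}

/* Trivial fakes so the sketch runs: the first reservation "collides". */
static int64_t next_free = 0x10000;
static int collisions = 1;

static int64_t find_free_clusters(uint64_t size)
{
    int64_t off = next_free;
    next_free += size;
    return off;
}

static int reserve_clusters(int64_t offset, uint64_t size)
{
    (void)offset; (void)size;
    return collisions-- > 0 ? -EAGAIN : 0;
}

int main(void)
{
    printf("allocated at 0x%llx\n",
           (unsigned long long)alloc_clusters(0x10000));
    return 0;
}

The important property is that each pass restarts the free-cluster search from scratch, so clusters taken for metadata in the meantime can never be handed out twice.
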
@@ -675,7 +688,6 @@ int qcow2_alloc_clusters_at(BlockDriverState *bs, uint64_t offset,
{
BDRVQcowState *s = bs->opaque;
uint64_t cluster_index;
- uint64_t old_free_cluster_index;
uint64_t i;
int refcount, ret;
@@ -684,30 +696,28 @@ int qcow2_alloc_clusters_at(BlockDriverState *bs, uint64_t offset,
return 0;
}
- /* Check how many clusters there are free */
- cluster_index = offset >> s->cluster_bits;
- for(i = 0; i < nb_clusters; i++) {
- refcount = get_refcount(bs, cluster_index++);
+ do {
+ /* Check how many clusters there are free */
+ cluster_index = offset >> s->cluster_bits;
+ for(i = 0; i < nb_clusters; i++) {
+ refcount = get_refcount(bs, cluster_index++);
- if (refcount < 0) {
- return refcount;
- } else if (refcount != 0) {
- break;
+ if (refcount < 0) {
+ return refcount;
+ } else if (refcount != 0) {
+ break;
+ }
}
- }
- /* And then allocate them */
- old_free_cluster_index = s->free_cluster_index;
- s->free_cluster_index = cluster_index + i;
+ /* And then allocate them */
+ ret = update_refcount(bs, offset, i << s->cluster_bits, 1,
+ QCOW2_DISCARD_NEVER);
+ } while (ret == -EAGAIN);
- ret = update_refcount(bs, offset, i << s->cluster_bits, 1,
- QCOW2_DISCARD_NEVER);
if (ret < 0) {
return ret;
}
- s->free_cluster_index = old_free_cluster_index;
-
return i;
}
@@ -1010,8 +1020,7 @@ static void inc_refcounts(BlockDriverState *bs,
int64_t offset, int64_t size)
{
BDRVQcowState *s = bs->opaque;
- int64_t start, last, cluster_offset;
- int k;
+ uint64_t start, last, cluster_offset, k;
if (size <= 0)
return;
@@ -1021,11 +1030,7 @@ static void inc_refcounts(BlockDriverState *bs,
for(cluster_offset = start; cluster_offset <= last;
cluster_offset += s->cluster_size) {
k = cluster_offset >> s->cluster_bits;
- if (k < 0) {
- fprintf(stderr, "ERROR: invalid cluster offset=0x%" PRIx64 "\n",
- cluster_offset);
- res->corruptions++;
- } else if (k >= refcount_table_size) {
+ if (k >= refcount_table_size) {
fprintf(stderr, "Warning: cluster offset=0x%" PRIx64 " is after "
"the end of the image file, can't properly check refcounts.\n",
cluster_offset);
@@ -1382,7 +1387,7 @@ static int write_reftable_entry(BlockDriverState *bs, int rt_index)
* does _not_ decrement the reference count for the currently occupied cluster.
*
* This function prints an informative message to stderr on error (and returns
- * -errno); on success, 0 is returned.
+ * -errno); on success, the offset of the newly allocated cluster is returned.
*/
static int64_t realloc_refcount_block(BlockDriverState *bs, int reftable_index,
uint64_t offset)
@@ -1398,14 +1403,14 @@ static int64_t realloc_refcount_block(BlockDriverState *bs, int reftable_index,
fprintf(stderr, "Could not allocate new cluster: %s\n",
strerror(-new_offset));
ret = new_offset;
- goto fail;
+ goto done;
}
/* fetch current refcount block content */
ret = qcow2_cache_get(bs, s->refcount_block_cache, offset, &refcount_block);
if (ret < 0) {
fprintf(stderr, "Could not fetch refcount block: %s\n", strerror(-ret));
- goto fail;
+ goto fail_free_cluster;
}
/* new block has not yet been entered into refcount table, therefore it is
@@ -1416,8 +1421,7 @@ static int64_t realloc_refcount_block(BlockDriverState *bs, int reftable_index,
"check failed: %s\n", strerror(-ret));
/* the image will be marked corrupt, so don't even attempt on freeing
* the cluster */
- new_offset = 0;
- goto fail;
+ goto done;
}
/* write to new block */
@@ -1425,7 +1429,7 @@ static int64_t realloc_refcount_block(BlockDriverState *bs, int reftable_index,
s->cluster_sectors);
if (ret < 0) {
fprintf(stderr, "Could not write refcount block: %s\n", strerror(-ret));
- goto fail;
+ goto fail_free_cluster;
}
/* update refcount table */
@@ -1435,24 +1439,27 @@ static int64_t realloc_refcount_block(BlockDriverState *bs, int reftable_index,
if (ret < 0) {
fprintf(stderr, "Could not update refcount table: %s\n",
strerror(-ret));
- goto fail;
+ goto fail_free_cluster;
}
-fail:
- if (new_offset && (ret < 0)) {
- qcow2_free_clusters(bs, new_offset, s->cluster_size,
- QCOW2_DISCARD_ALWAYS);
- }
+ goto done;
+
+fail_free_cluster:
+ qcow2_free_clusters(bs, new_offset, s->cluster_size, QCOW2_DISCARD_OTHER);
+
+done:
if (refcount_block) {
- if (ret < 0) {
- qcow2_cache_put(bs, s->refcount_block_cache, &refcount_block);
- } else {
- ret = qcow2_cache_put(bs, s->refcount_block_cache, &refcount_block);
- }
+ /* This should never fail, as it would only do so if the given refcount
+ * block cannot be found in the cache. As this is impossible as long as
+ * there are no bugs, assert the success. */
+ int tmp = qcow2_cache_put(bs, s->refcount_block_cache, &refcount_block);
+ assert(tmp == 0);
}
+
if (ret < 0) {
return ret;
}
+
return new_offset;
}
@@ -1466,14 +1473,19 @@ int qcow2_check_refcounts(BlockDriverState *bs, BdrvCheckResult *res,
BdrvCheckMode fix)
{
BDRVQcowState *s = bs->opaque;
- int64_t size, i, highest_cluster;
- int nb_clusters, refcount1, refcount2;
+ int64_t size, i, highest_cluster, nb_clusters;
+ int refcount1, refcount2;
QCowSnapshot *sn;
uint16_t *refcount_table;
int ret;
size = bdrv_getlength(bs->file);
nb_clusters = size_to_clusters(s, size);
+ if (nb_clusters > INT_MAX) {
+ res->check_errors++;
+ return -EFBIG;
+ }
+
refcount_table = g_malloc0(nb_clusters * sizeof(uint16_t));
res->bfi.total_clusters =
diff --git a/block/qcow2-snapshot.c b/block/qcow2-snapshot.c
index 2fc6320aa1..0aa9defbe2 100644
--- a/block/qcow2-snapshot.c
+++ b/block/qcow2-snapshot.c
@@ -26,31 +26,6 @@
#include "block/block_int.h"
#include "block/qcow2.h"
-typedef struct QEMU_PACKED QCowSnapshotHeader {
- /* header is 8 byte aligned */
- uint64_t l1_table_offset;
-
- uint32_t l1_size;
- uint16_t id_str_size;
- uint16_t name_size;
-
- uint32_t date_sec;
- uint32_t date_nsec;
-
- uint64_t vm_clock_nsec;
-
- uint32_t vm_state_size;
- uint32_t extra_data_size; /* for extension */
- /* extra data follows */
- /* id_str follows */
- /* name follows */
-} QCowSnapshotHeader;
-
-typedef struct QEMU_PACKED QCowSnapshotExtraData {
- uint64_t vm_state_size_large;
- uint64_t disk_size;
-} QCowSnapshotExtraData;
-
void qcow2_free_snapshots(BlockDriverState *bs)
{
BDRVQcowState *s = bs->opaque;
@@ -141,8 +116,14 @@ int qcow2_read_snapshots(BlockDriverState *bs)
}
offset += name_size;
sn->name[name_size] = '\0';
+
+ if (offset - s->snapshots_offset > QCOW_MAX_SNAPSHOTS_SIZE) {
+ ret = -EFBIG;
+ goto fail;
+ }
}
+ assert(offset - s->snapshots_offset <= INT_MAX);
s->snapshots_size = offset - s->snapshots_offset;
return 0;
@@ -163,7 +144,7 @@ static int qcow2_write_snapshots(BlockDriverState *bs)
uint32_t nb_snapshots;
uint64_t snapshots_offset;
} QEMU_PACKED header_data;
- int64_t offset, snapshots_offset;
+ int64_t offset, snapshots_offset = 0;
int ret;
/* compute the size of the snapshots */
@@ -175,7 +156,14 @@ static int qcow2_write_snapshots(BlockDriverState *bs)
offset += sizeof(extra);
offset += strlen(sn->id_str);
offset += strlen(sn->name);
+
+ if (offset > QCOW_MAX_SNAPSHOTS_SIZE) {
+ ret = -EFBIG;
+ goto fail;
+ }
}
+
+ assert(offset <= INT_MAX);
snapshots_size = offset;
/* Allocate space for the new snapshot list */
@@ -357,6 +345,10 @@ int qcow2_snapshot_create(BlockDriverState *bs, QEMUSnapshotInfo *sn_info)
uint64_t *l1_table = NULL;
int64_t l1_table_offset;
+ if (s->nb_snapshots >= QCOW_MAX_SNAPSHOTS) {
+ return -EFBIG;
+ }
+
memset(sn, 0, sizeof(*sn));
/* Generate an ID if it wasn't passed */
@@ -701,7 +693,11 @@ int qcow2_snapshot_load_tmp(BlockDriverState *bs,
sn = &s->snapshots[snapshot_index];
/* Allocate and read in the snapshot's L1 table */
- new_l1_bytes = s->l1_size * sizeof(uint64_t);
+ if (sn->l1_size > QCOW_MAX_L1_SIZE) {
+ error_setg(errp, "Snapshot L1 table too large");
+ return -EFBIG;
+ }
+ new_l1_bytes = sn->l1_size * sizeof(uint64_t);
new_l1_table = g_malloc0(align_offset(new_l1_bytes, 512));
ret = bdrv_pread(bs->file, sn->l1_table_offset, new_l1_table, new_l1_bytes);
diff --git a/block/qcow2.c b/block/qcow2.c
index cfe80befa0..e903d971c3 100644
--- a/block/qcow2.c
+++ b/block/qcow2.c
@@ -269,12 +269,15 @@ static int qcow2_mark_clean(BlockDriverState *bs)
BDRVQcowState *s = bs->opaque;
if (s->incompatible_features & QCOW2_INCOMPAT_DIRTY) {
- int ret = bdrv_flush(bs);
+ int ret;
+
+ s->incompatible_features &= ~QCOW2_INCOMPAT_DIRTY;
+
+ ret = bdrv_flush(bs);
if (ret < 0) {
return ret;
}
- s->incompatible_features &= ~QCOW2_INCOMPAT_DIRTY;
return qcow2_update_header(bs);
}
return 0;
@@ -329,6 +332,32 @@ static int qcow2_check(BlockDriverState *bs, BdrvCheckResult *result,
return ret;
}
+static int validate_table_offset(BlockDriverState *bs, uint64_t offset,
+ uint64_t entries, size_t entry_len)
+{
+ BDRVQcowState *s = bs->opaque;
+ uint64_t size;
+
+ /* Use signed INT64_MAX as the maximum even for uint64_t header fields,
+ * because values will be passed to qemu functions taking int64_t. */
+ if (entries > INT64_MAX / entry_len) {
+ return -EINVAL;
+ }
+
+ size = entries * entry_len;
+
+ if (INT64_MAX - size < offset) {
+ return -EINVAL;
+ }
+
+ /* Tables must be cluster aligned */
+ if (offset & (s->cluster_size - 1)) {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
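
validate_table_offset() keeps every header-derived table strictly inside int64_t range and cluster-aligned before the offsets reach functions that take int64_t. A small standalone version of the same three tests, exercised with made-up header values:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

static int validate_table(uint64_t offset, uint64_t entries, size_t entry_len,
                          uint64_t cluster_size)
{
    uint64_t size;

    /* entries * entry_len must not exceed INT64_MAX ... */
    if (entries > INT64_MAX / entry_len) {
        return -1;
    }
    size = entries * entry_len;

    /* ... and neither may offset + size ... */
    if (INT64_MAX - size < offset) {
        return -1;
    }

    /* ... and the table must start on a cluster boundary. */
    if (offset & (cluster_size - 1)) {
        return -1;
    }
    return 0;
}

int main(void)
{
    uint64_t cluster_size = 65536;

    /* plausible L1 table: offset 0x30000, 1024 entries of 8 bytes -> ok */
    printf("%d\n", validate_table(0x30000, 1024, 8, cluster_size));
    /* entry count chosen so entries * 8 would overflow -> rejected */
    printf("%d\n", validate_table(0x30000, UINT64_MAX / 4, 8, cluster_size));
    /* misaligned offset -> rejected */
    printf("%d\n", validate_table(0x30001, 1024, 8, cluster_size));
    return 0;
}
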
static QemuOptsList qcow2_runtime_opts = {
.name = "qcow2",
.head = QTAILQ_HEAD_INITIALIZER(qcow2_runtime_opts.head),
@@ -419,7 +448,8 @@ static int qcow2_open(BlockDriverState *bs, QDict *options, int flags,
Error **errp)
{
BDRVQcowState *s = bs->opaque;
- int len, i, ret = 0;
+ unsigned int len, i;
+ int ret = 0;
QCowHeader header;
QemuOpts *opts;
Error *local_err = NULL;
@@ -460,6 +490,18 @@ static int qcow2_open(BlockDriverState *bs, QDict *options, int flags,
s->qcow_version = header.version;
+ /* Initialise cluster size */
+ if (header.cluster_bits < MIN_CLUSTER_BITS ||
+ header.cluster_bits > MAX_CLUSTER_BITS) {
+ error_setg(errp, "Unsupported cluster size: 2^%i", header.cluster_bits);
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ s->cluster_bits = header.cluster_bits;
+ s->cluster_size = 1 << s->cluster_bits;
+ s->cluster_sectors = 1 << (s->cluster_bits - 9);
+
/* Initialise version 3 header fields */
if (header.version == 2) {
header.incompatible_features = 0;
@@ -473,6 +515,18 @@ static int qcow2_open(BlockDriverState *bs, QDict *options, int flags,
be64_to_cpus(&header.autoclear_features);
be32_to_cpus(&header.refcount_order);
be32_to_cpus(&header.header_length);
+
+ if (header.header_length < 104) {
+ error_setg(errp, "qcow2 header too short");
+ ret = -EINVAL;
+ goto fail;
+ }
+ }
+
+ if (header.header_length > s->cluster_size) {
+ error_setg(errp, "qcow2 header exceeds cluster size");
+ ret = -EINVAL;
+ goto fail;
}
if (header.header_length > sizeof(header)) {
@@ -487,6 +541,12 @@ static int qcow2_open(BlockDriverState *bs, QDict *options, int flags,
}
}
+ if (header.backing_file_offset > s->cluster_size) {
+ error_setg(errp, "Invalid backing file offset");
+ ret = -EINVAL;
+ goto fail;
+ }
+
if (header.backing_file_offset) {
ext_end = header.backing_file_offset;
} else {
@@ -506,6 +566,7 @@ static int qcow2_open(BlockDriverState *bs, QDict *options, int flags,
s->incompatible_features &
~QCOW2_INCOMPAT_MASK);
ret = -ENOTSUP;
+ g_free(feature_table);
goto fail;
}
@@ -529,12 +590,6 @@ static int qcow2_open(BlockDriverState *bs, QDict *options, int flags,
}
s->refcount_order = header.refcount_order;
- if (header.cluster_bits < MIN_CLUSTER_BITS ||
- header.cluster_bits > MAX_CLUSTER_BITS) {
- error_setg(errp, "Unsupported cluster size: 2^%i", header.cluster_bits);
- ret = -EINVAL;
- goto fail;
- }
if (header.crypt_method > QCOW_CRYPT_AES) {
error_setg(errp, "Unsupported encryption method: %i",
header.crypt_method);
@@ -545,23 +600,52 @@ static int qcow2_open(BlockDriverState *bs, QDict *options, int flags,
if (s->crypt_method_header) {
bs->encrypted = 1;
}
- s->cluster_bits = header.cluster_bits;
- s->cluster_size = 1 << s->cluster_bits;
- s->cluster_sectors = 1 << (s->cluster_bits - 9);
+
s->l2_bits = s->cluster_bits - 3; /* L2 is always one cluster */
s->l2_size = 1 << s->l2_bits;
bs->total_sectors = header.size / 512;
s->csize_shift = (62 - (s->cluster_bits - 8));
s->csize_mask = (1 << (s->cluster_bits - 8)) - 1;
s->cluster_offset_mask = (1LL << s->csize_shift) - 1;
+
s->refcount_table_offset = header.refcount_table_offset;
s->refcount_table_size =
header.refcount_table_clusters << (s->cluster_bits - 3);
- s->snapshots_offset = header.snapshots_offset;
- s->nb_snapshots = header.nb_snapshots;
+ if (header.refcount_table_clusters > qcow2_max_refcount_clusters(s)) {
+ error_setg(errp, "Reference count table too large");
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ ret = validate_table_offset(bs, s->refcount_table_offset,
+ s->refcount_table_size, sizeof(uint64_t));
+ if (ret < 0) {
+ error_setg(errp, "Invalid reference count table offset");
+ goto fail;
+ }
+
+ /* Snapshot table offset/length */
+ if (header.nb_snapshots > QCOW_MAX_SNAPSHOTS) {
+ error_setg(errp, "Too many snapshots");
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ ret = validate_table_offset(bs, header.snapshots_offset,
+ header.nb_snapshots,
+ sizeof(QCowSnapshotHeader));
+ if (ret < 0) {
+ error_setg(errp, "Invalid snapshot table offset");
+ goto fail;
+ }
/* read the level 1 table */
+ if (header.l1_size > QCOW_MAX_L1_SIZE) {
+ error_setg(errp, "Active L1 table too large");
+ ret = -EFBIG;
+ goto fail;
+ }
s->l1_size = header.l1_size;
l1_vm_state_index = size_to_l1(s, header.size);
@@ -579,7 +663,16 @@ static int qcow2_open(BlockDriverState *bs, QDict *options, int flags,
ret = -EINVAL;
goto fail;
}
+
+ ret = validate_table_offset(bs, header.l1_table_offset,
+ header.l1_size, sizeof(uint64_t));
+ if (ret < 0) {
+ error_setg(errp, "Invalid L1 table offset");
+ goto fail;
+ }
s->l1_table_offset = header.l1_table_offset;
+
+
if (s->l1_size > 0) {
s->l1_table = g_malloc0(
align_offset(s->l1_size * sizeof(uint64_t), 512));
@@ -625,8 +718,10 @@ static int qcow2_open(BlockDriverState *bs, QDict *options, int flags,
/* read the backing file name */
if (header.backing_file_offset != 0) {
len = header.backing_file_size;
- if (len > 1023) {
- len = 1023;
+ if (len > MIN(1023, s->cluster_size - header.backing_file_offset)) {
+ error_setg(errp, "Backing file name too long");
+ ret = -EINVAL;
+ goto fail;
}
ret = bdrv_pread(bs->file, header.backing_file_offset,
bs->backing_file, len);
@@ -637,6 +732,10 @@ static int qcow2_open(BlockDriverState *bs, QDict *options, int flags,
bs->backing_file[len] = '\0';
}
+ /* Internal snapshots */
+ s->snapshots_offset = header.snapshots_offset;
+ s->nb_snapshots = header.nb_snapshots;
+
ret = qcow2_read_snapshots(bs);
if (ret < 0) {
error_setg_errno(errp, -ret, "Could not read snapshots");
@@ -644,7 +743,7 @@ static int qcow2_open(BlockDriverState *bs, QDict *options, int flags,
}
/* Clear unknown autoclear feature bits */
- if (!bs->read_only && s->autoclear_features != 0) {
+ if (!bs->read_only && !(flags & BDRV_O_INCOMING) && s->autoclear_features) {
s->autoclear_features = 0;
ret = qcow2_update_header(bs);
if (ret < 0) {
@@ -657,7 +756,7 @@ static int qcow2_open(BlockDriverState *bs, QDict *options, int flags,
qemu_co_mutex_init(&s->lock);
/* Repair image if dirty */
- if (!(flags & BDRV_O_CHECK) && !bs->read_only &&
+ if (!(flags & (BDRV_O_CHECK | BDRV_O_INCOMING)) && !bs->read_only &&
(s->incompatible_features & QCOW2_INCOMPAT_DIRTY)) {
BdrvCheckResult result = {0};
@@ -745,6 +844,9 @@ static int qcow2_open(BlockDriverState *bs, QDict *options, int flags,
if (s->l2_table_cache) {
qcow2_cache_destroy(bs, s->l2_table_cache);
}
+ if (s->refcount_block_cache) {
+ qcow2_cache_destroy(bs, s->refcount_block_cache);
+ }
g_free(s->cluster_cache);
qemu_vfree(s->cluster_data);
return ret;
@@ -801,11 +903,25 @@ static int qcow2_set_key(BlockDriverState *bs, const char *key)
return 0;
}
-/* We have nothing to do for QCOW2 reopen, stubs just return
- * success */
+/* We have no actual commit/abort logic for qcow2, but we need to write out any
+ * unwritten data if we reopen read-only. */
static int qcow2_reopen_prepare(BDRVReopenState *state,
BlockReopenQueue *queue, Error **errp)
{
+ int ret;
+
+ if ((state->flags & BDRV_O_RDWR) == 0) {
+ ret = bdrv_flush(state->bs);
+ if (ret < 0) {
+ return ret;
+ }
+
+ ret = qcow2_mark_clean(state->bs);
+ if (ret < 0) {
+ return ret;
+ }
+ }
+
return 0;
}
@@ -1137,10 +1253,12 @@ static void qcow2_close(BlockDriverState *bs)
/* else pre-write overlap checks in cache_destroy may crash */
s->l1_table = NULL;
- qcow2_cache_flush(bs, s->l2_table_cache);
- qcow2_cache_flush(bs, s->refcount_block_cache);
+ if (!(bs->open_flags & BDRV_O_INCOMING)) {
+ qcow2_cache_flush(bs, s->l2_table_cache);
+ qcow2_cache_flush(bs, s->refcount_block_cache);
- qcow2_mark_clean(bs);
+ qcow2_mark_clean(bs);
+ }
qcow2_cache_destroy(bs, s->l2_table_cache);
qcow2_cache_destroy(bs, s->refcount_block_cache);
@@ -1154,7 +1272,7 @@ static void qcow2_close(BlockDriverState *bs)
qcow2_free_snapshots(bs);
}
-static void qcow2_invalidate_cache(BlockDriverState *bs)
+static void qcow2_invalidate_cache(BlockDriverState *bs, Error **errp)
{
BDRVQcowState *s = bs->opaque;
int flags = s->flags;
@@ -1162,6 +1280,8 @@ static void qcow2_invalidate_cache(BlockDriverState *bs)
AES_KEY aes_decrypt_key;
uint32_t crypt_method = 0;
QDict *options;
+ Error *local_err = NULL;
+ int ret;
/*
* Backing files are read-only which makes all of their metadata immutable,
@@ -1176,12 +1296,25 @@ static void qcow2_invalidate_cache(BlockDriverState *bs)
qcow2_close(bs);
- options = qdict_new();
- qdict_put(options, QCOW2_OPT_LAZY_REFCOUNTS,
- qbool_from_int(s->use_lazy_refcounts));
+ bdrv_invalidate_cache(bs->file, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ return;
+ }
memset(s, 0, sizeof(BDRVQcowState));
- qcow2_open(bs, options, flags, NULL);
+ options = qdict_clone_shallow(bs->options);
+
+ ret = qcow2_open(bs, options, flags, &local_err);
+ if (local_err) {
+ error_setg(errp, "Could not reopen qcow2 layer: %s",
+ error_get_pretty(local_err));
+ error_free(local_err);
+ return;
+ } else if (ret < 0) {
+ error_setg_errno(errp, -ret, "Could not reopen qcow2 layer");
+ return;
+ }
QDECREF(options);
@@ -1415,7 +1548,9 @@ static int preallocate(BlockDriverState *bs)
return ret;
}
- if (meta != NULL) {
+ while (meta) {
+ QCowL2Meta *next = meta->next;
+
ret = qcow2_alloc_cluster_link_l2(bs, meta);
if (ret < 0) {
qcow2_free_any_clusters(bs, meta->alloc_offset,
@@ -1426,6 +1561,9 @@ static int preallocate(BlockDriverState *bs)
/* There are no dependent requests, but we need to remove our
* request from the list of in-flight requests */
QLIST_REMOVE(meta, next_in_flight);
+
+ g_free(meta);
+ meta = next;
}
/* TODO Preallocate data if requested */
@@ -1483,7 +1621,7 @@ static int qcow2_create2(const char *filename, int64_t total_size,
*/
BlockDriverState* bs;
QCowHeader *header;
- uint8_t* refcount_table;
+ uint64_t* refcount_table;
Error *local_err = NULL;
int ret;
@@ -1535,9 +1673,10 @@ static int qcow2_create2(const char *filename, int64_t total_size,
goto out;
}
- /* Write an empty refcount table */
- refcount_table = g_malloc0(cluster_size);
- ret = bdrv_pwrite(bs, cluster_size, refcount_table, cluster_size);
+ /* Write a refcount table with one refcount block */
+ refcount_table = g_malloc0(2 * cluster_size);
+ refcount_table[0] = cpu_to_be64(2 * cluster_size);
+ ret = bdrv_pwrite(bs, cluster_size, refcount_table, 2 * cluster_size);
g_free(refcount_table);
if (ret < 0) {
@@ -1562,7 +1701,7 @@ static int qcow2_create2(const char *filename, int64_t total_size,
goto out;
}
- ret = qcow2_alloc_clusters(bs, 2 * cluster_size);
+ ret = qcow2_alloc_clusters(bs, 3 * cluster_size);
if (ret < 0) {
error_setg_errno(errp, -ret, "Could not allocate clusters for qcow2 "
"header and refcount table");
diff --git a/block/qcow2.h b/block/qcow2.h
index 0b0eac899c..b49424b85e 100644
--- a/block/qcow2.h
+++ b/block/qcow2.h
@@ -38,6 +38,19 @@
#define QCOW_CRYPT_AES 1
#define QCOW_MAX_CRYPT_CLUSTERS 32
+#define QCOW_MAX_SNAPSHOTS 65536
+
+/* 8 MB refcount table is enough for 2 PB images at 64k cluster size
+ * (128 GB for 512 byte clusters, 2 EB for 2 MB clusters) */
+#define QCOW_MAX_REFTABLE_SIZE 0x800000
+
+/* 32 MB L1 table is enough for 2 PB images at 64k cluster size
+ * (128 GB for 512 byte clusters, 2 EB for 2 MB clusters) */
+#define QCOW_MAX_L1_SIZE 0x2000000
+
+/* Allow for an average of 1k per snapshot table entry, should be plenty of
+ * space for snapshot names and IDs */
+#define QCOW_MAX_SNAPSHOTS_SIZE (1024 * QCOW_MAX_SNAPSHOTS)
/* indicate that the refcount of the referenced cluster is exactly one. */
#define QCOW_OFLAG_COPIED (1ULL << 63)
@@ -97,6 +110,32 @@ typedef struct QCowHeader {
uint32_t header_length;
} QEMU_PACKED QCowHeader;
+typedef struct QEMU_PACKED QCowSnapshotHeader {
+ /* header is 8 byte aligned */
+ uint64_t l1_table_offset;
+
+ uint32_t l1_size;
+ uint16_t id_str_size;
+ uint16_t name_size;
+
+ uint32_t date_sec;
+ uint32_t date_nsec;
+
+ uint64_t vm_clock_nsec;
+
+ uint32_t vm_state_size;
+ uint32_t extra_data_size; /* for extension */
+ /* extra data follows */
+ /* id_str follows */
+ /* name follows */
+} QCowSnapshotHeader;
+
+typedef struct QEMU_PACKED QCowSnapshotExtraData {
+ uint64_t vm_state_size_large;
+ uint64_t disk_size;
+} QCowSnapshotExtraData;
+
+
typedef struct QCowSnapshot {
uint64_t l1_table_offset;
uint32_t l1_size;
@@ -191,8 +230,8 @@ typedef struct BDRVQcowState {
uint64_t *refcount_table;
uint64_t refcount_table_offset;
uint32_t refcount_table_size;
- int64_t free_cluster_index;
- int64_t free_byte_offset;
+ uint64_t free_cluster_index;
+ uint64_t free_byte_offset;
CoMutex lock;
@@ -202,7 +241,7 @@ typedef struct BDRVQcowState {
AES_KEY aes_decrypt_key;
uint64_t snapshots_offset;
int snapshots_size;
- int nb_snapshots;
+ unsigned int nb_snapshots;
QCowSnapshot *snapshots;
int flags;
@@ -383,6 +422,11 @@ static inline int64_t qcow2_vm_state_offset(BDRVQcowState *s)
return (int64_t)s->l1_vm_state_index << (s->cluster_bits + s->l2_bits);
}
+static inline uint64_t qcow2_max_refcount_clusters(BDRVQcowState *s)
+{
+ return QCOW_MAX_REFTABLE_SIZE >> s->cluster_bits;
+}
+
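
qcow2_max_refcount_clusters() just converts the fixed QCOW_MAX_REFTABLE_SIZE budget into a cluster count. As a quick sanity check of the coverage figures quoted next to that constant, a back-of-the-envelope computation assuming 16-bit refcounts and 8-byte reftable entries:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t reftable_bytes = 0x800000;             /* QCOW_MAX_REFTABLE_SIZE */
    int cluster_bits_list[] = {9, 16, 21};          /* 512 B, 64 KB, 2 MB */

    for (int i = 0; i < 3; i++) {
        uint64_t cluster_size = 1ULL << cluster_bits_list[i];
        uint64_t reftable_entries = reftable_bytes / 8;     /* 8-byte entries */
        uint64_t refblock_entries = cluster_size / 2;       /* 16-bit refcounts */
        uint64_t covered = reftable_entries * refblock_entries * cluster_size;

        printf("cluster %7llu B -> max image %llu GiB\n",
               (unsigned long long)cluster_size,
               (unsigned long long)(covered >> 30));
    }
    return 0;
}

This prints 128 GiB, 2 PiB (2097152 GiB) and 2 EiB respectively, matching the comment above QCOW_MAX_REFTABLE_SIZE.
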
static inline int qcow2_get_cluster_type(uint64_t l2_entry)
{
if (l2_entry & QCOW_OFLAG_COMPRESSED) {
@@ -431,7 +475,7 @@ void qcow2_refcount_close(BlockDriverState *bs);
int qcow2_update_cluster_refcount(BlockDriverState *bs, int64_t cluster_index,
int addend, enum qcow2_discard_type type);
-int64_t qcow2_alloc_clusters(BlockDriverState *bs, int64_t size);
+int64_t qcow2_alloc_clusters(BlockDriverState *bs, uint64_t size);
int qcow2_alloc_clusters_at(BlockDriverState *bs, uint64_t offset,
int nb_clusters);
int64_t qcow2_alloc_bytes(BlockDriverState *bs, int size);
diff --git a/block/qed.c b/block/qed.c
index 8802ad3845..c130e42d0d 100644
--- a/block/qed.c
+++ b/block/qed.c
@@ -650,19 +650,21 @@ static int bdrv_qed_create(const char *filename, QEMUOptionParameter *options,
}
if (!qed_is_cluster_size_valid(cluster_size)) {
- fprintf(stderr, "QED cluster size must be within range [%u, %u] and power of 2\n",
- QED_MIN_CLUSTER_SIZE, QED_MAX_CLUSTER_SIZE);
+ error_setg(errp, "QED cluster size must be within range [%u, %u] "
+ "and power of 2",
+ QED_MIN_CLUSTER_SIZE, QED_MAX_CLUSTER_SIZE);
return -EINVAL;
}
if (!qed_is_table_size_valid(table_size)) {
- fprintf(stderr, "QED table size must be within range [%u, %u] and power of 2\n",
- QED_MIN_TABLE_SIZE, QED_MAX_TABLE_SIZE);
+ error_setg(errp, "QED table size must be within range [%u, %u] "
+ "and power of 2",
+ QED_MIN_TABLE_SIZE, QED_MAX_TABLE_SIZE);
return -EINVAL;
}
if (!qed_is_image_size_valid(image_size, cluster_size, table_size)) {
- fprintf(stderr, "QED image size must be a non-zero multiple of "
- "cluster size and less than %" PRIu64 " bytes\n",
- qed_max_image_size(cluster_size, table_size));
+ error_setg(errp, "QED image size must be a non-zero multiple of "
+ "cluster size and less than %" PRIu64 " bytes",
+ qed_max_image_size(cluster_size, table_size));
return -EINVAL;
}
@@ -1558,13 +1560,31 @@ static int bdrv_qed_change_backing_file(BlockDriverState *bs,
return ret;
}
-static void bdrv_qed_invalidate_cache(BlockDriverState *bs)
+static void bdrv_qed_invalidate_cache(BlockDriverState *bs, Error **errp)
{
BDRVQEDState *s = bs->opaque;
+ Error *local_err = NULL;
+ int ret;
bdrv_qed_close(bs);
+
+ bdrv_invalidate_cache(bs->file, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ return;
+ }
+
memset(s, 0, sizeof(BDRVQEDState));
- bdrv_qed_open(bs, NULL, bs->open_flags, NULL);
+ ret = bdrv_qed_open(bs, NULL, bs->open_flags, &local_err);
+ if (local_err) {
+ error_setg(errp, "Could not reopen qed layer: %s",
+ error_get_pretty(local_err));
+ error_free(local_err);
+ return;
+ } else if (ret < 0) {
+ error_setg_errno(errp, -ret, "Could not reopen qed layer");
+ return;
+ }
}
static int bdrv_qed_check(BlockDriverState *bs, BdrvCheckResult *result,
diff --git a/block/quorum.c b/block/quorum.c
index bd997b7322..ecec3a5407 100644
--- a/block/quorum.c
+++ b/block/quorum.c
@@ -625,13 +625,18 @@ static int64_t quorum_getlength(BlockDriverState *bs)
return result;
}
-static void quorum_invalidate_cache(BlockDriverState *bs)
+static void quorum_invalidate_cache(BlockDriverState *bs, Error **errp)
{
BDRVQuorumState *s = bs->opaque;
+ Error *local_err = NULL;
int i;
for (i = 0; i < s->num_children; i++) {
- bdrv_invalidate_cache(s->bs[i]);
+ bdrv_invalidate_cache(s->bs[i], &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ return;
+ }
}
}
@@ -748,7 +753,7 @@ static int quorum_open(BlockDriverState *bs, QDict *options, int flags,
opts = qemu_opts_create(&quorum_runtime_opts, NULL, 0, &error_abort);
qemu_opts_absorb_qdict(opts, options, &local_err);
- if (error_is_set(&local_err)) {
+ if (local_err) {
ret = -EINVAL;
goto exit;
}
@@ -823,7 +828,7 @@ close_exit:
g_free(opened);
exit:
/* propagate error */
- if (error_is_set(&local_err)) {
+ if (local_err) {
error_propagate(errp, local_err);
}
QDECREF(list);
@@ -852,8 +857,6 @@ static BlockDriver bdrv_quorum = {
.bdrv_file_open = quorum_open,
.bdrv_close = quorum_close,
- .authorizations = { true, true },
-
.bdrv_co_flush_to_disk = quorum_co_flush,
.bdrv_getlength = quorum_getlength,
@@ -862,6 +865,7 @@ static BlockDriver bdrv_quorum = {
.bdrv_aio_writev = quorum_aio_writev,
.bdrv_invalidate_cache = quorum_invalidate_cache,
+ .is_filter = true,
.bdrv_recurse_is_first_non_filter = quorum_recurse_is_first_non_filter,
};
diff --git a/block/raw-posix.c b/block/raw-posix.c
index e6b4c1fe02..1688e16c64 100644
--- a/block/raw-posix.c
+++ b/block/raw-posix.c
@@ -1561,6 +1561,15 @@ static int check_hdev_writable(BDRVRawState *s)
return 0;
}
+static void hdev_parse_filename(const char *filename, QDict *options,
+ Error **errp)
+{
+ /* The prefix is optional, just as for "file". */
+ strstart(filename, "host_device:", &filename);
+
+ qdict_put_obj(options, "filename", QOBJECT(qstring_from_str(filename)));
+}
+
static int hdev_open(BlockDriverState *bs, QDict *options, int flags,
Error **errp)
{
@@ -1767,6 +1776,18 @@ static int hdev_create(const char *filename, QEMUOptionParameter *options,
int ret = 0;
struct stat stat_buf;
int64_t total_size = 0;
+ bool has_prefix;
+
+ /* This function is used by all three protocol block drivers and therefore
+ * any of these three prefixes may be given.
+ * The return value has to be stored somewhere, otherwise this is an error
+ * due to -Werror=unused-value. */
+ has_prefix =
+ strstart(filename, "host_device:", &filename) ||
+ strstart(filename, "host_cdrom:" , &filename) ||
+ strstart(filename, "host_floppy:", &filename);
+
+ (void)has_prefix;
/* Read out options */
while (options && options->name) {
@@ -1805,6 +1826,7 @@ static BlockDriver bdrv_host_device = {
.instance_size = sizeof(BDRVRawState),
.bdrv_needs_filename = true,
.bdrv_probe_device = hdev_probe_device,
+ .bdrv_parse_filename = hdev_parse_filename,
.bdrv_file_open = hdev_open,
.bdrv_close = raw_close,
.bdrv_reopen_prepare = raw_reopen_prepare,
@@ -1834,6 +1856,15 @@ static BlockDriver bdrv_host_device = {
};
#ifdef __linux__
+static void floppy_parse_filename(const char *filename, QDict *options,
+ Error **errp)
+{
+ /* The prefix is optional, just as for "file". */
+ strstart(filename, "host_floppy:", &filename);
+
+ qdict_put_obj(options, "filename", QOBJECT(qstring_from_str(filename)));
+}
+
static int floppy_open(BlockDriverState *bs, QDict *options, int flags,
Error **errp)
{
@@ -1939,6 +1970,7 @@ static BlockDriver bdrv_host_floppy = {
.instance_size = sizeof(BDRVRawState),
.bdrv_needs_filename = true,
.bdrv_probe_device = floppy_probe_device,
+ .bdrv_parse_filename = floppy_parse_filename,
.bdrv_file_open = floppy_open,
.bdrv_close = raw_close,
.bdrv_reopen_prepare = raw_reopen_prepare,
@@ -1963,7 +1995,20 @@ static BlockDriver bdrv_host_floppy = {
.bdrv_media_changed = floppy_media_changed,
.bdrv_eject = floppy_eject,
};
+#endif
+
+#if defined(__linux__) || defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
+static void cdrom_parse_filename(const char *filename, QDict *options,
+ Error **errp)
+{
+ /* The prefix is optional, just as for "file". */
+ strstart(filename, "host_cdrom:", &filename);
+ qdict_put_obj(options, "filename", QOBJECT(qstring_from_str(filename)));
+}
+#endif
+
+#ifdef __linux__
static int cdrom_open(BlockDriverState *bs, QDict *options, int flags,
Error **errp)
{
@@ -2050,6 +2095,7 @@ static BlockDriver bdrv_host_cdrom = {
.instance_size = sizeof(BDRVRawState),
.bdrv_needs_filename = true,
.bdrv_probe_device = cdrom_probe_device,
+ .bdrv_parse_filename = cdrom_parse_filename,
.bdrv_file_open = cdrom_open,
.bdrv_close = raw_close,
.bdrv_reopen_prepare = raw_reopen_prepare,
@@ -2180,6 +2226,7 @@ static BlockDriver bdrv_host_cdrom = {
.instance_size = sizeof(BDRVRawState),
.bdrv_needs_filename = true,
.bdrv_probe_device = cdrom_probe_device,
+ .bdrv_parse_filename = cdrom_parse_filename,
.bdrv_file_open = cdrom_open,
.bdrv_close = raw_close,
.bdrv_reopen_prepare = raw_reopen_prepare,
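
The new *_parse_filename callbacks make the protocol prefixes ("host_device:", "host_floppy:", "host_cdrom:") optional by stripping them before the filename lands in the options QDict; the prefix still selects the protocol driver, parse_filename only cleans it out of the filename option. A condensed sketch of the strstart() idiom used above (strstart() advances the pointer past the prefix only when it matches; the function name is illustrative):

    static void hdev_parse_filename_sketch(const char *filename, QDict *options)
    {
        /* "host_device:/dev/sdb" and plain "/dev/sdb" both end up as
         * options["filename"] = "/dev/sdb". */
        strstart(filename, "host_device:", &filename);
        qdict_put_obj(options, "filename", QOBJECT(qstring_from_str(filename)));
    }
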
diff --git a/block/raw-win32.c b/block/raw-win32.c
index 99547488e4..48cb2c2258 100644
--- a/block/raw-win32.c
+++ b/block/raw-win32.c
@@ -593,6 +593,15 @@ static int hdev_probe_device(const char *filename)
return 0;
}
+static void hdev_parse_filename(const char *filename, QDict *options,
+ Error **errp)
+{
+ /* The prefix is optional, just as for "file". */
+ strstart(filename, "host_device:", &filename);
+
+ qdict_put_obj(options, "filename", QOBJECT(qstring_from_str(filename)));
+}
+
static int hdev_open(BlockDriverState *bs, QDict *options, int flags,
Error **errp)
{
@@ -663,6 +672,7 @@ static BlockDriver bdrv_host_device = {
.protocol_name = "host_device",
.instance_size = sizeof(BDRVRawState),
.bdrv_needs_filename = true,
+ .bdrv_parse_filename = hdev_parse_filename,
.bdrv_probe_device = hdev_probe_device,
.bdrv_file_open = hdev_open,
.bdrv_close = raw_close,
diff --git a/block/sheepdog.c b/block/sheepdog.c
index f7bd0242e5..0eb33ee80e 100644
--- a/block/sheepdog.c
+++ b/block/sheepdog.c
@@ -909,9 +909,9 @@ static void co_write_request(void *opaque)
}
/*
- * Return a socket discriptor to read/write objects.
+ * Return a socket descriptor to read/write objects.
*
- * We cannot use this discriptor for other operations because
+ * We cannot use this descriptor for other operations because
* the block driver may be on waiting response from the server.
*/
static int get_sheep_fd(BDRVSheepdogState *s)
@@ -1896,7 +1896,7 @@ static int sd_create_branch(BDRVSheepdogState *s)
/*
* Even If deletion fails, we will just create extra snapshot based on
- * the workding VDI which was supposed to be deleted. So no need to
+ * the working VDI which was supposed to be deleted. So no need to
* false bail out.
*/
deleted = sd_delete(s);
@@ -2194,7 +2194,7 @@ cleanup:
* We implement rollback(loadvm) operation to the specified snapshot by
* 1) switch to the snapshot
* 2) rely on sd_create_branch to delete working VDI and
- * 3) create a new working VDI based on the speicified snapshot
+ * 3) create a new working VDI based on the specified snapshot
*/
static int sd_snapshot_goto(BlockDriverState *bs, const char *snapshot_id)
{
diff --git a/block/vdi.c b/block/vdi.c
index ae49cd83ca..820cd376b3 100644
--- a/block/vdi.c
+++ b/block/vdi.c
@@ -31,7 +31,7 @@
* Allocation of blocks could be optimized (less writes to block map and
* header).
*
- * Read and write of adjacents blocks could be done in one operation
+ * Read and write of adjacent blocks could be done in one operation
* (current code uses one operation per block (1 MiB).
*
* The code is not thread safe (missing locks for changes in header and
@@ -120,6 +120,11 @@ typedef unsigned char uuid_t[16];
#define VDI_IS_ALLOCATED(X) ((X) < VDI_DISCARDED)
+/* max blocks in image is (0xffffffff / 4) */
+#define VDI_BLOCKS_IN_IMAGE_MAX 0x3fffffff
+#define VDI_DISK_SIZE_MAX ((uint64_t)VDI_BLOCKS_IN_IMAGE_MAX * \
+ (uint64_t)DEFAULT_CLUSTER_SIZE)
+
#if !defined(CONFIG_UUID)
static inline void uuid_generate(uuid_t out)
{
@@ -385,6 +390,14 @@ static int vdi_open(BlockDriverState *bs, QDict *options, int flags,
vdi_header_print(&header);
#endif
+ if (header.disk_size > VDI_DISK_SIZE_MAX) {
+ error_setg(errp, "Unsupported VDI image size (size is 0x%" PRIx64
+ ", max supported is 0x%" PRIx64 ")",
+ header.disk_size, VDI_DISK_SIZE_MAX);
+ ret = -ENOTSUP;
+ goto fail;
+ }
+
if (header.disk_size % SECTOR_SIZE != 0) {
/* 'VBoxManage convertfromraw' can create images with odd disk sizes.
We accept them but round the disk size to the next multiple of
@@ -420,9 +433,9 @@ static int vdi_open(BlockDriverState *bs, QDict *options, int flags,
header.sector_size, SECTOR_SIZE);
ret = -ENOTSUP;
goto fail;
- } else if (header.block_size != 1 * MiB) {
- error_setg(errp, "unsupported VDI image (sector size %u is not %u)",
- header.block_size, 1 * MiB);
+ } else if (header.block_size != DEFAULT_CLUSTER_SIZE) {
+ error_setg(errp, "unsupported VDI image (block size %u is not %u)",
+ header.block_size, DEFAULT_CLUSTER_SIZE);
ret = -ENOTSUP;
goto fail;
} else if (header.disk_size >
@@ -441,6 +454,12 @@ static int vdi_open(BlockDriverState *bs, QDict *options, int flags,
error_setg(errp, "unsupported VDI image (non-NULL parent UUID)");
ret = -ENOTSUP;
goto fail;
+ } else if (header.blocks_in_image > VDI_BLOCKS_IN_IMAGE_MAX) {
+ error_setg(errp, "unsupported VDI image "
+ "(too many blocks %u, max is %u)",
+ header.blocks_in_image, VDI_BLOCKS_IN_IMAGE_MAX);
+ ret = -ENOTSUP;
+ goto fail;
}
bs->total_sectors = header.disk_size / SECTOR_SIZE;
@@ -689,11 +708,20 @@ static int vdi_create(const char *filename, QEMUOptionParameter *options,
options++;
}
+ if (bytes > VDI_DISK_SIZE_MAX) {
+ result = -ENOTSUP;
+ error_setg(errp, "Unsupported VDI image size (size is 0x%" PRIx64
+ ", max supported is 0x%" PRIx64 ")",
+ bytes, VDI_DISK_SIZE_MAX);
+ goto exit;
+ }
+
fd = qemu_open(filename,
O_WRONLY | O_CREAT | O_TRUNC | O_BINARY | O_LARGEFILE,
0644);
if (fd < 0) {
- return -errno;
+ result = -errno;
+ goto exit;
}
/* We need enough blocks to store the given disk size,
@@ -754,6 +782,7 @@ static int vdi_create(const char *filename, QEMUOptionParameter *options,
result = -errno;
}
+exit:
return result;
}
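
The VDI checks above bound both the reported disk size and the block-map entry count before any allocation is sized from them, so a corrupted header cannot drive an oversized allocation or a 32-bit overflow. A standalone sketch of the same arithmetic (constants mirror the patch; the helper name is illustrative):

    #include <stdbool.h>
    #include <stdint.h>

    #define BLOCKS_IN_IMAGE_MAX 0x3fffffffu               /* 0xffffffff / 4 */
    #define BLOCK_SIZE          (1u << 20)                /* 1 MiB default cluster */
    #define DISK_SIZE_MAX       ((uint64_t)BLOCKS_IN_IMAGE_MAX * BLOCK_SIZE)

    static bool vdi_header_sizes_ok(uint64_t disk_size, uint32_t blocks_in_image)
    {
        /* Capping the block count first keeps blocks * 4 (the bmap size)
         * and blocks * BLOCK_SIZE inside 32- and 64-bit ranges. */
        return disk_size <= DISK_SIZE_MAX &&
               blocks_in_image <= BLOCKS_IN_IMAGE_MAX;
    }
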
diff --git a/block/vhdx-log.c b/block/vhdx-log.c
index 02755b8ded..a77c040ee0 100644
--- a/block/vhdx-log.c
+++ b/block/vhdx-log.c
@@ -578,7 +578,7 @@ static int vhdx_validate_log_entry(BlockDriverState *bs, BDRVVHDXState *s,
total_sectors = hdr.entry_length / VHDX_LOG_SECTOR_SIZE;
- /* read_desc() will incrememnt the read idx */
+ /* read_desc() will increment the read idx */
ret = vhdx_log_read_desc(bs, s, log, &desc_buffer);
if (ret < 0) {
goto free_and_exit;
diff --git a/block/vhdx.c b/block/vhdx.c
index 5390ba6d0f..509baaf484 100644
--- a/block/vhdx.c
+++ b/block/vhdx.c
@@ -780,12 +780,20 @@ static int vhdx_parse_metadata(BlockDriverState *bs, BDRVVHDXState *s)
le32_to_cpus(&s->logical_sector_size);
le32_to_cpus(&s->physical_sector_size);
- if (s->logical_sector_size == 0 || s->params.block_size == 0) {
+ if (s->params.block_size < VHDX_BLOCK_SIZE_MIN ||
+ s->params.block_size > VHDX_BLOCK_SIZE_MAX) {
ret = -EINVAL;
goto exit;
}
- /* both block_size and sector_size are guaranteed powers of 2 */
+ /* only 2 supported sector sizes */
+ if (s->logical_sector_size != 512 && s->logical_sector_size != 4096) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ /* Both block_size and sector_size are guaranteed powers of 2, below.
+ Due to range checks above, s->sectors_per_block can never be < 256 */
s->sectors_per_block = s->params.block_size / s->logical_sector_size;
s->chunk_ratio = (VHDX_MAX_SECTORS_PER_BLOCK) *
(uint64_t)s->logical_sector_size /
diff --git a/block/vhdx.h b/block/vhdx.h
index 2acd7c2d19..8103d4c446 100644
--- a/block/vhdx.h
+++ b/block/vhdx.h
@@ -61,7 +61,7 @@
/* These structures are ones that are defined in the VHDX specification
* document */
-#define VHDX_FILE_SIGNATURE 0x656C696678646876 /* "vhdxfile" in ASCII */
+#define VHDX_FILE_SIGNATURE 0x656C696678646876ULL /* "vhdxfile" in ASCII */
typedef struct VHDXFileIdentifier {
uint64_t signature; /* "vhdxfile" in ASCII */
uint16_t creator[256]; /* optional; utf-16 string to identify
@@ -238,7 +238,7 @@ typedef struct QEMU_PACKED VHDXLogDataSector {
/* upper 44 bits are the file offset in 1MB units lower 3 bits are the state
other bits are reserved */
#define VHDX_BAT_STATE_BIT_MASK 0x07
-#define VHDX_BAT_FILE_OFF_MASK 0xFFFFFFFFFFF00000 /* upper 44 bits */
+#define VHDX_BAT_FILE_OFF_MASK 0xFFFFFFFFFFF00000ULL /* upper 44 bits */
typedef uint64_t VHDXBatEntry;
/* ---- METADATA REGION STRUCTURES ---- */
@@ -247,7 +247,7 @@ typedef uint64_t VHDXBatEntry;
#define VHDX_METADATA_MAX_ENTRIES 2047 /* not including the header */
#define VHDX_METADATA_TABLE_MAX_SIZE \
(VHDX_METADATA_ENTRY_SIZE * (VHDX_METADATA_MAX_ENTRIES+1))
-#define VHDX_METADATA_SIGNATURE 0x617461646174656D /* "metadata" in ASCII */
+#define VHDX_METADATA_SIGNATURE 0x617461646174656DULL /* "metadata" in ASCII */
typedef struct QEMU_PACKED VHDXMetadataTableHeader {
uint64_t signature; /* "metadata" in ASCII */
uint16_t reserved;
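
The ULL suffixes added to VHDX_FILE_SIGNATURE, VHDX_BAT_FILE_OFF_MASK and VHDX_METADATA_SIGNATURE give those constants an explicit 64-bit unsigned type on every host, instead of leaving the type to the compiler's constant-fitting rules, which can trigger "integer constant is too large" warnings on 32-bit builds in older language modes. A tiny standalone illustration:

    #include <stdint.h>

    #define BAT_FILE_OFF_MASK 0xFFFFFFFFFFF00000ULL   /* upper 44 bits */

    static uint64_t bat_entry_file_offset(uint64_t entry)
    {
        return entry & BAT_FILE_OFF_MASK;
    }
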
diff --git a/block/vmdk.c b/block/vmdk.c
index b69988d169..06a1f9f93b 100644
--- a/block/vmdk.c
+++ b/block/vmdk.c
@@ -262,7 +262,7 @@ static uint32_t vmdk_read_cid(BlockDriverState *bs, int parent)
p_name = strstr(desc, cid_str);
if (p_name != NULL) {
p_name += cid_str_size;
- sscanf(p_name, "%x", &cid);
+ sscanf(p_name, "%" SCNx32, &cid);
}
return cid;
@@ -290,7 +290,7 @@ static int vmdk_write_cid(BlockDriverState *bs, uint32_t cid)
p_name = strstr(desc, "CID");
if (p_name != NULL) {
p_name += sizeof("CID");
- snprintf(p_name, sizeof(desc) - (p_name - desc), "%x\n", cid);
+ snprintf(p_name, sizeof(desc) - (p_name - desc), "%" PRIx32 "\n", cid);
pstrcat(desc, sizeof(desc), tmp_desc);
}
@@ -640,7 +640,7 @@ static int vmdk_open_vmdk4(BlockDriverState *bs,
if (le32_to_cpu(header.version) > 3) {
char buf[64];
- snprintf(buf, sizeof(buf), "VMDK version %d",
+ snprintf(buf, sizeof(buf), "VMDK version %" PRId32,
le32_to_cpu(header.version));
error_set(errp, QERR_UNKNOWN_BLOCK_FORMAT_FEATURE,
bs->device_name, "vmdk", buf);
@@ -671,8 +671,9 @@ static int vmdk_open_vmdk4(BlockDriverState *bs,
}
if (bdrv_getlength(file) <
le64_to_cpu(header.grain_offset) * BDRV_SECTOR_SIZE) {
- error_setg(errp, "File truncated, expecting at least %lld bytes",
- le64_to_cpu(header.grain_offset) * BDRV_SECTOR_SIZE);
+ error_setg(errp, "File truncated, expecting at least %" PRId64 " bytes",
+ (int64_t)(le64_to_cpu(header.grain_offset)
+ * BDRV_SECTOR_SIZE));
return -EINVAL;
}
@@ -1707,8 +1708,8 @@ static int vmdk_create(const char *filename, QEMUOptionParameter *options,
const char desc_template[] =
"# Disk DescriptorFile\n"
"version=1\n"
- "CID=%x\n"
- "parentCID=%x\n"
+ "CID=%" PRIx32 "\n"
+ "parentCID=%" PRIx32 "\n"
"createType=\"%s\"\n"
"%s"
"\n"
@@ -1720,7 +1721,7 @@ static int vmdk_create(const char *filename, QEMUOptionParameter *options,
"\n"
"ddb.virtualHWVersion = \"%d\"\n"
"ddb.geometry.cylinders = \"%" PRId64 "\"\n"
- "ddb.geometry.heads = \"%d\"\n"
+ "ddb.geometry.heads = \"%" PRIu32 "\"\n"
"ddb.geometry.sectors = \"63\"\n"
"ddb.adapterType = \"%s\"\n";
@@ -1780,9 +1781,9 @@ static int vmdk_create(const char *filename, QEMUOptionParameter *options,
strcmp(fmt, "twoGbMaxExtentFlat"));
compress = !strcmp(fmt, "streamOptimized");
if (flat) {
- desc_extent_line = "RW %lld FLAT \"%s\" 0\n";
+ desc_extent_line = "RW %" PRId64 " FLAT \"%s\" 0\n";
} else {
- desc_extent_line = "RW %lld SPARSE \"%s\"\n";
+ desc_extent_line = "RW %" PRId64 " SPARSE \"%s\"\n";
}
if (flat && backing_file) {
error_setg(errp, "Flat image can't have backing file");
@@ -1850,7 +1851,7 @@ static int vmdk_create(const char *filename, QEMUOptionParameter *options,
}
/* generate descriptor file */
desc = g_strdup_printf(desc_template,
- (unsigned int)time(NULL),
+ (uint32_t)time(NULL),
parent_cid,
fmt,
parent_desc_line,
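
The VMDK hunks swap bare %x/%d/%lld conversions for the <inttypes.h> macros (PRIx32, PRId64, SCNx32), which expand to the correct length modifier for uint32_t/int64_t on every host and silence format warnings on 32-bit builds. A standalone example of the same macros:

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t cid = 0xdeadbeefu;
        int64_t grain_bytes = 65536LL * 512;
        uint32_t parsed = 0;

        printf("CID=%" PRIx32 "\n", cid);
        printf("offset=%" PRId64 " bytes\n", grain_bytes);
        sscanf("cafebabe", "%" SCNx32, &parsed);      /* parsed == 0xcafebabe */
        return 0;
    }
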
diff --git a/block/vpc.c b/block/vpc.c
index 82bf2485a5..2e25f57230 100644
--- a/block/vpc.c
+++ b/block/vpc.c
@@ -45,6 +45,8 @@ enum vhd_type {
// Seconds since Jan 1, 2000 0:00:00 (UTC)
#define VHD_TIMESTAMP_BASE 946684800
+#define VHD_MAX_SECTORS (65535LL * 255 * 255)
+
// always big-endian
typedef struct vhd_footer {
char creator[8]; // "conectix"
@@ -164,6 +166,7 @@ static int vpc_open(BlockDriverState *bs, QDict *options, int flags,
VHDDynDiskHeader *dyndisk_header;
uint8_t buf[HEADER_SIZE];
uint32_t checksum;
+ uint64_t computed_size;
int disk_type = VHD_DYNAMIC;
int ret;
@@ -222,7 +225,7 @@ static int vpc_open(BlockDriverState *bs, QDict *options, int flags,
}
/* Allow a maximum disk size of approximately 2 TB */
- if (bs->total_sectors >= 65535LL * 255 * 255) {
+ if (bs->total_sectors >= VHD_MAX_SECTORS) {
ret = -EFBIG;
goto fail;
}
@@ -242,10 +245,31 @@ static int vpc_open(BlockDriverState *bs, QDict *options, int flags,
}
s->block_size = be32_to_cpu(dyndisk_header->block_size);
+ if (!is_power_of_2(s->block_size) || s->block_size < BDRV_SECTOR_SIZE) {
+ error_setg(errp, "Invalid block size %" PRIu32, s->block_size);
+ ret = -EINVAL;
+ goto fail;
+ }
s->bitmap_size = ((s->block_size / (8 * 512)) + 511) & ~511;
s->max_table_entries = be32_to_cpu(dyndisk_header->max_table_entries);
- s->pagetable = g_malloc(s->max_table_entries * 4);
+
+ if ((bs->total_sectors * 512) / s->block_size > 0xffffffffU) {
+ ret = -EINVAL;
+ goto fail;
+ }
+ if (s->max_table_entries > (VHD_MAX_SECTORS * 512) / s->block_size) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ computed_size = (uint64_t) s->max_table_entries * s->block_size;
+ if (computed_size < bs->total_sectors * 512) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ s->pagetable = qemu_blockalign(bs, s->max_table_entries * 4);
s->bat_offset = be64_to_cpu(dyndisk_header->table_offset);
@@ -298,7 +322,7 @@ static int vpc_open(BlockDriverState *bs, QDict *options, int flags,
return 0;
fail:
- g_free(s->pagetable);
+ qemu_vfree(s->pagetable);
#ifdef CACHE
g_free(s->pageentry_u8);
#endif
@@ -833,7 +857,7 @@ static int vpc_has_zero_init(BlockDriverState *bs)
static void vpc_close(BlockDriverState *bs)
{
BDRVVPCState *s = bs->opaque;
- g_free(s->pagetable);
+ qemu_vfree(s->pagetable);
#ifdef CACHE
g_free(s->pageentry_u8);
#endif
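
The VPC/VHD changes validate the dynamic-disk header before sizing anything from it (power-of-two block size, BAT entry count within the ~2 TB limit, BAT large enough to cover total_sectors) and switch the page table to qemu_blockalign()/qemu_vfree(), which must always be paired. A standalone sketch of the validation arithmetic (helper name is illustrative):

    #include <stdbool.h>
    #include <stdint.h>

    #define VHD_MAX_SECTORS (65535LL * 255 * 255)        /* ~2 TB in 512-byte sectors */

    static bool vpc_dyndisk_header_ok(uint64_t total_sectors,
                                      uint32_t block_size,
                                      uint32_t max_table_entries)
    {
        if (block_size < 512 || (block_size & (block_size - 1)) != 0) {
            return false;                                /* power of two, >= one sector */
        }
        if ((total_sectors * 512) / block_size > 0xffffffffU) {
            return false;                                /* BAT index would overflow 32 bits */
        }
        if (max_table_entries > (VHD_MAX_SECTORS * 512) / block_size) {
            return false;                                /* more entries than a max-size disk */
        }
        /* The BAT must describe at least the advertised disk size. */
        return (uint64_t)max_table_entries * block_size >= total_sectors * 512;
    }
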
diff --git a/block/vvfat.c b/block/vvfat.c
index f966ea5da8..c3af7ff4c5 100644
--- a/block/vvfat.c
+++ b/block/vvfat.c
@@ -1119,6 +1119,7 @@ DLOG(if (stderr == NULL) {
if (!s->fat_type) {
s->fat_type = 16;
}
+ s->first_sectors_number = 0x40;
cyls = s->fat_type == 12 ? 64 : 1024;
heads = 16;
secs = 63;
@@ -1146,7 +1147,6 @@ DLOG(if (stderr == NULL) {
s->current_cluster=0xffffffff;
- s->first_sectors_number=0x40;
/* read only is the default for safety */
bs->read_only = 1;
s->qcow = s->write_target = NULL;
@@ -2947,7 +2947,7 @@ static int enable_write_target(BDRVVVFATState *s)
unlink(s->qcow_filename);
#endif
- s->bs->backing_hd = bdrv_new("");
+ s->bs->backing_hd = bdrv_new("", &error_abort);
s->bs->backing_hd->drv = &vvfat_write_target;
s->bs->backing_hd->opaque = g_malloc(sizeof(void*));
*(void**)s->bs->backing_hd->opaque = s;
diff --git a/blockdev.c b/blockdev.c
index c3422a1d41..7810e9fb68 100644
--- a/blockdev.c
+++ b/blockdev.c
@@ -452,16 +452,14 @@ static DriveInfo *blockdev_init(const char *file, QDict *bs_opts,
}
}
- if (bdrv_find_node(qemu_opts_id(opts))) {
- error_setg(errp, "device id=%s is conflicting with a node-name",
- qemu_opts_id(opts));
- goto early_err;
- }
-
/* init */
dinfo = g_malloc0(sizeof(*dinfo));
dinfo->id = g_strdup(qemu_opts_id(opts));
- dinfo->bdrv = bdrv_new(dinfo->id);
+ dinfo->bdrv = bdrv_new(dinfo->id, &error);
+ if (error) {
+ error_propagate(errp, error);
+ goto bdrv_new_err;
+ }
dinfo->bdrv->open_flags = snapshot ? BDRV_O_SNAPSHOT : 0;
dinfo->bdrv->read_only = ro;
dinfo->refcount = 1;
@@ -523,8 +521,9 @@ static DriveInfo *blockdev_init(const char *file, QDict *bs_opts,
err:
bdrv_unref(dinfo->bdrv);
- g_free(dinfo->id);
QTAILQ_REMOVE(&drives, dinfo, next);
+bdrv_new_err:
+ g_free(dinfo->id);
g_free(dinfo);
early_err:
QDECREF(bs_opts);
@@ -1116,6 +1115,7 @@ typedef struct InternalSnapshotState {
static void internal_snapshot_prepare(BlkTransactionState *common,
Error **errp)
{
+ Error *local_err = NULL;
const char *device;
const char *name;
BlockDriverState *bs;
@@ -1164,8 +1164,10 @@ static void internal_snapshot_prepare(BlkTransactionState *common,
}
/* check whether a snapshot with name exist */
- ret = bdrv_snapshot_find_by_id_and_name(bs, NULL, name, &old_sn, errp);
- if (error_is_set(errp)) {
+ ret = bdrv_snapshot_find_by_id_and_name(bs, NULL, name, &old_sn,
+ &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
return;
} else if (ret) {
error_setg(errp,
@@ -1521,14 +1523,16 @@ static void eject_device(BlockDriverState *bs, int force, Error **errp)
return;
}
if (!bdrv_dev_has_removable_media(bs)) {
- error_set(errp, QERR_DEVICE_NOT_REMOVABLE, bdrv_get_device_name(bs));
+ error_setg(errp, "Device '%s' is not removable",
+ bdrv_get_device_name(bs));
return;
}
if (bdrv_dev_is_medium_locked(bs) && !bdrv_dev_is_tray_open(bs)) {
bdrv_dev_eject_request(bs, force);
if (!force) {
- error_set(errp, QERR_DEVICE_LOCKED, bdrv_get_device_name(bs));
+ error_setg(errp, "Device '%s' is locked",
+ bdrv_get_device_name(bs));
return;
}
}
@@ -1876,6 +1880,10 @@ void qmp_block_commit(const char *device,
*/
BlockdevOnError on_error = BLOCKDEV_ON_ERROR_REPORT;
+ if (!has_speed) {
+ speed = 0;
+ }
+
/* drain all i/o before commits */
bdrv_drain_all();
@@ -2216,7 +2224,8 @@ void qmp_block_job_cancel(const char *device,
return;
}
if (job->paused && !force) {
- error_set(errp, QERR_BLOCK_JOB_PAUSED, device);
+ error_setg(errp, "The block job for device '%s' is currently paused",
+ device);
return;
}
diff --git a/blockjob.c b/blockjob.c
index b3ce14cebd..cd4784f053 100644
--- a/blockjob.c
+++ b/blockjob.c
@@ -88,7 +88,7 @@ void block_job_set_speed(BlockJob *job, int64_t speed, Error **errp)
Error *local_err = NULL;
if (!job->driver->set_speed) {
- error_set(errp, QERR_NOT_SUPPORTED);
+ error_set(errp, QERR_UNSUPPORTED);
return;
}
job->driver->set_speed(job, speed, &local_err);
diff --git a/bsd-user/main.c b/bsd-user/main.c
index f9246aa102..f81ba55af8 100644
--- a/bsd-user/main.c
+++ b/bsd-user/main.c
@@ -1000,7 +1000,7 @@ int main(int argc, char **argv)
memset(ts, 0, sizeof(TaskState));
init_task_state(ts);
ts->info = info;
- env->opaque = ts;
+ cpu->opaque = ts;
#if defined(TARGET_I386)
cpu_x86_set_cpl(env, 3);
diff --git a/configure b/configure
index 8c2838ee3c..2fbec59e96 100755
--- a/configure
+++ b/configure
@@ -198,6 +198,7 @@ audio_win_int=""
cc_i386=i386-pc-linux-gnu-gcc
libs_qga=""
debug_info="yes"
+stack_protector=""
# Don't accept a target_list environment variable.
unset target_list
@@ -318,6 +319,7 @@ glusterfs_zerofill="no"
virtio_blk_data_plane=""
gtk=""
gtkabi="2.0"
+vte=""
tpm="no"
libssh2=""
vhdx=""
@@ -949,6 +951,10 @@ for opt do
;;
--disable-werror) werror="no"
;;
+ --enable-stack-protector) stack_protector="yes"
+ ;;
+ --disable-stack-protector) stack_protector="no"
+ ;;
--disable-curses) curses="no"
;;
--enable-curses) curses="yes"
@@ -1063,6 +1069,10 @@ for opt do
;;
--with-gtkabi=*) gtkabi="$optarg"
;;
+ --disable-vte) vte="no"
+ ;;
+ --enable-vte) vte="yes"
+ ;;
--enable-tpm) tpm="yes"
;;
--disable-libssh2) libssh2="no"
@@ -1077,7 +1087,10 @@ for opt do
;;
--enable-quorum) quorum="yes"
;;
- *) echo "ERROR: unknown option $opt"; show_help="yes"
+ *)
+ echo "ERROR: unknown option $opt"
+ echo "Try '$0 --help' for more information"
+ exit 1
;;
esac
done
@@ -1207,13 +1220,14 @@ Advanced options (experts only):
--enable-modules enable modules support
--enable-debug-tcg enable TCG debugging
--disable-debug-tcg disable TCG debugging (default)
- --enable-debug-info enable debugging information (default)
- --disable-debug-info disable debugging information
+ --enable-debug-info enable debugging information (default)
+ --disable-debug-info disable debugging information
--enable-debug enable common debug build options
--enable-sparse enable sparse checker
--disable-sparse disable sparse checker (default)
--disable-strip disable stripping binaries
--disable-werror disable compilation abort on warning
+ --disable-stack-protector disable compiler-provided stack protection
--disable-sdl disable SDL
--enable-sdl enable SDL
--with-sdlabi select preferred SDL ABI 1.2 or 2.0
@@ -1342,7 +1356,7 @@ Advanced options (experts only):
NOTE: The object files are built at the place where configure is launched
EOF
-exit 1
+exit 0
fi
# Now we have handled --enable-tcg-interpreter and know we're not just
@@ -1434,9 +1448,18 @@ for flag in $gcc_flags; do
fi
done
-if compile_prog "-Werror -fstack-protector-all" "" ; then
- QEMU_CFLAGS="$QEMU_CFLAGS -fstack-protector-all"
- LIBTOOLFLAGS="$LIBTOOLFLAGS -Wc,-fstack-protector-all"
+if test "$stack_protector" != "no" ; then
+ gcc_flags="-fstack-protector-strong -fstack-protector-all"
+ for flag in $gcc_flags; do
+ # We need to check both a compile and a link, since some compiler
+ # setups fail only on a .c->.o compile and some only at link time
+ if do_cc $QEMU_CFLAGS -Werror $flag -c -o $TMPO $TMPC &&
+ compile_prog "-Werror $flag" ""; then
+ QEMU_CFLAGS="$QEMU_CFLAGS $flag"
+ LIBTOOLFLAGS="$LIBTOOLFLAGS -Wc,$flag"
+ break
+ fi
+ done
fi
# Workaround for http://gcc.gnu.org/PR55489. Happens with -fPIE/-fPIC and
@@ -1946,30 +1969,41 @@ if test "$gtk" != "no"; then
gtkpackage="gtk+-$gtkabi"
if test "$gtkabi" = "3.0" ; then
gtkversion="3.0.0"
+ else
+ gtkversion="2.18.0"
+ fi
+ if $pkg_config --exists "$gtkpackage >= $gtkversion"; then
+ gtk_cflags=`$pkg_config --cflags $gtkpackage`
+ gtk_libs=`$pkg_config --libs $gtkpackage`
+ libs_softmmu="$gtk_libs $libs_softmmu"
+ gtk="yes"
+ elif test "$gtk" = "yes"; then
+ feature_not_found "gtk" "Install gtk2 or gtk3 (requires --with-gtkabi=3.0 option to configure) devel"
+ else
+ gtk="no"
+ fi
+fi
+
+##########################################
+# VTE probe
+
+if test "$vte" != "no"; then
+ if test "$gtkabi" = "3.0"; then
vtepackage="vte-2.90"
vteversion="0.32.0"
else
- gtkversion="2.18.0"
vtepackage="vte"
vteversion="0.24.0"
fi
- if ! $pkg_config --exists "$gtkpackage >= $gtkversion"; then
- if test "$gtk" = "yes" ; then
- feature_not_found "gtk" "Install gtk2 or gtk3 (requires --with-gtkabi=3.0 option to configure) devel"
- fi
- gtk="no"
- elif ! $pkg_config --exists "$vtepackage >= $vteversion"; then
- if test "$gtk" = "yes" ; then
- error_exit "libvte not found (required for gtk support)"
- fi
- gtk="no"
+ if $pkg_config --exists "$vtepackage >= $vteversion"; then
+ vte_cflags=`$pkg_config --cflags $vtepackage`
+ vte_libs=`$pkg_config --libs $vtepackage`
+ libs_softmmu="$vte_libs $libs_softmmu"
+ vte="yes"
+ elif test "$vte" = "yes"; then
+ feature_not_found "vte" "Install libvte or libvte-2.90 (requires --with-gtkabi=3.0 option to configure) devel"
else
- gtk_cflags=`$pkg_config --cflags $gtkpackage`
- gtk_libs=`$pkg_config --libs $gtkpackage`
- vte_cflags=`$pkg_config --cflags $vtepackage`
- vte_libs=`$pkg_config --libs $vtepackage`
- libs_softmmu="$gtk_libs $vte_libs $libs_softmmu"
- gtk="yes"
+ vte="no"
fi
fi
@@ -2696,6 +2730,24 @@ if test "$mingw32" != yes -a "$pthread" = no; then
"Make sure to have the pthread libs and headers installed."
fi
+# check for pthread_setname_np
+pthread_setname_np=no
+cat > $TMPC << EOF
+#include <pthread.h>
+
+static void *f(void *p) { return NULL; }
+int main(void)
+{
+ pthread_t thread;
+ pthread_create(&thread, 0, f, 0);
+ pthread_setname_np(thread, "QEMU");
+ return 0;
+}
+EOF
+if compile_prog "" "$pthread_lib" ; then
+ pthread_setname_np=yes
+fi
+
##########################################
# rbd probe
if test "$rbd" != "no" ; then
@@ -3822,6 +3874,11 @@ fi
int128=no
cat > $TMPC << EOF
+#if defined(__clang_major__) && defined(__clang_minor__)
+# if ((__clang_major__ < 3) || (__clang_major__ == 3) && (__clang_minor__ < 2))
+# error __int128_t does not work in CLANG before 3.2
+# endif
+#endif
__int128_t a;
__uint128_t b;
int main (void) {
@@ -3863,7 +3920,7 @@ fi
##########################################
# Do we have libnfs
if test "$libnfs" != "no" ; then
- if $pkg_config --atleast-version=1.9.2 libnfs; then
+ if $pkg_config --atleast-version=1.9.3 libnfs; then
libnfs="yes"
libnfs_libs=$($pkg_config --libs libnfs)
LIBS="$LIBS $libnfs_libs"
@@ -4041,13 +4098,13 @@ echo "sparse enabled $sparse"
echo "strip binaries $strip_opt"
echo "profiler $profiler"
echo "static build $static"
-echo "-Werror enabled $werror"
if test "$darwin" = "yes" ; then
echo "Cocoa support $cocoa"
fi
echo "pixman $pixman"
echo "SDL support $sdl"
echo "GTK support $gtk"
+echo "VTE support $vte"
echo "curses support $curses"
echo "curl support $curl"
echo "mingw32 support $mingw32"
@@ -4376,6 +4433,9 @@ echo "GLIB_CFLAGS=$glib_cflags" >> $config_host_mak
if test "$gtk" = "yes" ; then
echo "CONFIG_GTK=y" >> $config_host_mak
echo "GTK_CFLAGS=$gtk_cflags" >> $config_host_mak
+fi
+if test "$vte" = "yes" ; then
+ echo "CONFIG_VTE=y" >> $config_host_mak
echo "VTE_CFLAGS=$vte_cflags" >> $config_host_mak
fi
if test "$xen" = "yes" ; then
@@ -4623,6 +4683,16 @@ if test "$rdma" = "yes" ; then
echo "CONFIG_RDMA=y" >> $config_host_mak
fi
+# Hold two types of flag:
+# CONFIG_THREAD_SETNAME_BYTHREAD - we've got a way of setting the name on
+# a thread we have a handle to
+# CONFIG_PTHREAD_SETNAME_NP - A way of doing it on a particular
+# platform
+if test "$pthread_setname_np" = "yes" ; then
+ echo "CONFIG_THREAD_SETNAME_BYTHREAD=y" >> $config_host_mak
+ echo "CONFIG_PTHREAD_SETNAME_NP=y" >> $config_host_mak
+fi
+
if test "$tcg_interpreter" = "yes"; then
QEMU_INCLUDES="-I\$(SRC_PATH)/tcg/tci $QEMU_INCLUDES"
elif test "$ARCH" = "sparc64" ; then
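
Among the configure changes, a small test program probes for pthread_setname_np() and, when it links, defines CONFIG_THREAD_SETNAME_BYTHREAD and CONFIG_PTHREAD_SETNAME_NP. Code guarded by those defines can then name its threads; a hedged sketch of such a consumer (the wrapper name is illustrative, and the glibc variant shown limits names to 15 characters plus the terminating NUL):

    #include <pthread.h>

    static void set_thread_name(pthread_t thread, const char *name)
    {
    #ifdef CONFIG_PTHREAD_SETNAME_NP
        pthread_setname_np(thread, name);
    #else
        (void)thread;
        (void)name;
    #endif
    }
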
diff --git a/cpu-exec.c b/cpu-exec.c
index 1b0f617c19..2f54054d8c 100644
--- a/cpu-exec.c
+++ b/cpu-exec.c
@@ -23,29 +23,22 @@
#include "qemu/atomic.h"
#include "sysemu/qtest.h"
-bool qemu_cpu_has_work(CPUState *cpu)
+void cpu_loop_exit(CPUState *cpu)
{
- return cpu_has_work(cpu);
-}
-
-void cpu_loop_exit(CPUArchState *env)
-{
- CPUState *cpu = ENV_GET_CPU(env);
-
cpu->current_tb = NULL;
- siglongjmp(env->jmp_env, 1);
+ siglongjmp(cpu->jmp_env, 1);
}
/* exit the current TB from a signal handler. The host registers are
restored in a state compatible with the CPU emulator
*/
#if defined(CONFIG_SOFTMMU)
-void cpu_resume_from_signal(CPUArchState *env, void *puc)
+void cpu_resume_from_signal(CPUState *cpu, void *puc)
{
/* XXX: restore cpu registers saved in host registers */
- env->exception_index = -1;
- siglongjmp(env->jmp_env, 1);
+ cpu->exception_index = -1;
+ siglongjmp(cpu->jmp_env, 1);
}
#endif
@@ -108,7 +101,7 @@ static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
if (max_cycles > CF_COUNT_MASK)
max_cycles = CF_COUNT_MASK;
- tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
+ tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
max_cycles);
cpu->current_tb = tb;
/* execute the generated code */
@@ -123,6 +116,7 @@ static TranslationBlock *tb_find_slow(CPUArchState *env,
target_ulong cs_base,
uint64_t flags)
{
+ CPUState *cpu = ENV_GET_CPU(env);
TranslationBlock *tb, **ptb1;
unsigned int h;
tb_page_addr_t phys_pc, phys_page1;
@@ -160,7 +154,7 @@ static TranslationBlock *tb_find_slow(CPUArchState *env,
}
not_found:
/* if no translated code available, then translate it now */
- tb = tb_gen_code(env, pc, cs_base, flags, 0);
+ tb = tb_gen_code(cpu, pc, cs_base, flags, 0);
found:
/* Move the last found TB to the head of the list */
@@ -170,12 +164,13 @@ static TranslationBlock *tb_find_slow(CPUArchState *env,
tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
}
/* we add the TB in the virtual pc hash table */
- env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
+ cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
return tb;
}
static inline TranslationBlock *tb_find_fast(CPUArchState *env)
{
+ CPUState *cpu = ENV_GET_CPU(env);
TranslationBlock *tb;
target_ulong cs_base, pc;
int flags;
@@ -184,7 +179,7 @@ static inline TranslationBlock *tb_find_fast(CPUArchState *env)
always be the same before a given translated block
is executed. */
cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
- tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
+ tb = cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
tb->flags != flags)) {
tb = tb_find_slow(env, pc, cs_base, flags);
@@ -201,10 +196,11 @@ void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
static void cpu_handle_debug_exception(CPUArchState *env)
{
+ CPUState *cpu = ENV_GET_CPU(env);
CPUWatchpoint *wp;
- if (!env->watchpoint_hit) {
- QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
+ if (!cpu->watchpoint_hit) {
+ QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
wp->flags &= ~BP_WATCHPOINT_HIT;
}
}
@@ -231,6 +227,8 @@ int cpu_exec(CPUArchState *env)
TranslationBlock *tb;
uint8_t *tc_ptr;
uintptr_t next_tb;
+ /* This must be volatile so it is not trashed by longjmp() */
+ volatile bool have_tb_lock = false;
if (cpu->halted) {
if (!cpu_has_work(cpu)) {
@@ -283,16 +281,16 @@ int cpu_exec(CPUArchState *env)
#else
#error unsupported target CPU
#endif
- env->exception_index = -1;
+ cpu->exception_index = -1;
/* prepare setjmp context for exception handling */
for(;;) {
- if (sigsetjmp(env->jmp_env, 0) == 0) {
+ if (sigsetjmp(cpu->jmp_env, 0) == 0) {
/* if an exception is pending, we execute it here */
- if (env->exception_index >= 0) {
- if (env->exception_index >= EXCP_INTERRUPT) {
+ if (cpu->exception_index >= 0) {
+ if (cpu->exception_index >= EXCP_INTERRUPT) {
/* exit request from the cpu execution loop */
- ret = env->exception_index;
+ ret = cpu->exception_index;
if (ret == EXCP_DEBUG) {
cpu_handle_debug_exception(env);
}
@@ -305,11 +303,11 @@ int cpu_exec(CPUArchState *env)
#if defined(TARGET_I386)
cc->do_interrupt(cpu);
#endif
- ret = env->exception_index;
+ ret = cpu->exception_index;
break;
#else
cc->do_interrupt(cpu);
- env->exception_index = -1;
+ cpu->exception_index = -1;
#endif
}
}
@@ -324,8 +322,8 @@ int cpu_exec(CPUArchState *env)
}
if (interrupt_request & CPU_INTERRUPT_DEBUG) {
cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
- env->exception_index = EXCP_DEBUG;
- cpu_loop_exit(env);
+ cpu->exception_index = EXCP_DEBUG;
+ cpu_loop_exit(cpu);
}
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
@@ -333,8 +331,8 @@ int cpu_exec(CPUArchState *env)
if (interrupt_request & CPU_INTERRUPT_HALT) {
cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
cpu->halted = 1;
- env->exception_index = EXCP_HLT;
- cpu_loop_exit(env);
+ cpu->exception_index = EXCP_HLT;
+ cpu_loop_exit(cpu);
}
#endif
#if defined(TARGET_I386)
@@ -348,8 +346,8 @@ int cpu_exec(CPUArchState *env)
cpu_svm_check_intercept_param(env, SVM_EXIT_INIT,
0);
do_cpu_init(x86_cpu);
- env->exception_index = EXCP_HALTED;
- cpu_loop_exit(env);
+ cpu->exception_index = EXCP_HALTED;
+ cpu_loop_exit(cpu);
} else if (interrupt_request & CPU_INTERRUPT_SIPI) {
do_cpu_sipi(x86_cpu);
} else if (env->hflags2 & HF2_GIF_MASK) {
@@ -420,7 +418,7 @@ int cpu_exec(CPUArchState *env)
#elif defined(TARGET_LM32)
if ((interrupt_request & CPU_INTERRUPT_HARD)
&& (env->ie & IE_IE)) {
- env->exception_index = EXCP_IRQ;
+ cpu->exception_index = EXCP_IRQ;
cc->do_interrupt(cpu);
next_tb = 0;
}
@@ -429,7 +427,7 @@ int cpu_exec(CPUArchState *env)
&& (env->sregs[SR_MSR] & MSR_IE)
&& !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
&& !(env->iflags & (D_FLAG | IMM_FLAG))) {
- env->exception_index = EXCP_IRQ;
+ cpu->exception_index = EXCP_IRQ;
cc->do_interrupt(cpu);
next_tb = 0;
}
@@ -437,7 +435,7 @@ int cpu_exec(CPUArchState *env)
if ((interrupt_request & CPU_INTERRUPT_HARD) &&
cpu_mips_hw_interrupts_pending(env)) {
/* Raise it */
- env->exception_index = EXCP_EXT_INTERRUPT;
+ cpu->exception_index = EXCP_EXT_INTERRUPT;
env->error_code = 0;
cc->do_interrupt(cpu);
next_tb = 0;
@@ -454,7 +452,7 @@ int cpu_exec(CPUArchState *env)
idx = EXCP_TICK;
}
if (idx >= 0) {
- env->exception_index = idx;
+ cpu->exception_index = idx;
cc->do_interrupt(cpu);
next_tb = 0;
}
@@ -469,7 +467,7 @@ int cpu_exec(CPUArchState *env)
if (((type == TT_EXTINT) &&
cpu_pil_allowed(env, pil)) ||
type != TT_EXTINT) {
- env->exception_index = env->interrupt_index;
+ cpu->exception_index = env->interrupt_index;
cc->do_interrupt(cpu);
next_tb = 0;
}
@@ -478,7 +476,7 @@ int cpu_exec(CPUArchState *env)
#elif defined(TARGET_ARM)
if (interrupt_request & CPU_INTERRUPT_FIQ
&& !(env->daif & PSTATE_F)) {
- env->exception_index = EXCP_FIQ;
+ cpu->exception_index = EXCP_FIQ;
cc->do_interrupt(cpu);
next_tb = 0;
}
@@ -494,14 +492,14 @@ int cpu_exec(CPUArchState *env)
if (interrupt_request & CPU_INTERRUPT_HARD
&& ((IS_M(env) && env->regs[15] < 0xfffffff0)
|| !(env->daif & PSTATE_I))) {
- env->exception_index = EXCP_IRQ;
+ cpu->exception_index = EXCP_IRQ;
cc->do_interrupt(cpu);
next_tb = 0;
}
#elif defined(TARGET_UNICORE32)
if (interrupt_request & CPU_INTERRUPT_HARD
&& !(env->uncached_asr & ASR_I)) {
- env->exception_index = UC32_EXCP_INTR;
+ cpu->exception_index = UC32_EXCP_INTR;
cc->do_interrupt(cpu);
next_tb = 0;
}
@@ -536,7 +534,7 @@ int cpu_exec(CPUArchState *env)
}
}
if (idx >= 0) {
- env->exception_index = idx;
+ cpu->exception_index = idx;
env->error_code = 0;
cc->do_interrupt(cpu);
next_tb = 0;
@@ -546,7 +544,7 @@ int cpu_exec(CPUArchState *env)
if (interrupt_request & CPU_INTERRUPT_HARD
&& (env->pregs[PR_CCS] & I_FLAG)
&& !env->locked_irq) {
- env->exception_index = EXCP_IRQ;
+ cpu->exception_index = EXCP_IRQ;
cc->do_interrupt(cpu);
next_tb = 0;
}
@@ -558,7 +556,7 @@ int cpu_exec(CPUArchState *env)
m_flag_archval = M_FLAG_V32;
}
if ((env->pregs[PR_CCS] & m_flag_archval)) {
- env->exception_index = EXCP_NMI;
+ cpu->exception_index = EXCP_NMI;
cc->do_interrupt(cpu);
next_tb = 0;
}
@@ -572,7 +570,7 @@ int cpu_exec(CPUArchState *env)
hardware doesn't rely on this, so we
provide/save the vector when the interrupt is
first signalled. */
- env->exception_index = env->pending_vector;
+ cpu->exception_index = env->pending_vector;
do_interrupt_m68k_hardirq(env);
next_tb = 0;
}
@@ -584,7 +582,7 @@ int cpu_exec(CPUArchState *env)
}
#elif defined(TARGET_XTENSA)
if (interrupt_request & CPU_INTERRUPT_HARD) {
- env->exception_index = EXC_IRQ;
+ cpu->exception_index = EXC_IRQ;
cc->do_interrupt(cpu);
next_tb = 0;
}
@@ -600,10 +598,11 @@ int cpu_exec(CPUArchState *env)
}
if (unlikely(cpu->exit_request)) {
cpu->exit_request = 0;
- env->exception_index = EXCP_INTERRUPT;
- cpu_loop_exit(env);
+ cpu->exception_index = EXCP_INTERRUPT;
+ cpu_loop_exit(cpu);
}
spin_lock(&tcg_ctx.tb_ctx.tb_lock);
+ have_tb_lock = true;
tb = tb_find_fast(env);
/* Note: we do it here to avoid a gcc bug on Mac OS X when
doing it in tb_find_slow */
@@ -625,6 +624,7 @@ int cpu_exec(CPUArchState *env)
tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
next_tb & TB_EXIT_MASK, tb);
}
+ have_tb_lock = false;
spin_unlock(&tcg_ctx.tb_ctx.tb_lock);
/* cpu_interrupt might be called while translating the
@@ -654,25 +654,25 @@ int cpu_exec(CPUArchState *env)
/* Instruction counter expired. */
int insns_left;
tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
- insns_left = env->icount_decr.u32;
- if (env->icount_extra && insns_left >= 0) {
+ insns_left = cpu->icount_decr.u32;
+ if (cpu->icount_extra && insns_left >= 0) {
/* Refill decrementer and continue execution. */
- env->icount_extra += insns_left;
- if (env->icount_extra > 0xffff) {
+ cpu->icount_extra += insns_left;
+ if (cpu->icount_extra > 0xffff) {
insns_left = 0xffff;
} else {
- insns_left = env->icount_extra;
+ insns_left = cpu->icount_extra;
}
- env->icount_extra -= insns_left;
- env->icount_decr.u16.low = insns_left;
+ cpu->icount_extra -= insns_left;
+ cpu->icount_decr.u16.low = insns_left;
} else {
if (insns_left > 0) {
/* Execute remaining instructions. */
cpu_exec_nocache(env, insns_left, tb);
}
- env->exception_index = EXCP_INTERRUPT;
+ cpu->exception_index = EXCP_INTERRUPT;
next_tb = 0;
- cpu_loop_exit(env);
+ cpu_loop_exit(cpu);
}
break;
}
@@ -696,6 +696,10 @@ int cpu_exec(CPUArchState *env)
#ifdef TARGET_I386
x86_cpu = X86_CPU(cpu);
#endif
+ if (have_tb_lock) {
+ spin_unlock(&tcg_ctx.tb_ctx.tb_lock);
+ have_tb_lock = false;
+ }
}
} /* for(;;) */
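
Two things happen in cpu-exec.c: per-CPU state (exception_index, jmp_env, the icount fields, tb_jmp_cache) moves from CPUArchState to CPUState, and a volatile have_tb_lock flag ensures the tb_lock taken before tb_find_fast() is released if a siglongjmp() aborts the loop. The volatile qualifier is essential: a local modified after sigsetjmp() has an indeterminate value after the longjmp unless it is volatile-qualified. A standalone illustration of that rule:

    #include <setjmp.h>
    #include <stdbool.h>
    #include <stdio.h>

    static sigjmp_buf jmp_env;

    static void abort_block(void)
    {
        siglongjmp(jmp_env, 1);                 /* models cpu_loop_exit() */
    }

    int main(void)
    {
        volatile bool have_lock = false;        /* must be volatile here */

        if (sigsetjmp(jmp_env, 0) == 0) {
            have_lock = true;                   /* "take tb_lock" */
            abort_block();                      /* never reaches the unlock */
            have_lock = false;
        }
        if (have_lock) {
            puts("cleaning up lock held across longjmp");
            have_lock = false;
        }
        return 0;
    }
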
diff --git a/cpus.c b/cpus.c
index b6421fd2a8..7bbe15348c 100644
--- a/cpus.c
+++ b/cpus.c
@@ -76,7 +76,7 @@ static bool cpu_thread_is_idle(CPUState *cpu)
if (cpu_is_stopped(cpu)) {
return true;
}
- if (!cpu->halted || qemu_cpu_has_work(cpu) ||
+ if (!cpu->halted || cpu_has_work(cpu) ||
kvm_halt_in_kernel()) {
return false;
}
@@ -139,11 +139,10 @@ static int64_t cpu_get_icount_locked(void)
icount = qemu_icount;
if (cpu) {
- CPUArchState *env = cpu->env_ptr;
- if (!can_do_io(env)) {
+ if (!cpu_can_do_io(cpu)) {
fprintf(stderr, "Bad clock read\n");
}
- icount -= (env->icount_decr.u16.low + env->icount_extra);
+ icount -= (cpu->icount_decr.u16.low + cpu->icount_extra);
}
return qemu_icount_bias + (icount << icount_time_shift);
}
@@ -1236,6 +1235,7 @@ int vm_stop_force_state(RunState state)
static int tcg_cpu_exec(CPUArchState *env)
{
+ CPUState *cpu = ENV_GET_CPU(env);
int ret;
#ifdef CONFIG_PROFILER
int64_t ti;
@@ -1248,9 +1248,9 @@ static int tcg_cpu_exec(CPUArchState *env)
int64_t count;
int64_t deadline;
int decr;
- qemu_icount -= (env->icount_decr.u16.low + env->icount_extra);
- env->icount_decr.u16.low = 0;
- env->icount_extra = 0;
+ qemu_icount -= (cpu->icount_decr.u16.low + cpu->icount_extra);
+ cpu->icount_decr.u16.low = 0;
+ cpu->icount_extra = 0;
deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
/* Maintain prior (possibly buggy) behaviour where if no deadline
@@ -1266,8 +1266,8 @@ static int tcg_cpu_exec(CPUArchState *env)
qemu_icount += count;
decr = (count > 0xffff) ? 0xffff : count;
count -= decr;
- env->icount_decr.u16.low = decr;
- env->icount_extra = count;
+ cpu->icount_decr.u16.low = decr;
+ cpu->icount_extra = count;
}
ret = cpu_exec(env);
#ifdef CONFIG_PROFILER
@@ -1276,10 +1276,9 @@ static int tcg_cpu_exec(CPUArchState *env)
if (use_icount) {
/* Fold pending instructions back into the
instruction counter, and clear the interrupt flag. */
- qemu_icount -= (env->icount_decr.u16.low
- + env->icount_extra);
- env->icount_decr.u32 = 0;
- env->icount_extra = 0;
+ qemu_icount -= (cpu->icount_decr.u16.low + cpu->icount_extra);
+ cpu->icount_decr.u32 = 0;
+ cpu->icount_extra = 0;
}
return ret;
}
@@ -1455,7 +1454,7 @@ void qmp_pmemsave(int64_t addr, int64_t size, const char *filename,
l = sizeof(buf);
if (l > size)
l = size;
- cpu_physical_memory_rw(addr, buf, l, 0);
+ cpu_physical_memory_read(addr, buf, l);
if (fwrite(buf, 1, l, f) != l) {
error_set(errp, QERR_IO_ERROR);
goto exit;
diff --git a/cputlb.c b/cputlb.c
index 0fbaa39412..7bd3573025 100644
--- a/cputlb.c
+++ b/cputlb.c
@@ -46,9 +46,9 @@ int tlb_flush_count;
* entries from the TLB at any time, so flushing more entries than
* required is only an efficiency issue, not a correctness issue.
*/
-void tlb_flush(CPUArchState *env, int flush_global)
+void tlb_flush(CPUState *cpu, int flush_global)
{
- CPUState *cpu = ENV_GET_CPU(env);
+ CPUArchState *env = cpu->env_ptr;
#if defined(DEBUG_TLB)
printf("tlb_flush:\n");
@@ -58,7 +58,7 @@ void tlb_flush(CPUArchState *env, int flush_global)
cpu->current_tb = NULL;
memset(env->tlb_table, -1, sizeof(env->tlb_table));
- memset(env->tb_jmp_cache, 0, sizeof(env->tb_jmp_cache));
+ memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
env->tlb_flush_addr = -1;
env->tlb_flush_mask = 0;
@@ -77,9 +77,9 @@ static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
}
}
-void tlb_flush_page(CPUArchState *env, target_ulong addr)
+void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
- CPUState *cpu = ENV_GET_CPU(env);
+ CPUArchState *env = cpu->env_ptr;
int i;
int mmu_idx;
@@ -93,7 +93,7 @@ void tlb_flush_page(CPUArchState *env, target_ulong addr)
TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
env->tlb_flush_addr, env->tlb_flush_mask);
#endif
- tlb_flush(env, 1);
+ tlb_flush(cpu, 1);
return;
}
/* must reset current TB so that interrupts cannot modify the
@@ -106,7 +106,7 @@ void tlb_flush_page(CPUArchState *env, target_ulong addr)
tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
}
- tb_flush_jmp_cache(env, addr);
+ tb_flush_jmp_cache(cpu, addr);
}
/* update the TLBs so that writes to code in the virtual page 'addr'
@@ -119,7 +119,7 @@ void tlb_protect_code(ram_addr_t ram_addr)
/* update the TLB so that writes in physical page 'phys_addr' are no longer
tested for self modifying code */
-void tlb_unprotect_code_phys(CPUArchState *env, ram_addr_t ram_addr,
+void tlb_unprotect_code_phys(CPUState *cpu, ram_addr_t ram_addr,
target_ulong vaddr)
{
cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
@@ -221,10 +221,11 @@ static void tlb_add_large_page(CPUArchState *env, target_ulong vaddr,
/* Add a new TLB entry. At most one entry for a given virtual address
is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
supplied size is only used by tlb_flush_page. */
-void tlb_set_page(CPUArchState *env, target_ulong vaddr,
+void tlb_set_page(CPUState *cpu, target_ulong vaddr,
hwaddr paddr, int prot,
int mmu_idx, target_ulong size)
{
+ CPUArchState *env = cpu->env_ptr;
MemoryRegionSection *section;
unsigned int index;
target_ulong address;
@@ -232,7 +233,6 @@ void tlb_set_page(CPUArchState *env, target_ulong vaddr,
uintptr_t addend;
CPUTLBEntry *te;
hwaddr iotlb, xlat, sz;
- CPUState *cpu = ENV_GET_CPU(env);
assert(size >= TARGET_PAGE_SIZE);
if (size != TARGET_PAGE_SIZE) {
@@ -261,7 +261,7 @@ void tlb_set_page(CPUArchState *env, target_ulong vaddr,
}
code_address = address;
- iotlb = memory_region_section_get_iotlb(env, section, vaddr, paddr, xlat,
+ iotlb = memory_region_section_get_iotlb(cpu, section, vaddr, paddr, xlat,
prot, &address);
index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
@@ -322,7 +322,7 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
if (cc->do_unassigned_access) {
cc->do_unassigned_access(cpu, addr, false, true, 0, 4);
} else {
- cpu_abort(env1, "Trying to execute code outside RAM or ROM at 0x"
+ cpu_abort(cpu, "Trying to execute code outside RAM or ROM at 0x"
TARGET_FMT_lx "\n", addr);
}
}
diff --git a/default-configs/usb.mak b/default-configs/usb.mak
index 1bf9075e85..73d84895aa 100644
--- a/default-configs/usb.mak
+++ b/default-configs/usb.mak
@@ -1,6 +1,7 @@
CONFIG_USB_TABLET_WACOM=y
CONFIG_USB_STORAGE_BOT=y
CONFIG_USB_STORAGE_UAS=y
+CONFIG_USB_STORAGE_MTP=y
CONFIG_USB_SMARTCARD=y
CONFIG_USB_AUDIO=y
CONFIG_USB_SERIAL=y
diff --git a/device-hotplug.c b/device-hotplug.c
index 103d34ac45..ebfa6b1016 100644
--- a/device-hotplug.c
+++ b/device-hotplug.c
@@ -33,12 +33,14 @@ DriveInfo *add_init_drive(const char *optstr)
{
DriveInfo *dinfo;
QemuOpts *opts;
+ MachineClass *mc;
opts = drive_def(optstr);
if (!opts)
return NULL;
- dinfo = drive_init(opts, current_machine->block_default_type);
+ mc = MACHINE_GET_CLASS(current_machine);
+ dinfo = drive_init(opts, mc->qemu_machine->block_default_type);
if (!dinfo) {
qemu_opts_del(opts);
return NULL;
diff --git a/dma-helpers.c b/dma-helpers.c
index c9620a5bbd..5f421e9814 100644
--- a/dma-helpers.c
+++ b/dma-helpers.c
@@ -213,6 +213,7 @@ BlockDriverAIOCB *dma_bdrv_io(
dbs->sg_cur_index = 0;
dbs->sg_cur_byte = 0;
dbs->dir = dir;
+ dbs->in_cancel = false;
dbs->io_func = io_func;
dbs->bh = NULL;
qemu_iovec_init(&dbs->iov, sg->nsg);
diff --git a/docs/writing-qmp-commands.txt b/docs/writing-qmp-commands.txt
index 8349dec8af..3930a9ba70 100644
--- a/docs/writing-qmp-commands.txt
+++ b/docs/writing-qmp-commands.txt
@@ -311,7 +311,7 @@ void hmp_hello_world(Monitor *mon, const QDict *qdict)
Error *errp = NULL;
qmp_hello_world(!!message, message, &errp);
- if (error_is_set(&errp)) {
+ if (errp) {
monitor_printf(mon, "%s\n", error_get_pretty(errp));
error_free(errp);
return;
@@ -483,7 +483,7 @@ void hmp_info_alarm_clock(Monitor *mon)
Error *errp = NULL;
clock = qmp_query_alarm_clock(&errp);
- if (error_is_set(&errp)) {
+ if (errp) {
monitor_printf(mon, "Could not query alarm clock information\n");
error_free(errp);
return;
@@ -634,7 +634,7 @@ void hmp_info_alarm_methods(Monitor *mon)
Error *errp = NULL;
method_list = qmp_query_alarm_methods(&errp);
- if (error_is_set(&errp)) {
+ if (errp) {
monitor_printf(mon, "Could not query alarm methods\n");
error_free(errp);
return;
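
The documentation updates replace error_is_set(&err) with a direct test of the pointer, which is equivalent whenever the local Error * starts out NULL. A condensed sketch of the resulting HMP-wrapper pattern, assuming the monitor and error APIs already used in that document:

    void hmp_hello_world(Monitor *mon, const QDict *qdict)
    {
        const char *message = qdict_get_try_str(qdict, "message");
        Error *err = NULL;

        qmp_hello_world(!!message, message, &err);
        if (err) {                              /* was: error_is_set(&err) */
            monitor_printf(mon, "%s\n", error_get_pretty(err));
            error_free(err);
        }
    }
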
diff --git a/exec.c b/exec.c
index 31ed3750aa..91513c6c43 100644
--- a/exec.c
+++ b/exec.c
@@ -33,6 +33,7 @@
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
+#include "qemu/error-report.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
@@ -419,7 +420,7 @@ static int cpu_common_post_load(void *opaque, int version_id)
/* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
version_id is increased. */
cpu->interrupt_request &= ~0x01;
- tlb_flush(cpu->env_ptr, 1);
+ tlb_flush(cpu, 1);
return 0;
}
@@ -484,8 +485,8 @@ void cpu_exec_init(CPUArchState *env)
}
cpu->cpu_index = cpu_index;
cpu->numa_node = 0;
- QTAILQ_INIT(&env->breakpoints);
- QTAILQ_INIT(&env->watchpoints);
+ QTAILQ_INIT(&cpu->breakpoints);
+ QTAILQ_INIT(&cpu->watchpoints);
#ifndef CONFIG_USER_ONLY
cpu->as = &address_space_memory;
cpu->thread_id = qemu_get_thread_id();
@@ -527,29 +528,29 @@ static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
#endif /* TARGET_HAS_ICE */
#if defined(CONFIG_USER_ONLY)
-void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
+void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
}
-int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
+int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
int flags, CPUWatchpoint **watchpoint)
{
return -ENOSYS;
}
#else
/* Add a watchpoint. */
-int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
+int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
int flags, CPUWatchpoint **watchpoint)
{
- target_ulong len_mask = ~(len - 1);
+ vaddr len_mask = ~(len - 1);
CPUWatchpoint *wp;
/* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
if ((len & (len - 1)) || (addr & ~len_mask) ||
len == 0 || len > TARGET_PAGE_SIZE) {
- fprintf(stderr, "qemu: tried to set invalid watchpoint at "
- TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
+ error_report("tried to set invalid watchpoint at %"
+ VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
return -EINVAL;
}
wp = g_malloc(sizeof(*wp));
@@ -559,12 +560,13 @@ int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len
wp->flags = flags;
/* keep all GDB-injected watchpoints in front */
- if (flags & BP_GDB)
- QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
- else
- QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
+ if (flags & BP_GDB) {
+ QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
+ } else {
+ QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
+ }
- tlb_flush_page(env, addr);
+ tlb_flush_page(cpu, addr);
if (watchpoint)
*watchpoint = wp;
@@ -572,16 +574,16 @@ int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len
}
/* Remove a specific watchpoint. */
-int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
+int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
int flags)
{
- target_ulong len_mask = ~(len - 1);
+ vaddr len_mask = ~(len - 1);
CPUWatchpoint *wp;
- QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
+ QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
if (addr == wp->vaddr && len_mask == wp->len_mask
&& flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
- cpu_watchpoint_remove_by_ref(env, wp);
+ cpu_watchpoint_remove_by_ref(cpu, wp);
return 0;
}
}
@@ -589,29 +591,30 @@ int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len
}
/* Remove a specific watchpoint by reference. */
-void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
+void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
- QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
+ QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);
- tlb_flush_page(env, watchpoint->vaddr);
+ tlb_flush_page(cpu, watchpoint->vaddr);
g_free(watchpoint);
}
/* Remove all matching watchpoints. */
-void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
+void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
CPUWatchpoint *wp, *next;
- QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
- if (wp->flags & mask)
- cpu_watchpoint_remove_by_ref(env, wp);
+ QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
+ if (wp->flags & mask) {
+ cpu_watchpoint_remove_by_ref(cpu, wp);
+ }
}
}
#endif
/* Add a breakpoint. */
-int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
+int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
@@ -624,12 +627,12 @@ int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
/* keep all GDB-injected breakpoints in front */
if (flags & BP_GDB) {
- QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
+ QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
} else {
- QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
+ QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
}
- breakpoint_invalidate(ENV_GET_CPU(env), pc);
+ breakpoint_invalidate(cpu, pc);
if (breakpoint) {
*breakpoint = bp;
@@ -641,14 +644,14 @@ int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
}
/* Remove a specific breakpoint. */
-int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
+int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
#if defined(TARGET_HAS_ICE)
CPUBreakpoint *bp;
- QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
+ QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
if (bp->pc == pc && bp->flags == flags) {
- cpu_breakpoint_remove_by_ref(env, bp);
+ cpu_breakpoint_remove_by_ref(cpu, bp);
return 0;
}
}
@@ -659,26 +662,27 @@ int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
}
/* Remove a specific breakpoint by reference. */
-void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
+void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
- QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
+ QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);
- breakpoint_invalidate(ENV_GET_CPU(env), breakpoint->pc);
+ breakpoint_invalidate(cpu, breakpoint->pc);
g_free(breakpoint);
#endif
}
/* Remove all matching breakpoints. */
-void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
+void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
#if defined(TARGET_HAS_ICE)
CPUBreakpoint *bp, *next;
- QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
- if (bp->flags & mask)
- cpu_breakpoint_remove_by_ref(env, bp);
+ QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
+ if (bp->flags & mask) {
+ cpu_breakpoint_remove_by_ref(cpu, bp);
+ }
}
#endif
}
@@ -702,9 +706,8 @@ void cpu_single_step(CPUState *cpu, int enabled)
#endif
}
-void cpu_abort(CPUArchState *env, const char *fmt, ...)
+void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
- CPUState *cpu = ENV_GET_CPU(env);
va_list ap;
va_list ap2;
@@ -792,7 +795,7 @@ static void cpu_physical_memory_set_dirty_tracking(bool enable)
in_migration = enable;
}
-hwaddr memory_region_section_get_iotlb(CPUArchState *env,
+hwaddr memory_region_section_get_iotlb(CPUState *cpu,
MemoryRegionSection *section,
target_ulong vaddr,
hwaddr paddr, hwaddr xlat,
@@ -818,7 +821,7 @@ hwaddr memory_region_section_get_iotlb(CPUArchState *env,
/* Make accesses to pages with watchpoints go via the
watchpoint trap routines. */
- QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
+ QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
/* Avoid trapping reads of pages with a write breakpoint. */
if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
@@ -1553,7 +1556,7 @@ static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
flushed */
if (!cpu_physical_memory_is_clean(ram_addr)) {
CPUArchState *env = current_cpu->env_ptr;
- tlb_set_dirty(env, env->mem_io_vaddr);
+ tlb_set_dirty(env, current_cpu->mem_io_vaddr);
}
}
@@ -1572,34 +1575,35 @@ static const MemoryRegionOps notdirty_mem_ops = {
/* Generate a debug exception if a watchpoint has been hit. */
static void check_watchpoint(int offset, int len_mask, int flags)
{
- CPUArchState *env = current_cpu->env_ptr;
+ CPUState *cpu = current_cpu;
+ CPUArchState *env = cpu->env_ptr;
target_ulong pc, cs_base;
target_ulong vaddr;
CPUWatchpoint *wp;
int cpu_flags;
- if (env->watchpoint_hit) {
+ if (cpu->watchpoint_hit) {
/* We re-entered the check after replacing the TB. Now raise
* the debug interrupt so that is will trigger after the
* current instruction. */
- cpu_interrupt(ENV_GET_CPU(env), CPU_INTERRUPT_DEBUG);
+ cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
return;
}
- vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
- QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
+ vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
+ QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
if ((vaddr == (wp->vaddr & len_mask) ||
(vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
wp->flags |= BP_WATCHPOINT_HIT;
- if (!env->watchpoint_hit) {
- env->watchpoint_hit = wp;
- tb_check_watchpoint(env);
+ if (!cpu->watchpoint_hit) {
+ cpu->watchpoint_hit = wp;
+ tb_check_watchpoint(cpu);
if (wp->flags & BP_STOP_BEFORE_ACCESS) {
- env->exception_index = EXCP_DEBUG;
- cpu_loop_exit(env);
+ cpu->exception_index = EXCP_DEBUG;
+ cpu_loop_exit(cpu);
} else {
cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
- tb_gen_code(env, pc, cs_base, cpu_flags, 1);
- cpu_resume_from_signal(env, NULL);
+ tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
+ cpu_resume_from_signal(cpu, NULL);
}
}
} else {
@@ -1830,14 +1834,12 @@ static void tcg_commit(MemoryListener *listener)
reset the modified entries */
/* XXX: slow ! */
CPU_FOREACH(cpu) {
- CPUArchState *env = cpu->env_ptr;
-
/* FIXME: Disentangle the cpu.h circular files deps so we can
directly get the right CPU from listener. */
if (cpu->tcg_as_listener != listener) {
continue;
}
- tlb_flush(env, 1);
+ tlb_flush(cpu, 1);
}
}
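
The exec.c changes move the breakpoint and watchpoint lists onto CPUState and switch the address arguments to the target-independent vaddr type. The sanity check in cpu_watchpoint_insert() rejects anything that is not a power-of-two length, aligned to that length, and within a page. A standalone restatement of that check (helper name is illustrative):

    #include <stdbool.h>
    #include <stdint.h>

    static bool watchpoint_args_ok(uint64_t addr, uint64_t len, uint64_t page_size)
    {
        uint64_t len_mask = ~(len - 1);

        return len != 0 &&
               len <= page_size &&
               (len & (len - 1)) == 0 &&        /* power-of-two length */
               (addr & ~len_mask) == 0;         /* address aligned to len */
    }
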
diff --git a/fpu/softfloat.c b/fpu/softfloat.c
index fc0b179df4..e00a6fbca6 100644
--- a/fpu/softfloat.c
+++ b/fpu/softfloat.c
@@ -288,7 +288,7 @@ INLINE flag extractFloat32Sign( float32 a )
| If `a' is denormal and we are in flush-to-zero mode then set the
| input-denormal exception and return zero. Otherwise just return the value.
*----------------------------------------------------------------------------*/
-static float32 float32_squash_input_denormal(float32 a STATUS_PARAM)
+float32 float32_squash_input_denormal(float32 a STATUS_PARAM)
{
if (STATUS(flush_inputs_to_zero)) {
if (extractFloat32Exp(a) == 0 && extractFloat32Frac(a) != 0) {
@@ -473,7 +473,7 @@ INLINE flag extractFloat64Sign( float64 a )
| If `a' is denormal and we are in flush-to-zero mode then set the
| input-denormal exception and return zero. Otherwise just return the value.
*----------------------------------------------------------------------------*/
-static float64 float64_squash_input_denormal(float64 a STATUS_PARAM)
+float64 float64_squash_input_denormal(float64 a STATUS_PARAM)
{
if (STATUS(flush_inputs_to_zero)) {
if (extractFloat64Exp(a) == 0 && extractFloat64Frac(a) != 0) {
@@ -1628,6 +1628,26 @@ uint64 float32_to_uint64(float32 a STATUS_PARAM)
/*----------------------------------------------------------------------------
| Returns the result of converting the single-precision floating-point value
+| `a' to the 64-bit unsigned integer format. The conversion is
+| performed according to the IEC/IEEE Standard for Binary Floating-Point
+| Arithmetic, except that the conversion is always rounded toward zero. If
+| `a' is a NaN, the largest unsigned integer is returned. Otherwise, if the
+| conversion overflows, the largest unsigned integer is returned. If
+| `a' is negative, zero is returned; negative values that do not round to
+| zero will raise the inexact flag.
+*----------------------------------------------------------------------------*/
+
+uint64 float32_to_uint64_round_to_zero(float32 a STATUS_PARAM)
+{
+ signed char current_rounding_mode = STATUS(float_rounding_mode);
+ set_float_rounding_mode(float_round_to_zero STATUS_VAR);
+ int64_t v = float32_to_uint64(a STATUS_VAR);
+ set_float_rounding_mode(current_rounding_mode STATUS_VAR);
+ return v;
+}
+
+/*----------------------------------------------------------------------------
+| Returns the result of converting the single-precision floating-point value
| `a' to the 64-bit two's complement integer format. The conversion is
| performed according to the IEC/IEEE Standard for Binary Floating-Point
| Arithmetic, except that the conversion is always rounded toward zero. If
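The new float32_to_uint64_round_to_zero() helper above works by saving the caller's rounding mode, forcing round-toward-zero around the existing float32_to_uint64() conversion, and restoring the mode afterwards. A minimal sketch of that same save/force/restore pattern, written against the host C99 <fenv.h> API purely as an illustration (it is not the softfloat code path):

#include <fenv.h>
#include <math.h>
#include <stdint.h>
#include <stdio.h>

/* Convert truncating toward zero regardless of the caller's rounding mode,
 * then restore that mode, mirroring the new softfloat helper. */
static uint64_t to_u64_round_to_zero(float a)
{
    int saved = fegetround();      /* remember the caller's rounding mode */
    uint64_t v;

    fesetround(FE_TOWARDZERO);     /* force truncation for this conversion */
    v = (uint64_t)llrintf(a);      /* llrintf() honours the current mode */
    fesetround(saved);             /* restore the caller's mode */
    return v;
}

int main(void)
{
    printf("%llu\n", (unsigned long long)to_u64_round_to_zero(3.9f)); /* prints 3 */
    return 0;
}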
diff --git a/fsdev/virtfs-proxy-helper.c b/fsdev/virtfs-proxy-helper.c
index bfecb8706c..cd291d32f2 100644
--- a/fsdev/virtfs-proxy-helper.c
+++ b/fsdev/virtfs-proxy-helper.c
@@ -760,6 +760,7 @@ static int proxy_socket(const char *path, uid_t uid, gid_t gid)
return -1;
}
+ size = sizeof(qemu);
client = accept(sock, (struct sockaddr *)&qemu, &size);
if (client < 0) {
do_perror("accept");
diff --git a/gdbstub.c b/gdbstub.c
index e8ab0b2992..8afe0b701c 100644
--- a/gdbstub.c
+++ b/gdbstub.c
@@ -635,7 +635,6 @@ static const int xlat_gdb_type[] = {
static int gdb_breakpoint_insert(target_ulong addr, target_ulong len, int type)
{
CPUState *cpu;
- CPUArchState *env;
int err = 0;
if (kvm_enabled()) {
@@ -646,10 +645,10 @@ static int gdb_breakpoint_insert(target_ulong addr, target_ulong len, int type)
case GDB_BREAKPOINT_SW:
case GDB_BREAKPOINT_HW:
CPU_FOREACH(cpu) {
- env = cpu->env_ptr;
- err = cpu_breakpoint_insert(env, addr, BP_GDB, NULL);
- if (err)
+ err = cpu_breakpoint_insert(cpu, addr, BP_GDB, NULL);
+ if (err) {
break;
+ }
}
return err;
#ifndef CONFIG_USER_ONLY
@@ -657,8 +656,7 @@ static int gdb_breakpoint_insert(target_ulong addr, target_ulong len, int type)
case GDB_WATCHPOINT_READ:
case GDB_WATCHPOINT_ACCESS:
CPU_FOREACH(cpu) {
- env = cpu->env_ptr;
- err = cpu_watchpoint_insert(env, addr, len, xlat_gdb_type[type],
+ err = cpu_watchpoint_insert(cpu, addr, len, xlat_gdb_type[type],
NULL);
if (err)
break;
@@ -673,7 +671,6 @@ static int gdb_breakpoint_insert(target_ulong addr, target_ulong len, int type)
static int gdb_breakpoint_remove(target_ulong addr, target_ulong len, int type)
{
CPUState *cpu;
- CPUArchState *env;
int err = 0;
if (kvm_enabled()) {
@@ -684,10 +681,10 @@ static int gdb_breakpoint_remove(target_ulong addr, target_ulong len, int type)
case GDB_BREAKPOINT_SW:
case GDB_BREAKPOINT_HW:
CPU_FOREACH(cpu) {
- env = cpu->env_ptr;
- err = cpu_breakpoint_remove(env, addr, BP_GDB);
- if (err)
+ err = cpu_breakpoint_remove(cpu, addr, BP_GDB);
+ if (err) {
break;
+ }
}
return err;
#ifndef CONFIG_USER_ONLY
@@ -695,8 +692,7 @@ static int gdb_breakpoint_remove(target_ulong addr, target_ulong len, int type)
case GDB_WATCHPOINT_READ:
case GDB_WATCHPOINT_ACCESS:
CPU_FOREACH(cpu) {
- env = cpu->env_ptr;
- err = cpu_watchpoint_remove(env, addr, len, xlat_gdb_type[type]);
+ err = cpu_watchpoint_remove(cpu, addr, len, xlat_gdb_type[type]);
if (err)
break;
}
@@ -710,7 +706,6 @@ static int gdb_breakpoint_remove(target_ulong addr, target_ulong len, int type)
static void gdb_breakpoint_remove_all(void)
{
CPUState *cpu;
- CPUArchState *env;
if (kvm_enabled()) {
kvm_remove_all_breakpoints(gdbserver_state->c_cpu);
@@ -718,10 +713,9 @@ static void gdb_breakpoint_remove_all(void)
}
CPU_FOREACH(cpu) {
- env = cpu->env_ptr;
- cpu_breakpoint_remove_all(env, BP_GDB);
+ cpu_breakpoint_remove_all(cpu, BP_GDB);
#ifndef CONFIG_USER_ONLY
- cpu_watchpoint_remove_all(env, BP_GDB);
+ cpu_watchpoint_remove_all(cpu, BP_GDB);
#endif
}
}
@@ -1086,8 +1080,7 @@ static int gdb_handle_packet(GDBState *s, const char *line_buf)
}
#ifdef CONFIG_USER_ONLY
else if (strncmp(p, "Offsets", 7) == 0) {
- CPUArchState *env = s->c_cpu->env_ptr;
- TaskState *ts = env->opaque;
+ TaskState *ts = s->c_cpu->opaque;
snprintf(buf, sizeof(buf),
"Text=" TARGET_ABI_FMT_lx ";Data=" TARGET_ABI_FMT_lx
@@ -1205,8 +1198,8 @@ static void gdb_vm_state_change(void *opaque, int running, RunState state)
}
switch (state) {
case RUN_STATE_DEBUG:
- if (env->watchpoint_hit) {
- switch (env->watchpoint_hit->flags & BP_MEM_ACCESS) {
+ if (cpu->watchpoint_hit) {
+ switch (cpu->watchpoint_hit->flags & BP_MEM_ACCESS) {
case BP_MEM_READ:
type = "r";
break;
@@ -1220,8 +1213,8 @@ static void gdb_vm_state_change(void *opaque, int running, RunState state)
snprintf(buf, sizeof(buf),
"T%02xthread:%02x;%swatch:" TARGET_FMT_lx ";",
GDB_SIGNAL_TRAP, cpu_index(cpu), type,
- env->watchpoint_hit->vaddr);
- env->watchpoint_hit = NULL;
+ (target_ulong)cpu->watchpoint_hit->vaddr);
+ cpu->watchpoint_hit = NULL;
goto send_packet;
}
tb_flush(env);
@@ -1594,13 +1587,16 @@ int gdbserver_start(int port)
/* Disable gdb stub for child processes. */
void gdbserver_fork(CPUArchState *env)
{
+ CPUState *cpu = ENV_GET_CPU(env);
GDBState *s = gdbserver_state;
- if (gdbserver_fd < 0 || s->fd < 0)
- return;
+
+ if (gdbserver_fd < 0 || s->fd < 0) {
+ return;
+ }
close(s->fd);
s->fd = -1;
- cpu_breakpoint_remove_all(env, BP_GDB);
- cpu_watchpoint_remove_all(env, BP_GDB);
+ cpu_breakpoint_remove_all(cpu, BP_GDB);
+ cpu_watchpoint_remove_all(cpu, BP_GDB);
}
#else
static int gdb_chr_can_receive(void *opaque)
diff --git a/hmp-commands.hx b/hmp-commands.hx
index f3fc514427..8971f1b153 100644
--- a/hmp-commands.hx
+++ b/hmp-commands.hx
@@ -176,7 +176,7 @@ ETEXI
{
.name = "drive_del",
- .args_type = "id:s",
+ .args_type = "id:B",
.params = "device",
.help = "remove host block device",
.user_print = monitor_user_noop,
@@ -658,6 +658,7 @@ ETEXI
.help = "add device, like -device on the command line",
.user_print = monitor_user_noop,
.mhandler.cmd_new = do_device_add,
+ .command_completion = device_add_completion,
},
STEXI
@@ -673,6 +674,7 @@ ETEXI
.params = "device",
.help = "remove device",
.mhandler.cmd = hmp_device_del,
+ .command_completion = device_del_completion,
},
STEXI
@@ -998,26 +1000,34 @@ ETEXI
{
.name = "dump-guest-memory",
- .args_type = "paging:-p,filename:F,begin:i?,length:i?",
- .params = "[-p] filename [begin] [length]",
- .help = "dump guest memory to file"
- "\n\t\t\t begin(optional): the starting physical address"
- "\n\t\t\t length(optional): the memory size, in bytes",
+ .args_type = "paging:-p,zlib:-z,lzo:-l,snappy:-s,filename:F,begin:i?,length:i?",
+ .params = "[-p] [-z|-l|-s] filename [begin length]",
+ .help = "dump guest memory into file 'filename'.\n\t\t\t"
+ "-p: do paging to get guest's memory mapping.\n\t\t\t"
+ "-z: dump in kdump-compressed format, with zlib compression.\n\t\t\t"
+ "-l: dump in kdump-compressed format, with lzo compression.\n\t\t\t"
+ "-s: dump in kdump-compressed format, with snappy compression.\n\t\t\t"
+ "begin: the starting physical address.\n\t\t\t"
+ "length: the memory size, in bytes.",
.mhandler.cmd = hmp_dump_guest_memory,
},
STEXI
-@item dump-guest-memory [-p] @var{protocol} @var{begin} @var{length}
+@item dump-guest-memory [-p] @var{filename} @var{begin} @var{length}
+@item dump-guest-memory [-z|-l|-s] @var{filename}
@findex dump-guest-memory
Dump guest memory to @var{protocol}. The file can be processed with crash or
-gdb.
- filename: dump file name
- paging: do paging to get guest's memory mapping
+gdb. Without -z|-l|-s, the dump format is ELF.
+ -p: do paging to get guest's memory mapping.
+ -z: dump in kdump-compressed format, with zlib compression.
+ -l: dump in kdump-compressed format, with lzo compression.
+ -s: dump in kdump-compressed format, with snappy compression.
+ filename: dump file name.
begin: the starting physical address. It's optional, and should be
- specified with length together.
+ specified together with length.
length: the memory size, in bytes. It's optional, and should be specified
- with begin together.
+ together with begin.
ETEXI
{
@@ -1254,6 +1264,7 @@ ETEXI
.params = "[qom-type=]type,id=str[,prop=value][,...]",
.help = "create QOM object",
.mhandler.cmd = hmp_object_add,
+ .command_completion = object_add_completion,
},
STEXI
@@ -1268,6 +1279,7 @@ ETEXI
.params = "id",
.help = "destroy QOM object",
.mhandler.cmd = hmp_object_del,
+ .command_completion = object_del_completion,
},
STEXI
diff --git a/hmp.c b/hmp.c
index 2f279c4ff2..ca869bafa8 100644
--- a/hmp.c
+++ b/hmp.c
@@ -1308,16 +1308,35 @@ void hmp_dump_guest_memory(Monitor *mon, const QDict *qdict)
{
Error *errp = NULL;
int paging = qdict_get_try_bool(qdict, "paging", 0);
+ int zlib = qdict_get_try_bool(qdict, "zlib", 0);
+ int lzo = qdict_get_try_bool(qdict, "lzo", 0);
+ int snappy = qdict_get_try_bool(qdict, "snappy", 0);
const char *file = qdict_get_str(qdict, "filename");
bool has_begin = qdict_haskey(qdict, "begin");
bool has_length = qdict_haskey(qdict, "length");
- /* kdump-compressed format is not supported for HMP */
- bool has_format = false;
int64_t begin = 0;
int64_t length = 0;
enum DumpGuestMemoryFormat dump_format = DUMP_GUEST_MEMORY_FORMAT_ELF;
char *prot;
+ if (zlib + lzo + snappy > 1) {
+ error_setg(&errp, "only one of '-z|-l|-s' can be set");
+ hmp_handle_error(mon, &errp);
+ return;
+ }
+
+ if (zlib) {
+ dump_format = DUMP_GUEST_MEMORY_FORMAT_KDUMP_ZLIB;
+ }
+
+ if (lzo) {
+ dump_format = DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO;
+ }
+
+ if (snappy) {
+ dump_format = DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY;
+ }
+
if (has_begin) {
begin = qdict_get_int(qdict, "begin");
}
@@ -1328,7 +1347,7 @@ void hmp_dump_guest_memory(Monitor *mon, const QDict *qdict)
prot = g_strconcat("file:", file, NULL);
qmp_dump_guest_memory(paging, prot, has_begin, begin, has_length, length,
- has_format, dump_format, &errp);
+ true, dump_format, &errp);
hmp_handle_error(mon, &errp);
g_free(prot);
}
diff --git a/hmp.h b/hmp.h
index ed58f0ea41..20ef454b4a 100644
--- a/hmp.h
+++ b/hmp.h
@@ -15,6 +15,7 @@
#define HMP_H
#include "qemu-common.h"
+#include "qemu/readline.h"
#include "qapi-types.h"
#include "qapi/qmp/qdict.h"
@@ -92,5 +93,9 @@ void hmp_qemu_io(Monitor *mon, const QDict *qdict);
void hmp_cpu_add(Monitor *mon, const QDict *qdict);
void hmp_object_add(Monitor *mon, const QDict *qdict);
void hmp_object_del(Monitor *mon, const QDict *qdict);
+void object_add_completion(ReadLineState *rs, int nb_args, const char *str);
+void object_del_completion(ReadLineState *rs, int nb_args, const char *str);
+void device_add_completion(ReadLineState *rs, int nb_args, const char *str);
+void device_del_completion(ReadLineState *rs, int nb_args, const char *str);
#endif
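The four completion callbacks declared above are the targets of the new .command_completion hooks added to hmp-commands.hx. A sketch of the shape such a callback takes, using a hypothetical static candidate list (the real implementations walk QOM class and object lists):

#include <string.h>
#include "qemu/readline.h"

static const char *const candidates[] = { "foo", "bar", NULL };

static void my_completion(ReadLineState *rs, int nb_args, const char *str)
{
    size_t len = strlen(str);
    int i;

    if (nb_args != 2) {
        return;                          /* only complete the first argument */
    }
    readline_set_completion_index(rs, len);
    for (i = 0; candidates[i] != NULL; i++) {
        if (strncmp(candidates[i], str, len) == 0) {
            readline_add_completion(rs, candidates[i]);
        }
    }
}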
diff --git a/hw/9pfs/virtio-9p.c b/hw/9pfs/virtio-9p.c
index 83e4e93983..9aa6725f09 100644
--- a/hw/9pfs/virtio-9p.c
+++ b/hw/9pfs/virtio-9p.c
@@ -987,8 +987,9 @@ static void v9fs_attach(void *opaque)
*/
if (!s->migration_blocker) {
s->root_fid = fid;
- error_set(&s->migration_blocker, QERR_VIRTFS_FEATURE_BLOCKS_MIGRATION,
- s->ctx.fs_root ? s->ctx.fs_root : "NULL", s->tag);
+ error_setg(&s->migration_blocker,
+ "Migration is disabled when VirtFS export path '%s' is mounted in the guest using mount_tag '%s'",
+ s->ctx.fs_root ? s->ctx.fs_root : "NULL", s->tag);
migrate_add_blocker(s->migration_blocker);
}
out:
diff --git a/hw/acpi/cpu_hotplug.c b/hw/acpi/cpu_hotplug.c
index 48928dc0ea..2ad83a0ede 100644
--- a/hw/acpi/cpu_hotplug.c
+++ b/hw/acpi/cpu_hotplug.c
@@ -43,6 +43,7 @@ void AcpiCpuHotplug_add(ACPIGPE *gpe, AcpiCpuHotplug *g, CPUState *cpu)
*gpe->sts = *gpe->sts | ACPI_CPU_HOTPLUG_STATUS;
cpu_id = k->get_arch_id(CPU(cpu));
+ g_assert((cpu_id / 8) < ACPI_GPE_PROC_LEN);
g->sts[cpu_id / 8] |= (1 << (cpu_id % 8));
}
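The added g_assert() guards the byte-array bitmap update on the following line: each CPU occupies one bit of g->sts[], so the byte index cpu_id / 8 must stay below the array length. A standalone sketch of that update, with STS_LEN standing in for ACPI_GPE_PROC_LEN:

#include <assert.h>
#include <stdint.h>

#define STS_LEN 32                    /* stand-in for ACPI_GPE_PROC_LEN */

static void sts_set_cpu(uint8_t sts[STS_LEN], unsigned int cpu_id)
{
    assert(cpu_id / 8 < STS_LEN);     /* same bound the patch now enforces */
    sts[cpu_id / 8] |= 1u << (cpu_id % 8);
}

int main(void)
{
    uint8_t sts[STS_LEN] = { 0 };

    sts_set_cpu(sts, 9);              /* sets bit 1 of byte 1 */
    return sts[1] == 0x02 ? 0 : 1;
}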
diff --git a/hw/arm/boot.c b/hw/arm/boot.c
index dc62918da2..3d1f4a255b 100644
--- a/hw/arm/boot.c
+++ b/hw/arm/boot.c
@@ -448,6 +448,7 @@ void arm_load_kernel(ARMCPU *cpu, struct arm_boot_info *info)
int initrd_size;
int is_linux = 0;
uint64_t elf_entry;
+ int elf_machine;
hwaddr entry, kernel_load_offset;
int big_endian;
static const ARMInsnFixup *primary_loader;
@@ -463,9 +464,11 @@ void arm_load_kernel(ARMCPU *cpu, struct arm_boot_info *info)
if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
primary_loader = bootloader_aarch64;
kernel_load_offset = KERNEL64_LOAD_ADDR;
+ elf_machine = EM_AARCH64;
} else {
primary_loader = bootloader;
kernel_load_offset = KERNEL_LOAD_ADDR;
+ elf_machine = EM_ARM;
}
info->dtb_filename = qemu_opt_get(qemu_get_machine_opts(), "dtb");
@@ -501,7 +504,7 @@ void arm_load_kernel(ARMCPU *cpu, struct arm_boot_info *info)
/* Assume that raw images are linux kernels, and ELF images are not. */
kernel_size = load_elf(info->kernel_filename, NULL, NULL, &elf_entry,
- NULL, NULL, big_endian, ELF_MACHINE, 1);
+ NULL, NULL, big_endian, elf_machine, 1);
entry = elf_entry;
if (kernel_size < 0) {
kernel_size = load_uimage(info->kernel_filename, &entry, NULL,
diff --git a/hw/arm/cubieboard.c b/hw/arm/cubieboard.c
index d95a7f35eb..9d158c7248 100644
--- a/hw/arm/cubieboard.c
+++ b/hw/arm/cubieboard.c
@@ -43,6 +43,19 @@ static void cubieboard_init(QEMUMachineInitArgs *args)
exit(1);
}
+ object_property_set_int(OBJECT(&s->a10->timer), 32768, "clk0-freq", &err);
+ if (err != NULL) {
+ error_report("Couldn't set clk0 frequency: %s", error_get_pretty(err));
+ exit(1);
+ }
+
+ object_property_set_int(OBJECT(&s->a10->timer), 24000000, "clk1-freq",
+ &err);
+ if (err != NULL) {
+ error_report("Couldn't set clk1 frequency: %s", error_get_pretty(err));
+ exit(1);
+ }
+
object_property_set_bool(OBJECT(s->a10), true, "realized", &err);
if (err != NULL) {
error_report("Couldn't realize Allwinner A10: %s",
diff --git a/hw/arm/exynos4210.c b/hw/arm/exynos4210.c
index 9f137e9acd..6426d168d2 100644
--- a/hw/arm/exynos4210.c
+++ b/hw/arm/exynos4210.c
@@ -143,11 +143,21 @@ Exynos4210State *exynos4210_init(MemoryRegion *system_mem,
unsigned long mem_size;
DeviceState *dev;
SysBusDevice *busdev;
+ ObjectClass *cpu_oc;
+
+ cpu_oc = cpu_class_by_name(TYPE_ARM_CPU, "cortex-a9");
+ assert(cpu_oc);
for (n = 0; n < EXYNOS4210_NCPUS; n++) {
- s->cpu[n] = cpu_arm_init("cortex-a9");
- if (!s->cpu[n]) {
- fprintf(stderr, "Unable to find CPU %d definition\n", n);
+ Object *cpuobj = object_new(object_class_get_name(cpu_oc));
+ Error *err = NULL;
+
+ s->cpu[n] = ARM_CPU(cpuobj);
+ object_property_set_int(cpuobj, EXYNOS4210_SMP_PRIVATE_BASE_ADDR,
+ "reset-cbar", &error_abort);
+ object_property_set_bool(cpuobj, true, "realized", &err);
+ if (err) {
+ error_report("%s", error_get_pretty(err));
exit(1);
}
}
diff --git a/hw/arm/highbank.c b/hw/arm/highbank.c
index f66d57b113..46b9f1e0c0 100644
--- a/hw/arm/highbank.c
+++ b/hw/arm/highbank.c
@@ -230,18 +230,23 @@ static void calxeda_init(QEMUMachineInitArgs *args, enum cxmachines machine)
for (n = 0; n < smp_cpus; n++) {
ObjectClass *oc = cpu_class_by_name(TYPE_ARM_CPU, cpu_model);
+ Object *cpuobj;
ARMCPU *cpu;
Error *err = NULL;
- cpu = ARM_CPU(object_new(object_class_get_name(oc)));
-
- object_property_set_int(OBJECT(cpu), MPCORE_PERIPHBASE, "reset-cbar",
- &err);
- if (err) {
- error_report("%s", error_get_pretty(err));
+ if (!oc) {
+ error_report("Unable to find CPU definition");
exit(1);
}
- object_property_set_bool(OBJECT(cpu), true, "realized", &err);
+
+ cpuobj = object_new(object_class_get_name(oc));
+ cpu = ARM_CPU(cpuobj);
+
+ if (object_property_find(cpuobj, "reset-cbar", NULL)) {
+ object_property_set_int(cpuobj, MPCORE_PERIPHBASE,
+ "reset-cbar", &error_abort);
+ }
+ object_property_set_bool(cpuobj, true, "realized", &err);
if (err) {
error_report("%s", error_get_pretty(err));
exit(1);
diff --git a/hw/arm/integratorcp.c b/hw/arm/integratorcp.c
index a759689b44..912af96ee6 100644
--- a/hw/arm/integratorcp.c
+++ b/hw/arm/integratorcp.c
@@ -534,7 +534,6 @@ static QEMUMachine integratorcp_machine = {
.name = "integratorcp",
.desc = "ARM Integrator/CP (ARM926EJ-S)",
.init = integratorcp_init,
- .is_default = 1,
};
static void integratorcp_machine_init(void)
diff --git a/hw/arm/realview.c b/hw/arm/realview.c
index 6ef7646002..7e04e507f9 100644
--- a/hw/arm/realview.c
+++ b/hw/arm/realview.c
@@ -18,6 +18,7 @@
#include "hw/i2c/i2c.h"
#include "sysemu/blockdev.h"
#include "exec/address-spaces.h"
+#include "qemu/error-report.h"
#define SMP_BOOT_ADDR 0xe0000000
#define SMP_BOOTREG_ADDR 0x10000030
@@ -49,6 +50,7 @@ static void realview_init(QEMUMachineInitArgs *args,
{
ARMCPU *cpu = NULL;
CPUARMState *env;
+ ObjectClass *cpu_oc;
MemoryRegion *sysmem = get_system_memory();
MemoryRegion *ram_lo = g_new(MemoryRegion, 1);
MemoryRegion *ram_hi = g_new(MemoryRegion, 1);
@@ -70,12 +72,14 @@ static void realview_init(QEMUMachineInitArgs *args,
uint32_t sys_id;
ram_addr_t low_ram_size;
ram_addr_t ram_size = args->ram_size;
+ hwaddr periphbase = 0;
switch (board_type) {
case BOARD_EB:
break;
case BOARD_EB_MPCORE:
is_mpcore = 1;
+ periphbase = 0x10100000;
break;
case BOARD_PB_A8:
is_pb = 1;
@@ -83,16 +87,37 @@ static void realview_init(QEMUMachineInitArgs *args,
case BOARD_PBX_A9:
is_mpcore = 1;
is_pb = 1;
+ periphbase = 0x1f000000;
break;
}
+
+ cpu_oc = cpu_class_by_name(TYPE_ARM_CPU, args->cpu_model);
+ if (!cpu_oc) {
+ fprintf(stderr, "Unable to find CPU definition\n");
+ exit(1);
+ }
+
for (n = 0; n < smp_cpus; n++) {
- cpu = cpu_arm_init(args->cpu_model);
- if (!cpu) {
- fprintf(stderr, "Unable to find CPU definition\n");
+ Object *cpuobj = object_new(object_class_get_name(cpu_oc));
+ Error *err = NULL;
+
+ if (is_pb && is_mpcore) {
+ object_property_set_int(cpuobj, periphbase, "reset-cbar", &err);
+ if (err) {
+ error_report("%s", error_get_pretty(err));
+ exit(1);
+ }
+ }
+
+ object_property_set_bool(cpuobj, true, "realized", &err);
+ if (err) {
+ error_report("%s", error_get_pretty(err));
exit(1);
}
- cpu_irq[n] = qdev_get_gpio_in(DEVICE(cpu), ARM_CPU_IRQ);
+
+ cpu_irq[n] = qdev_get_gpio_in(DEVICE(cpuobj), ARM_CPU_IRQ);
}
+ cpu = ARM_CPU(first_cpu);
env = &cpu->env;
if (arm_feature(env, ARM_FEATURE_V7)) {
if (is_mpcore) {
@@ -141,16 +166,10 @@ static void realview_init(QEMUMachineInitArgs *args,
sysbus_mmio_map(SYS_BUS_DEVICE(sysctl), 0, 0x10000000);
if (is_mpcore) {
- hwaddr periphbase;
dev = qdev_create(NULL, is_pb ? "a9mpcore_priv": "realview_mpcore");
qdev_prop_set_uint32(dev, "num-cpu", smp_cpus);
qdev_init_nofail(dev);
busdev = SYS_BUS_DEVICE(dev);
- if (is_pb) {
- periphbase = 0x1f000000;
- } else {
- periphbase = 0x10100000;
- }
sysbus_mmio_map(busdev, 0, periphbase);
for (n = 0; n < smp_cpus; n++) {
sysbus_connect_irq(busdev, n, cpu_irq[n]);
diff --git a/hw/arm/spitz.c b/hw/arm/spitz.c
index 2decff170f..392ca84c81 100644
--- a/hw/arm/spitz.c
+++ b/hw/arm/spitz.c
@@ -658,14 +658,15 @@ static void spitz_adc_temp_on(void *opaque, int line, int level)
max111x_set_input(max1111, MAX1111_BATT_TEMP, 0);
}
-static int corgi_ssp_init(SSISlave *dev)
+static int corgi_ssp_init(SSISlave *d)
{
- CorgiSSPState *s = FROM_SSI_SLAVE(CorgiSSPState, dev);
+ DeviceState *dev = DEVICE(d);
+ CorgiSSPState *s = FROM_SSI_SLAVE(CorgiSSPState, d);
- qdev_init_gpio_in(&dev->qdev, corgi_ssp_gpio_cs, 3);
- s->bus[0] = ssi_create_bus(&dev->qdev, "ssi0");
- s->bus[1] = ssi_create_bus(&dev->qdev, "ssi1");
- s->bus[2] = ssi_create_bus(&dev->qdev, "ssi2");
+ qdev_init_gpio_in(dev, corgi_ssp_gpio_cs, 3);
+ s->bus[0] = ssi_create_bus(dev, "ssi0");
+ s->bus[1] = ssi_create_bus(dev, "ssi1");
+ s->bus[2] = ssi_create_bus(dev, "ssi2");
return 0;
}
diff --git a/hw/arm/vexpress.c b/hw/arm/vexpress.c
index ef1707aef0..169eb061a3 100644
--- a/hw/arm/vexpress.c
+++ b/hw/arm/vexpress.c
@@ -32,6 +32,7 @@
#include "sysemu/blockdev.h"
#include "hw/block/flash.h"
#include "sysemu/device_tree.h"
+#include "qemu/error-report.h"
#include <libfdt.h>
#define VEXPRESS_BOARD_ID 0x8e0
@@ -173,6 +174,63 @@ struct VEDBoardInfo {
DBoardInitFn *init;
};
+static void init_cpus(const char *cpu_model, const char *privdev,
+ hwaddr periphbase, qemu_irq *pic)
+{
+ ObjectClass *cpu_oc = cpu_class_by_name(TYPE_ARM_CPU, cpu_model);
+ DeviceState *dev;
+ SysBusDevice *busdev;
+ int n;
+
+ if (!cpu_oc) {
+ fprintf(stderr, "Unable to find CPU definition\n");
+ exit(1);
+ }
+
+ /* Create the actual CPUs */
+ for (n = 0; n < smp_cpus; n++) {
+ Object *cpuobj = object_new(object_class_get_name(cpu_oc));
+ Error *err = NULL;
+
+ if (object_property_find(cpuobj, "reset-cbar", NULL)) {
+ object_property_set_int(cpuobj, periphbase,
+ "reset-cbar", &error_abort);
+ }
+ object_property_set_bool(cpuobj, true, "realized", &err);
+ if (err) {
+ error_report("%s", error_get_pretty(err));
+ exit(1);
+ }
+ }
+
+ /* Create the private peripheral devices (including the GIC);
+ * this must happen after the CPUs are created because a15mpcore_priv
+ * wires itself up to the CPU's generic_timer gpio out lines.
+ */
+ dev = qdev_create(NULL, privdev);
+ qdev_prop_set_uint32(dev, "num-cpu", smp_cpus);
+ qdev_init_nofail(dev);
+ busdev = SYS_BUS_DEVICE(dev);
+ sysbus_mmio_map(busdev, 0, periphbase);
+
+ /* Interrupts [42:0] are from the motherboard;
+ * [47:43] are reserved; [63:48] are daughterboard
+ * peripherals. Note that some documentation numbers
+ * external interrupts starting from 32 (because there
+ * are internal interrupts 0..31).
+ */
+ for (n = 0; n < 64; n++) {
+ pic[n] = qdev_get_gpio_in(dev, n);
+ }
+
+ /* Connect the CPUs to the GIC */
+ for (n = 0; n < smp_cpus; n++) {
+ DeviceState *cpudev = DEVICE(qemu_get_cpu(n));
+
+ sysbus_connect_irq(busdev, n, qdev_get_gpio_in(cpudev, ARM_CPU_IRQ));
+ }
+}
+
static void a9_daughterboard_init(const VEDBoardInfo *daughterboard,
ram_addr_t ram_size,
const char *cpu_model,
@@ -181,25 +239,12 @@ static void a9_daughterboard_init(const VEDBoardInfo *daughterboard,
MemoryRegion *sysmem = get_system_memory();
MemoryRegion *ram = g_new(MemoryRegion, 1);
MemoryRegion *lowram = g_new(MemoryRegion, 1);
- DeviceState *dev;
- SysBusDevice *busdev;
- int n;
- qemu_irq cpu_irq[4];
ram_addr_t low_ram_size;
if (!cpu_model) {
cpu_model = "cortex-a9";
}
- for (n = 0; n < smp_cpus; n++) {
- ARMCPU *cpu = cpu_arm_init(cpu_model);
- if (!cpu) {
- fprintf(stderr, "Unable to find CPU definition\n");
- exit(1);
- }
- cpu_irq[n] = qdev_get_gpio_in(DEVICE(cpu), ARM_CPU_IRQ);
- }
-
if (ram_size > 0x40000000) {
/* 1GB is the maximum the address space permits */
fprintf(stderr, "vexpress-a9: cannot model more than 1GB RAM\n");
@@ -221,23 +266,7 @@ static void a9_daughterboard_init(const VEDBoardInfo *daughterboard,
memory_region_add_subregion(sysmem, 0x60000000, ram);
/* 0x1e000000 A9MPCore (SCU) private memory region */
- dev = qdev_create(NULL, "a9mpcore_priv");
- qdev_prop_set_uint32(dev, "num-cpu", smp_cpus);
- qdev_init_nofail(dev);
- busdev = SYS_BUS_DEVICE(dev);
- sysbus_mmio_map(busdev, 0, 0x1e000000);
- for (n = 0; n < smp_cpus; n++) {
- sysbus_connect_irq(busdev, n, cpu_irq[n]);
- }
- /* Interrupts [42:0] are from the motherboard;
- * [47:43] are reserved; [63:48] are daughterboard
- * peripherals. Note that some documentation numbers
- * external interrupts starting from 32 (because the
- * A9MP has internal interrupts 0..31).
- */
- for (n = 0; n < 64; n++) {
- pic[n] = qdev_get_gpio_in(dev, n);
- }
+ init_cpus(cpu_model, "a9mpcore_priv", 0x1e000000, pic);
/* Daughterboard peripherals : 0x10020000 .. 0x20000000 */
@@ -296,29 +325,14 @@ static void a15_daughterboard_init(const VEDBoardInfo *daughterboard,
const char *cpu_model,
qemu_irq *pic)
{
- int n;
MemoryRegion *sysmem = get_system_memory();
MemoryRegion *ram = g_new(MemoryRegion, 1);
MemoryRegion *sram = g_new(MemoryRegion, 1);
- qemu_irq cpu_irq[4];
- DeviceState *dev;
- SysBusDevice *busdev;
if (!cpu_model) {
cpu_model = "cortex-a15";
}
- for (n = 0; n < smp_cpus; n++) {
- ARMCPU *cpu;
-
- cpu = cpu_arm_init(cpu_model);
- if (!cpu) {
- fprintf(stderr, "Unable to find CPU definition\n");
- exit(1);
- }
- cpu_irq[n] = qdev_get_gpio_in(DEVICE(cpu), ARM_CPU_IRQ);
- }
-
{
/* We have to use a separate 64 bit variable here to avoid the gcc
* "comparison is always false due to limited range of data type"
@@ -337,23 +351,7 @@ static void a15_daughterboard_init(const VEDBoardInfo *daughterboard,
memory_region_add_subregion(sysmem, 0x80000000, ram);
/* 0x2c000000 A15MPCore private memory region (GIC) */
- dev = qdev_create(NULL, "a15mpcore_priv");
- qdev_prop_set_uint32(dev, "num-cpu", smp_cpus);
- qdev_init_nofail(dev);
- busdev = SYS_BUS_DEVICE(dev);
- sysbus_mmio_map(busdev, 0, 0x2c000000);
- for (n = 0; n < smp_cpus; n++) {
- sysbus_connect_irq(busdev, n, cpu_irq[n]);
- }
- /* Interrupts [42:0] are from the motherboard;
- * [47:43] are reserved; [63:48] are daughterboard
- * peripherals. Note that some documentation numbers
- * external interrupts starting from 32 (because there
- * are internal interrupts 0..31).
- */
- for (n = 0; n < 64; n++) {
- pic[n] = qdev_get_gpio_in(dev, n);
- }
+ init_cpus(cpu_model, "a15mpcore_priv", 0x2c000000, pic);
/* A15 daughterboard peripherals: */
diff --git a/hw/arm/virt.c b/hw/arm/virt.c
index 517f2fe30f..2bbc9313d2 100644
--- a/hw/arm/virt.c
+++ b/hw/arm/virt.c
@@ -390,6 +390,12 @@ static void machvirt_init(QEMUMachineInitArgs *args)
if (n > 0) {
object_property_set_bool(cpuobj, true, "start-powered-off", NULL);
}
+
+ if (object_property_find(cpuobj, "reset-cbar", NULL)) {
+ object_property_set_int(cpuobj, vbi->memmap[VIRT_CPUPERIPHS].base,
+ "reset-cbar", &error_abort);
+ }
+
object_property_set_bool(cpuobj, true, "realized", NULL);
}
fdt_add_cpu_nodes(vbi);
diff --git a/hw/audio/fmopl.c b/hw/audio/fmopl.c
index f0a023477d..290a224edc 100644
--- a/hw/audio/fmopl.c
+++ b/hw/audio/fmopl.c
@@ -223,13 +223,13 @@ static void *cur_chip = NULL; /* current chip point */
/* static OPLSAMPLE *bufL,*bufR; */
static OPL_CH *S_CH;
static OPL_CH *E_CH;
-OPL_SLOT *SLOT7_1,*SLOT7_2,*SLOT8_1,*SLOT8_2;
+static OPL_SLOT *SLOT7_1, *SLOT7_2, *SLOT8_1, *SLOT8_2;
static INT32 outd[1];
static INT32 ams;
static INT32 vib;
-INT32 *ams_table;
-INT32 *vib_table;
+static INT32 *ams_table;
+static INT32 *vib_table;
static INT32 amsIncr;
static INT32 vibIncr;
static INT32 feedback2; /* connect for SLOT 2 */
diff --git a/hw/block/dataplane/virtio-blk.c b/hw/block/dataplane/virtio-blk.c
index d1c7ad4574..70b8a5ab75 100644
--- a/hw/block/dataplane/virtio-blk.c
+++ b/hw/block/dataplane/virtio-blk.c
@@ -23,6 +23,7 @@
#include "virtio-blk.h"
#include "block/aio.h"
#include "hw/virtio/virtio-bus.h"
+#include "qom/object_interfaces.h"
enum {
SEG_MAX = 126, /* maximum number of I/O segments */
@@ -44,8 +45,6 @@ struct VirtIOBlockDataPlane {
bool started;
bool starting;
bool stopping;
- QEMUBH *start_bh;
- QemuThread thread;
VirtIOBlkConf *blk;
int fd; /* image file descriptor */
@@ -59,12 +58,14 @@ struct VirtIOBlockDataPlane {
* (because you don't own the file descriptor or handle; you just
* use it).
*/
+ IOThread *iothread;
+ IOThread internal_iothread_obj;
AioContext *ctx;
EventNotifier io_notifier; /* Linux AIO completion */
EventNotifier host_notifier; /* doorbell */
IOQueue ioqueue; /* Linux AIO queue (should really be per
- dataplane thread) */
+ IOThread) */
VirtIOBlockRequest requests[REQ_MAX]; /* pool of requests, managed by the
queue */
@@ -342,26 +343,7 @@ static void handle_io(EventNotifier *e)
}
}
-static void *data_plane_thread(void *opaque)
-{
- VirtIOBlockDataPlane *s = opaque;
-
- while (!s->stopping || s->num_reqs > 0) {
- aio_poll(s->ctx, true);
- }
- return NULL;
-}
-
-static void start_data_plane_bh(void *opaque)
-{
- VirtIOBlockDataPlane *s = opaque;
-
- qemu_bh_delete(s->start_bh);
- s->start_bh = NULL;
- qemu_thread_create(&s->thread, "data_plane", data_plane_thread,
- s, QEMU_THREAD_JOINABLE);
-}
-
+/* Context: QEMU global mutex held */
void virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *blk,
VirtIOBlockDataPlane **dataplane,
Error **errp)
@@ -408,12 +390,29 @@ void virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *blk,
s->fd = fd;
s->blk = blk;
+ if (blk->iothread) {
+ s->iothread = blk->iothread;
+ object_ref(OBJECT(s->iothread));
+ } else {
+ /* Create per-device IOThread if none specified. This is for
+ * x-data-plane option compatibility. If x-data-plane is removed we
+ * can drop this.
+ */
+ object_initialize(&s->internal_iothread_obj,
+ sizeof(s->internal_iothread_obj),
+ TYPE_IOTHREAD);
+ user_creatable_complete(OBJECT(&s->internal_iothread_obj), &error_abort);
+ s->iothread = &s->internal_iothread_obj;
+ }
+ s->ctx = iothread_get_aio_context(s->iothread);
+
/* Prevent block operations that conflict with data plane thread */
bdrv_set_in_use(blk->conf.bs, 1);
*dataplane = s;
}
+/* Context: QEMU global mutex held */
void virtio_blk_data_plane_destroy(VirtIOBlockDataPlane *s)
{
if (!s) {
@@ -422,9 +421,11 @@ void virtio_blk_data_plane_destroy(VirtIOBlockDataPlane *s)
virtio_blk_data_plane_stop(s);
bdrv_set_in_use(s->blk->conf.bs, 0);
+ object_unref(OBJECT(s->iothread));
g_free(s);
}
+/* Context: QEMU global mutex held */
void virtio_blk_data_plane_start(VirtIOBlockDataPlane *s)
{
BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(s->vdev)));
@@ -448,8 +449,6 @@ void virtio_blk_data_plane_start(VirtIOBlockDataPlane *s)
return;
}
- s->ctx = aio_context_new();
-
/* Set up guest notifier (irq) */
if (k->set_guest_notifiers(qbus->parent, 1, true) != 0) {
fprintf(stderr, "virtio-blk failed to set guest notifier, "
@@ -464,7 +463,6 @@ void virtio_blk_data_plane_start(VirtIOBlockDataPlane *s)
exit(1);
}
s->host_notifier = *virtio_queue_get_host_notifier(vq);
- aio_set_event_notifier(s->ctx, &s->host_notifier, handle_notify);
/* Set up ioqueue */
ioq_init(&s->ioqueue, s->fd, REQ_MAX);
@@ -472,7 +470,6 @@ void virtio_blk_data_plane_start(VirtIOBlockDataPlane *s)
ioq_put_iocb(&s->ioqueue, &s->requests[i].iocb);
}
s->io_notifier = *ioq_get_notifier(&s->ioqueue);
- aio_set_event_notifier(s->ctx, &s->io_notifier, handle_io);
s->starting = false;
s->started = true;
@@ -481,11 +478,14 @@ void virtio_blk_data_plane_start(VirtIOBlockDataPlane *s)
/* Kick right away to begin processing requests already in vring */
event_notifier_set(virtio_queue_get_host_notifier(vq));
- /* Spawn thread in BH so it inherits iothread cpusets */
- s->start_bh = qemu_bh_new(start_data_plane_bh, s);
- qemu_bh_schedule(s->start_bh);
+ /* Get this show started by hooking up our callbacks */
+ aio_context_acquire(s->ctx);
+ aio_set_event_notifier(s->ctx, &s->host_notifier, handle_notify);
+ aio_set_event_notifier(s->ctx, &s->io_notifier, handle_io);
+ aio_context_release(s->ctx);
}
+/* Context: QEMU global mutex held */
void virtio_blk_data_plane_stop(VirtIOBlockDataPlane *s)
{
BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(s->vdev)));
@@ -496,27 +496,32 @@ void virtio_blk_data_plane_stop(VirtIOBlockDataPlane *s)
s->stopping = true;
trace_virtio_blk_data_plane_stop(s);
- /* Stop thread or cancel pending thread creation BH */
- if (s->start_bh) {
- qemu_bh_delete(s->start_bh);
- s->start_bh = NULL;
- } else {
- aio_notify(s->ctx);
- qemu_thread_join(&s->thread);
+ aio_context_acquire(s->ctx);
+
+ /* Stop notifications for new requests from guest */
+ aio_set_event_notifier(s->ctx, &s->host_notifier, NULL);
+
+ /* Complete pending requests */
+ while (s->num_reqs > 0) {
+ aio_poll(s->ctx, true);
}
+ /* Stop ioq callbacks (there are no pending requests left) */
aio_set_event_notifier(s->ctx, &s->io_notifier, NULL);
- ioq_cleanup(&s->ioqueue);
- aio_set_event_notifier(s->ctx, &s->host_notifier, NULL);
- k->set_host_notifier(qbus->parent, 0, false);
+ aio_context_release(s->ctx);
+
+ /* Sync vring state back to virtqueue so that non-dataplane request
+ * processing can continue when we disable the host notifier below.
+ */
+ vring_teardown(&s->vring, s->vdev, 0);
- aio_context_unref(s->ctx);
+ ioq_cleanup(&s->ioqueue);
+ k->set_host_notifier(qbus->parent, 0, false);
/* Clean up guest notifier (irq) */
k->set_guest_notifiers(qbus->parent, 1, false);
- vring_teardown(&s->vring, s->vdev, 0);
s->started = false;
s->stopping = false;
}
diff --git a/hw/block/m25p80.c b/hw/block/m25p80.c
index 02a15441fa..e29a738d23 100644
--- a/hw/block/m25p80.c
+++ b/hw/block/m25p80.c
@@ -241,7 +241,8 @@ typedef enum {
} CMDState;
typedef struct Flash {
- SSISlave ssidev;
+ SSISlave parent_obj;
+
uint32_t r;
BlockDriverState *bdrv;
@@ -545,7 +546,7 @@ static void decode_new_cmd(Flash *s, uint32_t value)
static int m25p80_cs(SSISlave *ss, bool select)
{
- Flash *s = FROM_SSI_SLAVE(Flash, ss);
+ Flash *s = M25P80(ss);
if (select) {
s->len = 0;
@@ -561,7 +562,7 @@ static int m25p80_cs(SSISlave *ss, bool select)
static uint32_t m25p80_transfer8(SSISlave *ss, uint32_t tx)
{
- Flash *s = FROM_SSI_SLAVE(Flash, ss);
+ Flash *s = M25P80(ss);
uint32_t r = 0;
switch (s->state) {
@@ -610,7 +611,7 @@ static uint32_t m25p80_transfer8(SSISlave *ss, uint32_t tx)
static int m25p80_init(SSISlave *ss)
{
DriveInfo *dinfo;
- Flash *s = FROM_SSI_SLAVE(Flash, ss);
+ Flash *s = M25P80(ss);
M25P80Class *mc = M25P80_GET_CLASS(s);
s->pi = mc->pi;
diff --git a/hw/block/nvme.c b/hw/block/nvme.c
index 2882ffefce..5fd8f89822 100644
--- a/hw/block/nvme.c
+++ b/hw/block/nvme.c
@@ -752,8 +752,8 @@ static int nvme_init(PCIDevice *pci_dev)
return -1;
}
- bs_size = bdrv_getlength(n->conf.bs);
- if (bs_size <= 0) {
+ bs_size = bdrv_getlength(n->conf.bs);
+ if (bs_size < 0) {
return -1;
}
diff --git a/hw/block/xen_disk.c b/hw/block/xen_disk.c
index bc061e6403..a8fea72edf 100644
--- a/hw/block/xen_disk.c
+++ b/hw/block/xen_disk.c
@@ -817,11 +817,14 @@ static int blk_connect(struct XenDevice *xendev)
index = (blkdev->xendev.dev - 202 * 256) / 16;
blkdev->dinfo = drive_get(IF_XEN, 0, index);
if (!blkdev->dinfo) {
+ Error *local_err = NULL;
/* setup via xenbus -> create new block driver instance */
xen_be_printf(&blkdev->xendev, 2, "create new bdrv (xenbus setup)\n");
- blkdev->bs = bdrv_new(blkdev->dev);
+ blkdev->bs = bdrv_new(blkdev->dev, &local_err);
+ if (local_err) {
+ blkdev->bs = NULL;
+ }
if (blkdev->bs) {
- Error *local_err = NULL;
BlockDriver *drv = bdrv_find_whitelisted_format(blkdev->fileproto,
readonly);
if (bdrv_open(&blkdev->bs, blkdev->filename, NULL, NULL, qflags,
diff --git a/hw/char/pl011.c b/hw/char/pl011.c
index a8ae6f4706..644aad7cf0 100644
--- a/hw/char/pl011.c
+++ b/hw/char/pl011.c
@@ -20,6 +20,7 @@ typedef struct PL011State {
uint32_t readbuff;
uint32_t flags;
uint32_t lcr;
+ uint32_t rsr;
uint32_t cr;
uint32_t dmacr;
uint32_t int_enabled;
@@ -81,13 +82,14 @@ static uint64_t pl011_read(void *opaque, hwaddr offset,
}
if (s->read_count == s->read_trigger - 1)
s->int_level &= ~ PL011_INT_RX;
+ s->rsr = c >> 8;
pl011_update(s);
if (s->chr) {
qemu_chr_accept_input(s->chr);
}
return c;
- case 1: /* UARTCR */
- return 0;
+ case 1: /* UARTRSR */
+ return s->rsr;
case 6: /* UARTFR */
return s->flags;
case 8: /* UARTILPR */
@@ -146,8 +148,8 @@ static void pl011_write(void *opaque, hwaddr offset,
s->int_level |= PL011_INT_TX;
pl011_update(s);
break;
- case 1: /* UARTCR */
- s->cr = value;
+ case 1: /* UARTRSR/UARTECR */
+ s->rsr = 0;
break;
case 6: /* UARTFR */
/* Writes to Flag register are ignored. */
@@ -162,6 +164,11 @@ static void pl011_write(void *opaque, hwaddr offset,
s->fbrd = value;
break;
case 11: /* UARTLCR_H */
+ /* Reset the FIFO state on FIFO enable or disable */
+ if ((s->lcr ^ value) & 0x10) {
+ s->read_count = 0;
+ s->read_pos = 0;
+ }
s->lcr = value;
pl011_set_read_trigger(s);
break;
@@ -214,7 +221,7 @@ static void pl011_put_fifo(void *opaque, uint32_t value)
s->read_fifo[slot] = value;
s->read_count++;
s->flags &= ~PL011_FLAG_RXFE;
- if (s->cr & 0x10 || s->read_count == 16) {
+ if (!(s->lcr & 0x10) || s->read_count == 16) {
s->flags |= PL011_FLAG_RXFF;
}
if (s->read_count == s->read_trigger) {
@@ -242,13 +249,14 @@ static const MemoryRegionOps pl011_ops = {
static const VMStateDescription vmstate_pl011 = {
.name = "pl011",
- .version_id = 1,
- .minimum_version_id = 1,
- .minimum_version_id_old = 1,
+ .version_id = 2,
+ .minimum_version_id = 2,
+ .minimum_version_id_old = 2,
.fields = (VMStateField[]) {
VMSTATE_UINT32(readbuff, PL011State),
VMSTATE_UINT32(flags, PL011State),
VMSTATE_UINT32(lcr, PL011State),
+ VMSTATE_UINT32(rsr, PL011State),
VMSTATE_UINT32(cr, PL011State),
VMSTATE_UINT32(dmacr, PL011State),
VMSTATE_UINT32(int_enabled, PL011State),
diff --git a/hw/char/sclpconsole-lm.c b/hw/char/sclpconsole-lm.c
index 93390675d6..a2dc1c63b0 100644
--- a/hw/char/sclpconsole-lm.c
+++ b/hw/char/sclpconsole-lm.c
@@ -41,7 +41,6 @@ typedef struct SCLPConsoleLM {
uint32_t write_errors; /* errors writing to char layer */
uint32_t length; /* length of byte stream in buffer */
uint8_t buf[SIZE_CONSOLE_BUFFER];
- qemu_irq irq_console_read;
} SCLPConsoleLM;
/*
@@ -68,13 +67,15 @@ static int chr_can_read(void *opaque)
return 0;
}
-static void receive_from_chr_layer(SCLPConsoleLM *scon, const uint8_t *buf,
- int size)
+static void chr_read(void *opaque, const uint8_t *buf, int size)
{
+ SCLPConsoleLM *scon = opaque;
+
assert(size == 1);
if (*buf == '\r' || *buf == '\n') {
scon->event.event_pending = true;
+ sclp_service_interrupt(0);
return;
}
scon->buf[scon->length] = *buf;
@@ -84,20 +85,6 @@ static void receive_from_chr_layer(SCLPConsoleLM *scon, const uint8_t *buf,
}
}
-/*
- * Send data from a char device over to the guest
- */
-static void chr_read(void *opaque, const uint8_t *buf, int size)
-{
- SCLPConsoleLM *scon = opaque;
-
- receive_from_chr_layer(scon, buf, size);
- if (scon->event.event_pending) {
- /* trigger SCLP read operation */
- qemu_irq_raise(scon->irq_console_read);
- }
-}
-
/* functions to be called by event facility */
static bool can_handle_event(uint8_t type)
@@ -298,11 +285,6 @@ static int write_event_data(SCLPEvent *event, EventBufferHeader *ebh)
return SCLP_RC_NORMAL_COMPLETION;
}
-static void trigger_console_data(void *opaque, int n, int level)
-{
- sclp_service_interrupt(0);
-}
-
/* functions for live migration */
static const VMStateDescription vmstate_sclplmconsole = {
@@ -338,7 +320,6 @@ static int console_init(SCLPEvent *event)
if (scon->chr) {
qemu_chr_add_handlers(scon->chr, chr_can_read, chr_read, NULL, scon);
}
- scon->irq_console_read = *qemu_allocate_irqs(trigger_console_data, NULL, 1);
return 0;
}
diff --git a/hw/char/sclpconsole.c b/hw/char/sclpconsole.c
index 16d77c5e27..ce406730a5 100644
--- a/hw/char/sclpconsole.c
+++ b/hw/char/sclpconsole.c
@@ -36,7 +36,6 @@ typedef struct SCLPConsole {
uint32_t iov_bs; /* offset in buf for char layer read operation */
uint32_t iov_data_len; /* length of byte stream in buffer */
uint32_t iov_sclp_rest; /* length of byte stream not read via SCLP */
- qemu_irq irq_read_vt220;
} SCLPConsole;
/* character layer call-back functions */
@@ -49,11 +48,12 @@ static int chr_can_read(void *opaque)
return SIZE_BUFFER_VT220 - scon->iov_data_len;
}
-/* Receive n bytes from character layer, save in iov buffer,
- * and set event pending */
-static void receive_from_chr_layer(SCLPConsole *scon, const uint8_t *buf,
- int size)
+/* Send data from a char device over to the guest */
+static void chr_read(void *opaque, const uint8_t *buf, int size)
{
+ SCLPConsole *scon = opaque;
+
+ assert(scon);
/* read data must fit into current buffer */
assert(size <= SIZE_BUFFER_VT220 - scon->iov_data_len);
@@ -63,18 +63,7 @@ static void receive_from_chr_layer(SCLPConsole *scon, const uint8_t *buf,
scon->iov_sclp_rest += size;
scon->iov_bs += size;
scon->event.event_pending = true;
-}
-
-/* Send data from a char device over to the guest */
-static void chr_read(void *opaque, const uint8_t *buf, int size)
-{
- SCLPConsole *scon = opaque;
-
- assert(scon);
-
- receive_from_chr_layer(scon, buf, size);
- /* trigger SCLP read operation */
- qemu_irq_raise(scon->irq_read_vt220);
+ sclp_service_interrupt(0);
}
/* functions to be called by event facility */
@@ -192,11 +181,6 @@ static int write_event_data(SCLPEvent *event, EventBufferHeader *evt_buf_hdr)
return rc;
}
-static void trigger_ascii_console_data(void *opaque, int n, int level)
-{
- sclp_service_interrupt(0);
-}
-
static const VMStateDescription vmstate_sclpconsole = {
.name = "sclpconsole",
.version_id = 0,
@@ -232,8 +216,6 @@ static int console_init(SCLPEvent *event)
qemu_chr_add_handlers(scon->chr, chr_can_read,
chr_read, NULL, scon);
}
- scon->irq_read_vt220 = *qemu_allocate_irqs(trigger_ascii_console_data,
- NULL, 1);
return 0;
}
diff --git a/hw/char/serial.c b/hw/char/serial.c
index 6d3b5aff8b..f4d167f916 100644
--- a/hw/char/serial.c
+++ b/hw/char/serial.c
@@ -225,8 +225,10 @@ static gboolean serial_xmit(GIOChannel *chan, GIOCondition cond, void *opaque)
if (s->tsr_retry <= 0) {
if (s->fcr & UART_FCR_FE) {
- s->tsr = fifo8_is_empty(&s->xmit_fifo) ?
- 0 : fifo8_pop(&s->xmit_fifo);
+ if (fifo8_is_empty(&s->xmit_fifo)) {
+ return FALSE;
+ }
+ s->tsr = fifo8_pop(&s->xmit_fifo);
if (!s->xmit_fifo.num) {
s->lsr |= UART_LSR_THRE;
}
diff --git a/hw/char/virtio-console.c b/hw/char/virtio-console.c
index 2e00ad2a7c..6c8be0fe26 100644
--- a/hw/char/virtio-console.c
+++ b/hw/char/virtio-console.c
@@ -15,8 +15,13 @@
#include "trace.h"
#include "hw/virtio/virtio-serial.h"
+#define TYPE_VIRTIO_CONSOLE_SERIAL_PORT "virtserialport"
+#define VIRTIO_CONSOLE(obj) \
+ OBJECT_CHECK(VirtConsole, (obj), TYPE_VIRTIO_CONSOLE_SERIAL_PORT)
+
typedef struct VirtConsole {
- VirtIOSerialPort port;
+ VirtIOSerialPort parent_obj;
+
CharDriverState *chr;
guint watch;
} VirtConsole;
@@ -31,7 +36,7 @@ static gboolean chr_write_unblocked(GIOChannel *chan, GIOCondition cond,
VirtConsole *vcon = opaque;
vcon->watch = 0;
- virtio_serial_throttle_port(&vcon->port, false);
+ virtio_serial_throttle_port(VIRTIO_SERIAL_PORT(vcon), false);
return FALSE;
}
@@ -39,7 +44,7 @@ static gboolean chr_write_unblocked(GIOChannel *chan, GIOCondition cond,
static ssize_t flush_buf(VirtIOSerialPort *port,
const uint8_t *buf, ssize_t len)
{
- VirtConsole *vcon = DO_UPCAST(VirtConsole, port, port);
+ VirtConsole *vcon = VIRTIO_CONSOLE(port);
ssize_t ret;
if (!vcon->chr) {
@@ -75,7 +80,7 @@ static ssize_t flush_buf(VirtIOSerialPort *port,
/* Callback function that's called when the guest opens/closes the port */
static void set_guest_connected(VirtIOSerialPort *port, int guest_connected)
{
- VirtConsole *vcon = DO_UPCAST(VirtConsole, port, port);
+ VirtConsole *vcon = VIRTIO_CONSOLE(port);
if (!vcon->chr) {
return;
@@ -88,45 +93,49 @@ static int chr_can_read(void *opaque)
{
VirtConsole *vcon = opaque;
- return virtio_serial_guest_ready(&vcon->port);
+ return virtio_serial_guest_ready(VIRTIO_SERIAL_PORT(vcon));
}
/* Send data from a char device over to the guest */
static void chr_read(void *opaque, const uint8_t *buf, int size)
{
VirtConsole *vcon = opaque;
+ VirtIOSerialPort *port = VIRTIO_SERIAL_PORT(vcon);
- trace_virtio_console_chr_read(vcon->port.id, size);
- virtio_serial_write(&vcon->port, buf, size);
+ trace_virtio_console_chr_read(port->id, size);
+ virtio_serial_write(port, buf, size);
}
static void chr_event(void *opaque, int event)
{
VirtConsole *vcon = opaque;
+ VirtIOSerialPort *port = VIRTIO_SERIAL_PORT(vcon);
- trace_virtio_console_chr_event(vcon->port.id, event);
+ trace_virtio_console_chr_event(port->id, event);
switch (event) {
case CHR_EVENT_OPENED:
- virtio_serial_open(&vcon->port);
+ virtio_serial_open(port);
break;
case CHR_EVENT_CLOSED:
if (vcon->watch) {
g_source_remove(vcon->watch);
vcon->watch = 0;
}
- virtio_serial_close(&vcon->port);
+ virtio_serial_close(port);
break;
}
}
-static int virtconsole_initfn(VirtIOSerialPort *port)
+static void virtconsole_realize(DeviceState *dev, Error **errp)
{
- VirtConsole *vcon = DO_UPCAST(VirtConsole, port, port);
- VirtIOSerialPortClass *k = VIRTIO_SERIAL_PORT_GET_CLASS(port);
+ VirtIOSerialPort *port = VIRTIO_SERIAL_PORT(dev);
+ VirtConsole *vcon = VIRTIO_CONSOLE(dev);
+ VirtIOSerialPortClass *k = VIRTIO_SERIAL_PORT_GET_CLASS(dev);
if (port->id == 0 && !k->is_console) {
- error_report("Port number 0 on virtio-serial devices reserved for virtconsole devices for backward compatibility.");
- return -1;
+ error_setg(errp, "Port number 0 on virtio-serial devices reserved "
+ "for virtconsole devices for backward compatibility.");
+ return;
}
if (vcon->chr) {
@@ -134,43 +143,27 @@ static int virtconsole_initfn(VirtIOSerialPort *port)
qemu_chr_add_handlers(vcon->chr, chr_can_read, chr_read, chr_event,
vcon);
}
-
- return 0;
}
-static int virtconsole_exitfn(VirtIOSerialPort *port)
+static void virtconsole_unrealize(DeviceState *dev, Error **errp)
{
- VirtConsole *vcon = DO_UPCAST(VirtConsole, port, port);
+ VirtConsole *vcon = VIRTIO_CONSOLE(dev);
if (vcon->watch) {
g_source_remove(vcon->watch);
}
-
- return 0;
}
-static Property virtconsole_properties[] = {
- DEFINE_PROP_CHR("chardev", VirtConsole, chr),
- DEFINE_PROP_END_OF_LIST(),
-};
-
static void virtconsole_class_init(ObjectClass *klass, void *data)
{
- DeviceClass *dc = DEVICE_CLASS(klass);
VirtIOSerialPortClass *k = VIRTIO_SERIAL_PORT_CLASS(klass);
k->is_console = true;
- k->init = virtconsole_initfn;
- k->exit = virtconsole_exitfn;
- k->have_data = flush_buf;
- k->set_guest_connected = set_guest_connected;
- dc->props = virtconsole_properties;
}
static const TypeInfo virtconsole_info = {
.name = "virtconsole",
- .parent = TYPE_VIRTIO_SERIAL_PORT,
- .instance_size = sizeof(VirtConsole),
+ .parent = TYPE_VIRTIO_CONSOLE_SERIAL_PORT,
.class_init = virtconsole_class_init,
};
@@ -184,15 +177,15 @@ static void virtserialport_class_init(ObjectClass *klass, void *data)
DeviceClass *dc = DEVICE_CLASS(klass);
VirtIOSerialPortClass *k = VIRTIO_SERIAL_PORT_CLASS(klass);
- k->init = virtconsole_initfn;
- k->exit = virtconsole_exitfn;
+ k->realize = virtconsole_realize;
+ k->unrealize = virtconsole_unrealize;
k->have_data = flush_buf;
k->set_guest_connected = set_guest_connected;
dc->props = virtserialport_properties;
}
static const TypeInfo virtserialport_info = {
- .name = "virtserialport",
+ .name = TYPE_VIRTIO_CONSOLE_SERIAL_PORT,
.parent = TYPE_VIRTIO_SERIAL_PORT,
.instance_size = sizeof(VirtConsole),
.class_init = virtserialport_class_init,
@@ -200,8 +193,8 @@ static const TypeInfo virtserialport_info = {
static void virtconsole_register_types(void)
{
- type_register_static(&virtconsole_info);
type_register_static(&virtserialport_info);
+ type_register_static(&virtconsole_info);
}
type_init(virtconsole_register_types)
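This file and the earlier m25p80 change follow the same QOM cleanup: the parent object is embedded as parent_obj and instances are reached through a checked cast macro rather than DO_UPCAST() or FROM_SSI_SLAVE(). A sketch of the convention with hypothetical names:

#define TYPE_MY_PORT "my-port"
#define MY_PORT(obj) \
    OBJECT_CHECK(MyPort, (obj), TYPE_MY_PORT)

typedef struct MyPort {
    VirtIOSerialPort parent_obj;   /* parent comes first, never accessed by name */
    CharDriverState *chr;
} MyPort;

static ssize_t my_port_have_data(VirtIOSerialPort *port,
                                 const uint8_t *buf, ssize_t len)
{
    MyPort *p = MY_PORT(port);     /* runtime type-checked downcast */

    return p->chr ? len : 0;
}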
diff --git a/hw/char/virtio-serial-bus.c b/hw/char/virtio-serial-bus.c
index 226e9f9a3c..2b647b68d5 100644
--- a/hw/char/virtio-serial-bus.c
+++ b/hw/char/virtio-serial-bus.c
@@ -808,13 +808,14 @@ static void remove_port(VirtIOSerial *vser, uint32_t port_id)
send_control_event(vser, port->id, VIRTIO_CONSOLE_PORT_REMOVE, 1);
}
-static int virtser_port_qdev_init(DeviceState *qdev)
+static void virtser_port_device_realize(DeviceState *dev, Error **errp)
{
- VirtIOSerialPort *port = DO_UPCAST(VirtIOSerialPort, dev, qdev);
+ VirtIOSerialPort *port = VIRTIO_SERIAL_PORT(dev);
VirtIOSerialPortClass *vsc = VIRTIO_SERIAL_PORT_GET_CLASS(port);
- VirtIOSerialBus *bus = DO_UPCAST(VirtIOSerialBus, qbus, qdev->parent_bus);
- int ret, max_nr_ports;
+ VirtIOSerialBus *bus = VIRTIO_SERIAL_BUS(qdev_get_parent_bus(dev));
+ int max_nr_ports;
bool plugging_port0;
+ Error *err = NULL;
port->vser = bus->vser;
port->bh = qemu_bh_new(flush_queued_data_bh, port);
@@ -829,9 +830,9 @@ static int virtser_port_qdev_init(DeviceState *qdev)
plugging_port0 = vsc->is_console && !find_port_by_id(port->vser, 0);
if (find_port_by_id(port->vser, port->id)) {
- error_report("virtio-serial-bus: A port already exists at id %u",
- port->id);
- return -1;
+ error_setg(errp, "virtio-serial-bus: A port already exists at id %u",
+ port->id);
+ return;
}
if (port->id == VIRTIO_CONSOLE_BAD_ID) {
@@ -840,22 +841,24 @@ static int virtser_port_qdev_init(DeviceState *qdev)
} else {
port->id = find_free_port_id(port->vser);
if (port->id == VIRTIO_CONSOLE_BAD_ID) {
- error_report("virtio-serial-bus: Maximum port limit for this device reached");
- return -1;
+ error_setg(errp, "virtio-serial-bus: Maximum port limit for "
+ "this device reached");
+ return;
}
}
}
max_nr_ports = tswap32(port->vser->config.max_nr_ports);
if (port->id >= max_nr_ports) {
- error_report("virtio-serial-bus: Out-of-range port id specified, max. allowed: %u",
- max_nr_ports - 1);
- return -1;
+ error_setg(errp, "virtio-serial-bus: Out-of-range port id specified, "
+ "max. allowed: %u", max_nr_ports - 1);
+ return;
}
- ret = vsc->init(port);
- if (ret) {
- return ret;
+ vsc->realize(dev, &err);
+ if (err != NULL) {
+ error_propagate(errp, err);
+ return;
}
port->elem.out_num = 0;
@@ -868,14 +871,12 @@ static int virtser_port_qdev_init(DeviceState *qdev)
/* Send an update to the guest about this new port added */
virtio_notify_config(VIRTIO_DEVICE(port->vser));
-
- return ret;
}
-static int virtser_port_qdev_exit(DeviceState *qdev)
+static void virtser_port_device_unrealize(DeviceState *dev, Error **errp)
{
- VirtIOSerialPort *port = DO_UPCAST(VirtIOSerialPort, dev, qdev);
- VirtIOSerialPortClass *vsc = VIRTIO_SERIAL_PORT_GET_CLASS(port);
+ VirtIOSerialPort *port = VIRTIO_SERIAL_PORT(dev);
+ VirtIOSerialPortClass *vsc = VIRTIO_SERIAL_PORT_GET_CLASS(dev);
VirtIOSerial *vser = port->vser;
qemu_bh_delete(port->bh);
@@ -883,10 +884,9 @@ static int virtser_port_qdev_exit(DeviceState *qdev)
QTAILQ_REMOVE(&vser->ports, port, next);
- if (vsc->exit) {
- vsc->exit(port);
+ if (vsc->unrealize) {
+ vsc->unrealize(dev, errp);
}
- return 0;
}
static void virtio_serial_device_realize(DeviceState *dev, Error **errp)
@@ -971,10 +971,11 @@ static void virtio_serial_device_realize(DeviceState *dev, Error **errp)
static void virtio_serial_port_class_init(ObjectClass *klass, void *data)
{
DeviceClass *k = DEVICE_CLASS(klass);
- k->init = virtser_port_qdev_init;
+
set_bit(DEVICE_CATEGORY_INPUT, k->categories);
k->bus_type = TYPE_VIRTIO_SERIAL_BUS;
- k->exit = virtser_port_qdev_exit;
+ k->realize = virtser_port_device_realize;
+ k->unrealize = virtser_port_device_unrealize;
k->unplug = qdev_simple_unplug_cb;
k->props = virtser_props;
}
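The conversion from init/exit callbacks to realize/unrealize seen in both virtio-console.c and this bus code is the standard qdev pattern: the hooks take an Error ** and report failure through error_setg()/error_propagate() instead of returning -1. Reduced to a skeleton with hypothetical names:

static void my_port_realize(DeviceState *dev, Error **errp)
{
    Error *err = NULL;

    my_port_check_config(dev, &err);       /* hypothetical validation helper */
    if (err != NULL) {
        error_propagate(errp, err);        /* failure reported, no -1 returns */
        return;
    }
}

static void my_port_unrealize(DeviceState *dev, Error **errp)
{
    /* symmetric teardown; nothing to release in this sketch */
}

static void my_port_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize   = my_port_realize;
    dc->unrealize = my_port_unrealize;
}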
diff --git a/hw/core/Makefile.objs b/hw/core/Makefile.objs
index 9e324befd6..5377d052e9 100644
--- a/hw/core/Makefile.objs
+++ b/hw/core/Makefile.objs
@@ -1,5 +1,6 @@
# core qdev-related obj files, also used by *-user:
common-obj-y += qdev.o qdev-properties.o
+common-obj-y += fw-path-provider.o
# irq.o needed for qdev GPIO handling:
common-obj-y += irq.o
common-obj-y += hotplug.o
@@ -8,7 +9,7 @@ common-obj-$(CONFIG_EMPTY_SLOT) += empty_slot.o
common-obj-$(CONFIG_XILINX_AXI) += stream.o
common-obj-$(CONFIG_PTIMER) += ptimer.o
common-obj-$(CONFIG_SOFTMMU) += sysbus.o
+common-obj-$(CONFIG_SOFTMMU) += machine.o
common-obj-$(CONFIG_SOFTMMU) += null-machine.o
common-obj-$(CONFIG_SOFTMMU) += loader.o
common-obj-$(CONFIG_SOFTMMU) += qdev-properties-system.o
-
diff --git a/hw/core/fw-path-provider.c b/hw/core/fw-path-provider.c
new file mode 100644
index 0000000000..1290c3e600
--- /dev/null
+++ b/hw/core/fw-path-provider.c
@@ -0,0 +1,52 @@
+/*
+ * Firmware path provider class and helpers.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "hw/fw-path-provider.h"
+
+char *fw_path_provider_get_dev_path(FWPathProvider *p, BusState *bus,
+ DeviceState *dev)
+{
+ FWPathProviderClass *k = FW_PATH_PROVIDER_GET_CLASS(p);
+
+ return k->get_dev_path(p, bus, dev);
+}
+
+char *fw_path_provider_try_get_dev_path(Object *o, BusState *bus,
+ DeviceState *dev)
+{
+ FWPathProvider *p = (FWPathProvider *)
+ object_dynamic_cast(o, TYPE_FW_PATH_PROVIDER);
+
+ if (p) {
+ return fw_path_provider_get_dev_path(p, bus, dev);
+ }
+
+ return NULL;
+}
+
+static const TypeInfo fw_path_provider_info = {
+ .name = TYPE_FW_PATH_PROVIDER,
+ .parent = TYPE_INTERFACE,
+ .class_size = sizeof(FWPathProviderClass),
+};
+
+static void fw_path_provider_register_types(void)
+{
+ type_register_static(&fw_path_provider_info);
+}
+
+type_init(fw_path_provider_register_types)
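FWPathProvider is a pure interface: the TypeInfo above only registers it, and a machine or bus that wants to customize firmware device paths implements get_dev_path(). A sketch of a hypothetical implementer, assuming the usual cast macros from hw/fw-path-provider.h:

static char *my_machine_get_fw_dev_path(FWPathProvider *p, BusState *bus,
                                        DeviceState *dev)
{
    /* return an allocated path segment, or NULL to fall back to the default */
    return g_strdup_printf("/my-node/%s", object_get_typename(OBJECT(dev)));
}

static void my_machine_class_init(ObjectClass *oc, void *data)
{
    FWPathProviderClass *fwc = FW_PATH_PROVIDER_CLASS(oc);

    fwc->get_dev_path = my_machine_get_fw_dev_path;
}

static const TypeInfo my_machine_info = {
    .name       = "my-machine",
    .parent     = TYPE_MACHINE,
    .class_init = my_machine_class_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_FW_PATH_PROVIDER },
        { }
    },
};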
diff --git a/hw/core/machine.c b/hw/core/machine.c
new file mode 100644
index 0000000000..d3ffef7e07
--- /dev/null
+++ b/hw/core/machine.c
@@ -0,0 +1,28 @@
+/*
+ * QEMU Machine
+ *
+ * Copyright (C) 2014 Red Hat Inc
+ *
+ * Authors:
+ * Marcel Apfelbaum <marcel.a@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "hw/boards.h"
+
+static const TypeInfo machine_info = {
+ .name = TYPE_MACHINE,
+ .parent = TYPE_OBJECT,
+ .abstract = true,
+ .class_size = sizeof(MachineClass),
+ .instance_size = sizeof(MachineState),
+};
+
+static void machine_register_types(void)
+{
+ type_register_static(&machine_info);
+}
+
+type_init(machine_register_types)
diff --git a/hw/core/qdev-properties-system.c b/hw/core/qdev-properties-system.c
index 5f5957ed8e..de835612f0 100644
--- a/hw/core/qdev-properties-system.c
+++ b/hw/core/qdev-properties-system.c
@@ -18,17 +18,19 @@
#include "net/hub.h"
#include "qapi/visitor.h"
#include "sysemu/char.h"
+#include "sysemu/iothread.h"
static void get_pointer(Object *obj, Visitor *v, Property *prop,
- const char *(*print)(void *ptr),
+ char *(*print)(void *ptr),
const char *name, Error **errp)
{
DeviceState *dev = DEVICE(obj);
void **ptr = qdev_get_prop_ptr(dev, prop);
char *p;
- p = (char *) (*ptr ? print(*ptr) : "");
+ p = *ptr ? print(*ptr) : g_strdup("");
visit_type_str(v, &p, name, errp);
+ g_free(p);
}
static void set_pointer(Object *obj, Visitor *v, Property *prop,
@@ -91,9 +93,9 @@ static void release_drive(Object *obj, const char *name, void *opaque)
}
}
-static const char *print_drive(void *ptr)
+static char *print_drive(void *ptr)
{
- return bdrv_get_device_name(ptr);
+ return g_strdup(bdrv_get_device_name(ptr));
}
static void get_drive(Object *obj, Visitor *v, void *opaque,
@@ -145,11 +147,12 @@ static void release_chr(Object *obj, const char *name, void *opaque)
}
-static const char *print_chr(void *ptr)
+static char *print_chr(void *ptr)
{
CharDriverState *chr = ptr;
+ const char *val = chr->label ? chr->label : "";
- return chr->label ? chr->label : "";
+ return g_strdup(val);
}
static void get_chr(Object *obj, Visitor *v, void *opaque,
@@ -224,11 +227,12 @@ err:
return ret;
}
-static const char *print_netdev(void *ptr)
+static char *print_netdev(void *ptr)
{
NetClientState *netdev = ptr;
+ const char *val = netdev->name ? netdev->name : "";
- return netdev->name ? netdev->name : "";
+ return g_strdup(val);
}
static void get_netdev(Object *obj, Visitor *v, void *opaque,
@@ -382,6 +386,56 @@ void qdev_set_nic_properties(DeviceState *dev, NICInfo *nd)
nd->instantiated = 1;
}
+/* --- iothread --- */
+
+static char *print_iothread(void *ptr)
+{
+ return iothread_get_id(ptr);
+}
+
+static int parse_iothread(DeviceState *dev, const char *str, void **ptr)
+{
+ IOThread *iothread;
+
+ iothread = iothread_find(str);
+ if (!iothread) {
+ return -ENOENT;
+ }
+ object_ref(OBJECT(iothread));
+ *ptr = iothread;
+ return 0;
+}
+
+static void get_iothread(Object *obj, struct Visitor *v, void *opaque,
+ const char *name, Error **errp)
+{
+ get_pointer(obj, v, opaque, print_iothread, name, errp);
+}
+
+static void set_iothread(Object *obj, struct Visitor *v, void *opaque,
+ const char *name, Error **errp)
+{
+ set_pointer(obj, v, opaque, parse_iothread, name, errp);
+}
+
+static void release_iothread(Object *obj, const char *name, void *opaque)
+{
+ DeviceState *dev = DEVICE(obj);
+ Property *prop = opaque;
+ IOThread **ptr = qdev_get_prop_ptr(dev, prop);
+
+ if (*ptr) {
+ object_unref(OBJECT(*ptr));
+ }
+}
+
+PropertyInfo qdev_prop_iothread = {
+ .name = "iothread",
+ .get = get_iothread,
+ .set = set_iothread,
+ .release = release_iothread,
+};
+
static int qdev_add_one_global(QemuOpts *opts, void *opaque)
{
GlobalProperty *g;
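The get_pointer()/print_*() change in this file switches the print callbacks from returning a borrowed const string to returning a heap-allocated string that the caller frees, so a callback such as print_iothread() can hand back a freshly allocated ID without special-casing. A minimal standalone sketch of the same ownership convention, using plain C strdup()/free() instead of glib (names here are illustrative, not from the patch):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Each print callback returns a heap-allocated string; the caller owns it. */
typedef char *(*print_fn)(void *ptr);

static char *print_label(void *ptr)
{
    const char *label = ptr;                 /* may be NULL */
    return strdup(label ? label : "");       /* always heap-allocated */
}

static void visit_pointer(void *ptr, print_fn print)
{
    char *p = ptr ? print(ptr) : strdup(""); /* one rule for every branch */
    printf("value: '%s'\n", p);
    free(p);                                 /* caller releases exactly once */
}

int main(void)
{
    visit_pointer("virtio0", print_label);
    visit_pointer(NULL, print_label);
    return 0;
}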
diff --git a/hw/core/qdev-properties.c b/hw/core/qdev-properties.c
index 77d0c66635..585a8e902e 100644
--- a/hw/core/qdev-properties.c
+++ b/hw/core/qdev-properties.c
@@ -21,6 +21,18 @@ void qdev_prop_set_after_realize(DeviceState *dev, const char *name,
}
}
+void qdev_prop_allow_set_link_before_realize(Object *obj, const char *name,
+ Object *val, Error **errp)
+{
+ DeviceState *dev = DEVICE(obj);
+
+ if (dev->realized) {
+ error_setg(errp, "Attempt to set link property '%s' on device '%s' "
+ "(type '%s') after it was realized",
+ name, dev->id, object_get_typename(obj));
+ }
+}
+
void *qdev_get_prop_ptr(DeviceState *dev, Property *prop)
{
void *ptr = dev;
@@ -575,8 +587,9 @@ static void set_blocksize(Object *obj, Visitor *v, void *opaque,
/* We rely on power-of-2 blocksizes for bitmasks */
if ((value & (value - 1)) != 0) {
- error_set(errp, QERR_PROPERTY_VALUE_NOT_POWER_OF_2,
- dev->id?:"", name, (int64_t)value);
+ error_setg(errp,
+ "Property %s.%s doesn't take value '%" PRId64 "', it's not a power of 2",
+ dev->id ?: "", name, (int64_t)value);
return;
}
@@ -841,7 +854,7 @@ void error_set_from_qdev_prop_error(Error **errp, int ret, DeviceState *dev,
{
switch (ret) {
case -EEXIST:
- error_set(errp, QERR_PROPERTY_VALUE_IN_USE,
+ error_setg(errp, "Property '%s.%s' can't take value '%s', it's in use",
object_get_typename(OBJECT(dev)), prop->name, value);
break;
default:
@@ -850,7 +863,7 @@ void error_set_from_qdev_prop_error(Error **errp, int ret, DeviceState *dev,
object_get_typename(OBJECT(dev)), prop->name, value);
break;
case -ENOENT:
- error_set(errp, QERR_PROPERTY_VALUE_NOT_FOUND,
+ error_setg(errp, "Property '%s.%s' can't find value '%s'",
object_get_typename(OBJECT(dev)), prop->name, value);
break;
case 0:
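The blocksize validation above relies on the classic bit trick that a power of two has exactly one bit set, so value & (value - 1) is zero. A small standalone check (the explicit rejection of zero is an addition for the sketch; the qdev property handles that separately):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* True for 1, 2, 4, 8, ...; false for 0 and anything with more than one bit set. */
static bool is_power_of_2(uint64_t value)
{
    return value != 0 && (value & (value - 1)) == 0;
}

int main(void)
{
    printf("512 -> %d, 0 -> %d, 3 -> %d\n",
           is_power_of_2(512), is_power_of_2(0), is_power_of_2(3));
    return 0;
}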
diff --git a/hw/core/qdev.c b/hw/core/qdev.c
index 380976a066..60f9df1ed9 100644
--- a/hw/core/qdev.c
+++ b/hw/core/qdev.c
@@ -26,6 +26,7 @@
this API directly. */
#include "hw/qdev.h"
+#include "hw/fw-path-provider.h"
#include "sysemu/sysemu.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
@@ -98,6 +99,8 @@ static void bus_add_child(BusState *bus, DeviceState *child)
object_property_add_link(OBJECT(bus), name,
object_get_typename(OBJECT(child)),
(Object **)&kid->child,
+ NULL, /* read-only property */
+ 0, /* return ownership on prop deletion */
NULL);
}
@@ -501,6 +504,45 @@ static void bus_unparent(Object *obj)
}
}
+static bool bus_get_realized(Object *obj, Error **err)
+{
+ BusState *bus = BUS(obj);
+
+ return bus->realized;
+}
+
+static void bus_set_realized(Object *obj, bool value, Error **err)
+{
+ BusState *bus = BUS(obj);
+ BusClass *bc = BUS_GET_CLASS(bus);
+ Error *local_err = NULL;
+
+ if (value && !bus->realized) {
+ if (bc->realize) {
+ bc->realize(bus, &local_err);
+
+ if (local_err != NULL) {
+ goto error;
+ }
+
+ }
+ } else if (!value && bus->realized) {
+ if (bc->unrealize) {
+ bc->unrealize(bus, &local_err);
+
+ if (local_err != NULL) {
+ goto error;
+ }
+ }
+ }
+
+ bus->realized = value;
+ return;
+
+error:
+ error_propagate(err, local_err);
+}
+
void qbus_create_inplace(void *bus, size_t size, const char *typename,
DeviceState *parent, const char *name)
{
@@ -529,6 +571,18 @@ static char *bus_get_fw_dev_path(BusState *bus, DeviceState *dev)
return NULL;
}
+static char *qdev_get_fw_dev_path_from_handler(BusState *bus, DeviceState *dev)
+{
+ Object *obj = OBJECT(dev);
+ char *d = NULL;
+
+ while (!d && obj->parent) {
+ obj = obj->parent;
+ d = fw_path_provider_try_get_dev_path(obj, bus, dev);
+ }
+ return d;
+}
+
static int qdev_get_fw_dev_path_helper(DeviceState *dev, char *p, int size)
{
int l = 0;
@@ -536,7 +590,10 @@ static int qdev_get_fw_dev_path_helper(DeviceState *dev, char *p, int size)
if (dev && dev->parent_bus) {
char *d;
l = qdev_get_fw_dev_path_helper(dev->parent_bus->parent, p, size);
- d = bus_get_fw_dev_path(dev->parent_bus, dev);
+ d = qdev_get_fw_dev_path_from_handler(dev->parent_bus, dev);
+ if (!d) {
+ d = bus_get_fw_dev_path(dev->parent_bus, dev);
+ }
if (d) {
l += snprintf(p + l, size - l, "%s", d);
g_free(d);
@@ -677,6 +734,7 @@ static void device_set_realized(Object *obj, bool value, Error **err)
{
DeviceState *dev = DEVICE(obj);
DeviceClass *dc = DEVICE_GET_CLASS(dev);
+ BusState *bus;
Error *local_err = NULL;
if (dev->hotplugged && !dc->hotpluggable) {
@@ -710,14 +768,30 @@ static void device_set_realized(Object *obj, bool value, Error **err)
dev->instance_id_alias,
dev->alias_required_for_version);
}
+ if (local_err == NULL) {
+ QLIST_FOREACH(bus, &dev->child_bus, sibling) {
+ object_property_set_bool(OBJECT(bus), true, "realized",
+ &local_err);
+ if (local_err != NULL) {
+ break;
+ }
+ }
+ }
if (dev->hotplugged && local_err == NULL) {
device_reset(dev);
}
} else if (!value && dev->realized) {
- if (qdev_get_vmsd(dev)) {
+ QLIST_FOREACH(bus, &dev->child_bus, sibling) {
+ object_property_set_bool(OBJECT(bus), false, "realized",
+ &local_err);
+ if (local_err != NULL) {
+ break;
+ }
+ }
+ if (qdev_get_vmsd(dev) && local_err == NULL) {
vmstate_unregister(dev, qdev_get_vmsd(dev), dev);
}
- if (dc->unrealize) {
+ if (dc->unrealize && local_err == NULL) {
dc->unrealize(dev, &local_err);
}
}
@@ -735,7 +809,8 @@ static bool device_get_hotpluggable(Object *obj, Error **err)
DeviceClass *dc = DEVICE_GET_CLASS(obj);
DeviceState *dev = DEVICE(obj);
- return dc->hotpluggable && dev->parent_bus->allow_hotplug;
+ return dc->hotpluggable && (dev->parent_bus == NULL ||
+ dev->parent_bus->allow_hotplug);
}
static void device_initfn(Object *obj)
@@ -767,7 +842,8 @@ static void device_initfn(Object *obj)
} while (class != object_class_by_name(TYPE_DEVICE));
object_property_add_link(OBJECT(dev), "parent_bus", TYPE_BUS,
- (Object **)&dev->parent_bus, &error_abort);
+ (Object **)&dev->parent_bus, NULL, 0,
+ &error_abort);
}
static void device_post_init(Object *obj)
@@ -792,14 +868,6 @@ static void device_class_base_init(ObjectClass *class, void *data)
* so do not propagate them to the subclasses.
*/
klass->props = NULL;
-
- /* by default all devices were considered as hotpluggable,
- * so with intent to check it in generic qdev_unplug() /
- * device_set_realized() functions make every device
- * hotpluggable. Devices that shouldn't be hotpluggable,
- * should override it in their class_init()
- */
- klass->hotpluggable = true;
}
static void device_unparent(Object *obj)
@@ -809,13 +877,13 @@ static void device_unparent(Object *obj)
QObject *event_data;
bool have_realized = dev->realized;
+ if (dev->realized) {
+ object_property_set_bool(obj, false, "realized", NULL);
+ }
while (dev->num_child_bus) {
bus = QLIST_FIRST(&dev->child_bus);
object_unparent(OBJECT(bus));
}
- if (dev->realized) {
- object_property_set_bool(obj, false, "realized", NULL);
- }
if (dev->parent_bus) {
bus_remove_child(dev->parent_bus, dev);
object_unref(OBJECT(dev->parent_bus));
@@ -845,6 +913,14 @@ static void device_class_init(ObjectClass *class, void *data)
class->unparent = device_unparent;
dc->realize = device_realize;
dc->unrealize = device_unrealize;
+
+ /* by default all devices were considered as hotpluggable,
+ * so with intent to check it in generic qdev_unplug() /
+ * device_set_realized() functions make every device
+ * hotpluggable. Devices that shouldn't be hotpluggable,
+ * should override it in their class_init()
+ */
+ dc->hotpluggable = true;
}
void device_reset(DeviceState *dev)
@@ -887,7 +963,12 @@ static void qbus_initfn(Object *obj)
QTAILQ_INIT(&bus->children);
object_property_add_link(obj, QDEV_HOTPLUG_HANDLER_PROPERTY,
TYPE_HOTPLUG_HANDLER,
- (Object **)&bus->hotplug_handler, NULL);
+ (Object **)&bus->hotplug_handler,
+ object_property_allow_set_link,
+ OBJ_PROP_LINK_UNREF_ON_RELEASE,
+ NULL);
+ object_property_add_bool(obj, "realized",
+ bus_get_realized, bus_set_realized, NULL);
}
static char *default_bus_get_fw_dev_path(DeviceState *dev)
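qdev_get_fw_dev_path_from_handler() added above walks up the QOM parent chain and asks each ancestor, via the new FW_PATH_PROVIDER interface, whether it wants to supply the firmware path component, falling back to the bus default only if nobody answers. A standalone sketch of that "walk the parents until a handler provides a value" pattern, with a toy node type standing in for QOM objects (all names hypothetical):

#include <stdio.h>

/* Toy stand-in for the QOM parent chain: each node may or may not be able
 * to provide a value; we walk towards the root until somebody answers. */
typedef struct Node Node;
struct Node {
    const char *name;
    struct Node *parent;
    const char *(*try_get_path)(Node *self);   /* NULL if not a provider */
};

static const char *walk_parents_for_path(Node *leaf)
{
    Node *n = leaf;
    const char *path = NULL;

    while (!path && n->parent) {
        n = n->parent;                          /* start from the parent,  */
        if (n->try_get_path) {                  /* mirroring the qdev loop */
            path = n->try_get_path(n);
        }
    }
    return path;                                /* NULL if nobody answered */
}

static const char *machine_path(Node *self)
{
    return "/vdevice/v-scsi@2000";              /* made-up example value */
}

int main(void)
{
    Node machine = { "machine", NULL, machine_path };
    Node bus     = { "bus",     &machine, NULL };
    Node dev     = { "dev",     &bus, NULL };
    const char *p = walk_parents_for_path(&dev);

    printf("fw path: %s\n", p ? p : "(none)");
    return 0;
}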
diff --git a/hw/display/ads7846.c b/hw/display/ads7846.c
index 5da3dc5b2c..85252a2329 100644
--- a/hw/display/ads7846.c
+++ b/hw/display/ads7846.c
@@ -133,11 +133,12 @@ static const VMStateDescription vmstate_ads7846 = {
}
};
-static int ads7846_init(SSISlave *dev)
+static int ads7846_init(SSISlave *d)
{
- ADS7846State *s = FROM_SSI_SLAVE(ADS7846State, dev);
+ DeviceState *dev = DEVICE(d);
+ ADS7846State *s = FROM_SSI_SLAVE(ADS7846State, d);
- qdev_init_gpio_out(&dev->qdev, &s->interrupt, 1);
+ qdev_init_gpio_out(dev, &s->interrupt, 1);
s->input[0] = ADS_TEMP0; /* TEMP0 */
s->input[2] = ADS_VBAT; /* VBAT */
diff --git a/hw/display/ssd0323.c b/hw/display/ssd0323.c
index 46c3b40c79..971152edbd 100644
--- a/hw/display/ssd0323.c
+++ b/hw/display/ssd0323.c
@@ -336,18 +336,19 @@ static const GraphicHwOps ssd0323_ops = {
.gfx_update = ssd0323_update_display,
};
-static int ssd0323_init(SSISlave *dev)
+static int ssd0323_init(SSISlave *d)
{
- ssd0323_state *s = FROM_SSI_SLAVE(ssd0323_state, dev);
+ DeviceState *dev = DEVICE(d);
+ ssd0323_state *s = FROM_SSI_SLAVE(ssd0323_state, d);
s->col_end = 63;
s->row_end = 79;
- s->con = graphic_console_init(DEVICE(dev), 0, &ssd0323_ops, s);
+ s->con = graphic_console_init(dev, 0, &ssd0323_ops, s);
qemu_console_resize(s->con, 128 * MAGNIFY, 64 * MAGNIFY);
- qdev_init_gpio_in(&dev->qdev, ssd0323_cd, 1);
+ qdev_init_gpio_in(dev, ssd0323_cd, 1);
- register_savevm(&dev->qdev, "ssd0323_oled", -1, 1,
+ register_savevm(dev, "ssd0323_oled", -1, 1,
ssd0323_save, ssd0323_load, s);
return 0;
}
diff --git a/hw/display/vmware_vga.c b/hw/display/vmware_vga.c
index bd2c108c42..6ae3348deb 100644
--- a/hw/display/vmware_vga.c
+++ b/hw/display/vmware_vga.c
@@ -25,6 +25,7 @@
#include "hw/loader.h"
#include "trace.h"
#include "ui/console.h"
+#include "ui/vnc.h"
#include "hw/pci/pci.h"
#undef VERBOSE
@@ -218,7 +219,7 @@ enum {
/* These values can probably be changed arbitrarily. */
#define SVGA_SCRATCH_SIZE 0x8000
-#define SVGA_MAX_WIDTH 2360
+#define SVGA_MAX_WIDTH ROUND_UP(2360, VNC_DIRTY_PIXELS_PER_BIT)
#define SVGA_MAX_HEIGHT 1770
#ifdef VERBOSE
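SVGA_MAX_WIDTH is now rounded up so the maximum framebuffer width is a multiple of the VNC dirty-bitmap granularity. A hedged sketch of the rounding; the macro below is the usual power-of-two formulation and the 16-pixel granularity is only an assumed example value, not taken from the patch:

#include <stdio.h>

/* Round n up to the next multiple of d, where d is a power of two.
 * (Intended to match the spirit of QEMU's ROUND_UP; the exact macro in
 * osdep.h may be formulated differently.) */
#define ROUND_UP_POW2(n, d) (((n) + (d) - 1) & ~((d) - 1))

int main(void)
{
    /* Assuming 16 dirty pixels per bitmap bit, the old 2360-pixel limit
     * becomes 2368, the next multiple of 16. */
    printf("%d\n", ROUND_UP_POW2(2360, 16));
    return 0;
}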
diff --git a/hw/dma/xilinx_axidma.c b/hw/dma/xilinx_axidma.c
index 19f07b3b25..14b887bfa8 100644
--- a/hw/dma/xilinx_axidma.c
+++ b/hw/dma/xilinx_axidma.c
@@ -537,9 +537,15 @@ static void xilinx_axidma_realize(DeviceState *dev, Error **errp)
Error *local_errp = NULL;
object_property_add_link(OBJECT(ds), "dma", TYPE_XILINX_AXI_DMA,
- (Object **)&ds->dma, &local_errp);
+ (Object **)&ds->dma,
+ object_property_allow_set_link,
+ OBJ_PROP_LINK_UNREF_ON_RELEASE,
+ &local_errp);
object_property_add_link(OBJECT(cs), "dma", TYPE_XILINX_AXI_DMA,
- (Object **)&cs->dma, &local_errp);
+ (Object **)&cs->dma,
+ object_property_allow_set_link,
+ OBJ_PROP_LINK_UNREF_ON_RELEASE,
+ &local_errp);
if (local_errp) {
goto xilinx_axidma_realize_fail;
}
@@ -571,10 +577,16 @@ static void xilinx_axidma_init(Object *obj)
SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
object_property_add_link(obj, "axistream-connected", TYPE_STREAM_SLAVE,
- (Object **)&s->tx_data_dev, &error_abort);
+ (Object **)&s->tx_data_dev,
+ qdev_prop_allow_set_link_before_realize,
+ OBJ_PROP_LINK_UNREF_ON_RELEASE,
+ &error_abort);
object_property_add_link(obj, "axistream-control-connected",
TYPE_STREAM_SLAVE,
- (Object **)&s->tx_control_dev, &error_abort);
+ (Object **)&s->tx_control_dev,
+ qdev_prop_allow_set_link_before_realize,
+ OBJ_PROP_LINK_UNREF_ON_RELEASE,
+ &error_abort);
object_initialize(&s->rx_data_dev, sizeof(s->rx_data_dev),
TYPE_XILINX_AXI_DMA_DATA_STREAM);
diff --git a/hw/i2c/smbus_eeprom.c b/hw/i2c/smbus_eeprom.c
index 86f35c11de..72c09cba6b 100644
--- a/hw/i2c/smbus_eeprom.c
+++ b/hw/i2c/smbus_eeprom.c
@@ -71,7 +71,7 @@ static void eeprom_write_data(SMBusDevice *dev, uint8_t cmd, uint8_t *buf, int l
printf("eeprom_write_byte: addr=0x%02x cmd=0x%02x val=0x%02x\n",
dev->i2c.address, cmd, buf[0]);
#endif
- /* An page write operation is not a valid SMBus command.
+ /* A page write operation is not a valid SMBus command.
It is a block write without a length byte. Fortunately we
get the full block anyway. */
/* TODO: Should this set the current location? */
diff --git a/hw/i386/acpi-build.c b/hw/i386/acpi-build.c
index 7ecfd7004b..c98df88cd2 100644
--- a/hw/i386/acpi-build.c
+++ b/hw/i386/acpi-build.c
@@ -52,7 +52,7 @@
#include "qom/qom-qobject.h"
typedef struct AcpiCpuInfo {
- DECLARE_BITMAP(found_cpus, MAX_CPUMASK_BITS + 1);
+ DECLARE_BITMAP(found_cpus, ACPI_CPU_HOTPLUG_ID_LIMIT);
} AcpiCpuInfo;
typedef struct AcpiMcfgInfo {
@@ -117,7 +117,7 @@ int acpi_add_cpu_info(Object *o, void *opaque)
if (object_dynamic_cast(o, TYPE_CPU)) {
apic_id = object_property_get_int(o, "apic-id", NULL);
- assert(apic_id <= MAX_CPUMASK_BITS);
+ assert(apic_id < ACPI_CPU_HOTPLUG_ID_LIMIT);
set_bit(apic_id, cpu->found_cpus);
}
@@ -226,14 +226,14 @@ static void acpi_get_pci_info(PcPciInfo *info)
static void
build_header(GArray *linker, GArray *table_data,
- AcpiTableHeader *h, uint32_t sig, int len, uint8_t rev)
+ AcpiTableHeader *h, const char *sig, int len, uint8_t rev)
{
- h->signature = cpu_to_le32(sig);
+ memcpy(&h->signature, sig, 4);
h->length = cpu_to_le32(len);
h->revision = rev;
memcpy(h->oem_id, ACPI_BUILD_APPNAME6, 6);
memcpy(h->oem_table_id, ACPI_BUILD_APPNAME4, 4);
- memcpy(h->oem_table_id + 4, (void *)&sig, 4);
+ memcpy(h->oem_table_id + 4, sig, 4);
h->oem_revision = cpu_to_le32(1);
memcpy(h->asl_compiler_id, ACPI_BUILD_APPNAME4, 4);
h->asl_compiler_revision = cpu_to_le32(1);
@@ -391,7 +391,7 @@ static void build_append_int(GArray *table, uint32_t value)
build_append_byte(table, 0x01); /* OneOp */
} else if (value <= 0xFF) {
build_append_value(table, value, 1);
- } else if (value <= 0xFFFFF) {
+ } else if (value <= 0xFFFF) {
build_append_value(table, value, 2);
} else {
build_append_value(table, value, 4);
@@ -495,7 +495,7 @@ static void
build_facs(GArray *table_data, GArray *linker, PcGuestInfo *guest_info)
{
AcpiFacsDescriptorRev1 *facs = acpi_data_push(table_data, sizeof *facs);
- facs->signature = cpu_to_le32(ACPI_FACS_SIGNATURE);
+ memcpy(&facs->signature, "FACS", 4);
facs->length = cpu_to_le32(sizeof(*facs));
}
@@ -552,7 +552,7 @@ build_fadt(GArray *table_data, GArray *linker, AcpiPmInfo *pm,
fadt_setup(fadt, pm);
build_header(linker, table_data,
- (void *)fadt, ACPI_FACP_SIGNATURE, sizeof(*fadt), 1);
+ (void *)fadt, "FACP", sizeof(*fadt), 1);
}
static void
@@ -621,7 +621,7 @@ build_madt(GArray *table_data, GArray *linker, AcpiCpuInfo *cpu,
local_nmi->lint = 1; /* ACPI_LINT1 */
build_header(linker, table_data,
- (void *)(table_data->data + madt_start), ACPI_APIC_SIGNATURE,
+ (void *)(table_data->data + madt_start), "APIC",
table_data->len - madt_start, 1);
}
@@ -841,7 +841,7 @@ static void build_pci_bus_end(PCIBus *bus, void *bus_state)
pc = PCI_DEVICE_GET_CLASS(pdev);
dc = DEVICE_GET_CLASS(pdev);
- if (pc->class_id == PCI_CLASS_BRIDGE_ISA) {
+ if (pc->class_id == PCI_CLASS_BRIDGE_ISA || pc->is_bridge) {
set_bit(slot, slot_device_system);
}
@@ -882,7 +882,7 @@ static void build_pci_bus_end(PCIBus *bus, void *bus_state)
memcpy(pcihp, ACPI_PCIVGA_AML, ACPI_PCIVGA_SIZEOF);
patch_pcivga(i, pcihp);
} else if (system) {
- /* Nothing to do: system devices are in DSDT. */
+ /* Nothing to do: system devices are in DSDT or in SSDT above. */
} else if (present) {
void *pcihp = acpi_data_push(bus_table,
ACPI_PCINOHP_SIZEOF);
@@ -907,7 +907,7 @@ static void build_pci_bus_end(PCIBus *bus, void *bus_state)
build_append_byte(notify, 0x7B); /* AndOp */
build_append_byte(notify, 0x68); /* Arg0Op */
- build_append_int(notify, 0x1 << i);
+ build_append_int(notify, 0x1U << i);
build_append_byte(notify, 0x00); /* NullName */
build_append_byte(notify, 0x86); /* NotifyOp */
build_append_nameseg(notify, "S%.02X_", PCI_DEVFN(i, 0));
@@ -999,11 +999,16 @@ build_ssdt(GArray *table_data, GArray *linker,
AcpiCpuInfo *cpu, AcpiPmInfo *pm, AcpiMiscInfo *misc,
PcPciInfo *pci, PcGuestInfo *guest_info)
{
- int acpi_cpus = MIN(0xff, guest_info->apic_id_limit);
+ unsigned acpi_cpus = guest_info->apic_id_limit;
int ssdt_start = table_data->len;
uint8_t *ssdt_ptr;
int i;
+ /* The current AML generator can cover the APIC ID range [0..255],
+ * inclusive, for VCPU hotplug. */
+ QEMU_BUILD_BUG_ON(ACPI_CPU_HOTPLUG_ID_LIMIT > 256);
+ g_assert(acpi_cpus <= ACPI_CPU_HOTPLUG_ID_LIMIT);
+
/* Copy header and patch values in the S3_ / S4_ / S5_ packages */
ssdt_ptr = acpi_data_push(table_data, sizeof(ssdp_misc_aml));
memcpy(ssdt_ptr, ssdp_misc_aml, sizeof(ssdp_misc_aml));
@@ -1019,8 +1024,8 @@ build_ssdt(GArray *table_data, GArray *linker,
patch_pci_windows(pci, ssdt_ptr, sizeof(ssdp_misc_aml));
- *(uint16_t *)(ssdt_ptr + *ssdt_isa_pest) =
- cpu_to_le16(misc->pvpanic_port);
+ ACPI_BUILD_SET_LE(ssdt_ptr, sizeof(ssdp_misc_aml),
+ ssdt_isa_pest[0], 16, misc->pvpanic_port);
{
GArray *sb_scope = build_alloc_array();
@@ -1050,9 +1055,21 @@ build_ssdt(GArray *table_data, GArray *linker,
{
GArray *package = build_alloc_array();
- uint8_t op = 0x12; /* PackageOp */
+ uint8_t op;
+
+ /*
+ * Note: The ability to create variable-sized packages was first
+ * introduced in ACPI 2.0. ACPI 1.0 only allowed fixed-size packages
+ * with up to 255 elements. Windows guests up to win2k8 fail when
+ * VarPackageOp is used.
+ */
+ if (acpi_cpus <= 255) {
+ op = 0x12; /* PackageOp */
+ build_append_byte(package, acpi_cpus); /* NumElements */
+ } else {
+ op = 0x13; /* VarPackageOp */
+ build_append_int(package, acpi_cpus); /* VarNumElements */
+ }
- build_append_byte(package, acpi_cpus); /* NumElements */
for (i = 0; i < acpi_cpus; i++) {
uint8_t b = test_bit(i, cpu->found_cpus) ? 0x01 : 0x00;
build_append_byte(package, b);
@@ -1093,7 +1110,7 @@ build_ssdt(GArray *table_data, GArray *linker,
build_header(linker, table_data,
(void *)(table_data->data + ssdt_start),
- ACPI_SSDT_SIGNATURE, table_data->len - ssdt_start, 1);
+ "SSDT", table_data->len - ssdt_start, 1);
}
static void
@@ -1108,7 +1125,7 @@ build_hpet(GArray *table_data, GArray *linker)
hpet->timer_block_id = cpu_to_le32(0x8086a201);
hpet->addr.address = cpu_to_le64(HPET_BASE);
build_header(linker, table_data,
- (void *)hpet, ACPI_HPET_SIGNATURE, sizeof(*hpet), 1);
+ (void *)hpet, "HPET", sizeof(*hpet), 1);
}
static void
@@ -1200,7 +1217,7 @@ build_srat(GArray *table_data, GArray *linker,
build_header(linker, table_data,
(void *)(table_data->data + srat_start),
- ACPI_SRAT_SIGNATURE,
+ "SRAT",
table_data->len - srat_start, 1);
}
@@ -1208,7 +1225,7 @@ static void
build_mcfg_q35(GArray *table_data, GArray *linker, AcpiMcfgInfo *info)
{
AcpiTableMcfg *mcfg;
- uint32_t sig;
+ const char *sig;
int len = sizeof(*mcfg) + 1 * sizeof(mcfg->allocation[0]);
mcfg = acpi_data_push(table_data, len);
@@ -1225,9 +1242,10 @@ build_mcfg_q35(GArray *table_data, GArray *linker, AcpiMcfgInfo *info)
* ACPI spec requires OSPMs to ignore such tables.
*/
if (info->mcfg_base == PCIE_BASE_ADDR_UNMAPPED) {
- sig = ACPI_RSRV_SIGNATURE;
+ /* Reserved signature: ignored by OSPM */
+ sig = "QEMU";
} else {
- sig = ACPI_MCFG_SIGNATURE;
+ sig = "MCFG";
}
build_header(linker, table_data, (void *)mcfg, sig, len, 1);
}
@@ -1243,7 +1261,7 @@ build_dsdt(GArray *table_data, GArray *linker, AcpiMiscInfo *misc)
memcpy(dsdt, misc->dsdt_code, misc->dsdt_size);
memset(dsdt, 0, sizeof *dsdt);
- build_header(linker, table_data, dsdt, ACPI_DSDT_SIGNATURE,
+ build_header(linker, table_data, dsdt, "DSDT",
misc->dsdt_size, 1);
}
@@ -1268,7 +1286,7 @@ build_rsdt(GArray *table_data, GArray *linker, GArray *table_offsets)
sizeof(uint32_t));
}
build_header(linker, table_data,
- (void *)rsdt, ACPI_RSDT_SIGNATURE, rsdt_len, 1);
+ (void *)rsdt, "RSDT", rsdt_len, 1);
}
static GArray *
@@ -1279,7 +1297,7 @@ build_rsdp(GArray *rsdp_table, GArray *linker, unsigned rsdt)
bios_linker_loader_alloc(linker, ACPI_BUILD_RSDP_FILE, 1,
true /* fseg memory */);
- rsdp->signature = cpu_to_le64(ACPI_RSDP_SIGNATURE);
+ memcpy(&rsdp->signature, "RSD PTR ", 8);
memcpy(rsdp->oem_id, ACPI_BUILD_APPNAME6, 6);
rsdp->rsdt_physical_address = cpu_to_le32(rsdt);
/* Address to be filled by Guest linker */
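Throughout this file the table signatures change from host-endian uint32_t constants pushed through cpu_to_le32() to 4-character strings copied with memcpy(); copying the bytes directly is endianness-neutral and reads better ("FACP" instead of 0x50434146). A standalone illustration of why the byte-wise copy needs no swapping (not QEMU code):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct table_header {
    uint32_t signature;        /* stored as 4 ASCII bytes in guest memory */
};

int main(void)
{
    struct table_header h;

    /* Byte-wise copy: "FACP" lands in memory as 'F','A','C','P' on any host. */
    memcpy(&h.signature, "FACP", 4);

    /* The equivalent integer constant only yields that byte order on
     * little-endian hosts unless it is swapped first, which is what the
     * removed cpu_to_le32() call did. */
    printf("%.4s\n", (char *)&h.signature);
    return 0;
}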
diff --git a/hw/i386/acpi-defs.h b/hw/i386/acpi-defs.h
index 78ca20489f..e93babb026 100644
--- a/hw/i386/acpi-defs.h
+++ b/hw/i386/acpi-defs.h
@@ -52,8 +52,6 @@ struct Acpi20GenericAddress {
} QEMU_PACKED;
typedef struct Acpi20GenericAddress Acpi20GenericAddress;
-#define ACPI_RSDP_SIGNATURE 0x2052545020445352LL // "RSD PTR "
-
struct AcpiRsdpDescriptor { /* Root System Descriptor Pointer */
uint64_t signature; /* ACPI signature, contains "RSD PTR " */
uint8_t checksum; /* To make sum of struct == 0 */
@@ -92,7 +90,6 @@ typedef struct AcpiTableHeader AcpiTableHeader;
/*
* ACPI 1.0 Fixed ACPI Description Table (FADT)
*/
-#define ACPI_FACP_SIGNATURE 0x50434146 // FACP
struct AcpiFadtDescriptorRev1
{
ACPI_TABLE_HEADER_DEF /* ACPI common table header */
@@ -141,7 +138,6 @@ typedef struct AcpiFadtDescriptorRev1 AcpiFadtDescriptorRev1;
/*
* ACPI 1.0 Root System Description Table (RSDT)
*/
-#define ACPI_RSDT_SIGNATURE 0x54445352 // RSDT
struct AcpiRsdtDescriptorRev1
{
ACPI_TABLE_HEADER_DEF /* ACPI common table header */
@@ -153,7 +149,6 @@ typedef struct AcpiRsdtDescriptorRev1 AcpiRsdtDescriptorRev1;
/*
* ACPI 1.0 Firmware ACPI Control Structure (FACS)
*/
-#define ACPI_FACS_SIGNATURE 0x53434146 // FACS
struct AcpiFacsDescriptorRev1
{
uint32_t signature; /* ACPI Signature */
@@ -169,7 +164,6 @@ typedef struct AcpiFacsDescriptorRev1 AcpiFacsDescriptorRev1;
/*
* Differentiated System Description Table (DSDT)
*/
-#define ACPI_DSDT_SIGNATURE 0x54445344 // DSDT
/*
* MADT values and structures
@@ -182,7 +176,6 @@ typedef struct AcpiFacsDescriptorRev1 AcpiFacsDescriptorRev1;
/* Master MADT */
-#define ACPI_APIC_SIGNATURE 0x43495041 // APIC
struct AcpiMultipleApicTable
{
ACPI_TABLE_HEADER_DEF /* ACPI common table header */
@@ -253,7 +246,6 @@ typedef struct AcpiMadtLocalNmi AcpiMadtLocalNmi;
/*
* HPET Description Table
*/
-#define ACPI_HPET_SIGNATURE 0x54455048 // HPET
struct Acpi20Hpet {
ACPI_TABLE_HEADER_DEF /* ACPI common table header */
uint32_t timer_block_id;
@@ -268,7 +260,6 @@ typedef struct Acpi20Hpet Acpi20Hpet;
* SRAT (NUMA topology description) table
*/
-#define ACPI_SRAT_SIGNATURE 0x54415253 // SRAT
struct AcpiSystemResourceAffinityTable
{
ACPI_TABLE_HEADER_DEF
@@ -316,11 +307,6 @@ struct AcpiMcfgAllocation {
} QEMU_PACKED;
typedef struct AcpiMcfgAllocation AcpiMcfgAllocation;
-#define ACPI_MCFG_SIGNATURE 0x4746434d // MCFG
-
-/* Reserved signature: ignored by OSPM */
-#define ACPI_RSRV_SIGNATURE 0x554d4551 // QEMU
-
struct AcpiTableMcfg {
ACPI_TABLE_HEADER_DEF;
uint8_t reserved[8];
diff --git a/hw/i386/acpi-dsdt-cpu-hotplug.dsl b/hw/i386/acpi-dsdt-cpu-hotplug.dsl
index dee4843cde..34aab5af43 100644
--- a/hw/i386/acpi-dsdt-cpu-hotplug.dsl
+++ b/hw/i386/acpi-dsdt-cpu-hotplug.dsl
@@ -93,7 +93,7 @@ Scope(\_SB) {
}
Device(CPU_HOTPLUG_RESOURCE_DEVICE) {
- Name(_HID, "ACPI0004")
+ Name(_HID, EisaId("PNP0A06"))
Name(_CRS, ResourceTemplate() {
IO(Decode16, CPU_STATUS_BASE, CPU_STATUS_BASE, 0, CPU_STATUS_LEN)
diff --git a/hw/i386/acpi-dsdt.hex.generated b/hw/i386/acpi-dsdt.hex.generated
index 94c6e8e114..e61572a5dd 100644
--- a/hw/i386/acpi-dsdt.hex.generated
+++ b/hw/i386/acpi-dsdt.hex.generated
@@ -3,12 +3,12 @@ static unsigned char AcpiDsdtAmlCode[] = {
0x53,
0x44,
0x54,
-0x85,
+0x80,
0x11,
0x0,
0x0,
0x1,
-0x8b,
+0x60,
0x42,
0x58,
0x50,
@@ -31,8 +31,8 @@ static unsigned char AcpiDsdtAmlCode[] = {
0x4e,
0x54,
0x4c,
-0x23,
-0x8,
+0x15,
+0x11,
0x13,
0x20,
0x10,
@@ -4010,7 +4010,7 @@ static unsigned char AcpiDsdtAmlCode[] = {
0x53,
0x1,
0x10,
-0x47,
+0x42,
0x11,
0x5f,
0x53,
@@ -4243,7 +4243,7 @@ static unsigned char AcpiDsdtAmlCode[] = {
0x60,
0x5b,
0x82,
-0x2e,
+0x29,
0x50,
0x52,
0x45,
@@ -4253,16 +4253,11 @@ static unsigned char AcpiDsdtAmlCode[] = {
0x48,
0x49,
0x44,
-0xd,
+0xc,
0x41,
-0x43,
-0x50,
-0x49,
-0x30,
-0x30,
-0x30,
-0x34,
-0x0,
+0xd0,
+0xa,
+0x6,
0x8,
0x5f,
0x43,
diff --git a/hw/i386/kvmvapic.c b/hw/i386/kvmvapic.c
index 72025d0359..a967b48965 100644
--- a/hw/i386/kvmvapic.c
+++ b/hw/i386/kvmvapic.c
@@ -124,14 +124,14 @@ static const TPRInstruction tpr_instr[] = {
static void read_guest_rom_state(VAPICROMState *s)
{
- cpu_physical_memory_rw(s->rom_state_paddr, (void *)&s->rom_state,
- sizeof(GuestROMState), 0);
+ cpu_physical_memory_read(s->rom_state_paddr, &s->rom_state,
+ sizeof(GuestROMState));
}
static void write_guest_rom_state(VAPICROMState *s)
{
- cpu_physical_memory_rw(s->rom_state_paddr, (void *)&s->rom_state,
- sizeof(GuestROMState), 1);
+ cpu_physical_memory_write(s->rom_state_paddr, &s->rom_state,
+ sizeof(GuestROMState));
}
static void update_guest_rom_state(VAPICROMState *s)
@@ -311,16 +311,14 @@ static int update_rom_mapping(VAPICROMState *s, CPUX86State *env, target_ulong i
for (pos = le32_to_cpu(s->rom_state.fixup_start);
pos < le32_to_cpu(s->rom_state.fixup_end);
pos += 4) {
- cpu_physical_memory_rw(paddr + pos - s->rom_state.vaddr,
- (void *)&offset, sizeof(offset), 0);
+ cpu_physical_memory_read(paddr + pos - s->rom_state.vaddr,
+ &offset, sizeof(offset));
offset = le32_to_cpu(offset);
- cpu_physical_memory_rw(paddr + offset, (void *)&patch,
- sizeof(patch), 0);
+ cpu_physical_memory_read(paddr + offset, &patch, sizeof(patch));
patch = le32_to_cpu(patch);
patch += rom_state_vaddr - le32_to_cpu(s->rom_state.vaddr);
patch = cpu_to_le32(patch);
- cpu_physical_memory_rw(paddr + offset, (void *)&patch,
- sizeof(patch), 1);
+ cpu_physical_memory_write(paddr + offset, &patch, sizeof(patch));
}
read_guest_rom_state(s);
s->vapic_paddr = paddr + le32_to_cpu(s->rom_state.vapic_vaddr) -
@@ -364,8 +362,8 @@ static int vapic_enable(VAPICROMState *s, X86CPU *cpu)
}
vapic_paddr = s->vapic_paddr +
(((hwaddr)cpu_number) << VAPIC_CPU_SHIFT);
- cpu_physical_memory_rw(vapic_paddr + offsetof(VAPICState, enabled),
- (void *)&enabled, sizeof(enabled), 1);
+ cpu_physical_memory_write(vapic_paddr + offsetof(VAPICState, enabled),
+ &enabled, sizeof(enabled));
apic_enable_vapic(cpu->apic_state, vapic_paddr);
s->state = VAPIC_ACTIVE;
@@ -406,7 +404,7 @@ static void patch_instruction(VAPICROMState *s, X86CPU *cpu, target_ulong ip)
}
if (!kvm_enabled()) {
- cpu_restore_state(env, env->mem_io_pc);
+ cpu_restore_state(cs, cs->mem_io_pc);
cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
&current_flags);
}
@@ -448,8 +446,8 @@ static void patch_instruction(VAPICROMState *s, X86CPU *cpu, target_ulong ip)
if (!kvm_enabled()) {
cs->current_tb = NULL;
- tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
- cpu_resume_from_signal(env, NULL);
+ tb_gen_code(cs, current_pc, current_cs_base, current_flags, 1);
+ cpu_resume_from_signal(cs, NULL);
}
}
@@ -535,7 +533,7 @@ static int patch_hypercalls(VAPICROMState *s)
uint8_t *rom;
rom = g_malloc(s->rom_size);
- cpu_physical_memory_rw(rom_paddr, rom, s->rom_size, 0);
+ cpu_physical_memory_read(rom_paddr, rom, s->rom_size);
for (pos = 0; pos < s->rom_size - sizeof(vmcall_pattern); pos++) {
if (kvm_irqchip_in_kernel()) {
@@ -551,8 +549,7 @@ static int patch_hypercalls(VAPICROMState *s)
}
if (memcmp(rom + pos, pattern, 7) == 0 &&
(rom[pos + 7] == alternates[0] || rom[pos + 7] == alternates[1])) {
- cpu_physical_memory_rw(rom_paddr + pos + 5, (uint8_t *)patch,
- 3, 1);
+ cpu_physical_memory_write(rom_paddr + pos + 5, patch, 3);
/*
* Don't flush the tb here. Under ordinary conditions, the patched
* calls are miles away from the current IP. Under malicious
@@ -760,8 +757,8 @@ static int vapic_post_load(void *opaque, int version_id)
run_on_cpu(first_cpu, do_vapic_enable, s);
} else {
zero = g_malloc0(s->rom_state.vapic_size);
- cpu_physical_memory_rw(s->vapic_paddr, zero,
- s->rom_state.vapic_size, 1);
+ cpu_physical_memory_write(s->vapic_paddr, zero,
+ s->rom_state.vapic_size);
g_free(zero);
}
}
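The kvmvapic changes replace explicit cpu_physical_memory_rw(..., is_write) calls with the dedicated read/write helpers, dropping the casts and the easy-to-misread 0/1 direction flag at every call site. A self-contained toy showing the same wrapper pattern over a generic rw primitive (the guest_memory_* names are invented for the sketch; the real helpers live in QEMU's exec/cpu-common.h):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Toy 64 KiB "guest RAM" plus an rw primitive with an is_write flag,
 * standing in for cpu_physical_memory_rw(). */
static uint8_t guest_ram[0x10000];

static void guest_memory_rw(uint32_t addr, void *buf, int len, int is_write)
{
    if (is_write) {
        memcpy(guest_ram + addr, buf, len);
    } else {
        memcpy(buf, guest_ram + addr, len);
    }
}

/* Direction-specific wrappers: no cast, no 0/1 flag at the call site. */
static void guest_memory_read(uint32_t addr, void *buf, int len)
{
    guest_memory_rw(addr, buf, len, 0);
}

static void guest_memory_write(uint32_t addr, const void *buf, int len)
{
    guest_memory_rw(addr, (void *)buf, len, 1);
}

int main(void)
{
    uint32_t patch = 0x12345678, readback = 0;

    guest_memory_write(0x100, &patch, sizeof(patch));
    guest_memory_read(0x100, &readback, sizeof(readback));
    printf("0x%08x\n", readback);
    return 0;
}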
diff --git a/hw/i386/pc.c b/hw/i386/pc.c
index e715a3312d..14f0d91f76 100644
--- a/hw/i386/pc.c
+++ b/hw/i386/pc.c
@@ -53,6 +53,7 @@
#include "qemu/bitmap.h"
#include "qemu/config-file.h"
#include "hw/acpi/acpi.h"
+#include "hw/acpi/cpu_hotplug.h"
#include "hw/cpu/icc_bus.h"
#include "hw/boards.h"
#include "hw/pci/pci_host.h"
@@ -974,6 +975,13 @@ void pc_hot_add_cpu(const int64_t id, Error **errp)
return;
}
+ if (apic_id >= ACPI_CPU_HOTPLUG_ID_LIMIT) {
+ error_setg(errp, "Unable to add CPU: %" PRIi64
+ ", resulting APIC ID (%" PRIi64 ") is too large",
+ id, apic_id);
+ return;
+ }
+
icc_bridge = DEVICE(object_resolve_path_type("icc-bridge",
TYPE_ICC_BRIDGE, NULL));
pc_new_cpu(current_cpu_model, apic_id, icc_bridge, errp);
@@ -984,6 +992,7 @@ void pc_cpus_init(const char *cpu_model, DeviceState *icc_bridge)
int i;
X86CPU *cpu = NULL;
Error *error = NULL;
+ unsigned long apic_id_limit;
/* init CPUs */
if (cpu_model == NULL) {
@@ -995,6 +1004,13 @@ void pc_cpus_init(const char *cpu_model, DeviceState *icc_bridge)
}
current_cpu_model = cpu_model;
+ apic_id_limit = pc_apic_id_limit(max_cpus);
+ if (apic_id_limit > ACPI_CPU_HOTPLUG_ID_LIMIT) {
+ error_report("max_cpus is too large. APIC ID of last CPU is %lu",
+ apic_id_limit - 1);
+ exit(1);
+ }
+
for (i = 0; i < smp_cpus; i++) {
cpu = pc_new_cpu(cpu_model, x86_cpu_apic_id_from_index(i),
icc_bridge, &error);
diff --git a/hw/i386/pc_piix.c b/hw/i386/pc_piix.c
index 5e1d2d3de3..7930a26c1e 100644
--- a/hw/i386/pc_piix.c
+++ b/hw/i386/pc_piix.c
@@ -267,6 +267,7 @@ static void pc_compat_1_7(QEMUMachineInitArgs *args)
smbios_type1_defaults = false;
gigabyte_align = false;
option_rom_has_mr = true;
+ x86_cpu_compat_disable_kvm_features(FEAT_1_ECX, CPUID_EXT_X2APIC);
}
static void pc_compat_1_6(QEMUMachineInitArgs *args)
@@ -299,7 +300,7 @@ static void pc_compat_1_3(QEMUMachineInitArgs *args)
static void pc_compat_1_2(QEMUMachineInitArgs *args)
{
pc_compat_1_3(args);
- disable_kvm_pv_eoi();
+ x86_cpu_compat_disable_kvm_features(FEAT_KVM, KVM_FEATURE_PV_EOI);
}
static void pc_init_pci_1_7(QEMUMachineInitArgs *args)
@@ -345,7 +346,7 @@ static void pc_init_pci_no_kvmclock(QEMUMachineInitArgs *args)
has_pci_info = false;
has_acpi_build = false;
smbios_type1_defaults = false;
- disable_kvm_pv_eoi();
+ x86_cpu_compat_disable_kvm_features(FEAT_KVM, KVM_FEATURE_PV_EOI);
enable_compat_apic_id_mode();
pc_init1(args, 1, 0);
}
@@ -358,7 +359,7 @@ static void pc_init_isa(QEMUMachineInitArgs *args)
if (!args->cpu_model) {
args->cpu_model = "486";
}
- disable_kvm_pv_eoi();
+ x86_cpu_compat_disable_kvm_features(FEAT_KVM, KVM_FEATURE_PV_EOI);
enable_compat_apic_id_mode();
pc_init1(args, 0, 1);
}
diff --git a/hw/i386/pc_q35.c b/hw/i386/pc_q35.c
index 4b0456a95b..c844dc2a9f 100644
--- a/hw/i386/pc_q35.c
+++ b/hw/i386/pc_q35.c
@@ -245,6 +245,7 @@ static void pc_compat_1_7(QEMUMachineInitArgs *args)
smbios_type1_defaults = false;
gigabyte_align = false;
option_rom_has_mr = true;
+ x86_cpu_compat_disable_kvm_features(FEAT_1_ECX, CPUID_EXT_X2APIC);
}
static void pc_compat_1_6(QEMUMachineInitArgs *args)
diff --git a/hw/i386/q35-acpi-dsdt.hex.generated b/hw/i386/q35-acpi-dsdt.hex.generated
index 6c29f3b6d2..6b788c9be0 100644
--- a/hw/i386/q35-acpi-dsdt.hex.generated
+++ b/hw/i386/q35-acpi-dsdt.hex.generated
@@ -3,12 +3,12 @@ static unsigned char Q35AcpiDsdtAmlCode[] = {
0x53,
0x44,
0x54,
-0xd7,
+0xd2,
0x1c,
0x0,
0x0,
0x1,
-0x3e,
+0x13,
0x42,
0x58,
0x50,
@@ -31,8 +31,8 @@ static unsigned char Q35AcpiDsdtAmlCode[] = {
0x4e,
0x54,
0x4c,
-0x23,
-0x8,
+0x15,
+0x11,
0x13,
0x20,
0x10,
@@ -6959,7 +6959,7 @@ static unsigned char Q35AcpiDsdtAmlCode[] = {
0x53,
0x1,
0x10,
-0x47,
+0x42,
0x11,
0x5f,
0x53,
@@ -7192,7 +7192,7 @@ static unsigned char Q35AcpiDsdtAmlCode[] = {
0x60,
0x5b,
0x82,
-0x2e,
+0x29,
0x50,
0x52,
0x45,
@@ -7202,16 +7202,11 @@ static unsigned char Q35AcpiDsdtAmlCode[] = {
0x48,
0x49,
0x44,
-0xd,
+0xc,
0x41,
-0x43,
-0x50,
-0x49,
-0x30,
-0x30,
-0x30,
-0x34,
-0x0,
+0xd0,
+0xa,
+0x6,
0x8,
0x5f,
0x43,
diff --git a/hw/ide/ahci.c b/hw/ide/ahci.c
index fbea9e8886..50327ffdf1 100644
--- a/hw/ide/ahci.c
+++ b/hw/ide/ahci.c
@@ -118,11 +118,12 @@ static uint32_t ahci_port_read(AHCIState *s, int port, int offset)
static void ahci_irq_raise(AHCIState *s, AHCIDevice *dev)
{
AHCIPCIState *d = container_of(s, AHCIPCIState, ahci);
- PCIDevice *pci_dev = PCI_DEVICE(d);
+ PCIDevice *pci_dev =
+ (PCIDevice *)object_dynamic_cast(OBJECT(d), TYPE_PCI_DEVICE);
DPRINTF(0, "raise irq\n");
- if (msi_enabled(pci_dev)) {
+ if (pci_dev && msi_enabled(pci_dev)) {
msi_notify(pci_dev, 0);
} else {
qemu_irq_raise(s->irq);
@@ -132,10 +133,12 @@ static void ahci_irq_raise(AHCIState *s, AHCIDevice *dev)
static void ahci_irq_lower(AHCIState *s, AHCIDevice *dev)
{
AHCIPCIState *d = container_of(s, AHCIPCIState, ahci);
+ PCIDevice *pci_dev =
+ (PCIDevice *)object_dynamic_cast(OBJECT(d), TYPE_PCI_DEVICE);
DPRINTF(0, "lower irq\n");
- if (!msi_enabled(PCI_DEVICE(d))) {
+ if (!pci_dev || !msi_enabled(pci_dev)) {
qemu_irq_lower(s->irq);
}
}
@@ -435,9 +438,9 @@ static void check_cmd(AHCIState *s, int port)
if ((pr->cmd & PORT_CMD_START) && pr->cmd_issue) {
for (slot = 0; (slot < 32) && pr->cmd_issue; slot++) {
- if ((pr->cmd_issue & (1 << slot)) &&
+ if ((pr->cmd_issue & (1U << slot)) &&
!handle_cmd(s, port, slot)) {
- pr->cmd_issue &= ~(1 << slot);
+ pr->cmd_issue &= ~(1U << slot);
}
}
}
@@ -1311,7 +1314,7 @@ static const VMStateDescription vmstate_sysbus_ahci = {
.name = "sysbus-ahci",
.unmigratable = 1, /* Still buggy under I/O load */
.fields = (VMStateField []) {
- VMSTATE_AHCI(ahci, AHCIPCIState),
+ VMSTATE_AHCI(ahci, SysbusAHCIState),
VMSTATE_END_OF_LIST()
},
};
@@ -1328,7 +1331,7 @@ static void sysbus_ahci_realize(DeviceState *dev, Error **errp)
SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
SysbusAHCIState *s = SYSBUS_AHCI(dev);
- ahci_init(&s->ahci, dev, NULL, s->num_ports);
+ ahci_init(&s->ahci, dev, &address_space_memory, s->num_ports);
sysbus_init_mmio(sbd, &s->ahci.mem);
sysbus_init_irq(sbd, &s->ahci.irq);
diff --git a/hw/ide/core.c b/hw/ide/core.c
index e1dfe54df6..c943a4d764 100644
--- a/hw/ide/core.c
+++ b/hw/ide/core.c
@@ -1602,7 +1602,7 @@ static bool cmd_smart(IDEState *s, uint8_t cmd)
case 2: /* extended self test */
s->smart_selftest_count++;
if (s->smart_selftest_count > 21) {
- s->smart_selftest_count = 0;
+ s->smart_selftest_count = 1;
}
n = 2 + (s->smart_selftest_count - 1) * 24;
s->smart_selftest_data[n] = s->sector;
diff --git a/hw/ide/pci.c b/hw/ide/pci.c
index 91151fc85e..d8b1157c2e 100644
--- a/hw/ide/pci.c
+++ b/hw/ide/pci.c
@@ -421,7 +421,7 @@ static const VMStateDescription vmstate_bmdma_current = {
}
};
-const VMStateDescription vmstate_bmdma_status = {
+static const VMStateDescription vmstate_bmdma_status = {
.name ="ide bmdma/status",
.version_id = 1,
.minimum_version_id = 1,
diff --git a/hw/intc/allwinner-a10-pic.c b/hw/intc/allwinner-a10-pic.c
index 407d563514..0924d9855c 100644
--- a/hw/intc/allwinner-a10-pic.c
+++ b/hw/intc/allwinner-a10-pic.c
@@ -23,11 +23,20 @@
static void aw_a10_pic_update(AwA10PICState *s)
{
uint8_t i;
- int irq = 0, fiq = 0;
+ int irq = 0, fiq = 0, pending;
+
+ s->vector = 0;
for (i = 0; i < AW_A10_PIC_REG_NUM; i++) {
irq |= s->irq_pending[i] & ~s->mask[i];
fiq |= s->select[i] & s->irq_pending[i] & ~s->mask[i];
+
+ if (!s->vector) {
+ pending = ffs(s->irq_pending[i] & ~s->mask[i]);
+ if (pending) {
+ s->vector = (i * 32 + pending - 1) * 4;
+ }
+ }
}
qemu_set_irq(s->parent_irq, !!irq);
@@ -40,6 +49,8 @@ static void aw_a10_pic_set_irq(void *opaque, int irq, int level)
if (level) {
set_bit(irq % 32, (void *)&s->irq_pending[irq / 32]);
+ } else {
+ clear_bit(irq % 32, (void *)&s->irq_pending[irq / 32]);
}
aw_a10_pic_update(s);
}
@@ -84,9 +95,6 @@ static void aw_a10_pic_write(void *opaque, hwaddr offset, uint64_t value,
uint8_t index = (offset & 0xc) / 4;
switch (offset) {
- case AW_A10_PIC_VECTOR:
- s->vector = value & ~0x3;
- break;
case AW_A10_PIC_BASE_ADDR:
s->base_addr = value & ~0x3;
case AW_A10_PIC_PROTECT:
@@ -96,7 +104,11 @@ static void aw_a10_pic_write(void *opaque, hwaddr offset, uint64_t value,
s->nmi = value;
break;
case AW_A10_PIC_IRQ_PENDING ... AW_A10_PIC_IRQ_PENDING + 8:
- s->irq_pending[index] &= ~value;
+ /*
+ * The register is read-only; nevertheless, Linux (including
+ * the version originally shipped by Allwinner) pretends to
+ * write to the register. Just ignore it.
+ */
break;
case AW_A10_PIC_FIQ_PENDING ... AW_A10_PIC_FIQ_PENDING + 8:
s->fiq_pending[index] &= ~value;
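aw_a10_pic_update() now derives the vector register from the lowest pending, unmasked interrupt: ffs() returns the 1-based index of the least significant set bit, so (i * 32 + pending - 1) * 4 is the byte offset of that interrupt's vector entry. A standalone sketch of the same calculation (register count and IRQ values are example data):

#include <stdint.h>
#include <stdio.h>
#include <strings.h>     /* ffs() */

#define REG_NUM 3        /* stand-in for AW_A10_PIC_REG_NUM */

/* Return the vector offset of the lowest pending & unmasked IRQ, or 0. */
static uint32_t pick_vector(const uint32_t *pending, const uint32_t *mask)
{
    int i;

    for (i = 0; i < REG_NUM; i++) {
        int bit = ffs(pending[i] & ~mask[i]);   /* 1-based, 0 if none set */
        if (bit) {
            return (i * 32 + bit - 1) * 4;
        }
    }
    return 0;
}

int main(void)
{
    uint32_t pending[REG_NUM] = { 0, 0x00000100, 0 };  /* IRQ 40 pending */
    uint32_t mask[REG_NUM]    = { 0, 0, 0 };

    printf("vector offset: %u\n", pick_vector(pending, mask));  /* 160 */
    return 0;
}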
diff --git a/hw/intc/apic.c b/hw/intc/apic.c
index 361ae90b65..2f40cbad2d 100644
--- a/hw/intc/apic.c
+++ b/hw/intc/apic.c
@@ -98,8 +98,8 @@ static void apic_sync_vapic(APICCommonState *s, int sync_type)
return;
}
if (sync_type & SYNC_FROM_VAPIC) {
- cpu_physical_memory_rw(s->vapic_paddr, (void *)&vapic_state,
- sizeof(vapic_state), 0);
+ cpu_physical_memory_read(s->vapic_paddr, &vapic_state,
+ sizeof(vapic_state));
s->tpr = vapic_state.tpr;
}
if (sync_type & (SYNC_TO_VAPIC | SYNC_ISR_IRR_TO_VAPIC)) {
@@ -201,12 +201,12 @@ static void apic_external_nmi(APICCommonState *s)
#define foreach_apic(apic, deliver_bitmask, code) \
{\
- int __i, __j, __mask;\
+ int __i, __j;\
for(__i = 0; __i < MAX_APIC_WORDS; __i++) {\
- __mask = deliver_bitmask[__i];\
+ uint32_t __mask = deliver_bitmask[__i];\
if (__mask) {\
for(__j = 0; __j < 32; __j++) {\
- if (__mask & (1 << __j)) {\
+ if (__mask & (1U << __j)) {\
apic = local_apics[__i * 32 + __j];\
if (apic) {\
code;\
diff --git a/hw/intc/apic_common.c b/hw/intc/apic_common.c
index c623fcc6d8..7ecce2dcce 100644
--- a/hw/intc/apic_common.c
+++ b/hw/intc/apic_common.c
@@ -117,7 +117,12 @@ void apic_report_irq_delivered(int delivered)
void apic_reset_irq_delivered(void)
{
- trace_apic_reset_irq_delivered(apic_irq_delivered);
+ /* Copy this into a local variable to encourage gcc to emit a plain
+ * register for a sys/sdt.h marker. For details on this workaround, see:
+ * https://sourceware.org/bugzilla/show_bug.cgi?id=13296
+ */
+ volatile int a_i_d = apic_irq_delivered;
+ trace_apic_reset_irq_delivered(a_i_d);
apic_irq_delivered = 0;
}
diff --git a/hw/intc/arm_gic_kvm.c b/hw/intc/arm_gic_kvm.c
index 100b6bf3de..719d2277ec 100644
--- a/hw/intc/arm_gic_kvm.c
+++ b/hw/intc/arm_gic_kvm.c
@@ -148,7 +148,7 @@ typedef void (*vgic_translate_fn)(GICState *s, int irq, int cpu,
uint32_t *field, bool to_kernel);
/* synthetic translate function used for clear/set registers to completely
- * clear a setting using a clear-register before setting the remaing bits
+ * clear a setting using a clear-register before setting the remaining bits
* using a set-register */
static void translate_clear(GICState *s, int irq, int cpu,
uint32_t *field, bool to_kernel)
diff --git a/hw/intc/openpic.c b/hw/intc/openpic.c
index 7df72f44f0..be76fbd78f 100644
--- a/hw/intc/openpic.c
+++ b/hw/intc/openpic.c
@@ -123,7 +123,7 @@ static FslMpicInfo fsl_mpic_42 = {
#define TCCR_TOG 0x80000000 /* toggles when decrement to zero */
#define IDR_EP_SHIFT 31
-#define IDR_EP_MASK (1 << IDR_EP_SHIFT)
+#define IDR_EP_MASK (1U << IDR_EP_SHIFT)
#define IDR_CI0_SHIFT 30
#define IDR_CI1_SHIFT 29
#define IDR_P1_SHIFT 1
@@ -220,17 +220,17 @@ typedef struct IRQSource {
} IRQSource;
#define IVPR_MASK_SHIFT 31
-#define IVPR_MASK_MASK (1 << IVPR_MASK_SHIFT)
+#define IVPR_MASK_MASK (1U << IVPR_MASK_SHIFT)
#define IVPR_ACTIVITY_SHIFT 30
-#define IVPR_ACTIVITY_MASK (1 << IVPR_ACTIVITY_SHIFT)
+#define IVPR_ACTIVITY_MASK (1U << IVPR_ACTIVITY_SHIFT)
#define IVPR_MODE_SHIFT 29
-#define IVPR_MODE_MASK (1 << IVPR_MODE_SHIFT)
+#define IVPR_MODE_MASK (1U << IVPR_MODE_SHIFT)
#define IVPR_POLARITY_SHIFT 23
-#define IVPR_POLARITY_MASK (1 << IVPR_POLARITY_SHIFT)
+#define IVPR_POLARITY_MASK (1U << IVPR_POLARITY_SHIFT)
#define IVPR_SENSE_SHIFT 22
-#define IVPR_SENSE_MASK (1 << IVPR_SENSE_SHIFT)
+#define IVPR_SENSE_MASK (1U << IVPR_SENSE_SHIFT)
-#define IVPR_PRIORITY_MASK (0xF << 16)
+#define IVPR_PRIORITY_MASK (0xFU << 16)
#define IVPR_PRIORITY(_ivprr_) ((int)(((_ivprr_) & IVPR_PRIORITY_MASK) >> 16))
#define IVPR_VECTOR(opp, _ivprr_) ((_ivprr_) & (opp)->vector_mask)
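The mask definitions above switch from 1 << 31 to 1U << 31 because shifting a set bit into the sign position of a signed int is undefined behaviour in C; the unsigned literal makes the shift well-defined. A minimal illustration (assumes 32-bit int, as on the hosts QEMU targets):

#include <stdint.h>
#include <stdio.h>

#define IDR_EP_SHIFT 31

int main(void)
{
    /* Undefined behaviour: 1 is a signed int, and the result 0x80000000
     * is not representable in int. UBSan flags this shift.
     *   int bad = 1 << IDR_EP_SHIFT;
     */
    uint32_t ok = 1U << IDR_EP_SHIFT;   /* well-defined: 0x80000000 */

    printf("0x%08x\n", ok);
    return 0;
}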
diff --git a/hw/intc/openpic_kvm.c b/hw/intc/openpic_kvm.c
index 87fdb126cf..6635407b8b 100644
--- a/hw/intc/openpic_kvm.c
+++ b/hw/intc/openpic_kvm.c
@@ -118,6 +118,11 @@ static void kvm_openpic_region_add(MemoryListener *listener,
abort();
}
+ /* Ignore events on regions that are not us */
+ if (section->mr != &opp->mem) {
+ return;
+ }
+
reg_base = section->offset_within_address_space;
attr.group = KVM_DEV_MPIC_GRP_MISC;
@@ -140,6 +145,11 @@ static void kvm_openpic_region_del(MemoryListener *listener,
uint64_t reg_base = 0;
int ret;
+ /* Ignore events on regions that are not us */
+ if (section->mr != &opp->mem) {
+ return;
+ }
+
attr.group = KVM_DEV_MPIC_GRP_MISC;
attr.attr = KVM_DEV_MPIC_BASE_ADDR;
attr.addr = (uint64_t)(unsigned long)&reg_base;
@@ -200,7 +210,7 @@ static void kvm_openpic_realize(DeviceState *dev, Error **errp)
qdev_init_gpio_in(dev, kvm_openpic_set_irq, OPENPIC_MAX_IRQ);
opp->mem_listener.region_add = kvm_openpic_region_add;
- opp->mem_listener.region_add = kvm_openpic_region_del;
+ opp->mem_listener.region_del = kvm_openpic_region_del;
memory_listener_register(&opp->mem_listener, &address_space_memory);
/* indicate pic capabilities */
diff --git a/hw/intc/slavio_intctl.c b/hw/intc/slavio_intctl.c
index 41a1672800..b10fb66b8d 100644
--- a/hw/intc/slavio_intctl.c
+++ b/hw/intc/slavio_intctl.c
@@ -272,7 +272,7 @@ static void slavio_check_interrupts(SLAVIO_INTCTLState *s, int set_irqs)
CPU_IRQ_TIMER_IN;
if (i == s->target_cpu) {
for (j = 0; j < 32; j++) {
- if ((s->intregm_pending & (1 << j)) && intbit_to_level[j]) {
+ if ((s->intregm_pending & (1U << j)) && intbit_to_level[j]) {
s->slaves[i].intreg_pending |= 1 << intbit_to_level[j];
}
}
diff --git a/hw/intc/xics_kvm.c b/hw/intc/xics_kvm.c
index a5bbc2406d..c93dae053d 100644
--- a/hw/intc/xics_kvm.c
+++ b/hw/intc/xics_kvm.c
@@ -269,7 +269,16 @@ static void ics_kvm_set_irq(void *opaque, int srcno, int val)
static void ics_kvm_reset(DeviceState *dev)
{
- ics_set_kvm_state(ICS(dev), 1);
+ ICSState *ics = ICS(dev);
+ int i;
+
+ memset(ics->irqs, 0, sizeof(ICSIRQState) * ics->nr_irqs);
+ for (i = 0; i < ics->nr_irqs; i++) {
+ ics->irqs[i].priority = 0xff;
+ ics->irqs[i].saved_priority = 0xff;
+ }
+
+ ics_set_kvm_state(ics, 1);
}
static void ics_kvm_realize(DeviceState *dev, Error **errp)
diff --git a/hw/intc/xilinx_intc.c b/hw/intc/xilinx_intc.c
index 4a103988f3..1b228ff4e0 100644
--- a/hw/intc/xilinx_intc.c
+++ b/hw/intc/xilinx_intc.c
@@ -71,8 +71,9 @@ static void update_irq(struct xlx_pic *p)
/* Update the vector register. */
for (i = 0; i < 32; i++) {
- if (p->regs[R_IPR] & (1 << i))
+ if (p->regs[R_IPR] & (1U << i)) {
break;
+ }
}
if (i == 32)
i = ~0;
diff --git a/hw/misc/ivshmem.c b/hw/misc/ivshmem.c
index 8d144baa1e..768e5288bc 100644
--- a/hw/misc/ivshmem.c
+++ b/hw/misc/ivshmem.c
@@ -684,8 +684,8 @@ static int pci_ivshmem_init(PCIDevice *dev)
}
if (s->role_val == IVSHMEM_PEER) {
- error_set(&s->migration_blocker, QERR_DEVICE_FEATURE_BLOCKS_MIGRATION,
- "peer mode", "ivshmem");
+ error_setg(&s->migration_blocker,
+ "Migration is disabled when using feature 'peer mode' in device 'ivshmem'");
migrate_add_blocker(s->migration_blocker);
}
diff --git a/hw/misc/max111x.c b/hw/misc/max111x.c
index d477ecdb29..bba87c2ec5 100644
--- a/hw/misc/max111x.c
+++ b/hw/misc/max111x.c
@@ -13,7 +13,8 @@
#include "hw/ssi.h"
typedef struct {
- SSISlave ssidev;
+ SSISlave parent_obj;
+
qemu_irq interrupt;
uint8_t tb1, rb2, rb3;
int cycle;
@@ -22,6 +23,14 @@ typedef struct {
int inputs, com;
} MAX111xState;
+#define TYPE_MAX_111X "max111x"
+
+#define MAX_111X(obj) \
+ OBJECT_CHECK(MAX111xState, (obj), TYPE_MAX_111X)
+
+#define TYPE_MAX_1110 "max1110"
+#define TYPE_MAX_1111 "max1111"
+
/* Control-byte bitfields */
#define CB_PD0 (1 << 0)
#define CB_PD1 (1 << 1)
@@ -92,7 +101,7 @@ static void max111x_write(MAX111xState *s, uint32_t value)
static uint32_t max111x_transfer(SSISlave *dev, uint32_t value)
{
- MAX111xState *s = FROM_SSI_SLAVE(MAX111xState, dev);
+ MAX111xState *s = MAX_111X(dev);
max111x_write(s, value);
return max111x_read(s);
}
@@ -103,7 +112,7 @@ static const VMStateDescription vmstate_max111x = {
.minimum_version_id = 1,
.minimum_version_id_old = 1,
.fields = (VMStateField[]) {
- VMSTATE_SSI_SLAVE(ssidev, MAX111xState),
+ VMSTATE_SSI_SLAVE(parent_obj, MAX111xState),
VMSTATE_UINT8(tb1, MAX111xState),
VMSTATE_UINT8(rb2, MAX111xState),
VMSTATE_UINT8(rb3, MAX111xState),
@@ -115,11 +124,12 @@ static const VMStateDescription vmstate_max111x = {
}
};
-static int max111x_init(SSISlave *dev, int inputs)
+static int max111x_init(SSISlave *d, int inputs)
{
- MAX111xState *s = FROM_SSI_SLAVE(MAX111xState, dev);
+ DeviceState *dev = DEVICE(d);
+ MAX111xState *s = MAX_111X(dev);
- qdev_init_gpio_out(&dev->qdev, &s->interrupt, 1);
+ qdev_init_gpio_out(dev, &s->interrupt, 1);
s->inputs = inputs;
/* TODO: add a user interface for setting these */
@@ -133,7 +143,7 @@ static int max111x_init(SSISlave *dev, int inputs)
s->input[7] = 0x80;
s->com = 0;
- vmstate_register(&dev->qdev, -1, &vmstate_max111x, s);
+ vmstate_register(dev, -1, &vmstate_max111x, s);
return 0;
}
@@ -149,23 +159,36 @@ static int max1111_init(SSISlave *dev)
void max111x_set_input(DeviceState *dev, int line, uint8_t value)
{
- MAX111xState *s = FROM_SSI_SLAVE(MAX111xState, SSI_SLAVE_FROM_QDEV(dev));
+ MAX111xState *s = MAX_111X(dev);
assert(line >= 0 && line < s->inputs);
s->input[line] = value;
}
-static void max1110_class_init(ObjectClass *klass, void *data)
+static void max111x_class_init(ObjectClass *klass, void *data)
{
SSISlaveClass *k = SSI_SLAVE_CLASS(klass);
- k->init = max1110_init;
k->transfer = max111x_transfer;
}
-static const TypeInfo max1110_info = {
- .name = "max1110",
+static const TypeInfo max111x_info = {
+ .name = TYPE_MAX_111X,
.parent = TYPE_SSI_SLAVE,
.instance_size = sizeof(MAX111xState),
+ .class_init = max111x_class_init,
+ .abstract = true,
+};
+
+static void max1110_class_init(ObjectClass *klass, void *data)
+{
+ SSISlaveClass *k = SSI_SLAVE_CLASS(klass);
+
+ k->init = max1110_init;
+}
+
+static const TypeInfo max1110_info = {
+ .name = TYPE_MAX_1110,
+ .parent = TYPE_MAX_111X,
.class_init = max1110_class_init,
};
@@ -174,18 +197,17 @@ static void max1111_class_init(ObjectClass *klass, void *data)
SSISlaveClass *k = SSI_SLAVE_CLASS(klass);
k->init = max1111_init;
- k->transfer = max111x_transfer;
}
static const TypeInfo max1111_info = {
- .name = "max1111",
- .parent = TYPE_SSI_SLAVE,
- .instance_size = sizeof(MAX111xState),
+ .name = TYPE_MAX_1111,
+ .parent = TYPE_MAX_111X,
.class_init = max1111_class_init,
};
static void max111x_register_types(void)
{
+ type_register_static(&max111x_info);
type_register_static(&max1110_info);
type_register_static(&max1111_info);
}
diff --git a/hw/misc/tmp105.c b/hw/misc/tmp105.c
index 155e03df80..63aa3d6277 100644
--- a/hw/misc/tmp105.c
+++ b/hw/misc/tmp105.c
@@ -56,12 +56,14 @@ static void tmp105_get_temperature(Object *obj, Visitor *v, void *opaque,
const char *name, Error **errp)
{
TMP105State *s = TMP105(obj);
- int64_t value = s->temperature;
+ int64_t value = s->temperature * 1000 / 256;
visit_type_int(v, &value, name, errp);
}
-/* Units are 0.001 centigrades relative to 0 C. */
+/* Units are 0.001 centigrades relative to 0 C. s->temperature is 8.8
+ * fixed point, so units are 1/256 centigrades. A simple ratio will do.
+ */
static void tmp105_set_temperature(Object *obj, Visitor *v, void *opaque,
const char *name, Error **errp)
{
@@ -78,7 +80,7 @@ static void tmp105_set_temperature(Object *obj, Visitor *v, void *opaque,
return;
}
- s->temperature = ((int16_t) (temp * 0x800 / 128000)) << 4;
+ s->temperature = (int16_t) (temp * 256 / 1000);
tmp105_alarm_update(s);
}
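The TMP105 getter and setter now convert between the property's millidegree scale and the register's 8.8 fixed-point format with plain ratios: multiply by 256 and divide by 1000 on the way in, and the inverse on the way out. A standalone round-trip of that arithmetic (the temperature value is an example, not from the patch):

#include <stdint.h>
#include <stdio.h>

/* Property value: milli-degrees C.  Register value: 8.8 fixed point,
 * i.e. 1/256 degree C per LSB. */
static int16_t millideg_to_reg(int64_t temp_md)
{
    return (int16_t)(temp_md * 256 / 1000);
}

static int64_t reg_to_millideg(int16_t reg)
{
    return (int64_t)reg * 1000 / 256;
}

int main(void)
{
    int64_t in = 25500;                       /* 25.5 C */
    int16_t reg = millideg_to_reg(in);        /* 6528 == 0x1980 */

    printf("reg=0x%04x back=%lld mC\n",
           (uint16_t)reg, (long long)reg_to_millideg(reg));
    return 0;
}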
diff --git a/hw/misc/vfio.c b/hw/misc/vfio.c
index c2c688c870..9cf5b84045 100644
--- a/hw/misc/vfio.c
+++ b/hw/misc/vfio.c
@@ -1043,7 +1043,7 @@ static void vfio_bar_write(void *opaque, hwaddr addr,
buf.dword = cpu_to_le32(data);
break;
default:
- hw_error("vfio: unsupported write size, %d bytes\n", size);
+ hw_error("vfio: unsupported write size, %d bytes", size);
break;
}
@@ -1103,7 +1103,7 @@ static uint64_t vfio_bar_read(void *opaque,
data = le32_to_cpu(buf.dword);
break;
default:
- hw_error("vfio: unsupported read size, %d bytes\n", size);
+ hw_error("vfio: unsupported read size, %d bytes", size);
break;
}
@@ -1157,7 +1157,7 @@ static void vfio_pci_load_rom(VFIODevice *vdev)
if (!vdev->rom_size) {
vdev->rom_read_failed = true;
error_report("vfio-pci: Cannot read device rom at "
- "%04x:%02x:%02x.%x\n",
+ "%04x:%02x:%02x.%x",
vdev->host.domain, vdev->host.bus, vdev->host.slot,
vdev->host.function);
error_printf("Device option ROM contents are probably invalid "
@@ -1192,11 +1192,8 @@ static uint64_t vfio_rom_read(void *opaque, hwaddr addr, unsigned size)
uint64_t val = ((uint64_t)1 << (size * 8)) - 1;
/* Load the ROM lazily when the guest tries to read it */
- if (unlikely(!vdev->rom)) {
+ if (unlikely(!vdev->rom && !vdev->rom_read_failed)) {
vfio_pci_load_rom(vdev);
- if (unlikely(!vdev->rom && !vdev->rom_read_failed)) {
- vfio_pci_load_rom(vdev);
- }
}
memcpy(&val, vdev->rom + addr,
@@ -1341,7 +1338,7 @@ static void vfio_vga_write(void *opaque, hwaddr addr,
buf.dword = cpu_to_le32(data);
break;
default:
- hw_error("vfio: unsupported write size, %d bytes\n", size);
+ hw_error("vfio: unsupported write size, %d bytes", size);
break;
}
@@ -1384,7 +1381,7 @@ static uint64_t vfio_vga_read(void *opaque, hwaddr addr, unsigned size)
data = le32_to_cpu(buf.dword);
break;
default:
- hw_error("vfio: unsupported read size, %d bytes\n", size);
+ hw_error("vfio: unsupported read size, %d bytes", size);
break;
}
@@ -1429,7 +1426,7 @@ static uint64_t vfio_generic_window_quirk_read(void *opaque,
if (!vfio_range_contained(addr, size, quirk->data.data_offset,
quirk->data.data_size)) {
- hw_error("%s: window data read not fully contained: %s\n",
+ hw_error("%s: window data read not fully contained: %s",
__func__, memory_region_name(&quirk->mem));
}
@@ -1458,7 +1455,7 @@ static void vfio_generic_window_quirk_write(void *opaque, hwaddr addr,
quirk->data.address_offset, quirk->data.address_size)) {
if (addr != quirk->data.address_offset) {
- hw_error("%s: offset write into address window: %s\n",
+ hw_error("%s: offset write into address window: %s",
__func__, memory_region_name(&quirk->mem));
}
@@ -1479,7 +1476,7 @@ static void vfio_generic_window_quirk_write(void *opaque, hwaddr addr,
if (!vfio_range_contained(addr, size, quirk->data.data_offset,
quirk->data.data_size)) {
- hw_error("%s: window data write not fully contained: %s\n",
+ hw_error("%s: window data write not fully contained: %s",
__func__, memory_region_name(&quirk->mem));
}
@@ -1515,7 +1512,7 @@ static uint64_t vfio_generic_quirk_read(void *opaque,
ranges_overlap(addr, size, offset, quirk->data.address_mask + 1)) {
if (!vfio_range_contained(addr, size, offset,
quirk->data.address_mask + 1)) {
- hw_error("%s: read not fully contained: %s\n",
+ hw_error("%s: read not fully contained: %s",
__func__, memory_region_name(&quirk->mem));
}
@@ -1544,7 +1541,7 @@ static void vfio_generic_quirk_write(void *opaque, hwaddr addr,
ranges_overlap(addr, size, offset, quirk->data.address_mask + 1)) {
if (!vfio_range_contained(addr, size, offset,
quirk->data.address_mask + 1)) {
- hw_error("%s: write not fully contained: %s\n",
+ hw_error("%s: write not fully contained: %s",
__func__, memory_region_name(&quirk->mem));
}
@@ -2302,7 +2299,7 @@ static void vfio_listener_region_add(MemoryListener *listener,
container->iommu_data.type1.error = ret;
}
} else {
- hw_error("vfio: DMA mapping failed, unable to continue\n");
+ hw_error("vfio: DMA mapping failed, unable to continue");
}
}
}
@@ -2972,7 +2969,7 @@ static void vfio_pci_pre_reset(VFIODevice *vdev)
pmcsr = vfio_pci_read_config(pdev, vdev->pm_cap + PCI_PM_CTRL, 2);
state = pmcsr & PCI_PM_CTRL_STATE_MASK;
if (state) {
- error_report("vfio: Unable to power on device, stuck in D%d\n",
+ error_report("vfio: Unable to power on device, stuck in D%d",
state);
}
}
@@ -3271,7 +3268,7 @@ static void vfio_kvm_device_del_group(VFIOGroup *group)
}
if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
- error_report("Failed to remove group %d to KVM VFIO device: %m",
+ error_report("Failed to remove group %d from KVM VFIO device: %m",
group->groupid);
}
#endif
@@ -3339,7 +3336,7 @@ static int vfio_connect_container(VFIOGroup *group)
vfio_listener_release(container);
g_free(container);
close(fd);
- error_report("vfio: memory listener initialization failed for container\n");
+ error_report("vfio: memory listener initialization failed for container");
return ret;
}
diff --git a/hw/misc/zynq_slcr.c b/hw/misc/zynq_slcr.c
index d1cc23303a..2e53a2e21f 100644
--- a/hw/misc/zynq_slcr.c
+++ b/hw/misc/zynq_slcr.c
@@ -19,102 +19,155 @@
#include "hw/sysbus.h"
#include "sysemu/sysemu.h"
-#ifdef ZYNQ_ARM_SLCR_ERR_DEBUG
+#ifndef ZYNQ_SLCR_ERR_DEBUG
+#define ZYNQ_SLCR_ERR_DEBUG 0
+#endif
+
#define DB_PRINT(...) do { \
- fprintf(stderr, ": %s: ", __func__); \
- fprintf(stderr, ## __VA_ARGS__); \
+ if (ZYNQ_SLCR_ERR_DEBUG) { \
+ fprintf(stderr, ": %s: ", __func__); \
+ fprintf(stderr, ## __VA_ARGS__); \
+ } \
} while (0);
-#else
- #define DB_PRINT(...)
-#endif
#define XILINX_LOCK_KEY 0x767b
#define XILINX_UNLOCK_KEY 0xdf0d
#define R_PSS_RST_CTRL_SOFT_RST 0x1
-typedef enum {
- ARM_PLL_CTRL,
- DDR_PLL_CTRL,
- IO_PLL_CTRL,
- PLL_STATUS,
- ARM_PPL_CFG,
- DDR_PLL_CFG,
- IO_PLL_CFG,
- PLL_BG_CTRL,
- PLL_MAX
-} PLLValues;
-
-typedef enum {
- ARM_CLK_CTRL,
- DDR_CLK_CTRL,
- DCI_CLK_CTRL,
- APER_CLK_CTRL,
- USB0_CLK_CTRL,
- USB1_CLK_CTRL,
- GEM0_RCLK_CTRL,
- GEM1_RCLK_CTRL,
- GEM0_CLK_CTRL,
- GEM1_CLK_CTRL,
- SMC_CLK_CTRL,
- LQSPI_CLK_CTRL,
- SDIO_CLK_CTRL,
- UART_CLK_CTRL,
- SPI_CLK_CTRL,
- CAN_CLK_CTRL,
- CAN_MIOCLK_CTRL,
- DBG_CLK_CTRL,
- PCAP_CLK_CTRL,
- TOPSW_CLK_CTRL,
- CLK_MAX
-} ClkValues;
-
-typedef enum {
- CLK_CTRL,
- THR_CTRL,
- THR_CNT,
- THR_STA,
- FPGA_MAX
-} FPGAValues;
-
-typedef enum {
- SYNC_CTRL,
- SYNC_STATUS,
- BANDGAP_TRIP,
- CC_TEST,
- PLL_PREDIVISOR,
- CLK_621_TRUE,
- PICTURE_DBG,
- PICTURE_DBG_UCNT,
- PICTURE_DBG_LCNT,
- MISC_MAX
-} MiscValues;
-
-typedef enum {
- PSS,
- DDDR,
- DMAC = 3,
- USB,
- GEM,
- SDIO,
- SPI,
- CAN,
- I2C,
- UART,
- GPIO,
- LQSPI,
- SMC,
- OCM,
- DEVCI,
- FPGA,
- A9_CPU,
- RS_AWDT,
- RST_REASON,
- RST_REASON_CLR,
- REBOOT_STATUS,
- BOOT_MODE,
- RESET_MAX
-} ResetValues;
+enum {
+ SCL = 0x000 / 4,
+ LOCK,
+ UNLOCK,
+ LOCKSTA,
+
+ ARM_PLL_CTRL = 0x100 / 4,
+ DDR_PLL_CTRL,
+ IO_PLL_CTRL,
+ PLL_STATUS,
+ ARM_PLL_CFG,
+ DDR_PLL_CFG,
+ IO_PLL_CFG,
+
+ ARM_CLK_CTRL = 0x120 / 4,
+ DDR_CLK_CTRL,
+ DCI_CLK_CTRL,
+ APER_CLK_CTRL,
+ USB0_CLK_CTRL,
+ USB1_CLK_CTRL,
+ GEM0_RCLK_CTRL,
+ GEM1_RCLK_CTRL,
+ GEM0_CLK_CTRL,
+ GEM1_CLK_CTRL,
+ SMC_CLK_CTRL,
+ LQSPI_CLK_CTRL,
+ SDIO_CLK_CTRL,
+ UART_CLK_CTRL,
+ SPI_CLK_CTRL,
+ CAN_CLK_CTRL,
+ CAN_MIOCLK_CTRL,
+ DBG_CLK_CTRL,
+ PCAP_CLK_CTRL,
+ TOPSW_CLK_CTRL,
+
+#define FPGA_CTRL_REGS(n, start) \
+ FPGA ## n ## _CLK_CTRL = (start) / 4, \
+ FPGA ## n ## _THR_CTRL, \
+ FPGA ## n ## _THR_CNT, \
+ FPGA ## n ## _THR_STA,
+ FPGA_CTRL_REGS(0, 0x170)
+ FPGA_CTRL_REGS(1, 0x180)
+ FPGA_CTRL_REGS(2, 0x190)
+ FPGA_CTRL_REGS(3, 0x1a0)
+
+ BANDGAP_TRIP = 0x1b8 / 4,
+ PLL_PREDIVISOR = 0x1c0 / 4,
+ CLK_621_TRUE,
+
+ PSS_RST_CTRL = 0x200 / 4,
+ DDR_RST_CTRL,
+ TOPSW_RESET_CTRL,
+ DMAC_RST_CTRL,
+ USB_RST_CTRL,
+ GEM_RST_CTRL,
+ SDIO_RST_CTRL,
+ SPI_RST_CTRL,
+ CAN_RST_CTRL,
+ I2C_RST_CTRL,
+ UART_RST_CTRL,
+ GPIO_RST_CTRL,
+ LQSPI_RST_CTRL,
+ SMC_RST_CTRL,
+ OCM_RST_CTRL,
+ FPGA_RST_CTRL = 0x240 / 4,
+ A9_CPU_RST_CTRL,
+
+ RS_AWDT_CTRL = 0x24c / 4,
+ RST_REASON,
+
+ REBOOT_STATUS = 0x258 / 4,
+ BOOT_MODE,
+
+ APU_CTRL = 0x300 / 4,
+ WDT_CLK_SEL,
+
+ TZ_DMA_NS = 0x440 / 4,
+ TZ_DMA_IRQ_NS,
+ TZ_DMA_PERIPH_NS,
+
+ PSS_IDCODE = 0x530 / 4,
+
+ DDR_URGENT = 0x600 / 4,
+ DDR_CAL_START = 0x60c / 4,
+ DDR_REF_START = 0x614 / 4,
+ DDR_CMD_STA,
+ DDR_URGENT_SEL,
+ DDR_DFI_STATUS,
+
+ MIO = 0x700 / 4,
+#define MIO_LENGTH 54
+
+ MIO_LOOPBACK = 0x804 / 4,
+ MIO_MST_TRI0,
+ MIO_MST_TRI1,
+
+ SD0_WP_CD_SEL = 0x830 / 4,
+ SD1_WP_CD_SEL,
+
+ LVL_SHFTR_EN = 0x900 / 4,
+ OCM_CFG = 0x910 / 4,
+
+ CPU_RAM = 0xa00 / 4,
+
+ IOU = 0xa30 / 4,
+
+ DMAC_RAM = 0xa50 / 4,
+
+ AFI0 = 0xa60 / 4,
+ AFI1 = AFI0 + 3,
+ AFI2 = AFI1 + 3,
+ AFI3 = AFI2 + 3,
+#define AFI_LENGTH 3
+
+ OCM = 0xa90 / 4,
+
+ DEVCI_RAM = 0xaa0 / 4,
+
+ CSG_RAM = 0xab0 / 4,
+
+ GPIOB_CTRL = 0xb00 / 4,
+ GPIOB_CFG_CMOS18,
+ GPIOB_CFG_CMOS25,
+ GPIOB_CFG_CMOS33,
+ GPIOB_CFG_HSTL = 0xb14 / 4,
+ GPIOB_DRVR_BIAS_CTRL,
+
+ DDRIOB = 0xb40 / 4,
+#define DDRIOB_LENGTH 14
+};
+
+#define ZYNQ_SLCR_MMIO_SIZE 0x1000
+#define ZYNQ_SLCR_NUM_REGS (ZYNQ_SLCR_MMIO_SIZE / 4)
#define TYPE_ZYNQ_SLCR "xilinx,zynq_slcr"
#define ZYNQ_SLCR(obj) OBJECT_CHECK(ZynqSLCRState, (obj), TYPE_ZYNQ_SLCR)
@@ -124,42 +177,7 @@ typedef struct ZynqSLCRState {
MemoryRegion iomem;
- union {
- struct {
- uint16_t scl;
- uint16_t lockval;
- uint32_t pll[PLL_MAX]; /* 0x100 - 0x11C */
- uint32_t clk[CLK_MAX]; /* 0x120 - 0x16C */
- uint32_t fpga[4][FPGA_MAX]; /* 0x170 - 0x1AC */
- uint32_t misc[MISC_MAX]; /* 0x1B0 - 0x1D8 */
- uint32_t reset[RESET_MAX]; /* 0x200 - 0x25C */
- uint32_t apu_ctrl; /* 0x300 */
- uint32_t wdt_clk_sel; /* 0x304 */
- uint32_t tz_ocm[3]; /* 0x400 - 0x408 */
- uint32_t tz_ddr; /* 0x430 */
- uint32_t tz_dma[3]; /* 0x440 - 0x448 */
- uint32_t tz_misc[3]; /* 0x450 - 0x458 */
- uint32_t tz_fpga[2]; /* 0x484 - 0x488 */
- uint32_t dbg_ctrl; /* 0x500 */
- uint32_t pss_idcode; /* 0x530 */
- uint32_t ddr[8]; /* 0x600 - 0x620 - 0x604-missing */
- uint32_t mio[54]; /* 0x700 - 0x7D4 */
- uint32_t mio_func[4]; /* 0x800 - 0x810 */
- uint32_t sd[2]; /* 0x830 - 0x834 */
- uint32_t lvl_shftr_en; /* 0x900 */
- uint32_t ocm_cfg; /* 0x910 */
- uint32_t cpu_ram[8]; /* 0xA00 - 0xA1C */
- uint32_t iou[7]; /* 0xA30 - 0xA48 */
- uint32_t dmac_ram; /* 0xA50 */
- uint32_t afi[4][3]; /* 0xA60 - 0xA8C */
- uint32_t ocm[3]; /* 0xA90 - 0xA98 */
- uint32_t devci_ram; /* 0xAA0 */
- uint32_t csg_ram; /* 0xAB0 */
- uint32_t gpiob[12]; /* 0xB00 - 0xB2C */
- uint32_t ddriob[14]; /* 0xB40 - 0xB74 */
- };
- uint8_t data[0x1000];
- };
+ uint32_t regs[ZYNQ_SLCR_NUM_REGS];
} ZynqSLCRState;
static void zynq_slcr_reset(DeviceState *d)
@@ -169,177 +187,169 @@ static void zynq_slcr_reset(DeviceState *d)
DB_PRINT("RESET\n");
- s->lockval = 1;
+ s->regs[LOCKSTA] = 1;
/* 0x100 - 0x11C */
- s->pll[ARM_PLL_CTRL] = 0x0001A008;
- s->pll[DDR_PLL_CTRL] = 0x0001A008;
- s->pll[IO_PLL_CTRL] = 0x0001A008;
- s->pll[PLL_STATUS] = 0x0000003F;
- s->pll[ARM_PPL_CFG] = 0x00014000;
- s->pll[DDR_PLL_CFG] = 0x00014000;
- s->pll[IO_PLL_CFG] = 0x00014000;
+ s->regs[ARM_PLL_CTRL] = 0x0001A008;
+ s->regs[DDR_PLL_CTRL] = 0x0001A008;
+ s->regs[IO_PLL_CTRL] = 0x0001A008;
+ s->regs[PLL_STATUS] = 0x0000003F;
+ s->regs[ARM_PLL_CFG] = 0x00014000;
+ s->regs[DDR_PLL_CFG] = 0x00014000;
+ s->regs[IO_PLL_CFG] = 0x00014000;
/* 0x120 - 0x16C */
- s->clk[ARM_CLK_CTRL] = 0x1F000400;
- s->clk[DDR_CLK_CTRL] = 0x18400003;
- s->clk[DCI_CLK_CTRL] = 0x01E03201;
- s->clk[APER_CLK_CTRL] = 0x01FFCCCD;
- s->clk[USB0_CLK_CTRL] = s->clk[USB1_CLK_CTRL] = 0x00101941;
- s->clk[GEM0_RCLK_CTRL] = s->clk[GEM1_RCLK_CTRL] = 0x00000001;
- s->clk[GEM0_CLK_CTRL] = s->clk[GEM1_CLK_CTRL] = 0x00003C01;
- s->clk[SMC_CLK_CTRL] = 0x00003C01;
- s->clk[LQSPI_CLK_CTRL] = 0x00002821;
- s->clk[SDIO_CLK_CTRL] = 0x00001E03;
- s->clk[UART_CLK_CTRL] = 0x00003F03;
- s->clk[SPI_CLK_CTRL] = 0x00003F03;
- s->clk[CAN_CLK_CTRL] = 0x00501903;
- s->clk[DBG_CLK_CTRL] = 0x00000F03;
- s->clk[PCAP_CLK_CTRL] = 0x00000F01;
+ s->regs[ARM_CLK_CTRL] = 0x1F000400;
+ s->regs[DDR_CLK_CTRL] = 0x18400003;
+ s->regs[DCI_CLK_CTRL] = 0x01E03201;
+ s->regs[APER_CLK_CTRL] = 0x01FFCCCD;
+ s->regs[USB0_CLK_CTRL] = s->regs[USB1_CLK_CTRL] = 0x00101941;
+ s->regs[GEM0_RCLK_CTRL] = s->regs[GEM1_RCLK_CTRL] = 0x00000001;
+ s->regs[GEM0_CLK_CTRL] = s->regs[GEM1_CLK_CTRL] = 0x00003C01;
+ s->regs[SMC_CLK_CTRL] = 0x00003C01;
+ s->regs[LQSPI_CLK_CTRL] = 0x00002821;
+ s->regs[SDIO_CLK_CTRL] = 0x00001E03;
+ s->regs[UART_CLK_CTRL] = 0x00003F03;
+ s->regs[SPI_CLK_CTRL] = 0x00003F03;
+ s->regs[CAN_CLK_CTRL] = 0x00501903;
+ s->regs[DBG_CLK_CTRL] = 0x00000F03;
+ s->regs[PCAP_CLK_CTRL] = 0x00000F01;
/* 0x170 - 0x1AC */
- s->fpga[0][CLK_CTRL] = s->fpga[1][CLK_CTRL] = s->fpga[2][CLK_CTRL] =
- s->fpga[3][CLK_CTRL] = 0x00101800;
- s->fpga[0][THR_STA] = s->fpga[1][THR_STA] = s->fpga[2][THR_STA] =
- s->fpga[3][THR_STA] = 0x00010000;
+ s->regs[FPGA0_CLK_CTRL] = s->regs[FPGA1_CLK_CTRL] = s->regs[FPGA2_CLK_CTRL]
+ = s->regs[FPGA3_CLK_CTRL] = 0x00101800;
+ s->regs[FPGA0_THR_STA] = s->regs[FPGA1_THR_STA] = s->regs[FPGA2_THR_STA]
+ = s->regs[FPGA3_THR_STA] = 0x00010000;
/* 0x1B0 - 0x1D8 */
- s->misc[BANDGAP_TRIP] = 0x0000001F;
- s->misc[PLL_PREDIVISOR] = 0x00000001;
- s->misc[CLK_621_TRUE] = 0x00000001;
+ s->regs[BANDGAP_TRIP] = 0x0000001F;
+ s->regs[PLL_PREDIVISOR] = 0x00000001;
+ s->regs[CLK_621_TRUE] = 0x00000001;
/* 0x200 - 0x25C */
- s->reset[FPGA] = 0x01F33F0F;
- s->reset[RST_REASON] = 0x00000040;
+ s->regs[FPGA_RST_CTRL] = 0x01F33F0F;
+ s->regs[RST_REASON] = 0x00000040;
+
+ s->regs[BOOT_MODE] = 0x00000001;
/* 0x700 - 0x7D4 */
for (i = 0; i < 54; i++) {
- s->mio[i] = 0x00001601;
+ s->regs[MIO + i] = 0x00001601;
}
for (i = 2; i <= 8; i++) {
- s->mio[i] = 0x00000601;
+ s->regs[MIO + i] = 0x00000601;
}
- /* MIO_MST_TRI0, MIO_MST_TRI1 */
- s->mio_func[2] = s->mio_func[3] = 0xFFFFFFFF;
+ s->regs[MIO_MST_TRI0] = s->regs[MIO_MST_TRI1] = 0xFFFFFFFF;
- s->cpu_ram[0] = s->cpu_ram[1] = s->cpu_ram[3] =
- s->cpu_ram[4] = s->cpu_ram[7] = 0x00010101;
- s->cpu_ram[2] = s->cpu_ram[5] = 0x01010101;
- s->cpu_ram[6] = 0x00000001;
+ s->regs[CPU_RAM + 0] = s->regs[CPU_RAM + 1] = s->regs[CPU_RAM + 3]
+ = s->regs[CPU_RAM + 4] = s->regs[CPU_RAM + 7]
+ = 0x00010101;
+ s->regs[CPU_RAM + 2] = s->regs[CPU_RAM + 5] = 0x01010101;
+ s->regs[CPU_RAM + 6] = 0x00000001;
- s->iou[0] = s->iou[1] = s->iou[2] = s->iou[3] = 0x09090909;
- s->iou[4] = s->iou[5] = 0x00090909;
- s->iou[6] = 0x00000909;
+ s->regs[IOU + 0] = s->regs[IOU + 1] = s->regs[IOU + 2] = s->regs[IOU + 3]
+ = 0x09090909;
+ s->regs[IOU + 4] = s->regs[IOU + 5] = 0x00090909;
+ s->regs[IOU + 6] = 0x00000909;
- s->dmac_ram = 0x00000009;
+ s->regs[DMAC_RAM] = 0x00000009;
- s->afi[0][0] = s->afi[0][1] = 0x09090909;
- s->afi[1][0] = s->afi[1][1] = 0x09090909;
- s->afi[2][0] = s->afi[2][1] = 0x09090909;
- s->afi[3][0] = s->afi[3][1] = 0x09090909;
- s->afi[0][2] = s->afi[1][2] = s->afi[2][2] = s->afi[3][2] = 0x00000909;
+ s->regs[AFI0 + 0] = s->regs[AFI0 + 1] = 0x09090909;
+ s->regs[AFI1 + 0] = s->regs[AFI1 + 1] = 0x09090909;
+ s->regs[AFI2 + 0] = s->regs[AFI2 + 1] = 0x09090909;
+ s->regs[AFI3 + 0] = s->regs[AFI3 + 1] = 0x09090909;
+ s->regs[AFI0 + 2] = s->regs[AFI1 + 2] = s->regs[AFI2 + 2]
+ = s->regs[AFI3 + 2] = 0x00000909;
- s->ocm[0] = 0x01010101;
- s->ocm[1] = s->ocm[2] = 0x09090909;
+ s->regs[OCM + 0] = 0x01010101;
+ s->regs[OCM + 1] = s->regs[OCM + 2] = 0x09090909;
- s->devci_ram = 0x00000909;
- s->csg_ram = 0x00000001;
+ s->regs[DEVCI_RAM] = 0x00000909;
+ s->regs[CSG_RAM] = 0x00000001;
- s->ddriob[0] = s->ddriob[1] = s->ddriob[2] = s->ddriob[3] = 0x00000e00;
- s->ddriob[4] = s->ddriob[5] = s->ddriob[6] = 0x00000e00;
- s->ddriob[12] = 0x00000021;
+ s->regs[DDRIOB + 0] = s->regs[DDRIOB + 1] = s->regs[DDRIOB + 2]
+ = s->regs[DDRIOB + 3] = 0x00000e00;
+ s->regs[DDRIOB + 4] = s->regs[DDRIOB + 5] = s->regs[DDRIOB + 6]
+ = 0x00000e00;
+ s->regs[DDRIOB + 12] = 0x00000021;
}
-static inline uint32_t zynq_slcr_read_imp(void *opaque,
- hwaddr offset)
-{
- ZynqSLCRState *s = (ZynqSLCRState *)opaque;
+static bool zynq_slcr_check_offset(hwaddr offset, bool rnw)
+{
switch (offset) {
- case 0x0: /* SCL */
- return s->scl;
- case 0x4: /* LOCK */
- case 0x8: /* UNLOCK */
- DB_PRINT("Reading SCLR_LOCK/UNLOCK is not enabled\n");
- return 0;
- case 0x0C: /* LOCKSTA */
- return s->lockval;
- case 0x100 ... 0x11C:
- return s->pll[(offset - 0x100) / 4];
- case 0x120 ... 0x16C:
- return s->clk[(offset - 0x120) / 4];
- case 0x170 ... 0x1AC:
- return s->fpga[0][(offset - 0x170) / 4];
- case 0x1B0 ... 0x1D8:
- return s->misc[(offset - 0x1B0) / 4];
- case 0x200 ... 0x258:
- return s->reset[(offset - 0x200) / 4];
- case 0x25c:
- return 1;
- case 0x300:
- return s->apu_ctrl;
- case 0x304:
- return s->wdt_clk_sel;
- case 0x400 ... 0x408:
- return s->tz_ocm[(offset - 0x400) / 4];
- case 0x430:
- return s->tz_ddr;
- case 0x440 ... 0x448:
- return s->tz_dma[(offset - 0x440) / 4];
- case 0x450 ... 0x458:
- return s->tz_misc[(offset - 0x450) / 4];
- case 0x484 ... 0x488:
- return s->tz_fpga[(offset - 0x484) / 4];
- case 0x500:
- return s->dbg_ctrl;
- case 0x530:
- return s->pss_idcode;
- case 0x600 ... 0x620:
- if (offset == 0x604) {
- goto bad_reg;
- }
- return s->ddr[(offset - 0x600) / 4];
- case 0x700 ... 0x7D4:
- return s->mio[(offset - 0x700) / 4];
- case 0x800 ... 0x810:
- return s->mio_func[(offset - 0x800) / 4];
- case 0x830 ... 0x834:
- return s->sd[(offset - 0x830) / 4];
- case 0x900:
- return s->lvl_shftr_en;
- case 0x910:
- return s->ocm_cfg;
- case 0xA00 ... 0xA1C:
- return s->cpu_ram[(offset - 0xA00) / 4];
- case 0xA30 ... 0xA48:
- return s->iou[(offset - 0xA30) / 4];
- case 0xA50:
- return s->dmac_ram;
- case 0xA60 ... 0xA8C:
- return s->afi[0][(offset - 0xA60) / 4];
- case 0xA90 ... 0xA98:
- return s->ocm[(offset - 0xA90) / 4];
- case 0xAA0:
- return s->devci_ram;
- case 0xAB0:
- return s->csg_ram;
- case 0xB00 ... 0xB2C:
- return s->gpiob[(offset - 0xB00) / 4];
- case 0xB40 ... 0xB74:
- return s->ddriob[(offset - 0xB40) / 4];
+ case LOCK:
+ case UNLOCK:
+ case DDR_CAL_START:
+ case DDR_REF_START:
+ return !rnw; /* Write only */
+ case LOCKSTA:
+ case FPGA0_THR_STA:
+ case FPGA1_THR_STA:
+ case FPGA2_THR_STA:
+ case FPGA3_THR_STA:
+ case BOOT_MODE:
+ case PSS_IDCODE:
+ case DDR_CMD_STA:
+ case DDR_DFI_STATUS:
+ case PLL_STATUS:
+ return rnw; /* Read only */
+ case SCL:
+ case ARM_PLL_CTRL ... IO_PLL_CTRL:
+ case ARM_PLL_CFG ... IO_PLL_CFG:
+ case ARM_CLK_CTRL ... TOPSW_CLK_CTRL:
+ case FPGA0_CLK_CTRL ... FPGA0_THR_CNT:
+ case FPGA1_CLK_CTRL ... FPGA1_THR_CNT:
+ case FPGA2_CLK_CTRL ... FPGA2_THR_CNT:
+ case FPGA3_CLK_CTRL ... FPGA3_THR_CNT:
+ case BANDGAP_TRIP:
+ case PLL_PREDIVISOR:
+ case CLK_621_TRUE:
+ case PSS_RST_CTRL ... A9_CPU_RST_CTRL:
+ case RS_AWDT_CTRL:
+ case RST_REASON:
+ case REBOOT_STATUS:
+ case APU_CTRL:
+ case WDT_CLK_SEL:
+ case TZ_DMA_NS ... TZ_DMA_PERIPH_NS:
+ case DDR_URGENT:
+ case DDR_URGENT_SEL:
+ case MIO ... MIO + MIO_LENGTH - 1:
+ case MIO_LOOPBACK ... MIO_MST_TRI1:
+ case SD0_WP_CD_SEL:
+ case SD1_WP_CD_SEL:
+ case LVL_SHFTR_EN:
+ case OCM_CFG:
+ case CPU_RAM:
+ case IOU:
+ case DMAC_RAM:
+ case AFI0 ... AFI3 + AFI_LENGTH - 1:
+ case OCM:
+ case DEVCI_RAM:
+ case CSG_RAM:
+ case GPIOB_CTRL ... GPIOB_CFG_CMOS33:
+ case GPIOB_CFG_HSTL:
+ case GPIOB_DRVR_BIAS_CTRL:
+ case DDRIOB ... DDRIOB + DDRIOB_LENGTH - 1:
+ return true;
default:
- bad_reg:
- DB_PRINT("Bad register offset 0x%x\n", (int)offset);
- return 0;
+ return false;
}
}
static uint64_t zynq_slcr_read(void *opaque, hwaddr offset,
unsigned size)
{
- uint32_t ret = zynq_slcr_read_imp(opaque, offset);
+ ZynqSLCRState *s = opaque;
+ offset /= 4;
+ uint32_t ret = s->regs[offset];
+
+ if (!zynq_slcr_check_offset(offset, true)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "zynq_slcr: Invalid read access to "
+ " addr %" HWADDR_PRIx "\n", offset * 4);
+ }
- DB_PRINT("addr: %08x data: %08x\n", (unsigned)offset, (unsigned)ret);
+ DB_PRINT("addr: %08" HWADDR_PRIx " data: %08" PRIx32 "\n", offset * 4, ret);
return ret;
}
@@ -347,148 +357,55 @@ static void zynq_slcr_write(void *opaque, hwaddr offset,
uint64_t val, unsigned size)
{
ZynqSLCRState *s = (ZynqSLCRState *)opaque;
+ offset /= 4;
- DB_PRINT("offset: %08x data: %08x\n", (unsigned)offset, (unsigned)val);
+ DB_PRINT("addr: %08" HWADDR_PRIx " data: %08" PRIx64 "\n", offset * 4, val);
+
+ if (!zynq_slcr_check_offset(offset, false)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "zynq_slcr: Invalid write access to "
+ "addr %" HWADDR_PRIx "\n", offset * 4);
+ return;
+ }
switch (offset) {
- case 0x00: /* SCL */
- s->scl = val & 0x1;
- return;
- case 0x4: /* SLCR_LOCK */
+ case SCL:
+ s->regs[SCL] = val & 0x1;
+ return;
+ case LOCK:
if ((val & 0xFFFF) == XILINX_LOCK_KEY) {
DB_PRINT("XILINX LOCK 0xF8000000 + 0x%x <= 0x%x\n", (int)offset,
(unsigned)val & 0xFFFF);
- s->lockval = 1;
+ s->regs[LOCKSTA] = 1;
} else {
DB_PRINT("WRONG XILINX LOCK KEY 0xF8000000 + 0x%x <= 0x%x\n",
(int)offset, (unsigned)val & 0xFFFF);
}
return;
- case 0x8: /* SLCR_UNLOCK */
+ case UNLOCK:
if ((val & 0xFFFF) == XILINX_UNLOCK_KEY) {
DB_PRINT("XILINX UNLOCK 0xF8000000 + 0x%x <= 0x%x\n", (int)offset,
(unsigned)val & 0xFFFF);
- s->lockval = 0;
+ s->regs[LOCKSTA] = 0;
} else {
DB_PRINT("WRONG XILINX UNLOCK KEY 0xF8000000 + 0x%x <= 0x%x\n",
(int)offset, (unsigned)val & 0xFFFF);
}
return;
- case 0xc: /* LOCKSTA */
- DB_PRINT("Writing SCLR_LOCKSTA is not enabled\n");
- return;
}
- if (!s->lockval) {
- switch (offset) {
- case 0x100 ... 0x11C:
- if (offset == 0x10C) {
- goto bad_reg;
- }
- s->pll[(offset - 0x100) / 4] = val;
- break;
- case 0x120 ... 0x16C:
- s->clk[(offset - 0x120) / 4] = val;
- break;
- case 0x170 ... 0x1AC:
- s->fpga[0][(offset - 0x170) / 4] = val;
- break;
- case 0x1B0 ... 0x1D8:
- s->misc[(offset - 0x1B0) / 4] = val;
- break;
- case 0x200 ... 0x25C:
- if (offset == 0x250) {
- goto bad_reg;
- }
- s->reset[(offset - 0x200) / 4] = val;
- if (offset == 0x200 && (val & R_PSS_RST_CTRL_SOFT_RST)) {
- qemu_system_reset_request();
- }
- break;
- case 0x300:
- s->apu_ctrl = val;
- break;
- case 0x304:
- s->wdt_clk_sel = val;
- break;
- case 0x400 ... 0x408:
- s->tz_ocm[(offset - 0x400) / 4] = val;
- break;
- case 0x430:
- s->tz_ddr = val;
- break;
- case 0x440 ... 0x448:
- s->tz_dma[(offset - 0x440) / 4] = val;
- break;
- case 0x450 ... 0x458:
- s->tz_misc[(offset - 0x450) / 4] = val;
- break;
- case 0x484 ... 0x488:
- s->tz_fpga[(offset - 0x484) / 4] = val;
- break;
- case 0x500:
- s->dbg_ctrl = val;
- break;
- case 0x530:
- s->pss_idcode = val;
- break;
- case 0x600 ... 0x620:
- if (offset == 0x604) {
- goto bad_reg;
- }
- s->ddr[(offset - 0x600) / 4] = val;
- break;
- case 0x700 ... 0x7D4:
- s->mio[(offset - 0x700) / 4] = val;
- break;
- case 0x800 ... 0x810:
- s->mio_func[(offset - 0x800) / 4] = val;
- break;
- case 0x830 ... 0x834:
- s->sd[(offset - 0x830) / 4] = val;
- break;
- case 0x900:
- s->lvl_shftr_en = val;
- break;
- case 0x910:
- break;
- case 0xA00 ... 0xA1C:
- s->cpu_ram[(offset - 0xA00) / 4] = val;
- break;
- case 0xA30 ... 0xA48:
- s->iou[(offset - 0xA30) / 4] = val;
- break;
- case 0xA50:
- s->dmac_ram = val;
- break;
- case 0xA60 ... 0xA8C:
- s->afi[0][(offset - 0xA60) / 4] = val;
- break;
- case 0xA90:
- s->ocm[0] = val;
- break;
- case 0xAA0:
- s->devci_ram = val;
- break;
- case 0xAB0:
- s->csg_ram = val;
- break;
- case 0xB00 ... 0xB2C:
- if (offset == 0xB20 || offset == 0xB2C) {
- goto bad_reg;
- }
- s->gpiob[(offset - 0xB00) / 4] = val;
- break;
- case 0xB40 ... 0xB74:
- s->ddriob[(offset - 0xB40) / 4] = val;
- break;
- default:
- bad_reg:
- DB_PRINT("Bad register write %x <= %08x\n", (int)offset,
- (unsigned)val);
- }
+ if (!s->regs[LOCKSTA]) {
+ s->regs[offset] = val;
} else {
DB_PRINT("SCLR registers are locked. Unlock them first\n");
+ return;
+ }
+
+ switch (offset) {
+ case PSS_RST_CTRL:
+ if (val & R_PSS_RST_CTRL_SOFT_RST) {
+ qemu_system_reset_request();
+ }
+ break;
}
}
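Editorial note on the access check above, not part of the patch: a single table keyed by word index replaces the two large offset switches of the old code, with the rnw flag (true for reads) selecting which direction is legal, and the GCC/Clang case-range extension (case A ... B:) keeping the table compact. A small, self-contained sketch of the same idiom with hypothetical registers:

    #include <stdbool.h>
    #include <stdio.h>

    enum { STATUS, CTRL0, CTRL1, TRIGGER };   /* hypothetical register indices */

    /* rnw: true for a read access, false for a write access */
    static bool check_offset(unsigned idx, bool rnw)
    {
        switch (idx) {
        case STATUS:
            return rnw;               /* read only */
        case TRIGGER:
            return !rnw;              /* write only */
        case CTRL0 ... CTRL1:         /* GNU C case-range extension */
            return true;              /* read/write */
        default:
            return false;             /* unimplemented */
        }
    }

    int main(void)
    {
        printf("%d %d\n", check_offset(STATUS, false), check_offset(CTRL1, true));
        return 0;
    }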
@@ -498,23 +415,22 @@ static const MemoryRegionOps slcr_ops = {
.endianness = DEVICE_NATIVE_ENDIAN,
};
-static int zynq_slcr_init(SysBusDevice *dev)
+static void zynq_slcr_init(Object *obj)
{
- ZynqSLCRState *s = ZYNQ_SLCR(dev);
-
- memory_region_init_io(&s->iomem, OBJECT(s), &slcr_ops, s, "slcr", 0x1000);
- sysbus_init_mmio(dev, &s->iomem);
+ ZynqSLCRState *s = ZYNQ_SLCR(obj);
- return 0;
+ memory_region_init_io(&s->iomem, obj, &slcr_ops, s, "slcr",
+ ZYNQ_SLCR_MMIO_SIZE);
+ sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->iomem);
}
static const VMStateDescription vmstate_zynq_slcr = {
.name = "zynq_slcr",
- .version_id = 1,
- .minimum_version_id = 1,
- .minimum_version_id_old = 1,
+ .version_id = 2,
+ .minimum_version_id = 2,
+ .minimum_version_id_old = 2,
.fields = (VMStateField[]) {
- VMSTATE_UINT8_ARRAY(data, ZynqSLCRState, 0x1000),
+ VMSTATE_UINT32_ARRAY(regs, ZynqSLCRState, ZYNQ_SLCR_NUM_REGS),
VMSTATE_END_OF_LIST()
}
};
@@ -522,9 +438,7 @@ static const VMStateDescription vmstate_zynq_slcr = {
static void zynq_slcr_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- SysBusDeviceClass *sdc = SYS_BUS_DEVICE_CLASS(klass);
- sdc->init = zynq_slcr_init;
dc->vmsd = &vmstate_zynq_slcr;
dc->reset = zynq_slcr_reset;
}
@@ -534,6 +448,7 @@ static const TypeInfo zynq_slcr_info = {
.name = TYPE_ZYNQ_SLCR,
.parent = TYPE_SYS_BUS_DEVICE,
.instance_size = sizeof(ZynqSLCRState),
+ .instance_init = zynq_slcr_init,
};
static void zynq_slcr_register_types(void)
diff --git a/hw/net/allwinner_emac.c b/hw/net/allwinner_emac.c
index 469f2f0ede..d780ba0fcb 100644
--- a/hw/net/allwinner_emac.c
+++ b/hw/net/allwinner_emac.c
@@ -27,11 +27,11 @@ static uint8_t padding[60];
static void mii_set_link(RTL8201CPState *mii, bool link_ok)
{
if (link_ok) {
- mii->bmsr |= MII_BMSR_LINK_ST;
+ mii->bmsr |= MII_BMSR_LINK_ST | MII_BMSR_AN_COMP;
mii->anlpar |= MII_ANAR_TXFD | MII_ANAR_10FD | MII_ANAR_10 |
MII_ANAR_CSMACD;
} else {
- mii->bmsr &= ~MII_BMSR_LINK_ST;
+ mii->bmsr &= ~(MII_BMSR_LINK_ST | MII_BMSR_AN_COMP);
mii->anlpar = MII_ANAR_TX;
}
}
@@ -391,9 +391,11 @@ static void aw_emac_write(void *opaque, hwaddr offset, uint64_t value,
break;
case EMAC_INT_CTL_REG:
s->int_ctl = value;
+ aw_emac_update_irq(s);
break;
case EMAC_INT_STA_REG:
s->int_sta &= ~value;
+ aw_emac_update_irq(s);
break;
case EMAC_MAC_MADR_REG:
s->phy_target = value;
diff --git a/hw/net/cadence_gem.c b/hw/net/cadence_gem.c
index 92dc2f21fa..e34b25e734 100644
--- a/hw/net/cadence_gem.c
+++ b/hw/net/cadence_gem.c
@@ -1093,7 +1093,7 @@ static uint64_t gem_read(void *opaque, hwaddr offset, unsigned size)
uint32_t phy_addr, reg_num;
phy_addr = (retval & GEM_PHYMNTNC_ADDR) >> GEM_PHYMNTNC_ADDR_SHFT;
- if (phy_addr == BOARD_PHY_ADDRESS) {
+ if (phy_addr == BOARD_PHY_ADDRESS || phy_addr == 0) {
reg_num = (retval & GEM_PHYMNTNC_REG) >> GEM_PHYMNTNC_REG_SHIFT;
retval &= 0xFFFF0000;
retval |= gem_phy_read(s, reg_num);
@@ -1193,7 +1193,7 @@ static void gem_write(void *opaque, hwaddr offset, uint64_t val,
uint32_t phy_addr, reg_num;
phy_addr = (val & GEM_PHYMNTNC_ADDR) >> GEM_PHYMNTNC_ADDR_SHFT;
- if (phy_addr == BOARD_PHY_ADDRESS) {
+ if (phy_addr == BOARD_PHY_ADDRESS || phy_addr == 0) {
reg_num = (val & GEM_PHYMNTNC_REG) >> GEM_PHYMNTNC_REG_SHIFT;
gem_phy_write(s, reg_num, val);
}
diff --git a/hw/net/fsl_etsec/rings.c b/hw/net/fsl_etsec/rings.c
index 77602722b3..e36cfbe76d 100644
--- a/hw/net/fsl_etsec/rings.c
+++ b/hw/net/fsl_etsec/rings.c
@@ -195,8 +195,8 @@ static void process_tx_fcb(eTSEC *etsec)
/* if packet is IP4 and IP checksum is requested */
if (flags & FCB_TX_IP && flags & FCB_TX_CIP) {
- /* do IP4 checksum (TODO This funtion does TCP/UDP checksum but not sure
- * if it also does IP4 checksum. */
+ /* do IP4 checksum (TODO This function does TCP/UDP checksum
+ * but not sure if it also does IP4 checksum.) */
net_checksum_calculate(etsec->tx_buffer + 8,
etsec->tx_buffer_len - 8);
}
@@ -592,7 +592,7 @@ void etsec_walk_rx_ring(eTSEC *etsec, int ring_nbr)
/* TODO: Broadcast and Multicast */
- if (bd.flags | BD_INTERRUPT) {
+ if (bd.flags & BD_INTERRUPT) {
/* Set RXFx */
etsec->regs[RSTAT].value |= 1 << (7 - ring_nbr);
@@ -601,7 +601,7 @@ void etsec_walk_rx_ring(eTSEC *etsec, int ring_nbr)
}
} else {
- if (bd.flags | BD_INTERRUPT) {
+ if (bd.flags & BD_INTERRUPT) {
/* Set IEVENT */
ievent_set(etsec, IEVENT_RXB);
}
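Editorial note, not part of the patch: the rings.c hunks above fix a classic operator slip. bd.flags | BD_INTERRUPT is non-zero for any flag combination, so the old condition always fired; the bitwise AND actually tests the flag. A two-assert illustration using a stand-in value for BD_INTERRUPT:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        const uint16_t BD_INT = 0x1000;   /* stand-in for BD_INTERRUPT */
        uint16_t flags = 0x0800;          /* interrupt flag NOT set */

        assert(flags | BD_INT);           /* old test: true for any flags value */
        assert(!(flags & BD_INT));        /* new test: correctly false here */
        return 0;
    }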
diff --git a/hw/net/pcnet.c b/hw/net/pcnet.c
index 7cb47b3f1f..ebe505784d 100644
--- a/hw/net/pcnet.c
+++ b/hw/net/pcnet.c
@@ -718,7 +718,6 @@ static void pcnet_s_reset(PCNetState *s)
s->csr[94] = 0x0000;
s->csr[100] = 0x0200;
s->csr[103] = 0x0105;
- s->csr[103] = 0x0105;
s->csr[112] = 0x0000;
s->csr[114] = 0x0000;
s->csr[122] = 0x0000;
diff --git a/hw/net/spapr_llan.c b/hw/net/spapr_llan.c
index f6fbcb56bf..c433337b67 100644
--- a/hw/net/spapr_llan.c
+++ b/hw/net/spapr_llan.c
@@ -29,6 +29,7 @@
#include "hw/qdev.h"
#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_vio.h"
+#include "sysemu/sysemu.h"
#include <libfdt.h>
@@ -213,6 +214,8 @@ static int spapr_vlan_init(VIOsPAPRDevice *sdev)
object_get_typename(OBJECT(sdev)), sdev->qdev.id, dev);
qemu_format_nic_info_str(qemu_get_queue(dev->nic), dev->nicconf.macaddr.a);
+ add_boot_device_path(dev->nicconf.bootindex, DEVICE(dev), "");
+
return 0;
}
diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
index fd23c4627e..33bd233a2d 100644
--- a/hw/net/virtio-net.c
+++ b/hw/net/virtio-net.c
@@ -222,13 +222,33 @@ static char *mac_strdup_printf(const uint8_t *mac)
mac[1], mac[2], mac[3], mac[4], mac[5]);
}
+static intList *get_vlan_table(VirtIONet *n)
+{
+ intList *list, *entry;
+ int i, j;
+
+ list = NULL;
+ for (i = 0; i < MAX_VLAN >> 5; i++) {
+ for (j = 0; n->vlans[i] && j <= 0x1f; j++) {
+ if (n->vlans[i] & (1U << j)) {
+ entry = g_malloc0(sizeof(*entry));
+ entry->value = (i << 5) + j;
+ entry->next = list;
+ list = entry;
+ }
+ }
+ }
+
+ return list;
+}
+
static RxFilterInfo *virtio_net_query_rxfilter(NetClientState *nc)
{
VirtIONet *n = qemu_get_nic_opaque(nc);
+ VirtIODevice *vdev = VIRTIO_DEVICE(n);
RxFilterInfo *info;
strList *str_list, *entry;
- intList *int_list, *int_entry;
- int i, j;
+ int i;
info = g_malloc0(sizeof(*info));
info->name = g_strdup(nc->name);
@@ -273,19 +293,15 @@ static RxFilterInfo *virtio_net_query_rxfilter(NetClientState *nc)
str_list = entry;
}
info->multicast_table = str_list;
+ info->vlan_table = get_vlan_table(n);
- int_list = NULL;
- for (i = 0; i < MAX_VLAN >> 5; i++) {
- for (j = 0; n->vlans[i] && j < 0x1f; j++) {
- if (n->vlans[i] & (1U << j)) {
- int_entry = g_malloc0(sizeof(*int_entry));
- int_entry->value = (i << 5) + j;
- int_entry->next = int_list;
- int_list = int_entry;
- }
- }
+ if (!((1 << VIRTIO_NET_F_CTRL_VLAN) & vdev->guest_features)) {
+ info->vlan = RX_STATE_ALL;
+ } else if (!info->vlan_table) {
+ info->vlan = RX_STATE_NONE;
+ } else {
+ info->vlan = RX_STATE_NORMAL;
}
- info->vlan_table = int_list;
/* enable event notification after query */
nc->rxfilter_notify_enabled = 1;
@@ -514,6 +530,12 @@ static void virtio_net_set_features(VirtIODevice *vdev, uint32_t features)
}
vhost_net_ack_features(tap_get_vhost_net(nc->peer), features);
}
+
+ if ((1 << VIRTIO_NET_F_CTRL_VLAN) & features) {
+ memset(n->vlans, 0, MAX_VLAN >> 3);
+ } else {
+ memset(n->vlans, 0xff, MAX_VLAN >> 3);
+ }
}
static int virtio_net_handle_rx_mode(VirtIONet *n, uint8_t cmd,
@@ -655,7 +677,7 @@ static int virtio_net_handle_mac(VirtIONet *n, uint8_t cmd,
goto error;
}
- if (in_use + mac_data.entries <= MAC_TABLE_ENTRIES) {
+ if (mac_data.entries <= MAC_TABLE_ENTRIES - in_use) {
s = iov_to_buf(iov, iov_cnt, 0, &macs[in_use * ETH_ALEN],
mac_data.entries * ETH_ALEN);
if (s != mac_data.entries * ETH_ALEN) {
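Editorial note on the virtio-net hunks above, not part of the patch: the VLAN filter is a bitmap with 32 ids per word, so an id is reconstructed as (i << 5) + j; the new helper also iterates j up to 0x1f inclusive, whereas the loop it replaces (j < 0x1f) silently skipped bit 31 of each word. A self-contained sketch of the same walk:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t vlans[2] = { 0x80000001, 0x4 };    /* ids 0, 31 and 34 set */

        for (int i = 0; i < 2; i++) {
            for (int j = 0; vlans[i] && j <= 0x1f; j++) {
                if (vlans[i] & (1U << j)) {
                    printf("vlan %d\n", (i << 5) + j);
                }
            }
        }
        return 0;
    }

The same section also rewrites the MAC table bound as mac_data.entries <= MAC_TABLE_ENTRIES - in_use, presumably so a huge guest-supplied entry count cannot overflow the sum used in the old comparison.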
diff --git a/hw/net/vmxnet3.c b/hw/net/vmxnet3.c
index 5be807ce82..ddcee4bd21 100644
--- a/hw/net/vmxnet3.c
+++ b/hw/net/vmxnet3.c
@@ -52,6 +52,9 @@
#define VMXNET3_DEVICE_VERSION 0x1
#define VMXNET3_DEVICE_REVISION 0x1
+/* Number of interrupt vectors for non-MSIx modes */
+#define VMXNET3_MAX_NMSIX_INTRS (1)
+
/* Macros for rings descriptors access */
#define VMXNET3_READ_TX_QUEUE_DESCR8(dpa, field) \
(vmw_shmem_ld8(dpa + offsetof(struct Vmxnet3_TxQueueDesc, field)))
@@ -1305,6 +1308,51 @@ static bool vmxnet3_verify_intx(VMXNET3State *s, int intx)
(pci_get_byte(s->parent_obj.config + PCI_INTERRUPT_PIN) - 1));
}
+static void vmxnet3_validate_interrupt_idx(bool is_msix, int idx)
+{
+ int max_ints = is_msix ? VMXNET3_MAX_INTRS : VMXNET3_MAX_NMSIX_INTRS;
+ if (idx >= max_ints) {
+ hw_error("Bad interrupt index: %d\n", idx);
+ }
+}
+
+static void vmxnet3_validate_interrupts(VMXNET3State *s)
+{
+ int i;
+
+ VMW_CFPRN("Verifying event interrupt index (%d)", s->event_int_idx);
+ vmxnet3_validate_interrupt_idx(s->msix_used, s->event_int_idx);
+
+ for (i = 0; i < s->txq_num; i++) {
+ int idx = s->txq_descr[i].intr_idx;
+ VMW_CFPRN("Verifying TX queue %d interrupt index (%d)", i, idx);
+ vmxnet3_validate_interrupt_idx(s->msix_used, idx);
+ }
+
+ for (i = 0; i < s->rxq_num; i++) {
+ int idx = s->rxq_descr[i].intr_idx;
+ VMW_CFPRN("Verifying RX queue %d interrupt index (%d)", i, idx);
+ vmxnet3_validate_interrupt_idx(s->msix_used, idx);
+ }
+}
+
+static void vmxnet3_validate_queues(VMXNET3State *s)
+{
+ /*
+ * txq_num and rxq_num are the total number of queues
+ * configured by the guest. They must not exceed the
+ * corresponding maximum values.
+ */
+
+ if (s->txq_num > VMXNET3_DEVICE_MAX_TX_QUEUES) {
+ hw_error("Bad TX queues number: %d\n", s->txq_num);
+ }
+
+ if (s->rxq_num > VMXNET3_DEVICE_MAX_RX_QUEUES) {
+ hw_error("Bad RX queues number: %d\n", s->rxq_num);
+ }
+}
+
static void vmxnet3_activate_device(VMXNET3State *s)
{
int i;
@@ -1351,7 +1399,7 @@ static void vmxnet3_activate_device(VMXNET3State *s)
VMXNET3_READ_DRV_SHARED8(s->drv_shmem, devRead.misc.numRxQueues);
VMW_CFPRN("Number of TX/RX queues %u/%u", s->txq_num, s->rxq_num);
- assert(s->txq_num <= VMXNET3_DEVICE_MAX_TX_QUEUES);
+ vmxnet3_validate_queues(s);
qdescr_table_pa =
VMXNET3_READ_DRV_SHARED64(s->drv_shmem, devRead.misc.queueDescPA);
@@ -1447,6 +1495,8 @@ static void vmxnet3_activate_device(VMXNET3State *s)
sizeof(s->rxq_descr[i].rxq_stats));
}
+ vmxnet3_validate_interrupts(s);
+
/* Make sure everything is in place before device activation */
smp_wmb();
@@ -2005,7 +2055,6 @@ vmxnet3_cleanup_msix(VMXNET3State *s)
}
}
-#define VMXNET3_MSI_NUM_VECTORS (1)
#define VMXNET3_MSI_OFFSET (0x50)
#define VMXNET3_USE_64BIT (true)
#define VMXNET3_PER_VECTOR_MASK (false)
@@ -2016,7 +2065,7 @@ vmxnet3_init_msi(VMXNET3State *s)
PCIDevice *d = PCI_DEVICE(s);
int res;
- res = msi_init(d, VMXNET3_MSI_OFFSET, VMXNET3_MSI_NUM_VECTORS,
+ res = msi_init(d, VMXNET3_MSI_OFFSET, VMXNET3_MAX_NMSIX_INTRS,
VMXNET3_USE_64BIT, VMXNET3_PER_VECTOR_MASK);
if (0 > res) {
VMW_WRPRN("Failed to initialize MSI, error %d", res);
@@ -2342,6 +2391,9 @@ static int vmxnet3_post_load(void *opaque, int version_id)
}
}
+ vmxnet3_validate_queues(s);
+ vmxnet3_validate_interrupts(s);
+
return 0;
}
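Editorial note on the vmxnet3 hunks above, not part of the patch: queue counts and interrupt indices come from guest-controlled shared memory, so they are now range-checked with hw_error() rather than assert(), and the same checks are repeated in post_load so a crafted migration stream cannot bypass them. A trivial standalone sketch of the idea, with a made-up limit:

    #include <stdio.h>
    #include <stdlib.h>

    #define MAX_TX_QUEUES 8   /* stand-in for the device's real limit */

    static void validate_queues(unsigned txq_num)
    {
        if (txq_num > MAX_TX_QUEUES) {
            /* Guest (or migration stream) handed us garbage; fail loudly. */
            fprintf(stderr, "Bad TX queues number: %u\n", txq_num);
            abort();
        }
    }

    int main(void)
    {
        validate_queues(4);      /* fine */
        validate_queues(100);    /* aborts */
        return 0;
    }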
diff --git a/hw/net/xilinx_axienet.c b/hw/net/xilinx_axienet.c
index 0bd5eda199..dbeb3c9a25 100644
--- a/hw/net/xilinx_axienet.c
+++ b/hw/net/xilinx_axienet.c
@@ -98,7 +98,7 @@ static unsigned int tdk_read(struct PHY *phy, unsigned int req)
r |= 1;
break;
case 17:
- /* Marvel PHY on many xilinx boards. */
+ /* Marvell PHY on many xilinx boards. */
r = 0x8000; /* 1000Mb */
break;
case 18:
@@ -142,6 +142,9 @@ tdk_write(struct PHY *phy, unsigned int req, unsigned int data)
phy->regs[regnum] = data;
break;
}
+
+ /* Unconditionally clear the reset bit (BMCR_RESET) in regs[BMCR] */
+ phy->regs[0] &= ~0x8000;
}
static void
@@ -945,9 +948,15 @@ static void xilinx_enet_realize(DeviceState *dev, Error **errp)
Error *local_errp = NULL;
object_property_add_link(OBJECT(ds), "enet", "xlnx.axi-ethernet",
- (Object **) &ds->enet, &local_errp);
+ (Object **) &ds->enet,
+ object_property_allow_set_link,
+ OBJ_PROP_LINK_UNREF_ON_RELEASE,
+ &local_errp);
object_property_add_link(OBJECT(cs), "enet", "xlnx.axi-ethernet",
- (Object **) &cs->enet, &local_errp);
+ (Object **) &cs->enet,
+ object_property_allow_set_link,
+ OBJ_PROP_LINK_UNREF_ON_RELEASE,
+ &local_errp);
if (local_errp) {
goto xilinx_enet_realize_fail;
}
@@ -982,10 +991,16 @@ static void xilinx_enet_init(Object *obj)
SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
object_property_add_link(obj, "axistream-connected", TYPE_STREAM_SLAVE,
- (Object **) &s->tx_data_dev, &error_abort);
+ (Object **) &s->tx_data_dev,
+ qdev_prop_allow_set_link_before_realize,
+ OBJ_PROP_LINK_UNREF_ON_RELEASE,
+ &error_abort);
object_property_add_link(obj, "axistream-control-connected",
TYPE_STREAM_SLAVE,
- (Object **) &s->tx_control_dev, &error_abort);
+ (Object **) &s->tx_control_dev,
+ qdev_prop_allow_set_link_before_realize,
+ OBJ_PROP_LINK_UNREF_ON_RELEASE,
+ &error_abort);
object_initialize(&s->rx_data_dev, sizeof(s->rx_data_dev),
TYPE_XILINX_AXI_ENET_DATA_STREAM);
diff --git a/hw/nvram/fw_cfg.c b/hw/nvram/fw_cfg.c
index cb36dc2d0c..282341ac1b 100644
--- a/hw/nvram/fw_cfg.c
+++ b/hw/nvram/fw_cfg.c
@@ -504,7 +504,7 @@ static void fw_cfg_machine_ready(struct Notifier *n, void *data)
{
size_t len;
FWCfgState *s = container_of(n, FWCfgState, machine_ready);
- char *bootindex = get_boot_devices_list(&len);
+ char *bootindex = get_boot_devices_list(&len, false);
fw_cfg_add_file(s, "bootorder", (uint8_t*)bootindex, len);
}
diff --git a/hw/pci-host/apb.c b/hw/pci-host/apb.c
index 1b399ddbc3..252caefda7 100644
--- a/hw/pci-host/apb.c
+++ b/hw/pci-host/apb.c
@@ -58,11 +58,11 @@ do { printf("APB: " fmt , ## __VA_ARGS__); } while (0)
#define PBM_PCI_IMR_MASK 0x7fffffff
#define PBM_PCI_IMR_ENABLED 0x80000000
-#define POR (1 << 31)
-#define SOFT_POR (1 << 30)
-#define SOFT_XIR (1 << 29)
-#define BTN_POR (1 << 28)
-#define BTN_XIR (1 << 27)
+#define POR (1U << 31)
+#define SOFT_POR (1U << 30)
+#define SOFT_XIR (1U << 29)
+#define BTN_POR (1U << 28)
+#define BTN_XIR (1U << 27)
#define RESET_MASK 0xf8000000
#define RESET_WCMASK 0x98000000
#define RESET_WMASK 0x60000000
diff --git a/hw/pci-host/prep.c b/hw/pci-host/prep.c
index 0c948e2c30..40145408ef 100644
--- a/hw/pci-host/prep.c
+++ b/hw/pci-host/prep.c
@@ -28,7 +28,9 @@
#include "hw/pci/pci_bus.h"
#include "hw/pci/pci_host.h"
#include "hw/i386/pc.h"
+#include "hw/loader.h"
#include "exec/address-spaces.h"
+#include "elf.h"
#define TYPE_RAVEN_PCI_DEVICE "raven"
#define TYPE_RAVEN_PCI_HOST_BRIDGE "raven-pcihost"
@@ -38,6 +40,10 @@
typedef struct RavenPCIState {
PCIDevice dev;
+
+ uint32_t elf_machine;
+ char *bios_name;
+ MemoryRegion bios;
} RavenPCIState;
#define RAVEN_PCI_HOST_BRIDGE(obj) \
@@ -46,13 +52,25 @@ typedef struct RavenPCIState {
typedef struct PRePPCIState {
PCIHostState parent_obj;
- MemoryRegion intack;
qemu_irq irq[PCI_NUM_PINS];
PCIBus pci_bus;
+ AddressSpace pci_io_as;
+ MemoryRegion pci_io;
+ MemoryRegion pci_io_non_contiguous;
+ MemoryRegion pci_memory;
+ MemoryRegion pci_intack;
+ MemoryRegion bm;
+ MemoryRegion bm_ram_alias;
+ MemoryRegion bm_pci_memory_alias;
+ AddressSpace bm_as;
RavenPCIState pci_dev;
+
+ int contiguous_map;
} PREPPCIState;
-static inline uint32_t PPC_PCIIO_config(hwaddr addr)
+#define BIOS_SIZE (1024 * 1024)
+
+static inline uint32_t raven_pci_io_config(hwaddr addr)
{
int i;
@@ -64,53 +82,133 @@ static inline uint32_t PPC_PCIIO_config(hwaddr addr)
return (addr & 0x7ff) | (i << 11);
}
-static void ppc_pci_io_write(void *opaque, hwaddr addr,
- uint64_t val, unsigned int size)
+static void raven_pci_io_write(void *opaque, hwaddr addr,
+ uint64_t val, unsigned int size)
{
PREPPCIState *s = opaque;
PCIHostState *phb = PCI_HOST_BRIDGE(s);
- pci_data_write(phb->bus, PPC_PCIIO_config(addr), val, size);
+ pci_data_write(phb->bus, raven_pci_io_config(addr), val, size);
}
-static uint64_t ppc_pci_io_read(void *opaque, hwaddr addr,
- unsigned int size)
+static uint64_t raven_pci_io_read(void *opaque, hwaddr addr,
+ unsigned int size)
{
PREPPCIState *s = opaque;
PCIHostState *phb = PCI_HOST_BRIDGE(s);
- return pci_data_read(phb->bus, PPC_PCIIO_config(addr), size);
+ return pci_data_read(phb->bus, raven_pci_io_config(addr), size);
}
-static const MemoryRegionOps PPC_PCIIO_ops = {
- .read = ppc_pci_io_read,
- .write = ppc_pci_io_write,
+static const MemoryRegionOps raven_pci_io_ops = {
+ .read = raven_pci_io_read,
+ .write = raven_pci_io_write,
.endianness = DEVICE_LITTLE_ENDIAN,
};
-static uint64_t ppc_intack_read(void *opaque, hwaddr addr,
- unsigned int size)
+static uint64_t raven_intack_read(void *opaque, hwaddr addr,
+ unsigned int size)
{
return pic_read_irq(isa_pic);
}
-static const MemoryRegionOps PPC_intack_ops = {
- .read = ppc_intack_read,
+static const MemoryRegionOps raven_intack_ops = {
+ .read = raven_intack_read,
.valid = {
.max_access_size = 1,
},
};
-static int prep_map_irq(PCIDevice *pci_dev, int irq_num)
+static inline hwaddr raven_io_address(PREPPCIState *s,
+ hwaddr addr)
+{
+ if (s->contiguous_map == 0) {
+ /* 64 KB contiguous space for IOs */
+ addr &= 0xFFFF;
+ } else {
+ /* 8 MB non-contiguous space for IOs */
+ addr = (addr & 0x1F) | ((addr & 0x007FFF000) >> 7);
+ }
+
+ /* FIXME: handle endianness switch */
+
+ return addr;
+}
+
+static uint64_t raven_io_read(void *opaque, hwaddr addr,
+ unsigned int size)
+{
+ PREPPCIState *s = opaque;
+ uint8_t buf[4];
+
+ addr = raven_io_address(s, addr);
+ address_space_read(&s->pci_io_as, addr + 0x80000000, buf, size);
+
+ if (size == 1) {
+ return buf[0];
+ } else if (size == 2) {
+ return lduw_le_p(buf);
+ } else if (size == 4) {
+ return ldl_le_p(buf);
+ } else {
+ g_assert_not_reached();
+ }
+}
+
+static void raven_io_write(void *opaque, hwaddr addr,
+ uint64_t val, unsigned int size)
+{
+ PREPPCIState *s = opaque;
+ uint8_t buf[4];
+
+ addr = raven_io_address(s, addr);
+
+ if (size == 1) {
+ buf[0] = val;
+ } else if (size == 2) {
+ stw_le_p(buf, val);
+ } else if (size == 4) {
+ stl_le_p(buf, val);
+ } else {
+ g_assert_not_reached();
+ }
+
+ address_space_write(&s->pci_io_as, addr + 0x80000000, buf, size);
+}
+
+static const MemoryRegionOps raven_io_ops = {
+ .read = raven_io_read,
+ .write = raven_io_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .impl.max_access_size = 4,
+ .valid.unaligned = true,
+};
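Editorial worked example for raven_io_address() above, not part of the patch: in the non-contiguous map the low five bits of the CPU offset pass through and bits 12..26 are shifted down by seven to form the ISA port, which is then dispatched into pci_io_as at port + 0x80000000. For instance, offset 0x1FC18 decodes to port 0x3F8 (COM1):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t addr = 0x1FC18;   /* CPU offset inside the 8 MB non-contiguous window */
        uint64_t port = (addr & 0x1F) | ((addr & 0x007FFF000) >> 7);

        printf("0x%" PRIx64 "\n", port);   /* prints 0x3f8 */
        return 0;
    }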
+
+static int raven_map_irq(PCIDevice *pci_dev, int irq_num)
{
return (irq_num + (pci_dev->devfn >> 3)) & 1;
}
-static void prep_set_irq(void *opaque, int irq_num, int level)
+static void raven_set_irq(void *opaque, int irq_num, int level)
{
qemu_irq *pic = opaque;
qemu_set_irq(pic[irq_num] , level);
}
+static AddressSpace *raven_pcihost_set_iommu(PCIBus *bus, void *opaque,
+ int devfn)
+{
+ PREPPCIState *s = opaque;
+
+ return &s->bm_as;
+}
+
+static void raven_change_gpio(void *opaque, int n, int level)
+{
+ PREPPCIState *s = opaque;
+
+ s->contiguous_map = level;
+}
+
static void raven_pcihost_realizefn(DeviceState *d, Error **errp)
{
SysBusDevice *dev = SYS_BUS_DEVICE(d);
@@ -119,29 +217,30 @@ static void raven_pcihost_realizefn(DeviceState *d, Error **errp)
MemoryRegion *address_space_mem = get_system_memory();
int i;
- isa_mem_base = 0xc0000000;
-
for (i = 0; i < PCI_NUM_PINS; i++) {
sysbus_init_irq(dev, &s->irq[i]);
}
- pci_bus_irqs(&s->pci_bus, prep_set_irq, prep_map_irq, s->irq, PCI_NUM_PINS);
+ qdev_init_gpio_in(d, raven_change_gpio, 1);
+
+ pci_bus_irqs(&s->pci_bus, raven_set_irq, raven_map_irq, s->irq,
+ PCI_NUM_PINS);
- memory_region_init_io(&h->conf_mem, OBJECT(h), &pci_host_conf_be_ops, s,
- "pci-conf-idx", 1);
- sysbus_add_io(dev, 0xcf8, &h->conf_mem);
- sysbus_init_ioports(&h->busdev, 0xcf8, 1);
+ memory_region_init_io(&h->conf_mem, OBJECT(h), &pci_host_conf_le_ops, s,
+ "pci-conf-idx", 4);
+ memory_region_add_subregion(&s->pci_io, 0xcf8, &h->conf_mem);
- memory_region_init_io(&h->data_mem, OBJECT(h), &pci_host_data_be_ops, s,
- "pci-conf-data", 1);
- sysbus_add_io(dev, 0xcfc, &h->data_mem);
- sysbus_init_ioports(&h->busdev, 0xcfc, 1);
+ memory_region_init_io(&h->data_mem, OBJECT(h), &pci_host_data_le_ops, s,
+ "pci-conf-data", 4);
+ memory_region_add_subregion(&s->pci_io, 0xcfc, &h->data_mem);
- memory_region_init_io(&h->mmcfg, OBJECT(s), &PPC_PCIIO_ops, s, "pciio", 0x00400000);
+ memory_region_init_io(&h->mmcfg, OBJECT(s), &raven_pci_io_ops, s,
+ "pciio", 0x00400000);
memory_region_add_subregion(address_space_mem, 0x80800000, &h->mmcfg);
- memory_region_init_io(&s->intack, OBJECT(s), &PPC_intack_ops, s, "pci-intack", 1);
- memory_region_add_subregion(address_space_mem, 0xbffffff0, &s->intack);
+ memory_region_init_io(&s->pci_intack, OBJECT(s), &raven_intack_ops, s,
+ "pci-intack", 1);
+ memory_region_add_subregion(address_space_mem, 0xbffffff0, &s->pci_intack);
/* TODO Remove once realize propagates to child devices. */
object_property_set_bool(OBJECT(&s->pci_dev), true, "realized", errp);
@@ -152,11 +251,36 @@ static void raven_pcihost_initfn(Object *obj)
PCIHostState *h = PCI_HOST_BRIDGE(obj);
PREPPCIState *s = RAVEN_PCI_HOST_BRIDGE(obj);
MemoryRegion *address_space_mem = get_system_memory();
- MemoryRegion *address_space_io = get_system_io();
DeviceState *pci_dev;
+ memory_region_init(&s->pci_io, obj, "pci-io", 0x3f800000);
+ memory_region_init_io(&s->pci_io_non_contiguous, obj, &raven_io_ops, s,
+ "pci-io-non-contiguous", 0x00800000);
+ /* Open Hack'Ware hack: real size should be only 0x3f000000 bytes */
+ memory_region_init(&s->pci_memory, obj, "pci-memory",
+ 0x3f000000 + 0xc0000000ULL);
+ address_space_init(&s->pci_io_as, &s->pci_io, "raven-io");
+
+ /* CPU address space */
+ memory_region_add_subregion(address_space_mem, 0x80000000, &s->pci_io);
+ memory_region_add_subregion_overlap(address_space_mem, 0x80000000,
+ &s->pci_io_non_contiguous, 1);
+ memory_region_add_subregion(address_space_mem, 0xc0000000, &s->pci_memory);
pci_bus_new_inplace(&s->pci_bus, sizeof(s->pci_bus), DEVICE(obj), NULL,
- address_space_mem, address_space_io, 0, TYPE_PCI_BUS);
+ &s->pci_memory, &s->pci_io, 0, TYPE_PCI_BUS);
+
+ /* Bus master address space */
+ memory_region_init(&s->bm, obj, "bm-raven", UINT32_MAX);
+ memory_region_init_alias(&s->bm_pci_memory_alias, obj, "bm-pci-memory",
+ &s->pci_memory, 0,
+ memory_region_size(&s->pci_memory));
+ memory_region_init_alias(&s->bm_ram_alias, obj, "bm-system",
+ get_system_memory(), 0, 0x80000000);
+ memory_region_add_subregion(&s->bm, 0 , &s->bm_pci_memory_alias);
+ memory_region_add_subregion(&s->bm, 0x80000000, &s->bm_ram_alias);
+ address_space_init(&s->bm_as, &s->bm, "raven-bm");
+ pci_setup_iommu(&s->pci_bus, raven_pcihost_set_iommu, s);
+
h->bus = &s->pci_bus;
object_initialize(&s->pci_dev, sizeof(s->pci_dev), TYPE_RAVEN_PCI_DEVICE);
@@ -169,10 +293,45 @@ static void raven_pcihost_initfn(Object *obj)
static int raven_init(PCIDevice *d)
{
+ RavenPCIState *s = RAVEN_PCI_DEVICE(d);
+ char *filename;
+ int bios_size = -1;
+
d->config[0x0C] = 0x08; // cache_line_size
d->config[0x0D] = 0x10; // latency_timer
d->config[0x34] = 0x00; // capabilities_pointer
+ memory_region_init_ram(&s->bios, OBJECT(s), "bios", BIOS_SIZE);
+ memory_region_set_readonly(&s->bios, true);
+ memory_region_add_subregion(get_system_memory(), (uint32_t)(-BIOS_SIZE),
+ &s->bios);
+ vmstate_register_ram_global(&s->bios);
+ if (s->bios_name) {
+ filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, s->bios_name);
+ if (filename) {
+ if (s->elf_machine != EM_NONE) {
+ bios_size = load_elf(filename, NULL, NULL, NULL,
+ NULL, NULL, 1, s->elf_machine, 0);
+ }
+ if (bios_size < 0) {
+ bios_size = get_image_size(filename);
+ if (bios_size > 0 && bios_size <= BIOS_SIZE) {
+ hwaddr bios_addr;
+ bios_size = (bios_size + 0xfff) & ~0xfff;
+ bios_addr = (uint32_t)(-BIOS_SIZE);
+ bios_size = load_image_targphys(filename, bios_addr,
+ bios_size);
+ }
+ }
+ }
+ if (bios_size < 0 || bios_size > BIOS_SIZE) {
+ hw_error("qemu: could not load bios image '%s'\n", s->bios_name);
+ }
+ if (filename) {
+ g_free(filename);
+ }
+ }
+
return 0;
}
@@ -212,12 +371,20 @@ static const TypeInfo raven_info = {
.class_init = raven_class_init,
};
+static Property raven_pcihost_properties[] = {
+ DEFINE_PROP_UINT32("elf-machine", PREPPCIState, pci_dev.elf_machine,
+ EM_NONE),
+ DEFINE_PROP_STRING("bios-name", PREPPCIState, pci_dev.bios_name),
+ DEFINE_PROP_END_OF_LIST()
+};
+
static void raven_pcihost_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
dc->realize = raven_pcihost_realizefn;
+ dc->props = raven_pcihost_properties;
dc->fw_name = "pci";
}
diff --git a/hw/pci/pci.c b/hw/pci/pci.c
index 4e0701df38..2a9f08eb0a 100644
--- a/hw/pci/pci.c
+++ b/hw/pci/pci.c
@@ -48,7 +48,6 @@ static void pcibus_dev_print(Monitor *mon, DeviceState *dev, int indent);
static char *pcibus_get_dev_path(DeviceState *dev);
static char *pcibus_get_fw_dev_path(DeviceState *dev);
static void pcibus_reset(BusState *qbus);
-static void pci_bus_finalize(Object *obj);
static Property pci_props[] = {
DEFINE_PROP_PCI_DEVFN("addr", PCIDevice, devfn, -1),
@@ -61,6 +60,34 @@ static Property pci_props[] = {
DEFINE_PROP_END_OF_LIST()
};
+static const VMStateDescription vmstate_pcibus = {
+ .name = "PCIBUS",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .minimum_version_id_old = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_INT32_EQUAL(nirq, PCIBus),
+ VMSTATE_VARRAY_INT32(irq_count, PCIBus,
+ nirq, 0, vmstate_info_int32,
+ int32_t),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static void pci_bus_realize(BusState *qbus, Error **errp)
+{
+ PCIBus *bus = PCI_BUS(qbus);
+
+ vmstate_register(NULL, -1, &vmstate_pcibus, bus);
+}
+
+static void pci_bus_unrealize(BusState *qbus, Error **errp)
+{
+ PCIBus *bus = PCI_BUS(qbus);
+
+ vmstate_unregister(NULL, &vmstate_pcibus, bus);
+}
+
static void pci_bus_class_init(ObjectClass *klass, void *data)
{
BusClass *k = BUS_CLASS(klass);
@@ -68,6 +95,8 @@ static void pci_bus_class_init(ObjectClass *klass, void *data)
k->print_dev = pcibus_dev_print;
k->get_dev_path = pcibus_get_dev_path;
k->get_fw_dev_path = pcibus_get_fw_dev_path;
+ k->realize = pci_bus_realize;
+ k->unrealize = pci_bus_unrealize;
k->reset = pcibus_reset;
}
@@ -75,7 +104,6 @@ static const TypeInfo pci_bus_info = {
.name = TYPE_PCI_BUS,
.parent = TYPE_BUS,
.instance_size = sizeof(PCIBus),
- .instance_finalize = pci_bus_finalize,
.class_init = pci_bus_class_init,
};
@@ -95,17 +123,6 @@ static uint16_t pci_default_sub_device_id = PCI_SUBDEVICE_ID_QEMU;
static QLIST_HEAD(, PCIHostState) pci_host_bridges;
-static const VMStateDescription vmstate_pcibus = {
- .name = "PCIBUS",
- .version_id = 1,
- .minimum_version_id = 1,
- .minimum_version_id_old = 1,
- .fields = (VMStateField []) {
- VMSTATE_INT32_EQUAL(nirq, PCIBus),
- VMSTATE_VARRAY_INT32(irq_count, PCIBus, nirq, 0, vmstate_info_int32, int32_t),
- VMSTATE_END_OF_LIST()
- }
-};
static int pci_bar(PCIDevice *d, int reg)
{
uint8_t type;
@@ -172,9 +189,9 @@ static void pci_do_device_reset(PCIDevice *dev)
{
int r;
- dev->irq_state = 0;
- pci_update_irq_status(dev);
pci_device_deassert_intx(dev);
+ assert(dev->irq_state == 0);
+
/* Clear all writable bits */
pci_word_test_and_clear_mask(dev->config + PCI_COMMAND,
pci_get_word(dev->wmask + PCI_COMMAND) |
@@ -299,8 +316,6 @@ static void pci_bus_init(PCIBus *bus, DeviceState *parent,
QLIST_INIT(&bus->child);
pci_host_bus_register(bus, parent);
-
- vmstate_register(NULL, -1, &vmstate_pcibus, bus);
}
bool pci_bus_is_express(PCIBus *bus)
@@ -369,12 +384,6 @@ int pci_bus_num(PCIBus *s)
return s->parent_dev->config[PCI_SECONDARY_BUS];
}
-static void pci_bus_finalize(Object *obj)
-{
- PCIBus *bus = PCI_BUS(obj);
- vmstate_unregister(NULL, &vmstate_pcibus, bus);
-}
-
static int get_pci_config_device(QEMUFile *f, void *pv, size_t size)
{
PCIDevice *s = container_of(pv, PCIDevice, config);
diff --git a/hw/pci/pci_host.c b/hw/pci/pci_host.c
index 77c7d1f86b..3e26f9256c 100644
--- a/hw/pci/pci_host.c
+++ b/hw/pci/pci_host.c
@@ -142,8 +142,9 @@ static uint64_t pci_host_data_read(void *opaque,
{
PCIHostState *s = opaque;
uint32_t val;
- if (!(s->config_reg & (1 << 31)))
+ if (!(s->config_reg & (1U << 31))) {
return 0xffffffff;
+ }
val = pci_data_read(s->bus, s->config_reg | (addr & 3), len);
PCI_DPRINTF("read addr " TARGET_FMT_plx " len %d val %x\n",
addr, len, val);
diff --git a/hw/pcmcia/pxa2xx.c b/hw/pcmcia/pxa2xx.c
index 8f17596cc3..96f377453d 100644
--- a/hw/pcmcia/pxa2xx.c
+++ b/hw/pcmcia/pxa2xx.c
@@ -198,7 +198,9 @@ static void pxa2xx_pcmcia_initfn(Object *obj)
s->slot.irq = qemu_allocate_irqs(pxa2xx_pcmcia_set_irq, s, 1)[0];
object_property_add_link(obj, "card", TYPE_PCMCIA_CARD,
- (Object **)&s->card, NULL);
+ (Object **)&s->card,
+ NULL, /* read-only property */
+ 0, NULL);
}
/* Insert a new card into a slot */
diff --git a/hw/ppc/e500.c b/hw/ppc/e500.c
index 8a08752613..f984b3e9a9 100644
--- a/hw/ppc/e500.c
+++ b/hw/ppc/e500.c
@@ -472,14 +472,13 @@ static void ppce500_cpu_reset_sec(void *opaque)
{
PowerPCCPU *cpu = opaque;
CPUState *cs = CPU(cpu);
- CPUPPCState *env = &cpu->env;
cpu_reset(cs);
/* Secondary CPU starts in halted state for now. Needs to change when
implementing non-kernel boot. */
cs->halted = 1;
- env->exception_index = EXCP_HLT;
+ cs->exception_index = EXCP_HLT;
}
static void ppce500_cpu_reset(void *opaque)
@@ -650,7 +649,7 @@ void ppce500_init(QEMUMachineInitArgs *args, PPCE500Params *params)
input = (qemu_irq *)env->irq_inputs;
irqs[i][OPENPIC_OUTPUT_INT] = input[PPCE500_INPUT_INT];
irqs[i][OPENPIC_OUTPUT_CINT] = input[PPCE500_INPUT_CINT];
- env->spr[SPR_BOOKE_PIR] = cs->cpu_index = i;
+ env->spr_cb[SPR_BOOKE_PIR].default_value = cs->cpu_index = i;
env->mpic_iack = MPC8544_CCSRBAR_BASE +
MPC8544_MPIC_REGS_OFFSET + 0xa0;
diff --git a/hw/ppc/ppc.c b/hw/ppc/ppc.c
index 0e82719b69..71df471746 100644
--- a/hw/ppc/ppc.c
+++ b/hw/ppc/ppc.c
@@ -620,6 +620,13 @@ static void cpu_ppc_tb_start (CPUPPCState *env)
}
}
+bool ppc_decr_clear_on_delivery(CPUPPCState *env)
+{
+ ppc_tb_t *tb_env = env->tb_env;
+ int flags = PPC_DECR_UNDERFLOW_TRIGGERED | PPC_DECR_UNDERFLOW_LEVEL;
+ return ((tb_env->flags & flags) == PPC_DECR_UNDERFLOW_TRIGGERED);
+}
+
static inline uint32_t _cpu_ppc_load_decr(CPUPPCState *env, uint64_t next)
{
ppc_tb_t *tb_env = env->tb_env;
@@ -677,6 +684,11 @@ static inline void cpu_ppc_decr_excp(PowerPCCPU *cpu)
ppc_set_irq(cpu, PPC_INTERRUPT_DECR, 1);
}
+static inline void cpu_ppc_decr_lower(PowerPCCPU *cpu)
+{
+ ppc_set_irq(cpu, PPC_INTERRUPT_DECR, 0);
+}
+
static inline void cpu_ppc_hdecr_excp(PowerPCCPU *cpu)
{
/* Raise it */
@@ -684,11 +696,16 @@ static inline void cpu_ppc_hdecr_excp(PowerPCCPU *cpu)
ppc_set_irq(cpu, PPC_INTERRUPT_HDECR, 1);
}
+static inline void cpu_ppc_hdecr_lower(PowerPCCPU *cpu)
+{
+ ppc_set_irq(cpu, PPC_INTERRUPT_HDECR, 0);
+}
+
static void __cpu_ppc_store_decr(PowerPCCPU *cpu, uint64_t *nextp,
QEMUTimer *timer,
- void (*raise_excp)(PowerPCCPU *),
- uint32_t decr, uint32_t value,
- int is_excp)
+ void (*raise_excp)(void *),
+ void (*lower_excp)(PowerPCCPU *),
+ uint32_t decr, uint32_t value)
{
CPUPPCState *env = &cpu->env;
ppc_tb_t *tb_env = env->tb_env;
@@ -702,59 +719,74 @@ static void __cpu_ppc_store_decr(PowerPCCPU *cpu, uint64_t *nextp,
return;
}
- now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
- next = now + muldiv64(value, get_ticks_per_sec(), tb_env->decr_freq);
- if (is_excp) {
- next += *nextp - now;
+ /*
+ * Going from 2 -> 1, 1 -> 0 or 0 -> -1 is the event to generate a DEC
+ * interrupt.
+ *
+ * If we get a really small DEC value, we can assume that by the time we
+ * handled it we should inject an interrupt already.
+ *
+ * On MSB level based DEC implementations the MSB always means the interrupt
+ * is pending, so raise it on those.
+ *
+ * On MSB edge based DEC implementations the MSB going from 0 -> 1 triggers
+ * an edge interrupt, so raise it here too.
+ */
+ if ((value < 3) ||
+ ((tb_env->flags & PPC_DECR_UNDERFLOW_LEVEL) && (value & 0x80000000)) ||
+ ((tb_env->flags & PPC_DECR_UNDERFLOW_TRIGGERED) && (value & 0x80000000)
+ && !(decr & 0x80000000))) {
+ (*raise_excp)(cpu);
+ return;
}
- if (next == now) {
- next++;
+
+ /* On MSB level based systems a 0 for the MSB stops interrupt delivery */
+ if (!(value & 0x80000000) && (tb_env->flags & PPC_DECR_UNDERFLOW_LEVEL)) {
+ (*lower_excp)(cpu);
}
+
+ /* Calculate the next timer event */
+ now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+ next = now + muldiv64(value, get_ticks_per_sec(), tb_env->decr_freq);
*nextp = next;
+
/* Adjust timer */
timer_mod(timer, next);
-
- /* If we set a negative value and the decrementer was positive, raise an
- * exception.
- */
- if ((tb_env->flags & PPC_DECR_UNDERFLOW_TRIGGERED)
- && (value & 0x80000000)
- && !(decr & 0x80000000)) {
- (*raise_excp)(cpu);
- }
}
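Editorial recap of __cpu_ppc_store_decr() above, not part of the patch: on edge-triggered implementations (PPC_DECR_UNDERFLOW_TRIGGERED) the DEC interrupt fires when the MSB goes from 0 to 1, on level-based ones (PPC_DECR_UNDERFLOW_LEVEL) it tracks the MSB, so a store that clears the MSB must also lower the line, and very small values raise it immediately instead of arming a timer. The decision distilled into a standalone predicate (+1 raise, -1 lower, 0 just rearm the timer):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    int dec_action(uint32_t old_decr, uint32_t value, bool level, bool edge)
    {
        bool msb_was = old_decr & 0x80000000;
        bool msb_now = value & 0x80000000;

        if (value < 3 ||                       /* too small to bother arming a timer */
            (level && msb_now) ||              /* level: MSB set means pending */
            (edge && msb_now && !msb_was)) {   /* edge: MSB went 0 -> 1 */
            return +1;
        }
        if (level && !msb_now) {
            return -1;                         /* level: MSB clear stops delivery */
        }
        return 0;
    }

    int main(void)
    {
        /* Edge-triggered DEC, MSB going 0 -> 1: expect +1 (raise). */
        printf("%d\n", dec_action(0x10, 0xFFFFFFFF, false, true));
        return 0;
    }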
static inline void _cpu_ppc_store_decr(PowerPCCPU *cpu, uint32_t decr,
- uint32_t value, int is_excp)
+ uint32_t value)
{
ppc_tb_t *tb_env = cpu->env.tb_env;
__cpu_ppc_store_decr(cpu, &tb_env->decr_next, tb_env->decr_timer,
- &cpu_ppc_decr_excp, decr, value, is_excp);
+ tb_env->decr_timer->cb, &cpu_ppc_decr_lower, decr,
+ value);
}
void cpu_ppc_store_decr (CPUPPCState *env, uint32_t value)
{
PowerPCCPU *cpu = ppc_env_get_cpu(env);
- _cpu_ppc_store_decr(cpu, cpu_ppc_load_decr(env), value, 0);
+ _cpu_ppc_store_decr(cpu, cpu_ppc_load_decr(env), value);
}
static void cpu_ppc_decr_cb(void *opaque)
{
PowerPCCPU *cpu = opaque;
- _cpu_ppc_store_decr(cpu, 0x00000000, 0xFFFFFFFF, 1);
+ cpu_ppc_decr_excp(cpu);
}
static inline void _cpu_ppc_store_hdecr(PowerPCCPU *cpu, uint32_t hdecr,
- uint32_t value, int is_excp)
+ uint32_t value)
{
ppc_tb_t *tb_env = cpu->env.tb_env;
if (tb_env->hdecr_timer != NULL) {
__cpu_ppc_store_decr(cpu, &tb_env->hdecr_next, tb_env->hdecr_timer,
- &cpu_ppc_hdecr_excp, hdecr, value, is_excp);
+ tb_env->hdecr_timer->cb, &cpu_ppc_hdecr_lower,
+ hdecr, value);
}
}
@@ -762,14 +794,14 @@ void cpu_ppc_store_hdecr (CPUPPCState *env, uint32_t value)
{
PowerPCCPU *cpu = ppc_env_get_cpu(env);
- _cpu_ppc_store_hdecr(cpu, cpu_ppc_load_hdecr(env), value, 0);
+ _cpu_ppc_store_hdecr(cpu, cpu_ppc_load_hdecr(env), value);
}
static void cpu_ppc_hdecr_cb(void *opaque)
{
PowerPCCPU *cpu = opaque;
- _cpu_ppc_store_hdecr(cpu, 0x00000000, 0xFFFFFFFF, 1);
+ cpu_ppc_hdecr_excp(cpu);
}
static void cpu_ppc_store_purr(PowerPCCPU *cpu, uint64_t value)
@@ -792,8 +824,8 @@ static void cpu_ppc_set_tb_clk (void *opaque, uint32_t freq)
* if a decrementer exception is pending when it enables msr_ee at startup,
* it's not ready to handle it...
*/
- _cpu_ppc_store_decr(cpu, 0xFFFFFFFF, 0xFFFFFFFF, 0);
- _cpu_ppc_store_hdecr(cpu, 0xFFFFFFFF, 0xFFFFFFFF, 0);
+ _cpu_ppc_store_decr(cpu, 0xFFFFFFFF, 0xFFFFFFFF);
+ _cpu_ppc_store_hdecr(cpu, 0xFFFFFFFF, 0xFFFFFFFF);
cpu_ppc_store_purr(cpu, 0x0000000000000000ULL);
}
@@ -806,6 +838,10 @@ clk_setup_cb cpu_ppc_tb_init (CPUPPCState *env, uint32_t freq)
tb_env = g_malloc0(sizeof(ppc_tb_t));
env->tb_env = tb_env;
tb_env->flags = PPC_DECR_UNDERFLOW_TRIGGERED;
+ if (env->insns_flags & PPC_SEGMENT_64B) {
+ /* All Book3S 64bit CPUs implement level based DEC logic */
+ tb_env->flags |= PPC_DECR_UNDERFLOW_LEVEL;
+ }
/* Create new timer */
tb_env->decr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_ppc_decr_cb, cpu);
if (0) {
@@ -1002,7 +1038,7 @@ static void cpu_4xx_wdt_cb (void *opaque)
case 0x1:
timer_mod(ppc40x_timer->wdt_timer, next);
ppc40x_timer->wdt_next = next;
- env->spr[SPR_40x_TSR] |= 1 << 31;
+ env->spr[SPR_40x_TSR] |= 1U << 31;
break;
case 0x2:
timer_mod(ppc40x_timer->wdt_timer, next);
diff --git a/hw/ppc/ppc405_uc.c b/hw/ppc/ppc405_uc.c
index ca520e8859..54ba59e73a 100644
--- a/hw/ppc/ppc405_uc.c
+++ b/hw/ppc/ppc405_uc.c
@@ -44,7 +44,7 @@
ram_addr_t ppc405_set_bootinfo (CPUPPCState *env, ppc4xx_bd_info_t *bd,
uint32_t flags)
{
- CPUState *cs = ENV_GET_CPU(env);
+ CPUState *cs = CPU(ppc_env_get_cpu(env));
ram_addr_t bdloc;
int i, n;
diff --git a/hw/ppc/ppc440_bamboo.c b/hw/ppc/ppc440_bamboo.c
index ec15bab0b5..2ddc2ed4b9 100644
--- a/hw/ppc/ppc440_bamboo.c
+++ b/hw/ppc/ppc440_bamboo.c
@@ -128,7 +128,7 @@ static void mmubooke_create_initial_mapping(CPUPPCState *env,
tlb->attr = 0;
tlb->prot = PAGE_VALID | ((PAGE_READ | PAGE_WRITE | PAGE_EXEC) << 4);
- tlb->size = 1 << 31; /* up to 0x80000000 */
+ tlb->size = 1U << 31; /* up to 0x80000000 */
tlb->EPN = va & TARGET_PAGE_MASK;
tlb->RPN = pa & TARGET_PAGE_MASK;
tlb->PID = 0;
@@ -136,7 +136,7 @@ static void mmubooke_create_initial_mapping(CPUPPCState *env,
tlb = &env->tlb.tlbe[1];
tlb->attr = 0;
tlb->prot = PAGE_VALID | ((PAGE_READ | PAGE_WRITE | PAGE_EXEC) << 4);
- tlb->size = 1 << 31; /* up to 0xffffffff */
+ tlb->size = 1U << 31; /* up to 0xffffffff */
tlb->EPN = 0x80000000 & TARGET_PAGE_MASK;
tlb->RPN = 0x80000000 & TARGET_PAGE_MASK;
tlb->PID = 0;
diff --git a/hw/ppc/ppc4xx_devs.c b/hw/ppc/ppc4xx_devs.c
index 9160ee7769..8a43111a51 100644
--- a/hw/ppc/ppc4xx_devs.c
+++ b/hw/ppc/ppc4xx_devs.c
@@ -161,7 +161,7 @@ static void ppcuic_set_irq (void *opaque, int irq_num, int level)
uint32_t mask, sr;
uic = opaque;
- mask = 1 << (31-irq_num);
+ mask = 1U << (31-irq_num);
LOG_UIC("%s: irq %d level %d uicsr %08" PRIx32
" mask %08" PRIx32 " => %08" PRIx32 " %08" PRIx32 "\n",
__func__, irq_num, level,
diff --git a/hw/ppc/ppc_booke.c b/hw/ppc/ppc_booke.c
index d8399602d6..8b94da6b08 100644
--- a/hw/ppc/ppc_booke.c
+++ b/hw/ppc/ppc_booke.c
@@ -34,15 +34,15 @@
/* Timer Control Register */
#define TCR_WP_SHIFT 30 /* Watchdog Timer Period */
-#define TCR_WP_MASK (0x3 << TCR_WP_SHIFT)
+#define TCR_WP_MASK (0x3U << TCR_WP_SHIFT)
#define TCR_WRC_SHIFT 28 /* Watchdog Timer Reset Control */
-#define TCR_WRC_MASK (0x3 << TCR_WRC_SHIFT)
-#define TCR_WIE (1 << 27) /* Watchdog Timer Interrupt Enable */
-#define TCR_DIE (1 << 26) /* Decrementer Interrupt Enable */
+#define TCR_WRC_MASK (0x3U << TCR_WRC_SHIFT)
+#define TCR_WIE (1U << 27) /* Watchdog Timer Interrupt Enable */
+#define TCR_DIE (1U << 26) /* Decrementer Interrupt Enable */
#define TCR_FP_SHIFT 24 /* Fixed-Interval Timer Period */
-#define TCR_FP_MASK (0x3 << TCR_FP_SHIFT)
-#define TCR_FIE (1 << 23) /* Fixed-Interval Timer Interrupt Enable */
-#define TCR_ARE (1 << 22) /* Auto-Reload Enable */
+#define TCR_FP_MASK (0x3U << TCR_FP_SHIFT)
+#define TCR_FIE (1U << 23) /* Fixed-Interval Timer Interrupt Enable */
+#define TCR_ARE (1U << 22) /* Auto-Reload Enable */
/* Timer Control Register (e500 specific fields) */
@@ -53,12 +53,12 @@
/* Timer Status Register */
-#define TSR_FIS (1 << 26) /* Fixed-Interval Timer Interrupt Status */
-#define TSR_DIS (1 << 27) /* Decrementer Interrupt Status */
+#define TSR_FIS (1U << 26) /* Fixed-Interval Timer Interrupt Status */
+#define TSR_DIS (1U << 27) /* Decrementer Interrupt Status */
#define TSR_WRS_SHIFT 28 /* Watchdog Timer Reset Status */
-#define TSR_WRS_MASK (0x3 << TSR_WRS_SHIFT)
-#define TSR_WIS (1 << 30) /* Watchdog Timer Interrupt Status */
-#define TSR_ENW (1 << 31) /* Enable Next Watchdog Timer */
+#define TSR_WRS_MASK (0x3U << TSR_WRS_SHIFT)
+#define TSR_WIS (1U << 30) /* Watchdog Timer Interrupt Status */
+#define TSR_ENW (1U << 31) /* Enable Next Watchdog Timer */
typedef struct booke_timer_t booke_timer_t;
struct booke_timer_t {
diff --git a/hw/ppc/ppce500_spin.c b/hw/ppc/ppce500_spin.c
index 78b23fa597..d49f2b8803 100644
--- a/hw/ppc/ppce500_spin.c
+++ b/hw/ppc/ppce500_spin.c
@@ -65,9 +65,9 @@ static void spin_reset(void *opaque)
for (i = 0; i < MAX_CPUS; i++) {
SpinInfo *info = &s->spin[i];
- info->pir = i;
- info->r3 = i;
- info->addr = 1;
+ stl_p(&info->pir, i);
+ stq_p(&info->r3, i);
+ stq_p(&info->addr, 1);
}
}
@@ -117,7 +117,7 @@ static void spin_kick(void *data)
mmubooke_create_initial_mapping(env, 0, map_start, map_size);
cpu->halted = 0;
- env->exception_index = -1;
+ cpu->exception_index = -1;
cpu->stopped = false;
qemu_cpu_kick(cpu);
}
diff --git a/hw/ppc/prep.c b/hw/ppc/prep.c
index 9f8538cd20..e2436512f7 100644
--- a/hw/ppc/prep.c
+++ b/hw/ppc/prep.c
@@ -185,6 +185,7 @@ typedef struct sysctrl_t {
uint8_t state;
uint8_t syscontrol;
int contiguous_map;
+ qemu_irq contiguous_map_irq;
int endian;
} sysctrl_t;
@@ -253,6 +254,7 @@ static void PREP_io_800_writeb (void *opaque, uint32_t addr, uint32_t val)
case 0x0850:
/* I/O map type register */
sysctrl->contiguous_map = val & 0x01;
+ qemu_set_irq(sysctrl->contiguous_map_irq, sysctrl->contiguous_map);
break;
default:
printf("ERROR: unaffected IO port write: %04" PRIx32
@@ -327,91 +329,6 @@ static uint32_t PREP_io_800_readb (void *opaque, uint32_t addr)
return retval;
}
-static inline hwaddr prep_IO_address(sysctrl_t *sysctrl,
- hwaddr addr)
-{
- if (sysctrl->contiguous_map == 0) {
- /* 64 KB contiguous space for IOs */
- addr &= 0xFFFF;
- } else {
- /* 8 MB non-contiguous space for IOs */
- addr = (addr & 0x1F) | ((addr & 0x007FFF000) >> 7);
- }
-
- return addr;
-}
-
-static void PPC_prep_io_writeb (void *opaque, hwaddr addr,
- uint32_t value)
-{
- sysctrl_t *sysctrl = opaque;
-
- addr = prep_IO_address(sysctrl, addr);
- cpu_outb(addr, value);
-}
-
-static uint32_t PPC_prep_io_readb (void *opaque, hwaddr addr)
-{
- sysctrl_t *sysctrl = opaque;
- uint32_t ret;
-
- addr = prep_IO_address(sysctrl, addr);
- ret = cpu_inb(addr);
-
- return ret;
-}
-
-static void PPC_prep_io_writew (void *opaque, hwaddr addr,
- uint32_t value)
-{
- sysctrl_t *sysctrl = opaque;
-
- addr = prep_IO_address(sysctrl, addr);
- PPC_IO_DPRINTF("0x" TARGET_FMT_plx " => 0x%08" PRIx32 "\n", addr, value);
- cpu_outw(addr, value);
-}
-
-static uint32_t PPC_prep_io_readw (void *opaque, hwaddr addr)
-{
- sysctrl_t *sysctrl = opaque;
- uint32_t ret;
-
- addr = prep_IO_address(sysctrl, addr);
- ret = cpu_inw(addr);
- PPC_IO_DPRINTF("0x" TARGET_FMT_plx " <= 0x%08" PRIx32 "\n", addr, ret);
-
- return ret;
-}
-
-static void PPC_prep_io_writel (void *opaque, hwaddr addr,
- uint32_t value)
-{
- sysctrl_t *sysctrl = opaque;
-
- addr = prep_IO_address(sysctrl, addr);
- PPC_IO_DPRINTF("0x" TARGET_FMT_plx " => 0x%08" PRIx32 "\n", addr, value);
- cpu_outl(addr, value);
-}
-
-static uint32_t PPC_prep_io_readl (void *opaque, hwaddr addr)
-{
- sysctrl_t *sysctrl = opaque;
- uint32_t ret;
-
- addr = prep_IO_address(sysctrl, addr);
- ret = cpu_inl(addr);
- PPC_IO_DPRINTF("0x" TARGET_FMT_plx " <= 0x%08" PRIx32 "\n", addr, ret);
-
- return ret;
-}
-
-static const MemoryRegionOps PPC_prep_io_ops = {
- .old_mmio = {
- .read = { PPC_prep_io_readb, PPC_prep_io_readw, PPC_prep_io_readl },
- .write = { PPC_prep_io_writeb, PPC_prep_io_writew, PPC_prep_io_writel },
- },
- .endianness = DEVICE_NATIVE_ENDIAN,
-};
#define NVRAM_SIZE 0x2000
@@ -456,17 +373,15 @@ static void ppc_prep_init(QEMUMachineInitArgs *args)
MemoryRegion *sysmem = get_system_memory();
PowerPCCPU *cpu = NULL;
CPUPPCState *env = NULL;
- char *filename;
nvram_t nvram;
M48t59State *m48t59;
- MemoryRegion *PPC_io_memory = g_new(MemoryRegion, 1);
PortioList *port_list = g_new(PortioList, 1);
#if 0
MemoryRegion *xcsr = g_new(MemoryRegion, 1);
#endif
- int linux_boot, i, nb_nics1, bios_size;
+ int linux_boot, i, nb_nics1;
MemoryRegion *ram = g_new(MemoryRegion, 1);
- MemoryRegion *bios = g_new(MemoryRegion, 1);
+ MemoryRegion *vga = g_new(MemoryRegion, 1);
uint32_t kernel_base, initrd_base;
long kernel_size, initrd_size;
DeviceState *dev;
@@ -509,43 +424,6 @@ static void ppc_prep_init(QEMUMachineInitArgs *args)
vmstate_register_ram_global(ram);
memory_region_add_subregion(sysmem, 0, ram);
- /* allocate and load BIOS */
- memory_region_init_ram(bios, NULL, "ppc_prep.bios", BIOS_SIZE);
- memory_region_set_readonly(bios, true);
- memory_region_add_subregion(sysmem, (uint32_t)(-BIOS_SIZE), bios);
- vmstate_register_ram_global(bios);
- if (bios_name == NULL)
- bios_name = BIOS_FILENAME;
- filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name);
- if (filename) {
- bios_size = load_elf(filename, NULL, NULL, NULL,
- NULL, NULL, 1, ELF_MACHINE, 0);
- if (bios_size < 0) {
- bios_size = get_image_size(filename);
- if (bios_size > 0 && bios_size <= BIOS_SIZE) {
- hwaddr bios_addr;
- bios_size = (bios_size + 0xfff) & ~0xfff;
- bios_addr = (uint32_t)(-bios_size);
- bios_size = load_image_targphys(filename, bios_addr, bios_size);
- }
- if (bios_size > BIOS_SIZE) {
- fprintf(stderr, "qemu: PReP bios '%s' is too large (0x%x)\n",
- bios_name, bios_size);
- exit(1);
- }
- }
- } else {
- bios_size = -1;
- }
- if (bios_size < 0 && !qtest_enabled()) {
- fprintf(stderr, "qemu: could not load PPC PReP bios '%s'\n",
- bios_name);
- exit(1);
- }
- if (filename) {
- g_free(filename);
- }
-
if (linux_boot) {
kernel_base = KERNEL_LOAD_ADDR;
/* now we can load the kernel */
@@ -593,6 +471,11 @@ static void ppc_prep_init(QEMUMachineInitArgs *args)
}
dev = qdev_create(NULL, "raven-pcihost");
+ if (bios_name == NULL) {
+ bios_name = BIOS_FILENAME;
+ }
+ qdev_prop_set_string(dev, "bios-name", bios_name);
+ qdev_prop_set_uint32(dev, "elf-machine", ELF_MACHINE);
pcihost = PCI_HOST_BRIDGE(dev);
object_property_add_child(qdev_get_machine(), "raven", OBJECT(dev), NULL);
qdev_init_nofail(dev);
@@ -601,6 +484,7 @@ static void ppc_prep_init(QEMUMachineInitArgs *args)
fprintf(stderr, "Couldn't create PCI host controller.\n");
exit(1);
}
+ sysctrl->contiguous_map_irq = qdev_get_gpio_in(dev, 0);
/* PCI -> ISA bridge */
pci = pci_create_simple(pci_bus, PCI_DEVFN(1, 0), "i82378");
@@ -621,13 +505,16 @@ static void ppc_prep_init(QEMUMachineInitArgs *args)
qdev_prop_set_uint8(dev, "config", 13); /* fdc, ser0, ser1, par0 */
qdev_init_nofail(dev);
- /* Register 8 MB of ISA IO space (needed for non-contiguous map) */
- memory_region_init_io(PPC_io_memory, NULL, &PPC_prep_io_ops, sysctrl,
- "ppc-io", 0x00800000);
- memory_region_add_subregion(sysmem, 0x80000000, PPC_io_memory);
-
/* init basic PC hardware */
pci_vga_init(pci_bus);
+ /* Open Hack'Ware hack: PCI BAR#0 is programmed to 0xf0000000.
+ * While bios will access framebuffer at 0xf0000000, real physical
+ * address is 0xf0000000 + 0xc0000000 (PCI memory base).
+ * Alias the wrong memory accesses to the right place.
+ */
+ memory_region_init_alias(vga, NULL, "vga-alias", pci_address_space(pci),
+ 0xf0000000, 0x1000000);
+ memory_region_add_subregion_overlap(sysmem, 0xf0000000, vga, 10);
nb_nics1 = nb_nics;
if (nb_nics1 > NE2000_NB_MAX)
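The prep.c changes above drop the hand-rolled PPC_prep_io MMIO ops and the in-board BIOS loading (now driven through the raven-pcihost "bios-name"/"elf-machine" properties), and add an alias so that Open Hack'Ware's framebuffer writes at 0xf0000000 reach the VGA BAR in PCI memory space. A toy model of that alias lookup, not the QEMU MemoryRegion API; the constants come from the hunk's comment:

/* Toy model of the alias: accesses in [0xf0000000, 0xf1000000) are
 * redirected to the same offset of the PCI memory space, where the VGA BAR
 * was programmed by Open Hack'Ware. */
#include <stdint.h>
#include <stdio.h>

struct target { const char *space; uint64_t offset; };

static struct target resolve(uint32_t cpu_addr)
{
    const uint32_t base = 0xf0000000u, size = 0x1000000u;

    if (cpu_addr - base < size) {          /* inside the alias window */
        struct target hit = { "pci-memory", cpu_addr };
        return hit;
    }
    struct target miss = { "system-memory", cpu_addr };
    return miss;
}

int main(void)
{
    struct target t = resolve(0xf0001234u);
    printf("0xf0001234 -> %s @ 0x%llx\n", t.space, (unsigned long long)t.offset);
    return 0;
}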
diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
index bf46c380ec..a11e1217b9 100644
--- a/hw/ppc/spapr.c
+++ b/hw/ppc/spapr.c
@@ -26,6 +26,7 @@
*/
#include "sysemu/sysemu.h"
#include "hw/hw.h"
+#include "hw/fw-path-provider.h"
#include "elf.h"
#include "net/net.h"
#include "sysemu/blockdev.h"
@@ -45,6 +46,8 @@
#include "hw/pci/msi.h"
#include "hw/pci/pci.h"
+#include "hw/scsi/scsi.h"
+#include "hw/virtio/virtio-scsi.h"
#include "exec/address-spaces.h"
#include "hw/usb.h"
@@ -81,6 +84,8 @@
#define HTAB_SIZE(spapr) (1ULL << ((spapr)->htab_shift))
+#define TYPE_SPAPR_MACHINE "spapr-machine"
+
sPAPREnvironment *spapr;
int spapr_allocate_irq(int hint, bool lsi)
@@ -598,7 +603,9 @@ static void spapr_finalize_fdt(sPAPREnvironment *spapr,
hwaddr rtas_addr,
hwaddr rtas_size)
{
- int ret;
+ int ret, i;
+ size_t cb = 0;
+ char *bootlist;
void *fdt;
sPAPRPHBState *phb;
@@ -640,6 +647,21 @@ static void spapr_finalize_fdt(sPAPREnvironment *spapr,
fprintf(stderr, "Couldn't finalize CPU device tree properties\n");
}
+ bootlist = get_boot_devices_list(&cb, true);
+ if (cb && bootlist) {
+ int offset = fdt_path_offset(fdt, "/chosen");
+ if (offset < 0) {
+ exit(1);
+ }
+ for (i = 0; i < cb; i++) {
+ if (bootlist[i] == '\n') {
+ bootlist[i] = ' ';
+ }
+
+ }
+ ret = fdt_setprop_string(fdt, offset, "qemu,boot-list", bootlist);
+ }
+
if (!spapr->has_graphics) {
spapr_populate_chosen_stdout(fdt, spapr->vio_bus);
}
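The hunk above flattens the boot order returned by get_boot_devices_list() into a single space-separated string and stores it in the device tree as "qemu,boot-list" under /chosen. A standalone sketch of just the string conversion; the device paths below are invented examples, and fdt_setprop_string() is the real libfdt call that consumes the result:

#include <stdio.h>
#include <string.h>

int main(void)
{
    char bootlist[] = "/vdevice/v-scsi@2000/disk@8000000000000000\n"
                      "/pci@800000020000000/ethernet@0";
    size_t cb = strlen(bootlist);

    for (size_t i = 0; i < cb; i++) {
        if (bootlist[i] == '\n') {
            bootlist[i] = ' ';       /* newline-separated -> space-separated */
        }
    }
    /* this string would then be written with:
     * fdt_setprop_string(fdt, offset, "qemu,boot-list", bootlist); */
    printf("qemu,boot-list = \"%s\"\n", bootlist);
    return 0;
}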
@@ -781,13 +803,15 @@ static int spapr_vga_init(PCIBus *pci_bus)
{
switch (vga_interface_type) {
case VGA_NONE:
+ return false;
+ case VGA_DEVICE:
+ return true;
case VGA_STD:
return pci_vga_init(pci_bus) != NULL;
default:
fprintf(stderr, "This vga model is not supported,"
"currently it only supports -vga std\n");
exit(0);
- break;
}
}
@@ -1408,9 +1432,86 @@ static QEMUMachine spapr_machine = {
.kvm_type = spapr_kvm_type,
};
-static void spapr_machine_init(void)
+/*
+ * Implementation of an interface to adjust firmware patch
+ * for the bootindex property handling.
+ */
+static char *spapr_get_fw_dev_path(FWPathProvider *p, BusState *bus,
+ DeviceState *dev)
+{
+#define CAST(type, obj, name) \
+ ((type *)object_dynamic_cast(OBJECT(obj), (name)))
+ SCSIDevice *d = CAST(SCSIDevice, dev, TYPE_SCSI_DEVICE);
+ sPAPRPHBState *phb = CAST(sPAPRPHBState, dev, TYPE_SPAPR_PCI_HOST_BRIDGE);
+
+ if (d) {
+ void *spapr = CAST(void, bus->parent, "spapr-vscsi");
+ VirtIOSCSI *virtio = CAST(VirtIOSCSI, bus->parent, TYPE_VIRTIO_SCSI);
+ USBDevice *usb = CAST(USBDevice, bus->parent, TYPE_USB_DEVICE);
+
+ if (spapr) {
+ /*
+ * Replace "channel@0/disk@0,0" with "disk@8000000000000000":
+ * We use SRP luns of the form 8000 | (bus << 8) | (id << 5) | lun
+ * in the top 16 bits of the 64-bit LUN
+ */
+ unsigned id = 0x8000 | (d->id << 8) | d->lun;
+ return g_strdup_printf("%s@%"PRIX64, qdev_fw_name(dev),
+ (uint64_t)id << 48);
+ } else if (virtio) {
+ /*
+ * We use SRP luns of the form 01000000 | (target << 8) | lun
+ * in the top 32 bits of the 64-bit LUN
+ * Note: the quote above is from SLOF and it is wrong,
+ * the actual binding is:
+ * swap 0100 or 10 << or 20 << ( target lun-id -- srplun )
+ */
+ unsigned id = 0x1000000 | (d->id << 16) | d->lun;
+ return g_strdup_printf("%s@%"PRIX64, qdev_fw_name(dev),
+ (uint64_t)id << 32);
+ } else if (usb) {
+ /*
+ * We use SRP luns of the form 01000000 | (usb-port << 16) | lun
+ * in the top 32 bits of the 64-bit LUN
+ */
+ unsigned usb_port = atoi(usb->port->path);
+ unsigned id = 0x1000000 | (usb_port << 16) | d->lun;
+ return g_strdup_printf("%s@%"PRIX64, qdev_fw_name(dev),
+ (uint64_t)id << 32);
+ }
+ }
+
+ if (phb) {
+ /* Replace "pci" with "pci@800000020000000" */
+ return g_strdup_printf("pci@%"PRIX64, phb->buid);
+ }
+
+ return NULL;
+}
+
+static void spapr_machine_class_init(ObjectClass *oc, void *data)
+{
+ MachineClass *mc = MACHINE_CLASS(oc);
+ FWPathProviderClass *fwc = FW_PATH_PROVIDER_CLASS(oc);
+
+ mc->qemu_machine = data;
+ fwc->get_dev_path = spapr_get_fw_dev_path;
+}
+
+static const TypeInfo spapr_machine_info = {
+ .name = TYPE_SPAPR_MACHINE,
+ .parent = TYPE_MACHINE,
+ .class_init = spapr_machine_class_init,
+ .class_data = &spapr_machine,
+ .interfaces = (InterfaceInfo[]) {
+ { TYPE_FW_PATH_PROVIDER },
+ { }
+ },
+};
+
+static void spapr_machine_register_types(void)
{
- qemu_register_machine(&spapr_machine);
+ type_register_static(&spapr_machine_info);
}
-machine_init(spapr_machine_init);
+type_init(spapr_machine_register_types)
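Worked example of the firmware-path encodings used by spapr_get_fw_dev_path() above, as a standalone program rather than the QEMU function: the vscsi case packs 0x8000 | (id << 8) | lun into the top 16 bits of the 64-bit SRP LUN, the virtio-scsi case packs 0x1000000 | (target << 8) | lun into the top 32 bits. The id/target/lun values are arbitrary samples:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    unsigned id = 1, target = 2, lun = 0;

    /* spapr-vscsi: 0x8000 | (id << 8) | lun goes into the top 16 bits */
    uint64_t vscsi = (uint64_t)(0x8000 | (id << 8) | lun) << 48;

    /* virtio-scsi: 0x1000000 | (target << 8) | lun goes into the top 32 bits */
    uint64_t vio = (uint64_t)(0x1000000 | (target << 8) | lun) << 32;

    printf("vscsi path:  disk@%" PRIX64 "\n", vscsi);  /* disk@8100000000000000 */
    printf("virtio path: disk@%" PRIX64 "\n", vio);    /* disk@100020000000000 */
    return 0;
}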
diff --git a/hw/ppc/spapr_hcall.c b/hw/ppc/spapr_hcall.c
index d918780746..0bae0535e8 100644
--- a/hw/ppc/spapr_hcall.c
+++ b/hw/ppc/spapr_hcall.c
@@ -4,6 +4,36 @@
#include "hw/ppc/spapr.h"
#include "mmu-hash64.h"
+struct SPRSyncState {
+ CPUState *cs;
+ int spr;
+ target_ulong value;
+ target_ulong mask;
+};
+
+static void do_spr_sync(void *arg)
+{
+ struct SPRSyncState *s = arg;
+ PowerPCCPU *cpu = POWERPC_CPU(s->cs);
+ CPUPPCState *env = &cpu->env;
+
+ cpu_synchronize_state(s->cs);
+ env->spr[s->spr] &= ~s->mask;
+ env->spr[s->spr] |= s->value;
+}
+
+static void set_spr(CPUState *cs, int spr, target_ulong value,
+ target_ulong mask)
+{
+ struct SPRSyncState s = {
+ .cs = cs,
+ .spr = spr,
+ .value = value,
+ .mask = mask
+ };
+ run_on_cpu(cs, do_spr_sync, &s);
+}
+
static target_ulong compute_tlbie_rb(target_ulong v, target_ulong r,
target_ulong pte_index)
{
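The set_spr()/do_spr_sync() pair above uses run_on_cpu() so the register update executes on the target vCPU with its state synchronised; the update itself is a plain mask/value read-modify-write. A standalone sketch of that arithmetic (the LPCR_ILE bit position, bit 25, is used here purely as an illustrative value):

#include <stdint.h>
#include <stdio.h>

static uint64_t spr_update(uint64_t old, uint64_t value, uint64_t mask)
{
    old &= ~mask;          /* drop the field being changed */
    old |= value;          /* value is expected to lie within mask */
    return old;
}

int main(void)
{
    const uint64_t LPCR_ILE = 1ULL << 25;
    uint64_t lpcr = 0;

    lpcr = spr_update(lpcr, LPCR_ILE, LPCR_ILE);  /* H_SET_MODE -> little endian */
    lpcr = spr_update(lpcr, 0, LPCR_ILE);         /* back to big endian */
    printf("final LPCR = 0x%llx\n", (unsigned long long)lpcr);
    return 0;
}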
@@ -110,16 +140,15 @@ static target_ulong h_enter(PowerPCCPU *cpu, sPAPREnvironment *spapr,
if (likely((flags & H_EXACT) == 0)) {
pte_index &= ~7ULL;
token = ppc_hash64_start_access(cpu, pte_index);
- do {
- if (index == 8) {
- ppc_hash64_stop_access(token);
- return H_PTEG_FULL;
- }
+ for (; index < 8; index++) {
if ((ppc_hash64_load_hpte0(env, token, index) & HPTE64_V_VALID) == 0) {
break;
}
- } while (index++);
+ }
ppc_hash64_stop_access(token);
+ if (index == 8) {
+ return H_PTEG_FULL;
+ }
} else {
token = ppc_hash64_start_access(cpu, pte_index);
if (ppc_hash64_load_hpte0(env, token, 0) & HPTE64_V_VALID) {
@@ -356,7 +385,7 @@ static target_ulong h_set_dabr(PowerPCCPU *cpu, sPAPREnvironment *spapr,
static target_ulong register_vpa(CPUPPCState *env, target_ulong vpa)
{
- CPUState *cs = ENV_GET_CPU(env);
+ CPUState *cs = CPU(ppc_env_get_cpu(env));
uint16_t size;
uint8_t tmp;
@@ -406,7 +435,7 @@ static target_ulong deregister_vpa(CPUPPCState *env, target_ulong vpa)
static target_ulong register_slb_shadow(CPUPPCState *env, target_ulong addr)
{
- CPUState *cs = ENV_GET_CPU(env);
+ CPUState *cs = CPU(ppc_env_get_cpu(env));
uint32_t size;
if (addr == 0) {
@@ -442,7 +471,7 @@ static target_ulong deregister_slb_shadow(CPUPPCState *env, target_ulong addr)
static target_ulong register_dtl(CPUPPCState *env, target_ulong addr)
{
- CPUState *cs = ENV_GET_CPU(env);
+ CPUState *cs = CPU(ppc_env_get_cpu(env));
uint32_t size;
if (addr == 0) {
@@ -529,7 +558,7 @@ static target_ulong h_cede(PowerPCCPU *cpu, sPAPREnvironment *spapr,
hreg_compute_hflags(env);
if (!cpu_has_work(cs)) {
cs->halted = 1;
- env->exception_index = EXCP_HLT;
+ cs->exception_index = EXCP_HLT;
cs->exit_request = 1;
}
return H_SUCCESS;
@@ -690,7 +719,7 @@ static target_ulong h_set_mode(PowerPCCPU *cpu, sPAPREnvironment *spapr,
target_ulong value2 = args[3];
target_ulong ret = H_P2;
- if (resource == H_SET_MODE_ENDIAN) {
+ if (resource == H_SET_MODE_RESOURCE_LE) {
if (value1) {
ret = H_P3;
goto out;
@@ -699,22 +728,17 @@ static target_ulong h_set_mode(PowerPCCPU *cpu, sPAPREnvironment *spapr,
ret = H_P4;
goto out;
}
-
switch (mflags) {
case H_SET_MODE_ENDIAN_BIG:
CPU_FOREACH(cs) {
- PowerPCCPU *cp = POWERPC_CPU(cs);
- CPUPPCState *env = &cp->env;
- env->spr[SPR_LPCR] &= ~LPCR_ILE;
+ set_spr(cs, SPR_LPCR, 0, LPCR_ILE);
}
ret = H_SUCCESS;
break;
case H_SET_MODE_ENDIAN_LITTLE:
CPU_FOREACH(cs) {
- PowerPCCPU *cp = POWERPC_CPU(cs);
- CPUPPCState *env = &cp->env;
- env->spr[SPR_LPCR] |= LPCR_ILE;
+ set_spr(cs, SPR_LPCR, LPCR_ILE, LPCR_ILE);
}
ret = H_SUCCESS;
break;
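The h_enter() hunk above replaces the awkward do/while scan of a PTE group with a plain for loop and reports H_PTEG_FULL only after the access token has been released. A sketch of the same control flow over a plain array, with HPTE64_V_VALID reduced to a one-bit flag:

#include <stdio.h>

#define HPTES_PER_GROUP 8
#define V_VALID 1u

static int find_free_slot(const unsigned pte0[HPTES_PER_GROUP])
{
    int index;

    for (index = 0; index < HPTES_PER_GROUP; index++) {
        if ((pte0[index] & V_VALID) == 0) {
            break;
        }
    }
    /* ppc_hash64_stop_access(token) happens at this point in the real code */
    return index == HPTES_PER_GROUP ? -1 : index;   /* -1 ~ H_PTEG_FULL */
}

int main(void)
{
    unsigned group[HPTES_PER_GROUP] = { 1, 1, 1, 0, 1, 0, 1, 1 };
    printf("first free slot: %d\n", find_free_slot(group));   /* prints 3 */
    return 0;
}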
diff --git a/hw/ppc/spapr_pci.c b/hw/ppc/spapr_pci.c
index cea9469872..cbef095935 100644
--- a/hw/ppc/spapr_pci.c
+++ b/hw/ppc/spapr_pci.c
@@ -32,6 +32,7 @@
#include "exec/address-spaces.h"
#include <libfdt.h>
#include "trace.h"
+#include "qemu/error-report.h"
#include "hw/pci/pci_bus.h"
@@ -292,7 +293,7 @@ static void rtas_ibm_change_msi(PowerPCCPU *cpu, sPAPREnvironment *spapr,
ret_intr_type = RTAS_TYPE_MSIX;
break;
default:
- fprintf(stderr, "rtas_ibm_change_msi(%u) is not implemented\n", func);
+ error_report("rtas_ibm_change_msi(%u) is not implemented", func);
rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
return;
}
@@ -326,7 +327,7 @@ static void rtas_ibm_change_msi(PowerPCCPU *cpu, sPAPREnvironment *spapr,
/* Find a device number in the map to add or reuse the existing one */
ndev = spapr_msicfg_find(phb, config_addr, true);
if (ndev >= SPAPR_MSIX_MAX_DEVS || ndev < 0) {
- fprintf(stderr, "No free entry for a new MSI device\n");
+ error_report("No free entry for a new MSI device");
rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
return;
}
@@ -335,7 +336,7 @@ static void rtas_ibm_change_msi(PowerPCCPU *cpu, sPAPREnvironment *spapr,
/* Check if there is an old config and MSI number has not changed */
if (phb->msi_table[ndev].nvec && (req_num != phb->msi_table[ndev].nvec)) {
/* Unexpected behaviour */
- fprintf(stderr, "Cannot reuse MSI config for device#%d", ndev);
+ error_report("Cannot reuse MSI config for device#%d", ndev);
rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
return;
}
@@ -345,7 +346,7 @@ static void rtas_ibm_change_msi(PowerPCCPU *cpu, sPAPREnvironment *spapr,
irq = spapr_allocate_irq_block(req_num, false,
ret_intr_type == RTAS_TYPE_MSI);
if (irq < 0) {
- fprintf(stderr, "Cannot allocate MSIs for device#%d", ndev);
+ error_report("Cannot allocate MSIs for device#%d", ndev);
rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
return;
}
@@ -505,12 +506,11 @@ static AddressSpace *spapr_pci_dma_iommu(PCIBus *bus, void *opaque, int devfn)
return &phb->iommu_as;
}
-static int spapr_phb_init(SysBusDevice *s)
+static void spapr_phb_realize(DeviceState *dev, Error **errp)
{
- DeviceState *dev = DEVICE(s);
+ SysBusDevice *s = SYS_BUS_DEVICE(dev);
sPAPRPHBState *sphb = SPAPR_PCI_HOST_BRIDGE(s);
PCIHostState *phb = PCI_HOST_BRIDGE(s);
- const char *busname;
char *namebuf;
int i;
PCIBus *bus;
@@ -521,9 +521,9 @@ static int spapr_phb_init(SysBusDevice *s)
if ((sphb->buid != -1) || (sphb->dma_liobn != -1)
|| (sphb->mem_win_addr != -1)
|| (sphb->io_win_addr != -1)) {
- fprintf(stderr, "Either \"index\" or other parameters must"
- " be specified for PAPR PHB, not both\n");
- return -1;
+ error_setg(errp, "Either \"index\" or other parameters must"
+ " be specified for PAPR PHB, not both");
+ return;
}
sphb->buid = SPAPR_PCI_BASE_BUID + sphb->index;
@@ -536,28 +536,28 @@ static int spapr_phb_init(SysBusDevice *s)
}
if (sphb->buid == -1) {
- fprintf(stderr, "BUID not specified for PHB\n");
- return -1;
+ error_setg(errp, "BUID not specified for PHB");
+ return;
}
if (sphb->dma_liobn == -1) {
- fprintf(stderr, "LIOBN not specified for PHB\n");
- return -1;
+ error_setg(errp, "LIOBN not specified for PHB");
+ return;
}
if (sphb->mem_win_addr == -1) {
- fprintf(stderr, "Memory window address not specified for PHB\n");
- return -1;
+ error_setg(errp, "Memory window address not specified for PHB");
+ return;
}
if (sphb->io_win_addr == -1) {
- fprintf(stderr, "IO window address not specified for PHB\n");
- return -1;
+ error_setg(errp, "IO window address not specified for PHB");
+ return;
}
if (find_phb(spapr, sphb->buid)) {
- fprintf(stderr, "PCI host bridges must have unique BUIDs\n");
- return -1;
+ error_setg(errp, "PCI host bridges must have unique BUIDs");
+ return;
}
sphb->dtbusname = g_strdup_printf("pci@%" PRIx64, sphb->buid);
@@ -594,26 +594,8 @@ static int spapr_phb_init(SysBusDevice *s)
get_system_io(), 0, SPAPR_PCI_IO_WIN_SIZE);
memory_region_add_subregion(get_system_memory(), sphb->io_win_addr,
&sphb->iowindow);
- /*
- * Selecting a busname is more complex than you'd think, due to
- * interacting constraints. If the user has specified an id
- * explicitly for the phb , then we want to use the qdev default
- * of naming the bus based on the bridge device (so the user can
- * then assign devices to it in the way they expect). For the
- * first / default PCI bus (index=0) we want to use just "pci"
- * because libvirt expects there to be a bus called, simply,
- * "pci". Otherwise, we use the same name as in the device tree,
- * since it's unique by construction, and makes the guest visible
- * BUID clear.
- */
- if (dev->id) {
- busname = NULL;
- } else if (sphb->index == 0) {
- busname = "pci";
- } else {
- busname = sphb->dtbusname;
- }
- bus = pci_register_bus(dev, busname,
+
+ bus = pci_register_bus(dev, NULL,
pci_spapr_set_irq, pci_spapr_map_irq, sphb,
&sphb->memspace, &sphb->iospace,
PCI_DEVFN(0, 0), PCI_NUM_PINS, TYPE_PCI_BUS);
@@ -624,8 +606,9 @@ static int spapr_phb_init(SysBusDevice *s)
sphb->tcet = spapr_tce_new_table(dev, sphb->dma_liobn,
sphb->dma_window_size);
if (!sphb->tcet) {
- fprintf(stderr, "Unable to create TCE table for %s\n", sphb->dtbusname);
- return -1;
+ error_setg(errp, "Unable to create TCE table for %s",
+ sphb->dtbusname);
+ return;
}
address_space_init(&sphb->iommu_as, spapr_tce_get_iommu(sphb->tcet),
sphb->dtbusname);
@@ -642,13 +625,12 @@ static int spapr_phb_init(SysBusDevice *s)
irq = spapr_allocate_lsi(0);
if (!irq) {
- return -1;
+ error_setg(errp, "spapr_allocate_lsi failed");
+ return;
}
sphb->lsi_table[i].irq = irq;
}
-
- return 0;
}
static void spapr_phb_reset(DeviceState *qdev)
@@ -731,11 +713,10 @@ static const char *spapr_phb_root_bus_path(PCIHostState *host_bridge,
static void spapr_phb_class_init(ObjectClass *klass, void *data)
{
PCIHostBridgeClass *hc = PCI_HOST_BRIDGE_CLASS(klass);
- SysBusDeviceClass *sdc = SYS_BUS_DEVICE_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
hc->root_bus_path = spapr_phb_root_bus_path;
- sdc->init = spapr_phb_init;
+ dc->realize = spapr_phb_realize;
dc->props = spapr_phb_properties;
dc->reset = spapr_phb_reset;
dc->vmsd = &vmstate_spapr_pci;
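spapr_phb_realize() above is the usual SysBusDevice init-to-realize conversion: instead of printing to stderr and returning -1, every failure is reported through an Error ** that the caller can propagate or turn fatal. A toy of that shape; the Error type and error_setg() here are simplified stand-ins, not QEMU's qapi/error API:

#include <stdio.h>
#include <stdlib.h>

typedef struct Error { char msg[128]; } Error;

static void error_setg(Error **errp, const char *msg)
{
    if (errp) {
        *errp = malloc(sizeof(**errp));
        snprintf((*errp)->msg, sizeof((*errp)->msg), "%s", msg);
    }
}

static void phb_realize(long buid, Error **errp)
{
    if (buid == -1) {
        error_setg(errp, "BUID not specified for PHB");
        return;                 /* caller decides whether this is fatal */
    }
    /* ... set up memory windows, DMA, LSIs ... */
}

int main(void)
{
    Error *err = NULL;

    phb_realize(-1, &err);
    if (err) {
        fprintf(stderr, "realize failed: %s\n", err->msg);
        free(err);
        return 1;
    }
    return 0;
}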
diff --git a/hw/ppc/spapr_vio.c b/hw/ppc/spapr_vio.c
index 4e33f462d9..2ae06a3356 100644
--- a/hw/ppc/spapr_vio.c
+++ b/hw/ppc/spapr_vio.c
@@ -68,6 +68,7 @@ static void spapr_vio_bus_class_init(ObjectClass *klass, void *data)
BusClass *k = BUS_CLASS(klass);
k->get_dev_path = spapr_vio_get_dev_name;
+ k->get_fw_dev_path = spapr_vio_get_dev_name;
}
static const TypeInfo spapr_vio_bus_info = {
@@ -529,7 +530,9 @@ static int spapr_vio_bridge_init(SysBusDevice *dev)
static void spapr_vio_bridge_class_init(ObjectClass *klass, void *data)
{
SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ dc->fw_name = "vdevice";
k->init = spapr_vio_bridge_init;
}
diff --git a/hw/ppc/virtex_ml507.c b/hw/ppc/virtex_ml507.c
index ce8ea91e8b..3e3569d4b8 100644
--- a/hw/ppc/virtex_ml507.c
+++ b/hw/ppc/virtex_ml507.c
@@ -71,7 +71,7 @@ static void mmubooke_create_initial_mapping(CPUPPCState *env,
tlb->attr = 0;
tlb->prot = PAGE_VALID | ((PAGE_READ | PAGE_WRITE | PAGE_EXEC) << 4);
- tlb->size = 1 << 31; /* up to 0x80000000 */
+ tlb->size = 1U << 31; /* up to 0x80000000 */
tlb->EPN = va & TARGET_PAGE_MASK;
tlb->RPN = pa & TARGET_PAGE_MASK;
tlb->PID = 0;
@@ -79,7 +79,7 @@ static void mmubooke_create_initial_mapping(CPUPPCState *env,
tlb = &env->tlb.tlbe[1];
tlb->attr = 0;
tlb->prot = PAGE_VALID | ((PAGE_READ | PAGE_WRITE | PAGE_EXEC) << 4);
- tlb->size = 1 << 31; /* up to 0xffffffff */
+ tlb->size = 1U << 31; /* up to 0xffffffff */
tlb->EPN = 0x80000000 & TARGET_PAGE_MASK;
tlb->RPN = 0x80000000 & TARGET_PAGE_MASK;
tlb->PID = 0;
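The virtex_ml507.c fix above matters because the shift lands on the sign bit: with 32-bit int, 1 << 31 is undefined behaviour, while 1U << 31 is the intended 0x80000000. Tiny illustration:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t size = 1U << 31;            /* well defined: 0x80000000 */
    printf("0x%08" PRIx32 "\n", size);
    /* int bad = 1 << 31;                -- undefined behaviour in C */
    return 0;
}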
diff --git a/hw/s390x/ipl.c b/hw/s390x/ipl.c
index 32d38a08f6..4fa9cffded 100644
--- a/hw/s390x/ipl.c
+++ b/hw/s390x/ipl.c
@@ -80,7 +80,7 @@ static int s390_ipl_init(SysBusDevice *dev)
bios_size = load_elf(bios_filename, NULL, NULL, &ipl->start_addr, NULL,
NULL, 1, ELF_MACHINE, 0);
- if (bios_size == -1) {
+ if (bios_size < 0) {
bios_size = load_image_targphys(bios_filename, ZIPL_IMAGE_START,
4096);
ipl->start_addr = ZIPL_IMAGE_START;
diff --git a/hw/s390x/s390-virtio-bus.c b/hw/s390x/s390-virtio-bus.c
index e4fc35366b..9c71afa031 100644
--- a/hw/s390x/s390-virtio-bus.c
+++ b/hw/s390x/s390-virtio-bus.c
@@ -313,7 +313,9 @@ static void s390_virtio_rng_instance_init(Object *obj)
object_initialize(&dev->vdev, sizeof(dev->vdev), TYPE_VIRTIO_RNG);
object_property_add_child(obj, "virtio-backend", OBJECT(&dev->vdev), NULL);
object_property_add_link(obj, "rng", TYPE_RNG_BACKEND,
- (Object **)&dev->vdev.conf.rng, NULL);
+ (Object **)&dev->vdev.conf.rng,
+ qdev_prop_allow_set_link_before_realize,
+ OBJ_PROP_LINK_UNREF_ON_RELEASE, NULL);
}
static uint64_t s390_virtio_device_vq_token(VirtIOS390Device *dev, int vq)
diff --git a/hw/s390x/s390-virtio.c b/hw/s390x/s390-virtio.c
index 0f03fd18b9..aef200310c 100644
--- a/hw/s390x/s390-virtio.c
+++ b/hw/s390x/s390-virtio.c
@@ -135,25 +135,23 @@ static unsigned s390_running_cpus;
void s390_add_running_cpu(S390CPU *cpu)
{
CPUState *cs = CPU(cpu);
- CPUS390XState *env = &cpu->env;
if (cs->halted) {
s390_running_cpus++;
cs->halted = 0;
- env->exception_index = -1;
+ cs->exception_index = -1;
}
}
unsigned s390_del_running_cpu(S390CPU *cpu)
{
CPUState *cs = CPU(cpu);
- CPUS390XState *env = &cpu->env;
if (cs->halted == 0) {
assert(s390_running_cpus >= 1);
s390_running_cpus--;
cs->halted = 1;
- env->exception_index = EXCP_HLT;
+ cs->exception_index = EXCP_HLT;
}
return s390_running_cpus;
}
@@ -196,7 +194,7 @@ void s390_init_cpus(const char *cpu_model, uint8_t *storage_keys)
ipi_states[i] = cpu;
cs->halted = 1;
- cpu->env.exception_index = EXCP_HLT;
+ cs->exception_index = EXCP_HLT;
cpu->env.storage_keys = storage_keys;
}
}
diff --git a/hw/s390x/sclpcpu.c b/hw/s390x/sclpcpu.c
index b9c238a0da..3600fe231d 100644
--- a/hw/s390x/sclpcpu.c
+++ b/hw/s390x/sclpcpu.c
@@ -25,13 +25,13 @@ typedef struct ConfigMgtData {
uint8_t event_qualifier;
} QEMU_PACKED ConfigMgtData;
-static qemu_irq irq_cpu_hotplug; /* Only used in this file */
+static qemu_irq *irq_cpu_hotplug; /* Only used in this file */
#define EVENT_QUAL_CPU_CHANGE 1
void raise_irq_cpu_hotplug(void)
{
- qemu_irq_raise(irq_cpu_hotplug);
+ qemu_irq_raise(*irq_cpu_hotplug);
}
static unsigned int send_mask(void)
@@ -81,7 +81,7 @@ static void trigger_signal(void *opaque, int n, int level)
static int irq_cpu_hotplug_init(SCLPEvent *event)
{
- irq_cpu_hotplug = *qemu_allocate_irqs(trigger_signal, event, 1);
+ irq_cpu_hotplug = qemu_allocate_irqs(trigger_signal, event, 1);
return 0;
}
diff --git a/hw/s390x/virtio-ccw.c b/hw/s390x/virtio-ccw.c
index a01801e342..2bf0af8f0a 100644
--- a/hw/s390x/virtio-ccw.c
+++ b/hw/s390x/virtio-ccw.c
@@ -1272,7 +1272,9 @@ static void virtio_ccw_rng_instance_init(Object *obj)
object_initialize(&dev->vdev, sizeof(dev->vdev), TYPE_VIRTIO_RNG);
object_property_add_child(obj, "virtio-backend", OBJECT(&dev->vdev), NULL);
object_property_add_link(obj, "rng", TYPE_RNG_BACKEND,
- (Object **)&dev->vdev.conf.rng, NULL);
+ (Object **)&dev->vdev.conf.rng,
+ qdev_prop_allow_set_link_before_realize,
+ OBJ_PROP_LINK_UNREF_ON_RELEASE, NULL);
}
static Property virtio_ccw_rng_properties[] = {
diff --git a/hw/scsi/scsi-bus.c b/hw/scsi/scsi-bus.c
index 50a0acf1fe..ae921a6a75 100644
--- a/hw/scsi/scsi-bus.c
+++ b/hw/scsi/scsi-bus.c
@@ -101,7 +101,6 @@ static void scsi_dma_restart_bh(void *opaque)
scsi_req_continue(req);
break;
case SCSI_XFER_NONE:
- assert(!req->sg);
scsi_req_dequeue(req);
scsi_req_enqueue(req);
break;
@@ -1905,6 +1904,26 @@ static const VMStateInfo vmstate_info_scsi_requests = {
.put = put_scsi_requests,
};
+static bool scsi_sense_state_needed(void *opaque)
+{
+ SCSIDevice *s = opaque;
+
+ return s->sense_len > SCSI_SENSE_BUF_SIZE_OLD;
+}
+
+static const VMStateDescription vmstate_scsi_sense_state = {
+ .name = "SCSIDevice/sense",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .minimum_version_id_old = 1,
+ .fields = (VMStateField []) {
+ VMSTATE_UINT8_SUB_ARRAY(sense, SCSIDevice,
+ SCSI_SENSE_BUF_SIZE_OLD,
+ SCSI_SENSE_BUF_SIZE - SCSI_SENSE_BUF_SIZE_OLD),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
const VMStateDescription vmstate_scsi_device = {
.name = "SCSIDevice",
.version_id = 1,
@@ -1915,7 +1934,7 @@ const VMStateDescription vmstate_scsi_device = {
VMSTATE_UINT8(unit_attention.asc, SCSIDevice),
VMSTATE_UINT8(unit_attention.ascq, SCSIDevice),
VMSTATE_BOOL(sense_is_ua, SCSIDevice),
- VMSTATE_UINT8_ARRAY(sense, SCSIDevice, SCSI_SENSE_BUF_SIZE),
+ VMSTATE_UINT8_SUB_ARRAY(sense, SCSIDevice, 0, SCSI_SENSE_BUF_SIZE_OLD),
VMSTATE_UINT32(sense_len, SCSIDevice),
{
.name = "requests",
@@ -1927,6 +1946,14 @@ const VMStateDescription vmstate_scsi_device = {
.offset = 0,
},
VMSTATE_END_OF_LIST()
+ },
+ .subsections = (VMStateSubsection []) {
+ {
+ .vmsd = &vmstate_scsi_sense_state,
+ .needed = scsi_sense_state_needed,
+ }, {
+ /* empty */
+ }
}
};
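The scsi-bus.c hunks above split the sense buffer for migration: the first SCSI_SENSE_BUF_SIZE_OLD bytes stay in the main SCSIDevice section, and the remainder travels in a subsection whose .needed callback only fires when the device actually holds a longer sense. A standalone sketch of that idea, assuming the old/new buffer sizes are 96 and 252 bytes:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SENSE_OLD 96
#define SENSE_NEW 252

struct dev { uint8_t sense[SENSE_NEW]; uint32_t sense_len; };

static bool sense_state_needed(const struct dev *d)
{
    return d->sense_len > SENSE_OLD;     /* mirrors scsi_sense_state_needed() */
}

static size_t serialize(const struct dev *d, uint8_t *out)
{
    size_t n = 0;

    memcpy(out + n, d->sense, SENSE_OLD);            /* base section */
    n += SENSE_OLD;
    if (sense_state_needed(d)) {                     /* optional subsection */
        memcpy(out + n, d->sense + SENSE_OLD, SENSE_NEW - SENSE_OLD);
        n += SENSE_NEW - SENSE_OLD;
    }
    return n;
}

int main(void)
{
    struct dev d = { .sense_len = 18 };              /* typical short sense */
    uint8_t buf[SENSE_NEW];

    printf("bytes to migrate: %zu\n", serialize(&d, buf));   /* prints 96 */
    return 0;
}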
diff --git a/hw/scsi/spapr_vscsi.c b/hw/scsi/spapr_vscsi.c
index b3835c821d..d4ada4f335 100644
--- a/hw/scsi/spapr_vscsi.c
+++ b/hw/scsi/spapr_vscsi.c
@@ -195,9 +195,9 @@ static int vscsi_send_iu(VSCSIState *s, vscsi_req *req,
req->crq.s.IU_data_ptr = req->iu.srp.rsp.tag; /* right byte order */
if (rc == 0) {
- req->crq.s.status = 0x99; /* Just needs to be non-zero */
+ req->crq.s.status = VIOSRP_OK;
} else {
- req->crq.s.status = 0x00;
+ req->crq.s.status = VIOSRP_ADAPTER_FAIL;
}
rc1 = spapr_vio_send_crq(&s->vdev, req->crq.raw);
@@ -690,7 +690,7 @@ static void vscsi_inquiry_no_target(VSCSIState *s, vscsi_req *req)
int rc, len, alen;
/* We dont do EVPD. Also check that page_code is 0 */
- if ((cdb[1] & 0x01) || (cdb[1] & 0x01) || cdb[2] != 0) {
+ if ((cdb[1] & 0x01) || cdb[2] != 0) {
/* Send INVALID FIELD IN CDB */
vscsi_makeup_sense(s, req, ILLEGAL_REQUEST, 0x24, 0);
vscsi_send_rsp(s, req, CHECK_CONDITION, 0, 0);
diff --git a/hw/scsi/virtio-scsi.c b/hw/scsi/virtio-scsi.c
index 6610b3aab3..b0d7517cb1 100644
--- a/hw/scsi/virtio-scsi.c
+++ b/hw/scsi/virtio-scsi.c
@@ -304,6 +304,8 @@ static void virtio_scsi_command_complete(SCSIRequest *r, uint32_t status,
size_t resid)
{
VirtIOSCSIReq *req = r->hba_private;
+ VirtIOSCSI *s = req->dev;
+ VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s);
uint32_t sense_len;
if (r->io_canceled) {
@@ -317,7 +319,7 @@ static void virtio_scsi_command_complete(SCSIRequest *r, uint32_t status,
} else {
req->resp.cmd->resid = 0;
sense_len = scsi_req_get_sense(r, req->resp.cmd->sense,
- VIRTIO_SCSI_SENSE_SIZE);
+ vs->sense_size);
req->resp.cmd->sense_len = tswap32(sense_len);
}
virtio_scsi_complete_req(req);
diff --git a/hw/scsi/vmw_pvscsi.c b/hw/scsi/vmw_pvscsi.c
index 7d344b944e..e35bff76a9 100644
--- a/hw/scsi/vmw_pvscsi.c
+++ b/hw/scsi/vmw_pvscsi.c
@@ -479,12 +479,13 @@ static void
pvscsi_command_complete(SCSIRequest *req, uint32_t status, size_t resid)
{
PVSCSIRequest *pvscsi_req = req->hba_private;
- PVSCSIState *s = pvscsi_req->dev;
+ PVSCSIState *s;
if (!pvscsi_req) {
trace_pvscsi_command_complete_not_found(req->tag);
return;
}
+ s = pvscsi_req->dev;
if (resid) {
/* Short transfer. */
diff --git a/hw/sd/ssi-sd.c b/hw/sd/ssi-sd.c
index 1bb56c4d54..3273c8a31f 100644
--- a/hw/sd/ssi-sd.c
+++ b/hw/sd/ssi-sd.c
@@ -238,9 +238,10 @@ static int ssi_sd_load(QEMUFile *f, void *opaque, int version_id)
return 0;
}
-static int ssi_sd_init(SSISlave *dev)
+static int ssi_sd_init(SSISlave *d)
{
- ssi_sd_state *s = FROM_SSI_SLAVE(ssi_sd_state, dev);
+ DeviceState *dev = DEVICE(d);
+ ssi_sd_state *s = FROM_SSI_SLAVE(ssi_sd_state, d);
DriveInfo *dinfo;
s->mode = SSI_SD_CMD;
@@ -249,7 +250,7 @@ static int ssi_sd_init(SSISlave *dev)
if (s->sd == NULL) {
return -1;
}
- register_savevm(&dev->qdev, "ssi_sd", -1, 1, ssi_sd_save, ssi_sd_load, s);
+ register_savevm(dev, "ssi_sd", -1, 1, ssi_sd_save, ssi_sd_load, s);
return 0;
}
diff --git a/hw/sh4/sh7750.c b/hw/sh4/sh7750.c
index 1439ba44e5..4a39357529 100644
--- a/hw/sh4/sh7750.c
+++ b/hw/sh4/sh7750.c
@@ -416,7 +416,7 @@ static void sh7750_mem_writel(void *opaque, hwaddr addr,
case SH7750_PTEH_A7:
/* If asid changes, clear all registered tlb entries. */
if ((s->cpu->env.pteh & 0xff) != (mem_value & 0xff)) {
- tlb_flush(&s->cpu->env, 1);
+ tlb_flush(CPU(s->cpu), 1);
}
s->cpu->env.pteh = mem_value;
return;
diff --git a/hw/ssi/ssi.c b/hw/ssi/ssi.c
index 2c25260875..017f0221fb 100644
--- a/hw/ssi/ssi.c
+++ b/hw/ssi/ssi.c
@@ -15,7 +15,7 @@
#include "hw/ssi.h"
struct SSIBus {
- BusState qbus;
+ BusState parent_obj;
};
#define TYPE_SSI_BUS "SSI"
@@ -60,7 +60,7 @@ static int ssi_slave_init(DeviceState *dev)
if (ssc->transfer_raw == ssi_transfer_raw_default &&
ssc->cs_polarity != SSI_CS_NONE) {
- qdev_init_gpio_in(&s->qdev, ssi_cs_default, 1);
+ qdev_init_gpio_in(dev, ssi_cs_default, 1);
}
return ssc->init(s);
@@ -88,7 +88,7 @@ static const TypeInfo ssi_slave_info = {
DeviceState *ssi_create_slave_no_init(SSIBus *bus, const char *name)
{
- return qdev_create(&bus->qbus, name);
+ return qdev_create(BUS(bus), name);
}
DeviceState *ssi_create_slave(SSIBus *bus, const char *name)
@@ -108,11 +108,12 @@ SSIBus *ssi_create_bus(DeviceState *parent, const char *name)
uint32_t ssi_transfer(SSIBus *bus, uint32_t val)
{
+ BusState *b = BUS(bus);
BusChild *kid;
SSISlaveClass *ssc;
uint32_t r = 0;
- QTAILQ_FOREACH(kid, &bus->qbus.children, sibling) {
+ QTAILQ_FOREACH(kid, &b->children, sibling) {
SSISlave *slave = SSI_SLAVE(kid->child);
ssc = SSI_SLAVE_GET_CLASS(slave);
r |= ssc->transfer_raw(slave, val);
@@ -156,7 +157,7 @@ static int ssi_auto_connect_slave(Object *child, void *opaque)
}
cs_line = qdev_get_gpio_in(DEVICE(dev), 0);
- qdev_set_parent_bus(DEVICE(dev), &arg->bus->qbus);
+ qdev_set_parent_bus(DEVICE(dev), BUS(arg->bus));
**arg->cs_linep = cs_line;
(*arg->cs_linep)++;
return 0;
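The ssi.c changes above follow the QOM convention of naming the embedded parent "parent_obj" and going through cast macros (BUS(), DEVICE()) instead of reaching into the field. A toy version of why that works; the real macros additionally perform run-time type checking:

#include <stdio.h>

typedef struct BusState { const char *name; } BusState;
typedef struct SSIBus { BusState parent_obj; } SSIBus;

#define BUS(obj) (&(obj)->parent_obj)     /* stand-in for the QOM cast macro */

int main(void)
{
    SSIBus bus = { .parent_obj = { .name = "ssi.0" } };
    BusState *b = BUS(&bus);              /* valid because parent_obj is first */

    printf("bus name: %s\n", b->name);
    return 0;
}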
diff --git a/hw/timer/allwinner-a10-pit.c b/hw/timer/allwinner-a10-pit.c
index b27fce8cd2..d3c02ea18f 100644
--- a/hw/timer/allwinner-a10-pit.c
+++ b/hw/timer/allwinner-a10-pit.c
@@ -19,6 +19,15 @@
#include "sysemu/sysemu.h"
#include "hw/timer/allwinner-a10-pit.h"
+static void a10_pit_update_irq(AwA10PITState *s)
+{
+ int i;
+
+ for (i = 0; i < AW_A10_PIT_TIMER_NR; i++) {
+ qemu_set_irq(s->irq[i], !!(s->irq_status & s->irq_enable & (1 << i)));
+ }
+}
+
static uint64_t a10_pit_read(void *opaque, hwaddr offset, unsigned size)
{
AwA10PITState *s = AW_A10_PIT(opaque);
@@ -65,6 +74,22 @@ static uint64_t a10_pit_read(void *opaque, hwaddr offset, unsigned size)
return 0;
}
+static void a10_pit_set_freq(AwA10PITState *s, int index)
+{
+ uint32_t prescaler, source, source_freq;
+
+ prescaler = 1 << extract32(s->control[index], 4, 3);
+ source = extract32(s->control[index], 2, 2);
+ source_freq = s->clk_freq[source];
+
+ if (source_freq) {
+ ptimer_set_freq(s->timer[index], source_freq / prescaler);
+ } else {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Invalid clock source %u\n",
+ __func__, source);
+ }
+}
+
static void a10_pit_write(void *opaque, hwaddr offset, uint64_t value,
unsigned size)
{
@@ -74,9 +99,11 @@ static void a10_pit_write(void *opaque, hwaddr offset, uint64_t value,
switch (offset) {
case AW_A10_PIT_TIMER_IRQ_EN:
s->irq_enable = value;
+ a10_pit_update_irq(s);
break;
case AW_A10_PIT_TIMER_IRQ_ST:
s->irq_status &= ~value;
+ a10_pit_update_irq(s);
break;
case AW_A10_PIT_TIMER_BASE ... AW_A10_PIT_TIMER_BASE_END:
index = offset & 0xf0;
@@ -85,6 +112,7 @@ static void a10_pit_write(void *opaque, hwaddr offset, uint64_t value,
switch (offset & 0x0f) {
case AW_A10_PIT_TIMER_CONTROL:
s->control[index] = value;
+ a10_pit_set_freq(s, index);
if (s->control[index] & AW_A10_PIT_TIMER_RELOAD) {
ptimer_set_count(s->timer[index], s->interval[index]);
}
@@ -150,6 +178,14 @@ static const MemoryRegionOps a10_pit_ops = {
.endianness = DEVICE_NATIVE_ENDIAN,
};
+static Property a10_pit_properties[] = {
+ DEFINE_PROP_UINT32("clk0-freq", AwA10PITState, clk_freq[0], 0),
+ DEFINE_PROP_UINT32("clk1-freq", AwA10PITState, clk_freq[1], 0),
+ DEFINE_PROP_UINT32("clk2-freq", AwA10PITState, clk_freq[2], 0),
+ DEFINE_PROP_UINT32("clk3-freq", AwA10PITState, clk_freq[3], 0),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
static const VMStateDescription vmstate_a10_pit = {
.name = "a10.pit",
.version_id = 1,
@@ -178,11 +214,14 @@ static void a10_pit_reset(DeviceState *dev)
s->irq_enable = 0;
s->irq_status = 0;
+ a10_pit_update_irq(s);
+
for (i = 0; i < 6; i++) {
s->control[i] = AW_A10_PIT_DEFAULT_CLOCK;
s->interval[i] = 0;
s->count[i] = 0;
ptimer_stop(s->timer[i]);
+ a10_pit_set_freq(s, i);
}
s->watch_dog_mode = 0;
s->watch_dog_control = 0;
@@ -193,18 +232,17 @@ static void a10_pit_reset(DeviceState *dev)
static void a10_pit_timer_cb(void *opaque)
{
- AwA10PITState *s = AW_A10_PIT(opaque);
- uint8_t i;
+ AwA10TimerContext *tc = opaque;
+ AwA10PITState *s = tc->container;
+ uint8_t i = tc->index;
- for (i = 0; i < AW_A10_PIT_TIMER_NR; i++) {
- if (s->control[i] & AW_A10_PIT_TIMER_EN) {
- s->irq_status |= 1 << i;
- if (s->control[i] & AW_A10_PIT_TIMER_MODE) {
- ptimer_stop(s->timer[i]);
- s->control[i] &= ~AW_A10_PIT_TIMER_EN;
- }
- qemu_irq_pulse(s->irq[i]);
+ if (s->control[i] & AW_A10_PIT_TIMER_EN) {
+ s->irq_status |= 1 << i;
+ if (s->control[i] & AW_A10_PIT_TIMER_MODE) {
+ ptimer_stop(s->timer[i]);
+ s->control[i] &= ~AW_A10_PIT_TIMER_EN;
}
+ a10_pit_update_irq(s);
}
}
@@ -223,9 +261,12 @@ static void a10_pit_init(Object *obj)
sysbus_init_mmio(sbd, &s->iomem);
for (i = 0; i < AW_A10_PIT_TIMER_NR; i++) {
- bh[i] = qemu_bh_new(a10_pit_timer_cb, s);
+ AwA10TimerContext *tc = &s->timer_context[i];
+
+ tc->container = s;
+ tc->index = i;
+ bh[i] = qemu_bh_new(a10_pit_timer_cb, tc);
s->timer[i] = ptimer_init(bh[i]);
- ptimer_set_freq(s->timer[i], 240000);
}
}
@@ -234,6 +275,7 @@ static void a10_pit_class_init(ObjectClass *klass, void *data)
DeviceClass *dc = DEVICE_CLASS(klass);
dc->reset = a10_pit_reset;
+ dc->props = a10_pit_properties;
dc->desc = "allwinner a10 timer";
dc->vmsd = &vmstate_a10_pit;
}
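a10_pit_set_freq() above derives the ptimer frequency from the control register: bits [6:4] select a power-of-two prescaler and bits [3:2] select one of the clock sources supplied via the new clkN-freq properties. Standalone version of that decode; extract32() is re-implemented locally and the clock frequencies are illustrative:

#include <stdint.h>
#include <stdio.h>

static uint32_t extract32(uint32_t value, int start, int length)
{
    return (value >> start) & ((1u << length) - 1);
}

int main(void)
{
    uint32_t clk_freq[4] = { 32768, 24000000, 0, 0 };
    uint32_t control = (2 << 4) | (1 << 2);   /* prescaler /4, source #1 */

    uint32_t prescaler = 1 << extract32(control, 4, 3);
    uint32_t source = extract32(control, 2, 2);
    uint32_t source_freq = clk_freq[source];

    if (source_freq) {
        printf("ptimer freq: %u Hz\n", source_freq / prescaler);  /* 6000000 */
    }
    return 0;
}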
diff --git a/hw/timer/cadence_ttc.c b/hw/timer/cadence_ttc.c
index a279bced78..28cb328f9b 100644
--- a/hw/timer/cadence_ttc.c
+++ b/hw/timer/cadence_ttc.c
@@ -346,11 +346,13 @@ static void cadence_ttc_write(void *opaque, hwaddr offset,
case 0x34:
case 0x38:
s->reg_match[0] = value & 0xffff;
+ break;
case 0x3c: /* match register */
case 0x40:
case 0x44:
s->reg_match[1] = value & 0xffff;
+ break;
case 0x48: /* match register */
case 0x4c:
diff --git a/hw/timer/grlib_gptimer.c b/hw/timer/grlib_gptimer.c
index 74c16d6c90..d655bb2a27 100644
--- a/hw/timer/grlib_gptimer.c
+++ b/hw/timer/grlib_gptimer.c
@@ -106,9 +106,9 @@ static void grlib_gptimer_enable(GPTimer *timer)
/* ptimer is triggered when the counter reach 0 but GPTimer is triggered at
underflow. Set count + 1 to simulate the GPTimer behavior. */
- trace_grlib_gptimer_enable(timer->id, timer->counter + 1);
+ trace_grlib_gptimer_enable(timer->id, timer->counter);
- ptimer_set_count(timer->ptimer, timer->counter + 1);
+ ptimer_set_count(timer->ptimer, (uint64_t)timer->counter + 1);
ptimer_run(timer->ptimer, 1);
}
@@ -328,7 +328,6 @@ static void grlib_gptimer_reset(DeviceState *d)
unit->scaler = 0;
unit->reload = 0;
- unit->config = 0;
unit->config = unit->nr_timers;
unit->config |= unit->irq_line << 3;
diff --git a/hw/timer/hpet.c b/hw/timer/hpet.c
index 1264dfd46a..e15d6bcac7 100644
--- a/hw/timer/hpet.c
+++ b/hw/timer/hpet.c
@@ -506,7 +506,8 @@ static void hpet_ram_write(void *opaque, hwaddr addr,
timer->cmp = (uint32_t)timer->cmp;
timer->period = (uint32_t)timer->period;
}
- if (activating_bit(old_val, new_val, HPET_TN_ENABLE)) {
+ if (activating_bit(old_val, new_val, HPET_TN_ENABLE) &&
+ hpet_enabled(s)) {
hpet_set_timer(timer);
} else if (deactivating_bit(old_val, new_val, HPET_TN_ENABLE)) {
hpet_del_timer(timer);
diff --git a/hw/usb/Makefile.objs b/hw/usb/Makefile.objs
index 97b457541f..17d460cb04 100644
--- a/hw/usb/Makefile.objs
+++ b/hw/usb/Makefile.objs
@@ -26,6 +26,10 @@ common-obj-y += ccid-card-passthru.o
common-obj-$(CONFIG_SMARTCARD_NSS) += ccid-card-emulated.o
endif
+ifeq ($(CONFIG_POSIX),y)
+common-obj-$(CONFIG_USB_STORAGE_MTP) += dev-mtp.o
+endif
+
# usb redirection
common-obj-$(CONFIG_USB_REDIR) += redirect.o quirks.o
diff --git a/hw/usb/desc-msos.c b/hw/usb/desc-msos.c
index ed8d62cab8..334d1aea8d 100644
--- a/hw/usb/desc-msos.c
+++ b/hw/usb/desc-msos.c
@@ -44,7 +44,7 @@ typedef struct msos_compat_hdr {
typedef struct msos_compat_func {
uint8_t bFirstInterfaceNumber;
uint8_t reserved_1;
- uint8_t compatibleId[8];
+ char compatibleId[8];
uint8_t subCompatibleId[8];
uint8_t reserved_2[6];
} QEMU_PACKED msos_compat_func;
@@ -59,6 +59,10 @@ static int usb_desc_msos_compat(const USBDesc *desc, uint8_t *dest)
func = (void *)(dest + length);
func->bFirstInterfaceNumber = 0;
func->reserved_1 = 0x01;
+ if (desc->msos->CompatibleID) {
+ snprintf(func->compatibleId, sizeof(func->compatibleId),
+ "%s", desc->msos->CompatibleID);
+ }
length += sizeof(*func);
count++;
diff --git a/hw/usb/desc.h b/hw/usb/desc.h
index 2b4fcdae76..8e8db03a0c 100644
--- a/hw/usb/desc.h
+++ b/hw/usb/desc.h
@@ -184,6 +184,7 @@ struct USBDescOther {
};
struct USBDescMSOS {
+ const char *CompatibleID;
const wchar_t *Label;
bool SelectiveSuspendEnabled;
};
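The desc-msos.c/desc.h change above lets a device advertise a Microsoft OS compatible ID (the MTP device added below sets it to "MTP"); snprintf() copies the string into the 8-byte compatibleId field and the pre-zeroed tail stays 0x00. Quick check of the resulting bytes:

#include <stdio.h>

int main(void)
{
    char compatibleId[8] = { 0 };

    snprintf(compatibleId, sizeof(compatibleId), "%s", "MTP");
    for (size_t i = 0; i < sizeof(compatibleId); i++) {
        printf("%02x ", (unsigned char)compatibleId[i]);
    }
    printf("\n");    /* 4d 54 50 00 00 00 00 00 */
    return 0;
}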
diff --git a/hw/usb/dev-mtp.c b/hw/usb/dev-mtp.c
new file mode 100644
index 0000000000..8b44032900
--- /dev/null
+++ b/hw/usb/dev-mtp.c
@@ -0,0 +1,1103 @@
+/*
+ * Media Transfer Protocol implementation, backed by host filesystem.
+ *
+ * Copyright Red Hat, Inc 2014
+ *
+ * Author:
+ * Gerd Hoffmann <kraxel@redhat.com>
+ *
+ * This code is licensed under the GPL v2 or later.
+ */
+
+#include <wchar.h>
+#include <dirent.h>
+#include <unistd.h>
+
+#include <sys/stat.h>
+#include <sys/statvfs.h>
+
+#include "qemu-common.h"
+#include "qemu/iov.h"
+#include "trace.h"
+#include "hw/usb.h"
+#include "hw/usb/desc.h"
+
+/* ----------------------------------------------------------------------- */
+
+enum mtp_container_type {
+ TYPE_COMMAND = 1,
+ TYPE_DATA = 2,
+ TYPE_RESPONSE = 3,
+ TYPE_EVENT = 4,
+};
+
+enum mtp_code {
+ /* command codes */
+ CMD_GET_DEVICE_INFO = 0x1001,
+ CMD_OPEN_SESSION = 0x1002,
+ CMD_CLOSE_SESSION = 0x1003,
+ CMD_GET_STORAGE_IDS = 0x1004,
+ CMD_GET_STORAGE_INFO = 0x1005,
+ CMD_GET_NUM_OBJECTS = 0x1006,
+ CMD_GET_OBJECT_HANDLES = 0x1007,
+ CMD_GET_OBJECT_INFO = 0x1008,
+ CMD_GET_OBJECT = 0x1009,
+ CMD_GET_PARTIAL_OBJECT = 0x101b,
+
+ /* response codes */
+ RES_OK = 0x2001,
+ RES_SESSION_NOT_OPEN = 0x2003,
+ RES_INVALID_TRANSACTION_ID = 0x2004,
+ RES_OPERATION_NOT_SUPPORTED = 0x2005,
+ RES_PARAMETER_NOT_SUPPORTED = 0x2006,
+ RES_INVALID_STORAGE_ID = 0x2008,
+ RES_INVALID_OBJECT_HANDLE = 0x2009,
+ RES_SPEC_BY_FORMAT_UNSUPPORTED = 0x2014,
+ RES_INVALID_PARENT_OBJECT = 0x201a,
+ RES_INVALID_PARAMETER = 0x201d,
+ RES_SESSION_ALREADY_OPEN = 0x201e,
+
+ /* format codes */
+ FMT_UNDEFINED_OBJECT = 0x3000,
+ FMT_ASSOCIATION = 0x3001,
+};
+
+typedef struct {
+ uint32_t length;
+ uint16_t type;
+ uint16_t code;
+ uint32_t trans;
+} QEMU_PACKED mtp_container;
+
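Every MTP transfer in the new dev-mtp.c starts with the little-endian container header defined above (length, type, code, transaction id). A standalone sketch of decoding one from raw bytes; get_le16/get_le32 stand in for le16_to_cpu()/le32_to_cpu(), and the sample bytes describe a GetDeviceInfo command with transaction id 1:

#include <stdint.h>
#include <stdio.h>

static uint16_t get_le16(const uint8_t *p)
{
    return (uint16_t)(p[0] | (p[1] << 8));
}

static uint32_t get_le32(const uint8_t *p)
{
    return p[0] | (p[1] << 8) | ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

int main(void)
{
    const uint8_t wire[12] = {
        12, 0, 0, 0,        /* length: header only, no parameters */
        1, 0,               /* type:   TYPE_COMMAND */
        0x01, 0x10,         /* code:   CMD_GET_DEVICE_INFO (0x1001) */
        1, 0, 0, 0,         /* trans:  1 */
    };

    printf("length %u type %u code 0x%04x trans %u\n",
           (unsigned)get_le32(wire + 0), (unsigned)get_le16(wire + 4),
           (unsigned)get_le16(wire + 6), (unsigned)get_le32(wire + 8));
    return 0;
}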
+/* ----------------------------------------------------------------------- */
+
+typedef struct MTPState MTPState;
+typedef struct MTPControl MTPControl;
+typedef struct MTPData MTPData;
+typedef struct MTPObject MTPObject;
+
+enum {
+ EP_DATA_IN = 1,
+ EP_DATA_OUT,
+ EP_EVENT,
+};
+
+struct MTPControl {
+ uint16_t code;
+ uint32_t trans;
+ int argc;
+ uint32_t argv[5];
+};
+
+struct MTPData {
+ uint16_t code;
+ uint32_t trans;
+ uint32_t offset;
+ uint32_t length;
+ uint32_t alloc;
+ uint8_t *data;
+ bool first;
+ int fd;
+};
+
+struct MTPObject {
+ uint32_t handle;
+ uint16_t format;
+ char *name;
+ char *path;
+ struct stat stat;
+ MTPObject *parent;
+ MTPObject **children;
+ int32_t nchildren;
+ QTAILQ_ENTRY(MTPObject) next;
+};
+
+struct MTPState {
+ USBDevice dev;
+ char *root;
+ char *desc;
+ uint32_t flags;
+
+ MTPData *data_in;
+ MTPData *data_out;
+ MTPControl *result;
+ uint32_t session;
+ uint32_t next_handle;
+
+ QTAILQ_HEAD(, MTPObject) objects;
+};
+
+#define QEMU_STORAGE_ID 0x00010001
+
+#define MTP_FLAG_WRITABLE 0
+
+#define FLAG_SET(_mtp, _flag) ((_mtp)->flags & (1 << (_flag)))
+
+/* ----------------------------------------------------------------------- */
+
+#define MTP_MANUFACTURER "QEMU"
+#define MTP_PRODUCT "QEMU filesharing"
+
+enum {
+ STR_MANUFACTURER = 1,
+ STR_PRODUCT,
+ STR_SERIALNUMBER,
+ STR_CONFIG_FULL,
+ STR_CONFIG_HIGH,
+ STR_CONFIG_SUPER,
+};
+
+static const USBDescStrings desc_strings = {
+ [STR_MANUFACTURER] = MTP_MANUFACTURER,
+ [STR_PRODUCT] = MTP_PRODUCT,
+ [STR_SERIALNUMBER] = "34617",
+ [STR_CONFIG_FULL] = "Full speed config (usb 1.1)",
+ [STR_CONFIG_HIGH] = "High speed config (usb 2.0)",
+ [STR_CONFIG_SUPER] = "Super speed config (usb 3.0)",
+};
+
+static const USBDescIface desc_iface_full = {
+ .bInterfaceNumber = 0,
+ .bNumEndpoints = 3,
+ .bInterfaceClass = USB_CLASS_STILL_IMAGE,
+ .bInterfaceSubClass = 0x01,
+ .bInterfaceProtocol = 0x01,
+ .eps = (USBDescEndpoint[]) {
+ {
+ .bEndpointAddress = USB_DIR_IN | EP_DATA_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = 64,
+ },{
+ .bEndpointAddress = USB_DIR_OUT | EP_DATA_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = 64,
+ },{
+ .bEndpointAddress = USB_DIR_IN | EP_EVENT,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = 8,
+ .bInterval = 0x0a,
+ },
+ }
+};
+
+static const USBDescDevice desc_device_full = {
+ .bcdUSB = 0x0200,
+ .bMaxPacketSize0 = 8,
+ .bNumConfigurations = 1,
+ .confs = (USBDescConfig[]) {
+ {
+ .bNumInterfaces = 1,
+ .bConfigurationValue = 1,
+ .iConfiguration = STR_CONFIG_FULL,
+ .bmAttributes = USB_CFG_ATT_ONE | USB_CFG_ATT_WAKEUP,
+ .bMaxPower = 2,
+ .nif = 1,
+ .ifs = &desc_iface_full,
+ },
+ },
+};
+
+static const USBDescIface desc_iface_high = {
+ .bInterfaceNumber = 0,
+ .bNumEndpoints = 3,
+ .bInterfaceClass = USB_CLASS_STILL_IMAGE,
+ .bInterfaceSubClass = 0x01,
+ .bInterfaceProtocol = 0x01,
+ .eps = (USBDescEndpoint[]) {
+ {
+ .bEndpointAddress = USB_DIR_IN | EP_DATA_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = 512,
+ },{
+ .bEndpointAddress = USB_DIR_OUT | EP_DATA_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = 512,
+ },{
+ .bEndpointAddress = USB_DIR_IN | EP_EVENT,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = 8,
+ .bInterval = 0x0a,
+ },
+ }
+};
+
+static const USBDescDevice desc_device_high = {
+ .bcdUSB = 0x0200,
+ .bMaxPacketSize0 = 64,
+ .bNumConfigurations = 1,
+ .confs = (USBDescConfig[]) {
+ {
+ .bNumInterfaces = 1,
+ .bConfigurationValue = 1,
+ .iConfiguration = STR_CONFIG_HIGH,
+ .bmAttributes = USB_CFG_ATT_ONE | USB_CFG_ATT_WAKEUP,
+ .bMaxPower = 2,
+ .nif = 1,
+ .ifs = &desc_iface_high,
+ },
+ },
+};
+
+static const USBDescMSOS desc_msos = {
+ .CompatibleID = "MTP",
+ .SelectiveSuspendEnabled = true,
+};
+
+static const USBDesc desc = {
+ .id = {
+ .idVendor = 0x46f4, /* CRC16() of "QEMU" */
+ .idProduct = 0x0004,
+ .bcdDevice = 0,
+ .iManufacturer = STR_MANUFACTURER,
+ .iProduct = STR_PRODUCT,
+ .iSerialNumber = STR_SERIALNUMBER,
+ },
+ .full = &desc_device_full,
+ .high = &desc_device_high,
+ .str = desc_strings,
+ .msos = &desc_msos,
+};
+
+/* ----------------------------------------------------------------------- */
+
+static MTPObject *usb_mtp_object_alloc(MTPState *s, uint32_t handle,
+ MTPObject *parent, char *name)
+{
+ MTPObject *o = g_new0(MTPObject, 1);
+
+ if (name[0] == '.') {
+ goto ignore;
+ }
+
+ o->handle = handle;
+ o->parent = parent;
+ o->name = g_strdup(name);
+ o->nchildren = -1;
+ if (parent == NULL) {
+ o->path = g_strdup(name);
+ } else {
+ o->path = g_strdup_printf("%s/%s", parent->path, name);
+ }
+
+ if (lstat(o->path, &o->stat) != 0) {
+ goto ignore;
+ }
+ if (S_ISREG(o->stat.st_mode)) {
+ o->format = FMT_UNDEFINED_OBJECT;
+ } else if (S_ISDIR(o->stat.st_mode)) {
+ o->format = FMT_ASSOCIATION;
+ } else {
+ goto ignore;
+ }
+
+ if (access(o->path, R_OK) != 0) {
+ goto ignore;
+ }
+
+ fprintf(stderr, "%s: 0x%x %s\n", __func__, o->handle, o->path);
+
+ QTAILQ_INSERT_TAIL(&s->objects, o, next);
+ return o;
+
+ignore:
+ g_free(o->name);
+ g_free(o->path);
+ g_free(o);
+ return NULL;
+}
+
+static void usb_mtp_object_free(MTPState *s, MTPObject *o)
+{
+ int i;
+
+ fprintf(stderr, "%s: 0x%x %s\n", __func__, o->handle, o->path);
+
+ QTAILQ_REMOVE(&s->objects, o, next);
+ for (i = 0; i < o->nchildren; i++) {
+ usb_mtp_object_free(s, o->children[i]);
+ }
+ g_free(o->children);
+ g_free(o->name);
+ g_free(o->path);
+ g_free(o);
+}
+
+static MTPObject *usb_mtp_object_lookup(MTPState *s, uint32_t handle)
+{
+ MTPObject *o;
+
+ QTAILQ_FOREACH(o, &s->objects, next) {
+ if (o->handle == handle) {
+ return o;
+ }
+ }
+ return NULL;
+}
+
+static void usb_mtp_object_readdir(MTPState *s, MTPObject *o)
+{
+ struct dirent *entry;
+ DIR *dir;
+
+ o->nchildren = 0;
+ dir = opendir(o->path);
+ if (!dir) {
+ return;
+ }
+ while ((entry = readdir(dir)) != NULL) {
+ if ((o->nchildren % 32) == 0) {
+ o->children = g_realloc(o->children,
+ (o->nchildren + 32) * sizeof(MTPObject *));
+ }
+ o->children[o->nchildren] =
+ usb_mtp_object_alloc(s, s->next_handle++, o, entry->d_name);
+ if (o->children[o->nchildren] != NULL) {
+ o->nchildren++;
+ }
+ }
+ closedir(dir);
+}
+
+/* ----------------------------------------------------------------------- */
+
+static MTPData *usb_mtp_data_alloc(MTPControl *c)
+{
+ MTPData *data = g_new0(MTPData, 1);
+
+ data->code = c->code;
+ data->trans = c->trans;
+ data->fd = -1;
+ data->first = true;
+ return data;
+}
+
+static void usb_mtp_data_free(MTPData *data)
+{
+ if (data == NULL) {
+ return;
+ }
+ if (data->fd != -1) {
+ close(data->fd);
+ }
+ g_free(data->data);
+ g_free(data);
+}
+
+static void usb_mtp_realloc(MTPData *data, uint32_t bytes)
+{
+ if (data->length + bytes <= data->alloc) {
+ return;
+ }
+ data->alloc = (data->length + bytes + 0xff) & ~0xff;
+ data->data = g_realloc(data->data, data->alloc);
+}
+
+static void usb_mtp_add_u8(MTPData *data, uint8_t val)
+{
+ usb_mtp_realloc(data, 1);
+ data->data[data->length++] = val;
+}
+
+static void usb_mtp_add_u16(MTPData *data, uint16_t val)
+{
+ usb_mtp_realloc(data, 2);
+ data->data[data->length++] = (val >> 0) & 0xff;
+ data->data[data->length++] = (val >> 8) & 0xff;
+}
+
+static void usb_mtp_add_u32(MTPData *data, uint32_t val)
+{
+ usb_mtp_realloc(data, 4);
+ data->data[data->length++] = (val >> 0) & 0xff;
+ data->data[data->length++] = (val >> 8) & 0xff;
+ data->data[data->length++] = (val >> 16) & 0xff;
+ data->data[data->length++] = (val >> 24) & 0xff;
+}
+
+static void usb_mtp_add_u64(MTPData *data, uint64_t val)
+{
+ usb_mtp_realloc(data, 8);
+ data->data[data->length++] = (val >> 0) & 0xff;
+ data->data[data->length++] = (val >> 8) & 0xff;
+ data->data[data->length++] = (val >> 16) & 0xff;
+ data->data[data->length++] = (val >> 24) & 0xff;
+ data->data[data->length++] = (val >> 32) & 0xff;
+ data->data[data->length++] = (val >> 40) & 0xff;
+ data->data[data->length++] = (val >> 48) & 0xff;
+ data->data[data->length++] = (val >> 56) & 0xff;
+}
+
+static void usb_mtp_add_u16_array(MTPData *data, uint32_t len,
+ const uint16_t *vals)
+{
+ int i;
+
+ usb_mtp_add_u32(data, len);
+ for (i = 0; i < len; i++) {
+ usb_mtp_add_u16(data, vals[i]);
+ }
+}
+
+static void usb_mtp_add_u32_array(MTPData *data, uint32_t len,
+ const uint32_t *vals)
+{
+ int i;
+
+ usb_mtp_add_u32(data, len);
+ for (i = 0; i < len; i++) {
+ usb_mtp_add_u32(data, vals[i]);
+ }
+}
+
+static void usb_mtp_add_wstr(MTPData *data, const wchar_t *str)
+{
+ uint32_t len = wcslen(str);
+ int i;
+
+ if (len > 0) {
+ len++; /* include terminating L'\0' */
+ }
+
+ usb_mtp_add_u8(data, len);
+ for (i = 0; i < len; i++) {
+ usb_mtp_add_u16(data, str[i]);
+ }
+}
+
+static void usb_mtp_add_str(MTPData *data, const char *str)
+{
+ uint32_t len = strlen(str)+1;
+ wchar_t wstr[len];
+ size_t ret;
+
+ ret = mbstowcs(wstr, str, len);
+ if (ret == -1) {
+ usb_mtp_add_wstr(data, L"Oops");
+ } else {
+ usb_mtp_add_wstr(data, wstr);
+ }
+}
+
+static void usb_mtp_add_time(MTPData *data, time_t time)
+{
+ char buf[16];
+ struct tm tm;
+
+ gmtime_r(&time, &tm);
+ strftime(buf, sizeof(buf), "%Y%m%dT%H%M%S", &tm);
+ usb_mtp_add_str(data, buf);
+}
+
+/* ----------------------------------------------------------------------- */
+
+static void usb_mtp_queue_result(MTPState *s, uint16_t code, uint32_t trans,
+ int argc, uint32_t arg0, uint32_t arg1)
+{
+ MTPControl *c = g_new0(MTPControl, 1);
+
+ c->code = code;
+ c->trans = trans;
+ c->argc = argc;
+ if (argc > 0) {
+ c->argv[0] = arg0;
+ }
+ if (argc > 1) {
+ c->argv[1] = arg1;
+ }
+
+ assert(s->result == NULL);
+ s->result = c;
+}
+
+/* ----------------------------------------------------------------------- */
+
+static MTPData *usb_mtp_get_device_info(MTPState *s, MTPControl *c)
+{
+ static const uint16_t ops[] = {
+ CMD_GET_DEVICE_INFO,
+ CMD_OPEN_SESSION,
+ CMD_CLOSE_SESSION,
+ CMD_GET_STORAGE_IDS,
+ CMD_GET_STORAGE_INFO,
+ CMD_GET_NUM_OBJECTS,
+ CMD_GET_OBJECT_HANDLES,
+ CMD_GET_OBJECT_INFO,
+ CMD_GET_OBJECT,
+ CMD_GET_PARTIAL_OBJECT,
+ };
+ static const uint16_t fmt[] = {
+ FMT_UNDEFINED_OBJECT,
+ FMT_ASSOCIATION,
+ };
+ MTPData *d = usb_mtp_data_alloc(c);
+
+ trace_usb_mtp_op_get_device_info(s->dev.addr);
+
+ usb_mtp_add_u16(d, 0x0100);
+ usb_mtp_add_u32(d, 0xffffffff);
+ usb_mtp_add_u16(d, 0x0101);
+ usb_mtp_add_wstr(d, L"");
+ usb_mtp_add_u16(d, 0x0000);
+
+ usb_mtp_add_u16_array(d, ARRAY_SIZE(ops), ops);
+ usb_mtp_add_u16_array(d, 0, NULL);
+ usb_mtp_add_u16_array(d, 0, NULL);
+ usb_mtp_add_u16_array(d, 0, NULL);
+ usb_mtp_add_u16_array(d, ARRAY_SIZE(fmt), fmt);
+
+ usb_mtp_add_wstr(d, L"" MTP_MANUFACTURER);
+ usb_mtp_add_wstr(d, L"" MTP_PRODUCT);
+ usb_mtp_add_wstr(d, L"0.1");
+ usb_mtp_add_wstr(d, L"123456789abcdef123456789abcdef");
+
+ return d;
+}
+
+static MTPData *usb_mtp_get_storage_ids(MTPState *s, MTPControl *c)
+{
+ static const uint32_t ids[] = {
+ QEMU_STORAGE_ID,
+ };
+ MTPData *d = usb_mtp_data_alloc(c);
+
+ trace_usb_mtp_op_get_storage_ids(s->dev.addr);
+
+ usb_mtp_add_u32_array(d, ARRAY_SIZE(ids), ids);
+
+ return d;
+}
+
+static MTPData *usb_mtp_get_storage_info(MTPState *s, MTPControl *c)
+{
+ MTPData *d = usb_mtp_data_alloc(c);
+ struct statvfs buf;
+ int rc;
+
+ trace_usb_mtp_op_get_storage_info(s->dev.addr);
+
+ if (FLAG_SET(s, MTP_FLAG_WRITABLE)) {
+ usb_mtp_add_u16(d, 0x0003);
+ usb_mtp_add_u16(d, 0x0002);
+ usb_mtp_add_u16(d, 0x0000);
+ } else {
+ usb_mtp_add_u16(d, 0x0001);
+ usb_mtp_add_u16(d, 0x0002);
+ usb_mtp_add_u16(d, 0x0001);
+ }
+
+ rc = statvfs(s->root, &buf);
+ if (rc == 0) {
+ usb_mtp_add_u64(d, (uint64_t)buf.f_frsize * buf.f_blocks);
+ usb_mtp_add_u64(d, (uint64_t)buf.f_bavail * buf.f_frsize);
+ usb_mtp_add_u32(d, buf.f_ffree);
+ } else {
+ usb_mtp_add_u64(d, 0xffffffff);
+ usb_mtp_add_u64(d, 0xffffffff);
+ usb_mtp_add_u32(d, 0xffffffff);
+ }
+
+ usb_mtp_add_str(d, s->desc);
+ usb_mtp_add_wstr(d, L"123456789abcdef");
+ return d;
+}
+
+static MTPData *usb_mtp_get_object_handles(MTPState *s, MTPControl *c,
+ MTPObject *o)
+{
+ MTPData *d = usb_mtp_data_alloc(c);
+ uint32_t i, handles[o->nchildren];
+
+ trace_usb_mtp_op_get_object_handles(s->dev.addr, o->handle, o->path);
+
+ for (i = 0; i < o->nchildren; i++) {
+ handles[i] = o->children[i]->handle;
+ }
+ usb_mtp_add_u32_array(d, o->nchildren, handles);
+
+ return d;
+}
+
+static MTPData *usb_mtp_get_object_info(MTPState *s, MTPControl *c,
+ MTPObject *o)
+{
+ MTPData *d = usb_mtp_data_alloc(c);
+
+ trace_usb_mtp_op_get_object_info(s->dev.addr, o->handle, o->path);
+
+ usb_mtp_add_u32(d, QEMU_STORAGE_ID);
+ usb_mtp_add_u16(d, o->format);
+ usb_mtp_add_u16(d, 0);
+ usb_mtp_add_u32(d, o->stat.st_size);
+
+ usb_mtp_add_u16(d, 0);
+ usb_mtp_add_u32(d, 0);
+ usb_mtp_add_u32(d, 0);
+ usb_mtp_add_u32(d, 0);
+ usb_mtp_add_u32(d, 0);
+ usb_mtp_add_u32(d, 0);
+ usb_mtp_add_u32(d, 0);
+
+ if (o->parent) {
+ usb_mtp_add_u32(d, o->parent->handle);
+ } else {
+ usb_mtp_add_u32(d, 0);
+ }
+ if (o->format == FMT_ASSOCIATION) {
+ usb_mtp_add_u16(d, 0x0001);
+ usb_mtp_add_u32(d, 0x00000001);
+ usb_mtp_add_u32(d, 0);
+ } else {
+ usb_mtp_add_u16(d, 0);
+ usb_mtp_add_u32(d, 0);
+ usb_mtp_add_u32(d, 0);
+ }
+
+ usb_mtp_add_str(d, o->name);
+ usb_mtp_add_time(d, o->stat.st_ctime);
+ usb_mtp_add_time(d, o->stat.st_mtime);
+ usb_mtp_add_wstr(d, L"");
+
+ return d;
+}
+
+static MTPData *usb_mtp_get_object(MTPState *s, MTPControl *c,
+ MTPObject *o)
+{
+ MTPData *d = usb_mtp_data_alloc(c);
+
+ trace_usb_mtp_op_get_object(s->dev.addr, o->handle, o->path);
+
+ d->fd = open(o->path, O_RDONLY);
+ if (d->fd == -1) {
+ return NULL;
+ }
+ d->length = o->stat.st_size;
+ d->alloc = 512;
+ d->data = g_malloc(d->alloc);
+ return d;
+}
+
+static MTPData *usb_mtp_get_partial_object(MTPState *s, MTPControl *c,
+ MTPObject *o)
+{
+ MTPData *d = usb_mtp_data_alloc(c);
+ off_t offset;
+
+ trace_usb_mtp_op_get_partial_object(s->dev.addr, o->handle, o->path,
+ c->argv[1], c->argv[2]);
+
+ d->fd = open(o->path, O_RDONLY);
+ if (d->fd == -1) {
+ return NULL;
+ }
+
+ offset = c->argv[1];
+ if (offset > o->stat.st_size) {
+ offset = o->stat.st_size;
+ }
+ lseek(d->fd, offset, SEEK_SET);
+
+ d->length = c->argv[2];
+ if (d->length > o->stat.st_size - offset) {
+ d->length = o->stat.st_size - offset;
+ }
+
+ return d;
+}
+
+static void usb_mtp_command(MTPState *s, MTPControl *c)
+{
+ MTPData *data_in = NULL;
+ MTPObject *o;
+ uint32_t nres = 0, res0 = 0;
+
+ /* sanity checks */
+ if (c->code >= CMD_CLOSE_SESSION && s->session == 0) {
+ usb_mtp_queue_result(s, RES_SESSION_NOT_OPEN,
+ c->trans, 0, 0, 0);
+ return;
+ }
+
+ /* process commands */
+ switch (c->code) {
+ case CMD_GET_DEVICE_INFO:
+ data_in = usb_mtp_get_device_info(s, c);
+ break;
+ case CMD_OPEN_SESSION:
+ if (s->session) {
+ usb_mtp_queue_result(s, RES_SESSION_ALREADY_OPEN,
+ c->trans, 1, s->session, 0);
+ return;
+ }
+ if (c->argv[0] == 0) {
+ usb_mtp_queue_result(s, RES_INVALID_PARAMETER,
+ c->trans, 0, 0, 0);
+ return;
+ }
+ trace_usb_mtp_op_open_session(s->dev.addr);
+ s->session = c->argv[0];
+ usb_mtp_object_alloc(s, s->next_handle++, NULL, s->root);
+ break;
+ case CMD_CLOSE_SESSION:
+ trace_usb_mtp_op_close_session(s->dev.addr);
+ s->session = 0;
+ s->next_handle = 0;
+ usb_mtp_object_free(s, QTAILQ_FIRST(&s->objects));
+ assert(QTAILQ_EMPTY(&s->objects));
+ break;
+ case CMD_GET_STORAGE_IDS:
+ data_in = usb_mtp_get_storage_ids(s, c);
+ break;
+ case CMD_GET_STORAGE_INFO:
+ if (c->argv[0] != QEMU_STORAGE_ID &&
+ c->argv[0] != 0xffffffff) {
+ usb_mtp_queue_result(s, RES_INVALID_STORAGE_ID,
+ c->trans, 0, 0, 0);
+ return;
+ }
+ data_in = usb_mtp_get_storage_info(s, c);
+ break;
+ case CMD_GET_NUM_OBJECTS:
+ case CMD_GET_OBJECT_HANDLES:
+ if (c->argv[0] != QEMU_STORAGE_ID &&
+ c->argv[0] != 0xffffffff) {
+ usb_mtp_queue_result(s, RES_INVALID_STORAGE_ID,
+ c->trans, 0, 0, 0);
+ return;
+ }
+ if (c->argv[1] != 0x00000000) {
+ usb_mtp_queue_result(s, RES_SPEC_BY_FORMAT_UNSUPPORTED,
+ c->trans, 0, 0, 0);
+ return;
+ }
+ if (c->argv[2] == 0x00000000 ||
+ c->argv[2] == 0xffffffff) {
+ o = QTAILQ_FIRST(&s->objects);
+ } else {
+ o = usb_mtp_object_lookup(s, c->argv[2]);
+ }
+ if (o == NULL) {
+ usb_mtp_queue_result(s, RES_INVALID_OBJECT_HANDLE,
+ c->trans, 0, 0, 0);
+ return;
+ }
+ if (o->format != FMT_ASSOCIATION) {
+ usb_mtp_queue_result(s, RES_INVALID_PARENT_OBJECT,
+ c->trans, 0, 0, 0);
+ return;
+ }
+ if (o->nchildren == -1) {
+ usb_mtp_object_readdir(s, o);
+ }
+ if (c->code == CMD_GET_NUM_OBJECTS) {
+ trace_usb_mtp_op_get_num_objects(s->dev.addr, o->handle, o->path);
+ nres = 1;
+ res0 = o->nchildren;
+ } else {
+ data_in = usb_mtp_get_object_handles(s, c, o);
+ }
+ break;
+ case CMD_GET_OBJECT_INFO:
+ o = usb_mtp_object_lookup(s, c->argv[0]);
+ if (o == NULL) {
+ usb_mtp_queue_result(s, RES_INVALID_OBJECT_HANDLE,
+ c->trans, 0, 0, 0);
+ return;
+ }
+ data_in = usb_mtp_get_object_info(s, c, o);
+ break;
+ case CMD_GET_OBJECT:
+ o = usb_mtp_object_lookup(s, c->argv[0]);
+ if (o == NULL) {
+ usb_mtp_queue_result(s, RES_INVALID_OBJECT_HANDLE,
+ c->trans, 0, 0, 0);
+ return;
+ }
+ if (o->format == FMT_ASSOCIATION) {
+ usb_mtp_queue_result(s, RES_INVALID_OBJECT_HANDLE,
+ c->trans, 0, 0, 0);
+ return;
+ }
+ data_in = usb_mtp_get_object(s, c, o);
+ if (NULL == data_in) {
+ fprintf(stderr, "%s: TODO: handle error\n", __func__);
+ }
+ break;
+ case CMD_GET_PARTIAL_OBJECT:
+ o = usb_mtp_object_lookup(s, c->argv[0]);
+ if (o == NULL) {
+ usb_mtp_queue_result(s, RES_INVALID_OBJECT_HANDLE,
+ c->trans, 0, 0, 0);
+ return;
+ }
+ if (o->format == FMT_ASSOCIATION) {
+ usb_mtp_queue_result(s, RES_INVALID_OBJECT_HANDLE,
+ c->trans, 0, 0, 0);
+ return;
+ }
+ data_in = usb_mtp_get_partial_object(s, c, o);
+ if (NULL == data_in) {
+ fprintf(stderr, "%s: TODO: handle error\n", __func__);
+ }
+ nres = 1;
+ res0 = data_in->length;
+ break;
+ default:
+ fprintf(stderr, "%s: unknown command code 0x%04x\n",
+ __func__, c->code);
+ usb_mtp_queue_result(s, RES_OPERATION_NOT_SUPPORTED,
+ c->trans, 0, 0, 0);
+ return;
+ }
+
+ /* return results on success */
+ if (data_in) {
+ assert(s->data_in == NULL);
+ s->data_in = data_in;
+ }
+ usb_mtp_queue_result(s, RES_OK, c->trans, nres, res0, 0);
+}
+
+/* ----------------------------------------------------------------------- */
+
+static void usb_mtp_handle_reset(USBDevice *dev)
+{
+ MTPState *s = DO_UPCAST(MTPState, dev, dev);
+
+ trace_usb_mtp_reset(s->dev.addr);
+
+ s->session = 0;
+ usb_mtp_data_free(s->data_in);
+ s->data_in = NULL;
+ usb_mtp_data_free(s->data_out);
+ s->data_out = NULL;
+ g_free(s->result);
+ s->result = NULL;
+}
+
+static void usb_mtp_handle_control(USBDevice *dev, USBPacket *p,
+ int request, int value, int index,
+ int length, uint8_t *data)
+{
+ int ret;
+
+ ret = usb_desc_handle_control(dev, p, request, value, index, length, data);
+ if (ret >= 0) {
+ return;
+ }
+
+ trace_usb_mtp_stall(dev->addr, "unknown control request");
+ p->status = USB_RET_STALL;
+}
+
+static void usb_mtp_cancel_packet(USBDevice *dev, USBPacket *p)
+{
+ fprintf(stderr, "%s\n", __func__);
+}
+
+static void usb_mtp_handle_data(USBDevice *dev, USBPacket *p)
+{
+ MTPState *s = DO_UPCAST(MTPState, dev, dev);
+ MTPControl cmd;
+ mtp_container container;
+ uint32_t params[5];
+ int i, rc;
+
+ switch (p->ep->nr) {
+ case EP_DATA_IN:
+ if (s->data_out != NULL) {
+ /* guest bug */
+ trace_usb_mtp_stall(s->dev.addr, "awaiting data-out");
+ p->status = USB_RET_STALL;
+ return;
+ }
+ if (p->iov.size < sizeof(container)) {
+ trace_usb_mtp_stall(s->dev.addr, "packet too small");
+ p->status = USB_RET_STALL;
+ return;
+ }
+ if (s->data_in != NULL) {
+ MTPData *d = s->data_in;
+ int dlen = d->length - d->offset;
+ if (d->first) {
+ trace_usb_mtp_data_in(s->dev.addr, d->trans, d->length);
+ container.length = cpu_to_le32(d->length + sizeof(container));
+ container.type = cpu_to_le16(TYPE_DATA);
+ container.code = cpu_to_le16(d->code);
+ container.trans = cpu_to_le32(d->trans);
+ usb_packet_copy(p, &container, sizeof(container));
+ d->first = false;
+ if (dlen > p->iov.size - sizeof(container)) {
+ dlen = p->iov.size - sizeof(container);
+ }
+ } else {
+ if (dlen > p->iov.size) {
+ dlen = p->iov.size;
+ }
+ }
+ if (d->fd == -1) {
+ usb_packet_copy(p, d->data + d->offset, dlen);
+ } else {
+ if (d->alloc < p->iov.size) {
+ d->alloc = p->iov.size;
+ d->data = g_realloc(d->data, d->alloc);
+ }
+ rc = read(d->fd, d->data, dlen);
+ if (rc != dlen) {
+ fprintf(stderr, "%s: TODO: handle read error\n", __func__);
+ }
+ usb_packet_copy(p, d->data, dlen);
+ }
+ d->offset += dlen;
+ if (d->offset == d->length) {
+ usb_mtp_data_free(s->data_in);
+ s->data_in = NULL;
+ }
+ } else if (s->result != NULL) {
+ MTPControl *r = s->result;
+ int length = sizeof(container) + r->argc * sizeof(uint32_t);
+ if (r->code == RES_OK) {
+ trace_usb_mtp_success(s->dev.addr, r->trans,
+ (r->argc > 0) ? r->argv[0] : 0,
+ (r->argc > 1) ? r->argv[1] : 0);
+ } else {
+ trace_usb_mtp_error(s->dev.addr, r->code, r->trans,
+ (r->argc > 0) ? r->argv[0] : 0,
+ (r->argc > 1) ? r->argv[1] : 0);
+ }
+ container.length = cpu_to_le32(length);
+ container.type = cpu_to_le16(TYPE_RESPONSE);
+ container.code = cpu_to_le16(r->code);
+ container.trans = cpu_to_le32(r->trans);
+ for (i = 0; i < r->argc; i++) {
+ params[i] = cpu_to_le32(r->argv[i]);
+ }
+ usb_packet_copy(p, &container, sizeof(container));
+ usb_packet_copy(p, &params, length - sizeof(container));
+ g_free(s->result);
+ s->result = NULL;
+ }
+ break;
+ case EP_DATA_OUT:
+ if (p->iov.size < sizeof(container)) {
+ trace_usb_mtp_stall(s->dev.addr, "packet too small");
+ p->status = USB_RET_STALL;
+ return;
+ }
+ usb_packet_copy(p, &container, sizeof(container));
+ switch (le16_to_cpu(container.type)) {
+ case TYPE_COMMAND:
+ if (s->data_in || s->data_out || s->result) {
+ trace_usb_mtp_stall(s->dev.addr, "transaction inflight");
+ p->status = USB_RET_STALL;
+ return;
+ }
+ cmd.code = le16_to_cpu(container.code);
+ cmd.argc = (le32_to_cpu(container.length) - sizeof(container))
+ / sizeof(uint32_t);
+ cmd.trans = le32_to_cpu(container.trans);
+ usb_packet_copy(p, &params, cmd.argc * sizeof(uint32_t));
+ for (i = 0; i < cmd.argc; i++) {
+ cmd.argv[i] = le32_to_cpu(params[i]);
+ }
+ trace_usb_mtp_command(s->dev.addr, cmd.code, cmd.trans,
+ (cmd.argc > 0) ? cmd.argv[0] : 0,
+ (cmd.argc > 1) ? cmd.argv[1] : 0,
+ (cmd.argc > 2) ? cmd.argv[2] : 0,
+ (cmd.argc > 3) ? cmd.argv[3] : 0,
+ (cmd.argc > 4) ? cmd.argv[4] : 0);
+ usb_mtp_command(s, &cmd);
+ break;
+ default:
+ iov_hexdump(p->iov.iov, p->iov.niov, stderr, "mtp-out", 32);
+ trace_usb_mtp_stall(s->dev.addr, "TODO: implement data-out");
+ p->status = USB_RET_STALL;
+ return;
+ }
+ break;
+ case EP_EVENT:
+ p->status = USB_RET_NAK;
+ return;
+ default:
+ trace_usb_mtp_stall(s->dev.addr, "invalid endpoint");
+ p->status = USB_RET_STALL;
+ return;
+ }
+
+ if (p->actual_length == 0) {
+ trace_usb_mtp_nak(s->dev.addr, p->ep->nr);
+ p->status = USB_RET_NAK;
+ return;
+ } else {
+ trace_usb_mtp_xfer(s->dev.addr, p->ep->nr, p->actual_length,
+ p->iov.size);
+ return;
+ }
+}
+
+static int usb_mtp_initfn(USBDevice *dev)
+{
+ MTPState *s = DO_UPCAST(MTPState, dev, dev);
+
+ usb_desc_create_serial(dev);
+ usb_desc_init(dev);
+ QTAILQ_INIT(&s->objects);
+ if (s->desc == NULL) {
+ s->desc = strrchr(s->root, '/');
+ if (s->desc) {
+ s->desc = g_strdup(s->desc + 1);
+ } else {
+ s->desc = g_strdup("none");
+ }
+ }
+ return 0;
+}
+
+static const VMStateDescription vmstate_usb_mtp = {
+ .name = "usb-mtp",
+ .unmigratable = 1,
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_USB_DEVICE(dev, MTPState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static Property mtp_properties[] = {
+ DEFINE_PROP_STRING("root", MTPState, root),
+ DEFINE_PROP_STRING("desc", MTPState, desc),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void usb_mtp_class_initfn(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ USBDeviceClass *uc = USB_DEVICE_CLASS(klass);
+
+ uc->init = usb_mtp_initfn;
+ uc->product_desc = "QEMU USB MTP";
+ uc->usb_desc = &desc;
+ uc->cancel_packet = usb_mtp_cancel_packet;
+ uc->handle_attach = usb_desc_attach;
+ uc->handle_reset = usb_mtp_handle_reset;
+ uc->handle_control = usb_mtp_handle_control;
+ uc->handle_data = usb_mtp_handle_data;
+ dc->fw_name = "mtp";
+ dc->vmsd = &vmstate_usb_mtp;
+ dc->props = mtp_properties;
+}
+
+static TypeInfo mtp_info = {
+ .name = "usb-mtp",
+ .parent = TYPE_USB_DEVICE,
+ .instance_size = sizeof(MTPState),
+ .class_init = usb_mtp_class_initfn,
+};
+
+static void usb_mtp_register_types(void)
+{
+ type_register_static(&mtp_info);
+}
+
+type_init(usb_mtp_register_types)
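For readers following the bulk-endpoint code above, here is a minimal host-side sketch of the 12-byte PTP/MTP generic container header that usb_mtp_handle_data() reads and writes. The field layout is inferred from the cpu_to_le32()/le16_to_cpu() accesses in the patch; the encoder function, the buffer handling and the numeric TYPE_COMMAND/GetObjectHandles values (taken from the PTP specification) are illustrative and not part of the patch.

#include <stdint.h>
#include <string.h>

/* Host-side view of the container header handled above (little-endian
 * on the wire; a little-endian host is assumed here for brevity). */
struct mtp_container_hdr {
    uint32_t length;   /* total container size, header included */
    uint16_t type;     /* 1 = command, 2 = data, 3 = response */
    uint16_t code;     /* operation or response code */
    uint32_t trans;    /* transaction id chosen by the initiator */
} __attribute__((packed));

/* Encode a GetObjectHandles (0x1007) command with the three arguments
 * the CMD_GET_OBJECT_HANDLES case above expects: storage id, format
 * filter (0 = all formats) and parent object handle. */
static size_t encode_get_object_handles(uint8_t *buf, uint32_t trans,
                                        uint32_t storage_id, uint32_t parent)
{
    struct mtp_container_hdr hdr = {
        .length = sizeof(hdr) + 3 * sizeof(uint32_t),
        .type   = 1,               /* TYPE_COMMAND */
        .code   = 0x1007,          /* GetObjectHandles */
        .trans  = trans,
    };
    uint32_t argv[3] = { storage_id, 0, parent };

    memcpy(buf, &hdr, sizeof(hdr));
    memcpy(buf + sizeof(hdr), argv, sizeof(argv));
    return hdr.length;
}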
diff --git a/hw/usb/hcd-ohci.c b/hw/usb/hcd-ohci.c
index 3d35058b14..93f186f5e7 100644
--- a/hw/usb/hcd-ohci.c
+++ b/hw/usb/hcd-ohci.c
@@ -234,15 +234,15 @@ struct ohci_iso_td {
#define OHCI_STATUS_OCR (1<<3)
#define OHCI_STATUS_SOC ((1<<6)|(1<<7))
-#define OHCI_INTR_SO (1<<0) /* Scheduling overrun */
-#define OHCI_INTR_WD (1<<1) /* HcDoneHead writeback */
-#define OHCI_INTR_SF (1<<2) /* Start of frame */
-#define OHCI_INTR_RD (1<<3) /* Resume detect */
-#define OHCI_INTR_UE (1<<4) /* Unrecoverable error */
-#define OHCI_INTR_FNO (1<<5) /* Frame number overflow */
-#define OHCI_INTR_RHSC (1<<6) /* Root hub status change */
-#define OHCI_INTR_OC (1<<30) /* Ownership change */
-#define OHCI_INTR_MIE (1<<31) /* Master Interrupt Enable */
+#define OHCI_INTR_SO (1U<<0) /* Scheduling overrun */
+#define OHCI_INTR_WD (1U<<1) /* HcDoneHead writeback */
+#define OHCI_INTR_SF (1U<<2) /* Start of frame */
+#define OHCI_INTR_RD (1U<<3) /* Resume detect */
+#define OHCI_INTR_UE (1U<<4) /* Unrecoverable error */
+#define OHCI_INTR_FNO (1U<<5) /* Frame number overflow */
+#define OHCI_INTR_RHSC (1U<<6) /* Root hub status change */
+#define OHCI_INTR_OC (1U<<30) /* Ownership change */
+#define OHCI_INTR_MIE (1U<<31) /* Master Interrupt Enable */
#define OHCI_HCCA_SIZE 0x100
#define OHCI_HCCA_MASK 0xffffff00
@@ -253,7 +253,7 @@ struct ohci_iso_td {
#define OHCI_FMI_FSMPS 0xffff0000
#define OHCI_FMI_FIT 0x80000000
-#define OHCI_FR_RT (1<<31)
+#define OHCI_FR_RT (1U<<31)
#define OHCI_LS_THRESH 0x628
@@ -265,12 +265,12 @@ struct ohci_iso_td {
#define OHCI_RHA_NOCP (1<<12)
#define OHCI_RHA_POTPGT_MASK 0xff000000
-#define OHCI_RHS_LPS (1<<0)
-#define OHCI_RHS_OCI (1<<1)
-#define OHCI_RHS_DRWE (1<<15)
-#define OHCI_RHS_LPSC (1<<16)
-#define OHCI_RHS_OCIC (1<<17)
-#define OHCI_RHS_CRWE (1<<31)
+#define OHCI_RHS_LPS (1U<<0)
+#define OHCI_RHS_OCI (1U<<1)
+#define OHCI_RHS_DRWE (1U<<15)
+#define OHCI_RHS_LPSC (1U<<16)
+#define OHCI_RHS_OCIC (1U<<17)
+#define OHCI_RHS_CRWE (1U<<31)
#define OHCI_PORT_CCS (1<<0)
#define OHCI_PORT_PES (1<<1)
diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c
index 7b91841a1d..ce97514b69 100644
--- a/hw/virtio/virtio-pci.c
+++ b/hw/virtio/virtio-pci.c
@@ -1517,7 +1517,9 @@ static void virtio_rng_initfn(Object *obj)
object_initialize(&dev->vdev, sizeof(dev->vdev), TYPE_VIRTIO_RNG);
object_property_add_child(obj, "virtio-backend", OBJECT(&dev->vdev), NULL);
object_property_add_link(obj, "rng", TYPE_RNG_BACKEND,
- (Object **)&dev->vdev.conf.rng, NULL);
+ (Object **)&dev->vdev.conf.rng,
+ qdev_prop_allow_set_link_before_realize,
+ OBJ_PROP_LINK_UNREF_ON_RELEASE, NULL);
}
diff --git a/hw/virtio/virtio-rng.c b/hw/virtio/virtio-rng.c
index a16e3bc52e..b6ab3610cb 100644
--- a/hw/virtio/virtio-rng.c
+++ b/hw/virtio/virtio-rng.c
@@ -162,6 +162,9 @@ static void virtio_rng_device_realize(DeviceState *dev, Error **errp)
OBJECT(vrng->conf.default_backend),
NULL);
+ /* The child property took a reference, we can safely drop ours now */
+ object_unref(OBJECT(vrng->conf.default_backend));
+
object_property_set_link(OBJECT(dev),
OBJECT(vrng->conf.default_backend),
"rng", NULL);
@@ -223,7 +226,9 @@ static void virtio_rng_initfn(Object *obj)
VirtIORNG *vrng = VIRTIO_RNG(obj);
object_property_add_link(obj, "rng", TYPE_RNG_BACKEND,
- (Object **)&vrng->conf.rng, NULL);
+ (Object **)&vrng->conf.rng,
+ qdev_prop_allow_set_link_before_realize,
+ OBJ_PROP_LINK_UNREF_ON_RELEASE, NULL);
}
static const TypeInfo virtio_rng_info = {
diff --git a/include/block/aio.h b/include/block/aio.h
index 2efdf416cf..a92511bd3b 100644
--- a/include/block/aio.h
+++ b/include/block/aio.h
@@ -19,6 +19,7 @@
#include "qemu/queue.h"
#include "qemu/event_notifier.h"
#include "qemu/thread.h"
+#include "qemu/rfifolock.h"
#include "qemu/timer.h"
typedef struct BlockDriverAIOCB BlockDriverAIOCB;
@@ -47,6 +48,9 @@ typedef void IOHandler(void *opaque);
struct AioContext {
GSource source;
+ /* Protects all fields from multi-threaded access */
+ RFifoLock lock;
+
/* The list of registered AIO handlers */
QLIST_HEAD(, AioHandler) aio_handlers;
@@ -104,6 +108,20 @@ void aio_context_ref(AioContext *ctx);
*/
void aio_context_unref(AioContext *ctx);
+/* Take ownership of the AioContext. If the AioContext will be shared between
+ * threads, a thread must have ownership when calling aio_poll().
+ *
+ * Note that multiple threads calling aio_poll() means timers, BHs, and
+ * callbacks may be invoked from a different thread than they were registered
+ * from. Therefore, code must use AioContext acquire/release or use
+ * fine-grained synchronization to protect shared state if other threads will
+ * be accessing it simultaneously.
+ */
+void aio_context_acquire(AioContext *ctx);
+
+/* Relinquish ownership of the AioContext. */
+void aio_context_release(AioContext *ctx);
+
/**
* aio_bh_new: Allocate a new bottom half structure.
*
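A minimal sketch of the ownership rule described in the comment above: a dedicated thread acquires the AioContext around each aio_poll() call so handlers, bottom halves and timers run while that thread holds ownership. The aio_poll() call matches the existing API; the worker function itself is hypothetical.

static void *aio_worker_thread(void *opaque)
{
    AioContext *ctx = opaque;

    for (;;) {
        aio_context_acquire(ctx);    /* take ownership before polling */
        aio_poll(ctx, true);         /* dispatch handlers, BHs, timers */
        aio_context_release(ctx);    /* give other threads a turn */
    }
    return NULL;
}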
diff --git a/include/block/block.h b/include/block/block.h
index 780f48b7b3..c12808a252 100644
--- a/include/block/block.h
+++ b/include/block/block.h
@@ -180,7 +180,7 @@ int bdrv_create(BlockDriver *drv, const char* filename,
QEMUOptionParameter *options, Error **errp);
int bdrv_create_file(const char* filename, QEMUOptionParameter *options,
Error **errp);
-BlockDriverState *bdrv_new(const char *device_name);
+BlockDriverState *bdrv_new(const char *device_name, Error **errp);
void bdrv_make_anon(BlockDriverState *bs);
void bdrv_swap(BlockDriverState *bs_new, BlockDriverState *bs_old);
void bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top);
@@ -190,6 +190,7 @@ int bdrv_open_image(BlockDriverState **pbs, const char *filename,
QDict *options, const char *bdref_key, int flags,
bool allow_none, Error **errp);
int bdrv_open_backing_file(BlockDriverState *bs, QDict *options, Error **errp);
+void bdrv_append_temp_snapshot(BlockDriverState *bs, Error **errp);
int bdrv_open(BlockDriverState **pbs, const char *filename,
const char *reference, QDict *options, int flags,
BlockDriver *drv, Error **errp);
@@ -286,15 +287,6 @@ int bdrv_check(BlockDriverState *bs, BdrvCheckResult *res, BdrvCheckMode fix);
int bdrv_amend_options(BlockDriverState *bs_new, QEMUOptionParameter *options);
/* external snapshots */
-
-typedef enum {
- BS_IS_A_FILTER,
- BS_FILTER_PASS_DOWN,
- BS_AUTHORIZATION_COUNT,
-} BsAuthorization;
-
-bool bdrv_generic_is_first_non_filter(BlockDriverState *bs,
- BlockDriverState *candidate);
bool bdrv_recurse_is_first_non_filter(BlockDriverState *bs,
BlockDriverState *candidate);
bool bdrv_is_first_non_filter(BlockDriverState *candidate);
@@ -338,8 +330,8 @@ BlockDriverAIOCB *bdrv_aio_ioctl(BlockDriverState *bs,
BlockDriverCompletionFunc *cb, void *opaque);
/* Invalidate any cached metadata used by image formats */
-void bdrv_invalidate_cache(BlockDriverState *bs);
-void bdrv_invalidate_cache_all(void);
+void bdrv_invalidate_cache(BlockDriverState *bs, Error **errp);
+void bdrv_invalidate_cache_all(Error **errp);
void bdrv_clear_incoming_migration_all(void);
@@ -437,7 +429,8 @@ bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov);
struct HBitmapIter;
typedef struct BdrvDirtyBitmap BdrvDirtyBitmap;
-BdrvDirtyBitmap *bdrv_create_dirty_bitmap(BlockDriverState *bs, int granularity);
+BdrvDirtyBitmap *bdrv_create_dirty_bitmap(BlockDriverState *bs, int granularity,
+ Error **errp);
void bdrv_release_dirty_bitmap(BlockDriverState *bs, BdrvDirtyBitmap *bitmap);
BlockDirtyInfoList *bdrv_query_dirty_bitmaps(BlockDriverState *bs);
int bdrv_get_dirty(BlockDriverState *bs, BdrvDirtyBitmap *bitmap, int64_t sector);
diff --git a/include/block/block_int.h b/include/block/block_int.h
index 0bcf1c9b8c..cd5bc7308a 100644
--- a/include/block/block_int.h
+++ b/include/block/block_int.h
@@ -76,10 +76,10 @@ struct BlockDriver {
const char *format_name;
int instance_size;
- /* this table of boolean contains authorizations for the block operations */
- bool authorizations[BS_AUTHORIZATION_COUNT];
- /* for snapshots complex block filter like Quorum can implement the
- * following recursive callback instead of BS_IS_A_FILTER.
+ /* set to true if the BlockDriver is a block filter */
+ bool is_filter;
+ /* for snapshots, block filters like Quorum can implement the
+ * following recursive callback.
* Its purpose is to recurse on the filter children while calling
* bdrv_recurse_is_first_non_filter on them.
* For a sample implementation look in the future Quorum block filter.
@@ -153,7 +153,7 @@ struct BlockDriver {
/*
* Invalidate any cached meta-data.
*/
- void (*bdrv_invalidate_cache)(BlockDriverState *bs);
+ void (*bdrv_invalidate_cache)(BlockDriverState *bs, Error **errp);
/*
* Flushes all data that was already written to the OS all the way down to
diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
index 4cb4b4a53a..fb649a4029 100644
--- a/include/exec/cpu-all.h
+++ b/include/exec/cpu-all.h
@@ -360,9 +360,6 @@ int page_check_range(target_ulong start, target_ulong len, int flags);
CPUArchState *cpu_copy(CPUArchState *env);
-void QEMU_NORETURN cpu_abort(CPUArchState *env, const char *fmt, ...)
- GCC_FMT_ATTR(2, 3);
-
/* Flags for use in ENV->INTERRUPT_PENDING.
The numbers assigned here are non-sequential in order to preserve
@@ -413,27 +410,6 @@ void QEMU_NORETURN cpu_abort(CPUArchState *env, const char *fmt, ...)
| CPU_INTERRUPT_TGT_EXT_3 \
| CPU_INTERRUPT_TGT_EXT_4)
-/* Breakpoint/watchpoint flags */
-#define BP_MEM_READ 0x01
-#define BP_MEM_WRITE 0x02
-#define BP_MEM_ACCESS (BP_MEM_READ | BP_MEM_WRITE)
-#define BP_STOP_BEFORE_ACCESS 0x04
-#define BP_WATCHPOINT_HIT 0x08
-#define BP_GDB 0x10
-#define BP_CPU 0x20
-
-int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
- CPUBreakpoint **breakpoint);
-int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags);
-void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint);
-void cpu_breakpoint_remove_all(CPUArchState *env, int mask);
-int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
- int flags, CPUWatchpoint **watchpoint);
-int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr,
- target_ulong len, int flags);
-void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint);
-void cpu_watchpoint_remove_all(CPUArchState *env, int mask);
-
#if !defined(CONFIG_USER_ONLY)
/* memory API */
diff --git a/include/exec/cpu-defs.h b/include/exec/cpu-defs.h
index 66a3d46938..2dd6206d4a 100644
--- a/include/exec/cpu-defs.h
+++ b/include/exec/cpu-defs.h
@@ -24,7 +24,6 @@
#endif
#include "config.h"
-#include <setjmp.h>
#include <inttypes.h>
#include "qemu/osdep.h"
#include "qemu/queue.h"
@@ -61,9 +60,6 @@ typedef uint64_t target_ulong;
#define EXCP_HALTED 0x10003 /* cpu is halted (waiting for external event) */
#define EXCP_YIELD 0x10004 /* cpu wants to yield timeslice to another */
-#define TB_JMP_CACHE_BITS 12
-#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)
-
/* Only the bottom TB_JMP_PAGE_BITS of the jump cache hash bits vary for
addresses on the same page. The top bits are the same. This allows
TLB invalidation to quickly clear a subset of the hash table. */
@@ -118,66 +114,9 @@ QEMU_BUILD_BUG_ON(sizeof(CPUTLBEntry) != (1 << CPU_TLB_ENTRY_BITS));
#endif
-#ifdef HOST_WORDS_BIGENDIAN
-typedef struct icount_decr_u16 {
- uint16_t high;
- uint16_t low;
-} icount_decr_u16;
-#else
-typedef struct icount_decr_u16 {
- uint16_t low;
- uint16_t high;
-} icount_decr_u16;
-#endif
-
-typedef struct CPUBreakpoint {
- target_ulong pc;
- int flags; /* BP_* */
- QTAILQ_ENTRY(CPUBreakpoint) entry;
-} CPUBreakpoint;
-
-typedef struct CPUWatchpoint {
- target_ulong vaddr;
- target_ulong len_mask;
- int flags; /* BP_* */
- QTAILQ_ENTRY(CPUWatchpoint) entry;
-} CPUWatchpoint;
-
#define CPU_TEMP_BUF_NLONGS 128
#define CPU_COMMON \
/* soft mmu support */ \
- /* in order to avoid passing too many arguments to the MMIO \
- helpers, we store some rarely used information in the CPU \
- context) */ \
- uintptr_t mem_io_pc; /* host pc at which the memory was \
- accessed */ \
- target_ulong mem_io_vaddr; /* target virtual addr at which the \
- memory was accessed */ \
CPU_COMMON_TLB \
- struct TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE]; \
- \
- int64_t icount_extra; /* Instructions until next timer event. */ \
- /* Number of cycles left, with interrupt flag in high bit. \
- This allows a single read-compare-cbranch-write sequence to test \
- for both decrementer underflow and exceptions. */ \
- union { \
- uint32_t u32; \
- icount_decr_u16 u16; \
- } icount_decr; \
- uint32_t can_do_io; /* nonzero if memory mapped IO is safe. */ \
- \
- /* from this point: preserved by CPU reset */ \
- /* ice debug support */ \
- QTAILQ_HEAD(breakpoints_head, CPUBreakpoint) breakpoints; \
- \
- QTAILQ_HEAD(watchpoints_head, CPUWatchpoint) watchpoints; \
- CPUWatchpoint *watchpoint_hit; \
- \
- /* Core interrupt code */ \
- sigjmp_buf jmp_env; \
- int exception_index; \
- \
- /* user data */ \
- void *opaque; \
#endif
diff --git a/include/exec/cputlb.h b/include/exec/cputlb.h
index e21cb60442..b8ecd6f68d 100644
--- a/include/exec/cputlb.h
+++ b/include/exec/cputlb.h
@@ -22,7 +22,7 @@
#if !defined(CONFIG_USER_ONLY)
/* cputlb.c */
void tlb_protect_code(ram_addr_t ram_addr);
-void tlb_unprotect_code_phys(CPUArchState *env, ram_addr_t ram_addr,
+void tlb_unprotect_code_phys(CPUState *cpu, ram_addr_t ram_addr,
target_ulong vaddr);
void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, uintptr_t start,
uintptr_t length);
@@ -31,12 +31,12 @@ void tlb_set_dirty(CPUArchState *env, target_ulong vaddr);
extern int tlb_flush_count;
/* exec.c */
-void tb_flush_jmp_cache(CPUArchState *env, target_ulong addr);
+void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr);
MemoryRegionSection *
address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
hwaddr *plen);
-hwaddr memory_region_section_get_iotlb(CPUArchState *env,
+hwaddr memory_region_section_get_iotlb(CPUState *cpu,
MemoryRegionSection *section,
target_ulong vaddr,
hwaddr paddr, hwaddr xlat,
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index a387922df4..f9ac332f9d 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -44,7 +44,7 @@ struct TranslationBlock;
typedef struct TranslationBlock TranslationBlock;
/* XXX: make safe guess about sizes */
-#define MAX_OP_PER_INSTR 208
+#define MAX_OP_PER_INSTR 266
#if HOST_LONG_BITS == 32
#define MAX_OPC_PARAM_PER_ARG 2
@@ -80,16 +80,16 @@ void restore_state_to_opc(CPUArchState *env, struct TranslationBlock *tb,
void cpu_gen_init(void);
int cpu_gen_code(CPUArchState *env, struct TranslationBlock *tb,
int *gen_code_size_ptr);
-bool cpu_restore_state(CPUArchState *env, uintptr_t searched_pc);
+bool cpu_restore_state(CPUState *cpu, uintptr_t searched_pc);
void page_size_init(void);
-void QEMU_NORETURN cpu_resume_from_signal(CPUArchState *env1, void *puc);
-void QEMU_NORETURN cpu_io_recompile(CPUArchState *env, uintptr_t retaddr);
-TranslationBlock *tb_gen_code(CPUArchState *env,
+void QEMU_NORETURN cpu_resume_from_signal(CPUState *cpu, void *puc);
+void QEMU_NORETURN cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
+TranslationBlock *tb_gen_code(CPUState *cpu,
target_ulong pc, target_ulong cs_base, int flags,
int cflags);
void cpu_exec_init(CPUArchState *env);
-void QEMU_NORETURN cpu_loop_exit(CPUArchState *env1);
+void QEMU_NORETURN cpu_loop_exit(CPUState *cpu);
int page_unprotect(target_ulong address, uintptr_t pc, void *puc);
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
int is_cpu_write_access);
@@ -98,18 +98,18 @@ void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end,
#if !defined(CONFIG_USER_ONLY)
void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as);
/* cputlb.c */
-void tlb_flush_page(CPUArchState *env, target_ulong addr);
-void tlb_flush(CPUArchState *env, int flush_global);
-void tlb_set_page(CPUArchState *env, target_ulong vaddr,
+void tlb_flush_page(CPUState *cpu, target_ulong addr);
+void tlb_flush(CPUState *cpu, int flush_global);
+void tlb_set_page(CPUState *cpu, target_ulong vaddr,
hwaddr paddr, int prot,
int mmu_idx, target_ulong size);
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr);
#else
-static inline void tlb_flush_page(CPUArchState *env, target_ulong addr)
+static inline void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
}
-static inline void tlb_flush(CPUArchState *env, int flush_global)
+static inline void tlb_flush(CPUState *cpu, int flush_global)
{
}
#endif
@@ -332,7 +332,7 @@ bool io_mem_read(struct MemoryRegion *mr, hwaddr addr,
bool io_mem_write(struct MemoryRegion *mr, hwaddr addr,
uint64_t value, unsigned size);
-void tlb_fill(CPUArchState *env1, target_ulong addr, int is_write, int mmu_idx,
+void tlb_fill(CPUState *cpu, target_ulong addr, int is_write, int mmu_idx,
uintptr_t retaddr);
uint8_t helper_ldb_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
@@ -380,20 +380,25 @@ extern int singlestep;
/* cpu-exec.c */
extern volatile sig_atomic_t exit_request;
-/* Deterministic execution requires that IO only be performed on the last
- instruction of a TB so that interrupts take effect immediately. */
-static inline int can_do_io(CPUArchState *env)
+/**
+ * cpu_can_do_io:
+ * @cpu: The CPU for which to check IO.
+ *
+ * Deterministic execution requires that IO only be performed on the last
+ * instruction of a TB so that interrupts take effect immediately.
+ *
+ * Returns: %true if memory-mapped IO is safe, %false otherwise.
+ */
+static inline bool cpu_can_do_io(CPUState *cpu)
{
- CPUState *cpu = ENV_GET_CPU(env);
-
if (!use_icount) {
- return 1;
+ return true;
}
/* If not executing code then assume we are ok. */
if (cpu->current_tb == NULL) {
- return 1;
+ return true;
}
- return env->can_do_io != 0;
+ return cpu->can_do_io != 0;
}
#endif
diff --git a/include/exec/gen-icount.h b/include/exec/gen-icount.h
index 39a6b61e4f..da53395de6 100644
--- a/include/exec/gen-icount.h
+++ b/include/exec/gen-icount.h
@@ -26,13 +26,15 @@ static inline void gen_tb_start(void)
icount_label = gen_new_label();
count = tcg_temp_local_new_i32();
- tcg_gen_ld_i32(count, cpu_env, offsetof(CPUArchState, icount_decr.u32));
+ tcg_gen_ld_i32(count, cpu_env,
+ -ENV_OFFSET + offsetof(CPUState, icount_decr.u32));
/* This is a horrid hack to allow fixing up the value later. */
icount_arg = tcg_ctx.gen_opparam_ptr + 1;
tcg_gen_subi_i32(count, count, 0xdeadbeef);
tcg_gen_brcondi_i32(TCG_COND_LT, count, 0, icount_label);
- tcg_gen_st16_i32(count, cpu_env, offsetof(CPUArchState, icount_decr.u16.low));
+ tcg_gen_st16_i32(count, cpu_env,
+ -ENV_OFFSET + offsetof(CPUState, icount_decr.u16.low));
tcg_temp_free_i32(count);
}
@@ -51,14 +53,14 @@ static void gen_tb_end(TranslationBlock *tb, int num_insns)
static inline void gen_io_start(void)
{
TCGv_i32 tmp = tcg_const_i32(1);
- tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUArchState, can_do_io));
+ tcg_gen_st_i32(tmp, cpu_env, -ENV_OFFSET + offsetof(CPUState, can_do_io));
tcg_temp_free_i32(tmp);
}
static inline void gen_io_end(void)
{
TCGv_i32 tmp = tcg_const_i32(0);
- tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUArchState, can_do_io));
+ tcg_gen_st_i32(tmp, cpu_env, -ENV_OFFSET + offsetof(CPUState, can_do_io));
tcg_temp_free_i32(tmp);
}
diff --git a/include/exec/softmmu_exec.h b/include/exec/softmmu_exec.h
index 6fde154527..470db20174 100644
--- a/include/exec/softmmu_exec.h
+++ b/include/exec/softmmu_exec.h
@@ -162,3 +162,55 @@
#define stw(p, v) stw_data(p, v)
#define stl(p, v) stl_data(p, v)
#define stq(p, v) stq_data(p, v)
+
+/**
+ * tlb_vaddr_to_host:
+ * @env: CPUArchState
+ * @addr: guest virtual address to look up
+ * @access_type: 0 for read, 1 for write, 2 for execute
+ * @mmu_idx: MMU index to use for lookup
+ *
+ * Look up the specified guest virtual address in the TCG softmmu TLB.
+ * If the TLB contains a host virtual address suitable for direct RAM
+ * access, then return it. Otherwise (TLB miss, TLB entry is for an
+ * I/O access, etc) return NULL.
+ *
+ * This is the equivalent of the initial fast-path code used by
+ * TCG backends for guest load and store accesses.
+ */
+static inline void *tlb_vaddr_to_host(CPUArchState *env, target_ulong addr,
+ int access_type, int mmu_idx)
+{
+ int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+ CPUTLBEntry *tlbentry = &env->tlb_table[mmu_idx][index];
+ target_ulong tlb_addr;
+ uintptr_t haddr;
+
+ switch (access_type) {
+ case 0:
+ tlb_addr = tlbentry->addr_read;
+ break;
+ case 1:
+ tlb_addr = tlbentry->addr_write;
+ break;
+ case 2:
+ tlb_addr = tlbentry->addr_code;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ if ((addr & TARGET_PAGE_MASK)
+ != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
+ /* TLB entry is for a different page */
+ return NULL;
+ }
+
+ if (tlb_addr & ~TARGET_PAGE_MASK) {
+ /* IO access */
+ return NULL;
+ }
+
+ haddr = addr + env->tlb_table[mmu_idx][index].addend;
+ return (void *)haddr;
+}
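A hedged usage sketch for the helper above: probe the TLB for a direct host mapping before copying a small guest buffer, and fall back to the caller's slow path on a TLB miss or an I/O-backed page. The try_read_guest_block() helper and its signature are hypothetical.

static bool try_read_guest_block(CPUArchState *env, target_ulong vaddr,
                                 int mmu_idx, void *dest, size_t len)
{
    void *host;

    /* A TLB entry only covers a single guest page. */
    if ((vaddr & ~TARGET_PAGE_MASK) + len > TARGET_PAGE_SIZE) {
        return false;
    }
    host = tlb_vaddr_to_host(env, vaddr, 0 /* read */, mmu_idx);
    if (!host) {
        return false;   /* TLB miss or I/O page: caller uses the slow path */
    }
    memcpy(dest, host, len);
    return true;
}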
diff --git a/include/exec/softmmu_template.h b/include/exec/softmmu_template.h
index c14a04d7e9..73ed7cf921 100644
--- a/include/exec/softmmu_template.h
+++ b/include/exec/softmmu_template.h
@@ -126,12 +126,12 @@ static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
MemoryRegion *mr = iotlb_to_region(cpu->as, physaddr);
physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
- env->mem_io_pc = retaddr;
- if (mr != &io_mem_rom && mr != &io_mem_notdirty && !can_do_io(env)) {
- cpu_io_recompile(env, retaddr);
+ cpu->mem_io_pc = retaddr;
+ if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu_can_do_io(cpu)) {
+ cpu_io_recompile(cpu, retaddr);
}
- env->mem_io_vaddr = addr;
+ cpu->mem_io_vaddr = addr;
io_mem_read(mr, physaddr, &val, 1 << SHIFT);
return val;
}
@@ -158,7 +158,7 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
}
#endif
- tlb_fill(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
+ tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
}
@@ -240,7 +240,7 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
}
#endif
- tlb_fill(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
+ tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
}
@@ -333,12 +333,12 @@ static inline void glue(io_write, SUFFIX)(CPUArchState *env,
MemoryRegion *mr = iotlb_to_region(cpu->as, physaddr);
physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
- if (mr != &io_mem_rom && mr != &io_mem_notdirty && !can_do_io(env)) {
- cpu_io_recompile(env, retaddr);
+ if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu_can_do_io(cpu)) {
+ cpu_io_recompile(cpu, retaddr);
}
- env->mem_io_vaddr = addr;
- env->mem_io_pc = retaddr;
+ cpu->mem_io_vaddr = addr;
+ cpu->mem_io_pc = retaddr;
io_mem_write(mr, physaddr, val, 1 << SHIFT);
}
@@ -360,7 +360,7 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
}
#endif
- tlb_fill(env, addr, 1, mmu_idx, retaddr);
+ tlb_fill(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr);
tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
}
@@ -436,7 +436,7 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
}
#endif
- tlb_fill(env, addr, 1, mmu_idx, retaddr);
+ tlb_fill(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr);
tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
}
diff --git a/include/fpu/softfloat.h b/include/fpu/softfloat.h
index 4b4df88527..4b3090ca74 100644
--- a/include/fpu/softfloat.h
+++ b/include/fpu/softfloat.h
@@ -245,6 +245,13 @@ INLINE flag get_default_nan_mode(float_status *status)
void float_raise( int8 flags STATUS_PARAM);
/*----------------------------------------------------------------------------
+| If `a' is denormal and we are in flush-to-zero mode then set the
+| input-denormal exception and return zero. Otherwise just return the value.
+*----------------------------------------------------------------------------*/
+float32 float32_squash_input_denormal(float32 a STATUS_PARAM);
+float64 float64_squash_input_denormal(float64 a STATUS_PARAM);
+
+/*----------------------------------------------------------------------------
| Options to indicate which negations to perform in float*_muladd()
| Using these differs from negating an input or output before calling
| the muladd function in that this means that a NaN doesn't have its
@@ -335,6 +342,7 @@ uint32 float32_to_uint32( float32 STATUS_PARAM );
uint32 float32_to_uint32_round_to_zero( float32 STATUS_PARAM );
int64 float32_to_int64( float32 STATUS_PARAM );
uint64 float32_to_uint64(float32 STATUS_PARAM);
+uint64 float32_to_uint64_round_to_zero(float32 STATUS_PARAM);
int64 float32_to_int64_round_to_zero( float32 STATUS_PARAM );
float64 float32_to_float64( float32 STATUS_PARAM );
floatx80 float32_to_floatx80( float32 STATUS_PARAM );
diff --git a/include/hw/acpi/cpu_hotplug_defs.h b/include/hw/acpi/cpu_hotplug_defs.h
index 2725b50aac..9f33663511 100644
--- a/include/hw/acpi/cpu_hotplug_defs.h
+++ b/include/hw/acpi/cpu_hotplug_defs.h
@@ -17,7 +17,15 @@
* between C and ASL code.
*/
#define ACPI_CPU_HOTPLUG_STATUS 4
+
+/* Limit for CPU arch IDs for CPU hotplug. All hotpluggable CPUs should
+ * have CPUClass.get_arch_id() < ACPI_CPU_HOTPLUG_ID_LIMIT.
+ */
+#define ACPI_CPU_HOTPLUG_ID_LIMIT 256
+
+/* 256 CPU IDs, 8 bits per entry: */
#define ACPI_GPE_PROC_LEN 32
+
#define ICH9_CPU_HOTPLUG_IO_BASE 0x0CD8
#define PIIX4_CPU_HOTPLUG_IO_BASE 0xaf00
diff --git a/include/hw/boards.h b/include/hw/boards.h
index c2096e6ba2..dd2c70da36 100644
--- a/include/hw/boards.h
+++ b/include/hw/boards.h
@@ -4,8 +4,8 @@
#define HW_BOARDS_H
#include "sysemu/blockdev.h"
-#include "sysemu/qemumachine.h"
#include "hw/qdev.h"
+#include "qom/object.h"
typedef struct QEMUMachineInitArgs {
const QEMUMachine *machine;
@@ -50,9 +50,60 @@ struct QEMUMachine {
const char *hw_version;
};
+#define TYPE_MACHINE_SUFFIX "-machine"
int qemu_register_machine(QEMUMachine *m);
-QEMUMachine *find_default_machine(void);
-extern QEMUMachine *current_machine;
+#define TYPE_MACHINE "machine"
+#undef MACHINE /* BSD defines it and QEMU does not use it */
+#define MACHINE(obj) \
+ OBJECT_CHECK(MachineState, (obj), TYPE_MACHINE)
+#define MACHINE_GET_CLASS(obj) \
+ OBJECT_GET_CLASS(MachineClass, (obj), TYPE_MACHINE)
+#define MACHINE_CLASS(klass) \
+ OBJECT_CLASS_CHECK(MachineClass, (klass), TYPE_MACHINE)
+
+typedef struct MachineState MachineState;
+typedef struct MachineClass MachineClass;
+
+MachineClass *find_default_machine(void);
+extern MachineState *current_machine;
+
+/**
+ * MachineClass:
+ * @qemu_machine: #QEMUMachine
+ */
+struct MachineClass {
+ /*< private >*/
+ ObjectClass parent_class;
+ /*< public >*/
+
+ QEMUMachine *qemu_machine;
+};
+
+/**
+ * MachineState:
+ */
+struct MachineState {
+ /*< private >*/
+ Object parent_obj;
+ /*< public >*/
+
+ char *accel;
+ bool kernel_irqchip;
+ int kvm_shadow_mem;
+ char *kernel;
+ char *initrd;
+ char *append;
+ char *dtb;
+ char *dumpdtb;
+ int phandle_start;
+ char *dt_compatible;
+ bool dump_guest_core;
+ bool mem_merge;
+ bool usb;
+ char *firmware;
+
+ QEMUMachineInitArgs init_args;
+};
#endif
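As a small illustration of the QOM-ified machine state above, board code can now read the parsed -machine options straight from current_machine instead of re-parsing option strings. The reporting helper below is hypothetical; the field names come from the MachineState struct in this hunk.

static void report_boot_params(void)
{
    MachineState *machine = current_machine;

    if (machine->kernel) {
        error_report("kernel image: %s", machine->kernel);
    }
    if (machine->dtb) {
        error_report("device tree: %s", machine->dtb);
    }
    if (!machine->usb) {
        error_report("USB disabled for this machine");
    }
}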
diff --git a/include/hw/fw-path-provider.h b/include/hw/fw-path-provider.h
new file mode 100644
index 0000000000..7afaec0b1d
--- /dev/null
+++ b/include/hw/fw-path-provider.h
@@ -0,0 +1,48 @@
+/*
+ * Firmware path provider class and helper definitions.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef FW_PATH_PROVIDER_H
+#define FW_PATH_PROVIDER_H 1
+
+#include "qemu-common.h"
+#include "qom/object.h"
+
+#define TYPE_FW_PATH_PROVIDER "fw-path-provider"
+
+#define FW_PATH_PROVIDER_CLASS(klass) \
+ OBJECT_CLASS_CHECK(FWPathProviderClass, (klass), TYPE_FW_PATH_PROVIDER)
+#define FW_PATH_PROVIDER_GET_CLASS(obj) \
+ OBJECT_GET_CLASS(FWPathProviderClass, (obj), TYPE_FW_PATH_PROVIDER)
+#define FW_PATH_PROVIDER(obj) \
+ INTERFACE_CHECK(FWPathProvider, (obj), TYPE_FW_PATH_PROVIDER)
+
+typedef struct FWPathProvider {
+ Object parent_obj;
+} FWPathProvider;
+
+typedef struct FWPathProviderClass {
+ InterfaceClass parent_class;
+
+ char *(*get_dev_path)(FWPathProvider *p, BusState *bus, DeviceState *dev);
+} FWPathProviderClass;
+
+char *fw_path_provider_get_dev_path(FWPathProvider *p, BusState *bus,
+ DeviceState *dev);
+char *fw_path_provider_try_get_dev_path(Object *o, BusState *bus,
+ DeviceState *dev);
+
+#endif /* FW_PATH_PROVIDER_H */
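A sketch of how a board could implement the new interface so qdev can ask it for firmware device paths. The machine type name and callback below are hypothetical, but the class_init/interfaces wiring follows the usual QOM interface pattern, and the macros come from the headers added in this patch.

static char *demo_machine_get_fw_dev_path(FWPathProvider *p, BusState *bus,
                                          DeviceState *dev)
{
    /* Returning NULL lets the caller fall back to the default path. */
    return NULL;
}

static void demo_machine_class_init(ObjectClass *oc, void *data)
{
    FWPathProviderClass *fwc = FW_PATH_PROVIDER_CLASS(oc);

    fwc->get_dev_path = demo_machine_get_fw_dev_path;
}

static const TypeInfo demo_machine_info = {
    .name          = "demo" TYPE_MACHINE_SUFFIX,
    .parent        = TYPE_MACHINE,
    .class_init    = demo_machine_class_init,
    .interfaces    = (InterfaceInfo[]) {
        { TYPE_FW_PATH_PROVIDER },
        { }
    },
};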
diff --git a/include/hw/net/allwinner_emac.h b/include/hw/net/allwinner_emac.h
index a5e944af05..5ae7717300 100644
--- a/include/hw/net/allwinner_emac.h
+++ b/include/hw/net/allwinner_emac.h
@@ -144,6 +144,7 @@
#define MII_BMSR_10T_FD (1 << 12)
#define MII_BMSR_10T_HD (1 << 11)
#define MII_BMSR_MFPS (1 << 6)
+#define MII_BMSR_AN_COMP (1 << 5)
#define MII_BMSR_AUTONEG (1 << 3)
#define MII_BMSR_LINK_ST (1 << 2)
diff --git a/include/hw/ppc/ppc.h b/include/hw/ppc/ppc.h
index 835418aeb0..d71bd07497 100644
--- a/include/hw/ppc/ppc.h
+++ b/include/hw/ppc/ppc.h
@@ -44,6 +44,9 @@ struct ppc_tb_t {
#define PPC_DECR_ZERO_TRIGGERED (1 << 3) /* Decr interrupt triggered when
* the decrementer reaches zero.
*/
+#define PPC_DECR_UNDERFLOW_LEVEL (1 << 4) /* Decr interrupt active when
+ * the most significant bit is 1.
+ */
uint64_t cpu_ppc_get_tb(ppc_tb_t *tb_env, uint64_t vmclk, int64_t tb_offset);
clk_setup_cb cpu_ppc_tb_init (CPUPPCState *env, uint32_t freq);
diff --git a/include/hw/ppc/spapr.h b/include/hw/ppc/spapr.h
index 449fc7ca2d..5fdac1e009 100644
--- a/include/hw/ppc/spapr.h
+++ b/include/hw/ppc/spapr.h
@@ -153,8 +153,13 @@ typedef struct sPAPREnvironment {
#define H_PP1 (1ULL<<(63-62))
#define H_PP2 (1ULL<<(63-63))
-/* H_SET_MODE flags */
-#define H_SET_MODE_ENDIAN 4
+/* Values for 2nd argument to H_SET_MODE */
+#define H_SET_MODE_RESOURCE_SET_CIABR 1
+#define H_SET_MODE_RESOURCE_SET_DAWR 2
+#define H_SET_MODE_RESOURCE_ADDR_TRANS_MODE 3
+#define H_SET_MODE_RESOURCE_LE 4
+
+/* Flags for H_SET_MODE_RESOURCE_LE */
#define H_SET_MODE_ENDIAN_BIG 0
#define H_SET_MODE_ENDIAN_LITTLE 1
diff --git a/include/hw/qdev-core.h b/include/hw/qdev-core.h
index 1ed0691716..dbe473c344 100644
--- a/include/hw/qdev-core.h
+++ b/include/hw/qdev-core.h
@@ -36,6 +36,8 @@ typedef int (*qdev_event)(DeviceState *dev);
typedef void (*qdev_resetfn)(DeviceState *dev);
typedef void (*DeviceRealize)(DeviceState *dev, Error **errp);
typedef void (*DeviceUnrealize)(DeviceState *dev, Error **errp);
+typedef void (*BusRealize)(BusState *bus, Error **errp);
+typedef void (*BusUnrealize)(BusState *bus, Error **errp);
struct VMStateDescription;
@@ -174,6 +176,9 @@ struct BusClass {
*/
char *(*get_fw_dev_path)(DeviceState *dev);
void (*reset)(BusState *bus);
+ BusRealize realize;
+ BusUnrealize unrealize;
+
/* maximum devices allowed on the bus, 0: no limit. */
int max_dev;
/* number of automatically allocated bus ids (e.g. ide.0) */
@@ -199,6 +204,7 @@ struct BusState {
int allow_hotplug;
HotplugHandler *hotplug_handler;
int max_index;
+ bool realized;
QTAILQ_HEAD(ChildrenHead, BusChild) children;
QLIST_ENTRY(BusState) sibling;
};
diff --git a/include/hw/qdev-properties.h b/include/hw/qdev-properties.h
index 0c0babfa6a..c46e908d71 100644
--- a/include/hw/qdev-properties.h
+++ b/include/hw/qdev-properties.h
@@ -22,6 +22,7 @@ extern PropertyInfo qdev_prop_bios_chs_trans;
extern PropertyInfo qdev_prop_drive;
extern PropertyInfo qdev_prop_netdev;
extern PropertyInfo qdev_prop_vlan;
+extern PropertyInfo qdev_prop_iothread;
extern PropertyInfo qdev_prop_pci_devfn;
extern PropertyInfo qdev_prop_blocksize;
extern PropertyInfo qdev_prop_pci_host_devaddr;
@@ -142,6 +143,8 @@ extern PropertyInfo qdev_prop_arraylen;
DEFINE_PROP(_n, _s, _f, qdev_prop_vlan, NICPeers)
#define DEFINE_PROP_DRIVE(_n, _s, _f) \
DEFINE_PROP(_n, _s, _f, qdev_prop_drive, BlockDriverState *)
+#define DEFINE_PROP_IOTHREAD(_n, _s, _f) \
+ DEFINE_PROP(_n, _s, _f, qdev_prop_iothread, IOThread *)
#define DEFINE_PROP_MACADDR(_n, _s, _f) \
DEFINE_PROP(_n, _s, _f, qdev_prop_macaddr, MACAddr)
#define DEFINE_PROP_LOSTTICKPOLICY(_n, _s, _f, _d) \
@@ -201,4 +204,15 @@ void qdev_property_add_static(DeviceState *dev, Property *prop, Error **errp);
*/
void qdev_prop_set_after_realize(DeviceState *dev, const char *name,
Error **errp);
+
+/**
+ * qdev_prop_allow_set_link_before_realize:
+ *
+ * Set the #Error object if an attempt is made to set the link after realize.
+ * This function should be used as the check() argument to
+ * object_property_add_link().
+ */
+void qdev_prop_allow_set_link_before_realize(Object *obj, const char *name,
+ Object *val, Error **errp);
+
#endif
diff --git a/include/hw/scsi/scsi.h b/include/hw/scsi/scsi.h
index e5fc39d504..1adb54906e 100644
--- a/include/hw/scsi/scsi.h
+++ b/include/hw/scsi/scsi.h
@@ -31,6 +31,7 @@ typedef struct SCSISense {
uint8_t ascq;
} SCSISense;
+#define SCSI_SENSE_BUF_SIZE_OLD 96
#define SCSI_SENSE_BUF_SIZE 252
struct SCSICommand {
diff --git a/include/hw/ssi.h b/include/hw/ssi.h
index fdae317295..6c13fb2e44 100644
--- a/include/hw/ssi.h
+++ b/include/hw/ssi.h
@@ -56,13 +56,12 @@ typedef struct SSISlaveClass {
} SSISlaveClass;
struct SSISlave {
- DeviceState qdev;
+ DeviceState parent_obj;
/* Chip select state */
bool cs;
};
-#define SSI_SLAVE_FROM_QDEV(dev) DO_UPCAST(SSISlave, qdev, dev)
#define FROM_SSI_SLAVE(type, dev) DO_UPCAST(type, ssidev, dev)
extern const VMStateDescription vmstate_ssi_slave;
diff --git a/include/hw/timer/allwinner-a10-pit.h b/include/hw/timer/allwinner-a10-pit.h
index 15efab8b5f..770bdc03c1 100644
--- a/include/hw/timer/allwinner-a10-pit.h
+++ b/include/hw/timer/allwinner-a10-pit.h
@@ -35,13 +35,22 @@
#define AW_A10_PIT_DEFAULT_CLOCK 0x4
-typedef struct AwA10PITState {
+typedef struct AwA10PITState AwA10PITState;
+
+typedef struct AwA10TimerContext {
+ AwA10PITState *container;
+ int index;
+} AwA10TimerContext;
+
+struct AwA10PITState {
/*< private >*/
SysBusDevice parent_obj;
/*< public >*/
qemu_irq irq[AW_A10_PIT_TIMER_NR];
ptimer_state * timer[AW_A10_PIT_TIMER_NR];
+ AwA10TimerContext timer_context[AW_A10_PIT_TIMER_NR];
MemoryRegion iomem;
+ uint32_t clk_freq[4];
uint32_t irq_enable;
uint32_t irq_status;
@@ -53,6 +62,6 @@ typedef struct AwA10PITState {
uint32_t count_lo;
uint32_t count_hi;
uint32_t count_ctl;
-} AwA10PITState;
+};
#endif
diff --git a/include/hw/virtio/virtio-blk.h b/include/hw/virtio/virtio-blk.h
index 41885da1a0..e4c41ff2ef 100644
--- a/include/hw/virtio/virtio-blk.h
+++ b/include/hw/virtio/virtio-blk.h
@@ -16,6 +16,7 @@
#include "hw/virtio/virtio.h"
#include "hw/block/block.h"
+#include "sysemu/iothread.h"
#define TYPE_VIRTIO_BLK "virtio-blk-device"
#define VIRTIO_BLK(obj) \
@@ -106,6 +107,7 @@ struct virtio_scsi_inhdr
struct VirtIOBlkConf
{
BlockConf conf;
+ IOThread *iothread;
char *serial;
uint32_t scsi;
uint32_t config_wce;
@@ -140,13 +142,15 @@ typedef struct VirtIOBlock {
DEFINE_BLOCK_CHS_PROPERTIES(_state, _field.conf), \
DEFINE_PROP_STRING("serial", _state, _field.serial), \
DEFINE_PROP_BIT("config-wce", _state, _field.config_wce, 0, true), \
- DEFINE_PROP_BIT("scsi", _state, _field.scsi, 0, true)
+ DEFINE_PROP_BIT("scsi", _state, _field.scsi, 0, true), \
+ DEFINE_PROP_IOTHREAD("x-iothread", _state, _field.iothread)
#else
#define DEFINE_VIRTIO_BLK_PROPERTIES(_state, _field) \
DEFINE_BLOCK_PROPERTIES(_state, _field.conf), \
DEFINE_BLOCK_CHS_PROPERTIES(_state, _field.conf), \
DEFINE_PROP_STRING("serial", _state, _field.serial), \
- DEFINE_PROP_BIT("config-wce", _state, _field.config_wce, 0, true)
+ DEFINE_PROP_BIT("config-wce", _state, _field.config_wce, 0, true), \
+ DEFINE_PROP_IOTHREAD("x-iothread", _state, _field.iothread)
#endif /* __linux__ */
void virtio_blk_set_conf(DeviceState *dev, VirtIOBlkConf *blk);
diff --git a/include/hw/virtio/virtio-serial.h b/include/hw/virtio/virtio-serial.h
index 1d2040b245..4746312a83 100644
--- a/include/hw/virtio/virtio-serial.h
+++ b/include/hw/virtio/virtio-serial.h
@@ -81,15 +81,15 @@ typedef struct VirtIOSerialPortClass {
bool is_console;
/*
- * The per-port (or per-app) init function that's called when a
+ * The per-port (or per-app) realize function that's called when a
* new device is found on the bus.
*/
- int (*init)(VirtIOSerialPort *port);
+ DeviceRealize realize;
/*
- * Per-port exit function that's called when a port gets
+ * Per-port unrealize function that's called when a port gets
* hot-unplugged or removed.
*/
- int (*exit)(VirtIOSerialPort *port);
+ DeviceUnrealize unrealize;
/* Callbacks for guest events */
/* Guest opened/closed device. */
diff --git a/include/hw/xen/xen.h b/include/hw/xen/xen.h
index e1818213b2..9d549fc83d 100644
--- a/include/hw/xen/xen.h
+++ b/include/hw/xen/xen.h
@@ -10,7 +10,6 @@
#include "hw/irq.h"
#include "qemu-common.h"
-#include "sysemu/qemumachine.h"
/* xen-machine.c */
enum xen_mode {
diff --git a/include/migration/vmstate.h b/include/migration/vmstate.h
index ded8e2302f..e7e170561d 100644
--- a/include/migration/vmstate.h
+++ b/include/migration/vmstate.h
@@ -650,6 +650,9 @@ extern const VMStateInfo vmstate_info_bitmap;
#define VMSTATE_UINT8_ARRAY(_f, _s, _n) \
VMSTATE_UINT8_ARRAY_V(_f, _s, _n, 0)
+#define VMSTATE_UINT8_SUB_ARRAY(_f, _s, _start, _num) \
+ VMSTATE_SUB_ARRAY(_f, _s, _start, _num, 0, vmstate_info_uint8, uint8_t)
+
#define VMSTATE_UINT8_2DARRAY(_f, _s, _n1, _n2) \
VMSTATE_UINT8_2DARRAY_V(_f, _s, _n1, _n2, 0)
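For illustration, the new sub-array macro migrates only a slice of a byte array; the device struct and field below are made up.

typedef struct DemoDevice {
    uint8_t regs[16];
} DemoDevice;

static const VMStateDescription vmstate_demo_device = {
    .name = "demo-device",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        /* migrate regs[4] .. regs[11] only: start = 4, num = 8 */
        VMSTATE_UINT8_SUB_ARRAY(regs, DemoDevice, 4, 8),
        VMSTATE_END_OF_LIST()
    }
};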
diff --git a/include/monitor/monitor.h b/include/monitor/monitor.h
index a49ea11eb4..42d867155b 100644
--- a/include/monitor/monitor.h
+++ b/include/monitor/monitor.h
@@ -79,7 +79,6 @@ int monitor_handle_fd_param(Monitor *mon, const char *fdname);
void monitor_vprintf(Monitor *mon, const char *fmt, va_list ap)
GCC_FMT_ATTR(2, 0);
void monitor_printf(Monitor *mon, const char *fmt, ...) GCC_FMT_ATTR(2, 3);
-void monitor_print_filename(Monitor *mon, const char *filename);
void monitor_flush(Monitor *mon);
int monitor_set_cpu(int cpu_index);
int monitor_get_cpu_index(void);
diff --git a/include/qapi/qmp/qerror.h b/include/qapi/qmp/qerror.h
index da75abf6d6..902d1a7a18 100644
--- a/include/qapi/qmp/qerror.h
+++ b/include/qapi/qmp/qerror.h
@@ -12,7 +12,6 @@
#ifndef QERROR_H
#define QERROR_H
-#include "qapi/qmp/qdict.h"
#include "qapi/qmp/qstring.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
@@ -35,51 +34,30 @@ void qerror_report_err(Error *err);
* Please keep the definitions in alphabetical order.
* Use scripts/check-qerror.sh to check.
*/
-#define QERR_ADD_CLIENT_FAILED \
- ERROR_CLASS_GENERIC_ERROR, "Could not add client"
-
-#define QERR_AMBIGUOUS_PATH \
- ERROR_CLASS_GENERIC_ERROR, "Path '%s' does not uniquely identify an object"
-
-#define QERR_BAD_BUS_FOR_DEVICE \
- ERROR_CLASS_GENERIC_ERROR, "Device '%s' can't go on a %s bus"
-
#define QERR_BASE_NOT_FOUND \
ERROR_CLASS_GENERIC_ERROR, "Base '%s' not found"
#define QERR_BLOCK_JOB_NOT_ACTIVE \
ERROR_CLASS_DEVICE_NOT_ACTIVE, "No active block job on device '%s'"
-#define QERR_BLOCK_JOB_PAUSED \
- ERROR_CLASS_GENERIC_ERROR, "The block job for device '%s' is currently paused"
-
#define QERR_BLOCK_JOB_NOT_READY \
ERROR_CLASS_GENERIC_ERROR, "The active block job for device '%s' cannot be completed"
#define QERR_BLOCK_FORMAT_FEATURE_NOT_SUPPORTED \
ERROR_CLASS_GENERIC_ERROR, "Block format '%s' used by device '%s' does not support feature '%s'"
-#define QERR_BUFFER_OVERRUN \
- ERROR_CLASS_GENERIC_ERROR, "An internal buffer overran"
-
#define QERR_BUS_NO_HOTPLUG \
ERROR_CLASS_GENERIC_ERROR, "Bus '%s' does not support hotplugging"
#define QERR_BUS_NOT_FOUND \
ERROR_CLASS_GENERIC_ERROR, "Bus '%s' not found"
-#define QERR_COMMAND_DISABLED \
- ERROR_CLASS_GENERIC_ERROR, "The command %s has been disabled for this instance"
-
#define QERR_COMMAND_NOT_FOUND \
ERROR_CLASS_COMMAND_NOT_FOUND, "The command %s has not been found"
#define QERR_DEVICE_ENCRYPTED \
ERROR_CLASS_DEVICE_ENCRYPTED, "'%s' (%s) is encrypted"
-#define QERR_DEVICE_FEATURE_BLOCKS_MIGRATION \
- ERROR_CLASS_GENERIC_ERROR, "Migration is disabled when using feature '%s' in device '%s'"
-
#define QERR_DEVICE_HAS_NO_MEDIUM \
ERROR_CLASS_GENERIC_ERROR, "Device '%s' has no medium"
@@ -92,15 +70,6 @@ void qerror_report_err(Error *err);
#define QERR_DEVICE_IS_READ_ONLY \
ERROR_CLASS_GENERIC_ERROR, "Device '%s' is read only"
-#define QERR_DEVICE_LOCKED \
- ERROR_CLASS_GENERIC_ERROR, "Device '%s' is locked"
-
-#define QERR_DEVICE_MULTIPLE_BUSSES \
- ERROR_CLASS_GENERIC_ERROR, "Device '%s' has multiple child busses"
-
-#define QERR_DEVICE_NO_BUS \
- ERROR_CLASS_GENERIC_ERROR, "Device '%s' has no child bus"
-
#define QERR_DEVICE_NO_HOTPLUG \
ERROR_CLASS_GENERIC_ERROR, "Device '%s' does not support hotplugging"
@@ -113,12 +82,6 @@ void qerror_report_err(Error *err);
#define QERR_DEVICE_NOT_FOUND \
ERROR_CLASS_DEVICE_NOT_FOUND, "Device '%s' not found"
-#define QERR_DEVICE_NOT_REMOVABLE \
- ERROR_CLASS_GENERIC_ERROR, "Device '%s' is not removable"
-
-#define QERR_DUPLICATE_ID \
- ERROR_CLASS_GENERIC_ERROR, "Duplicate ID '%s' for %s"
-
#define QERR_FD_NOT_FOUND \
ERROR_CLASS_GENERIC_ERROR, "File descriptor named '%s' not found"
@@ -131,15 +94,9 @@ void qerror_report_err(Error *err);
#define QERR_INVALID_BLOCK_FORMAT \
ERROR_CLASS_GENERIC_ERROR, "Invalid block format '%s'"
-#define QERR_INVALID_OPTION_GROUP \
- ERROR_CLASS_GENERIC_ERROR, "There is no option group '%s'"
-
#define QERR_INVALID_PARAMETER \
ERROR_CLASS_GENERIC_ERROR, "Invalid parameter '%s'"
-#define QERR_INVALID_PARAMETER_COMBINATION \
- ERROR_CLASS_GENERIC_ERROR, "Invalid parameter combination"
-
#define QERR_INVALID_PARAMETER_TYPE \
ERROR_CLASS_GENERIC_ERROR, "Invalid parameter type for '%s', expected: %s"
@@ -152,9 +109,6 @@ void qerror_report_err(Error *err);
#define QERR_IO_ERROR \
ERROR_CLASS_GENERIC_ERROR, "An IO error has occurred"
-#define QERR_JSON_PARSE_ERROR \
- ERROR_CLASS_GENERIC_ERROR, "JSON parse error, %s"
-
#define QERR_JSON_PARSING \
ERROR_CLASS_GENERIC_ERROR, "Invalid JSON syntax"
@@ -164,45 +118,21 @@ void qerror_report_err(Error *err);
#define QERR_MIGRATION_ACTIVE \
ERROR_CLASS_GENERIC_ERROR, "There's a migration process in progress"
-#define QERR_MIGRATION_NOT_SUPPORTED \
- ERROR_CLASS_GENERIC_ERROR, "State blocked by non-migratable device '%s'"
-
#define QERR_MISSING_PARAMETER \
ERROR_CLASS_GENERIC_ERROR, "Parameter '%s' is missing"
-#define QERR_NO_BUS_FOR_DEVICE \
- ERROR_CLASS_GENERIC_ERROR, "No '%s' bus found for device '%s'"
-
-#define QERR_NOT_SUPPORTED \
- ERROR_CLASS_GENERIC_ERROR, "Not supported"
-
#define QERR_PERMISSION_DENIED \
ERROR_CLASS_GENERIC_ERROR, "Insufficient permission to perform this operation"
-#define QERR_PROPERTY_NOT_FOUND \
- ERROR_CLASS_GENERIC_ERROR, "Property '%s.%s' not found"
-
#define QERR_PROPERTY_VALUE_BAD \
ERROR_CLASS_GENERIC_ERROR, "Property '%s.%s' doesn't take value '%s'"
-#define QERR_PROPERTY_VALUE_IN_USE \
- ERROR_CLASS_GENERIC_ERROR, "Property '%s.%s' can't take value '%s', it's in use"
-
-#define QERR_PROPERTY_VALUE_NOT_FOUND \
- ERROR_CLASS_GENERIC_ERROR, "Property '%s.%s' can't find value '%s'"
-
-#define QERR_PROPERTY_VALUE_NOT_POWER_OF_2 \
- ERROR_CLASS_GENERIC_ERROR, "Property %s.%s doesn't take value '%" PRId64 "', it's not a power of 2"
-
#define QERR_PROPERTY_VALUE_OUT_OF_RANGE \
ERROR_CLASS_GENERIC_ERROR, "Property %s.%s doesn't take value %" PRId64 " (minimum: %" PRId64 ", maximum: %" PRId64 ")"
#define QERR_QGA_COMMAND_FAILED \
ERROR_CLASS_GENERIC_ERROR, "Guest agent command failed, error was '%s'"
-#define QERR_QGA_LOGGING_FAILED \
- ERROR_CLASS_GENERIC_ERROR, "Guest agent failed to log non-optional log statement"
-
#define QERR_QMP_BAD_INPUT_OBJECT \
ERROR_CLASS_GENERIC_ERROR, "Expected '%s' in QMP input"
@@ -212,15 +142,9 @@ void qerror_report_err(Error *err);
#define QERR_QMP_EXTRA_MEMBER \
ERROR_CLASS_GENERIC_ERROR, "QMP input object member '%s' is unexpected"
-#define QERR_RESET_REQUIRED \
- ERROR_CLASS_GENERIC_ERROR, "Resetting the Virtual Machine is required"
-
#define QERR_SET_PASSWD_FAILED \
ERROR_CLASS_GENERIC_ERROR, "Could not set password"
-#define QERR_TOO_MANY_FILES \
- ERROR_CLASS_GENERIC_ERROR, "Too many open files"
-
#define QERR_UNDEFINED_ERROR \
ERROR_CLASS_GENERIC_ERROR, "An undefined error has occurred"
@@ -230,9 +154,6 @@ void qerror_report_err(Error *err);
#define QERR_UNSUPPORTED \
ERROR_CLASS_GENERIC_ERROR, "this feature or command is not currently supported"
-#define QERR_VIRTFS_FEATURE_BLOCKS_MIGRATION \
- ERROR_CLASS_GENERIC_ERROR, "Migration is disabled when VirtFS export path '%s' is mounted in the guest using mount_tag '%s'"
-
#define QERR_SOCKET_CONNECT_FAILED \
ERROR_CLASS_GENERIC_ERROR, "Failed to connect to socket"
diff --git a/include/qemu-common.h b/include/qemu-common.h
index c8a58a873a..a998e8d36c 100644
--- a/include/qemu-common.h
+++ b/include/qemu-common.h
@@ -472,4 +472,6 @@ size_t buffer_find_nonzero_offset(const void *buf, size_t len);
*/
int parse_debug_env(const char *name, int max, int initial);
+const char *qemu_ether_ntoa(const MACAddr *mac);
+
#endif
diff --git a/include/qemu-io.h b/include/qemu-io.h
index 7e7c07c09b..5d6006f73b 100644
--- a/include/qemu-io.h
+++ b/include/qemu-io.h
@@ -38,6 +38,8 @@ typedef struct cmdinfo {
helpfunc_t help;
} cmdinfo_t;
+extern bool qemuio_misalign;
+
bool qemuio_command(BlockDriverState *bs, const char *cmd);
void qemuio_add_command(const cmdinfo_t *ci);
diff --git a/include/qemu/bswap.h b/include/qemu/bswap.h
index 0cb7c05554..0f9c6cf15d 100644
--- a/include/qemu/bswap.h
+++ b/include/qemu/bswap.h
@@ -11,6 +11,8 @@
# include <sys/endian.h>
# include <sys/types.h>
# include <machine/bswap.h>
+#elif defined(__FreeBSD__)
+# include <sys/endian.h>
#elif defined(CONFIG_BYTESWAP_H)
# include <byteswap.h>
diff --git a/include/qemu/config-file.h b/include/qemu/config-file.h
index dbd97c4bdb..d4ba20e049 100644
--- a/include/qemu/config-file.h
+++ b/include/qemu/config-file.h
@@ -8,6 +8,8 @@
QemuOptsList *qemu_find_opts(const char *group);
QemuOptsList *qemu_find_opts_err(const char *group, Error **errp);
+QemuOpts *qemu_find_opts_singleton(const char *group);
+
void qemu_add_opts(QemuOptsList *list);
void qemu_add_drive_opts(QemuOptsList *list);
int qemu_set_option(const char *str);
diff --git a/include/qemu/error-report.h b/include/qemu/error-report.h
index 3b098a9173..000eae3957 100644
--- a/include/qemu/error-report.h
+++ b/include/qemu/error-report.h
@@ -37,7 +37,6 @@ void loc_set_file(const char *fname, int lno);
void error_vprintf(const char *fmt, va_list ap) GCC_FMT_ATTR(1, 0);
void error_printf(const char *fmt, ...) GCC_FMT_ATTR(1, 2);
void error_printf_unless_qmp(const char *fmt, ...) GCC_FMT_ATTR(1, 2);
-void error_print_loc(void);
void error_set_progname(const char *argv0);
void error_report(const char *fmt, ...) GCC_FMT_ATTR(1, 2);
const char *error_get_progname(void);
diff --git a/include/qemu/int128.h b/include/qemu/int128.h
index 9ed47aafd3..f59703143a 100644
--- a/include/qemu/int128.h
+++ b/include/qemu/int128.h
@@ -53,7 +53,7 @@ static inline Int128 int128_rshift(Int128 a, int n)
if (n >= 64) {
return (Int128) { h, h >> 63 };
} else {
- return (Int128) { (a.lo >> n) | (a.hi << (64 - n)), h };
+ return (Int128) { (a.lo >> n) | ((uint64_t)a.hi << (64 - n)), h };
}
}
@@ -78,7 +78,7 @@ static inline Int128 int128_neg(Int128 a)
static inline Int128 int128_sub(Int128 a, Int128 b)
{
- return (Int128){ a.lo - b.lo, a.hi - b.hi - (a.lo < b.lo) };
+ return (Int128){ a.lo - b.lo, (uint64_t)a.hi - b.hi - (a.lo < b.lo) };
}
static inline bool int128_nonneg(Int128 a)
diff --git a/include/qemu/rfifolock.h b/include/qemu/rfifolock.h
new file mode 100644
index 0000000000..b23ab538a6
--- /dev/null
+++ b/include/qemu/rfifolock.h
@@ -0,0 +1,54 @@
+/*
+ * Recursive FIFO lock
+ *
+ * Copyright Red Hat, Inc. 2013
+ *
+ * Authors:
+ * Stefan Hajnoczi <stefanha@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef QEMU_RFIFOLOCK_H
+#define QEMU_RFIFOLOCK_H
+
+#include "qemu/thread.h"
+
+/* Recursive FIFO lock
+ *
+ * This lock provides more features than a plain mutex:
+ *
+ * 1. Fairness - enforces FIFO order.
+ * 2. Nesting - can be taken recursively.
+ * 3. Contention callback - optional, called when thread must wait.
+ *
+ * The recursive FIFO lock is heavyweight so prefer other synchronization
+ * primitives if you do not need its features.
+ */
+typedef struct {
+ QemuMutex lock; /* protects all fields */
+
+ /* FIFO order */
+ unsigned int head; /* active ticket number */
+ unsigned int tail; /* waiting ticket number */
+ QemuCond cond; /* used to wait for our ticket number */
+
+ /* Nesting */
+ QemuThread owner_thread; /* thread that currently has ownership */
+ unsigned int nesting; /* number of nesting levels */
+
+ /* Contention callback */
+ void (*cb)(void *); /* called when thread must wait, with ->lock
+ * held so it may not recursively lock/unlock
+ */
+ void *cb_opaque;
+} RFifoLock;
+
+void rfifolock_init(RFifoLock *r, void (*cb)(void *), void *opaque);
+void rfifolock_destroy(RFifoLock *r);
+void rfifolock_lock(RFifoLock *r);
+void rfifolock_unlock(RFifoLock *r);
+
+#endif /* QEMU_RFIFOLOCK_H */
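A minimal usage sketch for the lock declared above, showing the recursive locking behaviour; the counter and helpers are illustrative.

static RFifoLock counter_lock;
static unsigned long counter;

static void counter_setup(void)
{
    rfifolock_init(&counter_lock, NULL, NULL);  /* no contention callback */
}

static void counter_bump(void)
{
    rfifolock_lock(&counter_lock);
    rfifolock_lock(&counter_lock);      /* recursion is allowed ... */
    counter++;
    rfifolock_unlock(&counter_lock);
    rfifolock_unlock(&counter_lock);    /* ... but must be fully unwound */
}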
diff --git a/include/qemu/typedefs.h b/include/qemu/typedefs.h
index 83c9b1675d..bf8daac659 100644
--- a/include/qemu/typedefs.h
+++ b/include/qemu/typedefs.h
@@ -30,6 +30,7 @@ typedef struct MemoryListener MemoryListener;
typedef struct MemoryMappingList MemoryMappingList;
+typedef struct QEMUMachine QEMUMachine;
typedef struct NICInfo NICInfo;
typedef struct HCIInfo HCIInfo;
typedef struct AudioState AudioState;
diff --git a/include/qom/cpu.h b/include/qom/cpu.h
index d734be8a40..df977c88f0 100644
--- a/include/qom/cpu.h
+++ b/include/qom/cpu.h
@@ -21,6 +21,7 @@
#define QEMU_CPU_H
#include <signal.h>
+#include <setjmp.h>
#include "hw/qdev-core.h"
#include "exec/hwaddr.h"
#include "qemu/queue.h"
@@ -52,7 +53,12 @@ typedef uint64_t vaddr;
#define TYPE_CPU "cpu"
-#define CPU(obj) OBJECT_CHECK(CPUState, (obj), TYPE_CPU)
+/* Since this macro is used a lot in hot code paths and in conjunction with
+ * FooCPU *foo_env_get_cpu(), we deviate from usual QOM practice by using
+ * an unchecked cast.
+ */
+#define CPU(obj) ((CPUState *)(obj))
+
#define CPU_CLASS(class) OBJECT_CLASS_CHECK(CPUClass, (class), TYPE_CPU)
#define CPU_GET_CLASS(obj) OBJECT_GET_CLASS(CPUClass, (obj), TYPE_CPU)
@@ -68,8 +74,10 @@ struct TranslationBlock;
* CPUClass:
* @class_by_name: Callback to map -cpu command line model name to an
* instantiatable CPU type.
+ * @parse_features: Callback to parse command line arguments.
* @reset: Callback to reset the #CPUState to its initial state.
* @reset_dump_flags: #CPUDumpFlags to use for reset logging.
+ * @has_work: Callback for checking if there is work to do.
* @do_interrupt: Callback for interrupt handling.
* @do_unassigned_access: Callback for unassigned access handling.
* @memory_rw_debug: Callback for GDB memory access.
@@ -81,6 +89,7 @@ struct TranslationBlock;
* @set_pc: Callback for setting the Program Counter register.
* @synchronize_from_tb: Callback for synchronizing state from a TCG
* #TranslationBlock.
+ * @handle_mmu_fault: Callback for handling an MMU fault.
* @get_phys_page_debug: Callback for obtaining a physical address.
* @gdb_read_register: Callback for letting GDB read a register.
* @gdb_write_register: Callback for letting GDB write a register.
@@ -96,9 +105,11 @@ typedef struct CPUClass {
/*< public >*/
ObjectClass *(*class_by_name)(const char *cpu_model);
+ void (*parse_features)(CPUState *cpu, char *str, Error **errp);
void (*reset)(CPUState *cpu);
int reset_dump_flags;
+ bool (*has_work)(CPUState *cpu);
void (*do_interrupt)(CPUState *cpu);
CPUUnassignedAccess do_unassigned_access;
int (*memory_rw_debug)(CPUState *cpu, vaddr addr,
@@ -113,6 +124,8 @@ typedef struct CPUClass {
Error **errp);
void (*set_pc)(CPUState *cpu, vaddr value);
void (*synchronize_from_tb)(CPUState *cpu, struct TranslationBlock *tb);
+ int (*handle_mmu_fault)(CPUState *cpu, vaddr address, int rw,
+ int mmu_index);
hwaddr (*get_phys_page_debug)(CPUState *cpu, vaddr addr);
int (*gdb_read_register)(CPUState *cpu, uint8_t *buf, int reg);
int (*gdb_write_register)(CPUState *cpu, uint8_t *buf, int reg);
@@ -131,9 +144,37 @@ typedef struct CPUClass {
const char *gdb_core_xml_file;
} CPUClass;
+#ifdef HOST_WORDS_BIGENDIAN
+typedef struct icount_decr_u16 {
+ uint16_t high;
+ uint16_t low;
+} icount_decr_u16;
+#else
+typedef struct icount_decr_u16 {
+ uint16_t low;
+ uint16_t high;
+} icount_decr_u16;
+#endif
+
+typedef struct CPUBreakpoint {
+ vaddr pc;
+ int flags; /* BP_* */
+ QTAILQ_ENTRY(CPUBreakpoint) entry;
+} CPUBreakpoint;
+
+typedef struct CPUWatchpoint {
+ vaddr vaddr;
+ vaddr len_mask;
+ int flags; /* BP_* */
+ QTAILQ_ENTRY(CPUWatchpoint) entry;
+} CPUWatchpoint;
+
struct KVMState;
struct kvm_run;
+#define TB_JMP_CACHE_BITS 12
+#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)
+
/**
* CPUState:
* @cpu_index: CPU index (informative).
@@ -150,12 +191,20 @@ struct kvm_run;
* @tcg_exit_req: Set to force TCG to stop executing linked TBs for this
* CPU and return to its top level loop.
* @singlestep_enabled: Flags for single-stepping.
+ * @icount_extra: Instructions until next timer event.
+ * @icount_decr: Number of cycles left, with interrupt flag in high bit.
+ * This allows a single read-compare-cbranch-write sequence to test
+ * for both decrementer underflow and exceptions.
+ * @can_do_io: Nonzero if memory-mapped IO is safe.
* @env_ptr: Pointer to subclass-specific CPUArchState field.
* @current_tb: Currently executing TB.
* @gdb_regs: Additional GDB registers.
* @gdb_num_regs: Number of total registers accessible to GDB.
* @gdb_num_g_regs: Number of registers in GDB 'g' packets.
* @next_cpu: Next CPU sharing TB cache.
+ * @opaque: User data.
+ * @mem_io_pc: Host Program Counter at which the memory was accessed.
+ * @mem_io_vaddr: Target virtual address at which the memory was accessed.
* @kvm_fd: vCPU file descriptor for KVM.
*
* State of one CPU core or thread.
@@ -183,20 +232,36 @@ struct CPUState {
bool stop;
bool stopped;
volatile sig_atomic_t exit_request;
- volatile sig_atomic_t tcg_exit_req;
uint32_t interrupt_request;
int singlestep_enabled;
+ int64_t icount_extra;
+ sigjmp_buf jmp_env;
AddressSpace *as;
MemoryListener *tcg_as_listener;
void *env_ptr; /* CPUArchState */
struct TranslationBlock *current_tb;
+ struct TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE];
struct GDBRegisterState *gdb_regs;
int gdb_num_regs;
int gdb_num_g_regs;
QTAILQ_ENTRY(CPUState) node;
+ /* ice debug support */
+ QTAILQ_HEAD(breakpoints_head, CPUBreakpoint) breakpoints;
+
+ QTAILQ_HEAD(watchpoints_head, CPUWatchpoint) watchpoints;
+ CPUWatchpoint *watchpoint_hit;
+
+ void *opaque;
+
+ /* In order to avoid passing too many arguments to the MMIO helpers,
+ * we store some rarely used information in the CPU context.
+ */
+ uintptr_t mem_io_pc;
+ vaddr mem_io_vaddr;
+
int kvm_fd;
bool kvm_vcpu_dirty;
struct KVMState *kvm_state;
@@ -205,6 +270,18 @@ struct CPUState {
/* TODO Move common fields from CPUArchState here. */
int cpu_index; /* used by alpha TCG */
uint32_t halted; /* used by alpha, cris, ppc TCG */
+ union {
+ uint32_t u32;
+ icount_decr_u16 u16;
+ } icount_decr;
+ uint32_t can_do_io;
+ int32_t exception_index; /* used by m68k TCG */
+
+ /* Note that this is accessed at the start of every TB via a negative
+ offset from AREG0. Leave this field at the end so as to make the
+ (absolute value) offset as small as possible. This reduces code
+ size, especially for hosts without large memory offsets. */
+ volatile sig_atomic_t tcg_exit_req;
};
QTAILQ_HEAD(CPUTailQ, CPUState);
@@ -348,14 +425,31 @@ void cpu_reset(CPUState *cpu);
ObjectClass *cpu_class_by_name(const char *typename, const char *cpu_model);
/**
- * qemu_cpu_has_work:
+ * cpu_generic_init:
+ * @typename: The CPU base type.
+ * @cpu_model: The model string including optional parameters.
+ *
+ * Instantiates a CPU, processes optional parameters and realizes the CPU.
+ *
+ * Returns: A #CPUState or %NULL if an error occurred.
+ */
+CPUState *cpu_generic_init(const char *typename, const char *cpu_model);
+
+/**
+ * cpu_has_work:
* @cpu: The vCPU to check.
*
* Checks whether the CPU has work to do.
*
* Returns: %true if the CPU has work, %false otherwise.
*/
-bool qemu_cpu_has_work(CPUState *cpu);
+static inline bool cpu_has_work(CPUState *cpu)
+{
+ CPUClass *cc = CPU_GET_CLASS(cpu);
+
+ g_assert(cc->has_work);
+ return cc->has_work(cpu);
+}
/**
* qemu_cpu_is_self:
@@ -511,6 +605,31 @@ void qemu_init_vcpu(CPUState *cpu);
*/
void cpu_single_step(CPUState *cpu, int enabled);
+/* Breakpoint/watchpoint flags */
+#define BP_MEM_READ 0x01
+#define BP_MEM_WRITE 0x02
+#define BP_MEM_ACCESS (BP_MEM_READ | BP_MEM_WRITE)
+#define BP_STOP_BEFORE_ACCESS 0x04
+#define BP_WATCHPOINT_HIT 0x08
+#define BP_GDB 0x10
+#define BP_CPU 0x20
+
+int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
+ CPUBreakpoint **breakpoint);
+int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags);
+void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint);
+void cpu_breakpoint_remove_all(CPUState *cpu, int mask);
+
+int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
+ int flags, CPUWatchpoint **watchpoint);
+int cpu_watchpoint_remove(CPUState *cpu, vaddr addr,
+ vaddr len, int flags);
+void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint);
+void cpu_watchpoint_remove_all(CPUState *cpu, int mask);
+
+void QEMU_NORETURN cpu_abort(CPUState *cpu, const char *fmt, ...)
+ GCC_FMT_ATTR(2, 3);
+
#ifdef CONFIG_SOFTMMU
extern const struct VMStateDescription vmstate_cpu_common;
#else
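The CPUClass gains has_work, parse_features and handle_mmu_fault hooks, and breakpoints/watchpoints move from CPUArchState to CPUState. A hypothetical target might wire these up as below; the foo_* names are illustrative only:

#include "qom/cpu.h"

static bool foo_cpu_has_work(CPUState *cs)
{
    /* Report work whenever any interrupt is pending. */
    return cs->interrupt_request != 0;
}

static void foo_cpu_class_init(ObjectClass *oc, void *data)
{
    CPUClass *cc = CPU_CLASS(oc);

    cc->has_work = foo_cpu_has_work;    /* queried through cpu_has_work() */
}

static void foo_set_gdb_breakpoint(CPUState *cs, vaddr pc)
{
    /* The breakpoint list now hangs off CPUState, so the helpers take a
     * CPUState rather than a CPUArchState. */
    cpu_breakpoint_insert(cs, pc, BP_GDB, NULL);
}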
diff --git a/include/qom/object.h b/include/qom/object.h
index 9c7c361d30..a641dcde10 100644
--- a/include/qom/object.h
+++ b/include/qom/object.h
@@ -974,6 +974,14 @@ const char *object_property_get_type(Object *obj, const char *name,
Object *object_get_root(void);
/**
+ * object_get_canonical_path_component:
+ *
+ * Returns: The final component in the object's canonical path. The canonical
+ * path is the path within the composition tree starting from the root.
+ */
+gchar *object_get_canonical_path_component(Object *obj);
+
+/**
* object_get_canonical_path:
*
* Returns: The canonical path for an object. This is the path within the
@@ -1059,12 +1067,29 @@ Object *object_resolve_path_component(Object *parent, const gchar *part);
void object_property_add_child(Object *obj, const char *name,
Object *child, Error **errp);
+typedef enum {
+ /* Unref the link pointer when the property is deleted */
+ OBJ_PROP_LINK_UNREF_ON_RELEASE = 0x1,
+} ObjectPropertyLinkFlags;
+
+/**
+ * object_property_allow_set_link:
+ *
+ * The default implementation of the object_property_add_link() check()
+ * callback function. It allows the link property to be set and never returns
+ * an error.
+ */
+void object_property_allow_set_link(Object *, const char *,
+ Object *, Error **);
+
/**
* object_property_add_link:
* @obj: the object to add a property to
* @name: the name of the property
* @type: the qobj type of the link
* @child: a pointer to where the link object reference is stored
+ * @check: callback to veto setting or NULL if the property is read-only
+ * @flags: additional options for the link
* @errp: if an error occurs, a pointer to an area to store the error
*
* Links establish relationships between objects. Links are unidirectional
@@ -1073,13 +1098,23 @@ void object_property_add_child(Object *obj, const char *name,
*
* Links form the graph in the object model.
*
+ * The <code>@check()</code> callback is invoked when
+ * object_property_set_link() is called and can raise an error to prevent the
+ * link being set. If <code>@check</code> is NULL, the property is read-only
+ * and cannot be set.
+ *
* Ownership of the pointer that @child points to is transferred to the
* link property. The reference count for <code>*@child</code> is
* managed by the property from after the function returns till the
- * property is deleted with object_property_del().
+ * property is deleted with object_property_del(). If the
+ * <code>@flags</code> <code>OBJ_PROP_LINK_UNREF_ON_RELEASE</code> bit is set,
+ * the reference count is decremented when the property is deleted.
*/
void object_property_add_link(Object *obj, const char *name,
const char *type, Object **child,
+ void (*check)(Object *obj, const char *name,
+ Object *val, Error **errp),
+ ObjectPropertyLinkFlags flags,
Error **errp);
/**
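object_property_add_link() now takes a check callback and flags. A sketch of a caller, assuming a hypothetical FooState with a backend pointer (FooState, FOO() and the "backend" property name are not part of the patch):

static void foo_instance_init(Object *obj)
{
    FooState *s = FOO(obj);

    /* Writable link property; the default check callback accepts any value,
     * and the flag drops the reference when the property is deleted. */
    object_property_add_link(obj, "backend", TYPE_OBJECT,
                             (Object **)&s->backend,
                             object_property_allow_set_link,
                             OBJ_PROP_LINK_UNREF_ON_RELEASE,
                             NULL);
}

Passing NULL instead of object_property_allow_set_link keeps the old behaviour of a read-only link.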
diff --git a/include/sysemu/iothread.h b/include/sysemu/iothread.h
new file mode 100644
index 0000000000..7c01a61d5e
--- /dev/null
+++ b/include/sysemu/iothread.h
@@ -0,0 +1,40 @@
+/*
+ * Event loop thread
+ *
+ * Copyright Red Hat Inc., 2013
+ *
+ * Authors:
+ * Stefan Hajnoczi <stefanha@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef IOTHREAD_H
+#define IOTHREAD_H
+
+#include "block/aio.h"
+#include "qemu/thread.h"
+
+#define TYPE_IOTHREAD "iothread"
+
+typedef struct {
+ Object parent_obj;
+
+ QemuThread thread;
+ AioContext *ctx;
+ QemuMutex init_done_lock;
+ QemuCond init_done_cond; /* is thread initialization done? */
+ bool stopping;
+ int thread_id;
+} IOThread;
+
+#define IOTHREAD(obj) \
+ OBJECT_CHECK(IOThread, obj, TYPE_IOTHREAD)
+
+IOThread *iothread_find(const char *id);
+char *iothread_get_id(IOThread *iothread);
+AioContext *iothread_get_aio_context(IOThread *iothread);
+
+#endif /* IOTHREAD_H */
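A rough sketch of how a consumer might map a user-supplied iothread id to an AioContext (example_pick_context is illustrative; qemu_get_aio_context() comes from qemu/main-loop.h):

#include "qemu/main-loop.h"
#include "sysemu/iothread.h"

static AioContext *example_pick_context(const char *id)
{
    IOThread *iothread = iothread_find(id);

    if (!iothread) {
        /* No such iothread: fall back to the main loop's AioContext. */
        return qemu_get_aio_context();
    }
    return iothread_get_aio_context(iothread);
}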
diff --git a/include/sysemu/kvm.h b/include/sysemu/kvm.h
index ed01998aa8..0bee1e8996 100644
--- a/include/sysemu/kvm.h
+++ b/include/sysemu/kvm.h
@@ -18,7 +18,6 @@
#include "config-host.h"
#include "qemu/queue.h"
#include "qom/cpu.h"
-#include "sysemu/qemumachine.h"
#ifdef CONFIG_KVM
#include <linux/kvm.h>
diff --git a/include/sysemu/qemumachine.h b/include/sysemu/qemumachine.h
deleted file mode 100644
index 4cefd56b67..0000000000
--- a/include/sysemu/qemumachine.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/*
- * QEMU Machine typedef
- *
- * Copyright Alexander Graf <agraf@suse.de>
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- *
- */
-
-#ifndef QEMUMACHINE_H
-#define QEMUMACHINE_H
-
-typedef struct QEMUMachine QEMUMachine;
-
-#endif /* !QEMUMACHINE_H */
diff --git a/include/sysemu/qtest.h b/include/sysemu/qtest.h
index e62281d4bf..224131f298 100644
--- a/include/sysemu/qtest.h
+++ b/include/sysemu/qtest.h
@@ -16,7 +16,6 @@
#include "qemu-common.h"
#include "qapi/error.h"
-#include "sysemu/qemumachine.h"
extern bool qtest_allowed;
diff --git a/include/sysemu/sysemu.h b/include/sysemu/sysemu.h
index b90df9ada1..ba5c7f8093 100644
--- a/include/sysemu/sysemu.h
+++ b/include/sysemu/sysemu.h
@@ -104,7 +104,7 @@ extern int autostart;
typedef enum {
VGA_NONE, VGA_STD, VGA_CIRRUS, VGA_VMWARE, VGA_XENFB, VGA_QXL,
- VGA_TCX, VGA_CG3,
+ VGA_TCX, VGA_CG3, VGA_DEVICE
} VGAInterfaceType;
extern int vga_interface_type;
@@ -133,7 +133,14 @@ extern uint8_t qemu_extra_params_fw[2];
extern QEMUClockType rtc_clock;
#define MAX_NODES 64
+
+/* The following shall be true for all CPUs:
+ * cpu->cpu_index < max_cpus <= MAX_CPUMASK_BITS
+ *
+ * Note that cpu->get_arch_id() may be larger than MAX_CPUMASK_BITS.
+ */
#define MAX_CPUMASK_BITS 255
+
extern int nb_numa_nodes;
extern uint64_t node_mem[MAX_NODES];
extern unsigned long *node_cpumask[MAX_NODES];
@@ -186,7 +193,7 @@ void rtc_change_mon_event(struct tm *tm);
void add_boot_device_path(int32_t bootindex, DeviceState *dev,
const char *suffix);
-char *get_boot_devices_list(size_t *size);
+char *get_boot_devices_list(size_t *size, bool ignore_suffixes);
DeviceState *get_boot_device(uint32_t position);
diff --git a/include/ui/console.h b/include/ui/console.h
index 08a38eab13..8a866176db 100644
--- a/include/ui/console.h
+++ b/include/ui/console.h
@@ -345,6 +345,6 @@ int index_from_key(const char *key);
/* gtk.c */
void early_gtk_display_init(void);
-void gtk_display_init(DisplayState *ds, bool full_screen);
+void gtk_display_init(DisplayState *ds, bool full_screen, bool grab_on_hover);
#endif
diff --git a/iothread.c b/iothread.c
new file mode 100644
index 0000000000..1fbf9f1c49
--- /dev/null
+++ b/iothread.c
@@ -0,0 +1,167 @@
+/*
+ * Event loop thread
+ *
+ * Copyright Red Hat Inc., 2013
+ *
+ * Authors:
+ * Stefan Hajnoczi <stefanha@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qom/object.h"
+#include "qom/object_interfaces.h"
+#include "qemu/module.h"
+#include "block/aio.h"
+#include "sysemu/iothread.h"
+#include "qmp-commands.h"
+
+#define IOTHREADS_PATH "/objects"
+
+typedef ObjectClass IOThreadClass;
+
+#define IOTHREAD_GET_CLASS(obj) \
+ OBJECT_GET_CLASS(IOThreadClass, obj, TYPE_IOTHREAD)
+#define IOTHREAD_CLASS(klass) \
+ OBJECT_CLASS_CHECK(IOThreadClass, klass, TYPE_IOTHREAD)
+
+static void *iothread_run(void *opaque)
+{
+ IOThread *iothread = opaque;
+
+ qemu_mutex_lock(&iothread->init_done_lock);
+ iothread->thread_id = qemu_get_thread_id();
+ qemu_cond_signal(&iothread->init_done_cond);
+ qemu_mutex_unlock(&iothread->init_done_lock);
+
+ while (!iothread->stopping) {
+ aio_context_acquire(iothread->ctx);
+ while (!iothread->stopping && aio_poll(iothread->ctx, true)) {
+ /* Progress was made, keep going */
+ }
+ aio_context_release(iothread->ctx);
+ }
+ return NULL;
+}
+
+static void iothread_instance_finalize(Object *obj)
+{
+ IOThread *iothread = IOTHREAD(obj);
+
+ iothread->stopping = true;
+ aio_notify(iothread->ctx);
+ qemu_thread_join(&iothread->thread);
+ qemu_cond_destroy(&iothread->init_done_cond);
+ qemu_mutex_destroy(&iothread->init_done_lock);
+ aio_context_unref(iothread->ctx);
+}
+
+static void iothread_complete(UserCreatable *obj, Error **errp)
+{
+ IOThread *iothread = IOTHREAD(obj);
+
+ iothread->stopping = false;
+ iothread->ctx = aio_context_new();
+ iothread->thread_id = -1;
+
+ qemu_mutex_init(&iothread->init_done_lock);
+ qemu_cond_init(&iothread->init_done_cond);
+
+ /* This assumes we are called from a thread with useful CPU affinity for us
+ * to inherit.
+ */
+ qemu_thread_create(&iothread->thread, "iothread", iothread_run,
+ iothread, QEMU_THREAD_JOINABLE);
+
+ /* Wait for initialization to complete */
+ qemu_mutex_lock(&iothread->init_done_lock);
+ while (iothread->thread_id == -1) {
+ qemu_cond_wait(&iothread->init_done_cond,
+ &iothread->init_done_lock);
+ }
+ qemu_mutex_unlock(&iothread->init_done_lock);
+}
+
+static void iothread_class_init(ObjectClass *klass, void *class_data)
+{
+ UserCreatableClass *ucc = USER_CREATABLE_CLASS(klass);
+ ucc->complete = iothread_complete;
+}
+
+static const TypeInfo iothread_info = {
+ .name = TYPE_IOTHREAD,
+ .parent = TYPE_OBJECT,
+ .class_init = iothread_class_init,
+ .instance_size = sizeof(IOThread),
+ .instance_finalize = iothread_instance_finalize,
+ .interfaces = (InterfaceInfo[]) {
+ {TYPE_USER_CREATABLE},
+ {}
+ },
+};
+
+static void iothread_register_types(void)
+{
+ type_register_static(&iothread_info);
+}
+
+type_init(iothread_register_types)
+
+IOThread *iothread_find(const char *id)
+{
+ Object *container = container_get(object_get_root(), IOTHREADS_PATH);
+ Object *child;
+
+ child = object_property_get_link(container, id, NULL);
+ if (!child) {
+ return NULL;
+ }
+ return (IOThread *)object_dynamic_cast(child, TYPE_IOTHREAD);
+}
+
+char *iothread_get_id(IOThread *iothread)
+{
+ return object_get_canonical_path_component(OBJECT(iothread));
+}
+
+AioContext *iothread_get_aio_context(IOThread *iothread)
+{
+ return iothread->ctx;
+}
+
+static int query_one_iothread(Object *object, void *opaque)
+{
+ IOThreadInfoList ***prev = opaque;
+ IOThreadInfoList *elem;
+ IOThreadInfo *info;
+ IOThread *iothread;
+
+ iothread = (IOThread *)object_dynamic_cast(object, TYPE_IOTHREAD);
+ if (!iothread) {
+ return 0;
+ }
+
+ info = g_new0(IOThreadInfo, 1);
+ info->id = iothread_get_id(iothread);
+ info->thread_id = iothread->thread_id;
+
+ elem = g_new0(IOThreadInfoList, 1);
+ elem->value = info;
+ elem->next = NULL;
+
+ **prev = elem;
+ *prev = &elem->next;
+ return 0;
+}
+
+IOThreadInfoList *qmp_query_iothreads(Error **errp)
+{
+ IOThreadInfoList *head = NULL;
+ IOThreadInfoList **prev = &head;
+ Object *container = container_get(object_get_root(), IOTHREADS_PATH);
+
+ object_child_foreach(container, query_one_iothread, &prev);
+ return head;
+}
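IOThread objects are user-creatable (they live under /objects), and qmp_query_iothreads() backs the query-iothreads QMP command. A sketch of walking the returned list (dump_iothreads is illustrative only):

static void dump_iothreads(void)
{
    IOThreadInfoList *list = qmp_query_iothreads(NULL);
    IOThreadInfoList *elem;

    for (elem = list; elem; elem = elem->next) {
        printf("iothread %s: host tid %lld\n",
               elem->value->id, (long long)elem->value->thread_id);
    }
    qapi_free_IOThreadInfoList(list);   /* generated QAPI free function */
}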
diff --git a/kvm-stub.c b/kvm-stub.c
index 4ef084e84a..ccdba62d67 100644
--- a/kvm-stub.c
+++ b/kvm-stub.c
@@ -14,7 +14,6 @@
#include "hw/hw.h"
#include "cpu.h"
#include "sysemu/kvm.h"
-#include "sysemu/qemumachine.h"
#ifndef CONFIG_USER_ONLY
#include "hw/pci/msi.h"
diff --git a/linux-headers/asm-s390/kvm.h b/linux-headers/asm-s390/kvm.h
index cb4c1eb8a0..c003c6a73b 100644
--- a/linux-headers/asm-s390/kvm.h
+++ b/linux-headers/asm-s390/kvm.h
@@ -22,6 +22,8 @@
#define KVM_DEV_FLIC_CLEAR_IRQS 3
#define KVM_DEV_FLIC_APF_ENABLE 4
#define KVM_DEV_FLIC_APF_DISABLE_WAIT 5
+#define KVM_DEV_FLIC_ADAPTER_REGISTER 6
+#define KVM_DEV_FLIC_ADAPTER_MODIFY 7
/*
* We can have up to 4*64k pending subchannels + 8 adapter interrupts,
* as well as up to ASYNC_PF_PER_VCPU*KVM_MAX_VCPUS pfault done interrupts.
@@ -32,6 +34,26 @@
#define KVM_S390_MAX_FLOAT_IRQS 266250
#define KVM_S390_FLIC_MAX_BUFFER 0x2000000
+struct kvm_s390_io_adapter {
+ __u32 id;
+ __u8 isc;
+ __u8 maskable;
+ __u8 swap;
+ __u8 pad;
+};
+
+#define KVM_S390_IO_ADAPTER_MASK 1
+#define KVM_S390_IO_ADAPTER_MAP 2
+#define KVM_S390_IO_ADAPTER_UNMAP 3
+
+struct kvm_s390_io_adapter_req {
+ __u32 id;
+ __u8 type;
+ __u8 mask;
+ __u16 pad0;
+ __u64 addr;
+};
+
/* for KVM_GET_REGS and KVM_SET_REGS */
struct kvm_regs {
/* general purpose regs for s390 */
@@ -76,4 +98,6 @@ struct kvm_sync_regs {
#define KVM_REG_S390_PFTOKEN (KVM_REG_S390 | KVM_REG_SIZE_U64 | 0x5)
#define KVM_REG_S390_PFCOMPARE (KVM_REG_S390 | KVM_REG_SIZE_U64 | 0x6)
#define KVM_REG_S390_PFSELECT (KVM_REG_S390 | KVM_REG_SIZE_U64 | 0x7)
+#define KVM_REG_S390_PP (KVM_REG_S390 | KVM_REG_SIZE_U64 | 0x8)
+#define KVM_REG_S390_GBEA (KVM_REG_S390 | KVM_REG_SIZE_U64 | 0x9)
#endif
diff --git a/linux-headers/linux/kvm.h b/linux-headers/linux/kvm.h
index e27a4b33cf..b278ab3326 100644
--- a/linux-headers/linux/kvm.h
+++ b/linux-headers/linux/kvm.h
@@ -740,6 +740,9 @@ struct kvm_ppc_smmu_info {
#define KVM_CAP_SPAPR_MULTITCE 94
#define KVM_CAP_EXT_EMUL_CPUID 95
#define KVM_CAP_HYPERV_TIME 96
+#define KVM_CAP_IOAPIC_POLARITY_IGNORED 97
+#define KVM_CAP_ENABLE_CAP_VM 98
+#define KVM_CAP_S390_IRQCHIP 99
#ifdef KVM_CAP_IRQ_ROUTING
@@ -755,9 +758,18 @@ struct kvm_irq_routing_msi {
__u32 pad;
};
+struct kvm_irq_routing_s390_adapter {
+ __u64 ind_addr;
+ __u64 summary_addr;
+ __u64 ind_offset;
+ __u32 summary_offset;
+ __u32 adapter_id;
+};
+
/* gsi routing entry types */
#define KVM_IRQ_ROUTING_IRQCHIP 1
#define KVM_IRQ_ROUTING_MSI 2
+#define KVM_IRQ_ROUTING_S390_ADAPTER 3
struct kvm_irq_routing_entry {
__u32 gsi;
@@ -767,6 +779,7 @@ struct kvm_irq_routing_entry {
union {
struct kvm_irq_routing_irqchip irqchip;
struct kvm_irq_routing_msi msi;
+ struct kvm_irq_routing_s390_adapter adapter;
__u32 pad[8];
} u;
};
@@ -1075,6 +1088,10 @@ struct kvm_s390_ucas_mapping {
/* Available with KVM_CAP_DEBUGREGS */
#define KVM_GET_DEBUGREGS _IOR(KVMIO, 0xa1, struct kvm_debugregs)
#define KVM_SET_DEBUGREGS _IOW(KVMIO, 0xa2, struct kvm_debugregs)
+/*
+ * vcpu version available with KVM_ENABLE_CAP
+ * vm version available with KVM_CAP_ENABLE_CAP_VM
+ */
#define KVM_ENABLE_CAP _IOW(KVMIO, 0xa3, struct kvm_enable_cap)
/* Available with KVM_CAP_XSAVE */
#define KVM_GET_XSAVE _IOR(KVMIO, 0xa4, struct kvm_xsave)
diff --git a/linux-headers/linux/vfio.h b/linux-headers/linux/vfio.h
index 17c58e0ede..26c218e692 100644
--- a/linux-headers/linux/vfio.h
+++ b/linux-headers/linux/vfio.h
@@ -23,6 +23,12 @@
#define VFIO_TYPE1_IOMMU 1
#define VFIO_SPAPR_TCE_IOMMU 2
+#define VFIO_TYPE1v2_IOMMU 3
+/*
+ * IOMMU enforces DMA cache coherence (ex. PCIe NoSnoop stripping). This
+ * capability is subject to change as groups are added or removed.
+ */
+#define VFIO_DMA_CC_IOMMU 4
/*
* The IOCTL interface is designed for extensibility by embedding the
diff --git a/linux-user/elfload.c b/linux-user/elfload.c
index c0687e3b38..d2380b6ccb 100644
--- a/linux-user/elfload.c
+++ b/linux-user/elfload.c
@@ -352,6 +352,9 @@ enum
ARM_HWCAP_ARM_VFPv3D16 = 1 << 13,
};
+#ifndef TARGET_AARCH64
+/* The commpage only exists for 32 bit kernels */
+
#define TARGET_HAS_VALIDATE_GUEST_SPACE
/* Return 1 if the proposed guest space is suitable for the guest.
* Return 0 if the proposed guest space isn't suitable, but another
@@ -411,7 +414,7 @@ static int validate_guest_space(unsigned long guest_base,
return 1; /* All good */
}
-
+#endif
#define ELF_HWCAP get_elf_hwcap()
@@ -1073,7 +1076,7 @@ struct exec
#define TARGET_ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(TARGET_ELF_EXEC_PAGESIZE-1))
#define TARGET_ELF_PAGEOFFSET(_v) ((_v) & (TARGET_ELF_EXEC_PAGESIZE-1))
-#define DLINFO_ITEMS 13
+#define DLINFO_ITEMS 14
static inline void memcpy_fromfs(void * to, const void * from, unsigned long n)
{
@@ -2621,7 +2624,8 @@ static int write_note(struct memelfnote *men, int fd)
static void fill_thread_info(struct elf_note_info *info, const CPUArchState *env)
{
- TaskState *ts = (TaskState *)env->opaque;
+ CPUState *cpu = ENV_GET_CPU((CPUArchState *)env);
+ TaskState *ts = (TaskState *)cpu->opaque;
struct elf_thread_status *ets;
ets = g_malloc0(sizeof (*ets));
@@ -2650,8 +2654,8 @@ static int fill_note_info(struct elf_note_info *info,
long signr, const CPUArchState *env)
{
#define NUMNOTES 3
- CPUState *cpu = NULL;
- TaskState *ts = (TaskState *)env->opaque;
+ CPUState *cpu = ENV_GET_CPU((CPUArchState *)env);
+ TaskState *ts = (TaskState *)cpu->opaque;
int i;
info->notes = g_malloc0(NUMNOTES * sizeof (struct memelfnote));
@@ -2775,7 +2779,8 @@ static int write_note_info(struct elf_note_info *info, int fd)
*/
static int elf_core_dump(int signr, const CPUArchState *env)
{
- const TaskState *ts = (const TaskState *)env->opaque;
+ const CPUState *cpu = ENV_GET_CPU((CPUArchState *)env);
+ const TaskState *ts = (const TaskState *)cpu->opaque;
struct vm_area_struct *vma = NULL;
char corefile[PATH_MAX];
struct elf_note_info info;
diff --git a/linux-user/linuxload.c b/linux-user/linuxload.c
index f2997c2f4b..506e837ae1 100644
--- a/linux-user/linuxload.c
+++ b/linux-user/linuxload.c
@@ -89,8 +89,7 @@ static int prepare_binprm(struct linux_binprm *bprm)
abi_ulong loader_build_argptr(int envc, int argc, abi_ulong sp,
abi_ulong stringp, int push_ptr)
{
- CPUArchState *env = thread_cpu->env_ptr;
- TaskState *ts = (TaskState *)env->opaque;
+ TaskState *ts = (TaskState *)thread_cpu->opaque;
int n = sizeof(abi_ulong);
abi_ulong envp;
abi_ulong argv;
diff --git a/linux-user/m68k-sim.c b/linux-user/m68k-sim.c
index d5926eec4b..1994e40000 100644
--- a/linux-user/m68k-sim.c
+++ b/linux-user/m68k-sim.c
@@ -98,6 +98,7 @@ static int translate_openflags(int flags)
#define ARG(x) tswap32(args[x])
void do_m68k_simcall(CPUM68KState *env, int nr)
{
+ M68kCPU *cpu = m68k_env_get_cpu(env);
uint32_t *args;
args = (uint32_t *)(unsigned long)(env->aregs[7] + 4);
@@ -165,6 +166,6 @@ void do_m68k_simcall(CPUM68KState *env, int nr)
check_err(env, lseek(ARG(0), (int32_t)ARG(1), ARG(2)));
break;
default:
- cpu_abort(env, "Unsupported m68k sim syscall %d\n", nr);
+ cpu_abort(CPU(cpu), "Unsupported m68k sim syscall %d\n", nr);
}
}
diff --git a/linux-user/m68k/target_cpu.h b/linux-user/m68k/target_cpu.h
index cad9c90dd0..bb4d3fabe1 100644
--- a/linux-user/m68k/target_cpu.h
+++ b/linux-user/m68k/target_cpu.h
@@ -31,7 +31,9 @@ static inline void cpu_clone_regs(CPUM68KState *env, target_ulong newsp)
static inline void cpu_set_tls(CPUM68KState *env, target_ulong newtls)
{
- TaskState *ts = env->opaque;
+ CPUState *cs = CPU(m68k_env_get_cpu(env));
+ TaskState *ts = cs->opaque;
+
ts->tp_value = newtls;
}
diff --git a/linux-user/main.c b/linux-user/main.c
index dee10841c3..947358a886 100644
--- a/linux-user/main.c
+++ b/linux-user/main.c
@@ -483,17 +483,17 @@ static void arm_kernel_cmpxchg64_helper(CPUARMState *env)
addr = env->regs[2];
if (get_user_u64(oldval, env->regs[0])) {
- env->cp15.c6_data = env->regs[0];
+ env->exception.vaddress = env->regs[0];
goto segv;
};
if (get_user_u64(newval, env->regs[1])) {
- env->cp15.c6_data = env->regs[1];
+ env->exception.vaddress = env->regs[1];
goto segv;
};
if (get_user_u64(val, addr)) {
- env->cp15.c6_data = addr;
+ env->exception.vaddress = addr;
goto segv;
}
@@ -501,7 +501,7 @@ static void arm_kernel_cmpxchg64_helper(CPUARMState *env)
val = newval;
if (put_user_u64(val, addr)) {
- env->cp15.c6_data = addr;
+ env->exception.vaddress = addr;
goto segv;
};
@@ -523,7 +523,7 @@ segv:
info.si_errno = 0;
/* XXX: check env->error_code */
info.si_code = TARGET_SEGV_MAPERR;
- info._sifields._sigfault._addr = env->cp15.c6_data;
+ info._sifields._sigfault._addr = env->exception.vaddress;
queue_signal(env, info.si_signo, &info);
end_exclusive();
@@ -620,14 +620,14 @@ static int do_strex(CPUARMState *env)
abort();
}
if (segv) {
- env->cp15.c6_data = addr;
+ env->exception.vaddress = addr;
goto done;
}
if (size == 3) {
uint32_t valhi;
segv = get_user_u32(valhi, addr + 4);
if (segv) {
- env->cp15.c6_data = addr + 4;
+ env->exception.vaddress = addr + 4;
goto done;
}
val = deposit64(val, 32, 32, valhi);
@@ -650,14 +650,14 @@ static int do_strex(CPUARMState *env)
break;
}
if (segv) {
- env->cp15.c6_data = addr;
+ env->exception.vaddress = addr;
goto done;
}
if (size == 3) {
val = env->regs[(env->exclusive_info >> 12) & 0xf];
segv = put_user_u32(val, addr + 4);
if (segv) {
- env->cp15.c6_data = addr + 4;
+ env->exception.vaddress = addr + 4;
goto done;
}
}
@@ -685,7 +685,7 @@ void cpu_loop(CPUARMState *env)
switch(trapnr) {
case EXCP_UDEF:
{
- TaskState *ts = env->opaque;
+ TaskState *ts = cs->opaque;
uint32_t opcode;
int rc;
@@ -832,12 +832,14 @@ void cpu_loop(CPUARMState *env)
case EXCP_INTERRUPT:
/* just indicate that signals should be handled asap */
break;
+ case EXCP_STREX:
+ if (!do_strex(env)) {
+ break;
+ }
+ /* fall through for segv */
case EXCP_PREFETCH_ABORT:
- addr = env->cp15.c6_insn;
- goto do_segv;
case EXCP_DATA_ABORT:
- addr = env->cp15.c6_data;
- do_segv:
+ addr = env->exception.vaddress;
{
info.si_signo = SIGSEGV;
info.si_errno = 0;
@@ -865,12 +867,6 @@ void cpu_loop(CPUARMState *env)
if (do_kernel_trap(env))
goto error;
break;
- case EXCP_STREX:
- if (do_strex(env)) {
- addr = env->cp15.c6_data;
- goto do_segv;
- }
- break;
default:
error:
fprintf(stderr, "qemu: unhandled CPU exception 0x%x - aborting\n",
@@ -933,7 +929,7 @@ static int do_strex_a64(CPUARMState *env)
abort();
}
if (segv) {
- env->cp15.c6_data = addr;
+ env->exception.vaddress = addr;
goto error;
}
if (val != env->exclusive_val) {
@@ -946,7 +942,7 @@ static int do_strex_a64(CPUARMState *env)
segv = get_user_u64(val, addr + 8);
}
if (segv) {
- env->cp15.c6_data = addr + (size == 2 ? 4 : 8);
+ env->exception.vaddress = addr + (size == 2 ? 4 : 8);
goto error;
}
if (val != env->exclusive_high) {
@@ -981,7 +977,7 @@ static int do_strex_a64(CPUARMState *env)
segv = put_user_u64(val, addr + 8);
}
if (segv) {
- env->cp15.c6_data = addr + (size == 2 ? 4 : 8);
+ env->exception.vaddress = addr + (size == 2 ? 4 : 8);
goto error;
}
}
@@ -1037,12 +1033,14 @@ void cpu_loop(CPUARMState *env)
info._sifields._sigfault._addr = env->pc;
queue_signal(env, info.si_signo, &info);
break;
+ case EXCP_STREX:
+ if (!do_strex_a64(env)) {
+ break;
+ }
+ /* fall through for segv */
case EXCP_PREFETCH_ABORT:
- addr = env->cp15.c6_insn;
- goto do_segv;
case EXCP_DATA_ABORT:
- addr = env->cp15.c6_data;
- do_segv:
+ addr = env->exception.vaddress;
info.si_signo = SIGSEGV;
info.si_errno = 0;
/* XXX: check env->error_code */
@@ -1060,12 +1058,6 @@ void cpu_loop(CPUARMState *env)
queue_signal(env, info.si_signo, &info);
}
break;
- case EXCP_STREX:
- if (do_strex_a64(env)) {
- addr = env->cp15.c6_data;
- goto do_segv;
- }
- break;
default:
fprintf(stderr, "qemu: unhandled CPU exception 0x%x - aborting\n",
trapnr);
@@ -1577,11 +1569,11 @@ void cpu_loop(CPUPPCState *env)
/* Just go on */
break;
case POWERPC_EXCP_CRITICAL: /* Critical input */
- cpu_abort(env, "Critical interrupt while in user mode. "
+ cpu_abort(cs, "Critical interrupt while in user mode. "
"Aborting\n");
break;
case POWERPC_EXCP_MCHECK: /* Machine check exception */
- cpu_abort(env, "Machine check exception while in user mode. "
+ cpu_abort(cs, "Machine check exception while in user mode. "
"Aborting\n");
break;
case POWERPC_EXCP_DSI: /* Data storage exception */
@@ -1645,7 +1637,7 @@ void cpu_loop(CPUPPCState *env)
queue_signal(env, info.si_signo, &info);
break;
case POWERPC_EXCP_EXTERNAL: /* External input */
- cpu_abort(env, "External interrupt while in user mode. "
+ cpu_abort(cs, "External interrupt while in user mode. "
"Aborting\n");
break;
case POWERPC_EXCP_ALIGN: /* Alignment exception */
@@ -1739,11 +1731,11 @@ void cpu_loop(CPUPPCState *env)
}
break;
case POWERPC_EXCP_TRAP:
- cpu_abort(env, "Tried to call a TRAP\n");
+ cpu_abort(cs, "Tried to call a TRAP\n");
break;
default:
/* Should not happen ! */
- cpu_abort(env, "Unknown program exception (%02x)\n",
+ cpu_abort(cs, "Unknown program exception (%02x)\n",
env->error_code);
break;
}
@@ -1759,7 +1751,7 @@ void cpu_loop(CPUPPCState *env)
queue_signal(env, info.si_signo, &info);
break;
case POWERPC_EXCP_SYSCALL: /* System call exception */
- cpu_abort(env, "Syscall exception while in user mode. "
+ cpu_abort(cs, "Syscall exception while in user mode. "
"Aborting\n");
break;
case POWERPC_EXCP_APU: /* Auxiliary processor unavailable */
@@ -1771,23 +1763,23 @@ void cpu_loop(CPUPPCState *env)
queue_signal(env, info.si_signo, &info);
break;
case POWERPC_EXCP_DECR: /* Decrementer exception */
- cpu_abort(env, "Decrementer interrupt while in user mode. "
+ cpu_abort(cs, "Decrementer interrupt while in user mode. "
"Aborting\n");
break;
case POWERPC_EXCP_FIT: /* Fixed-interval timer interrupt */
- cpu_abort(env, "Fix interval timer interrupt while in user mode. "
+ cpu_abort(cs, "Fix interval timer interrupt while in user mode. "
"Aborting\n");
break;
case POWERPC_EXCP_WDT: /* Watchdog timer interrupt */
- cpu_abort(env, "Watchdog timer interrupt while in user mode. "
+ cpu_abort(cs, "Watchdog timer interrupt while in user mode. "
"Aborting\n");
break;
case POWERPC_EXCP_DTLB: /* Data TLB error */
- cpu_abort(env, "Data TLB exception while in user mode. "
+ cpu_abort(cs, "Data TLB exception while in user mode. "
"Aborting\n");
break;
case POWERPC_EXCP_ITLB: /* Instruction TLB error */
- cpu_abort(env, "Instruction TLB exception while in user mode. "
+ cpu_abort(cs, "Instruction TLB exception while in user mode. "
"Aborting\n");
break;
case POWERPC_EXCP_SPEU: /* SPE/embedded floating-point unavail. */
@@ -1799,37 +1791,37 @@ void cpu_loop(CPUPPCState *env)
queue_signal(env, info.si_signo, &info);
break;
case POWERPC_EXCP_EFPDI: /* Embedded floating-point data IRQ */
- cpu_abort(env, "Embedded floating-point data IRQ not handled\n");
+ cpu_abort(cs, "Embedded floating-point data IRQ not handled\n");
break;
case POWERPC_EXCP_EFPRI: /* Embedded floating-point round IRQ */
- cpu_abort(env, "Embedded floating-point round IRQ not handled\n");
+ cpu_abort(cs, "Embedded floating-point round IRQ not handled\n");
break;
case POWERPC_EXCP_EPERFM: /* Embedded performance monitor IRQ */
- cpu_abort(env, "Performance monitor exception not handled\n");
+ cpu_abort(cs, "Performance monitor exception not handled\n");
break;
case POWERPC_EXCP_DOORI: /* Embedded doorbell interrupt */
- cpu_abort(env, "Doorbell interrupt while in user mode. "
+ cpu_abort(cs, "Doorbell interrupt while in user mode. "
"Aborting\n");
break;
case POWERPC_EXCP_DOORCI: /* Embedded doorbell critical interrupt */
- cpu_abort(env, "Doorbell critical interrupt while in user mode. "
+ cpu_abort(cs, "Doorbell critical interrupt while in user mode. "
"Aborting\n");
break;
case POWERPC_EXCP_RESET: /* System reset exception */
- cpu_abort(env, "Reset interrupt while in user mode. "
+ cpu_abort(cs, "Reset interrupt while in user mode. "
"Aborting\n");
break;
case POWERPC_EXCP_DSEG: /* Data segment exception */
- cpu_abort(env, "Data segment exception while in user mode. "
+ cpu_abort(cs, "Data segment exception while in user mode. "
"Aborting\n");
break;
case POWERPC_EXCP_ISEG: /* Instruction segment exception */
- cpu_abort(env, "Instruction segment exception "
+ cpu_abort(cs, "Instruction segment exception "
"while in user mode. Aborting\n");
break;
/* PowerPC 64 with hypervisor mode support */
case POWERPC_EXCP_HDECR: /* Hypervisor decrementer exception */
- cpu_abort(env, "Hypervisor decrementer interrupt "
+ cpu_abort(cs, "Hypervisor decrementer interrupt "
"while in user mode. Aborting\n");
break;
case POWERPC_EXCP_TRACE: /* Trace exception */
@@ -1839,19 +1831,19 @@ void cpu_loop(CPUPPCState *env)
break;
/* PowerPC 64 with hypervisor mode support */
case POWERPC_EXCP_HDSI: /* Hypervisor data storage exception */
- cpu_abort(env, "Hypervisor data storage exception "
+ cpu_abort(cs, "Hypervisor data storage exception "
"while in user mode. Aborting\n");
break;
case POWERPC_EXCP_HISI: /* Hypervisor instruction storage excp */
- cpu_abort(env, "Hypervisor instruction storage exception "
+ cpu_abort(cs, "Hypervisor instruction storage exception "
"while in user mode. Aborting\n");
break;
case POWERPC_EXCP_HDSEG: /* Hypervisor data segment exception */
- cpu_abort(env, "Hypervisor data segment exception "
+ cpu_abort(cs, "Hypervisor data segment exception "
"while in user mode. Aborting\n");
break;
case POWERPC_EXCP_HISEG: /* Hypervisor instruction segment excp */
- cpu_abort(env, "Hypervisor instruction segment exception "
+ cpu_abort(cs, "Hypervisor instruction segment exception "
"while in user mode. Aborting\n");
break;
case POWERPC_EXCP_VPU: /* Vector unavailable exception */
@@ -1863,58 +1855,58 @@ void cpu_loop(CPUPPCState *env)
queue_signal(env, info.si_signo, &info);
break;
case POWERPC_EXCP_PIT: /* Programmable interval timer IRQ */
- cpu_abort(env, "Programmable interval timer interrupt "
+ cpu_abort(cs, "Programmable interval timer interrupt "
"while in user mode. Aborting\n");
break;
case POWERPC_EXCP_IO: /* IO error exception */
- cpu_abort(env, "IO error exception while in user mode. "
+ cpu_abort(cs, "IO error exception while in user mode. "
"Aborting\n");
break;
case POWERPC_EXCP_RUNM: /* Run mode exception */
- cpu_abort(env, "Run mode exception while in user mode. "
+ cpu_abort(cs, "Run mode exception while in user mode. "
"Aborting\n");
break;
case POWERPC_EXCP_EMUL: /* Emulation trap exception */
- cpu_abort(env, "Emulation trap exception not handled\n");
+ cpu_abort(cs, "Emulation trap exception not handled\n");
break;
case POWERPC_EXCP_IFTLB: /* Instruction fetch TLB error */
- cpu_abort(env, "Instruction fetch TLB exception "
+ cpu_abort(cs, "Instruction fetch TLB exception "
"while in user-mode. Aborting");
break;
case POWERPC_EXCP_DLTLB: /* Data load TLB miss */
- cpu_abort(env, "Data load TLB exception while in user-mode. "
+ cpu_abort(cs, "Data load TLB exception while in user-mode. "
"Aborting");
break;
case POWERPC_EXCP_DSTLB: /* Data store TLB miss */
- cpu_abort(env, "Data store TLB exception while in user-mode. "
+ cpu_abort(cs, "Data store TLB exception while in user-mode. "
"Aborting");
break;
case POWERPC_EXCP_FPA: /* Floating-point assist exception */
- cpu_abort(env, "Floating-point assist exception not handled\n");
+ cpu_abort(cs, "Floating-point assist exception not handled\n");
break;
case POWERPC_EXCP_IABR: /* Instruction address breakpoint */
- cpu_abort(env, "Instruction address breakpoint exception "
+ cpu_abort(cs, "Instruction address breakpoint exception "
"not handled\n");
break;
case POWERPC_EXCP_SMI: /* System management interrupt */
- cpu_abort(env, "System management interrupt while in user mode. "
+ cpu_abort(cs, "System management interrupt while in user mode. "
"Aborting\n");
break;
case POWERPC_EXCP_THERM: /* Thermal interrupt */
- cpu_abort(env, "Thermal interrupt interrupt while in user mode. "
+ cpu_abort(cs, "Thermal interrupt interrupt while in user mode. "
"Aborting\n");
break;
case POWERPC_EXCP_PERFM: /* Embedded performance monitor IRQ */
- cpu_abort(env, "Performance monitor exception not handled\n");
+ cpu_abort(cs, "Performance monitor exception not handled\n");
break;
case POWERPC_EXCP_VPUA: /* Vector assist exception */
- cpu_abort(env, "Vector assist exception not handled\n");
+ cpu_abort(cs, "Vector assist exception not handled\n");
break;
case POWERPC_EXCP_SOFTP: /* Soft patch exception */
- cpu_abort(env, "Soft patch exception not handled\n");
+ cpu_abort(cs, "Soft patch exception not handled\n");
break;
case POWERPC_EXCP_MAINT: /* Maintenance exception */
- cpu_abort(env, "Maintenance exception while in user mode. "
+ cpu_abort(cs, "Maintenance exception while in user mode. "
"Aborting\n");
break;
case POWERPC_EXCP_STOP: /* stop translation */
@@ -1970,7 +1962,7 @@ void cpu_loop(CPUPPCState *env)
/* just indicate that signals should be handled asap */
break;
default:
- cpu_abort(env, "Unknown exception 0x%d. Aborting\n", trapnr);
+ cpu_abort(cs, "Unknown exception 0x%d. Aborting\n", trapnr);
break;
}
process_pending_signals(env);
@@ -2965,7 +2957,7 @@ void cpu_loop(CPUM68KState *env)
int trapnr;
unsigned int n;
target_siginfo_t info;
- TaskState *ts = env->opaque;
+ TaskState *ts = cs->opaque;
for(;;) {
trapnr = cpu_m68k_exec(env);
@@ -3435,28 +3427,30 @@ void init_task_state(TaskState *ts)
CPUArchState *cpu_copy(CPUArchState *env)
{
+ CPUState *cpu = ENV_GET_CPU(env);
CPUArchState *new_env = cpu_init(cpu_model);
+ CPUState *new_cpu = ENV_GET_CPU(new_env);
#if defined(TARGET_HAS_ICE)
CPUBreakpoint *bp;
CPUWatchpoint *wp;
#endif
/* Reset non arch specific state */
- cpu_reset(ENV_GET_CPU(new_env));
+ cpu_reset(new_cpu);
memcpy(new_env, env, sizeof(CPUArchState));
/* Clone all break/watchpoints.
Note: Once we support ptrace with hw-debug register access, make sure
BP_CPU break/watchpoints are handled correctly on clone. */
- QTAILQ_INIT(&env->breakpoints);
- QTAILQ_INIT(&env->watchpoints);
+ QTAILQ_INIT(&cpu->breakpoints);
+ QTAILQ_INIT(&cpu->watchpoints);
#if defined(TARGET_HAS_ICE)
- QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
- cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
+ QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
+ cpu_breakpoint_insert(new_cpu, bp->pc, bp->flags, NULL);
}
- QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
- cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
+ QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
+ cpu_watchpoint_insert(new_cpu, wp->vaddr, (~wp->len_mask) + 1,
wp->flags, NULL);
}
#endif
@@ -4001,7 +3995,7 @@ int main(int argc, char **argv, char **envp)
/* build Task State */
ts->info = info;
ts->bprm = &bprm;
- env->opaque = ts;
+ cpu->opaque = ts;
task_settid(ts);
execfd = qemu_getauxval(AT_EXECFD);
diff --git a/linux-user/qemu.h b/linux-user/qemu.h
index c2f74f33d6..36d4a738ea 100644
--- a/linux-user/qemu.h
+++ b/linux-user/qemu.h
@@ -126,6 +126,7 @@ typedef struct TaskState {
#endif
uint32_t stack_base;
int used; /* non zero if used */
+ bool sigsegv_blocked; /* SIGSEGV blocked by guest */
struct image_info *info;
struct linux_binprm *bprm;
@@ -235,6 +236,7 @@ int host_to_target_signal(int sig);
long do_sigreturn(CPUArchState *env);
long do_rt_sigreturn(CPUArchState *env);
abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr, abi_ulong sp);
+int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset);
#ifdef TARGET_I386
/* vm86.c */
diff --git a/linux-user/signal.c b/linux-user/signal.c
index c8a1da0749..7d6246f448 100644
--- a/linux-user/signal.c
+++ b/linux-user/signal.c
@@ -197,6 +197,55 @@ void target_to_host_old_sigset(sigset_t *sigset,
target_to_host_sigset(sigset, &d);
}
+/* Wrapper for sigprocmask function
+ * Emulates sigprocmask in a way that is safe for the guest. Note that set and
+ * oldset are host signal sets, not guest ones. This wraps the host sigprocmask
+ * calls that need protection (calls originating from the guest).
+ */
+int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
+{
+ int ret;
+ sigset_t val;
+ sigset_t *temp = NULL;
+ CPUState *cpu = thread_cpu;
+ TaskState *ts = (TaskState *)cpu->opaque;
+ bool segv_was_blocked = ts->sigsegv_blocked;
+
+ if (set) {
+ bool has_sigsegv = sigismember(set, SIGSEGV);
+ val = *set;
+ temp = &val;
+
+ sigdelset(temp, SIGSEGV);
+
+ switch (how) {
+ case SIG_BLOCK:
+ if (has_sigsegv) {
+ ts->sigsegv_blocked = true;
+ }
+ break;
+ case SIG_UNBLOCK:
+ if (has_sigsegv) {
+ ts->sigsegv_blocked = false;
+ }
+ break;
+ case SIG_SETMASK:
+ ts->sigsegv_blocked = has_sigsegv;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ }
+
+ ret = sigprocmask(how, temp, oldset);
+
+ if (oldset && segv_was_blocked) {
+ sigaddset(oldset, SIGSEGV);
+ }
+
+ return ret;
+}
+
/* siginfo conversion */
static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
@@ -370,7 +419,8 @@ void signal_init(void)
static inline struct sigqueue *alloc_sigqueue(CPUArchState *env)
{
- TaskState *ts = env->opaque;
+ CPUState *cpu = ENV_GET_CPU(env);
+ TaskState *ts = cpu->opaque;
struct sigqueue *q = ts->first_free;
if (!q)
return NULL;
@@ -380,7 +430,9 @@ static inline struct sigqueue *alloc_sigqueue(CPUArchState *env)
static inline void free_sigqueue(CPUArchState *env, struct sigqueue *q)
{
- TaskState *ts = env->opaque;
+ CPUState *cpu = ENV_GET_CPU(env);
+ TaskState *ts = cpu->opaque;
+
q->next = ts->first_free;
ts->first_free = q;
}
@@ -388,8 +440,9 @@ static inline void free_sigqueue(CPUArchState *env, struct sigqueue *q)
/* abort execution with signal */
static void QEMU_NORETURN force_sig(int target_sig)
{
- CPUArchState *env = thread_cpu->env_ptr;
- TaskState *ts = (TaskState *)env->opaque;
+ CPUState *cpu = thread_cpu;
+ CPUArchState *env = cpu->env_ptr;
+ TaskState *ts = (TaskState *)cpu->opaque;
int host_sig, core_dumped = 0;
struct sigaction act;
host_sig = target_to_host_signal(target_sig);
@@ -440,7 +493,8 @@ static void QEMU_NORETURN force_sig(int target_sig)
as possible */
int queue_signal(CPUArchState *env, int sig, target_siginfo_t *info)
{
- TaskState *ts = env->opaque;
+ CPUState *cpu = ENV_GET_CPU(env);
+ TaskState *ts = cpu->opaque;
struct emulated_sigtable *k;
struct sigqueue *q, **pq;
abi_ulong handler;
@@ -453,6 +507,19 @@ int queue_signal(CPUArchState *env, int sig, target_siginfo_t *info)
k = &ts->sigtab[sig - 1];
queue = gdb_queuesig ();
handler = sigact_table[sig - 1]._sa_handler;
+
+ if (ts->sigsegv_blocked && sig == TARGET_SIGSEGV) {
+ /* Guest has blocked SIGSEGV but we got one anyway. Assume this
+ * is a forced SIGSEGV (ie one the kernel handles via force_sig_info
+ * because it got a real MMU fault). A blocked SIGSEGV in that
+ * situation is treated as if using the default handler. This is
+ * not correct if some other process has randomly sent us a SIGSEGV
+ * via kill(), but that is not easy to distinguish at this point,
+ * so we assume it doesn't happen.
+ */
+ handler = TARGET_SIG_DFL;
+ }
+
if (!queue && handler == TARGET_SIG_DFL) {
if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN || sig == TARGET_SIGTTOU) {
kill(getpid(),SIGSTOP);
@@ -774,8 +841,9 @@ static int
setup_sigcontext(struct target_sigcontext *sc, struct target_fpstate *fpstate,
CPUX86State *env, abi_ulong mask, abi_ulong fpstate_addr)
{
- int err = 0;
- uint16_t magic;
+ CPUState *cs = CPU(x86_env_get_cpu(env));
+ int err = 0;
+ uint16_t magic;
/* already locked in setup_frame() */
err |= __put_user(env->segs[R_GS].selector, (unsigned int *)&sc->gs);
@@ -790,7 +858,7 @@ setup_sigcontext(struct target_sigcontext *sc, struct target_fpstate *fpstate,
err |= __put_user(env->regs[R_EDX], &sc->edx);
err |= __put_user(env->regs[R_ECX], &sc->ecx);
err |= __put_user(env->regs[R_EAX], &sc->eax);
- err |= __put_user(env->exception_index, &sc->trapno);
+ err |= __put_user(cs->exception_index, &sc->trapno);
err |= __put_user(env->error_code, &sc->err);
err |= __put_user(env->eip, &sc->eip);
err |= __put_user(env->segs[R_CS].selector, (unsigned int *)&sc->cs);
@@ -1050,7 +1118,7 @@ long do_sigreturn(CPUX86State *env)
}
target_to_host_sigset_internal(&set, &target_set);
- sigprocmask(SIG_SETMASK, &set, NULL);
+ do_sigprocmask(SIG_SETMASK, &set, NULL);
/* restore registers */
if (restore_sigcontext(env, &frame->sc, &eax))
@@ -1075,7 +1143,7 @@ long do_rt_sigreturn(CPUX86State *env)
if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
goto badframe;
target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
- sigprocmask(SIG_SETMASK, &set, NULL);
+ do_sigprocmask(SIG_SETMASK, &set, NULL);
if (restore_sigcontext(env, &frame->uc.tuc_mcontext, &eax))
goto badframe;
@@ -1214,7 +1282,7 @@ static int target_restore_sigframe(CPUARMState *env,
uint64_t pstate;
target_to_host_sigset(&set, &sf->uc.tuc_sigmask);
- sigprocmask(SIG_SETMASK, &set, NULL);
+ do_sigprocmask(SIG_SETMASK, &set, NULL);
for (i = 0; i < 31; i++) {
__get_user(env->xregs[i], &sf->uc.tuc_mcontext.regs[i]);
@@ -1334,7 +1402,7 @@ static void setup_frame(int sig, struct target_sigaction *ka,
long do_rt_sigreturn(CPUARMState *env)
{
- struct target_rt_sigframe *frame;
+ struct target_rt_sigframe *frame = NULL;
abi_ulong frame_addr = env->xregs[31];
if (frame_addr & 15) {
@@ -1855,7 +1923,7 @@ static long do_sigreturn_v1(CPUARMState *env)
}
target_to_host_sigset_internal(&host_set, &set);
- sigprocmask(SIG_SETMASK, &host_set, NULL);
+ do_sigprocmask(SIG_SETMASK, &host_set, NULL);
if (restore_sigcontext(env, &frame->sc))
goto badframe;
@@ -1936,7 +2004,7 @@ static int do_sigframe_return_v2(CPUARMState *env, target_ulong frame_addr,
abi_ulong *regspace;
target_to_host_sigset(&host_set, &uc->tuc_sigmask);
- sigprocmask(SIG_SETMASK, &host_set, NULL);
+ do_sigprocmask(SIG_SETMASK, &host_set, NULL);
if (restore_sigcontext(env, &uc->tuc_mcontext))
return 1;
@@ -2027,7 +2095,7 @@ static long do_rt_sigreturn_v1(CPUARMState *env)
goto badframe;
target_to_host_sigset(&host_set, &frame->uc.tuc_sigmask);
- sigprocmask(SIG_SETMASK, &host_set, NULL);
+ do_sigprocmask(SIG_SETMASK, &host_set, NULL);
if (restore_sigcontext(env, &frame->uc.tuc_mcontext))
goto badframe;
@@ -2438,7 +2506,7 @@ long do_sigreturn(CPUSPARCState *env)
}
target_to_host_sigset_internal(&host_set, &set);
- sigprocmask(SIG_SETMASK, &host_set, NULL);
+ do_sigprocmask(SIG_SETMASK, &host_set, NULL);
if (err)
goto segv_and_exit;
@@ -2561,7 +2629,7 @@ void sparc64_set_context(CPUSPARCState *env)
goto do_sigsegv;
}
target_to_host_sigset_internal(&set, &target_set);
- sigprocmask(SIG_SETMASK, &set, NULL);
+ do_sigprocmask(SIG_SETMASK, &set, NULL);
}
env->pc = pc;
env->npc = npc;
@@ -2650,7 +2718,7 @@ void sparc64_get_context(CPUSPARCState *env)
err = 0;
- sigprocmask(0, NULL, &set);
+ do_sigprocmask(0, NULL, &set);
host_to_target_sigset_internal(&target_set, &set);
if (TARGET_NSIG_WORDS == 1) {
err |= __put_user(target_set.sig[0],
@@ -2985,7 +3053,7 @@ long do_sigreturn(CPUMIPSState *regs)
}
target_to_host_sigset_internal(&blocked, &target_set);
- sigprocmask(SIG_SETMASK, &blocked, NULL);
+ do_sigprocmask(SIG_SETMASK, &blocked, NULL);
if (restore_sigcontext(regs, &frame->sf_sc))
goto badframe;
@@ -3089,7 +3157,7 @@ long do_rt_sigreturn(CPUMIPSState *env)
goto badframe;
target_to_host_sigset(&blocked, &frame->rs_uc.tuc_sigmask);
- sigprocmask(SIG_SETMASK, &blocked, NULL);
+ do_sigprocmask(SIG_SETMASK, &blocked, NULL);
if (restore_sigcontext(env, &frame->rs_uc.tuc_mcontext))
goto badframe;
@@ -3379,7 +3447,7 @@ long do_sigreturn(CPUSH4State *regs)
goto badframe;
target_to_host_sigset_internal(&blocked, &target_set);
- sigprocmask(SIG_SETMASK, &blocked, NULL);
+ do_sigprocmask(SIG_SETMASK, &blocked, NULL);
if (restore_sigcontext(regs, &frame->sc, &r0))
goto badframe;
@@ -3408,7 +3476,7 @@ long do_rt_sigreturn(CPUSH4State *regs)
goto badframe;
target_to_host_sigset(&blocked, &frame->uc.tuc_sigmask);
- sigprocmask(SIG_SETMASK, &blocked, NULL);
+ do_sigprocmask(SIG_SETMASK, &blocked, NULL);
if (restore_sigcontext(regs, &frame->uc.tuc_mcontext, &r0))
goto badframe;
@@ -3638,7 +3706,7 @@ long do_sigreturn(CPUMBState *env)
goto badframe;
}
target_to_host_sigset_internal(&set, &target_set);
- sigprocmask(SIG_SETMASK, &set, NULL);
+ do_sigprocmask(SIG_SETMASK, &set, NULL);
restore_sigcontext(&frame->uc.tuc_mcontext, env);
/* We got here through a sigreturn syscall, our path back is via an
@@ -3813,7 +3881,7 @@ long do_sigreturn(CPUCRISState *env)
goto badframe;
}
target_to_host_sigset_internal(&set, &target_set);
- sigprocmask(SIG_SETMASK, &set, NULL);
+ do_sigprocmask(SIG_SETMASK, &set, NULL);
restore_sigcontext(&frame->sc, env);
unlock_user_struct(frame, frame_addr, 0);
@@ -3975,8 +4043,6 @@ static void setup_rt_frame(int sig, struct target_sigaction *ka,
struct target_rt_sigframe *frame;
abi_ulong info_addr, uc_addr;
- frame_addr = get_sigframe(ka, env, sizeof *frame);
-
frame_addr = get_sigframe(ka, env, sizeof(*frame));
if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
goto give_sigsegv;
@@ -4344,7 +4410,7 @@ long do_sigreturn(CPUS390XState *env)
}
target_to_host_sigset_internal(&set, &target_set);
- sigprocmask(SIG_SETMASK, &set, NULL); /* ~_BLOCKABLE? */
+ do_sigprocmask(SIG_SETMASK, &set, NULL); /* ~_BLOCKABLE? */
if (restore_sigregs(env, &frame->sregs)) {
goto badframe;
@@ -4372,7 +4438,7 @@ long do_rt_sigreturn(CPUS390XState *env)
}
target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
- sigprocmask(SIG_SETMASK, &set, NULL); /* ~_BLOCKABLE? */
+ do_sigprocmask(SIG_SETMASK, &set, NULL); /* ~_BLOCKABLE? */
if (restore_sigregs(env, &frame->uc.tuc_mcontext)) {
goto badframe;
@@ -4900,7 +4966,7 @@ long do_sigreturn(CPUPPCState *env)
goto sigsegv;
#endif
target_to_host_sigset_internal(&blocked, &set);
- sigprocmask(SIG_SETMASK, &blocked, NULL);
+ do_sigprocmask(SIG_SETMASK, &blocked, NULL);
if (__get_user(sr_addr, &sc->regs))
goto sigsegv;
@@ -4944,7 +5010,7 @@ static int do_setcontext(struct target_ucontext *ucp, CPUPPCState *env, int sig)
return 1;
target_to_host_sigset_internal(&blocked, &set);
- sigprocmask(SIG_SETMASK, &blocked, NULL);
+ do_sigprocmask(SIG_SETMASK, &blocked, NULL);
if (restore_user_regs(env, mcp, sig))
goto sigsegv;
@@ -5318,7 +5384,7 @@ long do_sigreturn(CPUM68KState *env)
}
target_to_host_sigset_internal(&set, &target_set);
- sigprocmask(SIG_SETMASK, &set, NULL);
+ do_sigprocmask(SIG_SETMASK, &set, NULL);
/* restore registers */
@@ -5346,7 +5412,7 @@ long do_rt_sigreturn(CPUM68KState *env)
goto badframe;
target_to_host_sigset_internal(&set, &target_set);
- sigprocmask(SIG_SETMASK, &set, NULL);
+ do_sigprocmask(SIG_SETMASK, &set, NULL);
/* restore registers */
@@ -5593,7 +5659,7 @@ long do_sigreturn(CPUAlphaState *env)
}
target_to_host_sigset_internal(&set, &target_set);
- sigprocmask(SIG_SETMASK, &set, NULL);
+ do_sigprocmask(SIG_SETMASK, &set, NULL);
if (restore_sigcontext(env, sc)) {
goto badframe;
@@ -5616,7 +5682,7 @@ long do_rt_sigreturn(CPUAlphaState *env)
goto badframe;
}
target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
- sigprocmask(SIG_SETMASK, &set, NULL);
+ do_sigprocmask(SIG_SETMASK, &set, NULL);
if (restore_sigcontext(env, &frame->uc.tuc_mcontext)) {
goto badframe;
@@ -5675,7 +5741,7 @@ void process_pending_signals(CPUArchState *cpu_env)
struct emulated_sigtable *k;
struct target_sigaction *sa;
struct sigqueue *q;
- TaskState *ts = cpu_env->opaque;
+ TaskState *ts = cpu->opaque;
if (!ts->signal_pending)
return;
@@ -5710,6 +5776,14 @@ void process_pending_signals(CPUArchState *cpu_env)
handler = sa->_sa_handler;
}
+ if (ts->sigsegv_blocked && sig == TARGET_SIGSEGV) {
+ /* Guest has blocked SIGSEGV but we got one anyway. Assume this
+ * is a forced SIGSEGV (ie one the kernel handles via force_sig_info
+ * because it got a real MMU fault), and treat as if default handler.
+ */
+ handler = TARGET_SIG_DFL;
+ }
+
if (handler == TARGET_SIG_DFL) {
/* default handler : ignore some signal. The other are job control or fatal */
if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN || sig == TARGET_SIGTTOU) {
@@ -5733,7 +5807,7 @@ void process_pending_signals(CPUArchState *cpu_env)
sigaddset(&set, target_to_host_signal(sig));
/* block signals in the handler using Linux */
- sigprocmask(SIG_BLOCK, &set, &old_set);
+ do_sigprocmask(SIG_BLOCK, &set, &old_set);
/* save the previous blocked signal state to restore it at the
end of the signal execution (see do_sigreturn) */
host_to_target_sigset_internal(&target_old_set, &old_set);
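The signal.c changes route every guest-originated mask update through do_sigprocmask(), which keeps SIGSEGV unblocked on the host (it is needed to catch guest MMU faults) and records the guest's intent in ts->sigsegv_blocked instead. A sketch of the call pattern, assuming the usual sigreturn context inside signal.c:

static void example_restore_guest_mask(const target_sigset_t *target_set)
{
    sigset_t host_set;

    target_to_host_sigset_internal(&host_set, target_set);
    /* Unlike a raw sigprocmask(), this strips SIGSEGV from the host mask and
     * only remembers that the guest wanted it blocked. */
    do_sigprocmask(SIG_SETMASK, &host_set, NULL);
}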
diff --git a/linux-user/syscall.c b/linux-user/syscall.c
index e2c10cc0bd..9864813b7a 100644
--- a/linux-user/syscall.c
+++ b/linux-user/syscall.c
@@ -43,6 +43,7 @@
#include <sys/resource.h>
#include <sys/mman.h>
#include <sys/swap.h>
+#include <linux/capability.h>
#include <signal.h>
#include <sched.h>
#ifdef __ia64__
@@ -243,6 +244,10 @@ _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
unsigned long *, user_mask_ptr);
_syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
void *, arg);
+_syscall2(int, capget, struct __user_cap_header_struct *, header,
+ struct __user_cap_data_struct *, data);
+_syscall2(int, capset, struct __user_cap_header_struct *, header,
+ struct __user_cap_data_struct *, data);
static bitmask_transtbl fcntl_flags_tbl[] = {
{ TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
@@ -2057,9 +2062,12 @@ static abi_long do_accept4(int fd, abi_ulong target_addr,
socklen_t addrlen;
void *addr;
abi_long ret;
+ int host_flags;
+
+ host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
if (target_addr == 0) {
- return get_errno(accept4(fd, NULL, NULL, flags));
+ return get_errno(accept4(fd, NULL, NULL, host_flags));
}
/* linux returns EINVAL if addrlen pointer is invalid */
@@ -2075,7 +2083,7 @@ static abi_long do_accept4(int fd, abi_ulong target_addr,
addr = alloca(addrlen);
- ret = get_errno(accept4(fd, addr, &addrlen, flags));
+ ret = get_errno(accept4(fd, addr, &addrlen, host_flags));
if (!is_error(ret)) {
host_to_target_sockaddr(target_addr, addr, addrlen);
if (put_user_u32(addrlen, target_addrlen_addr))
@@ -4243,7 +4251,7 @@ static void *clone_func(void *arg)
env = info->env;
cpu = ENV_GET_CPU(env);
thread_cpu = cpu;
- ts = (TaskState *)env->opaque;
+ ts = (TaskState *)cpu->opaque;
info->tid = gettid();
cpu->host_tid = info->tid;
task_settid(ts);
@@ -4271,8 +4279,10 @@ static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
abi_ulong parent_tidptr, target_ulong newtls,
abi_ulong child_tidptr)
{
+ CPUState *cpu = ENV_GET_CPU(env);
int ret;
TaskState *ts;
+ CPUState *new_cpu;
CPUArchState *new_env;
unsigned int nptl_flags;
sigset_t sigmask;
@@ -4282,7 +4292,7 @@ static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
flags &= ~(CLONE_VFORK | CLONE_VM);
if (flags & CLONE_VM) {
- TaskState *parent_ts = (TaskState *)env->opaque;
+ TaskState *parent_ts = (TaskState *)cpu->opaque;
new_thread_info info;
pthread_attr_t attr;
@@ -4292,7 +4302,8 @@ static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
new_env = cpu_copy(env);
/* Init regs that differ from the parent. */
cpu_clone_regs(new_env, newsp);
- new_env->opaque = ts;
+ new_cpu = ENV_GET_CPU(new_env);
+ new_cpu->opaque = ts;
ts->bprm = parent_ts->bprm;
ts->info = parent_ts->info;
nptl_flags = flags;
@@ -4364,7 +4375,7 @@ static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
put_user_u32(gettid(), child_tidptr);
if (flags & CLONE_PARENT_SETTID)
put_user_u32(gettid(), parent_tidptr);
- ts = (TaskState *)env->opaque;
+ ts = (TaskState *)cpu->opaque;
if (flags & CLONE_SETTLS)
cpu_set_tls (env, newtls);
if (flags & CLONE_CHILD_CLEARTID)
@@ -4418,6 +4429,14 @@ static int target_to_host_fcntl_cmd(int cmd)
#endif
case TARGET_F_NOTIFY:
return F_NOTIFY;
+#ifdef F_GETOWN_EX
+ case TARGET_F_GETOWN_EX:
+ return F_GETOWN_EX;
+#endif
+#ifdef F_SETOWN_EX
+ case TARGET_F_SETOWN_EX:
+ return F_SETOWN_EX;
+#endif
default:
return -TARGET_EINVAL;
}
@@ -4440,6 +4459,10 @@ static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
struct target_flock *target_fl;
struct flock64 fl64;
struct target_flock64 *target_fl64;
+#ifdef F_GETOWN_EX
+ struct f_owner_ex fox;
+ struct target_f_owner_ex *target_fox;
+#endif
abi_long ret;
int host_cmd = target_to_host_fcntl_cmd(cmd);
@@ -4533,6 +4556,30 @@ static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
break;
+#ifdef F_GETOWN_EX
+ case TARGET_F_GETOWN_EX:
+ ret = get_errno(fcntl(fd, host_cmd, &fox));
+ if (ret >= 0) {
+ if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
+ return -TARGET_EFAULT;
+ target_fox->type = tswap32(fox.type);
+ target_fox->pid = tswap32(fox.pid);
+ unlock_user_struct(target_fox, arg, 1);
+ }
+ break;
+#endif
+
+#ifdef F_SETOWN_EX
+ case TARGET_F_SETOWN_EX:
+ if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
+ return -TARGET_EFAULT;
+ fox.type = tswap32(target_fox->type);
+ fox.pid = tswap32(target_fox->pid);
+ unlock_user_struct(target_fox, arg, 0);
+ ret = get_errno(fcntl(fd, host_cmd, &fox));
+ break;
+#endif
+
case TARGET_F_SETOWN:
case TARGET_F_GETOWN:
case TARGET_F_SETSIG:
@@ -4974,7 +5021,8 @@ void init_qemu_uname_release(void)
static int open_self_maps(void *cpu_env, int fd)
{
#if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
- TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
+ CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
+ TaskState *ts = cpu->opaque;
#endif
FILE *fp;
char *line = NULL;
@@ -5026,7 +5074,8 @@ static int open_self_maps(void *cpu_env, int fd)
static int open_self_stat(void *cpu_env, int fd)
{
- TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
+ CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
+ TaskState *ts = cpu->opaque;
abi_ulong start_stack = ts->info->start_stack;
int i;
@@ -5062,7 +5111,8 @@ static int open_self_stat(void *cpu_env, int fd)
static int open_self_auxv(void *cpu_env, int fd)
{
- TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
+ CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
+ TaskState *ts = cpu->opaque;
abi_ulong auxv = ts->info->saved_auxv;
abi_ulong len = ts->info->auxv_len;
char *ptr;
@@ -5244,14 +5294,14 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
/* Remove the CPU from the list. */
QTAILQ_REMOVE(&cpus, cpu, node);
cpu_list_unlock();
- ts = ((CPUArchState *)cpu_env)->opaque;
+ ts = cpu->opaque;
if (ts->child_tidptr) {
put_user_u32(0, ts->child_tidptr);
sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
NULL, NULL, 0);
}
thread_cpu = NULL;
- object_unref(OBJECT(ENV_GET_CPU(cpu_env)));
+ object_unref(OBJECT(cpu));
g_free(ts);
pthread_exit(NULL);
}
@@ -5987,7 +6037,7 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
{
sigset_t cur_set;
abi_ulong target_set;
- sigprocmask(0, NULL, &cur_set);
+ do_sigprocmask(0, NULL, &cur_set);
host_to_target_old_sigset(&target_set, &cur_set);
ret = target_set;
}
@@ -5998,10 +6048,10 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
{
sigset_t set, oset, cur_set;
abi_ulong target_set = arg1;
- sigprocmask(0, NULL, &cur_set);
+ do_sigprocmask(0, NULL, &cur_set);
target_to_host_old_sigset(&set, &target_set);
sigorset(&set, &set, &cur_set);
- sigprocmask(SIG_SETMASK, &set, &oset);
+ do_sigprocmask(SIG_SETMASK, &set, &oset);
host_to_target_old_sigset(&target_set, &oset);
ret = target_set;
}
@@ -6032,7 +6082,7 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
mask = arg2;
target_to_host_old_sigset(&set, &mask);
- ret = get_errno(sigprocmask(how, &set, &oldset));
+ ret = get_errno(do_sigprocmask(how, &set, &oldset));
if (!is_error(ret)) {
host_to_target_old_sigset(&mask, &oldset);
ret = mask;
@@ -6066,7 +6116,7 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
how = 0;
set_ptr = NULL;
}
- ret = get_errno(sigprocmask(how, set_ptr, &oldset));
+ ret = get_errno(do_sigprocmask(how, set_ptr, &oldset));
if (!is_error(ret) && arg3) {
if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
goto efault;
@@ -6106,7 +6156,7 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
how = 0;
set_ptr = NULL;
}
- ret = get_errno(sigprocmask(how, set_ptr, &oldset));
+ ret = get_errno(do_sigprocmask(how, set_ptr, &oldset));
if (!is_error(ret) && arg3) {
if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
goto efault;
@@ -6555,7 +6605,7 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
break;
case TARGET_NR_mprotect:
{
- TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
+ TaskState *ts = cpu->opaque;
/* Special hack to detect libc making the stack executable. */
if ((arg3 & PROT_GROWSDOWN)
&& arg1 >= ts->info->stack_limit
@@ -7635,9 +7685,75 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
unlock_user(p, arg1, ret);
break;
case TARGET_NR_capget:
- goto unimplemented;
case TARGET_NR_capset:
- goto unimplemented;
+ {
+ struct target_user_cap_header *target_header;
+ struct target_user_cap_data *target_data = NULL;
+ struct __user_cap_header_struct header;
+ struct __user_cap_data_struct data[2];
+ struct __user_cap_data_struct *dataptr = NULL;
+ int i, target_datalen;
+ int data_items = 1;
+
+ if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
+ goto efault;
+ }
+ header.version = tswap32(target_header->version);
+ header.pid = tswap32(target_header->pid);
+
+ if (header.version != _LINUX_CAPABILITY_VERSION) {
+ /* Version 2 and up takes pointer to two user_data structs */
+ data_items = 2;
+ }
+
+ target_datalen = sizeof(*target_data) * data_items;
+
+ if (arg2) {
+ if (num == TARGET_NR_capget) {
+ target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
+ } else {
+ target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
+ }
+ if (!target_data) {
+ unlock_user_struct(target_header, arg1, 0);
+ goto efault;
+ }
+
+ if (num == TARGET_NR_capset) {
+ for (i = 0; i < data_items; i++) {
+ data[i].effective = tswap32(target_data[i].effective);
+ data[i].permitted = tswap32(target_data[i].permitted);
+ data[i].inheritable = tswap32(target_data[i].inheritable);
+ }
+ }
+
+ dataptr = data;
+ }
+
+ if (num == TARGET_NR_capget) {
+ ret = get_errno(capget(&header, dataptr));
+ } else {
+ ret = get_errno(capset(&header, dataptr));
+ }
+
+ /* The kernel always updates version for both capget and capset */
+ target_header->version = tswap32(header.version);
+ unlock_user_struct(target_header, arg1, 1);
+
+ if (arg2) {
+ if (num == TARGET_NR_capget) {
+ for (i = 0; i < data_items; i++) {
+ target_data[i].effective = tswap32(data[i].effective);
+ target_data[i].permitted = tswap32(data[i].permitted);
+ target_data[i].inheritable = tswap32(data[i].inheritable);
+ }
+ unlock_user(target_data, arg2, target_datalen);
+ } else {
+ unlock_user(target_data, arg2, 0);
+ }
+ }
+ break;
+ }
case TARGET_NR_sigaltstack:
#if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
@@ -8119,7 +8235,7 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
}
mask = arg2;
target_to_host_old_sigset(&set, &mask);
- sigprocmask(how, &set, &oldset);
+ do_sigprocmask(how, &set, &oldset);
host_to_target_old_sigset(&mask, &oldset);
ret = mask;
}
@@ -8647,7 +8763,7 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
break;
#elif defined(TARGET_M68K)
{
- TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
+ TaskState *ts = cpu->opaque;
ts->tp_value = arg1;
ret = 0;
break;
@@ -8663,7 +8779,7 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
break;
#elif defined(TARGET_M68K)
{
- TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
+ TaskState *ts = cpu->opaque;
ret = ts->tp_value;
break;
}
@@ -9142,6 +9258,7 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
case TARGET_NR_atomic_barrier:
{
/* Like the kernel implementation and the qemu arm barrier, no-op this? */
+ ret = 0;
break;
}
#endif
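
The syscall.c hunks above route TARGET_NR_capget/TARGET_NR_capset through to the host capget/capset syscalls, using two __user_cap_data_struct elements whenever the header version is newer than _LINUX_CAPABILITY_VERSION. A minimal guest-side sketch that would exercise the new path follows; it is illustrative only (not part of the patch), and the choice of version constant and pid 0 simply matches the common "query my own capabilities" case:

/* Hypothetical guest program: query the calling thread's capabilities
 * via the raw capget syscall that the emulation above now forwards.
 * _LINUX_CAPABILITY_VERSION_3 selects the two-element data layout,
 * corresponding to the data_items = 2 branch in the hunk. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/capability.h>

int main(void)
{
    struct __user_cap_header_struct hdr;
    struct __user_cap_data_struct data[2];

    memset(&hdr, 0, sizeof(hdr));
    memset(data, 0, sizeof(data));
    hdr.version = _LINUX_CAPABILITY_VERSION_3;   /* v2+: two data structs */
    hdr.pid = 0;                                 /* 0 == calling thread */

    if (syscall(SYS_capget, &hdr, data) != 0) {
        perror("capget");
        return 1;
    }
    printf("effective[0]=%#x permitted[0]=%#x inheritable[0]=%#x\n",
           data[0].effective, data[0].permitted, data[0].inheritable);
    return 0;
}
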
diff --git a/linux-user/syscall_defs.h b/linux-user/syscall_defs.h
index 732c9e3dbb..fdf9a47595 100644
--- a/linux-user/syscall_defs.h
+++ b/linux-user/syscall_defs.h
@@ -2123,6 +2123,8 @@ struct target_statfs64 {
#define TARGET_F_SETOWN 8 /* for sockets. */
#define TARGET_F_GETOWN 9 /* for sockets. */
#endif
+#define TARGET_F_SETOWN_EX 15
+#define TARGET_F_GETOWN_EX 16
#ifndef TARGET_F_RDLCK
#define TARGET_F_RDLCK 0
@@ -2305,6 +2307,11 @@ struct target_eabi_flock64 {
} QEMU_PACKED;
#endif
+struct target_f_owner_ex {
+ int type; /* Owner type of ID. */
+ int pid; /* ID of owner. */
+};
+
/* soundcard defines */
/* XXX: convert them all to arch indepedent entries */
#define TARGET_SNDCTL_COPR_HALT TARGET_IOWR('C', 7, int);
@@ -2559,3 +2566,14 @@ struct target_sigevent {
} _sigev_thread;
} _sigev_un;
};
+
+struct target_user_cap_header {
+ uint32_t version;
+ int pid;
+};
+
+struct target_user_cap_data {
+ uint32_t effective;
+ uint32_t permitted;
+ uint32_t inheritable;
+};
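
The target_f_owner_ex layout added above mirrors the host's struct f_owner_ex, so guests can now use the extended ownership fcntl commands that the syscall.c changes translate. A small sketch of such a guest program follows; it is illustrative only, and the use of /dev/null plus F_OWNER_PID is just an example choice:

/* Hypothetical guest program using the newly supported
 * F_SETOWN_EX/F_GETOWN_EX commands (error handling kept minimal). */
#define _GNU_SOURCE
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
    struct f_owner_ex owner = { .type = F_OWNER_PID, .pid = getpid() };
    int fd = open("/dev/null", O_RDWR);

    if (fd < 0 || fcntl(fd, F_SETOWN_EX, &owner) < 0) {
        perror("F_SETOWN_EX");
        return 1;
    }
    if (fcntl(fd, F_GETOWN_EX, &owner) == 0) {
        printf("SIGIO owner: type=%d pid=%d\n", owner.type, (int)owner.pid);
    }
    close(fd);
    return 0;
}
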
diff --git a/linux-user/vm86.c b/linux-user/vm86.c
index 2c4ffeb551..45ef559ec6 100644
--- a/linux-user/vm86.c
+++ b/linux-user/vm86.c
@@ -72,7 +72,8 @@ static inline unsigned int vm_getl(uint32_t segptr, unsigned int reg16)
void save_v86_state(CPUX86State *env)
{
- TaskState *ts = env->opaque;
+ CPUState *cs = CPU(x86_env_get_cpu(env));
+ TaskState *ts = cs->opaque;
struct target_vm86plus_struct * target_v86;
if (!lock_user_struct(VERIFY_WRITE, target_v86, ts->target_v86, 0))
@@ -131,7 +132,8 @@ static inline void return_to_32bit(CPUX86State *env, int retval)
static inline int set_IF(CPUX86State *env)
{
- TaskState *ts = env->opaque;
+ CPUState *cs = CPU(x86_env_get_cpu(env));
+ TaskState *ts = cs->opaque;
ts->v86flags |= VIF_MASK;
if (ts->v86flags & VIP_MASK) {
@@ -143,7 +145,8 @@ static inline int set_IF(CPUX86State *env)
static inline void clear_IF(CPUX86State *env)
{
- TaskState *ts = env->opaque;
+ CPUState *cs = CPU(x86_env_get_cpu(env));
+ TaskState *ts = cs->opaque;
ts->v86flags &= ~VIF_MASK;
}
@@ -160,7 +163,8 @@ static inline void clear_AC(CPUX86State *env)
static inline int set_vflags_long(unsigned long eflags, CPUX86State *env)
{
- TaskState *ts = env->opaque;
+ CPUState *cs = CPU(x86_env_get_cpu(env));
+ TaskState *ts = cs->opaque;
set_flags(ts->v86flags, eflags, ts->v86mask);
set_flags(env->eflags, eflags, SAFE_MASK);
@@ -173,7 +177,8 @@ static inline int set_vflags_long(unsigned long eflags, CPUX86State *env)
static inline int set_vflags_short(unsigned short flags, CPUX86State *env)
{
- TaskState *ts = env->opaque;
+ CPUState *cs = CPU(x86_env_get_cpu(env));
+ TaskState *ts = cs->opaque;
set_flags(ts->v86flags, flags, ts->v86mask & 0xffff);
set_flags(env->eflags, flags, SAFE_MASK);
@@ -186,7 +191,8 @@ static inline int set_vflags_short(unsigned short flags, CPUX86State *env)
static inline unsigned int get_vflags(CPUX86State *env)
{
- TaskState *ts = env->opaque;
+ CPUState *cs = CPU(x86_env_get_cpu(env));
+ TaskState *ts = cs->opaque;
unsigned int flags;
flags = env->eflags & RETURN_MASK;
@@ -202,7 +208,8 @@ static inline unsigned int get_vflags(CPUX86State *env)
support TSS interrupt revectoring, so this code is always executed) */
static void do_int(CPUX86State *env, int intno)
{
- TaskState *ts = env->opaque;
+ CPUState *cs = CPU(x86_env_get_cpu(env));
+ TaskState *ts = cs->opaque;
uint32_t int_addr, segoffs, ssp;
unsigned int sp;
@@ -260,7 +267,8 @@ void handle_vm86_trap(CPUX86State *env, int trapno)
void handle_vm86_fault(CPUX86State *env)
{
- TaskState *ts = env->opaque;
+ CPUState *cs = CPU(x86_env_get_cpu(env));
+ TaskState *ts = cs->opaque;
uint32_t csp, ssp;
unsigned int ip, sp, newflags, newip, newcs, opcode, intno;
int data32, pref_done;
@@ -384,7 +392,8 @@ void handle_vm86_fault(CPUX86State *env)
int do_vm86(CPUX86State *env, long subfunction, abi_ulong vm86_addr)
{
- TaskState *ts = env->opaque;
+ CPUState *cs = CPU(x86_env_get_cpu(env));
+ TaskState *ts = cs->opaque;
struct target_vm86plus_struct * target_v86;
int ret;
diff --git a/main-loop.c b/main-loop.c
index c3c9c28fba..8a854938e7 100644
--- a/main-loop.c
+++ b/main-loop.c
@@ -25,6 +25,7 @@
#include "qemu-common.h"
#include "qemu/timer.h"
#include "qemu/sockets.h" // struct in_addr needed for libslirp.h
+#include "sysemu/qtest.h"
#include "slirp/libslirp.h"
#include "qemu/main-loop.h"
#include "block/aio.h"
@@ -208,7 +209,7 @@ static int os_host_main_loop_wait(int64_t timeout)
if (!timeout && (spin_counter > MAX_MAIN_LOOP_SPIN)) {
static bool notified;
- if (!notified) {
+ if (!notified && !qtest_enabled()) {
fprintf(stderr,
"main-loop: WARNING: I/O thread spun for %d iterations\n",
MAX_MAIN_LOOP_SPIN);
diff --git a/migration.c b/migration.c
index 00f465ea46..bd1fb912ae 100644
--- a/migration.c
+++ b/migration.c
@@ -26,16 +26,6 @@
#include "qmp-commands.h"
#include "trace.h"
-//#define DEBUG_MIGRATION
-
-#ifdef DEBUG_MIGRATION
-#define DPRINTF(fmt, ...) \
- do { printf("migration: " fmt, ## __VA_ARGS__); } while (0)
-#else
-#define DPRINTF(fmt, ...) \
- do { } while (0)
-#endif
-
enum {
MIG_STATE_ERROR = -1,
MIG_STATE_NONE,
@@ -101,6 +91,7 @@ void qemu_start_incoming_migration(const char *uri, Error **errp)
static void process_incoming_migration_co(void *opaque)
{
QEMUFile *f = opaque;
+ Error *local_err = NULL;
int ret;
ret = qemu_loadvm_state(f);
@@ -111,11 +102,15 @@ static void process_incoming_migration_co(void *opaque)
exit(EXIT_FAILURE);
}
qemu_announce_self();
- DPRINTF("successfully loaded vm state\n");
bdrv_clear_incoming_migration_all();
/* Make sure all file formats flush their mutable metadata */
- bdrv_invalidate_cache_all();
+ bdrv_invalidate_cache_all(&local_err);
+ if (local_err) {
+ qerror_report_err(local_err);
+ error_free(local_err);
+ exit(EXIT_FAILURE);
+ }
if (autostart) {
vm_start();
@@ -300,7 +295,7 @@ static void migrate_fd_cleanup(void *opaque)
s->cleanup_bh = NULL;
if (s->file) {
- DPRINTF("closing file\n");
+ trace_migrate_fd_cleanup();
qemu_mutex_unlock_iothread();
qemu_thread_join(&s->thread);
qemu_mutex_lock_iothread();
@@ -323,7 +318,7 @@ static void migrate_fd_cleanup(void *opaque)
void migrate_fd_error(MigrationState *s)
{
- DPRINTF("setting error state\n");
+ trace_migrate_fd_error();
assert(s->file == NULL);
s->state = MIG_STATE_ERROR;
trace_migrate_set_state(MIG_STATE_ERROR);
@@ -333,7 +328,7 @@ void migrate_fd_error(MigrationState *s)
static void migrate_fd_cancel(MigrationState *s)
{
int old_state ;
- DPRINTF("cancelling migration\n");
+ trace_migrate_fd_cancel();
do {
old_state = s->state;
@@ -583,29 +578,23 @@ static void *migration_thread(void *opaque)
int64_t start_time = initial_time;
bool old_vm_running = false;
- DPRINTF("beginning savevm\n");
qemu_savevm_state_begin(s->file, &s->params);
s->setup_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) - setup_start;
migrate_set_state(s, MIG_STATE_SETUP, MIG_STATE_ACTIVE);
- DPRINTF("setup complete\n");
-
while (s->state == MIG_STATE_ACTIVE) {
int64_t current_time;
uint64_t pending_size;
if (!qemu_file_rate_limit(s->file)) {
- DPRINTF("iterate\n");
pending_size = qemu_savevm_state_pending(s->file, max_size);
- DPRINTF("pending size %" PRIu64 " max %" PRIu64 "\n",
- pending_size, max_size);
+ trace_migrate_pending(pending_size, max_size);
if (pending_size && pending_size >= max_size) {
qemu_savevm_state_iterate(s->file);
} else {
int ret;
- DPRINTF("done iterating\n");
qemu_mutex_lock_iothread();
start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER);
@@ -644,9 +633,8 @@ static void *migration_thread(void *opaque)
s->mbps = time_spent ? (((double) transferred_bytes * 8.0) /
((double) time_spent / 1000.0)) / 1000.0 / 1000.0 : -1;
- DPRINTF("transferred %" PRIu64 " time_spent %" PRIu64
- " bandwidth %g max_size %" PRId64 "\n",
- transferred_bytes, time_spent, bandwidth, max_size);
+ trace_migrate_transferred(transferred_bytes, time_spent,
+ bandwidth, max_size);
/* if we haven't sent anything, we don't want to recalculate
10000 is a small enough number for our purposes */
if (s->dirty_bytes_rate && transferred_bytes > 10000) {
diff --git a/monitor.c b/monitor.c
index 342e83baea..1266ba06fb 100644
--- a/monitor.c
+++ b/monitor.c
@@ -137,6 +137,7 @@ typedef struct mon_cmd_t {
* used, and mhandler of 1st level plays the role of help function.
*/
struct mon_cmd_t *sub_table;
+ void (*command_completion)(ReadLineState *rs, int nb_args, const char *str);
} mon_cmd_t;
/* file descriptors passed via SCM_RIGHTS */
@@ -352,33 +353,6 @@ void monitor_printf(Monitor *mon, const char *fmt, ...)
va_end(ap);
}
-void monitor_print_filename(Monitor *mon, const char *filename)
-{
- int i;
-
- for (i = 0; filename[i]; i++) {
- switch (filename[i]) {
- case ' ':
- case '"':
- case '\\':
- monitor_printf(mon, "\\%c", filename[i]);
- break;
- case '\t':
- monitor_printf(mon, "\\t");
- break;
- case '\r':
- monitor_printf(mon, "\\r");
- break;
- case '\n':
- monitor_printf(mon, "\\n");
- break;
- default:
- monitor_printf(mon, "%c", filename[i]);
- break;
- }
- }
-}
-
static int GCC_FMT_ATTR(2, 3) monitor_fprintf(FILE *stream,
const char *fmt, ...)
{
@@ -2254,6 +2228,7 @@ void qmp_getfd(const char *fdname, Error **errp)
}
if (qemu_isdigit(fdname[0])) {
+ close(fd);
error_set(errp, QERR_INVALID_PARAMETER_VALUE, "fdname",
"a name not starting with a digit");
return;
@@ -4277,11 +4252,15 @@ static const char *next_arg_type(const char *typestr)
return (p != NULL ? ++p : typestr);
}
-static void device_add_completion(ReadLineState *rs, const char *str)
+void device_add_completion(ReadLineState *rs, int nb_args, const char *str)
{
GSList *list, *elt;
size_t len;
+ if (nb_args != 2) {
+ return;
+ }
+
len = strlen(str);
readline_set_completion_index(rs, len);
list = elt = object_class_get_list(TYPE_DEVICE, false);
@@ -4290,7 +4269,9 @@ static void device_add_completion(ReadLineState *rs, const char *str)
DeviceClass *dc = OBJECT_CLASS_CHECK(DeviceClass, elt->data,
TYPE_DEVICE);
name = object_class_get_name(OBJECT_CLASS(dc));
- if (!strncmp(name, str, len)) {
+
+ if (!dc->cannot_instantiate_with_device_add_yet
+ && !strncmp(name, str, len)) {
readline_add_completion(rs, name);
}
elt = elt->next;
@@ -4298,11 +4279,15 @@ static void device_add_completion(ReadLineState *rs, const char *str)
g_slist_free(list);
}
-static void object_add_completion(ReadLineState *rs, const char *str)
+void object_add_completion(ReadLineState *rs, int nb_args, const char *str)
{
GSList *list, *elt;
size_t len;
+ if (nb_args != 2) {
+ return;
+ }
+
len = strlen(str);
readline_set_completion_index(rs, len);
list = elt = object_class_get_list(TYPE_USER_CREATABLE, false);
@@ -4318,8 +4303,8 @@ static void object_add_completion(ReadLineState *rs, const char *str)
g_slist_free(list);
}
-static void device_del_completion(ReadLineState *rs, BusState *bus,
- const char *str, size_t len)
+static void device_del_bus_completion(ReadLineState *rs, BusState *bus,
+ const char *str, size_t len)
{
BusChild *kid;
@@ -4332,16 +4317,32 @@ static void device_del_completion(ReadLineState *rs, BusState *bus,
}
QLIST_FOREACH(dev_child, &dev->child_bus, sibling) {
- device_del_completion(rs, dev_child, str, len);
+ device_del_bus_completion(rs, dev_child, str, len);
}
}
}
-static void object_del_completion(ReadLineState *rs, const char *str)
+void device_del_completion(ReadLineState *rs, int nb_args, const char *str)
+{
+ size_t len;
+
+ if (nb_args != 2) {
+ return;
+ }
+
+ len = strlen(str);
+ readline_set_completion_index(rs, len);
+ device_del_bus_completion(rs, sysbus_get_default(), str, len);
+}
+
+void object_del_completion(ReadLineState *rs, int nb_args, const char *str)
{
ObjectPropertyInfoList *list, *start;
size_t len;
+ if (nb_args != 2) {
+ return;
+ }
len = strlen(str);
readline_set_completion_index(rs, len);
@@ -4395,6 +4396,9 @@ static void monitor_find_completion_by_table(Monitor *mon,
return monitor_find_completion_by_table(mon, cmd->sub_table,
&args[1], nb_args - 1);
}
+ if (cmd->command_completion) {
+ return cmd->command_completion(mon->rs, nb_args, args[nb_args - 1]);
+ }
ptype = next_arg_type(cmd->args_type);
for(i = 0; i < nb_args - 2; i++) {
@@ -4421,13 +4425,6 @@ static void monitor_find_completion_by_table(Monitor *mon,
readline_set_completion_index(mon->rs, strlen(str));
bdrv_iterate(block_completion_it, &mbs);
break;
- case 'O':
- if (!strcmp(cmd->name, "device_add") && nb_args == 2) {
- device_add_completion(mon->rs, str);
- } else if (!strcmp(cmd->name, "object_add") && nb_args == 2) {
- object_add_completion(mon->rs, str);
- }
- break;
case 's':
case 'S':
if (!strcmp(cmd->name, "sendkey")) {
@@ -4441,12 +4438,6 @@ static void monitor_find_completion_by_table(Monitor *mon,
} else if (!strcmp(cmd->name, "help|?")) {
monitor_find_completion_by_table(mon, cmd_table,
&args[1], nb_args - 1);
- } else if (!strcmp(cmd->name, "device_del") && nb_args == 2) {
- size_t len = strlen(str);
- readline_set_completion_index(mon->rs, len);
- device_del_completion(mon->rs, sysbus_get_default(), str, len);
- } else if (!strcmp(cmd->name, "object_del") && nb_args == 2) {
- object_del_completion(mon->rs, str);
}
break;
default:
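
With the command_completion member added to mon_cmd_t and called from monitor_find_completion_by_table above, completion handlers hang off individual command-table entries instead of the old name-based special cases. A sketch of how a table entry could register one is shown below; apart from .command_completion and the "device:O" args type (which the removed 'O' case referred to), the surrounding fields follow the usual command-table layout and are assumptions, not text from this patch:

    {
        .name       = "device_add",
        .args_type  = "device:O",
        .params     = "driver[,prop=value][,...]",
        .help       = "add device, like -device on the command line",
        /* ... usual handler fields ... */
        .command_completion = device_add_completion,
    },
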
diff --git a/net/net.c b/net/net.c
index e3ef1e4f1d..9db4dba769 100644
--- a/net/net.c
+++ b/net/net.c
@@ -473,7 +473,7 @@ ssize_t qemu_deliver_packet(NetClientState *sender,
if (ret == 0) {
nc->receive_disabled = 1;
- };
+ }
return ret;
}
@@ -952,10 +952,12 @@ void net_host_device_remove(Monitor *mon, const QDict *qdict)
nc = net_hub_find_client_by_name(vlan_id, device);
if (!nc) {
+ error_report("Host network device '%s' on hub '%d' not found",
+ device, vlan_id);
return;
}
if (!net_host_check_device(nc->model)) {
- monitor_printf(mon, "invalid host network device %s\n", device);
+ error_report("invalid host network device '%s'", device);
return;
}
qemu_del_net_client(nc);
@@ -1043,7 +1045,7 @@ RxFilterInfoList *qmp_query_rx_filter(bool has_name, const char *name,
if (nc->info->type != NET_CLIENT_OPTIONS_KIND_NIC) {
if (has_name) {
error_setg(errp, "net client(%s) isn't a NIC", name);
- break;
+ return NULL;
}
continue;
}
@@ -1062,11 +1064,15 @@ RxFilterInfoList *qmp_query_rx_filter(bool has_name, const char *name,
} else if (has_name) {
error_setg(errp, "net client(%s) doesn't support"
" rx-filter querying", name);
+ return NULL;
+ }
+
+ if (has_name) {
break;
}
}
- if (filter_list == NULL && !error_is_set(errp) && has_name) {
+ if (filter_list == NULL && has_name) {
error_setg(errp, "invalid net client name: %s", name);
}
diff --git a/net/netmap.c b/net/netmap.c
index 8213304a5b..0c1772b03f 100644
--- a/net/netmap.c
+++ b/net/netmap.c
@@ -177,8 +177,8 @@ static void netmap_poll(NetClientState *nc, bool enable)
NetmapState *s = DO_UPCAST(NetmapState, nc, nc);
if (s->read_poll != enable || s->write_poll != enable) {
- s->read_poll = enable;
- s->read_poll = enable;
+ s->write_poll = enable;
+ s->read_poll = enable;
netmap_update_fd_handler(s);
}
}
diff --git a/net/slirp.c b/net/slirp.c
index cce026bf12..8fddc03841 100644
--- a/net/slirp.c
+++ b/net/slirp.c
@@ -527,6 +527,7 @@ static int slirp_smb(SlirpState* s, const char *exported_dir,
"pid directory=%s\n"
"lock directory=%s\n"
"state directory=%s\n"
+ "ncalrpc dir=%s/ncalrpc\n"
"log file=%s/log.smbd\n"
"smb passwd file=%s/smbpasswd\n"
"security = user\n"
@@ -542,6 +543,7 @@ static int slirp_smb(SlirpState* s, const char *exported_dir,
s->smb_dir,
s->smb_dir,
s->smb_dir,
+ s->smb_dir,
exported_dir,
passwd->pw_name
);
diff --git a/net/tap.c b/net/tap.c
index 8847ce100a..fc1b865e08 100644
--- a/net/tap.c
+++ b/net/tap.c
@@ -367,11 +367,8 @@ static int launch_script(const char *setup_script, const char *ifname, int fd)
if (pid == 0) {
int open_max = sysconf(_SC_OPEN_MAX), i;
- for (i = 0; i < open_max; i++) {
- if (i != STDIN_FILENO &&
- i != STDOUT_FILENO &&
- i != STDERR_FILENO &&
- i != fd) {
+ for (i = 3; i < open_max; i++) {
+ if (i != fd) {
close(i);
}
}
@@ -452,11 +449,8 @@ static int net_bridge_run_helper(const char *helper, const char *bridge)
char br_buf[6+IFNAMSIZ] = {0};
char helper_cmd[PATH_MAX + sizeof(fd_buf) + sizeof(br_buf) + 15];
- for (i = 0; i < open_max; i++) {
- if (i != STDIN_FILENO &&
- i != STDOUT_FILENO &&
- i != STDERR_FILENO &&
- i != sv[1]) {
+ for (i = 3; i < open_max; i++) {
+ if (i != sv[1]) {
close(i);
}
}
diff --git a/os-posix.c b/os-posix.c
index 6187301481..cb2a7f7ad7 100644
--- a/os-posix.c
+++ b/os-posix.c
@@ -44,10 +44,6 @@
#include <sys/prctl.h>
#endif
-#ifdef __FreeBSD__
-#include <sys/sysctl.h>
-#endif
-
static struct passwd *user_pwd;
static const char *chroot_dir;
static int daemonize;
diff --git a/pc-bios/README b/pc-bios/README
index 42f7f565b0..4381718e15 100644
--- a/pc-bios/README
+++ b/pc-bios/README
@@ -17,7 +17,7 @@
- SLOF (Slimline Open Firmware) is a free IEEE 1275 Open Firmware
implementation for certain IBM POWER hardware. The sources are at
https://github.com/aik/SLOF, and the image currently in qemu is
- built from git tag qemu-slof-20131015.
+ built from git tag qemu-slof-20140404.
- sgabios (the Serial Graphics Adapter option ROM) provides a means for
legacy x86 software to communicate with an attached serial console as
diff --git a/pc-bios/ppc_rom.bin b/pc-bios/ppc_rom.bin
index d378d9a95e..e7f769328b 100644
--- a/pc-bios/ppc_rom.bin
+++ b/pc-bios/ppc_rom.bin
Binary files differ
diff --git a/pc-bios/qemu_logo.svg b/pc-bios/qemu_logo.svg
new file mode 100644
index 0000000000..07b5b516ec
--- /dev/null
+++ b/pc-bios/qemu_logo.svg
@@ -0,0 +1,1010 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<svg
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:cc="http://creativecommons.org/ns#"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:xlink="http://www.w3.org/1999/xlink"
+ xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+ xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+ width="351.84259"
+ height="111.86757"
+ id="svg2"
+ version="1.1"
+ inkscape:version="0.48.3.1 r9886"
+ sodipodi:docname="qemu_logo.svg">
+ <title
+ id="title3182">Kew the Angry Emu</title>
+ <defs
+ id="defs4">
+ <linearGradient
+ id="linearGradient4686">
+ <stop
+ style="stop-color:#000000;stop-opacity:1;"
+ offset="0"
+ id="stop4688" />
+ <stop
+ id="stop3956"
+ offset="0.75"
+ style="stop-color:#000000;stop-opacity:0.87843138;" />
+ <stop
+ style="stop-color:#ffffff;stop-opacity:0.43921569;"
+ offset="0.75"
+ id="stop3958" />
+ <stop
+ id="stop3960"
+ offset="0.88"
+ style="stop-color:#181818;stop-opacity:1;" />
+ <stop
+ style="stop-color:#242424;stop-opacity:1;"
+ offset="0.88"
+ id="stop3962" />
+ <stop
+ style="stop-color:#000000;stop-opacity:1;"
+ offset="1"
+ id="stop4690" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient4467">
+ <stop
+ style="stop-color:#000000;stop-opacity:1;"
+ offset="0"
+ id="stop4469" />
+ <stop
+ style="stop-color:#000000;stop-opacity:0.8974359;"
+ offset="1"
+ id="stop4471" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient4431">
+ <stop
+ style="stop-color:#ffffff;stop-opacity:1;"
+ offset="0"
+ id="stop4433" />
+ <stop
+ style="stop-color:#ffffff;stop-opacity:0;"
+ offset="1"
+ id="stop4435" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient4466">
+ <stop
+ style="stop-color:#ffffff;stop-opacity:1;"
+ offset="0"
+ id="stop4468" />
+ <stop
+ style="stop-color:#ffffff;stop-opacity:0;"
+ offset="1"
+ id="stop4470" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient4321">
+ <stop
+ style="stop-color:#ff6702;stop-opacity:1;"
+ offset="0"
+ id="stop4323" />
+ <stop
+ style="stop-color:#ff9a55;stop-opacity:1;"
+ offset="1"
+ id="stop4325" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient4283">
+ <stop
+ style="stop-color:#000000;stop-opacity:1;"
+ offset="0"
+ id="stop4285" />
+ <stop
+ style="stop-color:#000000;stop-opacity:0;"
+ offset="1"
+ id="stop4287" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient4251">
+ <stop
+ style="stop-color:#000000;stop-opacity:1;"
+ offset="0"
+ id="stop4253" />
+ <stop
+ style="stop-color:#000000;stop-opacity:0;"
+ offset="1"
+ id="stop4255" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient4007">
+ <stop
+ style="stop-color:#ff6600;stop-opacity:1;"
+ offset="0"
+ id="stop4009" />
+ <stop
+ style="stop-color:#ff9148;stop-opacity:1;"
+ offset="1"
+ id="stop4011" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient3999">
+ <stop
+ style="stop-color:#fff7f2;stop-opacity:1;"
+ offset="0"
+ id="stop4001" />
+ <stop
+ style="stop-color:#ffffff;stop-opacity:0;"
+ offset="1"
+ id="stop4003" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient3890">
+ <stop
+ style="stop-color:#ffffff;stop-opacity:1;"
+ offset="0"
+ id="stop3892" />
+ <stop
+ style="stop-color:#ffffff;stop-opacity:0;"
+ offset="1"
+ id="stop3894" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient3880">
+ <stop
+ style="stop-color:#eb7400;stop-opacity:1;"
+ offset="0"
+ id="stop3882" />
+ <stop
+ style="stop-color:#f7b06a;stop-opacity:1;"
+ offset="1"
+ id="stop3884" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient4011">
+ <stop
+ style="stop-color:#042dc8;stop-opacity:1;"
+ offset="0"
+ id="stop4013" />
+ <stop
+ style="stop-color:#4260d5;stop-opacity:1;"
+ offset="1"
+ id="stop4015" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient3879">
+ <stop
+ style="stop-color:#ffffff;stop-opacity:0.90598291;"
+ offset="0"
+ id="stop3881" />
+ <stop
+ style="stop-color:#ffffff;stop-opacity:0;"
+ offset="1"
+ id="stop3883" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient3869">
+ <stop
+ style="stop-color:#c95000;stop-opacity:1;"
+ offset="0"
+ id="stop3871" />
+ <stop
+ style="stop-color:#ff9e5e;stop-opacity:1;"
+ offset="1"
+ id="stop3873" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient3861">
+ <stop
+ style="stop-color:#f06000;stop-opacity:1;"
+ offset="0"
+ id="stop3863" />
+ <stop
+ style="stop-color:#ffccaa;stop-opacity:1;"
+ offset="1"
+ id="stop3865" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient3826">
+ <stop
+ style="stop-color:#ff6600;stop-opacity:1;"
+ offset="0"
+ id="stop3828" />
+ <stop
+ style="stop-color:#ff893b;stop-opacity:1;"
+ offset="1"
+ id="stop3830" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient3879-6">
+ <stop
+ style="stop-color:#ffffff;stop-opacity:0.90598291;"
+ offset="0"
+ id="stop3881-4" />
+ <stop
+ style="stop-color:#ffffff;stop-opacity:0;"
+ offset="1"
+ id="stop3883-7" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient3869-5">
+ <stop
+ style="stop-color:#c95000;stop-opacity:1;"
+ offset="0"
+ id="stop3871-9" />
+ <stop
+ style="stop-color:#ff9e5e;stop-opacity:1;"
+ offset="1"
+ id="stop3873-4" />
+ </linearGradient>
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient3879-4"
+ id="linearGradient3885-6"
+ x1="76.025352"
+ y1="124.8497"
+ x2="75.874107"
+ y2="143.03978"
+ gradientUnits="userSpaceOnUse" />
+ <linearGradient
+ id="linearGradient3879-4">
+ <stop
+ style="stop-color:#ffffff;stop-opacity:0.93162394;"
+ offset="0"
+ id="stop3881-6" />
+ <stop
+ style="stop-color:#ffffff;stop-opacity:0;"
+ offset="1"
+ id="stop3883-74" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient3869-2">
+ <stop
+ style="stop-color:#c95000;stop-opacity:1;"
+ offset="0"
+ id="stop3871-99" />
+ <stop
+ style="stop-color:#ff9e5e;stop-opacity:1;"
+ offset="1"
+ id="stop3873-6" />
+ </linearGradient>
+ <radialGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient4011"
+ id="radialGradient4017"
+ cx="66.639"
+ cy="93.096375"
+ fx="66.639"
+ fy="93.096375"
+ r="11.515625"
+ gradientTransform="matrix(0.23244854,1.600893,-1.0124495,0.14700695,145.40424,-26.300303)"
+ gradientUnits="userSpaceOnUse" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient3879-4-7"
+ id="linearGradient3885-6-2"
+ x1="76.025352"
+ y1="124.8497"
+ x2="75.874107"
+ y2="143.03978"
+ gradientUnits="userSpaceOnUse" />
+ <linearGradient
+ id="linearGradient3879-4-7">
+ <stop
+ style="stop-color:#ffffff;stop-opacity:0.93162394;"
+ offset="0"
+ id="stop3881-6-7" />
+ <stop
+ style="stop-color:#ffffff;stop-opacity:0;"
+ offset="1"
+ id="stop3883-74-6" />
+ </linearGradient>
+ <radialGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient4011-5"
+ id="radialGradient4017-7"
+ cx="66.639"
+ cy="93.096375"
+ fx="66.639"
+ fy="93.096375"
+ r="11.515625"
+ gradientTransform="matrix(0.99779178,6.8718773,-4.3459674,0.6310314,452.75975,-225.98471)"
+ gradientUnits="userSpaceOnUse" />
+ <linearGradient
+ id="linearGradient4011-5">
+ <stop
+ style="stop-color:#042dc8;stop-opacity:1;"
+ offset="0"
+ id="stop4013-1" />
+ <stop
+ style="stop-color:#4260d5;stop-opacity:1;"
+ offset="1"
+ id="stop4015-3" />
+ </linearGradient>
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient3879-4-75"
+ id="linearGradient3885-6-8"
+ x1="76.025352"
+ y1="124.8497"
+ x2="75.874107"
+ y2="143.03978"
+ gradientUnits="userSpaceOnUse" />
+ <linearGradient
+ id="linearGradient3879-4-75">
+ <stop
+ style="stop-color:#ffffff;stop-opacity:0.93162394;"
+ offset="0"
+ id="stop3881-6-1" />
+ <stop
+ style="stop-color:#ffffff;stop-opacity:0;"
+ offset="1"
+ id="stop3883-74-4" />
+ </linearGradient>
+ <radialGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient4011-0"
+ id="radialGradient4017-5"
+ cx="66.639"
+ cy="93.096375"
+ fx="66.639"
+ fy="93.096375"
+ r="11.515625"
+ gradientTransform="matrix(0.23244854,1.600893,-1.0124495,0.14700695,146.34996,53.681728)"
+ gradientUnits="userSpaceOnUse" />
+ <linearGradient
+ id="linearGradient4011-0">
+ <stop
+ style="stop-color:#042dc8;stop-opacity:1;"
+ offset="0"
+ id="stop4013-4" />
+ <stop
+ style="stop-color:#4260d5;stop-opacity:1;"
+ offset="1"
+ id="stop4015-0" />
+ </linearGradient>
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient4011-0"
+ id="linearGradient4117"
+ x1="107.03001"
+ y1="189.72537"
+ x2="107.18476"
+ y2="173.47537"
+ gradientUnits="userSpaceOnUse" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient3879-4-7-2"
+ id="linearGradient3885-6-2-8"
+ x1="76.025352"
+ y1="124.8497"
+ x2="75.874107"
+ y2="143.03978"
+ gradientUnits="userSpaceOnUse" />
+ <linearGradient
+ id="linearGradient3879-4-7-2">
+ <stop
+ style="stop-color:#ffffff;stop-opacity:0.93162394;"
+ offset="0"
+ id="stop3881-6-7-9" />
+ <stop
+ style="stop-color:#ffffff;stop-opacity:0;"
+ offset="1"
+ id="stop3883-74-6-9" />
+ </linearGradient>
+ <radialGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient4011-5-1"
+ id="radialGradient4017-7-9"
+ cx="66.639"
+ cy="93.096375"
+ fx="66.639"
+ fy="93.096375"
+ r="11.515625"
+ gradientTransform="matrix(0.99779178,6.8718773,-4.3459674,0.6310314,448.94742,-406.99277)"
+ gradientUnits="userSpaceOnUse" />
+ <linearGradient
+ id="linearGradient4011-5-1">
+ <stop
+ style="stop-color:#042dc8;stop-opacity:1;"
+ offset="0"
+ id="stop4013-1-9" />
+ <stop
+ style="stop-color:#4260d5;stop-opacity:1;"
+ offset="1"
+ id="stop4015-3-8" />
+ </linearGradient>
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient3879-4-7-2-7"
+ id="linearGradient3885-6-2-8-0"
+ x1="76.025352"
+ y1="124.8497"
+ x2="75.874107"
+ y2="143.03978"
+ gradientUnits="userSpaceOnUse" />
+ <linearGradient
+ id="linearGradient3879-4-7-2-7">
+ <stop
+ style="stop-color:#ffffff;stop-opacity:0.93162394;"
+ offset="0"
+ id="stop3881-6-7-9-3" />
+ <stop
+ style="stop-color:#ffffff;stop-opacity:0;"
+ offset="1"
+ id="stop3883-74-6-9-6" />
+ </linearGradient>
+ <radialGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient4011-5-1-5"
+ id="radialGradient4017-7-9-5"
+ cx="66.639"
+ cy="93.096375"
+ fx="66.639"
+ fy="93.096375"
+ r="11.515625"
+ gradientTransform="matrix(0.55965334,3.8543806,-2.4376181,0.3539404,454.75182,-145.44353)"
+ gradientUnits="userSpaceOnUse" />
+ <linearGradient
+ id="linearGradient4011-5-1-5">
+ <stop
+ style="stop-color:#042dc8;stop-opacity:1;"
+ offset="0"
+ id="stop4013-1-9-6" />
+ <stop
+ style="stop-color:#4260d5;stop-opacity:1;"
+ offset="1"
+ id="stop4015-3-8-9" />
+ </linearGradient>
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient3879-4-7-2-4"
+ id="linearGradient3885-6-2-8-4"
+ x1="76.025352"
+ y1="124.8497"
+ x2="75.874107"
+ y2="143.03978"
+ gradientUnits="userSpaceOnUse" />
+ <linearGradient
+ id="linearGradient3879-4-7-2-4">
+ <stop
+ style="stop-color:#ffffff;stop-opacity:0.93162394;"
+ offset="0"
+ id="stop3881-6-7-9-9" />
+ <stop
+ style="stop-color:#ffffff;stop-opacity:0;"
+ offset="1"
+ id="stop3883-74-6-9-3" />
+ </linearGradient>
+ <radialGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient4011-5-1-7"
+ id="radialGradient4017-7-9-7"
+ cx="66.639"
+ cy="93.096375"
+ fx="66.639"
+ fy="93.096375"
+ r="11.515625"
+ gradientTransform="matrix(0.26837158,1.8482981,-1.1689154,0.16972569,466.57614,26.180822)"
+ gradientUnits="userSpaceOnUse" />
+ <linearGradient
+ id="linearGradient4011-5-1-7">
+ <stop
+ style="stop-color:#042dc8;stop-opacity:1;"
+ offset="0"
+ id="stop4013-1-9-1" />
+ <stop
+ style="stop-color:#4260d5;stop-opacity:1;"
+ offset="1"
+ id="stop4015-3-8-5" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient3879-4-7-2-0">
+ <stop
+ style="stop-color:#ffffff;stop-opacity:0.93162394;"
+ offset="0"
+ id="stop3881-6-7-9-7" />
+ <stop
+ style="stop-color:#ffffff;stop-opacity:0;"
+ offset="1"
+ id="stop3883-74-6-9-8" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient4011-5-1-55">
+ <stop
+ style="stop-color:#000a30;stop-opacity:1;"
+ offset="0"
+ id="stop4013-1-9-8" />
+ <stop
+ style="stop-color:#4260d5;stop-opacity:1;"
+ offset="1"
+ id="stop4015-3-8-3" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient3890-9">
+ <stop
+ style="stop-color:#ffffff;stop-opacity:1;"
+ offset="0"
+ id="stop3892-0" />
+ <stop
+ style="stop-color:#ffffff;stop-opacity:0;"
+ offset="1"
+ id="stop3894-9" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient3880-4">
+ <stop
+ style="stop-color:#eb7400;stop-opacity:1;"
+ offset="0"
+ id="stop3882-5" />
+ <stop
+ style="stop-color:#f7b06a;stop-opacity:1;"
+ offset="1"
+ id="stop3884-1" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient3999-7">
+ <stop
+ style="stop-color:#fff7f2;stop-opacity:1;"
+ offset="0"
+ id="stop4001-9" />
+ <stop
+ style="stop-color:#ffffff;stop-opacity:0;"
+ offset="1"
+ id="stop4003-4" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient4007-9">
+ <stop
+ style="stop-color:#ff6600;stop-opacity:1;"
+ offset="0"
+ id="stop4009-1" />
+ <stop
+ style="stop-color:#ff9148;stop-opacity:1;"
+ offset="1"
+ id="stop4011-9" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient4007-9-5">
+ <stop
+ style="stop-color:#ff6600;stop-opacity:1;"
+ offset="0"
+ id="stop4009-1-9" />
+ <stop
+ style="stop-color:#ff9148;stop-opacity:1;"
+ offset="1"
+ id="stop4011-9-5" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient3999-7-1">
+ <stop
+ style="stop-color:#fff7f2;stop-opacity:1;"
+ offset="0"
+ id="stop4001-9-1" />
+ <stop
+ style="stop-color:#ffffff;stop-opacity:0;"
+ offset="1"
+ id="stop4003-4-4" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient4007-9-5-3">
+ <stop
+ style="stop-color:#ff6600;stop-opacity:1;"
+ offset="0"
+ id="stop4009-1-9-3" />
+ <stop
+ style="stop-color:#ff9148;stop-opacity:1;"
+ offset="1"
+ id="stop4011-9-5-9" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient3999-7-1-4">
+ <stop
+ style="stop-color:#fff7f2;stop-opacity:1;"
+ offset="0"
+ id="stop4001-9-1-4" />
+ <stop
+ style="stop-color:#ffffff;stop-opacity:0;"
+ offset="1"
+ id="stop4003-4-4-4" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient3879-4-7-2-3">
+ <stop
+ style="stop-color:#ffffff;stop-opacity:0.93162394;"
+ offset="0"
+ id="stop3881-6-7-9-1" />
+ <stop
+ style="stop-color:#ffffff;stop-opacity:0;"
+ offset="1"
+ id="stop3883-74-6-9-87" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient4011-5-1-1">
+ <stop
+ style="stop-color:#fde8a1;stop-opacity:1;"
+ offset="0"
+ id="stop4013-1-9-63" />
+ <stop
+ style="stop-color:#2947b9;stop-opacity:1;"
+ offset="1"
+ id="stop4015-3-8-8" />
+ </linearGradient>
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient4466"
+ id="linearGradient4472"
+ x1="161.7561"
+ y1="540.72662"
+ x2="161.7561"
+ y2="579.80206"
+ gradientUnits="userSpaceOnUse" />
+ <radialGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient4321"
+ id="radialGradient4474"
+ cx="130.8242"
+ cy="575.27838"
+ fx="130.8242"
+ fy="575.27838"
+ r="49.498173"
+ gradientTransform="matrix(0.95670828,0.96684666,-0.72623533,0.71862001,423.45109,35.05138)"
+ gradientUnits="userSpaceOnUse" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient4466-5"
+ id="linearGradient4472-9"
+ x1="161.7561"
+ y1="540.72662"
+ x2="161.7561"
+ y2="579.80206"
+ gradientUnits="userSpaceOnUse" />
+ <linearGradient
+ id="linearGradient4466-5">
+ <stop
+ style="stop-color:#ffffff;stop-opacity:1;"
+ offset="0"
+ id="stop4468-2" />
+ <stop
+ style="stop-color:#ffffff;stop-opacity:0;"
+ offset="1"
+ id="stop4470-3" />
+ </linearGradient>
+ <radialGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient4321-0"
+ id="radialGradient4474-6"
+ cx="130.8242"
+ cy="575.27838"
+ fx="130.8242"
+ fy="575.27838"
+ r="49.498173"
+ gradientTransform="matrix(0.95670828,0.96684666,-0.72623533,0.71862001,442.64399,170.9169)"
+ gradientUnits="userSpaceOnUse" />
+ <linearGradient
+ id="linearGradient4321-0">
+ <stop
+ style="stop-color:#ff6702;stop-opacity:1;"
+ offset="0"
+ id="stop4323-3" />
+ <stop
+ style="stop-color:#ff9a55;stop-opacity:1;"
+ offset="1"
+ id="stop4325-1" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient4466-5-5">
+ <stop
+ style="stop-color:#ffffff;stop-opacity:1;"
+ offset="0"
+ id="stop4468-2-9" />
+ <stop
+ style="stop-color:#ffffff;stop-opacity:0;"
+ offset="1"
+ id="stop4470-3-4" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient4321-0-0">
+ <stop
+ style="stop-color:#ff6702;stop-opacity:1;"
+ offset="0"
+ id="stop4323-3-9" />
+ <stop
+ style="stop-color:#ff9a55;stop-opacity:1;"
+ offset="1"
+ id="stop4325-1-1" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient4466-5-9">
+ <stop
+ style="stop-color:#ffffff;stop-opacity:1;"
+ offset="0"
+ id="stop4468-2-7" />
+ <stop
+ style="stop-color:#ffffff;stop-opacity:0;"
+ offset="1"
+ id="stop4470-3-7" />
+ </linearGradient>
+ <linearGradient
+ id="linearGradient4321-0-7">
+ <stop
+ style="stop-color:#ff6702;stop-opacity:1;"
+ offset="0"
+ id="stop4323-3-3" />
+ <stop
+ style="stop-color:#ff9a55;stop-opacity:1;"
+ offset="1"
+ id="stop4325-1-6" />
+ </linearGradient>
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient4431"
+ id="linearGradient4437"
+ x1="142.81854"
+ y1="831.52283"
+ x2="142.81854"
+ y2="878.90735"
+ gradientUnits="userSpaceOnUse" />
+ <radialGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient4467"
+ id="radialGradient4475"
+ cx="116.51958"
+ cy="98.282051"
+ fx="116.51958"
+ fy="98.282051"
+ r="55.859375"
+ gradientTransform="matrix(0.97442557,1.5088911,-0.83559154,0.53961599,79.641615,-130.28522)"
+ gradientUnits="userSpaceOnUse" />
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient4431-3"
+ id="linearGradient4437-6"
+ x1="142.81854"
+ y1="831.52283"
+ x2="142.81854"
+ y2="878.90735"
+ gradientUnits="userSpaceOnUse" />
+ <linearGradient
+ id="linearGradient4431-3">
+ <stop
+ style="stop-color:#ffffff;stop-opacity:1;"
+ offset="0"
+ id="stop4433-0" />
+ <stop
+ style="stop-color:#ffffff;stop-opacity:0;"
+ offset="1"
+ id="stop4435-2" />
+ </linearGradient>
+ <radialGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient4467-7"
+ id="radialGradient4475-0"
+ cx="116.51958"
+ cy="98.282051"
+ fx="116.51958"
+ fy="98.282051"
+ r="55.859375"
+ gradientTransform="matrix(0.97442557,1.5088911,-0.83559154,0.53961599,225.10358,63.664066)"
+ gradientUnits="userSpaceOnUse" />
+ <linearGradient
+ id="linearGradient4467-7">
+ <stop
+ style="stop-color:#000000;stop-opacity:1;"
+ offset="0"
+ id="stop4469-4" />
+ <stop
+ style="stop-color:#000000;stop-opacity:0.8974359;"
+ offset="1"
+ id="stop4471-7" />
+ </linearGradient>
+ </defs>
+ <sodipodi:namedview
+ id="base"
+ pagecolor="#ffffff"
+ bordercolor="#666666"
+ borderopacity="1.0"
+ inkscape:pageopacity="0.0"
+ inkscape:pageshadow="2"
+ inkscape:zoom="1.0170184"
+ inkscape:cx="539.34448"
+ inkscape:cy="-81.088823"
+ inkscape:document-units="px"
+ inkscape:current-layer="layer1"
+ showgrid="false"
+ showguides="false"
+ inkscape:guide-bbox="true"
+ inkscape:window-width="992"
+ inkscape:window-height="547"
+ inkscape:window-x="30"
+ inkscape:window-y="24"
+ inkscape:window-maximized="0"
+ fit-margin-top="0"
+ fit-margin-left="0"
+ fit-margin-right="0"
+ fit-margin-bottom="0">
+ <sodipodi:guide
+ orientation="0,1"
+ position="52.563745,58.089579"
+ id="guide2989" />
+ <sodipodi:guide
+ orientation="0,1"
+ position="54.584055,28.037549"
+ id="guide2991" />
+ <sodipodi:guide
+ orientation="1,0"
+ position="51.048515,41.169529"
+ id="guide2993" />
+ <sodipodi:guide
+ orientation="1,0"
+ position="77.817565,40.916989"
+ id="guide2995" />
+ <sodipodi:guide
+ orientation="1,0"
+ position="51.048515,41.169529"
+ id="guide3017" />
+ <inkscape:grid
+ type="xygrid"
+ id="grid3019"
+ empspacing="5"
+ visible="true"
+ enabled="true"
+ snapvisiblegridlinesonly="true"
+ originx="-62.341105px"
+ originy="-884.63528px" />
+ <sodipodi:guide
+ orientation="1,0"
+ position="85.658895,8.3647193"
+ id="guide3021" />
+ <sodipodi:guide
+ orientation="1,0"
+ position="106.6589,4.3647193"
+ id="guide3023" />
+ <sodipodi:guide
+ orientation="0,1"
+ position="90.658895,17.364719"
+ id="guide3025" />
+ <sodipodi:guide
+ orientation="0,1"
+ position="90.658895,-6.6352807"
+ id="guide3027" />
+ <sodipodi:guide
+ orientation="0,1"
+ position="-0.341105,-14.635281"
+ id="guide3810" />
+ <sodipodi:guide
+ orientation="0,1"
+ position="1.658895,-49.635281"
+ id="guide3814" />
+ <sodipodi:guide
+ orientation="0,1"
+ position="-17.698248,11.257579"
+ id="guide3856" />
+ <sodipodi:guide
+ orientation="0,1"
+ position="6.601806,11.257579"
+ id="guide3887" />
+ <sodipodi:guide
+ orientation="0,1"
+ position="24.658283,58.089579"
+ id="guide4019" />
+ <sodipodi:guide
+ orientation="0,1"
+ position="106.6589,-6.6352807"
+ id="guide4481" />
+ <sodipodi:guide
+ orientation="0,1"
+ position="139.08747,-193.20671"
+ id="guide4483" />
+ </sodipodi:namedview>
+ <metadata
+ id="metadata7">
+ <rdf:RDF>
+ <cc:Work
+ rdf:about="">
+ <dc:format>image/svg+xml</dc:format>
+ <dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+ <dc:title>Kew the Angry Emu</dc:title>
+ <dc:creator>
+ <cc:Agent>
+ <dc:title>Benoît Canet</dc:title>
+ </cc:Agent>
+ </dc:creator>
+ <dc:rights>
+ <cc:Agent>
+ <dc:title>CC BY 3.0</dc:title>
+ </cc:Agent>
+ </dc:rights>
+ <dc:publisher>
+ <cc:Agent>
+ <dc:title>QEMU Community</dc:title>
+ </cc:Agent>
+ </dc:publisher>
+ <dc:date>2012-02-15</dc:date>
+ <cc:license
+ rdf:resource="http://creativecommons.org/licenses/by/3.0/" />
+ <dc:subject>
+ <rdf:Bag>
+ <rdf:li>QEMU logo</rdf:li>
+ <rdf:li>QEMU mascot</rdf:li>
+ </rdf:Bag>
+ </dc:subject>
+ <dc:source>http://lists.gnu.org/archive/html/qemu-devel/2012-02/msg01961.html</dc:source>
+ </cc:Work>
+ <cc:License
+ rdf:about="http://creativecommons.org/licenses/by/3.0/">
+ <cc:permits
+ rdf:resource="http://creativecommons.org/ns#Reproduction" />
+ <cc:permits
+ rdf:resource="http://creativecommons.org/ns#Distribution" />
+ <cc:requires
+ rdf:resource="http://creativecommons.org/ns#Notice" />
+ <cc:requires
+ rdf:resource="http://creativecommons.org/ns#Attribution" />
+ <cc:permits
+ rdf:resource="http://creativecommons.org/ns#DerivativeWorks" />
+ </cc:License>
+ </rdf:RDF>
+ </metadata>
+ <g
+ inkscape:label="Layer 1"
+ inkscape:groupmode="layer"
+ id="layer1"
+ transform="translate(-62.341105,-55.859333)">
+ <path
+ inkscape:connector-curvature="0"
+ style="fill:url(#radialGradient4475);fill-opacity:1;stroke:none"
+ d="m 118.2161,55.859333 c -30.850815,0 -55.874995,24.87043 -55.874995,55.562497 0,30.69207 25.02418,55.56249 55.874995,55.56249 10.09496,0 19.54625,-2.6525 27.71875,-7.3125 l 2.90625,7.3125 2.40625,0 20,0 0.125,0 -8.8125,-21.78124 c 7.21537,-9.3622 11.5,-21.07236 11.5,-33.78125 0,-30.692067 -24.99293,-55.562497 -55.84375,-55.562497 z"
+ id="path3834-7-7-2-5-5-0-5-4" />
+ <path
+ sodipodi:type="arc"
+ style="fill:url(#linearGradient4437);fill-opacity:1;stroke:none"
+ id="path3661"
+ sodipodi:cx="142.5"
+ sodipodi:cy="856.29077"
+ sodipodi:rx="35.357143"
+ sodipodi:ry="24.642857"
+ d="m 177.85714,856.29077 c 0,13.60988 -15.82993,24.64286 -35.35714,24.64286 -19.52721,0 -35.35714,-11.03298 -35.35714,-24.64286 0,-13.60987 15.82993,-24.64286 35.35714,-24.64286 19.52721,0 35.35714,11.03299 35.35714,24.64286 z"
+ transform="matrix(1.0465082,0,0,1.2920463,-31.641235,-1016.8612)" />
+ <path
+ sodipodi:type="arc"
+ style="fill:#000000;fill-opacity:1;stroke:none"
+ id="path4442"
+ sodipodi:cx="115.66247"
+ sodipodi:cy="856.39258"
+ sodipodi:rx="6.5659914"
+ sodipodi:ry="6.5659914"
+ d="m 122.22846,856.39258 c 0,3.6263 -2.9397,6.56599 -6.56599,6.56599 -3.6263,0 -6.56599,-2.93969 -6.56599,-6.56599 0,-3.6263 2.93969,-6.56599 6.56599,-6.56599 3.62629,0 6.56599,2.93969 6.56599,6.56599 z"
+ transform="translate(7.6700247,-777.60351)" />
+ <rect
+ style="fill:#000000;fill-opacity:1;stroke:none"
+ id="rect4444"
+ width="37.643608"
+ height="5.5005069"
+ x="125.01157"
+ y="65.255234"
+ transform="matrix(0.98974903,0.14281759,-0.18972639,0.981837,0,0)" />
+ <rect
+ style="fill:#000000;fill-opacity:1;stroke:none"
+ id="rect4446"
+ width="6.5659914"
+ height="2.9041886"
+ x="144.92451"
+ y="89.016899" />
+ <path
+ style="fill:#ff6600;fill-opacity:1"
+ d="m 103.38797,65.010543 c -0.057,2.18531 -3.865755,0.28296 -4.031245,2.78125 -4.22387,-1.88052 0.32884,2.87188 -0.0937,3.3125 l -0.0312,0 -0.3125,-0.0312 c -0.20386,-0.0728 -0.49977,-0.19904 -0.9375,-0.46875 -2.9499,2.35025 -3.02157,7.23369 -6.0625,9.9375 -1.99467,4.30504 -2.47977,8.98337 -3.9375,13.46875 -0.71796,4.30292 -1.34881,8.597857 -0.28125,12.906247 0.32053,3.50159 -0.68919,8.25865 2.5,10.71875 4.72728,3.88304 8.65575,8.79543 12.624995,13.46875 6.21914,7.65333 11.72948,15.86251 16.59375,24.4375 0.32431,-2.11756 1.10954,4.26459 2.53125,4.6875 -0.49161,-3.19231 -1.13213,-8.26328 -1.4375,-12.1875 -1.5814,-10.2909 -6.65305,-19.64903 -8.5625,-29.84375 -0.0587,-0.43037 -0.12809,-0.87203 -0.1875,-1.3125 l 0,-1.28125 -0.15625,0 c -0.62551,-5.04297 -0.8504,-10.46546 2.8125,-14.40625 3.73968,-3.772097 9.30633,-4.722447 13.8125,-7.343747 1.00194,-0.59119 2.04921,-1.07174 3.125,-1.40625 0.009,-0.003 0.0228,0.003 0.0312,0 3.11701,-0.96341 6.44862,-0.93323 9.6875,-0.40625 0.0479,0.008 0.10841,0.0233 0.15625,0.0312 0.29455,0.0493 0.61389,0.099 0.90625,0.15625 2.37136,0.21133 7.14463,1.13687 8,-0.5 -3.27225,-2.78631 -7.98526,-2.59211 -11.96875,-3.6875 -0.63059,-0.11469 -1.41182,-0.24041 -2.1875,-0.3125 l -3.90625,-0.875 -0.96875,-0.25 0,0.0312 -13.96875,-2.71875 c -0.22212,-0.20226 -0.46434,-0.40933 -0.6875,-0.5625 l 13.625,1.6875 0,-0.0625 c 0.48011,0.10699 0.95576,0.19361 1.4375,0.25 l 0,0.0312 9.625,1.78125 c 1.66103,0.61952 3.4322,1.08374 5.09375,1.1875 2.74263,0.39907 6.22526,4.49092 7.125,4.6875 -0.44096,-4.307 -4.7422,-6.23586 -8.3125,-7.5 -4.1712,-2.02803 -10.4023,-1.95417 -11.0625,-7.5625 -0.1756,-0.39076 -0.34902,-0.78118 -0.5625,-1.15625 l -1.625,-2.15625 0.0625,-0.0312 c -2.21724,-2.61691 -5.34011,-4.52196 -8.65625,-5.25 -3.2914,-1.13611 -6.98773,-2.2671 -10.46875,-2.71875 -1.18132,3.47826 -2.5031,-2.75561 -5.34375,-0.90625 -2.48996,0.29488 -2.14614,0.95256 -4,-0.625 z m 17.90625,10.15625 c 0.90187,-0.0238 1.93277,0.14208 2.96875,0.5 2.76259,0.95447 4.56151,2.96523 4.03125,4.5 -0.53026,1.53477 -3.20616,1.98572 -5.96875,1.03125 -2.76259,-0.95447 -4.5615,-2.93398 -4.03125,-4.46875 0.33141,-0.95923 1.49689,-1.52281 3,-1.5625 z"
+ id="path3499-9-7"
+ inkscape:connector-curvature="0" />
+ <text
+ xml:space="preserve"
+ style="font-size:95.54121399px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:start;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans;-inkscape-font-specification:Sans Bold"
+ x="184.89412"
+ y="166.37402"
+ id="text4477"
+ sodipodi:linespacing="125%"><tspan
+ sodipodi:role="line"
+ id="tspan4479"
+ x="184.89412"
+ y="166.37402"
+ style="fill:#000000;fill-opacity:1">EMU</tspan></text>
+ </g>
+</svg>
diff --git a/pc-bios/slof.bin b/pc-bios/slof.bin
index 92a9831be7..972e012e51 100644
--- a/pc-bios/slof.bin
+++ b/pc-bios/slof.bin
Binary files differ
diff --git a/po/Makefile b/po/Makefile
index 705166e2d3..669f8654a6 100644
--- a/po/Makefile
+++ b/po/Makefile
@@ -37,8 +37,8 @@ install: $(OBJS)
$(call quiet-command, msgfmt -o $@ $<, " GEN $@")
$(PO_PATH)/messages.po: $(SRC_PATH)/ui/gtk.c
- $(call quiet-command, cd $(SRC_PATH) && \
- (xgettext -o - --from-code=UTF-8 --foreign-user \
+ $(call quiet-command, ( cd $(SRC_PATH) && \
+ xgettext -o - --from-code=UTF-8 --foreign-user \
--package-name=QEMU --package-version=$(VERSION) \
--msgid-bugs-address=qemu-devel@nongnu.org -k_ -C ui/gtk.c | \
sed -e s/CHARSET/UTF-8/) >$@, " GEN $@")
diff --git a/po/de_DE.po b/po/de_DE.po
index fcbde954c2..dec68c9ee8 100644
--- a/po/de_DE.po
+++ b/po/de_DE.po
@@ -10,7 +10,7 @@ msgstr ""
"PO-Revision-Date: 2012-02-28 16:00+0100\n"
"Last-Translator: Kevin Wolf <kwolf@redhat.com>\n"
"Language-Team: Deutsch <de@li.org>\n"
-"Language: \n"
+"Language: de\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
diff --git a/po/fr_FR.po b/po/fr_FR.po
index 45b2c0153d..ec54eb95a8 100644
--- a/po/fr_FR.po
+++ b/po/fr_FR.po
@@ -10,7 +10,7 @@ msgstr ""
"PO-Revision-Date: 2013-03-31 19:39+0200\n"
"Last-Translator: Aurelien Jarno <aurelien@aurel32.net>\n"
"Language-Team: French <FR@li.org>\n"
-"Language: \n"
+"Language: fr\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
diff --git a/po/hu.po b/po/hu.po
index 0a44c664df..401ed211ad 100644
--- a/po/hu.po
+++ b/po/hu.po
@@ -10,7 +10,7 @@ msgstr ""
"PO-Revision-Date: 2013-05-06 20:42+0200\n"
"Last-Translator: Ákos Kovács <akoskovacs@gmx.com>\n"
"Language-Team: Hungarian <hu@li.org>\n"
-"Language: \n"
+"Language: hu\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
diff --git a/po/it.po b/po/it.po
index 592d3d85f9..a62665cb22 100644
--- a/po/it.po
+++ b/po/it.po
@@ -10,7 +10,7 @@ msgstr ""
"PO-Revision-Date: 2012-02-27 08:23+0100\n"
"Last-Translator: Paolo Bonzini <pbonzini@redhat.com>\n"
"Language-Team: Italian <it@li.org>\n"
-"Language: \n"
+"Language: it\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
diff --git a/po/tr.po b/po/tr.po
index d57995a60b..d712ced9cc 100644
--- a/po/tr.po
+++ b/po/tr.po
@@ -10,7 +10,7 @@ msgstr ""
"PO-Revision-Date: 2013-04-22 18:35+0300\n"
"Last-Translator: Ozan Çağlayan <ozancag@gmail.com>\n"
"Language-Team: Türkçe <>\n"
-"Language: \n"
+"Language: tr\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
diff --git a/qapi-schema.json b/qapi-schema.json
index f4f9439fe6..0b00427c8c 100644
--- a/qapi-schema.json
+++ b/qapi-schema.json
@@ -883,6 +883,35 @@
{ 'command': 'query-cpus', 'returns': ['CpuInfo'] }
##
+# @IOThreadInfo:
+#
+# Information about an iothread
+#
+# @id: the identifier of the iothread
+#
+# @thread-id: ID of the underlying host thread
+#
+# Since: 2.0
+##
+{ 'type': 'IOThreadInfo',
+ 'data': {'id': 'str', 'thread-id': 'int'} }
+
+##
+# @query-iothreads:
+#
+# Returns a list of information about each iothread.
+#
+# Note this list excludes the QEMU main loop thread, which is not declared
+# using the -object iothread command-line option. It is always the main thread
+# of the process.
+#
+# Returns: a list of @IOThreadInfo for each iothread
+#
+# Since: 2.0
+##
+{ 'command': 'query-iothreads', 'returns': ['IOThreadInfo'] }
+
+##
# @BlockDeviceInfo:
#
# Information about the backing device for a block device.
@@ -4155,6 +4184,8 @@
#
# @unicast: unicast receive state
#
+# @vlan: vlan receive state (Since 2.0)
+#
# @broadcast-allowed: whether to receive broadcast
#
# @multicast-overflow: multicast table is overflowed or not
@@ -4178,6 +4209,7 @@
'promiscuous': 'bool',
'multicast': 'RxState',
'unicast': 'RxState',
+ 'vlan': 'RxState',
'broadcast-allowed': 'bool',
'multicast-overflow': 'bool',
'unicast-overflow': 'bool',
@@ -4253,10 +4285,13 @@
#
# Drivers that are supported in block device operations.
#
+# @host_device, @host_cdrom, @host_floppy: Since 2.1
+#
# Since: 2.0
##
{ 'enum': 'BlockdevDriver',
- 'data': [ 'file', 'http', 'https', 'ftp', 'ftps', 'tftp', 'vvfat', 'blkdebug',
+ 'data': [ 'file', 'host_device', 'host_cdrom', 'host_floppy',
+ 'http', 'https', 'ftp', 'ftps', 'tftp', 'vvfat', 'blkdebug',
'blkverify', 'bochs', 'cloop', 'cow', 'dmg', 'parallels', 'qcow',
'qcow2', 'qed', 'raw', 'vdi', 'vhdx', 'vmdk', 'vpc', 'quorum' ] }
@@ -4523,6 +4558,9 @@
'discriminator': 'driver',
'data': {
'file': 'BlockdevOptionsFile',
+ 'host_device':'BlockdevOptionsFile',
+ 'host_cdrom': 'BlockdevOptionsFile',
+ 'host_floppy':'BlockdevOptionsFile',
'http': 'BlockdevOptionsFile',
'https': 'BlockdevOptionsFile',
'ftp': 'BlockdevOptionsFile',
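
The schema addition above only declares the wire format; the handler behind query-iothreads still has to walk the user-created objects and emit one IOThreadInfo per iothread. A minimal sketch of such a handler follows — the "/objects" container path and the iothread_get_id()/iothread_get_thread_id() getters are assumptions for illustration, not part of this patch.

#include "qmp-commands.h"
#include "qom/object.h"
#include "sysemu/iothread.h"

static int query_one_iothread(Object *object, void *opaque)
{
    IOThreadInfoList ***prev = opaque;
    IOThread *iothread;
    IOThreadInfoList *elem;
    IOThreadInfo *info;

    iothread = (IOThread *)object_dynamic_cast(object, TYPE_IOTHREAD);
    if (!iothread) {
        return 0;                                   /* not an iothread, keep walking */
    }

    info = g_new0(IOThreadInfo, 1);
    info->id = iothread_get_id(iothread);               /* assumed getter */
    info->thread_id = iothread_get_thread_id(iothread); /* assumed getter */

    elem = g_new0(IOThreadInfoList, 1);
    elem->value = info;
    **prev = elem;
    *prev = &elem->next;
    return 0;
}

IOThreadInfoList *qmp_query_iothreads(Error **errp)
{
    IOThreadInfoList *head = NULL;
    IOThreadInfoList **prev = &head;
    Object *container = container_get(object_get_root(), "/objects");

    object_child_foreach(container, query_one_iothread, &prev);
    return head;
}
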
diff --git a/qapi/qmp-dispatch.c b/qapi/qmp-dispatch.c
index 921de33bce..9c614494f1 100644
--- a/qapi/qmp-dispatch.c
+++ b/qapi/qmp-dispatch.c
@@ -80,7 +80,8 @@ static QObject *do_qmp_dispatch(QObject *request, Error **errp)
return NULL;
}
if (!cmd->enabled) {
- error_set(errp, QERR_COMMAND_DISABLED, command);
+ error_setg(errp, "The command %s has been disabled for this instance",
+ command);
return NULL;
}
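
The QERR_COMMAND_DISABLED macro is replaced with a plain error_setg() call; error_setg() takes a printf-style format, so the call site can compose the human-readable message directly. A small, hedged sketch of the error_setg()/error_propagate() idiom used throughout these conversions (the function names are illustrative):

#include "qapi/error.h"

static void do_step(const char *name, Error **errp)
{
    if (!name) {
        error_setg(errp, "Parameter 'name' is missing");
        return;
    }
    /* ... */
}

void run(Error **errp)
{
    Error *local_err = NULL;

    do_step(NULL, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);  /* hand the error up unchanged */
        return;
    }
}
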
diff --git a/qapi/qmp-input-visitor.c b/qapi/qmp-input-visitor.c
index bf42c04ea6..a2bed1ef10 100644
--- a/qapi/qmp-input-visitor.c
+++ b/qapi/qmp-input-visitor.c
@@ -71,7 +71,7 @@ static void qmp_input_push(QmpInputVisitor *qiv, QObject *obj, Error **errp)
GHashTable *h;
if (qiv->nb_stack >= QIV_STACK_SIZE) {
- error_set(errp, QERR_BUFFER_OVERRUN);
+ error_setg(errp, "An internal buffer overran");
return;
}
diff --git a/qdev-monitor.c b/qdev-monitor.c
index 6673e3cb61..6189780fd7 100644
--- a/qdev-monitor.c
+++ b/qdev-monitor.c
@@ -422,12 +422,14 @@ static BusState *qbus_find(const char *path)
* one child bus accept it nevertheless */
switch (dev->num_child_bus) {
case 0:
- qerror_report(QERR_DEVICE_NO_BUS, elem);
+ qerror_report(ERROR_CLASS_GENERIC_ERROR,
+ "Device '%s' has no child bus", elem);
return NULL;
case 1:
return QLIST_FIRST(&dev->child_bus);
default:
- qerror_report(QERR_DEVICE_MULTIPLE_BUSSES, elem);
+ qerror_report(ERROR_CLASS_GENERIC_ERROR,
+ "Device '%s' has multiple child busses", elem);
if (!monitor_cur_is_qmp()) {
qbus_list_bus(dev);
}
@@ -505,14 +507,16 @@ DeviceState *qdev_device_add(QemuOpts *opts)
return NULL;
}
if (!object_dynamic_cast(OBJECT(bus), dc->bus_type)) {
- qerror_report(QERR_BAD_BUS_FOR_DEVICE,
+ qerror_report(ERROR_CLASS_GENERIC_ERROR,
+ "Device '%s' can't go on a %s bus",
driver, object_get_typename(OBJECT(bus)));
return NULL;
}
} else if (dc->bus_type != NULL) {
bus = qbus_find_recursive(sysbus_get_default(), NULL, dc->bus_type);
if (!bus) {
- qerror_report(QERR_NO_BUS_FOR_DEVICE,
+ qerror_report(ERROR_CLASS_GENERIC_ERROR,
+ "No '%s' bus found for device '%s'",
dc->bus_type, driver);
return NULL;
}
@@ -522,7 +526,7 @@ DeviceState *qdev_device_add(QemuOpts *opts)
return NULL;
}
- /* create device, set properties */
+ /* create device */
dev = DEVICE(object_new(driver));
if (bus) {
@@ -533,11 +537,7 @@ DeviceState *qdev_device_add(QemuOpts *opts)
if (id) {
dev->id = id;
}
- if (qemu_opt_foreach(opts, set_property, dev, 1) != 0) {
- object_unparent(OBJECT(dev));
- object_unref(OBJECT(dev));
- return NULL;
- }
+
if (dev->id) {
object_property_add_child(qdev_get_peripheral(), dev->id,
OBJECT(dev), NULL);
@@ -549,6 +549,13 @@ DeviceState *qdev_device_add(QemuOpts *opts)
g_free(name);
}
+ /* set properties */
+ if (qemu_opt_foreach(opts, set_property, dev, 1) != 0) {
+ object_unparent(OBJECT(dev));
+ object_unref(OBJECT(dev));
+ return NULL;
+ }
+
dev->opts = opts;
object_property_set_bool(OBJECT(dev), true, "realized", &err);
if (err != NULL) {
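
The hunks above move property assignment until after the device has been attached to its bus and to the /machine/peripheral container, so properties set from -device (for example link properties) can already resolve the device by its canonical path. A condensed, hedged sketch of the resulting order — error handling is trimmed and the helpers are the ones used in qdev_device_add() above:

static DeviceState *device_add_sketch(const char *driver, const char *id,
                                      BusState *bus, QemuOpts *opts)
{
    DeviceState *dev = DEVICE(object_new(driver));        /* 1. create */

    if (bus) {
        qdev_set_parent_bus(dev, bus);                    /* 2. attach to bus */
    }
    if (id) {
        object_property_add_child(qdev_get_peripheral(), id,
                                  OBJECT(dev), NULL);     /* 3. /machine/peripheral */
    }
    if (qemu_opt_foreach(opts, set_property, dev, 1) != 0) {
        object_unparent(OBJECT(dev));                     /* 4. only now set props */
        object_unref(OBJECT(dev));
        return NULL;
    }
    object_property_set_bool(OBJECT(dev), true, "realized",
                             &error_abort);               /* 5. realize */
    return dev;
}
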
diff --git a/qemu-char.c b/qemu-char.c
index 4d50838b3b..54ed244542 100644
--- a/qemu-char.c
+++ b/qemu-char.c
@@ -213,7 +213,7 @@ void qemu_chr_add_handlers(CharDriverState *s,
s->chr_read = fd_read;
s->chr_event = fd_event;
s->handler_opaque = opaque;
- if (s->chr_update_read_handler)
+ if (fe_open && s->chr_update_read_handler)
s->chr_update_read_handler(s);
if (!s->explicit_fe_open) {
@@ -1136,13 +1136,14 @@ static void pty_chr_state(CharDriverState *chr, int connected)
if (!s->connected) {
s->connected = 1;
qemu_chr_be_generic_open(chr);
+ }
+ if (!chr->fd_in_tag) {
chr->fd_in_tag = io_add_watch_poll(s->fd, pty_chr_read_poll,
pty_chr_read, chr);
}
}
}
-
static void pty_chr_close(struct CharDriverState *chr)
{
PtyCharDriver *s = chr->opaque;
@@ -2509,6 +2510,17 @@ static void tcp_chr_connect(void *opaque)
qemu_chr_be_generic_open(chr);
}
+static void tcp_chr_update_read_handler(CharDriverState *chr)
+{
+ TCPCharDriver *s = chr->opaque;
+
+ remove_fd_in_watch(chr);
+ if (s->chan) {
+ chr->fd_in_tag = io_add_watch_poll(s->chan, tcp_chr_read_poll,
+ tcp_chr_read, chr);
+ }
+}
+
#define IACSET(x,a,b,c) x[0] = a; x[1] = b; x[2] = c;
static void tcp_chr_telnet_init(int fd)
{
@@ -2664,6 +2676,7 @@ static CharDriverState *qemu_chr_open_socket_fd(int fd, bool do_nodelay,
chr->get_msgfd = tcp_get_msgfd;
chr->chr_add_client = tcp_chr_add_client;
chr->chr_add_watch = tcp_chr_add_watch;
+ chr->chr_update_read_handler = tcp_chr_update_read_handler;
/* be isn't opened until we get a connection */
chr->explicit_be_open = true;
diff --git a/qemu-doc.texi b/qemu-doc.texi
index ad31f2d2d0..88ec9bb133 100644
--- a/qemu-doc.texi
+++ b/qemu-doc.texi
@@ -823,7 +823,7 @@ In this case, the block device must be exported using qemu-nbd:
qemu-nbd --socket=/tmp/my_socket my_disk.qcow2
@end example
-The use of qemu-nbd allows to share a disk between several guests:
+The use of qemu-nbd allows sharing of a disk between several guests:
@example
qemu-nbd --socket=/tmp/my_socket --share=2 my_disk.qcow2
@end example
@@ -1938,7 +1938,7 @@ The following options are specific to the PowerPC emulation:
@item -g @var{W}x@var{H}[x@var{DEPTH}]
-Set the initial VGA graphic mode. The default is 800x600x15.
+Set the initial VGA graphic mode. The default is 800x600x32.
@item -prom-env @var{string}
@@ -1996,7 +1996,7 @@ QEMU emulates the following sun4m peripherals:
@item
IOMMU
@item
-TCX Frame buffer
+TCX or cgthree Frame buffer
@item
Lance (Am7990) Ethernet
@item
@@ -2023,7 +2023,7 @@ firmware implementation. The goal is to implement a 100% IEEE
A sample Linux 2.6 series kernel and ram disk image are available on
the QEMU web site. There are still issues with NetBSD and OpenBSD, but
-some kernel versions work. Please note that currently Solaris kernels
+some kernel versions work. Please note that currently older Solaris kernels
don't work probably due to interface issues between OpenBIOS and
Solaris.
@@ -2035,8 +2035,9 @@ The following options are specific to the Sparc32 emulation:
@item -g @var{W}x@var{H}x[x@var{DEPTH}]
-Set the initial TCX graphic mode. The default is 1024x768x8, currently
-the only other possible mode is 1024x768x24.
+Set the initial graphics mode. For TCX, the default is 1024x768x8 with the
+option of 1024x768x24. For cgthree, the default is 1024x768x8 with the option
+of 1152x900x8 for people who wish to use OBP.
@item -prom-env @var{string}
diff --git a/qemu-file.c b/qemu-file.c
index e5ec798e0b..8d5f45dcb0 100644
--- a/qemu-file.c
+++ b/qemu-file.c
@@ -4,6 +4,7 @@
#include "block/coroutine.h"
#include "migration/migration.h"
#include "migration/qemu-file.h"
+#include "trace.h"
#define IO_BUF_SIZE 32768
#define MAX_IOV_SIZE MIN(IOV_MAX, 64)
@@ -595,6 +596,7 @@ int qemu_fclose(QEMUFile *f)
ret = f->last_error;
}
g_free(f);
+ trace_qemu_file_fclose();
return ret;
}
diff --git a/qemu-img.c b/qemu-img.c
index 2e40cc1e69..968b4c8e83 100644
--- a/qemu-img.c
+++ b/qemu-img.c
@@ -57,8 +57,22 @@ static void format_print(void *opaque, const char *name)
printf(" %s", name);
}
+static void QEMU_NORETURN GCC_FMT_ATTR(1, 2) error_exit(const char *fmt, ...)
+{
+ va_list ap;
+
+ error_printf("qemu-img: ");
+
+ va_start(ap, fmt);
+ error_vprintf(fmt, ap);
+ va_end(ap);
+
+ error_printf("\nTry 'qemu-img --help' for more information\n");
+ exit(EXIT_FAILURE);
+}
+
/* Please keep in synch with qemu-img.texi */
-static void help(void)
+static void QEMU_NORETURN help(void)
{
const char *help_msg =
"qemu-img version " QEMU_VERSION ", Copyright (c) 2004-2008 Fabrice Bellard\n"
@@ -129,7 +143,7 @@ static void help(void)
printf("%s\nSupported formats:", help_msg);
bdrv_iterate_format(format_print, NULL);
printf("\n");
- exit(1);
+ exit(EXIT_SUCCESS);
}
static int GCC_FMT_ATTR(2, 3) qprintf(bool quiet, const char *fmt, ...)
@@ -262,7 +276,8 @@ static int print_block_option_help(const char *filename, const char *fmt)
return 0;
}
-static BlockDriverState *bdrv_new_open(const char *filename,
+static BlockDriverState *bdrv_new_open(const char *id,
+ const char *filename,
const char *fmt,
int flags,
bool require_io,
@@ -274,7 +289,7 @@ static BlockDriverState *bdrv_new_open(const char *filename,
Error *local_err = NULL;
int ret;
- bs = bdrv_new("image");
+ bs = bdrv_new(id, &error_abort);
if (fmt) {
drv = bdrv_find_format(fmt);
@@ -398,7 +413,7 @@ static int img_create(int argc, char **argv)
}
if (optind >= argc) {
- help();
+ error_exit("Expecting image file name");
}
optind++;
@@ -421,7 +436,7 @@ static int img_create(int argc, char **argv)
img_size = (uint64_t)sval;
}
if (optind != argc) {
- help();
+ error_exit("Unexpected argument: %s", argv[optind]);
}
bdrv_img_create(filename, fmt, base_filename, base_fmt,
@@ -442,12 +457,12 @@ fail:
static void dump_json_image_check(ImageCheck *check, bool quiet)
{
- Error *errp = NULL;
+ Error *local_err = NULL;
QString *str;
QmpOutputVisitor *ov = qmp_output_visitor_new();
QObject *obj;
visit_type_ImageCheck(qmp_output_get_visitor(ov),
- &check, NULL, &errp);
+ &check, NULL, &local_err);
obj = qmp_output_get_qobject(ov);
str = qobject_to_json_pretty(obj);
assert(str != NULL);
@@ -565,7 +580,7 @@ static int img_check(int argc, char **argv)
static const struct option long_options[] = {
{"help", no_argument, 0, 'h'},
{"format", required_argument, 0, 'f'},
- {"repair", no_argument, 0, 'r'},
+ {"repair", required_argument, 0, 'r'},
{"output", required_argument, 0, OPTION_OUTPUT},
{0, 0, 0, 0}
};
@@ -590,7 +605,8 @@ static int img_check(int argc, char **argv)
} else if (!strcmp(optarg, "all")) {
fix = BDRV_FIX_LEAKS | BDRV_FIX_ERRORS;
} else {
- help();
+ error_exit("Unknown option value for -r "
+ "(expecting 'leaks' or 'all'): %s", optarg);
}
break;
case OPTION_OUTPUT:
@@ -602,7 +618,7 @@ static int img_check(int argc, char **argv)
}
}
if (optind != argc - 1) {
- help();
+ error_exit("Expecting one image file name");
}
filename = argv[optind++];
@@ -615,7 +631,7 @@ static int img_check(int argc, char **argv)
return 1;
}
- bs = bdrv_new_open(filename, fmt, flags, true, quiet);
+ bs = bdrv_new_open("image", filename, fmt, flags, true, quiet);
if (!bs) {
return 1;
}
@@ -713,7 +729,7 @@ static int img_commit(int argc, char **argv)
}
}
if (optind != argc - 1) {
- help();
+ error_exit("Expecting one image file name");
}
filename = argv[optind++];
@@ -724,7 +740,7 @@ static int img_commit(int argc, char **argv)
return -1;
}
- bs = bdrv_new_open(filename, fmt, flags, true, quiet);
+ bs = bdrv_new_open("image", filename, fmt, flags, true, quiet);
if (!bs) {
return 1;
}
@@ -959,7 +975,7 @@ static int img_compare(int argc, char **argv)
if (optind != argc - 2) {
- help();
+ error_exit("Expecting two image file names");
}
filename1 = argv[optind++];
filename2 = argv[optind++];
@@ -967,14 +983,14 @@ static int img_compare(int argc, char **argv)
/* Initialize before goto out */
qemu_progress_init(progress, 2.0);
- bs1 = bdrv_new_open(filename1, fmt1, BDRV_O_FLAGS, true, quiet);
+ bs1 = bdrv_new_open("image 1", filename1, fmt1, BDRV_O_FLAGS, true, quiet);
if (!bs1) {
error_report("Can't open file %s", filename1);
ret = 2;
goto out3;
}
- bs2 = bdrv_new_open(filename2, fmt2, BDRV_O_FLAGS, true, quiet);
+ bs2 = bdrv_new_open("image 2", filename2, fmt2, BDRV_O_FLAGS, true, quiet);
if (!bs2) {
error_report("Can't open file %s", filename2);
ret = 2;
@@ -1275,7 +1291,7 @@ static int img_convert(int argc, char **argv)
}
if (bs_n < 1) {
- help();
+ error_exit("Must specify image file name");
}
@@ -1292,8 +1308,11 @@ static int img_convert(int argc, char **argv)
total_sectors = 0;
for (bs_i = 0; bs_i < bs_n; bs_i++) {
- bs[bs_i] = bdrv_new_open(argv[optind + bs_i], fmt, BDRV_O_FLAGS, true,
- quiet);
+ char *id = bs_n > 1 ? g_strdup_printf("source %d", bs_i)
+ : g_strdup("source");
+ bs[bs_i] = bdrv_new_open(id, argv[optind + bs_i], fmt, BDRV_O_FLAGS,
+ true, quiet);
+ g_free(id);
if (!bs[bs_i]) {
error_report("Could not open '%s'", argv[optind + bs_i]);
ret = -1;
@@ -1416,7 +1435,7 @@ static int img_convert(int argc, char **argv)
return -1;
}
- out_bs = bdrv_new_open(out_filename, out_fmt, flags, true, quiet);
+ out_bs = bdrv_new_open("target", out_filename, out_fmt, flags, true, quiet);
if (!out_bs) {
ret = -1;
goto out;
@@ -1712,12 +1731,12 @@ static void dump_snapshots(BlockDriverState *bs)
static void dump_json_image_info_list(ImageInfoList *list)
{
- Error *errp = NULL;
+ Error *local_err = NULL;
QString *str;
QmpOutputVisitor *ov = qmp_output_visitor_new();
QObject *obj;
visit_type_ImageInfoList(qmp_output_get_visitor(ov),
- &list, NULL, &errp);
+ &list, NULL, &local_err);
obj = qmp_output_get_qobject(ov);
str = qobject_to_json_pretty(obj);
assert(str != NULL);
@@ -1729,12 +1748,12 @@ static void dump_json_image_info_list(ImageInfoList *list)
static void dump_json_image_info(ImageInfo *info)
{
- Error *errp = NULL;
+ Error *local_err = NULL;
QString *str;
QmpOutputVisitor *ov = qmp_output_visitor_new();
QObject *obj;
visit_type_ImageInfo(qmp_output_get_visitor(ov),
- &info, NULL, &errp);
+ &info, NULL, &local_err);
obj = qmp_output_get_qobject(ov);
str = qobject_to_json_pretty(obj);
assert(str != NULL);
@@ -1799,8 +1818,8 @@ static ImageInfoList *collect_image_info_list(const char *filename,
}
g_hash_table_insert(filenames, (gpointer)filename, NULL);
- bs = bdrv_new_open(filename, fmt, BDRV_O_FLAGS | BDRV_O_NO_BACKING,
- false, false);
+ bs = bdrv_new_open("image", filename, fmt,
+ BDRV_O_FLAGS | BDRV_O_NO_BACKING, false, false);
if (!bs) {
goto err;
}
@@ -1809,6 +1828,7 @@ static ImageInfoList *collect_image_info_list(const char *filename,
if (err) {
error_report("%s", error_get_pretty(err));
error_free(err);
+ bdrv_unref(bs);
goto err;
}
@@ -1881,7 +1901,7 @@ static int img_info(int argc, char **argv)
}
}
if (optind != argc - 1) {
- help();
+ error_exit("Expecting one image file name");
}
filename = argv[optind++];
@@ -2045,10 +2065,10 @@ static int img_map(int argc, char **argv)
break;
}
}
- if (optind >= argc) {
- help();
+ if (optind != argc - 1) {
+ error_exit("Expecting one image file name");
}
- filename = argv[optind++];
+ filename = argv[optind];
if (output && !strcmp(output, "json")) {
output_format = OFORMAT_JSON;
@@ -2059,7 +2079,7 @@ static int img_map(int argc, char **argv)
return 1;
}
- bs = bdrv_new_open(filename, fmt, BDRV_O_FLAGS, true, false);
+ bs = bdrv_new_open("image", filename, fmt, BDRV_O_FLAGS, true, false);
if (!bs) {
return 1;
}
@@ -2137,7 +2157,7 @@ static int img_snapshot(int argc, char **argv)
return 0;
case 'l':
if (action) {
- help();
+ error_exit("Cannot mix '-l', '-a', '-c', '-d'");
return 0;
}
action = SNAPSHOT_LIST;
@@ -2145,7 +2165,7 @@ static int img_snapshot(int argc, char **argv)
break;
case 'a':
if (action) {
- help();
+ error_exit("Cannot mix '-l', '-a', '-c', '-d'");
return 0;
}
action = SNAPSHOT_APPLY;
@@ -2153,7 +2173,7 @@ static int img_snapshot(int argc, char **argv)
break;
case 'c':
if (action) {
- help();
+ error_exit("Cannot mix '-l', '-a', '-c', '-d'");
return 0;
}
action = SNAPSHOT_CREATE;
@@ -2161,7 +2181,7 @@ static int img_snapshot(int argc, char **argv)
break;
case 'd':
if (action) {
- help();
+ error_exit("Cannot mix '-l', '-a', '-c', '-d'");
return 0;
}
action = SNAPSHOT_DELETE;
@@ -2174,12 +2194,12 @@ static int img_snapshot(int argc, char **argv)
}
if (optind != argc - 1) {
- help();
+ error_exit("Expecting one image file name");
}
filename = argv[optind++];
/* Open the image */
- bs = bdrv_new_open(filename, NULL, bdrv_oflags, true, quiet);
+ bs = bdrv_new_open("image", filename, NULL, bdrv_oflags, true, quiet);
if (!bs) {
return 1;
}
@@ -2287,8 +2307,11 @@ static int img_rebase(int argc, char **argv)
progress = 0;
}
- if ((optind != argc - 1) || (!unsafe && !out_baseimg)) {
- help();
+ if (optind != argc - 1) {
+ error_exit("Expecting one image file name");
+ }
+ if (!unsafe && !out_baseimg) {
+ error_exit("Must specify backing file (-b) or use unsafe mode (-u)");
}
filename = argv[optind++];
@@ -2308,7 +2331,7 @@ static int img_rebase(int argc, char **argv)
* Ignore the old backing file for unsafe rebase in case we want to correct
* the reference to a renamed or moved backing file.
*/
- bs = bdrv_new_open(filename, fmt, flags, true, quiet);
+ bs = bdrv_new_open("image", filename, fmt, flags, true, quiet);
if (!bs) {
return 1;
}
@@ -2343,7 +2366,7 @@ static int img_rebase(int argc, char **argv)
} else {
char backing_name[1024];
- bs_old_backing = bdrv_new("old_backing");
+ bs_old_backing = bdrv_new("old_backing", &error_abort);
bdrv_get_backing_filename(bs, backing_name, sizeof(backing_name));
ret = bdrv_open(&bs_old_backing, backing_name, NULL, NULL, BDRV_O_FLAGS,
old_backing_drv, &local_err);
@@ -2354,7 +2377,7 @@ static int img_rebase(int argc, char **argv)
goto out;
}
if (out_baseimg[0]) {
- bs_new_backing = bdrv_new("new_backing");
+ bs_new_backing = bdrv_new("new_backing", &error_abort);
ret = bdrv_open(&bs_new_backing, out_baseimg, NULL, NULL,
BDRV_O_FLAGS, new_backing_drv, &local_err);
if (ret) {
@@ -2548,7 +2571,7 @@ static int img_resize(int argc, char **argv)
/* Remove size from argv manually so that negative numbers are not treated
* as options by getopt. */
if (argc < 3) {
- help();
+ error_exit("Not enough arguments");
return 1;
}
@@ -2575,7 +2598,7 @@ static int img_resize(int argc, char **argv)
}
}
if (optind != argc - 1) {
- help();
+ error_exit("Expecting one image file name");
}
filename = argv[optind++];
@@ -2605,7 +2628,8 @@ static int img_resize(int argc, char **argv)
n = qemu_opt_get_size(param, BLOCK_OPT_SIZE, 0);
qemu_opts_del(param);
- bs = bdrv_new_open(filename, fmt, BDRV_O_FLAGS | BDRV_O_RDWR, true, quiet);
+ bs = bdrv_new_open("image", filename, fmt, BDRV_O_FLAGS | BDRV_O_RDWR,
+ true, quiet);
if (!bs) {
ret = -1;
goto out;
@@ -2691,7 +2715,7 @@ static int img_amend(int argc, char **argv)
}
if (!options) {
- help();
+ error_exit("Must specify options (-o)");
}
filename = (optind == argc - 1) ? argv[argc - 1] : NULL;
@@ -2703,10 +2727,11 @@ static int img_amend(int argc, char **argv)
}
if (optind != argc - 1) {
- help();
+ error_exit("Expecting one image file name");
}
- bs = bdrv_new_open(filename, fmt, BDRV_O_FLAGS | BDRV_O_RDWR, true, quiet);
+ bs = bdrv_new_open("image", filename, fmt,
+ BDRV_O_FLAGS | BDRV_O_RDWR, true, quiet);
if (!bs) {
error_report("Could not open image '%s'", filename);
ret = -1;
@@ -2774,8 +2799,9 @@ int main(int argc, char **argv)
qemu_init_main_loop();
bdrv_init();
- if (argc < 2)
- help();
+ if (argc < 2) {
+ error_exit("Not enough arguments");
+ }
cmdname = argv[1];
argc--; argv++;
@@ -2787,6 +2813,5 @@ int main(int argc, char **argv)
}
/* not found */
- help();
- return 0;
+ error_exit("Command not found: %s", cmdname);
}
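
With error_exit() in place, argument-validation failures print a one-line diagnostic plus a pointer to --help instead of dumping the whole help text, and an explicit -h remains the only path that prints it. A hedged sketch of how a subcommand's option loop now reports bad usage — the subcommand and its option set are illustrative only:

static int img_frobnicate(int argc, char **argv)   /* hypothetical subcommand */
{
    const char *filename;
    int c;

    for (;;) {
        c = getopt(argc, argv, "hf:");
        if (c == -1) {
            break;
        }
        switch (c) {
        case 'h':
            help();                      /* an explicit -h still prints help */
            break;
        case 'f':
            /* ... handle the format option ... */
            break;
        default:
            error_exit("Unknown option '-%c'", optopt);
        }
    }

    if (optind != argc - 1) {
        error_exit("Expecting one image file name");
    }
    filename = argv[optind];
    (void)filename;
    return 0;
}
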
diff --git a/qemu-io-cmds.c b/qemu-io-cmds.c
index f1de24c91c..60c1cebffc 100644
--- a/qemu-io-cmds.c
+++ b/qemu-io-cmds.c
@@ -16,7 +16,7 @@
#define CMD_NOFILE_OK 0x01
-int qemuio_misalign;
+bool qemuio_misalign;
static cmdinfo_t *cmdtab;
static int ncmds;
@@ -1087,7 +1087,7 @@ writev_help(void)
" writes a range of bytes from the given offset source from multiple buffers\n"
"\n"
" Example:\n"
-" 'write 512 1k 1k' - writes 2 kilobytes at 512 bytes into the open file\n"
+" 'writev 512 1k 1k' - writes 2 kilobytes at 512 bytes into the open file\n"
"\n"
" Writes into a segment of the currently open file, using a buffer\n"
" filled with a set pattern (0xcdcdcdcd).\n"
diff --git a/qemu-io.c b/qemu-io.c
index fc3860884c..9fcd72bb10 100644
--- a/qemu-io.c
+++ b/qemu-io.c
@@ -24,10 +24,9 @@
#define CMD_NOFILE_OK 0x01
-char *progname;
+static char *progname;
-BlockDriverState *qemuio_bs;
-extern int qemuio_misalign;
+static BlockDriverState *qemuio_bs;
/* qemu-io commands passed using -c */
static int ncmdline;
@@ -68,7 +67,7 @@ static int openfile(char *name, int flags, int growable, QDict *opts)
return 1;
}
} else {
- qemuio_bs = bdrv_new("hda");
+ qemuio_bs = bdrv_new("hda", &error_abort);
if (bdrv_open(&qemuio_bs, name, NULL, opts, flags, NULL, &local_err)
< 0)
@@ -194,10 +193,11 @@ static const cmdinfo_t quit_cmd = {
static void usage(const char *name)
{
printf(
-"Usage: %s [-h] [-V] [-rsnm] [-c cmd] ... [file]\n"
+"Usage: %s [-h] [-V] [-rsnm] [-c STRING] ... [file]\n"
"QEMU Disk exerciser\n"
"\n"
-" -c, --cmd command to execute\n"
+" -c, --cmd STRING execute command with its arguments\n"
+" from the given string\n"
" -r, --read-only export read-only\n"
" -s, --snapshot use snapshot file\n"
" -n, --nocache disable host cache\n"
@@ -208,8 +208,10 @@ static void usage(const char *name)
" -T, --trace FILE enable trace events listed in the given file\n"
" -h, --help display this help and exit\n"
" -V, --version output version information and exit\n"
+"\n"
+"See '%s -c help' for information on available commands."
"\n",
- name);
+ name, name);
}
static char *get_prompt(void)
@@ -408,7 +410,7 @@ int main(int argc, char **argv)
readonly = 1;
break;
case 'm':
- qemuio_misalign = 1;
+ qemuio_misalign = true;
break;
case 'g':
growable = 1;
diff --git a/qemu-nbd.c b/qemu-nbd.c
index bdac1f3f1f..eed79fa15e 100644
--- a/qemu-nbd.c
+++ b/qemu-nbd.c
@@ -288,19 +288,19 @@ static void *nbd_client_thread(void *arg)
ret = nbd_receive_negotiate(sock, NULL, &nbdflags,
&size, &blocksize);
if (ret < 0) {
- goto out;
+ goto out_socket;
}
fd = open(device, O_RDWR);
if (fd < 0) {
/* Linux-only, we can use %m in printf. */
fprintf(stderr, "Failed to open %s: %m", device);
- goto out;
+ goto out_socket;
}
ret = nbd_init(fd, sock, nbdflags, size, blocksize);
if (ret < 0) {
- goto out;
+ goto out_fd;
}
/* update partition table */
@@ -316,12 +316,16 @@ static void *nbd_client_thread(void *arg)
ret = nbd_client(fd);
if (ret) {
- goto out;
+ goto out_fd;
}
close(fd);
kill(getpid(), SIGTERM);
return (void *) EXIT_SUCCESS;
+out_fd:
+ close(fd);
+out_socket:
+ closesocket(sock);
out:
kill(getpid(), SIGTERM);
return (void *) EXIT_FAILURE;
@@ -355,6 +359,11 @@ static void nbd_accept(void *opaque)
socklen_t addr_len = sizeof(addr);
int fd = accept(server_fd, (struct sockaddr *)&addr, &addr_len);
+ if (fd < 0) {
+ perror("accept");
+ return;
+ }
+
if (state >= TERMINATE) {
close(fd);
return;
@@ -648,7 +657,8 @@ int main(int argc, char **argv)
drv = NULL;
}
- bs = bdrv_new("hda");
+ bs = bdrv_new("hda", &error_abort);
+
srcpath = argv[optind];
ret = bdrv_open(&bs, srcpath, NULL, NULL, flags, drv, &local_err);
if (ret < 0) {
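
The client thread now unwinds through dedicated labels so every early exit closes exactly the resources it had acquired: out_fd closes the device, out_socket closes the socket, and the plain out label remains for failures before either exists. A generic, hedged sketch of the same idiom, where negotiate() and initialise() are stand-ins for the real calls:

#include <fcntl.h>
#include <unistd.h>

int negotiate(int sock);            /* stand-ins for illustration only */
int initialise(int fd, int sock);

static int client_setup(const char *device, int sock)
{
    int fd, ret;

    ret = negotiate(sock);
    if (ret < 0) {
        goto out_socket;            /* only the socket exists so far */
    }

    fd = open(device, O_RDWR);
    if (fd < 0) {
        ret = -1;
        goto out_socket;
    }

    ret = initialise(fd, sock);
    if (ret < 0) {
        goto out_fd;                /* undo in reverse order of acquisition */
    }
    return 0;

out_fd:
    close(fd);
out_socket:
    close(sock);
    return ret;
}
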
diff --git a/qemu-options.hx b/qemu-options.hx
index 068da2df09..781af14bf5 100644
--- a/qemu-options.hx
+++ b/qemu-options.hx
@@ -210,10 +210,13 @@ use is discouraged as it may be removed from future versions.
ETEXI
DEF("m", HAS_ARG, QEMU_OPTION_m,
- "-m megs set virtual RAM size to megs MB [default="
- stringify(DEFAULT_RAM_SIZE) "]\n", QEMU_ARCH_ALL)
+ "-m [size=]megs\n"
+ " configure guest RAM\n"
+ " size: initial amount of guest memory (default: "
+ stringify(DEFAULT_RAM_SIZE) "MiB)\n",
+ QEMU_ARCH_ALL)
STEXI
-@item -m @var{megs}
+@item -m [size=]@var{megs}
@findex -m
Set virtual RAM size to @var{megs} megabytes. Default is 128 MiB. Optionally,
a suffix of ``M'' or ``G'' can be used to signify a value in megabytes or
@@ -408,7 +411,8 @@ DEF("drive", HAS_ARG, QEMU_OPTION_drive,
"-drive [file=file][,if=type][,bus=n][,unit=m][,media=d][,index=i]\n"
" [,cyls=c,heads=h,secs=s[,trans=t]][,snapshot=on|off]\n"
" [,cache=writethrough|writeback|none|directsync|unsafe][,format=f]\n"
- " [,serial=s][,addr=A][,id=name][,aio=threads|native]\n"
+ " [,serial=s][,addr=A][,rerror=ignore|stop|report]\n"
+ " [,werror=ignore|stop|report|enospc][,id=name][,aio=threads|native]\n"
" [,readonly=on|off][,copy-on-read=on|off]\n"
" [[,bps=b]|[[,bps_rd=r][,bps_wr=w]]]\n"
" [[,iops=i]|[[,iops_rd=r][,iops_wr=w]]]\n"
@@ -444,7 +448,8 @@ This option defines the type of the media: disk or cdrom.
@item cyls=@var{c},heads=@var{h},secs=@var{s}[,trans=@var{t}]
These options have the same definition as they have in @option{-hdachs}.
@item snapshot=@var{snapshot}
-@var{snapshot} is "on" or "off" and allows to enable snapshot for given drive (see @option{-snapshot}).
+@var{snapshot} is "on" or "off" and controls snapshot mode for the given drive
+(see @option{-snapshot}).
@item cache=@var{cache}
@var{cache} is "none", "writeback", "unsafe", "directsync" or "writethrough" and controls how the host cache is used to access block data.
@item aio=@var{aio}
@@ -810,6 +815,7 @@ ETEXI
DEF("display", HAS_ARG, QEMU_OPTION_display,
"-display sdl[,frame=on|off][,alt_grab=on|off][,ctrl_grab=on|off]\n"
" [,window_close=on|off]|curses|none|\n"
+ " gtk[,grab_on_hover=on|off]|\n"
" vnc=<display>[,<optargs>]\n"
" select display type\n", QEMU_ARCH_ALL)
STEXI
@@ -833,6 +839,10 @@ graphics card, but its output will not be displayed to the QEMU
user. This option differs from the -nographic option in that it
only affects what is done with video output; -nographic also changes
the destination of the serial and parallel port data.
+@item gtk
+Display video output in a GTK window. This interface provides drop-down
+menus and other UI elements to configure and control the VM during
+runtime.
@item vnc
Start a VNC server on display <arg>
@end table
@@ -1038,7 +1048,7 @@ Rotate graphical output some deg left (only PXA LCD).
ETEXI
DEF("vga", HAS_ARG, QEMU_OPTION_vga,
- "-vga [std|cirrus|vmware|qxl|xenfb|none]\n"
+ "-vga [std|cirrus|vmware|qxl|xenfb|tcx|cg3|none]\n"
" select video card type\n", QEMU_ARCH_ALL)
STEXI
@item -vga @var{type}
@@ -1063,6 +1073,14 @@ card.
QXL paravirtual graphic card. It is VGA compatible (including VESA
2.0 VBE support). Works best with qxl guest drivers installed though.
Recommended choice when using the spice protocol.
+@item tcx
+(sun4m only) Sun TCX framebuffer. This is the default framebuffer for
+sun4m machines and offers both 8-bit and 24-bit colour depths at a
+fixed resolution of 1024x768.
+@item cg3
+(sun4m only) Sun cgthree framebuffer. This is a simple 8-bit framebuffer
+for sun4m machines available in both 1024x768 (OpenBIOS) and 1152x900 (OBP)
+resolutions, aimed at people wishing to run older Solaris versions.
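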
@item none
Disable VGA card.
@end table
@@ -1229,7 +1247,7 @@ Disable adaptive encodings. Adaptive encodings are enabled by default.
An adaptive encoding will try to detect frequently updated screen regions,
and send updates in these regions using a lossy encoding (like JPEG).
This can be really helpful to save bandwidth when playing videos. Disabling
-adaptive encodings allows to restore the original static behavior of encodings
+adaptive encodings restores the original static behavior of encodings
like Tight.
@item share=[allow-exclusive|force-shared|ignore]
@@ -2792,7 +2810,7 @@ UTC or local time, respectively. @code{localtime} is required for correct date i
MS-DOS or Windows. To start at a specific point in time, provide @var{date} in the
format @code{2006-06-17T16:01:21} or @code{2006-06-17}. The default base is UTC.
-By default the RTC is driven by the host system time. This allows to use the
+By default the RTC is driven by the host system time. This allows using the
RTC as accurate reference clock inside the guest, specifically if the host
time is smoothly following an accurate external reference clock, e.g. via NTP.
If you want to isolate the guest time from the host, you can set @option{clock}
diff --git a/qemu.sasl b/qemu.sasl
index 9dc8323f1d..64fdef3d5b 100644
--- a/qemu.sasl
+++ b/qemu.sasl
@@ -22,7 +22,9 @@ mech_list: digest-md5
# Some older builds of MIT kerberos on Linux ignore this option &
# instead need KRB5_KTNAME env var.
# For modern Linux, and other OS, this should be sufficient
-keytab: /etc/qemu/krb5.tab
+#
+# There is no default value here, uncomment if you need this
+#keytab: /etc/qemu/krb5.tab
# If using digest-md5 for username/passwds, then this is the file
# containing the passwds. Use 'saslpasswd2 -a qemu [username]'
diff --git a/qga/commands-posix.c b/qga/commands-posix.c
index 6b5f11f83f..935a4ec14d 100644
--- a/qga/commands-posix.c
+++ b/qga/commands-posix.c
@@ -171,7 +171,7 @@ void qmp_guest_set_time(bool has_time, int64_t time_ns, Error **errp)
/* Now, if user has passed a time to set and the system time is set, we
* just need to synchronize the hardware clock. However, if no time was
* passed, user is requesting the opposite: set the system time from the
- * hardware clock. */
+ * hardware clock (RTC). */
pid = fork();
if (pid == 0) {
setsid();
diff --git a/qga/qapi-schema.json b/qga/qapi-schema.json
index 80edca143a..a8cdcb35c4 100644
--- a/qga/qapi-schema.json
+++ b/qga/qapi-schema.json
@@ -96,8 +96,8 @@
##
# @guest-get-time:
#
-# Get the information about guest time relative to the Epoch
-# of 1970-01-01 in UTC.
+# Get the information about guest's System Time relative to
+# the Epoch of 1970-01-01 in UTC.
#
# Returns: Time in nanoseconds.
#
@@ -117,11 +117,11 @@
# gap was, NTP might not be able to resynchronize the
# guest.
#
-# This command tries to set guest time to the given value,
-# then sets the Hardware Clock to the current System Time.
-# This will make it easier for a guest to resynchronize
-# without waiting for NTP. If no @time is specified, then
-# the time to set is read from RTC.
+# This command tries to set guest's System Time to the
+# given value, then sets the Hardware Clock (RTC) to the
+# current System Time. This will make it easier for a guest
+# to resynchronize without waiting for NTP. If no @time is
+# specified, then the time to set is read from RTC.
#
# @time: #optional time of nanoseconds, relative to the Epoch
# of 1970-01-01 in UTC.
diff --git a/qga/vss-win32/install.cpp b/qga/vss-win32/install.cpp
index b791a6c33b..b0e4426c72 100644
--- a/qga/vss-win32/install.cpp
+++ b/qga/vss-win32/install.cpp
@@ -75,10 +75,13 @@ static void errmsg_dialog(DWORD err, const char *text, const char *opt = "")
#define chk(status) _chk(hr, status, "Failed to " #status, out)
+#if !defined(__MINGW64_VERSION_MAJOR) || !defined(__MINGW64_VERSION_MINOR) || \
+ __MINGW64_VERSION_MAJOR * 100 + __MINGW64_VERSION_MINOR < 301
void __stdcall _com_issue_error(HRESULT hr)
{
errmsg(hr, "Unexpected error in COM");
}
+#endif
template<class T>
HRESULT put_Value(ICatalogObject *pObj, LPCWSTR name, T val)
diff --git a/qmp-commands.hx b/qmp-commands.hx
index d982cd62b9..ed3ab9225b 100644
--- a/qmp-commands.hx
+++ b/qmp-commands.hx
@@ -2327,6 +2327,45 @@ EQMP
},
SQMP
+query-iothreads
+---------------
+
+Returns a list of information about each iothread.
+
+Note this list excludes the QEMU main loop thread, which is not declared
+using the -object iothread command-line option. It is always the main thread
+of the process.
+
+Return a json-array. Each iothread is represented by a json-object, which contains:
+
+- "id": name of iothread (json-str)
+- "thread-id": ID of the underlying host thread (json-int)
+
+Example:
+
+-> { "execute": "query-iothreads" }
+<- {
+ "return":[
+ {
+ "id":"iothread0",
+ "thread-id":3134
+ },
+ {
+ "id":"iothread1",
+ "thread-id":3135
+ }
+ ]
+ }
+
+EQMP
+
+ {
+ .name = "query-iothreads",
+ .args_type = "",
+ .mhandler.cmd_new = qmp_marshal_input_query_iothreads,
+ },
+
+SQMP
query-pci
---------
@@ -3368,6 +3407,7 @@ Each array entry contains the following:
- "promiscuous": promiscuous mode is enabled (json-bool)
- "multicast": multicast receive state (one of 'normal', 'none', 'all')
- "unicast": unicast receive state (one of 'normal', 'none', 'all')
+- "vlan": vlan receive state (one of 'normal', 'none', 'all') (Since 2.0)
- "broadcast-allowed": allow to receive broadcast (json-bool)
- "multicast-overflow": multicast table is overflowed (json-bool)
- "unicast-overflow": unicast table is overflowed (json-bool)
@@ -3385,6 +3425,7 @@ Example:
"name": "vnet0",
"main-mac": "52:54:00:12:34:56",
"unicast": "normal",
+ "vlan": "normal",
"vlan-table": [
4,
0
diff --git a/qmp.c b/qmp.c
index f556a04d19..74107be41b 100644
--- a/qmp.c
+++ b/qmp.c
@@ -114,8 +114,11 @@ void qmp_cpu(int64_t index, Error **errp)
void qmp_cpu_add(int64_t id, Error **errp)
{
- if (current_machine->hot_add_cpu) {
- current_machine->hot_add_cpu(id, errp);
+ MachineClass *mc;
+
+ mc = MACHINE_GET_CLASS(current_machine);
+ if (mc->qemu_machine->hot_add_cpu) {
+ mc->qemu_machine->hot_add_cpu(id, errp);
} else {
error_setg(errp, "Not supported");
}
@@ -163,7 +166,7 @@ void qmp_cont(Error **errp)
Error *local_err = NULL;
if (runstate_needs_reset()) {
- error_set(errp, QERR_RESET_REQUIRED);
+ error_setg(errp, "Resetting the Virtual Machine is required");
return;
} else if (runstate_check(RUN_STATE_SUSPENDED)) {
return;
@@ -537,14 +540,27 @@ void object_add(const char *type, const char *id, const QDict *qdict,
Visitor *v, Error **errp)
{
Object *obj;
+ ObjectClass *klass;
const QDictEntry *e;
Error *local_err = NULL;
- if (!object_class_by_name(type)) {
+ klass = object_class_by_name(type);
+ if (!klass) {
error_setg(errp, "invalid class name");
return;
}
+ if (!object_class_dynamic_cast(klass, TYPE_USER_CREATABLE)) {
+ error_setg(errp, "object type '%s' isn't supported by object-add",
+ type);
+ return;
+ }
+
+ if (object_class_is_abstract(klass)) {
+ error_setg(errp, "object type '%s' is abstract", type);
+ return;
+ }
+
obj = object_new(type);
if (qdict) {
for (e = qdict_first(qdict); e; e = qdict_next(qdict, e)) {
@@ -555,12 +571,6 @@ void object_add(const char *type, const char *id, const QDict *qdict,
}
}
- if (!object_dynamic_cast(obj, TYPE_USER_CREATABLE)) {
- error_setg(&local_err, "object type '%s' isn't supported by object-add",
- type);
- goto out;
- }
-
user_creatable_complete(obj, &local_err);
if (local_err) {
goto out;
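
object_add() now rejects a type before instantiating it unless the class is concrete and implements TYPE_USER_CREATABLE, and only then applies the properties and calls user_creatable_complete(). A hedged sketch of a type that would pass those checks — every name here is illustrative:

#include "qom/object.h"
#include "qom/object_interfaces.h"
#include "qemu/module.h"

#define TYPE_DUMMY_BACKEND "dummy-backend"     /* illustrative type name */

typedef struct DummyBackend {
    Object parent_obj;
} DummyBackend;

static void dummy_backend_complete(UserCreatable *uc, Error **errp)
{
    /* final validation once all object-add properties have been set */
}

static void dummy_backend_class_init(ObjectClass *oc, void *data)
{
    UserCreatableClass *ucc = USER_CREATABLE_CLASS(oc);

    ucc->complete = dummy_backend_complete;
}

static const TypeInfo dummy_backend_info = {
    .name = TYPE_DUMMY_BACKEND,
    .parent = TYPE_OBJECT,
    .instance_size = sizeof(DummyBackend),
    .class_init = dummy_backend_class_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_USER_CREATABLE },
        { }
    },
};

static void dummy_backend_register_types(void)
{
    type_register_static(&dummy_backend_info);
}

type_init(dummy_backend_register_types)
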
diff --git a/qobject/json-parser.c b/qobject/json-parser.c
index e7947b340c..e46c26448e 100644
--- a/qobject/json-parser.c
+++ b/qobject/json-parser.c
@@ -110,7 +110,7 @@ static void GCC_FMT_ATTR(3, 4) parse_error(JSONParserContext *ctxt,
error_free(ctxt->err);
ctxt->err = NULL;
}
- error_set(&ctxt->err, QERR_JSON_PARSE_ERROR, message);
+ error_setg(&ctxt->err, "JSON parse error, %s", message);
}
/**
diff --git a/qom/cpu.c b/qom/cpu.c
index 9d62479546..fada2d4b92 100644
--- a/qom/cpu.c
+++ b/qom/cpu.c
@@ -1,7 +1,7 @@
/*
* QEMU CPU model
*
- * Copyright (c) 2012 SUSE LINUX Products GmbH
+ * Copyright (c) 2012-2014 SUSE LINUX Products GmbH
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -23,6 +23,7 @@
#include "sysemu/kvm.h"
#include "qemu/notify.h"
#include "qemu/log.h"
+#include "qemu/error-report.h"
#include "sysemu/sysemu.h"
bool cpu_exists(int64_t id)
@@ -39,6 +40,46 @@ bool cpu_exists(int64_t id)
return false;
}
+CPUState *cpu_generic_init(const char *typename, const char *cpu_model)
+{
+ char *str, *name, *featurestr;
+ CPUState *cpu;
+ ObjectClass *oc;
+ CPUClass *cc;
+ Error *err = NULL;
+
+ str = g_strdup(cpu_model);
+ name = strtok(str, ",");
+
+ oc = cpu_class_by_name(typename, name);
+ if (oc == NULL) {
+ g_free(str);
+ return NULL;
+ }
+
+ cpu = CPU(object_new(object_class_get_name(oc)));
+ cc = CPU_GET_CLASS(cpu);
+
+ featurestr = strtok(NULL, ",");
+ cc->parse_features(cpu, featurestr, &err);
+ g_free(str);
+ if (err != NULL) {
+ goto out;
+ }
+
+ object_property_set_bool(OBJECT(cpu), true, "realized", &err);
+
+out:
+ if (err != NULL) {
+ error_report("%s", error_get_pretty(err));
+ error_free(err);
+ object_unref(OBJECT(cpu));
+ return NULL;
+ }
+
+ return cpu;
+}
+
bool cpu_paging_enabled(const CPUState *cpu)
{
CPUClass *cc = CPU_GET_CLASS(cpu);
@@ -195,10 +236,20 @@ static void cpu_common_reset(CPUState *cpu)
log_cpu_state(cpu, cc->reset_dump_flags);
}
- cpu->exit_request = 0;
cpu->interrupt_request = 0;
cpu->current_tb = NULL;
cpu->halted = 0;
+ cpu->mem_io_pc = 0;
+ cpu->mem_io_vaddr = 0;
+ cpu->icount_extra = 0;
+ cpu->icount_decr.u32 = 0;
+ cpu->can_do_io = 0;
+ memset(cpu->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof(void *));
+}
+
+static bool cpu_common_has_work(CPUState *cs)
+{
+ return false;
}
ObjectClass *cpu_class_by_name(const char *typename, const char *cpu_model)
@@ -213,6 +264,34 @@ static ObjectClass *cpu_common_class_by_name(const char *cpu_model)
return NULL;
}
+static void cpu_common_parse_features(CPUState *cpu, char *features,
+ Error **errp)
+{
+ char *featurestr; /* Single "key=value" string being parsed */
+ char *val;
+ Error *err = NULL;
+
+ featurestr = features ? strtok(features, ",") : NULL;
+
+ while (featurestr) {
+ val = strchr(featurestr, '=');
+ if (val) {
+ *val = 0;
+ val++;
+ object_property_parse(OBJECT(cpu), val, featurestr, &err);
+ if (err) {
+ error_propagate(errp, err);
+ return;
+ }
+ } else {
+ error_setg(errp, "Expected key=value format, found %s.",
+ featurestr);
+ return;
+ }
+ featurestr = strtok(NULL, ",");
+ }
+}
+
static void cpu_common_realizefn(DeviceState *dev, Error **errp)
{
CPUState *cpu = CPU(dev);
@@ -243,8 +322,10 @@ static void cpu_class_init(ObjectClass *klass, void *data)
CPUClass *k = CPU_CLASS(klass);
k->class_by_name = cpu_common_class_by_name;
+ k->parse_features = cpu_common_parse_features;
k->reset = cpu_common_reset;
k->get_arch_id = cpu_common_get_arch_id;
+ k->has_work = cpu_common_has_work;
k->get_paging_enabled = cpu_common_get_paging_enabled;
k->get_memory_mapping = cpu_common_get_memory_mapping;
k->write_elf32_qemunote = cpu_common_write_elf32_qemunote;
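
cpu_generic_init() factors out the sequence every target used to open-code: split the -cpu string into model and features, look up the class, parse the features, then realize. A hedged usage sketch follows; TYPE_FOO_CPU and the model string are placeholders for a real target's definitions:

#include "qom/cpu.h"

#define TYPE_FOO_CPU "foo-cpu"    /* placeholder for a target's CPU type */

static CPUState *create_boot_cpu(const char *cpu_model)
{
    /* cpu_model comes straight from -cpu, e.g. "foo-model,featureA=on" */
    CPUState *cpu = cpu_generic_init(TYPE_FOO_CPU, cpu_model);

    if (!cpu) {
        /* the helper has already reported the error (unknown model or bad
         * feature string) and released the partially built object */
        return NULL;
    }
    return cpu;
}
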
diff --git a/qom/object.c b/qom/object.c
index 660859c0e7..e42b254303 100644
--- a/qom/object.c
+++ b/qom/object.c
@@ -768,7 +768,7 @@ ObjectProperty *object_property_find(Object *obj, const char *name,
}
}
- error_set(errp, QERR_PROPERTY_NOT_FOUND, "", name);
+ error_setg(errp, "Property '.%s' not found", name);
return NULL;
}
@@ -1023,10 +1023,23 @@ out:
g_free(type);
}
+void object_property_allow_set_link(Object *obj, const char *name,
+ Object *val, Error **errp)
+{
+ /* Allow the link to be set, always */
+}
+
+typedef struct {
+ Object **child;
+ void (*check)(Object *, const char *, Object *, Error **);
+ ObjectPropertyLinkFlags flags;
+} LinkProperty;
+
static void object_get_link_property(Object *obj, Visitor *v, void *opaque,
const char *name, Error **errp)
{
- Object **child = opaque;
+ LinkProperty *lprop = opaque;
+ Object **child = lprop->child;
gchar *path;
if (*child) {
@@ -1039,102 +1052,167 @@ static void object_get_link_property(Object *obj, Visitor *v, void *opaque,
}
}
-static void object_set_link_property(Object *obj, Visitor *v, void *opaque,
- const char *name, Error **errp)
+/*
+ * object_resolve_link:
+ *
+ * Lookup an object and ensure its type matches the link property type. This
+ * is similar to object_resolve_path() except type verification against the
+ * link property is performed.
+ *
+ * Returns: The matched object or NULL on path lookup failures.
+ */
+static Object *object_resolve_link(Object *obj, const char *name,
+ const char *path, Error **errp)
{
- Object **child = opaque;
- Object *old_target;
- bool ambiguous = false;
const char *type;
- char *path;
gchar *target_type;
+ bool ambiguous = false;
+ Object *target;
+ /* Go from link<FOO> to FOO. */
type = object_property_get_type(obj, name, NULL);
+ target_type = g_strndup(&type[5], strlen(type) - 6);
+ target = object_resolve_path_type(path, target_type, &ambiguous);
+
+ if (ambiguous) {
+ error_set(errp, ERROR_CLASS_GENERIC_ERROR,
+ "Path '%s' does not uniquely identify an object", path);
+ } else if (!target) {
+ target = object_resolve_path(path, &ambiguous);
+ if (target || ambiguous) {
+ error_set(errp, QERR_INVALID_PARAMETER_TYPE, name, target_type);
+ } else {
+ error_set(errp, QERR_DEVICE_NOT_FOUND, path);
+ }
+ target = NULL;
+ }
+ g_free(target_type);
- visit_type_str(v, &path, name, errp);
-
- old_target = *child;
- *child = NULL;
+ return target;
+}
- if (strcmp(path, "") != 0) {
- Object *target;
+static void object_set_link_property(Object *obj, Visitor *v, void *opaque,
+ const char *name, Error **errp)
+{
+ Error *local_err = NULL;
+ LinkProperty *prop = opaque;
+ Object **child = prop->child;
+ Object *old_target = *child;
+ Object *new_target = NULL;
+ char *path = NULL;
- /* Go from link<FOO> to FOO. */
- target_type = g_strndup(&type[5], strlen(type) - 6);
- target = object_resolve_path_type(path, target_type, &ambiguous);
+ visit_type_str(v, &path, name, &local_err);
- if (ambiguous) {
- error_set(errp, QERR_AMBIGUOUS_PATH, path);
- } else if (target) {
- object_ref(target);
- *child = target;
- } else {
- target = object_resolve_path(path, &ambiguous);
- if (target || ambiguous) {
- error_set(errp, QERR_INVALID_PARAMETER_TYPE, name, target_type);
- } else {
- error_set(errp, QERR_DEVICE_NOT_FOUND, path);
- }
- }
- g_free(target_type);
+ if (!local_err && strcmp(path, "") != 0) {
+ new_target = object_resolve_link(obj, name, path, &local_err);
}
g_free(path);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ return;
+ }
+ prop->check(obj, name, new_target, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ return;
+ }
+
+ if (new_target) {
+ object_ref(new_target);
+ }
+ *child = new_target;
if (old_target != NULL) {
object_unref(old_target);
}
}
+static void object_release_link_property(Object *obj, const char *name,
+ void *opaque)
+{
+ LinkProperty *prop = opaque;
+
+ if ((prop->flags & OBJ_PROP_LINK_UNREF_ON_RELEASE) && *prop->child) {
+ object_unref(*prop->child);
+ }
+ g_free(prop);
+}
+
void object_property_add_link(Object *obj, const char *name,
const char *type, Object **child,
+ void (*check)(Object *, const char *,
+ Object *, Error **),
+ ObjectPropertyLinkFlags flags,
Error **errp)
{
+ Error *local_err = NULL;
+ LinkProperty *prop = g_malloc(sizeof(*prop));
gchar *full_type;
+ prop->child = child;
+ prop->check = check;
+ prop->flags = flags;
+
full_type = g_strdup_printf("link<%s>", type);
object_property_add(obj, name, full_type,
object_get_link_property,
- object_set_link_property,
- NULL, child, errp);
+ check ? object_set_link_property : NULL,
+ object_release_link_property,
+ prop,
+ &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ g_free(prop);
+ }
g_free(full_type);
}
+gchar *object_get_canonical_path_component(Object *obj)
+{
+ ObjectProperty *prop = NULL;
+
+ g_assert(obj);
+ g_assert(obj->parent != NULL);
+
+ QTAILQ_FOREACH(prop, &obj->parent->properties, node) {
+ if (!object_property_is_child(prop)) {
+ continue;
+ }
+
+ if (prop->opaque == obj) {
+ return g_strdup(prop->name);
+ }
+ }
+
+ /* obj had a parent but was not a child, should never happen */
+ g_assert_not_reached();
+ return NULL;
+}
+
gchar *object_get_canonical_path(Object *obj)
{
Object *root = object_get_root();
- char *newpath = NULL, *path = NULL;
+ char *newpath, *path = NULL;
while (obj != root) {
- ObjectProperty *prop = NULL;
-
- g_assert(obj->parent != NULL);
-
- QTAILQ_FOREACH(prop, &obj->parent->properties, node) {
- if (!object_property_is_child(prop)) {
- continue;
- }
+ char *component = object_get_canonical_path_component(obj);
- if (prop->opaque == obj) {
- if (path) {
- newpath = g_strdup_printf("%s/%s", prop->name, path);
- g_free(path);
- path = newpath;
- } else {
- path = g_strdup(prop->name);
- }
- break;
- }
+ if (path) {
+ newpath = g_strdup_printf("%s/%s", component, path);
+ g_free(component);
+ g_free(path);
+ path = newpath;
+ } else {
+ path = component;
}
- g_assert(prop != NULL);
-
obj = obj->parent;
}
- newpath = g_strdup_printf("/%s", path);
+ newpath = g_strdup_printf("/%s", path ? path : "");
g_free(path);
return newpath;
@@ -1148,7 +1226,8 @@ Object *object_resolve_path_component(Object *parent, const gchar *part)
}
if (object_property_is_link(prop)) {
- return *(Object **)prop->opaque;
+ LinkProperty *lprop = prop->opaque;
+ return *lprop->child;
} else if (object_property_is_child(prop)) {
return prop->opaque;
} else {
@@ -1293,6 +1372,7 @@ void object_property_add_str(Object *obj, const char *name,
void (*set)(Object *, const char *, Error **),
Error **errp)
{
+ Error *local_err = NULL;
StringProperty *prop = g_malloc0(sizeof(*prop));
prop->get = get;
@@ -1302,7 +1382,11 @@ void object_property_add_str(Object *obj, const char *name,
get ? property_get_str : NULL,
set ? property_set_str : NULL,
property_release_str,
- prop, errp);
+ prop, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ g_free(prop);
+ }
}
typedef struct BoolProperty
@@ -1349,6 +1433,7 @@ void object_property_add_bool(Object *obj, const char *name,
void (*set)(Object *, bool, Error **),
Error **errp)
{
+ Error *local_err = NULL;
BoolProperty *prop = g_malloc0(sizeof(*prop));
prop->get = get;
@@ -1358,7 +1443,11 @@ void object_property_add_bool(Object *obj, const char *name,
get ? property_get_bool : NULL,
set ? property_set_bool : NULL,
property_release_bool,
- prop, errp);
+ prop, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ g_free(prop);
+ }
}
static char *qdev_get_type(Object *obj, Error **errp)
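
Link properties now carry a check callback and flags, so object_property_add_link() callers decide whether the link is writable (pass object_property_allow_set_link, a custom validator, or NULL for a read-only link) and whether the target is unreferenced when the property is released. A hedged sketch of an updated caller — the device struct and property name are illustrative:

#include "qom/object.h"

typedef struct FooDevice {
    Object parent_obj;
    Object *dma_mr;                 /* the object the link points at */
} FooDevice;

static void foo_device_instance_init(Object *obj)
{
    FooDevice *s = (FooDevice *)obj;

    object_property_add_link(obj, "dma-memory", TYPE_OBJECT,
                             &s->dma_mr,
                             /* writable; accept any target of the right type */
                             object_property_allow_set_link,
                             /* drop the reference when the property goes away */
                             OBJ_PROP_LINK_UNREF_ON_RELEASE,
                             NULL);
}
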
diff --git a/roms/SLOF b/roms/SLOF
-Subproject e2e8ac901e617573ea383f9cffd136146d0675a
+Subproject c90b50b5055f976a0da3c032f26fb80157292ad
diff --git a/roms/openhackware b/roms/openhackware
-Subproject e9829b5584169ad14df2ca8776b87f4985aa9f0
+Subproject 1af7e55425e58a6dcb5133b092fcf16f8c654fb
diff --git a/rules.mak b/rules.mak
index 9dda9f760f..5c454d80d5 100644
--- a/rules.mak
+++ b/rules.mak
@@ -23,8 +23,8 @@ QEMU_DGFLAGS += -MMD -MP -MT $@ -MF $(*D)/$(*F).d
QEMU_INCLUDES += -I$(<D) -I$(@D)
maybe-add = $(filter-out $1, $2) $1
-extract-libs = $(strip $(sort $(foreach o,$1,$($o-libs)) \
- $(foreach o,$(call expand-objs,$1),$($o-libs))))
+extract-libs = $(strip $(sort $(foreach o,$1,$($o-libs))) \
+ $(foreach o,$(call expand-objs,$1),$($o-libs)))
expand-objs = $(strip $(sort $(filter %.o,$1)) \
$(foreach o,$(filter %.mo,$1),$($o-objs)) \
$(filter-out %.o %.mo,$1))
diff --git a/savevm.c b/savevm.c
index d094fbb854..da8aa2428d 100644
--- a/savevm.c
+++ b/savevm.c
@@ -81,6 +81,7 @@ static void qemu_announce_self_iter(NICState *nic, void *opaque)
uint8_t buf[60];
int len;
+ trace_qemu_announce_self_iter(qemu_ether_ntoa(&nic->conf->macaddr));
len = announce_self_create(buf, nic->conf->macaddr.a);
qemu_send_packet_raw(qemu_get_queue(nic), buf, len);
@@ -429,6 +430,7 @@ void vmstate_unregister(DeviceState *dev, const VMStateDescription *vmsd,
static int vmstate_load(QEMUFile *f, SaveStateEntry *se, int version_id)
{
+ trace_vmstate_load(se->idstr, se->vmsd ? se->vmsd->name : "(old)");
if (!se->vmsd) { /* Old style */
return se->ops->load_state(f, se->opaque, version_id);
}
@@ -437,6 +439,7 @@ static int vmstate_load(QEMUFile *f, SaveStateEntry *se, int version_id)
static void vmstate_save(QEMUFile *f, SaveStateEntry *se)
{
+ trace_vmstate_save(se->idstr, se->vmsd ? se->vmsd->name : "(old)");
if (!se->vmsd) { /* Old style */
se->ops->save_state(f, se->opaque);
return;
@@ -450,7 +453,8 @@ bool qemu_savevm_state_blocked(Error **errp)
QTAILQ_FOREACH(se, &savevm_handlers, entry) {
if (se->no_migrate) {
- error_set(errp, QERR_MIGRATION_NOT_SUPPORTED, se->idstr);
+ error_setg(errp, "State blocked by non-migratable device '%s'",
+ se->idstr);
return true;
}
}
@@ -463,6 +467,7 @@ void qemu_savevm_state_begin(QEMUFile *f,
SaveStateEntry *se;
int ret;
+ trace_savevm_state_begin();
QTAILQ_FOREACH(se, &savevm_handlers, entry) {
if (!se->ops || !se->ops->set_params) {
continue;
@@ -515,6 +520,7 @@ int qemu_savevm_state_iterate(QEMUFile *f)
SaveStateEntry *se;
int ret = 1;
+ trace_savevm_state_iterate();
QTAILQ_FOREACH(se, &savevm_handlers, entry) {
if (!se->ops || !se->ops->save_live_iterate) {
continue;
@@ -554,6 +560,8 @@ void qemu_savevm_state_complete(QEMUFile *f)
SaveStateEntry *se;
int ret;
+ trace_savevm_state_complete();
+
cpu_synchronize_all_states();
QTAILQ_FOREACH(se, &savevm_handlers, entry) {
@@ -628,6 +636,7 @@ void qemu_savevm_state_cancel(void)
{
SaveStateEntry *se;
+ trace_savevm_state_cancel();
QTAILQ_FOREACH(se, &savevm_handlers, entry) {
if (se->ops && se->ops->cancel) {
se->ops->cancel(se->opaque);
diff --git a/scripts/coverity-model.c b/scripts/coverity-model.c
new file mode 100644
index 0000000000..4c99a85cfc
--- /dev/null
+++ b/scripts/coverity-model.c
@@ -0,0 +1,183 @@
+/* Coverity Scan model
+ *
+ * Copyright (C) 2014 Red Hat, Inc.
+ *
+ * Authors:
+ * Markus Armbruster <armbru@redhat.com>
+ * Paolo Bonzini <pbonzini@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or, at your
+ * option, any later version. See the COPYING file in the top-level directory.
+ */
+
+
+/*
+ * This is the source code for our Coverity user model file. The
+ * purpose of user models is to increase scanning accuracy by explaining
+ * code Coverity can't see (out of tree libraries) or doesn't
+ * sufficiently understand. Better accuracy means both fewer false
+ * positives and more true defects. Memory leaks in particular.
+ *
+ * - A model file can't import any header files. Some built-in primitives are
+ * available but not wchar_t, NULL etc.
+ * - Modeling doesn't need full structs and typedefs. Rudimentary structs
+ * and similar types are sufficient.
+ * - An uninitialized local variable signifies that the variable could be
+ * any value.
+ *
+ * The model file must be uploaded by an admin in the analysis settings of
+ * http://scan.coverity.com/projects/378
+ */
+
+#define NULL ((void *)0)
+
+typedef unsigned char uint8_t;
+typedef char int8_t;
+typedef unsigned int uint32_t;
+typedef int int32_t;
+typedef long ssize_t;
+typedef unsigned long long uint64_t;
+typedef long long int64_t;
+typedef _Bool bool;
+
+/* exec.c */
+
+typedef struct AddressSpace AddressSpace;
+typedef uint64_t hwaddr;
+
+static void __write(uint8_t *buf, ssize_t len)
+{
+ int first, last;
+ __coverity_negative_sink__(len);
+ if (len == 0) return;
+ buf[0] = first;
+ buf[len-1] = last;
+ __coverity_writeall__(buf);
+}
+
+static void __read(uint8_t *buf, ssize_t len)
+{
+ __coverity_negative_sink__(len);
+ if (len == 0) return;
+ int first = buf[0];
+ int last = buf[len-1];
+}
+
+bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
+ int len, bool is_write)
+{
+ bool result;
+
+ // TODO: investigate impact of treating reads as producing
+ // tainted data, with __coverity_tainted_data_argument__(buf).
+ if (is_write) __write(buf, len); else __read(buf, len);
+
+ return result;
+}
+
+/* Tainting */
+
+typedef struct {} name2keysym_t;
+static int get_keysym(const name2keysym_t *table,
+ const char *name)
+{
+ int result;
+ if (result > 0) {
+ __coverity_tainted_string_sanitize_content__(name);
+ return result;
+ } else {
+ return 0;
+ }
+}
+
+/* glib memory allocation functions.
+ *
+ * Note that we ignore the fact that g_malloc of 0 bytes returns NULL,
+ * and g_realloc of 0 bytes frees the pointer.
+ *
+ * Modeling this would result in Coverity flagging a lot of memory
+ * allocations as potentially returning NULL, and asking us to check
+ * whether the result of the allocation is NULL or not. However, the
+ * resulting pointer should never be dereferenced anyway, and in fact
+ * it is not in the vast majority of cases.
+ *
+ * If a dereference did happen, this would suppress a defect report
+ * for an actual null pointer dereference. But it's too unlikely to
+ * be worth wading through the false positives, and with some luck
+ * we'll get a buffer overflow reported anyway.
+ */
+
+void *malloc(size_t);
+void *calloc(size_t, size_t);
+void *realloc(void *, size_t);
+void free(void *);
+
+void *
+g_malloc(size_t n_bytes)
+{
+ void *mem;
+ __coverity_negative_sink__(n_bytes);
+ mem = malloc(n_bytes == 0 ? 1 : n_bytes);
+ if (!mem) __coverity_panic__();
+ return mem;
+}
+
+void *
+g_malloc0(size_t n_bytes)
+{
+ void *mem;
+ __coverity_negative_sink__(n_bytes);
+ mem = calloc(1, n_bytes == 0 ? 1 : n_bytes);
+ if (!mem) __coverity_panic__();
+ return mem;
+}
+
+void g_free(void *mem)
+{
+ free(mem);
+}
+
+void *g_realloc(void * mem, size_t n_bytes)
+{
+ __coverity_negative_sink__(n_bytes);
+ mem = realloc(mem, n_bytes == 0 ? 1 : n_bytes);
+ if (!mem) __coverity_panic__();
+ return mem;
+}
+
+void *g_try_malloc(size_t n_bytes)
+{
+ __coverity_negative_sink__(n_bytes);
+ return malloc(n_bytes == 0 ? 1 : n_bytes);
+}
+
+void *g_try_malloc0(size_t n_bytes)
+{
+ __coverity_negative_sink__(n_bytes);
+ return calloc(1, n_bytes == 0 ? 1 : n_bytes);
+}
+
+void *g_try_realloc(void *mem, size_t n_bytes)
+{
+ __coverity_negative_sink__(n_bytes);
+ return realloc(mem, n_bytes == 0 ? 1 : n_bytes);
+}
+
+/* Other glib functions */
+
+typedef struct _GIOChannel GIOChannel;
+GIOChannel *g_io_channel_unix_new(int fd)
+{
+ GIOChannel *c = g_malloc0(sizeof(GIOChannel));
+ __coverity_escape__(fd);
+ return c;
+}
+
+void g_assertion_message_expr(const char *domain,
+ const char *file,
+ int line,
+ const char *func,
+ const char *expr)
+{
+ __coverity_panic__();
+}
diff --git a/scripts/make-release b/scripts/make-release
index 196c755f57..fa6323fda8 100755
--- a/scripts/make-release
+++ b/scripts/make-release
@@ -18,7 +18,8 @@ git clone "${src}" ${destination}
pushd ${destination}
git checkout "v${version}"
git submodule update --init
-rm -rf .git roms/*/.git
+(cd roms/seabios && git describe --tags --long --dirty > .version)
+rm -rf .git roms/*/.git dtc/.git pixman/.git
popd
tar cfj ${destination}.tar.bz2 ${destination}
rm -rf ${destination}
diff --git a/scripts/qemu-binfmt-conf.sh b/scripts/qemu-binfmt-conf.sh
index 0da2618883..289b1a3963 100644
--- a/scripts/qemu-binfmt-conf.sh
+++ b/scripts/qemu-binfmt-conf.sh
@@ -41,6 +41,9 @@ if [ $cpu != "arm" ] ; then
echo ':arm:M::\x7fELF\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x28\x00:\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff:/usr/local/bin/qemu-arm:' > /proc/sys/fs/binfmt_misc/register
echo ':armeb:M::\x7fELF\x01\x02\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x28:\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff:/usr/local/bin/qemu-armeb:' > /proc/sys/fs/binfmt_misc/register
fi
+if [ $cpu != "aarch64" ] ; then
+ echo ':aarch64:M::\x7fELF\x02\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\xb7\x00:\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff:/usr/local/bin/qemu-aarch64:' > /proc/sys/fs/binfmt_misc/register
+fi
if [ $cpu != "sparc" ] ; then
echo ':sparc:M::\x7fELF\x01\x02\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x02:\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff:/usr/local/bin/qemu-sparc:' > /proc/sys/fs/binfmt_misc/register
fi
diff --git a/slirp/misc.c b/slirp/misc.c
index 6c1636f7b6..b8eb74cab0 100644
--- a/slirp/misc.c
+++ b/slirp/misc.c
@@ -136,7 +136,7 @@ fork_exec(struct socket *so, const char *ex, int do_pty)
if ((s = qemu_socket(AF_INET, SOCK_STREAM, 0)) < 0 ||
bind(s, (struct sockaddr *)&addr, addrlen) < 0 ||
listen(s, 1) < 0) {
- lprint("Error: inet socket: %s\n", strerror(errno));
+ error_report("Error: inet socket: %s", strerror(errno));
closesocket(s);
return 0;
@@ -146,7 +146,7 @@ fork_exec(struct socket *so, const char *ex, int do_pty)
pid = fork();
switch(pid) {
case -1:
- lprint("Error: fork failed: %s\n", strerror(errno));
+ error_report("Error: fork failed: %s", strerror(errno));
close(s);
return 0;
@@ -242,15 +242,6 @@ strdup(str)
}
#endif
-void lprint(const char *format, ...)
-{
- va_list args;
-
- va_start(args, format);
- monitor_vprintf(default_mon, format, args);
- va_end(args);
-}
-
void slirp_connection_info(Slirp *slirp, Monitor *mon)
{
const char * const tcpstates[] = {
diff --git a/slirp/slirp.c b/slirp/slirp.c
index bad8dad02e..3fb48a4921 100644
--- a/slirp/slirp.c
+++ b/slirp/slirp.c
@@ -139,7 +139,7 @@ int get_dns_addr(struct in_addr *pdns_addr)
return -1;
#ifdef DEBUG
- lprint("IP address of your DNS(s): ");
+ fprintf(stderr, "IP address of your DNS(s): ");
#endif
while (fgets(buff, 512, f) != NULL) {
if (sscanf(buff, "nameserver%*[ \t]%256s", buff2) == 1) {
@@ -153,17 +153,17 @@ int get_dns_addr(struct in_addr *pdns_addr)
}
#ifdef DEBUG
else
- lprint(", ");
+ fprintf(stderr, ", ");
#endif
if (++found > 3) {
#ifdef DEBUG
- lprint("(more)");
+ fprintf(stderr, "(more)");
#endif
break;
}
#ifdef DEBUG
else
- lprint("%s", inet_ntoa(tmp_addr));
+ fprintf(stderr, "%s", inet_ntoa(tmp_addr));
#endif
}
}
diff --git a/slirp/slirp.h b/slirp/slirp.h
index e4a1bd4abb..6589d7eef0 100644
--- a/slirp/slirp.h
+++ b/slirp/slirp.h
@@ -287,8 +287,6 @@ void if_start(struct ttys *);
long gethostid(void);
#endif
-void lprint(const char *, ...) GCC_FMT_ATTR(1, 2);
-
#ifndef _WIN32
#include <netdb.h>
#endif
diff --git a/slirp/tftp.c b/slirp/tftp.c
index 1a79c45cfb..a329fb281b 100644
--- a/slirp/tftp.c
+++ b/slirp/tftp.c
@@ -279,7 +279,7 @@ static void tftp_handle_rrq(Slirp *slirp, struct tftp_t *tp, int pktlen)
spt = &slirp->tftp_sessions[s];
- /* unspecifed prefix means service disabled */
+ /* unspecified prefix means service disabled */
if (!slirp->tftp_prefix) {
tftp_send_error(spt, 2, "Access violation", tp);
return;
diff --git a/stubs/Makefile.objs b/stubs/Makefile.objs
index df3aa7a64e..d99e2b9259 100644
--- a/stubs/Makefile.objs
+++ b/stubs/Makefile.objs
@@ -14,11 +14,12 @@ stub-obj-y += iothread-lock.o
stub-obj-y += migr-blocker.o
stub-obj-y += mon-is-qmp.o
stub-obj-y += mon-printf.o
-stub-obj-y += mon-print-filename.o
stub-obj-y += mon-protocol-event.o
stub-obj-y += mon-set-error.o
stub-obj-y += pci-drive-hot-add.o
+stub-obj-y += qtest.o
stub-obj-y += reset.o
+stub-obj-y += runstate-check.o
stub-obj-y += set-fd-handler.o
stub-obj-y += slirp.o
stub-obj-y += sysbus.o
diff --git a/stubs/arch-query-cpu-def.c b/stubs/arch-query-cpu-def.c
index fa6789598a..22e0b43de9 100644
--- a/stubs/arch-query-cpu-def.c
+++ b/stubs/arch-query-cpu-def.c
@@ -4,6 +4,6 @@
CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
{
- error_set(errp, QERR_NOT_SUPPORTED);
+ error_set(errp, QERR_UNSUPPORTED);
return NULL;
}
diff --git a/stubs/mon-print-filename.c b/stubs/mon-print-filename.c
deleted file mode 100644
index 9c939641ff..0000000000
--- a/stubs/mon-print-filename.c
+++ /dev/null
@@ -1,6 +0,0 @@
-#include "qemu-common.h"
-#include "monitor/monitor.h"
-
-void monitor_print_filename(Monitor *mon, const char *filename)
-{
-}
diff --git a/stubs/qtest.c b/stubs/qtest.c
new file mode 100644
index 0000000000..e671ed8e8e
--- /dev/null
+++ b/stubs/qtest.c
@@ -0,0 +1,14 @@
+/*
+ * qtest stubs
+ *
+ * Copyright (c) 2014 Linaro Limited
+ * Written by Peter Maydell
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "qemu-common.h"
+
+/* Needed for qtest_allowed() */
+bool qtest_allowed;
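
A bare variable definition is enough for this stub because the accessor in include/sysemu/qtest.h is a static inline that reads the flag directly (assumption: paraphrased, exact wording in the header may differ):

```c
/* Sketch, paraphrased from include/sysemu/qtest.h of this era. */
extern bool qtest_allowed;

static inline bool qtest_enabled(void)
{
    return qtest_allowed;
}
```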
diff --git a/stubs/runstate-check.c b/stubs/runstate-check.c
new file mode 100644
index 0000000000..bd2e3757ae
--- /dev/null
+++ b/stubs/runstate-check.c
@@ -0,0 +1,6 @@
+#include "sysemu/sysemu.h"
+
+bool runstate_check(RunState state)
+{
+ return state == RUN_STATE_PRELAUNCH;
+}
diff --git a/target-alpha/cpu.c b/target-alpha/cpu.c
index a0d5d5bd93..7ec46b90fc 100644
--- a/target-alpha/cpu.c
+++ b/target-alpha/cpu.c
@@ -31,6 +31,21 @@ static void alpha_cpu_set_pc(CPUState *cs, vaddr value)
cpu->env.pc = value;
}
+static bool alpha_cpu_has_work(CPUState *cs)
+{
+ /* Here we are checking to see if the CPU should wake up from HALT.
+ We will have gotten into this state only for WTINT from PALmode. */
+ /* ??? I'm not sure how the IPL state works with WTINT to keep a CPU
+ asleep even if (some) interrupts have been asserted. For now,
+ assume that if a CPU really wants to stay asleep, it will mask
+ interrupts at the chipset level, which will prevent these bits
+ from being set in the first place. */
+ return cs->interrupt_request & (CPU_INTERRUPT_HARD
+ | CPU_INTERRUPT_TIMER
+ | CPU_INTERRUPT_SMP
+ | CPU_INTERRUPT_MCHK);
+}
+
static void alpha_cpu_realizefn(DeviceState *dev, Error **errp)
{
CPUState *cs = CPU(dev);
@@ -243,7 +258,7 @@ static void alpha_cpu_initfn(Object *obj)
cs->env_ptr = env;
cpu_exec_init(env);
- tlb_flush(env, 1);
+ tlb_flush(cs, 1);
alpha_translate_init();
@@ -267,12 +282,15 @@ static void alpha_cpu_class_init(ObjectClass *oc, void *data)
dc->realize = alpha_cpu_realizefn;
cc->class_by_name = alpha_cpu_class_by_name;
+ cc->has_work = alpha_cpu_has_work;
cc->do_interrupt = alpha_cpu_do_interrupt;
cc->dump_state = alpha_cpu_dump_state;
cc->set_pc = alpha_cpu_set_pc;
cc->gdb_read_register = alpha_cpu_gdb_read_register;
cc->gdb_write_register = alpha_cpu_gdb_write_register;
-#ifndef CONFIG_USER_ONLY
+#ifdef CONFIG_USER_ONLY
+ cc->handle_mmu_fault = alpha_cpu_handle_mmu_fault;
+#else
cc->do_unassigned_access = alpha_cpu_unassigned_access;
cc->get_phys_page_debug = alpha_cpu_get_phys_page_debug;
dc->vmsd = &vmstate_alpha_cpu;
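
The has_work check moves out of the per-target cpu.h (see the matching removal in target-alpha/cpu.h below) and becomes a CPUClass hook. A sketch of the generic dispatch this hook plugs into, paraphrased from include/qom/cpu.h of roughly this era (assumption: details may differ):

```c
/* Sketch of the generic caller of the cc->has_work hook installed above. */
static inline bool cpu_has_work(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    g_assert(cc->has_work);
    return cc->has_work(cpu);
}
```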
diff --git a/target-alpha/cpu.h b/target-alpha/cpu.h
index c85dc6e575..07d9f63d1f 100644
--- a/target-alpha/cpu.h
+++ b/target-alpha/cpu.h
@@ -446,9 +446,8 @@ int cpu_alpha_exec(CPUAlphaState *s);
is returned if the signal was handled by the virtual CPU. */
int cpu_alpha_signal_handler(int host_signum, void *pinfo,
void *puc);
-int cpu_alpha_handle_mmu_fault (CPUAlphaState *env, uint64_t address, int rw,
- int mmu_idx);
-#define cpu_handle_mmu_fault cpu_alpha_handle_mmu_fault
+int alpha_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw,
+ int mmu_idx);
void do_restore_state(CPUAlphaState *, uintptr_t retaddr);
void QEMU_NORETURN dynamic_excp(CPUAlphaState *, uintptr_t, int, int);
void QEMU_NORETURN arith_excp(CPUAlphaState *, uintptr_t, int, uint64_t);
@@ -498,21 +497,6 @@ static inline void cpu_get_tb_cpu_state(CPUAlphaState *env, target_ulong *pc,
*pflags = flags;
}
-static inline bool cpu_has_work(CPUState *cpu)
-{
- /* Here we are checking to see if the CPU should wake up from HALT.
- We will have gotten into this state only for WTINT from PALmode. */
- /* ??? I'm not sure how the IPL state works with WTINT to keep a CPU
- asleep even if (some) interrupts have been asserted. For now,
- assume that if a CPU really wants to stay asleep, it will mask
- interrupts at the chipset level, which will prevent these bits
- from being set in the first place. */
- return cpu->interrupt_request & (CPU_INTERRUPT_HARD
- | CPU_INTERRUPT_TIMER
- | CPU_INTERRUPT_SMP
- | CPU_INTERRUPT_MCHK);
-}
-
#include "exec/exec-all.h"
#endif /* !defined (__CPU_ALPHA_H__) */
diff --git a/target-alpha/fpu_helper.c b/target-alpha/fpu_helper.c
index fad3575549..ee731555d3 100644
--- a/target-alpha/fpu_helper.c
+++ b/target-alpha/fpu_helper.c
@@ -820,3 +820,10 @@ uint64_t helper_cvtqg(CPUAlphaState *env, uint64_t a)
fr = int64_to_float64(a, &FP_STATUS);
return float64_to_g(fr);
}
+
+void helper_fcvtql_v_input(CPUAlphaState *env, uint64_t val)
+{
+ if (val != (int32_t)val) {
+ arith_excp(env, GETPC(), EXC_M_IOV, 0);
+ }
+}
diff --git a/target-alpha/helper.c b/target-alpha/helper.c
index 025fdaf4d1..cbd03c415b 100644
--- a/target-alpha/helper.c
+++ b/target-alpha/helper.c
@@ -168,11 +168,13 @@ void helper_store_fpcr(CPUAlphaState *env, uint64_t val)
}
#if defined(CONFIG_USER_ONLY)
-int cpu_alpha_handle_mmu_fault(CPUAlphaState *env, target_ulong address,
+int alpha_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
int rw, int mmu_idx)
{
- env->exception_index = EXCP_MMFAULT;
- env->trap_arg0 = address;
+ AlphaCPU *cpu = ALPHA_CPU(cs);
+
+ cs->exception_index = EXCP_MMFAULT;
+ cpu->env.trap_arg0 = address;
return 1;
}
#else
@@ -213,7 +215,7 @@ static int get_physical_address(CPUAlphaState *env, target_ulong addr,
int prot_need, int mmu_idx,
target_ulong *pphys, int *pprot)
{
- CPUState *cs = ENV_GET_CPU(env);
+ CPUState *cs = CPU(alpha_env_get_cpu(env));
target_long saddr = addr;
target_ulong phys = 0;
target_ulong L1pte, L2pte, L3pte;
@@ -326,22 +328,24 @@ hwaddr alpha_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
return (fail >= 0 ? -1 : phys);
}
-int cpu_alpha_handle_mmu_fault(CPUAlphaState *env, target_ulong addr, int rw,
+int alpha_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, int rw,
int mmu_idx)
{
+ AlphaCPU *cpu = ALPHA_CPU(cs);
+ CPUAlphaState *env = &cpu->env;
target_ulong phys;
int prot, fail;
fail = get_physical_address(env, addr, 1 << rw, mmu_idx, &phys, &prot);
if (unlikely(fail >= 0)) {
- env->exception_index = EXCP_MMFAULT;
+ cs->exception_index = EXCP_MMFAULT;
env->trap_arg0 = addr;
env->trap_arg1 = fail;
env->trap_arg2 = (rw == 2 ? -1 : rw);
return 1;
}
- tlb_set_page(env, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
+ tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
prot, mmu_idx, TARGET_PAGE_SIZE);
return 0;
}
@@ -351,7 +355,7 @@ void alpha_cpu_do_interrupt(CPUState *cs)
{
AlphaCPU *cpu = ALPHA_CPU(cs);
CPUAlphaState *env = &cpu->env;
- int i = env->exception_index;
+ int i = cs->exception_index;
if (qemu_loglevel_mask(CPU_LOG_INT)) {
static int count;
@@ -402,7 +406,7 @@ void alpha_cpu_do_interrupt(CPUState *cs)
++count, name, env->error_code, env->pc, env->ir[IR_SP]);
}
- env->exception_index = -1;
+ cs->exception_index = -1;
#if !defined(CONFIG_USER_ONLY)
switch (i) {
@@ -448,7 +452,7 @@ void alpha_cpu_do_interrupt(CPUState *cs)
}
break;
default:
- cpu_abort(env, "Unhandled CPU exception");
+ cpu_abort(cs, "Unhandled CPU exception");
}
/* Remember where the exception happened. Emulate real hardware in
@@ -504,21 +508,27 @@ void alpha_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
We expect that ENV->PC has already been updated. */
void QEMU_NORETURN helper_excp(CPUAlphaState *env, int excp, int error)
{
- env->exception_index = excp;
+ AlphaCPU *cpu = alpha_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
+
+ cs->exception_index = excp;
env->error_code = error;
- cpu_loop_exit(env);
+ cpu_loop_exit(cs);
}
/* This may be called from any of the helpers to set up EXCEPTION_INDEX. */
void QEMU_NORETURN dynamic_excp(CPUAlphaState *env, uintptr_t retaddr,
int excp, int error)
{
- env->exception_index = excp;
+ AlphaCPU *cpu = alpha_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
+
+ cs->exception_index = excp;
env->error_code = error;
if (retaddr) {
- cpu_restore_state(env, retaddr);
+ cpu_restore_state(cs, retaddr);
}
- cpu_loop_exit(env);
+ cpu_loop_exit(cs);
}
void QEMU_NORETURN arith_excp(CPUAlphaState *env, uintptr_t retaddr,
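
The `CPU(alpha_env_get_cpu(env))` pattern used throughout these hunks recovers the QOM CPUState that wraps a bare CPUAlphaState. A sketch of the helper it relies on, paraphrased from target-alpha/cpu-qom.h (assumption: the exact definition may differ):

```c
/* Sketch: CPUAlphaState is embedded in AlphaCPU, so container_of()
 * recovers the QOM object and CPU() upcasts it to CPUState. */
static inline AlphaCPU *alpha_env_get_cpu(CPUAlphaState *env)
{
    return container_of(env, AlphaCPU, env);
}
```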
diff --git a/target-alpha/helper.h b/target-alpha/helper.h
index 4f127c49c5..2389e96ea3 100644
--- a/target-alpha/helper.h
+++ b/target-alpha/helper.h
@@ -96,6 +96,7 @@ DEF_HELPER_FLAGS_3(fp_exc_raise_s, TCG_CALL_NO_WG, void, env, i32, i32)
DEF_HELPER_FLAGS_2(ieee_input, TCG_CALL_NO_WG, void, env, i64)
DEF_HELPER_FLAGS_2(ieee_input_cmp, TCG_CALL_NO_WG, void, env, i64)
+DEF_HELPER_FLAGS_2(fcvtql_v_input, TCG_CALL_NO_WG, void, env, i64)
#if !defined (CONFIG_USER_ONLY)
DEF_HELPER_2(hw_ret, void, env, i64)
diff --git a/target-alpha/mem_helper.c b/target-alpha/mem_helper.c
index ea587043d4..5964bdcda8 100644
--- a/target-alpha/mem_helper.c
+++ b/target-alpha/mem_helper.c
@@ -26,45 +26,45 @@
uint64_t helper_ldl_phys(CPUAlphaState *env, uint64_t p)
{
- CPUState *cs = ENV_GET_CPU(env);
+ CPUState *cs = CPU(alpha_env_get_cpu(env));
return (int32_t)ldl_phys(cs->as, p);
}
uint64_t helper_ldq_phys(CPUAlphaState *env, uint64_t p)
{
- CPUState *cs = ENV_GET_CPU(env);
+ CPUState *cs = CPU(alpha_env_get_cpu(env));
return ldq_phys(cs->as, p);
}
uint64_t helper_ldl_l_phys(CPUAlphaState *env, uint64_t p)
{
- CPUState *cs = ENV_GET_CPU(env);
+ CPUState *cs = CPU(alpha_env_get_cpu(env));
env->lock_addr = p;
return env->lock_value = (int32_t)ldl_phys(cs->as, p);
}
uint64_t helper_ldq_l_phys(CPUAlphaState *env, uint64_t p)
{
- CPUState *cs = ENV_GET_CPU(env);
+ CPUState *cs = CPU(alpha_env_get_cpu(env));
env->lock_addr = p;
return env->lock_value = ldq_phys(cs->as, p);
}
void helper_stl_phys(CPUAlphaState *env, uint64_t p, uint64_t v)
{
- CPUState *cs = ENV_GET_CPU(env);
+ CPUState *cs = CPU(alpha_env_get_cpu(env));
stl_phys(cs->as, p, v);
}
void helper_stq_phys(CPUAlphaState *env, uint64_t p, uint64_t v)
{
- CPUState *cs = ENV_GET_CPU(env);
+ CPUState *cs = CPU(alpha_env_get_cpu(env));
stq_phys(cs->as, p, v);
}
uint64_t helper_stl_c_phys(CPUAlphaState *env, uint64_t p, uint64_t v)
{
- CPUState *cs = ENV_GET_CPU(env);
+ CPUState *cs = CPU(alpha_env_get_cpu(env));
uint64_t ret = 0;
if (p == env->lock_addr) {
@@ -81,7 +81,7 @@ uint64_t helper_stl_c_phys(CPUAlphaState *env, uint64_t p, uint64_t v)
uint64_t helper_stq_c_phys(CPUAlphaState *env, uint64_t p, uint64_t v)
{
- CPUState *cs = ENV_GET_CPU(env);
+ CPUState *cs = CPU(alpha_env_get_cpu(env));
uint64_t ret = 0;
if (p == env->lock_addr) {
@@ -99,11 +99,13 @@ uint64_t helper_stq_c_phys(CPUAlphaState *env, uint64_t p, uint64_t v)
static void do_unaligned_access(CPUAlphaState *env, target_ulong addr,
int is_write, int is_user, uintptr_t retaddr)
{
+ AlphaCPU *cpu = alpha_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
uint64_t pc;
uint32_t insn;
if (retaddr) {
- cpu_restore_state(env, retaddr);
+ cpu_restore_state(cs, retaddr);
}
pc = env->pc;
@@ -112,9 +114,9 @@ static void do_unaligned_access(CPUAlphaState *env, target_ulong addr,
env->trap_arg0 = addr;
env->trap_arg1 = insn >> 26; /* opcode */
env->trap_arg2 = (insn >> 21) & 31; /* dest regno */
- env->exception_index = EXCP_UNALIGN;
+ cs->exception_index = EXCP_UNALIGN;
env->error_code = 0;
- cpu_loop_exit(env);
+ cpu_loop_exit(cs);
}
void alpha_cpu_unassigned_access(CPUState *cs, hwaddr addr,
@@ -150,18 +152,18 @@ void alpha_cpu_unassigned_access(CPUState *cs, hwaddr addr,
NULL, it means that the function was called in C code (i.e. not
from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
-void tlb_fill(CPUAlphaState *env, target_ulong addr, int is_write,
+void tlb_fill(CPUState *cs, target_ulong addr, int is_write,
int mmu_idx, uintptr_t retaddr)
{
int ret;
- ret = cpu_alpha_handle_mmu_fault(env, addr, is_write, mmu_idx);
+ ret = alpha_cpu_handle_mmu_fault(cs, addr, is_write, mmu_idx);
if (unlikely(ret != 0)) {
if (retaddr) {
- cpu_restore_state(env, retaddr);
+ cpu_restore_state(cs, retaddr);
}
/* Exception index and error code are already set */
- cpu_loop_exit(env);
+ cpu_loop_exit(cs);
}
}
#endif /* CONFIG_USER_ONLY */
diff --git a/target-alpha/sys_helper.c b/target-alpha/sys_helper.c
index 035810c27c..187ccf7297 100644
--- a/target-alpha/sys_helper.c
+++ b/target-alpha/sys_helper.c
@@ -64,12 +64,12 @@ void helper_call_pal(CPUAlphaState *env, uint64_t pc, uint64_t entry_ofs)
void helper_tbia(CPUAlphaState *env)
{
- tlb_flush(env, 1);
+ tlb_flush(CPU(alpha_env_get_cpu(env)), 1);
}
void helper_tbis(CPUAlphaState *env, uint64_t p)
{
- tlb_flush_page(env, p);
+ tlb_flush_page(CPU(alpha_env_get_cpu(env)), p);
}
void helper_tb_flush(CPUAlphaState *env)
diff --git a/target-alpha/translate.c b/target-alpha/translate.c
index 4c94bed704..d0357ff114 100644
--- a/target-alpha/translate.c
+++ b/target-alpha/translate.c
@@ -49,6 +49,12 @@ struct DisasContext {
/* implver value for this CPU. */
int implver;
+ /* Temporaries for $31 and $f31 as source and destination. */
+ TCGv zero;
+ TCGv sink;
+ /* Temporary for immediate constants. */
+ TCGv lit;
+
bool singlestep_enabled;
};
@@ -83,64 +89,128 @@ static TCGv cpu_pc;
static TCGv cpu_lock_addr;
static TCGv cpu_lock_st_addr;
static TCGv cpu_lock_value;
-static TCGv cpu_unique;
-#ifndef CONFIG_USER_ONLY
-static TCGv cpu_sysval;
-static TCGv cpu_usp;
-#endif
-
-/* register names */
-static char cpu_reg_names[10*4+21*5 + 10*5+21*6];
#include "exec/gen-icount.h"
void alpha_translate_init(void)
{
+#define DEF_VAR(V) { &cpu_##V, #V, offsetof(CPUAlphaState, V) }
+
+ typedef struct { TCGv *var; const char *name; int ofs; } GlobalVar;
+ static const GlobalVar vars[] = {
+ DEF_VAR(pc),
+ DEF_VAR(lock_addr),
+ DEF_VAR(lock_st_addr),
+ DEF_VAR(lock_value),
+ };
+
+#undef DEF_VAR
+
+ /* Use the symbolic register names that match the disassembler. */
+ static const char greg_names[31][4] = {
+ "v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
+ "t7", "s0", "s1", "s2", "s3", "s4", "s5", "fp",
+ "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
+ "t10", "t11", "ra", "t12", "at", "gp", "sp"
+ };
+ static const char freg_names[31][4] = {
+ "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
+ "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
+ "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
+ "f24", "f25", "f26", "f27", "f28", "f29", "f30"
+ };
+
+ static bool done_init = 0;
int i;
- char *p;
- static int done_init = 0;
- if (done_init)
+ if (done_init) {
return;
+ }
+ done_init = 1;
cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
- p = cpu_reg_names;
for (i = 0; i < 31; i++) {
- sprintf(p, "ir%d", i);
cpu_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
- offsetof(CPUAlphaState, ir[i]), p);
- p += (i < 10) ? 4 : 5;
+ offsetof(CPUAlphaState, ir[i]),
+ greg_names[i]);
+ }
- sprintf(p, "fir%d", i);
+ for (i = 0; i < 31; i++) {
cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0,
- offsetof(CPUAlphaState, fir[i]), p);
- p += (i < 10) ? 5 : 6;
+ offsetof(CPUAlphaState, fir[i]),
+ freg_names[i]);
}
- cpu_pc = tcg_global_mem_new_i64(TCG_AREG0,
- offsetof(CPUAlphaState, pc), "pc");
-
- cpu_lock_addr = tcg_global_mem_new_i64(TCG_AREG0,
- offsetof(CPUAlphaState, lock_addr),
- "lock_addr");
- cpu_lock_st_addr = tcg_global_mem_new_i64(TCG_AREG0,
- offsetof(CPUAlphaState, lock_st_addr),
- "lock_st_addr");
- cpu_lock_value = tcg_global_mem_new_i64(TCG_AREG0,
- offsetof(CPUAlphaState, lock_value),
- "lock_value");
-
- cpu_unique = tcg_global_mem_new_i64(TCG_AREG0,
- offsetof(CPUAlphaState, unique), "unique");
-#ifndef CONFIG_USER_ONLY
- cpu_sysval = tcg_global_mem_new_i64(TCG_AREG0,
- offsetof(CPUAlphaState, sysval), "sysval");
- cpu_usp = tcg_global_mem_new_i64(TCG_AREG0,
- offsetof(CPUAlphaState, usp), "usp");
-#endif
+ for (i = 0; i < ARRAY_SIZE(vars); ++i) {
+ const GlobalVar *v = &vars[i];
+ *v->var = tcg_global_mem_new_i64(TCG_AREG0, v->ofs, v->name);
+ }
+}
- done_init = 1;
+static TCGv load_zero(DisasContext *ctx)
+{
+ if (TCGV_IS_UNUSED_I64(ctx->zero)) {
+ ctx->zero = tcg_const_i64(0);
+ }
+ return ctx->zero;
+}
+
+static TCGv dest_sink(DisasContext *ctx)
+{
+ if (TCGV_IS_UNUSED_I64(ctx->sink)) {
+ ctx->sink = tcg_temp_new();
+ }
+ return ctx->sink;
+}
+
+static TCGv load_gpr(DisasContext *ctx, unsigned reg)
+{
+ if (likely(reg < 31)) {
+ return cpu_ir[reg];
+ } else {
+ return load_zero(ctx);
+ }
+}
+
+static TCGv load_gpr_lit(DisasContext *ctx, unsigned reg,
+ uint8_t lit, bool islit)
+{
+ if (islit) {
+ ctx->lit = tcg_const_i64(lit);
+ return ctx->lit;
+ } else if (likely(reg < 31)) {
+ return cpu_ir[reg];
+ } else {
+ return load_zero(ctx);
+ }
+}
+
+static TCGv dest_gpr(DisasContext *ctx, unsigned reg)
+{
+ if (likely(reg < 31)) {
+ return cpu_ir[reg];
+ } else {
+ return dest_sink(ctx);
+ }
+}
+
+static TCGv load_fpr(DisasContext *ctx, unsigned reg)
+{
+ if (likely(reg < 31)) {
+ return cpu_fir[reg];
+ } else {
+ return load_zero(ctx);
+ }
+}
+
+static TCGv dest_fpr(DisasContext *ctx, unsigned reg)
+{
+ if (likely(reg < 31)) {
+ return cpu_fir[reg];
+ } else {
+ return dest_sink(ctx);
+ }
}
static void gen_excp_1(int exception, int error_code)
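
With these helpers, reads of register 31 come back as a shared constant-zero temporary and writes to it land in a throwaway sink, so the opcode emitters rewritten below can drop their explicit $31 special cases. A hypothetical emitter sketch, mirroring the ADDQ case later in this patch:

```c
/* Hypothetical sketch (not from the patch): the helpers hide the $31
 * special cases, leaving the emitter a single TCG op. */
static void gen_addq_sketch(DisasContext *ctx, int ra, int rb, int rc)
{
    TCGv va = load_gpr(ctx, ra);   /* $31 reads as a constant zero */
    TCGv vb = load_gpr(ctx, rb);
    TCGv vc = dest_gpr(ctx, rc);   /* writes to $31 go to the sink */

    tcg_gen_add_i64(vc, va, vb);
}
```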
@@ -207,10 +277,10 @@ static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
static inline void gen_load_mem(DisasContext *ctx,
void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
int flags),
- int ra, int rb, int32_t disp16, int fp,
- int clear)
+ int ra, int rb, int32_t disp16, bool fp,
+ bool clear)
{
- TCGv addr, va;
+ TCGv tmp, addr, va;
/* LDQ_U with ra $31 is UNOP. Other various loads are forms of
prefetches, which we can treat as nops. No worries about
@@ -219,23 +289,22 @@ static inline void gen_load_mem(DisasContext *ctx,
return;
}
- addr = tcg_temp_new();
- if (rb != 31) {
- tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
- if (clear) {
- tcg_gen_andi_i64(addr, addr, ~0x7);
- }
- } else {
- if (clear) {
- disp16 &= ~0x7;
- }
- tcg_gen_movi_i64(addr, disp16);
+ tmp = tcg_temp_new();
+ addr = load_gpr(ctx, rb);
+
+ if (disp16) {
+ tcg_gen_addi_i64(tmp, addr, disp16);
+ addr = tmp;
+ }
+ if (clear) {
+ tcg_gen_andi_i64(tmp, addr, ~0x7);
+ addr = tmp;
}
va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
tcg_gen_qemu_load(va, addr, ctx->mem_idx);
- tcg_temp_free(addr);
+ tcg_temp_free(tmp);
}
static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
@@ -265,35 +334,27 @@ static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
static inline void gen_store_mem(DisasContext *ctx,
void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
int flags),
- int ra, int rb, int32_t disp16, int fp,
- int clear)
+ int ra, int rb, int32_t disp16, bool fp,
+ bool clear)
{
- TCGv addr, va;
+ TCGv tmp, addr, va;
- addr = tcg_temp_new();
- if (rb != 31) {
- tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
- if (clear) {
- tcg_gen_andi_i64(addr, addr, ~0x7);
- }
- } else {
- if (clear) {
- disp16 &= ~0x7;
- }
- tcg_gen_movi_i64(addr, disp16);
- }
+ tmp = tcg_temp_new();
+ addr = load_gpr(ctx, rb);
- if (ra == 31) {
- va = tcg_const_i64(0);
- } else {
- va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
+ if (disp16) {
+ tcg_gen_addi_i64(tmp, addr, disp16);
+ addr = tmp;
}
+ if (clear) {
+ tcg_gen_andi_i64(tmp, addr, ~0x7);
+ addr = tmp;
+ }
+
+ va = (fp ? load_fpr(ctx, ra) : load_gpr(ctx, ra));
tcg_gen_qemu_store(va, addr, ctx->mem_idx);
- tcg_temp_free(addr);
- if (ra == 31) {
- tcg_temp_free(va);
- }
+ tcg_temp_free(tmp);
}
static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb,
@@ -313,11 +374,7 @@ static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb,
addr = tcg_temp_local_new();
#endif
- if (rb != 31) {
- tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
- } else {
- tcg_gen_movi_i64(addr, disp16);
- }
+ tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
#if defined(CONFIG_USER_ONLY)
/* ??? This is handled via a complicated version of compare-and-swap
@@ -367,7 +424,8 @@ static bool in_superpage(DisasContext *ctx, int64_t addr)
static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
{
/* Suppress goto_tb in the case of single-steping and IO. */
- if (ctx->singlestep_enabled || (ctx->tb->cflags & CF_LAST_IO)) {
+ if ((ctx->tb->cflags & CF_LAST_IO)
+ || ctx->singlestep_enabled || singlestep) {
return false;
}
/* If the destination is in the superpage, the page perms can't change. */
@@ -438,15 +496,11 @@ static ExitStatus gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
{
TCGv cmp_tmp;
- if (unlikely(ra == 31)) {
- cmp_tmp = tcg_const_i64(0);
- } else {
+ if (mask) {
cmp_tmp = tcg_temp_new();
- if (mask) {
- tcg_gen_andi_i64(cmp_tmp, cpu_ir[ra], 1);
- } else {
- tcg_gen_mov_i64(cmp_tmp, cpu_ir[ra]);
- }
+ tcg_gen_andi_i64(cmp_tmp, load_gpr(ctx, ra), 1);
+ } else {
+ cmp_tmp = load_gpr(ctx, ra);
}
return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
@@ -487,83 +541,23 @@ static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
static ExitStatus gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
int32_t disp)
{
- TCGv cmp_tmp;
-
- if (unlikely(ra == 31)) {
- /* Very uncommon case, but easier to optimize it to an integer
- comparison than continuing with the floating point comparison. */
- return gen_bcond(ctx, cond, ra, disp, 0);
- }
-
- cmp_tmp = tcg_temp_new();
- gen_fold_mzero(cond, cmp_tmp, cpu_fir[ra]);
+ TCGv cmp_tmp = tcg_temp_new();
+ gen_fold_mzero(cond, cmp_tmp, load_fpr(ctx, ra));
return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}
-static void gen_cmov(TCGCond cond, int ra, int rb, int rc,
- int islit, uint8_t lit, int mask)
-{
- TCGv_i64 c1, z, v1;
-
- if (unlikely(rc == 31)) {
- return;
- }
-
- if (ra == 31) {
- /* Very uncommon case - Do not bother to optimize. */
- c1 = tcg_const_i64(0);
- } else if (mask) {
- c1 = tcg_const_i64(1);
- tcg_gen_and_i64(c1, c1, cpu_ir[ra]);
- } else {
- c1 = cpu_ir[ra];
- }
- if (islit) {
- v1 = tcg_const_i64(lit);
- } else {
- v1 = cpu_ir[rb];
- }
- z = tcg_const_i64(0);
-
- tcg_gen_movcond_i64(cond, cpu_ir[rc], c1, z, v1, cpu_ir[rc]);
-
- tcg_temp_free_i64(z);
- if (ra == 31 || mask) {
- tcg_temp_free_i64(c1);
- }
- if (islit) {
- tcg_temp_free_i64(v1);
- }
-}
-
-static void gen_fcmov(TCGCond cond, int ra, int rb, int rc)
+static void gen_fcmov(DisasContext *ctx, TCGCond cond, int ra, int rb, int rc)
{
- TCGv_i64 c1, z, v1;
-
- if (unlikely(rc == 31)) {
- return;
- }
+ TCGv_i64 va, vb, z;
- c1 = tcg_temp_new_i64();
- if (unlikely(ra == 31)) {
- tcg_gen_movi_i64(c1, 0);
- } else {
- gen_fold_mzero(cond, c1, cpu_fir[ra]);
- }
- if (rb == 31) {
- v1 = tcg_const_i64(0);
- } else {
- v1 = cpu_fir[rb];
- }
- z = tcg_const_i64(0);
+ z = load_zero(ctx);
+ vb = load_fpr(ctx, rb);
+ va = tcg_temp_new();
+ gen_fold_mzero(cond, va, load_fpr(ctx, ra));
- tcg_gen_movcond_i64(cond, cpu_fir[rc], c1, z, v1, cpu_fir[rc]);
+ tcg_gen_movcond_i64(cond, dest_fpr(ctx, rc), va, z, vb, load_fpr(ctx, rc));
- tcg_temp_free_i64(z);
- tcg_temp_free_i64(c1);
- if (rb == 31) {
- tcg_temp_free_i64(v1);
- }
+ tcg_temp_free(va);
}
#define QUAL_RM_N 0x080 /* Round mode nearest even */
@@ -647,21 +641,21 @@ static void gen_qual_flushzero(DisasContext *ctx, int fn11)
tcg_temp_free_i32(tmp);
}
-static TCGv gen_ieee_input(int reg, int fn11, int is_cmp)
+static TCGv gen_ieee_input(DisasContext *ctx, int reg, int fn11, int is_cmp)
{
TCGv val;
- if (reg == 31) {
- val = tcg_const_i64(0);
+
+ if (unlikely(reg == 31)) {
+ val = load_zero(ctx);
} else {
+ val = cpu_fir[reg];
if ((fn11 & QUAL_S) == 0) {
if (is_cmp) {
- gen_helper_ieee_input_cmp(cpu_env, cpu_fir[reg]);
+ gen_helper_ieee_input_cmp(cpu_env, val);
} else {
- gen_helper_ieee_input(cpu_env, cpu_fir[reg]);
+ gen_helper_ieee_input(cpu_env, val);
}
}
- val = tcg_temp_new();
- tcg_gen_mov_i64(val, cpu_fir[reg]);
}
return val;
}
@@ -721,105 +715,46 @@ static inline void gen_fp_exc_raise(int rc, int fn11)
gen_fp_exc_raise_ignore(rc, fn11, fn11 & QUAL_I ? 0 : float_flag_inexact);
}
-static void gen_fcvtlq(int rb, int rc)
+static void gen_fcvtlq(TCGv vc, TCGv vb)
{
- if (unlikely(rc == 31)) {
- return;
- }
- if (unlikely(rb == 31)) {
- tcg_gen_movi_i64(cpu_fir[rc], 0);
- } else {
- TCGv tmp = tcg_temp_new();
+ TCGv tmp = tcg_temp_new();
- /* The arithmetic right shift here, plus the sign-extended mask below
- yields a sign-extended result without an explicit ext32s_i64. */
- tcg_gen_sari_i64(tmp, cpu_fir[rb], 32);
- tcg_gen_shri_i64(cpu_fir[rc], cpu_fir[rb], 29);
- tcg_gen_andi_i64(tmp, tmp, (int32_t)0xc0000000);
- tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rc], 0x3fffffff);
- tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);
+ /* The arithmetic right shift here, plus the sign-extended mask below
+ yields a sign-extended result without an explicit ext32s_i64. */
+ tcg_gen_sari_i64(tmp, vb, 32);
+ tcg_gen_shri_i64(vc, vb, 29);
+ tcg_gen_andi_i64(tmp, tmp, (int32_t)0xc0000000);
+ tcg_gen_andi_i64(vc, vc, 0x3fffffff);
+ tcg_gen_or_i64(vc, vc, tmp);
- tcg_temp_free(tmp);
- }
+ tcg_temp_free(tmp);
}
-static void gen_fcvtql(int rb, int rc)
+static void gen_fcvtql(TCGv vc, TCGv vb)
{
- if (unlikely(rc == 31)) {
- return;
- }
- if (unlikely(rb == 31)) {
- tcg_gen_movi_i64(cpu_fir[rc], 0);
- } else {
- TCGv tmp = tcg_temp_new();
+ TCGv tmp = tcg_temp_new();
- tcg_gen_andi_i64(tmp, cpu_fir[rb], 0xC0000000);
- tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rb], 0x3FFFFFFF);
- tcg_gen_shli_i64(tmp, tmp, 32);
- tcg_gen_shli_i64(cpu_fir[rc], cpu_fir[rc], 29);
- tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);
+ tcg_gen_andi_i64(tmp, vb, (int32_t)0xc0000000);
+ tcg_gen_andi_i64(vc, vb, 0x3FFFFFFF);
+ tcg_gen_shli_i64(tmp, tmp, 32);
+ tcg_gen_shli_i64(vc, vc, 29);
+ tcg_gen_or_i64(vc, vc, tmp);
- tcg_temp_free(tmp);
- }
+ tcg_temp_free(tmp);
}
-static void gen_fcvtql_v(DisasContext *ctx, int rb, int rc)
-{
- if (rb != 31) {
- int lab = gen_new_label();
- TCGv tmp = tcg_temp_new();
-
- tcg_gen_ext32s_i64(tmp, cpu_fir[rb]);
- tcg_gen_brcond_i64(TCG_COND_EQ, tmp, cpu_fir[rb], lab);
- gen_excp(ctx, EXCP_ARITH, EXC_M_IOV);
-
- gen_set_label(lab);
- }
- gen_fcvtql(rb, rc);
-}
-
-#define FARITH2(name) \
- static inline void glue(gen_f, name)(int rb, int rc) \
- { \
- if (unlikely(rc == 31)) { \
- return; \
- } \
- if (rb != 31) { \
- gen_helper_ ## name(cpu_fir[rc], cpu_env, cpu_fir[rb]); \
- } else { \
- TCGv tmp = tcg_const_i64(0); \
- gen_helper_ ## name(cpu_fir[rc], cpu_env, tmp); \
- tcg_temp_free(tmp); \
- } \
- }
-
-/* ??? VAX instruction qualifiers ignored. */
-FARITH2(sqrtf)
-FARITH2(sqrtg)
-FARITH2(cvtgf)
-FARITH2(cvtgq)
-FARITH2(cvtqf)
-FARITH2(cvtqg)
-
static void gen_ieee_arith2(DisasContext *ctx,
void (*helper)(TCGv, TCGv_ptr, TCGv),
int rb, int rc, int fn11)
{
TCGv vb;
- /* ??? This is wrong: the instruction is not a nop, it still may
- raise exceptions. */
- if (unlikely(rc == 31)) {
- return;
- }
-
gen_qual_roundmode(ctx, fn11);
gen_qual_flushzero(ctx, fn11);
gen_fp_exc_clear();
- vb = gen_ieee_input(rb, fn11, 0);
- helper(cpu_fir[rc], cpu_env, vb);
- tcg_temp_free(vb);
+ vb = gen_ieee_input(ctx, rb, fn11, 0);
+ helper(dest_fpr(ctx, rc), cpu_env, vb);
gen_fp_exc_raise(rc, fn11);
}
@@ -837,40 +772,34 @@ IEEE_ARITH2(cvtts)
static void gen_fcvttq(DisasContext *ctx, int rb, int rc, int fn11)
{
- TCGv vb;
+ TCGv vb, vc;
int ignore = 0;
- /* ??? This is wrong: the instruction is not a nop, it still may
- raise exceptions. */
- if (unlikely(rc == 31)) {
- return;
- }
-
/* No need to set flushzero, since we have an integer output. */
gen_fp_exc_clear();
- vb = gen_ieee_input(rb, fn11, 0);
+ vb = gen_ieee_input(ctx, rb, fn11, 0);
+ vc = dest_fpr(ctx, rc);
/* Almost all integer conversions use cropped rounding, and most
also do not have integer overflow enabled. Special case that. */
switch (fn11) {
case QUAL_RM_C:
- gen_helper_cvttq_c(cpu_fir[rc], cpu_env, vb);
+ gen_helper_cvttq_c(vc, cpu_env, vb);
break;
case QUAL_V | QUAL_RM_C:
case QUAL_S | QUAL_V | QUAL_RM_C:
ignore = float_flag_inexact;
/* FALLTHRU */
case QUAL_S | QUAL_V | QUAL_I | QUAL_RM_C:
- gen_helper_cvttq_svic(cpu_fir[rc], cpu_env, vb);
+ gen_helper_cvttq_svic(vc, cpu_env, vb);
break;
default:
gen_qual_roundmode(ctx, fn11);
- gen_helper_cvttq(cpu_fir[rc], cpu_env, vb);
+ gen_helper_cvttq(vc, cpu_env, vb);
ignore |= (fn11 & QUAL_V ? 0 : float_flag_overflow);
ignore |= (fn11 & QUAL_I ? 0 : float_flag_inexact);
break;
}
- tcg_temp_free(vb);
gen_fp_exc_raise_ignore(rc, fn11, ignore);
}
@@ -879,35 +808,21 @@ static void gen_ieee_intcvt(DisasContext *ctx,
void (*helper)(TCGv, TCGv_ptr, TCGv),
int rb, int rc, int fn11)
{
- TCGv vb;
-
- /* ??? This is wrong: the instruction is not a nop, it still may
- raise exceptions. */
- if (unlikely(rc == 31)) {
- return;
- }
+ TCGv vb, vc;
gen_qual_roundmode(ctx, fn11);
-
- if (rb == 31) {
- vb = tcg_const_i64(0);
- } else {
- vb = cpu_fir[rb];
- }
+ vb = load_fpr(ctx, rb);
+ vc = dest_fpr(ctx, rc);
/* The only exception that can be raised by integer conversion
is inexact. Thus we only need to worry about exceptions when
inexact handling is requested. */
if (fn11 & QUAL_I) {
gen_fp_exc_clear();
- helper(cpu_fir[rc], cpu_env, vb);
+ helper(vc, cpu_env, vb);
gen_fp_exc_raise(rc, fn11);
} else {
- helper(cpu_fir[rc], cpu_env, vb);
- }
-
- if (rb == 31) {
- tcg_temp_free(vb);
+ helper(vc, cpu_env, vb);
}
}
@@ -920,144 +835,38 @@ static inline void glue(gen_f, name)(DisasContext *ctx, \
IEEE_INTCVT(cvtqs)
IEEE_INTCVT(cvtqt)
-static void gen_cpys_internal(int ra, int rb, int rc, int inv_a, uint64_t mask)
+static void gen_cpy_mask(TCGv vc, TCGv va, TCGv vb, bool inv_a, uint64_t mask)
{
- TCGv va, vb, vmask;
- int za = 0, zb = 0;
-
- if (unlikely(rc == 31)) {
- return;
- }
-
- vmask = tcg_const_i64(mask);
-
- TCGV_UNUSED_I64(va);
- if (ra == 31) {
- if (inv_a) {
- va = vmask;
- } else {
- za = 1;
- }
- } else {
- va = tcg_temp_new_i64();
- tcg_gen_mov_i64(va, cpu_fir[ra]);
- if (inv_a) {
- tcg_gen_andc_i64(va, vmask, va);
- } else {
- tcg_gen_and_i64(va, va, vmask);
- }
- }
+ TCGv vmask = tcg_const_i64(mask);
+ TCGv tmp = tcg_temp_new_i64();
- TCGV_UNUSED_I64(vb);
- if (rb == 31) {
- zb = 1;
+ if (inv_a) {
+ tcg_gen_andc_i64(tmp, vmask, va);
} else {
- vb = tcg_temp_new_i64();
- tcg_gen_andc_i64(vb, cpu_fir[rb], vmask);
+ tcg_gen_and_i64(tmp, va, vmask);
}
- switch (za << 1 | zb) {
- case 0 | 0:
- tcg_gen_or_i64(cpu_fir[rc], va, vb);
- break;
- case 0 | 1:
- tcg_gen_mov_i64(cpu_fir[rc], va);
- break;
- case 2 | 0:
- tcg_gen_mov_i64(cpu_fir[rc], vb);
- break;
- case 2 | 1:
- tcg_gen_movi_i64(cpu_fir[rc], 0);
- break;
- }
+ tcg_gen_andc_i64(vc, vb, vmask);
+ tcg_gen_or_i64(vc, vc, tmp);
tcg_temp_free(vmask);
- if (ra != 31) {
- tcg_temp_free(va);
- }
- if (rb != 31) {
- tcg_temp_free(vb);
- }
-}
-
-static inline void gen_fcpys(int ra, int rb, int rc)
-{
- gen_cpys_internal(ra, rb, rc, 0, 0x8000000000000000ULL);
-}
-
-static inline void gen_fcpysn(int ra, int rb, int rc)
-{
- gen_cpys_internal(ra, rb, rc, 1, 0x8000000000000000ULL);
+ tcg_temp_free(tmp);
}
-static inline void gen_fcpyse(int ra, int rb, int rc)
-{
- gen_cpys_internal(ra, rb, rc, 0, 0xFFF0000000000000ULL);
-}
-
-#define FARITH3(name) \
- static inline void glue(gen_f, name)(int ra, int rb, int rc) \
- { \
- TCGv va, vb; \
- \
- if (unlikely(rc == 31)) { \
- return; \
- } \
- if (ra == 31) { \
- va = tcg_const_i64(0); \
- } else { \
- va = cpu_fir[ra]; \
- } \
- if (rb == 31) { \
- vb = tcg_const_i64(0); \
- } else { \
- vb = cpu_fir[rb]; \
- } \
- \
- gen_helper_ ## name(cpu_fir[rc], cpu_env, va, vb); \
- \
- if (ra == 31) { \
- tcg_temp_free(va); \
- } \
- if (rb == 31) { \
- tcg_temp_free(vb); \
- } \
- }
-
-/* ??? VAX instruction qualifiers ignored. */
-FARITH3(addf)
-FARITH3(subf)
-FARITH3(mulf)
-FARITH3(divf)
-FARITH3(addg)
-FARITH3(subg)
-FARITH3(mulg)
-FARITH3(divg)
-FARITH3(cmpgeq)
-FARITH3(cmpglt)
-FARITH3(cmpgle)
-
static void gen_ieee_arith3(DisasContext *ctx,
void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
int ra, int rb, int rc, int fn11)
{
- TCGv va, vb;
-
- /* ??? This is wrong: the instruction is not a nop, it still may
- raise exceptions. */
- if (unlikely(rc == 31)) {
- return;
- }
+ TCGv va, vb, vc;
gen_qual_roundmode(ctx, fn11);
gen_qual_flushzero(ctx, fn11);
gen_fp_exc_clear();
- va = gen_ieee_input(ra, fn11, 0);
- vb = gen_ieee_input(rb, fn11, 0);
- helper(cpu_fir[rc], cpu_env, va, vb);
- tcg_temp_free(va);
- tcg_temp_free(vb);
+ va = gen_ieee_input(ctx, ra, fn11, 0);
+ vb = gen_ieee_input(ctx, rb, fn11, 0);
+ vc = dest_fpr(ctx, rc);
+ helper(vc, cpu_env, va, vb);
gen_fp_exc_raise(rc, fn11);
}
@@ -1081,21 +890,14 @@ static void gen_ieee_compare(DisasContext *ctx,
void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
int ra, int rb, int rc, int fn11)
{
- TCGv va, vb;
-
- /* ??? This is wrong: the instruction is not a nop, it still may
- raise exceptions. */
- if (unlikely(rc == 31)) {
- return;
- }
+ TCGv va, vb, vc;
gen_fp_exc_clear();
- va = gen_ieee_input(ra, fn11, 1);
- vb = gen_ieee_input(rb, fn11, 1);
- helper(cpu_fir[rc], cpu_env, va, vb);
- tcg_temp_free(va);
- tcg_temp_free(vb);
+ va = gen_ieee_input(ctx, ra, fn11, 1);
+ vb = gen_ieee_input(ctx, rb, fn11, 1);
+ vc = dest_fpr(ctx, rc);
+ helper(vc, cpu_env, va, vb);
gen_fp_exc_raise(rc, fn11);
}
@@ -1117,8 +919,9 @@ static inline uint64_t zapnot_mask(uint8_t lit)
int i;
for (i = 0; i < 8; ++i) {
- if ((lit >> i) & 1)
+ if ((lit >> i) & 1) {
mask |= 0xffull << (i * 8);
+ }
}
return mask;
}
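
For clarity (not part of the patch): each set bit i of the 8-bit ZAPNOT literal keeps byte i of the operand. A standalone worked example that duplicates the mask expansion above:

```c
/* Standalone worked example of the zapnot byte-mask expansion. */
#include <assert.h>
#include <stdint.h>

static uint64_t zapnot_mask_copy(uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        if ((lit >> i) & 1) {
            mask |= 0xffull << (i * 8);
        }
    }
    return mask;
}

int main(void)
{
    assert(zapnot_mask_copy(0x0f) == 0x00000000ffffffffULL);
    assert(zapnot_mask_copy(0x80) == 0xff00000000000000ULL);
    assert(zapnot_mask_copy(0x01) == 0x00000000000000ffULL);
    return 0;
}
```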
@@ -1145,165 +948,111 @@ static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
tcg_gen_mov_i64(dest, src);
break;
default:
- tcg_gen_andi_i64 (dest, src, zapnot_mask (lit));
+ tcg_gen_andi_i64(dest, src, zapnot_mask(lit));
break;
}
}
-static inline void gen_zapnot(int ra, int rb, int rc, int islit, uint8_t lit)
-{
- if (unlikely(rc == 31))
- return;
- else if (unlikely(ra == 31))
- tcg_gen_movi_i64(cpu_ir[rc], 0);
- else if (islit)
- gen_zapnoti(cpu_ir[rc], cpu_ir[ra], lit);
- else
- gen_helper_zapnot (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
-}
-
-static inline void gen_zap(int ra, int rb, int rc, int islit, uint8_t lit)
-{
- if (unlikely(rc == 31))
- return;
- else if (unlikely(ra == 31))
- tcg_gen_movi_i64(cpu_ir[rc], 0);
- else if (islit)
- gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~lit);
- else
- gen_helper_zap (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
-}
-
-
/* EXTWH, EXTLH, EXTQH */
-static void gen_ext_h(int ra, int rb, int rc, int islit,
+static void gen_ext_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
uint8_t lit, uint8_t byte_mask)
{
- if (unlikely(rc == 31))
- return;
- else if (unlikely(ra == 31))
- tcg_gen_movi_i64(cpu_ir[rc], 0);
- else {
- if (islit) {
- lit = (64 - (lit & 7) * 8) & 0x3f;
- tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit);
- } else {
- TCGv tmp1 = tcg_temp_new();
- tcg_gen_andi_i64(tmp1, cpu_ir[rb], 7);
- tcg_gen_shli_i64(tmp1, tmp1, 3);
- tcg_gen_neg_i64(tmp1, tmp1);
- tcg_gen_andi_i64(tmp1, tmp1, 0x3f);
- tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], tmp1);
- tcg_temp_free(tmp1);
- }
- gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
+ if (islit) {
+ tcg_gen_shli_i64(vc, va, (64 - lit * 8) & 0x3f);
+ } else {
+ TCGv tmp = tcg_temp_new();
+ tcg_gen_shli_i64(tmp, load_gpr(ctx, rb), 3);
+ tcg_gen_neg_i64(tmp, tmp);
+ tcg_gen_andi_i64(tmp, tmp, 0x3f);
+ tcg_gen_shl_i64(vc, va, tmp);
+ tcg_temp_free(tmp);
}
+ gen_zapnoti(vc, vc, byte_mask);
}
/* EXTBL, EXTWL, EXTLL, EXTQL */
-static void gen_ext_l(int ra, int rb, int rc, int islit,
+static void gen_ext_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
uint8_t lit, uint8_t byte_mask)
{
- if (unlikely(rc == 31))
- return;
- else if (unlikely(ra == 31))
- tcg_gen_movi_i64(cpu_ir[rc], 0);
- else {
- if (islit) {
- tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], (lit & 7) * 8);
- } else {
- TCGv tmp = tcg_temp_new();
- tcg_gen_andi_i64(tmp, cpu_ir[rb], 7);
- tcg_gen_shli_i64(tmp, tmp, 3);
- tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], tmp);
- tcg_temp_free(tmp);
- }
- gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
+ if (islit) {
+ tcg_gen_shri_i64(vc, va, (lit & 7) * 8);
+ } else {
+ TCGv tmp = tcg_temp_new();
+ tcg_gen_andi_i64(tmp, load_gpr(ctx, rb), 7);
+ tcg_gen_shli_i64(tmp, tmp, 3);
+ tcg_gen_shr_i64(vc, va, tmp);
+ tcg_temp_free(tmp);
}
+ gen_zapnoti(vc, vc, byte_mask);
}
/* INSWH, INSLH, INSQH */
-static void gen_ins_h(int ra, int rb, int rc, int islit,
+static void gen_ins_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
uint8_t lit, uint8_t byte_mask)
{
- if (unlikely(rc == 31))
- return;
- else if (unlikely(ra == 31) || (islit && (lit & 7) == 0))
- tcg_gen_movi_i64(cpu_ir[rc], 0);
- else {
- TCGv tmp = tcg_temp_new();
+ TCGv tmp = tcg_temp_new();
- /* The instruction description has us left-shift the byte mask
- and extract bits <15:8> and apply that zap at the end. This
- is equivalent to simply performing the zap first and shifting
- afterward. */
- gen_zapnoti (tmp, cpu_ir[ra], byte_mask);
+ /* The instruction description has us left-shift the byte mask and extract
+ bits <15:8> and apply that zap at the end. This is equivalent to simply
+ performing the zap first and shifting afterward. */
+ gen_zapnoti(tmp, va, byte_mask);
- if (islit) {
- /* Note that we have handled the lit==0 case above. */
- tcg_gen_shri_i64 (cpu_ir[rc], tmp, 64 - (lit & 7) * 8);
+ if (islit) {
+ lit &= 7;
+ if (unlikely(lit == 0)) {
+ tcg_gen_movi_i64(vc, 0);
} else {
- TCGv shift = tcg_temp_new();
-
- /* If (B & 7) == 0, we need to shift by 64 and leave a zero.
- Do this portably by splitting the shift into two parts:
- shift_count-1 and 1. Arrange for the -1 by using
- ones-complement instead of twos-complement in the negation:
- ~((B & 7) * 8) & 63. */
-
- tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
- tcg_gen_shli_i64(shift, shift, 3);
- tcg_gen_not_i64(shift, shift);
- tcg_gen_andi_i64(shift, shift, 0x3f);
-
- tcg_gen_shr_i64(cpu_ir[rc], tmp, shift);
- tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[rc], 1);
- tcg_temp_free(shift);
+ tcg_gen_shri_i64(vc, tmp, 64 - lit * 8);
}
- tcg_temp_free(tmp);
+ } else {
+ TCGv shift = tcg_temp_new();
+
+ /* If (B & 7) == 0, we need to shift by 64 and leave a zero. Do this
+ portably by splitting the shift into two parts: shift_count-1 and 1.
+ Arrange for the -1 by using ones-complement instead of
+ twos-complement in the negation: ~(B * 8) & 63. */
+
+ tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
+ tcg_gen_not_i64(shift, shift);
+ tcg_gen_andi_i64(shift, shift, 0x3f);
+
+ tcg_gen_shr_i64(vc, tmp, shift);
+ tcg_gen_shri_i64(vc, vc, 1);
+ tcg_temp_free(shift);
}
+ tcg_temp_free(tmp);
}
/* INSBL, INSWL, INSLL, INSQL */
-static void gen_ins_l(int ra, int rb, int rc, int islit,
+static void gen_ins_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
uint8_t lit, uint8_t byte_mask)
{
- if (unlikely(rc == 31))
- return;
- else if (unlikely(ra == 31))
- tcg_gen_movi_i64(cpu_ir[rc], 0);
- else {
- TCGv tmp = tcg_temp_new();
+ TCGv tmp = tcg_temp_new();
- /* The instruction description has us left-shift the byte mask
- the same number of byte slots as the data and apply the zap
- at the end. This is equivalent to simply performing the zap
- first and shifting afterward. */
- gen_zapnoti (tmp, cpu_ir[ra], byte_mask);
+ /* The instruction description has us left-shift the byte mask
+ the same number of byte slots as the data and apply the zap
+ at the end. This is equivalent to simply performing the zap
+ first and shifting afterward. */
+ gen_zapnoti(tmp, va, byte_mask);
- if (islit) {
- tcg_gen_shli_i64(cpu_ir[rc], tmp, (lit & 7) * 8);
- } else {
- TCGv shift = tcg_temp_new();
- tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
- tcg_gen_shli_i64(shift, shift, 3);
- tcg_gen_shl_i64(cpu_ir[rc], tmp, shift);
- tcg_temp_free(shift);
- }
- tcg_temp_free(tmp);
+ if (islit) {
+ tcg_gen_shli_i64(vc, tmp, (lit & 7) * 8);
+ } else {
+ TCGv shift = tcg_temp_new();
+ tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
+ tcg_gen_shli_i64(shift, shift, 3);
+ tcg_gen_shl_i64(vc, tmp, shift);
+ tcg_temp_free(shift);
}
+ tcg_temp_free(tmp);
}
/* MSKWH, MSKLH, MSKQH */
-static void gen_msk_h(int ra, int rb, int rc, int islit,
+static void gen_msk_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
uint8_t lit, uint8_t byte_mask)
{
- if (unlikely(rc == 31))
- return;
- else if (unlikely(ra == 31))
- tcg_gen_movi_i64(cpu_ir[rc], 0);
- else if (islit) {
- gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~((byte_mask << (lit & 7)) >> 8));
+ if (islit) {
+ gen_zapnoti(vc, va, ~((byte_mask << (lit & 7)) >> 8));
} else {
TCGv shift = tcg_temp_new();
TCGv mask = tcg_temp_new();
@@ -1315,17 +1064,16 @@ static void gen_msk_h(int ra, int rb, int rc, int islit,
shift of 64 bits in order to generate a zero. This is done by
splitting the shift into two parts, the variable shift - 1
followed by a constant 1 shift. The code we expand below is
- equivalent to ~((B & 7) * 8) & 63. */
+ equivalent to ~(B * 8) & 63. */
- tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
- tcg_gen_shli_i64(shift, shift, 3);
+ tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
tcg_gen_not_i64(shift, shift);
tcg_gen_andi_i64(shift, shift, 0x3f);
tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
tcg_gen_shr_i64(mask, mask, shift);
tcg_gen_shri_i64(mask, mask, 1);
- tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);
+ tcg_gen_andc_i64(vc, va, mask);
tcg_temp_free(mask);
tcg_temp_free(shift);
@@ -1333,150 +1081,27 @@ static void gen_msk_h(int ra, int rb, int rc, int islit,
}
/* MSKBL, MSKWL, MSKLL, MSKQL */
-static void gen_msk_l(int ra, int rb, int rc, int islit,
+static void gen_msk_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
uint8_t lit, uint8_t byte_mask)
{
- if (unlikely(rc == 31))
- return;
- else if (unlikely(ra == 31))
- tcg_gen_movi_i64(cpu_ir[rc], 0);
- else if (islit) {
- gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~(byte_mask << (lit & 7)));
+ if (islit) {
+ gen_zapnoti(vc, va, ~(byte_mask << (lit & 7)));
} else {
TCGv shift = tcg_temp_new();
TCGv mask = tcg_temp_new();
- tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
+ tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
tcg_gen_shli_i64(shift, shift, 3);
- tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
+ tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
tcg_gen_shl_i64(mask, mask, shift);
- tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);
+ tcg_gen_andc_i64(vc, va, mask);
tcg_temp_free(mask);
tcg_temp_free(shift);
}
}
-/* Code to call arith3 helpers */
-#define ARITH3(name) \
-static inline void glue(gen_, name)(int ra, int rb, int rc, int islit,\
- uint8_t lit) \
-{ \
- if (unlikely(rc == 31)) \
- return; \
- \
- if (ra != 31) { \
- if (islit) { \
- TCGv tmp = tcg_const_i64(lit); \
- gen_helper_ ## name(cpu_ir[rc], cpu_ir[ra], tmp); \
- tcg_temp_free(tmp); \
- } else \
- gen_helper_ ## name (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); \
- } else { \
- TCGv tmp1 = tcg_const_i64(0); \
- if (islit) { \
- TCGv tmp2 = tcg_const_i64(lit); \
- gen_helper_ ## name (cpu_ir[rc], tmp1, tmp2); \
- tcg_temp_free(tmp2); \
- } else \
- gen_helper_ ## name (cpu_ir[rc], tmp1, cpu_ir[rb]); \
- tcg_temp_free(tmp1); \
- } \
-}
-ARITH3(cmpbge)
-ARITH3(minub8)
-ARITH3(minsb8)
-ARITH3(minuw4)
-ARITH3(minsw4)
-ARITH3(maxub8)
-ARITH3(maxsb8)
-ARITH3(maxuw4)
-ARITH3(maxsw4)
-ARITH3(perr)
-
-/* Code to call arith3 helpers */
-#define ARITH3_EX(name) \
- static inline void glue(gen_, name)(int ra, int rb, int rc, \
- int islit, uint8_t lit) \
- { \
- if (unlikely(rc == 31)) { \
- return; \
- } \
- if (ra != 31) { \
- if (islit) { \
- TCGv tmp = tcg_const_i64(lit); \
- gen_helper_ ## name(cpu_ir[rc], cpu_env, \
- cpu_ir[ra], tmp); \
- tcg_temp_free(tmp); \
- } else { \
- gen_helper_ ## name(cpu_ir[rc], cpu_env, \
- cpu_ir[ra], cpu_ir[rb]); \
- } \
- } else { \
- TCGv tmp1 = tcg_const_i64(0); \
- if (islit) { \
- TCGv tmp2 = tcg_const_i64(lit); \
- gen_helper_ ## name(cpu_ir[rc], cpu_env, tmp1, tmp2); \
- tcg_temp_free(tmp2); \
- } else { \
- gen_helper_ ## name(cpu_ir[rc], cpu_env, tmp1, cpu_ir[rb]); \
- } \
- tcg_temp_free(tmp1); \
- } \
- }
-ARITH3_EX(addlv)
-ARITH3_EX(sublv)
-ARITH3_EX(addqv)
-ARITH3_EX(subqv)
-ARITH3_EX(mullv)
-ARITH3_EX(mulqv)
-
-#define MVIOP2(name) \
-static inline void glue(gen_, name)(int rb, int rc) \
-{ \
- if (unlikely(rc == 31)) \
- return; \
- if (unlikely(rb == 31)) \
- tcg_gen_movi_i64(cpu_ir[rc], 0); \
- else \
- gen_helper_ ## name (cpu_ir[rc], cpu_ir[rb]); \
-}
-MVIOP2(pklb)
-MVIOP2(pkwb)
-MVIOP2(unpkbl)
-MVIOP2(unpkbw)
-
-static void gen_cmp(TCGCond cond, int ra, int rb, int rc,
- int islit, uint8_t lit)
-{
- TCGv va, vb;
-
- if (unlikely(rc == 31)) {
- return;
- }
-
- if (ra == 31) {
- va = tcg_const_i64(0);
- } else {
- va = cpu_ir[ra];
- }
- if (islit) {
- vb = tcg_const_i64(lit);
- } else {
- vb = cpu_ir[rb];
- }
-
- tcg_gen_setcond_i64(cond, cpu_ir[rc], va, vb);
-
- if (ra == 31) {
- tcg_temp_free(va);
- }
- if (islit) {
- tcg_temp_free(vb);
- }
-}
-
static void gen_rx(int ra, int set)
{
TCGv_i32 tmp;
@@ -1504,11 +1129,13 @@ static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
break;
case 0x9E:
/* RDUNIQUE */
- tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_unique);
+ tcg_gen_ld_i64(cpu_ir[IR_V0], cpu_env,
+ offsetof(CPUAlphaState, unique));
break;
case 0x9F:
/* WRUNIQUE */
- tcg_gen_mov_i64(cpu_unique, cpu_ir[IR_A0]);
+ tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env,
+ offsetof(CPUAlphaState, unique));
break;
default:
palcode &= 0xbf;
@@ -1531,15 +1158,18 @@ static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
break;
case 0x2D:
/* WRVPTPTR */
- tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env, offsetof(CPUAlphaState, vptptr));
+ tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env,
+ offsetof(CPUAlphaState, vptptr));
break;
case 0x31:
/* WRVAL */
- tcg_gen_mov_i64(cpu_sysval, cpu_ir[IR_A0]);
+ tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env,
+ offsetof(CPUAlphaState, sysval));
break;
case 0x32:
/* RDVAL */
- tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_sysval);
+ tcg_gen_ld_i64(cpu_ir[IR_V0], cpu_env,
+ offsetof(CPUAlphaState, sysval));
break;
case 0x35: {
@@ -1548,7 +1178,8 @@ static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
/* Note that we already know we're in kernel mode, so we know
that PS only contains the 3 IPL bits. */
- tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUAlphaState, ps));
+ tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env,
+ offsetof(CPUAlphaState, ps));
/* But make sure and store only the 3 IPL bits from the user. */
tmp = tcg_temp_new();
@@ -1560,15 +1191,18 @@ static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
case 0x36:
/* RDPS */
- tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUAlphaState, ps));
+ tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env,
+ offsetof(CPUAlphaState, ps));
break;
case 0x38:
/* WRUSP */
- tcg_gen_mov_i64(cpu_usp, cpu_ir[IR_A0]);
+ tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env,
+ offsetof(CPUAlphaState, usp));
break;
case 0x3A:
/* RDUSP */
- tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_usp);
+ tcg_gen_st_i64(cpu_ir[IR_V0], cpu_env,
+ offsetof(CPUAlphaState, usp));
break;
case 0x3C:
/* WHAMI */
@@ -1648,16 +1282,10 @@ static int cpu_pr_data(int pr)
return 0;
}
-static ExitStatus gen_mfpr(int ra, int regno)
+static ExitStatus gen_mfpr(TCGv va, int regno)
{
int data = cpu_pr_data(regno);
- /* In our emulated PALcode, these processor registers have no
- side effects from reading. */
- if (ra == 31) {
- return NO_EXIT;
- }
-
/* Special help for VMTIME and WALLTIME. */
if (regno == 250 || regno == 249) {
void (*helper)(TCGv) = gen_helper_get_walltime;
@@ -1666,11 +1294,11 @@ static ExitStatus gen_mfpr(int ra, int regno)
}
if (use_icount) {
gen_io_start();
- helper(cpu_ir[ra]);
+ helper(va);
gen_io_end();
return EXIT_PC_STALE;
} else {
- helper(cpu_ir[ra]);
+ helper(va);
return NO_EXIT;
}
}
@@ -1678,28 +1306,22 @@ static ExitStatus gen_mfpr(int ra, int regno)
/* The basic registers are data only, and unknown registers
are read-zero, write-ignore. */
if (data == 0) {
- tcg_gen_movi_i64(cpu_ir[ra], 0);
+ tcg_gen_movi_i64(va, 0);
} else if (data & PR_BYTE) {
- tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, data & ~PR_BYTE);
+ tcg_gen_ld8u_i64(va, cpu_env, data & ~PR_BYTE);
} else if (data & PR_LONG) {
- tcg_gen_ld32s_i64(cpu_ir[ra], cpu_env, data & ~PR_LONG);
+ tcg_gen_ld32s_i64(va, cpu_env, data & ~PR_LONG);
} else {
- tcg_gen_ld_i64(cpu_ir[ra], cpu_env, data);
+ tcg_gen_ld_i64(va, cpu_env, data);
}
return NO_EXIT;
}
-static ExitStatus gen_mtpr(DisasContext *ctx, int rb, int regno)
+static ExitStatus gen_mtpr(DisasContext *ctx, TCGv vb, int regno)
{
TCGv tmp;
int data;
- if (rb == 31) {
- tmp = tcg_const_i64(0);
- } else {
- tmp = cpu_ir[rb];
- }
-
switch (regno) {
case 255:
/* TBIA */
@@ -1708,7 +1330,7 @@ static ExitStatus gen_mtpr(DisasContext *ctx, int rb, int regno)
case 254:
/* TBIS */
- gen_helper_tbis(cpu_env, tmp);
+ gen_helper_tbis(cpu_env, vb);
break;
case 253:
@@ -1720,17 +1342,17 @@ static ExitStatus gen_mtpr(DisasContext *ctx, int rb, int regno)
case 252:
/* HALT */
- gen_helper_halt(tmp);
+ gen_helper_halt(vb);
return EXIT_PC_STALE;
case 251:
/* ALARM */
- gen_helper_set_alarm(cpu_env, tmp);
+ gen_helper_set_alarm(cpu_env, vb);
break;
case 7:
/* PALBR */
- tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUAlphaState, palbr));
+ tcg_gen_st_i64(vb, cpu_env, offsetof(CPUAlphaState, palbr));
/* Changing the PAL base register implies un-chaining all of the TBs
that ended with a CALL_PAL. Since the base register usually only
changes during boot, flushing everything works well. */
@@ -1743,64 +1365,70 @@ static ExitStatus gen_mtpr(DisasContext *ctx, int rb, int regno)
data = cpu_pr_data(regno);
if (data != 0) {
if (data & PR_BYTE) {
- tcg_gen_st8_i64(tmp, cpu_env, data & ~PR_BYTE);
+ tcg_gen_st8_i64(vb, cpu_env, data & ~PR_BYTE);
} else if (data & PR_LONG) {
- tcg_gen_st32_i64(tmp, cpu_env, data & ~PR_LONG);
+ tcg_gen_st32_i64(vb, cpu_env, data & ~PR_LONG);
} else {
- tcg_gen_st_i64(tmp, cpu_env, data);
+ tcg_gen_st_i64(vb, cpu_env, data);
}
}
break;
}
- if (rb == 31) {
- tcg_temp_free(tmp);
- }
-
return NO_EXIT;
}
#endif /* !USER_ONLY*/
+#define REQUIRE_TB_FLAG(FLAG) \
+ do { \
+ if ((ctx->tb->flags & (FLAG)) == 0) { \
+ goto invalid_opc; \
+ } \
+ } while (0)
+
+#define REQUIRE_REG_31(WHICH) \
+ do { \
+ if (WHICH != 31) { \
+ goto invalid_opc; \
+ } \
+ } while (0)
+
static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
{
- uint32_t palcode;
- int32_t disp21, disp16;
-#ifndef CONFIG_USER_ONLY
- int32_t disp12;
-#endif
+ int32_t disp21, disp16, disp12 __attribute__((unused));
uint16_t fn11;
- uint8_t opc, ra, rb, rc, fpfn, fn7, islit, real_islit;
- uint8_t lit;
+ uint8_t opc, ra, rb, rc, fpfn, fn7, lit;
+ bool islit;
+ TCGv va, vb, vc, tmp;
+ TCGv_i32 t32;
ExitStatus ret;
/* Decode all instruction fields */
- opc = insn >> 26;
- ra = (insn >> 21) & 0x1F;
- rb = (insn >> 16) & 0x1F;
- rc = insn & 0x1F;
- real_islit = islit = (insn >> 12) & 1;
+ opc = extract32(insn, 26, 6);
+ ra = extract32(insn, 21, 5);
+ rb = extract32(insn, 16, 5);
+ rc = extract32(insn, 0, 5);
+ islit = extract32(insn, 12, 1);
+ lit = extract32(insn, 13, 8);
+
+ disp21 = sextract32(insn, 0, 21);
+ disp16 = sextract32(insn, 0, 16);
+ disp12 = sextract32(insn, 0, 12);
+
+ fn11 = extract32(insn, 5, 11);
+ fpfn = extract32(insn, 5, 6);
+ fn7 = extract32(insn, 5, 7);
+
if (rb == 31 && !islit) {
- islit = 1;
+ islit = true;
lit = 0;
- } else
- lit = (insn >> 13) & 0xFF;
- palcode = insn & 0x03FFFFFF;
- disp21 = ((int32_t)((insn & 0x001FFFFF) << 11)) >> 11;
- disp16 = (int16_t)(insn & 0x0000FFFF);
-#ifndef CONFIG_USER_ONLY
- disp12 = (int32_t)((insn & 0x00000FFF) << 20) >> 20;
-#endif
- fn11 = (insn >> 5) & 0x000007FF;
- fpfn = fn11 & 0x3F;
- fn7 = (insn >> 5) & 0x0000007F;
- LOG_DISAS("opc %02x ra %2d rb %2d rc %2d disp16 %6d\n",
- opc, ra, rb, rc, disp16);
+ }
ret = NO_EXIT;
switch (opc) {
case 0x00:
/* CALL_PAL */
- ret = gen_call_pal(ctx, palcode);
+ ret = gen_call_pal(ctx, insn & 0x03ffffff);
break;
case 0x01:
/* OPC01 */
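
The rewritten decoder above pulls the instruction fields with extract32()/sextract32() instead of hand-rolled shifts and masks. A standalone illustration of that decode, where the two local helpers are assumed to mirror QEMU's versions in include/qemu/bitops.h (simplified; valid for 0 < length < 32), applied to a sample memory-format word:

```c
/* Standalone field-decode illustration: 0xa45e0010 is "ldq $2, 16($30)". */
#include <assert.h>
#include <stdint.h>

static uint32_t extract32(uint32_t value, int start, int length)
{
    return (value >> start) & (~0u >> (32 - length));
}

static int32_t sextract32(uint32_t value, int start, int length)
{
    return (int32_t)(value << (32 - length - start)) >> (32 - length);
}

int main(void)
{
    uint32_t insn = 0xa45e0010;

    assert(extract32(insn, 26, 6) == 0x29);  /* opcode: LDQ */
    assert(extract32(insn, 21, 5) == 2);     /* ra */
    assert(extract32(insn, 16, 5) == 30);    /* rb */
    assert(sextract32(insn, 0, 16) == 16);   /* disp16, sign-extended */
    return 0;
}
```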
@@ -1823,838 +1451,627 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
case 0x07:
/* OPC07 */
goto invalid_opc;
- case 0x08:
- /* LDA */
- if (likely(ra != 31)) {
- if (rb != 31)
- tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16);
- else
- tcg_gen_movi_i64(cpu_ir[ra], disp16);
- }
- break;
+
case 0x09:
/* LDAH */
- if (likely(ra != 31)) {
- if (rb != 31)
- tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16 << 16);
- else
- tcg_gen_movi_i64(cpu_ir[ra], disp16 << 16);
+ disp16 = (uint32_t)disp16 << 16;
+ /* fall through */
+ case 0x08:
+ /* LDA */
+ va = dest_gpr(ctx, ra);
+ /* It's worth special-casing immediate loads. */
+ if (rb == 31) {
+ tcg_gen_movi_i64(va, disp16);
+ } else {
+ tcg_gen_addi_i64(va, load_gpr(ctx, rb), disp16);
}
break;
+
case 0x0A:
/* LDBU */
- if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
- gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
- break;
- }
- goto invalid_opc;
+ REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
+ gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
+ break;
case 0x0B:
/* LDQ_U */
gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
break;
case 0x0C:
/* LDWU */
- if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
- gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
- break;
- }
- goto invalid_opc;
+ REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
+ gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
+ break;
case 0x0D:
/* STW */
+ REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0);
break;
case 0x0E:
/* STB */
+ REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0);
break;
case 0x0F:
/* STQ_U */
gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1);
break;
+
case 0x10:
+ vc = dest_gpr(ctx, rc);
+ vb = load_gpr_lit(ctx, rb, lit, islit);
+
+ if (ra == 31) {
+ if (fn7 == 0x00) {
+ /* Special case ADDL as SEXTL. */
+ tcg_gen_ext32s_i64(vc, vb);
+ break;
+ }
+ if (fn7 == 0x29) {
+ /* Special case SUBQ as NEGQ. */
+ tcg_gen_neg_i64(vc, vb);
+ break;
+ }
+ }
+
+ va = load_gpr(ctx, ra);
switch (fn7) {
case 0x00:
/* ADDL */
- if (likely(rc != 31)) {
- if (ra != 31) {
- if (islit) {
- tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
- tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
- } else {
- tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
- tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
- }
- } else {
- if (islit)
- tcg_gen_movi_i64(cpu_ir[rc], lit);
- else
- tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
- }
- }
+ tcg_gen_add_i64(vc, va, vb);
+ tcg_gen_ext32s_i64(vc, vc);
break;
case 0x02:
/* S4ADDL */
- if (likely(rc != 31)) {
- if (ra != 31) {
- TCGv tmp = tcg_temp_new();
- tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
- if (islit)
- tcg_gen_addi_i64(tmp, tmp, lit);
- else
- tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
- tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
- tcg_temp_free(tmp);
- } else {
- if (islit)
- tcg_gen_movi_i64(cpu_ir[rc], lit);
- else
- tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
- }
- }
+ tmp = tcg_temp_new();
+ tcg_gen_shli_i64(tmp, va, 2);
+ tcg_gen_add_i64(tmp, tmp, vb);
+ tcg_gen_ext32s_i64(vc, tmp);
+ tcg_temp_free(tmp);
break;
case 0x09:
/* SUBL */
- if (likely(rc != 31)) {
- if (ra != 31) {
- if (islit)
- tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
- else
- tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
- tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
- } else {
- if (islit)
- tcg_gen_movi_i64(cpu_ir[rc], -lit);
- else {
- tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
- tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
- }
- }
+ tcg_gen_sub_i64(vc, va, vb);
+ tcg_gen_ext32s_i64(vc, vc);
break;
case 0x0B:
/* S4SUBL */
- if (likely(rc != 31)) {
- if (ra != 31) {
- TCGv tmp = tcg_temp_new();
- tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
- if (islit)
- tcg_gen_subi_i64(tmp, tmp, lit);
- else
- tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
- tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
- tcg_temp_free(tmp);
- } else {
- if (islit)
- tcg_gen_movi_i64(cpu_ir[rc], -lit);
- else {
- tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
- tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
- }
- }
- }
+ tmp = tcg_temp_new();
+ tcg_gen_shli_i64(tmp, va, 2);
+ tcg_gen_sub_i64(tmp, tmp, vb);
+ tcg_gen_ext32s_i64(vc, tmp);
+ tcg_temp_free(tmp);
break;
case 0x0F:
/* CMPBGE */
- gen_cmpbge(ra, rb, rc, islit, lit);
+ gen_helper_cmpbge(vc, va, vb);
break;
case 0x12:
/* S8ADDL */
- if (likely(rc != 31)) {
- if (ra != 31) {
- TCGv tmp = tcg_temp_new();
- tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
- if (islit)
- tcg_gen_addi_i64(tmp, tmp, lit);
- else
- tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
- tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
- tcg_temp_free(tmp);
- } else {
- if (islit)
- tcg_gen_movi_i64(cpu_ir[rc], lit);
- else
- tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
- }
- }
+ tmp = tcg_temp_new();
+ tcg_gen_shli_i64(tmp, va, 3);
+ tcg_gen_add_i64(tmp, tmp, vb);
+ tcg_gen_ext32s_i64(vc, tmp);
+ tcg_temp_free(tmp);
break;
case 0x1B:
/* S8SUBL */
- if (likely(rc != 31)) {
- if (ra != 31) {
- TCGv tmp = tcg_temp_new();
- tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
- if (islit)
- tcg_gen_subi_i64(tmp, tmp, lit);
- else
- tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
- tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
- tcg_temp_free(tmp);
- } else {
- if (islit)
- tcg_gen_movi_i64(cpu_ir[rc], -lit);
- else
- tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
- tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
- }
- }
- }
+ tmp = tcg_temp_new();
+ tcg_gen_shli_i64(tmp, va, 3);
+ tcg_gen_sub_i64(tmp, tmp, vb);
+ tcg_gen_ext32s_i64(vc, tmp);
+ tcg_temp_free(tmp);
break;
case 0x1D:
/* CMPULT */
- gen_cmp(TCG_COND_LTU, ra, rb, rc, islit, lit);
+ tcg_gen_setcond_i64(TCG_COND_LTU, vc, va, vb);
break;
case 0x20:
/* ADDQ */
- if (likely(rc != 31)) {
- if (ra != 31) {
- if (islit)
- tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
- else
- tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
- } else {
- if (islit)
- tcg_gen_movi_i64(cpu_ir[rc], lit);
- else
- tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
- }
- }
+ tcg_gen_add_i64(vc, va, vb);
break;
case 0x22:
/* S4ADDQ */
- if (likely(rc != 31)) {
- if (ra != 31) {
- TCGv tmp = tcg_temp_new();
- tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
- if (islit)
- tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
- else
- tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
- tcg_temp_free(tmp);
- } else {
- if (islit)
- tcg_gen_movi_i64(cpu_ir[rc], lit);
- else
- tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
- }
- }
+ tmp = tcg_temp_new();
+ tcg_gen_shli_i64(tmp, va, 2);
+ tcg_gen_add_i64(vc, tmp, vb);
+ tcg_temp_free(tmp);
break;
case 0x29:
/* SUBQ */
- if (likely(rc != 31)) {
- if (ra != 31) {
- if (islit)
- tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
- else
- tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
- } else {
- if (islit)
- tcg_gen_movi_i64(cpu_ir[rc], -lit);
- else
- tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
- }
- }
+ tcg_gen_sub_i64(vc, va, vb);
break;
case 0x2B:
/* S4SUBQ */
- if (likely(rc != 31)) {
- if (ra != 31) {
- TCGv tmp = tcg_temp_new();
- tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
- if (islit)
- tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
- else
- tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
- tcg_temp_free(tmp);
- } else {
- if (islit)
- tcg_gen_movi_i64(cpu_ir[rc], -lit);
- else
- tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
- }
- }
+ tmp = tcg_temp_new();
+ tcg_gen_shli_i64(tmp, va, 2);
+ tcg_gen_sub_i64(vc, tmp, vb);
+ tcg_temp_free(tmp);
break;
case 0x2D:
/* CMPEQ */
- gen_cmp(TCG_COND_EQ, ra, rb, rc, islit, lit);
+ tcg_gen_setcond_i64(TCG_COND_EQ, vc, va, vb);
break;
case 0x32:
/* S8ADDQ */
- if (likely(rc != 31)) {
- if (ra != 31) {
- TCGv tmp = tcg_temp_new();
- tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
- if (islit)
- tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
- else
- tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
- tcg_temp_free(tmp);
- } else {
- if (islit)
- tcg_gen_movi_i64(cpu_ir[rc], lit);
- else
- tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
- }
- }
+ tmp = tcg_temp_new();
+ tcg_gen_shli_i64(tmp, va, 3);
+ tcg_gen_add_i64(vc, tmp, vb);
+ tcg_temp_free(tmp);
break;
case 0x3B:
/* S8SUBQ */
- if (likely(rc != 31)) {
- if (ra != 31) {
- TCGv tmp = tcg_temp_new();
- tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
- if (islit)
- tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
- else
- tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
- tcg_temp_free(tmp);
- } else {
- if (islit)
- tcg_gen_movi_i64(cpu_ir[rc], -lit);
- else
- tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
- }
- }
+ tmp = tcg_temp_new();
+ tcg_gen_shli_i64(tmp, va, 3);
+ tcg_gen_sub_i64(vc, tmp, vb);
+ tcg_temp_free(tmp);
break;
case 0x3D:
/* CMPULE */
- gen_cmp(TCG_COND_LEU, ra, rb, rc, islit, lit);
+ tcg_gen_setcond_i64(TCG_COND_LEU, vc, va, vb);
break;
case 0x40:
/* ADDL/V */
- gen_addlv(ra, rb, rc, islit, lit);
+ gen_helper_addlv(vc, cpu_env, va, vb);
break;
case 0x49:
/* SUBL/V */
- gen_sublv(ra, rb, rc, islit, lit);
+ gen_helper_sublv(vc, cpu_env, va, vb);
break;
case 0x4D:
/* CMPLT */
- gen_cmp(TCG_COND_LT, ra, rb, rc, islit, lit);
+ tcg_gen_setcond_i64(TCG_COND_LT, vc, va, vb);
break;
case 0x60:
/* ADDQ/V */
- gen_addqv(ra, rb, rc, islit, lit);
+ gen_helper_addqv(vc, cpu_env, va, vb);
break;
case 0x69:
/* SUBQ/V */
- gen_subqv(ra, rb, rc, islit, lit);
+ gen_helper_subqv(vc, cpu_env, va, vb);
break;
case 0x6D:
/* CMPLE */
- gen_cmp(TCG_COND_LE, ra, rb, rc, islit, lit);
+ tcg_gen_setcond_i64(TCG_COND_LE, vc, va, vb);
break;
default:
goto invalid_opc;
}
break;
+
case 0x11:
+ if (fn7 == 0x20) {
+ if (rc == 31) {
+ /* Special case BIS as NOP. */
+ break;
+ }
+ if (ra == 31) {
+ /* Special case BIS as MOV. */
+ vc = dest_gpr(ctx, rc);
+ if (islit) {
+ tcg_gen_movi_i64(vc, lit);
+ } else {
+ tcg_gen_mov_i64(vc, load_gpr(ctx, rb));
+ }
+ break;
+ }
+ }
+
+ vc = dest_gpr(ctx, rc);
+ vb = load_gpr_lit(ctx, rb, lit, islit);
+
+ if (fn7 == 0x28 && ra == 31) {
+ /* Special case ORNOT as NOT. */
+ tcg_gen_not_i64(vc, vb);
+ break;
+ }
+
+ va = load_gpr(ctx, ra);
switch (fn7) {
case 0x00:
/* AND */
- if (likely(rc != 31)) {
- if (ra == 31)
- tcg_gen_movi_i64(cpu_ir[rc], 0);
- else if (islit)
- tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], lit);
- else
- tcg_gen_and_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
- }
+ tcg_gen_and_i64(vc, va, vb);
break;
case 0x08:
/* BIC */
- if (likely(rc != 31)) {
- if (ra != 31) {
- if (islit)
- tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
- else
- tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
- } else
- tcg_gen_movi_i64(cpu_ir[rc], 0);
- }
+ tcg_gen_andc_i64(vc, va, vb);
break;
case 0x14:
/* CMOVLBS */
- gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 1);
+ tmp = tcg_temp_new();
+ tcg_gen_andi_i64(tmp, va, 1);
+ tcg_gen_movcond_i64(TCG_COND_NE, vc, tmp, load_zero(ctx),
+ vb, load_gpr(ctx, rc));
+ tcg_temp_free(tmp);
break;
case 0x16:
/* CMOVLBC */
- gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 1);
+ tmp = tcg_temp_new();
+ tcg_gen_andi_i64(tmp, va, 1);
+ tcg_gen_movcond_i64(TCG_COND_EQ, vc, tmp, load_zero(ctx),
+ vb, load_gpr(ctx, rc));
+ tcg_temp_free(tmp);
break;
case 0x20:
/* BIS */
- if (likely(rc != 31)) {
- if (ra != 31) {
- if (islit)
- tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], lit);
- else
- tcg_gen_or_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
- } else {
- if (islit)
- tcg_gen_movi_i64(cpu_ir[rc], lit);
- else
- tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
- }
- }
+ tcg_gen_or_i64(vc, va, vb);
break;
case 0x24:
/* CMOVEQ */
- gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 0);
+ tcg_gen_movcond_i64(TCG_COND_EQ, vc, va, load_zero(ctx),
+ vb, load_gpr(ctx, rc));
break;
case 0x26:
/* CMOVNE */
- gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 0);
+ tcg_gen_movcond_i64(TCG_COND_NE, vc, va, load_zero(ctx),
+ vb, load_gpr(ctx, rc));
break;
case 0x28:
/* ORNOT */
- if (likely(rc != 31)) {
- if (ra != 31) {
- if (islit)
- tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
- else
- tcg_gen_orc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
- } else {
- if (islit)
- tcg_gen_movi_i64(cpu_ir[rc], ~lit);
- else
- tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
- }
- }
+ tcg_gen_orc_i64(vc, va, vb);
break;
case 0x40:
/* XOR */
- if (likely(rc != 31)) {
- if (ra != 31) {
- if (islit)
- tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], lit);
- else
- tcg_gen_xor_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
- } else {
- if (islit)
- tcg_gen_movi_i64(cpu_ir[rc], lit);
- else
- tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
- }
- }
+ tcg_gen_xor_i64(vc, va, vb);
break;
case 0x44:
/* CMOVLT */
- gen_cmov(TCG_COND_LT, ra, rb, rc, islit, lit, 0);
+ tcg_gen_movcond_i64(TCG_COND_LT, vc, va, load_zero(ctx),
+ vb, load_gpr(ctx, rc));
break;
case 0x46:
/* CMOVGE */
- gen_cmov(TCG_COND_GE, ra, rb, rc, islit, lit, 0);
+ tcg_gen_movcond_i64(TCG_COND_GE, vc, va, load_zero(ctx),
+ vb, load_gpr(ctx, rc));
break;
case 0x48:
/* EQV */
- if (likely(rc != 31)) {
- if (ra != 31) {
- if (islit)
- tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
- else
- tcg_gen_eqv_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
- } else {
- if (islit)
- tcg_gen_movi_i64(cpu_ir[rc], ~lit);
- else
- tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
- }
- }
+ tcg_gen_eqv_i64(vc, va, vb);
break;
case 0x61:
/* AMASK */
- if (likely(rc != 31)) {
+ REQUIRE_REG_31(ra);
+ {
uint64_t amask = ctx->tb->flags >> TB_FLAGS_AMASK_SHIFT;
-
- if (islit) {
- tcg_gen_movi_i64(cpu_ir[rc], lit & ~amask);
- } else {
- tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[rb], ~amask);
- }
+ tcg_gen_andi_i64(vc, vb, ~amask);
}
break;
case 0x64:
/* CMOVLE */
- gen_cmov(TCG_COND_LE, ra, rb, rc, islit, lit, 0);
+ tcg_gen_movcond_i64(TCG_COND_LE, vc, va, load_zero(ctx),
+ vb, load_gpr(ctx, rc));
break;
case 0x66:
/* CMOVGT */
- gen_cmov(TCG_COND_GT, ra, rb, rc, islit, lit, 0);
+ tcg_gen_movcond_i64(TCG_COND_GT, vc, va, load_zero(ctx),
+ vb, load_gpr(ctx, rc));
break;
case 0x6C:
/* IMPLVER */
- if (rc != 31) {
- tcg_gen_movi_i64(cpu_ir[rc], ctx->implver);
- }
+ REQUIRE_REG_31(ra);
+ tcg_gen_movi_i64(vc, ctx->implver);
break;
default:
goto invalid_opc;
}
break;
+
case 0x12:
+ vc = dest_gpr(ctx, rc);
+ va = load_gpr(ctx, ra);
switch (fn7) {
case 0x02:
/* MSKBL */
- gen_msk_l(ra, rb, rc, islit, lit, 0x01);
+ gen_msk_l(ctx, vc, va, rb, islit, lit, 0x01);
break;
case 0x06:
/* EXTBL */
- gen_ext_l(ra, rb, rc, islit, lit, 0x01);
+ gen_ext_l(ctx, vc, va, rb, islit, lit, 0x01);
break;
case 0x0B:
/* INSBL */
- gen_ins_l(ra, rb, rc, islit, lit, 0x01);
+ gen_ins_l(ctx, vc, va, rb, islit, lit, 0x01);
break;
case 0x12:
/* MSKWL */
- gen_msk_l(ra, rb, rc, islit, lit, 0x03);
+ gen_msk_l(ctx, vc, va, rb, islit, lit, 0x03);
break;
case 0x16:
/* EXTWL */
- gen_ext_l(ra, rb, rc, islit, lit, 0x03);
+ gen_ext_l(ctx, vc, va, rb, islit, lit, 0x03);
break;
case 0x1B:
/* INSWL */
- gen_ins_l(ra, rb, rc, islit, lit, 0x03);
+ gen_ins_l(ctx, vc, va, rb, islit, lit, 0x03);
break;
case 0x22:
/* MSKLL */
- gen_msk_l(ra, rb, rc, islit, lit, 0x0f);
+ gen_msk_l(ctx, vc, va, rb, islit, lit, 0x0f);
break;
case 0x26:
/* EXTLL */
- gen_ext_l(ra, rb, rc, islit, lit, 0x0f);
+ gen_ext_l(ctx, vc, va, rb, islit, lit, 0x0f);
break;
case 0x2B:
/* INSLL */
- gen_ins_l(ra, rb, rc, islit, lit, 0x0f);
+ gen_ins_l(ctx, vc, va, rb, islit, lit, 0x0f);
break;
case 0x30:
/* ZAP */
- gen_zap(ra, rb, rc, islit, lit);
+ if (islit) {
+ gen_zapnoti(vc, va, ~lit);
+ } else {
+ gen_helper_zap(vc, va, load_gpr(ctx, rb));
+ }
break;
case 0x31:
/* ZAPNOT */
- gen_zapnot(ra, rb, rc, islit, lit);
+ if (islit) {
+ gen_zapnoti(vc, va, lit);
+ } else {
+ gen_helper_zapnot(vc, va, load_gpr(ctx, rb));
+ }
break;
case 0x32:
/* MSKQL */
- gen_msk_l(ra, rb, rc, islit, lit, 0xff);
+ gen_msk_l(ctx, vc, va, rb, islit, lit, 0xff);
break;
case 0x34:
/* SRL */
- if (likely(rc != 31)) {
- if (ra != 31) {
- if (islit)
- tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
- else {
- TCGv shift = tcg_temp_new();
- tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
- tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], shift);
- tcg_temp_free(shift);
- }
- } else
- tcg_gen_movi_i64(cpu_ir[rc], 0);
+ if (islit) {
+ tcg_gen_shri_i64(vc, va, lit & 0x3f);
+ } else {
+ tmp = tcg_temp_new();
+ vb = load_gpr(ctx, rb);
+ tcg_gen_andi_i64(tmp, vb, 0x3f);
+ tcg_gen_shr_i64(vc, va, tmp);
+ tcg_temp_free(tmp);
}
break;
case 0x36:
/* EXTQL */
- gen_ext_l(ra, rb, rc, islit, lit, 0xff);
+ gen_ext_l(ctx, vc, va, rb, islit, lit, 0xff);
break;
case 0x39:
/* SLL */
- if (likely(rc != 31)) {
- if (ra != 31) {
- if (islit)
- tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
- else {
- TCGv shift = tcg_temp_new();
- tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
- tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], shift);
- tcg_temp_free(shift);
- }
- } else
- tcg_gen_movi_i64(cpu_ir[rc], 0);
+ if (islit) {
+ tcg_gen_shli_i64(vc, va, lit & 0x3f);
+ } else {
+ tmp = tcg_temp_new();
+ vb = load_gpr(ctx, rb);
+ tcg_gen_andi_i64(tmp, vb, 0x3f);
+ tcg_gen_shl_i64(vc, va, tmp);
+ tcg_temp_free(tmp);
}
break;
case 0x3B:
/* INSQL */
- gen_ins_l(ra, rb, rc, islit, lit, 0xff);
+ gen_ins_l(ctx, vc, va, rb, islit, lit, 0xff);
break;
case 0x3C:
/* SRA */
- if (likely(rc != 31)) {
- if (ra != 31) {
- if (islit)
- tcg_gen_sari_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
- else {
- TCGv shift = tcg_temp_new();
- tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
- tcg_gen_sar_i64(cpu_ir[rc], cpu_ir[ra], shift);
- tcg_temp_free(shift);
- }
- } else
- tcg_gen_movi_i64(cpu_ir[rc], 0);
+ if (islit) {
+ tcg_gen_sari_i64(vc, va, lit & 0x3f);
+ } else {
+ tmp = tcg_temp_new();
+ vb = load_gpr(ctx, rb);
+ tcg_gen_andi_i64(tmp, vb, 0x3f);
+ tcg_gen_sar_i64(vc, va, tmp);
+ tcg_temp_free(tmp);
}
break;
case 0x52:
/* MSKWH */
- gen_msk_h(ra, rb, rc, islit, lit, 0x03);
+ gen_msk_h(ctx, vc, va, rb, islit, lit, 0x03);
break;
case 0x57:
/* INSWH */
- gen_ins_h(ra, rb, rc, islit, lit, 0x03);
+ gen_ins_h(ctx, vc, va, rb, islit, lit, 0x03);
break;
case 0x5A:
/* EXTWH */
- gen_ext_h(ra, rb, rc, islit, lit, 0x03);
+ gen_ext_h(ctx, vc, va, rb, islit, lit, 0x03);
break;
case 0x62:
/* MSKLH */
- gen_msk_h(ra, rb, rc, islit, lit, 0x0f);
+ gen_msk_h(ctx, vc, va, rb, islit, lit, 0x0f);
break;
case 0x67:
/* INSLH */
- gen_ins_h(ra, rb, rc, islit, lit, 0x0f);
+ gen_ins_h(ctx, vc, va, rb, islit, lit, 0x0f);
break;
case 0x6A:
/* EXTLH */
- gen_ext_h(ra, rb, rc, islit, lit, 0x0f);
+ gen_ext_h(ctx, vc, va, rb, islit, lit, 0x0f);
break;
case 0x72:
/* MSKQH */
- gen_msk_h(ra, rb, rc, islit, lit, 0xff);
+ gen_msk_h(ctx, vc, va, rb, islit, lit, 0xff);
break;
case 0x77:
/* INSQH */
- gen_ins_h(ra, rb, rc, islit, lit, 0xff);
+ gen_ins_h(ctx, vc, va, rb, islit, lit, 0xff);
break;
case 0x7A:
/* EXTQH */
- gen_ext_h(ra, rb, rc, islit, lit, 0xff);
+ gen_ext_h(ctx, vc, va, rb, islit, lit, 0xff);
break;
default:
goto invalid_opc;
}
break;
+
case 0x13:
+ vc = dest_gpr(ctx, rc);
+ vb = load_gpr_lit(ctx, rb, lit, islit);
+ va = load_gpr(ctx, ra);
switch (fn7) {
case 0x00:
/* MULL */
- if (likely(rc != 31)) {
- if (ra == 31)
- tcg_gen_movi_i64(cpu_ir[rc], 0);
- else {
- if (islit)
- tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
- else
- tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
- tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
- }
- }
+ tcg_gen_mul_i64(vc, va, vb);
+ tcg_gen_ext32s_i64(vc, vc);
break;
case 0x20:
/* MULQ */
- if (likely(rc != 31)) {
- if (ra == 31)
- tcg_gen_movi_i64(cpu_ir[rc], 0);
- else if (islit)
- tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
- else
- tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
- }
+ tcg_gen_mul_i64(vc, va, vb);
break;
case 0x30:
/* UMULH */
- {
- TCGv low;
- if (unlikely(rc == 31)){
- break;
- }
- if (ra == 31) {
- tcg_gen_movi_i64(cpu_ir[rc], 0);
- break;
- }
- low = tcg_temp_new();
- if (islit) {
- tcg_gen_movi_tl(low, lit);
- tcg_gen_mulu2_i64(low, cpu_ir[rc], cpu_ir[ra], low);
- } else {
- tcg_gen_mulu2_i64(low, cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
- }
- tcg_temp_free(low);
- }
+ tmp = tcg_temp_new();
+ tcg_gen_mulu2_i64(tmp, vc, va, vb);
+ tcg_temp_free(tmp);
break;
case 0x40:
/* MULL/V */
- gen_mullv(ra, rb, rc, islit, lit);
+ gen_helper_mullv(vc, cpu_env, va, vb);
break;
case 0x60:
/* MULQ/V */
- gen_mulqv(ra, rb, rc, islit, lit);
+ gen_helper_mulqv(vc, cpu_env, va, vb);
break;
default:
goto invalid_opc;
}
break;
+
case 0x14:
+ REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX);
+ vc = dest_fpr(ctx, rc);
switch (fpfn) { /* fn11 & 0x3F */
case 0x04:
/* ITOFS */
- if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
- goto invalid_opc;
- }
- if (likely(rc != 31)) {
- if (ra != 31) {
- TCGv_i32 tmp = tcg_temp_new_i32();
- tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
- gen_helper_memory_to_s(cpu_fir[rc], tmp);
- tcg_temp_free_i32(tmp);
- } else
- tcg_gen_movi_i64(cpu_fir[rc], 0);
- }
+ REQUIRE_REG_31(rb);
+ t32 = tcg_temp_new_i32();
+ va = load_gpr(ctx, ra);
+ tcg_gen_trunc_i64_i32(t32, va);
+ gen_helper_memory_to_s(vc, t32);
+ tcg_temp_free_i32(t32);
break;
case 0x0A:
/* SQRTF */
- if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
- gen_fsqrtf(rb, rc);
- break;
- }
- goto invalid_opc;
+ REQUIRE_REG_31(ra);
+ vb = load_fpr(ctx, rb);
+ gen_helper_sqrtf(vc, cpu_env, vb);
+ break;
case 0x0B:
/* SQRTS */
- if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
- gen_fsqrts(ctx, rb, rc, fn11);
- break;
- }
- goto invalid_opc;
+ REQUIRE_REG_31(ra);
+ gen_fsqrts(ctx, rb, rc, fn11);
+ break;
case 0x14:
/* ITOFF */
- if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
- goto invalid_opc;
- }
- if (likely(rc != 31)) {
- if (ra != 31) {
- TCGv_i32 tmp = tcg_temp_new_i32();
- tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
- gen_helper_memory_to_f(cpu_fir[rc], tmp);
- tcg_temp_free_i32(tmp);
- } else
- tcg_gen_movi_i64(cpu_fir[rc], 0);
- }
+ REQUIRE_REG_31(rb);
+ t32 = tcg_temp_new_i32();
+ va = load_gpr(ctx, ra);
+ tcg_gen_trunc_i64_i32(t32, va);
+ gen_helper_memory_to_f(vc, t32);
+ tcg_temp_free_i32(t32);
break;
case 0x24:
/* ITOFT */
- if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
- goto invalid_opc;
- }
- if (likely(rc != 31)) {
- if (ra != 31)
- tcg_gen_mov_i64(cpu_fir[rc], cpu_ir[ra]);
- else
- tcg_gen_movi_i64(cpu_fir[rc], 0);
- }
+ REQUIRE_REG_31(rb);
+ va = load_gpr(ctx, ra);
+ tcg_gen_mov_i64(vc, va);
break;
case 0x2A:
/* SQRTG */
- if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
- gen_fsqrtg(rb, rc);
- break;
- }
- goto invalid_opc;
+ REQUIRE_REG_31(ra);
+ vb = load_fpr(ctx, rb);
+ gen_helper_sqrtg(vc, cpu_env, vb);
+ break;
case 0x02B:
/* SQRTT */
- if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
- gen_fsqrtt(ctx, rb, rc, fn11);
- break;
- }
- goto invalid_opc;
+ REQUIRE_REG_31(ra);
+ gen_fsqrtt(ctx, rb, rc, fn11);
+ break;
default:
goto invalid_opc;
}
break;
+
case 0x15:
/* VAX floating point */
/* XXX: rounding mode and trap are ignored (!) */
+ vc = dest_fpr(ctx, rc);
+ vb = load_fpr(ctx, rb);
+ va = load_fpr(ctx, ra);
switch (fpfn) { /* fn11 & 0x3F */
case 0x00:
/* ADDF */
- gen_faddf(ra, rb, rc);
+ gen_helper_addf(vc, cpu_env, va, vb);
break;
case 0x01:
/* SUBF */
- gen_fsubf(ra, rb, rc);
+ gen_helper_subf(vc, cpu_env, va, vb);
break;
case 0x02:
/* MULF */
- gen_fmulf(ra, rb, rc);
+ gen_helper_mulf(vc, cpu_env, va, vb);
break;
case 0x03:
/* DIVF */
- gen_fdivf(ra, rb, rc);
+ gen_helper_divf(vc, cpu_env, va, vb);
break;
case 0x1E:
- /* CVTDG */
-#if 0 // TODO
- gen_fcvtdg(rb, rc);
-#else
+ /* CVTDG -- TODO */
+ REQUIRE_REG_31(ra);
goto invalid_opc;
-#endif
- break;
case 0x20:
/* ADDG */
- gen_faddg(ra, rb, rc);
+ gen_helper_addg(vc, cpu_env, va, vb);
break;
case 0x21:
/* SUBG */
- gen_fsubg(ra, rb, rc);
+ gen_helper_subg(vc, cpu_env, va, vb);
break;
case 0x22:
/* MULG */
- gen_fmulg(ra, rb, rc);
+ gen_helper_mulg(vc, cpu_env, va, vb);
break;
case 0x23:
/* DIVG */
- gen_fdivg(ra, rb, rc);
+ gen_helper_divg(vc, cpu_env, va, vb);
break;
case 0x25:
/* CMPGEQ */
- gen_fcmpgeq(ra, rb, rc);
+ gen_helper_cmpgeq(vc, cpu_env, va, vb);
break;
case 0x26:
/* CMPGLT */
- gen_fcmpglt(ra, rb, rc);
+ gen_helper_cmpglt(vc, cpu_env, va, vb);
break;
case 0x27:
/* CMPGLE */
- gen_fcmpgle(ra, rb, rc);
+ gen_helper_cmpgle(vc, cpu_env, va, vb);
break;
case 0x2C:
/* CVTGF */
- gen_fcvtgf(rb, rc);
+ REQUIRE_REG_31(ra);
+ gen_helper_cvtgf(vc, cpu_env, vb);
break;
case 0x2D:
- /* CVTGD */
-#if 0 // TODO
- gen_fcvtgd(rb, rc);
-#else
+ /* CVTGD -- TODO */
+ REQUIRE_REG_31(ra);
goto invalid_opc;
-#endif
- break;
case 0x2F:
/* CVTGQ */
- gen_fcvtgq(rb, rc);
+ REQUIRE_REG_31(ra);
+ gen_helper_cvtgq(vc, cpu_env, vb);
break;
case 0x3C:
/* CVTQF */
- gen_fcvtqf(rb, rc);
+ REQUIRE_REG_31(ra);
+ gen_helper_cvtqf(vc, cpu_env, vb);
break;
case 0x3E:
/* CVTQG */
- gen_fcvtqg(rb, rc);
+ REQUIRE_REG_31(ra);
+ gen_helper_cvtqg(vc, cpu_env, vb);
break;
default:
goto invalid_opc;
}
break;
+
case 0x16:
/* IEEE floating-point */
switch (fpfn) { /* fn11 & 0x3F */
@@ -2707,6 +2124,7 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
gen_fcmptle(ctx, ra, rb, rc, fn11);
break;
case 0x2C:
+ REQUIRE_REG_31(ra);
if (fn11 == 0x2AC || fn11 == 0x6AC) {
/* CVTST */
gen_fcvtst(ctx, rb, rc, fn11);
@@ -2717,104 +2135,122 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
break;
case 0x2F:
/* CVTTQ */
+ REQUIRE_REG_31(ra);
gen_fcvttq(ctx, rb, rc, fn11);
break;
case 0x3C:
/* CVTQS */
+ REQUIRE_REG_31(ra);
gen_fcvtqs(ctx, rb, rc, fn11);
break;
case 0x3E:
/* CVTQT */
+ REQUIRE_REG_31(ra);
gen_fcvtqt(ctx, rb, rc, fn11);
break;
default:
goto invalid_opc;
}
break;
+
case 0x17:
switch (fn11) {
case 0x010:
/* CVTLQ */
- gen_fcvtlq(rb, rc);
+ REQUIRE_REG_31(ra);
+ vc = dest_fpr(ctx, rc);
+ vb = load_fpr(ctx, rb);
+ gen_fcvtlq(vc, vb);
break;
case 0x020:
- if (likely(rc != 31)) {
+ /* CPYS */
+ if (rc == 31) {
+ /* Special case CPYS as FNOP. */
+ } else {
+ vc = dest_fpr(ctx, rc);
+ va = load_fpr(ctx, ra);
if (ra == rb) {
- /* FMOV */
- if (ra == 31)
- tcg_gen_movi_i64(cpu_fir[rc], 0);
- else
- tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[ra]);
+ /* Special case CPYS as FMOV. */
+ tcg_gen_mov_i64(vc, va);
} else {
- /* CPYS */
- gen_fcpys(ra, rb, rc);
+ vb = load_fpr(ctx, rb);
+ gen_cpy_mask(vc, va, vb, 0, 0x8000000000000000ULL);
}
}
break;
case 0x021:
/* CPYSN */
- gen_fcpysn(ra, rb, rc);
+ vc = dest_fpr(ctx, rc);
+ vb = load_fpr(ctx, rb);
+ va = load_fpr(ctx, ra);
+ gen_cpy_mask(vc, va, vb, 1, 0x8000000000000000ULL);
break;
case 0x022:
/* CPYSE */
- gen_fcpyse(ra, rb, rc);
+ vc = dest_fpr(ctx, rc);
+ vb = load_fpr(ctx, rb);
+ va = load_fpr(ctx, ra);
+ gen_cpy_mask(vc, va, vb, 0, 0xFFF0000000000000ULL);
break;
case 0x024:
/* MT_FPCR */
- if (likely(ra != 31))
- gen_helper_store_fpcr(cpu_env, cpu_fir[ra]);
- else {
- TCGv tmp = tcg_const_i64(0);
- gen_helper_store_fpcr(cpu_env, tmp);
- tcg_temp_free(tmp);
- }
+ va = load_fpr(ctx, ra);
+ gen_helper_store_fpcr(cpu_env, va);
break;
case 0x025:
/* MF_FPCR */
- if (likely(ra != 31))
- gen_helper_load_fpcr(cpu_fir[ra], cpu_env);
+ va = dest_fpr(ctx, ra);
+ gen_helper_load_fpcr(va, cpu_env);
break;
case 0x02A:
/* FCMOVEQ */
- gen_fcmov(TCG_COND_EQ, ra, rb, rc);
+ gen_fcmov(ctx, TCG_COND_EQ, ra, rb, rc);
break;
case 0x02B:
/* FCMOVNE */
- gen_fcmov(TCG_COND_NE, ra, rb, rc);
+ gen_fcmov(ctx, TCG_COND_NE, ra, rb, rc);
break;
case 0x02C:
/* FCMOVLT */
- gen_fcmov(TCG_COND_LT, ra, rb, rc);
+ gen_fcmov(ctx, TCG_COND_LT, ra, rb, rc);
break;
case 0x02D:
/* FCMOVGE */
- gen_fcmov(TCG_COND_GE, ra, rb, rc);
+ gen_fcmov(ctx, TCG_COND_GE, ra, rb, rc);
break;
case 0x02E:
/* FCMOVLE */
- gen_fcmov(TCG_COND_LE, ra, rb, rc);
+ gen_fcmov(ctx, TCG_COND_LE, ra, rb, rc);
break;
case 0x02F:
/* FCMOVGT */
- gen_fcmov(TCG_COND_GT, ra, rb, rc);
+ gen_fcmov(ctx, TCG_COND_GT, ra, rb, rc);
break;
case 0x030:
/* CVTQL */
- gen_fcvtql(rb, rc);
+ REQUIRE_REG_31(ra);
+ vc = dest_fpr(ctx, rc);
+ vb = load_fpr(ctx, rb);
+ gen_fcvtql(vc, vb);
break;
case 0x130:
/* CVTQL/V */
case 0x530:
/* CVTQL/SV */
+ REQUIRE_REG_31(ra);
/* ??? I'm pretty sure there's nothing that /sv needs to do that
/v doesn't do. The only thing I can think is that /sv is a
valid instruction merely for completeness in the ISA. */
- gen_fcvtql_v(ctx, rb, rc);
+ vc = dest_fpr(ctx, rc);
+ vb = load_fpr(ctx, rb);
+ gen_helper_fcvtql_v_input(cpu_env, vb);
+ gen_fcvtql(vc, vb);
break;
default:
goto invalid_opc;
}
break;
+
case 0x18:
switch ((uint16_t)disp16) {
case 0x0000:
@@ -2843,15 +2279,14 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
break;
case 0xC000:
/* RPCC */
- if (ra != 31) {
- if (use_icount) {
- gen_io_start();
- gen_helper_load_pcc(cpu_ir[ra], cpu_env);
- gen_io_end();
- ret = EXIT_PC_STALE;
- } else {
- gen_helper_load_pcc(cpu_ir[ra], cpu_env);
- }
+ va = dest_gpr(ctx, ra);
+ if (use_icount) {
+ gen_io_start();
+ gen_helper_load_pcc(va, cpu_env);
+ gen_io_end();
+ ret = EXIT_PC_STALE;
+ } else {
+ gen_helper_load_pcc(va, cpu_env);
}
break;
case 0xE000:
@@ -2873,58 +2308,55 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
goto invalid_opc;
}
break;
+
case 0x19:
/* HW_MFPR (PALcode) */
#ifndef CONFIG_USER_ONLY
- if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
- return gen_mfpr(ra, insn & 0xffff);
- }
-#endif
+ REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
+ va = dest_gpr(ctx, ra);
+ ret = gen_mfpr(va, insn & 0xffff);
+ break;
+#else
goto invalid_opc;
+#endif
+
case 0x1A:
/* JMP, JSR, RET, JSR_COROUTINE. These only differ by the branch
prediction stack action, which of course we don't implement. */
- if (rb != 31) {
- tcg_gen_andi_i64(cpu_pc, cpu_ir[rb], ~3);
- } else {
- tcg_gen_movi_i64(cpu_pc, 0);
- }
+ vb = load_gpr(ctx, rb);
+ tcg_gen_andi_i64(cpu_pc, vb, ~3);
if (ra != 31) {
tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
}
ret = EXIT_PC_UPDATED;
break;
+
case 0x1B:
/* HW_LD (PALcode) */
#ifndef CONFIG_USER_ONLY
- if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
- TCGv addr;
+ REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
+ {
+ TCGv addr = tcg_temp_new();
+ vb = load_gpr(ctx, rb);
+ va = dest_gpr(ctx, ra);
- if (ra == 31) {
- break;
- }
-
- addr = tcg_temp_new();
- if (rb != 31)
- tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
- else
- tcg_gen_movi_i64(addr, disp12);
+ tcg_gen_addi_i64(addr, vb, disp12);
switch ((insn >> 12) & 0xF) {
case 0x0:
/* Longword physical access (hw_ldl/p) */
- gen_helper_ldl_phys(cpu_ir[ra], cpu_env, addr);
+ gen_helper_ldl_phys(va, cpu_env, addr);
break;
case 0x1:
/* Quadword physical access (hw_ldq/p) */
- gen_helper_ldq_phys(cpu_ir[ra], cpu_env, addr);
+ gen_helper_ldq_phys(va, cpu_env, addr);
break;
case 0x2:
/* Longword physical access with lock (hw_ldl_l/p) */
- gen_helper_ldl_l_phys(cpu_ir[ra], cpu_env, addr);
+ gen_helper_ldl_l_phys(va, cpu_env, addr);
break;
case 0x3:
/* Quadword physical access with lock (hw_ldq_l/p) */
- gen_helper_ldq_l_phys(cpu_ir[ra], cpu_env, addr);
+ gen_helper_ldq_l_phys(va, cpu_env, addr);
break;
case 0x4:
/* Longword virtual PTE fetch (hw_ldl/v) */
@@ -2947,11 +2379,11 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
goto invalid_opc;
case 0xA:
/* Longword virtual access with protection check (hw_ldl/w) */
- tcg_gen_qemu_ld_i64(cpu_ir[ra], addr, MMU_KERNEL_IDX, MO_LESL);
+ tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LESL);
break;
case 0xB:
/* Quadword virtual access with protection check (hw_ldq/w) */
- tcg_gen_qemu_ld_i64(cpu_ir[ra], addr, MMU_KERNEL_IDX, MO_LEQ);
+ tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LEQ);
break;
case 0xC:
/* Longword virtual access with alt access mode (hw_ldl/a)*/
@@ -2962,282 +2394,215 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
case 0xE:
/* Longword virtual access with alternate access mode and
protection checks (hw_ldl/wa) */
- tcg_gen_qemu_ld_i64(cpu_ir[ra], addr, MMU_USER_IDX, MO_LESL);
+ tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LESL);
break;
case 0xF:
/* Quadword virtual access with alternate access mode and
protection checks (hw_ldq/wa) */
- tcg_gen_qemu_ld_i64(cpu_ir[ra], addr, MMU_USER_IDX, MO_LEQ);
+ tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LEQ);
break;
}
tcg_temp_free(addr);
break;
}
-#endif
+#else
goto invalid_opc;
+#endif
+
case 0x1C:
+ vc = dest_gpr(ctx, rc);
+ if (fn7 == 0x70) {
+ /* FTOIT */
+ REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX);
+ REQUIRE_REG_31(rb);
+ va = load_fpr(ctx, ra);
+ tcg_gen_mov_i64(vc, va);
+ break;
+ } else if (fn7 == 0x78) {
+ /* FTOIS */
+ REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX);
+ REQUIRE_REG_31(rb);
+ t32 = tcg_temp_new_i32();
+ va = load_fpr(ctx, ra);
+ gen_helper_s_to_memory(t32, va);
+ tcg_gen_ext_i32_i64(vc, t32);
+ tcg_temp_free_i32(t32);
+ break;
+ }
+
+ vb = load_gpr_lit(ctx, rb, lit, islit);
switch (fn7) {
case 0x00:
/* SEXTB */
- if ((ctx->tb->flags & TB_FLAGS_AMASK_BWX) == 0) {
- goto invalid_opc;
- }
- if (likely(rc != 31)) {
- if (islit)
- tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int8_t)lit));
- else
- tcg_gen_ext8s_i64(cpu_ir[rc], cpu_ir[rb]);
- }
+ REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
+ REQUIRE_REG_31(ra);
+ tcg_gen_ext8s_i64(vc, vb);
break;
case 0x01:
/* SEXTW */
- if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
- if (likely(rc != 31)) {
- if (islit) {
- tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int16_t)lit));
- } else {
- tcg_gen_ext16s_i64(cpu_ir[rc], cpu_ir[rb]);
- }
- }
- break;
- }
- goto invalid_opc;
+ REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
+ REQUIRE_REG_31(ra);
+ tcg_gen_ext16s_i64(vc, vb);
+ break;
case 0x30:
/* CTPOP */
- if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
- if (likely(rc != 31)) {
- if (islit) {
- tcg_gen_movi_i64(cpu_ir[rc], ctpop64(lit));
- } else {
- gen_helper_ctpop(cpu_ir[rc], cpu_ir[rb]);
- }
- }
- break;
- }
- goto invalid_opc;
+ REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX);
+ REQUIRE_REG_31(ra);
+ gen_helper_ctpop(vc, vb);
+ break;
case 0x31:
/* PERR */
- if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
- gen_perr(ra, rb, rc, islit, lit);
- break;
- }
- goto invalid_opc;
+ REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
+ va = load_gpr(ctx, ra);
+ gen_helper_perr(vc, va, vb);
+ break;
case 0x32:
/* CTLZ */
- if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
- if (likely(rc != 31)) {
- if (islit) {
- tcg_gen_movi_i64(cpu_ir[rc], clz64(lit));
- } else {
- gen_helper_ctlz(cpu_ir[rc], cpu_ir[rb]);
- }
- }
- break;
- }
- goto invalid_opc;
+ REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX);
+ REQUIRE_REG_31(ra);
+ gen_helper_ctlz(vc, vb);
+ break;
case 0x33:
/* CTTZ */
- if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
- if (likely(rc != 31)) {
- if (islit) {
- tcg_gen_movi_i64(cpu_ir[rc], ctz64(lit));
- } else {
- gen_helper_cttz(cpu_ir[rc], cpu_ir[rb]);
- }
- }
- break;
- }
- goto invalid_opc;
+ REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX);
+ REQUIRE_REG_31(ra);
+ gen_helper_cttz(vc, vb);
+ break;
case 0x34:
/* UNPKBW */
- if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
- if (real_islit || ra != 31) {
- goto invalid_opc;
- }
- gen_unpkbw(rb, rc);
- break;
- }
- goto invalid_opc;
+ REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
+ REQUIRE_REG_31(ra);
+ gen_helper_unpkbw(vc, vb);
+ break;
case 0x35:
/* UNPKBL */
- if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
- if (real_islit || ra != 31) {
- goto invalid_opc;
- }
- gen_unpkbl(rb, rc);
- break;
- }
- goto invalid_opc;
+ REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
+ REQUIRE_REG_31(ra);
+ gen_helper_unpkbl(vc, vb);
+ break;
case 0x36:
/* PKWB */
- if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
- if (real_islit || ra != 31) {
- goto invalid_opc;
- }
- gen_pkwb(rb, rc);
- break;
- }
- goto invalid_opc;
+ REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
+ REQUIRE_REG_31(ra);
+ gen_helper_pkwb(vc, vb);
+ break;
case 0x37:
/* PKLB */
- if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
- if (real_islit || ra != 31) {
- goto invalid_opc;
- }
- gen_pklb(rb, rc);
- break;
- }
- goto invalid_opc;
+ REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
+ REQUIRE_REG_31(ra);
+ gen_helper_pklb(vc, vb);
+ break;
case 0x38:
/* MINSB8 */
- if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
- gen_minsb8(ra, rb, rc, islit, lit);
- break;
- }
- goto invalid_opc;
+ REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
+ va = load_gpr(ctx, ra);
+ gen_helper_minsb8(vc, va, vb);
+ break;
case 0x39:
/* MINSW4 */
- if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
- gen_minsw4(ra, rb, rc, islit, lit);
- break;
- }
- goto invalid_opc;
+ REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
+ va = load_gpr(ctx, ra);
+ gen_helper_minsw4(vc, va, vb);
+ break;
case 0x3A:
/* MINUB8 */
- if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
- gen_minub8(ra, rb, rc, islit, lit);
- break;
- }
- goto invalid_opc;
+ REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
+ va = load_gpr(ctx, ra);
+ gen_helper_minub8(vc, va, vb);
+ break;
case 0x3B:
/* MINUW4 */
- if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
- gen_minuw4(ra, rb, rc, islit, lit);
- break;
- }
- goto invalid_opc;
+ REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
+ va = load_gpr(ctx, ra);
+ gen_helper_minuw4(vc, va, vb);
+ break;
case 0x3C:
/* MAXUB8 */
- if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
- gen_maxub8(ra, rb, rc, islit, lit);
- break;
- }
- goto invalid_opc;
+ REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
+ va = load_gpr(ctx, ra);
+ gen_helper_maxub8(vc, va, vb);
+ break;
case 0x3D:
/* MAXUW4 */
- if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
- gen_maxuw4(ra, rb, rc, islit, lit);
- break;
- }
- goto invalid_opc;
+ REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
+ va = load_gpr(ctx, ra);
+ gen_helper_maxuw4(vc, va, vb);
+ break;
case 0x3E:
/* MAXSB8 */
- if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
- gen_maxsb8(ra, rb, rc, islit, lit);
- break;
- }
- goto invalid_opc;
+ REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
+ va = load_gpr(ctx, ra);
+ gen_helper_maxsb8(vc, va, vb);
+ break;
case 0x3F:
/* MAXSW4 */
- if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
- gen_maxsw4(ra, rb, rc, islit, lit);
- break;
- }
- goto invalid_opc;
- case 0x70:
- /* FTOIT */
- if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
- goto invalid_opc;
- }
- if (likely(rc != 31)) {
- if (ra != 31)
- tcg_gen_mov_i64(cpu_ir[rc], cpu_fir[ra]);
- else
- tcg_gen_movi_i64(cpu_ir[rc], 0);
- }
- break;
- case 0x78:
- /* FTOIS */
- if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
- goto invalid_opc;
- }
- if (rc != 31) {
- TCGv_i32 tmp1 = tcg_temp_new_i32();
- if (ra != 31)
- gen_helper_s_to_memory(tmp1, cpu_fir[ra]);
- else {
- TCGv tmp2 = tcg_const_i64(0);
- gen_helper_s_to_memory(tmp1, tmp2);
- tcg_temp_free(tmp2);
- }
- tcg_gen_ext_i32_i64(cpu_ir[rc], tmp1);
- tcg_temp_free_i32(tmp1);
- }
+ REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
+ va = load_gpr(ctx, ra);
+ gen_helper_maxsw4(vc, va, vb);
break;
default:
goto invalid_opc;
}
break;
+
case 0x1D:
/* HW_MTPR (PALcode) */
#ifndef CONFIG_USER_ONLY
- if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
- return gen_mtpr(ctx, rb, insn & 0xffff);
- }
-#endif
+ REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
+ vb = load_gpr(ctx, rb);
+ ret = gen_mtpr(ctx, vb, insn & 0xffff);
+ break;
+#else
goto invalid_opc;
+#endif
+
case 0x1E:
/* HW_RET (PALcode) */
#ifndef CONFIG_USER_ONLY
- if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
- if (rb == 31) {
- /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
- address from EXC_ADDR. This turns out to be useful for our
- emulation PALcode, so continue to accept it. */
- TCGv tmp = tcg_temp_new();
- tcg_gen_ld_i64(tmp, cpu_env, offsetof(CPUAlphaState, exc_addr));
- gen_helper_hw_ret(cpu_env, tmp);
- tcg_temp_free(tmp);
- } else {
- gen_helper_hw_ret(cpu_env, cpu_ir[rb]);
- }
- ret = EXIT_PC_UPDATED;
- break;
+ REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
+ if (rb == 31) {
+ /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
+ address from EXC_ADDR. This turns out to be useful for our
+ emulation PALcode, so continue to accept it. */
+ tmp = tcg_temp_new();
+ tcg_gen_ld_i64(tmp, cpu_env, offsetof(CPUAlphaState, exc_addr));
+ gen_helper_hw_ret(cpu_env, tmp);
+ tcg_temp_free(tmp);
+ } else {
+ gen_helper_hw_ret(cpu_env, load_gpr(ctx, rb));
}
-#endif
+ ret = EXIT_PC_UPDATED;
+ break;
+#else
goto invalid_opc;
+#endif
+
case 0x1F:
/* HW_ST (PALcode) */
#ifndef CONFIG_USER_ONLY
- if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
- TCGv addr, val;
- addr = tcg_temp_new();
- if (rb != 31)
- tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
- else
- tcg_gen_movi_i64(addr, disp12);
- if (ra != 31)
- val = cpu_ir[ra];
- else {
- val = tcg_temp_new();
- tcg_gen_movi_i64(val, 0);
- }
+ REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
+ {
+ TCGv addr = tcg_temp_new();
+ va = load_gpr(ctx, ra);
+ vb = load_gpr(ctx, rb);
+
+ tcg_gen_addi_i64(addr, vb, disp12);
switch ((insn >> 12) & 0xF) {
case 0x0:
/* Longword physical access */
- gen_helper_stl_phys(cpu_env, addr, val);
+ gen_helper_stl_phys(cpu_env, addr, va);
break;
case 0x1:
/* Quadword physical access */
- gen_helper_stq_phys(cpu_env, addr, val);
+ gen_helper_stq_phys(cpu_env, addr, va);
break;
case 0x2:
/* Longword physical access with lock */
- gen_helper_stl_c_phys(val, cpu_env, addr, val);
+ gen_helper_stl_c_phys(dest_gpr(ctx, ra), cpu_env, addr, va);
break;
case 0x3:
/* Quadword physical access with lock */
- gen_helper_stq_c_phys(val, cpu_env, addr, val);
+ gen_helper_stq_c_phys(dest_gpr(ctx, ra), cpu_env, addr, va);
break;
case 0x4:
/* Longword virtual access */
@@ -3276,13 +2641,12 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
/* Invalid */
goto invalid_opc;
}
- if (ra == 31)
- tcg_temp_free(val);
tcg_temp_free(addr);
break;
}
-#endif
+#else
goto invalid_opc;
+#endif
case 0x20:
/* LDF */
gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
@@ -3463,8 +2827,8 @@ static inline void gen_intermediate_code_internal(AlphaCPU *cpu,
gen_tb_start();
do {
- if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
- QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
+ if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
+ QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
if (bp->pc == ctx.pc) {
gen_excp(&ctx, EXCP_DEBUG, 0);
break;
@@ -3482,8 +2846,9 @@ static inline void gen_intermediate_code_internal(AlphaCPU *cpu,
tcg_ctx.gen_opc_instr_start[lj] = 1;
tcg_ctx.gen_opc_icount[lj] = num_insns;
}
- if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
+ if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) {
gen_io_start();
+ }
insn = cpu_ldl_code(env, ctx.pc);
num_insns++;
@@ -3491,9 +2856,24 @@ static inline void gen_intermediate_code_internal(AlphaCPU *cpu,
tcg_gen_debug_insn_start(ctx.pc);
}
+ TCGV_UNUSED_I64(ctx.zero);
+ TCGV_UNUSED_I64(ctx.sink);
+ TCGV_UNUSED_I64(ctx.lit);
+
ctx.pc += 4;
ret = translate_one(ctxp, insn);
+ if (!TCGV_IS_UNUSED_I64(ctx.sink)) {
+ tcg_gen_discard_i64(ctx.sink);
+ tcg_temp_free(ctx.sink);
+ }
+ if (!TCGV_IS_UNUSED_I64(ctx.zero)) {
+ tcg_temp_free(ctx.zero);
+ }
+ if (!TCGV_IS_UNUSED_I64(ctx.lit)) {
+ tcg_temp_free(ctx.lit);
+ }
+
/* If we reach a page boundary, are single stepping,
or exhaust instruction count, stop generation. */
if (ret == NO_EXIT
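The rewritten translate_one() above consistently replaces the old open-coded cpu_ir[]/register-31 checks with small accessor helpers (load_gpr, load_gpr_lit, dest_gpr, load_zero) plus the per-instruction ctx.zero/ctx.sink/ctx.lit temporaries that gen_intermediate_code_internal() now frees after each instruction. Their definitions live in an earlier hunk of this patch that is not shown here; the sketch below is only a plausible reconstruction inferred from the call sites, so the exact names and details are assumptions rather than the patch's literal text.

    /* Sketch only, inferred from how the helpers are used above.  Reads of
       $31 come back as a cached constant zero; writes to $31 go to a scratch
       "sink" temporary that the main loop discards after the instruction.  */
    static TCGv load_zero(DisasContext *ctx)
    {
        if (TCGV_IS_UNUSED_I64(ctx->zero)) {
            ctx->zero = tcg_const_i64(0);
        }
        return ctx->zero;
    }

    static TCGv load_gpr(DisasContext *ctx, unsigned reg)
    {
        return reg == 31 ? load_zero(ctx) : cpu_ir[reg];
    }

    static TCGv load_gpr_lit(DisasContext *ctx, unsigned reg,
                             uint8_t lit, bool islit)
    {
        if (islit) {
            ctx->lit = tcg_const_i64(lit);   /* freed by the main loop */
            return ctx->lit;
        }
        return load_gpr(ctx, reg);
    }

    static TCGv dest_gpr(DisasContext *ctx, unsigned reg)
    {
        if (reg == 31) {
            if (TCGV_IS_UNUSED_I64(ctx->sink)) {
                ctx->sink = tcg_temp_new();  /* discarded by the main loop */
            }
            return ctx->sink;
        }
        return cpu_ir[reg];
    }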
diff --git a/target-arm/arm-semi.c b/target-arm/arm-semi.c
index ee469c49c5..ebb5235521 100644
--- a/target-arm/arm-semi.c
+++ b/target-arm/arm-semi.c
@@ -127,7 +127,7 @@ static void arm_semi_cb(CPUState *cs, target_ulong ret, target_ulong err)
ARMCPU *cpu = ARM_CPU(cs);
CPUARMState *env = &cpu->env;
#ifdef CONFIG_USER_ONLY
- TaskState *ts = env->opaque;
+ TaskState *ts = cs->opaque;
#endif
if (ret == (target_ulong)-1) {
@@ -164,7 +164,7 @@ static void arm_semi_flen_cb(CPUState *cs, target_ulong ret, target_ulong err)
cpu_memory_rw_debug(cs, env->regs[13]-64+32, (uint8_t *)&size, 4, 0);
env->regs[0] = be32_to_cpu(size);
#ifdef CONFIG_USER_ONLY
- ((TaskState *)env->opaque)->swi_errno = err;
+ ((TaskState *)cs->opaque)->swi_errno = err;
#else
syscall_err = err;
#endif
@@ -183,6 +183,7 @@ static void arm_semi_flen_cb(CPUState *cs, target_ulong ret, target_ulong err)
uint32_t do_arm_semihosting(CPUARMState *env)
{
ARMCPU *cpu = arm_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
target_ulong args;
target_ulong arg0, arg1, arg2, arg3;
char * s;
@@ -190,7 +191,7 @@ uint32_t do_arm_semihosting(CPUARMState *env)
uint32_t ret;
uint32_t len;
#ifdef CONFIG_USER_ONLY
- TaskState *ts = env->opaque;
+ TaskState *ts = cs->opaque;
#else
CPUARMState *ts = env;
#endif
@@ -554,7 +555,7 @@ uint32_t do_arm_semihosting(CPUARMState *env)
exit(0);
default:
fprintf(stderr, "qemu: Unsupported SemiHosting SWI 0x%02x\n", nr);
- cpu_dump_state(CPU(cpu), stderr, fprintf, 0);
+ cpu_dump_state(cs, stderr, fprintf, 0);
abort();
}
}
diff --git a/target-arm/cpu-qom.h b/target-arm/cpu-qom.h
index 00234e1d3d..edc7f262fc 100644
--- a/target-arm/cpu-qom.h
+++ b/target-arm/cpu-qom.h
@@ -116,6 +116,7 @@ typedef struct ARMCPU {
uint32_t reset_fpsid;
uint32_t mvfr0;
uint32_t mvfr1;
+ uint32_t mvfr2;
uint32_t ctr;
uint32_t reset_sctlr;
uint32_t id_pfr0;
@@ -147,9 +148,12 @@ typedef struct ARMCPU {
* in the order L1DCache, L1ICache, L2DCache, L2ICache, etc.
*/
uint32_t ccsidr[16];
- uint32_t reset_cbar;
+ uint64_t reset_cbar;
uint32_t reset_auxcr;
bool reset_hivecs;
+ /* DCZ blocksize, in log_2(words), ie low 4 bits of DCZID_EL0 */
+ uint32_t dcz_blocksize;
+ uint64_t rvbar;
} ARMCPU;
#define TYPE_AARCH64_CPU "aarch64-cpu"
@@ -196,10 +200,10 @@ void arm_gt_ptimer_cb(void *opaque);
void arm_gt_vtimer_cb(void *opaque);
#ifdef TARGET_AARCH64
-void aarch64_cpu_dump_state(CPUState *cs, FILE *f,
- fprintf_function cpu_fprintf, int flags);
int aarch64_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
int aarch64_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
+
+void aarch64_cpu_do_interrupt(CPUState *cs);
#endif
#endif
diff --git a/target-arm/cpu.c b/target-arm/cpu.c
index 1ce8a9bc38..c0ddc3e3df 100644
--- a/target-arm/cpu.c
+++ b/target-arm/cpu.c
@@ -19,6 +19,7 @@
*/
#include "cpu.h"
+#include "internals.h"
#include "qemu-common.h"
#include "hw/qdev-properties.h"
#include "qapi/qmp/qerror.h"
@@ -36,6 +37,12 @@ static void arm_cpu_set_pc(CPUState *cs, vaddr value)
cpu->env.regs[15] = value;
}
+static bool arm_cpu_has_work(CPUState *cs)
+{
+ return cs->interrupt_request &
+ (CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB);
+}
+
static void cp_reg_reset(gpointer key, gpointer value, gpointer opaque)
{
/* Reset a single ARMCPRegInfo register */
@@ -76,11 +83,12 @@ static void arm_cpu_reset(CPUState *s)
acc->parent_reset(s);
- memset(env, 0, offsetof(CPUARMState, breakpoints));
+ memset(env, 0, offsetof(CPUARMState, features));
g_hash_table_foreach(cpu->cp_regs, cp_reg_reset, cpu);
env->vfp.xregs[ARM_VFP_FPSID] = cpu->reset_fpsid;
env->vfp.xregs[ARM_VFP_MVFR0] = cpu->mvfr0;
env->vfp.xregs[ARM_VFP_MVFR1] = cpu->mvfr1;
+ env->vfp.xregs[ARM_VFP_MVFR2] = cpu->mvfr2;
if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
env->iwmmxt.cregs[ARM_IWMMXT_wCID] = 0x69051000 | 'Q';
@@ -93,8 +101,16 @@ static void arm_cpu_reset(CPUState *s)
env->pstate = PSTATE_MODE_EL0t;
/* Userspace expects access to CTL_EL0 and the cache ops */
env->cp15.c1_sys |= SCTLR_UCT | SCTLR_UCI;
+ /* and to the FP/Neon instructions */
+ env->cp15.c1_coproc = deposit64(env->cp15.c1_coproc, 20, 2, 3);
#else
env->pstate = PSTATE_MODE_EL1h;
+ env->pc = cpu->rvbar;
+#endif
+ } else {
+#if defined(CONFIG_USER_ONLY)
+ /* Userspace expects access to cp10 and cp11 for FP/Neon */
+ env->cp15.c1_coproc = deposit64(env->cp15.c1_coproc, 20, 4, 0xf);
#endif
}
@@ -143,7 +159,7 @@ static void arm_cpu_reset(CPUState *s)
&env->vfp.fp_status);
set_float_detect_tininess(float_tininess_before_rounding,
&env->vfp.standard_fp_status);
- tlb_flush(env, 1);
+ tlb_flush(s, 1);
/* Reset is a state change for some CPUARMState fields which we
* bake assumptions about into translated code, so we need to
* tb_flush().
@@ -246,16 +262,20 @@ static void arm_cpu_initfn(Object *obj)
}
static Property arm_cpu_reset_cbar_property =
- DEFINE_PROP_UINT32("reset-cbar", ARMCPU, reset_cbar, 0);
+ DEFINE_PROP_UINT64("reset-cbar", ARMCPU, reset_cbar, 0);
static Property arm_cpu_reset_hivecs_property =
DEFINE_PROP_BOOL("reset-hivecs", ARMCPU, reset_hivecs, false);
+static Property arm_cpu_rvbar_property =
+ DEFINE_PROP_UINT64("rvbar", ARMCPU, rvbar, 0);
+
static void arm_cpu_post_init(Object *obj)
{
ARMCPU *cpu = ARM_CPU(obj);
- if (arm_feature(&cpu->env, ARM_FEATURE_CBAR)) {
+ if (arm_feature(&cpu->env, ARM_FEATURE_CBAR) ||
+ arm_feature(&cpu->env, ARM_FEATURE_CBAR_RO)) {
qdev_property_add_static(DEVICE(obj), &arm_cpu_reset_cbar_property,
&error_abort);
}
@@ -264,6 +284,11 @@ static void arm_cpu_post_init(Object *obj)
qdev_property_add_static(DEVICE(obj), &arm_cpu_reset_hivecs_property,
&error_abort);
}
+
+ if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
+ qdev_property_add_static(DEVICE(obj), &arm_cpu_rvbar_property,
+ &error_abort);
+ }
}
static void arm_cpu_finalizefn(Object *obj)
@@ -325,6 +350,9 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
set_feature(env, ARM_FEATURE_V7MP);
set_feature(env, ARM_FEATURE_PXN);
}
+ if (arm_feature(env, ARM_FEATURE_CBAR_RO)) {
+ set_feature(env, ARM_FEATURE_CBAR);
+ }
if (cpu->reset_hivecs) {
cpu->reset_sctlr |= (1 << 13);
@@ -411,7 +439,7 @@ static void arm1026_initfn(Object *obj)
ARMCPRegInfo ifar = {
.name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
.access = PL1_RW,
- .fieldoffset = offsetof(CPUARMState, cp15.c6_insn),
+ .fieldoffset = offsetofhigh32(CPUARMState, cp15.far_el1),
.resetvalue = 0
};
define_one_arm_cp_reg(cpu, &ifar);
@@ -716,7 +744,7 @@ static void cortex_a15_initfn(Object *obj)
set_feature(&cpu->env, ARM_FEATURE_ARM_DIV);
set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
- set_feature(&cpu->env, ARM_FEATURE_CBAR);
+ set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
set_feature(&cpu->env, ARM_FEATURE_LPAE);
cpu->kvm_target = QEMU_KVM_ARM_TARGET_CORTEX_A15;
cpu->midr = 0x412fc0f1;
@@ -1001,12 +1029,15 @@ static void arm_cpu_class_init(ObjectClass *oc, void *data)
cc->reset = arm_cpu_reset;
cc->class_by_name = arm_cpu_class_by_name;
+ cc->has_work = arm_cpu_has_work;
cc->do_interrupt = arm_cpu_do_interrupt;
cc->dump_state = arm_cpu_dump_state;
cc->set_pc = arm_cpu_set_pc;
cc->gdb_read_register = arm_cpu_gdb_read_register;
cc->gdb_write_register = arm_cpu_gdb_write_register;
-#ifndef CONFIG_USER_ONLY
+#ifdef CONFIG_USER_ONLY
+ cc->handle_mmu_fault = arm_cpu_handle_mmu_fault;
+#else
cc->get_phys_page_debug = arm_cpu_get_phys_page_debug;
cc->vmsd = &vmstate_arm_cpu;
#endif
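The reset-cbar, reset-hivecs and rvbar properties registered in arm_cpu_post_init() above are ordinary QOM properties, so a board model can program them before realizing the CPU. The snippet below is a hypothetical usage sketch, not code from this patch: the addresses are made up, cpuobj is assumed to be a CPU object the board created with the CBAR and AArch64 features set, and the QOM setters of this era take the value before the property name.

    /* Hypothetical board code, for illustration only. */
    object_property_set_int(cpuobj, 0x2c000000, "reset-cbar", &error_abort);
    object_property_set_int(cpuobj, 0x80000000, "rvbar", &error_abort);
    object_property_set_bool(cpuobj, true, "realized", &error_abort);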
diff --git a/target-arm/cpu.h b/target-arm/cpu.h
index 0a7edfe6cb..c83f2495a8 100644
--- a/target-arm/cpu.h
+++ b/target-arm/cpu.h
@@ -111,11 +111,6 @@ typedef struct ARMGenericTimer {
#define GTIMER_VIRT 1
#define NUM_GTIMERS 2
-/* Scale factor for generic timers, ie number of ns per tick.
- * This gives a 62.5MHz timer.
- */
-#define GTIMER_SCALE 16
-
typedef struct CPUARMState {
/* Regs for current mode. */
uint32_t regs[16];
@@ -148,7 +143,7 @@ typedef struct CPUARMState {
uint32_t spsr;
/* Banked registers. */
- uint32_t banked_spsr[6];
+ uint64_t banked_spsr[6];
uint32_t banked_r13[6];
uint32_t banked_r14[6];
@@ -165,7 +160,10 @@ typedef struct CPUARMState {
uint32_t GE; /* cpsr[19:16] */
uint32_t thumb; /* cpsr[5]. 0 = arm mode, 1 = thumb mode. */
uint32_t condexec_bits; /* IT bits. cpsr[15:10,26:25]. */
- uint32_t daif; /* exception masks, in the bits they are in in PSTATE */
+ uint64_t daif; /* exception masks, in the bits they are in in PSTATE */
+
+ uint64_t elr_el1; /* AArch64 ELR_EL1 */
+ uint64_t sp_el[2]; /* AArch64 banked stack pointers */
/* System control coprocessor (cp15) */
struct {
@@ -184,13 +182,13 @@ typedef struct CPUARMState {
uint32_t c2_insn; /* MPU instruction cachable bits. */
uint32_t c3; /* MMU domain access control register
MPU write buffer control. */
- uint32_t c5_insn; /* Fault status registers. */
- uint32_t c5_data;
+ uint32_t pmsav5_data_ap; /* PMSAv5 MPU data access permissions */
+ uint32_t pmsav5_insn_ap; /* PMSAv5 MPU insn access permissions */
+ uint32_t ifsr_el2; /* Fault status registers. */
+ uint64_t esr_el1;
uint32_t c6_region[8]; /* MPU base/size registers. */
- uint32_t c6_insn; /* Fault address registers. */
- uint32_t c6_data;
- uint32_t c7_par; /* Translation result. */
- uint32_t c7_par_hi; /* Translation result, high 32 bits */
+ uint64_t far_el1; /* Fault address registers. */
+ uint64_t par_el1; /* Translation result. */
uint32_t c9_insn; /* Cache lockdown registers. */
uint32_t c9_data;
uint32_t c9_pmcr; /* performance monitor control register */
@@ -202,7 +200,7 @@ typedef struct CPUARMState {
uint64_t mair_el1;
uint64_t c12_vbar; /* vector base address register */
uint32_t c13_fcse; /* FCSE PID. */
- uint32_t c13_context; /* Context ID. */
+ uint64_t contextidr_el1; /* Context ID. */
uint64_t tpidr_el0; /* User RW Thread register. */
uint64_t tpidrro_el0; /* User RO Thread register. */
uint64_t tpidr_el1; /* Privileged Thread register. */
@@ -238,6 +236,21 @@ typedef struct CPUARMState {
int pending_exception;
} v7m;
+ /* Information associated with an exception about to be taken:
+ * code which raises an exception must set cs->exception_index and
+ * the relevant parts of this structure; the cpu_do_interrupt function
+ * will then set the guest-visible registers as part of the exception
+ * entry process.
+ */
+ struct {
+ uint32_t syndrome; /* AArch64 format syndrome register */
+ uint32_t fsr; /* AArch32 format fault status register info */
+ uint64_t vaddress; /* virtual addr associated with exception, if any */
+ /* If we implement EL2 we will also need to store information
+ * about the intermediate physical address for stage 2 faults.
+ */
+ } exception;
+
/* Thumb-2 EE state. */
uint32_t teecr;
uint32_t teehbr;
@@ -322,11 +335,7 @@ typedef struct CPUARMState {
#include "cpu-qom.h"
ARMCPU *cpu_arm_init(const char *cpu_model);
-void arm_translate_init(void);
-void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu);
int cpu_arm_exec(CPUARMState *s);
-int bank_number(int mode);
-void switch_mode(CPUARMState *, int);
uint32_t do_arm_semihosting(CPUARMState *env);
static inline bool is_a64(CPUARMState *env)
@@ -339,9 +348,8 @@ static inline bool is_a64(CPUARMState *env)
is returned if the signal was handled by the virtual CPU. */
int cpu_arm_signal_handler(int host_signum, void *pinfo,
void *puc);
-int cpu_arm_handle_mmu_fault (CPUARMState *env, target_ulong address, int rw,
- int mmu_idx);
-#define cpu_handle_mmu_fault cpu_arm_handle_mmu_fault
+int arm_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw,
+ int mmu_idx);
/* SCTLR bit meanings. Several bits have been reused in newer
* versions of the architecture; in that case we define constants
@@ -426,6 +434,7 @@ int cpu_arm_handle_mmu_fault (CPUARMState *env, target_ulong address, int rw,
* Only these are valid when in AArch64 mode; in
* AArch32 mode SPSRs are basically CPSR-format.
*/
+#define PSTATE_SP (1U)
#define PSTATE_M (0xFU)
#define PSTATE_nRW (1U << 4)
#define PSTATE_F (1U << 6)
@@ -549,17 +558,6 @@ static inline void vfp_set_fpcr(CPUARMState *env, uint32_t val)
vfp_set_fpscr(env, new_fpscr);
}
-enum arm_fprounding {
- FPROUNDING_TIEEVEN,
- FPROUNDING_POSINF,
- FPROUNDING_NEGINF,
- FPROUNDING_ZERO,
- FPROUNDING_TIEAWAY,
- FPROUNDING_ODD
-};
-
-int arm_rmode_to_sf(int rmode);
-
enum arm_cpu_mode {
ARM_CPU_MODE_USR = 0x10,
ARM_CPU_MODE_FIQ = 0x11,
@@ -573,6 +571,7 @@ enum arm_cpu_mode {
/* VFP system registers. */
#define ARM_VFP_FPSID 0
#define ARM_VFP_FPSCR 1
+#define ARM_VFP_MVFR2 5
#define ARM_VFP_MVFR1 6
#define ARM_VFP_MVFR0 7
#define ARM_VFP_FPEXC 8
@@ -631,6 +630,7 @@ enum arm_features {
ARM_FEATURE_V8_AES, /* implements AES part of v8 Crypto Extensions */
ARM_FEATURE_CBAR, /* has cp15 CBAR */
ARM_FEATURE_CRC, /* ARMv8 CRC instructions */
+ ARM_FEATURE_CBAR_RO, /* has cp15 CBAR and it is read-only */
};
static inline int arm_feature(CPUARMState *env, int feature)
@@ -764,7 +764,8 @@ static inline uint64_t cpreg_to_kvm_id(uint32_t cpregid)
#define ARM_CP_WFI (ARM_CP_SPECIAL | (2 << 8))
#define ARM_CP_NZCV (ARM_CP_SPECIAL | (3 << 8))
#define ARM_CP_CURRENTEL (ARM_CP_SPECIAL | (4 << 8))
-#define ARM_LAST_SPECIAL ARM_CP_CURRENTEL
+#define ARM_CP_DC_ZVA (ARM_CP_SPECIAL | (5 << 8))
+#define ARM_LAST_SPECIAL ARM_CP_DC_ZVA
/* Used only as a terminator for ARMCPRegInfo lists */
#define ARM_CP_SENTINEL 0xffff
/* Mask of only the flag bits in a type field */
@@ -1110,10 +1111,14 @@ static inline int cpu_mmu_index (CPUARMState *env)
#define ARM_TBFLAG_CONDEXEC_MASK (0xff << ARM_TBFLAG_CONDEXEC_SHIFT)
#define ARM_TBFLAG_BSWAP_CODE_SHIFT 16
#define ARM_TBFLAG_BSWAP_CODE_MASK (1 << ARM_TBFLAG_BSWAP_CODE_SHIFT)
+#define ARM_TBFLAG_CPACR_FPEN_SHIFT 17
+#define ARM_TBFLAG_CPACR_FPEN_MASK (1 << ARM_TBFLAG_CPACR_FPEN_SHIFT)
/* Bit usage when in AArch64 state */
#define ARM_TBFLAG_AA64_EL_SHIFT 0
#define ARM_TBFLAG_AA64_EL_MASK (0x3 << ARM_TBFLAG_AA64_EL_SHIFT)
+#define ARM_TBFLAG_AA64_FPEN_SHIFT 2
+#define ARM_TBFLAG_AA64_FPEN_MASK (1 << ARM_TBFLAG_AA64_FPEN_SHIFT)
/* some convenience accessor macros */
#define ARM_TBFLAG_AARCH64_STATE(F) \
@@ -1132,16 +1137,25 @@ static inline int cpu_mmu_index (CPUARMState *env)
(((F) & ARM_TBFLAG_CONDEXEC_MASK) >> ARM_TBFLAG_CONDEXEC_SHIFT)
#define ARM_TBFLAG_BSWAP_CODE(F) \
(((F) & ARM_TBFLAG_BSWAP_CODE_MASK) >> ARM_TBFLAG_BSWAP_CODE_SHIFT)
+#define ARM_TBFLAG_CPACR_FPEN(F) \
+ (((F) & ARM_TBFLAG_CPACR_FPEN_MASK) >> ARM_TBFLAG_CPACR_FPEN_SHIFT)
#define ARM_TBFLAG_AA64_EL(F) \
(((F) & ARM_TBFLAG_AA64_EL_MASK) >> ARM_TBFLAG_AA64_EL_SHIFT)
+#define ARM_TBFLAG_AA64_FPEN(F) \
+ (((F) & ARM_TBFLAG_AA64_FPEN_MASK) >> ARM_TBFLAG_AA64_FPEN_SHIFT)
static inline void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
target_ulong *cs_base, int *flags)
{
+ int fpen = extract32(env->cp15.c1_coproc, 20, 2);
+
if (is_a64(env)) {
*pc = env->pc;
*flags = ARM_TBFLAG_AARCH64_STATE_MASK
| (arm_current_pl(env) << ARM_TBFLAG_AA64_EL_SHIFT);
+ if (fpen == 3 || (fpen == 1 && arm_current_pl(env) != 0)) {
+ *flags |= ARM_TBFLAG_AA64_FPEN_MASK;
+ }
} else {
int privmode;
*pc = env->regs[15];
@@ -1158,20 +1172,18 @@ static inline void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
if (privmode) {
*flags |= ARM_TBFLAG_PRIV_MASK;
}
- if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) {
+ if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)
+ || arm_el_is_aa64(env, 1)) {
*flags |= ARM_TBFLAG_VFPEN_MASK;
}
+ if (fpen == 3 || (fpen == 1 && arm_current_pl(env) != 0)) {
+ *flags |= ARM_TBFLAG_CPACR_FPEN_MASK;
+ }
}
*cs_base = 0;
}
-static inline bool cpu_has_work(CPUState *cpu)
-{
- return cpu->interrupt_request &
- (CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB);
-}
-
#include "exec/exec-all.h"
static inline void cpu_pc_from_tb(CPUARMState *env, TranslationBlock *tb)
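The cpu_get_tb_cpu_state() changes above fold the CPACR/CPACR_EL1 FPEN field into the TB flags so translated code knows whether FP/Neon access is permitted at the current exception level. Written out as a standalone predicate, the decision the patch encodes is roughly the sketch below (the helper name is invented; the field access and the condition are taken from the hunk above):

    /* Sketch: FPEN is CPACR bits [21:20].  0b11 never traps, 0b01 traps only
       at EL0, and 0b00/0b10 trap at EL0 and EL1, so FP is usable when FPEN
       is 3, or when it is 1 and we are not at EL0. */
    static bool fp_access_allowed(CPUARMState *env)
    {
        int fpen = extract32(env->cp15.c1_coproc, 20, 2);

        return fpen == 3 || (fpen == 1 && arm_current_pl(env) != 0);
    }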
diff --git a/target-arm/cpu64.c b/target-arm/cpu64.c
index 8426bf1333..8daa622bdc 100644
--- a/target-arm/cpu64.c
+++ b/target-arm/cpu64.c
@@ -32,6 +32,104 @@ static inline void set_feature(CPUARMState *env, int feature)
env->features |= 1ULL << feature;
}
+#ifndef CONFIG_USER_ONLY
+static uint64_t a57_l2ctlr_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ /* Number of processors is in [25:24]; otherwise we RAZ */
+ return (smp_cpus - 1) << 24;
+}
+#endif
+
+static const ARMCPRegInfo cortexa57_cp_reginfo[] = {
+#ifndef CONFIG_USER_ONLY
+ { .name = "L2CTLR_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 1, .crn = 11, .crm = 0, .opc2 = 2,
+ .access = PL1_RW, .readfn = a57_l2ctlr_read,
+ .writefn = arm_cp_write_ignore },
+ { .name = "L2CTLR",
+ .cp = 15, .opc1 = 1, .crn = 9, .crm = 0, .opc2 = 2,
+ .access = PL1_RW, .readfn = a57_l2ctlr_read,
+ .writefn = arm_cp_write_ignore },
+#endif
+ { .name = "L2ECTLR_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 1, .crn = 11, .crm = 0, .opc2 = 3,
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "L2ECTLR",
+ .cp = 15, .opc1 = 1, .crn = 9, .crm = 0, .opc2 = 3,
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "L2ACTLR", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 0, .opc2 = 0,
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "CPUACTLR_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 2, .opc2 = 0,
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "CPUACTLR",
+ .cp = 15, .opc1 = 0, .crm = 15,
+ .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
+ { .name = "CPUECTLR_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 2, .opc2 = 1,
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "CPUECTLR",
+ .cp = 15, .opc1 = 1, .crm = 15,
+ .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
+ { .name = "CPUMERRSR_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 2, .opc2 = 2,
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "CPUMERRSR",
+ .cp = 15, .opc1 = 2, .crm = 15,
+ .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
+ { .name = "L2MERRSR_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 2, .opc2 = 3,
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "L2MERRSR",
+ .cp = 15, .opc1 = 3, .crm = 15,
+ .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
+ REGINFO_SENTINEL
+};
+
+static void aarch64_a57_initfn(Object *obj)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ set_feature(&cpu->env, ARM_FEATURE_V8);
+ set_feature(&cpu->env, ARM_FEATURE_VFP4);
+ set_feature(&cpu->env, ARM_FEATURE_VFP_FP16);
+ set_feature(&cpu->env, ARM_FEATURE_NEON);
+ set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
+ set_feature(&cpu->env, ARM_FEATURE_AARCH64);
+ set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
+ cpu->kvm_target = QEMU_KVM_ARM_TARGET_CORTEX_A57;
+ cpu->midr = 0x411fd070;
+ cpu->reset_fpsid = 0x41034070;
+ cpu->mvfr0 = 0x10110222;
+ cpu->mvfr1 = 0x12111111;
+ cpu->mvfr2 = 0x00000043;
+ cpu->ctr = 0x8444c004;
+ cpu->reset_sctlr = 0x00c50838;
+ cpu->id_pfr0 = 0x00000131;
+ cpu->id_pfr1 = 0x00011011;
+ cpu->id_dfr0 = 0x03010066;
+ cpu->id_afr0 = 0x00000000;
+ cpu->id_mmfr0 = 0x10101105;
+ cpu->id_mmfr1 = 0x40000000;
+ cpu->id_mmfr2 = 0x01260000;
+ cpu->id_mmfr3 = 0x02102211;
+ cpu->id_isar0 = 0x02101110;
+ cpu->id_isar1 = 0x13112111;
+ cpu->id_isar2 = 0x21232042;
+ cpu->id_isar3 = 0x01112131;
+ cpu->id_isar4 = 0x00011142;
+ cpu->id_aa64pfr0 = 0x00002222;
+ cpu->id_aa64dfr0 = 0x10305106;
+ cpu->id_aa64isar0 = 0x00010000;
+ cpu->id_aa64mmfr0 = 0x00001124;
+ cpu->clidr = 0x0a200023;
+ cpu->ccsidr[0] = 0x701fe00a; /* 32KB L1 dcache */
+ cpu->ccsidr[1] = 0x201fe012; /* 48KB L1 icache */
+ cpu->ccsidr[2] = 0x70ffe07a; /* 2048KB L2 cache */
+ cpu->dcz_blocksize = 4; /* 64 bytes */
+}
+
#ifdef CONFIG_USER_ONLY
static void aarch64_any_initfn(Object *obj)
{
@@ -41,11 +139,11 @@ static void aarch64_any_initfn(Object *obj)
set_feature(&cpu->env, ARM_FEATURE_VFP4);
set_feature(&cpu->env, ARM_FEATURE_VFP_FP16);
set_feature(&cpu->env, ARM_FEATURE_NEON);
- set_feature(&cpu->env, ARM_FEATURE_THUMB2EE);
set_feature(&cpu->env, ARM_FEATURE_ARM_DIV);
set_feature(&cpu->env, ARM_FEATURE_V7MP);
set_feature(&cpu->env, ARM_FEATURE_AARCH64);
cpu->ctr = 0x80030003; /* 32 byte I and D cacheline size, VIPT icache */
+ cpu->dcz_blocksize = 7; /* 512 bytes */
}
#endif
@@ -56,6 +154,7 @@ typedef struct ARMCPUInfo {
} ARMCPUInfo;
static const ARMCPUInfo aarch64_cpus[] = {
+ { .name = "cortex-a57", .initfn = aarch64_a57_initfn },
#ifdef CONFIG_USER_ONLY
{ .name = "any", .initfn = aarch64_any_initfn },
#endif
@@ -73,18 +172,22 @@ static void aarch64_cpu_finalizefn(Object *obj)
static void aarch64_cpu_set_pc(CPUState *cs, vaddr value)
{
ARMCPU *cpu = ARM_CPU(cs);
- /*
- * TODO: this will need updating for system emulation,
- * when the core may be in AArch32 mode.
+ /* It's OK to look at env for the current mode here, because it's
+ * never possible for an AArch64 TB to chain to an AArch32 TB.
+ * (Otherwise we would need to use synchronize_from_tb instead.)
*/
- cpu->env.pc = value;
+ if (is_a64(&cpu->env)) {
+ cpu->env.pc = value;
+ } else {
+ cpu->env.regs[15] = value;
+ }
}
static void aarch64_cpu_class_init(ObjectClass *oc, void *data)
{
CPUClass *cc = CPU_CLASS(oc);
- cc->dump_state = aarch64_cpu_dump_state;
+ cc->do_interrupt = aarch64_cpu_do_interrupt;
cc->set_pc = aarch64_cpu_set_pc;
cc->gdb_read_register = aarch64_cpu_gdb_read_register;
cc->gdb_write_register = aarch64_cpu_gdb_write_register;
diff --git a/target-arm/gdbstub64.c b/target-arm/gdbstub64.c
index e8a82952a4..8f3b8d1778 100644
--- a/target-arm/gdbstub64.c
+++ b/target-arm/gdbstub64.c
@@ -32,10 +32,8 @@ int aarch64_cpu_gdb_read_register(CPUState *cs, uint8_t *mem_buf, int n)
switch (n) {
case 31:
return gdb_get_reg64(mem_buf, env->xregs[31]);
- break;
case 32:
return gdb_get_reg64(mem_buf, env->pc);
- break;
case 33:
return gdb_get_reg32(mem_buf, pstate_read(env));
}
diff --git a/target-arm/helper-a64.c b/target-arm/helper-a64.c
index c2ce33ee88..bf921ccdc0 100644
--- a/target-arm/helper-a64.c
+++ b/target-arm/helper-a64.c
@@ -23,6 +23,7 @@
#include "qemu/host-utils.h"
#include "sysemu/sysemu.h"
#include "qemu/bitops.h"
+#include "internals.h"
/* C2.4.7 Multiply and divide */
/* special cases for 0 and LLONG_MIN are mandated by the standard */
@@ -60,6 +61,11 @@ uint32_t HELPER(cls32)(uint32_t x)
return clrsb32(x);
}
+uint32_t HELPER(clz32)(uint32_t x)
+{
+ return clz32(x);
+}
+
uint64_t HELPER(rbit64)(uint64_t x)
{
/* assign the correct byte position */
@@ -180,6 +186,36 @@ uint64_t HELPER(simd_tbl)(CPUARMState *env, uint64_t result, uint64_t indices,
return result;
}
+/* Helper function for 64 bit polynomial multiply case:
+ * perform PolynomialMult(op1, op2) and return either the top or
+ * bottom half of the 128 bit result.
+ */
+uint64_t HELPER(neon_pmull_64_lo)(uint64_t op1, uint64_t op2)
+{
+ int bitnum;
+ uint64_t res = 0;
+
+ for (bitnum = 0; bitnum < 64; bitnum++) {
+ if (op1 & (1ULL << bitnum)) {
+ res ^= op2 << bitnum;
+ }
+ }
+ return res;
+}
+uint64_t HELPER(neon_pmull_64_hi)(uint64_t op1, uint64_t op2)
+{
+ int bitnum;
+ uint64_t res = 0;
+
+ /* bit 0 of op1 can't influence the high 64 bits at all */
+ for (bitnum = 1; bitnum < 64; bitnum++) {
+ if (op1 & (1ULL << bitnum)) {
+ res ^= op2 >> (64 - bitnum);
+ }
+ }
+ return res;
+}
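The two helpers above are the halves of a single 64x64 carry-less (GF(2)[x]) multiply. A self-contained sketch that computes both halves together, with a small worked value; clmul_64x64() is a hypothetical name:

    #include <stdint.h>

    struct pmull128 { uint64_t lo, hi; };

    static struct pmull128 clmul_64x64(uint64_t a, uint64_t b)
    {
        struct pmull128 r = { 0, 0 };
        int i;

        for (i = 0; i < 64; i++) {
            if (a & (1ULL << i)) {
                r.lo ^= b << i;
                if (i) {
                    r.hi ^= b >> (64 - i); /* bit 0 never reaches the high half */
                }
            }
        }
        return r; /* e.g. 0x5 * 0x3 -> (0x3 << 2) ^ 0x3 = 0xf low, 0 high */
    }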
+
/* 64bit/double versions of the neon float compare functions */
uint64_t HELPER(neon_ceq_f64)(float64 a, float64 b, void *fpstp)
{
@@ -258,3 +294,221 @@ float64 HELPER(rsqrtsf_f64)(float64 a, float64 b, void *fpstp)
}
return float64_muladd(a, b, float64_three, float_muladd_halve_result, fpst);
}
+
+/* Pairwise long add: add pairs of adjacent elements into
+ * double-width elements in the result (eg _s8 is an 8x8->16 op)
+ */
+uint64_t HELPER(neon_addlp_s8)(uint64_t a)
+{
+ uint64_t nsignmask = 0x0080008000800080ULL;
+ uint64_t wsignmask = 0x8000800080008000ULL;
+ uint64_t elementmask = 0x00ff00ff00ff00ffULL;
+ uint64_t tmp1, tmp2;
+ uint64_t res, signres;
+
+ /* Extract odd elements, sign extend each to a 16 bit field */
+ tmp1 = a & elementmask;
+ tmp1 ^= nsignmask;
+ tmp1 |= wsignmask;
+ tmp1 = (tmp1 - nsignmask) ^ wsignmask;
+ /* Ditto for the even elements */
+ tmp2 = (a >> 8) & elementmask;
+ tmp2 ^= nsignmask;
+ tmp2 |= wsignmask;
+ tmp2 = (tmp2 - nsignmask) ^ wsignmask;
+
+ /* calculate the result by summing bits 0..14, 16..30, etc,
+ * and then adjusting the sign bits 15, 31, etc manually.
+ * This ensures the addition can't overflow the 16 bit field.
+ */
+ signres = (tmp1 ^ tmp2) & wsignmask;
+ res = (tmp1 & ~wsignmask) + (tmp2 & ~wsignmask);
+ res ^= signres;
+
+ return res;
+}
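The mask sequence above is the usual SWAR trick for sign-extending every 8-bit lane into its 16-bit field while keeping the subtraction's borrow from crossing lanes. A single-lane version (sext8_in_16() is a hypothetical name) makes the steps easier to follow:

    #include <stdint.h>

    static int16_t sext8_in_16(uint16_t e)
    {
        uint16_t t = e & 0x00ff;

        t ^= 0x0080;               /* flip the narrow (bit 7) sign */
        t |= 0x8000;               /* pre-set the wide sign so the borrow stays in-lane */
        t = (t - 0x0080) ^ 0x8000; /* remove the bias, restore the true sign */
        return (int16_t)t;         /* sext8_in_16(0xff) == -1, sext8_in_16(0x7f) == 127 */
    }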
+
+uint64_t HELPER(neon_addlp_u8)(uint64_t a)
+{
+ uint64_t tmp;
+
+ tmp = a & 0x00ff00ff00ff00ffULL;
+ tmp += (a >> 8) & 0x00ff00ff00ff00ffULL;
+ return tmp;
+}
+
+uint64_t HELPER(neon_addlp_s16)(uint64_t a)
+{
+ int32_t reslo, reshi;
+
+ reslo = (int32_t)(int16_t)a + (int32_t)(int16_t)(a >> 16);
+ reshi = (int32_t)(int16_t)(a >> 32) + (int32_t)(int16_t)(a >> 48);
+
+ return (uint32_t)reslo | (((uint64_t)reshi) << 32);
+}
+
+uint64_t HELPER(neon_addlp_u16)(uint64_t a)
+{
+ uint64_t tmp;
+
+ tmp = a & 0x0000ffff0000ffffULL;
+ tmp += (a >> 16) & 0x0000ffff0000ffffULL;
+ return tmp;
+}
+
+/* Floating-point reciprocal exponent - see FPRecpX in ARM ARM */
+float32 HELPER(frecpx_f32)(float32 a, void *fpstp)
+{
+ float_status *fpst = fpstp;
+ uint32_t val32, sbit;
+ int32_t exp;
+
+ if (float32_is_any_nan(a)) {
+ float32 nan = a;
+ if (float32_is_signaling_nan(a)) {
+ float_raise(float_flag_invalid, fpst);
+ nan = float32_maybe_silence_nan(a);
+ }
+ if (fpst->default_nan_mode) {
+ nan = float32_default_nan;
+ }
+ return nan;
+ }
+
+ val32 = float32_val(a);
+ sbit = 0x80000000ULL & val32;
+ exp = extract32(val32, 23, 8);
+
+ if (exp == 0) {
+ return make_float32(sbit | (0xfe << 23));
+ } else {
+ return make_float32(sbit | (~exp & 0xff) << 23);
+ }
+}
+
+float64 HELPER(frecpx_f64)(float64 a, void *fpstp)
+{
+ float_status *fpst = fpstp;
+ uint64_t val64, sbit;
+ int64_t exp;
+
+ if (float64_is_any_nan(a)) {
+ float64 nan = a;
+ if (float64_is_signaling_nan(a)) {
+ float_raise(float_flag_invalid, fpst);
+ nan = float64_maybe_silence_nan(a);
+ }
+ if (fpst->default_nan_mode) {
+ nan = float64_default_nan;
+ }
+ return nan;
+ }
+
+ val64 = float64_val(a);
+ sbit = 0x8000000000000000ULL & val64;
+ exp = extract64(float64_val(a), 52, 11);
+
+ if (exp == 0) {
+ return make_float64(sbit | (0x7feULL << 52));
+ } else {
+ return make_float64(sbit | (~exp & 0x7ffULL) << 52);
+ }
+}
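As the two helpers show, FPRecpX keeps the sign, clears the fraction and replaces the biased exponent with its bitwise NOT. A raw-bits sketch for the single-precision case, with frecpx_bits() as a hypothetical name:

    #include <stdint.h>

    static uint32_t frecpx_bits(uint32_t in)
    {
        uint32_t sign = in & 0x80000000u;
        uint32_t exp = (in >> 23) & 0xff;

        /* zero/subnormal inputs produce the 0xfe exponent, as above */
        return exp ? (sign | ((~exp & 0xff) << 23))
                   : (sign | (0xfeu << 23));
    }

    /* frecpx_bits(0x40000000) == 0x3f800000, i.e. frecpx(2.0f) == 1.0f,
     * and symmetrically frecpx(1.0f) == 2.0f.
     */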
+
+float32 HELPER(fcvtx_f64_to_f32)(float64 a, CPUARMState *env)
+{
+ /* Von Neumann rounding is implemented by using round-to-zero
+ * and then setting the LSB of the result if Inexact was raised.
+ */
+ float32 r;
+ float_status *fpst = &env->vfp.fp_status;
+ float_status tstat = *fpst;
+ int exflags;
+
+ set_float_rounding_mode(float_round_to_zero, &tstat);
+ set_float_exception_flags(0, &tstat);
+ r = float64_to_float32(a, &tstat);
+ r = float32_maybe_silence_nan(r);
+ exflags = get_float_exception_flags(&tstat);
+ if (exflags & float_flag_inexact) {
+ r = make_float32(float32_val(r) | 1);
+ }
+ exflags |= get_float_exception_flags(fpst);
+ set_float_exception_flags(exflags, fpst);
+ return r;
+}
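Round-to-odd only needs truncation plus a sticky OR into the result's LSB, which is why it can be built from round-to-zero and the Inexact flag as above. On raw significands it reduces to a sketch like this (round_to_odd() is hypothetical; assumes 0 < drop_bits < 64):

    #include <stdint.h>

    static uint32_t round_to_odd(uint64_t sig, unsigned drop_bits)
    {
        uint32_t out = (uint32_t)(sig >> drop_bits); /* round to zero */

        if (sig & ((1ULL << drop_bits) - 1)) {
            out |= 1;                                /* any lost bit sets the LSB */
        }
        return out;
    }

    /* e.g. the double 1.0 + 2^-40 becomes 0x3f800001 rather than 1.0f, so a
     * later single->half rounding cannot double-round incorrectly.
     */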
+
+/* Handle a CPU exception. */
+void aarch64_cpu_do_interrupt(CPUState *cs)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+ target_ulong addr = env->cp15.c12_vbar;
+ int i;
+
+ if (arm_current_pl(env) == 0) {
+ if (env->aarch64) {
+ addr += 0x400;
+ } else {
+ addr += 0x600;
+ }
+ } else if (pstate_read(env) & PSTATE_SP) {
+ addr += 0x200;
+ }
+
+ arm_log_exception(cs->exception_index);
+ qemu_log_mask(CPU_LOG_INT, "...from EL%d\n", arm_current_pl(env));
+ if (qemu_loglevel_mask(CPU_LOG_INT)
+ && !excp_is_internal(cs->exception_index)) {
+ qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%" PRIx32 "\n",
+ env->exception.syndrome);
+ }
+
+ env->cp15.esr_el1 = env->exception.syndrome;
+ env->cp15.far_el1 = env->exception.vaddress;
+
+ switch (cs->exception_index) {
+ case EXCP_PREFETCH_ABORT:
+ case EXCP_DATA_ABORT:
+ qemu_log_mask(CPU_LOG_INT, "...with FAR 0x%" PRIx64 "\n",
+ env->cp15.far_el1);
+ break;
+ case EXCP_BKPT:
+ case EXCP_UDEF:
+ case EXCP_SWI:
+ break;
+ case EXCP_IRQ:
+ addr += 0x80;
+ break;
+ case EXCP_FIQ:
+ addr += 0x100;
+ break;
+ default:
+ cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
+ }
+
+ if (is_a64(env)) {
+ env->banked_spsr[0] = pstate_read(env);
+ env->sp_el[arm_current_pl(env)] = env->xregs[31];
+ env->xregs[31] = env->sp_el[1];
+ env->elr_el1 = env->pc;
+ } else {
+ env->banked_spsr[0] = cpsr_read(env);
+ if (!env->thumb) {
+ env->cp15.esr_el1 |= 1 << 25;
+ }
+ env->elr_el1 = env->regs[15];
+
+ for (i = 0; i < 15; i++) {
+ env->xregs[i] = env->regs[i];
+ }
+
+ env->condexec_bits = 0;
+ }
+
+ pstate_write(env, PSTATE_DAIF | PSTATE_MODE_EL1h);
+ env->aarch64 = 1;
+
+ env->pc = addr;
+ cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
+}
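The offsets applied above follow the ARMv8 vector table layout: VBAR_EL1, plus a 0x200-sized block chosen by where the exception came from, plus a per-type offset within the block. A compact restatement (vector_entry() is a hypothetical helper):

    #include <stdbool.h>
    #include <stdint.h>

    /* block: 0x000 current EL using SP_EL0, 0x200 current EL using SP_ELx,
     *        0x400 lower EL in AArch64,     0x600 lower EL in AArch32.
     * type:  0x000 synchronous, 0x080 IRQ, 0x100 FIQ (0x180 SError, unused here).
     */
    static uint64_t vector_entry(uint64_t vbar, int from_el, bool from_aa64,
                                 bool sp_elx, unsigned type_offset)
    {
        uint64_t addr = vbar;

        if (from_el == 0) {
            addr += from_aa64 ? 0x400 : 0x600;
        } else if (sp_elx) {
            addr += 0x200;
        }
        return addr + type_offset;
    }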
diff --git a/target-arm/helper-a64.h b/target-arm/helper-a64.h
index ab9933cab0..3f05bedcca 100644
--- a/target-arm/helper-a64.h
+++ b/target-arm/helper-a64.h
@@ -21,12 +21,15 @@ DEF_HELPER_FLAGS_2(sdiv64, TCG_CALL_NO_RWG_SE, s64, s64, s64)
DEF_HELPER_FLAGS_1(clz64, TCG_CALL_NO_RWG_SE, i64, i64)
DEF_HELPER_FLAGS_1(cls64, TCG_CALL_NO_RWG_SE, i64, i64)
DEF_HELPER_FLAGS_1(cls32, TCG_CALL_NO_RWG_SE, i32, i32)
+DEF_HELPER_FLAGS_1(clz32, TCG_CALL_NO_RWG_SE, i32, i32)
DEF_HELPER_FLAGS_1(rbit64, TCG_CALL_NO_RWG_SE, i64, i64)
DEF_HELPER_3(vfp_cmps_a64, i64, f32, f32, ptr)
DEF_HELPER_3(vfp_cmpes_a64, i64, f32, f32, ptr)
DEF_HELPER_3(vfp_cmpd_a64, i64, f64, f64, ptr)
DEF_HELPER_3(vfp_cmped_a64, i64, f64, f64, ptr)
DEF_HELPER_FLAGS_5(simd_tbl, TCG_CALL_NO_RWG_SE, i64, env, i64, i64, i32, i32)
+DEF_HELPER_FLAGS_2(neon_pmull_64_lo, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(neon_pmull_64_hi, TCG_CALL_NO_RWG_SE, i64, i64, i64)
DEF_HELPER_FLAGS_3(vfp_mulxs, TCG_CALL_NO_RWG, f32, f32, f32, ptr)
DEF_HELPER_FLAGS_3(vfp_mulxd, TCG_CALL_NO_RWG, f64, f64, f64, ptr)
DEF_HELPER_FLAGS_3(neon_ceq_f64, TCG_CALL_NO_RWG, i64, i64, i64, ptr)
@@ -36,3 +39,10 @@ DEF_HELPER_FLAGS_3(recpsf_f32, TCG_CALL_NO_RWG, f32, f32, f32, ptr)
DEF_HELPER_FLAGS_3(recpsf_f64, TCG_CALL_NO_RWG, f64, f64, f64, ptr)
DEF_HELPER_FLAGS_3(rsqrtsf_f32, TCG_CALL_NO_RWG, f32, f32, f32, ptr)
DEF_HELPER_FLAGS_3(rsqrtsf_f64, TCG_CALL_NO_RWG, f64, f64, f64, ptr)
+DEF_HELPER_FLAGS_1(neon_addlp_s8, TCG_CALL_NO_RWG_SE, i64, i64)
+DEF_HELPER_FLAGS_1(neon_addlp_u8, TCG_CALL_NO_RWG_SE, i64, i64)
+DEF_HELPER_FLAGS_1(neon_addlp_s16, TCG_CALL_NO_RWG_SE, i64, i64)
+DEF_HELPER_FLAGS_1(neon_addlp_u16, TCG_CALL_NO_RWG_SE, i64, i64)
+DEF_HELPER_FLAGS_2(frecpx_f64, TCG_CALL_NO_RWG, f64, f64, ptr)
+DEF_HELPER_FLAGS_2(frecpx_f32, TCG_CALL_NO_RWG, f32, f32, ptr)
+DEF_HELPER_FLAGS_2(fcvtx_f64_to_f32, TCG_CALL_NO_RWG, f32, f64, env)
diff --git a/target-arm/helper.c b/target-arm/helper.c
index f65cbac1ee..43c1b4f01d 100644
--- a/target-arm/helper.c
+++ b/target-arm/helper.c
@@ -1,4 +1,5 @@
#include "cpu.h"
+#include "internals.h"
#include "exec/gdbstub.h"
#include "helper.h"
#include "qemu/host-utils.h"
@@ -9,7 +10,9 @@
#include <zlib.h> /* For crc32 */
#ifndef CONFIG_USER_ONLY
-static inline int get_phys_addr(CPUARMState *env, uint32_t address,
+#include "exec/softmmu_exec.h"
+
+static inline int get_phys_addr(CPUARMState *env, target_ulong address,
int access_type, int is_user,
hwaddr *phys_ptr, int *prot,
target_ulong *page_size);
@@ -301,19 +304,34 @@ void init_cpreg_list(ARMCPU *cpu)
g_list_free(keys);
}
+/* Return true if extended addresses are enabled.
+ * This is always the case if our translation regime is 64 bit,
+ * but depends on TTBCR.EAE for 32 bit.
+ */
+static inline bool extended_addresses_enabled(CPUARMState *env)
+{
+ return arm_el_is_aa64(env, 1)
+ || ((arm_feature(env, ARM_FEATURE_LPAE)
+ && (env->cp15.c2_control & (1U << 31))));
+}
+
static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
+ ARMCPU *cpu = arm_env_get_cpu(env);
+
env->cp15.c3 = value;
- tlb_flush(env, 1); /* Flush TLB as domain not tracked in TLB */
+ tlb_flush(CPU(cpu), 1); /* Flush TLB as domain not tracked in TLB */
}
static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
+ ARMCPU *cpu = arm_env_get_cpu(env);
+
if (env->cp15.c13_fcse != value) {
/* Unlike real hardware the qemu TLB uses virtual addresses,
* not modified virtual addresses, so this causes a TLB flush.
*/
- tlb_flush(env, 1);
+ tlb_flush(CPU(cpu), 1);
env->cp15.c13_fcse = value;
}
}
@@ -321,42 +339,53 @@ static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
- if (env->cp15.c13_context != value && !arm_feature(env, ARM_FEATURE_MPU)) {
+ ARMCPU *cpu = arm_env_get_cpu(env);
+
+ if (env->cp15.contextidr_el1 != value && !arm_feature(env, ARM_FEATURE_MPU)
+ && !extended_addresses_enabled(env)) {
/* For VMSA (when not using the LPAE long descriptor page table
* format) this register includes the ASID, so do a TLB flush.
* For PMSA it is purely a process ID and no action is needed.
*/
- tlb_flush(env, 1);
+ tlb_flush(CPU(cpu), 1);
}
- env->cp15.c13_context = value;
+ env->cp15.contextidr_el1 = value;
}
static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
/* Invalidate all (TLBIALL) */
- tlb_flush(env, 1);
+ ARMCPU *cpu = arm_env_get_cpu(env);
+
+ tlb_flush(CPU(cpu), 1);
}
static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
/* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
- tlb_flush_page(env, value & TARGET_PAGE_MASK);
+ ARMCPU *cpu = arm_env_get_cpu(env);
+
+ tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
}
static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
/* Invalidate by ASID (TLBIASID) */
- tlb_flush(env, value == 0);
+ ARMCPU *cpu = arm_env_get_cpu(env);
+
+ tlb_flush(CPU(cpu), value == 0);
}
static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
/* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
- tlb_flush_page(env, value & TARGET_PAGE_MASK);
+ ARMCPU *cpu = arm_env_get_cpu(env);
+
+ tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
}
static const ARMCPRegInfo cp_reginfo[] = {
@@ -366,17 +395,26 @@ static const ARMCPRegInfo cp_reginfo[] = {
*/
{ .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
.access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "FCSEIDR", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 0,
+ .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c13_fcse),
+ .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
+ { .name = "CONTEXTIDR", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
+ .access = PL1_RW,
+ .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el1),
+ .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
+ REGINFO_SENTINEL
+};
+
+static const ARMCPRegInfo not_v8_cp_reginfo[] = {
+ /* NB: Some of these registers exist in v8 but with more precise
+ * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]).
+ */
/* MMU Domain access control / MPU write buffer control */
{ .name = "DACR", .cp = 15,
.crn = 3, .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
.access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c3),
.resetvalue = 0, .writefn = dacr_write, .raw_writefn = raw_write, },
- { .name = "FCSEIDR", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 0,
- .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c13_fcse),
- .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
- { .name = "CONTEXTIDR", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 1,
- .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c13_context),
- .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
/* ??? This covers not just the impdef TLB lockdown registers but also
* some v7VMSA registers relating to TEX remap, so it is overly broad.
*/
@@ -458,7 +496,8 @@ static const ARMCPRegInfo v6_cp_reginfo[] = {
{ .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5,
.access = PL0_W, .type = ARM_CP_NOP },
{ .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
- .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c6_insn),
+ .access = PL1_RW,
+ .fieldoffset = offsetofhigh32(CPUARMState, cp15.far_el1),
.resetvalue = 0, },
/* Watchpoint Fault Address Register : should actually only be present
* for 1136, 1176, 11MPCore.
@@ -474,7 +513,7 @@ static const ARMCPRegInfo v6_cp_reginfo[] = {
static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri)
{
- /* Perfomance monitor registers user accessibility is controlled
+ /* Performance monitor registers user accessibility is controlled
* by PMUSERENR.
*/
if (arm_current_pl(env) == 0 && !env->cp15.c9_pmuserenr) {
@@ -633,6 +672,21 @@ static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
env->cp15.c0_cssel = value & 0xf;
}
+static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ CPUState *cs = ENV_GET_CPU(env);
+ uint64_t ret = 0;
+
+ if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
+ ret |= CPSR_I;
+ }
+ if (cs->interrupt_request & CPU_INTERRUPT_FIQ) {
+ ret |= CPSR_F;
+ }
+ /* External aborts are not possible in QEMU so A bit is always clear */
+ return ret;
+}
+
static const ARMCPRegInfo v7_cp_reginfo[] = {
/* DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped
* debug components
@@ -727,8 +781,18 @@ static const ARMCPRegInfo v7_cp_reginfo[] = {
/* Auxiliary ID register: this actually has an IMPDEF value but for now
* just RAZ for all cores:
*/
- { .name = "AIDR", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 7,
+ { .name = "AIDR", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7,
.access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
+ /* Auxiliary fault status registers: these also are IMPDEF, and we
+ * choose to RAZ/WI for all cores.
+ */
+ { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0,
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1,
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
/* MAIR can just read-as-written because we don't implement caches
* and so don't need to care about memory attributes.
*/
@@ -749,6 +813,9 @@ static const ARMCPRegInfo v7_cp_reginfo[] = {
.cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1, .access = PL1_RW,
.fieldoffset = offsetofhigh32(CPUARMState, cp15.mair_el1),
.resetfn = arm_cp_reset_ignore },
+ { .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0,
+ .type = ARM_CP_NO_MIGRATE, .access = PL1_R, .readfn = isr_read },
REGINFO_SENTINEL
};
@@ -1125,27 +1192,17 @@ static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
if (arm_feature(env, ARM_FEATURE_LPAE)) {
- env->cp15.c7_par = value;
+ env->cp15.par_el1 = value;
} else if (arm_feature(env, ARM_FEATURE_V7)) {
- env->cp15.c7_par = value & 0xfffff6ff;
+ env->cp15.par_el1 = value & 0xfffff6ff;
} else {
- env->cp15.c7_par = value & 0xfffff1ff;
+ env->cp15.par_el1 = value & 0xfffff1ff;
}
}
#ifndef CONFIG_USER_ONLY
/* get_phys_addr() isn't present for user-mode-only targets */
-/* Return true if extended addresses are enabled, ie this is an
- * LPAE implementation and we are using the long-descriptor translation
- * table format because the TTBCR EAE bit is set.
- */
-static inline bool extended_addresses_enabled(CPUARMState *env)
-{
- return arm_feature(env, ARM_FEATURE_LPAE)
- && (env->cp15.c2_control & (1U << 31));
-}
-
static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri)
{
if (ri->opc2 & 4) {
@@ -1186,8 +1243,7 @@ static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
* fault.
*/
}
- env->cp15.c7_par = par64;
- env->cp15.c7_par_hi = par64 >> 32;
+ env->cp15.par_el1 = par64;
} else {
/* ret is a DFSR/IFSR value for the short descriptor
* translation table format (with WnR always clear).
@@ -1197,16 +1253,15 @@ static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
/* We do not set any attribute bits in the PAR */
if (page_size == (1 << 24)
&& arm_feature(env, ARM_FEATURE_V7)) {
- env->cp15.c7_par = (phys_addr & 0xff000000) | 1 << 1;
+ env->cp15.par_el1 = (phys_addr & 0xff000000) | 1 << 1;
} else {
- env->cp15.c7_par = phys_addr & 0xfffff000;
+ env->cp15.par_el1 = phys_addr & 0xfffff000;
}
} else {
- env->cp15.c7_par = ((ret & (1 << 10)) >> 5) |
+ env->cp15.par_el1 = ((ret & (1 << 10)) >> 5) |
((ret & (1 << 12)) >> 6) |
((ret & 0xf) << 1) | 1;
}
- env->cp15.c7_par_hi = 0;
}
}
#endif
@@ -1214,7 +1269,7 @@ static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
static const ARMCPRegInfo vapa_cp_reginfo[] = {
{ .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0,
.access = PL1_RW, .resetvalue = 0,
- .fieldoffset = offsetof(CPUARMState, cp15.c7_par),
+ .fieldoffset = offsetoflow32(CPUARMState, cp15.par_el1),
.writefn = par_write },
#ifndef CONFIG_USER_ONLY
{ .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY,
@@ -1257,40 +1312,44 @@ static uint32_t extended_mpu_ap_bits(uint32_t val)
static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
- env->cp15.c5_data = extended_mpu_ap_bits(value);
+ env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value);
}
static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
- return simple_mpu_ap_bits(env->cp15.c5_data);
+ return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap);
}
static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
- env->cp15.c5_insn = extended_mpu_ap_bits(value);
+ env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value);
}
static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
- return simple_mpu_ap_bits(env->cp15.c5_insn);
+ return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap);
}
static const ARMCPRegInfo pmsav5_cp_reginfo[] = {
{ .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
.access = PL1_RW, .type = ARM_CP_NO_MIGRATE,
- .fieldoffset = offsetof(CPUARMState, cp15.c5_data), .resetvalue = 0,
+ .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
+ .resetvalue = 0,
.readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, },
{ .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
.access = PL1_RW, .type = ARM_CP_NO_MIGRATE,
- .fieldoffset = offsetof(CPUARMState, cp15.c5_insn), .resetvalue = 0,
+ .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
+ .resetvalue = 0,
.readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, },
{ .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2,
.access = PL1_RW,
- .fieldoffset = offsetof(CPUARMState, cp15.c5_data), .resetvalue = 0, },
+ .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
+ .resetvalue = 0, },
{ .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3,
.access = PL1_RW,
- .fieldoffset = offsetof(CPUARMState, cp15.c5_insn), .resetvalue = 0, },
+ .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
+ .resetvalue = 0, },
{ .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
.access = PL1_RW,
.fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, },
@@ -1348,11 +1407,13 @@ static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
+ ARMCPU *cpu = arm_env_get_cpu(env);
+
if (arm_feature(env, ARM_FEATURE_LPAE)) {
/* With LPAE the TTBCR could result in a change of ASID
* via the TTBCR.A1 bit, so do a TLB flush.
*/
- tlb_flush(env, 1);
+ tlb_flush(CPU(cpu), 1);
}
vmsa_ttbcr_raw_write(env, ri, value);
}
@@ -1367,8 +1428,10 @@ static void vmsa_ttbcr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
static void vmsa_tcr_el1_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
+ ARMCPU *cpu = arm_env_get_cpu(env);
+
/* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */
- tlb_flush(env, 1);
+ tlb_flush(CPU(cpu), 1);
env->cp15.c2_control = value;
}
@@ -1379,18 +1442,25 @@ static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
* must flush the TLB.
*/
if (cpreg_field_is_64bit(ri)) {
- tlb_flush(env, 1);
+ ARMCPU *cpu = arm_env_get_cpu(env);
+
+ tlb_flush(CPU(cpu), 1);
}
raw_write(env, ri, value);
}
static const ARMCPRegInfo vmsa_cp_reginfo[] = {
{ .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
- .access = PL1_RW,
- .fieldoffset = offsetof(CPUARMState, cp15.c5_data), .resetvalue = 0, },
+ .access = PL1_RW, .type = ARM_CP_NO_MIGRATE,
+ .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el1),
+ .resetfn = arm_cp_reset_ignore, },
{ .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
.access = PL1_RW,
- .fieldoffset = offsetof(CPUARMState, cp15.c5_insn), .resetvalue = 0, },
+ .fieldoffset = offsetof(CPUARMState, cp15.ifsr_el2), .resetvalue = 0, },
+ { .name = "ESR_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0,
+ .access = PL1_RW,
+ .fieldoffset = offsetof(CPUARMState, cp15.esr_el1), .resetvalue = 0, },
{ .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
.access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el1),
@@ -1408,8 +1478,10 @@ static const ARMCPRegInfo vmsa_cp_reginfo[] = {
.access = PL1_RW, .type = ARM_CP_NO_MIGRATE, .writefn = vmsa_ttbcr_write,
.resetfn = arm_cp_reset_ignore, .raw_writefn = vmsa_ttbcr_raw_write,
.fieldoffset = offsetoflow32(CPUARMState, cp15.c2_control) },
- { .name = "DFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0,
- .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c6_data),
+ /* 64-bit FAR; this entry also gives us the AArch32 DFAR */
+ { .name = "FAR_EL1", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0,
+ .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el1),
.resetvalue = 0, },
REGINFO_SENTINEL
};
@@ -1449,7 +1521,8 @@ static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri,
static const ARMCPRegInfo omap_cp_reginfo[] = {
{ .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY,
.opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE,
- .fieldoffset = offsetof(CPUARMState, cp15.c5_data), .resetvalue = 0, },
+ .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el1),
+ .resetvalue = 0, },
{ .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0,
.access = PL1_RW, .type = ARM_CP_NOP },
{ .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0,
@@ -1599,24 +1672,6 @@ static const ARMCPRegInfo mpidr_cp_reginfo[] = {
REGINFO_SENTINEL
};
-static uint64_t par64_read(CPUARMState *env, const ARMCPRegInfo *ri)
-{
- return ((uint64_t)env->cp15.c7_par_hi << 32) | env->cp15.c7_par;
-}
-
-static void par64_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- env->cp15.c7_par_hi = value >> 32;
- env->cp15.c7_par = value;
-}
-
-static void par64_reset(CPUARMState *env, const ARMCPRegInfo *ri)
-{
- env->cp15.c7_par_hi = 0;
- env->cp15.c7_par = 0;
-}
-
static const ARMCPRegInfo lpae_cp_reginfo[] = {
/* NOP AMAIR0/1: the override is because these clash with the rather
* broadly specified TLB_LOCKDOWN entry in the generic cp_reginfo.
@@ -1636,7 +1691,7 @@ static const ARMCPRegInfo lpae_cp_reginfo[] = {
.access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
{ .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0,
.access = PL1_RW, .type = ARM_CP_64BIT,
- .readfn = par64_read, .writefn = par64_write, .resetfn = par64_reset },
+ .fieldoffset = offsetof(CPUARMState, cp15.par_el1), .resetvalue = 0 },
{ .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0,
.access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_NO_MIGRATE,
.fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el1),
@@ -1670,6 +1725,20 @@ static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
vfp_set_fpsr(env, value);
}
+static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ if (arm_current_pl(env) == 0 && !(env->cp15.c1_sys & SCTLR_UMA)) {
+ return CP_ACCESS_TRAP;
+ }
+ return CP_ACCESS_OK;
+}
+
+static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ env->daif = value & PSTATE_DAIF;
+}
+
static CPAccessResult aa64_cacheop_access(CPUARMState *env,
const ARMCPRegInfo *ri)
{
@@ -1686,24 +1755,71 @@ static void tlbi_aa64_va_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
/* Invalidate by VA (AArch64 version) */
+ ARMCPU *cpu = arm_env_get_cpu(env);
uint64_t pageaddr = value << 12;
- tlb_flush_page(env, pageaddr);
+ tlb_flush_page(CPU(cpu), pageaddr);
}
static void tlbi_aa64_vaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
/* Invalidate by VA, all ASIDs (AArch64 version) */
+ ARMCPU *cpu = arm_env_get_cpu(env);
uint64_t pageaddr = value << 12;
- tlb_flush_page(env, pageaddr);
+ tlb_flush_page(CPU(cpu), pageaddr);
}
static void tlbi_aa64_asid_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
/* Invalidate by ASID (AArch64 version) */
+ ARMCPU *cpu = arm_env_get_cpu(env);
int asid = extract64(value, 48, 16);
- tlb_flush(env, asid == 0);
+ tlb_flush(CPU(cpu), asid == 0);
+}
+
+static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ /* We don't implement EL2, so the only control on DC ZVA is the
+ * bit in the SCTLR which can prohibit access for EL0.
+ */
+ if (arm_current_pl(env) == 0 && !(env->cp15.c1_sys & SCTLR_DZE)) {
+ return CP_ACCESS_TRAP;
+ }
+ return CP_ACCESS_OK;
+}
+
+static uint64_t aa64_dczid_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ ARMCPU *cpu = arm_env_get_cpu(env);
+ int dzp_bit = 1 << 4;
+
+ /* DZP indicates whether DC ZVA access is allowed */
+ if (aa64_zva_access(env, NULL) == CP_ACCESS_OK) {
+ dzp_bit = 0;
+ }
+ return cpu->dcz_blocksize | dzp_bit;
+}
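DCZID_EL0 packs log2 of the block size in words into BS[3:0] and the prohibit flag into DZP (bit 4), which is how dcz_blocksize values of 4 and 7 in the initfns correspond to 64-byte and 512-byte blocks. A decoding sketch (hypothetical helper names):

    #include <stdbool.h>
    #include <stdint.h>

    static unsigned dczid_block_bytes(uint64_t dczid)
    {
        return 4u << (dczid & 0xf);  /* BS counts 4-byte words: 4 -> 64, 7 -> 512 */
    }

    static bool dczid_zva_prohibited(uint64_t dczid)
    {
        return dczid & (1u << 4);    /* DZP set: DC ZVA traps */
    }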
+
+static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ if (!(env->pstate & PSTATE_SP)) {
+ /* Access to SP_EL0 is undefined if it's being used as
+ * the stack pointer.
+ */
+ return CP_ACCESS_TRAP_UNCATEGORIZED;
+ }
+ return CP_ACCESS_OK;
+}
+
+static uint64_t spsel_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ return env->pstate & PSTATE_SP;
+}
+
+static void spsel_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
+{
+ update_spsel(env, val);
}
static const ARMCPRegInfo v8_cp_reginfo[] = {
@@ -1713,19 +1829,30 @@ static const ARMCPRegInfo v8_cp_reginfo[] = {
{ .name = "NZCV", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 2,
.access = PL0_RW, .type = ARM_CP_NZCV },
+ { .name = "DAIF", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 2,
+ .type = ARM_CP_NO_MIGRATE,
+ .access = PL0_RW, .accessfn = aa64_daif_access,
+ .fieldoffset = offsetof(CPUARMState, daif),
+ .writefn = aa64_daif_write, .resetfn = arm_cp_reset_ignore },
{ .name = "FPCR", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 4,
.access = PL0_RW, .readfn = aa64_fpcr_read, .writefn = aa64_fpcr_write },
{ .name = "FPSR", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 4,
.access = PL0_RW, .readfn = aa64_fpsr_read, .writefn = aa64_fpsr_write },
- /* Prohibit use of DC ZVA. OPTME: implement DC ZVA and allow its use.
- * For system mode the DZP bit here will need to be computed, not constant.
- */
{ .name = "DCZID_EL0", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 3, .opc2 = 7, .crn = 0, .crm = 0,
- .access = PL0_R, .type = ARM_CP_CONST,
- .resetvalue = 0x10 },
+ .access = PL0_R, .type = ARM_CP_NO_MIGRATE,
+ .readfn = aa64_dczid_read },
+ { .name = "DC_ZVA", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 1,
+ .access = PL0_W, .type = ARM_CP_DC_ZVA,
+#ifndef CONFIG_USER_ONLY
+ /* Avoid overhead of an access check that always passes in user-mode */
+ .accessfn = aa64_zva_access,
+#endif
+ },
{ .name = "CURRENTEL", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .opc2 = 2, .crn = 4, .crm = 2,
.access = PL1_R, .type = ARM_CP_CURRENTEL },
@@ -1813,6 +1940,93 @@ static const ARMCPRegInfo v8_cp_reginfo[] = {
.opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
.access = PL1_W, .type = ARM_CP_NO_MIGRATE,
.writefn = tlbi_aa64_vaa_write },
+#ifndef CONFIG_USER_ONLY
+ /* 64 bit address translation operations */
+ { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 0,
+ .access = PL1_W, .type = ARM_CP_NO_MIGRATE, .writefn = ats_write },
+ { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 1,
+ .access = PL1_W, .type = ARM_CP_NO_MIGRATE, .writefn = ats_write },
+ { .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 2,
+ .access = PL1_W, .type = ARM_CP_NO_MIGRATE, .writefn = ats_write },
+ { .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3,
+ .access = PL1_W, .type = ARM_CP_NO_MIGRATE, .writefn = ats_write },
+#endif
+ /* 32 bit TLB invalidates, Inner Shareable */
+ { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
+ .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbiall_write },
+ { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
+ .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbimva_write },
+ { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
+ .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbiasid_write },
+ { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
+ .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbimvaa_write },
+ { .name = "TLBIMVALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
+ .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbimva_write },
+ { .name = "TLBIMVAALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
+ .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbimvaa_write },
+ /* 32 bit ITLB invalidates */
+ { .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0,
+ .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbiall_write },
+ { .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
+ .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbimva_write },
+ { .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2,
+ .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbiasid_write },
+ /* 32 bit DTLB invalidates */
+ { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0,
+ .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbiall_write },
+ { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
+ .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbimva_write },
+ { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2,
+ .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbiasid_write },
+ /* 32 bit TLB invalidates */
+ { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
+ .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbiall_write },
+ { .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
+ .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbimva_write },
+ { .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
+ .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbiasid_write },
+ { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
+ .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbimvaa_write },
+ { .name = "TLBIMVAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
+ .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbimva_write },
+ { .name = "TLBIMVAAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
+ .type = ARM_CP_NO_MIGRATE, .access = PL1_W, .writefn = tlbimvaa_write },
+ /* 32 bit cache operations */
+ { .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
+ .type = ARM_CP_NOP, .access = PL1_W },
+ { .name = "BPIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 6,
+ .type = ARM_CP_NOP, .access = PL1_W },
+ { .name = "ICIALLU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
+ .type = ARM_CP_NOP, .access = PL1_W },
+ { .name = "ICIMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 1,
+ .type = ARM_CP_NOP, .access = PL1_W },
+ { .name = "BPIALL", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 6,
+ .type = ARM_CP_NOP, .access = PL1_W },
+ { .name = "BPIMVA", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 7,
+ .type = ARM_CP_NOP, .access = PL1_W },
+ { .name = "DCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
+ .type = ARM_CP_NOP, .access = PL1_W },
+ { .name = "DCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
+ .type = ARM_CP_NOP, .access = PL1_W },
+ { .name = "DCCMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 1,
+ .type = ARM_CP_NOP, .access = PL1_W },
+ { .name = "DCCSW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
+ .type = ARM_CP_NOP, .access = PL1_W },
+ { .name = "DCCMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 11, .opc2 = 1,
+ .type = ARM_CP_NOP, .access = PL1_W },
+ { .name = "DCCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 1,
+ .type = ARM_CP_NOP, .access = PL1_W },
+ { .name = "DCCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
+ .type = ARM_CP_NOP, .access = PL1_W },
+ /* MMU Domain access control / MPU write buffer control */
+ { .name = "DACR", .cp = 15,
+ .opc1 = 0, .crn = 3, .crm = 0, .opc2 = 0,
+ .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c3),
+ .resetvalue = 0, .writefn = dacr_write, .raw_writefn = raw_write, },
/* Dummy implementation of monitor debug system control register:
* we don't support debug.
*/
@@ -1823,16 +2037,39 @@ static const ARMCPRegInfo v8_cp_reginfo[] = {
{ .name = "OSLAR_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 4,
.access = PL1_W, .type = ARM_CP_NOP },
+ { .name = "ELR_EL1", .state = ARM_CP_STATE_AA64,
+ .type = ARM_CP_NO_MIGRATE,
+ .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1,
+ .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, elr_el1) },
+ { .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64,
+ .type = ARM_CP_NO_MIGRATE,
+ .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0,
+ .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, banked_spsr[0]) },
+ /* We rely on the access checks not allowing the guest to write to the
+ * state field when SPSel indicates that it's being used as the stack
+ * pointer.
+ */
+ { .name = "SP_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 1, .opc2 = 0,
+ .access = PL1_RW, .accessfn = sp_el0_access,
+ .type = ARM_CP_NO_MIGRATE,
+ .fieldoffset = offsetof(CPUARMState, sp_el[0]) },
+ { .name = "SPSel", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 0,
+ .type = ARM_CP_NO_MIGRATE,
+ .access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write },
REGINFO_SENTINEL
};
static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
+ ARMCPU *cpu = arm_env_get_cpu(env);
+
env->cp15.c1_sys = value;
/* ??? Lots of these bits are not implemented. */
/* This may enable/disable the MMU, so do a TLB flush. */
- tlb_flush(env, 1);
+ tlb_flush(CPU(cpu), 1);
}
static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri)
@@ -1887,50 +2124,71 @@ void register_cp_regs_for_features(ARMCPU *cpu)
}
define_arm_cp_regs(cpu, cp_reginfo);
+ if (!arm_feature(env, ARM_FEATURE_V8)) {
+ /* Must go early as it is full of wildcards that may be
+ * overridden by later definitions.
+ */
+ define_arm_cp_regs(cpu, not_v8_cp_reginfo);
+ }
+
if (arm_feature(env, ARM_FEATURE_V6)) {
/* The ID registers all have impdef reset values */
ARMCPRegInfo v6_idregs[] = {
- { .name = "ID_PFR0", .cp = 15, .crn = 0, .crm = 1,
- .opc1 = 0, .opc2 = 0, .access = PL1_R, .type = ARM_CP_CONST,
+ { .name = "ID_PFR0", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
+ .access = PL1_R, .type = ARM_CP_CONST,
.resetvalue = cpu->id_pfr0 },
- { .name = "ID_PFR1", .cp = 15, .crn = 0, .crm = 1,
- .opc1 = 0, .opc2 = 1, .access = PL1_R, .type = ARM_CP_CONST,
+ { .name = "ID_PFR1", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 1,
+ .access = PL1_R, .type = ARM_CP_CONST,
.resetvalue = cpu->id_pfr1 },
- { .name = "ID_DFR0", .cp = 15, .crn = 0, .crm = 1,
- .opc1 = 0, .opc2 = 2, .access = PL1_R, .type = ARM_CP_CONST,
+ { .name = "ID_DFR0", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 2,
+ .access = PL1_R, .type = ARM_CP_CONST,
.resetvalue = cpu->id_dfr0 },
- { .name = "ID_AFR0", .cp = 15, .crn = 0, .crm = 1,
- .opc1 = 0, .opc2 = 3, .access = PL1_R, .type = ARM_CP_CONST,
+ { .name = "ID_AFR0", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 3,
+ .access = PL1_R, .type = ARM_CP_CONST,
.resetvalue = cpu->id_afr0 },
- { .name = "ID_MMFR0", .cp = 15, .crn = 0, .crm = 1,
- .opc1 = 0, .opc2 = 4, .access = PL1_R, .type = ARM_CP_CONST,
+ { .name = "ID_MMFR0", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 4,
+ .access = PL1_R, .type = ARM_CP_CONST,
.resetvalue = cpu->id_mmfr0 },
- { .name = "ID_MMFR1", .cp = 15, .crn = 0, .crm = 1,
- .opc1 = 0, .opc2 = 5, .access = PL1_R, .type = ARM_CP_CONST,
+ { .name = "ID_MMFR1", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 5,
+ .access = PL1_R, .type = ARM_CP_CONST,
.resetvalue = cpu->id_mmfr1 },
- { .name = "ID_MMFR2", .cp = 15, .crn = 0, .crm = 1,
- .opc1 = 0, .opc2 = 6, .access = PL1_R, .type = ARM_CP_CONST,
+ { .name = "ID_MMFR2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 6,
+ .access = PL1_R, .type = ARM_CP_CONST,
.resetvalue = cpu->id_mmfr2 },
- { .name = "ID_MMFR3", .cp = 15, .crn = 0, .crm = 1,
- .opc1 = 0, .opc2 = 7, .access = PL1_R, .type = ARM_CP_CONST,
+ { .name = "ID_MMFR3", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 7,
+ .access = PL1_R, .type = ARM_CP_CONST,
.resetvalue = cpu->id_mmfr3 },
- { .name = "ID_ISAR0", .cp = 15, .crn = 0, .crm = 2,
- .opc1 = 0, .opc2 = 0, .access = PL1_R, .type = ARM_CP_CONST,
+ { .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
+ .access = PL1_R, .type = ARM_CP_CONST,
.resetvalue = cpu->id_isar0 },
- { .name = "ID_ISAR1", .cp = 15, .crn = 0, .crm = 2,
- .opc1 = 0, .opc2 = 1, .access = PL1_R, .type = ARM_CP_CONST,
+ { .name = "ID_ISAR1", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 1,
+ .access = PL1_R, .type = ARM_CP_CONST,
.resetvalue = cpu->id_isar1 },
- { .name = "ID_ISAR2", .cp = 15, .crn = 0, .crm = 2,
- .opc1 = 0, .opc2 = 2, .access = PL1_R, .type = ARM_CP_CONST,
+ { .name = "ID_ISAR2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
+ .access = PL1_R, .type = ARM_CP_CONST,
.resetvalue = cpu->id_isar2 },
- { .name = "ID_ISAR3", .cp = 15, .crn = 0, .crm = 2,
- .opc1 = 0, .opc2 = 3, .access = PL1_R, .type = ARM_CP_CONST,
+ { .name = "ID_ISAR3", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 3,
+ .access = PL1_R, .type = ARM_CP_CONST,
.resetvalue = cpu->id_isar3 },
- { .name = "ID_ISAR4", .cp = 15, .crn = 0, .crm = 2,
- .opc1 = 0, .opc2 = 4, .access = PL1_R, .type = ARM_CP_CONST,
+ { .name = "ID_ISAR4", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 4,
+ .access = PL1_R, .type = ARM_CP_CONST,
.resetvalue = cpu->id_isar4 },
- { .name = "ID_ISAR5", .cp = 15, .crn = 0, .crm = 2,
- .opc1 = 0, .opc2 = 5, .access = PL1_R, .type = ARM_CP_CONST,
+ { .name = "ID_ISAR5", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5,
+ .access = PL1_R, .type = ARM_CP_CONST,
.resetvalue = cpu->id_isar5 },
/* 6..7 are as yet unallocated and must RAZ */
{ .name = "ID_ISAR6", .cp = 15, .crn = 0, .crm = 2,
@@ -1958,6 +2216,7 @@ void register_cp_regs_for_features(ARMCPU *cpu)
ARMCPRegInfo pmcr = {
.name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0,
.access = PL0_RW, .resetvalue = cpu->midr & 0xff000000,
+ .type = ARM_CP_IO,
.fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr),
.accessfn = pmreg_access, .writefn = pmcr_write,
.raw_writefn = raw_write,
@@ -1988,7 +2247,12 @@ void register_cp_regs_for_features(ARMCPU *cpu)
{ .name = "ID_AA64DFR0_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0,
.access = PL1_R, .type = ARM_CP_CONST,
- .resetvalue = cpu->id_aa64dfr0 },
+ /* We mask out the PMUVer field, because we don't currently
+ * implement the PMU. Not advertising it prevents the guest
+ * from trying to use it and getting UNDEFs on registers we
+ * don't implement.
+ */
+ .resetvalue = cpu->id_aa64dfr0 & ~0xf00 },
{ .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1,
.access = PL1_R, .type = ARM_CP_CONST,
@@ -2017,8 +2281,26 @@ void register_cp_regs_for_features(ARMCPU *cpu)
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1,
.access = PL1_R, .type = ARM_CP_CONST,
.resetvalue = cpu->id_aa64mmfr1 },
+ { .name = "MVFR0_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = cpu->mvfr0 },
+ { .name = "MVFR1_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = cpu->mvfr1 },
+ { .name = "MVFR2_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = cpu->mvfr2 },
REGINFO_SENTINEL
};
+ ARMCPRegInfo rvbar = {
+ .name = "RVBAR_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 2,
+ .type = ARM_CP_CONST, .access = PL1_R, .resetvalue = cpu->rvbar
+ };
+ define_one_arm_cp_reg(cpu, &rvbar);
define_arm_cp_regs(cpu, v8_idregs);
define_arm_cp_regs(cpu, v8_cp_reginfo);
define_aarch64_debug_regs(cpu);
@@ -2072,8 +2354,9 @@ void register_cp_regs_for_features(ARMCPU *cpu)
* be read-only (ie write causes UNDEF exception).
*/
{
- ARMCPRegInfo id_cp_reginfo[] = {
- /* Note that the MIDR isn't a simple constant register because
+ ARMCPRegInfo id_pre_v8_midr_cp_reginfo[] = {
+ /* Pre-v8 MIDR space.
+ * Note that the MIDR isn't a simple constant register because
* of the TI925 behaviour where writes to another register can
* cause the MIDR value to change.
*
@@ -2087,22 +2370,6 @@ void register_cp_regs_for_features(ARMCPU *cpu)
.writefn = arm_cp_write_ignore, .raw_writefn = raw_write,
.fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
.type = ARM_CP_OVERRIDE },
- { .name = "MIDR_EL1", .state = ARM_CP_STATE_AA64,
- .opc0 = 3, .opc1 = 0, .opc2 = 0, .crn = 0, .crm = 0,
- .access = PL1_R, .resetvalue = cpu->midr, .type = ARM_CP_CONST },
- { .name = "CTR",
- .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 1,
- .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
- { .name = "CTR_EL0", .state = ARM_CP_STATE_AA64,
- .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 0, .crm = 0,
- .access = PL0_R, .accessfn = ctr_el0_access,
- .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
- { .name = "TCMTR",
- .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2,
- .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
- { .name = "TLBTR",
- .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3,
- .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
/* crn = 0 op1 = 0 crm = 3..7 : currently unassigned; we RAZ. */
{ .name = "DUMMY",
.cp = 15, .crn = 0, .crm = 3, .opc1 = 0, .opc2 = CP_ANY,
@@ -2121,6 +2388,37 @@ void register_cp_regs_for_features(ARMCPU *cpu)
.access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
REGINFO_SENTINEL
};
+ ARMCPRegInfo id_v8_midr_cp_reginfo[] = {
+ /* v8 MIDR -- the wildcard isn't necessary, and nor is the
+ * variable-MIDR TI925 behaviour. Instead we have a single
+ * (strictly speaking IMPDEF) alias of the MIDR, REVIDR.
+ */
+ { .name = "MIDR_EL1", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 0,
+ .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->midr },
+ { .name = "REVIDR_EL1", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 6,
+ .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->midr },
+ REGINFO_SENTINEL
+ };
+ ARMCPRegInfo id_cp_reginfo[] = {
+ /* These are common to v8 and pre-v8 */
+ { .name = "CTR",
+ .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 1,
+ .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
+ { .name = "CTR_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 0, .crm = 0,
+ .access = PL0_R, .accessfn = ctr_el0_access,
+ .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
+ /* TCMTR and TLBTR exist in v8 but have no 64-bit versions */
+ { .name = "TCMTR",
+ .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2,
+ .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "TLBTR",
+ .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3,
+ .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
+ REGINFO_SENTINEL
+ };
ARMCPRegInfo crn0_wi_reginfo = {
.name = "CRN0_WI", .cp = 15, .crn = 0, .crm = CP_ANY,
.opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W,
@@ -2135,10 +2433,19 @@ void register_cp_regs_for_features(ARMCPU *cpu)
* UNDEF.
*/
define_one_arm_cp_reg(cpu, &crn0_wi_reginfo);
+ for (r = id_pre_v8_midr_cp_reginfo;
+ r->type != ARM_CP_SENTINEL; r++) {
+ r->access = PL1_RW;
+ }
for (r = id_cp_reginfo; r->type != ARM_CP_SENTINEL; r++) {
r->access = PL1_RW;
}
}
+ if (arm_feature(env, ARM_FEATURE_V8)) {
+ define_arm_cp_regs(cpu, id_v8_midr_cp_reginfo);
+ } else {
+ define_arm_cp_regs(cpu, id_pre_v8_midr_cp_reginfo);
+ }
define_arm_cp_regs(cpu, id_cp_reginfo);
}
@@ -2148,7 +2455,8 @@ void register_cp_regs_for_features(ARMCPU *cpu)
if (arm_feature(env, ARM_FEATURE_AUXCR)) {
ARMCPRegInfo auxcr = {
- .name = "AUXCR", .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1,
+ .name = "ACTLR_EL1", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 1,
.access = PL1_RW, .type = ARM_CP_CONST,
.resetvalue = cpu->reset_auxcr
};
@@ -2156,12 +2464,39 @@ void register_cp_regs_for_features(ARMCPU *cpu)
}
if (arm_feature(env, ARM_FEATURE_CBAR)) {
- ARMCPRegInfo cbar = {
- .name = "CBAR", .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
- .access = PL1_R|PL3_W, .resetvalue = cpu->reset_cbar,
- .fieldoffset = offsetof(CPUARMState, cp15.c15_config_base_address)
- };
- define_one_arm_cp_reg(cpu, &cbar);
+ if (arm_feature(env, ARM_FEATURE_AARCH64)) {
+ /* 32 bit view is [31:18] 0...0 [43:32]. */
+ uint32_t cbar32 = (extract64(cpu->reset_cbar, 18, 14) << 18)
+ | extract64(cpu->reset_cbar, 32, 12);
+ ARMCPRegInfo cbar_reginfo[] = {
+ { .name = "CBAR",
+ .type = ARM_CP_CONST,
+ .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
+ .access = PL1_R, .resetvalue = cpu->reset_cbar },
+ { .name = "CBAR_EL1", .state = ARM_CP_STATE_AA64,
+ .type = ARM_CP_CONST,
+ .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 3, .opc2 = 0,
+ .access = PL1_R, .resetvalue = cbar32 },
+ REGINFO_SENTINEL
+ };
+ /* We don't implement a r/w 64 bit CBAR currently */
+ assert(arm_feature(env, ARM_FEATURE_CBAR_RO));
+ define_arm_cp_regs(cpu, cbar_reginfo);
+ } else {
+ ARMCPRegInfo cbar = {
+ .name = "CBAR",
+ .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
+ .access = PL1_R|PL3_W, .resetvalue = cpu->reset_cbar,
+ .fieldoffset = offsetof(CPUARMState,
+ cp15.c15_config_base_address)
+ };
+ if (arm_feature(env, ARM_FEATURE_CBAR_RO)) {
+ cbar.access = PL1_R;
+ cbar.fieldoffset = 0;
+ cbar.type = ARM_CP_CONST;
+ }
+ define_one_arm_cp_reg(cpu, &cbar);
+ }
}
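For a concrete view of the cbar32 packing used in the AArch64 branch above: bits [31:18] stay in place and bits [43:32] land in [11:0]. With an arbitrary example value of 0x8_4000_0000 (cbar32_view() is a hypothetical name):

    #include <stdint.h>

    static uint32_t cbar32_view(uint64_t cbar)
    {
        return (uint32_t)((cbar & 0xfffc0000u) | ((cbar >> 32) & 0xfffu));
    }

    /* cbar32_view(0x840000000ULL) == 0x40000008 */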
/* Generic registers whose values depend on the implementation */
@@ -2186,19 +2521,7 @@ void register_cp_regs_for_features(ARMCPU *cpu)
ARMCPU *cpu_arm_init(const char *cpu_model)
{
- ARMCPU *cpu;
- ObjectClass *oc;
-
- oc = cpu_class_by_name(TYPE_ARM_CPU, cpu_model);
- if (!oc) {
- return NULL;
- }
- cpu = ARM_CPU(object_new(object_class_get_name(oc)));
-
- /* TODO this should be set centrally, once possible */
- object_property_set_bool(OBJECT(cpu), true, "realized", NULL);
-
- return cpu;
+ return ARM_CPU(cpu_generic_init(TYPE_ARM_CPU, cpu_model));
}
void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu)
@@ -2661,21 +2984,20 @@ uint32_t HELPER(rbit)(uint32_t x)
void arm_cpu_do_interrupt(CPUState *cs)
{
- ARMCPU *cpu = ARM_CPU(cs);
- CPUARMState *env = &cpu->env;
-
- env->exception_index = -1;
+ cs->exception_index = -1;
}
-int cpu_arm_handle_mmu_fault (CPUARMState *env, target_ulong address, int rw,
- int mmu_idx)
+int arm_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
+ int mmu_idx)
{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+
+ env->exception.vaddress = address;
if (rw == 2) {
- env->exception_index = EXCP_PREFETCH_ABORT;
- env->cp15.c6_insn = address;
+ cs->exception_index = EXCP_PREFETCH_ABORT;
} else {
- env->exception_index = EXCP_DATA_ABORT;
- env->cp15.c6_data = address;
+ cs->exception_index = EXCP_DATA_ABORT;
}
return 1;
}
@@ -2683,29 +3005,40 @@ int cpu_arm_handle_mmu_fault (CPUARMState *env, target_ulong address, int rw,
/* These should probably raise undefined insn exceptions. */
void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val)
{
- cpu_abort(env, "v7m_mrs %d\n", reg);
+ ARMCPU *cpu = arm_env_get_cpu(env);
+
+ cpu_abort(CPU(cpu), "v7m_msr %d\n", reg);
}
uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
{
- cpu_abort(env, "v7m_mrs %d\n", reg);
+ ARMCPU *cpu = arm_env_get_cpu(env);
+
+ cpu_abort(CPU(cpu), "v7m_mrs %d\n", reg);
return 0;
}
void switch_mode(CPUARMState *env, int mode)
{
- if (mode != ARM_CPU_MODE_USR)
- cpu_abort(env, "Tried to switch out of user mode\n");
+ ARMCPU *cpu = arm_env_get_cpu(env);
+
+ if (mode != ARM_CPU_MODE_USR) {
+ cpu_abort(CPU(cpu), "Tried to switch out of user mode\n");
+ }
}
void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val)
{
- cpu_abort(env, "banked r13 write\n");
+ ARMCPU *cpu = arm_env_get_cpu(env);
+
+ cpu_abort(CPU(cpu), "banked r13 write\n");
}
uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
{
- cpu_abort(env, "banked r13 read\n");
+ ARMCPU *cpu = arm_env_get_cpu(env);
+
+ cpu_abort(CPU(cpu), "banked r13 read\n");
return 0;
}
@@ -2762,15 +3095,17 @@ void switch_mode(CPUARMState *env, int mode)
static void v7m_push(CPUARMState *env, uint32_t val)
{
- CPUState *cs = ENV_GET_CPU(env);
+ CPUState *cs = CPU(arm_env_get_cpu(env));
+
env->regs[13] -= 4;
stl_phys(cs->as, env->regs[13], val);
}
static uint32_t v7m_pop(CPUARMState *env)
{
- CPUState *cs = ENV_GET_CPU(env);
+ CPUState *cs = CPU(arm_env_get_cpu(env));
uint32_t val;
+
val = ldl_phys(cs->as, env->regs[13]);
env->regs[13] += 4;
return val;
@@ -2819,37 +3154,6 @@ static void do_v7m_exception_exit(CPUARMState *env)
pointer. */
}
-/* Exception names for debug logging; note that not all of these
- * precisely correspond to architectural exceptions.
- */
-static const char * const excnames[] = {
- [EXCP_UDEF] = "Undefined Instruction",
- [EXCP_SWI] = "SVC",
- [EXCP_PREFETCH_ABORT] = "Prefetch Abort",
- [EXCP_DATA_ABORT] = "Data Abort",
- [EXCP_IRQ] = "IRQ",
- [EXCP_FIQ] = "FIQ",
- [EXCP_BKPT] = "Breakpoint",
- [EXCP_EXCEPTION_EXIT] = "QEMU v7M exception exit",
- [EXCP_KERNEL_TRAP] = "QEMU intercept of kernel commpage",
- [EXCP_STREX] = "QEMU intercept of STREX",
-};
-
-static inline void arm_log_exception(int idx)
-{
- if (qemu_loglevel_mask(CPU_LOG_INT)) {
- const char *exc = NULL;
-
- if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
- exc = excnames[idx];
- }
- if (!exc) {
- exc = "unknown";
- }
- qemu_log_mask(CPU_LOG_INT, "Taking exception %d [%s]\n", idx, exc);
- }
-}
-
void arm_v7m_cpu_do_interrupt(CPUState *cs)
{
ARMCPU *cpu = ARM_CPU(cs);
@@ -2858,7 +3162,7 @@ void arm_v7m_cpu_do_interrupt(CPUState *cs)
uint32_t lr;
uint32_t addr;
- arm_log_exception(env->exception_index);
+ arm_log_exception(cs->exception_index);
lr = 0xfffffff1;
if (env->v7m.current_sp)
@@ -2870,7 +3174,7 @@ void arm_v7m_cpu_do_interrupt(CPUState *cs)
handle it. */
/* TODO: Need to escalate if the current priority is higher than the
one we're raising. */
- switch (env->exception_index) {
+ switch (cs->exception_index) {
case EXCP_UDEF:
armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE);
return;
@@ -2880,6 +3184,9 @@ void arm_v7m_cpu_do_interrupt(CPUState *cs)
return;
case EXCP_PREFETCH_ABORT:
case EXCP_DATA_ABORT:
+ /* TODO: if we implemented the MPU registers, this is where we
+ * should set the MMFAR, etc. from exception.fsr and exception.vaddress.
+ */
armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM);
return;
case EXCP_BKPT:
@@ -2902,7 +3209,7 @@ void arm_v7m_cpu_do_interrupt(CPUState *cs)
do_v7m_exception_exit(env);
return;
default:
- cpu_abort(env, "Unhandled exception 0x%x\n", env->exception_index);
+ cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
return; /* Never happens. Keep compiler happy. */
}
@@ -2943,10 +3250,10 @@ void arm_cpu_do_interrupt(CPUState *cs)
assert(!IS_M(env));
- arm_log_exception(env->exception_index);
+ arm_log_exception(cs->exception_index);
/* TODO: Vectored interrupt controller. */
- switch (env->exception_index) {
+ switch (cs->exception_index) {
case EXCP_UDEF:
new_mode = ARM_CPU_MODE_UND;
addr = 0x04;
@@ -2994,19 +3301,26 @@ void arm_cpu_do_interrupt(CPUState *cs)
return;
}
}
- env->cp15.c5_insn = 2;
+ env->exception.fsr = 2;
/* Fall through to prefetch abort. */
case EXCP_PREFETCH_ABORT:
+ env->cp15.ifsr_el2 = env->exception.fsr;
+ env->cp15.far_el1 = deposit64(env->cp15.far_el1, 32, 32,
+ env->exception.vaddress);
qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x IFAR 0x%x\n",
- env->cp15.c5_insn, env->cp15.c6_insn);
+ env->cp15.ifsr_el2, (uint32_t)env->exception.vaddress);
new_mode = ARM_CPU_MODE_ABT;
addr = 0x0c;
mask = CPSR_A | CPSR_I;
offset = 4;
break;
case EXCP_DATA_ABORT:
+ env->cp15.esr_el1 = env->exception.fsr;
+ env->cp15.far_el1 = deposit64(env->cp15.far_el1, 0, 32,
+ env->exception.vaddress);
qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n",
- env->cp15.c5_data, env->cp15.c6_data);
+ (uint32_t)env->cp15.esr_el1,
+ (uint32_t)env->exception.vaddress);
new_mode = ARM_CPU_MODE_ABT;
addr = 0x10;
mask = CPSR_A | CPSR_I;
@@ -3027,7 +3341,7 @@ void arm_cpu_do_interrupt(CPUState *cs)
offset = 4;
break;
default:
- cpu_abort(env, "Unhandled exception 0x%x\n", env->exception_index);
+ cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
return; /* Never happens. Keep compiler happy. */
}
/* High vectors. */
@@ -3134,7 +3448,7 @@ static int get_phys_addr_v5(CPUARMState *env, uint32_t address, int access_type,
int is_user, hwaddr *phys_ptr,
int *prot, target_ulong *page_size)
{
- CPUState *cs = ENV_GET_CPU(env);
+ CPUState *cs = CPU(arm_env_get_cpu(env));
int code;
uint32_t table;
uint32_t desc;
@@ -3230,7 +3544,7 @@ static int get_phys_addr_v6(CPUARMState *env, uint32_t address, int access_type,
int is_user, hwaddr *phys_ptr,
int *prot, target_ulong *page_size)
{
- CPUState *cs = ENV_GET_CPU(env);
+ CPUState *cs = CPU(arm_env_get_cpu(env));
int code;
uint32_t table;
uint32_t desc;
@@ -3348,36 +3662,56 @@ typedef enum {
permission_fault = 3,
} MMUFaultType;
-static int get_phys_addr_lpae(CPUARMState *env, uint32_t address,
+static int get_phys_addr_lpae(CPUARMState *env, target_ulong address,
int access_type, int is_user,
hwaddr *phys_ptr, int *prot,
target_ulong *page_size_ptr)
{
- CPUState *cs = ENV_GET_CPU(env);
+ CPUState *cs = CPU(arm_env_get_cpu(env));
/* Read an LPAE long-descriptor translation table. */
MMUFaultType fault_type = translation_fault;
uint32_t level = 1;
uint32_t epd;
- uint32_t tsz;
+ int32_t tsz;
+ uint32_t tg;
uint64_t ttbr;
int ttbr_select;
- int n;
- hwaddr descaddr;
+ hwaddr descaddr, descmask;
uint32_t tableattrs;
target_ulong page_size;
uint32_t attrs;
+ int32_t granule_sz = 9;
+ int32_t va_size = 32;
+ int32_t tbi = 0;
+
+ if (arm_el_is_aa64(env, 1)) {
+ va_size = 64;
+ if (extract64(address, 55, 1))
+ tbi = extract64(env->cp15.c2_control, 38, 1);
+ else
+ tbi = extract64(env->cp15.c2_control, 37, 1);
+ tbi *= 8;
+ }
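+ /* With top-byte-ignore enabled, tbi is 8 so that the top byte of the
+ * virtual address is excluded from the TTBR0/TTBR1 region checks below.
+ */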
/* Determine whether this address is in the region controlled by
* TTBR0 or TTBR1 (or if it is in neither region and should fault).
* This is a Non-secure PL0/1 stage 1 translation, so controlled by
* TTBCR/TTBR0/TTBR1 in accordance with ARM ARM DDI0406C table B-32:
*/
- uint32_t t0sz = extract32(env->cp15.c2_control, 0, 3);
- uint32_t t1sz = extract32(env->cp15.c2_control, 16, 3);
- if (t0sz && !extract32(address, 32 - t0sz, t0sz)) {
+ uint32_t t0sz = extract32(env->cp15.c2_control, 0, 6);
+ if (arm_el_is_aa64(env, 1)) {
+ t0sz = MIN(t0sz, 39);
+ t0sz = MAX(t0sz, 16);
+ }
+ uint32_t t1sz = extract32(env->cp15.c2_control, 16, 6);
+ if (arm_el_is_aa64(env, 1)) {
+ t1sz = MIN(t1sz, 39);
+ t1sz = MAX(t1sz, 16);
+ }
+ if (t0sz && !extract64(address, va_size - t0sz, t0sz - tbi)) {
/* there is a ttbr0 region and we are in it (high bits all zero) */
ttbr_select = 0;
- } else if (t1sz && !extract32(~address, 32 - t1sz, t1sz)) {
+ } else if (t1sz && !extract64(~address, va_size - t1sz, t1sz - tbi)) {
/* there is a ttbr1 region and we are in it (high bits all one) */
ttbr_select = 1;
} else if (!t0sz) {
@@ -3403,10 +3737,26 @@ static int get_phys_addr_lpae(CPUARMState *env, uint32_t address,
ttbr = env->cp15.ttbr0_el1;
epd = extract32(env->cp15.c2_control, 7, 1);
tsz = t0sz;
+
+ tg = extract32(env->cp15.c2_control, 14, 2);
+ if (tg == 1) { /* 64KB pages */
+ granule_sz = 13;
+ }
+ if (tg == 2) { /* 16KB pages */
+ granule_sz = 11;
+ }
} else {
ttbr = env->cp15.ttbr1_el1;
epd = extract32(env->cp15.c2_control, 23, 1);
tsz = t1sz;
+
+ tg = extract32(env->cp15.c2_control, 30, 2);
+ if (tg == 3) { /* 64KB pages */
+ granule_sz = 13;
+ }
+ if (tg == 1) { /* 16KB pages */
+ granule_sz = 11;
+ }
}
if (epd) {
@@ -3414,34 +3764,37 @@ static int get_phys_addr_lpae(CPUARMState *env, uint32_t address,
goto do_fault;
}
- /* If the region is small enough we will skip straight to a 2nd level
- * lookup. This affects the number of bits of the address used in
- * combination with the TTBR to find the first descriptor. ('n' here
- * matches the usage in the ARM ARM sB3.6.6, where bits [39..n] are
- * from the TTBR, [n-1..3] from the vaddr, and [2..0] always zero).
+ /* The starting level depends on the virtual address size, which can be
+ * up to 48 bits, and on the translation granule size.
*/
- if (tsz > 1) {
- level = 2;
- n = 14 - tsz;
+ if ((va_size - tsz) > (granule_sz * 4 + 3)) {
+ level = 0;
+ } else if ((va_size - tsz) > (granule_sz * 3 + 3)) {
+ level = 1;
} else {
- n = 5 - tsz;
+ level = 2;
}
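+ /* Illustrative cases (values assumed): with 4K granules (granule_sz == 9)
+ * a 32-bit input region starts the walk at level 1, while a 48-bit
+ * AArch64 region starts at level 0.
+ */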
/* Clear the vaddr bits which aren't part of the within-region address,
* so that we don't have to special case things when calculating the
* first descriptor address.
*/
- address &= (0xffffffffU >> tsz);
+ if (tsz) {
+ address &= (1ULL << (va_size - tsz)) - 1;
+ }
+
+ descmask = (1ULL << (granule_sz + 3)) - 1;
/* Now we can extract the actual base address from the TTBR */
- descaddr = extract64(ttbr, 0, 40);
- descaddr &= ~((1ULL << n) - 1);
+ descaddr = extract64(ttbr, 0, 48);
+ descaddr &= ~((1ULL << (va_size - tsz - (granule_sz * (4 - level)))) - 1);
tableattrs = 0;
for (;;) {
uint64_t descriptor;
- descaddr |= ((address >> (9 * (4 - level))) & 0xff8);
+ descaddr |= (address >> (granule_sz * (4 - level))) & descmask;
+ descaddr &= ~7ULL;
descriptor = ldq_phys(cs->as, descaddr);
if (!(descriptor & 1) ||
(!(descriptor & 2) && (level == 3))) {
@@ -3464,11 +3817,16 @@ static int get_phys_addr_lpae(CPUARMState *env, uint32_t address,
* These are basically the same thing, although the number
* of bits we pull in from the vaddr varies.
*/
- page_size = (1 << (39 - (9 * level)));
+ page_size = (1 << ((granule_sz * (4 - level)) + 3));
descaddr |= (address & (page_size - 1));
/* Extract attributes from the descriptor and merge with table attrs */
- attrs = extract64(descriptor, 2, 10)
- | (extract64(descriptor, 52, 12) << 10);
+ if (arm_feature(env, ARM_FEATURE_V8)) {
+ attrs = extract64(descriptor, 2, 10)
+ | (extract64(descriptor, 53, 11) << 10);
+ } else {
+ attrs = extract64(descriptor, 2, 10)
+ | (extract64(descriptor, 52, 12) << 10);
+ }
attrs |= extract32(tableattrs, 0, 2) << 11; /* XN, PXN */
attrs |= extract32(tableattrs, 3, 1) << 5; /* APTable[1] => AP[2] */
/* The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1
@@ -3542,9 +3900,9 @@ static int get_phys_addr_mpu(CPUARMState *env, uint32_t address,
return 2;
if (access_type == 2) {
- mask = env->cp15.c5_insn;
+ mask = env->cp15.pmsav5_insn_ap;
} else {
- mask = env->cp15.c5_data;
+ mask = env->cp15.pmsav5_data_ap;
}
mask = (mask >> (n * 4)) & 0xf;
switch (mask) {
@@ -3602,7 +3960,7 @@ static int get_phys_addr_mpu(CPUARMState *env, uint32_t address,
* @prot: set to the permissions for the page containing phys_ptr
* @page_size: set to the size of the page containing phys_ptr
*/
-static inline int get_phys_addr(CPUARMState *env, uint32_t address,
+static inline int get_phys_addr(CPUARMState *env, target_ulong address,
int access_type, int is_user,
hwaddr *phys_ptr, int *prot,
target_ulong *page_size)
@@ -3633,13 +3991,17 @@ static inline int get_phys_addr(CPUARMState *env, uint32_t address,
}
}
-int cpu_arm_handle_mmu_fault (CPUARMState *env, target_ulong address,
- int access_type, int mmu_idx)
+int arm_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
+ int access_type, int mmu_idx)
{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
hwaddr phys_addr;
target_ulong page_size;
int prot;
int ret, is_user;
+ uint32_t syn;
+ bool same_el = (arm_current_pl(env) != 0);
is_user = mmu_idx == MMU_USER_IDX;
ret = get_phys_addr(env, address, access_type, is_user, &phys_addr, &prot,
@@ -3647,22 +4009,31 @@ int cpu_arm_handle_mmu_fault (CPUARMState *env, target_ulong address,
if (ret == 0) {
/* Map a single [sub]page. */
phys_addr &= ~(hwaddr)0x3ff;
- address &= ~(uint32_t)0x3ff;
- tlb_set_page (env, address, phys_addr, prot, mmu_idx, page_size);
+ address &= ~(target_ulong)0x3ff;
+ tlb_set_page(cs, address, phys_addr, prot, mmu_idx, page_size);
return 0;
}
+ /* AArch64 syndrome does not have an LPAE bit */
+ syn = ret & ~(1 << 9);
+
+ /* For insn and data aborts we assume there is no instruction syndrome
+ * information; this is always true for exceptions reported to EL1.
+ */
if (access_type == 2) {
- env->cp15.c5_insn = ret;
- env->cp15.c6_insn = address;
- env->exception_index = EXCP_PREFETCH_ABORT;
+ syn = syn_insn_abort(same_el, 0, 0, syn);
+ cs->exception_index = EXCP_PREFETCH_ABORT;
} else {
- env->cp15.c5_data = ret;
- if (access_type == 1 && arm_feature(env, ARM_FEATURE_V6))
- env->cp15.c5_data |= (1 << 11);
- env->cp15.c6_data = address;
- env->exception_index = EXCP_DATA_ABORT;
+ syn = syn_data_abort(same_el, 0, 0, 0, access_type == 1, syn);
+ if (access_type == 1 && arm_feature(env, ARM_FEATURE_V6)) {
+ ret |= (1 << 11);
+ }
+ cs->exception_index = EXCP_DATA_ABORT;
}
+
+ env->exception.syndrome = syn;
+ env->exception.vaddress = address;
+ env->exception.fsr = ret;
return 1;
}
@@ -3703,6 +4074,8 @@ uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
{
+ ARMCPU *cpu = arm_env_get_cpu(env);
+
switch (reg) {
case 0: /* APSR */
return xpsr_read(env) & 0xf8000000;
@@ -3733,13 +4106,15 @@ uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
return env->v7m.control;
default:
/* ??? For debugging only. */
- cpu_abort(env, "Unimplemented system register read (%d)\n", reg);
+ cpu_abort(CPU(cpu), "Unimplemented system register read (%d)\n", reg);
return 0;
}
}
void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val)
{
+ ARMCPU *cpu = arm_env_get_cpu(env);
+
switch (reg) {
case 0: /* APSR */
xpsr_write(env, val, 0xf8000000);
@@ -3802,13 +4177,95 @@ void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val)
break;
default:
/* ??? For debugging only. */
- cpu_abort(env, "Unimplemented system register write (%d)\n", reg);
+ cpu_abort(CPU(cpu), "Unimplemented system register write (%d)\n", reg);
return;
}
}
#endif
+void HELPER(dc_zva)(CPUARMState *env, uint64_t vaddr_in)
+{
+ /* Implement DC ZVA, which zeroes a fixed-length block of memory.
+ * Note that we do not implement the (architecturally mandated)
+ * alignment fault for attempts to use this on Device memory
+ * (which matches the usual QEMU behaviour of not implementing either
+ * alignment faults or any memory attribute handling).
+ */
+
+ ARMCPU *cpu = arm_env_get_cpu(env);
+ uint64_t blocklen = 4 << cpu->dcz_blocksize;
+ uint64_t vaddr = vaddr_in & ~(blocklen - 1);
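+ /* For example (illustrative value), dcz_blocksize == 4 gives a 64 byte
+ * block, so vaddr_in is rounded down to a 64 byte boundary here.
+ */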
+
+#ifndef CONFIG_USER_ONLY
+ {
+ /* Slightly awkwardly, QEMU's TARGET_PAGE_SIZE may be less than
+ * the block size so we might have to do more than one TLB lookup.
+ * We know that in fact for any v8 CPU the page size is at least 4K
+ * and the block size must be 2K or less, but TARGET_PAGE_SIZE is only
+ * 1K as an artefact of legacy v5 subpage support being present in the
+ * same QEMU executable.
+ */
+ int maxidx = DIV_ROUND_UP(blocklen, TARGET_PAGE_SIZE);
+ void *hostaddr[maxidx];
+ int try, i;
+
+ for (try = 0; try < 2; try++) {
+
+ for (i = 0; i < maxidx; i++) {
+ hostaddr[i] = tlb_vaddr_to_host(env,
+ vaddr + TARGET_PAGE_SIZE * i,
+ 1, cpu_mmu_index(env));
+ if (!hostaddr[i]) {
+ break;
+ }
+ }
+ if (i == maxidx) {
+ /* If it's all in the TLB it's fair game for just writing to;
+ * we know we don't need to update dirty status, etc.
+ */
+ for (i = 0; i < maxidx - 1; i++) {
+ memset(hostaddr[i], 0, TARGET_PAGE_SIZE);
+ }
+ memset(hostaddr[i], 0, blocklen - (i * TARGET_PAGE_SIZE));
+ return;
+ }
+ /* OK, try a store and see if we can populate the tlb. This
+ * might cause an exception if the memory isn't writable,
+ * in which case we will longjmp out of here. We must for
+ * this purpose use the actual register value passed to us
+ * so that we get the fault address right.
+ */
+ helper_ret_stb_mmu(env, vaddr_in, 0, cpu_mmu_index(env), GETRA());
+ /* Now we can populate the other TLB entries, if any */
+ for (i = 0; i < maxidx; i++) {
+ uint64_t va = vaddr + TARGET_PAGE_SIZE * i;
+ if (va != (vaddr_in & TARGET_PAGE_MASK)) {
+ helper_ret_stb_mmu(env, va, 0, cpu_mmu_index(env), GETRA());
+ }
+ }
+ }
+
+ /* Slow path (probably an attempt to do this to an I/O device or
+ * similar, or clearing of a block of code we have translations
+ * cached for). Just do a series of byte writes as the architecture
+ * demands. It's not worth trying to use a cpu_physical_memory_map(),
+ * memset(), unmap() sequence here because:
+ * + we'd need to account for the blocksize being larger than a page
+ * + the direct-RAM access case is almost always going to be dealt
+ * with in the fastpath code above, so there's no speed benefit
+ * + we would have to deal with the map returning NULL because the
+ * bounce buffer was in use
+ */
+ for (i = 0; i < blocklen; i++) {
+ helper_ret_stb_mmu(env, vaddr + i, 0, cpu_mmu_index(env), GETRA());
+ }
+ }
+#else
+ memset(g2h(vaddr), 0, blocklen);
+#endif
+}
+
/* Note that signed overflow is undefined in C. The following routines are
careful to use unsigned types where modulo arithmetic is required.
Failure to do so _will_ break on newer gcc. */
@@ -4487,16 +4944,21 @@ float32 HELPER(rsqrts_f32)(float32 a, float32 b, CPUARMState *env)
* int->float conversions at run-time. */
#define float64_256 make_float64(0x4070000000000000LL)
#define float64_512 make_float64(0x4080000000000000LL)
+#define float32_maxnorm make_float32(0x7f7fffff)
+#define float64_maxnorm make_float64(0x7fefffffffffffffLL)
-/* The algorithm that must be used to calculate the estimate
- * is specified by the ARM ARM.
+/* Reciprocal functions
+ *
+ * The algorithm that must be used to calculate the estimate
+ * is specified by the ARM ARM, see FPRecipEstimate()
*/
-static float64 recip_estimate(float64 a, CPUARMState *env)
+
+static float64 recip_estimate(float64 a, float_status *real_fp_status)
{
/* These calculations mustn't set any fp exception flags,
* so we use a local copy of the fp_status.
*/
- float_status dummy_status = env->vfp.standard_fp_status;
+ float_status dummy_status = *real_fp_status;
float_status *s = &dummy_status;
/* q = (int)(a * 512.0) */
float64 q = float64_mul(float64_512, a, s);
@@ -4517,56 +4979,178 @@ static float64 recip_estimate(float64 a, CPUARMState *env)
return float64_div(int64_to_float64(q_int, s), float64_256, s);
}
-float32 HELPER(recpe_f32)(float32 a, CPUARMState *env)
+/* Common wrapper to call recip_estimate */
+static float64 call_recip_estimate(float64 num, int off, float_status *fpst)
{
- float_status *s = &env->vfp.standard_fp_status;
- float64 f64;
- uint32_t val32 = float32_val(a);
+ uint64_t val64 = float64_val(num);
+ uint64_t frac = extract64(val64, 0, 52);
+ int64_t exp = extract64(val64, 52, 11);
+ uint64_t sbit;
+ float64 scaled, estimate;
- int result_exp;
- int a_exp = (val32 & 0x7f800000) >> 23;
- int sign = val32 & 0x80000000;
+ /* Generate the scaled number for the estimate function */
+ if (exp == 0) {
+ if (extract64(frac, 51, 1) == 0) {
+ exp = -1;
+ frac = extract64(frac, 0, 50) << 2;
+ } else {
+ frac = extract64(frac, 0, 51) << 1;
+ }
+ }
- if (float32_is_any_nan(a)) {
- if (float32_is_signaling_nan(a)) {
- float_raise(float_flag_invalid, s);
+ /* scaled = '0' : '01111111110' : fraction<51:44> : Zeros(44); */
+ scaled = make_float64((0x3feULL << 52)
+ | extract64(frac, 44, 8) << 44);
+
+ estimate = recip_estimate(scaled, fpst);
+
+ /* Build new result */
+ val64 = float64_val(estimate);
+ sbit = 0x8000000000000000ULL & val64;
+ exp = off - exp;
+ frac = extract64(val64, 0, 52);
+
+ if (exp == 0) {
+ frac = 1ULL << 51 | extract64(frac, 1, 51);
+ } else if (exp == -1) {
+ frac = 1ULL << 50 | extract64(frac, 2, 50);
+ exp = 0;
+ }
+
+ return make_float64(sbit | (exp << 52) | frac);
+}
+
+static bool round_to_inf(float_status *fpst, bool sign_bit)
+{
+ switch (fpst->float_rounding_mode) {
+ case float_round_nearest_even: /* Round to Nearest */
+ return true;
+ case float_round_up: /* Round to +Inf */
+ return !sign_bit;
+ case float_round_down: /* Round to -Inf */
+ return sign_bit;
+ case float_round_to_zero: /* Round to Zero */
+ return false;
+ }
+
+ g_assert_not_reached();
+}
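+
+/* Example of how the callers below use this: in round-to-nearest mode an
+ * out-of-range reciprocal always overflows to infinity, while in
+ * round-to-zero mode the largest normal number is returned instead.
+ */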
+
+float32 HELPER(recpe_f32)(float32 input, void *fpstp)
+{
+ float_status *fpst = fpstp;
+ float32 f32 = float32_squash_input_denormal(input, fpst);
+ uint32_t f32_val = float32_val(f32);
+ uint32_t f32_sbit = 0x80000000ULL & f32_val;
+ int32_t f32_exp = extract32(f32_val, 23, 8);
+ uint32_t f32_frac = extract32(f32_val, 0, 23);
+ float64 f64, r64;
+ uint64_t r64_val;
+ int64_t r64_exp;
+ uint64_t r64_frac;
+
+ if (float32_is_any_nan(f32)) {
+ float32 nan = f32;
+ if (float32_is_signaling_nan(f32)) {
+ float_raise(float_flag_invalid, fpst);
+ nan = float32_maybe_silence_nan(f32);
}
- return float32_default_nan;
- } else if (float32_is_infinity(a)) {
- return float32_set_sign(float32_zero, float32_is_neg(a));
- } else if (float32_is_zero_or_denormal(a)) {
- if (!float32_is_zero(a)) {
- float_raise(float_flag_input_denormal, s);
+ if (fpst->default_nan_mode) {
+ nan = float32_default_nan;
}
- float_raise(float_flag_divbyzero, s);
- return float32_set_sign(float32_infinity, float32_is_neg(a));
- } else if (a_exp >= 253) {
- float_raise(float_flag_underflow, s);
- return float32_set_sign(float32_zero, float32_is_neg(a));
+ return nan;
+ } else if (float32_is_infinity(f32)) {
+ return float32_set_sign(float32_zero, float32_is_neg(f32));
+ } else if (float32_is_zero(f32)) {
+ float_raise(float_flag_divbyzero, fpst);
+ return float32_set_sign(float32_infinity, float32_is_neg(f32));
+ } else if ((f32_val & ~(1ULL << 31)) < (1ULL << 21)) {
+ /* Abs(value) < 2.0^-128 */
+ float_raise(float_flag_overflow | float_flag_inexact, fpst);
+ if (round_to_inf(fpst, f32_sbit)) {
+ return float32_set_sign(float32_infinity, float32_is_neg(f32));
+ } else {
+ return float32_set_sign(float32_maxnorm, float32_is_neg(f32));
+ }
+ } else if (f32_exp >= 253 && fpst->flush_to_zero) {
+ float_raise(float_flag_underflow, fpst);
+ return float32_set_sign(float32_zero, float32_is_neg(f32));
}
- f64 = make_float64((0x3feULL << 52)
- | ((int64_t)(val32 & 0x7fffff) << 29));
- result_exp = 253 - a_exp;
+ f64 = make_float64(((int64_t)(f32_exp) << 52) | (int64_t)(f32_frac) << 29);
+ r64 = call_recip_estimate(f64, 253, fpst);
+ r64_val = float64_val(r64);
+ r64_exp = extract64(r64_val, 52, 11);
+ r64_frac = extract64(r64_val, 0, 52);
- f64 = recip_estimate(f64, env);
+ /* result = sign : result_exp<7:0> : fraction<51:29>; */
+ return make_float32(f32_sbit |
+ (r64_exp & 0xff) << 23 |
+ extract64(r64_frac, 29, 24));
+}
- val32 = sign
- | ((result_exp & 0xff) << 23)
- | ((float64_val(f64) >> 29) & 0x7fffff);
- return make_float32(val32);
+float64 HELPER(recpe_f64)(float64 input, void *fpstp)
+{
+ float_status *fpst = fpstp;
+ float64 f64 = float64_squash_input_denormal(input, fpst);
+ uint64_t f64_val = float64_val(f64);
+ uint64_t f64_sbit = 0x8000000000000000ULL & f64_val;
+ int64_t f64_exp = extract64(f64_val, 52, 11);
+ float64 r64;
+ uint64_t r64_val;
+ int64_t r64_exp;
+ uint64_t r64_frac;
+
+ /* Deal with any special cases */
+ if (float64_is_any_nan(f64)) {
+ float64 nan = f64;
+ if (float64_is_signaling_nan(f64)) {
+ float_raise(float_flag_invalid, fpst);
+ nan = float64_maybe_silence_nan(f64);
+ }
+ if (fpst->default_nan_mode) {
+ nan = float64_default_nan;
+ }
+ return nan;
+ } else if (float64_is_infinity(f64)) {
+ return float64_set_sign(float64_zero, float64_is_neg(f64));
+ } else if (float64_is_zero(f64)) {
+ float_raise(float_flag_divbyzero, fpst);
+ return float64_set_sign(float64_infinity, float64_is_neg(f64));
+ } else if ((f64_val & ~(1ULL << 63)) < (1ULL << 50)) {
+ /* Abs(value) < 2.0^-1024 */
+ float_raise(float_flag_overflow | float_flag_inexact, fpst);
+ if (round_to_inf(fpst, f64_sbit)) {
+ return float64_set_sign(float64_infinity, float64_is_neg(f64));
+ } else {
+ return float64_set_sign(float64_maxnorm, float64_is_neg(f64));
+ }
+ } else if (f64_exp >= 1023 && fpst->flush_to_zero) {
+ float_raise(float_flag_underflow, fpst);
+ return float64_set_sign(float64_zero, float64_is_neg(f64));
+ }
+
+ r64 = call_recip_estimate(f64, 2045, fpst);
+ r64_val = float64_val(r64);
+ r64_exp = extract64(r64_val, 52, 11);
+ r64_frac = extract64(r64_val, 0, 52);
+
+ /* result = sign : result_exp<10:0> : fraction<51:0> */
+ return make_float64(f64_sbit |
+ ((r64_exp & 0x7ff) << 52) |
+ r64_frac);
}
/* The algorithm that must be used to calculate the estimate
* is specified by the ARM ARM.
*/
-static float64 recip_sqrt_estimate(float64 a, CPUARMState *env)
+static float64 recip_sqrt_estimate(float64 a, float_status *real_fp_status)
{
/* These calculations mustn't set any fp exception flags,
* so we use a local copy of the fp_status.
*/
- float_status dummy_status = env->vfp.standard_fp_status;
+ float_status dummy_status = *real_fp_status;
float_status *s = &dummy_status;
float64 q;
int64_t q_int;
@@ -4613,49 +5197,64 @@ static float64 recip_sqrt_estimate(float64 a, CPUARMState *env)
return float64_div(int64_to_float64(q_int, s), float64_256, s);
}
-float32 HELPER(rsqrte_f32)(float32 a, CPUARMState *env)
+float32 HELPER(rsqrte_f32)(float32 input, void *fpstp)
{
- float_status *s = &env->vfp.standard_fp_status;
+ float_status *s = fpstp;
+ float32 f32 = float32_squash_input_denormal(input, s);
+ uint32_t val = float32_val(f32);
+ uint32_t f32_sbit = 0x80000000 & val;
+ int32_t f32_exp = extract32(val, 23, 8);
+ uint32_t f32_frac = extract32(val, 0, 23);
+ uint64_t f64_frac;
+ uint64_t val64;
int result_exp;
float64 f64;
- uint32_t val;
- uint64_t val64;
-
- val = float32_val(a);
- if (float32_is_any_nan(a)) {
- if (float32_is_signaling_nan(a)) {
+ if (float32_is_any_nan(f32)) {
+ float32 nan = f32;
+ if (float32_is_signaling_nan(f32)) {
float_raise(float_flag_invalid, s);
+ nan = float32_maybe_silence_nan(f32);
}
- return float32_default_nan;
- } else if (float32_is_zero_or_denormal(a)) {
- if (!float32_is_zero(a)) {
- float_raise(float_flag_input_denormal, s);
+ if (s->default_nan_mode) {
+ nan = float32_default_nan;
}
+ return nan;
+ } else if (float32_is_zero(f32)) {
float_raise(float_flag_divbyzero, s);
- return float32_set_sign(float32_infinity, float32_is_neg(a));
- } else if (float32_is_neg(a)) {
+ return float32_set_sign(float32_infinity, float32_is_neg(f32));
+ } else if (float32_is_neg(f32)) {
float_raise(float_flag_invalid, s);
return float32_default_nan;
- } else if (float32_is_infinity(a)) {
+ } else if (float32_is_infinity(f32)) {
return float32_zero;
}
- /* Normalize to a double-precision value between 0.25 and 1.0,
+ /* Scale and normalize to a double-precision value between 0.25 and 1.0,
* preserving the parity of the exponent. */
- if ((val & 0x800000) == 0) {
- f64 = make_float64(((uint64_t)(val & 0x80000000) << 32)
+
+ f64_frac = ((uint64_t) f32_frac) << 29;
+ if (f32_exp == 0) {
+ while (extract64(f64_frac, 51, 1) == 0) {
+ f64_frac = f64_frac << 1;
+ f32_exp = f32_exp - 1;
+ }
+ f64_frac = extract64(f64_frac, 0, 51) << 1;
+ }
+
+ if (extract64(f32_exp, 0, 1) == 0) {
+ f64 = make_float64(((uint64_t) f32_sbit) << 32
| (0x3feULL << 52)
- | ((uint64_t)(val & 0x7fffff) << 29));
+ | f64_frac);
} else {
- f64 = make_float64(((uint64_t)(val & 0x80000000) << 32)
+ f64 = make_float64(((uint64_t) f32_sbit) << 32
| (0x3fdULL << 52)
- | ((uint64_t)(val & 0x7fffff) << 29));
+ | f64_frac);
}
- result_exp = (380 - ((val & 0x7f800000) >> 23)) / 2;
+ result_exp = (380 - f32_exp) / 2;
- f64 = recip_sqrt_estimate(f64, env);
+ f64 = recip_sqrt_estimate(f64, s);
val64 = float64_val(f64);
@@ -4664,8 +5263,72 @@ float32 HELPER(rsqrte_f32)(float32 a, CPUARMState *env)
return make_float32(val);
}
-uint32_t HELPER(recpe_u32)(uint32_t a, CPUARMState *env)
+float64 HELPER(rsqrte_f64)(float64 input, void *fpstp)
{
+ float_status *s = fpstp;
+ float64 f64 = float64_squash_input_denormal(input, s);
+ uint64_t val = float64_val(f64);
+ uint64_t f64_sbit = 0x8000000000000000ULL & val;
+ int64_t f64_exp = extract64(val, 52, 11);
+ uint64_t f64_frac = extract64(val, 0, 52);
+ int64_t result_exp;
+ uint64_t result_frac;
+
+ if (float64_is_any_nan(f64)) {
+ float64 nan = f64;
+ if (float64_is_signaling_nan(f64)) {
+ float_raise(float_flag_invalid, s);
+ nan = float64_maybe_silence_nan(f64);
+ }
+ if (s->default_nan_mode) {
+ nan = float64_default_nan;
+ }
+ return nan;
+ } else if (float64_is_zero(f64)) {
+ float_raise(float_flag_divbyzero, s);
+ return float64_set_sign(float64_infinity, float64_is_neg(f64));
+ } else if (float64_is_neg(f64)) {
+ float_raise(float_flag_invalid, s);
+ return float64_default_nan;
+ } else if (float64_is_infinity(f64)) {
+ return float64_zero;
+ }
+
+ /* Scale and normalize to a double-precision value between 0.25 and 1.0,
+ * preserving the parity of the exponent. */
+
+ if (f64_exp == 0) {
+ while (extract64(f64_frac, 51, 1) == 0) {
+ f64_frac = f64_frac << 1;
+ f64_exp = f64_exp - 1;
+ }
+ f64_frac = extract64(f64_frac, 0, 51) << 1;
+ }
+
+ if (extract64(f64_exp, 0, 1) == 0) {
+ f64 = make_float64(f64_sbit
+ | (0x3feULL << 52)
+ | f64_frac);
+ } else {
+ f64 = make_float64(f64_sbit
+ | (0x3fdULL << 52)
+ | f64_frac);
+ }
+
+ result_exp = (3068 - f64_exp) / 2;
+
+ f64 = recip_sqrt_estimate(f64, s);
+
+ result_frac = extract64(float64_val(f64), 0, 52);
+
+ return make_float64(f64_sbit |
+ ((result_exp & 0x7ff) << 52) |
+ result_frac);
+}
+
+uint32_t HELPER(recpe_u32)(uint32_t a, void *fpstp)
+{
+ float_status *s = fpstp;
float64 f64;
if ((a & 0x80000000) == 0) {
@@ -4675,13 +5338,14 @@ uint32_t HELPER(recpe_u32)(uint32_t a, CPUARMState *env)
f64 = make_float64((0x3feULL << 52)
| ((int64_t)(a & 0x7fffffff) << 21));
- f64 = recip_estimate (f64, env);
+ f64 = recip_estimate(f64, s);
return 0x80000000 | ((float64_val(f64) >> 21) & 0x7fffffff);
}
-uint32_t HELPER(rsqrte_u32)(uint32_t a, CPUARMState *env)
+uint32_t HELPER(rsqrte_u32)(uint32_t a, void *fpstp)
{
+ float_status *fpst = fpstp;
float64 f64;
if ((a & 0xc0000000) == 0) {
@@ -4696,7 +5360,7 @@ uint32_t HELPER(rsqrte_u32)(uint32_t a, CPUARMState *env)
| ((uint64_t)(a & 0x3fffffff) << 22));
}
- f64 = recip_sqrt_estimate(f64, env);
+ f64 = recip_sqrt_estimate(f64, fpst);
return 0x80000000 | ((float64_val(f64) >> 21) & 0x7fffffff);
}
diff --git a/target-arm/helper.h b/target-arm/helper.h
index 8923f8ae71..a5449e7b6f 100644
--- a/target-arm/helper.h
+++ b/target-arm/helper.h
@@ -48,7 +48,8 @@ DEF_HELPER_FLAGS_2(usad8, TCG_CALL_NO_RWG_SE, i32, i32, i32)
DEF_HELPER_FLAGS_3(sel_flags, TCG_CALL_NO_RWG_SE,
i32, i32, i32, i32)
-DEF_HELPER_2(exception, void, env, i32)
+DEF_HELPER_2(exception_internal, void, env, i32)
+DEF_HELPER_3(exception_with_syndrome, void, env, i32, i32)
DEF_HELPER_1(wfi, void, env)
DEF_HELPER_1(wfe, void, env)
@@ -58,13 +59,14 @@ DEF_HELPER_1(cpsr_read, i32, env)
DEF_HELPER_3(v7m_msr, void, env, i32, i32)
DEF_HELPER_2(v7m_mrs, i32, env, i32)
-DEF_HELPER_2(access_check_cp_reg, void, env, ptr)
+DEF_HELPER_3(access_check_cp_reg, void, env, ptr, i32)
DEF_HELPER_3(set_cp_reg, void, env, ptr, i32)
DEF_HELPER_2(get_cp_reg, i32, env, ptr)
DEF_HELPER_3(set_cp_reg64, void, env, ptr, i64)
DEF_HELPER_2(get_cp_reg64, i64, env, ptr)
DEF_HELPER_3(msr_i_pstate, void, env, i32, i32)
+DEF_HELPER_1(exception_return, void, env)
DEF_HELPER_2(get_r13_banked, i32, env, i32)
DEF_HELPER_3(set_r13_banked, void, env, i32, i32)
@@ -167,10 +169,12 @@ DEF_HELPER_4(vfp_muladds, f32, f32, f32, f32, ptr)
DEF_HELPER_3(recps_f32, f32, f32, f32, env)
DEF_HELPER_3(rsqrts_f32, f32, f32, f32, env)
-DEF_HELPER_2(recpe_f32, f32, f32, env)
-DEF_HELPER_2(rsqrte_f32, f32, f32, env)
-DEF_HELPER_2(recpe_u32, i32, i32, env)
-DEF_HELPER_2(rsqrte_u32, i32, i32, env)
+DEF_HELPER_FLAGS_2(recpe_f32, TCG_CALL_NO_RWG, f32, f32, ptr)
+DEF_HELPER_FLAGS_2(recpe_f64, TCG_CALL_NO_RWG, f64, f64, ptr)
+DEF_HELPER_FLAGS_2(rsqrte_f32, TCG_CALL_NO_RWG, f32, f32, ptr)
+DEF_HELPER_FLAGS_2(rsqrte_f64, TCG_CALL_NO_RWG, f64, f64, ptr)
+DEF_HELPER_2(recpe_u32, i32, i32, ptr)
+DEF_HELPER_FLAGS_2(rsqrte_u32, TCG_CALL_NO_RWG, i32, i32, ptr)
DEF_HELPER_5(neon_tbl, i32, env, i32, i32, i32, i32)
DEF_HELPER_3(shl_cc, i32, env, i32, i32)
@@ -184,12 +188,20 @@ DEF_HELPER_FLAGS_2(rints, TCG_CALL_NO_RWG, f32, f32, ptr)
DEF_HELPER_FLAGS_2(rintd, TCG_CALL_NO_RWG, f64, f64, ptr)
/* neon_helper.c */
-DEF_HELPER_3(neon_qadd_u8, i32, env, i32, i32)
-DEF_HELPER_3(neon_qadd_s8, i32, env, i32, i32)
-DEF_HELPER_3(neon_qadd_u16, i32, env, i32, i32)
-DEF_HELPER_3(neon_qadd_s16, i32, env, i32, i32)
-DEF_HELPER_3(neon_qadd_u32, i32, env, i32, i32)
-DEF_HELPER_3(neon_qadd_s32, i32, env, i32, i32)
+DEF_HELPER_FLAGS_3(neon_qadd_u8, TCG_CALL_NO_RWG, i32, env, i32, i32)
+DEF_HELPER_FLAGS_3(neon_qadd_s8, TCG_CALL_NO_RWG, i32, env, i32, i32)
+DEF_HELPER_FLAGS_3(neon_qadd_u16, TCG_CALL_NO_RWG, i32, env, i32, i32)
+DEF_HELPER_FLAGS_3(neon_qadd_s16, TCG_CALL_NO_RWG, i32, env, i32, i32)
+DEF_HELPER_FLAGS_3(neon_qadd_u32, TCG_CALL_NO_RWG, i32, env, i32, i32)
+DEF_HELPER_FLAGS_3(neon_qadd_s32, TCG_CALL_NO_RWG, i32, env, i32, i32)
+DEF_HELPER_FLAGS_3(neon_uqadd_s8, TCG_CALL_NO_RWG, i32, env, i32, i32)
+DEF_HELPER_FLAGS_3(neon_uqadd_s16, TCG_CALL_NO_RWG, i32, env, i32, i32)
+DEF_HELPER_FLAGS_3(neon_uqadd_s32, TCG_CALL_NO_RWG, i32, env, i32, i32)
+DEF_HELPER_FLAGS_3(neon_uqadd_s64, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(neon_sqadd_u8, TCG_CALL_NO_RWG, i32, env, i32, i32)
+DEF_HELPER_FLAGS_3(neon_sqadd_u16, TCG_CALL_NO_RWG, i32, env, i32, i32)
+DEF_HELPER_FLAGS_3(neon_sqadd_u32, TCG_CALL_NO_RWG, i32, env, i32, i32)
+DEF_HELPER_FLAGS_3(neon_sqadd_u64, TCG_CALL_NO_RWG, i64, env, i64, i64)
DEF_HELPER_3(neon_qsub_u8, i32, env, i32, i32)
DEF_HELPER_3(neon_qsub_s8, i32, env, i32, i32)
DEF_HELPER_3(neon_qsub_u16, i32, env, i32, i32)
@@ -373,12 +385,14 @@ DEF_HELPER_2(neon_mull_s16, i64, i32, i32)
DEF_HELPER_1(neon_negl_u16, i64, i64)
DEF_HELPER_1(neon_negl_u32, i64, i64)
-DEF_HELPER_2(neon_qabs_s8, i32, env, i32)
-DEF_HELPER_2(neon_qabs_s16, i32, env, i32)
-DEF_HELPER_2(neon_qabs_s32, i32, env, i32)
-DEF_HELPER_2(neon_qneg_s8, i32, env, i32)
-DEF_HELPER_2(neon_qneg_s16, i32, env, i32)
-DEF_HELPER_2(neon_qneg_s32, i32, env, i32)
+DEF_HELPER_FLAGS_2(neon_qabs_s8, TCG_CALL_NO_RWG, i32, env, i32)
+DEF_HELPER_FLAGS_2(neon_qabs_s16, TCG_CALL_NO_RWG, i32, env, i32)
+DEF_HELPER_FLAGS_2(neon_qabs_s32, TCG_CALL_NO_RWG, i32, env, i32)
+DEF_HELPER_FLAGS_2(neon_qabs_s64, TCG_CALL_NO_RWG, i64, env, i64)
+DEF_HELPER_FLAGS_2(neon_qneg_s8, TCG_CALL_NO_RWG, i32, env, i32)
+DEF_HELPER_FLAGS_2(neon_qneg_s16, TCG_CALL_NO_RWG, i32, env, i32)
+DEF_HELPER_FLAGS_2(neon_qneg_s32, TCG_CALL_NO_RWG, i32, env, i32)
+DEF_HELPER_FLAGS_2(neon_qneg_s64, TCG_CALL_NO_RWG, i64, env, i64)
DEF_HELPER_3(neon_abd_f32, i32, i32, i32, ptr)
DEF_HELPER_3(neon_ceq_f32, i32, i32, i32, ptr)
@@ -502,6 +516,7 @@ DEF_HELPER_4(crypto_aesmc, void, env, i32, i32, i32)
DEF_HELPER_FLAGS_3(crc32, TCG_CALL_NO_RWG_SE, i32, i32, i32, i32)
DEF_HELPER_FLAGS_3(crc32c, TCG_CALL_NO_RWG_SE, i32, i32, i32, i32)
+DEF_HELPER_2(dc_zva, void, env, i64)
#ifdef TARGET_AARCH64
#include "helper-a64.h"
diff --git a/target-arm/internals.h b/target-arm/internals.h
new file mode 100644
index 0000000000..d63a975a7e
--- /dev/null
+++ b/target-arm/internals.h
@@ -0,0 +1,267 @@
+/*
+ * QEMU ARM CPU -- internal functions and types
+ *
+ * Copyright (c) 2014 Linaro Ltd
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see
+ * <http://www.gnu.org/licenses/gpl-2.0.html>
+ *
+ * This header defines functions, types, etc. which need to be shared
+ * between different source files within target-arm/ but which are
+ * private to it and not required by the rest of QEMU.
+ */
+
+#ifndef TARGET_ARM_INTERNALS_H
+#define TARGET_ARM_INTERNALS_H
+
+static inline bool excp_is_internal(int excp)
+{
+ /* Return true if this exception number represents a QEMU-internal
+ * exception that will not be passed to the guest.
+ */
+ return excp == EXCP_INTERRUPT
+ || excp == EXCP_HLT
+ || excp == EXCP_DEBUG
+ || excp == EXCP_HALTED
+ || excp == EXCP_EXCEPTION_EXIT
+ || excp == EXCP_KERNEL_TRAP
+ || excp == EXCP_STREX;
+}
+
+/* Exception names for debug logging; note that not all of these
+ * precisely correspond to architectural exceptions.
+ */
+static const char * const excnames[] = {
+ [EXCP_UDEF] = "Undefined Instruction",
+ [EXCP_SWI] = "SVC",
+ [EXCP_PREFETCH_ABORT] = "Prefetch Abort",
+ [EXCP_DATA_ABORT] = "Data Abort",
+ [EXCP_IRQ] = "IRQ",
+ [EXCP_FIQ] = "FIQ",
+ [EXCP_BKPT] = "Breakpoint",
+ [EXCP_EXCEPTION_EXIT] = "QEMU v7M exception exit",
+ [EXCP_KERNEL_TRAP] = "QEMU intercept of kernel commpage",
+ [EXCP_STREX] = "QEMU intercept of STREX",
+};
+
+static inline void arm_log_exception(int idx)
+{
+ if (qemu_loglevel_mask(CPU_LOG_INT)) {
+ const char *exc = NULL;
+
+ if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
+ exc = excnames[idx];
+ }
+ if (!exc) {
+ exc = "unknown";
+ }
+ qemu_log_mask(CPU_LOG_INT, "Taking exception %d [%s]\n", idx, exc);
+ }
+}
+
+/* Scale factor for generic timers, i.e. the number of ns per tick.
+ * This gives a 62.5MHz timer.
+ */
+#define GTIMER_SCALE 16
+
+int bank_number(int mode);
+void switch_mode(CPUARMState *, int);
+void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu);
+void arm_translate_init(void);
+
+enum arm_fprounding {
+ FPROUNDING_TIEEVEN,
+ FPROUNDING_POSINF,
+ FPROUNDING_NEGINF,
+ FPROUNDING_ZERO,
+ FPROUNDING_TIEAWAY,
+ FPROUNDING_ODD
+};
+
+int arm_rmode_to_sf(int rmode);
+
+static inline void update_spsel(CPUARMState *env, uint32_t imm)
+{
+ /* Update PSTATE SPSel bit; this requires us to update the
+ * working stack pointer in xregs[31].
+ */
+ if (!((imm ^ env->pstate) & PSTATE_SP)) {
+ return;
+ }
+ env->pstate = deposit32(env->pstate, 0, 1, imm);
+
+ /* EL0 has no access rights to update SPSel, and this code
+ * assumes we are updating SP for EL1 while running as EL1.
+ */
+ assert(arm_current_pl(env) == 1);
+ if (env->pstate & PSTATE_SP) {
+ /* Switch from using SP_EL0 to using SP_ELx */
+ env->sp_el[0] = env->xregs[31];
+ env->xregs[31] = env->sp_el[1];
+ } else {
+ /* Switch from SP_EL0 to SP_ELx */
+ env->sp_el[1] = env->xregs[31];
+ env->xregs[31] = env->sp_el[0];
+ }
+}
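+
+/* The MSR SPSel handling in op_helper.c (HELPER(msr_i_pstate)) is one
+ * caller of this helper; the assertion above documents that callers are
+ * expected to run at EL1.
+ */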
+
+/* Valid Syndrome Register EC field values */
+enum arm_exception_class {
+ EC_UNCATEGORIZED = 0x00,
+ EC_WFX_TRAP = 0x01,
+ EC_CP15RTTRAP = 0x03,
+ EC_CP15RRTTRAP = 0x04,
+ EC_CP14RTTRAP = 0x05,
+ EC_CP14DTTRAP = 0x06,
+ EC_ADVSIMDFPACCESSTRAP = 0x07,
+ EC_FPIDTRAP = 0x08,
+ EC_CP14RRTTRAP = 0x0c,
+ EC_ILLEGALSTATE = 0x0e,
+ EC_AA32_SVC = 0x11,
+ EC_AA32_HVC = 0x12,
+ EC_AA32_SMC = 0x13,
+ EC_AA64_SVC = 0x15,
+ EC_AA64_HVC = 0x16,
+ EC_AA64_SMC = 0x17,
+ EC_SYSTEMREGISTERTRAP = 0x18,
+ EC_INSNABORT = 0x20,
+ EC_INSNABORT_SAME_EL = 0x21,
+ EC_PCALIGNMENT = 0x22,
+ EC_DATAABORT = 0x24,
+ EC_DATAABORT_SAME_EL = 0x25,
+ EC_SPALIGNMENT = 0x26,
+ EC_AA32_FPTRAP = 0x28,
+ EC_AA64_FPTRAP = 0x2c,
+ EC_SERROR = 0x2f,
+ EC_BREAKPOINT = 0x30,
+ EC_BREAKPOINT_SAME_EL = 0x31,
+ EC_SOFTWARESTEP = 0x32,
+ EC_SOFTWARESTEP_SAME_EL = 0x33,
+ EC_WATCHPOINT = 0x34,
+ EC_WATCHPOINT_SAME_EL = 0x35,
+ EC_AA32_BKPT = 0x38,
+ EC_VECTORCATCH = 0x3a,
+ EC_AA64_BKPT = 0x3c,
+};
+
+#define ARM_EL_EC_SHIFT 26
+#define ARM_EL_IL_SHIFT 25
+#define ARM_EL_IL (1 << ARM_EL_IL_SHIFT)
+
+/* Utility functions for constructing various kinds of syndrome value.
+ * Note that in general we follow the AArch64 syndrome values; in a
+ * few cases the value in HSR for exceptions taken to AArch32 Hyp
+ * mode differs slightly, so if we ever implemented Hyp mode then the
+ * syndrome value would need some massaging on exception entry.
+ * (One example of this is that AArch64 defaults to IL bit set for
+ * exceptions which don't specifically indicate information about the
+ * trapping instruction, whereas AArch32 defaults to IL bit clear.)
+ */
+static inline uint32_t syn_uncategorized(void)
+{
+ return (EC_UNCATEGORIZED << ARM_EL_EC_SHIFT) | ARM_EL_IL;
+}
+
+static inline uint32_t syn_aa64_svc(uint32_t imm16)
+{
+ return (EC_AA64_SVC << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff);
+}
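+
+/* For example, syn_aa64_svc(0) encodes EC 0x15 in bits [31:26] with the IL
+ * bit set and a zero imm16, i.e. a syndrome value of 0x56000000.
+ */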
+
+static inline uint32_t syn_aa32_svc(uint32_t imm16, bool is_thumb)
+{
+ return (EC_AA32_SVC << ARM_EL_EC_SHIFT) | (imm16 & 0xffff)
+ | (is_thumb ? 0 : ARM_EL_IL);
+}
+
+static inline uint32_t syn_aa64_bkpt(uint32_t imm16)
+{
+ return (EC_AA64_BKPT << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff);
+}
+
+static inline uint32_t syn_aa32_bkpt(uint32_t imm16, bool is_thumb)
+{
+ return (EC_AA32_BKPT << ARM_EL_EC_SHIFT) | (imm16 & 0xffff)
+ | (is_thumb ? 0 : ARM_EL_IL);
+}
+
+static inline uint32_t syn_aa64_sysregtrap(int op0, int op1, int op2,
+ int crn, int crm, int rt,
+ int isread)
+{
+ return (EC_SYSTEMREGISTERTRAP << ARM_EL_EC_SHIFT) | ARM_EL_IL
+ | (op0 << 20) | (op2 << 17) | (op1 << 14) | (crn << 10) | (rt << 5)
+ | (crm << 1) | isread;
+}
+
+static inline uint32_t syn_cp14_rt_trap(int cv, int cond, int opc1, int opc2,
+ int crn, int crm, int rt, int isread,
+ bool is_thumb)
+{
+ return (EC_CP14RTTRAP << ARM_EL_EC_SHIFT)
+ | (is_thumb ? 0 : ARM_EL_IL)
+ | (cv << 24) | (cond << 20) | (opc2 << 17) | (opc1 << 14)
+ | (crn << 10) | (rt << 5) | (crm << 1) | isread;
+}
+
+static inline uint32_t syn_cp15_rt_trap(int cv, int cond, int opc1, int opc2,
+ int crn, int crm, int rt, int isread,
+ bool is_thumb)
+{
+ return (EC_CP15RTTRAP << ARM_EL_EC_SHIFT)
+ | (is_thumb ? 0 : ARM_EL_IL)
+ | (cv << 24) | (cond << 20) | (opc2 << 17) | (opc1 << 14)
+ | (crn << 10) | (rt << 5) | (crm << 1) | isread;
+}
+
+static inline uint32_t syn_cp14_rrt_trap(int cv, int cond, int opc1, int crm,
+ int rt, int rt2, int isread,
+ bool is_thumb)
+{
+ return (EC_CP14RRTTRAP << ARM_EL_EC_SHIFT)
+ | (is_thumb ? 0 : ARM_EL_IL)
+ | (cv << 24) | (cond << 20) | (opc1 << 16)
+ | (rt2 << 10) | (rt << 5) | (crm << 1) | isread;
+}
+
+static inline uint32_t syn_cp15_rrt_trap(int cv, int cond, int opc1, int crm,
+ int rt, int rt2, int isread,
+ bool is_thumb)
+{
+ return (EC_CP15RRTTRAP << ARM_EL_EC_SHIFT)
+ | (is_thumb ? 0 : ARM_EL_IL)
+ | (cv << 24) | (cond << 20) | (opc1 << 16)
+ | (rt2 << 10) | (rt << 5) | (crm << 1) | isread;
+}
+
+static inline uint32_t syn_fp_access_trap(int cv, int cond, bool is_thumb)
+{
+ return (EC_ADVSIMDFPACCESSTRAP << ARM_EL_EC_SHIFT)
+ | (is_thumb ? 0 : ARM_EL_IL)
+ | (cv << 24) | (cond << 20);
+}
+
+static inline uint32_t syn_insn_abort(int same_el, int ea, int s1ptw, int fsc)
+{
+ return (EC_INSNABORT << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT)
+ | (ea << 9) | (s1ptw << 7) | fsc;
+}
+
+static inline uint32_t syn_data_abort(int same_el, int ea, int cm, int s1ptw,
+ int wnr, int fsc)
+{
+ return (EC_DATAABORT << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT)
+ | (ea << 9) | (cm << 8) | (s1ptw << 7) | (wnr << 6) | fsc;
+}
+
+#endif
diff --git a/target-arm/kvm32.c b/target-arm/kvm32.c
index a4fde07969..a690d9935f 100644
--- a/target-arm/kvm32.c
+++ b/target-arm/kvm32.c
@@ -21,6 +21,7 @@
#include "sysemu/kvm.h"
#include "kvm_arm.h"
#include "cpu.h"
+#include "internals.h"
#include "hw/arm/arm.h"
static inline void set_feature(uint64_t *features, int feature)
@@ -294,6 +295,14 @@ typedef struct Reg {
offsetof(CPUARMState, vfp.xregs[ARM_VFP_##R]) \
}
+/* Like COREREG, but handle fields which are in a uint64_t in CPUARMState. */
+#define COREREG64(KERNELNAME, QEMUFIELD) \
+ { \
+ KVM_REG_ARM | KVM_REG_SIZE_U32 | \
+ KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(KERNELNAME), \
+ offsetoflow32(CPUARMState, QEMUFIELD) \
+ }
+
static const Reg regs[] = {
/* R0_usr .. R14_usr */
COREREG(usr_regs.uregs[0], regs[0]),
@@ -314,16 +323,16 @@ static const Reg regs[] = {
/* R13, R14, SPSR for SVC, ABT, UND, IRQ banks */
COREREG(svc_regs[0], banked_r13[1]),
COREREG(svc_regs[1], banked_r14[1]),
- COREREG(svc_regs[2], banked_spsr[1]),
+ COREREG64(svc_regs[2], banked_spsr[1]),
COREREG(abt_regs[0], banked_r13[2]),
COREREG(abt_regs[1], banked_r14[2]),
- COREREG(abt_regs[2], banked_spsr[2]),
+ COREREG64(abt_regs[2], banked_spsr[2]),
COREREG(und_regs[0], banked_r13[3]),
COREREG(und_regs[1], banked_r14[3]),
- COREREG(und_regs[2], banked_spsr[3]),
+ COREREG64(und_regs[2], banked_spsr[3]),
COREREG(irq_regs[0], banked_r13[4]),
COREREG(irq_regs[1], banked_r14[4]),
- COREREG(irq_regs[2], banked_spsr[4]),
+ COREREG64(irq_regs[2], banked_spsr[4]),
/* R8_fiq .. R14_fiq and SPSR_fiq */
COREREG(fiq_regs[0], fiq_regs[0]),
COREREG(fiq_regs[1], fiq_regs[1]),
@@ -332,7 +341,7 @@ static const Reg regs[] = {
COREREG(fiq_regs[4], fiq_regs[4]),
COREREG(fiq_regs[5], banked_r13[5]),
COREREG(fiq_regs[6], banked_r14[5]),
- COREREG(fiq_regs[7], banked_spsr[5]),
+ COREREG64(fiq_regs[7], banked_spsr[5]),
/* R15 */
COREREG(usr_regs.uregs[15], regs[15]),
/* VFP system registers */
diff --git a/target-arm/kvm64.c b/target-arm/kvm64.c
index 1b7ca90374..e115879d9a 100644
--- a/target-arm/kvm64.c
+++ b/target-arm/kvm64.c
@@ -121,8 +121,24 @@ int kvm_arch_put_registers(CPUState *cs, int level)
}
}
+ /* KVM puts SP_EL0 in regs.sp and SP_EL1 in regs.sp_el1. On the
+ * QEMU side we keep the current SP in xregs[31] as well.
+ */
+ if (env->pstate & PSTATE_SP) {
+ env->sp_el[1] = env->xregs[31];
+ } else {
+ env->sp_el[0] = env->xregs[31];
+ }
+
reg.id = AARCH64_CORE_REG(regs.sp);
- reg.addr = (uintptr_t) &env->xregs[31];
+ reg.addr = (uintptr_t) &env->sp_el[0];
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+ if (ret) {
+ return ret;
+ }
+
+ reg.id = AARCH64_CORE_REG(sp_el1);
+ reg.addr = (uintptr_t) &env->sp_el[1];
ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
if (ret) {
return ret;
@@ -144,10 +160,23 @@ int kvm_arch_put_registers(CPUState *cs, int level)
return ret;
}
+ reg.id = AARCH64_CORE_REG(elr_el1);
+ reg.addr = (uintptr_t) &env->elr_el1;
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+ if (ret) {
+ return ret;
+ }
+
+ for (i = 0; i < KVM_NR_SPSR; i++) {
+ reg.id = AARCH64_CORE_REG(spsr[i]);
+ reg.addr = (uintptr_t) &env->banked_spsr[i - 1];
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+ if (ret) {
+ return ret;
+ }
+ }
+
/* TODO:
- * SP_EL1
- * ELR_EL1
- * SPSR[]
* FP state
* system registers
*/
@@ -174,7 +203,14 @@ int kvm_arch_get_registers(CPUState *cs)
}
reg.id = AARCH64_CORE_REG(regs.sp);
- reg.addr = (uintptr_t) &env->xregs[31];
+ reg.addr = (uintptr_t) &env->sp_el[0];
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+ if (ret) {
+ return ret;
+ }
+
+ reg.id = AARCH64_CORE_REG(sp_el1);
+ reg.addr = (uintptr_t) &env->sp_el[1];
ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
if (ret) {
return ret;
@@ -188,6 +224,15 @@ int kvm_arch_get_registers(CPUState *cs)
}
pstate_write(env, val);
+ /* KVM puts SP_EL0 in regs.sp and SP_EL1 in regs.sp_el1. On the
+ * QEMU side we keep the current SP in xregs[31] as well.
+ */
+ if (env->pstate & PSTATE_SP) {
+ env->xregs[31] = env->sp_el[1];
+ } else {
+ env->xregs[31] = env->sp_el[0];
+ }
+
reg.id = AARCH64_CORE_REG(regs.pc);
reg.addr = (uintptr_t) &env->pc;
ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
@@ -195,6 +240,22 @@ int kvm_arch_get_registers(CPUState *cs)
return ret;
}
+ reg.id = AARCH64_CORE_REG(elr_el1);
+ reg.addr = (uintptr_t) &env->elr_el1;
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+ if (ret) {
+ return ret;
+ }
+
+ for (i = 0; i < KVM_NR_SPSR; i++) {
+ reg.id = AARCH64_CORE_REG(spsr[i]);
+ reg.addr = (uintptr_t) &env->banked_spsr[i - 1];
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+ if (ret) {
+ return ret;
+ }
+ }
+
/* TODO: other registers */
return ret;
}
diff --git a/target-arm/machine.c b/target-arm/machine.c
index 8f9e7d4d28..b967223fc0 100644
--- a/target-arm/machine.c
+++ b/target-arm/machine.c
@@ -88,7 +88,7 @@ static bool m_needed(void *opaque)
return arm_feature(env, ARM_FEATURE_M);
}
-const VMStateDescription vmstate_m = {
+static const VMStateDescription vmstate_m = {
.name = "cpu/m",
.version_id = 1,
.minimum_version_id = 1,
@@ -222,9 +222,9 @@ static int cpu_post_load(void *opaque, int version_id)
const VMStateDescription vmstate_arm_cpu = {
.name = "cpu",
- .version_id = 14,
- .minimum_version_id = 14,
- .minimum_version_id_old = 14,
+ .version_id = 17,
+ .minimum_version_id = 17,
+ .minimum_version_id_old = 17,
.pre_save = cpu_pre_save,
.post_load = cpu_post_load,
.fields = (VMStateField[]) {
@@ -238,11 +238,13 @@ const VMStateDescription vmstate_arm_cpu = {
.offset = 0,
},
VMSTATE_UINT32(env.spsr, ARMCPU),
- VMSTATE_UINT32_ARRAY(env.banked_spsr, ARMCPU, 6),
+ VMSTATE_UINT64_ARRAY(env.banked_spsr, ARMCPU, 6),
VMSTATE_UINT32_ARRAY(env.banked_r13, ARMCPU, 6),
VMSTATE_UINT32_ARRAY(env.banked_r14, ARMCPU, 6),
VMSTATE_UINT32_ARRAY(env.usr_regs, ARMCPU, 5),
VMSTATE_UINT32_ARRAY(env.fiq_regs, ARMCPU, 5),
+ VMSTATE_UINT64(env.elr_el1, ARMCPU),
+ VMSTATE_UINT64_ARRAY(env.sp_el, ARMCPU, 2),
/* The length-check must come before the arrays to avoid
* incoming data possibly overflowing the array.
*/
@@ -257,6 +259,9 @@ const VMStateDescription vmstate_arm_cpu = {
VMSTATE_UINT64(env.exclusive_val, ARMCPU),
VMSTATE_UINT64(env.exclusive_high, ARMCPU),
VMSTATE_UINT64(env.features, ARMCPU),
+ VMSTATE_UINT32(env.exception.syndrome, ARMCPU),
+ VMSTATE_UINT32(env.exception.fsr, ARMCPU),
+ VMSTATE_UINT64(env.exception.vaddress, ARMCPU),
VMSTATE_TIMER(gt_timer[GTIMER_PHYS], ARMCPU),
VMSTATE_TIMER(gt_timer[GTIMER_VIRT], ARMCPU),
VMSTATE_END_OF_LIST()
diff --git a/target-arm/neon_helper.c b/target-arm/neon_helper.c
index 13752baf63..8d6f9a92f2 100644
--- a/target-arm/neon_helper.c
+++ b/target-arm/neon_helper.c
@@ -236,6 +236,171 @@ uint64_t HELPER(neon_qadd_s64)(CPUARMState *env, uint64_t src1, uint64_t src2)
return res;
}
+/* Unsigned saturating accumulate of signed value
+ *
+ * Op1/Rn is treated as signed
+ * Op2/Rd is treated as unsigned
+ *
+ * Explicit casting is used to ensure the correct sign extension of
+ * inputs. The result is treated as an unsigned value and saturated as such.
+ *
+ * We use a macro for the 8/16 bit cases, which expects signed integers va,
+ * vb and vr for the interim calculation and an unsigned 32-bit result value r.
+ */
+
+#define USATACC(bits, shift) \
+ do { \
+ va = sextract32(a, shift, bits); \
+ vb = extract32(b, shift, bits); \
+ vr = va + vb; \
+ if (vr > UINT##bits##_MAX) { \
+ SET_QC(); \
+ vr = UINT##bits##_MAX; \
+ } else if (vr < 0) { \
+ SET_QC(); \
+ vr = 0; \
+ } \
+ r = deposit32(r, shift, bits, vr); \
+ } while (0)
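+
+/* Worked 8-bit example (illustrative values): a lane of 0x01 in Op1 plus
+ * 0xff (255) in Op2 overflows the unsigned range, so it saturates to 0xff
+ * and sets QC; a lane of 0xff in Op1 (-1 as signed) plus 0x01 in Op2
+ * simply yields 0.
+ */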
+
+uint32_t HELPER(neon_uqadd_s8)(CPUARMState *env, uint32_t a, uint32_t b)
+{
+ int16_t va, vb, vr;
+ uint32_t r = 0;
+
+ USATACC(8, 0);
+ USATACC(8, 8);
+ USATACC(8, 16);
+ USATACC(8, 24);
+ return r;
+}
+
+uint32_t HELPER(neon_uqadd_s16)(CPUARMState *env, uint32_t a, uint32_t b)
+{
+ int32_t va, vb, vr;
+ uint64_t r = 0;
+
+ USATACC(16, 0);
+ USATACC(16, 16);
+ return r;
+}
+
+#undef USATACC
+
+uint32_t HELPER(neon_uqadd_s32)(CPUARMState *env, uint32_t a, uint32_t b)
+{
+ int64_t va = (int32_t)a;
+ int64_t vb = (uint32_t)b;
+ int64_t vr = va + vb;
+ if (vr > UINT32_MAX) {
+ SET_QC();
+ vr = UINT32_MAX;
+ } else if (vr < 0) {
+ SET_QC();
+ vr = 0;
+ }
+ return vr;
+}
+
+uint64_t HELPER(neon_uqadd_s64)(CPUARMState *env, uint64_t a, uint64_t b)
+{
+ uint64_t res;
+ res = a + b;
+ /* We only need to look at the pattern of SIGN bits to detect
+ * +ve/-ve saturation
+ */
+ if (~a & b & ~res & SIGNBIT64) {
+ SET_QC();
+ res = UINT64_MAX;
+ } else if (a & ~b & res & SIGNBIT64) {
+ SET_QC();
+ res = 0;
+ }
+ return res;
+}
+
+/* Signed saturating accumulate of unsigned value
+ *
+ * Op1/Rn is treated as unsigned
+ * Op2/Rd is treated as signed
+ *
+ * The result is treated as a signed value and saturated as such
+ *
+ * We use a macro for the 8/16 bit cases, which expects signed integers va,
+ * vb and vr for the interim calculation and an unsigned 32-bit result value r.
+ */
+
+#define SSATACC(bits, shift) \
+ do { \
+ va = extract32(a, shift, bits); \
+ vb = sextract32(b, shift, bits); \
+ vr = va + vb; \
+ if (vr > INT##bits##_MAX) { \
+ SET_QC(); \
+ vr = INT##bits##_MAX; \
+ } else if (vr < INT##bits##_MIN) { \
+ SET_QC(); \
+ vr = INT##bits##_MIN; \
+ } \
+ r = deposit32(r, shift, bits, vr); \
+ } while (0)
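+
+/* Worked 8-bit example (illustrative values): a lane of 0x80 (128) in Op1
+ * plus 0x7f (127) in Op2 exceeds INT8_MAX, so it saturates to 0x7f and
+ * sets QC.
+ */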
+
+uint32_t HELPER(neon_sqadd_u8)(CPUARMState *env, uint32_t a, uint32_t b)
+{
+ int16_t va, vb, vr;
+ uint32_t r = 0;
+
+ SSATACC(8, 0);
+ SSATACC(8, 8);
+ SSATACC(8, 16);
+ SSATACC(8, 24);
+ return r;
+}
+
+uint32_t HELPER(neon_sqadd_u16)(CPUARMState *env, uint32_t a, uint32_t b)
+{
+ int32_t va, vb, vr;
+ uint32_t r = 0;
+
+ SSATACC(16, 0);
+ SSATACC(16, 16);
+
+ return r;
+}
+
+#undef SSATACC
+
+uint32_t HELPER(neon_sqadd_u32)(CPUARMState *env, uint32_t a, uint32_t b)
+{
+ int64_t res;
+ int64_t op1 = (uint32_t)a;
+ int64_t op2 = (int32_t)b;
+ res = op1 + op2;
+ if (res > INT32_MAX) {
+ SET_QC();
+ res = INT32_MAX;
+ } else if (res < INT32_MIN) {
+ SET_QC();
+ res = INT32_MIN;
+ }
+ return res;
+}
+
+uint64_t HELPER(neon_sqadd_u64)(CPUARMState *env, uint64_t a, uint64_t b)
+{
+ uint64_t res;
+ res = a + b;
+ /* We only need to look at the pattern of SIGN bits to detect an overflow */
+ if (((a & res)
+ | (~b & res)
+ | (a & ~b)) & SIGNBIT64) {
+ SET_QC();
+ res = INT64_MAX;
+ }
+ return res;
+}
+
+
#define NEON_USAT(dest, src1, src2, type) do { \
uint32_t tmp = (uint32_t)src1 - (uint32_t)src2; \
if (tmp != (type)tmp) { \
@@ -1776,6 +1941,28 @@ uint32_t HELPER(neon_qneg_s32)(CPUARMState *env, uint32_t x)
return x;
}
+uint64_t HELPER(neon_qabs_s64)(CPUARMState *env, uint64_t x)
+{
+ if (x == SIGNBIT64) {
+ SET_QC();
+ x = ~SIGNBIT64;
+ } else if ((int64_t)x < 0) {
+ x = -x;
+ }
+ return x;
+}
+
+uint64_t HELPER(neon_qneg_s64)(CPUARMState *env, uint64_t x)
+{
+ if (x == SIGNBIT64) {
+ SET_QC();
+ x = ~SIGNBIT64;
+ } else {
+ x = -x;
+ }
+ return x;
+}
+
/* NEON Float helpers. */
uint32_t HELPER(neon_abd_f32)(uint32_t a, uint32_t b, void *fpstp)
{
diff --git a/target-arm/op_helper.c b/target-arm/op_helper.c
index 5851e041a0..57e7d9c480 100644
--- a/target-arm/op_helper.c
+++ b/target-arm/op_helper.c
@@ -18,14 +18,18 @@
*/
#include "cpu.h"
#include "helper.h"
+#include "internals.h"
#define SIGNBIT (uint32_t)0x80000000
#define SIGNBIT64 ((uint64_t)1 << 63)
static void raise_exception(CPUARMState *env, int tt)
{
- env->exception_index = tt;
- cpu_loop_exit(env);
+ ARMCPU *cpu = arm_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
+
+ cs->exception_index = tt;
+ cpu_loop_exit(cs);
}
uint32_t HELPER(neon_tbl)(CPUARMState *env, uint32_t ireg, uint32_t def,
@@ -69,20 +73,24 @@ uint32_t HELPER(neon_tbl)(CPUARMState *env, uint32_t ireg, uint32_t def,
#include "exec/softmmu_template.h"
/* try to fill the TLB and return an exception if error. If retaddr is
- NULL, it means that the function was called in C code (i.e. not
- from generated code or from helper.c) */
-void tlb_fill(CPUARMState *env, target_ulong addr, int is_write, int mmu_idx,
+ * NULL, it means that the function was called in C code (i.e. not
+ * from generated code or from helper.c)
+ */
+void tlb_fill(CPUState *cs, target_ulong addr, int is_write, int mmu_idx,
uintptr_t retaddr)
{
int ret;
- ret = cpu_arm_handle_mmu_fault(env, addr, is_write, mmu_idx);
+ ret = arm_cpu_handle_mmu_fault(cs, addr, is_write, mmu_idx);
if (unlikely(ret)) {
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+
if (retaddr) {
/* now we have a real cpu fault */
- cpu_restore_state(env, retaddr);
+ cpu_restore_state(cs, retaddr);
}
- raise_exception(env, env->exception_index);
+ raise_exception(env, cs->exception_index);
}
}
#endif
@@ -220,24 +228,47 @@ void HELPER(wfi)(CPUARMState *env)
{
CPUState *cs = CPU(arm_env_get_cpu(env));
- env->exception_index = EXCP_HLT;
+ cs->exception_index = EXCP_HLT;
cs->halted = 1;
- cpu_loop_exit(env);
+ cpu_loop_exit(cs);
}
void HELPER(wfe)(CPUARMState *env)
{
+ CPUState *cs = CPU(arm_env_get_cpu(env));
+
/* Don't actually halt the CPU, just yield back to top
* level loop
*/
- env->exception_index = EXCP_YIELD;
- cpu_loop_exit(env);
+ cs->exception_index = EXCP_YIELD;
+ cpu_loop_exit(cs);
+}
+
+/* Raise an internal-to-QEMU exception. This is limited to only
+ * those EXCP values which are special cases for QEMU to interrupt
+ * execution and not to be used for exceptions which are passed to
+ * the guest (those must all have syndrome information and thus should
+ * use exception_with_syndrome).
+ */
+void HELPER(exception_internal)(CPUARMState *env, uint32_t excp)
+{
+ CPUState *cs = CPU(arm_env_get_cpu(env));
+
+ assert(excp_is_internal(excp));
+ cs->exception_index = excp;
+ cpu_loop_exit(cs);
}
-void HELPER(exception)(CPUARMState *env, uint32_t excp)
+/* Raise an exception with the specified syndrome register value */
+void HELPER(exception_with_syndrome)(CPUARMState *env, uint32_t excp,
+ uint32_t syndrome)
{
- env->exception_index = excp;
- cpu_loop_exit(env);
+ CPUState *cs = CPU(arm_env_get_cpu(env));
+
+ assert(!excp_is_internal(excp));
+ cs->exception_index = excp;
+ env->exception.syndrome = syndrome;
+ cpu_loop_exit(cs);
}
uint32_t HELPER(cpsr_read)(CPUARMState *env)
@@ -282,17 +313,17 @@ void HELPER(set_user_reg)(CPUARMState *env, uint32_t regno, uint32_t val)
}
}
-void HELPER(access_check_cp_reg)(CPUARMState *env, void *rip)
+void HELPER(access_check_cp_reg)(CPUARMState *env, void *rip, uint32_t syndrome)
{
const ARMCPRegInfo *ri = rip;
switch (ri->accessfn(env, ri)) {
case CP_ACCESS_OK:
return;
case CP_ACCESS_TRAP:
+ env->exception.syndrome = syndrome;
+ break;
case CP_ACCESS_TRAP_UNCATEGORIZED:
- /* These cases will eventually need to generate different
- * syndrome information.
- */
+ env->exception.syndrome = syn_uncategorized();
break;
default:
g_assert_not_reached();
@@ -340,7 +371,7 @@ void HELPER(msr_i_pstate)(CPUARMState *env, uint32_t op, uint32_t imm)
switch (op) {
case 0x05: /* SPSel */
- env->pstate = deposit32(env->pstate, 0, 1, imm);
+ update_spsel(env, imm);
break;
case 0x1e: /* DAIFSet */
env->daif |= (imm << 6) & PSTATE_DAIF;
@@ -353,6 +384,66 @@ void HELPER(msr_i_pstate)(CPUARMState *env, uint32_t op, uint32_t imm)
}
}
+void HELPER(exception_return)(CPUARMState *env)
+{
+ uint32_t spsr = env->banked_spsr[0];
+ int new_el, i;
+
+ if (env->pstate & PSTATE_SP) {
+ env->sp_el[1] = env->xregs[31];
+ } else {
+ env->sp_el[0] = env->xregs[31];
+ }
+
+ env->exclusive_addr = -1;
+
+ if (spsr & PSTATE_nRW) {
+ env->aarch64 = 0;
+ new_el = 0;
+ env->uncached_cpsr = 0x10;
+ cpsr_write(env, spsr, ~0);
+ for (i = 0; i < 15; i++) {
+ env->regs[i] = env->xregs[i];
+ }
+
+ env->regs[15] = env->elr_el1 & ~0x1;
+ } else {
+ new_el = extract32(spsr, 2, 2);
+ if (new_el > 1) {
+ /* Return to unimplemented EL */
+ goto illegal_return;
+ }
+ if (extract32(spsr, 1, 1)) {
+ /* Return with reserved M[1] bit set */
+ goto illegal_return;
+ }
+ if (new_el == 0 && (spsr & PSTATE_SP)) {
+ /* Return to EL0 with M[0] bit set */
+ goto illegal_return;
+ }
+ env->aarch64 = 1;
+ pstate_write(env, spsr);
+ env->xregs[31] = env->sp_el[new_el];
+ env->pc = env->elr_el1;
+ }
+
+ return;
+
+illegal_return:
+ /* Illegal return events of various kinds have architecturally
+ * mandated behaviour:
+ * restore NZCV and DAIF from SPSR_ELx
+ * set PSTATE.IL
+ * restore PC from ELR_ELx
+ * no change to exception level, execution state or stack pointer
+ */
+ env->pstate |= PSTATE_IL;
+ env->pc = env->elr_el1;
+ spsr &= PSTATE_NZCV | PSTATE_DAIF;
+ spsr |= pstate_read(env) & ~(PSTATE_NZCV | PSTATE_DAIF);
+ pstate_write(env, spsr);
+}
+
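Aside (not part of the patch): a minimal sketch of how the ERET path above picks an SPSR apart, assuming the usual PSTATE bit positions (M[0] selects SP_ELx vs SP_EL0, bit 4 is nRW, M[3:2] is the target EL). The value 0x3c5 is an arbitrary example meaning DAIF masked, AArch64, EL1h.

#include <stdint.h>
#include <stdio.h>

#define PSTATE_SP  (1U << 0)          /* M[0]: 1 = SP_ELx, 0 = SP_EL0 */
#define PSTATE_nRW (1U << 4)          /* 1 = return to AArch32 */

int main(void)
{
    uint32_t spsr = 0x3c5;            /* DAIF set, M = 0b0101 (EL1h) */
    int aarch32 = (spsr & PSTATE_nRW) != 0;
    int target_el = (spsr >> 2) & 3;  /* extract32(spsr, 2, 2) */
    int sp_elx = (spsr & PSTATE_SP) != 0;

    printf("aarch32=%d target_el=%d sp_elx=%d\n", aarch32, target_el, sp_elx);
    return 0;
}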
/* ??? Flag setting arithmetic is awkward because we need to do comparisons.
The only way to do that in TCG is a conditional branch, which clobbers
all our temporaries. For now implement these as helper functions. */
diff --git a/target-arm/translate-a64.c b/target-arm/translate-a64.c
index 37e05e81f7..d86b8ffa55 100644
--- a/target-arm/translate-a64.c
+++ b/target-arm/translate-a64.c
@@ -26,6 +26,7 @@
#include "tcg-op.h"
#include "qemu/log.h"
#include "translate.h"
+#include "internals.h"
#include "qemu/host-utils.h"
#include "exec/gen-icount.h"
@@ -73,14 +74,17 @@ typedef struct AArch64DecodeTable {
} AArch64DecodeTable;
/* Function prototype for gen_ functions for calling Neon helpers */
+typedef void NeonGenOneOpEnvFn(TCGv_i32, TCGv_ptr, TCGv_i32);
typedef void NeonGenTwoOpFn(TCGv_i32, TCGv_i32, TCGv_i32);
typedef void NeonGenTwoOpEnvFn(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32);
typedef void NeonGenTwo64OpFn(TCGv_i64, TCGv_i64, TCGv_i64);
+typedef void NeonGenTwo64OpEnvFn(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64);
typedef void NeonGenNarrowFn(TCGv_i32, TCGv_i64);
typedef void NeonGenNarrowEnvFn(TCGv_i32, TCGv_ptr, TCGv_i64);
typedef void NeonGenWidenFn(TCGv_i64, TCGv_i32);
typedef void NeonGenTwoSingleOPFn(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr);
typedef void NeonGenTwoDoubleOPFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_ptr);
+typedef void NeonGenOneOpFn(TCGv_i64, TCGv_i64);
/* initialize TCG globals. */
void a64_translate_init(void)
@@ -172,18 +176,37 @@ void gen_a64_set_pc_im(uint64_t val)
tcg_gen_movi_i64(cpu_pc, val);
}
-static void gen_exception(int excp)
+static void gen_exception_internal(int excp)
{
- TCGv_i32 tmp = tcg_temp_new_i32();
- tcg_gen_movi_i32(tmp, excp);
- gen_helper_exception(cpu_env, tmp);
- tcg_temp_free_i32(tmp);
+ TCGv_i32 tcg_excp = tcg_const_i32(excp);
+
+ assert(excp_is_internal(excp));
+ gen_helper_exception_internal(cpu_env, tcg_excp);
+ tcg_temp_free_i32(tcg_excp);
+}
+
+static void gen_exception(int excp, uint32_t syndrome)
+{
+ TCGv_i32 tcg_excp = tcg_const_i32(excp);
+ TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
+
+ gen_helper_exception_with_syndrome(cpu_env, tcg_excp, tcg_syn);
+ tcg_temp_free_i32(tcg_syn);
+ tcg_temp_free_i32(tcg_excp);
}
-static void gen_exception_insn(DisasContext *s, int offset, int excp)
+static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
{
gen_a64_set_pc_im(s->pc - offset);
- gen_exception(excp);
+ gen_exception_internal(excp);
+ s->is_jmp = DISAS_EXC;
+}
+
+static void gen_exception_insn(DisasContext *s, int offset, int excp,
+ uint32_t syndrome)
+{
+ gen_a64_set_pc_im(s->pc - offset);
+ gen_exception(excp, syndrome);
s->is_jmp = DISAS_EXC;
}
@@ -215,7 +238,7 @@ static inline void gen_goto_tb(DisasContext *s, int n, uint64_t dest)
} else {
gen_a64_set_pc_im(dest);
if (s->singlestep_enabled) {
- gen_exception(EXCP_DEBUG);
+ gen_exception_internal(EXCP_DEBUG);
}
tcg_gen_exit_tb(0);
s->is_jmp = DISAS_JUMP;
@@ -224,7 +247,8 @@ static inline void gen_goto_tb(DisasContext *s, int n, uint64_t dest)
static void unallocated_encoding(DisasContext *s)
{
- gen_exception_insn(s, 4, EXCP_UDEF);
+ /* Unallocated and reserved encodings are uncategorized */
+ gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized());
}
#define unsupported_encoding(s, insn) \
@@ -329,11 +353,30 @@ static TCGv_i64 read_cpu_reg_sp(DisasContext *s, int reg, int sf)
return v;
}
+/* We should have at some point before trying to access an FP register
+ * done the necessary access check, so assert that
+ * (a) we did the check and
+ * (b) we didn't then just plough ahead anyway if it failed.
+ * Print the instruction pattern in the abort message so we can figure
+ * out what we need to fix if a user encounters this problem in the wild.
+ */
+static inline void assert_fp_access_checked(DisasContext *s)
+{
+#ifdef CONFIG_DEBUG_TCG
+ if (unlikely(!s->fp_access_checked || !s->cpacr_fpen)) {
+ fprintf(stderr, "target-arm: FP access check missing for "
+ "instruction 0x%08x\n", s->insn);
+ abort();
+ }
+#endif
+}
+
/* Return the offset into CPUARMState of an element of specified
* size, 'element' places in from the least significant end of
* the FP/vector register Qn.
*/
-static inline int vec_reg_offset(int regno, int element, TCGMemOp size)
+static inline int vec_reg_offset(DisasContext *s, int regno,
+ int element, TCGMemOp size)
{
int offs = offsetof(CPUARMState, vfp.regs[regno * 2]);
#ifdef HOST_WORDS_BIGENDIAN
@@ -348,6 +391,7 @@ static inline int vec_reg_offset(int regno, int element, TCGMemOp size)
#else
offs += element * (1 << size);
#endif
+ assert_fp_access_checked(s);
return offs;
}
@@ -356,18 +400,20 @@ static inline int vec_reg_offset(int regno, int element, TCGMemOp size)
* Dn, Sn, Hn or Bn).
* (Note that this is not the same mapping as for A32; see cpu.h)
*/
-static inline int fp_reg_offset(int regno, TCGMemOp size)
+static inline int fp_reg_offset(DisasContext *s, int regno, TCGMemOp size)
{
int offs = offsetof(CPUARMState, vfp.regs[regno * 2]);
#ifdef HOST_WORDS_BIGENDIAN
offs += (8 - (1 << size));
#endif
+ assert_fp_access_checked(s);
return offs;
}
/* Offset of the high half of the 128 bit vector Qn */
-static inline int fp_reg_hi_offset(int regno)
+static inline int fp_reg_hi_offset(DisasContext *s, int regno)
{
+ assert_fp_access_checked(s);
return offsetof(CPUARMState, vfp.regs[regno * 2 + 1]);
}
@@ -381,7 +427,7 @@ static TCGv_i64 read_fp_dreg(DisasContext *s, int reg)
{
TCGv_i64 v = tcg_temp_new_i64();
- tcg_gen_ld_i64(v, cpu_env, fp_reg_offset(reg, MO_64));
+ tcg_gen_ld_i64(v, cpu_env, fp_reg_offset(s, reg, MO_64));
return v;
}
@@ -389,7 +435,7 @@ static TCGv_i32 read_fp_sreg(DisasContext *s, int reg)
{
TCGv_i32 v = tcg_temp_new_i32();
- tcg_gen_ld_i32(v, cpu_env, fp_reg_offset(reg, MO_32));
+ tcg_gen_ld_i32(v, cpu_env, fp_reg_offset(s, reg, MO_32));
return v;
}
@@ -397,8 +443,8 @@ static void write_fp_dreg(DisasContext *s, int reg, TCGv_i64 v)
{
TCGv_i64 tcg_zero = tcg_const_i64(0);
- tcg_gen_st_i64(v, cpu_env, fp_reg_offset(reg, MO_64));
- tcg_gen_st_i64(tcg_zero, cpu_env, fp_reg_hi_offset(reg));
+ tcg_gen_st_i64(v, cpu_env, fp_reg_offset(s, reg, MO_64));
+ tcg_gen_st_i64(tcg_zero, cpu_env, fp_reg_hi_offset(s, reg));
tcg_temp_free_i64(tcg_zero);
}
@@ -669,14 +715,14 @@ static void do_fp_st(DisasContext *s, int srcidx, TCGv_i64 tcg_addr, int size)
{
/* This writes the bottom N bits of a 128 bit wide vector to memory */
TCGv_i64 tmp = tcg_temp_new_i64();
- tcg_gen_ld_i64(tmp, cpu_env, fp_reg_offset(srcidx, MO_64));
+ tcg_gen_ld_i64(tmp, cpu_env, fp_reg_offset(s, srcidx, MO_64));
if (size < 4) {
tcg_gen_qemu_st_i64(tmp, tcg_addr, get_mem_index(s), MO_TE + size);
} else {
TCGv_i64 tcg_hiaddr = tcg_temp_new_i64();
tcg_gen_qemu_st_i64(tmp, tcg_addr, get_mem_index(s), MO_TEQ);
tcg_gen_qemu_st64(tmp, tcg_addr, get_mem_index(s));
- tcg_gen_ld_i64(tmp, cpu_env, fp_reg_hi_offset(srcidx));
+ tcg_gen_ld_i64(tmp, cpu_env, fp_reg_hi_offset(s, srcidx));
tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8);
tcg_gen_qemu_st_i64(tmp, tcg_hiaddr, get_mem_index(s), MO_TEQ);
tcg_temp_free_i64(tcg_hiaddr);
@@ -709,8 +755,8 @@ static void do_fp_ld(DisasContext *s, int destidx, TCGv_i64 tcg_addr, int size)
tcg_temp_free_i64(tcg_hiaddr);
}
- tcg_gen_st_i64(tmplo, cpu_env, fp_reg_offset(destidx, MO_64));
- tcg_gen_st_i64(tmphi, cpu_env, fp_reg_hi_offset(destidx));
+ tcg_gen_st_i64(tmplo, cpu_env, fp_reg_offset(s, destidx, MO_64));
+ tcg_gen_st_i64(tmphi, cpu_env, fp_reg_hi_offset(s, destidx));
tcg_temp_free_i64(tmplo);
tcg_temp_free_i64(tmphi);
@@ -732,7 +778,7 @@ static void do_fp_ld(DisasContext *s, int destidx, TCGv_i64 tcg_addr, int size)
static void read_vec_element(DisasContext *s, TCGv_i64 tcg_dest, int srcidx,
int element, TCGMemOp memop)
{
- int vect_off = vec_reg_offset(srcidx, element, memop & MO_SIZE);
+ int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE);
switch (memop) {
case MO_8:
tcg_gen_ld8u_i64(tcg_dest, cpu_env, vect_off);
@@ -764,7 +810,7 @@ static void read_vec_element(DisasContext *s, TCGv_i64 tcg_dest, int srcidx,
static void read_vec_element_i32(DisasContext *s, TCGv_i32 tcg_dest, int srcidx,
int element, TCGMemOp memop)
{
- int vect_off = vec_reg_offset(srcidx, element, memop & MO_SIZE);
+ int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE);
switch (memop) {
case MO_8:
tcg_gen_ld8u_i32(tcg_dest, cpu_env, vect_off);
@@ -791,7 +837,7 @@ static void read_vec_element_i32(DisasContext *s, TCGv_i32 tcg_dest, int srcidx,
static void write_vec_element(DisasContext *s, TCGv_i64 tcg_src, int destidx,
int element, TCGMemOp memop)
{
- int vect_off = vec_reg_offset(destidx, element, memop & MO_SIZE);
+ int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE);
switch (memop) {
case MO_8:
tcg_gen_st8_i64(tcg_src, cpu_env, vect_off);
@@ -813,7 +859,7 @@ static void write_vec_element(DisasContext *s, TCGv_i64 tcg_src, int destidx,
static void write_vec_element_i32(DisasContext *s, TCGv_i32 tcg_src,
int destidx, int element, TCGMemOp memop)
{
- int vect_off = vec_reg_offset(destidx, element, memop & MO_SIZE);
+ int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE);
switch (memop) {
case MO_8:
tcg_gen_st8_i32(tcg_src, cpu_env, vect_off);
@@ -866,6 +912,26 @@ static void do_vec_ld(DisasContext *s, int destidx, int element,
tcg_temp_free_i64(tcg_tmp);
}
+/* Check that FP/Neon access is enabled. If it is, return
+ * true. If not, emit code to generate an appropriate exception,
+ * and return false; the caller should not emit any code for
+ * the instruction. Note that this check must happen after all
+ * unallocated-encoding checks (otherwise the syndrome information
+ * for the resulting exception will be incorrect).
+ */
+static inline bool fp_access_check(DisasContext *s)
+{
+ assert(!s->fp_access_checked);
+ s->fp_access_checked = true;
+
+ if (s->cpacr_fpen) {
+ return true;
+ }
+
+ gen_exception_insn(s, 4, EXCP_UDEF, syn_fp_access_trap(1, 0xe, false));
+ return false;
+}
+
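Aside (not part of the patch): the calling pattern those two functions enforce, sketched as a hypothetical decoder in this file (disas_hypothetical_fp_op is not a real function). Encoding checks come first so that a genuinely unallocated instruction is not mis-reported as an FP access trap, and every path that later touches an FP register must have gone through fp_access_check() so that assert_fp_access_checked() stays quiet.

static void disas_hypothetical_fp_op(DisasContext *s, uint32_t insn)
{
    if (extract32(insn, 29, 1)) {      /* 1: reject bad encodings first */
        unallocated_encoding(s);
        return;
    }
    if (!fp_access_check(s)) {         /* 2: may emit EXCP_UDEF and bail out */
        return;
    }
    /* 3: only now may read_fp_dreg()/write_fp_dreg() etc. be used */
}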
/*
* This utility function is for doing register extension with an
* optional shift. You will likely want to pass a temporary for the
@@ -1238,10 +1304,16 @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
* runtime; this may result in an exception.
*/
TCGv_ptr tmpptr;
+ TCGv_i32 tcg_syn;
+ uint32_t syndrome;
+
gen_a64_set_pc_im(s->pc - 4);
tmpptr = tcg_const_ptr(ri);
- gen_helper_access_check_cp_reg(cpu_env, tmpptr);
+ syndrome = syn_aa64_sysregtrap(op0, op1, op2, crn, crm, rt, isread);
+ tcg_syn = tcg_const_i32(syndrome);
+ gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn);
tcg_temp_free_ptr(tmpptr);
+ tcg_temp_free_i32(tcg_syn);
}
/* Handle special cases first */
@@ -1263,6 +1335,11 @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
tcg_rt = cpu_reg(s, rt);
tcg_gen_movi_i64(tcg_rt, s->current_pl << 2);
return;
+ case ARM_CP_DC_ZVA:
+ /* Writes clear the aligned block of memory which rt points into. */
+ tcg_rt = cpu_reg(s, rt);
+ gen_helper_dc_zva(cpu_env, tcg_rt);
+ return;
default:
break;
}
@@ -1363,6 +1440,7 @@ static void disas_exc(DisasContext *s, uint32_t insn)
{
int opc = extract32(insn, 21, 3);
int op2_ll = extract32(insn, 0, 5);
+ int imm16 = extract32(insn, 5, 16);
switch (opc) {
case 0:
@@ -1373,7 +1451,7 @@ static void disas_exc(DisasContext *s, uint32_t insn)
unallocated_encoding(s);
break;
}
- gen_exception_insn(s, 0, EXCP_SWI);
+ gen_exception_insn(s, 0, EXCP_SWI, syn_aa64_svc(imm16));
break;
case 1:
if (op2_ll != 0) {
@@ -1381,7 +1459,7 @@ static void disas_exc(DisasContext *s, uint32_t insn)
break;
}
/* BRK */
- gen_exception_insn(s, 0, EXCP_BKPT);
+ gen_exception_insn(s, 0, EXCP_BKPT, syn_aa64_bkpt(imm16));
break;
case 2:
if (op2_ll != 0) {
@@ -1434,6 +1512,9 @@ static void disas_uncond_b_reg(DisasContext *s, uint32_t insn)
tcg_gen_movi_i64(cpu_reg(s, 30), s->pc);
break;
case 4: /* ERET */
+ gen_helper_exception_return(cpu_env);
+ s->is_jmp = DISAS_JUMP;
+ return;
case 5: /* DRPS */
if (rn != 0x1f) {
unallocated_encoding(s);
@@ -1530,7 +1611,7 @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
tcg_gen_mov_i64(cpu_exclusive_test, addr);
tcg_gen_movi_i32(cpu_exclusive_info,
size | is_pair << 2 | (rd << 4) | (rt << 9) | (rt2 << 14));
- gen_exception_insn(s, 4, EXCP_STREX);
+ gen_exception_internal_insn(s, 4, EXCP_STREX);
}
#else
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
@@ -1697,6 +1778,9 @@ static void disas_ld_lit(DisasContext *s, uint32_t insn)
return;
}
size = 2 + opc;
+ if (!fp_access_check(s)) {
+ return;
+ }
} else {
if (opc == 3) {
/* PRFM (literal) : prefetch */
@@ -1806,6 +1890,10 @@ static void disas_ldst_pair(DisasContext *s, uint32_t insn)
break;
}
+ if (is_vector && !fp_access_check(s)) {
+ return;
+ }
+
offset <<= size;
if (rn == 31) {
@@ -1899,6 +1987,9 @@ static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn)
return;
}
is_store = ((opc & 1) == 0);
+ if (!fp_access_check(s)) {
+ return;
+ }
} else {
if (size == 3 && opc == 2) {
/* PRFM - prefetch */
@@ -2019,6 +2110,9 @@ static void disas_ldst_reg_roffset(DisasContext *s, uint32_t insn)
return;
}
is_store = !extract32(opc, 0, 1);
+ if (!fp_access_check(s)) {
+ return;
+ }
} else {
if (size == 3 && opc == 2) {
/* PRFM - prefetch */
@@ -2099,6 +2193,9 @@ static void disas_ldst_reg_unsigned_imm(DisasContext *s, uint32_t insn)
return;
}
is_store = !extract32(opc, 0, 1);
+ if (!fp_access_check(s)) {
+ return;
+ }
} else {
if (size == 3 && opc == 2) {
/* PRFM - prefetch */
@@ -2241,6 +2338,10 @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
return;
}
+ if (!fp_access_check(s)) {
+ return;
+ }
+
if (rn == 31) {
gen_check_sp_alignment(s);
}
@@ -2367,6 +2468,10 @@ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
g_assert_not_reached();
}
+ if (!fp_access_check(s)) {
+ return;
+ }
+
ebytes = 1 << scale;
if (rn == 31) {
@@ -3096,12 +3201,11 @@ static void disas_add_sub_ext_reg(DisasContext *s, uint32_t insn)
/* non-flag setting ops may use SP */
if (!setflags) {
- tcg_rn = read_cpu_reg_sp(s, rn, sf);
tcg_rd = cpu_reg_sp(s, rd);
} else {
- tcg_rn = read_cpu_reg(s, rn, sf);
tcg_rd = cpu_reg(s, rd);
}
+ tcg_rn = read_cpu_reg_sp(s, rn, sf);
tcg_rm = read_cpu_reg(s, rm, sf);
ext_and_shift_reg(tcg_rm, tcg_rm, option, imm3);
@@ -3844,6 +3948,10 @@ static void disas_fp_compare(DisasContext *s, uint32_t insn)
return;
}
+ if (!fp_access_check(s)) {
+ return;
+ }
+
handle_fp_compare(s, type, rn, rm, opc & 1, opc & 2);
}
@@ -3872,6 +3980,10 @@ static void disas_fp_ccomp(DisasContext *s, uint32_t insn)
return;
}
+ if (!fp_access_check(s)) {
+ return;
+ }
+
if (cond < 0x0e) { /* not always */
int label_match = gen_new_label();
label_continue = gen_new_label();
@@ -3928,6 +4040,10 @@ static void disas_fp_csel(DisasContext *s, uint32_t insn)
return;
}
+ if (!fp_access_check(s)) {
+ return;
+ }
+
if (cond < 0x0e) { /* not always */
int label_match = gen_new_label();
label_continue = gen_new_label();
@@ -4145,6 +4261,10 @@ static void disas_fp_1src(DisasContext *s, uint32_t insn)
unallocated_encoding(s);
return;
}
+ if (!fp_access_check(s)) {
+ return;
+ }
+
handle_fp_fcvt(s, opcode, rd, rn, dtype, type);
break;
}
@@ -4154,9 +4274,17 @@ static void disas_fp_1src(DisasContext *s, uint32_t insn)
/* 32-to-32 and 64-to-64 ops */
switch (type) {
case 0:
+ if (!fp_access_check(s)) {
+ return;
+ }
+
handle_fp_1src_single(s, opcode, rd, rn);
break;
case 1:
+ if (!fp_access_check(s)) {
+ return;
+ }
+
handle_fp_1src_double(s, opcode, rd, rn);
break;
default:
@@ -4296,9 +4424,15 @@ static void disas_fp_2src(DisasContext *s, uint32_t insn)
switch (type) {
case 0:
+ if (!fp_access_check(s)) {
+ return;
+ }
handle_fp_2src_single(s, opcode, rd, rn, rm);
break;
case 1:
+ if (!fp_access_check(s)) {
+ return;
+ }
handle_fp_2src_double(s, opcode, rd, rn, rm);
break;
default:
@@ -4400,9 +4534,15 @@ static void disas_fp_3src(DisasContext *s, uint32_t insn)
switch (type) {
case 0:
+ if (!fp_access_check(s)) {
+ return;
+ }
handle_fp_3src_single(s, o0, o1, rd, rn, rm, ra);
break;
case 1:
+ if (!fp_access_check(s)) {
+ return;
+ }
handle_fp_3src_double(s, o0, o1, rd, rn, rm, ra);
break;
default:
@@ -4429,6 +4569,10 @@ static void disas_fp_imm(DisasContext *s, uint32_t insn)
return;
}
+ if (!fp_access_check(s)) {
+ return;
+ }
+
/* The imm8 encodes the sign bit, enough bits to represent
* an exponent in the range 01....1xx to 10....0xx,
* and the most significant 4 bits of the mantissa; see
@@ -4615,6 +4759,10 @@ static void disas_fp_fixed_conv(DisasContext *s, uint32_t insn)
return;
}
+ if (!fp_access_check(s)) {
+ return;
+ }
+
handle_fpfpcvt(s, rd, rn, opcode, itof, FPROUNDING_ZERO, scale, sf, type);
}
@@ -4633,9 +4781,9 @@ static void handle_fmov(DisasContext *s, int rd, int rn, int type, bool itof)
/* 32 bit */
TCGv_i64 tmp = tcg_temp_new_i64();
tcg_gen_ext32u_i64(tmp, tcg_rn);
- tcg_gen_st_i64(tmp, cpu_env, fp_reg_offset(rd, MO_64));
+ tcg_gen_st_i64(tmp, cpu_env, fp_reg_offset(s, rd, MO_64));
tcg_gen_movi_i64(tmp, 0);
- tcg_gen_st_i64(tmp, cpu_env, fp_reg_hi_offset(rd));
+ tcg_gen_st_i64(tmp, cpu_env, fp_reg_hi_offset(s, rd));
tcg_temp_free_i64(tmp);
break;
}
@@ -4643,14 +4791,14 @@ static void handle_fmov(DisasContext *s, int rd, int rn, int type, bool itof)
{
/* 64 bit */
TCGv_i64 tmp = tcg_const_i64(0);
- tcg_gen_st_i64(tcg_rn, cpu_env, fp_reg_offset(rd, MO_64));
- tcg_gen_st_i64(tmp, cpu_env, fp_reg_hi_offset(rd));
+ tcg_gen_st_i64(tcg_rn, cpu_env, fp_reg_offset(s, rd, MO_64));
+ tcg_gen_st_i64(tmp, cpu_env, fp_reg_hi_offset(s, rd));
tcg_temp_free_i64(tmp);
break;
}
case 2:
/* 64 bit to top half. */
- tcg_gen_st_i64(tcg_rn, cpu_env, fp_reg_hi_offset(rd));
+ tcg_gen_st_i64(tcg_rn, cpu_env, fp_reg_hi_offset(s, rd));
break;
}
} else {
@@ -4659,15 +4807,15 @@ static void handle_fmov(DisasContext *s, int rd, int rn, int type, bool itof)
switch (type) {
case 0:
/* 32 bit */
- tcg_gen_ld32u_i64(tcg_rd, cpu_env, fp_reg_offset(rn, MO_32));
+ tcg_gen_ld32u_i64(tcg_rd, cpu_env, fp_reg_offset(s, rn, MO_32));
break;
case 1:
/* 64 bit */
- tcg_gen_ld_i64(tcg_rd, cpu_env, fp_reg_offset(rn, MO_64));
+ tcg_gen_ld_i64(tcg_rd, cpu_env, fp_reg_offset(s, rn, MO_64));
break;
case 2:
/* 64 bits from top half */
- tcg_gen_ld_i64(tcg_rd, cpu_env, fp_reg_hi_offset(rn));
+ tcg_gen_ld_i64(tcg_rd, cpu_env, fp_reg_hi_offset(s, rn));
break;
}
}
@@ -4714,6 +4862,9 @@ static void disas_fp_int_conv(DisasContext *s, uint32_t insn)
break;
}
+ if (!fp_access_check(s)) {
+ return;
+ }
handle_fmov(s, rd, rn, type, itof);
} else {
/* actual FP conversions */
@@ -4724,6 +4875,9 @@ static void disas_fp_int_conv(DisasContext *s, uint32_t insn)
return;
}
+ if (!fp_access_check(s)) {
+ return;
+ }
handle_fpfpcvt(s, rd, rn, opcode, itof, rmode, 64, sf, type);
}
}
@@ -4824,6 +4978,10 @@ static void disas_simd_ext(DisasContext *s, uint32_t insn)
return;
}
+ if (!fp_access_check(s)) {
+ return;
+ }
+
tcg_resh = tcg_temp_new_i64();
tcg_resl = tcg_temp_new_i64();
@@ -4894,6 +5052,10 @@ static void disas_simd_tb(DisasContext *s, uint32_t insn)
return;
}
+ if (!fp_access_check(s)) {
+ return;
+ }
+
/* This does a table lookup: for every byte element in the input
* we index into a table formed from up to four vector registers,
* and then the output is the result of the lookups. Our helper
@@ -4964,6 +5126,10 @@ static void disas_simd_zip_trn(DisasContext *s, uint32_t insn)
return;
}
+ if (!fp_access_check(s)) {
+ return;
+ }
+
tcg_resl = tcg_const_i64(0);
tcg_resh = tcg_const_i64(0);
tcg_res = tcg_temp_new_i64();
@@ -5097,6 +5263,10 @@ static void disas_simd_across_lanes(DisasContext *s, uint32_t insn)
return;
}
+ if (!fp_access_check(s)) {
+ return;
+ }
+
esize = 8 << size;
elements = (is_q ? 128 : 64) / esize;
@@ -5229,6 +5399,10 @@ static void handle_simd_dupe(DisasContext *s, int is_q, int rd, int rn,
return;
}
+ if (!fp_access_check(s)) {
+ return;
+ }
+
index = imm5 >> (size + 1);
tmp = tcg_temp_new_i64();
@@ -5263,6 +5437,10 @@ static void handle_simd_dupes(DisasContext *s, int rd, int rn,
return;
}
+ if (!fp_access_check(s)) {
+ return;
+ }
+
index = imm5 >> (size + 1);
/* This instruction just extracts the specified element and
@@ -5295,6 +5473,11 @@ static void handle_simd_dupg(DisasContext *s, int is_q, int rd, int rn,
unallocated_encoding(s);
return;
}
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
for (i = 0; i < elements; i++) {
write_vec_element(s, cpu_reg(s, rn), rd, i, size);
}
@@ -5324,6 +5507,11 @@ static void handle_simd_inse(DisasContext *s, int rd, int rn,
unallocated_encoding(s);
return;
}
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
dst_index = extract32(imm5, 1+size, 5);
src_index = extract32(imm4, size, 4);
@@ -5356,6 +5544,10 @@ static void handle_simd_insg(DisasContext *s, int rd, int rn, int imm5)
return;
}
+ if (!fp_access_check(s)) {
+ return;
+ }
+
idx = extract32(imm5, 1 + size, 4 - size);
write_vec_element(s, cpu_reg(s, rn), rd, idx, size);
}
@@ -5393,6 +5585,11 @@ static void handle_simd_umov_smov(DisasContext *s, int is_q, int is_signed,
return;
}
}
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
element = extract32(imm5, 1+size, 4);
tcg_rd = cpu_reg(s, rd);
@@ -5485,6 +5682,10 @@ static void disas_simd_mod_imm(DisasContext *s, uint32_t insn)
return;
}
+ if (!fp_access_check(s)) {
+ return;
+ }
+
/* See AdvSIMDExpandImm() in ARM ARM */
switch (cmode_3_1) {
case 0: /* Replicate(Zeros(24):imm8, 2) */
@@ -5559,7 +5760,7 @@ static void disas_simd_mod_imm(DisasContext *s, uint32_t insn)
tcg_rd = new_tmp_a64(s);
for (i = 0; i < 2; i++) {
- int foffs = i ? fp_reg_hi_offset(rd) : fp_reg_offset(rd, MO_64);
+ int foffs = i ? fp_reg_hi_offset(s, rd) : fp_reg_offset(s, rd, MO_64);
if (i == 1 && !is_q) {
/* non-quad ops clear high half of vector */
@@ -5633,6 +5834,10 @@ static void disas_simd_scalar_pairwise(DisasContext *s, uint32_t insn)
unallocated_encoding(s);
return;
}
+ if (!fp_access_check(s)) {
+ return;
+ }
+
TCGV_UNUSED_PTR(fpst);
break;
case 0xc: /* FMAXNMP */
@@ -5645,6 +5850,10 @@ static void disas_simd_scalar_pairwise(DisasContext *s, uint32_t insn)
unallocated_encoding(s);
return;
}
+ if (!fp_access_check(s)) {
+ return;
+ }
+
size = extract32(size, 0, 1) ? 3 : 2;
fpst = get_fpstatus_ptr();
break;
@@ -5828,6 +6037,21 @@ static void handle_shli_with_ins(TCGv_i64 tcg_res, TCGv_i64 tcg_src,
}
}
+/* SRI: shift right with insert */
+static void handle_shri_with_ins(TCGv_i64 tcg_res, TCGv_i64 tcg_src,
+ int size, int shift)
+{
+ int esize = 8 << size;
+
+ /* shift count same as element size is valid but does nothing;
+ * special case to avoid potential shift by 64.
+ */
+ if (shift != esize) {
+ tcg_gen_shri_i64(tcg_src, tcg_src, shift);
+ tcg_gen_deposit_i64(tcg_res, tcg_res, tcg_src, 0, esize - shift);
+ }
+}
+
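Aside (not part of the patch): the per-element effect of SRI written out as standalone C. The low esize - shift bits of the result come from the shifted source and the top shift bits keep their old destination value, which is what the deposit above expresses.

#include <stdint.h>
#include <stdio.h>

static uint64_t sri_element(uint64_t dst, uint64_t src, int esize, int shift)
{
    uint64_t mask;

    if (shift == esize) {                  /* legal encoding, inserts nothing */
        return dst;
    }
    mask = (1ULL << (esize - shift)) - 1;  /* low esize-shift bits from src */
    return (dst & ~mask) | ((src >> shift) & mask);
}

int main(void)
{
    /* 16-bit lane: dst = 0xabcd, src = 0x1234, shift = 4 -> 0xa123 */
    printf("0x%04llx\n",
           (unsigned long long)sri_element(0xabcd, 0x1234, 16, 4));
    return 0;
}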
/* SSHR[RA]/USHR[RA] - Scalar shift right (optional rounding/accumulate) */
static void handle_scalar_simd_shri(DisasContext *s,
bool is_u, int immh, int immb,
@@ -5838,6 +6062,7 @@ static void handle_scalar_simd_shri(DisasContext *s,
int shift = 2 * (8 << size) - immhb;
bool accumulate = false;
bool round = false;
+ bool insert = false;
TCGv_i64 tcg_rn;
TCGv_i64 tcg_rd;
TCGv_i64 tcg_round;
@@ -5847,6 +6072,10 @@ static void handle_scalar_simd_shri(DisasContext *s,
return;
}
+ if (!fp_access_check(s)) {
+ return;
+ }
+
switch (opcode) {
case 0x02: /* SSRA / USRA (accumulate) */
accumulate = true;
@@ -5857,6 +6086,9 @@ static void handle_scalar_simd_shri(DisasContext *s,
case 0x06: /* SRSRA / URSRA (accum + rounding) */
accumulate = round = true;
break;
+ case 0x08: /* SRI */
+ insert = true;
+ break;
}
if (round) {
@@ -5867,10 +6099,14 @@ static void handle_scalar_simd_shri(DisasContext *s,
}
tcg_rn = read_fp_dreg(s, rn);
- tcg_rd = accumulate ? read_fp_dreg(s, rd) : tcg_temp_new_i64();
+ tcg_rd = (accumulate || insert) ? read_fp_dreg(s, rd) : tcg_temp_new_i64();
- handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
- accumulate, is_u, size, shift);
+ if (insert) {
+ handle_shri_with_ins(tcg_rd, tcg_rn, size, shift);
+ } else {
+ handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
+ accumulate, is_u, size, shift);
+ }
write_fp_dreg(s, rd, tcg_rd);
@@ -5897,6 +6133,10 @@ static void handle_scalar_simd_shli(DisasContext *s, bool insert,
return;
}
+ if (!fp_access_check(s)) {
+ return;
+ }
+
tcg_rn = read_fp_dreg(s, rn);
tcg_rd = insert ? read_fp_dreg(s, rd) : tcg_temp_new_i64();
@@ -5908,6 +6148,391 @@ static void handle_scalar_simd_shli(DisasContext *s, bool insert,
tcg_temp_free_i64(tcg_rd);
}
+/* SQSHRN/SQSHRUN - Saturating (signed/unsigned) shift right with
+ * (signed/unsigned) narrowing */
+static void handle_vec_simd_sqshrn(DisasContext *s, bool is_scalar, bool is_q,
+ bool is_u_shift, bool is_u_narrow,
+ int immh, int immb, int opcode,
+ int rn, int rd)
+{
+ int immhb = immh << 3 | immb;
+ int size = 32 - clz32(immh) - 1;
+ int esize = 8 << size;
+ int shift = (2 * esize) - immhb;
+ int elements = is_scalar ? 1 : (64 / esize);
+ bool round = extract32(opcode, 0, 1);
+ TCGMemOp ldop = (size + 1) | (is_u_shift ? 0 : MO_SIGN);
+ TCGv_i64 tcg_rn, tcg_rd, tcg_round;
+ TCGv_i32 tcg_rd_narrowed;
+ TCGv_i64 tcg_final;
+
+ static NeonGenNarrowEnvFn * const signed_narrow_fns[4][2] = {
+ { gen_helper_neon_narrow_sat_s8,
+ gen_helper_neon_unarrow_sat8 },
+ { gen_helper_neon_narrow_sat_s16,
+ gen_helper_neon_unarrow_sat16 },
+ { gen_helper_neon_narrow_sat_s32,
+ gen_helper_neon_unarrow_sat32 },
+ { NULL, NULL },
+ };
+ static NeonGenNarrowEnvFn * const unsigned_narrow_fns[4] = {
+ gen_helper_neon_narrow_sat_u8,
+ gen_helper_neon_narrow_sat_u16,
+ gen_helper_neon_narrow_sat_u32,
+ NULL
+ };
+ NeonGenNarrowEnvFn *narrowfn;
+
+ int i;
+
+ assert(size < 4);
+
+ if (extract32(immh, 3, 1)) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ if (is_u_shift) {
+ narrowfn = unsigned_narrow_fns[size];
+ } else {
+ narrowfn = signed_narrow_fns[size][is_u_narrow ? 1 : 0];
+ }
+
+ tcg_rn = tcg_temp_new_i64();
+ tcg_rd = tcg_temp_new_i64();
+ tcg_rd_narrowed = tcg_temp_new_i32();
+ tcg_final = tcg_const_i64(0);
+
+ if (round) {
+ uint64_t round_const = 1ULL << (shift - 1);
+ tcg_round = tcg_const_i64(round_const);
+ } else {
+ TCGV_UNUSED_I64(tcg_round);
+ }
+
+ for (i = 0; i < elements; i++) {
+ read_vec_element(s, tcg_rn, rn, i, ldop);
+ handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
+ false, is_u_shift, size+1, shift);
+ narrowfn(tcg_rd_narrowed, cpu_env, tcg_rd);
+ tcg_gen_extu_i32_i64(tcg_rd, tcg_rd_narrowed);
+ tcg_gen_deposit_i64(tcg_final, tcg_final, tcg_rd, esize * i, esize);
+ }
+
+ if (!is_q) {
+ clear_vec_high(s, rd);
+ write_vec_element(s, tcg_final, rd, 0, MO_64);
+ } else {
+ write_vec_element(s, tcg_final, rd, 1, MO_64);
+ }
+
+ if (round) {
+ tcg_temp_free_i64(tcg_round);
+ }
+ tcg_temp_free_i64(tcg_rn);
+ tcg_temp_free_i64(tcg_rd);
+ tcg_temp_free_i32(tcg_rd_narrowed);
+ tcg_temp_free_i64(tcg_final);
+ return;
+}
+
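Aside (not part of the patch): what one lane of the loop above computes for a signed rounding narrow such as SQRSHRN (32-bit source lane to 16-bit result), as standalone C. The real helpers additionally set the QC flag when they saturate.

#include <stdint.h>
#include <stdio.h>

static int16_t sqrshrn_lane32(int32_t x, int shift)
{
    int64_t v = ((int64_t)x + (1LL << (shift - 1))) >> shift;   /* round */

    if (v > INT16_MAX) {            /* saturate into the narrower type */
        v = INT16_MAX;
    } else if (v < INT16_MIN) {
        v = INT16_MIN;
    }
    return (int16_t)v;
}

int main(void)
{
    printf("%d\n", sqrshrn_lane32(0x01234567, 8));   /* saturates to 32767 */
    printf("%d\n", sqrshrn_lane32(0x00001234, 8));   /* 18 */
    return 0;
}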
+/* SQSHLU, UQSHL, SQSHL: saturating left shifts */
+static void handle_simd_qshl(DisasContext *s, bool scalar, bool is_q,
+ bool src_unsigned, bool dst_unsigned,
+ int immh, int immb, int rn, int rd)
+{
+ int immhb = immh << 3 | immb;
+ int size = 32 - clz32(immh) - 1;
+ int shift = immhb - (8 << size);
+ int pass;
+
+ assert(immh != 0);
+ assert(!(scalar && is_q));
+
+ if (!scalar) {
+ if (!is_q && extract32(immh, 3, 1)) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ /* Since we use the variable-shift helpers we must
+ * replicate the shift count into each element of
+ * the tcg_shift value.
+ */
+ switch (size) {
+ case 0:
+ shift |= shift << 8;
+ /* fall through */
+ case 1:
+ shift |= shift << 16;
+ break;
+ case 2:
+ case 3:
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ if (size == 3) {
+ TCGv_i64 tcg_shift = tcg_const_i64(shift);
+ static NeonGenTwo64OpEnvFn * const fns[2][2] = {
+ { gen_helper_neon_qshl_s64, gen_helper_neon_qshlu_s64 },
+ { NULL, gen_helper_neon_qshl_u64 },
+ };
+ NeonGenTwo64OpEnvFn *genfn = fns[src_unsigned][dst_unsigned];
+ int maxpass = is_q ? 2 : 1;
+
+ for (pass = 0; pass < maxpass; pass++) {
+ TCGv_i64 tcg_op = tcg_temp_new_i64();
+
+ read_vec_element(s, tcg_op, rn, pass, MO_64);
+ genfn(tcg_op, cpu_env, tcg_op, tcg_shift);
+ write_vec_element(s, tcg_op, rd, pass, MO_64);
+
+ tcg_temp_free_i64(tcg_op);
+ }
+ tcg_temp_free_i64(tcg_shift);
+
+ if (!is_q) {
+ clear_vec_high(s, rd);
+ }
+ } else {
+ TCGv_i32 tcg_shift = tcg_const_i32(shift);
+ static NeonGenTwoOpEnvFn * const fns[2][2][3] = {
+ {
+ { gen_helper_neon_qshl_s8,
+ gen_helper_neon_qshl_s16,
+ gen_helper_neon_qshl_s32 },
+ { gen_helper_neon_qshlu_s8,
+ gen_helper_neon_qshlu_s16,
+ gen_helper_neon_qshlu_s32 }
+ }, {
+ { NULL, NULL, NULL },
+ { gen_helper_neon_qshl_u8,
+ gen_helper_neon_qshl_u16,
+ gen_helper_neon_qshl_u32 }
+ }
+ };
+ NeonGenTwoOpEnvFn *genfn = fns[src_unsigned][dst_unsigned][size];
+ TCGMemOp memop = scalar ? size : MO_32;
+ int maxpass = scalar ? 1 : is_q ? 4 : 2;
+
+ for (pass = 0; pass < maxpass; pass++) {
+ TCGv_i32 tcg_op = tcg_temp_new_i32();
+
+ read_vec_element_i32(s, tcg_op, rn, pass, memop);
+ genfn(tcg_op, cpu_env, tcg_op, tcg_shift);
+ if (scalar) {
+ switch (size) {
+ case 0:
+ tcg_gen_ext8u_i32(tcg_op, tcg_op);
+ break;
+ case 1:
+ tcg_gen_ext16u_i32(tcg_op, tcg_op);
+ break;
+ case 2:
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ write_fp_sreg(s, rd, tcg_op);
+ } else {
+ write_vec_element_i32(s, tcg_op, rd, pass, MO_32);
+ }
+
+ tcg_temp_free_i32(tcg_op);
+ }
+ tcg_temp_free_i32(tcg_shift);
+
+ if (!is_q && !scalar) {
+ clear_vec_high(s, rd);
+ }
+ }
+}
+
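Aside (not part of the patch): the effect of the shift-count replication in handle_simd_qshl for a count of 3, as standalone C. The variable-shift Neon helpers take a per-lane shift operand, so a scalar count has to be copied into every byte or halfword lane of the value handed to them.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    int count = 3;
    uint32_t bytes = count, halves = count;

    bytes |= bytes << 8;     /* size == 0: 0x00000303 */
    bytes |= bytes << 16;    /* fall through: 0x03030303 */
    halves |= halves << 16;  /* size == 1: 0x00030003 */

    printf("bytes=0x%08x halves=0x%08x\n", bytes, halves);
    return 0;
}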
+/* Common vector code for handling integer to FP conversion */
+static void handle_simd_intfp_conv(DisasContext *s, int rd, int rn,
+ int elements, int is_signed,
+ int fracbits, int size)
+{
+ bool is_double = size == 3 ? true : false;
+ TCGv_ptr tcg_fpst = get_fpstatus_ptr();
+ TCGv_i32 tcg_shift = tcg_const_i32(fracbits);
+ TCGv_i64 tcg_int = tcg_temp_new_i64();
+ TCGMemOp mop = size | (is_signed ? MO_SIGN : 0);
+ int pass;
+
+ for (pass = 0; pass < elements; pass++) {
+ read_vec_element(s, tcg_int, rn, pass, mop);
+
+ if (is_double) {
+ TCGv_i64 tcg_double = tcg_temp_new_i64();
+ if (is_signed) {
+ gen_helper_vfp_sqtod(tcg_double, tcg_int,
+ tcg_shift, tcg_fpst);
+ } else {
+ gen_helper_vfp_uqtod(tcg_double, tcg_int,
+ tcg_shift, tcg_fpst);
+ }
+ if (elements == 1) {
+ write_fp_dreg(s, rd, tcg_double);
+ } else {
+ write_vec_element(s, tcg_double, rd, pass, MO_64);
+ }
+ tcg_temp_free_i64(tcg_double);
+ } else {
+ TCGv_i32 tcg_single = tcg_temp_new_i32();
+ if (is_signed) {
+ gen_helper_vfp_sqtos(tcg_single, tcg_int,
+ tcg_shift, tcg_fpst);
+ } else {
+ gen_helper_vfp_uqtos(tcg_single, tcg_int,
+ tcg_shift, tcg_fpst);
+ }
+ if (elements == 1) {
+ write_fp_sreg(s, rd, tcg_single);
+ } else {
+ write_vec_element_i32(s, tcg_single, rd, pass, MO_32);
+ }
+ tcg_temp_free_i32(tcg_single);
+ }
+ }
+
+ if (!is_double && elements == 2) {
+ clear_vec_high(s, rd);
+ }
+
+ tcg_temp_free_i64(tcg_int);
+ tcg_temp_free_ptr(tcg_fpst);
+ tcg_temp_free_i32(tcg_shift);
+}
+
+/* UCVTF/SCVTF - Integer to FP conversion */
+static void handle_simd_shift_intfp_conv(DisasContext *s, bool is_scalar,
+ bool is_q, bool is_u,
+ int immh, int immb, int opcode,
+ int rn, int rd)
+{
+ bool is_double = extract32(immh, 3, 1);
+ int size = is_double ? MO_64 : MO_32;
+ int elements;
+ int immhb = immh << 3 | immb;
+ int fracbits = (is_double ? 128 : 64) - immhb;
+
+ if (!extract32(immh, 2, 2)) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (is_scalar) {
+ elements = 1;
+ } else {
+ elements = is_double ? 2 : is_q ? 4 : 2;
+ if (is_double && !is_q) {
+ unallocated_encoding(s);
+ return;
+ }
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ /* immh == 0 would be a failure of the decode logic */
+ g_assert(immh);
+
+ handle_simd_intfp_conv(s, rd, rn, elements, !is_u, fracbits, size);
+}
+
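Aside (not part of the patch): a worked instance of the fracbits arithmetic above with a hypothetical immh:immb pair. Bit 3 of immh selects 64-bit lanes; the fraction-bit count is 64 - immh:immb for 32-bit lanes and 128 - immh:immb for 64-bit lanes.

#include <stdio.h>

int main(void)
{
    int immh = 0x4, immb = 0x5;      /* hypothetical field values */
    int immhb = immh << 3 | immb;    /* 37 */
    int is_double = (immh >> 3) & 1; /* 0: 32-bit lanes */
    int fracbits = (is_double ? 128 : 64) - immhb;

    printf("immhb=%d fracbits=%d\n", immhb, fracbits);   /* 37, 27 */
    return 0;
}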
+/* FCVTZS, FCVTZU - FP to fixed-point conversion */
+static void handle_simd_shift_fpint_conv(DisasContext *s, bool is_scalar,
+ bool is_q, bool is_u,
+ int immh, int immb, int rn, int rd)
+{
+ bool is_double = extract32(immh, 3, 1);
+ int immhb = immh << 3 | immb;
+ int fracbits = (is_double ? 128 : 64) - immhb;
+ int pass;
+ TCGv_ptr tcg_fpstatus;
+ TCGv_i32 tcg_rmode, tcg_shift;
+
+ if (!extract32(immh, 2, 2)) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (!is_scalar && !is_q && is_double) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ assert(!(is_scalar && is_q));
+
+ tcg_rmode = tcg_const_i32(arm_rmode_to_sf(FPROUNDING_ZERO));
+ gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
+ tcg_fpstatus = get_fpstatus_ptr();
+ tcg_shift = tcg_const_i32(fracbits);
+
+ if (is_double) {
+ int maxpass = is_scalar ? 1 : is_q ? 2 : 1;
+
+ for (pass = 0; pass < maxpass; pass++) {
+ TCGv_i64 tcg_op = tcg_temp_new_i64();
+
+ read_vec_element(s, tcg_op, rn, pass, MO_64);
+ if (is_u) {
+ gen_helper_vfp_touqd(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
+ } else {
+ gen_helper_vfp_tosqd(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
+ }
+ write_vec_element(s, tcg_op, rd, pass, MO_64);
+ tcg_temp_free_i64(tcg_op);
+ }
+ if (!is_q) {
+ clear_vec_high(s, rd);
+ }
+ } else {
+ int maxpass = is_scalar ? 1 : is_q ? 4 : 2;
+ for (pass = 0; pass < maxpass; pass++) {
+ TCGv_i32 tcg_op = tcg_temp_new_i32();
+
+ read_vec_element_i32(s, tcg_op, rn, pass, MO_32);
+ if (is_u) {
+ gen_helper_vfp_touls(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
+ } else {
+ gen_helper_vfp_tosls(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
+ }
+ if (is_scalar) {
+ write_fp_sreg(s, rd, tcg_op);
+ } else {
+ write_vec_element_i32(s, tcg_op, rd, pass, MO_32);
+ }
+ tcg_temp_free_i32(tcg_op);
+ }
+ if (!is_q && !is_scalar) {
+ clear_vec_high(s, rd);
+ }
+ }
+
+ tcg_temp_free_ptr(tcg_fpstatus);
+ tcg_temp_free_i32(tcg_shift);
+ gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
+ tcg_temp_free_i32(tcg_rmode);
+}
+
/* C3.6.9 AdvSIMD scalar shift by immediate
* 31 30 29 28 23 22 19 18 16 15 11 10 9 5 4 0
* +-----+---+-------------+------+------+--------+---+------+------+
@@ -5925,7 +6550,18 @@ static void disas_simd_scalar_shift_imm(DisasContext *s, uint32_t insn)
int immh = extract32(insn, 19, 4);
bool is_u = extract32(insn, 29, 1);
+ if (immh == 0) {
+ unallocated_encoding(s);
+ return;
+ }
+
switch (opcode) {
+ case 0x08: /* SRI */
+ if (!is_u) {
+ unallocated_encoding(s);
+ return;
+ }
+ /* fall through */
case 0x00: /* SSHR / USHR */
case 0x02: /* SSRA / USRA */
case 0x04: /* SRSHR / URSHR */
@@ -5935,8 +6571,39 @@ static void disas_simd_scalar_shift_imm(DisasContext *s, uint32_t insn)
case 0x0a: /* SHL / SLI */
handle_scalar_simd_shli(s, is_u, immh, immb, opcode, rn, rd);
break;
+ case 0x1c: /* SCVTF, UCVTF */
+ handle_simd_shift_intfp_conv(s, true, false, is_u, immh, immb,
+ opcode, rn, rd);
+ break;
+ case 0x10: /* SQSHRUN, SQSHRUN2 */
+ case 0x11: /* SQRSHRUN, SQRSHRUN2 */
+ if (!is_u) {
+ unallocated_encoding(s);
+ return;
+ }
+ handle_vec_simd_sqshrn(s, true, false, false, true,
+ immh, immb, opcode, rn, rd);
+ break;
+ case 0x12: /* SQSHRN, SQSHRN2, UQSHRN */
+ case 0x13: /* SQRSHRN, SQRSHRN2, UQRSHRN, UQRSHRN2 */
+ handle_vec_simd_sqshrn(s, true, false, is_u, is_u,
+ immh, immb, opcode, rn, rd);
+ break;
+ case 0xc: /* SQSHLU */
+ if (!is_u) {
+ unallocated_encoding(s);
+ return;
+ }
+ handle_simd_qshl(s, true, false, false, true, immh, immb, rn, rd);
+ break;
+ case 0xe: /* SQSHL, UQSHL */
+ handle_simd_qshl(s, true, false, is_u, is_u, immh, immb, rn, rd);
+ break;
+ case 0x1f: /* FCVTZS, FCVTZU */
+ handle_simd_shift_fpint_conv(s, true, false, is_u, immh, immb, rn, rd);
+ break;
default:
- unsupported_encoding(s, insn);
+ unallocated_encoding(s);
break;
}
}
@@ -5975,6 +6642,10 @@ static void disas_simd_scalar_three_reg_diff(DisasContext *s, uint32_t insn)
return;
}
+ if (!fp_access_check(s)) {
+ return;
+ }
+
if (size == 2) {
TCGv_i64 tcg_op1 = tcg_temp_new_i64();
TCGv_i64 tcg_op2 = tcg_temp_new_i64();
@@ -6359,6 +7030,10 @@ static void disas_simd_scalar_three_reg_same(DisasContext *s, uint32_t insn)
return;
}
+ if (!fp_access_check(s)) {
+ return;
+ }
+
handle_3same_float(s, extract32(size, 0, 1), 1, fpopcode, rd, rn, rm);
return;
}
@@ -6391,6 +7066,10 @@ static void disas_simd_scalar_three_reg_same(DisasContext *s, uint32_t insn)
return;
}
+ if (!fp_access_check(s)) {
+ return;
+ }
+
tcg_rd = tcg_temp_new_i64();
if (size == 3) {
@@ -6483,21 +7162,38 @@ static void disas_simd_scalar_three_reg_same(DisasContext *s, uint32_t insn)
}
static void handle_2misc_64(DisasContext *s, int opcode, bool u,
- TCGv_i64 tcg_rd, TCGv_i64 tcg_rn)
+ TCGv_i64 tcg_rd, TCGv_i64 tcg_rn,
+ TCGv_i32 tcg_rmode, TCGv_ptr tcg_fpstatus)
{
/* Handle 64->64 opcodes which are shared between the scalar and
* vector 2-reg-misc groups. We cover every integer opcode where size == 3
* is valid in either group and also the double-precision fp ops.
+ * The caller need only provide tcg_rmode and tcg_fpstatus if the op
+ * requires them.
*/
TCGCond cond;
switch (opcode) {
+ case 0x4: /* CLS, CLZ */
+ if (u) {
+ gen_helper_clz64(tcg_rd, tcg_rn);
+ } else {
+ gen_helper_cls64(tcg_rd, tcg_rn);
+ }
+ break;
case 0x5: /* NOT */
/* This opcode is shared with CNT and RBIT but we have earlier
* enforced that size == 3 if and only if this is the NOT insn.
*/
tcg_gen_not_i64(tcg_rd, tcg_rn);
break;
+ case 0x7: /* SQABS, SQNEG */
+ if (u) {
+ gen_helper_neon_qneg_s64(tcg_rd, cpu_env, tcg_rn);
+ } else {
+ gen_helper_neon_qabs_s64(tcg_rd, cpu_env, tcg_rn);
+ }
+ break;
case 0xa: /* CMLT */
/* 64 bit integer comparison against zero, result is
* test ? (2^64 - 1) : 0. We implement via setcond(!test) and
@@ -6531,6 +7227,42 @@ static void handle_2misc_64(DisasContext *s, int opcode, bool u,
case 0x6f: /* FNEG */
gen_helper_vfp_negd(tcg_rd, tcg_rn);
break;
+ case 0x7f: /* FSQRT */
+ gen_helper_vfp_sqrtd(tcg_rd, tcg_rn, cpu_env);
+ break;
+ case 0x1a: /* FCVTNS */
+ case 0x1b: /* FCVTMS */
+ case 0x1c: /* FCVTAS */
+ case 0x3a: /* FCVTPS */
+ case 0x3b: /* FCVTZS */
+ {
+ TCGv_i32 tcg_shift = tcg_const_i32(0);
+ gen_helper_vfp_tosqd(tcg_rd, tcg_rn, tcg_shift, tcg_fpstatus);
+ tcg_temp_free_i32(tcg_shift);
+ break;
+ }
+ case 0x5a: /* FCVTNU */
+ case 0x5b: /* FCVTMU */
+ case 0x5c: /* FCVTAU */
+ case 0x7a: /* FCVTPU */
+ case 0x7b: /* FCVTZU */
+ {
+ TCGv_i32 tcg_shift = tcg_const_i32(0);
+ gen_helper_vfp_touqd(tcg_rd, tcg_rn, tcg_shift, tcg_fpstatus);
+ tcg_temp_free_i32(tcg_shift);
+ break;
+ }
+ case 0x18: /* FRINTN */
+ case 0x19: /* FRINTM */
+ case 0x38: /* FRINTP */
+ case 0x39: /* FRINTZ */
+ case 0x58: /* FRINTA */
+ case 0x79: /* FRINTI */
+ gen_helper_rintd(tcg_rd, tcg_rn, tcg_fpstatus);
+ break;
+ case 0x59: /* FRINTX */
+ gen_helper_rintd_exact(tcg_rd, tcg_rn, tcg_fpstatus);
+ break;
default:
g_assert_not_reached();
}
@@ -6541,7 +7273,13 @@ static void handle_2misc_fcmp_zero(DisasContext *s, int opcode,
int size, int rn, int rd)
{
bool is_double = (size == 3);
- TCGv_ptr fpst = get_fpstatus_ptr();
+ TCGv_ptr fpst;
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ fpst = get_fpstatus_ptr();
if (is_double) {
TCGv_i64 tcg_op = tcg_temp_new_i64();
@@ -6645,6 +7383,289 @@ static void handle_2misc_fcmp_zero(DisasContext *s, int opcode,
tcg_temp_free_ptr(fpst);
}
+static void handle_2misc_reciprocal(DisasContext *s, int opcode,
+ bool is_scalar, bool is_u, bool is_q,
+ int size, int rn, int rd)
+{
+ bool is_double = (size == 3);
+ TCGv_ptr fpst = get_fpstatus_ptr();
+
+ if (is_double) {
+ TCGv_i64 tcg_op = tcg_temp_new_i64();
+ TCGv_i64 tcg_res = tcg_temp_new_i64();
+ int pass;
+
+ for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
+ read_vec_element(s, tcg_op, rn, pass, MO_64);
+ switch (opcode) {
+ case 0x3d: /* FRECPE */
+ gen_helper_recpe_f64(tcg_res, tcg_op, fpst);
+ break;
+ case 0x3f: /* FRECPX */
+ gen_helper_frecpx_f64(tcg_res, tcg_op, fpst);
+ break;
+ case 0x7d: /* FRSQRTE */
+ gen_helper_rsqrte_f64(tcg_res, tcg_op, fpst);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ write_vec_element(s, tcg_res, rd, pass, MO_64);
+ }
+ if (is_scalar) {
+ clear_vec_high(s, rd);
+ }
+
+ tcg_temp_free_i64(tcg_res);
+ tcg_temp_free_i64(tcg_op);
+ } else {
+ TCGv_i32 tcg_op = tcg_temp_new_i32();
+ TCGv_i32 tcg_res = tcg_temp_new_i32();
+ int pass, maxpasses;
+
+ if (is_scalar) {
+ maxpasses = 1;
+ } else {
+ maxpasses = is_q ? 4 : 2;
+ }
+
+ for (pass = 0; pass < maxpasses; pass++) {
+ read_vec_element_i32(s, tcg_op, rn, pass, MO_32);
+
+ switch (opcode) {
+ case 0x3c: /* URECPE */
+ gen_helper_recpe_u32(tcg_res, tcg_op, fpst);
+ break;
+ case 0x3d: /* FRECPE */
+ gen_helper_recpe_f32(tcg_res, tcg_op, fpst);
+ break;
+ case 0x3f: /* FRECPX */
+ gen_helper_frecpx_f32(tcg_res, tcg_op, fpst);
+ break;
+ case 0x7d: /* FRSQRTE */
+ gen_helper_rsqrte_f32(tcg_res, tcg_op, fpst);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ if (is_scalar) {
+ write_fp_sreg(s, rd, tcg_res);
+ } else {
+ write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
+ }
+ }
+ tcg_temp_free_i32(tcg_res);
+ tcg_temp_free_i32(tcg_op);
+ if (!is_q && !is_scalar) {
+ clear_vec_high(s, rd);
+ }
+ }
+ tcg_temp_free_ptr(fpst);
+}
+
+static void handle_2misc_narrow(DisasContext *s, bool scalar,
+ int opcode, bool u, bool is_q,
+ int size, int rn, int rd)
+{
+ /* Handle 2-reg-misc ops which are narrowing (so each 2*size element
+ * in the source becomes a size element in the destination).
+ */
+ int pass;
+ TCGv_i32 tcg_res[2];
+ int destelt = is_q ? 2 : 0;
+ int passes = scalar ? 1 : 2;
+
+ if (scalar) {
+ tcg_res[1] = tcg_const_i32(0);
+ }
+
+ for (pass = 0; pass < passes; pass++) {
+ TCGv_i64 tcg_op = tcg_temp_new_i64();
+ NeonGenNarrowFn *genfn = NULL;
+ NeonGenNarrowEnvFn *genenvfn = NULL;
+
+ if (scalar) {
+ read_vec_element(s, tcg_op, rn, pass, size + 1);
+ } else {
+ read_vec_element(s, tcg_op, rn, pass, MO_64);
+ }
+ tcg_res[pass] = tcg_temp_new_i32();
+
+ switch (opcode) {
+ case 0x12: /* XTN, SQXTUN */
+ {
+ static NeonGenNarrowFn * const xtnfns[3] = {
+ gen_helper_neon_narrow_u8,
+ gen_helper_neon_narrow_u16,
+ tcg_gen_trunc_i64_i32,
+ };
+ static NeonGenNarrowEnvFn * const sqxtunfns[3] = {
+ gen_helper_neon_unarrow_sat8,
+ gen_helper_neon_unarrow_sat16,
+ gen_helper_neon_unarrow_sat32,
+ };
+ if (u) {
+ genenvfn = sqxtunfns[size];
+ } else {
+ genfn = xtnfns[size];
+ }
+ break;
+ }
+ case 0x14: /* SQXTN, UQXTN */
+ {
+ static NeonGenNarrowEnvFn * const fns[3][2] = {
+ { gen_helper_neon_narrow_sat_s8,
+ gen_helper_neon_narrow_sat_u8 },
+ { gen_helper_neon_narrow_sat_s16,
+ gen_helper_neon_narrow_sat_u16 },
+ { gen_helper_neon_narrow_sat_s32,
+ gen_helper_neon_narrow_sat_u32 },
+ };
+ genenvfn = fns[size][u];
+ break;
+ }
+ case 0x16: /* FCVTN, FCVTN2 */
+ /* 32 bit to 16 bit or 64 bit to 32 bit float conversion */
+ if (size == 2) {
+ gen_helper_vfp_fcvtsd(tcg_res[pass], tcg_op, cpu_env);
+ } else {
+ TCGv_i32 tcg_lo = tcg_temp_new_i32();
+ TCGv_i32 tcg_hi = tcg_temp_new_i32();
+ tcg_gen_trunc_i64_i32(tcg_lo, tcg_op);
+ gen_helper_vfp_fcvt_f32_to_f16(tcg_lo, tcg_lo, cpu_env);
+ tcg_gen_shri_i64(tcg_op, tcg_op, 32);
+ tcg_gen_trunc_i64_i32(tcg_hi, tcg_op);
+ gen_helper_vfp_fcvt_f32_to_f16(tcg_hi, tcg_hi, cpu_env);
+ tcg_gen_deposit_i32(tcg_res[pass], tcg_lo, tcg_hi, 16, 16);
+ tcg_temp_free_i32(tcg_lo);
+ tcg_temp_free_i32(tcg_hi);
+ }
+ break;
+ case 0x56: /* FCVTXN, FCVTXN2 */
+ /* 64 bit to 32 bit float conversion
+ * with von Neumann rounding (round to odd)
+ */
+ assert(size == 2);
+ gen_helper_fcvtx_f64_to_f32(tcg_res[pass], tcg_op, cpu_env);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ if (genfn) {
+ genfn(tcg_res[pass], tcg_op);
+ } else if (genenvfn) {
+ genenvfn(tcg_res[pass], cpu_env, tcg_op);
+ }
+
+ tcg_temp_free_i64(tcg_op);
+ }
+
+ for (pass = 0; pass < 2; pass++) {
+ write_vec_element_i32(s, tcg_res[pass], rd, destelt + pass, MO_32);
+ tcg_temp_free_i32(tcg_res[pass]);
+ }
+ if (!is_q) {
+ clear_vec_high(s, rd);
+ }
+}
+
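Aside (not part of the patch): the architectural effect of the narrowing loop above for a plain XTN/XTN2 on 32-bit to 16-bit lanes, ignoring saturation and the per-pass 64-bit mechanics. XTN fills the low half of Vd and zeroes the high half; XTN2 fills the high half and leaves the low half alone, which is what destelt selects.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t src[4] = { 0x11112222, 0x33334444, 0x55556666, 0x77778888 };
    uint16_t vd[8]  = { 0 };            /* 128-bit Vd viewed as 16-bit lanes */
    int is_q = 0;                       /* 0: XTN, 1: XTN2 */
    int base = is_q ? 4 : 0;            /* which half of Vd receives lanes */
    int i;

    for (i = 0; i < 4; i++) {
        vd[base + i] = (uint16_t)src[i];    /* truncate each lane */
    }
    for (i = 0; i < 8; i++) {
        printf("%04x ", vd[i]);             /* 2222 4444 6666 8888 0000 ... */
    }
    printf("\n");
    return 0;
}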
+/* Remaining saturating accumulating ops */
+static void handle_2misc_satacc(DisasContext *s, bool is_scalar, bool is_u,
+ bool is_q, int size, int rn, int rd)
+{
+ bool is_double = (size == 3);
+
+ if (is_double) {
+ TCGv_i64 tcg_rn = tcg_temp_new_i64();
+ TCGv_i64 tcg_rd = tcg_temp_new_i64();
+ int pass;
+
+ for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
+ read_vec_element(s, tcg_rn, rn, pass, MO_64);
+ read_vec_element(s, tcg_rd, rd, pass, MO_64);
+
+ if (is_u) { /* USQADD */
+ gen_helper_neon_uqadd_s64(tcg_rd, cpu_env, tcg_rn, tcg_rd);
+ } else { /* SUQADD */
+ gen_helper_neon_sqadd_u64(tcg_rd, cpu_env, tcg_rn, tcg_rd);
+ }
+ write_vec_element(s, tcg_rd, rd, pass, MO_64);
+ }
+ if (is_scalar) {
+ clear_vec_high(s, rd);
+ }
+
+ tcg_temp_free_i64(tcg_rd);
+ tcg_temp_free_i64(tcg_rn);
+ } else {
+ TCGv_i32 tcg_rn = tcg_temp_new_i32();
+ TCGv_i32 tcg_rd = tcg_temp_new_i32();
+ int pass, maxpasses;
+
+ if (is_scalar) {
+ maxpasses = 1;
+ } else {
+ maxpasses = is_q ? 4 : 2;
+ }
+
+ for (pass = 0; pass < maxpasses; pass++) {
+ if (is_scalar) {
+ read_vec_element_i32(s, tcg_rn, rn, pass, size);
+ read_vec_element_i32(s, tcg_rd, rd, pass, size);
+ } else {
+ read_vec_element_i32(s, tcg_rn, rn, pass, MO_32);
+ read_vec_element_i32(s, tcg_rd, rd, pass, MO_32);
+ }
+
+ if (is_u) { /* USQADD */
+ switch (size) {
+ case 0:
+ gen_helper_neon_uqadd_s8(tcg_rd, cpu_env, tcg_rn, tcg_rd);
+ break;
+ case 1:
+ gen_helper_neon_uqadd_s16(tcg_rd, cpu_env, tcg_rn, tcg_rd);
+ break;
+ case 2:
+ gen_helper_neon_uqadd_s32(tcg_rd, cpu_env, tcg_rn, tcg_rd);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ } else { /* SUQADD */
+ switch (size) {
+ case 0:
+ gen_helper_neon_sqadd_u8(tcg_rd, cpu_env, tcg_rn, tcg_rd);
+ break;
+ case 1:
+ gen_helper_neon_sqadd_u16(tcg_rd, cpu_env, tcg_rn, tcg_rd);
+ break;
+ case 2:
+ gen_helper_neon_sqadd_u32(tcg_rd, cpu_env, tcg_rn, tcg_rd);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ }
+
+ if (is_scalar) {
+ TCGv_i64 tcg_zero = tcg_const_i64(0);
+ write_vec_element(s, tcg_zero, rd, 0, MO_64);
+ tcg_temp_free_i64(tcg_zero);
+ }
+ write_vec_element_i32(s, tcg_rd, rd, pass, MO_32);
+ }
+
+ if (!is_q) {
+ clear_vec_high(s, rd);
+ }
+
+ tcg_temp_free_i32(tcg_rd);
+ tcg_temp_free_i32(tcg_rn);
+ }
+}
+
/* C3.6.12 AdvSIMD scalar two reg misc
* 31 30 29 28 24 23 22 21 17 16 12 11 10 9 5 4 0
* +-----+---+-----------+------+-----------+--------+-----+------+------+
@@ -6658,8 +7679,20 @@ static void disas_simd_scalar_two_reg_misc(DisasContext *s, uint32_t insn)
int opcode = extract32(insn, 12, 5);
int size = extract32(insn, 22, 2);
bool u = extract32(insn, 29, 1);
+ bool is_fcvt = false;
+ int rmode;
+ TCGv_i32 tcg_rmode;
+ TCGv_ptr tcg_fpstatus;
switch (opcode) {
+ case 0x3: /* USQADD / SUQADD*/
+ if (!fp_access_check(s)) {
+ return;
+ }
+ handle_2misc_satacc(s, true, u, false, size, rn, rd);
+ return;
+ case 0x7: /* SQABS / SQNEG */
+ break;
case 0xa: /* CMLT */
if (u) {
unallocated_encoding(s);
@@ -6674,6 +7707,22 @@ static void disas_simd_scalar_two_reg_misc(DisasContext *s, uint32_t insn)
return;
}
break;
+ case 0x12: /* SQXTUN */
+ if (!u) {
+ unallocated_encoding(s);
+ return;
+ }
+ /* fall through */
+ case 0x14: /* SQXTN, UQXTN */
+ if (size == 3) {
+ unallocated_encoding(s);
+ return;
+ }
+ if (!fp_access_check(s)) {
+ return;
+ }
+ handle_2misc_narrow(s, true, opcode, u, false, size, rn, rd);
+ return;
case 0xc ... 0xf:
case 0x16 ... 0x1d:
case 0x1f:
@@ -6690,23 +7739,50 @@ static void disas_simd_scalar_two_reg_misc(DisasContext *s, uint32_t insn)
case 0x6d: /* FCMLE (zero) */
handle_2misc_fcmp_zero(s, opcode, true, u, true, size, rn, rd);
return;
+ case 0x1d: /* SCVTF */
+ case 0x5d: /* UCVTF */
+ {
+ bool is_signed = (opcode == 0x1d);
+ if (!fp_access_check(s)) {
+ return;
+ }
+ handle_simd_intfp_conv(s, rd, rn, 1, is_signed, 0, size);
+ return;
+ }
+ case 0x3d: /* FRECPE */
+ case 0x3f: /* FRECPX */
+ case 0x7d: /* FRSQRTE */
+ if (!fp_access_check(s)) {
+ return;
+ }
+ handle_2misc_reciprocal(s, opcode, true, u, true, size, rn, rd);
+ return;
case 0x1a: /* FCVTNS */
case 0x1b: /* FCVTMS */
- case 0x1c: /* FCVTAS */
- case 0x1d: /* SCVTF */
case 0x3a: /* FCVTPS */
case 0x3b: /* FCVTZS */
- case 0x3d: /* FRECPE */
- case 0x3f: /* FRECPX */
- case 0x56: /* FCVTXN, FCVTXN2 */
case 0x5a: /* FCVTNU */
case 0x5b: /* FCVTMU */
- case 0x5c: /* FCVTAU */
- case 0x5d: /* UCVTF */
case 0x7a: /* FCVTPU */
case 0x7b: /* FCVTZU */
- case 0x7d: /* FRSQRTE */
- unsupported_encoding(s, insn);
+ is_fcvt = true;
+ rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
+ break;
+ case 0x1c: /* FCVTAS */
+ case 0x5c: /* FCVTAU */
+ /* TIEAWAY doesn't fit in the usual rounding mode encoding */
+ is_fcvt = true;
+ rmode = FPROUNDING_TIEAWAY;
+ break;
+ case 0x56: /* FCVTXN, FCVTXN2 */
+ if (size == 2) {
+ unallocated_encoding(s);
+ return;
+ }
+ if (!fp_access_check(s)) {
+ return;
+ }
+ handle_2misc_narrow(s, true, opcode, u, false, size - 1, rn, rd);
return;
default:
unallocated_encoding(s);
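Aside (not part of the patch): the rmode line above derives the rounding mode from two opcode bits. Below is a quick standalone check of that mapping, assuming the conventional ordering of the FPROUNDING_* enumerators (tie-even, +inf, -inf, zero, tie-away); the local enum only mirrors that assumption.

#include <stdio.h>

enum { TIEEVEN, POSINF, NEGINF, ZERO, TIEAWAY };   /* assumed enum order */

static int rmode_of(int opcode)
{
    return ((opcode >> 5) & 1) | (((opcode >> 0) & 1) << 1);
}

int main(void)
{
    printf("FCVTNS(0x1a) -> %d (TIEEVEN)\n", rmode_of(0x1a));   /* 0 */
    printf("FCVTMS(0x1b) -> %d (NEGINF)\n",  rmode_of(0x1b));   /* 2 */
    printf("FCVTPS(0x3a) -> %d (POSINF)\n",  rmode_of(0x3a));   /* 1 */
    printf("FCVTZS(0x3b) -> %d (ZERO)\n",    rmode_of(0x3b));   /* 3 */
    return 0;
}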
@@ -6714,26 +7790,85 @@ static void disas_simd_scalar_two_reg_misc(DisasContext *s, uint32_t insn)
}
break;
default:
- /* Other categories of encoding in this class:
- * + SUQADD/USQADD/SQABS/SQNEG : size 8, 16, 32 or 64
- * + SQXTN/SQXTN2/SQXTUN/SQXTUN2/UQXTN/UQXTN2:
- * narrowing saturate ops: size 64/32/16 -> 32/16/8
- */
- unsupported_encoding(s, insn);
+ unallocated_encoding(s);
return;
}
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ if (is_fcvt) {
+ tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
+ gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
+ tcg_fpstatus = get_fpstatus_ptr();
+ } else {
+ TCGV_UNUSED_I32(tcg_rmode);
+ TCGV_UNUSED_PTR(tcg_fpstatus);
+ }
+
if (size == 3) {
TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
TCGv_i64 tcg_rd = tcg_temp_new_i64();
- handle_2misc_64(s, opcode, u, tcg_rd, tcg_rn);
+ handle_2misc_64(s, opcode, u, tcg_rd, tcg_rn, tcg_rmode, tcg_fpstatus);
write_fp_dreg(s, rd, tcg_rd);
tcg_temp_free_i64(tcg_rd);
tcg_temp_free_i64(tcg_rn);
} else {
- /* the 'size might not be 64' ops aren't implemented yet */
- g_assert_not_reached();
+ TCGv_i32 tcg_rn = tcg_temp_new_i32();
+ TCGv_i32 tcg_rd = tcg_temp_new_i32();
+
+ read_vec_element_i32(s, tcg_rn, rn, 0, size);
+
+ switch (opcode) {
+ case 0x7: /* SQABS, SQNEG */
+ {
+ NeonGenOneOpEnvFn *genfn;
+ static NeonGenOneOpEnvFn * const fns[3][2] = {
+ { gen_helper_neon_qabs_s8, gen_helper_neon_qneg_s8 },
+ { gen_helper_neon_qabs_s16, gen_helper_neon_qneg_s16 },
+ { gen_helper_neon_qabs_s32, gen_helper_neon_qneg_s32 },
+ };
+ genfn = fns[size][u];
+ genfn(tcg_rd, cpu_env, tcg_rn);
+ break;
+ }
+ case 0x1a: /* FCVTNS */
+ case 0x1b: /* FCVTMS */
+ case 0x1c: /* FCVTAS */
+ case 0x3a: /* FCVTPS */
+ case 0x3b: /* FCVTZS */
+ {
+ TCGv_i32 tcg_shift = tcg_const_i32(0);
+ gen_helper_vfp_tosls(tcg_rd, tcg_rn, tcg_shift, tcg_fpstatus);
+ tcg_temp_free_i32(tcg_shift);
+ break;
+ }
+ case 0x5a: /* FCVTNU */
+ case 0x5b: /* FCVTMU */
+ case 0x5c: /* FCVTAU */
+ case 0x7a: /* FCVTPU */
+ case 0x7b: /* FCVTZU */
+ {
+ TCGv_i32 tcg_shift = tcg_const_i32(0);
+ gen_helper_vfp_touls(tcg_rd, tcg_rn, tcg_shift, tcg_fpstatus);
+ tcg_temp_free_i32(tcg_shift);
+ break;
+ }
+ default:
+ g_assert_not_reached();
+ }
+
+ write_fp_sreg(s, rd, tcg_rd);
+ tcg_temp_free_i32(tcg_rd);
+ tcg_temp_free_i32(tcg_rn);
+ }
+
+ if (is_fcvt) {
+ gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
+ tcg_temp_free_i32(tcg_rmode);
+ tcg_temp_free_ptr(tcg_fpstatus);
}
}
@@ -6746,6 +7881,7 @@ static void handle_vec_simd_shri(DisasContext *s, bool is_q, bool is_u,
int shift = 2 * (8 << size) - immhb;
bool accumulate = false;
bool round = false;
+ bool insert = false;
int dsize = is_q ? 128 : 64;
int esize = 8 << size;
int elements = dsize/esize;
@@ -6765,6 +7901,10 @@ static void handle_vec_simd_shri(DisasContext *s, bool is_q, bool is_u,
return;
}
+ if (!fp_access_check(s)) {
+ return;
+ }
+
switch (opcode) {
case 0x02: /* SSRA / USRA (accumulate) */
accumulate = true;
@@ -6775,6 +7915,9 @@ static void handle_vec_simd_shri(DisasContext *s, bool is_q, bool is_u,
case 0x06: /* SRSRA / URSRA (accum + rounding) */
accumulate = round = true;
break;
+ case 0x08: /* SRI */
+ insert = true;
+ break;
}
if (round) {
@@ -6786,12 +7929,16 @@ static void handle_vec_simd_shri(DisasContext *s, bool is_q, bool is_u,
for (i = 0; i < elements; i++) {
read_vec_element(s, tcg_rn, rn, i, memop);
- if (accumulate) {
+ if (accumulate || insert) {
read_vec_element(s, tcg_rd, rd, i, memop);
}
- handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
- accumulate, is_u, size, shift);
+ if (insert) {
+ handle_shri_with_ins(tcg_rd, tcg_rn, size, shift);
+ } else {
+ handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
+ accumulate, is_u, size, shift);
+ }
write_vec_element(s, tcg_rd, rd, i, size);
}
@@ -6829,6 +7976,10 @@ static void handle_vec_simd_shli(DisasContext *s, bool is_q, bool insert,
return;
}
+ if (!fp_access_check(s)) {
+ return;
+ }
+
for (i = 0; i < elements; i++) {
read_vec_element(s, tcg_rn, rn, i, size);
if (insert) {
@@ -6864,6 +8015,10 @@ static void handle_vec_simd_wshli(DisasContext *s, bool is_q, bool is_u,
return;
}
+ if (!fp_access_check(s)) {
+ return;
+ }
+
/* For the LL variants the store is larger than the load,
* so if rd == rn we would overwrite parts of our input.
* So load everything right now and use shifts in the main loop.
@@ -6878,6 +8033,66 @@ static void handle_vec_simd_wshli(DisasContext *s, bool is_q, bool is_u,
}
}
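Aside (not part of the patch): why handle_vec_simd_wshli snapshots its input before writing anything, in plain C terms. With rd == rn each widened 16-bit store would clobber 8-bit source lanes that have not been read yet, so the inputs are captured up front and written back in one go.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    uint8_t  vreg[16] = { 1, 2, 3, 4, 5, 6, 7, 8 };   /* Vd aliases Vn */
    uint16_t widened[8];
    int shift = 1, i;

    for (i = 0; i < 8; i++) {               /* read everything first... */
        widened[i] = (uint16_t)vreg[i] << shift;
    }
    memcpy(vreg, widened, sizeof(widened)); /* ...then write back once */

    for (i = 0; i < 8; i++) {
        printf("%u ", widened[i]);          /* 2 4 6 8 10 12 14 16 */
    }
    printf("\n");
    return 0;
}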
+/* SHRN/RSHRN - Shift right with narrowing (and potential rounding) */
+static void handle_vec_simd_shrn(DisasContext *s, bool is_q,
+ int immh, int immb, int opcode, int rn, int rd)
+{
+ int immhb = immh << 3 | immb;
+ int size = 32 - clz32(immh) - 1;
+ int dsize = 64;
+ int esize = 8 << size;
+ int elements = dsize/esize;
+ int shift = (2 * esize) - immhb;
+ bool round = extract32(opcode, 0, 1);
+ TCGv_i64 tcg_rn, tcg_rd, tcg_final;
+ TCGv_i64 tcg_round;
+ int i;
+
+ if (extract32(immh, 3, 1)) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ tcg_rn = tcg_temp_new_i64();
+ tcg_rd = tcg_temp_new_i64();
+ tcg_final = tcg_temp_new_i64();
+ read_vec_element(s, tcg_final, rd, is_q ? 1 : 0, MO_64);
+
+ if (round) {
+ uint64_t round_const = 1ULL << (shift - 1);
+ tcg_round = tcg_const_i64(round_const);
+ } else {
+ TCGV_UNUSED_I64(tcg_round);
+ }
+
+ for (i = 0; i < elements; i++) {
+ read_vec_element(s, tcg_rn, rn, i, size+1);
+ handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
+ false, true, size+1, shift);
+
+ tcg_gen_deposit_i64(tcg_final, tcg_final, tcg_rd, esize * i, esize);
+ }
+
+ if (!is_q) {
+ clear_vec_high(s, rd);
+ write_vec_element(s, tcg_final, rd, 0, MO_64);
+ } else {
+ write_vec_element(s, tcg_final, rd, 1, MO_64);
+ }
+
+ if (round) {
+ tcg_temp_free_i64(tcg_round);
+ }
+ tcg_temp_free_i64(tcg_rn);
+ tcg_temp_free_i64(tcg_rd);
+ tcg_temp_free_i64(tcg_final);
+ return;
+}
+
/* C3.6.14 AdvSIMD shift by immediate
* 31 30 29 28 23 22 19 18 16 15 11 10 9 5 4 0
@@ -6896,6 +8111,12 @@ static void disas_simd_shift_imm(DisasContext *s, uint32_t insn)
bool is_q = extract32(insn, 30, 1);
switch (opcode) {
+ case 0x08: /* SRI */
+ if (!is_u) {
+ unallocated_encoding(s);
+ return;
+ }
+ /* fall through */
case 0x00: /* SSHR / USHR */
case 0x02: /* SSRA / USRA (accumulate) */
case 0x04: /* SRSHR / URSHR (rounding) */
@@ -6905,15 +8126,42 @@ static void disas_simd_shift_imm(DisasContext *s, uint32_t insn)
case 0x0a: /* SHL / SLI */
handle_vec_simd_shli(s, is_q, is_u, immh, immb, opcode, rn, rd);
break;
+ case 0x10: /* SHRN */
+ case 0x11: /* RSHRN / SQRSHRUN */
+ if (is_u) {
+ handle_vec_simd_sqshrn(s, false, is_q, false, true, immh, immb,
+ opcode, rn, rd);
+ } else {
+ handle_vec_simd_shrn(s, is_q, immh, immb, opcode, rn, rd);
+ }
+ break;
+ case 0x12: /* SQSHRN / UQSHRN */
+ case 0x13: /* SQRSHRN / UQRSHRN */
+ handle_vec_simd_sqshrn(s, false, is_q, is_u, is_u, immh, immb,
+ opcode, rn, rd);
+ break;
case 0x14: /* SSHLL / USHLL */
handle_vec_simd_wshli(s, is_q, is_u, immh, immb, opcode, rn, rd);
break;
+ case 0x1c: /* SCVTF / UCVTF */
+ handle_simd_shift_intfp_conv(s, false, is_q, is_u, immh, immb,
+ opcode, rn, rd);
+ break;
+ case 0xc: /* SQSHLU */
+ if (!is_u) {
+ unallocated_encoding(s);
+ return;
+ }
+ handle_simd_qshl(s, false, is_q, false, true, immh, immb, rn, rd);
+ break;
+ case 0xe: /* SQSHL, UQSHL */
+ handle_simd_qshl(s, false, is_q, is_u, is_u, immh, immb, rn, rd);
+ break;
+ case 0x1f: /* FCVTZS/ FCVTZU */
+ handle_simd_shift_fpint_conv(s, false, is_q, is_u, immh, immb, rn, rd);
+ return;
default:
- /* We don't currently implement any of the Narrow or saturating shifts;
- * nor do we implement the fixed-point conversions in this
- * encoding group (SCVTF, FCVTZS, UCVTF, FCVTZU).
- */
- unsupported_encoding(s, insn);
+ unallocated_encoding(s);
return;
}
}
@@ -7124,6 +8372,10 @@ static void handle_3rd_widening(DisasContext *s, int is_q, int is_u, int size,
gen_helper_neon_addl_saturate_s32(tcg_passres, cpu_env,
tcg_passres, tcg_passres);
break;
+ case 14: /* PMULL */
+ assert(size == 0);
+ gen_helper_neon_mull_p8(tcg_passres, tcg_op1, tcg_op2);
+ break;
default:
g_assert_not_reached();
}
@@ -7243,6 +8495,30 @@ static void handle_3rd_narrowing(DisasContext *s, int is_q, int is_u, int size,
}
}
+static void handle_pmull_64(DisasContext *s, int is_q, int rd, int rn, int rm)
+{
+ /* PMULL of 64 x 64 -> 128 is an odd special case because it
+ * is the only three-reg-diff instruction which produces a
+ * 128-bit wide result from a single operation. However since
+ * it's possible to calculate the two halves more or less
+ * separately we just use two helper calls.
+ */
+ TCGv_i64 tcg_op1 = tcg_temp_new_i64();
+ TCGv_i64 tcg_op2 = tcg_temp_new_i64();
+ TCGv_i64 tcg_res = tcg_temp_new_i64();
+
+ read_vec_element(s, tcg_op1, rn, is_q, MO_64);
+ read_vec_element(s, tcg_op2, rm, is_q, MO_64);
+ gen_helper_neon_pmull_64_lo(tcg_res, tcg_op1, tcg_op2);
+ write_vec_element(s, tcg_res, rd, 0, MO_64);
+ gen_helper_neon_pmull_64_hi(tcg_res, tcg_op1, tcg_op2);
+ write_vec_element(s, tcg_res, rd, 1, MO_64);
+
+ tcg_temp_free_i64(tcg_op1);
+ tcg_temp_free_i64(tcg_op2);
+ tcg_temp_free_i64(tcg_res);
+}
+
/* C3.6.15 AdvSIMD three different
* 31 30 29 28 24 23 22 21 20 16 15 12 11 10 9 5 4 0
* +---+---+---+-----------+------+---+------+--------+-----+------+------+
@@ -7277,6 +8553,9 @@ static void disas_simd_three_reg_diff(DisasContext *s, uint32_t insn)
unallocated_encoding(s);
return;
}
+ if (!fp_access_check(s)) {
+ return;
+ }
handle_3rd_wide(s, is_q, is_u, size, opcode, rd, rn, rm);
break;
case 4: /* ADDHN, ADDHN2, RADDHN, RADDHN2 */
@@ -7286,6 +8565,9 @@ static void disas_simd_three_reg_diff(DisasContext *s, uint32_t insn)
unallocated_encoding(s);
return;
}
+ if (!fp_access_check(s)) {
+ return;
+ }
handle_3rd_narrowing(s, is_q, is_u, size, opcode, rd, rn, rm);
break;
case 14: /* PMULL, PMULL2 */
@@ -7293,8 +8575,18 @@ static void disas_simd_three_reg_diff(DisasContext *s, uint32_t insn)
unallocated_encoding(s);
return;
}
- unsupported_encoding(s, insn);
- break;
+ if (size == 3) {
+ if (!arm_dc_feature(s, ARM_FEATURE_V8_AES)) {
+ unallocated_encoding(s);
+ return;
+ }
+ if (!fp_access_check(s)) {
+ return;
+ }
+ handle_pmull_64(s, is_q, rd, rn, rm);
+ return;
+ }
+ goto is_widening;
case 9: /* SQDMLAL, SQDMLAL2 */
case 11: /* SQDMLSL, SQDMLSL2 */
case 13: /* SQDMULL, SQDMULL2 */
@@ -7315,6 +8607,11 @@ static void disas_simd_three_reg_diff(DisasContext *s, uint32_t insn)
unallocated_encoding(s);
return;
}
+ is_widening:
+ if (!fp_access_check(s)) {
+ return;
+ }
+
handle_3rd_widening(s, is_q, is_u, size, opcode, rd, rn, rm);
break;
default:
@@ -7333,11 +8630,15 @@ static void disas_simd_3same_logic(DisasContext *s, uint32_t insn)
int size = extract32(insn, 22, 2);
bool is_u = extract32(insn, 29, 1);
bool is_q = extract32(insn, 30, 1);
- TCGv_i64 tcg_op1 = tcg_temp_new_i64();
- TCGv_i64 tcg_op2 = tcg_temp_new_i64();
- TCGv_i64 tcg_res[2];
+ TCGv_i64 tcg_op1, tcg_op2, tcg_res[2];
int pass;
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ tcg_op1 = tcg_temp_new_i64();
+ tcg_op2 = tcg_temp_new_i64();
tcg_res[0] = tcg_temp_new_i64();
tcg_res[1] = tcg_temp_new_i64();
@@ -7440,6 +8741,10 @@ static void handle_simd_3same_pair(DisasContext *s, int is_q, int u, int opcode,
TCGV_UNUSED_PTR(fpst);
}
+ if (!fp_access_check(s)) {
+ return;
+ }
+
/* These operations work on the concatenated rm:rn, with each pair of
* adjacent elements being operated on to produce an element in the result.
*/
@@ -7632,6 +8937,10 @@ static void disas_simd_3same_float(DisasContext *s, uint32_t insn)
case 0x5f: /* FDIV */
case 0x7a: /* FABD */
case 0x7c: /* FCMGT */
+ if (!fp_access_check(s)) {
+ return;
+ }
+
handle_3same_float(s, size, elements, fpopcode, rd, rn, rm);
return;
default:
@@ -7686,6 +8995,10 @@ static void disas_simd_3same_int(DisasContext *s, uint32_t insn)
break;
}
+ if (!fp_access_check(s)) {
+ return;
+ }
+
if (size == 3) {
for (pass = 0; pass < (is_q ? 2 : 1); pass++) {
TCGv_i64 tcg_op1 = tcg_temp_new_i64();
@@ -7926,7 +9239,7 @@ static void disas_simd_3same_int(DisasContext *s, uint32_t insn)
genfn = fns[size][is_sub];
read_vec_element_i32(s, tcg_op1, rd, pass, MO_32);
- genfn(tcg_res, tcg_res, tcg_op1);
+ genfn(tcg_res, tcg_op1, tcg_res);
}
write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
@@ -7991,76 +9304,48 @@ static void disas_simd_three_reg_same(DisasContext *s, uint32_t insn)
}
}
-static void handle_2misc_narrow(DisasContext *s, int opcode, bool u, bool is_q,
- int size, int rn, int rd)
+static void handle_2misc_widening(DisasContext *s, int opcode, bool is_q,
+ int size, int rn, int rd)
{
- /* Handle 2-reg-misc ops which are narrowing (so each 2*size element
- * in the source becomes a size element in the destination).
+ /* Handle 2-reg-misc ops which are widening (so each size element
+ * in the source becomes a 2*size element in the destination).
+ * The only instruction like this is FCVTL.
*/
int pass;
- TCGv_i32 tcg_res[2];
- int destelt = is_q ? 2 : 0;
- for (pass = 0; pass < 2; pass++) {
- TCGv_i64 tcg_op = tcg_temp_new_i64();
- NeonGenNarrowFn *genfn = NULL;
- NeonGenNarrowEnvFn *genenvfn = NULL;
+ if (size == 3) {
+ /* 32 -> 64 bit fp conversion */
+ TCGv_i64 tcg_res[2];
+ int srcelt = is_q ? 2 : 0;
- read_vec_element(s, tcg_op, rn, pass, MO_64);
- tcg_res[pass] = tcg_temp_new_i32();
+ for (pass = 0; pass < 2; pass++) {
+ TCGv_i32 tcg_op = tcg_temp_new_i32();
+ tcg_res[pass] = tcg_temp_new_i64();
- switch (opcode) {
- case 0x12: /* XTN, SQXTUN */
- {
- static NeonGenNarrowFn * const xtnfns[3] = {
- gen_helper_neon_narrow_u8,
- gen_helper_neon_narrow_u16,
- tcg_gen_trunc_i64_i32,
- };
- static NeonGenNarrowEnvFn * const sqxtunfns[3] = {
- gen_helper_neon_unarrow_sat8,
- gen_helper_neon_unarrow_sat16,
- gen_helper_neon_unarrow_sat32,
- };
- if (u) {
- genenvfn = sqxtunfns[size];
- } else {
- genfn = xtnfns[size];
- }
- break;
- }
- case 0x14: /* SQXTN, UQXTN */
- {
- static NeonGenNarrowEnvFn * const fns[3][2] = {
- { gen_helper_neon_narrow_sat_s8,
- gen_helper_neon_narrow_sat_u8 },
- { gen_helper_neon_narrow_sat_s16,
- gen_helper_neon_narrow_sat_u16 },
- { gen_helper_neon_narrow_sat_s32,
- gen_helper_neon_narrow_sat_u32 },
- };
- genenvfn = fns[size][u];
- break;
- }
- default:
- g_assert_not_reached();
+ read_vec_element_i32(s, tcg_op, rn, srcelt + pass, MO_32);
+ gen_helper_vfp_fcvtds(tcg_res[pass], tcg_op, cpu_env);
+ tcg_temp_free_i32(tcg_op);
}
-
- if (genfn) {
- genfn(tcg_res[pass], tcg_op);
- } else {
- genenvfn(tcg_res[pass], cpu_env, tcg_op);
+ for (pass = 0; pass < 2; pass++) {
+ write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
+ tcg_temp_free_i64(tcg_res[pass]);
}
+ } else {
+ /* 16 -> 32 bit fp conversion */
+ int srcelt = is_q ? 4 : 0;
+ TCGv_i32 tcg_res[4];
- tcg_temp_free_i64(tcg_op);
- }
+ for (pass = 0; pass < 4; pass++) {
+ tcg_res[pass] = tcg_temp_new_i32();
- for (pass = 0; pass < 2; pass++) {
- write_vec_element_i32(s, tcg_res[pass], rd, destelt + pass, MO_32);
- tcg_temp_free_i32(tcg_res[pass]);
- }
- if (!is_q) {
- clear_vec_high(s, rd);
+ read_vec_element_i32(s, tcg_res[pass], rn, srcelt + pass, MO_16);
+ gen_helper_vfp_fcvt_f16_to_f32(tcg_res[pass], tcg_res[pass],
+ cpu_env);
+ }
+ for (pass = 0; pass < 4; pass++) {
+ write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_32);
+ tcg_temp_free_i32(tcg_res[pass]);
+ }
}
}
@@ -8078,6 +9363,10 @@ static void handle_rev(DisasContext *s, int opcode, bool u,
return;
}
+ if (!fp_access_check(s)) {
+ return;
+ }
+
if (size == 0) {
/* Special case bytes, use bswap op on each group of elements */
int groups = dsize / (8 << grp_size);
@@ -8133,6 +9422,108 @@ static void handle_rev(DisasContext *s, int opcode, bool u,
}
}
+static void handle_2misc_pairwise(DisasContext *s, int opcode, bool u,
+ bool is_q, int size, int rn, int rd)
+{
+ /* Implement the pairwise operations from 2-misc:
+ * SADDLP, UADDLP, SADALP, UADALP.
+ * These all add pairs of elements in the input to produce a
+ * double-width result element in the output (possibly accumulating).
+ */
+ bool accum = (opcode == 0x6);
+ int maxpass = is_q ? 2 : 1;
+ int pass;
+ TCGv_i64 tcg_res[2];
+
+ if (size == 2) {
+ /* 32 + 32 -> 64 op */
+ TCGMemOp memop = size + (u ? 0 : MO_SIGN);
+
+ for (pass = 0; pass < maxpass; pass++) {
+ TCGv_i64 tcg_op1 = tcg_temp_new_i64();
+ TCGv_i64 tcg_op2 = tcg_temp_new_i64();
+
+ tcg_res[pass] = tcg_temp_new_i64();
+
+ read_vec_element(s, tcg_op1, rn, pass * 2, memop);
+ read_vec_element(s, tcg_op2, rn, pass * 2 + 1, memop);
+ tcg_gen_add_i64(tcg_res[pass], tcg_op1, tcg_op2);
+ if (accum) {
+ read_vec_element(s, tcg_op1, rd, pass, MO_64);
+ tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_op1);
+ }
+
+ tcg_temp_free_i64(tcg_op1);
+ tcg_temp_free_i64(tcg_op2);
+ }
+ } else {
+ for (pass = 0; pass < maxpass; pass++) {
+ TCGv_i64 tcg_op = tcg_temp_new_i64();
+ NeonGenOneOpFn *genfn;
+ static NeonGenOneOpFn * const fns[2][2] = {
+ { gen_helper_neon_addlp_s8, gen_helper_neon_addlp_u8 },
+ { gen_helper_neon_addlp_s16, gen_helper_neon_addlp_u16 },
+ };
+
+ genfn = fns[size][u];
+
+ tcg_res[pass] = tcg_temp_new_i64();
+
+ read_vec_element(s, tcg_op, rn, pass, MO_64);
+ genfn(tcg_res[pass], tcg_op);
+
+ if (accum) {
+ read_vec_element(s, tcg_op, rd, pass, MO_64);
+ if (size == 0) {
+ gen_helper_neon_addl_u16(tcg_res[pass],
+ tcg_res[pass], tcg_op);
+ } else {
+ gen_helper_neon_addl_u32(tcg_res[pass],
+ tcg_res[pass], tcg_op);
+ }
+ }
+ tcg_temp_free_i64(tcg_op);
+ }
+ }
+ if (!is_q) {
+ tcg_res[1] = tcg_const_i64(0);
+ }
+ for (pass = 0; pass < 2; pass++) {
+ write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
+ tcg_temp_free_i64(tcg_res[pass]);
+ }
+}
+
+static void handle_shll(DisasContext *s, bool is_q, int size, int rn, int rd)
+{
+ /* Implement SHLL and SHLL2 */
+ int pass;
+ int part = is_q ? 2 : 0;
+ TCGv_i64 tcg_res[2];
+
+ for (pass = 0; pass < 2; pass++) {
+ static NeonGenWidenFn * const widenfns[3] = {
+ gen_helper_neon_widen_u8,
+ gen_helper_neon_widen_u16,
+ tcg_gen_extu_i32_i64,
+ };
+ NeonGenWidenFn *widenfn = widenfns[size];
+ TCGv_i32 tcg_op = tcg_temp_new_i32();
+
+ read_vec_element_i32(s, tcg_op, rn, part + pass, MO_32);
+ tcg_res[pass] = tcg_temp_new_i64();
+ widenfn(tcg_res[pass], tcg_op);
+ tcg_gen_shli_i64(tcg_res[pass], tcg_res[pass], 8 << size);
+
+ tcg_temp_free_i32(tcg_op);
+ }
+
+ for (pass = 0; pass < 2; pass++) {
+ write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
+ tcg_temp_free_i64(tcg_res[pass]);
+ }
+}
+
/* C3.6.17 AdvSIMD two reg misc
* 31 30 29 28 24 23 22 21 17 16 12 11 10 9 5 4 0
* +---+---+---+-----------+------+-----------+--------+-----+------+------+
@@ -8147,6 +9538,11 @@ static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn)
bool is_q = extract32(insn, 30, 1);
int rn = extract32(insn, 5, 5);
int rd = extract32(insn, 0, 5);
+ bool need_fpstatus = false;
+ bool need_rmode = false;
+ int rmode = -1;
+ TCGv_i32 tcg_rmode;
+ TCGv_ptr tcg_fpstatus;
switch (opcode) {
case 0x0: /* REV64, REV32 */
@@ -8173,23 +9569,38 @@ static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn)
unallocated_encoding(s);
return;
}
- handle_2misc_narrow(s, opcode, u, is_q, size, rn, rd);
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ handle_2misc_narrow(s, false, opcode, u, is_q, size, rn, rd);
return;
- case 0x2: /* SADDLP, UADDLP */
case 0x4: /* CLS, CLZ */
+ if (size == 3) {
+ unallocated_encoding(s);
+ return;
+ }
+ break;
+ case 0x2: /* SADDLP, UADDLP */
case 0x6: /* SADALP, UADALP */
if (size == 3) {
unallocated_encoding(s);
return;
}
- unsupported_encoding(s, insn);
+ if (!fp_access_check(s)) {
+ return;
+ }
+ handle_2misc_pairwise(s, opcode, u, is_q, size, rn, rd);
return;
case 0x13: /* SHLL, SHLL2 */
if (u == 0 || size == 3) {
unallocated_encoding(s);
return;
}
- unsupported_encoding(s, insn);
+ if (!fp_access_check(s)) {
+ return;
+ }
+ handle_shll(s, is_q, size, rn, rd);
return;
case 0xa: /* CMLT */
if (u == 1) {
@@ -8206,13 +9617,21 @@ static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn)
}
break;
case 0x3: /* SUQADD, USQADD */
- case 0x7: /* SQABS, SQNEG */
if (size == 3 && !is_q) {
unallocated_encoding(s);
return;
}
- unsupported_encoding(s, insn);
+ if (!fp_access_check(s)) {
+ return;
+ }
+ handle_2misc_satacc(s, false, u, is_q, size, rn, rd);
return;
+ case 0x7: /* SQABS, SQNEG */
+ if (size == 3 && !is_q) {
+ unallocated_encoding(s);
+ return;
+ }
+ break;
case 0xc ... 0xf:
case 0x16 ... 0x1d:
case 0x1f:
@@ -8220,8 +9639,9 @@ static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn)
/* Floating point: U, size[1] and opcode indicate operation;
* size[0] indicates single or double precision.
*/
+ int is_double = extract32(size, 0, 1);
opcode |= (extract32(size, 1, 1) << 5) | (u << 6);
- size = extract32(size, 0, 1) ? 3 : 2;
+ size = is_double ? 3 : 2;
switch (opcode) {
case 0x2f: /* FABS */
case 0x6f: /* FNEG */
@@ -8230,6 +9650,21 @@ static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn)
return;
}
break;
+ case 0x1d: /* SCVTF */
+ case 0x5d: /* UCVTF */
+ {
+ bool is_signed = (opcode == 0x1d);
+ int elements = is_double ? 2 : is_q ? 4 : 2;
+ if (is_double && !is_q) {
+ unallocated_encoding(s);
+ return;
+ }
+ if (!fp_access_check(s)) {
+ return;
+ }
+ handle_simd_intfp_conv(s, rd, rn, elements, is_signed, 0, size);
+ return;
+ }
case 0x2c: /* FCMGT (zero) */
case 0x2d: /* FCMEQ (zero) */
case 0x2e: /* FCMLT (zero) */
@@ -8241,35 +9676,107 @@ static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn)
}
handle_2misc_fcmp_zero(s, opcode, false, u, is_q, size, rn, rd);
return;
- case 0x16: /* FCVTN, FCVTN2 */
- case 0x17: /* FCVTL, FCVTL2 */
- case 0x18: /* FRINTN */
- case 0x19: /* FRINTM */
+ case 0x7f: /* FSQRT */
+ if (size == 3 && !is_q) {
+ unallocated_encoding(s);
+ return;
+ }
+ break;
case 0x1a: /* FCVTNS */
case 0x1b: /* FCVTMS */
- case 0x1c: /* FCVTAS */
- case 0x1d: /* SCVTF */
- case 0x38: /* FRINTP */
- case 0x39: /* FRINTZ */
case 0x3a: /* FCVTPS */
case 0x3b: /* FCVTZS */
- case 0x3c: /* URECPE */
- case 0x3d: /* FRECPE */
- case 0x56: /* FCVTXN, FCVTXN2 */
- case 0x58: /* FRINTA */
- case 0x59: /* FRINTX */
case 0x5a: /* FCVTNU */
case 0x5b: /* FCVTMU */
- case 0x5c: /* FCVTAU */
- case 0x5d: /* UCVTF */
- case 0x79: /* FRINTI */
case 0x7a: /* FCVTPU */
case 0x7b: /* FCVTZU */
- case 0x7c: /* URSQRTE */
+ need_fpstatus = true;
+ need_rmode = true;
+ rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
+ if (size == 3 && !is_q) {
+ unallocated_encoding(s);
+ return;
+ }
+ break;
+ case 0x5c: /* FCVTAU */
+ case 0x1c: /* FCVTAS */
+ need_fpstatus = true;
+ need_rmode = true;
+ rmode = FPROUNDING_TIEAWAY;
+ if (size == 3 && !is_q) {
+ unallocated_encoding(s);
+ return;
+ }
+ break;
+ case 0x3c: /* URECPE */
+ if (size == 3) {
+ unallocated_encoding(s);
+ return;
+ }
+ /* fall through */
+ case 0x3d: /* FRECPE */
case 0x7d: /* FRSQRTE */
- case 0x7f: /* FSQRT */
- unsupported_encoding(s, insn);
+ if (size == 3 && !is_q) {
+ unallocated_encoding(s);
+ return;
+ }
+ if (!fp_access_check(s)) {
+ return;
+ }
+ handle_2misc_reciprocal(s, opcode, false, u, is_q, size, rn, rd);
return;
+ case 0x56: /* FCVTXN, FCVTXN2 */
+ if (size == 2) {
+ unallocated_encoding(s);
+ return;
+ }
+ /* fall through */
+ case 0x16: /* FCVTN, FCVTN2 */
+ /* handle_2misc_narrow does a 2*size -> size operation, but these
+ * instructions encode the source size rather than dest size.
+ */
+ if (!fp_access_check(s)) {
+ return;
+ }
+ handle_2misc_narrow(s, false, opcode, 0, is_q, size - 1, rn, rd);
+ return;
+ case 0x17: /* FCVTL, FCVTL2 */
+ if (!fp_access_check(s)) {
+ return;
+ }
+ handle_2misc_widening(s, opcode, is_q, size, rn, rd);
+ return;
+ case 0x18: /* FRINTN */
+ case 0x19: /* FRINTM */
+ case 0x38: /* FRINTP */
+ case 0x39: /* FRINTZ */
+ need_rmode = true;
+ rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
+ /* fall through */
+ case 0x59: /* FRINTX */
+ case 0x79: /* FRINTI */
+ need_fpstatus = true;
+ if (size == 3 && !is_q) {
+ unallocated_encoding(s);
+ return;
+ }
+ break;
+ case 0x58: /* FRINTA */
+ need_rmode = true;
+ rmode = FPROUNDING_TIEAWAY;
+ need_fpstatus = true;
+ if (size == 3 && !is_q) {
+ unallocated_encoding(s);
+ return;
+ }
+ break;
+ case 0x7c: /* URSQRTE */
+ if (size == 3) {
+ unallocated_encoding(s);
+ return;
+ }
+ need_fpstatus = true;
+ break;
default:
unallocated_encoding(s);
return;
@@ -8281,6 +9788,22 @@ static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn)
return;
}
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ if (need_fpstatus) {
+ tcg_fpstatus = get_fpstatus_ptr();
+ } else {
+ TCGV_UNUSED_PTR(tcg_fpstatus);
+ }
+ if (need_rmode) {
+ tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
+ gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
+ } else {
+ TCGV_UNUSED_I32(tcg_rmode);
+ }
+
if (size == 3) {
/* All 64-bit element operations can be shared with scalar 2misc */
int pass;
@@ -8291,7 +9814,8 @@ static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn)
read_vec_element(s, tcg_op, rn, pass, MO_64);
- handle_2misc_64(s, opcode, u, tcg_res, tcg_op);
+ handle_2misc_64(s, opcode, u, tcg_res, tcg_op,
+ tcg_rmode, tcg_fpstatus);
write_vec_element(s, tcg_res, rd, pass, MO_64);
@@ -8327,6 +9851,20 @@ static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn)
case 0x9: /* CMEQ, CMLE */
cond = u ? TCG_COND_LE : TCG_COND_EQ;
goto do_cmop;
+ case 0x4: /* CLS */
+ if (u) {
+ gen_helper_clz32(tcg_res, tcg_op);
+ } else {
+ gen_helper_cls32(tcg_res, tcg_op);
+ }
+ break;
+ case 0x7: /* SQABS, SQNEG */
+ if (u) {
+ gen_helper_neon_qneg_s32(tcg_res, cpu_env, tcg_op);
+ } else {
+ gen_helper_neon_qabs_s32(tcg_res, cpu_env, tcg_op);
+ }
+ break;
case 0xb: /* ABS, NEG */
if (u) {
tcg_gen_neg_i32(tcg_res, tcg_op);
@@ -8344,6 +9882,47 @@ static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn)
case 0x6f: /* FNEG */
gen_helper_vfp_negs(tcg_res, tcg_op);
break;
+ case 0x7f: /* FSQRT */
+ gen_helper_vfp_sqrts(tcg_res, tcg_op, cpu_env);
+ break;
+ case 0x1a: /* FCVTNS */
+ case 0x1b: /* FCVTMS */
+ case 0x1c: /* FCVTAS */
+ case 0x3a: /* FCVTPS */
+ case 0x3b: /* FCVTZS */
+ {
+ TCGv_i32 tcg_shift = tcg_const_i32(0);
+ gen_helper_vfp_tosls(tcg_res, tcg_op,
+ tcg_shift, tcg_fpstatus);
+ tcg_temp_free_i32(tcg_shift);
+ break;
+ }
+ case 0x5a: /* FCVTNU */
+ case 0x5b: /* FCVTMU */
+ case 0x5c: /* FCVTAU */
+ case 0x7a: /* FCVTPU */
+ case 0x7b: /* FCVTZU */
+ {
+ TCGv_i32 tcg_shift = tcg_const_i32(0);
+ gen_helper_vfp_touls(tcg_res, tcg_op,
+ tcg_shift, tcg_fpstatus);
+ tcg_temp_free_i32(tcg_shift);
+ break;
+ }
+ case 0x18: /* FRINTN */
+ case 0x19: /* FRINTM */
+ case 0x38: /* FRINTP */
+ case 0x39: /* FRINTZ */
+ case 0x58: /* FRINTA */
+ case 0x79: /* FRINTI */
+ gen_helper_rints(tcg_res, tcg_op, tcg_fpstatus);
+ break;
+ case 0x59: /* FRINTX */
+ gen_helper_rints_exact(tcg_res, tcg_op, tcg_fpstatus);
+ break;
+ case 0x7c: /* URSQRTE */
+ gen_helper_rsqrte_u32(tcg_res, tcg_op, tcg_fpstatus);
+ break;
default:
g_assert_not_reached();
}
@@ -8360,6 +9939,17 @@ static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn)
gen_helper_neon_cnt_u8(tcg_res, tcg_op);
}
break;
+ case 0x7: /* SQABS, SQNEG */
+ {
+ NeonGenOneOpEnvFn *genfn;
+ static NeonGenOneOpEnvFn * const fns[2][2] = {
+ { gen_helper_neon_qabs_s8, gen_helper_neon_qneg_s8 },
+ { gen_helper_neon_qabs_s16, gen_helper_neon_qneg_s16 },
+ };
+ genfn = fns[size][u];
+ genfn(tcg_res, cpu_env, tcg_op);
+ break;
+ }
case 0x8: /* CMGT, CMGE */
case 0x9: /* CMEQ, CMLE */
case 0xa: /* CMLT */
@@ -8407,6 +9997,21 @@ static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn)
}
}
break;
+ case 0x4: /* CLS, CLZ */
+ if (u) {
+ if (size == 0) {
+ gen_helper_neon_clz_u8(tcg_res, tcg_op);
+ } else {
+ gen_helper_neon_clz_u16(tcg_res, tcg_op);
+ }
+ } else {
+ if (size == 0) {
+ gen_helper_neon_cls_s8(tcg_res, tcg_op);
+ } else {
+ gen_helper_neon_cls_s16(tcg_res, tcg_op);
+ }
+ }
+ break;
default:
g_assert_not_reached();
}
@@ -8421,6 +10026,14 @@ static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn)
if (!is_q) {
clear_vec_high(s, rd);
}
+
+ if (need_rmode) {
+ gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
+ tcg_temp_free_i32(tcg_rmode);
+ }
+ if (need_fpstatus) {
+ tcg_temp_free_ptr(tcg_fpstatus);
+ }
}
/* C3.6.13 AdvSIMD scalar x indexed element
@@ -8542,6 +10155,10 @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn)
}
}
+ if (!fp_access_check(s)) {
+ return;
+ }
+
if (is_fp) {
fpst = get_fpstatus_ptr();
} else {
@@ -8973,6 +10590,8 @@ static void disas_a64_insn(CPUARMState *env, DisasContext *s)
s->insn = insn;
s->pc += 4;
+ s->fp_access_checked = false;
+
switch (extract32(insn, 25, 4)) {
case 0x0: case 0x1: case 0x2: case 0x3: /* UNALLOCATED */
unallocated_encoding(s);
@@ -9040,11 +10659,12 @@ void gen_intermediate_code_internal_a64(ARMCPU *cpu,
#if !defined(CONFIG_USER_ONLY)
dc->user = (ARM_TBFLAG_AA64_EL(tb->flags) == 0);
#endif
- dc->vfp_enabled = 0;
+ dc->cpacr_fpen = ARM_TBFLAG_AA64_FPEN(tb->flags);
dc->vec_len = 0;
dc->vec_stride = 0;
dc->cp_regs = cpu->cp_regs;
dc->current_pl = arm_current_pl(env);
+ dc->features = env->features;
init_tmp_a64_array(dc);
@@ -9061,10 +10681,10 @@ void gen_intermediate_code_internal_a64(ARMCPU *cpu,
tcg_clear_temp_count();
do {
- if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
- QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
+ if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
+ QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
if (bp->pc == dc->pc) {
- gen_exception_insn(dc, 0, EXCP_DEBUG);
+ gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
/* Advance PC so that clearing the breakpoint will
invalidate this TB. */
dc->pc += 2;
@@ -9127,7 +10747,7 @@ void gen_intermediate_code_internal_a64(ARMCPU *cpu,
if (dc->is_jmp != DISAS_JUMP) {
gen_a64_set_pc_im(dc->pc);
}
- gen_exception(EXCP_DEBUG);
+ gen_exception_internal(EXCP_DEBUG);
} else {
switch (dc->is_jmp) {
case DISAS_NEXT:
diff --git a/target-arm/translate.c b/target-arm/translate.c
index df259debcc..a4d920b629 100644
--- a/target-arm/translate.c
+++ b/target-arm/translate.c
@@ -25,6 +25,7 @@
#include <inttypes.h>
#include "cpu.h"
+#include "internals.h"
#include "disas/disas.h"
#include "tcg-op.h"
#include "qemu/log.h"
@@ -182,12 +183,23 @@ static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
/* Set NZCV flags from the high 4 bits of var. */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
-static void gen_exception(int excp)
+static void gen_exception_internal(int excp)
{
- TCGv_i32 tmp = tcg_temp_new_i32();
- tcg_gen_movi_i32(tmp, excp);
- gen_helper_exception(cpu_env, tmp);
- tcg_temp_free_i32(tmp);
+ TCGv_i32 tcg_excp = tcg_const_i32(excp);
+
+ assert(excp_is_internal(excp));
+ gen_helper_exception_internal(cpu_env, tcg_excp);
+ tcg_temp_free_i32(tcg_excp);
+}
+
+static void gen_exception(int excp, uint32_t syndrome)
+{
+ TCGv_i32 tcg_excp = tcg_const_i32(excp);
+ TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
+
+ gen_helper_exception_with_syndrome(cpu_env, tcg_excp, tcg_syn);
+ tcg_temp_free_i32(tcg_syn);
+ tcg_temp_free_i32(tcg_excp);
}
static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
@@ -899,6 +911,33 @@ static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
tcg_gen_movi_i32(cpu_R[15], val);
}
+static inline void
+gen_set_condexec (DisasContext *s)
+{
+ if (s->condexec_mask) {
+ uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ tcg_gen_movi_i32(tmp, val);
+ store_cpu_field(tmp, condexec_bits);
+ }
+}
+
+static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
+{
+ gen_set_condexec(s);
+ gen_set_pc_im(s, s->pc - offset);
+ gen_exception_internal(excp);
+ s->is_jmp = DISAS_JUMP;
+}
+
+static void gen_exception_insn(DisasContext *s, int offset, int excp, int syn)
+{
+ gen_set_condexec(s);
+ gen_set_pc_im(s, s->pc - offset);
+ gen_exception(excp, syn);
+ s->is_jmp = DISAS_JUMP;
+}
+
/* Force a TB lookup after an instruction that changes the CPU state. */
static inline void gen_lookup_tb(DisasContext *s)
{
@@ -2913,14 +2952,25 @@ static int disas_vfp_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
if (!arm_feature(env, ARM_FEATURE_VFP))
return 1;
+ /* FIXME: this access check should not take precedence over UNDEF
+ * for invalid encodings; we will generate incorrect syndrome information
+ * for attempts to execute invalid vfp/neon encodings with FP disabled.
+ */
+ if (!s->cpacr_fpen) {
+ gen_exception_insn(s, 4, EXCP_UDEF,
+ syn_fp_access_trap(1, 0xe, s->thumb));
+ return 0;
+ }
+
if (!s->vfp_enabled) {
/* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
if ((insn & 0x0fe00fff) != 0x0ee00a10)
return 1;
rn = (insn >> 16) & 0xf;
- if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
- && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
+ if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC && rn != ARM_VFP_MVFR2
+ && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0) {
return 1;
+ }
}
if (extract32(insn, 28, 4) == 0xf) {
@@ -3066,6 +3116,11 @@ static int disas_vfp_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
gen_helper_vfp_get_fpscr(tmp, cpu_env);
}
break;
+ case ARM_VFP_MVFR2:
+ if (!arm_feature(env, ARM_FEATURE_V8)) {
+ return 1;
+ }
+ /* fall through */
case ARM_VFP_MVFR0:
case ARM_VFP_MVFR1:
if (IS_USER(s)
@@ -3912,25 +3967,6 @@ static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
s->is_jmp = DISAS_UPDATE;
}
-static inline void
-gen_set_condexec (DisasContext *s)
-{
- if (s->condexec_mask) {
- uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
- TCGv_i32 tmp = tcg_temp_new_i32();
- tcg_gen_movi_i32(tmp, val);
- store_cpu_field(tmp, condexec_bits);
- }
-}
-
-static void gen_exception_insn(DisasContext *s, int offset, int excp)
-{
- gen_set_condexec(s);
- gen_set_pc_im(s, s->pc - offset);
- gen_exception(excp);
- s->is_jmp = DISAS_JUMP;
-}
-
static void gen_nop_hint(DisasContext *s, int val)
{
switch (val) {
@@ -4212,6 +4248,16 @@ static int disas_neon_ls_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
TCGv_i32 tmp2;
TCGv_i64 tmp64;
+ /* FIXME: this access check should not take precedence over UNDEF
+ * for invalid encodings; we will generate incorrect syndrome information
+ * for attempts to execute invalid vfp/neon encodings with FP disabled.
+ */
+ if (!s->cpacr_fpen) {
+ gen_exception_insn(s, 4, EXCP_UDEF,
+ syn_fp_access_trap(1, 0xe, s->thumb));
+ return 0;
+ }
+
if (!s->vfp_enabled)
return 1;
VFP_DREG_D(rd, insn);
@@ -4934,6 +4980,16 @@ static int disas_neon_data_insn(CPUARMState * env, DisasContext *s, uint32_t ins
TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
TCGv_i64 tmp64;
+ /* FIXME: this access check should not take precedence over UNDEF
+ * for invalid encodings; we will generate incorrect syndrome information
+ * for attempts to execute invalid vfp/neon encodings with FP disabled.
+ */
+ if (!s->cpacr_fpen) {
+ gen_exception_insn(s, 4, EXCP_UDEF,
+ syn_fp_access_trap(1, 0xe, s->thumb));
+ return 0;
+ }
+
if (!s->vfp_enabled)
return 1;
q = (insn & (1 << 6)) != 0;
@@ -6682,17 +6738,33 @@ static int disas_neon_data_insn(CPUARMState * env, DisasContext *s, uint32_t ins
break;
}
case NEON_2RM_VRECPE:
- gen_helper_recpe_u32(tmp, tmp, cpu_env);
+ {
+ TCGv_ptr fpstatus = get_fpstatus_ptr(1);
+ gen_helper_recpe_u32(tmp, tmp, fpstatus);
+ tcg_temp_free_ptr(fpstatus);
break;
+ }
case NEON_2RM_VRSQRTE:
- gen_helper_rsqrte_u32(tmp, tmp, cpu_env);
+ {
+ TCGv_ptr fpstatus = get_fpstatus_ptr(1);
+ gen_helper_rsqrte_u32(tmp, tmp, fpstatus);
+ tcg_temp_free_ptr(fpstatus);
break;
+ }
case NEON_2RM_VRECPE_F:
- gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
+ {
+ TCGv_ptr fpstatus = get_fpstatus_ptr(1);
+ gen_helper_recpe_f32(cpu_F0s, cpu_F0s, fpstatus);
+ tcg_temp_free_ptr(fpstatus);
break;
+ }
case NEON_2RM_VRSQRTE_F:
- gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
+ {
+ TCGv_ptr fpstatus = get_fpstatus_ptr(1);
+ gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, fpstatus);
+ tcg_temp_free_ptr(fpstatus);
break;
+ }
case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
gen_vfp_sito(0, 1);
break;
@@ -6845,10 +6917,53 @@ static int disas_coproc_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
* runtime; this may result in an exception.
*/
TCGv_ptr tmpptr;
+ TCGv_i32 tcg_syn;
+ uint32_t syndrome;
+
+ /* Note that since we are an implementation which takes an
+ * exception on a trapped conditional instruction only if the
+ * instruction passes its condition code check, we can take
+ * advantage of the clause in the ARM ARM that allows us to set
+ * the COND field in the instruction to 0xE in all cases.
+ * We could fish the actual condition out of the insn (ARM)
+ * or the condexec bits (Thumb) but it isn't necessary.
+ */
+ switch (cpnum) {
+ case 14:
+ if (is64) {
+ syndrome = syn_cp14_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
+ isread, s->thumb);
+ } else {
+ syndrome = syn_cp14_rt_trap(1, 0xe, opc1, opc2, crn, crm,
+ rt, isread, s->thumb);
+ }
+ break;
+ case 15:
+ if (is64) {
+ syndrome = syn_cp15_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
+ isread, s->thumb);
+ } else {
+ syndrome = syn_cp15_rt_trap(1, 0xe, opc1, opc2, crn, crm,
+ rt, isread, s->thumb);
+ }
+ break;
+ default:
+ /* ARMv8 defines that only coprocessors 14 and 15 exist,
+ * so this can only happen if this is an ARMv7 or earlier CPU,
+ * in which case the syndrome information won't actually be
+ * guest visible.
+ */
+ assert(!arm_feature(env, ARM_FEATURE_V8));
+ syndrome = syn_uncategorized();
+ break;
+ }
+
gen_set_pc_im(s, s->pc);
tmpptr = tcg_const_ptr(ri);
- gen_helper_access_check_cp_reg(cpu_env, tmpptr);
+ tcg_syn = tcg_const_i32(syndrome);
+ gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn);
tcg_temp_free_ptr(tmpptr);
+ tcg_temp_free_i32(tcg_syn);
}
/* Handle special cases first */
@@ -7100,7 +7215,7 @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
tcg_gen_extu_i32_i64(cpu_exclusive_test, addr);
tcg_gen_movi_i32(cpu_exclusive_info,
size | (rd << 4) | (rt << 8) | (rt2 << 12));
- gen_exception_insn(s, 4, EXCP_STREX);
+ gen_exception_internal_insn(s, 4, EXCP_STREX);
}
#else
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
@@ -7610,6 +7725,8 @@ static void disas_arm_insn(CPUARMState * env, DisasContext *s)
store_reg(s, rd, tmp);
break;
case 7:
+ {
+ int imm16 = extract32(insn, 0, 4) | (extract32(insn, 8, 12) << 4);
/* SMC instruction (op1 == 3)
and undefined instructions (op1 == 0 || op1 == 2)
will trap */
@@ -7618,8 +7735,9 @@ static void disas_arm_insn(CPUARMState * env, DisasContext *s)
}
/* bkpt */
ARCH(5);
- gen_exception_insn(s, 4, EXCP_BKPT);
+ gen_exception_insn(s, 4, EXCP_BKPT, syn_aa32_bkpt(imm16, false));
break;
+ }
case 0x8: /* signed multiply */
case 0xa:
case 0xc:
@@ -8312,27 +8430,39 @@ static void disas_arm_insn(CPUARMState * env, DisasContext *s)
if (insn & (1 << 5))
gen_swap_half(tmp2);
gen_smul_dual(tmp, tmp2);
- if (insn & (1 << 6)) {
- /* This subtraction cannot overflow. */
- tcg_gen_sub_i32(tmp, tmp, tmp2);
- } else {
- /* This addition cannot overflow 32 bits;
- * however it may overflow considered as a signed
- * operation, in which case we must set the Q flag.
- */
- gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
- }
- tcg_temp_free_i32(tmp2);
if (insn & (1 << 22)) {
/* smlald, smlsld */
+ TCGv_i64 tmp64_2;
+
tmp64 = tcg_temp_new_i64();
+ tmp64_2 = tcg_temp_new_i64();
tcg_gen_ext_i32_i64(tmp64, tmp);
+ tcg_gen_ext_i32_i64(tmp64_2, tmp2);
tcg_temp_free_i32(tmp);
+ tcg_temp_free_i32(tmp2);
+ if (insn & (1 << 6)) {
+ tcg_gen_sub_i64(tmp64, tmp64, tmp64_2);
+ } else {
+ tcg_gen_add_i64(tmp64, tmp64, tmp64_2);
+ }
+ tcg_temp_free_i64(tmp64_2);
gen_addq(s, tmp64, rd, rn);
gen_storeq_reg(s, rd, rn, tmp64);
tcg_temp_free_i64(tmp64);
} else {
/* smuad, smusd, smlad, smlsd */
+ if (insn & (1 << 6)) {
+ /* This subtraction cannot overflow. */
+ tcg_gen_sub_i32(tmp, tmp, tmp2);
+ } else {
+ /* This addition cannot overflow 32 bits;
+ * however it may overflow considered as a
+ * signed operation, in which case we must set
+ * the Q flag.
+ */
+ gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
+ }
+ tcg_temp_free_i32(tmp2);
if (rd != 15)
{
tmp2 = load_reg(s, rd);
@@ -8626,11 +8756,12 @@ static void disas_arm_insn(CPUARMState * env, DisasContext *s)
case 0xf:
/* swi */
gen_set_pc_im(s, s->pc);
+ s->svc_imm = extract32(insn, 0, 24);
s->is_jmp = DISAS_SWI;
break;
default:
illegal_op:
- gen_exception_insn(s, 4, EXCP_UDEF);
+ gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized());
break;
}
}
@@ -10441,9 +10572,12 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
break;
case 0xe: /* bkpt */
+ {
+ int imm8 = extract32(insn, 0, 8);
ARCH(5);
- gen_exception_insn(s, 2, EXCP_BKPT);
+ gen_exception_insn(s, 2, EXCP_BKPT, syn_aa32_bkpt(imm8, true));
break;
+ }
case 0xa: /* rev */
ARCH(6);
@@ -10560,6 +10694,7 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
if (cond == 0xf) {
/* swi */
gen_set_pc_im(s, s->pc);
+ s->svc_imm = extract32(insn, 0, 8);
s->is_jmp = DISAS_SWI;
break;
}
@@ -10595,11 +10730,11 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
}
return;
undef32:
- gen_exception_insn(s, 4, EXCP_UDEF);
+ gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized());
return;
illegal_op:
undef:
- gen_exception_insn(s, 2, EXCP_UDEF);
+ gen_exception_insn(s, 2, EXCP_UDEF, syn_uncategorized());
}
/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
@@ -10649,11 +10784,13 @@ static inline void gen_intermediate_code_internal(ARMCPU *cpu,
#if !defined(CONFIG_USER_ONLY)
dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
#endif
+ dc->cpacr_fpen = ARM_TBFLAG_CPACR_FPEN(tb->flags);
dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
dc->cp_regs = cpu->cp_regs;
dc->current_pl = arm_current_pl(env);
+ dc->features = env->features;
cpu_F0s = tcg_temp_new_i32();
cpu_F1s = tcg_temp_new_i32();
@@ -10719,7 +10856,7 @@ static inline void gen_intermediate_code_internal(ARMCPU *cpu,
if (dc->pc >= 0xffff0000) {
/* We always get here via a jump, so know we are not in a
conditional execution block. */
- gen_exception(EXCP_KERNEL_TRAP);
+ gen_exception_internal(EXCP_KERNEL_TRAP);
dc->is_jmp = DISAS_UPDATE;
break;
}
@@ -10727,16 +10864,16 @@ static inline void gen_intermediate_code_internal(ARMCPU *cpu,
if (dc->pc >= 0xfffffff0 && IS_M(env)) {
/* We always get here via a jump, so know we are not in a
conditional execution block. */
- gen_exception(EXCP_EXCEPTION_EXIT);
+ gen_exception_internal(EXCP_EXCEPTION_EXIT);
dc->is_jmp = DISAS_UPDATE;
break;
}
#endif
- if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
- QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
+ if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
+ QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
if (bp->pc == dc->pc) {
- gen_exception_insn(dc, 0, EXCP_DEBUG);
+ gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
/* Advance PC so that clearing the breakpoint will
invalidate this TB. */
dc->pc += 2;
@@ -10803,7 +10940,7 @@ static inline void gen_intermediate_code_internal(ARMCPU *cpu,
if (dc->condjmp) {
/* FIXME: This can theoretically happen with self-modifying
code. */
- cpu_abort(env, "IO on conditional branch instruction");
+ cpu_abort(cs, "IO on conditional branch instruction");
}
gen_io_end();
}
@@ -10816,9 +10953,9 @@ static inline void gen_intermediate_code_internal(ARMCPU *cpu,
if (dc->condjmp) {
gen_set_condexec(dc);
if (dc->is_jmp == DISAS_SWI) {
- gen_exception(EXCP_SWI);
+ gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb));
} else {
- gen_exception(EXCP_DEBUG);
+ gen_exception_internal(EXCP_DEBUG);
}
gen_set_label(dc->condlabel);
}
@@ -10828,11 +10965,11 @@ static inline void gen_intermediate_code_internal(ARMCPU *cpu,
}
gen_set_condexec(dc);
if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
- gen_exception(EXCP_SWI);
+ gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb));
} else {
/* FIXME: Single stepping a WFI insn will not halt
the CPU. */
- gen_exception(EXCP_DEBUG);
+ gen_exception_internal(EXCP_DEBUG);
}
} else {
/* While branches must always occur at the end of an IT block,
@@ -10864,7 +11001,7 @@ static inline void gen_intermediate_code_internal(ARMCPU *cpu,
gen_helper_wfe(cpu_env);
break;
case DISAS_SWI:
- gen_exception(EXCP_SWI);
+ gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb));
break;
}
if (dc->condjmp) {
@@ -10922,6 +11059,11 @@ void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
int i;
uint32_t psr;
+ if (is_a64(env)) {
+ aarch64_cpu_dump_state(cs, f, cpu_fprintf, flags);
+ return;
+ }
+
for(i=0;i<16;i++) {
cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
if ((i % 4) == 3)
diff --git a/target-arm/translate.h b/target-arm/translate.h
index 2f491f9ff6..34328f4660 100644
--- a/target-arm/translate.h
+++ b/target-arm/translate.h
@@ -20,12 +20,26 @@ typedef struct DisasContext {
#if !defined(CONFIG_USER_ONLY)
int user;
#endif
- int vfp_enabled;
+ bool cpacr_fpen; /* FP enabled via CPACR.FPEN */
+ bool vfp_enabled; /* FP enabled via FPSCR.EN */
int vec_len;
int vec_stride;
+ /* Immediate value in AArch32 SVC insn; must be set if is_jmp == DISAS_SWI
+ * so that the top-level loop can generate correct syndrome information.
+ */
+ uint32_t svc_imm;
int aarch64;
int current_pl;
GHashTable *cp_regs;
+ uint64_t features; /* CPU features bits */
+ /* Because unallocated encodings generate different exception syndrome
+ * information from traps due to FP being disabled, we can't do a single
+ * "is fp access disabled" check at a high level in the decode tree.
+ * To help in catching bugs where the access check was forgotten in some
+ * code path, we set this flag when the access check is done, and assert
+ * that it is set at the point where we actually touch the FP regs.
+ */
+ bool fp_access_checked;
#define TMP_A64_MAX 16
int tmp_a64_count;
TCGv_i64 tmp_a64[TMP_A64_MAX];
@@ -33,6 +47,11 @@ typedef struct DisasContext {
extern TCGv_ptr cpu_env;
+static inline int arm_dc_feature(DisasContext *dc, int feature)
+{
+ return (dc->features & (1ULL << feature)) != 0;
+}
+
/* target-specific extra values for is_jmp */
/* These instructions trap after executing, so the A32/T32 decoder must
* defer them until after the conditional execution state has been updated.
@@ -53,6 +72,8 @@ void gen_intermediate_code_internal_a64(ARMCPU *cpu,
TranslationBlock *tb,
bool search_pc);
void gen_a64_set_pc_im(uint64_t val);
+void aarch64_cpu_dump_state(CPUState *cs, FILE *f,
+ fprintf_function cpu_fprintf, int flags);
#else
static inline void a64_translate_init(void)
{
@@ -67,6 +88,12 @@ static inline void gen_intermediate_code_internal_a64(ARMCPU *cpu,
static inline void gen_a64_set_pc_im(uint64_t val)
{
}
+
+static inline void aarch64_cpu_dump_state(CPUState *cs, FILE *f,
+ fprintf_function cpu_fprintf,
+ int flags)
+{
+}
#endif
void arm_gen_test_cc(int cc, int label);
diff --git a/target-cris/cpu.c b/target-cris/cpu.c
index 1ac8124d8c..20d8809699 100644
--- a/target-cris/cpu.c
+++ b/target-cris/cpu.c
@@ -33,6 +33,11 @@ static void cris_cpu_set_pc(CPUState *cs, vaddr value)
cpu->env.pc = value;
}
+static bool cris_cpu_has_work(CPUState *cs)
+{
+ return cs->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_NMI);
+}
+
/* CPUClass::reset() */
static void cris_cpu_reset(CPUState *s)
{
@@ -44,9 +49,9 @@ static void cris_cpu_reset(CPUState *s)
ccc->parent_reset(s);
vr = env->pregs[PR_VR];
- memset(env, 0, offsetof(CPUCRISState, breakpoints));
+ memset(env, 0, offsetof(CPUCRISState, load_info));
env->pregs[PR_VR] = vr;
- tlb_flush(env, 1);
+ tlb_flush(s, 1);
#if defined(CONFIG_USER_ONLY)
/* start in user mode with interrupts enabled. */
@@ -84,18 +89,7 @@ static ObjectClass *cris_cpu_class_by_name(const char *cpu_model)
CRISCPU *cpu_cris_init(const char *cpu_model)
{
- CRISCPU *cpu;
- ObjectClass *oc;
-
- oc = cris_cpu_class_by_name(cpu_model);
- if (oc == NULL) {
- return NULL;
- }
- cpu = CRIS_CPU(object_new(object_class_get_name(oc)));
-
- object_property_set_bool(OBJECT(cpu), true, "realized", NULL);
-
- return cpu;
+ return CRIS_CPU(cpu_generic_init(TYPE_CRIS_CPU, cpu_model));
}
/* Sort alphabetically by VR. */
@@ -283,12 +277,15 @@ static void cris_cpu_class_init(ObjectClass *oc, void *data)
cc->reset = cris_cpu_reset;
cc->class_by_name = cris_cpu_class_by_name;
+ cc->has_work = cris_cpu_has_work;
cc->do_interrupt = cris_cpu_do_interrupt;
cc->dump_state = cris_cpu_dump_state;
cc->set_pc = cris_cpu_set_pc;
cc->gdb_read_register = cris_cpu_gdb_read_register;
cc->gdb_write_register = cris_cpu_gdb_write_register;
-#ifndef CONFIG_USER_ONLY
+#ifdef CONFIG_USER_ONLY
+ cc->handle_mmu_fault = cris_cpu_handle_mmu_fault;
+#else
cc->get_phys_page_debug = cris_cpu_get_phys_page_debug;
#endif
diff --git a/target-cris/cpu.h b/target-cris/cpu.h
index 1d7d80d3dc..b88c147518 100644
--- a/target-cris/cpu.h
+++ b/target-cris/cpu.h
@@ -171,8 +171,8 @@ typedef struct CPUCRISState {
CPU_COMMON
- /* Members after CPU_COMMON are preserved across resets. */
- void *load_info;
+ /* Members from load_info on are preserved across resets. */
+ void *load_info;
} CPUCRISState;
#include "cpu-qom.h"
@@ -247,9 +247,8 @@ static inline int cpu_mmu_index (CPUCRISState *env)
return !!(env->pregs[PR_CCS] & U_FLAG);
}
-int cpu_cris_handle_mmu_fault(CPUCRISState *env, target_ulong address, int rw,
+int cris_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw,
int mmu_idx);
-#define cpu_handle_mmu_fault cpu_cris_handle_mmu_fault
/* Support function regs. */
#define SFR_RW_GC_CFG 0][0
@@ -276,11 +275,6 @@ static inline void cpu_get_tb_cpu_state(CPUCRISState *env, target_ulong *pc,
#define cpu_list cris_cpu_list
void cris_cpu_list(FILE *f, fprintf_function cpu_fprintf);
-static inline bool cpu_has_work(CPUState *cpu)
-{
- return cpu->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_NMI);
-}
-
#include "exec/exec-all.h"
#endif
diff --git a/target-cris/helper.c b/target-cris/helper.c
index c940582132..4092d279ba 100644
--- a/target-cris/helper.c
+++ b/target-cris/helper.c
@@ -41,7 +41,7 @@ void cris_cpu_do_interrupt(CPUState *cs)
CRISCPU *cpu = CRIS_CPU(cs);
CPUCRISState *env = &cpu->env;
- env->exception_index = -1;
+ cs->exception_index = -1;
env->pregs[PR_ERP] = env->pc;
}
@@ -50,14 +50,14 @@ void crisv10_cpu_do_interrupt(CPUState *cs)
cris_cpu_do_interrupt(cs);
}
-int cpu_cris_handle_mmu_fault(CPUCRISState * env, target_ulong address, int rw,
+int cris_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
int mmu_idx)
{
- CRISCPU *cpu = cris_env_get_cpu(env);
+ CRISCPU *cpu = CRIS_CPU(cs);
- env->exception_index = 0xaa;
- env->pregs[PR_EDA] = address;
- cpu_dump_state(CPU(cpu), stderr, fprintf, 0);
+ cs->exception_index = 0xaa;
+ cpu->env.pregs[PR_EDA] = address;
+ cpu_dump_state(cs, stderr, fprintf, 0);
return 1;
}
@@ -73,28 +73,30 @@ static void cris_shift_ccs(CPUCRISState *env)
env->pregs[PR_CCS] = ccs;
}
-int cpu_cris_handle_mmu_fault(CPUCRISState *env, target_ulong address, int rw,
+int cris_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
int mmu_idx)
{
- D(CPUState *cpu = CPU(cris_env_get_cpu(env)));
+ CRISCPU *cpu = CRIS_CPU(cs);
+ CPUCRISState *env = &cpu->env;
struct cris_mmu_result res;
int prot, miss;
int r = -1;
target_ulong phy;
- D(printf("%s addr=%x pc=%x rw=%x\n", __func__, address, env->pc, rw));
+ D(printf("%s addr=%" VADDR_PRIx " pc=%x rw=%x\n",
+ __func__, address, env->pc, rw));
miss = cris_mmu_translate(&res, env, address & TARGET_PAGE_MASK,
rw, mmu_idx, 0);
if (miss) {
- if (env->exception_index == EXCP_BUSFAULT) {
- cpu_abort(env,
+ if (cs->exception_index == EXCP_BUSFAULT) {
+ cpu_abort(cs,
"CRIS: Illegal recursive bus fault."
- "addr=%x rw=%d\n",
+ "addr=%" VADDR_PRIx " rw=%d\n",
address, rw);
}
env->pregs[PR_EDA] = address;
- env->exception_index = EXCP_BUSFAULT;
+ cs->exception_index = EXCP_BUSFAULT;
env->fault_vector = res.bf_vec;
r = 1;
} else {
@@ -104,13 +106,13 @@ int cpu_cris_handle_mmu_fault(CPUCRISState *env, target_ulong address, int rw,
*/
phy = res.phy & ~0x80000000;
prot = res.prot;
- tlb_set_page(env, address & TARGET_PAGE_MASK, phy,
+ tlb_set_page(cs, address & TARGET_PAGE_MASK, phy,
prot, mmu_idx, TARGET_PAGE_SIZE);
r = 0;
}
if (r > 0) {
- D_LOG("%s returns %d irqreq=%x addr=%x phy=%x vec=%x pc=%x\n",
- __func__, r, cpu->interrupt_request, address, res.phy,
+ D_LOG("%s returns %d irqreq=%x addr=%" VADDR_PRIx " phy=%x vec=%x"
+ " pc=%x\n", __func__, r, cs->interrupt_request, address, res.phy,
res.bf_vec, env->pc);
}
return r;
@@ -123,16 +125,16 @@ void crisv10_cpu_do_interrupt(CPUState *cs)
int ex_vec = -1;
D_LOG("exception index=%d interrupt_req=%d\n",
- env->exception_index,
+ cs->exception_index,
cs->interrupt_request);
if (env->dslot) {
/* CRISv10 never takes interrupts while in a delay-slot. */
- cpu_abort(env, "CRIS: Interrupt on delay-slot\n");
+ cpu_abort(cs, "CRIS: Interrupt on delay-slot\n");
}
assert(!(env->pregs[PR_CCS] & PFIX_FLAG));
- switch (env->exception_index) {
+ switch (cs->exception_index) {
case EXCP_BREAK:
/* These exceptions are generated by the core itself.
ERP should point to the insn following the brk. */
@@ -148,7 +150,7 @@ void crisv10_cpu_do_interrupt(CPUState *cs)
break;
case EXCP_BUSFAULT:
- cpu_abort(env, "Unhandled busfault");
+ cpu_abort(cs, "Unhandled busfault");
break;
default:
@@ -185,10 +187,10 @@ void cris_cpu_do_interrupt(CPUState *cs)
int ex_vec = -1;
D_LOG("exception index=%d interrupt_req=%d\n",
- env->exception_index,
+ cs->exception_index,
cs->interrupt_request);
- switch (env->exception_index) {
+ switch (cs->exception_index) {
case EXCP_BREAK:
/* These exceptions are generated by the core itself.
ERP should point to the insn following the brk. */
@@ -251,7 +253,7 @@ void cris_cpu_do_interrupt(CPUState *cs)
/* Clear the exception_index to avoid spurious hw_aborts for recursive
bus faults. */
- env->exception_index = -1;
+ cs->exception_index = -1;
D_LOG("%s isr=%x vec=%x ccs=%x pid=%d erp=%x\n",
__func__, env->pc, ex_vec,
diff --git a/target-cris/mmu.c b/target-cris/mmu.c
index 512e28b481..1c95a415f2 100644
--- a/target-cris/mmu.c
+++ b/target-cris/mmu.c
@@ -290,6 +290,7 @@ static int cris_mmu_translate_page(struct cris_mmu_result *res,
void cris_mmu_flush_pid(CPUCRISState *env, uint32_t pid)
{
+ CRISCPU *cpu = cris_env_get_cpu(env);
target_ulong vaddr;
unsigned int idx;
uint32_t lo, hi;
@@ -315,7 +316,7 @@ void cris_mmu_flush_pid(CPUCRISState *env, uint32_t pid)
vaddr = tlb_vpn << TARGET_PAGE_BITS;
D_LOG("flush pid=%x vaddr=%x\n",
pid, vaddr);
- tlb_flush_page(env, vaddr);
+ tlb_flush_page(CPU(cpu), vaddr);
}
}
}
diff --git a/target-cris/op_helper.c b/target-cris/op_helper.c
index b580513848..bd9a583f9a 100644
--- a/target-cris/op_helper.c
+++ b/target-cris/op_helper.c
@@ -54,23 +54,25 @@
/* Try to fill the TLB and return an exception if error. If retaddr is
NULL, it means that the function was called in C code (i.e. not
from generated code or from helper.c) */
-void tlb_fill(CPUCRISState *env, target_ulong addr, int is_write, int mmu_idx,
+void tlb_fill(CPUState *cs, target_ulong addr, int is_write, int mmu_idx,
uintptr_t retaddr)
{
+ CRISCPU *cpu = CRIS_CPU(cs);
+ CPUCRISState *env = &cpu->env;
int ret;
D_LOG("%s pc=%x tpc=%x ra=%p\n", __func__,
env->pc, env->pregs[PR_EDA], (void *)retaddr);
- ret = cpu_cris_handle_mmu_fault(env, addr, is_write, mmu_idx);
+ ret = cris_cpu_handle_mmu_fault(cs, addr, is_write, mmu_idx);
if (unlikely(ret)) {
if (retaddr) {
/* now we have a real cpu fault */
- if (cpu_restore_state(env, retaddr)) {
+ if (cpu_restore_state(cs, retaddr)) {
/* Evaluate flags after retranslation. */
helper_top_evaluate_flags(env);
}
}
- cpu_loop_exit(env);
+ cpu_loop_exit(cs);
}
}
@@ -78,8 +80,10 @@ void tlb_fill(CPUCRISState *env, target_ulong addr, int is_write, int mmu_idx,
void helper_raise_exception(CPUCRISState *env, uint32_t index)
{
- env->exception_index = index;
- cpu_loop_exit(env);
+ CPUState *cs = CPU(cris_env_get_cpu(env));
+
+ cs->exception_index = index;
+ cpu_loop_exit(cs);
}
void helper_tlb_flush_pid(CPUCRISState *env, uint32_t pid)
@@ -94,8 +98,11 @@ void helper_tlb_flush_pid(CPUCRISState *env, uint32_t pid)
void helper_spc_write(CPUCRISState *env, uint32_t new_spc)
{
#if !defined(CONFIG_USER_ONLY)
- tlb_flush_page(env, env->pregs[PR_SPC]);
- tlb_flush_page(env, new_spc);
+ CRISCPU *cpu = cris_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
+
+ tlb_flush_page(cs, env->pregs[PR_SPC]);
+ tlb_flush_page(cs, new_spc);
#endif
}
@@ -110,6 +117,9 @@ void helper_dump(uint32_t a0, uint32_t a1, uint32_t a2)
void helper_movl_sreg_reg(CPUCRISState *env, uint32_t sreg, uint32_t reg)
{
+#if !defined(CONFIG_USER_ONLY)
+ CRISCPU *cpu = cris_env_get_cpu(env);
+#endif
uint32_t srs;
srs = env->pregs[PR_SRS];
srs &= 3;
@@ -151,7 +161,7 @@ void helper_movl_sreg_reg(CPUCRISState *env, uint32_t sreg, uint32_t reg)
D_LOG("tlb flush vaddr=%x v=%d pc=%x\n",
vaddr, tlb_v, env->pc);
if (tlb_v) {
- tlb_flush_page(env, vaddr);
+ tlb_flush_page(CPU(cpu), vaddr);
}
}
}
diff --git a/target-cris/translate.c b/target-cris/translate.c
index f990d591c7..724f920e92 100644
--- a/target-cris/translate.c
+++ b/target-cris/translate.c
@@ -74,7 +74,7 @@ static TCGv env_pc;
/* This is the state at translation time. */
typedef struct DisasContext {
- CPUCRISState *env;
+ CRISCPU *cpu;
target_ulong pc, ppc;
/* Decoder. */
@@ -129,7 +129,7 @@ static void gen_BUG(DisasContext *dc, const char *file, int line)
{
printf("BUG: pc=%x %s %d\n", dc->pc, file, line);
qemu_log("BUG: pc=%x %s %d\n", dc->pc, file, line);
- cpu_abort(dc->env, "%s:%d\n", file, line);
+ cpu_abort(CPU(dc->cpu), "%s:%d\n", file, line);
}
static const char *regnames[] =
@@ -272,7 +272,7 @@ static int cris_fetch(CPUCRISState *env, DisasContext *dc, uint32_t addr,
break;
}
default:
- cpu_abort(dc->env, "Invalid fetch size %d\n", size);
+ cpu_abort(CPU(dc->cpu), "Invalid fetch size %d\n", size);
break;
}
return r;
@@ -1125,7 +1125,7 @@ static inline void cris_prepare_jmp (DisasContext *dc, unsigned int type)
static void gen_load64(DisasContext *dc, TCGv_i64 dst, TCGv addr)
{
- int mem_index = cpu_mmu_index(dc->env);
+ int mem_index = cpu_mmu_index(&dc->cpu->env);
/* If we get a fault on a delayslot we must keep the jmp state in
the cpu-state to be able to re-execute the jmp. */
@@ -1139,7 +1139,7 @@ static void gen_load64(DisasContext *dc, TCGv_i64 dst, TCGv addr)
static void gen_load(DisasContext *dc, TCGv dst, TCGv addr,
unsigned int size, int sign)
{
- int mem_index = cpu_mmu_index(dc->env);
+ int mem_index = cpu_mmu_index(&dc->cpu->env);
/* If we get a fault on a delayslot we must keep the jmp state in
the cpu-state to be able to re-execute the jmp. */
@@ -1154,7 +1154,7 @@ static void gen_load(DisasContext *dc, TCGv dst, TCGv addr,
static void gen_store (DisasContext *dc, TCGv addr, TCGv val,
unsigned int size)
{
- int mem_index = cpu_mmu_index(dc->env);
+ int mem_index = cpu_mmu_index(&dc->cpu->env);
/* If we get a fault on a delayslot we must keep the jmp state in
the cpu-state to be able to re-execute the jmp. */
@@ -3089,10 +3089,11 @@ static unsigned int crisv32_decoder(CPUCRISState *env, DisasContext *dc)
static void check_breakpoint(CPUCRISState *env, DisasContext *dc)
{
+ CPUState *cs = CPU(cris_env_get_cpu(env));
CPUBreakpoint *bp;
- if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
- QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
+ if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
+ QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
if (bp->pc == dc->pc) {
cris_evaluate_flags(dc);
tcg_gen_movi_tl(env_pc, dc->pc);
@@ -3169,7 +3170,7 @@ gen_intermediate_code_internal(CRISCPU *cpu, TranslationBlock *tb,
* delayslot, like in real hw.
*/
pc_start = tb->pc & ~1;
- dc->env = env;
+ dc->cpu = cpu;
dc->tb = tb;
gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
@@ -3390,7 +3391,7 @@ gen_intermediate_code_internal(CRISCPU *cpu, TranslationBlock *tb,
#if !DISAS_CRIS
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
log_target_disas(env, pc_start, dc->pc - pc_start,
- dc->env->pregs[PR_VR]);
+ env->pregs[PR_VR]);
qemu_log("\nisize=%d osize=%td\n",
dc->pc - pc_start, tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf);
}
diff --git a/target-cris/translate_v10.c b/target-cris/translate_v10.c
index d6ef084483..2ad2b142a9 100644
--- a/target-cris/translate_v10.c
+++ b/target-cris/translate_v10.c
@@ -96,7 +96,7 @@ static void gen_store_v10_conditional(DisasContext *dc, TCGv addr, TCGv val,
static void gen_store_v10(DisasContext *dc, TCGv addr, TCGv val,
unsigned int size)
{
- int mem_index = cpu_mmu_index(dc->env);
+ int mem_index = cpu_mmu_index(&dc->cpu->env);
/* If we get a fault on a delayslot we must keep the jmp state in
the cpu-state to be able to re-execute the jmp. */
@@ -340,7 +340,7 @@ static unsigned int dec10_quick_imm(DisasContext *dc)
default:
LOG_DIS("pc=%x mode=%x quickimm %d r%d r%d\n",
dc->pc, dc->mode, dc->opcode, dc->src, dc->dst);
- cpu_abort(dc->env, "Unhandled quickimm\n");
+ cpu_abort(CPU(dc->cpu), "Unhandled quickimm\n");
break;
}
return 2;
@@ -651,7 +651,7 @@ static unsigned int dec10_reg(DisasContext *dc)
case 2: tmp = 1; break;
case 1: tmp = 0; break;
default:
- cpu_abort(dc->env, "Unhandled BIAP");
+ cpu_abort(CPU(dc->cpu), "Unhandled BIAP");
break;
}
@@ -669,7 +669,7 @@ static unsigned int dec10_reg(DisasContext *dc)
default:
LOG_DIS("pc=%x reg %d r%d r%d\n", dc->pc,
dc->opcode, dc->src, dc->dst);
- cpu_abort(dc->env, "Unhandled opcode");
+ cpu_abort(CPU(dc->cpu), "Unhandled opcode");
break;
}
} else {
@@ -745,7 +745,7 @@ static unsigned int dec10_reg(DisasContext *dc)
default:
LOG_DIS("pc=%x reg %d r%d r%d\n", dc->pc,
dc->opcode, dc->src, dc->dst);
- cpu_abort(dc->env, "Unhandled opcode");
+ cpu_abort(CPU(dc->cpu), "Unhandled opcode");
break;
}
}
@@ -1006,7 +1006,7 @@ static int dec10_bdap_m(CPUCRISState *env, DisasContext *dc, int size)
if (!dc->postinc && (dc->ir & (1 << 11))) {
int simm = dc->ir & 0xff;
- /* cpu_abort(dc->env, "Unhandled opcode"); */
+ /* cpu_abort(CPU(dc->cpu), "Unhandled opcode"); */
/* sign extended. */
simm = (int8_t)simm;
@@ -1105,7 +1105,7 @@ static unsigned int dec10_ind(CPUCRISState *env, DisasContext *dc)
default:
LOG_DIS("pc=%x var-ind.%d %d r%d r%d\n",
dc->pc, size, dc->opcode, dc->src, dc->dst);
- cpu_abort(dc->env, "Unhandled opcode");
+ cpu_abort(CPU(dc->cpu), "Unhandled opcode");
break;
}
return insn_len;
@@ -1198,7 +1198,7 @@ static unsigned int dec10_ind(CPUCRISState *env, DisasContext *dc)
break;
default:
LOG_DIS("ERROR pc=%x opcode=%d\n", dc->pc, dc->opcode);
- cpu_abort(dc->env, "Unhandled opcode");
+ cpu_abort(CPU(dc->cpu), "Unhandled opcode");
break;
}
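
The cpu_abort() conversions above depend on getting from a CPUCRISState back to its owning QOM object. A hedged sketch of the helper these call sites rely on (the real definition lives in target-cris/cpu.h; the exact form shown is an assumption):

    static inline CRISCPU *cris_env_get_cpu(CPUCRISState *env)
    {
        /* env is embedded inside CRISCPU, so container_of() recovers the object */
        return container_of(env, CRISCPU, env);
    }

    /* CPU(dc->cpu) then upcasts the CRISCPU object to the generic CPUState
     * that cpu_abort() now expects. */
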
diff --git a/target-i386/cpu-qom.h b/target-i386/cpu-qom.h
index 722f11a04f..e9b3d577b3 100644
--- a/target-i386/cpu-qom.h
+++ b/target-i386/cpu-qom.h
@@ -38,7 +38,17 @@
OBJECT_GET_CLASS(X86CPUClass, (obj), TYPE_X86_CPU)
/**
+ * X86CPUDefinition:
+ *
+ * CPU model definition data that was not converted to QOM per-subclass
+ * property defaults yet.
+ */
+typedef struct X86CPUDefinition X86CPUDefinition;
+
+/**
* X86CPUClass:
+ * @cpu_def: CPU model definition
+ * @kvm_required: Whether CPU model requires KVM to be enabled.
* @parent_realize: The parent class' realize handler.
* @parent_reset: The parent class' reset handler.
*
@@ -49,6 +59,11 @@ typedef struct X86CPUClass {
CPUClass parent_class;
/*< public >*/
+ /* Should be eventually replaced by subclass-specific property defaults. */
+ X86CPUDefinition *cpu_def;
+
+ bool kvm_required;
+
DeviceRealize parent_realize;
void (*parent_reset)(CPUState *cpu);
} X86CPUClass;
diff --git a/target-i386/cpu.c b/target-i386/cpu.c
index 5cfe4506e0..8fd1497dc4 100644
--- a/target-i386/cpu.c
+++ b/target-i386/cpu.c
@@ -316,7 +316,7 @@ typedef struct X86RegisterInfo32 {
#define REGISTER(reg) \
[R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
-X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
+static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
REGISTER(EAX),
REGISTER(ECX),
REGISTER(EDX),
@@ -358,17 +358,23 @@ typedef struct model_features_t {
FeatureWord feat_word;
} model_features_t;
-static uint32_t kvm_default_features = (1 << KVM_FEATURE_CLOCKSOURCE) |
+/* KVM-specific features that are automatically added to all CPU models
+ * when KVM is enabled.
+ */
+static uint32_t kvm_default_features[FEATURE_WORDS] = {
+ [FEAT_KVM] = (1 << KVM_FEATURE_CLOCKSOURCE) |
(1 << KVM_FEATURE_NOP_IO_DELAY) |
(1 << KVM_FEATURE_CLOCKSOURCE2) |
(1 << KVM_FEATURE_ASYNC_PF) |
(1 << KVM_FEATURE_STEAL_TIME) |
(1 << KVM_FEATURE_PV_EOI) |
- (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT);
+ (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT),
+ [FEAT_1_ECX] = CPUID_EXT_X2APIC,
+};
-void disable_kvm_pv_eoi(void)
+void x86_cpu_compat_disable_kvm_features(FeatureWord w, uint32_t features)
{
- kvm_default_features &= ~(1UL << KVM_FEATURE_PV_EOI);
+ kvm_default_features[w] &= ~features;
}
void host_cpuid(uint32_t function, uint32_t count,
@@ -484,7 +490,35 @@ static void add_flagname_to_bitmaps(const char *flagname,
}
}
-typedef struct x86_def_t {
+/* CPU class name definitions: */
+
+#define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
+#define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)
+
+/* Return type name for a given CPU model name
+ * Caller is responsible for freeing the returned string.
+ */
+static char *x86_cpu_type_name(const char *model_name)
+{
+ return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
+}
+
+static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
+{
+ ObjectClass *oc;
+ char *typename;
+
+ if (cpu_model == NULL) {
+ return NULL;
+ }
+
+ typename = x86_cpu_type_name(cpu_model);
+ oc = object_class_by_name(typename);
+ g_free(typename);
+ return oc;
+}
+
+struct X86CPUDefinition {
const char *name;
uint32_t level;
uint32_t xlevel;
@@ -497,7 +531,7 @@ typedef struct x86_def_t {
FeatureWordArray features;
char model_id[48];
bool cache_info_passthrough;
-} x86_def_t;
+};
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
@@ -547,9 +581,7 @@ typedef struct x86_def_t {
CPUID_7_0_EBX_ERMS, CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
CPUID_7_0_EBX_RDSEED */
-/* built-in CPU model definitions
- */
-static x86_def_t builtin_x86_defs[] = {
+static X86CPUDefinition builtin_x86_defs[] = {
{
.name = "qemu64",
.level = 4,
@@ -1108,7 +1140,7 @@ static x86_def_t builtin_x86_defs[] = {
void x86_cpu_compat_set_features(const char *cpu_model, FeatureWord w,
uint32_t feat_add, uint32_t feat_remove)
{
- x86_def_t *def;
+ X86CPUDefinition *def;
int i;
for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
def = &builtin_x86_defs[i];
@@ -1119,6 +1151,8 @@ void x86_cpu_compat_set_features(const char *cpu_model, FeatureWord w,
}
}
+#ifdef CONFIG_KVM
+
static int cpu_x86_fill_model_id(char *str)
{
uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
@@ -1134,44 +1168,68 @@ static int cpu_x86_fill_model_id(char *str)
return 0;
}
-/* Fill a x86_def_t struct with information about the host CPU, and
- * the CPU features supported by the host hardware + host kernel
+static X86CPUDefinition host_cpudef;
+
+/* class_init for the "host" CPU model
*
- * This function may be called only if KVM is enabled.
+ * This function may be called before KVM is initialized.
*/
-static void kvm_cpu_fill_host(x86_def_t *x86_cpu_def)
+static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
{
- KVMState *s = kvm_state;
+ X86CPUClass *xcc = X86_CPU_CLASS(oc);
uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
- assert(kvm_enabled());
+ xcc->kvm_required = true;
- x86_cpu_def->name = "host";
- x86_cpu_def->cache_info_passthrough = true;
host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
- x86_cpu_vendor_words2str(x86_cpu_def->vendor, ebx, edx, ecx);
+ x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);
host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
- x86_cpu_def->family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
- x86_cpu_def->model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
- x86_cpu_def->stepping = eax & 0x0F;
+ host_cpudef.family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
+ host_cpudef.model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
+ host_cpudef.stepping = eax & 0x0F;
- x86_cpu_def->level = kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
- x86_cpu_def->xlevel = kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
- x86_cpu_def->xlevel2 =
- kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
+ cpu_x86_fill_model_id(host_cpudef.model_id);
- cpu_x86_fill_model_id(x86_cpu_def->model_id);
+ xcc->cpu_def = &host_cpudef;
+ host_cpudef.cache_info_passthrough = true;
+ /* level, xlevel, xlevel2, and the feature words are initialized on
+ * instance_init, because they require KVM to be initialized.
+ */
+}
+
+static void host_x86_cpu_initfn(Object *obj)
+{
+ X86CPU *cpu = X86_CPU(obj);
+ CPUX86State *env = &cpu->env;
+ KVMState *s = kvm_state;
FeatureWord w;
+
+ assert(kvm_enabled());
+
+ env->cpuid_level = kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
+ env->cpuid_xlevel = kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
+ env->cpuid_xlevel2 = kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
+
for (w = 0; w < FEATURE_WORDS; w++) {
FeatureWordInfo *wi = &feature_word_info[w];
- x86_cpu_def->features[w] =
+ env->features[w] =
kvm_arch_get_supported_cpuid(s, wi->cpuid_eax, wi->cpuid_ecx,
wi->cpuid_reg);
}
+ object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
}
+static const TypeInfo host_x86_cpu_type_info = {
+ .name = X86_CPU_TYPE_NAME("host"),
+ .parent = TYPE_X86_CPU,
+ .instance_init = host_x86_cpu_initfn,
+ .class_init = host_x86_cpu_class_init,
+};
+
+#endif
+
static int unavailable_host_feature(FeatureWordInfo *f, uint32_t mask)
{
int i;
@@ -1582,32 +1640,6 @@ static PropertyInfo qdev_prop_spinlocks = {
.set = x86_set_hv_spinlocks,
};
-static int cpu_x86_find_by_name(X86CPU *cpu, x86_def_t *x86_cpu_def,
- const char *name)
-{
- x86_def_t *def;
- int i;
-
- if (name == NULL) {
- return -1;
- }
- if (kvm_enabled() && strcmp(name, "host") == 0) {
- kvm_cpu_fill_host(x86_cpu_def);
- object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
- return 0;
- }
-
- for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
- def = &builtin_x86_defs[i];
- if (strcmp(name, def->name) == 0) {
- memcpy(x86_cpu_def, def, sizeof(*def));
- return 0;
- }
- }
-
- return -1;
-}
-
/* Convert all '_' in a feature string option name to '-', to make feature
* name conform to QOM property naming rule, which uses '-' instead of '_'.
*/
@@ -1620,8 +1652,10 @@ static inline void feat2prop(char *s)
/* Parse "+feature,-feature,feature=foo" CPU feature string
*/
-static void cpu_x86_parse_featurestr(X86CPU *cpu, char *features, Error **errp)
+static void x86_cpu_parse_featurestr(CPUState *cs, char *features,
+ Error **errp)
{
+ X86CPU *cpu = X86_CPU(cs);
char *featurestr; /* Single 'key=value" string being parsed */
/* Features to be added */
FeatureWordArray plus_features = { 0 };
@@ -1629,6 +1663,7 @@ static void cpu_x86_parse_featurestr(X86CPU *cpu, char *features, Error **errp)
FeatureWordArray minus_features = { 0 };
uint32_t numvalue;
CPUX86State *env = &cpu->env;
+ Error *local_err = NULL;
featurestr = features ? strtok(features, ",") : NULL;
@@ -1647,16 +1682,16 @@ static void cpu_x86_parse_featurestr(X86CPU *cpu, char *features, Error **errp)
numvalue = strtoul(val, &err, 0);
if (!*val || *err) {
- error_setg(errp, "bad numerical value %s", val);
+ error_setg(&local_err, "bad numerical value %s", val);
goto out;
}
if (numvalue < 0x80000000) {
- fprintf(stderr, "xlevel value shall always be >= 0x80000000"
- ", fixup will be removed in future versions\n");
+ error_report("xlevel value shall always be >= 0x80000000"
+ ", fixup will be removed in future versions");
numvalue += 0x80000000;
}
snprintf(num, sizeof(num), "%" PRIu32, numvalue);
- object_property_parse(OBJECT(cpu), num, featurestr, errp);
+ object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
} else if (!strcmp(featurestr, "tsc-freq")) {
int64_t tsc_freq;
char *err;
@@ -1665,36 +1700,38 @@ static void cpu_x86_parse_featurestr(X86CPU *cpu, char *features, Error **errp)
tsc_freq = strtosz_suffix_unit(val, &err,
STRTOSZ_DEFSUFFIX_B, 1000);
if (tsc_freq < 0 || *err) {
- error_setg(errp, "bad numerical value %s", val);
+ error_setg(&local_err, "bad numerical value %s", val);
goto out;
}
snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
- object_property_parse(OBJECT(cpu), num, "tsc-frequency", errp);
+ object_property_parse(OBJECT(cpu), num, "tsc-frequency",
+ &local_err);
} else if (!strcmp(featurestr, "hv-spinlocks")) {
char *err;
const int min = 0xFFF;
char num[32];
numvalue = strtoul(val, &err, 0);
if (!*val || *err) {
- error_setg(errp, "bad numerical value %s", val);
+ error_setg(&local_err, "bad numerical value %s", val);
goto out;
}
if (numvalue < min) {
- fprintf(stderr, "hv-spinlocks value shall always be >= 0x%x"
- ", fixup will be removed in future versions\n",
+ error_report("hv-spinlocks value shall always be >= 0x%x"
+ ", fixup will be removed in future versions",
min);
numvalue = min;
}
snprintf(num, sizeof(num), "%" PRId32, numvalue);
- object_property_parse(OBJECT(cpu), num, featurestr, errp);
+ object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
} else {
- object_property_parse(OBJECT(cpu), val, featurestr, errp);
+ object_property_parse(OBJECT(cpu), val, featurestr, &local_err);
}
} else {
feat2prop(featurestr);
- object_property_parse(OBJECT(cpu), "on", featurestr, errp);
+ object_property_parse(OBJECT(cpu), "on", featurestr, &local_err);
}
- if (error_is_set(errp)) {
+ if (local_err) {
+ error_propagate(errp, local_err);
goto out;
}
featurestr = strtok(NULL, ",");
@@ -1753,7 +1790,7 @@ static void listflags(char *buf, int bufsize, uint32_t fbits,
/* generate CPU information. */
void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
{
- x86_def_t *def;
+ X86CPUDefinition *def;
char buf[256];
int i;
@@ -1780,7 +1817,7 @@ void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
{
CpuDefinitionInfoList *cpu_list = NULL;
- x86_def_t *def;
+ X86CPUDefinition *def;
int i;
for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
@@ -1817,17 +1854,13 @@ static void filter_features_for_kvm(X86CPU *cpu)
}
}
-static void cpu_x86_register(X86CPU *cpu, const char *name, Error **errp)
+/* Load data from X86CPUDefinition
+ */
+static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
{
CPUX86State *env = &cpu->env;
- x86_def_t def1, *def = &def1;
-
- memset(def, 0, sizeof(*def));
-
- if (cpu_x86_find_by_name(cpu, def, name) < 0) {
- error_setg(errp, "Unable to find CPU definition: %s", name);
- return;
- }
+ const char *vendor;
+ char host_vendor[CPUID_VENDOR_SZ + 1];
object_property_set_int(OBJECT(cpu), def->level, "level", errp);
object_property_set_int(OBJECT(cpu), def->family, "family", errp);
@@ -1847,10 +1880,14 @@ static void cpu_x86_register(X86CPU *cpu, const char *name, Error **errp)
object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
- /* Special cases not set in the x86_def_t structs: */
+ /* Special cases not set in the X86CPUDefinition structs: */
if (kvm_enabled()) {
- env->features[FEAT_KVM] |= kvm_default_features;
+ FeatureWord w;
+ for (w = 0; w < FEATURE_WORDS; w++) {
+ env->features[w] |= kvm_default_features[w];
+ }
}
+
env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
/* sysenter isn't supported in compatibility mode on AMD,
@@ -1860,8 +1897,7 @@ static void cpu_x86_register(X86CPU *cpu, const char *name, Error **errp)
* KVM's sysenter/syscall emulation in compatibility mode and
* when doing cross vendor migration
*/
- const char *vendor = def->vendor;
- char host_vendor[CPUID_VENDOR_SZ + 1];
+ vendor = def->vendor;
if (kvm_enabled()) {
uint32_t ebx = 0, ecx = 0, edx = 0;
host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
@@ -1877,9 +1913,10 @@ X86CPU *cpu_x86_create(const char *cpu_model, DeviceState *icc_bridge,
Error **errp)
{
X86CPU *cpu = NULL;
+ X86CPUClass *xcc;
+ ObjectClass *oc;
gchar **model_pieces;
char *name, *features;
- char *typename;
Error *error = NULL;
model_pieces = g_strsplit(cpu_model, ",", 2);
@@ -1890,30 +1927,30 @@ X86CPU *cpu_x86_create(const char *cpu_model, DeviceState *icc_bridge,
name = model_pieces[0];
features = model_pieces[1];
- cpu = X86_CPU(object_new(TYPE_X86_CPU));
-#ifndef CONFIG_USER_ONLY
- if (icc_bridge == NULL) {
- error_setg(&error, "Invalid icc-bridge value");
+ oc = x86_cpu_class_by_name(name);
+ if (oc == NULL) {
+ error_setg(&error, "Unable to find CPU definition: %s", name);
goto out;
}
- qdev_set_parent_bus(DEVICE(cpu), qdev_get_child_bus(icc_bridge, "icc"));
- object_unref(OBJECT(cpu));
-#endif
+ xcc = X86_CPU_CLASS(oc);
- cpu_x86_register(cpu, name, &error);
- if (error) {
+ if (xcc->kvm_required && !kvm_enabled()) {
+ error_setg(&error, "CPU model '%s' requires KVM", name);
goto out;
}
- /* Emulate per-model subclasses for global properties */
- typename = g_strdup_printf("%s-" TYPE_X86_CPU, name);
- qdev_prop_set_globals_for_type(DEVICE(cpu), typename, &error);
- g_free(typename);
- if (error) {
+ cpu = X86_CPU(object_new(object_class_get_name(oc)));
+
+#ifndef CONFIG_USER_ONLY
+ if (icc_bridge == NULL) {
+ error_setg(&error, "Invalid icc-bridge value");
goto out;
}
+ qdev_set_parent_bus(DEVICE(cpu), qdev_get_child_bus(icc_bridge, "icc"));
+ object_unref(OBJECT(cpu));
+#endif
- cpu_x86_parse_featurestr(cpu, features, &error);
+ x86_cpu_parse_featurestr(CPU(cpu), features, &error);
if (error) {
goto out;
}
@@ -1921,8 +1958,10 @@ X86CPU *cpu_x86_create(const char *cpu_model, DeviceState *icc_bridge,
out:
if (error != NULL) {
error_propagate(errp, error);
- object_unref(OBJECT(cpu));
- cpu = NULL;
+ if (cpu) {
+ object_unref(OBJECT(cpu));
+ cpu = NULL;
+ }
}
g_strfreev(model_pieces);
return cpu;
@@ -1952,6 +1991,28 @@ out:
return cpu;
}
+static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
+{
+ X86CPUDefinition *cpudef = data;
+ X86CPUClass *xcc = X86_CPU_CLASS(oc);
+
+ xcc->cpu_def = cpudef;
+}
+
+static void x86_register_cpudef_type(X86CPUDefinition *def)
+{
+ char *typename = x86_cpu_type_name(def->name);
+ TypeInfo ti = {
+ .name = typename,
+ .parent = TYPE_X86_CPU,
+ .class_init = x86_cpu_cpudef_class_init,
+ .class_data = def,
+ };
+
+ type_register(&ti);
+ g_free(typename);
+}
+
#if !defined(CONFIG_USER_ONLY)
void cpu_clear_apic_feature(CPUX86State *env)
@@ -1969,7 +2030,7 @@ void x86_cpudef_setup(void)
static const char *model_with_versions[] = { "qemu32", "qemu64", "athlon" };
for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); ++i) {
- x86_def_t *def = &builtin_x86_defs[i];
+ X86CPUDefinition *def = &builtin_x86_defs[i];
/* Look for specific "cpudef" models that */
/* have the QEMU version in .model_id */
@@ -2349,9 +2410,9 @@ static void x86_cpu_reset(CPUState *s)
xcc->parent_reset(s);
- memset(env, 0, offsetof(CPUX86State, breakpoints));
+ memset(env, 0, offsetof(CPUX86State, pat));
- tlb_flush(env, 1);
+ tlb_flush(s, 1);
env->old_exception = -1;
@@ -2412,8 +2473,8 @@ static void x86_cpu_reset(CPUState *s)
memset(env->dr, 0, sizeof(env->dr));
env->dr[6] = DR6_FIXED_1;
env->dr[7] = DR7_FIXED_1;
- cpu_breakpoint_remove_all(env, BP_CPU);
- cpu_watchpoint_remove_all(env, BP_CPU);
+ cpu_breakpoint_remove_all(s, BP_CPU);
+ cpu_watchpoint_remove_all(s, BP_CPU);
env->tsc_adjust = 0;
env->tsc = 0;
@@ -2613,6 +2674,7 @@ static void x86_cpu_initfn(Object *obj)
{
CPUState *cs = CPU(obj);
X86CPU *cpu = X86_CPU(obj);
+ X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
CPUX86State *env = &cpu->env;
static int inited;
@@ -2656,6 +2718,8 @@ static void x86_cpu_initfn(Object *obj)
cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;
env->cpuid_apic_id = x86_cpu_apic_id_from_index(cs->cpu_index);
+ x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
+
/* init various static tables used in TCG mode */
if (tcg_enabled() && !inited) {
inited = 1;
@@ -2695,6 +2759,20 @@ static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
cpu->env.eip = tb->pc - tb->cs_base;
}
+static bool x86_cpu_has_work(CPUState *cs)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+
+ return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
+ CPU_INTERRUPT_POLL)) &&
+ (env->eflags & IF_MASK)) ||
+ (cs->interrupt_request & (CPU_INTERRUPT_NMI |
+ CPU_INTERRUPT_INIT |
+ CPU_INTERRUPT_SIPI |
+ CPU_INTERRUPT_MCE));
+}
+
static Property x86_cpu_properties[] = {
DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
{ .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
@@ -2721,6 +2799,9 @@ static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
cc->reset = x86_cpu_reset;
cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
+ cc->class_by_name = x86_cpu_class_by_name;
+ cc->parse_features = x86_cpu_parse_featurestr;
+ cc->has_work = x86_cpu_has_work;
cc->do_interrupt = x86_cpu_do_interrupt;
cc->dump_state = x86_cpu_dump_state;
cc->set_pc = x86_cpu_set_pc;
@@ -2729,7 +2810,9 @@ static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
cc->gdb_write_register = x86_cpu_gdb_write_register;
cc->get_arch_id = x86_cpu_get_arch_id;
cc->get_paging_enabled = x86_cpu_get_paging_enabled;
-#ifndef CONFIG_USER_ONLY
+#ifdef CONFIG_USER_ONLY
+ cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
+#else
cc->get_memory_mapping = x86_cpu_get_memory_mapping;
cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
cc->write_elf64_note = x86_cpu_write_elf64_note;
@@ -2746,14 +2829,22 @@ static const TypeInfo x86_cpu_type_info = {
.parent = TYPE_CPU,
.instance_size = sizeof(X86CPU),
.instance_init = x86_cpu_initfn,
- .abstract = false,
+ .abstract = true,
.class_size = sizeof(X86CPUClass),
.class_init = x86_cpu_common_class_init,
};
static void x86_cpu_register_types(void)
{
+ int i;
+
type_register_static(&x86_cpu_type_info);
+ for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
+ x86_register_cpudef_type(&builtin_x86_defs[i]);
+ }
+#ifdef CONFIG_KVM
+ type_register_static(&host_x86_cpu_type_info);
+#endif
}
type_init(x86_cpu_register_types)
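
Two points from the cpu.c changes above are worth spelling out. First, the name-to-type mapping: X86_CPU_TYPE_NAME("Nehalem") expands to "Nehalem-" TYPE_X86_CPU, so every entry in builtin_x86_defs becomes its own QOM class (the exact TYPE_X86_CPU string depends on the target). Second, code that previously called disable_kvm_pv_eoi() is expected to clear the same default bit through the new per-word helper; a sketch of such a call site, which is not part of this excerpt and is only an assumption about the callers:

    /* hypothetical compat call site, e.g. in an older pc-* machine init */
    x86_cpu_compat_disable_kvm_features(FEAT_KVM, 1U << KVM_FEATURE_PV_EOI);
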
diff --git a/target-i386/cpu.h b/target-i386/cpu.h
index 0014acca7b..2a22a7d64e 100644
--- a/target-i386/cpu.h
+++ b/target-i386/cpu.h
@@ -194,35 +194,35 @@
#define CR0_PE_SHIFT 0
#define CR0_MP_SHIFT 1
-#define CR0_PE_MASK (1 << 0)
-#define CR0_MP_MASK (1 << 1)
-#define CR0_EM_MASK (1 << 2)
-#define CR0_TS_MASK (1 << 3)
-#define CR0_ET_MASK (1 << 4)
-#define CR0_NE_MASK (1 << 5)
-#define CR0_WP_MASK (1 << 16)
-#define CR0_AM_MASK (1 << 18)
-#define CR0_PG_MASK (1 << 31)
-
-#define CR4_VME_MASK (1 << 0)
-#define CR4_PVI_MASK (1 << 1)
-#define CR4_TSD_MASK (1 << 2)
-#define CR4_DE_MASK (1 << 3)
-#define CR4_PSE_MASK (1 << 4)
-#define CR4_PAE_MASK (1 << 5)
-#define CR4_MCE_MASK (1 << 6)
-#define CR4_PGE_MASK (1 << 7)
-#define CR4_PCE_MASK (1 << 8)
+#define CR0_PE_MASK (1U << 0)
+#define CR0_MP_MASK (1U << 1)
+#define CR0_EM_MASK (1U << 2)
+#define CR0_TS_MASK (1U << 3)
+#define CR0_ET_MASK (1U << 4)
+#define CR0_NE_MASK (1U << 5)
+#define CR0_WP_MASK (1U << 16)
+#define CR0_AM_MASK (1U << 18)
+#define CR0_PG_MASK (1U << 31)
+
+#define CR4_VME_MASK (1U << 0)
+#define CR4_PVI_MASK (1U << 1)
+#define CR4_TSD_MASK (1U << 2)
+#define CR4_DE_MASK (1U << 3)
+#define CR4_PSE_MASK (1U << 4)
+#define CR4_PAE_MASK (1U << 5)
+#define CR4_MCE_MASK (1U << 6)
+#define CR4_PGE_MASK (1U << 7)
+#define CR4_PCE_MASK (1U << 8)
#define CR4_OSFXSR_SHIFT 9
-#define CR4_OSFXSR_MASK (1 << CR4_OSFXSR_SHIFT)
-#define CR4_OSXMMEXCPT_MASK (1 << 10)
-#define CR4_VMXE_MASK (1 << 13)
-#define CR4_SMXE_MASK (1 << 14)
-#define CR4_FSGSBASE_MASK (1 << 16)
-#define CR4_PCIDE_MASK (1 << 17)
-#define CR4_OSXSAVE_MASK (1 << 18)
-#define CR4_SMEP_MASK (1 << 20)
-#define CR4_SMAP_MASK (1 << 21)
+#define CR4_OSFXSR_MASK (1U << CR4_OSFXSR_SHIFT)
+#define CR4_OSXMMEXCPT_MASK (1U << 10)
+#define CR4_VMXE_MASK (1U << 13)
+#define CR4_SMXE_MASK (1U << 14)
+#define CR4_FSGSBASE_MASK (1U << 16)
+#define CR4_PCIDE_MASK (1U << 17)
+#define CR4_OSXSAVE_MASK (1U << 18)
+#define CR4_SMEP_MASK (1U << 20)
+#define CR4_SMAP_MASK (1U << 21)
#define DR6_BD (1 << 13)
#define DR6_BS (1 << 14)
@@ -407,96 +407,96 @@ typedef enum FeatureWord {
typedef uint32_t FeatureWordArray[FEATURE_WORDS];
/* cpuid_features bits */
-#define CPUID_FP87 (1 << 0)
-#define CPUID_VME (1 << 1)
-#define CPUID_DE (1 << 2)
-#define CPUID_PSE (1 << 3)
-#define CPUID_TSC (1 << 4)
-#define CPUID_MSR (1 << 5)
-#define CPUID_PAE (1 << 6)
-#define CPUID_MCE (1 << 7)
-#define CPUID_CX8 (1 << 8)
-#define CPUID_APIC (1 << 9)
-#define CPUID_SEP (1 << 11) /* sysenter/sysexit */
-#define CPUID_MTRR (1 << 12)
-#define CPUID_PGE (1 << 13)
-#define CPUID_MCA (1 << 14)
-#define CPUID_CMOV (1 << 15)
-#define CPUID_PAT (1 << 16)
-#define CPUID_PSE36 (1 << 17)
-#define CPUID_PN (1 << 18)
-#define CPUID_CLFLUSH (1 << 19)
-#define CPUID_DTS (1 << 21)
-#define CPUID_ACPI (1 << 22)
-#define CPUID_MMX (1 << 23)
-#define CPUID_FXSR (1 << 24)
-#define CPUID_SSE (1 << 25)
-#define CPUID_SSE2 (1 << 26)
-#define CPUID_SS (1 << 27)
-#define CPUID_HT (1 << 28)
-#define CPUID_TM (1 << 29)
-#define CPUID_IA64 (1 << 30)
-#define CPUID_PBE (1 << 31)
-
-#define CPUID_EXT_SSE3 (1 << 0)
-#define CPUID_EXT_PCLMULQDQ (1 << 1)
-#define CPUID_EXT_DTES64 (1 << 2)
-#define CPUID_EXT_MONITOR (1 << 3)
-#define CPUID_EXT_DSCPL (1 << 4)
-#define CPUID_EXT_VMX (1 << 5)
-#define CPUID_EXT_SMX (1 << 6)
-#define CPUID_EXT_EST (1 << 7)
-#define CPUID_EXT_TM2 (1 << 8)
-#define CPUID_EXT_SSSE3 (1 << 9)
-#define CPUID_EXT_CID (1 << 10)
-#define CPUID_EXT_FMA (1 << 12)
-#define CPUID_EXT_CX16 (1 << 13)
-#define CPUID_EXT_XTPR (1 << 14)
-#define CPUID_EXT_PDCM (1 << 15)
-#define CPUID_EXT_PCID (1 << 17)
-#define CPUID_EXT_DCA (1 << 18)
-#define CPUID_EXT_SSE41 (1 << 19)
-#define CPUID_EXT_SSE42 (1 << 20)
-#define CPUID_EXT_X2APIC (1 << 21)
-#define CPUID_EXT_MOVBE (1 << 22)
-#define CPUID_EXT_POPCNT (1 << 23)
-#define CPUID_EXT_TSC_DEADLINE_TIMER (1 << 24)
-#define CPUID_EXT_AES (1 << 25)
-#define CPUID_EXT_XSAVE (1 << 26)
-#define CPUID_EXT_OSXSAVE (1 << 27)
-#define CPUID_EXT_AVX (1 << 28)
-#define CPUID_EXT_F16C (1 << 29)
-#define CPUID_EXT_RDRAND (1 << 30)
-#define CPUID_EXT_HYPERVISOR (1 << 31)
-
-#define CPUID_EXT2_FPU (1 << 0)
-#define CPUID_EXT2_VME (1 << 1)
-#define CPUID_EXT2_DE (1 << 2)
-#define CPUID_EXT2_PSE (1 << 3)
-#define CPUID_EXT2_TSC (1 << 4)
-#define CPUID_EXT2_MSR (1 << 5)
-#define CPUID_EXT2_PAE (1 << 6)
-#define CPUID_EXT2_MCE (1 << 7)
-#define CPUID_EXT2_CX8 (1 << 8)
-#define CPUID_EXT2_APIC (1 << 9)
-#define CPUID_EXT2_SYSCALL (1 << 11)
-#define CPUID_EXT2_MTRR (1 << 12)
-#define CPUID_EXT2_PGE (1 << 13)
-#define CPUID_EXT2_MCA (1 << 14)
-#define CPUID_EXT2_CMOV (1 << 15)
-#define CPUID_EXT2_PAT (1 << 16)
-#define CPUID_EXT2_PSE36 (1 << 17)
-#define CPUID_EXT2_MP (1 << 19)
-#define CPUID_EXT2_NX (1 << 20)
-#define CPUID_EXT2_MMXEXT (1 << 22)
-#define CPUID_EXT2_MMX (1 << 23)
-#define CPUID_EXT2_FXSR (1 << 24)
-#define CPUID_EXT2_FFXSR (1 << 25)
-#define CPUID_EXT2_PDPE1GB (1 << 26)
-#define CPUID_EXT2_RDTSCP (1 << 27)
-#define CPUID_EXT2_LM (1 << 29)
-#define CPUID_EXT2_3DNOWEXT (1 << 30)
-#define CPUID_EXT2_3DNOW (1 << 31)
+#define CPUID_FP87 (1U << 0)
+#define CPUID_VME (1U << 1)
+#define CPUID_DE (1U << 2)
+#define CPUID_PSE (1U << 3)
+#define CPUID_TSC (1U << 4)
+#define CPUID_MSR (1U << 5)
+#define CPUID_PAE (1U << 6)
+#define CPUID_MCE (1U << 7)
+#define CPUID_CX8 (1U << 8)
+#define CPUID_APIC (1U << 9)
+#define CPUID_SEP (1U << 11) /* sysenter/sysexit */
+#define CPUID_MTRR (1U << 12)
+#define CPUID_PGE (1U << 13)
+#define CPUID_MCA (1U << 14)
+#define CPUID_CMOV (1U << 15)
+#define CPUID_PAT (1U << 16)
+#define CPUID_PSE36 (1U << 17)
+#define CPUID_PN (1U << 18)
+#define CPUID_CLFLUSH (1U << 19)
+#define CPUID_DTS (1U << 21)
+#define CPUID_ACPI (1U << 22)
+#define CPUID_MMX (1U << 23)
+#define CPUID_FXSR (1U << 24)
+#define CPUID_SSE (1U << 25)
+#define CPUID_SSE2 (1U << 26)
+#define CPUID_SS (1U << 27)
+#define CPUID_HT (1U << 28)
+#define CPUID_TM (1U << 29)
+#define CPUID_IA64 (1U << 30)
+#define CPUID_PBE (1U << 31)
+
+#define CPUID_EXT_SSE3 (1U << 0)
+#define CPUID_EXT_PCLMULQDQ (1U << 1)
+#define CPUID_EXT_DTES64 (1U << 2)
+#define CPUID_EXT_MONITOR (1U << 3)
+#define CPUID_EXT_DSCPL (1U << 4)
+#define CPUID_EXT_VMX (1U << 5)
+#define CPUID_EXT_SMX (1U << 6)
+#define CPUID_EXT_EST (1U << 7)
+#define CPUID_EXT_TM2 (1U << 8)
+#define CPUID_EXT_SSSE3 (1U << 9)
+#define CPUID_EXT_CID (1U << 10)
+#define CPUID_EXT_FMA (1U << 12)
+#define CPUID_EXT_CX16 (1U << 13)
+#define CPUID_EXT_XTPR (1U << 14)
+#define CPUID_EXT_PDCM (1U << 15)
+#define CPUID_EXT_PCID (1U << 17)
+#define CPUID_EXT_DCA (1U << 18)
+#define CPUID_EXT_SSE41 (1U << 19)
+#define CPUID_EXT_SSE42 (1U << 20)
+#define CPUID_EXT_X2APIC (1U << 21)
+#define CPUID_EXT_MOVBE (1U << 22)
+#define CPUID_EXT_POPCNT (1U << 23)
+#define CPUID_EXT_TSC_DEADLINE_TIMER (1U << 24)
+#define CPUID_EXT_AES (1U << 25)
+#define CPUID_EXT_XSAVE (1U << 26)
+#define CPUID_EXT_OSXSAVE (1U << 27)
+#define CPUID_EXT_AVX (1U << 28)
+#define CPUID_EXT_F16C (1U << 29)
+#define CPUID_EXT_RDRAND (1U << 30)
+#define CPUID_EXT_HYPERVISOR (1U << 31)
+
+#define CPUID_EXT2_FPU (1U << 0)
+#define CPUID_EXT2_VME (1U << 1)
+#define CPUID_EXT2_DE (1U << 2)
+#define CPUID_EXT2_PSE (1U << 3)
+#define CPUID_EXT2_TSC (1U << 4)
+#define CPUID_EXT2_MSR (1U << 5)
+#define CPUID_EXT2_PAE (1U << 6)
+#define CPUID_EXT2_MCE (1U << 7)
+#define CPUID_EXT2_CX8 (1U << 8)
+#define CPUID_EXT2_APIC (1U << 9)
+#define CPUID_EXT2_SYSCALL (1U << 11)
+#define CPUID_EXT2_MTRR (1U << 12)
+#define CPUID_EXT2_PGE (1U << 13)
+#define CPUID_EXT2_MCA (1U << 14)
+#define CPUID_EXT2_CMOV (1U << 15)
+#define CPUID_EXT2_PAT (1U << 16)
+#define CPUID_EXT2_PSE36 (1U << 17)
+#define CPUID_EXT2_MP (1U << 19)
+#define CPUID_EXT2_NX (1U << 20)
+#define CPUID_EXT2_MMXEXT (1U << 22)
+#define CPUID_EXT2_MMX (1U << 23)
+#define CPUID_EXT2_FXSR (1U << 24)
+#define CPUID_EXT2_FFXSR (1U << 25)
+#define CPUID_EXT2_PDPE1GB (1U << 26)
+#define CPUID_EXT2_RDTSCP (1U << 27)
+#define CPUID_EXT2_LM (1U << 29)
+#define CPUID_EXT2_3DNOWEXT (1U << 30)
+#define CPUID_EXT2_3DNOW (1U << 31)
/* CPUID[8000_0001].EDX bits that are aliases of CPUID[1].EDX bits on AMD CPUs */
#define CPUID_EXT2_AMD_ALIASES (CPUID_EXT2_FPU | CPUID_EXT2_VME | \
@@ -509,53 +509,53 @@ typedef uint32_t FeatureWordArray[FEATURE_WORDS];
CPUID_EXT2_PAT | CPUID_EXT2_PSE36 | \
CPUID_EXT2_MMX | CPUID_EXT2_FXSR)
-#define CPUID_EXT3_LAHF_LM (1 << 0)
-#define CPUID_EXT3_CMP_LEG (1 << 1)
-#define CPUID_EXT3_SVM (1 << 2)
-#define CPUID_EXT3_EXTAPIC (1 << 3)
-#define CPUID_EXT3_CR8LEG (1 << 4)
-#define CPUID_EXT3_ABM (1 << 5)
-#define CPUID_EXT3_SSE4A (1 << 6)
-#define CPUID_EXT3_MISALIGNSSE (1 << 7)
-#define CPUID_EXT3_3DNOWPREFETCH (1 << 8)
-#define CPUID_EXT3_OSVW (1 << 9)
-#define CPUID_EXT3_IBS (1 << 10)
-#define CPUID_EXT3_XOP (1 << 11)
-#define CPUID_EXT3_SKINIT (1 << 12)
-#define CPUID_EXT3_WDT (1 << 13)
-#define CPUID_EXT3_LWP (1 << 15)
-#define CPUID_EXT3_FMA4 (1 << 16)
-#define CPUID_EXT3_TCE (1 << 17)
-#define CPUID_EXT3_NODEID (1 << 19)
-#define CPUID_EXT3_TBM (1 << 21)
-#define CPUID_EXT3_TOPOEXT (1 << 22)
-#define CPUID_EXT3_PERFCORE (1 << 23)
-#define CPUID_EXT3_PERFNB (1 << 24)
-
-#define CPUID_SVM_NPT (1 << 0)
-#define CPUID_SVM_LBRV (1 << 1)
-#define CPUID_SVM_SVMLOCK (1 << 2)
-#define CPUID_SVM_NRIPSAVE (1 << 3)
-#define CPUID_SVM_TSCSCALE (1 << 4)
-#define CPUID_SVM_VMCBCLEAN (1 << 5)
-#define CPUID_SVM_FLUSHASID (1 << 6)
-#define CPUID_SVM_DECODEASSIST (1 << 7)
-#define CPUID_SVM_PAUSEFILTER (1 << 10)
-#define CPUID_SVM_PFTHRESHOLD (1 << 12)
-
-#define CPUID_7_0_EBX_FSGSBASE (1 << 0)
-#define CPUID_7_0_EBX_BMI1 (1 << 3)
-#define CPUID_7_0_EBX_HLE (1 << 4)
-#define CPUID_7_0_EBX_AVX2 (1 << 5)
-#define CPUID_7_0_EBX_SMEP (1 << 7)
-#define CPUID_7_0_EBX_BMI2 (1 << 8)
-#define CPUID_7_0_EBX_ERMS (1 << 9)
-#define CPUID_7_0_EBX_INVPCID (1 << 10)
-#define CPUID_7_0_EBX_RTM (1 << 11)
-#define CPUID_7_0_EBX_MPX (1 << 14)
-#define CPUID_7_0_EBX_RDSEED (1 << 18)
-#define CPUID_7_0_EBX_ADX (1 << 19)
-#define CPUID_7_0_EBX_SMAP (1 << 20)
+#define CPUID_EXT3_LAHF_LM (1U << 0)
+#define CPUID_EXT3_CMP_LEG (1U << 1)
+#define CPUID_EXT3_SVM (1U << 2)
+#define CPUID_EXT3_EXTAPIC (1U << 3)
+#define CPUID_EXT3_CR8LEG (1U << 4)
+#define CPUID_EXT3_ABM (1U << 5)
+#define CPUID_EXT3_SSE4A (1U << 6)
+#define CPUID_EXT3_MISALIGNSSE (1U << 7)
+#define CPUID_EXT3_3DNOWPREFETCH (1U << 8)
+#define CPUID_EXT3_OSVW (1U << 9)
+#define CPUID_EXT3_IBS (1U << 10)
+#define CPUID_EXT3_XOP (1U << 11)
+#define CPUID_EXT3_SKINIT (1U << 12)
+#define CPUID_EXT3_WDT (1U << 13)
+#define CPUID_EXT3_LWP (1U << 15)
+#define CPUID_EXT3_FMA4 (1U << 16)
+#define CPUID_EXT3_TCE (1U << 17)
+#define CPUID_EXT3_NODEID (1U << 19)
+#define CPUID_EXT3_TBM (1U << 21)
+#define CPUID_EXT3_TOPOEXT (1U << 22)
+#define CPUID_EXT3_PERFCORE (1U << 23)
+#define CPUID_EXT3_PERFNB (1U << 24)
+
+#define CPUID_SVM_NPT (1U << 0)
+#define CPUID_SVM_LBRV (1U << 1)
+#define CPUID_SVM_SVMLOCK (1U << 2)
+#define CPUID_SVM_NRIPSAVE (1U << 3)
+#define CPUID_SVM_TSCSCALE (1U << 4)
+#define CPUID_SVM_VMCBCLEAN (1U << 5)
+#define CPUID_SVM_FLUSHASID (1U << 6)
+#define CPUID_SVM_DECODEASSIST (1U << 7)
+#define CPUID_SVM_PAUSEFILTER (1U << 10)
+#define CPUID_SVM_PFTHRESHOLD (1U << 12)
+
+#define CPUID_7_0_EBX_FSGSBASE (1U << 0)
+#define CPUID_7_0_EBX_BMI1 (1U << 3)
+#define CPUID_7_0_EBX_HLE (1U << 4)
+#define CPUID_7_0_EBX_AVX2 (1U << 5)
+#define CPUID_7_0_EBX_SMEP (1U << 7)
+#define CPUID_7_0_EBX_BMI2 (1U << 8)
+#define CPUID_7_0_EBX_ERMS (1U << 9)
+#define CPUID_7_0_EBX_INVPCID (1U << 10)
+#define CPUID_7_0_EBX_RTM (1U << 11)
+#define CPUID_7_0_EBX_MPX (1U << 14)
+#define CPUID_7_0_EBX_RDSEED (1U << 18)
+#define CPUID_7_0_EBX_ADX (1U << 19)
+#define CPUID_7_0_EBX_SMAP (1U << 20)
#define CPUID_VENDOR_SZ 12
@@ -571,8 +571,8 @@ typedef uint32_t FeatureWordArray[FEATURE_WORDS];
#define CPUID_VENDOR_VIA "CentaurHauls"
-#define CPUID_MWAIT_IBE (1 << 1) /* Interrupts can exit capability */
-#define CPUID_MWAIT_EMX (1 << 0) /* enumeration supported */
+#define CPUID_MWAIT_IBE (1U << 1) /* Interrupts can exit capability */
+#define CPUID_MWAIT_EMX (1U << 0) /* enumeration supported */
#ifndef HYPERV_SPINLOCK_NEVER_RETRY
#define HYPERV_SPINLOCK_NEVER_RETRY 0xFFFFFFFF
@@ -875,8 +875,8 @@ typedef struct CPUX86State {
target_ulong exception_next_eip;
target_ulong dr[8]; /* debug registers */
union {
- CPUBreakpoint *cpu_breakpoint[4];
- CPUWatchpoint *cpu_watchpoint[4];
+ struct CPUBreakpoint *cpu_breakpoint[4];
+ struct CPUWatchpoint *cpu_watchpoint[4];
}; /* break/watchpoints for dr[0..3] */
uint32_t smbase;
int old_exception; /* exception in flight */
@@ -887,6 +887,7 @@ typedef struct CPUX86State {
CPU_COMMON
+ /* Fields from here on are preserved across CPU reset. */
uint64_t pat;
/* processor features (e.g. for CPUID insn) */
@@ -1067,9 +1068,8 @@ void host_cpuid(uint32_t function, uint32_t count,
uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx);
/* helper.c */
-int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
+int x86_cpu_handle_mmu_fault(CPUState *cpu, vaddr addr,
int is_write, int mmu_idx);
-#define cpu_handle_mmu_fault cpu_x86_handle_mmu_fault
void x86_cpu_set_a20(X86CPU *cpu, int a20_state);
static inline bool hw_local_breakpoint_enabled(unsigned long dr7, int index)
@@ -1186,20 +1186,6 @@ void optimize_flags_init(void);
#include "hw/i386/apic.h"
#endif
-static inline bool cpu_has_work(CPUState *cs)
-{
- X86CPU *cpu = X86_CPU(cs);
- CPUX86State *env = &cpu->env;
-
- return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
- CPU_INTERRUPT_POLL)) &&
- (env->eflags & IF_MASK)) ||
- (cs->interrupt_request & (CPU_INTERRUPT_NMI |
- CPU_INTERRUPT_INIT |
- CPU_INTERRUPT_SIPI |
- CPU_INTERRUPT_MCE));
-}
-
#include "exec/exec-all.h"
static inline void cpu_get_tb_cpu_state(CPUX86State *env, target_ulong *pc,
@@ -1276,11 +1262,11 @@ void do_smm_enter(X86CPU *cpu);
void cpu_report_tpr_access(CPUX86State *env, TPRAccess access);
-void disable_kvm_pv_eoi(void);
-
void x86_cpu_compat_set_features(const char *cpu_model, FeatureWord w,
uint32_t feat_add, uint32_t feat_remove);
+void x86_cpu_compat_disable_kvm_features(FeatureWord w, uint32_t features);
+
/* Return name of 32-bit register, from a R_* constant */
const char *get_register_name_32(unsigned int reg);
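
The blanket 1 -> 1U change above is not cosmetic: with a 32-bit int, shifting a set bit into the sign position is undefined behaviour in C, whereas the unsigned form is well defined. A small standalone illustration (not part of the patch):

    #include <stdint.h>

    uint32_t pbe = 1U << 31;   /* well defined, yields 0x80000000 */
    /* uint32_t bad = 1 << 31;    1 * 2^31 does not fit in a 32-bit int:
     *                            undefined behaviour, hence the 1U forms above */
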
diff --git a/target-i386/excp_helper.c b/target-i386/excp_helper.c
index 5319aef7df..f337fd20fb 100644
--- a/target-i386/excp_helper.c
+++ b/target-i386/excp_helper.c
@@ -94,6 +94,8 @@ static void QEMU_NORETURN raise_interrupt2(CPUX86State *env, int intno,
int is_int, int error_code,
int next_eip_addend)
{
+ CPUState *cs = CPU(x86_env_get_cpu(env));
+
if (!is_int) {
cpu_svm_check_intercept_param(env, SVM_EXIT_EXCP_BASE + intno,
error_code);
@@ -102,11 +104,11 @@ static void QEMU_NORETURN raise_interrupt2(CPUX86State *env, int intno,
cpu_svm_check_intercept_param(env, SVM_EXIT_SWINT, 0);
}
- env->exception_index = intno;
+ cs->exception_index = intno;
env->error_code = error_code;
env->exception_is_int = is_int;
env->exception_next_eip = env->eip + next_eip_addend;
- cpu_loop_exit(env);
+ cpu_loop_exit(cs);
}
/* shortcuts to generate exceptions */
diff --git a/target-i386/helper.c b/target-i386/helper.c
index 55c04577dc..372f0e3ecb 100644
--- a/target-i386/helper.c
+++ b/target-i386/helper.c
@@ -385,22 +385,25 @@ void x86_cpu_set_a20(X86CPU *cpu, int a20_state)
a20_state = (a20_state != 0);
if (a20_state != ((env->a20_mask >> 20) & 1)) {
+ CPUState *cs = CPU(cpu);
+
#if defined(DEBUG_MMU)
printf("A20 update: a20=%d\n", a20_state);
#endif
/* if the cpu is currently executing code, we must unlink it and
all the potentially executing TB */
- cpu_interrupt(CPU(cpu), CPU_INTERRUPT_EXITTB);
+ cpu_interrupt(cs, CPU_INTERRUPT_EXITTB);
/* when a20 is changed, all the MMU mappings are invalid, so
we must flush everything */
- tlb_flush(env, 1);
+ tlb_flush(cs, 1);
env->a20_mask = ~(1 << 20) | (a20_state << 20);
}
}
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
+ X86CPU *cpu = x86_env_get_cpu(env);
int pe_state;
#if defined(DEBUG_MMU)
@@ -408,7 +411,7 @@ void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
#endif
if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
(env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
- tlb_flush(env, 1);
+ tlb_flush(CPU(cpu), 1);
}
#ifdef TARGET_X86_64
@@ -444,24 +447,28 @@ void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
the PDPT */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
+ X86CPU *cpu = x86_env_get_cpu(env);
+
env->cr[3] = new_cr3;
if (env->cr[0] & CR0_PG_MASK) {
#if defined(DEBUG_MMU)
printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
#endif
- tlb_flush(env, 0);
+ tlb_flush(CPU(cpu), 0);
}
}
void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
+ X86CPU *cpu = x86_env_get_cpu(env);
+
#if defined(DEBUG_MMU)
printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
#endif
if ((new_cr4 ^ env->cr[4]) &
(CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK |
CR4_SMEP_MASK | CR4_SMAP_MASK)) {
- tlb_flush(env, 1);
+ tlb_flush(CPU(cpu), 1);
}
/* SSE handling */
if (!(env->features[FEAT_1_EDX] & CPUID_SSE)) {
@@ -485,15 +492,18 @@ void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
#if defined(CONFIG_USER_ONLY)
-int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
+int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
int is_write, int mmu_idx)
{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+
/* user mode only emulation */
is_write &= 1;
env->cr[2] = addr;
env->error_code = (is_write << PG_ERROR_W_BIT);
env->error_code |= PG_ERROR_U_MASK;
- env->exception_index = EXCP0E_PAGE;
+ cs->exception_index = EXCP0E_PAGE;
return 1;
}
@@ -508,14 +518,15 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
# endif
/* return value:
- -1 = cannot handle fault
- 0 = nothing more to do
- 1 = generate PF fault
-*/
-int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
+ * -1 = cannot handle fault
+ * 0 = nothing more to do
+ * 1 = generate PF fault
+ */
+int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
int is_write1, int mmu_idx)
{
- CPUState *cs = ENV_GET_CPU(env);
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
uint64_t ptep, pte;
target_ulong pde_addr, pte_addr;
int error_code, is_dirty, prot, page_size, is_write, is_user;
@@ -525,7 +536,7 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
- printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
+ printf("MMU fault: addr=%" VADDR_PRIx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
addr, is_write1, is_user, env->eip);
#endif
is_write = is_write1 & 1;
@@ -557,7 +568,7 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
sext = (int64_t)addr >> 47;
if (sext != 0 && sext != -1) {
env->error_code = 0;
- env->exception_index = EXCP0D_GPF;
+ cs->exception_index = EXCP0D_GPF;
return 1;
}
@@ -866,7 +877,7 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
paddr = (pte & TARGET_PAGE_MASK) + page_offset;
vaddr = virt_addr + page_offset;
- tlb_set_page(env, vaddr, paddr, prot, mmu_idx, page_size);
+ tlb_set_page(cs, vaddr, paddr, prot, mmu_idx, page_size);
return 0;
do_fault_protect:
error_code = PG_ERROR_P_MASK;
@@ -888,7 +899,7 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
env->cr[2] = addr;
}
env->error_code = error_code;
- env->exception_index = EXCP0E_PAGE;
+ cs->exception_index = EXCP0E_PAGE;
return 1;
}
@@ -930,6 +941,14 @@ hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
pdpe = ldq_phys(cs->as, pdpe_addr);
if (!(pdpe & PG_PRESENT_MASK))
return -1;
+
+ if (pdpe & PG_PSE_MASK) {
+ page_size = 1024 * 1024 * 1024;
+ pte = pdpe & ~( (page_size - 1) & ~0xfff);
+ pte &= ~(PG_NX_MASK | PG_HI_USER_MASK);
+ goto out;
+ }
+
} else
#endif
{
@@ -982,6 +1001,9 @@ hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
pte = pte & env->a20_mask;
}
+#ifdef TARGET_X86_64
+out:
+#endif
page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
paddr = (pte & TARGET_PAGE_MASK) + page_offset;
return paddr;
@@ -989,12 +1011,13 @@ hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
void hw_breakpoint_insert(CPUX86State *env, int index)
{
+ CPUState *cs = CPU(x86_env_get_cpu(env));
int type = 0, err = 0;
switch (hw_breakpoint_type(env->dr[7], index)) {
case DR7_TYPE_BP_INST:
if (hw_breakpoint_enabled(env->dr[7], index)) {
- err = cpu_breakpoint_insert(env, env->dr[index], BP_CPU,
+ err = cpu_breakpoint_insert(cs, env->dr[index], BP_CPU,
&env->cpu_breakpoint[index]);
}
break;
@@ -1010,7 +1033,7 @@ void hw_breakpoint_insert(CPUX86State *env, int index)
}
if (type != 0) {
- err = cpu_watchpoint_insert(env, env->dr[index],
+ err = cpu_watchpoint_insert(cs, env->dr[index],
hw_breakpoint_len(env->dr[7], index),
type, &env->cpu_watchpoint[index]);
}
@@ -1022,17 +1045,21 @@ void hw_breakpoint_insert(CPUX86State *env, int index)
void hw_breakpoint_remove(CPUX86State *env, int index)
{
- if (!env->cpu_breakpoint[index])
+ CPUState *cs;
+
+ if (!env->cpu_breakpoint[index]) {
return;
+ }
+ cs = CPU(x86_env_get_cpu(env));
switch (hw_breakpoint_type(env->dr[7], index)) {
case DR7_TYPE_BP_INST:
if (hw_breakpoint_enabled(env->dr[7], index)) {
- cpu_breakpoint_remove_by_ref(env, env->cpu_breakpoint[index]);
+ cpu_breakpoint_remove_by_ref(cs, env->cpu_breakpoint[index]);
}
break;
case DR7_TYPE_DATA_WR:
case DR7_TYPE_DATA_RW:
- cpu_watchpoint_remove_by_ref(env, env->cpu_watchpoint[index]);
+ cpu_watchpoint_remove_by_ref(cs, env->cpu_watchpoint[index]);
break;
case DR7_TYPE_IO_RW:
/* No support for I/O watchpoints yet */
@@ -1084,19 +1111,20 @@ bool check_hw_breakpoints(CPUX86State *env, bool force_dr6_update)
void breakpoint_handler(CPUX86State *env)
{
+ CPUState *cs = CPU(x86_env_get_cpu(env));
CPUBreakpoint *bp;
- if (env->watchpoint_hit) {
- if (env->watchpoint_hit->flags & BP_CPU) {
- env->watchpoint_hit = NULL;
+ if (cs->watchpoint_hit) {
+ if (cs->watchpoint_hit->flags & BP_CPU) {
+ cs->watchpoint_hit = NULL;
if (check_hw_breakpoints(env, false)) {
raise_exception(env, EXCP01_DB);
} else {
- cpu_resume_from_signal(env, NULL);
+ cpu_resume_from_signal(cs, NULL);
}
}
} else {
- QTAILQ_FOREACH(bp, &env->breakpoints, entry)
+ QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
if (bp->pc == env->eip) {
if (bp->flags & BP_CPU) {
check_hw_breakpoints(env, true);
@@ -1104,6 +1132,7 @@ void breakpoint_handler(CPUX86State *env)
}
break;
}
+ }
}
}
@@ -1250,13 +1279,14 @@ void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
void cpu_report_tpr_access(CPUX86State *env, TPRAccess access)
{
X86CPU *cpu = x86_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
if (kvm_enabled()) {
env->tpr_access_type = access;
- cpu_interrupt(CPU(cpu), CPU_INTERRUPT_TPR);
+ cpu_interrupt(cs, CPU_INTERRUPT_TPR);
} else {
- cpu_restore_state(env, env->mem_io_pc);
+ cpu_restore_state(cs, cs->mem_io_pc);
apic_handle_tpr_access_report(cpu->apic_state, env->eip, access);
}
diff --git a/target-i386/kvm.c b/target-i386/kvm.c
index e555040a97..4389959f61 100644
--- a/target-i386/kvm.c
+++ b/target-i386/kvm.c
@@ -122,7 +122,7 @@ static struct kvm_cpuid2 *get_supported_cpuid(KVMState *s)
return cpuid;
}
-struct kvm_para_features {
+static const struct kvm_para_features {
int cap;
int feature;
} para_features[] = {
@@ -2277,13 +2277,13 @@ static int kvm_handle_debug(X86CPU *cpu,
break;
case 0x1:
ret = EXCP_DEBUG;
- env->watchpoint_hit = &hw_watchpoint;
+ cs->watchpoint_hit = &hw_watchpoint;
hw_watchpoint.vaddr = hw_breakpoint[n].addr;
hw_watchpoint.flags = BP_MEM_WRITE;
break;
case 0x3:
ret = EXCP_DEBUG;
- env->watchpoint_hit = &hw_watchpoint;
+ cs->watchpoint_hit = &hw_watchpoint;
hw_watchpoint.vaddr = hw_breakpoint[n].addr;
hw_watchpoint.flags = BP_MEM_ACCESS;
break;
@@ -2291,11 +2291,11 @@ static int kvm_handle_debug(X86CPU *cpu,
}
}
}
- } else if (kvm_find_sw_breakpoint(CPU(cpu), arch_info->pc)) {
+ } else if (kvm_find_sw_breakpoint(cs, arch_info->pc)) {
ret = EXCP_DEBUG;
}
if (ret == 0) {
- cpu_synchronize_state(CPU(cpu));
+ cpu_synchronize_state(cs);
assert(env->exception_injected == -1);
/* pass to guest */
diff --git a/target-i386/machine.c b/target-i386/machine.c
index d548c055a9..168cab681b 100644
--- a/target-i386/machine.c
+++ b/target-i386/machine.c
@@ -290,6 +290,7 @@ static void cpu_pre_save(void *opaque)
static int cpu_post_load(void *opaque, int version_id)
{
X86CPU *cpu = opaque;
+ CPUState *cs = CPU(cpu);
CPUX86State *env = &cpu->env;
int i;
@@ -319,12 +320,12 @@ static int cpu_post_load(void *opaque, int version_id)
env->fptags[i] = (env->fptag_vmstate >> i) & 1;
}
- cpu_breakpoint_remove_all(env, BP_CPU);
- cpu_watchpoint_remove_all(env, BP_CPU);
+ cpu_breakpoint_remove_all(cs, BP_CPU);
+ cpu_watchpoint_remove_all(cs, BP_CPU);
for (i = 0; i < DR7_MAX_BP; i++) {
hw_breakpoint_insert(env, i);
}
- tlb_flush(env, 1);
+ tlb_flush(cs, 1);
return 0;
}
@@ -568,8 +569,8 @@ static const VMStateDescription vmstate_msr_hypercall_hypercall = {
.minimum_version_id = 1,
.minimum_version_id_old = 1,
.fields = (VMStateField []) {
- VMSTATE_UINT64(env.msr_hv_hypercall, X86CPU),
VMSTATE_UINT64(env.msr_hv_guest_os_id, X86CPU),
+ VMSTATE_UINT64(env.msr_hv_hypercall, X86CPU),
VMSTATE_END_OF_LIST()
}
};
diff --git a/target-i386/mem_helper.c b/target-i386/mem_helper.c
index 319a219f8a..b3b811bc8c 100644
--- a/target-i386/mem_helper.c
+++ b/target-i386/mem_helper.c
@@ -129,21 +129,25 @@ void helper_boundl(CPUX86State *env, target_ulong a0, int v)
#if !defined(CONFIG_USER_ONLY)
/* try to fill the TLB and return an exception if error. If retaddr is
- NULL, it means that the function was called in C code (i.e. not
- from generated code or from helper.c) */
+ * NULL, it means that the function was called in C code (i.e. not
+ * from generated code or from helper.c)
+ */
/* XXX: fix it to restore all registers */
-void tlb_fill(CPUX86State *env, target_ulong addr, int is_write, int mmu_idx,
+void tlb_fill(CPUState *cs, target_ulong addr, int is_write, int mmu_idx,
uintptr_t retaddr)
{
int ret;
- ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx);
+ ret = x86_cpu_handle_mmu_fault(cs, addr, is_write, mmu_idx);
if (ret) {
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+
if (retaddr) {
/* now we have a real cpu fault */
- cpu_restore_state(env, retaddr);
+ cpu_restore_state(cs, retaddr);
}
- raise_exception_err(env, env->exception_index, env->error_code);
+ raise_exception_err(env, cs->exception_index, env->error_code);
}
}
#endif
diff --git a/target-i386/misc_helper.c b/target-i386/misc_helper.c
index 47f6a2f7c1..1e2da1ed68 100644
--- a/target-i386/misc_helper.c
+++ b/target-i386/misc_helper.c
@@ -221,8 +221,10 @@ void helper_lmsw(CPUX86State *env, target_ulong t0)
void helper_invlpg(CPUX86State *env, target_ulong addr)
{
+ X86CPU *cpu = x86_env_get_cpu(env);
+
cpu_svm_check_intercept_param(env, SVM_EXIT_INVLPG, 0);
- tlb_flush_page(env, addr);
+ tlb_flush_page(CPU(cpu), addr);
}
void helper_rdtsc(CPUX86State *env)
@@ -568,11 +570,11 @@ void helper_rdmsr(CPUX86State *env)
static void do_pause(X86CPU *cpu)
{
- CPUX86State *env = &cpu->env;
+ CPUState *cs = CPU(cpu);
/* Just let another CPU run. */
- env->exception_index = EXCP_INTERRUPT;
- cpu_loop_exit(env);
+ cs->exception_index = EXCP_INTERRUPT;
+ cpu_loop_exit(cs);
}
static void do_hlt(X86CPU *cpu)
@@ -582,8 +584,8 @@ static void do_hlt(X86CPU *cpu)
env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
cs->halted = 1;
- env->exception_index = EXCP_HLT;
- cpu_loop_exit(env);
+ cs->exception_index = EXCP_HLT;
+ cpu_loop_exit(cs);
}
void helper_hlt(CPUX86State *env, int next_eip_addend)
@@ -638,6 +640,8 @@ void helper_pause(CPUX86State *env, int next_eip_addend)
void helper_debug(CPUX86State *env)
{
- env->exception_index = EXCP_DEBUG;
- cpu_loop_exit(env);
+ CPUState *cs = CPU(x86_env_get_cpu(env));
+
+ cs->exception_index = EXCP_DEBUG;
+ cpu_loop_exit(cs);
}
diff --git a/target-i386/seg_helper.c b/target-i386/seg_helper.c
index 959212bfe3..8c3f92c22b 100644
--- a/target-i386/seg_helper.c
+++ b/target-i386/seg_helper.c
@@ -95,6 +95,7 @@ static inline void load_seg_vm(CPUX86State *env, int seg, int selector)
static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr,
uint32_t *esp_ptr, int dpl)
{
+ X86CPU *cpu = x86_env_get_cpu(env);
int type, index, shift;
#if 0
@@ -112,11 +113,11 @@ static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr,
#endif
if (!(env->tr.flags & DESC_P_MASK)) {
- cpu_abort(env, "invalid tss");
+ cpu_abort(CPU(cpu), "invalid tss");
}
type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
if ((type & 7) != 1) {
- cpu_abort(env, "invalid tss type");
+ cpu_abort(CPU(cpu), "invalid tss type");
}
shift = type >> 3;
index = (dpl * 4 + 2) << shift;
@@ -782,6 +783,7 @@ static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level)
{
+ X86CPU *cpu = x86_env_get_cpu(env);
int index;
#if 0
@@ -790,7 +792,7 @@ static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level)
#endif
if (!(env->tr.flags & DESC_P_MASK)) {
- cpu_abort(env, "invalid tss");
+ cpu_abort(CPU(cpu), "invalid tss");
}
index = 8 * level + 4;
if ((index + 7) > env->tr.limit) {
@@ -935,9 +937,11 @@ static void do_interrupt64(CPUX86State *env, int intno, int is_int,
#if defined(CONFIG_USER_ONLY)
void helper_syscall(CPUX86State *env, int next_eip_addend)
{
- env->exception_index = EXCP_SYSCALL;
+ CPUState *cs = CPU(x86_env_get_cpu(env));
+
+ cs->exception_index = EXCP_SYSCALL;
env->exception_next_eip = env->eip + next_eip_addend;
- cpu_loop_exit(env);
+ cpu_loop_exit(cs);
}
#else
void helper_syscall(CPUX86State *env, int next_eip_addend)
@@ -1131,7 +1135,7 @@ static void do_interrupt_user(CPUX86State *env, int intno, int is_int,
static void handle_even_inj(CPUX86State *env, int intno, int is_int,
int error_code, int is_hw, int rm)
{
- CPUState *cs = ENV_GET_CPU(env);
+ CPUState *cs = CPU(x86_env_get_cpu(env));
uint32_t event_inj = ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
control.event_inj));
@@ -1248,7 +1252,7 @@ void x86_cpu_do_interrupt(CPUState *cs)
/* if user mode only, we simulate a fake exception
which will be handled outside the cpu execution
loop */
- do_interrupt_user(env, env->exception_index,
+ do_interrupt_user(env, cs->exception_index,
env->exception_is_int,
env->error_code,
env->exception_next_eip);
@@ -1258,7 +1262,7 @@ void x86_cpu_do_interrupt(CPUState *cs)
/* simulate a real cpu exception. On i386, it can
trigger new exceptions, but we do not handle
double or triple faults yet. */
- do_interrupt_all(cpu, env->exception_index,
+ do_interrupt_all(cpu, cs->exception_index,
env->exception_is_int,
env->error_code,
env->exception_next_eip, 0);
diff --git a/target-i386/smm_helper.c b/target-i386/smm_helper.c
index 71c64b2479..35901c9e89 100644
--- a/target-i386/smm_helper.c
+++ b/target-i386/smm_helper.c
@@ -181,8 +181,8 @@ void do_smm_enter(X86CPU *cpu)
void helper_rsm(CPUX86State *env)
{
- CPUState *cs = ENV_GET_CPU(env);
X86CPU *cpu = x86_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
target_ulong sm_state;
int i, offset;
uint32_t val;
diff --git a/target-i386/svm_helper.c b/target-i386/svm_helper.c
index b38d45002f..aa17ecdece 100644
--- a/target-i386/svm_helper.c
+++ b/target-i386/svm_helper.c
@@ -88,7 +88,8 @@ void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
static inline void svm_save_seg(CPUX86State *env, hwaddr addr,
const SegmentCache *sc)
{
- CPUState *cs = ENV_GET_CPU(env);
+ CPUState *cs = CPU(x86_env_get_cpu(env));
+
stw_phys(cs->as, addr + offsetof(struct vmcb_seg, selector),
sc->selector);
stq_phys(cs->as, addr + offsetof(struct vmcb_seg, base),
@@ -102,7 +103,7 @@ static inline void svm_save_seg(CPUX86State *env, hwaddr addr,
static inline void svm_load_seg(CPUX86State *env, hwaddr addr,
SegmentCache *sc)
{
- CPUState *cs = ENV_GET_CPU(env);
+ CPUState *cs = CPU(x86_env_get_cpu(env));
unsigned int flags;
sc->selector = lduw_phys(cs->as,
@@ -125,7 +126,7 @@ static inline void svm_load_seg_cache(CPUX86State *env, hwaddr addr,
void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
- CPUState *cs = ENV_GET_CPU(env);
+ CPUState *cs = CPU(x86_env_get_cpu(env));
target_ulong addr;
uint32_t event_inj;
uint32_t int_ctl;
@@ -293,7 +294,7 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
break;
case TLB_CONTROL_FLUSH_ALL_ASID:
/* FIXME: this is not 100% correct but should work for now */
- tlb_flush(env, 1);
+ tlb_flush(cs, 1);
break;
}
@@ -319,7 +320,7 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
/* FIXME: need to implement valid_err */
switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
case SVM_EVTINJ_TYPE_INTR:
- env->exception_index = vector;
+ cs->exception_index = vector;
env->error_code = event_inj_err;
env->exception_is_int = 0;
env->exception_next_eip = -1;
@@ -328,31 +329,31 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
do_interrupt_x86_hardirq(env, vector, 1);
break;
case SVM_EVTINJ_TYPE_NMI:
- env->exception_index = EXCP02_NMI;
+ cs->exception_index = EXCP02_NMI;
env->error_code = event_inj_err;
env->exception_is_int = 0;
env->exception_next_eip = env->eip;
qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
- cpu_loop_exit(env);
+ cpu_loop_exit(cs);
break;
case SVM_EVTINJ_TYPE_EXEPT:
- env->exception_index = vector;
+ cs->exception_index = vector;
env->error_code = event_inj_err;
env->exception_is_int = 0;
env->exception_next_eip = -1;
qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
- cpu_loop_exit(env);
+ cpu_loop_exit(cs);
break;
case SVM_EVTINJ_TYPE_SOFT:
- env->exception_index = vector;
+ cs->exception_index = vector;
env->error_code = event_inj_err;
env->exception_is_int = 1;
env->exception_next_eip = env->eip;
qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
- cpu_loop_exit(env);
+ cpu_loop_exit(cs);
break;
}
- qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index,
+ qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", cs->exception_index,
env->error_code);
}
}
@@ -365,7 +366,7 @@ void helper_vmmcall(CPUX86State *env)
void helper_vmload(CPUX86State *env, int aflag)
{
- CPUState *cs = ENV_GET_CPU(env);
+ CPUState *cs = CPU(x86_env_get_cpu(env));
target_ulong addr;
cpu_svm_check_intercept_param(env, SVM_EXIT_VMLOAD, 0);
@@ -405,7 +406,7 @@ void helper_vmload(CPUX86State *env, int aflag)
void helper_vmsave(CPUX86State *env, int aflag)
{
- CPUState *cs = ENV_GET_CPU(env);
+ CPUState *cs = CPU(x86_env_get_cpu(env));
target_ulong addr;
cpu_svm_check_intercept_param(env, SVM_EXIT_VMSAVE, 0);
@@ -468,6 +469,7 @@ void helper_skinit(CPUX86State *env)
void helper_invlpga(CPUX86State *env, int aflag)
{
+ X86CPU *cpu = x86_env_get_cpu(env);
target_ulong addr;
cpu_svm_check_intercept_param(env, SVM_EXIT_INVLPGA, 0);
@@ -480,13 +482,13 @@ void helper_invlpga(CPUX86State *env, int aflag)
/* XXX: could use the ASID to see if it is needed to do the
flush */
- tlb_flush_page(env, addr);
+ tlb_flush_page(CPU(cpu), addr);
}
void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
uint64_t param)
{
- CPUState *cs = ENV_GET_CPU(env);
+ CPUState *cs = CPU(x86_env_get_cpu(env));
if (likely(!(env->hflags & HF_SVMI_MASK))) {
return;
@@ -568,7 +570,8 @@ void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
uint32_t next_eip_addend)
{
- CPUState *cs = ENV_GET_CPU(env);
+ CPUState *cs = CPU(x86_env_get_cpu(env));
+
if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
/* FIXME: this should be read in at vmrun (faster this way?) */
uint64_t addr = ldq_phys(cs->as, env->vm_vmcb +
@@ -766,11 +769,11 @@ void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
#GP fault is delivered inside the host. */
/* remove any pending exception */
- env->exception_index = -1;
+ cs->exception_index = -1;
env->error_code = 0;
env->old_exception = -1;
- cpu_loop_exit(env);
+ cpu_loop_exit(cs);
}
void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
diff --git a/target-i386/translate.c b/target-i386/translate.c
index 707ebd5ca0..02625e31c2 100644
--- a/target-i386/translate.c
+++ b/target-i386/translate.c
@@ -7965,8 +7965,8 @@ static inline void gen_intermediate_code_internal(X86CPU *cpu,
gen_tb_start();
for(;;) {
- if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
- QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
+ if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
+ QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
if (bp->pc == pc_ptr &&
!((bp->flags & BP_CPU) && (tb->flags & HF_RF_MASK))) {
gen_debug(dc, pc_ptr - dc->cs_base);
diff --git a/target-lm32/cpu.c b/target-lm32/cpu.c
index 7e716fb336..c5c20d74c4 100644
--- a/target-lm32/cpu.c
+++ b/target-lm32/cpu.c
@@ -110,6 +110,11 @@ static void lm32_cpu_init_cfg_reg(LM32CPU *cpu)
env->cfg = cfg;
}
+static bool lm32_cpu_has_work(CPUState *cs)
+{
+ return cs->interrupt_request & CPU_INTERRUPT_HARD;
+}
+
/* CPUClass::reset() */
static void lm32_cpu_reset(CPUState *s)
{
@@ -120,10 +125,10 @@ static void lm32_cpu_reset(CPUState *s)
lcc->parent_reset(s);
/* reset cpu state */
- memset(env, 0, offsetof(CPULM32State, breakpoints));
+ memset(env, 0, offsetof(CPULM32State, eba));
lm32_cpu_init_cfg_reg(cpu);
- tlb_flush(env, 1);
+ tlb_flush(s, 1);
}
static void lm32_cpu_realizefn(DeviceState *dev, Error **errp)
@@ -255,12 +260,15 @@ static void lm32_cpu_class_init(ObjectClass *oc, void *data)
cc->reset = lm32_cpu_reset;
cc->class_by_name = lm32_cpu_class_by_name;
+ cc->has_work = lm32_cpu_has_work;
cc->do_interrupt = lm32_cpu_do_interrupt;
cc->dump_state = lm32_cpu_dump_state;
cc->set_pc = lm32_cpu_set_pc;
cc->gdb_read_register = lm32_cpu_gdb_read_register;
cc->gdb_write_register = lm32_cpu_gdb_write_register;
-#ifndef CONFIG_USER_ONLY
+#ifdef CONFIG_USER_ONLY
+ cc->handle_mmu_fault = lm32_cpu_handle_mmu_fault;
+#else
cc->get_phys_page_debug = lm32_cpu_get_phys_page_debug;
cc->vmsd = &vmstate_lm32_cpu;
#endif
diff --git a/target-lm32/cpu.h b/target-lm32/cpu.h
index 18cf3488f7..24bde78502 100644
--- a/target-lm32/cpu.h
+++ b/target-lm32/cpu.h
@@ -166,11 +166,12 @@ struct CPULM32State {
uint32_t bp[4]; /* breakpoints */
uint32_t wp[4]; /* watchpoints */
- CPUBreakpoint * cpu_breakpoint[4];
- CPUWatchpoint * cpu_watchpoint[4];
+ struct CPUBreakpoint *cpu_breakpoint[4];
+ struct CPUWatchpoint *cpu_watchpoint[4];
CPU_COMMON
+ /* Fields from here on are preserved across CPU reset. */
uint32_t eba; /* exception base address */
uint32_t deba; /* debug exception base address */
@@ -231,9 +232,8 @@ static inline CPULM32State *cpu_init(const char *cpu_model)
#define cpu_gen_code cpu_lm32_gen_code
#define cpu_signal_handler cpu_lm32_signal_handler
-int cpu_lm32_handle_mmu_fault(CPULM32State *env, target_ulong address, int rw,
+int lm32_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw,
int mmu_idx);
-#define cpu_handle_mmu_fault cpu_lm32_handle_mmu_fault
#include "exec/cpu-all.h"
@@ -245,11 +245,6 @@ static inline void cpu_get_tb_cpu_state(CPULM32State *env, target_ulong *pc,
*flags = 0;
}
-static inline bool cpu_has_work(CPUState *cpu)
-{
- return cpu->interrupt_request & CPU_INTERRUPT_HARD;
-}
-
#include "exec/exec-all.h"
#endif
diff --git a/target-lm32/helper.c b/target-lm32/helper.c
index eecb9f612e..783aa16a45 100644
--- a/target-lm32/helper.c
+++ b/target-lm32/helper.c
@@ -20,18 +20,20 @@
#include "cpu.h"
#include "qemu/host-utils.h"
-int cpu_lm32_handle_mmu_fault(CPULM32State *env, target_ulong address, int rw,
+int lm32_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
int mmu_idx)
{
+ LM32CPU *cpu = LM32_CPU(cs);
+ CPULM32State *env = &cpu->env;
int prot;
address &= TARGET_PAGE_MASK;
prot = PAGE_BITS;
if (env->flags & LM32_FLAG_IGNORE_MSB) {
- tlb_set_page(env, address, address & 0x7fffffff, prot, mmu_idx,
- TARGET_PAGE_SIZE);
+ tlb_set_page(cs, address, address & 0x7fffffff, prot, mmu_idx,
+ TARGET_PAGE_SIZE);
} else {
- tlb_set_page(env, address, address, prot, mmu_idx, TARGET_PAGE_SIZE);
+ tlb_set_page(cs, address, address, prot, mmu_idx, TARGET_PAGE_SIZE);
}
return 0;
@@ -51,22 +53,28 @@ hwaddr lm32_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
void lm32_breakpoint_insert(CPULM32State *env, int idx, target_ulong address)
{
- cpu_breakpoint_insert(env, address, BP_CPU, &env->cpu_breakpoint[idx]);
+ LM32CPU *cpu = lm32_env_get_cpu(env);
+
+ cpu_breakpoint_insert(CPU(cpu), address, BP_CPU,
+ &env->cpu_breakpoint[idx]);
}
void lm32_breakpoint_remove(CPULM32State *env, int idx)
{
+ LM32CPU *cpu = lm32_env_get_cpu(env);
+
if (!env->cpu_breakpoint[idx]) {
return;
}
- cpu_breakpoint_remove_by_ref(env, env->cpu_breakpoint[idx]);
+ cpu_breakpoint_remove_by_ref(CPU(cpu), env->cpu_breakpoint[idx]);
env->cpu_breakpoint[idx] = NULL;
}
void lm32_watchpoint_insert(CPULM32State *env, int idx, target_ulong address,
lm32_wp_t wp_type)
{
+ LM32CPU *cpu = lm32_env_get_cpu(env);
int flags = 0;
switch (wp_type) {
@@ -85,18 +93,20 @@ void lm32_watchpoint_insert(CPULM32State *env, int idx, target_ulong address,
}
if (flags != 0) {
- cpu_watchpoint_insert(env, address, 1, flags,
+ cpu_watchpoint_insert(CPU(cpu), address, 1, flags,
&env->cpu_watchpoint[idx]);
}
}
void lm32_watchpoint_remove(CPULM32State *env, int idx)
{
+ LM32CPU *cpu = lm32_env_get_cpu(env);
+
if (!env->cpu_watchpoint[idx]) {
return;
}
- cpu_watchpoint_remove_by_ref(env, env->cpu_watchpoint[idx]);
+ cpu_watchpoint_remove_by_ref(CPU(cpu), env->cpu_watchpoint[idx]);
env->cpu_watchpoint[idx] = NULL;
}
@@ -116,19 +126,20 @@ static bool check_watchpoints(CPULM32State *env)
void lm32_debug_excp_handler(CPULM32State *env)
{
+ CPUState *cs = CPU(lm32_env_get_cpu(env));
CPUBreakpoint *bp;
- if (env->watchpoint_hit) {
- if (env->watchpoint_hit->flags & BP_CPU) {
- env->watchpoint_hit = NULL;
+ if (cs->watchpoint_hit) {
+ if (cs->watchpoint_hit->flags & BP_CPU) {
+ cs->watchpoint_hit = NULL;
if (check_watchpoints(env)) {
raise_exception(env, EXCP_WATCHPOINT);
} else {
- cpu_resume_from_signal(env, NULL);
+ cpu_resume_from_signal(cs, NULL);
}
}
} else {
- QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
+ QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
if (bp->pc == env->pc) {
if (bp->flags & BP_CPU) {
raise_exception(env, EXCP_BREAKPOINT);
@@ -145,9 +156,9 @@ void lm32_cpu_do_interrupt(CPUState *cs)
CPULM32State *env = &cpu->env;
qemu_log_mask(CPU_LOG_INT,
- "exception at pc=%x type=%x\n", env->pc, env->exception_index);
+ "exception at pc=%x type=%x\n", env->pc, cs->exception_index);
- switch (env->exception_index) {
+ switch (cs->exception_index) {
case EXCP_INSN_BUS_ERROR:
case EXCP_DATA_BUS_ERROR:
case EXCP_DIVIDE_BY_ZERO:
@@ -158,9 +169,9 @@ void lm32_cpu_do_interrupt(CPUState *cs)
env->ie |= (env->ie & IE_IE) ? IE_EIE : 0;
env->ie &= ~IE_IE;
if (env->dc & DC_RE) {
- env->pc = env->deba + (env->exception_index * 32);
+ env->pc = env->deba + (cs->exception_index * 32);
} else {
- env->pc = env->eba + (env->exception_index * 32);
+ env->pc = env->eba + (cs->exception_index * 32);
}
log_cpu_state_mask(CPU_LOG_INT, cs, 0);
break;
@@ -170,30 +181,19 @@ void lm32_cpu_do_interrupt(CPUState *cs)
env->regs[R_BA] = env->pc;
env->ie |= (env->ie & IE_IE) ? IE_BIE : 0;
env->ie &= ~IE_IE;
- env->pc = env->deba + (env->exception_index * 32);
+ env->pc = env->deba + (cs->exception_index * 32);
log_cpu_state_mask(CPU_LOG_INT, cs, 0);
break;
default:
- cpu_abort(env, "unhandled exception type=%d\n",
- env->exception_index);
+ cpu_abort(cs, "unhandled exception type=%d\n",
+ cs->exception_index);
break;
}
}
LM32CPU *cpu_lm32_init(const char *cpu_model)
{
- LM32CPU *cpu;
- ObjectClass *oc;
-
- oc = cpu_class_by_name(TYPE_LM32_CPU, cpu_model);
- if (oc == NULL) {
- return NULL;
- }
- cpu = LM32_CPU(object_new(object_class_get_name(oc)));
-
- object_property_set_bool(OBJECT(cpu), true, "realized", NULL);
-
- return cpu;
+ return LM32_CPU(cpu_generic_init(TYPE_LM32_CPU, cpu_model));
}
/* Some soc ignores the MSB on the address bus. Thus creating a shadow memory
diff --git a/target-lm32/op_helper.c b/target-lm32/op_helper.c
index 7189cb5cc4..2f36b7b053 100644
--- a/target-lm32/op_helper.c
+++ b/target-lm32/op_helper.c
@@ -25,8 +25,10 @@
void raise_exception(CPULM32State *env, int index)
{
- env->exception_index = index;
- cpu_loop_exit(env);
+ CPUState *cs = CPU(lm32_env_get_cpu(env));
+
+ cs->exception_index = index;
+ cpu_loop_exit(cs);
}
void HELPER(raise_exception)(CPULM32State *env, uint32_t index)
@@ -39,8 +41,8 @@ void HELPER(hlt)(CPULM32State *env)
CPUState *cs = CPU(lm32_env_get_cpu(env));
cs->halted = 1;
- env->exception_index = EXCP_HLT;
- cpu_loop_exit(env);
+ cs->exception_index = EXCP_HLT;
+ cpu_loop_exit(cs);
}
void HELPER(ill)(CPULM32State *env)
@@ -148,20 +150,21 @@ uint32_t HELPER(rcsr_jrx)(CPULM32State *env)
}
/* Try to fill the TLB and return an exception if error. If retaddr is
- NULL, it means that the function was called in C code (i.e. not
- from generated code or from helper.c) */
-void tlb_fill(CPULM32State *env, target_ulong addr, int is_write, int mmu_idx,
+ * NULL, it means that the function was called in C code (i.e. not
+ * from generated code or from helper.c)
+ */
+void tlb_fill(CPUState *cs, target_ulong addr, int is_write, int mmu_idx,
uintptr_t retaddr)
{
int ret;
- ret = cpu_lm32_handle_mmu_fault(env, addr, is_write, mmu_idx);
+ ret = lm32_cpu_handle_mmu_fault(cs, addr, is_write, mmu_idx);
if (unlikely(ret)) {
if (retaddr) {
/* now we have a real cpu fault */
- cpu_restore_state(env, retaddr);
+ cpu_restore_state(cs, retaddr);
}
- cpu_loop_exit(env);
+ cpu_loop_exit(cs);
}
}
#endif
diff --git a/target-lm32/translate.c b/target-lm32/translate.c
index 80bffc7b27..c8abd1f27e 100644
--- a/target-lm32/translate.c
+++ b/target-lm32/translate.c
@@ -1037,10 +1037,11 @@ static inline void decode(DisasContext *dc, uint32_t ir)
static void check_breakpoint(CPULM32State *env, DisasContext *dc)
{
+ CPUState *cs = CPU(lm32_env_get_cpu(env));
CPUBreakpoint *bp;
- if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
- QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
+ if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
+ QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
if (bp->pc == dc->pc) {
tcg_gen_movi_tl(cpu_pc, dc->pc);
t_gen_raise_exception(dc, EXCP_DEBUG);
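
The lm32 hunks above also introduce the other recurring change in this series: the per-target inline cpu_has_work() in cpu.h is replaced by a CPUClass::has_work hook, and the renamed MMU fault handler is registered as CPUClass::handle_mmu_fault for user-mode builds. A minimal sketch of the class_init wiring follows; again it is illustrative only, with a hypothetical "foo" target standing in for lm32/m68k/microblaze/mips.

/* Sketch only; "foo" is a placeholder target, not code from this series. */
static bool foo_cpu_has_work(CPUState *cs)
{
    return cs->interrupt_request & CPU_INTERRUPT_HARD;
}

static void foo_cpu_class_init(ObjectClass *oc, void *data)
{
    CPUClass *cc = CPU_CLASS(oc);

    cc->has_work = foo_cpu_has_work;    /* replaces the inline cpu_has_work() */
#ifdef CONFIG_USER_ONLY
    cc->handle_mmu_fault = foo_cpu_handle_mmu_fault;
#endif
}
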
diff --git a/target-m68k/cpu.c b/target-m68k/cpu.c
index 008d8db2da..c9cff19efc 100644
--- a/target-m68k/cpu.c
+++ b/target-m68k/cpu.c
@@ -30,6 +30,11 @@ static void m68k_cpu_set_pc(CPUState *cs, vaddr value)
cpu->env.pc = value;
}
+static bool m68k_cpu_has_work(CPUState *cs)
+{
+ return cs->interrupt_request & CPU_INTERRUPT_HARD;
+}
+
static void m68k_set_feature(CPUM68KState *env, int feature)
{
env->features |= (1u << feature);
@@ -44,7 +49,7 @@ static void m68k_cpu_reset(CPUState *s)
mcc->parent_reset(s);
- memset(env, 0, offsetof(CPUM68KState, breakpoints));
+ memset(env, 0, offsetof(CPUM68KState, features));
#if !defined(CONFIG_USER_ONLY)
env->sr = 0x2700;
#endif
@@ -53,7 +58,7 @@ static void m68k_cpu_reset(CPUState *s)
env->cc_op = CC_OP_FLAGS;
/* TODO: We should set PC from the interrupt vector. */
env->pc = 0;
- tlb_flush(env, 1);
+ tlb_flush(s, 1);
}
/* CPU models */
@@ -189,12 +194,15 @@ static void m68k_cpu_class_init(ObjectClass *c, void *data)
cc->reset = m68k_cpu_reset;
cc->class_by_name = m68k_cpu_class_by_name;
+ cc->has_work = m68k_cpu_has_work;
cc->do_interrupt = m68k_cpu_do_interrupt;
cc->dump_state = m68k_cpu_dump_state;
cc->set_pc = m68k_cpu_set_pc;
cc->gdb_read_register = m68k_cpu_gdb_read_register;
cc->gdb_write_register = m68k_cpu_gdb_write_register;
-#ifndef CONFIG_USER_ONLY
+#ifdef CONFIG_USER_ONLY
+ cc->handle_mmu_fault = m68k_cpu_handle_mmu_fault;
+#else
cc->get_phys_page_debug = m68k_cpu_get_phys_page_debug;
#endif
dc->vmsd = &vmstate_m68k_cpu;
diff --git a/target-m68k/cpu.h b/target-m68k/cpu.h
index cfd6846347..6e4001d523 100644
--- a/target-m68k/cpu.h
+++ b/target-m68k/cpu.h
@@ -110,6 +110,7 @@ typedef struct CPUM68KState {
CPU_COMMON
+ /* Fields from here on are preserved across CPU reset. */
uint32_t features;
} CPUM68KState;
@@ -237,9 +238,8 @@ static inline int cpu_mmu_index (CPUM68KState *env)
return (env->sr & SR_S) == 0 ? 1 : 0;
}
-int cpu_m68k_handle_mmu_fault(CPUM68KState *env, target_ulong address, int rw,
+int m68k_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw,
int mmu_idx);
-#define cpu_handle_mmu_fault cpu_m68k_handle_mmu_fault
#include "exec/cpu-all.h"
@@ -253,11 +253,6 @@ static inline void cpu_get_tb_cpu_state(CPUM68KState *env, target_ulong *pc,
| ((env->macsr >> 4) & 0xf); /* Bits 0-3 */
}
-static inline bool cpu_has_work(CPUState *cpu)
-{
- return cpu->interrupt_request & CPU_INTERRUPT_HARD;
-}
-
#include "exec/exec-all.h"
#endif
diff --git a/target-m68k/helper.c b/target-m68k/helper.c
index a364eb1e5c..077b653f24 100644
--- a/target-m68k/helper.c
+++ b/target-m68k/helper.c
@@ -132,6 +132,7 @@ void m68k_cpu_init_gdb(M68kCPU *cpu)
void cpu_m68k_flush_flags(CPUM68KState *env, int cc_op)
{
+ M68kCPU *cpu = m68k_env_get_cpu(env);
int flags;
uint32_t src;
uint32_t dest;
@@ -204,7 +205,7 @@ void cpu_m68k_flush_flags(CPUM68KState *env, int cc_op)
flags |= CCF_C;
break;
default:
- cpu_abort(env, "Bad CC_OP %d", cc_op);
+ cpu_abort(CPU(cpu), "Bad CC_OP %d", cc_op);
}
env->cc_op = CC_OP_FLAGS;
env->cc_dest = flags;
@@ -212,6 +213,8 @@ void cpu_m68k_flush_flags(CPUM68KState *env, int cc_op)
void HELPER(movec)(CPUM68KState *env, uint32_t reg, uint32_t val)
{
+ M68kCPU *cpu = m68k_env_get_cpu(env);
+
switch (reg) {
case 0x02: /* CACR */
env->cacr = val;
@@ -225,7 +228,7 @@ void HELPER(movec)(CPUM68KState *env, uint32_t reg, uint32_t val)
break;
/* TODO: Implement control registers. */
default:
- cpu_abort(env, "Unimplemented control register write 0x%x = 0x%x\n",
+ cpu_abort(CPU(cpu), "Unimplemented control register write 0x%x = 0x%x\n",
reg, val);
}
}
@@ -277,11 +280,13 @@ void m68k_switch_sp(CPUM68KState *env)
#if defined(CONFIG_USER_ONLY)
-int cpu_m68k_handle_mmu_fault (CPUM68KState *env, target_ulong address, int rw,
- int mmu_idx)
+int m68k_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
+ int mmu_idx)
{
- env->exception_index = EXCP_ACCESS;
- env->mmu.ar = address;
+ M68kCPU *cpu = M68K_CPU(cs);
+
+ cs->exception_index = EXCP_ACCESS;
+ cpu->env.mmu.ar = address;
return 1;
}
@@ -295,14 +300,14 @@ hwaddr m68k_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
return addr;
}
-int cpu_m68k_handle_mmu_fault (CPUM68KState *env, target_ulong address, int rw,
- int mmu_idx)
+int m68k_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
+ int mmu_idx)
{
int prot;
address &= TARGET_PAGE_MASK;
prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
- tlb_set_page(env, address, address, prot, mmu_idx, TARGET_PAGE_SIZE);
+ tlb_set_page(cs, address, address, prot, mmu_idx, TARGET_PAGE_SIZE);
return 0;
}
diff --git a/target-m68k/m68k-semi.c b/target-m68k/m68k-semi.c
index 94c4983813..9dffe8de60 100644
--- a/target-m68k/m68k-semi.c
+++ b/target-m68k/m68k-semi.c
@@ -428,7 +428,8 @@ void do_m68k_semihosting(CPUM68KState *env, int nr)
case HOSTED_INIT_SIM:
#if defined(CONFIG_USER_ONLY)
{
- TaskState *ts = env->opaque;
+ CPUState *cs = CPU(m68k_env_get_cpu(env));
+ TaskState *ts = cs->opaque;
/* Allocate the heap using sbrk. */
if (!ts->heap_limit) {
abi_ulong ret;
@@ -460,7 +461,7 @@ void do_m68k_semihosting(CPUM68KState *env, int nr)
#endif
return;
default:
- cpu_abort(env, "Unsupported semihosting syscall %d\n", nr);
+ cpu_abort(CPU(m68k_env_get_cpu(env)), "Unsupported semihosting syscall %d\n", nr);
result = 0;
}
failed:
diff --git a/target-m68k/op_helper.c b/target-m68k/op_helper.c
index bbbfd7f130..06302b1071 100644
--- a/target-m68k/op_helper.c
+++ b/target-m68k/op_helper.c
@@ -23,10 +23,7 @@
void m68k_cpu_do_interrupt(CPUState *cs)
{
- M68kCPU *cpu = M68K_CPU(cs);
- CPUM68KState *env = &cpu->env;
-
- env->exception_index = -1;
+ cs->exception_index = -1;
}
void do_interrupt_m68k_hardirq(CPUM68KState *env)
@@ -56,18 +53,18 @@ extern int semihosting_enabled;
/* Try to fill the TLB and return an exception if error. If retaddr is
NULL, it means that the function was called in C code (i.e. not
from generated code or from helper.c) */
-void tlb_fill(CPUM68KState *env, target_ulong addr, int is_write, int mmu_idx,
+void tlb_fill(CPUState *cs, target_ulong addr, int is_write, int mmu_idx,
uintptr_t retaddr)
{
int ret;
- ret = cpu_m68k_handle_mmu_fault(env, addr, is_write, mmu_idx);
+ ret = m68k_cpu_handle_mmu_fault(cs, addr, is_write, mmu_idx);
if (unlikely(ret)) {
if (retaddr) {
/* now we have a real cpu fault */
- cpu_restore_state(env, retaddr);
+ cpu_restore_state(cs, retaddr);
}
- cpu_loop_exit(env);
+ cpu_loop_exit(cs);
}
}
@@ -87,7 +84,7 @@ static void do_rte(CPUM68KState *env)
static void do_interrupt_all(CPUM68KState *env, int is_hw)
{
- CPUState *cs;
+ CPUState *cs = CPU(m68k_env_get_cpu(env));
uint32_t sp;
uint32_t fmt;
uint32_t retaddr;
@@ -97,7 +94,7 @@ static void do_interrupt_all(CPUM68KState *env, int is_hw)
retaddr = env->pc;
if (!is_hw) {
- switch (env->exception_index) {
+ switch (cs->exception_index) {
case EXCP_RTE:
/* Return from an exception. */
do_rte(env);
@@ -112,20 +109,19 @@ static void do_interrupt_all(CPUM68KState *env, int is_hw)
do_m68k_semihosting(env, env->dregs[0]);
return;
}
- cs = CPU(m68k_env_get_cpu(env));
cs->halted = 1;
- env->exception_index = EXCP_HLT;
- cpu_loop_exit(env);
+ cs->exception_index = EXCP_HLT;
+ cpu_loop_exit(cs);
return;
}
- if (env->exception_index >= EXCP_TRAP0
- && env->exception_index <= EXCP_TRAP15) {
+ if (cs->exception_index >= EXCP_TRAP0
+ && cs->exception_index <= EXCP_TRAP15) {
/* Move the PC after the trap instruction. */
retaddr += 2;
}
}
- vector = env->exception_index << 2;
+ vector = cs->exception_index << 2;
sp = env->aregs[7];
@@ -168,8 +164,10 @@ void do_interrupt_m68k_hardirq(CPUM68KState *env)
static void raise_exception(CPUM68KState *env, int tt)
{
- env->exception_index = tt;
- cpu_loop_exit(env);
+ CPUState *cs = CPU(m68k_env_get_cpu(env));
+
+ cs->exception_index = tt;
+ cpu_loop_exit(cs);
}
void HELPER(raise_exception)(CPUM68KState *env, uint32_t tt)
diff --git a/target-m68k/qregs.def b/target-m68k/qregs.def
index 4235b02764..204663e1aa 100644
--- a/target-m68k/qregs.def
+++ b/target-m68k/qregs.def
@@ -7,6 +7,5 @@ DEFO32(CC_SRC, cc_src)
DEFO32(CC_X, cc_x)
DEFO32(DIV1, div1)
DEFO32(DIV2, div2)
-DEFO32(EXCEPTION, exception_index)
DEFO32(MACSR, macsr)
DEFO32(MAC_MASK, mac_mask)
diff --git a/target-m68k/translate.c b/target-m68k/translate.c
index f54b94a53f..cd662891c8 100644
--- a/target-m68k/translate.c
+++ b/target-m68k/translate.c
@@ -43,6 +43,7 @@
#undef DEFF64
static TCGv_i32 cpu_halted;
+static TCGv_i32 cpu_exception_index;
static TCGv_ptr cpu_env;
@@ -81,6 +82,10 @@ void m68k_tcg_init(void)
cpu_halted = tcg_global_mem_new_i32(TCG_AREG0,
-offsetof(M68kCPU, env) +
offsetof(CPUState, halted), "HALTED");
+ cpu_exception_index = tcg_global_mem_new_i32(TCG_AREG0,
+ -offsetof(M68kCPU, env) +
+ offsetof(CPUState, exception_index),
+ "EXCEPTION");
cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
@@ -110,14 +115,6 @@ void m68k_tcg_init(void)
store_dummy = tcg_global_mem_new(TCG_AREG0, -8, "NULL");
}
-static inline void qemu_assert(int cond, const char *msg)
-{
- if (!cond) {
- fprintf (stderr, "badness: %s\n", msg);
- abort();
- }
-}
-
/* internal defines */
typedef struct DisasContext {
CPUM68KState *env;
@@ -199,7 +196,7 @@ static inline TCGv gen_load(DisasContext * s, int opsize, TCGv addr, int sign)
tcg_gen_qemu_ld32u(tmp, addr, index);
break;
default:
- qemu_assert(0, "bad load size");
+ g_assert_not_reached();
}
gen_throws_exception = gen_last_qop;
return tmp;
@@ -233,7 +230,7 @@ static inline void gen_store(DisasContext *s, int opsize, TCGv addr, TCGv val)
tcg_gen_qemu_st32(val, addr, index);
break;
default:
- qemu_assert(0, "bad store size");
+ g_assert_not_reached();
}
gen_throws_exception = gen_last_qop;
}
@@ -437,8 +434,7 @@ static inline int opsize_bytes(int opsize)
case OS_SINGLE: return 4;
case OS_DOUBLE: return 8;
default:
- qemu_assert(0, "bad operand size");
- return 0;
+ g_assert_not_reached();
}
}
@@ -465,8 +461,7 @@ static void gen_partset_reg(int opsize, TCGv reg, TCGv val)
tcg_gen_mov_i32(reg, val);
break;
default:
- qemu_assert(0, "Bad operand size");
- break;
+ g_assert_not_reached();
}
}
@@ -495,7 +490,7 @@ static inline TCGv gen_extend(TCGv val, int opsize, int sign)
tmp = val;
break;
default:
- qemu_assert(0, "Bad operand size");
+ g_assert_not_reached();
}
return tmp;
}
@@ -669,7 +664,7 @@ static TCGv gen_ea(CPUM68KState *env, DisasContext *s, uint16_t insn,
offset = read_im32(env, s);
break;
default:
- qemu_assert(0, "Bad immediate operand");
+ g_assert_not_reached();
}
return tcg_const_i32(offset);
default:
@@ -886,8 +881,10 @@ DISAS_INSN(undef_fpu)
DISAS_INSN(undef)
{
+ M68kCPU *cpu = m68k_env_get_cpu(env);
+
gen_exception(s, s->pc - 2, EXCP_UNSUPPORTED);
- cpu_abort(env, "Illegal instruction: %04x @ %08x", insn, s->pc - 2);
+ cpu_abort(CPU(cpu), "Illegal instruction: %04x @ %08x", insn, s->pc - 2);
}
DISAS_INSN(mulw)
@@ -2087,12 +2084,14 @@ DISAS_INSN(wddata)
DISAS_INSN(wdebug)
{
+ M68kCPU *cpu = m68k_env_get_cpu(env);
+
if (IS_USER(s)) {
gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
return;
}
/* TODO: Implement wdebug. */
- qemu_assert(0, "WDEBUG not implemented");
+ cpu_abort(CPU(cpu), "WDEBUG not implemented");
}
DISAS_INSN(trap)
@@ -2466,14 +2465,18 @@ DISAS_INSN(fbcc)
DISAS_INSN(frestore)
{
+ M68kCPU *cpu = m68k_env_get_cpu(env);
+
/* TODO: Implement frestore. */
- qemu_assert(0, "FRESTORE not implemented");
+ cpu_abort(CPU(cpu), "FRESTORE not implemented");
}
DISAS_INSN(fsave)
{
+ M68kCPU *cpu = m68k_env_get_cpu(env);
+
/* TODO: Implement fsave. */
- qemu_assert(0, "FSAVE not implemented");
+ cpu_abort(CPU(cpu), "FSAVE not implemented");
}
static inline TCGv gen_mac_extract_word(DisasContext *s, TCGv val, int upper)
@@ -3008,8 +3011,8 @@ gen_intermediate_code_internal(M68kCPU *cpu, TranslationBlock *tb,
do {
pc_offset = dc->pc - pc_start;
gen_throws_exception = NULL;
- if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
- QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
+ if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
+ QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
if (bp->pc == dc->pc) {
gen_exception(dc, dc->pc, EXCP_DEBUG);
dc->is_jmp = DISAS_JUMP;
diff --git a/target-microblaze/cpu.c b/target-microblaze/cpu.c
index f108c0b521..8e0481186a 100644
--- a/target-microblaze/cpu.c
+++ b/target-microblaze/cpu.c
@@ -34,6 +34,11 @@ static void mb_cpu_set_pc(CPUState *cs, vaddr value)
cpu->env.sregs[SR_PC] = value;
}
+static bool mb_cpu_has_work(CPUState *cs)
+{
+ return cs->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_NMI);
+}
+
#ifndef CONFIG_USER_ONLY
static void microblaze_cpu_set_irq(void *opaque, int irq, int level)
{
@@ -58,9 +63,9 @@ static void mb_cpu_reset(CPUState *s)
mcc->parent_reset(s);
- memset(env, 0, offsetof(CPUMBState, breakpoints));
+ memset(env, 0, sizeof(CPUMBState));
env->res_addr = RES_ADDR_NONE;
- tlb_flush(env, 1);
+ tlb_flush(s, 1);
/* Disable stack protector. */
env->shr = ~0;
@@ -160,12 +165,15 @@ static void mb_cpu_class_init(ObjectClass *oc, void *data)
mcc->parent_reset = cc->reset;
cc->reset = mb_cpu_reset;
+ cc->has_work = mb_cpu_has_work;
cc->do_interrupt = mb_cpu_do_interrupt;
cc->dump_state = mb_cpu_dump_state;
cc->set_pc = mb_cpu_set_pc;
cc->gdb_read_register = mb_cpu_gdb_read_register;
cc->gdb_write_register = mb_cpu_gdb_write_register;
-#ifndef CONFIG_USER_ONLY
+#ifdef CONFIG_USER_ONLY
+ cc->handle_mmu_fault = mb_cpu_handle_mmu_fault;
+#else
cc->do_unassigned_access = mb_cpu_unassigned_access;
cc->get_phys_page_debug = mb_cpu_get_phys_page_debug;
#endif
diff --git a/target-microblaze/cpu.h b/target-microblaze/cpu.h
index 1df014e92e..6ccd06068c 100644
--- a/target-microblaze/cpu.h
+++ b/target-microblaze/cpu.h
@@ -332,9 +332,8 @@ static inline int cpu_mmu_index (CPUMBState *env)
return MMU_KERNEL_IDX;
}
-int cpu_mb_handle_mmu_fault(CPUMBState *env, target_ulong address, int rw,
+int mb_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw,
int mmu_idx);
-#define cpu_handle_mmu_fault cpu_mb_handle_mmu_fault
static inline int cpu_interrupts_enabled(CPUMBState *env)
{
@@ -363,11 +362,6 @@ void mb_cpu_unassigned_access(CPUState *cpu, hwaddr addr,
unsigned size);
#endif
-static inline bool cpu_has_work(CPUState *cpu)
-{
- return cpu->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_NMI);
-}
-
#include "exec/exec-all.h"
#endif
diff --git a/target-microblaze/helper.c b/target-microblaze/helper.c
index 4fa9ce9cb5..59c9ad5aef 100644
--- a/target-microblaze/helper.c
+++ b/target-microblaze/helper.c
@@ -31,26 +31,26 @@ void mb_cpu_do_interrupt(CPUState *cs)
MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
CPUMBState *env = &cpu->env;
- env->exception_index = -1;
+ cs->exception_index = -1;
env->res_addr = RES_ADDR_NONE;
env->regs[14] = env->sregs[SR_PC];
}
-int cpu_mb_handle_mmu_fault(CPUMBState * env, target_ulong address, int rw,
+int mb_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
int mmu_idx)
{
- MicroBlazeCPU *cpu = mb_env_get_cpu(env);
-
- env->exception_index = 0xaa;
- cpu_dump_state(CPU(cpu), stderr, fprintf, 0);
+ cs->exception_index = 0xaa;
+ cpu_dump_state(cs, stderr, fprintf, 0);
return 1;
}
#else /* !CONFIG_USER_ONLY */
-int cpu_mb_handle_mmu_fault (CPUMBState *env, target_ulong address, int rw,
- int mmu_idx)
+int mb_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
+ int mmu_idx)
{
+ MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
+ CPUMBState *env = &cpu->env;
unsigned int hit;
unsigned int mmu_available;
int r = 1;
@@ -77,7 +77,7 @@ int cpu_mb_handle_mmu_fault (CPUMBState *env, target_ulong address, int rw,
DMMU(qemu_log("MMU map mmu=%d v=%x p=%x prot=%x\n",
mmu_idx, vaddr, paddr, lu.prot));
- tlb_set_page(env, vaddr, paddr, lu.prot, mmu_idx, TARGET_PAGE_SIZE);
+ tlb_set_page(cs, vaddr, paddr, lu.prot, mmu_idx, TARGET_PAGE_SIZE);
r = 0;
} else {
env->sregs[SR_EAR] = address;
@@ -97,18 +97,18 @@ int cpu_mb_handle_mmu_fault (CPUMBState *env, target_ulong address, int rw,
break;
}
- if (env->exception_index == EXCP_MMU) {
- cpu_abort(env, "recursive faults\n");
+ if (cs->exception_index == EXCP_MMU) {
+ cpu_abort(cs, "recursive faults\n");
}
/* TLB miss. */
- env->exception_index = EXCP_MMU;
+ cs->exception_index = EXCP_MMU;
}
} else {
/* MMU disabled or not available. */
address &= TARGET_PAGE_MASK;
prot = PAGE_BITS;
- tlb_set_page(env, address, address, prot, mmu_idx, TARGET_PAGE_SIZE);
+ tlb_set_page(cs, address, address, prot, mmu_idx, TARGET_PAGE_SIZE);
r = 0;
}
return r;
@@ -125,7 +125,7 @@ void mb_cpu_do_interrupt(CPUState *cs)
assert(!(env->iflags & (DRTI_FLAG | DRTE_FLAG | DRTB_FLAG)));
/* assert(env->sregs[SR_MSR] & (MSR_EE)); Only for HW exceptions. */
env->res_addr = RES_ADDR_NONE;
- switch (env->exception_index) {
+ switch (cs->exception_index) {
case EXCP_HW_EXCP:
if (!(env->pvr.regs[0] & PVR0_USE_EXC_MASK)) {
qemu_log("Exception raised on system without exceptions!\n");
@@ -251,7 +251,7 @@ void mb_cpu_do_interrupt(CPUState *cs)
env->sregs[SR_MSR] &= ~(MSR_VMS | MSR_UMS | MSR_VM | MSR_UM);
env->sregs[SR_MSR] |= t;
env->sregs[SR_MSR] |= MSR_BIP;
- if (env->exception_index == EXCP_HW_BREAK) {
+ if (cs->exception_index == EXCP_HW_BREAK) {
env->regs[16] = env->sregs[SR_PC];
env->sregs[SR_MSR] |= MSR_BIP;
env->sregs[SR_PC] = cpu->base_vectors + 0x18;
@@ -259,8 +259,8 @@ void mb_cpu_do_interrupt(CPUState *cs)
env->sregs[SR_PC] = env->btarget;
break;
default:
- cpu_abort(env, "unhandled exception type=%d\n",
- env->exception_index);
+ cpu_abort(cs, "unhandled exception type=%d\n",
+ cs->exception_index);
break;
}
}
diff --git a/target-microblaze/mmu.c b/target-microblaze/mmu.c
index 73bf8059be..728da133da 100644
--- a/target-microblaze/mmu.c
+++ b/target-microblaze/mmu.c
@@ -34,6 +34,7 @@ static unsigned int tlb_decode_size(unsigned int f)
static void mmu_flush_idx(CPUMBState *env, unsigned int idx)
{
+ CPUState *cs = CPU(mb_env_get_cpu(env));
struct microblaze_mmu *mmu = &env->mmu;
unsigned int tlb_size;
uint32_t tlb_tag, end, t;
@@ -47,7 +48,7 @@ static void mmu_flush_idx(CPUMBState *env, unsigned int idx)
end = tlb_tag + tlb_size;
while (tlb_tag < end) {
- tlb_flush_page(env, tlb_tag);
+ tlb_flush_page(cs, tlb_tag);
tlb_tag += TARGET_PAGE_SIZE;
}
}
@@ -218,6 +219,7 @@ uint32_t mmu_read(CPUMBState *env, uint32_t rn)
void mmu_write(CPUMBState *env, uint32_t rn, uint32_t v)
{
+ MicroBlazeCPU *cpu = mb_env_get_cpu(env);
unsigned int i;
D(qemu_log("%s rn=%d=%x old=%x\n", __func__, rn, v, env->mmu.regs[rn]));
@@ -251,7 +253,7 @@ void mmu_write(CPUMBState *env, uint32_t rn, uint32_t v)
/* Changes to the zone protection reg flush the QEMU TLB.
Fortunately, these are very uncommon. */
if (v != env->mmu.regs[rn]) {
- tlb_flush(env, 1);
+ tlb_flush(CPU(cpu), 1);
}
env->mmu.regs[rn] = v;
break;
diff --git a/target-microblaze/op_helper.c b/target-microblaze/op_helper.c
index 14baa84c74..f8fb7f9169 100644
--- a/target-microblaze/op_helper.c
+++ b/target-microblaze/op_helper.c
@@ -39,20 +39,21 @@
#include "exec/softmmu_template.h"
/* Try to fill the TLB and return an exception if error. If retaddr is
- NULL, it means that the function was called in C code (i.e. not
- from generated code or from helper.c) */
-void tlb_fill(CPUMBState *env, target_ulong addr, int is_write, int mmu_idx,
+ * NULL, it means that the function was called in C code (i.e. not
+ * from generated code or from helper.c)
+ */
+void tlb_fill(CPUState *cs, target_ulong addr, int is_write, int mmu_idx,
uintptr_t retaddr)
{
int ret;
- ret = cpu_mb_handle_mmu_fault(env, addr, is_write, mmu_idx);
+ ret = mb_cpu_handle_mmu_fault(cs, addr, is_write, mmu_idx);
if (unlikely(ret)) {
if (retaddr) {
/* now we have a real cpu fault */
- cpu_restore_state(env, retaddr);
+ cpu_restore_state(cs, retaddr);
}
- cpu_loop_exit(env);
+ cpu_loop_exit(cs);
}
}
#endif
@@ -94,8 +95,10 @@ uint32_t helper_get(uint32_t id, uint32_t ctrl)
void helper_raise_exception(CPUMBState *env, uint32_t index)
{
- env->exception_index = index;
- cpu_loop_exit(env);
+ CPUState *cs = CPU(mb_env_get_cpu(env));
+
+ cs->exception_index = index;
+ cpu_loop_exit(cs);
}
void helper_debug(CPUMBState *env)
diff --git a/target-microblaze/translate.c b/target-microblaze/translate.c
index 270138c6d2..782a489016 100644
--- a/target-microblaze/translate.c
+++ b/target-microblaze/translate.c
@@ -56,7 +56,7 @@ static TCGv env_res_val;
/* This is the state at translation time. */
typedef struct DisasContext {
- CPUMBState *env;
+ MicroBlazeCPU *cpu;
target_ulong pc;
/* Decoder. */
@@ -327,8 +327,8 @@ static void dec_pattern(DisasContext *dc)
int l1;
if ((dc->tb_flags & MSR_EE_FLAG)
- && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
- && !((dc->env->pvr.regs[2] & PVR2_USE_PCMP_INSTR))) {
+ && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
+ && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_PCMP_INSTR))) {
tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
t_gen_raise_exception(dc, EXCP_HW_EXCP);
}
@@ -370,7 +370,7 @@ static void dec_pattern(DisasContext *dc)
}
break;
default:
- cpu_abort(dc->env,
+ cpu_abort(CPU(dc->cpu),
"unsupported pattern insn opcode=%x\n", dc->opcode);
break;
}
@@ -441,9 +441,10 @@ static inline void msr_write(DisasContext *dc, TCGv v)
static void dec_msr(DisasContext *dc)
{
+ CPUState *cs = CPU(dc->cpu);
TCGv t0, t1;
unsigned int sr, to, rn;
- int mem_index = cpu_mmu_index(dc->env);
+ int mem_index = cpu_mmu_index(&dc->cpu->env);
sr = dc->imm & ((1 << 14) - 1);
to = dc->imm & (1 << 14);
@@ -458,7 +459,7 @@ static void dec_msr(DisasContext *dc)
LOG_DIS("msr%s r%d imm=%x\n", clr ? "clr" : "set",
dc->rd, dc->imm);
- if (!(dc->env->pvr.regs[2] & PVR2_USE_MSR_INSTR)) {
+ if (!(dc->cpu->env.pvr.regs[2] & PVR2_USE_MSR_INSTR)) {
/* nop??? */
return;
}
@@ -537,7 +538,7 @@ static void dec_msr(DisasContext *dc)
tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUMBState, shr));
break;
default:
- cpu_abort(dc->env, "unknown mts reg %x\n", sr);
+ cpu_abort(CPU(dc->cpu), "unknown mts reg %x\n", sr);
break;
}
} else {
@@ -586,7 +587,7 @@ static void dec_msr(DisasContext *dc)
cpu_env, offsetof(CPUMBState, pvr.regs[rn]));
break;
default:
- cpu_abort(dc->env, "unknown mfs reg %x\n", sr);
+ cpu_abort(cs, "unknown mfs reg %x\n", sr);
break;
}
}
@@ -643,8 +644,8 @@ static void dec_mul(DisasContext *dc)
unsigned int subcode;
if ((dc->tb_flags & MSR_EE_FLAG)
- && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
- && !(dc->env->pvr.regs[0] & PVR0_USE_HW_MUL_MASK)) {
+ && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
+ && !(dc->cpu->env.pvr.regs[0] & PVR0_USE_HW_MUL_MASK)) {
tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
t_gen_raise_exception(dc, EXCP_HW_EXCP);
return;
@@ -662,7 +663,7 @@ static void dec_mul(DisasContext *dc)
/* mulh, mulhsu and mulhu are not available if C_USE_HW_MUL is < 2. */
if (subcode >= 1 && subcode <= 3
- && !((dc->env->pvr.regs[2] & PVR2_USE_MUL64_MASK))) {
+ && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_MUL64_MASK))) {
/* nop??? */
}
@@ -684,7 +685,7 @@ static void dec_mul(DisasContext *dc)
t_gen_mulu(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
break;
default:
- cpu_abort(dc->env, "unknown MUL insn %x\n", subcode);
+ cpu_abort(CPU(dc->cpu), "unknown MUL insn %x\n", subcode);
break;
}
done:
@@ -700,8 +701,8 @@ static void dec_div(DisasContext *dc)
u = dc->imm & 2;
LOG_DIS("div\n");
- if ((dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
- && !((dc->env->pvr.regs[0] & PVR0_USE_DIV_MASK))) {
+ if ((dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
+ && !((dc->cpu->env.pvr.regs[0] & PVR0_USE_DIV_MASK))) {
tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
t_gen_raise_exception(dc, EXCP_HW_EXCP);
}
@@ -722,8 +723,8 @@ static void dec_barrel(DisasContext *dc)
unsigned int s, t;
if ((dc->tb_flags & MSR_EE_FLAG)
- && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
- && !(dc->env->pvr.regs[0] & PVR0_USE_BARREL_MASK)) {
+ && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
+ && !(dc->cpu->env.pvr.regs[0] & PVR0_USE_BARREL_MASK)) {
tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
t_gen_raise_exception(dc, EXCP_HW_EXCP);
return;
@@ -752,9 +753,10 @@ static void dec_barrel(DisasContext *dc)
static void dec_bit(DisasContext *dc)
{
+ CPUState *cs = CPU(dc->cpu);
TCGv t0;
unsigned int op;
- int mem_index = cpu_mmu_index(dc->env);
+ int mem_index = cpu_mmu_index(&dc->cpu->env);
op = dc->ir & ((1 << 9) - 1);
switch (op) {
@@ -819,12 +821,12 @@ static void dec_bit(DisasContext *dc)
break;
case 0xe0:
if ((dc->tb_flags & MSR_EE_FLAG)
- && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
- && !((dc->env->pvr.regs[2] & PVR2_USE_PCMP_INSTR))) {
+ && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
+ && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_PCMP_INSTR))) {
tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
t_gen_raise_exception(dc, EXCP_HW_EXCP);
}
- if (dc->env->pvr.regs[2] & PVR2_USE_PCMP_INSTR) {
+ if (dc->cpu->env.pvr.regs[2] & PVR2_USE_PCMP_INSTR) {
gen_helper_clz(cpu_R[dc->rd], cpu_R[dc->ra]);
}
break;
@@ -839,8 +841,8 @@ static void dec_bit(DisasContext *dc)
tcg_gen_rotri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 16);
break;
default:
- cpu_abort(dc->env, "unknown bit oc=%x op=%x rd=%d ra=%d rb=%d\n",
- dc->pc, op, dc->rd, dc->ra, dc->rb);
+ cpu_abort(cs, "unknown bit oc=%x op=%x rd=%d ra=%d rb=%d\n",
+ dc->pc, op, dc->rd, dc->ra, dc->rb);
break;
}
}
@@ -933,7 +935,7 @@ static void dec_load(DisasContext *dc)
}
if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
- && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
+ && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
t_gen_raise_exception(dc, EXCP_HW_EXCP);
return;
@@ -991,7 +993,7 @@ static void dec_load(DisasContext *dc)
}
break;
default:
- cpu_abort(dc->env, "Invalid reverse size\n");
+ cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
break;
}
}
@@ -1018,9 +1020,9 @@ static void dec_load(DisasContext *dc)
* address and if that succeeds we write into the destination reg.
*/
v = tcg_temp_new();
- tcg_gen_qemu_ld_tl(v, *addr, cpu_mmu_index(dc->env), mop);
+ tcg_gen_qemu_ld_tl(v, *addr, cpu_mmu_index(&dc->cpu->env), mop);
- if ((dc->env->pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
+ if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
tcg_const_tl(0), tcg_const_tl(size - 1));
@@ -1063,7 +1065,7 @@ static void dec_store(DisasContext *dc)
}
if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
- && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
+ && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
t_gen_raise_exception(dc, EXCP_HW_EXCP);
return;
@@ -1096,7 +1098,8 @@ static void dec_store(DisasContext *dc)
this compare and the following write to be atomic. For user
emulation we need to add atomicity between threads. */
tval = tcg_temp_new();
- tcg_gen_qemu_ld_tl(tval, swx_addr, cpu_mmu_index(dc->env), MO_TEUL);
+ tcg_gen_qemu_ld_tl(tval, swx_addr, cpu_mmu_index(&dc->cpu->env),
+ MO_TEUL);
tcg_gen_brcond_tl(TCG_COND_NE, env_res_val, tval, swx_skip);
write_carryi(dc, 0);
tcg_temp_free(tval);
@@ -1142,14 +1145,14 @@ static void dec_store(DisasContext *dc)
}
break;
default:
- cpu_abort(dc->env, "Invalid reverse size\n");
+ cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
break;
}
}
- tcg_gen_qemu_st_tl(cpu_R[dc->rd], *addr, cpu_mmu_index(dc->env), mop);
+ tcg_gen_qemu_st_tl(cpu_R[dc->rd], *addr, cpu_mmu_index(&dc->cpu->env), mop);
/* Verify alignment if needed. */
- if ((dc->env->pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
+ if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
/* FIXME: if the alignment is wrong, we should restore the value
* in memory. One possible way to achieve this is to probe
@@ -1193,7 +1196,7 @@ static inline void eval_cc(DisasContext *dc, unsigned int cc,
tcg_gen_setcond_tl(TCG_COND_GT, d, a, b);
break;
default:
- cpu_abort(dc->env, "Unknown condition code %x.\n", cc);
+ cpu_abort(CPU(dc->cpu), "Unknown condition code %x.\n", cc);
break;
}
}
@@ -1244,7 +1247,7 @@ static void dec_bcc(DisasContext *dc)
static void dec_br(DisasContext *dc)
{
unsigned int dslot, link, abs, mbar;
- int mem_index = cpu_mmu_index(dc->env);
+ int mem_index = cpu_mmu_index(&dc->cpu->env);
dslot = dc->ir & (1 << 20);
abs = dc->ir & (1 << 19);
@@ -1376,7 +1379,7 @@ static inline void do_rte(DisasContext *dc)
static void dec_rts(DisasContext *dc)
{
unsigned int b_bit, i_bit, e_bit;
- int mem_index = cpu_mmu_index(dc->env);
+ int mem_index = cpu_mmu_index(&dc->cpu->env);
i_bit = dc->ir & (1 << 21);
b_bit = dc->ir & (1 << 22);
@@ -1423,7 +1426,7 @@ static int dec_check_fpuv2(DisasContext *dc)
{
int r;
- r = dc->env->pvr.regs[2] & PVR2_USE_FPU2_MASK;
+ r = dc->cpu->env.pvr.regs[2] & PVR2_USE_FPU2_MASK;
if (!r && (dc->tb_flags & MSR_EE_FLAG)) {
tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_FPU);
@@ -1437,8 +1440,8 @@ static void dec_fpu(DisasContext *dc)
unsigned int fpu_insn;
if ((dc->tb_flags & MSR_EE_FLAG)
- && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
- && !((dc->env->pvr.regs[2] & PVR2_USE_FPU_MASK))) {
+ && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
+ && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_FPU_MASK))) {
tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
t_gen_raise_exception(dc, EXCP_HW_EXCP);
return;
@@ -1540,7 +1543,7 @@ static void dec_fpu(DisasContext *dc)
static void dec_null(DisasContext *dc)
{
if ((dc->tb_flags & MSR_EE_FLAG)
- && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
+ && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
t_gen_raise_exception(dc, EXCP_HW_EXCP);
return;
@@ -1552,7 +1555,7 @@ static void dec_null(DisasContext *dc)
/* Insns connected to FSL or AXI stream attached devices. */
static void dec_stream(DisasContext *dc)
{
- int mem_index = cpu_mmu_index(dc->env);
+ int mem_index = cpu_mmu_index(&dc->cpu->env);
TCGv_i32 t_id, t_ctrl;
int ctrl;
@@ -1628,8 +1631,8 @@ static inline void decode(DisasContext *dc, uint32_t ir)
dc->nr_nops = 0;
else {
if ((dc->tb_flags & MSR_EE_FLAG)
- && (dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
- && (dc->env->pvr.regs[2] & PVR2_OPCODE_0x0_ILL_MASK)) {
+ && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
+ && (dc->cpu->env.pvr.regs[2] & PVR2_OPCODE_0x0_ILL_MASK)) {
tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
t_gen_raise_exception(dc, EXCP_HW_EXCP);
return;
@@ -1637,8 +1640,9 @@ static inline void decode(DisasContext *dc, uint32_t ir)
LOG_DIS("nr_nops=%d\t", dc->nr_nops);
dc->nr_nops++;
- if (dc->nr_nops > 4)
- cpu_abort(dc->env, "fetching nop sequence\n");
+ if (dc->nr_nops > 4) {
+ cpu_abort(CPU(dc->cpu), "fetching nop sequence\n");
+ }
}
/* bit 2 seems to indicate insn type. */
dc->type_b = ir & (1 << 29);
@@ -1660,10 +1664,11 @@ static inline void decode(DisasContext *dc, uint32_t ir)
static void check_breakpoint(CPUMBState *env, DisasContext *dc)
{
+ CPUState *cs = CPU(mb_env_get_cpu(env));
CPUBreakpoint *bp;
- if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
- QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
+ if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
+ QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
if (bp->pc == dc->pc) {
t_gen_raise_exception(dc, EXCP_DEBUG);
dc->is_jmp = DISAS_UPDATE;
@@ -1690,7 +1695,7 @@ gen_intermediate_code_internal(MicroBlazeCPU *cpu, TranslationBlock *tb,
int max_insns;
pc_start = tb->pc;
- dc->env = env;
+ dc->cpu = cpu;
dc->tb = tb;
org_flags = dc->synced_flags = dc->tb_flags = tb->flags;
@@ -1708,8 +1713,9 @@ gen_intermediate_code_internal(MicroBlazeCPU *cpu, TranslationBlock *tb,
dc->abort_at_next_insn = 0;
dc->nr_nops = 0;
- if (pc_start & 3)
- cpu_abort(env, "Microblaze: unaligned PC=%x\n", pc_start);
+ if (pc_start & 3) {
+ cpu_abort(cs, "Microblaze: unaligned PC=%x\n", pc_start);
+ }
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
#if !SIM_COMPAT
diff --git a/target-mips/cpu.c b/target-mips/cpu.c
index 9dd47e84f7..ae37ae26c0 100644
--- a/target-mips/cpu.c
+++ b/target-mips/cpu.c
@@ -45,6 +45,35 @@ static void mips_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
env->hflags |= tb->flags & MIPS_HFLAG_BMASK;
}
+static bool mips_cpu_has_work(CPUState *cs)
+{
+ MIPSCPU *cpu = MIPS_CPU(cs);
+ CPUMIPSState *env = &cpu->env;
+ bool has_work = false;
+
+ /* It is implementation dependent if non-enabled interrupts
+ wake-up the CPU, however most of the implementations only
+ check for interrupts that can be taken. */
+ if ((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
+ cpu_mips_hw_interrupts_pending(env)) {
+ has_work = true;
+ }
+
+ /* MIPS-MT has the ability to halt the CPU. */
+ if (env->CP0_Config3 & (1 << CP0C3_MT)) {
+ /* The QEMU model will issue an _WAKE request whenever the CPUs
+ should be woken up. */
+ if (cs->interrupt_request & CPU_INTERRUPT_WAKE) {
+ has_work = true;
+ }
+
+ if (!mips_vpe_active(env)) {
+ has_work = false;
+ }
+ }
+ return has_work;
+}
+
/* CPUClass::reset() */
static void mips_cpu_reset(CPUState *s)
{
@@ -54,8 +83,8 @@ static void mips_cpu_reset(CPUState *s)
mcc->parent_reset(s);
- memset(env, 0, offsetof(CPUMIPSState, breakpoints));
- tlb_flush(env, 1);
+ memset(env, 0, offsetof(CPUMIPSState, mvp));
+ tlb_flush(s, 1);
cpu_state_reset(env);
}
@@ -97,13 +126,16 @@ static void mips_cpu_class_init(ObjectClass *c, void *data)
mcc->parent_reset = cc->reset;
cc->reset = mips_cpu_reset;
+ cc->has_work = mips_cpu_has_work;
cc->do_interrupt = mips_cpu_do_interrupt;
cc->dump_state = mips_cpu_dump_state;
cc->set_pc = mips_cpu_set_pc;
cc->synchronize_from_tb = mips_cpu_synchronize_from_tb;
cc->gdb_read_register = mips_cpu_gdb_read_register;
cc->gdb_write_register = mips_cpu_gdb_write_register;
-#ifndef CONFIG_USER_ONLY
+#ifdef CONFIG_USER_ONLY
+ cc->handle_mmu_fault = mips_cpu_handle_mmu_fault;
+#else
cc->do_unassigned_access = mips_cpu_unassigned_access;
cc->get_phys_page_debug = mips_cpu_get_phys_page_debug;
#endif
diff --git a/target-mips/cpu.h b/target-mips/cpu.h
index 60c80617a5..6c2014eddd 100644
--- a/target-mips/cpu.h
+++ b/target-mips/cpu.h
@@ -482,6 +482,7 @@ struct CPUMIPSState {
CPU_COMMON
+ /* Fields from here on are preserved across CPU reset. */
CPUMIPSMVPContext *mvp;
#if !defined(CONFIG_USER_ONLY)
CPUMIPSTLBContext *tlb;
@@ -666,9 +667,8 @@ void cpu_mips_stop_count(CPUMIPSState *env);
void cpu_mips_soft_irq(CPUMIPSState *env, int irq, int level);
/* helper.c */
-int cpu_mips_handle_mmu_fault (CPUMIPSState *env, target_ulong address, int rw,
- int mmu_idx);
-#define cpu_handle_mmu_fault cpu_mips_handle_mmu_fault
+int mips_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw,
+ int mmu_idx);
#if !defined(CONFIG_USER_ONLY)
void r4k_invalidate_tlb (CPUMIPSState *env, int idx, int use_extra);
hwaddr cpu_mips_translate_address (CPUMIPSState *env, target_ulong address,
@@ -715,34 +715,6 @@ static inline int mips_vpe_active(CPUMIPSState *env)
return active;
}
-static inline bool cpu_has_work(CPUState *cpu)
-{
- CPUMIPSState *env = &MIPS_CPU(cpu)->env;
- bool has_work = false;
-
- /* It is implementation dependent if non-enabled interrupts
- wake-up the CPU, however most of the implementations only
- check for interrupts that can be taken. */
- if ((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
- cpu_mips_hw_interrupts_pending(env)) {
- has_work = true;
- }
-
- /* MIPS-MT has the ability to halt the CPU. */
- if (env->CP0_Config3 & (1 << CP0C3_MT)) {
- /* The QEMU model will issue an _WAKE request whenever the CPUs
- should be woken up. */
- if (cpu->interrupt_request & CPU_INTERRUPT_WAKE) {
- has_work = true;
- }
-
- if (!mips_vpe_active(env)) {
- has_work = false;
- }
- }
- return has_work;
-}
-
#include "exec/exec-all.h"
static inline void compute_hflags(CPUMIPSState *env)
@@ -803,7 +775,7 @@ static inline void compute_hflags(CPUMIPSState *env)
and disable the MIPS IV extensions to the MIPS III ISA.
Some other MIPS IV CPUs ignore the bit, so the check here
would be too restrictive for them. */
- if (env->CP0_Status & (1 << CP0St_CU3)) {
+ if (env->CP0_Status & (1U << CP0St_CU3)) {
env->hflags |= MIPS_HFLAG_COP1X;
}
}
diff --git a/target-mips/helper.c b/target-mips/helper.c
index 33e0e88637..064622cc31 100644
--- a/target-mips/helper.c
+++ b/target-mips/helper.c
@@ -204,6 +204,7 @@ static int get_physical_address (CPUMIPSState *env, hwaddr *physical,
static void raise_mmu_exception(CPUMIPSState *env, target_ulong address,
int rw, int tlb_error)
{
+ CPUState *cs = CPU(mips_env_get_cpu(env));
int exception = 0, error_code = 0;
switch (tlb_error) {
@@ -249,7 +250,7 @@ static void raise_mmu_exception(CPUMIPSState *env, target_ulong address,
((address & 0xC00000000000ULL) >> (55 - env->SEGBITS)) |
((address & ((1ULL << env->SEGBITS) - 1) & 0xFFFFFFFFFFFFE000ULL) >> 9);
#endif
- env->exception_index = exception;
+ cs->exception_index = exception;
env->error_code = error_code;
}
@@ -268,9 +269,11 @@ hwaddr mips_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
}
#endif
-int cpu_mips_handle_mmu_fault (CPUMIPSState *env, target_ulong address, int rw,
- int mmu_idx)
+int mips_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
+ int mmu_idx)
{
+ MIPSCPU *cpu = MIPS_CPU(cs);
+ CPUMIPSState *env = &cpu->env;
#if !defined(CONFIG_USER_ONLY)
hwaddr physical;
int prot;
@@ -279,9 +282,9 @@ int cpu_mips_handle_mmu_fault (CPUMIPSState *env, target_ulong address, int rw,
int ret = 0;
#if 0
- log_cpu_state(CPU(mips_env_get_cpu(env)), 0);
+ log_cpu_state(cs, 0);
#endif
- qemu_log("%s pc " TARGET_FMT_lx " ad " TARGET_FMT_lx " rw %d mmu_idx %d\n",
+ qemu_log("%s pc " TARGET_FMT_lx " ad %" VADDR_PRIx " rw %d mmu_idx %d\n",
__func__, env->active_tc.PC, address, rw, mmu_idx);
rw &= 1;
@@ -293,10 +296,11 @@ int cpu_mips_handle_mmu_fault (CPUMIPSState *env, target_ulong address, int rw,
access_type = ACCESS_INT;
ret = get_physical_address(env, &physical, &prot,
address, rw, access_type);
- qemu_log("%s address=" TARGET_FMT_lx " ret %d physical " TARGET_FMT_plx " prot %d\n",
- __func__, address, ret, physical, prot);
+ qemu_log("%s address=%" VADDR_PRIx " ret %d physical " TARGET_FMT_plx
+ " prot %d\n",
+ __func__, address, ret, physical, prot);
if (ret == TLBRET_MATCH) {
- tlb_set_page(env, address & TARGET_PAGE_MASK,
+ tlb_set_page(cs, address & TARGET_PAGE_MASK,
physical & TARGET_PAGE_MASK, prot | PAGE_EXEC,
mmu_idx, TARGET_PAGE_SIZE);
ret = 0;
@@ -401,27 +405,29 @@ static void set_hflags_for_handler (CPUMIPSState *env)
void mips_cpu_do_interrupt(CPUState *cs)
{
+#if !defined(CONFIG_USER_ONLY)
MIPSCPU *cpu = MIPS_CPU(cs);
CPUMIPSState *env = &cpu->env;
-#if !defined(CONFIG_USER_ONLY)
target_ulong offset;
int cause = -1;
const char *name;
- if (qemu_log_enabled() && env->exception_index != EXCP_EXT_INTERRUPT) {
- if (env->exception_index < 0 || env->exception_index > EXCP_LAST)
+ if (qemu_log_enabled() && cs->exception_index != EXCP_EXT_INTERRUPT) {
+ if (cs->exception_index < 0 || cs->exception_index > EXCP_LAST) {
name = "unknown";
- else
- name = excp_names[env->exception_index];
+ } else {
+ name = excp_names[cs->exception_index];
+ }
qemu_log("%s enter: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx " %s exception\n",
__func__, env->active_tc.PC, env->CP0_EPC, name);
}
- if (env->exception_index == EXCP_EXT_INTERRUPT &&
- (env->hflags & MIPS_HFLAG_DM))
- env->exception_index = EXCP_DINT;
+ if (cs->exception_index == EXCP_EXT_INTERRUPT &&
+ (env->hflags & MIPS_HFLAG_DM)) {
+ cs->exception_index = EXCP_DINT;
+ }
offset = 0x180;
- switch (env->exception_index) {
+ switch (cs->exception_index) {
case EXCP_DSS:
env->CP0_Debug |= 1 << CP0DB_DSS;
/* Debug single step cannot be raised inside a delay slot and
@@ -452,7 +458,7 @@ void mips_cpu_do_interrupt(CPUState *cs)
env->hflags &= ~(MIPS_HFLAG_KSU);
/* EJTAG probe trap enable is not implemented... */
if (!(env->CP0_Status & (1 << CP0St_EXL)))
- env->CP0_Cause &= ~(1 << CP0Ca_BD);
+ env->CP0_Cause &= ~(1U << CP0Ca_BD);
env->active_tc.PC = (int32_t)0xBFC00480;
set_hflags_for_handler(env);
break;
@@ -472,7 +478,7 @@ void mips_cpu_do_interrupt(CPUState *cs)
env->hflags |= MIPS_HFLAG_64 | MIPS_HFLAG_CP0;
env->hflags &= ~(MIPS_HFLAG_KSU);
if (!(env->CP0_Status & (1 << CP0St_EXL)))
- env->CP0_Cause &= ~(1 << CP0Ca_BD);
+ env->CP0_Cause &= ~(1U << CP0Ca_BD);
env->active_tc.PC = (int32_t)0xBFC00000;
set_hflags_for_handler(env);
break;
@@ -610,9 +616,9 @@ void mips_cpu_do_interrupt(CPUState *cs)
if (!(env->CP0_Status & (1 << CP0St_EXL))) {
env->CP0_EPC = exception_resume_pc(env);
if (env->hflags & MIPS_HFLAG_BMASK) {
- env->CP0_Cause |= (1 << CP0Ca_BD);
+ env->CP0_Cause |= (1U << CP0Ca_BD);
} else {
- env->CP0_Cause &= ~(1 << CP0Ca_BD);
+ env->CP0_Cause &= ~(1U << CP0Ca_BD);
}
env->CP0_Status |= (1 << CP0St_EXL);
env->hflags |= MIPS_HFLAG_64 | MIPS_HFLAG_CP0;
@@ -629,11 +635,11 @@ void mips_cpu_do_interrupt(CPUState *cs)
env->CP0_Cause = (env->CP0_Cause & ~(0x1f << CP0Ca_EC)) | (cause << CP0Ca_EC);
break;
default:
- qemu_log("Invalid MIPS exception %d. Exiting\n", env->exception_index);
- printf("Invalid MIPS exception %d. Exiting\n", env->exception_index);
+ qemu_log("Invalid MIPS exception %d. Exiting\n", cs->exception_index);
+ printf("Invalid MIPS exception %d. Exiting\n", cs->exception_index);
exit(1);
}
- if (qemu_log_enabled() && env->exception_index != EXCP_EXT_INTERRUPT) {
+ if (qemu_log_enabled() && cs->exception_index != EXCP_EXT_INTERRUPT) {
qemu_log("%s: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx " cause %d\n"
" S %08x C %08x A " TARGET_FMT_lx " D " TARGET_FMT_lx "\n",
__func__, env->active_tc.PC, env->CP0_EPC, cause,
@@ -641,12 +647,14 @@ void mips_cpu_do_interrupt(CPUState *cs)
env->CP0_DEPC);
}
#endif
- env->exception_index = EXCP_NONE;
+ cs->exception_index = EXCP_NONE;
}
#if !defined(CONFIG_USER_ONLY)
void r4k_invalidate_tlb (CPUMIPSState *env, int idx, int use_extra)
{
+ MIPSCPU *cpu = mips_env_get_cpu(env);
+ CPUState *cs;
r4k_tlb_t *tlb;
target_ulong addr;
target_ulong end;
@@ -672,6 +680,7 @@ void r4k_invalidate_tlb (CPUMIPSState *env, int idx, int use_extra)
/* 1k pages are not supported. */
mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
if (tlb->V0) {
+ cs = CPU(cpu);
addr = tlb->VPN & ~mask;
#if defined(TARGET_MIPS64)
if (addr >= (0xFFFFFFFF80000000ULL & env->SEGMask)) {
@@ -680,11 +689,12 @@ void r4k_invalidate_tlb (CPUMIPSState *env, int idx, int use_extra)
#endif
end = addr | (mask >> 1);
while (addr < end) {
- tlb_flush_page (env, addr);
+ tlb_flush_page(cs, addr);
addr += TARGET_PAGE_SIZE;
}
}
if (tlb->V1) {
+ cs = CPU(cpu);
addr = (tlb->VPN & ~mask) | ((mask >> 1) + 1);
#if defined(TARGET_MIPS64)
if (addr >= (0xFFFFFFFF80000000ULL & env->SEGMask)) {
@@ -693,7 +703,7 @@ void r4k_invalidate_tlb (CPUMIPSState *env, int idx, int use_extra)
#endif
end = addr | mask;
while (addr - 1 < end) {
- tlb_flush_page (env, addr);
+ tlb_flush_page(cs, addr);
addr += TARGET_PAGE_SIZE;
}
}
diff --git a/target-mips/machine.c b/target-mips/machine.c
index 23504ba9ae..0a07db8540 100644
--- a/target-mips/machine.c
+++ b/target-mips/machine.c
@@ -191,6 +191,7 @@ static void load_fpu(QEMUFile *f, CPUMIPSFPUContext *fpu)
int cpu_load(QEMUFile *f, void *opaque, int version_id)
{
CPUMIPSState *env = opaque;
+ MIPSCPU *cpu = mips_env_get_cpu(env);
int i;
if (version_id != 3)
@@ -303,6 +304,6 @@ int cpu_load(QEMUFile *f, void *opaque, int version_id)
load_fpu(f, &env->fpus[i]);
/* XXX: ensure compatibility for halted bit ? */
- tlb_flush(env, 1);
+ tlb_flush(CPU(cpu), 1);
return 0;
}
diff --git a/target-mips/op_helper.c b/target-mips/op_helper.c
index 2ef6633f47..4edec6c617 100644
--- a/target-mips/op_helper.c
+++ b/target-mips/op_helper.c
@@ -38,18 +38,20 @@ static inline void QEMU_NORETURN do_raise_exception_err(CPUMIPSState *env,
int error_code,
uintptr_t pc)
{
+ CPUState *cs = CPU(mips_env_get_cpu(env));
+
if (exception < EXCP_SC) {
qemu_log("%s: %d %d\n", __func__, exception, error_code);
}
- env->exception_index = exception;
+ cs->exception_index = exception;
env->error_code = error_code;
if (pc) {
/* now we have a real cpu fault */
- cpu_restore_state(env, pc);
+ cpu_restore_state(cs, pc);
}
- cpu_loop_exit(env);
+ cpu_loop_exit(cs);
}
static inline void QEMU_NORETURN do_raise_exception(CPUMIPSState *env,
@@ -278,7 +280,7 @@ static inline hwaddr do_translate_address(CPUMIPSState *env,
lladdr = cpu_mips_translate_address(env, address, rw);
if (lladdr == -1LL) {
- cpu_loop_exit(env);
+ cpu_loop_exit(CPU(mips_env_get_cpu(env)));
} else {
return lladdr;
}
@@ -646,7 +648,7 @@ static void sync_c0_tcstatus(CPUMIPSState *cpu, int tc,
{
uint32_t status;
uint32_t tcu, tmx, tasid, tksu;
- uint32_t mask = ((1 << CP0St_CU3)
+ uint32_t mask = ((1U << CP0St_CU3)
| (1 << CP0St_CU2)
| (1 << CP0St_CU1)
| (1 << CP0St_CU0)
@@ -1342,6 +1344,7 @@ void helper_mtc0_compare(CPUMIPSState *env, target_ulong arg1)
void helper_mtc0_status(CPUMIPSState *env, target_ulong arg1)
{
+ MIPSCPU *cpu = mips_env_get_cpu(env);
uint32_t val, old;
uint32_t mask = env->CP0_Status_rw_bitmask;
@@ -1363,7 +1366,9 @@ void helper_mtc0_status(CPUMIPSState *env, target_ulong arg1)
case MIPS_HFLAG_UM: qemu_log(", UM\n"); break;
case MIPS_HFLAG_SM: qemu_log(", SM\n"); break;
case MIPS_HFLAG_KM: qemu_log("\n"); break;
- default: cpu_abort(env, "Invalid MMU mode!\n"); break;
+ default:
+ cpu_abort(CPU(cpu), "Invalid MMU mode!\n");
+ break;
}
}
}
@@ -1782,8 +1787,10 @@ target_ulong helper_yield(CPUMIPSState *env, target_ulong arg)
/* TLB management */
static void cpu_mips_tlb_flush (CPUMIPSState *env, int flush_global)
{
+ MIPSCPU *cpu = mips_env_get_cpu(env);
+
/* Flush qemu's TLB and discard all shadowed entries. */
- tlb_flush (env, flush_global);
+ tlb_flush(CPU(cpu), flush_global);
env->tlb->tlb_in_use = env->tlb->nb_tlb;
}
@@ -1983,6 +1990,8 @@ static void debug_pre_eret(CPUMIPSState *env)
static void debug_post_eret(CPUMIPSState *env)
{
+ MIPSCPU *cpu = mips_env_get_cpu(env);
+
if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
qemu_log(" => PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
env->active_tc.PC, env->CP0_EPC);
@@ -1994,7 +2003,9 @@ static void debug_post_eret(CPUMIPSState *env)
case MIPS_HFLAG_UM: qemu_log(", UM\n"); break;
case MIPS_HFLAG_SM: qemu_log(", SM\n"); break;
case MIPS_HFLAG_KM: qemu_log("\n"); break;
- default: cpu_abort(env, "Invalid MMU mode!\n"); break;
+ default:
+ cpu_abort(CPU(cpu), "Invalid MMU mode!\n");
+ break;
}
}
}
@@ -2143,14 +2154,17 @@ static void do_unaligned_access(CPUMIPSState *env, target_ulong addr,
do_raise_exception(env, (is_write == 1) ? EXCP_AdES : EXCP_AdEL, retaddr);
}
-void tlb_fill(CPUMIPSState *env, target_ulong addr, int is_write, int mmu_idx,
+void tlb_fill(CPUState *cs, target_ulong addr, int is_write, int mmu_idx,
uintptr_t retaddr)
{
int ret;
- ret = cpu_mips_handle_mmu_fault(env, addr, is_write, mmu_idx);
+ ret = mips_cpu_handle_mmu_fault(cs, addr, is_write, mmu_idx);
if (ret) {
- do_raise_exception_err(env, env->exception_index,
+ MIPSCPU *cpu = MIPS_CPU(cs);
+ CPUMIPSState *env = &cpu->env;
+
+ do_raise_exception_err(env, cs->exception_index,
env->error_code, retaddr);
}
}
diff --git a/target-mips/translate.c b/target-mips/translate.c
index 083f6ab283..05f82d2f9b 100644
--- a/target-mips/translate.c
+++ b/target-mips/translate.c
@@ -1216,20 +1216,28 @@ static void gen_store_fpr32(TCGv_i32 t, int reg)
tcg_temp_free_i64(t64);
}
-static void gen_load_fpr32h(TCGv_i32 t, int reg)
+static void gen_load_fpr32h(DisasContext *ctx, TCGv_i32 t, int reg)
{
- TCGv_i64 t64 = tcg_temp_new_i64();
- tcg_gen_shri_i64(t64, fpu_f64[reg], 32);
- tcg_gen_trunc_i64_i32(t, t64);
- tcg_temp_free_i64(t64);
+ if (ctx->hflags & MIPS_HFLAG_F64) {
+ TCGv_i64 t64 = tcg_temp_new_i64();
+ tcg_gen_shri_i64(t64, fpu_f64[reg], 32);
+ tcg_gen_trunc_i64_i32(t, t64);
+ tcg_temp_free_i64(t64);
+ } else {
+ gen_load_fpr32(t, reg | 1);
+ }
}
-static void gen_store_fpr32h(TCGv_i32 t, int reg)
+static void gen_store_fpr32h(DisasContext *ctx, TCGv_i32 t, int reg)
{
- TCGv_i64 t64 = tcg_temp_new_i64();
- tcg_gen_extu_i32_i64(t64, t);
- tcg_gen_deposit_i64(fpu_f64[reg], fpu_f64[reg], t64, 32, 32);
- tcg_temp_free_i64(t64);
+ if (ctx->hflags & MIPS_HFLAG_F64) {
+ TCGv_i64 t64 = tcg_temp_new_i64();
+ tcg_gen_extu_i32_i64(t64, t);
+ tcg_gen_deposit_i64(fpu_f64[reg], fpu_f64[reg], t64, 32, 32);
+ tcg_temp_free_i64(t64);
+ } else {
+ gen_store_fpr32(t, reg | 1);
+ }
}
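
The new DisasContext argument lets these helpers pick between the two MIPS FPU register models at translation time. A rough standalone illustration of the semantics, with invented array names, assuming FR selects between paired 32-bit registers (FR=0) and full 64-bit registers (FR=1):

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: with Status.FR = 0 a 64-bit FP value occupies an
 * even/odd pair of 32-bit registers, so the "high" word lives in the odd
 * register (reg | 1); with FR = 1 every register is 64 bits wide and the
 * high word is simply bits 63:32 of that register. */
static uint32_t load_fpr32h(const uint64_t *fpr64, const uint32_t *fpr32,
                            int reg, int fr_mode)
{
    if (fr_mode) {
        return (uint32_t)(fpr64[reg] >> 32);
    }
    return fpr32[reg | 1];
}

int main(void)
{
    uint64_t f64[2] = { 0x1122334455667788ull, 0 };
    uint32_t f32[2] = { 0x55667788u, 0x11223344u };

    printf("FR=1: %08x  FR=0: %08x\n",
           (unsigned)load_fpr32h(f64, f32, 0, 1),
           (unsigned)load_fpr32h(f64, f32, 0, 0));
    return 0;
}
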
static void gen_load_fpr64(DisasContext *ctx, TCGv_i64 t, int reg)
@@ -6613,7 +6621,7 @@ static void gen_mftr(CPUMIPSState *env, DisasContext *ctx, int rt, int rd,
} else {
TCGv_i32 fp0 = tcg_temp_new_i32();
- gen_load_fpr32h(fp0, rt);
+ gen_load_fpr32h(ctx, fp0, rt);
tcg_gen_ext_i32_tl(t0, fp0);
tcg_temp_free_i32(fp0);
}
@@ -6812,7 +6820,7 @@ static void gen_mttr(CPUMIPSState *env, DisasContext *ctx, int rd, int rt,
TCGv_i32 fp0 = tcg_temp_new_i32();
tcg_gen_trunc_tl_i32(fp0, t0);
- gen_store_fpr32h(fp0, rd);
+ gen_store_fpr32h(ctx, fp0, rd);
tcg_temp_free_i32(fp0);
}
break;
@@ -7283,7 +7291,7 @@ static void gen_cp1 (DisasContext *ctx, uint32_t opc, int rt, int fs)
{
TCGv_i32 fp0 = tcg_temp_new_i32();
- gen_load_fpr32h(fp0, fs);
+ gen_load_fpr32h(ctx, fp0, fs);
tcg_gen_ext_i32_tl(t0, fp0);
tcg_temp_free_i32(fp0);
}
@@ -7296,7 +7304,7 @@ static void gen_cp1 (DisasContext *ctx, uint32_t opc, int rt, int fs)
TCGv_i32 fp0 = tcg_temp_new_i32();
tcg_gen_trunc_tl_i32(fp0, t0);
- gen_store_fpr32h(fp0, fs);
+ gen_store_fpr32h(ctx, fp0, fs);
tcg_temp_free_i32(fp0);
}
opn = "mthc1";
@@ -7383,7 +7391,8 @@ static inline void gen_movcf_d (DisasContext *ctx, int fs, int fd, int cc, int t
gen_set_label(l1);
}
-static inline void gen_movcf_ps (int fs, int fd, int cc, int tf)
+static inline void gen_movcf_ps(DisasContext *ctx, int fs, int fd,
+ int cc, int tf)
{
int cond;
TCGv_i32 t0 = tcg_temp_new_i32();
@@ -7403,8 +7412,8 @@ static inline void gen_movcf_ps (int fs, int fd, int cc, int tf)
tcg_gen_andi_i32(t0, fpu_fcr31, 1 << get_fp_bit(cc+1));
tcg_gen_brcondi_i32(cond, t0, 0, l2);
- gen_load_fpr32h(t0, fs);
- gen_store_fpr32h(t0, fd);
+ gen_load_fpr32h(ctx, t0, fs);
+ gen_store_fpr32h(ctx, t0, fd);
tcg_temp_free_i32(t0);
gen_set_label(l2);
}
@@ -8389,7 +8398,7 @@ static void gen_farith (DisasContext *ctx, enum fopcode op1,
break;
case OPC_MOVCF_PS:
check_cp1_64bitmode(ctx);
- gen_movcf_ps(fs, fd, (ft >> 2) & 0x7, ft & 0x1);
+ gen_movcf_ps(ctx, fs, fd, (ft >> 2) & 0x7, ft & 0x1);
opn = "movcf.ps";
break;
case OPC_MOVZ_PS:
@@ -8514,7 +8523,7 @@ static void gen_farith (DisasContext *ctx, enum fopcode op1,
{
TCGv_i32 fp0 = tcg_temp_new_i32();
- gen_load_fpr32h(fp0, fs);
+ gen_load_fpr32h(ctx, fp0, fs);
gen_helper_float_cvts_pu(fp0, cpu_env, fp0);
gen_store_fpr32(fp0, fd);
tcg_temp_free_i32(fp0);
@@ -8553,7 +8562,7 @@ static void gen_farith (DisasContext *ctx, enum fopcode op1,
gen_load_fpr32(fp0, fs);
gen_load_fpr32(fp1, ft);
- gen_store_fpr32h(fp0, fd);
+ gen_store_fpr32h(ctx, fp0, fd);
gen_store_fpr32(fp1, fd);
tcg_temp_free_i32(fp0);
tcg_temp_free_i32(fp1);
@@ -8567,9 +8576,9 @@ static void gen_farith (DisasContext *ctx, enum fopcode op1,
TCGv_i32 fp1 = tcg_temp_new_i32();
gen_load_fpr32(fp0, fs);
- gen_load_fpr32h(fp1, ft);
+ gen_load_fpr32h(ctx, fp1, ft);
gen_store_fpr32(fp1, fd);
- gen_store_fpr32h(fp0, fd);
+ gen_store_fpr32h(ctx, fp0, fd);
tcg_temp_free_i32(fp0);
tcg_temp_free_i32(fp1);
}
@@ -8581,10 +8590,10 @@ static void gen_farith (DisasContext *ctx, enum fopcode op1,
TCGv_i32 fp0 = tcg_temp_new_i32();
TCGv_i32 fp1 = tcg_temp_new_i32();
- gen_load_fpr32h(fp0, fs);
+ gen_load_fpr32h(ctx, fp0, fs);
gen_load_fpr32(fp1, ft);
gen_store_fpr32(fp1, fd);
- gen_store_fpr32h(fp0, fd);
+ gen_store_fpr32h(ctx, fp0, fd);
tcg_temp_free_i32(fp0);
tcg_temp_free_i32(fp1);
}
@@ -8596,10 +8605,10 @@ static void gen_farith (DisasContext *ctx, enum fopcode op1,
TCGv_i32 fp0 = tcg_temp_new_i32();
TCGv_i32 fp1 = tcg_temp_new_i32();
- gen_load_fpr32h(fp0, fs);
- gen_load_fpr32h(fp1, ft);
+ gen_load_fpr32h(ctx, fp0, fs);
+ gen_load_fpr32h(ctx, fp1, ft);
gen_store_fpr32(fp1, fd);
- gen_store_fpr32h(fp0, fd);
+ gen_store_fpr32h(ctx, fp0, fd);
tcg_temp_free_i32(fp0);
tcg_temp_free_i32(fp1);
}
@@ -8763,23 +8772,23 @@ static void gen_flt3_arith (DisasContext *ctx, uint32_t opc,
tcg_gen_brcondi_tl(TCG_COND_NE, t0, 0, l1);
gen_load_fpr32(fp, fs);
- gen_load_fpr32h(fph, fs);
+ gen_load_fpr32h(ctx, fph, fs);
gen_store_fpr32(fp, fd);
- gen_store_fpr32h(fph, fd);
+ gen_store_fpr32h(ctx, fph, fd);
tcg_gen_br(l2);
gen_set_label(l1);
tcg_gen_brcondi_tl(TCG_COND_NE, t0, 4, l2);
tcg_temp_free(t0);
#ifdef TARGET_WORDS_BIGENDIAN
gen_load_fpr32(fp, fs);
- gen_load_fpr32h(fph, ft);
- gen_store_fpr32h(fp, fd);
+ gen_load_fpr32h(ctx, fph, ft);
+ gen_store_fpr32h(ctx, fp, fd);
gen_store_fpr32(fph, fd);
#else
- gen_load_fpr32h(fph, fs);
+ gen_load_fpr32h(ctx, fph, fs);
gen_load_fpr32(fp, ft);
gen_store_fpr32(fph, fd);
- gen_store_fpr32h(fp, fd);
+ gen_store_fpr32h(ctx, fp, fd);
#endif
gen_set_label(l2);
tcg_temp_free_i32(fp);
@@ -11976,7 +11985,7 @@ static void decode_micromips32_opc (CPUMIPSState *env, DisasContext *ctx,
gen_movcf_d(ctx, rs, rt, cc, 0);
break;
case FMT_SDPS_PS:
- gen_movcf_ps(rs, rt, cc, 0);
+ gen_movcf_ps(ctx, rs, rt, cc, 0);
break;
default:
goto pool32f_invalid;
@@ -11991,7 +12000,7 @@ static void decode_micromips32_opc (CPUMIPSState *env, DisasContext *ctx,
gen_movcf_d(ctx, rs, rt, cc, 1);
break;
case FMT_SDPS_PS:
- gen_movcf_ps(rs, rt, cc, 1);
+ gen_movcf_ps(ctx, rs, rt, cc, 1);
break;
default:
goto pool32f_invalid;
@@ -15613,8 +15622,8 @@ gen_intermediate_code_internal(MIPSCPU *cpu, TranslationBlock *tb,
LOG_DISAS("\ntb %p idx %d hflags %04x\n", tb, ctx.mem_idx, ctx.hflags);
gen_tb_start();
while (ctx.bstate == BS_NONE) {
- if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
- QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
+ if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
+ QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
if (bp->pc == ctx.pc) {
save_cpu_state(&ctx, 1);
ctx.bstate = BS_BRANCH;
@@ -15929,10 +15938,8 @@ MIPSCPU *cpu_mips_init(const char *cpu_model)
void cpu_state_reset(CPUMIPSState *env)
{
-#ifndef CONFIG_USER_ONLY
MIPSCPU *cpu = mips_env_get_cpu(env);
CPUState *cs = CPU(cpu);
-#endif
/* Reset registers to their default values */
env->CP0_PRid = env->cpu_model->CP0_PRid;
@@ -16063,7 +16070,7 @@ void cpu_state_reset(CPUMIPSState *env)
}
#endif
compute_hflags(env);
- env->exception_index = EXCP_NONE;
+ cs->exception_index = EXCP_NONE;
}
void restore_state_to_opc(CPUMIPSState *env, TranslationBlock *tb, int pc_pos)
diff --git a/target-mips/translate_init.c b/target-mips/translate_init.c
index 29d39e2a39..29dc2ef738 100644
--- a/target-mips/translate_init.c
+++ b/target-mips/translate_init.c
@@ -22,20 +22,20 @@
/* Have config1, uncached coherency */
#define MIPS_CONFIG0 \
- ((1 << CP0C0_M) | (0x2 << CP0C0_K0))
+ ((1U << CP0C0_M) | (0x2 << CP0C0_K0))
/* Have config2, no coprocessor2 attached, no MDMX support attached,
no performance counters, watch registers present,
no code compression, EJTAG present, no FPU */
#define MIPS_CONFIG1 \
-((1 << CP0C1_M) | \
+((1U << CP0C1_M) | \
(0 << CP0C1_C2) | (0 << CP0C1_MD) | (0 << CP0C1_PC) | \
(1 << CP0C1_WR) | (0 << CP0C1_CA) | (1 << CP0C1_EP) | \
(0 << CP0C1_FP))
/* Have config3, no tertiary/secondary caches implemented */
#define MIPS_CONFIG2 \
-((1 << CP0C2_M))
+((1U << CP0C2_M))
/* No config4, no DSP ASE, no large physaddr (PABITS),
no external interrupt controller, no vectored interrupts,
@@ -301,16 +301,16 @@ static const mips_def_t mips_defs[] =
(1 << FCR0_D) | (1 << FCR0_S) | (0x95 << FCR0_PRID),
.CP0_SRSCtl = (0xf << CP0SRSCtl_HSS),
.CP0_SRSConf0_rw_bitmask = 0x3fffffff,
- .CP0_SRSConf0 = (1 << CP0SRSC0_M) | (0x3fe << CP0SRSC0_SRS3) |
+ .CP0_SRSConf0 = (1U << CP0SRSC0_M) | (0x3fe << CP0SRSC0_SRS3) |
(0x3fe << CP0SRSC0_SRS2) | (0x3fe << CP0SRSC0_SRS1),
.CP0_SRSConf1_rw_bitmask = 0x3fffffff,
- .CP0_SRSConf1 = (1 << CP0SRSC1_M) | (0x3fe << CP0SRSC1_SRS6) |
+ .CP0_SRSConf1 = (1U << CP0SRSC1_M) | (0x3fe << CP0SRSC1_SRS6) |
(0x3fe << CP0SRSC1_SRS5) | (0x3fe << CP0SRSC1_SRS4),
.CP0_SRSConf2_rw_bitmask = 0x3fffffff,
- .CP0_SRSConf2 = (1 << CP0SRSC2_M) | (0x3fe << CP0SRSC2_SRS9) |
+ .CP0_SRSConf2 = (1U << CP0SRSC2_M) | (0x3fe << CP0SRSC2_SRS9) |
(0x3fe << CP0SRSC2_SRS8) | (0x3fe << CP0SRSC2_SRS7),
.CP0_SRSConf3_rw_bitmask = 0x3fffffff,
- .CP0_SRSConf3 = (1 << CP0SRSC3_M) | (0x3fe << CP0SRSC3_SRS12) |
+ .CP0_SRSConf3 = (1U << CP0SRSC3_M) | (0x3fe << CP0SRSC3_SRS12) |
(0x3fe << CP0SRSC3_SRS11) | (0x3fe << CP0SRSC3_SRS10),
.CP0_SRSConf4_rw_bitmask = 0x3fffffff,
.CP0_SRSConf4 = (0x3fe << CP0SRSC4_SRS15) |
@@ -355,8 +355,8 @@ static const mips_def_t mips_defs[] =
(0 << CP0C1_DS) | (3 << CP0C1_DL) | (1 << CP0C1_DA) |
(1 << CP0C1_CA),
.CP0_Config2 = MIPS_CONFIG2,
- .CP0_Config3 = MIPS_CONFIG3 | (1 << CP0C3_M),
- .CP0_Config4 = MIPS_CONFIG4 | (1 << CP0C4_M),
+ .CP0_Config3 = MIPS_CONFIG3 | (1U << CP0C3_M),
+ .CP0_Config4 = MIPS_CONFIG4 | (1U << CP0C4_M),
.CP0_Config4_rw_bitmask = 0,
.CP0_Config5 = MIPS_CONFIG5 | (1 << CP0C5_UFR),
.CP0_Config5_rw_bitmask = (0 << CP0C5_M) | (1 << CP0C5_K) |
@@ -629,6 +629,8 @@ static void r4k_mmu_init (CPUMIPSState *env, const mips_def_t *def)
static void mmu_init (CPUMIPSState *env, const mips_def_t *def)
{
+ MIPSCPU *cpu = mips_env_get_cpu(env);
+
env->tlb = g_malloc0(sizeof(CPUMIPSTLBContext));
switch (def->mmu_type) {
@@ -645,7 +647,7 @@ static void mmu_init (CPUMIPSState *env, const mips_def_t *def)
case MMU_TYPE_R6000:
case MMU_TYPE_R8000:
default:
- cpu_abort(env, "MMU type not supported\n");
+ cpu_abort(CPU(cpu), "MMU type not supported\n");
}
}
#endif /* CONFIG_USER_ONLY */
@@ -668,7 +670,7 @@ static void mvp_init (CPUMIPSState *env, const mips_def_t *def)
programmable cache partitioning implemented, number of allocatable
and sharable TLB entries, MVP has allocatable TCs, 2 VPEs
implemented, 5 TCs implemented. */
- env->mvp->CP0_MVPConf0 = (1 << CP0MVPC0_M) | (1 << CP0MVPC0_TLBS) |
+ env->mvp->CP0_MVPConf0 = (1U << CP0MVPC0_M) | (1 << CP0MVPC0_TLBS) |
(0 << CP0MVPC0_GS) | (1 << CP0MVPC0_PCP) |
// TODO: actually do 2 VPEs.
// (1 << CP0MVPC0_TCA) | (0x1 << CP0MVPC0_PVPE) |
@@ -682,7 +684,7 @@ static void mvp_init (CPUMIPSState *env, const mips_def_t *def)
/* Allocatable CP1 have media extensions, allocatable CP1 have FP support,
no UDI implemented, no CP2 implemented, 1 CP1 implemented. */
- env->mvp->CP0_MVPConf1 = (1 << CP0MVPC1_CIM) | (1 << CP0MVPC1_CIF) |
+ env->mvp->CP0_MVPConf1 = (1U << CP0MVPC1_CIM) | (1 << CP0MVPC1_CIF) |
(0x0 << CP0MVPC1_PCX) | (0x0 << CP0MVPC1_PCP2) |
(0x1 << CP0MVPC1_PCP1);
}
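
The 1 -> 1U changes through this file avoid shifting into the sign bit of a signed int when the field sits at bit 31. A tiny self-contained demonstration of the same idiom (BIT_M is an invented stand-in for the CP0 field position):

#include <stdint.h>
#include <stdio.h>

#define BIT_M 31    /* illustrative bit position, standing in for CP0C0_M */

int main(void)
{
    /* (1 << 31) shifts into the sign bit of a signed int, which is
     * undefined behaviour in C; an unsigned constant keeps the whole
     * expression well defined and still fits a 32-bit register field. */
    uint32_t mask = (1U << BIT_M) | (0x2 << 3);

    printf("mask = 0x%08x\n", (unsigned)mask);
    return 0;
}
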
diff --git a/target-moxie/cpu.c b/target-moxie/cpu.c
index 484ecc2124..47b617f5cd 100644
--- a/target-moxie/cpu.c
+++ b/target-moxie/cpu.c
@@ -29,6 +29,11 @@ static void moxie_cpu_set_pc(CPUState *cs, vaddr value)
cpu->env.pc = value;
}
+static bool moxie_cpu_has_work(CPUState *cs)
+{
+ return cs->interrupt_request & CPU_INTERRUPT_HARD;
+}
+
static void moxie_cpu_reset(CPUState *s)
{
MoxieCPU *cpu = MOXIE_CPU(s);
@@ -37,10 +42,10 @@ static void moxie_cpu_reset(CPUState *s)
mcc->parent_reset(s);
- memset(env, 0, offsetof(CPUMoxieState, breakpoints));
+ memset(env, 0, sizeof(CPUMoxieState));
env->pc = 0x1000;
- tlb_flush(env, 1);
+ tlb_flush(s, 1);
}
static void moxie_cpu_realizefn(DeviceState *dev, Error **errp)
@@ -99,10 +104,13 @@ static void moxie_cpu_class_init(ObjectClass *oc, void *data)
cc->class_by_name = moxie_cpu_class_by_name;
+ cc->has_work = moxie_cpu_has_work;
cc->do_interrupt = moxie_cpu_do_interrupt;
cc->dump_state = moxie_cpu_dump_state;
cc->set_pc = moxie_cpu_set_pc;
-#ifndef CONFIG_USER_ONLY
+#ifdef CONFIG_USER_ONLY
+ cc->handle_mmu_fault = moxie_cpu_handle_mmu_fault;
+#else
cc->get_phys_page_debug = moxie_cpu_get_phys_page_debug;
cc->vmsd = &vmstate_moxie_cpu;
#endif
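
moxie now reports pending work through a class hook instead of a per-target static inline. A compact sketch of that function-pointer dispatch, using made-up structure layouts rather than the real QOM types:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative class/instance layout, not the real QOM types. */
typedef struct CPUState CPUState;

typedef struct CPUClass {
    bool (*has_work)(CPUState *cs);
} CPUClass;

struct CPUState {
    const CPUClass *cc;
    unsigned interrupt_request;
};

#define CPU_INTERRUPT_HARD 1u

static bool moxie_like_has_work(CPUState *cs)
{
    return cs->interrupt_request & CPU_INTERRUPT_HARD;
}

int main(void)
{
    CPUClass cc = { .has_work = moxie_like_has_work };
    CPUState cs = { .cc = &cc, .interrupt_request = CPU_INTERRUPT_HARD };

    printf("has_work = %d\n", cs.cc->has_work(&cs)); /* dispatch via the class */
    return 0;
}
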
@@ -130,18 +138,7 @@ static const MoxieCPUInfo moxie_cpus[] = {
MoxieCPU *cpu_moxie_init(const char *cpu_model)
{
- MoxieCPU *cpu;
- ObjectClass *oc;
-
- oc = moxie_cpu_class_by_name(cpu_model);
- if (oc == NULL) {
- return NULL;
- }
- cpu = MOXIE_CPU(object_new(object_class_get_name(oc)));
-
- object_property_set_bool(OBJECT(cpu), true, "realized", NULL);
-
- return cpu;
+ return MOXIE_CPU(cpu_generic_init(TYPE_MOXIE_CPU, cpu_model));
}
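
cpu_generic_init() replaces the open-coded class lookup, instantiation and realize step. A loose standalone approximation of what such a factory does, under invented names:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative only: look up the model by name, allocate an instance,
 * then "realize" it, returning NULL for unknown models. */
typedef struct Obj { const char *type; int realized; } Obj;

static const char *known_types[] = { "moxie-cpu", "any", NULL };

static Obj *generic_init(const char *model)
{
    int i;

    for (i = 0; known_types[i] != NULL; i++) {
        if (strcmp(known_types[i], model) == 0) {
            Obj *o = calloc(1, sizeof(*o));
            if (o != NULL) {
                o->type = known_types[i];
                o->realized = 1;    /* "realize" the new instance */
            }
            return o;
        }
    }
    return NULL;                    /* unknown model name */
}

int main(void)
{
    Obj *cpu = generic_init("moxie-cpu");

    printf("%s realized=%d\n", cpu ? cpu->type : "(none)",
           cpu ? cpu->realized : 0);
    free(cpu);
    return 0;
}
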
static void cpu_register(const MoxieCPUInfo *info)
diff --git a/target-moxie/cpu.h b/target-moxie/cpu.h
index 5ce14b5fd3..c5b12a5244 100644
--- a/target-moxie/cpu.h
+++ b/target-moxie/cpu.h
@@ -152,12 +152,7 @@ static inline void cpu_get_tb_cpu_state(CPUMoxieState *env, target_ulong *pc,
*flags = 0;
}
-static inline int cpu_has_work(CPUState *cpu)
-{
- return cpu->interrupt_request & CPU_INTERRUPT_HARD;
-}
-
-int cpu_moxie_handle_mmu_fault(CPUMoxieState *env, target_ulong address,
+int moxie_cpu_handle_mmu_fault(CPUState *cpu, vaddr address,
int rw, int mmu_idx);
#endif /* _CPU_MOXIE_H */
diff --git a/target-moxie/helper.c b/target-moxie/helper.c
index 7859102ab7..3d0c34dd0a 100644
--- a/target-moxie/helper.c
+++ b/target-moxie/helper.c
@@ -46,31 +46,33 @@
/* Try to fill the TLB and return an exception if error. If retaddr is
NULL, it means that the function was called in C code (i.e. not
from generated code or from helper.c) */
-void tlb_fill(CPUMoxieState *env, target_ulong addr, int is_write, int mmu_idx,
+void tlb_fill(CPUState *cs, target_ulong addr, int is_write, int mmu_idx,
uintptr_t retaddr)
{
int ret;
- ret = cpu_moxie_handle_mmu_fault(env, addr, is_write, mmu_idx);
+ ret = moxie_cpu_handle_mmu_fault(cs, addr, is_write, mmu_idx);
if (unlikely(ret)) {
if (retaddr) {
- cpu_restore_state(env, retaddr);
+ cpu_restore_state(cs, retaddr);
}
}
- cpu_loop_exit(env);
+ cpu_loop_exit(cs);
}
void helper_raise_exception(CPUMoxieState *env, int ex)
{
- env->exception_index = ex;
+ CPUState *cs = CPU(moxie_env_get_cpu(env));
+
+ cs->exception_index = ex;
/* Stash the exception type. */
env->sregs[2] = ex;
/* Stash the address where the exception occurred. */
- cpu_restore_state(env, GETPC());
+ cpu_restore_state(cs, GETPC());
env->sregs[5] = env->pc;
/* Jump to the exception handling routine. */
env->pc = env->sregs[1];
- cpu_loop_exit(env);
+ cpu_loop_exit(cs);
}
uint32_t helper_div(CPUMoxieState *env, uint32_t a, uint32_t b)
@@ -97,33 +99,39 @@ uint32_t helper_udiv(CPUMoxieState *env, uint32_t a, uint32_t b)
void helper_debug(CPUMoxieState *env)
{
- env->exception_index = EXCP_DEBUG;
- cpu_loop_exit(env);
+ CPUState *cs = CPU(moxie_env_get_cpu(env));
+
+ cs->exception_index = EXCP_DEBUG;
+ cpu_loop_exit(cs);
}
#if defined(CONFIG_USER_ONLY)
-void moxie_cpu_do_interrupt(CPUState *env)
+void moxie_cpu_do_interrupt(CPUState *cs)
{
- env->exception_index = -1;
+ cs->exception_index = -1;
}
-int cpu_moxie_handle_mmu_fault(CPUMoxieState *env, target_ulong address,
+int moxie_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
int rw, int mmu_idx)
{
- MoxieCPU *cpu = moxie_env_get_cpu(env);
+ MoxieCPU *cpu = MOXIE_CPU(cs);
- env->exception_index = 0xaa;
- env->debug1 = address;
- cpu_dump_state(CPU(cpu), stderr, fprintf, 0);
+ cs->exception_index = 0xaa;
+ cpu->env.debug1 = address;
+ cpu_dump_state(cs, stderr, fprintf, 0);
return 1;
}
#else /* !CONFIG_USER_ONLY */
-int cpu_moxie_handle_mmu_fault(CPUMoxieState *env, target_ulong address,
+int moxie_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
int rw, int mmu_idx)
{
+ MoxieCPU *cpu = MOXIE_CPU(cs);
+ CPUMoxieState *env = &cpu->env;
MoxieMMUResult res;
int prot, miss;
target_ulong phy;
@@ -135,22 +143,19 @@ int cpu_moxie_handle_mmu_fault(CPUMoxieState *env, target_ulong address,
if (miss) {
/* handle the miss. */
phy = 0;
- env->exception_index = MOXIE_EX_MMU_MISS;
+ cs->exception_index = MOXIE_EX_MMU_MISS;
} else {
phy = res.phy;
r = 0;
}
- tlb_set_page(env, address, phy, prot, mmu_idx, TARGET_PAGE_SIZE);
+ tlb_set_page(cs, address, phy, prot, mmu_idx, TARGET_PAGE_SIZE);
return r;
}
void moxie_cpu_do_interrupt(CPUState *cs)
{
- MoxieCPU *cpu = MOXIE_CPU(cs);
- CPUMoxieState *env = &cpu->env;
-
- switch (env->exception_index) {
+ switch (cs->exception_index) {
case MOXIE_EX_BREAK:
break;
default:
diff --git a/target-moxie/translate.c b/target-moxie/translate.c
index a93196f47b..63f889fd7f 100644
--- a/target-moxie/translate.c
+++ b/target-moxie/translate.c
@@ -845,8 +845,8 @@ gen_intermediate_code_internal(MoxieCPU *cpu, TranslationBlock *tb,
gen_tb_start();
do {
- if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
- QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
+ if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
+ QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
if (ctx.pc == bp->pc) {
tcg_gen_movi_i32(cpu_pc, ctx.pc);
gen_helper_debug(cpu_env);
diff --git a/target-openrisc/cpu.c b/target-openrisc/cpu.c
index 813794300b..08e724c126 100644
--- a/target-openrisc/cpu.c
+++ b/target-openrisc/cpu.c
@@ -27,6 +27,12 @@ static void openrisc_cpu_set_pc(CPUState *cs, vaddr value)
cpu->env.pc = value;
}
+static bool openrisc_cpu_has_work(CPUState *cs)
+{
+ return cs->interrupt_request & (CPU_INTERRUPT_HARD |
+ CPU_INTERRUPT_TIMER);
+}
+
/* CPUClass::reset() */
static void openrisc_cpu_reset(CPUState *s)
{
@@ -35,14 +41,18 @@ static void openrisc_cpu_reset(CPUState *s)
occ->parent_reset(s);
- memset(&cpu->env, 0, offsetof(CPUOpenRISCState, breakpoints));
+#ifndef CONFIG_USER_ONLY
+ memset(&cpu->env, 0, offsetof(CPUOpenRISCState, tlb));
+#else
+ memset(&cpu->env, 0, offsetof(CPUOpenRISCState, irq));
+#endif
- tlb_flush(&cpu->env, 1);
+ tlb_flush(s, 1);
/*tb_flush(&cpu->env); FIXME: Do we need it? */
cpu->env.pc = 0x100;
cpu->env.sr = SR_FO | SR_SM;
- cpu->env.exception_index = -1;
+ s->exception_index = -1;
cpu->env.upr = UPR_UP | UPR_DMP | UPR_IMP | UPR_PICP | UPR_TTP;
cpu->env.cpucfgr = CPUCFGR_OB32S | CPUCFGR_OF32S;
@@ -153,12 +163,15 @@ static void openrisc_cpu_class_init(ObjectClass *oc, void *data)
cc->reset = openrisc_cpu_reset;
cc->class_by_name = openrisc_cpu_class_by_name;
+ cc->has_work = openrisc_cpu_has_work;
cc->do_interrupt = openrisc_cpu_do_interrupt;
cc->dump_state = openrisc_cpu_dump_state;
cc->set_pc = openrisc_cpu_set_pc;
cc->gdb_read_register = openrisc_cpu_gdb_read_register;
cc->gdb_write_register = openrisc_cpu_gdb_write_register;
-#ifndef CONFIG_USER_ONLY
+#ifdef CONFIG_USER_ONLY
+ cc->handle_mmu_fault = openrisc_cpu_handle_mmu_fault;
+#else
cc->get_phys_page_debug = openrisc_cpu_get_phys_page_debug;
dc->vmsd = &vmstate_openrisc_cpu;
#endif
@@ -201,18 +214,7 @@ static void openrisc_cpu_register_types(void)
OpenRISCCPU *cpu_openrisc_init(const char *cpu_model)
{
- OpenRISCCPU *cpu;
- ObjectClass *oc;
-
- oc = openrisc_cpu_class_by_name(cpu_model);
- if (oc == NULL) {
- return NULL;
- }
- cpu = OPENRISC_CPU(object_new(object_class_get_name(oc)));
-
- object_property_set_bool(OBJECT(cpu), true, "realized", NULL);
-
- return cpu;
+ return OPENRISC_CPU(cpu_generic_init(TYPE_OPENRISC_CPU, cpu_model));
}
/* Sort alphabetically by type name, except for "any". */
diff --git a/target-openrisc/cpu.h b/target-openrisc/cpu.h
index 51d6afd153..4512f459bf 100644
--- a/target-openrisc/cpu.h
+++ b/target-openrisc/cpu.h
@@ -304,6 +304,7 @@ typedef struct CPUOpenRISCState {
CPU_COMMON
+ /* Fields from here on are preserved across CPU reset. */
#ifndef CONFIG_USER_ONLY
CPUOpenRISCTLBContext * tlb;
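
The new comment marks the point in CPUOpenRISCState up to which reset clears state; everything laid out after it survives. A small self-contained example of this offsetof-based partial reset, with a made-up State struct:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Illustrative struct: only the fields before the marker are cleared. */
typedef struct State {
    int pc;
    int sr;
    /* fields from here on are preserved across reset */
    void *tlb;
    int irq_lines;
} State;

int main(void)
{
    State s = { .pc = 0x100, .sr = 7, .tlb = &s, .irq_lines = 3 };

    memset(&s, 0, offsetof(State, tlb));
    printf("pc=%d sr=%d tlb=%p irq=%d\n", s.pc, s.sr, s.tlb, s.irq_lines);
    return 0;
}
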
@@ -353,15 +354,13 @@ hwaddr openrisc_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
int openrisc_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
int openrisc_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
void openrisc_translate_init(void);
-int cpu_openrisc_handle_mmu_fault(CPUOpenRISCState *env,
- target_ulong address,
+int openrisc_cpu_handle_mmu_fault(CPUState *cpu, vaddr address,
int rw, int mmu_idx);
int cpu_openrisc_signal_handler(int host_signum, void *pinfo, void *puc);
#define cpu_list cpu_openrisc_list
#define cpu_exec cpu_openrisc_exec
#define cpu_gen_code cpu_openrisc_gen_code
-#define cpu_handle_mmu_fault cpu_openrisc_handle_mmu_fault
#define cpu_signal_handler cpu_openrisc_signal_handler
#ifndef CONFIG_USER_ONLY
@@ -419,11 +418,6 @@ static inline int cpu_mmu_index(CPUOpenRISCState *env)
}
#define CPU_INTERRUPT_TIMER CPU_INTERRUPT_TGT_INT_0
-static inline bool cpu_has_work(CPUState *cpu)
-{
- return cpu->interrupt_request & (CPU_INTERRUPT_HARD |
- CPU_INTERRUPT_TIMER);
-}
#include "exec/exec-all.h"
diff --git a/target-openrisc/exception.c b/target-openrisc/exception.c
index 58e53c6c98..74652a58f6 100644
--- a/target-openrisc/exception.c
+++ b/target-openrisc/exception.c
@@ -22,6 +22,8 @@
void QEMU_NORETURN raise_exception(OpenRISCCPU *cpu, uint32_t excp)
{
- cpu->env.exception_index = excp;
- cpu_loop_exit(&cpu->env);
+ CPUState *cs = CPU(cpu);
+
+ cs->exception_index = excp;
+ cpu_loop_exit(cs);
}
diff --git a/target-openrisc/interrupt.c b/target-openrisc/interrupt.c
index 2153e7ea7e..3de567eee8 100644
--- a/target-openrisc/interrupt.c
+++ b/target-openrisc/interrupt.c
@@ -27,9 +27,9 @@
void openrisc_cpu_do_interrupt(CPUState *cs)
{
+#ifndef CONFIG_USER_ONLY
OpenRISCCPU *cpu = OPENRISC_CPU(cs);
CPUOpenRISCState *env = &cpu->env;
-#ifndef CONFIG_USER_ONLY
env->epcr = env->pc;
if (env->flags & D_FLAG) {
@@ -37,13 +37,13 @@ void openrisc_cpu_do_interrupt(CPUState *cs)
env->sr |= SR_DSX;
env->epcr -= 4;
}
- if (env->exception_index == EXCP_SYSCALL) {
+ if (cs->exception_index == EXCP_SYSCALL) {
env->epcr += 4;
}
/* When the machine state changes between user mode and supervisor mode,
we need to flush the TLB as we enter and exit the exception. */
- tlb_flush(env, 1);
+ tlb_flush(cs, 1);
env->esr = env->sr;
env->sr &= ~SR_DME;
@@ -54,12 +54,12 @@ void openrisc_cpu_do_interrupt(CPUState *cs)
env->tlb->cpu_openrisc_map_address_data = &cpu_openrisc_get_phys_nommu;
env->tlb->cpu_openrisc_map_address_code = &cpu_openrisc_get_phys_nommu;
- if (env->exception_index > 0 && env->exception_index < EXCP_NR) {
- env->pc = (env->exception_index << 8);
+ if (cs->exception_index > 0 && cs->exception_index < EXCP_NR) {
+ env->pc = (cs->exception_index << 8);
} else {
- cpu_abort(env, "Unhandled exception 0x%x\n", env->exception_index);
+ cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
}
#endif
- env->exception_index = -1;
+ cs->exception_index = -1;
}
diff --git a/target-openrisc/interrupt_helper.c b/target-openrisc/interrupt_helper.c
index 844648f780..819405701d 100644
--- a/target-openrisc/interrupt_helper.c
+++ b/target-openrisc/interrupt_helper.c
@@ -51,7 +51,7 @@ void HELPER(rfe)(CPUOpenRISCState *env)
}
if (need_flush_tlb) {
- tlb_flush(&cpu->env, 1);
+ tlb_flush(cs, 1);
}
#endif
cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
diff --git a/target-openrisc/mmu.c b/target-openrisc/mmu.c
index dd487bd0d1..750a93636b 100644
--- a/target-openrisc/mmu.c
+++ b/target-openrisc/mmu.c
@@ -139,6 +139,7 @@ static void cpu_openrisc_raise_mmu_exception(OpenRISCCPU *cpu,
target_ulong address,
int rw, int tlb_error)
{
+ CPUState *cs = CPU(cpu);
int exception = 0;
switch (tlb_error) {
@@ -169,24 +170,24 @@ static void cpu_openrisc_raise_mmu_exception(OpenRISCCPU *cpu,
#endif
}
- cpu->env.exception_index = exception;
+ cs->exception_index = exception;
cpu->env.eear = address;
}
#ifndef CONFIG_USER_ONLY
-int cpu_openrisc_handle_mmu_fault(CPUOpenRISCState *env,
- target_ulong address, int rw, int mmu_idx)
+int openrisc_cpu_handle_mmu_fault(CPUState *cs,
+ vaddr address, int rw, int mmu_idx)
{
+ OpenRISCCPU *cpu = OPENRISC_CPU(cs);
int ret = 0;
hwaddr physical = 0;
int prot = 0;
- OpenRISCCPU *cpu = openrisc_env_get_cpu(env);
ret = cpu_openrisc_get_phys_addr(cpu, &physical, &prot,
address, rw);
if (ret == TLBRET_MATCH) {
- tlb_set_page(env, address & TARGET_PAGE_MASK,
+ tlb_set_page(cs, address & TARGET_PAGE_MASK,
physical & TARGET_PAGE_MASK, prot,
mmu_idx, TARGET_PAGE_SIZE);
ret = 0;
@@ -198,11 +199,11 @@ int cpu_openrisc_handle_mmu_fault(CPUOpenRISCState *env,
return ret;
}
#else
-int cpu_openrisc_handle_mmu_fault(CPUOpenRISCState *env,
- target_ulong address, int rw, int mmu_idx)
+int openrisc_cpu_handle_mmu_fault(CPUState *cs,
+ vaddr address, int rw, int mmu_idx)
{
+ OpenRISCCPU *cpu = OPENRISC_CPU(cs);
int ret = 0;
- OpenRISCCPU *cpu = openrisc_env_get_cpu(env);
cpu_openrisc_raise_mmu_exception(cpu, address, rw, ret);
ret = 1;
diff --git a/target-openrisc/mmu_helper.c b/target-openrisc/mmu_helper.c
index e46b092984..fb457c76af 100644
--- a/target-openrisc/mmu_helper.c
+++ b/target-openrisc/mmu_helper.c
@@ -36,20 +36,20 @@
#define SHIFT 3
#include "exec/softmmu_template.h"
-void tlb_fill(CPUOpenRISCState *env, target_ulong addr, int is_write,
+void tlb_fill(CPUState *cs, target_ulong addr, int is_write,
int mmu_idx, uintptr_t retaddr)
{
int ret;
- ret = cpu_openrisc_handle_mmu_fault(env, addr, is_write, mmu_idx);
+ ret = openrisc_cpu_handle_mmu_fault(cs, addr, is_write, mmu_idx);
if (ret) {
if (retaddr) {
/* now we have a real cpu fault. */
- cpu_restore_state(env, retaddr);
+ cpu_restore_state(cs, retaddr);
}
/* Raise Exception. */
- cpu_loop_exit(env);
+ cpu_loop_exit(cs);
}
}
#endif
diff --git a/target-openrisc/sys_helper.c b/target-openrisc/sys_helper.c
index be06c4565b..fedcbed4f7 100644
--- a/target-openrisc/sys_helper.c
+++ b/target-openrisc/sys_helper.c
@@ -45,7 +45,7 @@ void HELPER(mtspr)(CPUOpenRISCState *env,
case TO_SPR(0, 17): /* SR */
if ((env->sr & (SR_IME | SR_DME | SR_SM)) ^
(rb & (SR_IME | SR_DME | SR_SM))) {
- tlb_flush(env, 1);
+ tlb_flush(cs, 1);
}
env->sr = rb;
env->sr |= SR_FO; /* FO is const equal to 1 */
@@ -84,7 +84,7 @@ void HELPER(mtspr)(CPUOpenRISCState *env,
case TO_SPR(1, 512) ... TO_SPR(1, 512+DTLB_SIZE-1): /* DTLBW0MR 0-127 */
idx = spr - TO_SPR(1, 512);
if (!(rb & 1)) {
- tlb_flush_page(env, env->tlb->dtlb[0][idx].mr & TARGET_PAGE_MASK);
+ tlb_flush_page(cs, env->tlb->dtlb[0][idx].mr & TARGET_PAGE_MASK);
}
env->tlb->dtlb[0][idx].mr = rb;
break;
@@ -103,7 +103,7 @@ void HELPER(mtspr)(CPUOpenRISCState *env,
case TO_SPR(2, 512) ... TO_SPR(2, 512+ITLB_SIZE-1): /* ITLBW0MR 0-127 */
idx = spr - TO_SPR(2, 512);
if (!(rb & 1)) {
- tlb_flush_page(env, env->tlb->itlb[0][idx].mr & TARGET_PAGE_MASK);
+ tlb_flush_page(cs, env->tlb->itlb[0][idx].mr & TARGET_PAGE_MASK);
}
env->tlb->itlb[0][idx].mr = rb;
break;
diff --git a/target-openrisc/translate.c b/target-openrisc/translate.c
index 776cb6eece..852b5e6107 100644
--- a/target-openrisc/translate.c
+++ b/target-openrisc/translate.c
@@ -1619,10 +1619,11 @@ static void disas_openrisc_insn(DisasContext *dc, OpenRISCCPU *cpu)
static void check_breakpoint(OpenRISCCPU *cpu, DisasContext *dc)
{
+ CPUState *cs = CPU(cpu);
CPUBreakpoint *bp;
- if (unlikely(!QTAILQ_EMPTY(&cpu->env.breakpoints))) {
- QTAILQ_FOREACH(bp, &cpu->env.breakpoints, entry) {
+ if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
+ QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
if (bp->pc == dc->pc) {
tcg_gen_movi_tl(cpu_pc, dc->pc);
gen_exception(dc, EXCP_DEBUG);
diff --git a/target-ppc/arch_dump.c b/target-ppc/arch_dump.c
index 17fd4c6fb1..9dccf1ae1f 100644
--- a/target-ppc/arch_dump.c
+++ b/target-ppc/arch_dump.c
@@ -164,7 +164,7 @@ static void ppc64_write_elf64_speregset(Note *note, PowerPCCPU *cpu)
speregset->spe_fscr = cpu_to_be32(cpu->env.spe_fscr);
}
-struct NoteFuncDescStruct {
+static const struct NoteFuncDescStruct {
int contents_size;
void (*note_contents_func)(Note *note, PowerPCCPU *cpu);
} note_func[] = {
@@ -196,7 +196,7 @@ ssize_t cpu_get_note_size(int class, int machine, int nr_cpus)
int name_size = 8; /* "CORE" or "QEMU" rounded */
size_t elf_note_size = 0;
int note_head_size;
- NoteFuncDesc *nf;
+ const NoteFuncDesc *nf;
if (class != ELFCLASS64) {
return -1;
@@ -221,7 +221,7 @@ static int ppc64_write_all_elf64_notes(const char *note_name,
Note note;
int ret = -1;
int note_size;
- NoteFuncDesc *nf;
+ const NoteFuncDesc *nf;
for (nf = note_func; nf->note_contents_func; nf++) {
note.hdr.n_namesz = cpu_to_be32(sizeof(note.name));
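
Making the note descriptor table static const matches how it is used: it is only walked, never written. A minimal standalone version of the same pattern, with invented names:

#include <stdio.h>

/* A descriptor table that is only ever read should be static (internal
 * linkage) and const (eligible for read-only storage). */
typedef struct NoteDesc {
    int contents_size;
    void (*emit)(int size);
} NoteDesc;

static void emit_stub(int size)
{
    printf("note of %d bytes\n", size);
}

static const NoteDesc note_table[] = {
    { 16, emit_stub },
    { 0, NULL },                      /* sentinel terminates the walk */
};

int main(void)
{
    const NoteDesc *nf;

    for (nf = note_table; nf->emit != NULL; nf++) {
        nf->emit(nf->contents_size);
    }
    return 0;
}
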
diff --git a/target-ppc/cpu-qom.h b/target-ppc/cpu-qom.h
index b17c024543..47dc8e6fdf 100644
--- a/target-ppc/cpu-qom.h
+++ b/target-ppc/cpu-qom.h
@@ -38,6 +38,8 @@
#define POWERPC_CPU_GET_CLASS(obj) \
OBJECT_GET_CLASS(PowerPCCPUClass, (obj), TYPE_POWERPC_CPU)
+typedef struct PowerPCCPU PowerPCCPU;
+
/**
* PowerPCCPUClass:
* @parent_realize: The parent class' realize handler.
@@ -71,7 +73,7 @@ typedef struct PowerPCCPUClass {
void (*init_proc)(CPUPPCState *env);
int (*check_pow)(CPUPPCState *env);
#if defined(CONFIG_SOFTMMU)
- int (*handle_mmu_fault)(CPUPPCState *env, target_ulong eaddr, int rwx,
+ int (*handle_mmu_fault)(PowerPCCPU *cpu, target_ulong eaddr, int rwx,
int mmu_idx);
#endif
} PowerPCCPUClass;
@@ -83,14 +85,14 @@ typedef struct PowerPCCPUClass {
*
* A PowerPC CPU.
*/
-typedef struct PowerPCCPU {
+struct PowerPCCPU {
/*< private >*/
CPUState parent_obj;
/*< public >*/
CPUPPCState env;
int cpu_dt_id;
-} PowerPCCPU;
+};
static inline PowerPCCPU *ppc_env_get_cpu(CPUPPCState *env)
{
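
The typedef is pulled ahead of the struct body so the class can declare a hook taking PowerPCCPU * before the instance type is fully defined. A self-contained sketch of the forward-typedef pattern (the *Like names here are hypothetical, not the real QEMU types):

#include <stdio.h>

/* Forward typedef: the name exists before the struct body is known. */
typedef struct PowerPCCPULike PowerPCCPULike;

typedef struct PowerPCCPUClassLike {
    int (*handle_mmu_fault)(PowerPCCPULike *cpu, unsigned long eaddr,
                            int rwx, int mmu_idx);
} PowerPCCPUClassLike;

struct PowerPCCPULike {
    int dummy_env;
};

static int fault_stub(PowerPCCPULike *cpu, unsigned long eaddr,
                      int rwx, int mmu_idx)
{
    (void)eaddr; (void)rwx; (void)mmu_idx;
    return cpu->dummy_env;
}

int main(void)
{
    PowerPCCPUClassLike cc = { .handle_mmu_fault = fault_stub };
    PowerPCCPULike cpu = { .dummy_env = 0 };

    printf("%d\n", cc.handle_mmu_fault(&cpu, 0, 0, 0));
    return 0;
}
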
diff --git a/target-ppc/cpu.h b/target-ppc/cpu.h
index afab267485..d4983405a2 100644
--- a/target-ppc/cpu.h
+++ b/target-ppc/cpu.h
@@ -334,6 +334,7 @@ struct ppc_spr_t {
void (*hea_write)(void *opaque, int spr_num, int gpr_num);
#endif
const char *name;
+ target_ulong default_value;
#ifdef CONFIG_KVM
/* We (ab)use the fact that all the SPRs will have ids for the
* ONE_REG interface will have KVM_REG_PPC to use 0 as meaning,
@@ -1111,8 +1112,8 @@ int cpu_ppc_signal_handler (int host_signum, void *pinfo,
void *puc);
void ppc_hw_interrupt (CPUPPCState *env);
#if defined(CONFIG_USER_ONLY)
-int cpu_handle_mmu_fault(CPUPPCState *env, target_ulong address, int rw,
- int mmu_idx);
+int ppc_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw,
+ int mmu_idx);
#endif
#if !defined(CONFIG_USER_ONLY)
@@ -1132,6 +1133,7 @@ uint64_t cpu_ppc_load_atbl (CPUPPCState *env);
uint32_t cpu_ppc_load_atbu (CPUPPCState *env);
void cpu_ppc_store_atbl (CPUPPCState *env, uint32_t value);
void cpu_ppc_store_atbu (CPUPPCState *env, uint32_t value);
+bool ppc_decr_clear_on_delivery(CPUPPCState *env);
uint32_t cpu_ppc_load_decr (CPUPPCState *env);
void cpu_ppc_store_decr (CPUPPCState *env, uint32_t value);
uint32_t cpu_ppc_load_hdecr (CPUPPCState *env);
@@ -1900,6 +1902,8 @@ enum {
PPC2_LSQ_ISA207 = 0x0000000000002000ULL,
/* ISA 2.07 Altivec */
PPC2_ALTIVEC_207 = 0x0000000000004000ULL,
+ /* PowerISA 2.07 Book3s specification */
+ PPC2_ISA207S = 0x0000000000008000ULL,
#define PPC_TCG_INSNS2 (PPC2_BOOKE206 | PPC2_VSX | PPC2_PRCNTL | PPC2_DBRX | \
PPC2_ISA205 | PPC2_VSX207 | PPC2_PERM_ISA206 | \
@@ -2100,7 +2104,7 @@ static inline int booke206_tlbm_to_tlbn(CPUPPCState *env, ppcmas_tlb_t *tlbm)
}
}
- cpu_abort(env, "Unknown TLBe: %d\n", id);
+ cpu_abort(CPU(ppc_env_get_cpu(env)), "Unknown TLBe: %d\n", id);
return 0;
}
@@ -2171,14 +2175,6 @@ static inline bool msr_is_64bit(CPUPPCState *env, target_ulong msr)
extern void (*cpu_ppc_hypercall)(PowerPCCPU *);
-static inline bool cpu_has_work(CPUState *cpu)
-{
- PowerPCCPU *ppc_cpu = POWERPC_CPU(cpu);
- CPUPPCState *env = &ppc_cpu->env;
-
- return msr_ee && (cpu->interrupt_request & CPU_INTERRUPT_HARD);
-}
-
#include "exec/exec-all.h"
void dump_mmu(FILE *f, fprintf_function cpu_fprintf, CPUPPCState *env);
diff --git a/target-ppc/excp_helper.c b/target-ppc/excp_helper.c
index d541929743..4fa297d7dd 100644
--- a/target-ppc/excp_helper.c
+++ b/target-ppc/excp_helper.c
@@ -43,13 +43,15 @@ void ppc_cpu_do_interrupt(CPUState *cs)
PowerPCCPU *cpu = POWERPC_CPU(cs);
CPUPPCState *env = &cpu->env;
- env->exception_index = POWERPC_EXCP_NONE;
+ cs->exception_index = POWERPC_EXCP_NONE;
env->error_code = 0;
}
void ppc_hw_interrupt(CPUPPCState *env)
{
- env->exception_index = POWERPC_EXCP_NONE;
+ CPUState *cs = CPU(ppc_env_get_cpu(env));
+
+ cs->exception_index = POWERPC_EXCP_NONE;
env->error_code = 0;
}
#else /* defined(CONFIG_USER_ONLY) */
@@ -68,8 +70,8 @@ static inline void dump_syscall(CPUPPCState *env)
*/
static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
{
+ CPUState *cs = CPU(cpu);
CPUPPCState *env = &cpu->env;
- CPUState *cs;
target_ulong msr, new_msr, vector;
int srr0, srr1, asrr0, asrr1;
int lpes0, lpes1, lev;
@@ -135,7 +137,6 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
fprintf(stderr, "Machine check while not allowed. "
"Entering checkstop state\n");
}
- cs = CPU(cpu);
cs->halted = 1;
cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
}
@@ -204,7 +205,7 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
case POWERPC_EXCP_FP:
if ((msr_fe0 == 0 && msr_fe1 == 0) || msr_fp == 0) {
LOG_EXCP("Ignore floating point exception\n");
- env->exception_index = POWERPC_EXCP_NONE;
+ cs->exception_index = POWERPC_EXCP_NONE;
env->error_code = 0;
return;
}
@@ -241,7 +242,7 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
break;
default:
/* Should never occur */
- cpu_abort(env, "Invalid program exception %d. Aborting\n",
+ cpu_abort(cs, "Invalid program exception %d. Aborting\n",
env->error_code);
break;
}
@@ -301,26 +302,26 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
break;
}
/* XXX: TODO */
- cpu_abort(env, "Debug exception is not implemented yet !\n");
+ cpu_abort(cs, "Debug exception is not implemented yet !\n");
goto store_next;
case POWERPC_EXCP_SPEU: /* SPE/embedded floating-point unavailable */
env->spr[SPR_BOOKE_ESR] = ESR_SPV;
goto store_current;
case POWERPC_EXCP_EFPDI: /* Embedded floating-point data interrupt */
/* XXX: TODO */
- cpu_abort(env, "Embedded floating point data exception "
+ cpu_abort(cs, "Embedded floating point data exception "
"is not implemented yet !\n");
env->spr[SPR_BOOKE_ESR] = ESR_SPV;
goto store_next;
case POWERPC_EXCP_EFPRI: /* Embedded floating-point round interrupt */
/* XXX: TODO */
- cpu_abort(env, "Embedded floating point round exception "
+ cpu_abort(cs, "Embedded floating point round exception "
"is not implemented yet !\n");
env->spr[SPR_BOOKE_ESR] = ESR_SPV;
goto store_next;
case POWERPC_EXCP_EPERFM: /* Embedded performance monitor interrupt */
/* XXX: TODO */
- cpu_abort(env,
+ cpu_abort(cs,
"Performance counter exception is not implemented yet !\n");
goto store_next;
case POWERPC_EXCP_DOORI: /* Embedded doorbell interrupt */
@@ -402,15 +403,15 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
goto store_next;
case POWERPC_EXCP_IO: /* IO error exception */
/* XXX: TODO */
- cpu_abort(env, "601 IO error exception is not implemented yet !\n");
+ cpu_abort(cs, "601 IO error exception is not implemented yet !\n");
goto store_next;
case POWERPC_EXCP_RUNM: /* Run mode exception */
/* XXX: TODO */
- cpu_abort(env, "601 run mode exception is not implemented yet !\n");
+ cpu_abort(cs, "601 run mode exception is not implemented yet !\n");
goto store_next;
case POWERPC_EXCP_EMUL: /* Emulation trap exception */
/* XXX: TODO */
- cpu_abort(env, "602 emulation trap exception "
+ cpu_abort(cs, "602 emulation trap exception "
"is not implemented yet !\n");
goto store_next;
case POWERPC_EXCP_IFTLB: /* Instruction fetch TLB error */
@@ -428,7 +429,7 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
case POWERPC_EXCP_74xx:
goto tlb_miss_74xx;
default:
- cpu_abort(env, "Invalid instruction TLB miss exception\n");
+ cpu_abort(cs, "Invalid instruction TLB miss exception\n");
break;
}
break;
@@ -447,7 +448,7 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
case POWERPC_EXCP_74xx:
goto tlb_miss_74xx;
default:
- cpu_abort(env, "Invalid data load TLB miss exception\n");
+ cpu_abort(cs, "Invalid data load TLB miss exception\n");
break;
}
break;
@@ -533,30 +534,30 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
msr |= env->error_code; /* key bit */
break;
default:
- cpu_abort(env, "Invalid data store TLB miss exception\n");
+ cpu_abort(cs, "Invalid data store TLB miss exception\n");
break;
}
goto store_next;
case POWERPC_EXCP_FPA: /* Floating-point assist exception */
/* XXX: TODO */
- cpu_abort(env, "Floating point assist exception "
+ cpu_abort(cs, "Floating point assist exception "
"is not implemented yet !\n");
goto store_next;
case POWERPC_EXCP_DABR: /* Data address breakpoint */
/* XXX: TODO */
- cpu_abort(env, "DABR exception is not implemented yet !\n");
+ cpu_abort(cs, "DABR exception is not implemented yet !\n");
goto store_next;
case POWERPC_EXCP_IABR: /* Instruction address breakpoint */
/* XXX: TODO */
- cpu_abort(env, "IABR exception is not implemented yet !\n");
+ cpu_abort(cs, "IABR exception is not implemented yet !\n");
goto store_next;
case POWERPC_EXCP_SMI: /* System management interrupt */
/* XXX: TODO */
- cpu_abort(env, "SMI exception is not implemented yet !\n");
+ cpu_abort(cs, "SMI exception is not implemented yet !\n");
goto store_next;
case POWERPC_EXCP_THERM: /* Thermal interrupt */
/* XXX: TODO */
- cpu_abort(env, "Thermal management exception "
+ cpu_abort(cs, "Thermal management exception "
"is not implemented yet !\n");
goto store_next;
case POWERPC_EXCP_PERFM: /* Embedded performance monitor interrupt */
@@ -564,36 +565,36 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
new_msr |= (target_ulong)MSR_HVB;
}
/* XXX: TODO */
- cpu_abort(env,
+ cpu_abort(cs,
"Performance counter exception is not implemented yet !\n");
goto store_next;
case POWERPC_EXCP_VPUA: /* Vector assist exception */
/* XXX: TODO */
- cpu_abort(env, "VPU assist exception is not implemented yet !\n");
+ cpu_abort(cs, "VPU assist exception is not implemented yet !\n");
goto store_next;
case POWERPC_EXCP_SOFTP: /* Soft patch exception */
/* XXX: TODO */
- cpu_abort(env,
+ cpu_abort(cs,
"970 soft-patch exception is not implemented yet !\n");
goto store_next;
case POWERPC_EXCP_MAINT: /* Maintenance exception */
/* XXX: TODO */
- cpu_abort(env,
+ cpu_abort(cs,
"970 maintenance exception is not implemented yet !\n");
goto store_next;
case POWERPC_EXCP_MEXTBR: /* Maskable external breakpoint */
/* XXX: TODO */
- cpu_abort(env, "Maskable external exception "
+ cpu_abort(cs, "Maskable external exception "
"is not implemented yet !\n");
goto store_next;
case POWERPC_EXCP_NMEXTBR: /* Non maskable external breakpoint */
/* XXX: TODO */
- cpu_abort(env, "Non maskable external exception "
+ cpu_abort(cs, "Non maskable external exception "
"is not implemented yet !\n");
goto store_next;
default:
excp_invalid:
- cpu_abort(env, "Invalid PowerPC exception %d. Aborting\n", excp);
+ cpu_abort(cs, "Invalid PowerPC exception %d. Aborting\n", excp);
break;
store_current:
/* save current instruction location */
@@ -615,7 +616,7 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
}
/* If we deactivated any translation, flush TLBs */
if (msr & ((1 << MSR_IR) | (1 << MSR_DR))) {
- tlb_flush(env, 1);
+ tlb_flush(cs, 1);
}
#ifdef TARGET_PPC64
@@ -635,7 +636,7 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
/* Jump to handler */
vector = env->excp_vectors[excp];
if (vector == (target_ulong)-1ULL) {
- cpu_abort(env, "Raised an exception without defined vector %d\n",
+ cpu_abort(cs, "Raised an exception without defined vector %d\n",
excp);
}
vector |= env->excp_prefix;
@@ -662,7 +663,7 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
hreg_compute_hflags(env);
env->nip = vector;
/* Reset exception state */
- env->exception_index = POWERPC_EXCP_NONE;
+ cs->exception_index = POWERPC_EXCP_NONE;
env->error_code = 0;
if ((env->mmu_model == POWERPC_MMU_BOOKE) ||
@@ -670,7 +671,7 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
/* XXX: The BookE changes address space when switching modes,
we should probably implement that as different MMU indexes,
but for the moment we do it the slow way and flush all. */
- tlb_flush(env, 1);
+ tlb_flush(cs, 1);
}
}
@@ -679,7 +680,7 @@ void ppc_cpu_do_interrupt(CPUState *cs)
PowerPCCPU *cpu = POWERPC_CPU(cs);
CPUPPCState *env = &cpu->env;
- powerpc_excp(cpu, env->excp_model, env->exception_index);
+ powerpc_excp(cpu, env->excp_model, cs->exception_index);
}
void ppc_hw_interrupt(CPUPPCState *env)
@@ -722,7 +723,6 @@ void ppc_hw_interrupt(CPUPPCState *env)
if ((msr_ee != 0 || msr_hv == 0 || msr_pr != 0) && hdice != 0) {
/* Hypervisor decrementer exception */
if (env->pending_interrupts & (1 << PPC_INTERRUPT_HDECR)) {
- env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDECR);
powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_HDECR);
return;
}
@@ -766,7 +766,9 @@ void ppc_hw_interrupt(CPUPPCState *env)
}
/* Decrementer exception */
if (env->pending_interrupts & (1 << PPC_INTERRUPT_DECR)) {
- env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DECR);
+ if (ppc_decr_clear_on_delivery(env)) {
+ env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DECR);
+ }
powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DECR);
return;
}
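
ppc_decr_clear_on_delivery() lets the model decide whether taking the decrementer exception clears the pending bit. A rough standalone sketch of that split, with invented names and a single pending flag:

#include <stdbool.h>
#include <stdio.h>

#define PENDING_DECR (1u << 0)   /* illustrative pending-interrupt bit */

/* On some cores the decrementer is effectively level-triggered and stays
 * pending until the guest reloads it; on others it is cleared when the
 * exception is taken. */
static unsigned deliver_decr(unsigned pending, bool clear_on_delivery)
{
    if (pending & PENDING_DECR) {
        if (clear_on_delivery) {
            pending &= ~PENDING_DECR;
        }
        /* ...the exception itself would be raised here... */
    }
    return pending;
}

int main(void)
{
    printf("edge-style: %u  level-style: %u\n",
           deliver_decr(PENDING_DECR, true),
           deliver_decr(PENDING_DECR, false));
    return 0;
}
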
@@ -815,12 +817,14 @@ static void cpu_dump_rfi(target_ulong RA, target_ulong msr)
void helper_raise_exception_err(CPUPPCState *env, uint32_t exception,
uint32_t error_code)
{
+ CPUState *cs = CPU(ppc_env_get_cpu(env));
+
#if 0
printf("Raise exception %3x code : %d\n", exception, error_code);
#endif
- env->exception_index = exception;
+ cs->exception_index = exception;
env->error_code = error_code;
- cpu_loop_exit(env);
+ cpu_loop_exit(cs);
}
void helper_raise_exception(CPUPPCState *env, uint32_t exception)
diff --git a/target-ppc/fpu_helper.c b/target-ppc/fpu_helper.c
index e7f329566d..c6f484fc34 100644
--- a/target-ppc/fpu_helper.c
+++ b/target-ppc/fpu_helper.c
@@ -119,6 +119,7 @@ uint32_t helper_compute_fprf(CPUPPCState *env, uint64_t arg, uint32_t set_fprf)
static inline uint64_t fload_invalid_op_excp(CPUPPCState *env, int op,
int set_fpcc)
{
+ CPUState *cs = CPU(ppc_env_get_cpu(env));
uint64_t ret = 0;
int ve;
@@ -155,7 +156,7 @@ static inline uint64_t fload_invalid_op_excp(CPUPPCState *env, int op,
}
/* We must update the target FPR before raising the exception */
if (ve != 0) {
- env->exception_index = POWERPC_EXCP_PROGRAM;
+ cs->exception_index = POWERPC_EXCP_PROGRAM;
env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC;
/* Update the floating-point enabled exception summary */
env->fpscr |= 1 << FPSCR_FEX;
@@ -224,6 +225,8 @@ static inline void float_zero_divide_excp(CPUPPCState *env)
static inline void float_overflow_excp(CPUPPCState *env)
{
+ CPUState *cs = CPU(ppc_env_get_cpu(env));
+
env->fpscr |= 1 << FPSCR_OX;
/* Update the floating-point exception summary */
env->fpscr |= 1 << FPSCR_FX;
@@ -232,7 +235,7 @@ static inline void float_overflow_excp(CPUPPCState *env)
/* Update the floating-point enabled exception summary */
env->fpscr |= 1 << FPSCR_FEX;
/* We must update the target FPR before raising the exception */
- env->exception_index = POWERPC_EXCP_PROGRAM;
+ cs->exception_index = POWERPC_EXCP_PROGRAM;
env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
} else {
env->fpscr |= 1 << FPSCR_XX;
@@ -242,6 +245,8 @@ static inline void float_overflow_excp(CPUPPCState *env)
static inline void float_underflow_excp(CPUPPCState *env)
{
+ CPUState *cs = CPU(ppc_env_get_cpu(env));
+
env->fpscr |= 1 << FPSCR_UX;
/* Update the floating-point exception summary */
env->fpscr |= 1 << FPSCR_FX;
@@ -250,13 +255,15 @@ static inline void float_underflow_excp(CPUPPCState *env)
/* Update the floating-point enabled exception summary */
env->fpscr |= 1 << FPSCR_FEX;
/* We must update the target FPR before raising the exception */
- env->exception_index = POWERPC_EXCP_PROGRAM;
+ cs->exception_index = POWERPC_EXCP_PROGRAM;
env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
}
}
static inline void float_inexact_excp(CPUPPCState *env)
{
+ CPUState *cs = CPU(ppc_env_get_cpu(env));
+
env->fpscr |= 1 << FPSCR_XX;
/* Update the floating-point exception summary */
env->fpscr |= 1 << FPSCR_FX;
@@ -264,7 +271,7 @@ static inline void float_inexact_excp(CPUPPCState *env)
/* Update the floating-point enabled exception summary */
env->fpscr |= 1 << FPSCR_FEX;
/* We must update the target FPR before raising the exception */
- env->exception_index = POWERPC_EXCP_PROGRAM;
+ cs->exception_index = POWERPC_EXCP_PROGRAM;
env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
}
}
@@ -316,6 +323,7 @@ void helper_fpscr_clrbit(CPUPPCState *env, uint32_t bit)
void helper_fpscr_setbit(CPUPPCState *env, uint32_t bit)
{
+ CPUState *cs = CPU(ppc_env_get_cpu(env));
int prev;
prev = (env->fpscr >> bit) & 1;
@@ -439,7 +447,7 @@ void helper_fpscr_setbit(CPUPPCState *env, uint32_t bit)
/* Update the floating-point enabled exception summary */
env->fpscr |= 1 << FPSCR_FEX;
/* We have to update Rc1 before raising the exception */
- env->exception_index = POWERPC_EXCP_PROGRAM;
+ cs->exception_index = POWERPC_EXCP_PROGRAM;
break;
}
}
@@ -447,6 +455,7 @@ void helper_fpscr_setbit(CPUPPCState *env, uint32_t bit)
void helper_store_fpscr(CPUPPCState *env, uint64_t arg, uint32_t mask)
{
+ CPUState *cs = CPU(ppc_env_get_cpu(env));
target_ulong prev, new;
int i;
@@ -468,7 +477,7 @@ void helper_store_fpscr(CPUPPCState *env, uint64_t arg, uint32_t mask)
}
if ((fpscr_ex & fpscr_eex) != 0) {
env->fpscr |= 1 << FPSCR_FEX;
- env->exception_index = POWERPC_EXCP_PROGRAM;
+ cs->exception_index = POWERPC_EXCP_PROGRAM;
/* XXX: we should compute it properly */
env->error_code = POWERPC_EXCP_FP;
} else {
@@ -484,6 +493,7 @@ void store_fpscr(CPUPPCState *env, uint64_t arg, uint32_t mask)
void helper_float_check_status(CPUPPCState *env)
{
+ CPUState *cs = CPU(ppc_env_get_cpu(env));
int status = get_float_exception_flags(&env->fp_status);
if (status & float_flag_divbyzero) {
@@ -496,11 +506,11 @@ void helper_float_check_status(CPUPPCState *env)
float_inexact_excp(env);
}
- if (env->exception_index == POWERPC_EXCP_PROGRAM &&
+ if (cs->exception_index == POWERPC_EXCP_PROGRAM &&
(env->error_code & POWERPC_EXCP_FP)) {
/* Deferred floating-point exception after target FPR update */
if (msr_fe0 != 0 || msr_fe1 != 0) {
- helper_raise_exception_err(env, env->exception_index,
+ helper_raise_exception_err(env, cs->exception_index,
env->error_code);
}
}
@@ -1772,11 +1782,19 @@ typedef union _ppc_vsr_t {
float64 f64[2];
} ppc_vsr_t;
+#if defined(HOST_WORDS_BIGENDIAN)
+#define VsrW(i) u32[i]
+#define VsrD(i) u64[i]
+#else
+#define VsrW(i) u32[3-(i)]
+#define VsrD(i) u64[1-(i)]
+#endif
+
static void getVSR(int n, ppc_vsr_t *vsr, CPUPPCState *env)
{
if (n < 32) {
- vsr->f64[0] = env->fpr[n];
- vsr->u64[1] = env->vsr[n];
+ vsr->VsrD(0) = env->fpr[n];
+ vsr->VsrD(1) = env->vsr[n];
} else {
vsr->u64[0] = env->avr[n-32].u64[0];
vsr->u64[1] = env->avr[n-32].u64[1];
@@ -1786,8 +1804,8 @@ static void getVSR(int n, ppc_vsr_t *vsr, CPUPPCState *env)
static void putVSR(int n, ppc_vsr_t *vsr, CPUPPCState *env)
{
if (n < 32) {
- env->fpr[n] = vsr->f64[0];
- env->vsr[n] = vsr->u64[1];
+ env->fpr[n] = vsr->VsrD(0);
+ env->vsr[n] = vsr->VsrD(1);
} else {
env->avr[n-32].u64[0] = vsr->u64[0];
env->avr[n-32].u64[1] = vsr->u64[1];
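
VsrW()/VsrD() give element accessors that always name the architecturally high element regardless of host byte order. A small self-contained illustration of the idea; DEMO_BIG_ENDIAN_HOST is an invented switch standing in for HOST_WORDS_BIGENDIAN:

#include <stdint.h>
#include <stdio.h>

typedef union {
    uint32_t u32[4];
    uint64_t u64[2];
} vsr_sketch_t;

/* Element 0 names the architecturally "high" doubleword/word, whichever
 * end of the host array that lands on. */
#if defined(DEMO_BIG_ENDIAN_HOST)
#define VsrW(i) u32[i]
#define VsrD(i) u64[i]
#else
#define VsrW(i) u32[3 - (i)]
#define VsrD(i) u64[1 - (i)]
#endif

int main(void)
{
    vsr_sketch_t v = { .u64 = { 0, 0 } };

    v.VsrD(0) = 0x1122334455667788ull;    /* high doubleword on either host */
    printf("u64[0]=%llx u64[1]=%llx\n",
           (unsigned long long)v.u64[0], (unsigned long long)v.u64[1]);
    return 0;
}
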
@@ -1802,7 +1820,7 @@ static void putVSR(int n, ppc_vsr_t *vsr, CPUPPCState *env)
* op - operation (add or sub)
* nels - number of elements (1, 2 or 4)
* tp - type (float32 or float64)
- * fld - vsr_t field (f32 or f64)
+ * fld - vsr_t field (VsrD(*) or VsrW(*))
* sfprf - set FPRF
*/
#define VSX_ADD_SUB(name, op, nels, tp, fld, sfprf, r2sp) \
@@ -1819,44 +1837,44 @@ void helper_##name(CPUPPCState *env, uint32_t opcode) \
for (i = 0; i < nels; i++) { \
float_status tstat = env->fp_status; \
set_float_exception_flags(0, &tstat); \
- xt.fld[i] = tp##_##op(xa.fld[i], xb.fld[i], &tstat); \
+ xt.fld = tp##_##op(xa.fld, xb.fld, &tstat); \
env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
\
if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \
- if (tp##_is_infinity(xa.fld[i]) && tp##_is_infinity(xb.fld[i])) {\
+ if (tp##_is_infinity(xa.fld) && tp##_is_infinity(xb.fld)) { \
fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, sfprf); \
- } else if (tp##_is_signaling_nan(xa.fld[i]) || \
- tp##_is_signaling_nan(xb.fld[i])) { \
+ } else if (tp##_is_signaling_nan(xa.fld) || \
+ tp##_is_signaling_nan(xb.fld)) { \
fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf); \
} \
} \
\
if (r2sp) { \
- xt.fld[i] = helper_frsp(env, xt.fld[i]); \
+ xt.fld = helper_frsp(env, xt.fld); \
} \
\
if (sfprf) { \
- helper_compute_fprf(env, xt.fld[i], sfprf); \
+ helper_compute_fprf(env, xt.fld, sfprf); \
} \
} \
putVSR(xT(opcode), &xt, env); \
helper_float_check_status(env); \
}
-VSX_ADD_SUB(xsadddp, add, 1, float64, f64, 1, 0)
-VSX_ADD_SUB(xsaddsp, add, 1, float64, f64, 1, 1)
-VSX_ADD_SUB(xvadddp, add, 2, float64, f64, 0, 0)
-VSX_ADD_SUB(xvaddsp, add, 4, float32, f32, 0, 0)
-VSX_ADD_SUB(xssubdp, sub, 1, float64, f64, 1, 0)
-VSX_ADD_SUB(xssubsp, sub, 1, float64, f64, 1, 1)
-VSX_ADD_SUB(xvsubdp, sub, 2, float64, f64, 0, 0)
-VSX_ADD_SUB(xvsubsp, sub, 4, float32, f32, 0, 0)
+VSX_ADD_SUB(xsadddp, add, 1, float64, VsrD(0), 1, 0)
+VSX_ADD_SUB(xsaddsp, add, 1, float64, VsrD(0), 1, 1)
+VSX_ADD_SUB(xvadddp, add, 2, float64, VsrD(i), 0, 0)
+VSX_ADD_SUB(xvaddsp, add, 4, float32, VsrW(i), 0, 0)
+VSX_ADD_SUB(xssubdp, sub, 1, float64, VsrD(0), 1, 0)
+VSX_ADD_SUB(xssubsp, sub, 1, float64, VsrD(0), 1, 1)
+VSX_ADD_SUB(xvsubdp, sub, 2, float64, VsrD(i), 0, 0)
+VSX_ADD_SUB(xvsubsp, sub, 4, float32, VsrW(i), 0, 0)
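
Note how the fld macro argument now carries the element expression itself: scalar forms pass VsrD(0) and always touch doubleword 0, while vector forms pass VsrW(i) so the accessor picks up the loop index when the macro body is rescanned. A compact standalone sketch of that trick (endianness is ignored here; the accessors shown above handle it):

#include <stdint.h>
#include <stdio.h>

typedef union {
    uint64_t u64[2];
    uint32_t u32[4];
} vsr_t;

/* Simplified accessors for the demo only. */
#define VsrD(i) u64[i]
#define VsrW(i) u32[i]

/* The field argument carries the element expression, so "VsrD(0)" pins a
 * scalar op to doubleword 0 while "VsrW(i)" iterates vector elements. */
#define VSX_NEG(name, nels, fld)                      \
static void name(vsr_t *xt, const vsr_t *xb)          \
{                                                     \
    int i;                                            \
                                                      \
    for (i = 0; i < nels; i++) {                      \
        xt->fld = ~xb->fld;                           \
    }                                                 \
}

VSX_NEG(neg_scalar, 1, VsrD(0))
VSX_NEG(neg_vector, 4, VsrW(i))

int main(void)
{
    vsr_t a = { .u64 = { 1, 2 } };
    vsr_t r = { .u64 = { 0, 0 } };

    neg_scalar(&r, &a);   /* touches doubleword 0 only */
    neg_vector(&r, &a);   /* iterates all four word elements */
    printf("%llx %llx\n", (unsigned long long)r.u64[0],
           (unsigned long long)r.u64[1]);
    return 0;
}
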
/* VSX_MUL - VSX floating point multiply
* op - instruction mnemonic
* nels - number of elements (1, 2 or 4)
* tp - type (float32 or float64)
- * fld - vsr_t field (f32 or f64)
+ * fld - vsr_t field (VsrD(*) or VsrW(*))
* sfprf - set FPRF
*/
#define VSX_MUL(op, nels, tp, fld, sfprf, r2sp) \
@@ -1873,25 +1891,25 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
for (i = 0; i < nels; i++) { \
float_status tstat = env->fp_status; \
set_float_exception_flags(0, &tstat); \
- xt.fld[i] = tp##_mul(xa.fld[i], xb.fld[i], &tstat); \
+ xt.fld = tp##_mul(xa.fld, xb.fld, &tstat); \
env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
\
if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \
- if ((tp##_is_infinity(xa.fld[i]) && tp##_is_zero(xb.fld[i])) || \
- (tp##_is_infinity(xb.fld[i]) && tp##_is_zero(xa.fld[i]))) { \
+ if ((tp##_is_infinity(xa.fld) && tp##_is_zero(xb.fld)) || \
+ (tp##_is_infinity(xb.fld) && tp##_is_zero(xa.fld))) { \
fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, sfprf); \
- } else if (tp##_is_signaling_nan(xa.fld[i]) || \
- tp##_is_signaling_nan(xb.fld[i])) { \
+ } else if (tp##_is_signaling_nan(xa.fld) || \
+ tp##_is_signaling_nan(xb.fld)) { \
fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf); \
} \
} \
\
if (r2sp) { \
- xt.fld[i] = helper_frsp(env, xt.fld[i]); \
+ xt.fld = helper_frsp(env, xt.fld); \
} \
\
if (sfprf) { \
- helper_compute_fprf(env, xt.fld[i], sfprf); \
+ helper_compute_fprf(env, xt.fld, sfprf); \
} \
} \
\
@@ -1899,16 +1917,16 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
helper_float_check_status(env); \
}
-VSX_MUL(xsmuldp, 1, float64, f64, 1, 0)
-VSX_MUL(xsmulsp, 1, float64, f64, 1, 1)
-VSX_MUL(xvmuldp, 2, float64, f64, 0, 0)
-VSX_MUL(xvmulsp, 4, float32, f32, 0, 0)
+VSX_MUL(xsmuldp, 1, float64, VsrD(0), 1, 0)
+VSX_MUL(xsmulsp, 1, float64, VsrD(0), 1, 1)
+VSX_MUL(xvmuldp, 2, float64, VsrD(i), 0, 0)
+VSX_MUL(xvmulsp, 4, float32, VsrW(i), 0, 0)
/* VSX_DIV - VSX floating point divide
* op - instruction mnemonic
* nels - number of elements (1, 2 or 4)
* tp - type (float32 or float64)
- * fld - vsr_t field (f32 or f64)
+ * fld - vsr_t field (VsrD(*) or VsrW(*))
* sfprf - set FPRF
*/
#define VSX_DIV(op, nels, tp, fld, sfprf, r2sp) \
@@ -1925,27 +1943,27 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
for (i = 0; i < nels; i++) { \
float_status tstat = env->fp_status; \
set_float_exception_flags(0, &tstat); \
- xt.fld[i] = tp##_div(xa.fld[i], xb.fld[i], &tstat); \
+ xt.fld = tp##_div(xa.fld, xb.fld, &tstat); \
env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
\
if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \
- if (tp##_is_infinity(xa.fld[i]) && tp##_is_infinity(xb.fld[i])) { \
+ if (tp##_is_infinity(xa.fld) && tp##_is_infinity(xb.fld)) { \
fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIDI, sfprf); \
- } else if (tp##_is_zero(xa.fld[i]) && \
- tp##_is_zero(xb.fld[i])) { \
+ } else if (tp##_is_zero(xa.fld) && \
+ tp##_is_zero(xb.fld)) { \
fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXZDZ, sfprf); \
- } else if (tp##_is_signaling_nan(xa.fld[i]) || \
- tp##_is_signaling_nan(xb.fld[i])) { \
+ } else if (tp##_is_signaling_nan(xa.fld) || \
+ tp##_is_signaling_nan(xb.fld)) { \
fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf); \
} \
} \
\
if (r2sp) { \
- xt.fld[i] = helper_frsp(env, xt.fld[i]); \
+ xt.fld = helper_frsp(env, xt.fld); \
} \
\
if (sfprf) { \
- helper_compute_fprf(env, xt.fld[i], sfprf); \
+ helper_compute_fprf(env, xt.fld, sfprf); \
} \
} \
\
@@ -1953,16 +1971,16 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
helper_float_check_status(env); \
}
-VSX_DIV(xsdivdp, 1, float64, f64, 1, 0)
-VSX_DIV(xsdivsp, 1, float64, f64, 1, 1)
-VSX_DIV(xvdivdp, 2, float64, f64, 0, 0)
-VSX_DIV(xvdivsp, 4, float32, f32, 0, 0)
+VSX_DIV(xsdivdp, 1, float64, VsrD(0), 1, 0)
+VSX_DIV(xsdivsp, 1, float64, VsrD(0), 1, 1)
+VSX_DIV(xvdivdp, 2, float64, VsrD(i), 0, 0)
+VSX_DIV(xvdivsp, 4, float32, VsrW(i), 0, 0)
/* VSX_RE - VSX floating point reciprocal estimate
* op - instruction mnemonic
* nels - number of elements (1, 2 or 4)
* tp - type (float32 or float64)
- * fld - vsr_t field (f32 or f64)
+ * fld - vsr_t field (VsrD(*) or VsrW(*))
* sfprf - set FPRF
*/
#define VSX_RE(op, nels, tp, fld, sfprf, r2sp) \
@@ -1976,17 +1994,17 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
helper_reset_fpstatus(env); \
\
for (i = 0; i < nels; i++) { \
- if (unlikely(tp##_is_signaling_nan(xb.fld[i]))) { \
+ if (unlikely(tp##_is_signaling_nan(xb.fld))) { \
fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf); \
} \
- xt.fld[i] = tp##_div(tp##_one, xb.fld[i], &env->fp_status); \
+ xt.fld = tp##_div(tp##_one, xb.fld, &env->fp_status); \
\
if (r2sp) { \
- xt.fld[i] = helper_frsp(env, xt.fld[i]); \
+ xt.fld = helper_frsp(env, xt.fld); \
} \
\
if (sfprf) { \
- helper_compute_fprf(env, xt.fld[0], sfprf); \
+ helper_compute_fprf(env, xt.fld, sfprf); \
} \
} \
\
@@ -1994,16 +2012,16 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
helper_float_check_status(env); \
}
-VSX_RE(xsredp, 1, float64, f64, 1, 0)
-VSX_RE(xsresp, 1, float64, f64, 1, 1)
-VSX_RE(xvredp, 2, float64, f64, 0, 0)
-VSX_RE(xvresp, 4, float32, f32, 0, 0)
+VSX_RE(xsredp, 1, float64, VsrD(0), 1, 0)
+VSX_RE(xsresp, 1, float64, VsrD(0), 1, 1)
+VSX_RE(xvredp, 2, float64, VsrD(i), 0, 0)
+VSX_RE(xvresp, 4, float32, VsrW(i), 0, 0)
/* VSX_SQRT - VSX floating point square root
* op - instruction mnemonic
* nels - number of elements (1, 2 or 4)
* tp - type (float32 or float64)
- * fld - vsr_t field (f32 or f64)
+ * fld - vsr_t field (VsrD(*) or VsrW(*))
* sfprf - set FPRF
*/
#define VSX_SQRT(op, nels, tp, fld, sfprf, r2sp) \
@@ -2019,23 +2037,23 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
for (i = 0; i < nels; i++) { \
float_status tstat = env->fp_status; \
set_float_exception_flags(0, &tstat); \
- xt.fld[i] = tp##_sqrt(xb.fld[i], &tstat); \
+ xt.fld = tp##_sqrt(xb.fld, &tstat); \
env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
\
if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \
- if (tp##_is_neg(xb.fld[i]) && !tp##_is_zero(xb.fld[i])) { \
+ if (tp##_is_neg(xb.fld) && !tp##_is_zero(xb.fld)) { \
fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, sfprf); \
- } else if (tp##_is_signaling_nan(xb.fld[i])) { \
+ } else if (tp##_is_signaling_nan(xb.fld)) { \
fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf); \
} \
} \
\
if (r2sp) { \
- xt.fld[i] = helper_frsp(env, xt.fld[i]); \
+ xt.fld = helper_frsp(env, xt.fld); \
} \
\
if (sfprf) { \
- helper_compute_fprf(env, xt.fld[i], sfprf); \
+ helper_compute_fprf(env, xt.fld, sfprf); \
} \
} \
\
@@ -2043,16 +2061,16 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
helper_float_check_status(env); \
}
-VSX_SQRT(xssqrtdp, 1, float64, f64, 1, 0)
-VSX_SQRT(xssqrtsp, 1, float64, f64, 1, 1)
-VSX_SQRT(xvsqrtdp, 2, float64, f64, 0, 0)
-VSX_SQRT(xvsqrtsp, 4, float32, f32, 0, 0)
+VSX_SQRT(xssqrtdp, 1, float64, VsrD(0), 1, 0)
+VSX_SQRT(xssqrtsp, 1, float64, VsrD(0), 1, 1)
+VSX_SQRT(xvsqrtdp, 2, float64, VsrD(i), 0, 0)
+VSX_SQRT(xvsqrtsp, 4, float32, VsrW(i), 0, 0)
/* VSX_RSQRTE - VSX floating point reciprocal square root estimate
* op - instruction mnemonic
* nels - number of elements (1, 2 or 4)
* tp - type (float32 or float64)
- * fld - vsr_t field (f32 or f64)
+ * fld - vsr_t field (VsrD(*) or VsrW(*))
* sfprf - set FPRF
*/
#define VSX_RSQRTE(op, nels, tp, fld, sfprf, r2sp) \
@@ -2068,24 +2086,24 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
for (i = 0; i < nels; i++) { \
float_status tstat = env->fp_status; \
set_float_exception_flags(0, &tstat); \
- xt.fld[i] = tp##_sqrt(xb.fld[i], &tstat); \
- xt.fld[i] = tp##_div(tp##_one, xt.fld[i], &tstat); \
+ xt.fld = tp##_sqrt(xb.fld, &tstat); \
+ xt.fld = tp##_div(tp##_one, xt.fld, &tstat); \
env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
\
if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \
- if (tp##_is_neg(xb.fld[i]) && !tp##_is_zero(xb.fld[i])) { \
+ if (tp##_is_neg(xb.fld) && !tp##_is_zero(xb.fld)) { \
fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, sfprf); \
- } else if (tp##_is_signaling_nan(xb.fld[i])) { \
+ } else if (tp##_is_signaling_nan(xb.fld)) { \
fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf); \
} \
} \
\
if (r2sp) { \
- xt.fld[i] = helper_frsp(env, xt.fld[i]); \
+ xt.fld = helper_frsp(env, xt.fld); \
} \
\
if (sfprf) { \
- helper_compute_fprf(env, xt.fld[i], sfprf); \
+ helper_compute_fprf(env, xt.fld, sfprf); \
} \
} \
\
@@ -2093,16 +2111,16 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
helper_float_check_status(env); \
}
-VSX_RSQRTE(xsrsqrtedp, 1, float64, f64, 1, 0)
-VSX_RSQRTE(xsrsqrtesp, 1, float64, f64, 1, 1)
-VSX_RSQRTE(xvrsqrtedp, 2, float64, f64, 0, 0)
-VSX_RSQRTE(xvrsqrtesp, 4, float32, f32, 0, 0)
+VSX_RSQRTE(xsrsqrtedp, 1, float64, VsrD(0), 1, 0)
+VSX_RSQRTE(xsrsqrtesp, 1, float64, VsrD(0), 1, 1)
+VSX_RSQRTE(xvrsqrtedp, 2, float64, VsrD(i), 0, 0)
+VSX_RSQRTE(xvrsqrtesp, 4, float32, VsrW(i), 0, 0)
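The hunks above replace raw f64[]/f32[] element indexing with VsrD()/VsrW() accessors, whose definitions are not part of this diff. A minimal sketch of the idea, assuming a union-backed 128-bit register and host-endianness-aware index translation (all names below are illustrative stand-ins, not QEMU's):

/* Sketch only: a host-endianness-aware view of a 128-bit vector register,
 * so helpers can address elements in target order without per-call
 * host-order offset fixups. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

typedef union {
    uint32_t u32[4];
    uint64_t u64[2];
} vsr_sketch_t;

#if defined(HOST_WORDS_BIGENDIAN)
#define SKETCH_VsrW(i) u32[(i)]
#define SKETCH_VsrD(i) u64[(i)]
#else
#define SKETCH_VsrW(i) u32[3 - (i)]
#define SKETCH_VsrD(i) u64[1 - (i)]
#endif

int main(void)
{
    vsr_sketch_t v = { .u64 = { 0, 0 } };

    v.SKETCH_VsrD(0) = 0x1122334455667788ULL;   /* doubleword 0, target view */
    printf("%016" PRIx64 "\n", v.SKETCH_VsrD(0));
    return 0;
}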
/* VSX_TDIV - VSX floating point test for divide
* op - instruction mnemonic
* nels - number of elements (1, 2 or 4)
* tp - type (float32 or float64)
- * fld - vsr_t field (f32 or f64)
+ * fld - vsr_t field (VsrD(*) or VsrW(*))
* emin - minimum unbiased exponent
* emax - maximum unbiased exponent
* nbits - number of fraction bits
@@ -2119,28 +2137,28 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
getVSR(xB(opcode), &xb, env); \
\
for (i = 0; i < nels; i++) { \
- if (unlikely(tp##_is_infinity(xa.fld[i]) || \
- tp##_is_infinity(xb.fld[i]) || \
- tp##_is_zero(xb.fld[i]))) { \
+ if (unlikely(tp##_is_infinity(xa.fld) || \
+ tp##_is_infinity(xb.fld) || \
+ tp##_is_zero(xb.fld))) { \
fe_flag = 1; \
fg_flag = 1; \
} else { \
- int e_a = ppc_##tp##_get_unbiased_exp(xa.fld[i]); \
- int e_b = ppc_##tp##_get_unbiased_exp(xb.fld[i]); \
+ int e_a = ppc_##tp##_get_unbiased_exp(xa.fld); \
+ int e_b = ppc_##tp##_get_unbiased_exp(xb.fld); \
\
- if (unlikely(tp##_is_any_nan(xa.fld[i]) || \
- tp##_is_any_nan(xb.fld[i]))) { \
+ if (unlikely(tp##_is_any_nan(xa.fld) || \
+ tp##_is_any_nan(xb.fld))) { \
fe_flag = 1; \
} else if ((e_b <= emin) || (e_b >= (emax-2))) { \
fe_flag = 1; \
- } else if (!tp##_is_zero(xa.fld[i]) && \
+ } else if (!tp##_is_zero(xa.fld) && \
(((e_a - e_b) >= emax) || \
((e_a - e_b) <= (emin+1)) || \
(e_a <= (emin+nbits)))) { \
fe_flag = 1; \
} \
\
- if (unlikely(tp##_is_zero_or_denormal(xb.fld[i]))) { \
+ if (unlikely(tp##_is_zero_or_denormal(xb.fld))) { \
/* XB is not zero because of the above check and */ \
/* so must be denormalized. */ \
fg_flag = 1; \
@@ -2151,15 +2169,15 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
env->crf[BF(opcode)] = 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0); \
}
-VSX_TDIV(xstdivdp, 1, float64, f64, -1022, 1023, 52)
-VSX_TDIV(xvtdivdp, 2, float64, f64, -1022, 1023, 52)
-VSX_TDIV(xvtdivsp, 4, float32, f32, -126, 127, 23)
+VSX_TDIV(xstdivdp, 1, float64, VsrD(0), -1022, 1023, 52)
+VSX_TDIV(xvtdivdp, 2, float64, VsrD(i), -1022, 1023, 52)
+VSX_TDIV(xvtdivsp, 4, float32, VsrW(i), -126, 127, 23)
/* VSX_TSQRT - VSX floating point test for square root
* op - instruction mnemonic
* nels - number of elements (1, 2 or 4)
* tp - type (float32 or float64)
- * fld - vsr_t field (f32 or f64)
+ * fld - vsr_t field (VsrD(*) or VsrW(*))
* emin - minimum unbiased exponent
* emax - maximum unbiased exponent
* nbits - number of fraction bits
@@ -2176,25 +2194,25 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
getVSR(xB(opcode), &xb, env); \
\
for (i = 0; i < nels; i++) { \
- if (unlikely(tp##_is_infinity(xb.fld[i]) || \
- tp##_is_zero(xb.fld[i]))) { \
+ if (unlikely(tp##_is_infinity(xb.fld) || \
+ tp##_is_zero(xb.fld))) { \
fe_flag = 1; \
fg_flag = 1; \
} else { \
- int e_b = ppc_##tp##_get_unbiased_exp(xb.fld[i]); \
+ int e_b = ppc_##tp##_get_unbiased_exp(xb.fld); \
\
- if (unlikely(tp##_is_any_nan(xb.fld[i]))) { \
+ if (unlikely(tp##_is_any_nan(xb.fld))) { \
fe_flag = 1; \
- } else if (unlikely(tp##_is_zero(xb.fld[i]))) { \
+ } else if (unlikely(tp##_is_zero(xb.fld))) { \
fe_flag = 1; \
- } else if (unlikely(tp##_is_neg(xb.fld[i]))) { \
+ } else if (unlikely(tp##_is_neg(xb.fld))) { \
fe_flag = 1; \
- } else if (!tp##_is_zero(xb.fld[i]) && \
+ } else if (!tp##_is_zero(xb.fld) && \
(e_b <= (emin+nbits))) { \
fe_flag = 1; \
} \
\
- if (unlikely(tp##_is_zero_or_denormal(xb.fld[i]))) { \
+ if (unlikely(tp##_is_zero_or_denormal(xb.fld))) { \
/* XB is not zero because of the above check and */ \
/* therefore must be denormalized. */ \
fg_flag = 1; \
@@ -2205,15 +2223,15 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
env->crf[BF(opcode)] = 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0); \
}
-VSX_TSQRT(xstsqrtdp, 1, float64, f64, -1022, 52)
-VSX_TSQRT(xvtsqrtdp, 2, float64, f64, -1022, 52)
-VSX_TSQRT(xvtsqrtsp, 4, float32, f32, -126, 23)
+VSX_TSQRT(xstsqrtdp, 1, float64, VsrD(0), -1022, 52)
+VSX_TSQRT(xvtsqrtdp, 2, float64, VsrD(i), -1022, 52)
+VSX_TSQRT(xvtsqrtsp, 4, float32, VsrW(i), -126, 23)
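The VSX_TDIV/VSX_TSQRT helpers above fold their fe/fg test results into a 4-bit CR field with "0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0)". A self-contained sketch of that packing (the constant and flag bits are copied from the helpers; the function name is made up):

/* Sketch only: packing the test-for-divide/test-for-sqrt flags into a CR
 * field value, as the helpers above compute env->crf[BF(opcode)]. */
#include <stdio.h>

static unsigned pack_tdiv_crf(int fg_flag, int fe_flag)
{
    return 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0);
}

int main(void)
{
    printf("fg=1 fe=0 -> 0x%x\n", pack_tdiv_crf(1, 0));   /* 0xc */
    printf("fg=0 fe=1 -> 0x%x\n", pack_tdiv_crf(0, 1));   /* 0xa */
    return 0;
}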
/* VSX_MADD - VSX floating point multiply/add variations
* op - instruction mnemonic
* nels - number of elements (1, 2 or 4)
* tp - type (float32 or float64)
- * fld - vsr_t field (f32 or f64)
+ * fld - vsr_t field (VsrD(*) or VsrW(*))
* maddflgs - flags for the float*muladd routine that control the
* various forms (madd, msub, nmadd, nmsub)
* afrm - A form (1=A, 0=M)
@@ -2249,43 +2267,43 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
/* Avoid double rounding errors by rounding the intermediate */ \
/* result to odd. */ \
set_float_rounding_mode(float_round_to_zero, &tstat); \
- xt_out.fld[i] = tp##_muladd(xa.fld[i], b->fld[i], c->fld[i], \
+ xt_out.fld = tp##_muladd(xa.fld, b->fld, c->fld, \
maddflgs, &tstat); \
- xt_out.fld[i] |= (get_float_exception_flags(&tstat) & \
+ xt_out.fld |= (get_float_exception_flags(&tstat) & \
float_flag_inexact) != 0; \
} else { \
- xt_out.fld[i] = tp##_muladd(xa.fld[i], b->fld[i], c->fld[i], \
+ xt_out.fld = tp##_muladd(xa.fld, b->fld, c->fld, \
maddflgs, &tstat); \
} \
env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
\
if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \
- if (tp##_is_signaling_nan(xa.fld[i]) || \
- tp##_is_signaling_nan(b->fld[i]) || \
- tp##_is_signaling_nan(c->fld[i])) { \
+ if (tp##_is_signaling_nan(xa.fld) || \
+ tp##_is_signaling_nan(b->fld) || \
+ tp##_is_signaling_nan(c->fld)) { \
fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf); \
tstat.float_exception_flags &= ~float_flag_invalid; \
} \
- if ((tp##_is_infinity(xa.fld[i]) && tp##_is_zero(b->fld[i])) || \
- (tp##_is_zero(xa.fld[i]) && tp##_is_infinity(b->fld[i]))) { \
- xt_out.fld[i] = float64_to_##tp(fload_invalid_op_excp(env, \
+ if ((tp##_is_infinity(xa.fld) && tp##_is_zero(b->fld)) || \
+ (tp##_is_zero(xa.fld) && tp##_is_infinity(b->fld))) { \
+ xt_out.fld = float64_to_##tp(fload_invalid_op_excp(env, \
POWERPC_EXCP_FP_VXIMZ, sfprf), &env->fp_status); \
tstat.float_exception_flags &= ~float_flag_invalid; \
} \
if ((tstat.float_exception_flags & float_flag_invalid) && \
- ((tp##_is_infinity(xa.fld[i]) || \
- tp##_is_infinity(b->fld[i])) && \
- tp##_is_infinity(c->fld[i]))) { \
+ ((tp##_is_infinity(xa.fld) || \
+ tp##_is_infinity(b->fld)) && \
+ tp##_is_infinity(c->fld))) { \
fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, sfprf); \
} \
} \
\
if (r2sp) { \
- xt_out.fld[i] = helper_frsp(env, xt_out.fld[i]); \
+ xt_out.fld = helper_frsp(env, xt_out.fld); \
} \
\
if (sfprf) { \
- helper_compute_fprf(env, xt_out.fld[i], sfprf); \
+ helper_compute_fprf(env, xt_out.fld, sfprf); \
} \
} \
putVSR(xT(opcode), &xt_out, env); \
@@ -2297,41 +2315,41 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
#define NMADD_FLGS float_muladd_negate_result
#define NMSUB_FLGS (float_muladd_negate_c | float_muladd_negate_result)
-VSX_MADD(xsmaddadp, 1, float64, f64, MADD_FLGS, 1, 1, 0)
-VSX_MADD(xsmaddmdp, 1, float64, f64, MADD_FLGS, 0, 1, 0)
-VSX_MADD(xsmsubadp, 1, float64, f64, MSUB_FLGS, 1, 1, 0)
-VSX_MADD(xsmsubmdp, 1, float64, f64, MSUB_FLGS, 0, 1, 0)
-VSX_MADD(xsnmaddadp, 1, float64, f64, NMADD_FLGS, 1, 1, 0)
-VSX_MADD(xsnmaddmdp, 1, float64, f64, NMADD_FLGS, 0, 1, 0)
-VSX_MADD(xsnmsubadp, 1, float64, f64, NMSUB_FLGS, 1, 1, 0)
-VSX_MADD(xsnmsubmdp, 1, float64, f64, NMSUB_FLGS, 0, 1, 0)
-
-VSX_MADD(xsmaddasp, 1, float64, f64, MADD_FLGS, 1, 1, 1)
-VSX_MADD(xsmaddmsp, 1, float64, f64, MADD_FLGS, 0, 1, 1)
-VSX_MADD(xsmsubasp, 1, float64, f64, MSUB_FLGS, 1, 1, 1)
-VSX_MADD(xsmsubmsp, 1, float64, f64, MSUB_FLGS, 0, 1, 1)
-VSX_MADD(xsnmaddasp, 1, float64, f64, NMADD_FLGS, 1, 1, 1)
-VSX_MADD(xsnmaddmsp, 1, float64, f64, NMADD_FLGS, 0, 1, 1)
-VSX_MADD(xsnmsubasp, 1, float64, f64, NMSUB_FLGS, 1, 1, 1)
-VSX_MADD(xsnmsubmsp, 1, float64, f64, NMSUB_FLGS, 0, 1, 1)
-
-VSX_MADD(xvmaddadp, 2, float64, f64, MADD_FLGS, 1, 0, 0)
-VSX_MADD(xvmaddmdp, 2, float64, f64, MADD_FLGS, 0, 0, 0)
-VSX_MADD(xvmsubadp, 2, float64, f64, MSUB_FLGS, 1, 0, 0)
-VSX_MADD(xvmsubmdp, 2, float64, f64, MSUB_FLGS, 0, 0, 0)
-VSX_MADD(xvnmaddadp, 2, float64, f64, NMADD_FLGS, 1, 0, 0)
-VSX_MADD(xvnmaddmdp, 2, float64, f64, NMADD_FLGS, 0, 0, 0)
-VSX_MADD(xvnmsubadp, 2, float64, f64, NMSUB_FLGS, 1, 0, 0)
-VSX_MADD(xvnmsubmdp, 2, float64, f64, NMSUB_FLGS, 0, 0, 0)
-
-VSX_MADD(xvmaddasp, 4, float32, f32, MADD_FLGS, 1, 0, 0)
-VSX_MADD(xvmaddmsp, 4, float32, f32, MADD_FLGS, 0, 0, 0)
-VSX_MADD(xvmsubasp, 4, float32, f32, MSUB_FLGS, 1, 0, 0)
-VSX_MADD(xvmsubmsp, 4, float32, f32, MSUB_FLGS, 0, 0, 0)
-VSX_MADD(xvnmaddasp, 4, float32, f32, NMADD_FLGS, 1, 0, 0)
-VSX_MADD(xvnmaddmsp, 4, float32, f32, NMADD_FLGS, 0, 0, 0)
-VSX_MADD(xvnmsubasp, 4, float32, f32, NMSUB_FLGS, 1, 0, 0)
-VSX_MADD(xvnmsubmsp, 4, float32, f32, NMSUB_FLGS, 0, 0, 0)
+VSX_MADD(xsmaddadp, 1, float64, VsrD(0), MADD_FLGS, 1, 1, 0)
+VSX_MADD(xsmaddmdp, 1, float64, VsrD(0), MADD_FLGS, 0, 1, 0)
+VSX_MADD(xsmsubadp, 1, float64, VsrD(0), MSUB_FLGS, 1, 1, 0)
+VSX_MADD(xsmsubmdp, 1, float64, VsrD(0), MSUB_FLGS, 0, 1, 0)
+VSX_MADD(xsnmaddadp, 1, float64, VsrD(0), NMADD_FLGS, 1, 1, 0)
+VSX_MADD(xsnmaddmdp, 1, float64, VsrD(0), NMADD_FLGS, 0, 1, 0)
+VSX_MADD(xsnmsubadp, 1, float64, VsrD(0), NMSUB_FLGS, 1, 1, 0)
+VSX_MADD(xsnmsubmdp, 1, float64, VsrD(0), NMSUB_FLGS, 0, 1, 0)
+
+VSX_MADD(xsmaddasp, 1, float64, VsrD(0), MADD_FLGS, 1, 1, 1)
+VSX_MADD(xsmaddmsp, 1, float64, VsrD(0), MADD_FLGS, 0, 1, 1)
+VSX_MADD(xsmsubasp, 1, float64, VsrD(0), MSUB_FLGS, 1, 1, 1)
+VSX_MADD(xsmsubmsp, 1, float64, VsrD(0), MSUB_FLGS, 0, 1, 1)
+VSX_MADD(xsnmaddasp, 1, float64, VsrD(0), NMADD_FLGS, 1, 1, 1)
+VSX_MADD(xsnmaddmsp, 1, float64, VsrD(0), NMADD_FLGS, 0, 1, 1)
+VSX_MADD(xsnmsubasp, 1, float64, VsrD(0), NMSUB_FLGS, 1, 1, 1)
+VSX_MADD(xsnmsubmsp, 1, float64, VsrD(0), NMSUB_FLGS, 0, 1, 1)
+
+VSX_MADD(xvmaddadp, 2, float64, VsrD(i), MADD_FLGS, 1, 0, 0)
+VSX_MADD(xvmaddmdp, 2, float64, VsrD(i), MADD_FLGS, 0, 0, 0)
+VSX_MADD(xvmsubadp, 2, float64, VsrD(i), MSUB_FLGS, 1, 0, 0)
+VSX_MADD(xvmsubmdp, 2, float64, VsrD(i), MSUB_FLGS, 0, 0, 0)
+VSX_MADD(xvnmaddadp, 2, float64, VsrD(i), NMADD_FLGS, 1, 0, 0)
+VSX_MADD(xvnmaddmdp, 2, float64, VsrD(i), NMADD_FLGS, 0, 0, 0)
+VSX_MADD(xvnmsubadp, 2, float64, VsrD(i), NMSUB_FLGS, 1, 0, 0)
+VSX_MADD(xvnmsubmdp, 2, float64, VsrD(i), NMSUB_FLGS, 0, 0, 0)
+
+VSX_MADD(xvmaddasp, 4, float32, VsrW(i), MADD_FLGS, 1, 0, 0)
+VSX_MADD(xvmaddmsp, 4, float32, VsrW(i), MADD_FLGS, 0, 0, 0)
+VSX_MADD(xvmsubasp, 4, float32, VsrW(i), MSUB_FLGS, 1, 0, 0)
+VSX_MADD(xvmsubmsp, 4, float32, VsrW(i), MSUB_FLGS, 0, 0, 0)
+VSX_MADD(xvnmaddasp, 4, float32, VsrW(i), NMADD_FLGS, 1, 0, 0)
+VSX_MADD(xvnmaddmsp, 4, float32, VsrW(i), NMADD_FLGS, 0, 0, 0)
+VSX_MADD(xvnmsubasp, 4, float32, VsrW(i), NMSUB_FLGS, 1, 0, 0)
+VSX_MADD(xvnmsubmsp, 4, float32, VsrW(i), NMSUB_FLGS, 0, 0, 0)
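In the single-precision (r2sp) multiply-add paths above, the intermediate result is truncated (float_round_to_zero) and the inexact flag is then OR-ed into its low bit, which is the round-to-odd trick for avoiding double rounding before the final round to single precision. A minimal sketch of the same trick in plain C99 terms; QEMU uses softfloat, so the fenv/fma machinery below is only an illustration:

/* Sketch only: fused multiply-add with the intermediate rounded to odd.
 * Truncate toward zero, then set the low mantissa bit if the truncation
 * was inexact, so an inexact result always lands on the odd neighbour. */
#include <fenv.h>
#include <math.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static double fma_round_to_odd(double a, double b, double c)
{
    fenv_t saved;

    feholdexcept(&saved);            /* save env, clear exception flags */
    fesetround(FE_TOWARDZERO);       /* truncate, as the helper does */
    double r = fma(a, b, c);
    int inexact = fetestexcept(FE_INEXACT) != 0;
    fesetenv(&saved);                /* restore caller's rounding mode */

    if (inexact) {
        uint64_t bits;
        memcpy(&bits, &r, sizeof(bits));
        bits |= 1;                   /* sticky low bit: round to odd */
        memcpy(&r, &bits, sizeof(r));
    }
    return r;
}

int main(void)
{
    printf("%.17g\n", fma_round_to_odd(1.0000001, 3.0000003, 1e-30));
    return 0;
}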
#define VSX_SCALAR_CMP(op, ordered) \
void helper_##op(CPUPPCState *env, uint32_t opcode) \
@@ -2342,10 +2360,10 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
getVSR(xA(opcode), &xa, env); \
getVSR(xB(opcode), &xb, env); \
\
- if (unlikely(float64_is_any_nan(xa.f64[0]) || \
- float64_is_any_nan(xb.f64[0]))) { \
- if (float64_is_signaling_nan(xa.f64[0]) || \
- float64_is_signaling_nan(xb.f64[0])) { \
+ if (unlikely(float64_is_any_nan(xa.VsrD(0)) || \
+ float64_is_any_nan(xb.VsrD(0)))) { \
+ if (float64_is_signaling_nan(xa.VsrD(0)) || \
+ float64_is_signaling_nan(xb.VsrD(0))) { \
fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
} \
if (ordered) { \
@@ -2353,9 +2371,10 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
} \
cc = 1; \
} else { \
- if (float64_lt(xa.f64[0], xb.f64[0], &env->fp_status)) { \
+ if (float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status)) { \
cc = 8; \
- } else if (!float64_le(xa.f64[0], xb.f64[0], &env->fp_status)) { \
+ } else if (!float64_le(xa.VsrD(0), xb.VsrD(0), \
+ &env->fp_status)) { \
cc = 4; \
} else { \
cc = 2; \
@@ -2380,7 +2399,7 @@ VSX_SCALAR_CMP(xscmpudp, 0)
* op - operation (max or min)
* nels - number of elements (1, 2 or 4)
* tp - type (float32 or float64)
- * fld - vsr_t field (f32 or f64)
+ * fld - vsr_t field (VsrD(*) or VsrW(*))
*/
#define VSX_MAX_MIN(name, op, nels, tp, fld) \
void helper_##name(CPUPPCState *env, uint32_t opcode) \
@@ -2393,9 +2412,9 @@ void helper_##name(CPUPPCState *env, uint32_t opcode) \
getVSR(xT(opcode), &xt, env); \
\
for (i = 0; i < nels; i++) { \
- xt.fld[i] = tp##_##op(xa.fld[i], xb.fld[i], &env->fp_status); \
- if (unlikely(tp##_is_signaling_nan(xa.fld[i]) || \
- tp##_is_signaling_nan(xb.fld[i]))) { \
+ xt.fld = tp##_##op(xa.fld, xb.fld, &env->fp_status); \
+ if (unlikely(tp##_is_signaling_nan(xa.fld) || \
+ tp##_is_signaling_nan(xb.fld))) { \
fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
} \
} \
@@ -2404,18 +2423,18 @@ void helper_##name(CPUPPCState *env, uint32_t opcode) \
helper_float_check_status(env); \
}
-VSX_MAX_MIN(xsmaxdp, maxnum, 1, float64, f64)
-VSX_MAX_MIN(xvmaxdp, maxnum, 2, float64, f64)
-VSX_MAX_MIN(xvmaxsp, maxnum, 4, float32, f32)
-VSX_MAX_MIN(xsmindp, minnum, 1, float64, f64)
-VSX_MAX_MIN(xvmindp, minnum, 2, float64, f64)
-VSX_MAX_MIN(xvminsp, minnum, 4, float32, f32)
+VSX_MAX_MIN(xsmaxdp, maxnum, 1, float64, VsrD(0))
+VSX_MAX_MIN(xvmaxdp, maxnum, 2, float64, VsrD(i))
+VSX_MAX_MIN(xvmaxsp, maxnum, 4, float32, VsrW(i))
+VSX_MAX_MIN(xsmindp, minnum, 1, float64, VsrD(0))
+VSX_MAX_MIN(xvmindp, minnum, 2, float64, VsrD(i))
+VSX_MAX_MIN(xvminsp, minnum, 4, float32, VsrW(i))
/* VSX_CMP - VSX floating point compare
* op - instruction mnemonic
* nels - number of elements (1, 2 or 4)
* tp - type (float32 or float64)
- * fld - vsr_t field (f32 or f64)
+ * fld - vsr_t field (VsrD(*) or VsrW(*))
* cmp - comparison operation
* svxvc - set VXVC bit
*/
@@ -2432,23 +2451,23 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
getVSR(xT(opcode), &xt, env); \
\
for (i = 0; i < nels; i++) { \
- if (unlikely(tp##_is_any_nan(xa.fld[i]) || \
- tp##_is_any_nan(xb.fld[i]))) { \
- if (tp##_is_signaling_nan(xa.fld[i]) || \
- tp##_is_signaling_nan(xb.fld[i])) { \
+ if (unlikely(tp##_is_any_nan(xa.fld) || \
+ tp##_is_any_nan(xb.fld))) { \
+ if (tp##_is_signaling_nan(xa.fld) || \
+ tp##_is_signaling_nan(xb.fld)) { \
fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
} \
if (svxvc) { \
fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 0); \
} \
- xt.fld[i] = 0; \
+ xt.fld = 0; \
all_true = 0; \
} else { \
- if (tp##_##cmp(xb.fld[i], xa.fld[i], &env->fp_status) == 1) { \
- xt.fld[i] = -1; \
+ if (tp##_##cmp(xb.fld, xa.fld, &env->fp_status) == 1) { \
+ xt.fld = -1; \
all_false = 0; \
} else { \
- xt.fld[i] = 0; \
+ xt.fld = 0; \
all_true = 0; \
} \
} \
@@ -2461,18 +2480,12 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
helper_float_check_status(env); \
}
-VSX_CMP(xvcmpeqdp, 2, float64, f64, eq, 0)
-VSX_CMP(xvcmpgedp, 2, float64, f64, le, 1)
-VSX_CMP(xvcmpgtdp, 2, float64, f64, lt, 1)
-VSX_CMP(xvcmpeqsp, 4, float32, f32, eq, 0)
-VSX_CMP(xvcmpgesp, 4, float32, f32, le, 1)
-VSX_CMP(xvcmpgtsp, 4, float32, f32, lt, 1)
-
-#if defined(HOST_WORDS_BIGENDIAN)
-#define JOFFSET 0
-#else
-#define JOFFSET 1
-#endif
+VSX_CMP(xvcmpeqdp, 2, float64, VsrD(i), eq, 0)
+VSX_CMP(xvcmpgedp, 2, float64, VsrD(i), le, 1)
+VSX_CMP(xvcmpgtdp, 2, float64, VsrD(i), lt, 1)
+VSX_CMP(xvcmpeqsp, 4, float32, VsrW(i), eq, 0)
+VSX_CMP(xvcmpgesp, 4, float32, VsrW(i), le, 1)
+VSX_CMP(xvcmpgtsp, 4, float32, VsrW(i), lt, 1)
/* VSX_CVT_FP_TO_FP - VSX floating point/floating point conversion
* op - instruction mnemonic
@@ -2493,7 +2506,6 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
getVSR(xT(opcode), &xt, env); \
\
for (i = 0; i < nels; i++) { \
- int j = 2*i + JOFFSET; \
xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status); \
if (unlikely(stp##_is_signaling_nan(xb.sfld))) { \
fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
@@ -2509,10 +2521,10 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
helper_float_check_status(env); \
}
-VSX_CVT_FP_TO_FP(xscvdpsp, 1, float64, float32, f64[i], f32[j], 1)
-VSX_CVT_FP_TO_FP(xscvspdp, 1, float32, float64, f32[j], f64[i], 1)
-VSX_CVT_FP_TO_FP(xvcvdpsp, 2, float64, float32, f64[i], f32[j], 0)
-VSX_CVT_FP_TO_FP(xvcvspdp, 2, float32, float64, f32[j], f64[i], 0)
+VSX_CVT_FP_TO_FP(xscvdpsp, 1, float64, float32, VsrD(0), VsrW(0), 1)
+VSX_CVT_FP_TO_FP(xscvspdp, 1, float32, float64, VsrW(0), VsrD(0), 1)
+VSX_CVT_FP_TO_FP(xvcvdpsp, 2, float64, float32, VsrD(i), VsrW(2*i), 0)
+VSX_CVT_FP_TO_FP(xvcvspdp, 2, float32, float64, VsrW(2*i), VsrD(i), 0)
uint64_t helper_xscvdpspn(CPUPPCState *env, uint64_t xb)
{
@@ -2537,10 +2549,9 @@ uint64_t helper_xscvspdpn(CPUPPCState *env, uint64_t xb)
* ttp - target type (int32, uint32, int64 or uint64)
* sfld - source vsr_t field
* tfld - target vsr_t field
- * jdef - definition of the j index (i or 2*i)
* rnan - resulting NaN
*/
-#define VSX_CVT_FP_TO_INT(op, nels, stp, ttp, sfld, tfld, jdef, rnan) \
+#define VSX_CVT_FP_TO_INT(op, nels, stp, ttp, sfld, tfld, rnan) \
void helper_##op(CPUPPCState *env, uint32_t opcode) \
{ \
ppc_vsr_t xt, xb; \
@@ -2550,7 +2561,6 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
getVSR(xT(opcode), &xt, env); \
\
for (i = 0; i < nels; i++) { \
- int j = jdef; \
if (unlikely(stp##_is_any_nan(xb.sfld))) { \
if (stp##_is_signaling_nan(xb.sfld)) { \
fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
@@ -2558,7 +2568,8 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 0); \
xt.tfld = rnan; \
} else { \
- xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status); \
+ xt.tfld = stp##_to_##ttp##_round_to_zero(xb.sfld, \
+ &env->fp_status); \
if (env->fp_status.float_exception_flags & float_flag_invalid) { \
fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 0); \
} \
@@ -2569,27 +2580,23 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
helper_float_check_status(env); \
}
-VSX_CVT_FP_TO_INT(xscvdpsxds, 1, float64, int64, f64[j], u64[i], i, \
+VSX_CVT_FP_TO_INT(xscvdpsxds, 1, float64, int64, VsrD(0), VsrD(0), \
0x8000000000000000ULL)
-VSX_CVT_FP_TO_INT(xscvdpsxws, 1, float64, int32, f64[i], u32[j], \
- 2*i + JOFFSET, 0x80000000U)
-VSX_CVT_FP_TO_INT(xscvdpuxds, 1, float64, uint64, f64[j], u64[i], i, 0ULL)
-VSX_CVT_FP_TO_INT(xscvdpuxws, 1, float64, uint32, f64[i], u32[j], \
- 2*i + JOFFSET, 0U)
-VSX_CVT_FP_TO_INT(xvcvdpsxds, 2, float64, int64, f64[j], u64[i], i, \
+VSX_CVT_FP_TO_INT(xscvdpsxws, 1, float64, int32, VsrD(0), VsrW(1), \
+ 0x80000000U)
+VSX_CVT_FP_TO_INT(xscvdpuxds, 1, float64, uint64, VsrD(0), VsrD(0), 0ULL)
+VSX_CVT_FP_TO_INT(xscvdpuxws, 1, float64, uint32, VsrD(0), VsrW(1), 0U)
+VSX_CVT_FP_TO_INT(xvcvdpsxds, 2, float64, int64, VsrD(i), VsrD(i), \
0x8000000000000000ULL)
-VSX_CVT_FP_TO_INT(xvcvdpsxws, 2, float64, int32, f64[i], u32[j], \
- 2*i + JOFFSET, 0x80000000U)
-VSX_CVT_FP_TO_INT(xvcvdpuxds, 2, float64, uint64, f64[j], u64[i], i, 0ULL)
-VSX_CVT_FP_TO_INT(xvcvdpuxws, 2, float64, uint32, f64[i], u32[j], \
- 2*i + JOFFSET, 0U)
-VSX_CVT_FP_TO_INT(xvcvspsxds, 2, float32, int64, f32[j], u64[i], \
- 2*i + JOFFSET, 0x8000000000000000ULL)
-VSX_CVT_FP_TO_INT(xvcvspsxws, 4, float32, int32, f32[j], u32[j], i, \
+VSX_CVT_FP_TO_INT(xvcvdpsxws, 2, float64, int32, VsrD(i), VsrW(2*i), \
0x80000000U)
-VSX_CVT_FP_TO_INT(xvcvspuxds, 2, float32, uint64, f32[j], u64[i], \
- 2*i + JOFFSET, 0ULL)
-VSX_CVT_FP_TO_INT(xvcvspuxws, 4, float32, uint32, f32[j], u32[i], i, 0U)
+VSX_CVT_FP_TO_INT(xvcvdpuxds, 2, float64, uint64, VsrD(i), VsrD(i), 0ULL)
+VSX_CVT_FP_TO_INT(xvcvdpuxws, 2, float64, uint32, VsrD(i), VsrW(2*i), 0U)
+VSX_CVT_FP_TO_INT(xvcvspsxds, 2, float32, int64, VsrW(2*i), VsrD(i), \
+ 0x8000000000000000ULL)
+VSX_CVT_FP_TO_INT(xvcvspsxws, 4, float32, int32, VsrW(i), VsrW(i), 0x80000000U)
+VSX_CVT_FP_TO_INT(xvcvspuxds, 2, float32, uint64, VsrW(2*i), VsrD(i), 0ULL)
+VSX_CVT_FP_TO_INT(xvcvspuxws, 4, float32, uint32, VsrW(i), VsrW(i), 0U)
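Alongside the accessor change, the float-to-integer converts above now go through the explicit *_round_to_zero softfloat entry points, so the conversion truncates regardless of whichever rounding mode is currently installed in env->fp_status. A plain-C stand-in for that distinction (not QEMU code):

/* Sketch only: truncating conversion vs. conversion in the current
 * rounding mode.  (int)x truncates by definition; rint() honours the
 * dynamic rounding mode. */
#include <fenv.h>
#include <math.h>
#include <stdio.h>

int main(void)
{
    double x = 2.7;

    fesetround(FE_UPWARD);                                 /* non-default mode */
    printf("round-to-zero convert: %d\n", (int)x);         /* always 2 */
    printf("current-mode convert:  %d\n", (int)rint(x));   /* 3 under FE_UPWARD */
    return 0;
}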
/* VSX_CVT_INT_TO_FP - VSX integer to floating point conversion
* op - instruction mnemonic
@@ -2601,7 +2608,7 @@ VSX_CVT_FP_TO_INT(xvcvspuxws, 4, float32, uint32, f32[j], u32[i], i, 0U)
* jdef - definition of the j index (i or 2*i)
* sfprf - set FPRF
*/
-#define VSX_CVT_INT_TO_FP(op, nels, stp, ttp, sfld, tfld, jdef, sfprf, r2sp) \
+#define VSX_CVT_INT_TO_FP(op, nels, stp, ttp, sfld, tfld, sfprf, r2sp) \
void helper_##op(CPUPPCState *env, uint32_t opcode) \
{ \
ppc_vsr_t xt, xb; \
@@ -2611,7 +2618,6 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
getVSR(xT(opcode), &xt, env); \
\
for (i = 0; i < nels; i++) { \
- int j = jdef; \
xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status); \
if (r2sp) { \
xt.tfld = helper_frsp(env, xt.tfld); \
@@ -2625,22 +2631,18 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
helper_float_check_status(env); \
}
-VSX_CVT_INT_TO_FP(xscvsxddp, 1, int64, float64, u64[j], f64[i], i, 1, 0)
-VSX_CVT_INT_TO_FP(xscvuxddp, 1, uint64, float64, u64[j], f64[i], i, 1, 0)
-VSX_CVT_INT_TO_FP(xscvsxdsp, 1, int64, float64, u64[j], f64[i], i, 1, 1)
-VSX_CVT_INT_TO_FP(xscvuxdsp, 1, uint64, float64, u64[j], f64[i], i, 1, 1)
-VSX_CVT_INT_TO_FP(xvcvsxddp, 2, int64, float64, u64[j], f64[i], i, 0, 0)
-VSX_CVT_INT_TO_FP(xvcvuxddp, 2, uint64, float64, u64[j], f64[i], i, 0, 0)
-VSX_CVT_INT_TO_FP(xvcvsxwdp, 2, int32, float64, u32[j], f64[i], \
- 2*i + JOFFSET, 0, 0)
-VSX_CVT_INT_TO_FP(xvcvuxwdp, 2, uint64, float64, u32[j], f64[i], \
- 2*i + JOFFSET, 0, 0)
-VSX_CVT_INT_TO_FP(xvcvsxdsp, 2, int64, float32, u64[i], f32[j], \
- 2*i + JOFFSET, 0, 0)
-VSX_CVT_INT_TO_FP(xvcvuxdsp, 2, uint64, float32, u64[i], f32[j], \
- 2*i + JOFFSET, 0, 0)
-VSX_CVT_INT_TO_FP(xvcvsxwsp, 4, int32, float32, u32[j], f32[i], i, 0, 0)
-VSX_CVT_INT_TO_FP(xvcvuxwsp, 4, uint32, float32, u32[j], f32[i], i, 0, 0)
+VSX_CVT_INT_TO_FP(xscvsxddp, 1, int64, float64, VsrD(0), VsrD(0), 1, 0)
+VSX_CVT_INT_TO_FP(xscvuxddp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 0)
+VSX_CVT_INT_TO_FP(xscvsxdsp, 1, int64, float64, VsrD(0), VsrD(0), 1, 1)
+VSX_CVT_INT_TO_FP(xscvuxdsp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 1)
+VSX_CVT_INT_TO_FP(xvcvsxddp, 2, int64, float64, VsrD(i), VsrD(i), 0, 0)
+VSX_CVT_INT_TO_FP(xvcvuxddp, 2, uint64, float64, VsrD(i), VsrD(i), 0, 0)
+VSX_CVT_INT_TO_FP(xvcvsxwdp, 2, int32, float64, VsrW(2*i), VsrD(i), 0, 0)
+VSX_CVT_INT_TO_FP(xvcvuxwdp, 2, uint64, float64, VsrW(2*i), VsrD(i), 0, 0)
+VSX_CVT_INT_TO_FP(xvcvsxdsp, 2, int64, float32, VsrD(i), VsrW(2*i), 0, 0)
+VSX_CVT_INT_TO_FP(xvcvuxdsp, 2, uint64, float32, VsrD(i), VsrW(2*i), 0, 0)
+VSX_CVT_INT_TO_FP(xvcvsxwsp, 4, int32, float32, VsrW(i), VsrW(i), 0, 0)
+VSX_CVT_INT_TO_FP(xvcvuxwsp, 4, uint32, float32, VsrW(i), VsrW(i), 0, 0)
/* For "use current rounding mode", define a value that will not be one of
* the existing rounding model enums.
@@ -2652,7 +2654,7 @@ VSX_CVT_INT_TO_FP(xvcvuxwsp, 4, uint32, float32, u32[j], f32[i], i, 0, 0)
* op - instruction mnemonic
* nels - number of elements (1, 2 or 4)
* tp - type (float32 or float64)
- * fld - vsr_t field (f32 or f64)
+ * fld - vsr_t field (VsrD(*) or VsrW(*))
* rmode - rounding mode
* sfprf - set FPRF
*/
@@ -2669,14 +2671,14 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
} \
\
for (i = 0; i < nels; i++) { \
- if (unlikely(tp##_is_signaling_nan(xb.fld[i]))) { \
+ if (unlikely(tp##_is_signaling_nan(xb.fld))) { \
fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
- xt.fld[i] = tp##_snan_to_qnan(xb.fld[i]); \
+ xt.fld = tp##_snan_to_qnan(xb.fld); \
} else { \
- xt.fld[i] = tp##_round_to_int(xb.fld[i], &env->fp_status); \
+ xt.fld = tp##_round_to_int(xb.fld, &env->fp_status); \
} \
if (sfprf) { \
- helper_compute_fprf(env, xt.fld[i], sfprf); \
+ helper_compute_fprf(env, xt.fld, sfprf); \
} \
} \
\
@@ -2692,23 +2694,23 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
helper_float_check_status(env); \
}
-VSX_ROUND(xsrdpi, 1, float64, f64, float_round_nearest_even, 1)
-VSX_ROUND(xsrdpic, 1, float64, f64, FLOAT_ROUND_CURRENT, 1)
-VSX_ROUND(xsrdpim, 1, float64, f64, float_round_down, 1)
-VSX_ROUND(xsrdpip, 1, float64, f64, float_round_up, 1)
-VSX_ROUND(xsrdpiz, 1, float64, f64, float_round_to_zero, 1)
+VSX_ROUND(xsrdpi, 1, float64, VsrD(0), float_round_nearest_even, 1)
+VSX_ROUND(xsrdpic, 1, float64, VsrD(0), FLOAT_ROUND_CURRENT, 1)
+VSX_ROUND(xsrdpim, 1, float64, VsrD(0), float_round_down, 1)
+VSX_ROUND(xsrdpip, 1, float64, VsrD(0), float_round_up, 1)
+VSX_ROUND(xsrdpiz, 1, float64, VsrD(0), float_round_to_zero, 1)
-VSX_ROUND(xvrdpi, 2, float64, f64, float_round_nearest_even, 0)
-VSX_ROUND(xvrdpic, 2, float64, f64, FLOAT_ROUND_CURRENT, 0)
-VSX_ROUND(xvrdpim, 2, float64, f64, float_round_down, 0)
-VSX_ROUND(xvrdpip, 2, float64, f64, float_round_up, 0)
-VSX_ROUND(xvrdpiz, 2, float64, f64, float_round_to_zero, 0)
+VSX_ROUND(xvrdpi, 2, float64, VsrD(i), float_round_nearest_even, 0)
+VSX_ROUND(xvrdpic, 2, float64, VsrD(i), FLOAT_ROUND_CURRENT, 0)
+VSX_ROUND(xvrdpim, 2, float64, VsrD(i), float_round_down, 0)
+VSX_ROUND(xvrdpip, 2, float64, VsrD(i), float_round_up, 0)
+VSX_ROUND(xvrdpiz, 2, float64, VsrD(i), float_round_to_zero, 0)
-VSX_ROUND(xvrspi, 4, float32, f32, float_round_nearest_even, 0)
-VSX_ROUND(xvrspic, 4, float32, f32, FLOAT_ROUND_CURRENT, 0)
-VSX_ROUND(xvrspim, 4, float32, f32, float_round_down, 0)
-VSX_ROUND(xvrspip, 4, float32, f32, float_round_up, 0)
-VSX_ROUND(xvrspiz, 4, float32, f32, float_round_to_zero, 0)
+VSX_ROUND(xvrspi, 4, float32, VsrW(i), float_round_nearest_even, 0)
+VSX_ROUND(xvrspic, 4, float32, VsrW(i), FLOAT_ROUND_CURRENT, 0)
+VSX_ROUND(xvrspim, 4, float32, VsrW(i), float_round_down, 0)
+VSX_ROUND(xvrspip, 4, float32, VsrW(i), float_round_up, 0)
+VSX_ROUND(xvrspiz, 4, float32, VsrW(i), float_round_to_zero, 0)
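FLOAT_ROUND_CURRENT, used by the xsrdpic/xvrdpic/xvrspic rows above, is a sentinel outside the real rounding-mode enum that means "keep the caller's mode". The same pattern with C99 fenv, for illustration (the sentinel value and helper name are made up):

/* Sketch only: a round-to-integral helper that installs the requested
 * rounding mode unless the caller passed the "use current mode" sentinel,
 * and restores the previous mode afterwards. */
#include <fenv.h>
#include <math.h>
#include <stdio.h>

#define ROUND_CURRENT (-1)    /* deliberately not a valid rounding mode */

static double round_with_mode(double x, int rmode)
{
    int saved = fegetround();

    if (rmode != ROUND_CURRENT) {
        fesetround(rmode);
    }
    double r = nearbyint(x);  /* honours the installed rounding mode */
    fesetround(saved);
    return r;
}

int main(void)
{
    printf("%g\n", round_with_mode(2.5, FE_DOWNWARD));    /* 2 */
    printf("%g\n", round_with_mode(2.5, ROUND_CURRENT));  /* caller's mode */
    return 0;
}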
uint64_t helper_xsrsp(CPUPPCState *env, uint64_t xb)
{
diff --git a/target-ppc/helper_regs.h b/target-ppc/helper_regs.h
index c02e8da4e4..271fddf17f 100644
--- a/target-ppc/helper_regs.h
+++ b/target-ppc/helper_regs.h
@@ -83,7 +83,7 @@ static inline int hreg_store_msr(CPUPPCState *env, target_ulong value,
if (((value >> MSR_IR) & 1) != msr_ir ||
((value >> MSR_DR) & 1) != msr_dr) {
/* Flush all tlb when changing translation mode */
- tlb_flush(env, 1);
+ tlb_flush(cs, 1);
excp = POWERPC_EXCP_NONE;
cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
}
@@ -101,7 +101,7 @@ static inline int hreg_store_msr(CPUPPCState *env, target_ulong value,
hreg_compute_hflags(env);
#if !defined(CONFIG_USER_ONLY)
if (unlikely(msr_pow == 1)) {
- if ((*env->check_pow)(env)) {
+ if (!env->pending_interrupts && (*env->check_pow)(env)) {
cs->halted = 1;
excp = EXCP_HALTED;
}
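The second helper_regs.h hunk adds a "!env->pending_interrupts" guard before entering power-save: the effect is that a vCPU with an interrupt already latched services it instead of being halted. A simplified stand-in for the guard (types and names are not QEMU's):

/* Sketch only: don't halt a vCPU that already has interrupts pending. */
#include <stdbool.h>
#include <stdio.h>

struct vcpu_sketch {
    unsigned pending_interrupts;   /* bitmask of latched interrupt sources */
    bool halted;
};

static void maybe_enter_power_save(struct vcpu_sketch *v, bool msr_pow_set)
{
    if (msr_pow_set && !v->pending_interrupts) {
        v->halted = true;          /* safe: nothing is waiting to be serviced */
    }
}

int main(void)
{
    struct vcpu_sketch v = { .pending_interrupts = 1, .halted = false };

    maybe_enter_power_save(&v, true);
    printf("halted=%d\n", v.halted);   /* 0: the pending interrupt blocks the halt */
    return 0;
}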
diff --git a/target-ppc/int_helper.c b/target-ppc/int_helper.c
index 63dde94b04..18b54f060a 100644
--- a/target-ppc/int_helper.c
+++ b/target-ppc/int_helper.c
@@ -1075,7 +1075,7 @@ void helper_vbpermq(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
#undef VBPERMQ_INDEX
#undef VBPERMQ_DW
-uint64_t VGBBD_MASKS[256] = {
+static const uint64_t VGBBD_MASKS[256] = {
0x0000000000000000ull, /* 00 */
0x0000000000000080ull, /* 01 */
0x0000000000008000ull, /* 02 */
@@ -2216,7 +2216,7 @@ static int bcd_cmp_mag(ppc_avr_t *a, ppc_avr_t *b)
uint8_t dig_a = bcd_get_digit(a, i, &invalid);
uint8_t dig_b = bcd_get_digit(b, i, &invalid);
if (unlikely(invalid)) {
- return 0; /* doesnt matter */
+ return 0; /* doesn't matter */
} else if (dig_a > dig_b) {
return 1;
} else if (dig_a < dig_b) {
diff --git a/target-ppc/kvm.c b/target-ppc/kvm.c
index 32e7a8c0a7..9974b10ccb 100644
--- a/target-ppc/kvm.c
+++ b/target-ppc/kvm.c
@@ -1178,7 +1178,7 @@ static int kvmppc_handle_halt(PowerPCCPU *cpu)
if (!(cs->interrupt_request & CPU_INTERRUPT_HARD) && (msr_ee)) {
cs->halted = 1;
- env->exception_index = EXCP_HLT;
+ cs->exception_index = EXCP_HLT;
}
return 0;
@@ -1504,7 +1504,6 @@ int kvmppc_get_hypercall(CPUPPCState *env, uint8_t *buf, int buf_len)
void kvmppc_set_papr(PowerPCCPU *cpu)
{
- CPUPPCState *env = &cpu->env;
CPUState *cs = CPU(cpu);
struct kvm_enable_cap cap = {};
int ret;
@@ -1513,7 +1512,7 @@ void kvmppc_set_papr(PowerPCCPU *cpu)
ret = kvm_vcpu_ioctl(cs, KVM_ENABLE_CAP, &cap);
if (ret) {
- cpu_abort(env, "This KVM version does not support PAPR\n");
+ cpu_abort(cs, "This KVM version does not support PAPR\n");
}
/* Update the capability flag so we sync the right information
@@ -1523,7 +1522,6 @@ void kvmppc_set_papr(PowerPCCPU *cpu)
void kvmppc_set_mpic_proxy(PowerPCCPU *cpu, int mpic_proxy)
{
- CPUPPCState *env = &cpu->env;
CPUState *cs = CPU(cpu);
struct kvm_enable_cap cap = {};
int ret;
@@ -1533,7 +1531,7 @@ void kvmppc_set_mpic_proxy(PowerPCCPU *cpu, int mpic_proxy)
ret = kvm_vcpu_ioctl(cs, KVM_ENABLE_CAP, &cap);
if (ret && mpic_proxy) {
- cpu_abort(env, "This KVM version does not support EPR\n");
+ cpu_abort(cs, "This KVM version does not support EPR\n");
}
}
diff --git a/target-ppc/machine.c b/target-ppc/machine.c
index 2d46ceccca..063b379d90 100644
--- a/target-ppc/machine.c
+++ b/target-ppc/machine.c
@@ -114,7 +114,7 @@ static void put_avr(QEMUFile *f, void *pv, size_t size)
qemu_put_be64(f, v->u64[1]);
}
-const VMStateInfo vmstate_info_avr = {
+static const VMStateInfo vmstate_info_avr = {
.name = "avr",
.get = get_avr,
.put = put_avr,
@@ -288,7 +288,7 @@ static void put_slbe(QEMUFile *f, void *pv, size_t size)
qemu_put_be64(f, v->vsid);
}
-const VMStateInfo vmstate_info_slbe = {
+static const VMStateInfo vmstate_info_slbe = {
.name = "slbe",
.get = get_slbe,
.put = put_slbe,
diff --git a/target-ppc/misc_helper.c b/target-ppc/misc_helper.c
index dc2ebfc452..2eb2fa6e20 100644
--- a/target-ppc/misc_helper.c
+++ b/target-ppc/misc_helper.c
@@ -62,10 +62,12 @@ void helper_store_hid0_601(CPUPPCState *env, target_ulong val)
void helper_store_403_pbr(CPUPPCState *env, uint32_t num, target_ulong value)
{
+ PowerPCCPU *cpu = ppc_env_get_cpu(env);
+
if (likely(env->pb[num] != value)) {
env->pb[num] = value;
/* Should be optimized */
- tlb_flush(env, 1);
+ tlb_flush(CPU(cpu), 1);
}
}
diff --git a/target-ppc/mmu-hash32.c b/target-ppc/mmu-hash32.c
index 6a77dc4f97..1cc19162b7 100644
--- a/target-ppc/mmu-hash32.c
+++ b/target-ppc/mmu-hash32.c
@@ -222,6 +222,7 @@ static int ppc_hash32_direct_store(CPUPPCState *env, target_ulong sr,
target_ulong eaddr, int rwx,
hwaddr *raddr, int *prot)
{
+ CPUState *cs = CPU(ppc_env_get_cpu(env));
int key = !!(msr_pr ? (sr & SR32_KP) : (sr & SR32_KS));
LOG_MMU("direct store...\n");
@@ -238,7 +239,7 @@ static int ppc_hash32_direct_store(CPUPPCState *env, target_ulong sr,
if (rwx == 2) {
/* No code fetch is allowed in direct-store areas */
- env->exception_index = POWERPC_EXCP_ISI;
+ cs->exception_index = POWERPC_EXCP_ISI;
env->error_code = 0x10000000;
return 1;
}
@@ -249,7 +250,7 @@ static int ppc_hash32_direct_store(CPUPPCState *env, target_ulong sr,
break;
case ACCESS_FLOAT:
/* Floating point load/store */
- env->exception_index = POWERPC_EXCP_ALIGN;
+ cs->exception_index = POWERPC_EXCP_ALIGN;
env->error_code = POWERPC_EXCP_ALIGN_FP;
env->spr[SPR_DAR] = eaddr;
return 1;
@@ -272,7 +273,7 @@ static int ppc_hash32_direct_store(CPUPPCState *env, target_ulong sr,
return 0;
case ACCESS_EXT:
/* eciwx or ecowx */
- env->exception_index = POWERPC_EXCP_DSI;
+ cs->exception_index = POWERPC_EXCP_DSI;
env->error_code = 0;
env->spr[SPR_DAR] = eaddr;
if (rwx == 1) {
@@ -290,7 +291,7 @@ static int ppc_hash32_direct_store(CPUPPCState *env, target_ulong sr,
*raddr = eaddr;
return 0;
} else {
- env->exception_index = POWERPC_EXCP_DSI;
+ cs->exception_index = POWERPC_EXCP_DSI;
env->error_code = 0;
env->spr[SPR_DAR] = eaddr;
if (rwx == 1) {
@@ -380,9 +381,11 @@ static hwaddr ppc_hash32_pte_raddr(target_ulong sr, ppc_hash_pte32_t pte,
return (rpn & ~mask) | (eaddr & mask);
}
-int ppc_hash32_handle_mmu_fault(CPUPPCState *env, target_ulong eaddr, int rwx,
+int ppc_hash32_handle_mmu_fault(PowerPCCPU *cpu, target_ulong eaddr, int rwx,
int mmu_idx)
{
+ CPUState *cs = CPU(cpu);
+ CPUPPCState *env = &cpu->env;
target_ulong sr;
hwaddr pte_offset;
ppc_hash_pte32_t pte;
@@ -397,7 +400,7 @@ int ppc_hash32_handle_mmu_fault(CPUPPCState *env, target_ulong eaddr, int rwx,
if (((rwx == 2) && (msr_ir == 0)) || ((rwx != 2) && (msr_dr == 0))) {
/* Translation is off */
raddr = eaddr;
- tlb_set_page(env, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
+ tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
PAGE_READ | PAGE_WRITE | PAGE_EXEC, mmu_idx,
TARGET_PAGE_SIZE);
return 0;
@@ -409,10 +412,10 @@ int ppc_hash32_handle_mmu_fault(CPUPPCState *env, target_ulong eaddr, int rwx,
if (raddr != -1) {
if (need_prot[rwx] & ~prot) {
if (rwx == 2) {
- env->exception_index = POWERPC_EXCP_ISI;
+ cs->exception_index = POWERPC_EXCP_ISI;
env->error_code = 0x08000000;
} else {
- env->exception_index = POWERPC_EXCP_DSI;
+ cs->exception_index = POWERPC_EXCP_DSI;
env->error_code = 0;
env->spr[SPR_DAR] = eaddr;
if (rwx == 1) {
@@ -424,7 +427,7 @@ int ppc_hash32_handle_mmu_fault(CPUPPCState *env, target_ulong eaddr, int rwx,
return 1;
}
- tlb_set_page(env, eaddr & TARGET_PAGE_MASK,
+ tlb_set_page(cs, eaddr & TARGET_PAGE_MASK,
raddr & TARGET_PAGE_MASK, prot, mmu_idx,
TARGET_PAGE_SIZE);
return 0;
@@ -438,7 +441,7 @@ int ppc_hash32_handle_mmu_fault(CPUPPCState *env, target_ulong eaddr, int rwx,
if (sr & SR32_T) {
if (ppc_hash32_direct_store(env, sr, eaddr, rwx,
&raddr, &prot) == 0) {
- tlb_set_page(env, eaddr & TARGET_PAGE_MASK,
+ tlb_set_page(cs, eaddr & TARGET_PAGE_MASK,
raddr & TARGET_PAGE_MASK, prot, mmu_idx,
TARGET_PAGE_SIZE);
return 0;
@@ -449,7 +452,7 @@ int ppc_hash32_handle_mmu_fault(CPUPPCState *env, target_ulong eaddr, int rwx,
/* 5. Check for segment level no-execute violation */
if ((rwx == 2) && (sr & SR32_NX)) {
- env->exception_index = POWERPC_EXCP_ISI;
+ cs->exception_index = POWERPC_EXCP_ISI;
env->error_code = 0x10000000;
return 1;
}
@@ -458,10 +461,10 @@ int ppc_hash32_handle_mmu_fault(CPUPPCState *env, target_ulong eaddr, int rwx,
pte_offset = ppc_hash32_htab_lookup(env, sr, eaddr, &pte);
if (pte_offset == -1) {
if (rwx == 2) {
- env->exception_index = POWERPC_EXCP_ISI;
+ cs->exception_index = POWERPC_EXCP_ISI;
env->error_code = 0x40000000;
} else {
- env->exception_index = POWERPC_EXCP_DSI;
+ cs->exception_index = POWERPC_EXCP_DSI;
env->error_code = 0;
env->spr[SPR_DAR] = eaddr;
if (rwx == 1) {
@@ -483,10 +486,10 @@ int ppc_hash32_handle_mmu_fault(CPUPPCState *env, target_ulong eaddr, int rwx,
/* Access right violation */
LOG_MMU("PTE access rejected\n");
if (rwx == 2) {
- env->exception_index = POWERPC_EXCP_ISI;
+ cs->exception_index = POWERPC_EXCP_ISI;
env->error_code = 0x08000000;
} else {
- env->exception_index = POWERPC_EXCP_DSI;
+ cs->exception_index = POWERPC_EXCP_DSI;
env->error_code = 0;
env->spr[SPR_DAR] = eaddr;
if (rwx == 1) {
@@ -519,7 +522,7 @@ int ppc_hash32_handle_mmu_fault(CPUPPCState *env, target_ulong eaddr, int rwx,
raddr = ppc_hash32_pte_raddr(sr, pte, eaddr);
- tlb_set_page(env, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
+ tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
prot, mmu_idx, TARGET_PAGE_SIZE);
return 0;
diff --git a/target-ppc/mmu-hash32.h b/target-ppc/mmu-hash32.h
index 4671141a32..d515d4ff73 100644
--- a/target-ppc/mmu-hash32.h
+++ b/target-ppc/mmu-hash32.h
@@ -5,7 +5,7 @@
hwaddr get_pteg_offset32(CPUPPCState *env, hwaddr hash);
hwaddr ppc_hash32_get_phys_page_debug(CPUPPCState *env, target_ulong addr);
-int ppc_hash32_handle_mmu_fault(CPUPPCState *env, target_ulong address, int rw,
+int ppc_hash32_handle_mmu_fault(PowerPCCPU *cpu, target_ulong address, int rw,
int mmu_idx);
/*
@@ -68,7 +68,8 @@ int ppc_hash32_handle_mmu_fault(CPUPPCState *env, target_ulong address, int rw,
static inline target_ulong ppc_hash32_load_hpte0(CPUPPCState *env,
hwaddr pte_offset)
{
- CPUState *cs = ENV_GET_CPU(env);
+ CPUState *cs = CPU(ppc_env_get_cpu(env));
+
assert(!env->external_htab); /* Not supported on 32-bit for now */
return ldl_phys(cs->as, env->htab_base + pte_offset);
}
@@ -76,7 +77,8 @@ static inline target_ulong ppc_hash32_load_hpte0(CPUPPCState *env,
static inline target_ulong ppc_hash32_load_hpte1(CPUPPCState *env,
hwaddr pte_offset)
{
- CPUState *cs = ENV_GET_CPU(env);
+ CPUState *cs = CPU(ppc_env_get_cpu(env));
+
assert(!env->external_htab); /* Not supported on 32-bit for now */
return ldl_phys(cs->as, env->htab_base + pte_offset + HASH_PTE_SIZE_32/2);
}
@@ -84,7 +86,8 @@ static inline target_ulong ppc_hash32_load_hpte1(CPUPPCState *env,
static inline void ppc_hash32_store_hpte0(CPUPPCState *env,
hwaddr pte_offset, target_ulong pte0)
{
- CPUState *cs = ENV_GET_CPU(env);
+ CPUState *cs = CPU(ppc_env_get_cpu(env));
+
assert(!env->external_htab); /* Not supported on 32-bit for now */
stl_phys(cs->as, env->htab_base + pte_offset, pte0);
}
@@ -92,7 +95,8 @@ static inline void ppc_hash32_store_hpte0(CPUPPCState *env,
static inline void ppc_hash32_store_hpte1(CPUPPCState *env,
hwaddr pte_offset, target_ulong pte1)
{
- CPUState *cs = ENV_GET_CPU(env);
+ CPUState *cs = CPU(ppc_env_get_cpu(env));
+
assert(!env->external_htab); /* Not supported on 32-bit for now */
stl_phys(cs->as, env->htab_base + pte_offset + HASH_PTE_SIZE_32/2, pte1);
}
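Most of the remaining hunks replace env-based calls (tlb_flush, tlb_flush_page, tlb_set_page, cpu_abort, env->exception_index) with their CPUState equivalents, using CPU(ppc_env_get_cpu(env)) to recover the CPU object from a pointer to the embedded architectural state. A container_of-style sketch of that recovery, with simplified stand-in types rather than QEMU's:

/* Sketch only: recovering the containing CPU object from the embedded
 * target-specific state, which is what ppc_env_get_cpu() amounts to. */
#include <stddef.h>
#include <stdio.h>

typedef struct { int exception_index; } CPUStateSketch;
typedef struct { unsigned long msr; } CPUPPCStateSketch;

typedef struct {
    CPUStateSketch parent;      /* generic per-CPU state */
    CPUPPCStateSketch env;      /* target-specific state */
} PowerPCCPUSketch;

static PowerPCCPUSketch *sketch_env_get_cpu(CPUPPCStateSketch *env)
{
    return (PowerPCCPUSketch *)((char *)env - offsetof(PowerPCCPUSketch, env));
}

int main(void)
{
    PowerPCCPUSketch cpu = { .parent.exception_index = -1, .env.msr = 0 };
    CPUPPCStateSketch *env = &cpu.env;

    /* mirrors "cs->exception_index = ..." replacing "env->exception_index" */
    sketch_env_get_cpu(env)->parent.exception_index = 42;
    printf("%d\n", cpu.parent.exception_index);
    return 0;
}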
diff --git a/target-ppc/mmu-hash64.c b/target-ppc/mmu-hash64.c
index f2af4fbaa7..1fefe5881e 100644
--- a/target-ppc/mmu-hash64.c
+++ b/target-ppc/mmu-hash64.c
@@ -99,6 +99,7 @@ void dump_slb(FILE *f, fprintf_function cpu_fprintf, CPUPPCState *env)
void helper_slbia(CPUPPCState *env)
{
+ PowerPCCPU *cpu = ppc_env_get_cpu(env);
int n, do_invalidate;
do_invalidate = 0;
@@ -116,12 +117,13 @@ void helper_slbia(CPUPPCState *env)
}
}
if (do_invalidate) {
- tlb_flush(env, 1);
+ tlb_flush(CPU(cpu), 1);
}
}
void helper_slbie(CPUPPCState *env, target_ulong addr)
{
+ PowerPCCPU *cpu = ppc_env_get_cpu(env);
ppc_slb_t *slb;
slb = slb_lookup(env, addr);
@@ -136,7 +138,7 @@ void helper_slbie(CPUPPCState *env, target_ulong addr)
* and we still don't have a tlb_flush_mask(env, n, mask)
* in QEMU, we just invalidate all TLBs
*/
- tlb_flush(env, 1);
+ tlb_flush(CPU(cpu), 1);
}
}
@@ -454,9 +456,11 @@ static hwaddr ppc_hash64_pte_raddr(ppc_slb_t *slb, ppc_hash_pte64_t pte,
return (rpn & ~mask) | (eaddr & mask);
}
-int ppc_hash64_handle_mmu_fault(CPUPPCState *env, target_ulong eaddr,
+int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, target_ulong eaddr,
int rwx, int mmu_idx)
{
+ CPUState *cs = CPU(cpu);
+ CPUPPCState *env = &cpu->env;
ppc_slb_t *slb;
hwaddr pte_offset;
ppc_hash_pte64_t pte;
@@ -472,7 +476,7 @@ int ppc_hash64_handle_mmu_fault(CPUPPCState *env, target_ulong eaddr,
/* Translation is off */
/* In real mode the top 4 effective address bits are ignored */
raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL;
- tlb_set_page(env, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
+ tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
PAGE_READ | PAGE_WRITE | PAGE_EXEC, mmu_idx,
TARGET_PAGE_SIZE);
return 0;
@@ -483,10 +487,10 @@ int ppc_hash64_handle_mmu_fault(CPUPPCState *env, target_ulong eaddr,
if (!slb) {
if (rwx == 2) {
- env->exception_index = POWERPC_EXCP_ISEG;
+ cs->exception_index = POWERPC_EXCP_ISEG;
env->error_code = 0;
} else {
- env->exception_index = POWERPC_EXCP_DSEG;
+ cs->exception_index = POWERPC_EXCP_DSEG;
env->error_code = 0;
env->spr[SPR_DAR] = eaddr;
}
@@ -495,7 +499,7 @@ int ppc_hash64_handle_mmu_fault(CPUPPCState *env, target_ulong eaddr,
/* 3. Check for segment level no-execute violation */
if ((rwx == 2) && (slb->vsid & SLB_VSID_N)) {
- env->exception_index = POWERPC_EXCP_ISI;
+ cs->exception_index = POWERPC_EXCP_ISI;
env->error_code = 0x10000000;
return 1;
}
@@ -504,10 +508,10 @@ int ppc_hash64_handle_mmu_fault(CPUPPCState *env, target_ulong eaddr,
pte_offset = ppc_hash64_htab_lookup(env, slb, eaddr, &pte);
if (pte_offset == -1) {
if (rwx == 2) {
- env->exception_index = POWERPC_EXCP_ISI;
+ cs->exception_index = POWERPC_EXCP_ISI;
env->error_code = 0x40000000;
} else {
- env->exception_index = POWERPC_EXCP_DSI;
+ cs->exception_index = POWERPC_EXCP_DSI;
env->error_code = 0;
env->spr[SPR_DAR] = eaddr;
if (rwx == 1) {
@@ -530,12 +534,12 @@ int ppc_hash64_handle_mmu_fault(CPUPPCState *env, target_ulong eaddr,
/* Access right violation */
LOG_MMU("PTE access rejected\n");
if (rwx == 2) {
- env->exception_index = POWERPC_EXCP_ISI;
+ cs->exception_index = POWERPC_EXCP_ISI;
env->error_code = 0x08000000;
} else {
target_ulong dsisr = 0;
- env->exception_index = POWERPC_EXCP_DSI;
+ cs->exception_index = POWERPC_EXCP_DSI;
env->error_code = 0;
env->spr[SPR_DAR] = eaddr;
if (need_prot[rwx] & ~pp_prot) {
@@ -574,7 +578,7 @@ int ppc_hash64_handle_mmu_fault(CPUPPCState *env, target_ulong eaddr,
raddr = ppc_hash64_pte_raddr(slb, pte, eaddr);
- tlb_set_page(env, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
+ tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
prot, mmu_idx, TARGET_PAGE_SIZE);
return 0;
@@ -608,7 +612,7 @@ void ppc_hash64_store_hpte(CPUPPCState *env,
target_ulong pte_index,
target_ulong pte0, target_ulong pte1)
{
- CPUState *cs = ENV_GET_CPU(env);
+ CPUState *cs = CPU(ppc_env_get_cpu(env));
if (kvmppc_kern_htab) {
return kvmppc_hash64_write_pte(env, pte_index, pte0, pte1);
diff --git a/target-ppc/mmu-hash64.h b/target-ppc/mmu-hash64.h
index 1746b3e2d3..49e385db90 100644
--- a/target-ppc/mmu-hash64.h
+++ b/target-ppc/mmu-hash64.h
@@ -7,7 +7,7 @@
void dump_slb(FILE *f, fprintf_function cpu_fprintf, CPUPPCState *env);
int ppc_store_slb (CPUPPCState *env, target_ulong rb, target_ulong rs);
hwaddr ppc_hash64_get_phys_page_debug(CPUPPCState *env, target_ulong addr);
-int ppc_hash64_handle_mmu_fault(CPUPPCState *env, target_ulong address, int rw,
+int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, target_ulong address, int rw,
int mmu_idx);
void ppc_hash64_store_hpte(CPUPPCState *env, target_ulong index,
target_ulong pte0, target_ulong pte1);
@@ -85,8 +85,9 @@ void ppc_hash64_stop_access(uint64_t token);
static inline target_ulong ppc_hash64_load_hpte0(CPUPPCState *env,
uint64_t token, int index)
{
- CPUState *cs = ENV_GET_CPU(env);
+ CPUState *cs = CPU(ppc_env_get_cpu(env));
uint64_t addr;
+
addr = token + (index * HASH_PTE_SIZE_64);
if (env->external_htab) {
return ldq_p((const void *)(uintptr_t)addr);
@@ -98,8 +99,9 @@ static inline target_ulong ppc_hash64_load_hpte0(CPUPPCState *env,
static inline target_ulong ppc_hash64_load_hpte1(CPUPPCState *env,
uint64_t token, int index)
{
- CPUState *cs = ENV_GET_CPU(env);
+ CPUState *cs = CPU(ppc_env_get_cpu(env));
uint64_t addr;
+
addr = token + (index * HASH_PTE_SIZE_64) + HASH_PTE_SIZE_64/2;
if (env->external_htab) {
return ldq_p((const void *)(uintptr_t)addr);
diff --git a/target-ppc/mmu_helper.c b/target-ppc/mmu_helper.c
index 8e2f8e736a..1771863dff 100644
--- a/target-ppc/mmu_helper.c
+++ b/target-ppc/mmu_helper.c
@@ -231,6 +231,7 @@ static inline int ppc6xx_tlb_getnum(CPUPPCState *env, target_ulong eaddr,
static inline void ppc6xx_tlb_invalidate_all(CPUPPCState *env)
{
+ PowerPCCPU *cpu = ppc_env_get_cpu(env);
ppc6xx_tlb_t *tlb;
int nr, max;
@@ -244,7 +245,7 @@ static inline void ppc6xx_tlb_invalidate_all(CPUPPCState *env)
tlb = &env->tlb.tlb6[nr];
pte_invalidate(&tlb->pte0);
}
- tlb_flush(env, 1);
+ tlb_flush(CPU(cpu), 1);
}
static inline void ppc6xx_tlb_invalidate_virt2(CPUPPCState *env,
@@ -252,6 +253,7 @@ static inline void ppc6xx_tlb_invalidate_virt2(CPUPPCState *env,
int is_code, int match_epn)
{
#if !defined(FLUSH_ALL_TLBS)
+ CPUState *cs = CPU(ppc_env_get_cpu(env));
ppc6xx_tlb_t *tlb;
int way, nr;
@@ -263,7 +265,7 @@ static inline void ppc6xx_tlb_invalidate_virt2(CPUPPCState *env,
LOG_SWTLB("TLB invalidate %d/%d " TARGET_FMT_lx "\n", nr,
env->nb_tlb, eaddr);
pte_invalidate(&tlb->pte0);
- tlb_flush_page(env, tlb->EPN);
+ tlb_flush_page(cs, tlb->EPN);
}
}
#else
@@ -643,6 +645,7 @@ static int ppcemb_tlb_search(CPUPPCState *env, target_ulong address,
/* Helpers specific to PowerPC 40x implementations */
static inline void ppc4xx_tlb_invalidate_all(CPUPPCState *env)
{
+ PowerPCCPU *cpu = ppc_env_get_cpu(env);
ppcemb_tlb_t *tlb;
int i;
@@ -650,13 +653,14 @@ static inline void ppc4xx_tlb_invalidate_all(CPUPPCState *env)
tlb = &env->tlb.tlbe[i];
tlb->prot &= ~PAGE_VALID;
}
- tlb_flush(env, 1);
+ tlb_flush(CPU(cpu), 1);
}
static inline void ppc4xx_tlb_invalidate_virt(CPUPPCState *env,
target_ulong eaddr, uint32_t pid)
{
#if !defined(FLUSH_ALL_TLBS)
+ CPUState *cs = CPU(ppc_env_get_cpu(env));
ppcemb_tlb_t *tlb;
hwaddr raddr;
target_ulong page, end;
@@ -667,7 +671,7 @@ static inline void ppc4xx_tlb_invalidate_virt(CPUPPCState *env,
if (ppcemb_tlb_check(env, tlb, &raddr, eaddr, pid, 0, i) == 0) {
end = tlb->EPN + tlb->size;
for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) {
- tlb_flush_page(env, page);
+ tlb_flush_page(cs, page);
}
tlb->prot &= ~PAGE_VALID;
break;
@@ -746,9 +750,11 @@ static int mmu40x_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
void store_40x_sler(CPUPPCState *env, uint32_t val)
{
+ PowerPCCPU *cpu = ppc_env_get_cpu(env);
+
/* XXX: TO BE FIXED */
if (val != 0x00000000) {
- cpu_abort(env, "Little-endian regions are not supported by now\n");
+ cpu_abort(CPU(cpu), "Little-endian regions are not supported by now\n");
}
env->spr[SPR_405_SLER] = val;
}
@@ -858,6 +864,7 @@ static int mmubooke_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
static void booke206_flush_tlb(CPUPPCState *env, int flags,
const int check_iprot)
{
+ PowerPCCPU *cpu = ppc_env_get_cpu(env);
int tlb_size;
int i, j;
ppcmas_tlb_t *tlb = env->tlb.tlbm;
@@ -874,7 +881,7 @@ static void booke206_flush_tlb(CPUPPCState *env, int flags,
tlb += booke206_tlb_size(env, i);
}
- tlb_flush(env, 1);
+ tlb_flush(CPU(cpu), 1);
}
static hwaddr booke206_tlb_to_page_size(CPUPPCState *env,
@@ -1344,6 +1351,7 @@ static inline int check_physical(CPUPPCState *env, mmu_ctx_t *ctx,
static int get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
target_ulong eaddr, int rw, int access_type)
{
+ PowerPCCPU *cpu = ppc_env_get_cpu(env);
int ret = -1;
bool real_mode = (access_type == ACCESS_CODE && msr_ir == 0)
|| (access_type != ACCESS_CODE && msr_dr == 0);
@@ -1388,17 +1396,17 @@ static int get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
break;
case POWERPC_MMU_MPC8xx:
/* XXX: TODO */
- cpu_abort(env, "MPC8xx MMU model is not implemented\n");
+ cpu_abort(CPU(cpu), "MPC8xx MMU model is not implemented\n");
break;
case POWERPC_MMU_REAL:
if (real_mode) {
ret = check_physical(env, ctx, eaddr, rw);
} else {
- cpu_abort(env, "PowerPC in real mode do not do any translation\n");
+ cpu_abort(CPU(cpu), "PowerPC in real mode do not do any translation\n");
}
return -1;
default:
- cpu_abort(env, "Unknown or invalid MMU model\n");
+ cpu_abort(CPU(cpu), "Unknown or invalid MMU model\n");
return -1;
}
#if 0
@@ -1491,6 +1499,7 @@ static void booke206_update_mas_tlb_miss(CPUPPCState *env, target_ulong address,
static int cpu_ppc_handle_mmu_fault(CPUPPCState *env, target_ulong address,
int rw, int mmu_idx)
{
+ CPUState *cs = CPU(ppc_env_get_cpu(env));
mmu_ctx_t ctx;
int access_type;
int ret = 0;
@@ -1505,29 +1514,29 @@ static int cpu_ppc_handle_mmu_fault(CPUPPCState *env, target_ulong address,
}
ret = get_physical_address(env, &ctx, address, rw, access_type);
if (ret == 0) {
- tlb_set_page(env, address & TARGET_PAGE_MASK,
+ tlb_set_page(cs, address & TARGET_PAGE_MASK,
ctx.raddr & TARGET_PAGE_MASK, ctx.prot,
mmu_idx, TARGET_PAGE_SIZE);
ret = 0;
} else if (ret < 0) {
- LOG_MMU_STATE(CPU(ppc_env_get_cpu(env)));
+ LOG_MMU_STATE(cs);
if (access_type == ACCESS_CODE) {
switch (ret) {
case -1:
/* No matches in page tables or TLB */
switch (env->mmu_model) {
case POWERPC_MMU_SOFT_6xx:
- env->exception_index = POWERPC_EXCP_IFTLB;
+ cs->exception_index = POWERPC_EXCP_IFTLB;
env->error_code = 1 << 18;
env->spr[SPR_IMISS] = address;
env->spr[SPR_ICMP] = 0x80000000 | ctx.ptem;
goto tlb_miss;
case POWERPC_MMU_SOFT_74xx:
- env->exception_index = POWERPC_EXCP_IFTLB;
+ cs->exception_index = POWERPC_EXCP_IFTLB;
goto tlb_miss_74xx;
case POWERPC_MMU_SOFT_4xx:
case POWERPC_MMU_SOFT_4xx_Z:
- env->exception_index = POWERPC_EXCP_ITLB;
+ cs->exception_index = POWERPC_EXCP_ITLB;
env->error_code = 0;
env->spr[SPR_40x_DEAR] = address;
env->spr[SPR_40x_ESR] = 0x00000000;
@@ -1536,26 +1545,26 @@ static int cpu_ppc_handle_mmu_fault(CPUPPCState *env, target_ulong address,
booke206_update_mas_tlb_miss(env, address, rw);
/* fall through */
case POWERPC_MMU_BOOKE:
- env->exception_index = POWERPC_EXCP_ITLB;
+ cs->exception_index = POWERPC_EXCP_ITLB;
env->error_code = 0;
env->spr[SPR_BOOKE_DEAR] = address;
return -1;
case POWERPC_MMU_MPC8xx:
/* XXX: TODO */
- cpu_abort(env, "MPC8xx MMU model is not implemented\n");
+ cpu_abort(cs, "MPC8xx MMU model is not implemented\n");
break;
case POWERPC_MMU_REAL:
- cpu_abort(env, "PowerPC in real mode should never raise "
+ cpu_abort(cs, "PowerPC in real mode should never raise "
"any MMU exceptions\n");
return -1;
default:
- cpu_abort(env, "Unknown or invalid MMU model\n");
+ cpu_abort(cs, "Unknown or invalid MMU model\n");
return -1;
}
break;
case -2:
/* Access rights violation */
- env->exception_index = POWERPC_EXCP_ISI;
+ cs->exception_index = POWERPC_EXCP_ISI;
env->error_code = 0x08000000;
break;
case -3:
@@ -1564,13 +1573,13 @@ static int cpu_ppc_handle_mmu_fault(CPUPPCState *env, target_ulong address,
(env->mmu_model == POWERPC_MMU_BOOKE206)) {
env->spr[SPR_BOOKE_ESR] = 0x00000000;
}
- env->exception_index = POWERPC_EXCP_ISI;
+ cs->exception_index = POWERPC_EXCP_ISI;
env->error_code = 0x10000000;
break;
case -4:
/* Direct store exception */
/* No code fetch is allowed in direct-store areas */
- env->exception_index = POWERPC_EXCP_ISI;
+ cs->exception_index = POWERPC_EXCP_ISI;
env->error_code = 0x10000000;
break;
}
@@ -1581,10 +1590,10 @@ static int cpu_ppc_handle_mmu_fault(CPUPPCState *env, target_ulong address,
switch (env->mmu_model) {
case POWERPC_MMU_SOFT_6xx:
if (rw == 1) {
- env->exception_index = POWERPC_EXCP_DSTLB;
+ cs->exception_index = POWERPC_EXCP_DSTLB;
env->error_code = 1 << 16;
} else {
- env->exception_index = POWERPC_EXCP_DLTLB;
+ cs->exception_index = POWERPC_EXCP_DLTLB;
env->error_code = 0;
}
env->spr[SPR_DMISS] = address;
@@ -1598,9 +1607,9 @@ static int cpu_ppc_handle_mmu_fault(CPUPPCState *env, target_ulong address,
break;
case POWERPC_MMU_SOFT_74xx:
if (rw == 1) {
- env->exception_index = POWERPC_EXCP_DSTLB;
+ cs->exception_index = POWERPC_EXCP_DSTLB;
} else {
- env->exception_index = POWERPC_EXCP_DLTLB;
+ cs->exception_index = POWERPC_EXCP_DLTLB;
}
tlb_miss_74xx:
/* Implement LRU algorithm */
@@ -1611,7 +1620,7 @@ static int cpu_ppc_handle_mmu_fault(CPUPPCState *env, target_ulong address,
break;
case POWERPC_MMU_SOFT_4xx:
case POWERPC_MMU_SOFT_4xx_Z:
- env->exception_index = POWERPC_EXCP_DTLB;
+ cs->exception_index = POWERPC_EXCP_DTLB;
env->error_code = 0;
env->spr[SPR_40x_DEAR] = address;
if (rw) {
@@ -1622,29 +1631,29 @@ static int cpu_ppc_handle_mmu_fault(CPUPPCState *env, target_ulong address,
break;
case POWERPC_MMU_MPC8xx:
/* XXX: TODO */
- cpu_abort(env, "MPC8xx MMU model is not implemented\n");
+ cpu_abort(cs, "MPC8xx MMU model is not implemented\n");
break;
case POWERPC_MMU_BOOKE206:
booke206_update_mas_tlb_miss(env, address, rw);
/* fall through */
case POWERPC_MMU_BOOKE:
- env->exception_index = POWERPC_EXCP_DTLB;
+ cs->exception_index = POWERPC_EXCP_DTLB;
env->error_code = 0;
env->spr[SPR_BOOKE_DEAR] = address;
env->spr[SPR_BOOKE_ESR] = rw ? ESR_ST : 0;
return -1;
case POWERPC_MMU_REAL:
- cpu_abort(env, "PowerPC in real mode should never raise "
+ cpu_abort(cs, "PowerPC in real mode should never raise "
"any MMU exceptions\n");
return -1;
default:
- cpu_abort(env, "Unknown or invalid MMU model\n");
+ cpu_abort(cs, "Unknown or invalid MMU model\n");
return -1;
}
break;
case -2:
/* Access rights violation */
- env->exception_index = POWERPC_EXCP_DSI;
+ cs->exception_index = POWERPC_EXCP_DSI;
env->error_code = 0;
if (env->mmu_model == POWERPC_MMU_SOFT_4xx
|| env->mmu_model == POWERPC_MMU_SOFT_4xx_Z) {
@@ -1670,13 +1679,13 @@ static int cpu_ppc_handle_mmu_fault(CPUPPCState *env, target_ulong address,
switch (access_type) {
case ACCESS_FLOAT:
/* Floating point load/store */
- env->exception_index = POWERPC_EXCP_ALIGN;
+ cs->exception_index = POWERPC_EXCP_ALIGN;
env->error_code = POWERPC_EXCP_ALIGN_FP;
env->spr[SPR_DAR] = address;
break;
case ACCESS_RES:
/* lwarx, ldarx or stwcx. */
- env->exception_index = POWERPC_EXCP_DSI;
+ cs->exception_index = POWERPC_EXCP_DSI;
env->error_code = 0;
env->spr[SPR_DAR] = address;
if (rw == 1) {
@@ -1687,7 +1696,7 @@ static int cpu_ppc_handle_mmu_fault(CPUPPCState *env, target_ulong address,
break;
case ACCESS_EXT:
/* eciwx or ecowx */
- env->exception_index = POWERPC_EXCP_DSI;
+ cs->exception_index = POWERPC_EXCP_DSI;
env->error_code = 0;
env->spr[SPR_DAR] = address;
if (rw == 1) {
@@ -1698,7 +1707,7 @@ static int cpu_ppc_handle_mmu_fault(CPUPPCState *env, target_ulong address,
break;
default:
printf("DSI: invalid exception (%d)\n", ret);
- env->exception_index = POWERPC_EXCP_PROGRAM;
+ cs->exception_index = POWERPC_EXCP_PROGRAM;
env->error_code =
POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL;
env->spr[SPR_DAR] = address;
@@ -1709,7 +1718,7 @@ static int cpu_ppc_handle_mmu_fault(CPUPPCState *env, target_ulong address,
}
#if 0
printf("%s: set exception to %d %02x\n", __func__,
- env->exception, env->error_code);
+ cs->exception, env->error_code);
#endif
ret = 1;
}
@@ -1723,6 +1732,7 @@ static int cpu_ppc_handle_mmu_fault(CPUPPCState *env, target_ulong address,
static inline void do_invalidate_BAT(CPUPPCState *env, target_ulong BATu,
target_ulong mask)
{
+ CPUState *cs = CPU(ppc_env_get_cpu(env));
target_ulong base, end, page;
base = BATu & ~0x0001FFFF;
@@ -1730,7 +1740,7 @@ static inline void do_invalidate_BAT(CPUPPCState *env, target_ulong BATu,
LOG_BATS("Flush BAT from " TARGET_FMT_lx " to " TARGET_FMT_lx " ("
TARGET_FMT_lx ")\n", base, end, mask);
for (page = base; page != end; page += TARGET_PAGE_SIZE) {
- tlb_flush_page(env, page);
+ tlb_flush_page(cs, page);
}
LOG_BATS("Flush done\n");
}
@@ -1892,6 +1902,8 @@ void helper_store_601_batl(CPUPPCState *env, uint32_t nr, target_ulong value)
/* TLB management */
void ppc_tlb_invalidate_all(CPUPPCState *env)
{
+ PowerPCCPU *cpu = ppc_env_get_cpu(env);
+
switch (env->mmu_model) {
case POWERPC_MMU_SOFT_6xx:
case POWERPC_MMU_SOFT_74xx:
@@ -1902,14 +1914,14 @@ void ppc_tlb_invalidate_all(CPUPPCState *env)
ppc4xx_tlb_invalidate_all(env);
break;
case POWERPC_MMU_REAL:
- cpu_abort(env, "No TLB for PowerPC 4xx in real mode\n");
+ cpu_abort(CPU(cpu), "No TLB for PowerPC 4xx in real mode\n");
break;
case POWERPC_MMU_MPC8xx:
/* XXX: TODO */
- cpu_abort(env, "MPC8xx MMU model is not implemented\n");
+ cpu_abort(CPU(cpu), "MPC8xx MMU model is not implemented\n");
break;
case POWERPC_MMU_BOOKE:
- tlb_flush(env, 1);
+ tlb_flush(CPU(cpu), 1);
break;
case POWERPC_MMU_BOOKE206:
booke206_flush_tlb(env, -1, 0);
@@ -1922,11 +1934,11 @@ void ppc_tlb_invalidate_all(CPUPPCState *env)
case POWERPC_MMU_2_06a:
case POWERPC_MMU_2_06d:
#endif /* defined(TARGET_PPC64) */
- tlb_flush(env, 1);
+ tlb_flush(CPU(cpu), 1);
break;
default:
/* XXX: TODO */
- cpu_abort(env, "Unknown MMU model\n");
+ cpu_abort(CPU(cpu), "Unknown MMU model\n");
break;
}
}
@@ -1934,6 +1946,9 @@ void ppc_tlb_invalidate_all(CPUPPCState *env)
void ppc_tlb_invalidate_one(CPUPPCState *env, target_ulong addr)
{
#if !defined(FLUSH_ALL_TLBS)
+ PowerPCCPU *cpu = ppc_env_get_cpu(env);
+ CPUState *cs;
+
addr &= TARGET_PAGE_MASK;
switch (env->mmu_model) {
case POWERPC_MMU_SOFT_6xx:
@@ -1948,43 +1963,44 @@ void ppc_tlb_invalidate_one(CPUPPCState *env, target_ulong addr)
ppc4xx_tlb_invalidate_virt(env, addr, env->spr[SPR_40x_PID]);
break;
case POWERPC_MMU_REAL:
- cpu_abort(env, "No TLB for PowerPC 4xx in real mode\n");
+ cpu_abort(CPU(cpu), "No TLB for PowerPC 4xx in real mode\n");
break;
case POWERPC_MMU_MPC8xx:
/* XXX: TODO */
- cpu_abort(env, "MPC8xx MMU model is not implemented\n");
+ cpu_abort(CPU(cpu), "MPC8xx MMU model is not implemented\n");
break;
case POWERPC_MMU_BOOKE:
/* XXX: TODO */
- cpu_abort(env, "BookE MMU model is not implemented\n");
+ cpu_abort(CPU(cpu), "BookE MMU model is not implemented\n");
break;
case POWERPC_MMU_BOOKE206:
/* XXX: TODO */
- cpu_abort(env, "BookE 2.06 MMU model is not implemented\n");
+ cpu_abort(CPU(cpu), "BookE 2.06 MMU model is not implemented\n");
break;
case POWERPC_MMU_32B:
case POWERPC_MMU_601:
/* tlbie invalidate TLBs for all segments */
addr &= ~((target_ulong)-1ULL << 28);
+ cs = CPU(cpu);
/* XXX: this case should be optimized,
* giving a mask to tlb_flush_page
*/
- tlb_flush_page(env, addr | (0x0 << 28));
- tlb_flush_page(env, addr | (0x1 << 28));
- tlb_flush_page(env, addr | (0x2 << 28));
- tlb_flush_page(env, addr | (0x3 << 28));
- tlb_flush_page(env, addr | (0x4 << 28));
- tlb_flush_page(env, addr | (0x5 << 28));
- tlb_flush_page(env, addr | (0x6 << 28));
- tlb_flush_page(env, addr | (0x7 << 28));
- tlb_flush_page(env, addr | (0x8 << 28));
- tlb_flush_page(env, addr | (0x9 << 28));
- tlb_flush_page(env, addr | (0xA << 28));
- tlb_flush_page(env, addr | (0xB << 28));
- tlb_flush_page(env, addr | (0xC << 28));
- tlb_flush_page(env, addr | (0xD << 28));
- tlb_flush_page(env, addr | (0xE << 28));
- tlb_flush_page(env, addr | (0xF << 28));
+ tlb_flush_page(cs, addr | (0x0 << 28));
+ tlb_flush_page(cs, addr | (0x1 << 28));
+ tlb_flush_page(cs, addr | (0x2 << 28));
+ tlb_flush_page(cs, addr | (0x3 << 28));
+ tlb_flush_page(cs, addr | (0x4 << 28));
+ tlb_flush_page(cs, addr | (0x5 << 28));
+ tlb_flush_page(cs, addr | (0x6 << 28));
+ tlb_flush_page(cs, addr | (0x7 << 28));
+ tlb_flush_page(cs, addr | (0x8 << 28));
+ tlb_flush_page(cs, addr | (0x9 << 28));
+ tlb_flush_page(cs, addr | (0xA << 28));
+ tlb_flush_page(cs, addr | (0xB << 28));
+ tlb_flush_page(cs, addr | (0xC << 28));
+ tlb_flush_page(cs, addr | (0xD << 28));
+ tlb_flush_page(cs, addr | (0xE << 28));
+ tlb_flush_page(cs, addr | (0xF << 28));
break;
#if defined(TARGET_PPC64)
case POWERPC_MMU_64B:
@@ -1996,12 +2012,12 @@ void ppc_tlb_invalidate_one(CPUPPCState *env, target_ulong addr)
* and we still don't have a tlb_flush_mask(env, n, mask) in QEMU,
* we just invalidate all TLBs
*/
- tlb_flush(env, 1);
+ tlb_flush(CPU(cpu), 1);
break;
#endif /* defined(TARGET_PPC64) */
default:
/* XXX: TODO */
- cpu_abort(env, "Unknown MMU model\n");
+ cpu_abort(CPU(cpu), "Unknown MMU model\n");
break;
}
#else
@@ -2013,6 +2029,8 @@ void ppc_tlb_invalidate_one(CPUPPCState *env, target_ulong addr)
/* Special registers manipulation */
void ppc_store_sdr1(CPUPPCState *env, target_ulong value)
{
+ PowerPCCPU *cpu = ppc_env_get_cpu(env);
+
LOG_MMU("%s: " TARGET_FMT_lx "\n", __func__, value);
assert(!env->external_htab);
if (env->spr[SPR_SDR1] != value) {
@@ -2035,7 +2053,7 @@ void ppc_store_sdr1(CPUPPCState *env, target_ulong value)
env->htab_mask = ((value & SDR_32_HTABMASK) << 16) | 0xFFFF;
env->htab_base = value & SDR_32_HTABORG;
}
- tlb_flush(env, 1);
+ tlb_flush(CPU(cpu), 1);
}
}
@@ -2053,6 +2071,8 @@ target_ulong helper_load_sr(CPUPPCState *env, target_ulong sr_num)
void helper_store_sr(CPUPPCState *env, target_ulong srnum, target_ulong value)
{
+ PowerPCCPU *cpu = ppc_env_get_cpu(env);
+
LOG_MMU("%s: reg=%d " TARGET_FMT_lx " " TARGET_FMT_lx "\n", __func__,
(int)srnum, value, env->sr[srnum]);
#if defined(TARGET_PPC64)
@@ -2085,11 +2105,11 @@ void helper_store_sr(CPUPPCState *env, target_ulong srnum, target_ulong value)
page = (16 << 20) * srnum;
end = page + (16 << 20);
for (; page != end; page += TARGET_PAGE_SIZE) {
- tlb_flush_page(env, page);
+ tlb_flush_page(CPU(cpu), page);
}
}
#else
- tlb_flush(env, 1);
+ tlb_flush(CPU(cpu), 1);
#endif
}
}
@@ -2316,6 +2336,8 @@ target_ulong helper_4xx_tlbre_lo(CPUPPCState *env, target_ulong entry)
void helper_4xx_tlbwe_hi(CPUPPCState *env, target_ulong entry,
target_ulong val)
{
+ PowerPCCPU *cpu = ppc_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
ppcemb_tlb_t *tlb;
target_ulong page, end;
@@ -2329,7 +2351,7 @@ void helper_4xx_tlbwe_hi(CPUPPCState *env, target_ulong entry,
LOG_SWTLB("%s: invalidate old TLB %d start " TARGET_FMT_lx " end "
TARGET_FMT_lx "\n", __func__, (int)entry, tlb->EPN, end);
for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) {
- tlb_flush_page(env, page);
+ tlb_flush_page(cs, page);
}
}
tlb->size = booke_tlb_to_page_size((val >> PPC4XX_TLBHI_SIZE_SHIFT)
@@ -2339,7 +2361,7 @@ void helper_4xx_tlbwe_hi(CPUPPCState *env, target_ulong entry,
* of the ppc or ppc64 one
*/
if ((val & PPC4XX_TLBHI_V) && tlb->size < TARGET_PAGE_SIZE) {
- cpu_abort(env, "TLB size " TARGET_FMT_lu " < %u "
+ cpu_abort(cs, "TLB size " TARGET_FMT_lu " < %u "
"are not supported (%d)\n",
tlb->size, TARGET_PAGE_SIZE, (int)((val >> 7) & 0x7));
}
@@ -2348,7 +2370,7 @@ void helper_4xx_tlbwe_hi(CPUPPCState *env, target_ulong entry,
tlb->prot |= PAGE_VALID;
if (val & PPC4XX_TLBHI_E) {
/* XXX: TO BE FIXED */
- cpu_abort(env,
+ cpu_abort(cs,
"Little-endian TLB entries are not supported by now\n");
}
} else {
@@ -2368,7 +2390,7 @@ void helper_4xx_tlbwe_hi(CPUPPCState *env, target_ulong entry,
LOG_SWTLB("%s: invalidate TLB %d start " TARGET_FMT_lx " end "
TARGET_FMT_lx "\n", __func__, (int)entry, tlb->EPN, end);
for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) {
- tlb_flush_page(env, page);
+ tlb_flush_page(cs, page);
}
}
}
@@ -2409,6 +2431,7 @@ target_ulong helper_4xx_tlbsx(CPUPPCState *env, target_ulong address)
void helper_440_tlbwe(CPUPPCState *env, uint32_t word, target_ulong entry,
target_ulong value)
{
+ PowerPCCPU *cpu = ppc_env_get_cpu(env);
ppcemb_tlb_t *tlb;
target_ulong EPN, RPN, size;
int do_flush_tlbs;
@@ -2444,13 +2467,13 @@ void helper_440_tlbwe(CPUPPCState *env, uint32_t word, target_ulong entry,
}
tlb->PID = env->spr[SPR_440_MMUCR] & 0x000000FF;
if (do_flush_tlbs) {
- tlb_flush(env, 1);
+ tlb_flush(CPU(cpu), 1);
}
break;
case 1:
RPN = value & 0xFFFFFC0F;
if ((tlb->prot & PAGE_VALID) && tlb->RPN != RPN) {
- tlb_flush(env, 1);
+ tlb_flush(CPU(cpu), 1);
}
tlb->RPN = RPN;
break;
@@ -2544,6 +2567,7 @@ target_ulong helper_440_tlbsx(CPUPPCState *env, target_ulong address)
static ppcmas_tlb_t *booke206_cur_tlb(CPUPPCState *env)
{
+ PowerPCCPU *cpu = ppc_env_get_cpu(env);
uint32_t tlbncfg = 0;
int esel = (env->spr[SPR_BOOKE_MAS0] & MAS0_ESEL_MASK) >> MAS0_ESEL_SHIFT;
int ea = (env->spr[SPR_BOOKE_MAS2] & MAS2_EPN_MASK);
@@ -2553,7 +2577,7 @@ static ppcmas_tlb_t *booke206_cur_tlb(CPUPPCState *env)
tlbncfg = env->spr[SPR_BOOKE_TLB0CFG + tlb];
if ((tlbncfg & TLBnCFG_HES) && (env->spr[SPR_BOOKE_MAS0] & MAS0_HES)) {
- cpu_abort(env, "we don't support HES yet\n");
+ cpu_abort(CPU(cpu), "we don't support HES yet\n");
}
return booke206_get_tlbm(env, tlb, ea, esel);
@@ -2561,13 +2585,16 @@ static ppcmas_tlb_t *booke206_cur_tlb(CPUPPCState *env)
void helper_booke_setpid(CPUPPCState *env, uint32_t pidn, target_ulong pid)
{
+ PowerPCCPU *cpu = ppc_env_get_cpu(env);
+
env->spr[pidn] = pid;
/* changing PIDs mean we're in a different address space now */
- tlb_flush(env, 1);
+ tlb_flush(CPU(cpu), 1);
}
void helper_booke206_tlbwe(CPUPPCState *env)
{
+ PowerPCCPU *cpu = ppc_env_get_cpu(env);
uint32_t tlbncfg, tlbn;
ppcmas_tlb_t *tlb;
uint32_t size_tlb, size_ps;
@@ -2621,7 +2648,7 @@ void helper_booke206_tlbwe(CPUPPCState *env)
}
if (msr_gs) {
- cpu_abort(env, "missing HV implementation\n");
+ cpu_abort(CPU(cpu), "missing HV implementation\n");
}
tlb->mas7_3 = ((uint64_t)env->spr[SPR_BOOKE_MAS7] << 32) |
env->spr[SPR_BOOKE_MAS3];
@@ -2655,9 +2682,9 @@ void helper_booke206_tlbwe(CPUPPCState *env)
}
if (booke206_tlb_to_page_size(env, tlb) == TARGET_PAGE_SIZE) {
- tlb_flush_page(env, tlb->mas2 & MAS2_EPN_MASK);
+ tlb_flush_page(CPU(cpu), tlb->mas2 & MAS2_EPN_MASK);
} else {
- tlb_flush(env, 1);
+ tlb_flush(CPU(cpu), 1);
}
}
@@ -2764,6 +2791,8 @@ static inline void booke206_invalidate_ea_tlb(CPUPPCState *env, int tlbn,
void helper_booke206_tlbivax(CPUPPCState *env, target_ulong address)
{
+ PowerPCCPU *cpu = ppc_env_get_cpu(env);
+
if (address & 0x4) {
/* flush all entries */
if (address & 0x8) {
@@ -2779,11 +2808,11 @@ void helper_booke206_tlbivax(CPUPPCState *env, target_ulong address)
if (address & 0x8) {
/* flush TLB1 entries */
booke206_invalidate_ea_tlb(env, 1, address);
- tlb_flush(env, 1);
+ tlb_flush(CPU(cpu), 1);
} else {
/* flush TLB0 entries */
booke206_invalidate_ea_tlb(env, 0, address);
- tlb_flush_page(env, address & MAS2_EPN_MASK);
+ tlb_flush_page(CPU(cpu), address & MAS2_EPN_MASK);
}
}
@@ -2795,6 +2824,7 @@ void helper_booke206_tlbilx0(CPUPPCState *env, target_ulong address)
void helper_booke206_tlbilx1(CPUPPCState *env, target_ulong address)
{
+ PowerPCCPU *cpu = ppc_env_get_cpu(env);
int i, j;
int tid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID);
ppcmas_tlb_t *tlb = env->tlb.tlbm;
@@ -2811,11 +2841,12 @@ void helper_booke206_tlbilx1(CPUPPCState *env, target_ulong address)
}
tlb += booke206_tlb_size(env, i);
}
- tlb_flush(env, 1);
+ tlb_flush(CPU(cpu), 1);
}
void helper_booke206_tlbilx3(CPUPPCState *env, target_ulong address)
{
+ PowerPCCPU *cpu = ppc_env_get_cpu(env);
int i, j;
ppcmas_tlb_t *tlb;
int tid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID);
@@ -2851,7 +2882,7 @@ void helper_booke206_tlbilx3(CPUPPCState *env, target_ulong address)
tlb->mas1 &= ~MAS1_VALID;
}
}
- tlb_flush(env, 1);
+ tlb_flush(CPU(cpu), 1);
}
void helper_booke206_tlbflush(CPUPPCState *env, uint32_t type)
@@ -2892,23 +2923,24 @@ void helper_booke206_tlbflush(CPUPPCState *env, uint32_t type)
NULL, it means that the function was called in C code (i.e. not
from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
-void tlb_fill(CPUPPCState *env, target_ulong addr, int is_write, int mmu_idx,
+void tlb_fill(CPUState *cs, target_ulong addr, int is_write, int mmu_idx,
uintptr_t retaddr)
{
- CPUState *cpu = CPU(ppc_env_get_cpu(env));
- PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cs);
+ CPUPPCState *env = &cpu->env;
int ret;
if (pcc->handle_mmu_fault) {
- ret = pcc->handle_mmu_fault(env, addr, is_write, mmu_idx);
+ ret = pcc->handle_mmu_fault(cpu, addr, is_write, mmu_idx);
} else {
ret = cpu_ppc_handle_mmu_fault(env, addr, is_write, mmu_idx);
}
if (unlikely(ret != 0)) {
if (likely(retaddr)) {
/* now we have a real cpu fault */
- cpu_restore_state(env, retaddr);
+ cpu_restore_state(cs, retaddr);
}
- helper_raise_exception_err(env, env->exception_index, env->error_code);
+ helper_raise_exception_err(env, cs->exception_index, env->error_code);
}
}
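
The target-ppc/mmu_helper.c hunks above all apply one pattern: the generic exception_index field moves out of the per-target CPUPPCState into the shared CPUState, and helpers such as tlb_fill now receive the CPUState directly. Below is a minimal, self-contained sketch of that split; the struct and function names are mock stand-ins for illustration, not QEMU's real API.

    #include <stdio.h>

    /* Mock stand-ins for CPUState / CPUPPCState; not the real QEMU types. */
    typedef struct MockCPUState { int exception_index; } MockCPUState;
    typedef struct MockPPCEnv  { int error_code; unsigned long dar; } MockPPCEnv;
    typedef struct MockPPCCPU  { MockCPUState parent; MockPPCEnv env; } MockPPCCPU;

    /* After the refactor, a fault handler records the generic exception number
     * in the CPUState and keeps only target-specific details in env. */
    static void record_dsi_fault(MockCPUState *cs, MockPPCEnv *env, unsigned long address)
    {
        cs->exception_index = 0x300;   /* mock stand-in for POWERPC_EXCP_DSI */
        env->error_code = 0;
        env->dar = address;            /* faulting address stays per target  */
    }

    int main(void)
    {
        MockPPCCPU cpu = { { -1 }, { 0, 0 } };
        record_dsi_fault(&cpu.parent, &cpu.env, 0xdead000);
        printf("excp=%d dar=%#lx\n", cpu.parent.exception_index, cpu.env.dar);
        return 0;
    }

The same split is what lets tlb_flush()/tlb_flush_page() and cpu_abort() take a CPUState in the hunks above: generic machinery no longer needs to know the target's env layout.
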
diff --git a/target-ppc/translate.c b/target-ppc/translate.c
index 91c33dcd1d..e3fcb03c26 100644
--- a/target-ppc/translate.c
+++ b/target-ppc/translate.c
@@ -11377,8 +11377,8 @@ static inline void gen_intermediate_code_internal(PowerPCCPU *cpu,
/* Set env in case of segfault during code fetch */
while (ctx.exception == POWERPC_EXCP_NONE
&& tcg_ctx.gen_opc_ptr < gen_opc_end) {
- if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
- QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
+ if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
+ QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
if (bp->pc == ctx.nip) {
gen_debug_exception(ctxp);
break;
diff --git a/target-ppc/translate_init.c b/target-ppc/translate_init.c
index 3eafbb0d1a..4d94015942 100644
--- a/target-ppc/translate_init.c
+++ b/target-ppc/translate_init.c
@@ -631,7 +631,7 @@ static inline void _spr_register(CPUPPCState *env, int num,
#if defined(CONFIG_KVM)
spr->one_reg_id = one_reg_id,
#endif
- env->spr[num] = initial_value;
+ env->spr[num] = spr->default_value = initial_value;
}
/* Generic PowerPC SPRs */
@@ -4432,6 +4432,7 @@ enum fsl_e500_version {
static void init_proc_e500 (CPUPPCState *env, int version)
{
+ PowerPCCPU *cpu = ppc_env_get_cpu(env);
uint32_t tlbncfg[2];
uint64_t ivor_mask;
uint64_t ivpr_mask = 0xFFFF0000ULL;
@@ -4490,7 +4491,7 @@ static void init_proc_e500 (CPUPPCState *env, int version)
tlbncfg[1] = gen_tlbncfg(64, 1, 12, TLBnCFG_AVAIL | TLBnCFG_IPROT, 64);
break;
default:
- cpu_abort(env, "Unknown CPU: " TARGET_FMT_lx "\n", env->spr[SPR_PVR]);
+ cpu_abort(CPU(cpu), "Unknown CPU: " TARGET_FMT_lx "\n", env->spr[SPR_PVR]);
}
#endif
/* Cache sizes */
@@ -4507,7 +4508,7 @@ static void init_proc_e500 (CPUPPCState *env, int version)
l1cfg0 |= 0x1000000; /* 64 byte cache block size */
break;
default:
- cpu_abort(env, "Unknown CPU: " TARGET_FMT_lx "\n", env->spr[SPR_PVR]);
+ cpu_abort(CPU(cpu), "Unknown CPU: " TARGET_FMT_lx "\n", env->spr[SPR_PVR]);
}
gen_spr_BookE206(env, 0x000000DF, tlbncfg);
/* XXX : not implemented */
@@ -6698,6 +6699,8 @@ POWERPC_FAMILY(970)(ObjectClass *oc, void *data)
pcc->flags = POWERPC_FLAG_VRE | POWERPC_FLAG_SE |
POWERPC_FLAG_BE | POWERPC_FLAG_PMM |
POWERPC_FLAG_BUS_CLK;
+ pcc->l1_dcache_size = 0x8000;
+ pcc->l1_icache_size = 0x10000;
}
static int check_pow_970FX (CPUPPCState *env)
@@ -6790,6 +6793,8 @@ POWERPC_FAMILY(970FX)(ObjectClass *oc, void *data)
pcc->flags = POWERPC_FLAG_VRE | POWERPC_FLAG_SE |
POWERPC_FLAG_BE | POWERPC_FLAG_PMM |
POWERPC_FLAG_BUS_CLK;
+ pcc->l1_dcache_size = 0x8000;
+ pcc->l1_icache_size = 0x10000;
}
static int check_pow_970MP (CPUPPCState *env)
@@ -6876,6 +6881,8 @@ POWERPC_FAMILY(970MP)(ObjectClass *oc, void *data)
pcc->flags = POWERPC_FLAG_VRE | POWERPC_FLAG_SE |
POWERPC_FLAG_BE | POWERPC_FLAG_PMM |
POWERPC_FLAG_BUS_CLK;
+ pcc->l1_dcache_size = 0x8000;
+ pcc->l1_icache_size = 0x10000;
}
static void init_proc_power5plus(CPUPPCState *env)
@@ -6966,6 +6973,8 @@ POWERPC_FAMILY(POWER5P)(ObjectClass *oc, void *data)
pcc->flags = POWERPC_FLAG_VRE | POWERPC_FLAG_SE |
POWERPC_FLAG_BE | POWERPC_FLAG_PMM |
POWERPC_FLAG_BUS_CLK;
+ pcc->l1_dcache_size = 0x8000;
+ pcc->l1_icache_size = 0x10000;
}
static void init_proc_POWER7 (CPUPPCState *env)
@@ -7074,7 +7083,7 @@ POWERPC_FAMILY(POWER7)(ObjectClass *oc, void *data)
PPC2_PERM_ISA206 | PPC2_DIVE_ISA206 |
PPC2_ATOMIC_ISA206 | PPC2_FP_CVT_ISA206 |
PPC2_FP_TST_ISA206;
- pcc->msr_mask = 0x800000000284FF37ULL;
+ pcc->msr_mask = 0x800000000280FF37ULL;
pcc->mmu_model = POWERPC_MMU_2_06;
#if defined(CONFIG_SOFTMMU)
pcc->handle_mmu_fault = ppc_hash64_handle_mmu_fault;
@@ -7117,7 +7126,7 @@ POWERPC_FAMILY(POWER7P)(ObjectClass *oc, void *data)
PPC2_PERM_ISA206 | PPC2_DIVE_ISA206 |
PPC2_ATOMIC_ISA206 | PPC2_FP_CVT_ISA206 |
PPC2_FP_TST_ISA206;
- pcc->msr_mask = 0x800000000204FF37ULL;
+ pcc->msr_mask = 0x800000000280FF37ULL;
pcc->mmu_model = POWERPC_MMU_2_06;
#if defined(CONFIG_SOFTMMU)
pcc->handle_mmu_fault = ppc_hash64_handle_mmu_fault;
@@ -7156,7 +7165,7 @@ POWERPC_FAMILY(POWER8)(ObjectClass *oc, void *data)
pcc->pvr_mask = CPU_POWERPC_POWER8_MASK;
pcc->init_proc = init_proc_POWER8;
pcc->check_pow = check_pow_nocheck;
- pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB |
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_ISEL | PPC_STRING | PPC_MFTB |
PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES |
PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE |
PPC_FLOAT_FRSQRTES |
@@ -7172,8 +7181,9 @@ POWERPC_FAMILY(POWER8)(ObjectClass *oc, void *data)
PPC2_PERM_ISA206 | PPC2_DIVE_ISA206 |
PPC2_ATOMIC_ISA206 | PPC2_FP_CVT_ISA206 |
PPC2_FP_TST_ISA206 | PPC2_BCTAR_ISA207 |
- PPC2_LSQ_ISA207 | PPC2_ALTIVEC_207;
- pcc->msr_mask = 0x800000000284FF36ULL;
+ PPC2_LSQ_ISA207 | PPC2_ALTIVEC_207 |
+ PPC2_ISA205 | PPC2_ISA207S;
+ pcc->msr_mask = 0x800000000280FF37ULL;
pcc->mmu_model = POWERPC_MMU_2_06;
#if defined(CONFIG_SOFTMMU)
pcc->handle_mmu_fault = ppc_hash64_handle_mmu_fault;
@@ -7433,7 +7443,7 @@ static int create_new_table (opc_handler_t **table, unsigned char idx)
{
opc_handler_t **tmp;
- tmp = g_malloc(0x20 * sizeof(opc_handler_t));
+ tmp = g_new(opc_handler_t *, 0x20);
fill_new_table(tmp, 0x20);
table[idx] = (opc_handler_t *)((uintptr_t)tmp | PPC_INDIRECT);
@@ -7847,6 +7857,12 @@ static void ppc_cpu_realizefn(DeviceState *dev, Error **errp)
max_smt, kvm_enabled() ? "KVM" : "TCG");
return;
}
+ if (!is_power_of_2(smp_threads)) {
+ error_setg(errp, "Cannot support %d threads on PPC with %s, "
+ "threads count must be a power of 2.",
+ smp_threads, kvm_enabled() ? "KVM" : "TCG");
+ return;
+ }
cpu->cpu_dt_id = (cs->cpu_index / smp_threads) * max_smt
+ (cs->cpu_index % smp_threads);
@@ -8220,26 +8236,7 @@ static ObjectClass *ppc_cpu_class_by_name(const char *name)
PowerPCCPU *cpu_ppc_init(const char *cpu_model)
{
- PowerPCCPU *cpu;
- ObjectClass *oc;
- Error *err = NULL;
-
- oc = ppc_cpu_class_by_name(cpu_model);
- if (oc == NULL) {
- return NULL;
- }
-
- cpu = POWERPC_CPU(object_new(object_class_get_name(oc)));
-
- object_property_set_bool(OBJECT(cpu), true, "realized", &err);
- if (err != NULL) {
- error_report("%s", error_get_pretty(err));
- error_free(err);
- object_unref(OBJECT(cpu));
- return NULL;
- }
-
- return cpu;
+ return POWERPC_CPU(cpu_generic_init(TYPE_POWERPC_CPU, cpu_model));
}
/* Sort by PVR, ordering special case "host" last. */
@@ -8384,6 +8381,14 @@ static void ppc_cpu_set_pc(CPUState *cs, vaddr value)
cpu->env.nip = value;
}
+static bool ppc_cpu_has_work(CPUState *cs)
+{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *env = &cpu->env;
+
+ return msr_ee && (cs->interrupt_request & CPU_INTERRUPT_HARD);
+}
+
/* CPUClass::reset() */
static void ppc_cpu_reset(CPUState *s)
{
@@ -8391,6 +8396,7 @@ static void ppc_cpu_reset(CPUState *s)
PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
CPUPPCState *env = &cpu->env;
target_ulong msr;
+ int i;
pcc->parent_reset(s);
@@ -8433,7 +8439,7 @@ static void ppc_cpu_reset(CPUState *s)
env->reserve_addr = (target_ulong)-1ULL;
/* Be sure no exception or interrupt is pending */
env->pending_interrupts = 0;
- env->exception_index = POWERPC_EXCP_NONE;
+ s->exception_index = POWERPC_EXCP_NONE;
env->error_code = 0;
#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
@@ -8444,8 +8450,17 @@ static void ppc_cpu_reset(CPUState *s)
env->dtl_size = 0;
#endif /* TARGET_PPC64 */
+ for (i = 0; i < ARRAY_SIZE(env->spr_cb); i++) {
+ ppc_spr_t *spr = &env->spr_cb[i];
+
+ if (!spr->name) {
+ continue;
+ }
+ env->spr[i] = spr->default_value;
+ }
+
/* Flush all TLBs */
- tlb_flush(env, 1);
+ tlb_flush(s, 1);
}
static void ppc_cpu_initfn(Object *obj)
@@ -8511,13 +8526,16 @@ static void ppc_cpu_class_init(ObjectClass *oc, void *data)
cc->reset = ppc_cpu_reset;
cc->class_by_name = ppc_cpu_class_by_name;
+ cc->has_work = ppc_cpu_has_work;
cc->do_interrupt = ppc_cpu_do_interrupt;
cc->dump_state = ppc_cpu_dump_state;
cc->dump_statistics = ppc_cpu_dump_statistics;
cc->set_pc = ppc_cpu_set_pc;
cc->gdb_read_register = ppc_cpu_gdb_read_register;
cc->gdb_write_register = ppc_cpu_gdb_write_register;
-#ifndef CONFIG_USER_ONLY
+#ifdef CONFIG_USER_ONLY
+ cc->handle_mmu_fault = ppc_cpu_handle_mmu_fault;
+#else
cc->get_phys_page_debug = ppc_cpu_get_phys_page_debug;
cc->vmsd = &vmstate_ppc_cpu;
#if defined(TARGET_PPC64)
diff --git a/target-ppc/user_only_helper.c b/target-ppc/user_only_helper.c
index 56e686efd1..829f66f504 100644
--- a/target-ppc/user_only_helper.c
+++ b/target-ppc/user_only_helper.c
@@ -20,9 +20,11 @@
#include "cpu.h"
-int cpu_handle_mmu_fault(CPUPPCState *env, target_ulong address, int rw,
- int mmu_idx)
+int ppc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
+ int mmu_idx)
{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *env = &cpu->env;
int exception, error_code;
if (rw == 2) {
@@ -37,7 +39,7 @@ int cpu_handle_mmu_fault(CPUPPCState *env, target_ulong address, int rw,
env->spr[SPR_DAR] = address;
env->spr[SPR_DSISR] = error_code;
}
- env->exception_index = exception;
+ cs->exception_index = exception;
env->error_code = error_code;
return 1;
diff --git a/target-s390x/arch_dump.c b/target-s390x/arch_dump.c
index 5cbb53ca2e..a1554f5754 100644
--- a/target-s390x/arch_dump.c
+++ b/target-s390x/arch_dump.c
@@ -123,7 +123,7 @@ static void s390x_write_elf64_prefix(Note *note, S390CPU *cpu)
}
-struct NoteFuncDescStruct {
+static const struct NoteFuncDescStruct {
int contents_size;
void (*note_contents_func)(Note *note, S390CPU *cpu);
} note_func[] = {
@@ -146,7 +146,7 @@ static int s390x_write_all_elf64_notes(const char *note_name,
void *opaque)
{
Note note;
- NoteFuncDesc *nf;
+ const NoteFuncDesc *nf;
int note_size;
int ret = -1;
@@ -192,7 +192,7 @@ ssize_t cpu_get_note_size(int class, int machine, int nr_cpus)
int name_size = 8; /* "CORE" or "QEMU" rounded */
size_t elf_note_size = 0;
int note_head_size;
- NoteFuncDesc *nf;
+ const NoteFuncDesc *nf;
assert(class == ELFCLASS64);
assert(machine == EM_S390);
diff --git a/target-s390x/cc_helper.c b/target-s390x/cc_helper.c
index a6d60bf885..9e676a5ca7 100644
--- a/target-s390x/cc_helper.c
+++ b/target-s390x/cc_helper.c
@@ -407,6 +407,7 @@ static uint32_t cc_calc_flogr(uint64_t dst)
static uint32_t do_calc_cc(CPUS390XState *env, uint32_t cc_op,
uint64_t src, uint64_t dst, uint64_t vr)
{
+ S390CPU *cpu = s390_env_get_cpu(env);
uint32_t r = 0;
switch (cc_op) {
@@ -524,7 +525,7 @@ static uint32_t do_calc_cc(CPUS390XState *env, uint32_t cc_op,
break;
default:
- cpu_abort(env, "Unknown CC operation: %s\n", cc_name(cc_op));
+ cpu_abort(CPU(cpu), "Unknown CC operation: %s\n", cc_name(cc_op));
}
HELPER_LOG("%s: %15s 0x%016lx 0x%016lx 0x%016lx = %d\n", __func__,
@@ -548,7 +549,7 @@ uint32_t HELPER(calc_cc)(CPUS390XState *env, uint32_t cc_op, uint64_t src,
void HELPER(load_psw)(CPUS390XState *env, uint64_t mask, uint64_t addr)
{
load_psw(env, mask, addr);
- cpu_loop_exit(env);
+ cpu_loop_exit(CPU(s390_env_get_cpu(env)));
}
void HELPER(sacf)(CPUS390XState *env, uint64_t a1)
diff --git a/target-s390x/cpu.c b/target-s390x/cpu.c
index 1a8c1cc39f..dfd83e8aef 100644
--- a/target-s390x/cpu.c
+++ b/target-s390x/cpu.c
@@ -65,6 +65,15 @@ static void s390_cpu_set_pc(CPUState *cs, vaddr value)
cpu->env.psw.addr = value;
}
+static bool s390_cpu_has_work(CPUState *cs)
+{
+ S390CPU *cpu = S390_CPU(cs);
+ CPUS390XState *env = &cpu->env;
+
+ return (cs->interrupt_request & CPU_INTERRUPT_HARD) &&
+ (env->psw.mask & PSW_MASK_EXT);
+}
+
#if !defined(CONFIG_USER_ONLY)
/* S390CPUClass::load_normal() */
static void s390_cpu_load_normal(CPUState *s)
@@ -89,7 +98,7 @@ static void s390_cpu_reset(CPUState *s)
#if !defined(CONFIG_USER_ONLY)
s->halted = 1;
#endif
- tlb_flush(env, 1);
+ tlb_flush(s, 1);
}
/* S390CPUClass::initial_reset() */
@@ -100,7 +109,7 @@ static void s390_cpu_initial_reset(CPUState *s)
s390_cpu_reset(s);
/* initial reset does not touch regs,fregs and aregs */
- memset(&env->fpc, 0, offsetof(CPUS390XState, breakpoints) -
+ memset(&env->fpc, 0, offsetof(CPUS390XState, cpu_num) -
offsetof(CPUS390XState, fpc));
/* architectured initial values for CR 0 and 14 */
@@ -130,7 +139,7 @@ static void s390_cpu_full_reset(CPUState *s)
scc->parent_reset(s);
- memset(env, 0, offsetof(CPUS390XState, breakpoints));
+ memset(env, 0, offsetof(CPUS390XState, cpu_num));
/* architectured initial values for CR 0 and 14 */
env->cregs[0] = CR0_RESET;
@@ -144,7 +153,7 @@ static void s390_cpu_full_reset(CPUState *s)
#if !defined(CONFIG_USER_ONLY)
s->halted = 1;
#endif
- tlb_flush(env, 1);
+ tlb_flush(s, 1);
}
#if !defined(CONFIG_USER_ONLY)
@@ -232,12 +241,15 @@ static void s390_cpu_class_init(ObjectClass *oc, void *data)
scc->cpu_reset = s390_cpu_reset;
scc->initial_cpu_reset = s390_cpu_initial_reset;
cc->reset = s390_cpu_full_reset;
+ cc->has_work = s390_cpu_has_work;
cc->do_interrupt = s390_cpu_do_interrupt;
cc->dump_state = s390_cpu_dump_state;
cc->set_pc = s390_cpu_set_pc;
cc->gdb_read_register = s390_cpu_gdb_read_register;
cc->gdb_write_register = s390_cpu_gdb_write_register;
-#ifndef CONFIG_USER_ONLY
+#ifdef CONFIG_USER_ONLY
+ cc->handle_mmu_fault = s390_cpu_handle_mmu_fault;
+#else
cc->get_phys_page_debug = s390_cpu_get_phys_page_debug;
cc->write_elf64_note = s390_cpu_write_elf64_note;
cc->write_elf64_qemunote = s390_cpu_write_elf64_qemunote;
diff --git a/target-s390x/cpu.h b/target-s390x/cpu.h
index effe84b102..41903a93fb 100644
--- a/target-s390x/cpu.h
+++ b/target-s390x/cpu.h
@@ -126,6 +126,9 @@ typedef struct CPUS390XState {
uint64_t pfault_compare;
uint64_t pfault_select;
+ uint64_t gbea;
+ uint64_t pp;
+
CPU_COMMON
/* reset does memset(0) up to here */
@@ -320,9 +323,8 @@ int cpu_s390x_exec(CPUS390XState *s);
is returned if the signal was handled by the virtual CPU. */
int cpu_s390x_signal_handler(int host_signum, void *pinfo,
void *puc);
-int cpu_s390x_handle_mmu_fault (CPUS390XState *env, target_ulong address, int rw,
- int mmu_idx);
-#define cpu_handle_mmu_fault cpu_s390x_handle_mmu_fault
+int s390_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw,
+ int mmu_idx);
#include "ioinst.h"
@@ -1041,15 +1043,6 @@ static inline void cpu_inject_crw_mchk(S390CPU *cpu)
cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
}
-static inline bool cpu_has_work(CPUState *cpu)
-{
- S390CPU *s390_cpu = S390_CPU(cpu);
- CPUS390XState *env = &s390_cpu->env;
-
- return (cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
- (env->psw.mask & PSW_MASK_EXT);
-}
-
/* fpu_helper.c */
uint32_t set_cc_nz_f32(float32 v);
uint32_t set_cc_nz_f64(float64 v);
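
The cpu.h hunk above removes the per-target static inline cpu_has_work() in favour of a has_work hook installed on the CPU class (see s390_cpu_has_work and cc->has_work in the cpu.c hunk). The following is a hedged, self-contained sketch of that hook-table pattern; the mock types stand in for QEMU's richer QOM CPUClass/CPUState.

    #include <stdbool.h>
    #include <stdio.h>

    /* Mock class/instance structs; the real QOM types carry much more state. */
    typedef struct MockCPUState MockCPUState;
    typedef struct MockCPUClass { bool (*has_work)(MockCPUState *cs); } MockCPUClass;
    struct MockCPUState {
        const MockCPUClass *cc;
        int interrupt_request;
        int ext_mask_enabled;   /* stands in for the PSW_MASK_EXT check */
    };

    /* Target-specific policy, registered on the class instead of living as an
     * inline helper in the target's cpu.h. */
    static bool mock_s390_has_work(MockCPUState *cs)
    {
        return cs->interrupt_request && cs->ext_mask_enabled;
    }

    static const MockCPUClass mock_s390_class = { .has_work = mock_s390_has_work };

    int main(void)
    {
        MockCPUState cs = { &mock_s390_class, 1, 1 };
        /* Generic code now just calls through the hook. */
        printf("has_work=%d\n", cs.cc->has_work(&cs));
        return 0;
    }
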
diff --git a/target-s390x/fpu_helper.c b/target-s390x/fpu_helper.c
index 94375b6a63..3e9c7b2d68 100644
--- a/target-s390x/fpu_helper.c
+++ b/target-s390x/fpu_helper.c
@@ -80,6 +80,8 @@ static void handle_exceptions(CPUS390XState *env, uintptr_t retaddr)
static inline int float_comp_to_cc(CPUS390XState *env, int float_compare)
{
+ S390CPU *cpu = s390_env_get_cpu(env);
+
switch (float_compare) {
case float_relation_equal:
return 0;
@@ -90,7 +92,7 @@ static inline int float_comp_to_cc(CPUS390XState *env, int float_compare)
case float_relation_unordered:
return 3;
default:
- cpu_abort(env, "unknown return value for float compare\n");
+ cpu_abort(CPU(cpu), "unknown return value for float compare\n");
}
}
diff --git a/target-s390x/helper.c b/target-s390x/helper.c
index aa537e1bff..aa628b8fe2 100644
--- a/target-s390x/helper.c
+++ b/target-s390x/helper.c
@@ -85,20 +85,19 @@ S390CPU *cpu_s390x_init(const char *cpu_model)
void s390_cpu_do_interrupt(CPUState *cs)
{
- S390CPU *cpu = S390_CPU(cs);
- CPUS390XState *env = &cpu->env;
-
- env->exception_index = -1;
+ cs->exception_index = -1;
}
-int cpu_s390x_handle_mmu_fault(CPUS390XState *env, target_ulong address,
- int rw, int mmu_idx)
+int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
+ int rw, int mmu_idx)
{
- env->exception_index = EXCP_PGM;
- env->int_pgm_code = PGM_ADDRESSING;
+ S390CPU *cpu = S390_CPU(cs);
+
+ cs->exception_index = EXCP_PGM;
+ cpu->env.int_pgm_code = PGM_ADDRESSING;
/* On real machines this value is dropped into LowMem. Since this
is userland, simply put this someplace that cpu_loop can find it. */
- env->__excp_addr = address;
+ cpu->env.__excp_addr = address;
return 1;
}
@@ -108,13 +107,16 @@ int cpu_s390x_handle_mmu_fault(CPUS390XState *env, target_ulong address,
static void trigger_pgm_exception(CPUS390XState *env, uint32_t code,
uint32_t ilen)
{
- env->exception_index = EXCP_PGM;
+ CPUState *cs = CPU(s390_env_get_cpu(env));
+
+ cs->exception_index = EXCP_PGM;
env->int_pgm_code = code;
env->int_pgm_ilen = ilen;
}
static int trans_bits(CPUS390XState *env, uint64_t mode)
{
+ S390CPU *cpu = s390_env_get_cpu(env);
int bits = 0;
switch (mode) {
@@ -128,7 +130,7 @@ static int trans_bits(CPUS390XState *env, uint64_t mode)
bits = 3;
break;
default:
- cpu_abort(env, "unknown asc mode\n");
+ cpu_abort(CPU(cpu), "unknown asc mode\n");
break;
}
@@ -138,7 +140,7 @@ static int trans_bits(CPUS390XState *env, uint64_t mode)
static void trigger_prot_fault(CPUS390XState *env, target_ulong vaddr,
uint64_t mode)
{
- CPUState *cs = ENV_GET_CPU(env);
+ CPUState *cs = CPU(s390_env_get_cpu(env));
int ilen = ILEN_LATER_INC;
int bits = trans_bits(env, mode) | 4;
@@ -152,7 +154,7 @@ static void trigger_prot_fault(CPUS390XState *env, target_ulong vaddr,
static void trigger_page_fault(CPUS390XState *env, target_ulong vaddr,
uint32_t type, uint64_t asc, int rw)
{
- CPUState *cs = ENV_GET_CPU(env);
+ CPUState *cs = CPU(s390_env_get_cpu(env));
int ilen = ILEN_LATER;
int bits = trans_bits(env, asc);
@@ -172,7 +174,7 @@ static int mmu_translate_asce(CPUS390XState *env, target_ulong vaddr,
uint64_t asc, uint64_t asce, int level,
target_ulong *raddr, int *flags, int rw)
{
- CPUState *cs = ENV_GET_CPU(env);
+ CPUState *cs = CPU(s390_env_get_cpu(env));
uint64_t offs = 0;
uint64_t origin;
uint64_t new_asce;
@@ -379,14 +381,16 @@ int mmu_translate(CPUS390XState *env, target_ulong vaddr, int rw, uint64_t asc,
return r;
}
-int cpu_s390x_handle_mmu_fault(CPUS390XState *env, target_ulong orig_vaddr,
- int rw, int mmu_idx)
+int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr orig_vaddr,
+ int rw, int mmu_idx)
{
+ S390CPU *cpu = S390_CPU(cs);
+ CPUS390XState *env = &cpu->env;
uint64_t asc = env->psw.mask & PSW_MASK_ASC;
target_ulong vaddr, raddr;
int prot;
- DPRINTF("%s: address 0x%" PRIx64 " rw %d mmu_idx %d\n",
+ DPRINTF("%s: address 0x%" VADDR_PRIx " rw %d mmu_idx %d\n",
__func__, orig_vaddr, rw, mmu_idx);
orig_vaddr &= TARGET_PAGE_MASK;
@@ -413,7 +417,7 @@ int cpu_s390x_handle_mmu_fault(CPUS390XState *env, target_ulong orig_vaddr,
DPRINTF("%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n", __func__,
(uint64_t)vaddr, (uint64_t)raddr, prot);
- tlb_set_page(env, orig_vaddr, raddr, prot,
+ tlb_set_page(cs, orig_vaddr, raddr, prot,
mmu_idx, TARGET_PAGE_SIZE);
return 0;
@@ -425,7 +429,7 @@ hwaddr s390_cpu_get_phys_page_debug(CPUState *cs, vaddr vaddr)
CPUS390XState *env = &cpu->env;
target_ulong raddr;
int prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
- int old_exc = env->exception_index;
+ int old_exc = cs->exception_index;
uint64_t asc = env->psw.mask & PSW_MASK_ASC;
/* 31-Bit mode */
@@ -434,7 +438,7 @@ hwaddr s390_cpu_get_phys_page_debug(CPUState *cs, vaddr vaddr)
}
mmu_translate(env, vaddr, 2, asc, &raddr, &prot);
- env->exception_index = old_exc;
+ cs->exception_index = old_exc;
return raddr;
}
@@ -452,7 +456,7 @@ void load_psw(CPUS390XState *env, uint64_t mask, uint64_t addr)
}
}
cs->halted = 1;
- env->exception_index = EXCP_HLT;
+ cs->exception_index = EXCP_HLT;
}
env->psw.addr = addr;
@@ -476,13 +480,14 @@ static uint64_t get_psw_mask(CPUS390XState *env)
static LowCore *cpu_map_lowcore(CPUS390XState *env)
{
+ S390CPU *cpu = s390_env_get_cpu(env);
LowCore *lowcore;
hwaddr len = sizeof(LowCore);
lowcore = cpu_physical_memory_map(env->psa, &len, 1);
if (len < sizeof(LowCore)) {
- cpu_abort(env, "Could not map lowcore\n");
+ cpu_abort(CPU(cpu), "Could not map lowcore\n");
}
return lowcore;
@@ -580,16 +585,17 @@ static void do_program_interrupt(CPUS390XState *env)
static void do_ext_interrupt(CPUS390XState *env)
{
+ S390CPU *cpu = s390_env_get_cpu(env);
uint64_t mask, addr;
LowCore *lowcore;
ExtQueue *q;
if (!(env->psw.mask & PSW_MASK_EXT)) {
- cpu_abort(env, "Ext int w/o ext mask\n");
+ cpu_abort(CPU(cpu), "Ext int w/o ext mask\n");
}
if (env->ext_index < 0 || env->ext_index > MAX_EXT_QUEUE) {
- cpu_abort(env, "Ext queue overrun: %d\n", env->ext_index);
+ cpu_abort(CPU(cpu), "Ext queue overrun: %d\n", env->ext_index);
}
q = &env->ext_queue[env->ext_index];
@@ -619,6 +625,7 @@ static void do_ext_interrupt(CPUS390XState *env)
static void do_io_interrupt(CPUS390XState *env)
{
+ S390CPU *cpu = s390_env_get_cpu(env);
LowCore *lowcore;
IOIntQueue *q;
uint8_t isc;
@@ -626,7 +633,7 @@ static void do_io_interrupt(CPUS390XState *env)
int found = 0;
if (!(env->psw.mask & PSW_MASK_IO)) {
- cpu_abort(env, "I/O int w/o I/O mask\n");
+ cpu_abort(CPU(cpu), "I/O int w/o I/O mask\n");
}
for (isc = 0; isc < ARRAY_SIZE(env->io_index); isc++) {
@@ -636,7 +643,7 @@ static void do_io_interrupt(CPUS390XState *env)
continue;
}
if (env->io_index[isc] > MAX_IO_QUEUE) {
- cpu_abort(env, "I/O queue overrun for isc %d: %d\n",
+ cpu_abort(CPU(cpu), "I/O queue overrun for isc %d: %d\n",
isc, env->io_index[isc]);
}
@@ -683,24 +690,25 @@ static void do_io_interrupt(CPUS390XState *env)
static void do_mchk_interrupt(CPUS390XState *env)
{
+ S390CPU *cpu = s390_env_get_cpu(env);
uint64_t mask, addr;
LowCore *lowcore;
MchkQueue *q;
int i;
if (!(env->psw.mask & PSW_MASK_MCHECK)) {
- cpu_abort(env, "Machine check w/o mchk mask\n");
+ cpu_abort(CPU(cpu), "Machine check w/o mchk mask\n");
}
if (env->mchk_index < 0 || env->mchk_index > MAX_MCHK_QUEUE) {
- cpu_abort(env, "Mchk queue overrun: %d\n", env->mchk_index);
+ cpu_abort(CPU(cpu), "Mchk queue overrun: %d\n", env->mchk_index);
}
q = &env->mchk_queue[env->mchk_index];
if (q->type != 1) {
/* Don't know how to handle this... */
- cpu_abort(env, "Unknown machine check type %d\n", q->type);
+ cpu_abort(CPU(cpu), "Unknown machine check type %d\n", q->type);
}
if (!(env->cregs[14] & (1 << 28))) {
/* CRW machine checks disabled */
@@ -749,43 +757,43 @@ void s390_cpu_do_interrupt(CPUState *cs)
CPUS390XState *env = &cpu->env;
qemu_log_mask(CPU_LOG_INT, "%s: %d at pc=%" PRIx64 "\n",
- __func__, env->exception_index, env->psw.addr);
+ __func__, cs->exception_index, env->psw.addr);
s390_add_running_cpu(cpu);
/* handle machine checks */
if ((env->psw.mask & PSW_MASK_MCHECK) &&
- (env->exception_index == -1)) {
+ (cs->exception_index == -1)) {
if (env->pending_int & INTERRUPT_MCHK) {
- env->exception_index = EXCP_MCHK;
+ cs->exception_index = EXCP_MCHK;
}
}
/* handle external interrupts */
if ((env->psw.mask & PSW_MASK_EXT) &&
- env->exception_index == -1) {
+ cs->exception_index == -1) {
if (env->pending_int & INTERRUPT_EXT) {
/* code is already in env */
- env->exception_index = EXCP_EXT;
+ cs->exception_index = EXCP_EXT;
} else if (env->pending_int & INTERRUPT_TOD) {
cpu_inject_ext(cpu, 0x1004, 0, 0);
- env->exception_index = EXCP_EXT;
+ cs->exception_index = EXCP_EXT;
env->pending_int &= ~INTERRUPT_EXT;
env->pending_int &= ~INTERRUPT_TOD;
} else if (env->pending_int & INTERRUPT_CPUTIMER) {
cpu_inject_ext(cpu, 0x1005, 0, 0);
- env->exception_index = EXCP_EXT;
+ cs->exception_index = EXCP_EXT;
env->pending_int &= ~INTERRUPT_EXT;
env->pending_int &= ~INTERRUPT_TOD;
}
}
/* handle I/O interrupts */
if ((env->psw.mask & PSW_MASK_IO) &&
- (env->exception_index == -1)) {
+ (cs->exception_index == -1)) {
if (env->pending_int & INTERRUPT_IO) {
- env->exception_index = EXCP_IO;
+ cs->exception_index = EXCP_IO;
}
}
- switch (env->exception_index) {
+ switch (cs->exception_index) {
case EXCP_PGM:
do_program_interrupt(env);
break;
@@ -802,7 +810,7 @@ void s390_cpu_do_interrupt(CPUState *cs)
do_mchk_interrupt(env);
break;
}
- env->exception_index = -1;
+ cs->exception_index = -1;
if (!env->pending_int) {
cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
diff --git a/target-s390x/int_helper.c b/target-s390x/int_helper.c
index 85e49aafa6..6a929ca1f3 100644
--- a/target-s390x/int_helper.c
+++ b/target-s390x/int_helper.c
@@ -106,9 +106,10 @@ uint64_t HELPER(divu64)(CPUS390XState *env, uint64_t ah, uint64_t al,
runtime_exception(env, PGM_FIXPT_DIVIDE, GETPC());
}
#else
+ S390CPU *cpu = s390_env_get_cpu(env);
/* 32-bit hosts would need special wrapper functionality - just abort if
we encounter such a case; it's very unlikely anyways. */
- cpu_abort(env, "128 -> 64/64 division not implemented\n");
+ cpu_abort(CPU(cpu), "128 -> 64/64 division not implemented\n");
#endif
}
return ret;
diff --git a/target-s390x/kvm.c b/target-s390x/kvm.c
index 56b9af7505..a30d1bc060 100644
--- a/target-s390x/kvm.c
+++ b/target-s390x/kvm.c
@@ -36,6 +36,7 @@
#include "sysemu/device_tree.h"
#include "qapi/qmp/qjson.h"
#include "monitor/monitor.h"
+#include "trace.h"
/* #define DEBUG_KVM */
@@ -128,14 +129,42 @@ void kvm_arch_reset_vcpu(CPUState *cpu)
}
}
+static int kvm_set_one_reg(CPUState *cs, uint64_t id, void *source)
+{
+ struct kvm_one_reg reg;
+ int r;
+
+ reg.id = id;
+ reg.addr = (uint64_t) source;
+ r = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+ if (r) {
+ trace_kvm_failed_reg_set(id, strerror(errno));
+ }
+ return r;
+}
+
+static int kvm_get_one_reg(CPUState *cs, uint64_t id, void *target)
+{
+ struct kvm_one_reg reg;
+ int r;
+
+ reg.id = id;
+ reg.addr = (uint64_t) target;
+ r = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+ if (r) {
+ trace_kvm_failed_reg_get(id, strerror(errno));
+ }
+ return r;
+}
+
+
int kvm_arch_put_registers(CPUState *cs, int level)
{
S390CPU *cpu = S390_CPU(cs);
CPUS390XState *env = &cpu->env;
- struct kvm_one_reg reg;
struct kvm_sregs sregs;
struct kvm_regs regs;
- int ret;
+ int r;
int i;
/* always save the PSW and the GPRS*/
@@ -151,9 +180,9 @@ int kvm_arch_put_registers(CPUState *cs, int level)
for (i = 0; i < 16; i++) {
regs.gprs[i] = env->regs[i];
}
- ret = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs);
- if (ret < 0) {
- return ret;
+ r = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs);
+ if (r < 0) {
+ return r;
}
}
@@ -162,47 +191,29 @@ int kvm_arch_put_registers(CPUState *cs, int level)
return 0;
}
- reg.id = KVM_REG_S390_CPU_TIMER;
- reg.addr = (__u64)&(env->cputm);
- ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
- if (ret < 0) {
- return ret;
- }
-
- reg.id = KVM_REG_S390_CLOCK_COMP;
- reg.addr = (__u64)&(env->ckc);
- ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
- if (ret < 0) {
- return ret;
- }
-
- reg.id = KVM_REG_S390_TODPR;
- reg.addr = (__u64)&(env->todpr);
- ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
- if (ret < 0) {
- return ret;
- }
+ /*
+ * These ONE_REGS are not protected by a capability. As they are only
+ * necessary for migration we just trace a possible error, but don't
+ * return with an error return code.
+ */
+ kvm_set_one_reg(cs, KVM_REG_S390_CPU_TIMER, &env->cputm);
+ kvm_set_one_reg(cs, KVM_REG_S390_CLOCK_COMP, &env->ckc);
+ kvm_set_one_reg(cs, KVM_REG_S390_TODPR, &env->todpr);
+ kvm_set_one_reg(cs, KVM_REG_S390_GBEA, &env->gbea);
+ kvm_set_one_reg(cs, KVM_REG_S390_PP, &env->pp);
if (cap_async_pf) {
- reg.id = KVM_REG_S390_PFTOKEN;
- reg.addr = (__u64)&(env->pfault_token);
- ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
- if (ret < 0) {
- return ret;
+ r = kvm_set_one_reg(cs, KVM_REG_S390_PFTOKEN, &env->pfault_token);
+ if (r < 0) {
+ return r;
}
-
- reg.id = KVM_REG_S390_PFCOMPARE;
- reg.addr = (__u64)&(env->pfault_compare);
- ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
- if (ret < 0) {
- return ret;
+ r = kvm_set_one_reg(cs, KVM_REG_S390_PFCOMPARE, &env->pfault_compare);
+ if (r < 0) {
+ return r;
}
-
- reg.id = KVM_REG_S390_PFSELECT;
- reg.addr = (__u64)&(env->pfault_select);
- ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
- if (ret < 0) {
- return ret;
+ r = kvm_set_one_reg(cs, KVM_REG_S390_PFSELECT, &env->pfault_select);
+ if (r < 0) {
+ return r;
}
}
@@ -220,9 +231,9 @@ int kvm_arch_put_registers(CPUState *cs, int level)
sregs.acrs[i] = env->aregs[i];
sregs.crs[i] = env->cregs[i];
}
- ret = kvm_vcpu_ioctl(cs, KVM_SET_SREGS, &sregs);
- if (ret < 0) {
- return ret;
+ r = kvm_vcpu_ioctl(cs, KVM_SET_SREGS, &sregs);
+ if (r < 0) {
+ return r;
}
}
@@ -240,7 +251,6 @@ int kvm_arch_get_registers(CPUState *cs)
{
S390CPU *cpu = S390_CPU(cs);
CPUS390XState *env = &cpu->env;
- struct kvm_one_reg reg;
struct kvm_sregs sregs;
struct kvm_regs regs;
int i, r;
@@ -288,46 +298,27 @@ int kvm_arch_get_registers(CPUState *cs)
env->psa = cs->kvm_run->s.regs.prefix;
}
- /* One Regs */
- reg.id = KVM_REG_S390_CPU_TIMER;
- reg.addr = (__u64)&(env->cputm);
- r = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
- if (r < 0) {
- return r;
- }
-
- reg.id = KVM_REG_S390_CLOCK_COMP;
- reg.addr = (__u64)&(env->ckc);
- r = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
- if (r < 0) {
- return r;
- }
-
- reg.id = KVM_REG_S390_TODPR;
- reg.addr = (__u64)&(env->todpr);
- r = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
- if (r < 0) {
- return r;
- }
+ /*
+ * These ONE_REGS are not protected by a capability. As they are only
+ * necessary for migration we just trace a possible error, but don't
+ * return with an error return code.
+ */
+ kvm_get_one_reg(cs, KVM_REG_S390_CPU_TIMER, &env->cputm);
+ kvm_get_one_reg(cs, KVM_REG_S390_CLOCK_COMP, &env->ckc);
+ kvm_get_one_reg(cs, KVM_REG_S390_TODPR, &env->todpr);
+ kvm_get_one_reg(cs, KVM_REG_S390_GBEA, &env->gbea);
+ kvm_get_one_reg(cs, KVM_REG_S390_PP, &env->pp);
if (cap_async_pf) {
- reg.id = KVM_REG_S390_PFTOKEN;
- reg.addr = (__u64)&(env->pfault_token);
- r = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+ r = kvm_get_one_reg(cs, KVM_REG_S390_PFTOKEN, &env->pfault_token);
if (r < 0) {
return r;
}
-
- reg.id = KVM_REG_S390_PFCOMPARE;
- reg.addr = (__u64)&(env->pfault_compare);
- r = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+ r = kvm_get_one_reg(cs, KVM_REG_S390_PFCOMPARE, &env->pfault_compare);
if (r < 0) {
return r;
}
-
- reg.id = KVM_REG_S390_PFSELECT;
- reg.addr = (__u64)&(env->pfault_select);
- r = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+ r = kvm_get_one_reg(cs, KVM_REG_S390_PFSELECT, &env->pfault_select);
if (r < 0) {
return r;
}
@@ -383,6 +374,26 @@ int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
return 0;
}
+int kvm_arch_insert_hw_breakpoint(target_ulong addr,
+ target_ulong len, int type)
+{
+ return -ENOSYS;
+}
+
+int kvm_arch_remove_hw_breakpoint(target_ulong addr,
+ target_ulong len, int type)
+{
+ return -ENOSYS;
+}
+
+void kvm_arch_remove_all_hw_breakpoints(void)
+{
+}
+
+void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg)
+{
+}
+
void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
{
}
@@ -844,6 +855,11 @@ static int handle_tsch(S390CPU *cpu)
return ret;
}
+static int kvm_arch_handle_debug_exit(S390CPU *cpu)
+{
+ return -ENOSYS;
+}
+
int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
{
S390CPU *cpu = S390_CPU(cs);
@@ -859,6 +875,9 @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
case KVM_EXIT_S390_TSCH:
ret = handle_tsch(cpu);
break;
+ case KVM_EXIT_DEBUG:
+ ret = kvm_arch_handle_debug_exit(cpu);
+ break;
default:
fprintf(stderr, "Unknown KVM exit: %d\n", run->exit_reason);
break;
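
In the kvm.c hunks above, the repeated KVM_SET_ONE_REG/KVM_GET_ONE_REG ioctl blocks are folded into kvm_set_one_reg()/kvm_get_one_reg() wrappers: registers that only matter for migration merely trace a failure, while capability-guarded ones still propagate the error. A minimal sketch of that "trace but do not fail" idiom follows; the ioctl is mocked and the names are illustrative, not the real KVM interface.

    #include <stdio.h>
    #include <stdint.h>

    /* Mock of a register-set ioctl: pretend ids >= 100 are unsupported. */
    static int mock_vcpu_ioctl_set_reg(uint64_t id, uint64_t value)
    {
        (void)value;
        return id >= 100 ? -1 : 0;
    }

    /* Wrapper in the spirit of kvm_set_one_reg(): log the failure and hand the
     * return code back; the caller decides whether it is fatal. */
    static int mock_set_one_reg(uint64_t id, uint64_t value)
    {
        int r = mock_vcpu_ioctl_set_reg(id, value);
        if (r) {
            fprintf(stderr, "setting reg %llu failed\n", (unsigned long long)id);
        }
        return r;
    }

    int main(void)
    {
        mock_set_one_reg(1, 42);            /* migration-only reg: ignore result */
        if (mock_set_one_reg(100, 7) < 0) { /* guarded reg: propagate the error  */
            return 1;
        }
        return 0;
    }
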
diff --git a/target-s390x/mem_helper.c b/target-s390x/mem_helper.c
index 875ea95de4..d8ca3007f8 100644
--- a/target-s390x/mem_helper.c
+++ b/target-s390x/mem_helper.c
@@ -44,18 +44,18 @@
NULL, it means that the function was called in C code (i.e. not
from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
-void tlb_fill(CPUS390XState *env, target_ulong addr, int is_write, int mmu_idx,
+void tlb_fill(CPUState *cs, target_ulong addr, int is_write, int mmu_idx,
uintptr_t retaddr)
{
int ret;
- ret = cpu_s390x_handle_mmu_fault(env, addr, is_write, mmu_idx);
+ ret = s390_cpu_handle_mmu_fault(cs, addr, is_write, mmu_idx);
if (unlikely(ret != 0)) {
if (likely(retaddr)) {
/* now we have a real cpu fault */
- cpu_restore_state(env, retaddr);
+ cpu_restore_state(cs, retaddr);
}
- cpu_loop_exit(env);
+ cpu_loop_exit(cs);
}
}
@@ -72,6 +72,7 @@ void tlb_fill(CPUS390XState *env, target_ulong addr, int is_write, int mmu_idx,
static void mvc_fast_memset(CPUS390XState *env, uint32_t l, uint64_t dest,
uint8_t byte)
{
+ S390CPU *cpu = s390_env_get_cpu(env);
hwaddr dest_phys;
hwaddr len = l;
void *dest_p;
@@ -80,7 +81,7 @@ static void mvc_fast_memset(CPUS390XState *env, uint32_t l, uint64_t dest,
if (mmu_translate(env, dest, 1, asc, &dest_phys, &flags)) {
cpu_stb_data(env, dest, byte);
- cpu_abort(env, "should never reach here");
+ cpu_abort(CPU(cpu), "should never reach here");
}
dest_phys |= dest & ~TARGET_PAGE_MASK;
@@ -94,6 +95,7 @@ static void mvc_fast_memset(CPUS390XState *env, uint32_t l, uint64_t dest,
static void mvc_fast_memmove(CPUS390XState *env, uint32_t l, uint64_t dest,
uint64_t src)
{
+ S390CPU *cpu = s390_env_get_cpu(env);
hwaddr dest_phys;
hwaddr src_phys;
hwaddr len = l;
@@ -104,13 +106,13 @@ static void mvc_fast_memmove(CPUS390XState *env, uint32_t l, uint64_t dest,
if (mmu_translate(env, dest, 1, asc, &dest_phys, &flags)) {
cpu_stb_data(env, dest, 0);
- cpu_abort(env, "should never reach here");
+ cpu_abort(CPU(cpu), "should never reach here");
}
dest_phys |= dest & ~TARGET_PAGE_MASK;
if (mmu_translate(env, src, 0, asc, &src_phys, &flags)) {
cpu_ldub_data(env, src);
- cpu_abort(env, "should never reach here");
+ cpu_abort(CPU(cpu), "should never reach here");
}
src_phys |= src & ~TARGET_PAGE_MASK;
@@ -483,6 +485,7 @@ static uint32_t helper_icm(CPUS390XState *env, uint32_t r1, uint64_t address,
uint32_t HELPER(ex)(CPUS390XState *env, uint32_t cc, uint64_t v1,
uint64_t addr, uint64_t ret)
{
+ S390CPU *cpu = s390_env_get_cpu(env);
uint16_t insn = cpu_lduw_code(env, addr);
HELPER_LOG("%s: v1 0x%lx addr 0x%lx insn 0x%x\n", __func__, v1, addr,
@@ -534,7 +537,7 @@ uint32_t HELPER(ex)(CPUS390XState *env, uint32_t cc, uint64_t v1,
cc = helper_icm(env, r1, get_address(env, 0, b2, d2), r3);
} else {
abort:
- cpu_abort(env, "EXECUTE on instruction prefix 0x%x not implemented\n",
+ cpu_abort(CPU(cpu), "EXECUTE on instruction prefix 0x%x not implemented\n",
insn);
}
return cc;
@@ -807,6 +810,7 @@ void HELPER(tr)(CPUS390XState *env, uint32_t len, uint64_t array,
#if !defined(CONFIG_USER_ONLY)
void HELPER(lctlg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
+ S390CPU *cpu = s390_env_get_cpu(env);
int i;
uint64_t src = a2;
@@ -821,11 +825,12 @@ void HELPER(lctlg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
}
}
- tlb_flush(env, 1);
+ tlb_flush(CPU(cpu), 1);
}
void HELPER(lctl)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
+ S390CPU *cpu = s390_env_get_cpu(env);
int i;
uint64_t src = a2;
@@ -839,7 +844,7 @@ void HELPER(lctl)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
}
}
- tlb_flush(env, 1);
+ tlb_flush(CPU(cpu), 1);
}
void HELPER(stctg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
@@ -932,6 +937,7 @@ uint32_t HELPER(rrbe)(CPUS390XState *env, uint64_t r2)
/* compare and swap and purge */
uint32_t HELPER(csp)(CPUS390XState *env, uint32_t r1, uint64_t r2)
{
+ S390CPU *cpu = s390_env_get_cpu(env);
uint32_t cc;
uint32_t o1 = env->regs[r1];
uint64_t a2 = r2 & ~3ULL;
@@ -941,7 +947,7 @@ uint32_t HELPER(csp)(CPUS390XState *env, uint32_t r1, uint64_t r2)
cpu_stl_data(env, a2, env->regs[(r1 + 1) & 15]);
if (r2 & 0x3) {
/* flush TLB / ALB */
- tlb_flush(env, 1);
+ tlb_flush(CPU(cpu), 1);
}
cc = 0;
} else {
@@ -955,7 +961,7 @@ uint32_t HELPER(csp)(CPUS390XState *env, uint32_t r1, uint64_t r2)
static uint32_t mvc_asc(CPUS390XState *env, int64_t l, uint64_t a1,
uint64_t mode1, uint64_t a2, uint64_t mode2)
{
- CPUState *cs = ENV_GET_CPU(env);
+ CPUState *cs = CPU(s390_env_get_cpu(env));
target_ulong src, dest;
int flags, cc = 0, i;
@@ -968,12 +974,12 @@ static uint32_t mvc_asc(CPUS390XState *env, int64_t l, uint64_t a1,
}
if (mmu_translate(env, a1 & TARGET_PAGE_MASK, 1, mode1, &dest, &flags)) {
- cpu_loop_exit(env);
+ cpu_loop_exit(CPU(s390_env_get_cpu(env)));
}
dest |= a1 & ~TARGET_PAGE_MASK;
if (mmu_translate(env, a2 & TARGET_PAGE_MASK, 0, mode2, &src, &flags)) {
- cpu_loop_exit(env);
+ cpu_loop_exit(CPU(s390_env_get_cpu(env)));
}
src |= a2 & ~TARGET_PAGE_MASK;
@@ -1010,7 +1016,7 @@ uint32_t HELPER(mvcp)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2)
/* invalidate pte */
void HELPER(ipte)(CPUS390XState *env, uint64_t pte_addr, uint64_t vaddr)
{
- CPUState *cs = ENV_GET_CPU(env);
+ CPUState *cs = CPU(s390_env_get_cpu(env));
uint64_t page = vaddr & TARGET_PAGE_MASK;
uint64_t pte = 0;
@@ -1024,34 +1030,38 @@ void HELPER(ipte)(CPUS390XState *env, uint64_t pte_addr, uint64_t vaddr)
/* XXX we exploit the fact that Linux passes the exact virtual
address here - it's not obliged to! */
- tlb_flush_page(env, page);
+ tlb_flush_page(cs, page);
/* XXX 31-bit hack */
if (page & 0x80000000) {
- tlb_flush_page(env, page & ~0x80000000);
+ tlb_flush_page(cs, page & ~0x80000000);
} else {
- tlb_flush_page(env, page | 0x80000000);
+ tlb_flush_page(cs, page | 0x80000000);
}
}
/* flush local tlb */
void HELPER(ptlb)(CPUS390XState *env)
{
- tlb_flush(env, 1);
+ S390CPU *cpu = s390_env_get_cpu(env);
+
+ tlb_flush(CPU(cpu), 1);
}
/* store using real address */
void HELPER(stura)(CPUS390XState *env, uint64_t addr, uint64_t v1)
{
- CPUState *cs = ENV_GET_CPU(env);
+ CPUState *cs = CPU(s390_env_get_cpu(env));
+
stw_phys(cs->as, get_address(env, 0, 0, addr), (uint32_t)v1);
}
/* load real address */
uint64_t HELPER(lra)(CPUS390XState *env, uint64_t addr)
{
+ CPUState *cs = CPU(s390_env_get_cpu(env));
uint32_t cc = 0;
- int old_exc = env->exception_index;
+ int old_exc = cs->exception_index;
uint64_t asc = env->psw.mask & PSW_MASK_ASC;
uint64_t ret;
int flags;
@@ -1061,16 +1071,16 @@ uint64_t HELPER(lra)(CPUS390XState *env, uint64_t addr)
program_interrupt(env, PGM_SPECIAL_OP, 2);
}
- env->exception_index = old_exc;
+ cs->exception_index = old_exc;
if (mmu_translate(env, addr, 0, asc, &ret, &flags)) {
cc = 3;
}
- if (env->exception_index == EXCP_PGM) {
+ if (cs->exception_index == EXCP_PGM) {
ret = env->int_pgm_code | 0x80000000;
} else {
ret |= addr & ~TARGET_PAGE_MASK;
}
- env->exception_index = old_exc;
+ cs->exception_index = old_exc;
env->cc_op = cc;
return ret;
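
The HELPER(lra) hunk above also shows a recurring idiom: save cs->exception_index, run a probing translation that may set it, inspect the result, then restore the saved value so the probe leaves no trace. Below is a hedged, self-contained sketch of that save/probe/restore pattern with a mock translation function in place of QEMU's mmu_translate.

    #include <stdio.h>

    typedef struct MockCPUState { int exception_index; } MockCPUState;

    enum { MOCK_EXCP_NONE = -1, MOCK_EXCP_PGM = 3 };

    /* Mock translation: "faults" for addresses above 0x1000. */
    static int mock_translate(MockCPUState *cs, unsigned long addr, unsigned long *out)
    {
        if (addr > 0x1000) {
            cs->exception_index = MOCK_EXCP_PGM;
            return 1;
        }
        *out = addr | 0x80000000UL;
        return 0;
    }

    static unsigned long probe_address(MockCPUState *cs, unsigned long addr)
    {
        int old_exc = cs->exception_index;   /* save                        */
        unsigned long ret = 0;

        mock_translate(cs, addr, &ret);      /* may set exception_index     */
        if (cs->exception_index == MOCK_EXCP_PGM) {
            ret = 0;                         /* report the failure somehow  */
        }
        cs->exception_index = old_exc;       /* restore: the probe is silent */
        return ret;
    }

    int main(void)
    {
        MockCPUState cs = { MOCK_EXCP_NONE };
        printf("ok=%#lx bad=%#lx excp=%d\n",
               probe_address(&cs, 0x800), probe_address(&cs, 0x2000),
               cs.exception_index);
        return 0;
    }
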
diff --git a/target-s390x/misc_helper.c b/target-s390x/misc_helper.c
index 728456f295..cdbbb79314 100644
--- a/target-s390x/misc_helper.c
+++ b/target-s390x/misc_helper.c
@@ -47,46 +47,53 @@
void QEMU_NORETURN runtime_exception(CPUS390XState *env, int excp,
uintptr_t retaddr)
{
+ CPUState *cs = CPU(s390_env_get_cpu(env));
int t;
- env->exception_index = EXCP_PGM;
+ cs->exception_index = EXCP_PGM;
env->int_pgm_code = excp;
/* Use the (ultimate) callers address to find the insn that trapped. */
- cpu_restore_state(env, retaddr);
+ cpu_restore_state(cs, retaddr);
/* Advance past the insn. */
t = cpu_ldub_code(env, env->psw.addr);
env->int_pgm_ilen = t = get_ilen(t);
env->psw.addr += 2 * t;
- cpu_loop_exit(env);
+ cpu_loop_exit(cs);
}
/* Raise an exception statically from a TB. */
void HELPER(exception)(CPUS390XState *env, uint32_t excp)
{
+ CPUState *cs = CPU(s390_env_get_cpu(env));
+
HELPER_LOG("%s: exception %d\n", __func__, excp);
- env->exception_index = excp;
- cpu_loop_exit(env);
+ cs->exception_index = excp;
+ cpu_loop_exit(cs);
}
#ifndef CONFIG_USER_ONLY
void program_interrupt(CPUS390XState *env, uint32_t code, int ilen)
{
+ S390CPU *cpu = s390_env_get_cpu(env);
+
qemu_log_mask(CPU_LOG_INT, "program interrupt at %#" PRIx64 "\n",
env->psw.addr);
if (kvm_enabled()) {
#ifdef CONFIG_KVM
- kvm_s390_interrupt(s390_env_get_cpu(env), KVM_S390_PROGRAM_INT, code);
+ kvm_s390_interrupt(cpu, KVM_S390_PROGRAM_INT, code);
#endif
} else {
+ CPUState *cs = CPU(cpu);
+
env->int_pgm_code = code;
env->int_pgm_ilen = ilen;
- env->exception_index = EXCP_PGM;
- cpu_loop_exit(env);
+ cs->exception_index = EXCP_PGM;
+ cpu_loop_exit(cs);
}
}
@@ -230,11 +237,13 @@ uint64_t HELPER(diag)(CPUS390XState *env, uint32_t num, uint64_t mem,
/* Set Prefix */
void HELPER(spx)(CPUS390XState *env, uint64_t a1)
{
+ CPUState *cs = CPU(s390_env_get_cpu(env));
uint32_t prefix = a1 & 0x7fffe000;
+
env->psa = prefix;
qemu_log("prefix: %#x\n", prefix);
- tlb_flush_page(env, 0);
- tlb_flush_page(env, TARGET_PAGE_SIZE);
+ tlb_flush_page(cs, 0);
+ tlb_flush_page(cs, TARGET_PAGE_SIZE);
}
static inline uint64_t clock_value(CPUS390XState *env)
@@ -327,7 +336,7 @@ uint32_t HELPER(stsi)(CPUS390XState *env, uint64_t a0,
ebcdic_put(sysib.model, "QEMU ", 16);
ebcdic_put(sysib.sequence, "QEMU ", 16);
ebcdic_put(sysib.plant, "QEMU", 4);
- cpu_physical_memory_rw(a0, (uint8_t *)&sysib, sizeof(sysib), 1);
+ cpu_physical_memory_write(a0, &sysib, sizeof(sysib));
} else if ((sel1 == 2) && (sel2 == 1)) {
/* Basic Machine CPU */
struct sysib_121 sysib;
@@ -337,7 +346,7 @@ uint32_t HELPER(stsi)(CPUS390XState *env, uint64_t a0,
ebcdic_put(sysib.sequence, "QEMUQEMUQEMUQEMU", 16);
ebcdic_put(sysib.plant, "QEMU", 4);
stw_p(&sysib.cpu_addr, env->cpu_num);
- cpu_physical_memory_rw(a0, (uint8_t *)&sysib, sizeof(sysib), 1);
+ cpu_physical_memory_write(a0, &sysib, sizeof(sysib));
} else if ((sel1 == 2) && (sel2 == 2)) {
/* Basic Machine CPUs */
struct sysib_122 sysib;
@@ -349,7 +358,7 @@ uint32_t HELPER(stsi)(CPUS390XState *env, uint64_t a0,
stw_p(&sysib.active_cpus, 1);
stw_p(&sysib.standby_cpus, 0);
stw_p(&sysib.reserved_cpus, 0);
- cpu_physical_memory_rw(a0, (uint8_t *)&sysib, sizeof(sysib), 1);
+ cpu_physical_memory_write(a0, &sysib, sizeof(sysib));
} else {
cc = 3;
}
@@ -366,7 +375,7 @@ uint32_t HELPER(stsi)(CPUS390XState *env, uint64_t a0,
ebcdic_put(sysib.plant, "QEMU", 4);
stw_p(&sysib.cpu_addr, env->cpu_num);
stw_p(&sysib.cpu_id, 0);
- cpu_physical_memory_rw(a0, (uint8_t *)&sysib, sizeof(sysib), 1);
+ cpu_physical_memory_write(a0, &sysib, sizeof(sysib));
} else if ((sel1 == 2) && (sel2 == 2)) {
/* LPAR CPUs */
struct sysib_222 sysib;
@@ -383,7 +392,7 @@ uint32_t HELPER(stsi)(CPUS390XState *env, uint64_t a0,
stl_p(&sysib.caf, 1000);
stw_p(&sysib.dedicated_cpus, 0);
stw_p(&sysib.shared_cpus, 0);
- cpu_physical_memory_rw(a0, (uint8_t *)&sysib, sizeof(sysib), 1);
+ cpu_physical_memory_write(a0, &sysib, sizeof(sysib));
} else {
cc = 3;
}
@@ -405,7 +414,7 @@ uint32_t HELPER(stsi)(CPUS390XState *env, uint64_t a0,
ebcdic_put(sysib.vm[0].name, "KVMguest", 8);
stl_p(&sysib.vm[0].caf, 1000);
ebcdic_put(sysib.vm[0].cpi, "KVM/Linux ", 16);
- cpu_physical_memory_rw(a0, (uint8_t *)&sysib, sizeof(sysib), 1);
+ cpu_physical_memory_write(a0, &sysib, sizeof(sysib));
} else {
cc = 3;
}
@@ -449,11 +458,11 @@ uint32_t HELPER(sigp)(CPUS390XState *env, uint64_t order_code, uint32_t r1,
#if !defined(CONFIG_USER_ONLY)
case SIGP_RESTART:
qemu_system_reset_request();
- cpu_loop_exit(env);
+ cpu_loop_exit(CPU(s390_env_get_cpu(env)));
break;
case SIGP_STOP:
qemu_system_shutdown_request();
- cpu_loop_exit(env);
+ cpu_loop_exit(CPU(s390_env_get_cpu(env)));
break;
#endif
default:
diff --git a/target-s390x/translate.c b/target-s390x/translate.c
index bc99a378a7..81b7e330ab 100644
--- a/target-s390x/translate.c
+++ b/target-s390x/translate.c
@@ -4795,8 +4795,8 @@ static inline void gen_intermediate_code_internal(S390CPU *cpu,
}
status = NO_EXIT;
- if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
- QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
+ if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
+ QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
if (bp->pc == dc.pc) {
status = EXIT_PC_STALE;
do_debug = true;
diff --git a/target-sh4/cpu.c b/target-sh4/cpu.c
index c23294d410..e7f05212da 100644
--- a/target-sh4/cpu.c
+++ b/target-sh4/cpu.c
@@ -39,6 +39,11 @@ static void superh_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
cpu->env.flags = tb->flags;
}
+static bool superh_cpu_has_work(CPUState *cs)
+{
+ return cs->interrupt_request & CPU_INTERRUPT_HARD;
+}
+
/* CPUClass::reset() */
static void superh_cpu_reset(CPUState *s)
{
@@ -48,8 +53,8 @@ static void superh_cpu_reset(CPUState *s)
scc->parent_reset(s);
- memset(env, 0, offsetof(CPUSH4State, breakpoints));
- tlb_flush(env, 1);
+ memset(env, 0, offsetof(CPUSH4State, id));
+ tlb_flush(s, 1);
env->pc = 0xA0000000;
#if defined(CONFIG_USER_ONLY)
@@ -143,18 +148,7 @@ static ObjectClass *superh_cpu_class_by_name(const char *cpu_model)
SuperHCPU *cpu_sh4_init(const char *cpu_model)
{
- SuperHCPU *cpu;
- ObjectClass *oc;
-
- oc = superh_cpu_class_by_name(cpu_model);
- if (oc == NULL) {
- return NULL;
- }
- cpu = SUPERH_CPU(object_new(object_class_get_name(oc)));
-
- object_property_set_bool(OBJECT(cpu), true, "realized", NULL);
-
- return cpu;
+ return SUPERH_CPU(cpu_generic_init(TYPE_SUPERH_CPU, cpu_model));
}
static void sh7750r_cpu_initfn(Object *obj)
@@ -280,13 +274,16 @@ static void superh_cpu_class_init(ObjectClass *oc, void *data)
cc->reset = superh_cpu_reset;
cc->class_by_name = superh_cpu_class_by_name;
+ cc->has_work = superh_cpu_has_work;
cc->do_interrupt = superh_cpu_do_interrupt;
cc->dump_state = superh_cpu_dump_state;
cc->set_pc = superh_cpu_set_pc;
cc->synchronize_from_tb = superh_cpu_synchronize_from_tb;
cc->gdb_read_register = superh_cpu_gdb_read_register;
cc->gdb_write_register = superh_cpu_gdb_write_register;
-#ifndef CONFIG_USER_ONLY
+#ifdef CONFIG_USER_ONLY
+ cc->handle_mmu_fault = superh_cpu_handle_mmu_fault;
+#else
cc->get_phys_page_debug = superh_cpu_get_phys_page_debug;
#endif
dc->vmsd = &vmstate_sh_cpu;
diff --git a/target-sh4/cpu.h b/target-sh4/cpu.h
index c181ddacf5..a2e9e2c031 100644
--- a/target-sh4/cpu.h
+++ b/target-sh4/cpu.h
@@ -175,6 +175,7 @@ typedef struct CPUSH4State {
CPU_COMMON
+ /* Fields from here on are preserved over CPU reset. */
int id; /* CPU model */
/* The features that we should emulate. See sh_features above. */
@@ -193,9 +194,8 @@ SuperHCPU *cpu_sh4_init(const char *cpu_model);
int cpu_sh4_exec(CPUSH4State * s);
int cpu_sh4_signal_handler(int host_signum, void *pinfo,
void *puc);
-int cpu_sh4_handle_mmu_fault(CPUSH4State * env, target_ulong address, int rw,
- int mmu_idx);
-#define cpu_handle_mmu_fault cpu_sh4_handle_mmu_fault
+int superh_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw,
+ int mmu_idx);
void sh4_cpu_list(FILE *f, fprintf_function cpu_fprintf);
#if !defined(CONFIG_USER_ONLY)
@@ -352,11 +352,6 @@ static inline void cpu_get_tb_cpu_state(CPUSH4State *env, target_ulong *pc,
| (env->movcal_backup ? TB_FLAG_PENDING_MOVCA : 0); /* Bit 4 */
}
-static inline bool cpu_has_work(CPUState *cpu)
-{
- return cpu->interrupt_request & CPU_INTERRUPT_HARD;
-}
-
#include "exec/exec-all.h"
#endif /* _CPU_SH4_H */
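[Annotation] cpu_sh4_handle_mmu_fault() loses its cpu_handle_mmu_fault #define here and becomes superh_cpu_handle_mmu_fault() with the (CPUState *, vaddr, ...) signature, which is what lets the cpu.c hunk above register it as cc->handle_mmu_fault for user-only builds. A hedged sketch of how a linux-user style caller would then reach it; the surrounding variable names are illustrative only:

    /* user-only page-fault path, sketched; 'cs' is the faulting vCPU */
    CPUClass *cc = CPU_GET_CLASS(cs);
    int ret = cc->handle_mmu_fault(cs, fault_address, is_write, mmu_idx);
    if (ret == 1) {
        /* no mapping: the hook has set cs->exception_index, and the caller
         * turns that into the corresponding guest signal */
    }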
diff --git a/target-sh4/helper.c b/target-sh4/helper.c
index 9ac28250e0..9ebdd5c9b5 100644
--- a/target-sh4/helper.c
+++ b/target-sh4/helper.c
@@ -33,26 +33,26 @@
void superh_cpu_do_interrupt(CPUState *cs)
{
- SuperHCPU *cpu = SUPERH_CPU(cs);
- CPUSH4State *env = &cpu->env;
-
- env->exception_index = -1;
+ cs->exception_index = -1;
}
-int cpu_sh4_handle_mmu_fault(CPUSH4State * env, target_ulong address, int rw,
- int mmu_idx)
+int superh_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
+ int mmu_idx)
{
+ SuperHCPU *cpu = SUPERH_CPU(cs);
+ CPUSH4State *env = &cpu->env;
+
env->tea = address;
- env->exception_index = -1;
+ cs->exception_index = -1;
switch (rw) {
case 0:
- env->exception_index = 0x0a0;
+ cs->exception_index = 0x0a0;
break;
case 1:
- env->exception_index = 0x0c0;
+ cs->exception_index = 0x0c0;
break;
case 2:
- env->exception_index = 0x0a0;
+ cs->exception_index = 0x0a0;
break;
}
return 1;
@@ -86,16 +86,16 @@ void superh_cpu_do_interrupt(CPUState *cs)
SuperHCPU *cpu = SUPERH_CPU(cs);
CPUSH4State *env = &cpu->env;
int do_irq = cs->interrupt_request & CPU_INTERRUPT_HARD;
- int do_exp, irq_vector = env->exception_index;
+ int do_exp, irq_vector = cs->exception_index;
/* prioritize exceptions over interrupts */
- do_exp = env->exception_index != -1;
- do_irq = do_irq && (env->exception_index == -1);
+ do_exp = cs->exception_index != -1;
+ do_irq = do_irq && (cs->exception_index == -1);
if (env->sr & SR_BL) {
- if (do_exp && env->exception_index != 0x1e0) {
- env->exception_index = 0x000; /* masked exception -> reset */
+ if (do_exp && cs->exception_index != 0x1e0) {
+ cs->exception_index = 0x000; /* masked exception -> reset */
}
if (do_irq && !env->in_sleep) {
return; /* masked */
@@ -113,7 +113,7 @@ void superh_cpu_do_interrupt(CPUState *cs)
if (qemu_loglevel_mask(CPU_LOG_INT)) {
const char *expname;
- switch (env->exception_index) {
+ switch (cs->exception_index) {
case 0x0e0:
expname = "addr_error";
break;
@@ -177,8 +177,8 @@ void superh_cpu_do_interrupt(CPUState *cs)
env->flags = 0;
if (do_exp) {
- env->expevt = env->exception_index;
- switch (env->exception_index) {
+ env->expevt = cs->exception_index;
+ switch (cs->exception_index) {
case 0x000:
case 0x020:
case 0x140:
@@ -234,15 +234,21 @@ static void update_itlb_use(CPUSH4State * env, int itlbnb)
static int itlb_replacement(CPUSH4State * env)
{
- if ((env->mmucr & 0xe0000000) == 0xe0000000)
+ SuperHCPU *cpu = sh_env_get_cpu(env);
+
+ if ((env->mmucr & 0xe0000000) == 0xe0000000) {
return 0;
- if ((env->mmucr & 0x98000000) == 0x18000000)
+ }
+ if ((env->mmucr & 0x98000000) == 0x18000000) {
return 1;
- if ((env->mmucr & 0x54000000) == 0x04000000)
+ }
+ if ((env->mmucr & 0x54000000) == 0x04000000) {
return 2;
- if ((env->mmucr & 0x2c000000) == 0x00000000)
+ }
+ if ((env->mmucr & 0x2c000000) == 0x00000000) {
return 3;
- cpu_abort(env, "Unhandled itlb_replacement");
+ }
+ cpu_abort(CPU(cpu), "Unhandled itlb_replacement");
}
/* Find the corresponding entry in the right TLB
@@ -298,7 +304,7 @@ static int copy_utlb_entry_itlb(CPUSH4State *env, int utlb)
itlb = itlb_replacement(env);
ientry = &env->itlb[itlb];
if (ientry->v) {
- tlb_flush_page(env, ientry->vpn << 10);
+ tlb_flush_page(CPU(sh_env_get_cpu(env)), ientry->vpn << 10);
}
*ientry = env->utlb[utlb];
update_itlb_use(env, itlb);
@@ -447,9 +453,11 @@ static int get_physical_address(CPUSH4State * env, target_ulong * physical,
return get_mmu_address(env, physical, prot, address, rw, access_type);
}
-int cpu_sh4_handle_mmu_fault(CPUSH4State * env, target_ulong address, int rw,
- int mmu_idx)
+int superh_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
+ int mmu_idx)
{
+ SuperHCPU *cpu = SUPERH_CPU(cs);
+ CPUSH4State *env = &cpu->env;
target_ulong physical;
int prot, ret, access_type;
@@ -467,36 +475,36 @@ int cpu_sh4_handle_mmu_fault(CPUSH4State * env, target_ulong address, int rw,
switch (ret) {
case MMU_ITLB_MISS:
case MMU_DTLB_MISS_READ:
- env->exception_index = 0x040;
+ cs->exception_index = 0x040;
break;
case MMU_DTLB_MULTIPLE:
case MMU_ITLB_MULTIPLE:
- env->exception_index = 0x140;
+ cs->exception_index = 0x140;
break;
case MMU_ITLB_VIOLATION:
- env->exception_index = 0x0a0;
+ cs->exception_index = 0x0a0;
break;
case MMU_DTLB_MISS_WRITE:
- env->exception_index = 0x060;
+ cs->exception_index = 0x060;
break;
case MMU_DTLB_INITIAL_WRITE:
- env->exception_index = 0x080;
+ cs->exception_index = 0x080;
break;
case MMU_DTLB_VIOLATION_READ:
- env->exception_index = 0x0a0;
+ cs->exception_index = 0x0a0;
break;
case MMU_DTLB_VIOLATION_WRITE:
- env->exception_index = 0x0c0;
+ cs->exception_index = 0x0c0;
break;
case MMU_IADDR_ERROR:
case MMU_DADDR_ERROR_READ:
- env->exception_index = 0x0e0;
+ cs->exception_index = 0x0e0;
break;
case MMU_DADDR_ERROR_WRITE:
- env->exception_index = 0x100;
+ cs->exception_index = 0x100;
break;
default:
- cpu_abort(env, "Unhandled MMU fault");
+ cpu_abort(cs, "Unhandled MMU fault");
}
return 1;
}
@@ -504,7 +512,7 @@ int cpu_sh4_handle_mmu_fault(CPUSH4State * env, target_ulong address, int rw,
address &= TARGET_PAGE_MASK;
physical &= TARGET_PAGE_MASK;
- tlb_set_page(env, address, physical, prot, mmu_idx, TARGET_PAGE_SIZE);
+ tlb_set_page(cs, address, physical, prot, mmu_idx, TARGET_PAGE_SIZE);
return 0;
}
@@ -520,13 +528,14 @@ hwaddr superh_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
void cpu_load_tlb(CPUSH4State * env)
{
+ SuperHCPU *cpu = sh_env_get_cpu(env);
int n = cpu_mmucr_urc(env->mmucr);
tlb_t * entry = &env->utlb[n];
if (entry->v) {
/* Overwriting valid entry in utlb. */
target_ulong address = entry->vpn << 10;
- tlb_flush_page(env, address);
+ tlb_flush_page(CPU(cpu), address);
}
/* Take values into cpu status from registers. */
@@ -549,7 +558,7 @@ void cpu_load_tlb(CPUSH4State * env)
entry->size = 1024 * 1024; /* 1M */
break;
default:
- cpu_abort(env, "Unhandled load_tlb");
+ cpu_abort(CPU(cpu), "Unhandled load_tlb");
break;
}
entry->sh = (uint8_t)cpu_ptel_sh(env->ptel);
@@ -576,7 +585,7 @@ void cpu_load_tlb(CPUSH4State * env)
entry->v = 0;
}
- tlb_flush(s, 1);
+ tlb_flush(CPU(sh_env_get_cpu(s)), 1);
}
uint32_t cpu_sh4_read_mmaped_itlb_addr(CPUSH4State *s,
@@ -602,7 +611,7 @@ void cpu_sh4_write_mmaped_itlb_addr(CPUSH4State *s, hwaddr addr,
if (entry->v) {
/* Overwriting valid entry in itlb. */
target_ulong address = entry->vpn << 10;
- tlb_flush_page(s, address);
+ tlb_flush_page(CPU(sh_env_get_cpu(s)), address);
}
entry->asid = asid;
entry->vpn = vpn;
@@ -644,7 +653,7 @@ void cpu_sh4_write_mmaped_itlb_data(CPUSH4State *s, hwaddr addr,
if (entry->v) {
/* Overwriting valid entry in utlb. */
target_ulong address = entry->vpn << 10;
- tlb_flush_page(s, address);
+ tlb_flush_page(CPU(sh_env_get_cpu(s)), address);
}
entry->ppn = (mem_value & 0x1ffffc00) >> 10;
entry->v = (mem_value & 0x00000100) >> 8;
@@ -697,8 +706,10 @@ void cpu_sh4_write_mmaped_utlb_addr(CPUSH4State *s, hwaddr addr,
if (entry->vpn == vpn
&& (!use_asid || entry->asid == asid || entry->sh)) {
if (utlb_match_entry) {
+ CPUState *cs = CPU(sh_env_get_cpu(s));
+
/* Multiple TLB Exception */
- s->exception_index = 0x140;
+ cs->exception_index = 0x140;
s->tea = addr;
break;
}
@@ -726,16 +737,19 @@ void cpu_sh4_write_mmaped_utlb_addr(CPUSH4State *s, hwaddr addr,
}
}
- if (needs_tlb_flush)
- tlb_flush_page(s, vpn << 10);
+ if (needs_tlb_flush) {
+ tlb_flush_page(CPU(sh_env_get_cpu(s)), vpn << 10);
+ }
} else {
int index = (addr & 0x00003f00) >> 8;
tlb_t * entry = &s->utlb[index];
if (entry->v) {
+ CPUState *cs = CPU(sh_env_get_cpu(s));
+
/* Overwriting valid entry in utlb. */
target_ulong address = entry->vpn << 10;
- tlb_flush_page(s, address);
+ tlb_flush_page(cs, address);
}
entry->asid = asid;
entry->vpn = vpn;
@@ -786,7 +800,7 @@ void cpu_sh4_write_mmaped_utlb_data(CPUSH4State *s, hwaddr addr,
if (entry->v) {
/* Overwriting valid entry in utlb. */
target_ulong address = entry->vpn << 10;
- tlb_flush_page(s, address);
+ tlb_flush_page(CPU(sh_env_get_cpu(s)), address);
}
entry->ppn = (mem_value & 0x1ffffc00) >> 10;
entry->v = (mem_value & 0x00000100) >> 8;
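[Annotation] With tlb_flush_page() and tlb_flush() now taking a CPUState, every call site in this file spells out CPU(sh_env_get_cpu(s)). A hypothetical local convenience wrapper, not part of this patch, that a target could keep if the repetition gets noisy; purely illustrative:

    /* hypothetical helper (not in the patch): flush one guest page given only env */
    static inline void sh4_flush_vpn(CPUSH4State *env, uint32_t vpn)
    {
        tlb_flush_page(CPU(sh_env_get_cpu(env)), (target_ulong)vpn << 10);
    }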
diff --git a/target-sh4/op_helper.c b/target-sh4/op_helper.c
index e955e810b5..720a97b1d1 100644
--- a/target-sh4/op_helper.c
+++ b/target-sh4/op_helper.c
@@ -38,18 +38,18 @@
#define SHIFT 3
#include "exec/softmmu_template.h"
-void tlb_fill(CPUSH4State *env, target_ulong addr, int is_write, int mmu_idx,
+void tlb_fill(CPUState *cs, target_ulong addr, int is_write, int mmu_idx,
uintptr_t retaddr)
{
int ret;
- ret = cpu_sh4_handle_mmu_fault(env, addr, is_write, mmu_idx);
+ ret = superh_cpu_handle_mmu_fault(cs, addr, is_write, mmu_idx);
if (ret) {
/* now we have a real cpu fault */
if (retaddr) {
- cpu_restore_state(env, retaddr);
+ cpu_restore_state(cs, retaddr);
}
- cpu_loop_exit(env);
+ cpu_loop_exit(cs);
}
}
@@ -58,8 +58,10 @@ void tlb_fill(CPUSH4State *env, target_ulong addr, int is_write, int mmu_idx,
void helper_ldtlb(CPUSH4State *env)
{
#ifdef CONFIG_USER_ONLY
+ SuperHCPU *cpu = sh_env_get_cpu(env);
+
/* XXXXX */
- cpu_abort(env, "Unhandled ldtlb");
+ cpu_abort(CPU(cpu), "Unhandled ldtlb");
#else
cpu_load_tlb(env);
#endif
@@ -68,11 +70,13 @@ void helper_ldtlb(CPUSH4State *env)
static inline void QEMU_NORETURN raise_exception(CPUSH4State *env, int index,
uintptr_t retaddr)
{
- env->exception_index = index;
+ CPUState *cs = CPU(sh_env_get_cpu(env));
+
+ cs->exception_index = index;
if (retaddr) {
- cpu_restore_state(env, retaddr);
+ cpu_restore_state(cs, retaddr);
}
- cpu_loop_exit(env);
+ cpu_loop_exit(cs);
}
void helper_raise_illegal_instruction(CPUSH4State *env)
diff --git a/target-sh4/translate.c b/target-sh4/translate.c
index 661fc6c887..2360609a0a 100644
--- a/target-sh4/translate.c
+++ b/target-sh4/translate.c
@@ -1889,8 +1889,8 @@ gen_intermediate_code_internal(SuperHCPU *cpu, TranslationBlock *tb,
max_insns = CF_COUNT_MASK;
gen_tb_start();
while (ctx.bstate == BS_NONE && tcg_ctx.gen_opc_ptr < gen_opc_end) {
- if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
- QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
+ if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
+ QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
if (ctx.pc == bp->pc) {
/* We have hit a breakpoint - make sure PC is up-to-date */
tcg_gen_movi_i32(cpu_pc, ctx.pc);
diff --git a/target-sparc/cpu.c b/target-sparc/cpu.c
index 5806e59af3..d9f37e9b6a 100644
--- a/target-sparc/cpu.c
+++ b/target-sparc/cpu.c
@@ -18,6 +18,7 @@
*/
#include "cpu.h"
+#include "qemu/error-report.h"
//#define DEBUG_FEATURES
@@ -32,8 +33,8 @@ static void sparc_cpu_reset(CPUState *s)
scc->parent_reset(s);
- memset(env, 0, offsetof(CPUSPARCState, breakpoints));
- tlb_flush(env, 1);
+ memset(env, 0, offsetof(CPUSPARCState, version));
+ tlb_flush(s, 1);
env->cwp = 0;
#ifndef TARGET_SPARC64
env->wim = 1;
@@ -69,21 +70,32 @@ static void sparc_cpu_reset(CPUState *s)
env->cache_control = 0;
}
-static int cpu_sparc_register(CPUSPARCState *env, const char *cpu_model)
+static int cpu_sparc_register(SPARCCPU *cpu, const char *cpu_model)
{
+ CPUClass *cc = CPU_GET_CLASS(cpu);
+ CPUSPARCState *env = &cpu->env;
+ char *s = g_strdup(cpu_model);
+ char *featurestr, *name = strtok(s, ",");
sparc_def_t def1, *def = &def1;
+ Error *err = NULL;
- if (cpu_sparc_find_by_name(def, cpu_model) < 0) {
+ if (cpu_sparc_find_by_name(def, name) < 0) {
+ g_free(s);
return -1;
}
env->def = g_new0(sparc_def_t, 1);
memcpy(env->def, def, sizeof(*def));
-#if defined(CONFIG_USER_ONLY)
- if ((env->def->features & CPU_FEATURE_FLOAT)) {
- env->def->features |= CPU_FEATURE_FLOAT128;
+
+ featurestr = strtok(NULL, ",");
+ cc->parse_features(CPU(cpu), featurestr, &err);
+ g_free(s);
+ if (err) {
+ error_report("%s", error_get_pretty(err));
+ error_free(err);
+ return -1;
}
-#endif
+
env->version = def->iu_version;
env->fsr = def->fpu_version;
env->nwindows = def->nwindows;
@@ -103,12 +115,10 @@ static int cpu_sparc_register(CPUSPARCState *env, const char *cpu_model)
SPARCCPU *cpu_sparc_init(const char *cpu_model)
{
SPARCCPU *cpu;
- CPUSPARCState *env;
cpu = SPARC_CPU(object_new(TYPE_SPARC_CPU));
- env = &cpu->env;
- if (cpu_sparc_register(env, cpu_model) < 0) {
+ if (cpu_sparc_register(cpu, cpu_model) < 0) {
object_unref(OBJECT(cpu));
return NULL;
}
@@ -506,19 +516,13 @@ static void add_flagname_to_bitmaps(const char *flagname, uint32_t *features)
return;
}
}
- fprintf(stderr, "CPU feature %s not found\n", flagname);
+ error_report("CPU feature %s not found", flagname);
}
-static int cpu_sparc_find_by_name(sparc_def_t *cpu_def, const char *cpu_model)
+static int cpu_sparc_find_by_name(sparc_def_t *cpu_def, const char *name)
{
unsigned int i;
const sparc_def_t *def = NULL;
- char *s = g_strdup(cpu_model);
- char *featurestr, *name = strtok(s, ",");
- uint32_t plus_features = 0;
- uint32_t minus_features = 0;
- uint64_t iu_version;
- uint32_t fpu_version, mmu_version, nwindows;
for (i = 0; i < ARRAY_SIZE(sparc_defs); i++) {
if (strcasecmp(name, sparc_defs[i].name) == 0) {
@@ -526,11 +530,24 @@ static int cpu_sparc_find_by_name(sparc_def_t *cpu_def, const char *cpu_model)
}
}
if (!def) {
- goto error;
+ return -1;
}
memcpy(cpu_def, def, sizeof(*def));
+ return 0;
+}
- featurestr = strtok(NULL, ",");
+static void sparc_cpu_parse_features(CPUState *cs, char *features,
+ Error **errp)
+{
+ SPARCCPU *cpu = SPARC_CPU(cs);
+ sparc_def_t *cpu_def = cpu->env.def;
+ char *featurestr;
+ uint32_t plus_features = 0;
+ uint32_t minus_features = 0;
+ uint64_t iu_version;
+ uint32_t fpu_version, mmu_version, nwindows;
+
+ featurestr = features ? strtok(features, ",") : NULL;
while (featurestr) {
char *val;
@@ -545,8 +562,8 @@ static int cpu_sparc_find_by_name(sparc_def_t *cpu_def, const char *cpu_model)
iu_version = strtoll(val, &err, 0);
if (!*val || *err) {
- fprintf(stderr, "bad numerical value %s\n", val);
- goto error;
+ error_setg(errp, "bad numerical value %s", val);
+ return;
}
cpu_def->iu_version = iu_version;
#ifdef DEBUG_FEATURES
@@ -557,8 +574,8 @@ static int cpu_sparc_find_by_name(sparc_def_t *cpu_def, const char *cpu_model)
fpu_version = strtol(val, &err, 0);
if (!*val || *err) {
- fprintf(stderr, "bad numerical value %s\n", val);
- goto error;
+ error_setg(errp, "bad numerical value %s", val);
+ return;
}
cpu_def->fpu_version = fpu_version;
#ifdef DEBUG_FEATURES
@@ -569,8 +586,8 @@ static int cpu_sparc_find_by_name(sparc_def_t *cpu_def, const char *cpu_model)
mmu_version = strtol(val, &err, 0);
if (!*val || *err) {
- fprintf(stderr, "bad numerical value %s\n", val);
- goto error;
+ error_setg(errp, "bad numerical value %s", val);
+ return;
}
cpu_def->mmu_version = mmu_version;
#ifdef DEBUG_FEATURES
@@ -582,21 +599,21 @@ static int cpu_sparc_find_by_name(sparc_def_t *cpu_def, const char *cpu_model)
nwindows = strtol(val, &err, 0);
if (!*val || *err || nwindows > MAX_NWINDOWS ||
nwindows < MIN_NWINDOWS) {
- fprintf(stderr, "bad numerical value %s\n", val);
- goto error;
+ error_setg(errp, "bad numerical value %s", val);
+ return;
}
cpu_def->nwindows = nwindows;
#ifdef DEBUG_FEATURES
fprintf(stderr, "nwindows %d\n", nwindows);
#endif
} else {
- fprintf(stderr, "unrecognized feature %s\n", featurestr);
- goto error;
+ error_setg(errp, "unrecognized feature %s", featurestr);
+ return;
}
} else {
- fprintf(stderr, "feature string `%s' not in format "
- "(+feature|-feature|feature=xyz)\n", featurestr);
- goto error;
+ error_setg(errp, "feature string `%s' not in format "
+ "(+feature|-feature|feature=xyz)", featurestr);
+ return;
}
featurestr = strtok(NULL, ",");
}
@@ -605,12 +622,6 @@ static int cpu_sparc_find_by_name(sparc_def_t *cpu_def, const char *cpu_model)
#ifdef DEBUG_FEATURES
print_features(stderr, fprintf, cpu_def->features, NULL);
#endif
- g_free(s);
- return 0;
-
- error:
- g_free(s);
- return -1;
}
void sparc_cpu_list(FILE *f, fprintf_function cpu_fprintf)
@@ -739,9 +750,26 @@ static void sparc_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
cpu->env.npc = tb->cs_base;
}
+static bool sparc_cpu_has_work(CPUState *cs)
+{
+ SPARCCPU *cpu = SPARC_CPU(cs);
+ CPUSPARCState *env = &cpu->env;
+
+ return (cs->interrupt_request & CPU_INTERRUPT_HARD) &&
+ cpu_interrupts_enabled(env);
+}
+
static void sparc_cpu_realizefn(DeviceState *dev, Error **errp)
{
SPARCCPUClass *scc = SPARC_CPU_GET_CLASS(dev);
+#if defined(CONFIG_USER_ONLY)
+ SPARCCPU *cpu = SPARC_CPU(dev);
+ CPUSPARCState *env = &cpu->env;
+
+ if ((env->def->features & CPU_FEATURE_FLOAT)) {
+ env->def->features |= CPU_FEATURE_FLOAT128;
+ }
+#endif
qemu_init_vcpu(CPU(dev));
@@ -782,6 +810,8 @@ static void sparc_cpu_class_init(ObjectClass *oc, void *data)
scc->parent_reset = cc->reset;
cc->reset = sparc_cpu_reset;
+ cc->parse_features = sparc_cpu_parse_features;
+ cc->has_work = sparc_cpu_has_work;
cc->do_interrupt = sparc_cpu_do_interrupt;
cc->dump_state = sparc_cpu_dump_state;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
@@ -791,7 +821,9 @@ static void sparc_cpu_class_init(ObjectClass *oc, void *data)
cc->synchronize_from_tb = sparc_cpu_synchronize_from_tb;
cc->gdb_read_register = sparc_cpu_gdb_read_register;
cc->gdb_write_register = sparc_cpu_gdb_write_register;
-#ifndef CONFIG_USER_ONLY
+#ifdef CONFIG_USER_ONLY
+ cc->handle_mmu_fault = sparc_cpu_handle_mmu_fault;
+#else
cc->do_unassigned_access = sparc_cpu_unassigned_access;
cc->get_phys_page_debug = sparc_cpu_get_phys_page_debug;
#endif
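[Annotation] The sparc cpu.c changes split model lookup from feature parsing: cpu_sparc_register() tokenizes "model,feature=..." itself and hands the remainder to the new cc->parse_features hook, which reports problems through the Error API instead of fprintf. A small standalone sketch of that error flow, using only calls visible in the hunks above (the propagation comments are mine):

    Error *err = NULL;

    cc->parse_features(CPU(cpu), featurestr, &err);  /* callee uses error_setg(errp, ...) */
    if (err) {
        error_report("%s", error_get_pretty(err));   /* e.g. "bad numerical value ..." */
        error_free(err);
        return -1;                                   /* caller then unrefs the half-built CPU */
    }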
diff --git a/target-sparc/cpu.h b/target-sparc/cpu.h
index ed6d2d1204..f72451d53e 100644
--- a/target-sparc/cpu.h
+++ b/target-sparc/cpu.h
@@ -423,6 +423,7 @@ struct CPUSPARCState {
CPU_COMMON
+ /* Fields from here on are preserved across CPU reset. */
target_ulong version;
uint32_t nwindows;
@@ -521,9 +522,8 @@ SPARCCPU *cpu_sparc_init(const char *cpu_model);
void cpu_sparc_set_id(CPUSPARCState *env, unsigned int cpu);
void sparc_cpu_list(FILE *f, fprintf_function cpu_fprintf);
/* mmu_helper.c */
-int cpu_sparc_handle_mmu_fault(CPUSPARCState *env1, target_ulong address, int rw,
+int sparc_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw,
int mmu_idx);
-#define cpu_handle_mmu_fault cpu_sparc_handle_mmu_fault
target_ulong mmu_probe(CPUSPARCState *env, target_ulong address, int mmulev);
void dump_mmu(FILE *f, fprintf_function cpu_fprintf, CPUSPARCState *env);
@@ -749,15 +749,6 @@ static inline bool tb_am_enabled(int tb_flags)
#endif
}
-static inline bool cpu_has_work(CPUState *cpu)
-{
- SPARCCPU *sparc_cpu = SPARC_CPU(cpu);
- CPUSPARCState *env1 = &sparc_cpu->env;
-
- return (cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
- cpu_interrupts_enabled(env1);
-}
-
#include "exec/exec-all.h"
#endif
diff --git a/target-sparc/helper.c b/target-sparc/helper.c
index 57c20af478..ae7740bca1 100644
--- a/target-sparc/helper.c
+++ b/target-sparc/helper.c
@@ -24,14 +24,18 @@
void helper_raise_exception(CPUSPARCState *env, int tt)
{
- env->exception_index = tt;
- cpu_loop_exit(env);
+ CPUState *cs = CPU(sparc_env_get_cpu(env));
+
+ cs->exception_index = tt;
+ cpu_loop_exit(cs);
}
void helper_debug(CPUSPARCState *env)
{
- env->exception_index = EXCP_DEBUG;
- cpu_loop_exit(env);
+ CPUState *cs = CPU(sparc_env_get_cpu(env));
+
+ cs->exception_index = EXCP_DEBUG;
+ cpu_loop_exit(cs);
}
#ifdef TARGET_SPARC64
@@ -67,6 +71,7 @@ void helper_tick_set_limit(void *opaque, uint64_t limit)
static target_ulong helper_udiv_common(CPUSPARCState *env, target_ulong a,
target_ulong b, int cc)
{
+ SPARCCPU *cpu = sparc_env_get_cpu(env);
int overflow = 0;
uint64_t x0;
uint32_t x1;
@@ -75,13 +80,13 @@ static target_ulong helper_udiv_common(CPUSPARCState *env, target_ulong a,
x1 = (b & 0xffffffff);
if (x1 == 0) {
- cpu_restore_state(env, GETPC());
+ cpu_restore_state(CPU(cpu), GETPC());
helper_raise_exception(env, TT_DIV_ZERO);
}
x0 = x0 / x1;
- if (x0 > 0xffffffff) {
- x0 = 0xffffffff;
+ if (x0 > UINT32_MAX) {
+ x0 = UINT32_MAX;
overflow = 1;
}
@@ -106,6 +111,7 @@ target_ulong helper_udiv_cc(CPUSPARCState *env, target_ulong a, target_ulong b)
static target_ulong helper_sdiv_common(CPUSPARCState *env, target_ulong a,
target_ulong b, int cc)
{
+ SPARCCPU *cpu = sparc_env_get_cpu(env);
int overflow = 0;
int64_t x0;
int32_t x1;
@@ -114,14 +120,17 @@ static target_ulong helper_sdiv_common(CPUSPARCState *env, target_ulong a,
x1 = (b & 0xffffffff);
if (x1 == 0) {
- cpu_restore_state(env, GETPC());
+ cpu_restore_state(CPU(cpu), GETPC());
helper_raise_exception(env, TT_DIV_ZERO);
- }
-
- x0 = x0 / x1;
- if ((int32_t) x0 != x0) {
- x0 = x0 < 0 ? 0x80000000 : 0x7fffffff;
+ } else if (x1 == -1 && x0 == INT64_MIN) {
+ x0 = INT32_MAX;
overflow = 1;
+ } else {
+ x0 = x0 / x1;
+ if ((int32_t) x0 != x0) {
+ x0 = x0 < 0 ? INT32_MIN : INT32_MAX;
+ overflow = 1;
+ }
}
if (cc) {
@@ -147,7 +156,9 @@ int64_t helper_sdivx(CPUSPARCState *env, int64_t a, int64_t b)
{
if (b == 0) {
/* Raise divide by zero trap. */
- cpu_restore_state(env, GETPC());
+ SPARCCPU *cpu = sparc_env_get_cpu(env);
+
+ cpu_restore_state(CPU(cpu), GETPC());
helper_raise_exception(env, TT_DIV_ZERO);
} else if (b == -1) {
/* Avoid overflow trap with i386 divide insn. */
@@ -161,7 +172,9 @@ uint64_t helper_udivx(CPUSPARCState *env, uint64_t a, uint64_t b)
{
if (b == 0) {
/* Raise divide by zero trap. */
- cpu_restore_state(env, GETPC());
+ SPARCCPU *cpu = sparc_env_get_cpu(env);
+
+ cpu_restore_state(CPU(cpu), GETPC());
helper_raise_exception(env, TT_DIV_ZERO);
}
return a / b;
@@ -171,6 +184,7 @@ uint64_t helper_udivx(CPUSPARCState *env, uint64_t a, uint64_t b)
target_ulong helper_taddcctv(CPUSPARCState *env, target_ulong src1,
target_ulong src2)
{
+ SPARCCPU *cpu = sparc_env_get_cpu(env);
target_ulong dst;
/* Tag overflow occurs if either input has bits 0 or 1 set. */
@@ -193,13 +207,14 @@ target_ulong helper_taddcctv(CPUSPARCState *env, target_ulong src1,
return dst;
tag_overflow:
- cpu_restore_state(env, GETPC());
+ cpu_restore_state(CPU(cpu), GETPC());
helper_raise_exception(env, TT_TOVF);
}
target_ulong helper_tsubcctv(CPUSPARCState *env, target_ulong src1,
target_ulong src2)
{
+ SPARCCPU *cpu = sparc_env_get_cpu(env);
target_ulong dst;
/* Tag overflow occurs if either input has bits 0 or 1 set. */
@@ -222,7 +237,7 @@ target_ulong helper_tsubcctv(CPUSPARCState *env, target_ulong src1,
return dst;
tag_overflow:
- cpu_restore_state(env, GETPC());
+ cpu_restore_state(CPU(cpu), GETPC());
helper_raise_exception(env, TT_TOVF);
}
@@ -232,9 +247,9 @@ void helper_power_down(CPUSPARCState *env)
CPUState *cs = CPU(sparc_env_get_cpu(env));
cs->halted = 1;
- env->exception_index = EXCP_HLT;
+ cs->exception_index = EXCP_HLT;
env->pc = env->npc;
env->npc = env->pc + 4;
- cpu_loop_exit(env);
+ cpu_loop_exit(cs);
}
#endif
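[Annotation] The helper_sdiv_common rewrite above is not just style: dividing INT64_MIN by -1 is undefined behaviour in C (the true quotient, +2^63, does not fit in int64_t), and on x86 hosts the idiv instruction raises #DE for it, so unpatched QEMU would die with SIGFPE instead of saturating the guest result. A tiny host-side illustration of the case the new branch filters out (illustrative only):

    #include <stdint.h>

    int64_t x0 = INT64_MIN;   /* dividend assembled from Y:rs1 */
    int32_t x1 = -1;          /* 32-bit divisor */

    /* x0 / x1 here is UB: the quotient is unrepresentable, and on x86-64
     * the generated idiv traps.  The patched helper instead returns
     * INT32_MAX with overflow = 1, matching the 32-bit saturation applied
     * to every other out-of-range quotient in the else branch. */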
diff --git a/target-sparc/int32_helper.c b/target-sparc/int32_helper.c
index d5322380cd..7c380ba2a1 100644
--- a/target-sparc/int32_helper.c
+++ b/target-sparc/int32_helper.c
@@ -62,7 +62,7 @@ void sparc_cpu_do_interrupt(CPUState *cs)
{
SPARCCPU *cpu = SPARC_CPU(cs);
CPUSPARCState *env = &cpu->env;
- int cwp, intno = env->exception_index;
+ int cwp, intno = cs->exception_index;
/* Compute PSR before exposing state. */
if (env->cc_op != CC_OP_FLAGS) {
@@ -105,12 +105,12 @@ void sparc_cpu_do_interrupt(CPUState *cs)
#endif
#if !defined(CONFIG_USER_ONLY)
if (env->psret == 0) {
- if (env->exception_index == 0x80 &&
+ if (cs->exception_index == 0x80 &&
env->def->features & CPU_FEATURE_TA0_SHUTDOWN) {
qemu_system_shutdown_request();
} else {
- cpu_abort(env, "Trap 0x%02x while interrupts disabled, Error state",
- env->exception_index);
+ cpu_abort(cs, "Trap 0x%02x while interrupts disabled, Error state",
+ cs->exception_index);
}
return;
}
@@ -125,7 +125,7 @@ void sparc_cpu_do_interrupt(CPUState *cs)
env->tbr = (env->tbr & TBR_BASE_MASK) | (intno << 4);
env->pc = env->tbr;
env->npc = env->pc + 4;
- env->exception_index = -1;
+ cs->exception_index = -1;
#if !defined(CONFIG_USER_ONLY)
/* IRQ acknowledgment */
diff --git a/target-sparc/int64_helper.c b/target-sparc/int64_helper.c
index bf7dd86ab8..bf242323a4 100644
--- a/target-sparc/int64_helper.c
+++ b/target-sparc/int64_helper.c
@@ -63,7 +63,7 @@ void sparc_cpu_do_interrupt(CPUState *cs)
{
SPARCCPU *cpu = SPARC_CPU(cs);
CPUSPARCState *env = &cpu->env;
- int intno = env->exception_index;
+ int intno = cs->exception_index;
trap_state *tsptr;
/* Compute PSR before exposing state. */
@@ -111,8 +111,8 @@ void sparc_cpu_do_interrupt(CPUState *cs)
#endif
#if !defined(CONFIG_USER_ONLY)
if (env->tl >= env->maxtl) {
- cpu_abort(env, "Trap 0x%04x while trap level (%d) >= MAXTL (%d),"
- " Error state", env->exception_index, env->tl, env->maxtl);
+ cpu_abort(cs, "Trap 0x%04x while trap level (%d) >= MAXTL (%d),"
+ " Error state", cs->exception_index, env->tl, env->maxtl);
return;
}
#endif
@@ -160,7 +160,7 @@ void sparc_cpu_do_interrupt(CPUState *cs)
env->tbr |= ((env->tl > 1) ? 1 << 14 : 0) | (intno << 5);
env->pc = env->tbr;
env->npc = env->pc + 4;
- env->exception_index = -1;
+ cs->exception_index = -1;
}
trap_state *cpu_tsptr(CPUSPARCState* env)
diff --git a/target-sparc/ldst_helper.c b/target-sparc/ldst_helper.c
index 32491b499a..ec14802573 100644
--- a/target-sparc/ldst_helper.c
+++ b/target-sparc/ldst_helper.c
@@ -141,6 +141,7 @@ static void replace_tlb_entry(SparcTLBEntry *tlb,
/* flush page range if translation is valid */
if (TTE_IS_VALID(tlb->tte)) {
+ CPUState *cs = CPU(sparc_env_get_cpu(env1));
mask = 0xffffffffffffe000ULL;
mask <<= 3 * ((tlb->tte >> 61) & 3);
@@ -149,7 +150,7 @@ static void replace_tlb_entry(SparcTLBEntry *tlb,
va = tlb->tag & mask;
for (offset = 0; offset < size; offset += TARGET_PAGE_SIZE) {
- tlb_flush_page(env1, va + offset);
+ tlb_flush_page(cs, va + offset);
}
}
@@ -447,7 +448,7 @@ static uint64_t leon3_cache_control_ld(CPUSPARCState *env, target_ulong addr,
uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr, int asi, int size,
int sign)
{
- CPUState *cs = ENV_GET_CPU(env);
+ CPUState *cs = CPU(sparc_env_get_cpu(env));
uint64_t ret = 0;
#if defined(DEBUG_MXCC) || defined(DEBUG_ASI)
uint32_t last_addr = addr;
@@ -688,8 +689,7 @@ uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr, int asi, int size,
break;
case 8: /* User code access, XXX */
default:
- cpu_unassigned_access(CPU(sparc_env_get_cpu(env)),
- addr, false, false, asi, size);
+ cpu_unassigned_access(cs, addr, false, false, asi, size);
ret = 0;
break;
}
@@ -717,7 +717,9 @@ uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr, int asi, int size,
void helper_st_asi(CPUSPARCState *env, target_ulong addr, uint64_t val, int asi,
int size)
{
- CPUState *cs = ENV_GET_CPU(env);
+ SPARCCPU *cpu = sparc_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
+
helper_check_align(env, addr, size - 1);
switch (asi) {
case 2: /* SuperSparc MXCC registers and Leon3 cache control */
@@ -863,13 +865,13 @@ void helper_st_asi(CPUSPARCState *env, target_ulong addr, uint64_t val, int asi,
DPRINTF_MMU("mmu flush level %d\n", mmulev);
switch (mmulev) {
case 0: /* flush page */
- tlb_flush_page(env, addr & 0xfffff000);
+ tlb_flush_page(CPU(cpu), addr & 0xfffff000);
break;
case 1: /* flush segment (256k) */
case 2: /* flush region (16M) */
case 3: /* flush context (4G) */
case 4: /* flush entire */
- tlb_flush(env, 1);
+ tlb_flush(CPU(cpu), 1);
break;
default:
break;
@@ -894,7 +896,7 @@ void helper_st_asi(CPUSPARCState *env, target_ulong addr, uint64_t val, int asi,
disabled mode are invalid in normal mode */
if ((oldreg & (MMU_E | MMU_NF | env->def->mmu_bm)) !=
(env->mmuregs[reg] & (MMU_E | MMU_NF | env->def->mmu_bm))) {
- tlb_flush(env, 1);
+ tlb_flush(CPU(cpu), 1);
}
break;
case 1: /* Context Table Pointer Register */
@@ -905,7 +907,7 @@ void helper_st_asi(CPUSPARCState *env, target_ulong addr, uint64_t val, int asi,
if (oldreg != env->mmuregs[reg]) {
/* we flush when the MMU context changes because
QEMU has no MMU context support */
- tlb_flush(env, 1);
+ tlb_flush(CPU(cpu), 1);
}
break;
case 3: /* Synchronous Fault Status Register with Clear */
@@ -1292,7 +1294,7 @@ void helper_st_asi(CPUSPARCState *env, target_ulong addr, target_ulong val,
uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr, int asi, int size,
int sign)
{
- CPUState *cs = ENV_GET_CPU(env);
+ CPUState *cs = CPU(sparc_env_get_cpu(env));
uint64_t ret = 0;
#if defined(DEBUG_ASI)
target_ulong last_addr = addr;
@@ -1326,7 +1328,7 @@ uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr, int asi, int size,
dump_asi("read ", last_addr, asi, size, ret);
#endif
/* env->exception_index is set in get_physical_address_data(). */
- helper_raise_exception(env, env->exception_index);
+ helper_raise_exception(env, cs->exception_index);
}
/* convert nonfaulting load ASIs to normal load ASIs */
@@ -1605,8 +1607,7 @@ uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr, int asi, int size,
case 0x5f: /* D-MMU demap, WO */
case 0x77: /* Interrupt vector, WO */
default:
- cpu_unassigned_access(CPU(sparc_env_get_cpu(env)),
- addr, false, false, 1, size);
+ cpu_unassigned_access(cs, addr, false, false, 1, size);
ret = 0;
break;
}
@@ -1662,7 +1663,9 @@ uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr, int asi, int size,
void helper_st_asi(CPUSPARCState *env, target_ulong addr, target_ulong val,
int asi, int size)
{
- CPUState *cs = ENV_GET_CPU(env);
+ SPARCCPU *cpu = sparc_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
+
#ifdef DEBUG_ASI
dump_asi("write", addr, asi, size, val);
#endif
@@ -1871,7 +1874,7 @@ void helper_st_asi(CPUSPARCState *env, target_ulong addr, target_ulong val,
#ifdef DEBUG_MMU
dump_mmu(stdout, fprintf, env);
#endif
- tlb_flush(env, 1);
+ tlb_flush(CPU(cpu), 1);
}
return;
}
@@ -1960,13 +1963,13 @@ void helper_st_asi(CPUSPARCState *env, target_ulong addr, target_ulong val,
env->dmmu.mmu_primary_context = val;
/* can be optimized to only flush MMU_USER_IDX
and MMU_KERNEL_IDX entries */
- tlb_flush(env, 1);
+ tlb_flush(CPU(cpu), 1);
break;
case 2: /* Secondary context */
env->dmmu.mmu_secondary_context = val;
/* can be optimized to only flush MMU_USER_SECONDARY_IDX
and MMU_KERNEL_SECONDARY_IDX entries */
- tlb_flush(env, 1);
+ tlb_flush(CPU(cpu), 1);
break;
case 5: /* TSB access */
DPRINTF_MMU("dmmu TSB write: 0x%016" PRIx64 " -> 0x%016"
@@ -2040,8 +2043,7 @@ void helper_st_asi(CPUSPARCState *env, target_ulong addr, target_ulong val,
case 0x8a: /* Primary no-fault LE, RO */
case 0x8b: /* Secondary no-fault LE, RO */
default:
- cpu_unassigned_access(CPU(sparc_env_get_cpu(env)),
- addr, true, false, 1, size);
+ cpu_unassigned_access(cs, addr, true, false, 1, size);
return;
}
}
@@ -2397,7 +2399,7 @@ void sparc_cpu_unassigned_access(CPUState *cs, hwaddr addr,
/* flush neverland mappings created during no-fault mode,
so the sequential MMU faults report proper fault types */
if (env->mmuregs[0] & MMU_NF) {
- tlb_flush(env, 1);
+ tlb_flush(cs, 1);
}
}
#else
@@ -2427,12 +2429,13 @@ static void QEMU_NORETURN do_unaligned_access(CPUSPARCState *env,
target_ulong addr, int is_write,
int is_user, uintptr_t retaddr)
{
+ SPARCCPU *cpu = sparc_env_get_cpu(env);
#ifdef DEBUG_UNALIGNED
printf("Unaligned access to 0x" TARGET_FMT_lx " from 0x" TARGET_FMT_lx
"\n", addr, env->pc);
#endif
if (retaddr) {
- cpu_restore_state(env, retaddr);
+ cpu_restore_state(CPU(cpu), retaddr);
}
helper_raise_exception(env, TT_UNALIGNED);
}
@@ -2441,17 +2444,17 @@ static void QEMU_NORETURN do_unaligned_access(CPUSPARCState *env,
NULL, it means that the function was called in C code (i.e. not
from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
-void tlb_fill(CPUSPARCState *env, target_ulong addr, int is_write, int mmu_idx,
+void tlb_fill(CPUState *cs, target_ulong addr, int is_write, int mmu_idx,
uintptr_t retaddr)
{
int ret;
- ret = cpu_sparc_handle_mmu_fault(env, addr, is_write, mmu_idx);
+ ret = sparc_cpu_handle_mmu_fault(cs, addr, is_write, mmu_idx);
if (ret) {
if (retaddr) {
- cpu_restore_state(env, retaddr);
+ cpu_restore_state(cs, retaddr);
}
- cpu_loop_exit(env);
+ cpu_loop_exit(cs);
}
}
#endif
diff --git a/target-sparc/machine.c b/target-sparc/machine.c
index a353dabdd9..3f3de4c65a 100644
--- a/target-sparc/machine.c
+++ b/target-sparc/machine.c
@@ -112,6 +112,7 @@ void cpu_save(QEMUFile *f, void *opaque)
int cpu_load(QEMUFile *f, void *opaque, int version_id)
{
CPUSPARCState *env = opaque;
+ SPARCCPU *cpu = sparc_env_get_cpu(env);
int i;
uint32_t tmp;
@@ -212,6 +213,6 @@ int cpu_load(QEMUFile *f, void *opaque, int version_id)
qemu_get_be64s(f, &env->ssr);
cpu_get_timer(f, env->hstick);
#endif
- tlb_flush(env, 1);
+ tlb_flush(CPU(cpu), 1);
return 0;
}
diff --git a/target-sparc/mmu_helper.c b/target-sparc/mmu_helper.c
index 5fc2fd64bb..61afbcf048 100644
--- a/target-sparc/mmu_helper.c
+++ b/target-sparc/mmu_helper.c
@@ -25,13 +25,13 @@
#if defined(CONFIG_USER_ONLY)
-int cpu_sparc_handle_mmu_fault(CPUSPARCState *env1, target_ulong address, int rw,
+int sparc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
int mmu_idx)
{
if (rw & 2) {
- env1->exception_index = TT_TFAULT;
+ cs->exception_index = TT_TFAULT;
} else {
- env1->exception_index = TT_DFAULT;
+ cs->exception_index = TT_DFAULT;
}
return 1;
}
@@ -86,7 +86,7 @@ static int get_physical_address(CPUSPARCState *env, hwaddr *physical,
uint32_t pde;
int error_code = 0, is_dirty, is_user;
unsigned long page_offset;
- CPUState *cs = ENV_GET_CPU(env);
+ CPUState *cs = CPU(sparc_env_get_cpu(env));
is_user = mmu_idx == MMU_USER_IDX;
@@ -198,9 +198,11 @@ static int get_physical_address(CPUSPARCState *env, hwaddr *physical,
}
/* Perform address translation */
-int cpu_sparc_handle_mmu_fault(CPUSPARCState *env, target_ulong address, int rw,
+int sparc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
int mmu_idx)
{
+ SPARCCPU *cpu = SPARC_CPU(cs);
+ CPUSPARCState *env = &cpu->env;
hwaddr paddr;
target_ulong vaddr;
target_ulong page_size;
@@ -212,10 +214,10 @@ int cpu_sparc_handle_mmu_fault(CPUSPARCState *env, target_ulong address, int rw,
vaddr = address;
if (error_code == 0) {
#ifdef DEBUG_MMU
- printf("Translate at " TARGET_FMT_lx " -> " TARGET_FMT_plx ", vaddr "
+ printf("Translate at %" VADDR_PRIx " -> " TARGET_FMT_plx ", vaddr "
TARGET_FMT_lx "\n", address, paddr, vaddr);
#endif
- tlb_set_page(env, vaddr, paddr, prot, mmu_idx, page_size);
+ tlb_set_page(cs, vaddr, paddr, prot, mmu_idx, page_size);
return 0;
}
@@ -231,13 +233,13 @@ int cpu_sparc_handle_mmu_fault(CPUSPARCState *env, target_ulong address, int rw,
neverland. Fake/overridden mappings will be flushed when
switching to normal mode. */
prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
- tlb_set_page(env, vaddr, paddr, prot, mmu_idx, TARGET_PAGE_SIZE);
+ tlb_set_page(cs, vaddr, paddr, prot, mmu_idx, TARGET_PAGE_SIZE);
return 0;
} else {
if (rw & 2) {
- env->exception_index = TT_TFAULT;
+ cs->exception_index = TT_TFAULT;
} else {
- env->exception_index = TT_DFAULT;
+ cs->exception_index = TT_DFAULT;
}
return 1;
}
@@ -245,7 +247,7 @@ int cpu_sparc_handle_mmu_fault(CPUSPARCState *env, target_ulong address, int rw,
target_ulong mmu_probe(CPUSPARCState *env, target_ulong address, int mmulev)
{
- CPUState *cs = ENV_GET_CPU(env);
+ CPUState *cs = CPU(sparc_env_get_cpu(env));
hwaddr pde_ptr;
uint32_t pde;
@@ -487,6 +489,7 @@ static int get_physical_address_data(CPUSPARCState *env,
hwaddr *physical, int *prot,
target_ulong address, int rw, int mmu_idx)
{
+ CPUState *cs = CPU(sparc_env_get_cpu(env));
unsigned int i;
uint64_t context;
uint64_t sfsr = 0;
@@ -551,10 +554,10 @@ static int get_physical_address_data(CPUSPARCState *env,
if (do_fault) {
/* faults above are reported with TT_DFAULT. */
- env->exception_index = TT_DFAULT;
+ cs->exception_index = TT_DFAULT;
} else if (!TTE_IS_W_OK(env->dtlb[i].tte) && (rw == 1)) {
do_fault = 1;
- env->exception_index = TT_DPROT;
+ cs->exception_index = TT_DPROT;
trace_mmu_helper_dprot(address, context, mmu_idx, env->tl);
}
@@ -598,7 +601,7 @@ static int get_physical_address_data(CPUSPARCState *env,
* - JPS1: SFAR updated and some fields of SFSR updated
*/
env->dmmu.tag_access = (address & ~0x1fffULL) | context;
- env->exception_index = TT_DMISS;
+ cs->exception_index = TT_DMISS;
return 1;
}
@@ -606,6 +609,7 @@ static int get_physical_address_code(CPUSPARCState *env,
hwaddr *physical, int *prot,
target_ulong address, int mmu_idx)
{
+ CPUState *cs = CPU(sparc_env_get_cpu(env));
unsigned int i;
uint64_t context;
@@ -649,7 +653,7 @@ static int get_physical_address_code(CPUSPARCState *env,
/* FIXME: ASI field in SFSR must be set */
env->immu.sfsr |= SFSR_FT_PRIV_BIT | SFSR_VALID_BIT;
- env->exception_index = TT_TFAULT;
+ cs->exception_index = TT_TFAULT;
env->immu.tag_access = (address & ~0x1fffULL) | context;
@@ -667,7 +671,7 @@ static int get_physical_address_code(CPUSPARCState *env,
/* Context is stored in DMMU (dmmuregs[1]) also for IMMU */
env->immu.tag_access = (address & ~0x1fffULL) | context;
- env->exception_index = TT_TMISS;
+ cs->exception_index = TT_TMISS;
return 1;
}
@@ -705,9 +709,11 @@ static int get_physical_address(CPUSPARCState *env, hwaddr *physical,
}
/* Perform address translation */
-int cpu_sparc_handle_mmu_fault(CPUSPARCState *env, target_ulong address, int rw,
+int sparc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
int mmu_idx)
{
+ SPARCCPU *cpu = SPARC_CPU(cs);
+ CPUSPARCState *env = &cpu->env;
target_ulong vaddr;
hwaddr paddr;
target_ulong page_size;
@@ -723,7 +729,7 @@ int cpu_sparc_handle_mmu_fault(CPUSPARCState *env, target_ulong address, int rw,
env->dmmu.mmu_primary_context,
env->dmmu.mmu_secondary_context);
- tlb_set_page(env, vaddr, paddr, prot, mmu_idx, page_size);
+ tlb_set_page(cs, vaddr, paddr, prot, mmu_idx, page_size);
return 0;
}
/* XXX */
diff --git a/target-sparc/translate.c b/target-sparc/translate.c
index 46d7859e97..2de1c4a58d 100644
--- a/target-sparc/translate.c
+++ b/target-sparc/translate.c
@@ -5270,8 +5270,8 @@ static inline void gen_intermediate_code_internal(SPARCCPU *cpu,
max_insns = CF_COUNT_MASK;
gen_tb_start();
do {
- if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
- QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
+ if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
+ QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
if (bp->pc == dc->pc) {
if (dc->pc != pc_start)
save_state(dc);
diff --git a/target-unicore32/cpu.c b/target-unicore32/cpu.c
index 3f78208360..2d2c429a35 100644
--- a/target-unicore32/cpu.c
+++ b/target-unicore32/cpu.c
@@ -23,6 +23,12 @@ static void uc32_cpu_set_pc(CPUState *cs, vaddr value)
cpu->env.regs[31] = value;
}
+static bool uc32_cpu_has_work(CPUState *cs)
+{
+ return cs->interrupt_request &
+ (CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB);
+}
+
static inline void set_feature(CPUUniCore32State *env, int feature)
{
env->features |= feature;
@@ -115,7 +121,7 @@ static void uc32_cpu_initfn(Object *obj)
env->regs[31] = 0x03000000;
#endif
- tlb_flush(env, 1);
+ tlb_flush(cs, 1);
if (tcg_enabled() && !inited) {
inited = true;
@@ -138,10 +144,13 @@ static void uc32_cpu_class_init(ObjectClass *oc, void *data)
dc->realize = uc32_cpu_realizefn;
cc->class_by_name = uc32_cpu_class_by_name;
+ cc->has_work = uc32_cpu_has_work;
cc->do_interrupt = uc32_cpu_do_interrupt;
cc->dump_state = uc32_cpu_dump_state;
cc->set_pc = uc32_cpu_set_pc;
-#ifndef CONFIG_USER_ONLY
+#ifdef CONFIG_USER_ONLY
+ cc->handle_mmu_fault = uc32_cpu_handle_mmu_fault;
+#else
cc->get_phys_page_debug = uc32_cpu_get_phys_page_debug;
#endif
dc->vmsd = &vmstate_uc32_cpu;
diff --git a/target-unicore32/cpu.h b/target-unicore32/cpu.h
index 967511e3f6..50972f9494 100644
--- a/target-unicore32/cpu.h
+++ b/target-unicore32/cpu.h
@@ -125,13 +125,10 @@ void cpu_asr_write(CPUUniCore32State *env1, target_ulong val, target_ulong mask)
#define cpu_init uc32_cpu_init
#define cpu_exec uc32_cpu_exec
#define cpu_signal_handler uc32_cpu_signal_handler
-#define cpu_handle_mmu_fault uc32_cpu_handle_mmu_fault
CPUUniCore32State *uc32_cpu_init(const char *cpu_model);
int uc32_cpu_exec(CPUUniCore32State *s);
int uc32_cpu_signal_handler(int host_signum, void *pinfo, void *puc);
-int uc32_cpu_handle_mmu_fault(CPUUniCore32State *env, target_ulong address, int rw,
- int mmu_idx);
/* MMU modes definitions */
#define MMU_MODE0_SUFFIX _kernel
@@ -157,13 +154,9 @@ static inline void cpu_get_tb_cpu_state(CPUUniCore32State *env, target_ulong *pc
}
}
+int uc32_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw,
+ int mmu_idx);
void uc32_translate_init(void);
void switch_mode(CPUUniCore32State *, int);
-static inline bool cpu_has_work(CPUState *cpu)
-{
- return cpu->interrupt_request &
- (CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB);
-}
-
#endif /* QEMU_UNICORE32_CPU_H */
diff --git a/target-unicore32/helper.c b/target-unicore32/helper.c
index 9bf4fea5db..169c85cb4d 100644
--- a/target-unicore32/helper.c
+++ b/target-unicore32/helper.c
@@ -28,19 +28,12 @@
CPUUniCore32State *uc32_cpu_init(const char *cpu_model)
{
UniCore32CPU *cpu;
- CPUUniCore32State *env;
- ObjectClass *oc;
- oc = cpu_class_by_name(TYPE_UNICORE32_CPU, cpu_model);
- if (oc == NULL) {
+ cpu = UNICORE32_CPU(cpu_generic_init(TYPE_UNICORE32_CPU, cpu_model));
+ if (cpu == NULL) {
return NULL;
}
- cpu = UNICORE32_CPU(object_new(object_class_get_name(oc)));
- env = &cpu->env;
-
- object_property_set_bool(OBJECT(cpu), true, "realized", NULL);
-
- return env;
+ return &cpu->env;
}
uint32_t HELPER(clo)(uint32_t x)
@@ -57,6 +50,8 @@ uint32_t HELPER(clz)(uint32_t x)
void helper_cp0_set(CPUUniCore32State *env, uint32_t val, uint32_t creg,
uint32_t cop)
{
+ UniCore32CPU *cpu = uc32_env_get_cpu(env);
+
/*
* movc pp.nn, rn, #imm9
* rn: UCOP_REG_D
@@ -125,7 +120,7 @@ void helper_cp0_set(CPUUniCore32State *env, uint32_t val, uint32_t creg,
case 6:
if ((cop <= 6) && (cop >= 2)) {
/* invalid all tlb */
- tlb_flush(env, 1);
+ tlb_flush(CPU(cpu), 1);
return;
}
break;
@@ -236,23 +231,22 @@ void helper_cp1_putc(target_ulong x)
#ifdef CONFIG_USER_ONLY
void switch_mode(CPUUniCore32State *env, int mode)
{
+ UniCore32CPU *cpu = uc32_env_get_cpu(env);
+
if (mode != ASR_MODE_USER) {
- cpu_abort(env, "Tried to switch out of user mode\n");
+ cpu_abort(CPU(cpu), "Tried to switch out of user mode\n");
}
}
void uc32_cpu_do_interrupt(CPUState *cs)
{
- UniCore32CPU *cpu = UNICORE32_CPU(cs);
- CPUUniCore32State *env = &cpu->env;
-
- cpu_abort(env, "NO interrupt in user mode\n");
+ cpu_abort(cs, "NO interrupt in user mode\n");
}
-int uc32_cpu_handle_mmu_fault(CPUUniCore32State *env, target_ulong address,
+int uc32_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
int access_type, int mmu_idx)
{
- cpu_abort(env, "NO mmu fault in user mode\n");
+ cpu_abort(cs, "NO mmu fault in user mode\n");
return 1;
}
#endif
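[Annotation] uc32_cpu_init(), like cpu_sh4_init() earlier in this diff, collapses the lookup/new/realize boilerplate into cpu_generic_init(). Roughly what that shared helper does; the body below is a sketch from memory, not the authoritative qom/cpu.c code:

    CPUState *cpu_generic_init(const char *typename, const char *cpu_model)
    {
        ObjectClass *oc;
        CPUState *cpu;

        /* resolve the model name against the requested base CPU type */
        oc = cpu_class_by_name(typename, cpu_model);
        if (oc == NULL) {
            return NULL;
        }
        cpu = CPU(object_new(object_class_get_name(oc)));
        /* (the real helper also splits off ",feature=..." and feeds it to
         * cc->parse_features before realizing the object) */
        object_property_set_bool(OBJECT(cpu), true, "realized", NULL);
        return cpu;
    }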
diff --git a/target-unicore32/op_helper.c b/target-unicore32/op_helper.c
index 4f9f41eb36..4c6950d506 100644
--- a/target-unicore32/op_helper.c
+++ b/target-unicore32/op_helper.c
@@ -16,8 +16,10 @@
void HELPER(exception)(CPUUniCore32State *env, uint32_t excp)
{
- env->exception_index = excp;
- cpu_loop_exit(env);
+ CPUState *cs = CPU(uc32_env_get_cpu(env));
+
+ cs->exception_index = excp;
+ cpu_loop_exit(cs);
}
static target_ulong asr_read(CPUUniCore32State *env)
@@ -255,18 +257,18 @@ uint32_t HELPER(ror_cc)(CPUUniCore32State *env, uint32_t x, uint32_t i)
#define SHIFT 3
#include "exec/softmmu_template.h"
-void tlb_fill(CPUUniCore32State *env, target_ulong addr, int is_write,
+void tlb_fill(CPUState *cs, target_ulong addr, int is_write,
int mmu_idx, uintptr_t retaddr)
{
int ret;
- ret = uc32_cpu_handle_mmu_fault(env, addr, is_write, mmu_idx);
+ ret = uc32_cpu_handle_mmu_fault(cs, addr, is_write, mmu_idx);
if (unlikely(ret)) {
if (retaddr) {
/* now we have a real cpu fault */
- cpu_restore_state(env, retaddr);
+ cpu_restore_state(cs, retaddr);
}
- cpu_loop_exit(env);
+ cpu_loop_exit(cs);
}
}
#endif
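[Annotation] tlb_fill() in every target touched by this series now takes the CPUState directly, which is what the softmmu slow path naturally has in hand. The calling convention, paraphrased rather than quoted from exec/softmmu_template.h:

    /* on a TLB miss the softmmu template code calls, roughly: */
    tlb_fill(cs, addr, is_write, mmu_idx, retaddr);
    /* and the target either installs a translation via tlb_set_page(cs, ...)
     * and returns 0 from its handle_mmu_fault hook, or sets
     * cs->exception_index and unwinds with cpu_loop_exit(cs), exactly as the
     * hunk above does. */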
diff --git a/target-unicore32/softmmu.c b/target-unicore32/softmmu.c
index 22defc6db9..9a3786dddb 100644
--- a/target-unicore32/softmmu.c
+++ b/target-unicore32/softmmu.c
@@ -33,6 +33,8 @@
/* Map CPU modes onto saved register banks. */
static inline int bank_number(CPUUniCore32State *env, int mode)
{
+ UniCore32CPU *cpu = uc32_env_get_cpu(env);
+
switch (mode) {
case ASR_MODE_USER:
case ASR_MODE_SUSR:
@@ -46,7 +48,7 @@ static inline int bank_number(CPUUniCore32State *env, int mode)
case ASR_MODE_INTR:
return 4;
}
- cpu_abort(env, "Bad mode %x\n", mode);
+ cpu_abort(CPU(cpu), "Bad mode %x\n", mode);
return -1;
}
@@ -79,7 +81,7 @@ void uc32_cpu_do_interrupt(CPUState *cs)
uint32_t addr;
int new_mode;
- switch (env->exception_index) {
+ switch (cs->exception_index) {
case UC32_EXCP_PRIV:
new_mode = ASR_MODE_PRIV;
addr = 0x08;
@@ -99,7 +101,7 @@ void uc32_cpu_do_interrupt(CPUState *cs)
addr = 0x18;
break;
default:
- cpu_abort(env, "Unhandled exception 0x%x\n", env->exception_index);
+ cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
return;
}
/* High vectors. */
@@ -121,7 +123,8 @@ static int get_phys_addr_ucv2(CPUUniCore32State *env, uint32_t address,
int access_type, int is_user, uint32_t *phys_ptr, int *prot,
target_ulong *page_size)
{
- CPUState *cs = ENV_GET_CPU(env);
+ UniCore32CPU *cpu = uc32_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
int code;
uint32_t table;
uint32_t desc;
@@ -168,11 +171,11 @@ static int get_phys_addr_ucv2(CPUUniCore32State *env, uint32_t address,
*page_size = TARGET_PAGE_SIZE;
break;
default:
- cpu_abort(env, "wrong page type!");
+ cpu_abort(CPU(cpu), "wrong page type!");
}
break;
default:
- cpu_abort(env, "wrong page type!");
+ cpu_abort(CPU(cpu), "wrong page type!");
}
*phys_ptr = phys_addr;
@@ -209,9 +212,11 @@ do_fault:
return code;
}
-int uc32_cpu_handle_mmu_fault(CPUUniCore32State *env, target_ulong address,
+int uc32_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
int access_type, int mmu_idx)
{
+ UniCore32CPU *cpu = UNICORE32_CPU(cs);
+ CPUUniCore32State *env = &cpu->env;
uint32_t phys_addr;
target_ulong page_size;
int prot;
@@ -231,7 +236,7 @@ int uc32_cpu_handle_mmu_fault(CPUUniCore32State *env, target_ulong address,
ret = get_phys_addr_ucv2(env, address, access_type, is_user,
&phys_addr, &prot, &page_size);
if (is_user) {
- DPRINTF("user space access: ret %x, address %x, "
+ DPRINTF("user space access: ret %x, address %" VADDR_PRIx ", "
"access_type %x, phys_addr %x, prot %x\n",
ret, address, access_type, phys_addr, prot);
}
@@ -248,16 +253,16 @@ int uc32_cpu_handle_mmu_fault(CPUUniCore32State *env, target_ulong address,
/* Map a single page. */
phys_addr &= TARGET_PAGE_MASK;
address &= TARGET_PAGE_MASK;
- tlb_set_page(env, address, phys_addr, prot, mmu_idx, page_size);
+ tlb_set_page(cs, address, phys_addr, prot, mmu_idx, page_size);
return 0;
}
env->cp0.c3_faultstatus = ret;
env->cp0.c4_faultaddr = address;
if (access_type == 2) {
- env->exception_index = UC32_EXCP_ITRAP;
+ cs->exception_index = UC32_EXCP_ITRAP;
} else {
- env->exception_index = UC32_EXCP_DTRAP;
+ cs->exception_index = UC32_EXCP_DTRAP;
}
return ret;
}
@@ -266,6 +271,6 @@ hwaddr uc32_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
UniCore32CPU *cpu = UNICORE32_CPU(cs);
- cpu_abort(&cpu->env, "%s not supported yet\n", __func__);
+ cpu_abort(CPU(cpu), "%s not supported yet\n", __func__);
return addr;
}
diff --git a/target-unicore32/translate.c b/target-unicore32/translate.c
index 4572890ffa..c2402cf942 100644
--- a/target-unicore32/translate.c
+++ b/target-unicore32/translate.c
@@ -176,7 +176,7 @@ static void store_reg(DisasContext *s, int reg, TCGv var)
#define UCOP_SET_L UCOP_SET(24)
#define UCOP_SET_S UCOP_SET(24)
-#define ILLEGAL cpu_abort(env, \
+#define ILLEGAL cpu_abort(CPU(cpu), \
"Illegal UniCore32 instruction %x at line %d!", \
insn, __LINE__)
@@ -184,6 +184,7 @@ static void store_reg(DisasContext *s, int reg, TCGv var)
static void disas_cp0_insn(CPUUniCore32State *env, DisasContext *s,
uint32_t insn)
{
+ UniCore32CPU *cpu = uc32_env_get_cpu(env);
TCGv tmp, tmp2, tmp3;
if ((insn & 0xfe000000) == 0xe0000000) {
tmp2 = new_tmp();
@@ -209,6 +210,7 @@ static void disas_cp0_insn(CPUUniCore32State *env, DisasContext *s,
static void disas_ocd_insn(CPUUniCore32State *env, DisasContext *s,
uint32_t insn)
{
+ UniCore32CPU *cpu = uc32_env_get_cpu(env);
TCGv tmp;
if ((insn & 0xff003fff) == 0xe1000400) {
@@ -689,6 +691,7 @@ static inline long ucf64_reg_offset(int reg)
/* UniCore-F64 single load/store I_offset */
static void do_ucf64_ldst_i(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
+ UniCore32CPU *cpu = uc32_env_get_cpu(env);
int offset;
TCGv tmp;
TCGv addr;
@@ -735,6 +738,7 @@ static void do_ucf64_ldst_i(CPUUniCore32State *env, DisasContext *s, uint32_t in
/* UniCore-F64 load/store multiple words */
static void do_ucf64_ldst_m(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
+ UniCore32CPU *cpu = uc32_env_get_cpu(env);
unsigned int i;
int j, n, freg;
TCGv tmp;
@@ -820,6 +824,7 @@ static void do_ucf64_ldst_m(CPUUniCore32State *env, DisasContext *s, uint32_t in
/* UniCore-F64 mrc/mcr */
static void do_ucf64_trans(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
+ UniCore32CPU *cpu = uc32_env_get_cpu(env);
TCGv tmp;
if ((insn & 0xfe0003ff) == 0xe2000000) {
@@ -884,6 +889,8 @@ static void do_ucf64_trans(CPUUniCore32State *env, DisasContext *s, uint32_t ins
/* UniCore-F64 convert instructions */
static void do_ucf64_fcvt(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
+ UniCore32CPU *cpu = uc32_env_get_cpu(env);
+
if (UCOP_UCF64_FMT == 3) {
ILLEGAL;
}
@@ -950,6 +957,8 @@ static void do_ucf64_fcvt(CPUUniCore32State *env, DisasContext *s, uint32_t insn
/* UniCore-F64 compare instructions */
static void do_ucf64_fcmp(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
+ UniCore32CPU *cpu = uc32_env_get_cpu(env);
+
if (UCOP_SET(25)) {
ILLEGAL;
}
@@ -1028,6 +1037,8 @@ static void do_ucf64_fcmp(CPUUniCore32State *env, DisasContext *s, uint32_t insn
/* UniCore-F64 data processing */
static void do_ucf64_datap(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
+ UniCore32CPU *cpu = uc32_env_get_cpu(env);
+
if (UCOP_UCF64_FMT == 3) {
ILLEGAL;
}
@@ -1061,6 +1072,8 @@ static void do_ucf64_datap(CPUUniCore32State *env, DisasContext *s, uint32_t ins
/* Disassemble an F64 instruction */
static void disas_ucf64_insn(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
+ UniCore32CPU *cpu = uc32_env_get_cpu(env);
+
if (!UCOP_SET(29)) {
if (UCOP_SET(26)) {
do_ucf64_ldst_m(env, s, insn);
@@ -1167,6 +1180,8 @@ static void gen_exception_return(DisasContext *s, TCGv pc)
static void disas_coproc_insn(CPUUniCore32State *env, DisasContext *s,
uint32_t insn)
{
+ UniCore32CPU *cpu = uc32_env_get_cpu(env);
+
switch (UCOP_CPNUM) {
#ifndef CONFIG_USER_ONLY
case 0:
@@ -1181,13 +1196,14 @@ static void disas_coproc_insn(CPUUniCore32State *env, DisasContext *s,
break;
default:
/* Unknown coprocessor. */
- cpu_abort(env, "Unknown coprocessor!");
+ cpu_abort(CPU(cpu), "Unknown coprocessor!");
}
}
/* data processing instructions */
static void do_datap(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
+ UniCore32CPU *cpu = uc32_env_get_cpu(env);
TCGv tmp;
TCGv tmp2;
int logic_cc;
@@ -1421,6 +1437,7 @@ static void do_mult(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
/* miscellaneous instructions */
static void do_misc(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
+ UniCore32CPU *cpu = uc32_env_get_cpu(env);
unsigned int val;
TCGv tmp;
@@ -1546,6 +1563,7 @@ static void do_ldst_ir(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
/* SWP instruction */
static void do_swap(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
+ UniCore32CPU *cpu = uc32_env_get_cpu(env);
TCGv addr;
TCGv tmp;
TCGv tmp2;
@@ -1573,6 +1591,7 @@ static void do_swap(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
/* load/store hw/sb */
static void do_ldst_hwsb(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
+ UniCore32CPU *cpu = uc32_env_get_cpu(env);
TCGv addr;
TCGv tmp;
@@ -1625,6 +1644,7 @@ static void do_ldst_hwsb(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
/* load/store multiple words */
static void do_ldst_m(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
+ UniCore32CPU *cpu = uc32_env_get_cpu(env);
unsigned int val, i, mmu_idx;
int j, n, reg, user, loaded_base;
TCGv tmp;
@@ -1766,6 +1786,7 @@ static void do_ldst_m(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
/* branch (and link) */
static void do_branch(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
+ UniCore32CPU *cpu = uc32_env_get_cpu(env);
unsigned int val;
int32_t offset;
TCGv tmp;
@@ -1795,6 +1816,7 @@ static void do_branch(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
static void disas_uc32_insn(CPUUniCore32State *env, DisasContext *s)
{
+ UniCore32CPU *cpu = uc32_env_get_cpu(env);
unsigned int insn;
if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
@@ -1922,8 +1944,8 @@ static inline void gen_intermediate_code_internal(UniCore32CPU *cpu,
gen_tb_start();
do {
- if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
- QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
+ if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
+ QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
if (bp->pc == dc->pc) {
gen_set_pc_im(dc->pc);
gen_exception(EXCP_DEBUG);
@@ -1978,7 +2000,7 @@ static inline void gen_intermediate_code_internal(UniCore32CPU *cpu,
if (dc->condjmp) {
/* FIXME: This can theoretically happen with self-modifying
code. */
- cpu_abort(env, "IO on conditional branch instruction");
+ cpu_abort(cs, "IO on conditional branch instruction");
}
gen_io_end();
}
diff --git a/target-unicore32/ucf64_helper.c b/target-unicore32/ucf64_helper.c
index a516edd319..34fa2a5b40 100644
--- a/target-unicore32/ucf64_helper.c
+++ b/target-unicore32/ucf64_helper.c
@@ -76,6 +76,7 @@ static inline int ucf64_exceptbits_to_host(int target_bits)
void HELPER(ucf64_set_fpscr)(CPUUniCore32State *env, uint32_t val)
{
+ UniCore32CPU *cpu = uc32_env_get_cpu(env);
int i;
uint32_t changed;
@@ -99,7 +100,7 @@ void HELPER(ucf64_set_fpscr)(CPUUniCore32State *env, uint32_t val)
i = float_round_down;
break;
default: /* 100 and 101 not implement */
- cpu_abort(env, "Unsupported UniCore-F64 round mode");
+ cpu_abort(CPU(cpu), "Unsupported UniCore-F64 round mode");
}
set_float_rounding_mode(i, &env->ucf64.fp_status);
}
diff --git a/target-xtensa/cpu.c b/target-xtensa/cpu.c
index 749e20580f..6251f1c47e 100644
--- a/target-xtensa/cpu.c
+++ b/target-xtensa/cpu.c
@@ -40,6 +40,13 @@ static void xtensa_cpu_set_pc(CPUState *cs, vaddr value)
cpu->env.pc = value;
}
+static bool xtensa_cpu_has_work(CPUState *cs)
+{
+ XtensaCPU *cpu = XTENSA_CPU(cs);
+
+ return cpu->env.pending_irq_level;
+}
+
/* CPUClass::reset() */
static void xtensa_cpu_reset(CPUState *s)
{
@@ -134,6 +141,7 @@ static void xtensa_cpu_class_init(ObjectClass *oc, void *data)
cc->reset = xtensa_cpu_reset;
cc->class_by_name = xtensa_cpu_class_by_name;
+ cc->has_work = xtensa_cpu_has_work;
cc->do_interrupt = xtensa_cpu_do_interrupt;
cc->dump_state = xtensa_cpu_dump_state;
cc->set_pc = xtensa_cpu_set_pc;
diff --git a/target-xtensa/cpu.h b/target-xtensa/cpu.h
index 1cf5ea3aff..e210bacdff 100644
--- a/target-xtensa/cpu.h
+++ b/target-xtensa/cpu.h
@@ -359,7 +359,7 @@ typedef struct CPUXtensaState {
int exception_taken;
/* Watchpoints for DBREAK registers */
- CPUWatchpoint *cpu_watchpoint[MAX_NDBREAK];
+ struct CPUWatchpoint *cpu_watchpoint[MAX_NDBREAK];
CPU_COMMON
} CPUXtensaState;
@@ -493,6 +493,8 @@ static inline int cpu_mmu_index(CPUXtensaState *env)
static inline void cpu_get_tb_cpu_state(CPUXtensaState *env, target_ulong *pc,
target_ulong *cs_base, int *flags)
{
+ CPUState *cs = CPU(xtensa_env_get_cpu(env));
+
*pc = env->pc;
*cs_base = 0;
*flags = 0;
@@ -515,7 +517,7 @@ static inline void cpu_get_tb_cpu_state(CPUXtensaState *env, target_ulong *pc,
if (xtensa_option_enabled(env->config, XTENSA_OPTION_COPROCESSOR)) {
*flags |= env->sregs[CPENABLE] << XTENSA_TBFLAG_CPENABLE_SHIFT;
}
- if (ENV_GET_CPU(env)->singlestep_enabled && env->exception_taken) {
+ if (cs->singlestep_enabled && env->exception_taken) {
*flags |= XTENSA_TBFLAG_EXCEPTION;
}
}
@@ -523,11 +525,4 @@ static inline void cpu_get_tb_cpu_state(CPUXtensaState *env, target_ulong *pc,
#include "exec/cpu-all.h"
#include "exec/exec-all.h"
-static inline int cpu_has_work(CPUState *cpu)
-{
- CPUXtensaState *env = &XTENSA_CPU(cpu)->env;
-
- return env->pending_irq_level;
-}
-
#endif
diff --git a/target-xtensa/helper.c b/target-xtensa/helper.c
index 60cb055a93..94dcd9442e 100644
--- a/target-xtensa/helper.c
+++ b/target-xtensa/helper.c
@@ -81,16 +81,18 @@ static uint32_t check_hw_breakpoints(CPUXtensaState *env)
void xtensa_breakpoint_handler(CPUXtensaState *env)
{
- if (env->watchpoint_hit) {
- if (env->watchpoint_hit->flags & BP_CPU) {
+ CPUState *cs = CPU(xtensa_env_get_cpu(env));
+
+ if (cs->watchpoint_hit) {
+ if (cs->watchpoint_hit->flags & BP_CPU) {
uint32_t cause;
- env->watchpoint_hit = NULL;
+ cs->watchpoint_hit = NULL;
cause = check_hw_breakpoints(env);
if (cause) {
debug_exception_env(env, cause);
}
- cpu_resume_from_signal(env, NULL);
+ cpu_resume_from_signal(cs, NULL);
}
}
}
@@ -169,6 +171,8 @@ static void handle_interrupt(CPUXtensaState *env)
(env->config->level_mask[level] &
env->sregs[INTSET] &
env->sregs[INTENABLE])) {
+ CPUState *cs = CPU(xtensa_env_get_cpu(env));
+
if (level > 1) {
env->sregs[EPC1 + level - 1] = env->pc;
env->sregs[EPS2 + level - 2] = env->sregs[PS];
@@ -185,10 +189,10 @@ static void handle_interrupt(CPUXtensaState *env)
} else {
env->sregs[EPC1] = env->pc;
}
- env->exception_index = EXC_DOUBLE;
+ cs->exception_index = EXC_DOUBLE;
} else {
env->sregs[EPC1] = env->pc;
- env->exception_index =
+ cs->exception_index =
(env->sregs[PS] & PS_UM) ? EXC_USER : EXC_KERNEL;
}
env->sregs[PS] |= PS_EXCM;
@@ -202,7 +206,7 @@ void xtensa_cpu_do_interrupt(CPUState *cs)
XtensaCPU *cpu = XTENSA_CPU(cs);
CPUXtensaState *env = &cpu->env;
- if (env->exception_index == EXC_IRQ) {
+ if (cs->exception_index == EXC_IRQ) {
qemu_log_mask(CPU_LOG_INT,
"%s(EXC_IRQ) level = %d, cintlevel = %d, "
"pc = %08x, a0 = %08x, ps = %08x, "
@@ -215,7 +219,7 @@ void xtensa_cpu_do_interrupt(CPUState *cs)
handle_interrupt(env);
}
- switch (env->exception_index) {
+ switch (cs->exception_index) {
case EXC_WINDOW_OVERFLOW4:
case EXC_WINDOW_UNDERFLOW4:
case EXC_WINDOW_OVERFLOW8:
@@ -228,15 +232,15 @@ void xtensa_cpu_do_interrupt(CPUState *cs)
case EXC_DEBUG:
qemu_log_mask(CPU_LOG_INT, "%s(%d) "
"pc = %08x, a0 = %08x, ps = %08x, ccount = %08x\n",
- __func__, env->exception_index,
+ __func__, cs->exception_index,
env->pc, env->regs[0], env->sregs[PS], env->sregs[CCOUNT]);
- if (env->config->exception_vector[env->exception_index]) {
+ if (env->config->exception_vector[cs->exception_index]) {
env->pc = relocated_vector(env,
- env->config->exception_vector[env->exception_index]);
+ env->config->exception_vector[cs->exception_index]);
env->exception_taken = 1;
} else {
qemu_log("%s(pc = %08x) bad exception_index: %d\n",
- __func__, env->pc, env->exception_index);
+ __func__, env->pc, cs->exception_index);
}
break;
@@ -245,7 +249,7 @@ void xtensa_cpu_do_interrupt(CPUState *cs)
default:
qemu_log("%s(pc = %08x) unknown exception_index: %d\n",
- __func__, env->pc, env->exception_index);
+ __func__, env->pc, cs->exception_index);
break;
}
check_interrupts(env);
@@ -552,7 +556,7 @@ static int get_physical_addr_mmu(CPUXtensaState *env, bool update_tlb,
static int get_pte(CPUXtensaState *env, uint32_t vaddr, uint32_t *pte)
{
- CPUState *cs = ENV_GET_CPU(env);
+ CPUState *cs = CPU(xtensa_env_get_cpu(env));
uint32_t paddr;
uint32_t page_size;
unsigned access;
diff --git a/target-xtensa/op_helper.c b/target-xtensa/op_helper.c
index 509ba49d60..b531019488 100644
--- a/target-xtensa/op_helper.c
+++ b/target-xtensa/op_helper.c
@@ -52,17 +52,21 @@ static void do_unaligned_access(CPUXtensaState *env,
static void do_unaligned_access(CPUXtensaState *env,
target_ulong addr, int is_write, int is_user, uintptr_t retaddr)
{
+ XtensaCPU *cpu = xtensa_env_get_cpu(env);
+
if (xtensa_option_enabled(env->config, XTENSA_OPTION_UNALIGNED_EXCEPTION) &&
!xtensa_option_enabled(env->config, XTENSA_OPTION_HW_ALIGNMENT)) {
- cpu_restore_state(env, retaddr);
+ cpu_restore_state(CPU(cpu), retaddr);
HELPER(exception_cause_vaddr)(env,
env->pc, LOAD_STORE_ALIGNMENT_CAUSE, addr);
}
}
-void tlb_fill(CPUXtensaState *env,
- target_ulong vaddr, int is_write, int mmu_idx, uintptr_t retaddr)
+void tlb_fill(CPUState *cs,
+ target_ulong vaddr, int is_write, int mmu_idx, uintptr_t retaddr)
{
+ XtensaCPU *cpu = XTENSA_CPU(cs);
+ CPUXtensaState *env = &cpu->env;
uint32_t paddr;
uint32_t page_size;
unsigned access;
@@ -73,12 +77,12 @@ void tlb_fill(CPUXtensaState *env,
vaddr, is_write, mmu_idx, paddr, ret);
if (ret == 0) {
- tlb_set_page(env,
- vaddr & TARGET_PAGE_MASK,
- paddr & TARGET_PAGE_MASK,
- access, mmu_idx, page_size);
+ tlb_set_page(cs,
+ vaddr & TARGET_PAGE_MASK,
+ paddr & TARGET_PAGE_MASK,
+ access, mmu_idx, page_size);
} else {
- cpu_restore_state(env, retaddr);
+ cpu_restore_state(cs, retaddr);
HELPER(exception_cause_vaddr)(env, env->pc, ret, vaddr);
}
}
@@ -97,11 +101,13 @@ static void tb_invalidate_virtual_addr(CPUXtensaState *env, uint32_t vaddr)
void HELPER(exception)(CPUXtensaState *env, uint32_t excp)
{
- env->exception_index = excp;
+ CPUState *cs = CPU(xtensa_env_get_cpu(env));
+
+ cs->exception_index = excp;
if (excp == EXCP_DEBUG) {
env->exception_taken = 0;
}
- cpu_loop_exit(env);
+ cpu_loop_exit(cs);
}
void HELPER(exception_cause)(CPUXtensaState *env, uint32_t pc, uint32_t cause)
@@ -387,7 +393,7 @@ void HELPER(waiti)(CPUXtensaState *env, uint32_t pc, uint32_t intlevel)
(intlevel << PS_INTLEVEL_SHIFT);
check_interrupts(env);
if (env->pending_irq_level) {
- cpu_loop_exit(env);
+ cpu_loop_exit(CPU(xtensa_env_get_cpu(env)));
return;
}
@@ -481,10 +487,12 @@ void HELPER(check_atomctl)(CPUXtensaState *env, uint32_t pc, uint32_t vaddr)
void HELPER(wsr_rasid)(CPUXtensaState *env, uint32_t v)
{
+ XtensaCPU *cpu = xtensa_env_get_cpu(env);
+
v = (v & 0xffffff00) | 0x1;
if (v != env->sregs[RASID]) {
env->sregs[RASID] = v;
- tlb_flush(env, 1);
+ tlb_flush(CPU(cpu), 1);
}
}
@@ -681,7 +689,7 @@ void HELPER(itlb)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
uint32_t wi;
xtensa_tlb_entry *entry = get_tlb_entry(env, v, dtlb, &wi);
if (entry->variable && entry->asid) {
- tlb_flush_page(env, entry->vaddr);
+ tlb_flush_page(CPU(xtensa_env_get_cpu(env)), entry->vaddr);
entry->asid = 0;
}
}
@@ -726,21 +734,23 @@ void xtensa_tlb_set_entry_mmu(const CPUXtensaState *env,
void xtensa_tlb_set_entry(CPUXtensaState *env, bool dtlb,
unsigned wi, unsigned ei, uint32_t vpn, uint32_t pte)
{
+ XtensaCPU *cpu = xtensa_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
xtensa_tlb_entry *entry = xtensa_tlb_get_entry(env, dtlb, wi, ei);
if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
if (entry->variable) {
if (entry->asid) {
- tlb_flush_page(env, entry->vaddr);
+ tlb_flush_page(cs, entry->vaddr);
}
xtensa_tlb_set_entry_mmu(env, entry, dtlb, wi, ei, vpn, pte);
- tlb_flush_page(env, entry->vaddr);
+ tlb_flush_page(cs, entry->vaddr);
} else {
qemu_log("%s %d, %d, %d trying to set immutable entry\n",
__func__, dtlb, wi, ei);
}
} else {
- tlb_flush_page(env, entry->vaddr);
+ tlb_flush_page(cs, entry->vaddr);
if (xtensa_option_enabled(env->config,
XTENSA_OPTION_REGION_TRANSLATION)) {
entry->paddr = pte & REGION_PAGE_MASK;
@@ -784,11 +794,12 @@ void HELPER(wsr_ibreaka)(CPUXtensaState *env, uint32_t i, uint32_t v)
static void set_dbreak(CPUXtensaState *env, unsigned i, uint32_t dbreaka,
uint32_t dbreakc)
{
+ CPUState *cs = CPU(xtensa_env_get_cpu(env));
int flags = BP_CPU | BP_STOP_BEFORE_ACCESS;
uint32_t mask = dbreakc | ~DBREAKC_MASK;
if (env->cpu_watchpoint[i]) {
- cpu_watchpoint_remove_by_ref(env, env->cpu_watchpoint[i]);
+ cpu_watchpoint_remove_by_ref(cs, env->cpu_watchpoint[i]);
}
if (dbreakc & DBREAKC_SB) {
flags |= BP_MEM_WRITE;
@@ -802,7 +813,7 @@ static void set_dbreak(CPUXtensaState *env, unsigned i, uint32_t dbreaka,
/* cut mask after the first zero bit */
mask = 0xffffffff << (32 - clo32(mask));
}
- if (cpu_watchpoint_insert(env, dbreaka & mask, ~mask + 1,
+ if (cpu_watchpoint_insert(cs, dbreaka & mask, ~mask + 1,
flags, &env->cpu_watchpoint[i])) {
env->cpu_watchpoint[i] = NULL;
qemu_log("Failed to set data breakpoint at 0x%08x/%d\n",
@@ -828,7 +839,9 @@ void HELPER(wsr_dbreakc)(CPUXtensaState *env, uint32_t i, uint32_t v)
set_dbreak(env, i, env->sregs[DBREAKA + i], v);
} else {
if (env->cpu_watchpoint[i]) {
- cpu_watchpoint_remove_by_ref(env, env->cpu_watchpoint[i]);
+ CPUState *cs = CPU(xtensa_env_get_cpu(env));
+
+ cpu_watchpoint_remove_by_ref(cs, env->cpu_watchpoint[i]);
env->cpu_watchpoint[i] = NULL;
}
}
diff --git a/target-xtensa/translate.c b/target-xtensa/translate.c
index 9f5895e021..764cee96f3 100644
--- a/target-xtensa/translate.c
+++ b/target-xtensa/translate.c
@@ -2948,10 +2948,11 @@ invalid_opcode:
static void check_breakpoint(CPUXtensaState *env, DisasContext *dc)
{
+ CPUState *cs = CPU(xtensa_env_get_cpu(env));
CPUBreakpoint *bp;
- if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
- QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
+ if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
+ QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
if (bp->pc == dc->pc) {
tcg_gen_movi_i32(cpu_pc, dc->pc);
gen_exception(dc, EXCP_DEBUG);
diff --git a/tcg/README b/tcg/README
index f1782123b7..776e9259e3 100644
--- a/tcg/README
+++ b/tcg/README
@@ -36,6 +36,12 @@ or a memory location which is stored in a register outside QEMU TBs
A TCG "basic block" corresponds to a list of instructions terminated
by a branch instruction.
+An operation with "undefined behavior" may result in a crash.
+
+An operation with "unspecified behavior" shall not crash. However,
+the result may be one of several possibilities so may be considered
+an "undefined result".
+
3) Intermediate representation
3.1) Introduction
@@ -239,23 +245,25 @@ t0=t1|~t2
* shl_i32/i64 t0, t1, t2
-t0=t1 << t2. Undefined behavior if t2 < 0 or t2 >= 32 (resp 64)
+t0=t1 << t2. Unspecified behavior if t2 < 0 or t2 >= 32 (resp 64)
* shr_i32/i64 t0, t1, t2
-t0=t1 >> t2 (unsigned). Undefined behavior if t2 < 0 or t2 >= 32 (resp 64)
+t0=t1 >> t2 (unsigned). Unspecified behavior if t2 < 0 or t2 >= 32 (resp 64)
* sar_i32/i64 t0, t1, t2
-t0=t1 >> t2 (signed). Undefined behavior if t2 < 0 or t2 >= 32 (resp 64)
+t0=t1 >> t2 (signed). Unspecified behavior if t2 < 0 or t2 >= 32 (resp 64)
* rotl_i32/i64 t0, t1, t2
-Rotation of t2 bits to the left. Undefined behavior if t2 < 0 or t2 >= 32 (resp 64)
+Rotation of t2 bits to the left.
+Unspecified behavior if t2 < 0 or t2 >= 32 (resp 64)
* rotr_i32/i64 t0, t1, t2
-Rotation of t2 bits to the right. Undefined behavior if t2 < 0 or t2 >= 32 (resp 64)
+Rotation of t2 bits to the right.
+Unspecified behavior if t2 < 0 or t2 >= 32 (resp 64)
********* Misc
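Editor's sketch (not part of this patch): a minimal example of how a target
frontend typically copes with the unspecified shift counts documented above --
the dynamic count is masked to the operand width before the shift op is
emitted. The helper name gen_shl32_clamped is hypothetical; the tcg_gen_*
calls are the usual tcg-op.h interfaces.

static void gen_shl32_clamped(TCGv_i32 dst, TCGv_i32 src, TCGv_i32 count)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    /* Clamp the guest count into [0, 31] so shl_i32 never sees an
       out-of-range value and the result stays well defined. */
    tcg_gen_andi_i32(tmp, count, 31);
    tcg_gen_shl_i32(dst, src, tmp);
    tcg_temp_free_i32(tmp);
}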
diff --git a/tcg/aarch64/tcg-target.c b/tcg/aarch64/tcg-target.c
index f43eb676bf..0a580b65ea 100644
--- a/tcg/aarch64/tcg-target.c
+++ b/tcg/aarch64/tcg-target.c
@@ -23,34 +23,26 @@ static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
"%x0", "%x1", "%x2", "%x3", "%x4", "%x5", "%x6", "%x7",
"%x8", "%x9", "%x10", "%x11", "%x12", "%x13", "%x14", "%x15",
"%x16", "%x17", "%x18", "%x19", "%x20", "%x21", "%x22", "%x23",
- "%x24", "%x25", "%x26", "%x27", "%x28",
- "%fp", /* frame pointer */
- "%lr", /* link register */
- "%sp", /* stack pointer */
+ "%x24", "%x25", "%x26", "%x27", "%x28", "%fp", "%x30", "%sp",
};
#endif /* NDEBUG */
-#ifdef TARGET_WORDS_BIGENDIAN
- #define TCG_LDST_BSWAP 1
-#else
- #define TCG_LDST_BSWAP 0
-#endif
-
static const int tcg_target_reg_alloc_order[] = {
TCG_REG_X20, TCG_REG_X21, TCG_REG_X22, TCG_REG_X23,
TCG_REG_X24, TCG_REG_X25, TCG_REG_X26, TCG_REG_X27,
TCG_REG_X28, /* we will reserve this for GUEST_BASE if configured */
- TCG_REG_X9, TCG_REG_X10, TCG_REG_X11, TCG_REG_X12,
- TCG_REG_X13, TCG_REG_X14, TCG_REG_X15,
+ TCG_REG_X8, TCG_REG_X9, TCG_REG_X10, TCG_REG_X11,
+ TCG_REG_X12, TCG_REG_X13, TCG_REG_X14, TCG_REG_X15,
TCG_REG_X16, TCG_REG_X17,
- TCG_REG_X18, TCG_REG_X19, /* will not use these, see tcg_target_init */
-
TCG_REG_X0, TCG_REG_X1, TCG_REG_X2, TCG_REG_X3,
TCG_REG_X4, TCG_REG_X5, TCG_REG_X6, TCG_REG_X7,
- TCG_REG_X8, /* will not use, see tcg_target_init */
+ /* X18 reserved by system */
+ /* X19 reserved for AREG0 */
+ /* X29 reserved as fp */
+ /* X30 reserved as temporary */
};
static const int tcg_target_call_iarg_regs[8] = {
@@ -61,13 +53,13 @@ static const int tcg_target_call_oarg_regs[1] = {
TCG_REG_X0
};
-#define TCG_REG_TMP TCG_REG_X8
+#define TCG_REG_TMP TCG_REG_X30
#ifndef CONFIG_SOFTMMU
-# if defined(CONFIG_USE_GUEST_BASE)
-# define TCG_REG_GUEST_BASE TCG_REG_X28
+# ifdef CONFIG_USE_GUEST_BASE
+# define TCG_REG_GUEST_BASE TCG_REG_X28
# else
-# define TCG_REG_GUEST_BASE TCG_REG_XZR
+# define TCG_REG_GUEST_BASE TCG_REG_XZR
# endif
#endif
@@ -110,6 +102,11 @@ static inline void patch_reloc(uint8_t *code_ptr, int type,
}
}
+#define TCG_CT_CONST_AIMM 0x100
+#define TCG_CT_CONST_LIMM 0x200
+#define TCG_CT_CONST_ZERO 0x400
+#define TCG_CT_CONST_MONE 0x800
+
/* parse target specific constraints */
static int target_parse_constraint(TCGArgConstraint *ct,
const char **pct_str)
@@ -133,6 +130,18 @@ static int target_parse_constraint(TCGArgConstraint *ct,
tcg_regset_reset_reg(ct->u.regs, TCG_REG_X3);
#endif
break;
+ case 'A': /* Valid for arithmetic immediate (positive or negative). */
+ ct->ct |= TCG_CT_CONST_AIMM;
+ break;
+ case 'L': /* Valid for logical immediate. */
+ ct->ct |= TCG_CT_CONST_LIMM;
+ break;
+ case 'M': /* minus one */
+ ct->ct |= TCG_CT_CONST_MONE;
+ break;
+ case 'Z': /* zero */
+ ct->ct |= TCG_CT_CONST_ZERO;
+ break;
default:
return -1;
}
@@ -142,14 +151,54 @@ static int target_parse_constraint(TCGArgConstraint *ct,
return 0;
}
-static inline int tcg_target_const_match(tcg_target_long val,
- const TCGArgConstraint *arg_ct)
+static inline bool is_aimm(uint64_t val)
+{
+ return (val & ~0xfff) == 0 || (val & ~0xfff000) == 0;
+}
+
+static inline bool is_limm(uint64_t val)
+{
+ /* Taking a simplified view of the logical immediates for now, ignoring
+ the replication that can happen across the field. Match bit patterns
+ of the forms
+ 0....01....1
+ 0..01..10..0
+ and their inverses. */
+
+ /* Make things easier below, by testing the form with msb clear. */
+ if ((int64_t)val < 0) {
+ val = ~val;
+ }
+ if (val == 0) {
+ return false;
+ }
+ val += val & -val;
+ return (val & (val - 1)) == 0;
+}
+
+static int tcg_target_const_match(tcg_target_long val, TCGType type,
+ const TCGArgConstraint *arg_ct)
{
int ct = arg_ct->ct;
if (ct & TCG_CT_CONST) {
return 1;
}
+ if (type == TCG_TYPE_I32) {
+ val = (int32_t)val;
+ }
+ if ((ct & TCG_CT_CONST_AIMM) && (is_aimm(val) || is_aimm(-val))) {
+ return 1;
+ }
+ if ((ct & TCG_CT_CONST_LIMM) && is_limm(val)) {
+ return 1;
+ }
+ if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
+ return 1;
+ }
+ if ((ct & TCG_CT_CONST_MONE) && val == -1) {
+ return 1;
+ }
return 0;
}
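/* Editor's worked examples (editor's own values, not from the patch) for
   the simplified is_limm() test above:
       0x00000000000000ff   single run of ones at bit 0      -> accepted
       0x0000000000ff0000   single run of ones, shifted up   -> accepted
       0xffffffffffffff00   negative, so tested inverted     -> accepted
       0x00000000ff00ff00   two separate runs of ones        -> rejected
   Replicated patterns that the hardware encoding would also accept, such
   as 0x0101010101010101, are deliberately not matched here. */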
@@ -189,313 +238,505 @@ static const enum aarch64_cond_code tcg_cond_to_aarch64[] = {
[TCG_COND_LEU] = COND_LS,
};
-/* opcodes for LDR / STR instructions with base + simm9 addressing */
-enum aarch64_ldst_op_data { /* size of the data moved */
- LDST_8 = 0x38,
- LDST_16 = 0x78,
- LDST_32 = 0xb8,
- LDST_64 = 0xf8,
-};
-enum aarch64_ldst_op_type { /* type of operation */
- LDST_ST = 0x0, /* store */
- LDST_LD = 0x4, /* load */
- LDST_LD_S_X = 0x8, /* load and sign-extend into Xt */
- LDST_LD_S_W = 0xc, /* load and sign-extend into Wt */
-};
+typedef enum {
+ LDST_ST = 0, /* store */
+ LDST_LD = 1, /* load */
+ LDST_LD_S_X = 2, /* load and sign-extend into Xt */
+ LDST_LD_S_W = 3, /* load and sign-extend into Wt */
+} AArch64LdstType;
+
+/* We encode the format of the insn into the beginning of the name, so that
+ we can have the preprocessor help "typecheck" the insn vs the output
+ function. Arm didn't provide us with nice names for the formats, so we
+ use the section number of the architecture reference manual in which the
+ instruction group is described. */
+typedef enum {
+ /* Compare and branch (immediate). */
+ I3201_CBZ = 0x34000000,
+ I3201_CBNZ = 0x35000000,
+
+ /* Conditional branch (immediate). */
+ I3202_B_C = 0x54000000,
+
+ /* Unconditional branch (immediate). */
+ I3206_B = 0x14000000,
+ I3206_BL = 0x94000000,
+
+ /* Unconditional branch (register). */
+ I3207_BR = 0xd61f0000,
+ I3207_BLR = 0xd63f0000,
+ I3207_RET = 0xd65f0000,
+
+ /* Load/store register. Described here as 3.3.12, but the helper
+ that emits them can transform to 3.3.10 or 3.3.13. */
+ I3312_STRB = 0x38000000 | LDST_ST << 22 | MO_8 << 30,
+ I3312_STRH = 0x38000000 | LDST_ST << 22 | MO_16 << 30,
+ I3312_STRW = 0x38000000 | LDST_ST << 22 | MO_32 << 30,
+ I3312_STRX = 0x38000000 | LDST_ST << 22 | MO_64 << 30,
+
+ I3312_LDRB = 0x38000000 | LDST_LD << 22 | MO_8 << 30,
+ I3312_LDRH = 0x38000000 | LDST_LD << 22 | MO_16 << 30,
+ I3312_LDRW = 0x38000000 | LDST_LD << 22 | MO_32 << 30,
+ I3312_LDRX = 0x38000000 | LDST_LD << 22 | MO_64 << 30,
+
+ I3312_LDRSBW = 0x38000000 | LDST_LD_S_W << 22 | MO_8 << 30,
+ I3312_LDRSHW = 0x38000000 | LDST_LD_S_W << 22 | MO_16 << 30,
+
+ I3312_LDRSBX = 0x38000000 | LDST_LD_S_X << 22 | MO_8 << 30,
+ I3312_LDRSHX = 0x38000000 | LDST_LD_S_X << 22 | MO_16 << 30,
+ I3312_LDRSWX = 0x38000000 | LDST_LD_S_X << 22 | MO_32 << 30,
+
+ I3312_TO_I3310 = 0x00206800,
+ I3312_TO_I3313 = 0x01000000,
+
+ /* Load/store register pair instructions. */
+ I3314_LDP = 0x28400000,
+ I3314_STP = 0x28000000,
+
+ /* Add/subtract immediate instructions. */
+ I3401_ADDI = 0x11000000,
+ I3401_ADDSI = 0x31000000,
+ I3401_SUBI = 0x51000000,
+ I3401_SUBSI = 0x71000000,
+
+ /* Bitfield instructions. */
+ I3402_BFM = 0x33000000,
+ I3402_SBFM = 0x13000000,
+ I3402_UBFM = 0x53000000,
+
+ /* Extract instruction. */
+ I3403_EXTR = 0x13800000,
+
+ /* Logical immediate instructions. */
+ I3404_ANDI = 0x12000000,
+ I3404_ORRI = 0x32000000,
+ I3404_EORI = 0x52000000,
+
+ /* Move wide immediate instructions. */
+ I3405_MOVN = 0x12800000,
+ I3405_MOVZ = 0x52800000,
+ I3405_MOVK = 0x72800000,
+
+ /* PC relative addressing instructions. */
+ I3406_ADR = 0x10000000,
+ I3406_ADRP = 0x90000000,
+
+ /* Add/subtract shifted register instructions (without a shift). */
+ I3502_ADD = 0x0b000000,
+ I3502_ADDS = 0x2b000000,
+ I3502_SUB = 0x4b000000,
+ I3502_SUBS = 0x6b000000,
+
+ /* Add/subtract shifted register instructions (with a shift). */
+ I3502S_ADD_LSL = I3502_ADD,
+
+ /* Add/subtract with carry instructions. */
+ I3503_ADC = 0x1a000000,
+ I3503_SBC = 0x5a000000,
+
+ /* Conditional select instructions. */
+ I3506_CSEL = 0x1a800000,
+ I3506_CSINC = 0x1a800400,
+
+ /* Data-processing (1 source) instructions. */
+ I3507_REV16 = 0x5ac00400,
+ I3507_REV32 = 0x5ac00800,
+ I3507_REV64 = 0x5ac00c00,
+
+ /* Data-processing (2 source) instructions. */
+ I3508_LSLV = 0x1ac02000,
+ I3508_LSRV = 0x1ac02400,
+ I3508_ASRV = 0x1ac02800,
+ I3508_RORV = 0x1ac02c00,
+ I3508_SMULH = 0x9b407c00,
+ I3508_UMULH = 0x9bc07c00,
+ I3508_UDIV = 0x1ac00800,
+ I3508_SDIV = 0x1ac00c00,
+
+ /* Data-processing (3 source) instructions. */
+ I3509_MADD = 0x1b000000,
+ I3509_MSUB = 0x1b008000,
+
+ /* Logical shifted register instructions (without a shift). */
+ I3510_AND = 0x0a000000,
+ I3510_BIC = 0x0a200000,
+ I3510_ORR = 0x2a000000,
+ I3510_ORN = 0x2a200000,
+ I3510_EOR = 0x4a000000,
+ I3510_EON = 0x4a200000,
+ I3510_ANDS = 0x6a000000,
+} AArch64Insn;
-enum aarch64_arith_opc {
- ARITH_AND = 0x0a,
- ARITH_ADD = 0x0b,
- ARITH_OR = 0x2a,
- ARITH_ADDS = 0x2b,
- ARITH_XOR = 0x4a,
- ARITH_SUB = 0x4b,
- ARITH_ANDS = 0x6a,
- ARITH_SUBS = 0x6b,
-};
+static inline uint32_t tcg_in32(TCGContext *s)
+{
+ uint32_t v = *(uint32_t *)s->code_ptr;
+ return v;
+}
-enum aarch64_srr_opc {
- SRR_SHL = 0x0,
- SRR_SHR = 0x4,
- SRR_SAR = 0x8,
- SRR_ROR = 0xc
-};
+/* Emit an opcode with "type-checking" of the format. */
+#define tcg_out_insn(S, FMT, OP, ...) \
+ glue(tcg_out_insn_,FMT)(S, glue(glue(glue(I,FMT),_),OP), ## __VA_ARGS__)
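/* Editor's illustration (not part of the patch): with the macro above,
       tcg_out_insn(s, 3401, ADDI, ext, rd, rn, 0);
   expands to
       tcg_out_insn_3401(s, I3401_ADDI, ext, rd, rn, 0);
   so pairing a format suffix with an opcode from a different group simply
   fails to compile -- the "typechecking" the comment above refers to. */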
-static inline enum aarch64_ldst_op_data
-aarch64_ldst_get_data(TCGOpcode tcg_op)
+static void tcg_out_insn_3201(TCGContext *s, AArch64Insn insn, TCGType ext,
+ TCGReg rt, int imm19)
{
- switch (tcg_op) {
- case INDEX_op_ld8u_i32:
- case INDEX_op_ld8s_i32:
- case INDEX_op_ld8u_i64:
- case INDEX_op_ld8s_i64:
- case INDEX_op_st8_i32:
- case INDEX_op_st8_i64:
- return LDST_8;
+ tcg_out32(s, insn | ext << 31 | (imm19 & 0x7ffff) << 5 | rt);
+}
- case INDEX_op_ld16u_i32:
- case INDEX_op_ld16s_i32:
- case INDEX_op_ld16u_i64:
- case INDEX_op_ld16s_i64:
- case INDEX_op_st16_i32:
- case INDEX_op_st16_i64:
- return LDST_16;
+static void tcg_out_insn_3202(TCGContext *s, AArch64Insn insn,
+ TCGCond c, int imm19)
+{
+ tcg_out32(s, insn | tcg_cond_to_aarch64[c] | (imm19 & 0x7ffff) << 5);
+}
- case INDEX_op_ld_i32:
- case INDEX_op_st_i32:
- case INDEX_op_ld32u_i64:
- case INDEX_op_ld32s_i64:
- case INDEX_op_st32_i64:
- return LDST_32;
+static void tcg_out_insn_3206(TCGContext *s, AArch64Insn insn, int imm26)
+{
+ tcg_out32(s, insn | (imm26 & 0x03ffffff));
+}
- case INDEX_op_ld_i64:
- case INDEX_op_st_i64:
- return LDST_64;
+static void tcg_out_insn_3207(TCGContext *s, AArch64Insn insn, TCGReg rn)
+{
+ tcg_out32(s, insn | rn << 5);
+}
- default:
- tcg_abort();
+static void tcg_out_insn_3314(TCGContext *s, AArch64Insn insn,
+ TCGReg r1, TCGReg r2, TCGReg rn,
+ tcg_target_long ofs, bool pre, bool w)
+{
+ insn |= 1u << 31; /* ext */
+ insn |= pre << 24;
+ insn |= w << 23;
+
+ assert(ofs >= -0x200 && ofs < 0x200 && (ofs & 7) == 0);
+ insn |= (ofs & (0x7f << 3)) << (15 - 3);
+
+ tcg_out32(s, insn | r2 << 10 | rn << 5 | r1);
+}
+
+static void tcg_out_insn_3401(TCGContext *s, AArch64Insn insn, TCGType ext,
+ TCGReg rd, TCGReg rn, uint64_t aimm)
+{
+ if (aimm > 0xfff) {
+ assert((aimm & 0xfff) == 0);
+ aimm >>= 12;
+ assert(aimm <= 0xfff);
+ aimm |= 1 << 12; /* apply LSL 12 */
}
+ tcg_out32(s, insn | ext << 31 | aimm << 10 | rn << 5 | rd);
}
-static inline enum aarch64_ldst_op_type
-aarch64_ldst_get_type(TCGOpcode tcg_op)
+/* This function can be used for both 3.4.2 (Bitfield) and 3.4.4
+ (Logical immediate). Both insn groups have N, IMMR and IMMS fields
+ that feed the DecodeBitMasks pseudo function. */
+static void tcg_out_insn_3402(TCGContext *s, AArch64Insn insn, TCGType ext,
+ TCGReg rd, TCGReg rn, int n, int immr, int imms)
{
- switch (tcg_op) {
- case INDEX_op_st8_i32:
- case INDEX_op_st16_i32:
- case INDEX_op_st8_i64:
- case INDEX_op_st16_i64:
- case INDEX_op_st_i32:
- case INDEX_op_st32_i64:
- case INDEX_op_st_i64:
- return LDST_ST;
+ tcg_out32(s, insn | ext << 31 | n << 22 | immr << 16 | imms << 10
+ | rn << 5 | rd);
+}
- case INDEX_op_ld8u_i32:
- case INDEX_op_ld16u_i32:
- case INDEX_op_ld8u_i64:
- case INDEX_op_ld16u_i64:
- case INDEX_op_ld_i32:
- case INDEX_op_ld32u_i64:
- case INDEX_op_ld_i64:
- return LDST_LD;
+#define tcg_out_insn_3404 tcg_out_insn_3402
- case INDEX_op_ld8s_i32:
- case INDEX_op_ld16s_i32:
- return LDST_LD_S_W;
+static void tcg_out_insn_3403(TCGContext *s, AArch64Insn insn, TCGType ext,
+ TCGReg rd, TCGReg rn, TCGReg rm, int imms)
+{
+ tcg_out32(s, insn | ext << 31 | ext << 22 | rm << 16 | imms << 10
+ | rn << 5 | rd);
+}
- case INDEX_op_ld8s_i64:
- case INDEX_op_ld16s_i64:
- case INDEX_op_ld32s_i64:
- return LDST_LD_S_X;
+/* This function is used for the Move (wide immediate) instruction group.
+ Note that SHIFT is a full shift count, not the 2 bit HW field. */
+static void tcg_out_insn_3405(TCGContext *s, AArch64Insn insn, TCGType ext,
+ TCGReg rd, uint16_t half, unsigned shift)
+{
+ assert((shift & ~0x30) == 0);
+ tcg_out32(s, insn | ext << 31 | shift << (21 - 4) | half << 5 | rd);
+}
- default:
- tcg_abort();
- }
+static void tcg_out_insn_3406(TCGContext *s, AArch64Insn insn,
+ TCGReg rd, int64_t disp)
+{
+ tcg_out32(s, insn | (disp & 3) << 29 | (disp & 0x1ffffc) << (5 - 2) | rd);
}
-static inline uint32_t tcg_in32(TCGContext *s)
+/* This function is for 3.5.2 (Add/subtract shifted register), for
+ the rare occasion when we actually want to supply a shift amount. */
+static inline void tcg_out_insn_3502S(TCGContext *s, AArch64Insn insn,
+ TCGType ext, TCGReg rd, TCGReg rn,
+ TCGReg rm, int imm6)
{
- uint32_t v = *(uint32_t *)s->code_ptr;
- return v;
+ tcg_out32(s, insn | ext << 31 | rm << 16 | imm6 << 10 | rn << 5 | rd);
}
-static inline void tcg_out_ldst_9(TCGContext *s,
- enum aarch64_ldst_op_data op_data,
- enum aarch64_ldst_op_type op_type,
- TCGReg rd, TCGReg rn, tcg_target_long offset)
+/* This function is for 3.5.2 (Add/subtract shifted register),
+ and 3.5.10 (Logical shifted register), for the vast majorty of cases
+ when we don't want to apply a shift. Thus it can also be used for
+ 3.5.3 (Add/subtract with carry) and 3.5.8 (Data processing 2 source). */
+static void tcg_out_insn_3502(TCGContext *s, AArch64Insn insn, TCGType ext,
+ TCGReg rd, TCGReg rn, TCGReg rm)
{
- /* use LDUR with BASE register with 9bit signed unscaled offset */
- tcg_out32(s, op_data << 24 | op_type << 20
- | (offset & 0x1ff) << 12 | rn << 5 | rd);
+ tcg_out32(s, insn | ext << 31 | rm << 16 | rn << 5 | rd);
}
-/* tcg_out_ldst_12 expects a scaled unsigned immediate offset */
-static inline void tcg_out_ldst_12(TCGContext *s,
- enum aarch64_ldst_op_data op_data,
- enum aarch64_ldst_op_type op_type,
- TCGReg rd, TCGReg rn,
- tcg_target_ulong scaled_uimm)
+#define tcg_out_insn_3503 tcg_out_insn_3502
+#define tcg_out_insn_3508 tcg_out_insn_3502
+#define tcg_out_insn_3510 tcg_out_insn_3502
+
+static void tcg_out_insn_3506(TCGContext *s, AArch64Insn insn, TCGType ext,
+ TCGReg rd, TCGReg rn, TCGReg rm, TCGCond c)
{
- tcg_out32(s, (op_data | 1) << 24
- | op_type << 20 | scaled_uimm << 10 | rn << 5 | rd);
+ tcg_out32(s, insn | ext << 31 | rm << 16 | rn << 5 | rd
+ | tcg_cond_to_aarch64[c] << 12);
}
-static inline void tcg_out_movr(TCGContext *s, TCGType ext,
- TCGReg rd, TCGReg src)
+static void tcg_out_insn_3507(TCGContext *s, AArch64Insn insn, TCGType ext,
+ TCGReg rd, TCGReg rn)
{
- /* register to register move using MOV (shifted register with no shift) */
- /* using MOV 0x2a0003e0 | (shift).. */
- unsigned int base = ext ? 0xaa0003e0 : 0x2a0003e0;
- tcg_out32(s, base | src << 16 | rd);
+ tcg_out32(s, insn | ext << 31 | rn << 5 | rd);
}
-static inline void tcg_out_movi_aux(TCGContext *s,
- TCGReg rd, uint64_t value)
+static void tcg_out_insn_3509(TCGContext *s, AArch64Insn insn, TCGType ext,
+ TCGReg rd, TCGReg rn, TCGReg rm, TCGReg ra)
{
- uint32_t half, base, shift, movk = 0;
- /* construct halfwords of the immediate with MOVZ/MOVK with LSL */
- /* using MOVZ 0x52800000 | extended reg.. */
- base = (value > 0xffffffff) ? 0xd2800000 : 0x52800000;
- /* count trailing zeros in 16 bit steps, mapping 64 to 0. Emit the
- first MOVZ with the half-word immediate skipping the zeros, with a shift
- (LSL) equal to this number. Then morph all next instructions into MOVKs.
- Zero the processed half-word in the value, continue until empty.
- We build the final result 16bits at a time with up to 4 instructions,
- but do not emit instructions for 16bit zero holes. */
- do {
- shift = ctz64(value) & (63 & -16);
- half = (value >> shift) & 0xffff;
- tcg_out32(s, base | movk | shift << 17 | half << 5 | rd);
- movk = 0x20000000; /* morph next MOVZs into MOVKs */
- value &= ~(0xffffUL << shift);
- } while (value);
+ tcg_out32(s, insn | ext << 31 | rm << 16 | ra << 10 | rn << 5 | rd);
}
-static inline void tcg_out_movi(TCGContext *s, TCGType type,
- TCGReg rd, tcg_target_long value)
+static void tcg_out_insn_3310(TCGContext *s, AArch64Insn insn,
+ TCGReg rd, TCGReg base, TCGReg regoff)
{
- if (type == TCG_TYPE_I64) {
- tcg_out_movi_aux(s, rd, value);
- } else {
- tcg_out_movi_aux(s, rd, value & 0xffffffff);
- }
+ /* Note the AArch64Insn constants above are for C3.3.12. Adjust. */
+ tcg_out32(s, insn | I3312_TO_I3310 | regoff << 16 | base << 5 | rd);
}
-static inline void tcg_out_ldst_r(TCGContext *s,
- enum aarch64_ldst_op_data op_data,
- enum aarch64_ldst_op_type op_type,
- TCGReg rd, TCGReg base, TCGReg regoff)
+
+static void tcg_out_insn_3312(TCGContext *s, AArch64Insn insn,
+ TCGReg rd, TCGReg rn, intptr_t offset)
{
- /* load from memory to register using base + 64bit register offset */
- /* using f.e. STR Wt, [Xn, Xm] 0xb8600800|(regoff << 16)|(base << 5)|rd */
- /* the 0x6000 is for the "no extend field" */
- tcg_out32(s, 0x00206800
- | op_data << 24 | op_type << 20 | regoff << 16 | base << 5 | rd);
+ tcg_out32(s, insn | (offset & 0x1ff) << 12 | rn << 5 | rd);
}
-/* solve the whole ldst problem */
-static inline void tcg_out_ldst(TCGContext *s, enum aarch64_ldst_op_data data,
- enum aarch64_ldst_op_type type,
- TCGReg rd, TCGReg rn, tcg_target_long offset)
+static void tcg_out_insn_3313(TCGContext *s, AArch64Insn insn,
+ TCGReg rd, TCGReg rn, uintptr_t scaled_uimm)
{
- if (offset >= -256 && offset < 256) {
- tcg_out_ldst_9(s, data, type, rd, rn, offset);
+ /* Note the AArch64Insn constants above are for C3.3.12. Adjust. */
+ tcg_out32(s, insn | I3312_TO_I3313 | scaled_uimm << 10 | rn << 5 | rd);
+}
+
+/* Register to register move using ORR (shifted register with no shift). */
+static void tcg_out_movr(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rm)
+{
+ tcg_out_insn(s, 3510, ORR, ext, rd, TCG_REG_XZR, rm);
+}
+
+/* Register to register move using ADDI (move to/from SP). */
+static void tcg_out_movr_sp(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rn)
+{
+ tcg_out_insn(s, 3401, ADDI, ext, rd, rn, 0);
+}
+
+/* This function is used for the Logical (immediate) instruction group.
+ The value of LIMM must satisfy IS_LIMM. See the comment above about
+ only supporting simplified logical immediates. */
+static void tcg_out_logicali(TCGContext *s, AArch64Insn insn, TCGType ext,
+ TCGReg rd, TCGReg rn, uint64_t limm)
+{
+ unsigned h, l, r, c;
+
+ assert(is_limm(limm));
+
+ h = clz64(limm);
+ l = ctz64(limm);
+ if (l == 0) {
+ r = 0; /* form 0....01....1 */
+ c = ctz64(~limm) - 1;
+ if (h == 0) {
+ r = clz64(~limm); /* form 1..10..01..1 */
+ c += r;
+ }
+ } else {
+ r = 64 - l; /* form 1....10....0 or 0..01..10..0 */
+ c = r - h - 1;
+ }
+ if (ext == TCG_TYPE_I32) {
+ r &= 31;
+ c &= 31;
+ }
+
+ tcg_out_insn_3404(s, insn, ext, rd, rn, ext, r, c);
+}
+
+static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd,
+ tcg_target_long value)
+{
+ AArch64Insn insn;
+ int i, wantinv, shift;
+ tcg_target_long svalue = value;
+ tcg_target_long ivalue = ~value;
+ tcg_target_long imask;
+
+ /* For 32-bit values, discard potential garbage in value. For 64-bit
+ values within [2**31, 2**32-1], we can create smaller sequences by
+ interpreting this as a negative 32-bit number, while ensuring that
+ the high 32 bits are cleared by setting SF=0. */
+ if (type == TCG_TYPE_I32 || (value & ~0xffffffffull) == 0) {
+ svalue = (int32_t)value;
+ value = (uint32_t)value;
+ ivalue = (uint32_t)ivalue;
+ type = TCG_TYPE_I32;
+ }
+
+ /* Speed things up by handling the common case of small positive
+ and negative values specially. */
+ if ((value & ~0xffffull) == 0) {
+ tcg_out_insn(s, 3405, MOVZ, type, rd, value, 0);
+ return;
+ } else if ((ivalue & ~0xffffull) == 0) {
+ tcg_out_insn(s, 3405, MOVN, type, rd, ivalue, 0);
+ return;
+ }
+
+ /* Check for bitfield immediates. For the benefit of 32-bit quantities,
+ use the sign-extended value. That lets us match rotated values such
+ as 0xff0000ff with the same 64-bit logic matching 0xffffffffff0000ff. */
+ if (is_limm(svalue)) {
+ tcg_out_logicali(s, I3404_ORRI, type, rd, TCG_REG_XZR, svalue);
return;
}
- if (offset >= 256) {
- /* if the offset is naturally aligned and in range,
- then we can use the scaled uimm12 encoding */
- unsigned int s_bits = data >> 6;
- if (!(offset & ((1 << s_bits) - 1))) {
- tcg_target_ulong scaled_uimm = offset >> s_bits;
- if (scaled_uimm <= 0xfff) {
- tcg_out_ldst_12(s, data, type, rd, rn, scaled_uimm);
- return;
+ /* Look for host pointer values within 4G of the PC. This happens
+ often when loading pointers to QEMU's own data structures. */
+ if (type == TCG_TYPE_I64) {
+ tcg_target_long disp = (value >> 12) - ((intptr_t)s->code_ptr >> 12);
+ if (disp == sextract64(disp, 0, 21)) {
+ tcg_out_insn(s, 3406, ADRP, rd, disp);
+ if (value & 0xfff) {
+ tcg_out_insn(s, 3401, ADDI, type, rd, rd, value & 0xfff);
}
+ return;
}
}
- /* worst-case scenario, move offset to temp register, use reg offset */
- tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP, offset);
- tcg_out_ldst_r(s, data, type, rd, rn, TCG_REG_TMP);
+ /* Would it take fewer insns to begin with MOVN? For the value and its
+ inverse, count the number of 16-bit lanes that are 0. */
+ for (i = wantinv = imask = 0; i < 64; i += 16) {
+ tcg_target_long mask = 0xffffull << i;
+ if ((value & mask) == 0) {
+ wantinv -= 1;
+ }
+ if ((ivalue & mask) == 0) {
+ wantinv += 1;
+ imask |= mask;
+ }
+ }
+
+ /* If we had more 0xffff than 0x0000, invert VALUE and use MOVN. */
+ insn = I3405_MOVZ;
+ if (wantinv > 0) {
+ value = ivalue;
+ insn = I3405_MOVN;
+ }
+
+ /* Find the lowest lane that is not 0x0000. */
+ shift = ctz64(value) & (63 & -16);
+ tcg_out_insn_3405(s, insn, type, rd, value >> shift, shift);
+
+ if (wantinv > 0) {
+ /* Re-invert the value, so MOVK sees non-inverted bits. */
+ value = ~value;
+ /* Clear out all the 0xffff lanes. */
+ value ^= imask;
+ }
+ /* Clear out the lane that we just set. */
+ value &= ~(0xffffUL << shift);
+
+ /* Iterate until all lanes have been set, and thus cleared from VALUE. */
+ while (value) {
+ shift = ctz64(value) & (63 & -16);
+ tcg_out_insn(s, 3405, MOVK, type, rd, value >> shift, shift);
+ value &= ~(0xffffUL << shift);
+ }
}
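/* Editor's worked examples (editor's own values, not from the patch) for
   the MOVZ/MOVN/MOVK selection above:
       0x0000000012340000  ->  MOVZ wd, #0x1234, LSL #16             (1 insn)
       0xffffffffffff1234  ->  MOVN xd, #0xedcb                      (1 insn)
       0x00001234ffff5678  ->  MOVZ #0x5678, MOVK #0xffff LSL #16,
                               MOVK #0x1234 LSL #32                  (3 insns)
   The wantinv count simply picks whichever of VALUE and ~VALUE has more
   all-zero 16-bit lanes, so fewer MOVKs are needed afterwards. */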
-/* mov alias implemented with add immediate, useful to move to/from SP */
-static inline void tcg_out_movr_sp(TCGContext *s, TCGType ext,
- TCGReg rd, TCGReg rn)
+/* Define something more legible for general use. */
+#define tcg_out_ldst_r tcg_out_insn_3310
+
+static void tcg_out_ldst(TCGContext *s, AArch64Insn insn,
+ TCGReg rd, TCGReg rn, intptr_t offset)
{
- /* using ADD 0x11000000 | (ext) | rn << 5 | rd */
- unsigned int base = ext ? 0x91000000 : 0x11000000;
- tcg_out32(s, base | rn << 5 | rd);
+ TCGMemOp size = (uint32_t)insn >> 30;
+
+ /* If the offset is naturally aligned and in range, then we can
+ use the scaled uimm12 encoding */
+ if (offset >= 0 && !(offset & ((1 << size) - 1))) {
+ uintptr_t scaled_uimm = offset >> size;
+ if (scaled_uimm <= 0xfff) {
+ tcg_out_insn_3313(s, insn, rd, rn, scaled_uimm);
+ return;
+ }
+ }
+
+ /* Small signed offsets can use the unscaled encoding. */
+ if (offset >= -256 && offset < 256) {
+ tcg_out_insn_3312(s, insn, rd, rn, offset);
+ return;
+ }
+
+ /* Worst-case scenario, move offset to temp register, use reg offset. */
+ tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP, offset);
+ tcg_out_ldst_r(s, insn, rd, rn, TCG_REG_TMP);
}
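/* Editor's examples (editor's own values, not from the patch) of how the
   three addressing forms above are chosen for an 8-byte LDRX:
       offset 0x18      ->  scaled uimm12 form   (0x18 >> 3 == 3)
       offset -0x10     ->  unscaled simm9 form  (negative offsets never scale)
       offset 0x10000   ->  tcg_out_movi into TCG_REG_TMP, register-offset form */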
static inline void tcg_out_mov(TCGContext *s,
TCGType type, TCGReg ret, TCGReg arg)
{
if (ret != arg) {
- tcg_out_movr(s, type == TCG_TYPE_I64, ret, arg);
+ tcg_out_movr(s, type, ret, arg);
}
}
static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
TCGReg arg1, intptr_t arg2)
{
- tcg_out_ldst(s, (type == TCG_TYPE_I64) ? LDST_64 : LDST_32, LDST_LD,
+ tcg_out_ldst(s, type == TCG_TYPE_I32 ? I3312_LDRW : I3312_LDRX,
arg, arg1, arg2);
}
static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
TCGReg arg1, intptr_t arg2)
{
- tcg_out_ldst(s, (type == TCG_TYPE_I64) ? LDST_64 : LDST_32, LDST_ST,
+ tcg_out_ldst(s, type == TCG_TYPE_I32 ? I3312_STRW : I3312_STRX,
arg, arg1, arg2);
}
-static inline void tcg_out_arith(TCGContext *s, enum aarch64_arith_opc opc,
- TCGType ext, TCGReg rd, TCGReg rn, TCGReg rm,
- int shift_imm)
-{
- /* Using shifted register arithmetic operations */
- /* if extended register operation (64bit) just OR with 0x80 << 24 */
- unsigned int shift, base = ext ? (0x80 | opc) << 24 : opc << 24;
- if (shift_imm == 0) {
- shift = 0;
- } else if (shift_imm > 0) {
- shift = shift_imm << 10 | 1 << 22;
- } else /* (shift_imm < 0) */ {
- shift = (-shift_imm) << 10;
- }
- tcg_out32(s, base | rm << 16 | shift | rn << 5 | rd);
-}
-
-static inline void tcg_out_mul(TCGContext *s, TCGType ext,
- TCGReg rd, TCGReg rn, TCGReg rm)
+static inline void tcg_out_bfm(TCGContext *s, TCGType ext, TCGReg rd,
+ TCGReg rn, unsigned int a, unsigned int b)
{
- /* Using MADD 0x1b000000 with Ra = wzr alias MUL 0x1b007c00 */
- unsigned int base = ext ? 0x9b007c00 : 0x1b007c00;
- tcg_out32(s, base | rm << 16 | rn << 5 | rd);
-}
-
-static inline void tcg_out_shiftrot_reg(TCGContext *s,
- enum aarch64_srr_opc opc, TCGType ext,
- TCGReg rd, TCGReg rn, TCGReg rm)
-{
- /* using 2-source data processing instructions 0x1ac02000 */
- unsigned int base = ext ? 0x9ac02000 : 0x1ac02000;
- tcg_out32(s, base | rm << 16 | opc << 8 | rn << 5 | rd);
+ tcg_out_insn(s, 3402, BFM, ext, rd, rn, ext, a, b);
}
static inline void tcg_out_ubfm(TCGContext *s, TCGType ext, TCGReg rd,
TCGReg rn, unsigned int a, unsigned int b)
{
- /* Using UBFM 0x53000000 Wd, Wn, a, b */
- unsigned int base = ext ? 0xd3400000 : 0x53000000;
- tcg_out32(s, base | a << 16 | b << 10 | rn << 5 | rd);
+ tcg_out_insn(s, 3402, UBFM, ext, rd, rn, ext, a, b);
}
static inline void tcg_out_sbfm(TCGContext *s, TCGType ext, TCGReg rd,
TCGReg rn, unsigned int a, unsigned int b)
{
- /* Using SBFM 0x13000000 Wd, Wn, a, b */
- unsigned int base = ext ? 0x93400000 : 0x13000000;
- tcg_out32(s, base | a << 16 | b << 10 | rn << 5 | rd);
+ tcg_out_insn(s, 3402, SBFM, ext, rd, rn, ext, a, b);
}
static inline void tcg_out_extr(TCGContext *s, TCGType ext, TCGReg rd,
TCGReg rn, TCGReg rm, unsigned int a)
{
- /* Using EXTR 0x13800000 Wd, Wn, Wm, a */
- unsigned int base = ext ? 0x93c00000 : 0x13800000;
- tcg_out32(s, base | rm << 16 | a << 10 | rn << 5 | rd);
+ tcg_out_insn(s, 3403, EXTR, ext, rd, rn, rm, a);
}
static inline void tcg_out_shl(TCGContext *s, TCGType ext,
TCGReg rd, TCGReg rn, unsigned int m)
{
- int bits, max;
- bits = ext ? 64 : 32;
- max = bits - 1;
+ int bits = ext ? 64 : 32;
+ int max = bits - 1;
tcg_out_ubfm(s, ext, rd, rn, bits - (m & max), max - (m & max));
}
@@ -523,24 +764,34 @@ static inline void tcg_out_rotr(TCGContext *s, TCGType ext,
static inline void tcg_out_rotl(TCGContext *s, TCGType ext,
TCGReg rd, TCGReg rn, unsigned int m)
{
- int bits, max;
- bits = ext ? 64 : 32;
- max = bits - 1;
+ int bits = ext ? 64 : 32;
+ int max = bits - 1;
tcg_out_extr(s, ext, rd, rn, rn, bits - (m & max));
}
-static void tcg_out_cmp(TCGContext *s, TCGType ext, TCGReg rn, TCGReg rm)
+static inline void tcg_out_dep(TCGContext *s, TCGType ext, TCGReg rd,
+ TCGReg rn, unsigned lsb, unsigned width)
{
- /* Using CMP alias SUBS wzr, Wn, Wm */
- tcg_out_arith(s, ARITH_SUBS, ext, TCG_REG_XZR, rn, rm, 0);
+ unsigned size = ext ? 64 : 32;
+ unsigned a = (size - lsb) & (size - 1);
+ unsigned b = width - 1;
+ tcg_out_bfm(s, ext, rd, rn, a, b);
}
-static inline void tcg_out_cset(TCGContext *s, TCGType ext,
- TCGReg rd, TCGCond c)
+static void tcg_out_cmp(TCGContext *s, TCGType ext, TCGReg a,
+ tcg_target_long b, bool const_b)
{
- /* Using CSET alias of CSINC 0x1a800400 Xd, XZR, XZR, invert(cond) */
- unsigned int base = ext ? 0x9a9f07e0 : 0x1a9f07e0;
- tcg_out32(s, base | tcg_cond_to_aarch64[tcg_invert_cond(c)] << 12 | rd);
+ if (const_b) {
+ /* Using CMP or CMN aliases. */
+ if (b >= 0) {
+ tcg_out_insn(s, 3401, SUBSI, ext, TCG_REG_XZR, a, b);
+ } else {
+ tcg_out_insn(s, 3401, ADDSI, ext, TCG_REG_XZR, a, -b);
+ }
+ } else {
+ /* Using CMP alias SUBS wzr, Wn, Wm */
+ tcg_out_insn(s, 3502, SUBS, ext, TCG_REG_XZR, a, b);
+ }
}
static inline void tcg_out_goto(TCGContext *s, intptr_t target)
@@ -552,51 +803,29 @@ static inline void tcg_out_goto(TCGContext *s, intptr_t target)
tcg_abort();
}
- tcg_out32(s, 0x14000000 | (offset & 0x03ffffff));
+ tcg_out_insn(s, 3206, B, offset);
}
static inline void tcg_out_goto_noaddr(TCGContext *s)
{
- /* We pay attention here to not modify the branch target by
- reading from the buffer. This ensure that caches and memory are
- kept coherent during retranslation.
- Mask away possible garbage in the high bits for the first translation,
- while keeping the offset bits for retranslation. */
- uint32_t insn;
- insn = (tcg_in32(s) & 0x03ffffff) | 0x14000000;
- tcg_out32(s, insn);
+ /* We pay attention here to not modify the branch target by reading from
+ the buffer. This ensures that caches and memory are kept coherent during
+ retranslation. Mask away possible garbage in the high bits for the
+ first translation, while keeping the offset bits for retranslation. */
+ uint32_t old = tcg_in32(s);
+ tcg_out_insn(s, 3206, B, old);
}
static inline void tcg_out_goto_cond_noaddr(TCGContext *s, TCGCond c)
{
- /* see comments in tcg_out_goto_noaddr */
- uint32_t insn;
- insn = tcg_in32(s) & (0x07ffff << 5);
- insn |= 0x54000000 | tcg_cond_to_aarch64[c];
- tcg_out32(s, insn);
-}
-
-static inline void tcg_out_goto_cond(TCGContext *s, TCGCond c, intptr_t target)
-{
- intptr_t offset = (target - (intptr_t)s->code_ptr) / 4;
-
- if (offset < -0x40000 || offset >= 0x40000) {
- /* out of 19bit range */
- tcg_abort();
- }
-
- offset &= 0x7ffff;
- tcg_out32(s, 0x54000000 | tcg_cond_to_aarch64[c] | offset << 5);
+ /* See comments in tcg_out_goto_noaddr. */
+ uint32_t old = tcg_in32(s) >> 5;
+ tcg_out_insn(s, 3202, B_C, c, old);
}
static inline void tcg_out_callr(TCGContext *s, TCGReg reg)
{
- tcg_out32(s, 0xd63f0000 | reg << 5);
-}
-
-static inline void tcg_out_gotor(TCGContext *s, TCGReg reg)
-{
- tcg_out32(s, 0xd61f0000 | reg << 5);
+ tcg_out_insn(s, 3207, BLR, reg);
}
static inline void tcg_out_call(TCGContext *s, intptr_t target)
@@ -607,50 +836,10 @@ static inline void tcg_out_call(TCGContext *s, intptr_t target)
tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP, target);
tcg_out_callr(s, TCG_REG_TMP);
} else {
- tcg_out32(s, 0x94000000 | (offset & 0x03ffffff));
+ tcg_out_insn(s, 3206, BL, offset);
}
}
-/* encode a logical immediate, mapping user parameter
- M=set bits pattern length to S=M-1 */
-static inline unsigned int
-aarch64_limm(unsigned int m, unsigned int r)
-{
- assert(m > 0);
- return r << 16 | (m - 1) << 10;
-}
-
-/* test a register against an immediate bit pattern made of
- M set bits rotated right by R.
- Examples:
- to test a 32/64 reg against 0x00000007, pass M = 3, R = 0.
- to test a 32/64 reg against 0x000000ff, pass M = 8, R = 0.
- to test a 32bit reg against 0xff000000, pass M = 8, R = 8.
- to test a 32bit reg against 0xff0000ff, pass M = 16, R = 8.
- */
-static inline void tcg_out_tst(TCGContext *s, TCGType ext, TCGReg rn,
- unsigned int m, unsigned int r)
-{
- /* using TST alias of ANDS XZR, Xn,#bimm64 0x7200001f */
- unsigned int base = ext ? 0xf240001f : 0x7200001f;
- tcg_out32(s, base | aarch64_limm(m, r) | rn << 5);
-}
-
-/* and a register with a bit pattern, similarly to TST, no flags change */
-static inline void tcg_out_andi(TCGContext *s, TCGType ext, TCGReg rd,
- TCGReg rn, unsigned int m, unsigned int r)
-{
- /* using AND 0x12000000 */
- unsigned int base = ext ? 0x92400000 : 0x12000000;
- tcg_out32(s, base | aarch64_limm(m, r) | rn << 5 | rd);
-}
-
-static inline void tcg_out_ret(TCGContext *s)
-{
- /* emit RET { LR } */
- tcg_out32(s, 0xd65f03c0);
-}
-
void aarch64_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr)
{
intptr_t target = addr;
@@ -677,129 +866,168 @@ static inline void tcg_out_goto_label(TCGContext *s, int label_index)
}
}
-static inline void tcg_out_goto_label_cond(TCGContext *s,
- TCGCond c, int label_index)
+static void tcg_out_brcond(TCGContext *s, TCGMemOp ext, TCGCond c, TCGArg a,
+ TCGArg b, bool b_const, int label)
{
- TCGLabel *l = &s->labels[label_index];
+ TCGLabel *l = &s->labels[label];
+ intptr_t offset;
+ bool need_cmp;
+
+ if (b_const && b == 0 && (c == TCG_COND_EQ || c == TCG_COND_NE)) {
+ need_cmp = false;
+ } else {
+ need_cmp = true;
+ tcg_out_cmp(s, ext, a, b, b_const);
+ }
if (!l->has_value) {
- tcg_out_reloc(s, s->code_ptr, R_AARCH64_CONDBR19, label_index, 0);
- tcg_out_goto_cond_noaddr(s, c);
+ tcg_out_reloc(s, s->code_ptr, R_AARCH64_CONDBR19, label, 0);
+ offset = tcg_in32(s) >> 5;
+ } else {
+ offset = l->u.value - (uintptr_t)s->code_ptr;
+ offset >>= 2;
+ assert(offset >= -0x40000 && offset < 0x40000);
+ }
+
+ if (need_cmp) {
+ tcg_out_insn(s, 3202, B_C, c, offset);
+ } else if (c == TCG_COND_EQ) {
+ tcg_out_insn(s, 3201, CBZ, ext, a, offset);
} else {
- tcg_out_goto_cond(s, c, l->u.value);
+ tcg_out_insn(s, 3201, CBNZ, ext, a, offset);
}
}
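/* Editor's illustration (not part of the patch): for
       brcond_i64  x, 0, TCG_COND_EQ, label
   the code above now emits a single
       cbz  x, label
   with no preceding compare; every other condition, or a non-zero
   constant, still goes through tcg_out_cmp() followed by b.cond. */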
-static inline void tcg_out_rev(TCGContext *s, TCGType ext,
- TCGReg rd, TCGReg rm)
+static inline void tcg_out_rev64(TCGContext *s, TCGReg rd, TCGReg rn)
+{
+ tcg_out_insn(s, 3507, REV64, TCG_TYPE_I64, rd, rn);
+}
+
+static inline void tcg_out_rev32(TCGContext *s, TCGReg rd, TCGReg rn)
{
- /* using REV 0x5ac00800 */
- unsigned int base = ext ? 0xdac00c00 : 0x5ac00800;
- tcg_out32(s, base | rm << 5 | rd);
+ tcg_out_insn(s, 3507, REV32, TCG_TYPE_I32, rd, rn);
}
-static inline void tcg_out_rev16(TCGContext *s, TCGType ext,
- TCGReg rd, TCGReg rm)
+static inline void tcg_out_rev16(TCGContext *s, TCGReg rd, TCGReg rn)
{
- /* using REV16 0x5ac00400 */
- unsigned int base = ext ? 0xdac00400 : 0x5ac00400;
- tcg_out32(s, base | rm << 5 | rd);
+ tcg_out_insn(s, 3507, REV16, TCG_TYPE_I32, rd, rn);
}
-static inline void tcg_out_sxt(TCGContext *s, TCGType ext, int s_bits,
+static inline void tcg_out_sxt(TCGContext *s, TCGType ext, TCGMemOp s_bits,
TCGReg rd, TCGReg rn)
{
- /* using ALIASes SXTB 0x13001c00, SXTH 0x13003c00, SXTW 0x93407c00
- of SBFM Xd, Xn, #0, #7|15|31 */
- int bits = 8 * (1 << s_bits) - 1;
+ /* Using ALIASes SXTB, SXTH, SXTW, of SBFM Xd, Xn, #0, #7|15|31 */
+ int bits = (8 << s_bits) - 1;
tcg_out_sbfm(s, ext, rd, rn, 0, bits);
}
-static inline void tcg_out_uxt(TCGContext *s, int s_bits,
+static inline void tcg_out_uxt(TCGContext *s, TCGMemOp s_bits,
TCGReg rd, TCGReg rn)
{
- /* using ALIASes UXTB 0x53001c00, UXTH 0x53003c00
- of UBFM Wd, Wn, #0, #7|15 */
- int bits = 8 * (1 << s_bits) - 1;
+ /* Using ALIASes UXTB, UXTH of UBFM Wd, Wn, #0, #7|15 */
+ int bits = (8 << s_bits) - 1;
tcg_out_ubfm(s, 0, rd, rn, 0, bits);
}
-static inline void tcg_out_addi(TCGContext *s, TCGType ext,
- TCGReg rd, TCGReg rn, unsigned int aimm)
+static void tcg_out_addsubi(TCGContext *s, int ext, TCGReg rd,
+ TCGReg rn, int64_t aimm)
{
- /* add immediate aimm unsigned 12bit value (with LSL 0 or 12) */
- /* using ADD 0x11000000 | (ext) | (aimm << 10) | (rn << 5) | rd */
- unsigned int base = ext ? 0x91000000 : 0x11000000;
-
- if (aimm <= 0xfff) {
- aimm <<= 10;
+ if (aimm >= 0) {
+ tcg_out_insn(s, 3401, ADDI, ext, rd, rn, aimm);
} else {
- /* we can only shift left by 12, on assert we cannot represent */
- assert(!(aimm & 0xfff));
- assert(aimm <= 0xfff000);
- base |= 1 << 22; /* apply LSL 12 */
- aimm >>= 2;
+ tcg_out_insn(s, 3401, SUBI, ext, rd, rn, -aimm);
}
-
- tcg_out32(s, base | aimm | (rn << 5) | rd);
}
-static inline void tcg_out_subi(TCGContext *s, TCGType ext,
- TCGReg rd, TCGReg rn, unsigned int aimm)
+static inline void tcg_out_addsub2(TCGContext *s, int ext, TCGReg rl,
+ TCGReg rh, TCGReg al, TCGReg ah,
+ tcg_target_long bl, tcg_target_long bh,
+ bool const_bl, bool const_bh, bool sub)
{
- /* sub immediate aimm unsigned 12bit value (with LSL 0 or 12) */
- /* using SUB 0x51000000 | (ext) | (aimm << 10) | (rn << 5) | rd */
- unsigned int base = ext ? 0xd1000000 : 0x51000000;
+ TCGReg orig_rl = rl;
+ AArch64Insn insn;
+
+ if (rl == ah || (!const_bh && rl == bh)) {
+ rl = TCG_REG_TMP;
+ }
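+ /* Editor's note (comment added for clarity, not part of the patch): the
+ low half is redirected to TMP above so that writing it cannot clobber
+ AH or BH before the high half is computed; the real destination gets
+ the result back via the tcg_out_mov() at the end of the function. */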
- if (aimm <= 0xfff) {
- aimm <<= 10;
+ if (const_bl) {
+ insn = I3401_ADDSI;
+ if ((bl < 0) ^ sub) {
+ insn = I3401_SUBSI;
+ bl = -bl;
+ }
+ tcg_out_insn_3401(s, insn, ext, rl, al, bl);
} else {
- /* we can only shift left by 12, on assert we cannot represent */
- assert(!(aimm & 0xfff));
- assert(aimm <= 0xfff000);
- base |= 1 << 22; /* apply LSL 12 */
- aimm >>= 2;
+ tcg_out_insn_3502(s, sub ? I3502_SUBS : I3502_ADDS, ext, rl, al, bl);
+ }
+
+ insn = I3503_ADC;
+ if (const_bh) {
+ /* Note that the only two constants we support are 0 and -1, and
+ that SBC = rn + ~rm + c, so adc -1 is sbc 0, and vice-versa. */
+ if ((bh != 0) ^ sub) {
+ insn = I3503_SBC;
+ }
+ bh = TCG_REG_XZR;
+ } else if (sub) {
+ insn = I3503_SBC;
}
+ tcg_out_insn_3503(s, insn, ext, rh, ah, bh);
- tcg_out32(s, base | aimm | (rn << 5) | rd);
+ tcg_out_mov(s, ext, orig_rl, rl);
}
#ifdef CONFIG_SOFTMMU
/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
* int mmu_idx, uintptr_t ra)
*/
-static const void * const qemu_ld_helpers[4] = {
- helper_ret_ldub_mmu,
- helper_ret_lduw_mmu,
- helper_ret_ldul_mmu,
- helper_ret_ldq_mmu,
+static const void * const qemu_ld_helpers[16] = {
+ [MO_UB] = helper_ret_ldub_mmu,
+ [MO_LEUW] = helper_le_lduw_mmu,
+ [MO_LEUL] = helper_le_ldul_mmu,
+ [MO_LEQ] = helper_le_ldq_mmu,
+ [MO_BEUW] = helper_be_lduw_mmu,
+ [MO_BEUL] = helper_be_ldul_mmu,
+ [MO_BEQ] = helper_be_ldq_mmu,
};
/* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr,
* uintxx_t val, int mmu_idx, uintptr_t ra)
*/
-static const void * const qemu_st_helpers[4] = {
- helper_ret_stb_mmu,
- helper_ret_stw_mmu,
- helper_ret_stl_mmu,
- helper_ret_stq_mmu,
+static const void * const qemu_st_helpers[16] = {
+ [MO_UB] = helper_ret_stb_mmu,
+ [MO_LEUW] = helper_le_stw_mmu,
+ [MO_LEUL] = helper_le_stl_mmu,
+ [MO_LEQ] = helper_le_stq_mmu,
+ [MO_BEUW] = helper_be_stw_mmu,
+ [MO_BEUL] = helper_be_stl_mmu,
+ [MO_BEQ] = helper_be_stq_mmu,
};
+static inline void tcg_out_adr(TCGContext *s, TCGReg rd, uintptr_t addr)
+{
+ addr -= (uintptr_t)s->code_ptr;
+ assert(addr == sextract64(addr, 0, 21));
+ tcg_out_insn(s, 3406, ADR, rd, addr);
+}
+
static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
+ TCGMemOp opc = lb->opc;
+ TCGMemOp size = opc & MO_SIZE;
+
reloc_pc19(lb->label_ptr[0], (intptr_t)s->code_ptr);
- tcg_out_movr(s, 1, TCG_REG_X0, TCG_AREG0);
- tcg_out_movr(s, (TARGET_LONG_BITS == 64), TCG_REG_X1, lb->addrlo_reg);
+ tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_X0, TCG_AREG0);
+ tcg_out_mov(s, TARGET_LONG_BITS == 64, TCG_REG_X1, lb->addrlo_reg);
tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_X2, lb->mem_index);
- tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_X3, (tcg_target_long)lb->raddr);
- tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP,
- (tcg_target_long)qemu_ld_helpers[lb->opc & 3]);
- tcg_out_callr(s, TCG_REG_TMP);
- if (lb->opc & 0x04) {
- tcg_out_sxt(s, 1, lb->opc & 3, lb->datalo_reg, TCG_REG_X0);
+ tcg_out_adr(s, TCG_REG_X3, (intptr_t)lb->raddr);
+ tcg_out_call(s, (intptr_t)qemu_ld_helpers[opc & ~MO_SIGN]);
+ if (opc & MO_SIGN) {
+ tcg_out_sxt(s, TCG_TYPE_I64, size, lb->datalo_reg, TCG_REG_X0);
} else {
- tcg_out_movr(s, 1, lb->datalo_reg, TCG_REG_X0);
+ tcg_out_mov(s, size == MO_64, lb->datalo_reg, TCG_REG_X0);
}
tcg_out_goto(s, (intptr_t)lb->raddr);
@@ -807,20 +1035,21 @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
+ TCGMemOp opc = lb->opc;
+ TCGMemOp size = opc & MO_SIZE;
+
reloc_pc19(lb->label_ptr[0], (intptr_t)s->code_ptr);
- tcg_out_movr(s, 1, TCG_REG_X0, TCG_AREG0);
- tcg_out_movr(s, (TARGET_LONG_BITS == 64), TCG_REG_X1, lb->addrlo_reg);
- tcg_out_movr(s, 1, TCG_REG_X2, lb->datalo_reg);
+ tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_X0, TCG_AREG0);
+ tcg_out_mov(s, TARGET_LONG_BITS == 64, TCG_REG_X1, lb->addrlo_reg);
+ tcg_out_mov(s, size == MO_64, TCG_REG_X2, lb->datalo_reg);
tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_X3, lb->mem_index);
- tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_X4, (intptr_t)lb->raddr);
- tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP,
- (intptr_t)qemu_st_helpers[lb->opc & 3]);
- tcg_out_callr(s, TCG_REG_TMP);
- tcg_out_goto(s, (tcg_target_long)lb->raddr);
+ tcg_out_adr(s, TCG_REG_X4, (intptr_t)lb->raddr);
+ tcg_out_call(s, (intptr_t)qemu_st_helpers[opc]);
+ tcg_out_goto(s, (intptr_t)lb->raddr);
}
-static void add_qemu_ldst_label(TCGContext *s, int is_ld, int opc,
+static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOp opc,
TCGReg data_reg, TCGReg addr_reg,
int mem_index,
uint8_t *raddr, uint8_t *label_ptr)
@@ -840,96 +1069,108 @@ static void add_qemu_ldst_label(TCGContext *s, int is_ld, int opc,
slow path for the failure case, which will be patched later when finalizing
the slow path. Generated code returns the host addend in X1,
clobbers X0,X2,X3,TMP. */
-static void tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg,
- int s_bits, uint8_t **label_ptr, int mem_index, int is_read)
+static void tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg, TCGMemOp s_bits,
+ uint8_t **label_ptr, int mem_index, bool is_read)
{
TCGReg base = TCG_AREG0;
int tlb_offset = is_read ?
offsetof(CPUArchState, tlb_table[mem_index][0].addr_read)
: offsetof(CPUArchState, tlb_table[mem_index][0].addr_write);
+
/* Extract the TLB index from the address into X0.
X0<CPU_TLB_BITS:0> =
addr_reg<TARGET_PAGE_BITS+CPU_TLB_BITS:TARGET_PAGE_BITS> */
- tcg_out_ubfm(s, (TARGET_LONG_BITS == 64), TCG_REG_X0, addr_reg,
+ tcg_out_ubfm(s, TARGET_LONG_BITS == 64, TCG_REG_X0, addr_reg,
TARGET_PAGE_BITS, TARGET_PAGE_BITS + CPU_TLB_BITS);
+
/* Store the page mask part of the address and the low s_bits into X3.
Later this allows checking for equality and alignment at the same time.
X3 = addr_reg & (PAGE_MASK | ((1 << s_bits) - 1)) */
- tcg_out_andi(s, (TARGET_LONG_BITS == 64), TCG_REG_X3, addr_reg,
- (TARGET_LONG_BITS - TARGET_PAGE_BITS) + s_bits,
- (TARGET_LONG_BITS - TARGET_PAGE_BITS));
+ tcg_out_logicali(s, I3404_ANDI, TARGET_LONG_BITS == 64, TCG_REG_X3,
+ addr_reg, TARGET_PAGE_MASK | ((1 << s_bits) - 1));
+
/* Add any "high bits" from the tlb offset to the env address into X2,
- to take advantage of the LSL12 form of the addi instruction.
+ to take advantage of the LSL12 form of the ADDI instruction.
X2 = env + (tlb_offset & 0xfff000) */
- tcg_out_addi(s, 1, TCG_REG_X2, base, tlb_offset & 0xfff000);
+ if (tlb_offset & 0xfff000) {
+ tcg_out_insn(s, 3401, ADDI, TCG_TYPE_I64, TCG_REG_X2, base,
+ tlb_offset & 0xfff000);
+ base = TCG_REG_X2;
+ }
+
/* Merge the tlb index contribution into X2.
X2 = X2 + (X0 << CPU_TLB_ENTRY_BITS) */
- tcg_out_arith(s, ARITH_ADD, 1, TCG_REG_X2, TCG_REG_X2,
- TCG_REG_X0, -CPU_TLB_ENTRY_BITS);
+ tcg_out_insn(s, 3502S, ADD_LSL, TCG_TYPE_I64, TCG_REG_X2, base,
+ TCG_REG_X0, CPU_TLB_ENTRY_BITS);
+
/* Merge "low bits" from tlb offset, load the tlb comparator into X0.
X0 = load [X2 + (tlb_offset & 0x000fff)] */
- tcg_out_ldst(s, TARGET_LONG_BITS == 64 ? LDST_64 : LDST_32,
- LDST_LD, TCG_REG_X0, TCG_REG_X2,
- (tlb_offset & 0xfff));
+ tcg_out_ldst(s, TARGET_LONG_BITS == 32 ? I3312_LDRW : I3312_LDRX,
+ TCG_REG_X0, TCG_REG_X2, tlb_offset & 0xfff);
+
/* Load the tlb addend. Do that early to avoid stalling.
X1 = load [X2 + (tlb_offset & 0xfff) + offsetof(addend)] */
- tcg_out_ldst(s, LDST_64, LDST_LD, TCG_REG_X1, TCG_REG_X2,
+ tcg_out_ldst(s, I3312_LDRX, TCG_REG_X1, TCG_REG_X2,
(tlb_offset & 0xfff) + (offsetof(CPUTLBEntry, addend)) -
(is_read ? offsetof(CPUTLBEntry, addr_read)
: offsetof(CPUTLBEntry, addr_write)));
+
/* Perform the address comparison. */
- tcg_out_cmp(s, (TARGET_LONG_BITS == 64), TCG_REG_X0, TCG_REG_X3);
- *label_ptr = s->code_ptr;
+ tcg_out_cmp(s, (TARGET_LONG_BITS == 64), TCG_REG_X0, TCG_REG_X3, 0);
+
/* If not equal, we jump to the slow path. */
+ *label_ptr = s->code_ptr;
tcg_out_goto_cond_noaddr(s, TCG_COND_NE);
}
#endif /* CONFIG_SOFTMMU */
-static void tcg_out_qemu_ld_direct(TCGContext *s, int opc, TCGReg data_r,
- TCGReg addr_r, TCGReg off_r)
+static void tcg_out_qemu_ld_direct(TCGContext *s, TCGMemOp memop,
+ TCGReg data_r, TCGReg addr_r, TCGReg off_r)
{
- switch (opc) {
- case 0:
- tcg_out_ldst_r(s, LDST_8, LDST_LD, data_r, addr_r, off_r);
+ const TCGMemOp bswap = memop & MO_BSWAP;
+
+ switch (memop & MO_SSIZE) {
+ case MO_UB:
+ tcg_out_ldst_r(s, I3312_LDRB, data_r, addr_r, off_r);
break;
- case 0 | 4:
- tcg_out_ldst_r(s, LDST_8, LDST_LD_S_X, data_r, addr_r, off_r);
+ case MO_SB:
+ tcg_out_ldst_r(s, I3312_LDRSBX, data_r, addr_r, off_r);
break;
- case 1:
- tcg_out_ldst_r(s, LDST_16, LDST_LD, data_r, addr_r, off_r);
- if (TCG_LDST_BSWAP) {
- tcg_out_rev16(s, 0, data_r, data_r);
+ case MO_UW:
+ tcg_out_ldst_r(s, I3312_LDRH, data_r, addr_r, off_r);
+ if (bswap) {
+ tcg_out_rev16(s, data_r, data_r);
}
break;
- case 1 | 4:
- if (TCG_LDST_BSWAP) {
- tcg_out_ldst_r(s, LDST_16, LDST_LD, data_r, addr_r, off_r);
- tcg_out_rev16(s, 0, data_r, data_r);
- tcg_out_sxt(s, 1, 1, data_r, data_r);
+ case MO_SW:
+ if (bswap) {
+ tcg_out_ldst_r(s, I3312_LDRH, data_r, addr_r, off_r);
+ tcg_out_rev16(s, data_r, data_r);
+ tcg_out_sxt(s, TCG_TYPE_I64, MO_16, data_r, data_r);
} else {
- tcg_out_ldst_r(s, LDST_16, LDST_LD_S_X, data_r, addr_r, off_r);
+ tcg_out_ldst_r(s, I3312_LDRSHX, data_r, addr_r, off_r);
}
break;
- case 2:
- tcg_out_ldst_r(s, LDST_32, LDST_LD, data_r, addr_r, off_r);
- if (TCG_LDST_BSWAP) {
- tcg_out_rev(s, 0, data_r, data_r);
+ case MO_UL:
+ tcg_out_ldst_r(s, I3312_LDRW, data_r, addr_r, off_r);
+ if (bswap) {
+ tcg_out_rev32(s, data_r, data_r);
}
break;
- case 2 | 4:
- if (TCG_LDST_BSWAP) {
- tcg_out_ldst_r(s, LDST_32, LDST_LD, data_r, addr_r, off_r);
- tcg_out_rev(s, 0, data_r, data_r);
- tcg_out_sxt(s, 1, 2, data_r, data_r);
+ case MO_SL:
+ if (bswap) {
+ tcg_out_ldst_r(s, I3312_LDRW, data_r, addr_r, off_r);
+ tcg_out_rev32(s, data_r, data_r);
+ tcg_out_sxt(s, TCG_TYPE_I64, MO_32, data_r, data_r);
} else {
- tcg_out_ldst_r(s, LDST_32, LDST_LD_S_X, data_r, addr_r, off_r);
+ tcg_out_ldst_r(s, I3312_LDRSWX, data_r, addr_r, off_r);
}
break;
- case 3:
- tcg_out_ldst_r(s, LDST_64, LDST_LD, data_r, addr_r, off_r);
- if (TCG_LDST_BSWAP) {
- tcg_out_rev(s, 1, data_r, data_r);
+ case MO_Q:
+ tcg_out_ldst_r(s, I3312_LDRX, data_r, addr_r, off_r);
+ if (bswap) {
+ tcg_out_rev64(s, data_r, data_r);
}
break;
default:
@@ -937,141 +1178,77 @@ static void tcg_out_qemu_ld_direct(TCGContext *s, int opc, TCGReg data_r,
}
}
-static void tcg_out_qemu_st_direct(TCGContext *s, int opc, TCGReg data_r,
- TCGReg addr_r, TCGReg off_r)
+static void tcg_out_qemu_st_direct(TCGContext *s, TCGMemOp memop,
+ TCGReg data_r, TCGReg addr_r, TCGReg off_r)
{
- switch (opc) {
- case 0:
- tcg_out_ldst_r(s, LDST_8, LDST_ST, data_r, addr_r, off_r);
+ const TCGMemOp bswap = memop & MO_BSWAP;
+
+ switch (memop & MO_SIZE) {
+ case MO_8:
+ tcg_out_ldst_r(s, I3312_STRB, data_r, addr_r, off_r);
break;
- case 1:
- if (TCG_LDST_BSWAP) {
- tcg_out_rev16(s, 0, TCG_REG_TMP, data_r);
- tcg_out_ldst_r(s, LDST_16, LDST_ST, TCG_REG_TMP, addr_r, off_r);
- } else {
- tcg_out_ldst_r(s, LDST_16, LDST_ST, data_r, addr_r, off_r);
+ case MO_16:
+ if (bswap && data_r != TCG_REG_XZR) {
+ tcg_out_rev16(s, TCG_REG_TMP, data_r);
+ data_r = TCG_REG_TMP;
}
+ tcg_out_ldst_r(s, I3312_STRH, data_r, addr_r, off_r);
break;
- case 2:
- if (TCG_LDST_BSWAP) {
- tcg_out_rev(s, 0, TCG_REG_TMP, data_r);
- tcg_out_ldst_r(s, LDST_32, LDST_ST, TCG_REG_TMP, addr_r, off_r);
- } else {
- tcg_out_ldst_r(s, LDST_32, LDST_ST, data_r, addr_r, off_r);
+ case MO_32:
+ if (bswap && data_r != TCG_REG_XZR) {
+ tcg_out_rev32(s, TCG_REG_TMP, data_r);
+ data_r = TCG_REG_TMP;
}
+ tcg_out_ldst_r(s, I3312_STRW, data_r, addr_r, off_r);
break;
- case 3:
- if (TCG_LDST_BSWAP) {
- tcg_out_rev(s, 1, TCG_REG_TMP, data_r);
- tcg_out_ldst_r(s, LDST_64, LDST_ST, TCG_REG_TMP, addr_r, off_r);
- } else {
- tcg_out_ldst_r(s, LDST_64, LDST_ST, data_r, addr_r, off_r);
+ case MO_64:
+ if (bswap && data_r != TCG_REG_XZR) {
+ tcg_out_rev64(s, TCG_REG_TMP, data_r);
+ data_r = TCG_REG_TMP;
}
+ tcg_out_ldst_r(s, I3312_STRX, data_r, addr_r, off_r);
break;
default:
tcg_abort();
}
}
-static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
+static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
+ TCGMemOp memop, int mem_index)
{
- TCGReg addr_reg, data_reg;
#ifdef CONFIG_SOFTMMU
- int mem_index, s_bits;
+ TCGMemOp s_bits = memop & MO_SIZE;
uint8_t *label_ptr;
-#endif
- data_reg = args[0];
- addr_reg = args[1];
-#ifdef CONFIG_SOFTMMU
- mem_index = args[2];
- s_bits = opc & 3;
tcg_out_tlb_read(s, addr_reg, s_bits, &label_ptr, mem_index, 1);
- tcg_out_qemu_ld_direct(s, opc, data_reg, addr_reg, TCG_REG_X1);
- add_qemu_ldst_label(s, 1, opc, data_reg, addr_reg,
+ tcg_out_qemu_ld_direct(s, memop, data_reg, addr_reg, TCG_REG_X1);
+ add_qemu_ldst_label(s, true, memop, data_reg, addr_reg,
mem_index, s->code_ptr, label_ptr);
#else /* !CONFIG_SOFTMMU */
- tcg_out_qemu_ld_direct(s, opc, data_reg, addr_reg,
+ tcg_out_qemu_ld_direct(s, memop, data_reg, addr_reg,
GUEST_BASE ? TCG_REG_GUEST_BASE : TCG_REG_XZR);
#endif /* CONFIG_SOFTMMU */
}
-static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
+static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
+ TCGMemOp memop, int mem_index)
{
- TCGReg addr_reg, data_reg;
#ifdef CONFIG_SOFTMMU
- int mem_index, s_bits;
+ TCGMemOp s_bits = memop & MO_SIZE;
uint8_t *label_ptr;
-#endif
- data_reg = args[0];
- addr_reg = args[1];
-
-#ifdef CONFIG_SOFTMMU
- mem_index = args[2];
- s_bits = opc & 3;
tcg_out_tlb_read(s, addr_reg, s_bits, &label_ptr, mem_index, 0);
- tcg_out_qemu_st_direct(s, opc, data_reg, addr_reg, TCG_REG_X1);
- add_qemu_ldst_label(s, 0, opc, data_reg, addr_reg,
+ tcg_out_qemu_st_direct(s, memop, data_reg, addr_reg, TCG_REG_X1);
+ add_qemu_ldst_label(s, false, memop, data_reg, addr_reg,
mem_index, s->code_ptr, label_ptr);
#else /* !CONFIG_SOFTMMU */
- tcg_out_qemu_st_direct(s, opc, data_reg, addr_reg,
+ tcg_out_qemu_st_direct(s, memop, data_reg, addr_reg,
GUEST_BASE ? TCG_REG_GUEST_BASE : TCG_REG_XZR);
#endif /* CONFIG_SOFTMMU */
}
static uint8_t *tb_ret_addr;
-/* callee stack use example:
- stp x29, x30, [sp,#-32]!
- mov x29, sp
- stp x1, x2, [sp,#16]
- ...
- ldp x1, x2, [sp,#16]
- ldp x29, x30, [sp],#32
- ret
-*/
-
-/* push r1 and r2, and alloc stack space for a total of
- alloc_n elements (1 element=16 bytes, must be between 1 and 31. */
-static inline void tcg_out_push_pair(TCGContext *s, TCGReg addr,
- TCGReg r1, TCGReg r2, int alloc_n)
-{
- /* using indexed scaled simm7 STP 0x28800000 | (ext) | 0x01000000 (pre-idx)
- | alloc_n * (-1) << 16 | r2 << 10 | addr << 5 | r1 */
- assert(alloc_n > 0 && alloc_n < 0x20);
- alloc_n = (-alloc_n) & 0x3f;
- tcg_out32(s, 0xa9800000 | alloc_n << 16 | r2 << 10 | addr << 5 | r1);
-}
-
-/* dealloc stack space for a total of alloc_n elements and pop r1, r2. */
-static inline void tcg_out_pop_pair(TCGContext *s, TCGReg addr,
- TCGReg r1, TCGReg r2, int alloc_n)
-{
- /* using indexed scaled simm7 LDP 0x28c00000 | (ext) | nothing (post-idx)
- | alloc_n << 16 | r2 << 10 | addr << 5 | r1 */
- assert(alloc_n > 0 && alloc_n < 0x20);
- tcg_out32(s, 0xa8c00000 | alloc_n << 16 | r2 << 10 | addr << 5 | r1);
-}
-
-static inline void tcg_out_store_pair(TCGContext *s, TCGReg addr,
- TCGReg r1, TCGReg r2, int idx)
-{
- /* using register pair offset simm7 STP 0x29000000 | (ext)
- | idx << 16 | r2 << 10 | addr << 5 | r1 */
- assert(idx > 0 && idx < 0x20);
- tcg_out32(s, 0xa9000000 | idx << 16 | r2 << 10 | addr << 5 | r1);
-}
-
-static inline void tcg_out_load_pair(TCGContext *s, TCGReg addr,
- TCGReg r1, TCGReg r2, int idx)
-{
- /* using register pair offset simm7 LDP 0x29400000 | (ext)
- | idx << 16 | r2 << 10 | addr << 5 | r1 */
- assert(idx > 0 && idx < 0x20);
- tcg_out32(s, 0xa9400000 | idx << 16 | r2 << 10 | addr << 5 | r1);
-}
-
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
@@ -1086,6 +1263,10 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
TCGArg a2 = args[2];
int c2 = const_args[2];
+ /* Some operands are defined with "rZ" constraint, a register or
+ the zero register. These need not actually test args[I] == 0. */
+#define REG0(I) (const_args[I] ? TCG_REG_XZR : (TCGReg)args[I])
+
switch (opc) {
case INDEX_op_exit_tb:
tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_X0, a0);
@@ -1116,188 +1297,320 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
tcg_out_goto_label(s, a0);
break;
- case INDEX_op_ld_i32:
- case INDEX_op_ld_i64:
- case INDEX_op_st_i32:
- case INDEX_op_st_i64:
case INDEX_op_ld8u_i32:
- case INDEX_op_ld8s_i32:
- case INDEX_op_ld16u_i32:
- case INDEX_op_ld16s_i32:
case INDEX_op_ld8u_i64:
+ tcg_out_ldst(s, I3312_LDRB, a0, a1, a2);
+ break;
+ case INDEX_op_ld8s_i32:
+ tcg_out_ldst(s, I3312_LDRSBW, a0, a1, a2);
+ break;
case INDEX_op_ld8s_i64:
+ tcg_out_ldst(s, I3312_LDRSBX, a0, a1, a2);
+ break;
+ case INDEX_op_ld16u_i32:
case INDEX_op_ld16u_i64:
+ tcg_out_ldst(s, I3312_LDRH, a0, a1, a2);
+ break;
+ case INDEX_op_ld16s_i32:
+ tcg_out_ldst(s, I3312_LDRSHW, a0, a1, a2);
+ break;
case INDEX_op_ld16s_i64:
+ tcg_out_ldst(s, I3312_LDRSHX, a0, a1, a2);
+ break;
+ case INDEX_op_ld_i32:
case INDEX_op_ld32u_i64:
+ tcg_out_ldst(s, I3312_LDRW, a0, a1, a2);
+ break;
case INDEX_op_ld32s_i64:
+ tcg_out_ldst(s, I3312_LDRSWX, a0, a1, a2);
+ break;
+ case INDEX_op_ld_i64:
+ tcg_out_ldst(s, I3312_LDRX, a0, a1, a2);
+ break;
+
case INDEX_op_st8_i32:
case INDEX_op_st8_i64:
+ tcg_out_ldst(s, I3312_STRB, REG0(0), a1, a2);
+ break;
case INDEX_op_st16_i32:
case INDEX_op_st16_i64:
+ tcg_out_ldst(s, I3312_STRH, REG0(0), a1, a2);
+ break;
+ case INDEX_op_st_i32:
case INDEX_op_st32_i64:
- tcg_out_ldst(s, aarch64_ldst_get_data(opc), aarch64_ldst_get_type(opc),
- a0, a1, a2);
+ tcg_out_ldst(s, I3312_STRW, REG0(0), a1, a2);
+ break;
+ case INDEX_op_st_i64:
+ tcg_out_ldst(s, I3312_STRX, REG0(0), a1, a2);
break;
- case INDEX_op_add_i64:
case INDEX_op_add_i32:
- tcg_out_arith(s, ARITH_ADD, ext, a0, a1, a2, 0);
+ a2 = (int32_t)a2;
+ /* FALLTHRU */
+ case INDEX_op_add_i64:
+ if (c2) {
+ tcg_out_addsubi(s, ext, a0, a1, a2);
+ } else {
+ tcg_out_insn(s, 3502, ADD, ext, a0, a1, a2);
+ }
break;
- case INDEX_op_sub_i64:
case INDEX_op_sub_i32:
- tcg_out_arith(s, ARITH_SUB, ext, a0, a1, a2, 0);
+ a2 = (int32_t)a2;
+ /* FALLTHRU */
+ case INDEX_op_sub_i64:
+ if (c2) {
+ tcg_out_addsubi(s, ext, a0, a1, -a2);
+ } else {
+ tcg_out_insn(s, 3502, SUB, ext, a0, a1, a2);
+ }
+ break;
+
+ case INDEX_op_neg_i64:
+ case INDEX_op_neg_i32:
+ tcg_out_insn(s, 3502, SUB, ext, a0, TCG_REG_XZR, a1);
break;
- case INDEX_op_and_i64:
case INDEX_op_and_i32:
- tcg_out_arith(s, ARITH_AND, ext, a0, a1, a2, 0);
+ a2 = (int32_t)a2;
+ /* FALLTHRU */
+ case INDEX_op_and_i64:
+ if (c2) {
+ tcg_out_logicali(s, I3404_ANDI, ext, a0, a1, a2);
+ } else {
+ tcg_out_insn(s, 3510, AND, ext, a0, a1, a2);
+ }
+ break;
+
+ case INDEX_op_andc_i32:
+ a2 = (int32_t)a2;
+ /* FALLTHRU */
+ case INDEX_op_andc_i64:
+ if (c2) {
+ tcg_out_logicali(s, I3404_ANDI, ext, a0, a1, ~a2);
+ } else {
+ tcg_out_insn(s, 3510, BIC, ext, a0, a1, a2);
+ }
break;
- case INDEX_op_or_i64:
case INDEX_op_or_i32:
- tcg_out_arith(s, ARITH_OR, ext, a0, a1, a2, 0);
+ a2 = (int32_t)a2;
+ /* FALLTHRU */
+ case INDEX_op_or_i64:
+ if (c2) {
+ tcg_out_logicali(s, I3404_ORRI, ext, a0, a1, a2);
+ } else {
+ tcg_out_insn(s, 3510, ORR, ext, a0, a1, a2);
+ }
+ break;
+
+ case INDEX_op_orc_i32:
+ a2 = (int32_t)a2;
+ /* FALLTHRU */
+ case INDEX_op_orc_i64:
+ if (c2) {
+ tcg_out_logicali(s, I3404_ORRI, ext, a0, a1, ~a2);
+ } else {
+ tcg_out_insn(s, 3510, ORN, ext, a0, a1, a2);
+ }
break;
- case INDEX_op_xor_i64:
case INDEX_op_xor_i32:
- tcg_out_arith(s, ARITH_XOR, ext, a0, a1, a2, 0);
+ a2 = (int32_t)a2;
+ /* FALLTHRU */
+ case INDEX_op_xor_i64:
+ if (c2) {
+ tcg_out_logicali(s, I3404_EORI, ext, a0, a1, a2);
+ } else {
+ tcg_out_insn(s, 3510, EOR, ext, a0, a1, a2);
+ }
+ break;
+
+ case INDEX_op_eqv_i32:
+ a2 = (int32_t)a2;
+ /* FALLTHRU */
+ case INDEX_op_eqv_i64:
+ if (c2) {
+ tcg_out_logicali(s, I3404_EORI, ext, a0, a1, ~a2);
+ } else {
+ tcg_out_insn(s, 3510, EON, ext, a0, a1, a2);
+ }
+ break;
+
+ case INDEX_op_not_i64:
+ case INDEX_op_not_i32:
+ tcg_out_insn(s, 3510, ORN, ext, a0, TCG_REG_XZR, a1);
break;
case INDEX_op_mul_i64:
case INDEX_op_mul_i32:
- tcg_out_mul(s, ext, a0, a1, a2);
+ tcg_out_insn(s, 3509, MADD, ext, a0, a1, a2, TCG_REG_XZR);
+ break;
+
+ case INDEX_op_div_i64:
+ case INDEX_op_div_i32:
+ tcg_out_insn(s, 3508, SDIV, ext, a0, a1, a2);
+ break;
+ case INDEX_op_divu_i64:
+ case INDEX_op_divu_i32:
+ tcg_out_insn(s, 3508, UDIV, ext, a0, a1, a2);
+ break;
+
+ case INDEX_op_rem_i64:
+ case INDEX_op_rem_i32:
+ tcg_out_insn(s, 3508, SDIV, ext, TCG_REG_TMP, a1, a2);
+ tcg_out_insn(s, 3509, MSUB, ext, a0, TCG_REG_TMP, a2, a1);
+ break;
+ case INDEX_op_remu_i64:
+ case INDEX_op_remu_i32:
+ tcg_out_insn(s, 3508, UDIV, ext, TCG_REG_TMP, a1, a2);
+ tcg_out_insn(s, 3509, MSUB, ext, a0, TCG_REG_TMP, a2, a1);
break;
case INDEX_op_shl_i64:
case INDEX_op_shl_i32:
- if (c2) { /* LSL / UBFM Wd, Wn, (32 - m) */
+ if (c2) {
tcg_out_shl(s, ext, a0, a1, a2);
- } else { /* LSL / LSLV */
- tcg_out_shiftrot_reg(s, SRR_SHL, ext, a0, a1, a2);
+ } else {
+ tcg_out_insn(s, 3508, LSLV, ext, a0, a1, a2);
}
break;
case INDEX_op_shr_i64:
case INDEX_op_shr_i32:
- if (c2) { /* LSR / UBFM Wd, Wn, m, 31 */
+ if (c2) {
tcg_out_shr(s, ext, a0, a1, a2);
- } else { /* LSR / LSRV */
- tcg_out_shiftrot_reg(s, SRR_SHR, ext, a0, a1, a2);
+ } else {
+ tcg_out_insn(s, 3508, LSRV, ext, a0, a1, a2);
}
break;
case INDEX_op_sar_i64:
case INDEX_op_sar_i32:
- if (c2) { /* ASR / SBFM Wd, Wn, m, 31 */
+ if (c2) {
tcg_out_sar(s, ext, a0, a1, a2);
- } else { /* ASR / ASRV */
- tcg_out_shiftrot_reg(s, SRR_SAR, ext, a0, a1, a2);
+ } else {
+ tcg_out_insn(s, 3508, ASRV, ext, a0, a1, a2);
}
break;
case INDEX_op_rotr_i64:
case INDEX_op_rotr_i32:
- if (c2) { /* ROR / EXTR Wd, Wm, Wm, m */
+ if (c2) {
tcg_out_rotr(s, ext, a0, a1, a2);
- } else { /* ROR / RORV */
- tcg_out_shiftrot_reg(s, SRR_ROR, ext, a0, a1, a2);
+ } else {
+ tcg_out_insn(s, 3508, RORV, ext, a0, a1, a2);
}
break;
case INDEX_op_rotl_i64:
- case INDEX_op_rotl_i32: /* same as rotate right by (32 - m) */
- if (c2) { /* ROR / EXTR Wd, Wm, Wm, 32 - m */
+ case INDEX_op_rotl_i32:
+ if (c2) {
tcg_out_rotl(s, ext, a0, a1, a2);
} else {
- tcg_out_arith(s, ARITH_SUB, 0, TCG_REG_TMP, TCG_REG_XZR, a2, 0);
- tcg_out_shiftrot_reg(s, SRR_ROR, ext, a0, a1, TCG_REG_TMP);
+ tcg_out_insn(s, 3502, SUB, 0, TCG_REG_TMP, TCG_REG_XZR, a2);
+ tcg_out_insn(s, 3508, RORV, ext, a0, a1, TCG_REG_TMP);
}
break;
- case INDEX_op_brcond_i64:
case INDEX_op_brcond_i32:
- tcg_out_cmp(s, ext, a0, a1);
- tcg_out_goto_label_cond(s, a2, args[3]);
+ a1 = (int32_t)a1;
+ /* FALLTHRU */
+ case INDEX_op_brcond_i64:
+ tcg_out_brcond(s, ext, a2, a0, a1, const_args[1], args[3]);
break;
- case INDEX_op_setcond_i64:
case INDEX_op_setcond_i32:
- tcg_out_cmp(s, ext, a1, a2);
- tcg_out_cset(s, 0, a0, args[3]);
+ a2 = (int32_t)a2;
+ /* FALLTHRU */
+ case INDEX_op_setcond_i64:
+ tcg_out_cmp(s, ext, a1, a2, c2);
+ /* Use CSET alias of CSINC Wd, WZR, WZR, invert(cond). */
+ tcg_out_insn(s, 3506, CSINC, TCG_TYPE_I32, a0, TCG_REG_XZR,
+ TCG_REG_XZR, tcg_invert_cond(args[3]));
break;
- case INDEX_op_qemu_ld8u:
- tcg_out_qemu_ld(s, args, 0 | 0);
- break;
- case INDEX_op_qemu_ld8s:
- tcg_out_qemu_ld(s, args, 4 | 0);
- break;
- case INDEX_op_qemu_ld16u:
- tcg_out_qemu_ld(s, args, 0 | 1);
- break;
- case INDEX_op_qemu_ld16s:
- tcg_out_qemu_ld(s, args, 4 | 1);
- break;
- case INDEX_op_qemu_ld32u:
- tcg_out_qemu_ld(s, args, 0 | 2);
- break;
- case INDEX_op_qemu_ld32s:
- tcg_out_qemu_ld(s, args, 4 | 2);
- break;
- case INDEX_op_qemu_ld32:
- tcg_out_qemu_ld(s, args, 0 | 2);
- break;
- case INDEX_op_qemu_ld64:
- tcg_out_qemu_ld(s, args, 0 | 3);
- break;
- case INDEX_op_qemu_st8:
- tcg_out_qemu_st(s, args, 0);
- break;
- case INDEX_op_qemu_st16:
- tcg_out_qemu_st(s, args, 1);
+ case INDEX_op_movcond_i32:
+ a2 = (int32_t)a2;
+ /* FALLTHRU */
+ case INDEX_op_movcond_i64:
+ tcg_out_cmp(s, ext, a1, a2, c2);
+ tcg_out_insn(s, 3506, CSEL, ext, a0, REG0(3), REG0(4), args[5]);
break;
- case INDEX_op_qemu_st32:
- tcg_out_qemu_st(s, args, 2);
+
+ case INDEX_op_qemu_ld_i32:
+ case INDEX_op_qemu_ld_i64:
+ tcg_out_qemu_ld(s, a0, a1, a2, args[3]);
break;
- case INDEX_op_qemu_st64:
- tcg_out_qemu_st(s, args, 3);
+ case INDEX_op_qemu_st_i32:
+ case INDEX_op_qemu_st_i64:
+ tcg_out_qemu_st(s, REG0(0), a1, a2, args[3]);
break;
- case INDEX_op_bswap32_i64:
- /* Despite the _i64, this is a 32-bit bswap. */
- ext = 0;
- /* FALLTHRU */
case INDEX_op_bswap64_i64:
+ tcg_out_rev64(s, a0, a1);
+ break;
+ case INDEX_op_bswap32_i64:
case INDEX_op_bswap32_i32:
- tcg_out_rev(s, ext, a0, a1);
+ tcg_out_rev32(s, a0, a1);
break;
case INDEX_op_bswap16_i64:
case INDEX_op_bswap16_i32:
- tcg_out_rev16(s, 0, a0, a1);
+ tcg_out_rev16(s, a0, a1);
break;
case INDEX_op_ext8s_i64:
case INDEX_op_ext8s_i32:
- tcg_out_sxt(s, ext, 0, a0, a1);
+ tcg_out_sxt(s, ext, MO_8, a0, a1);
break;
case INDEX_op_ext16s_i64:
case INDEX_op_ext16s_i32:
- tcg_out_sxt(s, ext, 1, a0, a1);
+ tcg_out_sxt(s, ext, MO_16, a0, a1);
break;
case INDEX_op_ext32s_i64:
- tcg_out_sxt(s, 1, 2, a0, a1);
+ tcg_out_sxt(s, TCG_TYPE_I64, MO_32, a0, a1);
break;
case INDEX_op_ext8u_i64:
case INDEX_op_ext8u_i32:
- tcg_out_uxt(s, 0, a0, a1);
+ tcg_out_uxt(s, MO_8, a0, a1);
break;
case INDEX_op_ext16u_i64:
case INDEX_op_ext16u_i32:
- tcg_out_uxt(s, 1, a0, a1);
+ tcg_out_uxt(s, MO_16, a0, a1);
break;
case INDEX_op_ext32u_i64:
- tcg_out_movr(s, 0, a0, a1);
+ tcg_out_movr(s, TCG_TYPE_I32, a0, a1);
+ break;
+
+ case INDEX_op_deposit_i64:
+ case INDEX_op_deposit_i32:
+ tcg_out_dep(s, ext, a0, REG0(2), args[3], args[4]);
+ break;
+
+ case INDEX_op_add2_i32:
+ tcg_out_addsub2(s, TCG_TYPE_I32, a0, a1, REG0(2), REG0(3),
+ (int32_t)args[4], args[5], const_args[4],
+ const_args[5], false);
+ break;
+ case INDEX_op_add2_i64:
+ tcg_out_addsub2(s, TCG_TYPE_I64, a0, a1, REG0(2), REG0(3), args[4],
+ args[5], const_args[4], const_args[5], false);
+ break;
+ case INDEX_op_sub2_i32:
+ tcg_out_addsub2(s, TCG_TYPE_I32, a0, a1, REG0(2), REG0(3),
+ (int32_t)args[4], args[5], const_args[4],
+ const_args[5], true);
+ break;
+ case INDEX_op_sub2_i64:
+ tcg_out_addsub2(s, TCG_TYPE_I64, a0, a1, REG0(2), REG0(3), args[4],
+ args[5], const_args[4], const_args[5], true);
+ break;
+
+ case INDEX_op_muluh_i64:
+ tcg_out_insn(s, 3508, UMULH, TCG_TYPE_I64, a0, a1, a2);
+ break;
+ case INDEX_op_mulsh_i64:
+ tcg_out_insn(s, 3508, SMULH, TCG_TYPE_I64, a0, a1, a2);
break;
case INDEX_op_mov_i64:
@@ -1309,6 +1622,8 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
/* Opcode not implemented. */
tcg_abort();
}
+
+#undef REG0
}
static const TCGTargetOpDef aarch64_op_defs[] = {
@@ -1336,26 +1651,45 @@ static const TCGTargetOpDef aarch64_op_defs[] = {
{ INDEX_op_ld32s_i64, { "r", "r" } },
{ INDEX_op_ld_i64, { "r", "r" } },
- { INDEX_op_st8_i32, { "r", "r" } },
- { INDEX_op_st16_i32, { "r", "r" } },
- { INDEX_op_st_i32, { "r", "r" } },
- { INDEX_op_st8_i64, { "r", "r" } },
- { INDEX_op_st16_i64, { "r", "r" } },
- { INDEX_op_st32_i64, { "r", "r" } },
- { INDEX_op_st_i64, { "r", "r" } },
-
- { INDEX_op_add_i32, { "r", "r", "r" } },
- { INDEX_op_add_i64, { "r", "r", "r" } },
- { INDEX_op_sub_i32, { "r", "r", "r" } },
- { INDEX_op_sub_i64, { "r", "r", "r" } },
+ { INDEX_op_st8_i32, { "rZ", "r" } },
+ { INDEX_op_st16_i32, { "rZ", "r" } },
+ { INDEX_op_st_i32, { "rZ", "r" } },
+ { INDEX_op_st8_i64, { "rZ", "r" } },
+ { INDEX_op_st16_i64, { "rZ", "r" } },
+ { INDEX_op_st32_i64, { "rZ", "r" } },
+ { INDEX_op_st_i64, { "rZ", "r" } },
+
+ { INDEX_op_add_i32, { "r", "r", "rA" } },
+ { INDEX_op_add_i64, { "r", "r", "rA" } },
+ { INDEX_op_sub_i32, { "r", "r", "rA" } },
+ { INDEX_op_sub_i64, { "r", "r", "rA" } },
{ INDEX_op_mul_i32, { "r", "r", "r" } },
{ INDEX_op_mul_i64, { "r", "r", "r" } },
- { INDEX_op_and_i32, { "r", "r", "r" } },
- { INDEX_op_and_i64, { "r", "r", "r" } },
- { INDEX_op_or_i32, { "r", "r", "r" } },
- { INDEX_op_or_i64, { "r", "r", "r" } },
- { INDEX_op_xor_i32, { "r", "r", "r" } },
- { INDEX_op_xor_i64, { "r", "r", "r" } },
+ { INDEX_op_div_i32, { "r", "r", "r" } },
+ { INDEX_op_div_i64, { "r", "r", "r" } },
+ { INDEX_op_divu_i32, { "r", "r", "r" } },
+ { INDEX_op_divu_i64, { "r", "r", "r" } },
+ { INDEX_op_rem_i32, { "r", "r", "r" } },
+ { INDEX_op_rem_i64, { "r", "r", "r" } },
+ { INDEX_op_remu_i32, { "r", "r", "r" } },
+ { INDEX_op_remu_i64, { "r", "r", "r" } },
+ { INDEX_op_and_i32, { "r", "r", "rL" } },
+ { INDEX_op_and_i64, { "r", "r", "rL" } },
+ { INDEX_op_or_i32, { "r", "r", "rL" } },
+ { INDEX_op_or_i64, { "r", "r", "rL" } },
+ { INDEX_op_xor_i32, { "r", "r", "rL" } },
+ { INDEX_op_xor_i64, { "r", "r", "rL" } },
+ { INDEX_op_andc_i32, { "r", "r", "rL" } },
+ { INDEX_op_andc_i64, { "r", "r", "rL" } },
+ { INDEX_op_orc_i32, { "r", "r", "rL" } },
+ { INDEX_op_orc_i64, { "r", "r", "rL" } },
+ { INDEX_op_eqv_i32, { "r", "r", "rL" } },
+ { INDEX_op_eqv_i64, { "r", "r", "rL" } },
+
+ { INDEX_op_neg_i32, { "r", "r" } },
+ { INDEX_op_neg_i64, { "r", "r" } },
+ { INDEX_op_not_i32, { "r", "r" } },
+ { INDEX_op_not_i64, { "r", "r" } },
{ INDEX_op_shl_i32, { "r", "r", "ri" } },
{ INDEX_op_shr_i32, { "r", "r", "ri" } },
@@ -1368,25 +1702,17 @@ static const TCGTargetOpDef aarch64_op_defs[] = {
{ INDEX_op_rotl_i64, { "r", "r", "ri" } },
{ INDEX_op_rotr_i64, { "r", "r", "ri" } },
- { INDEX_op_brcond_i32, { "r", "r" } },
- { INDEX_op_setcond_i32, { "r", "r", "r" } },
- { INDEX_op_brcond_i64, { "r", "r" } },
- { INDEX_op_setcond_i64, { "r", "r", "r" } },
-
- { INDEX_op_qemu_ld8u, { "r", "l" } },
- { INDEX_op_qemu_ld8s, { "r", "l" } },
- { INDEX_op_qemu_ld16u, { "r", "l" } },
- { INDEX_op_qemu_ld16s, { "r", "l" } },
- { INDEX_op_qemu_ld32u, { "r", "l" } },
- { INDEX_op_qemu_ld32s, { "r", "l" } },
-
- { INDEX_op_qemu_ld32, { "r", "l" } },
- { INDEX_op_qemu_ld64, { "r", "l" } },
+ { INDEX_op_brcond_i32, { "r", "rA" } },
+ { INDEX_op_brcond_i64, { "r", "rA" } },
+ { INDEX_op_setcond_i32, { "r", "r", "rA" } },
+ { INDEX_op_setcond_i64, { "r", "r", "rA" } },
+ { INDEX_op_movcond_i32, { "r", "r", "rA", "rZ", "rZ" } },
+ { INDEX_op_movcond_i64, { "r", "r", "rA", "rZ", "rZ" } },
- { INDEX_op_qemu_st8, { "l", "l" } },
- { INDEX_op_qemu_st16, { "l", "l" } },
- { INDEX_op_qemu_st32, { "l", "l" } },
- { INDEX_op_qemu_st64, { "l", "l" } },
+ { INDEX_op_qemu_ld_i32, { "r", "l" } },
+ { INDEX_op_qemu_ld_i64, { "r", "l" } },
+ { INDEX_op_qemu_st_i32, { "lZ", "l" } },
+ { INDEX_op_qemu_st_i64, { "lZ", "l" } },
{ INDEX_op_bswap16_i32, { "r", "r" } },
{ INDEX_op_bswap32_i32, { "r", "r" } },
@@ -1406,6 +1732,17 @@ static const TCGTargetOpDef aarch64_op_defs[] = {
{ INDEX_op_ext16u_i64, { "r", "r" } },
{ INDEX_op_ext32u_i64, { "r", "r" } },
+ { INDEX_op_deposit_i32, { "r", "0", "rZ" } },
+ { INDEX_op_deposit_i64, { "r", "0", "rZ" } },
+
+ { INDEX_op_add2_i32, { "r", "r", "rZ", "rZ", "rA", "rMZ" } },
+ { INDEX_op_add2_i64, { "r", "r", "rZ", "rZ", "rA", "rMZ" } },
+ { INDEX_op_sub2_i32, { "r", "r", "rZ", "rZ", "rA", "rMZ" } },
+ { INDEX_op_sub2_i64, { "r", "r", "rZ", "rZ", "rA", "rMZ" } },
+
+ { INDEX_op_muluh_i64, { "r", "r", "r" } },
+ { INDEX_op_mulsh_i64, { "r", "r", "r" } },
+
{ -1 },
};
@@ -1424,7 +1761,7 @@ static void tcg_target_init(TCGContext *s)
(1 << TCG_REG_X12) | (1 << TCG_REG_X13) |
(1 << TCG_REG_X14) | (1 << TCG_REG_X15) |
(1 << TCG_REG_X16) | (1 << TCG_REG_X17) |
- (1 << TCG_REG_X18));
+ (1 << TCG_REG_X18) | (1 << TCG_REG_X30));
tcg_regset_clear(s->reserved_regs);
tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP);
@@ -1435,39 +1772,44 @@ static void tcg_target_init(TCGContext *s)
tcg_add_target_add_op_defs(aarch64_op_defs);
}
+/* Saving pairs: (X19, X20) .. (X27, X28), (X29(fp), X30(lr)). */
+#define PUSH_SIZE ((30 - 19 + 1) * 8)
+
+#define FRAME_SIZE \
+ ((PUSH_SIZE \
+ + TCG_STATIC_CALL_ARGS_SIZE \
+ + CPU_TEMP_BUF_NLONGS * sizeof(long) \
+ + TCG_TARGET_STACK_ALIGN - 1) \
+ & ~(TCG_TARGET_STACK_ALIGN - 1))
+
+/* We're expecting a 2-byte uleb128 encoded value. */
+QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));
+
+/* We're expecting to use a single ADDI insn. */
+QEMU_BUILD_BUG_ON(FRAME_SIZE - PUSH_SIZE > 0xfff);
+
static void tcg_target_qemu_prologue(TCGContext *s)
{
- /* NB: frame sizes are in 16 byte stack units! */
- int frame_size_callee_saved, frame_size_tcg_locals;
TCGReg r;
- /* save pairs (FP, LR) and (X19, X20) .. (X27, X28) */
- frame_size_callee_saved = (1) + (TCG_REG_X28 - TCG_REG_X19) / 2 + 1;
-
- /* frame size requirement for TCG local variables */
- frame_size_tcg_locals = TCG_STATIC_CALL_ARGS_SIZE
- + CPU_TEMP_BUF_NLONGS * sizeof(long)
- + (TCG_TARGET_STACK_ALIGN - 1);
- frame_size_tcg_locals &= ~(TCG_TARGET_STACK_ALIGN - 1);
- frame_size_tcg_locals /= TCG_TARGET_STACK_ALIGN;
-
- /* push (FP, LR) and update sp */
- tcg_out_push_pair(s, TCG_REG_SP,
- TCG_REG_FP, TCG_REG_LR, frame_size_callee_saved);
+ /* Push (FP, LR) and allocate space for all saved registers. */
+ tcg_out_insn(s, 3314, STP, TCG_REG_FP, TCG_REG_LR,
+ TCG_REG_SP, -PUSH_SIZE, 1, 1);
- /* FP -> callee_saved */
- tcg_out_movr_sp(s, 1, TCG_REG_FP, TCG_REG_SP);
+ /* Set up frame pointer for canonical unwinding. */
+ tcg_out_movr_sp(s, TCG_TYPE_I64, TCG_REG_FP, TCG_REG_SP);
- /* store callee-preserved regs x19..x28 using FP -> callee_saved */
+ /* Store callee-preserved regs x19..x28. */
for (r = TCG_REG_X19; r <= TCG_REG_X27; r += 2) {
- int idx = (r - TCG_REG_X19) / 2 + 1;
- tcg_out_store_pair(s, TCG_REG_FP, r, r + 1, idx);
+ int ofs = (r - TCG_REG_X19 + 2) * 8;
+ tcg_out_insn(s, 3314, STP, r, r + 1, TCG_REG_SP, ofs, 1, 0);
}
- /* make stack space for TCG locals */
- tcg_out_subi(s, 1, TCG_REG_SP, TCG_REG_SP,
- frame_size_tcg_locals * TCG_TARGET_STACK_ALIGN);
- /* inform TCG about how to find TCG locals with register, offset, size */
+ /* Make stack space for TCG locals. */
+ tcg_out_insn(s, 3401, SUBI, TCG_TYPE_I64, TCG_REG_SP, TCG_REG_SP,
+ FRAME_SIZE - PUSH_SIZE);
+
+ /* Inform TCG about how to find TCG locals with register, offset, size. */
tcg_set_frame(s, TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE,
CPU_TEMP_BUF_NLONGS * sizeof(long));
@@ -1479,23 +1821,71 @@ static void tcg_target_qemu_prologue(TCGContext *s)
#endif
tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
- tcg_out_gotor(s, tcg_target_call_iarg_regs[1]);
+ tcg_out_insn(s, 3207, BR, tcg_target_call_iarg_regs[1]);
tb_ret_addr = s->code_ptr;
- /* remove TCG locals stack space */
- tcg_out_addi(s, 1, TCG_REG_SP, TCG_REG_SP,
- frame_size_tcg_locals * TCG_TARGET_STACK_ALIGN);
+ /* Remove TCG locals stack space. */
+ tcg_out_insn(s, 3401, ADDI, TCG_TYPE_I64, TCG_REG_SP, TCG_REG_SP,
+ FRAME_SIZE - PUSH_SIZE);
- /* restore registers x19..x28.
- FP must be preserved, so it still points to callee_saved area */
+ /* Restore registers x19..x28. */
for (r = TCG_REG_X19; r <= TCG_REG_X27; r += 2) {
- int idx = (r - TCG_REG_X19) / 2 + 1;
- tcg_out_load_pair(s, TCG_REG_FP, r, r + 1, idx);
+ int ofs = (r - TCG_REG_X19 + 2) * 8;
+ tcg_out_insn(s, 3314, LDP, r, r + 1, TCG_REG_SP, ofs, 1, 0);
+ }
+
+ /* Pop (FP, LR), restore SP to previous frame. */
+ tcg_out_insn(s, 3314, LDP, TCG_REG_FP, TCG_REG_LR,
+ TCG_REG_SP, PUSH_SIZE, 0, 1);
+ tcg_out_insn(s, 3207, RET, TCG_REG_LR);
+}
+
+typedef struct {
+ DebugFrameCIE cie;
+ DebugFrameFDEHeader fde;
+ uint8_t fde_def_cfa[4];
+ uint8_t fde_reg_ofs[24];
+} DebugFrame;
+
+#define ELF_HOST_MACHINE EM_AARCH64
+
+static DebugFrame debug_frame = {
+ .cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
+ .cie.id = -1,
+ .cie.version = 1,
+ .cie.code_align = 1,
+ .cie.data_align = 0x78, /* sleb128 -8 */
+ .cie.return_column = TCG_REG_LR,
+
+ /* Total FDE size does not include the "len" member. */
+ .fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, fde.cie_offset),
+
+ .fde_def_cfa = {
+ 12, TCG_REG_SP, /* DW_CFA_def_cfa sp, ... */
+ (FRAME_SIZE & 0x7f) | 0x80, /* ... uleb128 FRAME_SIZE */
+ (FRAME_SIZE >> 7)
+ },
+ .fde_reg_ofs = {
+ 0x80 + 28, 1, /* DW_CFA_offset, x28, -8 */
+ 0x80 + 27, 2, /* DW_CFA_offset, x27, -16 */
+ 0x80 + 26, 3, /* DW_CFA_offset, x26, -24 */
+ 0x80 + 25, 4, /* DW_CFA_offset, x25, -32 */
+ 0x80 + 24, 5, /* DW_CFA_offset, x24, -40 */
+ 0x80 + 23, 6, /* DW_CFA_offset, x23, -48 */
+ 0x80 + 22, 7, /* DW_CFA_offset, x22, -56 */
+ 0x80 + 21, 8, /* DW_CFA_offset, x21, -64 */
+ 0x80 + 20, 9, /* DW_CFA_offset, x20, -72 */
+ 0x80 + 19, 10, /* DW_CFA_offset, x19, -80 */
+ 0x80 + 30, 11, /* DW_CFA_offset, lr, -88 */
+ 0x80 + 29, 12, /* DW_CFA_offset, fp, -96 */
}
+};
+
+void tcg_register_jit(void *buf, size_t buf_size)
+{
+ debug_frame.fde.func_start = (intptr_t)buf;
+ debug_frame.fde.func_len = buf_size;
- /* pop (FP, LR), restore SP to previous frame, return */
- tcg_out_pop_pair(s, TCG_REG_SP,
- TCG_REG_FP, TCG_REG_LR, frame_size_callee_saved);
- tcg_out_ret(s);
+ tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}
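
A minimal standalone sketch of the frame bookkeeping introduced above, for readers checking the new prologue: it recomputes FRAME_SIZE and the two-byte uleb128 operand used in fde_def_cfa, then verifies the two build-time assertions. The values chosen here for TCG_STATIC_CALL_ARGS_SIZE, CPU_TEMP_BUF_NLONGS and TCG_TARGET_STACK_ALIGN are assumed representative ones and are not taken from this patch.

/* Standalone sketch, not part of the patch. */
#include <assert.h>
#include <stdio.h>

#define PUSH_SIZE                  ((30 - 19 + 1) * 8)  /* x19..x30 pairs */
#define TCG_STATIC_CALL_ARGS_SIZE  128                  /* assumed value  */
#define CPU_TEMP_BUF_NLONGS        128                  /* assumed value  */
#define TCG_TARGET_STACK_ALIGN     16                   /* AArch64 ABI    */

#define FRAME_SIZE \
    ((PUSH_SIZE \
      + TCG_STATIC_CALL_ARGS_SIZE \
      + CPU_TEMP_BUF_NLONGS * (int)sizeof(long) \
      + TCG_TARGET_STACK_ALIGN - 1) \
     & ~(TCG_TARGET_STACK_ALIGN - 1))

int main(void)
{
    /* The DW_CFA_def_cfa operand is uleb128: low 7 bits with the
       continuation bit set, then the remaining bits, as in fde_def_cfa. */
    unsigned char uleb[2] = { (FRAME_SIZE & 0x7f) | 0x80, FRAME_SIZE >> 7 };
    unsigned decoded = (uleb[0] & 0x7f) | (uleb[1] << 7);

    assert(FRAME_SIZE < (1 << 14));          /* fits two uleb128 bytes    */
    assert(FRAME_SIZE - PUSH_SIZE <= 0xfff); /* fits a single ADDI/SUBI   */
    assert(decoded == FRAME_SIZE);

    printf("PUSH_SIZE=%d FRAME_SIZE=%d uleb128=%02x %02x\n",
           PUSH_SIZE, FRAME_SIZE, uleb[0], uleb[1]);
    return 0;
}
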
diff --git a/tcg/aarch64/tcg-target.h b/tcg/aarch64/tcg-target.h
index 82ad919518..eff1d68fc6 100644
--- a/tcg/aarch64/tcg-target.h
+++ b/tcg/aarch64/tcg-target.h
@@ -13,21 +13,26 @@
#ifndef TCG_TARGET_AARCH64
#define TCG_TARGET_AARCH64 1
-#undef TCG_TARGET_WORDS_BIGENDIAN
#undef TCG_TARGET_STACK_GROWSUP
typedef enum {
- TCG_REG_X0, TCG_REG_X1, TCG_REG_X2, TCG_REG_X3, TCG_REG_X4,
- TCG_REG_X5, TCG_REG_X6, TCG_REG_X7, TCG_REG_X8, TCG_REG_X9,
- TCG_REG_X10, TCG_REG_X11, TCG_REG_X12, TCG_REG_X13, TCG_REG_X14,
- TCG_REG_X15, TCG_REG_X16, TCG_REG_X17, TCG_REG_X18, TCG_REG_X19,
- TCG_REG_X20, TCG_REG_X21, TCG_REG_X22, TCG_REG_X23, TCG_REG_X24,
- TCG_REG_X25, TCG_REG_X26, TCG_REG_X27, TCG_REG_X28,
- TCG_REG_FP, /* frame pointer */
- TCG_REG_LR, /* link register */
- TCG_REG_SP, /* stack pointer or zero register */
- TCG_REG_XZR = TCG_REG_SP /* same register number */
- /* program counter is not directly accessible! */
+ TCG_REG_X0, TCG_REG_X1, TCG_REG_X2, TCG_REG_X3,
+ TCG_REG_X4, TCG_REG_X5, TCG_REG_X6, TCG_REG_X7,
+ TCG_REG_X8, TCG_REG_X9, TCG_REG_X10, TCG_REG_X11,
+ TCG_REG_X12, TCG_REG_X13, TCG_REG_X14, TCG_REG_X15,
+ TCG_REG_X16, TCG_REG_X17, TCG_REG_X18, TCG_REG_X19,
+ TCG_REG_X20, TCG_REG_X21, TCG_REG_X22, TCG_REG_X23,
+ TCG_REG_X24, TCG_REG_X25, TCG_REG_X26, TCG_REG_X27,
+ TCG_REG_X28, TCG_REG_X29, TCG_REG_X30,
+
+ /* X31 is either the stack pointer or zero, depending on context. */
+ TCG_REG_SP = 31,
+ TCG_REG_XZR = 31,
+
+ /* Aliases. */
+ TCG_REG_FP = TCG_REG_X29,
+ TCG_REG_LR = TCG_REG_X30,
+ TCG_AREG0 = TCG_REG_X19,
} TCGReg;
#define TCG_TARGET_NB_REGS 32
@@ -39,33 +44,33 @@ typedef enum {
#define TCG_TARGET_CALL_STACK_OFFSET 0
/* optional instructions */
-#define TCG_TARGET_HAS_div_i32 0
-#define TCG_TARGET_HAS_rem_i32 0
+#define TCG_TARGET_HAS_div_i32 1
+#define TCG_TARGET_HAS_rem_i32 1
#define TCG_TARGET_HAS_ext8s_i32 1
#define TCG_TARGET_HAS_ext16s_i32 1
#define TCG_TARGET_HAS_ext8u_i32 1
#define TCG_TARGET_HAS_ext16u_i32 1
#define TCG_TARGET_HAS_bswap16_i32 1
#define TCG_TARGET_HAS_bswap32_i32 1
-#define TCG_TARGET_HAS_not_i32 0
-#define TCG_TARGET_HAS_neg_i32 0
+#define TCG_TARGET_HAS_not_i32 1
+#define TCG_TARGET_HAS_neg_i32 1
#define TCG_TARGET_HAS_rot_i32 1
-#define TCG_TARGET_HAS_andc_i32 0
-#define TCG_TARGET_HAS_orc_i32 0
-#define TCG_TARGET_HAS_eqv_i32 0
+#define TCG_TARGET_HAS_andc_i32 1
+#define TCG_TARGET_HAS_orc_i32 1
+#define TCG_TARGET_HAS_eqv_i32 1
#define TCG_TARGET_HAS_nand_i32 0
#define TCG_TARGET_HAS_nor_i32 0
-#define TCG_TARGET_HAS_deposit_i32 0
-#define TCG_TARGET_HAS_movcond_i32 0
-#define TCG_TARGET_HAS_add2_i32 0
-#define TCG_TARGET_HAS_sub2_i32 0
+#define TCG_TARGET_HAS_deposit_i32 1
+#define TCG_TARGET_HAS_movcond_i32 1
+#define TCG_TARGET_HAS_add2_i32 1
+#define TCG_TARGET_HAS_sub2_i32 1
#define TCG_TARGET_HAS_mulu2_i32 0
#define TCG_TARGET_HAS_muls2_i32 0
#define TCG_TARGET_HAS_muluh_i32 0
#define TCG_TARGET_HAS_mulsh_i32 0
-#define TCG_TARGET_HAS_div_i64 0
-#define TCG_TARGET_HAS_rem_i64 0
+#define TCG_TARGET_HAS_div_i64 1
+#define TCG_TARGET_HAS_rem_i64 1
#define TCG_TARGET_HAS_ext8s_i64 1
#define TCG_TARGET_HAS_ext16s_i64 1
#define TCG_TARGET_HAS_ext32s_i64 1
@@ -75,28 +80,24 @@ typedef enum {
#define TCG_TARGET_HAS_bswap16_i64 1
#define TCG_TARGET_HAS_bswap32_i64 1
#define TCG_TARGET_HAS_bswap64_i64 1
-#define TCG_TARGET_HAS_not_i64 0
-#define TCG_TARGET_HAS_neg_i64 0
+#define TCG_TARGET_HAS_not_i64 1
+#define TCG_TARGET_HAS_neg_i64 1
#define TCG_TARGET_HAS_rot_i64 1
-#define TCG_TARGET_HAS_andc_i64 0
-#define TCG_TARGET_HAS_orc_i64 0
-#define TCG_TARGET_HAS_eqv_i64 0
+#define TCG_TARGET_HAS_andc_i64 1
+#define TCG_TARGET_HAS_orc_i64 1
+#define TCG_TARGET_HAS_eqv_i64 1
#define TCG_TARGET_HAS_nand_i64 0
#define TCG_TARGET_HAS_nor_i64 0
-#define TCG_TARGET_HAS_deposit_i64 0
-#define TCG_TARGET_HAS_movcond_i64 0
-#define TCG_TARGET_HAS_add2_i64 0
-#define TCG_TARGET_HAS_sub2_i64 0
+#define TCG_TARGET_HAS_deposit_i64 1
+#define TCG_TARGET_HAS_movcond_i64 1
+#define TCG_TARGET_HAS_add2_i64 1
+#define TCG_TARGET_HAS_sub2_i64 1
#define TCG_TARGET_HAS_mulu2_i64 0
#define TCG_TARGET_HAS_muls2_i64 0
-#define TCG_TARGET_HAS_muluh_i64 0
-#define TCG_TARGET_HAS_mulsh_i64 0
-
-enum {
- TCG_AREG0 = TCG_REG_X19,
-};
+#define TCG_TARGET_HAS_muluh_i64 1
+#define TCG_TARGET_HAS_mulsh_i64 1
-#define TCG_TARGET_HAS_new_ldst 0
+#define TCG_TARGET_HAS_new_ldst 1
static inline void flush_icache_range(uintptr_t start, uintptr_t stop)
{
diff --git a/tcg/arm/tcg-target.c b/tcg/arm/tcg-target.c
index c8884b31f4..7535175f9c 100644
--- a/tcg/arm/tcg-target.c
+++ b/tcg/arm/tcg-target.c
@@ -60,6 +60,13 @@ static int arm_arch = __ARM_ARCH;
bool use_idiv_instructions;
#endif
+/* ??? Ought to think about changing CONFIG_SOFTMMU to always be defined. */
+#ifdef CONFIG_SOFTMMU
+# define USING_SOFTMMU 1
+#else
+# define USING_SOFTMMU 0
+#endif
+
#ifndef NDEBUG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
"%r0",
@@ -254,7 +261,7 @@ static inline int check_fit_imm(uint32_t imm)
* mov operand2: values represented with x << (2 * y), x < 0x100
* add, sub, eor...: ditto
*/
-static inline int tcg_target_const_match(tcg_target_long val,
+static inline int tcg_target_const_match(tcg_target_long val, TCGType type,
const TCGArgConstraint *arg_ct)
{
int ct;
@@ -1246,7 +1253,7 @@ static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
/* Record the context of a call to the out of line helper code for the slow
path for a load or store, so that we can later generate the correct
helper code. */
-static void add_qemu_ldst_label(TCGContext *s, int is_ld, TCGMemOp opc,
+static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOp opc,
TCGReg datalo, TCGReg datahi, TCGReg addrlo,
TCGReg addrhi, int mem_index,
uint8_t *raddr, uint8_t *label_ptr)
@@ -1404,7 +1411,9 @@ static inline void tcg_out_qemu_ld_index(TCGContext *s, TCGMemOp opc,
TCGReg dl = (bswap ? datahi : datalo);
TCGReg dh = (bswap ? datalo : datahi);
- if (use_armv6_instructions && (dl & 1) == 0 && dh == dl + 1) {
+ /* Avoid ldrd for user-only emulation, to handle unaligned. */
+ if (USING_SOFTMMU && use_armv6_instructions
+ && (dl & 1) == 0 && dh == dl + 1) {
tcg_out_ldrd_r(s, COND_AL, dl, addrlo, addend);
} else if (dl != addend) {
tcg_out_ld32_rwb(s, COND_AL, dl, addend, addrlo);
@@ -1463,7 +1472,9 @@ static inline void tcg_out_qemu_ld_direct(TCGContext *s, TCGMemOp opc,
TCGReg dl = (bswap ? datahi : datalo);
TCGReg dh = (bswap ? datalo : datahi);
- if (use_armv6_instructions && (dl & 1) == 0 && dh == dl + 1) {
+ /* Avoid ldrd for user-only emulation, to handle unaligned. */
+ if (USING_SOFTMMU && use_armv6_instructions
+ && (dl & 1) == 0 && dh == dl + 1) {
tcg_out_ldrd_8(s, COND_AL, dl, addrlo, 0);
} else if (dl == addrlo) {
tcg_out_ld32_12(s, COND_AL, dh, addrlo, bswap ? 0 : 4);
@@ -1508,7 +1519,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
tcg_out_qemu_ld_index(s, opc, datalo, datahi, addrlo, addend);
- add_qemu_ldst_label(s, 1, opc, datalo, datahi, addrlo, addrhi,
+ add_qemu_ldst_label(s, true, opc, datalo, datahi, addrlo, addrhi,
mem_index, s->code_ptr, label_ptr);
#else /* !CONFIG_SOFTMMU */
if (GUEST_BASE) {
@@ -1548,12 +1559,13 @@ static inline void tcg_out_qemu_st_index(TCGContext *s, int cond, TCGMemOp opc,
}
break;
case MO_64:
+ /* Avoid strd for user-only emulation, to handle unaligned. */
if (bswap) {
tcg_out_bswap32(s, cond, TCG_REG_R0, datahi);
tcg_out_st32_rwb(s, cond, TCG_REG_R0, addend, addrlo);
tcg_out_bswap32(s, cond, TCG_REG_R0, datalo);
tcg_out_st32_12(s, cond, TCG_REG_R0, addend, 4);
- } else if (use_armv6_instructions
+ } else if (USING_SOFTMMU && use_armv6_instructions
&& (datalo & 1) == 0 && datahi == datalo + 1) {
tcg_out_strd_r(s, cond, datalo, addrlo, addend);
} else {
@@ -1592,12 +1604,13 @@ static inline void tcg_out_qemu_st_direct(TCGContext *s, TCGMemOp opc,
}
break;
case MO_64:
+ /* Avoid strd for user-only emulation, to handle unaligned. */
if (bswap) {
tcg_out_bswap32(s, COND_AL, TCG_REG_R0, datahi);
tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addrlo, 0);
tcg_out_bswap32(s, COND_AL, TCG_REG_R0, datalo);
tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addrlo, 4);
- } else if (use_armv6_instructions
+ } else if (USING_SOFTMMU && use_armv6_instructions
&& (datalo & 1) == 0 && datahi == datalo + 1) {
tcg_out_strd_8(s, COND_AL, datalo, addrlo, 0);
} else {
@@ -1634,7 +1647,7 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
label_ptr = s->code_ptr;
tcg_out_bl_noaddr(s, COND_NE);
- add_qemu_ldst_label(s, 0, opc, datalo, datahi, addrlo, addrhi,
+ add_qemu_ldst_label(s, false, opc, datalo, datahi, addrlo, addrhi,
mem_index, s->code_ptr, label_ptr);
#else /* !CONFIG_SOFTMMU */
if (GUEST_BASE) {
diff --git a/tcg/arm/tcg-target.h b/tcg/arm/tcg-target.h
index 3746b6e298..1bc5dacec0 100644
--- a/tcg/arm/tcg-target.h
+++ b/tcg/arm/tcg-target.h
@@ -25,7 +25,6 @@
#ifndef TCG_TARGET_ARM
#define TCG_TARGET_ARM 1
-#undef TCG_TARGET_WORDS_BIGENDIAN
#undef TCG_TARGET_STACK_GROWSUP
typedef enum {
@@ -79,6 +78,7 @@ extern bool use_idiv_instructions;
#define TCG_TARGET_HAS_nor_i32 0
#define TCG_TARGET_HAS_deposit_i32 1
#define TCG_TARGET_HAS_movcond_i32 1
+#define TCG_TARGET_HAS_mulu2_i32 1
#define TCG_TARGET_HAS_muls2_i32 1
#define TCG_TARGET_HAS_muluh_i32 0
#define TCG_TARGET_HAS_mulsh_i32 0
diff --git a/tcg/i386/tcg-target.c b/tcg/i386/tcg-target.c
index f832282d1a..34ece1f80b 100644
--- a/tcg/i386/tcg-target.c
+++ b/tcg/i386/tcg-target.c
@@ -257,7 +257,7 @@ static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
}
/* test if a constant matches the constraint */
-static inline int tcg_target_const_match(tcg_target_long val,
+static inline int tcg_target_const_match(tcg_target_long val, TCGType type,
const TCGArgConstraint *arg_ct)
{
int ct = arg_ct->ct;
@@ -1244,7 +1244,7 @@ static inline void tcg_out_tlb_load(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
* Record the context of a call to the out of line helper code for the slow path
* for a load or store, so that we can later generate the correct helper code
*/
-static void add_qemu_ldst_label(TCGContext *s, int is_ld, TCGMemOp opc,
+static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOp opc,
TCGReg datalo, TCGReg datahi,
TCGReg addrlo, TCGReg addrhi,
int mem_index, uint8_t *raddr,
@@ -1554,7 +1554,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
tcg_out_qemu_ld_direct(s, datalo, datahi, TCG_REG_L1, 0, 0, opc);
/* Record the current context of a load into ldst label */
- add_qemu_ldst_label(s, 1, opc, datalo, datahi, addrlo, addrhi,
+ add_qemu_ldst_label(s, true, opc, datalo, datahi, addrlo, addrhi,
mem_index, s->code_ptr, label_ptr);
#else
{
@@ -1685,7 +1685,7 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
tcg_out_qemu_st_direct(s, datalo, datahi, TCG_REG_L1, 0, 0, opc);
/* Record the current context of a store into ldst label */
- add_qemu_ldst_label(s, 0, opc, datalo, datahi, addrlo, addrhi,
+ add_qemu_ldst_label(s, false, opc, datalo, datahi, addrlo, addrhi,
mem_index, s->code_ptr, label_ptr);
#else
{
diff --git a/tcg/i386/tcg-target.h b/tcg/i386/tcg-target.h
index bdf2222452..ababca0569 100644
--- a/tcg/i386/tcg-target.h
+++ b/tcg/i386/tcg-target.h
@@ -24,8 +24,6 @@
#ifndef TCG_TARGET_I386
#define TCG_TARGET_I386 1
-#undef TCG_TARGET_WORDS_BIGENDIAN
-
#ifdef __x86_64__
# define TCG_TARGET_REG_BITS 64
# define TCG_TARGET_NB_REGS 16
diff --git a/tcg/ia64/tcg-target.c b/tcg/ia64/tcg-target.c
index 2d8e00cd94..1f523d6466 100644
--- a/tcg/ia64/tcg-target.c
+++ b/tcg/ia64/tcg-target.c
@@ -23,8 +23,6 @@
* THE SOFTWARE.
*/
-#include "tcg-be-null.h"
-
/*
* Register definitions
*/
@@ -221,10 +219,12 @@ enum {
OPC_ALLOC_M34 = 0x02c00000000ull,
OPC_BR_DPTK_FEW_B1 = 0x08400000000ull,
OPC_BR_SPTK_MANY_B1 = 0x08000001000ull,
+ OPC_BR_CALL_SPNT_FEW_B3 = 0x0a200000000ull,
OPC_BR_SPTK_MANY_B4 = 0x00100001000ull,
OPC_BR_CALL_SPTK_MANY_B5 = 0x02100001000ull,
OPC_BR_RET_SPTK_MANY_B4 = 0x00108001100ull,
OPC_BRL_SPTK_MANY_X3 = 0x18000001000ull,
+ OPC_BRL_CALL_SPNT_MANY_X4 = 0x1a200001000ull,
OPC_BRL_CALL_SPTK_MANY_X4 = 0x1a000001000ull,
OPC_CMP_LT_A6 = 0x18000000000ull,
OPC_CMP_LTU_A6 = 0x1a000000000ull,
@@ -356,6 +356,15 @@ static inline uint64_t tcg_opc_b1(int qp, uint64_t opc, uint64_t imm)
| (qp & 0x3f);
}
+static inline uint64_t tcg_opc_b3(int qp, uint64_t opc, int b1, uint64_t imm)
+{
+ return opc
+ | ((imm & 0x100000) << 16) /* s */
+ | ((imm & 0x0fffff) << 13) /* imm20b */
+ | ((b1 & 0x7) << 6)
+ | (qp & 0x3f);
+}
+
static inline uint64_t tcg_opc_b4(int qp, uint64_t opc, int b2)
{
return opc
@@ -815,6 +824,7 @@ static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
#if defined(CONFIG_SOFTMMU)
tcg_regset_reset_reg(ct->u.regs, TCG_REG_R56);
tcg_regset_reset_reg(ct->u.regs, TCG_REG_R57);
+ tcg_regset_reset_reg(ct->u.regs, TCG_REG_R58);
#endif
break;
case 'Z':
@@ -832,7 +842,7 @@ static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
}
/* test if a constant matches the constraint */
-static inline int tcg_target_const_match(tcg_target_long val,
+static inline int tcg_target_const_match(tcg_target_long val, TCGType type,
const TCGArgConstraint *arg_ct)
{
int ct;
@@ -950,15 +960,21 @@ static inline void tcg_out_callr(TCGContext *s, TCGReg addr)
static void tcg_out_exit_tb(TCGContext *s, tcg_target_long arg)
{
int64_t disp;
- uint64_t imm;
+ uint64_t imm, opc1;
- tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R8, arg);
+ /* At least arg == 0 is a common operation. */
+ if (arg == sextract64(arg, 0, 22)) {
+ opc1 = tcg_opc_movi_a(TCG_REG_P0, TCG_REG_R8, arg);
+ } else {
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R8, arg);
+ opc1 = INSN_NOP_M;
+ }
disp = tb_ret_addr - s->code_ptr;
imm = (uint64_t)disp >> 4;
tcg_out_bundle(s, mLX,
- INSN_NOP_M,
+ opc1,
tcg_opc_l3 (imm),
tcg_opc_x3 (TCG_REG_P0, OPC_BRL_SPTK_MANY_X3, imm));
}
@@ -1558,238 +1574,284 @@ static inline void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGArg ret,
}
#if defined(CONFIG_SOFTMMU)
+/* We're expecting to use a signed 22-bit immediate add. */
+QEMU_BUILD_BUG_ON(offsetof(CPUArchState, tlb_table[NB_MMU_MODES - 1][1])
+ > 0x1fffff)
+
/* Load and compare a TLB entry, and return the result in (p6, p7).
- R2 is loaded with the address of the addend TLB entry.
- R57 is loaded with the address, zero extented on 32-bit targets. */
-static inline void tcg_out_qemu_tlb(TCGContext *s, TCGArg addr_reg,
- TCGMemOp s_bits, uint64_t offset_rw,
- uint64_t offset_addend)
-{
- tcg_out_bundle(s, mII,
- INSN_NOP_M,
- tcg_opc_i11(TCG_REG_P0, OPC_EXTR_U_I11, TCG_REG_R2,
+ R2 is loaded with the addend TLB entry.
+ R57 is loaded with the address, zero extended on 32-bit targets.
+ R1, R3 are clobbered, leaving R56 free for...
+ BSWAP_1, BSWAP_2 and I-slot insns for swapping data for store. */
+static inline void tcg_out_qemu_tlb(TCGContext *s, TCGReg addr_reg,
+ TCGMemOp s_bits, int off_rw, int off_add,
+ uint64_t bswap1, uint64_t bswap2)
+{
+ /*
+ .mii
+ mov r2 = off_rw
+ extr.u r3 = addr_reg, ... # extract tlb page
+ zxt4 r57 = addr_reg # or mov for 64-bit guest
+ ;;
+ .mii
+ addl r2 = r2, areg0
+ shl r3 = r3, cteb # via dep.z
+ dep r1 = 0, r57, ... # zero page ofs, keep align
+ ;;
+ .mmi
+ add r2 = r2, r3
+ ;;
+ ld4 r3 = [r2], off_add-off_rw # or ld8 for 64-bit guest
+ nop
+ ;;
+ .mmi
+ nop
+ cmp.eq p6, p7 = r3, r58
+ nop
+ ;;
+ */
+ tcg_out_bundle(s, miI,
+ tcg_opc_movi_a(TCG_REG_P0, TCG_REG_R2, off_rw),
+ tcg_opc_i11(TCG_REG_P0, OPC_EXTR_U_I11, TCG_REG_R3,
addr_reg, TARGET_PAGE_BITS, CPU_TLB_BITS - 1),
- tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12, TCG_REG_R2,
- TCG_REG_R2, 63 - CPU_TLB_ENTRY_BITS,
- 63 - CPU_TLB_ENTRY_BITS));
- tcg_out_bundle(s, mII,
- tcg_opc_a5 (TCG_REG_P0, OPC_ADDL_A5, TCG_REG_R2,
- offset_rw, TCG_REG_R2),
tcg_opc_ext_i(TCG_REG_P0,
TARGET_LONG_BITS == 32 ? MO_UL : MO_Q,
- TCG_REG_R57, addr_reg),
+ TCG_REG_R57, addr_reg));
+ tcg_out_bundle(s, miI,
tcg_opc_a1 (TCG_REG_P0, OPC_ADD_A1, TCG_REG_R2,
- TCG_REG_R2, TCG_AREG0));
- tcg_out_bundle(s, mII,
+ TCG_REG_R2, TCG_AREG0),
+ tcg_opc_i12(TCG_REG_P0, OPC_DEP_Z_I12, TCG_REG_R3,
+ TCG_REG_R3, 63 - CPU_TLB_ENTRY_BITS,
+ 63 - CPU_TLB_ENTRY_BITS),
+ tcg_opc_i14(TCG_REG_P0, OPC_DEP_I14, TCG_REG_R1, 0,
+ TCG_REG_R57, 63 - s_bits,
+ TARGET_PAGE_BITS - s_bits - 1));
+ tcg_out_bundle(s, MmI,
+ tcg_opc_a1 (TCG_REG_P0, OPC_ADD_A1,
+ TCG_REG_R2, TCG_REG_R2, TCG_REG_R3),
tcg_opc_m3 (TCG_REG_P0,
(TARGET_LONG_BITS == 32
- ? OPC_LD4_M3 : OPC_LD8_M3), TCG_REG_R56,
- TCG_REG_R2, offset_addend - offset_rw),
- tcg_opc_i14(TCG_REG_P0, OPC_DEP_I14, TCG_REG_R3, 0,
- TCG_REG_R57, 63 - s_bits,
- TARGET_PAGE_BITS - s_bits - 1),
+ ? OPC_LD4_M3 : OPC_LD8_M3), TCG_REG_R3,
+ TCG_REG_R2, off_add - off_rw),
+ bswap1);
+ tcg_out_bundle(s, mmI,
+ tcg_opc_m1 (TCG_REG_P0, OPC_LD8_M1, TCG_REG_R2, TCG_REG_R2),
tcg_opc_a6 (TCG_REG_P0, OPC_CMP_EQ_A6, TCG_REG_P6,
- TCG_REG_P7, TCG_REG_R3, TCG_REG_R56));
+ TCG_REG_P7, TCG_REG_R1, TCG_REG_R3),
+ bswap2);
}
-/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
- int mmu_idx) */
-static const void * const qemu_ld_helpers[4] = {
- helper_ldb_mmu,
- helper_ldw_mmu,
- helper_ldl_mmu,
- helper_ldq_mmu,
-};
+#define TCG_MAX_QEMU_LDST 640
-static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args,
- TCGMemOp opc)
+typedef struct TCGLabelQemuLdst {
+ bool is_ld;
+ TCGMemOp size;
+ uint8_t *label_ptr; /* label pointers to be updated */
+} TCGLabelQemuLdst;
+
+typedef struct TCGBackendData {
+ int nb_ldst_labels;
+ TCGLabelQemuLdst ldst_labels[TCG_MAX_QEMU_LDST];
+} TCGBackendData;
+
+static inline void tcg_out_tb_init(TCGContext *s)
+{
+ s->be->nb_ldst_labels = 0;
+}
+
+static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOp opc,
+ uint8_t *label_ptr)
+{
+ TCGBackendData *be = s->be;
+ TCGLabelQemuLdst *l = &be->ldst_labels[be->nb_ldst_labels++];
+
+ assert(be->nb_ldst_labels <= TCG_MAX_QEMU_LDST);
+ l->is_ld = is_ld;
+ l->size = opc & MO_SIZE;
+ l->label_ptr = label_ptr;
+}
+
+static void tcg_out_tb_finalize(TCGContext *s)
+{
+ static const void * const helpers[8] = {
+ helper_ret_stb_mmu,
+ helper_le_stw_mmu,
+ helper_le_stl_mmu,
+ helper_le_stq_mmu,
+ helper_ret_ldub_mmu,
+ helper_le_lduw_mmu,
+ helper_le_ldul_mmu,
+ helper_le_ldq_mmu,
+ };
+ uintptr_t thunks[8] = { };
+ TCGBackendData *be = s->be;
+ size_t i, n = be->nb_ldst_labels;
+
+ for (i = 0; i < n; i++) {
+ TCGLabelQemuLdst *l = &be->ldst_labels[i];
+ long x = l->is_ld * 4 + l->size;
+ uintptr_t dest = thunks[x];
+
+ /* The out-of-line thunks are all the same; load the return address
+ from B0, load the GP, and branch to the code. Note that we are
+ always post-call, so the register window has rolled, so we're
+ using incoming parameter register numbers, not outgoing. */
+ if (dest == 0) {
+ uintptr_t disp, *desc = (uintptr_t *)helpers[x];
+
+ thunks[x] = dest = (uintptr_t)s->code_ptr;
+
+ tcg_out_bundle(s, mlx,
+ INSN_NOP_M,
+ tcg_opc_l2 (desc[1]),
+ tcg_opc_x2 (TCG_REG_P0, OPC_MOVL_X2,
+ TCG_REG_R1, desc[1]));
+ tcg_out_bundle(s, mii,
+ INSN_NOP_M,
+ INSN_NOP_I,
+ tcg_opc_i22(TCG_REG_P0, OPC_MOV_I22,
+ l->is_ld ? TCG_REG_R35 : TCG_REG_R36,
+ TCG_REG_B0));
+ disp = (desc[0] - (uintptr_t)s->code_ptr) >> 4;
+ tcg_out_bundle(s, mLX,
+ INSN_NOP_M,
+ tcg_opc_l3 (disp),
+ tcg_opc_x3 (TCG_REG_P0, OPC_BRL_SPTK_MANY_X3, disp));
+ }
+
+ reloc_pcrel21b(l->label_ptr, dest);
+ }
+}
+
+static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args)
{
static const uint64_t opc_ld_m1[4] = {
OPC_LD1_M1, OPC_LD2_M1, OPC_LD4_M1, OPC_LD8_M1
};
int addr_reg, data_reg, mem_index;
- TCGMemOp s_bits, bswap;
-
- data_reg = *args++;
- addr_reg = *args++;
- mem_index = *args;
+ TCGMemOp opc, s_bits;
+ uint64_t fin1, fin2;
+ uint8_t *label_ptr;
+
+ data_reg = args[0];
+ addr_reg = args[1];
+ opc = args[2];
+ mem_index = args[3];
s_bits = opc & MO_SIZE;
- bswap = opc & MO_BSWAP;
/* Read the TLB entry */
tcg_out_qemu_tlb(s, addr_reg, s_bits,
offsetof(CPUArchState, tlb_table[mem_index][0].addr_read),
- offsetof(CPUArchState, tlb_table[mem_index][0].addend));
+ offsetof(CPUArchState, tlb_table[mem_index][0].addend),
+ INSN_NOP_I, INSN_NOP_I);
/* P6 is the fast path, and P7 the slow path */
- tcg_out_bundle(s, mLX,
- tcg_opc_mov_a(TCG_REG_P7, TCG_REG_R56, TCG_AREG0),
- tcg_opc_l2 ((tcg_target_long) qemu_ld_helpers[s_bits]),
- tcg_opc_x2 (TCG_REG_P7, OPC_MOVL_X2, TCG_REG_R2,
- (tcg_target_long) qemu_ld_helpers[s_bits]));
- tcg_out_bundle(s, MmI,
- tcg_opc_m3 (TCG_REG_P0, OPC_LD8_M3, TCG_REG_R3,
- TCG_REG_R2, 8),
- tcg_opc_a1 (TCG_REG_P6, OPC_ADD_A1, TCG_REG_R3,
- TCG_REG_R3, TCG_REG_R57),
- tcg_opc_i21(TCG_REG_P7, OPC_MOV_I21, TCG_REG_B6,
- TCG_REG_R3, 0));
- if (bswap && s_bits == MO_16) {
- tcg_out_bundle(s, MmI,
- tcg_opc_m1 (TCG_REG_P6, opc_ld_m1[s_bits],
- TCG_REG_R8, TCG_REG_R3),
- tcg_opc_m1 (TCG_REG_P7, OPC_LD8_M1, TCG_REG_R1, TCG_REG_R2),
- tcg_opc_i12(TCG_REG_P6, OPC_DEP_Z_I12,
- TCG_REG_R8, TCG_REG_R8, 15, 15));
- } else if (bswap && s_bits == MO_32) {
- tcg_out_bundle(s, MmI,
- tcg_opc_m1 (TCG_REG_P6, opc_ld_m1[s_bits],
- TCG_REG_R8, TCG_REG_R3),
- tcg_opc_m1 (TCG_REG_P7, OPC_LD8_M1, TCG_REG_R1, TCG_REG_R2),
- tcg_opc_i12(TCG_REG_P6, OPC_DEP_Z_I12,
- TCG_REG_R8, TCG_REG_R8, 31, 31));
- } else {
- tcg_out_bundle(s, mmI,
- tcg_opc_m1 (TCG_REG_P6, opc_ld_m1[s_bits],
- TCG_REG_R8, TCG_REG_R3),
- tcg_opc_m1 (TCG_REG_P7, OPC_LD8_M1, TCG_REG_R1, TCG_REG_R2),
- INSN_NOP_I);
- }
- if (!bswap) {
- tcg_out_bundle(s, miB,
- tcg_opc_movi_a(TCG_REG_P7, TCG_REG_R58, mem_index),
- INSN_NOP_I,
- tcg_opc_b5 (TCG_REG_P7, OPC_BR_CALL_SPTK_MANY_B5,
- TCG_REG_B0, TCG_REG_B6));
+
+ fin2 = 0;
+ if (opc & MO_BSWAP) {
+ fin1 = tcg_opc_bswap64_i(TCG_REG_P0, data_reg, TCG_REG_R8);
+ if (s_bits < MO_64) {
+ int shift = 64 - (8 << s_bits);
+ fin2 = (opc & MO_SIGN ? OPC_EXTR_I11 : OPC_EXTR_U_I11);
+ fin2 = tcg_opc_i11(TCG_REG_P0, fin2,
+ data_reg, data_reg, shift, 63 - shift);
+ }
} else {
- tcg_out_bundle(s, miB,
- tcg_opc_movi_a(TCG_REG_P7, TCG_REG_R58, mem_index),
- tcg_opc_bswap64_i(TCG_REG_P6, TCG_REG_R8, TCG_REG_R8),
- tcg_opc_b5 (TCG_REG_P7, OPC_BR_CALL_SPTK_MANY_B5,
- TCG_REG_B0, TCG_REG_B6));
+ fin1 = tcg_opc_ext_i(TCG_REG_P0, opc, data_reg, TCG_REG_R8);
}
- tcg_out_bundle(s, miI,
- INSN_NOP_M,
+ tcg_out_bundle(s, mmI,
+ tcg_opc_mov_a(TCG_REG_P7, TCG_REG_R56, TCG_AREG0),
+ tcg_opc_a1 (TCG_REG_P6, OPC_ADD_A1, TCG_REG_R2,
+ TCG_REG_R2, TCG_REG_R57),
+ tcg_opc_movi_a(TCG_REG_P7, TCG_REG_R58, mem_index));
+ label_ptr = s->code_ptr + 2;
+ tcg_out_bundle(s, miB,
+ tcg_opc_m1 (TCG_REG_P6, opc_ld_m1[s_bits],
+ TCG_REG_R8, TCG_REG_R2),
INSN_NOP_I,
- tcg_opc_ext_i(TCG_REG_P0, opc, data_reg, TCG_REG_R8));
-}
+ tcg_opc_b3 (TCG_REG_P7, OPC_BR_CALL_SPNT_FEW_B3, TCG_REG_B0,
+ get_reloc_pcrel21b(label_ptr)));
-/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
- uintxx_t val, int mmu_idx) */
-static const void * const qemu_st_helpers[4] = {
- helper_stb_mmu,
- helper_stw_mmu,
- helper_stl_mmu,
- helper_stq_mmu,
-};
+ add_qemu_ldst_label(s, 1, opc, label_ptr);
-static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args,
- TCGMemOp opc)
+ /* Note that we always use LE helper functions, so the bswap insns
+ here for the fast path also apply to the slow path. */
+ tcg_out_bundle(s, (fin2 ? mII : miI),
+ INSN_NOP_M,
+ fin1,
+ fin2 ? fin2 : INSN_NOP_I);
+}
+
+static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args)
{
static const uint64_t opc_st_m4[4] = {
OPC_ST1_M4, OPC_ST2_M4, OPC_ST4_M4, OPC_ST8_M4
};
- int addr_reg, data_reg, mem_index;
- TCGMemOp s_bits;
-
- data_reg = *args++;
- addr_reg = *args++;
- mem_index = *args;
+ TCGReg addr_reg, data_reg;
+ int mem_index;
+ uint64_t pre1, pre2;
+ TCGMemOp opc, s_bits;
+ uint8_t *label_ptr;
+
+ data_reg = args[0];
+ addr_reg = args[1];
+ opc = args[2];
+ mem_index = args[3];
s_bits = opc & MO_SIZE;
+ /* Note that we always use LE helper functions, so the bswap insns
+ that are here for the fast path also apply to the slow path,
+ and move the data into the argument register. */
+ pre2 = INSN_NOP_I;
+ if (opc & MO_BSWAP) {
+ pre1 = tcg_opc_bswap64_i(TCG_REG_P0, TCG_REG_R58, data_reg);
+ if (s_bits < MO_64) {
+ int shift = 64 - (8 << s_bits);
+ pre2 = tcg_opc_i11(TCG_REG_P0, OPC_EXTR_U_I11,
+ TCG_REG_R58, TCG_REG_R58, shift, 63 - shift);
+ }
+ } else {
+ /* Just move the data into place for the slow path. */
+ pre1 = tcg_opc_ext_i(TCG_REG_P0, opc, TCG_REG_R58, data_reg);
+ }
+
tcg_out_qemu_tlb(s, addr_reg, s_bits,
offsetof(CPUArchState, tlb_table[mem_index][0].addr_write),
- offsetof(CPUArchState, tlb_table[mem_index][0].addend));
+ offsetof(CPUArchState, tlb_table[mem_index][0].addend),
+ pre1, pre2);
/* P6 is the fast path, and P7 the slow path */
- tcg_out_bundle(s, mLX,
+ tcg_out_bundle(s, mmI,
tcg_opc_mov_a(TCG_REG_P7, TCG_REG_R56, TCG_AREG0),
- tcg_opc_l2 ((tcg_target_long) qemu_st_helpers[s_bits]),
- tcg_opc_x2 (TCG_REG_P7, OPC_MOVL_X2, TCG_REG_R2,
- (tcg_target_long) qemu_st_helpers[s_bits]));
- tcg_out_bundle(s, MmI,
- tcg_opc_m3 (TCG_REG_P0, OPC_LD8_M3, TCG_REG_R3,
- TCG_REG_R2, 8),
- tcg_opc_a1 (TCG_REG_P6, OPC_ADD_A1, TCG_REG_R3,
- TCG_REG_R3, TCG_REG_R57),
- tcg_opc_i21(TCG_REG_P7, OPC_MOV_I21, TCG_REG_B6,
- TCG_REG_R3, 0));
-
- switch (opc) {
- case MO_8:
- case MO_16:
- case MO_32:
- case MO_64:
- tcg_out_bundle(s, mii,
- tcg_opc_m1 (TCG_REG_P7, OPC_LD8_M1,
- TCG_REG_R1, TCG_REG_R2),
- tcg_opc_mov_a(TCG_REG_P7, TCG_REG_R58, data_reg),
- INSN_NOP_I);
- break;
-
- case MO_16 | MO_BSWAP:
- tcg_out_bundle(s, miI,
- tcg_opc_m1 (TCG_REG_P7, OPC_LD8_M1,
- TCG_REG_R1, TCG_REG_R2),
- INSN_NOP_I,
- tcg_opc_i12(TCG_REG_P6, OPC_DEP_Z_I12,
- TCG_REG_R2, data_reg, 15, 15));
- tcg_out_bundle(s, miI,
- tcg_opc_mov_a(TCG_REG_P7, TCG_REG_R58, data_reg),
- INSN_NOP_I,
- tcg_opc_bswap64_i(TCG_REG_P6, TCG_REG_R2, TCG_REG_R2));
- data_reg = TCG_REG_R2;
- break;
-
- case MO_32 | MO_BSWAP:
- tcg_out_bundle(s, miI,
- tcg_opc_m1 (TCG_REG_P7, OPC_LD8_M1,
- TCG_REG_R1, TCG_REG_R2),
- INSN_NOP_I,
- tcg_opc_i12(TCG_REG_P6, OPC_DEP_Z_I12,
- TCG_REG_R2, data_reg, 31, 31));
- tcg_out_bundle(s, miI,
- tcg_opc_mov_a(TCG_REG_P7, TCG_REG_R58, data_reg),
- INSN_NOP_I,
- tcg_opc_bswap64_i(TCG_REG_P6, TCG_REG_R2, TCG_REG_R2));
- data_reg = TCG_REG_R2;
- break;
-
- case MO_64 | MO_BSWAP:
- tcg_out_bundle(s, miI,
- tcg_opc_m1 (TCG_REG_P7, OPC_LD8_M1,
- TCG_REG_R1, TCG_REG_R2),
- tcg_opc_mov_a(TCG_REG_P7, TCG_REG_R58, data_reg),
- tcg_opc_bswap64_i(TCG_REG_P6, TCG_REG_R2, data_reg));
- data_reg = TCG_REG_R2;
- break;
-
- default:
- tcg_abort();
- }
-
+ tcg_opc_a1 (TCG_REG_P6, OPC_ADD_A1, TCG_REG_R2,
+ TCG_REG_R2, TCG_REG_R57),
+ tcg_opc_movi_a(TCG_REG_P7, TCG_REG_R59, mem_index));
+ label_ptr = s->code_ptr + 2;
tcg_out_bundle(s, miB,
tcg_opc_m4 (TCG_REG_P6, opc_st_m4[s_bits],
- data_reg, TCG_REG_R3),
- tcg_opc_movi_a(TCG_REG_P7, TCG_REG_R59, mem_index),
- tcg_opc_b5 (TCG_REG_P7, OPC_BR_CALL_SPTK_MANY_B5,
- TCG_REG_B0, TCG_REG_B6));
+ TCG_REG_R58, TCG_REG_R2),
+ INSN_NOP_I,
+ tcg_opc_b3 (TCG_REG_P7, OPC_BR_CALL_SPNT_FEW_B3, TCG_REG_B0,
+ get_reloc_pcrel21b(label_ptr)));
+
+ add_qemu_ldst_label(s, 0, opc, label_ptr);
}
#else /* !CONFIG_SOFTMMU */
+# include "tcg-be-null.h"
-static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args,
- TCGMemOp opc)
+static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args)
{
static uint64_t const opc_ld_m1[4] = {
OPC_LD1_M1, OPC_LD2_M1, OPC_LD4_M1, OPC_LD8_M1
};
int addr_reg, data_reg;
- TCGMemOp s_bits, bswap;
+ TCGMemOp opc, s_bits, bswap;
- data_reg = *args++;
- addr_reg = *args++;
+ data_reg = args[0];
+ addr_reg = args[1];
+ opc = args[2];
s_bits = opc & MO_SIZE;
bswap = opc & MO_BSWAP;
@@ -1900,8 +1962,7 @@ static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args,
#endif
}
-static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args,
- TCGMemOp opc)
+static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args)
{
static uint64_t const opc_st_m4[4] = {
OPC_ST1_M4, OPC_ST2_M4, OPC_ST4_M4, OPC_ST8_M4
@@ -1910,10 +1971,11 @@ static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args,
#if TARGET_LONG_BITS == 64
uint64_t add_guest_base;
#endif
- TCGMemOp s_bits, bswap;
+ TCGMemOp opc, s_bits, bswap;
- data_reg = *args++;
- addr_reg = *args++;
+ data_reg = args[0];
+ addr_reg = args[1];
+ opc = args[2];
s_bits = opc & MO_SIZE;
bswap = opc & MO_BSWAP;
@@ -2237,40 +2299,17 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
args[3], const_args[3], args[4], const_args[4], 0);
break;
- case INDEX_op_qemu_ld8u:
- tcg_out_qemu_ld(s, args, MO_UB);
- break;
- case INDEX_op_qemu_ld8s:
- tcg_out_qemu_ld(s, args, MO_SB);
- break;
- case INDEX_op_qemu_ld16u:
- tcg_out_qemu_ld(s, args, MO_TEUW);
- break;
- case INDEX_op_qemu_ld16s:
- tcg_out_qemu_ld(s, args, MO_TESW);
- break;
- case INDEX_op_qemu_ld32:
- case INDEX_op_qemu_ld32u:
- tcg_out_qemu_ld(s, args, MO_TEUL);
- break;
- case INDEX_op_qemu_ld32s:
- tcg_out_qemu_ld(s, args, MO_TESL);
- break;
- case INDEX_op_qemu_ld64:
- tcg_out_qemu_ld(s, args, MO_TEQ);
- break;
-
- case INDEX_op_qemu_st8:
- tcg_out_qemu_st(s, args, MO_UB);
+ case INDEX_op_qemu_ld_i32:
+ tcg_out_qemu_ld(s, args);
break;
- case INDEX_op_qemu_st16:
- tcg_out_qemu_st(s, args, MO_TEUW);
+ case INDEX_op_qemu_ld_i64:
+ tcg_out_qemu_ld(s, args);
break;
- case INDEX_op_qemu_st32:
- tcg_out_qemu_st(s, args, MO_TEUL);
+ case INDEX_op_qemu_st_i32:
+ tcg_out_qemu_st(s, args);
break;
- case INDEX_op_qemu_st64:
- tcg_out_qemu_st(s, args, MO_TEQ);
+ case INDEX_op_qemu_st_i64:
+ tcg_out_qemu_st(s, args);
break;
default:
@@ -2381,19 +2420,10 @@ static const TCGTargetOpDef ia64_op_defs[] = {
{ INDEX_op_deposit_i32, { "r", "rZ", "ri" } },
{ INDEX_op_deposit_i64, { "r", "rZ", "ri" } },
- { INDEX_op_qemu_ld8u, { "r", "r" } },
- { INDEX_op_qemu_ld8s, { "r", "r" } },
- { INDEX_op_qemu_ld16u, { "r", "r" } },
- { INDEX_op_qemu_ld16s, { "r", "r" } },
- { INDEX_op_qemu_ld32, { "r", "r" } },
- { INDEX_op_qemu_ld32u, { "r", "r" } },
- { INDEX_op_qemu_ld32s, { "r", "r" } },
- { INDEX_op_qemu_ld64, { "r", "r" } },
-
- { INDEX_op_qemu_st8, { "SZ", "r" } },
- { INDEX_op_qemu_st16, { "SZ", "r" } },
- { INDEX_op_qemu_st32, { "SZ", "r" } },
- { INDEX_op_qemu_st64, { "SZ", "r" } },
+ { INDEX_op_qemu_ld_i32, { "r", "r" } },
+ { INDEX_op_qemu_ld_i64, { "r", "r" } },
+ { INDEX_op_qemu_st_i32, { "SZ", "r" } },
+ { INDEX_op_qemu_st_i64, { "SZ", "r" } },
{ -1 },
};
diff --git a/tcg/ia64/tcg-target.h b/tcg/ia64/tcg-target.h
index 52a939c946..09c3ba8fe3 100644
--- a/tcg/ia64/tcg-target.h
+++ b/tcg/ia64/tcg-target.h
@@ -153,7 +153,7 @@ typedef enum {
#define TCG_TARGET_HAS_mulsh_i32 0
#define TCG_TARGET_HAS_mulsh_i64 0
-#define TCG_TARGET_HAS_new_ldst 0
+#define TCG_TARGET_HAS_new_ldst 1
#define TCG_TARGET_deposit_i32_valid(ofs, len) ((len) <= 16)
#define TCG_TARGET_deposit_i64_valid(ofs, len) ((len) <= 16)
diff --git a/tcg/mips/tcg-target.c b/tcg/mips/tcg-target.c
index 40551cdcb5..37241b224b 100644
--- a/tcg/mips/tcg-target.c
+++ b/tcg/mips/tcg-target.c
@@ -26,7 +26,7 @@
#include "tcg-be-null.h"
-#if defined(TCG_TARGET_WORDS_BIGENDIAN) == defined(TARGET_WORDS_BIGENDIAN)
+#if defined(HOST_WORDS_BIGENDIAN) == defined(TARGET_WORDS_BIGENDIAN)
# define TCG_NEED_BSWAP 0
#else
# define TCG_NEED_BSWAP 1
@@ -253,7 +253,7 @@ static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
}
/* test if a constant matches the constraint */
-static inline int tcg_target_const_match(tcg_target_long val,
+static inline int tcg_target_const_match(tcg_target_long val, TCGType type,
const TCGArgConstraint *arg_ct)
{
int ct;
@@ -589,7 +589,7 @@ static inline void tcg_out_call_iarg_reg64(TCGContext *s, int *arg_num,
{
(*arg_num) = (*arg_num + 1) & ~1;
-#if defined(TCG_TARGET_WORDS_BIGENDIAN)
+#if defined(HOST_WORDS_BIGENDIAN)
tcg_out_call_iarg_reg32(s, arg_num, arg_high);
tcg_out_call_iarg_reg32(s, arg_num, arg_low);
#else
@@ -964,7 +964,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args,
#if defined(CONFIG_SOFTMMU)
# if TARGET_LONG_BITS == 64
addr_regh = *args++;
-# if defined(TCG_TARGET_WORDS_BIGENDIAN)
+# if defined(HOST_WORDS_BIGENDIAN)
addr_memh = 0;
addr_meml = 4;
# else
@@ -979,7 +979,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args,
#endif
if (opc == 3) {
-#if defined(TCG_TARGET_WORDS_BIGENDIAN)
+#if defined(HOST_WORDS_BIGENDIAN)
data_reg1 = data_regh;
data_reg2 = data_regl;
#else
@@ -1152,7 +1152,7 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args,
#if defined(CONFIG_SOFTMMU)
# if TARGET_LONG_BITS == 64
addr_regh = *args++;
-# if defined(TCG_TARGET_WORDS_BIGENDIAN)
+# if defined(HOST_WORDS_BIGENDIAN)
addr_memh = 0;
addr_meml = 4;
# else
@@ -1167,7 +1167,7 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args,
#endif
if (opc == 3) {
-#if defined(TCG_TARGET_WORDS_BIGENDIAN)
+#if defined(HOST_WORDS_BIGENDIAN)
data_reg1 = data_regh;
data_reg2 = data_regl;
#else
diff --git a/tcg/mips/tcg-target.h b/tcg/mips/tcg-target.h
index 683c6af8b9..9576db514d 100644
--- a/tcg/mips/tcg-target.h
+++ b/tcg/mips/tcg-target.h
@@ -26,10 +26,6 @@
#ifndef TCG_TARGET_MIPS
#define TCG_TARGET_MIPS 1
-#ifdef __MIPSEB__
-# define TCG_TARGET_WORDS_BIGENDIAN
-#endif
-
#define TCG_TARGET_NB_REGS 32
typedef enum {
@@ -109,6 +105,7 @@ extern bool use_mips32r2_instructions;
#define TCG_TARGET_HAS_orc_i32 0
#define TCG_TARGET_HAS_eqv_i32 0
#define TCG_TARGET_HAS_nand_i32 0
+#define TCG_TARGET_HAS_mulu2_i32 1
#define TCG_TARGET_HAS_muls2_i32 1
#define TCG_TARGET_HAS_muluh_i32 1
#define TCG_TARGET_HAS_mulsh_i32 1
diff --git a/tcg/optimize.c b/tcg/optimize.c
index 7777743e88..c447062ab1 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -220,34 +220,34 @@ static TCGArg do_constant_folding_2(TCGOpcode op, TCGArg x, TCGArg y)
return x ^ y;
case INDEX_op_shl_i32:
- return (uint32_t)x << (uint32_t)y;
+ return (uint32_t)x << (y & 31);
case INDEX_op_shl_i64:
- return (uint64_t)x << (uint64_t)y;
+ return (uint64_t)x << (y & 63);
case INDEX_op_shr_i32:
- return (uint32_t)x >> (uint32_t)y;
+ return (uint32_t)x >> (y & 31);
case INDEX_op_shr_i64:
- return (uint64_t)x >> (uint64_t)y;
+ return (uint64_t)x >> (y & 63);
case INDEX_op_sar_i32:
- return (int32_t)x >> (int32_t)y;
+ return (int32_t)x >> (y & 31);
case INDEX_op_sar_i64:
- return (int64_t)x >> (int64_t)y;
+ return (int64_t)x >> (y & 63);
case INDEX_op_rotr_i32:
- return ror32(x, y);
+ return ror32(x, y & 31);
case INDEX_op_rotr_i64:
- return ror64(x, y);
+ return ror64(x, y & 63);
case INDEX_op_rotl_i32:
- return rol32(x, y);
+ return rol32(x, y & 31);
case INDEX_op_rotl_i64:
- return rol64(x, y);
+ return rol64(x, y & 63);
CASE_OP_32_64(not):
return ~x;
@@ -806,29 +806,34 @@ static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr,
case INDEX_op_sar_i32:
if (temps[args[2]].state == TCG_TEMP_CONST) {
- mask = (int32_t)temps[args[1]].mask >> temps[args[2]].val;
+ tmp = temps[args[2]].val & 31;
+ mask = (int32_t)temps[args[1]].mask >> tmp;
}
break;
case INDEX_op_sar_i64:
if (temps[args[2]].state == TCG_TEMP_CONST) {
- mask = (int64_t)temps[args[1]].mask >> temps[args[2]].val;
+ tmp = temps[args[2]].val & 63;
+ mask = (int64_t)temps[args[1]].mask >> tmp;
}
break;
case INDEX_op_shr_i32:
if (temps[args[2]].state == TCG_TEMP_CONST) {
- mask = (uint32_t)temps[args[1]].mask >> temps[args[2]].val;
+ tmp = temps[args[2]].val & 31;
+ mask = (uint32_t)temps[args[1]].mask >> tmp;
}
break;
case INDEX_op_shr_i64:
if (temps[args[2]].state == TCG_TEMP_CONST) {
- mask = (uint64_t)temps[args[1]].mask >> temps[args[2]].val;
+ tmp = temps[args[2]].val & 63;
+ mask = (uint64_t)temps[args[1]].mask >> tmp;
}
break;
CASE_OP_32_64(shl):
if (temps[args[2]].state == TCG_TEMP_CONST) {
- mask = temps[args[1]].mask << temps[args[2]].val;
+ tmp = temps[args[2]].val & (TCG_TARGET_REG_BITS - 1);
+ mask = temps[args[1]].mask << tmp;
}
break;
@@ -838,9 +843,8 @@ static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr,
break;
CASE_OP_32_64(deposit):
- tmp = ((1ull << args[4]) - 1);
- mask = ((temps[args[1]].mask & ~(tmp << args[3]))
- | ((temps[args[2]].mask & tmp) << args[3]));
+ mask = deposit64(temps[args[1]].mask, args[3], args[4],
+ temps[args[2]].mask);
break;
CASE_OP_32_64(or):
@@ -1055,9 +1059,8 @@ static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr,
if (temps[args[1]].state == TCG_TEMP_CONST
&& temps[args[2]].state == TCG_TEMP_CONST) {
s->gen_opc_buf[op_index] = op_to_movi(op);
- tmp = ((1ull << args[4]) - 1);
- tmp = (temps[args[1]].val & ~(tmp << args[3]))
- | ((temps[args[2]].val & tmp) << args[3]);
+ tmp = deposit64(temps[args[1]].val, args[3], args[4],
+ temps[args[2]].val);
tcg_opt_gen_movi(gen_args, args[0], tmp);
gen_args += 2;
args += 5;
diff --git a/tcg/ppc/tcg-target.c b/tcg/ppc/tcg-target.c
index dc2c2df890..83d9340fca 100644
--- a/tcg/ppc/tcg-target.c
+++ b/tcg/ppc/tcg-target.c
@@ -298,7 +298,7 @@ static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
}
/* test if a constant matches the constraint */
-static int tcg_target_const_match(tcg_target_long val,
+static int tcg_target_const_match(tcg_target_long val, TCGType type,
const TCGArgConstraint *arg_ct)
{
int ct;
@@ -524,7 +524,7 @@ static void tcg_out_call (TCGContext *s, tcg_target_long arg, int const_arg,
#if defined(CONFIG_SOFTMMU)
static void add_qemu_ldst_label (TCGContext *s,
- int is_ld,
+ bool is_ld,
TCGMemOp opc,
int data_reg,
int data_reg2,
@@ -720,7 +720,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
break;
}
#ifdef CONFIG_SOFTMMU
- add_qemu_ldst_label(s, 1, opc, datalo, datahi, addrlo,
+ add_qemu_ldst_label(s, true, opc, datalo, datahi, addrlo,
addrhi, mem_index, s->code_ptr, label_ptr);
#endif
}
@@ -779,7 +779,7 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
}
#ifdef CONFIG_SOFTMMU
- add_qemu_ldst_label(s, 0, opc, datalo, datahi, addrlo, addrhi,
+ add_qemu_ldst_label(s, false, opc, datalo, datahi, addrlo, addrhi,
mem_index, s->code_ptr, label_ptr);
#endif
}
diff --git a/tcg/ppc/tcg-target.h b/tcg/ppc/tcg-target.h
index e3395e301c..0d4f5959a7 100644
--- a/tcg/ppc/tcg-target.h
+++ b/tcg/ppc/tcg-target.h
@@ -24,7 +24,6 @@
#ifndef TCG_TARGET_PPC
#define TCG_TARGET_PPC 1
-#define TCG_TARGET_WORDS_BIGENDIAN
#define TCG_TARGET_NB_REGS 32
typedef enum {
@@ -95,6 +94,7 @@ typedef enum {
#define TCG_TARGET_HAS_nor_i32 1
#define TCG_TARGET_HAS_deposit_i32 1
#define TCG_TARGET_HAS_movcond_i32 1
+#define TCG_TARGET_HAS_mulu2_i32 1
#define TCG_TARGET_HAS_muls2_i32 0
#define TCG_TARGET_HAS_muluh_i32 0
#define TCG_TARGET_HAS_mulsh_i32 0
diff --git a/tcg/ppc64/tcg-target.c b/tcg/ppc64/tcg-target.c
index 06e440f9bc..45b1c06910 100644
--- a/tcg/ppc64/tcg-target.c
+++ b/tcg/ppc64/tcg-target.c
@@ -290,13 +290,21 @@ static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
}
/* test if a constant matches the constraint */
-static int tcg_target_const_match(tcg_target_long val,
+static int tcg_target_const_match(tcg_target_long val, TCGType type,
const TCGArgConstraint *arg_ct)
{
int ct = arg_ct->ct;
if (ct & TCG_CT_CONST) {
return 1;
- } else if ((ct & TCG_CT_CONST_S16) && val == (int16_t)val) {
+ }
+
+ /* The only 32-bit constraint we use aside from
+ TCG_CT_CONST is TCG_CT_CONST_S16. */
+ if (type == TCG_TYPE_I32) {
+ val = (int32_t)val;
+ }
+
+ if ((ct & TCG_CT_CONST_S16) && val == (int16_t)val) {
return 1;
} else if ((ct & TCG_CT_CONST_U16) && val == (uint16_t)val) {
return 1;
diff --git a/tcg/ppc64/tcg-target.h b/tcg/ppc64/tcg-target.h
index 7ee50b6c6c..78bbf7a34a 100644
--- a/tcg/ppc64/tcg-target.h
+++ b/tcg/ppc64/tcg-target.h
@@ -24,7 +24,6 @@
#ifndef TCG_TARGET_PPC64
#define TCG_TARGET_PPC64 1
-#define TCG_TARGET_WORDS_BIGENDIAN
#define TCG_TARGET_NB_REGS 32
typedef enum {
diff --git a/tcg/s390/tcg-target.c b/tcg/s390/tcg-target.c
index 907d9d1744..1d912a7937 100644
--- a/tcg/s390/tcg-target.c
+++ b/tcg/s390/tcg-target.c
@@ -38,11 +38,10 @@
a 32-bit displacement here Just In Case. */
#define USE_LONG_BRANCHES 0
-#define TCG_CT_CONST_32 0x0100
-#define TCG_CT_CONST_MULI 0x0800
-#define TCG_CT_CONST_ORI 0x2000
-#define TCG_CT_CONST_XORI 0x4000
-#define TCG_CT_CONST_CMPI 0x8000
+#define TCG_CT_CONST_MULI 0x100
+#define TCG_CT_CONST_ORI 0x200
+#define TCG_CT_CONST_XORI 0x400
+#define TCG_CT_CONST_CMPI 0x800
/* Several places within the instruction set 0 means "no register"
rather than TCG_REG_R0. */
@@ -407,9 +406,6 @@ static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
tcg_regset_clear(ct->u.regs);
tcg_regset_set_reg(ct->u.regs, TCG_REG_R3);
break;
- case 'W': /* force 32-bit ("word") immediate */
- ct->ct |= TCG_CT_CONST_32;
- break;
case 'K':
ct->ct |= TCG_CT_CONST_MULI;
break;
@@ -437,10 +433,10 @@ static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
can load efficiently, and the immediate load plus the reg-reg OR is
smaller than the sequential OI's. */
-static int tcg_match_ori(int ct, tcg_target_long val)
+static int tcg_match_ori(TCGType type, tcg_target_long val)
{
if (facilities & FACILITY_EXT_IMM) {
- if (ct & TCG_CT_CONST_32) {
+ if (type == TCG_TYPE_I32) {
/* All 32-bit ORs can be performed with 1 48-bit insn. */
return 1;
}
@@ -466,13 +462,13 @@ static int tcg_match_ori(int ct, tcg_target_long val)
extended-immediate facility. That said, there are a few patterns for
which it is better to load the value into a register first. */
-static int tcg_match_xori(int ct, tcg_target_long val)
+static int tcg_match_xori(TCGType type, tcg_target_long val)
{
if ((facilities & FACILITY_EXT_IMM) == 0) {
return 0;
}
- if (ct & TCG_CT_CONST_32) {
+ if (type == TCG_TYPE_I32) {
/* All 32-bit XORs can be performed with 1 48-bit insn. */
return 1;
}
@@ -487,11 +483,11 @@ static int tcg_match_xori(int ct, tcg_target_long val)
/* Immediates to be used with comparisons. */
-static int tcg_match_cmpi(int ct, tcg_target_long val)
+static int tcg_match_cmpi(TCGType type, tcg_target_long val)
{
if (facilities & FACILITY_EXT_IMM) {
/* The COMPARE IMMEDIATE instruction is available. */
- if (ct & TCG_CT_CONST_32) {
+ if (type == TCG_TYPE_I32) {
/* We have a 32-bit immediate and can compare against anything. */
return 1;
} else {
@@ -515,7 +511,7 @@ static int tcg_match_cmpi(int ct, tcg_target_long val)
}
/* Test if a constant matches the constraint. */
-static int tcg_target_const_match(tcg_target_long val,
+static int tcg_target_const_match(tcg_target_long val, TCGType type,
const TCGArgConstraint *arg_ct)
{
int ct = arg_ct->ct;
@@ -524,8 +520,7 @@ static int tcg_target_const_match(tcg_target_long val,
return 1;
}
- /* Handle the modifiers. */
- if (ct & TCG_CT_CONST_32) {
+ if (type == TCG_TYPE_I32) {
val = (int32_t)val;
}
@@ -541,11 +536,11 @@ static int tcg_target_const_match(tcg_target_long val,
return val == (int16_t)val;
}
} else if (ct & TCG_CT_CONST_ORI) {
- return tcg_match_ori(ct, val);
+ return tcg_match_ori(type, val);
} else if (ct & TCG_CT_CONST_XORI) {
- return tcg_match_xori(ct, val);
+ return tcg_match_xori(type, val);
} else if (ct & TCG_CT_CONST_CMPI) {
- return tcg_match_cmpi(ct, val);
+ return tcg_match_cmpi(type, val);
}
return 0;
@@ -2112,8 +2107,8 @@ static const TCGTargetOpDef s390_op_defs[] = {
{ INDEX_op_divu2_i32, { "b", "a", "0", "1", "r" } },
{ INDEX_op_and_i32, { "r", "0", "ri" } },
- { INDEX_op_or_i32, { "r", "0", "rWO" } },
- { INDEX_op_xor_i32, { "r", "0", "rWX" } },
+ { INDEX_op_or_i32, { "r", "0", "rO" } },
+ { INDEX_op_xor_i32, { "r", "0", "rX" } },
{ INDEX_op_neg_i32, { "r", "r" } },
@@ -2135,9 +2130,9 @@ static const TCGTargetOpDef s390_op_defs[] = {
{ INDEX_op_add2_i32, { "r", "r", "0", "1", "r", "r" } },
{ INDEX_op_sub2_i32, { "r", "r", "0", "1", "r", "r" } },
- { INDEX_op_brcond_i32, { "r", "rWC" } },
- { INDEX_op_setcond_i32, { "r", "r", "rWC" } },
- { INDEX_op_movcond_i32, { "r", "r", "rWC", "r", "0" } },
+ { INDEX_op_brcond_i32, { "r", "rC" } },
+ { INDEX_op_setcond_i32, { "r", "r", "rC" } },
+ { INDEX_op_movcond_i32, { "r", "r", "rC", "r", "0" } },
{ INDEX_op_deposit_i32, { "r", "0", "r" } },
{ INDEX_op_qemu_ld8u, { "r", "L" } },
diff --git a/tcg/s390/tcg-target.h b/tcg/s390/tcg-target.h
index 10adb778c7..b3bfdcc22c 100644
--- a/tcg/s390/tcg-target.h
+++ b/tcg/s390/tcg-target.h
@@ -24,8 +24,6 @@
#ifndef TCG_TARGET_S390
#define TCG_TARGET_S390 1
-#define TCG_TARGET_WORDS_BIGENDIAN
-
typedef enum TCGReg {
TCG_REG_R0 = 0,
TCG_REG_R1,
diff --git a/tcg/sparc/tcg-target.c b/tcg/sparc/tcg-target.c
index cbd1c91779..35089b82c9 100644
--- a/tcg/sparc/tcg-target.c
+++ b/tcg/sparc/tcg-target.c
@@ -327,14 +327,20 @@ static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
}
/* test if a constant matches the constraint */
-static inline int tcg_target_const_match(tcg_target_long val,
+static inline int tcg_target_const_match(tcg_target_long val, TCGType type,
const TCGArgConstraint *arg_ct)
{
int ct = arg_ct->ct;
if (ct & TCG_CT_CONST) {
return 1;
- } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
+ }
+
+ if (type == TCG_TYPE_I32) {
+ val = (int32_t)val;
+ }
+
+ if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
return 1;
} else if ((ct & TCG_CT_CONST_S11) && check_fit_tl(val, 11)) {
return 1;
@@ -384,37 +390,47 @@ static inline void tcg_out_movi_imm13(TCGContext *s, int ret, uint32_t arg)
tcg_out_arithi(s, ret, TCG_REG_G0, arg, ARITH_OR);
}
-static inline void tcg_out_movi_imm32(TCGContext *s, int ret, uint32_t arg)
+static void tcg_out_movi(TCGContext *s, TCGType type,
+ TCGReg ret, tcg_target_long arg)
{
- if (check_fit_tl(arg, 13))
+ tcg_target_long hi, lo;
+
+ /* A 13-bit constant sign-extended to 64-bits. */
+ if (check_fit_tl(arg, 13)) {
tcg_out_movi_imm13(s, ret, arg);
- else {
- tcg_out_sethi(s, ret, arg);
- if (arg & 0x3ff)
- tcg_out_arithi(s, ret, ret, arg & 0x3ff, ARITH_OR);
+ return;
}
-}
-static inline void tcg_out_movi(TCGContext *s, TCGType type,
- TCGReg ret, tcg_target_long arg)
-{
- /* All 32-bit constants, as well as 64-bit constants with
- no high bits set go through movi_imm32. */
+ /* A 32-bit constant, or 32-bit zero-extended to 64-bits. */
if (TCG_TARGET_REG_BITS == 32
|| type == TCG_TYPE_I32
- || (arg & ~(tcg_target_long)0xffffffff) == 0) {
- tcg_out_movi_imm32(s, ret, arg);
- } else if (check_fit_tl(arg, 13)) {
- /* A 13-bit constant sign-extended to 64-bits. */
- tcg_out_movi_imm13(s, ret, arg);
- } else if (check_fit_tl(arg, 32)) {
- /* A 32-bit constant sign-extended to 64-bits. */
+ || (arg & ~0xffffffffu) == 0) {
+ tcg_out_sethi(s, ret, arg);
+ if (arg & 0x3ff) {
+ tcg_out_arithi(s, ret, ret, arg & 0x3ff, ARITH_OR);
+ }
+ return;
+ }
+
+ /* A 32-bit constant sign-extended to 64-bits. */
+ if (check_fit_tl(arg, 32)) {
tcg_out_sethi(s, ret, ~arg);
tcg_out_arithi(s, ret, ret, (arg & 0x3ff) | -0x400, ARITH_XOR);
+ return;
+ }
+
+ /* A 64-bit constant decomposed into 2 32-bit pieces. */
+ lo = (int32_t)arg;
+ if (check_fit_tl(lo, 13)) {
+ hi = (arg - lo) >> 31 >> 1;
+ tcg_out_movi(s, TCG_TYPE_I32, ret, hi);
+ tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
+ tcg_out_arithi(s, ret, ret, lo, ARITH_ADD);
} else {
- tcg_out_movi_imm32(s, ret, arg >> (TCG_TARGET_REG_BITS / 2));
+ hi = arg >> 31 >> 1;
+ tcg_out_movi(s, TCG_TYPE_I32, ret, hi);
+ tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_T2, lo);
tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
- tcg_out_movi_imm32(s, TCG_REG_T2, arg);
tcg_out_arith(s, ret, ret, TCG_REG_T2, ARITH_OR);
}
}
@@ -449,13 +465,14 @@ static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
tcg_out_ldst(s, arg, arg1, arg2, (type == TCG_TYPE_I32 ? STW : STX));
}
-static inline void tcg_out_ld_ptr(TCGContext *s, int ret,
- tcg_target_long arg)
+static inline void tcg_out_ld_ptr(TCGContext *s, TCGReg ret, uintptr_t arg)
{
+ TCGReg base = TCG_REG_G0;
if (!check_fit_tl(arg, 10)) {
tcg_out_movi(s, TCG_TYPE_PTR, ret, arg & ~0x3ff);
+ base = ret;
}
- tcg_out_ld(s, TCG_TYPE_PTR, ret, ret, arg & 0x3ff);
+ tcg_out_ld(s, TCG_TYPE_PTR, ret, base, arg & 0x3ff);
}
static inline void tcg_out_sety(TCGContext *s, int rs)
@@ -480,19 +497,6 @@ static inline void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
}
}
-static inline void tcg_out_andi(TCGContext *s, int rd, int rs,
- tcg_target_long val)
-{
- if (val != 0) {
- if (check_fit_tl(val, 13))
- tcg_out_arithi(s, rd, rs, val, ARITH_AND);
- else {
- tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_T1, val);
- tcg_out_arith(s, rd, rs, TCG_REG_T1, ARITH_AND);
- }
- }
-}
-
static void tcg_out_div32(TCGContext *s, int rd, int rs1,
int val2, int val2const, int uns)
{
@@ -796,6 +800,110 @@ static void tcg_out_addsub2(TCGContext *s, TCGArg rl, TCGArg rh,
tcg_out_mov(s, TCG_TYPE_I32, rl, tmp);
}
+static inline void tcg_out_calli(TCGContext *s, uintptr_t dest)
+{
+ intptr_t disp = dest - (uintptr_t)s->code_ptr;
+
+ if (disp == (int32_t)disp) {
+ tcg_out32(s, CALL | (uint32_t)disp >> 2);
+ } else {
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, dest & ~0xfff);
+ tcg_out_arithi(s, TCG_REG_O7, TCG_REG_T1, dest & 0xfff, JMPL);
+ }
+}
+
+#ifdef CONFIG_SOFTMMU
+static uintptr_t qemu_ld_trampoline[16];
+static uintptr_t qemu_st_trampoline[16];
+
+static void build_trampolines(TCGContext *s)
+{
+ static uintptr_t const qemu_ld_helpers[16] = {
+ [MO_UB] = (uintptr_t)helper_ret_ldub_mmu,
+ [MO_SB] = (uintptr_t)helper_ret_ldsb_mmu,
+ [MO_LEUW] = (uintptr_t)helper_le_lduw_mmu,
+ [MO_LESW] = (uintptr_t)helper_le_ldsw_mmu,
+ [MO_LEUL] = (uintptr_t)helper_le_ldul_mmu,
+ [MO_LEQ] = (uintptr_t)helper_le_ldq_mmu,
+ [MO_BEUW] = (uintptr_t)helper_be_lduw_mmu,
+ [MO_BESW] = (uintptr_t)helper_be_ldsw_mmu,
+ [MO_BEUL] = (uintptr_t)helper_be_ldul_mmu,
+ [MO_BEQ] = (uintptr_t)helper_be_ldq_mmu,
+ };
+ static uintptr_t const qemu_st_helpers[16] = {
+ [MO_UB] = (uintptr_t)helper_ret_stb_mmu,
+ [MO_LEUW] = (uintptr_t)helper_le_stw_mmu,
+ [MO_LEUL] = (uintptr_t)helper_le_stl_mmu,
+ [MO_LEQ] = (uintptr_t)helper_le_stq_mmu,
+ [MO_BEUW] = (uintptr_t)helper_be_stw_mmu,
+ [MO_BEUL] = (uintptr_t)helper_be_stl_mmu,
+ [MO_BEQ] = (uintptr_t)helper_be_stq_mmu,
+ };
+
+ int i;
+ TCGReg ra;
+ uintptr_t tramp;
+
+ for (i = 0; i < 16; ++i) {
+ if (qemu_ld_helpers[i] == 0) {
+ continue;
+ }
+
+ /* May as well align the trampoline. */
+ tramp = (uintptr_t)s->code_ptr;
+ while (tramp & 15) {
+ tcg_out_nop(s);
+ tramp += 4;
+ }
+ qemu_ld_trampoline[i] = tramp;
+
+ /* Find the retaddr argument register. */
+ ra = TCG_REG_O3 + (TARGET_LONG_BITS > TCG_TARGET_REG_BITS);
+
+ /* Set the retaddr operand. */
+ tcg_out_mov(s, TCG_TYPE_PTR, ra, TCG_REG_O7);
+ /* Set the env operand. */
+ tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O0, TCG_AREG0);
+ /* Tail call. */
+ tcg_out_calli(s, qemu_ld_helpers[i]);
+ tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O7, ra);
+ }
+
+ for (i = 0; i < 16; ++i) {
+ if (qemu_st_helpers[i] == 0) {
+ continue;
+ }
+
+ /* May as well align the trampoline. */
+ tramp = (uintptr_t)s->code_ptr;
+ while (tramp & 15) {
+ tcg_out_nop(s);
+ tramp += 4;
+ }
+ qemu_st_trampoline[i] = tramp;
+
+ /* Find the retaddr argument. For 32-bit, this may be past the
+ last argument register, and need passing on the stack. */
+ ra = (TCG_REG_O4
+ + (TARGET_LONG_BITS > TCG_TARGET_REG_BITS)
+ + (TCG_TARGET_REG_BITS == 32 && (i & MO_SIZE) == MO_64));
+
+ /* Set the retaddr operand. */
+ if (ra >= TCG_REG_O6) {
+ tcg_out_st(s, TCG_TYPE_PTR, TCG_REG_O7, TCG_REG_CALL_STACK,
+ TCG_TARGET_CALL_STACK_OFFSET);
+ ra = TCG_REG_G1;
+ }
+ tcg_out_mov(s, TCG_TYPE_PTR, ra, TCG_REG_O7);
+ /* Set the env operand. */
+ tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O0, TCG_AREG0);
+ /* Tail call. */
+ tcg_out_calli(s, qemu_st_helpers[i]);
+ tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O7, ra);
+ }
+}
+#endif
+
/* Generate global QEMU prologue and epilogue code */
static void tcg_target_qemu_prologue(TCGContext *s)
{
@@ -823,39 +931,22 @@ static void tcg_target_qemu_prologue(TCGContext *s)
}
#endif
- tcg_out32(s, JMPL | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_I1) |
- INSN_RS2(TCG_REG_G0));
+ tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I1, 0, JMPL);
/* delay slot */
tcg_out_nop(s);
/* No epilogue required. We issue ret + restore directly in the TB. */
+
+#ifdef CONFIG_SOFTMMU
+ build_trampolines(s);
+#endif
}
#if defined(CONFIG_SOFTMMU)
-
-/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
- int mmu_idx) */
-static const void * const qemu_ld_helpers[4] = {
- helper_ldb_mmu,
- helper_ldw_mmu,
- helper_ldl_mmu,
- helper_ldq_mmu,
-};
-
-/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
- uintxx_t val, int mmu_idx) */
-static const void * const qemu_st_helpers[4] = {
- helper_stb_mmu,
- helper_stw_mmu,
- helper_stl_mmu,
- helper_stq_mmu,
-};
-
/* Perform the TLB load and compare.
Inputs:
- ADDRLO_IDX contains the index into ARGS of the low part of the
- address; the high part of the address is at ADDR_LOW_IDX+1.
+ ADDRLO and ADDRHI contain the possible two parts of the address.
MEM_INDEX and S_BITS are the memory context and log2 size of the load.
@@ -865,32 +956,38 @@ static const void * const qemu_st_helpers[4] = {
The result of the TLB comparison is in %[ix]cc. The sanitized address
is in the returned register, maybe %o0. The TLB addend is in %o1. */
-static int tcg_out_tlb_load(TCGContext *s, int addrlo_idx, int mem_index,
- int s_bits, const TCGArg *args, int which)
+static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
+ int mem_index, TCGMemOp s_bits, int which)
{
- const int addrlo = args[addrlo_idx];
- const int r0 = TCG_REG_O0;
- const int r1 = TCG_REG_O1;
- const int r2 = TCG_REG_O2;
- int addr = addrlo;
+ const TCGReg r0 = TCG_REG_O0;
+ const TCGReg r1 = TCG_REG_O1;
+ const TCGReg r2 = TCG_REG_O2;
+ TCGReg addr = addrlo;
int tlb_ofs;
if (TCG_TARGET_REG_BITS == 32 && TARGET_LONG_BITS == 64) {
/* Assemble the 64-bit address in R0. */
tcg_out_arithi(s, r0, addrlo, 0, SHIFT_SRL);
- tcg_out_arithi(s, r1, args[addrlo_idx + 1], 32, SHIFT_SLLX);
+ tcg_out_arithi(s, r1, addrhi, 32, SHIFT_SLLX);
tcg_out_arith(s, r0, r0, r1, ARITH_OR);
+ addr = r0;
}
- /* Shift the page number down to tlb-entry. */
- tcg_out_arithi(s, r1, addrlo,
- TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS, SHIFT_SRL);
+ /* Shift the page number down. */
+ tcg_out_arithi(s, r1, addrlo, TARGET_PAGE_BITS, SHIFT_SRL);
/* Mask out the page offset, except for the required alignment. */
- tcg_out_andi(s, r0, addr, TARGET_PAGE_MASK | ((1 << s_bits) - 1));
+ tcg_out_movi(s, TCG_TYPE_TL, TCG_REG_T1,
+ TARGET_PAGE_MASK | ((1 << s_bits) - 1));
+
+ /* Mask the tlb index. */
+ tcg_out_arithi(s, r1, r1, CPU_TLB_SIZE - 1, ARITH_AND);
+
+ /* Mask page, part 2. */
+ tcg_out_arith(s, r0, addr, TCG_REG_T1, ARITH_AND);
- /* Compute tlb index, modulo tlb size. */
- tcg_out_andi(s, r1, r1, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);
+ /* Shift the tlb index into place. */
+ tcg_out_arithi(s, r1, r1, CPU_TLB_ENTRY_BITS, SHIFT_SLL);
/* Relative to the current ENV. */
tcg_out_arith(s, r1, TCG_AREG0, r1, ARITH_ADD);
@@ -898,8 +995,8 @@ static int tcg_out_tlb_load(TCGContext *s, int addrlo_idx, int mem_index,
/* Find a base address that can load both tlb comparator and addend. */
tlb_ofs = offsetof(CPUArchState, tlb_table[mem_index][0]);
if (!check_fit_tl(tlb_ofs + sizeof(CPUTLBEntry), 13)) {
- tcg_out_addi(s, r1, tlb_ofs);
- tlb_ofs = 0;
+ tcg_out_addi(s, r1, tlb_ofs & ~0x3ff);
+ tlb_ofs &= 0x3ff;
}
/* Load the tlb comparator and the addend. */
@@ -918,56 +1015,71 @@ static int tcg_out_tlb_load(TCGContext *s, int addrlo_idx, int mem_index,
}
#endif /* CONFIG_SOFTMMU */
-static const int qemu_ld_opc[8] = {
-#ifdef TARGET_WORDS_BIGENDIAN
- LDUB, LDUH, LDUW, LDX, LDSB, LDSH, LDSW, LDX
-#else
- LDUB, LDUH_LE, LDUW_LE, LDX_LE, LDSB, LDSH_LE, LDSW_LE, LDX_LE
-#endif
+static const int qemu_ld_opc[16] = {
+ [MO_UB] = LDUB,
+ [MO_SB] = LDSB,
+
+ [MO_BEUW] = LDUH,
+ [MO_BESW] = LDSH,
+ [MO_BEUL] = LDUW,
+ [MO_BESL] = LDSW,
+ [MO_BEQ] = LDX,
+
+ [MO_LEUW] = LDUH_LE,
+ [MO_LESW] = LDSH_LE,
+ [MO_LEUL] = LDUW_LE,
+ [MO_LESL] = LDSW_LE,
+ [MO_LEQ] = LDX_LE,
};
-static const int qemu_st_opc[4] = {
-#ifdef TARGET_WORDS_BIGENDIAN
- STB, STH, STW, STX
-#else
- STB, STH_LE, STW_LE, STX_LE
-#endif
+static const int qemu_st_opc[16] = {
+ [MO_UB] = STB,
+
+ [MO_BEUW] = STH,
+ [MO_BEUL] = STW,
+ [MO_BEQ] = STX,
+
+ [MO_LEUW] = STH_LE,
+ [MO_LEUL] = STW_LE,
+ [MO_LEQ] = STX_LE,
};
-static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int sizeop)
+static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
{
- int addrlo_idx = 1, datalo, datahi, addr_reg;
+ TCGReg addrlo, datalo, datahi, addrhi __attribute__((unused));
+ TCGMemOp memop, s_bits;
#if defined(CONFIG_SOFTMMU)
- int memi_idx, memi, s_bits, n;
+ TCGReg addrz, param;
+ uintptr_t func;
+ int memi;
uint32_t *label_ptr[2];
#endif
- datahi = datalo = args[0];
- if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
- datahi = args[1];
- addrlo_idx = 2;
- }
+ datalo = *args++;
+ datahi = (TCG_TARGET_REG_BITS == 32 && is64 ? *args++ : 0);
+ addrlo = *args++;
+ addrhi = (TARGET_LONG_BITS > TCG_TARGET_REG_BITS ? *args++ : 0);
+ memop = *args++;
+ s_bits = memop & MO_SIZE;
#if defined(CONFIG_SOFTMMU)
- memi_idx = addrlo_idx + 1 + (TARGET_LONG_BITS > TCG_TARGET_REG_BITS);
- memi = args[memi_idx];
- s_bits = sizeop & 3;
+ memi = *args++;
+ addrz = tcg_out_tlb_load(s, addrlo, addrhi, memi, s_bits,
+ offsetof(CPUTLBEntry, addr_read));
- addr_reg = tcg_out_tlb_load(s, addrlo_idx, memi, s_bits, args,
- offsetof(CPUTLBEntry, addr_read));
-
- if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
+ if (TCG_TARGET_REG_BITS == 32 && s_bits == MO_64) {
int reg64;
/* bne,pn %[xi]cc, label0 */
label_ptr[0] = (uint32_t *)s->code_ptr;
tcg_out_bpcc0(s, COND_NE, BPCC_PN
| (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0);
+ tcg_out_nop(s);
/* TLB Hit. */
/* Load all 64-bits into an O/G register. */
reg64 = (datalo < 16 ? datalo : TCG_REG_O0);
- tcg_out_ldst_rr(s, reg64, addr_reg, TCG_REG_O1, qemu_ld_opc[sizeop]);
+ tcg_out_ldst_rr(s, reg64, addrz, TCG_REG_O1, qemu_ld_opc[memop]);
/* Move the two 32-bit pieces into the destination registers. */
tcg_out_arithi(s, datahi, reg64, 32, SHIFT_SRLX);
@@ -989,7 +1101,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int sizeop)
tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT
| (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0);
/* delay slot */
- tcg_out_ldst_rr(s, datalo, addr_reg, TCG_REG_O1, qemu_ld_opc[sizeop]);
+ tcg_out_ldst_rr(s, datalo, addrz, TCG_REG_O1, qemu_ld_opc[memop]);
}
/* TLB Miss. */
@@ -998,103 +1110,93 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int sizeop)
*label_ptr[0] |= INSN_OFF19((unsigned long)s->code_ptr -
(unsigned long)label_ptr[0]);
}
- n = 0;
- tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[n++], TCG_AREG0);
+
+ param = TCG_REG_O1;
if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
- tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[n++],
- args[addrlo_idx + 1]);
+ tcg_out_mov(s, TCG_TYPE_REG, param++, addrhi);
}
- tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[n++],
- args[addrlo_idx]);
+ tcg_out_mov(s, TCG_TYPE_REG, param++, addrlo);
- /* qemu_ld_helper[s_bits](arg0, arg1) */
- tcg_out32(s, CALL | ((((tcg_target_ulong)qemu_ld_helpers[s_bits]
- - (tcg_target_ulong)s->code_ptr) >> 2)
- & 0x3fffffff));
+ /* We use the helpers to extend SB and SW data, leaving the case
+ of SL needing explicit extending below. */
+ if ((memop & ~MO_BSWAP) == MO_SL) {
+ func = qemu_ld_trampoline[memop & ~MO_SIGN];
+ } else {
+ func = qemu_ld_trampoline[memop];
+ }
+ assert(func != 0);
+ tcg_out_calli(s, func);
/* delay slot */
- tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[n], memi);
-
- n = tcg_target_call_oarg_regs[0];
- /* datalo = sign_extend(arg0) */
- switch (sizeop) {
- case 0 | 4:
- /* Recall that SRA sign extends from bit 31 through bit 63. */
- tcg_out_arithi(s, datalo, n, 24, SHIFT_SLL);
- tcg_out_arithi(s, datalo, datalo, 24, SHIFT_SRA);
- break;
- case 1 | 4:
- tcg_out_arithi(s, datalo, n, 16, SHIFT_SLL);
- tcg_out_arithi(s, datalo, datalo, 16, SHIFT_SRA);
- break;
- case 2 | 4:
- tcg_out_arithi(s, datalo, n, 0, SHIFT_SRA);
+ tcg_out_movi(s, TCG_TYPE_I32, param, memi);
+
+ switch (memop & ~MO_BSWAP) {
+ case MO_SL:
+ tcg_out_arithi(s, datalo, TCG_REG_O0, 0, SHIFT_SRA);
break;
- case 3:
+ case MO_Q:
if (TCG_TARGET_REG_BITS == 32) {
- tcg_out_mov(s, TCG_TYPE_REG, datahi, n);
- tcg_out_mov(s, TCG_TYPE_REG, datalo, n + 1);
+ tcg_out_mov(s, TCG_TYPE_REG, datahi, TCG_REG_O0);
+ tcg_out_mov(s, TCG_TYPE_REG, datalo, TCG_REG_O1);
break;
}
/* FALLTHRU */
- case 0:
- case 1:
- case 2:
default:
/* mov */
- tcg_out_mov(s, TCG_TYPE_REG, datalo, n);
+ tcg_out_mov(s, TCG_TYPE_REG, datalo, TCG_REG_O0);
break;
}
*label_ptr[1] |= INSN_OFF19((unsigned long)s->code_ptr -
(unsigned long)label_ptr[1]);
#else
- addr_reg = args[addrlo_idx];
if (TCG_TARGET_REG_BITS == 64 && TARGET_LONG_BITS == 32) {
- tcg_out_arithi(s, TCG_REG_T1, addr_reg, 0, SHIFT_SRL);
- addr_reg = TCG_REG_T1;
+ tcg_out_arithi(s, TCG_REG_T1, addrlo, 0, SHIFT_SRL);
+ addrlo = TCG_REG_T1;
}
- if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
+ if (TCG_TARGET_REG_BITS == 32 && s_bits == MO_64) {
int reg64 = (datalo < 16 ? datalo : TCG_REG_O0);
- tcg_out_ldst_rr(s, reg64, addr_reg,
+ tcg_out_ldst_rr(s, reg64, addrlo,
(GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_G0),
- qemu_ld_opc[sizeop]);
+ qemu_ld_opc[memop]);
tcg_out_arithi(s, datahi, reg64, 32, SHIFT_SRLX);
if (reg64 != datalo) {
tcg_out_mov(s, TCG_TYPE_I32, datalo, reg64);
}
} else {
- tcg_out_ldst_rr(s, datalo, addr_reg,
+ tcg_out_ldst_rr(s, datalo, addrlo,
(GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_G0),
- qemu_ld_opc[sizeop]);
+ qemu_ld_opc[memop]);
}
#endif /* CONFIG_SOFTMMU */
}
-static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int sizeop)
+static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
{
- int addrlo_idx = 1, datalo, datahi, addr_reg;
+ TCGReg addrlo, datalo, datahi, addrhi __attribute__((unused));
+ TCGMemOp memop, s_bits;
#if defined(CONFIG_SOFTMMU)
- int memi_idx, memi, n, datafull;
+ TCGReg addrz, datafull, param;
+ uintptr_t func;
+ int memi;
uint32_t *label_ptr;
#endif
- datahi = datalo = args[0];
- if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
- datahi = args[1];
- addrlo_idx = 2;
- }
+ datalo = *args++;
+ datahi = (TCG_TARGET_REG_BITS == 32 && is64 ? *args++ : 0);
+ addrlo = *args++;
+ addrhi = (TARGET_LONG_BITS > TCG_TARGET_REG_BITS ? *args++ : 0);
+ memop = *args++;
+ s_bits = memop & MO_SIZE;
#if defined(CONFIG_SOFTMMU)
- memi_idx = addrlo_idx + 1 + (TARGET_LONG_BITS > TCG_TARGET_REG_BITS);
- memi = args[memi_idx];
-
- addr_reg = tcg_out_tlb_load(s, addrlo_idx, memi, sizeop, args,
- offsetof(CPUTLBEntry, addr_write));
+ memi = *args++;
+ addrz = tcg_out_tlb_load(s, addrlo, addrhi, memi, s_bits,
+ offsetof(CPUTLBEntry, addr_write));
datafull = datalo;
- if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
+ if (TCG_TARGET_REG_BITS == 32 && s_bits == MO_64) {
/* Reconstruct the full 64-bit value. */
tcg_out_arithi(s, TCG_REG_T1, datalo, 0, SHIFT_SRL);
tcg_out_arithi(s, TCG_REG_O2, datahi, 32, SHIFT_SLLX);
@@ -1109,47 +1211,42 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int sizeop)
tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT
| (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0);
/* delay slot */
- tcg_out_ldst_rr(s, datafull, addr_reg, TCG_REG_O1, qemu_st_opc[sizeop]);
+ tcg_out_ldst_rr(s, datafull, addrz, TCG_REG_O1, qemu_st_opc[memop]);
/* TLB Miss. */
- n = 0;
- tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[n++], TCG_AREG0);
+ param = TCG_REG_O1;
if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
- tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[n++],
- args[addrlo_idx + 1]);
+ tcg_out_mov(s, TCG_TYPE_REG, param++, addrhi);
}
- tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[n++],
- args[addrlo_idx]);
- if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
- tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[n++], datahi);
+ tcg_out_mov(s, TCG_TYPE_REG, param++, addrlo);
+ if (TCG_TARGET_REG_BITS == 32 && s_bits == MO_64) {
+ tcg_out_mov(s, TCG_TYPE_REG, param++, datahi);
}
- tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[n++], datalo);
+ tcg_out_mov(s, TCG_TYPE_REG, param++, datalo);
- /* qemu_st_helper[s_bits](arg0, arg1, arg2) */
- tcg_out32(s, CALL | ((((tcg_target_ulong)qemu_st_helpers[sizeop]
- - (tcg_target_ulong)s->code_ptr) >> 2)
- & 0x3fffffff));
+ func = qemu_st_trampoline[memop];
+ assert(func != 0);
+ tcg_out_calli(s, func);
/* delay slot */
- tcg_out_movi(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[n], memi);
+ tcg_out_movi(s, TCG_TYPE_REG, param, memi);
*label_ptr |= INSN_OFF19((unsigned long)s->code_ptr -
(unsigned long)label_ptr);
#else
- addr_reg = args[addrlo_idx];
if (TCG_TARGET_REG_BITS == 64 && TARGET_LONG_BITS == 32) {
- tcg_out_arithi(s, TCG_REG_T1, addr_reg, 0, SHIFT_SRL);
- addr_reg = TCG_REG_T1;
+ tcg_out_arithi(s, TCG_REG_T1, addrlo, 0, SHIFT_SRL);
+ addrlo = TCG_REG_T1;
}
- if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
+ if (TCG_TARGET_REG_BITS == 32 && s_bits == MO_64) {
tcg_out_arithi(s, TCG_REG_T1, datalo, 0, SHIFT_SRL);
tcg_out_arithi(s, TCG_REG_O2, datahi, 32, SHIFT_SLLX);
tcg_out_arith(s, TCG_REG_O2, TCG_REG_T1, TCG_REG_O2, ARITH_OR);
datalo = TCG_REG_O2;
}
- tcg_out_ldst_rr(s, datalo, addr_reg,
+ tcg_out_ldst_rr(s, datalo, addrlo,
(GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_G0),
- qemu_st_opc[sizeop]);
+ qemu_st_opc[memop]);
#endif /* CONFIG_SOFTMMU */
}
@@ -1161,8 +1258,7 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
switch (opc) {
case INDEX_op_exit_tb:
tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I0, args[0]);
- tcg_out32(s, JMPL | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_I7) |
- INSN_IMM13(8));
+ tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, JMPL);
tcg_out32(s, RESTORE | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_G0) |
INSN_RS2(TCG_REG_G0));
break;
@@ -1175,24 +1271,17 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
tcg_out32(s, CALL | (old_insn & ~INSN_OP(-1)));
} else {
/* indirect jump method */
- tcg_out_ld_ptr(s, TCG_REG_T1,
- (tcg_target_long)(s->tb_next + args[0]));
- tcg_out32(s, JMPL | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_T1) |
- INSN_RS2(TCG_REG_G0));
+ tcg_out_ld_ptr(s, TCG_REG_T1, (uintptr_t)(s->tb_next + args[0]));
+ tcg_out_arithi(s, TCG_REG_G0, TCG_REG_T1, 0, JMPL);
}
tcg_out_nop(s);
s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
break;
case INDEX_op_call:
if (const_args[0]) {
- tcg_out32(s, CALL | ((((tcg_target_ulong)args[0]
- - (tcg_target_ulong)s->code_ptr) >> 2)
- & 0x3fffffff));
+ tcg_out_calli(s, args[0]);
} else {
- tcg_out_ld_ptr(s, TCG_REG_T1,
- (tcg_target_long)(s->tb_next + args[0]));
- tcg_out32(s, JMPL | INSN_RD(TCG_REG_O7) | INSN_RS1(TCG_REG_T1) |
- INSN_RS2(TCG_REG_G0));
+ tcg_out_arithi(s, TCG_REG_O7, args[0], 0, JMPL);
}
/* delay slot */
tcg_out_nop(s);
@@ -1294,15 +1383,6 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
tcg_out_div32(s, args[0], args[1], args[2], const_args[2], 1);
break;
- case INDEX_op_rem_i32:
- case INDEX_op_remu_i32:
- tcg_out_div32(s, TCG_REG_T1, args[1], args[2], const_args[2],
- opc == INDEX_op_remu_i32);
- tcg_out_arithc(s, TCG_REG_T1, TCG_REG_T1, args[2], const_args[2],
- ARITH_UMUL);
- tcg_out_arith(s, args[0], args[1], TCG_REG_T1, ARITH_SUB);
- break;
-
case INDEX_op_brcond_i32:
tcg_out_brcond_i32(s, args[2], args[0], args[1], const_args[1],
args[3]);
@@ -1345,44 +1425,18 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
tcg_out_rdy(s, args[1]);
break;
- case INDEX_op_qemu_ld8u:
+ case INDEX_op_qemu_ld_i32:
tcg_out_qemu_ld(s, args, 0);
break;
- case INDEX_op_qemu_ld8s:
- tcg_out_qemu_ld(s, args, 0 | 4);
- break;
- case INDEX_op_qemu_ld16u:
+ case INDEX_op_qemu_ld_i64:
tcg_out_qemu_ld(s, args, 1);
break;
- case INDEX_op_qemu_ld16s:
- tcg_out_qemu_ld(s, args, 1 | 4);
- break;
- case INDEX_op_qemu_ld32:
-#if TCG_TARGET_REG_BITS == 64
- case INDEX_op_qemu_ld32u:
-#endif
- tcg_out_qemu_ld(s, args, 2);
- break;
-#if TCG_TARGET_REG_BITS == 64
- case INDEX_op_qemu_ld32s:
- tcg_out_qemu_ld(s, args, 2 | 4);
- break;
-#endif
- case INDEX_op_qemu_ld64:
- tcg_out_qemu_ld(s, args, 3);
- break;
- case INDEX_op_qemu_st8:
+ case INDEX_op_qemu_st_i32:
tcg_out_qemu_st(s, args, 0);
break;
- case INDEX_op_qemu_st16:
+ case INDEX_op_qemu_st_i64:
tcg_out_qemu_st(s, args, 1);
break;
- case INDEX_op_qemu_st32:
- tcg_out_qemu_st(s, args, 2);
- break;
- case INDEX_op_qemu_st64:
- tcg_out_qemu_st(s, args, 3);
- break;
#if TCG_TARGET_REG_BITS == 64
case INDEX_op_movi_i64:
@@ -1418,27 +1472,11 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
case INDEX_op_divu_i64:
c = ARITH_UDIVX;
goto gen_arith;
- case INDEX_op_rem_i64:
- case INDEX_op_remu_i64:
- tcg_out_arithc(s, TCG_REG_T1, args[1], args[2], const_args[2],
- opc == INDEX_op_rem_i64 ? ARITH_SDIVX : ARITH_UDIVX);
- tcg_out_arithc(s, TCG_REG_T1, TCG_REG_T1, args[2], const_args[2],
- ARITH_MULX);
- tcg_out_arith(s, args[0], args[1], TCG_REG_T1, ARITH_SUB);
- break;
case INDEX_op_ext32s_i64:
- if (const_args[1]) {
- tcg_out_movi(s, TCG_TYPE_I64, args[0], (int32_t)args[1]);
- } else {
- tcg_out_arithi(s, args[0], args[1], 0, SHIFT_SRA);
- }
+ tcg_out_arithi(s, args[0], args[1], 0, SHIFT_SRA);
break;
case INDEX_op_ext32u_i64:
- if (const_args[1]) {
- tcg_out_movi_imm32(s, args[0], args[1]);
- } else {
- tcg_out_arithi(s, args[0], args[1], 0, SHIFT_SRL);
- }
+ tcg_out_arithi(s, args[0], args[1], 0, SHIFT_SRL);
break;
case INDEX_op_brcond_i64:
@@ -1489,8 +1527,6 @@ static const TCGTargetOpDef sparc_op_defs[] = {
{ INDEX_op_mul_i32, { "r", "rZ", "rJ" } },
{ INDEX_op_div_i32, { "r", "rZ", "rJ" } },
{ INDEX_op_divu_i32, { "r", "rZ", "rJ" } },
- { INDEX_op_rem_i32, { "r", "rZ", "rJ" } },
- { INDEX_op_remu_i32, { "r", "rZ", "rJ" } },
{ INDEX_op_sub_i32, { "r", "rZ", "rJ" } },
{ INDEX_op_and_i32, { "r", "rZ", "rJ" } },
{ INDEX_op_andc_i32, { "r", "rZ", "rJ" } },
@@ -1537,8 +1573,6 @@ static const TCGTargetOpDef sparc_op_defs[] = {
{ INDEX_op_mul_i64, { "r", "rZ", "rJ" } },
{ INDEX_op_div_i64, { "r", "rZ", "rJ" } },
{ INDEX_op_divu_i64, { "r", "rZ", "rJ" } },
- { INDEX_op_rem_i64, { "r", "rZ", "rJ" } },
- { INDEX_op_remu_i64, { "r", "rZ", "rJ" } },
{ INDEX_op_sub_i64, { "r", "rZ", "rJ" } },
{ INDEX_op_and_i64, { "r", "rZ", "rJ" } },
{ INDEX_op_andc_i64, { "r", "rZ", "rJ" } },
@@ -1553,8 +1587,8 @@ static const TCGTargetOpDef sparc_op_defs[] = {
{ INDEX_op_neg_i64, { "r", "rJ" } },
{ INDEX_op_not_i64, { "r", "rJ" } },
- { INDEX_op_ext32s_i64, { "r", "ri" } },
- { INDEX_op_ext32u_i64, { "r", "ri" } },
+ { INDEX_op_ext32s_i64, { "r", "r" } },
+ { INDEX_op_ext32u_i64, { "r", "r" } },
{ INDEX_op_brcond_i64, { "rZ", "rJ" } },
{ INDEX_op_setcond_i64, { "r", "rZ", "rJ" } },
@@ -1562,43 +1596,20 @@ static const TCGTargetOpDef sparc_op_defs[] = {
#endif
#if TCG_TARGET_REG_BITS == 64
- { INDEX_op_qemu_ld8u, { "r", "L" } },
- { INDEX_op_qemu_ld8s, { "r", "L" } },
- { INDEX_op_qemu_ld16u, { "r", "L" } },
- { INDEX_op_qemu_ld16s, { "r", "L" } },
- { INDEX_op_qemu_ld32, { "r", "L" } },
- { INDEX_op_qemu_ld32u, { "r", "L" } },
- { INDEX_op_qemu_ld32s, { "r", "L" } },
- { INDEX_op_qemu_ld64, { "r", "L" } },
-
- { INDEX_op_qemu_st8, { "L", "L" } },
- { INDEX_op_qemu_st16, { "L", "L" } },
- { INDEX_op_qemu_st32, { "L", "L" } },
- { INDEX_op_qemu_st64, { "L", "L" } },
+ { INDEX_op_qemu_ld_i32, { "r", "L" } },
+ { INDEX_op_qemu_ld_i64, { "r", "L" } },
+ { INDEX_op_qemu_st_i32, { "L", "L" } },
+ { INDEX_op_qemu_st_i64, { "L", "L" } },
#elif TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
- { INDEX_op_qemu_ld8u, { "r", "L" } },
- { INDEX_op_qemu_ld8s, { "r", "L" } },
- { INDEX_op_qemu_ld16u, { "r", "L" } },
- { INDEX_op_qemu_ld16s, { "r", "L" } },
- { INDEX_op_qemu_ld32, { "r", "L" } },
- { INDEX_op_qemu_ld64, { "r", "r", "L" } },
-
- { INDEX_op_qemu_st8, { "L", "L" } },
- { INDEX_op_qemu_st16, { "L", "L" } },
- { INDEX_op_qemu_st32, { "L", "L" } },
- { INDEX_op_qemu_st64, { "L", "L", "L" } },
+ { INDEX_op_qemu_ld_i32, { "r", "L" } },
+ { INDEX_op_qemu_ld_i64, { "r", "r", "L" } },
+ { INDEX_op_qemu_st_i32, { "L", "L" } },
+ { INDEX_op_qemu_st_i64, { "L", "L", "L" } },
#else
- { INDEX_op_qemu_ld8u, { "r", "L", "L" } },
- { INDEX_op_qemu_ld8s, { "r", "L", "L" } },
- { INDEX_op_qemu_ld16u, { "r", "L", "L" } },
- { INDEX_op_qemu_ld16s, { "r", "L", "L" } },
- { INDEX_op_qemu_ld32, { "r", "L", "L" } },
- { INDEX_op_qemu_ld64, { "L", "L", "L", "L" } },
-
- { INDEX_op_qemu_st8, { "L", "L", "L" } },
- { INDEX_op_qemu_st16, { "L", "L", "L" } },
- { INDEX_op_qemu_st32, { "L", "L", "L" } },
- { INDEX_op_qemu_st64, { "L", "L", "L", "L" } },
+ { INDEX_op_qemu_ld_i32, { "r", "L", "L" } },
+ { INDEX_op_qemu_ld_i64, { "L", "L", "L", "L" } },
+ { INDEX_op_qemu_st_i32, { "L", "L", "L" } },
+ { INDEX_op_qemu_st_i64, { "L", "L", "L", "L" } },
#endif
{ -1 },
@@ -1679,7 +1690,7 @@ static DebugFrame debug_frame = {
void tcg_register_jit(void *buf, size_t buf_size)
{
- debug_frame.fde.func_start = (tcg_target_long) buf;
+ debug_frame.fde.func_start = (uintptr_t)buf;
debug_frame.fde.func_len = buf_size;
tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
@@ -1688,14 +1699,12 @@ void tcg_register_jit(void *buf, size_t buf_size)
void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
uint32_t *ptr = (uint32_t *)jmp_addr;
- tcg_target_long disp = (tcg_target_long)(addr - jmp_addr) >> 2;
+ uintptr_t disp = addr - jmp_addr;
/* We can reach the entire address space for 32-bit. For 64-bit
the code_gen_buffer can't be larger than 2GB. */
- if (TCG_TARGET_REG_BITS == 64 && !check_fit_tl(disp, 30)) {
- tcg_abort();
- }
+ assert(disp == (int32_t)disp);
- *ptr = CALL | (disp & 0x3fffffff);
+ *ptr = CALL | (uint32_t)disp >> 2;
flush_icache_range(jmp_addr, jmp_addr + 4);
}
diff --git a/tcg/sparc/tcg-target.h b/tcg/sparc/tcg-target.h
index 00f3a1848b..4519c64ae9 100644
--- a/tcg/sparc/tcg-target.h
+++ b/tcg/sparc/tcg-target.h
@@ -32,8 +32,6 @@
# error Unknown pointer size for tcg target
#endif
-#define TCG_TARGET_WORDS_BIGENDIAN
-
#define TCG_TARGET_NB_REGS 32
typedef enum {
@@ -94,7 +92,7 @@ typedef enum {
/* optional instructions */
#define TCG_TARGET_HAS_div_i32 1
-#define TCG_TARGET_HAS_rem_i32 1
+#define TCG_TARGET_HAS_rem_i32 0
#define TCG_TARGET_HAS_rot_i32 0
#define TCG_TARGET_HAS_ext8s_i32 0
#define TCG_TARGET_HAS_ext16s_i32 0
@@ -120,7 +118,7 @@ typedef enum {
#if TCG_TARGET_REG_BITS == 64
#define TCG_TARGET_HAS_div_i64 1
-#define TCG_TARGET_HAS_rem_i64 1
+#define TCG_TARGET_HAS_rem_i64 0
#define TCG_TARGET_HAS_rot_i64 0
#define TCG_TARGET_HAS_ext8s_i64 0
#define TCG_TARGET_HAS_ext16s_i64 0
@@ -148,7 +146,7 @@ typedef enum {
#define TCG_TARGET_HAS_mulsh_i64 0
#endif
-#define TCG_TARGET_HAS_new_ldst 0
+#define TCG_TARGET_HAS_new_ldst 1
#define TCG_AREG0 TCG_REG_I0
diff --git a/tcg/tcg-be-ldst.h b/tcg/tcg-be-ldst.h
index 284db0c70d..ad94c0ca51 100644
--- a/tcg/tcg-be-ldst.h
+++ b/tcg/tcg-be-ldst.h
@@ -24,7 +24,7 @@
#define TCG_MAX_QEMU_LDST 640
typedef struct TCGLabelQemuLdst {
- int is_ld:1; /* qemu_ld: 1, qemu_st: 0 */
+ bool is_ld:1; /* qemu_ld: true, qemu_st: false */
TCGMemOp opc:4;
TCGReg addrlo_reg; /* reg index for low word of guest virtual addr */
TCGReg addrhi_reg; /* reg index for high word of guest virtual addr */
diff --git a/tcg/tcg-op.h b/tcg/tcg-op.h
index 7eabf22f01..8d4ff7da9b 100644
--- a/tcg/tcg-op.h
+++ b/tcg/tcg-op.h
@@ -858,7 +858,7 @@ static inline void tcg_gen_ld_i64(TCGv_i64 ret, TCGv_ptr arg2,
{
/* since arg2 and ret have different types, they cannot be the
same temporary */
-#ifdef TCG_TARGET_WORDS_BIGENDIAN
+#ifdef HOST_WORDS_BIGENDIAN
tcg_gen_ld_i32(TCGV_HIGH(ret), arg2, offset);
tcg_gen_ld_i32(TCGV_LOW(ret), arg2, offset + 4);
#else
@@ -888,7 +888,7 @@ static inline void tcg_gen_st32_i64(TCGv_i64 arg1, TCGv_ptr arg2,
static inline void tcg_gen_st_i64(TCGv_i64 arg1, TCGv_ptr arg2,
tcg_target_long offset)
{
-#ifdef TCG_TARGET_WORDS_BIGENDIAN
+#ifdef HOST_WORDS_BIGENDIAN
tcg_gen_st_i32(TCGV_HIGH(arg1), arg2, offset);
tcg_gen_st_i32(TCGV_LOW(arg1), arg2, offset + 4);
#else
@@ -2437,14 +2437,12 @@ static inline void tcg_gen_muls2_i32(TCGv_i32 rl, TCGv_i32 rh,
tcg_gen_op3_i32(INDEX_op_mulsh_i32, rh, arg1, arg2);
tcg_gen_mov_i32(rl, t);
tcg_temp_free_i32(t);
- } else if (TCG_TARGET_REG_BITS == 32 && TCG_TARGET_HAS_mulu2_i32) {
+ } else if (TCG_TARGET_REG_BITS == 32) {
TCGv_i32 t0 = tcg_temp_new_i32();
TCGv_i32 t1 = tcg_temp_new_i32();
TCGv_i32 t2 = tcg_temp_new_i32();
TCGv_i32 t3 = tcg_temp_new_i32();
- tcg_gen_op4_i32(INDEX_op_mulu2_i32, t0, t1, arg1, arg2);
- /* Allow the optimizer room to replace mulu2 with two moves. */
- tcg_gen_op0(INDEX_op_nop);
+ tcg_gen_mulu2_i32(t0, t1, arg1, arg2);
/* Adjust for negative inputs. */
tcg_gen_sari_i32(t2, arg1, 31);
tcg_gen_sari_i32(t3, arg2, 31);
@@ -2522,26 +2520,6 @@ static inline void tcg_gen_mulu2_i64(TCGv_i64 rl, TCGv_i64 rh,
tcg_gen_op3_i64(INDEX_op_muluh_i64, rh, arg1, arg2);
tcg_gen_mov_i64(rl, t);
tcg_temp_free_i64(t);
- } else if (TCG_TARGET_HAS_mulu2_i64) {
- TCGv_i64 t0 = tcg_temp_new_i64();
- TCGv_i64 t1 = tcg_temp_new_i64();
- TCGv_i64 t2 = tcg_temp_new_i64();
- TCGv_i64 t3 = tcg_temp_new_i64();
- tcg_gen_op4_i64(INDEX_op_mulu2_i64, t0, t1, arg1, arg2);
- /* Allow the optimizer room to replace mulu2 with two moves. */
- tcg_gen_op0(INDEX_op_nop);
- /* Adjust for negative inputs. */
- tcg_gen_sari_i64(t2, arg1, 63);
- tcg_gen_sari_i64(t3, arg2, 63);
- tcg_gen_and_i64(t2, t2, arg2);
- tcg_gen_and_i64(t3, t3, arg1);
- tcg_gen_sub_i64(rh, t1, t2);
- tcg_gen_sub_i64(rh, rh, t3);
- tcg_gen_mov_i64(rl, t0);
- tcg_temp_free_i64(t0);
- tcg_temp_free_i64(t1);
- tcg_temp_free_i64(t2);
- tcg_temp_free_i64(t3);
} else {
TCGv_i64 t0 = tcg_temp_new_i64();
int sizemask = 0;
@@ -2569,6 +2547,24 @@ static inline void tcg_gen_muls2_i64(TCGv_i64 rl, TCGv_i64 rh,
tcg_gen_op3_i64(INDEX_op_mulsh_i64, rh, arg1, arg2);
tcg_gen_mov_i64(rl, t);
tcg_temp_free_i64(t);
+ } else if (TCG_TARGET_HAS_mulu2_i64 || TCG_TARGET_HAS_muluh_i64) {
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ TCGv_i64 t2 = tcg_temp_new_i64();
+ TCGv_i64 t3 = tcg_temp_new_i64();
+ tcg_gen_mulu2_i64(t0, t1, arg1, arg2);
+ /* Adjust for negative inputs. */
+ tcg_gen_sari_i64(t2, arg1, 63);
+ tcg_gen_sari_i64(t3, arg2, 63);
+ tcg_gen_and_i64(t2, t2, arg2);
+ tcg_gen_and_i64(t3, t3, arg1);
+ tcg_gen_sub_i64(rh, t1, t2);
+ tcg_gen_sub_i64(rh, rh, t3);
+ tcg_gen_mov_i64(rl, t0);
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+ tcg_temp_free_i64(t2);
+ tcg_temp_free_i64(t3);
} else {
TCGv_i64 t0 = tcg_temp_new_i64();
int sizemask = 0;
diff --git a/tcg/tcg.c b/tcg/tcg.c
index f1e0763432..21ce9fb505 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -101,7 +101,7 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
const int *const_args);
static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1,
intptr_t arg2);
-static int tcg_target_const_match(tcg_target_long val,
+static int tcg_target_const_match(tcg_target_long val, TCGType type,
const TCGArgConstraint *arg_ct);
static void tcg_out_tb_init(TCGContext *s);
static void tcg_out_tb_finalize(TCGContext *s);
@@ -444,7 +444,7 @@ static inline int tcg_global_mem_new_internal(TCGType type, int reg,
ts->fixed_reg = 0;
ts->mem_allocated = 1;
ts->mem_reg = reg;
-#ifdef TCG_TARGET_WORDS_BIGENDIAN
+#ifdef HOST_WORDS_BIGENDIAN
ts->mem_offset = offset + 4;
#else
ts->mem_offset = offset;
@@ -459,7 +459,7 @@ static inline int tcg_global_mem_new_internal(TCGType type, int reg,
ts->fixed_reg = 0;
ts->mem_allocated = 1;
ts->mem_reg = reg;
-#ifdef TCG_TARGET_WORDS_BIGENDIAN
+#ifdef HOST_WORDS_BIGENDIAN
ts->mem_offset = offset;
#else
ts->mem_offset = offset + 4;
@@ -686,7 +686,7 @@ void tcg_gen_callN(TCGContext *s, TCGv_ptr func, unsigned int flags,
if (ret != TCG_CALL_DUMMY_ARG) {
#if TCG_TARGET_REG_BITS < 64
if (sizemask & 1) {
-#ifdef TCG_TARGET_WORDS_BIGENDIAN
+#ifdef HOST_WORDS_BIGENDIAN
*s->gen_opparam_ptr++ = ret + 1;
*s->gen_opparam_ptr++ = ret;
#else
@@ -725,7 +725,7 @@ void tcg_gen_callN(TCGContext *s, TCGv_ptr func, unsigned int flags,
order. If another such target is added, this logic may
have to get more complicated to differentiate between
stack arguments and register arguments. */
-#if defined(TCG_TARGET_WORDS_BIGENDIAN) != defined(TCG_TARGET_STACK_GROWSUP)
+#if defined(HOST_WORDS_BIGENDIAN) != defined(TCG_TARGET_STACK_GROWSUP)
*s->gen_opparam_ptr++ = args[i] + 1;
*s->gen_opparam_ptr++ = args[i];
#else
@@ -2121,7 +2121,7 @@ static void tcg_reg_alloc_op(TCGContext *s,
ts->mem_coherent = 1;
s->reg_to_temp[reg] = arg;
} else if (ts->val_type == TEMP_VAL_CONST) {
- if (tcg_target_const_match(ts->val, arg_ct)) {
+ if (tcg_target_const_match(ts->val, ts->type, arg_ct)) {
/* constant is OK for instruction */
const_args[i] = 1;
new_args[i] = ts->val;
@@ -2365,7 +2365,7 @@ static int tcg_reg_alloc_call(TCGContext *s, const TCGOpDef *def,
func_arg = reg;
tcg_regset_set_reg(allocated_regs, reg);
} else if (ts->val_type == TEMP_VAL_CONST) {
- if (tcg_target_const_match(func_addr, arg_ct)) {
+ if (tcg_target_const_match(func_addr, ts->type, arg_ct)) {
const_func_arg = 1;
func_arg = func_addr;
} else {
diff --git a/tcg/tcg.h b/tcg/tcg.h
index f7efcb4202..0bb66775a5 100644
--- a/tcg/tcg.h
+++ b/tcg/tcg.h
@@ -97,7 +97,6 @@ typedef uint64_t TCGRegSet;
/* Turn some undef macros into true macros. */
#define TCG_TARGET_HAS_add2_i32 1
#define TCG_TARGET_HAS_sub2_i32 1
-#define TCG_TARGET_HAS_mulu2_i32 1
#endif
#ifndef TCG_TARGET_deposit_i32_valid
@@ -121,6 +120,13 @@ typedef uint64_t TCGRegSet;
#define TCG_TARGET_HAS_rem_i64 0
#endif
+/* For 32-bit targets, some sort of unsigned widening multiply is required. */
+#if TCG_TARGET_REG_BITS == 32 \
+ && !(defined(TCG_TARGET_HAS_mulu2_i32) \
+ || defined(TCG_TARGET_HAS_muluh_i32))
+# error "Missing unsigned widening multiply"
+#endif
+
typedef enum TCGOpcode {
#define DEF(name, oargs, iargs, cargs, flags) INDEX_op_ ## name,
#include "tcg-opc.h"
diff --git a/tcg/tci/tcg-target.c b/tcg/tci/tcg-target.c
index fc80704de8..47c0b85d95 100644
--- a/tcg/tci/tcg-target.c
+++ b/tcg/tci/tcg-target.c
@@ -859,7 +859,7 @@ static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1,
}
/* Test if a constant matches the constraint. */
-static int tcg_target_const_match(tcg_target_long val,
+static int tcg_target_const_match(tcg_target_long val, TCGType type,
const TCGArgConstraint *arg_ct)
{
/* No need to return 0 or 1, 0 or != 0 is good enough. */
diff --git a/tcg/tci/tcg-target.h b/tcg/tci/tcg-target.h
index 6e1da8c007..f43492cc67 100644
--- a/tcg/tci/tcg-target.h
+++ b/tcg/tci/tcg-target.h
@@ -57,12 +57,6 @@
#define CONFIG_DEBUG_TCG_INTERPRETER
#endif
-#if 0 /* TCI tries to emulate a little endian host. */
-#if defined(HOST_WORDS_BIGENDIAN)
-# define TCG_TARGET_WORDS_BIGENDIAN
-#endif
-#endif
-
/* Optional instructions. */
#define TCG_TARGET_HAS_bswap16_i32 1
@@ -118,6 +112,8 @@
#define TCG_TARGET_HAS_mulu2_i64 0
#define TCG_TARGET_HAS_muluh_i64 0
#define TCG_TARGET_HAS_mulsh_i64 0
+#else
+#define TCG_TARGET_HAS_mulu2_i32 1
#endif /* TCG_TARGET_REG_BITS == 64 */
#define TCG_TARGET_HAS_new_ldst 0
diff --git a/tci.c b/tci.c
index 0202ed97d1..6523ab82f4 100644
--- a/tci.c
+++ b/tci.c
@@ -669,32 +669,32 @@ uintptr_t tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr)
t0 = *tb_ptr++;
t1 = tci_read_ri32(&tb_ptr);
t2 = tci_read_ri32(&tb_ptr);
- tci_write_reg32(t0, t1 << t2);
+ tci_write_reg32(t0, t1 << (t2 & 31));
break;
case INDEX_op_shr_i32:
t0 = *tb_ptr++;
t1 = tci_read_ri32(&tb_ptr);
t2 = tci_read_ri32(&tb_ptr);
- tci_write_reg32(t0, t1 >> t2);
+ tci_write_reg32(t0, t1 >> (t2 & 31));
break;
case INDEX_op_sar_i32:
t0 = *tb_ptr++;
t1 = tci_read_ri32(&tb_ptr);
t2 = tci_read_ri32(&tb_ptr);
- tci_write_reg32(t0, ((int32_t)t1 >> t2));
+ tci_write_reg32(t0, ((int32_t)t1 >> (t2 & 31)));
break;
#if TCG_TARGET_HAS_rot_i32
case INDEX_op_rotl_i32:
t0 = *tb_ptr++;
t1 = tci_read_ri32(&tb_ptr);
t2 = tci_read_ri32(&tb_ptr);
- tci_write_reg32(t0, rol32(t1, t2));
+ tci_write_reg32(t0, rol32(t1, t2 & 31));
break;
case INDEX_op_rotr_i32:
t0 = *tb_ptr++;
t1 = tci_read_ri32(&tb_ptr);
t2 = tci_read_ri32(&tb_ptr);
- tci_write_reg32(t0, ror32(t1, t2));
+ tci_write_reg32(t0, ror32(t1, t2 & 31));
break;
#endif
#if TCG_TARGET_HAS_deposit_i32
@@ -936,32 +936,32 @@ uintptr_t tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr)
t0 = *tb_ptr++;
t1 = tci_read_ri64(&tb_ptr);
t2 = tci_read_ri64(&tb_ptr);
- tci_write_reg64(t0, t1 << t2);
+ tci_write_reg64(t0, t1 << (t2 & 63));
break;
case INDEX_op_shr_i64:
t0 = *tb_ptr++;
t1 = tci_read_ri64(&tb_ptr);
t2 = tci_read_ri64(&tb_ptr);
- tci_write_reg64(t0, t1 >> t2);
+ tci_write_reg64(t0, t1 >> (t2 & 63));
break;
case INDEX_op_sar_i64:
t0 = *tb_ptr++;
t1 = tci_read_ri64(&tb_ptr);
t2 = tci_read_ri64(&tb_ptr);
- tci_write_reg64(t0, ((int64_t)t1 >> t2));
+ tci_write_reg64(t0, ((int64_t)t1 >> (t2 & 63)));
break;
#if TCG_TARGET_HAS_rot_i64
case INDEX_op_rotl_i64:
t0 = *tb_ptr++;
t1 = tci_read_ri64(&tb_ptr);
t2 = tci_read_ri64(&tb_ptr);
- tci_write_reg64(t0, rol64(t1, t2));
+ tci_write_reg64(t0, rol64(t1, t2 & 63));
break;
case INDEX_op_rotr_i64:
t0 = *tb_ptr++;
t1 = tci_read_ri64(&tb_ptr);
t2 = tci_read_ri64(&tb_ptr);
- tci_write_reg64(t0, ror64(t1, t2));
+ tci_write_reg64(t0, ror64(t1, t2 & 63));
break;
#endif
#if TCG_TARGET_HAS_deposit_i64
diff --git a/tests/.gitignore b/tests/.gitignore
index 9ba9d96b6b..c71c11020e 100644
--- a/tests/.gitignore
+++ b/tests/.gitignore
@@ -7,20 +7,28 @@ check-qstring
check-qom-interface
test-aio
test-bitops
-test-throttle
+test-coroutine
test-cutils
test-hbitmap
test-int128
test-iov
test-mul64
+test-opts-visitor
test-qapi-types.[ch]
test-qapi-visit.[ch]
test-qdev-global-props
-test-qmp-commands.h
test-qmp-commands
+test-qmp-commands.h
test-qmp-input-strict
+test-qmp-input-visitor
test-qmp-marshal.c
+test-qmp-output-visitor
+test-rfifolock
+test-string-input-visitor
+test-string-output-visitor
test-thread-pool
+test-throttle
+test-visitor-serialization
test-vmstate
test-x86-cpuid
test-xbzrle
diff --git a/tests/Makefile b/tests/Makefile
index e146f81d44..88f7105d02 100644
--- a/tests/Makefile
+++ b/tests/Makefile
@@ -35,6 +35,7 @@ check-unit-y += tests/test-visitor-serialization$(EXESUF)
check-unit-y += tests/test-iov$(EXESUF)
gcov-files-test-iov-y = util/iov.c
check-unit-y += tests/test-aio$(EXESUF)
+check-unit-$(CONFIG_POSIX) += tests/test-rfifolock$(EXESUF)
check-unit-y += tests/test-throttle$(EXESUF)
gcov-files-test-aio-$(CONFIG_WIN32) = aio-win32.c
gcov-files-test-aio-$(CONFIG_POSIX) = aio-posix.c
@@ -58,7 +59,7 @@ check-unit-y += tests/test-bitops$(EXESUF)
check-unit-y += tests/test-qdev-global-props$(EXESUF)
check-unit-y += tests/check-qom-interface$(EXESUF)
gcov-files-check-qom-interface-y = qom/object.c
-check-unit-y += tests/test-vmstate$(EXESUF)
+check-unit-$(CONFIG_POSIX) += tests/test-vmstate$(EXESUF)
check-block-$(CONFIG_POSIX) += tests/qemu-iotests-quick.sh
@@ -69,9 +70,29 @@ gcov-files-ipack-y += hw/ipack/ipack.c
check-qtest-ipack-y += tests/ipoctal232-test$(EXESUF)
gcov-files-ipack-y += hw/char/ipoctal232.c
+check-qtest-virtioserial-y += tests/virtio-console-test$(EXESUF)
+gcov-files-virtioserial-y += hw/char/virtio-console.c
+
gcov-files-virtio-y += i386-softmmu/hw/virtio/virtio.c
check-qtest-virtio-y += tests/virtio-net-test$(EXESUF)
gcov-files-virtio-y += i386-softmmu/hw/net/virtio-net.c
+check-qtest-virtio-y += tests/virtio-balloon-test$(EXESUF)
+gcov-files-virtio-y += i386-softmmu/hw/virtio/virtio-balloon.c
+check-qtest-virtio-y += tests/virtio-blk-test$(EXESUF)
+gcov-files-virtio-y += i386-softmmu/hw/block/virtio-blk.c
+check-qtest-virtio-y += tests/virtio-rng-test$(EXESUF)
+gcov-files-virtio-y += hw/virtio/virtio-rng.c
+check-qtest-virtio-y += tests/virtio-scsi-test$(EXESUF)
+gcov-files-virtio-y += i386-softmmu/hw/scsi/virtio-scsi.c
+ifeq ($(CONFIG_VIRTIO)$(CONFIG_VIRTFS)$(CONFIG_PCI),yyy)
+check-qtest-virtio-y += tests/virtio-9p-test$(EXESUF)
+gcov-files-virtio-y += hw/9pfs/virtio-9p.c
+gcov-files-virtio-y += i386-softmmu/hw/9pfs/virtio-9p-device.c
+endif
+check-qtest-virtio-y += tests/virtio-serial-test$(EXESUF)
+gcov-files-virtio-y += i386-softmmu/hw/char/virtio-serial-bus.c
+check-qtest-virtio-y += $(check-qtest-virtioserial-y)
+gcov-files-virtio-y += $(gcov-files-virtioserial-y)
check-qtest-pci-y += tests/e1000-test$(EXESUF)
gcov-files-pci-y += hw/net/e1000.c
@@ -84,12 +105,14 @@ check-qtest-pci-y += tests/eepro100-test$(EXESUF)
gcov-files-pci-y += hw/net/eepro100.c
check-qtest-pci-y += tests/ne2000-test$(EXESUF)
gcov-files-pci-y += hw/net/ne2000.c
+check-qtest-pci-y += tests/nvme-test$(EXESUF)
+gcov-files-pci-y += hw/block/nvme.c
check-qtest-pci-y += $(check-qtest-virtio-y)
gcov-files-pci-y += $(gcov-files-virtio-y) hw/virtio/virtio-pci.c
check-qtest-pci-y += tests/tpci200-test$(EXESUF)
-gcov-files-pci-y += hw/char/tpci200.c
+gcov-files-pci-y += hw/ipack/tpci200.c
check-qtest-pci-y += $(check-qtest-ipack-y)
-gcov-files-pci-y += $(gcov-files-ipack-y) hw/ipack/tpci200.c
+gcov-files-pci-y += $(gcov-files-ipack-y)
check-qtest-i386-y = tests/endianness-test$(EXESUF)
check-qtest-i386-y += tests/fdc-test$(EXESUF)
@@ -110,6 +133,10 @@ check-qtest-i386-y += tests/vmxnet3-test$(EXESUF)
gcov-files-i386-y += hw/net/vmxnet3.c
gcov-files-i386-y += hw/net/vmxnet_rx_pkt.c
gcov-files-i386-y += hw/net/vmxnet_tx_pkt.c
+check-qtest-i386-y += tests/pvpanic-test$(EXESUF)
+gcov-files-i386-y += i386-softmmu/hw/misc/pvpanic.c
+check-qtest-i386-y += tests/i82801b11-test$(EXESUF)
+gcov-files-i386-y += hw/pci-bridge/i82801b11.c
check-qtest-x86_64-y = $(check-qtest-i386-y)
gcov-files-i386-y += i386-softmmu/hw/timer/mc146818rtc.c
gcov-files-x86_64-y = $(subst i386-softmmu/,x86_64-softmmu/,$(gcov-files-i386-y))
@@ -129,6 +156,8 @@ check-qtest-arm-y = tests/tmp105-test$(EXESUF)
gcov-files-arm-y += hw/misc/tmp105.c
check-qtest-ppc-y += tests/boot-order-test$(EXESUF)
check-qtest-ppc64-y += tests/boot-order-test$(EXESUF)
+check-qtest-ppc64-y += tests/spapr-phb-test$(EXESUF)
+gcov-files-ppc64-y += ppc64-softmmu/hw/ppc/spapr_pci.c
check-qtest-microblazeel-y = $(check-qtest-microblaze-y)
check-qtest-xtensaeb-y = $(check-qtest-xtensa-y)
@@ -176,6 +205,7 @@ tests/check-qjson$(EXESUF): tests/check-qjson.o libqemuutil.a libqemustub.a
tests/check-qom-interface$(EXESUF): tests/check-qom-interface.o $(qom-core-obj) libqemuutil.a libqemustub.a
tests/test-coroutine$(EXESUF): tests/test-coroutine.o $(block-obj-y) libqemuutil.a libqemustub.a
tests/test-aio$(EXESUF): tests/test-aio.o $(block-obj-y) libqemuutil.a libqemustub.a
+tests/test-rfifolock$(EXESUF): tests/test-rfifolock.o libqemuutil.a libqemustub.a
tests/test-throttle$(EXESUF): tests/test-throttle.o $(block-obj-y) libqemuutil.a libqemustub.a
tests/test-thread-pool$(EXESUF): tests/test-thread-pool.o $(block-obj-y) libqemuutil.a libqemustub.a
tests/test-iov$(EXESUF): tests/test-iov.o libqemuutil.a
@@ -187,6 +217,7 @@ tests/test-int128$(EXESUF): tests/test-int128.o
tests/test-qdev-global-props$(EXESUF): tests/test-qdev-global-props.o \
hw/core/qdev.o hw/core/qdev-properties.o hw/core/hotplug.o\
hw/core/irq.o \
+ hw/core/fw-path-provider.o \
$(qom-core-obj) \
$(test-qapi-obj-y) \
libqemuutil.a libqemustub.a
@@ -225,6 +256,7 @@ libqos-omap-obj-y = $(libqos-obj-y) tests/libqos/i2c-omap.o
tests/rtc-test$(EXESUF): tests/rtc-test.o
tests/m48t59-test$(EXESUF): tests/m48t59-test.o
tests/endianness-test$(EXESUF): tests/endianness-test.o
+tests/spapr-phb-test$(EXESUF): tests/spapr-phb-test.o $(libqos-obj-y)
tests/fdc-test$(EXESUF): tests/fdc-test.o
tests/ide-test$(EXESUF): tests/ide-test.o $(libqos-pc-obj-y)
tests/hd-geo-test$(EXESUF): tests/hd-geo-test.o
@@ -239,19 +271,31 @@ tests/pcnet-test$(EXESUF): tests/pcnet-test.o
tests/eepro100-test$(EXESUF): tests/eepro100-test.o
tests/vmxnet3-test$(EXESUF): tests/vmxnet3-test.o
tests/ne2000-test$(EXESUF): tests/ne2000-test.o
+tests/virtio-balloon-test$(EXESUF): tests/virtio-balloon-test.o
+tests/virtio-blk-test$(EXESUF): tests/virtio-blk-test.o
tests/virtio-net-test$(EXESUF): tests/virtio-net-test.o
+tests/virtio-rng-test$(EXESUF): tests/virtio-rng-test.o
+tests/virtio-scsi-test$(EXESUF): tests/virtio-scsi-test.o
+tests/virtio-9p-test$(EXESUF): tests/virtio-9p-test.o
+tests/virtio-serial-test$(EXESUF): tests/virtio-serial-test.o
+tests/virtio-console-test$(EXESUF): tests/virtio-console-test.o
tests/tpci200-test$(EXESUF): tests/tpci200-test.o
tests/ipoctal232-test$(EXESUF): tests/ipoctal232-test.o
tests/qom-test$(EXESUF): tests/qom-test.o
tests/blockdev-test$(EXESUF): tests/blockdev-test.o $(libqos-pc-obj-y)
tests/qdev-monitor-test$(EXESUF): tests/qdev-monitor-test.o $(libqos-pc-obj-y)
+tests/nvme-test$(EXESUF): tests/nvme-test.o
+tests/pvpanic-test$(EXESUF): tests/pvpanic-test.o
+tests/i82801b11-test$(EXESUF): tests/i82801b11-test.o
tests/qemu-iotests/socket_scm_helper$(EXESUF): tests/qemu-iotests/socket_scm_helper.o
# QTest rules
TARGETS=$(patsubst %-softmmu,%, $(filter %-softmmu,$(TARGET_DIRS)))
+ifeq ($(CONFIG_POSIX),y)
QTEST_TARGETS=$(foreach TARGET,$(TARGETS), $(if $(check-qtest-$(TARGET)-y), $(TARGET),))
-check-qtest-$(CONFIG_POSIX)=$(foreach TARGET,$(TARGETS), $(check-qtest-$(TARGET)-y))
+check-qtest-y=$(foreach TARGET,$(TARGETS), $(check-qtest-$(TARGET)-y))
+endif
qtest-obj-y = tests/libqtest.o libqemuutil.a libqemustub.a
$(check-qtest-y): $(qtest-obj-y)
@@ -350,7 +394,8 @@ check-block: $(patsubst %,check-%, $(check-block-y))
check: check-qapi-schema check-unit check-qtest
check-clean:
$(MAKE) -C tests/tcg clean
- rm -rf $(check-unit-y) $(check-qtest-i386-y) $(check-qtest-x86_64-y) $(check-qtest-sparc64-y) $(check-qtest-sparc-y) tests/*.o $(QEMU_IOTESTS_HELPERS-y)
+ rm -rf $(check-unit-y) tests/*.o $(QEMU_IOTESTS_HELPERS-y)
+ rm -rf $(sort $(foreach target,$(SYSEMU_TARGET_LIST), $(check-qtest-$(target)-y)))
clean: check-clean
diff --git a/tests/acpi-test-data/pc/DSDT b/tests/acpi-test-data/pc/DSDT
index 8b14a5f6f2..d0bb3de79d 100644
--- a/tests/acpi-test-data/pc/DSDT
+++ b/tests/acpi-test-data/pc/DSDT
Binary files differ
diff --git a/tests/acpi-test-data/pc/SSDT b/tests/acpi-test-data/pc/SSDT
index ae5a9a57d6..c987fb2379 100644
--- a/tests/acpi-test-data/pc/SSDT
+++ b/tests/acpi-test-data/pc/SSDT
Binary files differ
diff --git a/tests/acpi-test-data/q35/DSDT b/tests/acpi-test-data/q35/DSDT
index a76ea9a418..fc5b970009 100644
--- a/tests/acpi-test-data/q35/DSDT
+++ b/tests/acpi-test-data/q35/DSDT
Binary files differ
diff --git a/tests/acpi-test-data/q35/SSDT b/tests/acpi-test-data/q35/SSDT
index 634b4817fc..9199638757 100644
--- a/tests/acpi-test-data/q35/SSDT
+++ b/tests/acpi-test-data/q35/SSDT
Binary files differ
diff --git a/tests/acpi-test.c b/tests/acpi-test.c
index 185309a241..76fbccfa4b 100644
--- a/tests/acpi-test.c
+++ b/tests/acpi-test.c
@@ -23,7 +23,6 @@
#define MACHINE_Q35 "q35"
#define ACPI_REBUILD_EXPECTED_AML "TEST_ACPI_REBUILD_AML"
-#define ACPI_SSDT_SIGNATURE 0x54445353 /* SSDT */
/* DSDT and SSDTs format */
typedef struct {
@@ -101,6 +100,20 @@ typedef struct {
ACPI_READ_FIELD((table)->asl_compiler_revision, addr); \
} while (0);
+#define ACPI_ASSERT_CMP(actual, expected) do { \
+ uint32_t ACPI_ASSERT_CMP_le = cpu_to_le32(actual); \
+ char ACPI_ASSERT_CMP_str[5] = {}; \
+ memcpy(ACPI_ASSERT_CMP_str, &ACPI_ASSERT_CMP_le, 4); \
+ g_assert_cmpstr(ACPI_ASSERT_CMP_str, ==, expected); \
+} while (0)
+
+#define ACPI_ASSERT_CMP64(actual, expected) do { \
+ uint64_t ACPI_ASSERT_CMP_le = cpu_to_le64(actual); \
+ char ACPI_ASSERT_CMP_str[9] = {}; \
+ memcpy(ACPI_ASSERT_CMP_str, &ACPI_ASSERT_CMP_le, 8); \
+ g_assert_cmpstr(ACPI_ASSERT_CMP_str, ==, expected); \
+} while (0)
+
/* Boot sector code: write SIGNATURE into memory,
* then halt.
* Q35 machine requires a minimum 0x7e000 bytes disk.
@@ -213,7 +226,7 @@ static void test_acpi_rsdp_table(test_data *data)
uint32_t addr = data->rsdp_addr;
ACPI_READ_FIELD(rsdp_table->signature, addr);
- g_assert_cmphex(rsdp_table->signature, ==, ACPI_RSDP_SIGNATURE);
+ ACPI_ASSERT_CMP64(rsdp_table->signature, "RSD PTR ");
ACPI_READ_FIELD(rsdp_table->checksum, addr);
ACPI_READ_ARRAY(rsdp_table->oem_id, addr);
@@ -235,7 +248,7 @@ static void test_acpi_rsdt_table(test_data *data)
/* read the header */
ACPI_READ_TABLE_HEADER(rsdt_table, addr);
- g_assert_cmphex(rsdt_table->signature, ==, ACPI_RSDT_SIGNATURE);
+ ACPI_ASSERT_CMP(rsdt_table->signature, "RSDT");
/* compute the table entries in rsdt */
tables_nr = (rsdt_table->length - sizeof(AcpiRsdtDescriptorRev1)) /
@@ -304,7 +317,7 @@ static void test_acpi_fadt_table(test_data *data)
ACPI_READ_FIELD(fadt_table->reserved4b, addr);
ACPI_READ_FIELD(fadt_table->flags, addr);
- g_assert_cmphex(fadt_table->signature, ==, ACPI_FACP_SIGNATURE);
+ ACPI_ASSERT_CMP(fadt_table->signature, "FACP");
g_assert(!acpi_checksum((uint8_t *)fadt_table, fadt_table->length));
}
@@ -321,7 +334,7 @@ static void test_acpi_facs_table(test_data *data)
ACPI_READ_FIELD(facs_table->flags, addr);
ACPI_READ_ARRAY(facs_table->resverved3, addr);
- g_assert_cmphex(facs_table->signature, ==, ACPI_FACS_SIGNATURE);
+ ACPI_ASSERT_CMP(facs_table->signature, "FACS");
}
static void test_dst_table(AcpiSdtTable *sdt_table, uint32_t addr)
@@ -348,7 +361,7 @@ static void test_acpi_dsdt_table(test_data *data)
data->tables = g_array_new(false, true, sizeof(AcpiSdtTable));
test_dst_table(&dsdt_table, addr);
- g_assert_cmphex(dsdt_table.header.signature, ==, ACPI_DSDT_SIGNATURE);
+ ACPI_ASSERT_CMP(dsdt_table.header.signature, "DSDT");
/* Place DSDT first */
g_array_append_val(data->tables, dsdt_table);
@@ -383,8 +396,9 @@ static void dump_aml_files(test_data *data, bool rebuild)
g_assert(sdt->aml);
if (rebuild) {
+ uint32_t signature = cpu_to_le32(sdt->header.signature);
aml_file = g_strdup_printf("%s/%s/%.4s", data_dir, data->machine,
- (gchar *)&sdt->header.signature);
+ (gchar *)&signature);
fd = g_open(aml_file, O_WRONLY|O_TRUNC|O_CREAT,
S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP|S_IROTH);
} else {
@@ -406,9 +420,9 @@ static void dump_aml_files(test_data *data, bool rebuild)
}
}
-static bool compare_signature(AcpiSdtTable *sdt, uint32_t signature)
+static bool compare_signature(AcpiSdtTable *sdt, const char *signature)
{
- return sdt->header.signature == signature;
+ return !memcmp(&sdt->header.signature, signature, 4);
}
static bool load_asl(GArray *sdts, AcpiSdtTable *sdt)
@@ -427,12 +441,12 @@ static bool load_asl(GArray *sdts, AcpiSdtTable *sdt)
/* build command line */
g_string_append_printf(command_line, " -p %s ", sdt->asl_file);
- if (compare_signature(sdt, ACPI_DSDT_SIGNATURE) ||
- compare_signature(sdt, ACPI_SSDT_SIGNATURE)) {
+ if (compare_signature(sdt, "DSDT") ||
+ compare_signature(sdt, "SSDT")) {
for (i = 0; i < sdts->len; ++i) {
temp = &g_array_index(sdts, AcpiSdtTable, i);
- if (compare_signature(temp, ACPI_DSDT_SIGNATURE) ||
- compare_signature(temp, ACPI_SSDT_SIGNATURE)) {
+ if (compare_signature(temp, "DSDT") ||
+ compare_signature(temp, "SSDT")) {
g_string_append_printf(command_line, "-e %s ", temp->aml_file);
}
}
@@ -442,13 +456,12 @@ static bool load_asl(GArray *sdts, AcpiSdtTable *sdt)
/* pass 'out' and 'out_err' in order to be redirected */
ret = g_spawn_command_line_sync(command_line->str, &out, &out_err, NULL, &error);
g_assert_no_error(error);
-
if (ret) {
ret = g_file_get_contents(sdt->asl_file, (gchar **)&sdt->asl,
&sdt->asl_len, &error);
g_assert(ret);
g_assert_no_error(error);
- g_assert(sdt->asl_len);
+ ret = (sdt->asl_len > 0);
}
g_free(out);
@@ -495,13 +508,16 @@ static GArray *load_expected_aml(test_data *data)
GArray *exp_tables = g_array_new(false, true, sizeof(AcpiSdtTable));
for (i = 0; i < data->tables->len; ++i) {
AcpiSdtTable exp_sdt;
+ uint32_t signature;
+
sdt = &g_array_index(data->tables, AcpiSdtTable, i);
memset(&exp_sdt, 0, sizeof(exp_sdt));
exp_sdt.header.signature = sdt->header.signature;
+ signature = cpu_to_le32(sdt->header.signature);
aml_file = g_strdup_printf("%s/%s/%.4s", data_dir, data->machine,
- (gchar *)&exp_sdt.header.signature);
+ (gchar *)&signature);
exp_sdt.aml_file = aml_file;
g_assert(g_file_test(aml_file, G_FILE_TEST_EXISTS));
ret = g_file_get_contents(aml_file, &exp_sdt.aml,
@@ -543,14 +559,20 @@ static void test_acpi_asl(test_data *data)
g_assert(!err || exp_err);
if (g_strcmp0(asl->str, exp_asl->str)) {
- sdt->tmp_files_retain = true;
- exp_sdt->tmp_files_retain = true;
- fprintf(stderr,
- "acpi-test: Warning! %.4s mismatch. "
- "Actual [asl:%s, aml:%s], Expected [asl:%s, aml:%s].\n",
- (gchar *)&exp_sdt->header.signature,
- sdt->asl_file, sdt->aml_file,
- exp_sdt->asl_file, exp_sdt->aml_file);
+ if (exp_err) {
+ fprintf(stderr,
+ "Warning! iasl couldn't parse the expected aml\n");
+ } else {
+ uint32_t signature = cpu_to_le32(exp_sdt->header.signature);
+ sdt->tmp_files_retain = true;
+ exp_sdt->tmp_files_retain = true;
+ fprintf(stderr,
+ "acpi-test: Warning! %.4s mismatch. "
+ "Actual [asl:%s, aml:%s], Expected [asl:%s, aml:%s].\n",
+ (gchar *)&signature,
+ sdt->asl_file, sdt->aml_file,
+ exp_sdt->asl_file, exp_sdt->aml_file);
+ }
}
g_string_free(asl, true);
g_string_free(exp_asl, true);
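
The ACPI_ASSERT_CMP macros added above compare a 32-bit table signature against a readable four-character string by going through the little-endian byte representation. A standalone sketch of that idea follows (hypothetical names, little-endian host assumed; the macros themselves call cpu_to_le32() so they also behave on big-endian hosts):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

static void signature_to_str(uint32_t sig_le, char out[5])
{
    memcpy(out, &sig_le, 4);     /* reinterpret the four bytes as characters */
    out[4] = '\0';
}

int main(void)
{
    uint32_t sig = 0x54445352;   /* bytes 'R','S','D','T' on a little-endian host */
    char buf[5];

    signature_to_str(sig, buf);
    printf("%s\n", strcmp(buf, "RSDT") == 0 ? "match" : "mismatch");
    return 0;
}
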
diff --git a/tests/i82801b11-test.c b/tests/i82801b11-test.c
new file mode 100644
index 0000000000..78d9ce0e6b
--- /dev/null
+++ b/tests/i82801b11-test.c
@@ -0,0 +1,33 @@
+/*
+ * QTest testcase for i82801b11
+ *
+ * Copyright (c) 2014 SUSE LINUX Products GmbH
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include <glib.h>
+#include <string.h>
+#include "libqtest.h"
+#include "qemu/osdep.h"
+
+/* Tests only initialization so far. TODO: Replace with functional tests */
+static void nop(void)
+{
+}
+
+int main(int argc, char **argv)
+{
+ int ret;
+
+ g_test_init(&argc, &argv, NULL);
+ qtest_add_func("/i82801b11/nop", nop);
+
+ qtest_start("-machine q35 -device i82801b11-bridge,bus=pcie.0,addr=1e.0");
+ ret = g_test_run();
+
+ qtest_end();
+
+ return ret;
+}
diff --git a/tests/libqos/pci-pc.c b/tests/libqos/pci-pc.c
index 3bde8ab190..bf741a43ac 100644
--- a/tests/libqos/pci-pc.c
+++ b/tests/libqos/pci-pc.c
@@ -110,37 +110,37 @@ static void qpci_pc_io_writel(QPCIBus *bus, void *addr, uint32_t value)
static uint8_t qpci_pc_config_readb(QPCIBus *bus, int devfn, uint8_t offset)
{
- outl(0xcf8, (1 << 31) | (devfn << 8) | offset);
+ outl(0xcf8, (1U << 31) | (devfn << 8) | offset);
return inb(0xcfc);
}
static uint16_t qpci_pc_config_readw(QPCIBus *bus, int devfn, uint8_t offset)
{
- outl(0xcf8, (1 << 31) | (devfn << 8) | offset);
+ outl(0xcf8, (1U << 31) | (devfn << 8) | offset);
return inw(0xcfc);
}
static uint32_t qpci_pc_config_readl(QPCIBus *bus, int devfn, uint8_t offset)
{
- outl(0xcf8, (1 << 31) | (devfn << 8) | offset);
+ outl(0xcf8, (1U << 31) | (devfn << 8) | offset);
return inl(0xcfc);
}
static void qpci_pc_config_writeb(QPCIBus *bus, int devfn, uint8_t offset, uint8_t value)
{
- outl(0xcf8, (1 << 31) | (devfn << 8) | offset);
+ outl(0xcf8, (1U << 31) | (devfn << 8) | offset);
outb(0xcfc, value);
}
static void qpci_pc_config_writew(QPCIBus *bus, int devfn, uint8_t offset, uint16_t value)
{
- outl(0xcf8, (1 << 31) | (devfn << 8) | offset);
+ outl(0xcf8, (1U << 31) | (devfn << 8) | offset);
outw(0xcfc, value);
}
static void qpci_pc_config_writel(QPCIBus *bus, int devfn, uint8_t offset, uint32_t value)
{
- outl(0xcf8, (1 << 31) | (devfn << 8) | offset);
+ outl(0xcf8, (1U << 31) | (devfn << 8) | offset);
outl(0xcfc, value);
}
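
The 1U change above matters because the PCI CONFIG_ADDRESS word sets bit 31 (the enable bit), and shifting a signed 1 into the sign bit is undefined behaviour in C. A standalone sketch of the encoding (hypothetical helper name, not libqos code):

#include <stdint.h>
#include <stdio.h>

static uint32_t pci_config_addr(unsigned devfn, unsigned offset)
{
    /* Bit 31 enables configuration cycles; 1U keeps the shift defined. */
    return (1U << 31) | (devfn << 8) | offset;
}

int main(void)
{
    printf("0x%08x\n", pci_config_addr(0x18, 0x04));   /* 0x80001804 */
    return 0;
}
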
diff --git a/tests/libqtest.c b/tests/libqtest.c
index f587d36176..8155695848 100644
--- a/tests/libqtest.c
+++ b/tests/libqtest.c
@@ -34,6 +34,7 @@
#include "qapi/qmp/json-parser.h"
#define MAX_IRQ 256
+#define SOCKET_TIMEOUT 5
QTestState *global_qtest;
@@ -47,6 +48,9 @@ struct QTestState
struct sigaction sigact_old; /* restored on exit */
};
+static GList *qtest_instances;
+static struct sigaction sigact_old;
+
#define g_assert_no_errno(ret) do { \
g_assert_cmpint(ret, !=, -1); \
} while (0)
@@ -78,12 +82,16 @@ static int socket_accept(int sock)
struct sockaddr_un addr;
socklen_t addrlen;
int ret;
+ struct timeval timeout = { .tv_sec = SOCKET_TIMEOUT,
+ .tv_usec = 0 };
+
+ setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO, (void *)&timeout,
+ sizeof(timeout));
addrlen = sizeof(addr);
do {
ret = accept(sock, (struct sockaddr *)&addr, &addrlen);
} while (ret == -1 && errno == EINTR);
- g_assert_no_errno(ret);
close(sock);
return ret;
@@ -99,7 +107,28 @@ static void kill_qemu(QTestState *s)
static void sigabrt_handler(int signo)
{
- kill_qemu(global_qtest);
+ GList *elem;
+ for (elem = qtest_instances; elem; elem = elem->next) {
+ kill_qemu(elem->data);
+ }
+}
+
+static void setup_sigabrt_handler(void)
+{
+ struct sigaction sigact;
+
+ /* Catch SIGABRT to clean up on g_assert() failure */
+ sigact = (struct sigaction){
+ .sa_handler = sigabrt_handler,
+ .sa_flags = SA_RESETHAND,
+ };
+ sigemptyset(&sigact.sa_mask);
+ sigaction(SIGABRT, &sigact, &sigact_old);
+}
+
+static void cleanup_sigabrt_handler(void)
+{
+ sigaction(SIGABRT, &sigact_old, NULL);
}
QTestState *qtest_init(const char *extra_args)
@@ -110,7 +139,6 @@ QTestState *qtest_init(const char *extra_args)
gchar *qmp_socket_path;
gchar *command;
const char *qemu_binary;
- struct sigaction sigact;
qemu_binary = getenv("QTEST_QEMU_BINARY");
g_assert(qemu_binary != NULL);
@@ -123,13 +151,12 @@ QTestState *qtest_init(const char *extra_args)
sock = init_socket(socket_path);
qmpsock = init_socket(qmp_socket_path);
- /* Catch SIGABRT to clean up on g_assert() failure */
- sigact = (struct sigaction){
- .sa_handler = sigabrt_handler,
- .sa_flags = SA_RESETHAND,
- };
- sigemptyset(&sigact.sa_mask);
- sigaction(SIGABRT, &sigact, &s->sigact_old);
+ /* Only install SIGABRT handler once */
+ if (!qtest_instances) {
+ setup_sigabrt_handler();
+ }
+
+ qtest_instances = g_list_prepend(qtest_instances, s);
s->qemu_pid = fork();
if (s->qemu_pid == 0) {
@@ -147,12 +174,16 @@ QTestState *qtest_init(const char *extra_args)
}
s->fd = socket_accept(sock);
- s->qmp_fd = socket_accept(qmpsock);
+ if (s->fd >= 0) {
+ s->qmp_fd = socket_accept(qmpsock);
+ }
unlink(socket_path);
unlink(qmp_socket_path);
g_free(socket_path);
g_free(qmp_socket_path);
+ g_assert(s->fd >= 0 && s->qmp_fd >= 0);
+
s->rx = g_string_new("");
for (i = 0; i < MAX_IRQ; i++) {
s->irq_level[i] = false;
@@ -171,7 +202,12 @@ QTestState *qtest_init(const char *extra_args)
void qtest_quit(QTestState *s)
{
- sigaction(SIGABRT, &s->sigact_old, NULL);
+ /* Uninstall SIGABRT handler on last instance */
+ if (qtest_instances && !qtest_instances->next) {
+ cleanup_sigabrt_handler();
+ }
+
+ qtest_instances = g_list_remove(qtest_instances, s);
kill_qemu(s);
close(s->fd);
@@ -309,14 +345,10 @@ static void qmp_response(JSONMessageParser *parser, QList *tokens)
qmp->response = (QDict *)obj;
}
-QDict *qtest_qmpv(QTestState *s, const char *fmt, va_list ap)
+QDict *qtest_qmp_receive(QTestState *s)
{
QMPResponseParser qmp;
- /* Send QMP request */
- socket_sendf(s->qmp_fd, fmt, ap);
-
- /* Receive reply */
qmp.response = NULL;
json_message_parser_init(&qmp.parser, qmp_response);
while (!qmp.response) {
@@ -340,6 +372,15 @@ QDict *qtest_qmpv(QTestState *s, const char *fmt, va_list ap)
return qmp.response;
}
+QDict *qtest_qmpv(QTestState *s, const char *fmt, va_list ap)
+{
+ /* Send QMP request */
+ socket_sendf(s->qmp_fd, fmt, ap);
+
+ /* Receive reply */
+ return qtest_qmp_receive(s);
+}
+
QDict *qtest_qmp(QTestState *s, const char *fmt, ...)
{
va_list ap;
@@ -581,3 +622,23 @@ void qtest_memwrite(QTestState *s, uint64_t addr, const void *data, size_t size)
qtest_sendf(s, "\n");
qtest_rsp(s, 0);
}
+
+QDict *qmp(const char *fmt, ...)
+{
+ va_list ap;
+ QDict *response;
+
+ va_start(ap, fmt);
+ response = qtest_qmpv(global_qtest, fmt, ap);
+ va_end(ap);
+ return response;
+}
+
+void qmp_discard_response(const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ qtest_qmpv_discard_response(global_qtest, fmt, ap);
+ va_end(ap);
+}
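
The SO_RCVTIMEO change above keeps a test from hanging forever when the spawned QEMU never connects: with a receive timeout on the listening socket, accept() fails instead of blocking, and the later g_assert() reports the failure. A standalone POSIX sketch of accept() with such a timeout (hypothetical program, not libqtest code; Linux-style timeout behaviour assumed):

#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <sys/time.h>
#include <errno.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    int s = socket(AF_INET, SOCK_STREAM, 0);
    struct sockaddr_in addr = { .sin_family = AF_INET,
                                .sin_addr.s_addr = htonl(INADDR_LOOPBACK),
                                .sin_port = 0 };            /* kernel-chosen port */
    struct timeval tv = { .tv_sec = 1, .tv_usec = 0 };
    int fd;

    bind(s, (struct sockaddr *)&addr, sizeof(addr));
    listen(s, 1);
    setsockopt(s, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));

    do {
        fd = accept(s, NULL, NULL);        /* nobody connects: times out */
    } while (fd == -1 && errno == EINTR);

    if (fd == -1) {
        perror("accept");                  /* expected: timeout error */
    }
    close(s);
    return 0;
}
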
diff --git a/tests/libqtest.h b/tests/libqtest.h
index 9deebdcdfa..8f323c7030 100644
--- a/tests/libqtest.h
+++ b/tests/libqtest.h
@@ -83,6 +83,14 @@ void qtest_qmpv_discard_response(QTestState *s, const char *fmt, va_list ap);
QDict *qtest_qmpv(QTestState *s, const char *fmt, va_list ap);
/**
+ * qtest_qmp_receive:
+ * @s: #QTestState instance to operate on.
+ *
+ * Reads a QMP message from QEMU and returns the response.
+ */
+QDict *qtest_qmp_receive(QTestState *s);
+
+/**
* qtest_get_irq:
* @s: #QTestState instance to operate on.
* @num: Interrupt to observe.
@@ -356,16 +364,7 @@ static inline void qtest_end(void)
*
* Sends a QMP message to QEMU and returns the response.
*/
-static inline QDict *qmp(const char *fmt, ...)
-{
- va_list ap;
- QDict *response;
-
- va_start(ap, fmt);
- response = qtest_qmpv(global_qtest, fmt, ap);
- va_end(ap);
- return response;
-}
+QDict *qmp(const char *fmt, ...);
/**
* qmp_discard_response:
@@ -373,13 +372,16 @@ static inline QDict *qmp(const char *fmt, ...)
*
* Sends a QMP message to QEMU and consumes the response.
*/
-static inline void qmp_discard_response(const char *fmt, ...)
-{
- va_list ap;
+void qmp_discard_response(const char *fmt, ...);
- va_start(ap, fmt);
- qtest_qmpv_discard_response(global_qtest, fmt, ap);
- va_end(ap);
+/**
+ * qmp_receive:
+ *
+ * Reads a QMP message from QEMU and returns the response.
+ */
+static inline QDict *qmp_receive(void)
+{
+ return qtest_qmp_receive(global_qtest);
}
/**
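
Moving qmp() and qmp_discard_response() out of the header, as above, relies on the usual varargs-forwarding pattern: the "..." entry point only packages its arguments into a va_list and delegates to a v-suffixed worker, which can then be shared by several callers. A standalone sketch of the pattern (hypothetical names):

#include <stdarg.h>
#include <stdio.h>

static void logv(const char *fmt, va_list ap)
{
    vfprintf(stderr, fmt, ap);   /* the shared worker consumes the va_list */
}

static void log_msg(const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    logv(fmt, ap);               /* forward, exactly as qmp() forwards to qtest_qmpv() */
    va_end(ap);
}

int main(void)
{
    log_msg("hello %s, %d\n", "world", 42);
    return 0;
}
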
diff --git a/tests/nvme-test.c b/tests/nvme-test.c
new file mode 100644
index 0000000000..85768e837b
--- /dev/null
+++ b/tests/nvme-test.c
@@ -0,0 +1,34 @@
+/*
+ * QTest testcase for NVMe
+ *
+ * Copyright (c) 2014 SUSE LINUX Products GmbH
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include <glib.h>
+#include <string.h>
+#include "libqtest.h"
+#include "qemu/osdep.h"
+
+/* Tests only initialization so far. TODO: Replace with functional tests */
+static void nop(void)
+{
+}
+
+int main(int argc, char **argv)
+{
+ int ret;
+
+ g_test_init(&argc, &argv, NULL);
+ qtest_add_func("/nvme/nop", nop);
+
+ qtest_start("-drive id=drv0,if=none,file=/dev/null "
+ "-device nvme,drive=drv0,serial=foo");
+ ret = g_test_run();
+
+ qtest_end();
+
+ return ret;
+}
diff --git a/tests/pvpanic-test.c b/tests/pvpanic-test.c
new file mode 100644
index 0000000000..a7ad6b3064
--- /dev/null
+++ b/tests/pvpanic-test.c
@@ -0,0 +1,47 @@
+/*
+ * QTest testcase for PV Panic
+ *
+ * Copyright (c) 2014 SUSE LINUX Products GmbH
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include <glib.h>
+#include <string.h>
+#include "libqtest.h"
+#include "qemu/osdep.h"
+
+static void test_panic(void)
+{
+ uint8_t val;
+ QDict *response, *data;
+
+ val = inb(0x505);
+ g_assert_cmpuint(val, ==, 1);
+
+ outb(0x505, 0x1);
+
+ response = qmp_receive();
+ g_assert(qdict_haskey(response, "event"));
+ g_assert_cmpstr(qdict_get_str(response, "event"), ==, "GUEST_PANICKED");
+ g_assert(qdict_haskey(response, "data"));
+ data = qdict_get_qdict(response, "data");
+ g_assert(qdict_haskey(data, "action"));
+ g_assert_cmpstr(qdict_get_str(data, "action"), ==, "pause");
+}
+
+int main(int argc, char **argv)
+{
+ int ret;
+
+ g_test_init(&argc, &argv, NULL);
+ qtest_add_func("/pvpanic/panic", test_panic);
+
+ qtest_start("-device pvpanic");
+ ret = g_test_run();
+
+ qtest_end();
+
+ return ret;
+}
diff --git a/tests/qdev-monitor-test.c b/tests/qdev-monitor-test.c
index ba7f9cc238..e20ffd67a7 100644
--- a/tests/qdev-monitor-test.c
+++ b/tests/qdev-monitor-test.c
@@ -32,8 +32,7 @@ static void test_device_add(void)
"}}");
g_assert(response);
error = qdict_get_qdict(response, "error");
- g_assert(!strcmp(qdict_get_try_str(error, "desc") ?: "",
- "Device needs media, but drive is empty"));
+ g_assert_cmpstr(qdict_get_try_str(error, "class"), ==, "GenericError");
QDECREF(response);
/* Delete the drive */
@@ -42,7 +41,7 @@ static void test_device_add(void)
" \"command-line\": \"drive_del drive0\""
"}}");
g_assert(response);
- g_assert(!strcmp(qdict_get_try_str(response, "return") ?: "(null)", ""));
+ g_assert_cmpstr(qdict_get_try_str(response, "return"), ==, "");
QDECREF(response);
/* Try to re-add the drive. This fails with duplicate IDs if a leaked
@@ -53,8 +52,7 @@ static void test_device_add(void)
" \"command-line\": \"drive_add pci-addr=auto if=none,id=drive0\""
"}}");
g_assert(response);
- g_assert(!strcmp(qdict_get_try_str(response, "return") ?: "",
- "OK\r\n"));
+ g_assert_cmpstr(qdict_get_try_str(response, "return"), ==, "OK\r\n");
QDECREF(response);
qtest_end();
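
The switch to g_assert_cmpstr() above is about diagnostics: unlike a bare strcmp() wrapped in g_assert(), it prints both the actual and expected strings when the comparison fails. A standalone GLib sketch (hypothetical test name):

#include <glib.h>

static void test_strings(void)
{
    const char *actual = "OK\r\n";

    /* On mismatch this reports both operands, not just "assertion failed". */
    g_assert_cmpstr(actual, ==, "OK\r\n");
}

int main(int argc, char **argv)
{
    g_test_init(&argc, &argv, NULL);
    g_test_add_func("/sketch/strings", test_strings);
    return g_test_run();
}
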
diff --git a/tests/qemu-iotests/026.out b/tests/qemu-iotests/026.out
index 15045799a2..f7c78e712a 100644
--- a/tests/qemu-iotests/026.out
+++ b/tests/qemu-iotests/026.out
@@ -475,7 +475,7 @@ Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1073741824
Event: refblock_alloc.write_blocks; errno: 28; imm: off; once: off; write
write failed: No space left on device
-10 leaked clusters were found on the image.
+11 leaked clusters were found on the image.
This means waste of disk space, but no harm to data.
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1073741824
@@ -499,7 +499,7 @@ Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1073741824
Event: refblock_alloc.write_table; errno: 28; imm: off; once: off; write
write failed: No space left on device
-10 leaked clusters were found on the image.
+11 leaked clusters were found on the image.
This means waste of disk space, but no harm to data.
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1073741824
@@ -523,7 +523,7 @@ Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1073741824
Event: refblock_alloc.switch_table; errno: 28; imm: off; once: off; write
write failed: No space left on device
-10 leaked clusters were found on the image.
+11 leaked clusters were found on the image.
This means waste of disk space, but no harm to data.
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1073741824
diff --git a/tests/qemu-iotests/029 b/tests/qemu-iotests/029
index b424726fc4..fa46ace67b 100755
--- a/tests/qemu-iotests/029
+++ b/tests/qemu-iotests/029
@@ -1,7 +1,6 @@
#!/bin/bash
#
-# Test loading internal snapshots where the L1 table of the snapshot
-# is smaller than the current L1 table.
+# qcow2 internal snapshots/VM state tests
#
# Copyright (C) 2011 Red Hat, Inc.
#
@@ -31,7 +30,8 @@ status=1 # failure is the default!
_cleanup()
{
- _cleanup_test_img
+ rm -f $TEST_IMG.snap
+ _cleanup_test_img
}
trap "_cleanup; exit \$status" 0 1 2 3 15
@@ -45,6 +45,14 @@ _supported_fmt qcow2
_supported_proto generic
_supported_os Linux
+offset_size=24
+offset_l1_size=36
+
+echo
+echo Test loading internal snapshots where the L1 table of the snapshot
+echo is smaller than the current L1 table.
+echo
+
CLUSTER_SIZE=65536
_make_test_img 64M
$QEMU_IMG snapshot -c foo "$TEST_IMG"
@@ -59,6 +67,32 @@ $QEMU_IO -c 'write -b 0 4M' "$TEST_IMG" | _filter_qemu_io
$QEMU_IMG snapshot -a foo "$TEST_IMG"
_check_test_img
+
+echo
+echo Try using a huge VM state
+echo
+
+CLUSTER_SIZE=65536
+_make_test_img 64M
+{ $QEMU_IO -c "write -b -P 0x11 1T 4k" $TEST_IMG; } 2>&1 | _filter_qemu_io | _filter_testdir
+{ $QEMU_IMG snapshot -c foo $TEST_IMG; } 2>&1 | _filter_qemu_io | _filter_testdir
+{ $QEMU_IMG snapshot -a foo $TEST_IMG; } 2>&1 | _filter_qemu_io | _filter_testdir
+{ $QEMU_IO -c "read -b -P 0x11 1T 4k" $TEST_IMG; } 2>&1 | _filter_qemu_io | _filter_testdir
+_check_test_img
+
+
+echo
+echo "qcow2_snapshot_load_tmp() should take the L1 size from the snapshot"
+echo
+
+CLUSTER_SIZE=512
+_make_test_img 64M
+{ $QEMU_IMG snapshot -c foo $TEST_IMG; } 2>&1 | _filter_qemu_io | _filter_testdir
+poke_file "$TEST_IMG" "$offset_size" "\x00\x00\x00\x00\x00\x00\x02\x00"
+poke_file "$TEST_IMG" "$offset_l1_size" "\x00\x00\x00\x01"
+{ $QEMU_IMG convert -s foo $TEST_IMG $TEST_IMG.snap; } 2>&1 | _filter_qemu_io | _filter_testdir
+
+
# success, all done
echo "*** done"
rm -f $seq.full
diff --git a/tests/qemu-iotests/029.out b/tests/qemu-iotests/029.out
index 0eedb3a3ab..ce0e64d24a 100644
--- a/tests/qemu-iotests/029.out
+++ b/tests/qemu-iotests/029.out
@@ -1,4 +1,8 @@
QA output created by 029
+
+Test loading internal snapshots where the L1 table of the snapshot
+is smaller than the current L1 table.
+
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864
wrote 4096/4096 bytes at offset 0
4 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
@@ -7,4 +11,17 @@ Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=16777216
wrote 4194304/4194304 bytes at offset 0
4 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
No errors were found on the image.
+
+Try using a huge VM state
+
+Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864
+wrote 4096/4096 bytes at offset 1099511627776
+4 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 4096/4096 bytes at offset 1099511627776
+4 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+No errors were found on the image.
+
+qcow2_snapshot_load_tmp() should take the L1 size from the snapshot
+
+Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864
*** done
diff --git a/tests/qemu-iotests/030 b/tests/qemu-iotests/030
index 59a34f76f5..8cb61fd7ec 100755
--- a/tests/qemu-iotests/030
+++ b/tests/qemu-iotests/030
@@ -50,15 +50,7 @@ class TestSingleDrive(iotests.QMPTestCase):
result = self.vm.qmp('block-stream', device='drive0')
self.assert_qmp(result, 'return', {})
- completed = False
- while not completed:
- for event in self.vm.get_qmp_events(wait=True):
- if event['event'] == 'BLOCK_JOB_COMPLETED':
- self.assert_qmp(event, 'data/type', 'stream')
- self.assert_qmp(event, 'data/device', 'drive0')
- self.assert_qmp(event, 'data/offset', self.image_len)
- self.assert_qmp(event, 'data/len', self.image_len)
- completed = True
+ self.wait_until_completed()
self.assert_no_active_block_jobs()
self.vm.shutdown()
@@ -89,15 +81,7 @@ class TestSingleDrive(iotests.QMPTestCase):
self.assert_qmp(result, 'return', {})
self.vm.resume_drive('drive0')
- completed = False
- while not completed:
- for event in self.vm.get_qmp_events(wait=True):
- if event['event'] == 'BLOCK_JOB_COMPLETED':
- self.assert_qmp(event, 'data/type', 'stream')
- self.assert_qmp(event, 'data/device', 'drive0')
- self.assert_qmp(event, 'data/offset', self.image_len)
- self.assert_qmp(event, 'data/len', self.image_len)
- completed = True
+ self.wait_until_completed()
self.assert_no_active_block_jobs()
self.vm.shutdown()
@@ -112,15 +96,7 @@ class TestSingleDrive(iotests.QMPTestCase):
result = self.vm.qmp('block-stream', device='drive0', base=mid_img)
self.assert_qmp(result, 'return', {})
- completed = False
- while not completed:
- for event in self.vm.get_qmp_events(wait=True):
- if event['event'] == 'BLOCK_JOB_COMPLETED':
- self.assert_qmp(event, 'data/type', 'stream')
- self.assert_qmp(event, 'data/device', 'drive0')
- self.assert_qmp(event, 'data/offset', self.image_len)
- self.assert_qmp(event, 'data/len', self.image_len)
- completed = True
+ self.wait_until_completed()
self.assert_no_active_block_jobs()
self.vm.shutdown()
@@ -152,15 +128,7 @@ class TestSmallerBackingFile(iotests.QMPTestCase):
result = self.vm.qmp('block-stream', device='drive0')
self.assert_qmp(result, 'return', {})
- completed = False
- while not completed:
- for event in self.vm.get_qmp_events(wait=True):
- if event['event'] == 'BLOCK_JOB_COMPLETED':
- self.assert_qmp(event, 'data/type', 'stream')
- self.assert_qmp(event, 'data/device', 'drive0')
- self.assert_qmp(event, 'data/offset', self.image_len)
- self.assert_qmp(event, 'data/len', self.image_len)
- completed = True
+ self.wait_until_completed()
self.assert_no_active_block_jobs()
self.vm.shutdown()
@@ -442,15 +410,7 @@ class TestSetSpeed(iotests.QMPTestCase):
result = self.vm.qmp('block-job-set-speed', device='drive0', speed=8 * 1024 * 1024)
self.assert_qmp(result, 'return', {})
- completed = False
- while not completed:
- for event in self.vm.get_qmp_events(wait=True):
- if event['event'] == 'BLOCK_JOB_COMPLETED':
- self.assert_qmp(event, 'data/type', 'stream')
- self.assert_qmp(event, 'data/device', 'drive0')
- self.assert_qmp(event, 'data/offset', self.image_len)
- self.assert_qmp(event, 'data/len', self.image_len)
- completed = True
+ self.wait_until_completed()
self.assert_no_active_block_jobs()
diff --git a/tests/qemu-iotests/039 b/tests/qemu-iotests/039
index 9b355c0977..b9cbe99560 100755
--- a/tests/qemu-iotests/039
+++ b/tests/qemu-iotests/039
@@ -131,6 +131,26 @@ ulimit -c "$old_ulimit"
./qcow2.py "$TEST_IMG" dump-header | grep incompatible_features
_check_test_img
+echo
+echo "== Committing to a backing file with lazy_refcounts=on =="
+
+IMGOPTS="compat=1.1,lazy_refcounts=on"
+TEST_IMG="$TEST_IMG".base _make_test_img $size
+
+IMGOPTS="compat=1.1,lazy_refcounts=on,backing_file=$TEST_IMG.base"
+_make_test_img $size
+
+$QEMU_IO -c "write 0 512" "$TEST_IMG" | _filter_qemu_io
+$QEMU_IMG commit "$TEST_IMG"
+
+# The dirty bit must not be set
+./qcow2.py "$TEST_IMG" dump-header | grep incompatible_features
+./qcow2.py "$TEST_IMG".base dump-header | grep incompatible_features
+
+_check_test_img
+TEST_IMG="$TEST_IMG".base _check_test_img
+
+
# success, all done
echo "*** done"
rm -f $seq.full
diff --git a/tests/qemu-iotests/039.out b/tests/qemu-iotests/039.out
index 077fa64cbf..fb31ae0624 100644
--- a/tests/qemu-iotests/039.out
+++ b/tests/qemu-iotests/039.out
@@ -54,4 +54,15 @@ wrote 512/512 bytes at offset 0
512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
incompatible_features 0x0
No errors were found on the image.
+
+== Committing to a backing file with lazy_refcounts=on ==
+Formatting 'TEST_DIR/t.IMGFMT.base', fmt=IMGFMT size=134217728
+Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=134217728 backing_file='TEST_DIR/t.IMGFMT.base'
+wrote 512/512 bytes at offset 0
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+Image committed.
+incompatible_features 0x0
+incompatible_features 0x0
+No errors were found on the image.
+No errors were found on the image.
*** done
diff --git a/tests/qemu-iotests/044.out b/tests/qemu-iotests/044.out
index 5c5aa929fb..4789a5310e 100644
--- a/tests/qemu-iotests/044.out
+++ b/tests/qemu-iotests/044.out
@@ -1,6 +1,6 @@
No errors were found on the image.
7292415/33554432 = 21.73% allocated, 0.00% fragmented, 0.00% compressed clusters
-Image end offset: 4296448000
+Image end offset: 4296152064
.
----------------------------------------------------------------------
Ran 1 tests
diff --git a/tests/qemu-iotests/051 b/tests/qemu-iotests/051
index 14694e176b..073dc7a2d3 100755
--- a/tests/qemu-iotests/051
+++ b/tests/qemu-iotests/051
@@ -204,6 +204,10 @@ run_qemu -hda foo:bar
run_qemu -drive file=foo:bar
run_qemu -drive file.filename=foo:bar
+run_qemu -hda "file:$TEST_IMG"
+run_qemu -drive file="file:$TEST_IMG"
+run_qemu -drive file.filename="file:$TEST_IMG"
+
echo
echo === Snapshot mode ===
echo
@@ -214,6 +218,14 @@ echo 'qemu-io ide0-hd0 "write -P 0x22 0 4k"' | run_qemu -drive file="$TEST_IMG"
echo 'qemu-io ide0-hd0 "write -P 0x22 0 4k"' | run_qemu -drive file="$TEST_IMG",snapshot=on | _filter_qemu_io
echo 'qemu-io ide0-hd0 "write -P 0x22 0 4k"' | run_qemu -drive file.filename="$TEST_IMG",driver=qcow2,snapshot=on | _filter_qemu_io
echo 'qemu-io ide0-hd0 "write -P 0x22 0 4k"' | run_qemu -drive file.filename="$TEST_IMG",driver=qcow2 -snapshot | _filter_qemu_io
+echo 'qemu-io ide0-hd0 "write -P 0x22 0 4k"' | run_qemu -drive file="file:$TEST_IMG" -snapshot | _filter_qemu_io
+echo 'qemu-io ide0-hd0 "write -P 0x22 0 4k"' | run_qemu -drive file="file:$TEST_IMG",snapshot=on | _filter_qemu_io
+
+# Opening a read-only file r/w with snapshot=on
+chmod u-w "$TEST_IMG"
+echo 'qemu-io ide0-hd0 "write -P 0x22 0 4k"' | run_qemu -drive file="$TEST_IMG" -snapshot | _filter_qemu_io
+echo 'qemu-io ide0-hd0 "write -P 0x22 0 4k"' | run_qemu -drive file="$TEST_IMG",snapshot=on | _filter_qemu_io
+chmod u+w "$TEST_IMG"
$QEMU_IO -c "read -P 0x11 0 4k" "$TEST_IMG" | _filter_qemu_io
diff --git a/tests/qemu-iotests/051.out b/tests/qemu-iotests/051.out
index f5e33ff395..01b0384472 100644
--- a/tests/qemu-iotests/051.out
+++ b/tests/qemu-iotests/051.out
@@ -44,11 +44,11 @@ QEMU_PROG: -drive file=TEST_DIR/t.qcow2,driver=foo: could not open disk image TE
=== Overriding backing file ===
Testing: -drive file=TEST_DIR/t.qcow2,driver=qcow2,backing.file.filename=TEST_DIR/t.qcow2.orig -nodefaults
-QEMU X.Y.Z monitor - type 'help' for more information
-(qemu) iininfinfoinfo info binfo blinfo bloinfo blocinfo block
-ide0-hd0: TEST_DIR/t.qcow2 (qcow2)
- Backing file: TEST_DIR/t.qcow2.orig (chain depth: 1)
-(qemu) qququiquit
+QEMU X.Y.Z monitor - type 'help' for more information
+(qemu) iininfinfoinfo info binfo blinfo bloinfo blocinfo block
+ide0-hd0: TEST_DIR/t.qcow2 (qcow2)
+ Backing file: TEST_DIR/t.qcow2.orig (chain depth: 1)
+(qemu) qququiquit
=== Enable and disable lazy refcounting on the command line, plus some invalid values ===
@@ -275,6 +275,17 @@ QEMU_PROG: -drive file=foo:bar: could not open disk image foo:bar: Unknown proto
Testing: -drive file.filename=foo:bar
QEMU_PROG: -drive file.filename=foo:bar: could not open disk image ide0-hd0: Could not open 'foo:bar': No such file or directory
+Testing: -hda file:TEST_DIR/t.qcow2
+QEMU X.Y.Z monitor - type 'help' for more information
+(qemu) qququiquit
+
+Testing: -drive file=file:TEST_DIR/t.qcow2
+QEMU X.Y.Z monitor - type 'help' for more information
+(qemu) qququiquit
+
+Testing: -drive file.filename=file:TEST_DIR/t.qcow2
+QEMU_PROG: -drive file.filename=file:TEST_DIR/t.qcow2: could not open disk image ide0-hd0: Could not open 'file:TEST_DIR/t.qcow2': No such file or directory
+
=== Snapshot mode ===
@@ -308,6 +319,34 @@ wrote 4096/4096 bytes at offset 0
4 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
(qemu) qququiquit
+Testing: -drive file=file:TEST_DIR/t.qcow2 -snapshot
+QEMU X.Y.Z monitor - type 'help' for more information
+(qemu) qqeqemqemuqemu-qemu-iqemu-ioqemu-io qemu-io iqemu-io idqemu-io ideqemu-io ide0qemu-io ide0-qemu-io ide0-hqemu-io ide0-hdqemu-io ide0-hd0qemu-io ide0-hd0 qemu-io ide0-hd0 "qemu-io ide0-hd0 "wqemu-io ide0-hd0 "wrqemu-io ide0-hd0 "wriqemu-io ide0-hd0 "writqemu-io ide0-hd0 "writeqemu-io ide0-hd0 "write qemu-io ide0-hd0 "write -qemu-io ide0-hd0 "write -Pqemu-io ide0-hd0 "write -P qemu-io ide0-hd0 "write -P 0qemu-io ide0-hd0 "write -P 0xqemu-io ide0-hd0 "write -P 0x2qemu-io ide0-hd0 "write -P 0x22qemu-io ide0-hd0 "write -P 0x22 qemu-io ide0-hd0 "write -P 0x22 0qemu-io ide0-hd0 "write -P 0x22 0 qemu-io ide0-hd0 "write -P 0x22 0 4qemu-io ide0-hd0 "write -P 0x22 0 4kqemu-io ide0-hd0 "write -P 0x22 0 4k"
+wrote 4096/4096 bytes at offset 0
+4 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+(qemu) qququiquit
+
+Testing: -drive file=file:TEST_DIR/t.qcow2,snapshot=on
+QEMU X.Y.Z monitor - type 'help' for more information
+(qemu) qqeqemqemuqemu-qemu-iqemu-ioqemu-io qemu-io iqemu-io idqemu-io ideqemu-io ide0qemu-io ide0-qemu-io ide0-hqemu-io ide0-hdqemu-io ide0-hd0qemu-io ide0-hd0 qemu-io ide0-hd0 "qemu-io ide0-hd0 "wqemu-io ide0-hd0 "wrqemu-io ide0-hd0 "wriqemu-io ide0-hd0 "writqemu-io ide0-hd0 "writeqemu-io ide0-hd0 "write qemu-io ide0-hd0 "write -qemu-io ide0-hd0 "write -Pqemu-io ide0-hd0 "write -P qemu-io ide0-hd0 "write -P 0qemu-io ide0-hd0 "write -P 0xqemu-io ide0-hd0 "write -P 0x2qemu-io ide0-hd0 "write -P 0x22qemu-io ide0-hd0 "write -P 0x22 qemu-io ide0-hd0 "write -P 0x22 0qemu-io ide0-hd0 "write -P 0x22 0 qemu-io ide0-hd0 "write -P 0x22 0 4qemu-io ide0-hd0 "write -P 0x22 0 4kqemu-io ide0-hd0 "write -P 0x22 0 4k"
+wrote 4096/4096 bytes at offset 0
+4 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+(qemu) qququiquit
+
+Testing: -drive file=TEST_DIR/t.qcow2 -snapshot
+QEMU X.Y.Z monitor - type 'help' for more information
+(qemu) qqeqemqemuqemu-qemu-iqemu-ioqemu-io qemu-io iqemu-io idqemu-io ideqemu-io ide0qemu-io ide0-qemu-io ide0-hqemu-io ide0-hdqemu-io ide0-hd0qemu-io ide0-hd0 qemu-io ide0-hd0 "qemu-io ide0-hd0 "wqemu-io ide0-hd0 "wrqemu-io ide0-hd0 "wriqemu-io ide0-hd0 "writqemu-io ide0-hd0 "writeqemu-io ide0-hd0 "write qemu-io ide0-hd0 "write -qemu-io ide0-hd0 "write -Pqemu-io ide0-hd0 "write -P qemu-io ide0-hd0 "write -P 0qemu-io ide0-hd0 "write -P 0xqemu-io ide0-hd0 "write -P 0x2qemu-io ide0-hd0 "write -P 0x22qemu-io ide0-hd0 "write -P 0x22 qemu-io ide0-hd0 "write -P 0x22 0qemu-io ide0-hd0 "write -P 0x22 0 qemu-io ide0-hd0 "write -P 0x22 0 4qemu-io ide0-hd0 "write -P 0x22 0 4kqemu-io ide0-hd0 "write -P 0x22 0 4k"
+wrote 4096/4096 bytes at offset 0
+4 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+(qemu) qququiquit
+
+Testing: -drive file=TEST_DIR/t.qcow2,snapshot=on
+QEMU X.Y.Z monitor - type 'help' for more information
+(qemu) qqeqemqemuqemu-qemu-iqemu-ioqemu-io qemu-io iqemu-io idqemu-io ideqemu-io ide0qemu-io ide0-qemu-io ide0-hqemu-io ide0-hdqemu-io ide0-hd0qemu-io ide0-hd0 qemu-io ide0-hd0 "qemu-io ide0-hd0 "wqemu-io ide0-hd0 "wrqemu-io ide0-hd0 "wriqemu-io ide0-hd0 "writqemu-io ide0-hd0 "writeqemu-io ide0-hd0 "write qemu-io ide0-hd0 "write -qemu-io ide0-hd0 "write -Pqemu-io ide0-hd0 "write -P qemu-io ide0-hd0 "write -P 0qemu-io ide0-hd0 "write -P 0xqemu-io ide0-hd0 "write -P 0x2qemu-io ide0-hd0 "write -P 0x22qemu-io ide0-hd0 "write -P 0x22 qemu-io ide0-hd0 "write -P 0x22 0qemu-io ide0-hd0 "write -P 0x22 0 qemu-io ide0-hd0 "write -P 0x22 0 4qemu-io ide0-hd0 "write -P 0x22 0 4kqemu-io ide0-hd0 "write -P 0x22 0 4k"
+wrote 4096/4096 bytes at offset 0
+4 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+(qemu) qququiquit
+
read 4096/4096 bytes at offset 0
4 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
Testing: -drive file=TEST_DIR/t.qcow2,snapshot=off
diff --git a/tests/qemu-iotests/056 b/tests/qemu-iotests/056
index 63893423cf..54e4bd0692 100755
--- a/tests/qemu-iotests/056
+++ b/tests/qemu-iotests/056
@@ -57,14 +57,7 @@ class TestSyncModesNoneAndTop(iotests.QMPTestCase):
format=iotests.imgfmt, target=target_img)
self.assert_qmp(result, 'return', {})
- # Custom completed check as we are not copying all data.
- completed = False
- while not completed:
- for event in self.vm.get_qmp_events(wait=True):
- if event['event'] == 'BLOCK_JOB_COMPLETED':
- self.assert_qmp(event, 'data/device', 'drive0')
- self.assert_qmp_absent(event, 'data/error')
- completed = True
+ self.wait_until_completed(check_offset=False)
self.assert_no_active_block_jobs()
self.vm.shutdown()
diff --git a/tests/qemu-iotests/060 b/tests/qemu-iotests/060
index af8ed9f39a..f0116aab1d 100755
--- a/tests/qemu-iotests/060
+++ b/tests/qemu-iotests/060
@@ -138,6 +138,32 @@ $QEMU_IMG snapshot -a foo "$TEST_IMG"
_check_test_img
$QEMU_IO -c "$OPEN_RO" -c "read -P 1 0 512" | _filter_qemu_io
+echo
+echo "=== Testing overlap while COW is in flight ==="
+echo
+# compat=0.10 is required in order to make the following discard actually
+# unallocate the sector rather than make it a zero sector - we want COW, after
+# all.
+IMGOPTS='compat=0.10' _make_test_img 1G
+# Write two clusters, the second one enforces creation of an L2 table after
+# the first data cluster.
+$QEMU_IO -c 'write 0k 64k' -c 'write 512M 64k' "$TEST_IMG" | _filter_qemu_io
+# Discard the first cluster. This cluster will soon enough be reallocated and
+# used for COW.
+$QEMU_IO -c 'discard 0k 64k' "$TEST_IMG" | _filter_qemu_io
+# Now, corrupt the image by marking the second L2 table cluster as free.
+poke_file "$TEST_IMG" '131084' "\x00\x00" # 0x2000c
+# Start a write operation requiring COW on the image stopping it right before
+# doing the read; then, trigger the corruption prevention by writing anything to
+# any unallocated cluster, leading to an attempt to overwrite the second L2
+# table. Finally, resume the COW write and see it fail (but not crash).
+echo "open -o file.driver=blkdebug $TEST_IMG
+break cow_read 0
+aio_write 0k 1k
+wait_break 0
+write 64k 64k
+resume 0" | $QEMU_IO | _filter_qemu_io
+
# success, all done
echo "*** done"
rm -f $seq.full
diff --git a/tests/qemu-iotests/060.out b/tests/qemu-iotests/060.out
index 6c7bdbb2f2..a517948036 100644
--- a/tests/qemu-iotests/060.out
+++ b/tests/qemu-iotests/060.out
@@ -78,4 +78,19 @@ read 512/512 bytes at offset 0
No errors were found on the image.
read 512/512 bytes at offset 0
512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+
+=== Testing overlap while COW is in flight ===
+
+Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1073741824
+wrote 65536/65536 bytes at offset 0
+64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 65536/65536 bytes at offset 536870912
+64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+discard 65536/65536 bytes at offset 0
+64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+qcow2: Preventing invalid write on metadata (overlaps with active L2 table); image marked as corrupt.
+blkdebug: Suspended request '0'
+write failed: Input/output error
+blkdebug: Resuming request '0'
+aio_write failed: No medium found
*** done
diff --git a/tests/qemu-iotests/075 b/tests/qemu-iotests/075
new file mode 100755
index 0000000000..40032c563d
--- /dev/null
+++ b/tests/qemu-iotests/075
@@ -0,0 +1,106 @@
+#!/bin/bash
+#
+# cloop format input validation tests
+#
+# Copyright (C) 2013 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+# creator
+owner=stefanha@redhat.com
+
+seq=`basename $0`
+echo "QA output created by $seq"
+
+here=`pwd`
+tmp=/tmp/$$
+status=1 # failure is the default!
+
+_cleanup()
+{
+ _cleanup_test_img
+}
+trap "_cleanup; exit \$status" 0 1 2 3 15
+
+# get standard environment, filters and checks
+. ./common.rc
+. ./common.filter
+
+_supported_fmt cloop
+_supported_proto generic
+_supported_os Linux
+
+block_size_offset=128
+n_blocks_offset=132
+offsets_offset=136
+
+echo
+echo "== check that the first sector can be read =="
+_use_sample_img simple-pattern.cloop.bz2
+$QEMU_IO -c "read 0 512" $TEST_IMG 2>&1 | _filter_qemu_io | _filter_testdir
+
+echo
+echo "== check that the last sector can be read =="
+_use_sample_img simple-pattern.cloop.bz2
+$QEMU_IO -c "read $((1024 * 1024 - 512)) 512" $TEST_IMG 2>&1 | _filter_qemu_io | _filter_testdir
+
+echo
+echo "== block_size must be a multiple of 512 =="
+_use_sample_img simple-pattern.cloop.bz2
+poke_file "$TEST_IMG" "$block_size_offset" "\x00\x00\x02\x01"
+$QEMU_IO -c "read 0 512" $TEST_IMG 2>&1 | _filter_qemu_io | _filter_testdir
+
+echo
+echo "== block_size cannot be zero =="
+_use_sample_img simple-pattern.cloop.bz2
+poke_file "$TEST_IMG" "$block_size_offset" "\x00\x00\x00\x00"
+$QEMU_IO -c "read 0 512" $TEST_IMG 2>&1 | _filter_qemu_io | _filter_testdir
+
+echo
+echo "== huge block_size ==="
+_use_sample_img simple-pattern.cloop.bz2
+poke_file "$TEST_IMG" "$block_size_offset" "\xff\xff\xfe\x00"
+$QEMU_IO -c "read 0 512" $TEST_IMG 2>&1 | _filter_qemu_io | _filter_testdir
+
+echo
+echo "== offsets_size overflow ==="
+_use_sample_img simple-pattern.cloop.bz2
+poke_file "$TEST_IMG" "$n_blocks_offset" "\xff\xff\xff\xff"
+$QEMU_IO -c "read 0 512" $TEST_IMG 2>&1 | _filter_qemu_io | _filter_testdir
+
+echo
+echo "== refuse images that require too many offsets ==="
+_use_sample_img simple-pattern.cloop.bz2
+poke_file "$TEST_IMG" "$n_blocks_offset" "\x04\x00\x00\x01"
+$QEMU_IO -c "read 0 512" $TEST_IMG 2>&1 | _filter_qemu_io | _filter_testdir
+
+echo
+echo "== refuse images with non-monotonically increasing offsets =="
+_use_sample_img simple-pattern.cloop.bz2
+poke_file "$TEST_IMG" "$offsets_offset" "\x00\x00\x00\x00\xff\xff\xff\xff"
+poke_file "$TEST_IMG" $((offsets_offset + 8)) "\x00\x00\x00\x00\xff\xfe\x00\x00"
+$QEMU_IO -c "read 0 512" $TEST_IMG 2>&1 | _filter_qemu_io | _filter_testdir
+
+echo
+echo "== refuse images with invalid compressed block size =="
+_use_sample_img simple-pattern.cloop.bz2
+poke_file "$TEST_IMG" "$offsets_offset" "\x00\x00\x00\x00\x00\x00\x00\x00"
+poke_file "$TEST_IMG" $((offsets_offset + 8)) "\xff\xff\xff\xff\xff\xff\xff\xff"
+$QEMU_IO -c "read 0 512" $TEST_IMG 2>&1 | _filter_qemu_io | _filter_testdir
+
+# success, all done
+echo "*** done"
+rm -f $seq.full
+status=0
diff --git a/tests/qemu-iotests/075.out b/tests/qemu-iotests/075.out
new file mode 100644
index 0000000000..5f1d6c120a
--- /dev/null
+++ b/tests/qemu-iotests/075.out
@@ -0,0 +1,38 @@
+QA output created by 075
+
+== check that the first sector can be read ==
+read 512/512 bytes at offset 0
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+
+== check that the last sector can be read ==
+read 512/512 bytes at offset 1048064
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+
+== block_size must be a multiple of 512 ==
+qemu-io: can't open device TEST_DIR/simple-pattern.cloop: block_size 513 must be a multiple of 512
+no file open, try 'help open'
+
+== block_size cannot be zero ==
+qemu-io: can't open device TEST_DIR/simple-pattern.cloop: block_size cannot be zero
+no file open, try 'help open'
+
+== huge block_size ===
+qemu-io: can't open device TEST_DIR/simple-pattern.cloop: block_size 4294966784 must be 64 MB or less
+no file open, try 'help open'
+
+== offsets_size overflow ===
+qemu-io: can't open device TEST_DIR/simple-pattern.cloop: n_blocks 4294967295 must be 536870911 or less
+no file open, try 'help open'
+
+== refuse images that require too many offsets ===
+qemu-io: can't open device TEST_DIR/simple-pattern.cloop: image requires too many offsets, try increasing block size
+no file open, try 'help open'
+
+== refuse images with non-monotonically increasing offsets ==
+qemu-io: can't open device TEST_DIR/simple-pattern.cloop: offsets not monotonically increasing at index 1, image file is corrupt
+no file open, try 'help open'
+
+== refuse images with invalid compressed block size ==
+qemu-io: can't open device TEST_DIR/simple-pattern.cloop: invalid compressed block size at index 1, image file is corrupt
+no file open, try 'help open'
+*** done
diff --git a/tests/qemu-iotests/076 b/tests/qemu-iotests/076
new file mode 100755
index 0000000000..b614a7dd6e
--- /dev/null
+++ b/tests/qemu-iotests/076
@@ -0,0 +1,76 @@
+#!/bin/bash
+#
+# parallels format input validation tests
+#
+# Copyright (C) 2013 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+# creator
+owner=kwolf@redhat.com
+
+seq=`basename $0`
+echo "QA output created by $seq"
+
+here=`pwd`
+tmp=/tmp/$$
+status=1 # failure is the default!
+
+_cleanup()
+{
+ _cleanup_test_img
+}
+trap "_cleanup; exit \$status" 0 1 2 3 15
+
+# get standard environment, filters and checks
+. ./common.rc
+. ./common.filter
+
+_supported_fmt parallels
+_supported_proto generic
+_supported_os Linux
+
+tracks_offset=$((0x1c))
+catalog_entries_offset=$((0x20))
+nb_sectors_offset=$((0x24))
+
+echo
+echo "== Read from a valid (enough) image =="
+_use_sample_img fake.parallels.bz2
+{ $QEMU_IO -c "read -P 0x11 0 64k" $TEST_IMG; } 2>&1 | _filter_qemu_io | _filter_testdir
+
+echo
+echo "== Negative catalog size =="
+_use_sample_img fake.parallels.bz2
+poke_file "$TEST_IMG" "$catalog_entries_offset" "\xff\xff\xff\xff"
+{ $QEMU_IO -c "read 0 512" $TEST_IMG; } 2>&1 | _filter_qemu_io | _filter_testdir
+
+echo
+echo "== Overflow in catalog allocation =="
+_use_sample_img fake.parallels.bz2
+poke_file "$TEST_IMG" "$nb_sectors_offset" "\xff\xff\xff\xff"
+poke_file "$TEST_IMG" "$catalog_entries_offset" "\x01\x00\x00\x40"
+{ $QEMU_IO -c "read 64M 64M" $TEST_IMG; } 2>&1 | _filter_qemu_io | _filter_testdir
+
+echo
+echo "== Zero sectors per track =="
+_use_sample_img fake.parallels.bz2
+poke_file "$TEST_IMG" "$tracks_offset" "\x00\x00\x00\x00"
+{ $QEMU_IO -c "read 0 512" $TEST_IMG; } 2>&1 | _filter_qemu_io | _filter_testdir
+
+# success, all done
+echo "*** done"
+rm -f $seq.full
+status=0
diff --git a/tests/qemu-iotests/076.out b/tests/qemu-iotests/076.out
new file mode 100644
index 0000000000..f7745d8b0d
--- /dev/null
+++ b/tests/qemu-iotests/076.out
@@ -0,0 +1,18 @@
+QA output created by 076
+
+== Read from a valid (enough) image ==
+read 65536/65536 bytes at offset 0
+64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+
+== Negative catalog size ==
+qemu-io: can't open device TEST_DIR/fake.parallels: Catalog too large
+no file open, try 'help open'
+
+== Overflow in catalog allocation ==
+qemu-io: can't open device TEST_DIR/fake.parallels: Catalog too large
+no file open, try 'help open'
+
+== Zero sectors per track ==
+qemu-io: can't open device TEST_DIR/fake.parallels: Invalid image: Zero sectors per track
+no file open, try 'help open'
+*** done
diff --git a/tests/qemu-iotests/078 b/tests/qemu-iotests/078
new file mode 100755
index 0000000000..d4d6da7b09
--- /dev/null
+++ b/tests/qemu-iotests/078
@@ -0,0 +1,91 @@
+#!/bin/bash
+#
+# bochs format input validation tests
+#
+# Copyright (C) 2013 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+# creator
+owner=kwolf@redhat.com
+
+seq=`basename $0`
+echo "QA output created by $seq"
+
+here=`pwd`
+tmp=/tmp/$$
+status=1 # failure is the default!
+
+_cleanup()
+{
+ _cleanup_test_img
+}
+trap "_cleanup; exit \$status" 0 1 2 3 15
+
+# get standard environment, filters and checks
+. ./common.rc
+. ./common.filter
+
+_supported_fmt bochs
+_supported_proto generic
+_supported_os Linux
+
+catalog_size_offset=$((0x48))
+extent_size_offset=$((0x50))
+disk_size_offset=$((0x58))
+
+echo
+echo "== Read from a valid image =="
+_use_sample_img empty.bochs.bz2
+{ $QEMU_IO -c "read 0 512" $TEST_IMG; } 2>&1 | _filter_qemu_io | _filter_testdir
+
+echo
+echo "== Negative catalog size =="
+_use_sample_img empty.bochs.bz2
+poke_file "$TEST_IMG" "$catalog_size_offset" "\xff\xff\xff\xff"
+{ $QEMU_IO -c "read 0 512" $TEST_IMG; } 2>&1 | _filter_qemu_io | _filter_testdir
+
+echo
+echo "== Overflow for catalog size * sizeof(uint32_t) =="
+_use_sample_img empty.bochs.bz2
+poke_file "$TEST_IMG" "$catalog_size_offset" "\x00\x00\x00\x40"
+{ $QEMU_IO -c "read 0 512" $TEST_IMG; } 2>&1 | _filter_qemu_io | _filter_testdir
+
+echo
+echo "== Too small catalog bitmap for image size =="
+_use_sample_img empty.bochs.bz2
+poke_file "$TEST_IMG" "$disk_size_offset" "\x00\xc0\x0f\x00\x00\x00\x00\x7f"
+{ $QEMU_IO -c "read 2T 4k" $TEST_IMG; } 2>&1 | _filter_qemu_io | _filter_testdir
+
+_use_sample_img empty.bochs.bz2
+poke_file "$TEST_IMG" "$catalog_size_offset" "\x10\x00\x00\x00"
+{ $QEMU_IO -c "read 0xfbe00 512" $TEST_IMG; } 2>&1 | _filter_qemu_io | _filter_testdir
+
+echo
+echo "== Negative extent size =="
+_use_sample_img empty.bochs.bz2
+poke_file "$TEST_IMG" "$extent_size_offset" "\x00\x00\x00\x80"
+{ $QEMU_IO -c "read 768k 512" $TEST_IMG; } 2>&1 | _filter_qemu_io | _filter_testdir
+
+echo
+echo "== Zero extent size =="
+_use_sample_img empty.bochs.bz2
+poke_file "$TEST_IMG" "$extent_size_offset" "\x00\x00\x00\x00"
+{ $QEMU_IO -c "read 0 512" $TEST_IMG; } 2>&1 | _filter_qemu_io | _filter_testdir
+
+# success, all done
+echo "*** done"
+rm -f $seq.full
+status=0
diff --git a/tests/qemu-iotests/078.out b/tests/qemu-iotests/078.out
new file mode 100644
index 0000000000..ca18d2ea38
--- /dev/null
+++ b/tests/qemu-iotests/078.out
@@ -0,0 +1,28 @@
+QA output created by 078
+
+== Read from a valid image ==
+read 512/512 bytes at offset 0
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+
+== Negative catalog size ==
+qemu-io: can't open device TEST_DIR/empty.bochs: Catalog size is too large
+no file open, try 'help open'
+
+== Overflow for catalog size * sizeof(uint32_t) ==
+qemu-io: can't open device TEST_DIR/empty.bochs: Catalog size is too large
+no file open, try 'help open'
+
+== Too small catalog bitmap for image size ==
+qemu-io: can't open device TEST_DIR/empty.bochs: Catalog size is too small for this disk size
+no file open, try 'help open'
+qemu-io: can't open device TEST_DIR/empty.bochs: Catalog size is too small for this disk size
+no file open, try 'help open'
+
+== Negative extent size ==
+qemu-io: can't open device TEST_DIR/empty.bochs: Extent size 2147483648 is too large
+no file open, try 'help open'
+
+== Zero extent size ==
+qemu-io: can't open device TEST_DIR/empty.bochs: Extent size must be at least 512
+no file open, try 'help open'
+*** done
diff --git a/tests/qemu-iotests/080 b/tests/qemu-iotests/080
new file mode 100755
index 0000000000..6b3a3e77a5
--- /dev/null
+++ b/tests/qemu-iotests/080
@@ -0,0 +1,180 @@
+#!/bin/bash
+#
+# qcow2 format input validation tests
+#
+# Copyright (C) 2013 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+# creator
+owner=kwolf@redhat.com
+
+seq=`basename $0`
+echo "QA output created by $seq"
+
+here=`pwd`
+tmp=/tmp/$$
+status=1 # failure is the default!
+
+_cleanup()
+{
+ rm -f $TEST_IMG.snap
+ _cleanup_test_img
+}
+trap "_cleanup; exit \$status" 0 1 2 3 15
+
+# get standard environment, filters and checks
+. ./common.rc
+. ./common.filter
+
+_supported_fmt qcow2
+_supported_proto generic
+_supported_os Linux
+
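+# Byte offsets below follow the qcow2 on-disk header layout (see
+# docs/specs/qcow2.txt); header_size is the length of the version 3 header.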
+header_size=104
+
+offset_backing_file_offset=8
+offset_backing_file_size=16
+offset_l1_size=36
+offset_l1_table_offset=40
+offset_refcount_table_offset=48
+offset_refcount_table_clusters=56
+offset_nb_snapshots=60
+offset_snapshots_offset=64
+offset_header_size=100
+offset_ext_magic=$header_size
+offset_ext_size=$((header_size + 4))
+
+offset_l2_table_0=$((0x40000))
+
+offset_snap1=$((0x70000))
+offset_snap1_l1_offset=$((offset_snap1 + 0))
+offset_snap1_l1_size=$((offset_snap1 + 8))
+
+echo
+echo "== Huge header size =="
+_make_test_img 64M
+poke_file "$TEST_IMG" "$offset_header_size" "\xff\xff\xff\xff"
+{ $QEMU_IO -c "read 0 512" $TEST_IMG; } 2>&1 | _filter_qemu_io | _filter_testdir
+poke_file "$TEST_IMG" "$offset_header_size" "\x7f\xff\xff\xff"
+{ $QEMU_IO -c "read 0 512" $TEST_IMG; } 2>&1 | _filter_qemu_io | _filter_testdir
+
+echo
+echo "== Huge unknown header extension =="
+_make_test_img 64M
+poke_file "$TEST_IMG" "$offset_backing_file_offset" "\xff\xff\xff\xff\xff\xff\xff\xff"
+poke_file "$TEST_IMG" "$offset_ext_magic" "\x12\x34\x56\x78"
+poke_file "$TEST_IMG" "$offset_ext_size" "\x7f\xff\xff\xff"
+{ $QEMU_IO -c "read 0 512" $TEST_IMG; } 2>&1 | _filter_qemu_io | _filter_testdir
+poke_file "$TEST_IMG" "$offset_backing_file_offset" "\x00\x00\x00\x00\x00\x00\x00\x00"
+{ $QEMU_IO -c "read 0 512" $TEST_IMG; } 2>&1 | _filter_qemu_io | _filter_testdir
+
+echo
+echo "== Huge refcount table size =="
+_make_test_img 64M
+poke_file "$TEST_IMG" "$offset_refcount_table_clusters" "\xff\xff\xff\xff"
+{ $QEMU_IO -c "read 0 512" $TEST_IMG; } 2>&1 | _filter_qemu_io | _filter_testdir
+poke_file "$TEST_IMG" "$offset_refcount_table_clusters" "\x00\x02\x00\x01"
+{ $QEMU_IO -c "read 0 512" $TEST_IMG; } 2>&1 | _filter_qemu_io | _filter_testdir
+
+echo
+echo "== Misaligned refcount table =="
+_make_test_img 64M
+poke_file "$TEST_IMG" "$offset_refcount_table_offset" "\x12\x34\x56\x78\x90\xab\xcd\xef"
+{ $QEMU_IO -c "read 0 512" $TEST_IMG; } 2>&1 | _filter_qemu_io | _filter_testdir
+
+echo
+echo "== Huge refcount offset =="
+_make_test_img 64M
+poke_file "$TEST_IMG" "$offset_refcount_table_offset" "\xff\xff\xff\xff\xff\xff\x00\x00"
+poke_file "$TEST_IMG" "$offset_refcount_table_clusters" "\x00\x00\x00\x7f"
+{ $QEMU_IO -c "read 0 512" $TEST_IMG; } 2>&1 | _filter_qemu_io | _filter_testdir
+
+echo
+echo "== Invalid snapshot table =="
+_make_test_img 64M
+poke_file "$TEST_IMG" "$offset_nb_snapshots" "\xff\xff\xff\xff"
+{ $QEMU_IO -c "read 0 512" $TEST_IMG; } 2>&1 | _filter_qemu_io | _filter_testdir
+poke_file "$TEST_IMG" "$offset_nb_snapshots" "\x7f\xff\xff\xff"
+{ $QEMU_IO -c "read 0 512" $TEST_IMG; } 2>&1 | _filter_qemu_io | _filter_testdir
+
+poke_file "$TEST_IMG" "$offset_snapshots_offset" "\xff\xff\xff\xff\xff\xff\x00\x00"
+poke_file "$TEST_IMG" "$offset_nb_snapshots" "\x00\x00\xff\xff"
+{ $QEMU_IO -c "read 0 512" $TEST_IMG; } 2>&1 | _filter_qemu_io | _filter_testdir
+
+poke_file "$TEST_IMG" "$offset_snapshots_offset" "\x12\x34\x56\x78\x90\xab\xcd\xef"
+poke_file "$TEST_IMG" "$offset_nb_snapshots" "\x00\x00\x00\x00"
+{ $QEMU_IO -c "read 0 512" $TEST_IMG; } 2>&1 | _filter_qemu_io | _filter_testdir
+
+echo
+echo "== Hitting snapshot table size limit =="
+_make_test_img 64M
+# Put the snapshot table in a more or less safe place (16 MB)
+poke_file "$TEST_IMG" "$offset_snapshots_offset" "\x00\x00\x00\x00\x01\x00\x00\x00"
+poke_file "$TEST_IMG" "$offset_nb_snapshots" "\x00\x01\x00\x00"
+{ $QEMU_IMG snapshot -c test $TEST_IMG; } 2>&1 | _filter_testdir
+{ $QEMU_IO -c "read 0 512" $TEST_IMG; } 2>&1 | _filter_qemu_io | _filter_testdir
+
+echo
+echo "== Invalid L1 table =="
+_make_test_img 64M
+poke_file "$TEST_IMG" "$offset_l1_size" "\xff\xff\xff\xff"
+{ $QEMU_IO -c "read 0 512" $TEST_IMG; } 2>&1 | _filter_qemu_io | _filter_testdir
+poke_file "$TEST_IMG" "$offset_l1_size" "\x7f\xff\xff\xff"
+{ $QEMU_IO -c "read 0 512" $TEST_IMG; } 2>&1 | _filter_qemu_io | _filter_testdir
+
+poke_file "$TEST_IMG" "$offset_l1_table_offset" "\x7f\xff\xff\xff\xff\xff\x00\x00"
+poke_file "$TEST_IMG" "$offset_l1_size" "\x00\x00\xff\xff"
+{ $QEMU_IO -c "read 0 512" $TEST_IMG; } 2>&1 | _filter_qemu_io | _filter_testdir
+
+poke_file "$TEST_IMG" "$offset_l1_table_offset" "\x12\x34\x56\x78\x90\xab\xcd\xef"
+poke_file "$TEST_IMG" "$offset_l1_size" "\x00\x00\x00\x01"
+{ $QEMU_IO -c "read 0 512" $TEST_IMG; } 2>&1 | _filter_qemu_io | _filter_testdir
+
+echo
+echo "== Invalid L1 table (with internal snapshot in the image) =="
+_make_test_img 64M
+{ $QEMU_IMG snapshot -c foo $TEST_IMG; } 2>&1 | _filter_qemu_io | _filter_testdir
+poke_file "$TEST_IMG" "$offset_l1_size" "\x00\x00\x00\x00"
+_img_info
+
+echo
+echo "== Invalid backing file size =="
+_make_test_img 64M
+poke_file "$TEST_IMG" "$offset_backing_file_offset" "\x00\x00\x00\x00\x00\x00\x10\x00"
+poke_file "$TEST_IMG" "$offset_backing_file_size" "\xff\xff\xff\xff"
+{ $QEMU_IO -c "read 0 512" $TEST_IMG; } 2>&1 | _filter_qemu_io | _filter_testdir
+
+echo
+echo "== Invalid L2 entry (huge physical offset) =="
+_make_test_img 64M
+{ $QEMU_IO -c "write 0 512" $TEST_IMG; } 2>&1 | _filter_qemu_io | _filter_testdir
+poke_file "$TEST_IMG" "$offset_l2_table_0" "\xbf\xff\xff\xff\xff\xff\x00\x00"
+{ $QEMU_IMG snapshot -c test $TEST_IMG; } 2>&1 | _filter_qemu_io | _filter_testdir
+poke_file "$TEST_IMG" "$offset_l2_table_0" "\x80\x00\x00\xff\xff\xff\x00\x00"
+{ $QEMU_IMG snapshot -c test $TEST_IMG; } 2>&1 | _filter_qemu_io | _filter_testdir
+
+echo
+echo "== Invalid snapshot L1 table =="
+_make_test_img 64M
+{ $QEMU_IO -c "write 0 512" $TEST_IMG; } 2>&1 | _filter_qemu_io | _filter_testdir
+{ $QEMU_IMG snapshot -c test $TEST_IMG; } 2>&1 | _filter_testdir
+poke_file "$TEST_IMG" "$offset_snap1_l1_size" "\x10\x00\x00\x00"
+{ $QEMU_IMG convert -s test $TEST_IMG $TEST_IMG.snap; } 2>&1 | _filter_testdir
+
+# success, all done
+echo "*** done"
+rm -f $seq.full
+status=0
diff --git a/tests/qemu-iotests/080.out b/tests/qemu-iotests/080.out
new file mode 100644
index 0000000000..f7a943c7a4
--- /dev/null
+++ b/tests/qemu-iotests/080.out
@@ -0,0 +1,83 @@
+QA output created by 080
+
+== Huge header size ==
+Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864
+qemu-io: can't open device TEST_DIR/t.qcow2: qcow2 header exceeds cluster size
+no file open, try 'help open'
+qemu-io: can't open device TEST_DIR/t.qcow2: qcow2 header exceeds cluster size
+no file open, try 'help open'
+
+== Huge unknown header extension ==
+Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864
+qemu-io: can't open device TEST_DIR/t.qcow2: Invalid backing file offset
+no file open, try 'help open'
+qemu-io: can't open device TEST_DIR/t.qcow2: Header extension too large
+no file open, try 'help open'
+
+== Huge refcount table size ==
+Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864
+qemu-io: can't open device TEST_DIR/t.qcow2: Reference count table too large
+no file open, try 'help open'
+qemu-io: can't open device TEST_DIR/t.qcow2: Reference count table too large
+no file open, try 'help open'
+
+== Misaligned refcount table ==
+Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864
+qemu-io: can't open device TEST_DIR/t.qcow2: Invalid reference count table offset
+no file open, try 'help open'
+
+== Huge refcount offset ==
+Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864
+qemu-io: can't open device TEST_DIR/t.qcow2: Invalid reference count table offset
+no file open, try 'help open'
+
+== Invalid snapshot table ==
+Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864
+qemu-io: can't open device TEST_DIR/t.qcow2: Too many snapshots
+no file open, try 'help open'
+qemu-io: can't open device TEST_DIR/t.qcow2: Too many snapshots
+no file open, try 'help open'
+qemu-io: can't open device TEST_DIR/t.qcow2: Invalid snapshot table offset
+no file open, try 'help open'
+qemu-io: can't open device TEST_DIR/t.qcow2: Invalid snapshot table offset
+no file open, try 'help open'
+
+== Hitting snapshot table size limit ==
+Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864
+qemu-img: Could not create snapshot 'test': -27 (File too large)
+read 512/512 bytes at offset 0
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+
+== Invalid L1 table ==
+Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864
+qemu-io: can't open device TEST_DIR/t.qcow2: Active L1 table too large
+no file open, try 'help open'
+qemu-io: can't open device TEST_DIR/t.qcow2: Active L1 table too large
+no file open, try 'help open'
+qemu-io: can't open device TEST_DIR/t.qcow2: Invalid L1 table offset
+no file open, try 'help open'
+qemu-io: can't open device TEST_DIR/t.qcow2: Invalid L1 table offset
+no file open, try 'help open'
+
+== Invalid L1 table (with internal snapshot in the image) ==
+Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864
+qemu-img: Could not open 'TEST_DIR/t.IMGFMT': L1 table is too small
+
+== Invalid backing file size ==
+Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864
+qemu-io: can't open device TEST_DIR/t.qcow2: Backing file name too long
+no file open, try 'help open'
+
+== Invalid L2 entry (huge physical offset) ==
+Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864
+wrote 512/512 bytes at offset 0
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+qemu-img: Could not create snapshot 'test': -27 (File too large)
+qemu-img: Could not create snapshot 'test': -11 (Resource temporarily unavailable)
+
+== Invalid snapshot L1 table ==
+Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864
+wrote 512/512 bytes at offset 0
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+qemu-img: Failed to load snapshot: Snapshot L1 table too large
+*** done
diff --git a/tests/qemu-iotests/083 b/tests/qemu-iotests/083
new file mode 100755
index 0000000000..f764534782
--- /dev/null
+++ b/tests/qemu-iotests/083
@@ -0,0 +1,129 @@
+#!/bin/bash
+#
+# Test NBD client unexpected disconnect
+#
+# Copyright Red Hat, Inc. 2014
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+# creator
+owner=stefanha@redhat.com
+
+seq=`basename $0`
+echo "QA output created by $seq"
+
+here=`pwd`
+tmp=/tmp/$$
+status=1 # failure is the default!
+
+# get standard environment, filters and checks
+. ./common.rc
+. ./common.filter
+
+_supported_fmt generic
+_supported_proto nbd
+_supported_os Linux
+
+# Pick a TCP port based on our pid. This way multiple instances of this test
+# can run in parallel without conflicting.
+choose_tcp_port() {
+ echo $((($$ % 31744) + 1024)) # 1024 <= port < 32768
+}
+
+wait_for_tcp_port() {
+ while ! (netstat --tcp --listening --numeric | \
+ grep "$1.*0.0.0.0:\*.*LISTEN") 2>&1 >/dev/null; do
+ sleep 0.1
+ done
+}
+
+filter_nbd() {
+ # nbd.c error messages contain function names and line numbers that are
+ # prone to change. Message ordering sometimes depends on timing between
+ # send and receive callbacks, which makes it unreliable.
+ #
+ # Filter out the TCP port number since this changes between runs.
+ sed -e 's#^nbd.c:.*##g' \
+ -e 's#nbd:127.0.0.1:[^:]*:#nbd:127.0.0.1:PORT:#g'
+}
+
+check_disconnect() {
+ event=$1
+ when=$2
+ negotiation=$3
+ echo "=== Check disconnect $when $event ==="
+ echo
+
+ port=$(choose_tcp_port)
+
+ cat > "$TEST_DIR/nbd-fault-injector.conf" <<EOF
+[inject-error]
+event=$event
+when=$when
+EOF
+
+ if [ "$negotiation" = "--classic-negotiation" ]; then
+ extra_args=--classic-negotiation
+ nbd_url="nbd:127.0.0.1:$port"
+ else
+ nbd_url="nbd:127.0.0.1:$port:exportname=foo"
+ fi
+
+ ./nbd-fault-injector.py $extra_args "127.0.0.1:$port" "$TEST_DIR/nbd-fault-injector.conf" 2>&1 >/dev/null &
+ wait_for_tcp_port "127.0.0.1:$port"
+ $QEMU_IO -c "read 0 512" "$nbd_url" 2>&1 | _filter_qemu_io | filter_nbd
+
+ echo
+}
+
+for event in neg1 "export" neg2 request reply data; do
+ for when in before after; do
+ check_disconnect "$event" "$when"
+ done
+
+ # Also inject short replies from the NBD server
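+ # (a numeric "when" is the number of bytes into the corresponding protocol
+ # struct after which nbd-fault-injector.py drops the connection)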
+ case "$event" in
+ neg1)
+ for when in 8 16; do
+ check_disconnect "$event" "$when"
+ done
+ ;;
+ "export")
+ for when in 4 12 16; do
+ check_disconnect "$event" "$when"
+ done
+ ;;
+ neg2)
+ for when in 8 10; do
+ check_disconnect "$event" "$when"
+ done
+ ;;
+ reply)
+ for when in 4 8; do
+ check_disconnect "$event" "$when"
+ done
+ ;;
+ esac
+done
+
+# Also check classic negotiation without export information
+for when in before 8 16 24 28 after; do
+ check_disconnect "neg-classic" "$when" --classic-negotiation
+done
+
+# success, all done
+echo "*** done"
+rm -f $seq.full
+status=0
diff --git a/tests/qemu-iotests/083.out b/tests/qemu-iotests/083.out
new file mode 100644
index 0000000000..85ee8d6dd7
--- /dev/null
+++ b/tests/qemu-iotests/083.out
@@ -0,0 +1,163 @@
+QA output created by 083
+=== Check disconnect before neg1 ===
+
+
+qemu-io: can't open device nbd:127.0.0.1:PORT:exportname=foo: Could not open image: Invalid argument
+no file open, try 'help open'
+
+=== Check disconnect after neg1 ===
+
+
+qemu-io: can't open device nbd:127.0.0.1:PORT:exportname=foo: Could not open image: Invalid argument
+no file open, try 'help open'
+
+=== Check disconnect 8 neg1 ===
+
+
+qemu-io: can't open device nbd:127.0.0.1:PORT:exportname=foo: Could not open image: Invalid argument
+no file open, try 'help open'
+
+=== Check disconnect 16 neg1 ===
+
+
+qemu-io: can't open device nbd:127.0.0.1:PORT:exportname=foo: Could not open image: Invalid argument
+no file open, try 'help open'
+
+=== Check disconnect before export ===
+
+
+qemu-io: can't open device nbd:127.0.0.1:PORT:exportname=foo: Could not open image: Invalid argument
+no file open, try 'help open'
+
+=== Check disconnect after export ===
+
+
+qemu-io: can't open device nbd:127.0.0.1:PORT:exportname=foo: Could not open image: Invalid argument
+no file open, try 'help open'
+
+=== Check disconnect 4 export ===
+
+
+qemu-io: can't open device nbd:127.0.0.1:PORT:exportname=foo: Could not open image: Invalid argument
+no file open, try 'help open'
+
+=== Check disconnect 12 export ===
+
+
+qemu-io: can't open device nbd:127.0.0.1:PORT:exportname=foo: Could not open image: Invalid argument
+no file open, try 'help open'
+
+=== Check disconnect 16 export ===
+
+
+qemu-io: can't open device nbd:127.0.0.1:PORT:exportname=foo: Could not open image: Invalid argument
+no file open, try 'help open'
+
+=== Check disconnect before neg2 ===
+
+
+qemu-io: can't open device nbd:127.0.0.1:PORT:exportname=foo: Could not open image: Invalid argument
+no file open, try 'help open'
+
+=== Check disconnect after neg2 ===
+
+
+qemu-io: can't open device nbd:127.0.0.1:PORT:exportname=foo: Could not read image for determining its format: Input/output error
+no file open, try 'help open'
+
+=== Check disconnect 8 neg2 ===
+
+
+qemu-io: can't open device nbd:127.0.0.1:PORT:exportname=foo: Could not open image: Invalid argument
+no file open, try 'help open'
+
+=== Check disconnect 10 neg2 ===
+
+
+qemu-io: can't open device nbd:127.0.0.1:PORT:exportname=foo: Could not open image: Invalid argument
+no file open, try 'help open'
+
+=== Check disconnect before request ===
+
+
+qemu-io: can't open device nbd:127.0.0.1:PORT:exportname=foo: Could not read image for determining its format: Input/output error
+no file open, try 'help open'
+
+=== Check disconnect after request ===
+
+
+qemu-io: can't open device nbd:127.0.0.1:PORT:exportname=foo: Could not read image for determining its format: Input/output error
+no file open, try 'help open'
+
+=== Check disconnect before reply ===
+
+
+qemu-io: can't open device nbd:127.0.0.1:PORT:exportname=foo: Could not read image for determining its format: Input/output error
+no file open, try 'help open'
+
+=== Check disconnect after reply ===
+
+
+qemu-io: can't open device nbd:127.0.0.1:PORT:exportname=foo: Could not read image for determining its format: Input/output error
+no file open, try 'help open'
+
+=== Check disconnect 4 reply ===
+
+
+qemu-io: can't open device nbd:127.0.0.1:PORT:exportname=foo: Could not read image for determining its format: Input/output error
+no file open, try 'help open'
+
+=== Check disconnect 8 reply ===
+
+
+qemu-io: can't open device nbd:127.0.0.1:PORT:exportname=foo: Could not read image for determining its format: Input/output error
+no file open, try 'help open'
+
+=== Check disconnect before data ===
+
+
+qemu-io: can't open device nbd:127.0.0.1:PORT:exportname=foo: Could not read image for determining its format: Input/output error
+no file open, try 'help open'
+
+=== Check disconnect after data ===
+
+
+read failed: Input/output error
+
+=== Check disconnect before neg-classic ===
+
+
+qemu-io: can't open device nbd:127.0.0.1:PORT: Could not open image: Invalid argument
+no file open, try 'help open'
+
+=== Check disconnect 8 neg-classic ===
+
+
+qemu-io: can't open device nbd:127.0.0.1:PORT: Could not open image: Invalid argument
+no file open, try 'help open'
+
+=== Check disconnect 16 neg-classic ===
+
+
+qemu-io: can't open device nbd:127.0.0.1:PORT: Could not open image: Invalid argument
+no file open, try 'help open'
+
+=== Check disconnect 24 neg-classic ===
+
+
+qemu-io: can't open device nbd:127.0.0.1:PORT: Could not open image: Invalid argument
+no file open, try 'help open'
+
+=== Check disconnect 28 neg-classic ===
+
+
+qemu-io: can't open device nbd:127.0.0.1:PORT: Could not open image: Invalid argument
+no file open, try 'help open'
+
+=== Check disconnect after neg-classic ===
+
+
+qemu-io: can't open device nbd:127.0.0.1:PORT: Could not read image for determining its format: Input/output error
+no file open, try 'help open'
+
+*** done
diff --git a/tests/qemu-iotests/084 b/tests/qemu-iotests/084
new file mode 100755
index 0000000000..cb4d7b729e
--- /dev/null
+++ b/tests/qemu-iotests/084
@@ -0,0 +1,104 @@
+#!/bin/bash
+#
+# Test case for VDI header corruption: image size too large, and too many blocks
+#
+# Copyright (C) 2013 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+# creator
+owner=jcody@redhat.com
+
+seq=`basename $0`
+echo "QA output created by $seq"
+
+here=`pwd`
+tmp=/tmp/$$
+status=1 # failure is the default!
+
+_cleanup()
+{
+ _cleanup_test_img
+}
+trap "_cleanup; exit \$status" 0 1 2 3 15
+
+# get standard environment, filters and checks
+. ./common.rc
+. ./common.filter
+
+# This tests vdi-specific header fields
+_supported_fmt vdi
+_supported_proto generic
+_supported_os Linux
+
+ds_offset=368 # disk image size field offset
+bs_offset=376 # block size field offset
+bii_offset=384 # block in image field offset
+
+echo
+echo "=== Testing image size bounds ==="
+echo
+_make_test_img 64M
+
+# Check for an image size that is too large:
+# poke the maximum image size, and an appropriate blocks_in_image value
+echo "Test 1: Maximum size (1024 TB):"
+poke_file "$TEST_IMG" "$ds_offset" "\x00\x00\xf0\xff\xff\xff\x03\x00"
+poke_file "$TEST_IMG" "$bii_offset" "\xff\xff\xff\x3f"
+_img_info
+
+echo
+echo "Test 2: Size too large (1024TB + 1)"
+# This should be too large (-EINVAL):
+poke_file "$TEST_IMG" "$ds_offset" "\x00\x00\xf1\xff\xff\xff\x03\x00"
+_img_info
+
+echo
+echo "Test 3: Size valid (64M), but Blocks In Image too small (63)"
+# This sets the size to 64M, but with a blocks_in_image value that is
+# too small
+poke_file "$TEST_IMG" "$ds_offset" "\x00\x00\x00\x04\x00\x00\x00\x00"
+# For a 64M image, we would need a blocks_in_image value of at least 64,
+# so 63 should be too small and give us -ENOTSUP
+poke_file "$TEST_IMG" "$bii_offset" "\x3f\x00\x00\x00"
+_img_info
+
+echo
+echo "Test 4: Size valid (64M), but Blocks In Image exceeds max allowed"
+# Now check the bounds of blocks_in_image - 0x3fffffff should be the max
+# value here, and we should get -ENOTSUP
+poke_file "$TEST_IMG" "$bii_offset" "\x00\x00\x00\x40"
+_img_info
+
+# Finally, 1MB is the only supported block size. Verify that any other
+# value, both smaller and larger, results in an error.
+echo
+echo "Test 5: Valid Image: 64MB, Blocks In Image 64, Block Size 1MB"
+poke_file "$TEST_IMG" "$bii_offset" "\x40\x00\x00\x00" # reset bii to valid
+poke_file "$TEST_IMG" "$bs_offset" "\x00\x00\x10\x00" # valid
+_img_info
+echo
+echo "Test 6: Block Size != 1MB; too small test (1MB - 1)"
+poke_file "$TEST_IMG" "$bs_offset" "\xff\xff\x0f\x00" # invalid (too small)
+_img_info
+echo
+echo "Test 7: Block Size != 1MB; too large test (1MB + 64KB)"
+poke_file "$TEST_IMG" "$bs_offset" "\x00\x00\x11\x00" # invalid (too large)
+_img_info
+# success, all done
+echo
+echo "*** done"
+rm -f $seq.full
+status=0
diff --git a/tests/qemu-iotests/084.out b/tests/qemu-iotests/084.out
new file mode 100644
index 0000000000..c7120d9b0b
--- /dev/null
+++ b/tests/qemu-iotests/084.out
@@ -0,0 +1,30 @@
+QA output created by 084
+
+=== Testing image size bounds ===
+
+Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864
+Test 1: Maximum size (1024 TB):
+qemu-img: Could not open 'TEST_DIR/t.IMGFMT': Could not open 'TEST_DIR/t.IMGFMT': Invalid argument
+
+Test 2: Size too large (1024TB + 1)
+qemu-img: Could not open 'TEST_DIR/t.IMGFMT': Unsupported VDI image size (size is 0x3fffffff10000, max supported is 0x3fffffff00000)
+
+Test 3: Size valid (64M), but Blocks In Image too small (63)
+qemu-img: Could not open 'TEST_DIR/t.IMGFMT': unsupported VDI image (disk size 67108864, image bitmap has room for 66060288)
+
+Test 4: Size valid (64M), but Blocks In Image exceeds max allowed
+qemu-img: Could not open 'TEST_DIR/t.IMGFMT': unsupported VDI image (too many blocks 1073741824, max is 1073741823)
+
+Test 5: Valid Image: 64MB, Blocks In Image 64, Block Size 1MB
+image: TEST_DIR/t.IMGFMT
+file format: IMGFMT
+virtual size: 64M (67108864 bytes)
+cluster_size: 1048576
+
+Test 6: Block Size != 1MB; too small test (1MB - 1)
+qemu-img: Could not open 'TEST_DIR/t.IMGFMT': unsupported VDI image (block size 1048575 is not 1048576)
+
+Test 7: Block Size != 1MB; too large test (1MB + 64KB)
+qemu-img: Could not open 'TEST_DIR/t.IMGFMT': unsupported VDI image (block size 1114112 is not 1048576)
+
+*** done
diff --git a/tests/qemu-iotests/087 b/tests/qemu-iotests/087
index 53b6c43bff..82c56b1394 100755
--- a/tests/qemu-iotests/087
+++ b/tests/qemu-iotests/087
@@ -73,6 +73,91 @@ run_qemu <<EOF
EOF
echo
+echo === Duplicate ID ===
+echo
+
+run_qemu <<EOF
+{ "execute": "qmp_capabilities" }
+{ "execute": "blockdev-add",
+ "arguments": {
+ "options": {
+ "driver": "$IMGFMT",
+ "id": "disk",
+ "node-name": "test-node",
+ "file": {
+ "driver": "file",
+ "filename": "$TEST_IMG"
+ }
+ }
+ }
+ }
+{ "execute": "blockdev-add",
+ "arguments": {
+ "options": {
+ "driver": "$IMGFMT",
+ "id": "disk",
+ "file": {
+ "driver": "file",
+ "filename": "$TEST_IMG"
+ }
+ }
+ }
+ }
+{ "execute": "blockdev-add",
+ "arguments": {
+ "options": {
+ "driver": "$IMGFMT",
+ "id": "test-node",
+ "file": {
+ "driver": "file",
+ "filename": "$TEST_IMG"
+ }
+ }
+ }
+ }
+{ "execute": "blockdev-add",
+ "arguments": {
+ "options": {
+ "driver": "$IMGFMT",
+ "id": "disk2",
+ "node-name": "disk",
+ "file": {
+ "driver": "file",
+ "filename": "$TEST_IMG"
+ }
+ }
+ }
+ }
+{ "execute": "blockdev-add",
+ "arguments": {
+ "options": {
+ "driver": "$IMGFMT",
+ "id": "disk2",
+ "node-name": "test-node",
+ "file": {
+ "driver": "file",
+ "filename": "$TEST_IMG"
+ }
+ }
+ }
+ }
+{ "execute": "blockdev-add",
+ "arguments": {
+ "options": {
+ "driver": "$IMGFMT",
+ "id": "disk3",
+ "node-name": "disk3",
+ "file": {
+ "driver": "file",
+ "filename": "$TEST_IMG"
+ }
+ }
+ }
+ }
+{ "execute": "quit" }
+EOF
+
+echo
echo === aio=native without O_DIRECT ===
echo
@@ -99,6 +184,23 @@ echo === Encrypted image ===
echo
_make_test_img -o encryption=on $size
+run_qemu -S <<EOF
+{ "execute": "qmp_capabilities" }
+{ "execute": "blockdev-add",
+ "arguments": {
+ "options": {
+ "driver": "$IMGFMT",
+ "id": "disk",
+ "file": {
+ "driver": "file",
+ "filename": "$TEST_IMG"
+ }
+ }
+ }
+ }
+{ "execute": "quit" }
+EOF
+
run_qemu <<EOF
{ "execute": "qmp_capabilities" }
{ "execute": "blockdev-add",
diff --git a/tests/qemu-iotests/087.out b/tests/qemu-iotests/087.out
index b87103252e..7fbee3ff5e 100644
--- a/tests/qemu-iotests/087.out
+++ b/tests/qemu-iotests/087.out
@@ -13,6 +13,24 @@ QMP_VERSION
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "DEVICE_TRAY_MOVED", "data": {"device": "floppy0", "tray-open": true}}
+=== Duplicate ID ===
+
+Testing:
+QMP_VERSION
+{"return": {}}
+{"return": {}}
+{"error": {"class": "GenericError", "desc": "Device with id 'disk' already exists"}}
+{"error": {"class": "GenericError", "desc": "Device with node-name 'test-node' already exists"}}
+main-loop: WARNING: I/O thread spun for 1000 iterations
+{"error": {"class": "GenericError", "desc": "could not open disk image disk2: node-name=disk is conflicting with a device id"}}
+{"error": {"class": "GenericError", "desc": "could not open disk image disk2: Duplicate node name"}}
+{"error": {"class": "GenericError", "desc": "could not open disk image disk3: node-name=disk3 is conflicting with a device id"}}
+{"return": {}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN"}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "DEVICE_TRAY_MOVED", "data": {"device": "ide1-cd0", "tray-open": true}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "DEVICE_TRAY_MOVED", "data": {"device": "floppy0", "tray-open": true}}
+
+
=== aio=native without O_DIRECT ===
Testing:
@@ -28,7 +46,7 @@ QMP_VERSION
=== Encrypted image ===
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=134217728 encryption=on
-Testing:
+Testing: -S
QMP_VERSION
{"return": {}}
{"error": {"class": "GenericError", "desc": "blockdev-add doesn't support encrypted devices"}}
@@ -37,4 +55,13 @@ QMP_VERSION
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "DEVICE_TRAY_MOVED", "data": {"device": "ide1-cd0", "tray-open": true}}
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "DEVICE_TRAY_MOVED", "data": {"device": "floppy0", "tray-open": true}}
+Testing:
+QMP_VERSION
+{"return": {}}
+{"error": {"class": "GenericError", "desc": "could not open disk image disk: Guest must be stopped for opening of encrypted image"}}
+{"return": {}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN"}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "DEVICE_TRAY_MOVED", "data": {"device": "ide1-cd0", "tray-open": true}}
+{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "DEVICE_TRAY_MOVED", "data": {"device": "floppy0", "tray-open": true}}
+
*** done
diff --git a/tests/qemu-iotests/088 b/tests/qemu-iotests/088
new file mode 100755
index 0000000000..c09adf8023
--- /dev/null
+++ b/tests/qemu-iotests/088
@@ -0,0 +1,64 @@
+#!/bin/bash
+#
+# vpc (VHD) format input validation tests
+#
+# Copyright (C) 2014 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+# creator
+owner=kwolf@redhat.com
+
+seq=`basename $0`
+echo "QA output created by $seq"
+
+here=`pwd`
+tmp=/tmp/$$
+status=1 # failure is the default!
+
+_cleanup()
+{
+ rm -f $TEST_IMG.snap
+ _cleanup_test_img
+}
+trap "_cleanup; exit \$status" 0 1 2 3 15
+
+# get standard environment, filters and checks
+. ./common.rc
+. ./common.filter
+
+_supported_fmt vpc
+_supported_proto generic
+_supported_os Linux
+
+offset_block_size=$((512 + 32))
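+# (the dynamic disk header follows the 512-byte copy of the footer; its
+# block_size field is at offset 32)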
+
+echo
+echo "== Invalid block size =="
+_make_test_img 64M
+poke_file "$TEST_IMG" "$offset_block_size" "\x00\x00\x00\x00"
+{ $QEMU_IO -c "read 0 512" $TEST_IMG; } 2>&1 | _filter_qemu_io | _filter_testdir
+{ $QEMU_IO -c "write 0 512" $TEST_IMG; } 2>&1 | _filter_qemu_io | _filter_testdir
+poke_file "$TEST_IMG" "$offset_block_size" "\x00\x00\x00\x80"
+{ $QEMU_IO -c "read 0 512" $TEST_IMG; } 2>&1 | _filter_qemu_io | _filter_testdir
+{ $QEMU_IO -c "write 0 512" $TEST_IMG; } 2>&1 | _filter_qemu_io | _filter_testdir
+poke_file "$TEST_IMG" "$offset_block_size" "\x12\x34\x56\x78"
+{ $QEMU_IO -c "read 0 512" $TEST_IMG; } 2>&1 | _filter_qemu_io | _filter_testdir
+{ $QEMU_IO -c "write 0 512" $TEST_IMG; } 2>&1 | _filter_qemu_io | _filter_testdir
+
+# success, all done
+echo "*** done"
+rm -f $seq.full
+status=0
diff --git a/tests/qemu-iotests/088.out b/tests/qemu-iotests/088.out
new file mode 100644
index 0000000000..d961609e49
--- /dev/null
+++ b/tests/qemu-iotests/088.out
@@ -0,0 +1,17 @@
+QA output created by 088
+
+== Invalid block size ==
+Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864
+qemu-io: can't open device TEST_DIR/t.vpc: Invalid block size 0
+no file open, try 'help open'
+qemu-io: can't open device TEST_DIR/t.vpc: Invalid block size 0
+no file open, try 'help open'
+qemu-io: can't open device TEST_DIR/t.vpc: Invalid block size 128
+no file open, try 'help open'
+qemu-io: can't open device TEST_DIR/t.vpc: Invalid block size 128
+no file open, try 'help open'
+qemu-io: can't open device TEST_DIR/t.vpc: Invalid block size 305419896
+no file open, try 'help open'
+qemu-io: can't open device TEST_DIR/t.vpc: Invalid block size 305419896
+no file open, try 'help open'
+*** done
diff --git a/tests/qemu-iotests/common b/tests/qemu-iotests/common
index 5795358924..0aaf84d015 100644
--- a/tests/qemu-iotests/common
+++ b/tests/qemu-iotests/common
@@ -136,7 +136,10 @@ common options
check options
-raw test raw (default)
+ -bochs test bochs
-cow test cow
+ -cloop test cloop
+ -parallels test parallels
-qcow test qcow
-qcow2 test qcow2
-qed test qed
@@ -173,11 +176,29 @@ testlist options
xpand=false
;;
+ -bochs)
+ IMGFMT=bochs
+ IMGFMT_GENERIC=false
+ xpand=false
+ ;;
+
-cow)
IMGFMT=cow
xpand=false
;;
+ -cloop)
+ IMGFMT=cloop
+ IMGFMT_GENERIC=false
+ xpand=false
+ ;;
+
+ -parallels)
+ IMGFMT=parallels
+ IMGFMT_GENERIC=false
+ xpand=false
+ ;;
+
-qcow)
IMGFMT=qcow
xpand=false
diff --git a/tests/qemu-iotests/common.rc b/tests/qemu-iotests/common.rc
index 881079bdb9..7f00883cad 100644
--- a/tests/qemu-iotests/common.rc
+++ b/tests/qemu-iotests/common.rc
@@ -364,6 +364,9 @@ _fail()
#
_supported_fmt()
{
+ # "generic" is suitable for most image formats. It doesn't work for some
+ # formats, however (most notably read-only formats), so those can opt out
+ # by setting IMGFMT_GENERIC to false.
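+ # (the -bochs, -cloop and -parallels options in ./common do this)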
for f; do
if [ "$f" = "$IMGFMT" -o "$f" = "generic" -a "$IMGFMT_GENERIC" = "true" ]; then
return
diff --git a/tests/qemu-iotests/group b/tests/qemu-iotests/group
index e96eafdf43..864643d256 100644
--- a/tests/qemu-iotests/group
+++ b/tests/qemu-iotests/group
@@ -81,10 +81,17 @@
072 rw auto quick
073 rw auto quick
074 rw auto quick
+075 rw auto
+076 auto
077 rw auto quick
+078 rw auto
079 rw auto
+080 rw auto
081 rw auto
082 rw auto quick
-085 rw auto quick
+083 rw auto
+084 img auto
+085 rw auto
086 rw auto quick
-087 rw auto quick
+087 rw auto
+088 rw auto
diff --git a/tests/qemu-iotests/iotests.py b/tests/qemu-iotests/iotests.py
index e4fa9af714..f6c437c0c3 100644
--- a/tests/qemu-iotests/iotests.py
+++ b/tests/qemu-iotests/iotests.py
@@ -257,7 +257,7 @@ class QMPTestCase(unittest.TestCase):
self.assert_no_active_block_jobs()
return result
- def wait_until_completed(self, drive='drive0'):
+ def wait_until_completed(self, drive='drive0', check_offset=True):
'''Wait for a block job to finish, returning the event'''
completed = False
while not completed:
@@ -265,7 +265,8 @@ class QMPTestCase(unittest.TestCase):
if event['event'] == 'BLOCK_JOB_COMPLETED':
self.assert_qmp(event, 'data/device', drive)
self.assert_qmp_absent(event, 'data/error')
- self.assert_qmp(event, 'data/offset', self.image_len)
+ if check_offset:
+ self.assert_qmp(event, 'data/offset', self.image_len)
self.assert_qmp(event, 'data/len', self.image_len)
completed = True
diff --git a/tests/qemu-iotests/nbd-fault-injector.py b/tests/qemu-iotests/nbd-fault-injector.py
new file mode 100755
index 0000000000..6c07191a5a
--- /dev/null
+++ b/tests/qemu-iotests/nbd-fault-injector.py
@@ -0,0 +1,264 @@
+#!/usr/bin/env python
+# NBD server - fault injection utility
+#
+# Configuration file syntax:
+# [inject-error "disconnect-neg1"]
+# event=neg1
+# io=readwrite
+# when=before
+#
+# Note that Python's ConfigParser squashes together all sections with the same
+# name, so give each [inject-error] a unique name.
+#
+# inject-error options:
+# event - name of the trigger event
+# "neg1" - first part of negotiation struct
+# "export" - export struct
+# "neg2" - second part of negotiation struct
+# "request" - NBD request struct
+# "reply" - NBD reply struct
+# "data" - request/reply data
+# io - I/O direction that triggers this rule:
+# "read", "write", or "readwrite"
+# default: readwrite
+# when - after how many bytes to inject the fault
+# -1 - inject error after I/O
+# 0 - inject error before I/O
+# integer - inject error after integer bytes
+# "before" - alias for 0
+# "after" - alias for -1
+# default: before
+#
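+# For example, a rule that drops the connection 8 bytes into the reply
+# struct (as exercised by qemu-iotests 083) could look like:
+#
+# [inject-error "short-reply"]
+# event=reply
+# when=8
+#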
+# Currently the only error injection action is to terminate the server process.
+# This resets the TCP connection and thus forces the client to handle
+# unexpected connection termination.
+#
+# Other error injection actions could be added in the future.
+#
+# Copyright Red Hat, Inc. 2014
+#
+# Authors:
+# Stefan Hajnoczi <stefanha@redhat.com>
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or later.
+# See the COPYING file in the top-level directory.
+
+import sys
+import socket
+import struct
+import collections
+import ConfigParser
+
+FAKE_DISK_SIZE = 8 * 1024 * 1024 * 1024 # 8 GB
+
+# Protocol constants
+NBD_CMD_READ = 0
+NBD_CMD_WRITE = 1
+NBD_CMD_DISC = 2
+NBD_REQUEST_MAGIC = 0x25609513
+NBD_REPLY_MAGIC = 0x67446698
+NBD_PASSWD = 0x4e42444d41474943
+NBD_OPTS_MAGIC = 0x49484156454F5054
+NBD_CLIENT_MAGIC = 0x0000420281861253
+NBD_OPT_EXPORT_NAME = 1 << 0
+
+# Protocol structs
+neg_classic_struct = struct.Struct('>QQQI124x')
+neg1_struct = struct.Struct('>QQH')
+export_tuple = collections.namedtuple('Export', 'reserved magic opt len')
+export_struct = struct.Struct('>IQII')
+neg2_struct = struct.Struct('>QH124x')
+request_tuple = collections.namedtuple('Request', 'magic type handle from_ len')
+request_struct = struct.Struct('>IIQQI')
+reply_struct = struct.Struct('>IIQ')
+
+def err(msg):
+ sys.stderr.write(msg + '\n')
+ sys.exit(1)
+
+def recvall(sock, bufsize):
+ received = 0
+ chunks = []
+ while received < bufsize:
+ chunk = sock.recv(bufsize - received)
+ if len(chunk) == 0:
+ raise Exception('unexpected disconnect')
+ chunks.append(chunk)
+ received += len(chunk)
+ return ''.join(chunks)
+
+class Rule(object):
+ def __init__(self, name, event, io, when):
+ self.name = name
+ self.event = event
+ self.io = io
+ self.when = when
+
+ def match(self, event, io):
+ if event != self.event:
+ return False
+ if io != self.io and self.io != 'readwrite':
+ return False
+ return True
+
+class FaultInjectionSocket(object):
+ def __init__(self, sock, rules):
+ self.sock = sock
+ self.rules = rules
+
+ def check(self, event, io, bufsize=None):
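+ # Return how many bytes of this transfer to perform; a matching rule may
+ # truncate it. Rules with when=0, or any matching rule on the post-I/O
+ # check (bufsize=None), terminate the server immediately.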
+ for rule in self.rules:
+ if rule.match(event, io):
+ if rule.when == 0 or bufsize is None:
+ print 'Closing connection on rule match %s' % rule.name
+ sys.exit(0)
+ if rule.when != -1:
+ return rule.when
+ return bufsize
+
+ def send(self, buf, event):
+ bufsize = self.check(event, 'write', bufsize=len(buf))
+ self.sock.sendall(buf[:bufsize])
+ self.check(event, 'write')
+
+ def recv(self, bufsize, event):
+ bufsize = self.check(event, 'read', bufsize=bufsize)
+ data = recvall(self.sock, bufsize)
+ self.check(event, 'read')
+ return data
+
+ def close(self):
+ self.sock.close()
+
+def negotiate_classic(conn):
+ buf = neg_classic_struct.pack(NBD_PASSWD, NBD_CLIENT_MAGIC,
+ FAKE_DISK_SIZE, 0)
+ conn.send(buf, event='neg-classic')
+
+def negotiate_export(conn):
+ # Send negotiation part 1
+ buf = neg1_struct.pack(NBD_PASSWD, NBD_OPTS_MAGIC, 0)
+ conn.send(buf, event='neg1')
+
+ # Receive export option
+ buf = conn.recv(export_struct.size, event='export')
+ export = export_tuple._make(export_struct.unpack(buf))
+ assert export.magic == NBD_OPTS_MAGIC
+ assert export.opt == NBD_OPT_EXPORT_NAME
+ name = conn.recv(export.len, event='export-name')
+
+ # Send negotiation part 2
+ buf = neg2_struct.pack(FAKE_DISK_SIZE, 0)
+ conn.send(buf, event='neg2')
+
+def negotiate(conn, use_export):
+ '''Negotiate export with client'''
+ if use_export:
+ negotiate_export(conn)
+ else:
+ negotiate_classic(conn)
+
+def read_request(conn):
+ '''Parse NBD request from client'''
+ buf = conn.recv(request_struct.size, event='request')
+ req = request_tuple._make(request_struct.unpack(buf))
+ assert req.magic == NBD_REQUEST_MAGIC
+ return req
+
+def write_reply(conn, error, handle):
+ buf = reply_struct.pack(NBD_REPLY_MAGIC, error, handle)
+ conn.send(buf, event='reply')
+
+def handle_connection(conn, use_export):
+ negotiate(conn, use_export)
+ while True:
+ req = read_request(conn)
+ if req.type == NBD_CMD_READ:
+ write_reply(conn, 0, req.handle)
+ conn.send('\0' * req.len, event='data')
+ elif req.type == NBD_CMD_WRITE:
+ _ = conn.recv(req.len, event='data')
+ write_reply(conn, 0, req.handle)
+ elif req.type == NBD_CMD_DISC:
+ break
+ else:
+ print 'unrecognized command type %#02x' % req.type
+ break
+ conn.close()
+
+def run_server(sock, rules, use_export):
+ while True:
+ conn, _ = sock.accept()
+ handle_connection(FaultInjectionSocket(conn, rules), use_export)
+
+def parse_inject_error(name, options):
+ if 'event' not in options:
+ err('missing \"event\" option in %s' % name)
+ event = options['event']
+ if event not in ('neg-classic', 'neg1', 'export', 'neg2', 'request', 'reply', 'data'):
+ err('invalid \"event\" option value \"%s\" in %s' % (event, name))
+ io = options.get('io', 'readwrite')
+ if io not in ('read', 'write', 'readwrite'):
+ err('invalid \"io\" option value \"%s\" in %s' % (io, name))
+ when = options.get('when', 'before')
+ try:
+ when = int(when)
+ except ValueError:
+ if when == 'before':
+ when = 0
+ elif when == 'after':
+ when = -1
+ else:
+ err('invalid \"when\" option value \"%s\" in %s' % (when, name))
+ return Rule(name, event, io, when)
+
+def parse_config(config):
+ rules = []
+ for name in config.sections():
+ if name.startswith('inject-error'):
+ options = dict(config.items(name))
+ rules.append(parse_inject_error(name, options))
+ else:
+ err('invalid config section name: %s' % name)
+ return rules
+
+def load_rules(filename):
+ config = ConfigParser.RawConfigParser()
+ with open(filename, 'rt') as f:
+ config.readfp(f, filename)
+ return parse_config(config)
+
+def open_socket(path):
+ '''Open a TCP or UNIX domain listen socket'''
+ if ':' in path:
+ host, port = path.split(':', 1)
+ sock = socket.socket()
+ sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+ sock.bind((host, int(port)))
+ else:
+ sock = socket.socket(socket.AF_UNIX)
+ sock.bind(path)
+ sock.listen(0)
+ print 'Listening on %s' % path
+ return sock
+
+def usage(args):
+ sys.stderr.write('usage: %s [--classic-negotiation] <host:port>|<unix-path> <config-file>\n' % args[0])
+ sys.stderr.write('Run a fault-injector NBD server with rules defined in a config file.\n')
+ sys.exit(1)
+
+def main(args):
+ if len(args) != 3 and len(args) != 4:
+ usage(args)
+ use_export = True
+ if args[1] == '--classic-negotiation':
+ use_export = False
+ elif len(args) == 4:
+ usage(args)
+ sock = open_socket(args[1 if use_export else 2])
+ rules = load_rules(args[2 if use_export else 3])
+ run_server(sock, rules, use_export)
+ return 0
+
+if __name__ == '__main__':
+ sys.exit(main(sys.argv))
diff --git a/tests/qemu-iotests/sample_images/empty.bochs.bz2 b/tests/qemu-iotests/sample_images/empty.bochs.bz2
new file mode 100644
index 0000000000..7a29c6ed76
--- /dev/null
+++ b/tests/qemu-iotests/sample_images/empty.bochs.bz2
Binary files differ
diff --git a/tests/qemu-iotests/sample_images/fake.parallels.bz2 b/tests/qemu-iotests/sample_images/fake.parallels.bz2
new file mode 100644
index 0000000000..ffb5f13bac
--- /dev/null
+++ b/tests/qemu-iotests/sample_images/fake.parallels.bz2
Binary files differ
diff --git a/tests/qemu-iotests/sample_images/simple-pattern.cloop.bz2 b/tests/qemu-iotests/sample_images/simple-pattern.cloop.bz2
new file mode 100644
index 0000000000..a02d2ee4c7
--- /dev/null
+++ b/tests/qemu-iotests/sample_images/simple-pattern.cloop.bz2
Binary files differ
diff --git a/tests/qom-test.c b/tests/qom-test.c
index b6671fbec3..6d9a00b448 100644
--- a/tests/qom-test.c
+++ b/tests/qom-test.c
@@ -10,6 +10,7 @@
#include <glib.h>
#include <string.h>
+#include "qemu-common.h"
#include "libqtest.h"
#include "qemu/osdep.h"
#include "qapi/qmp/types.h"
@@ -43,6 +44,40 @@ static bool is_blacklisted(const char *arch, const char *mach)
return false;
}
+static void test_properties(const char *path)
+{
+ char *child_path;
+ QDict *response, *tuple;
+ QList *list;
+ QListEntry *entry;
+
+ g_test_message("Obtaining properties of %s", path);
+ response = qmp("{ 'execute': 'qom-list',"
+ " 'arguments': { 'path': '%s' } }", path);
+ g_assert(response);
+
+ g_assert(qdict_haskey(response, "return"));
+ list = qobject_to_qlist(qdict_get(response, "return"));
+ QLIST_FOREACH_ENTRY(list, entry) {
+ tuple = qobject_to_qdict(qlist_entry_obj(entry));
+ if (strstart(qdict_get_str(tuple, "type"), "child<", NULL)) {
+ child_path = g_strdup_printf("%s/%s",
+ path, qdict_get_str(tuple, "name"));
+ test_properties(child_path);
+ g_free(child_path);
+ } else {
+ const char *prop = qdict_get_str(tuple, "name");
+ g_test_message("Testing property %s.%s", path, prop);
+ response = qmp("{ 'execute': 'qom-get',"
+ " 'arguments': { 'path': '%s',"
+ " 'property': '%s' } }",
+ path, prop);
+ /* qom-get may fail, but it should not crash (e.g., segfault). */
+ g_assert(response);
+ }
+ }
+}
+
static void test_machine(gconstpointer data)
{
const char *machine = data;
@@ -51,8 +86,12 @@ static void test_machine(gconstpointer data)
args = g_strdup_printf("-machine %s", machine);
qtest_start(args);
+
+ test_properties("/machine");
+
response = qmp("{ 'execute': 'quit' }");
g_assert(qdict_haskey(response, "return"));
+
qtest_end();
g_free(args);
}
diff --git a/tests/spapr-phb-test.c b/tests/spapr-phb-test.c
new file mode 100644
index 0000000000..b629de475a
--- /dev/null
+++ b/tests/spapr-phb-test.c
@@ -0,0 +1,35 @@
+/*
+ * QTest testcase for SPAPR PHB
+ *
+ * Authors:
+ * Alexey Kardashevskiy <aik@ozlabs.ru>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+#include <glib.h>
+
+#include "libqtest.h"
+
+#define TYPE_SPAPR_PCI_HOST_BRIDGE "spapr-pci-host-bridge"
+
+/* Tests only initialization so far. TODO: Replace with functional tests */
+static void test_phb_device(void)
+{
+}
+
+int main(int argc, char **argv)
+{
+ int ret;
+
+ g_test_init(&argc, &argv, NULL);
+ qtest_add_func("/spapr-phb/device", test_phb_device);
+
+ qtest_start("-device " TYPE_SPAPR_PCI_HOST_BRIDGE ",index=100");
+
+ ret = g_test_run();
+
+ qtest_end();
+
+ return ret;
+}
diff --git a/tests/tcg/test_path.c b/tests/tcg/test_path.c
index a064eea8fb..f8dd36aab2 100644
--- a/tests/tcg/test_path.c
+++ b/tests/tcg/test_path.c
@@ -1,12 +1,15 @@
/* Test path override code */
#define _GNU_SOURCE
#include "config-host.h"
-#include "iov.c"
-#include "cutils.c"
-#include "path.c"
-#include "trace.c"
+#include "util/cutils.c"
+#include "util/hexdump.c"
+#include "util/iov.c"
+#include "util/path.c"
+#include "util/qemu-timer-common.c"
+#include "trace/control.c"
+#include "../trace/generated-events.c"
#ifdef CONFIG_TRACE_SIMPLE
-#include "../trace/simple.c"
+#include "trace/simple.c"
#endif
#include <stdarg.h>
diff --git a/tests/test-aio.c b/tests/test-aio.c
index 592721ed3f..e5f8b55d30 100644
--- a/tests/test-aio.c
+++ b/tests/test-aio.c
@@ -65,6 +65,8 @@ static void bh_test_cb(void *opaque)
}
}
+#if !defined(_WIN32)
+
static void timer_test_cb(void *opaque)
{
TimerTestData *data = opaque;
@@ -78,6 +80,8 @@ static void dummy_io_handler_read(void *opaque)
{
}
+#endif /* !_WIN32 */
+
static void bh_delete_cb(void *opaque)
{
BHTestData *data = opaque;
@@ -112,6 +116,64 @@ static void test_notify(void)
g_assert(!aio_poll(ctx, false));
}
+typedef struct {
+ QemuMutex start_lock;
+ bool thread_acquired;
+} AcquireTestData;
+
+static void *test_acquire_thread(void *opaque)
+{
+ AcquireTestData *data = opaque;
+
+ /* Wait for other thread to let us start */
+ qemu_mutex_lock(&data->start_lock);
+ qemu_mutex_unlock(&data->start_lock);
+
+ aio_context_acquire(ctx);
+ aio_context_release(ctx);
+
+ data->thread_acquired = true; /* success, we got here */
+
+ return NULL;
+}
+
+static void dummy_notifier_read(EventNotifier *unused)
+{
+ g_assert(false); /* should never be invoked */
+}
+
+static void test_acquire(void)
+{
+ QemuThread thread;
+ EventNotifier notifier;
+ AcquireTestData data;
+
+ /* Dummy event notifier ensures aio_poll() will block */
+ event_notifier_init(&notifier, false);
+ aio_set_event_notifier(ctx, &notifier, dummy_notifier_read);
+ g_assert(!aio_poll(ctx, false)); /* consume aio_notify() */
+
+ qemu_mutex_init(&data.start_lock);
+ qemu_mutex_lock(&data.start_lock);
+ data.thread_acquired = false;
+
+ qemu_thread_create(&thread, "test_acquire_thread",
+ test_acquire_thread,
+ &data, QEMU_THREAD_JOINABLE);
+
+ /* Block in aio_poll(), let other thread kick us and acquire context */
+ aio_context_acquire(ctx);
+ qemu_mutex_unlock(&data.start_lock); /* let the thread run */
+ g_assert(!aio_poll(ctx, true));
+ aio_context_release(ctx);
+
+ qemu_thread_join(&thread);
+ aio_set_event_notifier(ctx, &notifier, NULL);
+ event_notifier_cleanup(&notifier);
+
+ g_assert(data.thread_acquired);
+}
+
static void test_bh_schedule(void)
{
BHTestData data = { .n = 0 };
@@ -365,6 +427,8 @@ static void test_wait_event_notifier_noflush(void)
event_notifier_cleanup(&data.e);
}
+#if !defined(_WIN32)
+
static void test_timer_schedule(void)
{
TimerTestData data = { .n = 0, .ctx = ctx, .ns = SCALE_MS * 750LL,
@@ -426,6 +490,8 @@ static void test_timer_schedule(void)
timer_del(&data.timer);
}
+#endif /* !_WIN32 */
+
/* Now the same tests, using the context as a GSource. They are
* very similar to the ones above, with g_main_context_iteration
* replacing aio_poll. However:
@@ -708,6 +774,8 @@ static void test_source_wait_event_notifier_noflush(void)
event_notifier_cleanup(&data.e);
}
+#if !defined(_WIN32)
+
static void test_source_timer_schedule(void)
{
TimerTestData data = { .n = 0, .ctx = ctx, .ns = SCALE_MS * 750LL,
@@ -757,6 +825,8 @@ static void test_source_timer_schedule(void)
timer_del(&data.timer);
}
+#endif /* !_WIN32 */
+
/* End of tests. */
@@ -775,6 +845,7 @@ int main(int argc, char **argv)
g_test_init(&argc, &argv, NULL);
g_test_add_func("/aio/notify", test_notify);
+ g_test_add_func("/aio/acquire", test_acquire);
g_test_add_func("/aio/bh/schedule", test_bh_schedule);
g_test_add_func("/aio/bh/schedule10", test_bh_schedule10);
g_test_add_func("/aio/bh/cancel", test_bh_cancel);
@@ -786,7 +857,9 @@ int main(int argc, char **argv)
g_test_add_func("/aio/event/wait", test_wait_event_notifier);
g_test_add_func("/aio/event/wait/no-flush-cb", test_wait_event_notifier_noflush);
g_test_add_func("/aio/event/flush", test_flush_event_notifier);
+#if !defined(_WIN32)
g_test_add_func("/aio/timer/schedule", test_timer_schedule);
+#endif
g_test_add_func("/aio-gsource/notify", test_source_notify);
g_test_add_func("/aio-gsource/flush", test_source_flush);
@@ -801,6 +874,8 @@ int main(int argc, char **argv)
g_test_add_func("/aio-gsource/event/wait", test_source_wait_event_notifier);
g_test_add_func("/aio-gsource/event/wait/no-flush-cb", test_source_wait_event_notifier_noflush);
g_test_add_func("/aio-gsource/event/flush", test_source_flush_event_notifier);
+#if !defined(_WIN32)
g_test_add_func("/aio-gsource/timer/schedule", test_source_timer_schedule);
+#endif
return g_test_run();
}
diff --git a/tests/test-qmp-input-strict.c b/tests/test-qmp-input-strict.c
index 38b5e95f68..f03353b755 100644
--- a/tests/test-qmp-input-strict.c
+++ b/tests/test-qmp-input-strict.c
@@ -153,7 +153,7 @@ static void test_validate_union_flat(TestInputVisitorData *data,
/* TODO when generator bug is fixed, add 'integer': 41 */
visit_type_UserDefFlatUnion(v, &tmp, NULL, &errp);
- g_assert(!error_is_set(&errp));
+ g_assert(!errp);
qapi_free_UserDefFlatUnion(tmp);
}
@@ -167,7 +167,7 @@ static void test_validate_union_anon(TestInputVisitorData *data,
v = validate_test_init(data, "42");
visit_type_UserDefAnonUnion(v, &tmp, NULL, &errp);
- g_assert(!error_is_set(&errp));
+ g_assert(!errp);
qapi_free_UserDefAnonUnion(tmp);
}
@@ -240,7 +240,7 @@ static void test_validate_fail_union_flat(TestInputVisitorData *data,
v = validate_test_init(data, "{ 'string': 'c', 'integer': 41, 'boolean': true }");
visit_type_UserDefFlatUnion(v, &tmp, NULL, &errp);
- g_assert(error_is_set(&errp));
+ g_assert(errp);
qapi_free_UserDefFlatUnion(tmp);
}
@@ -254,7 +254,7 @@ static void test_validate_fail_union_anon(TestInputVisitorData *data,
v = validate_test_init(data, "3.14");
visit_type_UserDefAnonUnion(v, &tmp, NULL, &errp);
- g_assert(error_is_set(&errp));
+ g_assert(errp);
qapi_free_UserDefAnonUnion(tmp);
}
diff --git a/tests/test-rfifolock.c b/tests/test-rfifolock.c
new file mode 100644
index 0000000000..0572ebb42a
--- /dev/null
+++ b/tests/test-rfifolock.c
@@ -0,0 +1,91 @@
+/*
+ * RFifoLock tests
+ *
+ * Copyright Red Hat, Inc. 2013
+ *
+ * Authors:
+ * Stefan Hajnoczi <stefanha@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2 or later.
+ * See the COPYING.LIB file in the top-level directory.
+ */
+
+#include <glib.h>
+#include "qemu-common.h"
+#include "qemu/rfifolock.h"
+
+static void test_nesting(void)
+{
+ RFifoLock lock;
+
+ /* Trivial test, ensure the lock is recursive */
+ rfifolock_init(&lock, NULL, NULL);
+ rfifolock_lock(&lock);
+ rfifolock_lock(&lock);
+ rfifolock_lock(&lock);
+ rfifolock_unlock(&lock);
+ rfifolock_unlock(&lock);
+ rfifolock_unlock(&lock);
+ rfifolock_destroy(&lock);
+}
+
+typedef struct {
+ RFifoLock lock;
+ int fd[2];
+} CallbackTestData;
+
+static void rfifolock_cb(void *opaque)
+{
+ CallbackTestData *data = opaque;
+ int ret;
+ char c = 0;
+
+ ret = write(data->fd[1], &c, sizeof(c));
+ g_assert(ret == 1);
+}
+
+static void *callback_thread(void *opaque)
+{
+ CallbackTestData *data = opaque;
+
+ /* The other thread holds the lock so the contention callback will be
+ * invoked...
+ */
+ rfifolock_lock(&data->lock);
+ rfifolock_unlock(&data->lock);
+ return NULL;
+}
+
+static void test_callback(void)
+{
+ CallbackTestData data;
+ QemuThread thread;
+ int ret;
+ char c;
+
+ rfifolock_init(&data.lock, rfifolock_cb, &data);
+ ret = qemu_pipe(data.fd);
+ g_assert(ret == 0);
+
+ /* Hold lock but allow the callback to kick us by writing to the pipe */
+ rfifolock_lock(&data.lock);
+ qemu_thread_create(&thread, "callback_thread",
+ callback_thread, &data, QEMU_THREAD_JOINABLE);
+ ret = read(data.fd[0], &c, sizeof(c));
+ g_assert(ret == 1);
+ rfifolock_unlock(&data.lock);
+ /* If we got here then the callback was invoked, as expected */
+
+ qemu_thread_join(&thread);
+ close(data.fd[0]);
+ close(data.fd[1]);
+ rfifolock_destroy(&data.lock);
+}
+
+int main(int argc, char **argv)
+{
+ g_test_init(&argc, &argv, NULL);
+ g_test_add_func("/nesting", test_nesting);
+ g_test_add_func("/callback", test_callback);
+ return g_test_run();
+}
diff --git a/tests/tmp105-test.c b/tests/tmp105-test.c
index 0834219e35..15ddaf38d4 100644
--- a/tests/tmp105-test.c
+++ b/tests/tmp105-test.c
@@ -15,44 +15,135 @@
#define OMAP2_I2C_1_BASE 0x48070000
-#define N8X0_ADDR 0x48
+#define TMP105_TEST_ID "tmp105-test"
+#define TMP105_TEST_ADDR 0x49
static I2CAdapter *i2c;
-static uint8_t addr;
-static void send_and_receive(void)
+static uint16_t tmp105_get8(I2CAdapter *i2c, uint8_t addr, uint8_t reg)
{
- uint8_t cmd[3];
- uint8_t resp[2];
+ uint8_t resp[1];
+ i2c_send(i2c, addr, &reg, 1);
+ i2c_recv(i2c, addr, resp, 1);
+ return resp[0];
+}
- cmd[0] = TMP105_REG_TEMPERATURE;
- i2c_send(i2c, addr, cmd, 1);
+static uint16_t tmp105_get16(I2CAdapter *i2c, uint8_t addr, uint8_t reg)
+{
+ uint8_t resp[2];
+ i2c_send(i2c, addr, &reg, 1);
i2c_recv(i2c, addr, resp, 2);
- g_assert_cmpuint(((uint16_t)resp[0] << 8) | resp[1], ==, 0);
+ return (resp[0] << 8) | resp[1];
+}
+
+static void tmp105_set8(I2CAdapter *i2c, uint8_t addr, uint8_t reg,
+ uint8_t value)
+{
+ uint8_t cmd[2];
+ uint8_t resp[1];
- cmd[0] = TMP105_REG_CONFIG;
- cmd[1] = 0x0; /* matches the reset value */
+ cmd[0] = reg;
+ cmd[1] = value;
i2c_send(i2c, addr, cmd, 2);
i2c_recv(i2c, addr, resp, 1);
g_assert_cmphex(resp[0], ==, cmd[1]);
+}
- cmd[0] = TMP105_REG_T_LOW;
- cmd[1] = 0x12;
- cmd[2] = 0x34;
- i2c_send(i2c, addr, cmd, 3);
- i2c_recv(i2c, addr, resp, 2);
- g_assert_cmphex(resp[0], ==, cmd[1]);
- g_assert_cmphex(resp[1], ==, cmd[2]);
+static void tmp105_set16(I2CAdapter *i2c, uint8_t addr, uint8_t reg,
+ uint16_t value)
+{
+ uint8_t cmd[3];
+ uint8_t resp[2];
- cmd[0] = TMP105_REG_T_HIGH;
- cmd[1] = 0x42;
- cmd[2] = 0x31;
+ cmd[0] = reg;
+ cmd[1] = value >> 8;
+ cmd[2] = value & 255;
i2c_send(i2c, addr, cmd, 3);
i2c_recv(i2c, addr, resp, 2);
g_assert_cmphex(resp[0], ==, cmd[1]);
g_assert_cmphex(resp[1], ==, cmd[2]);
}
+static int qmp_tmp105_get_temperature(const char *id)
+{
+ QDict *response;
+ int ret;
+
+ response = qmp("{ 'execute': 'qom-get', 'arguments': { 'path': '%s', "
+ "'property': 'temperature' } }", id);
+ g_assert(qdict_haskey(response, "return"));
+ ret = qdict_get_int(response, "return");
+ QDECREF(response);
+ return ret;
+}
+
+static void qmp_tmp105_set_temperature(const char *id, int value)
+{
+ QDict *response;
+
+ response = qmp("{ 'execute': 'qom-set', 'arguments': { 'path': '%s', "
+ "'property': 'temperature', 'value': %d } }", id, value);
+ g_assert(qdict_haskey(response, "return"));
+ QDECREF(response);
+}
+
+#define TMP105_PRECISION (1000/16)
+static void send_and_receive(void)
+{
+ uint16_t value;
+
+ value = qmp_tmp105_get_temperature(TMP105_TEST_ID);
+ g_assert_cmpuint(value, ==, 0);
+
+ value = tmp105_get16(i2c, TMP105_TEST_ADDR, TMP105_REG_TEMPERATURE);
+ g_assert_cmphex(value, ==, 0);
+
+ qmp_tmp105_set_temperature(TMP105_TEST_ID, 20000);
+ value = qmp_tmp105_get_temperature(TMP105_TEST_ID);
+ g_assert_cmpuint(value, ==, 20000);
+
+ value = tmp105_get16(i2c, TMP105_TEST_ADDR, TMP105_REG_TEMPERATURE);
+ g_assert_cmphex(value, ==, 0x1400);
+
+ qmp_tmp105_set_temperature(TMP105_TEST_ID, 20938); /* 20 + 15/16 */
+ value = qmp_tmp105_get_temperature(TMP105_TEST_ID);
+ g_assert_cmpuint(value, >=, 20938 - TMP105_PRECISION/2);
+ g_assert_cmpuint(value, <, 20938 + TMP105_PRECISION/2);
+
+ /* Set config */
+ tmp105_set8(i2c, TMP105_TEST_ADDR, TMP105_REG_CONFIG, 0x60);
+ value = tmp105_get8(i2c, TMP105_TEST_ADDR, TMP105_REG_CONFIG);
+ g_assert_cmphex(value, ==, 0x60);
+
+ value = tmp105_get16(i2c, TMP105_TEST_ADDR, TMP105_REG_TEMPERATURE);
+ g_assert_cmphex(value, ==, 0x14f0);
+
+ /* Set precision to 9, 10, 11 bits. */
+ tmp105_set8(i2c, TMP105_TEST_ADDR, TMP105_REG_CONFIG, 0x00);
+ value = tmp105_get16(i2c, TMP105_TEST_ADDR, TMP105_REG_TEMPERATURE);
+ g_assert_cmphex(value, ==, 0x1480);
+
+ tmp105_set8(i2c, TMP105_TEST_ADDR, TMP105_REG_CONFIG, 0x20);
+ value = tmp105_get16(i2c, TMP105_TEST_ADDR, TMP105_REG_TEMPERATURE);
+ g_assert_cmphex(value, ==, 0x14c0);
+
+ tmp105_set8(i2c, TMP105_TEST_ADDR, TMP105_REG_CONFIG, 0x40);
+ value = tmp105_get16(i2c, TMP105_TEST_ADDR, TMP105_REG_TEMPERATURE);
+ g_assert_cmphex(value, ==, 0x14e0);
+
+ /* stored precision remains the same */
+ value = qmp_tmp105_get_temperature(TMP105_TEST_ID);
+ g_assert_cmpuint(value, >=, 20938 - TMP105_PRECISION/2);
+ g_assert_cmpuint(value, <, 20938 + TMP105_PRECISION/2);
+
+ tmp105_set8(i2c, TMP105_TEST_ADDR, TMP105_REG_CONFIG, 0x60);
+ value = tmp105_get16(i2c, TMP105_TEST_ADDR, TMP105_REG_TEMPERATURE);
+ g_assert_cmphex(value, ==, 0x14f0);
+
+ tmp105_set16(i2c, TMP105_TEST_ADDR, TMP105_REG_T_LOW, 0x1234);
+ tmp105_set16(i2c, TMP105_TEST_ADDR, TMP105_REG_T_HIGH, 0x4231);
+}
+
int main(int argc, char **argv)
{
QTestState *s = NULL;
@@ -60,9 +151,10 @@ int main(int argc, char **argv)
g_test_init(&argc, &argv, NULL);
- s = qtest_start("-machine n800");
+ s = qtest_start("-machine n800 "
+ "-device tmp105,bus=i2c-bus.0,id=" TMP105_TEST_ID
+ ",address=0x49");
i2c = omap_i2c_create(OMAP2_I2C_1_BASE);
- addr = N8X0_ADDR;
qtest_add_func("/tmp105/tx-rx", send_and_receive);
diff --git a/tests/virtio-9p-test.c b/tests/virtio-9p-test.c
new file mode 100644
index 0000000000..1fae47797e
--- /dev/null
+++ b/tests/virtio-9p-test.c
@@ -0,0 +1,46 @@
+/*
+ * QTest testcase for VirtIO 9P
+ *
+ * Copyright (c) 2014 SUSE LINUX Products GmbH
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include <stdlib.h>
+#include <string.h>
+#include <glib.h>
+#include "libqtest.h"
+#include "qemu-common.h"
+#include "qemu/osdep.h"
+
+/* Tests only initialization so far. TODO: Replace with functional tests */
+static void pci_nop(void)
+{
+}
+
+static char test_share[] = "/tmp/qtest.XXXXXX";
+
+int main(int argc, char **argv)
+{
+ char *args;
+ int ret;
+
+ g_test_init(&argc, &argv, NULL);
+ qtest_add_func("/virtio/9p/pci/nop", pci_nop);
+
+ g_assert(mkdtemp(test_share));
+
+ args = g_strdup_printf("-fsdev local,id=fsdev0,security_model=none,path=%s "
+ "-device virtio-9p-pci,fsdev=fsdev0,mount_tag=qtest",
+ test_share);
+ qtest_start(args);
+ g_free(args);
+
+ ret = g_test_run();
+
+ qtest_end();
+ rmdir(test_share);
+
+ return ret;
+}
diff --git a/tests/virtio-balloon-test.c b/tests/virtio-balloon-test.c
new file mode 100644
index 0000000000..becebb51a7
--- /dev/null
+++ b/tests/virtio-balloon-test.c
@@ -0,0 +1,33 @@
+/*
+ * QTest testcase for VirtIO Balloon
+ *
+ * Copyright (c) 2014 SUSE LINUX Products GmbH
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include <glib.h>
+#include <string.h>
+#include "libqtest.h"
+#include "qemu/osdep.h"
+
+/* Tests only initialization so far. TODO: Replace with functional tests */
+static void pci_nop(void)
+{
+}
+
+int main(int argc, char **argv)
+{
+ int ret;
+
+ g_test_init(&argc, &argv, NULL);
+ qtest_add_func("/virtio/balloon/pci/nop", pci_nop);
+
+ qtest_start("-device virtio-balloon-pci");
+ ret = g_test_run();
+
+ qtest_end();
+
+ return ret;
+}
diff --git a/tests/virtio-blk-test.c b/tests/virtio-blk-test.c
new file mode 100644
index 0000000000..d53f875b89
--- /dev/null
+++ b/tests/virtio-blk-test.c
@@ -0,0 +1,34 @@
+/*
+ * QTest testcase for VirtIO Block Device
+ *
+ * Copyright (c) 2014 SUSE LINUX Products GmbH
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include <glib.h>
+#include <string.h>
+#include "libqtest.h"
+#include "qemu/osdep.h"
+
+/* Tests only initialization so far. TODO: Replace with functional tests */
+static void pci_nop(void)
+{
+}
+
+int main(int argc, char **argv)
+{
+ int ret;
+
+ g_test_init(&argc, &argv, NULL);
+ qtest_add_func("/virtio/blk/pci/nop", pci_nop);
+
+ qtest_start("-drive id=drv0,if=none,file=/dev/null "
+ "-device virtio-blk-pci,drive=drv0");
+ ret = g_test_run();
+
+ qtest_end();
+
+ return ret;
+}
diff --git a/tests/virtio-console-test.c b/tests/virtio-console-test.c
new file mode 100644
index 0000000000..6be96e8c64
--- /dev/null
+++ b/tests/virtio-console-test.c
@@ -0,0 +1,41 @@
+/*
+ * QTest testcase for VirtIO Console
+ *
+ * Copyright (c) 2014 SUSE LINUX Products GmbH
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include <glib.h>
+#include <string.h>
+#include "libqtest.h"
+#include "qemu/osdep.h"
+
+/* Tests only initialization so far. TODO: Replace with functional tests */
+static void console_pci_nop(void)
+{
+ qtest_start("-device virtio-serial-pci,id=vser0 "
+ "-device virtconsole,bus=vser0.0");
+ qtest_end();
+}
+
+static void serialport_pci_nop(void)
+{
+ qtest_start("-device virtio-serial-pci,id=vser0 "
+ "-device virtserialport,bus=vser0.0");
+ qtest_end();
+}
+
+int main(int argc, char **argv)
+{
+ int ret;
+
+ g_test_init(&argc, &argv, NULL);
+ qtest_add_func("/virtio/console/pci/nop", console_pci_nop);
+ qtest_add_func("/virtio/serialport/pci/nop", serialport_pci_nop);
+
+ ret = g_test_run();
+
+ return ret;
+}
diff --git a/tests/virtio-rng-test.c b/tests/virtio-rng-test.c
new file mode 100644
index 0000000000..402c2060da
--- /dev/null
+++ b/tests/virtio-rng-test.c
@@ -0,0 +1,33 @@
+/*
+ * QTest testcase for VirtIO RNG
+ *
+ * Copyright (c) 2014 SUSE LINUX Products GmbH
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include <glib.h>
+#include <string.h>
+#include "libqtest.h"
+#include "qemu/osdep.h"
+
+/* Tests only initialization so far. TODO: Replace with functional tests */
+static void pci_nop(void)
+{
+}
+
+int main(int argc, char **argv)
+{
+ int ret;
+
+ g_test_init(&argc, &argv, NULL);
+ qtest_add_func("/virtio/rng/pci/nop", pci_nop);
+
+ qtest_start("-device virtio-rng-pci");
+ ret = g_test_run();
+
+ qtest_end();
+
+ return ret;
+}
diff --git a/tests/virtio-scsi-test.c b/tests/virtio-scsi-test.c
new file mode 100644
index 0000000000..3230908b98
--- /dev/null
+++ b/tests/virtio-scsi-test.c
@@ -0,0 +1,35 @@
+/*
+ * QTest testcase for VirtIO SCSI
+ *
+ * Copyright (c) 2014 SUSE LINUX Products GmbH
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include <glib.h>
+#include <string.h>
+#include "libqtest.h"
+#include "qemu/osdep.h"
+
+/* Tests only initialization so far. TODO: Replace with functional tests */
+static void pci_nop(void)
+{
+}
+
+int main(int argc, char **argv)
+{
+ int ret;
+
+ g_test_init(&argc, &argv, NULL);
+ qtest_add_func("/virtio/scsi/pci/nop", pci_nop);
+
+ qtest_start("-drive id=drv0,if=none,file=/dev/null "
+ "-device virtio-scsi-pci,id=vscsi0 "
+ "-device scsi-hd,bus=vscsi0.0,drive=drv0");
+ ret = g_test_run();
+
+ qtest_end();
+
+ return ret;
+}
diff --git a/tests/virtio-serial-test.c b/tests/virtio-serial-test.c
new file mode 100644
index 0000000000..e7438751ea
--- /dev/null
+++ b/tests/virtio-serial-test.c
@@ -0,0 +1,33 @@
+/*
+ * QTest testcase for VirtIO Serial
+ *
+ * Copyright (c) 2014 SUSE LINUX Products GmbH
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include <glib.h>
+#include <string.h>
+#include "libqtest.h"
+#include "qemu/osdep.h"
+
+/* Tests only initialization so far. TODO: Replace with functional tests */
+static void pci_nop(void)
+{
+}
+
+int main(int argc, char **argv)
+{
+ int ret;
+
+ g_test_init(&argc, &argv, NULL);
+ qtest_add_func("/virtio/serial/pci/nop", pci_nop);
+
+ qtest_start("-device virtio-serial-pci");
+ ret = g_test_run();
+
+ qtest_end();
+
+ return ret;
+}
diff --git a/trace-events b/trace-events
index 002c2604d8..a5218ba393 100644
--- a/trace-events
+++ b/trace-events
@@ -82,7 +82,7 @@ mirror_start(void *bs, void *s, void *co, void *opaque) "bs %p s %p co %p opaque
mirror_restart_iter(void *s, int64_t cnt) "s %p dirty count %"PRId64
mirror_before_flush(void *s) "s %p"
mirror_before_drain(void *s, int64_t cnt) "s %p dirty count %"PRId64
-mirror_before_sleep(void *s, int64_t cnt, int synced) "s %p dirty count %"PRId64" synced %d"
+mirror_before_sleep(void *s, int64_t cnt, int synced, uint64_t delay_ns) "s %p dirty count %"PRId64" synced %d delay %"PRIu64"ns"
mirror_one_iteration(void *s, int64_t sector_num, int nb_sectors) "s %p sector_num %"PRId64" nb_sectors %d"
mirror_iteration_done(void *s, int64_t sector_num, int nb_sectors, int ret) "s %p sector_num %"PRId64" nb_sectors %d ret %d"
mirror_yield(void *s, int64_t cnt, int buf_free_count, int in_flight) "s %p dirty count %"PRId64" free buffers %d in_flight %d"
@@ -433,6 +433,27 @@ usb_uas_tmf_abort_task(int addr, uint16_t tag, uint16_t task_tag) "dev %d, tag 0
usb_uas_tmf_logical_unit_reset(int addr, uint16_t tag, int lun) "dev %d, tag 0x%x, lun %d"
usb_uas_tmf_unsupported(int addr, uint16_t tag, uint32_t function) "dev %d, tag 0x%x, function 0x%x"
+# hw/usb/dev-mtp.c
+usb_mtp_reset(int addr) "dev %d"
+usb_mtp_command(int dev, uint16_t code, uint32_t trans, uint32_t arg0, uint32_t arg1, uint32_t arg2, uint32_t arg3, uint32_t arg4) "dev %d, code 0x%x, trans 0x%x, args 0x%x, 0x%x, 0x%x, 0x%x, 0x%x"
+usb_mtp_success(int dev, uint32_t trans, uint32_t arg0, uint32_t arg1) "dev %d, trans 0x%x, args 0x%x, 0x%x"
+usb_mtp_error(int dev, uint16_t code, uint32_t trans, uint32_t arg0, uint32_t arg1) "dev %d, code 0x%x, trans 0x%x, args 0x%x, 0x%x"
+usb_mtp_data_in(int dev, uint32_t trans, uint32_t len) "dev %d, trans 0x%x, len %d"
+usb_mtp_data_out(int dev, uint32_t trans, uint32_t len) "dev %d, trans 0x%x, len %d"
+usb_mtp_xfer(int dev, uint32_t ep, uint32_t dlen, uint32_t plen) "dev %d, ep %d, %d/%d"
+usb_mtp_nak(int dev, uint32_t ep) "dev %d, ep %d"
+usb_mtp_stall(int dev, const char *reason) "dev %d, reason: %s"
+usb_mtp_op_get_device_info(int dev) "dev %d"
+usb_mtp_op_open_session(int dev) "dev %d"
+usb_mtp_op_close_session(int dev) "dev %d"
+usb_mtp_op_get_storage_ids(int dev) "dev %d"
+usb_mtp_op_get_storage_info(int dev) "dev %d"
+usb_mtp_op_get_num_objects(int dev, uint32_t handle, const char *path) "dev %d, handle 0x%x, path %s"
+usb_mtp_op_get_object_handles(int dev, uint32_t handle, const char *path) "dev %d, handle 0x%x, path %s"
+usb_mtp_op_get_object_info(int dev, uint32_t handle, const char *path) "dev %d, handle 0x%x, path %s"
+usb_mtp_op_get_object(int dev, uint32_t handle, const char *path) "dev %d, handle 0x%x, path %s"
+usb_mtp_op_get_partial_object(int dev, uint32_t handle, const char *path, uint32_t offset, uint32_t length) "dev %d, handle 0x%x, path %s, off %d, len %d"
+
# hw/usb/host-libusb.c
usb_host_open_started(int bus, int addr) "dev %d:%d"
usb_host_open_success(int bus, int addr) "dev %d:%d"
@@ -1022,7 +1043,7 @@ gd_update(int x, int y, int w, int h) "x=%d, y=%d, w=%d, h=%d"
gd_key_event(int gdk_keycode, int qemu_keycode, const char *action) "translated GDK keycode %d to QEMU keycode %d (%s)"
# ui/input.c
-input_event_key_number(int conidx, int number, bool down) "con %d, key number 0x%d, down %d"
+input_event_key_number(int conidx, int number, bool down) "con %d, key number 0x%x, down %d"
input_event_key_qcode(int conidx, const char *qcode, bool down) "con %d, key qcode %s, down %d"
input_event_btn(int conidx, const char *btn, bool down) "con %d, button %s, down %d"
input_event_rel(int conidx, const char *axis, int value) "con %d, axis %s, value %d"
@@ -1042,6 +1063,17 @@ vmware_setmode(uint32_t w, uint32_t h, uint32_t bpp) "%dx%d @ %d bpp"
# savevm.c
savevm_section_start(const char *id, unsigned int section_id) "%s, section_id %u"
savevm_section_end(const char *id, unsigned int section_id) "%s, section_id %u"
+savevm_state_begin(void) ""
+savevm_state_iterate(void) ""
+savevm_state_complete(void) ""
+savevm_state_cancel(void) ""
+vmstate_save(const char *idstr, const char *vmsd_name) "%s, %s"
+vmstate_load(const char *idstr, const char *vmsd_name) "%s, %s"
+vmstate_load_field_error(const char *field, int ret) "field \"%s\" load failed, ret = %d"
+qemu_announce_self_iter(const char *mac) "%s"
+
+# qemu-file.c
+qemu_file_fclose(void) ""
# arch_init.c
migration_bitmap_sync_start(void) ""
@@ -1181,6 +1213,11 @@ flic_reset_failed(int err) "flic: reset failed %d"
# migration.c
migrate_set_state(int new_state) "new state %d"
+migrate_fd_cleanup(void) ""
+migrate_fd_error(void) ""
+migrate_fd_cancel(void) ""
+migrate_pending(uint64_t size, uint64_t max) "pending size %" PRIu64 " max %" PRIu64
+migrate_transferred(uint64_t transferred, uint64_t time_spent, double bandwidth, uint64_t size) "transferred %" PRIu64 " time_spent %" PRIu64 " bandwidth %g max_size %" PRId64
# kvm-all.c
kvm_ioctl(int type, void *arg) "type 0x%x, arg %p"
@@ -1206,3 +1243,7 @@ xen_pv_mmio_write(uint64_t addr) "WARNING: write to Xen PV Device MMIO space (ad
# hw/pci/pci_host.c
pci_cfg_read(const char *dev, unsigned devid, unsigned fnid, unsigned offs, unsigned val) "%s %02u:%u @0x%x -> 0x%x"
pci_cfg_write(const char *dev, unsigned devid, unsigned fnid, unsigned offs, unsigned val) "%s %02u:%u @0x%x <- 0x%x"
+
+# target-s390/kvm.c
+kvm_failed_reg_get(uint64_t id, const char *msg) "Warning: Unable to retrieve ONEREG %" PRIu64 " from KVM: %s"
+kvm_failed_reg_set(uint64_t id, const char *msg) "Warning: Unable to set ONEREG %" PRIu64 " to KVM: %s"
diff --git a/trace/simple.c b/trace/simple.c
index 57572c4905..aaa010ee2b 100644
--- a/trace/simple.c
+++ b/trace/simple.c
@@ -414,15 +414,6 @@ bool trace_backend_init(const char *events, const char *file)
{
GThread *thread;
- if (!g_thread_supported()) {
-#if !GLIB_CHECK_VERSION(2, 31, 0)
- g_thread_init(NULL);
-#else
- fprintf(stderr, "glib threading failed to initialize.\n");
- exit(1);
-#endif
- }
-
#if !GLIB_CHECK_VERSION(2, 31, 0)
trace_available_cond = g_cond_new();
trace_empty_cond = g_cond_new();
diff --git a/translate-all.c b/translate-all.c
index 1ac0246dab..5759974d42 100644
--- a/translate-all.c
+++ b/translate-all.c
@@ -197,9 +197,10 @@ int cpu_gen_code(CPUArchState *env, TranslationBlock *tb, int *gen_code_size_ptr
/* The cpu state corresponding to 'searched_pc' is restored.
*/
-static int cpu_restore_state_from_tb(TranslationBlock *tb, CPUArchState *env,
+static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
uintptr_t searched_pc)
{
+ CPUArchState *env = cpu->env_ptr;
TCGContext *s = &tcg_ctx;
int j;
uintptr_t tc_ptr;
@@ -216,9 +217,9 @@ static int cpu_restore_state_from_tb(TranslationBlock *tb, CPUArchState *env,
if (use_icount) {
/* Reset the cycle counter to the start of the block. */
- env->icount_decr.u16.low += tb->icount;
+ cpu->icount_decr.u16.low += tb->icount;
/* Clear the IO flag. */
- env->can_do_io = 0;
+ cpu->can_do_io = 0;
}
/* find opc index corresponding to search_pc */
@@ -241,7 +242,7 @@ static int cpu_restore_state_from_tb(TranslationBlock *tb, CPUArchState *env,
while (s->gen_opc_instr_start[j] == 0) {
j--;
}
- env->icount_decr.u16.low -= s->gen_opc_icount[j];
+ cpu->icount_decr.u16.low -= s->gen_opc_icount[j];
restore_state_to_opc(env, tb, j);
@@ -252,13 +253,13 @@ static int cpu_restore_state_from_tb(TranslationBlock *tb, CPUArchState *env,
return 0;
}
-bool cpu_restore_state(CPUArchState *env, uintptr_t retaddr)
+bool cpu_restore_state(CPUState *cpu, uintptr_t retaddr)
{
TranslationBlock *tb;
tb = tb_find_pc(retaddr);
if (tb) {
- cpu_restore_state_from_tb(tb, env, retaddr);
+ cpu_restore_state_from_tb(cpu, tb, retaddr);
return true;
}
return false;
@@ -687,7 +688,7 @@ static void page_flush_tb(void)
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUArchState *env1)
{
- CPUState *cpu;
+ CPUState *cpu = ENV_GET_CPU(env1);
#if defined(DEBUG_FLUSH)
printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
@@ -698,14 +699,12 @@ void tb_flush(CPUArchState *env1)
#endif
if ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)
> tcg_ctx.code_gen_buffer_size) {
- cpu_abort(env1, "Internal error: code buffer overflow\n");
+ cpu_abort(cpu, "Internal error: code buffer overflow\n");
}
tcg_ctx.tb_ctx.nb_tbs = 0;
CPU_FOREACH(cpu) {
- CPUArchState *env = cpu->env_ptr;
-
- memset(env->tb_jmp_cache, 0, sizeof(env->tb_jmp_cache));
+ memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
}
memset(tcg_ctx.tb_ctx.tb_phys_hash, 0, sizeof(tcg_ctx.tb_ctx.tb_phys_hash));
@@ -856,10 +855,8 @@ void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
/* remove the TB from the hash list */
h = tb_jmp_cache_hash_func(tb->pc);
CPU_FOREACH(cpu) {
- CPUArchState *env = cpu->env_ptr;
-
- if (env->tb_jmp_cache[h] == tb) {
- env->tb_jmp_cache[h] = NULL;
+ if (cpu->tb_jmp_cache[h] == tb) {
+ cpu->tb_jmp_cache[h] = NULL;
}
}
@@ -941,10 +938,11 @@ static void build_page_bitmap(PageDesc *p)
}
}
-TranslationBlock *tb_gen_code(CPUArchState *env,
+TranslationBlock *tb_gen_code(CPUState *cpu,
target_ulong pc, target_ulong cs_base,
int flags, int cflags)
{
+ CPUArchState *env = cpu->env_ptr;
TranslationBlock *tb;
uint8_t *tc_ptr;
tb_page_addr_t phys_pc, phys_page2;
@@ -1009,7 +1007,7 @@ void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
{
TranslationBlock *tb, *tb_next, *saved_tb;
CPUState *cpu = current_cpu;
-#if defined(TARGET_HAS_PRECISE_SMC) || !defined(CONFIG_USER_ONLY)
+#if defined(TARGET_HAS_PRECISE_SMC)
CPUArchState *env = NULL;
#endif
tb_page_addr_t tb_start, tb_end;
@@ -1034,7 +1032,7 @@ void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
/* build code bitmap */
build_page_bitmap(p);
}
-#if defined(TARGET_HAS_PRECISE_SMC) || !defined(CONFIG_USER_ONLY)
+#if defined(TARGET_HAS_PRECISE_SMC)
if (cpu != NULL) {
env = cpu->env_ptr;
}
@@ -1063,9 +1061,9 @@ void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
if (current_tb_not_found) {
current_tb_not_found = 0;
current_tb = NULL;
- if (env->mem_io_pc) {
+ if (cpu->mem_io_pc) {
/* now we have a real cpu fault */
- current_tb = tb_find_pc(env->mem_io_pc);
+ current_tb = tb_find_pc(cpu->mem_io_pc);
}
}
if (current_tb == tb &&
@@ -1077,7 +1075,7 @@ void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
restore the CPU state */
current_tb_modified = 1;
- cpu_restore_state_from_tb(current_tb, env, env->mem_io_pc);
+ cpu_restore_state_from_tb(cpu, current_tb, cpu->mem_io_pc);
cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
&current_flags);
}
@@ -1104,7 +1102,7 @@ void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
if (!p->first_tb) {
invalidate_page_bitmap(p);
if (is_cpu_write_access) {
- tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
+ tlb_unprotect_code_phys(cpu, start, cpu->mem_io_vaddr);
}
}
#endif
@@ -1114,8 +1112,8 @@ void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
modifying the memory. It will ensure that it cannot modify
itself */
cpu->current_tb = NULL;
- tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
- cpu_resume_from_signal(env, NULL);
+ tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
+ cpu_resume_from_signal(cpu, NULL);
}
#endif
}
@@ -1196,7 +1194,7 @@ static void tb_invalidate_phys_page(tb_page_addr_t addr,
restore the CPU state */
current_tb_modified = 1;
- cpu_restore_state_from_tb(current_tb, env, pc);
+ cpu_restore_state_from_tb(cpu, current_tb, pc);
cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
&current_flags);
}
@@ -1211,11 +1209,11 @@ static void tb_invalidate_phys_page(tb_page_addr_t addr,
modifying the memory. It will ensure that it cannot modify
itself */
cpu->current_tb = NULL;
- tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
+ tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
if (locked) {
mmap_unlock();
}
- cpu_resume_from_signal(env, puc);
+ cpu_resume_from_signal(cpu, puc);
}
#endif
}
@@ -1374,16 +1372,16 @@ void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr)
}
#endif /* TARGET_HAS_ICE && !defined(CONFIG_USER_ONLY) */
-void tb_check_watchpoint(CPUArchState *env)
+void tb_check_watchpoint(CPUState *cpu)
{
TranslationBlock *tb;
- tb = tb_find_pc(env->mem_io_pc);
+ tb = tb_find_pc(cpu->mem_io_pc);
if (!tb) {
- cpu_abort(env, "check_watchpoint: could not find TB for pc=%p",
- (void *)env->mem_io_pc);
+ cpu_abort(cpu, "check_watchpoint: could not find TB for pc=%p",
+ (void *)cpu->mem_io_pc);
}
- cpu_restore_state_from_tb(tb, env, env->mem_io_pc);
+ cpu_restore_state_from_tb(cpu, tb, cpu->mem_io_pc);
tb_phys_invalidate(tb, -1);
}
@@ -1391,7 +1389,6 @@ void tb_check_watchpoint(CPUArchState *env)
/* mask must never be zero, except for A20 change call */
static void tcg_handle_interrupt(CPUState *cpu, int mask)
{
- CPUArchState *env = cpu->env_ptr;
int old_mask;
old_mask = cpu->interrupt_request;
@@ -1407,10 +1404,10 @@ static void tcg_handle_interrupt(CPUState *cpu, int mask)
}
if (use_icount) {
- env->icount_decr.u16.high = 0xffff;
- if (!can_do_io(env)
+ cpu->icount_decr.u16.high = 0xffff;
+ if (!cpu_can_do_io(cpu)
&& (mask & ~old_mask) != 0) {
- cpu_abort(env, "Raised interrupt while not in I/O function");
+ cpu_abort(cpu, "Raised interrupt while not in I/O function");
}
} else {
cpu->tcg_exit_req = 1;
@@ -1421,8 +1418,11 @@ CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
/* in deterministic execution mode, instructions doing device I/Os
must be at the end of the TB */
-void cpu_io_recompile(CPUArchState *env, uintptr_t retaddr)
+void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
{
+#if defined(TARGET_MIPS) || defined(TARGET_SH4)
+ CPUArchState *env = cpu->env_ptr;
+#endif
TranslationBlock *tb;
uint32_t n, cflags;
target_ulong pc, cs_base;
@@ -1430,14 +1430,14 @@ void cpu_io_recompile(CPUArchState *env, uintptr_t retaddr)
tb = tb_find_pc(retaddr);
if (!tb) {
- cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
+ cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
(void *)retaddr);
}
- n = env->icount_decr.u16.low + tb->icount;
- cpu_restore_state_from_tb(tb, env, retaddr);
+ n = cpu->icount_decr.u16.low + tb->icount;
+ cpu_restore_state_from_tb(cpu, tb, retaddr);
/* Calculate how many instructions had been executed before the fault
occurred. */
- n = n - env->icount_decr.u16.low;
+ n = n - cpu->icount_decr.u16.low;
/* Generate a new TB ending on the I/O insn. */
n++;
/* On MIPS and SH, delay slot instructions can only be restarted if
@@ -1447,20 +1447,20 @@ void cpu_io_recompile(CPUArchState *env, uintptr_t retaddr)
#if defined(TARGET_MIPS)
if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
env->active_tc.PC -= 4;
- env->icount_decr.u16.low++;
+ cpu->icount_decr.u16.low++;
env->hflags &= ~MIPS_HFLAG_BMASK;
}
#elif defined(TARGET_SH4)
if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
&& n > 1) {
env->pc -= 2;
- env->icount_decr.u16.low++;
+ cpu->icount_decr.u16.low++;
env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
}
#endif
/* This should never happen. */
if (n > CF_COUNT_MASK) {
- cpu_abort(env, "TB too big during recompile");
+ cpu_abort(cpu, "TB too big during recompile");
}
cflags = n | CF_LAST_IO;
@@ -1470,27 +1470,27 @@ void cpu_io_recompile(CPUArchState *env, uintptr_t retaddr)
tb_phys_invalidate(tb, -1);
/* FIXME: In theory this could raise an exception. In practice
we have already translated the block once so it's probably ok. */
- tb_gen_code(env, pc, cs_base, flags, cflags);
+ tb_gen_code(cpu, pc, cs_base, flags, cflags);
/* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
the first in the TB) then we end up generating a whole new TB and
repeating the fault, which is horribly inefficient.
Better would be to execute just this insn uncached, or generate a
second new TB. */
- cpu_resume_from_signal(env, NULL);
+ cpu_resume_from_signal(cpu, NULL);
}
-void tb_flush_jmp_cache(CPUArchState *env, target_ulong addr)
+void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
{
unsigned int i;
/* Discard jump cache entries for any tb which might potentially
overlap the flushed page. */
i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
- memset(&env->tb_jmp_cache[i], 0,
+ memset(&cpu->tb_jmp_cache[i], 0,
TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
i = tb_jmp_cache_hash_page(addr);
- memset(&env->tb_jmp_cache[i], 0,
+ memset(&cpu->tb_jmp_cache[i], 0,
TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}
@@ -1777,7 +1777,6 @@ int page_check_range(target_ulong start, target_ulong len, int flags)
return -1;
}
}
- return 0;
}
}
return 0;
diff --git a/translate-all.h b/translate-all.h
index f7e5932d65..02832b2718 100644
--- a/translate-all.h
+++ b/translate-all.h
@@ -22,6 +22,6 @@
/* translate-all.c */
void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len);
void cpu_unlink_tb(CPUState *cpu);
-void tb_check_watchpoint(CPUArchState *env);
+void tb_check_watchpoint(CPUState *cpu);
#endif /* TRANSLATE_ALL_H */
diff --git a/ui/console.c b/ui/console.c
index 4df251d579..e057755c04 100644
--- a/ui/console.c
+++ b/ui/console.c
@@ -1180,7 +1180,10 @@ static QemuConsole *new_console(DisplayState *ds, console_type_t console_type)
obj = object_new(TYPE_QEMU_CONSOLE);
s = QEMU_CONSOLE(obj);
object_property_add_link(obj, "device", TYPE_DEVICE,
- (Object **)&s->device, &local_err);
+ (Object **)&s->device,
+ object_property_allow_set_link,
+ OBJ_PROP_LINK_UNREF_ON_RELEASE,
+ &local_err);
object_property_add_uint32_ptr(obj, "head",
&s->head, &local_err);
diff --git a/ui/gtk.c b/ui/gtk.c
index 185149571e..00fbbccb34 100644
--- a/ui/gtk.c
+++ b/ui/gtk.c
@@ -54,7 +54,9 @@
#include <gdk/gdkkeysyms.h>
#include <glib/gi18n.h>
#include <locale.h>
+#if defined(CONFIG_VTE)
#include <vte/vte.h>
+#endif
#include <math.h>
#include "trace.h"
@@ -68,6 +70,9 @@
#define MAX_VCS 10
+#if !defined(CONFIG_VTE)
+# define VTE_CHECK_VERSION(a, b, c) 0
+#endif
/* Compatibility define to let us build on both Gtk2 and Gtk3 */
#if GTK_CHECK_VERSION(3, 0, 0)
@@ -105,8 +110,10 @@ typedef struct VirtualConsole
{
GtkWidget *menu_item;
GtkWidget *terminal;
+#if defined(CONFIG_VTE)
GtkWidget *scrolled_window;
CharDriverState *chr;
+#endif
int fd;
} VirtualConsole;
@@ -149,8 +156,11 @@ typedef struct GtkDisplayState
DisplayChangeListener dcl;
DisplaySurface *ds;
int button_mask;
+ gboolean last_set;
int last_x;
int last_y;
+ int grab_x_root;
+ int grab_y_root;
double scale_x;
double scale_y;
@@ -340,13 +350,17 @@ static void gd_mouse_set(DisplayChangeListener *dcl,
GdkDeviceManager *mgr;
gint x_root, y_root;
+ if (qemu_input_is_absolute()) {
+ return;
+ }
+
dpy = gtk_widget_get_display(s->drawing_area);
mgr = gdk_display_get_device_manager(dpy);
gdk_window_get_root_coords(gtk_widget_get_window(s->drawing_area),
x, y, &x_root, &y_root);
gdk_device_warp(gdk_device_manager_get_client_pointer(mgr),
gtk_widget_get_screen(s->drawing_area),
- x, y);
+ x_root, y_root);
}
#else
static void gd_mouse_set(DisplayChangeListener *dcl,
@@ -355,6 +369,10 @@ static void gd_mouse_set(DisplayChangeListener *dcl,
GtkDisplayState *s = container_of(dcl, GtkDisplayState, dcl);
gint x_root, y_root;
+ if (qemu_input_is_absolute()) {
+ return;
+ }
+
gdk_window_get_root_coords(gtk_widget_get_window(s->drawing_area),
x, y, &x_root, &y_root);
gdk_display_warp_pointer(gtk_widget_get_display(s->drawing_area),
@@ -458,8 +476,15 @@ static void gd_change_runstate(void *opaque, int running, RunState state)
static void gd_mouse_mode_change(Notifier *notify, void *data)
{
- gd_update_cursor(container_of(notify, GtkDisplayState, mouse_mode_notifier),
- FALSE);
+ GtkDisplayState *s;
+
+ s = container_of(notify, GtkDisplayState, mouse_mode_notifier);
+ /* release the grab when switching to absolute mode */
+ if (qemu_input_is_absolute() && gd_is_grab_active(s)) {
+ gtk_check_menu_item_set_active(GTK_CHECK_MENU_ITEM(s->grab_item),
+ FALSE);
+ }
+ gd_update_cursor(s, FALSE);
}
/** GTK Events **/
@@ -601,25 +626,25 @@ static gboolean gd_motion_event(GtkWidget *widget, GdkEventMotion *motion,
x = (motion->x - mx) / s->scale_x;
y = (motion->y - my) / s->scale_y;
- if (x < 0 || y < 0 ||
- x >= surface_width(s->ds) ||
- y >= surface_height(s->ds)) {
- return TRUE;
- }
-
if (qemu_input_is_absolute()) {
+ if (x < 0 || y < 0 ||
+ x >= surface_width(s->ds) ||
+ y >= surface_height(s->ds)) {
+ return TRUE;
+ }
qemu_input_queue_abs(s->dcl.con, INPUT_AXIS_X, x,
surface_width(s->ds));
qemu_input_queue_abs(s->dcl.con, INPUT_AXIS_Y, y,
surface_height(s->ds));
qemu_input_event_sync();
- } else if (s->last_x != -1 && s->last_y != -1 && gd_is_grab_active(s)) {
+ } else if (s->last_set && gd_is_grab_active(s)) {
qemu_input_queue_rel(s->dcl.con, INPUT_AXIS_X, x - s->last_x);
qemu_input_queue_rel(s->dcl.con, INPUT_AXIS_Y, y - s->last_y);
qemu_input_event_sync();
}
s->last_x = x;
s->last_y = y;
+ s->last_set = TRUE;
if (!qemu_input_is_absolute() && gd_is_grab_active(s)) {
GdkScreen *screen = gtk_widget_get_screen(s->drawing_area);
@@ -654,8 +679,7 @@ static gboolean gd_motion_event(GtkWidget *widget, GdkEventMotion *motion,
GdkDisplay *display = gtk_widget_get_display(widget);
gdk_display_warp_pointer(display, screen, x, y);
#endif
- s->last_x = -1;
- s->last_y = -1;
+ s->last_set = FALSE;
return FALSE;
}
}
@@ -668,6 +692,14 @@ static gboolean gd_button_event(GtkWidget *widget, GdkEventButton *button,
GtkDisplayState *s = opaque;
InputButton btn;
+ /* implicitly grab the input on the first click in relative mode */
+ if (button->button == 1 && button->type == GDK_BUTTON_PRESS &&
+ !qemu_input_is_absolute() && !gd_is_grab_active(s)) {
+ gtk_check_menu_item_set_active(GTK_CHECK_MENU_ITEM(s->grab_item),
+ TRUE);
+ return TRUE;
+ }
+
if (button->button == 1) {
btn = INPUT_BUTTON_LEFT;
} else if (button->button == 2) {
@@ -683,6 +715,27 @@ static gboolean gd_button_event(GtkWidget *widget, GdkEventButton *button,
return TRUE;
}
+static gboolean gd_scroll_event(GtkWidget *widget, GdkEventScroll *scroll,
+ void *opaque)
+{
+ GtkDisplayState *s = opaque;
+ InputButton btn;
+
+ if (scroll->direction == GDK_SCROLL_UP) {
+ btn = INPUT_BUTTON_WHEEL_UP;
+ } else if (scroll->direction == GDK_SCROLL_DOWN) {
+ btn = INPUT_BUTTON_WHEEL_DOWN;
+ } else {
+ return TRUE;
+ }
+
+ qemu_input_queue_btn(s->dcl.con, btn, true);
+ qemu_input_event_sync();
+ qemu_input_queue_btn(s->dcl.con, btn, false);
+ qemu_input_event_sync();
+ return TRUE;
+}
+
static gboolean gd_key_event(GtkWidget *widget, GdkEventKey *key, void *opaque)
{
GtkDisplayState *s = opaque;
@@ -729,6 +782,14 @@ static gboolean gd_key_event(GtkWidget *widget, GdkEventKey *key, void *opaque)
return TRUE;
}
+static gboolean gd_event(GtkWidget *widget, GdkEvent *event, void *opaque)
+{
+ if (event->type == GDK_MOTION_NOTIFY) {
+ return gd_motion_event(widget, &event->motion, opaque);
+ }
+ return FALSE;
+}
+
/** Window Menu Actions **/
static void gd_menu_pause(GtkMenuItem *item, void *opaque)
@@ -927,8 +988,8 @@ static void gd_ungrab_keyboard(GtkDisplayState *s)
static void gd_grab_pointer(GtkDisplayState *s)
{
-#if GTK_CHECK_VERSION(3, 0, 0)
GdkDisplay *display = gtk_widget_get_display(s->drawing_area);
+#if GTK_CHECK_VERSION(3, 0, 0)
GdkDeviceManager *mgr = gdk_display_get_device_manager(display);
GList *devices = gdk_device_manager_list_devices(mgr,
GDK_DEVICE_TYPE_MASTER);
@@ -952,6 +1013,8 @@ static void gd_grab_pointer(GtkDisplayState *s)
tmp = tmp->next;
}
g_list_free(devices);
+ gdk_device_get_position(gdk_device_manager_get_client_pointer(mgr),
+ NULL, &s->grab_x_root, &s->grab_y_root);
#else
gdk_pointer_grab(gtk_widget_get_window(s->drawing_area),
FALSE, /* All events to come to our window directly */
@@ -963,13 +1026,15 @@ static void gd_grab_pointer(GtkDisplayState *s)
NULL, /* Allow cursor to move over entire desktop */
s->null_cursor,
GDK_CURRENT_TIME);
+ gdk_display_get_pointer(display, NULL,
+ &s->grab_x_root, &s->grab_y_root, NULL);
#endif
}
static void gd_ungrab_pointer(GtkDisplayState *s)
{
-#if GTK_CHECK_VERSION(3, 0, 0)
GdkDisplay *display = gtk_widget_get_display(s->drawing_area);
+#if GTK_CHECK_VERSION(3, 0, 0)
GdkDeviceManager *mgr = gdk_display_get_device_manager(display);
GList *devices = gdk_device_manager_list_devices(mgr,
GDK_DEVICE_TYPE_MASTER);
@@ -983,8 +1048,14 @@ static void gd_ungrab_pointer(GtkDisplayState *s)
tmp = tmp->next;
}
g_list_free(devices);
+ gdk_device_warp(gdk_device_manager_get_client_pointer(mgr),
+ gtk_widget_get_screen(s->drawing_area),
+ s->grab_x_root, s->grab_y_root);
#else
gdk_pointer_ungrab(GDK_CURRENT_TIME);
+ gdk_display_warp_pointer(display,
+ gtk_widget_get_screen(s->drawing_area),
+ s->grab_x_root, s->grab_y_root);
#endif
}
@@ -1034,6 +1105,7 @@ static void gd_change_page(GtkNotebook *nb, gpointer arg1, guint arg2,
if (arg2 == 0) {
gtk_check_menu_item_set_active(GTK_CHECK_MENU_ITEM(s->vga_item), TRUE);
} else {
+#if defined(CONFIG_VTE)
VirtualConsole *vc = &s->vc[arg2 - 1];
VteTerminal *term = VTE_TERMINAL(vc->terminal);
int width, height;
@@ -1043,6 +1115,9 @@ static void gd_change_page(GtkNotebook *nb, gpointer arg1, guint arg2,
gtk_check_menu_item_set_active(GTK_CHECK_MENU_ITEM(vc->menu_item), TRUE);
gtk_widget_set_size_request(vc->terminal, width, height);
+#else
+ g_assert_not_reached();
+#endif
}
gtk_widget_set_sensitive(s->grab_item, on_vga);
@@ -1088,7 +1163,7 @@ static int gd_vc_chr_write(CharDriverState *chr, const uint8_t *buf, int len)
{
VirtualConsole *vc = chr->opaque;
- return write(vc->fd, buf, len);
+ return vc ? write(vc->fd, buf, len) : len;
}
static int nb_vcs;
@@ -1113,6 +1188,7 @@ void early_gtk_display_init(void)
register_vc_handler(gd_vc_handler);
}
+#if defined(CONFIG_VTE)
static gboolean gd_vc_in(GIOChannel *chan, GIOCondition cond, void *opaque)
{
VirtualConsole *vc = opaque;
@@ -1128,10 +1204,12 @@ static gboolean gd_vc_in(GIOChannel *chan, GIOCondition cond, void *opaque)
return TRUE;
}
+#endif
static GSList *gd_vc_init(GtkDisplayState *s, VirtualConsole *vc, int index, GSList *group,
GtkWidget *view_menu)
{
+#if defined(CONFIG_VTE)
const char *label;
char buffer[32];
char path[32];
@@ -1201,6 +1279,7 @@ static GSList *gd_vc_init(GtkDisplayState *s, VirtualConsole *vc, int index, GSL
chan = g_io_channel_unix_new(vc->fd);
g_io_add_watch(chan, G_IO_IN, gd_vc_in, vc);
+#endif /* CONFIG_VTE */
return group;
}
@@ -1223,12 +1302,14 @@ static void gd_connect_signals(GtkDisplayState *s)
g_signal_connect(s->drawing_area, "expose-event",
G_CALLBACK(gd_expose_event), s);
#endif
- g_signal_connect(s->drawing_area, "motion-notify-event",
- G_CALLBACK(gd_motion_event), s);
+ g_signal_connect(s->drawing_area, "event",
+ G_CALLBACK(gd_event), s);
g_signal_connect(s->drawing_area, "button-press-event",
G_CALLBACK(gd_button_event), s);
g_signal_connect(s->drawing_area, "button-release-event",
G_CALLBACK(gd_button_event), s);
+ g_signal_connect(s->drawing_area, "scroll-event",
+ G_CALLBACK(gd_scroll_event), s);
g_signal_connect(s->drawing_area, "key-press-event",
G_CALLBACK(gd_key_event), s);
g_signal_connect(s->drawing_area, "key-release-event",
@@ -1415,7 +1496,7 @@ static const DisplayChangeListenerOps dcl_ops = {
.dpy_cursor_define = gd_cursor_define,
};
-void gtk_display_init(DisplayState *ds, bool full_screen)
+void gtk_display_init(DisplayState *ds, bool full_screen, bool grab_on_hover)
{
GtkDisplayState *s = g_malloc0(sizeof(*s));
char *filename;
@@ -1494,6 +1575,9 @@ void gtk_display_init(DisplayState *ds, bool full_screen)
if (full_screen) {
gtk_menu_item_activate(GTK_MENU_ITEM(s->full_screen_item));
}
+ if (grab_on_hover) {
+ gtk_menu_item_activate(GTK_MENU_ITEM(s->grab_on_hover_item));
+ }
register_displaychangelistener(&s->dcl);
diff --git a/ui/input-legacy.c b/ui/input-legacy.c
index 7dc486b8ac..1aa2605b75 100644
--- a/ui/input-legacy.c
+++ b/ui/input-legacy.c
@@ -333,6 +333,7 @@ QEMUPutKbdEntry *qemu_add_kbd_event_handler(QEMUPutKBDEvent *func, void *opaque)
entry->opaque = opaque;
entry->s = qemu_input_handler_register((DeviceState *)entry,
&legacy_kbd_handler);
+ qemu_input_handler_activate(entry->s);
return entry;
}
diff --git a/ui/input.c b/ui/input.c
index 2761911f3c..1ed0e783b1 100644
--- a/ui/input.c
+++ b/ui/input.c
@@ -143,6 +143,9 @@ void qemu_input_event_send(QemuConsole *src, InputEvent *evt)
/* send event */
s = qemu_input_find_handler(1 << evt->kind);
+ if (!s) {
+ return;
+ }
s->handler->event(s->dev, src, evt);
s->events++;
}
@@ -342,15 +345,21 @@ void do_mouse_set(Monitor *mon, const QDict *qdict)
int found = 0;
QTAILQ_FOREACH(s, &handlers, node) {
- if (s->id == index) {
- found = 1;
- qemu_input_handler_activate(s);
- break;
+ if (s->id != index) {
+ continue;
+ }
+ if (!(s->handler->mask & (INPUT_EVENT_MASK_REL |
+ INPUT_EVENT_MASK_ABS))) {
+ error_report("Input device '%s' is not a mouse", s->handler->name);
+ return;
}
+ found = 1;
+ qemu_input_handler_activate(s);
+ break;
}
if (!found) {
- monitor_printf(mon, "Mouse at given index not found\n");
+ error_report("Mouse at index '%d' not found", index);
}
qemu_input_check_mode_change();
diff --git a/ui/sdl2.c b/ui/sdl2.c
index f1532e9d2c..7506e2e13f 100644
--- a/ui/sdl2.c
+++ b/ui/sdl2.c
@@ -278,7 +278,7 @@ static void sdl_hide_cursor(void)
SDL_ShowCursor(1);
SDL_SetCursor(sdl_cursor_hidden);
} else {
- SDL_ShowCursor(0);
+ SDL_SetRelativeMouseMode(SDL_TRUE);
}
}
@@ -289,6 +289,7 @@ static void sdl_show_cursor(void)
}
if (!qemu_input_is_absolute()) {
+ SDL_SetRelativeMouseMode(SDL_FALSE);
SDL_ShowCursor(1);
if (guest_cursor &&
(gui_grab || qemu_input_is_absolute() || absolute_enabled)) {
@@ -403,13 +404,17 @@ static void sdl_send_mouse_event(struct sdl2_state *scon, int dx, int dy,
}
qemu_input_queue_abs(scon->dcl.con, INPUT_AXIS_X, off_x + x, max_w);
qemu_input_queue_abs(scon->dcl.con, INPUT_AXIS_Y, off_y + y, max_h);
- } else if (guest_cursor) {
- x -= guest_x;
- y -= guest_y;
- guest_x += x;
- guest_y += y;
- qemu_input_queue_rel(scon->dcl.con, INPUT_AXIS_X, x);
- qemu_input_queue_rel(scon->dcl.con, INPUT_AXIS_Y, y);
+ } else {
+ if (guest_cursor) {
+ x -= guest_x;
+ y -= guest_y;
+ guest_x += x;
+ guest_y += y;
+ dx = x;
+ dy = y;
+ }
+ qemu_input_queue_rel(scon->dcl.con, INPUT_AXIS_X, dx);
+ qemu_input_queue_rel(scon->dcl.con, INPUT_AXIS_Y, dy);
}
qemu_input_event_sync();
}
diff --git a/ui/spice-display.c b/ui/spice-display.c
index e28698c6b6..ce6b220f55 100644
--- a/ui/spice-display.c
+++ b/ui/spice-display.c
@@ -549,6 +549,10 @@ static int interface_client_monitors_config(QXLInstance *sin,
QemuUIInfo info;
int rc;
+ if (!mc) {
+ return 1;
+ }
+
/*
* FIXME: multihead is tricky due to the way
* spice has multihead implemented.
diff --git a/ui/spice-input.c b/ui/spice-input.c
index 6dab23b75c..c342e0dcfb 100644
--- a/ui/spice-input.c
+++ b/ui/spice-input.c
@@ -176,7 +176,7 @@ static void tablet_position(SpiceTabletInstance* sin, int x, int y,
spice_update_buttons(pointer, 0, buttons_state);
qemu_input_queue_abs(NULL, INPUT_AXIS_X, x, pointer->width);
- qemu_input_queue_abs(NULL, INPUT_AXIS_Y, y, pointer->width);
+ qemu_input_queue_abs(NULL, INPUT_AXIS_Y, y, pointer->height);
qemu_input_event_sync();
}
diff --git a/ui/vnc.c b/ui/vnc.c
index 9c84b3e0fd..2d7def9aa2 100644
--- a/ui/vnc.c
+++ b/ui/vnc.c
@@ -888,7 +888,7 @@ static int vnc_update_client(VncState *vs, int has_dirty, bool sync)
VncDisplay *vd = vs->vd;
VncJob *job;
int y;
- int height;
+ int height, width;
int n = 0;
if (vs->output.offset && !vs->audio_cap && !vs->force_update)
@@ -907,6 +907,7 @@ static int vnc_update_client(VncState *vs, int has_dirty, bool sync)
job = vnc_job_new(vs);
height = MIN(pixman_image_get_height(vd->server), vs->client_height);
+ width = MIN(pixman_image_get_width(vd->server), vs->client_width);
y = 0;
for (;;) {
@@ -925,8 +926,11 @@ static int vnc_update_client(VncState *vs, int has_dirty, bool sync)
VNC_DIRTY_BPL(vs), x);
bitmap_clear(vs->dirty[y], x, x2 - x);
h = find_and_clear_dirty_height(vs, y, x, x2, height);
- n += vnc_job_add_rect(job, x * VNC_DIRTY_PIXELS_PER_BIT, y,
- (x2 - x) * VNC_DIRTY_PIXELS_PER_BIT, h);
+ x2 = MIN(x2, width / VNC_DIRTY_PIXELS_PER_BIT);
+ if (x2 > x) {
+ n += vnc_job_add_rect(job, x * VNC_DIRTY_PIXELS_PER_BIT, y,
+ (x2 - x) * VNC_DIRTY_PIXELS_PER_BIT, h);
+ }
}
vnc_job_push(job);
@@ -992,7 +996,7 @@ static void audio_add(VncState *vs)
struct audio_capture_ops ops;
if (vs->audio_cap) {
- monitor_printf(default_mon, "audio already running\n");
+ error_report("audio already running");
return;
}
@@ -1002,7 +1006,7 @@ static void audio_add(VncState *vs)
vs->audio_cap = AUD_add_capture(&vs->as, &ops, vs);
if (!vs->audio_cap) {
- monitor_printf(default_mon, "Failed to add audio capture\n");
+ error_report("Failed to add audio capture");
}
}
diff --git a/user-exec.c b/user-exec.c
index 82bfa66ce3..8ed6fec814 100644
--- a/user-exec.c
+++ b/user-exec.c
@@ -38,19 +38,22 @@
//#define DEBUG_SIGNAL
-static void exception_action(CPUArchState *env1)
+static void exception_action(CPUState *cpu)
{
#if defined(TARGET_I386)
- raise_exception_err(env1, env1->exception_index, env1->error_code);
+ X86CPU *x86_cpu = X86_CPU(cpu);
+ CPUX86State *env1 = &x86_cpu->env;
+
+ raise_exception_err(env1, cpu->exception_index, env1->error_code);
#else
- cpu_loop_exit(env1);
+ cpu_loop_exit(cpu);
#endif
}
/* exit the current TB from a signal handler. The host registers are
restored in a state compatible with the CPU emulator
*/
-void cpu_resume_from_signal(CPUArchState *env1, void *puc)
+void cpu_resume_from_signal(CPUState *cpu, void *puc)
{
#ifdef __linux__
struct ucontext *uc = puc;
@@ -70,8 +73,8 @@ void cpu_resume_from_signal(CPUArchState *env1, void *puc)
sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
}
- env1->exception_index = -1;
- siglongjmp(env1->jmp_env, 1);
+ cpu->exception_index = -1;
+ siglongjmp(cpu->jmp_env, 1);
}
/* 'pc' is the host PC at which the exception was raised. 'address' is
@@ -82,7 +85,8 @@ static inline int handle_cpu_signal(uintptr_t pc, unsigned long address,
int is_write, sigset_t *old_set,
void *puc)
{
- CPUArchState *env;
+ CPUState *cpu;
+ CPUClass *cc;
int ret;
#if defined(DEBUG_SIGNAL)
@@ -99,9 +103,11 @@ static inline int handle_cpu_signal(uintptr_t pc, unsigned long address,
are still valid segv ones */
address = h2g_nocheck(address);
- env = current_cpu->env_ptr;
+ cpu = current_cpu;
+ cc = CPU_GET_CLASS(cpu);
/* see if it is an MMU fault */
- ret = cpu_handle_mmu_fault(env, address, is_write, MMU_USER_IDX);
+ g_assert(cc->handle_mmu_fault);
+ ret = cc->handle_mmu_fault(cpu, address, is_write, MMU_USER_IDX);
if (ret < 0) {
return 0; /* not an MMU fault */
}
@@ -109,12 +115,12 @@ static inline int handle_cpu_signal(uintptr_t pc, unsigned long address,
return 1; /* the MMU fault was handled without causing real CPU fault */
}
/* now we have a real cpu fault */
- cpu_restore_state(env, pc);
+ cpu_restore_state(cpu, pc);
/* we restore the process signal mask as the sigreturn should
do it (XXX: use sigsetjmp) */
sigprocmask(SIG_SETMASK, old_set, NULL);
- exception_action(env);
+ exception_action(cpu);
/* never comes here */
return 1;
@@ -459,16 +465,29 @@ int cpu_signal_handler(int host_signum, void *pinfo,
#elif defined(__aarch64__)
-int cpu_signal_handler(int host_signum, void *pinfo,
- void *puc)
+int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
siginfo_t *info = pinfo;
struct ucontext *uc = puc;
- uint64_t pc;
- int is_write = 0; /* XXX how to determine? */
+ uintptr_t pc = uc->uc_mcontext.pc;
+ uint32_t insn = *(uint32_t *)pc;
+ bool is_write;
- pc = uc->uc_mcontext.pc;
- return handle_cpu_signal(pc, (uint64_t)info->si_addr,
+ /* XXX: need kernel patch to get write flag faster. */
+ is_write = ( (insn & 0xbfff0000) == 0x0c000000 /* C3.3.1 */
+ || (insn & 0xbfe00000) == 0x0c800000 /* C3.3.2 */
+ || (insn & 0xbfdf0000) == 0x0d000000 /* C3.3.3 */
+ || (insn & 0xbfc00000) == 0x0d800000 /* C3.3.4 */
+ || (insn & 0x3f400000) == 0x08000000 /* C3.3.6 */
+ || (insn & 0x3bc00000) == 0x39000000 /* C3.3.13 */
+ || (insn & 0x3fc00000) == 0x3d800000 /* ... 128bit */
+ /* Ignore bits 10, 11 & 21, controlling indexing. */
+ || (insn & 0x3bc00000) == 0x38000000 /* C3.3.8-12 */
+ || (insn & 0x3fe00000) == 0x3c800000 /* ... 128bit */
+ /* Ignore bits 23 & 24, controlling indexing. */
+ || (insn & 0x3a400000) == 0x28000000); /* C3.3.7,14-16 */
+
+ return handle_cpu_signal(pc, (uintptr_t)info->si_addr,
is_write, &uc->uc_sigmask, puc);
}
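The mask tests added here classify the faulting instruction as a store by matching the A64 load/store encoding groups (the C3.3.x comments refer to sections of the ARMv8 Architecture Reference Manual). A worked check of one case, assuming a plain unsigned-immediate STR/LDR pair (C3.3.13):

/* str w1, [x0] encodes as 0xb9000001: (0xb9000001 & 0x3bc00000) == 0x39000000,
 * so is_write is set (opc bits 23:22 == 00 select the store form).
 * ldr w1, [x0] encodes as 0xb9400001: (0xb9400001 & 0x3bc00000) == 0x39400000,
 * which matches none of the patterns, so the fault is treated as a read.
 */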
diff --git a/util/Makefile.objs b/util/Makefile.objs
index 937376b082..df83b629a0 100644
--- a/util/Makefile.objs
+++ b/util/Makefile.objs
@@ -14,3 +14,4 @@ util-obj-y += crc32c.o
util-obj-y += throttle.o
util-obj-y += getauxval.o
util-obj-y += readline.o
+util-obj-y += rfifolock.o
diff --git a/util/cutils.c b/util/cutils.c
index 0116fcde74..b337293239 100644
--- a/util/cutils.c
+++ b/util/cutils.c
@@ -27,6 +27,7 @@
#include "qemu/sockets.h"
#include "qemu/iov.h"
+#include "net/net.h"
void strpadcpy(char *buf, int buf_size, const char *str, char pad)
{
@@ -530,3 +531,16 @@ int parse_debug_env(const char *name, int max, int initial)
}
return debug;
}
+
+/*
+ * Helper to print an Ethernet MAC address
+ */
+const char *qemu_ether_ntoa(const MACAddr *mac)
+{
+ static char ret[18];
+
+ snprintf(ret, sizeof(ret), "%02x:%02x:%02x:%02x:%02x:%02x",
+ mac->a[0], mac->a[1], mac->a[2], mac->a[3], mac->a[4], mac->a[5]);
+
+ return ret;
+}
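Note that qemu_ether_ntoa() formats into a static buffer, so the result is only valid until the next call and the helper is not reentrant; callers that need to keep the string must copy it. A hedged usage sketch (the MAC value is made up):

/* Sketch only; prints "52:54:00:12:34:56".  Copy the string if it must
 * survive a later call to qemu_ether_ntoa().
 */
MACAddr mac = { .a = { 0x52, 0x54, 0x00, 0x12, 0x34, 0x56 } };
printf("mac: %s\n", qemu_ether_ntoa(&mac));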
diff --git a/util/error.c b/util/error.c
index f11f1d57a0..2bb42e1c4b 100644
--- a/util/error.c
+++ b/util/error.c
@@ -12,10 +12,7 @@
#include "qemu-common.h"
#include "qapi/error.h"
-#include "qapi/qmp/qjson.h"
-#include "qapi/qmp/qdict.h"
-#include "qapi-types.h"
-#include "qapi/qmp/qerror.h"
+#include "qemu/error-report.h"
struct Error
{
diff --git a/util/module.c b/util/module.c
index 863a8a32a3..214effb39f 100644
--- a/util/module.c
+++ b/util/module.c
@@ -163,7 +163,7 @@ out:
}
#endif
-void module_load(module_init_type type)
+static void module_load(module_init_type type)
{
#ifdef CONFIG_MODULES
char *fname = NULL;
diff --git a/util/osdep.c b/util/osdep.c
index bd4f530ad1..a9029f8894 100644
--- a/util/osdep.c
+++ b/util/osdep.c
@@ -436,6 +436,24 @@ int socket_init(void)
return 0;
}
+/* Ensure that glib is running in multi-threaded mode */
+static void __attribute__((constructor)) thread_init(void)
+{
+ if (!g_thread_supported()) {
+#if !GLIB_CHECK_VERSION(2, 31, 0)
+ /* Old versions of glib require explicit initialization. Failure to do
+ * this results in the single-threaded code paths being taken inside
+ * glib. For example, the g_slice allocator will not be thread-safe
+ * and cause crashes.
+ */
+ g_thread_init(NULL);
+#else
+ fprintf(stderr, "glib threading failed to initialize.\n");
+ exit(1);
+#endif
+ }
+}
+
#ifndef CONFIG_IOVEC
/* helper function for iov_send_recv() */
static ssize_t
diff --git a/util/oslib-posix.c b/util/oslib-posix.c
index c2eeb4fe40..8e9c770d28 100644
--- a/util/oslib-posix.c
+++ b/util/oslib-posix.c
@@ -63,6 +63,10 @@ extern int daemon(int, int);
#include <sys/syscall.h>
#endif
+#ifdef __FreeBSD__
+#include <sys/sysctl.h>
+#endif
+
int qemu_get_thread_id(void)
{
#if defined(__linux__)
diff --git a/util/path.c b/util/path.c
index 623219e4c5..5c59d9f1d3 100644
--- a/util/path.c
+++ b/util/path.c
@@ -160,7 +160,9 @@ void init_paths(const char *prefix)
base = new_entry("", NULL, pref_buf);
base = add_dir_maybe(base);
if (base->num_entries == 0) {
- free (base);
+ g_free(base->pathname);
+ free(base->name);
+ free(base);
base = NULL;
} else {
set_parents(base, base);
diff --git a/util/qemu-config.c b/util/qemu-config.c
index f6101012c0..f4e4f38749 100644
--- a/util/qemu-config.c
+++ b/util/qemu-config.c
@@ -20,7 +20,7 @@ static QemuOptsList *find_list(QemuOptsList **lists, const char *group,
break;
}
if (lists[i] == NULL) {
- error_set(errp, QERR_INVALID_OPTION_GROUP, group);
+ error_setg(errp, "There is no option group '%s'", group);
}
return lists[i];
}
@@ -39,6 +39,20 @@ QemuOptsList *qemu_find_opts(const char *group)
return ret;
}
+QemuOpts *qemu_find_opts_singleton(const char *group)
+{
+ QemuOptsList *list;
+ QemuOpts *opts;
+
+ list = qemu_find_opts(group);
+ assert(list);
+ opts = qemu_opts_find(list, NULL);
+ if (!opts) {
+ opts = qemu_opts_create(list, NULL, 0, &error_abort);
+ }
+ return opts;
+}
+
static CommandLineParameterInfoList *query_option_descs(const QemuOptDesc *desc)
{
CommandLineParameterInfoList *param_list = NULL, *entry;
diff --git a/util/qemu-error.c b/util/qemu-error.c
index fec02c6075..7b167fd06b 100644
--- a/util/qemu-error.c
+++ b/util/qemu-error.c
@@ -20,7 +20,7 @@
*/
void error_vprintf(const char *fmt, va_list ap)
{
- if (cur_mon) {
+ if (cur_mon && !monitor_cur_is_qmp()) {
monitor_vprintf(cur_mon, fmt, ap);
} else {
vfprintf(stderr, fmt, ap);
@@ -165,7 +165,7 @@ const char *error_get_progname(void)
/*
* Print current location to current monitor if we have one, else to stderr.
*/
-void error_print_loc(void)
+static void error_print_loc(void)
{
const char *sep = "";
int i;
diff --git a/util/qemu-option.c b/util/qemu-option.c
index 9d898af443..8bbc3ad4a3 100644
--- a/util/qemu-option.c
+++ b/util/qemu-option.c
@@ -819,7 +819,7 @@ QemuOpts *qemu_opts_create(QemuOptsList *list, const char *id,
opts = qemu_opts_find(list, id);
if (opts != NULL) {
if (fail_if_exists && !list->merge_lists) {
- error_set(errp, QERR_DUPLICATE_ID, id, list->name);
+ error_setg(errp, "Duplicate ID '%s' for %s", id, list->name);
return NULL;
} else {
return opts;
diff --git a/util/qemu-thread-posix.c b/util/qemu-thread-posix.c
index 960d7f5d42..d05a6497e1 100644
--- a/util/qemu-thread-posix.c
+++ b/util/qemu-thread-posix.c
@@ -32,6 +32,13 @@ static bool name_threads;
void qemu_thread_naming(bool enable)
{
name_threads = enable;
+
+#ifndef CONFIG_THREAD_SETNAME_BYTHREAD
+ /* This is a debugging option, not fatal */
+ if (enable) {
+ fprintf(stderr, "qemu: thread naming not supported on this host\n");
+ }
+#endif
}
static void error_exit(int err, const char *msg)
@@ -394,6 +401,16 @@ void qemu_event_wait(QemuEvent *ev)
}
}
+/* Attempt to set the thread's name; note that this is for debug, so
+ * we're not going to fail if we can't set it.
+ */
+static void qemu_thread_set_name(QemuThread *thread, const char *name)
+{
+#ifdef CONFIG_PTHREAD_SETNAME_NP
+ pthread_setname_np(thread->thread, name);
+#endif
+}
+
void qemu_thread_create(QemuThread *thread, const char *name,
void *(*start_routine)(void*),
void *arg, int mode)
@@ -420,11 +437,9 @@ void qemu_thread_create(QemuThread *thread, const char *name,
if (err)
error_exit(err, __func__);
-#if defined(__GLIBC__) && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 12))
if (name_threads) {
- pthread_setname_np(thread->thread, name);
+ qemu_thread_set_name(thread, name);
}
-#endif
pthread_sigmask(SIG_SETMASK, &oldset, NULL);
diff --git a/util/qemu-thread-win32.c b/util/qemu-thread-win32.c
index b9c957b6a0..c405c9bef6 100644
--- a/util/qemu-thread-win32.c
+++ b/util/qemu-thread-win32.c
@@ -22,6 +22,8 @@ void qemu_thread_naming(bool enable)
{
/* But note we don't actually name them on Windows yet */
name_threads = enable;
+
+ fprintf(stderr, "qemu: thread naming not supported on this host\n");
}
static void error_exit(int err, const char *msg)
diff --git a/util/rfifolock.c b/util/rfifolock.c
new file mode 100644
index 0000000000..afbf7488df
--- /dev/null
+++ b/util/rfifolock.c
@@ -0,0 +1,78 @@
+/*
+ * Recursive FIFO lock
+ *
+ * Copyright Red Hat, Inc. 2013
+ *
+ * Authors:
+ * Stefan Hajnoczi <stefanha@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2 or later.
+ * See the COPYING.LIB file in the top-level directory.
+ *
+ */
+
+#include <assert.h>
+#include "qemu/rfifolock.h"
+
+void rfifolock_init(RFifoLock *r, void (*cb)(void *), void *opaque)
+{
+ qemu_mutex_init(&r->lock);
+ r->head = 0;
+ r->tail = 0;
+ qemu_cond_init(&r->cond);
+ r->nesting = 0;
+ r->cb = cb;
+ r->cb_opaque = opaque;
+}
+
+void rfifolock_destroy(RFifoLock *r)
+{
+ qemu_cond_destroy(&r->cond);
+ qemu_mutex_destroy(&r->lock);
+}
+
+/*
+ * Theory of operation:
+ *
+ * In order to ensure FIFO ordering, implement a ticketlock. Threads acquiring
+ * the lock enqueue themselves by incrementing the tail index. When the lock
+ * is unlocked, the head is incremented and waiting threads are notified.
+ *
+ * Recursive locking does not take a ticket since the head is only incremented
+ * when the outermost recursive caller unlocks.
+ */
+void rfifolock_lock(RFifoLock *r)
+{
+ qemu_mutex_lock(&r->lock);
+
+ /* Take a ticket */
+ unsigned int ticket = r->tail++;
+
+ if (r->nesting > 0 && qemu_thread_is_self(&r->owner_thread)) {
+ r->tail--; /* put ticket back, we're nesting */
+ } else {
+ while (ticket != r->head) {
+ /* Invoke optional contention callback */
+ if (r->cb) {
+ r->cb(r->cb_opaque);
+ }
+ qemu_cond_wait(&r->cond, &r->lock);
+ }
+ }
+
+ qemu_thread_get_self(&r->owner_thread);
+ r->nesting++;
+ qemu_mutex_unlock(&r->lock);
+}
+
+void rfifolock_unlock(RFifoLock *r)
+{
+ qemu_mutex_lock(&r->lock);
+ assert(r->nesting > 0);
+ assert(qemu_thread_is_self(&r->owner_thread));
+ if (--r->nesting == 0) {
+ r->head++;
+ qemu_cond_broadcast(&r->cond);
+ }
+ qemu_mutex_unlock(&r->lock);
+}
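The ticket scheme makes hand-off strictly FIFO: every outermost rfifolock_lock() takes tail++, and the outermost rfifolock_unlock() advances head and broadcasts, so the waiter holding the matching ticket proceeds next. The contention callback lets a waiter nudge the current holder into releasing the lock (tests/test-rfifolock.c above does this by writing to a pipe). A minimal usage sketch, with the wake-up mechanism left as an explicitly hypothetical placeholder:

/* Sketch only: EventLoop and wake_up_lock_holder() are placeholders for
 * whatever the lock holder blocks on (e.g. a pipe watched by its poll loop,
 * as in tests/test-rfifolock.c above).
 */
static void contention_cb(void *opaque)
{
    wake_up_lock_holder((EventLoop *)opaque);  /* ask the holder to release soon */
}

static void example(EventLoop *loop)
{
    RFifoLock lock;

    rfifolock_init(&lock, contention_cb, loop);
    rfifolock_lock(&lock);      /* recursive, so nesting in the same thread is fine */
    rfifolock_lock(&lock);
    rfifolock_unlock(&lock);
    rfifolock_unlock(&lock);    /* outermost unlock advances head and wakes waiters */
    rfifolock_destroy(&lock);
}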
diff --git a/vl.c b/vl.c
index bca5c95908..236f95efd7 100644
--- a/vl.c
+++ b/vl.c
@@ -58,6 +58,7 @@ int main(int argc, char **argv)
#include <glib.h>
+#include "qemu/sockets.h"
#include "hw/hw.h"
#include "hw/boards.h"
#include "hw/usb.h"
@@ -103,7 +104,6 @@ int main(int argc, char **argv)
#include "disas/disas.h"
-#include "qemu/sockets.h"
#include "slirp/libslirp.h"
@@ -143,6 +143,9 @@ int vga_interface_type = VGA_NONE;
static int full_screen = 0;
static int no_frame = 0;
int no_quit = 0;
+#ifdef CONFIG_GTK
+static bool grab_on_hover;
+#endif
CharDriverState *serial_hds[MAX_SERIAL_PORTS];
CharDriverState *parallel_hds[MAX_PARALLEL_PORTS];
CharDriverState *virtcon_hds[MAX_VIRTIO_CONSOLES];
@@ -213,6 +216,7 @@ uint32_t xen_domid;
enum xen_mode xen_mode = XEN_EMULATE;
static int tcg_tb_size;
+static int has_defaults = 1;
static int default_serial = 1;
static int default_parallel = 1;
static int default_virtcon = 1;
@@ -506,6 +510,20 @@ static QemuOptsList qemu_name_opts = {
},
};
+static QemuOptsList qemu_mem_opts = {
+ .name = "memory",
+ .implied_opt_name = "size",
+ .head = QTAILQ_HEAD_INITIALIZER(qemu_mem_opts.head),
+ .merge_lists = true,
+ .desc = {
+ {
+ .name = "size",
+ .type = QEMU_OPT_SIZE,
+ },
+ { /* end of list */ }
+ },
+};
+
/**
* Get machine options
*
@@ -513,16 +531,7 @@ static QemuOptsList qemu_name_opts = {
*/
QemuOpts *qemu_get_machine_opts(void)
{
- QemuOptsList *list;
- QemuOpts *opts;
-
- list = qemu_find_opts("machine");
- assert(list);
- opts = qemu_opts_find(list, NULL);
- if (!opts) {
- opts = qemu_opts_create(list, NULL, 0, &error_abort);
- }
- return opts;
+ return qemu_find_opts_singleton("machine");
}
const char *qemu_get_vm_name(void)
@@ -973,7 +982,8 @@ static void parse_name(QemuOpts *opts)
bool usb_enabled(bool default_usb)
{
- return qemu_opt_get_bool(qemu_get_machine_opts(), "usb", default_usb);
+ return qemu_opt_get_bool(qemu_get_machine_opts(), "usb",
+ has_defaults && default_usb);
}
#ifndef _WIN32
@@ -1204,7 +1214,7 @@ DeviceState *get_boot_device(uint32_t position)
* memory pointed by "size" is assigned total length of the array in bytes
*
*/
-char *get_boot_devices_list(size_t *size)
+char *get_boot_devices_list(size_t *size, bool ignore_suffixes)
{
FWBootEntry *i;
size_t total = 0;
@@ -1219,7 +1229,7 @@ char *get_boot_devices_list(size_t *size)
assert(devpath);
}
- if (i->suffix && devpath) {
+ if (i->suffix && !ignore_suffixes && devpath) {
size_t bootpathlen = strlen(devpath) + strlen(i->suffix) + 1;
bootpath = g_malloc(bootpathlen);
@@ -1227,9 +1237,11 @@ char *get_boot_devices_list(size_t *size)
g_free(devpath);
} else if (devpath) {
bootpath = devpath;
- } else {
+ } else if (!ignore_suffixes) {
assert(i->suffix);
bootpath = g_strdup(i->suffix);
+ } else {
+ bootpath = g_strdup("");
}
if (total) {
@@ -1413,7 +1425,7 @@ static void smp_parse(QemuOpts *opts)
max_cpus = smp_cpus;
}
- if (max_cpus > 255) {
+ if (max_cpus > MAX_CPUMASK_BITS) {
fprintf(stderr, "Unsupported number of maxcpus\n");
exit(1);
}
@@ -1571,54 +1583,84 @@ void pcmcia_info(Monitor *mon, const QDict *qdict)
/***********************************************************/
/* machine registration */
-static QEMUMachine *first_machine = NULL;
-QEMUMachine *current_machine = NULL;
+MachineState *current_machine;
+
+static void machine_class_init(ObjectClass *oc, void *data)
+{
+ MachineClass *mc = MACHINE_CLASS(oc);
+
+ mc->qemu_machine = data;
+}
int qemu_register_machine(QEMUMachine *m)
{
- QEMUMachine **pm;
- pm = &first_machine;
- while (*pm != NULL)
- pm = &(*pm)->next;
- m->next = NULL;
- *pm = m;
+ char *name = g_strconcat(m->name, TYPE_MACHINE_SUFFIX, NULL);
+ TypeInfo ti = {
+ .name = name,
+ .parent = TYPE_MACHINE,
+ .class_init = machine_class_init,
+ .class_data = (void *)m,
+ };
+
+ type_register(&ti);
+ g_free(name);
+
return 0;
}
-static QEMUMachine *find_machine(const char *name)
+static MachineClass *find_machine(const char *name)
{
- QEMUMachine *m;
+ GSList *el, *machines = object_class_get_list(TYPE_MACHINE, false);
+ MachineClass *mc = NULL;
+
+ for (el = machines; el; el = el->next) {
+ MachineClass *temp = el->data;
- for(m = first_machine; m != NULL; m = m->next) {
- if (!strcmp(m->name, name))
- return m;
- if (m->alias && !strcmp(m->alias, name))
- return m;
+ if (!strcmp(temp->qemu_machine->name, name)) {
+ mc = temp;
+ break;
+ }
+ if (temp->qemu_machine->alias &&
+ !strcmp(temp->qemu_machine->alias, name)) {
+ mc = temp;
+ break;
+ }
}
- return NULL;
+
+ g_slist_free(machines);
+ return mc;
}
-QEMUMachine *find_default_machine(void)
+MachineClass *find_default_machine(void)
{
- QEMUMachine *m;
+ GSList *el, *machines = object_class_get_list(TYPE_MACHINE, false);
+ MachineClass *mc = NULL;
- for(m = first_machine; m != NULL; m = m->next) {
- if (m->is_default) {
- return m;
+ for (el = machines; el; el = el->next) {
+ MachineClass *temp = el->data;
+
+ if (temp->qemu_machine->is_default) {
+ mc = temp;
+ break;
}
}
- return NULL;
+
+ g_slist_free(machines);
+ return mc;
}
MachineInfoList *qmp_query_machines(Error **errp)
{
+ GSList *el, *machines = object_class_get_list(TYPE_MACHINE, false);
MachineInfoList *mach_list = NULL;
QEMUMachine *m;
- for (m = first_machine; m; m = m->next) {
+ for (el = machines; el; el = el->next) {
+ MachineClass *mc = el->data;
MachineInfoList *entry;
MachineInfo *info;
+ m = mc->qemu_machine;
info = g_malloc0(sizeof(*info));
if (m->is_default) {
info->has_is_default = true;
@@ -1639,6 +1681,7 @@ MachineInfoList *qmp_query_machines(Error **errp)
mach_list = entry;
}
+ g_slist_free(machines);
return mach_list;
}
@@ -1832,8 +1875,12 @@ void qemu_devices_reset(void)
void qemu_system_reset(bool report)
{
- if (current_machine && current_machine->reset) {
- current_machine->reset();
+ MachineClass *mc;
+
+ mc = current_machine ? MACHINE_GET_CLASS(current_machine) : NULL;
+
+ if (mc && mc->qemu_machine->reset) {
+ mc->qemu_machine->reset();
} else {
qemu_devices_reset();
}
@@ -2089,7 +2136,7 @@ static void select_vgahw (const char *p)
{
const char *opts;
- vga_interface_type = VGA_NONE;
+ assert(vga_interface_type == VGA_NONE);
if (strstart(p, "std", &opts)) {
if (vga_available()) {
vga_interface_type = VGA_STD;
@@ -2241,6 +2288,25 @@ static DisplayType select_display(const char *p)
} else if (strstart(p, "gtk", &opts)) {
#ifdef CONFIG_GTK
display = DT_GTK;
+ while (*opts) {
+ const char *nextopt;
+
+ if (strstart(opts, ",grab_on_hover=", &nextopt)) {
+ opts = nextopt;
+ if (strstart(opts, "on", &nextopt)) {
+ grab_on_hover = true;
+ } else if (strstart(opts, "off", &nextopt)) {
+ grab_on_hover = false;
+ } else {
+ goto invalid_gtk_args;
+ }
+ } else {
+ invalid_gtk_args:
+ fprintf(stderr, "Invalid GTK option string: %s\n", p);
+ exit(1);
+ }
+ opts = nextopt;
+ }
#else
fprintf(stderr, "GTK support is disabled\n");
exit(1);
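
Assuming the standard -display plumbing that calls select_display(), the new suboption is driven from the command line as, for example:

    qemu-system-x86_64 -display gtk,grab_on_hover=on

(the binary name and the rest of the guest configuration are illustrative). Any value other than on/off, or an unknown suboption, is rejected with the "Invalid GTK option string" error above.
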
@@ -2605,24 +2671,34 @@ static int debugcon_parse(const char *devname)
return 0;
}
-static QEMUMachine *machine_parse(const char *name)
+static MachineClass *machine_parse(const char *name)
{
- QEMUMachine *m, *machine = NULL;
+ MachineClass *mc = NULL;
+ GSList *el, *machines = object_class_get_list(TYPE_MACHINE, false);
if (name) {
- machine = find_machine(name);
+ mc = find_machine(name);
}
- if (machine) {
- return machine;
+ if (mc) {
+ return mc;
}
- printf("Supported machines are:\n");
- for (m = first_machine; m != NULL; m = m->next) {
- if (m->alias) {
- printf("%-20s %s (alias of %s)\n", m->alias, m->desc, m->name);
+ if (name && !is_help_option(name)) {
+ error_report("Unsupported machine type");
+ error_printf("Use -machine help to list supported machines!\n");
+ } else {
+ printf("Supported machines are:\n");
+ for (el = machines; el; el = el->next) {
+ MachineClass *mc = el->data;
+ QEMUMachine *m = mc->qemu_machine;
+ if (m->alias) {
+ printf("%-20s %s (alias of %s)\n", m->alias, m->desc, m->name);
+ }
+ printf("%-20s %s%s\n", m->name, m->desc,
+ m->is_default ? " (default)" : "");
}
- printf("%-20s %s%s\n", m->name, m->desc,
- m->is_default ? " (default)" : "");
}
+
+ g_slist_free(machines);
exit(!name || !is_help_option(name));
}
@@ -2669,7 +2745,7 @@ static int configure_accelerator(QEMUMachine *machine)
if (!accel_list[i].available()) {
printf("%s not supported for this target\n",
accel_list[i].name);
- continue;
+ break;
}
*(accel_list[i].allowed) = true;
ret = accel_list[i].init(machine);
@@ -2871,9 +2947,10 @@ int main(int argc, char **argv, char **envp)
int optind;
const char *optarg;
const char *loadvm = NULL;
+ MachineClass *machine_class;
QEMUMachine *machine;
const char *cpu_model;
- const char *vga_model = "none";
+ const char *vga_model = NULL;
const char *qtest_chrdev = NULL;
const char *qtest_log = NULL;
const char *pid_file = NULL;
@@ -2892,20 +2969,14 @@ int main(int argc, char **argv, char **envp)
};
const char *trace_events = NULL;
const char *trace_file = NULL;
+ const ram_addr_t default_ram_size = (ram_addr_t)DEFAULT_RAM_SIZE *
+ 1024 * 1024;
atexit(qemu_run_exit_notifiers);
error_set_progname(argv[0]);
qemu_init_exec_dir(argv[0]);
g_mem_set_vtable(&mem_trace);
- if (!g_thread_supported()) {
-#if !GLIB_CHECK_VERSION(2, 31, 0)
- g_thread_init(NULL);
-#else
- fprintf(stderr, "glib threading failed to initialize.\n");
- exit(1);
-#endif
- }
module_call_init(MODULE_INIT_QOM);
@@ -2923,6 +2994,7 @@ int main(int argc, char **argv, char **envp)
qemu_add_opts(&qemu_trace_opts);
qemu_add_opts(&qemu_option_rom_opts);
qemu_add_opts(&qemu_machine_opts);
+ qemu_add_opts(&qemu_mem_opts);
qemu_add_opts(&qemu_smp_opts);
qemu_add_opts(&qemu_boot_opts);
qemu_add_opts(&qemu_sandbox_opts);
@@ -2945,9 +3017,9 @@ int main(int argc, char **argv, char **envp)
os_setup_early_signal_handling();
module_call_init(MODULE_INIT_MACHINE);
- machine = find_default_machine();
+ machine_class = find_default_machine();
cpu_model = NULL;
- ram_size = 0;
+ ram_size = default_ram_size;
snapshot = 0;
cyls = heads = secs = 0;
translation = BIOS_ATA_TRANSLATION_AUTO;
@@ -2970,7 +3042,6 @@ int main(int argc, char **argv, char **envp)
if (argv[optind][0] != '-') {
/* disk image */
optind++;
- continue;
} else {
const QEMUOption *popt;
@@ -3011,7 +3082,7 @@ int main(int argc, char **argv, char **envp)
}
switch(popt->index) {
case QEMU_OPTION_M:
- machine = machine_parse(optarg);
+ machine_class = machine_parse(optarg);
break;
case QEMU_OPTION_no_kvm_irqchip: {
olist = qemu_find_opts("machine");
@@ -3234,20 +3305,48 @@ int main(int argc, char **argv, char **envp)
exit(0);
break;
case QEMU_OPTION_m: {
- int64_t value;
uint64_t sz;
- char *end;
+ const char *mem_str;
- value = strtosz(optarg, &end);
- if (value < 0 || *end) {
- fprintf(stderr, "qemu: invalid ram size: %s\n", optarg);
- exit(1);
+ opts = qemu_opts_parse(qemu_find_opts("memory"),
+ optarg, 1);
+ if (!opts) {
+ exit(EXIT_FAILURE);
+ }
+
+ mem_str = qemu_opt_get(opts, "size");
+ if (!mem_str) {
+ error_report("invalid -m option, missing 'size' option");
+ exit(EXIT_FAILURE);
}
- sz = QEMU_ALIGN_UP((uint64_t)value, 8192);
+ if (!*mem_str) {
+ error_report("missing 'size' option value");
+ exit(EXIT_FAILURE);
+ }
+
+ sz = qemu_opt_get_size(opts, "size", ram_size);
+
+ /* Fix up legacy suffix-less format */
+ if (g_ascii_isdigit(mem_str[strlen(mem_str) - 1])) {
+ uint64_t overflow_check = sz;
+
+ sz <<= 20;
+ if ((sz >> 20) != overflow_check) {
+ error_report("too large 'size' option value");
+ exit(EXIT_FAILURE);
+ }
+ }
+
+ /* backward compatibility behaviour for case "-m 0" */
+ if (sz == 0) {
+ sz = default_ram_size;
+ }
+
+ sz = QEMU_ALIGN_UP(sz, 8192);
ram_size = sz;
if (ram_size != sz) {
- fprintf(stderr, "qemu: ram size too large\n");
- exit(1);
+ error_report("ram size too large");
+ exit(EXIT_FAILURE);
}
break;
}
@@ -3567,7 +3666,7 @@ int main(int argc, char **argv, char **envp)
}
optarg = qemu_opt_get(opts, "type");
if (optarg) {
- machine = machine_parse(optarg);
+ machine_class = machine_parse(optarg);
}
break;
case QEMU_OPTION_no_kvm:
@@ -3723,16 +3822,7 @@ int main(int argc, char **argv, char **envp)
runstate_set(RUN_STATE_INMIGRATE);
break;
case QEMU_OPTION_nodefaults:
- default_serial = 0;
- default_parallel = 0;
- default_virtcon = 0;
- default_sclp = 0;
- default_monitor = 0;
- default_net = 0;
- default_floppy = 0;
- default_cdrom = 0;
- default_sdcard = 0;
- default_vga = 0;
+ has_defaults = 0;
break;
case QEMU_OPTION_xen_domid:
if (!(xen_available())) {
@@ -3800,7 +3890,9 @@ int main(int argc, char **argv, char **envp)
}
}
qemu_config_write(fp);
- fclose(fp);
+ if (fp != stdout) {
+ fclose(fp);
+ }
break;
}
case QEMU_OPTION_qtest:
@@ -3873,11 +3965,18 @@ int main(int argc, char **argv, char **envp)
}
#endif
- if (machine == NULL) {
- fprintf(stderr, "No machine found.\n");
+ if (machine_class == NULL) {
+ fprintf(stderr, "No machine specified, and there is no default.\n"
+ "Use -machine help to list supported machines!\n");
exit(1);
}
+ current_machine = MACHINE(object_new(object_class_get_name(
+ OBJECT_CLASS(machine_class))));
+ object_property_add_child(object_get_root(), "machine",
+ OBJECT(current_machine), &error_abort);
+
+ machine = machine_class->qemu_machine;
if (machine->hw_version) {
qemu_set_version(machine->hw_version);
}
@@ -3959,27 +4058,35 @@ int main(int argc, char **argv, char **envp)
qemu_opts_foreach(qemu_find_opts("device"), default_driver_check, NULL, 0);
qemu_opts_foreach(qemu_find_opts("global"), default_driver_check, NULL, 0);
- if (machine->no_serial) {
+ if (!vga_model && !default_vga) {
+ vga_interface_type = VGA_DEVICE;
+ }
+ if (!has_defaults || machine->no_serial) {
default_serial = 0;
}
- if (machine->no_parallel) {
+ if (!has_defaults || machine->no_parallel) {
default_parallel = 0;
}
- if (!machine->use_virtcon) {
+ if (!has_defaults || !machine->use_virtcon) {
default_virtcon = 0;
}
- if (!machine->use_sclp) {
+ if (!has_defaults || !machine->use_sclp) {
default_sclp = 0;
}
- if (machine->no_floppy) {
+ if (!has_defaults || machine->no_floppy) {
default_floppy = 0;
}
- if (machine->no_cdrom) {
+ if (!has_defaults || machine->no_cdrom) {
default_cdrom = 0;
}
- if (machine->no_sdcard) {
+ if (!has_defaults || machine->no_sdcard) {
default_sdcard = 0;
}
+ if (!has_defaults) {
+ default_monitor = 0;
+ default_net = 0;
+ default_vga = 0;
+ }
if (is_daemonized()) {
/* According to documentation and historically, -nographic redirects
@@ -4084,10 +4191,8 @@ int main(int argc, char **argv, char **envp)
exit(1);
}
- /* init the memory */
- if (ram_size == 0) {
- ram_size = DEFAULT_RAM_SIZE * 1024 * 1024;
- }
+ /* store value for future use */
+ qemu_opt_set_number(qemu_find_opts_singleton("memory"), "size", ram_size);
if (qemu_opts_foreach(qemu_find_opts("device"), device_help_func, NULL, 0)
!= 0) {
@@ -4284,7 +4389,9 @@ int main(int argc, char **argv, char **envp)
vga_model = "std";
}
}
- select_vgahw(vga_model);
+ if (vga_model) {
+ select_vgahw(vga_model);
+ }
if (watchdog) {
i = select_watchdog(watchdog);
@@ -4299,14 +4406,16 @@ int main(int argc, char **argv, char **envp)
qdev_machine_init();
- QEMUMachineInitArgs args = { .machine = machine,
- .ram_size = ram_size,
- .boot_order = boot_order,
- .kernel_filename = kernel_filename,
- .kernel_cmdline = kernel_cmdline,
- .initrd_filename = initrd_filename,
- .cpu_model = cpu_model };
- machine->init(&args);
+ current_machine->init_args = (QEMUMachineInitArgs) {
+ .machine = machine,
+ .ram_size = ram_size,
+ .boot_order = boot_order,
+ .kernel_filename = kernel_filename,
+ .kernel_cmdline = kernel_cmdline,
+ .initrd_filename = initrd_filename,
+ .cpu_model = cpu_model };
+
+ machine->init(&current_machine->init_args);
audio_init();
@@ -4314,8 +4423,6 @@ int main(int argc, char **argv, char **envp)
set_numa_modes();
- current_machine = machine;
-
/* init USB devices */
if (usb_enabled(false)) {
if (foreach_device_config(DEV_USB, usb_parse) < 0)
@@ -4351,7 +4458,7 @@ int main(int argc, char **argv, char **envp)
#endif
#if defined(CONFIG_GTK)
case DT_GTK:
- gtk_display_init(ds, full_screen);
+ gtk_display_init(ds, full_screen, grab_on_hover);
break;
#endif
default:
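
One easy-to-miss detail of the -m conversion above: the QemuOpts size parser already scales suffixed values ("512M", "2G") to bytes, so only a bare, suffix-less number still means mebibytes, for backward compatibility. The shift by 20 that re-applies this meaning is checked for overflow simply by shifting back. A stand-alone sketch of that check (helper name is illustrative, not QEMU code):

    #include <stdint.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Convert a suffix-less -m value (MiB) to bytes; fail if the result does
     * not fit in 64 bits, mirroring the "(sz >> 20) != overflow_check" test
     * in the hunk above. */
    static bool mib_to_bytes(uint64_t mib, uint64_t *bytes)
    {
        uint64_t sz = mib << 20;
        if ((sz >> 20) != mib) {
            return false;               /* high bits were shifted out */
        }
        *bytes = sz;
        return true;
    }

    int main(void)
    {
        uint64_t bytes;
        printf("512 MiB ok: %d\n", mib_to_bytes(512, &bytes));
        printf("2^50 MiB ok: %d\n", mib_to_bytes(1ULL << 50, &bytes));
        return 0;
    }
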
diff --git a/vmstate.c b/vmstate.c
index d1f5eb0e6a..b689f2f9b3 100644
--- a/vmstate.c
+++ b/vmstate.c
@@ -3,6 +3,7 @@
#include "migration/qemu-file.h"
#include "migration/vmstate.h"
#include "qemu/bitops.h"
+#include "trace.h"
static void vmstate_subsection_save(QEMUFile *f, const VMStateDescription *vmsd,
void *opaque);
@@ -73,6 +74,7 @@ int vmstate_load_state(QEMUFile *f, const VMStateDescription *vmsd,
}
if (ret < 0) {
+ trace_vmstate_load_field_error(field->name, ret);
return ret;
}
}
diff --git a/xbzrle.c b/xbzrle.c
index fbcb35d0e3..8e220bf25b 100644
--- a/xbzrle.c
+++ b/xbzrle.c
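
Context for the hunks below: the word-at-a-time scan finds where the old and new buffers start matching again by testing whether xor = old ^ new contains a zero byte, using the classic predicate (x - mask) & ~x & (mask << 7) with mask holding 0x01 in every byte. Making xor and mask unsigned matters because x - mask may wrap, which is undefined behaviour for signed long but well defined for unsigned arithmetic. A stand-alone illustration of the predicate (names are not QEMU's):

    #include <stdio.h>
    #include <string.h>

    /* Non-zero iff at least one byte of x is zero (the "haszero" trick).
     * The cast truncates the mask to 0x01010101 on 32-bit longs, which is
     * fine, exactly as the comment in the hunk below notes. */
    static unsigned long has_zero_byte(unsigned long x)
    {
        unsigned long mask = (unsigned long)0x0101010101010101ULL;
        return (x - mask) & ~x & (mask << 7);
    }

    int main(void)
    {
        unsigned long oldw, neww;

        memcpy(&oldw, "ABCDEFGH", sizeof(oldw));
        memcpy(&neww, "AxCDEFGH", sizeof(neww));

        /* XOR of equal bytes is zero, so the predicate fires as soon as any
         * byte position still matches between the two words. */
        printf("some byte matches: %s\n",
               has_zero_byte(oldw ^ neww) ? "yes" : "no");
        return 0;
    }
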
@@ -28,7 +28,7 @@ int xbzrle_encode_buffer(uint8_t *old_buf, uint8_t *new_buf, int slen,
{
uint32_t zrun_len = 0, nzrun_len = 0;
int d = 0, i = 0;
- long res, xor;
+ long res;
uint8_t *nzrun_start = NULL;
g_assert(!(((uintptr_t)old_buf | (uintptr_t)new_buf | slen) %
@@ -93,9 +93,11 @@ int xbzrle_encode_buffer(uint8_t *old_buf, uint8_t *new_buf, int slen,
/* word at a time for speed, use of 32-bit long okay */
if (!res) {
/* truncation to 32-bit long okay */
- long mask = (long)0x0101010101010101ULL;
+ unsigned long mask = (unsigned long)0x0101010101010101ULL;
while (i < slen) {
- xor = *(long *)(old_buf + i) ^ *(long *)(new_buf + i);
+ unsigned long xor;
+ xor = *(unsigned long *)(old_buf + i)
+ ^ *(unsigned long *)(new_buf + i);
if ((xor - mask) & ~xor & (mask << 7)) {
/* found the end of an nzrun within the current long */
while (old_buf[i] != new_buf[i]) {