-rw-r--r--  MAINTAINERS                    |    2
-rw-r--r--  block.c                        |   35
-rw-r--r--  block/iscsi.c                  |    9
-rw-r--r--  block/mirror.c                 |    2
-rw-r--r--  block/nbd.c                    |    2
-rw-r--r--  block/nfs.c                    |    2
-rw-r--r--  block/quorum.c                 |    4
-rw-r--r--  blockdev.c                     |    7
-rw-r--r--  docs/writing-qmp-commands.txt  |    6
-rw-r--r--  hw/net/pcnet.c                 |    1
-rw-r--r--  hw/net/xilinx_axienet.c        |    3
-rw-r--r--  linux-headers/asm-s390/kvm.h   |   24
-rw-r--r--  linux-headers/linux/kvm.h      |   17
-rw-r--r--  linux-headers/linux/vfio.h     |    6
-rw-r--r--  net/net.c                      |   10
-rw-r--r--  net/tap.c                      |   14
-rw-r--r--  qapi-schema.json               |    8
-rw-r--r--  qemu-img.c                     |   12
-rw-r--r--  qemu-options.hx                |    3
-rw-r--r--  target-alpha/fpu_helper.c      |    7
-rw-r--r--  target-alpha/helper.h          |    1
-rw-r--r--  target-alpha/translate.c       | 2363
-rw-r--r--  target-s390x/cpu.h             |    3
-rw-r--r--  target-s390x/kvm.c             |  175
-rwxr-xr-x  tests/qemu-iotests/030         |   50
-rwxr-xr-x  tests/qemu-iotests/056         |    9
-rw-r--r--  tests/qemu-iotests/iotests.py  |    5
-rw-r--r--  tests/test-qmp-input-strict.c  |    8
-rw-r--r--  trace-events                   |    4
29 files changed, 1110 insertions(+), 1682 deletions(-)
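
The block-layer hunks below repeatedly replace error_is_set(&local_err) with a plain NULL test: a set Error is simply a non-NULL pointer, so no helper is needed. A minimal sketch of the resulting idiom (helper_that_may_fail() is a hypothetical stand-in; Error, error_propagate() and the local_err convention are QEMU's real error API):

    static int open_common_sketch(BlockDriverState *bs, QDict *options,
                                  Error **errp)
    {
        Error *local_err = NULL;

        helper_that_may_fail(bs, options, &local_err);
        if (local_err) {                      /* non-NULL == "error was set" */
            error_propagate(errp, local_err); /* hand ownership to the caller */
            return -EINVAL;
        }
        return 0;
    }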
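
bdrv_open_backing_file() and bdrv_append_temp_snapshot() also move their PATH_MAX-sized filename buffers from the stack to the heap, which turns every early return into a goto so that a single g_free() covers all exit paths. The shape, as a hedged sketch (do_path_work() is hypothetical; g_malloc0()/g_free() are GLib):

    static int with_heap_path_buffer(Error **errp)
    {
        char *backing_filename = g_malloc0(PATH_MAX); /* was a stack array */
        int ret = 0;

        if (do_path_work(backing_filename, PATH_MAX, errp) < 0) {
            ret = -EINVAL;
            goto free_exit;          /* early exits must still free */
        }
        /* ... use backing_filename ... */
    free_exit:
        g_free(backing_filename);    /* one cleanup point for every path */
        return ret;
    }
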
diff --git a/MAINTAINERS b/MAINTAINERS index c66946ff07..b287ef8939 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -674,6 +674,8 @@ S: Supported F: block* F: block/ F: hw/block/ +F: qemu-img* +F: qemu-io* T: git git://repo.or.cz/qemu/kevin.git block T: git git://github.com/stefanha/qemu.git block @@ -864,7 +864,7 @@ static int bdrv_open_common(BlockDriverState *bs, BlockDriverState *file, node_name = qdict_get_try_str(options, "node-name"); bdrv_assign_node_name(bs, node_name, &local_err); - if (error_is_set(&local_err)) { + if (local_err) { error_propagate(errp, local_err); return -EINVAL; } @@ -1068,14 +1068,14 @@ fail: */ int bdrv_open_backing_file(BlockDriverState *bs, QDict *options, Error **errp) { - char backing_filename[PATH_MAX]; - int back_flags, ret; + char *backing_filename = g_malloc0(PATH_MAX); + int back_flags, ret = 0; BlockDriver *back_drv = NULL; Error *local_err = NULL; if (bs->backing_hd != NULL) { QDECREF(options); - return 0; + goto free_exit; } /* NULL means an empty set of options */ @@ -1088,10 +1088,9 @@ int bdrv_open_backing_file(BlockDriverState *bs, QDict *options, Error **errp) backing_filename[0] = '\0'; } else if (bs->backing_file[0] == '\0' && qdict_size(options) == 0) { QDECREF(options); - return 0; + goto free_exit; } else { - bdrv_get_full_backing_filename(bs, backing_filename, - sizeof(backing_filename)); + bdrv_get_full_backing_filename(bs, backing_filename, PATH_MAX); } if (bs->backing_format[0] != '\0') { @@ -1112,7 +1111,7 @@ int bdrv_open_backing_file(BlockDriverState *bs, QDict *options, Error **errp) error_setg(errp, "Could not open backing file: %s", error_get_pretty(local_err)); error_free(local_err); - return ret; + goto free_exit; } if (bs->backing_hd->file) { @@ -1123,7 +1122,9 @@ int bdrv_open_backing_file(BlockDriverState *bs, QDict *options, Error **errp) /* Recalculate the BlockLimits with the backing file */ bdrv_refresh_limits(bs); - return 0; +free_exit: + g_free(backing_filename); + return ret; } /* @@ -1180,8 +1181,7 @@ done: void bdrv_append_temp_snapshot(BlockDriverState *bs, Error **errp) { /* TODO: extra byte is a hack to ensure MAX_PATH space on Windows. 
*/ - char tmp_filename[PATH_MAX + 1]; - + char *tmp_filename = g_malloc0(PATH_MAX + 1); int64_t total_size; BlockDriver *bdrv_qcow2; QEMUOptionParameter *create_options; @@ -1197,15 +1197,15 @@ void bdrv_append_temp_snapshot(BlockDriverState *bs, Error **errp) total_size = bdrv_getlength(bs); if (total_size < 0) { error_setg_errno(errp, -total_size, "Could not get image size"); - return; + goto out; } total_size &= BDRV_SECTOR_MASK; /* Create the temporary image */ - ret = get_tmp_filename(tmp_filename, sizeof(tmp_filename)); + ret = get_tmp_filename(tmp_filename, PATH_MAX + 1); if (ret < 0) { error_setg_errno(errp, -ret, "Could not get temporary filename"); - return; + goto out; } bdrv_qcow2 = bdrv_find_format("qcow2"); @@ -1221,7 +1221,7 @@ void bdrv_append_temp_snapshot(BlockDriverState *bs, Error **errp) "'%s': %s", tmp_filename, error_get_pretty(local_err)); error_free(local_err); - return; + goto out; } /* Prepare a new options QDict for the temporary file */ @@ -1238,10 +1238,13 @@ void bdrv_append_temp_snapshot(BlockDriverState *bs, Error **errp) bs->open_flags & ~BDRV_O_SNAPSHOT, bdrv_qcow2, &local_err); if (ret < 0) { error_propagate(errp, local_err); - return; + goto out; } bdrv_append(bs_snapshot, bs); + +out: + g_free(tmp_filename); } /* diff --git a/block/iscsi.c b/block/iscsi.c index a636ea4f53..a30202b4fe 100644 --- a/block/iscsi.c +++ b/block/iscsi.c @@ -1095,16 +1095,15 @@ static struct scsi_task *iscsi_do_inquiry(struct iscsi_context *iscsi, int lun, *inq = scsi_datain_unmarshall(task); if (*inq == NULL) { error_setg(errp, "iSCSI: failed to unmarshall inquiry datain blob"); - goto fail; + goto fail_with_err; } return task; fail: - if (!error_is_set(errp)) { - error_setg(errp, "iSCSI: Inquiry command failed : %s", - iscsi_get_error(iscsi)); - } + error_setg(errp, "iSCSI: Inquiry command failed : %s", + iscsi_get_error(iscsi)); +fail_with_err: if (task != NULL) { scsi_free_scsi_task(task); } diff --git a/block/mirror.c b/block/mirror.c index 2618c3763c..36f4f8e8bd 100644 --- a/block/mirror.c +++ b/block/mirror.c @@ -680,7 +680,7 @@ void commit_active_start(BlockDriverState *bs, BlockDriverState *base, mirror_start_job(bs, base, speed, 0, 0, on_error, on_error, cb, opaque, &local_err, &commit_active_job_driver, false, base); - if (error_is_set(&local_err)) { + if (local_err) { error_propagate(errp, local_err); goto error_restore_flags; } diff --git a/block/nbd.c b/block/nbd.c index 55124239df..613f2581ae 100644 --- a/block/nbd.c +++ b/block/nbd.c @@ -175,7 +175,7 @@ static void nbd_parse_filename(const char *filename, QDict *options, InetSocketAddress *addr = NULL; addr = inet_parse(host_spec, errp); - if (error_is_set(errp)) { + if (!addr) { goto out; } diff --git a/block/nfs.c b/block/nfs.c index 98aa363e48..9fa831f160 100644 --- a/block/nfs.c +++ b/block/nfs.c @@ -343,7 +343,7 @@ static int nfs_file_open(BlockDriverState *bs, QDict *options, int flags, opts = qemu_opts_create(&runtime_opts, NULL, 0, &error_abort); qemu_opts_absorb_qdict(opts, options, &local_err); - if (error_is_set(&local_err)) { + if (local_err) { error_propagate(errp, local_err); return -EINVAL; } diff --git a/block/quorum.c b/block/quorum.c index 7f580a83b5..ecec3a5407 100644 --- a/block/quorum.c +++ b/block/quorum.c @@ -753,7 +753,7 @@ static int quorum_open(BlockDriverState *bs, QDict *options, int flags, opts = qemu_opts_create(&quorum_runtime_opts, NULL, 0, &error_abort); qemu_opts_absorb_qdict(opts, options, &local_err); - if (error_is_set(&local_err)) { + if (local_err) { ret = -EINVAL; goto 
exit; } @@ -828,7 +828,7 @@ close_exit: g_free(opened); exit: /* propagate error */ - if (error_is_set(&local_err)) { + if (local_err) { error_propagate(errp, local_err); } QDECREF(list); diff --git a/blockdev.c b/blockdev.c index 9486358e48..7810e9fb68 100644 --- a/blockdev.c +++ b/blockdev.c @@ -1115,6 +1115,7 @@ typedef struct InternalSnapshotState { static void internal_snapshot_prepare(BlkTransactionState *common, Error **errp) { + Error *local_err = NULL; const char *device; const char *name; BlockDriverState *bs; @@ -1163,8 +1164,10 @@ static void internal_snapshot_prepare(BlkTransactionState *common, } /* check whether a snapshot with name exist */ - ret = bdrv_snapshot_find_by_id_and_name(bs, NULL, name, &old_sn, errp); - if (error_is_set(errp)) { + ret = bdrv_snapshot_find_by_id_and_name(bs, NULL, name, &old_sn, + &local_err); + if (local_err) { + error_propagate(errp, local_err); return; } else if (ret) { error_setg(errp, diff --git a/docs/writing-qmp-commands.txt b/docs/writing-qmp-commands.txt index 8349dec8af..3930a9ba70 100644 --- a/docs/writing-qmp-commands.txt +++ b/docs/writing-qmp-commands.txt @@ -311,7 +311,7 @@ void hmp_hello_world(Monitor *mon, const QDict *qdict) Error *errp = NULL; qmp_hello_world(!!message, message, &errp); - if (error_is_set(&errp)) { + if (errp) { monitor_printf(mon, "%s\n", error_get_pretty(errp)); error_free(errp); return; @@ -483,7 +483,7 @@ void hmp_info_alarm_clock(Monitor *mon) Error *errp = NULL; clock = qmp_query_alarm_clock(&errp); - if (error_is_set(&errp)) { + if (errp) { monitor_printf(mon, "Could not query alarm clock information\n"); error_free(errp); return; @@ -634,7 +634,7 @@ void hmp_info_alarm_methods(Monitor *mon) Error *errp = NULL; method_list = qmp_query_alarm_methods(&errp); - if (error_is_set(&errp)) { + if (errp) { monitor_printf(mon, "Could not query alarm methods\n"); error_free(errp); return; diff --git a/hw/net/pcnet.c b/hw/net/pcnet.c index 7cb47b3f1f..ebe505784d 100644 --- a/hw/net/pcnet.c +++ b/hw/net/pcnet.c @@ -718,7 +718,6 @@ static void pcnet_s_reset(PCNetState *s) s->csr[94] = 0x0000; s->csr[100] = 0x0200; s->csr[103] = 0x0105; - s->csr[103] = 0x0105; s->csr[112] = 0x0000; s->csr[114] = 0x0000; s->csr[122] = 0x0000; diff --git a/hw/net/xilinx_axienet.c b/hw/net/xilinx_axienet.c index 839d97ca86..0f485a0283 100644 --- a/hw/net/xilinx_axienet.c +++ b/hw/net/xilinx_axienet.c @@ -142,6 +142,9 @@ tdk_write(struct PHY *phy, unsigned int req, unsigned int data) phy->regs[regnum] = data; break; } + + /* Unconditionally clear regs[BMCR][BMCR_RESET] */ + phy->regs[0] &= ~0x8000; } static void diff --git a/linux-headers/asm-s390/kvm.h b/linux-headers/asm-s390/kvm.h index cb4c1eb8a0..c003c6a73b 100644 --- a/linux-headers/asm-s390/kvm.h +++ b/linux-headers/asm-s390/kvm.h @@ -22,6 +22,8 @@ #define KVM_DEV_FLIC_CLEAR_IRQS 3 #define KVM_DEV_FLIC_APF_ENABLE 4 #define KVM_DEV_FLIC_APF_DISABLE_WAIT 5 +#define KVM_DEV_FLIC_ADAPTER_REGISTER 6 +#define KVM_DEV_FLIC_ADAPTER_MODIFY 7 /* * We can have up to 4*64k pending subchannels + 8 adapter interrupts, * as well as up to ASYNC_PF_PER_VCPU*KVM_MAX_VCPUS pfault done interrupts. 
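
The hunk below adds struct kvm_s390_io_adapter plus the KVM_DEV_FLIC_ADAPTER_REGISTER and KVM_DEV_FLIC_ADAPTER_MODIFY attribute groups. As a hedged illustration only (the kernel defines the actual semantics; flic_fd and the adapter values here are assumptions), such an adapter would be handed to an already-created FLIC device through the generic KVM device-attribute ioctl:

    #include <stdint.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>  /* struct kvm_device_attr, KVM_SET_DEVICE_ATTR */

    static int register_io_adapter(int flic_fd) /* assumed FLIC device fd */
    {
        struct kvm_s390_io_adapter adapter = {
            .id       = 42,   /* hypothetical adapter id */
            .isc      = 3,    /* interruption subclass */
            .maskable = 1,
        };
        struct kvm_device_attr attr = {
            .group = KVM_DEV_FLIC_ADAPTER_REGISTER,
            .addr  = (uint64_t)(uintptr_t)&adapter,
        };

        if (ioctl(flic_fd, KVM_SET_DEVICE_ATTR, &attr) < 0) {
            perror("KVM_DEV_FLIC_ADAPTER_REGISTER");
            return -1;
        }
        return 0;
    }
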
@@ -32,6 +34,26 @@ #define KVM_S390_MAX_FLOAT_IRQS 266250 #define KVM_S390_FLIC_MAX_BUFFER 0x2000000 +struct kvm_s390_io_adapter { + __u32 id; + __u8 isc; + __u8 maskable; + __u8 swap; + __u8 pad; +}; + +#define KVM_S390_IO_ADAPTER_MASK 1 +#define KVM_S390_IO_ADAPTER_MAP 2 +#define KVM_S390_IO_ADAPTER_UNMAP 3 + +struct kvm_s390_io_adapter_req { + __u32 id; + __u8 type; + __u8 mask; + __u16 pad0; + __u64 addr; +}; + /* for KVM_GET_REGS and KVM_SET_REGS */ struct kvm_regs { /* general purpose regs for s390 */ @@ -76,4 +98,6 @@ struct kvm_sync_regs { #define KVM_REG_S390_PFTOKEN (KVM_REG_S390 | KVM_REG_SIZE_U64 | 0x5) #define KVM_REG_S390_PFCOMPARE (KVM_REG_S390 | KVM_REG_SIZE_U64 | 0x6) #define KVM_REG_S390_PFSELECT (KVM_REG_S390 | KVM_REG_SIZE_U64 | 0x7) +#define KVM_REG_S390_PP (KVM_REG_S390 | KVM_REG_SIZE_U64 | 0x8) +#define KVM_REG_S390_GBEA (KVM_REG_S390 | KVM_REG_SIZE_U64 | 0x9) #endif diff --git a/linux-headers/linux/kvm.h b/linux-headers/linux/kvm.h index e27a4b33cf..b278ab3326 100644 --- a/linux-headers/linux/kvm.h +++ b/linux-headers/linux/kvm.h @@ -740,6 +740,9 @@ struct kvm_ppc_smmu_info { #define KVM_CAP_SPAPR_MULTITCE 94 #define KVM_CAP_EXT_EMUL_CPUID 95 #define KVM_CAP_HYPERV_TIME 96 +#define KVM_CAP_IOAPIC_POLARITY_IGNORED 97 +#define KVM_CAP_ENABLE_CAP_VM 98 +#define KVM_CAP_S390_IRQCHIP 99 #ifdef KVM_CAP_IRQ_ROUTING @@ -755,9 +758,18 @@ struct kvm_irq_routing_msi { __u32 pad; }; +struct kvm_irq_routing_s390_adapter { + __u64 ind_addr; + __u64 summary_addr; + __u64 ind_offset; + __u32 summary_offset; + __u32 adapter_id; +}; + /* gsi routing entry types */ #define KVM_IRQ_ROUTING_IRQCHIP 1 #define KVM_IRQ_ROUTING_MSI 2 +#define KVM_IRQ_ROUTING_S390_ADAPTER 3 struct kvm_irq_routing_entry { __u32 gsi; @@ -767,6 +779,7 @@ struct kvm_irq_routing_entry { union { struct kvm_irq_routing_irqchip irqchip; struct kvm_irq_routing_msi msi; + struct kvm_irq_routing_s390_adapter adapter; __u32 pad[8]; } u; }; @@ -1075,6 +1088,10 @@ struct kvm_s390_ucas_mapping { /* Available with KVM_CAP_DEBUGREGS */ #define KVM_GET_DEBUGREGS _IOR(KVMIO, 0xa1, struct kvm_debugregs) #define KVM_SET_DEBUGREGS _IOW(KVMIO, 0xa2, struct kvm_debugregs) +/* + * vcpu version available with KVM_ENABLE_CAP + * vm version available with KVM_CAP_ENABLE_CAP_VM + */ #define KVM_ENABLE_CAP _IOW(KVMIO, 0xa3, struct kvm_enable_cap) /* Available with KVM_CAP_XSAVE */ #define KVM_GET_XSAVE _IOR(KVMIO, 0xa4, struct kvm_xsave) diff --git a/linux-headers/linux/vfio.h b/linux-headers/linux/vfio.h index 17c58e0ede..26c218e692 100644 --- a/linux-headers/linux/vfio.h +++ b/linux-headers/linux/vfio.h @@ -23,6 +23,12 @@ #define VFIO_TYPE1_IOMMU 1 #define VFIO_SPAPR_TCE_IOMMU 2 +#define VFIO_TYPE1v2_IOMMU 3 +/* + * IOMMU enforces DMA cache coherence (ex. PCIe NoSnoop stripping). This + * capability is subject to change as groups are added or removed. 
+ */ +#define VFIO_DMA_CC_IOMMU 4 /* * The IOCTL interface is designed for extensibility by embedding the @@ -473,7 +473,7 @@ ssize_t qemu_deliver_packet(NetClientState *sender, if (ret == 0) { nc->receive_disabled = 1; - }; + } return ret; } @@ -1045,7 +1045,7 @@ RxFilterInfoList *qmp_query_rx_filter(bool has_name, const char *name, if (nc->info->type != NET_CLIENT_OPTIONS_KIND_NIC) { if (has_name) { error_setg(errp, "net client(%s) isn't a NIC", name); - break; + return NULL; } continue; } @@ -1064,11 +1064,15 @@ RxFilterInfoList *qmp_query_rx_filter(bool has_name, const char *name, } else if (has_name) { error_setg(errp, "net client(%s) doesn't support" " rx-filter querying", name); + return NULL; + } + + if (has_name) { break; } } - if (filter_list == NULL && !error_is_set(errp) && has_name) { + if (filter_list == NULL && has_name) { error_setg(errp, "invalid net client name: %s", name); } @@ -367,11 +367,8 @@ static int launch_script(const char *setup_script, const char *ifname, int fd) if (pid == 0) { int open_max = sysconf(_SC_OPEN_MAX), i; - for (i = 0; i < open_max; i++) { - if (i != STDIN_FILENO && - i != STDOUT_FILENO && - i != STDERR_FILENO && - i != fd) { + for (i = 3; i < open_max; i++) { + if (i != fd) { close(i); } } @@ -452,11 +449,8 @@ static int net_bridge_run_helper(const char *helper, const char *bridge) char br_buf[6+IFNAMSIZ] = {0}; char helper_cmd[PATH_MAX + sizeof(fd_buf) + sizeof(br_buf) + 15]; - for (i = 0; i < open_max; i++) { - if (i != STDIN_FILENO && - i != STDOUT_FILENO && - i != STDERR_FILENO && - i != sv[1]) { + for (i = 3; i < open_max; i++) { + if (i != sv[1]) { close(i); } } diff --git a/qapi-schema.json b/qapi-schema.json index 391356fe29..0b00427c8c 100644 --- a/qapi-schema.json +++ b/qapi-schema.json @@ -4285,10 +4285,13 @@ # # Drivers that are supported in block device operations. 
# +# @host_device, @host_cdrom, @host_floppy: Since 2.1 +# # Since: 2.0 ## { 'enum': 'BlockdevDriver', - 'data': [ 'file', 'http', 'https', 'ftp', 'ftps', 'tftp', 'vvfat', 'blkdebug', + 'data': [ 'file', 'host_device', 'host_cdrom', 'host_floppy', + 'http', 'https', 'ftp', 'ftps', 'tftp', 'vvfat', 'blkdebug', 'blkverify', 'bochs', 'cloop', 'cow', 'dmg', 'parallels', 'qcow', 'qcow2', 'qed', 'raw', 'vdi', 'vhdx', 'vmdk', 'vpc', 'quorum' ] } @@ -4555,6 +4558,9 @@ 'discriminator': 'driver', 'data': { 'file': 'BlockdevOptionsFile', + 'host_device':'BlockdevOptionsFile', + 'host_cdrom': 'BlockdevOptionsFile', + 'host_floppy':'BlockdevOptionsFile', 'http': 'BlockdevOptionsFile', 'https': 'BlockdevOptionsFile', 'ftp': 'BlockdevOptionsFile', diff --git a/qemu-img.c b/qemu-img.c index 4dae84a182..968b4c8e83 100644 --- a/qemu-img.c +++ b/qemu-img.c @@ -457,12 +457,12 @@ fail: static void dump_json_image_check(ImageCheck *check, bool quiet) { - Error *errp = NULL; + Error *local_err = NULL; QString *str; QmpOutputVisitor *ov = qmp_output_visitor_new(); QObject *obj; visit_type_ImageCheck(qmp_output_get_visitor(ov), - &check, NULL, &errp); + &check, NULL, &local_err); obj = qmp_output_get_qobject(ov); str = qobject_to_json_pretty(obj); assert(str != NULL); @@ -1731,12 +1731,12 @@ static void dump_snapshots(BlockDriverState *bs) static void dump_json_image_info_list(ImageInfoList *list) { - Error *errp = NULL; + Error *local_err = NULL; QString *str; QmpOutputVisitor *ov = qmp_output_visitor_new(); QObject *obj; visit_type_ImageInfoList(qmp_output_get_visitor(ov), - &list, NULL, &errp); + &list, NULL, &local_err); obj = qmp_output_get_qobject(ov); str = qobject_to_json_pretty(obj); assert(str != NULL); @@ -1748,12 +1748,12 @@ static void dump_json_image_info_list(ImageInfoList *list) static void dump_json_image_info(ImageInfo *info) { - Error *errp = NULL; + Error *local_err = NULL; QString *str; QmpOutputVisitor *ov = qmp_output_visitor_new(); QObject *obj; visit_type_ImageInfo(qmp_output_get_visitor(ov), - &info, NULL, &errp); + &info, NULL, &local_err); obj = qmp_output_get_qobject(ov); str = qobject_to_json_pretty(obj); assert(str != NULL); diff --git a/qemu-options.hx b/qemu-options.hx index 6457034b8c..98b4002fc7 100644 --- a/qemu-options.hx +++ b/qemu-options.hx @@ -408,7 +408,8 @@ DEF("drive", HAS_ARG, QEMU_OPTION_drive, "-drive [file=file][,if=type][,bus=n][,unit=m][,media=d][,index=i]\n" " [,cyls=c,heads=h,secs=s[,trans=t]][,snapshot=on|off]\n" " [,cache=writethrough|writeback|none|directsync|unsafe][,format=f]\n" - " [,serial=s][,addr=A][,id=name][,aio=threads|native]\n" + " [,serial=s][,addr=A][,rerror=ignore|stop|report]\n" + " [,werror=ignore|stop|report|enospc][,id=name][,aio=threads|native]\n" " [,readonly=on|off][,copy-on-read=on|off]\n" " [[,bps=b]|[[,bps_rd=r][,bps_wr=w]]]\n" " [[,iops=i]|[[,iops_rd=r][,iops_wr=w]]]\n" diff --git a/target-alpha/fpu_helper.c b/target-alpha/fpu_helper.c index fad3575549..ee731555d3 100644 --- a/target-alpha/fpu_helper.c +++ b/target-alpha/fpu_helper.c @@ -820,3 +820,10 @@ uint64_t helper_cvtqg(CPUAlphaState *env, uint64_t a) fr = int64_to_float64(a, &FP_STATUS); return float64_to_g(fr); } + +void helper_fcvtql_v_input(CPUAlphaState *env, uint64_t val) +{ + if (val != (int32_t)val) { + arith_excp(env, GETPC(), EXC_M_IOV, 0); + } +} diff --git a/target-alpha/helper.h b/target-alpha/helper.h index 4f127c49c5..2389e96ea3 100644 --- a/target-alpha/helper.h +++ b/target-alpha/helper.h @@ -96,6 +96,7 @@ DEF_HELPER_FLAGS_3(fp_exc_raise_s, TCG_CALL_NO_WG, void, 
env, i32, i32) DEF_HELPER_FLAGS_2(ieee_input, TCG_CALL_NO_WG, void, env, i64) DEF_HELPER_FLAGS_2(ieee_input_cmp, TCG_CALL_NO_WG, void, env, i64) +DEF_HELPER_FLAGS_2(fcvtql_v_input, TCG_CALL_NO_WG, void, env, i64) #if !defined (CONFIG_USER_ONLY) DEF_HELPER_2(hw_ret, void, env, i64) diff --git a/target-alpha/translate.c b/target-alpha/translate.c index e7e319b31d..d0357ff114 100644 --- a/target-alpha/translate.c +++ b/target-alpha/translate.c @@ -49,6 +49,12 @@ struct DisasContext { /* implver value for this CPU. */ int implver; + /* Temporaries for $31 and $f31 as source and destination. */ + TCGv zero; + TCGv sink; + /* Temporary for immediate constants. */ + TCGv lit; + bool singlestep_enabled; }; @@ -83,64 +89,128 @@ static TCGv cpu_pc; static TCGv cpu_lock_addr; static TCGv cpu_lock_st_addr; static TCGv cpu_lock_value; -static TCGv cpu_unique; -#ifndef CONFIG_USER_ONLY -static TCGv cpu_sysval; -static TCGv cpu_usp; -#endif - -/* register names */ -static char cpu_reg_names[10*4+21*5 + 10*5+21*6]; #include "exec/gen-icount.h" void alpha_translate_init(void) { +#define DEF_VAR(V) { &cpu_##V, #V, offsetof(CPUAlphaState, V) } + + typedef struct { TCGv *var; const char *name; int ofs; } GlobalVar; + static const GlobalVar vars[] = { + DEF_VAR(pc), + DEF_VAR(lock_addr), + DEF_VAR(lock_st_addr), + DEF_VAR(lock_value), + }; + +#undef DEF_VAR + + /* Use the symbolic register names that match the disassembler. */ + static const char greg_names[31][4] = { + "v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6", + "t7", "s0", "s1", "s2", "s3", "s4", "s5", "fp", + "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9", + "t10", "t11", "ra", "t12", "at", "gp", "sp" + }; + static const char freg_names[31][4] = { + "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", + "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15", + "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23", + "f24", "f25", "f26", "f27", "f28", "f29", "f30" + }; + + static bool done_init = 0; int i; - char *p; - static int done_init = 0; - if (done_init) + if (done_init) { return; + } + done_init = 1; cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env"); - p = cpu_reg_names; for (i = 0; i < 31; i++) { - sprintf(p, "ir%d", i); cpu_ir[i] = tcg_global_mem_new_i64(TCG_AREG0, - offsetof(CPUAlphaState, ir[i]), p); - p += (i < 10) ? 4 : 5; + offsetof(CPUAlphaState, ir[i]), + greg_names[i]); + } - sprintf(p, "fir%d", i); + for (i = 0; i < 31; i++) { cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0, - offsetof(CPUAlphaState, fir[i]), p); - p += (i < 10) ? 
5 : 6; + offsetof(CPUAlphaState, fir[i]), + freg_names[i]); } - cpu_pc = tcg_global_mem_new_i64(TCG_AREG0, - offsetof(CPUAlphaState, pc), "pc"); - - cpu_lock_addr = tcg_global_mem_new_i64(TCG_AREG0, - offsetof(CPUAlphaState, lock_addr), - "lock_addr"); - cpu_lock_st_addr = tcg_global_mem_new_i64(TCG_AREG0, - offsetof(CPUAlphaState, lock_st_addr), - "lock_st_addr"); - cpu_lock_value = tcg_global_mem_new_i64(TCG_AREG0, - offsetof(CPUAlphaState, lock_value), - "lock_value"); - - cpu_unique = tcg_global_mem_new_i64(TCG_AREG0, - offsetof(CPUAlphaState, unique), "unique"); -#ifndef CONFIG_USER_ONLY - cpu_sysval = tcg_global_mem_new_i64(TCG_AREG0, - offsetof(CPUAlphaState, sysval), "sysval"); - cpu_usp = tcg_global_mem_new_i64(TCG_AREG0, - offsetof(CPUAlphaState, usp), "usp"); -#endif + for (i = 0; i < ARRAY_SIZE(vars); ++i) { + const GlobalVar *v = &vars[i]; + *v->var = tcg_global_mem_new_i64(TCG_AREG0, v->ofs, v->name); + } +} - done_init = 1; +static TCGv load_zero(DisasContext *ctx) +{ + if (TCGV_IS_UNUSED_I64(ctx->zero)) { + ctx->zero = tcg_const_i64(0); + } + return ctx->zero; +} + +static TCGv dest_sink(DisasContext *ctx) +{ + if (TCGV_IS_UNUSED_I64(ctx->sink)) { + ctx->sink = tcg_temp_new(); + } + return ctx->sink; +} + +static TCGv load_gpr(DisasContext *ctx, unsigned reg) +{ + if (likely(reg < 31)) { + return cpu_ir[reg]; + } else { + return load_zero(ctx); + } +} + +static TCGv load_gpr_lit(DisasContext *ctx, unsigned reg, + uint8_t lit, bool islit) +{ + if (islit) { + ctx->lit = tcg_const_i64(lit); + return ctx->lit; + } else if (likely(reg < 31)) { + return cpu_ir[reg]; + } else { + return load_zero(ctx); + } +} + +static TCGv dest_gpr(DisasContext *ctx, unsigned reg) +{ + if (likely(reg < 31)) { + return cpu_ir[reg]; + } else { + return dest_sink(ctx); + } +} + +static TCGv load_fpr(DisasContext *ctx, unsigned reg) +{ + if (likely(reg < 31)) { + return cpu_fir[reg]; + } else { + return load_zero(ctx); + } +} + +static TCGv dest_fpr(DisasContext *ctx, unsigned reg) +{ + if (likely(reg < 31)) { + return cpu_fir[reg]; + } else { + return dest_sink(ctx); + } } static void gen_excp_1(int exception, int error_code) @@ -207,10 +277,10 @@ static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags) static inline void gen_load_mem(DisasContext *ctx, void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1, int flags), - int ra, int rb, int32_t disp16, int fp, - int clear) + int ra, int rb, int32_t disp16, bool fp, + bool clear) { - TCGv addr, va; + TCGv tmp, addr, va; /* LDQ_U with ra $31 is UNOP. Other various loads are forms of prefetches, which we can treat as nops. No worries about @@ -219,23 +289,22 @@ static inline void gen_load_mem(DisasContext *ctx, return; } - addr = tcg_temp_new(); - if (rb != 31) { - tcg_gen_addi_i64(addr, cpu_ir[rb], disp16); - if (clear) { - tcg_gen_andi_i64(addr, addr, ~0x7); - } - } else { - if (clear) { - disp16 &= ~0x7; - } - tcg_gen_movi_i64(addr, disp16); + tmp = tcg_temp_new(); + addr = load_gpr(ctx, rb); + + if (disp16) { + tcg_gen_addi_i64(tmp, addr, disp16); + addr = tmp; + } + if (clear) { + tcg_gen_andi_i64(tmp, addr, ~0x7); + addr = tmp; } va = (fp ? 
cpu_fir[ra] : cpu_ir[ra]); tcg_gen_qemu_load(va, addr, ctx->mem_idx); - tcg_temp_free(addr); + tcg_temp_free(tmp); } static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags) @@ -265,35 +334,27 @@ static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags) static inline void gen_store_mem(DisasContext *ctx, void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1, int flags), - int ra, int rb, int32_t disp16, int fp, - int clear) + int ra, int rb, int32_t disp16, bool fp, + bool clear) { - TCGv addr, va; + TCGv tmp, addr, va; - addr = tcg_temp_new(); - if (rb != 31) { - tcg_gen_addi_i64(addr, cpu_ir[rb], disp16); - if (clear) { - tcg_gen_andi_i64(addr, addr, ~0x7); - } - } else { - if (clear) { - disp16 &= ~0x7; - } - tcg_gen_movi_i64(addr, disp16); - } + tmp = tcg_temp_new(); + addr = load_gpr(ctx, rb); - if (ra == 31) { - va = tcg_const_i64(0); - } else { - va = (fp ? cpu_fir[ra] : cpu_ir[ra]); + if (disp16) { + tcg_gen_addi_i64(tmp, addr, disp16); + addr = tmp; } + if (clear) { + tcg_gen_andi_i64(tmp, addr, ~0x7); + addr = tmp; + } + + va = (fp ? load_fpr(ctx, ra) : load_gpr(ctx, ra)); tcg_gen_qemu_store(va, addr, ctx->mem_idx); - tcg_temp_free(addr); - if (ra == 31) { - tcg_temp_free(va); - } + tcg_temp_free(tmp); } static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb, @@ -313,11 +374,7 @@ static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb, addr = tcg_temp_local_new(); #endif - if (rb != 31) { - tcg_gen_addi_i64(addr, cpu_ir[rb], disp16); - } else { - tcg_gen_movi_i64(addr, disp16); - } + tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16); #if defined(CONFIG_USER_ONLY) /* ??? This is handled via a complicated version of compare-and-swap @@ -367,7 +424,8 @@ static bool in_superpage(DisasContext *ctx, int64_t addr) static bool use_goto_tb(DisasContext *ctx, uint64_t dest) { /* Suppress goto_tb in the case of single-steping and IO. */ - if (ctx->singlestep_enabled || (ctx->tb->cflags & CF_LAST_IO)) { + if ((ctx->tb->cflags & CF_LAST_IO) + || ctx->singlestep_enabled || singlestep) { return false; } /* If the destination is in the superpage, the page perms can't change. */ @@ -438,15 +496,11 @@ static ExitStatus gen_bcond(DisasContext *ctx, TCGCond cond, int ra, { TCGv cmp_tmp; - if (unlikely(ra == 31)) { - cmp_tmp = tcg_const_i64(0); - } else { + if (mask) { cmp_tmp = tcg_temp_new(); - if (mask) { - tcg_gen_andi_i64(cmp_tmp, cpu_ir[ra], 1); - } else { - tcg_gen_mov_i64(cmp_tmp, cpu_ir[ra]); - } + tcg_gen_andi_i64(cmp_tmp, load_gpr(ctx, ra), 1); + } else { + cmp_tmp = load_gpr(ctx, ra); } return gen_bcond_internal(ctx, cond, cmp_tmp, disp); @@ -487,83 +541,23 @@ static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src) static ExitStatus gen_fbcond(DisasContext *ctx, TCGCond cond, int ra, int32_t disp) { - TCGv cmp_tmp; - - if (unlikely(ra == 31)) { - /* Very uncommon case, but easier to optimize it to an integer - comparison than continuing with the floating point comparison. */ - return gen_bcond(ctx, cond, ra, disp, 0); - } - - cmp_tmp = tcg_temp_new(); - gen_fold_mzero(cond, cmp_tmp, cpu_fir[ra]); + TCGv cmp_tmp = tcg_temp_new(); + gen_fold_mzero(cond, cmp_tmp, load_fpr(ctx, ra)); return gen_bcond_internal(ctx, cond, cmp_tmp, disp); } -static void gen_cmov(TCGCond cond, int ra, int rb, int rc, - int islit, uint8_t lit, int mask) -{ - TCGv_i64 c1, z, v1; - - if (unlikely(rc == 31)) { - return; - } - - if (ra == 31) { - /* Very uncommon case - Do not bother to optimize. 
*/ - c1 = tcg_const_i64(0); - } else if (mask) { - c1 = tcg_const_i64(1); - tcg_gen_and_i64(c1, c1, cpu_ir[ra]); - } else { - c1 = cpu_ir[ra]; - } - if (islit) { - v1 = tcg_const_i64(lit); - } else { - v1 = cpu_ir[rb]; - } - z = tcg_const_i64(0); - - tcg_gen_movcond_i64(cond, cpu_ir[rc], c1, z, v1, cpu_ir[rc]); - - tcg_temp_free_i64(z); - if (ra == 31 || mask) { - tcg_temp_free_i64(c1); - } - if (islit) { - tcg_temp_free_i64(v1); - } -} - -static void gen_fcmov(TCGCond cond, int ra, int rb, int rc) +static void gen_fcmov(DisasContext *ctx, TCGCond cond, int ra, int rb, int rc) { - TCGv_i64 c1, z, v1; - - if (unlikely(rc == 31)) { - return; - } + TCGv_i64 va, vb, z; - c1 = tcg_temp_new_i64(); - if (unlikely(ra == 31)) { - tcg_gen_movi_i64(c1, 0); - } else { - gen_fold_mzero(cond, c1, cpu_fir[ra]); - } - if (rb == 31) { - v1 = tcg_const_i64(0); - } else { - v1 = cpu_fir[rb]; - } - z = tcg_const_i64(0); + z = load_zero(ctx); + vb = load_fpr(ctx, rb); + va = tcg_temp_new(); + gen_fold_mzero(cond, va, load_fpr(ctx, ra)); - tcg_gen_movcond_i64(cond, cpu_fir[rc], c1, z, v1, cpu_fir[rc]); + tcg_gen_movcond_i64(cond, dest_fpr(ctx, rc), va, z, vb, load_fpr(ctx, rc)); - tcg_temp_free_i64(z); - tcg_temp_free_i64(c1); - if (rb == 31) { - tcg_temp_free_i64(v1); - } + tcg_temp_free(va); } #define QUAL_RM_N 0x080 /* Round mode nearest even */ @@ -647,21 +641,21 @@ static void gen_qual_flushzero(DisasContext *ctx, int fn11) tcg_temp_free_i32(tmp); } -static TCGv gen_ieee_input(int reg, int fn11, int is_cmp) +static TCGv gen_ieee_input(DisasContext *ctx, int reg, int fn11, int is_cmp) { TCGv val; - if (reg == 31) { - val = tcg_const_i64(0); + + if (unlikely(reg == 31)) { + val = load_zero(ctx); } else { + val = cpu_fir[reg]; if ((fn11 & QUAL_S) == 0) { if (is_cmp) { - gen_helper_ieee_input_cmp(cpu_env, cpu_fir[reg]); + gen_helper_ieee_input_cmp(cpu_env, val); } else { - gen_helper_ieee_input(cpu_env, cpu_fir[reg]); + gen_helper_ieee_input(cpu_env, val); } } - val = tcg_temp_new(); - tcg_gen_mov_i64(val, cpu_fir[reg]); } return val; } @@ -721,105 +715,46 @@ static inline void gen_fp_exc_raise(int rc, int fn11) gen_fp_exc_raise_ignore(rc, fn11, fn11 & QUAL_I ? 0 : float_flag_inexact); } -static void gen_fcvtlq(int rb, int rc) +static void gen_fcvtlq(TCGv vc, TCGv vb) { - if (unlikely(rc == 31)) { - return; - } - if (unlikely(rb == 31)) { - tcg_gen_movi_i64(cpu_fir[rc], 0); - } else { - TCGv tmp = tcg_temp_new(); + TCGv tmp = tcg_temp_new(); - /* The arithmetic right shift here, plus the sign-extended mask below - yields a sign-extended result without an explicit ext32s_i64. */ - tcg_gen_sari_i64(tmp, cpu_fir[rb], 32); - tcg_gen_shri_i64(cpu_fir[rc], cpu_fir[rb], 29); - tcg_gen_andi_i64(tmp, tmp, (int32_t)0xc0000000); - tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rc], 0x3fffffff); - tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp); + /* The arithmetic right shift here, plus the sign-extended mask below + yields a sign-extended result without an explicit ext32s_i64. 
*/ + tcg_gen_sari_i64(tmp, vb, 32); + tcg_gen_shri_i64(vc, vb, 29); + tcg_gen_andi_i64(tmp, tmp, (int32_t)0xc0000000); + tcg_gen_andi_i64(vc, vc, 0x3fffffff); + tcg_gen_or_i64(vc, vc, tmp); - tcg_temp_free(tmp); - } + tcg_temp_free(tmp); } -static void gen_fcvtql(int rb, int rc) +static void gen_fcvtql(TCGv vc, TCGv vb) { - if (unlikely(rc == 31)) { - return; - } - if (unlikely(rb == 31)) { - tcg_gen_movi_i64(cpu_fir[rc], 0); - } else { - TCGv tmp = tcg_temp_new(); + TCGv tmp = tcg_temp_new(); - tcg_gen_andi_i64(tmp, cpu_fir[rb], 0xC0000000); - tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rb], 0x3FFFFFFF); - tcg_gen_shli_i64(tmp, tmp, 32); - tcg_gen_shli_i64(cpu_fir[rc], cpu_fir[rc], 29); - tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp); + tcg_gen_andi_i64(tmp, vb, (int32_t)0xc0000000); + tcg_gen_andi_i64(vc, vb, 0x3FFFFFFF); + tcg_gen_shli_i64(tmp, tmp, 32); + tcg_gen_shli_i64(vc, vc, 29); + tcg_gen_or_i64(vc, vc, tmp); - tcg_temp_free(tmp); - } + tcg_temp_free(tmp); } -static void gen_fcvtql_v(DisasContext *ctx, int rb, int rc) -{ - if (rb != 31) { - int lab = gen_new_label(); - TCGv tmp = tcg_temp_new(); - - tcg_gen_ext32s_i64(tmp, cpu_fir[rb]); - tcg_gen_brcond_i64(TCG_COND_EQ, tmp, cpu_fir[rb], lab); - gen_excp(ctx, EXCP_ARITH, EXC_M_IOV); - - gen_set_label(lab); - } - gen_fcvtql(rb, rc); -} - -#define FARITH2(name) \ - static inline void glue(gen_f, name)(int rb, int rc) \ - { \ - if (unlikely(rc == 31)) { \ - return; \ - } \ - if (rb != 31) { \ - gen_helper_ ## name(cpu_fir[rc], cpu_env, cpu_fir[rb]); \ - } else { \ - TCGv tmp = tcg_const_i64(0); \ - gen_helper_ ## name(cpu_fir[rc], cpu_env, tmp); \ - tcg_temp_free(tmp); \ - } \ - } - -/* ??? VAX instruction qualifiers ignored. */ -FARITH2(sqrtf) -FARITH2(sqrtg) -FARITH2(cvtgf) -FARITH2(cvtgq) -FARITH2(cvtqf) -FARITH2(cvtqg) - static void gen_ieee_arith2(DisasContext *ctx, void (*helper)(TCGv, TCGv_ptr, TCGv), int rb, int rc, int fn11) { TCGv vb; - /* ??? This is wrong: the instruction is not a nop, it still may - raise exceptions. */ - if (unlikely(rc == 31)) { - return; - } - gen_qual_roundmode(ctx, fn11); gen_qual_flushzero(ctx, fn11); gen_fp_exc_clear(); - vb = gen_ieee_input(rb, fn11, 0); - helper(cpu_fir[rc], cpu_env, vb); - tcg_temp_free(vb); + vb = gen_ieee_input(ctx, rb, fn11, 0); + helper(dest_fpr(ctx, rc), cpu_env, vb); gen_fp_exc_raise(rc, fn11); } @@ -837,40 +772,34 @@ IEEE_ARITH2(cvtts) static void gen_fcvttq(DisasContext *ctx, int rb, int rc, int fn11) { - TCGv vb; + TCGv vb, vc; int ignore = 0; - /* ??? This is wrong: the instruction is not a nop, it still may - raise exceptions. */ - if (unlikely(rc == 31)) { - return; - } - /* No need to set flushzero, since we have an integer output. */ gen_fp_exc_clear(); - vb = gen_ieee_input(rb, fn11, 0); + vb = gen_ieee_input(ctx, rb, fn11, 0); + vc = dest_fpr(ctx, rc); /* Almost all integer conversions use cropped rounding, and most also do not have integer overflow enabled. Special case that. */ switch (fn11) { case QUAL_RM_C: - gen_helper_cvttq_c(cpu_fir[rc], cpu_env, vb); + gen_helper_cvttq_c(vc, cpu_env, vb); break; case QUAL_V | QUAL_RM_C: case QUAL_S | QUAL_V | QUAL_RM_C: ignore = float_flag_inexact; /* FALLTHRU */ case QUAL_S | QUAL_V | QUAL_I | QUAL_RM_C: - gen_helper_cvttq_svic(cpu_fir[rc], cpu_env, vb); + gen_helper_cvttq_svic(vc, cpu_env, vb); break; default: gen_qual_roundmode(ctx, fn11); - gen_helper_cvttq(cpu_fir[rc], cpu_env, vb); + gen_helper_cvttq(vc, cpu_env, vb); ignore |= (fn11 & QUAL_V ? 0 : float_flag_overflow); ignore |= (fn11 & QUAL_I ? 
0 : float_flag_inexact); break; } - tcg_temp_free(vb); gen_fp_exc_raise_ignore(rc, fn11, ignore); } @@ -879,35 +808,21 @@ static void gen_ieee_intcvt(DisasContext *ctx, void (*helper)(TCGv, TCGv_ptr, TCGv), int rb, int rc, int fn11) { - TCGv vb; - - /* ??? This is wrong: the instruction is not a nop, it still may - raise exceptions. */ - if (unlikely(rc == 31)) { - return; - } + TCGv vb, vc; gen_qual_roundmode(ctx, fn11); - - if (rb == 31) { - vb = tcg_const_i64(0); - } else { - vb = cpu_fir[rb]; - } + vb = load_fpr(ctx, rb); + vc = dest_fpr(ctx, rc); /* The only exception that can be raised by integer conversion is inexact. Thus we only need to worry about exceptions when inexact handling is requested. */ if (fn11 & QUAL_I) { gen_fp_exc_clear(); - helper(cpu_fir[rc], cpu_env, vb); + helper(vc, cpu_env, vb); gen_fp_exc_raise(rc, fn11); } else { - helper(cpu_fir[rc], cpu_env, vb); - } - - if (rb == 31) { - tcg_temp_free(vb); + helper(vc, cpu_env, vb); } } @@ -920,144 +835,38 @@ static inline void glue(gen_f, name)(DisasContext *ctx, \ IEEE_INTCVT(cvtqs) IEEE_INTCVT(cvtqt) -static void gen_cpys_internal(int ra, int rb, int rc, int inv_a, uint64_t mask) +static void gen_cpy_mask(TCGv vc, TCGv va, TCGv vb, bool inv_a, uint64_t mask) { - TCGv va, vb, vmask; - int za = 0, zb = 0; - - if (unlikely(rc == 31)) { - return; - } - - vmask = tcg_const_i64(mask); - - TCGV_UNUSED_I64(va); - if (ra == 31) { - if (inv_a) { - va = vmask; - } else { - za = 1; - } - } else { - va = tcg_temp_new_i64(); - tcg_gen_mov_i64(va, cpu_fir[ra]); - if (inv_a) { - tcg_gen_andc_i64(va, vmask, va); - } else { - tcg_gen_and_i64(va, va, vmask); - } - } + TCGv vmask = tcg_const_i64(mask); + TCGv tmp = tcg_temp_new_i64(); - TCGV_UNUSED_I64(vb); - if (rb == 31) { - zb = 1; + if (inv_a) { + tcg_gen_andc_i64(tmp, vmask, va); } else { - vb = tcg_temp_new_i64(); - tcg_gen_andc_i64(vb, cpu_fir[rb], vmask); + tcg_gen_and_i64(tmp, va, vmask); } - switch (za << 1 | zb) { - case 0 | 0: - tcg_gen_or_i64(cpu_fir[rc], va, vb); - break; - case 0 | 1: - tcg_gen_mov_i64(cpu_fir[rc], va); - break; - case 2 | 0: - tcg_gen_mov_i64(cpu_fir[rc], vb); - break; - case 2 | 1: - tcg_gen_movi_i64(cpu_fir[rc], 0); - break; - } + tcg_gen_andc_i64(vc, vb, vmask); + tcg_gen_or_i64(vc, vc, tmp); tcg_temp_free(vmask); - if (ra != 31) { - tcg_temp_free(va); - } - if (rb != 31) { - tcg_temp_free(vb); - } -} - -static inline void gen_fcpys(int ra, int rb, int rc) -{ - gen_cpys_internal(ra, rb, rc, 0, 0x8000000000000000ULL); -} - -static inline void gen_fcpysn(int ra, int rb, int rc) -{ - gen_cpys_internal(ra, rb, rc, 1, 0x8000000000000000ULL); + tcg_temp_free(tmp); } -static inline void gen_fcpyse(int ra, int rb, int rc) -{ - gen_cpys_internal(ra, rb, rc, 0, 0xFFF0000000000000ULL); -} - -#define FARITH3(name) \ - static inline void glue(gen_f, name)(int ra, int rb, int rc) \ - { \ - TCGv va, vb; \ - \ - if (unlikely(rc == 31)) { \ - return; \ - } \ - if (ra == 31) { \ - va = tcg_const_i64(0); \ - } else { \ - va = cpu_fir[ra]; \ - } \ - if (rb == 31) { \ - vb = tcg_const_i64(0); \ - } else { \ - vb = cpu_fir[rb]; \ - } \ - \ - gen_helper_ ## name(cpu_fir[rc], cpu_env, va, vb); \ - \ - if (ra == 31) { \ - tcg_temp_free(va); \ - } \ - if (rb == 31) { \ - tcg_temp_free(vb); \ - } \ - } - -/* ??? VAX instruction qualifiers ignored. 
*/ -FARITH3(addf) -FARITH3(subf) -FARITH3(mulf) -FARITH3(divf) -FARITH3(addg) -FARITH3(subg) -FARITH3(mulg) -FARITH3(divg) -FARITH3(cmpgeq) -FARITH3(cmpglt) -FARITH3(cmpgle) - static void gen_ieee_arith3(DisasContext *ctx, void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv), int ra, int rb, int rc, int fn11) { - TCGv va, vb; - - /* ??? This is wrong: the instruction is not a nop, it still may - raise exceptions. */ - if (unlikely(rc == 31)) { - return; - } + TCGv va, vb, vc; gen_qual_roundmode(ctx, fn11); gen_qual_flushzero(ctx, fn11); gen_fp_exc_clear(); - va = gen_ieee_input(ra, fn11, 0); - vb = gen_ieee_input(rb, fn11, 0); - helper(cpu_fir[rc], cpu_env, va, vb); - tcg_temp_free(va); - tcg_temp_free(vb); + va = gen_ieee_input(ctx, ra, fn11, 0); + vb = gen_ieee_input(ctx, rb, fn11, 0); + vc = dest_fpr(ctx, rc); + helper(vc, cpu_env, va, vb); gen_fp_exc_raise(rc, fn11); } @@ -1081,21 +890,14 @@ static void gen_ieee_compare(DisasContext *ctx, void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv), int ra, int rb, int rc, int fn11) { - TCGv va, vb; - - /* ??? This is wrong: the instruction is not a nop, it still may - raise exceptions. */ - if (unlikely(rc == 31)) { - return; - } + TCGv va, vb, vc; gen_fp_exc_clear(); - va = gen_ieee_input(ra, fn11, 1); - vb = gen_ieee_input(rb, fn11, 1); - helper(cpu_fir[rc], cpu_env, va, vb); - tcg_temp_free(va); - tcg_temp_free(vb); + va = gen_ieee_input(ctx, ra, fn11, 1); + vb = gen_ieee_input(ctx, rb, fn11, 1); + vc = dest_fpr(ctx, rc); + helper(vc, cpu_env, va, vb); gen_fp_exc_raise(rc, fn11); } @@ -1117,8 +919,9 @@ static inline uint64_t zapnot_mask(uint8_t lit) int i; for (i = 0; i < 8; ++i) { - if ((lit >> i) & 1) + if ((lit >> i) & 1) { mask |= 0xffull << (i * 8); + } } return mask; } @@ -1145,165 +948,111 @@ static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit) tcg_gen_mov_i64(dest, src); break; default: - tcg_gen_andi_i64 (dest, src, zapnot_mask (lit)); + tcg_gen_andi_i64(dest, src, zapnot_mask(lit)); break; } } -static inline void gen_zapnot(int ra, int rb, int rc, int islit, uint8_t lit) -{ - if (unlikely(rc == 31)) - return; - else if (unlikely(ra == 31)) - tcg_gen_movi_i64(cpu_ir[rc], 0); - else if (islit) - gen_zapnoti(cpu_ir[rc], cpu_ir[ra], lit); - else - gen_helper_zapnot (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); -} - -static inline void gen_zap(int ra, int rb, int rc, int islit, uint8_t lit) -{ - if (unlikely(rc == 31)) - return; - else if (unlikely(ra == 31)) - tcg_gen_movi_i64(cpu_ir[rc], 0); - else if (islit) - gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~lit); - else - gen_helper_zap (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); -} - - /* EXTWH, EXTLH, EXTQH */ -static void gen_ext_h(int ra, int rb, int rc, int islit, +static void gen_ext_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit, uint8_t lit, uint8_t byte_mask) { - if (unlikely(rc == 31)) - return; - else if (unlikely(ra == 31)) - tcg_gen_movi_i64(cpu_ir[rc], 0); - else { - if (islit) { - lit = (64 - (lit & 7) * 8) & 0x3f; - tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit); - } else { - TCGv tmp1 = tcg_temp_new(); - tcg_gen_andi_i64(tmp1, cpu_ir[rb], 7); - tcg_gen_shli_i64(tmp1, tmp1, 3); - tcg_gen_neg_i64(tmp1, tmp1); - tcg_gen_andi_i64(tmp1, tmp1, 0x3f); - tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], tmp1); - tcg_temp_free(tmp1); - } - gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask); + if (islit) { + tcg_gen_shli_i64(vc, va, (64 - lit * 8) & 0x3f); + } else { + TCGv tmp = tcg_temp_new(); + tcg_gen_shli_i64(tmp, load_gpr(ctx, rb), 3); + tcg_gen_neg_i64(tmp, tmp); + tcg_gen_andi_i64(tmp, tmp, 0x3f); + 
tcg_gen_shl_i64(vc, va, tmp); + tcg_temp_free(tmp); } + gen_zapnoti(vc, vc, byte_mask); } /* EXTBL, EXTWL, EXTLL, EXTQL */ -static void gen_ext_l(int ra, int rb, int rc, int islit, +static void gen_ext_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit, uint8_t lit, uint8_t byte_mask) { - if (unlikely(rc == 31)) - return; - else if (unlikely(ra == 31)) - tcg_gen_movi_i64(cpu_ir[rc], 0); - else { - if (islit) { - tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], (lit & 7) * 8); - } else { - TCGv tmp = tcg_temp_new(); - tcg_gen_andi_i64(tmp, cpu_ir[rb], 7); - tcg_gen_shli_i64(tmp, tmp, 3); - tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], tmp); - tcg_temp_free(tmp); - } - gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask); + if (islit) { + tcg_gen_shri_i64(vc, va, (lit & 7) * 8); + } else { + TCGv tmp = tcg_temp_new(); + tcg_gen_andi_i64(tmp, load_gpr(ctx, rb), 7); + tcg_gen_shli_i64(tmp, tmp, 3); + tcg_gen_shr_i64(vc, va, tmp); + tcg_temp_free(tmp); } + gen_zapnoti(vc, vc, byte_mask); } /* INSWH, INSLH, INSQH */ -static void gen_ins_h(int ra, int rb, int rc, int islit, +static void gen_ins_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit, uint8_t lit, uint8_t byte_mask) { - if (unlikely(rc == 31)) - return; - else if (unlikely(ra == 31) || (islit && (lit & 7) == 0)) - tcg_gen_movi_i64(cpu_ir[rc], 0); - else { - TCGv tmp = tcg_temp_new(); + TCGv tmp = tcg_temp_new(); - /* The instruction description has us left-shift the byte mask - and extract bits <15:8> and apply that zap at the end. This - is equivalent to simply performing the zap first and shifting - afterward. */ - gen_zapnoti (tmp, cpu_ir[ra], byte_mask); + /* The instruction description has us left-shift the byte mask and extract + bits <15:8> and apply that zap at the end. This is equivalent to simply + performing the zap first and shifting afterward. */ + gen_zapnoti(tmp, va, byte_mask); - if (islit) { - /* Note that we have handled the lit==0 case above. */ - tcg_gen_shri_i64 (cpu_ir[rc], tmp, 64 - (lit & 7) * 8); + if (islit) { + lit &= 7; + if (unlikely(lit == 0)) { + tcg_gen_movi_i64(vc, 0); } else { - TCGv shift = tcg_temp_new(); - - /* If (B & 7) == 0, we need to shift by 64 and leave a zero. - Do this portably by splitting the shift into two parts: - shift_count-1 and 1. Arrange for the -1 by using - ones-complement instead of twos-complement in the negation: - ~((B & 7) * 8) & 63. */ - - tcg_gen_andi_i64(shift, cpu_ir[rb], 7); - tcg_gen_shli_i64(shift, shift, 3); - tcg_gen_not_i64(shift, shift); - tcg_gen_andi_i64(shift, shift, 0x3f); - - tcg_gen_shr_i64(cpu_ir[rc], tmp, shift); - tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[rc], 1); - tcg_temp_free(shift); + tcg_gen_shri_i64(vc, tmp, 64 - lit * 8); } - tcg_temp_free(tmp); + } else { + TCGv shift = tcg_temp_new(); + + /* If (B & 7) == 0, we need to shift by 64 and leave a zero. Do this + portably by splitting the shift into two parts: shift_count-1 and 1. + Arrange for the -1 by using ones-complement instead of + twos-complement in the negation: ~(B * 8) & 63. 
*/ + + tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3); + tcg_gen_not_i64(shift, shift); + tcg_gen_andi_i64(shift, shift, 0x3f); + + tcg_gen_shr_i64(vc, tmp, shift); + tcg_gen_shri_i64(vc, vc, 1); + tcg_temp_free(shift); } + tcg_temp_free(tmp); } /* INSBL, INSWL, INSLL, INSQL */ -static void gen_ins_l(int ra, int rb, int rc, int islit, +static void gen_ins_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit, uint8_t lit, uint8_t byte_mask) { - if (unlikely(rc == 31)) - return; - else if (unlikely(ra == 31)) - tcg_gen_movi_i64(cpu_ir[rc], 0); - else { - TCGv tmp = tcg_temp_new(); + TCGv tmp = tcg_temp_new(); - /* The instruction description has us left-shift the byte mask - the same number of byte slots as the data and apply the zap - at the end. This is equivalent to simply performing the zap - first and shifting afterward. */ - gen_zapnoti (tmp, cpu_ir[ra], byte_mask); + /* The instruction description has us left-shift the byte mask + the same number of byte slots as the data and apply the zap + at the end. This is equivalent to simply performing the zap + first and shifting afterward. */ + gen_zapnoti(tmp, va, byte_mask); - if (islit) { - tcg_gen_shli_i64(cpu_ir[rc], tmp, (lit & 7) * 8); - } else { - TCGv shift = tcg_temp_new(); - tcg_gen_andi_i64(shift, cpu_ir[rb], 7); - tcg_gen_shli_i64(shift, shift, 3); - tcg_gen_shl_i64(cpu_ir[rc], tmp, shift); - tcg_temp_free(shift); - } - tcg_temp_free(tmp); + if (islit) { + tcg_gen_shli_i64(vc, tmp, (lit & 7) * 8); + } else { + TCGv shift = tcg_temp_new(); + tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7); + tcg_gen_shli_i64(shift, shift, 3); + tcg_gen_shl_i64(vc, tmp, shift); + tcg_temp_free(shift); } + tcg_temp_free(tmp); } /* MSKWH, MSKLH, MSKQH */ -static void gen_msk_h(int ra, int rb, int rc, int islit, +static void gen_msk_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit, uint8_t lit, uint8_t byte_mask) { - if (unlikely(rc == 31)) - return; - else if (unlikely(ra == 31)) - tcg_gen_movi_i64(cpu_ir[rc], 0); - else if (islit) { - gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~((byte_mask << (lit & 7)) >> 8)); + if (islit) { + gen_zapnoti(vc, va, ~((byte_mask << (lit & 7)) >> 8)); } else { TCGv shift = tcg_temp_new(); TCGv mask = tcg_temp_new(); @@ -1315,17 +1064,16 @@ static void gen_msk_h(int ra, int rb, int rc, int islit, shift of 64 bits in order to generate a zero. This is done by splitting the shift into two parts, the variable shift - 1 followed by a constant 1 shift. The code we expand below is - equivalent to ~((B & 7) * 8) & 63. */ + equivalent to ~(B * 8) & 63. 
*/ - tcg_gen_andi_i64(shift, cpu_ir[rb], 7); - tcg_gen_shli_i64(shift, shift, 3); + tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3); tcg_gen_not_i64(shift, shift); tcg_gen_andi_i64(shift, shift, 0x3f); tcg_gen_movi_i64(mask, zapnot_mask (byte_mask)); tcg_gen_shr_i64(mask, mask, shift); tcg_gen_shri_i64(mask, mask, 1); - tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask); + tcg_gen_andc_i64(vc, va, mask); tcg_temp_free(mask); tcg_temp_free(shift); @@ -1333,150 +1081,27 @@ static void gen_msk_h(int ra, int rb, int rc, int islit, } /* MSKBL, MSKWL, MSKLL, MSKQL */ -static void gen_msk_l(int ra, int rb, int rc, int islit, +static void gen_msk_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit, uint8_t lit, uint8_t byte_mask) { - if (unlikely(rc == 31)) - return; - else if (unlikely(ra == 31)) - tcg_gen_movi_i64(cpu_ir[rc], 0); - else if (islit) { - gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~(byte_mask << (lit & 7))); + if (islit) { + gen_zapnoti(vc, va, ~(byte_mask << (lit & 7))); } else { TCGv shift = tcg_temp_new(); TCGv mask = tcg_temp_new(); - tcg_gen_andi_i64(shift, cpu_ir[rb], 7); + tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7); tcg_gen_shli_i64(shift, shift, 3); - tcg_gen_movi_i64(mask, zapnot_mask (byte_mask)); + tcg_gen_movi_i64(mask, zapnot_mask(byte_mask)); tcg_gen_shl_i64(mask, mask, shift); - tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask); + tcg_gen_andc_i64(vc, va, mask); tcg_temp_free(mask); tcg_temp_free(shift); } } -/* Code to call arith3 helpers */ -#define ARITH3(name) \ -static inline void glue(gen_, name)(int ra, int rb, int rc, int islit,\ - uint8_t lit) \ -{ \ - if (unlikely(rc == 31)) \ - return; \ - \ - if (ra != 31) { \ - if (islit) { \ - TCGv tmp = tcg_const_i64(lit); \ - gen_helper_ ## name(cpu_ir[rc], cpu_ir[ra], tmp); \ - tcg_temp_free(tmp); \ - } else \ - gen_helper_ ## name (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); \ - } else { \ - TCGv tmp1 = tcg_const_i64(0); \ - if (islit) { \ - TCGv tmp2 = tcg_const_i64(lit); \ - gen_helper_ ## name (cpu_ir[rc], tmp1, tmp2); \ - tcg_temp_free(tmp2); \ - } else \ - gen_helper_ ## name (cpu_ir[rc], tmp1, cpu_ir[rb]); \ - tcg_temp_free(tmp1); \ - } \ -} -ARITH3(cmpbge) -ARITH3(minub8) -ARITH3(minsb8) -ARITH3(minuw4) -ARITH3(minsw4) -ARITH3(maxub8) -ARITH3(maxsb8) -ARITH3(maxuw4) -ARITH3(maxsw4) -ARITH3(perr) - -/* Code to call arith3 helpers */ -#define ARITH3_EX(name) \ - static inline void glue(gen_, name)(int ra, int rb, int rc, \ - int islit, uint8_t lit) \ - { \ - if (unlikely(rc == 31)) { \ - return; \ - } \ - if (ra != 31) { \ - if (islit) { \ - TCGv tmp = tcg_const_i64(lit); \ - gen_helper_ ## name(cpu_ir[rc], cpu_env, \ - cpu_ir[ra], tmp); \ - tcg_temp_free(tmp); \ - } else { \ - gen_helper_ ## name(cpu_ir[rc], cpu_env, \ - cpu_ir[ra], cpu_ir[rb]); \ - } \ - } else { \ - TCGv tmp1 = tcg_const_i64(0); \ - if (islit) { \ - TCGv tmp2 = tcg_const_i64(lit); \ - gen_helper_ ## name(cpu_ir[rc], cpu_env, tmp1, tmp2); \ - tcg_temp_free(tmp2); \ - } else { \ - gen_helper_ ## name(cpu_ir[rc], cpu_env, tmp1, cpu_ir[rb]); \ - } \ - tcg_temp_free(tmp1); \ - } \ - } -ARITH3_EX(addlv) -ARITH3_EX(sublv) -ARITH3_EX(addqv) -ARITH3_EX(subqv) -ARITH3_EX(mullv) -ARITH3_EX(mulqv) - -#define MVIOP2(name) \ -static inline void glue(gen_, name)(int rb, int rc) \ -{ \ - if (unlikely(rc == 31)) \ - return; \ - if (unlikely(rb == 31)) \ - tcg_gen_movi_i64(cpu_ir[rc], 0); \ - else \ - gen_helper_ ## name (cpu_ir[rc], cpu_ir[rb]); \ -} -MVIOP2(pklb) -MVIOP2(pkwb) -MVIOP2(unpkbl) -MVIOP2(unpkbw) - -static void gen_cmp(TCGCond cond, int ra, int rb, int rc, - 
int islit, uint8_t lit) -{ - TCGv va, vb; - - if (unlikely(rc == 31)) { - return; - } - - if (ra == 31) { - va = tcg_const_i64(0); - } else { - va = cpu_ir[ra]; - } - if (islit) { - vb = tcg_const_i64(lit); - } else { - vb = cpu_ir[rb]; - } - - tcg_gen_setcond_i64(cond, cpu_ir[rc], va, vb); - - if (ra == 31) { - tcg_temp_free(va); - } - if (islit) { - tcg_temp_free(vb); - } -} - static void gen_rx(int ra, int set) { TCGv_i32 tmp; @@ -1504,11 +1129,13 @@ static ExitStatus gen_call_pal(DisasContext *ctx, int palcode) break; case 0x9E: /* RDUNIQUE */ - tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_unique); + tcg_gen_ld_i64(cpu_ir[IR_V0], cpu_env, + offsetof(CPUAlphaState, unique)); break; case 0x9F: /* WRUNIQUE */ - tcg_gen_mov_i64(cpu_unique, cpu_ir[IR_A0]); + tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env, + offsetof(CPUAlphaState, unique)); break; default: palcode &= 0xbf; @@ -1531,15 +1158,18 @@ static ExitStatus gen_call_pal(DisasContext *ctx, int palcode) break; case 0x2D: /* WRVPTPTR */ - tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env, offsetof(CPUAlphaState, vptptr)); + tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env, + offsetof(CPUAlphaState, vptptr)); break; case 0x31: /* WRVAL */ - tcg_gen_mov_i64(cpu_sysval, cpu_ir[IR_A0]); + tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env, + offsetof(CPUAlphaState, sysval)); break; case 0x32: /* RDVAL */ - tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_sysval); + tcg_gen_ld_i64(cpu_ir[IR_V0], cpu_env, + offsetof(CPUAlphaState, sysval)); break; case 0x35: { @@ -1548,7 +1178,8 @@ static ExitStatus gen_call_pal(DisasContext *ctx, int palcode) /* Note that we already know we're in kernel mode, so we know that PS only contains the 3 IPL bits. */ - tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUAlphaState, ps)); + tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, + offsetof(CPUAlphaState, ps)); /* But make sure and store only the 3 IPL bits from the user. */ tmp = tcg_temp_new(); @@ -1560,15 +1191,18 @@ static ExitStatus gen_call_pal(DisasContext *ctx, int palcode) case 0x36: /* RDPS */ - tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUAlphaState, ps)); + tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, + offsetof(CPUAlphaState, ps)); break; case 0x38: /* WRUSP */ - tcg_gen_mov_i64(cpu_usp, cpu_ir[IR_A0]); + tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env, + offsetof(CPUAlphaState, usp)); break; case 0x3A: /* RDUSP */ - tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_usp); + tcg_gen_st_i64(cpu_ir[IR_V0], cpu_env, + offsetof(CPUAlphaState, usp)); break; case 0x3C: /* WHAMI */ @@ -1648,16 +1282,10 @@ static int cpu_pr_data(int pr) return 0; } -static ExitStatus gen_mfpr(int ra, int regno) +static ExitStatus gen_mfpr(TCGv va, int regno) { int data = cpu_pr_data(regno); - /* In our emulated PALcode, these processor registers have no - side effects from reading. */ - if (ra == 31) { - return NO_EXIT; - } - /* Special help for VMTIME and WALLTIME. */ if (regno == 250 || regno == 249) { void (*helper)(TCGv) = gen_helper_get_walltime; @@ -1666,11 +1294,11 @@ static ExitStatus gen_mfpr(int ra, int regno) } if (use_icount) { gen_io_start(); - helper(cpu_ir[ra]); + helper(va); gen_io_end(); return EXIT_PC_STALE; } else { - helper(cpu_ir[ra]); + helper(va); return NO_EXIT; } } @@ -1678,28 +1306,22 @@ static ExitStatus gen_mfpr(int ra, int regno) /* The basic registers are data only, and unknown registers are read-zero, write-ignore. 
*/ if (data == 0) { - tcg_gen_movi_i64(cpu_ir[ra], 0); + tcg_gen_movi_i64(va, 0); } else if (data & PR_BYTE) { - tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, data & ~PR_BYTE); + tcg_gen_ld8u_i64(va, cpu_env, data & ~PR_BYTE); } else if (data & PR_LONG) { - tcg_gen_ld32s_i64(cpu_ir[ra], cpu_env, data & ~PR_LONG); + tcg_gen_ld32s_i64(va, cpu_env, data & ~PR_LONG); } else { - tcg_gen_ld_i64(cpu_ir[ra], cpu_env, data); + tcg_gen_ld_i64(va, cpu_env, data); } return NO_EXIT; } -static ExitStatus gen_mtpr(DisasContext *ctx, int rb, int regno) +static ExitStatus gen_mtpr(DisasContext *ctx, TCGv vb, int regno) { TCGv tmp; int data; - if (rb == 31) { - tmp = tcg_const_i64(0); - } else { - tmp = cpu_ir[rb]; - } - switch (regno) { case 255: /* TBIA */ @@ -1708,7 +1330,7 @@ static ExitStatus gen_mtpr(DisasContext *ctx, int rb, int regno) case 254: /* TBIS */ - gen_helper_tbis(cpu_env, tmp); + gen_helper_tbis(cpu_env, vb); break; case 253: @@ -1720,17 +1342,17 @@ static ExitStatus gen_mtpr(DisasContext *ctx, int rb, int regno) case 252: /* HALT */ - gen_helper_halt(tmp); + gen_helper_halt(vb); return EXIT_PC_STALE; case 251: /* ALARM */ - gen_helper_set_alarm(cpu_env, tmp); + gen_helper_set_alarm(cpu_env, vb); break; case 7: /* PALBR */ - tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUAlphaState, palbr)); + tcg_gen_st_i64(vb, cpu_env, offsetof(CPUAlphaState, palbr)); /* Changing the PAL base register implies un-chaining all of the TBs that ended with a CALL_PAL. Since the base register usually only changes during boot, flushing everything works well. */ @@ -1743,64 +1365,70 @@ static ExitStatus gen_mtpr(DisasContext *ctx, int rb, int regno) data = cpu_pr_data(regno); if (data != 0) { if (data & PR_BYTE) { - tcg_gen_st8_i64(tmp, cpu_env, data & ~PR_BYTE); + tcg_gen_st8_i64(vb, cpu_env, data & ~PR_BYTE); } else if (data & PR_LONG) { - tcg_gen_st32_i64(tmp, cpu_env, data & ~PR_LONG); + tcg_gen_st32_i64(vb, cpu_env, data & ~PR_LONG); } else { - tcg_gen_st_i64(tmp, cpu_env, data); + tcg_gen_st_i64(vb, cpu_env, data); } } break; } - if (rb == 31) { - tcg_temp_free(tmp); - } - return NO_EXIT; } #endif /* !USER_ONLY*/ +#define REQUIRE_TB_FLAG(FLAG) \ + do { \ + if ((ctx->tb->flags & (FLAG)) == 0) { \ + goto invalid_opc; \ + } \ + } while (0) + +#define REQUIRE_REG_31(WHICH) \ + do { \ + if (WHICH != 31) { \ + goto invalid_opc; \ + } \ + } while (0) + static ExitStatus translate_one(DisasContext *ctx, uint32_t insn) { - uint32_t palcode; - int32_t disp21, disp16; -#ifndef CONFIG_USER_ONLY - int32_t disp12; -#endif + int32_t disp21, disp16, disp12 __attribute__((unused)); uint16_t fn11; - uint8_t opc, ra, rb, rc, fpfn, fn7, islit, real_islit; - uint8_t lit; + uint8_t opc, ra, rb, rc, fpfn, fn7, lit; + bool islit; + TCGv va, vb, vc, tmp; + TCGv_i32 t32; ExitStatus ret; /* Decode all instruction fields */ - opc = insn >> 26; - ra = (insn >> 21) & 0x1F; - rb = (insn >> 16) & 0x1F; - rc = insn & 0x1F; - real_islit = islit = (insn >> 12) & 1; + opc = extract32(insn, 26, 6); + ra = extract32(insn, 21, 5); + rb = extract32(insn, 16, 5); + rc = extract32(insn, 0, 5); + islit = extract32(insn, 12, 1); + lit = extract32(insn, 13, 8); + + disp21 = sextract32(insn, 0, 21); + disp16 = sextract32(insn, 0, 16); + disp12 = sextract32(insn, 0, 12); + + fn11 = extract32(insn, 5, 11); + fpfn = extract32(insn, 5, 6); + fn7 = extract32(insn, 5, 7); + if (rb == 31 && !islit) { - islit = 1; + islit = true; lit = 0; - } else - lit = (insn >> 13) & 0xFF; - palcode = insn & 0x03FFFFFF; - disp21 = ((int32_t)((insn & 0x001FFFFF) << 11)) >> 11; - disp16 
= (int16_t)(insn & 0x0000FFFF); -#ifndef CONFIG_USER_ONLY - disp12 = (int32_t)((insn & 0x00000FFF) << 20) >> 20; -#endif - fn11 = (insn >> 5) & 0x000007FF; - fpfn = fn11 & 0x3F; - fn7 = (insn >> 5) & 0x0000007F; - LOG_DISAS("opc %02x ra %2d rb %2d rc %2d disp16 %6d\n", - opc, ra, rb, rc, disp16); + } ret = NO_EXIT; switch (opc) { case 0x00: /* CALL_PAL */ - ret = gen_call_pal(ctx, palcode); + ret = gen_call_pal(ctx, insn & 0x03ffffff); break; case 0x01: /* OPC01 */ @@ -1823,839 +1451,627 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn) case 0x07: /* OPC07 */ goto invalid_opc; - case 0x08: - /* LDA */ - if (likely(ra != 31)) { - if (rb != 31) - tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16); - else - tcg_gen_movi_i64(cpu_ir[ra], disp16); - } - break; + case 0x09: /* LDAH */ - if (likely(ra != 31)) { - if (rb != 31) - tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16 << 16); - else - tcg_gen_movi_i64(cpu_ir[ra], disp16 << 16); + disp16 = (uint32_t)disp16 << 16; + /* fall through */ + case 0x08: + /* LDA */ + va = dest_gpr(ctx, ra); + /* It's worth special-casing immediate loads. */ + if (rb == 31) { + tcg_gen_movi_i64(va, disp16); + } else { + tcg_gen_addi_i64(va, load_gpr(ctx, rb), disp16); } break; + case 0x0A: /* LDBU */ - if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) { - gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0); - break; - } - goto invalid_opc; + REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX); + gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0); + break; case 0x0B: /* LDQ_U */ gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1); break; case 0x0C: /* LDWU */ - if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) { - gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0); - break; - } - goto invalid_opc; + REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX); + gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0); + break; case 0x0D: /* STW */ + REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX); gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0); break; case 0x0E: /* STB */ + REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX); gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0); break; case 0x0F: /* STQ_U */ gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1); break; + case 0x10: + vc = dest_gpr(ctx, rc); + vb = load_gpr_lit(ctx, rb, lit, islit); + + if (ra == 31) { + if (fn7 == 0x00) { + /* Special case ADDL as SEXTL. */ + tcg_gen_ext32s_i64(vc, vb); + break; + } + if (fn7 == 0x29) { + /* Special case SUBQ as NEGQ. 
*/ + tcg_gen_neg_i64(vc, vb); + break; + } + } + + va = load_gpr(ctx, ra); switch (fn7) { case 0x00: /* ADDL */ - if (likely(rc != 31)) { - if (ra != 31) { - if (islit) { - tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit); - tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]); - } else { - tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); - tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]); - } - } else { - if (islit) - tcg_gen_movi_i64(cpu_ir[rc], lit); - else - tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]); - } - } + tcg_gen_add_i64(vc, va, vb); + tcg_gen_ext32s_i64(vc, vc); break; case 0x02: /* S4ADDL */ - if (likely(rc != 31)) { - if (ra != 31) { - TCGv tmp = tcg_temp_new(); - tcg_gen_shli_i64(tmp, cpu_ir[ra], 2); - if (islit) - tcg_gen_addi_i64(tmp, tmp, lit); - else - tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]); - tcg_gen_ext32s_i64(cpu_ir[rc], tmp); - tcg_temp_free(tmp); - } else { - if (islit) - tcg_gen_movi_i64(cpu_ir[rc], lit); - else - tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]); - } - } + tmp = tcg_temp_new(); + tcg_gen_shli_i64(tmp, va, 2); + tcg_gen_add_i64(tmp, tmp, vb); + tcg_gen_ext32s_i64(vc, tmp); + tcg_temp_free(tmp); break; case 0x09: /* SUBL */ - if (likely(rc != 31)) { - if (ra != 31) { - if (islit) - tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit); - else - tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); - tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]); - } else { - if (islit) - tcg_gen_movi_i64(cpu_ir[rc], -lit); - else { - tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]); - tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]); - } - } - } + tcg_gen_sub_i64(vc, va, vb); + tcg_gen_ext32s_i64(vc, vc); break; case 0x0B: /* S4SUBL */ - if (likely(rc != 31)) { - if (ra != 31) { - TCGv tmp = tcg_temp_new(); - tcg_gen_shli_i64(tmp, cpu_ir[ra], 2); - if (islit) - tcg_gen_subi_i64(tmp, tmp, lit); - else - tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]); - tcg_gen_ext32s_i64(cpu_ir[rc], tmp); - tcg_temp_free(tmp); - } else { - if (islit) - tcg_gen_movi_i64(cpu_ir[rc], -lit); - else { - tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]); - tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]); - } - } - } + tmp = tcg_temp_new(); + tcg_gen_shli_i64(tmp, va, 2); + tcg_gen_sub_i64(tmp, tmp, vb); + tcg_gen_ext32s_i64(vc, tmp); + tcg_temp_free(tmp); break; case 0x0F: /* CMPBGE */ - gen_cmpbge(ra, rb, rc, islit, lit); + gen_helper_cmpbge(vc, va, vb); break; case 0x12: /* S8ADDL */ - if (likely(rc != 31)) { - if (ra != 31) { - TCGv tmp = tcg_temp_new(); - tcg_gen_shli_i64(tmp, cpu_ir[ra], 3); - if (islit) - tcg_gen_addi_i64(tmp, tmp, lit); - else - tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]); - tcg_gen_ext32s_i64(cpu_ir[rc], tmp); - tcg_temp_free(tmp); - } else { - if (islit) - tcg_gen_movi_i64(cpu_ir[rc], lit); - else - tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]); - } - } + tmp = tcg_temp_new(); + tcg_gen_shli_i64(tmp, va, 3); + tcg_gen_add_i64(tmp, tmp, vb); + tcg_gen_ext32s_i64(vc, tmp); + tcg_temp_free(tmp); break; case 0x1B: /* S8SUBL */ - if (likely(rc != 31)) { - if (ra != 31) { - TCGv tmp = tcg_temp_new(); - tcg_gen_shli_i64(tmp, cpu_ir[ra], 3); - if (islit) - tcg_gen_subi_i64(tmp, tmp, lit); - else - tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]); - tcg_gen_ext32s_i64(cpu_ir[rc], tmp); - tcg_temp_free(tmp); - } else { - if (islit) - tcg_gen_movi_i64(cpu_ir[rc], -lit); - else { - tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]); - tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]); - } - } - } + tmp = tcg_temp_new(); + tcg_gen_shli_i64(tmp, va, 3); + tcg_gen_sub_i64(tmp, tmp, vb); + tcg_gen_ext32s_i64(vc, tmp); + tcg_temp_free(tmp); break; case 0x1D: /* CMPULT */ - 
gen_cmp(TCG_COND_LTU, ra, rb, rc, islit, lit); + tcg_gen_setcond_i64(TCG_COND_LTU, vc, va, vb); break; case 0x20: /* ADDQ */ - if (likely(rc != 31)) { - if (ra != 31) { - if (islit) - tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit); - else - tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); - } else { - if (islit) - tcg_gen_movi_i64(cpu_ir[rc], lit); - else - tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]); - } - } + tcg_gen_add_i64(vc, va, vb); break; case 0x22: /* S4ADDQ */ - if (likely(rc != 31)) { - if (ra != 31) { - TCGv tmp = tcg_temp_new(); - tcg_gen_shli_i64(tmp, cpu_ir[ra], 2); - if (islit) - tcg_gen_addi_i64(cpu_ir[rc], tmp, lit); - else - tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]); - tcg_temp_free(tmp); - } else { - if (islit) - tcg_gen_movi_i64(cpu_ir[rc], lit); - else - tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]); - } - } + tmp = tcg_temp_new(); + tcg_gen_shli_i64(tmp, va, 2); + tcg_gen_add_i64(vc, tmp, vb); + tcg_temp_free(tmp); break; case 0x29: /* SUBQ */ - if (likely(rc != 31)) { - if (ra != 31) { - if (islit) - tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit); - else - tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); - } else { - if (islit) - tcg_gen_movi_i64(cpu_ir[rc], -lit); - else - tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]); - } - } + tcg_gen_sub_i64(vc, va, vb); break; case 0x2B: /* S4SUBQ */ - if (likely(rc != 31)) { - if (ra != 31) { - TCGv tmp = tcg_temp_new(); - tcg_gen_shli_i64(tmp, cpu_ir[ra], 2); - if (islit) - tcg_gen_subi_i64(cpu_ir[rc], tmp, lit); - else - tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]); - tcg_temp_free(tmp); - } else { - if (islit) - tcg_gen_movi_i64(cpu_ir[rc], -lit); - else - tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]); - } - } + tmp = tcg_temp_new(); + tcg_gen_shli_i64(tmp, va, 2); + tcg_gen_sub_i64(vc, tmp, vb); + tcg_temp_free(tmp); break; case 0x2D: /* CMPEQ */ - gen_cmp(TCG_COND_EQ, ra, rb, rc, islit, lit); + tcg_gen_setcond_i64(TCG_COND_EQ, vc, va, vb); break; case 0x32: /* S8ADDQ */ - if (likely(rc != 31)) { - if (ra != 31) { - TCGv tmp = tcg_temp_new(); - tcg_gen_shli_i64(tmp, cpu_ir[ra], 3); - if (islit) - tcg_gen_addi_i64(cpu_ir[rc], tmp, lit); - else - tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]); - tcg_temp_free(tmp); - } else { - if (islit) - tcg_gen_movi_i64(cpu_ir[rc], lit); - else - tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]); - } - } + tmp = tcg_temp_new(); + tcg_gen_shli_i64(tmp, va, 3); + tcg_gen_add_i64(vc, tmp, vb); + tcg_temp_free(tmp); break; case 0x3B: /* S8SUBQ */ - if (likely(rc != 31)) { - if (ra != 31) { - TCGv tmp = tcg_temp_new(); - tcg_gen_shli_i64(tmp, cpu_ir[ra], 3); - if (islit) - tcg_gen_subi_i64(cpu_ir[rc], tmp, lit); - else - tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]); - tcg_temp_free(tmp); - } else { - if (islit) - tcg_gen_movi_i64(cpu_ir[rc], -lit); - else - tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]); - } - } + tmp = tcg_temp_new(); + tcg_gen_shli_i64(tmp, va, 3); + tcg_gen_sub_i64(vc, tmp, vb); + tcg_temp_free(tmp); break; case 0x3D: /* CMPULE */ - gen_cmp(TCG_COND_LEU, ra, rb, rc, islit, lit); + tcg_gen_setcond_i64(TCG_COND_LEU, vc, va, vb); break; case 0x40: /* ADDL/V */ - gen_addlv(ra, rb, rc, islit, lit); + gen_helper_addlv(vc, cpu_env, va, vb); break; case 0x49: /* SUBL/V */ - gen_sublv(ra, rb, rc, islit, lit); + gen_helper_sublv(vc, cpu_env, va, vb); break; case 0x4D: /* CMPLT */ - gen_cmp(TCG_COND_LT, ra, rb, rc, islit, lit); + tcg_gen_setcond_i64(TCG_COND_LT, vc, va, vb); break; case 0x60: /* ADDQ/V */ - gen_addqv(ra, rb, rc, islit, lit); + gen_helper_addqv(vc, cpu_env, va, vb); break; case 0x69: /* SUBQ/V */ - 
gen_subqv(ra, rb, rc, islit, lit); + gen_helper_subqv(vc, cpu_env, va, vb); break; case 0x6D: /* CMPLE */ - gen_cmp(TCG_COND_LE, ra, rb, rc, islit, lit); + tcg_gen_setcond_i64(TCG_COND_LE, vc, va, vb); break; default: goto invalid_opc; } break; + case 0x11: + if (fn7 == 0x20) { + if (rc == 31) { + /* Special case BIS as NOP. */ + break; + } + if (ra == 31) { + /* Special case BIS as MOV. */ + vc = dest_gpr(ctx, rc); + if (islit) { + tcg_gen_movi_i64(vc, lit); + } else { + tcg_gen_mov_i64(vc, load_gpr(ctx, rb)); + } + break; + } + } + + vc = dest_gpr(ctx, rc); + vb = load_gpr_lit(ctx, rb, lit, islit); + + if (fn7 == 0x28 && ra == 31) { + /* Special case ORNOT as NOT. */ + tcg_gen_not_i64(vc, vb); + break; + } + + va = load_gpr(ctx, ra); switch (fn7) { case 0x00: /* AND */ - if (likely(rc != 31)) { - if (ra == 31) - tcg_gen_movi_i64(cpu_ir[rc], 0); - else if (islit) - tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], lit); - else - tcg_gen_and_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); - } + tcg_gen_and_i64(vc, va, vb); break; case 0x08: /* BIC */ - if (likely(rc != 31)) { - if (ra != 31) { - if (islit) - tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], ~lit); - else - tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); - } else - tcg_gen_movi_i64(cpu_ir[rc], 0); - } + tcg_gen_andc_i64(vc, va, vb); break; case 0x14: /* CMOVLBS */ - gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 1); + tmp = tcg_temp_new(); + tcg_gen_andi_i64(tmp, va, 1); + tcg_gen_movcond_i64(TCG_COND_NE, vc, tmp, load_zero(ctx), + vb, load_gpr(ctx, rc)); + tcg_temp_free(tmp); break; case 0x16: /* CMOVLBC */ - gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 1); + tmp = tcg_temp_new(); + tcg_gen_andi_i64(tmp, va, 1); + tcg_gen_movcond_i64(TCG_COND_EQ, vc, tmp, load_zero(ctx), + vb, load_gpr(ctx, rc)); + tcg_temp_free(tmp); break; case 0x20: /* BIS */ - if (likely(rc != 31)) { - if (ra != 31) { - if (islit) - tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], lit); - else - tcg_gen_or_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); - } else { - if (islit) - tcg_gen_movi_i64(cpu_ir[rc], lit); - else - tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]); - } - } + tcg_gen_or_i64(vc, va, vb); break; case 0x24: /* CMOVEQ */ - gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 0); + tcg_gen_movcond_i64(TCG_COND_EQ, vc, va, load_zero(ctx), + vb, load_gpr(ctx, rc)); break; case 0x26: /* CMOVNE */ - gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 0); + tcg_gen_movcond_i64(TCG_COND_NE, vc, va, load_zero(ctx), + vb, load_gpr(ctx, rc)); break; case 0x28: /* ORNOT */ - if (likely(rc != 31)) { - if (ra != 31) { - if (islit) - tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], ~lit); - else - tcg_gen_orc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); - } else { - if (islit) - tcg_gen_movi_i64(cpu_ir[rc], ~lit); - else - tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]); - } - } + tcg_gen_orc_i64(vc, va, vb); break; case 0x40: /* XOR */ - if (likely(rc != 31)) { - if (ra != 31) { - if (islit) - tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], lit); - else - tcg_gen_xor_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); - } else { - if (islit) - tcg_gen_movi_i64(cpu_ir[rc], lit); - else - tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]); - } - } + tcg_gen_xor_i64(vc, va, vb); break; case 0x44: /* CMOVLT */ - gen_cmov(TCG_COND_LT, ra, rb, rc, islit, lit, 0); + tcg_gen_movcond_i64(TCG_COND_LT, vc, va, load_zero(ctx), + vb, load_gpr(ctx, rc)); break; case 0x46: /* CMOVGE */ - gen_cmov(TCG_COND_GE, ra, rb, rc, islit, lit, 0); + tcg_gen_movcond_i64(TCG_COND_GE, vc, va, load_zero(ctx), + vb, load_gpr(ctx, rc)); break; case 0x48: /* EQV */ - if (likely(rc != 
31)) { - if (ra != 31) { - if (islit) - tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], ~lit); - else - tcg_gen_eqv_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); - } else { - if (islit) - tcg_gen_movi_i64(cpu_ir[rc], ~lit); - else - tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]); - } - } + tcg_gen_eqv_i64(vc, va, vb); break; case 0x61: /* AMASK */ - if (likely(rc != 31)) { + REQUIRE_REG_31(ra); + { uint64_t amask = ctx->tb->flags >> TB_FLAGS_AMASK_SHIFT; - - if (islit) { - tcg_gen_movi_i64(cpu_ir[rc], lit & ~amask); - } else { - tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[rb], ~amask); - } + tcg_gen_andi_i64(vc, vb, ~amask); } break; case 0x64: /* CMOVLE */ - gen_cmov(TCG_COND_LE, ra, rb, rc, islit, lit, 0); + tcg_gen_movcond_i64(TCG_COND_LE, vc, va, load_zero(ctx), + vb, load_gpr(ctx, rc)); break; case 0x66: /* CMOVGT */ - gen_cmov(TCG_COND_GT, ra, rb, rc, islit, lit, 0); + tcg_gen_movcond_i64(TCG_COND_GT, vc, va, load_zero(ctx), + vb, load_gpr(ctx, rc)); break; case 0x6C: /* IMPLVER */ - if (rc != 31) { - tcg_gen_movi_i64(cpu_ir[rc], ctx->implver); - } + REQUIRE_REG_31(ra); + tcg_gen_movi_i64(vc, ctx->implver); break; default: goto invalid_opc; } break; + case 0x12: + vc = dest_gpr(ctx, rc); + va = load_gpr(ctx, ra); switch (fn7) { case 0x02: /* MSKBL */ - gen_msk_l(ra, rb, rc, islit, lit, 0x01); + gen_msk_l(ctx, vc, va, rb, islit, lit, 0x01); break; case 0x06: /* EXTBL */ - gen_ext_l(ra, rb, rc, islit, lit, 0x01); + gen_ext_l(ctx, vc, va, rb, islit, lit, 0x01); break; case 0x0B: /* INSBL */ - gen_ins_l(ra, rb, rc, islit, lit, 0x01); + gen_ins_l(ctx, vc, va, rb, islit, lit, 0x01); break; case 0x12: /* MSKWL */ - gen_msk_l(ra, rb, rc, islit, lit, 0x03); + gen_msk_l(ctx, vc, va, rb, islit, lit, 0x03); break; case 0x16: /* EXTWL */ - gen_ext_l(ra, rb, rc, islit, lit, 0x03); + gen_ext_l(ctx, vc, va, rb, islit, lit, 0x03); break; case 0x1B: /* INSWL */ - gen_ins_l(ra, rb, rc, islit, lit, 0x03); + gen_ins_l(ctx, vc, va, rb, islit, lit, 0x03); break; case 0x22: /* MSKLL */ - gen_msk_l(ra, rb, rc, islit, lit, 0x0f); + gen_msk_l(ctx, vc, va, rb, islit, lit, 0x0f); break; case 0x26: /* EXTLL */ - gen_ext_l(ra, rb, rc, islit, lit, 0x0f); + gen_ext_l(ctx, vc, va, rb, islit, lit, 0x0f); break; case 0x2B: /* INSLL */ - gen_ins_l(ra, rb, rc, islit, lit, 0x0f); + gen_ins_l(ctx, vc, va, rb, islit, lit, 0x0f); break; case 0x30: /* ZAP */ - gen_zap(ra, rb, rc, islit, lit); + if (islit) { + gen_zapnoti(vc, va, ~lit); + } else { + gen_helper_zap(vc, va, load_gpr(ctx, rb)); + } break; case 0x31: /* ZAPNOT */ - gen_zapnot(ra, rb, rc, islit, lit); + if (islit) { + gen_zapnoti(vc, va, lit); + } else { + gen_helper_zapnot(vc, va, load_gpr(ctx, rb)); + } break; case 0x32: /* MSKQL */ - gen_msk_l(ra, rb, rc, islit, lit, 0xff); + gen_msk_l(ctx, vc, va, rb, islit, lit, 0xff); break; case 0x34: /* SRL */ - if (likely(rc != 31)) { - if (ra != 31) { - if (islit) - tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f); - else { - TCGv shift = tcg_temp_new(); - tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f); - tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], shift); - tcg_temp_free(shift); - } - } else - tcg_gen_movi_i64(cpu_ir[rc], 0); + if (islit) { + tcg_gen_shri_i64(vc, va, lit & 0x3f); + } else { + tmp = tcg_temp_new(); + vb = load_gpr(ctx, rb); + tcg_gen_andi_i64(tmp, vb, 0x3f); + tcg_gen_shr_i64(vc, va, tmp); + tcg_temp_free(tmp); } break; case 0x36: /* EXTQL */ - gen_ext_l(ra, rb, rc, islit, lit, 0xff); + gen_ext_l(ctx, vc, va, rb, islit, lit, 0xff); break; case 0x39: /* SLL */ - if (likely(rc != 31)) { - if (ra != 31) { - if (islit) - 
tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f); - else { - TCGv shift = tcg_temp_new(); - tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f); - tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], shift); - tcg_temp_free(shift); - } - } else - tcg_gen_movi_i64(cpu_ir[rc], 0); + if (islit) { + tcg_gen_shli_i64(vc, va, lit & 0x3f); + } else { + tmp = tcg_temp_new(); + vb = load_gpr(ctx, rb); + tcg_gen_andi_i64(tmp, vb, 0x3f); + tcg_gen_shl_i64(vc, va, tmp); + tcg_temp_free(tmp); } break; case 0x3B: /* INSQL */ - gen_ins_l(ra, rb, rc, islit, lit, 0xff); + gen_ins_l(ctx, vc, va, rb, islit, lit, 0xff); break; case 0x3C: /* SRA */ - if (likely(rc != 31)) { - if (ra != 31) { - if (islit) - tcg_gen_sari_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f); - else { - TCGv shift = tcg_temp_new(); - tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f); - tcg_gen_sar_i64(cpu_ir[rc], cpu_ir[ra], shift); - tcg_temp_free(shift); - } - } else - tcg_gen_movi_i64(cpu_ir[rc], 0); + if (islit) { + tcg_gen_sari_i64(vc, va, lit & 0x3f); + } else { + tmp = tcg_temp_new(); + vb = load_gpr(ctx, rb); + tcg_gen_andi_i64(tmp, vb, 0x3f); + tcg_gen_sar_i64(vc, va, tmp); + tcg_temp_free(tmp); } break; case 0x52: /* MSKWH */ - gen_msk_h(ra, rb, rc, islit, lit, 0x03); + gen_msk_h(ctx, vc, va, rb, islit, lit, 0x03); break; case 0x57: /* INSWH */ - gen_ins_h(ra, rb, rc, islit, lit, 0x03); + gen_ins_h(ctx, vc, va, rb, islit, lit, 0x03); break; case 0x5A: /* EXTWH */ - gen_ext_h(ra, rb, rc, islit, lit, 0x03); + gen_ext_h(ctx, vc, va, rb, islit, lit, 0x03); break; case 0x62: /* MSKLH */ - gen_msk_h(ra, rb, rc, islit, lit, 0x0f); + gen_msk_h(ctx, vc, va, rb, islit, lit, 0x0f); break; case 0x67: /* INSLH */ - gen_ins_h(ra, rb, rc, islit, lit, 0x0f); + gen_ins_h(ctx, vc, va, rb, islit, lit, 0x0f); break; case 0x6A: /* EXTLH */ - gen_ext_h(ra, rb, rc, islit, lit, 0x0f); + gen_ext_h(ctx, vc, va, rb, islit, lit, 0x0f); break; case 0x72: /* MSKQH */ - gen_msk_h(ra, rb, rc, islit, lit, 0xff); + gen_msk_h(ctx, vc, va, rb, islit, lit, 0xff); break; case 0x77: /* INSQH */ - gen_ins_h(ra, rb, rc, islit, lit, 0xff); + gen_ins_h(ctx, vc, va, rb, islit, lit, 0xff); break; case 0x7A: /* EXTQH */ - gen_ext_h(ra, rb, rc, islit, lit, 0xff); + gen_ext_h(ctx, vc, va, rb, islit, lit, 0xff); break; default: goto invalid_opc; } break; + case 0x13: + vc = dest_gpr(ctx, rc); + vb = load_gpr_lit(ctx, rb, lit, islit); + va = load_gpr(ctx, ra); switch (fn7) { case 0x00: /* MULL */ - if (likely(rc != 31)) { - if (ra == 31) - tcg_gen_movi_i64(cpu_ir[rc], 0); - else { - if (islit) - tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit); - else - tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); - tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]); - } - } + tcg_gen_mul_i64(vc, va, vb); + tcg_gen_ext32s_i64(vc, vc); break; case 0x20: /* MULQ */ - if (likely(rc != 31)) { - if (ra == 31) - tcg_gen_movi_i64(cpu_ir[rc], 0); - else if (islit) - tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit); - else - tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); - } + tcg_gen_mul_i64(vc, va, vb); break; case 0x30: /* UMULH */ - { - TCGv low; - if (unlikely(rc == 31)){ - break; - } - if (ra == 31) { - tcg_gen_movi_i64(cpu_ir[rc], 0); - break; - } - low = tcg_temp_new(); - if (islit) { - tcg_gen_movi_tl(low, lit); - tcg_gen_mulu2_i64(low, cpu_ir[rc], cpu_ir[ra], low); - } else { - tcg_gen_mulu2_i64(low, cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); - } - tcg_temp_free(low); - } + tmp = tcg_temp_new(); + tcg_gen_mulu2_i64(tmp, vc, va, vb); + tcg_temp_free(tmp); break; case 0x40: /* MULL/V */ - gen_mullv(ra, rb, rc, islit, lit); + 
gen_helper_mullv(vc, cpu_env, va, vb); break; case 0x60: /* MULQ/V */ - gen_mulqv(ra, rb, rc, islit, lit); + gen_helper_mulqv(vc, cpu_env, va, vb); break; default: goto invalid_opc; } break; + case 0x14: + REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX); + vc = dest_fpr(ctx, rc); switch (fpfn) { /* fn11 & 0x3F */ case 0x04: /* ITOFS */ - if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) { - goto invalid_opc; - } - if (likely(rc != 31)) { - if (ra != 31) { - TCGv_i32 tmp = tcg_temp_new_i32(); - tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]); - gen_helper_memory_to_s(cpu_fir[rc], tmp); - tcg_temp_free_i32(tmp); - } else - tcg_gen_movi_i64(cpu_fir[rc], 0); - } + REQUIRE_REG_31(rb); + t32 = tcg_temp_new_i32(); + va = load_gpr(ctx, ra); + tcg_gen_trunc_i64_i32(t32, va); + gen_helper_memory_to_s(vc, t32); + tcg_temp_free_i32(t32); break; case 0x0A: /* SQRTF */ - if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) { - gen_fsqrtf(rb, rc); - break; - } - goto invalid_opc; + REQUIRE_REG_31(ra); + vb = load_fpr(ctx, rb); + gen_helper_sqrtf(vc, cpu_env, vb); + break; case 0x0B: /* SQRTS */ - if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) { - gen_fsqrts(ctx, rb, rc, fn11); - break; - } - goto invalid_opc; + REQUIRE_REG_31(ra); + gen_fsqrts(ctx, rb, rc, fn11); + break; case 0x14: /* ITOFF */ - if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) { - goto invalid_opc; - } - if (likely(rc != 31)) { - if (ra != 31) { - TCGv_i32 tmp = tcg_temp_new_i32(); - tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]); - gen_helper_memory_to_f(cpu_fir[rc], tmp); - tcg_temp_free_i32(tmp); - } else - tcg_gen_movi_i64(cpu_fir[rc], 0); - } + REQUIRE_REG_31(rb); + t32 = tcg_temp_new_i32(); + va = load_gpr(ctx, ra); + tcg_gen_trunc_i64_i32(t32, va); + gen_helper_memory_to_f(vc, t32); + tcg_temp_free_i32(t32); break; case 0x24: /* ITOFT */ - if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) { - goto invalid_opc; - } - if (likely(rc != 31)) { - if (ra != 31) - tcg_gen_mov_i64(cpu_fir[rc], cpu_ir[ra]); - else - tcg_gen_movi_i64(cpu_fir[rc], 0); - } + REQUIRE_REG_31(rb); + va = load_gpr(ctx, ra); + tcg_gen_mov_i64(vc, va); break; case 0x2A: /* SQRTG */ - if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) { - gen_fsqrtg(rb, rc); - break; - } - goto invalid_opc; + REQUIRE_REG_31(ra); + vb = load_fpr(ctx, rb); + gen_helper_sqrtg(vc, cpu_env, vb); + break; case 0x02B: /* SQRTT */ - if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) { - gen_fsqrtt(ctx, rb, rc, fn11); - break; - } - goto invalid_opc; + REQUIRE_REG_31(ra); + gen_fsqrtt(ctx, rb, rc, fn11); + break; default: goto invalid_opc; } break; + case 0x15: /* VAX floating point */ /* XXX: rounding mode and trap are ignored (!) 
*/ + vc = dest_fpr(ctx, rc); + vb = load_fpr(ctx, rb); + va = load_fpr(ctx, ra); switch (fpfn) { /* fn11 & 0x3F */ case 0x00: /* ADDF */ - gen_faddf(ra, rb, rc); + gen_helper_addf(vc, cpu_env, va, vb); break; case 0x01: /* SUBF */ - gen_fsubf(ra, rb, rc); + gen_helper_subf(vc, cpu_env, va, vb); break; case 0x02: /* MULF */ - gen_fmulf(ra, rb, rc); + gen_helper_mulf(vc, cpu_env, va, vb); break; case 0x03: /* DIVF */ - gen_fdivf(ra, rb, rc); + gen_helper_divf(vc, cpu_env, va, vb); break; case 0x1E: - /* CVTDG */ -#if 0 // TODO - gen_fcvtdg(rb, rc); -#else + /* CVTDG -- TODO */ + REQUIRE_REG_31(ra); goto invalid_opc; -#endif - break; case 0x20: /* ADDG */ - gen_faddg(ra, rb, rc); + gen_helper_addg(vc, cpu_env, va, vb); break; case 0x21: /* SUBG */ - gen_fsubg(ra, rb, rc); + gen_helper_subg(vc, cpu_env, va, vb); break; case 0x22: /* MULG */ - gen_fmulg(ra, rb, rc); + gen_helper_mulg(vc, cpu_env, va, vb); break; case 0x23: /* DIVG */ - gen_fdivg(ra, rb, rc); + gen_helper_divg(vc, cpu_env, va, vb); break; case 0x25: /* CMPGEQ */ - gen_fcmpgeq(ra, rb, rc); + gen_helper_cmpgeq(vc, cpu_env, va, vb); break; case 0x26: /* CMPGLT */ - gen_fcmpglt(ra, rb, rc); + gen_helper_cmpglt(vc, cpu_env, va, vb); break; case 0x27: /* CMPGLE */ - gen_fcmpgle(ra, rb, rc); + gen_helper_cmpgle(vc, cpu_env, va, vb); break; case 0x2C: /* CVTGF */ - gen_fcvtgf(rb, rc); + REQUIRE_REG_31(ra); + gen_helper_cvtgf(vc, cpu_env, vb); break; case 0x2D: - /* CVTGD */ -#if 0 // TODO - gen_fcvtgd(rb, rc); -#else + /* CVTGD -- TODO */ + REQUIRE_REG_31(ra); goto invalid_opc; -#endif - break; case 0x2F: /* CVTGQ */ - gen_fcvtgq(rb, rc); + REQUIRE_REG_31(ra); + gen_helper_cvtgq(vc, cpu_env, vb); break; case 0x3C: /* CVTQF */ - gen_fcvtqf(rb, rc); + REQUIRE_REG_31(ra); + gen_helper_cvtqf(vc, cpu_env, vb); break; case 0x3E: /* CVTQG */ - gen_fcvtqg(rb, rc); + REQUIRE_REG_31(ra); + gen_helper_cvtqg(vc, cpu_env, vb); break; default: goto invalid_opc; } break; + case 0x16: /* IEEE floating-point */ switch (fpfn) { /* fn11 & 0x3F */ @@ -2708,6 +2124,7 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn) gen_fcmptle(ctx, ra, rb, rc, fn11); break; case 0x2C: + REQUIRE_REG_31(ra); if (fn11 == 0x2AC || fn11 == 0x6AC) { /* CVTST */ gen_fcvtst(ctx, rb, rc, fn11); @@ -2718,104 +2135,122 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn) break; case 0x2F: /* CVTTQ */ + REQUIRE_REG_31(ra); gen_fcvttq(ctx, rb, rc, fn11); break; case 0x3C: /* CVTQS */ + REQUIRE_REG_31(ra); gen_fcvtqs(ctx, rb, rc, fn11); break; case 0x3E: /* CVTQT */ + REQUIRE_REG_31(ra); gen_fcvtqt(ctx, rb, rc, fn11); break; default: goto invalid_opc; } break; + case 0x17: switch (fn11) { case 0x010: /* CVTLQ */ - gen_fcvtlq(rb, rc); + REQUIRE_REG_31(ra); + vc = dest_fpr(ctx, rc); + vb = load_fpr(ctx, rb); + gen_fcvtlq(vc, vb); break; case 0x020: - if (likely(rc != 31)) { + /* CPYS */ + if (rc == 31) { + /* Special case CPYS as FNOP. */ + } else { + vc = dest_fpr(ctx, rc); + va = load_fpr(ctx, ra); if (ra == rb) { - /* FMOV */ - if (ra == 31) - tcg_gen_movi_i64(cpu_fir[rc], 0); - else - tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[ra]); + /* Special case CPYS as FMOV. 
*/ + tcg_gen_mov_i64(vc, va); } else { - /* CPYS */ - gen_fcpys(ra, rb, rc); + vb = load_fpr(ctx, rb); + gen_cpy_mask(vc, va, vb, 0, 0x8000000000000000ULL); } } break; case 0x021: /* CPYSN */ - gen_fcpysn(ra, rb, rc); + vc = dest_fpr(ctx, rc); + vb = load_fpr(ctx, rb); + va = load_fpr(ctx, ra); + gen_cpy_mask(vc, va, vb, 1, 0x8000000000000000ULL); break; case 0x022: /* CPYSE */ - gen_fcpyse(ra, rb, rc); + vc = dest_fpr(ctx, rc); + vb = load_fpr(ctx, rb); + va = load_fpr(ctx, ra); + gen_cpy_mask(vc, va, vb, 0, 0xFFF0000000000000ULL); break; case 0x024: /* MT_FPCR */ - if (likely(ra != 31)) - gen_helper_store_fpcr(cpu_env, cpu_fir[ra]); - else { - TCGv tmp = tcg_const_i64(0); - gen_helper_store_fpcr(cpu_env, tmp); - tcg_temp_free(tmp); - } + va = load_fpr(ctx, ra); + gen_helper_store_fpcr(cpu_env, va); break; case 0x025: /* MF_FPCR */ - if (likely(ra != 31)) - gen_helper_load_fpcr(cpu_fir[ra], cpu_env); + va = dest_fpr(ctx, ra); + gen_helper_load_fpcr(va, cpu_env); break; case 0x02A: /* FCMOVEQ */ - gen_fcmov(TCG_COND_EQ, ra, rb, rc); + gen_fcmov(ctx, TCG_COND_EQ, ra, rb, rc); break; case 0x02B: /* FCMOVNE */ - gen_fcmov(TCG_COND_NE, ra, rb, rc); + gen_fcmov(ctx, TCG_COND_NE, ra, rb, rc); break; case 0x02C: /* FCMOVLT */ - gen_fcmov(TCG_COND_LT, ra, rb, rc); + gen_fcmov(ctx, TCG_COND_LT, ra, rb, rc); break; case 0x02D: /* FCMOVGE */ - gen_fcmov(TCG_COND_GE, ra, rb, rc); + gen_fcmov(ctx, TCG_COND_GE, ra, rb, rc); break; case 0x02E: /* FCMOVLE */ - gen_fcmov(TCG_COND_LE, ra, rb, rc); + gen_fcmov(ctx, TCG_COND_LE, ra, rb, rc); break; case 0x02F: /* FCMOVGT */ - gen_fcmov(TCG_COND_GT, ra, rb, rc); + gen_fcmov(ctx, TCG_COND_GT, ra, rb, rc); break; case 0x030: /* CVTQL */ - gen_fcvtql(rb, rc); + REQUIRE_REG_31(ra); + vc = dest_fpr(ctx, rc); + vb = load_fpr(ctx, rb); + gen_fcvtql(vc, vb); break; case 0x130: /* CVTQL/V */ case 0x530: /* CVTQL/SV */ + REQUIRE_REG_31(ra); /* ??? I'm pretty sure there's nothing that /sv needs to do that /v doesn't do. The only thing I can think is that /sv is a valid instruction merely for completeness in the ISA. */ - gen_fcvtql_v(ctx, rb, rc); + vc = dest_fpr(ctx, rc); + vb = load_fpr(ctx, rb); + gen_helper_fcvtql_v_input(cpu_env, vb); + gen_fcvtql(vc, vb); break; default: goto invalid_opc; } break; + case 0x18: switch ((uint16_t)disp16) { case 0x0000: @@ -2844,15 +2279,14 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn) break; case 0xC000: /* RPCC */ - if (ra != 31) { - if (use_icount) { - gen_io_start(); - gen_helper_load_pcc(cpu_ir[ra], cpu_env); - gen_io_end(); - ret = EXIT_PC_STALE; - } else { - gen_helper_load_pcc(cpu_ir[ra], cpu_env); - } + va = dest_gpr(ctx, ra); + if (use_icount) { + gen_io_start(); + gen_helper_load_pcc(va, cpu_env); + gen_io_end(); + ret = EXIT_PC_STALE; + } else { + gen_helper_load_pcc(va, cpu_env); } break; case 0xE000: @@ -2874,58 +2308,55 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn) goto invalid_opc; } break; + case 0x19: /* HW_MFPR (PALcode) */ #ifndef CONFIG_USER_ONLY - if (ctx->tb->flags & TB_FLAGS_PAL_MODE) { - return gen_mfpr(ra, insn & 0xffff); - } -#endif + REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE); + va = dest_gpr(ctx, ra); + ret = gen_mfpr(va, insn & 0xffff); + break; +#else goto invalid_opc; +#endif + case 0x1A: /* JMP, JSR, RET, JSR_COROUTINE. These only differ by the branch prediction stack action, which of course we don't implement. 
*/ - if (rb != 31) { - tcg_gen_andi_i64(cpu_pc, cpu_ir[rb], ~3); - } else { - tcg_gen_movi_i64(cpu_pc, 0); - } + vb = load_gpr(ctx, rb); + tcg_gen_andi_i64(cpu_pc, vb, ~3); if (ra != 31) { tcg_gen_movi_i64(cpu_ir[ra], ctx->pc); } ret = EXIT_PC_UPDATED; break; + case 0x1B: /* HW_LD (PALcode) */ #ifndef CONFIG_USER_ONLY - if (ctx->tb->flags & TB_FLAGS_PAL_MODE) { - TCGv addr; + REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE); + { + TCGv addr = tcg_temp_new(); + vb = load_gpr(ctx, rb); + va = dest_gpr(ctx, ra); - if (ra == 31) { - break; - } - - addr = tcg_temp_new(); - if (rb != 31) - tcg_gen_addi_i64(addr, cpu_ir[rb], disp12); - else - tcg_gen_movi_i64(addr, disp12); + tcg_gen_addi_i64(addr, vb, disp12); switch ((insn >> 12) & 0xF) { case 0x0: /* Longword physical access (hw_ldl/p) */ - gen_helper_ldl_phys(cpu_ir[ra], cpu_env, addr); + gen_helper_ldl_phys(va, cpu_env, addr); break; case 0x1: /* Quadword physical access (hw_ldq/p) */ - gen_helper_ldq_phys(cpu_ir[ra], cpu_env, addr); + gen_helper_ldq_phys(va, cpu_env, addr); break; case 0x2: /* Longword physical access with lock (hw_ldl_l/p) */ - gen_helper_ldl_l_phys(cpu_ir[ra], cpu_env, addr); + gen_helper_ldl_l_phys(va, cpu_env, addr); break; case 0x3: /* Quadword physical access with lock (hw_ldq_l/p) */ - gen_helper_ldq_l_phys(cpu_ir[ra], cpu_env, addr); + gen_helper_ldq_l_phys(va, cpu_env, addr); break; case 0x4: /* Longword virtual PTE fetch (hw_ldl/v) */ @@ -2948,11 +2379,11 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn) goto invalid_opc; case 0xA: /* Longword virtual access with protection check (hw_ldl/w) */ - tcg_gen_qemu_ld_i64(cpu_ir[ra], addr, MMU_KERNEL_IDX, MO_LESL); + tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LESL); break; case 0xB: /* Quadword virtual access with protection check (hw_ldq/w) */ - tcg_gen_qemu_ld_i64(cpu_ir[ra], addr, MMU_KERNEL_IDX, MO_LEQ); + tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LEQ); break; case 0xC: /* Longword virtual access with alt access mode (hw_ldl/a)*/ @@ -2963,282 +2394,215 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn) case 0xE: /* Longword virtual access with alternate access mode and protection checks (hw_ldl/wa) */ - tcg_gen_qemu_ld_i64(cpu_ir[ra], addr, MMU_USER_IDX, MO_LESL); + tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LESL); break; case 0xF: /* Quadword virtual access with alternate access mode and protection checks (hw_ldq/wa) */ - tcg_gen_qemu_ld_i64(cpu_ir[ra], addr, MMU_USER_IDX, MO_LEQ); + tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LEQ); break; } tcg_temp_free(addr); break; } -#endif +#else goto invalid_opc; +#endif + case 0x1C: + vc = dest_gpr(ctx, rc); + if (fn7 == 0x70) { + /* FTOIT */ + REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX); + REQUIRE_REG_31(rb); + va = load_fpr(ctx, ra); + tcg_gen_mov_i64(vc, va); + break; + } else if (fn7 == 0x78) { + /* FTOIS */ + REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX); + REQUIRE_REG_31(rb); + t32 = tcg_temp_new_i32(); + va = load_fpr(ctx, ra); + gen_helper_s_to_memory(t32, va); + tcg_gen_ext_i32_i64(vc, t32); + tcg_temp_free_i32(t32); + break; + } + + vb = load_gpr_lit(ctx, rb, lit, islit); switch (fn7) { case 0x00: /* SEXTB */ - if ((ctx->tb->flags & TB_FLAGS_AMASK_BWX) == 0) { - goto invalid_opc; - } - if (likely(rc != 31)) { - if (islit) - tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int8_t)lit)); - else - tcg_gen_ext8s_i64(cpu_ir[rc], cpu_ir[rb]); - } + REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX); + REQUIRE_REG_31(ra); + tcg_gen_ext8s_i64(vc, vb); break; case 0x01: /* SEXTW */ - if (ctx->tb->flags & 
TB_FLAGS_AMASK_BWX) { - if (likely(rc != 31)) { - if (islit) { - tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int16_t)lit)); - } else { - tcg_gen_ext16s_i64(cpu_ir[rc], cpu_ir[rb]); - } - } - break; - } - goto invalid_opc; + REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX); + REQUIRE_REG_31(ra); + tcg_gen_ext16s_i64(vc, vb); + break; case 0x30: /* CTPOP */ - if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) { - if (likely(rc != 31)) { - if (islit) { - tcg_gen_movi_i64(cpu_ir[rc], ctpop64(lit)); - } else { - gen_helper_ctpop(cpu_ir[rc], cpu_ir[rb]); - } - } - break; - } - goto invalid_opc; + REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX); + REQUIRE_REG_31(ra); + gen_helper_ctpop(vc, vb); + break; case 0x31: /* PERR */ - if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) { - gen_perr(ra, rb, rc, islit, lit); - break; - } - goto invalid_opc; + REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI); + va = load_gpr(ctx, ra); + gen_helper_perr(vc, va, vb); + break; case 0x32: /* CTLZ */ - if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) { - if (likely(rc != 31)) { - if (islit) { - tcg_gen_movi_i64(cpu_ir[rc], clz64(lit)); - } else { - gen_helper_ctlz(cpu_ir[rc], cpu_ir[rb]); - } - } - break; - } - goto invalid_opc; + REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX); + REQUIRE_REG_31(ra); + gen_helper_ctlz(vc, vb); + break; case 0x33: /* CTTZ */ - if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) { - if (likely(rc != 31)) { - if (islit) { - tcg_gen_movi_i64(cpu_ir[rc], ctz64(lit)); - } else { - gen_helper_cttz(cpu_ir[rc], cpu_ir[rb]); - } - } - break; - } - goto invalid_opc; + REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX); + REQUIRE_REG_31(ra); + gen_helper_cttz(vc, vb); + break; case 0x34: /* UNPKBW */ - if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) { - if (real_islit || ra != 31) { - goto invalid_opc; - } - gen_unpkbw(rb, rc); - break; - } - goto invalid_opc; + REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI); + REQUIRE_REG_31(ra); + gen_helper_unpkbw(vc, vb); + break; case 0x35: /* UNPKBL */ - if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) { - if (real_islit || ra != 31) { - goto invalid_opc; - } - gen_unpkbl(rb, rc); - break; - } - goto invalid_opc; + REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI); + REQUIRE_REG_31(ra); + gen_helper_unpkbl(vc, vb); + break; case 0x36: /* PKWB */ - if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) { - if (real_islit || ra != 31) { - goto invalid_opc; - } - gen_pkwb(rb, rc); - break; - } - goto invalid_opc; + REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI); + REQUIRE_REG_31(ra); + gen_helper_pkwb(vc, vb); + break; case 0x37: /* PKLB */ - if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) { - if (real_islit || ra != 31) { - goto invalid_opc; - } - gen_pklb(rb, rc); - break; - } - goto invalid_opc; + REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI); + REQUIRE_REG_31(ra); + gen_helper_pklb(vc, vb); + break; case 0x38: /* MINSB8 */ - if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) { - gen_minsb8(ra, rb, rc, islit, lit); - break; - } - goto invalid_opc; + REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI); + va = load_gpr(ctx, ra); + gen_helper_minsb8(vc, va, vb); + break; case 0x39: /* MINSW4 */ - if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) { - gen_minsw4(ra, rb, rc, islit, lit); - break; - } - goto invalid_opc; + REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI); + va = load_gpr(ctx, ra); + gen_helper_minsw4(vc, va, vb); + break; case 0x3A: /* MINUB8 */ - if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) { - gen_minub8(ra, rb, rc, islit, lit); - break; - } - goto invalid_opc; + REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI); + va = load_gpr(ctx, ra); + gen_helper_minub8(vc, va, vb); + break; case 0x3B: /* MINUW4 */ - if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) { - gen_minuw4(ra, rb, rc, islit, lit); - 
break; - } - goto invalid_opc; + REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI); + va = load_gpr(ctx, ra); + gen_helper_minuw4(vc, va, vb); + break; case 0x3C: /* MAXUB8 */ - if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) { - gen_maxub8(ra, rb, rc, islit, lit); - break; - } - goto invalid_opc; + REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI); + va = load_gpr(ctx, ra); + gen_helper_maxub8(vc, va, vb); + break; case 0x3D: /* MAXUW4 */ - if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) { - gen_maxuw4(ra, rb, rc, islit, lit); - break; - } - goto invalid_opc; + REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI); + va = load_gpr(ctx, ra); + gen_helper_maxuw4(vc, va, vb); + break; case 0x3E: /* MAXSB8 */ - if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) { - gen_maxsb8(ra, rb, rc, islit, lit); - break; - } - goto invalid_opc; + REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI); + va = load_gpr(ctx, ra); + gen_helper_maxsb8(vc, va, vb); + break; case 0x3F: /* MAXSW4 */ - if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) { - gen_maxsw4(ra, rb, rc, islit, lit); - break; - } - goto invalid_opc; - case 0x70: - /* FTOIT */ - if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) { - goto invalid_opc; - } - if (likely(rc != 31)) { - if (ra != 31) - tcg_gen_mov_i64(cpu_ir[rc], cpu_fir[ra]); - else - tcg_gen_movi_i64(cpu_ir[rc], 0); - } - break; - case 0x78: - /* FTOIS */ - if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) { - goto invalid_opc; - } - if (rc != 31) { - TCGv_i32 tmp1 = tcg_temp_new_i32(); - if (ra != 31) - gen_helper_s_to_memory(tmp1, cpu_fir[ra]); - else { - TCGv tmp2 = tcg_const_i64(0); - gen_helper_s_to_memory(tmp1, tmp2); - tcg_temp_free(tmp2); - } - tcg_gen_ext_i32_i64(cpu_ir[rc], tmp1); - tcg_temp_free_i32(tmp1); - } + REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI); + va = load_gpr(ctx, ra); + gen_helper_maxsw4(vc, va, vb); break; default: goto invalid_opc; } break; + case 0x1D: /* HW_MTPR (PALcode) */ #ifndef CONFIG_USER_ONLY - if (ctx->tb->flags & TB_FLAGS_PAL_MODE) { - return gen_mtpr(ctx, rb, insn & 0xffff); - } -#endif + REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE); + vb = load_gpr(ctx, rb); + ret = gen_mtpr(ctx, vb, insn & 0xffff); + break; +#else goto invalid_opc; +#endif + case 0x1E: /* HW_RET (PALcode) */ #ifndef CONFIG_USER_ONLY - if (ctx->tb->flags & TB_FLAGS_PAL_MODE) { - if (rb == 31) { - /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return - address from EXC_ADDR. This turns out to be useful for our - emulation PALcode, so continue to accept it. */ - TCGv tmp = tcg_temp_new(); - tcg_gen_ld_i64(tmp, cpu_env, offsetof(CPUAlphaState, exc_addr)); - gen_helper_hw_ret(cpu_env, tmp); - tcg_temp_free(tmp); - } else { - gen_helper_hw_ret(cpu_env, cpu_ir[rb]); - } - ret = EXIT_PC_UPDATED; - break; + REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE); + if (rb == 31) { + /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return + address from EXC_ADDR. This turns out to be useful for our + emulation PALcode, so continue to accept it. 
*/ + tmp = tcg_temp_new(); + tcg_gen_ld_i64(tmp, cpu_env, offsetof(CPUAlphaState, exc_addr)); + gen_helper_hw_ret(cpu_env, tmp); + tcg_temp_free(tmp); + } else { + gen_helper_hw_ret(cpu_env, load_gpr(ctx, rb)); } -#endif + ret = EXIT_PC_UPDATED; + break; +#else goto invalid_opc; +#endif + case 0x1F: /* HW_ST (PALcode) */ #ifndef CONFIG_USER_ONLY - if (ctx->tb->flags & TB_FLAGS_PAL_MODE) { - TCGv addr, val; - addr = tcg_temp_new(); - if (rb != 31) - tcg_gen_addi_i64(addr, cpu_ir[rb], disp12); - else - tcg_gen_movi_i64(addr, disp12); - if (ra != 31) - val = cpu_ir[ra]; - else { - val = tcg_temp_new(); - tcg_gen_movi_i64(val, 0); - } + REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE); + { + TCGv addr = tcg_temp_new(); + va = load_gpr(ctx, ra); + vb = load_gpr(ctx, rb); + + tcg_gen_addi_i64(addr, vb, disp12); switch ((insn >> 12) & 0xF) { case 0x0: /* Longword physical access */ - gen_helper_stl_phys(cpu_env, addr, val); + gen_helper_stl_phys(cpu_env, addr, va); break; case 0x1: /* Quadword physical access */ - gen_helper_stq_phys(cpu_env, addr, val); + gen_helper_stq_phys(cpu_env, addr, va); break; case 0x2: /* Longword physical access with lock */ - gen_helper_stl_c_phys(val, cpu_env, addr, val); + gen_helper_stl_c_phys(dest_gpr(ctx, ra), cpu_env, addr, va); break; case 0x3: /* Quadword physical access with lock */ - gen_helper_stq_c_phys(val, cpu_env, addr, val); + gen_helper_stq_c_phys(dest_gpr(ctx, ra), cpu_env, addr, va); break; case 0x4: /* Longword virtual access */ @@ -3277,13 +2641,12 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn) /* Invalid */ goto invalid_opc; } - if (ra == 31) - tcg_temp_free(val); tcg_temp_free(addr); break; } -#endif +#else goto invalid_opc; +#endif case 0x20: /* LDF */ gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0); @@ -3483,8 +2846,9 @@ static inline void gen_intermediate_code_internal(AlphaCPU *cpu, tcg_ctx.gen_opc_instr_start[lj] = 1; tcg_ctx.gen_opc_icount[lj] = num_insns; } - if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) + if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) { gen_io_start(); + } insn = cpu_ldl_code(env, ctx.pc); num_insns++; @@ -3492,9 +2856,24 @@ static inline void gen_intermediate_code_internal(AlphaCPU *cpu, tcg_gen_debug_insn_start(ctx.pc); } + TCGV_UNUSED_I64(ctx.zero); + TCGV_UNUSED_I64(ctx.sink); + TCGV_UNUSED_I64(ctx.lit); + ctx.pc += 4; ret = translate_one(ctxp, insn); + if (!TCGV_IS_UNUSED_I64(ctx.sink)) { + tcg_gen_discard_i64(ctx.sink); + tcg_temp_free(ctx.sink); + } + if (!TCGV_IS_UNUSED_I64(ctx.zero)) { + tcg_temp_free(ctx.zero); + } + if (!TCGV_IS_UNUSED_I64(ctx.lit)) { + tcg_temp_free(ctx.lit); + } + /* If we reach a page boundary, are single stepping, or exhaust instruction count, stop generation. 
*/ if (ret == NO_EXIT diff --git a/target-s390x/cpu.h b/target-s390x/cpu.h index f332d41b94..41903a93fb 100644 --- a/target-s390x/cpu.h +++ b/target-s390x/cpu.h @@ -126,6 +126,9 @@ typedef struct CPUS390XState { uint64_t pfault_compare; uint64_t pfault_select; + uint64_t gbea; + uint64_t pp; + CPU_COMMON /* reset does memset(0) up to here */ diff --git a/target-s390x/kvm.c b/target-s390x/kvm.c index 56b9af7505..a30d1bc060 100644 --- a/target-s390x/kvm.c +++ b/target-s390x/kvm.c @@ -36,6 +36,7 @@ #include "sysemu/device_tree.h" #include "qapi/qmp/qjson.h" #include "monitor/monitor.h" +#include "trace.h" /* #define DEBUG_KVM */ @@ -128,14 +129,42 @@ void kvm_arch_reset_vcpu(CPUState *cpu) } } +static int kvm_set_one_reg(CPUState *cs, uint64_t id, void *source) +{ + struct kvm_one_reg reg; + int r; + + reg.id = id; + reg.addr = (uint64_t) source; + r = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®); + if (r) { + trace_kvm_failed_reg_set(id, strerror(errno)); + } + return r; +} + +static int kvm_get_one_reg(CPUState *cs, uint64_t id, void *target) +{ + struct kvm_one_reg reg; + int r; + + reg.id = id; + reg.addr = (uint64_t) target; + r = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®); + if (r) { + trace_kvm_failed_reg_get(id, strerror(errno)); + } + return r; +} + + int kvm_arch_put_registers(CPUState *cs, int level) { S390CPU *cpu = S390_CPU(cs); CPUS390XState *env = &cpu->env; - struct kvm_one_reg reg; struct kvm_sregs sregs; struct kvm_regs regs; - int ret; + int r; int i; /* always save the PSW and the GPRS*/ @@ -151,9 +180,9 @@ int kvm_arch_put_registers(CPUState *cs, int level) for (i = 0; i < 16; i++) { regs.gprs[i] = env->regs[i]; } - ret = kvm_vcpu_ioctl(cs, KVM_SET_REGS, ®s); - if (ret < 0) { - return ret; + r = kvm_vcpu_ioctl(cs, KVM_SET_REGS, ®s); + if (r < 0) { + return r; } } @@ -162,47 +191,29 @@ int kvm_arch_put_registers(CPUState *cs, int level) return 0; } - reg.id = KVM_REG_S390_CPU_TIMER; - reg.addr = (__u64)&(env->cputm); - ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®); - if (ret < 0) { - return ret; - } - - reg.id = KVM_REG_S390_CLOCK_COMP; - reg.addr = (__u64)&(env->ckc); - ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®); - if (ret < 0) { - return ret; - } - - reg.id = KVM_REG_S390_TODPR; - reg.addr = (__u64)&(env->todpr); - ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®); - if (ret < 0) { - return ret; - } + /* + * These ONE_REGS are not protected by a capability. As they are only + * necessary for migration we just trace a possible error, but don't + * return with an error return code. 
+ */ + kvm_set_one_reg(cs, KVM_REG_S390_CPU_TIMER, &env->cputm); + kvm_set_one_reg(cs, KVM_REG_S390_CLOCK_COMP, &env->ckc); + kvm_set_one_reg(cs, KVM_REG_S390_TODPR, &env->todpr); + kvm_set_one_reg(cs, KVM_REG_S390_GBEA, &env->gbea); + kvm_set_one_reg(cs, KVM_REG_S390_PP, &env->pp); if (cap_async_pf) { - reg.id = KVM_REG_S390_PFTOKEN; - reg.addr = (__u64)&(env->pfault_token); - ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®); - if (ret < 0) { - return ret; + r = kvm_set_one_reg(cs, KVM_REG_S390_PFTOKEN, &env->pfault_token); + if (r < 0) { + return r; } - - reg.id = KVM_REG_S390_PFCOMPARE; - reg.addr = (__u64)&(env->pfault_compare); - ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®); - if (ret < 0) { - return ret; + r = kvm_set_one_reg(cs, KVM_REG_S390_PFCOMPARE, &env->pfault_compare); + if (r < 0) { + return r; } - - reg.id = KVM_REG_S390_PFSELECT; - reg.addr = (__u64)&(env->pfault_select); - ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®); - if (ret < 0) { - return ret; + r = kvm_set_one_reg(cs, KVM_REG_S390_PFSELECT, &env->pfault_select); + if (r < 0) { + return r; } } @@ -220,9 +231,9 @@ int kvm_arch_put_registers(CPUState *cs, int level) sregs.acrs[i] = env->aregs[i]; sregs.crs[i] = env->cregs[i]; } - ret = kvm_vcpu_ioctl(cs, KVM_SET_SREGS, &sregs); - if (ret < 0) { - return ret; + r = kvm_vcpu_ioctl(cs, KVM_SET_SREGS, &sregs); + if (r < 0) { + return r; } } @@ -240,7 +251,6 @@ int kvm_arch_get_registers(CPUState *cs) { S390CPU *cpu = S390_CPU(cs); CPUS390XState *env = &cpu->env; - struct kvm_one_reg reg; struct kvm_sregs sregs; struct kvm_regs regs; int i, r; @@ -288,46 +298,27 @@ int kvm_arch_get_registers(CPUState *cs) env->psa = cs->kvm_run->s.regs.prefix; } - /* One Regs */ - reg.id = KVM_REG_S390_CPU_TIMER; - reg.addr = (__u64)&(env->cputm); - r = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®); - if (r < 0) { - return r; - } - - reg.id = KVM_REG_S390_CLOCK_COMP; - reg.addr = (__u64)&(env->ckc); - r = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®); - if (r < 0) { - return r; - } - - reg.id = KVM_REG_S390_TODPR; - reg.addr = (__u64)&(env->todpr); - r = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®); - if (r < 0) { - return r; - } + /* + * These ONE_REGS are not protected by a capability. As they are only + * necessary for migration we just trace a possible error, but don't + * return with an error return code. 
+ */ + kvm_get_one_reg(cs, KVM_REG_S390_CPU_TIMER, &env->cputm); + kvm_get_one_reg(cs, KVM_REG_S390_CLOCK_COMP, &env->ckc); + kvm_get_one_reg(cs, KVM_REG_S390_TODPR, &env->todpr); + kvm_get_one_reg(cs, KVM_REG_S390_GBEA, &env->gbea); + kvm_get_one_reg(cs, KVM_REG_S390_PP, &env->pp); if (cap_async_pf) { - reg.id = KVM_REG_S390_PFTOKEN; - reg.addr = (__u64)&(env->pfault_token); - r = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®); + r = kvm_get_one_reg(cs, KVM_REG_S390_PFTOKEN, &env->pfault_token); if (r < 0) { return r; } - - reg.id = KVM_REG_S390_PFCOMPARE; - reg.addr = (__u64)&(env->pfault_compare); - r = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®); + r = kvm_get_one_reg(cs, KVM_REG_S390_PFCOMPARE, &env->pfault_compare); if (r < 0) { return r; } - - reg.id = KVM_REG_S390_PFSELECT; - reg.addr = (__u64)&(env->pfault_select); - r = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, ®); + r = kvm_get_one_reg(cs, KVM_REG_S390_PFSELECT, &env->pfault_select); if (r < 0) { return r; } @@ -383,6 +374,26 @@ int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp) return 0; } +int kvm_arch_insert_hw_breakpoint(target_ulong addr, + target_ulong len, int type) +{ + return -ENOSYS; +} + +int kvm_arch_remove_hw_breakpoint(target_ulong addr, + target_ulong len, int type) +{ + return -ENOSYS; +} + +void kvm_arch_remove_all_hw_breakpoints(void) +{ +} + +void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg) +{ +} + void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run) { } @@ -844,6 +855,11 @@ static int handle_tsch(S390CPU *cpu) return ret; } +static int kvm_arch_handle_debug_exit(S390CPU *cpu) +{ + return -ENOSYS; +} + int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run) { S390CPU *cpu = S390_CPU(cs); @@ -859,6 +875,9 @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run) case KVM_EXIT_S390_TSCH: ret = handle_tsch(cpu); break; + case KVM_EXIT_DEBUG: + ret = kvm_arch_handle_debug_exit(cpu); + break; default: fprintf(stderr, "Unknown KVM exit: %d\n", run->exit_reason); break; diff --git a/tests/qemu-iotests/030 b/tests/qemu-iotests/030 index 59a34f76f5..8cb61fd7ec 100755 --- a/tests/qemu-iotests/030 +++ b/tests/qemu-iotests/030 @@ -50,15 +50,7 @@ class TestSingleDrive(iotests.QMPTestCase): result = self.vm.qmp('block-stream', device='drive0') self.assert_qmp(result, 'return', {}) - completed = False - while not completed: - for event in self.vm.get_qmp_events(wait=True): - if event['event'] == 'BLOCK_JOB_COMPLETED': - self.assert_qmp(event, 'data/type', 'stream') - self.assert_qmp(event, 'data/device', 'drive0') - self.assert_qmp(event, 'data/offset', self.image_len) - self.assert_qmp(event, 'data/len', self.image_len) - completed = True + self.wait_until_completed() self.assert_no_active_block_jobs() self.vm.shutdown() @@ -89,15 +81,7 @@ class TestSingleDrive(iotests.QMPTestCase): self.assert_qmp(result, 'return', {}) self.vm.resume_drive('drive0') - completed = False - while not completed: - for event in self.vm.get_qmp_events(wait=True): - if event['event'] == 'BLOCK_JOB_COMPLETED': - self.assert_qmp(event, 'data/type', 'stream') - self.assert_qmp(event, 'data/device', 'drive0') - self.assert_qmp(event, 'data/offset', self.image_len) - self.assert_qmp(event, 'data/len', self.image_len) - completed = True + self.wait_until_completed() self.assert_no_active_block_jobs() self.vm.shutdown() @@ -112,15 +96,7 @@ class TestSingleDrive(iotests.QMPTestCase): result = self.vm.qmp('block-stream', device='drive0', base=mid_img) self.assert_qmp(result, 'return', {}) - completed = 
False - while not completed: - for event in self.vm.get_qmp_events(wait=True): - if event['event'] == 'BLOCK_JOB_COMPLETED': - self.assert_qmp(event, 'data/type', 'stream') - self.assert_qmp(event, 'data/device', 'drive0') - self.assert_qmp(event, 'data/offset', self.image_len) - self.assert_qmp(event, 'data/len', self.image_len) - completed = True + self.wait_until_completed() self.assert_no_active_block_jobs() self.vm.shutdown() @@ -152,15 +128,7 @@ class TestSmallerBackingFile(iotests.QMPTestCase): result = self.vm.qmp('block-stream', device='drive0') self.assert_qmp(result, 'return', {}) - completed = False - while not completed: - for event in self.vm.get_qmp_events(wait=True): - if event['event'] == 'BLOCK_JOB_COMPLETED': - self.assert_qmp(event, 'data/type', 'stream') - self.assert_qmp(event, 'data/device', 'drive0') - self.assert_qmp(event, 'data/offset', self.image_len) - self.assert_qmp(event, 'data/len', self.image_len) - completed = True + self.wait_until_completed() self.assert_no_active_block_jobs() self.vm.shutdown() @@ -442,15 +410,7 @@ class TestSetSpeed(iotests.QMPTestCase): result = self.vm.qmp('block-job-set-speed', device='drive0', speed=8 * 1024 * 1024) self.assert_qmp(result, 'return', {}) - completed = False - while not completed: - for event in self.vm.get_qmp_events(wait=True): - if event['event'] == 'BLOCK_JOB_COMPLETED': - self.assert_qmp(event, 'data/type', 'stream') - self.assert_qmp(event, 'data/device', 'drive0') - self.assert_qmp(event, 'data/offset', self.image_len) - self.assert_qmp(event, 'data/len', self.image_len) - completed = True + self.wait_until_completed() self.assert_no_active_block_jobs() diff --git a/tests/qemu-iotests/056 b/tests/qemu-iotests/056 index 63893423cf..54e4bd0692 100755 --- a/tests/qemu-iotests/056 +++ b/tests/qemu-iotests/056 @@ -57,14 +57,7 @@ class TestSyncModesNoneAndTop(iotests.QMPTestCase): format=iotests.imgfmt, target=target_img) self.assert_qmp(result, 'return', {}) - # Custom completed check as we are not copying all data. 
- completed = False - while not completed: - for event in self.vm.get_qmp_events(wait=True): - if event['event'] == 'BLOCK_JOB_COMPLETED': - self.assert_qmp(event, 'data/device', 'drive0') - self.assert_qmp_absent(event, 'data/error') - completed = True + self.wait_until_completed(check_offset=False) self.assert_no_active_block_jobs() self.vm.shutdown() diff --git a/tests/qemu-iotests/iotests.py b/tests/qemu-iotests/iotests.py index e4fa9af714..f6c437c0c3 100644 --- a/tests/qemu-iotests/iotests.py +++ b/tests/qemu-iotests/iotests.py @@ -257,7 +257,7 @@ class QMPTestCase(unittest.TestCase): self.assert_no_active_block_jobs() return result - def wait_until_completed(self, drive='drive0'): + def wait_until_completed(self, drive='drive0', check_offset=True): '''Wait for a block job to finish, returning the event''' completed = False while not completed: @@ -265,7 +265,8 @@ class QMPTestCase(unittest.TestCase): if event['event'] == 'BLOCK_JOB_COMPLETED': self.assert_qmp(event, 'data/device', drive) self.assert_qmp_absent(event, 'data/error') - self.assert_qmp(event, 'data/offset', self.image_len) + if check_offset: + self.assert_qmp(event, 'data/offset', self.image_len) self.assert_qmp(event, 'data/len', self.image_len) completed = True diff --git a/tests/test-qmp-input-strict.c b/tests/test-qmp-input-strict.c index 38b5e95f68..f03353b755 100644 --- a/tests/test-qmp-input-strict.c +++ b/tests/test-qmp-input-strict.c @@ -153,7 +153,7 @@ static void test_validate_union_flat(TestInputVisitorData *data, /* TODO when generator bug is fixed, add 'integer': 41 */ visit_type_UserDefFlatUnion(v, &tmp, NULL, &errp); - g_assert(!error_is_set(&errp)); + g_assert(!errp); qapi_free_UserDefFlatUnion(tmp); } @@ -167,7 +167,7 @@ static void test_validate_union_anon(TestInputVisitorData *data, v = validate_test_init(data, "42"); visit_type_UserDefAnonUnion(v, &tmp, NULL, &errp); - g_assert(!error_is_set(&errp)); + g_assert(!errp); qapi_free_UserDefAnonUnion(tmp); } @@ -240,7 +240,7 @@ static void test_validate_fail_union_flat(TestInputVisitorData *data, v = validate_test_init(data, "{ 'string': 'c', 'integer': 41, 'boolean': true }"); visit_type_UserDefFlatUnion(v, &tmp, NULL, &errp); - g_assert(error_is_set(&errp)); + g_assert(errp); qapi_free_UserDefFlatUnion(tmp); } @@ -254,7 +254,7 @@ static void test_validate_fail_union_anon(TestInputVisitorData *data, v = validate_test_init(data, "3.14"); visit_type_UserDefAnonUnion(v, &tmp, NULL, &errp); - g_assert(error_is_set(&errp)); + g_assert(errp); qapi_free_UserDefAnonUnion(tmp); } diff --git a/trace-events b/trace-events index 6ecaab2f27..a5218ba393 100644 --- a/trace-events +++ b/trace-events @@ -1243,3 +1243,7 @@ xen_pv_mmio_write(uint64_t addr) "WARNING: write to Xen PV Device MMIO space (ad # hw/pci/pci_host.c pci_cfg_read(const char *dev, unsigned devid, unsigned fnid, unsigned offs, unsigned val) "%s %02u:%u @0x%x -> 0x%x" pci_cfg_write(const char *dev, unsigned devid, unsigned fnid, unsigned offs, unsigned val) "%s %02u:%u @0x%x <- 0x%x" + +# target-s390/kvm.c
kvm_failed_reg_get(uint64_t id, const char *msg) "Warning: Unable to retrieve ONEREG %" PRIu64 " from KVM: %s"
kvm_failed_reg_set(uint64_t id, const char *msg) "Warning: Unable to set ONEREG %" PRIu64 " to KVM: %s"
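A few of the conversion patterns above reward a closer look. The translate_one() rewrite drops the hand-rolled shift-and-mask decode in favour of extract32()/sextract32(). The two helpers are re-implemented below for illustration (in QEMU they are library code; only the example encoding, BIS $31,$31,$31, i.e. the canonical Alpha NOP 0x47ff041f, is chosen here):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative re-implementations of QEMU's bitfield helpers. */
    static uint32_t extract32(uint32_t value, int start, int length)
    {
        return (value >> start) & (~0U >> (32 - length));
    }

    static int32_t sextract32(uint32_t value, int start, int length)
    {
        /* Shift the field up to the sign bit, then arithmetic-shift back. */
        return (int32_t)(value << (32 - length - start)) >> (32 - length);
    }

    int main(void)
    {
        uint32_t insn = 0x47ff041f; /* BIS $31,$31,$31 */

        printf("opc=%02x ra=%u rb=%u rc=%u islit=%u fn7=%02x disp16=%d\n",
               extract32(insn, 26, 6), extract32(insn, 21, 5),
               extract32(insn, 16, 5), extract32(insn, 0, 5),
               extract32(insn, 12, 1), extract32(insn, 5, 7),
               sextract32(insn, 0, 16));
        return 0;
    }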
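Most of the deleted bulk in the integer cases came from special-casing register 31 at every site. The new load_gpr()/dest_gpr()/load_zero() helpers centralize the Alpha rule that R31 (and F31) reads as zero and absorbs writes; the per-TB ctx.zero and ctx.sink temporaries freed in gen_intermediate_code_internal() exist for exactly this. An interpreter-style analogue in plain C (the bodies here are invented for illustration; only the names mirror the patch):

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t regs[31]; /* R0..R30; R31 is architecturally zero */
    static uint64_t sink;     /* writes aimed at R31 land here, ignored */

    static uint64_t load_gpr(unsigned rn)
    {
        return rn == 31 ? 0 : regs[rn];
    }

    static uint64_t *dest_gpr(unsigned rn)
    {
        return rn == 31 ? &sink : &regs[rn];
    }

    int main(void)
    {
        regs[2] = 40;
        *dest_gpr(1) = load_gpr(2) + 2;  /* ADDQ-style write */
        *dest_gpr(31) = 123;             /* discarded; R31 still reads 0 */
        printf("r1=%llu r31=%llu\n", (unsigned long long)regs[1],
               (unsigned long long)load_gpr(31));
        return 0;
    }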
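The CMOVxx conversions swap per-case branches for a single tcg_gen_movcond_i64 whose final source operand is load_gpr(ctx, rc), i.e. the destination's old value, so the register is only overwritten when the condition on ra holds. The scalar semantics, with function names invented here:

    #include <stdint.h>
    #include <stdio.h>

    /* CMOVEQ: rc = (ra == 0) ? rb : rc */
    static uint64_t cmov_eq(uint64_t va, uint64_t vb, uint64_t old_rc)
    {
        return va == 0 ? vb : old_rc;
    }

    /* CMOVLBS: rc = (ra & 1) ? rb : rc (low bit set) */
    static uint64_t cmov_lbs(uint64_t va, uint64_t vb, uint64_t old_rc)
    {
        return (va & 1) ? vb : old_rc;
    }

    int main(void)
    {
        printf("%llu %llu\n", (unsigned long long)cmov_eq(0, 7, 9),
               (unsigned long long)cmov_lbs(2, 7, 9));
        return 0;
    }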
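UMULH likewise shrinks to one tcg_gen_mulu2_i64, which yields the 128-bit product as a (low, high) pair; the patch keeps the high half in vc and frees the unwanted low half. Where a 128-bit integer type is available (a GCC/Clang extension on 64-bit hosts, assumed here), the same operation is one expression:

    #include <stdint.h>
    #include <stdio.h>

    /* High 64 bits of an unsigned 64x64 -> 128 multiply. */
    static uint64_t umulh(uint64_t a, uint64_t b)
    {
        return (uint64_t)(((unsigned __int128)a * b) >> 64);
    }

    int main(void)
    {
        printf("0x%016llx\n", (unsigned long long)
               umulh(0xdeadbeefcafebabeULL, 0x0123456789abcdefULL));
        return 0;
    }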
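For ZAP/ZAPNOT with a literal, the new gen_zapnoti folds the byte-select mask at translation time (ZAP is just ZAPNOT of the complemented literal, hence the ~lit in the patch). Each of the eight mask bits decides whether the corresponding byte of the source survives; a standalone version of the expansion:

    #include <stdint.h>
    #include <stdio.h>

    /* ZAPNOT: keep the bytes whose mask bit is set, zero the rest. */
    static uint64_t zapnot(uint64_t va, uint8_t mask)
    {
        uint64_t bitmask = 0;
        for (int i = 0; i < 8; i++) {
            if (mask & (1u << i)) {
                bitmask |= 0xffull << (i * 8);
            }
        }
        return va & bitmask;
    }

    int main(void)
    {
        /* Mask 0x0f keeps the low four bytes. */
        printf("0x%016llx\n",
               (unsigned long long)zapnot(0x1122334455667788ULL, 0x0f));
        return 0;
    }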
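The CPYS/CPYSN/CPYSE trio collapses into one gen_cpy_mask helper, parameterized by whether the bits taken from ra are complemented and by which bits they are: the sign alone (0x8000000000000000) for CPYS/CPYSN, sign plus exponent (0xFFF0000000000000) for CPYSE. A sketch over raw 64-bit register images, using IEEE double bit patterns purely as a worked example:

    #include <stdint.h>
    #include <stdio.h>

    #define SIGN_MASK 0x8000000000000000ULL

    /* Take (optionally complemented) masked bits from a, the rest from b. */
    static uint64_t cpy_mask(uint64_t a, uint64_t b, int inv_a, uint64_t mask)
    {
        if (inv_a) {
            a = ~a;
        }
        return (a & mask) | (b & ~mask);
    }

    int main(void)
    {
        uint64_t minus_one = 0xbff0000000000000ULL; /* -1.0 as a double image */
        uint64_t two       = 0x4000000000000000ULL; /*  2.0 */

        /* CPYS: sign of a, magnitude of b -> -2.0 (0xc000...) */
        printf("cpys  0x%016llx\n",
               (unsigned long long)cpy_mask(minus_one, two, 0, SIGN_MASK));
        /* CPYSN: complemented sign of a -> +2.0 (0x4000...) */
        printf("cpysn 0x%016llx\n",
               (unsigned long long)cpy_mask(minus_one, two, 1, SIGN_MASK));
        return 0;
    }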
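On the s390x side, the kvm_set_one_reg()/kvm_get_one_reg() wrappers factor out the kvm_one_reg boilerplate and, for the registers not guarded by a capability (CPU timer, clock comparator, TOD programmable register, and the new gbea/pp fields), only trace failures via the new kvm_failed_reg_set/kvm_failed_reg_get events instead of failing the whole state transfer. Underneath, the ONE_REG interface is a plain ioctl on the vcpu file descriptor; a minimal sketch assuming a Linux host with kernel KVM headers and an already-created vcpu_fd:

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Write one guest register; returns 0 or -1 with errno set. */
    static int set_one_reg(int vcpu_fd, uint64_t id, const void *src)
    {
        struct kvm_one_reg reg = {
            .id   = id,
            .addr = (uint64_t)(uintptr_t)src,
        };
        return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
    }

    /* Read one guest register into *dst. */
    static int get_one_reg(int vcpu_fd, uint64_t id, void *dst)
    {
        struct kvm_one_reg reg = {
            .id   = id,
            .addr = (uint64_t)(uintptr_t)dst,
        };
        return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
    }

QEMU's versions take a CPUState and go through kvm_vcpu_ioctl(), but the register IDs (e.g. KVM_REG_S390_CPU_TIMER) and the id/addr pairing are the same kernel ABI.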