Diffstat:
-rw-r--r--  MAINTAINERS | 6
-rw-r--r--  Makefile | 2
-rw-r--r--  accel/kvm/kvm-all.c | 13
-rw-r--r--  accel/tcg/cpu-exec.c | 34
-rw-r--r--  accel/tcg/tcg-accel-ops-mttcg.c | 3
-rw-r--r--  accel/tcg/tcg-accel-ops-rr.c | 2
-rw-r--r--  accel/tcg/tcg-accel-ops.c | 8
-rw-r--r--  accel/tcg/tcg-accel-ops.h | 1
-rw-r--r--  accel/tcg/tcg-runtime.c | 6
-rw-r--r--  accel/tcg/translate-all.c | 18
-rw-r--r--  block/blkdebug.c | 3
-rw-r--r--  chardev/char-socket.c | 13
-rw-r--r--  chardev/char.c | 3
-rw-r--r--  docs/system/deprecated.rst | 13
-rw-r--r--  gdbstub.c | 2
-rw-r--r--  hw/block/Kconfig | 3
-rw-r--r--  hw/block/meson.build | 2
-rw-r--r--  hw/block/tc58128.c | 26
-rw-r--r--  hw/char/Kconfig | 3
-rw-r--r--  hw/char/meson.build | 2
-rw-r--r--  hw/dma/sparc32_dma.c | 4
-rw-r--r--  hw/i386/x86.c | 6
-rw-r--r--  hw/intc/Kconfig | 3
-rw-r--r--  hw/intc/apic.c | 6
-rw-r--r--  hw/intc/meson.build | 2
-rw-r--r--  hw/m68k/q800.c | 4
-rw-r--r--  hw/mips/jazz.c | 4
-rw-r--r--  hw/pci-host/Kconfig | 4
-rw-r--r--  hw/pci-host/meson.build | 1
-rw-r--r--  hw/pci-host/sh_pci.c (renamed from hw/sh4/sh_pci.c) | 0
-rw-r--r--  hw/scsi/esp-pci.c | 53
-rw-r--r--  hw/scsi/esp.c | 975
-rw-r--r--  hw/scsi/lsi53c895a.c | 4
-rw-r--r--  hw/scsi/scsi-bus.c | 33
-rw-r--r--  hw/scsi/scsi-disk.c | 47
-rw-r--r--  hw/scsi/scsi-generic.c | 25
-rw-r--r--  hw/scsi/trace-events | 5
-rw-r--r--  hw/scsi/virtio-scsi.c | 46
-rw-r--r--  hw/scsi/vmw_pvscsi.c | 39
-rw-r--r--  hw/sh4/Kconfig | 12
-rw-r--r--  hw/sh4/meson.build | 1
-rw-r--r--  hw/sh4/sh7750_regs.h | 24
-rw-r--r--  hw/sparc/sun4m.c | 2
-rw-r--r--  hw/timer/Kconfig | 4
-rw-r--r--  hw/timer/meson.build | 2
-rw-r--r--  include/exec/exec-all.h | 22
-rw-r--r--  include/exec/poison.h | 2
-rw-r--r--  include/exec/tb-lookup.h | 26
-rw-r--r--  include/hw/core/cpu.h | 2
-rw-r--r--  include/hw/elf_ops.h | 4
-rw-r--r--  include/hw/scsi/esp.h | 52
-rw-r--r--  include/hw/scsi/scsi.h | 5
-rw-r--r--  include/hw/sh4/sh.h | 31
-rw-r--r--  include/qemu/config-file.h | 5
-rw-r--r--  include/scsi/utils.h | 27
-rw-r--r--  linux-user/main.c | 1
-rw-r--r--  linux-user/sh4/signal.c | 8
-rw-r--r--  linux-user/syscall.c | 18
-rw-r--r--  meson.build | 12
-rw-r--r--  qemu-options.hx | 14
-rw-r--r--  qga/vss-win32/meson.build | 4
-rw-r--r--  qom/object_interfaces.c | 9
-rw-r--r--  scsi/qemu-pr-helper.c | 24
-rw-r--r--  scsi/utils.c | 72
-rw-r--r--  softmmu/physmem.c | 2
-rw-r--r--  softmmu/vl.c | 51
-rw-r--r--  target/hexagon/macros.h | 4
-rw-r--r--  target/hexagon/opcodes.c | 1
-rw-r--r--  target/i386/kvm/kvm.c | 9
-rw-r--r--  target/sh4/cpu.h | 11
-rw-r--r--  target/sh4/helper.c | 101
-rw-r--r--  tcg/aarch64/tcg-target.c.inc | 229
-rw-r--r--  tcg/tcg.c | 29
-rw-r--r--  tcg/tci.c | 526
-rw-r--r--  tcg/tci/tcg-target.c.inc | 204
-rw-r--r--  tests/fp/meson.build | 2
-rw-r--r--  tests/meson.build | 8
-rw-r--r--  tests/qtest/meson.build | 15
-rw-r--r--  trace/control.c | 13
-rw-r--r--  util/qemu-config.c | 23
-rw-r--r--  util/qemu-option.c | 6
81 files changed, 1876 insertions(+), 1165 deletions(-)
diff --git a/MAINTAINERS b/MAINTAINERS
index 7da2d01671..738786146d 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1397,16 +1397,22 @@ R2D
M: Yoshinori Sato <ysato@users.sourceforge.jp>
R: Magnus Damm <magnus.damm@gmail.com>
S: Odd Fixes
+F: hw/char/sh_serial.c
F: hw/sh4/r2d.c
F: hw/intc/sh_intc.c
+F: hw/pci-host/sh_pci.c
+F: hw/timer/sh_timer.c
F: include/hw/sh4/sh_intc.h
Shix
M: Yoshinori Sato <ysato@users.sourceforge.jp>
R: Magnus Damm <magnus.damm@gmail.com>
S: Odd Fixes
+F: hw/block/tc58128.c
+F: hw/char/sh_serial.c
F: hw/sh4/shix.c
F: hw/intc/sh_intc.c
+F: hw/timer/sh_timer.c
F: include/hw/sh4/sh_intc.h
SPARC Machines
diff --git a/Makefile b/Makefile
index d7fb6b270e..bcbbec71a1 100644
--- a/Makefile
+++ b/Makefile
@@ -149,7 +149,7 @@ $(ninja-targets): run-ninja
# --output-sync line.
run-ninja: config-host.mak
ifneq ($(filter $(ninja-targets), $(ninja-cmd-goals)),)
- +$(quiet-@)$(if $(MAKE.nq),@:, $(NINJA) \
+ +$(quiet-@)$(if $(MAKE.nq),@:, $(NINJA) -d keepdepfile \
$(NINJAFLAGS) $(sort $(filter $(ninja-targets), $(ninja-cmd-goals))) | cat)
endif
endif
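
For context on the flag added above: ninja's "-d keepdepfile" debug option stops ninja from deleting the compiler-generated .d depfiles once it has read them, so the make wrapper can keep consulting the same dependency information across rebuilds.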
diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c
index 84c943fcdb..f88a52393f 100644
--- a/accel/kvm/kvm-all.c
+++ b/accel/kvm/kvm-all.c
@@ -598,8 +598,12 @@ static void kvm_memslot_init_dirty_bitmap(KVMSlot *mem)
* too, in most cases).
* So for now, let's align to 64 instead of HOST_LONG_BITS here, in
* a hope that sizeof(long) won't become >8 any time soon.
+ *
+ * Note: the granule of the kvm dirty log is qemu_real_host_page_size;
+ * mem->memory_size is aligned to it (otherwise this mem can't be
+ * registered to KVM).
*/
- hwaddr bitmap_size = ALIGN(((mem->memory_size) >> TARGET_PAGE_BITS),
+ hwaddr bitmap_size = ALIGN(mem->memory_size / qemu_real_host_page_size,
/*HOST_LONG_BITS*/ 64) / 8;
mem->dirty_bmap = g_malloc0(bitmap_size);
}
@@ -669,6 +673,10 @@ out:
#define KVM_CLEAR_LOG_ALIGN (qemu_real_host_page_size << KVM_CLEAR_LOG_SHIFT)
#define KVM_CLEAR_LOG_MASK (-KVM_CLEAR_LOG_ALIGN)
+/*
+ * As the granule of the kvm dirty log is qemu_real_host_page_size,
+ * @start and @size must be aligned to it.
+ */
static int kvm_log_clear_one_slot(KVMSlot *mem, int as_id, uint64_t start,
uint64_t size)
{
@@ -678,6 +686,9 @@ static int kvm_log_clear_one_slot(KVMSlot *mem, int as_id, uint64_t start,
unsigned long *bmap_clear = NULL, psize = qemu_real_host_page_size;
int ret;
+ /* Make sure start and size are qemu_real_host_page_size aligned */
+ assert(QEMU_IS_ALIGNED(start | size, psize));
+
/*
* We need to extend either the start or the size or both to
* satisfy the KVM interface requirement. Firstly, do the start
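
A standalone sketch of the sizing rule the hunk above adopts (hypothetical helper, not from the patch): one dirty bit per host page, padded to whole 64-bit words, returned in bytes.

    #include <stdint.h>

    /* Mirrors ALIGN(memory_size / qemu_real_host_page_size, 64) / 8:
     * one bit per host page, rounded up so the kernel can store whole
     * unsigned-long (assumed 64-bit) words, converted to a byte count. */
    static uint64_t dirty_bitmap_bytes(uint64_t memory_size, uint64_t host_page_size)
    {
        uint64_t bits  = memory_size / host_page_size; /* one bit per page */
        uint64_t words = (bits + 63) / 64;             /* round up to u64s */
        return words * 8;                              /* bytes to allocate */
    }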
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index 16e4fe3ccd..bdfa036ac8 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -245,11 +245,11 @@ static void cpu_exec_exit(CPUState *cpu)
void cpu_exec_step_atomic(CPUState *cpu)
{
+ CPUArchState *env = (CPUArchState *)cpu->env_ptr;
TranslationBlock *tb;
target_ulong cs_base, pc;
uint32_t flags;
- uint32_t cflags = 1;
- uint32_t cf_mask = cflags & CF_HASH_MASK;
+ uint32_t cflags = (curr_cflags(cpu) & ~CF_PARALLEL) | 1;
int tb_exit;
if (sigsetjmp(cpu->jmp_env, 0) == 0) {
@@ -258,15 +258,15 @@ void cpu_exec_step_atomic(CPUState *cpu)
g_assert(!cpu->running);
cpu->running = true;
- tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, cf_mask);
+ cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
+ tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
+
if (tb == NULL) {
mmap_lock();
tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
mmap_unlock();
}
- /* Since we got here, we know that parallel_cpus must be true. */
- parallel_cpus = false;
cpu_exec_enter(cpu);
/* execute the generated code */
trace_exec_tb(tb, pc);
@@ -294,7 +294,6 @@ void cpu_exec_step_atomic(CPUState *cpu)
* the execution.
*/
g_assert(cpu_in_exclusive_context(cpu));
- parallel_cpus = true;
cpu->running = false;
end_exclusive();
}
@@ -305,7 +304,7 @@ struct tb_desc {
CPUArchState *env;
tb_page_addr_t phys_page1;
uint32_t flags;
- uint32_t cf_mask;
+ uint32_t cflags;
uint32_t trace_vcpu_dstate;
};
@@ -319,7 +318,7 @@ static bool tb_lookup_cmp(const void *p, const void *d)
tb->cs_base == desc->cs_base &&
tb->flags == desc->flags &&
tb->trace_vcpu_dstate == desc->trace_vcpu_dstate &&
- (tb_cflags(tb) & (CF_HASH_MASK | CF_INVALID)) == desc->cf_mask) {
+ tb_cflags(tb) == desc->cflags) {
/* check next page if needed */
if (tb->page_addr[1] == -1) {
return true;
@@ -339,7 +338,7 @@ static bool tb_lookup_cmp(const void *p, const void *d)
TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
target_ulong cs_base, uint32_t flags,
- uint32_t cf_mask)
+ uint32_t cflags)
{
tb_page_addr_t phys_pc;
struct tb_desc desc;
@@ -348,7 +347,7 @@ TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
desc.env = (CPUArchState *)cpu->env_ptr;
desc.cs_base = cs_base;
desc.flags = flags;
- desc.cf_mask = cf_mask;
+ desc.cflags = cflags;
desc.trace_vcpu_dstate = *cpu->trace_dstate;
desc.pc = pc;
phys_pc = get_page_addr_code(desc.env, pc);
@@ -356,7 +355,7 @@ TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
return NULL;
}
desc.phys_page1 = phys_pc & TARGET_PAGE_MASK;
- h = tb_hash_func(phys_pc, pc, flags, cf_mask, *cpu->trace_dstate);
+ h = tb_hash_func(phys_pc, pc, flags, cflags, *cpu->trace_dstate);
return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp);
}
@@ -416,16 +415,19 @@ static inline void tb_add_jump(TranslationBlock *tb, int n,
static inline TranslationBlock *tb_find(CPUState *cpu,
TranslationBlock *last_tb,
- int tb_exit, uint32_t cf_mask)
+ int tb_exit, uint32_t cflags)
{
+ CPUArchState *env = (CPUArchState *)cpu->env_ptr;
TranslationBlock *tb;
target_ulong cs_base, pc;
uint32_t flags;
- tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, cf_mask);
+ cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
+
+ tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
if (tb == NULL) {
mmap_lock();
- tb = tb_gen_code(cpu, pc, cs_base, flags, cf_mask);
+ tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
mmap_unlock();
/* We add the TB in the virtual pc hash table for the fast lookup */
qatomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb);
@@ -491,7 +493,7 @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
if (replay_has_exception()
&& cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0) {
/* Execute just one insn to trigger exception pending in the log */
- cpu->cflags_next_tb = (curr_cflags() & ~CF_USE_ICOUNT) | 1;
+ cpu->cflags_next_tb = (curr_cflags(cpu) & ~CF_USE_ICOUNT) | 1;
}
#endif
return false;
@@ -788,7 +790,7 @@ int cpu_exec(CPUState *cpu)
have CF_INVALID set, -1 is a convenient invalid value that
does not require tcg headers for cpu_common_reset. */
if (cflags == -1) {
- cflags = curr_cflags();
+ cflags = curr_cflags(cpu);
} else {
cpu->cflags_next_tb = -1;
}
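
The curr_cflags(cpu) calls above rely on the per-vCPU flags that replace the removed parallel_cpus global; a sketch consistent with this series (the exact upstream definition may differ):

    /* cpu->tcg_cflags is populated once per vCPU by tcg_cpu_init_cflags()
     * (added in accel/tcg/tcg-accel-ops.c below), so CF_PARALLEL and
     * CF_USE_ICOUNT no longer depend on global state. */
    static inline uint32_t curr_cflags(CPUState *cpu)
    {
        return cpu->tcg_cflags;
    }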
diff --git a/accel/tcg/tcg-accel-ops-mttcg.c b/accel/tcg/tcg-accel-ops-mttcg.c
index 42973fb062..847d2079d2 100644
--- a/accel/tcg/tcg-accel-ops-mttcg.c
+++ b/accel/tcg/tcg-accel-ops-mttcg.c
@@ -114,8 +114,7 @@ void mttcg_start_vcpu_thread(CPUState *cpu)
char thread_name[VCPU_THREAD_NAME_SIZE];
g_assert(tcg_enabled());
-
- parallel_cpus = (current_machine->smp.max_cpus > 1);
+ tcg_cpu_init_cflags(cpu, current_machine->smp.max_cpus > 1);
cpu->thread = g_malloc0(sizeof(QemuThread));
cpu->halt_cond = g_malloc0(sizeof(QemuCond));
diff --git a/accel/tcg/tcg-accel-ops-rr.c b/accel/tcg/tcg-accel-ops-rr.c
index 4a66055e0d..018b54c508 100644
--- a/accel/tcg/tcg-accel-ops-rr.c
+++ b/accel/tcg/tcg-accel-ops-rr.c
@@ -269,7 +269,7 @@ void rr_start_vcpu_thread(CPUState *cpu)
static QemuThread *single_tcg_cpu_thread;
g_assert(tcg_enabled());
- parallel_cpus = false;
+ tcg_cpu_init_cflags(cpu, false);
if (!single_tcg_cpu_thread) {
cpu->thread = g_malloc0(sizeof(QemuThread));
diff --git a/accel/tcg/tcg-accel-ops.c b/accel/tcg/tcg-accel-ops.c
index 6144d9df87..6cdcaa2855 100644
--- a/accel/tcg/tcg-accel-ops.c
+++ b/accel/tcg/tcg-accel-ops.c
@@ -41,6 +41,14 @@
/* common functionality among all TCG variants */
+void tcg_cpu_init_cflags(CPUState *cpu, bool parallel)
+{
+ uint32_t cflags = cpu->cluster_index << CF_CLUSTER_SHIFT;
+ cflags |= parallel ? CF_PARALLEL : 0;
+ cflags |= icount_enabled() ? CF_USE_ICOUNT : 0;
+ cpu->tcg_cflags = cflags;
+}
+
void tcg_cpus_destroy(CPUState *cpu)
{
cpu_thread_signal_destroyed(cpu);
diff --git a/accel/tcg/tcg-accel-ops.h b/accel/tcg/tcg-accel-ops.h
index 48130006de..6a5fcef889 100644
--- a/accel/tcg/tcg-accel-ops.h
+++ b/accel/tcg/tcg-accel-ops.h
@@ -17,5 +17,6 @@
void tcg_cpus_destroy(CPUState *cpu);
int tcg_cpus_exec(CPUState *cpu);
void tcg_handle_interrupt(CPUState *cpu, int mask);
+void tcg_cpu_init_cflags(CPUState *cpu, bool parallel);
#endif /* TCG_CPUS_H */
diff --git a/accel/tcg/tcg-runtime.c b/accel/tcg/tcg-runtime.c
index d736f4ff55..49f5de37e8 100644
--- a/accel/tcg/tcg-runtime.c
+++ b/accel/tcg/tcg-runtime.c
@@ -27,10 +27,10 @@
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"
#include "exec/exec-all.h"
-#include "exec/tb-lookup.h"
#include "disas/disas.h"
#include "exec/log.h"
#include "tcg/tcg.h"
+#include "exec/tb-lookup.h"
/* 32-bit helpers */
@@ -152,7 +152,9 @@ const void *HELPER(lookup_tb_ptr)(CPUArchState *env)
target_ulong cs_base, pc;
uint32_t flags;
- tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, curr_cflags());
+ cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
+
+ tb = tb_lookup(cpu, pc, cs_base, flags, curr_cflags(cpu));
if (tb == NULL) {
return tcg_code_gen_epilogue;
}
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index bbd919a393..f32df8b240 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -224,7 +224,6 @@ static void *l1_map[V_L1_MAX_SIZE];
TCGContext tcg_init_ctx;
__thread TCGContext *tcg_ctx;
TBContext tb_ctx;
-bool parallel_cpus;
static void page_table_config_init(void)
{
@@ -1311,7 +1310,7 @@ static bool tb_cmp(const void *ap, const void *bp)
return a->pc == b->pc &&
a->cs_base == b->cs_base &&
a->flags == b->flags &&
- (tb_cflags(a) & CF_HASH_MASK) == (tb_cflags(b) & CF_HASH_MASK) &&
+ (tb_cflags(a) & ~CF_INVALID) == (tb_cflags(b) & ~CF_INVALID) &&
a->trace_vcpu_dstate == b->trace_vcpu_dstate &&
a->page_addr[0] == b->page_addr[0] &&
a->page_addr[1] == b->page_addr[1];
@@ -1616,6 +1615,7 @@ static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
PageDesc *p;
uint32_t h;
tb_page_addr_t phys_pc;
+ uint32_t orig_cflags = tb_cflags(tb);
assert_memory_lock();
@@ -1626,7 +1626,7 @@ static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
/* remove the TB from the hash list */
phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
- h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb_cflags(tb) & CF_HASH_MASK,
+ h = tb_hash_func(phys_pc, tb->pc, tb->flags, orig_cflags,
tb->trace_vcpu_dstate);
if (!qht_remove(&tb_ctx.htable, tb, h)) {
return;
@@ -1793,6 +1793,7 @@ tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
uint32_t h;
assert_memory_lock();
+ tcg_debug_assert(!(tb->cflags & CF_INVALID));
/*
* Add the TB to the page list, acquiring first the pages's locks.
@@ -1811,7 +1812,7 @@ tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
}
/* add in the hash table */
- h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->cflags & CF_HASH_MASK,
+ h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->cflags,
tb->trace_vcpu_dstate);
qht_insert(&tb_ctx.htable, tb, h, &existing_tb);
@@ -1865,9 +1866,6 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
cflags = (cflags & ~CF_COUNT_MASK) | 1;
}
- cflags &= ~CF_CLUSTER_MASK;
- cflags |= cpu->cluster_index << CF_CLUSTER_SHIFT;
-
max_insns = cflags & CF_COUNT_MASK;
if (max_insns == 0) {
max_insns = CF_COUNT_MASK;
@@ -2194,7 +2192,7 @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
if (current_tb_modified) {
page_collection_unlock(pages);
/* Force execution of one insn next time. */
- cpu->cflags_next_tb = 1 | curr_cflags();
+ cpu->cflags_next_tb = 1 | curr_cflags(cpu);
mmap_unlock();
cpu_loop_exit_noexc(cpu);
}
@@ -2362,7 +2360,7 @@ static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
#ifdef TARGET_HAS_PRECISE_SMC
if (current_tb_modified) {
/* Force execution of one insn next time. */
- cpu->cflags_next_tb = 1 | curr_cflags();
+ cpu->cflags_next_tb = 1 | curr_cflags(cpu);
return true;
}
#endif
@@ -2438,7 +2436,7 @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
* operations only (which execute after completion) so we don't
* double instrument the instruction.
*/
- cpu->cflags_next_tb = curr_cflags() | CF_MEMI_ONLY | CF_LAST_IO | n;
+ cpu->cflags_next_tb = curr_cflags(cpu) | CF_MEMI_ONLY | CF_LAST_IO | n;
qemu_log_mask_and_addr(CPU_LOG_EXEC, tb->pc,
"cpu_io_recompile: rewound execution of TB to "
diff --git a/block/blkdebug.c b/block/blkdebug.c
index 5fe6172da9..7eaa8a28bf 100644
--- a/block/blkdebug.c
+++ b/block/blkdebug.c
@@ -279,9 +279,8 @@ static int read_config(BDRVBlkdebugState *s, const char *filename,
return -errno;
}
- ret = qemu_config_parse(f, config_groups, filename);
+ ret = qemu_config_parse(f, config_groups, filename, errp);
if (ret < 0) {
- error_setg(errp, "Could not parse blkdebug config file");
goto fail;
}
}
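
With qemu_config_parse() now taking an Error **, the caller forwards errp instead of overwriting the callee's precise diagnosis with a generic message. A hedged sketch of the resulting pattern (load_config and its parameters are illustrative names):

    static int load_config(const char *filename, QemuOptsList **config_groups,
                           Error **errp)
    {
        FILE *f = fopen(filename, "r");
        int ret;

        if (!f) {
            error_setg_errno(errp, errno, "Could not open %s", filename);
            return -errno;
        }
        ret = qemu_config_parse(f, config_groups, filename, errp);
        fclose(f);
        return ret < 0 ? ret : 0; /* on failure, *errp already explains why */
    }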
diff --git a/chardev/char-socket.c b/chardev/char-socket.c
index 06a37c0cc8..c8bced76b7 100644
--- a/chardev/char-socket.c
+++ b/chardev/char-socket.c
@@ -1472,8 +1472,17 @@ static void qemu_chr_parse_socket(QemuOpts *opts, ChardevBackend *backend,
sock = backend->u.socket.data = g_new0(ChardevSocket, 1);
qemu_chr_parse_common(opts, qapi_ChardevSocket_base(sock));
- sock->has_nodelay = qemu_opt_get(opts, "delay");
- sock->nodelay = !qemu_opt_get_bool(opts, "delay", true);
+ if (qemu_opt_get(opts, "delay") && qemu_opt_get(opts, "nodelay")) {
+ error_setg(errp, "'delay' and 'nodelay' are mutually exclusive");
+ return;
+ }
+ sock->has_nodelay =
+ qemu_opt_get(opts, "delay") ||
+ qemu_opt_get(opts, "nodelay");
+ sock->nodelay =
+ !qemu_opt_get_bool(opts, "delay", true) ||
+ qemu_opt_get_bool(opts, "nodelay", false);
+
/*
* We have different default to QMP for 'server', hence
* we can't just check for existence of 'server'
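
In practice both spellings keep working for now: "-chardev socket,id=ch0,host=127.0.0.1,port=4444,delay=off" and the preferred "-chardev socket,id=ch0,host=127.0.0.1,port=4444,nodelay=on" are equivalent (illustrative command lines), while supplying delay= and nodelay= together now fails with the mutual-exclusion error added above.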
diff --git a/chardev/char.c b/chardev/char.c
index 288efebd12..97cafd6849 100644
--- a/chardev/char.c
+++ b/chardev/char.c
@@ -868,6 +868,9 @@ QemuOptsList qemu_chardev_opts = {
.name = "delay",
.type = QEMU_OPT_BOOL,
},{
+ .name = "nodelay",
+ .type = QEMU_OPT_BOOL,
+ },{
.name = "reconnect",
.type = QEMU_OPT_NUMBER,
},{
diff --git a/docs/system/deprecated.rst b/docs/system/deprecated.rst
index 561c916da2..cfabe69846 100644
--- a/docs/system/deprecated.rst
+++ b/docs/system/deprecated.rst
@@ -134,6 +134,12 @@ Boolean options such as ``share=on``/``share=off`` could be written
in short form as ``share`` and ``noshare``. This is now deprecated
and will cause a warning.
+``delay`` option for socket character devices (since 6.0)
+'''''''''''''''''''''''''''''''''''''''''''''''''''''''''
+
+The replacement for the ``nodelay`` short-form boolean option is ``nodelay=on``
+rather than ``delay=off``.
+
``--enable-fips`` (since 6.0)
'''''''''''''''''''''''''''''
@@ -153,6 +159,13 @@ The ``-writeconfig`` option is not able to serialize the entire contents
of the QEMU command line. It is thus considered a failed experiment
and deprecated, with no current replacement.
+Userspace local APIC with KVM (x86, since 6.0)
+''''''''''''''''''''''''''''''''''''''''''''''
+
+Using ``-M kernel-irqchip=off`` with x86 machine types that include a local
+APIC is deprecated. The ``split`` setting is supported, as is using
+``-M kernel-irqchip=off`` with the ISA PC machine type.
+
QEMU Machine Protocol (QMP) commands
------------------------------------
diff --git a/gdbstub.c b/gdbstub.c
index 3ee40479b6..16d7c8f534 100644
--- a/gdbstub.c
+++ b/gdbstub.c
@@ -3505,7 +3505,7 @@ int gdbserver_start(const char *device)
if (strstart(device, "tcp:", NULL)) {
/* enforce required TCP attributes */
snprintf(gdbstub_device_name, sizeof(gdbstub_device_name),
- "%s,wait=off,delay=off,server=on", device);
+ "%s,wait=off,nodelay=on,server=on", device);
device = gdbstub_device_name;
}
#ifndef _WIN32
diff --git a/hw/block/Kconfig b/hw/block/Kconfig
index 2d17f481ad..4fcd152166 100644
--- a/hw/block/Kconfig
+++ b/hw/block/Kconfig
@@ -22,6 +22,9 @@ config ECC
config ONENAND
bool
+config TC58128
+ bool
+
config NVME_PCI
bool
default y if PCI_DEVICES
diff --git a/hw/block/meson.build b/hw/block/meson.build
index 602ca6c854..4bf994c64f 100644
--- a/hw/block/meson.build
+++ b/hw/block/meson.build
@@ -12,7 +12,7 @@ softmmu_ss.add(when: 'CONFIG_PFLASH_CFI02', if_true: files('pflash_cfi02.c'))
softmmu_ss.add(when: 'CONFIG_SSI_M25P80', if_true: files('m25p80.c'))
softmmu_ss.add(when: 'CONFIG_SWIM', if_true: files('swim.c'))
softmmu_ss.add(when: 'CONFIG_XEN', if_true: files('xen-block.c'))
-softmmu_ss.add(when: 'CONFIG_SH4', if_true: files('tc58128.c'))
+softmmu_ss.add(when: 'CONFIG_TC58128', if_true: files('tc58128.c'))
softmmu_ss.add(when: 'CONFIG_NVME_PCI', if_true: files('nvme.c', 'nvme-ns.c'))
specific_ss.add(when: 'CONFIG_VIRTIO_BLK', if_true: files('virtio-blk.c'))
diff --git a/hw/block/tc58128.c b/hw/block/tc58128.c
index 9888f01ac6..bfc27ad899 100644
--- a/hw/block/tc58128.c
+++ b/hw/block/tc58128.c
@@ -1,3 +1,29 @@
+/*
+ * TC58128 NAND EEPROM emulation
+ *
+ * Copyright (c) 2005 Samuel Tardieu
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ * SPDX-License-Identifier: MIT
+ */
#include "qemu/osdep.h"
#include "qemu/units.h"
#include "hw/sh4/sh.h"
diff --git a/hw/char/Kconfig b/hw/char/Kconfig
index 939bc44758..f6f4fffd1b 100644
--- a/hw/char/Kconfig
+++ b/hw/char/Kconfig
@@ -50,6 +50,9 @@ config SCLPCONSOLE
config TERMINAL3270
bool
+config SH_SCI
+ bool
+
config RENESAS_SCI
bool
diff --git a/hw/char/meson.build b/hw/char/meson.build
index 196ac91fa2..afe9a0af88 100644
--- a/hw/char/meson.build
+++ b/hw/char/meson.build
@@ -31,7 +31,7 @@ softmmu_ss.add(when: 'CONFIG_OMAP', if_true: files('omap_uart.c'))
softmmu_ss.add(when: 'CONFIG_RASPI', if_true: files('bcm2835_aux.c'))
softmmu_ss.add(when: 'CONFIG_RENESAS_SCI', if_true: files('renesas_sci.c'))
softmmu_ss.add(when: 'CONFIG_SIFIVE_UART', if_true: files('sifive_uart.c'))
-softmmu_ss.add(when: 'CONFIG_SH4', if_true: files('sh_serial.c'))
+softmmu_ss.add(when: 'CONFIG_SH_SCI', if_true: files('sh_serial.c'))
softmmu_ss.add(when: 'CONFIG_STM32F2XX_USART', if_true: files('stm32f2xx_usart.c'))
softmmu_ss.add(when: 'CONFIG_MCHP_PFSOC_MMUART', if_true: files('mchp_pfsoc_mmuart.c'))
diff --git a/hw/dma/sparc32_dma.c b/hw/dma/sparc32_dma.c
index b643b413c5..03bc500878 100644
--- a/hw/dma/sparc32_dma.c
+++ b/hw/dma/sparc32_dma.c
@@ -295,13 +295,13 @@ static void sparc32_espdma_device_init(Object *obj)
memory_region_init_io(&s->iomem, OBJECT(s), &dma_mem_ops, s,
"espdma-mmio", DMA_SIZE);
- object_initialize_child(obj, "esp", &es->esp, TYPE_ESP);
+ object_initialize_child(obj, "esp", &es->esp, TYPE_SYSBUS_ESP);
}
static void sparc32_espdma_device_realize(DeviceState *dev, Error **errp)
{
ESPDMADeviceState *es = SPARC32_ESPDMA_DEVICE(dev);
- SysBusESPState *sysbus = ESP(&es->esp);
+ SysBusESPState *sysbus = SYSBUS_ESP(&es->esp);
ESPState *esp = &sysbus->esp;
esp->dma_memory_read = espdma_memory_read;
diff --git a/hw/i386/x86.c b/hw/i386/x86.c
index 6329f90ef9..7865660e2c 100644
--- a/hw/i386/x86.c
+++ b/hw/i386/x86.c
@@ -690,6 +690,8 @@ static uint64_t read_pvh_start_addr(void *arg1, void *arg2, bool is64)
elf_note_data_addr =
((void *)nhdr64) + nhdr_size64 +
QEMU_ALIGN_UP(nhdr_namesz, phdr_align);
+
+ pvh_start_addr = *elf_note_data_addr;
} else {
struct elf32_note *nhdr32 = (struct elf32_note *)arg1;
uint32_t nhdr_size32 = sizeof(struct elf32_note);
@@ -699,9 +701,9 @@ static uint64_t read_pvh_start_addr(void *arg1, void *arg2, bool is64)
elf_note_data_addr =
((void *)nhdr32) + nhdr_size32 +
QEMU_ALIGN_UP(nhdr_namesz, phdr_align);
- }
- pvh_start_addr = *elf_note_data_addr;
+ pvh_start_addr = *(uint32_t *)elf_note_data_addr;
+ }
return pvh_start_addr;
}
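
The fix matters because an Elf32 note stores a 4-byte payload: dereferencing it through the function's uint64_t * pointer would also read 4 bytes of adjacent memory. A sketch of the width-correct read (hypothetical helper):

    /* Read a PVH entry-point note payload at the width implied by the
     * ELF class, as the corrected 32-bit branch above now does. */
    static uint64_t pvh_note_value(const void *payload, bool is64)
    {
        if (is64) {
            return *(const uint64_t *)payload; /* Elf64 note: 8-byte value */
        }
        return *(const uint32_t *)payload;     /* Elf32 note: 4-byte value */
    }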
diff --git a/hw/intc/Kconfig b/hw/intc/Kconfig
index c18d11142a..66bf0b90b4 100644
--- a/hw/intc/Kconfig
+++ b/hw/intc/Kconfig
@@ -53,6 +53,9 @@ config OMPIC
config PPC_UIC
bool
+config SH_INTC
+ bool
+
config RX_ICU
bool
diff --git a/hw/intc/apic.c b/hw/intc/apic.c
index 3ada22f427..f4f50f974e 100644
--- a/hw/intc/apic.c
+++ b/hw/intc/apic.c
@@ -25,6 +25,7 @@
#include "hw/intc/i8259.h"
#include "hw/pci/msi.h"
#include "qemu/host-utils.h"
+#include "sysemu/kvm.h"
#include "trace.h"
#include "hw/i386/apic-msidef.h"
#include "qapi/error.h"
@@ -875,6 +876,11 @@ static void apic_realize(DeviceState *dev, Error **errp)
return;
}
+ if (kvm_enabled()) {
+ warn_report("Userspace local APIC is deprecated for KVM.");
+ warn_report("Do not use kernel-irqchip except for the -M isapc machine type.");
+ }
+
memory_region_init_io(&s->io_memory, OBJECT(s), &apic_io_ops, s, "apic-msi",
APIC_SPACE_SIZE);
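
This pairs with the deprecation text added to docs/system/deprecated.rst above: "-machine kernel-irqchip=split" remains fully supported, "-machine kernel-irqchip=off" stays acceptable only with "-M isapc", and kernel-irqchip=off on other x86 machine types with a local APIC now warns ahead of eventual removal (a summary of the supported combinations, not patch text).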
diff --git a/hw/intc/meson.build b/hw/intc/meson.build
index 53cba11569..b3d9345a0d 100644
--- a/hw/intc/meson.build
+++ b/hw/intc/meson.build
@@ -47,7 +47,7 @@ specific_ss.add(when: 'CONFIG_RASPI', if_true: files('bcm2835_ic.c', 'bcm2836_co
specific_ss.add(when: 'CONFIG_RX_ICU', if_true: files('rx_icu.c'))
specific_ss.add(when: 'CONFIG_S390_FLIC', if_true: files('s390_flic.c'))
specific_ss.add(when: 'CONFIG_S390_FLIC_KVM', if_true: files('s390_flic_kvm.c'))
-specific_ss.add(when: 'CONFIG_SH4', if_true: files('sh_intc.c'))
+specific_ss.add(when: 'CONFIG_SH_INTC', if_true: files('sh_intc.c'))
specific_ss.add(when: 'CONFIG_SIFIVE_CLINT', if_true: files('sifive_clint.c'))
specific_ss.add(when: 'CONFIG_SIFIVE_PLIC', if_true: files('sifive_plic.c'))
specific_ss.add(when: 'CONFIG_XICS', if_true: files('xics.c'))
diff --git a/hw/m68k/q800.c b/hw/m68k/q800.c
index d4eca46767..4d2e866eec 100644
--- a/hw/m68k/q800.c
+++ b/hw/m68k/q800.c
@@ -350,8 +350,8 @@ static void q800_init(MachineState *machine)
/* SCSI */
- dev = qdev_new(TYPE_ESP);
- sysbus_esp = ESP(dev);
+ dev = qdev_new(TYPE_SYSBUS_ESP);
+ sysbus_esp = SYSBUS_ESP(dev);
esp = &sysbus_esp->esp;
esp->dma_memory_read = NULL;
esp->dma_memory_write = NULL;
diff --git a/hw/mips/jazz.c b/hw/mips/jazz.c
index 83c8086062..1a0888a0fd 100644
--- a/hw/mips/jazz.c
+++ b/hw/mips/jazz.c
@@ -328,8 +328,8 @@ static void mips_jazz_init(MachineState *machine,
}
/* SCSI adapter */
- dev = qdev_new(TYPE_ESP);
- sysbus_esp = ESP(dev);
+ dev = qdev_new(TYPE_SYSBUS_ESP);
+ sysbus_esp = SYSBUS_ESP(dev);
esp = &sysbus_esp->esp;
esp->dma_memory_read = rc4030_dma_read;
esp->dma_memory_write = rc4030_dma_write;
diff --git a/hw/pci-host/Kconfig b/hw/pci-host/Kconfig
index 8b8c763c28..2ccc96f02c 100644
--- a/hw/pci-host/Kconfig
+++ b/hw/pci-host/Kconfig
@@ -68,3 +68,7 @@ config PCI_POWERNV
config REMOTE_PCIHOST
bool
+
+config SH_PCI
+ bool
+ select PCI
diff --git a/hw/pci-host/meson.build b/hw/pci-host/meson.build
index 1847c69905..87a896973e 100644
--- a/hw/pci-host/meson.build
+++ b/hw/pci-host/meson.build
@@ -10,6 +10,7 @@ pci_ss.add(when: 'CONFIG_PCI_I440FX', if_true: files('i440fx.c'))
pci_ss.add(when: 'CONFIG_PCI_SABRE', if_true: files('sabre.c'))
pci_ss.add(when: 'CONFIG_XEN_IGD_PASSTHROUGH', if_true: files('xen_igd_pt.c'))
pci_ss.add(when: 'CONFIG_REMOTE_PCIHOST', if_true: files('remote.c'))
+pci_ss.add(when: 'CONFIG_SH_PCI', if_true: files('sh_pci.c'))
# PPC devices
pci_ss.add(when: 'CONFIG_PREP_PCI', if_true: files('prep.c'))
diff --git a/hw/sh4/sh_pci.c b/hw/pci-host/sh_pci.c
index 734892f47c..734892f47c 100644
--- a/hw/sh4/sh_pci.c
+++ b/hw/pci-host/sh_pci.c
diff --git a/hw/scsi/esp-pci.c b/hw/scsi/esp-pci.c
index 4d7c2cab56..c3d3dab05e 100644
--- a/hw/scsi/esp-pci.c
+++ b/hw/scsi/esp-pci.c
@@ -79,8 +79,10 @@ struct PCIESPState {
static void esp_pci_handle_idle(PCIESPState *pci, uint32_t val)
{
+ ESPState *s = ESP(&pci->esp);
+
trace_esp_pci_dma_idle(val);
- esp_dma_enable(&pci->esp, 0, 0);
+ esp_dma_enable(s, 0, 0);
}
static void esp_pci_handle_blast(PCIESPState *pci, uint32_t val)
@@ -91,14 +93,18 @@ static void esp_pci_handle_blast(PCIESPState *pci, uint32_t val)
static void esp_pci_handle_abort(PCIESPState *pci, uint32_t val)
{
+ ESPState *s = ESP(&pci->esp);
+
trace_esp_pci_dma_abort(val);
- if (pci->esp.current_req) {
- scsi_req_cancel(pci->esp.current_req);
+ if (s->current_req) {
+ scsi_req_cancel(s->current_req);
}
}
static void esp_pci_handle_start(PCIESPState *pci, uint32_t val)
{
+ ESPState *s = ESP(&pci->esp);
+
trace_esp_pci_dma_start(val);
pci->dma_regs[DMA_WBC] = pci->dma_regs[DMA_STC];
@@ -109,7 +115,7 @@ static void esp_pci_handle_start(PCIESPState *pci, uint32_t val)
| DMA_STAT_DONE | DMA_STAT_ABORT
| DMA_STAT_ERROR | DMA_STAT_PWDN);
- esp_dma_enable(&pci->esp, 0, 1);
+ esp_dma_enable(s, 0, 1);
}
static void esp_pci_dma_write(PCIESPState *pci, uint32_t saddr, uint32_t val)
@@ -155,11 +161,12 @@ static void esp_pci_dma_write(PCIESPState *pci, uint32_t saddr, uint32_t val)
static uint32_t esp_pci_dma_read(PCIESPState *pci, uint32_t saddr)
{
+ ESPState *s = ESP(&pci->esp);
uint32_t val;
val = pci->dma_regs[saddr];
if (saddr == DMA_STAT) {
- if (pci->esp.rregs[ESP_RSTAT] & STAT_INT) {
+ if (s->rregs[ESP_RSTAT] & STAT_INT) {
val |= DMA_STAT_SCSIINT;
}
if (!(pci->sbac & SBAC_STATUS)) {
@@ -176,6 +183,7 @@ static void esp_pci_io_write(void *opaque, hwaddr addr,
uint64_t val, unsigned int size)
{
PCIESPState *pci = opaque;
+ ESPState *s = ESP(&pci->esp);
if (size < 4 || addr & 3) {
/* need to upgrade request: we only support 4-byte accesses */
@@ -183,7 +191,7 @@ static void esp_pci_io_write(void *opaque, hwaddr addr,
int shift;
if (addr < 0x40) {
- current = pci->esp.wregs[addr >> 2];
+ current = s->wregs[addr >> 2];
} else if (addr < 0x60) {
current = pci->dma_regs[(addr - 0x40) >> 2];
} else if (addr < 0x74) {
@@ -203,7 +211,7 @@ static void esp_pci_io_write(void *opaque, hwaddr addr,
if (addr < 0x40) {
/* SCSI core reg */
- esp_reg_write(&pci->esp, addr >> 2, val);
+ esp_reg_write(s, addr >> 2, val);
} else if (addr < 0x60) {
/* PCI DMA CCB */
esp_pci_dma_write(pci, (addr - 0x40) >> 2, val);
@@ -220,11 +228,12 @@ static uint64_t esp_pci_io_read(void *opaque, hwaddr addr,
unsigned int size)
{
PCIESPState *pci = opaque;
+ ESPState *s = ESP(&pci->esp);
uint32_t ret;
if (addr < 0x40) {
/* SCSI core reg */
- ret = esp_reg_read(&pci->esp, addr >> 2);
+ ret = esp_reg_read(s, addr >> 2);
} else if (addr < 0x60) {
/* PCI DMA CCB */
ret = esp_pci_dma_read(pci, (addr - 0x40) >> 2);
@@ -306,7 +315,9 @@ static const MemoryRegionOps esp_pci_io_ops = {
static void esp_pci_hard_reset(DeviceState *dev)
{
PCIESPState *pci = PCI_ESP(dev);
- esp_hard_reset(&pci->esp);
+ ESPState *s = ESP(&pci->esp);
+
+ esp_hard_reset(s);
pci->dma_regs[DMA_CMD] &= ~(DMA_CMD_DIR | DMA_CMD_INTE_D | DMA_CMD_INTE_P
| DMA_CMD_MDL | DMA_CMD_DIAG | DMA_CMD_MASK);
pci->dma_regs[DMA_WBC] &= ~0xffff;
@@ -319,11 +330,12 @@ static void esp_pci_hard_reset(DeviceState *dev)
static const VMStateDescription vmstate_esp_pci_scsi = {
.name = "pciespscsi",
- .version_id = 1,
+ .version_id = 2,
.minimum_version_id = 1,
.fields = (VMStateField[]) {
VMSTATE_PCI_DEVICE(parent_obj, PCIESPState),
VMSTATE_BUFFER_UNSAFE(dma_regs, PCIESPState, 0, 8 * sizeof(uint32_t)),
+ VMSTATE_UINT8_V(esp.mig_version_id, PCIESPState, 2),
VMSTATE_STRUCT(esp, PCIESPState, 0, vmstate_esp, ESPState),
VMSTATE_END_OF_LIST()
}
@@ -353,9 +365,13 @@ static void esp_pci_scsi_realize(PCIDevice *dev, Error **errp)
{
PCIESPState *pci = PCI_ESP(dev);
DeviceState *d = DEVICE(dev);
- ESPState *s = &pci->esp;
+ ESPState *s = ESP(&pci->esp);
uint8_t *pci_conf;
+ if (!qdev_realize(DEVICE(s), NULL, errp)) {
+ return;
+ }
+
pci_conf = dev->config;
/* Interrupt pin A */
@@ -374,11 +390,19 @@ static void esp_pci_scsi_realize(PCIDevice *dev, Error **errp)
scsi_bus_new(&s->bus, sizeof(s->bus), d, &esp_pci_scsi_info, NULL);
}
-static void esp_pci_scsi_uninit(PCIDevice *d)
+static void esp_pci_scsi_exit(PCIDevice *d)
{
PCIESPState *pci = PCI_ESP(d);
+ ESPState *s = ESP(&pci->esp);
+
+ qemu_free_irq(s->irq);
+}
+
+static void esp_pci_init(Object *obj)
+{
+ PCIESPState *pci = PCI_ESP(obj);
- qemu_free_irq(pci->esp.irq);
+ object_initialize_child(obj, "esp", &pci->esp, TYPE_ESP);
}
static void esp_pci_class_init(ObjectClass *klass, void *data)
@@ -387,7 +411,7 @@ static void esp_pci_class_init(ObjectClass *klass, void *data)
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
k->realize = esp_pci_scsi_realize;
- k->exit = esp_pci_scsi_uninit;
+ k->exit = esp_pci_scsi_exit;
k->vendor_id = PCI_VENDOR_ID_AMD;
k->device_id = PCI_DEVICE_ID_AMD_SCSI;
k->revision = 0x10;
@@ -401,6 +425,7 @@ static void esp_pci_class_init(ObjectClass *klass, void *data)
static const TypeInfo esp_pci_info = {
.name = TYPE_AM53C974_DEVICE,
.parent = TYPE_PCI_DEVICE,
+ .instance_init = esp_pci_init,
.instance_size = sizeof(PCIESPState),
.class_init = esp_pci_class_init,
.interfaces = (InterfaceInfo[]) {
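
The esp-pci conversion above is an instance of the QOM embedded-child pattern this series applies to every ESP user: construct the child in instance_init, then realize it from the parent's realize. A sketch with hypothetical MyState/MYDEV names:

    static void mydev_init(Object *obj)
    {
        MyState *ms = MYDEV(obj);

        /* construct the ESP child in-place inside the parent's struct */
        object_initialize_child(obj, "esp", &ms->esp, TYPE_ESP);
    }

    static void mydev_realize(DeviceState *dev, Error **errp)
    {
        MyState *ms = MYDEV(dev);

        if (!qdev_realize(DEVICE(&ms->esp), NULL, errp)) {
            return; /* child realize failed; errp is already set */
        }
        /* wire up IRQs and MMIO only after the child is realized */
    }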
diff --git a/hw/scsi/esp.c b/hw/scsi/esp.c
index 93d9c9c7b9..507ab363bc 100644
--- a/hw/scsi/esp.c
+++ b/hw/scsi/esp.c
@@ -63,11 +63,13 @@ static void esp_lower_irq(ESPState *s)
static void esp_raise_drq(ESPState *s)
{
qemu_irq_raise(s->irq_data);
+ trace_esp_raise_drq();
}
static void esp_lower_drq(ESPState *s)
{
qemu_irq_lower(s->irq_data);
+ trace_esp_lower_drq();
}
void esp_dma_enable(ESPState *s, int irq, int level)
@@ -96,39 +98,112 @@ void esp_request_cancelled(SCSIRequest *req)
}
}
-static void set_pdma(ESPState *s, enum pdma_origin_id origin,
- uint32_t index, uint32_t len)
+static void esp_fifo_push(ESPState *s, uint8_t val)
{
- s->pdma_origin = origin;
- s->pdma_start = index;
- s->pdma_cur = index;
- s->pdma_len = len;
+ if (fifo8_num_used(&s->fifo) == ESP_FIFO_SZ) {
+ trace_esp_error_fifo_overrun();
+ return;
+ }
+
+ fifo8_push(&s->fifo, val);
+}
+
+static uint8_t esp_fifo_pop(ESPState *s)
+{
+ if (fifo8_is_empty(&s->fifo)) {
+ return 0;
+ }
+
+ return fifo8_pop(&s->fifo);
+}
+
+static void esp_cmdfifo_push(ESPState *s, uint8_t val)
+{
+ if (fifo8_num_used(&s->cmdfifo) == ESP_CMDFIFO_SZ) {
+ trace_esp_error_fifo_overrun();
+ return;
+ }
+
+ fifo8_push(&s->cmdfifo, val);
+}
+
+static uint8_t esp_cmdfifo_pop(ESPState *s)
+{
+ if (fifo8_is_empty(&s->cmdfifo)) {
+ return 0;
+ }
+
+ return fifo8_pop(&s->cmdfifo);
+}
+
+static uint32_t esp_get_tc(ESPState *s)
+{
+ uint32_t dmalen;
+
+ dmalen = s->rregs[ESP_TCLO];
+ dmalen |= s->rregs[ESP_TCMID] << 8;
+ dmalen |= s->rregs[ESP_TCHI] << 16;
+
+ return dmalen;
+}
+
+static void esp_set_tc(ESPState *s, uint32_t dmalen)
+{
+ s->rregs[ESP_TCLO] = dmalen;
+ s->rregs[ESP_TCMID] = dmalen >> 8;
+ s->rregs[ESP_TCHI] = dmalen >> 16;
+}
+
+static uint32_t esp_get_stc(ESPState *s)
+{
+ uint32_t dmalen;
+
+ dmalen = s->wregs[ESP_TCLO];
+ dmalen |= s->wregs[ESP_TCMID] << 8;
+ dmalen |= s->wregs[ESP_TCHI] << 16;
+
+ return dmalen;
+}
+
+static uint8_t esp_pdma_read(ESPState *s)
+{
+ uint8_t val;
+
+ if (s->do_cmd) {
+ val = esp_cmdfifo_pop(s);
+ } else {
+ val = esp_fifo_pop(s);
+ }
+
+ return val;
}
-static uint8_t *get_pdma_buf(ESPState *s)
+static void esp_pdma_write(ESPState *s, uint8_t val)
{
- switch (s->pdma_origin) {
- case PDMA:
- return s->pdma_buf;
- case TI:
- return s->ti_buf;
- case CMD:
- return s->cmdbuf;
- case ASYNC:
- return s->async_buf;
+ uint32_t dmalen = esp_get_tc(s);
+
+ if (dmalen == 0) {
+ return;
+ }
+
+ if (s->do_cmd) {
+ esp_cmdfifo_push(s, val);
+ } else {
+ esp_fifo_push(s, val);
}
- return NULL;
+
+ dmalen--;
+ esp_set_tc(s, dmalen);
}
-static int get_cmd_cb(ESPState *s)
+static int esp_select(ESPState *s)
{
int target;
target = s->wregs[ESP_WBUSID] & BUSID_DID;
s->ti_size = 0;
- s->ti_rptr = 0;
- s->ti_wptr = 0;
+ fifo8_reset(&s->fifo);
if (s->current_req) {
/* Started a new command before the old one finished. Cancel it. */
@@ -140,148 +215,195 @@ static int get_cmd_cb(ESPState *s)
if (!s->current_dev) {
/* No such drive */
s->rregs[ESP_RSTAT] = 0;
- s->rregs[ESP_RINTR] = INTR_DC;
+ s->rregs[ESP_RINTR] |= INTR_DC;
s->rregs[ESP_RSEQ] = SEQ_0;
esp_raise_irq(s);
return -1;
}
+
+ /*
+ * Note that we deliberately don't raise the IRQ here: this will be done
+ * either in do_busid_cmd() for DATA OUT transfers or by the deferred
+ * IRQ mechanism in esp_transfer_data() for DATA IN transfers
+ */
+ s->rregs[ESP_RINTR] |= INTR_FC;
+ s->rregs[ESP_RSEQ] = SEQ_CD;
return 0;
}
-static uint32_t get_cmd(ESPState *s, uint8_t *buf, uint8_t buflen)
+static uint32_t get_cmd(ESPState *s, uint32_t maxlen)
{
- uint32_t dmalen;
+ uint8_t buf[ESP_CMDFIFO_SZ];
+ uint32_t dmalen, n;
int target;
target = s->wregs[ESP_WBUSID] & BUSID_DID;
if (s->dma) {
- dmalen = s->rregs[ESP_TCLO];
- dmalen |= s->rregs[ESP_TCMID] << 8;
- dmalen |= s->rregs[ESP_TCHI] << 16;
- if (dmalen > buflen) {
+ dmalen = MIN(esp_get_tc(s), maxlen);
+ if (dmalen == 0) {
return 0;
}
if (s->dma_memory_read) {
s->dma_memory_read(s->dma_opaque, buf, dmalen);
+ fifo8_push_all(&s->cmdfifo, buf, dmalen);
} else {
- memcpy(s->pdma_buf, buf, dmalen);
- set_pdma(s, PDMA, 0, dmalen);
+ if (esp_select(s) < 0) {
+ fifo8_reset(&s->cmdfifo);
+ return -1;
+ }
esp_raise_drq(s);
+ fifo8_reset(&s->cmdfifo);
return 0;
}
} else {
- dmalen = s->ti_size;
- if (dmalen > TI_BUFSZ) {
+ dmalen = MIN(fifo8_num_used(&s->fifo), maxlen);
+ if (dmalen == 0) {
return 0;
}
- memcpy(buf, s->ti_buf, dmalen);
- buf[0] = buf[2] >> 5;
+ memcpy(buf, fifo8_pop_buf(&s->fifo, dmalen, &n), dmalen);
+ if (dmalen >= 3) {
+ buf[0] = buf[2] >> 5;
+ }
+ fifo8_push_all(&s->cmdfifo, buf, dmalen);
}
trace_esp_get_cmd(dmalen, target);
- if (get_cmd_cb(s) < 0) {
- return 0;
+ if (esp_select(s) < 0) {
+ fifo8_reset(&s->cmdfifo);
+ return -1;
}
return dmalen;
}
-static void do_busid_cmd(ESPState *s, uint8_t *buf, uint8_t busid)
+static void do_busid_cmd(ESPState *s, uint8_t busid)
{
+ uint32_t n, cmdlen;
int32_t datalen;
int lun;
SCSIDevice *current_lun;
+ uint8_t *buf;
trace_esp_do_busid_cmd(busid);
lun = busid & 7;
+ cmdlen = fifo8_num_used(&s->cmdfifo);
+ buf = (uint8_t *)fifo8_pop_buf(&s->cmdfifo, cmdlen, &n);
+
current_lun = scsi_device_find(&s->bus, 0, s->current_dev->id, lun);
s->current_req = scsi_req_new(current_lun, 0, lun, buf, s);
datalen = scsi_req_enqueue(s->current_req);
s->ti_size = datalen;
+ fifo8_reset(&s->cmdfifo);
if (datalen != 0) {
s->rregs[ESP_RSTAT] = STAT_TC;
- s->dma_left = 0;
- s->dma_counter = 0;
+ s->rregs[ESP_RSEQ] = SEQ_CD;
+ s->ti_cmd = 0;
+ esp_set_tc(s, 0);
if (datalen > 0) {
+ /*
+ * Switch to DATA IN phase but wait until initial data xfer is
+ * complete before raising the command completion interrupt
+ */
+ s->data_in_ready = false;
s->rregs[ESP_RSTAT] |= STAT_DI;
} else {
s->rregs[ESP_RSTAT] |= STAT_DO;
+ s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
+ esp_raise_irq(s);
+ esp_lower_drq(s);
}
scsi_req_continue(s->current_req);
+ return;
}
- s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
- s->rregs[ESP_RSEQ] = SEQ_CD;
- esp_raise_irq(s);
}
-static void do_cmd(ESPState *s, uint8_t *buf)
+static void do_cmd(ESPState *s)
{
- uint8_t busid = buf[0];
+ uint8_t busid = fifo8_pop(&s->cmdfifo);
+ uint32_t n;
+
+ s->cmdfifo_cdb_offset--;
+
+ /* Ignore extended messages for now */
+ if (s->cmdfifo_cdb_offset) {
+ fifo8_pop_buf(&s->cmdfifo, s->cmdfifo_cdb_offset, &n);
+ s->cmdfifo_cdb_offset = 0;
+ }
- do_busid_cmd(s, &buf[1], busid);
+ do_busid_cmd(s, busid);
}
static void satn_pdma_cb(ESPState *s)
{
- if (get_cmd_cb(s) < 0) {
- return;
- }
- if (s->pdma_cur != s->pdma_start) {
- do_cmd(s, get_pdma_buf(s) + s->pdma_start);
+ s->do_cmd = 0;
+ if (!fifo8_is_empty(&s->cmdfifo)) {
+ s->cmdfifo_cdb_offset = 1;
+ do_cmd(s);
}
}
static void handle_satn(ESPState *s)
{
- uint8_t buf[32];
- int len;
+ int32_t cmdlen;
if (s->dma && !s->dma_enabled) {
s->dma_cb = handle_satn;
return;
}
s->pdma_cb = satn_pdma_cb;
- len = get_cmd(s, buf, sizeof(buf));
- if (len)
- do_cmd(s, buf);
+ cmdlen = get_cmd(s, ESP_CMDFIFO_SZ);
+ if (cmdlen > 0) {
+ s->cmdfifo_cdb_offset = 1;
+ do_cmd(s);
+ } else if (cmdlen == 0) {
+ s->do_cmd = 1;
+ /* Target present, but no cmd yet - switch to command phase */
+ s->rregs[ESP_RSEQ] = SEQ_CD;
+ s->rregs[ESP_RSTAT] = STAT_CD;
+ }
}
static void s_without_satn_pdma_cb(ESPState *s)
{
- if (get_cmd_cb(s) < 0) {
- return;
- }
- if (s->pdma_cur != s->pdma_start) {
- do_busid_cmd(s, get_pdma_buf(s) + s->pdma_start, 0);
+ uint32_t len;
+
+ s->do_cmd = 0;
+ len = fifo8_num_used(&s->cmdfifo);
+ if (len) {
+ s->cmdfifo_cdb_offset = 0;
+ do_busid_cmd(s, 0);
}
}
static void handle_s_without_atn(ESPState *s)
{
- uint8_t buf[32];
- int len;
+ int32_t cmdlen;
if (s->dma && !s->dma_enabled) {
s->dma_cb = handle_s_without_atn;
return;
}
s->pdma_cb = s_without_satn_pdma_cb;
- len = get_cmd(s, buf, sizeof(buf));
- if (len) {
- do_busid_cmd(s, buf, 0);
+ cmdlen = get_cmd(s, ESP_CMDFIFO_SZ);
+ if (cmdlen > 0) {
+ s->cmdfifo_cdb_offset = 0;
+ do_busid_cmd(s, 0);
+ } else if (cmdlen == 0) {
+ s->do_cmd = 1;
+ /* Target present, but no cmd yet - switch to command phase */
+ s->rregs[ESP_RSEQ] = SEQ_CD;
+ s->rregs[ESP_RSTAT] = STAT_CD;
}
}
static void satn_stop_pdma_cb(ESPState *s)
{
- if (get_cmd_cb(s) < 0) {
- return;
- }
- s->cmdlen = s->pdma_cur - s->pdma_start;
- if (s->cmdlen) {
- trace_esp_handle_satn_stop(s->cmdlen);
+ s->do_cmd = 0;
+ if (!fifo8_is_empty(&s->cmdfifo)) {
+ trace_esp_handle_satn_stop(fifo8_num_used(&s->cmdfifo));
s->do_cmd = 1;
+ s->cmdfifo_cdb_offset = 1;
s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
- s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
+ s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
s->rregs[ESP_RSEQ] = SEQ_CD;
esp_raise_irq(s);
}
@@ -289,51 +411,62 @@ static void satn_stop_pdma_cb(ESPState *s)
static void handle_satn_stop(ESPState *s)
{
+ int32_t cmdlen;
+
if (s->dma && !s->dma_enabled) {
s->dma_cb = handle_satn_stop;
return;
}
s->pdma_cb = satn_stop_pdma_cb;
- s->cmdlen = get_cmd(s, s->cmdbuf, sizeof(s->cmdbuf));
- if (s->cmdlen) {
- trace_esp_handle_satn_stop(s->cmdlen);
+ cmdlen = get_cmd(s, 1);
+ if (cmdlen > 0) {
+ trace_esp_handle_satn_stop(fifo8_num_used(&s->cmdfifo));
s->do_cmd = 1;
- s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
- s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
- s->rregs[ESP_RSEQ] = SEQ_CD;
+ s->cmdfifo_cdb_offset = 1;
+ s->rregs[ESP_RSTAT] = STAT_MO;
+ s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
+ s->rregs[ESP_RSEQ] = SEQ_MO;
esp_raise_irq(s);
+ } else if (cmdlen == 0) {
+ s->do_cmd = 1;
+ /* Target present, switch to message out phase */
+ s->rregs[ESP_RSEQ] = SEQ_MO;
+ s->rregs[ESP_RSTAT] = STAT_MO;
}
}
static void write_response_pdma_cb(ESPState *s)
{
s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
- s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
+ s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
s->rregs[ESP_RSEQ] = SEQ_CD;
esp_raise_irq(s);
}
static void write_response(ESPState *s)
{
+ uint32_t n;
+
trace_esp_write_response(s->status);
- s->ti_buf[0] = s->status;
- s->ti_buf[1] = 0;
+
+ fifo8_reset(&s->fifo);
+ esp_fifo_push(s, s->status);
+ esp_fifo_push(s, 0);
+
if (s->dma) {
if (s->dma_memory_write) {
- s->dma_memory_write(s->dma_opaque, s->ti_buf, 2);
+ s->dma_memory_write(s->dma_opaque,
+ (uint8_t *)fifo8_pop_buf(&s->fifo, 2, &n), 2);
s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
- s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
+ s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
s->rregs[ESP_RSEQ] = SEQ_CD;
} else {
- set_pdma(s, TI, 0, 2);
s->pdma_cb = write_response_pdma_cb;
esp_raise_drq(s);
return;
}
} else {
s->ti_size = 2;
- s->ti_rptr = 0;
- s->ti_wptr = 2;
s->rregs[ESP_RFLAGS] = 2;
}
esp_raise_irq(s);
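
All fifo8_*() calls in this file use QEMU's util/fifo8 helpers; the two FIFOs are created once during device setup, roughly as below (the actual call site is outside this excerpt):

    /* One-time construction assumed by the push/pop helpers above. */
    fifo8_create(&s->fifo, ESP_FIFO_SZ);       /* chip data FIFO */
    fifo8_create(&s->cmdfifo, ESP_CMDFIFO_SZ); /* command accumulator */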
@@ -342,77 +475,133 @@ static void write_response(ESPState *s)
static void esp_dma_done(ESPState *s)
{
s->rregs[ESP_RSTAT] |= STAT_TC;
- s->rregs[ESP_RINTR] = INTR_BS;
+ s->rregs[ESP_RINTR] |= INTR_BS;
s->rregs[ESP_RSEQ] = 0;
s->rregs[ESP_RFLAGS] = 0;
- s->rregs[ESP_TCLO] = 0;
- s->rregs[ESP_TCMID] = 0;
- s->rregs[ESP_TCHI] = 0;
+ esp_set_tc(s, 0);
esp_raise_irq(s);
}
static void do_dma_pdma_cb(ESPState *s)
{
- int to_device = (s->ti_size < 0);
- int len = s->pdma_cur - s->pdma_start;
+ int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
+ int len;
+ uint32_t n;
+
if (s->do_cmd) {
s->ti_size = 0;
- s->cmdlen = 0;
s->do_cmd = 0;
- do_cmd(s, s->cmdbuf);
+ do_cmd(s);
+ esp_lower_drq(s);
return;
}
- s->dma_left -= len;
- s->async_buf += len;
- s->async_len -= len;
+
if (to_device) {
- s->ti_size += len;
+ /* Copy FIFO data to device */
+ len = MIN(s->async_len, ESP_FIFO_SZ);
+ len = MIN(len, fifo8_num_used(&s->fifo));
+ memcpy(s->async_buf, fifo8_pop_buf(&s->fifo, len, &n), len);
+ s->async_buf += n;
+ s->async_len -= n;
+ s->ti_size += n;
+
+ if (n < len) {
+ /* Unaligned accesses can cause FIFO wraparound */
+ len = len - n;
+ memcpy(s->async_buf, fifo8_pop_buf(&s->fifo, len, &n), len);
+ s->async_buf += n;
+ s->async_len -= n;
+ s->ti_size += n;
+ }
+
+ if (s->async_len == 0) {
+ scsi_req_continue(s->current_req);
+ return;
+ }
+
+ if (esp_get_tc(s) == 0) {
+ esp_lower_drq(s);
+ esp_dma_done(s);
+ }
+
+ return;
} else {
- s->ti_size -= len;
- }
- if (s->async_len == 0) {
- scsi_req_continue(s->current_req);
- /*
- * If there is still data to be read from the device then
- * complete the DMA operation immediately. Otherwise defer
- * until the scsi layer has completed.
- */
- if (to_device || s->dma_left != 0 || s->ti_size == 0) {
+ if (s->async_len == 0) {
+ if (s->current_req) {
+ /* Defer until the scsi layer has completed */
+ scsi_req_continue(s->current_req);
+ s->data_in_ready = false;
+ }
return;
}
- }
- /* Partially filled a scsi buffer. Complete immediately. */
- esp_dma_done(s);
+ if (esp_get_tc(s) != 0) {
+ /* Copy device data to FIFO */
+ len = MIN(s->async_len, esp_get_tc(s));
+ len = MIN(len, fifo8_num_free(&s->fifo));
+ fifo8_push_all(&s->fifo, s->async_buf, len);
+ s->async_buf += len;
+ s->async_len -= len;
+ s->ti_size -= len;
+ esp_set_tc(s, esp_get_tc(s) - len);
+
+ if (esp_get_tc(s) == 0) {
+ /* Indicate transfer to FIFO is complete */
+ s->rregs[ESP_RSTAT] |= STAT_TC;
+ }
+ return;
+ }
+
+ /* Partially filled a scsi buffer. Complete immediately. */
+ esp_lower_drq(s);
+ esp_dma_done(s);
+ }
}
static void esp_do_dma(ESPState *s)
{
- uint32_t len;
- int to_device;
+ uint32_t len, cmdlen;
+ int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
+ uint8_t buf[ESP_CMDFIFO_SZ];
- len = s->dma_left;
+ len = esp_get_tc(s);
if (s->do_cmd) {
/*
* handle_ti_cmd() case: esp_do_dma() is called only from
* handle_ti_cmd() with do_cmd != NULL (see the assert())
*/
- trace_esp_do_dma(s->cmdlen, len);
- assert (s->cmdlen <= sizeof(s->cmdbuf) &&
- len <= sizeof(s->cmdbuf) - s->cmdlen);
+ cmdlen = fifo8_num_used(&s->cmdfifo);
+ trace_esp_do_dma(cmdlen, len);
if (s->dma_memory_read) {
- s->dma_memory_read(s->dma_opaque, &s->cmdbuf[s->cmdlen], len);
+ s->dma_memory_read(s->dma_opaque, buf, len);
+ fifo8_push_all(&s->cmdfifo, buf, len);
} else {
- set_pdma(s, CMD, s->cmdlen, len);
s->pdma_cb = do_dma_pdma_cb;
esp_raise_drq(s);
return;
}
- trace_esp_handle_ti_cmd(s->cmdlen);
+ trace_esp_handle_ti_cmd(cmdlen);
s->ti_size = 0;
- s->cmdlen = 0;
- s->do_cmd = 0;
- do_cmd(s, s->cmdbuf);
+ if ((s->rregs[ESP_RSTAT] & 7) == STAT_CD) {
+ /* No command received */
+ if (s->cmdfifo_cdb_offset == fifo8_num_used(&s->cmdfifo)) {
+ return;
+ }
+
+ /* Command has been received */
+ s->do_cmd = 0;
+ do_cmd(s);
+ } else {
+ /*
+ * Extra message out bytes received: update cmdfifo_cdb_offset
+ * and then switch to command phase
+ */
+ s->cmdfifo_cdb_offset = fifo8_num_used(&s->cmdfifo);
+ s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
+ s->rregs[ESP_RSEQ] = SEQ_CD;
+ s->rregs[ESP_RINTR] |= INTR_BS;
+ esp_raise_irq(s);
+ }
return;
}
if (s->async_len == 0) {
@@ -422,12 +611,10 @@ static void esp_do_dma(ESPState *s)
if (len > s->async_len) {
len = s->async_len;
}
- to_device = (s->ti_size < 0);
if (to_device) {
if (s->dma_memory_read) {
s->dma_memory_read(s->dma_opaque, s->async_buf, len);
} else {
- set_pdma(s, ASYNC, 0, len);
s->pdma_cb = do_dma_pdma_cb;
esp_raise_drq(s);
return;
@@ -436,48 +623,145 @@ static void esp_do_dma(ESPState *s)
if (s->dma_memory_write) {
s->dma_memory_write(s->dma_opaque, s->async_buf, len);
} else {
- set_pdma(s, ASYNC, 0, len);
+ /* Adjust TC for any leftover data in the FIFO */
+ if (!fifo8_is_empty(&s->fifo)) {
+ esp_set_tc(s, esp_get_tc(s) - fifo8_num_used(&s->fifo));
+ }
+
+ /* Copy device data to FIFO */
+ len = MIN(len, fifo8_num_free(&s->fifo));
+ fifo8_push_all(&s->fifo, s->async_buf, len);
+ s->async_buf += len;
+ s->async_len -= len;
+ s->ti_size -= len;
+
+ /*
+ * MacOS toolbox uses a TI length of 16 bytes for all commands, so
+ * commands shorter than this must be padded accordingly
+ */
+ if (len < esp_get_tc(s) && esp_get_tc(s) <= ESP_FIFO_SZ) {
+ while (fifo8_num_used(&s->fifo) < ESP_FIFO_SZ) {
+ esp_fifo_push(s, 0);
+ len++;
+ }
+ }
+
+ esp_set_tc(s, esp_get_tc(s) - len);
s->pdma_cb = do_dma_pdma_cb;
esp_raise_drq(s);
+
+ /* Indicate transfer to FIFO is complete */
+ s->rregs[ESP_RSTAT] |= STAT_TC;
return;
}
}
- s->dma_left -= len;
+ esp_set_tc(s, esp_get_tc(s) - len);
s->async_buf += len;
s->async_len -= len;
- if (to_device)
+ if (to_device) {
s->ti_size += len;
- else
+ } else {
s->ti_size -= len;
+ }
if (s->async_len == 0) {
scsi_req_continue(s->current_req);
- /* If there is still data to be read from the device then
- complete the DMA operation immediately. Otherwise defer
- until the scsi layer has completed. */
- if (to_device || s->dma_left != 0 || s->ti_size == 0) {
+ /*
+ * If there is still data to be read from the device then
+ * complete the DMA operation immediately. Otherwise defer
+ * until the scsi layer has completed.
+ */
+ if (to_device || esp_get_tc(s) != 0 || s->ti_size == 0) {
return;
}
}
/* Partially filled a scsi buffer. Complete immediately. */
esp_dma_done(s);
+ esp_lower_drq(s);
}
-static void esp_report_command_complete(ESPState *s, uint32_t status)
+static void esp_do_nodma(ESPState *s)
{
+ int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
+ uint32_t cmdlen, n;
+ int len;
+
+ if (s->do_cmd) {
+ cmdlen = fifo8_num_used(&s->cmdfifo);
+ trace_esp_handle_ti_cmd(cmdlen);
+ s->ti_size = 0;
+ if ((s->rregs[ESP_RSTAT] & 7) == STAT_CD) {
+ /* No command received */
+ if (s->cmdfifo_cdb_offset == fifo8_num_used(&s->cmdfifo)) {
+ return;
+ }
+
+ /* Command has been received */
+ s->do_cmd = 0;
+ do_cmd(s);
+ } else {
+ /*
+ * Extra message out bytes received: update cmdfifo_cdb_offset
+ * and then switch to command phase
+ */
+ s->cmdfifo_cdb_offset = fifo8_num_used(&s->cmdfifo);
+ s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
+ s->rregs[ESP_RSEQ] = SEQ_CD;
+ s->rregs[ESP_RINTR] |= INTR_BS;
+ esp_raise_irq(s);
+ }
+ return;
+ }
+
+ if (s->async_len == 0) {
+ /* Defer until data is available. */
+ return;
+ }
+
+ if (to_device) {
+ len = MIN(fifo8_num_used(&s->fifo), ESP_FIFO_SZ);
+ memcpy(s->async_buf, fifo8_pop_buf(&s->fifo, len, &n), len);
+ s->async_buf += len;
+ s->async_len -= len;
+ s->ti_size += len;
+ } else {
+ len = MIN(s->ti_size, s->async_len);
+ len = MIN(len, fifo8_num_free(&s->fifo));
+ fifo8_push_all(&s->fifo, s->async_buf, len);
+ s->async_buf += len;
+ s->async_len -= len;
+ s->ti_size -= len;
+ }
+
+ if (s->async_len == 0) {
+ scsi_req_continue(s->current_req);
+
+ if (to_device || s->ti_size == 0) {
+ return;
+ }
+ }
+
+ s->rregs[ESP_RINTR] |= INTR_BS;
+ esp_raise_irq(s);
+}
+
+void esp_command_complete(SCSIRequest *req, size_t resid)
+{
+ ESPState *s = req->hba_private;
+
trace_esp_command_complete();
if (s->ti_size != 0) {
trace_esp_command_complete_unexpected();
}
s->ti_size = 0;
- s->dma_left = 0;
s->async_len = 0;
- if (status) {
+ if (req->status) {
trace_esp_command_complete_fail();
}
- s->status = status;
+ s->status = req->status;
s->rregs[ESP_RSTAT] = STAT_ST;
esp_dma_done(s);
+ esp_lower_drq(s);
if (s->current_req) {
scsi_req_unref(s->current_req);
s->current_req = NULL;
@@ -485,73 +769,83 @@ static void esp_report_command_complete(ESPState *s, uint32_t status)
}
}
-void esp_command_complete(SCSIRequest *req, size_t resid)
+void esp_transfer_data(SCSIRequest *req, uint32_t len)
{
ESPState *s = req->hba_private;
+ int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
+ uint32_t dmalen = esp_get_tc(s);
- if (s->rregs[ESP_RSTAT] & STAT_INT) {
- /* Defer handling command complete until the previous
- * interrupt has been handled.
+ assert(!s->do_cmd);
+ trace_esp_transfer_data(dmalen, s->ti_size);
+ s->async_len = len;
+ s->async_buf = scsi_req_get_buf(req);
+
+ if (!to_device && !s->data_in_ready) {
+ /*
+ * Initial incoming data xfer is complete so raise command
+ * completion interrupt
*/
- trace_esp_command_complete_deferred();
- s->deferred_status = req->status;
- s->deferred_complete = true;
+ s->data_in_ready = true;
+ s->rregs[ESP_RSTAT] |= STAT_TC;
+ s->rregs[ESP_RINTR] |= INTR_BS;
+ esp_raise_irq(s);
+
+ /*
+ * If data is ready to transfer and the TI command has already
+ * been executed, start DMA immediately. Otherwise DMA will start
+ * when host sends the TI command
+ */
+ if (s->ti_size && (s->rregs[ESP_CMD] == (CMD_TI | CMD_DMA))) {
+ esp_do_dma(s);
+ }
return;
}
- esp_report_command_complete(s, req->status);
-}
-void esp_transfer_data(SCSIRequest *req, uint32_t len)
-{
- ESPState *s = req->hba_private;
+ if (s->ti_cmd == 0) {
+ /*
+ * Always perform the initial transfer upon reception of the next TI
+ * command to ensure the DMA/non-DMA status of the command is correct.
+ * It is not possible to use s->dma directly in the section below as
+ * some OSs send non-DMA NOP commands after a DMA transfer. Hence if the
+ * async data transfer is delayed then s->dma is set incorrectly.
+ */
+ return;
+ }
- assert(!s->do_cmd);
- trace_esp_transfer_data(s->dma_left, s->ti_size);
- s->async_len = len;
- s->async_buf = scsi_req_get_buf(req);
- if (s->dma_left) {
- esp_do_dma(s);
- } else if (s->dma_counter != 0 && s->ti_size <= 0) {
- /* If this was the last part of a DMA transfer then the
- completion interrupt is deferred to here. */
- esp_dma_done(s);
+ if (s->ti_cmd & CMD_DMA) {
+ if (dmalen) {
+ esp_do_dma(s);
+ } else if (s->ti_size <= 0) {
+ /*
+ * If this was the last part of a DMA transfer then the
+ * completion interrupt is deferred to here.
+ */
+ esp_dma_done(s);
+ esp_lower_drq(s);
+ }
+ } else {
+ esp_do_nodma(s);
}
}
static void handle_ti(ESPState *s)
{
- uint32_t dmalen, minlen;
+ uint32_t dmalen;
if (s->dma && !s->dma_enabled) {
s->dma_cb = handle_ti;
return;
}
- dmalen = s->rregs[ESP_TCLO];
- dmalen |= s->rregs[ESP_TCMID] << 8;
- dmalen |= s->rregs[ESP_TCHI] << 16;
- if (dmalen==0) {
- dmalen=0x10000;
- }
- s->dma_counter = dmalen;
-
- if (s->do_cmd)
- minlen = (dmalen < ESP_CMDBUF_SZ) ? dmalen : ESP_CMDBUF_SZ;
- else if (s->ti_size < 0)
- minlen = (dmalen < -s->ti_size) ? dmalen : -s->ti_size;
- else
- minlen = (dmalen < s->ti_size) ? dmalen : s->ti_size;
- trace_esp_handle_ti(minlen);
+ s->ti_cmd = s->rregs[ESP_CMD];
if (s->dma) {
- s->dma_left = minlen;
+ dmalen = esp_get_tc(s);
+ trace_esp_handle_ti(dmalen);
s->rregs[ESP_RSTAT] &= ~STAT_TC;
esp_do_dma(s);
- } else if (s->do_cmd) {
- trace_esp_handle_ti_cmd(s->cmdlen);
- s->ti_size = 0;
- s->cmdlen = 0;
- s->do_cmd = 0;
- do_cmd(s, s->cmdbuf);
+ } else {
+ trace_esp_handle_ti(s->ti_size);
+ esp_do_nodma(s);
}
}
@@ -561,8 +855,8 @@ void esp_hard_reset(ESPState *s)
memset(s->wregs, 0, ESP_REGS);
s->tchi_written = 0;
s->ti_size = 0;
- s->ti_rptr = 0;
- s->ti_wptr = 0;
+ fifo8_reset(&s->fifo);
+ fifo8_reset(&s->cmdfifo);
s->dma = 0;
s->do_cmd = 0;
s->dma_cb = NULL;
@@ -586,46 +880,50 @@ static void parent_esp_reset(ESPState *s, int irq, int level)
uint64_t esp_reg_read(ESPState *s, uint32_t saddr)
{
- uint32_t old_val;
+ uint32_t val;
- trace_esp_mem_readb(saddr, s->rregs[saddr]);
switch (saddr) {
case ESP_FIFO:
- if ((s->rregs[ESP_RSTAT] & STAT_PIO_MASK) == 0) {
+ if (s->dma_memory_read && s->dma_memory_write &&
+ (s->rregs[ESP_RSTAT] & STAT_PIO_MASK) == 0) {
/* Data out. */
qemu_log_mask(LOG_UNIMP, "esp: PIO data read not implemented\n");
s->rregs[ESP_FIFO] = 0;
- } else if (s->ti_rptr < s->ti_wptr) {
- s->ti_size--;
- s->rregs[ESP_FIFO] = s->ti_buf[s->ti_rptr++];
- }
- if (s->ti_rptr == s->ti_wptr) {
- s->ti_rptr = 0;
- s->ti_wptr = 0;
+ } else {
+ s->rregs[ESP_FIFO] = esp_fifo_pop(s);
}
+ val = s->rregs[ESP_FIFO];
break;
case ESP_RINTR:
- /* Clear sequence step, interrupt register and all status bits
- except TC */
- old_val = s->rregs[ESP_RINTR];
+ /*
+ * Clear sequence step, interrupt register and all status bits
+ * except TC
+ */
+ val = s->rregs[ESP_RINTR];
s->rregs[ESP_RINTR] = 0;
s->rregs[ESP_RSTAT] &= ~STAT_TC;
- s->rregs[ESP_RSEQ] = SEQ_CD;
+ s->rregs[ESP_RSEQ] = SEQ_0;
esp_lower_irq(s);
- if (s->deferred_complete) {
- esp_report_command_complete(s, s->deferred_status);
- s->deferred_complete = false;
- }
- return old_val;
+ break;
case ESP_TCHI:
/* Return the unique id if the value has never been written */
if (!s->tchi_written) {
- return s->chip_id;
+ val = s->chip_id;
+ } else {
+ val = s->rregs[saddr];
}
+ break;
+ case ESP_RFLAGS:
+ /* Bottom 5 bits indicate number of bytes in FIFO */
+ val = fifo8_num_used(&s->fifo);
+ break;
default:
+ val = s->rregs[saddr];
break;
}
- return s->rregs[saddr];
+
+ trace_esp_mem_readb(saddr, val);
+ return val;
}
void esp_reg_write(ESPState *s, uint32_t saddr, uint64_t val)
@@ -641,16 +939,15 @@ void esp_reg_write(ESPState *s, uint32_t saddr, uint64_t val)
break;
case ESP_FIFO:
if (s->do_cmd) {
- if (s->cmdlen < ESP_CMDBUF_SZ) {
- s->cmdbuf[s->cmdlen++] = val & 0xff;
- } else {
- trace_esp_error_fifo_overrun();
- }
- } else if (s->ti_wptr == TI_BUFSZ - 1) {
- trace_esp_error_fifo_overrun();
+ esp_cmdfifo_push(s, val);
} else {
- s->ti_size++;
- s->ti_buf[s->ti_wptr++] = val & 0xff;
+ esp_fifo_push(s, val);
+ }
+
+ /* Non-DMA transfers raise an interrupt after every byte */
+ if (s->rregs[ESP_CMD] == CMD_TI) {
+ s->rregs[ESP_RINTR] |= INTR_FC | INTR_BS;
+ esp_raise_irq(s);
}
break;
case ESP_CMD:
@@ -658,22 +955,21 @@ void esp_reg_write(ESPState *s, uint32_t saddr, uint64_t val)
if (val & CMD_DMA) {
s->dma = 1;
/* Reload DMA counter. */
- s->rregs[ESP_TCLO] = s->wregs[ESP_TCLO];
- s->rregs[ESP_TCMID] = s->wregs[ESP_TCMID];
- s->rregs[ESP_TCHI] = s->wregs[ESP_TCHI];
+ if (esp_get_stc(s) == 0) {
+ esp_set_tc(s, 0x10000);
+ } else {
+ esp_set_tc(s, esp_get_stc(s));
+ }
} else {
s->dma = 0;
}
- switch(val & CMD_CMD) {
+ switch (val & CMD_CMD) {
case CMD_NOP:
trace_esp_mem_writeb_cmd_nop(val);
break;
case CMD_FLUSH:
trace_esp_mem_writeb_cmd_flush(val);
- //s->ti_size = 0;
- s->rregs[ESP_RINTR] = INTR_FC;
- s->rregs[ESP_RSEQ] = 0;
- s->rregs[ESP_RFLAGS] = 0;
+ fifo8_reset(&s->fifo);
break;
case CMD_RESET:
trace_esp_mem_writeb_cmd_reset(val);
@@ -681,23 +977,24 @@ void esp_reg_write(ESPState *s, uint32_t saddr, uint64_t val)
break;
case CMD_BUSRESET:
trace_esp_mem_writeb_cmd_bus_reset(val);
- s->rregs[ESP_RINTR] = INTR_RST;
if (!(s->wregs[ESP_CFG1] & CFG1_RESREPT)) {
+ s->rregs[ESP_RINTR] |= INTR_RST;
esp_raise_irq(s);
}
break;
case CMD_TI:
+ trace_esp_mem_writeb_cmd_ti(val);
handle_ti(s);
break;
case CMD_ICCS:
trace_esp_mem_writeb_cmd_iccs(val);
write_response(s);
- s->rregs[ESP_RINTR] = INTR_FC;
+ s->rregs[ESP_RINTR] |= INTR_FC;
s->rregs[ESP_RSTAT] |= STAT_MI;
break;
case CMD_MSGACC:
trace_esp_mem_writeb_cmd_msgacc(val);
- s->rregs[ESP_RINTR] = INTR_DC;
+ s->rregs[ESP_RINTR] |= INTR_DC;
s->rregs[ESP_RSEQ] = 0;
s->rregs[ESP_RFLAGS] = 0;
esp_raise_irq(s);
@@ -705,7 +1002,7 @@ void esp_reg_write(ESPState *s, uint32_t saddr, uint64_t val)
case CMD_PAD:
trace_esp_mem_writeb_cmd_pad(val);
s->rregs[ESP_RSTAT] = STAT_TC;
- s->rregs[ESP_RINTR] = INTR_FC;
+ s->rregs[ESP_RINTR] |= INTR_FC;
s->rregs[ESP_RSEQ] = 0;
break;
case CMD_SATN:
@@ -763,74 +1060,112 @@ static bool esp_mem_accepts(void *opaque, hwaddr addr,
return (size == 1) || (is_write && size == 4);
}
-static bool esp_pdma_needed(void *opaque)
+static bool esp_is_before_version_5(void *opaque, int version_id)
{
- ESPState *s = opaque;
- return s->dma_memory_read == NULL && s->dma_memory_write == NULL &&
- s->dma_enabled;
+ ESPState *s = ESP(opaque);
+
+ version_id = MIN(version_id, s->mig_version_id);
+ return version_id < 5;
}
-static const VMStateDescription vmstate_esp_pdma = {
- .name = "esp/pdma",
- .version_id = 1,
- .minimum_version_id = 1,
- .needed = esp_pdma_needed,
- .fields = (VMStateField[]) {
- VMSTATE_BUFFER(pdma_buf, ESPState),
- VMSTATE_INT32(pdma_origin, ESPState),
- VMSTATE_UINT32(pdma_len, ESPState),
- VMSTATE_UINT32(pdma_start, ESPState),
- VMSTATE_UINT32(pdma_cur, ESPState),
- VMSTATE_END_OF_LIST()
+static bool esp_is_version_5(void *opaque, int version_id)
+{
+ ESPState *s = ESP(opaque);
+
+ version_id = MIN(version_id, s->mig_version_id);
+ return version_id == 5;
+}
+
+static int esp_pre_save(void *opaque)
+{
+ ESPState *s = ESP(opaque);
+
+ s->mig_version_id = vmstate_esp.version_id;
+ return 0;
+}
+
+static int esp_post_load(void *opaque, int version_id)
+{
+ ESPState *s = ESP(opaque);
+ int len, i;
+
+ version_id = MIN(version_id, s->mig_version_id);
+
+ if (version_id < 5) {
+ esp_set_tc(s, s->mig_dma_left);
+
+ /* Migrate ti_buf to fifo */
+ len = s->mig_ti_wptr - s->mig_ti_rptr;
+ for (i = 0; i < len; i++) {
+ fifo8_push(&s->fifo, s->mig_ti_buf[i]);
+ }
+
+ /* Migrate cmdbuf to cmdfifo */
+ for (i = 0; i < s->mig_cmdlen; i++) {
+ fifo8_push(&s->cmdfifo, s->mig_cmdbuf[i]);
+ }
}
-};
+
+ s->mig_version_id = vmstate_esp.version_id;
+ return 0;
+}
const VMStateDescription vmstate_esp = {
- .name ="esp",
- .version_id = 4,
+ .name = "esp",
+ .version_id = 5,
.minimum_version_id = 3,
+ .pre_save = esp_pre_save,
+ .post_load = esp_post_load,
.fields = (VMStateField[]) {
VMSTATE_BUFFER(rregs, ESPState),
VMSTATE_BUFFER(wregs, ESPState),
VMSTATE_INT32(ti_size, ESPState),
- VMSTATE_UINT32(ti_rptr, ESPState),
- VMSTATE_UINT32(ti_wptr, ESPState),
- VMSTATE_BUFFER(ti_buf, ESPState),
+ VMSTATE_UINT32_TEST(mig_ti_rptr, ESPState, esp_is_before_version_5),
+ VMSTATE_UINT32_TEST(mig_ti_wptr, ESPState, esp_is_before_version_5),
+ VMSTATE_BUFFER_TEST(mig_ti_buf, ESPState, esp_is_before_version_5),
VMSTATE_UINT32(status, ESPState),
- VMSTATE_UINT32(deferred_status, ESPState),
- VMSTATE_BOOL(deferred_complete, ESPState),
+ VMSTATE_UINT32_TEST(mig_deferred_status, ESPState,
+ esp_is_before_version_5),
+ VMSTATE_BOOL_TEST(mig_deferred_complete, ESPState,
+ esp_is_before_version_5),
VMSTATE_UINT32(dma, ESPState),
- VMSTATE_PARTIAL_BUFFER(cmdbuf, ESPState, 16),
- VMSTATE_BUFFER_START_MIDDLE_V(cmdbuf, ESPState, 16, 4),
- VMSTATE_UINT32(cmdlen, ESPState),
+ VMSTATE_STATIC_BUFFER(mig_cmdbuf, ESPState, 0,
+ esp_is_before_version_5, 0, 16),
+ VMSTATE_STATIC_BUFFER(mig_cmdbuf, ESPState, 4,
+ esp_is_before_version_5, 16,
+ sizeof(typeof_field(ESPState, mig_cmdbuf))),
+ VMSTATE_UINT32_TEST(mig_cmdlen, ESPState, esp_is_before_version_5),
VMSTATE_UINT32(do_cmd, ESPState),
- VMSTATE_UINT32(dma_left, ESPState),
+ VMSTATE_UINT32_TEST(mig_dma_left, ESPState, esp_is_before_version_5),
+ VMSTATE_BOOL_TEST(data_in_ready, ESPState, esp_is_version_5),
+ VMSTATE_UINT8_TEST(cmdfifo_cdb_offset, ESPState, esp_is_version_5),
+ VMSTATE_FIFO8_TEST(fifo, ESPState, esp_is_version_5),
+ VMSTATE_FIFO8_TEST(cmdfifo, ESPState, esp_is_version_5),
+ VMSTATE_UINT8_TEST(ti_cmd, ESPState, esp_is_version_5),
VMSTATE_END_OF_LIST()
},
- .subsections = (const VMStateDescription * []) {
- &vmstate_esp_pdma,
- NULL
- }
};
static void sysbus_esp_mem_write(void *opaque, hwaddr addr,
uint64_t val, unsigned int size)
{
SysBusESPState *sysbus = opaque;
+ ESPState *s = ESP(&sysbus->esp);
uint32_t saddr;
saddr = addr >> sysbus->it_shift;
- esp_reg_write(&sysbus->esp, saddr, val);
+ esp_reg_write(s, saddr, val);
}
static uint64_t sysbus_esp_mem_read(void *opaque, hwaddr addr,
unsigned int size)
{
SysBusESPState *sysbus = opaque;
+ ESPState *s = ESP(&sysbus->esp);
uint32_t saddr;
saddr = addr >> sysbus->it_shift;
- return esp_reg_read(&sysbus->esp, saddr);
+ return esp_reg_read(s, saddr);
}
static const MemoryRegionOps sysbus_esp_mem_ops = {
@@ -844,36 +1179,23 @@ static void sysbus_esp_pdma_write(void *opaque, hwaddr addr,
uint64_t val, unsigned int size)
{
SysBusESPState *sysbus = opaque;
- ESPState *s = &sysbus->esp;
+ ESPState *s = ESP(&sysbus->esp);
uint32_t dmalen;
- uint8_t *buf = get_pdma_buf(s);
- dmalen = s->rregs[ESP_TCLO];
- dmalen |= s->rregs[ESP_TCMID] << 8;
- dmalen |= s->rregs[ESP_TCHI] << 16;
- if (dmalen == 0 || s->pdma_len == 0) {
- return;
- }
+ trace_esp_pdma_write(size);
+
switch (size) {
case 1:
- buf[s->pdma_cur++] = val;
- s->pdma_len--;
- dmalen--;
+ esp_pdma_write(s, val);
break;
case 2:
- buf[s->pdma_cur++] = val >> 8;
- buf[s->pdma_cur++] = val;
- s->pdma_len -= 2;
- dmalen -= 2;
+ esp_pdma_write(s, val >> 8);
+ esp_pdma_write(s, val);
break;
}
- s->rregs[ESP_TCLO] = dmalen & 0xff;
- s->rregs[ESP_TCMID] = dmalen >> 8;
- s->rregs[ESP_TCHI] = dmalen >> 16;
- if (s->pdma_len == 0 && s->pdma_cb) {
- esp_lower_drq(s);
+ dmalen = esp_get_tc(s);
+ if (dmalen == 0 || fifo8_num_free(&s->fifo) < 2) {
s->pdma_cb(s);
- s->pdma_cb = NULL;
}
}
@@ -881,29 +1203,22 @@ static uint64_t sysbus_esp_pdma_read(void *opaque, hwaddr addr,
unsigned int size)
{
SysBusESPState *sysbus = opaque;
- ESPState *s = &sysbus->esp;
- uint8_t *buf = get_pdma_buf(s);
+ ESPState *s = ESP(&sysbus->esp);
uint64_t val = 0;
- if (s->pdma_len == 0) {
- return 0;
- }
+ trace_esp_pdma_read(size);
+
switch (size) {
case 1:
- val = buf[s->pdma_cur++];
- s->pdma_len--;
+ val = esp_pdma_read(s);
break;
case 2:
- val = buf[s->pdma_cur++];
- val = (val << 8) | buf[s->pdma_cur++];
- s->pdma_len -= 2;
+ val = esp_pdma_read(s);
+ val = (val << 8) | esp_pdma_read(s);
break;
}
-
- if (s->pdma_len == 0 && s->pdma_cb) {
- esp_lower_drq(s);
+ if (fifo8_num_used(&s->fifo) < 2) {
s->pdma_cb(s);
- s->pdma_cb = NULL;
}
return val;
}
@@ -913,7 +1228,9 @@ static const MemoryRegionOps sysbus_esp_pdma_ops = {
.write = sysbus_esp_pdma_write,
.endianness = DEVICE_NATIVE_ENDIAN,
.valid.min_access_size = 1,
- .valid.max_access_size = 2,
+ .valid.max_access_size = 4,
+ .impl.min_access_size = 1,
+ .impl.max_access_size = 2,
};
static const struct SCSIBusInfo esp_scsi_info = {
@@ -928,8 +1245,8 @@ static const struct SCSIBusInfo esp_scsi_info = {
static void sysbus_esp_gpio_demux(void *opaque, int irq, int level)
{
- SysBusESPState *sysbus = ESP(opaque);
- ESPState *s = &sysbus->esp;
+ SysBusESPState *sysbus = SYSBUS_ESP(opaque);
+ ESPState *s = ESP(&sysbus->esp);
switch (irq) {
case 0:
@@ -944,8 +1261,12 @@ static void sysbus_esp_gpio_demux(void *opaque, int irq, int level)
static void sysbus_esp_realize(DeviceState *dev, Error **errp)
{
SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
- SysBusESPState *sysbus = ESP(dev);
- ESPState *s = &sysbus->esp;
+ SysBusESPState *sysbus = SYSBUS_ESP(dev);
+ ESPState *s = ESP(&sysbus->esp);
+
+ if (!qdev_realize(DEVICE(s), NULL, errp)) {
+ return;
+ }
sysbus_init_irq(sbd, &s->irq);
sysbus_init_irq(sbd, &s->irq_data);
@@ -956,7 +1277,7 @@ static void sysbus_esp_realize(DeviceState *dev, Error **errp)
sysbus, "esp-regs", ESP_REGS << sysbus->it_shift);
sysbus_init_mmio(sbd, &sysbus->iomem);
memory_region_init_io(&sysbus->pdma, OBJECT(sysbus), &sysbus_esp_pdma_ops,
- sysbus, "esp-pdma", 2);
+ sysbus, "esp-pdma", 4);
sysbus_init_mmio(sbd, &sysbus->pdma);
qdev_init_gpio_in(dev, sysbus_esp_gpio_demux, 2);
@@ -966,15 +1287,25 @@ static void sysbus_esp_realize(DeviceState *dev, Error **errp)
static void sysbus_esp_hard_reset(DeviceState *dev)
{
- SysBusESPState *sysbus = ESP(dev);
- esp_hard_reset(&sysbus->esp);
+ SysBusESPState *sysbus = SYSBUS_ESP(dev);
+ ESPState *s = ESP(&sysbus->esp);
+
+ esp_hard_reset(s);
+}
+
+static void sysbus_esp_init(Object *obj)
+{
+ SysBusESPState *sysbus = SYSBUS_ESP(obj);
+
+ object_initialize_child(obj, "esp", &sysbus->esp, TYPE_ESP);
}
static const VMStateDescription vmstate_sysbus_esp_scsi = {
.name = "sysbusespscsi",
- .version_id = 1,
+ .version_id = 2,
.minimum_version_id = 1,
.fields = (VMStateField[]) {
+ VMSTATE_UINT8_V(esp.mig_version_id, SysBusESPState, 2),
VMSTATE_STRUCT(esp, SysBusESPState, 0, vmstate_esp, ESPState),
VMSTATE_END_OF_LIST()
}
@@ -991,15 +1322,51 @@ static void sysbus_esp_class_init(ObjectClass *klass, void *data)
}
static const TypeInfo sysbus_esp_info = {
- .name = TYPE_ESP,
+ .name = TYPE_SYSBUS_ESP,
.parent = TYPE_SYS_BUS_DEVICE,
+ .instance_init = sysbus_esp_init,
.instance_size = sizeof(SysBusESPState),
.class_init = sysbus_esp_class_init,
};
+static void esp_finalize(Object *obj)
+{
+ ESPState *s = ESP(obj);
+
+ fifo8_destroy(&s->fifo);
+ fifo8_destroy(&s->cmdfifo);
+}
+
+static void esp_init(Object *obj)
+{
+ ESPState *s = ESP(obj);
+
+ fifo8_create(&s->fifo, ESP_FIFO_SZ);
+ fifo8_create(&s->cmdfifo, ESP_CMDFIFO_SZ);
+}
+
+static void esp_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ /* internal device for sysbusesp/pciespscsi, not user-creatable */
+ dc->user_creatable = false;
+ set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
+}
+
+static const TypeInfo esp_info = {
+ .name = TYPE_ESP,
+ .parent = TYPE_DEVICE,
+ .instance_init = esp_init,
+ .instance_finalize = esp_finalize,
+ .instance_size = sizeof(ESPState),
+ .class_init = esp_class_init,
+};
+
static void esp_register_types(void)
{
type_register_static(&sysbus_esp_info);
+ type_register_static(&esp_info);
}
type_init(esp_register_types)
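
Aside from the register-level changes, the esp.c hunks above convert the core ESP state into its own QOM device (TYPE_ESP) embedded inside the sysbus wrapper. A minimal sketch of that child-embedding pattern, using invented MYDEV/TYPE_MY_CHILD names rather than the real ones:

    /*
     * Sketch only, assuming the usual QOM helpers: a container device
     * owns an embedded child, initializes it in instance_init and
     * realizes it from its own realize hook. MYDEV, MyDevState and
     * TYPE_MY_CHILD are hypothetical names.
     */
    static void mydev_init(Object *obj)
    {
        MyDevState *s = MYDEV(obj);

        /* The child appears as "child" in the QOM composition tree */
        object_initialize_child(obj, "child", &s->child, TYPE_MY_CHILD);
    }

    static void mydev_realize(DeviceState *dev, Error **errp)
    {
        MyDevState *s = MYDEV(dev);

        /* Realize the embedded child first and propagate any error */
        if (!qdev_realize(DEVICE(&s->child), NULL, errp)) {
            return;
        }
        /* ... container-specific setup follows ... */
    }
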
diff --git a/hw/scsi/lsi53c895a.c b/hw/scsi/lsi53c895a.c
index a4e58580e4..e2c19180a0 100644
--- a/hw/scsi/lsi53c895a.c
+++ b/hw/scsi/lsi53c895a.c
@@ -2312,7 +2312,7 @@ static void lsi_scsi_realize(PCIDevice *dev, Error **errp)
scsi_bus_new(&s->bus, sizeof(s->bus), d, &lsi_scsi_info, NULL);
}
-static void lsi_scsi_unrealize(DeviceState *dev)
+static void lsi_scsi_exit(PCIDevice *dev)
{
LSIState *s = LSI53C895A(dev);
@@ -2325,11 +2325,11 @@ static void lsi_class_init(ObjectClass *klass, void *data)
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
k->realize = lsi_scsi_realize;
+ k->exit = lsi_scsi_exit;
k->vendor_id = PCI_VENDOR_ID_LSI_LOGIC;
k->device_id = PCI_DEVICE_ID_LSI_53C895A;
k->class_id = PCI_CLASS_STORAGE_SCSI;
k->subsystem_id = 0x1000;
- dc->unrealize = lsi_scsi_unrealize;
dc->reset = lsi_scsi_reset;
dc->vmsd = &vmstate_lsi_scsi;
set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
diff --git a/hw/scsi/scsi-bus.c b/hw/scsi/scsi-bus.c
index dc4141ec8d..2d674f94d7 100644
--- a/hw/scsi/scsi-bus.c
+++ b/hw/scsi/scsi-bus.c
@@ -692,6 +692,7 @@ SCSIRequest *scsi_req_alloc(const SCSIReqOps *reqops, SCSIDevice *d,
req->lun = lun;
req->hba_private = hba_private;
req->status = -1;
+ req->host_status = -1;
req->ops = reqops;
object_ref(OBJECT(d));
object_ref(OBJECT(qbus->parent));
@@ -1455,10 +1456,38 @@ void scsi_req_print(SCSIRequest *req)
}
}
+void scsi_req_complete_failed(SCSIRequest *req, int host_status)
+{
+ SCSISense sense;
+ int status;
+
+ assert(req->status == -1 && req->host_status == -1);
+ assert(req->ops != &reqops_unit_attention);
+
+ if (!req->bus->info->fail) {
+ status = scsi_sense_from_host_status(req->host_status, &sense);
+ if (status == CHECK_CONDITION) {
+ scsi_req_build_sense(req, sense);
+ }
+ scsi_req_complete(req, status);
+ return;
+ }
+
+ req->host_status = host_status;
+ scsi_req_ref(req);
+ scsi_req_dequeue(req);
+ req->bus->info->fail(req);
+
+ /* Cancelled requests might end up being completed instead of cancelled */
+ notifier_list_notify(&req->cancel_notifiers, req);
+ scsi_req_unref(req);
+}
+
void scsi_req_complete(SCSIRequest *req, int status)
{
- assert(req->status == -1);
+ assert(req->status == -1 && req->host_status == -1);
req->status = status;
+ req->host_status = SCSI_HOST_OK;
assert(req->sense_len <= sizeof(req->sense));
if (status == GOOD) {
@@ -1646,7 +1675,7 @@ static int put_scsi_requests(QEMUFile *f, void *pv, size_t size,
QTAILQ_FOREACH(req, &s->requests, next) {
assert(!req->io_canceled);
- assert(req->status == -1);
+ assert(req->status == -1 && req->host_status == -1);
assert(req->enqueued);
qemu_put_sbyte(f, req->retry ? 1 : 2);
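
scsi_req_complete_failed() above prefers the HBA's new fail() callback and only synthesizes a CHECK CONDITION itself when no callback is registered. A hypothetical HBA (all myhba_* names invented for illustration) would wire it up roughly as follows:

    /*
     * Sketch: minimal fail() hook for an imaginary HBA. The real
     * implementations (virtio-scsi, vmw_pvscsi below) translate
     * req->host_status into their transport-specific response codes.
     */
    static void myhba_command_failed(SCSIRequest *req)
    {
        MyHBARequest *r = req->hba_private;

        r->response = myhba_response_from_host_status(req->host_status);
        myhba_complete_request(r);
    }

    static const struct SCSIBusInfo myhba_scsi_info = {
        .tcq = true,
        .complete = myhba_command_complete,
        .fail = myhba_command_failed,   /* called by scsi_req_complete_failed() */
        .cancel = myhba_request_cancelled,
    };
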
diff --git a/hw/scsi/scsi-disk.c b/hw/scsi/scsi-disk.c
index a5a58d7db3..bd7103cd0e 100644
--- a/hw/scsi/scsi-disk.c
+++ b/hw/scsi/scsi-disk.c
@@ -77,7 +77,6 @@ typedef struct SCSIDiskReq {
struct iovec iov;
QEMUIOVector qiov;
BlockAcctCookie acct;
- unsigned char *status;
} SCSIDiskReq;
#define SCSI_DISK_F_REMOVABLE 0
@@ -261,8 +260,6 @@ static bool scsi_disk_req_check_error(SCSIDiskReq *r, int ret, bool acct_failed)
if (ret < 0) {
return scsi_handle_rw_error(r, ret, acct_failed);
- } else if (r->status && *r->status) {
- return scsi_handle_rw_error(r, *r->status, acct_failed);
}
return false;
@@ -2697,8 +2694,47 @@ typedef struct SCSIBlockReq {
/* CDB passed to SG_IO. */
uint8_t cdb[16];
+ BlockCompletionFunc *cb;
+ void *cb_opaque;
} SCSIBlockReq;
+static void scsi_block_sgio_complete(void *opaque, int ret)
+{
+ SCSIBlockReq *req = (SCSIBlockReq *)opaque;
+ SCSIDiskReq *r = &req->req;
+ SCSIDevice *s = r->req.dev;
+ sg_io_hdr_t *io_hdr = &req->io_header;
+
+ if (ret == 0) {
+ if (io_hdr->host_status != SCSI_HOST_OK) {
+ scsi_req_complete_failed(&r->req, io_hdr->host_status);
+ scsi_req_unref(&r->req);
+ return;
+ }
+
+ if (io_hdr->driver_status & SG_ERR_DRIVER_TIMEOUT) {
+ ret = BUSY;
+ } else {
+ ret = io_hdr->status;
+ }
+
+ if (ret > 0) {
+ aio_context_acquire(blk_get_aio_context(s->conf.blk));
+ if (scsi_handle_rw_error(r, ret, true)) {
+ aio_context_release(blk_get_aio_context(s->conf.blk));
+ scsi_req_unref(&r->req);
+ return;
+ }
+ aio_context_release(blk_get_aio_context(s->conf.blk));
+
+ /* Ignore error. */
+ ret = 0;
+ }
+ }
+
+ req->cb(req->cb_opaque, ret);
+}
+
static BlockAIOCB *scsi_block_do_sgio(SCSIBlockReq *req,
int64_t offset, QEMUIOVector *iov,
int direction,
@@ -2777,9 +2813,11 @@ static BlockAIOCB *scsi_block_do_sgio(SCSIBlockReq *req,
io_header->timeout = s->qdev.io_timeout * 1000;
io_header->usr_ptr = r;
io_header->flags |= SG_FLAG_DIRECT_IO;
+ req->cb = cb;
+ req->cb_opaque = opaque;
trace_scsi_disk_aio_sgio_command(r->req.tag, req->cdb[0], lba,
nb_logical_blocks, io_header->timeout);
- aiocb = blk_aio_ioctl(s->qdev.conf.blk, SG_IO, io_header, cb, opaque);
+ aiocb = blk_aio_ioctl(s->qdev.conf.blk, SG_IO, io_header, scsi_block_sgio_complete, req);
assert(aiocb != NULL);
return aiocb;
}
@@ -2893,7 +2931,6 @@ static int32_t scsi_block_dma_command(SCSIRequest *req, uint8_t *buf)
return 0;
}
- r->req.status = &r->io_header.status;
return scsi_disk_dma_command(req, buf);
}
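
The scsi-disk.c change interposes scsi_block_sgio_complete() between blk_aio_ioctl() and the original completion callback by stashing the callback pair in the request. The general shape of that trampoline, with MyReq and my_check_result as stand-in names:

    /*
     * Sketch: wrapping an async completion so the SG_IO result can be
     * inspected before the original callback runs. Only blk_aio_ioctl()
     * is real QEMU API here; the rest is illustrative.
     */
    typedef struct MyReq {
        sg_io_hdr_t io_header;
        BlockCompletionFunc *cb;    /* original callback */
        void *cb_opaque;            /* original opaque */
    } MyReq;

    static void my_sgio_complete(void *opaque, int ret)
    {
        MyReq *req = opaque;

        if (ret == 0) {
            ret = my_check_result(&req->io_header);  /* e.g. map host_status */
        }
        req->cb(req->cb_opaque, ret);   /* hand control back to the caller */
    }

    /* At the call site: remember the real callback, pass the trampoline. */
    req->cb = cb;
    req->cb_opaque = opaque;
    blk_aio_ioctl(blk, SG_IO, &req->io_header, my_sgio_complete, req);
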
diff --git a/hw/scsi/scsi-generic.c b/hw/scsi/scsi-generic.c
index cf7e11cf44..98c30c5d5c 100644
--- a/hw/scsi/scsi-generic.c
+++ b/hw/scsi/scsi-generic.c
@@ -75,6 +75,7 @@ static void scsi_command_complete_noio(SCSIGenericReq *r, int ret)
{
int status;
SCSISense sense;
+ sg_io_hdr_t *io_hdr = &r->io_header;
assert(r->req.aiocb == NULL);
@@ -82,15 +83,22 @@ static void scsi_command_complete_noio(SCSIGenericReq *r, int ret)
scsi_req_cancel_complete(&r->req);
goto done;
}
- status = sg_io_sense_from_errno(-ret, &r->io_header, &sense);
- if (status == CHECK_CONDITION) {
- if (r->io_header.driver_status & SG_ERR_DRIVER_SENSE) {
- r->req.sense_len = r->io_header.sb_len_wr;
- } else {
+ if (ret < 0) {
+ status = scsi_sense_from_errno(-ret, &sense);
+ if (status == CHECK_CONDITION) {
scsi_req_build_sense(&r->req, sense);
}
+ } else if (io_hdr->host_status != SCSI_HOST_OK) {
+ scsi_req_complete_failed(&r->req, io_hdr->host_status);
+ goto done;
+ } else if (io_hdr->driver_status & SG_ERR_DRIVER_TIMEOUT) {
+ status = BUSY;
+ } else {
+ status = io_hdr->status;
+ if (io_hdr->driver_status & SG_ERR_DRIVER_SENSE) {
+ r->req.sense_len = io_hdr->sb_len_wr;
+ }
}
-
trace_scsi_generic_command_complete_noio(r, r->req.tag, status);
scsi_req_complete(&r->req, status);
@@ -288,7 +296,10 @@ static void scsi_read_complete(void * opaque, int ret)
}
}
- if (len == 0) {
+ if (r->io_header.host_status != SCSI_HOST_OK ||
+ (r->io_header.driver_status & SG_ERR_DRIVER_TIMEOUT) ||
+ r->io_header.status != GOOD ||
+ len == 0) {
scsi_command_complete_noio(r, 0);
goto done;
}
diff --git a/hw/scsi/trace-events b/hw/scsi/trace-events
index 9788661bfd..1c331fb189 100644
--- a/hw/scsi/trace-events
+++ b/hw/scsi/trace-events
@@ -159,8 +159,12 @@ esp_error_unhandled_command(uint32_t val) "unhandled command (0x%2.2x)"
esp_error_invalid_write(uint32_t val, uint32_t addr) "invalid write of 0x%02x at [0x%x]"
esp_raise_irq(void) "Raise IRQ"
esp_lower_irq(void) "Lower IRQ"
+esp_raise_drq(void) "Raise DREQ"
+esp_lower_drq(void) "Lower DREQ"
esp_dma_enable(void) "Raise enable"
esp_dma_disable(void) "Lower enable"
+esp_pdma_read(int size) "pDMA read %u bytes"
+esp_pdma_write(int size) "pDMA write %u bytes"
esp_get_cmd(uint32_t dmalen, int target) "len %d target %d"
esp_do_busid_cmd(uint8_t busid) "busid 0x%x"
esp_handle_satn_stop(uint32_t cmdlen) "cmdlen %d"
@@ -189,6 +193,7 @@ esp_mem_writeb_cmd_selatn(uint32_t val) "Select with ATN (0x%2.2x)"
esp_mem_writeb_cmd_selatns(uint32_t val) "Select with ATN & stop (0x%2.2x)"
esp_mem_writeb_cmd_ensel(uint32_t val) "Enable selection (0x%2.2x)"
esp_mem_writeb_cmd_dissel(uint32_t val) "Disable selection (0x%2.2x)"
+esp_mem_writeb_cmd_ti(uint32_t val) "Transfer Information (0x%2.2x)"
# esp-pci.c
esp_pci_error_invalid_dma_direction(void) "invalid DMA transfer direction"
diff --git a/hw/scsi/virtio-scsi.c b/hw/scsi/virtio-scsi.c
index 358c0e70b0..6d80730287 100644
--- a/hw/scsi/virtio-scsi.c
+++ b/hw/scsi/virtio-scsi.c
@@ -500,6 +500,51 @@ static void virtio_scsi_complete_cmd_req(VirtIOSCSIReq *req)
virtio_scsi_complete_req(req);
}
+static void virtio_scsi_command_failed(SCSIRequest *r)
+{
+ VirtIOSCSIReq *req = r->hba_private;
+
+ if (r->io_canceled) {
+ return;
+ }
+
+ req->resp.cmd.status = GOOD;
+ switch (r->host_status) {
+ case SCSI_HOST_NO_LUN:
+ req->resp.cmd.response = VIRTIO_SCSI_S_INCORRECT_LUN;
+ break;
+ case SCSI_HOST_BUSY:
+ req->resp.cmd.response = VIRTIO_SCSI_S_BUSY;
+ break;
+ case SCSI_HOST_TIME_OUT:
+ case SCSI_HOST_ABORTED:
+ req->resp.cmd.response = VIRTIO_SCSI_S_ABORTED;
+ break;
+ case SCSI_HOST_BAD_RESPONSE:
+ req->resp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET;
+ break;
+ case SCSI_HOST_RESET:
+ req->resp.cmd.response = VIRTIO_SCSI_S_RESET;
+ break;
+ case SCSI_HOST_TRANSPORT_DISRUPTED:
+ req->resp.cmd.response = VIRTIO_SCSI_S_TRANSPORT_FAILURE;
+ break;
+ case SCSI_HOST_TARGET_FAILURE:
+ req->resp.cmd.response = VIRTIO_SCSI_S_TARGET_FAILURE;
+ break;
+ case SCSI_HOST_RESERVATION_ERROR:
+ req->resp.cmd.response = VIRTIO_SCSI_S_NEXUS_FAILURE;
+ break;
+ case SCSI_HOST_ALLOCATION_FAILURE:
+ case SCSI_HOST_MEDIUM_ERROR:
+ case SCSI_HOST_ERROR:
+ default:
+ req->resp.cmd.response = VIRTIO_SCSI_S_FAILURE;
+ break;
+ }
+ virtio_scsi_complete_cmd_req(req);
+}
+
static void virtio_scsi_command_complete(SCSIRequest *r, size_t resid)
{
VirtIOSCSIReq *req = r->hba_private;
@@ -908,6 +953,7 @@ static struct SCSIBusInfo virtio_scsi_scsi_info = {
.max_lun = VIRTIO_SCSI_MAX_LUN,
.complete = virtio_scsi_command_complete,
+ .fail = virtio_scsi_command_failed,
.cancel = virtio_scsi_request_cancelled,
.change = virtio_scsi_change,
.parse_cdb = virtio_scsi_parse_cdb,
diff --git a/hw/scsi/vmw_pvscsi.c b/hw/scsi/vmw_pvscsi.c
index 0da378ed50..1f30cb020a 100644
--- a/hw/scsi/vmw_pvscsi.c
+++ b/hw/scsi/vmw_pvscsi.c
@@ -511,6 +511,44 @@ pvscsi_write_sense(PVSCSIRequest *r, uint8_t *sense, int len)
}
static void
+pvscsi_command_failed(SCSIRequest *req)
+{
+ PVSCSIRequest *pvscsi_req = req->hba_private;
+ PVSCSIState *s;
+
+ if (!pvscsi_req) {
+ trace_pvscsi_command_complete_not_found(req->tag);
+ return;
+ }
+ s = pvscsi_req->dev;
+
+ switch (req->host_status) {
+ case SCSI_HOST_NO_LUN:
+ pvscsi_req->cmp.hostStatus = BTSTAT_LUNMISMATCH;
+ break;
+ case SCSI_HOST_BUSY:
+ pvscsi_req->cmp.hostStatus = BTSTAT_ABORTQUEUE;
+ break;
+ case SCSI_HOST_TIME_OUT:
+ case SCSI_HOST_ABORTED:
+ pvscsi_req->cmp.hostStatus = BTSTAT_SENTRST;
+ break;
+ case SCSI_HOST_BAD_RESPONSE:
+ pvscsi_req->cmp.hostStatus = BTSTAT_SELTIMEO;
+ break;
+ case SCSI_HOST_RESET:
+ pvscsi_req->cmp.hostStatus = BTSTAT_BUSRESET;
+ break;
+ default:
+ pvscsi_req->cmp.hostStatus = BTSTAT_HASOFTWARE;
+ break;
+ }
+ pvscsi_req->cmp.scsiStatus = GOOD;
+ qemu_sglist_destroy(&pvscsi_req->sgl);
+ pvscsi_complete_request(s, pvscsi_req);
+}
+
+static void
pvscsi_command_complete(SCSIRequest *req, size_t resid)
{
PVSCSIRequest *pvscsi_req = req->hba_private;
@@ -1103,6 +1141,7 @@ static const struct SCSIBusInfo pvscsi_scsi_info = {
.get_sg_list = pvscsi_get_sg_list,
.complete = pvscsi_command_complete,
.cancel = pvscsi_request_cancelled,
+ .fail = pvscsi_command_failed,
};
static void
diff --git a/hw/sh4/Kconfig b/hw/sh4/Kconfig
index 4cbce3a0ed..ab733a3f76 100644
--- a/hw/sh4/Kconfig
+++ b/hw/sh4/Kconfig
@@ -9,16 +9,16 @@ config R2D
select USB_OHCI_PCI
select PCI
select SM501
- select SH4
+ select SH7750
+ select SH_PCI
config SHIX
bool
select SH7750
- select SH4
+ select TC58128
config SH7750
bool
-
-config SH4
- bool
- select PTIMER
+ select SH_INTC
+ select SH_SCI
+ select SH_TIMER
diff --git a/hw/sh4/meson.build b/hw/sh4/meson.build
index 303c0f4287..424d5674de 100644
--- a/hw/sh4/meson.build
+++ b/hw/sh4/meson.build
@@ -2,7 +2,6 @@ sh4_ss = ss.source_set()
sh4_ss.add(files(
'sh7750.c',
'sh7750_regnames.c',
- 'sh_pci.c'
))
sh4_ss.add(when: 'CONFIG_R2D', if_true: files('r2d.c'))
sh4_ss.add(when: 'CONFIG_SHIX', if_true: files('shix.c'))
diff --git a/hw/sh4/sh7750_regs.h b/hw/sh4/sh7750_regs.h
index 3e4554af31..ab073dadc7 100644
--- a/hw/sh4/sh7750_regs.h
+++ b/hw/sh4/sh7750_regs.h
@@ -10,8 +10,28 @@
* Victor V. Vengerov <vvv@oktet.ru>
*
* The license and distribution terms for this file may be
- * found in the file LICENSE in this distribution or at
- * http://www.rtems.com/license/LICENSE.
+ * found in this file hereafter or at http://www.rtems.com/license/LICENSE.
+ *
+ * LICENSE INFORMATION
+ *
+ * RTEMS is free software; you can redistribute it and/or modify it under
+ * terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version. RTEMS is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details. You should have received
+ * a copy of the GNU General Public License along with RTEMS; see
+ * file COPYING. If not, write to the Free Software Foundation, 675
+ * Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * As a special exception, including RTEMS header files in a file,
+ * instantiating RTEMS generics or templates, or linking other files
+ * with RTEMS objects to produce an executable application, does not
+ * by itself cause the resulting executable application to be covered
+ * by the GNU General Public License. This exception does not
+ * however invalidate any other reasons why the executable file might be
+ * covered by the GNU Public License.
*
* @(#) sh7750_regs.h,v 1.2.4.1 2003/09/04 18:46:00 joel Exp
*/
diff --git a/hw/sparc/sun4m.c b/hw/sparc/sun4m.c
index 38ca1e33c7..312e2afaf9 100644
--- a/hw/sparc/sun4m.c
+++ b/hw/sparc/sun4m.c
@@ -334,7 +334,7 @@ static void *sparc32_dma_init(hwaddr dma_base,
OBJECT(dma), "espdma"));
sysbus_connect_irq(SYS_BUS_DEVICE(espdma), 0, espdma_irq);
- esp = ESP(object_resolve_path_component(OBJECT(espdma), "esp"));
+ esp = SYSBUS_ESP(object_resolve_path_component(OBJECT(espdma), "esp"));
ledma = SPARC32_LEDMA_DEVICE(object_resolve_path_component(
OBJECT(dma), "ledma"));
diff --git a/hw/timer/Kconfig b/hw/timer/Kconfig
index 8749edfb6a..18936ef55b 100644
--- a/hw/timer/Kconfig
+++ b/hw/timer/Kconfig
@@ -36,6 +36,10 @@ config CMSDK_APB_DUALTIMER
bool
select PTIMER
+config SH_TIMER
+ bool
+ select PTIMER
+
config RENESAS_TMR
bool
diff --git a/hw/timer/meson.build b/hw/timer/meson.build
index be343f68fe..26c2701fd7 100644
--- a/hw/timer/meson.build
+++ b/hw/timer/meson.build
@@ -30,7 +30,7 @@ softmmu_ss.add(when: 'CONFIG_OMAP', if_true: files('omap_synctimer.c'))
softmmu_ss.add(when: 'CONFIG_PUV3', if_true: files('puv3_ost.c'))
softmmu_ss.add(when: 'CONFIG_PXA2XX', if_true: files('pxa2xx_timer.c'))
softmmu_ss.add(when: 'CONFIG_RASPI', if_true: files('bcm2835_systmr.c'))
-softmmu_ss.add(when: 'CONFIG_SH4', if_true: files('sh_timer.c'))
+softmmu_ss.add(when: 'CONFIG_SH_TIMER', if_true: files('sh_timer.c'))
softmmu_ss.add(when: 'CONFIG_SLAVIO', if_true: files('slavio_timer.c'))
softmmu_ss.add(when: 'CONFIG_STM32F2XX_TIMER', if_true: files('stm32f2xx_timer.c'))
softmmu_ss.add(when: 'CONFIG_XILINX', if_true: files('xilinx_timer.c'))
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index b7b3c0ef12..6b036cae8f 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -448,9 +448,6 @@ struct TranslationBlock {
target_ulong pc; /* simulated PC corresponding to this block (EIP + CS base) */
target_ulong cs_base; /* CS base for this block */
uint32_t flags; /* flags defining in which context the code was generated */
- uint16_t size; /* size of target code for this block (1 <=
- size <= TARGET_PAGE_SIZE) */
- uint16_t icount;
uint32_t cflags; /* compile flags */
#define CF_COUNT_MASK 0x00007fff
#define CF_LAST_IO 0x00008000 /* Last insn may be an IO access. */
@@ -460,12 +457,18 @@ struct TranslationBlock {
#define CF_PARALLEL 0x00080000 /* Generate code for a parallel context */
#define CF_CLUSTER_MASK 0xff000000 /* Top 8 bits are cluster ID */
#define CF_CLUSTER_SHIFT 24
-/* cflags' mask for hashing/comparison, basically ignore CF_INVALID */
-#define CF_HASH_MASK (~CF_INVALID)
/* Per-vCPU dynamic tracing state used to generate this TB */
uint32_t trace_vcpu_dstate;
+ /*
+ * Above fields used for comparing
+ */
+
+ /* size of target code for this block (1 <= size <= TARGET_PAGE_SIZE) */
+ uint16_t size;
+ uint16_t icount;
+
struct tb_tc tc;
/* first and second physical page containing code. The lower bit
@@ -510,8 +513,6 @@ struct TranslationBlock {
uintptr_t jmp_dest[2];
};
-extern bool parallel_cpus;
-
/* Hide the qatomic_read to make code a little easier on the eyes */
static inline uint32_t tb_cflags(const TranslationBlock *tb)
{
@@ -519,10 +520,9 @@ static inline uint32_t tb_cflags(const TranslationBlock *tb)
}
/* current cflags for hashing/comparison */
-static inline uint32_t curr_cflags(void)
+static inline uint32_t curr_cflags(CPUState *cpu)
{
- return (parallel_cpus ? CF_PARALLEL : 0)
- | (icount_enabled() ? CF_USE_ICOUNT : 0);
+ return cpu->tcg_cflags;
}
/* TranslationBlock invalidate API */
@@ -536,7 +536,7 @@ void tb_flush(CPUState *cpu);
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
target_ulong cs_base, uint32_t flags,
- uint32_t cf_mask);
+ uint32_t cflags);
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr);
/* GETPC is the true target of the return instruction that we'll execute. */
diff --git a/include/exec/poison.h b/include/exec/poison.h
index d7ae1f23e7..6bb86f6c2f 100644
--- a/include/exec/poison.h
+++ b/include/exec/poison.h
@@ -10,6 +10,7 @@
#pragma GCC poison TARGET_ALPHA
#pragma GCC poison TARGET_ARM
#pragma GCC poison TARGET_CRIS
+#pragma GCC poison TARGET_HEXAGON
#pragma GCC poison TARGET_HPPA
#pragma GCC poison TARGET_LM32
#pragma GCC poison TARGET_M68K
@@ -73,6 +74,7 @@
#pragma GCC poison CONFIG_CRIS_DIS
#pragma GCC poison CONFIG_HPPA_DIS
#pragma GCC poison CONFIG_I386_DIS
+#pragma GCC poison CONFIG_HEXAGON_DIS
#pragma GCC poison CONFIG_LM32_DIS
#pragma GCC poison CONFIG_M68K_DIS
#pragma GCC poison CONFIG_MICROBLAZE_DIS
diff --git a/include/exec/tb-lookup.h b/include/exec/tb-lookup.h
index 9cf475bb03..29d61ceb34 100644
--- a/include/exec/tb-lookup.h
+++ b/include/exec/tb-lookup.h
@@ -17,30 +17,28 @@
#include "exec/tb-hash.h"
/* Might cause an exception, so have a longjmp destination ready */
-static inline TranslationBlock *
-tb_lookup__cpu_state(CPUState *cpu, target_ulong *pc, target_ulong *cs_base,
- uint32_t *flags, uint32_t cf_mask)
+static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc,
+ target_ulong cs_base,
+ uint32_t flags, uint32_t cflags)
{
- CPUArchState *env = (CPUArchState *)cpu->env_ptr;
TranslationBlock *tb;
uint32_t hash;
- cpu_get_tb_cpu_state(env, pc, cs_base, flags);
- hash = tb_jmp_cache_hash_func(*pc);
- tb = qatomic_rcu_read(&cpu->tb_jmp_cache[hash]);
+ /* we should never be trying to look up an INVALID tb */
+ tcg_debug_assert(!(cflags & CF_INVALID));
- cf_mask &= ~CF_CLUSTER_MASK;
- cf_mask |= cpu->cluster_index << CF_CLUSTER_SHIFT;
+ hash = tb_jmp_cache_hash_func(pc);
+ tb = qatomic_rcu_read(&cpu->tb_jmp_cache[hash]);
if (likely(tb &&
- tb->pc == *pc &&
- tb->cs_base == *cs_base &&
- tb->flags == *flags &&
+ tb->pc == pc &&
+ tb->cs_base == cs_base &&
+ tb->flags == flags &&
tb->trace_vcpu_dstate == *cpu->trace_dstate &&
- (tb_cflags(tb) & (CF_HASH_MASK | CF_INVALID)) == cf_mask)) {
+ tb_cflags(tb) == cflags)) {
return tb;
}
- tb = tb_htable_lookup(cpu, *pc, *cs_base, *flags, cf_mask);
+ tb = tb_htable_lookup(cpu, pc, cs_base, flags, cflags);
if (tb == NULL) {
return NULL;
}
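
Because CF_HASH_MASK is gone, tb_lookup() now compares cflags for exact equality, so callers must pass fully-formed cflags derived from the per-vCPU cpu->tcg_cflags. A hedged sketch of a typical caller:

    /*
     * Sketch of a caller of the new interface. cpu_get_tb_cpu_state(),
     * curr_cflags() and tb_lookup() are the functions touched by this
     * series; the surrounding glue is illustrative.
     */
    target_ulong pc, cs_base;
    uint32_t flags, cflags;
    TranslationBlock *tb;

    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    cflags = curr_cflags(cpu);          /* per-vCPU, includes CF_PARALLEL */
    tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
    if (tb == NULL) {
        /* miss: translate a new block with the same cflags */
    }
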
diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
index c005d3dc2d..c68bc3ba8a 100644
--- a/include/hw/core/cpu.h
+++ b/include/hw/core/cpu.h
@@ -282,6 +282,7 @@ struct qemu_work_item;
* to a cluster this will be UNASSIGNED_CLUSTER_INDEX; otherwise it will
* be the same as the cluster-id property of the CPU object's TYPE_CPU_CLUSTER
* QOM parent.
+ * @tcg_cflags: Pre-computed cflags for this cpu.
* @nr_cores: Number of cores within this CPU package.
* @nr_threads: Number of threads within this CPU.
* @running: #true if CPU is currently running (lockless).
@@ -412,6 +413,7 @@ struct CPUState {
/* TODO Move common fields from CPUArchState here. */
int cpu_index;
int cluster_index;
+ uint32_t tcg_cflags;
uint32_t halted;
uint32_t can_do_io;
int32_t exception_index;
diff --git a/include/hw/elf_ops.h b/include/hw/elf_ops.h
index 8e8436831d..78409ab34a 100644
--- a/include/hw/elf_ops.h
+++ b/include/hw/elf_ops.h
@@ -598,9 +598,7 @@ static int glue(load_elf, SZ)(const char *name, int fd,
nhdr = glue(get_elf_note_type, SZ)(nhdr, file_size, ph->p_align,
*(uint64_t *)translate_opaque);
if (nhdr != NULL) {
- bool is64 =
- sizeof(struct elf_note) == sizeof(struct elf64_note);
- elf_note_fn((void *)nhdr, (void *)&ph->p_align, is64);
+ elf_note_fn((void *)nhdr, (void *)&ph->p_align, SZ == 64);
}
data = NULL;
}
diff --git a/include/hw/scsi/esp.h b/include/hw/scsi/esp.h
index d8a6263c13..95088490aa 100644
--- a/include/hw/scsi/esp.h
+++ b/include/hw/scsi/esp.h
@@ -3,6 +3,7 @@
#include "hw/scsi/scsi.h"
#include "hw/sysbus.h"
+#include "qemu/fifo8.h"
#include "qom/object.h"
/* esp.c */
@@ -10,19 +11,17 @@
typedef void (*ESPDMAMemoryReadWriteFunc)(void *opaque, uint8_t *buf, int len);
#define ESP_REGS 16
-#define TI_BUFSZ 16
-#define ESP_CMDBUF_SZ 32
+#define ESP_FIFO_SZ 16
+#define ESP_CMDFIFO_SZ 32
typedef struct ESPState ESPState;
-enum pdma_origin_id {
- PDMA,
- TI,
- CMD,
- ASYNC,
-};
+#define TYPE_ESP "esp"
+OBJECT_DECLARE_SIMPLE_TYPE(ESPState, ESP)
struct ESPState {
+ DeviceState parent_obj;
+
uint8_t rregs[ESP_REGS];
uint8_t wregs[ESP_REGS];
qemu_irq irq;
@@ -30,24 +29,18 @@ struct ESPState {
uint8_t chip_id;
bool tchi_written;
int32_t ti_size;
- uint32_t ti_rptr, ti_wptr;
uint32_t status;
- uint32_t deferred_status;
- bool deferred_complete;
uint32_t dma;
- uint8_t ti_buf[TI_BUFSZ];
+ Fifo8 fifo;
SCSIBus bus;
SCSIDevice *current_dev;
SCSIRequest *current_req;
- uint8_t cmdbuf[ESP_CMDBUF_SZ];
- uint32_t cmdlen;
+ Fifo8 cmdfifo;
+ uint8_t cmdfifo_cdb_offset;
uint32_t do_cmd;
- /* The amount of data left in the current DMA transfer. */
- uint32_t dma_left;
- /* The size of the current DMA transfer. Zero if no transfer is in
- progress. */
- uint32_t dma_counter;
+ bool data_in_ready;
+ uint8_t ti_cmd;
int dma_enabled;
uint32_t async_len;
@@ -57,16 +50,22 @@ struct ESPState {
ESPDMAMemoryReadWriteFunc dma_memory_write;
void *dma_opaque;
void (*dma_cb)(ESPState *s);
- uint8_t pdma_buf[32];
- int pdma_origin;
- uint32_t pdma_len;
- uint32_t pdma_start;
- uint32_t pdma_cur;
void (*pdma_cb)(ESPState *s);
+
+ uint8_t mig_version_id;
+
+ /* Legacy fields for vmstate_esp version < 5 */
+ uint32_t mig_dma_left;
+ uint32_t mig_deferred_status;
+ bool mig_deferred_complete;
+ uint32_t mig_ti_rptr, mig_ti_wptr;
+ uint8_t mig_ti_buf[ESP_FIFO_SZ];
+ uint8_t mig_cmdbuf[ESP_CMDFIFO_SZ];
+ uint32_t mig_cmdlen;
};
-#define TYPE_ESP "esp"
-OBJECT_DECLARE_SIMPLE_TYPE(SysBusESPState, ESP)
+#define TYPE_SYSBUS_ESP "sysbus-esp"
+OBJECT_DECLARE_SIMPLE_TYPE(SysBusESPState, SYSBUS_ESP)
struct SysBusESPState {
/*< private >*/
@@ -142,6 +141,7 @@ struct SysBusESPState {
#define INTR_RST 0x80
#define SEQ_0 0x0
+#define SEQ_MO 0x1
#define SEQ_CD 0x4
#define CFG1_RESREPT 0x40
diff --git a/include/hw/scsi/scsi.h b/include/hw/scsi/scsi.h
index 5d992e6e1d..0b726bc78c 100644
--- a/include/hw/scsi/scsi.h
+++ b/include/hw/scsi/scsi.h
@@ -27,7 +27,8 @@ struct SCSIRequest {
uint32_t refcount;
uint32_t tag;
uint32_t lun;
- uint32_t status;
+ int16_t status;
+ int16_t host_status;
void *hba_private;
size_t resid;
SCSICommand cmd;
@@ -123,6 +124,7 @@ struct SCSIBusInfo {
int (*parse_cdb)(SCSIDevice *dev, SCSICommand *cmd, uint8_t *buf,
void *hba_private);
void (*transfer_data)(SCSIRequest *req, uint32_t arg);
+ void (*fail)(SCSIRequest *req);
void (*complete)(SCSIRequest *req, size_t resid);
void (*cancel)(SCSIRequest *req);
void (*change)(SCSIBus *bus, SCSIDevice *dev, SCSISense sense);
@@ -177,6 +179,7 @@ void scsi_req_print(SCSIRequest *req);
void scsi_req_continue(SCSIRequest *req);
void scsi_req_data(SCSIRequest *req, int len);
void scsi_req_complete(SCSIRequest *req, int status);
+void scsi_req_complete_failed(SCSIRequest *req, int host_status);
uint8_t *scsi_req_get_buf(SCSIRequest *req);
int scsi_req_get_sense(SCSIRequest *req, uint8_t *buf, int len);
void scsi_req_cancel_complete(SCSIRequest *req);
diff --git a/include/hw/sh4/sh.h b/include/hw/sh4/sh.h
index 93f464bf4c..becb596979 100644
--- a/include/hw/sh4/sh.h
+++ b/include/hw/sh4/sh.h
@@ -1,6 +1,31 @@
-#ifndef QEMU_SH_H
-#define QEMU_SH_H
-/* Definitions for SH board emulation. */
+/*
+ * Definitions for SH board emulation
+ *
+ * Copyright (c) 2005 Samuel Tardieu
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ * SPDX-License-Identifier: MIT
+ */
+#ifndef QEMU_HW_SH_H
+#define QEMU_HW_SH_H
#include "hw/sh4/sh_intc.h"
#include "target/sh4/cpu-qom.h"
diff --git a/include/qemu/config-file.h b/include/qemu/config-file.h
index 29226107bd..8d3e53ae4d 100644
--- a/include/qemu/config-file.h
+++ b/include/qemu/config-file.h
@@ -11,9 +11,10 @@ void qemu_add_drive_opts(QemuOptsList *list);
int qemu_global_option(const char *str);
void qemu_config_write(FILE *fp);
-int qemu_config_parse(FILE *fp, QemuOptsList **lists, const char *fname);
+int qemu_config_parse(FILE *fp, QemuOptsList **lists, const char *fname,
+ Error **errp);
-int qemu_read_config_file(const char *filename);
+int qemu_read_config_file(const char *filename, Error **errp);
/* Parse QDict options as a replacement for a config file (allowing multiple
enumerated (0..(n-1)) configuration "sections") */
diff --git a/include/scsi/utils.h b/include/scsi/utils.h
index ff7c7091b6..d5c8efa16e 100644
--- a/include/scsi/utils.h
+++ b/include/scsi/utils.h
@@ -16,6 +16,22 @@ enum SCSIXferMode {
SCSI_XFER_TO_DEV, /* WRITE, MODE_SELECT, ... */
};
+enum SCSIHostStatus {
+ SCSI_HOST_OK,
+ SCSI_HOST_NO_LUN,
+ SCSI_HOST_BUSY,
+ SCSI_HOST_TIME_OUT,
+ SCSI_HOST_BAD_RESPONSE,
+ SCSI_HOST_ABORTED,
+ SCSI_HOST_ERROR = 0x07,
+ SCSI_HOST_RESET = 0x08,
+ SCSI_HOST_TRANSPORT_DISRUPTED = 0xe,
+ SCSI_HOST_TARGET_FAILURE = 0x10,
+ SCSI_HOST_RESERVATION_ERROR = 0x11,
+ SCSI_HOST_ALLOCATION_FAILURE = 0x12,
+ SCSI_HOST_MEDIUM_ERROR = 0x13,
+};
+
typedef struct SCSICommand {
uint8_t buf[SCSI_CMD_BUF_SIZE];
int len;
@@ -123,18 +139,9 @@ int scsi_cdb_length(uint8_t *buf);
#ifdef CONFIG_LINUX
#define SG_ERR_DRIVER_TIMEOUT 0x06
#define SG_ERR_DRIVER_SENSE 0x08
-
-#define SG_ERR_DID_OK 0x00
-#define SG_ERR_DID_NO_CONNECT 0x01
-#define SG_ERR_DID_BUS_BUSY 0x02
-#define SG_ERR_DID_TIME_OUT 0x03
-
-#define SG_ERR_DRIVER_SENSE 0x08
-
-int sg_io_sense_from_errno(int errno_value, struct sg_io_hdr *io_hdr,
- SCSISense *sense);
#endif
int scsi_sense_from_errno(int errno_value, SCSISense *sense);
+int scsi_sense_from_host_status(uint8_t host_status, SCSISense *sense);
#endif
diff --git a/linux-user/main.c b/linux-user/main.c
index 81f48ff54e..4f4746dce8 100644
--- a/linux-user/main.c
+++ b/linux-user/main.c
@@ -205,6 +205,7 @@ CPUArchState *cpu_copy(CPUArchState *env)
/* Reset non arch specific state */
cpu_reset(new_cpu);
+ new_cpu->tcg_cflags = cpu->tcg_cflags;
memcpy(new_env, env, sizeof(CPUArchState));
/* Clone all break/watchpoints.
diff --git a/linux-user/sh4/signal.c b/linux-user/sh4/signal.c
index cc89a48ff8..29c1ee30e6 100644
--- a/linux-user/sh4/signal.c
+++ b/linux-user/sh4/signal.c
@@ -82,9 +82,11 @@ static abi_ulong get_sigframe(struct target_sigaction *ka,
return (sp - frame_size) & -8ul;
}
-/* Notice when we're in the middle of a gUSA region and reset.
- Note that this will only occur for !parallel_cpus, as we will
- translate such sequences differently in a parallel context. */
+/*
+ * Notice when we're in the middle of a gUSA region and reset.
+ * Note that this will only occur when #CF_PARALLEL is unset, as we
+ * will translate such sequences differently in a parallel context.
+ */
static void unwind_gusa(CPUSH4State *regs)
{
/* If the stack pointer is sufficiently negative, and we haven't
diff --git a/linux-user/syscall.c b/linux-user/syscall.c
index 389ec09764..9522f603aa 100644
--- a/linux-user/syscall.c
+++ b/linux-user/syscall.c
@@ -6481,6 +6481,16 @@ static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
/* Grab a mutex so that thread setup appears atomic. */
pthread_mutex_lock(&clone_lock);
+ /*
+ * If this is our first additional thread, we need to ensure we
+ * generate code for parallel execution and flush old translations.
+ * Do this now so that the copy gets CF_PARALLEL too.
+ */
+ if (!(cpu->tcg_cflags & CF_PARALLEL)) {
+ cpu->tcg_cflags |= CF_PARALLEL;
+ tb_flush(cpu);
+ }
+
/* we create a new CPU instance. */
new_env = cpu_copy(env);
/* Init regs that differ from the parent. */
@@ -6521,14 +6531,6 @@ static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
cpu->random_seed = qemu_guest_random_seed_thread_part1();
- /* If this is our first additional thread, we need to ensure we
- * generate code for parallel execution and flush old translations.
- */
- if (!parallel_cpus) {
- parallel_cpus = true;
- tb_flush(cpu);
- }
-
ret = pthread_create(&info.thread, &attr, clone_func, &info);
/* TODO: Free new CPU state if thread creation failed. */
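
Since cpu_copy() now propagates tcg_cflags (see the linux-user/main.c hunk above), the CF_PARALLEL upgrade has to happen before the clone is copied, which is why the block moved up in do_fork(). In outline:

    /* Sketch of the required ordering; locking and errors elided. */
    if (!(cpu->tcg_cflags & CF_PARALLEL)) {
        cpu->tcg_cflags |= CF_PARALLEL;   /* new TBs must be parallel-safe */
        tb_flush(cpu);                    /* drop non-parallel translations */
    }
    new_env = cpu_copy(env);              /* the clone inherits CF_PARALLEL */
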
diff --git a/meson.build b/meson.build
index 81d760d6e8..adeec153d9 100644
--- a/meson.build
+++ b/meson.build
@@ -1574,6 +1574,18 @@ if have_system
endif
endif
+# For CFI, we need to compile slirp as a static library together with qemu.
+# This is because we register slirp functions as callbacks for QEMU Timers.
+# When using a system-wide shared libslirp, the type information for the
+# callback is missing and the timer call produces a false positive with CFI.
+#
+# Now that slirp_opt has been defined, check if the selected slirp is compatible
+# with control-flow integrity.
+if get_option('cfi') and slirp_opt == 'system'
+ error('Control-Flow Integrity is not compatible with system-wide slirp.' \
+ + ' Please configure with --enable-slirp=git')
+endif
+
fdt = not_found
fdt_opt = get_option('fdt')
if have_system
diff --git a/qemu-options.hx b/qemu-options.hx
index 252db9357c..90801286c6 100644
--- a/qemu-options.hx
+++ b/qemu-options.hx
@@ -3033,7 +3033,7 @@ DEFHEADING(Character device options:)
DEF("chardev", HAS_ARG, QEMU_OPTION_chardev,
"-chardev help\n"
"-chardev null,id=id[,mux=on|off][,logfile=PATH][,logappend=on|off]\n"
- "-chardev socket,id=id[,host=host],port=port[,to=to][,ipv4=on|off][,ipv6=on|off][,delay=on|off][,reconnect=seconds]\n"
+ "-chardev socket,id=id[,host=host],port=port[,to=to][,ipv4=on|off][,ipv6=on|off][,nodelay=on|off][,reconnect=seconds]\n"
" [,server=on|off][,wait=on|off][,telnet=on|off][,websocket=on|off][,reconnect=seconds][,mux=on|off]\n"
" [,logfile=PATH][,logappend=on|off][,tls-creds=ID][,tls-authz=ID] (tcp)\n"
"-chardev socket,id=id,path=path[,server=on|off][,wait=on|off][,telnet=on|off][,websocket=on|off][,reconnect=seconds]\n"
@@ -3184,7 +3184,7 @@ The available backends are:
TCP and unix socket options are given below:
- ``TCP options: port=port[,host=host][,to=to][,ipv4=on|off][,ipv6=on|off][,delay=on|off]``
+ ``TCP options: port=port[,host=host][,to=to][,ipv4=on|off][,ipv6=on|off][,nodelay=on|off]``
``host`` for a listening socket specifies the local address to
      be bound. For a connecting socket it specifies the remote host to
connect to. ``host`` is optional for listening sockets. If not
@@ -3204,7 +3204,7 @@ The available backends are:
or IPv6 must be used. If neither is specified the socket may
use either protocol.
- ``delay=on|off`` disables the Nagle algorithm.
+ ``nodelay=on|off`` disables the Nagle algorithm.
``unix options: path=path[,abstract=on|off][,tight=on|off]``
``path`` specifies the local path of the unix socket. ``path``
@@ -3593,13 +3593,13 @@ SRST
``telnet options:``
localhost 5555
- ``tcp:[host]:port[,server=on|off][,wait=on|off][,delay=on|off][,reconnect=seconds]``
+ ``tcp:[host]:port[,server=on|off][,wait=on|off][,nodelay=on|off][,reconnect=seconds]``
The TCP Net Console has two modes of operation. It can send the
serial I/O to a location or wait for a connection from a
      location. By default the TCP Net Console connects to the host at
      the given port. If you use the ``server=on`` option QEMU will wait for a client
socket application to connect to the port before continuing,
- unless the ``wait=on|off`` option was specified. The ``delay=on|off``
+ unless the ``wait=on|off`` option was specified. The ``nodelay=on|off``
option disables the Nagle buffering algorithm. The ``reconnect=on``
      option only applies if ``server=no`` is set; if the connection goes
down it will attempt to reconnect at the given interval. If host
@@ -3616,7 +3616,7 @@ SRST
``Example to not wait and listen on ip 192.168.0.100 port 4444``
-serial tcp:192.168.0.100:4444,server=on,wait=off
- ``telnet:host:port[,server=on|off][,wait=on|off][,delay=on|off]``
+ ``telnet:host:port[,server=on|off][,wait=on|off][,nodelay=on|off]``
The telnet protocol is used instead of raw tcp sockets. The
options work the same as if you had specified ``-serial tcp``.
The difference is that the port acts like a telnet server or
@@ -3626,7 +3626,7 @@ SRST
you do it with Control-] and then type "send break" followed by
pressing the enter key.
- ``websocket:host:port,server=on[,wait=on|off][,delay=on|off]``
+ ``websocket:host:port,server=on[,wait=on|off][,nodelay=on|off]``
The WebSocket protocol is used instead of raw tcp socket. The
port acts as a WebSocket server. Client mode is not supported.
diff --git a/qga/vss-win32/meson.build b/qga/vss-win32/meson.build
index 780c461432..90825edef3 100644
--- a/qga/vss-win32/meson.build
+++ b/qga/vss-win32/meson.build
@@ -1,5 +1,5 @@
if add_languages('cpp', required: false)
- glib_static = dependency('glib-2.0', static: true)
+ glib_dynamic = dependency('glib-2.0', static: false)
link_args = cc.get_supported_link_arguments(['-fstack-protector-all', '-fstack-protector-strong',
'-Wl,--add-stdcall-alias', '-Wl,--enable-stdcall-fixup'])
@@ -8,7 +8,7 @@ if add_languages('cpp', required: false)
cpp_args: ['-Wno-unknown-pragmas', '-Wno-delete-non-virtual-dtor', '-Wno-non-virtual-dtor'],
link_args: link_args,
vs_module_defs: 'qga-vss.def',
- dependencies: [glib_static, socket,
+ dependencies: [glib_dynamic, socket,
cc.find_library('ole32'),
cc.find_library('oleaut32'),
cc.find_library('shlwapi'),
diff --git a/qom/object_interfaces.c b/qom/object_interfaces.c
index 1e9ad6f08a..7661270b98 100644
--- a/qom/object_interfaces.c
+++ b/qom/object_interfaces.c
@@ -8,6 +8,7 @@
#include "qapi/qobject-input-visitor.h"
#include "qom/object_interfaces.h"
#include "qemu/help_option.h"
+#include "qemu/id.h"
#include "qemu/module.h"
#include "qemu/option.h"
#include "qapi/opts-visitor.h"
@@ -41,11 +42,19 @@ Object *user_creatable_add_type(const char *type, const char *id,
const QDict *qdict,
Visitor *v, Error **errp)
{
+ ERRP_GUARD();
Object *obj;
ObjectClass *klass;
const QDictEntry *e;
Error *local_err = NULL;
+ if (id != NULL && !id_wellformed(id)) {
+ error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "id", "an identifier");
+ error_append_hint(errp, "Identifiers consist of letters, digits, "
+ "'-', '.', '_', starting with a letter.\n");
+ return NULL;
+ }
+
klass = object_class_by_name(type);
if (!klass) {
error_setg(errp, "invalid object type: %s", type);
diff --git a/scsi/qemu-pr-helper.c b/scsi/qemu-pr-helper.c
index 2733d92f2d..7b9389b47b 100644
--- a/scsi/qemu-pr-helper.c
+++ b/scsi/qemu-pr-helper.c
@@ -149,19 +149,29 @@ static int do_sgio_worker(void *opaque)
io_hdr.dxferp = (char *)data->buf;
io_hdr.dxfer_len = data->sz;
ret = ioctl(data->fd, SG_IO, &io_hdr);
- status = sg_io_sense_from_errno(ret < 0 ? errno : 0, &io_hdr,
- &sense_code);
+
+ if (ret < 0) {
+ status = scsi_sense_from_errno(errno, &sense_code);
+ if (status == CHECK_CONDITION) {
+ scsi_build_sense(data->sense, sense_code);
+ }
+ } else if (io_hdr.host_status != SCSI_HOST_OK) {
+ status = scsi_sense_from_host_status(io_hdr.host_status, &sense_code);
+ if (status == CHECK_CONDITION) {
+ scsi_build_sense(data->sense, sense_code);
+ }
+ } else if (io_hdr.driver_status & SG_ERR_DRIVER_TIMEOUT) {
+ status = BUSY;
+ } else {
+ status = io_hdr.status;
+ }
+
if (status == GOOD) {
data->sz -= io_hdr.resid;
} else {
data->sz = 0;
}
- if (status == CHECK_CONDITION &&
- !(io_hdr.driver_status & SG_ERR_DRIVER_SENSE)) {
- scsi_build_sense(data->sense, sense_code);
- }
-
return status;
}
diff --git a/scsi/utils.c b/scsi/utils.c
index 6b56e01002..873e05aeaf 100644
--- a/scsi/utils.c
+++ b/scsi/utils.c
@@ -257,6 +257,21 @@ const struct SCSISense sense_code_LUN_COMM_FAILURE = {
.key = ABORTED_COMMAND, .asc = 0x08, .ascq = 0x00
};
+/* Command aborted, LUN does not respond to selection */
+const struct SCSISense sense_code_LUN_NOT_RESPONDING = {
+ .key = ABORTED_COMMAND, .asc = 0x05, .ascq = 0x00
+};
+
+/* Command aborted, Command Timeout during processing */
+const struct SCSISense sense_code_COMMAND_TIMEOUT = {
+ .key = ABORTED_COMMAND, .asc = 0x2e, .ascq = 0x02
+};
+
+/* Command aborted, Commands cleared by device server */
+const struct SCSISense sense_code_COMMAND_ABORTED = {
+ .key = ABORTED_COMMAND, .asc = 0x2f, .ascq = 0x02
+};
+
/* Medium Error, Unrecovered read error */
const struct SCSISense sense_code_READ_ERROR = {
.key = MEDIUM_ERROR, .asc = 0x11, .ascq = 0x00
@@ -605,28 +620,41 @@ int scsi_sense_from_errno(int errno_value, SCSISense *sense)
}
}
-#ifdef CONFIG_LINUX
-int sg_io_sense_from_errno(int errno_value, struct sg_io_hdr *io_hdr,
- SCSISense *sense)
+int scsi_sense_from_host_status(uint8_t host_status,
+ SCSISense *sense)
{
- if (errno_value != 0) {
- return scsi_sense_from_errno(errno_value, sense);
- } else {
- if (io_hdr->host_status == SG_ERR_DID_NO_CONNECT ||
- io_hdr->host_status == SG_ERR_DID_BUS_BUSY ||
- io_hdr->host_status == SG_ERR_DID_TIME_OUT ||
- (io_hdr->driver_status & SG_ERR_DRIVER_TIMEOUT)) {
- return BUSY;
- } else if (io_hdr->host_status) {
- *sense = SENSE_CODE(I_T_NEXUS_LOSS);
- return CHECK_CONDITION;
- } else if (io_hdr->status) {
- return io_hdr->status;
- } else if (io_hdr->driver_status & SG_ERR_DRIVER_SENSE) {
- return CHECK_CONDITION;
- } else {
- return GOOD;
- }
+ switch (host_status) {
+ case SCSI_HOST_NO_LUN:
+ *sense = SENSE_CODE(LUN_NOT_RESPONDING);
+ return CHECK_CONDITION;
+ case SCSI_HOST_BUSY:
+ return BUSY;
+ case SCSI_HOST_TIME_OUT:
+ *sense = SENSE_CODE(COMMAND_TIMEOUT);
+ return CHECK_CONDITION;
+ case SCSI_HOST_BAD_RESPONSE:
+ *sense = SENSE_CODE(LUN_COMM_FAILURE);
+ return CHECK_CONDITION;
+ case SCSI_HOST_ABORTED:
+ *sense = SENSE_CODE(COMMAND_ABORTED);
+ return CHECK_CONDITION;
+ case SCSI_HOST_RESET:
+ *sense = SENSE_CODE(RESET);
+ return CHECK_CONDITION;
+ case SCSI_HOST_TRANSPORT_DISRUPTED:
+ *sense = SENSE_CODE(I_T_NEXUS_LOSS);
+ return CHECK_CONDITION;
+ case SCSI_HOST_TARGET_FAILURE:
+ *sense = SENSE_CODE(TARGET_FAILURE);
+ return CHECK_CONDITION;
+ case SCSI_HOST_RESERVATION_ERROR:
+ return RESERVATION_CONFLICT;
+ case SCSI_HOST_ALLOCATION_FAILURE:
+ *sense = SENSE_CODE(SPACE_ALLOC_FAILED);
+ return CHECK_CONDITION;
+ case SCSI_HOST_MEDIUM_ERROR:
+ *sense = SENSE_CODE(READ_ERROR);
+ return CHECK_CONDITION;
}
+ return GOOD;
}
-#endif
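
Callers that used sg_io_sense_from_errno() now split errno handling from host-status handling, as in the scsi-generic.c and qemu-pr-helper.c hunks above. The resulting idiom, in outline:

    /*
     * Sketch: mapping an SG_IO host status to a SCSI status plus sense,
     * following the converted callers above.
     */
    SCSISense sense;
    int status = scsi_sense_from_host_status(io_hdr.host_status, &sense);

    if (status == CHECK_CONDITION) {
        scsi_req_build_sense(req, sense);   /* attach synthesized sense data */
    }
    scsi_req_complete(req, status);
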
diff --git a/softmmu/physmem.c b/softmmu/physmem.c
index 19e0aa9836..7e8b0fab89 100644
--- a/softmmu/physmem.c
+++ b/softmmu/physmem.c
@@ -937,7 +937,7 @@ void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
cpu_loop_exit_restore(cpu, ra);
} else {
/* Force execution of one insn next time. */
- cpu->cflags_next_tb = 1 | curr_cflags();
+ cpu->cflags_next_tb = 1 | curr_cflags(cpu);
mmap_unlock();
if (ra) {
cpu_restore_state(cpu, ra, true);
diff --git a/softmmu/vl.c b/softmmu/vl.c
index 10bd8a10a3..ff488ea3e7 100644
--- a/softmmu/vl.c
+++ b/softmmu/vl.c
@@ -2062,17 +2062,19 @@ static int global_init_func(void *opaque, QemuOpts *opts, Error **errp)
return 0;
}
-static int qemu_read_default_config_file(void)
+static void qemu_read_default_config_file(Error **errp)
{
+ ERRP_GUARD();
int ret;
g_autofree char *file = get_relocated_path(CONFIG_QEMU_CONFDIR "/qemu.conf");
- ret = qemu_read_config_file(file);
- if (ret < 0 && ret != -ENOENT) {
- return ret;
+ ret = qemu_read_config_file(file, errp);
+ if (ret < 0) {
+ if (ret == -ENOENT) {
+ error_free(*errp);
+ *errp = NULL;
+ }
}
-
- return 0;
}
static int qemu_set_option(const char *str)
@@ -2361,13 +2363,10 @@ static void qemu_process_early_options(void)
cleanup_add_fd, NULL, &error_fatal);
#endif
- if (!trace_init_backends()) {
- exit(1);
- }
- trace_init_file();
-
/* Open the logfile at this point and set the log mask if necessary. */
- qemu_set_log_filename(log_file, &error_fatal);
+ if (log_file) {
+ qemu_set_log_filename(log_file, &error_fatal);
+ }
if (log_mask) {
int mask;
mask = qemu_str_to_log_mask(log_mask);
@@ -2638,9 +2637,7 @@ void qemu_init(int argc, char **argv, char **envp)
}
if (userconfig) {
- if (qemu_read_default_config_file() < 0) {
- exit(1);
- }
+ qemu_read_default_config_file(&error_fatal);
}
/* second pass of option parsing */
@@ -3328,15 +3325,8 @@ void qemu_init(int argc, char **argv, char **envp)
qemu_plugin_opt_parse(optarg, &plugin_list);
break;
case QEMU_OPTION_readconfig:
- {
- int ret = qemu_read_config_file(optarg);
- if (ret < 0) {
- error_report("read config %s: %s", optarg,
- strerror(-ret));
- exit(1);
- }
- break;
- }
+ qemu_read_config_file(optarg, &error_fatal);
+ break;
case QEMU_OPTION_spice:
olist = qemu_find_opts_err("spice", NULL);
if (!olist) {
@@ -3475,6 +3465,19 @@ void qemu_init(int argc, char **argv, char **envp)
qemu_process_help_options();
qemu_maybe_daemonize(pid_file);
+ /*
+ * The trace backend must be initialized after daemonizing.
+ * trace_init_backends() will call st_init(), which will create the
+ * trace thread in the parent, and also register st_flush_trace_buffer()
+ * in atexit(). This function will force the parent to wait for the
+ * writeout thread to finish, which will not occur, and the parent
+ * process will be left in the host.
+ */
+ if (!trace_init_backends()) {
+ exit(1);
+ }
+ trace_init_file();
+
qemu_init_main_loop(&error_fatal);
cpu_timers_init();
diff --git a/target/hexagon/macros.h b/target/hexagon/macros.h
index 78c4efb5cb..cfcb8173ba 100644
--- a/target/hexagon/macros.h
+++ b/target/hexagon/macros.h
@@ -459,7 +459,7 @@ static inline void gen_logical_not(TCGv dest, TCGv src)
: (fCAST##REGSTYPE##s(SRC) >> (SHAMT)))
#define fASHIFTR(SRC, SHAMT, REGSTYPE) (fCAST##REGSTYPE##s(SRC) >> (SHAMT))
#define fLSHIFTR(SRC, SHAMT, REGSTYPE) \
- (((SHAMT) >= 64) ? 0 : (fCAST##REGSTYPE##u(SRC) >> (SHAMT)))
+ (((SHAMT) >= (sizeof(SRC) * 8)) ? 0 : (fCAST##REGSTYPE##u(SRC) >> (SHAMT)))
#define fROTL(SRC, SHAMT, REGSTYPE) \
(((SHAMT) == 0) ? (SRC) : ((fCAST##REGSTYPE##u(SRC) << (SHAMT)) | \
((fCAST##REGSTYPE##u(SRC) >> \
@@ -469,7 +469,7 @@ static inline void gen_logical_not(TCGv dest, TCGv src)
((fCAST##REGSTYPE##u(SRC) << \
((sizeof(SRC) * 8) - (SHAMT))))))
#define fASHIFTL(SRC, SHAMT, REGSTYPE) \
- (((SHAMT) >= 64) ? 0 : (fCAST##REGSTYPE##s(SRC) << (SHAMT)))
+ (((SHAMT) >= (sizeof(SRC) * 8)) ? 0 : (fCAST##REGSTYPE##s(SRC) << (SHAMT)))
#ifdef QEMU_GENERATE
#define fLOAD(NUM, SIZE, SIGN, EA, DST) MEM_LOAD##SIZE##SIGN(DST, EA)
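
The fLSHIFTR/fASHIFTL fix matters because C makes any shift by a count greater than or equal to the operand width undefined, so a hard-coded 64 does not protect 32-bit sources. A small standalone illustration:

    /* Sketch: the guard must track the operand width, not assume 64. */
    uint32_t v = 0x80000000u;
    unsigned shamt = 40;

    /* v >> shamt would be undefined behavior: 40 >= the 32-bit width. */
    uint32_t ok = (shamt >= sizeof(v) * 8) ? 0 : (v >> shamt);  /* yields 0 */
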
diff --git a/target/hexagon/opcodes.c b/target/hexagon/opcodes.c
index 4eef5fc40f..35d790cdd5 100644
--- a/target/hexagon/opcodes.c
+++ b/target/hexagon/opcodes.c
@@ -82,6 +82,7 @@ static void init_attribs(int tag, ...)
while ((attr = va_arg(ap, int)) != 0) {
set_bit(attr, opcode_attribs[tag]);
}
+ va_end(ap);
}
const OpcodeEncoding opcode_encodings[] = {
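
The opcodes.c hunk fixes a leak of varargs state: C requires every va_start() to be paired with a va_end() in the same function. A self-contained example of the corrected shape:

    #include <stdarg.h>

    /* Sum a zero-terminated argument list; va_end() is mandatory. */
    static int sum_args(int first, ...)
    {
        va_list ap;
        int total = first, v;

        va_start(ap, first);
        while ((v = va_arg(ap, int)) != 0) {
            total += v;
        }
        va_end(ap);
        return total;
    }
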
diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c
index 0b5755e42b..c8d61daf68 100644
--- a/target/i386/kvm/kvm.c
+++ b/target/i386/kvm/kvm.c
@@ -4352,8 +4352,13 @@ int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
uint8_t int3;
- if (cpu_memory_rw_debug(cs, bp->pc, &int3, 1, 0) || int3 != 0xcc ||
- cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 1, 1)) {
+ if (cpu_memory_rw_debug(cs, bp->pc, &int3, 1, 0)) {
+ return -EINVAL;
+ }
+ if (int3 != 0xcc) {
+ return 0;
+ }
+ if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 1, 1)) {
return -EINVAL;
}
return 0;
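
Splitting the compound condition changes the semantics deliberately: an unreadable guest address is still an error, but finding no INT3 at the breakpoint site now means the breakpoint is already gone, so removal succeeds without writing anything back. The decision structure, sketched with hypothetical guest-memory stubs:

    #include <stdint.h>

    #define X86_INT3 0xcc

    /* Hypothetical accessors; return 0 on success, nonzero on fault. */
    int guest_read_byte(uint64_t addr, uint8_t *val);
    int guest_write_byte(uint64_t addr, uint8_t val);

    int remove_sw_breakpoint(uint64_t pc, uint8_t saved_insn)
    {
        uint8_t byte;

        if (guest_read_byte(pc, &byte)) {
            return -1;       /* memory not accessible: real error      */
        }
        if (byte != X86_INT3) {
            return 0;        /* breakpoint already gone: nothing to do */
        }
        return guest_write_byte(pc, saved_insn) ? -1 : 0;
    }
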
diff --git a/target/sh4/cpu.h b/target/sh4/cpu.h
index 714e3b5641..01c4344082 100644
--- a/target/sh4/cpu.h
+++ b/target/sh4/cpu.h
@@ -271,17 +271,6 @@ typedef SuperHCPU ArchCPU;
#include "exec/cpu-all.h"
-/* Memory access type */
-enum {
- /* Privilege */
- ACCESS_PRIV = 0x01,
- /* Direction */
- ACCESS_WRITE = 0x02,
- /* Type of instruction */
- ACCESS_CODE = 0x10,
- ACCESS_INT = 0x20
-};
-
/* MMU control register */
#define MMUCR 0x1F000010
#define MMUCR_AT (1<<0)
diff --git a/target/sh4/helper.c b/target/sh4/helper.c
index 408478ce5d..bd8e034f17 100644
--- a/target/sh4/helper.c
+++ b/target/sh4/helper.c
@@ -330,22 +330,22 @@ static int find_utlb_entry(CPUSH4State * env, target_ulong address, int use_asid
MMU_IADDR_ERROR, MMU_DADDR_ERROR_READ, MMU_DADDR_ERROR_WRITE.
*/
static int get_mmu_address(CPUSH4State * env, target_ulong * physical,
- int *prot, target_ulong address,
- int rw, int access_type)
+ int *prot, target_ulong address,
+ MMUAccessType access_type)
{
int use_asid, n;
tlb_t *matching = NULL;
use_asid = !(env->mmucr & MMUCR_SV) || !(env->sr & (1u << SR_MD));
- if (rw == 2) {
+ if (access_type == MMU_INST_FETCH) {
n = find_itlb_entry(env, address, use_asid);
- if (n >= 0) {
- matching = &env->itlb[n];
+ if (n >= 0) {
+ matching = &env->itlb[n];
if (!(env->sr & (1u << SR_MD)) && !(matching->pr & 2)) {
- n = MMU_ITLB_VIOLATION;
+ n = MMU_ITLB_VIOLATION;
} else {
- *prot = PAGE_EXEC;
+ *prot = PAGE_EXEC;
}
} else {
n = find_utlb_entry(env, address, use_asid);
@@ -365,17 +365,17 @@ static int get_mmu_address(CPUSH4State * env, target_ulong * physical,
} else if (n == MMU_DTLB_MISS) {
n = MMU_ITLB_MISS;
}
- }
+ }
} else {
- n = find_utlb_entry(env, address, use_asid);
- if (n >= 0) {
- matching = &env->utlb[n];
+ n = find_utlb_entry(env, address, use_asid);
+ if (n >= 0) {
+ matching = &env->utlb[n];
if (!(env->sr & (1u << SR_MD)) && !(matching->pr & 2)) {
- n = (rw == 1) ? MMU_DTLB_VIOLATION_WRITE :
- MMU_DTLB_VIOLATION_READ;
- } else if ((rw == 1) && !(matching->pr & 1)) {
+ n = (access_type == MMU_DATA_STORE)
+ ? MMU_DTLB_VIOLATION_WRITE : MMU_DTLB_VIOLATION_READ;
+ } else if ((access_type == MMU_DATA_STORE) && !(matching->pr & 1)) {
n = MMU_DTLB_VIOLATION_WRITE;
- } else if ((rw == 1) && !matching->d) {
+ } else if ((access_type == MMU_DATA_STORE) && !matching->d) {
n = MMU_DTLB_INITIAL_WRITE;
} else {
*prot = PAGE_READ;
@@ -383,56 +383,56 @@ static int get_mmu_address(CPUSH4State * env, target_ulong * physical,
*prot |= PAGE_WRITE;
}
}
- } else if (n == MMU_DTLB_MISS) {
- n = (rw == 1) ? MMU_DTLB_MISS_WRITE :
- MMU_DTLB_MISS_READ;
- }
+ } else if (n == MMU_DTLB_MISS) {
+ n = (access_type == MMU_DATA_STORE)
+ ? MMU_DTLB_MISS_WRITE : MMU_DTLB_MISS_READ;
+ }
}
if (n >= 0) {
- n = MMU_OK;
- *physical = ((matching->ppn << 10) & ~(matching->size - 1)) |
- (address & (matching->size - 1));
+ n = MMU_OK;
+ *physical = ((matching->ppn << 10) & ~(matching->size - 1))
+ | (address & (matching->size - 1));
}
return n;
}
static int get_physical_address(CPUSH4State * env, target_ulong * physical,
int *prot, target_ulong address,
- int rw, int access_type)
+ MMUAccessType access_type)
{
/* P1, P2 and P4 areas do not use translation */
- if ((address >= 0x80000000 && address < 0xc0000000) ||
- address >= 0xe0000000) {
+ if ((address >= 0x80000000 && address < 0xc0000000) || address >= 0xe0000000) {
if (!(env->sr & (1u << SR_MD))
- && (address < 0xe0000000 || address >= 0xe4000000)) {
- /* Unauthorized access in user mode (only store queues are available) */
+ && (address < 0xe0000000 || address >= 0xe4000000)) {
+ /* Unauthorized access in user mode (only store queues are available) */
qemu_log_mask(LOG_GUEST_ERROR, "Unauthorized access\n");
- if (rw == 0)
- return MMU_DADDR_ERROR_READ;
- else if (rw == 1)
- return MMU_DADDR_ERROR_WRITE;
- else
- return MMU_IADDR_ERROR;
- }
- if (address >= 0x80000000 && address < 0xc0000000) {
- /* Mask upper 3 bits for P1 and P2 areas */
- *physical = address & 0x1fffffff;
- } else {
- *physical = address;
- }
- *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
- return MMU_OK;
+ if (access_type == MMU_DATA_LOAD) {
+ return MMU_DADDR_ERROR_READ;
+ } else if (access_type == MMU_DATA_STORE) {
+ return MMU_DADDR_ERROR_WRITE;
+ } else {
+ return MMU_IADDR_ERROR;
+ }
+ }
+ if (address >= 0x80000000 && address < 0xc0000000) {
+ /* Mask upper 3 bits for P1 and P2 areas */
+ *physical = address & 0x1fffffff;
+ } else {
+ *physical = address;
+ }
+ *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ return MMU_OK;
}
/* If MMU is disabled, return the corresponding physical page */
if (!(env->mmucr & MMUCR_AT)) {
- *physical = address & 0x1FFFFFFF;
- *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
- return MMU_OK;
+ *physical = address & 0x1FFFFFFF;
+ *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ return MMU_OK;
}
/* We need to resort to the MMU */
- return get_mmu_address(env, physical, prot, address, rw, access_type);
+ return get_mmu_address(env, physical, prot, address, access_type);
}
hwaddr superh_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
@@ -441,7 +441,8 @@ hwaddr superh_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
target_ulong physical;
int prot;
- get_physical_address(&cpu->env, &physical, &prot, addr, 0, 0);
+ get_physical_address(&cpu->env, &physical, &prot, addr, MMU_DATA_LOAD);
+
return physical;
}
@@ -813,11 +814,9 @@ bool superh_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMU_DTLB_VIOLATION_READ);
#else
target_ulong physical;
- int prot, sh_access_type;
+ int prot;
- sh_access_type = ACCESS_INT;
- ret = get_physical_address(env, &physical, &prot, address,
- access_type, sh_access_type);
+ ret = get_physical_address(env, &physical, &prot, address, access_type);
if (ret == MMU_OK) {
address &= TARGET_PAGE_MASK;
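
The sh4 hunks retire the private ACCESS_* flags and the magic rw values 0/1/2 in favor of the core MMUAccessType enum, so fault selection reads as intent. A sketch with a local mirror of the enum (the real definition lives in QEMU's core CPU headers):

    /* Local mirror of QEMU's MMUAccessType, for illustration only. */
    typedef enum {
        MMU_DATA_LOAD  = 0,
        MMU_DATA_STORE = 1,
        MMU_INST_FETCH = 2,
    } MMUAccessType;

    static const char *fault_name(MMUAccessType access_type)
    {
        if (access_type == MMU_DATA_LOAD) {
            return "MMU_DADDR_ERROR_READ";
        } else if (access_type == MMU_DATA_STORE) {
            return "MMU_DADDR_ERROR_WRITE";
        }
        return "MMU_IADDR_ERROR";   /* instruction fetch */
    }
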
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index 1376cdc404..fcaa5aface 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -519,6 +519,39 @@ typedef enum {
I3606_BIC = 0x2f001400,
I3606_ORR = 0x0f001400,
+ /* AdvSIMD scalar shift by immediate */
+ I3609_SSHR = 0x5f000400,
+ I3609_SSRA = 0x5f001400,
+ I3609_SHL = 0x5f005400,
+ I3609_USHR = 0x7f000400,
+ I3609_USRA = 0x7f001400,
+ I3609_SLI = 0x7f005400,
+
+ /* AdvSIMD scalar three same */
+ I3611_SQADD = 0x5e200c00,
+ I3611_SQSUB = 0x5e202c00,
+ I3611_CMGT = 0x5e203400,
+ I3611_CMGE = 0x5e203c00,
+ I3611_SSHL = 0x5e204400,
+ I3611_ADD = 0x5e208400,
+ I3611_CMTST = 0x5e208c00,
+ I3611_UQADD = 0x7e200c00,
+ I3611_UQSUB = 0x7e202c00,
+ I3611_CMHI = 0x7e203400,
+ I3611_CMHS = 0x7e203c00,
+ I3611_USHL = 0x7e204400,
+ I3611_SUB = 0x7e208400,
+ I3611_CMEQ = 0x7e208c00,
+
+ /* AdvSIMD scalar two-reg misc */
+ I3612_CMGT0 = 0x5e208800,
+ I3612_CMEQ0 = 0x5e209800,
+ I3612_CMLT0 = 0x5e20a800,
+ I3612_ABS = 0x5e20b800,
+ I3612_CMGE0 = 0x7e208800,
+ I3612_CMLE0 = 0x7e209800,
+ I3612_NEG = 0x7e20b800,
+
/* AdvSIMD shift by immediate */
I3614_SSHR = 0x0f000400,
I3614_SSRA = 0x0f001400,
@@ -561,7 +594,7 @@ typedef enum {
I3617_CMEQ0 = 0x0e209800,
I3617_CMLT0 = 0x0e20a800,
I3617_CMGE0 = 0x2e208800,
- I3617_CMLE0 = 0x2e20a800,
+ I3617_CMLE0 = 0x2e209800,
I3617_NOT = 0x2e205800,
I3617_ABS = 0x0e20b800,
I3617_NEG = 0x2e20b800,
@@ -735,6 +768,25 @@ static void tcg_out_insn_3606(TCGContext *s, AArch64Insn insn, bool q,
| (imm8 & 0xe0) << (16 - 5) | (imm8 & 0x1f) << 5);
}
+static void tcg_out_insn_3609(TCGContext *s, AArch64Insn insn,
+ TCGReg rd, TCGReg rn, unsigned immhb)
+{
+ tcg_out32(s, insn | immhb << 16 | (rn & 0x1f) << 5 | (rd & 0x1f));
+}
+
+static void tcg_out_insn_3611(TCGContext *s, AArch64Insn insn,
+ unsigned size, TCGReg rd, TCGReg rn, TCGReg rm)
+{
+ tcg_out32(s, insn | (size << 22) | (rm & 0x1f) << 16
+ | (rn & 0x1f) << 5 | (rd & 0x1f));
+}
+
+static void tcg_out_insn_3612(TCGContext *s, AArch64Insn insn,
+ unsigned size, TCGReg rd, TCGReg rn)
+{
+ tcg_out32(s, insn | (size << 22) | (rn & 0x1f) << 5 | (rd & 0x1f));
+}
+
static void tcg_out_insn_3614(TCGContext *s, AArch64Insn insn, bool q,
TCGReg rd, TCGReg rn, unsigned immhb)
{
@@ -1410,10 +1462,10 @@ static void tcg_out_addsubi(TCGContext *s, int ext, TCGReg rd,
}
}
-static inline void tcg_out_addsub2(TCGContext *s, TCGType ext, TCGReg rl,
- TCGReg rh, TCGReg al, TCGReg ah,
- tcg_target_long bl, tcg_target_long bh,
- bool const_bl, bool const_bh, bool sub)
+static void tcg_out_addsub2(TCGContext *s, TCGType ext, TCGReg rl,
+ TCGReg rh, TCGReg al, TCGReg ah,
+ tcg_target_long bl, tcg_target_long bh,
+ bool const_bl, bool const_bh, bool sub)
{
TCGReg orig_rl = rl;
AArch64Insn insn;
@@ -1423,11 +1475,13 @@ static inline void tcg_out_addsub2(TCGContext *s, TCGType ext, TCGReg rl,
}
if (const_bl) {
- insn = I3401_ADDSI;
- if ((bl < 0) ^ sub) {
- insn = I3401_SUBSI;
+ if (bl < 0) {
bl = -bl;
+ insn = sub ? I3401_ADDSI : I3401_SUBSI;
+ } else {
+ insn = sub ? I3401_SUBSI : I3401_ADDSI;
}
+
if (unlikely(al == TCG_REG_XZR)) {
/* ??? We want to allow al to be zero for the benefit of
negation via subtraction. However, that leaves open the
@@ -2234,23 +2288,38 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
unsigned vecl, unsigned vece,
const TCGArg *args, const int *const_args)
{
- static const AArch64Insn cmp_insn[16] = {
+ static const AArch64Insn cmp_vec_insn[16] = {
[TCG_COND_EQ] = I3616_CMEQ,
[TCG_COND_GT] = I3616_CMGT,
[TCG_COND_GE] = I3616_CMGE,
[TCG_COND_GTU] = I3616_CMHI,
[TCG_COND_GEU] = I3616_CMHS,
};
- static const AArch64Insn cmp0_insn[16] = {
+ static const AArch64Insn cmp_scalar_insn[16] = {
+ [TCG_COND_EQ] = I3611_CMEQ,
+ [TCG_COND_GT] = I3611_CMGT,
+ [TCG_COND_GE] = I3611_CMGE,
+ [TCG_COND_GTU] = I3611_CMHI,
+ [TCG_COND_GEU] = I3611_CMHS,
+ };
+ static const AArch64Insn cmp0_vec_insn[16] = {
[TCG_COND_EQ] = I3617_CMEQ0,
[TCG_COND_GT] = I3617_CMGT0,
[TCG_COND_GE] = I3617_CMGE0,
[TCG_COND_LT] = I3617_CMLT0,
[TCG_COND_LE] = I3617_CMLE0,
};
+ static const AArch64Insn cmp0_scalar_insn[16] = {
+ [TCG_COND_EQ] = I3612_CMEQ0,
+ [TCG_COND_GT] = I3612_CMGT0,
+ [TCG_COND_GE] = I3612_CMGE0,
+ [TCG_COND_LT] = I3612_CMLT0,
+ [TCG_COND_LE] = I3612_CMLE0,
+ };
TCGType type = vecl + TCG_TYPE_V64;
unsigned is_q = vecl;
+ bool is_scalar = !is_q && vece == MO_64;
TCGArg a0, a1, a2, a3;
int cmode, imm8;
@@ -2269,19 +2338,35 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
break;
case INDEX_op_add_vec:
- tcg_out_insn(s, 3616, ADD, is_q, vece, a0, a1, a2);
+ if (is_scalar) {
+ tcg_out_insn(s, 3611, ADD, vece, a0, a1, a2);
+ } else {
+ tcg_out_insn(s, 3616, ADD, is_q, vece, a0, a1, a2);
+ }
break;
case INDEX_op_sub_vec:
- tcg_out_insn(s, 3616, SUB, is_q, vece, a0, a1, a2);
+ if (is_scalar) {
+ tcg_out_insn(s, 3611, SUB, vece, a0, a1, a2);
+ } else {
+ tcg_out_insn(s, 3616, SUB, is_q, vece, a0, a1, a2);
+ }
break;
case INDEX_op_mul_vec:
tcg_out_insn(s, 3616, MUL, is_q, vece, a0, a1, a2);
break;
case INDEX_op_neg_vec:
- tcg_out_insn(s, 3617, NEG, is_q, vece, a0, a1);
+ if (is_scalar) {
+ tcg_out_insn(s, 3612, NEG, vece, a0, a1);
+ } else {
+ tcg_out_insn(s, 3617, NEG, is_q, vece, a0, a1);
+ }
break;
case INDEX_op_abs_vec:
- tcg_out_insn(s, 3617, ABS, is_q, vece, a0, a1);
+ if (is_scalar) {
+ tcg_out_insn(s, 3612, ABS, vece, a0, a1);
+ } else {
+ tcg_out_insn(s, 3617, ABS, is_q, vece, a0, a1);
+ }
break;
case INDEX_op_and_vec:
if (const_args[2]) {
@@ -2335,16 +2420,32 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
tcg_out_insn(s, 3616, EOR, is_q, 0, a0, a1, a2);
break;
case INDEX_op_ssadd_vec:
- tcg_out_insn(s, 3616, SQADD, is_q, vece, a0, a1, a2);
+ if (is_scalar) {
+ tcg_out_insn(s, 3611, SQADD, vece, a0, a1, a2);
+ } else {
+ tcg_out_insn(s, 3616, SQADD, is_q, vece, a0, a1, a2);
+ }
break;
case INDEX_op_sssub_vec:
- tcg_out_insn(s, 3616, SQSUB, is_q, vece, a0, a1, a2);
+ if (is_scalar) {
+ tcg_out_insn(s, 3611, SQSUB, vece, a0, a1, a2);
+ } else {
+ tcg_out_insn(s, 3616, SQSUB, is_q, vece, a0, a1, a2);
+ }
break;
case INDEX_op_usadd_vec:
- tcg_out_insn(s, 3616, UQADD, is_q, vece, a0, a1, a2);
+ if (is_scalar) {
+ tcg_out_insn(s, 3611, UQADD, vece, a0, a1, a2);
+ } else {
+ tcg_out_insn(s, 3616, UQADD, is_q, vece, a0, a1, a2);
+ }
break;
case INDEX_op_ussub_vec:
- tcg_out_insn(s, 3616, UQSUB, is_q, vece, a0, a1, a2);
+ if (is_scalar) {
+ tcg_out_insn(s, 3611, UQSUB, vece, a0, a1, a2);
+ } else {
+ tcg_out_insn(s, 3616, UQSUB, is_q, vece, a0, a1, a2);
+ }
break;
case INDEX_op_smax_vec:
tcg_out_insn(s, 3616, SMAX, is_q, vece, a0, a1, a2);
@@ -2362,22 +2463,46 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
tcg_out_insn(s, 3617, NOT, is_q, 0, a0, a1);
break;
case INDEX_op_shli_vec:
- tcg_out_insn(s, 3614, SHL, is_q, a0, a1, a2 + (8 << vece));
+ if (is_scalar) {
+ tcg_out_insn(s, 3609, SHL, a0, a1, a2 + (8 << vece));
+ } else {
+ tcg_out_insn(s, 3614, SHL, is_q, a0, a1, a2 + (8 << vece));
+ }
break;
case INDEX_op_shri_vec:
- tcg_out_insn(s, 3614, USHR, is_q, a0, a1, (16 << vece) - a2);
+ if (is_scalar) {
+ tcg_out_insn(s, 3609, USHR, a0, a1, (16 << vece) - a2);
+ } else {
+ tcg_out_insn(s, 3614, USHR, is_q, a0, a1, (16 << vece) - a2);
+ }
break;
case INDEX_op_sari_vec:
- tcg_out_insn(s, 3614, SSHR, is_q, a0, a1, (16 << vece) - a2);
+ if (is_scalar) {
+ tcg_out_insn(s, 3609, SSHR, a0, a1, (16 << vece) - a2);
+ } else {
+ tcg_out_insn(s, 3614, SSHR, is_q, a0, a1, (16 << vece) - a2);
+ }
break;
case INDEX_op_aa64_sli_vec:
- tcg_out_insn(s, 3614, SLI, is_q, a0, a2, args[3] + (8 << vece));
+ if (is_scalar) {
+ tcg_out_insn(s, 3609, SLI, a0, a2, args[3] + (8 << vece));
+ } else {
+ tcg_out_insn(s, 3614, SLI, is_q, a0, a2, args[3] + (8 << vece));
+ }
break;
case INDEX_op_shlv_vec:
- tcg_out_insn(s, 3616, USHL, is_q, vece, a0, a1, a2);
+ if (is_scalar) {
+ tcg_out_insn(s, 3611, USHL, vece, a0, a1, a2);
+ } else {
+ tcg_out_insn(s, 3616, USHL, is_q, vece, a0, a1, a2);
+ }
break;
case INDEX_op_aa64_sshl_vec:
- tcg_out_insn(s, 3616, SSHL, is_q, vece, a0, a1, a2);
+ if (is_scalar) {
+ tcg_out_insn(s, 3611, SSHL, vece, a0, a1, a2);
+ } else {
+ tcg_out_insn(s, 3616, SSHL, is_q, vece, a0, a1, a2);
+ }
break;
case INDEX_op_cmp_vec:
{
@@ -2386,30 +2511,58 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
if (cond == TCG_COND_NE) {
if (const_args[2]) {
- tcg_out_insn(s, 3616, CMTST, is_q, vece, a0, a1, a1);
+ if (is_scalar) {
+ tcg_out_insn(s, 3611, CMTST, vece, a0, a1, a1);
+ } else {
+ tcg_out_insn(s, 3616, CMTST, is_q, vece, a0, a1, a1);
+ }
} else {
- tcg_out_insn(s, 3616, CMEQ, is_q, vece, a0, a1, a2);
+ if (is_scalar) {
+ tcg_out_insn(s, 3611, CMEQ, vece, a0, a1, a2);
+ } else {
+ tcg_out_insn(s, 3616, CMEQ, is_q, vece, a0, a1, a2);
+ }
tcg_out_insn(s, 3617, NOT, is_q, 0, a0, a0);
}
} else {
if (const_args[2]) {
- insn = cmp0_insn[cond];
- if (insn) {
- tcg_out_insn_3617(s, insn, is_q, vece, a0, a1);
- break;
+ if (is_scalar) {
+ insn = cmp0_scalar_insn[cond];
+ if (insn) {
+ tcg_out_insn_3612(s, insn, vece, a0, a1);
+ break;
+ }
+ } else {
+ insn = cmp0_vec_insn[cond];
+ if (insn) {
+ tcg_out_insn_3617(s, insn, is_q, vece, a0, a1);
+ break;
+ }
}
tcg_out_dupi_vec(s, type, MO_8, TCG_VEC_TMP, 0);
a2 = TCG_VEC_TMP;
}
- insn = cmp_insn[cond];
- if (insn == 0) {
- TCGArg t;
- t = a1, a1 = a2, a2 = t;
- cond = tcg_swap_cond(cond);
- insn = cmp_insn[cond];
- tcg_debug_assert(insn != 0);
+ if (is_scalar) {
+ insn = cmp_scalar_insn[cond];
+ if (insn == 0) {
+ TCGArg t;
+ t = a1, a1 = a2, a2 = t;
+ cond = tcg_swap_cond(cond);
+ insn = cmp_scalar_insn[cond];
+ tcg_debug_assert(insn != 0);
+ }
+ tcg_out_insn_3611(s, insn, vece, a0, a1, a2);
+ } else {
+ insn = cmp_vec_insn[cond];
+ if (insn == 0) {
+ TCGArg t;
+ t = a1, a1 = a2, a2 = t;
+ cond = tcg_swap_cond(cond);
+ insn = cmp_vec_insn[cond];
+ tcg_debug_assert(insn != 0);
+ }
+ tcg_out_insn_3616(s, insn, is_q, vece, a0, a1, a2);
}
- tcg_out_insn_3616(s, insn, is_q, vece, a0, a1, a2);
}
}
break;
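
Beyond the CMLE0 encoding fix near the top, this hunk teaches the backend to emit AdvSIMD scalar forms (the new I3609/I3611/I3612 groups) whenever the operation is a single 64-bit element in a 64-bit register (is_scalar = !is_q && vece == MO_64). All three emitters share the same bit-packing scheme; a sketch of the "scalar three same" assembly, mirroring tcg_out_insn_3611() above:

    #include <stdint.h>

    /* Pack an AdvSIMD "scalar three same" word: base | size | Rm|Rn|Rd. */
    static uint32_t advsimd_3same(uint32_t base, unsigned size,
                                  unsigned rd, unsigned rn, unsigned rm)
    {
        return base | (size << 22) | (rm & 0x1f) << 16
                    | (rn & 0x1f) << 5 | (rd & 0x1f);
    }

    /* Example: scalar ADD d0, d1, d2 with I3611_ADD (0x5e208400) and
     * size = 3 (MO_64) packs to 0x5ee28420. */
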
diff --git a/tcg/tcg.c b/tcg/tcg.c
index 63a12b197b..2991112829 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -346,6 +346,12 @@ static void set_jmp_reset_offset(TCGContext *s, int which)
s->tb_jmp_reset_offset[which] = tcg_current_code_size(s);
}
+/* Signal overflow, starting over with fewer guest insns. */
+static void QEMU_NORETURN tcg_raise_tb_overflow(TCGContext *s)
+{
+ siglongjmp(s->jmp_trans, -2);
+}
+
#define C_PFX1(P, A) P##A
#define C_PFX2(P, A, B) P##A##_##B
#define C_PFX3(P, A, B, C) P##A##_##B##_##C
@@ -507,11 +513,21 @@ static void tcg_region_trees_init(void)
}
}
-static struct tcg_region_tree *tc_ptr_to_region_tree(const void *cp)
+static struct tcg_region_tree *tc_ptr_to_region_tree(const void *p)
{
- void *p = tcg_splitwx_to_rw(cp);
size_t region_idx;
+ /*
+ * Like tcg_splitwx_to_rw, with no assert. The pc may come from
+ * a signal handler over which the caller has no control.
+ */
+ if (!in_code_gen_buffer(p)) {
+ p -= tcg_splitwx_diff;
+ if (!in_code_gen_buffer(p)) {
+ return NULL;
+ }
+ }
+
if (p < region.start_aligned) {
region_idx = 0;
} else {
@@ -530,6 +546,7 @@ void tcg_tb_insert(TranslationBlock *tb)
{
struct tcg_region_tree *rt = tc_ptr_to_region_tree(tb->tc.ptr);
+ g_assert(rt != NULL);
qemu_mutex_lock(&rt->lock);
g_tree_insert(rt->tree, &tb->tc, tb);
qemu_mutex_unlock(&rt->lock);
@@ -539,6 +556,7 @@ void tcg_tb_remove(TranslationBlock *tb)
{
struct tcg_region_tree *rt = tc_ptr_to_region_tree(tb->tc.ptr);
+ g_assert(rt != NULL);
qemu_mutex_lock(&rt->lock);
g_tree_remove(rt->tree, &tb->tc);
qemu_mutex_unlock(&rt->lock);
@@ -555,6 +573,10 @@ TranslationBlock *tcg_tb_lookup(uintptr_t tc_ptr)
TranslationBlock *tb;
struct tb_tc s = { .ptr = (void *)tc_ptr };
+ if (rt == NULL) {
+ return NULL;
+ }
+
qemu_mutex_lock(&rt->lock);
tb = g_tree_lookup(rt->tree, &s);
qemu_mutex_unlock(&rt->lock);
@@ -1310,8 +1332,7 @@ static TCGTemp *tcg_temp_alloc(TCGContext *s)
int n = s->nb_temps++;
if (n >= TCG_MAX_TEMPS) {
- /* Signal overflow, starting over with fewer guest insns. */
- siglongjmp(s->jmp_trans, -2);
+ tcg_raise_tb_overflow(s);
}
return memset(&s->temps[n], 0, sizeof(TCGTemp));
}
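
tc_ptr_to_region_tree() previously asserted that its argument lay in the code-gen buffer, but a fault PC delivered by a signal handler can point anywhere; the hunk makes the rx-to-rw translation tolerant and lets tcg_tb_lookup() return NULL, while insert/remove keep a g_assert(rt != NULL). The translate-or-reject shape, as a standalone sketch with illustrative globals standing in for the real buffer bounds:

    #include <stdint.h>

    /* Stand-ins for QEMU's code_gen buffer bounds and the split-wx
     * rx->rw displacement. */
    static uintptr_t buf_start, buf_end, splitwx_diff;

    static int in_buffer(uintptr_t p)
    {
        return p >= buf_start && p < buf_end;
    }

    /* Return the RW alias of p, or 0 when p is not a generated-code
     * pointer at all (e.g. a fault PC from an unrelated library). */
    static uintptr_t to_rw_or_zero(uintptr_t p)
    {
        if (!in_buffer(p)) {
            p -= splitwx_diff;
            if (!in_buffer(p)) {
                return 0;
            }
        }
        return p;
    }
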
diff --git a/tcg/tci.c b/tcg/tci.c
index fb3c97aaf1..3ccd30c39c 100644
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -57,49 +57,6 @@ static tcg_target_ulong tci_read_reg(const tcg_target_ulong *regs, TCGReg index)
return regs[index];
}
-#if TCG_TARGET_HAS_ext8s_i32 || TCG_TARGET_HAS_ext8s_i64
-static int8_t tci_read_reg8s(const tcg_target_ulong *regs, TCGReg index)
-{
- return (int8_t)tci_read_reg(regs, index);
-}
-#endif
-
-#if TCG_TARGET_HAS_ext16s_i32 || TCG_TARGET_HAS_ext16s_i64
-static int16_t tci_read_reg16s(const tcg_target_ulong *regs, TCGReg index)
-{
- return (int16_t)tci_read_reg(regs, index);
-}
-#endif
-
-#if TCG_TARGET_REG_BITS == 64
-static int32_t tci_read_reg32s(const tcg_target_ulong *regs, TCGReg index)
-{
- return (int32_t)tci_read_reg(regs, index);
-}
-#endif
-
-static uint8_t tci_read_reg8(const tcg_target_ulong *regs, TCGReg index)
-{
- return (uint8_t)tci_read_reg(regs, index);
-}
-
-static uint16_t tci_read_reg16(const tcg_target_ulong *regs, TCGReg index)
-{
- return (uint16_t)tci_read_reg(regs, index);
-}
-
-static uint32_t tci_read_reg32(const tcg_target_ulong *regs, TCGReg index)
-{
- return (uint32_t)tci_read_reg(regs, index);
-}
-
-#if TCG_TARGET_REG_BITS == 64
-static uint64_t tci_read_reg64(const tcg_target_ulong *regs, TCGReg index)
-{
- return tci_read_reg(regs, index);
-}
-#endif
-
static void
tci_write_reg(tcg_target_ulong *regs, TCGReg index, tcg_target_ulong value)
{
@@ -169,78 +126,20 @@ tci_read_r(const tcg_target_ulong *regs, const uint8_t **tb_ptr)
return value;
}
-/* Read indexed register (8 bit) from bytecode. */
-static uint8_t tci_read_r8(const tcg_target_ulong *regs, const uint8_t **tb_ptr)
-{
- uint8_t value = tci_read_reg8(regs, **tb_ptr);
- *tb_ptr += 1;
- return value;
-}
-
-#if TCG_TARGET_HAS_ext8s_i32 || TCG_TARGET_HAS_ext8s_i64
-/* Read indexed register (8 bit signed) from bytecode. */
-static int8_t tci_read_r8s(const tcg_target_ulong *regs, const uint8_t **tb_ptr)
-{
- int8_t value = tci_read_reg8s(regs, **tb_ptr);
- *tb_ptr += 1;
- return value;
-}
-#endif
-
-/* Read indexed register (16 bit) from bytecode. */
-static uint16_t tci_read_r16(const tcg_target_ulong *regs,
- const uint8_t **tb_ptr)
-{
- uint16_t value = tci_read_reg16(regs, **tb_ptr);
- *tb_ptr += 1;
- return value;
-}
-
-#if TCG_TARGET_HAS_ext16s_i32 || TCG_TARGET_HAS_ext16s_i64
-/* Read indexed register (16 bit signed) from bytecode. */
-static int16_t tci_read_r16s(const tcg_target_ulong *regs,
- const uint8_t **tb_ptr)
-{
- int16_t value = tci_read_reg16s(regs, **tb_ptr);
- *tb_ptr += 1;
- return value;
-}
-#endif
-
-/* Read indexed register (32 bit) from bytecode. */
-static uint32_t tci_read_r32(const tcg_target_ulong *regs,
- const uint8_t **tb_ptr)
-{
- uint32_t value = tci_read_reg32(regs, **tb_ptr);
- *tb_ptr += 1;
- return value;
-}
-
#if TCG_TARGET_REG_BITS == 32
/* Read two indexed registers (2 * 32 bit) from bytecode. */
static uint64_t tci_read_r64(const tcg_target_ulong *regs,
const uint8_t **tb_ptr)
{
- uint32_t low = tci_read_r32(regs, tb_ptr);
- return tci_uint64(tci_read_r32(regs, tb_ptr), low);
+ uint32_t low = tci_read_r(regs, tb_ptr);
+ return tci_uint64(tci_read_r(regs, tb_ptr), low);
}
#elif TCG_TARGET_REG_BITS == 64
-/* Read indexed register (32 bit signed) from bytecode. */
-static int32_t tci_read_r32s(const tcg_target_ulong *regs,
- const uint8_t **tb_ptr)
-{
- int32_t value = tci_read_reg32s(regs, **tb_ptr);
- *tb_ptr += 1;
- return value;
-}
-
/* Read indexed register (64 bit) from bytecode. */
static uint64_t tci_read_r64(const tcg_target_ulong *regs,
const uint8_t **tb_ptr)
{
- uint64_t value = tci_read_reg64(regs, **tb_ptr);
- *tb_ptr += 1;
- return value;
+ return tci_read_r(regs, tb_ptr);
}
#endif
@@ -346,51 +245,34 @@ static bool tci_compare64(uint64_t u0, uint64_t u1, TCGCond condition)
return result;
}
-#ifdef CONFIG_SOFTMMU
-# define qemu_ld_ub \
- helper_ret_ldub_mmu(env, taddr, oi, (uintptr_t)tb_ptr)
-# define qemu_ld_leuw \
- helper_le_lduw_mmu(env, taddr, oi, (uintptr_t)tb_ptr)
-# define qemu_ld_leul \
- helper_le_ldul_mmu(env, taddr, oi, (uintptr_t)tb_ptr)
-# define qemu_ld_leq \
- helper_le_ldq_mmu(env, taddr, oi, (uintptr_t)tb_ptr)
-# define qemu_ld_beuw \
- helper_be_lduw_mmu(env, taddr, oi, (uintptr_t)tb_ptr)
-# define qemu_ld_beul \
- helper_be_ldul_mmu(env, taddr, oi, (uintptr_t)tb_ptr)
-# define qemu_ld_beq \
- helper_be_ldq_mmu(env, taddr, oi, (uintptr_t)tb_ptr)
-# define qemu_st_b(X) \
- helper_ret_stb_mmu(env, taddr, X, oi, (uintptr_t)tb_ptr)
-# define qemu_st_lew(X) \
- helper_le_stw_mmu(env, taddr, X, oi, (uintptr_t)tb_ptr)
-# define qemu_st_lel(X) \
- helper_le_stl_mmu(env, taddr, X, oi, (uintptr_t)tb_ptr)
-# define qemu_st_leq(X) \
- helper_le_stq_mmu(env, taddr, X, oi, (uintptr_t)tb_ptr)
-# define qemu_st_bew(X) \
- helper_be_stw_mmu(env, taddr, X, oi, (uintptr_t)tb_ptr)
-# define qemu_st_bel(X) \
- helper_be_stl_mmu(env, taddr, X, oi, (uintptr_t)tb_ptr)
-# define qemu_st_beq(X) \
- helper_be_stq_mmu(env, taddr, X, oi, (uintptr_t)tb_ptr)
-#else
-# define qemu_ld_ub ldub_p(g2h(taddr))
-# define qemu_ld_leuw lduw_le_p(g2h(taddr))
-# define qemu_ld_leul (uint32_t)ldl_le_p(g2h(taddr))
-# define qemu_ld_leq ldq_le_p(g2h(taddr))
-# define qemu_ld_beuw lduw_be_p(g2h(taddr))
-# define qemu_ld_beul (uint32_t)ldl_be_p(g2h(taddr))
-# define qemu_ld_beq ldq_be_p(g2h(taddr))
-# define qemu_st_b(X) stb_p(g2h(taddr), X)
-# define qemu_st_lew(X) stw_le_p(g2h(taddr), X)
-# define qemu_st_lel(X) stl_le_p(g2h(taddr), X)
-# define qemu_st_leq(X) stq_le_p(g2h(taddr), X)
-# define qemu_st_bew(X) stw_be_p(g2h(taddr), X)
-# define qemu_st_bel(X) stl_be_p(g2h(taddr), X)
-# define qemu_st_beq(X) stq_be_p(g2h(taddr), X)
-#endif
+#define qemu_ld_ub \
+ cpu_ldub_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr)
+#define qemu_ld_leuw \
+ cpu_lduw_le_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr)
+#define qemu_ld_leul \
+ cpu_ldl_le_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr)
+#define qemu_ld_leq \
+ cpu_ldq_le_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr)
+#define qemu_ld_beuw \
+ cpu_lduw_be_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr)
+#define qemu_ld_beul \
+ cpu_ldl_be_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr)
+#define qemu_ld_beq \
+ cpu_ldq_be_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr)
+#define qemu_st_b(X) \
+ cpu_stb_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr)
+#define qemu_st_lew(X) \
+ cpu_stw_le_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr)
+#define qemu_st_lel(X) \
+ cpu_stl_le_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr)
+#define qemu_st_leq(X) \
+ cpu_stq_le_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr)
+#define qemu_st_bew(X) \
+ cpu_stw_be_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr)
+#define qemu_st_bel(X) \
+ cpu_stl_be_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr)
+#define qemu_st_beq(X) \
+ cpu_stq_be_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr)
#if TCG_TARGET_REG_BITS == 64
# define CASE_32_64(x) \
@@ -483,8 +365,8 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
continue;
case INDEX_op_setcond_i32:
t0 = *tb_ptr++;
- t1 = tci_read_r32(regs, &tb_ptr);
- t2 = tci_read_r32(regs, &tb_ptr);
+ t1 = tci_read_r(regs, &tb_ptr);
+ t2 = tci_read_r(regs, &tb_ptr);
condition = *tb_ptr++;
tci_write_reg(regs, t0, tci_compare32(t1, t2, condition));
break;
@@ -499,15 +381,15 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
#elif TCG_TARGET_REG_BITS == 64
case INDEX_op_setcond_i64:
t0 = *tb_ptr++;
- t1 = tci_read_r64(regs, &tb_ptr);
- t2 = tci_read_r64(regs, &tb_ptr);
+ t1 = tci_read_r(regs, &tb_ptr);
+ t2 = tci_read_r(regs, &tb_ptr);
condition = *tb_ptr++;
tci_write_reg(regs, t0, tci_compare64(t1, t2, condition));
break;
#endif
- case INDEX_op_mov_i32:
+ CASE_32_64(mov)
t0 = *tb_ptr++;
- t1 = tci_read_r32(regs, &tb_ptr);
+ t1 = tci_read_r(regs, &tb_ptr);
tci_write_reg(regs, t0, t1);
break;
case INDEX_op_tci_movi_i32:
@@ -550,127 +432,130 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
tci_write_reg(regs, t0, *(uint32_t *)(t1 + t2));
break;
CASE_32_64(st8)
- t0 = tci_read_r8(regs, &tb_ptr);
+ t0 = tci_read_r(regs, &tb_ptr);
t1 = tci_read_r(regs, &tb_ptr);
t2 = tci_read_s32(&tb_ptr);
*(uint8_t *)(t1 + t2) = t0;
break;
CASE_32_64(st16)
- t0 = tci_read_r16(regs, &tb_ptr);
+ t0 = tci_read_r(regs, &tb_ptr);
t1 = tci_read_r(regs, &tb_ptr);
t2 = tci_read_s32(&tb_ptr);
*(uint16_t *)(t1 + t2) = t0;
break;
case INDEX_op_st_i32:
CASE_64(st32)
- t0 = tci_read_r32(regs, &tb_ptr);
+ t0 = tci_read_r(regs, &tb_ptr);
t1 = tci_read_r(regs, &tb_ptr);
t2 = tci_read_s32(&tb_ptr);
*(uint32_t *)(t1 + t2) = t0;
break;
- /* Arithmetic operations (32 bit). */
+ /* Arithmetic operations (mixed 32/64 bit). */
- case INDEX_op_add_i32:
+ CASE_32_64(add)
t0 = *tb_ptr++;
- t1 = tci_read_r32(regs, &tb_ptr);
- t2 = tci_read_r32(regs, &tb_ptr);
+ t1 = tci_read_r(regs, &tb_ptr);
+ t2 = tci_read_r(regs, &tb_ptr);
tci_write_reg(regs, t0, t1 + t2);
break;
- case INDEX_op_sub_i32:
+ CASE_32_64(sub)
t0 = *tb_ptr++;
- t1 = tci_read_r32(regs, &tb_ptr);
- t2 = tci_read_r32(regs, &tb_ptr);
+ t1 = tci_read_r(regs, &tb_ptr);
+ t2 = tci_read_r(regs, &tb_ptr);
tci_write_reg(regs, t0, t1 - t2);
break;
- case INDEX_op_mul_i32:
+ CASE_32_64(mul)
t0 = *tb_ptr++;
- t1 = tci_read_r32(regs, &tb_ptr);
- t2 = tci_read_r32(regs, &tb_ptr);
+ t1 = tci_read_r(regs, &tb_ptr);
+ t2 = tci_read_r(regs, &tb_ptr);
tci_write_reg(regs, t0, t1 * t2);
break;
- case INDEX_op_div_i32:
+ CASE_32_64(and)
t0 = *tb_ptr++;
- t1 = tci_read_r32(regs, &tb_ptr);
- t2 = tci_read_r32(regs, &tb_ptr);
- tci_write_reg(regs, t0, (int32_t)t1 / (int32_t)t2);
+ t1 = tci_read_r(regs, &tb_ptr);
+ t2 = tci_read_r(regs, &tb_ptr);
+ tci_write_reg(regs, t0, t1 & t2);
break;
- case INDEX_op_divu_i32:
+ CASE_32_64(or)
t0 = *tb_ptr++;
- t1 = tci_read_r32(regs, &tb_ptr);
- t2 = tci_read_r32(regs, &tb_ptr);
- tci_write_reg(regs, t0, t1 / t2);
+ t1 = tci_read_r(regs, &tb_ptr);
+ t2 = tci_read_r(regs, &tb_ptr);
+ tci_write_reg(regs, t0, t1 | t2);
break;
- case INDEX_op_rem_i32:
+ CASE_32_64(xor)
t0 = *tb_ptr++;
- t1 = tci_read_r32(regs, &tb_ptr);
- t2 = tci_read_r32(regs, &tb_ptr);
- tci_write_reg(regs, t0, (int32_t)t1 % (int32_t)t2);
+ t1 = tci_read_r(regs, &tb_ptr);
+ t2 = tci_read_r(regs, &tb_ptr);
+ tci_write_reg(regs, t0, t1 ^ t2);
break;
- case INDEX_op_remu_i32:
+
+ /* Arithmetic operations (32 bit). */
+
+ case INDEX_op_div_i32:
t0 = *tb_ptr++;
- t1 = tci_read_r32(regs, &tb_ptr);
- t2 = tci_read_r32(regs, &tb_ptr);
- tci_write_reg(regs, t0, t1 % t2);
+ t1 = tci_read_r(regs, &tb_ptr);
+ t2 = tci_read_r(regs, &tb_ptr);
+ tci_write_reg(regs, t0, (int32_t)t1 / (int32_t)t2);
break;
- case INDEX_op_and_i32:
+ case INDEX_op_divu_i32:
t0 = *tb_ptr++;
- t1 = tci_read_r32(regs, &tb_ptr);
- t2 = tci_read_r32(regs, &tb_ptr);
- tci_write_reg(regs, t0, t1 & t2);
+ t1 = tci_read_r(regs, &tb_ptr);
+ t2 = tci_read_r(regs, &tb_ptr);
+ tci_write_reg(regs, t0, (uint32_t)t1 / (uint32_t)t2);
break;
- case INDEX_op_or_i32:
+ case INDEX_op_rem_i32:
t0 = *tb_ptr++;
- t1 = tci_read_r32(regs, &tb_ptr);
- t2 = tci_read_r32(regs, &tb_ptr);
- tci_write_reg(regs, t0, t1 | t2);
+ t1 = tci_read_r(regs, &tb_ptr);
+ t2 = tci_read_r(regs, &tb_ptr);
+ tci_write_reg(regs, t0, (int32_t)t1 % (int32_t)t2);
break;
- case INDEX_op_xor_i32:
+ case INDEX_op_remu_i32:
t0 = *tb_ptr++;
- t1 = tci_read_r32(regs, &tb_ptr);
- t2 = tci_read_r32(regs, &tb_ptr);
- tci_write_reg(regs, t0, t1 ^ t2);
+ t1 = tci_read_r(regs, &tb_ptr);
+ t2 = tci_read_r(regs, &tb_ptr);
+ tci_write_reg(regs, t0, (uint32_t)t1 % (uint32_t)t2);
break;
/* Shift/rotate operations (32 bit). */
case INDEX_op_shl_i32:
t0 = *tb_ptr++;
- t1 = tci_read_r32(regs, &tb_ptr);
- t2 = tci_read_r32(regs, &tb_ptr);
- tci_write_reg(regs, t0, t1 << (t2 & 31));
+ t1 = tci_read_r(regs, &tb_ptr);
+ t2 = tci_read_r(regs, &tb_ptr);
+ tci_write_reg(regs, t0, (uint32_t)t1 << (t2 & 31));
break;
case INDEX_op_shr_i32:
t0 = *tb_ptr++;
- t1 = tci_read_r32(regs, &tb_ptr);
- t2 = tci_read_r32(regs, &tb_ptr);
- tci_write_reg(regs, t0, t1 >> (t2 & 31));
+ t1 = tci_read_r(regs, &tb_ptr);
+ t2 = tci_read_r(regs, &tb_ptr);
+ tci_write_reg(regs, t0, (uint32_t)t1 >> (t2 & 31));
break;
case INDEX_op_sar_i32:
t0 = *tb_ptr++;
- t1 = tci_read_r32(regs, &tb_ptr);
- t2 = tci_read_r32(regs, &tb_ptr);
- tci_write_reg(regs, t0, ((int32_t)t1 >> (t2 & 31)));
+ t1 = tci_read_r(regs, &tb_ptr);
+ t2 = tci_read_r(regs, &tb_ptr);
+ tci_write_reg(regs, t0, (int32_t)t1 >> (t2 & 31));
break;
#if TCG_TARGET_HAS_rot_i32
case INDEX_op_rotl_i32:
t0 = *tb_ptr++;
- t1 = tci_read_r32(regs, &tb_ptr);
- t2 = tci_read_r32(regs, &tb_ptr);
+ t1 = tci_read_r(regs, &tb_ptr);
+ t2 = tci_read_r(regs, &tb_ptr);
tci_write_reg(regs, t0, rol32(t1, t2 & 31));
break;
case INDEX_op_rotr_i32:
t0 = *tb_ptr++;
- t1 = tci_read_r32(regs, &tb_ptr);
- t2 = tci_read_r32(regs, &tb_ptr);
+ t1 = tci_read_r(regs, &tb_ptr);
+ t2 = tci_read_r(regs, &tb_ptr);
tci_write_reg(regs, t0, ror32(t1, t2 & 31));
break;
#endif
#if TCG_TARGET_HAS_deposit_i32
case INDEX_op_deposit_i32:
t0 = *tb_ptr++;
- t1 = tci_read_r32(regs, &tb_ptr);
- t2 = tci_read_r32(regs, &tb_ptr);
+ t1 = tci_read_r(regs, &tb_ptr);
+ t2 = tci_read_r(regs, &tb_ptr);
tmp16 = *tb_ptr++;
tmp8 = *tb_ptr++;
tmp32 = (((1 << tmp8) - 1) << tmp16);
@@ -678,8 +563,8 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
break;
#endif
case INDEX_op_brcond_i32:
- t0 = tci_read_r32(regs, &tb_ptr);
- t1 = tci_read_r32(regs, &tb_ptr);
+ t0 = tci_read_r(regs, &tb_ptr);
+ t1 = tci_read_r(regs, &tb_ptr);
condition = *tb_ptr++;
label = tci_read_label(&tb_ptr);
if (tci_compare32(t0, t1, condition)) {
@@ -717,73 +602,68 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
case INDEX_op_mulu2_i32:
t0 = *tb_ptr++;
t1 = *tb_ptr++;
- t2 = tci_read_r32(regs, &tb_ptr);
- tmp64 = tci_read_r32(regs, &tb_ptr);
- tci_write_reg64(regs, t1, t0, t2 * tmp64);
+ t2 = tci_read_r(regs, &tb_ptr);
+ tmp64 = (uint32_t)tci_read_r(regs, &tb_ptr);
+ tci_write_reg64(regs, t1, t0, (uint32_t)t2 * tmp64);
break;
#endif /* TCG_TARGET_REG_BITS == 32 */
-#if TCG_TARGET_HAS_ext8s_i32
- case INDEX_op_ext8s_i32:
+#if TCG_TARGET_HAS_ext8s_i32 || TCG_TARGET_HAS_ext8s_i64
+ CASE_32_64(ext8s)
t0 = *tb_ptr++;
- t1 = tci_read_r8s(regs, &tb_ptr);
- tci_write_reg(regs, t0, t1);
+ t1 = tci_read_r(regs, &tb_ptr);
+ tci_write_reg(regs, t0, (int8_t)t1);
break;
#endif
-#if TCG_TARGET_HAS_ext16s_i32
- case INDEX_op_ext16s_i32:
+#if TCG_TARGET_HAS_ext16s_i32 || TCG_TARGET_HAS_ext16s_i64
+ CASE_32_64(ext16s)
t0 = *tb_ptr++;
- t1 = tci_read_r16s(regs, &tb_ptr);
- tci_write_reg(regs, t0, t1);
+ t1 = tci_read_r(regs, &tb_ptr);
+ tci_write_reg(regs, t0, (int16_t)t1);
break;
#endif
-#if TCG_TARGET_HAS_ext8u_i32
- case INDEX_op_ext8u_i32:
+#if TCG_TARGET_HAS_ext8u_i32 || TCG_TARGET_HAS_ext8u_i64
+ CASE_32_64(ext8u)
t0 = *tb_ptr++;
- t1 = tci_read_r8(regs, &tb_ptr);
- tci_write_reg(regs, t0, t1);
+ t1 = tci_read_r(regs, &tb_ptr);
+ tci_write_reg(regs, t0, (uint8_t)t1);
break;
#endif
-#if TCG_TARGET_HAS_ext16u_i32
- case INDEX_op_ext16u_i32:
+#if TCG_TARGET_HAS_ext16u_i32 || TCG_TARGET_HAS_ext16u_i64
+ CASE_32_64(ext16u)
t0 = *tb_ptr++;
- t1 = tci_read_r16(regs, &tb_ptr);
- tci_write_reg(regs, t0, t1);
+ t1 = tci_read_r(regs, &tb_ptr);
+ tci_write_reg(regs, t0, (uint16_t)t1);
break;
#endif
-#if TCG_TARGET_HAS_bswap16_i32
- case INDEX_op_bswap16_i32:
+#if TCG_TARGET_HAS_bswap16_i32 || TCG_TARGET_HAS_bswap16_i64
+ CASE_32_64(bswap16)
t0 = *tb_ptr++;
- t1 = tci_read_r16(regs, &tb_ptr);
+ t1 = tci_read_r(regs, &tb_ptr);
tci_write_reg(regs, t0, bswap16(t1));
break;
#endif
-#if TCG_TARGET_HAS_bswap32_i32
- case INDEX_op_bswap32_i32:
+#if TCG_TARGET_HAS_bswap32_i32 || TCG_TARGET_HAS_bswap32_i64
+ CASE_32_64(bswap32)
t0 = *tb_ptr++;
- t1 = tci_read_r32(regs, &tb_ptr);
+ t1 = tci_read_r(regs, &tb_ptr);
tci_write_reg(regs, t0, bswap32(t1));
break;
#endif
-#if TCG_TARGET_HAS_not_i32
- case INDEX_op_not_i32:
+#if TCG_TARGET_HAS_not_i32 || TCG_TARGET_HAS_not_i64
+ CASE_32_64(not)
t0 = *tb_ptr++;
- t1 = tci_read_r32(regs, &tb_ptr);
+ t1 = tci_read_r(regs, &tb_ptr);
tci_write_reg(regs, t0, ~t1);
break;
#endif
-#if TCG_TARGET_HAS_neg_i32
- case INDEX_op_neg_i32:
+#if TCG_TARGET_HAS_neg_i32 || TCG_TARGET_HAS_neg_i64
+ CASE_32_64(neg)
t0 = *tb_ptr++;
- t1 = tci_read_r32(regs, &tb_ptr);
+ t1 = tci_read_r(regs, &tb_ptr);
tci_write_reg(regs, t0, -t1);
break;
#endif
#if TCG_TARGET_REG_BITS == 64
- case INDEX_op_mov_i64:
- t0 = *tb_ptr++;
- t1 = tci_read_r64(regs, &tb_ptr);
- tci_write_reg(regs, t0, t1);
- break;
case INDEX_op_tci_movi_i64:
t0 = *tb_ptr++;
t1 = tci_read_i64(&tb_ptr);
@@ -805,7 +685,7 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
tci_write_reg(regs, t0, *(uint64_t *)(t1 + t2));
break;
case INDEX_op_st_i64:
- t0 = tci_read_r64(regs, &tb_ptr);
+ t0 = tci_read_r(regs, &tb_ptr);
t1 = tci_read_r(regs, &tb_ptr);
t2 = tci_read_s32(&tb_ptr);
*(uint64_t *)(t1 + t2) = t0;
@@ -813,106 +693,70 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
/* Arithmetic operations (64 bit). */
- case INDEX_op_add_i64:
- t0 = *tb_ptr++;
- t1 = tci_read_r64(regs, &tb_ptr);
- t2 = tci_read_r64(regs, &tb_ptr);
- tci_write_reg(regs, t0, t1 + t2);
- break;
- case INDEX_op_sub_i64:
- t0 = *tb_ptr++;
- t1 = tci_read_r64(regs, &tb_ptr);
- t2 = tci_read_r64(regs, &tb_ptr);
- tci_write_reg(regs, t0, t1 - t2);
- break;
- case INDEX_op_mul_i64:
- t0 = *tb_ptr++;
- t1 = tci_read_r64(regs, &tb_ptr);
- t2 = tci_read_r64(regs, &tb_ptr);
- tci_write_reg(regs, t0, t1 * t2);
- break;
case INDEX_op_div_i64:
t0 = *tb_ptr++;
- t1 = tci_read_r64(regs, &tb_ptr);
- t2 = tci_read_r64(regs, &tb_ptr);
+ t1 = tci_read_r(regs, &tb_ptr);
+ t2 = tci_read_r(regs, &tb_ptr);
tci_write_reg(regs, t0, (int64_t)t1 / (int64_t)t2);
break;
case INDEX_op_divu_i64:
t0 = *tb_ptr++;
- t1 = tci_read_r64(regs, &tb_ptr);
- t2 = tci_read_r64(regs, &tb_ptr);
+ t1 = tci_read_r(regs, &tb_ptr);
+ t2 = tci_read_r(regs, &tb_ptr);
tci_write_reg(regs, t0, (uint64_t)t1 / (uint64_t)t2);
break;
case INDEX_op_rem_i64:
t0 = *tb_ptr++;
- t1 = tci_read_r64(regs, &tb_ptr);
- t2 = tci_read_r64(regs, &tb_ptr);
+ t1 = tci_read_r(regs, &tb_ptr);
+ t2 = tci_read_r(regs, &tb_ptr);
tci_write_reg(regs, t0, (int64_t)t1 % (int64_t)t2);
break;
case INDEX_op_remu_i64:
t0 = *tb_ptr++;
- t1 = tci_read_r64(regs, &tb_ptr);
- t2 = tci_read_r64(regs, &tb_ptr);
+ t1 = tci_read_r(regs, &tb_ptr);
+ t2 = tci_read_r(regs, &tb_ptr);
tci_write_reg(regs, t0, (uint64_t)t1 % (uint64_t)t2);
break;
- case INDEX_op_and_i64:
- t0 = *tb_ptr++;
- t1 = tci_read_r64(regs, &tb_ptr);
- t2 = tci_read_r64(regs, &tb_ptr);
- tci_write_reg(regs, t0, t1 & t2);
- break;
- case INDEX_op_or_i64:
- t0 = *tb_ptr++;
- t1 = tci_read_r64(regs, &tb_ptr);
- t2 = tci_read_r64(regs, &tb_ptr);
- tci_write_reg(regs, t0, t1 | t2);
- break;
- case INDEX_op_xor_i64:
- t0 = *tb_ptr++;
- t1 = tci_read_r64(regs, &tb_ptr);
- t2 = tci_read_r64(regs, &tb_ptr);
- tci_write_reg(regs, t0, t1 ^ t2);
- break;
/* Shift/rotate operations (64 bit). */
case INDEX_op_shl_i64:
t0 = *tb_ptr++;
- t1 = tci_read_r64(regs, &tb_ptr);
- t2 = tci_read_r64(regs, &tb_ptr);
+ t1 = tci_read_r(regs, &tb_ptr);
+ t2 = tci_read_r(regs, &tb_ptr);
tci_write_reg(regs, t0, t1 << (t2 & 63));
break;
case INDEX_op_shr_i64:
t0 = *tb_ptr++;
- t1 = tci_read_r64(regs, &tb_ptr);
- t2 = tci_read_r64(regs, &tb_ptr);
+ t1 = tci_read_r(regs, &tb_ptr);
+ t2 = tci_read_r(regs, &tb_ptr);
tci_write_reg(regs, t0, t1 >> (t2 & 63));
break;
case INDEX_op_sar_i64:
t0 = *tb_ptr++;
- t1 = tci_read_r64(regs, &tb_ptr);
- t2 = tci_read_r64(regs, &tb_ptr);
+ t1 = tci_read_r(regs, &tb_ptr);
+ t2 = tci_read_r(regs, &tb_ptr);
tci_write_reg(regs, t0, ((int64_t)t1 >> (t2 & 63)));
break;
#if TCG_TARGET_HAS_rot_i64
case INDEX_op_rotl_i64:
t0 = *tb_ptr++;
- t1 = tci_read_r64(regs, &tb_ptr);
- t2 = tci_read_r64(regs, &tb_ptr);
+ t1 = tci_read_r(regs, &tb_ptr);
+ t2 = tci_read_r(regs, &tb_ptr);
tci_write_reg(regs, t0, rol64(t1, t2 & 63));
break;
case INDEX_op_rotr_i64:
t0 = *tb_ptr++;
- t1 = tci_read_r64(regs, &tb_ptr);
- t2 = tci_read_r64(regs, &tb_ptr);
+ t1 = tci_read_r(regs, &tb_ptr);
+ t2 = tci_read_r(regs, &tb_ptr);
tci_write_reg(regs, t0, ror64(t1, t2 & 63));
break;
#endif
#if TCG_TARGET_HAS_deposit_i64
case INDEX_op_deposit_i64:
t0 = *tb_ptr++;
- t1 = tci_read_r64(regs, &tb_ptr);
- t2 = tci_read_r64(regs, &tb_ptr);
+ t1 = tci_read_r(regs, &tb_ptr);
+ t2 = tci_read_r(regs, &tb_ptr);
tmp16 = *tb_ptr++;
tmp8 = *tb_ptr++;
tmp64 = (((1ULL << tmp8) - 1) << tmp16);
@@ -920,8 +764,8 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
break;
#endif
case INDEX_op_brcond_i64:
- t0 = tci_read_r64(regs, &tb_ptr);
- t1 = tci_read_r64(regs, &tb_ptr);
+ t0 = tci_read_r(regs, &tb_ptr);
+ t1 = tci_read_r(regs, &tb_ptr);
condition = *tb_ptr++;
label = tci_read_label(&tb_ptr);
if (tci_compare64(t0, t1, condition)) {
@@ -930,85 +774,29 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
continue;
}
break;
-#if TCG_TARGET_HAS_ext8u_i64
- case INDEX_op_ext8u_i64:
- t0 = *tb_ptr++;
- t1 = tci_read_r8(regs, &tb_ptr);
- tci_write_reg(regs, t0, t1);
- break;
-#endif
-#if TCG_TARGET_HAS_ext8s_i64
- case INDEX_op_ext8s_i64:
- t0 = *tb_ptr++;
- t1 = tci_read_r8s(regs, &tb_ptr);
- tci_write_reg(regs, t0, t1);
- break;
-#endif
-#if TCG_TARGET_HAS_ext16s_i64
- case INDEX_op_ext16s_i64:
- t0 = *tb_ptr++;
- t1 = tci_read_r16s(regs, &tb_ptr);
- tci_write_reg(regs, t0, t1);
- break;
-#endif
-#if TCG_TARGET_HAS_ext16u_i64
- case INDEX_op_ext16u_i64:
- t0 = *tb_ptr++;
- t1 = tci_read_r16(regs, &tb_ptr);
- tci_write_reg(regs, t0, t1);
- break;
-#endif
#if TCG_TARGET_HAS_ext32s_i64
case INDEX_op_ext32s_i64:
#endif
case INDEX_op_ext_i32_i64:
t0 = *tb_ptr++;
- t1 = tci_read_r32s(regs, &tb_ptr);
- tci_write_reg(regs, t0, t1);
+ t1 = tci_read_r(regs, &tb_ptr);
+ tci_write_reg(regs, t0, (int32_t)t1);
break;
#if TCG_TARGET_HAS_ext32u_i64
case INDEX_op_ext32u_i64:
#endif
case INDEX_op_extu_i32_i64:
t0 = *tb_ptr++;
- t1 = tci_read_r32(regs, &tb_ptr);
- tci_write_reg(regs, t0, t1);
- break;
-#if TCG_TARGET_HAS_bswap16_i64
- case INDEX_op_bswap16_i64:
- t0 = *tb_ptr++;
- t1 = tci_read_r16(regs, &tb_ptr);
- tci_write_reg(regs, t0, bswap16(t1));
- break;
-#endif
-#if TCG_TARGET_HAS_bswap32_i64
- case INDEX_op_bswap32_i64:
- t0 = *tb_ptr++;
- t1 = tci_read_r32(regs, &tb_ptr);
- tci_write_reg(regs, t0, bswap32(t1));
+ t1 = tci_read_r(regs, &tb_ptr);
+ tci_write_reg(regs, t0, (uint32_t)t1);
break;
-#endif
#if TCG_TARGET_HAS_bswap64_i64
case INDEX_op_bswap64_i64:
t0 = *tb_ptr++;
- t1 = tci_read_r64(regs, &tb_ptr);
+ t1 = tci_read_r(regs, &tb_ptr);
tci_write_reg(regs, t0, bswap64(t1));
break;
#endif
-#if TCG_TARGET_HAS_not_i64
- case INDEX_op_not_i64:
- t0 = *tb_ptr++;
- t1 = tci_read_r64(regs, &tb_ptr);
- tci_write_reg(regs, t0, ~t1);
- break;
-#endif
-#if TCG_TARGET_HAS_neg_i64
- case INDEX_op_neg_i64:
- t0 = *tb_ptr++;
- t1 = tci_read_r64(regs, &tb_ptr);
- tci_write_reg(regs, t0, -t1);
- break;
-#endif
#endif /* TCG_TARGET_REG_BITS == 64 */
/* QEMU specific operations. */
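
The interpreter change is mechanical but worth spelling out: the dozen width-specific register readers collapse into the single full-width tci_read_r(), and each consumer narrows the value with an explicit cast (e.g. (int16_t)t1 for ext16s), which in turn lets many _i32/_i64 cases merge under CASE_32_64. The shape of the surviving reader, as a sketch:

    #include <stdint.h>

    typedef uintptr_t tcg_ulong;   /* stand-in for tcg_target_ulong */

    /* One full-width read; the consumer imposes the width by casting:
     *   t1 = read_reg(regs, &pc);
     *   regs[t0] = (int16_t)t1;     // ext16s
     *   regs[t0] = (uint32_t)t1;    // extu_i32_i64
     */
    static tcg_ulong read_reg(const tcg_ulong *regs, const uint8_t **pc)
    {
        tcg_ulong value = regs[**pc];
        *pc += 1;
        return value;
    }
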
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index feac4659cc..c79f9c32d8 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -380,6 +380,18 @@ static inline void tcg_out_call(TCGContext *s, const tcg_insn_unit *arg)
old_code_ptr[1] = s->code_ptr - old_code_ptr;
}
+#if TCG_TARGET_REG_BITS == 64
+# define CASE_32_64(x) \
+ case glue(glue(INDEX_op_, x), _i64): \
+ case glue(glue(INDEX_op_, x), _i32):
+# define CASE_64(x) \
+ case glue(glue(INDEX_op_, x), _i64):
+#else
+# define CASE_32_64(x) \
+ case glue(glue(INDEX_op_, x), _i32):
+# define CASE_64(x)
+#endif
+
static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
const int *const_args)
{
@@ -391,6 +403,7 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
case INDEX_op_exit_tb:
tcg_out64(s, args[0]);
break;
+
case INDEX_op_goto_tb:
if (s->tb_jmp_insn_offset) {
/* Direct jump method. */
@@ -404,15 +417,18 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
}
set_jmp_reset_offset(s, args[0]);
break;
+
case INDEX_op_br:
tci_out_label(s, arg_label(args[0]));
break;
- case INDEX_op_setcond_i32:
+
+ CASE_32_64(setcond)
tcg_out_r(s, args[0]);
tcg_out_r(s, args[1]);
tcg_out_r(s, args[2]);
tcg_out8(s, args[3]); /* condition */
break;
+
#if TCG_TARGET_REG_BITS == 32
case INDEX_op_setcond2_i32:
/* setcond2_i32 cond, t0, t1_low, t1_high, t2_low, t2_high */
@@ -423,95 +439,54 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
tcg_out_r(s, args[4]);
tcg_out8(s, args[5]); /* condition */
break;
-#elif TCG_TARGET_REG_BITS == 64
- case INDEX_op_setcond_i64:
- tcg_out_r(s, args[0]);
- tcg_out_r(s, args[1]);
- tcg_out_r(s, args[2]);
- tcg_out8(s, args[3]); /* condition */
- break;
#endif
- case INDEX_op_ld8u_i32:
- case INDEX_op_ld8s_i32:
- case INDEX_op_ld16u_i32:
- case INDEX_op_ld16s_i32:
+
+ CASE_32_64(ld8u)
+ CASE_32_64(ld8s)
+ CASE_32_64(ld16u)
+ CASE_32_64(ld16s)
case INDEX_op_ld_i32:
- case INDEX_op_st8_i32:
- case INDEX_op_st16_i32:
+ CASE_64(ld32u)
+ CASE_64(ld32s)
+ CASE_64(ld)
+ CASE_32_64(st8)
+ CASE_32_64(st16)
case INDEX_op_st_i32:
- case INDEX_op_ld8u_i64:
- case INDEX_op_ld8s_i64:
- case INDEX_op_ld16u_i64:
- case INDEX_op_ld16s_i64:
- case INDEX_op_ld32u_i64:
- case INDEX_op_ld32s_i64:
- case INDEX_op_ld_i64:
- case INDEX_op_st8_i64:
- case INDEX_op_st16_i64:
- case INDEX_op_st32_i64:
- case INDEX_op_st_i64:
+ CASE_64(st32)
+ CASE_64(st)
stack_bounds_check(args[1], args[2]);
tcg_out_r(s, args[0]);
tcg_out_r(s, args[1]);
tcg_debug_assert(args[2] == (int32_t)args[2]);
tcg_out32(s, args[2]);
break;
- case INDEX_op_add_i32:
- case INDEX_op_sub_i32:
- case INDEX_op_mul_i32:
- case INDEX_op_and_i32:
- case INDEX_op_andc_i32: /* Optional (TCG_TARGET_HAS_andc_i32). */
- case INDEX_op_eqv_i32: /* Optional (TCG_TARGET_HAS_eqv_i32). */
- case INDEX_op_nand_i32: /* Optional (TCG_TARGET_HAS_nand_i32). */
- case INDEX_op_nor_i32: /* Optional (TCG_TARGET_HAS_nor_i32). */
- case INDEX_op_or_i32:
- case INDEX_op_orc_i32: /* Optional (TCG_TARGET_HAS_orc_i32). */
- case INDEX_op_xor_i32:
- case INDEX_op_shl_i32:
- case INDEX_op_shr_i32:
- case INDEX_op_sar_i32:
- case INDEX_op_rotl_i32: /* Optional (TCG_TARGET_HAS_rot_i32). */
- case INDEX_op_rotr_i32: /* Optional (TCG_TARGET_HAS_rot_i32). */
- tcg_out_r(s, args[0]);
- tcg_out_r(s, args[1]);
- tcg_out_r(s, args[2]);
- break;
- case INDEX_op_deposit_i32: /* Optional (TCG_TARGET_HAS_deposit_i32). */
- tcg_out_r(s, args[0]);
- tcg_out_r(s, args[1]);
- tcg_out_r(s, args[2]);
- tcg_debug_assert(args[3] <= UINT8_MAX);
- tcg_out8(s, args[3]);
- tcg_debug_assert(args[4] <= UINT8_MAX);
- tcg_out8(s, args[4]);
- break;
-#if TCG_TARGET_REG_BITS == 64
- case INDEX_op_add_i64:
- case INDEX_op_sub_i64:
- case INDEX_op_mul_i64:
- case INDEX_op_and_i64:
- case INDEX_op_andc_i64: /* Optional (TCG_TARGET_HAS_andc_i64). */
- case INDEX_op_eqv_i64: /* Optional (TCG_TARGET_HAS_eqv_i64). */
- case INDEX_op_nand_i64: /* Optional (TCG_TARGET_HAS_nand_i64). */
- case INDEX_op_nor_i64: /* Optional (TCG_TARGET_HAS_nor_i64). */
- case INDEX_op_or_i64:
- case INDEX_op_orc_i64: /* Optional (TCG_TARGET_HAS_orc_i64). */
- case INDEX_op_xor_i64:
- case INDEX_op_shl_i64:
- case INDEX_op_shr_i64:
- case INDEX_op_sar_i64:
- case INDEX_op_rotl_i64: /* Optional (TCG_TARGET_HAS_rot_i64). */
- case INDEX_op_rotr_i64: /* Optional (TCG_TARGET_HAS_rot_i64). */
- case INDEX_op_div_i64: /* Optional (TCG_TARGET_HAS_div_i64). */
- case INDEX_op_divu_i64: /* Optional (TCG_TARGET_HAS_div_i64). */
- case INDEX_op_rem_i64: /* Optional (TCG_TARGET_HAS_div_i64). */
- case INDEX_op_remu_i64: /* Optional (TCG_TARGET_HAS_div_i64). */
+ CASE_32_64(add)
+ CASE_32_64(sub)
+ CASE_32_64(mul)
+ CASE_32_64(and)
+ CASE_32_64(or)
+ CASE_32_64(xor)
+ CASE_32_64(andc) /* Optional (TCG_TARGET_HAS_andc_*). */
+ CASE_32_64(orc) /* Optional (TCG_TARGET_HAS_orc_*). */
+ CASE_32_64(eqv) /* Optional (TCG_TARGET_HAS_eqv_*). */
+ CASE_32_64(nand) /* Optional (TCG_TARGET_HAS_nand_*). */
+ CASE_32_64(nor) /* Optional (TCG_TARGET_HAS_nor_*). */
+ CASE_32_64(shl)
+ CASE_32_64(shr)
+ CASE_32_64(sar)
+ CASE_32_64(rotl) /* Optional (TCG_TARGET_HAS_rot_*). */
+ CASE_32_64(rotr) /* Optional (TCG_TARGET_HAS_rot_*). */
+ CASE_32_64(div) /* Optional (TCG_TARGET_HAS_div_*). */
+ CASE_32_64(divu) /* Optional (TCG_TARGET_HAS_div_*). */
+ CASE_32_64(rem) /* Optional (TCG_TARGET_HAS_div_*). */
+ CASE_32_64(remu) /* Optional (TCG_TARGET_HAS_div_*). */
tcg_out_r(s, args[0]);
tcg_out_r(s, args[1]);
tcg_out_r(s, args[2]);
break;
- case INDEX_op_deposit_i64: /* Optional (TCG_TARGET_HAS_deposit_i64). */
+
+ CASE_32_64(deposit) /* Optional (TCG_TARGET_HAS_deposit_*). */
tcg_out_r(s, args[0]);
tcg_out_r(s, args[1]);
tcg_out_r(s, args[2]);
@@ -520,45 +495,31 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
tcg_debug_assert(args[4] <= UINT8_MAX);
tcg_out8(s, args[4]);
break;
- case INDEX_op_brcond_i64:
+
+ CASE_32_64(brcond)
tcg_out_r(s, args[0]);
tcg_out_r(s, args[1]);
tcg_out8(s, args[2]); /* condition */
tci_out_label(s, arg_label(args[3]));
break;
- case INDEX_op_bswap16_i64: /* Optional (TCG_TARGET_HAS_bswap16_i64). */
- case INDEX_op_bswap32_i64: /* Optional (TCG_TARGET_HAS_bswap32_i64). */
- case INDEX_op_bswap64_i64: /* Optional (TCG_TARGET_HAS_bswap64_i64). */
- case INDEX_op_not_i64: /* Optional (TCG_TARGET_HAS_not_i64). */
- case INDEX_op_neg_i64: /* Optional (TCG_TARGET_HAS_neg_i64). */
- case INDEX_op_ext8s_i64: /* Optional (TCG_TARGET_HAS_ext8s_i64). */
- case INDEX_op_ext8u_i64: /* Optional (TCG_TARGET_HAS_ext8u_i64). */
- case INDEX_op_ext16s_i64: /* Optional (TCG_TARGET_HAS_ext16s_i64). */
- case INDEX_op_ext16u_i64: /* Optional (TCG_TARGET_HAS_ext16u_i64). */
- case INDEX_op_ext32s_i64: /* Optional (TCG_TARGET_HAS_ext32s_i64). */
- case INDEX_op_ext32u_i64: /* Optional (TCG_TARGET_HAS_ext32u_i64). */
- case INDEX_op_ext_i32_i64:
- case INDEX_op_extu_i32_i64:
-#endif /* TCG_TARGET_REG_BITS == 64 */
- case INDEX_op_neg_i32: /* Optional (TCG_TARGET_HAS_neg_i32). */
- case INDEX_op_not_i32: /* Optional (TCG_TARGET_HAS_not_i32). */
- case INDEX_op_ext8s_i32: /* Optional (TCG_TARGET_HAS_ext8s_i32). */
- case INDEX_op_ext16s_i32: /* Optional (TCG_TARGET_HAS_ext16s_i32). */
- case INDEX_op_ext8u_i32: /* Optional (TCG_TARGET_HAS_ext8u_i32). */
- case INDEX_op_ext16u_i32: /* Optional (TCG_TARGET_HAS_ext16u_i32). */
- case INDEX_op_bswap16_i32: /* Optional (TCG_TARGET_HAS_bswap16_i32). */
- case INDEX_op_bswap32_i32: /* Optional (TCG_TARGET_HAS_bswap32_i32). */
- tcg_out_r(s, args[0]);
- tcg_out_r(s, args[1]);
- break;
- case INDEX_op_div_i32: /* Optional (TCG_TARGET_HAS_div_i32). */
- case INDEX_op_divu_i32: /* Optional (TCG_TARGET_HAS_div_i32). */
- case INDEX_op_rem_i32: /* Optional (TCG_TARGET_HAS_div_i32). */
- case INDEX_op_remu_i32: /* Optional (TCG_TARGET_HAS_div_i32). */
+
+ CASE_32_64(neg) /* Optional (TCG_TARGET_HAS_neg_*). */
+ CASE_32_64(not) /* Optional (TCG_TARGET_HAS_not_*). */
+ CASE_32_64(ext8s) /* Optional (TCG_TARGET_HAS_ext8s_*). */
+ CASE_32_64(ext8u) /* Optional (TCG_TARGET_HAS_ext8u_*). */
+ CASE_32_64(ext16s) /* Optional (TCG_TARGET_HAS_ext16s_*). */
+ CASE_32_64(ext16u) /* Optional (TCG_TARGET_HAS_ext16u_*). */
+ CASE_64(ext32s) /* Optional (TCG_TARGET_HAS_ext32s_i64). */
+ CASE_64(ext32u) /* Optional (TCG_TARGET_HAS_ext32u_i64). */
+ CASE_64(ext_i32)
+ CASE_64(extu_i32)
+ CASE_32_64(bswap16) /* Optional (TCG_TARGET_HAS_bswap16_*). */
+ CASE_32_64(bswap32) /* Optional (TCG_TARGET_HAS_bswap32_*). */
+ CASE_64(bswap64) /* Optional (TCG_TARGET_HAS_bswap64_i64). */
tcg_out_r(s, args[0]);
tcg_out_r(s, args[1]);
- tcg_out_r(s, args[2]);
break;
+
#if TCG_TARGET_REG_BITS == 32
case INDEX_op_add2_i32:
case INDEX_op_sub2_i32:
@@ -584,31 +545,8 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
tcg_out_r(s, args[3]);
break;
#endif
- case INDEX_op_brcond_i32:
- tcg_out_r(s, args[0]);
- tcg_out_r(s, args[1]);
- tcg_out8(s, args[2]); /* condition */
- tci_out_label(s, arg_label(args[3]));
- break;
+
case INDEX_op_qemu_ld_i32:
- tcg_out_r(s, *args++);
- tcg_out_r(s, *args++);
- if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
- tcg_out_r(s, *args++);
- }
- tcg_out_i(s, *args++);
- break;
- case INDEX_op_qemu_ld_i64:
- tcg_out_r(s, *args++);
- if (TCG_TARGET_REG_BITS == 32) {
- tcg_out_r(s, *args++);
- }
- tcg_out_r(s, *args++);
- if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
- tcg_out_r(s, *args++);
- }
- tcg_out_i(s, *args++);
- break;
case INDEX_op_qemu_st_i32:
tcg_out_r(s, *args++);
tcg_out_r(s, *args++);
@@ -617,6 +555,8 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
}
tcg_out_i(s, *args++);
break;
+
+ case INDEX_op_qemu_ld_i64:
case INDEX_op_qemu_st_i64:
tcg_out_r(s, *args++);
if (TCG_TARGET_REG_BITS == 32) {
@@ -628,8 +568,10 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
}
tcg_out_i(s, *args++);
break;
+
case INDEX_op_mb:
break;
+
case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
case INDEX_op_mov_i64:
case INDEX_op_call: /* Always emitted via tcg_out_call. */
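
The CASE_32_64()/CASE_64() macros fold the duplicated _i32/_i64 case labels; on a 32-bit host CASE_64() expands to nothing, so 64-bit-only labels drop out of the switch without #if clutter around each one. A simplified, self-contained version of the trick (omitting the glue() indirection and the host-width conditional of the real macros):

    enum { OP_add_i32, OP_add_i64, OP_sub_i32, OP_sub_i64, OP_br };

    #define CASE_32_64(x)  case OP_##x##_i32: case OP_##x##_i64:

    static int num_operands(int op)
    {
        switch (op) {
        CASE_32_64(add)            /* one body serves both widths */
        CASE_32_64(sub)
            return 3;
        case OP_br:
            return 1;
        default:
            return 0;
        }
    }
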
diff --git a/tests/fp/meson.build b/tests/fp/meson.build
index 8d739c4d59..1c3eee9955 100644
--- a/tests/fp/meson.build
+++ b/tests/fp/meson.build
@@ -624,7 +624,7 @@ test('fp-test-mulAdd', fptest,
# no fptest_rounding_args
args: fptest_args +
['f16_mulAdd', 'f32_mulAdd', 'f64_mulAdd', 'f128_mulAdd'],
- suite: ['softfloat-slow', 'softfloat-ops-slow'], timeout: 60)
+ suite: ['softfloat-slow', 'softfloat-ops-slow'], timeout: 90)
fpbench = executable(
'fp-bench',
diff --git a/tests/meson.build b/tests/meson.build
index 7d7da6a636..656d211e25 100644
--- a/tests/meson.build
+++ b/tests/meson.build
@@ -237,6 +237,11 @@ test_env = environment()
test_env.set('G_TEST_SRCDIR', meson.current_source_dir())
test_env.set('G_TEST_BUILDDIR', meson.current_build_dir())
+slow_tests = {
+ 'test-crypto-tlscredsx509': 45,
+ 'test-crypto-tlssession': 45
+}
+
foreach test_name, extra: tests
src = [test_name + '.c']
deps = [qemuutil]
@@ -254,6 +259,8 @@ foreach test_name, extra: tests
env: test_env,
args: ['--tap', '-k'],
protocol: 'tap',
+ timeout: slow_tests.get(test_name, 30),
+ priority: slow_tests.get(test_name, 30),
suite: ['unit'])
endforeach
@@ -263,6 +270,7 @@ foreach bench_name, deps: benchs
benchmark(bench_name, exe,
args: ['--tap', '-k'],
protocol: 'tap',
+ timeout: 0,
suite: ['speed'])
endforeach
diff --git a/tests/qtest/meson.build b/tests/qtest/meson.build
index 9799017806..58efc46144 100644
--- a/tests/qtest/meson.build
+++ b/tests/qtest/meson.build
@@ -4,6 +4,19 @@ if not config_host.has_key('CONFIG_POSIX')
subdir_done()
endif
+slow_qtests = {
+ 'ahci-test' : 60,
+ 'bios-tables-test' : 120,
+ 'boot-serial-test' : 60,
+ 'migration-test' : 150,
+ 'npcm7xx_pwm-test': 150,
+ 'prom-env-test' : 60,
+ 'pxe-test' : 60,
+ 'qos-test' : 60,
+ 'qom-test' : 300,
+ 'test-hmp' : 120,
+}
+
qtests_generic = [
'cdrom-test',
'device-introspect-test',
@@ -274,6 +287,8 @@ foreach dir : target_dirs
env: qtest_env,
args: ['--tap', '-k'],
protocol: 'tap',
+ timeout: slow_qtests.get(test, 30),
+ priority: slow_qtests.get(test, 30),
suite: ['qtest', 'qtest-' + target_base])
endforeach
endforeach
diff --git a/trace/control.c b/trace/control.c
index cd04dd4e0c..4be38e1af2 100644
--- a/trace/control.c
+++ b/trace/control.c
@@ -40,6 +40,7 @@ static size_t nevent_groups;
static uint32_t next_id;
static uint32_t next_vcpu_id;
static bool init_trace_on_startup;
+static char *trace_opts_file;
QemuOptsList qemu_trace_opts = {
.name = "trace",
@@ -224,10 +225,8 @@ static void trace_init_events(const char *fname)
void trace_init_file(void)
{
- QemuOpts *opts = qemu_find_opts_singleton("trace");
- const char *file = qemu_opt_get(opts, "file");
#ifdef CONFIG_TRACE_SIMPLE
- st_set_trace_file(file);
+ st_set_trace_file(trace_opts_file);
if (init_trace_on_startup) {
st_set_trace_file_enabled(true);
}
@@ -238,11 +237,11 @@ void trace_init_file(void)
* backend. However we should only override -D if we actually have
* something to override it with.
*/
- if (file) {
- qemu_set_log_filename(file, &error_fatal);
+ if (trace_opts_file) {
+ qemu_set_log_filename(trace_opts_file, &error_fatal);
}
#else
- if (file) {
+ if (trace_opts_file) {
fprintf(stderr, "error: --trace file=...: "
"option not supported by the selected tracing backends\n");
exit(1);
@@ -303,6 +302,8 @@ void trace_opt_parse(const char *optarg)
}
trace_init_events(qemu_opt_get(opts, "events"));
init_trace_on_startup = true;
+ g_free(trace_opts_file);
+ trace_opts_file = g_strdup(qemu_opt_get(opts, "file"));
qemu_opts_del(opts);
}
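
Rather than re-querying the "trace" option group at init time, the "file=" value is now copied out while the option is parsed, freeing any earlier copy so that a repeated --trace file=... keeps only the last value. The cache-at-parse pattern with plain libc, as a sketch:

    #include <stdlib.h>
    #include <string.h>

    static char *trace_file;   /* module-level cache, read at init time */

    /* Called once per --trace option on the command line. */
    static void cache_trace_file(const char *value)
    {
        free(trace_file);                        /* last option wins */
        trace_file = value ? strdup(value) : NULL;
    }
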
diff --git a/util/qemu-config.c b/util/qemu-config.c
index e2a700b284..670bd6ebca 100644
--- a/util/qemu-config.c
+++ b/util/qemu-config.c
@@ -350,7 +350,7 @@ void qemu_config_write(FILE *fp)
}
/* Returns number of config groups on success, -errno on error */
-int qemu_config_parse(FILE *fp, QemuOptsList **lists, const char *fname)
+int qemu_config_parse(FILE *fp, QemuOptsList **lists, const char *fname, Error **errp)
{
char line[1024], group[64], id[64], arg[64], value[1024];
Location loc;
@@ -375,7 +375,7 @@ int qemu_config_parse(FILE *fp, QemuOptsList **lists, const char *fname)
/* group with id */
list = find_list(lists, group, &local_err);
if (local_err) {
- error_report_err(local_err);
+ error_propagate(errp, local_err);
goto out;
}
opts = qemu_opts_create(list, id, 1, NULL);
@@ -386,7 +386,7 @@ int qemu_config_parse(FILE *fp, QemuOptsList **lists, const char *fname)
/* group without id */
list = find_list(lists, group, &local_err);
if (local_err) {
- error_report_err(local_err);
+ error_propagate(errp, local_err);
goto out;
}
opts = qemu_opts_create(list, NULL, 0, &error_abort);
@@ -398,21 +398,21 @@ int qemu_config_parse(FILE *fp, QemuOptsList **lists, const char *fname)
sscanf(line, " %63s = \"\"", arg) == 1) {
/* arg = value */
if (opts == NULL) {
- error_report("no group defined");
+ error_setg(errp, "no group defined");
goto out;
}
- if (!qemu_opt_set(opts, arg, value, &local_err)) {
- error_report_err(local_err);
+ if (!qemu_opt_set(opts, arg, value, errp)) {
goto out;
}
continue;
}
- error_report("parse error");
+ error_setg(errp, "parse error");
goto out;
}
if (ferror(fp)) {
- error_report("error reading file");
- goto out;
+ loc_pop(&loc);
+ error_setg_errno(errp, errno, "Cannot read config file");
+ return res;
}
res = count;
out:
@@ -420,16 +420,17 @@ out:
return res;
}
-int qemu_read_config_file(const char *filename)
+int qemu_read_config_file(const char *filename, Error **errp)
{
FILE *f = fopen(filename, "r");
int ret;
if (f == NULL) {
+ error_setg_file_open(errp, errno, filename);
return -errno;
}
- ret = qemu_config_parse(f, vm_config_groups, filename);
+ ret = qemu_config_parse(f, vm_config_groups, filename, errp);
fclose(f);
return ret;
}
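
qemu_config_parse() now reports through Error ** instead of printing directly, and qemu_read_config_file() both fills *errp and keeps returning a negative errno, which is what lets the vl.c caller above special-case -ENOENT. A sketch of that dual-channel convention, with a plain string pointer standing in for Error (hypothetical names, not the real API):

    #include <errno.h>
    #include <stdio.h>

    /* Report through errp *and* return -errno, so callers can both show
     * a message and branch on specific errno values. */
    static int read_config(const char *path, const char **errp)
    {
        FILE *f = fopen(path, "r");

        if (f == NULL) {
            *errp = "could not open config file";
            return -errno;
        }
        /* ... parse here ... */
        fclose(f);
        return 0;
    }
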
diff --git a/util/qemu-option.c b/util/qemu-option.c
index 40564a12eb..9678d5b682 100644
--- a/util/qemu-option.c
+++ b/util/qemu-option.c
@@ -785,7 +785,11 @@ static const char *get_opt_name_value(const char *params,
}
if (!is_help && warn_on_flag) {
warn_report("short-form boolean option '%s%s' deprecated", prefix, *name);
- error_printf("Please use %s=%s instead\n", *name, *value);
+ if (g_str_equal(*name, "delay")) {
+ error_printf("Please use nodelay=%s instead\n", prefix[0] ? "on" : "off");
+ } else {
+ error_printf("Please use %s=%s instead\n", *name, *value);
+ }
}
}
} else {