263 files changed, 10015 insertions, 3985 deletions
@@ -12,6 +12,7 @@ Fabrice Bellard <fabrice@bellard.org> bellard <bellard@c046a42c-6fe2-441c-8c8c-7 James Hogan <jhogan@kernel.org> <james.hogan@imgtec.com> Jocelyn Mayer <l_indien@magic.fr> j_mayer <j_mayer@c046a42c-6fe2-441c-8c8c-71466251a162> Paul Brook <paul@codesourcery.com> pbrook <pbrook@c046a42c-6fe2-441c-8c8c-71466251a162> +Yongbok Kim <yongbok.kim@mips.com> <yongbok.kim@imgtec.com> Aleksandar Markovic <amarkovic@wavecomp.com> <aleksandar.markovic@mips.com> Aleksandar Markovic <amarkovic@wavecomp.com> <aleksandar.markovic@imgtec.com> Paul Burton <pburton@wavecomp.com> <paul.burton@mips.com> @@ -33,5 +34,6 @@ Justin Terry (VM) <juterry@microsoft.com> Justin Terry (VM) via Qemu-devel <qemu # Also list preferred name forms where people have changed their -# git author config +# git author config, or having utf8/latin1 encoding issues. Daniel P. Berrangé <berrange@redhat.com> +Reimar Döffinger <Reimar.Doeffinger@gmx.de> diff --git a/MAINTAINERS b/MAINTAINERS index fb81b3a8eb..40672c4eba 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2023,7 +2023,6 @@ F: block/rbd.c T: git git://github.com/codyprime/qemu-kvm-jtc.git block Sheepdog -M: Hitoshi Mitake <mitake.hitoshi@lab.ntt.co.jp> M: Liu Yuan <namei.unix@gmail.com> M: Jeff Cody <jcody@redhat.com> L: qemu-block@nongnu.org diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c index de12f78eb8..4880a05399 100644 --- a/accel/kvm/kvm-all.c +++ b/accel/kvm/kvm-all.c @@ -79,6 +79,7 @@ struct KVMState int fd; int vmfd; int coalesced_mmio; + int coalesced_pio; struct kvm_coalesced_mmio_ring *coalesced_mmio_ring; bool coalesced_flush_in_progress; int vcpu_events; @@ -560,6 +561,45 @@ static void kvm_uncoalesce_mmio_region(MemoryListener *listener, } } +static void kvm_coalesce_pio_add(MemoryListener *listener, + MemoryRegionSection *section, + hwaddr start, hwaddr size) +{ + KVMState *s = kvm_state; + + if (s->coalesced_pio) { + struct kvm_coalesced_mmio_zone zone; + + zone.addr = start; + zone.size = size; + zone.pio = 1; + + (void)kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone); + } +} + +static void kvm_coalesce_pio_del(MemoryListener *listener, + MemoryRegionSection *section, + hwaddr start, hwaddr size) +{ + KVMState *s = kvm_state; + + if (s->coalesced_pio) { + struct kvm_coalesced_mmio_zone zone; + + zone.addr = start; + zone.size = size; + zone.pio = 1; + + (void)kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone); + } +} + +static MemoryListener kvm_coalesced_pio_listener = { + .coalesced_io_add = kvm_coalesce_pio_add, + .coalesced_io_del = kvm_coalesce_pio_del, +}; + int kvm_check_extension(KVMState *s, unsigned int extension) { int ret; @@ -1616,6 +1656,8 @@ static int kvm_init(MachineState *ms) } s->coalesced_mmio = kvm_check_extension(s, KVM_CAP_COALESCED_MMIO); + s->coalesced_pio = s->coalesced_mmio && + kvm_check_extension(s, KVM_CAP_COALESCED_PIO); #ifdef KVM_CAP_VCPU_EVENTS s->vcpu_events = kvm_check_extension(s, KVM_CAP_VCPU_EVENTS); @@ -1686,13 +1728,15 @@ static int kvm_init(MachineState *ms) s->memory_listener.listener.eventfd_add = kvm_mem_ioeventfd_add; s->memory_listener.listener.eventfd_del = kvm_mem_ioeventfd_del; } - s->memory_listener.listener.coalesced_mmio_add = kvm_coalesce_mmio_region; - s->memory_listener.listener.coalesced_mmio_del = kvm_uncoalesce_mmio_region; + s->memory_listener.listener.coalesced_io_add = kvm_coalesce_mmio_region; + s->memory_listener.listener.coalesced_io_del = kvm_uncoalesce_mmio_region; kvm_memory_listener_register(s, &s->memory_listener, &address_space_memory, 0); 
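The kvm-all.c changes above register port-I/O ranges for write coalescing through the same KVM_REGISTER_COALESCED_MMIO ioctl used for MMIO zones, distinguished only by the zone's pio flag. A minimal standalone sketch of that registration, assuming the Linux UAPI layout of struct kvm_coalesced_mmio_zone (where pio overlays the former pad word) and a vmfd obtained from KVM_CREATE_VM; the helper name is illustrative, not QEMU's:

    #include <linux/kvm.h>   /* struct kvm_coalesced_mmio_zone, ioctl numbers */
    #include <sys/ioctl.h>

    /* Register an I/O-port range for write coalescing.  The caller is
     * expected to have verified KVM_CAP_COALESCED_PIO beforehand, as
     * kvm_init() does in the hunk above. */
    static int register_coalesced_pio(int vmfd, __u64 port, __u32 len)
    {
        struct kvm_coalesced_mmio_zone zone = {
            .addr = port,  /* for PIO zones this is a port number */
            .size = len,
            .pio  = 1,     /* distinguishes PIO from MMIO zones */
        };
        return ioctl(vmfd, KVM_REGISTER_COALESCED_MMIO, &zone);
    }

The flush side, in kvm_flush_coalesced_mmio_buffer() just below, then replays each ring entry to address_space_io whenever ent->pio is set.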
memory_listener_register(&kvm_io_listener, &address_space_io); + memory_listener_register(&kvm_coalesced_pio_listener, + &address_space_io); s->many_ioeventfds = kvm_check_many_ioeventfds(); @@ -1778,7 +1822,13 @@ void kvm_flush_coalesced_mmio_buffer(void) ent = &ring->coalesced_mmio[ring->first]; - cpu_physical_memory_write(ent->phys_addr, ent->data, ent->len); + if (ent->pio == 1) { + address_space_rw(&address_space_io, ent->phys_addr, + MEMTXATTRS_UNSPECIFIED, ent->data, + ent->len, true); + } else { + cpu_physical_memory_write(ent->phys_addr, ent->data, ent->len); + } smp_wmb(); ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX; } diff --git a/accel/tcg/atomic_template.h b/accel/tcg/atomic_template.h index d751bcba48..efde12fdb2 100644 --- a/accel/tcg/atomic_template.h +++ b/accel/tcg/atomic_template.h @@ -100,19 +100,24 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr, DATA_TYPE ret; ATOMIC_TRACE_RMW; +#if DATA_SIZE == 16 + ret = atomic16_cmpxchg(haddr, cmpv, newv); +#else ret = atomic_cmpxchg__nocheck(haddr, cmpv, newv); +#endif ATOMIC_MMU_CLEANUP; return ret; } #if DATA_SIZE >= 16 +#if HAVE_ATOMIC128 ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr EXTRA_ARGS) { ATOMIC_MMU_DECLS; DATA_TYPE val, *haddr = ATOMIC_MMU_LOOKUP; ATOMIC_TRACE_LD; - __atomic_load(haddr, &val, __ATOMIC_RELAXED); + val = atomic16_read(haddr); ATOMIC_MMU_CLEANUP; return val; } @@ -124,9 +129,10 @@ void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr, DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; ATOMIC_TRACE_ST; - __atomic_store(haddr, &val, __ATOMIC_RELAXED); + atomic16_set(haddr, val); ATOMIC_MMU_CLEANUP; } +#endif #else ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, ABI_TYPE val EXTRA_ARGS) @@ -228,19 +234,24 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr, DATA_TYPE ret; ATOMIC_TRACE_RMW; +#if DATA_SIZE == 16 + ret = atomic16_cmpxchg(haddr, BSWAP(cmpv), BSWAP(newv)); +#else ret = atomic_cmpxchg__nocheck(haddr, BSWAP(cmpv), BSWAP(newv)); +#endif ATOMIC_MMU_CLEANUP; return BSWAP(ret); } #if DATA_SIZE >= 16 +#if HAVE_ATOMIC128 ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr EXTRA_ARGS) { ATOMIC_MMU_DECLS; DATA_TYPE val, *haddr = ATOMIC_MMU_LOOKUP; ATOMIC_TRACE_LD; - __atomic_load(haddr, &val, __ATOMIC_RELAXED); + val = atomic16_read(haddr); ATOMIC_MMU_CLEANUP; return BSWAP(val); } @@ -253,9 +264,10 @@ void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr, ATOMIC_TRACE_ST; val = BSWAP(val); - __atomic_store(haddr, &val, __ATOMIC_RELAXED); + atomic16_set(haddr, val); ATOMIC_MMU_CLEANUP; } +#endif #else ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, ABI_TYPE val EXTRA_ARGS) diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c index 6bcb6d99bd..870027d435 100644 --- a/accel/tcg/cpu-exec.c +++ b/accel/tcg/cpu-exec.c @@ -416,7 +416,7 @@ static inline TranslationBlock *tb_find(CPUState *cpu, } #endif /* See if we can patch the calling TB. 
*/ - if (last_tb && !qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) { + if (last_tb) { tb_add_jump(last_tb, tb_exit, tb); } return tb; diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c index f4702ce91f..af57aca5e4 100644 --- a/accel/tcg/cputlb.c +++ b/accel/tcg/cputlb.c @@ -32,6 +32,7 @@ #include "exec/log.h" #include "exec/helper-proto.h" #include "qemu/atomic.h" +#include "qemu/atomic128.h" /* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */ /* #define DEBUG_TLB */ @@ -58,9 +59,9 @@ } \ } while (0) -#define assert_cpu_is_self(this_cpu) do { \ +#define assert_cpu_is_self(cpu) do { \ if (DEBUG_TLB_GATE) { \ - g_assert(!cpu->created || qemu_cpu_is_self(cpu)); \ + g_assert(!(cpu)->created || qemu_cpu_is_self(cpu)); \ } \ } while (0) @@ -73,6 +74,13 @@ QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data)); QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16); #define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1) +void tlb_init(CPUState *cpu) +{ + CPUArchState *env = cpu->env_ptr; + + qemu_spin_init(&env->tlb_lock); +} + /* flush_all_helper: run fn across all cpus * * If the wait flag is set then the src cpu's helper will be queued as @@ -125,8 +133,17 @@ static void tlb_flush_nocheck(CPUState *cpu) atomic_set(&env->tlb_flush_count, env->tlb_flush_count + 1); tlb_debug("(count: %zu)\n", tlb_flush_count()); + /* + * tlb_table/tlb_v_table updates from any thread must hold tlb_lock. + * However, updates from the owner thread (as is the case here; see the + * above assert_cpu_is_self) do not need atomic_set because all reads + * that do not hold the lock are performed by the same owner thread. + */ + qemu_spin_lock(&env->tlb_lock); memset(env->tlb_table, -1, sizeof(env->tlb_table)); memset(env->tlb_v_table, -1, sizeof(env->tlb_v_table)); + qemu_spin_unlock(&env->tlb_lock); + cpu_tb_jmp_cache_clear(cpu); env->vtlb_index = 0; @@ -178,6 +195,7 @@ static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data) tlb_debug("start: mmu_idx:0x%04lx\n", mmu_idx_bitmask); + qemu_spin_lock(&env->tlb_lock); for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { if (test_bit(mmu_idx, &mmu_idx_bitmask)) { @@ -187,6 +205,7 @@ static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data) memset(env->tlb_v_table[mmu_idx], -1, sizeof(env->tlb_v_table[0])); } } + qemu_spin_unlock(&env->tlb_lock); cpu_tb_jmp_cache_clear(cpu); @@ -239,23 +258,28 @@ static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry, target_ulong page) { return tlb_hit_page(tlb_entry->addr_read, page) || - tlb_hit_page(tlb_entry->addr_write, page) || + tlb_hit_page(tlb_addr_write(tlb_entry), page) || tlb_hit_page(tlb_entry->addr_code, page); } -static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong page) +/* Called with tlb_lock held */ +static inline void tlb_flush_entry_locked(CPUTLBEntry *tlb_entry, + target_ulong page) { if (tlb_hit_page_anyprot(tlb_entry, page)) { memset(tlb_entry, -1, sizeof(*tlb_entry)); } } -static inline void tlb_flush_vtlb_page(CPUArchState *env, int mmu_idx, - target_ulong page) +/* Called with tlb_lock held */ +static inline void tlb_flush_vtlb_page_locked(CPUArchState *env, int mmu_idx, + target_ulong page) { int k; + + assert_cpu_is_self(ENV_GET_CPU(env)); for (k = 0; k < CPU_VTLB_SIZE; k++) { - tlb_flush_entry(&env->tlb_v_table[mmu_idx][k], page); + tlb_flush_entry_locked(&env->tlb_v_table[mmu_idx][k], page); } } @@ -263,7 +287,6 @@ static void tlb_flush_page_async_work(CPUState *cpu, run_on_cpu_data data) { CPUArchState *env = cpu->env_ptr; 
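The cputlb.c conversion in the hunks above and below relies on two small helpers, tlb_index() and tlb_entry(), plus a tlb_addr_write() accessor, in place of the open-coded (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1) arithmetic. Their definitions are not part of this diff; assuming they simply factor out that indexing, and that the addr_write read is done with atomic_read() except for oversized guests, they look roughly like:

    /* Sketch of the helpers assumed by these hunks; the real definitions
     * live in QEMU's cpu_ldst headers, outside this diff. */
    static inline uintptr_t tlb_index(CPUArchState *env, uintptr_t mmu_idx,
                                      target_ulong addr)
    {
        return (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    }

    static inline CPUTLBEntry *tlb_entry(CPUArchState *env, uintptr_t mmu_idx,
                                         target_ulong addr)
    {
        return &env->tlb_table[mmu_idx][tlb_index(env, mmu_idx, addr)];
    }

    static inline target_ulong tlb_addr_write(const CPUTLBEntry *entry)
    {
    #if TCG_OVERSIZED_GUEST
        return entry->addr_write;
    #else
        return atomic_read(&entry->addr_write);
    #endif
    }

Centralizing the lookup this way is what lets the rest of the diff swap direct tlb_table[mmu_idx][index] accesses for lock- and atomics-aware ones without touching every call site's arithmetic.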
target_ulong addr = (target_ulong) data.target_ptr; - int i; int mmu_idx; assert_cpu_is_self(cpu); @@ -281,11 +304,12 @@ static void tlb_flush_page_async_work(CPUState *cpu, run_on_cpu_data data) } addr &= TARGET_PAGE_MASK; - i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); + qemu_spin_lock(&env->tlb_lock); for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { - tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr); - tlb_flush_vtlb_page(env, mmu_idx, addr); + tlb_flush_entry_locked(tlb_entry(env, mmu_idx, addr), addr); + tlb_flush_vtlb_page_locked(env, mmu_idx, addr); } + qemu_spin_unlock(&env->tlb_lock); tb_flush_jmp_cache(cpu, addr); } @@ -314,20 +338,21 @@ static void tlb_flush_page_by_mmuidx_async_work(CPUState *cpu, target_ulong addr_and_mmuidx = (target_ulong) data.target_ptr; target_ulong addr = addr_and_mmuidx & TARGET_PAGE_MASK; unsigned long mmu_idx_bitmap = addr_and_mmuidx & ALL_MMUIDX_BITS; - int page = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); int mmu_idx; assert_cpu_is_self(cpu); - tlb_debug("page:%d addr:"TARGET_FMT_lx" mmu_idx:0x%lx\n", - page, addr, mmu_idx_bitmap); + tlb_debug("flush page addr:"TARGET_FMT_lx" mmu_idx:0x%lx\n", + addr, mmu_idx_bitmap); + qemu_spin_lock(&env->tlb_lock); for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { if (test_bit(mmu_idx, &mmu_idx_bitmap)) { - tlb_flush_entry(&env->tlb_table[mmu_idx][page], addr); - tlb_flush_vtlb_page(env, mmu_idx, addr); + tlb_flush_entry_locked(tlb_entry(env, mmu_idx, addr), addr); + tlb_flush_vtlb_page_locked(env, mmu_idx, addr); } } + qemu_spin_unlock(&env->tlb_lock); tb_flush_jmp_cache(cpu, addr); } @@ -450,72 +475,44 @@ void tlb_unprotect_code(ram_addr_t ram_addr) * most usual is detecting writes to code regions which may invalidate * generated code. * - * Because we want other vCPUs to respond to changes straight away we - * update the te->addr_write field atomically. If the TLB entry has - * been changed by the vCPU in the mean time we skip the update. + * Other vCPUs might be reading their TLBs during guest execution, so we update + * te->addr_write with atomic_set. We don't need to worry about this for + * oversized guests as MTTCG is disabled for them. * - * As this function uses atomic accesses we also need to ensure - * updates to tlb_entries follow the same access rules. We don't need - * to worry about this for oversized guests as MTTCG is disabled for - * them. + * Called with tlb_lock held. 
*/ - -static void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, uintptr_t start, - uintptr_t length) +static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry, + uintptr_t start, uintptr_t length) { -#if TCG_OVERSIZED_GUEST uintptr_t addr = tlb_entry->addr_write; if ((addr & (TLB_INVALID_MASK | TLB_MMIO | TLB_NOTDIRTY)) == 0) { addr &= TARGET_PAGE_MASK; addr += tlb_entry->addend; if ((addr - start) < length) { +#if TCG_OVERSIZED_GUEST tlb_entry->addr_write |= TLB_NOTDIRTY; - } - } #else - /* paired with atomic_mb_set in tlb_set_page_with_attrs */ - uintptr_t orig_addr = atomic_mb_read(&tlb_entry->addr_write); - uintptr_t addr = orig_addr; - - if ((addr & (TLB_INVALID_MASK | TLB_MMIO | TLB_NOTDIRTY)) == 0) { - addr &= TARGET_PAGE_MASK; - addr += atomic_read(&tlb_entry->addend); - if ((addr - start) < length) { - uintptr_t notdirty_addr = orig_addr | TLB_NOTDIRTY; - atomic_cmpxchg(&tlb_entry->addr_write, orig_addr, notdirty_addr); + atomic_set(&tlb_entry->addr_write, + tlb_entry->addr_write | TLB_NOTDIRTY); +#endif } } -#endif } -/* For atomic correctness when running MTTCG we need to use the right - * primitives when copying entries */ -static inline void copy_tlb_helper(CPUTLBEntry *d, CPUTLBEntry *s, - bool atomic_set) +/* + * Called with tlb_lock held. + * Called only from the vCPU context, i.e. the TLB's owner thread. + */ +static inline void copy_tlb_helper_locked(CPUTLBEntry *d, const CPUTLBEntry *s) { -#if TCG_OVERSIZED_GUEST *d = *s; -#else - if (atomic_set) { - d->addr_read = s->addr_read; - d->addr_code = s->addr_code; - atomic_set(&d->addend, atomic_read(&s->addend)); - /* Pairs with flag setting in tlb_reset_dirty_range */ - atomic_mb_set(&d->addr_write, atomic_read(&s->addr_write)); - } else { - d->addr_read = s->addr_read; - d->addr_write = atomic_read(&s->addr_write); - d->addr_code = s->addr_code; - d->addend = atomic_read(&s->addend); - } -#endif } /* This is a cross vCPU call (i.e. another vCPU resetting the flags of - * the target vCPU). As such care needs to be taken that we don't - * dangerously race with another vCPU update. The only thing actually - * updated is the target TLB entry ->addr_write flags. + * the target vCPU). + * We must take tlb_lock to avoid racing with another vCPU update. The only + * thing actually updated is the target TLB entry ->addr_write flags. 
*/ void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length) { @@ -524,22 +521,26 @@ void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length) int mmu_idx; env = cpu->env_ptr; + qemu_spin_lock(&env->tlb_lock); for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { unsigned int i; for (i = 0; i < CPU_TLB_SIZE; i++) { - tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i], - start1, length); + tlb_reset_dirty_range_locked(&env->tlb_table[mmu_idx][i], start1, + length); } for (i = 0; i < CPU_VTLB_SIZE; i++) { - tlb_reset_dirty_range(&env->tlb_v_table[mmu_idx][i], - start1, length); + tlb_reset_dirty_range_locked(&env->tlb_v_table[mmu_idx][i], start1, + length); } } + qemu_spin_unlock(&env->tlb_lock); } -static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr) +/* Called with tlb_lock held */ +static inline void tlb_set_dirty1_locked(CPUTLBEntry *tlb_entry, + target_ulong vaddr) { if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) { tlb_entry->addr_write = vaddr; @@ -551,23 +552,23 @@ static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr) void tlb_set_dirty(CPUState *cpu, target_ulong vaddr) { CPUArchState *env = cpu->env_ptr; - int i; int mmu_idx; assert_cpu_is_self(cpu); vaddr &= TARGET_PAGE_MASK; - i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); + qemu_spin_lock(&env->tlb_lock); for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { - tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr); + tlb_set_dirty1_locked(tlb_entry(env, mmu_idx, vaddr), vaddr); } for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { int k; for (k = 0; k < CPU_VTLB_SIZE; k++) { - tlb_set_dirty1(&env->tlb_v_table[mmu_idx][k], vaddr); + tlb_set_dirty1_locked(&env->tlb_v_table[mmu_idx][k], vaddr); } } + qemu_spin_unlock(&env->tlb_lock); } /* Our TLB does not support large pages, so remember the area covered by @@ -654,15 +655,24 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr, addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat; } - /* Make sure there's no cached translation for the new page. */ - tlb_flush_vtlb_page(env, mmu_idx, vaddr_page); - code_address = address; iotlb = memory_region_section_get_iotlb(cpu, section, vaddr_page, paddr_page, xlat, prot, &address); - index = (vaddr_page >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); - te = &env->tlb_table[mmu_idx][index]; + index = tlb_index(env, mmu_idx, vaddr_page); + te = tlb_entry(env, mmu_idx, vaddr_page); + + /* + * Hold the TLB lock for the rest of the function. We could acquire/release + * the lock several times in the function, but it is faster to amortize the + * acquisition cost by acquiring it just once. Note that this leads to + * a longer critical section, but this is not a concern since the TLB lock + * is unlikely to be contended. + */ + qemu_spin_lock(&env->tlb_lock); + + /* Make sure there's no cached translation for the new page. */ + tlb_flush_vtlb_page_locked(env, mmu_idx, vaddr_page); /* * Only evict the old entry to the victim tlb if it's for a @@ -673,7 +683,7 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr, CPUTLBEntry *tv = &env->tlb_v_table[mmu_idx][vidx]; /* Evict the old entry into the victim tlb. 
*/ - copy_tlb_helper(tv, te, true); + copy_tlb_helper_locked(tv, te); env->iotlb_v[mmu_idx][vidx] = env->iotlb[mmu_idx][index]; } @@ -725,9 +735,8 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr, } } - /* Pairs with flag setting in tlb_reset_dirty_range */ - copy_tlb_helper(te, &tn, true); - /* atomic_mb_set(&te->addr_write, write_address); */ + copy_tlb_helper_locked(te, &tn); + qemu_spin_unlock(&env->tlb_lock); } /* Add a new TLB entry, but without specifying the memory @@ -773,16 +782,16 @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry, * repeat the MMU check here. This tlb_fill() call might * longjump out if this access should cause a guest exception. */ - int index; + CPUTLBEntry *entry; target_ulong tlb_addr; tlb_fill(cpu, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr); - index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); - tlb_addr = env->tlb_table[mmu_idx][index].addr_read; + entry = tlb_entry(env, mmu_idx, addr); + tlb_addr = entry->addr_read; if (!(tlb_addr & ~(TARGET_PAGE_MASK | TLB_RECHECK))) { /* RAM access */ - uintptr_t haddr = addr + env->tlb_table[mmu_idx][index].addend; + uintptr_t haddr = addr + entry->addend; return ldn_p((void *)haddr, size); } @@ -840,16 +849,16 @@ static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry, * repeat the MMU check here. This tlb_fill() call might * longjump out if this access should cause a guest exception. */ - int index; + CPUTLBEntry *entry; target_ulong tlb_addr; tlb_fill(cpu, addr, size, MMU_DATA_STORE, mmu_idx, retaddr); - index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); - tlb_addr = env->tlb_table[mmu_idx][index].addr_write; + entry = tlb_entry(env, mmu_idx, addr); + tlb_addr = tlb_addr_write(entry); if (!(tlb_addr & ~(TARGET_PAGE_MASK | TLB_RECHECK))) { /* RAM access */ - uintptr_t haddr = addr + env->tlb_table[mmu_idx][index].addend; + uintptr_t haddr = addr + entry->addend; stn_p((void *)haddr, size, val); return; @@ -891,17 +900,28 @@ static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index, size_t elt_ofs, target_ulong page) { size_t vidx; + + assert_cpu_is_self(ENV_GET_CPU(env)); for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) { CPUTLBEntry *vtlb = &env->tlb_v_table[mmu_idx][vidx]; - target_ulong cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs); + target_ulong cmp; + + /* elt_ofs might correspond to .addr_write, so use atomic_read */ +#if TCG_OVERSIZED_GUEST + cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs); +#else + cmp = atomic_read((target_ulong *)((uintptr_t)vtlb + elt_ofs)); +#endif if (cmp == page) { /* Found entry in victim tlb, swap tlb and iotlb. 
*/ CPUTLBEntry tmptlb, *tlb = &env->tlb_table[mmu_idx][index]; - copy_tlb_helper(&tmptlb, tlb, false); - copy_tlb_helper(tlb, vtlb, true); - copy_tlb_helper(vtlb, &tmptlb, true); + qemu_spin_lock(&env->tlb_lock); + copy_tlb_helper_locked(&tmptlb, tlb); + copy_tlb_helper_locked(tlb, vtlb); + copy_tlb_helper_locked(vtlb, &tmptlb); + qemu_spin_unlock(&env->tlb_lock); CPUIOTLBEntry tmpio, *io = &env->iotlb[mmu_idx][index]; CPUIOTLBEntry *vio = &env->iotlb_v[mmu_idx][vidx]; @@ -924,20 +944,19 @@ static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index, */ tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr) { - int mmu_idx, index; + uintptr_t mmu_idx = cpu_mmu_index(env, true); + uintptr_t index = tlb_index(env, mmu_idx, addr); + CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); void *p; - index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); - mmu_idx = cpu_mmu_index(env, true); - if (unlikely(!tlb_hit(env->tlb_table[mmu_idx][index].addr_code, addr))) { + if (unlikely(!tlb_hit(entry->addr_code, addr))) { if (!VICTIM_TLB_HIT(addr_code, addr)) { tlb_fill(ENV_GET_CPU(env), addr, 0, MMU_INST_FETCH, mmu_idx, 0); } - assert(tlb_hit(env->tlb_table[mmu_idx][index].addr_code, addr)); + assert(tlb_hit(entry->addr_code, addr)); } - if (unlikely(env->tlb_table[mmu_idx][index].addr_code & - (TLB_RECHECK | TLB_MMIO))) { + if (unlikely(entry->addr_code & (TLB_RECHECK | TLB_MMIO))) { /* * Return -1 if we can't translate and execute from an entire * page of RAM here, which will cause us to execute by loading @@ -949,7 +968,7 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr) return -1; } - p = (void *)((uintptr_t)addr + env->tlb_table[mmu_idx][index].addend); + p = (void *)((uintptr_t)addr + entry->addend); return qemu_ram_addr_from_host_nofail(p); } @@ -962,10 +981,10 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr) void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx, uintptr_t retaddr) { - int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); - target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write; + uintptr_t index = tlb_index(env, mmu_idx, addr); + CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); - if (!tlb_hit(tlb_addr, addr)) { + if (!tlb_hit(tlb_addr_write(entry), addr)) { /* TLB entry is for a different page */ if (!VICTIM_TLB_HIT(addr_write, addr)) { tlb_fill(ENV_GET_CPU(env), addr, size, MMU_DATA_STORE, @@ -981,9 +1000,9 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr, NotDirtyInfo *ndi) { size_t mmu_idx = get_mmuidx(oi); - size_t index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); - CPUTLBEntry *tlbe = &env->tlb_table[mmu_idx][index]; - target_ulong tlb_addr = tlbe->addr_write; + uintptr_t index = tlb_index(env, mmu_idx, addr); + CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr); + target_ulong tlb_addr = tlb_addr_write(tlbe); TCGMemOp mop = get_memop(oi); int a_bits = get_alignment_bits(mop); int s_bits = mop & MO_SIZE; @@ -1014,7 +1033,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr, tlb_fill(ENV_GET_CPU(env), addr, 1 << s_bits, MMU_DATA_STORE, mmu_idx, retaddr); } - tlb_addr = tlbe->addr_write & ~TLB_INVALID_MASK; + tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK; } /* Notice an IO access or a needs-MMU-lookup access */ @@ -1101,7 +1120,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr, #include "atomic_template.h" #endif -#ifdef CONFIG_ATOMIC128 +#if HAVE_CMPXCHG128 || 
HAVE_ATOMIC128 #define DATA_SIZE 16 #include "atomic_template.h" #endif diff --git a/accel/tcg/softmmu_template.h b/accel/tcg/softmmu_template.h index f060a693d4..b0adea045e 100644 --- a/accel/tcg/softmmu_template.h +++ b/accel/tcg/softmmu_template.h @@ -111,9 +111,10 @@ static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env, WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, uintptr_t retaddr) { - unsigned mmu_idx = get_mmuidx(oi); - int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); - target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ; + uintptr_t mmu_idx = get_mmuidx(oi); + uintptr_t index = tlb_index(env, mmu_idx, addr); + CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); + target_ulong tlb_addr = entry->ADDR_READ; unsigned a_bits = get_alignment_bits(get_memop(oi)); uintptr_t haddr; DATA_TYPE res; @@ -129,7 +130,7 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr, tlb_fill(ENV_GET_CPU(env), addr, DATA_SIZE, READ_ACCESS_TYPE, mmu_idx, retaddr); } - tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ; + tlb_addr = entry->ADDR_READ; } /* Handle an IO access. */ @@ -166,7 +167,7 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr, return res; } - haddr = addr + env->tlb_table[mmu_idx][index].addend; + haddr = addr + entry->addend; #if DATA_SIZE == 1 res = glue(glue(ld, LSUFFIX), _p)((uint8_t *)haddr); #else @@ -179,9 +180,10 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr, WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, uintptr_t retaddr) { - unsigned mmu_idx = get_mmuidx(oi); - int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); - target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ; + uintptr_t mmu_idx = get_mmuidx(oi); + uintptr_t index = tlb_index(env, mmu_idx, addr); + CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); + target_ulong tlb_addr = entry->ADDR_READ; unsigned a_bits = get_alignment_bits(get_memop(oi)); uintptr_t haddr; DATA_TYPE res; @@ -197,7 +199,7 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr, tlb_fill(ENV_GET_CPU(env), addr, DATA_SIZE, READ_ACCESS_TYPE, mmu_idx, retaddr); } - tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ; + tlb_addr = entry->ADDR_READ; } /* Handle an IO access. 
*/ @@ -234,7 +236,7 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr, return res; } - haddr = addr + env->tlb_table[mmu_idx][index].addend; + haddr = addr + entry->addend; res = glue(glue(ld, LSUFFIX), _be_p)((uint8_t *)haddr); return res; } @@ -275,9 +277,10 @@ static inline void glue(io_write, SUFFIX)(CPUArchState *env, void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val, TCGMemOpIdx oi, uintptr_t retaddr) { - unsigned mmu_idx = get_mmuidx(oi); - int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); - target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write; + uintptr_t mmu_idx = get_mmuidx(oi); + uintptr_t index = tlb_index(env, mmu_idx, addr); + CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); + target_ulong tlb_addr = tlb_addr_write(entry); unsigned a_bits = get_alignment_bits(get_memop(oi)); uintptr_t haddr; @@ -292,7 +295,7 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val, tlb_fill(ENV_GET_CPU(env), addr, DATA_SIZE, MMU_DATA_STORE, mmu_idx, retaddr); } - tlb_addr = env->tlb_table[mmu_idx][index].addr_write & ~TLB_INVALID_MASK; + tlb_addr = tlb_addr_write(entry) & ~TLB_INVALID_MASK; } /* Handle an IO access. */ @@ -313,16 +316,16 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val, if (DATA_SIZE > 1 && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1 >= TARGET_PAGE_SIZE)) { - int i, index2; - target_ulong page2, tlb_addr2; + int i; + target_ulong page2; + CPUTLBEntry *entry2; do_unaligned_access: /* Ensure the second page is in the TLB. Note that the first page is already guaranteed to be filled, and that the second page cannot evict the first. */ page2 = (addr + DATA_SIZE) & TARGET_PAGE_MASK; - index2 = (page2 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); - tlb_addr2 = env->tlb_table[mmu_idx][index2].addr_write; - if (!tlb_hit_page(tlb_addr2, page2) + entry2 = tlb_entry(env, mmu_idx, page2); + if (!tlb_hit_page(tlb_addr_write(entry2), page2) && !VICTIM_TLB_HIT(addr_write, page2)) { tlb_fill(ENV_GET_CPU(env), page2, DATA_SIZE, MMU_DATA_STORE, mmu_idx, retaddr); @@ -340,7 +343,7 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val, return; } - haddr = addr + env->tlb_table[mmu_idx][index].addend; + haddr = addr + entry->addend; #if DATA_SIZE == 1 glue(glue(st, SUFFIX), _p)((uint8_t *)haddr, val); #else @@ -352,9 +355,10 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val, void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val, TCGMemOpIdx oi, uintptr_t retaddr) { - unsigned mmu_idx = get_mmuidx(oi); - int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); - target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write; + uintptr_t mmu_idx = get_mmuidx(oi); + uintptr_t index = tlb_index(env, mmu_idx, addr); + CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); + target_ulong tlb_addr = tlb_addr_write(entry); unsigned a_bits = get_alignment_bits(get_memop(oi)); uintptr_t haddr; @@ -369,7 +373,7 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val, tlb_fill(ENV_GET_CPU(env), addr, DATA_SIZE, MMU_DATA_STORE, mmu_idx, retaddr); } - tlb_addr = env->tlb_table[mmu_idx][index].addr_write & ~TLB_INVALID_MASK; + tlb_addr = tlb_addr_write(entry) & ~TLB_INVALID_MASK; } /* Handle an IO access. 
*/ @@ -390,16 +394,16 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val, if (DATA_SIZE > 1 && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1 >= TARGET_PAGE_SIZE)) { - int i, index2; - target_ulong page2, tlb_addr2; + int i; + target_ulong page2; + CPUTLBEntry *entry2; do_unaligned_access: /* Ensure the second page is in the TLB. Note that the first page is already guaranteed to be filled, and that the second page cannot evict the first. */ page2 = (addr + DATA_SIZE) & TARGET_PAGE_MASK; - index2 = (page2 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); - tlb_addr2 = env->tlb_table[mmu_idx][index2].addr_write; - if (!tlb_hit_page(tlb_addr2, page2) + entry2 = tlb_entry(env, mmu_idx, page2); + if (!tlb_hit_page(tlb_addr_write(entry2), page2) && !VICTIM_TLB_HIT(addr_write, page2)) { tlb_fill(ENV_GET_CPU(env), page2, DATA_SIZE, MMU_DATA_STORE, mmu_idx, retaddr); @@ -417,7 +421,7 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val, return; } - haddr = addr + env->tlb_table[mmu_idx][index].addend; + haddr = addr + entry->addend; glue(glue(st, SUFFIX), _be_p)((uint8_t *)haddr, val); } #endif /* DATA_SIZE > 1 */ diff --git a/accel/tcg/tcg-all.c b/accel/tcg/tcg-all.c index 56dbb56a16..3d25bdcc17 100644 --- a/accel/tcg/tcg-all.c +++ b/accel/tcg/tcg-all.c @@ -51,7 +51,7 @@ static void tcg_handle_interrupt(CPUState *cpu, int mask) if (!qemu_cpu_is_self(cpu)) { qemu_cpu_kick(cpu); } else { - cpu->icount_decr.u16.high = -1; + atomic_set(&cpu->icount_decr.u16.high, -1); if (use_icount && !cpu->can_do_io && (mask & ~old_mask) != 0) { diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c index ad5c758246..356dcd0948 100644 --- a/accel/tcg/translate-all.c +++ b/accel/tcg/translate-all.c @@ -2341,7 +2341,7 @@ void cpu_interrupt(CPUState *cpu, int mask) { g_assert(qemu_mutex_iothread_locked()); cpu->interrupt_request |= mask; - cpu->icount_decr.u16.high = -1; + atomic_set(&cpu->icount_decr.u16.high, -1); } /* diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c index 26a3ffbba1..cd75829cf2 100644 --- a/accel/tcg/user-exec.c +++ b/accel/tcg/user-exec.c @@ -25,6 +25,7 @@ #include "exec/cpu_ldst.h" #include "translate-all.h" #include "exec/helper-proto.h" +#include "qemu/atomic128.h" #undef EAX #undef ECX @@ -615,7 +616,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr, /* The following is only callable from other helpers, and matches up with the softmmu version. 
*/ -#ifdef CONFIG_ATOMIC128 +#if HAVE_ATOMIC128 || HAVE_CMPXCHG128 #undef EXTRA_ARGS #undef ATOMIC_NAME @@ -628,4 +629,4 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr, #define DATA_SIZE 16 #include "atomic_template.h" -#endif /* CONFIG_ATOMIC128 */ +#endif @@ -4697,9 +4697,9 @@ bool bdrv_op_is_blocked(BlockDriverState *bs, BlockOpType op, Error **errp) assert((int) op >= 0 && op < BLOCK_OP_TYPE_MAX); if (!QLIST_EMPTY(&bs->op_blockers[op])) { blocker = QLIST_FIRST(&bs->op_blockers[op]); - error_propagate(errp, error_copy(blocker->reason)); - error_prepend(errp, "Node '%s' is busy: ", - bdrv_get_device_or_node_name(bs)); + error_propagate_prepend(errp, error_copy(blocker->reason), + "Node '%s' is busy: ", + bdrv_get_device_or_node_name(bs)); return true; } return false; @@ -4803,9 +4803,6 @@ void bdrv_img_create(const char *filename, const char *fmt, if (options) { qemu_opts_do_parse(opts, options, NULL, &local_err); if (local_err) { - error_report_err(local_err); - local_err = NULL; - error_setg(errp, "Invalid options for file format '%s'", fmt); goto out; } } diff --git a/block/iscsi.c b/block/iscsi.c index bb69faf34a..73998c2860 100644 --- a/block/iscsi.c +++ b/block/iscsi.c @@ -1844,7 +1844,7 @@ static int iscsi_open(BlockDriverState *bs, QDict *options, int flags, iscsi_set_timeout(iscsi, timeout); #else if (timeout) { - error_report("iSCSI: ignoring timeout value for libiscsi <1.15.0"); + warn_report("iSCSI: ignoring timeout value for libiscsi <1.15.0"); } #endif diff --git a/block/qcow2.c b/block/qcow2.c index 7277feda13..4f8d2fa7bd 100644 --- a/block/qcow2.c +++ b/block/qcow2.c @@ -2208,8 +2208,8 @@ static void coroutine_fn qcow2_co_invalidate_cache(BlockDriverState *bs, qemu_co_mutex_unlock(&s->lock); qobject_unref(options); if (local_err) { - error_propagate(errp, local_err); - error_prepend(errp, "Could not reopen qcow2 layer: "); + error_propagate_prepend(errp, local_err, + "Could not reopen qcow2 layer: "); bs->drv = NULL; return; } else if (ret < 0) { diff --git a/block/qed.c b/block/qed.c index 689ea9d4d5..9377c0b7ad 100644 --- a/block/qed.c +++ b/block/qed.c @@ -1606,8 +1606,8 @@ static void coroutine_fn bdrv_qed_co_invalidate_cache(BlockDriverState *bs, ret = bdrv_qed_do_open(bs, NULL, bs->open_flags, &local_err); qemu_co_mutex_unlock(&s->table_lock); if (local_err) { - error_propagate(errp, local_err); - error_prepend(errp, "Could not reopen qed layer: "); + error_propagate_prepend(errp, local_err, + "Could not reopen qed layer: "); return; } else if (ret < 0) { error_setg_errno(errp, -ret, "Could not reopen qed layer"); diff --git a/block/rbd.c b/block/rbd.c index 014c68d629..e5bf5a146f 100644 --- a/block/rbd.c +++ b/block/rbd.c @@ -750,8 +750,8 @@ static int qemu_rbd_open(BlockDriverState *bs, QDict *options, int flags, /* Take care whenever deciding to actually deprecate; once this ability * is removed, we will not be able to open any images with legacy-styled * backing image strings. 
*/ - error_report("RBD options encoded in the filename as keyvalue pairs " - "is deprecated"); + warn_report("RBD options encoded in the filename as keyvalue pairs " + "is deprecated"); } /* Remove the processed options from the QDict (the visitor processes diff --git a/block/sheepdog.c b/block/sheepdog.c index b229a664d9..0125df9d49 100644 --- a/block/sheepdog.c +++ b/block/sheepdog.c @@ -572,7 +572,7 @@ static int connect_to_sdog(BDRVSheepdogState *s, Error **errp) if (s->addr->type == SOCKET_ADDRESS_TYPE_INET && fd >= 0) { int ret = socket_set_nodelay(fd); if (ret < 0) { - error_report("%s", strerror(errno)); + warn_report("can't set TCP_NODELAY: %s", strerror(errno)); } } diff --git a/block/vpc.c b/block/vpc.c index bf294abfa7..984187cadd 100644 --- a/block/vpc.c +++ b/block/vpc.c @@ -284,9 +284,11 @@ static int vpc_open(BlockDriverState *bs, QDict *options, int flags, checksum = be32_to_cpu(footer->checksum); footer->checksum = 0; - if (vpc_checksum(s->footer_buf, HEADER_SIZE) != checksum) - fprintf(stderr, "block-vpc: The header checksum of '%s' is " - "incorrect.\n", bs->filename); + if (vpc_checksum(s->footer_buf, HEADER_SIZE) != checksum) { + error_setg(errp, "Incorrect header checksum"); + ret = -EINVAL; + goto fail; + } /* Write 'checksum' back to footer, or else will leave it with zero. */ footer->checksum = cpu_to_be32(checksum); diff --git a/blockdev.c b/blockdev.c index a8755bd908..574adbcb7f 100644 --- a/blockdev.c +++ b/blockdev.c @@ -759,7 +759,8 @@ QemuOptsList qemu_legacy_drive_opts = { }, }; -DriveInfo *drive_new(QemuOpts *all_opts, BlockInterfaceType block_default_type) +DriveInfo *drive_new(QemuOpts *all_opts, BlockInterfaceType block_default_type, + Error **errp) { const char *value; BlockBackend *blk; @@ -808,7 +809,7 @@ DriveInfo *drive_new(QemuOpts *all_opts, BlockInterfaceType block_default_type) qemu_opt_rename(all_opts, opt_renames[i].from, opt_renames[i].to, &local_err); if (local_err) { - error_report_err(local_err); + error_propagate(errp, local_err); return NULL; } } @@ -819,7 +820,7 @@ DriveInfo *drive_new(QemuOpts *all_opts, BlockInterfaceType block_default_type) bool writethrough; if (bdrv_parse_cache_mode(value, &flags, &writethrough) != 0) { - error_report("invalid cache option"); + error_setg(errp, "invalid cache option"); return NULL; } @@ -847,7 +848,7 @@ DriveInfo *drive_new(QemuOpts *all_opts, BlockInterfaceType block_default_type) &error_abort); qemu_opts_absorb_qdict(legacy_opts, bs_opts, &local_err); if (local_err) { - error_report_err(local_err); + error_propagate(errp, local_err); goto fail; } @@ -860,7 +861,7 @@ DriveInfo *drive_new(QemuOpts *all_opts, BlockInterfaceType block_default_type) media = MEDIA_CDROM; read_only = true; } else { - error_report("'%s' invalid media", value); + error_setg(errp, "'%s' invalid media", value); goto fail; } } @@ -885,7 +886,7 @@ DriveInfo *drive_new(QemuOpts *all_opts, BlockInterfaceType block_default_type) type++) { } if (type == IF_COUNT) { - error_report("unsupported bus type '%s'", value); + error_setg(errp, "unsupported bus type '%s'", value); goto fail; } } else { @@ -902,7 +903,7 @@ DriveInfo *drive_new(QemuOpts *all_opts, BlockInterfaceType block_default_type) if (index != -1) { if (bus_id != 0 || unit_id != -1) { - error_report("index cannot be used with bus and unit"); + error_setg(errp, "index cannot be used with bus and unit"); goto fail; } bus_id = drive_index_to_bus_id(type, index); @@ -921,13 +922,13 @@ DriveInfo *drive_new(QemuOpts *all_opts, BlockInterfaceType block_default_type) } if 
(max_devs && unit_id >= max_devs) { - error_report("unit %d too big (max is %d)", unit_id, max_devs - 1); + error_setg(errp, "unit %d too big (max is %d)", unit_id, max_devs - 1); goto fail; } if (drive_get(type, bus_id, unit_id) != NULL) { - error_report("drive with bus=%d, unit=%d (index=%d) exists", - bus_id, unit_id, index); + error_setg(errp, "drive with bus=%d, unit=%d (index=%d) exists", + bus_id, unit_id, index); goto fail; } @@ -970,7 +971,7 @@ DriveInfo *drive_new(QemuOpts *all_opts, BlockInterfaceType block_default_type) if (werror != NULL) { if (type != IF_IDE && type != IF_SCSI && type != IF_VIRTIO && type != IF_NONE) { - error_report("werror is not supported by this bus type"); + error_setg(errp, "werror is not supported by this bus type"); goto fail; } qdict_put_str(bs_opts, "werror", werror); @@ -980,7 +981,7 @@ DriveInfo *drive_new(QemuOpts *all_opts, BlockInterfaceType block_default_type) if (rerror != NULL) { if (type != IF_IDE && type != IF_VIRTIO && type != IF_SCSI && type != IF_NONE) { - error_report("rerror is not supported by this bus type"); + error_setg(errp, "rerror is not supported by this bus type"); goto fail; } qdict_put_str(bs_opts, "rerror", rerror); @@ -991,7 +992,7 @@ DriveInfo *drive_new(QemuOpts *all_opts, BlockInterfaceType block_default_type) bs_opts = NULL; if (!blk) { if (local_err) { - error_report_err(local_err); + error_propagate(errp, local_err); } goto fail; } else { diff --git a/chardev/char-pty.c b/chardev/char-pty.c index e8d9a53476..f681d637c1 100644 --- a/chardev/char-pty.c +++ b/chardev/char-pty.c @@ -259,7 +259,7 @@ static void char_pty_open(Chardev *chr, qemu_set_nonblock(master_fd); chr->filename = g_strdup_printf("pty:%s", pty_name); - error_report("char device redirected to %s (label %s)", + error_printf("char device redirected to %s (label %s)\n", pty_name, chr->label); s = PTY_CHARDEV(chr); diff --git a/chardev/char.c b/chardev/char.c index e115166995..7f07a1bfbd 100644 --- a/chardev/char.c +++ b/chardev/char.c @@ -634,7 +634,7 @@ Chardev *qemu_chr_new_from_opts(QemuOpts *opts, Error **errp) chardev_name_foreach(help_string_append, str); - error_report("Available chardev backend types: %s", str->str); + error_printf("Available chardev backend types: %s\n", str->str); g_string_free(str, true); return NULL; } @@ -428,7 +428,7 @@ usb_redir="" opengl="" opengl_dmabuf="no" cpuid_h="no" -avx2_opt="no" +avx2_opt="" zlib="yes" capstone="" lzo="" @@ -457,12 +457,9 @@ gtk="" gtk_gl="no" tls_priority="NORMAL" gnutls="" -gnutls_rnd="" nettle="" -nettle_kdf="no" gcrypt="" gcrypt_hmac="no" -gcrypt_kdf="no" vte="" virglrenderer="" tpm="yes" @@ -1332,6 +1329,10 @@ for opt do ;; --disable-glusterfs) glusterfs="no" ;; + --disable-avx2) avx2_opt="no" + ;; + --enable-avx2) avx2_opt="yes" + ;; --enable-glusterfs) glusterfs="yes" ;; --disable-virtio-blk-data-plane|--enable-virtio-blk-data-plane) @@ -1706,6 +1707,7 @@ disabled with --disable-FEATURE, default is enabled if available: libxml2 for Parallels image format tcmalloc tcmalloc support jemalloc jemalloc support + avx2 AVX2 optimization support replication replication support vhost-vsock virtio sockets device support opengl opengl support @@ -2666,79 +2668,28 @@ fi ########################################## # GNUTLS probe -gnutls_works() { - # Unfortunately some distros have bad pkg-config information for gnutls - # such that it claims to exist but you get a compiler error if you try - # to use the options returned by --libs. 
Specifically, Ubuntu for --static - # builds doesn't work: - # https://bugs.launchpad.net/ubuntu/+source/gnutls26/+bug/1478035 - # - # So sanity check the cflags/libs before assuming gnutls can be used. - if ! $pkg_config --exists "gnutls"; then - return 1 - fi - - write_c_skeleton - compile_prog "$($pkg_config --cflags gnutls)" "$($pkg_config --libs gnutls)" -} - -gnutls_gcrypt=no -gnutls_nettle=no if test "$gnutls" != "no"; then - if gnutls_works; then + if $pkg_config --exists "gnutls >= 3.1.18"; then gnutls_cflags=$($pkg_config --cflags gnutls) gnutls_libs=$($pkg_config --libs gnutls) libs_softmmu="$gnutls_libs $libs_softmmu" libs_tools="$gnutls_libs $libs_tools" QEMU_CFLAGS="$QEMU_CFLAGS $gnutls_cflags" gnutls="yes" - - # gnutls_rnd requires >= 2.11.0 - if $pkg_config --exists "gnutls >= 2.11.0"; then - gnutls_rnd="yes" - else - gnutls_rnd="no" - fi - - if $pkg_config --exists 'gnutls >= 3.0'; then - gnutls_gcrypt=no - gnutls_nettle=yes - elif $pkg_config --exists 'gnutls >= 2.12'; then - case $($pkg_config --libs --static gnutls) in - *gcrypt*) - gnutls_gcrypt=yes - gnutls_nettle=no - ;; - *nettle*) - gnutls_gcrypt=no - gnutls_nettle=yes - ;; - *) - gnutls_gcrypt=yes - gnutls_nettle=no - ;; - esac - else - gnutls_gcrypt=yes - gnutls_nettle=no - fi elif test "$gnutls" = "yes"; then - feature_not_found "gnutls" "Install gnutls devel" + feature_not_found "gnutls" "Install gnutls devel >= 3.1.18" else gnutls="no" - gnutls_rnd="no" fi -else - gnutls_rnd="no" fi # If user didn't give a --disable/enable-gcrypt flag, # then mark as disabled if user requested nettle -# explicitly, or if gnutls links to nettle +# explicitly if test -z "$gcrypt" then - if test "$nettle" = "yes" || test "$gnutls_nettle" = "yes" + if test "$nettle" = "yes" then gcrypt="no" fi @@ -2746,16 +2697,16 @@ fi # If user didn't give a --disable/enable-nettle flag, # then mark as disabled if user requested gcrypt -# explicitly, or if gnutls links to gcrypt +# explicitly if test -z "$nettle" then - if test "$gcrypt" = "yes" || test "$gnutls_gcrypt" = "yes" + if test "$gcrypt" = "yes" then nettle="no" fi fi -has_libgcrypt_config() { +has_libgcrypt() { if ! has "libgcrypt-config" then return 1 @@ -2770,11 +2721,42 @@ has_libgcrypt_config() { fi fi + maj=`libgcrypt-config --version | awk -F . '{print $1}'` + min=`libgcrypt-config --version | awk -F . 
'{print $2}'` + + if test $maj != 1 || test $min -lt 5 + then + return 1 + fi + return 0 } + +if test "$nettle" != "no"; then + if $pkg_config --exists "nettle >= 2.7.1"; then + nettle_cflags=$($pkg_config --cflags nettle) + nettle_libs=$($pkg_config --libs nettle) + nettle_version=$($pkg_config --modversion nettle) + libs_softmmu="$nettle_libs $libs_softmmu" + libs_tools="$nettle_libs $libs_tools" + QEMU_CFLAGS="$QEMU_CFLAGS $nettle_cflags" + nettle="yes" + + if test -z "$gcrypt"; then + gcrypt="no" + fi + else + if test "$nettle" = "yes"; then + feature_not_found "nettle" "Install nettle devel >= 2.7.1" + else + nettle="no" + fi + fi +fi + if test "$gcrypt" != "no"; then - if has_libgcrypt_config; then + if has_libgcrypt; then gcrypt_cflags=$(libgcrypt-config --cflags) gcrypt_libs=$(libgcrypt-config --libs) # Debian has remove -lgpg-error from libgcrypt-config @@ -2788,22 +2770,6 @@ if test "$gcrypt" != "no"; then libs_tools="$gcrypt_libs $libs_tools" QEMU_CFLAGS="$QEMU_CFLAGS $gcrypt_cflags" gcrypt="yes" - if test -z "$nettle"; then - nettle="no" - fi - - cat > $TMPC << EOF -#include <gcrypt.h> -int main(void) { - gcry_kdf_derive(NULL, 0, GCRY_KDF_PBKDF2, - GCRY_MD_SHA256, - NULL, 0, 0, 0, NULL); - return 0; -} -EOF - if compile_prog "$gcrypt_cflags" "$gcrypt_libs" ; then - gcrypt_kdf=yes - fi cat > $TMPC << EOF #include <gcrypt.h> @@ -2819,7 +2785,7 @@ EOF fi else if test "$gcrypt" = "yes"; then - feature_not_found "gcrypt" "Install gcrypt devel" + feature_not_found "gcrypt" "Install gcrypt devel >= 1.5.0" else gcrypt="no" fi @@ -2827,36 +2793,6 @@ EOF fi -if test "$nettle" != "no"; then - if $pkg_config --exists "nettle"; then - nettle_cflags=$($pkg_config --cflags nettle) - nettle_libs=$($pkg_config --libs nettle) - nettle_version=$($pkg_config --modversion nettle) - libs_softmmu="$nettle_libs $libs_softmmu" - libs_tools="$nettle_libs $libs_tools" - QEMU_CFLAGS="$QEMU_CFLAGS $nettle_cflags" - nettle="yes" - - cat > $TMPC << EOF -#include <stddef.h> -#include <nettle/pbkdf2.h> -int main(void) { - pbkdf2_hmac_sha256(8, NULL, 1000, 8, NULL, 8, NULL); - return 0; -} -EOF - if compile_prog "$nettle_cflags" "$nettle_libs" ; then - nettle_kdf=yes - fi - else - if test "$nettle" = "yes"; then - feature_not_found "nettle" "Install nettle devel" - else - nettle="no" - fi - fi -fi - if test "$gcrypt" = "yes" && test "$nettle" = "yes" then error_exit "Only one of gcrypt & nettle can be enabled" @@ -3531,12 +3467,6 @@ if ! compile_prog "$CFLAGS" "$LIBS" ; then "build target" fi -# g_test_trap_subprocess added in 2.38. Used by some tests. -glib_subprocess=yes -if ! $pkg_config --atleast-version=2.38 glib-2.0; then - glib_subprocess=no -fi - # Silence clang 3.5.0 warnings about glib attribute __alloc_size__ usage cat > $TMPC << EOF #include <glib.h> @@ -4208,7 +4138,14 @@ if compile_prog "" "" ; then memfd=yes fi - +# check for usbfs +have_usbfs=no +if test "$linux_user" = "yes"; then + if check_include linux/usbdevice_fs.h; then + have_usbfs=yes + fi + have_usbfs=yes +fi # check for fallocate fallocate=no @@ -5100,7 +5037,7 @@ fi # There is no point enabling this if cpuid.h is not usable, # since we won't be able to select the new routines. 
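The cpuid.h comment above refers to runtime selection of the AVX2-optimized buffer routines: the compile test that follows only proves the compiler can emit AVX2 code behind #pragma GCC target, while the actual choice of routine happens at run time via CPUID. A minimal sketch of that dispatch, using the cpuid.h builtins (the function name is illustrative, not QEMU's):

    #include <cpuid.h>
    #include <stdbool.h>

    /* Query CPUID leaf 7, subleaf 0; EBX bit 5 (bit_AVX2) advertises AVX2. */
    static bool cpu_has_avx2(void)
    {
        unsigned eax, ebx, ecx, edx;

        if (!__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx)) {
            return false;  /* CPU does not implement leaf 7 */
        }
        return (ebx & bit_AVX2) != 0;
    }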
-if test $cpuid_h = yes; then +if test "$cpuid_h" = "yes" -a "$avx2_opt" != "no"; then cat > $TMPC << EOF #pragma GCC push_options #pragma GCC target("avx2") @@ -5114,6 +5051,8 @@ int main(int argc, char *argv[]) { return bar(argv[0]); } EOF if compile_object "" ; then avx2_opt="yes" + else + avx2_opt="no" fi fi @@ -5160,6 +5099,21 @@ EOF fi fi +cmpxchg128=no +if test "$int128" = yes -a "$atomic128" = no; then + cat > $TMPC << EOF +int main(void) +{ + unsigned __int128 x = 0, y = 0; + __sync_val_compare_and_swap_16(&x, y, x); + return 0; +} +EOF + if compile_prog "" "" ; then + cmpxchg128=yes + fi +fi + ######################################### # See if 64-bit atomic operations are supported. # Note that without __atomic builtins, we can only @@ -5967,11 +5921,8 @@ echo "GTK GL support $gtk_gl" echo "VTE support $vte $(echo_version $vte $vteversion)" echo "TLS priority $tls_priority" echo "GNUTLS support $gnutls" -echo "GNUTLS rnd $gnutls_rnd" echo "libgcrypt $gcrypt" -echo "libgcrypt kdf $gcrypt_kdf" echo "nettle $nettle $(echo_version $nettle $nettle_version)" -echo "nettle kdf $nettle_kdf" echo "libtasn1 $tasn1" echo "curses support $curses" echo "virgl support $virglrenderer $(echo_version $virglrenderer $virgl_version)" @@ -6316,6 +6267,9 @@ fi if test "$memfd" = "yes" ; then echo "CONFIG_MEMFD=y" >> $config_host_mak fi +if test "$have_usbfs" = "yes" ; then + echo "CONFIG_USBFS=y" >> $config_host_mak +fi if test "$fallocate" = "yes" ; then echo "CONFIG_FALLOCATE=y" >> $config_host_mak fi @@ -6407,24 +6361,15 @@ echo "CONFIG_TLS_PRIORITY=\"$tls_priority\"" >> $config_host_mak if test "$gnutls" = "yes" ; then echo "CONFIG_GNUTLS=y" >> $config_host_mak fi -if test "$gnutls_rnd" = "yes" ; then - echo "CONFIG_GNUTLS_RND=y" >> $config_host_mak -fi if test "$gcrypt" = "yes" ; then echo "CONFIG_GCRYPT=y" >> $config_host_mak if test "$gcrypt_hmac" = "yes" ; then echo "CONFIG_GCRYPT_HMAC=y" >> $config_host_mak fi - if test "$gcrypt_kdf" = "yes" ; then - echo "CONFIG_GCRYPT_KDF=y" >> $config_host_mak - fi fi if test "$nettle" = "yes" ; then echo "CONFIG_NETTLE=y" >> $config_host_mak echo "CONFIG_NETTLE_VERSION_MAJOR=${nettle_version%%.*}" >> $config_host_mak - if test "$nettle_kdf" = "yes" ; then - echo "CONFIG_NETTLE_KDF=y" >> $config_host_mak - fi fi if test "$tasn1" = "yes" ; then echo "CONFIG_TASN1=y" >> $config_host_mak @@ -6669,6 +6614,10 @@ if test "$atomic128" = "yes" ; then echo "CONFIG_ATOMIC128=y" >> $config_host_mak fi +if test "$cmpxchg128" = "yes" ; then + echo "CONFIG_CMPXCHG128=y" >> $config_host_mak +fi + if test "$atomic64" = "yes" ; then echo "CONFIG_ATOMIC64=y" >> $config_host_mak fi @@ -211,12 +211,12 @@ void qemu_tcg_configure(QemuOpts *opts, Error **errp) error_setg(errp, "No MTTCG when icount is enabled"); } else { #ifndef TARGET_SUPPORTS_MTTCG - error_report("Guest not yet converted to MTTCG - " - "you may get unexpected results"); + warn_report("Guest not yet converted to MTTCG - " + "you may get unexpected results"); #endif if (!check_tcg_memory_orders_compatible()) { - error_report("Guest expects a stronger memory ordering " - "than the host provides"); + warn_report("Guest expects a stronger memory ordering " + "than the host provides"); error_printf("This may cause strange/hard to debug errors\n"); } mttcg_enabled = true; @@ -509,8 +509,8 @@ static void icount_warp_rt(void) seqlock_write_lock(&timers_state.vm_clock_seqlock, &timers_state.vm_clock_lock); if (runstate_is_running()) { - int64_t clock = REPLAY_CLOCK(REPLAY_CLOCK_VIRTUAL_RT, - cpu_get_clock_locked()); + 
int64_t clock = REPLAY_CLOCK_LOCKED(REPLAY_CLOCK_VIRTUAL_RT, + cpu_get_clock_locked()); int64_t warp_delta; warp_delta = clock - timers_state.vm_clock_warp_start; @@ -1425,7 +1425,8 @@ static int tcg_cpu_exec(CPUState *cpu) ret = cpu_exec(cpu); cpu_exec_end(cpu); #ifdef CONFIG_PROFILER - tcg_time += profile_getclock() - ti; + atomic_set(&tcg_ctx->prof.cpu_exec_time, + tcg_ctx->prof.cpu_exec_time + profile_getclock() - ti); #endif return ret; } diff --git a/crypto/Makefile.objs b/crypto/Makefile.objs index 756bab111b..256c9aca1f 100644 --- a/crypto/Makefile.objs +++ b/crypto/Makefile.objs @@ -20,11 +20,11 @@ crypto-obj-y += tlscredsx509.o crypto-obj-y += tlssession.o crypto-obj-y += secret.o crypto-obj-$(CONFIG_GCRYPT) += random-gcrypt.o -crypto-obj-$(if $(CONFIG_GCRYPT),n,$(CONFIG_GNUTLS_RND)) += random-gnutls.o -crypto-obj-$(if $(CONFIG_GCRYPT),n,$(if $(CONFIG_GNUTLS_RND),n,y)) += random-platform.o +crypto-obj-$(if $(CONFIG_GCRYPT),n,$(CONFIG_GNUTLS)) += random-gnutls.o +crypto-obj-$(if $(CONFIG_GCRYPT),n,$(if $(CONFIG_GNUTLS),n,y)) += random-platform.o crypto-obj-y += pbkdf.o -crypto-obj-$(CONFIG_NETTLE_KDF) += pbkdf-nettle.o -crypto-obj-$(if $(CONFIG_NETTLE_KDF),n,$(CONFIG_GCRYPT_KDF)) += pbkdf-gcrypt.o +crypto-obj-$(CONFIG_NETTLE) += pbkdf-nettle.o +crypto-obj-$(if $(CONFIG_NETTLE),n,$(CONFIG_GCRYPT)) += pbkdf-gcrypt.o crypto-obj-y += ivgen.o crypto-obj-y += ivgen-essiv.o crypto-obj-y += ivgen-plain.o diff --git a/crypto/init.c b/crypto/init.c index f131c42306..c30156405a 100644 --- a/crypto/init.c +++ b/crypto/init.c @@ -37,33 +37,14 @@ /* #define DEBUG_GNUTLS */ /* - * If GNUTLS is built against GCrypt then - * - * - When GNUTLS >= 2.12, we must not initialize gcrypt threading - * because GNUTLS will do that itself - * - When GNUTLS < 2.12 we must always initialize gcrypt threading - * - When GNUTLS is disabled we must always initialize gcrypt threading - * - * But.... - * - * When gcrypt >= 1.6.0 we must not initialize gcrypt threading - * because gcrypt will do that itself. - * - * So we need to init gcrypt threading if + * We need to init gcrypt threading if * * - gcrypt < 1.6.0 - * AND - * - gnutls < 2.12 - * OR - * - gnutls is disabled * */ #if (defined(CONFIG_GCRYPT) && \ - (!defined(CONFIG_GNUTLS) || \ - (LIBGNUTLS_VERSION_NUMBER < 0x020c00)) && \ - (!defined(GCRYPT_VERSION_NUMBER) || \ - (GCRYPT_VERSION_NUMBER < 0x010600))) + (GCRYPT_VERSION_NUMBER < 0x010600)) #define QCRYPTO_INIT_GCRYPT_THREADS #else #undef QCRYPTO_INIT_GCRYPT_THREADS diff --git a/crypto/tlscredsx509.c b/crypto/tlscredsx509.c index 98ee0424e5..d6ab4a9862 100644 --- a/crypto/tlscredsx509.c +++ b/crypto/tlscredsx509.c @@ -72,14 +72,6 @@ qcrypto_tls_creds_check_cert_times(gnutls_x509_crt_t cert, } -#if LIBGNUTLS_VERSION_NUMBER >= 2 -/* - * The gnutls_x509_crt_get_basic_constraints function isn't - * available in GNUTLS 1.0.x branches. 
This isn't critical - * though, since gnutls_certificate_verify_peers2 will do - * pretty much the same check at runtime, so we can just - * disable this code - */ static int qcrypto_tls_creds_check_cert_basic_constraints(QCryptoTLSCredsX509 *creds, gnutls_x509_crt_t cert, @@ -130,7 +122,6 @@ qcrypto_tls_creds_check_cert_basic_constraints(QCryptoTLSCredsX509 *creds, return 0; } -#endif static int @@ -299,14 +290,12 @@ qcrypto_tls_creds_check_cert(QCryptoTLSCredsX509 *creds, return -1; } -#if LIBGNUTLS_VERSION_NUMBER >= 2 if (qcrypto_tls_creds_check_cert_basic_constraints(creds, cert, certFile, isServer, isCA, errp) < 0) { return -1; } -#endif if (qcrypto_tls_creds_check_cert_key_usage(creds, cert, certFile, @@ -615,7 +604,6 @@ qcrypto_tls_creds_x509_load(QCryptoTLSCredsX509 *creds, } if (cert != NULL && key != NULL) { -#if LIBGNUTLS_VERSION_NUMBER >= 0x030111 char *password = NULL; if (creds->passwordid) { password = qcrypto_secret_lookup_as_utf8(creds->passwordid, @@ -630,15 +618,6 @@ qcrypto_tls_creds_x509_load(QCryptoTLSCredsX509 *creds, password, 0); g_free(password); -#else /* LIBGNUTLS_VERSION_NUMBER < 0x030111 */ - if (creds->passwordid) { - error_setg(errp, "PKCS8 decryption requires GNUTLS >= 3.1.11"); - goto cleanup; - } - ret = gnutls_certificate_set_x509_key_file(creds->data, - cert, key, - GNUTLS_X509_FMT_PEM); -#endif if (ret < 0) { error_setg(errp, "Cannot load certificate '%s' & key '%s': %s", cert, key, gnutls_strerror(ret)); diff --git a/crypto/tlssession.c b/crypto/tlssession.c index 66a6fbe19c..2f28fa7f71 100644 --- a/crypto/tlssession.c +++ b/crypto/tlssession.c @@ -90,13 +90,7 @@ qcrypto_tls_session_pull(void *opaque, void *buf, size_t len) } #define TLS_PRIORITY_ADDITIONAL_ANON "+ANON-DH" - -#if GNUTLS_VERSION_MAJOR >= 3 -#define TLS_ECDHE_PSK "+ECDHE-PSK:" -#else -#define TLS_ECDHE_PSK "" -#endif -#define TLS_PRIORITY_ADDITIONAL_PSK TLS_ECDHE_PSK "+DHE-PSK:+PSK" +#define TLS_PRIORITY_ADDITIONAL_PSK "+ECDHE-PSK:+DHE-PSK:+PSK" QCryptoTLSSession * qcrypto_tls_session_new(QCryptoTLSCreds *creds, diff --git a/crypto/xts.c b/crypto/xts.c index 95212341f6..4277ad40de 100644 --- a/crypto/xts.c +++ b/crypto/xts.c @@ -24,52 +24,75 @@ */ #include "qemu/osdep.h" +#include "qemu/bswap.h" #include "crypto/xts.h" -static void xts_mult_x(uint8_t *I) +typedef union { + uint8_t b[XTS_BLOCK_SIZE]; + uint64_t u[2]; +} xts_uint128; + +static inline void xts_uint128_xor(xts_uint128 *D, + const xts_uint128 *S1, + const xts_uint128 *S2) { - int x; - uint8_t t, tt; + D->u[0] = S1->u[0] ^ S2->u[0]; + D->u[1] = S1->u[1] ^ S2->u[1]; +} - for (x = t = 0; x < 16; x++) { - tt = I[x] >> 7; - I[x] = ((I[x] << 1) | t) & 0xFF; - t = tt; - } - if (tt) { - I[0] ^= 0x87; +static inline void xts_uint128_cpu_to_les(xts_uint128 *v) +{ + cpu_to_le64s(&v->u[0]); + cpu_to_le64s(&v->u[1]); +} + +static inline void xts_uint128_le_to_cpus(xts_uint128 *v) +{ + le64_to_cpus(&v->u[0]); + le64_to_cpus(&v->u[1]); +} + +static void xts_mult_x(xts_uint128 *I) +{ + uint64_t tt; + + xts_uint128_le_to_cpus(I); + + tt = I->u[0] >> 63; + I->u[0] <<= 1; + + if (I->u[1] >> 63) { + I->u[0] ^= 0x87; } + I->u[1] <<= 1; + I->u[1] |= tt; + + xts_uint128_cpu_to_les(I); } /** - * xts_tweak_uncrypt: + * xts_tweak_encdec: * @param ctxt: the cipher context * @param func: the cipher function - * @src: buffer providing the cipher text of XTS_BLOCK_SIZE bytes - * @dst: buffer to output the plain text of XTS_BLOCK_SIZE bytes + * @src: buffer providing the input text of XTS_BLOCK_SIZE bytes + * @dst: buffer to output the output text of 
XTS_BLOCK_SIZE bytes * @iv: the initialization vector tweak of XTS_BLOCK_SIZE bytes * - * Decrypt data with a tweak + * Encrypt/decrypt data with a tweak */ -static void xts_tweak_decrypt(const void *ctx, - xts_cipher_func *func, - const uint8_t *src, - uint8_t *dst, - uint8_t *iv) +static inline void xts_tweak_encdec(const void *ctx, + xts_cipher_func *func, + const xts_uint128 *src, + xts_uint128 *dst, + xts_uint128 *iv) { - unsigned long x; - /* tweak encrypt block i */ - for (x = 0; x < XTS_BLOCK_SIZE; x++) { - dst[x] = src[x] ^ iv[x]; - } + xts_uint128_xor(dst, src, iv); - func(ctx, XTS_BLOCK_SIZE, dst, dst); + func(ctx, XTS_BLOCK_SIZE, dst->b, dst->b); - for (x = 0; x < XTS_BLOCK_SIZE; x++) { - dst[x] = dst[x] ^ iv[x]; - } + xts_uint128_xor(dst, dst, iv); /* LFSR the tweak */ xts_mult_x(iv); @@ -85,7 +108,7 @@ void xts_decrypt(const void *datactx, uint8_t *dst, const uint8_t *src) { - uint8_t PP[XTS_BLOCK_SIZE], CC[XTS_BLOCK_SIZE], T[XTS_BLOCK_SIZE]; + xts_uint128 PP, CC, T; unsigned long i, m, mo, lim; /* get number of blocks */ @@ -102,72 +125,53 @@ void xts_decrypt(const void *datactx, } /* encrypt the iv */ - encfunc(tweakctx, XTS_BLOCK_SIZE, T, iv); - - for (i = 0; i < lim; i++) { - xts_tweak_decrypt(datactx, decfunc, src, dst, T); - - src += XTS_BLOCK_SIZE; - dst += XTS_BLOCK_SIZE; + encfunc(tweakctx, XTS_BLOCK_SIZE, T.b, iv); + + if (QEMU_PTR_IS_ALIGNED(src, sizeof(uint64_t)) && + QEMU_PTR_IS_ALIGNED(dst, sizeof(uint64_t))) { + xts_uint128 *S = (xts_uint128 *)src; + xts_uint128 *D = (xts_uint128 *)dst; + for (i = 0; i < lim; i++, S++, D++) { + xts_tweak_encdec(datactx, decfunc, S, D, &T); + } + } else { + xts_uint128 D; + + for (i = 0; i < lim; i++) { + memcpy(&D, src, XTS_BLOCK_SIZE); + xts_tweak_encdec(datactx, decfunc, &D, &D, &T); + memcpy(dst, &D, XTS_BLOCK_SIZE); + src += XTS_BLOCK_SIZE; + dst += XTS_BLOCK_SIZE; + } } /* if length is not a multiple of XTS_BLOCK_SIZE then */ if (mo > 0) { - memcpy(CC, T, XTS_BLOCK_SIZE); - xts_mult_x(CC); + xts_uint128 S, D; + memcpy(&CC, &T, XTS_BLOCK_SIZE); + xts_mult_x(&CC); /* PP = tweak decrypt block m-1 */ - xts_tweak_decrypt(datactx, decfunc, src, PP, CC); + memcpy(&S, src, XTS_BLOCK_SIZE); + xts_tweak_encdec(datactx, decfunc, &S, &PP, &CC); /* Pm = first length % XTS_BLOCK_SIZE bytes of PP */ for (i = 0; i < mo; i++) { - CC[i] = src[XTS_BLOCK_SIZE + i]; - dst[XTS_BLOCK_SIZE + i] = PP[i]; + CC.b[i] = src[XTS_BLOCK_SIZE + i]; + dst[XTS_BLOCK_SIZE + i] = PP.b[i]; } for (; i < XTS_BLOCK_SIZE; i++) { - CC[i] = PP[i]; + CC.b[i] = PP.b[i]; } /* Pm-1 = Tweak uncrypt CC */ - xts_tweak_decrypt(datactx, decfunc, CC, dst, T); + xts_tweak_encdec(datactx, decfunc, &CC, &D, &T); + memcpy(dst, &D, XTS_BLOCK_SIZE); } /* Decrypt the iv back */ - decfunc(tweakctx, XTS_BLOCK_SIZE, iv, T); -} - - -/** - * xts_tweak_crypt: - * @param ctxt: the cipher context - * @param func: the cipher function - * @src: buffer providing the plain text of XTS_BLOCK_SIZE bytes - * @dst: buffer to output the cipher text of XTS_BLOCK_SIZE bytes - * @iv: the initialization vector tweak of XTS_BLOCK_SIZE bytes - * - * Encrypt data with a tweak - */ -static void xts_tweak_encrypt(const void *ctx, - xts_cipher_func *func, - const uint8_t *src, - uint8_t *dst, - uint8_t *iv) -{ - unsigned long x; - - /* tweak encrypt block i */ - for (x = 0; x < XTS_BLOCK_SIZE; x++) { - dst[x] = src[x] ^ iv[x]; - } - - func(ctx, XTS_BLOCK_SIZE, dst, dst); - - for (x = 0; x < XTS_BLOCK_SIZE; x++) { - dst[x] = dst[x] ^ iv[x]; - } - - /* LFSR the tweak */ - xts_mult_x(iv); + decfunc(tweakctx, 
XTS_BLOCK_SIZE, iv, T.b); } @@ -180,7 +184,7 @@ void xts_encrypt(const void *datactx, uint8_t *dst, const uint8_t *src) { - uint8_t PP[XTS_BLOCK_SIZE], CC[XTS_BLOCK_SIZE], T[XTS_BLOCK_SIZE]; + xts_uint128 PP, CC, T; unsigned long i, m, mo, lim; /* get number of blocks */ @@ -197,34 +201,50 @@ void xts_encrypt(const void *datactx, } /* encrypt the iv */ - encfunc(tweakctx, XTS_BLOCK_SIZE, T, iv); + encfunc(tweakctx, XTS_BLOCK_SIZE, T.b, iv); + + if (QEMU_PTR_IS_ALIGNED(src, sizeof(uint64_t)) && + QEMU_PTR_IS_ALIGNED(dst, sizeof(uint64_t))) { + xts_uint128 *S = (xts_uint128 *)src; + xts_uint128 *D = (xts_uint128 *)dst; + for (i = 0; i < lim; i++, S++, D++) { + xts_tweak_encdec(datactx, encfunc, S, D, &T); + } + } else { + xts_uint128 D; - for (i = 0; i < lim; i++) { - xts_tweak_encrypt(datactx, encfunc, src, dst, T); + for (i = 0; i < lim; i++) { + memcpy(&D, src, XTS_BLOCK_SIZE); + xts_tweak_encdec(datactx, encfunc, &D, &D, &T); + memcpy(dst, &D, XTS_BLOCK_SIZE); - dst += XTS_BLOCK_SIZE; - src += XTS_BLOCK_SIZE; + dst += XTS_BLOCK_SIZE; + src += XTS_BLOCK_SIZE; + } } /* if length is not a multiple of XTS_BLOCK_SIZE then */ if (mo > 0) { + xts_uint128 S, D; /* CC = tweak encrypt block m-1 */ - xts_tweak_encrypt(datactx, encfunc, src, CC, T); + memcpy(&S, src, XTS_BLOCK_SIZE); + xts_tweak_encdec(datactx, encfunc, &S, &CC, &T); /* Cm = first length % XTS_BLOCK_SIZE bytes of CC */ for (i = 0; i < mo; i++) { - PP[i] = src[XTS_BLOCK_SIZE + i]; - dst[XTS_BLOCK_SIZE + i] = CC[i]; + PP.b[i] = src[XTS_BLOCK_SIZE + i]; + dst[XTS_BLOCK_SIZE + i] = CC.b[i]; } for (; i < XTS_BLOCK_SIZE; i++) { - PP[i] = CC[i]; + PP.b[i] = CC.b[i]; } /* Cm-1 = Tweak encrypt PP */ - xts_tweak_encrypt(datactx, encfunc, PP, dst, T); + xts_tweak_encdec(datactx, encfunc, &PP, &D, &T); + memcpy(dst, &D, XTS_BLOCK_SIZE); } /* Decrypt the iv back */ - decfunc(tweakctx, XTS_BLOCK_SIZE, iv, T); + decfunc(tweakctx, XTS_BLOCK_SIZE, iv, T.b); } diff --git a/default-configs/alpha-softmmu.mak b/default-configs/alpha-softmmu.mak index bbe361f01a..eb58b40254 100644 --- a/default-configs/alpha-softmmu.mak +++ b/default-configs/alpha-softmmu.mak @@ -19,3 +19,4 @@ CONFIG_IDE_CMD646=y CONFIG_I8259=y CONFIG_MC146818RTC=y CONFIG_ISA_TESTDEV=y +CONFIG_SMC37C669=y diff --git a/default-configs/hyperv.mak b/default-configs/hyperv.mak new file mode 100644 index 0000000000..5d0d9fd830 --- /dev/null +++ b/default-configs/hyperv.mak @@ -0,0 +1,2 @@ +CONFIG_HYPERV=$(CONFIG_KVM) +CONFIG_HYPERV_TESTDEV=y diff --git a/default-configs/i386-softmmu.mak b/default-configs/i386-softmmu.mak index 8c7d4a0fa0..210cff2781 100644 --- a/default-configs/i386-softmmu.mak +++ b/default-configs/i386-softmmu.mak @@ -3,6 +3,7 @@ include pci.mak include sound.mak include usb.mak +include hyperv.mak CONFIG_QXL=$(CONFIG_SPICE) CONFIG_VGA_ISA=y CONFIG_VGA_CIRRUS=y @@ -58,7 +59,6 @@ CONFIG_XIO3130=y CONFIG_IOH3420=y CONFIG_I82801B11=y CONFIG_SMBIOS=y -CONFIG_HYPERV_TESTDEV=$(CONFIG_KVM) CONFIG_PXB=y CONFIG_ACPI_VMGENID=y CONFIG_FW_CFG_DMA=y diff --git a/device-hotplug.c b/device-hotplug.c index cd427e2c76..6090d5f1e9 100644 --- a/device-hotplug.c +++ b/device-hotplug.c @@ -28,6 +28,7 @@ #include "sysemu/block-backend.h" #include "sysemu/blockdev.h" #include "qapi/qmp/qdict.h" +#include "qapi/error.h" #include "qemu/config-file.h" #include "qemu/option.h" #include "sysemu/sysemu.h" @@ -36,6 +37,7 @@ static DriveInfo *add_init_drive(const char *optstr) { + Error *err = NULL; DriveInfo *dinfo; QemuOpts *opts; MachineClass *mc; @@ -45,8 +47,9 @@ static DriveInfo 
*add_init_drive(const char *optstr) return NULL; mc = MACHINE_GET_CLASS(current_machine); - dinfo = drive_new(opts, mc->block_default_type); + dinfo = drive_new(opts, mc->block_default_type, &err); if (!dinfo) { + error_report_err(err); qemu_opts_del(opts); return NULL; } diff --git a/docs/COLO-FT.txt b/docs/COLO-FT.txt index 70cfb9ce7d..6302469d0e 100644 --- a/docs/COLO-FT.txt +++ b/docs/COLO-FT.txt @@ -110,6 +110,40 @@ Note: HeartBeat has not been implemented yet, so you need to trigger failover process by using 'x-colo-lost-heartbeat' command. +== COLO operation status == + ++-----------------+ +| | +| Start COLO | +| | ++--------+--------+ + | + | Main qmp command: + | migrate-set-capabilities with x-colo + | migrate + | + v ++--------+--------+ +| | +| COLO running | +| | ++--------+--------+ + | + | Main qmp command: + | x-colo-lost-heartbeat + | or + | some error happened + v ++--------+--------+ +| | send qmp event: +| COLO failover | COLO_EXIT +| | ++-----------------+ + +COLO uses QMP commands to switch and to report its operation status. +The diagram shows only the main QMP commands; the details are covered +in the test procedure below. + == Test procedure == 1. Startup qemu Primary: diff --git a/docs/devel/testing.rst b/docs/devel/testing.rst index 727c4019b5..fcfad87614 100644 --- a/docs/devel/testing.rst +++ b/docs/devel/testing.rst @@ -43,15 +43,13 @@ add a new unit test: 3. Add the test to ``tests/Makefile.include``. First, name the unit test program and add it to ``$(check-unit-y)``; then add a rule to build the - executable. Optionally, you can add a magical variable to support ``gcov``. - For example: + executable. For example: .. code:: check-unit-y += tests/foo-test$(EXESUF) tests/foo-test$(EXESUF): tests/foo-test.o $(test-util-obj-y) ... - gcov-files-foo-test-y = util/foo.c Since unit tests don't require environment variables, the simplest way to debug a unit test failure is often directly invoking it or even running it under @@ -965,6 +965,7 @@ void cpu_exec_realizefn(CPUState *cpu, Error **errp) tcg_target_initialized = true; cc->tcg_initialize(); } + tlb_init(cpu); #ifndef CONFIG_USER_ONLY if (qdev_get_vmsd(DEVICE(cpu)) == NULL) { @@ -3906,11 +3907,6 @@ int qemu_target_page_bits_min(void) } #endif -/* - * A helper function for the _utterly broken_ virtio device model to find out if - * it's running on a big endian machine. Don't do this at home kids! - */ -bool target_words_bigendian(void); bool target_words_bigendian(void) { #if defined(TARGET_WORDS_BIGENDIAN) diff --git a/fpu/softfloat.c b/fpu/softfloat.c index 46ae206172..e1eef954e6 100644 --- a/fpu/softfloat.c +++ b/fpu/softfloat.c @@ -726,8 +726,7 @@ static FloatParts addsub_floats(FloatParts a, FloatParts b, bool subtract, * IEC/IEEE Standard for Binary Floating-Point Arithmetic.
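* (QEMU_FLATTEN, used below in place of the bare attribute, is QEMU's portability wrapper for __attribute__((flatten)); the attribute inlines the unpack/compute/round-pack helpers into each arithmetic entry point.)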
*/ -float16 __attribute__((flatten)) float16_add(float16 a, float16 b, - float_status *status) +float16 QEMU_FLATTEN float16_add(float16 a, float16 b, float_status *status) { FloatParts pa = float16_unpack_canonical(a, status); FloatParts pb = float16_unpack_canonical(b, status); @@ -736,8 +735,7 @@ float16 __attribute__((flatten)) float16_add(float16 a, float16 b, return float16_round_pack_canonical(pr, status); } -float32 __attribute__((flatten)) float32_add(float32 a, float32 b, - float_status *status) +float32 QEMU_FLATTEN float32_add(float32 a, float32 b, float_status *status) { FloatParts pa = float32_unpack_canonical(a, status); FloatParts pb = float32_unpack_canonical(b, status); @@ -746,8 +744,7 @@ float32 __attribute__((flatten)) float32_add(float32 a, float32 b, return float32_round_pack_canonical(pr, status); } -float64 __attribute__((flatten)) float64_add(float64 a, float64 b, - float_status *status) +float64 QEMU_FLATTEN float64_add(float64 a, float64 b, float_status *status) { FloatParts pa = float64_unpack_canonical(a, status); FloatParts pb = float64_unpack_canonical(b, status); @@ -756,8 +753,7 @@ float64 __attribute__((flatten)) float64_add(float64 a, float64 b, return float64_round_pack_canonical(pr, status); } -float16 __attribute__((flatten)) float16_sub(float16 a, float16 b, - float_status *status) +float16 QEMU_FLATTEN float16_sub(float16 a, float16 b, float_status *status) { FloatParts pa = float16_unpack_canonical(a, status); FloatParts pb = float16_unpack_canonical(b, status); @@ -766,8 +762,7 @@ float16 __attribute__((flatten)) float16_sub(float16 a, float16 b, return float16_round_pack_canonical(pr, status); } -float32 __attribute__((flatten)) float32_sub(float32 a, float32 b, - float_status *status) +float32 QEMU_FLATTEN float32_sub(float32 a, float32 b, float_status *status) { FloatParts pa = float32_unpack_canonical(a, status); FloatParts pb = float32_unpack_canonical(b, status); @@ -776,8 +771,7 @@ float32 __attribute__((flatten)) float32_sub(float32 a, float32 b, return float32_round_pack_canonical(pr, status); } -float64 __attribute__((flatten)) float64_sub(float64 a, float64 b, - float_status *status) +float64 QEMU_FLATTEN float64_sub(float64 a, float64 b, float_status *status) { FloatParts pa = float64_unpack_canonical(a, status); FloatParts pb = float64_unpack_canonical(b, status); @@ -835,8 +829,7 @@ static FloatParts mul_floats(FloatParts a, FloatParts b, float_status *s) g_assert_not_reached(); } -float16 __attribute__((flatten)) float16_mul(float16 a, float16 b, - float_status *status) +float16 QEMU_FLATTEN float16_mul(float16 a, float16 b, float_status *status) { FloatParts pa = float16_unpack_canonical(a, status); FloatParts pb = float16_unpack_canonical(b, status); @@ -845,8 +838,7 @@ float16 __attribute__((flatten)) float16_mul(float16 a, float16 b, return float16_round_pack_canonical(pr, status); } -float32 __attribute__((flatten)) float32_mul(float32 a, float32 b, - float_status *status) +float32 QEMU_FLATTEN float32_mul(float32 a, float32 b, float_status *status) { FloatParts pa = float32_unpack_canonical(a, status); FloatParts pb = float32_unpack_canonical(b, status); @@ -855,8 +847,7 @@ float32 __attribute__((flatten)) float32_mul(float32 a, float32 b, return float32_round_pack_canonical(pr, status); } -float64 __attribute__((flatten)) float64_mul(float64 a, float64 b, - float_status *status) +float64 QEMU_FLATTEN float64_mul(float64 a, float64 b, float_status *status) { FloatParts pa = float64_unpack_canonical(a, status); FloatParts pb = 
float64_unpack_canonical(b, status); @@ -1068,7 +1059,7 @@ static FloatParts muladd_floats(FloatParts a, FloatParts b, FloatParts c, return a; } -float16 __attribute__((flatten)) float16_muladd(float16 a, float16 b, float16 c, +float16 QEMU_FLATTEN float16_muladd(float16 a, float16 b, float16 c, int flags, float_status *status) { FloatParts pa = float16_unpack_canonical(a, status); @@ -1079,7 +1070,7 @@ float16 __attribute__((flatten)) float16_muladd(float16 a, float16 b, float16 c, return float16_round_pack_canonical(pr, status); } -float32 __attribute__((flatten)) float32_muladd(float32 a, float32 b, float32 c, +float32 QEMU_FLATTEN float32_muladd(float32 a, float32 b, float32 c, int flags, float_status *status) { FloatParts pa = float32_unpack_canonical(a, status); @@ -1090,7 +1081,7 @@ float32 __attribute__((flatten)) float32_muladd(float32 a, float32 b, float32 c, return float32_round_pack_canonical(pr, status); } -float64 __attribute__((flatten)) float64_muladd(float64 a, float64 b, float64 c, +float64 QEMU_FLATTEN float64_muladd(float64 a, float64 b, float64 c, int flags, float_status *status) { FloatParts pa = float64_unpack_canonical(a, status); @@ -2414,21 +2405,21 @@ static FloatParts sqrt_float(FloatParts a, float_status *s, const FloatFmt *p) return a; } -float16 __attribute__((flatten)) float16_sqrt(float16 a, float_status *status) +float16 QEMU_FLATTEN float16_sqrt(float16 a, float_status *status) { FloatParts pa = float16_unpack_canonical(a, status); FloatParts pr = sqrt_float(pa, status, &float16_params); return float16_round_pack_canonical(pr, status); } -float32 __attribute__((flatten)) float32_sqrt(float32 a, float_status *status) +float32 QEMU_FLATTEN float32_sqrt(float32 a, float_status *status) { FloatParts pa = float32_unpack_canonical(a, status); FloatParts pr = sqrt_float(pa, status, &float32_params); return float32_round_pack_canonical(pr, status); } -float64 __attribute__((flatten)) float64_sqrt(float64 a, float_status *status) +float64 QEMU_FLATTEN float64_sqrt(float64 a, float_status *status) { FloatParts pa = float64_unpack_canonical(a, status); FloatParts pr = sqrt_float(pa, status, &float64_params); diff --git a/fsdev/qemu-fsdev-dummy.c b/fsdev/qemu-fsdev-dummy.c index 6dc0fbc4c4..489cd29081 100644 --- a/fsdev/qemu-fsdev-dummy.c +++ b/fsdev/qemu-fsdev-dummy.c @@ -15,7 +15,7 @@ #include "qemu/config-file.h" #include "qemu/module.h" -int qemu_fsdev_add(QemuOpts *opts) +int qemu_fsdev_add(QemuOpts *opts, Error **errp) { return 0; } diff --git a/fsdev/qemu-fsdev.c b/fsdev/qemu-fsdev.c index 8a4afbffbd..7a3b87cc9e 100644 --- a/fsdev/qemu-fsdev.c +++ b/fsdev/qemu-fsdev.c @@ -30,7 +30,7 @@ static FsDriverTable FsDrivers[] = { { .name = "proxy", .ops = &proxy_ops}, }; -int qemu_fsdev_add(QemuOpts *opts) +int qemu_fsdev_add(QemuOpts *opts, Error **errp) { int i; struct FsDriverListEntry *fsle; @@ -38,10 +38,9 @@ int qemu_fsdev_add(QemuOpts *opts) const char *fsdriver = qemu_opt_get(opts, "fsdriver"); const char *writeout = qemu_opt_get(opts, "writeout"); bool ro = qemu_opt_get_bool(opts, "readonly", 0); - Error *local_err = NULL; if (!fsdev_id) { - error_report("fsdev: No id specified"); + error_setg(errp, "fsdev: No id specified"); return -1; } @@ -53,11 +52,11 @@ int qemu_fsdev_add(QemuOpts *opts) } if (i == ARRAY_SIZE(FsDrivers)) { - error_report("fsdev: fsdriver %s not found", fsdriver); + error_setg(errp, "fsdev: fsdriver %s not found", fsdriver); return -1; } } else { - error_report("fsdev: No fsdriver specified"); + error_setg(errp, "fsdev: No fsdriver 
specified"); return -1; } @@ -76,8 +75,7 @@ int qemu_fsdev_add(QemuOpts *opts) } if (fsle->fse.ops->parse_opts) { - if (fsle->fse.ops->parse_opts(opts, &fsle->fse, &local_err)) { - error_report_err(local_err); + if (fsle->fse.ops->parse_opts(opts, &fsle->fse, errp)) { g_free(fsle->fse.fsdev_id); g_free(fsle); return -1; diff --git a/fsdev/qemu-fsdev.h b/fsdev/qemu-fsdev.h index 65e4b1cfab..d9716b4144 100644 --- a/fsdev/qemu-fsdev.h +++ b/fsdev/qemu-fsdev.h @@ -38,7 +38,7 @@ typedef struct FsDriverListEntry { QTAILQ_ENTRY(FsDriverListEntry) next; } FsDriverListEntry; -int qemu_fsdev_add(QemuOpts *opts); +int qemu_fsdev_add(QemuOpts *opts, Error **errp); FsDriverEntry *get_fsdev_fsentry(char *id); extern FileOperations local_ops; extern FileOperations handle_ops; @@ -20,7 +20,6 @@ #include "qapi/error.h" #include "qemu/error-report.h" #include "qemu/cutils.h" -#include "cpu.h" #include "trace-root.h" #ifdef CONFIG_USER_ONLY #include "qemu.h" diff --git a/hw/9pfs/9p-handle.c b/hw/9pfs/9p-handle.c index f3641dbe4a..3465b1ef30 100644 --- a/hw/9pfs/9p-handle.c +++ b/hw/9pfs/9p-handle.c @@ -19,6 +19,7 @@ #include <grp.h> #include <sys/socket.h> #include <sys/un.h> +#include "qapi/error.h" #include "qemu/xattr.h" #include "qemu/cutils.h" #include "qemu/error-report.h" @@ -655,12 +656,13 @@ static int handle_parse_opts(QemuOpts *opts, FsDriverEntry *fse, Error **errp) warn_report("handle backend is deprecated"); if (sec_model) { - error_report("Invalid argument security_model specified with handle fsdriver"); + error_setg(errp, + "Invalid argument security_model specified with handle fsdriver"); return -1; } if (!path) { - error_report("fsdev: No path specified"); + error_setg(errp, "fsdev: No path specified"); return -1; } fse->path = g_strdup(path); diff --git a/hw/9pfs/9p-local.c b/hw/9pfs/9p-local.c index c30f4f26bd..08e673a79c 100644 --- a/hw/9pfs/9p-local.c +++ b/hw/9pfs/9p-local.c @@ -1509,8 +1509,8 @@ static int local_parse_opts(QemuOpts *opts, FsDriverEntry *fse, Error **errp) fsdev_throttle_parse_opts(opts, &fse->fst, &local_err); if (local_err) { - error_propagate(errp, local_err); - error_prepend(errp, "invalid throttle configuration: "); + error_propagate_prepend(errp, local_err, + "invalid throttle configuration: "); return -1; } diff --git a/hw/9pfs/xen-9p-backend.c b/hw/9pfs/xen-9p-backend.c index 6026780f95..3f54a21c76 100644 --- a/hw/9pfs/xen-9p-backend.c +++ b/hw/9pfs/xen-9p-backend.c @@ -14,6 +14,7 @@ #include "hw/9pfs/9p.h" #include "hw/xen/xen_backend.h" #include "hw/9pfs/xen-9pfs.h" +#include "qapi/error.h" #include "qemu/config-file.h" #include "qemu/option.h" #include "fsdev/qemu-fsdev.h" @@ -355,6 +356,7 @@ static int xen_9pfs_free(struct XenDevice *xendev) static int xen_9pfs_connect(struct XenDevice *xendev) { + Error *err = NULL; int i; Xen9pfsDev *xen_9pdev = container_of(xendev, Xen9pfsDev, xendev); V9fsState *s = &xen_9pdev->state; @@ -452,7 +454,10 @@ static int xen_9pfs_connect(struct XenDevice *xendev) qemu_opt_set(fsdev, "path", xen_9pdev->path, NULL); qemu_opt_set(fsdev, "security_model", xen_9pdev->security_model, NULL); qemu_opts_set_id(fsdev, s->fsconf.fsdev_id); - qemu_fsdev_add(fsdev); + qemu_fsdev_add(fsdev, &err); + if (err) { + error_report_err(err); + } v9fs_device_realize_common(s, &xen_9p_transport, NULL); return 0; diff --git a/hw/Makefile.objs b/hw/Makefile.objs index a19c1417ed..30722ccf98 100644 --- a/hw/Makefile.objs +++ b/hw/Makefile.objs @@ -9,6 +9,7 @@ devices-dirs-$(CONFIG_SOFTMMU) += cpu/ devices-dirs-$(CONFIG_SOFTMMU) += display/ 
devices-dirs-$(CONFIG_SOFTMMU) += dma/ devices-dirs-$(CONFIG_SOFTMMU) += gpio/ +devices-dirs-$(CONFIG_HYPERV) += hyperv/ devices-dirs-$(CONFIG_SOFTMMU) += i2c/ devices-dirs-$(CONFIG_SOFTMMU) += ide/ devices-dirs-$(CONFIG_SOFTMMU) += input/ diff --git a/hw/arm/boot.c b/hw/arm/boot.c index 20c71d7d96..586baa9b64 100644 --- a/hw/arm/boot.c +++ b/hw/arm/boot.c @@ -24,6 +24,7 @@ #include "qemu/config-file.h" #include "qemu/option.h" #include "exec/address-spaces.h" +#include "qemu/units.h" /* Kernel boot protocol is specified in the kernel docs * Documentation/arm/Booting and Documentation/arm64/booting.txt @@ -36,6 +37,8 @@ #define ARM64_TEXT_OFFSET_OFFSET 8 #define ARM64_MAGIC_OFFSET 56 +#define BOOTLOADER_MAX_SIZE (4 * KiB) + AddressSpace *arm_boot_address_space(ARMCPU *cpu, const struct arm_boot_info *info) { @@ -184,6 +187,8 @@ static void write_bootloader(const char *name, hwaddr addr, code[i] = tswap32(insn); } + assert((len * sizeof(uint32_t)) < BOOTLOADER_MAX_SIZE); + rom_add_blob_fixed_as(name, code, len * sizeof(uint32_t), addr, as); g_free(code); @@ -919,6 +924,19 @@ static uint64_t load_aarch64_image(const char *filename, hwaddr mem_base, memcpy(&hdrvals, buffer + ARM64_TEXT_OFFSET_OFFSET, sizeof(hdrvals)); if (hdrvals[1] != 0) { kernel_load_offset = le64_to_cpu(hdrvals[0]); + + /* + * We write our startup "bootloader" at the very bottom of RAM, + * so that bit can't be used for the image. Luckily the Image + * format specification is that the image requests only an offset + * from a 2MB boundary, not an absolute load address. So if the + * image requests an offset that might mean it overlaps with the + * bootloader, we can just load it starting at 2MB+offset rather + * than 0MB + offset. + */ + if (kernel_load_offset < BOOTLOADER_MAX_SIZE) { + kernel_load_offset += 2 * MiB; + } } } diff --git a/hw/audio/es1370.c b/hw/audio/es1370.c index 4f980a598b..97789a0771 100644 --- a/hw/audio/es1370.c +++ b/hw/audio/es1370.c @@ -585,10 +585,13 @@ static uint64_t es1370_read(void *opaque, hwaddr addr, unsigned size) #endif break; + case ES1370_REG_ADC_FRAMECNT: + d += 2; + goto framecnt; case ES1370_REG_DAC1_FRAMECNT: case ES1370_REG_DAC2_FRAMECNT: - case ES1370_REG_ADC_FRAMECNT: d += (addr - ES1370_REG_DAC1_FRAMECNT) >> 3; + framecnt: val = d->frame_cnt; #ifdef DEBUG_ES1370 { @@ -602,10 +605,13 @@ static uint64_t es1370_read(void *opaque, hwaddr addr, unsigned size) #endif break; + case ES1370_REG_ADC_FRAMEADR: + d += 2; + goto frameadr; case ES1370_REG_DAC1_FRAMEADR: case ES1370_REG_DAC2_FRAMEADR: - case ES1370_REG_ADC_FRAMEADR: d += (addr - ES1370_REG_DAC1_FRAMEADR) >> 3; + frameadr: val = d->frame_addr; break; diff --git a/hw/core/Makefile.objs b/hw/core/Makefile.objs index eb88ca979e..a799c83815 100644 --- a/hw/core/Makefile.objs +++ b/hw/core/Makefile.objs @@ -20,6 +20,5 @@ common-obj-$(CONFIG_SOFTMMU) += register.o common-obj-$(CONFIG_SOFTMMU) += or-irq.o common-obj-$(CONFIG_SOFTMMU) += split-irq.o common-obj-$(CONFIG_PLATFORM_BUS) += platform-bus.o - -obj-$(CONFIG_SOFTMMU) += generic-loader.o -obj-$(CONFIG_SOFTMMU) += null-machine.o +common-obj-$(CONFIG_SOFTMMU) += generic-loader.o +common-obj-$(CONFIG_SOFTMMU) += null-machine.o diff --git a/hw/core/generic-loader.c b/hw/core/generic-loader.c index fde32cbda1..fbae05fb3b 100644 --- a/hw/core/generic-loader.c +++ b/hw/core/generic-loader.c @@ -130,11 +130,7 @@ static void generic_loader_realize(DeviceState *dev, Error **errp) s->cpu = first_cpu; } -#ifdef TARGET_WORDS_BIGENDIAN - big_endian = 1; -#else - big_endian = 0; -#endif + 
big_endian = target_words_bigendian(); if (s->file) { AddressSpace *as = s->cpu ? s->cpu->as : NULL; @@ -204,6 +200,7 @@ static void generic_loader_class_init(ObjectClass *klass, void *data) dc->unrealize = generic_loader_unrealize; dc->props = generic_loader_props; dc->desc = "Generic Loader"; + set_bit(DEVICE_CATEGORY_MISC, dc->categories); } static TypeInfo generic_loader_info = { diff --git a/hw/core/hotplug.c b/hw/core/hotplug.c index 2253072d0e..17ac986685 100644 --- a/hw/core/hotplug.c +++ b/hw/core/hotplug.c @@ -35,16 +35,6 @@ void hotplug_handler_plug(HotplugHandler *plug_handler, } } -void hotplug_handler_post_plug(HotplugHandler *plug_handler, - DeviceState *plugged_dev) -{ - HotplugHandlerClass *hdc = HOTPLUG_HANDLER_GET_CLASS(plug_handler); - - if (hdc->post_plug) { - hdc->post_plug(plug_handler, plugged_dev); - } -} - void hotplug_handler_unplug_request(HotplugHandler *plug_handler, DeviceState *plugged_dev, Error **errp) diff --git a/hw/core/null-machine.c b/hw/core/null-machine.c index cde4d3eb57..76d3f8e39c 100644 --- a/hw/core/null-machine.c +++ b/hw/core/null-machine.c @@ -18,7 +18,7 @@ #include "hw/boards.h" #include "sysemu/sysemu.h" #include "exec/address-spaces.h" -#include "cpu.h" +#include "qom/cpu.h" static void machine_none_init(MachineState *mch) { diff --git a/hw/core/qdev.c b/hw/core/qdev.c index 046d8f1f76..6b3cc55b27 100644 --- a/hw/core/qdev.c +++ b/hw/core/qdev.c @@ -832,14 +832,6 @@ static void device_set_realized(Object *obj, bool value, Error **errp) DEVICE_LISTENER_CALL(realize, Forward, dev); - if (hotplug_ctrl) { - hotplug_handler_plug(hotplug_ctrl, dev, &local_err); - } - - if (local_err != NULL) { - goto post_realize_fail; - } - /* * always free/re-initialize here since the value cannot be cleaned up * in device_unrealize due to its usage later on in the unplug path @@ -869,8 +861,12 @@ static void device_set_realized(Object *obj, bool value, Error **errp) dev->pending_deleted_event = false; if (hotplug_ctrl) { - hotplug_handler_post_plug(hotplug_ctrl, dev); - } + hotplug_handler_plug(hotplug_ctrl, dev, &local_err); + if (local_err != NULL) { + goto child_realize_fail; + } + } + } else if (!value && dev->realized) { Error **local_errp = NULL; QLIST_FOREACH(bus, &dev->child_bus, sibling) { diff --git a/hw/display/cg3.c b/hw/display/cg3.c index 1c199ab369..e50d97e48c 100644 --- a/hw/display/cg3.c +++ b/hw/display/cg3.c @@ -307,7 +307,7 @@ static void cg3_realizefn(DeviceState *dev, Error **errp) ret = load_image_mr(fcode_filename, &s->rom); g_free(fcode_filename); if (ret < 0 || ret > FCODE_MAX_ROM_SIZE) { - error_report("cg3: could not load prom '%s'", CG3_ROM_FILE); + warn_report("cg3: could not load prom '%s'", CG3_ROM_FILE); } } diff --git a/hw/display/tcx.c b/hw/display/tcx.c index b2786ee8d0..66f2459226 100644 --- a/hw/display/tcx.c +++ b/hw/display/tcx.c @@ -823,7 +823,7 @@ static void tcx_realizefn(DeviceState *dev, Error **errp) ret = load_image_mr(fcode_filename, &s->rom); g_free(fcode_filename); if (ret < 0 || ret > FCODE_MAX_ROM_SIZE) { - error_report("tcx: could not load prom '%s'", TCX_ROM_FILE); + warn_report("tcx: could not load prom '%s'", TCX_ROM_FILE); } } diff --git a/hw/hyperv/Makefile.objs b/hw/hyperv/Makefile.objs new file mode 100644 index 0000000000..edaca2f763 --- /dev/null +++ b/hw/hyperv/Makefile.objs @@ -0,0 +1,2 @@ +obj-y += hyperv.o +obj-$(CONFIG_HYPERV_TESTDEV) += hyperv_testdev.o diff --git a/hw/hyperv/hyperv.c b/hw/hyperv/hyperv.c new file mode 100644 index 0000000000..a28e7249d8 --- /dev/null +++ 
b/hw/hyperv/hyperv.c @@ -0,0 +1,654 @@ +/* + * Hyper-V guest/hypervisor interaction + * + * Copyright (c) 2015-2018 Virtuozzo International GmbH. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "qemu/main-loop.h" +#include "qapi/error.h" +#include "exec/address-spaces.h" +#include "sysemu/kvm.h" +#include "qemu/bitops.h" +#include "qemu/error-report.h" +#include "qemu/queue.h" +#include "qemu/rcu.h" +#include "qemu/rcu_queue.h" +#include "hw/hyperv/hyperv.h" + +typedef struct SynICState { + DeviceState parent_obj; + + CPUState *cs; + + bool enabled; + hwaddr msg_page_addr; + hwaddr event_page_addr; + MemoryRegion msg_page_mr; + MemoryRegion event_page_mr; + struct hyperv_message_page *msg_page; + struct hyperv_event_flags_page *event_page; +} SynICState; + +#define TYPE_SYNIC "hyperv-synic" +#define SYNIC(obj) OBJECT_CHECK(SynICState, (obj), TYPE_SYNIC) + +static SynICState *get_synic(CPUState *cs) +{ + return SYNIC(object_resolve_path_component(OBJECT(cs), "synic")); +} + +static void synic_update(SynICState *synic, bool enable, + hwaddr msg_page_addr, hwaddr event_page_addr) +{ + + synic->enabled = enable; + if (synic->msg_page_addr != msg_page_addr) { + if (synic->msg_page_addr) { + memory_region_del_subregion(get_system_memory(), + &synic->msg_page_mr); + } + if (msg_page_addr) { + memory_region_add_subregion(get_system_memory(), msg_page_addr, + &synic->msg_page_mr); + } + synic->msg_page_addr = msg_page_addr; + } + if (synic->event_page_addr != event_page_addr) { + if (synic->event_page_addr) { + memory_region_del_subregion(get_system_memory(), + &synic->event_page_mr); + } + if (event_page_addr) { + memory_region_add_subregion(get_system_memory(), event_page_addr, + &synic->event_page_mr); + } + synic->event_page_addr = event_page_addr; + } +} + +void hyperv_synic_update(CPUState *cs, bool enable, + hwaddr msg_page_addr, hwaddr event_page_addr) +{ + SynICState *synic = get_synic(cs); + + if (!synic) { + return; + } + + synic_update(synic, enable, msg_page_addr, event_page_addr); +} + +static void synic_realize(DeviceState *dev, Error **errp) +{ + Object *obj = OBJECT(dev); + SynICState *synic = SYNIC(dev); + char *msgp_name, *eventp_name; + uint32_t vp_index; + + /* memory region names have to be globally unique */ + vp_index = hyperv_vp_index(synic->cs); + msgp_name = g_strdup_printf("synic-%u-msg-page", vp_index); + eventp_name = g_strdup_printf("synic-%u-event-page", vp_index); + + memory_region_init_ram(&synic->msg_page_mr, obj, msgp_name, + sizeof(*synic->msg_page), &error_abort); + memory_region_init_ram(&synic->event_page_mr, obj, eventp_name, + sizeof(*synic->event_page), &error_abort); + synic->msg_page = memory_region_get_ram_ptr(&synic->msg_page_mr); + synic->event_page = memory_region_get_ram_ptr(&synic->event_page_mr); + + g_free(msgp_name); + g_free(eventp_name); +} +static void synic_reset(DeviceState *dev) +{ + SynICState *synic = SYNIC(dev); + memset(synic->msg_page, 0, sizeof(*synic->msg_page)); + memset(synic->event_page, 0, sizeof(*synic->event_page)); + synic_update(synic, false, 0, 0); +} + +static void synic_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = synic_realize; + dc->reset = synic_reset; + dc->user_creatable = false; +} + +void hyperv_synic_add(CPUState *cs) +{ + Object *obj; + SynICState *synic; + + obj = object_new(TYPE_SYNIC); + synic = SYNIC(obj); + synic->cs = cs; 
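+ /* Make the SynIC a QOM child of its vcpu so get_synic() can resolve it by path; after object_unref() the child property holds the only reference, and setting "realized" allocates the msg/event page RAM (the pages are mapped later, by synic_update()). */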
+ object_property_add_child(OBJECT(cs), "synic", obj, &error_abort); + object_unref(obj); + object_property_set_bool(obj, true, "realized", &error_abort); +} + +void hyperv_synic_reset(CPUState *cs) +{ + device_reset(DEVICE(get_synic(cs))); +} + +static const TypeInfo synic_type_info = { + .name = TYPE_SYNIC, + .parent = TYPE_DEVICE, + .instance_size = sizeof(SynICState), + .class_init = synic_class_init, +}; + +static void synic_register_types(void) +{ + type_register_static(&synic_type_info); +} + +type_init(synic_register_types) + +/* + * KVM has its own message producers (SynIC timers). To guarantee + * serialization with both KVM vcpu and the guest cpu, the messages are first + * staged in an intermediate area and then posted to the SynIC message page in + * the vcpu thread. + */ +typedef struct HvSintStagedMessage { + /* message content staged by hyperv_post_msg */ + struct hyperv_message msg; + /* callback + data (r/o) to complete the processing in a BH */ + HvSintMsgCb cb; + void *cb_data; + /* message posting status filled by cpu_post_msg */ + int status; + /* passing the buck: */ + enum { + /* initial state */ + HV_STAGED_MSG_FREE, + /* + * hyperv_post_msg (e.g. in main loop) grabs the staged area (FREE -> + * BUSY), copies msg, and schedules cpu_post_msg on the assigned cpu + */ + HV_STAGED_MSG_BUSY, + /* + * cpu_post_msg (vcpu thread) tries to copy the staged msg to the msg + * slot and notify the guest, records the status, marks the posting + * done (BUSY -> POSTED), and schedules the sint_msg_bh BH + */ + HV_STAGED_MSG_POSTED, + /* + * sint_msg_bh (BH) verifies that the posting is done, runs the + * callback, and starts over (POSTED -> FREE) + */ + } state; +} HvSintStagedMessage; + +struct HvSintRoute { + uint32_t sint; + SynICState *synic; + int gsi; + EventNotifier sint_set_notifier; + EventNotifier sint_ack_notifier; + + HvSintStagedMessage *staged_msg; + + unsigned refcount; +}; + +static CPUState *hyperv_find_vcpu(uint32_t vp_index) +{ + CPUState *cs = qemu_get_cpu(vp_index); + assert(hyperv_vp_index(cs) == vp_index); + return cs; +} + +/* + * BH to complete the processing of a staged message. + */ +static void sint_msg_bh(void *opaque) +{ + HvSintRoute *sint_route = opaque; + HvSintStagedMessage *staged_msg = sint_route->staged_msg; + + if (atomic_read(&staged_msg->state) != HV_STAGED_MSG_POSTED) { + /* status not ready yet (spurious ack from guest?), ignore */ + return; + } + + staged_msg->cb(staged_msg->cb_data, staged_msg->status); + staged_msg->status = 0; + + /* staged message processing finished, ready to start over */ + atomic_set(&staged_msg->state, HV_STAGED_MSG_FREE); + /* drop the reference taken in hyperv_post_msg */ + hyperv_sint_route_unref(sint_route); +} + +/* + * Worker to transfer the message from the staging area into the SynIC message * page in vcpu context.
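+ * (Scheduled with async_run_on_cpu() from hyperv_post_msg(), which is what keeps it serialized against KVM's own SynIC writers; it performs the BUSY -> POSTED transition.)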
+ */ +static void cpu_post_msg(CPUState *cs, run_on_cpu_data data) +{ + HvSintRoute *sint_route = data.host_ptr; + HvSintStagedMessage *staged_msg = sint_route->staged_msg; + SynICState *synic = sint_route->synic; + struct hyperv_message *dst_msg; + bool wait_for_sint_ack = false; + + assert(staged_msg->state == HV_STAGED_MSG_BUSY); + + if (!synic->enabled || !synic->msg_page_addr) { + staged_msg->status = -ENXIO; + goto posted; + } + + dst_msg = &synic->msg_page->slot[sint_route->sint]; + + if (dst_msg->header.message_type != HV_MESSAGE_NONE) { + dst_msg->header.message_flags |= HV_MESSAGE_FLAG_PENDING; + staged_msg->status = -EAGAIN; + wait_for_sint_ack = true; + } else { + memcpy(dst_msg, &staged_msg->msg, sizeof(*dst_msg)); + staged_msg->status = hyperv_sint_route_set_sint(sint_route); + } + + memory_region_set_dirty(&synic->msg_page_mr, 0, sizeof(*synic->msg_page)); + +posted: + atomic_set(&staged_msg->state, HV_STAGED_MSG_POSTED); + /* + * Notify the msg originator of the progress made; if the slot was busy we + * set msg_pending flag in it so it will be the guest who will do EOM and + * trigger the notification from KVM via sint_ack_notifier + */ + if (!wait_for_sint_ack) { + aio_bh_schedule_oneshot(qemu_get_aio_context(), sint_msg_bh, + sint_route); + } +} + +/* + * Post a Hyper-V message to the staging area, for delivery to guest in the + * vcpu thread. + */ +int hyperv_post_msg(HvSintRoute *sint_route, struct hyperv_message *src_msg) +{ + HvSintStagedMessage *staged_msg = sint_route->staged_msg; + + assert(staged_msg); + + /* grab the staging area */ + if (atomic_cmpxchg(&staged_msg->state, HV_STAGED_MSG_FREE, + HV_STAGED_MSG_BUSY) != HV_STAGED_MSG_FREE) { + return -EAGAIN; + } + + memcpy(&staged_msg->msg, src_msg, sizeof(*src_msg)); + + /* hold a reference on sint_route until the callback is finished */ + hyperv_sint_route_ref(sint_route); + + /* schedule message posting attempt in vcpu thread */ + async_run_on_cpu(sint_route->synic->cs, cpu_post_msg, + RUN_ON_CPU_HOST_PTR(sint_route)); + return 0; +} + +static void sint_ack_handler(EventNotifier *notifier) +{ + HvSintRoute *sint_route = container_of(notifier, HvSintRoute, + sint_ack_notifier); + event_notifier_test_and_clear(notifier); + + /* + * the guest consumed the previous message so complete the current one with + * -EAGAIN and let the msg originator retry + */ + aio_bh_schedule_oneshot(qemu_get_aio_context(), sint_msg_bh, sint_route); +} + +/* + * Set given event flag for a given sint on a given vcpu, and signal the sint. 
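+ * Returns -EINVAL for an out-of-range event number, -ENXIO when the target SynIC is disabled or has no event page, and otherwise the result of signalling the sint (0 when the flag was already pending, in which case no signal is needed).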
+ */ +int hyperv_set_event_flag(HvSintRoute *sint_route, unsigned eventno) +{ + int ret; + SynICState *synic = sint_route->synic; + unsigned long *flags, set_mask; + unsigned set_idx; + + if (eventno > HV_EVENT_FLAGS_COUNT) { + return -EINVAL; + } + if (!synic->enabled || !synic->event_page_addr) { + return -ENXIO; + } + + set_idx = BIT_WORD(eventno); + set_mask = BIT_MASK(eventno); + flags = synic->event_page->slot[sint_route->sint].flags; + + if ((atomic_fetch_or(&flags[set_idx], set_mask) & set_mask) != set_mask) { + memory_region_set_dirty(&synic->event_page_mr, 0, + sizeof(*synic->event_page)); + ret = hyperv_sint_route_set_sint(sint_route); + } else { + ret = 0; + } + return ret; +} + +HvSintRoute *hyperv_sint_route_new(uint32_t vp_index, uint32_t sint, + HvSintMsgCb cb, void *cb_data) +{ + HvSintRoute *sint_route; + EventNotifier *ack_notifier; + int r, gsi; + CPUState *cs; + SynICState *synic; + + cs = hyperv_find_vcpu(vp_index); + if (!cs) { + return NULL; + } + + synic = get_synic(cs); + if (!synic) { + return NULL; + } + + sint_route = g_new0(HvSintRoute, 1); + r = event_notifier_init(&sint_route->sint_set_notifier, false); + if (r) { + goto err; + } + + + ack_notifier = cb ? &sint_route->sint_ack_notifier : NULL; + if (ack_notifier) { + sint_route->staged_msg = g_new0(HvSintStagedMessage, 1); + sint_route->staged_msg->cb = cb; + sint_route->staged_msg->cb_data = cb_data; + + r = event_notifier_init(ack_notifier, false); + if (r) { + goto err_sint_set_notifier; + } + + event_notifier_set_handler(ack_notifier, sint_ack_handler); + } + + gsi = kvm_irqchip_add_hv_sint_route(kvm_state, vp_index, sint); + if (gsi < 0) { + goto err_gsi; + } + + r = kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, + &sint_route->sint_set_notifier, + ack_notifier, gsi); + if (r) { + goto err_irqfd; + } + sint_route->gsi = gsi; + sint_route->synic = synic; + sint_route->sint = sint; + sint_route->refcount = 1; + + return sint_route; + +err_irqfd: + kvm_irqchip_release_virq(kvm_state, gsi); +err_gsi: + if (ack_notifier) { + event_notifier_set_handler(ack_notifier, NULL); + event_notifier_cleanup(ack_notifier); + g_free(sint_route->staged_msg); + } +err_sint_set_notifier: + event_notifier_cleanup(&sint_route->sint_set_notifier); +err: + g_free(sint_route); + + return NULL; +} + +void hyperv_sint_route_ref(HvSintRoute *sint_route) +{ + sint_route->refcount++; +} + +void hyperv_sint_route_unref(HvSintRoute *sint_route) +{ + if (!sint_route) { + return; + } + + assert(sint_route->refcount > 0); + + if (--sint_route->refcount) { + return; + } + + kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, + &sint_route->sint_set_notifier, + sint_route->gsi); + kvm_irqchip_release_virq(kvm_state, sint_route->gsi); + if (sint_route->staged_msg) { + event_notifier_set_handler(&sint_route->sint_ack_notifier, NULL); + event_notifier_cleanup(&sint_route->sint_ack_notifier); + g_free(sint_route->staged_msg); + } + event_notifier_cleanup(&sint_route->sint_set_notifier); + g_free(sint_route); +} + +int hyperv_sint_route_set_sint(HvSintRoute *sint_route) +{ + return event_notifier_set(&sint_route->sint_set_notifier); +} + +typedef struct MsgHandler { + struct rcu_head rcu; + QLIST_ENTRY(MsgHandler) link; + uint32_t conn_id; + HvMsgHandler handler; + void *data; +} MsgHandler; + +typedef struct EventFlagHandler { + struct rcu_head rcu; + QLIST_ENTRY(EventFlagHandler) link; + uint32_t conn_id; + EventNotifier *notifier; +} EventFlagHandler; + +static QLIST_HEAD(, MsgHandler) msg_handlers; +static QLIST_HEAD(, EventFlagHandler) 
event_flag_handlers; +static QemuMutex handlers_mutex; + +static void __attribute__((constructor)) hv_init(void) +{ + QLIST_INIT(&msg_handlers); + QLIST_INIT(&event_flag_handlers); + qemu_mutex_init(&handlers_mutex); +} + +int hyperv_set_msg_handler(uint32_t conn_id, HvMsgHandler handler, void *data) +{ + int ret; + MsgHandler *mh; + + qemu_mutex_lock(&handlers_mutex); + QLIST_FOREACH(mh, &msg_handlers, link) { + if (mh->conn_id == conn_id) { + if (handler) { + ret = -EEXIST; + } else { + QLIST_REMOVE_RCU(mh, link); + g_free_rcu(mh, rcu); + ret = 0; + } + goto unlock; + } + } + + if (handler) { + mh = g_new(MsgHandler, 1); + mh->conn_id = conn_id; + mh->handler = handler; + mh->data = data; + QLIST_INSERT_HEAD_RCU(&msg_handlers, mh, link); + ret = 0; + } else { + ret = -ENOENT; + } +unlock: + qemu_mutex_unlock(&handlers_mutex); + return ret; +} + +uint16_t hyperv_hcall_post_message(uint64_t param, bool fast) +{ + uint16_t ret; + hwaddr len; + struct hyperv_post_message_input *msg; + MsgHandler *mh; + + if (fast) { + return HV_STATUS_INVALID_HYPERCALL_CODE; + } + if (param & (__alignof__(*msg) - 1)) { + return HV_STATUS_INVALID_ALIGNMENT; + } + + len = sizeof(*msg); + msg = cpu_physical_memory_map(param, &len, 0); + if (len < sizeof(*msg)) { + ret = HV_STATUS_INSUFFICIENT_MEMORY; + goto unmap; + } + if (msg->payload_size > sizeof(msg->payload)) { + ret = HV_STATUS_INVALID_HYPERCALL_INPUT; + goto unmap; + } + + ret = HV_STATUS_INVALID_CONNECTION_ID; + rcu_read_lock(); + QLIST_FOREACH_RCU(mh, &msg_handlers, link) { + if (mh->conn_id == (msg->connection_id & HV_CONNECTION_ID_MASK)) { + ret = mh->handler(msg, mh->data); + break; + } + } + rcu_read_unlock(); + +unmap: + cpu_physical_memory_unmap(msg, len, 0, 0); + return ret; +} + +static int set_event_flag_handler(uint32_t conn_id, EventNotifier *notifier) +{ + int ret; + EventFlagHandler *handler; + + qemu_mutex_lock(&handlers_mutex); + QLIST_FOREACH(handler, &event_flag_handlers, link) { + if (handler->conn_id == conn_id) { + if (notifier) { + ret = -EEXIST; + } else { + QLIST_REMOVE_RCU(handler, link); + g_free_rcu(handler, rcu); + ret = 0; + } + goto unlock; + } + } + + if (notifier) { + handler = g_new(EventFlagHandler, 1); + handler->conn_id = conn_id; + handler->notifier = notifier; + QLIST_INSERT_HEAD_RCU(&event_flag_handlers, handler, link); + ret = 0; + } else { + ret = -ENOENT; + } +unlock: + qemu_mutex_unlock(&handlers_mutex); + return ret; +} + +static bool process_event_flags_userspace; + +int hyperv_set_event_flag_handler(uint32_t conn_id, EventNotifier *notifier) +{ + if (!process_event_flags_userspace && + !kvm_check_extension(kvm_state, KVM_CAP_HYPERV_EVENTFD)) { + process_event_flags_userspace = true; + + warn_report("Hyper-V event signaling is not supported by this kernel; " + "using slower userspace hypercall processing"); + } + + if (!process_event_flags_userspace) { + struct kvm_hyperv_eventfd hvevfd = { + .conn_id = conn_id, + .fd = notifier ? event_notifier_get_fd(notifier) : -1, + .flags = notifier ? 0 : KVM_HYPERV_EVENTFD_DEASSIGN, + }; + + return kvm_vm_ioctl(kvm_state, KVM_HYPERV_EVENTFD, &hvevfd); + } + return set_event_flag_handler(conn_id, notifier); +} + +uint16_t hyperv_hcall_signal_event(uint64_t param, bool fast) +{ + uint16_t ret; + EventFlagHandler *handler; + + if (unlikely(!fast)) { + hwaddr addr = param; + + if (addr & (__alignof__(addr) - 1)) { + return HV_STATUS_INVALID_ALIGNMENT; + } + + param = ldq_phys(&address_space_memory, addr); + } + + /* + * Per spec, bits 32-47 contain the extra "flag number". 
However, we + * have no use for it, and in all known use cases it is zero, so just + * report lookup failure if it isn't. + */ + if (param & 0xffff00000000ULL) { + return HV_STATUS_INVALID_PORT_ID; + } + /* remaining bits are reserved-zero */ + if (param & ~HV_CONNECTION_ID_MASK) { + return HV_STATUS_INVALID_HYPERCALL_INPUT; + } + + ret = HV_STATUS_INVALID_CONNECTION_ID; + rcu_read_lock(); + QLIST_FOREACH_RCU(handler, &event_flag_handlers, link) { + if (handler->conn_id == param) { + event_notifier_set(handler->notifier); + ret = 0; + break; + } + } + rcu_read_unlock(); + return ret; +} diff --git a/hw/hyperv/hyperv_testdev.c b/hw/hyperv/hyperv_testdev.c new file mode 100644 index 0000000000..4880333cf5 --- /dev/null +++ b/hw/hyperv/hyperv_testdev.c @@ -0,0 +1,327 @@ +/* + * QEMU KVM Hyper-V test device to support Hyper-V kvm-unit-tests + * + * Copyright (C) 2015 Andrey Smetanin <asmetanin@virtuozzo.com> + * + * Authors: + * Andrey Smetanin <asmetanin@virtuozzo.com> + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#include "qemu/osdep.h" +#include "qemu/main-loop.h" +#include "qemu/queue.h" +#include "hw/qdev.h" +#include "hw/isa/isa.h" +#include "hw/hyperv/hyperv.h" + +typedef struct TestSintRoute { + QLIST_ENTRY(TestSintRoute) le; + uint8_t vp_index; + uint8_t sint; + HvSintRoute *sint_route; +} TestSintRoute; + +typedef struct TestMsgConn { + QLIST_ENTRY(TestMsgConn) le; + uint8_t conn_id; + HvSintRoute *sint_route; + struct hyperv_message msg; +} TestMsgConn; + +typedef struct TestEvtConn { + QLIST_ENTRY(TestEvtConn) le; + uint8_t conn_id; + HvSintRoute *sint_route; + EventNotifier notifier; +} TestEvtConn; + +struct HypervTestDev { + ISADevice parent_obj; + MemoryRegion sint_control; + QLIST_HEAD(, TestSintRoute) sint_routes; + QLIST_HEAD(, TestMsgConn) msg_conns; + QLIST_HEAD(, TestEvtConn) evt_conns; +}; +typedef struct HypervTestDev HypervTestDev; + +#define TYPE_HYPERV_TEST_DEV "hyperv-testdev" +#define HYPERV_TEST_DEV(obj) \ + OBJECT_CHECK(HypervTestDev, (obj), TYPE_HYPERV_TEST_DEV) + +enum { + HV_TEST_DEV_SINT_ROUTE_CREATE = 1, + HV_TEST_DEV_SINT_ROUTE_DESTROY, + HV_TEST_DEV_SINT_ROUTE_SET_SINT, + HV_TEST_DEV_MSG_CONN_CREATE, + HV_TEST_DEV_MSG_CONN_DESTROY, + HV_TEST_DEV_EVT_CONN_CREATE, + HV_TEST_DEV_EVT_CONN_DESTROY, +}; + +static void sint_route_create(HypervTestDev *dev, + uint8_t vp_index, uint8_t sint) +{ + TestSintRoute *sint_route; + + sint_route = g_new0(TestSintRoute, 1); + assert(sint_route); + + sint_route->vp_index = vp_index; + sint_route->sint = sint; + + sint_route->sint_route = hyperv_sint_route_new(vp_index, sint, NULL, NULL); + assert(sint_route->sint_route); + + QLIST_INSERT_HEAD(&dev->sint_routes, sint_route, le); +} + +static TestSintRoute *sint_route_find(HypervTestDev *dev, + uint8_t vp_index, uint8_t sint) +{ + TestSintRoute *sint_route; + + QLIST_FOREACH(sint_route, &dev->sint_routes, le) { + if (sint_route->vp_index == vp_index && sint_route->sint == sint) { + return sint_route; + } + } + assert(false); + return NULL; +} + +static void sint_route_destroy(HypervTestDev *dev, + uint8_t vp_index, uint8_t sint) +{ + TestSintRoute *sint_route; + + sint_route = sint_route_find(dev, vp_index, sint); + QLIST_REMOVE(sint_route, le); + hyperv_sint_route_unref(sint_route->sint_route); + g_free(sint_route); +} + +static void sint_route_set_sint(HypervTestDev *dev, + uint8_t vp_index, uint8_t sint) +{ + TestSintRoute *sint_route; + + sint_route = sint_route_find(dev,
vp_index, sint); + + hyperv_sint_route_set_sint(sint_route->sint_route); +} + +static void msg_retry(void *opaque) +{ + TestMsgConn *conn = opaque; + assert(!hyperv_post_msg(conn->sint_route, &conn->msg)); +} + +static void msg_cb(void *data, int status) +{ + TestMsgConn *conn = data; + + if (!status) { + return; + } + + assert(status == -EAGAIN); + + aio_bh_schedule_oneshot(qemu_get_aio_context(), msg_retry, conn); +} + +static uint16_t msg_handler(const struct hyperv_post_message_input *msg, + void *data) +{ + int ret; + TestMsgConn *conn = data; + + /* post the same message we've got */ + conn->msg.header.message_type = msg->message_type; + assert(msg->payload_size < sizeof(conn->msg.payload)); + conn->msg.header.payload_size = msg->payload_size; + memcpy(&conn->msg.payload, msg->payload, msg->payload_size); + + ret = hyperv_post_msg(conn->sint_route, &conn->msg); + + switch (ret) { + case 0: + return HV_STATUS_SUCCESS; + case -EAGAIN: + return HV_STATUS_INSUFFICIENT_BUFFERS; + default: + return HV_STATUS_INVALID_HYPERCALL_INPUT; + } +} + +static void msg_conn_create(HypervTestDev *dev, uint8_t vp_index, + uint8_t sint, uint8_t conn_id) +{ + TestMsgConn *conn; + + conn = g_new0(TestMsgConn, 1); + assert(conn); + + conn->conn_id = conn_id; + + conn->sint_route = hyperv_sint_route_new(vp_index, sint, msg_cb, conn); + assert(conn->sint_route); + + assert(!hyperv_set_msg_handler(conn->conn_id, msg_handler, conn)); + + QLIST_INSERT_HEAD(&dev->msg_conns, conn, le); +} + +static void msg_conn_destroy(HypervTestDev *dev, uint8_t conn_id) +{ + TestMsgConn *conn; + + QLIST_FOREACH(conn, &dev->msg_conns, le) { + if (conn->conn_id == conn_id) { + QLIST_REMOVE(conn, le); + hyperv_set_msg_handler(conn->conn_id, NULL, NULL); + hyperv_sint_route_unref(conn->sint_route); + g_free(conn); + return; + } + } + assert(false); +} + +static void evt_conn_handler(EventNotifier *notifier) +{ + TestEvtConn *conn = container_of(notifier, TestEvtConn, notifier); + + event_notifier_test_and_clear(notifier); + + /* signal the same event flag we've got */ + assert(!hyperv_set_event_flag(conn->sint_route, conn->conn_id)); +} + +static void evt_conn_create(HypervTestDev *dev, uint8_t vp_index, + uint8_t sint, uint8_t conn_id) +{ + TestEvtConn *conn; + + conn = g_new0(TestEvtConn, 1); + assert(conn); + + conn->conn_id = conn_id; + + conn->sint_route = hyperv_sint_route_new(vp_index, sint, NULL, NULL); + assert(conn->sint_route); + + assert(!event_notifier_init(&conn->notifier, false)); + + event_notifier_set_handler(&conn->notifier, evt_conn_handler); + + assert(!hyperv_set_event_flag_handler(conn_id, &conn->notifier)); + + QLIST_INSERT_HEAD(&dev->evt_conns, conn, le); +} + +static void evt_conn_destroy(HypervTestDev *dev, uint8_t conn_id) +{ + TestEvtConn *conn; + + QLIST_FOREACH(conn, &dev->evt_conns, le) { + if (conn->conn_id == conn_id) { + QLIST_REMOVE(conn, le); + hyperv_set_event_flag_handler(conn->conn_id, NULL); + event_notifier_set_handler(&conn->notifier, NULL); + event_notifier_cleanup(&conn->notifier); + hyperv_sint_route_unref(conn->sint_route); + g_free(conn); + return; + } + } + assert(false); +} + +static uint64_t hv_test_dev_read(void *opaque, hwaddr addr, unsigned size) +{ + return 0; +} + +static void hv_test_dev_write(void *opaque, hwaddr addr, uint64_t data, + uint32_t len) +{ + HypervTestDev *dev = HYPERV_TEST_DEV(opaque); + uint8_t sint = data & 0xFF; + uint8_t vp_index = (data >> 8ULL) & 0xFF; + uint8_t ctl = (data >> 16ULL) & 0xFF; + uint8_t conn_id = (data >> 24ULL) & 0xFF; + + switch (ctl) { + 
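/* Dispatch on the control byte decoded above. A guest (e.g. a kvm-unit-test) drives this with a single 32-bit write to I/O port 0x3000 (mapped in hv_test_dev_realizefn() below), roughly, assuming a hypothetical outl() helper: outl((conn_id << 24) | (ctl << 16) | (vp_index << 8) | sint, 0x3000); */ +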
case HV_TEST_DEV_SINT_ROUTE_CREATE: + sint_route_create(dev, vp_index, sint); + break; + case HV_TEST_DEV_SINT_ROUTE_DESTROY: + sint_route_destroy(dev, vp_index, sint); + break; + case HV_TEST_DEV_SINT_ROUTE_SET_SINT: + sint_route_set_sint(dev, vp_index, sint); + break; + case HV_TEST_DEV_MSG_CONN_CREATE: + msg_conn_create(dev, vp_index, sint, conn_id); + break; + case HV_TEST_DEV_MSG_CONN_DESTROY: + msg_conn_destroy(dev, conn_id); + break; + case HV_TEST_DEV_EVT_CONN_CREATE: + evt_conn_create(dev, vp_index, sint, conn_id); + break; + case HV_TEST_DEV_EVT_CONN_DESTROY: + evt_conn_destroy(dev, conn_id); + break; + default: + break; + } +} + +static const MemoryRegionOps synic_test_sint_ops = { + .read = hv_test_dev_read, + .write = hv_test_dev_write, + .valid.min_access_size = 4, + .valid.max_access_size = 4, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static void hv_test_dev_realizefn(DeviceState *d, Error **errp) +{ + ISADevice *isa = ISA_DEVICE(d); + HypervTestDev *dev = HYPERV_TEST_DEV(d); + MemoryRegion *io = isa_address_space_io(isa); + + QLIST_INIT(&dev->sint_routes); + QLIST_INIT(&dev->msg_conns); + QLIST_INIT(&dev->evt_conns); + memory_region_init_io(&dev->sint_control, OBJECT(dev), + &synic_test_sint_ops, dev, + "hyperv-testdev-ctl", 4); + memory_region_add_subregion(io, 0x3000, &dev->sint_control); +} + +static void hv_test_dev_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + set_bit(DEVICE_CATEGORY_MISC, dc->categories); + dc->realize = hv_test_dev_realizefn; +} + +static const TypeInfo hv_test_dev_info = { + .name = TYPE_HYPERV_TEST_DEV, + .parent = TYPE_ISA_DEVICE, + .instance_size = sizeof(HypervTestDev), + .class_init = hv_test_dev_class_init, +}; + +static void hv_test_dev_register_types(void) +{ + type_register_static(&hv_test_dev_info); +} +type_init(hv_test_dev_register_types); diff --git a/hw/i386/pc.c b/hw/i386/pc.c index cd5029c149..eab8572f2a 100644 --- a/hw/i386/pc.c +++ b/hw/i386/pc.c @@ -2209,8 +2209,9 @@ static void pc_machine_set_nvdimm_persistence(Object *obj, const char *value, else if (strcmp(value, "mem-ctrl") == 0) nvdimm_state->persistence = 2; else { - error_report("-machine nvdimm-persistence=%s: unsupported option", value); - exit(EXIT_FAILURE); + error_setg(errp, "-machine nvdimm-persistence=%s: unsupported option", + value); + return; } g_free(nvdimm_state->persistence_string); diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c index 0d816fdd2c..0beefb05d4 100644 --- a/hw/intc/armv7m_nvic.c +++ b/hw/intc/armv7m_nvic.c @@ -1055,17 +1055,17 @@ static uint32_t nvic_readl(NVICState *s, uint32_t offset, MemTxAttrs attrs) case 0xd5c: /* MMFR3. */ return cpu->id_mmfr3; case 0xd60: /* ISAR0. */ - return cpu->id_isar0; + return cpu->isar.id_isar0; case 0xd64: /* ISAR1. */ - return cpu->id_isar1; + return cpu->isar.id_isar1; case 0xd68: /* ISAR2. */ - return cpu->id_isar2; + return cpu->isar.id_isar2; case 0xd6c: /* ISAR3. */ - return cpu->id_isar3; + return cpu->isar.id_isar3; case 0xd70: /* ISAR4. */ - return cpu->id_isar4; + return cpu->isar.id_isar4; case 0xd74: /* ISAR5. 
*/ - return cpu->id_isar5; + return cpu->isar.id_isar5; case 0xd78: /* CLIDR */ return cpu->clidr; case 0xd7c: /* CTR */ diff --git a/hw/intc/ioapic.c b/hw/intc/ioapic.c index b6896ac4ce..4e529729b4 100644 --- a/hw/intc/ioapic.c +++ b/hw/intc/ioapic.c @@ -21,7 +21,7 @@ */ #include "qemu/osdep.h" -#include "qemu/error-report.h" +#include "qapi/error.h" #include "monitor/monitor.h" #include "hw/hw.h" #include "hw/i386/pc.h" @@ -393,9 +393,9 @@ static void ioapic_realize(DeviceState *dev, Error **errp) IOAPICCommonState *s = IOAPIC_COMMON(dev); if (s->version != 0x11 && s->version != 0x20) { - error_report("IOAPIC only supports version 0x11 or 0x20 " - "(default: 0x%x).", IOAPIC_VER_DEF); - exit(1); + error_setg(errp, "IOAPIC only supports version 0x11 or 0x20 " + "(default: 0x%x).", IOAPIC_VER_DEF); + return; } memory_region_init_io(&s->io_memory, OBJECT(s), &ioapic_io_ops, s, diff --git a/hw/intc/xics.c b/hw/intc/xics.c index c90c893228..406efee064 100644 --- a/hw/intc/xics.c +++ b/hw/intc/xics.c @@ -320,8 +320,9 @@ static void icp_realize(DeviceState *dev, Error **errp) obj = object_property_get_link(OBJECT(dev), ICP_PROP_XICS, &err); if (!obj) { - error_propagate(errp, err); - error_prepend(errp, "required link '" ICP_PROP_XICS "' not found: "); + error_propagate_prepend(errp, err, + "required link '" ICP_PROP_XICS + "' not found: "); return; } @@ -329,8 +330,9 @@ static void icp_realize(DeviceState *dev, Error **errp) obj = object_property_get_link(OBJECT(dev), ICP_PROP_CPU, &err); if (!obj) { - error_propagate(errp, err); - error_prepend(errp, "required link '" ICP_PROP_CPU "' not found: "); + error_propagate_prepend(errp, err, + "required link '" ICP_PROP_CPU + "' not found: "); return; } @@ -624,8 +626,9 @@ static void ics_base_realize(DeviceState *dev, Error **errp) obj = object_property_get_link(OBJECT(dev), ICS_PROP_XICS, &err); if (!obj) { - error_propagate(errp, err); - error_prepend(errp, "required link '" ICS_PROP_XICS "' not found: "); + error_propagate_prepend(errp, err, + "required link '" ICS_PROP_XICS + "' not found: "); return; } ics->xics = XICS_FABRIC(obj); diff --git a/hw/intc/xics_kvm.c b/hw/intc/xics_kvm.c index 30c3769a20..e8fa9a53ae 100644 --- a/hw/intc/xics_kvm.c +++ b/hw/intc/xics_kvm.c @@ -198,17 +198,12 @@ static void ics_get_kvm_state(ICSState *ics) { uint64_t state; int i; - Error *local_err = NULL; for (i = 0; i < ics->nr_irqs; i++) { ICSIRQState *irq = &ics->irqs[i]; kvm_device_access(kernel_xics_fd, KVM_DEV_XICS_GRP_SOURCES, - i + ics->offset, &state, false, &local_err); - if (local_err) { - error_report_err(local_err); - exit(1); - } + i + ics->offset, &state, false, &error_fatal); irq->server = state & KVM_XICS_DESTINATION_MASK; irq->saved_priority = (state >> KVM_XICS_PRIORITY_SHIFT) diff --git a/hw/isa/Makefile.objs b/hw/isa/Makefile.objs index 83e06f6c04..9e106df186 100644 --- a/hw/isa/Makefile.objs +++ b/hw/isa/Makefile.objs @@ -1,9 +1,10 @@ common-obj-$(CONFIG_ISA_BUS) += isa-bus.o -common-obj-$(CONFIG_ISA_BUS) += isa-superio.o smc37c669-superio.o +common-obj-$(CONFIG_ISA_BUS) += isa-superio.o common-obj-$(CONFIG_APM) += apm.o common-obj-$(CONFIG_I82378) += i82378.o common-obj-$(CONFIG_PC87312) += pc87312.o common-obj-$(CONFIG_PIIX4) += piix4.o common-obj-$(CONFIG_VT82C686) += vt82c686.o +common-obj-$(CONFIG_SMC37C669) += smc37c669-superio.o obj-$(CONFIG_LPC_ICH9) += lpc_ich9.o diff --git a/hw/misc/Makefile.objs b/hw/misc/Makefile.objs index 6d50b03cfd..680350b3c3 100644 --- a/hw/misc/Makefile.objs +++ b/hw/misc/Makefile.objs @@ -71,7 +71,6 @@ 
obj-$(CONFIG_IOTKIT_SYSCTL) += iotkit-sysctl.o obj-$(CONFIG_IOTKIT_SYSINFO) += iotkit-sysinfo.o obj-$(CONFIG_PVPANIC) += pvpanic.o -obj-$(CONFIG_HYPERV_TESTDEV) += hyperv_testdev.o obj-$(CONFIG_AUX) += auxbus.o obj-$(CONFIG_ASPEED_SOC) += aspeed_scu.o aspeed_sdmc.o obj-$(CONFIG_MSF2) += msf2-sysreg.o diff --git a/hw/misc/edu.c b/hw/misc/edu.c index 0687ffd343..cdcf550dd7 100644 --- a/hw/misc/edu.c +++ b/hw/misc/edu.c @@ -342,7 +342,7 @@ static void *edu_fact_thread(void *opaque) static void pci_edu_realize(PCIDevice *pdev, Error **errp) { - EduState *edu = DO_UPCAST(EduState, pdev, pdev); + EduState *edu = EDU(pdev); uint8_t *pci_conf = pdev->config; pci_config_set_interrupt_pin(pci_conf, 1); @@ -365,7 +365,7 @@ static void pci_edu_realize(PCIDevice *pdev, Error **errp) static void pci_edu_uninit(PCIDevice *pdev) { - EduState *edu = DO_UPCAST(EduState, pdev, pdev); + EduState *edu = EDU(pdev); qemu_mutex_lock(&edu->thr_mutex); edu->stopping = true; diff --git a/hw/misc/hyperv_testdev.c b/hw/misc/hyperv_testdev.c deleted file mode 100644 index 7549f470b1..0000000000 --- a/hw/misc/hyperv_testdev.c +++ /dev/null @@ -1,174 +0,0 @@ -/* - * QEMU KVM Hyper-V test device to support Hyper-V kvm-unit-tests - * - * Copyright (C) 2015 Andrey Smetanin <asmetanin@virtuozzo.com> - * - * Authors: - * Andrey Smetanin <asmetanin@virtuozzo.com> - * - * This work is licensed under the terms of the GNU GPL, version 2 or later. - * See the COPYING file in the top-level directory. - * - */ - -#include "qemu/osdep.h" -#include <linux/kvm.h> -#include "hw/hw.h" -#include "hw/qdev.h" -#include "hw/isa/isa.h" -#include "sysemu/kvm.h" -#include "target/i386/hyperv.h" -#include "kvm_i386.h" - -#define HV_TEST_DEV_MAX_SINT_ROUTES 64 - -struct HypervTestDev { - ISADevice parent_obj; - MemoryRegion sint_control; - HvSintRoute *sint_route[HV_TEST_DEV_MAX_SINT_ROUTES]; -}; -typedef struct HypervTestDev HypervTestDev; - -#define TYPE_HYPERV_TEST_DEV "hyperv-testdev" -#define HYPERV_TEST_DEV(obj) \ - OBJECT_CHECK(HypervTestDev, (obj), TYPE_HYPERV_TEST_DEV) - -enum { - HV_TEST_DEV_SINT_ROUTE_CREATE = 1, - HV_TEST_DEV_SINT_ROUTE_DESTROY, - HV_TEST_DEV_SINT_ROUTE_SET_SINT -}; - -static int alloc_sint_route_index(HypervTestDev *dev) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(dev->sint_route); i++) { - if (dev->sint_route[i] == NULL) { - return i; - } - } - return -1; -} - -static void free_sint_route_index(HypervTestDev *dev, int i) -{ - assert(i >= 0 && i < ARRAY_SIZE(dev->sint_route)); - dev->sint_route[i] = NULL; -} - -static int find_sint_route_index(HypervTestDev *dev, uint32_t vp_index, - uint32_t sint) -{ - HvSintRoute *sint_route; - int i; - - for (i = 0; i < ARRAY_SIZE(dev->sint_route); i++) { - sint_route = dev->sint_route[i]; - if (sint_route && sint_route->vp_index == vp_index && - sint_route->sint == sint) { - return i; - } - } - return -1; -} - -static void hv_synic_test_dev_control(HypervTestDev *dev, uint32_t ctl, - uint32_t vp_index, uint32_t sint) -{ - int i; - HvSintRoute *sint_route; - - switch (ctl) { - case HV_TEST_DEV_SINT_ROUTE_CREATE: - i = alloc_sint_route_index(dev); - assert(i >= 0); - sint_route = kvm_hv_sint_route_create(vp_index, sint, NULL); - assert(sint_route); - dev->sint_route[i] = sint_route; - break; - case HV_TEST_DEV_SINT_ROUTE_DESTROY: - i = find_sint_route_index(dev, vp_index, sint); - assert(i >= 0); - sint_route = dev->sint_route[i]; - kvm_hv_sint_route_destroy(sint_route); - free_sint_route_index(dev, i); - break; - case HV_TEST_DEV_SINT_ROUTE_SET_SINT: - i = 
find_sint_route_index(dev, vp_index, sint); - assert(i >= 0); - sint_route = dev->sint_route[i]; - kvm_hv_sint_route_set_sint(sint_route); - break; - default: - break; - } -} - -static uint64_t hv_test_dev_read(void *opaque, hwaddr addr, unsigned size) -{ - return 0; -} - -static void hv_test_dev_write(void *opaque, hwaddr addr, uint64_t data, - uint32_t len) -{ - HypervTestDev *dev = HYPERV_TEST_DEV(opaque); - uint8_t ctl; - - ctl = (data >> 16ULL) & 0xFF; - switch (ctl) { - case HV_TEST_DEV_SINT_ROUTE_CREATE: - case HV_TEST_DEV_SINT_ROUTE_DESTROY: - case HV_TEST_DEV_SINT_ROUTE_SET_SINT: { - uint8_t sint = data & 0xFF; - uint8_t vp_index = (data >> 8ULL) & 0xFF; - hv_synic_test_dev_control(dev, ctl, vp_index, sint); - break; - } - default: - break; - } -} - -static const MemoryRegionOps synic_test_sint_ops = { - .read = hv_test_dev_read, - .write = hv_test_dev_write, - .valid.min_access_size = 4, - .valid.max_access_size = 4, - .endianness = DEVICE_LITTLE_ENDIAN, -}; - -static void hv_test_dev_realizefn(DeviceState *d, Error **errp) -{ - ISADevice *isa = ISA_DEVICE(d); - HypervTestDev *dev = HYPERV_TEST_DEV(d); - MemoryRegion *io = isa_address_space_io(isa); - - memset(dev->sint_route, 0, sizeof(dev->sint_route)); - memory_region_init_io(&dev->sint_control, OBJECT(dev), - &synic_test_sint_ops, dev, - "hyperv-testdev-ctl", 4); - memory_region_add_subregion(io, 0x3000, &dev->sint_control); -} - -static void hv_test_dev_class_init(ObjectClass *klass, void *data) -{ - DeviceClass *dc = DEVICE_CLASS(klass); - - set_bit(DEVICE_CATEGORY_MISC, dc->categories); - dc->realize = hv_test_dev_realizefn; -} - -static const TypeInfo hv_test_dev_info = { - .name = TYPE_HYPERV_TEST_DEV, - .parent = TYPE_ISA_DEVICE, - .instance_size = sizeof(HypervTestDev), - .class_init = hv_test_dev_class_init, -}; - -static void hv_test_dev_register_types(void) -{ - type_register_static(&hv_test_dev_info); -} -type_init(hv_test_dev_register_types); diff --git a/hw/misc/ivshmem.c b/hw/misc/ivshmem.c index 8cb17b9dd4..f88910e55c 100644 --- a/hw/misc/ivshmem.c +++ b/hw/misc/ivshmem.c @@ -1288,8 +1288,8 @@ static void ivshmem_realize(PCIDevice *dev, Error **errp) IVShmemState *s = IVSHMEM_COMMON(dev); if (!qtest_enabled()) { - error_report("ivshmem is deprecated, please use ivshmem-plain" - " or ivshmem-doorbell instead"); + warn_report("ivshmem is deprecated, please use ivshmem-plain" + " or ivshmem-doorbell instead"); } if (qemu_chr_fe_backend_connected(&s->server_chr) + !!s->shmobj != 1) { diff --git a/hw/net/cadence_gem.c b/hw/net/cadence_gem.c index 1795998928..d95cc27f58 100644 --- a/hw/net/cadence_gem.c +++ b/hw/net/cadence_gem.c @@ -142,6 +142,7 @@ #define GEM_DESCONF4 (0x0000028C/4) #define GEM_DESCONF5 (0x00000290/4) #define GEM_DESCONF6 (0x00000294/4) +#define GEM_DESCONF6_64B_MASK (1U << 23) #define GEM_DESCONF7 (0x00000298/4) #define GEM_INT_Q1_STATUS (0x00000400 / 4) @@ -1283,6 +1284,7 @@ static void gem_reset(DeviceState *d) int i; CadenceGEMState *s = CADENCE_GEM(d); const uint8_t *a; + uint32_t queues_mask = 0; DB_PRINT("\n"); @@ -1299,7 +1301,12 @@ static void gem_reset(DeviceState *d) s->regs[GEM_DESCONF] = 0x02500111; s->regs[GEM_DESCONF2] = 0x2ab13fff; s->regs[GEM_DESCONF5] = 0x002f2045; - s->regs[GEM_DESCONF6] = 0x00000200; + s->regs[GEM_DESCONF6] = GEM_DESCONF6_64B_MASK; + + if (s->num_priority_queues > 1) { + queues_mask = MAKE_64BIT_MASK(1, s->num_priority_queues - 1); + s->regs[GEM_DESCONF6] |= queues_mask; + } /* Set MAC address */ a = &s->conf.macaddr.a[0]; diff --git a/hw/net/e1000.c 
b/hw/net/e1000.c index 13a9494a8d..5e144cb4e4 100644 --- a/hw/net/e1000.c +++ b/hw/net/e1000.c @@ -36,6 +36,7 @@ #include "qemu/range.h" #include "e1000x_common.h" +#include "trace.h" static const uint8_t bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; @@ -847,6 +848,15 @@ static uint64_t rx_desc_base(E1000State *s) return (bah << 32) + bal; } +static void +e1000_receiver_overrun(E1000State *s, size_t size) +{ + trace_e1000_receiver_overrun(size, s->mac_reg[RDH], s->mac_reg[RDT]); + e1000x_inc_reg_if_not_full(s->mac_reg, RNBC); + e1000x_inc_reg_if_not_full(s->mac_reg, MPC); + set_ics(s, 0, E1000_ICS_RXO); +} + static ssize_t e1000_receive_iov(NetClientState *nc, const struct iovec *iov, int iovcnt) { @@ -916,8 +926,8 @@ e1000_receive_iov(NetClientState *nc, const struct iovec *iov, int iovcnt) desc_offset = 0; total_size = size + e1000x_fcs_len(s->mac_reg); if (!e1000_has_rxbufs(s, total_size)) { - set_ics(s, 0, E1000_ICS_RXO); - return -1; + e1000_receiver_overrun(s, total_size); + return -1; } do { desc_size = total_size - desc_offset; @@ -969,7 +979,7 @@ e1000_receive_iov(NetClientState *nc, const struct iovec *iov, int iovcnt) rdh_start >= s->mac_reg[RDLEN] / sizeof(desc)) { DBGOUT(RXERR, "RDH wraparound @%x, RDT %x, RDLEN %x\n", rdh_start, s->mac_reg[RDT], s->mac_reg[RDLEN]); - set_ics(s, 0, E1000_ICS_RXO); + e1000_receiver_overrun(s, total_size); return -1; } } while (desc_offset < total_size); diff --git a/hw/net/ne2000.c b/hw/net/ne2000.c index 07d79e317f..869518ee06 100644 --- a/hw/net/ne2000.c +++ b/hw/net/ne2000.c @@ -174,7 +174,7 @@ static int ne2000_buffer_full(NE2000State *s) ssize_t ne2000_receive(NetClientState *nc, const uint8_t *buf, size_t size_) { NE2000State *s = qemu_get_nic_opaque(nc); - int size = size_; + size_t size = size_; uint8_t *p; unsigned int total_len, next, avail, len, index, mcast_idx; uint8_t buf1[60]; @@ -182,7 +182,7 @@ ssize_t ne2000_receive(NetClientState *nc, const uint8_t *buf, size_t size_) { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; #if defined(DEBUG_NE2000) - printf("NE2000: received len=%d\n", size); + printf("NE2000: received len=%zu\n", size); #endif if (s->cmd & E8390_STOP || ne2000_buffer_full(s)) diff --git a/hw/net/pcnet.c b/hw/net/pcnet.c index 0c44554168..d9ba04bdfc 100644 --- a/hw/net/pcnet.c +++ b/hw/net/pcnet.c @@ -988,14 +988,14 @@ ssize_t pcnet_receive(NetClientState *nc, const uint8_t *buf, size_t size_) uint8_t buf1[60]; int remaining; int crc_err = 0; - int size = size_; + size_t size = size_; if (CSR_DRX(s) || CSR_STOP(s) || CSR_SPND(s) || !size || (CSR_LOOP(s) && !s->looptest)) { return -1; } #ifdef PCNET_DEBUG - printf("pcnet_receive size=%d\n", size); + printf("pcnet_receive size=%zu\n", size); #endif /* if too small buffer, then expand it */ diff --git a/hw/net/rtl8139.c b/hw/net/rtl8139.c index 46daa16202..2342a095e3 100644 --- a/hw/net/rtl8139.c +++ b/hw/net/rtl8139.c @@ -817,7 +817,7 @@ static ssize_t rtl8139_do_receive(NetClientState *nc, const uint8_t *buf, size_t RTL8139State *s = qemu_get_nic_opaque(nc); PCIDevice *d = PCI_DEVICE(s); /* size is the length of the buffer passed to the driver */ - int size = size_; + size_t size = size_; const uint8_t *dot1q_buf = NULL; uint32_t packet_header = 0; @@ -826,7 +826,7 @@ static ssize_t rtl8139_do_receive(NetClientState *nc, const uint8_t *buf, size_t static const uint8_t broadcast_macaddr[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; - DPRINTF(">>> received len=%d\n", size); + DPRINTF(">>> received len=%zu\n", size); /* test if board clock is stopped */ if (!s->clock_enabled) 
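Note on the size_t conversions in the hunks above and below: ne2000_receive, pcnet_receive and rtl8139_do_receive all narrowed their size_t length argument into an int, which can mangle large values and, once the variable becomes size_t, makes the %d printf conversions wrong as well. A minimal standalone sketch of the hazard these hunks remove (handle_rx and rx_len are illustrative names, not taken from the patches):

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: why "int size = size_;" becomes "size_t size = size_;"
 * and %d becomes %zu in the receive paths patched here. */
static void handle_rx(size_t rx_len)
{
    int narrowed = (int)rx_len;         /* value changes once rx_len > INT_MAX */
    printf("narrowed len=%d\n", narrowed);
    printf("actual len=%zu\n", rx_len); /* %zu is the conversion for size_t */
}

int main(void)
{
    handle_rx(60);       /* small frames: both prints agree */
    handle_rx(SIZE_MAX); /* oversized length: the int copy is typically -1 */
    return 0;
}

Keeping the length as size_t end to end, with %zu in the format strings, avoids both the truncation and the format mismatch.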
@@ -1035,7 +1035,7 @@ static ssize_t rtl8139_do_receive(NetClientState *nc, const uint8_t *buf, size_t if (size+4 > rx_space) { - DPRINTF("C+ Rx mode : descriptor %d size %d received %d + 4\n", + DPRINTF("C+ Rx mode : descriptor %d size %d received %zu + 4\n", descriptor, rx_space, size); s->IntrStatus |= RxOverflow; @@ -1148,7 +1148,7 @@ static ssize_t rtl8139_do_receive(NetClientState *nc, const uint8_t *buf, size_t if (avail != 0 && RX_ALIGN(size + 8) >= avail) { DPRINTF("rx overflow: rx buffer length %d head 0x%04x " - "read 0x%04x === available 0x%04x need 0x%04x\n", + "read 0x%04x === available 0x%04x need 0x%04zx\n", s->RxBufferSize, s->RxBufAddr, s->RxBufPtr, avail, size + 8); s->IntrStatus |= RxOverflow; diff --git a/hw/net/trace-events b/hw/net/trace-events index c1dea4b156..9d49f62fa1 100644 --- a/hw/net/trace-events +++ b/hw/net/trace-events @@ -98,6 +98,9 @@ net_rx_pkt_rss_ip6_ex(void) "Calculating IPv6/EX RSS hash" net_rx_pkt_rss_hash(size_t rss_length, uint32_t rss_hash) "RSS hash for %zu bytes: 0x%X" net_rx_pkt_rss_add_chunk(void* ptr, size_t size, size_t input_offset) "Add RSS chunk %p, %zu bytes, RSS input offset %zu bytes" +# hw/net/e1000.c +e1000_receiver_overrun(size_t s, uint32_t rdh, uint32_t rdt) "Receiver overrun: dropped packet of %zu bytes, RDH=%u, RDT=%u" + # hw/net/e1000x_common.c e1000x_rx_can_recv_disabled(bool link_up, bool rx_enabled, bool pci_master) "link_up: %d, rx_enabled %d, pci_master %d" e1000x_vlan_is_vlan_pkt(bool is_vlan_pkt, uint16_t eth_proto, uint16_t vet) "Is VLAN packet: %d, ETH proto: 0x%X, VET: 0x%X" diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c index 4bdd5b8532..385b1a03e9 100644 --- a/hw/net/virtio-net.c +++ b/hw/net/virtio-net.c @@ -2020,10 +2020,10 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp) if (n->net_conf.tx && strcmp(n->net_conf.tx, "timer") && strcmp(n->net_conf.tx, "bh")) { - error_report("virtio-net: " - "Unknown option tx=%s, valid options: \"timer\" \"bh\"", - n->net_conf.tx); - error_report("Defaulting to \"bh\""); + warn_report("virtio-net: " + "Unknown option tx=%s, valid options: \"timer\" \"bh\"", + n->net_conf.tx); + error_printf("Defaulting to \"bh\""); } n->net_conf.tx_queue_size = MIN(virtio_net_max_tx_queue_size(n), diff --git a/hw/pci-host/piix.c b/hw/pci-host/piix.c index 0e608347c1..da73743fa2 100644 --- a/hw/pci-host/piix.c +++ b/hw/pci-host/piix.c @@ -327,6 +327,10 @@ static void i440fx_pcihost_realize(DeviceState *dev, Error **errp) sysbus_add_io(sbd, 0xcfc, &s->data_mem); sysbus_init_ioports(sbd, 0xcfc, 4); + + /* register i440fx 0xcf8 port as coalesced pio */ + memory_region_set_flush_coalesced(&s->data_mem); + memory_region_add_coalescing(&s->conf_mem, 0, 4); } static void i440fx_realize(PCIDevice *dev, Error **errp) diff --git a/hw/pci-host/q35.c b/hw/pci-host/q35.c index 02f9576588..8ce1e09932 100644 --- a/hw/pci-host/q35.c +++ b/hw/pci-host/q35.c @@ -51,6 +51,10 @@ static void q35_host_realize(DeviceState *dev, Error **errp) sysbus_add_io(sbd, MCH_HOST_BRIDGE_CONFIG_DATA, &pci->data_mem); sysbus_init_ioports(sbd, MCH_HOST_BRIDGE_CONFIG_DATA, 4); + /* register q35 0xcf8 port as coalesced pio */ + memory_region_set_flush_coalesced(&pci->data_mem); + memory_region_add_coalescing(&pci->conf_mem, 0, 4); + pci->bus = pci_root_bus_new(DEVICE(s), "pcie.0", s->mch.pci_address_space, s->mch.address_space_io, diff --git a/hw/ppc/pnv_core.c b/hw/ppc/pnv_core.c index 9750464bf4..ad1bcc7990 100644 --- a/hw/ppc/pnv_core.c +++ b/hw/ppc/pnv_core.c @@ -148,8 +148,8 @@ static void 
pnv_core_realize(DeviceState *dev, Error **errp) chip = object_property_get_link(OBJECT(dev), "chip", &local_err); if (!chip) { - error_propagate(errp, local_err); - error_prepend(errp, "required link 'chip' not found: "); + error_propagate_prepend(errp, local_err, + "required link 'chip' not found: "); return; } diff --git a/hw/ppc/spapr_pci.c b/hw/ppc/spapr_pci.c index c2271e6ed4..58afa46204 100644 --- a/hw/ppc/spapr_pci.c +++ b/hw/ppc/spapr_pci.c @@ -1724,16 +1724,15 @@ static void spapr_phb_realize(DeviceState *dev, Error **errp) if (smc->legacy_irq_allocation) { irq = spapr_irq_findone(spapr, &local_err); if (local_err) { - error_propagate(errp, local_err); - error_prepend(errp, "can't allocate LSIs: "); + error_propagate_prepend(errp, local_err, + "can't allocate LSIs: "); return; } } spapr_irq_claim(spapr, irq, true, &local_err); if (local_err) { - error_propagate(errp, local_err); - error_prepend(errp, "can't allocate LSIs: "); + error_propagate_prepend(errp, local_err, "can't allocate LSIs: "); return; } diff --git a/hw/scsi/scsi-disk.c b/hw/scsi/scsi-disk.c index c43163cef4..e2c5408aa2 100644 --- a/hw/scsi/scsi-disk.c +++ b/hw/scsi/scsi-disk.c @@ -441,9 +441,18 @@ static bool scsi_handle_rw_error(SCSIDiskReq *r, int error, bool acct_failed) } switch (error) { case 0: - /* The command has run, no need to fake sense. */ + /* A passthrough command has run and has produced sense data; check + * whether the error has to be handled by the guest or should rather + * pause the host. + */ assert(r->status && *r->status); - scsi_req_complete(&r->req, *r->status); + error = scsi_sense_buf_to_errno(r->req.sense, sizeof(r->req.sense)); + if (error == ECANCELED || error == EAGAIN || error == ENOTCONN || + error == 0) { + /* These errors are handled by guest. */ + scsi_req_complete(&r->req, *r->status); + return true; + } break; case ENOMEDIUM: scsi_check_condition(r, SENSE_CODE(NO_MEDIUM)); @@ -462,23 +471,17 @@ static bool scsi_handle_rw_error(SCSIDiskReq *r, int error, bool acct_failed) break; } } - if (!error) { - assert(r->status && *r->status); - error = scsi_sense_buf_to_errno(r->req.sense, sizeof(r->req.sense)); - if (error == ECANCELED || error == EAGAIN || error == ENOTCONN || - error == 0) { - /* These errors are handled by guest. 
*/ - scsi_req_complete(&r->req, *r->status); - return true; - } + blk_error_action(s->qdev.conf.blk, action, is_read, error); + if (action == BLOCK_ERROR_ACTION_IGNORE) { + scsi_req_complete(&r->req, 0); + return true; } - blk_error_action(s->qdev.conf.blk, action, is_read, error); if (action == BLOCK_ERROR_ACTION_STOP) { scsi_req_retry(&r->req); } - return action != BLOCK_ERROR_ACTION_IGNORE; + return false; } static void scsi_write_complete_noio(SCSIDiskReq *r, int ret) diff --git a/hw/scsi/virtio-scsi.c b/hw/scsi/virtio-scsi.c index 5a3057d1f8..3aa99717e2 100644 --- a/hw/scsi/virtio-scsi.c +++ b/hw/scsi/virtio-scsi.c @@ -797,16 +797,8 @@ static void virtio_scsi_hotplug(HotplugHandler *hotplug_dev, DeviceState *dev, virtio_scsi_acquire(s); blk_set_aio_context(sd->conf.blk, s->ctx); virtio_scsi_release(s); - } -} -/* Announce the new device after it has been plugged */ -static void virtio_scsi_post_hotplug(HotplugHandler *hotplug_dev, - DeviceState *dev) -{ - VirtIODevice *vdev = VIRTIO_DEVICE(hotplug_dev); - VirtIOSCSI *s = VIRTIO_SCSI(vdev); - SCSIDevice *sd = SCSI_DEVICE(dev); + } if (virtio_vdev_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) { virtio_scsi_acquire(s); @@ -976,7 +968,6 @@ static void virtio_scsi_class_init(ObjectClass *klass, void *data) vdc->start_ioeventfd = virtio_scsi_dataplane_start; vdc->stop_ioeventfd = virtio_scsi_dataplane_stop; hc->plug = virtio_scsi_hotplug; - hc->post_plug = virtio_scsi_post_hotplug; hc->unplug = virtio_scsi_hotunplug; } diff --git a/hw/sd/ssi-sd.c b/hw/sd/ssi-sd.c index 95a143bfba..623d0333e8 100644 --- a/hw/sd/ssi-sd.c +++ b/hw/sd/ssi-sd.c @@ -284,6 +284,8 @@ static void ssi_sd_class_init(ObjectClass *klass, void *data) k->cs_polarity = SSI_CS_LOW; dc->vmsd = &vmstate_ssi_sd; dc->reset = ssi_sd_reset; + /* Reason: init() method uses drive_get_next() */ + dc->user_creatable = false; } static const TypeInfo ssi_sd_info = { diff --git a/hw/smbios/smbios.c b/hw/smbios/smbios.c index a27e54b2fa..920939454e 100644 --- a/hw/smbios/smbios.c +++ b/hw/smbios/smbios.c @@ -950,6 +950,7 @@ static void save_opt_list(size_t *ndest, const char ***dest, void smbios_entry_add(QemuOpts *opts, Error **errp) { + Error *err = NULL; const char *val; assert(!smbios_immutable); @@ -960,12 +961,16 @@ void smbios_entry_add(QemuOpts *opts, Error **errp) int size; struct smbios_table *table; /* legacy mode only */ - qemu_opts_validate(opts, qemu_smbios_file_opts, &error_fatal); + qemu_opts_validate(opts, qemu_smbios_file_opts, &err); + if (err) { + error_propagate(errp, err); + return; + } size = get_image_size(val); if (size == -1 || size < sizeof(struct smbios_structure_header)) { - error_report("Cannot read SMBIOS file %s", val); - exit(1); + error_setg(errp, "Cannot read SMBIOS file %s", val); + return; } /* @@ -978,14 +983,15 @@ void smbios_entry_add(QemuOpts *opts, Error **errp) smbios_tables_len); if (load_image(val, (uint8_t *)header) != size) { - error_report("Failed to load SMBIOS file %s", val); - exit(1); + error_setg(errp, "Failed to load SMBIOS file %s", val); + return; } if (test_bit(header->type, have_fields_bitmap)) { - error_report("can't load type %d struct, fields already specified!", - header->type); - exit(1); + error_setg(errp, + "can't load type %d struct, fields already specified!", + header->type); + return; } set_bit(header->type, have_binfile_bitmap); @@ -1030,19 +1036,23 @@ void smbios_entry_add(QemuOpts *opts, Error **errp) unsigned long type = strtoul(val, NULL, 0); if (type > SMBIOS_MAX_TYPE) { - error_report("out of range!"); - exit(1); + 
error_setg(errp, "out of range!"); + return; } if (test_bit(type, have_binfile_bitmap)) { - error_report("can't add fields, binary file already loaded!"); - exit(1); + error_setg(errp, "can't add fields, binary file already loaded!"); + return; } set_bit(type, have_fields_bitmap); switch (type) { case 0: - qemu_opts_validate(opts, qemu_smbios_type0_opts, &error_fatal); + qemu_opts_validate(opts, qemu_smbios_type0_opts, &err); + if (err) { + error_propagate(errp, err); + return; + } save_opt(&type0.vendor, opts, "vendor"); save_opt(&type0.version, opts, "version"); save_opt(&type0.date, opts, "date"); @@ -1051,14 +1061,18 @@ void smbios_entry_add(QemuOpts *opts, Error **errp) val = qemu_opt_get(opts, "release"); if (val) { if (sscanf(val, "%hhu.%hhu", &type0.major, &type0.minor) != 2) { - error_report("Invalid release"); - exit(1); + error_setg(errp, "Invalid release"); + return; } type0.have_major_minor = true; } return; case 1: - qemu_opts_validate(opts, qemu_smbios_type1_opts, &error_fatal); + qemu_opts_validate(opts, qemu_smbios_type1_opts, &err); + if (err) { + error_propagate(errp, err); + return; + } save_opt(&type1.manufacturer, opts, "manufacturer"); save_opt(&type1.product, opts, "product"); save_opt(&type1.version, opts, "version"); @@ -1069,14 +1083,18 @@ void smbios_entry_add(QemuOpts *opts, Error **errp) val = qemu_opt_get(opts, "uuid"); if (val) { if (qemu_uuid_parse(val, &qemu_uuid) != 0) { - error_report("Invalid UUID"); - exit(1); + error_setg(errp, "Invalid UUID"); + return; } qemu_uuid_set = true; } return; case 2: - qemu_opts_validate(opts, qemu_smbios_type2_opts, &error_fatal); + qemu_opts_validate(opts, qemu_smbios_type2_opts, &err); + if (err) { + error_propagate(errp, err); + return; + } save_opt(&type2.manufacturer, opts, "manufacturer"); save_opt(&type2.product, opts, "product"); save_opt(&type2.version, opts, "version"); @@ -1085,7 +1103,11 @@ void smbios_entry_add(QemuOpts *opts, Error **errp) save_opt(&type2.location, opts, "location"); return; case 3: - qemu_opts_validate(opts, qemu_smbios_type3_opts, &error_fatal); + qemu_opts_validate(opts, qemu_smbios_type3_opts, &err); + if (err) { + error_propagate(errp, err); + return; + } save_opt(&type3.manufacturer, opts, "manufacturer"); save_opt(&type3.version, opts, "version"); save_opt(&type3.serial, opts, "serial"); @@ -1093,7 +1115,11 @@ void smbios_entry_add(QemuOpts *opts, Error **errp) save_opt(&type3.sku, opts, "sku"); return; case 4: - qemu_opts_validate(opts, qemu_smbios_type4_opts, &error_fatal); + qemu_opts_validate(opts, qemu_smbios_type4_opts, &err); + if (err) { + error_propagate(errp, err); + return; + } save_opt(&type4.sock_pfx, opts, "sock_pfx"); save_opt(&type4.manufacturer, opts, "manufacturer"); save_opt(&type4.version, opts, "version"); @@ -1102,11 +1128,19 @@ void smbios_entry_add(QemuOpts *opts, Error **errp) save_opt(&type4.part, opts, "part"); return; case 11: - qemu_opts_validate(opts, qemu_smbios_type11_opts, &error_fatal); + qemu_opts_validate(opts, qemu_smbios_type11_opts, &err); + if (err) { + error_propagate(errp, err); + return; + } save_opt_list(&type11.nvalues, &type11.values, opts, "value"); return; case 17: - qemu_opts_validate(opts, qemu_smbios_type17_opts, &error_fatal); + qemu_opts_validate(opts, qemu_smbios_type17_opts, &err); + if (err) { + error_propagate(errp, err); + return; + } save_opt(&type17.loc_pfx, opts, "loc_pfx"); save_opt(&type17.bank, opts, "bank"); save_opt(&type17.manufacturer, opts, "manufacturer"); @@ -1116,12 +1150,12 @@ void smbios_entry_add(QemuOpts 
*opts, Error **errp) type17.speed = qemu_opt_get_number(opts, "speed", 0); return; default: - error_report("Don't know how to build fields for SMBIOS type %ld", - type); - exit(1); + error_setg(errp, + "Don't know how to build fields for SMBIOS type %ld", + type); + return; } } - error_report("Must specify type= or file="); - exit(1); + error_setg(errp, "Must specify type= or file="); } diff --git a/hw/timer/aspeed_timer.c b/hw/timer/aspeed_timer.c index 54b400b94a..5c786e5128 100644 --- a/hw/timer/aspeed_timer.c +++ b/hw/timer/aspeed_timer.c @@ -454,8 +454,7 @@ static void aspeed_timer_realize(DeviceState *dev, Error **errp) obj = object_property_get_link(OBJECT(dev), "scu", &err); if (!obj) { - error_propagate(errp, err); - error_prepend(errp, "required link 'scu' not found: "); + error_propagate_prepend(errp, err, "required link 'scu' not found: "); return; } s->scu = ASPEED_SCU(obj); diff --git a/hw/timer/mc146818rtc.c b/hw/timer/mc146818rtc.c index acee47da0e..e4e4de8b8a 100644 --- a/hw/timer/mc146818rtc.c +++ b/hw/timer/mc146818rtc.c @@ -34,6 +34,7 @@ #include "qapi/qapi-commands-misc.h" #include "qapi/qapi-events-misc.h" #include "qapi/visitor.h" +#include "exec/address-spaces.h" #ifdef TARGET_I386 #include "hw/i386/apic.h" @@ -70,6 +71,7 @@ typedef struct RTCState { ISADevice parent_obj; MemoryRegion io; + MemoryRegion coalesced_io; uint8_t cmos_data[128]; uint8_t cmos_index; int32_t base_year; @@ -990,6 +992,13 @@ static void rtc_realizefn(DeviceState *dev, Error **errp) memory_region_init_io(&s->io, OBJECT(s), &cmos_ops, s, "rtc", 2); isa_register_ioport(isadev, &s->io, base); + /* register rtc 0x70 port for coalesced_pio */ + memory_region_set_flush_coalesced(&s->io); + memory_region_init_io(&s->coalesced_io, OBJECT(s), &cmos_ops, + s, "rtc-index", 1); + memory_region_add_subregion(&s->io, 0, &s->coalesced_io); + memory_region_add_coalescing(&s->coalesced_io, 0, 1); + qdev_set_legacy_instance_id(dev, base, 3); qemu_register_reset(rtc_reset, s); diff --git a/hw/usb/bus.c b/hw/usb/bus.c index 11f7720d71..bf796d67e6 100644 --- a/hw/usb/bus.c +++ b/hw/usb/bus.c @@ -340,8 +340,9 @@ static USBDevice *usb_try_create_simple(USBBus *bus, const char *name, } object_property_set_bool(OBJECT(dev), true, "realized", &err); if (err) { - error_propagate(errp, err); - error_prepend(errp, "Failed to initialize USB device '%s': ", name); + error_propagate_prepend(errp, err, + "Failed to initialize USB device '%s': ", + name); return NULL; } return dev; diff --git a/hw/vfio/pci-quirks.c b/hw/vfio/pci-quirks.c index 481fd08df7..eae31c74d6 100644 --- a/hw/vfio/pci-quirks.c +++ b/hw/vfio/pci-quirks.c @@ -1670,7 +1670,7 @@ static void vfio_probe_igd_bar4_quirk(VFIOPCIDevice *vdev, int nr) * but also no point in us enabling VGA if disabled in hardware. 
*/ if (!(gmch & 0x2) && !vdev->vga && vfio_populate_vga(vdev, &err)) { - error_reportf_err(err, ERR_PREFIX, vdev->vbasedev.name); + error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name); error_report("IGD device %s failed to enable VGA access, " "legacy mode disabled", vdev->vbasedev.name); goto out; @@ -1696,7 +1696,7 @@ static void vfio_probe_igd_bar4_quirk(VFIOPCIDevice *vdev, int nr) ret = vfio_pci_igd_opregion_init(vdev, opregion, &err); if (ret) { error_append_hint(&err, "IGD legacy mode disabled\n"); - error_reportf_err(err, ERR_PREFIX, vdev->vbasedev.name); + error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name); goto out; } diff --git a/hw/vfio/pci.c b/hw/vfio/pci.c index 8b73582d51..5c7bd96984 100644 --- a/hw/vfio/pci.c +++ b/hw/vfio/pci.c @@ -252,7 +252,7 @@ static void vfio_intx_update(PCIDevice *pdev) vfio_intx_enable_kvm(vdev, &err); if (err) { - error_reportf_err(err, WARN_PREFIX, vdev->vbasedev.name); + warn_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name); } /* Re-enable the interrupt in case we missed an EOI */ @@ -317,7 +317,7 @@ static int vfio_intx_enable(VFIOPCIDevice *vdev, Error **errp) vfio_intx_enable_kvm(vdev, &err); if (err) { - error_reportf_err(err, WARN_PREFIX, vdev->vbasedev.name); + warn_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name); } vdev->interrupt = VFIO_INT_INTx; @@ -745,7 +745,7 @@ static void vfio_msi_disable_common(VFIOPCIDevice *vdev) vfio_intx_enable(vdev, &err); if (err) { - error_reportf_err(err, ERR_PREFIX, vdev->vbasedev.name); + error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name); } } @@ -1283,8 +1283,7 @@ static int vfio_msi_setup(VFIOPCIDevice *vdev, int pos, Error **errp) if (ret == -ENOTSUP) { return 0; } - error_prepend(&err, "msi_init failed: "); - error_propagate(errp, err); + error_propagate_prepend(errp, err, "msi_init failed: "); return ret; } vdev->msi_cap_size = 0xa + (msi_maskbit ? 0xa : 0) + (msi_64bit ?
0x4 : 0); @@ -1558,7 +1557,7 @@ static int vfio_msix_setup(VFIOPCIDevice *vdev, int pos, Error **errp) &err); if (ret < 0) { if (ret == -ENOTSUP) { - error_report_err(err); + warn_report_err(err); return 0; } @@ -2197,7 +2196,7 @@ static void vfio_pci_post_reset(VFIOPCIDevice *vdev) vfio_intx_enable(vdev, &err); if (err) { - error_reportf_err(err, ERR_PREFIX, vdev->vbasedev.name); + error_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name); } for (nr = 0; nr < PCI_NUM_REGIONS - 1; ++nr) { @@ -2591,9 +2590,9 @@ static void vfio_populate_device(VFIOPCIDevice *vdev, Error **errp) } else if (irq_info.count == 1) { vdev->pci_aer = true; } else { - error_report(WARN_PREFIX - "Could not enable error recovery for the device", - vbasedev->name); + warn_report(VFIO_MSG_PREFIX + "Could not enable error recovery for the device", + vbasedev->name); } } @@ -2718,7 +2717,7 @@ static void vfio_req_notifier_handler(void *opaque) qdev_unplug(&vdev->pdev.qdev, &err); if (err) { - error_reportf_err(err, WARN_PREFIX, vdev->vbasedev.name); + warn_reportf_err(err, VFIO_MSG_PREFIX, vdev->vbasedev.name); } } @@ -2831,7 +2830,7 @@ static void vfio_realize(PCIDevice *pdev, Error **errp) if (stat(vdev->vbasedev.sysfsdev, &st) < 0) { error_setg_errno(errp, errno, "no such host device"); - error_prepend(errp, ERR_PREFIX, vdev->vbasedev.sysfsdev); + error_prepend(errp, VFIO_MSG_PREFIX, vdev->vbasedev.sysfsdev); return; } @@ -3086,7 +3085,7 @@ out_teardown: vfio_teardown_msi(vdev); vfio_bars_exit(vdev); error: - error_prepend(errp, ERR_PREFIX, vdev->vbasedev.name); + error_prepend(errp, VFIO_MSG_PREFIX, vdev->vbasedev.name); } static void vfio_instance_finalize(Object *obj) diff --git a/hw/vfio/platform.c b/hw/vfio/platform.c index 64c1af653d..398db38f14 100644 --- a/hw/vfio/platform.c +++ b/hw/vfio/platform.c @@ -679,8 +679,8 @@ static void vfio_platform_realize(DeviceState *dev, Error **errp) for (i = 0; i < vbasedev->num_regions; i++) { if (vfio_region_mmap(vdev->regions[i])) { - error_report("%s mmap unsupported. 
Performance may be slow", - memory_region_name(vdev->regions[i]->mem)); + warn_report("%s mmap unsupported, performance may be slow", + memory_region_name(vdev->regions[i]->mem)); } sysbus_init_mmio(sbdev, vdev->regions[i]->mem); } @@ -690,7 +690,7 @@ out: } if (vdev->vbasedev.name) { - error_prepend(errp, ERR_PREFIX, vdev->vbasedev.name); + error_prepend(errp, VFIO_MSG_PREFIX, vdev->vbasedev.name); } else { error_prepend(errp, "vfio error: "); } diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c index 3a01fe90f0..a954799267 100644 --- a/hw/virtio/virtio-pci.c +++ b/hw/virtio/virtio-pci.c @@ -1683,8 +1683,8 @@ static void virtio_pci_device_plugged(DeviceState *d, Error **errp) if (err) { /* Notice when a system that supports MSIx can't initialize it */ if (err != -ENOTSUP) { - error_report("unable to init msix vectors to %" PRIu32, - proxy->nvectors); + warn_report("unable to init msix vectors to %" PRIu32, + proxy->nvectors); } proxy->nvectors = 0; } diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c index 94f5c8e52a..4136d239dd 100644 --- a/hw/virtio/virtio.c +++ b/hw/virtio/virtio.c @@ -1169,7 +1169,6 @@ int virtio_set_status(VirtIODevice *vdev, uint8_t val) return 0; } -bool target_words_bigendian(void); static enum virtio_device_endian virtio_default_endian(void) { if (target_words_bigendian()) { @@ -1612,6 +1611,8 @@ void virtio_del_queue(VirtIODevice *vdev, int n) vdev->vq[n].vring.num = 0; vdev->vq[n].vring.num_default = 0; + vdev->vq[n].handle_output = NULL; + vdev->vq[n].handle_aio_output = NULL; } static void virtio_set_isr(VirtIODevice *vdev, int value) diff --git a/hw/xen/xen_pt.c b/hw/xen/xen_pt.c index e5a6eff44f..f1f3a3727c 100644 --- a/hw/xen/xen_pt.c +++ b/hw/xen/xen_pt.c @@ -830,7 +830,7 @@ static void xen_pt_realize(PCIDevice *d, Error **errp) xen_pt_config_init(s, &err); if (err) { error_append_hint(&err, "PCI Config space initialisation failed"); - error_report_err(err); + error_propagate(errp, err); rc = -1; goto err_out; } diff --git a/include/block/aio.h b/include/block/aio.h index f08630c6e5..0ca25dfec6 100644 --- a/include/block/aio.h +++ b/include/block/aio.h @@ -388,18 +388,41 @@ struct LinuxAioState *aio_setup_linux_aio(AioContext *ctx, Error **errp); struct LinuxAioState *aio_get_linux_aio(AioContext *ctx); /** - * aio_timer_new: + * aio_timer_new_with_attrs: * @ctx: the aio context * @type: the clock type * @scale: the scale + * @attributes: 0, or one to multiple OR'ed QEMU_TIMER_ATTR_<id> values + * to assign * @cb: the callback to call on timer expiry * @opaque: the opaque pointer to pass to the callback * - * Allocate a new timer attached to the context @ctx. + * Allocate a new timer (with attributes) attached to the context @ctx. * The function is responsible for memory allocation. * - * The preferred interface is aio_timer_init. Use that - * unless you really need dynamic memory allocation. + * The preferred interface is aio_timer_init or aio_timer_init_with_attrs. + * Use that unless you really need dynamic memory allocation. 
+ * + * Returns: a pointer to the new timer + */ +static inline QEMUTimer *aio_timer_new_with_attrs(AioContext *ctx, + QEMUClockType type, + int scale, int attributes, + QEMUTimerCB *cb, void *opaque) +{ + return timer_new_full(&ctx->tlg, type, scale, attributes, cb, opaque); +} + +/** + * aio_timer_new: + * @ctx: the aio context + * @type: the clock type + * @scale: the scale + * @cb: the callback to call on timer expiry + * @opaque: the opaque pointer to pass to the callback + * + * Allocate a new timer attached to the context @ctx. + * See aio_timer_new_with_attrs for details. * * Returns: a pointer to the new timer */ @@ -407,7 +430,29 @@ static inline QEMUTimer *aio_timer_new(AioContext *ctx, QEMUClockType type, int scale, QEMUTimerCB *cb, void *opaque) { - return timer_new_tl(ctx->tlg.tl[type], scale, cb, opaque); + return timer_new_full(&ctx->tlg, type, scale, 0, cb, opaque); +} + +/** + * aio_timer_init_with_attrs: + * @ctx: the aio context + * @ts: the timer + * @type: the clock type + * @scale: the scale + * @attributes: 0, or one to multiple OR'ed QEMU_TIMER_ATTR_<id> values + * to assign + * @cb: the callback to call on timer expiry + * @opaque: the opaque pointer to pass to the callback + * + * Initialise a new timer (with attributes) attached to the context @ctx. + * The caller is responsible for memory allocation. + */ +static inline void aio_timer_init_with_attrs(AioContext *ctx, + QEMUTimer *ts, QEMUClockType type, + int scale, int attributes, + QEMUTimerCB *cb, void *opaque) +{ + timer_init_full(ts, &ctx->tlg, type, scale, attributes, cb, opaque); } /** @@ -420,14 +465,14 @@ static inline QEMUTimer *aio_timer_new(AioContext *ctx, QEMUClockType type, * @opaque: the opaque pointer to pass to the callback * * Initialise a new timer attached to the context @ctx. - * The caller is responsible for memory allocation. + * See aio_timer_init_with_attrs for details. */ static inline void aio_timer_init(AioContext *ctx, QEMUTimer *ts, QEMUClockType type, int scale, QEMUTimerCB *cb, void *opaque) { - timer_init_tl(ts, ctx->tlg.tl[type], scale, cb, opaque); + timer_init_full(ts, &ctx->tlg, type, scale, 0, cb, opaque); } /** diff --git a/include/elf.h b/include/elf.h index 312f68af81..5f45f9b997 100644 --- a/include/elf.h +++ b/include/elf.h @@ -28,8 +28,11 @@ typedef int64_t Elf64_Sxword; #define PT_PHDR 6 #define PT_LOPROC 0x70000000 #define PT_HIPROC 0x7fffffff -#define PT_MIPS_REGINFO 0x70000000 -#define PT_MIPS_OPTIONS 0x70000001 + +#define PT_MIPS_REGINFO 0x70000000 +#define PT_MIPS_RTPROC 0x70000001 +#define PT_MIPS_OPTIONS 0x70000002 +#define PT_MIPS_ABIFLAGS 0x70000003 /* Flags in the e_flags field of the header */ /* MIPS architecture level. 
*/ @@ -76,14 +79,38 @@ typedef int64_t Elf64_Sxword; #define EF_MIPS_MACH_OCTEON2 0x008d0000 /* Cavium Networks Octeon2 */ #define EF_MIPS_MACH_OCTEON3 0x008e0000 /* Cavium Networks Octeon3 */ #define EF_MIPS_MACH_5400 0x00910000 /* NEC VR5400 */ -#define EF_MIPS_MACH_5900 0x00920000 /* MIPS R5900 */ +#define EF_MIPS_MACH_5900 0x00920000 /* Toshiba/Sony R5900 */ #define EF_MIPS_MACH_5500 0x00980000 /* NEC VR5500 */ -#define EF_MIPS_MACH_9000 0x00990000 /* PMC-Sierra's RM9000 */ +#define EF_MIPS_MACH_9000 0x00990000 /* PMC-Sierra RM9000 */ #define EF_MIPS_MACH_LS2E 0x00a00000 /* ST Microelectronics Loongson 2E */ #define EF_MIPS_MACH_LS2F 0x00a10000 /* ST Microelectronics Loongson 2F */ #define EF_MIPS_MACH_LS3A 0x00a20000 /* ST Microelectronics Loongson 3A */ #define EF_MIPS_MACH 0x00ff0000 /* EF_MIPS_MACH_xxx selection mask */ +#define MIPS_ABI_FP_ANY 0x0 /* FP ABI doesn't matter */ +#define MIPS_ABI_FP_DOUBLE 0x1 /* -mdouble-float */ +#define MIPS_ABI_FP_SINGLE 0x2 /* -msingle-float */ +#define MIPS_ABI_FP_SOFT 0x3 /* -msoft-float */ +#define MIPS_ABI_FP_OLD_64 0x4 /* -mips32r2 -mfp64 */ +#define MIPS_ABI_FP_XX 0x5 /* -mfpxx */ +#define MIPS_ABI_FP_64 0x6 /* -mips32r2 -mfp64 */ +#define MIPS_ABI_FP_64A 0x7 /* -mips32r2 -mfp64 -mno-odd-spreg */ + +typedef struct mips_elf_abiflags_v0 { + uint16_t version; /* Version of flags structure */ + uint8_t isa_level; /* The level of the ISA: 1-5, 32, 64 */ + uint8_t isa_rev; /* The revision of ISA: */ + /* - 0 for MIPS V and below, */ + /* - 1-n otherwise. */ + uint8_t gpr_size; /* The size of general purpose registers */ + uint8_t cpr1_size; /* The size of co-processor 1 registers */ + uint8_t cpr2_size; /* The size of co-processor 2 registers */ + uint8_t fp_abi; /* The floating-point ABI */ + uint32_t isa_ext; /* Mask of processor-specific extensions */ + uint32_t ases; /* Mask of ASEs used */ + uint32_t flags1; /* Mask of general flags */ + uint32_t flags2; +} Mips_elf_abiflags_v0; /* These constants define the different elf file types */ #define ET_NONE 0 diff --git a/include/exec/cpu-defs.h b/include/exec/cpu-defs.h index a171ffc1a4..4ff62f32bf 100644 --- a/include/exec/cpu-defs.h +++ b/include/exec/cpu-defs.h @@ -24,6 +24,7 @@ #endif #include "qemu/host-utils.h" +#include "qemu/thread.h" #include "qemu/queue.h" #ifdef CONFIG_TCG #include "tcg-target.h" @@ -142,6 +143,8 @@ typedef struct CPUIOTLBEntry { #define CPU_COMMON_TLB \ /* The meaning of the MMU modes is defined in the target code. */ \ + /* tlb_lock serializes updates to tlb_table and tlb_v_table */ \ + QemuSpin tlb_lock; \ CPUTLBEntry tlb_table[NB_MMU_MODES][CPU_TLB_SIZE]; \ CPUTLBEntry tlb_v_table[NB_MMU_MODES][CPU_VTLB_SIZE]; \ CPUIOTLBEntry iotlb[NB_MMU_MODES][CPU_TLB_SIZE]; \ diff --git a/include/exec/cpu_ldst.h b/include/exec/cpu_ldst.h index 41ed0526e2..959068495a 100644 --- a/include/exec/cpu_ldst.h +++ b/include/exec/cpu_ldst.h @@ -126,6 +126,29 @@ extern __thread uintptr_t helper_retaddr; /* The memory helpers for tcg-generated code need tcg_target_long etc. */ #include "tcg.h" +static inline target_ulong tlb_addr_write(const CPUTLBEntry *entry) +{ +#if TCG_OVERSIZED_GUEST + return entry->addr_write; +#else + return atomic_read(&entry->addr_write); +#endif +} + +/* Find the TLB index corresponding to the mmu_idx + address pair. */ +static inline uintptr_t tlb_index(CPUArchState *env, uintptr_t mmu_idx, + target_ulong addr) +{ + return (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); +} + +/* Find the TLB entry corresponding to the mmu_idx + address pair. 
*/ +static inline CPUTLBEntry *tlb_entry(CPUArchState *env, uintptr_t mmu_idx, + target_ulong addr) +{ + return &env->tlb_table[mmu_idx][tlb_index(env, mmu_idx, addr)]; +} + #ifdef MMU_MODE0_SUFFIX #define CPU_MMU_INDEX 0 #define MEMSUFFIX MMU_MODE0_SUFFIX @@ -416,8 +439,7 @@ static inline void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr, #if defined(CONFIG_USER_ONLY) return g2h(addr); #else - int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); - CPUTLBEntry *tlbentry = &env->tlb_table[mmu_idx][index]; + CPUTLBEntry *tlbentry = tlb_entry(env, mmu_idx, addr); abi_ptr tlb_addr; uintptr_t haddr; @@ -426,7 +448,7 @@ static inline void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr, tlb_addr = tlbentry->addr_read; break; case 1: - tlb_addr = tlbentry->addr_write; + tlb_addr = tlb_addr_write(tlbentry); break; case 2: tlb_addr = tlbentry->addr_code; @@ -445,7 +467,7 @@ static inline void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr, return NULL; } - haddr = addr + env->tlb_table[mmu_idx][index].addend; + haddr = addr + tlbentry->addend; return (void *)haddr; #endif /* defined(CONFIG_USER_ONLY) */ } diff --git a/include/exec/cpu_ldst_template.h b/include/exec/cpu_ldst_template.h index 4db2302962..0f061d47ef 100644 --- a/include/exec/cpu_ldst_template.h +++ b/include/exec/cpu_ldst_template.h @@ -81,7 +81,7 @@ glue(glue(glue(cpu_ld, USUFFIX), MEMSUFFIX), _ra)(CPUArchState *env, target_ulong ptr, uintptr_t retaddr) { - int page_index; + CPUTLBEntry *entry; RES_TYPE res; target_ulong addr; int mmu_idx; @@ -94,15 +94,15 @@ glue(glue(glue(cpu_ld, USUFFIX), MEMSUFFIX), _ra)(CPUArchState *env, #endif addr = ptr; - page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); mmu_idx = CPU_MMU_INDEX; - if (unlikely(env->tlb_table[mmu_idx][page_index].ADDR_READ != + entry = tlb_entry(env, mmu_idx, addr); + if (unlikely(entry->ADDR_READ != (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) { oi = make_memop_idx(SHIFT, mmu_idx); res = glue(glue(helper_ret_ld, URETSUFFIX), MMUSUFFIX)(env, addr, oi, retaddr); } else { - uintptr_t hostaddr = addr + env->tlb_table[mmu_idx][page_index].addend; + uintptr_t hostaddr = addr + entry->addend; res = glue(glue(ld, USUFFIX), _p)((uint8_t *)hostaddr); } return res; @@ -120,7 +120,8 @@ glue(glue(glue(cpu_lds, SUFFIX), MEMSUFFIX), _ra)(CPUArchState *env, target_ulong ptr, uintptr_t retaddr) { - int res, page_index; + CPUTLBEntry *entry; + int res; target_ulong addr; int mmu_idx; TCGMemOpIdx oi; @@ -132,15 +133,15 @@ glue(glue(glue(cpu_lds, SUFFIX), MEMSUFFIX), _ra)(CPUArchState *env, #endif addr = ptr; - page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); mmu_idx = CPU_MMU_INDEX; - if (unlikely(env->tlb_table[mmu_idx][page_index].ADDR_READ != + entry = tlb_entry(env, mmu_idx, addr); + if (unlikely(entry->ADDR_READ != (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) { oi = make_memop_idx(SHIFT, mmu_idx); res = (DATA_STYPE)glue(glue(helper_ret_ld, SRETSUFFIX), MMUSUFFIX)(env, addr, oi, retaddr); } else { - uintptr_t hostaddr = addr + env->tlb_table[mmu_idx][page_index].addend; + uintptr_t hostaddr = addr + entry->addend; res = glue(glue(lds, SUFFIX), _p)((uint8_t *)hostaddr); } return res; @@ -162,7 +163,7 @@ glue(glue(glue(cpu_st, SUFFIX), MEMSUFFIX), _ra)(CPUArchState *env, target_ulong ptr, RES_TYPE v, uintptr_t retaddr) { - int page_index; + CPUTLBEntry *entry; target_ulong addr; int mmu_idx; TCGMemOpIdx oi; @@ -174,15 +175,15 @@ glue(glue(glue(cpu_st, SUFFIX), MEMSUFFIX), _ra)(CPUArchState *env, #endif addr = ptr; - page_index = (addr >> 
TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); mmu_idx = CPU_MMU_INDEX; - if (unlikely(env->tlb_table[mmu_idx][page_index].addr_write != + entry = tlb_entry(env, mmu_idx, addr); + if (unlikely(tlb_addr_write(entry) != (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) { oi = make_memop_idx(SHIFT, mmu_idx); glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)(env, addr, v, oi, retaddr); } else { - uintptr_t hostaddr = addr + env->tlb_table[mmu_idx][page_index].addend; + uintptr_t hostaddr = addr + entry->addend; glue(glue(st, SUFFIX), _p)((uint8_t *)hostaddr, v); } } diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h index 5f78125582..815e5b1e83 100644 --- a/include/exec/exec-all.h +++ b/include/exec/exec-all.h @@ -100,6 +100,11 @@ void cpu_address_space_init(CPUState *cpu, int asidx, #if !defined(CONFIG_USER_ONLY) && defined(CONFIG_TCG) /* cputlb.c */ /** + * tlb_init - initialize a CPU's TLB + * @cpu: CPU whose TLB should be initialized + */ +void tlb_init(CPUState *cpu); +/** * tlb_flush_page: * @cpu: CPU whose TLB should be flushed * @addr: virtual address of page to be flushed @@ -258,6 +263,9 @@ void tlb_set_page(CPUState *cpu, target_ulong vaddr, void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx, uintptr_t retaddr); #else +static inline void tlb_init(CPUState *cpu) +{ +} static inline void tlb_flush_page(CPUState *cpu, target_ulong addr) { } diff --git a/include/exec/memory.h b/include/exec/memory.h index 3a427aacf1..667466b8f3 100644 --- a/include/exec/memory.h +++ b/include/exec/memory.h @@ -419,9 +419,9 @@ struct MemoryListener { bool match_data, uint64_t data, EventNotifier *e); void (*eventfd_del)(MemoryListener *listener, MemoryRegionSection *section, bool match_data, uint64_t data, EventNotifier *e); - void (*coalesced_mmio_add)(MemoryListener *listener, MemoryRegionSection *section, + void (*coalesced_io_add)(MemoryListener *listener, MemoryRegionSection *section, hwaddr addr, hwaddr len); - void (*coalesced_mmio_del)(MemoryListener *listener, MemoryRegionSection *section, + void (*coalesced_io_del)(MemoryListener *listener, MemoryRegionSection *section, hwaddr addr, hwaddr len); /* Lower = earlier (during add), later (during del) */ unsigned priority; diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h index 3abb639056..9ecd911c3e 100644 --- a/include/exec/ram_addr.h +++ b/include/exec/ram_addr.h @@ -27,6 +27,7 @@ struct RAMBlock { struct rcu_head rcu; struct MemoryRegion *mr; uint8_t *host; + uint8_t *colo_cache; /* For colo, VM's ram cache */ ram_addr_t offset; ram_addr_t used_length; ram_addr_t max_length; diff --git a/include/hw/hotplug.h b/include/hw/hotplug.h index 51541d63e1..1a0516a479 100644 --- a/include/hw/hotplug.h +++ b/include/hw/hotplug.h @@ -47,8 +47,6 @@ typedef void (*hotplug_fn)(HotplugHandler *plug_handler, * @parent: Opaque parent interface. * @pre_plug: pre plug callback called at start of device.realize(true) * @plug: plug callback called at end of device.realize(true). - * @post_plug: post plug callback called after device.realize(true) and device - * reset * @unplug_request: unplug request callback. * Used as a means to initiate device unplug for devices that * require asynchronous unplug handling. 
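This hotplug.h hunk and the one that follows remove the post_plug callback altogether; virtio-scsi, its only user, now performs the announcement from its plug callback, as the hw/scsi/virtio-scsi.c hunk above shows. A hedged sketch of a handler written against the callbacks that remain (the my_host_* names are hypothetical and not part of the patch):

#include "qemu/osdep.h"
#include "hw/hotplug.h"

/* Sketch only: wire up the surviving HotplugHandlerClass callbacks. */
static void my_host_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
                         Error **errp)
{
    /* Runs at the end of device realize; work that used to sit in a
     * post_plug callback now belongs here. */
}

static void my_host_unplug(HotplugHandler *hotplug_dev, DeviceState *dev,
                           Error **errp)
{
    object_property_set_bool(OBJECT(dev), false, "realized", errp);
}

static void my_host_class_init(ObjectClass *klass, void *data)
{
    HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass);

    hc->plug = my_host_plug;
    hc->unplug = my_host_unplug;    /* hc->post_plug no longer exists */
}

Anything that previously ran after realize and reset via post_plug has to move into the plug callback or into the device's own realize path.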
@@ -63,7 +61,6 @@ typedef struct HotplugHandlerClass { /* <public> */ hotplug_fn pre_plug; hotplug_fn plug; - void (*post_plug)(HotplugHandler *plug_handler, DeviceState *plugged_dev); hotplug_fn unplug_request; hotplug_fn unplug; } HotplugHandlerClass; @@ -87,14 +84,6 @@ void hotplug_handler_pre_plug(HotplugHandler *plug_handler, Error **errp); /** - * hotplug_handler_post_plug: - * - * Call #HotplugHandlerClass.post_plug callback of @plug_handler. - */ -void hotplug_handler_post_plug(HotplugHandler *plug_handler, - DeviceState *plugged_dev); - -/** * hotplug_handler_unplug_request: * * Calls #HotplugHandlerClass.unplug_request callback of @plug_handler. diff --git a/include/hw/hyperv/hyperv-proto.h b/include/hw/hyperv/hyperv-proto.h new file mode 100644 index 0000000000..21dc28aee9 --- /dev/null +++ b/include/hw/hyperv/hyperv-proto.h @@ -0,0 +1,130 @@ +/* + * Definitions for Hyper-V guest/hypervisor interaction + * + * Copyright (c) 2017-2018 Virtuozzo International GmbH. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#ifndef HW_HYPERV_HYPERV_PROTO_H +#define HW_HYPERV_HYPERV_PROTO_H + +#include "qemu/bitmap.h" + +/* + * Hypercall status code + */ +#define HV_STATUS_SUCCESS 0 +#define HV_STATUS_INVALID_HYPERCALL_CODE 2 +#define HV_STATUS_INVALID_HYPERCALL_INPUT 3 +#define HV_STATUS_INVALID_ALIGNMENT 4 +#define HV_STATUS_INVALID_PARAMETER 5 +#define HV_STATUS_INSUFFICIENT_MEMORY 11 +#define HV_STATUS_INVALID_PORT_ID 17 +#define HV_STATUS_INVALID_CONNECTION_ID 18 +#define HV_STATUS_INSUFFICIENT_BUFFERS 19 + +/* + * Hypercall numbers + */ +#define HV_POST_MESSAGE 0x005c +#define HV_SIGNAL_EVENT 0x005d +#define HV_HYPERCALL_FAST (1u << 16) + +/* + * Message size + */ +#define HV_MESSAGE_PAYLOAD_SIZE 240 + +/* + * Message types + */ +#define HV_MESSAGE_NONE 0x00000000 +#define HV_MESSAGE_VMBUS 0x00000001 +#define HV_MESSAGE_UNMAPPED_GPA 0x80000000 +#define HV_MESSAGE_GPA_INTERCEPT 0x80000001 +#define HV_MESSAGE_TIMER_EXPIRED 0x80000010 +#define HV_MESSAGE_INVALID_VP_REGISTER_VALUE 0x80000020 +#define HV_MESSAGE_UNRECOVERABLE_EXCEPTION 0x80000021 +#define HV_MESSAGE_UNSUPPORTED_FEATURE 0x80000022 +#define HV_MESSAGE_EVENTLOG_BUFFERCOMPLETE 0x80000040 +#define HV_MESSAGE_X64_IOPORT_INTERCEPT 0x80010000 +#define HV_MESSAGE_X64_MSR_INTERCEPT 0x80010001 +#define HV_MESSAGE_X64_CPUID_INTERCEPT 0x80010002 +#define HV_MESSAGE_X64_EXCEPTION_INTERCEPT 0x80010003 +#define HV_MESSAGE_X64_APIC_EOI 0x80010004 +#define HV_MESSAGE_X64_LEGACY_FP_ERROR 0x80010005 + +/* + * Message flags + */ +#define HV_MESSAGE_FLAG_PENDING 0x1 + +/* + * Number of synthetic interrupts + */ +#define HV_SINT_COUNT 16 + +/* + * Event flags number per SINT + */ +#define HV_EVENT_FLAGS_COUNT (256 * 8) + +/* + * Connection id valid bits + */ +#define HV_CONNECTION_ID_MASK 0x00ffffff + +/* + * Input structure for POST_MESSAGE hypercall + */ +struct hyperv_post_message_input { + uint32_t connection_id; + uint32_t _reserved; + uint32_t message_type; + uint32_t payload_size; + uint8_t payload[HV_MESSAGE_PAYLOAD_SIZE]; +}; + +/* + * Input structure for SIGNAL_EVENT hypercall + */ +struct hyperv_signal_event_input { + uint32_t connection_id; + uint16_t flag_number; + uint16_t _reserved_zero; +}; + +/* + * SynIC message structures + */ +struct hyperv_message_header { + uint32_t message_type; + uint8_t payload_size; + uint8_t message_flags; /* HV_MESSAGE_FLAG_XX */ + uint8_t _reserved[2]; + uint64_t sender; +}; + +struct hyperv_message { + struct 
hyperv_message_header header; + uint8_t payload[HV_MESSAGE_PAYLOAD_SIZE]; +}; + +struct hyperv_message_page { + struct hyperv_message slot[HV_SINT_COUNT]; +}; + +/* + * SynIC event flags structures + */ +struct hyperv_event_flags { + DECLARE_BITMAP(flags, HV_EVENT_FLAGS_COUNT); +}; + +struct hyperv_event_flags_page { + struct hyperv_event_flags slot[HV_SINT_COUNT]; +}; + +#endif diff --git a/include/hw/hyperv/hyperv.h b/include/hw/hyperv/hyperv.h new file mode 100644 index 0000000000..597381cb01 --- /dev/null +++ b/include/hw/hyperv/hyperv.h @@ -0,0 +1,83 @@ +/* + * Hyper-V guest/hypervisor interaction + * + * Copyright (c) 2015-2018 Virtuozzo International GmbH. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#ifndef HW_HYPERV_HYPERV_H +#define HW_HYPERV_HYPERV_H + +#include "cpu-qom.h" +#include "hw/hyperv/hyperv-proto.h" + +typedef struct HvSintRoute HvSintRoute; + +/* + * Callback executed in a bottom-half when the status of posting the message + * becomes known, before unblocking the connection for further messages + */ +typedef void (*HvSintMsgCb)(void *data, int status); + +HvSintRoute *hyperv_sint_route_new(uint32_t vp_index, uint32_t sint, + HvSintMsgCb cb, void *cb_data); +void hyperv_sint_route_ref(HvSintRoute *sint_route); +void hyperv_sint_route_unref(HvSintRoute *sint_route); + +int hyperv_sint_route_set_sint(HvSintRoute *sint_route); + +/* + * Submit a message to be posted in vcpu context. If the submission succeeds, + * the status of posting the message is reported via the callback associated + * with the @sint_route; until then no more messages are accepted. + */ +int hyperv_post_msg(HvSintRoute *sint_route, struct hyperv_message *msg); +/* + * Set event flag @eventno, and signal the SINT if the flag has changed. + */ +int hyperv_set_event_flag(HvSintRoute *sint_route, unsigned eventno); + +/* + * Handler for messages arriving from the guest via HV_POST_MESSAGE hypercall. + * Executed in vcpu context. + */ +typedef uint16_t (*HvMsgHandler)(const struct hyperv_post_message_input *msg, + void *data); +/* + * Associate @handler with the message connection @conn_id, such that @handler + * is called with @data when the guest executes HV_POST_MESSAGE hypercall on + * @conn_id. If @handler is NULL clear the association. + */ +int hyperv_set_msg_handler(uint32_t conn_id, HvMsgHandler handler, void *data); +/* + * Associate @notifier with the event connection @conn_id, such that @notifier + * is signaled when the guest executes HV_SIGNAL_EVENT hypercall on @conn_id. + * If @notifier is NULL clear the association. + */ +int hyperv_set_event_flag_handler(uint32_t conn_id, EventNotifier *notifier); + +/* + * Process HV_POST_MESSAGE hypercall: parse the data in the guest memory as + * specified in @param, and call the HvMsgHandler associated with the + * connection on the message contained therein. + */ +uint16_t hyperv_hcall_post_message(uint64_t param, bool fast); +/* + * Process HV_SIGNAL_EVENT hypercall: signal the EventNotifier associated with + * the connection as specified in @param. 
+ */ +uint16_t hyperv_hcall_signal_event(uint64_t param, bool fast); + +static inline uint32_t hyperv_vp_index(CPUState *cs) +{ + return cs->cpu_index; +} + +void hyperv_synic_add(CPUState *cs); +void hyperv_synic_reset(CPUState *cs); +void hyperv_synic_update(CPUState *cs, bool enable, + hwaddr msg_page_addr, hwaddr event_page_addr); + +#endif diff --git a/include/hw/i386/pc.h b/include/hw/i386/pc.h index 6894f37df1..dfe6746692 100644 --- a/include/hw/i386/pc.h +++ b/include/hw/i386/pc.h @@ -294,6 +294,14 @@ int e820_add_entry(uint64_t, uint64_t, uint32_t); int e820_get_num_entries(void); bool e820_get_entry(int, uint32_t, uint64_t *, uint64_t *); +#define PC_COMPAT_3_0 \ + HW_COMPAT_3_0 \ + {\ + .driver = TYPE_X86_CPU,\ + .property = "x-hv-synic-kvm-only",\ + .value = "on",\ + } + #define PC_COMPAT_2_12 \ HW_COMPAT_2_12 \ {\ diff --git a/include/hw/vfio/vfio-common.h b/include/hw/vfio/vfio-common.h index e46a28910a..1b434d02f6 100644 --- a/include/hw/vfio/vfio-common.h +++ b/include/hw/vfio/vfio-common.h @@ -31,8 +31,7 @@ #include <linux/vfio.h> #endif -#define ERR_PREFIX "vfio error: %s: " -#define WARN_PREFIX "vfio warning: %s: " +#define VFIO_MSG_PREFIX "vfio %s: " enum { VFIO_DEVICE_TYPE_PCI = 0, diff --git a/include/migration/colo.h b/include/migration/colo.h index 2fe48ad353..99ce17aca7 100644 --- a/include/migration/colo.h +++ b/include/migration/colo.h @@ -16,14 +16,21 @@ #include "qemu-common.h" #include "qapi/qapi-types-migration.h" +enum colo_event { + COLO_EVENT_NONE, + COLO_EVENT_CHECKPOINT, + COLO_EVENT_FAILOVER, +}; + void colo_info_init(void); void migrate_start_colo_process(MigrationState *s); bool migration_in_colo_state(void); /* loadvm */ -bool migration_incoming_enable_colo(void); -void migration_incoming_exit_colo(void); +void migration_incoming_enable_colo(void); +void migration_incoming_disable_colo(void); +bool migration_incoming_colo_enabled(void); void *colo_process_incoming_thread(void *opaque); bool migration_incoming_in_colo_state(void); diff --git a/include/net/filter.h b/include/net/filter.h index 435acd6f82..49da666ac0 100644 --- a/include/net/filter.h +++ b/include/net/filter.h @@ -38,6 +38,8 @@ typedef ssize_t (FilterReceiveIOV)(NetFilterState *nc, typedef void (FilterStatusChanged) (NetFilterState *nf, Error **errp); +typedef void (FilterHandleEvent) (NetFilterState *nf, int event, Error **errp); + typedef struct NetFilterClass { ObjectClass parent_class; @@ -45,6 +47,7 @@ typedef struct NetFilterClass { FilterSetup *setup; FilterCleanup *cleanup; FilterStatusChanged *status_changed; + FilterHandleEvent *handle_event; /* mandatory */ FilterReceiveIOV *receive_iov; } NetFilterClass; @@ -77,4 +80,6 @@ ssize_t qemu_netfilter_pass_to_next(NetClientState *sender, int iovcnt, void *opaque); +void colo_notify_filters_event(int event, Error **errp); + #endif /* QEMU_NET_FILTER_H */ diff --git a/include/qapi/error.h b/include/qapi/error.h index bcb86a79f5..51b63dd4b5 100644 --- a/include/qapi/error.h +++ b/include/qapi/error.h @@ -52,8 +52,12 @@ * where Error **errp is a parameter, by convention the last one. * * Pass an existing error to the caller with the message modified: + * error_propagate_prepend(errp, err, "Could not frobnicate '%s': ", name); + * + * Avoid * error_propagate(errp, err); * error_prepend(errp, "Could not frobnicate '%s': ", name); + * because this fails to prepend when @errp is &error_fatal.
* * Create a new error and pass it to the caller: * error_setg(errp, "situation normal, all fouled up"); @@ -215,6 +219,16 @@ void error_setg_win32_internal(Error **errp, */ void error_propagate(Error **dst_errp, Error *local_err); + +/* + * Propagate error object (if any) with some text prepended. + * Behaves like + * error_prepend(&local_err, fmt, ...); + * error_propagate(dst_errp, local_err); + */ +void error_propagate_prepend(Error **dst_errp, Error *local_err, + const char *fmt, ...); + /* * Prepend some text to @errp's human-readable error message. * The text is made by formatting @fmt, @ap like vprintf(). diff --git a/include/qapi/qmp/qerror.h b/include/qapi/qmp/qerror.h index 145571f618..7c76e24aa7 100644 --- a/include/qapi/qmp/qerror.h +++ b/include/qapi/qmp/qerror.h @@ -79,6 +79,9 @@ #define QERR_QGA_COMMAND_FAILED \ "Guest agent command failed, error was '%s'" +#define QERR_REPLAY_NOT_SUPPORTED \ + "Record/replay feature is not supported for '%s'" + #define QERR_SET_PASSWD_FAILED \ "Could not set password" @@ -88,7 +91,4 @@ #define QERR_UNSUPPORTED \ "this feature or command is not currently supported" -#define QERR_REPLAY_NOT_SUPPORTED \ - "Record/replay feature is not supported for '%s'" - #endif /* QERROR_H */ diff --git a/include/qemu-common.h b/include/qemu-common.h index 85f4749aef..ed60ba251d 100644 --- a/include/qemu-common.h +++ b/include/qemu-common.h @@ -17,7 +17,7 @@ #define TFR(expr) do { if ((expr) != -1) break; } while (errno == EINTR) /* Copyright string for -version arguments, About dialogs, etc */ -#define QEMU_COPYRIGHT "Copyright (c) 2003-2017 " \ +#define QEMU_COPYRIGHT "Copyright (c) 2003-2018 " \ "Fabrice Bellard and the QEMU Project developers" /* Bug reporting information for --help arguments, About dialogs, etc */ diff --git a/include/qemu/atomic128.h b/include/qemu/atomic128.h new file mode 100644 index 0000000000..a6af22ff10 --- /dev/null +++ b/include/qemu/atomic128.h @@ -0,0 +1,153 @@ +/* + * Simple interface for 128-bit atomic operations. + * + * Copyright (C) 2018 Linaro, Ltd. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + * See docs/devel/atomics.txt for discussion about the guarantees each + * atomic primitive is meant to provide. + */ + +#ifndef QEMU_ATOMIC128_H +#define QEMU_ATOMIC128_H + +/* + * GCC is a house divided about supporting large atomic operations. + * + * For hosts that only have large compare-and-swap, a legalistic reading + * of the C++ standard means that one cannot implement __atomic_read on + * read-only memory, and thus all atomic operations must synchronize + * through libatomic. + * + * See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=80878 + * + * This interpretation is not especially helpful for QEMU. + * For softmmu, all RAM is always read/write from the hypervisor. + * For user-only, if the guest doesn't implement such an __atomic_read + * then the host need not worry about it either. + * + * Moreover, using libatomic is not an option, because its interface is + * built for std::atomic<T>, and requires that *all* accesses to such an + * object go through the library. In our case we do not have an object + * in the C/C++ sense, but a view of memory as seen by the guest. + * The guest may issue a large atomic operation and then access those + * pieces using word-sized accesses. From the hypervisor, we have no + * way to connect those two actions. + * + * Therefore, special case each platform. 
+ */
+
+#if defined(CONFIG_ATOMIC128)
+static inline Int128 atomic16_cmpxchg(Int128 *ptr, Int128 cmp, Int128 new)
+{
+    return atomic_cmpxchg__nocheck(ptr, cmp, new);
+}
+# define HAVE_CMPXCHG128 1
+#elif defined(CONFIG_CMPXCHG128)
+static inline Int128 atomic16_cmpxchg(Int128 *ptr, Int128 cmp, Int128 new)
+{
+    return __sync_val_compare_and_swap_16(ptr, cmp, new);
+}
+# define HAVE_CMPXCHG128 1
+#elif defined(__aarch64__)
+/* Through gcc 8, aarch64 has no support for 128-bit at all. */
+static inline Int128 atomic16_cmpxchg(Int128 *ptr, Int128 cmp, Int128 new)
+{
+    uint64_t cmpl = int128_getlo(cmp), cmph = int128_gethi(cmp);
+    uint64_t newl = int128_getlo(new), newh = int128_gethi(new);
+    uint64_t oldl, oldh;
+    uint32_t tmp;
+
+    asm("0: ldaxp %[oldl], %[oldh], %[mem]\n\t"
+        "cmp %[oldl], %[cmpl]\n\t"
+        "ccmp %[oldh], %[cmph], #0, eq\n\t"
+        "b.ne 1f\n\t"
+        "stlxp %w[tmp], %[newl], %[newh], %[mem]\n\t"
+        "cbnz %w[tmp], 0b\n"
+        "1:"
+        : [mem] "+m"(*ptr), [tmp] "=&r"(tmp),
+          [oldl] "=&r"(oldl), [oldh] "=&r"(oldh)
+        : [cmpl] "r"(cmpl), [cmph] "r"(cmph),
+          [newl] "r"(newl), [newh] "r"(newh)
+        : "memory", "cc");
+
+    return int128_make128(oldl, oldh);
+}
+# define HAVE_CMPXCHG128 1
+#else
+/* Fallback definition that must be optimized away, or error. */
+Int128 QEMU_ERROR("unsupported atomic")
+    atomic16_cmpxchg(Int128 *ptr, Int128 cmp, Int128 new);
+# define HAVE_CMPXCHG128 0
+#endif /* Some definition for HAVE_CMPXCHG128 */
+
+
+#if defined(CONFIG_ATOMIC128)
+static inline Int128 atomic16_read(Int128 *ptr)
+{
+    return atomic_read__nocheck(ptr);
+}
+
+static inline void atomic16_set(Int128 *ptr, Int128 val)
+{
+    atomic_set__nocheck(ptr, val);
+}
+
+# define HAVE_ATOMIC128 1
+#elif !defined(CONFIG_USER_ONLY) && defined(__aarch64__)
+/* We can do better than cmpxchg for AArch64. */
+static inline Int128 atomic16_read(Int128 *ptr)
+{
+    uint64_t l, h;
+    uint32_t tmp;
+
+    /* The load must be paired with the store to guarantee not tearing. */
+    asm("0: ldxp %[l], %[h], %[mem]\n\t"
+        "stxp %w[tmp], %[l], %[h], %[mem]\n\t"
+        "cbnz %w[tmp], 0b"
+        : [mem] "+m"(*ptr), [tmp] "=r"(tmp), [l] "=r"(l), [h] "=r"(h));
+
+    return int128_make128(l, h);
+}
+
+static inline void atomic16_set(Int128 *ptr, Int128 val)
+{
+    uint64_t l = int128_getlo(val), h = int128_gethi(val);
+    uint64_t t1, t2;
+
+    /* Load into temporaries to acquire the exclusive access lock. */
+    asm("0: ldxp %[t1], %[t2], %[mem]\n\t"
+        "stxp %w[t1], %[l], %[h], %[mem]\n\t"
+        "cbnz %w[t1], 0b"
+        : [mem] "+m"(*ptr), [t1] "=&r"(t1), [t2] "=&r"(t2)
+        : [l] "r"(l), [h] "r"(h));
+}
+
+# define HAVE_ATOMIC128 1
+#elif !defined(CONFIG_USER_ONLY) && HAVE_CMPXCHG128
+static inline Int128 atomic16_read(Int128 *ptr)
+{
+    /* Maybe replace 0 with 0, returning the old value. */
+    return atomic16_cmpxchg(ptr, 0, 0);
+}
+
+static inline void atomic16_set(Int128 *ptr, Int128 val)
+{
+    Int128 old = *ptr, cmp;
+    do {
+        cmp = old;
+        old = atomic16_cmpxchg(ptr, cmp, val);
+    } while (old != cmp);
+}
+
+# define HAVE_ATOMIC128 1
+#else
+/* Fallback definitions that must be optimized away, or error.
*/ +Int128 QEMU_ERROR("unsupported atomic") atomic16_read(Int128 *ptr); +void QEMU_ERROR("unsupported atomic") atomic16_set(Int128 *ptr, Int128 val); +# define HAVE_ATOMIC128 0 +#endif /* Some definition for HAVE_ATOMIC128 */ + +#endif /* QEMU_ATOMIC128_H */ diff --git a/include/qemu/compiler.h b/include/qemu/compiler.h index bf47e7bee4..6b92710487 100644 --- a/include/qemu/compiler.h +++ b/include/qemu/compiler.h @@ -131,6 +131,32 @@ #define HAS_ASSUME_ALIGNED #endif +#ifndef __has_attribute +#define __has_attribute(x) 0 /* compatibility with older GCC */ +#endif + +/* + * GCC doesn't provide __has_attribute() until GCC 5, but we know all the GCC + * versions we support have the "flatten" attribute. Clang may not have the + * "flatten" attribute but always has __has_attribute() to check for it. + */ +#if __has_attribute(flatten) || !defined(__clang__) +# define QEMU_FLATTEN __attribute__((flatten)) +#else +# define QEMU_FLATTEN +#endif + +/* + * If __attribute__((error)) is present, use it to produce an error at + * compile time. Otherwise, one must wait for the linker to diagnose + * the missing symbol. + */ +#if __has_attribute(error) +# define QEMU_ERROR(X) __attribute__((error(X))) +#else +# define QEMU_ERROR(X) +#endif + /* Implement C11 _Generic via GCC builtins. Example: * * QEMU_GENERIC(x, (float, sinf), (long double, sinl), sin) (x) diff --git a/include/qemu/osdep.h b/include/qemu/osdep.h index 4f8559e550..3bf48bcdec 100644 --- a/include/qemu/osdep.h +++ b/include/qemu/osdep.h @@ -123,6 +123,18 @@ extern int daemon(int, int); #include "qemu/typedefs.h" /* + * For mingw, as of v6.0.0, the function implementing the assert macro is + * not marked as noreturn, so the compiler cannot delete code following an + * assert(false) as unused. We rely on this within the code base to delete + * code that is unreachable when features are disabled. + * All supported versions of Glib's g_assert() satisfy this requirement. + */ +#ifdef __MINGW32__ +#undef assert +#define assert(x) g_assert(x) +#endif + +/* * According to waitpid man page: * WCOREDUMP * This macro is not specified in POSIX.1-2001 and is not diff --git a/include/qemu/timer.h b/include/qemu/timer.h index a005ed2692..a86330c987 100644 --- a/include/qemu/timer.h +++ b/include/qemu/timer.h @@ -2,6 +2,7 @@ #define QEMU_TIMER_H #include "qemu-common.h" +#include "qemu/bitops.h" #include "qemu/notify.h" #include "qemu/host-utils.h" @@ -42,14 +43,6 @@ * In icount mode, this clock counts nanoseconds while the virtual * machine is running. It is used to increase @QEMU_CLOCK_VIRTUAL * while the CPUs are sleeping and thus not executing instructions. - * - * @QEMU_CLOCK_VIRTUAL_EXT: virtual clock for external subsystems - * - * The virtual clock only runs during the emulation. It stops - * when the virtual machine is stopped. The timers for this clock - * do not recorded in rr mode, therefore this clock could be used - * for the subsystems that operate outside the guest core. - * */ typedef enum { @@ -57,10 +50,27 @@ typedef enum { QEMU_CLOCK_VIRTUAL = 1, QEMU_CLOCK_HOST = 2, QEMU_CLOCK_VIRTUAL_RT = 3, - QEMU_CLOCK_VIRTUAL_EXT = 4, QEMU_CLOCK_MAX } QEMUClockType; +/** + * QEMU Timer attributes: + * + * An individual timer may be given one or multiple attributes when initialized. + * Each attribute corresponds to one bit. Attributes modify the processing + * of timers when they fire. 
+ *
+ * The following attributes are available:
+ *
+ * QEMU_TIMER_ATTR_EXTERNAL: drives external subsystem
+ *
+ * Timers with this attribute are not recorded in rr mode, so they can be
+ * used for subsystems that operate outside the guest core. Applicable only
+ * to the virtual clock type.
+ */
+
+#define QEMU_TIMER_ATTR_EXTERNAL BIT(0)
+
 typedef struct QEMUTimerList QEMUTimerList;
 
 struct QEMUTimerListGroup {
@@ -76,6 +86,7 @@ struct QEMUTimer {
     QEMUTimerCB *cb;
     void *opaque;
     QEMUTimer *next;
+    int attributes;
     int scale;
 };
 
@@ -427,22 +438,27 @@ int64_t timerlistgroup_deadline_ns(QEMUTimerListGroup *tlg);
  */
 
 /**
- * timer_init_tl:
+ * timer_init_full:
  * @ts: the timer to be initialised
- * @timer_list: the timer list to attach the timer to
+ * @timer_list_group: (optional) the timer list group to attach the timer to
+ * @type: the clock type to use
  * @scale: the scale value for the timer
+ * @attributes: 0, or one or more OR'ed QEMU_TIMER_ATTR_<id> values
  * @cb: the callback to be called when the timer expires
  * @opaque: the opaque pointer to be passed to the callback
 *
- * Initialise a new timer and associate it with @timer_list.
+ * Initialise a timer with the given scale and attributes,
+ * and associate it with the timer list for the given clock @type in
+ * @timer_list_group (or the default timer list group, if NULL).
 * The caller is responsible for allocating the memory.
 *
 * You need not call an explicit deinit call. Simply make
 * sure it is not on a list with timer_del.
 */
-void timer_init_tl(QEMUTimer *ts,
-                   QEMUTimerList *timer_list, int scale,
-                   QEMUTimerCB *cb, void *opaque);
+void timer_init_full(QEMUTimer *ts,
+                     QEMUTimerListGroup *timer_list_group, QEMUClockType type,
+                     int scale, int attributes,
+                     QEMUTimerCB *cb, void *opaque);
 
 /**
  * timer_init:
@@ -454,14 +470,12 @@ void timer_init_tl(QEMUTimer *ts,
  *
  * Initialize a timer with the given scale on the default timer list
  * associated with the clock.
- *
- * You need not call an explicit deinit call. Simply make
- * sure it is not on a list with timer_del.
+ * See timer_init_full for details.
  */
 static inline void timer_init(QEMUTimer *ts, QEMUClockType type, int scale,
                               QEMUTimerCB *cb, void *opaque)
 {
-    timer_init_tl(ts, main_loop_tlg.tl[type], scale, cb, opaque);
+    timer_init_full(ts, NULL, type, scale, 0, cb, opaque);
 }
 
 /**
@@ -473,9 +487,7 @@ static inline void timer_init(QEMUTimer *ts, QEMUClockType type, int scale,
  *
  * Initialize a timer with nanosecond scale on the default timer list
  * associated with the clock.
- *
- * You need not call an explicit deinit call. Simply make
- * sure it is not on a list with timer_del.
+ * See timer_init_full for details.
  */
 static inline void timer_init_ns(QEMUTimer *ts, QEMUClockType type,
                                  QEMUTimerCB *cb, void *opaque)
@@ -492,9 +504,7 @@ static inline void timer_init_ns(QEMUTimer *ts, QEMUClockType type,
  *
  * Initialize a timer with microsecond scale on the default timer list
  * associated with the clock.
- *
- * You need not call an explicit deinit call. Simply make
- * sure it is not on a list with timer_del.
+ * See timer_init_full for details.
 */
 static inline void timer_init_us(QEMUTimer *ts, QEMUClockType type,
                                  QEMUTimerCB *cb, void *opaque)
@@ -511,9 +521,7 @@ static inline void timer_init_us(QEMUTimer *ts, QEMUClockType type,
 *
 * Initialize a timer with millisecond scale on the default timer list
 * associated with the clock.
- *
- * You need not call an explicit deinit call. Simply make
- * sure it is not on a list with timer_del.
+ * See timer_init_full for details.
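
A hedged example of the new interface using timer_new_full() (declared further down; my_cb and my_opaque are placeholders): a virtual-clock timer that record/replay should ignore, per QEMU_TIMER_ATTR_EXTERNAL above:

    QEMUTimer *t = timer_new_full(NULL, QEMU_CLOCK_VIRTUAL, SCALE_NS,
                                  QEMU_TIMER_ATTR_EXTERNAL, my_cb, my_opaque);
    timer_mod(t, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 100000);
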
*/ static inline void timer_init_ms(QEMUTimer *ts, QEMUClockType type, QEMUTimerCB *cb, void *opaque) @@ -522,27 +530,37 @@ static inline void timer_init_ms(QEMUTimer *ts, QEMUClockType type, } /** - * timer_new_tl: - * @timer_list: the timer list to attach the timer to + * timer_new_full: + * @timer_list_group: (optional) the timer list group to attach the timer to + * @type: the clock type to use * @scale: the scale value for the timer + * @attributes: 0, or one or more OR'ed QEMU_TIMER_ATTR_<id> values * @cb: the callback to be called when the timer expires * @opaque: the opaque pointer to be passed to the callback * - * Create a new timer and associate it with @timer_list. + * Create a new timer with the given scale and attributes, + * and associate it with timer list for given clock @type in @timer_list_group + * (or default timer list group, if NULL). * The memory is allocated by the function. * * This is not the preferred interface unless you know you - * are going to call timer_free. Use timer_init instead. + * are going to call timer_free. Use timer_init or timer_init_full instead. + * + * The default timer list has one special feature: in icount mode, + * %QEMU_CLOCK_VIRTUAL timers are run in the vCPU thread. This is + * not true of other timer lists, which are typically associated + * with an AioContext---each of them runs its timer callbacks in its own + * AioContext thread. * * Returns: a pointer to the timer */ -static inline QEMUTimer *timer_new_tl(QEMUTimerList *timer_list, - int scale, - QEMUTimerCB *cb, - void *opaque) +static inline QEMUTimer *timer_new_full(QEMUTimerListGroup *timer_list_group, + QEMUClockType type, + int scale, int attributes, + QEMUTimerCB *cb, void *opaque) { QEMUTimer *ts = g_malloc0(sizeof(QEMUTimer)); - timer_init_tl(ts, timer_list, scale, cb, opaque); + timer_init_full(ts, timer_list_group, type, scale, attributes, cb, opaque); return ts; } @@ -553,21 +571,16 @@ static inline QEMUTimer *timer_new_tl(QEMUTimerList *timer_list, * @cb: the callback to be called when the timer expires * @opaque: the opaque pointer to be passed to the callback * - * Create a new timer and associate it with the default - * timer list for the clock type @type. - * - * The default timer list has one special feature: in icount mode, - * %QEMU_CLOCK_VIRTUAL timers are run in the vCPU thread. This is - * not true of other timer lists, which are typically associated - * with an AioContext---each of them runs its timer callbacks in its own - * AioContext thread. + * Create a new timer with the given scale, + * and associate it with the default timer list for the clock type @type. + * See timer_new_full for details. * * Returns: a pointer to the timer */ static inline QEMUTimer *timer_new(QEMUClockType type, int scale, QEMUTimerCB *cb, void *opaque) { - return timer_new_tl(main_loop_tlg.tl[type], scale, cb, opaque); + return timer_new_full(NULL, type, scale, 0, cb, opaque); } /** @@ -578,12 +591,7 @@ static inline QEMUTimer *timer_new(QEMUClockType type, int scale, * * Create a new timer with nanosecond scale on the default timer list * associated with the clock. - * - * The default timer list has one special feature: in icount mode, - * %QEMU_CLOCK_VIRTUAL timers are run in the vCPU thread. This is - * not true of other timer lists, which are typically associated - * with an AioContext---each of them runs its timer callbacks in its own - * AioContext thread. + * See timer_new_full for details. 
* * Returns: a pointer to the newly created timer */ @@ -599,14 +607,9 @@ static inline QEMUTimer *timer_new_ns(QEMUClockType type, QEMUTimerCB *cb, * @cb: the callback to call when the timer expires * @opaque: the opaque pointer to pass to the callback * - * The default timer list has one special feature: in icount mode, - * %QEMU_CLOCK_VIRTUAL timers are run in the vCPU thread. This is - * not true of other timer lists, which are typically associated - * with an AioContext---each of them runs its timer callbacks in its own - * AioContext thread. - * * Create a new timer with microsecond scale on the default timer list * associated with the clock. + * See timer_new_full for details. * * Returns: a pointer to the newly created timer */ @@ -622,14 +625,9 @@ static inline QEMUTimer *timer_new_us(QEMUClockType type, QEMUTimerCB *cb, * @cb: the callback to call when the timer expires * @opaque: the opaque pointer to pass to the callback * - * The default timer list has one special feature: in icount mode, - * %QEMU_CLOCK_VIRTUAL timers are run in the vCPU thread. This is - * not true of other timer lists, which are typically associated - * with an AioContext---each of them runs its timer callbacks in its own - * AioContext thread. - * * Create a new timer with millisecond scale on the default timer list * associated with the clock. + * See timer_new_full for details. * * Returns: a pointer to the newly created timer */ @@ -1046,7 +1044,6 @@ static inline int64_t profile_getclock(void) return get_clock(); } -extern int64_t tcg_time; extern int64_t dev_time; #endif diff --git a/include/qom/cpu.h b/include/qom/cpu.h index dc130cd307..4e238b0d9f 100644 --- a/include/qom/cpu.h +++ b/include/qom/cpu.h @@ -1085,6 +1085,17 @@ void cpu_exec_initfn(CPUState *cpu); void cpu_exec_realizefn(CPUState *cpu, Error **errp); void cpu_exec_unrealizefn(CPUState *cpu); +/** + * target_words_bigendian: + * Returns true if the (default) endianness of the target is big endian, + * false otherwise. Note that in target-specific code, you can use + * TARGET_WORDS_BIGENDIAN directly instead. On the other hand, common + * code should normally never need to know about the endianness of the + * target, so please do *not* use this function unless you know very well + * what you are doing! 
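
One plausible use of the helper documented above in target-independent code (a sketch; cpu_to_target32_sketch is a made-up name, cpu_to_be32/cpu_to_le32 are the usual bswap.h helpers):

    static uint32_t cpu_to_target32_sketch(uint32_t v)
    {
        return target_words_bigendian() ? cpu_to_be32(v) : cpu_to_le32(v);
    }
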
+ */ +bool target_words_bigendian(void); + #ifdef NEED_CPU_H #ifdef CONFIG_SOFTMMU diff --git a/include/sysemu/blockdev.h b/include/sysemu/blockdev.h index 24954b94e0..d34c4920dc 100644 --- a/include/sysemu/blockdev.h +++ b/include/sysemu/blockdev.h @@ -54,7 +54,8 @@ DriveInfo *drive_get_next(BlockInterfaceType type); QemuOpts *drive_def(const char *optstr); QemuOpts *drive_add(BlockInterfaceType type, int index, const char *file, const char *optstr); -DriveInfo *drive_new(QemuOpts *arg, BlockInterfaceType block_default_type); +DriveInfo *drive_new(QemuOpts *arg, BlockInterfaceType block_default_type, + Error **errp); /* device-hotplug */ diff --git a/include/sysemu/hvf.h b/include/sysemu/hvf.h index 241118845c..aaa51d2c51 100644 --- a/include/sysemu/hvf.h +++ b/include/sysemu/hvf.h @@ -17,7 +17,7 @@ #include "exec/memory.h" #include "sysemu/accel.h" -extern int hvf_disabled; +extern bool hvf_allowed; #ifdef CONFIG_HVF #include <Hypervisor/hv.h> #include <Hypervisor/hv_vmx.h> @@ -26,7 +26,7 @@ extern int hvf_disabled; #include "hw/hw.h" uint32_t hvf_get_supported_cpuid(uint32_t func, uint32_t idx, int reg); -#define hvf_enabled() !hvf_disabled +#define hvf_enabled() (hvf_allowed) #else #define hvf_enabled() 0 #define hvf_get_supported_cpuid(func, idx, reg) 0 diff --git a/include/sysemu/numa.h b/include/sysemu/numa.h index 7a0ae751aa..21713b7e2f 100644 --- a/include/sysemu/numa.h +++ b/include/sysemu/numa.h @@ -22,7 +22,6 @@ struct NumaNodeMem { }; extern NodeInfo numa_info[MAX_NODES]; -int parse_numa(void *opaque, QemuOpts *opts, Error **errp); void parse_numa_opts(MachineState *ms); void numa_complete_configuration(MachineState *ms); void query_numa_node_mem(NumaNodeMem node_mem[]); diff --git a/include/sysemu/replay.h b/include/sysemu/replay.h index 7f7a594eca..3a7c58e423 100644 --- a/include/sysemu/replay.h +++ b/include/sysemu/replay.h @@ -100,14 +100,20 @@ bool replay_has_interrupt(void); /* Processing clocks and other time sources */ /*! Save the specified clock */ -int64_t replay_save_clock(ReplayClockKind kind, int64_t clock); +int64_t replay_save_clock(ReplayClockKind kind, int64_t clock, + int64_t raw_icount); /*! Read the specified clock from the log or return cached data */ int64_t replay_read_clock(ReplayClockKind kind); /*! Saves or reads the clock depending on the current replay mode. */ #define REPLAY_CLOCK(clock, value) \ (replay_mode == REPLAY_MODE_PLAY ? replay_read_clock((clock)) \ : replay_mode == REPLAY_MODE_RECORD \ - ? replay_save_clock((clock), (value)) \ + ? replay_save_clock((clock), (value), cpu_get_icount_raw()) \ + : (value)) +#define REPLAY_CLOCK_LOCKED(clock, value) \ + (replay_mode == REPLAY_MODE_PLAY ? replay_read_clock((clock)) \ + : replay_mode == REPLAY_MODE_RECORD \ + ? 
replay_save_clock((clock), (value), cpu_get_icount_raw_locked()) \ : (value)) /* Events */ diff --git a/include/sysemu/tpm.h b/include/sysemu/tpm.h index 9ae1ab6da3..17a97ed77a 100644 --- a/include/sysemu/tpm.h +++ b/include/sysemu/tpm.h @@ -16,7 +16,7 @@ #include "qom/object.h" int tpm_config_parse(QemuOptsList *opts_list, const char *optarg); -int tpm_init(void); +void tpm_init(void); void tpm_cleanup(void); typedef enum TPMVersion { diff --git a/include/ui/console.h b/include/ui/console.h index fb969caf70..c17803c530 100644 --- a/include/ui/console.h +++ b/include/ui/console.h @@ -453,7 +453,7 @@ void qemu_display_early_init(DisplayOptions *opts); void qemu_display_init(DisplayState *ds, DisplayOptions *opts); /* vnc.c */ -void vnc_display_init(const char *id); +void vnc_display_init(const char *id, Error **errp); void vnc_display_open(const char *id, Error **errp); void vnc_display_add_client(const char *id, int csock, bool skipauth); int vnc_display_password(const char *id, const char *password); diff --git a/linux-headers/asm-powerpc/kvm.h b/linux-headers/asm-powerpc/kvm.h index 1b32b56a03..8c876c166e 100644 --- a/linux-headers/asm-powerpc/kvm.h +++ b/linux-headers/asm-powerpc/kvm.h @@ -634,6 +634,7 @@ struct kvm_ppc_cpu_char { #define KVM_REG_PPC_DEC_EXPIRY (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xbe) #define KVM_REG_PPC_ONLINE (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xbf) +#define KVM_REG_PPC_PTCR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc0) /* Transactional Memory checkpointed state: * This is all GPRs, all VSX regs and a subset of SPRs diff --git a/linux-headers/asm-x86/kvm.h b/linux-headers/asm-x86/kvm.h index fd23d5778e..dabfcf7c39 100644 --- a/linux-headers/asm-x86/kvm.h +++ b/linux-headers/asm-x86/kvm.h @@ -288,6 +288,7 @@ struct kvm_reinject_control { #define KVM_VCPUEVENT_VALID_SIPI_VECTOR 0x00000002 #define KVM_VCPUEVENT_VALID_SHADOW 0x00000004 #define KVM_VCPUEVENT_VALID_SMM 0x00000008 +#define KVM_VCPUEVENT_VALID_PAYLOAD 0x00000010 /* Interrupt shadow states */ #define KVM_X86_SHADOW_INT_MOV_SS 0x01 @@ -299,7 +300,7 @@ struct kvm_vcpu_events { __u8 injected; __u8 nr; __u8 has_error_code; - __u8 pad; + __u8 pending; __u32 error_code; } exception; struct { @@ -322,7 +323,9 @@ struct kvm_vcpu_events { __u8 smm_inside_nmi; __u8 latched_init; } smi; - __u32 reserved[9]; + __u8 reserved[27]; + __u8 exception_has_payload; + __u64 exception_payload; }; /* for KVM_GET/SET_DEBUGREGS */ @@ -381,6 +384,7 @@ struct kvm_sync_regs { #define KVM_STATE_NESTED_GUEST_MODE 0x00000001 #define KVM_STATE_NESTED_RUN_PENDING 0x00000002 +#define KVM_STATE_NESTED_EVMCS 0x00000004 #define KVM_STATE_NESTED_SMM_GUEST_MODE 0x00000001 #define KVM_STATE_NESTED_SMM_VMXON 0x00000002 diff --git a/linux-headers/linux/kvm.h b/linux-headers/linux/kvm.h index 83ba4eb571..f11a7eb49c 100644 --- a/linux-headers/linux/kvm.h +++ b/linux-headers/linux/kvm.h @@ -420,13 +420,19 @@ struct kvm_run { struct kvm_coalesced_mmio_zone { __u64 addr; __u32 size; - __u32 pad; + union { + __u32 pad; + __u32 pio; + }; }; struct kvm_coalesced_mmio { __u64 phys_addr; __u32 len; - __u32 pad; + union { + __u32 pad; + __u32 pio; + }; __u8 data[8]; }; @@ -719,6 +725,7 @@ struct kvm_ppc_one_seg_page_size { #define KVM_PPC_PAGE_SIZES_REAL 0x00000001 #define KVM_PPC_1T_SEGMENTS 0x00000002 +#define KVM_PPC_NO_HASH 0x00000004 struct kvm_ppc_smmu_info { __u64 flags; @@ -953,6 +960,11 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_NESTED_STATE 157 #define KVM_CAP_ARM_INJECT_SERROR_ESR 158 #define KVM_CAP_MSR_PLATFORM_INFO 159 +#define KVM_CAP_PPC_NESTED_HV 
160 +#define KVM_CAP_HYPERV_SEND_IPI 161 +#define KVM_CAP_COALESCED_PIO 162 +#define KVM_CAP_HYPERV_ENLIGHTENED_VMCS 163 +#define KVM_CAP_EXCEPTION_PAYLOAD 164 #ifdef KVM_CAP_IRQ_ROUTING diff --git a/linux-user/aarch64/signal.c b/linux-user/aarch64/signal.c index 07fedfc33c..f84a9cf28a 100644 --- a/linux-user/aarch64/signal.c +++ b/linux-user/aarch64/signal.c @@ -314,7 +314,7 @@ static int target_restore_sigframe(CPUARMState *env, break; case TARGET_SVE_MAGIC: - if (arm_feature(env, ARM_FEATURE_SVE)) { + if (cpu_isar_feature(aa64_sve, arm_env_get_cpu(env))) { vq = (env->vfp.zcr_el[1] & 0xf) + 1; sve_size = QEMU_ALIGN_UP(TARGET_SVE_SIG_CONTEXT_SIZE(vq), 16); if (!sve && size == sve_size) { @@ -433,7 +433,7 @@ static void target_setup_frame(int usig, struct target_sigaction *ka, &layout); /* SVE state needs saving only if it exists. */ - if (arm_feature(env, ARM_FEATURE_SVE)) { + if (cpu_isar_feature(aa64_sve, arm_env_get_cpu(env))) { vq = (env->vfp.zcr_el[1] & 0xf) + 1; sve_size = QEMU_ALIGN_UP(TARGET_SVE_SIG_CONTEXT_SIZE(vq), 16); sve_ofs = alloc_sigframe_space(sve_size, &layout); diff --git a/linux-user/elfload.c b/linux-user/elfload.c index 10bca65b99..055f6a95ab 100644 --- a/linux-user/elfload.c +++ b/linux-user/elfload.c @@ -458,6 +458,10 @@ static uint32_t get_elf_hwcap(void) /* probe for the extra features */ #define GET_FEATURE(feat, hwcap) \ do { if (arm_feature(&cpu->env, feat)) { hwcaps |= hwcap; } } while (0) + +#define GET_FEATURE_ID(feat, hwcap) \ + do { if (cpu_isar_feature(feat, cpu)) { hwcaps |= hwcap; } } while (0) + /* EDSP is in v5TE and above, but all our v5 CPUs are v5TE */ GET_FEATURE(ARM_FEATURE_V5, ARM_HWCAP_ARM_EDSP); GET_FEATURE(ARM_FEATURE_VFP, ARM_HWCAP_ARM_VFP); @@ -467,8 +471,8 @@ static uint32_t get_elf_hwcap(void) GET_FEATURE(ARM_FEATURE_VFP3, ARM_HWCAP_ARM_VFPv3); GET_FEATURE(ARM_FEATURE_V6K, ARM_HWCAP_ARM_TLS); GET_FEATURE(ARM_FEATURE_VFP4, ARM_HWCAP_ARM_VFPv4); - GET_FEATURE(ARM_FEATURE_ARM_DIV, ARM_HWCAP_ARM_IDIVA); - GET_FEATURE(ARM_FEATURE_THUMB_DIV, ARM_HWCAP_ARM_IDIVT); + GET_FEATURE_ID(arm_div, ARM_HWCAP_ARM_IDIVA); + GET_FEATURE_ID(thumb_div, ARM_HWCAP_ARM_IDIVT); /* All QEMU's VFPv3 CPUs have 32 registers, see VFP_DREG in translate.c. 
* Note that the ARM_HWCAP_ARM_VFPv3D16 bit is always the inverse of * ARM_HWCAP_ARM_VFPD32 (and so always clear for QEMU); it is unrelated @@ -485,15 +489,16 @@ static uint32_t get_elf_hwcap2(void) ARMCPU *cpu = ARM_CPU(thread_cpu); uint32_t hwcaps = 0; - GET_FEATURE(ARM_FEATURE_V8_AES, ARM_HWCAP2_ARM_AES); - GET_FEATURE(ARM_FEATURE_V8_PMULL, ARM_HWCAP2_ARM_PMULL); - GET_FEATURE(ARM_FEATURE_V8_SHA1, ARM_HWCAP2_ARM_SHA1); - GET_FEATURE(ARM_FEATURE_V8_SHA256, ARM_HWCAP2_ARM_SHA2); - GET_FEATURE(ARM_FEATURE_CRC, ARM_HWCAP2_ARM_CRC32); + GET_FEATURE_ID(aa32_aes, ARM_HWCAP2_ARM_AES); + GET_FEATURE_ID(aa32_pmull, ARM_HWCAP2_ARM_PMULL); + GET_FEATURE_ID(aa32_sha1, ARM_HWCAP2_ARM_SHA1); + GET_FEATURE_ID(aa32_sha2, ARM_HWCAP2_ARM_SHA2); + GET_FEATURE_ID(aa32_crc32, ARM_HWCAP2_ARM_CRC32); return hwcaps; } #undef GET_FEATURE +#undef GET_FEATURE_ID #else /* 64 bit ARM definitions */ @@ -568,25 +573,26 @@ static uint32_t get_elf_hwcap(void) hwcaps |= ARM_HWCAP_A64_ASIMD; /* probe for the extra features */ -#define GET_FEATURE(feat, hwcap) \ - do { if (arm_feature(&cpu->env, feat)) { hwcaps |= hwcap; } } while (0) - GET_FEATURE(ARM_FEATURE_V8_AES, ARM_HWCAP_A64_AES); - GET_FEATURE(ARM_FEATURE_V8_PMULL, ARM_HWCAP_A64_PMULL); - GET_FEATURE(ARM_FEATURE_V8_SHA1, ARM_HWCAP_A64_SHA1); - GET_FEATURE(ARM_FEATURE_V8_SHA256, ARM_HWCAP_A64_SHA2); - GET_FEATURE(ARM_FEATURE_CRC, ARM_HWCAP_A64_CRC32); - GET_FEATURE(ARM_FEATURE_V8_SHA3, ARM_HWCAP_A64_SHA3); - GET_FEATURE(ARM_FEATURE_V8_SM3, ARM_HWCAP_A64_SM3); - GET_FEATURE(ARM_FEATURE_V8_SM4, ARM_HWCAP_A64_SM4); - GET_FEATURE(ARM_FEATURE_V8_SHA512, ARM_HWCAP_A64_SHA512); - GET_FEATURE(ARM_FEATURE_V8_FP16, - ARM_HWCAP_A64_FPHP | ARM_HWCAP_A64_ASIMDHP); - GET_FEATURE(ARM_FEATURE_V8_ATOMICS, ARM_HWCAP_A64_ATOMICS); - GET_FEATURE(ARM_FEATURE_V8_RDM, ARM_HWCAP_A64_ASIMDRDM); - GET_FEATURE(ARM_FEATURE_V8_DOTPROD, ARM_HWCAP_A64_ASIMDDP); - GET_FEATURE(ARM_FEATURE_V8_FCMA, ARM_HWCAP_A64_FCMA); - GET_FEATURE(ARM_FEATURE_SVE, ARM_HWCAP_A64_SVE); -#undef GET_FEATURE +#define GET_FEATURE_ID(feat, hwcap) \ + do { if (cpu_isar_feature(feat, cpu)) { hwcaps |= hwcap; } } while (0) + + GET_FEATURE_ID(aa64_aes, ARM_HWCAP_A64_AES); + GET_FEATURE_ID(aa64_pmull, ARM_HWCAP_A64_PMULL); + GET_FEATURE_ID(aa64_sha1, ARM_HWCAP_A64_SHA1); + GET_FEATURE_ID(aa64_sha256, ARM_HWCAP_A64_SHA2); + GET_FEATURE_ID(aa64_sha512, ARM_HWCAP_A64_SHA512); + GET_FEATURE_ID(aa64_crc32, ARM_HWCAP_A64_CRC32); + GET_FEATURE_ID(aa64_sha3, ARM_HWCAP_A64_SHA3); + GET_FEATURE_ID(aa64_sm3, ARM_HWCAP_A64_SM3); + GET_FEATURE_ID(aa64_sm4, ARM_HWCAP_A64_SM4); + GET_FEATURE_ID(aa64_fp16, ARM_HWCAP_A64_FPHP | ARM_HWCAP_A64_ASIMDHP); + GET_FEATURE_ID(aa64_atomics, ARM_HWCAP_A64_ATOMICS); + GET_FEATURE_ID(aa64_rdm, ARM_HWCAP_A64_ASIMDRDM); + GET_FEATURE_ID(aa64_dp, ARM_HWCAP_A64_ASIMDDP); + GET_FEATURE_ID(aa64_fcma, ARM_HWCAP_A64_FCMA); + GET_FEATURE_ID(aa64_sve, ARM_HWCAP_A64_SVE); + +#undef GET_FEATURE_ID return hwcaps; } diff --git a/linux-user/ioctls.h b/linux-user/ioctls.h index 586c794639..ae8951625f 100644 --- a/linux-user/ioctls.h +++ b/linux-user/ioctls.h @@ -131,6 +131,52 @@ IOCTL(FS_IOC_GETFLAGS, IOC_R, MK_PTR(TYPE_INT)) IOCTL(FS_IOC_SETFLAGS, IOC_W, MK_PTR(TYPE_INT)) +#ifdef CONFIG_USBFS + /* USB ioctls */ + IOCTL(USBDEVFS_CONTROL, IOC_RW, + MK_PTR(MK_STRUCT(STRUCT_usbdevfs_ctrltransfer))) + IOCTL(USBDEVFS_BULK, IOC_RW, + MK_PTR(MK_STRUCT(STRUCT_usbdevfs_bulktransfer))) + IOCTL(USBDEVFS_RESETEP, IOC_W, MK_PTR(TYPE_INT)) + IOCTL(USBDEVFS_SETINTERFACE, IOC_W, + MK_PTR(MK_STRUCT(STRUCT_usbdevfs_setinterface))) + 
IOCTL(USBDEVFS_SETCONFIGURATION, IOC_W, MK_PTR(TYPE_INT)) + IOCTL(USBDEVFS_GETDRIVER, IOC_R, + MK_PTR(MK_STRUCT(STRUCT_usbdevfs_getdriver))) + IOCTL_SPECIAL(USBDEVFS_SUBMITURB, IOC_W, do_ioctl_usbdevfs_submiturb, + MK_PTR(MK_STRUCT(STRUCT_usbdevfs_urb))) + IOCTL_SPECIAL(USBDEVFS_DISCARDURB, IOC_RW, do_ioctl_usbdevfs_discardurb, + MK_PTR(MK_STRUCT(STRUCT_usbdevfs_urb))) + IOCTL_SPECIAL(USBDEVFS_REAPURB, IOC_R, do_ioctl_usbdevfs_reapurb, + MK_PTR(TYPE_PTRVOID)) + IOCTL_SPECIAL(USBDEVFS_REAPURBNDELAY, IOC_R, do_ioctl_usbdevfs_reapurb, + MK_PTR(TYPE_PTRVOID)) + IOCTL(USBDEVFS_DISCSIGNAL, IOC_W, + MK_PTR(MK_STRUCT(STRUCT_usbdevfs_disconnectsignal))) + IOCTL(USBDEVFS_CLAIMINTERFACE, IOC_W, MK_PTR(TYPE_INT)) + IOCTL(USBDEVFS_RELEASEINTERFACE, IOC_W, MK_PTR(TYPE_INT)) + IOCTL(USBDEVFS_CONNECTINFO, IOC_R, + MK_PTR(MK_STRUCT(STRUCT_usbdevfs_connectinfo))) + IOCTL(USBDEVFS_IOCTL, IOC_RW, MK_PTR(MK_STRUCT(STRUCT_usbdevfs_ioctl))) + IOCTL(USBDEVFS_HUB_PORTINFO, IOC_R, + MK_PTR(MK_STRUCT(STRUCT_usbdevfs_hub_portinfo))) + IOCTL(USBDEVFS_RESET, 0, TYPE_NULL) + IOCTL(USBDEVFS_CLEAR_HALT, IOC_W, MK_PTR(TYPE_INT)) + IOCTL(USBDEVFS_DISCONNECT, 0, TYPE_NULL) + IOCTL(USBDEVFS_CONNECT, 0, TYPE_NULL) + IOCTL(USBDEVFS_CLAIM_PORT, IOC_W, MK_PTR(TYPE_INT)) + IOCTL(USBDEVFS_RELEASE_PORT, IOC_W, MK_PTR(TYPE_INT)) + IOCTL(USBDEVFS_GET_CAPABILITIES, IOC_R, MK_PTR(TYPE_INT)) + IOCTL(USBDEVFS_DISCONNECT_CLAIM, IOC_W, + MK_PTR(MK_STRUCT(STRUCT_usbdevfs_disconnect_claim))) +#ifdef USBDEVFS_DROP_PRIVILEGES + IOCTL(USBDEVFS_DROP_PRIVILEGES, IOC_W, MK_PTR(TYPE_INT)) +#endif +#ifdef USBDEVFS_GET_SPEED + IOCTL(USBDEVFS_GET_SPEED, 0, TYPE_NULL) +#endif +#endif /* CONFIG_USBFS */ + IOCTL(SIOCATMARK, IOC_R, MK_PTR(TYPE_INT)) IOCTL(SIOCGIFNAME, IOC_RW, MK_PTR(TYPE_INT)) IOCTL(SIOCGIFFLAGS, IOC_W | IOC_R, MK_PTR(MK_STRUCT(STRUCT_short_ifreq))) diff --git a/linux-user/mips/target_elf.h b/linux-user/mips/target_elf.h index fa5d30bf99..a98c9bd6ad 100644 --- a/linux-user/mips/target_elf.h +++ b/linux-user/mips/target_elf.h @@ -12,6 +12,9 @@ static inline const char *cpu_get_model(uint32_t eflags) if ((eflags & EF_MIPS_ARCH) == EF_MIPS_ARCH_32R6) { return "mips32r6-generic"; } + if ((eflags & EF_MIPS_MACH) == EF_MIPS_MACH_5900) { + return "R5900"; + } return "24Kf"; } #endif diff --git a/linux-user/mips/target_syscall.h b/linux-user/mips/target_syscall.h index 2fca1c6bf9..33177af113 100644 --- a/linux-user/mips/target_syscall.h +++ b/linux-user/mips/target_syscall.h @@ -244,4 +244,8 @@ static inline abi_ulong target_shmlba(CPUMIPSState *env) return 0x40000; } +/* MIPS-specific prctl() options */ +#define TARGET_PR_SET_FP_MODE 45 +#define TARGET_PR_GET_FP_MODE 46 + #endif /* MIPS_TARGET_SYSCALL_H */ diff --git a/linux-user/mips64/target_syscall.h b/linux-user/mips64/target_syscall.h index 078437d765..c1160e69f8 100644 --- a/linux-user/mips64/target_syscall.h +++ b/linux-user/mips64/target_syscall.h @@ -241,4 +241,8 @@ static inline abi_ulong target_shmlba(CPUMIPSState *env) return 0x40000; } +/* MIPS-specific prctl() options */ +#define TARGET_PR_SET_FP_MODE 45 +#define TARGET_PR_GET_FP_MODE 46 + #endif /* MIPS64_TARGET_SYSCALL_H */ diff --git a/linux-user/qemu.h b/linux-user/qemu.h index b4959e41c6..1beb6a2cfc 100644 --- a/linux-user/qemu.h +++ b/linux-user/qemu.h @@ -461,27 +461,59 @@ static inline int access_ok(int type, abi_ulong addr, abi_ulong size) These are usually used to access struct data members once the struct has been locked - usually with lock_user_struct. 
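
The locking pattern referred to above typically looks like this (a sketch based on the usual linux-user helpers; the target_timespec layout is assumed):

    static inline abi_long host_to_target_timespec_sketch(abi_ulong target_addr,
                                                          struct timespec *host_ts)
    {
        struct target_timespec *target_ts;

        if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
            return -TARGET_EFAULT;
        }
        __put_user(host_ts->tv_sec, &target_ts->tv_sec);
        __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
        unlock_user_struct(target_ts, target_addr, 1);
        return 0;
    }
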
*/ -/* Tricky points: - - Use __builtin_choose_expr to avoid type promotion from ?:, - - Invalid sizes result in a compile time error stemming from - the fact that abort has no parameters. - - It's easier to use the endian-specific unaligned load/store - functions than host-endian unaligned load/store plus tswapN. */ - -#define __put_user_e(x, hptr, e) \ - (__builtin_choose_expr(sizeof(*(hptr)) == 1, stb_p, \ - __builtin_choose_expr(sizeof(*(hptr)) == 2, stw_##e##_p, \ - __builtin_choose_expr(sizeof(*(hptr)) == 4, stl_##e##_p, \ - __builtin_choose_expr(sizeof(*(hptr)) == 8, stq_##e##_p, abort)))) \ - ((hptr), (x)), (void)0) - -#define __get_user_e(x, hptr, e) \ - ((x) = (typeof(*hptr))( \ - __builtin_choose_expr(sizeof(*(hptr)) == 1, ldub_p, \ - __builtin_choose_expr(sizeof(*(hptr)) == 2, lduw_##e##_p, \ - __builtin_choose_expr(sizeof(*(hptr)) == 4, ldl_##e##_p, \ - __builtin_choose_expr(sizeof(*(hptr)) == 8, ldq_##e##_p, abort)))) \ - (hptr)), (void)0) +/* + * Tricky points: + * - Use __builtin_choose_expr to avoid type promotion from ?:, + * - Invalid sizes result in a compile time error stemming from + * the fact that abort has no parameters. + * - It's easier to use the endian-specific unaligned load/store + * functions than host-endian unaligned load/store plus tswapN. + * - The pragmas are necessary only to silence a clang false-positive + * warning: see https://bugs.llvm.org/show_bug.cgi?id=39113 . + * - We have to disable -Wpragmas warnings to avoid a complaint about + * an unknown warning type from older compilers that don't know about + * -Waddress-of-packed-member. + * - gcc has bugs in its _Pragma() support in some versions, eg + * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=83256 -- so we only + * include the warning-suppression pragmas for clang + */ +#ifdef __clang__ +#define PRAGMA_DISABLE_PACKED_WARNING \ + _Pragma("GCC diagnostic push"); \ + _Pragma("GCC diagnostic ignored \"-Wpragmas\""); \ + _Pragma("GCC diagnostic ignored \"-Waddress-of-packed-member\"") + +#define PRAGMA_REENABLE_PACKED_WARNING \ + _Pragma("GCC diagnostic pop") + +#else +#define PRAGMA_DISABLE_PACKED_WARNING +#define PRAGMA_REENABLE_PACKED_WARNING +#endif + +#define __put_user_e(x, hptr, e) \ + do { \ + PRAGMA_DISABLE_PACKED_WARNING; \ + (__builtin_choose_expr(sizeof(*(hptr)) == 1, stb_p, \ + __builtin_choose_expr(sizeof(*(hptr)) == 2, stw_##e##_p, \ + __builtin_choose_expr(sizeof(*(hptr)) == 4, stl_##e##_p, \ + __builtin_choose_expr(sizeof(*(hptr)) == 8, stq_##e##_p, abort)))) \ + ((hptr), (x)), (void)0); \ + PRAGMA_REENABLE_PACKED_WARNING; \ + } while (0) + +#define __get_user_e(x, hptr, e) \ + do { \ + PRAGMA_DISABLE_PACKED_WARNING; \ + ((x) = (typeof(*hptr))( \ + __builtin_choose_expr(sizeof(*(hptr)) == 1, ldub_p, \ + __builtin_choose_expr(sizeof(*(hptr)) == 2, lduw_##e##_p, \ + __builtin_choose_expr(sizeof(*(hptr)) == 4, ldl_##e##_p, \ + __builtin_choose_expr(sizeof(*(hptr)) == 8, ldq_##e##_p, abort)))) \ + (hptr)), (void)0); \ + PRAGMA_REENABLE_PACKED_WARNING; \ + } while (0) + #ifdef TARGET_WORDS_BIGENDIAN # define __put_user(x, hptr) __put_user_e(x, hptr, be) diff --git a/linux-user/sparc/signal.c b/linux-user/sparc/signal.c index b4c60aa446..e44e99993c 100644 --- a/linux-user/sparc/signal.c +++ b/linux-user/sparc/signal.c @@ -258,10 +258,6 @@ void setup_frame(int sig, struct target_sigaction *ka, __put_user(val32, &sf->insns[1]); if (err) goto sigsegv; - - /* Flush instruction space. 
*/ - // flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0])); - // tb_flush(env); } unlock_user(sf, sf_addr, sizeof(struct target_signal_frame)); return; diff --git a/linux-user/syscall.c b/linux-user/syscall.c index ae3c0dfef7..15b03e17b9 100644 --- a/linux-user/syscall.c +++ b/linux-user/syscall.c @@ -94,6 +94,10 @@ #include <linux/fiemap.h> #endif #include <linux/fb.h> +#if defined(CONFIG_USBFS) +#include <linux/usbdevice_fs.h> +#include <linux/usb/ch9.h> +#endif #include <linux/vt.h> #include <linux/dm-ioctl.h> #include <linux/reboot.h> @@ -4196,6 +4200,182 @@ static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp, return ret; } +#if defined(CONFIG_USBFS) +#if HOST_LONG_BITS > 64 +#error USBDEVFS thunks do not support >64 bit hosts yet. +#endif +struct live_urb { + uint64_t target_urb_adr; + uint64_t target_buf_adr; + char *target_buf_ptr; + struct usbdevfs_urb host_urb; +}; + +static GHashTable *usbdevfs_urb_hashtable(void) +{ + static GHashTable *urb_hashtable; + + if (!urb_hashtable) { + urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal); + } + return urb_hashtable; +} + +static void urb_hashtable_insert(struct live_urb *urb) +{ + GHashTable *urb_hashtable = usbdevfs_urb_hashtable(); + g_hash_table_insert(urb_hashtable, urb, urb); +} + +static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr) +{ + GHashTable *urb_hashtable = usbdevfs_urb_hashtable(); + return g_hash_table_lookup(urb_hashtable, &target_urb_adr); +} + +static void urb_hashtable_remove(struct live_urb *urb) +{ + GHashTable *urb_hashtable = usbdevfs_urb_hashtable(); + g_hash_table_remove(urb_hashtable, urb); +} + +static abi_long +do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp, + int fd, int cmd, abi_long arg) +{ + const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) }; + const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 }; + struct live_urb *lurb; + void *argptr; + uint64_t hurb; + int target_size; + uintptr_t target_urb_adr; + abi_long ret; + + target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET); + + memset(buf_temp, 0, sizeof(uint64_t)); + ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp)); + if (is_error(ret)) { + return ret; + } + + memcpy(&hurb, buf_temp, sizeof(uint64_t)); + lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb)); + if (!lurb->target_urb_adr) { + return -TARGET_EFAULT; + } + urb_hashtable_remove(lurb); + unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, + lurb->host_urb.buffer_length); + lurb->target_buf_ptr = NULL; + + /* restore the guest buffer pointer */ + lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr; + + /* update the guest urb struct */ + argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0); + if (!argptr) { + g_free(lurb); + return -TARGET_EFAULT; + } + thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET); + unlock_user(argptr, lurb->target_urb_adr, target_size); + + target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET); + /* write back the urb handle */ + argptr = lock_user(VERIFY_WRITE, arg, target_size, 0); + if (!argptr) { + g_free(lurb); + return -TARGET_EFAULT; + } + + /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */ + target_urb_adr = lurb->target_urb_adr; + thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET); + unlock_user(argptr, arg, target_size); + + g_free(lurb); + return ret; +} + +static abi_long +do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie, 
+                             uint8_t *buf_temp __attribute__((unused)),
+                             int fd, int cmd, abi_long arg)
+{
+    struct live_urb *lurb;
+
+    /* map target address back to host URB with metadata. */
+    lurb = urb_hashtable_lookup(arg);
+    if (!lurb) {
+        return -TARGET_EFAULT;
+    }
+    return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
+}
+
+static abi_long
+do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
+                            int fd, int cmd, abi_long arg)
+{
+    const argtype *arg_type = ie->arg_type;
+    int target_size;
+    abi_long ret;
+    void *argptr;
+    int rw_dir;
+    struct live_urb *lurb;
+
+    /*
+     * each submitted URB needs to map to a unique ID for the
+     * kernel, and that unique ID needs to be a pointer to
+     * host memory. hence, we need to malloc for each URB.
+     * isochronous transfers have a variable length struct.
+     */
+    arg_type++;
+    target_size = thunk_type_size(arg_type, THUNK_TARGET);
+
+    /* construct host copy of urb and metadata */
+    lurb = g_try_malloc0(sizeof(struct live_urb));
+    if (!lurb) {
+        return -TARGET_ENOMEM;
+    }
+
+    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
+    if (!argptr) {
+        g_free(lurb);
+        return -TARGET_EFAULT;
+    }
+    thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
+    unlock_user(argptr, arg, 0);
+
+    lurb->target_urb_adr = arg;
+    lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
+
+    /* buffer space used depends on endpoint type so lock the entire buffer */
+    /* control type urbs should check the buffer contents for true direction */
+    rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
+    lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
+                                     lurb->host_urb.buffer_length, 1);
+    if (lurb->target_buf_ptr == NULL) {
+        g_free(lurb);
+        return -TARGET_EFAULT;
+    }
+
+    /* update buffer pointer in host copy */
+    lurb->host_urb.buffer = lurb->target_buf_ptr;
+
+    ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
+    if (is_error(ret)) {
+        unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
+        g_free(lurb);
+    } else {
+        urb_hashtable_insert(lurb);
+    }
+
+    return ret;
+}
+#endif /* CONFIG_USBFS */
+
 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                             int cmd, abi_long arg)
 {
@@ -9347,6 +9527,14 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
             return ret;
         }
 #endif
+#ifdef TARGET_MIPS
+        case TARGET_PR_GET_FP_MODE:
+            /* TODO: Implement TARGET_PR_GET_FP_MODE handling. */
+            return -TARGET_EINVAL;
+        case TARGET_PR_SET_FP_MODE:
+            /* TODO: Implement TARGET_PR_SET_FP_MODE handling. */
+            return -TARGET_EINVAL;
+#endif /* MIPS */
 #ifdef TARGET_AARCH64
         case TARGET_PR_SVE_SET_VL:
             /*
             * We cannot support either PR_SVE_SET_VL_ONEXEC or
             * PR_SVE_VL_INHERIT.  Therefore, anything above 15 bits
             * is not supported,
             * even though the current architectural maximum is VQ=16.
*/ ret = -TARGET_EINVAL; - if (arm_feature(cpu_env, ARM_FEATURE_SVE) + if (cpu_isar_feature(aa64_sve, arm_env_get_cpu(cpu_env)) && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) { CPUARMState *env = cpu_env; ARMCPU *cpu = arm_env_get_cpu(env); @@ -9375,9 +9563,11 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1, return ret; case TARGET_PR_SVE_GET_VL: ret = -TARGET_EINVAL; - if (arm_feature(cpu_env, ARM_FEATURE_SVE)) { - CPUARMState *env = cpu_env; - ret = ((env->vfp.zcr_el[1] & 0xf) + 1) * 16; + { + ARMCPU *cpu = arm_env_get_cpu(cpu_env); + if (cpu_isar_feature(aa64_sve, cpu)) { + ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16; + } } return ret; #endif /* AARCH64 */ diff --git a/linux-user/syscall_defs.h b/linux-user/syscall_defs.h index 18d434d6dc..99bbce083c 100644 --- a/linux-user/syscall_defs.h +++ b/linux-user/syscall_defs.h @@ -863,6 +863,34 @@ struct target_pollfd { #define TARGET_FS_IOC_FIEMAP TARGET_IOWR('f',11,struct fiemap) +/* usb ioctls */ +#define TARGET_USBDEVFS_CONTROL TARGET_IOWRU('U', 0) +#define TARGET_USBDEVFS_BULK TARGET_IOWRU('U', 2) +#define TARGET_USBDEVFS_RESETEP TARGET_IORU('U', 3) +#define TARGET_USBDEVFS_SETINTERFACE TARGET_IORU('U', 4) +#define TARGET_USBDEVFS_SETCONFIGURATION TARGET_IORU('U', 5) +#define TARGET_USBDEVFS_GETDRIVER TARGET_IOWU('U', 8) +#define TARGET_USBDEVFS_SUBMITURB TARGET_IORU('U', 10) +#define TARGET_USBDEVFS_DISCARDURB TARGET_IO('U', 11) +#define TARGET_USBDEVFS_REAPURB TARGET_IOWU('U', 12) +#define TARGET_USBDEVFS_REAPURBNDELAY TARGET_IOWU('U', 13) +#define TARGET_USBDEVFS_DISCSIGNAL TARGET_IORU('U', 14) +#define TARGET_USBDEVFS_CLAIMINTERFACE TARGET_IORU('U', 15) +#define TARGET_USBDEVFS_RELEASEINTERFACE TARGET_IORU('U', 16) +#define TARGET_USBDEVFS_CONNECTINFO TARGET_IOWU('U', 17) +#define TARGET_USBDEVFS_IOCTL TARGET_IOWRU('U', 18) +#define TARGET_USBDEVFS_HUB_PORTINFO TARGET_IORU('U', 19) +#define TARGET_USBDEVFS_RESET TARGET_IO('U', 20) +#define TARGET_USBDEVFS_CLEAR_HALT TARGET_IORU('U', 21) +#define TARGET_USBDEVFS_DISCONNECT TARGET_IO('U', 22) +#define TARGET_USBDEVFS_CONNECT TARGET_IO('U', 23) +#define TARGET_USBDEVFS_CLAIM_PORT TARGET_IORU('U', 24) +#define TARGET_USBDEVFS_RELEASE_PORT TARGET_IORU('U', 25) +#define TARGET_USBDEVFS_GET_CAPABILITIES TARGET_IORU('U', 26) +#define TARGET_USBDEVFS_DISCONNECT_CLAIM TARGET_IORU('U', 27) +#define TARGET_USBDEVFS_DROP_PRIVILEGES TARGET_IOWU('U', 30) +#define TARGET_USBDEVFS_GET_SPEED TARGET_IO('U', 31) + /* cdrom commands */ #define TARGET_CDROMPAUSE 0x5301 /* Pause Audio Operation */ #define TARGET_CDROMRESUME 0x5302 /* Resume paused Audio Operation */ diff --git a/linux-user/syscall_types.h b/linux-user/syscall_types.h index 24631b09be..b98a23b0f1 100644 --- a/linux-user/syscall_types.h +++ b/linux-user/syscall_types.h @@ -266,3 +266,71 @@ STRUCT(blkpg_ioctl_arg, TYPE_INT, /* flags */ TYPE_INT, /* datalen */ TYPE_PTRVOID) /* data */ + +#if defined(CONFIG_USBFS) +/* usb device ioctls */ +STRUCT(usbdevfs_ctrltransfer, + TYPE_CHAR, /* bRequestType */ + TYPE_CHAR, /* bRequest */ + TYPE_SHORT, /* wValue */ + TYPE_SHORT, /* wIndex */ + TYPE_SHORT, /* wLength */ + TYPE_INT, /* timeout */ + TYPE_PTRVOID) /* data */ + +STRUCT(usbdevfs_bulktransfer, + TYPE_INT, /* ep */ + TYPE_INT, /* len */ + TYPE_INT, /* timeout */ + TYPE_PTRVOID) /* data */ + +STRUCT(usbdevfs_setinterface, + TYPE_INT, /* interface */ + TYPE_INT) /* altsetting */ + +STRUCT(usbdevfs_disconnectsignal, + TYPE_INT, /* signr */ + TYPE_PTRVOID) /* context */ + +STRUCT(usbdevfs_getdriver, + TYPE_INT, /* interface 
*/
+       MK_ARRAY(TYPE_CHAR, USBDEVFS_MAXDRIVERNAME + 1)) /* driver */
+
+STRUCT(usbdevfs_connectinfo,
+       TYPE_INT, /* devnum */
+       TYPE_CHAR) /* slow */
+
+STRUCT(usbdevfs_iso_packet_desc,
+       TYPE_INT, /* length */
+       TYPE_INT, /* actual_length */
+       TYPE_INT) /* status */
+
+STRUCT(usbdevfs_urb,
+       TYPE_CHAR, /* type */
+       TYPE_CHAR, /* endpoint */
+       TYPE_INT, /* status */
+       TYPE_INT, /* flags */
+       TYPE_PTRVOID, /* buffer */
+       TYPE_INT, /* buffer_length */
+       TYPE_INT, /* actual_length */
+       TYPE_INT, /* start_frame */
+       TYPE_INT, /* union number_of_packets stream_id */
+       TYPE_INT, /* error_count */
+       TYPE_INT, /* signr */
+       TYPE_PTRVOID, /* usercontext */
+       MK_ARRAY(MK_STRUCT(STRUCT_usbdevfs_iso_packet_desc), 0)) /* desc */
+
+STRUCT(usbdevfs_ioctl,
+       TYPE_INT, /* ifno */
+       TYPE_INT, /* ioctl_code */
+       TYPE_PTRVOID) /* data */
+
+STRUCT(usbdevfs_hub_portinfo,
+       TYPE_CHAR, /* nports */
+       MK_ARRAY(TYPE_CHAR, 127)) /* port */
+
+STRUCT(usbdevfs_disconnect_claim,
+       TYPE_INT, /* interface */
+       TYPE_INT, /* flags */
+       MK_ARRAY(TYPE_CHAR, USBDEVFS_MAXDRIVERNAME + 1)) /* driver */
+#endif /* CONFIG_USBFS */
@@ -2129,7 +2129,7 @@ static void memory_region_update_coalesced_range_as(MemoryRegion *mr, AddressSpa
         .size = fr->addr.size,
     };
 
-    MEMORY_LISTENER_CALL(as, coalesced_mmio_del, Reverse, &section,
+    MEMORY_LISTENER_CALL(as, coalesced_io_del, Reverse, &section,
                          int128_get64(fr->addr.start),
                          int128_get64(fr->addr.size));
     QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
@@ -2140,7 +2140,7 @@ static void memory_region_update_coalesced_range_as(MemoryRegion *mr, AddressSpa
             continue;
         }
         tmp = addrrange_intersection(tmp, fr->addr);
-        MEMORY_LISTENER_CALL(as, coalesced_mmio_add, Forward, &section,
+        MEMORY_LISTENER_CALL(as, coalesced_io_add, Forward, &section,
                              int128_get64(tmp.start),
                              int128_get64(tmp.size));
     }
diff --git a/migration/Makefile.objs b/migration/Makefile.objs
index c83ec47ba8..a4f3bafd86 100644
--- a/migration/Makefile.objs
+++ b/migration/Makefile.objs
@@ -1,6 +1,6 @@
 common-obj-y += migration.o socket.o fd.o exec.o
 common-obj-y += tls.o channel.o savevm.o
-common-obj-y += colo-comm.o colo.o colo-failover.o
+common-obj-y += colo.o colo-failover.o
 common-obj-y += vmstate.o vmstate-types.o page_cache.o
 common-obj-y += qemu-file.o global_state.o
 common-obj-y += qemu-file-channel.o
diff --git a/migration/colo-comm.c b/migration/colo-comm.c
deleted file mode 100644
index df26e4dfe7..0000000000
--- a/migration/colo-comm.c
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * COarse-grain LOck-stepping Virtual Machines for Non-stop Service (COLO)
- * (a.k.a. Fault Tolerance or Continuous Replication)
- *
- * Copyright (c) 2016 HUAWEI TECHNOLOGIES CO., LTD.
- * Copyright (c) 2016 FUJITSU LIMITED
- * Copyright (c) 2016 Intel Corporation
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or
- * later. See the COPYING file in the top-level directory.
- *
- */
-
-#include "qemu/osdep.h"
-#include "migration.h"
-#include "migration/colo.h"
-#include "migration/vmstate.h"
-#include "trace.h"
-
-typedef struct {
-    bool colo_requested;
-} COLOInfo;
-
-static COLOInfo colo_info;
-
-COLOMode get_colo_mode(void)
-{
-    if (migration_in_colo_state()) {
-        return COLO_MODE_PRIMARY;
-    } else if (migration_incoming_in_colo_state()) {
-        return COLO_MODE_SECONDARY;
-    } else {
-        return COLO_MODE_UNKNOWN;
-    }
-}
-
-static int colo_info_pre_save(void *opaque)
-{
-    COLOInfo *s = opaque;
-
-    s->colo_requested = migrate_colo_enabled();
-
-    return 0;
-}
-
-static bool colo_info_need(void *opaque)
-{
-    return migrate_colo_enabled();
-}
-
-static const VMStateDescription colo_state = {
-    .name = "COLOState",
-    .version_id = 1,
-    .minimum_version_id = 1,
-    .pre_save = colo_info_pre_save,
-    .needed = colo_info_need,
-    .fields = (VMStateField[]) {
-        VMSTATE_BOOL(colo_requested, COLOInfo),
-        VMSTATE_END_OF_LIST()
-    },
-};
-
-void colo_info_init(void)
-{
-    vmstate_register(NULL, 0, &colo_state, &colo_info);
-}
-
-bool migration_incoming_enable_colo(void)
-{
-    return colo_info.colo_requested;
-}
-
-void migration_incoming_exit_colo(void)
-{
-    colo_info.colo_requested = false;
-}
diff --git a/migration/colo-failover.c b/migration/colo-failover.c
index 0ae0c41221..4854a96c92 100644
--- a/migration/colo-failover.c
+++ b/migration/colo-failover.c
@@ -77,7 +77,7 @@ FailoverStatus failover_get_state(void)
 
 void qmp_x_colo_lost_heartbeat(Error **errp)
 {
-    if (get_colo_mode() == COLO_MODE_UNKNOWN) {
+    if (get_colo_mode() == COLO_MODE_NONE) {
         error_setg(errp, QERR_FEATURE_DISABLED, "colo");
         return;
     }
diff --git a/migration/colo.c b/migration/colo.c
index 88936f5962..956ac236b7 100644
--- a/migration/colo.c
+++ b/migration/colo.c
@@ -25,8 +25,16 @@
 #include "qemu/error-report.h"
 #include "migration/failover.h"
 #include "replication.h"
+#include "net/colo-compare.h"
+#include "net/colo.h"
+#include "block/block.h"
+#include "qapi/qapi-events-migration.h"
+#include "qapi/qmp/qerror.h"
+#include "sysemu/cpus.h"
+#include "net/filter.h"
 
 static bool vmstate_loading;
+static Notifier packets_compare_notifier;
 
 #define COLO_BUFFER_BASE_SIZE (4 * 1024 * 1024)
 
@@ -53,6 +61,7 @@ static void secondary_vm_do_failover(void)
 {
     int old_state;
     MigrationIncomingState *mis = migration_incoming_get_current();
+    Error *local_err = NULL;
 
     /* Can not do failover during the process of VM's loading VMstate, Or
      * it will break the secondary VM.
@@ -70,6 +79,17 @@ static void secondary_vm_do_failover(void)
     migrate_set_state(&mis->state, MIGRATION_STATUS_COLO,
                       MIGRATION_STATUS_COMPLETED);
 
+    replication_stop_all(true, &local_err);
+    if (local_err) {
+        error_report_err(local_err);
+    }
+
+    /* Notify all filters of all NICs to do a checkpoint */
+    colo_notify_filters_event(COLO_EVENT_FAILOVER, &local_err);
+    if (local_err) {
+        error_report_err(local_err);
+    }
+
     if (!autostart) {
         error_report("\"-S\" qemu option will be ignored in secondary side");
         /* recover runstate to normal migration finish state */
@@ -107,9 +127,15 @@ static void primary_vm_do_failover(void)
 {
     MigrationState *s = migrate_get_current();
     int old_state;
+    Error *local_err = NULL;
 
     migrate_set_state(&s->state, MIGRATION_STATUS_COLO,
                       MIGRATION_STATUS_COMPLETED);
+    /*
+     * Kick the COLO thread, which might be waiting in
+     * qemu_sem_wait(&s->colo_checkpoint_sem).
+     */
+    colo_checkpoint_notify(migrate_get_current());
 
     /*
      * Wake up COLO thread which may blocked in recv() or send(),
@@ -130,10 +156,28 @@ static void primary_vm_do_failover(void)
                      FailoverStatus_str(old_state));
         return;
     }
+
+    replication_stop_all(true, &local_err);
+    if (local_err) {
+        error_report_err(local_err);
+        local_err = NULL;
+    }
+
     /* Notify COLO thread that failover work is finished */
     qemu_sem_post(&s->colo_exit_sem);
 }
 
+COLOMode get_colo_mode(void)
+{
+    if (migration_in_colo_state()) {
+        return COLO_MODE_PRIMARY;
+    } else if (migration_incoming_in_colo_state()) {
+        return COLO_MODE_SECONDARY;
+    } else {
+        return COLO_MODE_NONE;
+    }
+}
+
 void colo_do_failover(MigrationState *s)
 {
     /* Make sure VM stopped while failover happened. */
@@ -207,6 +251,26 @@ void qmp_xen_colo_do_checkpoint(Error **errp)
 #endif
 }
 
+COLOStatus *qmp_query_colo_status(Error **errp)
+{
+    COLOStatus *s = g_new0(COLOStatus, 1);
+
+    s->mode = get_colo_mode();
+
+    switch (failover_get_state()) {
+    case FAILOVER_STATUS_NONE:
+        s->reason = COLO_EXIT_REASON_NONE;
+        break;
+    case FAILOVER_STATUS_REQUIRE:
+        s->reason = COLO_EXIT_REASON_REQUEST;
+        break;
+    default:
+        s->reason = COLO_EXIT_REASON_ERROR;
+    }
+
+    return s;
+}
+
 static void colo_send_message(QEMUFile *f, COLOMessage msg,
                               Error **errp)
 {
@@ -343,21 +407,42 @@ static int colo_do_checkpoint_transaction(MigrationState *s,
         goto out;
     }
 
+    colo_notify_compares_event(NULL, COLO_EVENT_CHECKPOINT, &local_err);
+    if (local_err) {
+        goto out;
+    }
+
     /* Disable block migration */
     migrate_set_block_enabled(false, &local_err);
-    qemu_savevm_state_header(fb);
-    qemu_savevm_state_setup(fb);
     qemu_mutex_lock_iothread();
-    qemu_savevm_state_complete_precopy(fb, false, false);
-    qemu_mutex_unlock_iothread();
-
-    qemu_fflush(fb);
+    replication_do_checkpoint_all(&local_err);
+    if (local_err) {
+        qemu_mutex_unlock_iothread();
+        goto out;
+    }
 
     colo_send_message(s->to_dst_file, COLO_MESSAGE_VMSTATE_SEND, &local_err);
     if (local_err) {
+        qemu_mutex_unlock_iothread();
+        goto out;
+    }
+    /* Note: device state is saved into buffer */
+    ret = qemu_save_device_state(fb);
+
+    qemu_mutex_unlock_iothread();
+    if (ret < 0) {
         goto out;
     }
 
     /*
+     * Only save the VM's live state, which does not include device state.
+     * TODO: We may need a timeout mechanism to prevent the COLO process
+     * from being blocked here.
+     */
+    qemu_savevm_live_state(s->to_dst_file);
+
+    qemu_fflush(fb);
+
+    /*
      * We need the size of the VMstate data in Secondary side,
      * With which we can decide how much data should be read.
      */
@@ -400,6 +485,11 @@ out:
     return ret;
 }
 
+static void colo_compare_notify_checkpoint(Notifier *notifier, void *data)
+{
+    colo_checkpoint_notify(data);
+}
+
 static void colo_process_checkpoint(MigrationState *s)
 {
     QIOChannelBuffer *bioc;
@@ -416,6 +506,9 @@ static void colo_process_checkpoint(MigrationState *s)
         goto out;
     }
 
+    packets_compare_notifier.notify = colo_compare_notify_checkpoint;
+    colo_compare_register_notifier(&packets_compare_notifier);
+
     /*
      * Wait for Secondary finish loading VM states and enter COLO
      * restore.
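
The checkpoint path above raises COLO_EVENT_* on the per-NIC filters via colo_notify_filters_event(); a filter's handle_event callback (the hook added to NetFilterClass earlier in this series) might be shaped like the following sketch, where my_filter_handle_event() and my_filter_flush() are hypothetical names:

    static void my_filter_handle_event(NetFilterState *nf, int event,
                                       Error **errp)
    {
        switch (event) {
        case COLO_EVENT_CHECKPOINT:
            my_filter_flush(nf);    /* e.g. release buffered packets */
            break;
        case COLO_EVENT_FAILOVER:
            /* stop buffering and pass traffic straight through */
            break;
        default:
            break;
        }
    }
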
@@ -430,6 +523,12 @@ static void colo_process_checkpoint(MigrationState *s)
     object_unref(OBJECT(bioc));
 
     qemu_mutex_lock_iothread();
+    replication_start_all(REPLICATION_MODE_PRIMARY, &local_err);
+    if (local_err) {
+        qemu_mutex_unlock_iothread();
+        goto out;
+    }
+
     vm_start();
     qemu_mutex_unlock_iothread();
     trace_colo_vm_state_change("stop", "run");
@@ -445,6 +544,9 @@ static void colo_process_checkpoint(MigrationState *s)
 
         qemu_sem_wait(&s->colo_checkpoint_sem);
 
+        if (s->state != MIGRATION_STATUS_COLO) {
+            goto out;
+        }
         ret = colo_do_checkpoint_transaction(s, bioc, fb);
         if (ret < 0) {
             goto out;
@@ -461,11 +563,38 @@ out:
         qemu_fclose(fb);
     }
 
-    timer_del(s->colo_delay_timer);
 
+    /*
+     * There are only two reasons we can get here, some error happened
+     * or the user triggered failover.
+     */
+    switch (failover_get_state()) {
+    case FAILOVER_STATUS_NONE:
+        qapi_event_send_colo_exit(COLO_MODE_PRIMARY,
+                                  COLO_EXIT_REASON_ERROR);
+        break;
+    case FAILOVER_STATUS_REQUIRE:
+        qapi_event_send_colo_exit(COLO_MODE_PRIMARY,
+                                  COLO_EXIT_REASON_REQUEST);
+        break;
+    default:
+        abort();
+    }
     /* Hope this not to be too long to wait here */
     qemu_sem_wait(&s->colo_exit_sem);
     qemu_sem_destroy(&s->colo_exit_sem);
+
+    /*
+     * It is safe to unregister the notifier after failover has finished.
+     * Besides, colo_delay_timer and colo_checkpoint_sem can't be
+     * released before unregistering the notifier, or there will be a
+     * use-after-free error.
+     */
+    colo_compare_unregister_notifier(&packets_compare_notifier);
+    timer_del(s->colo_delay_timer);
+    timer_free(s->colo_delay_timer);
+    qemu_sem_destroy(&s->colo_checkpoint_sem);
+
     /*
      * Must be called after failover BH is completed,
      * Or the failover BH may shutdown the wrong fd that
@@ -533,6 +662,7 @@ void *colo_process_incoming_thread(void *opaque)
     uint64_t total_size;
     uint64_t value;
     Error *local_err = NULL;
+    int ret;
 
     rcu_register_thread();
     qemu_sem_init(&mis->colo_incoming_sem, 0);
@@ -559,6 +689,16 @@ void *colo_process_incoming_thread(void *opaque)
     fb = qemu_fopen_channel_input(QIO_CHANNEL(bioc));
     object_unref(OBJECT(bioc));
 
+    qemu_mutex_lock_iothread();
+    replication_start_all(REPLICATION_MODE_SECONDARY, &local_err);
+    if (local_err) {
+        qemu_mutex_unlock_iothread();
+        goto out;
+    }
+    vm_start();
+    trace_colo_vm_state_change("stop", "run");
+    qemu_mutex_unlock_iothread();
+
     colo_send_message(mis->to_src_file, COLO_MESSAGE_CHECKPOINT_READY,
                       &local_err);
     if (local_err) {
@@ -578,6 +718,11 @@ void *colo_process_incoming_thread(void *opaque)
             goto out;
         }
 
+        qemu_mutex_lock_iothread();
+        vm_stop_force_state(RUN_STATE_COLO);
+        trace_colo_vm_state_change("run", "stop");
+        qemu_mutex_unlock_iothread();
+
         /* FIXME: This is unnecessary for periodic checkpoint mode */
         colo_send_message(mis->to_src_file, COLO_MESSAGE_CHECKPOINT_REPLY,
                           &local_err);
@@ -591,6 +736,16 @@ void *colo_process_incoming_thread(void *opaque)
             goto out;
         }
 
+        qemu_mutex_lock_iothread();
+        cpu_synchronize_all_pre_loadvm();
+        ret = qemu_loadvm_state_main(mis->from_src_file, mis);
+        qemu_mutex_unlock_iothread();
+
+        if (ret < 0) {
+            error_report("Load VM's live state (ram) error");
+            goto out;
+        }
+
         value = colo_receive_message_value(mis->from_src_file,
                                            COLO_MESSAGE_VMSTATE_SIZE, &local_err);
         if (local_err) {
@@ -622,15 +777,37 @@ void *colo_process_incoming_thread(void *opaque)
         }
 
         qemu_mutex_lock_iothread();
-        qemu_system_reset(SHUTDOWN_CAUSE_NONE);
         vmstate_loading = true;
-        if (qemu_loadvm_state(fb) < 0) {
-            error_report("COLO: loadvm failed");
+        ret = qemu_load_device_state(fb);
+        if (ret < 0) {
+            error_report("COLO: load device state failed");
state failed"); + qemu_mutex_unlock_iothread(); + goto out; + } + + replication_get_error_all(&local_err); + if (local_err) { + qemu_mutex_unlock_iothread(); + goto out; + } + /* discard colo disk buffer */ + replication_do_checkpoint_all(&local_err); + if (local_err) { + qemu_mutex_unlock_iothread(); + goto out; + } + + /* Notify all filters of all NIC to do checkpoint */ + colo_notify_filters_event(COLO_EVENT_CHECKPOINT, &local_err); + + if (local_err) { qemu_mutex_unlock_iothread(); goto out; } vmstate_loading = false; + vm_start(); + trace_colo_vm_state_change("stop", "run"); qemu_mutex_unlock_iothread(); if (failover_get_state() == FAILOVER_STATUS_RELAUNCH) { @@ -654,6 +831,19 @@ out: error_report_err(local_err); } + switch (failover_get_state()) { + case FAILOVER_STATUS_NONE: + qapi_event_send_colo_exit(COLO_MODE_SECONDARY, + COLO_EXIT_REASON_ERROR); + break; + case FAILOVER_STATUS_REQUIRE: + qapi_event_send_colo_exit(COLO_MODE_SECONDARY, + COLO_EXIT_REASON_REQUEST); + break; + default: + abort(); + } + if (fb) { qemu_fclose(fb); } @@ -665,7 +855,7 @@ out: if (mis->to_src_file) { qemu_fclose(mis->to_src_file); } - migration_incoming_exit_colo(); + migration_incoming_disable_colo(); rcu_unregister_thread(); return NULL; diff --git a/migration/migration.c b/migration/migration.c index d6ae879dc8..8b36e7f184 100644 --- a/migration/migration.c +++ b/migration/migration.c @@ -76,10 +76,8 @@ /* Migration XBZRLE default cache size */ #define DEFAULT_MIGRATE_XBZRLE_CACHE_SIZE (64 * 1024 * 1024) -/* The delay time (in ms) between two COLO checkpoints - * Note: Please change this default value to 10000 when we support hybrid mode. - */ -#define DEFAULT_MIGRATE_X_CHECKPOINT_DELAY 200 +/* The delay time (in ms) between two COLO checkpoints */ +#define DEFAULT_MIGRATE_X_CHECKPOINT_DELAY (200 * 100) #define DEFAULT_MIGRATE_MULTIFD_CHANNELS 2 #define DEFAULT_MIGRATE_MULTIFD_PAGE_COUNT 16 @@ -298,6 +296,22 @@ int migrate_send_rp_req_pages(MigrationIncomingState *mis, const char *rbname, return migrate_send_rp_message(mis, msg_type, msglen, bufc); } +static bool migration_colo_enabled; +bool migration_incoming_colo_enabled(void) +{ + return migration_colo_enabled; +} + +void migration_incoming_disable_colo(void) +{ + migration_colo_enabled = false; +} + +void migration_incoming_enable_colo(void) +{ + migration_colo_enabled = true; +} + void qemu_start_incoming_migration(const char *uri, Error **errp) { const char *p; @@ -388,6 +402,7 @@ static void process_incoming_migration_co(void *opaque) MigrationIncomingState *mis = migration_incoming_get_current(); PostcopyState ps; int ret; + Error *local_err = NULL; assert(mis->from_src_file); mis->migration_incoming_co = qemu_coroutine_self(); @@ -419,7 +434,21 @@ static void process_incoming_migration_co(void *opaque) } /* we get COLO info, and know if we are in COLO mode */ - if (!ret && migration_incoming_enable_colo()) { + if (!ret && migration_incoming_colo_enabled()) { + /* Make sure all file formats flush their mutable metadata */ + bdrv_invalidate_cache_all(&local_err); + if (local_err) { + migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE, + MIGRATION_STATUS_FAILED); + error_report_err(local_err); + exit(EXIT_FAILURE); + } + + if (colo_init_ram_cache() < 0) { + error_report("Init ram cache failed"); + exit(EXIT_FAILURE); + } + qemu_thread_create(&mis->colo_incoming_thread, "COLO incoming", colo_process_incoming_thread, mis, QEMU_THREAD_JOINABLE); mis->have_colo_incoming_thread = true; @@ -427,6 +456,8 @@ static void 
process_incoming_migration_co(void *opaque) /* Wait checkpoint incoming thread exit before free resource */ qemu_thread_join(&mis->colo_incoming_thread); + /* We hold the global iothread lock, so it is safe here */ + colo_release_ram_cache(); } if (ret < 0) { @@ -1546,9 +1577,9 @@ static GSList *migration_blockers; int migrate_add_blocker(Error *reason, Error **errp) { if (migrate_get_current()->only_migratable) { - error_propagate(errp, error_copy(reason)); - error_prepend(errp, "disallowing migration blocker " - "(--only_migratable) for: "); + error_propagate_prepend(errp, error_copy(reason), + "disallowing migration blocker " + "(--only_migratable) for: "); return -EACCES; } @@ -1557,9 +1588,9 @@ int migrate_add_blocker(Error *reason, Error **errp) return 0; } - error_propagate(errp, error_copy(reason)); - error_prepend(errp, "disallowing migration blocker (migration in " - "progress) for: "); + error_propagate_prepend(errp, error_copy(reason), + "disallowing migration blocker " + "(migration in progress) for: "); return -EBUSY; } @@ -3017,6 +3048,11 @@ static void *migration_thread(void *opaque) qemu_savevm_send_postcopy_advise(s->to_dst_file); } + if (migrate_colo_enabled()) { + /* Notify migration destination that we enable COLO */ + qemu_savevm_send_colo_enable(s->to_dst_file); + } + qemu_savevm_state_setup(s->to_dst_file); s->setup_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) - setup_start; diff --git a/migration/ram.c b/migration/ram.c index bc38d98cc3..7e7deec4d8 100644 --- a/migration/ram.c +++ b/migration/ram.c @@ -3447,6 +3447,29 @@ static inline void *host_from_ram_block_offset(RAMBlock *block, return block->host + offset; } +static inline void *colo_cache_from_block_offset(RAMBlock *block, + ram_addr_t offset) +{ + if (!offset_in_ramblock(block, offset)) { + return NULL; + } + if (!block->colo_cache) { + error_report("%s: colo_cache is NULL in block: %s", + __func__, block->idstr); + return NULL; + } + + /* + * During colo checkpoint, we need a bitmap of these migrated pages. + * It helps us to decide which pages in the ram cache should be flushed + * into the VM's RAM later. + */ + if (!test_and_set_bit(offset >> TARGET_PAGE_BITS, block->bmap)) { + ram_state->migration_dirty_pages++; + } + return block->colo_cache + offset; +} + /** * ram_handle_compressed: handle the zero page case * @@ -3651,6 +3674,88 @@ static void decompress_data_with_multi_threads(QEMUFile *f, qemu_mutex_unlock(&decomp_done_lock); } +/* + * colo cache: this is for the secondary VM, we cache the whole + * memory of the secondary VM; it is necessary to hold the global lock + * to call this helper. + */ +int colo_init_ram_cache(void) +{ + RAMBlock *block; + + rcu_read_lock(); + RAMBLOCK_FOREACH_MIGRATABLE(block) { + block->colo_cache = qemu_anon_ram_alloc(block->used_length, + NULL, + false); + if (!block->colo_cache) { + error_report("%s: Can't alloc memory for COLO cache of block %s," + "size 0x" RAM_ADDR_FMT, __func__, block->idstr, + block->used_length); + goto out_locked; + } + memcpy(block->colo_cache, block->host, block->used_length); + } + rcu_read_unlock(); + /* + * Record the dirty pages that were sent by the PVM; we use this dirty bitmap + * to decide which pages in the cache should be flushed into the SVM's RAM. Here + * we use the same name 'ram_bitmap' as for migration. */ + if (ram_bytes_total()) { + RAMBlock *block; + + RAMBLOCK_FOREACH_MIGRATABLE(block) { + unsigned long pages = block->max_length >> TARGET_PAGE_BITS; + + block->bmap = bitmap_new(pages); + bitmap_set(block->bmap, 0, pages); + } + } + ram_state = g_new0(RAMState, 1); + ram_state->migration_dirty_pages = 0; + memory_global_dirty_log_start(); + + return 0; + +out_locked: + + RAMBLOCK_FOREACH_MIGRATABLE(block) { + if (block->colo_cache) { + qemu_anon_ram_free(block->colo_cache, block->used_length); + block->colo_cache = NULL; + } + } + + rcu_read_unlock(); + return -errno; +}
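The cache bookkeeping above reduces to per-page bit arithmetic on the block's bitmap; a small sketch of the mapping (helper name hypothetical):

static inline bool colo_mark_cached_page_dirty_sketch(RAMBlock *block,
                                                      ram_addr_t offset)
{
    /* One bit per target page; bit index = offset >> TARGET_PAGE_BITS. */
    unsigned long page = offset >> TARGET_PAGE_BITS;

    /* test_and_set_bit() returns the old bit, so only a 0->1 transition
     * should bump migration_dirty_pages; re-received pages are not
     * counted twice. */
    return !test_and_set_bit(page, block->bmap);
}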
+ +/* It is necessary to hold the global lock to call this helper */ +void colo_release_ram_cache(void) +{ + RAMBlock *block; + + memory_global_dirty_log_stop(); + RAMBLOCK_FOREACH_MIGRATABLE(block) { + g_free(block->bmap); + block->bmap = NULL; + } + + rcu_read_lock(); + + RAMBLOCK_FOREACH_MIGRATABLE(block) { + if (block->colo_cache) { + qemu_anon_ram_free(block->colo_cache, block->used_length); + block->colo_cache = NULL; + } + } + + rcu_read_unlock(); + g_free(ram_state); + ram_state = NULL; +} + /** * ram_load_setup: Setup RAM for migration incoming side * @@ -3667,6 +3772,7 @@ static int ram_load_setup(QEMUFile *f, void *opaque) xbzrle_load_setup(); ramblock_recv_map_init(); + return 0; } @@ -3687,6 +3793,7 @@ static int ram_load_cleanup(void *opaque) g_free(rb->receivedmap); rb->receivedmap = NULL; } + return 0; } @@ -3869,6 +3976,46 @@ static bool postcopy_is_running(void) return ps >= POSTCOPY_INCOMING_LISTENING && ps < POSTCOPY_INCOMING_END; } +/* + * Flush content of RAM cache into SVM's memory. + * Only flush the pages that have been dirtied by the PVM or SVM or both. + */ +static void colo_flush_ram_cache(void) +{ + RAMBlock *block = NULL; + void *dst_host; + void *src_host; + unsigned long offset = 0; + + memory_global_dirty_log_sync(); + rcu_read_lock(); + RAMBLOCK_FOREACH_MIGRATABLE(block) { + migration_bitmap_sync_range(ram_state, block, 0, block->used_length); + } + rcu_read_unlock(); + + trace_colo_flush_ram_cache_begin(ram_state->migration_dirty_pages); + rcu_read_lock(); + block = QLIST_FIRST_RCU(&ram_list.blocks); + + while (block) { + offset = migration_bitmap_find_dirty(ram_state, block, offset); + + if (offset << TARGET_PAGE_BITS >= block->used_length) { + offset = 0; + block = QLIST_NEXT_RCU(block, next); + } else { + migration_bitmap_clear_dirty(ram_state, block, offset); + dst_host = block->host + (offset << TARGET_PAGE_BITS); + src_host = block->colo_cache + (offset << TARGET_PAGE_BITS); + memcpy(dst_host, src_host, TARGET_PAGE_SIZE); + } + } + + rcu_read_unlock(); + trace_colo_flush_ram_cache_end(); +} + static int ram_load(QEMUFile *f, void *opaque, int version_id) { int flags = 0, ret = 0, invalid_flags = 0; @@ -3924,13 +4071,24 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id) RAM_SAVE_FLAG_COMPRESS_PAGE | RAM_SAVE_FLAG_XBZRLE)) { RAMBlock *block = ram_block_from_stream(f, flags); - host = host_from_ram_block_offset(block, addr); + /* + * After going into COLO, we should load the page into colo_cache.
+ */ + if (migration_incoming_in_colo_state()) { + host = colo_cache_from_block_offset(block, addr); + } else { + host = host_from_ram_block_offset(block, addr); + } if (!host) { error_report("Illegal RAM offset " RAM_ADDR_FMT, addr); ret = -EINVAL; break; } - ramblock_recv_bitmap_set(block, host); + + if (!migration_incoming_in_colo_state()) { + ramblock_recv_bitmap_set(block, host); + } + trace_ram_load_loop(block->idstr, (uint64_t)addr, flags, host); } @@ -4034,6 +4192,10 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id) ret |= wait_for_decompress_done(); rcu_read_unlock(); trace_ram_load_complete(ret, seq_iter); + + if (!ret && migration_incoming_in_colo_state()) { + colo_flush_ram_cache(); + } return ret; } diff --git a/migration/ram.h b/migration/ram.h index a139066846..83ff1bc11a 100644 --- a/migration/ram.h +++ b/migration/ram.h @@ -71,4 +71,8 @@ int64_t ramblock_recv_bitmap_send(QEMUFile *file, const char *block_name); int ram_dirty_bitmap_reload(MigrationState *s, RAMBlock *rb); +/* ram cache */ +int colo_init_ram_cache(void); +void colo_release_ram_cache(void); + #endif diff --git a/migration/savevm.c b/migration/savevm.c index 2d10e45582..9992af4db4 100644 --- a/migration/savevm.c +++ b/migration/savevm.c @@ -56,6 +56,7 @@ #include "io/channel-file.h" #include "sysemu/replay.h" #include "qjson.h" +#include "migration/colo.h" #ifndef ETH_P_RARP #define ETH_P_RARP 0x8035 @@ -82,6 +83,7 @@ enum qemu_vm_cmd { were previously sent during precopy but are dirty. */ MIG_CMD_PACKAGED, /* Send a wrapped stream within this stream */ + MIG_CMD_ENABLE_COLO, /* Enable COLO */ MIG_CMD_POSTCOPY_RESUME, /* resume postcopy on dest */ MIG_CMD_RECV_BITMAP, /* Request for recved bitmap on dst */ MIG_CMD_MAX @@ -841,6 +843,12 @@ static void qemu_savevm_command_send(QEMUFile *f, qemu_fflush(f); } +void qemu_savevm_send_colo_enable(QEMUFile *f) +{ + trace_savevm_send_colo_enable(); + qemu_savevm_command_send(f, MIG_CMD_ENABLE_COLO, 0, NULL); +} + void qemu_savevm_send_ping(QEMUFile *f, uint32_t value) { uint32_t buf; @@ -1370,13 +1378,21 @@ done: return ret; } -static int qemu_save_device_state(QEMUFile *f) +void qemu_savevm_live_state(QEMUFile *f) { - SaveStateEntry *se; + /* save QEMU_VM_SECTION_END section */ + qemu_savevm_state_complete_precopy(f, true, false); + qemu_put_byte(f, QEMU_VM_EOF); +} - qemu_put_be32(f, QEMU_VM_FILE_MAGIC); - qemu_put_be32(f, QEMU_VM_FILE_VERSION); +int qemu_save_device_state(QEMUFile *f) +{ + SaveStateEntry *se; + if (!migration_in_colo_state()) { + qemu_put_be32(f, QEMU_VM_FILE_MAGIC); + qemu_put_be32(f, QEMU_VM_FILE_VERSION); + } cpu_synchronize_all_states(); QTAILQ_FOREACH(se, &savevm_state.handlers, entry) { @@ -1432,8 +1448,6 @@ enum LoadVMExitCodes { LOADVM_QUIT = 1, }; -static int qemu_loadvm_state_main(QEMUFile *f, MigrationIncomingState *mis); - /* ------ incoming postcopy messages ------ */ /* 'advise' arrives before any transfers just to tell us that a postcopy * *might* happen - it might be skipped if precopy transferred everything @@ -1922,6 +1936,12 @@ static int loadvm_handle_recv_bitmap(MigrationIncomingState *mis, return 0; } +static int loadvm_process_enable_colo(MigrationIncomingState *mis) +{ + migration_incoming_enable_colo(); + return colo_init_ram_cache(); +} + /* * Process an incoming 'QEMU_VM_COMMAND' * 0 just a normal return @@ -2001,6 +2021,9 @@ static int loadvm_process_command(QEMUFile *f) case MIG_CMD_RECV_BITMAP: return loadvm_handle_recv_bitmap(mis, len); + + case MIG_CMD_ENABLE_COLO: + return 
loadvm_process_enable_colo(mis); } return 0; @@ -2230,7 +2253,7 @@ static bool postcopy_pause_incoming(MigrationIncomingState *mis) return true; } -static int qemu_loadvm_state_main(QEMUFile *f, MigrationIncomingState *mis) +int qemu_loadvm_state_main(QEMUFile *f, MigrationIncomingState *mis) { uint8_t section_type; int ret = 0; @@ -2401,6 +2424,22 @@ int qemu_loadvm_state(QEMUFile *f) return ret; } +int qemu_load_device_state(QEMUFile *f) +{ + MigrationIncomingState *mis = migration_incoming_get_current(); + int ret; + + /* Load QEMU_VM_SECTION_FULL section */ + ret = qemu_loadvm_state_main(f, mis); + if (ret < 0) { + error_report("Failed to load device state: %d", ret); + return ret; + } + + cpu_synchronize_all_post_init(); + return 0; +} + int save_snapshot(const char *name, Error **errp) { BlockDriverState *bs, *bs1; @@ -2414,8 +2453,8 @@ int save_snapshot(const char *name, Error **errp) AioContext *aio_context; if (!replay_can_snapshot()) { - error_report("Record/replay does not allow making snapshot " - "right now. Try once more later."); + error_setg(errp, "Record/replay does not allow making snapshot " + "right now. Try once more later."); return ret; } @@ -2611,8 +2650,8 @@ int load_snapshot(const char *name, Error **errp) MigrationIncomingState *mis = migration_incoming_get_current(); if (!replay_can_snapshot()) { - error_report("Record/replay does not allow loading snapshot " - "right now. Try once more later."); + error_setg(errp, "Record/replay does not allow loading snapshot " + "right now. Try once more later."); return -EINVAL; } diff --git a/migration/savevm.h b/migration/savevm.h index a5e65b8ae3..51a4b9caa8 100644 --- a/migration/savevm.h +++ b/migration/savevm.h @@ -55,8 +55,13 @@ void qemu_savevm_send_postcopy_ram_discard(QEMUFile *f, const char *name, uint16_t len, uint64_t *start_list, uint64_t *length_list); +void qemu_savevm_send_colo_enable(QEMUFile *f); +void qemu_savevm_live_state(QEMUFile *f); +int qemu_save_device_state(QEMUFile *f); int qemu_loadvm_state(QEMUFile *f); void qemu_loadvm_state_cleanup(void); +int qemu_loadvm_state_main(QEMUFile *f, MigrationIncomingState *mis); +int qemu_load_device_state(QEMUFile *f); #endif diff --git a/migration/trace-events b/migration/trace-events index 9430f3cbe0..bd2d0cd25a 100644 --- a/migration/trace-events +++ b/migration/trace-events @@ -37,6 +37,7 @@ savevm_send_ping(uint32_t val) "0x%x" savevm_send_postcopy_listen(void) "" savevm_send_postcopy_run(void) "" savevm_send_postcopy_resume(void) "" +savevm_send_colo_enable(void) "" savevm_send_recv_bitmap(char *name) "%s" savevm_state_setup(void) "" savevm_state_resume_prepare(void) "" @@ -101,6 +102,8 @@ ram_dirty_bitmap_sync_start(void) "" ram_dirty_bitmap_sync_wait(void) "" ram_dirty_bitmap_sync_complete(void) "" ram_state_resume_prepare(uint64_t v) "%" PRId64 +colo_flush_ram_cache_begin(uint64_t dirty_pages) "dirty_pages %" PRIu64 +colo_flush_ram_cache_end(void) "" # migration/migration.c await_return_path_close_on_source_close(void) "" @@ -83,6 +83,7 @@ #include "sysemu/cpus.h" #include "sysemu/iothread.h" #include "qemu/cutils.h" +#include "tcg/tcg.h" #if defined(TARGET_S390X) #include "hw/s390x/storage-keys.h" @@ -1966,16 +1967,22 @@ static void hmp_info_numa(Monitor *mon, const QDict *qdict) #ifdef CONFIG_PROFILER -int64_t tcg_time; int64_t dev_time; static void hmp_info_profile(Monitor *mon, const QDict *qdict) { + static int64_t last_cpu_exec_time; + int64_t cpu_exec_time; + int64_t delta; + + cpu_exec_time = tcg_cpu_exec_time(); + delta = cpu_exec_time - 
last_cpu_exec_time; + monitor_printf(mon, "async time %" PRId64 " (%0.3f)\n", dev_time, dev_time / (double)NANOSECONDS_PER_SECOND); monitor_printf(mon, "qemu time %" PRId64 " (%0.3f)\n", - tcg_time, tcg_time / (double)NANOSECONDS_PER_SECOND); - tcg_time = 0; + delta, delta / (double)NANOSECONDS_PER_SECOND); + last_cpu_exec_time = cpu_exec_time; dev_time = 0; } #else diff --git a/net/colo-compare.c b/net/colo-compare.c index dd745a491b..a39191d522 100644 --- a/net/colo-compare.c +++ b/net/colo-compare.c @@ -27,11 +27,20 @@ #include "qemu/sockets.h" #include "colo.h" #include "sysemu/iothread.h" +#include "net/colo-compare.h" +#include "migration/colo.h" +#include "migration/migration.h" #define TYPE_COLO_COMPARE "colo-compare" #define COLO_COMPARE(obj) \ OBJECT_CHECK(CompareState, (obj), TYPE_COLO_COMPARE) +static QTAILQ_HEAD(, CompareState) net_compares = + QTAILQ_HEAD_INITIALIZER(net_compares); + +static NotifierList colo_compare_notifiers = + NOTIFIER_LIST_INITIALIZER(colo_compare_notifiers); + #define COMPARE_READ_LEN_MAX NET_BUFSIZE #define MAX_QUEUE_SIZE 1024 @@ -41,6 +50,10 @@ /* TODO: Should be configurable */ #define REGULAR_PACKET_CHECK_MS 3000 +static QemuMutex event_mtx; +static QemuCond event_complete_cond; +static int event_unhandled_count; + /* * + CompareState ++ * | | @@ -87,6 +100,11 @@ typedef struct CompareState { IOThread *iothread; GMainContext *worker_context; QEMUTimer *packet_check_timer; + + QEMUBH *event_bh; + enum colo_event event; + + QTAILQ_ENTRY(CompareState) next; } CompareState; typedef struct CompareClass { @@ -98,6 +116,12 @@ enum { SECONDARY_IN, }; +static void colo_compare_inconsistency_notify(void) +{ + notifier_list_notify(&colo_compare_notifiers, + migrate_get_current()); +} + static int compare_chr_send(CompareState *s, const uint8_t *buf, uint32_t size, @@ -413,10 +437,7 @@ sec: qemu_hexdump((char *)spkt->data, stderr, "colo-compare spkt", spkt->size); - /* - * colo_compare_inconsistent_notify(); - * TODO: notice to checkpoint(); - */ + colo_compare_inconsistency_notify(); } } @@ -547,8 +568,18 @@ static int colo_old_packet_check_one(Packet *pkt, int64_t *check_time) } } +void colo_compare_register_notifier(Notifier *notify) +{ + notifier_list_add(&colo_compare_notifiers, notify); +} + +void colo_compare_unregister_notifier(Notifier *notify) +{ + notifier_remove(notify); +} + static int colo_old_packet_check_one_conn(Connection *conn, - void *user_data) + void *user_data) { GList *result = NULL; int64_t check_time = REGULAR_PACKET_CHECK_MS; @@ -559,10 +590,7 @@ static int colo_old_packet_check_one_conn(Connection *conn, if (result) { /* Do checkpoint will flush old packet */ - /* - * TODO: Notify colo frame to do checkpoint. - * colo_compare_inconsistent_notify(); - */ + colo_compare_inconsistency_notify(); return 0; } @@ -606,11 +634,12 @@ static void colo_compare_packet(CompareState *s, Connection *conn, /* * If one packet arrive late, the secondary_list or * primary_list will be empty, so we can't compare it - * until next comparison. + * until next comparison. If the packets in the list have + * timed out, a checkpoint request will be triggered. */ trace_colo_compare_main("packet different"); g_queue_push_head(&conn->primary_list, pkt); - /* TODO: colo_notify_checkpoint();*/ + colo_compare_inconsistency_notify(); break; } }
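In colo_notify_compares_event() below, completion is tracked with a counter protected by event_mtx: the caller schedules one BH per compare object, bumps event_unhandled_count for each, and then sleeps on event_complete_cond until every BH handler has decremented the counter back to zero. A minimal sketch of the wait side (function name hypothetical, globals as introduced above):

static void colo_wait_all_events_handled_sketch(void)
{
    qemu_mutex_lock(&event_mtx);
    /* Each colo_compare_handle_event() BH decrements the counter
     * and broadcasts event_complete_cond. */
    while (event_unhandled_count > 0) {
        qemu_cond_wait(&event_complete_cond, &event_mtx);
    }
    qemu_mutex_unlock(&event_mtx);
}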
@@ -736,6 +765,25 @@ static void check_old_packet_regular(void *opaque) REGULAR_PACKET_CHECK_MS); } +/* Public API, used by the COLO frame to notify compare events */ +void colo_notify_compares_event(void *opaque, int event, Error **errp) +{ + CompareState *s; + + qemu_mutex_lock(&event_mtx); + QTAILQ_FOREACH(s, &net_compares, next) { + s->event = event; + qemu_bh_schedule(s->event_bh); + event_unhandled_count++; + } + /* Wait for all compare threads to finish handling this event */ + while (event_unhandled_count > 0) { + qemu_cond_wait(&event_complete_cond, &event_mtx); + } + + qemu_mutex_unlock(&event_mtx); +} + static void colo_compare_timer_init(CompareState *s) { AioContext *ctx = iothread_get_aio_context(s->iothread); @@ -756,6 +804,30 @@ static void colo_compare_timer_del(CompareState *s) } } +static void colo_flush_packets(void *opaque, void *user_data); + +static void colo_compare_handle_event(void *opaque) +{ + CompareState *s = opaque; + + switch (s->event) { + case COLO_EVENT_CHECKPOINT: + g_queue_foreach(&s->conn_list, colo_flush_packets, s); + break; + case COLO_EVENT_FAILOVER: + break; + default: + break; + } + + assert(event_unhandled_count > 0); + + qemu_mutex_lock(&event_mtx); + event_unhandled_count--; + qemu_cond_broadcast(&event_complete_cond); + qemu_mutex_unlock(&event_mtx); +} + static void colo_compare_iothread(CompareState *s) { object_ref(OBJECT(s->iothread)); @@ -769,6 +841,7 @@ static void colo_compare_iothread(CompareState *s) s, s->worker_context, true); colo_compare_timer_init(s); + s->event_bh = qemu_bh_new(colo_compare_handle_event, s); } static char *compare_get_pri_indev(Object *obj, Error **errp) @@ -926,8 +999,13 @@ static void colo_compare_complete(UserCreatable *uc, Error **errp) net_socket_rs_init(&s->pri_rs, compare_pri_rs_finalize, s->vnet_hdr); net_socket_rs_init(&s->sec_rs, compare_sec_rs_finalize, s->vnet_hdr); + QTAILQ_INSERT_TAIL(&net_compares, s, next); + g_queue_init(&s->conn_list); + qemu_mutex_init(&event_mtx); + qemu_cond_init(&event_complete_cond); + s->connection_track_table = g_hash_table_new_full(connection_key_hash, connection_key_equal, g_free, @@ -990,6 +1068,7 @@ static void colo_compare_init(Object *obj) static void colo_compare_finalize(Object *obj) { CompareState *s = COLO_COMPARE(obj); + CompareState *tmp = NULL; qemu_chr_fe_deinit(&s->chr_pri_in, false); qemu_chr_fe_deinit(&s->chr_sec_in, false); @@ -997,6 +1076,16 @@ static void colo_compare_finalize(Object *obj) if (s->iothread) { colo_compare_timer_del(s); } + + qemu_bh_delete(s->event_bh); + + QTAILQ_FOREACH(tmp, &net_compares, next) { + if (tmp == s) { + QTAILQ_REMOVE(&net_compares, s, next); + break; + } + } + /* Release all unhandled packets after compare thead exited */ g_queue_foreach(&s->conn_list, colo_flush_packets, s); @@ -1009,6 +1098,10 @@ static void colo_compare_finalize(Object *obj) if (s->iothread) { object_unref(OBJECT(s->iothread)); } + + qemu_mutex_destroy(&event_mtx); + qemu_cond_destroy(&event_complete_cond); + g_free(s->pri_indev); g_free(s->sec_indev); g_free(s->outdev); diff --git a/net/colo-compare.h b/net/colo-compare.h new file mode 100644 index 0000000000..22ddd512e2 --- /dev/null +++ b/net/colo-compare.h @@ -0,0 +1,24 @@ +/* + * COarse-grain LOck-stepping Virtual Machines for Non-stop Service (COLO) + * (a.k.a.
Fault Tolerance or Continuous Replication) + * + * Copyright (c) 2017 HUAWEI TECHNOLOGIES CO., LTD. + * Copyright (c) 2017 FUJITSU LIMITED + * Copyright (c) 2017 Intel Corporation + * + * Authors: + * zhanghailiang <zhang.zhanghailiang@huawei.com> + * Zhang Chen <zhangckid@gmail.com> + * + * This work is licensed under the terms of the GNU GPL, version 2 or + * later. See the COPYING file in the top-level directory. + */ + +#ifndef QEMU_COLO_COMPARE_H +#define QEMU_COLO_COMPARE_H + +void colo_notify_compares_event(void *opaque, int event, Error **errp); +void colo_compare_register_notifier(Notifier *notify); +void colo_compare_unregister_notifier(Notifier *notify); + +#endif /* QEMU_COLO_COMPARE_H */ diff --git a/net/colo.c b/net/colo.c index 6dda4ed66e..49176bf07b 100644 --- a/net/colo.c +++ b/net/colo.c @@ -137,7 +137,7 @@ Connection *connection_new(ConnectionKey *key) conn->ip_proto = key->ip_proto; conn->processing = false; conn->offset = 0; - conn->syn_flag = 0; + conn->tcp_state = TCPS_CLOSED; conn->pack = 0; conn->sack = 0; g_queue_init(&conn->primary_list); @@ -221,3 +221,11 @@ Connection *connection_get(GHashTable *connection_track_table, return conn; } + +bool connection_has_tracked(GHashTable *connection_track_table, + ConnectionKey *key) +{ + Connection *conn = g_hash_table_lookup(connection_track_table, key); + + return conn ? true : false; +} diff --git a/net/colo.h b/net/colo.h index da6c36dcf7..11c5226488 100644 --- a/net/colo.h +++ b/net/colo.h @@ -18,6 +18,7 @@ #include "slirp/slirp.h" #include "qemu/jhash.h" #include "qemu/timer.h" +#include "slirp/tcp.h" #define HASHTABLE_MAX_SIZE 16384 @@ -81,11 +82,9 @@ typedef struct Connection { uint32_t sack; /* offset = secondary_seq - primary_seq */ tcp_seq offset; - /* - * we use this flag update offset func - * run once in independent tcp connection - */ - int syn_flag; + + int tcp_state; /* TCP FSM state */ + tcp_seq fin_ack_seq; /* the seq of 'fin=1,ack=1' */ } Connection; uint32_t connection_key_hash(const void *opaque); @@ -99,6 +98,8 @@ void connection_destroy(void *opaque); Connection *connection_get(GHashTable *connection_track_table, ConnectionKey *key, GQueue *conn_list); +bool connection_has_tracked(GHashTable *connection_track_table, + ConnectionKey *key); void connection_hashtable_reset(GHashTable *connection_track_table); Packet *packet_new(const void *data, int size, int vnet_hdr_len); void packet_destroy(void *opaque, void *user_data); diff --git a/net/filter-rewriter.c b/net/filter-rewriter.c index f584e4eba4..bb8f4d93b1 100644 --- a/net/filter-rewriter.c +++ b/net/filter-rewriter.c @@ -20,11 +20,15 @@ #include "qemu/main-loop.h" #include "qemu/iov.h" #include "net/checksum.h" +#include "net/colo.h" +#include "migration/colo.h" #define FILTER_COLO_REWRITER(obj) \ OBJECT_CHECK(RewriterState, (obj), TYPE_FILTER_REWRITER) #define TYPE_FILTER_REWRITER "filter-rewriter" +#define FAILOVER_MODE_ON true +#define FAILOVER_MODE_OFF false typedef struct RewriterState { NetFilterState parent_obj; @@ -32,8 +36,14 @@ typedef struct RewriterState { /* hashtable to save connection */ GHashTable *connection_track_table; bool vnet_hdr; + bool failover_mode; } RewriterState; +static void filter_rewriter_failover_mode(RewriterState *s) +{ + s->failover_mode = FAILOVER_MODE_ON; +} + static void filter_rewriter_flush(NetFilterState *nf) { RewriterState *s = FILTER_COLO_REWRITER(nf); @@ -59,9 +69,9 @@ static int is_tcp_packet(Packet *pkt) } /* handle tcp packet from primary guest */ -static int handle_primary_tcp_pkt(NetFilterState 
*nf, +static int handle_primary_tcp_pkt(RewriterState *rf, Connection *conn, - Packet *pkt) + Packet *pkt, ConnectionKey *key) { struct tcphdr *tcp_pkt; @@ -74,23 +84,28 @@ static int handle_primary_tcp_pkt(NetFilterState *nf, trace_colo_filter_rewriter_conn_offset(conn->offset); } + if (((tcp_pkt->th_flags & (TH_ACK | TH_SYN)) == (TH_ACK | TH_SYN)) && + conn->tcp_state == TCPS_SYN_SENT) { + conn->tcp_state = TCPS_ESTABLISHED; + } + if (((tcp_pkt->th_flags & (TH_ACK | TH_SYN)) == TH_SYN)) { /* * we use this flag update offset func * run once in independent tcp connection */ - conn->syn_flag = 1; + conn->tcp_state = TCPS_SYN_RECEIVED; } if (((tcp_pkt->th_flags & (TH_ACK | TH_SYN)) == TH_ACK)) { - if (conn->syn_flag) { + if (conn->tcp_state == TCPS_SYN_RECEIVED) { /* * offset = secondary_seq - primary seq * ack packet sent by guest from primary node, * so we use th_ack - 1 get primary_seq */ conn->offset -= (ntohl(tcp_pkt->th_ack) - 1); - conn->syn_flag = 0; + conn->tcp_state = TCPS_ESTABLISHED; } if (conn->offset) { /* handle packets to the secondary from the primary */ @@ -99,15 +114,66 @@ static int handle_primary_tcp_pkt(NetFilterState *nf, net_checksum_calculate((uint8_t *)pkt->data + pkt->vnet_hdr_len, pkt->size - pkt->vnet_hdr_len); } + + /* + * Passive close step 3 + */ + if ((conn->tcp_state == TCPS_LAST_ACK) && + (ntohl(tcp_pkt->th_ack) == (conn->fin_ack_seq + 1))) { + conn->tcp_state = TCPS_CLOSED; + g_hash_table_remove(rf->connection_track_table, key); + } + } + + if ((tcp_pkt->th_flags & TH_FIN) == TH_FIN) { + /* + * Passive close. + * Step 1: + * The *server* side of this connection is the VM; the *client* tries + * to close the connection. We will go into CLOSE_WAIT status. + * + * Step 2: + * In this step we will go into LAST_ACK status. + * + * We got a 'fin=1, ack=1' packet from the server side; we need to + * record the seq of the 'fin=1, ack=1' packet. + * + * Step 3: + * We got an 'ack=1' packet from the client side; it acks the + * 'fin=1, ack=1' packet from the server side. From this point we can + * ensure that there will be no more packets in the connection, except + * that some errors may happen on the path between the 'filter object' + * and the vNIC. If this rare case really happens, we can still create + * a new connection, so it is safe to remove the connection from + * connection_track_table. + * + */ + if (conn->tcp_state == TCPS_ESTABLISHED) { + conn->tcp_state = TCPS_CLOSE_WAIT; + } + + /* + * Active close step 2. + */ + if (conn->tcp_state == TCPS_FIN_WAIT_1) { + conn->tcp_state = TCPS_TIME_WAIT; + /* + * To simplify the implementation, we needn't wait the 2MSL time + * in the filter rewriter, because the guest kernel will track the + * TCP status and wait the 2MSL time; if the client resends the FIN + * packet, the guest will reply with the last ACK again. + */ + conn->tcp_state = TCPS_CLOSED; + g_hash_table_remove(rf->connection_track_table, key); + } } return 0; }
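The passive-close bookkeeping above hinges on matching the client's final ACK against the recorded FIN sequence; as a sketch (helper name hypothetical, TCPS_* values from slirp/tcp.h as used in this patch):

static bool is_passive_close_final_ack(const struct tcphdr *tcp_pkt,
                                       const Connection *conn)
{
    /* Passive close step 3: the client's ACK acknowledges the server's
     * 'fin=1,ack=1' packet whose seq was recorded in conn->fin_ack_seq. */
    return conn->tcp_state == TCPS_LAST_ACK &&
           ntohl(tcp_pkt->th_ack) == conn->fin_ack_seq + 1;
}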
/* handle tcp packet from secondary guest */ -static int handle_secondary_tcp_pkt(NetFilterState *nf, +static int handle_secondary_tcp_pkt(RewriterState *rf, Connection *conn, - Packet *pkt) + Packet *pkt, ConnectionKey *key) { struct tcphdr *tcp_pkt; @@ -121,7 +187,8 @@ static int handle_secondary_tcp_pkt(NetFilterState *nf, trace_colo_filter_rewriter_conn_offset(conn->offset); } - if (((tcp_pkt->th_flags & (TH_ACK | TH_SYN)) == (TH_ACK | TH_SYN))) { + if (conn->tcp_state == TCPS_SYN_RECEIVED && + ((tcp_pkt->th_flags & (TH_ACK | TH_SYN)) == (TH_ACK | TH_SYN))) { /* * save offset = secondary_seq and then * in handle_primary_tcp_pkt make offset @@ -130,6 +197,12 @@ static int handle_secondary_tcp_pkt(NetFilterState *nf, conn->offset = ntohl(tcp_pkt->th_seq); } + /* VM active connect */ + if (conn->tcp_state == TCPS_CLOSED && + ((tcp_pkt->th_flags & (TH_ACK | TH_SYN)) == TH_SYN)) { + conn->tcp_state = TCPS_SYN_SENT; + } + if ((tcp_pkt->th_flags & (TH_ACK | TH_SYN)) == TH_ACK) { /* Only need to adjust seq while offset is Non-zero */ if (conn->offset) { @@ -141,6 +214,32 @@ static int handle_secondary_tcp_pkt(NetFilterState *nf, } } + /* + * Passive close step 2: + */ + if (conn->tcp_state == TCPS_CLOSE_WAIT && + (tcp_pkt->th_flags & (TH_ACK | TH_FIN)) == (TH_ACK | TH_FIN)) { + conn->fin_ack_seq = ntohl(tcp_pkt->th_seq); + conn->tcp_state = TCPS_LAST_ACK; + } + + /* + * Active close + * + * Step 1: + * The *server* side of this connection is the VM; the *server* tries + * to close the connection. + * + * Step 2: + * We will go into FIN_WAIT_1 status. + * We simplify away the TCPS_FIN_WAIT_2, TCPS_TIME_WAIT and + * CLOSING states. + */ + if (conn->tcp_state == TCPS_ESTABLISHED && + (tcp_pkt->th_flags & (TH_ACK | TH_FIN)) == TH_FIN) { + conn->tcp_state = TCPS_FIN_WAIT_1; + } + return 0; } @@ -184,13 +283,20 @@ static ssize_t colo_rewriter_receive_iov(NetFilterState *nf, */ reverse_connection_key(&key); } + + /* After failover we needn't rewrite new TCP packets */ + if (s->failover_mode && + !connection_has_tracked(s->connection_track_table, &key)) { + goto out; + } + conn = connection_get(s->connection_track_table, &key, NULL); if (sender == nf->netdev) { /* NET_FILTER_DIRECTION_TX */ - if (!handle_primary_tcp_pkt(nf, conn, pkt)) { + if (!handle_primary_tcp_pkt(s, conn, pkt, &key)) { qemu_net_queue_send(s->incoming_queue, sender, 0, (const uint8_t *)pkt->data, pkt->size, NULL); packet_destroy(pkt, NULL); @@ -203,7 +309,7 @@ static ssize_t colo_rewriter_receive_iov(NetFilterState *nf, } } else { /* NET_FILTER_DIRECTION_RX */ - if (!handle_secondary_tcp_pkt(nf, conn, pkt)) { + if (!handle_secondary_tcp_pkt(s, conn, pkt, &key)) { qemu_net_queue_send(s->incoming_queue, sender, 0, (const uint8_t *)pkt->data, pkt->size, NULL); packet_destroy(pkt, NULL); @@ -217,11 +323,49 @@ static ssize_t colo_rewriter_receive_iov(NetFilterState *nf, } } +out: packet_destroy(pkt, NULL); pkt = NULL; return 0; } +static void reset_seq_offset(gpointer key, gpointer value, gpointer user_data) +{ + Connection *conn = (Connection *)value; + + conn->offset = 0; +} + +static gboolean offset_is_nonzero(gpointer key, + gpointer value, + gpointer user_data) +{ + Connection *conn = (Connection *)value; + + return conn->offset ?
true : false; +} + +static void colo_rewriter_handle_event(NetFilterState *nf, int event, + Error **errp) +{ + RewriterState *rs = FILTER_COLO_REWRITER(nf); + + switch (event) { + case COLO_EVENT_CHECKPOINT: + g_hash_table_foreach(rs->connection_track_table, + reset_seq_offset, NULL); + break; + case COLO_EVENT_FAILOVER: + if (!g_hash_table_find(rs->connection_track_table, + offset_is_nonzero, NULL)) { + filter_rewriter_failover_mode(rs); + } + break; + default: + break; + } +} + static void colo_rewriter_cleanup(NetFilterState *nf) { RewriterState *s = FILTER_COLO_REWRITER(nf); @@ -265,6 +409,7 @@ static void filter_rewriter_init(Object *obj) RewriterState *s = FILTER_COLO_REWRITER(obj); s->vnet_hdr = false; + s->failover_mode = FAILOVER_MODE_OFF; object_property_add_bool(obj, "vnet_hdr_support", filter_rewriter_get_vnet_hdr, filter_rewriter_set_vnet_hdr, NULL); @@ -277,6 +422,7 @@ static void colo_rewriter_class_init(ObjectClass *oc, void *data) nfc->setup = colo_rewriter_setup; nfc->cleanup = colo_rewriter_cleanup; nfc->receive_iov = colo_rewriter_receive_iov; + nfc->handle_event = colo_rewriter_handle_event; } static const TypeInfo colo_rewriter_info = { diff --git a/net/filter.c b/net/filter.c index 2fd7d7d663..c9f9e5fa08 100644 --- a/net/filter.c +++ b/net/filter.c @@ -17,6 +17,8 @@ #include "net/vhost_net.h" #include "qom/object_interfaces.h" #include "qemu/iov.h" +#include "net/colo.h" +#include "migration/colo.h" static inline bool qemu_can_skip_netfilter(NetFilterState *nf) { @@ -245,11 +247,26 @@ static void netfilter_finalize(Object *obj) g_free(nf->netdev_id); } +static void default_handle_event(NetFilterState *nf, int event, Error **errp) +{ + switch (event) { + case COLO_EVENT_CHECKPOINT: + break; + case COLO_EVENT_FAILOVER: + object_property_set_str(OBJECT(nf), "off", "status", errp); + break; + default: + break; + } +} + static void netfilter_class_init(ObjectClass *oc, void *data) { UserCreatableClass *ucc = USER_CREATABLE_CLASS(oc); + NetFilterClass *nfc = NETFILTER_CLASS(oc); ucc->complete = netfilter_complete; + nfc->handle_event = default_handle_event; } static const TypeInfo netfilter_info = { diff --git a/net/l2tpv3.c b/net/l2tpv3.c index 6745b78990..81db24dc8c 100644 --- a/net/l2tpv3.c +++ b/net/l2tpv3.c @@ -28,6 +28,7 @@ #include <netdb.h> #include "net/net.h" #include "clients.h" +#include "qapi/error.h" #include "qemu-common.h" #include "qemu/error-report.h" #include "qemu/option.h" @@ -528,7 +529,6 @@ int net_init_l2tpv3(const Netdev *netdev, const char *name, NetClientState *peer, Error **errp) { - /* FIXME error_setg(errp, ...) 
on failure */ const NetdevL2TPv3Options *l2tpv3; NetL2TPV3State *s; NetClientState *nc; @@ -555,7 +555,7 @@ int net_init_l2tpv3(const Netdev *netdev, } if ((l2tpv3->has_offset) && (l2tpv3->offset > 256)) { - error_report("l2tpv3_open : offset must be less than 256 bytes"); + error_setg(errp, "offset must be less than 256 bytes"); goto outerr; } @@ -563,6 +563,8 @@ int net_init_l2tpv3(const Netdev *netdev, if (l2tpv3->has_rxcookie && l2tpv3->has_txcookie) { s->cookie = true; } else { + error_setg(errp, + "require both 'rxcookie' and 'txcookie' or neither"); goto outerr; } } else { @@ -578,7 +580,7 @@ int net_init_l2tpv3(const Netdev *netdev, if (l2tpv3->has_udp && l2tpv3->udp) { s->udp = true; if (!(l2tpv3->has_srcport && l2tpv3->has_dstport)) { - error_report("l2tpv3_open : need both src and dst port for udp"); + error_setg(errp, "need both src and dst port for udp"); goto outerr; } else { srcport = l2tpv3->srcport; @@ -639,20 +641,19 @@ int net_init_l2tpv3(const Netdev *netdev, gairet = getaddrinfo(l2tpv3->src, srcport, &hints, &result); if ((gairet != 0) || (result == NULL)) { - error_report( - "l2tpv3_open : could not resolve src, errno = %s", - gai_strerror(gairet) - ); + error_setg(errp, "could not resolve src, errno = %s", + gai_strerror(gairet)); goto outerr; } fd = socket(result->ai_family, result->ai_socktype, result->ai_protocol); if (fd == -1) { fd = -errno; - error_report("l2tpv3_open : socket creation failed, errno = %d", -fd); + error_setg(errp, "socket creation failed, errno = %d", + -fd); goto outerr; } if (bind(fd, (struct sockaddr *) result->ai_addr, result->ai_addrlen)) { - error_report("l2tpv3_open : could not bind socket err=%i", errno); + error_setg(errp, "could not bind socket err=%i", errno); goto outerr; } if (result) { @@ -677,10 +678,8 @@ int net_init_l2tpv3(const Netdev *netdev, result = NULL; gairet = getaddrinfo(l2tpv3->dst, dstport, &hints, &result); if ((gairet != 0) || (result == NULL)) { - error_report( - "l2tpv3_open : could not resolve dst, error = %s", - gai_strerror(gairet) - ); + error_setg(errp, "could not resolve dst, error = %s", + gai_strerror(gairet)); goto outerr; } @@ -712,10 +712,15 @@ ssize_t qemu_deliver_packet_iov(NetClientState *sender, void *opaque) { NetClientState *nc = opaque; + size_t size = iov_size(iov, iovcnt); int ret; + if (size > INT_MAX) { + return size; + } + if (nc->link_down) { - return iov_size(iov, iovcnt); + return size; } if (nc->receive_disabled) { @@ -1335,6 +1340,25 @@ void hmp_info_network(Monitor *mon, const QDict *qdict) } } +void colo_notify_filters_event(int event, Error **errp) +{ + NetClientState *nc; + NetFilterState *nf; + NetFilterClass *nfc = NULL; + Error *local_err = NULL; + + QTAILQ_FOREACH(nc, &net_clients, next) { + QTAILQ_FOREACH(nf, &nc->filters, next) { + nfc = NETFILTER_GET_CLASS(OBJECT(nf)); + nfc->handle_event(nf, event, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + } + } +} + void qmp_set_link(const char *name, bool up, Error **errp) { NetClientState *ncs[MAX_QUEUE_NUM]; diff --git a/net/slirp.c b/net/slirp.c index 99884de204..f6dc03963a 100644 --- a/net/slirp.c +++ b/net/slirp.c @@ -150,6 +150,7 @@ static int net_slirp_init(NetClientState *peer, const char *model, const char *vnameserver, const char *vnameserver6, const char *smb_export, const char *vsmbserver, const char **dnssearch, const char *vdomainname, + const char *tftp_server_name, Error **errp) { /* default settings according to historic slirp */ @@ -350,6 +351,20 @@ static int 
net_slirp_init(NetClientState *peer, const char *model, return -1; } + if (vdomainname && strlen(vdomainname) > 255) { + error_setg(errp, "'domainname' parameter cannot exceed 255 bytes"); + return -1; + } + + if (vhostname && strlen(vhostname) > 255) { + error_setg(errp, "'vhostname' parameter cannot exceed 255 bytes"); + return -1; + } + + if (tftp_server_name && strlen(tftp_server_name) > 255) { + error_setg(errp, "'tftp-server-name' parameter cannot exceed 255 bytes"); + return -1; + } nc = qemu_new_net_client(&net_slirp_info, peer, model, name); @@ -361,7 +376,8 @@ static int net_slirp_init(NetClientState *peer, const char *model, s->slirp = slirp_init(restricted, ipv4, net, mask, host, ipv6, ip6_prefix, vprefix6_len, ip6_host, - vhostname, tftp_export, bootfile, dhcp, + vhostname, tftp_server_name, + tftp_export, bootfile, dhcp, dns, ip6_dns, dnssearch, vdomainname, s); QTAILQ_INSERT_TAIL(&slirp_stacks, s, entry); @@ -898,7 +914,8 @@ int net_init_slirp(const Netdev *netdev, const char *name, user->ipv6_host, user->hostname, user->tftp, user->bootfile, user->dhcpstart, user->dns, user->ipv6_dns, user->smb, - user->smbserver, dnssearch, user->domainname, errp); + user->smbserver, dnssearch, user->domainname, + user->tftp_server_name, errp); while (slirp_configs) { config = slirp_configs; diff --git a/net/socket.c b/net/socket.c index 6917fbcbf5..90ef3517be 100644 --- a/net/socket.c +++ b/net/socket.c @@ -453,8 +453,8 @@ static NetSocketState *net_socket_fd_init(NetClientState *peer, case SOCK_STREAM: return net_socket_fd_init_stream(peer, model, name, fd, is_connected); default: - error_report("socket type=%d for fd=%d must be either" - " SOCK_DGRAM or SOCK_STREAM", so_type, fd); + error_setg(errp, "socket type=%d for fd=%d must be either" + " SOCK_DGRAM or SOCK_STREAM", so_type, fd); closesocket(fd); } return NULL; @@ -60,6 +60,7 @@ NodeInfo numa_info[MAX_NODES]; static void parse_numa_node(MachineState *ms, NumaNodeOptions *node, Error **errp) { + Error *err = NULL; uint16_t nodenr; uint16List *cpus = NULL; MachineClass *mc = MACHINE_GET_CLASS(ms); @@ -82,8 +83,8 @@ static void parse_numa_node(MachineState *ms, NumaNodeOptions *node, } if (!mc->cpu_index_to_instance_props || !mc->get_default_cpu_node_id) { - error_report("NUMA is not supported by this machine-type"); - exit(1); + error_setg(errp, "NUMA is not supported by this machine-type"); + return; } for (cpus = node->cpus; cpus; cpus = cpus->next) { CpuInstanceProperties props; @@ -97,7 +98,11 @@ static void parse_numa_node(MachineState *ms, NumaNodeOptions *node, props = mc->cpu_index_to_instance_props(ms, cpus->value); props.node_id = nodenr; props.has_node_id = true; - machine_set_cpu_numa_node(ms, &props, &error_fatal); + machine_set_cpu_numa_node(ms, &props, &err); + if (err) { + error_propagate(errp, err); + return; + } } if (node->has_mem && node->has_memdev) { @@ -210,7 +215,7 @@ end: error_propagate(errp, err); } -int parse_numa(void *opaque, QemuOpts *opts, Error **errp) +static int parse_numa(void *opaque, QemuOpts *opts, Error **errp) { NumaOptions *object = NULL; MachineState *ms = MACHINE(opaque); @@ -234,7 +239,7 @@ int parse_numa(void *opaque, QemuOpts *opts, Error **errp) end: qapi_free_NumaOptions(object); if (err) { - error_report_err(err); + error_propagate(errp, err); return -1; } @@ -367,7 +372,7 @@ void numa_complete_configuration(MachineState *ms) if (ms->ram_slots > 0 && nb_numa_nodes == 0 && mc->auto_enable_numa_with_memhp) { NumaNodeOptions node = { }; - parse_numa_node(ms, &node, NULL); + 
parse_numa_node(ms, &node, &error_abort); } assert(max_numa_nodeid <= MAX_NODES); @@ -439,9 +444,7 @@ void numa_complete_configuration(MachineState *ms) void parse_numa_opts(MachineState *ms) { - if (qemu_opts_foreach(qemu_find_opts("numa"), parse_numa, ms, NULL)) { - exit(1); - } + qemu_opts_foreach(qemu_find_opts("numa"), parse_numa, ms, &error_fatal); } void qmp_set_numa_node(NumaOptions *cmd, Error **errp) diff --git a/po/Makefile b/po/Makefile index e47e262ee6..c041f4c858 100644 --- a/po/Makefile +++ b/po/Makefile @@ -36,7 +36,7 @@ clean: install: $(OBJS) for obj in $(OBJS); do \ - base=`basename $$obj .mo`; \ + base=$$(basename $$obj .mo); \ $(INSTALL) -d $(DESTDIR)$(prefix)/share/locale/$$base/LC_MESSAGES; \ $(INSTALL) -m644 $$obj $(DESTDIR)$(prefix)/share/locale/$$base/LC_MESSAGES/qemu.mo; \ done diff --git a/qapi/migration.json b/qapi/migration.json index 6e8c21258a..0928f4b727 100644 --- a/qapi/migration.json +++ b/qapi/migration.json @@ -923,18 +923,18 @@ ## # @COLOMode: # -# The colo mode +# The current COLO mode. # -# @unknown: unknown mode +# @none: COLO is disabled. # -# @primary: master side +# @primary: COLO node on the primary side. # -# @secondary: slave side +# @secondary: COLO node on the slave side. # # Since: 2.8 ## { 'enum': 'COLOMode', - 'data': [ 'unknown', 'primary', 'secondary'] } + 'data': [ 'none', 'primary', 'secondary'] } ## # @FailoverStatus: @@ -957,6 +957,44 @@ 'data': [ 'none', 'require', 'active', 'completed', 'relaunch' ] } ## +# @COLO_EXIT: +# +# Emitted when the VM finishes COLO mode, due to some error happening or +# at the request of users. +# +# @mode: the COLO mode the VM was in when it exited. +# +# @reason: describes the reason for the COLO exit. +# +# Since: 3.1 +# +# Example: +# +# <- { "timestamp": {"seconds": 2032141960, "microseconds": 417172}, +# "event": "COLO_EXIT", "data": {"mode": "primary", "reason": "request" } } +# +## +{ 'event': 'COLO_EXIT', + 'data': {'mode': 'COLOMode', 'reason': 'COLOExitReason' } } + +## +# @COLOExitReason: +# +# The reason for a COLO exit +# +# @none: no failover has ever happened. This can't occur in the +# COLO_EXIT event, only in the result of query-colo-status. +# +# @request: COLO exit is due to an external request +# +# @error: COLO exit is due to an internal error +# +# Since: 3.1 +## +{ 'enum': 'COLOExitReason', + 'data': [ 'none', 'request', 'error' ] } + +## # @x-colo-lost-heartbeat: # # Tell qemu that heartbeat is lost, request it to do takeover procedures. @@ -1270,6 +1308,38 @@ { 'command': 'xen-colo-do-checkpoint' } ## +# @COLOStatus: +# +# The result format for 'query-colo-status'. +# +# @mode: COLO running mode. If COLO is running, this field will return +# 'primary' or 'secondary'. +# +# @reason: describes the reason for the COLO exit. +# +# Since: 3.1 +## +{ 'struct': 'COLOStatus', + 'data': { 'mode': 'COLOMode', 'reason': 'COLOExitReason' } } + +## +# @query-colo-status: +# +# Query COLO status while the VM is running. +# +# Returns: A @COLOStatus object showing the status. +# +# Example: +# +# -> { "execute": "query-colo-status" } +# <- { "return": { "mode": "primary", "reason": "request" } } +# +# Since: 3.1 +## +{ 'command': 'query-colo-status', + 'returns': 'COLOStatus' } + +## # @migrate-recover: # # Provide a recovery migration stream URI.
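As a usage illustration, a management client could trigger failover on the primary and then confirm it through the new command roughly as follows (a hypothetical QMP session; the exact mode/reason pair depends on how far failover has progressed when the query runs):

-> { "execute": "x-colo-lost-heartbeat" }
<- { "return": {} }
-> { "execute": "query-colo-status" }
<- { "return": { "mode": "none", "reason": "request" } }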
diff --git a/qapi/misc.json b/qapi/misc.json index 3a68af9ca3..c85c6c8ca3 100644 --- a/qapi/misc.json +++ b/qapi/misc.json @@ -3070,7 +3070,8 @@ # Emitted when the guest changes the RTC time. # # @offset: offset between base RTC clock (as specified by -rtc base), and -# new RTC clock value +# new RTC clock value. Note that the value will differ depending +# on the clock chosen to drive the RTC (specified by -rtc clock). # # Note: This event is rate-limited. # diff --git a/qapi/net.json b/qapi/net.json index c86f351161..8f99fd911d 100644 --- a/qapi/net.json +++ b/qapi/net.json @@ -174,6 +174,8 @@ # # @guestfwd: forward guest TCP connections # +# @tftp-server-name: RFC2132 "TFTP server name" string (Since 3.1) +# # Since: 1.2 ## { 'struct': 'NetdevUserOptions', @@ -198,7 +200,8 @@ '*smb': 'str', '*smbserver': 'str', '*hostfwd': ['String'], - '*guestfwd': ['String'] } } + '*guestfwd': ['String'], + '*tftp-server-name': 'str' } } ## # @NetdevTapOptions: @@ -620,11 +620,9 @@ int main(int argc, char **argv) exit(1); } - if (qemu_opts_foreach(&qemu_object_opts, - user_creatable_add_opts_foreach, - NULL, NULL)) { - exit(1); - } + qemu_opts_foreach(&qemu_object_opts, + user_creatable_add_opts_foreach, + NULL, &error_fatal); if (!trace_init_backends()) { exit(1); diff --git a/qemu-nbd.c b/qemu-nbd.c index e76fe3082a..ca7109652e 100644 --- a/qemu-nbd.c +++ b/qemu-nbd.c @@ -766,11 +766,9 @@ int main(int argc, char **argv) exit(EXIT_FAILURE); } - if (qemu_opts_foreach(&qemu_object_opts, - user_creatable_add_opts_foreach, - NULL, NULL)) { - exit(EXIT_FAILURE); - } + qemu_opts_foreach(&qemu_object_opts, + user_creatable_add_opts_foreach, + NULL, &error_fatal); if (!trace_init_backends()) { exit(1); @@ -1002,11 +1000,7 @@ int main(int argc, char **argv) } exp = nbd_export_new(bs, dev_offset, fd_size, nbdflags, nbd_export_closed, - writethrough, NULL, &local_err); - if (!exp) { - error_report_err(local_err); - exit(EXIT_FAILURE); - } + writethrough, NULL, &error_fatal); nbd_export_set_name(exp, export_name); nbd_export_set_description(exp, export_description); diff --git a/qemu-options.hx b/qemu-options.hx index f139459e80..08f8516a9a 100644 --- a/qemu-options.hx +++ b/qemu-options.hx @@ -1823,7 +1823,7 @@ DEF("netdev", HAS_ARG, QEMU_OPTION_netdev, " [,ipv6[=on|off]][,ipv6-net=addr[/int]][,ipv6-host=addr]\n" " [,restrict=on|off][,hostname=host][,dhcpstart=addr]\n" " [,dns=addr][,ipv6-dns=addr][,dnssearch=domain][,domainname=domain]\n" - " [,tftp=dir][,bootfile=f][,hostfwd=rule][,guestfwd=rule]" + " [,tftp=dir][,tftp-server-name=name][,bootfile=f][,hostfwd=rule][,guestfwd=rule]" #ifndef _WIN32 "[,smb=dir[,smbserver=addr]]\n" #endif @@ -2060,6 +2060,11 @@ server. The files in @var{dir} will be exposed as the root of a TFTP server. The TFTP client on the guest must be configured in binary mode (use the command @code{bin} of the Unix TFTP client). +@item tftp-server-name=@var{name} +In BOOTP reply, broadcast @var{name} as the "TFTP server name" (RFC2132 option +66). This can be used to advise the guest to load boot files or configurations +from a different server than the host address. +
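For instance, to make the guest fetch its boot file from an external TFTP server (addresses and file name illustrative):

@example
qemu-system-i386 linux.img \
     -netdev user,id=n1,tftp-server-name=10.0.2.99,bootfile=pxelinux.0 \
     -device e1000,netdev=n1
@end example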
@item bootfile=@var{file} When using the user mode network stack, broadcast @var{file} as the BOOTP filename. In conjunction with @option{tftp}, this can be used to network boot @@ -2256,7 +2261,7 @@ qemu-system-i386 linux.img \ -netdev socket,id=n2,mcast=230.0.0.1:1234 # launch yet another QEMU instance on same "bus" qemu-system-i386 linux.img \ - -device e1000,netdev=n3,macaddr=52:54:00:12:34:58 \ + -device e1000,netdev=n3,mac=52:54:00:12:34:58 \ -netdev socket,id=n3,mcast=230.0.0.1:1234 @end example @@ -3458,25 +3463,29 @@ HXCOMM Silently ignored for compatibility DEF("clock", HAS_ARG, QEMU_OPTION_clock, "", QEMU_ARCH_ALL) DEF("rtc", HAS_ARG, QEMU_OPTION_rtc, \ - "-rtc [base=utc|localtime|date][,clock=host|rt|vm][,driftfix=none|slew]\n" \ + "-rtc [base=utc|localtime|<datetime>][,clock=host|rt|vm][,driftfix=none|slew]\n" \ " set the RTC base and clock, enable drift fix for clock ticks (x86 only)\n", QEMU_ARCH_ALL) STEXI -@item -rtc [base=utc|localtime|@var{date}][,clock=host|vm][,driftfix=none|slew] +@item -rtc [base=utc|localtime|@var{datetime}][,clock=host|rt|vm][,driftfix=none|slew] @findex -rtc Specify @option{base} as @code{utc} or @code{localtime} to let the RTC start at the current UTC or local time, respectively. @code{localtime} is required for correct date in -MS-DOS or Windows. To start at a specific point in time, provide @var{date} in the +MS-DOS or Windows. To start at a specific point in time, provide @var{datetime} in the format @code{2006-06-17T16:01:21} or @code{2006-06-17}. The default base is UTC. By default the RTC is driven by the host system time. This allows using of the RTC as accurate reference clock inside the guest, specifically if the host time is smoothly following an accurate external reference clock, e.g. via NTP. If you want to isolate the guest time from the host, you can set @option{clock} -to @code{rt} instead. To even prevent it from progressing during suspension, -you can set it to @code{vm}. +to @code{rt} instead, which provides a host monotonic clock if the host supports it. +To even prevent the RTC from progressing during suspension, you can set @option{clock} +to @code{vm} (virtual clock). @samp{clock=vm} is recommended especially in +icount mode in order to preserve determinism; however, note that in icount mode +the speed of the virtual clock is variable and can in general differ from the +host clock. Enable @option{driftfix} (i386 targets only) if you experience time drift problems, specifically with Windows' ACPI HAL. This option will try to figure out how diff --git a/qemu-seccomp.c b/qemu-seccomp.c index 1baa5c69ed..5c73e6ad05 100644 --- a/qemu-seccomp.c +++ b/qemu-seccomp.c @@ -12,11 +12,12 @@ * Contributions after 2012-01-13 are licensed under the terms of the * GNU GPL, version 2 or (at your option) any later version.
*/ + #include "qemu/osdep.h" +#include "qapi/error.h" #include "qemu/config-file.h" #include "qemu/option.h" #include "qemu/module.h" -#include "qemu/error-report.h" #include <sys/prctl.h> #include <seccomp.h> #include "sysemu/seccomp.h" @@ -190,7 +191,7 @@ int parse_sandbox(void *opaque, QemuOpts *opts, Error **errp) * to provide a little bit of consistency for * the command line */ } else { - error_report("invalid argument for obsolete"); + error_setg(errp, "invalid argument for obsolete"); return -1; } } @@ -205,14 +206,13 @@ int parse_sandbox(void *opaque, QemuOpts *opts, Error **errp) /* calling prctl directly because we're * not sure if host has CAP_SYS_ADMIN set*/ if (prctl(PR_SET_NO_NEW_PRIVS, 1)) { - error_report("failed to set no_new_privs " - "aborting"); + error_setg(errp, "failed to set no_new_privs aborting"); return -1; } } else if (g_str_equal(value, "allow")) { /* default value */ } else { - error_report("invalid argument for elevateprivileges"); + error_setg(errp, "invalid argument for elevateprivileges"); return -1; } } @@ -224,7 +224,7 @@ int parse_sandbox(void *opaque, QemuOpts *opts, Error **errp) } else if (g_str_equal(value, "allow")) { /* default value */ } else { - error_report("invalid argument for spawn"); + error_setg(errp, "invalid argument for spawn"); return -1; } } @@ -236,14 +236,14 @@ int parse_sandbox(void *opaque, QemuOpts *opts, Error **errp) } else if (g_str_equal(value, "allow")) { /* default value */ } else { - error_report("invalid argument for resourcecontrol"); + error_setg(errp, "invalid argument for resourcecontrol"); return -1; } } if (seccomp_start(seccomp_opts) < 0) { - error_report("failed to install seccomp syscall filter " - "in the kernel"); + error_setg(errp, "failed to install seccomp syscall filter " + "in the kernel"); return -1; } } @@ -194,7 +194,6 @@ static bool cpu_common_debug_check_watchpoint(CPUState *cpu, CPUWatchpoint *wp) return true; } -bool target_words_bigendian(void); static bool cpu_common_virtio_is_big_endian(CPUState *cpu) { return target_words_bigendian(); @@ -266,7 +265,7 @@ static void cpu_common_reset(CPUState *cpu) cpu->mem_io_pc = 0; cpu->mem_io_vaddr = 0; cpu->icount_extra = 0; - cpu->icount_decr.u32 = 0; + atomic_set(&cpu->icount_decr.u32, 0); cpu->can_do_io = 1; cpu->exception_index = -1; cpu->crash_occurred = false; diff --git a/qom/object_interfaces.c b/qom/object_interfaces.c index 941fd63afd..97b79b48bb 100644 --- a/qom/object_interfaces.c +++ b/qom/object_interfaces.c @@ -143,7 +143,6 @@ int user_creatable_add_opts_foreach(void *opaque, QemuOpts *opts, Error **errp) { bool (*type_opt_predicate)(const char *, QemuOpts *) = opaque; Object *obj = NULL; - Error *err = NULL; const char *type; type = qemu_opt_get(opts, "qom-type"); @@ -152,9 +151,8 @@ int user_creatable_add_opts_foreach(void *opaque, QemuOpts *opts, Error **errp) return 0; } - obj = user_creatable_add_opts(opts, &err); + obj = user_creatable_add_opts(opts, errp); if (!obj) { - error_report_err(err); return -1; } object_unref(obj); diff --git a/replay/replay-events.c b/replay/replay-events.c index 0964a82838..d9a2d495b9 100644 --- a/replay/replay-events.c +++ b/replay/replay-events.c @@ -190,6 +190,7 @@ void replay_save_events(int checkpoint) { g_assert(replay_mutex_locked()); g_assert(checkpoint != CHECKPOINT_CLOCK_WARP_START); + g_assert(checkpoint != CHECKPOINT_CLOCK_VIRTUAL); while (!QTAILQ_EMPTY(&events_list)) { Event *event = QTAILQ_FIRST(&events_list); replay_save_event(event, checkpoint); diff --git a/replay/replay-internal.c 
b/replay/replay-internal.c index b077cb5fd5..1cea1d4dc9 100644 --- a/replay/replay-internal.c +++ b/replay/replay-internal.c @@ -217,20 +217,25 @@ void replay_mutex_unlock(void) } } +void replay_advance_current_step(uint64_t current_step) +{ + int diff = (int)(replay_get_current_step() - replay_state.current_step); + + /* Time can only go forward */ + assert(diff >= 0); + + if (diff > 0) { + replay_put_event(EVENT_INSTRUCTION); + replay_put_dword(diff); + replay_state.current_step += diff; + } +} + /*! Saves cached instructions. */ void replay_save_instructions(void) { if (replay_file && replay_mode == REPLAY_MODE_RECORD) { g_assert(replay_mutex_locked()); - int diff = (int)(replay_get_current_step() - replay_state.current_step); - - /* Time can only go forward */ - assert(diff >= 0); - - if (diff > 0) { - replay_put_event(EVENT_INSTRUCTION); - replay_put_dword(diff); - replay_state.current_step += diff; - } + replay_advance_current_step(replay_get_current_step()); } } diff --git a/replay/replay-internal.h b/replay/replay-internal.h index 9b0fd916a3..af6f4d55d4 100644 --- a/replay/replay-internal.h +++ b/replay/replay-internal.h @@ -122,6 +122,8 @@ void replay_finish_event(void); data_kind variable. */ void replay_fetch_data_kind(void); +/*! Advance replay_state.current_step to the specified value. */ +void replay_advance_current_step(uint64_t current_step); /*! Saves queued events (like instructions and sound). */ void replay_save_instructions(void); diff --git a/replay/replay-time.c b/replay/replay-time.c index 6a7565ec8d..0df1693337 100644 --- a/replay/replay-time.c +++ b/replay/replay-time.c @@ -15,13 +15,15 @@ #include "replay-internal.h" #include "qemu/error-report.h" -int64_t replay_save_clock(ReplayClockKind kind, int64_t clock) +int64_t replay_save_clock(ReplayClockKind kind, int64_t clock, int64_t raw_icount) { - if (replay_file) { g_assert(replay_mutex_locked()); - replay_save_instructions(); + /* Due to the caller's locking requirements we get the icount from it + * instead of using replay_save_instructions(). + */ + replay_advance_current_step(raw_icount); replay_put_event(EVENT_CLOCK + kind); replay_put_qword(clock); } diff --git a/replay/replay.c b/replay/replay.c index 379b51ab46..8b172b2d1b 100644 --- a/replay/replay.c +++ b/replay/replay.c @@ -214,7 +214,14 @@ bool replay_checkpoint(ReplayCheckpoint checkpoint) /* This checkpoint belongs to several threads. Processing events from different threads is non-deterministic */ - if (checkpoint != CHECKPOINT_CLOCK_WARP_START) { + if (checkpoint != CHECKPOINT_CLOCK_WARP_START + /* FIXME: this is temporary fix, other checkpoints + may also be invoked from the different threads someday. + Asynchronous event processing should be refactored + to create additional replay event kind which is + nailed to the one of the threads and which processes + the event queue. */ + && checkpoint != CHECKPOINT_CLOCK_VIRTUAL) { replay_save_events(checkpoint); } res = true; diff --git a/scripts/archive-source.sh b/scripts/archive-source.sh index 4e63774f9a..62bd22578b 100755 --- a/scripts/archive-source.sh +++ b/scripts/archive-source.sh @@ -18,7 +18,7 @@ if test $# -lt 1; then error "Usage: $0 <output tarball>" fi -tar_file=`realpath "$1"` +tar_file=$(realpath "$1") list_file="${tar_file}.list" vroot_dir="${tar_file}.vroot" @@ -34,7 +34,7 @@ if git diff-index --quiet HEAD -- &>/dev/null then HEAD=HEAD else - HEAD=`git stash create` + HEAD=$(git stash create) fi git clone --shared . "$vroot_dir" test $? 
-ne 0 && error "failed to clone into '$vroot_dir'" diff --git a/scripts/check-qerror.sh b/scripts/check-qerror.sh deleted file mode 100755 index af7fbd5249..0000000000 --- a/scripts/check-qerror.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/bin/sh -# This script verifies that qerror definitions and table entries are -# alphabetically ordered. - -check_order() { - errmsg=$1 - shift - - # sort -C verifies order but does not print a message. sort -c does print a - # message. These options are both in POSIX. - if ! "$@" | sort -C; then - echo "$errmsg" - "$@" | sort -c - exit 1 - fi - return 0 -} - -check_order 'Definitions in qerror.h must be in alphabetical order:' \ - grep '^#define QERR_' qerror.h -check_order 'Entries in qerror.c:qerror_table must be in alphabetical order:' \ - sed -n '/^static.*qerror_table\[\]/,/^};/s/QERR_/&/gp' qerror.c diff --git a/scripts/coccinelle/use-error_fatal.cocci b/scripts/coccinelle/use-error_fatal.cocci new file mode 100644 index 0000000000..10fff0aec4 --- /dev/null +++ b/scripts/coccinelle/use-error_fatal.cocci @@ -0,0 +1,20 @@ +@@ +type T; +identifier FUN, RET; +expression list ARGS; +expression ERR, EC, FAIL; +@@ +( +- T RET = FUN(ARGS, &ERR); ++ T RET = FUN(ARGS, &error_fatal); +| +- RET = FUN(ARGS, &ERR); ++ RET = FUN(ARGS, &error_fatal); +| +- FUN(ARGS, &ERR); ++ FUN(ARGS, &error_fatal); +) +- if (FAIL) { +- error_report_err(ERR); +- exit(EC); +- } diff --git a/scripts/git-submodule.sh b/scripts/git-submodule.sh index 807ca0b4f8..98ca0f2737 100755 --- a/scripts/git-submodule.sh +++ b/scripts/git-submodule.sh @@ -59,8 +59,8 @@ status) fi test -f "$substat" || exit 1 - CURSTATUS=`$GIT submodule status $modules` - OLDSTATUS=`cat $substat` + CURSTATUS=$($GIT submodule status $modules) + OLDSTATUS=$(cat $substat) test "$CURSTATUS" = "$OLDSTATUS" exit $? 
;; diff --git a/scripts/show-fixed-bugs.sh b/scripts/show-fixed-bugs.sh index 36f306898f..a095a4d6ba 100755 --- a/scripts/show-fixed-bugs.sh +++ b/scripts/show-fixed-bugs.sh @@ -23,10 +23,10 @@ while getopts "s:e:cbh" opt; do done if [ "x$start" = "x" ]; then - start=`git tag -l 'v[0-9]*\.[0-9]*\.0' | tail -n 2 | head -n 1` + start=$(git tag -l 'v[0-9]*\.[0-9]*\.0' | tail -n 2 | head -n 1) fi if [ "x$end" = "x" ]; then - end=`git tag -l 'v[0-9]*\.[0-9]*\.0' | tail -n 1` + end=$(git tag -l 'v[0-9]*\.[0-9]*\.0' | tail -n 1) fi if [ "x$start" = "x" ] || [ "x$end" = "x" ]; then @@ -38,9 +38,9 @@ fi echo "Searching git log for bugs in the range $start..$end" urlstr='https://bugs.launchpad.net/\(bugs\|qemu/+bug\)/' -bug_urls=`git log $start..$end \ +bug_urls=$(git log $start..$end \ | sed -n '\,'"$urlstr"', s,\(.*\)\('"$urlstr"'\)\([0-9]*\).*,\2\4,p' \ - | sort -u` + | sort -u) echo Found bug URLs: for i in $bug_urls ; do echo " $i" ; done @@ -68,7 +68,7 @@ elif [ "x$show_in_browser" = "x1" ]; then bugbrowser=xdg-open elif command -v gnome-open >/dev/null 2>&1; then bugbrowser=gnome-open - elif [ "`uname`" = "Darwin" ]; then + elif [ "$(uname)" = "Darwin" ]; then bugbrowser=open elif command -v sensible-browser >/dev/null 2>&1; then bugbrowser=sensible-browser diff --git a/slirp/bootp.c b/slirp/bootp.c index 9e7b53ba94..7b1af73c95 100644 --- a/slirp/bootp.c +++ b/slirp/bootp.c @@ -159,6 +159,7 @@ static void bootp_reply(Slirp *slirp, const struct bootp_t *bp) struct in_addr preq_addr; int dhcp_msg_type, val; uint8_t *q; + uint8_t *end; uint8_t client_ethaddr[ETH_ALEN]; /* extract exact DHCP msg type */ @@ -240,6 +241,7 @@ static void bootp_reply(Slirp *slirp, const struct bootp_t *bp) rbp->bp_siaddr = saddr.sin_addr; /* Server IP address */ q = rbp->bp_vend; + end = (uint8_t *)&rbp[1]; memcpy(q, rfc1533_cookie, 4); q += 4; @@ -292,24 +294,46 @@ static void bootp_reply(Slirp *slirp, const struct bootp_t *bp) if (*slirp->client_hostname) { val = strlen(slirp->client_hostname); - *q++ = RFC1533_HOSTNAME; - *q++ = val; - memcpy(q, slirp->client_hostname, val); - q += val; + if (q + val + 2 >= end) { + g_warning("DHCP packet size exceeded, " + "omitting host name option."); + } else { + *q++ = RFC1533_HOSTNAME; + *q++ = val; + memcpy(q, slirp->client_hostname, val); + q += val; + } } if (slirp->vdomainname) { val = strlen(slirp->vdomainname); - *q++ = RFC1533_DOMAINNAME; - *q++ = val; - memcpy(q, slirp->vdomainname, val); - q += val; + if (q + val + 2 >= end) { + g_warning("DHCP packet size exceeded, " + "omitting domain name option."); + } else { + *q++ = RFC1533_DOMAINNAME; + *q++ = val; + memcpy(q, slirp->vdomainname, val); + q += val; + } + } + + if (slirp->tftp_server_name) { + val = strlen(slirp->tftp_server_name); + if (q + val + 2 >= end) { + g_warning("DHCP packet size exceeded, " + "omitting tftp-server-name option."); + } else { + *q++ = RFC2132_TFTP_SERVER_NAME; + *q++ = val; + memcpy(q, slirp->tftp_server_name, val); + q += val; + } } if (slirp->vdnssearch) { - size_t spaceleft = sizeof(rbp->bp_vend) - (q - rbp->bp_vend); val = slirp->vdnssearch_len; - if (val + 1 > spaceleft) { + if (q + val >= end) { g_warning("DHCP packet size exceeded, " "omitting domain-search option."); } else { @@ -331,6 +355,7 @@ static void bootp_reply(Slirp *slirp, const struct bootp_t *bp) memcpy(q, nak_msg, sizeof(nak_msg) - 1); q += sizeof(nak_msg) - 1; } + assert(q < end); *q = RFC1533_END; daddr.sin_addr.s_addr = 0xffffffffu; diff --git a/slirp/bootp.h b/slirp/bootp.h index 394525733e..4043489835 100644 
--- a/slirp/bootp.h +++ b/slirp/bootp.h @@ -70,6 +70,7 @@ #define RFC2132_MAX_SIZE 57 #define RFC2132_RENEWAL_TIME 58 #define RFC2132_REBIND_TIME 59 +#define RFC2132_TFTP_SERVER_NAME 66 #define DHCPDISCOVER 1 #define DHCPOFFER 2 diff --git a/slirp/ip6_icmp.c b/slirp/ip6_icmp.c index 3f41187cfe..cd1e0b9fe1 100644 --- a/slirp/ip6_icmp.c +++ b/slirp/ip6_icmp.c @@ -17,7 +17,7 @@ static void ra_timer_handler(void *opaque) { Slirp *slirp = opaque; timer_mod(slirp->ra_timer, - qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_EXT) + NDP_Interval); + qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + NDP_Interval); ndp_send_ra(slirp); } @@ -27,10 +27,11 @@ void icmp6_init(Slirp *slirp) return; } - slirp->ra_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL_EXT, - ra_timer_handler, slirp); + slirp->ra_timer = timer_new_full(NULL, QEMU_CLOCK_VIRTUAL, + SCALE_MS, QEMU_TIMER_ATTR_EXTERNAL, + ra_timer_handler, slirp); timer_mod(slirp->ra_timer, - qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_EXT) + NDP_Interval); + qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + NDP_Interval); } void icmp6_cleanup(Slirp *slirp) diff --git a/slirp/libslirp.h b/slirp/libslirp.h index 740408a96e..42e42e9a2a 100644 --- a/slirp/libslirp.h +++ b/slirp/libslirp.h @@ -13,6 +13,7 @@ Slirp *slirp_init(int restricted, bool in_enabled, struct in_addr vnetwork, bool in6_enabled, struct in6_addr vprefix_addr6, uint8_t vprefix_len, struct in6_addr vhost6, const char *vhostname, + const char *tftp_server_name, const char *tftp_path, const char *bootfile, struct in_addr vdhcp_start, struct in_addr vnameserver, struct in6_addr vnameserver6, const char **vdnssearch, diff --git a/slirp/slirp.c b/slirp/slirp.c index 5c3bd6163f..51de41fc02 100644 --- a/slirp/slirp.c +++ b/slirp/slirp.c @@ -283,6 +283,7 @@ Slirp *slirp_init(int restricted, bool in_enabled, struct in_addr vnetwork, bool in6_enabled, struct in6_addr vprefix_addr6, uint8_t vprefix_len, struct in6_addr vhost6, const char *vhostname, + const char *tftp_server_name, const char *tftp_path, const char *bootfile, struct in_addr vdhcp_start, struct in_addr vnameserver, struct in6_addr vnameserver6, const char **vdnssearch, @@ -321,6 +322,7 @@ Slirp *slirp_init(int restricted, bool in_enabled, struct in_addr vnetwork, slirp->vdhcp_startaddr = vdhcp_start; slirp->vnameserver_addr = vnameserver; slirp->vnameserver_addr6 = vnameserver6; + slirp->tftp_server_name = g_strdup(tftp_server_name); if (vdnssearch) { translate_dnssearch(slirp, vdnssearch); diff --git a/slirp/slirp.h b/slirp/slirp.h index 10b410898a..b80725a0d6 100644 --- a/slirp/slirp.h +++ b/slirp/slirp.h @@ -212,6 +212,7 @@ struct Slirp { /* tftp states */ char *tftp_prefix; struct tftp_session tftp_sessions[TFTP_SESSIONS_MAX]; + char *tftp_server_name; ArpTable arp_table; NdpTable ndp_table; diff --git a/stubs/cpu-get-icount.c b/stubs/cpu-get-icount.c index 0b7239d721..35f0c1e24c 100644 --- a/stubs/cpu-get-icount.c +++ b/stubs/cpu-get-icount.c @@ -11,6 +11,11 @@ int64_t cpu_get_icount(void) abort(); } +int64_t cpu_get_icount_raw(void) +{ + abort(); +} + void qemu_timer_notify_cb(void *opaque, QEMUClockType type) { qemu_notify_event(); diff --git a/stubs/replay.c b/stubs/replay.c index 04279abb2c..4ac607895d 100644 --- a/stubs/replay.c +++ b/stubs/replay.c @@ -4,7 +4,7 @@ ReplayMode replay_mode; -int64_t replay_save_clock(unsigned int kind, int64_t clock) +int64_t replay_save_clock(unsigned int kind, int64_t clock, int64_t raw_icount) { abort(); return 0; diff --git a/stubs/tpm.c b/stubs/tpm.c index 6729bc8517..80939cd3db 100644 --- a/stubs/tpm.c +++ b/stubs/tpm.c @@ -9,9 +9,8 
@@ #include "qapi/qapi-commands-tpm.h" #include "sysemu/tpm.h" -int tpm_init(void) +void tpm_init(void) { - return 0; } void tpm_cleanup(void) diff --git a/target/alpha/cpu.c b/target/alpha/cpu.c index b08078e7fc..a953897fcc 100644 --- a/target/alpha/cpu.c +++ b/target/alpha/cpu.c @@ -201,7 +201,6 @@ static void alpha_cpu_initfn(Object *obj) CPUAlphaState *env = &cpu->env; cs->env_ptr = env; - tlb_flush(cs); env->lock_addr = -1; #if defined(CONFIG_USER_ONLY) diff --git a/target/arm/cpu.c b/target/arm/cpu.c index cd48ad42d8..8f16e96b6c 100644 --- a/target/arm/cpu.c +++ b/target/arm/cpu.c @@ -144,9 +144,9 @@ static void arm_cpu_reset(CPUState *s) g_hash_table_foreach(cpu->cp_regs, cp_reg_check_reset, cpu); env->vfp.xregs[ARM_VFP_FPSID] = cpu->reset_fpsid; - env->vfp.xregs[ARM_VFP_MVFR0] = cpu->mvfr0; - env->vfp.xregs[ARM_VFP_MVFR1] = cpu->mvfr1; - env->vfp.xregs[ARM_VFP_MVFR2] = cpu->mvfr2; + env->vfp.xregs[ARM_VFP_MVFR0] = cpu->isar.mvfr0; + env->vfp.xregs[ARM_VFP_MVFR1] = cpu->isar.mvfr1; + env->vfp.xregs[ARM_VFP_MVFR2] = cpu->isar.mvfr2; cpu->power_state = cpu->start_powered_off ? PSCI_OFF : PSCI_ON; s->halted = cpu->start_powered_off; @@ -814,7 +814,11 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp) /* Some features automatically imply others: */ if (arm_feature(env, ARM_FEATURE_V8)) { - set_feature(env, ARM_FEATURE_V7VE); + if (arm_feature(env, ARM_FEATURE_M)) { + set_feature(env, ARM_FEATURE_V7); + } else { + set_feature(env, ARM_FEATURE_V7VE); + } } if (arm_feature(env, ARM_FEATURE_V7VE)) { /* v7 Virtualization Extensions. In real hardware this implies @@ -825,7 +829,7 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp) * Presence of EL2 itself is ARM_FEATURE_EL2, and of the * Security Extensions is ARM_FEATURE_EL3. */ - set_feature(env, ARM_FEATURE_ARM_DIV); + assert(cpu_isar_feature(arm_div, cpu)); set_feature(env, ARM_FEATURE_LPAE); set_feature(env, ARM_FEATURE_V7); } @@ -850,20 +854,14 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp) } if (arm_feature(env, ARM_FEATURE_V6)) { set_feature(env, ARM_FEATURE_V5); - set_feature(env, ARM_FEATURE_JAZELLE); if (!arm_feature(env, ARM_FEATURE_M)) { + assert(cpu_isar_feature(jazelle, cpu)); set_feature(env, ARM_FEATURE_AUXCR); } } if (arm_feature(env, ARM_FEATURE_V5)) { set_feature(env, ARM_FEATURE_V4T); } - if (arm_feature(env, ARM_FEATURE_M)) { - set_feature(env, ARM_FEATURE_THUMB_DIV); - } - if (arm_feature(env, ARM_FEATURE_ARM_DIV)) { - set_feature(env, ARM_FEATURE_THUMB_DIV); - } if (arm_feature(env, ARM_FEATURE_VFP4)) { set_feature(env, ARM_FEATURE_VFP3); set_feature(env, ARM_FEATURE_VFP_FP16); @@ -938,7 +936,7 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp) * registers as well. These are id_pfr1[7:4] and id_aa64pfr0[15:12]. */ cpu->id_pfr1 &= ~0xf0; - cpu->id_aa64pfr0 &= ~0xf000; + cpu->isar.id_aa64pfr0 &= ~0xf000; } if (!cpu->has_el2) { @@ -955,7 +953,7 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp) * registers if we don't have EL2. These are id_pfr1[15:12] and * id_aa64pfr0_el1[11:8]. 
*/ - cpu->id_aa64pfr0 &= ~0xf00; + cpu->isar.id_aa64pfr0 &= ~0xf00; cpu->id_pfr1 &= ~0xf000; } @@ -1084,11 +1082,16 @@ static void arm926_initfn(Object *obj) set_feature(&cpu->env, ARM_FEATURE_VFP); set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS); set_feature(&cpu->env, ARM_FEATURE_CACHE_TEST_CLEAN); - set_feature(&cpu->env, ARM_FEATURE_JAZELLE); cpu->midr = 0x41069265; cpu->reset_fpsid = 0x41011090; cpu->ctr = 0x1dd20d2; cpu->reset_sctlr = 0x00090078; + + /* + * ARMv5 does not have the ID_ISAR registers, but we can still + * set the field to indicate Jazelle support within QEMU. + */ + cpu->isar.id_isar1 = FIELD_DP32(cpu->isar.id_isar1, ID_ISAR1, JAZELLE, 1); } static void arm946_initfn(Object *obj) @@ -1114,12 +1117,18 @@ static void arm1026_initfn(Object *obj) set_feature(&cpu->env, ARM_FEATURE_AUXCR); set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS); set_feature(&cpu->env, ARM_FEATURE_CACHE_TEST_CLEAN); - set_feature(&cpu->env, ARM_FEATURE_JAZELLE); cpu->midr = 0x4106a262; cpu->reset_fpsid = 0x410110a0; cpu->ctr = 0x1dd20d2; cpu->reset_sctlr = 0x00090078; cpu->reset_auxcr = 1; + + /* + * ARMv5 does not have the ID_ISAR registers, but we can still + * set the field to indicate Jazelle support within QEMU. + */ + cpu->isar.id_isar1 = FIELD_DP32(cpu->isar.id_isar1, ID_ISAR1, JAZELLE, 1); + { /* The 1026 had an IFAR at c6,c0,0,1 rather than the ARMv6 c6,c0,0,2 */ ARMCPRegInfo ifar = { @@ -1151,8 +1160,8 @@ static void arm1136_r2_initfn(Object *obj) set_feature(&cpu->env, ARM_FEATURE_CACHE_BLOCK_OPS); cpu->midr = 0x4107b362; cpu->reset_fpsid = 0x410120b4; - cpu->mvfr0 = 0x11111111; - cpu->mvfr1 = 0x00000000; + cpu->isar.mvfr0 = 0x11111111; + cpu->isar.mvfr1 = 0x00000000; cpu->ctr = 0x1dd20d2; cpu->reset_sctlr = 0x00050078; cpu->id_pfr0 = 0x111; @@ -1162,11 +1171,11 @@ static void arm1136_r2_initfn(Object *obj) cpu->id_mmfr0 = 0x01130003; cpu->id_mmfr1 = 0x10030302; cpu->id_mmfr2 = 0x01222110; - cpu->id_isar0 = 0x00140011; - cpu->id_isar1 = 0x12002111; - cpu->id_isar2 = 0x11231111; - cpu->id_isar3 = 0x01102131; - cpu->id_isar4 = 0x141; + cpu->isar.id_isar0 = 0x00140011; + cpu->isar.id_isar1 = 0x12002111; + cpu->isar.id_isar2 = 0x11231111; + cpu->isar.id_isar3 = 0x01102131; + cpu->isar.id_isar4 = 0x141; cpu->reset_auxcr = 7; } @@ -1183,8 +1192,8 @@ static void arm1136_initfn(Object *obj) set_feature(&cpu->env, ARM_FEATURE_CACHE_BLOCK_OPS); cpu->midr = 0x4117b363; cpu->reset_fpsid = 0x410120b4; - cpu->mvfr0 = 0x11111111; - cpu->mvfr1 = 0x00000000; + cpu->isar.mvfr0 = 0x11111111; + cpu->isar.mvfr1 = 0x00000000; cpu->ctr = 0x1dd20d2; cpu->reset_sctlr = 0x00050078; cpu->id_pfr0 = 0x111; @@ -1194,11 +1203,11 @@ static void arm1136_initfn(Object *obj) cpu->id_mmfr0 = 0x01130003; cpu->id_mmfr1 = 0x10030302; cpu->id_mmfr2 = 0x01222110; - cpu->id_isar0 = 0x00140011; - cpu->id_isar1 = 0x12002111; - cpu->id_isar2 = 0x11231111; - cpu->id_isar3 = 0x01102131; - cpu->id_isar4 = 0x141; + cpu->isar.id_isar0 = 0x00140011; + cpu->isar.id_isar1 = 0x12002111; + cpu->isar.id_isar2 = 0x11231111; + cpu->isar.id_isar3 = 0x01102131; + cpu->isar.id_isar4 = 0x141; cpu->reset_auxcr = 7; } @@ -1216,8 +1225,8 @@ static void arm1176_initfn(Object *obj) set_feature(&cpu->env, ARM_FEATURE_EL3); cpu->midr = 0x410fb767; cpu->reset_fpsid = 0x410120b5; - cpu->mvfr0 = 0x11111111; - cpu->mvfr1 = 0x00000000; + cpu->isar.mvfr0 = 0x11111111; + cpu->isar.mvfr1 = 0x00000000; cpu->ctr = 0x1dd20d2; cpu->reset_sctlr = 0x00050078; cpu->id_pfr0 = 0x111; @@ -1227,11 +1236,11 @@ static void arm1176_initfn(Object *obj) cpu->id_mmfr0 = 
0x01130003; cpu->id_mmfr1 = 0x10030302; cpu->id_mmfr2 = 0x01222100; - cpu->id_isar0 = 0x0140011; - cpu->id_isar1 = 0x12002111; - cpu->id_isar2 = 0x11231121; - cpu->id_isar3 = 0x01102131; - cpu->id_isar4 = 0x01141; + cpu->isar.id_isar0 = 0x0140011; + cpu->isar.id_isar1 = 0x12002111; + cpu->isar.id_isar2 = 0x11231121; + cpu->isar.id_isar3 = 0x01102131; + cpu->isar.id_isar4 = 0x01141; cpu->reset_auxcr = 7; } @@ -1247,8 +1256,8 @@ static void arm11mpcore_initfn(Object *obj) set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS); cpu->midr = 0x410fb022; cpu->reset_fpsid = 0x410120b4; - cpu->mvfr0 = 0x11111111; - cpu->mvfr1 = 0x00000000; + cpu->isar.mvfr0 = 0x11111111; + cpu->isar.mvfr1 = 0x00000000; cpu->ctr = 0x1d192992; /* 32K icache 32K dcache */ cpu->id_pfr0 = 0x111; cpu->id_pfr1 = 0x1; @@ -1257,11 +1266,11 @@ static void arm11mpcore_initfn(Object *obj) cpu->id_mmfr0 = 0x01100103; cpu->id_mmfr1 = 0x10020302; cpu->id_mmfr2 = 0x01222000; - cpu->id_isar0 = 0x00100011; - cpu->id_isar1 = 0x12002111; - cpu->id_isar2 = 0x11221011; - cpu->id_isar3 = 0x01102131; - cpu->id_isar4 = 0x141; + cpu->isar.id_isar0 = 0x00100011; + cpu->isar.id_isar1 = 0x12002111; + cpu->isar.id_isar2 = 0x11221011; + cpu->isar.id_isar3 = 0x01102131; + cpu->isar.id_isar4 = 0x141; cpu->reset_auxcr = 1; } @@ -1290,13 +1299,13 @@ static void cortex_m3_initfn(Object *obj) cpu->id_mmfr1 = 0x00000000; cpu->id_mmfr2 = 0x00000000; cpu->id_mmfr3 = 0x00000000; - cpu->id_isar0 = 0x01141110; - cpu->id_isar1 = 0x02111000; - cpu->id_isar2 = 0x21112231; - cpu->id_isar3 = 0x01111110; - cpu->id_isar4 = 0x01310102; - cpu->id_isar5 = 0x00000000; - cpu->id_isar6 = 0x00000000; + cpu->isar.id_isar0 = 0x01141110; + cpu->isar.id_isar1 = 0x02111000; + cpu->isar.id_isar2 = 0x21112231; + cpu->isar.id_isar3 = 0x01111110; + cpu->isar.id_isar4 = 0x01310102; + cpu->isar.id_isar5 = 0x00000000; + cpu->isar.id_isar6 = 0x00000000; } static void cortex_m4_initfn(Object *obj) @@ -1317,13 +1326,13 @@ static void cortex_m4_initfn(Object *obj) cpu->id_mmfr1 = 0x00000000; cpu->id_mmfr2 = 0x00000000; cpu->id_mmfr3 = 0x00000000; - cpu->id_isar0 = 0x01141110; - cpu->id_isar1 = 0x02111000; - cpu->id_isar2 = 0x21112231; - cpu->id_isar3 = 0x01111110; - cpu->id_isar4 = 0x01310102; - cpu->id_isar5 = 0x00000000; - cpu->id_isar6 = 0x00000000; + cpu->isar.id_isar0 = 0x01141110; + cpu->isar.id_isar1 = 0x02111000; + cpu->isar.id_isar2 = 0x21112231; + cpu->isar.id_isar3 = 0x01111110; + cpu->isar.id_isar4 = 0x01310102; + cpu->isar.id_isar5 = 0x00000000; + cpu->isar.id_isar6 = 0x00000000; } static void cortex_m33_initfn(Object *obj) @@ -1346,13 +1355,13 @@ static void cortex_m33_initfn(Object *obj) cpu->id_mmfr1 = 0x00000000; cpu->id_mmfr2 = 0x01000000; cpu->id_mmfr3 = 0x00000000; - cpu->id_isar0 = 0x01101110; - cpu->id_isar1 = 0x02212000; - cpu->id_isar2 = 0x20232232; - cpu->id_isar3 = 0x01111131; - cpu->id_isar4 = 0x01310132; - cpu->id_isar5 = 0x00000000; - cpu->id_isar6 = 0x00000000; + cpu->isar.id_isar0 = 0x01101110; + cpu->isar.id_isar1 = 0x02212000; + cpu->isar.id_isar2 = 0x20232232; + cpu->isar.id_isar3 = 0x01111131; + cpu->isar.id_isar4 = 0x01310132; + cpu->isar.id_isar5 = 0x00000000; + cpu->isar.id_isar6 = 0x00000000; cpu->clidr = 0x00000000; cpu->ctr = 0x8000c000; } @@ -1384,8 +1393,6 @@ static void cortex_r5_initfn(Object *obj) ARMCPU *cpu = ARM_CPU(obj); set_feature(&cpu->env, ARM_FEATURE_V7); - set_feature(&cpu->env, ARM_FEATURE_THUMB_DIV); - set_feature(&cpu->env, ARM_FEATURE_ARM_DIV); set_feature(&cpu->env, ARM_FEATURE_V7MP); set_feature(&cpu->env, ARM_FEATURE_PMSA); 
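/*
 * [Editor's aside, not part of the patch] The ID-register assignments in
 * these init functions rely on QEMU's <hw/registerfields.h> helpers.  A
 * minimal sketch of their semantics, assuming the FIELD()/FIELD_DP32()/
 * FIELD_EX32() macros and an ID_ISAR1.JAZELLE field at bits [31:28]:
 *
 *     FIELD(ID_ISAR1, JAZELLE, 28, 4)
 *
 *     uint32_t t = cpu->isar.id_isar1;
 *     t = FIELD_DP32(t, ID_ISAR1, JAZELLE, 1);        // deposit field := 1
 *     cpu->isar.id_isar1 = t;
 *     assert(FIELD_EX32(t, ID_ISAR1, JAZELLE) == 1);  // extract it back
 *
 * i.e. each FIELD_DP32() call rewrites one 4-bit ID field in place while
 * leaving the rest of the register value untouched.
 */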
cpu->midr = 0x411fc153; /* r1p3 */ @@ -1397,13 +1404,13 @@ static void cortex_r5_initfn(Object *obj) cpu->id_mmfr1 = 0x00000000; cpu->id_mmfr2 = 0x01200000; cpu->id_mmfr3 = 0x0211; - cpu->id_isar0 = 0x02101111; - cpu->id_isar1 = 0x13112111; - cpu->id_isar2 = 0x21232141; - cpu->id_isar3 = 0x01112131; - cpu->id_isar4 = 0x0010142; - cpu->id_isar5 = 0x0; - cpu->id_isar6 = 0x0; + cpu->isar.id_isar0 = 0x02101111; + cpu->isar.id_isar1 = 0x13112111; + cpu->isar.id_isar2 = 0x21232141; + cpu->isar.id_isar3 = 0x01112131; + cpu->isar.id_isar4 = 0x0010142; + cpu->isar.id_isar5 = 0x0; + cpu->isar.id_isar6 = 0x0; cpu->mp_is_up = true; cpu->pmsav7_dregion = 16; define_arm_cp_regs(cpu, cortexr5_cp_reginfo); @@ -1438,8 +1445,8 @@ static void cortex_a8_initfn(Object *obj) set_feature(&cpu->env, ARM_FEATURE_EL3); cpu->midr = 0x410fc080; cpu->reset_fpsid = 0x410330c0; - cpu->mvfr0 = 0x11110222; - cpu->mvfr1 = 0x00011111; + cpu->isar.mvfr0 = 0x11110222; + cpu->isar.mvfr1 = 0x00011111; cpu->ctr = 0x82048004; cpu->reset_sctlr = 0x00c50078; cpu->id_pfr0 = 0x1031; @@ -1450,11 +1457,11 @@ static void cortex_a8_initfn(Object *obj) cpu->id_mmfr1 = 0x20000000; cpu->id_mmfr2 = 0x01202000; cpu->id_mmfr3 = 0x11; - cpu->id_isar0 = 0x00101111; - cpu->id_isar1 = 0x12112111; - cpu->id_isar2 = 0x21232031; - cpu->id_isar3 = 0x11112131; - cpu->id_isar4 = 0x00111142; + cpu->isar.id_isar0 = 0x00101111; + cpu->isar.id_isar1 = 0x12112111; + cpu->isar.id_isar2 = 0x21232031; + cpu->isar.id_isar3 = 0x11112131; + cpu->isar.id_isar4 = 0x00111142; cpu->dbgdidr = 0x15141000; cpu->clidr = (1 << 27) | (2 << 24) | 3; cpu->ccsidr[0] = 0xe007e01a; /* 16k L1 dcache. */ @@ -1512,8 +1519,8 @@ static void cortex_a9_initfn(Object *obj) set_feature(&cpu->env, ARM_FEATURE_CBAR); cpu->midr = 0x410fc090; cpu->reset_fpsid = 0x41033090; - cpu->mvfr0 = 0x11110222; - cpu->mvfr1 = 0x01111111; + cpu->isar.mvfr0 = 0x11110222; + cpu->isar.mvfr1 = 0x01111111; cpu->ctr = 0x80038003; cpu->reset_sctlr = 0x00c50078; cpu->id_pfr0 = 0x1031; @@ -1524,11 +1531,11 @@ static void cortex_a9_initfn(Object *obj) cpu->id_mmfr1 = 0x20000000; cpu->id_mmfr2 = 0x01230000; cpu->id_mmfr3 = 0x00002111; - cpu->id_isar0 = 0x00101111; - cpu->id_isar1 = 0x13112111; - cpu->id_isar2 = 0x21232041; - cpu->id_isar3 = 0x11112131; - cpu->id_isar4 = 0x00111142; + cpu->isar.id_isar0 = 0x00101111; + cpu->isar.id_isar1 = 0x13112111; + cpu->isar.id_isar2 = 0x21232041; + cpu->isar.id_isar3 = 0x11112131; + cpu->isar.id_isar4 = 0x00111142; cpu->dbgdidr = 0x35141000; cpu->clidr = (1 << 27) | (1 << 24) | 3; cpu->ccsidr[0] = 0xe00fe019; /* 16k L1 dcache. */ @@ -1573,8 +1580,8 @@ static void cortex_a7_initfn(Object *obj) cpu->kvm_target = QEMU_KVM_ARM_TARGET_CORTEX_A7; cpu->midr = 0x410fc075; cpu->reset_fpsid = 0x41023075; - cpu->mvfr0 = 0x10110222; - cpu->mvfr1 = 0x11111111; + cpu->isar.mvfr0 = 0x10110222; + cpu->isar.mvfr1 = 0x11111111; cpu->ctr = 0x84448003; cpu->reset_sctlr = 0x00c50078; cpu->id_pfr0 = 0x00001131; @@ -1590,11 +1597,11 @@ static void cortex_a7_initfn(Object *obj) /* a7_mpcore_r0p5_trm, page 4-4 gives 0x01101110; but * table 4-41 gives 0x02101110, which includes the arm div insns. 
*/ - cpu->id_isar0 = 0x02101110; - cpu->id_isar1 = 0x13112111; - cpu->id_isar2 = 0x21232041; - cpu->id_isar3 = 0x11112131; - cpu->id_isar4 = 0x10011142; + cpu->isar.id_isar0 = 0x02101110; + cpu->isar.id_isar1 = 0x13112111; + cpu->isar.id_isar2 = 0x21232041; + cpu->isar.id_isar3 = 0x11112131; + cpu->isar.id_isar4 = 0x10011142; cpu->dbgdidr = 0x3515f005; cpu->clidr = 0x0a200023; cpu->ccsidr[0] = 0x701fe00a; /* 32K L1 dcache */ @@ -1619,8 +1626,8 @@ static void cortex_a15_initfn(Object *obj) cpu->kvm_target = QEMU_KVM_ARM_TARGET_CORTEX_A15; cpu->midr = 0x412fc0f1; cpu->reset_fpsid = 0x410430f0; - cpu->mvfr0 = 0x10110222; - cpu->mvfr1 = 0x11111111; + cpu->isar.mvfr0 = 0x10110222; + cpu->isar.mvfr1 = 0x11111111; cpu->ctr = 0x8444c004; cpu->reset_sctlr = 0x00c50078; cpu->id_pfr0 = 0x00001131; @@ -1633,11 +1640,11 @@ static void cortex_a15_initfn(Object *obj) cpu->id_mmfr1 = 0x20000000; cpu->id_mmfr2 = 0x01240000; cpu->id_mmfr3 = 0x02102211; - cpu->id_isar0 = 0x02101110; - cpu->id_isar1 = 0x13112111; - cpu->id_isar2 = 0x21232041; - cpu->id_isar3 = 0x11112131; - cpu->id_isar4 = 0x10011142; + cpu->isar.id_isar0 = 0x02101110; + cpu->isar.id_isar1 = 0x13112111; + cpu->isar.id_isar2 = 0x21232041; + cpu->isar.id_isar3 = 0x11112131; + cpu->isar.id_isar4 = 0x10011142; cpu->dbgdidr = 0x3515f021; cpu->clidr = 0x0a200023; cpu->ccsidr[0] = 0x701fe00a; /* 32K L1 dcache */ @@ -1830,17 +1837,26 @@ static void arm_max_initfn(Object *obj) cortex_a15_initfn(obj); #ifdef CONFIG_USER_ONLY /* We don't set these in system emulation mode for the moment, - * since we don't correctly set the ID registers to advertise them, + * since we don't correctly set (all of) the ID registers to + * advertise them. */ set_feature(&cpu->env, ARM_FEATURE_V8); - set_feature(&cpu->env, ARM_FEATURE_V8_AES); - set_feature(&cpu->env, ARM_FEATURE_V8_SHA1); - set_feature(&cpu->env, ARM_FEATURE_V8_SHA256); - set_feature(&cpu->env, ARM_FEATURE_V8_PMULL); - set_feature(&cpu->env, ARM_FEATURE_CRC); - set_feature(&cpu->env, ARM_FEATURE_V8_RDM); - set_feature(&cpu->env, ARM_FEATURE_V8_DOTPROD); - set_feature(&cpu->env, ARM_FEATURE_V8_FCMA); + { + uint32_t t; + + t = cpu->isar.id_isar5; + t = FIELD_DP32(t, ID_ISAR5, AES, 2); + t = FIELD_DP32(t, ID_ISAR5, SHA1, 1); + t = FIELD_DP32(t, ID_ISAR5, SHA2, 1); + t = FIELD_DP32(t, ID_ISAR5, CRC32, 1); + t = FIELD_DP32(t, ID_ISAR5, RDM, 1); + t = FIELD_DP32(t, ID_ISAR5, VCMA, 1); + cpu->isar.id_isar5 = t; + + t = cpu->isar.id_isar6; + t = FIELD_DP32(t, ID_ISAR6, DP, 1); + cpu->isar.id_isar6 = t; + } #endif } } diff --git a/target/arm/cpu.h b/target/arm/cpu.h index f00c0444c4..8e6779936e 100644 --- a/target/arm/cpu.h +++ b/target/arm/cpu.h @@ -531,6 +531,13 @@ typedef struct CPUARMState { */ } exception; + /* Information associated with an SError */ + struct { + uint8_t pending; + uint8_t has_esr; + uint64_t esr; + } serror; + /* Thumb-2 EE state. */ uint32_t teecr; uint32_t teehbr; @@ -669,6 +676,8 @@ typedef enum ARMPSCIState { PSCI_ON_PENDING = 2 } ARMPSCIState; +typedef struct ARMISARegisters ARMISARegisters; + /** * ARMCPU: * @env: #CPUARMState @@ -788,13 +797,28 @@ struct ARMCPU { * ARMv7AR ARM Architecture Reference Manual. A reset_ prefix * is used for reset values of non-constant registers; no reset_ * prefix means a constant register. + * Some of these registers are split out into a substructure that + * is shared with the translators to control the ISA. 
*/ + struct ARMISARegisters { + uint32_t id_isar0; + uint32_t id_isar1; + uint32_t id_isar2; + uint32_t id_isar3; + uint32_t id_isar4; + uint32_t id_isar5; + uint32_t id_isar6; + uint32_t mvfr0; + uint32_t mvfr1; + uint32_t mvfr2; + uint64_t id_aa64isar0; + uint64_t id_aa64isar1; + uint64_t id_aa64pfr0; + uint64_t id_aa64pfr1; + } isar; uint32_t midr; uint32_t revidr; uint32_t reset_fpsid; - uint32_t mvfr0; - uint32_t mvfr1; - uint32_t mvfr2; uint32_t ctr; uint32_t reset_sctlr; uint32_t id_pfr0; @@ -808,21 +832,10 @@ struct ARMCPU { uint32_t id_mmfr2; uint32_t id_mmfr3; uint32_t id_mmfr4; - uint32_t id_isar0; - uint32_t id_isar1; - uint32_t id_isar2; - uint32_t id_isar3; - uint32_t id_isar4; - uint32_t id_isar5; - uint32_t id_isar6; - uint64_t id_aa64pfr0; - uint64_t id_aa64pfr1; uint64_t id_aa64dfr0; uint64_t id_aa64dfr1; uint64_t id_aa64afr0; uint64_t id_aa64afr1; - uint64_t id_aa64isar0; - uint64_t id_aa64isar1; uint64_t id_aa64mmfr0; uint64_t id_aa64mmfr1; uint32_t dbgdidr; @@ -1531,6 +1544,16 @@ FIELD(ID_AA64ISAR1, FRINTTS, 32, 4) FIELD(ID_AA64ISAR1, SB, 36, 4) FIELD(ID_AA64ISAR1, SPECRES, 40, 4) +FIELD(ID_AA64PFR0, EL0, 0, 4) +FIELD(ID_AA64PFR0, EL1, 4, 4) +FIELD(ID_AA64PFR0, EL2, 8, 4) +FIELD(ID_AA64PFR0, EL3, 12, 4) +FIELD(ID_AA64PFR0, FP, 16, 4) +FIELD(ID_AA64PFR0, ADVSIMD, 20, 4) +FIELD(ID_AA64PFR0, GIC, 24, 4) +FIELD(ID_AA64PFR0, RAS, 28, 4) +FIELD(ID_AA64PFR0, SVE, 32, 4) + QEMU_BUILD_BUG_ON(ARRAY_SIZE(((ARMCPU *)0)->ccsidr) <= R_V7M_CSSELR_INDEX_MASK); /* If adding a feature bit which corresponds to a Linux ELF @@ -1550,7 +1573,6 @@ enum arm_features { ARM_FEATURE_VFP3, ARM_FEATURE_VFP_FP16, ARM_FEATURE_NEON, - ARM_FEATURE_THUMB_DIV, /* divide supported in Thumb encoding */ ARM_FEATURE_M, /* Microcontroller profile. */ ARM_FEATURE_OMAPCP, /* OMAP specific CP15 ops handling. 
*/ ARM_FEATURE_THUMB2EE, @@ -1560,7 +1582,6 @@ enum arm_features { ARM_FEATURE_V5, ARM_FEATURE_STRONGARM, ARM_FEATURE_VAPA, /* cp15 VA to PA lookups */ - ARM_FEATURE_ARM_DIV, /* divide supported in ARM encoding */ ARM_FEATURE_VFP4, /* VFPv4 (implies that NEON is v2) */ ARM_FEATURE_GENERIC_TIMER, ARM_FEATURE_MVFR, /* Media and VFP Feature Registers 0 and 1 */ @@ -1573,30 +1594,15 @@ enum arm_features { ARM_FEATURE_LPAE, /* has Large Physical Address Extension */ ARM_FEATURE_V8, ARM_FEATURE_AARCH64, /* supports 64 bit mode */ - ARM_FEATURE_V8_AES, /* implements AES part of v8 Crypto Extensions */ ARM_FEATURE_CBAR, /* has cp15 CBAR */ ARM_FEATURE_CRC, /* ARMv8 CRC instructions */ ARM_FEATURE_CBAR_RO, /* has cp15 CBAR and it is read-only */ ARM_FEATURE_EL2, /* has EL2 Virtualization support */ ARM_FEATURE_EL3, /* has EL3 Secure monitor support */ - ARM_FEATURE_V8_SHA1, /* implements SHA1 part of v8 Crypto Extensions */ - ARM_FEATURE_V8_SHA256, /* implements SHA256 part of v8 Crypto Extensions */ - ARM_FEATURE_V8_PMULL, /* implements PMULL part of v8 Crypto Extensions */ ARM_FEATURE_THUMB_DSP, /* DSP insns supported in the Thumb encodings */ ARM_FEATURE_PMU, /* has PMU support */ ARM_FEATURE_VBAR, /* has cp15 VBAR */ ARM_FEATURE_M_SECURITY, /* M profile Security Extension */ - ARM_FEATURE_JAZELLE, /* has (trivial) Jazelle implementation */ - ARM_FEATURE_SVE, /* has Scalable Vector Extension */ - ARM_FEATURE_V8_SHA512, /* implements SHA512 part of v8 Crypto Extensions */ - ARM_FEATURE_V8_SHA3, /* implements SHA3 part of v8 Crypto Extensions */ - ARM_FEATURE_V8_SM3, /* implements SM3 part of v8 Crypto Extensions */ - ARM_FEATURE_V8_SM4, /* implements SM4 part of v8 Crypto Extensions */ - ARM_FEATURE_V8_ATOMICS, /* ARMv8.1-Atomics feature */ - ARM_FEATURE_V8_RDM, /* implements v8.1 simd round multiply */ - ARM_FEATURE_V8_DOTPROD, /* implements v8.2 simd dot product */ - ARM_FEATURE_V8_FP16, /* implements v8.2 half-precision float */ - ARM_FEATURE_V8_FCMA, /* has complex number part of v8.3 extensions. */ ARM_FEATURE_M_MAIN, /* M profile Main Extension */ }; @@ -3148,4 +3154,157 @@ static inline uint64_t *aa64_vfp_qreg(CPUARMState *env, unsigned regno) /* Shared between translate-sve.c and sve_helper.c. */ extern const uint64_t pred_esz_masks[4]; +/* + * 32-bit feature tests via id registers. 
+ */ +static inline bool isar_feature_thumb_div(const ARMISARegisters *id) +{ + return FIELD_EX32(id->id_isar0, ID_ISAR0, DIVIDE) != 0; +} + +static inline bool isar_feature_arm_div(const ARMISARegisters *id) +{ + return FIELD_EX32(id->id_isar0, ID_ISAR0, DIVIDE) > 1; +} + +static inline bool isar_feature_jazelle(const ARMISARegisters *id) +{ + return FIELD_EX32(id->id_isar1, ID_ISAR1, JAZELLE) != 0; +} + +static inline bool isar_feature_aa32_aes(const ARMISARegisters *id) +{ + return FIELD_EX32(id->id_isar5, ID_ISAR5, AES) != 0; +} + +static inline bool isar_feature_aa32_pmull(const ARMISARegisters *id) +{ + return FIELD_EX32(id->id_isar5, ID_ISAR5, AES) > 1; +} + +static inline bool isar_feature_aa32_sha1(const ARMISARegisters *id) +{ + return FIELD_EX32(id->id_isar5, ID_ISAR5, SHA1) != 0; +} + +static inline bool isar_feature_aa32_sha2(const ARMISARegisters *id) +{ + return FIELD_EX32(id->id_isar5, ID_ISAR5, SHA2) != 0; +} + +static inline bool isar_feature_aa32_crc32(const ARMISARegisters *id) +{ + return FIELD_EX32(id->id_isar5, ID_ISAR5, CRC32) != 0; +} + +static inline bool isar_feature_aa32_rdm(const ARMISARegisters *id) +{ + return FIELD_EX32(id->id_isar5, ID_ISAR5, RDM) != 0; +} + +static inline bool isar_feature_aa32_vcma(const ARMISARegisters *id) +{ + return FIELD_EX32(id->id_isar5, ID_ISAR5, VCMA) != 0; +} + +static inline bool isar_feature_aa32_dp(const ARMISARegisters *id) +{ + return FIELD_EX32(id->id_isar6, ID_ISAR6, DP) != 0; +} + +static inline bool isar_feature_aa32_fp16_arith(const ARMISARegisters *id) +{ + /* + * This is a placeholder for use by VCMA until the rest of + * the ARMv8.2-FP16 extension is implemented for aa32 mode, + * at which point we can properly set and check MVFR1.FPHP. + */ + return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, FP) == 1; +} + +/* + * 64-bit feature tests via id registers.
+ */ +static inline bool isar_feature_aa64_aes(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, AES) != 0; +} + +static inline bool isar_feature_aa64_pmull(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, AES) > 1; +} + +static inline bool isar_feature_aa64_sha1(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SHA1) != 0; +} + +static inline bool isar_feature_aa64_sha256(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SHA2) != 0; +} + +static inline bool isar_feature_aa64_sha512(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SHA2) > 1; +} + +static inline bool isar_feature_aa64_crc32(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, CRC32) != 0; +} + +static inline bool isar_feature_aa64_atomics(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, ATOMIC) != 0; +} + +static inline bool isar_feature_aa64_rdm(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, RDM) != 0; +} + +static inline bool isar_feature_aa64_sha3(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SHA3) != 0; +} + +static inline bool isar_feature_aa64_sm3(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SM3) != 0; +} + +static inline bool isar_feature_aa64_sm4(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SM4) != 0; +} + +static inline bool isar_feature_aa64_dp(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, DP) != 0; +} + +static inline bool isar_feature_aa64_fcma(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, FCMA) != 0; +} + +static inline bool isar_feature_aa64_fp16(const ARMISARegisters *id) +{ + /* We always set the AdvSIMD and FP fields identically wrt FP16. */ + return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, FP) == 1; +} + +static inline bool isar_feature_aa64_sve(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, SVE) != 0; +} + +/* + * Forward to the above feature tests given an ARMCPU pointer. 
+ */ +#define cpu_isar_feature(name, cpu) \ + ({ ARMCPU *cpu_ = (cpu); isar_feature_##name(&cpu_->isar); }) + #endif diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c index 44fdf0f6fa..873f059bf2 100644 --- a/target/arm/cpu64.c +++ b/target/arm/cpu64.c @@ -109,11 +109,6 @@ static void aarch64_a57_initfn(Object *obj) set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER); set_feature(&cpu->env, ARM_FEATURE_AARCH64); set_feature(&cpu->env, ARM_FEATURE_CBAR_RO); - set_feature(&cpu->env, ARM_FEATURE_V8_AES); - set_feature(&cpu->env, ARM_FEATURE_V8_SHA1); - set_feature(&cpu->env, ARM_FEATURE_V8_SHA256); - set_feature(&cpu->env, ARM_FEATURE_V8_PMULL); - set_feature(&cpu->env, ARM_FEATURE_CRC); set_feature(&cpu->env, ARM_FEATURE_EL2); set_feature(&cpu->env, ARM_FEATURE_EL3); set_feature(&cpu->env, ARM_FEATURE_PMU); @@ -121,9 +116,9 @@ static void aarch64_a57_initfn(Object *obj) cpu->midr = 0x411fd070; cpu->revidr = 0x00000000; cpu->reset_fpsid = 0x41034070; - cpu->mvfr0 = 0x10110222; - cpu->mvfr1 = 0x12111111; - cpu->mvfr2 = 0x00000043; + cpu->isar.mvfr0 = 0x10110222; + cpu->isar.mvfr1 = 0x12111111; + cpu->isar.mvfr2 = 0x00000043; cpu->ctr = 0x8444c004; cpu->reset_sctlr = 0x00c50838; cpu->id_pfr0 = 0x00000131; @@ -134,18 +129,18 @@ static void aarch64_a57_initfn(Object *obj) cpu->id_mmfr1 = 0x40000000; cpu->id_mmfr2 = 0x01260000; cpu->id_mmfr3 = 0x02102211; - cpu->id_isar0 = 0x02101110; - cpu->id_isar1 = 0x13112111; - cpu->id_isar2 = 0x21232042; - cpu->id_isar3 = 0x01112131; - cpu->id_isar4 = 0x00011142; - cpu->id_isar5 = 0x00011121; - cpu->id_isar6 = 0; - cpu->id_aa64pfr0 = 0x00002222; + cpu->isar.id_isar0 = 0x02101110; + cpu->isar.id_isar1 = 0x13112111; + cpu->isar.id_isar2 = 0x21232042; + cpu->isar.id_isar3 = 0x01112131; + cpu->isar.id_isar4 = 0x00011142; + cpu->isar.id_isar5 = 0x00011121; + cpu->isar.id_isar6 = 0; + cpu->isar.id_aa64pfr0 = 0x00002222; cpu->id_aa64dfr0 = 0x10305106; cpu->pmceid0 = 0x00000000; cpu->pmceid1 = 0x00000000; - cpu->id_aa64isar0 = 0x00011120; + cpu->isar.id_aa64isar0 = 0x00011120; cpu->id_aa64mmfr0 = 0x00001124; cpu->dbgdidr = 0x3516d000; cpu->clidr = 0x0a200023; @@ -170,11 +165,6 @@ static void aarch64_a53_initfn(Object *obj) set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER); set_feature(&cpu->env, ARM_FEATURE_AARCH64); set_feature(&cpu->env, ARM_FEATURE_CBAR_RO); - set_feature(&cpu->env, ARM_FEATURE_V8_AES); - set_feature(&cpu->env, ARM_FEATURE_V8_SHA1); - set_feature(&cpu->env, ARM_FEATURE_V8_SHA256); - set_feature(&cpu->env, ARM_FEATURE_V8_PMULL); - set_feature(&cpu->env, ARM_FEATURE_CRC); set_feature(&cpu->env, ARM_FEATURE_EL2); set_feature(&cpu->env, ARM_FEATURE_EL3); set_feature(&cpu->env, ARM_FEATURE_PMU); @@ -182,9 +172,9 @@ static void aarch64_a53_initfn(Object *obj) cpu->midr = 0x410fd034; cpu->revidr = 0x00000000; cpu->reset_fpsid = 0x41034070; - cpu->mvfr0 = 0x10110222; - cpu->mvfr1 = 0x12111111; - cpu->mvfr2 = 0x00000043; + cpu->isar.mvfr0 = 0x10110222; + cpu->isar.mvfr1 = 0x12111111; + cpu->isar.mvfr2 = 0x00000043; cpu->ctr = 0x84448004; /* L1Ip = VIPT */ cpu->reset_sctlr = 0x00c50838; cpu->id_pfr0 = 0x00000131; @@ -195,16 +185,16 @@ static void aarch64_a53_initfn(Object *obj) cpu->id_mmfr1 = 0x40000000; cpu->id_mmfr2 = 0x01260000; cpu->id_mmfr3 = 0x02102211; - cpu->id_isar0 = 0x02101110; - cpu->id_isar1 = 0x13112111; - cpu->id_isar2 = 0x21232042; - cpu->id_isar3 = 0x01112131; - cpu->id_isar4 = 0x00011142; - cpu->id_isar5 = 0x00011121; - cpu->id_isar6 = 0; - cpu->id_aa64pfr0 = 0x00002222; + cpu->isar.id_isar0 = 0x02101110; + cpu->isar.id_isar1 = 
0x13112111; + cpu->isar.id_isar2 = 0x21232042; + cpu->isar.id_isar3 = 0x01112131; + cpu->isar.id_isar4 = 0x00011142; + cpu->isar.id_isar5 = 0x00011121; + cpu->isar.id_isar6 = 0; + cpu->isar.id_aa64pfr0 = 0x00002222; cpu->id_aa64dfr0 = 0x10305106; - cpu->id_aa64isar0 = 0x00011120; + cpu->isar.id_aa64isar0 = 0x00011120; cpu->id_aa64mmfr0 = 0x00001122; /* 40 bit physical addr */ cpu->dbgdidr = 0x3516d000; cpu->clidr = 0x0a200023; @@ -229,20 +219,15 @@ static void aarch64_a72_initfn(Object *obj) set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER); set_feature(&cpu->env, ARM_FEATURE_AARCH64); set_feature(&cpu->env, ARM_FEATURE_CBAR_RO); - set_feature(&cpu->env, ARM_FEATURE_V8_AES); - set_feature(&cpu->env, ARM_FEATURE_V8_SHA1); - set_feature(&cpu->env, ARM_FEATURE_V8_SHA256); - set_feature(&cpu->env, ARM_FEATURE_V8_PMULL); - set_feature(&cpu->env, ARM_FEATURE_CRC); set_feature(&cpu->env, ARM_FEATURE_EL2); set_feature(&cpu->env, ARM_FEATURE_EL3); set_feature(&cpu->env, ARM_FEATURE_PMU); cpu->midr = 0x410fd083; cpu->revidr = 0x00000000; cpu->reset_fpsid = 0x41034080; - cpu->mvfr0 = 0x10110222; - cpu->mvfr1 = 0x12111111; - cpu->mvfr2 = 0x00000043; + cpu->isar.mvfr0 = 0x10110222; + cpu->isar.mvfr1 = 0x12111111; + cpu->isar.mvfr2 = 0x00000043; cpu->ctr = 0x8444c004; cpu->reset_sctlr = 0x00c50838; cpu->id_pfr0 = 0x00000131; @@ -253,17 +238,17 @@ static void aarch64_a72_initfn(Object *obj) cpu->id_mmfr1 = 0x40000000; cpu->id_mmfr2 = 0x01260000; cpu->id_mmfr3 = 0x02102211; - cpu->id_isar0 = 0x02101110; - cpu->id_isar1 = 0x13112111; - cpu->id_isar2 = 0x21232042; - cpu->id_isar3 = 0x01112131; - cpu->id_isar4 = 0x00011142; - cpu->id_isar5 = 0x00011121; - cpu->id_aa64pfr0 = 0x00002222; + cpu->isar.id_isar0 = 0x02101110; + cpu->isar.id_isar1 = 0x13112111; + cpu->isar.id_isar2 = 0x21232042; + cpu->isar.id_isar3 = 0x01112131; + cpu->isar.id_isar4 = 0x00011142; + cpu->isar.id_isar5 = 0x00011121; + cpu->isar.id_aa64pfr0 = 0x00002222; cpu->id_aa64dfr0 = 0x10305106; cpu->pmceid0 = 0x00000000; cpu->pmceid1 = 0x00000000; - cpu->id_aa64isar0 = 0x00011120; + cpu->isar.id_aa64isar0 = 0x00011120; cpu->id_aa64mmfr0 = 0x00001124; cpu->dbgdidr = 0x3516d000; cpu->clidr = 0x0a200023; @@ -312,24 +297,55 @@ static void aarch64_max_initfn(Object *obj) if (kvm_enabled()) { kvm_arm_set_cpu_features_from_host(cpu); } else { + uint64_t t; + uint32_t u; aarch64_a57_initfn(obj); -#ifdef CONFIG_USER_ONLY - /* We don't set these in system emulation mode for the moment, - * since we don't correctly set the ID registers to advertise them, - * and in some cases they're only available in AArch64 and not AArch32, - * whereas the architecture requires them to be present in both if - * present in either. 
+ + t = cpu->isar.id_aa64isar0; + t = FIELD_DP64(t, ID_AA64ISAR0, AES, 2); /* AES + PMULL */ + t = FIELD_DP64(t, ID_AA64ISAR0, SHA1, 1); + t = FIELD_DP64(t, ID_AA64ISAR0, SHA2, 2); /* SHA512 */ + t = FIELD_DP64(t, ID_AA64ISAR0, CRC32, 1); + t = FIELD_DP64(t, ID_AA64ISAR0, ATOMIC, 2); + t = FIELD_DP64(t, ID_AA64ISAR0, RDM, 1); + t = FIELD_DP64(t, ID_AA64ISAR0, SHA3, 1); + t = FIELD_DP64(t, ID_AA64ISAR0, SM3, 1); + t = FIELD_DP64(t, ID_AA64ISAR0, SM4, 1); + t = FIELD_DP64(t, ID_AA64ISAR0, DP, 1); + cpu->isar.id_aa64isar0 = t; + + t = cpu->isar.id_aa64isar1; + t = FIELD_DP64(t, ID_AA64ISAR1, FCMA, 1); + cpu->isar.id_aa64isar1 = t; + + t = cpu->isar.id_aa64pfr0; + t = FIELD_DP64(t, ID_AA64PFR0, SVE, 1); + t = FIELD_DP64(t, ID_AA64PFR0, FP, 1); + t = FIELD_DP64(t, ID_AA64PFR0, ADVSIMD, 1); + cpu->isar.id_aa64pfr0 = t; + + /* Replicate the same data to the 32-bit id registers. */ + u = cpu->isar.id_isar5; + u = FIELD_DP32(u, ID_ISAR5, AES, 2); /* AES + PMULL */ + u = FIELD_DP32(u, ID_ISAR5, SHA1, 1); + u = FIELD_DP32(u, ID_ISAR5, SHA2, 1); + u = FIELD_DP32(u, ID_ISAR5, CRC32, 1); + u = FIELD_DP32(u, ID_ISAR5, RDM, 1); + u = FIELD_DP32(u, ID_ISAR5, VCMA, 1); + cpu->isar.id_isar5 = u; + + u = cpu->isar.id_isar6; + u = FIELD_DP32(u, ID_ISAR6, DP, 1); + cpu->isar.id_isar6 = u; + + /* + * FIXME: We do not yet support ARMv8.2-fp16 for AArch32, + * so do not set MVFR1.FPHP. Strictly speaking this is not legal, + * but it is also not legal to enable SVE without support for FP16, + * and enabling SVE in system mode is more useful in the short term. */ - set_feature(&cpu->env, ARM_FEATURE_V8_SHA512); - set_feature(&cpu->env, ARM_FEATURE_V8_SHA3); - set_feature(&cpu->env, ARM_FEATURE_V8_SM3); - set_feature(&cpu->env, ARM_FEATURE_V8_SM4); - set_feature(&cpu->env, ARM_FEATURE_V8_ATOMICS); - set_feature(&cpu->env, ARM_FEATURE_V8_RDM); - set_feature(&cpu->env, ARM_FEATURE_V8_DOTPROD); - set_feature(&cpu->env, ARM_FEATURE_V8_FP16); - set_feature(&cpu->env, ARM_FEATURE_V8_FCMA); - set_feature(&cpu->env, ARM_FEATURE_SVE); + +#ifdef CONFIG_USER_ONLY /* For usermode -cpu max we can use a larger and more efficient DCZ * blocksize since we don't have to follow what the hardware does. */ diff --git a/target/arm/helper-a64.c b/target/arm/helper-a64.c index 7f6ad3000b..61799d20e1 100644 --- a/target/arm/helper-a64.c +++ b/target/arm/helper-a64.c @@ -30,6 +30,7 @@ #include "exec/exec-all.h" #include "exec/cpu_ldst.h" #include "qemu/int128.h" +#include "qemu/atomic128.h" #include "tcg.h" #include "fpu/softfloat.h" #include <zlib.h> /* For crc32 */ @@ -509,189 +510,187 @@ uint64_t HELPER(crc32c_64)(uint64_t acc, uint64_t val, uint32_t bytes) return crc32c(acc, buf, bytes) ^ 0xffffffff; } -/* Returns 0 on success; 1 otherwise.
*/ -static uint64_t do_paired_cmpxchg64_le(CPUARMState *env, uint64_t addr, - uint64_t new_lo, uint64_t new_hi, - bool parallel, uintptr_t ra) +uint64_t HELPER(paired_cmpxchg64_le)(CPUARMState *env, uint64_t addr, + uint64_t new_lo, uint64_t new_hi) { - Int128 oldv, cmpv, newv; + Int128 cmpv = int128_make128(env->exclusive_val, env->exclusive_high); + Int128 newv = int128_make128(new_lo, new_hi); + Int128 oldv; + uintptr_t ra = GETPC(); + uint64_t o0, o1; bool success; - cmpv = int128_make128(env->exclusive_val, env->exclusive_high); - newv = int128_make128(new_lo, new_hi); - - if (parallel) { -#ifndef CONFIG_ATOMIC128 - cpu_loop_exit_atomic(ENV_GET_CPU(env), ra); -#else - int mem_idx = cpu_mmu_index(env, false); - TCGMemOpIdx oi = make_memop_idx(MO_LEQ | MO_ALIGN_16, mem_idx); - oldv = helper_atomic_cmpxchgo_le_mmu(env, addr, cmpv, newv, oi, ra); - success = int128_eq(oldv, cmpv); -#endif - } else { - uint64_t o0, o1; - #ifdef CONFIG_USER_ONLY - /* ??? Enforce alignment. */ - uint64_t *haddr = g2h(addr); - - helper_retaddr = ra; - o0 = ldq_le_p(haddr + 0); - o1 = ldq_le_p(haddr + 1); - oldv = int128_make128(o0, o1); - - success = int128_eq(oldv, cmpv); - if (success) { - stq_le_p(haddr + 0, int128_getlo(newv)); - stq_le_p(haddr + 1, int128_gethi(newv)); - } - helper_retaddr = 0; + /* ??? Enforce alignment. */ + uint64_t *haddr = g2h(addr); + + helper_retaddr = ra; + o0 = ldq_le_p(haddr + 0); + o1 = ldq_le_p(haddr + 1); + oldv = int128_make128(o0, o1); + + success = int128_eq(oldv, cmpv); + if (success) { + stq_le_p(haddr + 0, int128_getlo(newv)); + stq_le_p(haddr + 1, int128_gethi(newv)); + } + helper_retaddr = 0; #else - int mem_idx = cpu_mmu_index(env, false); - TCGMemOpIdx oi0 = make_memop_idx(MO_LEQ | MO_ALIGN_16, mem_idx); - TCGMemOpIdx oi1 = make_memop_idx(MO_LEQ, mem_idx); - - o0 = helper_le_ldq_mmu(env, addr + 0, oi0, ra); - o1 = helper_le_ldq_mmu(env, addr + 8, oi1, ra); - oldv = int128_make128(o0, o1); - - success = int128_eq(oldv, cmpv); - if (success) { - helper_le_stq_mmu(env, addr + 0, int128_getlo(newv), oi1, ra); - helper_le_stq_mmu(env, addr + 8, int128_gethi(newv), oi1, ra); - } -#endif + int mem_idx = cpu_mmu_index(env, false); + TCGMemOpIdx oi0 = make_memop_idx(MO_LEQ | MO_ALIGN_16, mem_idx); + TCGMemOpIdx oi1 = make_memop_idx(MO_LEQ, mem_idx); + + o0 = helper_le_ldq_mmu(env, addr + 0, oi0, ra); + o1 = helper_le_ldq_mmu(env, addr + 8, oi1, ra); + oldv = int128_make128(o0, o1); + + success = int128_eq(oldv, cmpv); + if (success) { + helper_le_stq_mmu(env, addr + 0, int128_getlo(newv), oi1, ra); + helper_le_stq_mmu(env, addr + 8, int128_gethi(newv), oi1, ra); } +#endif return !success; } -uint64_t HELPER(paired_cmpxchg64_le)(CPUARMState *env, uint64_t addr, - uint64_t new_lo, uint64_t new_hi) -{ - return do_paired_cmpxchg64_le(env, addr, new_lo, new_hi, false, GETPC()); -} - uint64_t HELPER(paired_cmpxchg64_le_parallel)(CPUARMState *env, uint64_t addr, uint64_t new_lo, uint64_t new_hi) { - return do_paired_cmpxchg64_le(env, addr, new_lo, new_hi, true, GETPC()); -} - -static uint64_t do_paired_cmpxchg64_be(CPUARMState *env, uint64_t addr, - uint64_t new_lo, uint64_t new_hi, - bool parallel, uintptr_t ra) -{ Int128 oldv, cmpv, newv; + uintptr_t ra = GETPC(); bool success; + int mem_idx; + TCGMemOpIdx oi; - /* high and low need to be switched here because this is not actually a - * 128bit store but two doublewords stored consecutively - */ - cmpv = int128_make128(env->exclusive_high, env->exclusive_val); - newv = int128_make128(new_hi, new_lo); + assert(HAVE_CMPXCHG128); - 
if (parallel) { -#ifndef CONFIG_ATOMIC128 - cpu_loop_exit_atomic(ENV_GET_CPU(env), ra); -#else - int mem_idx = cpu_mmu_index(env, false); - TCGMemOpIdx oi = make_memop_idx(MO_BEQ | MO_ALIGN_16, mem_idx); - oldv = helper_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv, oi, ra); - success = int128_eq(oldv, cmpv); -#endif - } else { - uint64_t o0, o1; + mem_idx = cpu_mmu_index(env, false); + oi = make_memop_idx(MO_LEQ | MO_ALIGN_16, mem_idx); -#ifdef CONFIG_USER_ONLY - /* ??? Enforce alignment. */ - uint64_t *haddr = g2h(addr); - - helper_retaddr = ra; - o1 = ldq_be_p(haddr + 0); - o0 = ldq_be_p(haddr + 1); - oldv = int128_make128(o0, o1); - - success = int128_eq(oldv, cmpv); - if (success) { - stq_be_p(haddr + 0, int128_gethi(newv)); - stq_be_p(haddr + 1, int128_getlo(newv)); - } - helper_retaddr = 0; -#else - int mem_idx = cpu_mmu_index(env, false); - TCGMemOpIdx oi0 = make_memop_idx(MO_BEQ | MO_ALIGN_16, mem_idx); - TCGMemOpIdx oi1 = make_memop_idx(MO_BEQ, mem_idx); - - o1 = helper_be_ldq_mmu(env, addr + 0, oi0, ra); - o0 = helper_be_ldq_mmu(env, addr + 8, oi1, ra); - oldv = int128_make128(o0, o1); - - success = int128_eq(oldv, cmpv); - if (success) { - helper_be_stq_mmu(env, addr + 0, int128_gethi(newv), oi1, ra); - helper_be_stq_mmu(env, addr + 8, int128_getlo(newv), oi1, ra); - } -#endif - } + cmpv = int128_make128(env->exclusive_val, env->exclusive_high); + newv = int128_make128(new_lo, new_hi); + oldv = helper_atomic_cmpxchgo_le_mmu(env, addr, cmpv, newv, oi, ra); + success = int128_eq(oldv, cmpv); return !success; } uint64_t HELPER(paired_cmpxchg64_be)(CPUARMState *env, uint64_t addr, uint64_t new_lo, uint64_t new_hi) { - return do_paired_cmpxchg64_be(env, addr, new_lo, new_hi, false, GETPC()); + /* + * High and low need to be switched here because this is not actually a + * 128-bit store but two doublewords stored consecutively + */ + Int128 cmpv = int128_make128(env->exclusive_high, env->exclusive_val); + Int128 newv = int128_make128(new_hi, new_lo); + Int128 oldv; + uintptr_t ra = GETPC(); + uint64_t o0, o1; + bool success; + +#ifdef CONFIG_USER_ONLY + /* ??? Enforce alignment.
*/ + uint64_t *haddr = g2h(addr); + + helper_retaddr = ra; + o1 = ldq_be_p(haddr + 0); + o0 = ldq_be_p(haddr + 1); + oldv = int128_make128(o0, o1); + + success = int128_eq(oldv, cmpv); + if (success) { + stq_be_p(haddr + 0, int128_gethi(newv)); + stq_be_p(haddr + 1, int128_getlo(newv)); + } + helper_retaddr = 0; +#else + int mem_idx = cpu_mmu_index(env, false); + TCGMemOpIdx oi0 = make_memop_idx(MO_BEQ | MO_ALIGN_16, mem_idx); + TCGMemOpIdx oi1 = make_memop_idx(MO_BEQ, mem_idx); + + o1 = helper_be_ldq_mmu(env, addr + 0, oi0, ra); + o0 = helper_be_ldq_mmu(env, addr + 8, oi1, ra); + oldv = int128_make128(o0, o1); + + success = int128_eq(oldv, cmpv); + if (success) { + helper_be_stq_mmu(env, addr + 0, int128_gethi(newv), oi1, ra); + helper_be_stq_mmu(env, addr + 8, int128_getlo(newv), oi1, ra); + } +#endif + + return !success; } uint64_t HELPER(paired_cmpxchg64_be_parallel)(CPUARMState *env, uint64_t addr, - uint64_t new_lo, uint64_t new_hi) + uint64_t new_lo, uint64_t new_hi) { + Int128 oldv, cmpv, newv; + uintptr_t ra = GETPC(); + bool success; + int mem_idx; + TCGMemOpIdx oi; + + assert(HAVE_CMPXCHG128); + + mem_idx = cpu_mmu_index(env, false); + oi = make_memop_idx(MO_BEQ | MO_ALIGN_16, mem_idx); + + /* + * High and low need to be switched here because this is not actually a + * 128-bit store but two doublewords stored consecutively + */ + cmpv = int128_make128(env->exclusive_high, env->exclusive_val); + newv = int128_make128(new_hi, new_lo); + oldv = helper_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv, oi, ra); + + success = int128_eq(oldv, cmpv); + return !success; } /* Writes back the old data into Rs. */ void HELPER(casp_le_parallel)(CPUARMState *env, uint32_t rs, uint64_t addr, uint64_t new_lo, uint64_t new_hi) { - uintptr_t ra = GETPC(); -#ifndef CONFIG_ATOMIC128 - cpu_loop_exit_atomic(ENV_GET_CPU(env), ra); -#else Int128 oldv, cmpv, newv; + uintptr_t ra = GETPC(); + int mem_idx; + TCGMemOpIdx oi; + + assert(HAVE_CMPXCHG128); + + mem_idx = cpu_mmu_index(env, false); + oi = make_memop_idx(MO_LEQ | MO_ALIGN_16, mem_idx); cmpv = int128_make128(env->xregs[rs], env->xregs[rs + 1]); newv = int128_make128(new_lo, new_hi); - - int mem_idx = cpu_mmu_index(env, false); - TCGMemOpIdx oi = make_memop_idx(MO_LEQ | MO_ALIGN_16, mem_idx); oldv = helper_atomic_cmpxchgo_le_mmu(env, addr, cmpv, newv, oi, ra); env->xregs[rs] = int128_getlo(oldv); env->xregs[rs + 1] = int128_gethi(oldv); -#endif } void HELPER(casp_be_parallel)(CPUARMState *env, uint32_t rs, uint64_t addr, uint64_t new_hi, uint64_t new_lo) { - uintptr_t ra = GETPC(); -#ifndef CONFIG_ATOMIC128 - cpu_loop_exit_atomic(ENV_GET_CPU(env), ra); -#else Int128 oldv, cmpv, newv; + uintptr_t ra = GETPC(); + int mem_idx; + TCGMemOpIdx oi; + + assert(HAVE_CMPXCHG128); + + mem_idx = cpu_mmu_index(env, false); + oi = make_memop_idx(MO_BEQ | MO_ALIGN_16, mem_idx); cmpv = int128_make128(env->xregs[rs + 1], env->xregs[rs]); newv = int128_make128(new_lo, new_hi); - - int mem_idx = cpu_mmu_index(env, false); - TCGMemOpIdx oi = make_memop_idx(MO_LEQ | MO_ALIGN_16, mem_idx); oldv = helper_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv, oi, ra); env->xregs[rs + 1] = int128_getlo(oldv); env->xregs[rs] = int128_gethi(oldv); -#endif } /* diff --git a/target/arm/helper.c b/target/arm/helper.c index e3946562aa..0ea95b0815 100644 --- a/target/arm/helper.c +++ b/target/arm/helper.c @@ -56,6 +56,8 @@ static void v8m_security_lookup(CPUARMState *env, uint32_t address, V8M_SAttributes *sattrs);
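/*
 * [Editor's sketch, not from the patch] Why the big-endian paired
 * compare-and-swap code above builds its Int128 values with the halves
 * swapped: the 16-byte exclusive region holds two consecutive
 * doublewords rather than one 128-bit value, so after big-endian loads
 * the doubleword at the lower address lands in the *high* half of the
 * Int128.  Only int128_make128() from "qemu/int128.h" is assumed; the
 * function name is hypothetical.
 */
static inline Int128 demo_be_pair_to_int128(uint64_t first_dw, uint64_t second_dw)
{
    /* int128_make128(lo, hi): the first argument is the low 64 bits. */
    return int128_make128(second_dw, first_dw);   /* swapped on purpose */
}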
#endif +static void switch_mode(CPUARMState *env, int mode); + static int vfp_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg) { int nregs; @@ -552,12 +554,61 @@ static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri, raw_write(env, ri, value); } +/* IS variants of TLB operations must affect all cores */ +static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + CPUState *cs = ENV_GET_CPU(env); + + tlb_flush_all_cpus_synced(cs); +} + +static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + CPUState *cs = ENV_GET_CPU(env); + + tlb_flush_all_cpus_synced(cs); +} + +static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + CPUState *cs = ENV_GET_CPU(env); + + tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK); +} + +static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + CPUState *cs = ENV_GET_CPU(env); + + tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK); +} + +/* + * Non-IS variants of TLB operations are upgraded to + * IS versions if we are at NS EL1 and HCR_EL2.FB is set to + * force broadcast of these operations. + */ +static bool tlb_force_broadcast(CPUARMState *env) +{ + return (env->cp15.hcr_el2 & HCR_FB) && + arm_current_el(env) == 1 && !arm_is_secure_below_el3(env); +} + static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { /* Invalidate all (TLBIALL) */ ARMCPU *cpu = arm_env_get_cpu(env); + if (tlb_force_broadcast(env)) { + tlbiall_is_write(env, NULL, value); + return; + } + tlb_flush(CPU(cpu)); } @@ -567,6 +618,11 @@ static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri, /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */ ARMCPU *cpu = arm_env_get_cpu(env); + if (tlb_force_broadcast(env)) { + tlbimva_is_write(env, NULL, value); + return; + } + tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK); } @@ -576,6 +632,11 @@ static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri, /* Invalidate by ASID (TLBIASID) */ ARMCPU *cpu = arm_env_get_cpu(env); + if (tlb_force_broadcast(env)) { + tlbiasid_is_write(env, NULL, value); + return; + } + tlb_flush(CPU(cpu)); } @@ -585,40 +646,12 @@ static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri, /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */ ARMCPU *cpu = arm_env_get_cpu(env); - tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK); -} - -/* IS variants of TLB operations must affect all cores */ -static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - CPUState *cs = ENV_GET_CPU(env); - - tlb_flush_all_cpus_synced(cs); -} - -static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - CPUState *cs = ENV_GET_CPU(env); - - tlb_flush_all_cpus_synced(cs); -} - -static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - CPUState *cs = ENV_GET_CPU(env); - - tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK); -} - -static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - CPUState *cs = ENV_GET_CPU(env); + if (tlb_force_broadcast(env)) { + tlbimvaa_is_write(env, NULL, value); + return; + } - tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK); + tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK); } static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri, @@ -1296,12 +1329,26 @@ static uint64_t
isr_read(CPUARMState *env, const ARMCPRegInfo *ri) CPUState *cs = ENV_GET_CPU(env); uint64_t ret = 0; - if (cs->interrupt_request & CPU_INTERRUPT_HARD) { - ret |= CPSR_I; + if (arm_hcr_el2_imo(env)) { + if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) { + ret |= CPSR_I; + } + } else { + if (cs->interrupt_request & CPU_INTERRUPT_HARD) { + ret |= CPSR_I; + } } - if (cs->interrupt_request & CPU_INTERRUPT_FIQ) { - ret |= CPSR_F; + + if (arm_hcr_el2_fmo(env)) { + if (cs->interrupt_request & CPU_INTERRUPT_VFIQ) { + ret |= CPSR_F; + } + } else { + if (cs->interrupt_request & CPU_INTERRUPT_FIQ) { + ret |= CPSR_F; + } } + /* External aborts are not possible in QEMU so the A bit is always clear */ return ret; } @@ -2270,13 +2317,15 @@ static uint64_t do_ats_write(CPUARMState *env, uint64_t value, * * The Non-secure TTBCR.EAE bit is set to 1 * * The implementation includes EL2, and the value of HCR.VM is 1 * + * (Note that HCR.DC makes HCR.VM behave as if it is 1.) + * * ATS1Hx always uses the 64bit format (not supported yet). */ format64 = arm_s1_regime_using_lpae_format(env, mmu_idx); if (arm_feature(env, ARM_FEATURE_EL2)) { if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) { - format64 |= env->cp15.hcr_el2 & HCR_VM; + format64 |= env->cp15.hcr_el2 & (HCR_VM | HCR_DC); } else { format64 |= arm_current_el(env) == 2; } @@ -2709,12 +2758,10 @@ static void vmsa_tcr_el1_write(CPUARMState *env, const ARMCPRegInfo *ri, static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { - /* 64 bit accesses to the TTBRs can change the ASID and so we - * must flush the TLB. - */ - if (cpreg_field_is_64bit(ri)) { + /* If the ASID changes (with a 64-bit write), we must flush the TLB. */ + if (cpreg_field_is_64bit(ri) && + extract64(raw_read(env, ri) ^ value, 48, 16) != 0) { ARMCPU *cpu = arm_env_get_cpu(env); - tlb_flush(CPU(cpu)); } raw_write(env, ri, value); @@ -3083,22 +3130,6 @@ static CPAccessResult aa64_cacheop_access(CPUARMState *env, * Page D4-1736 (DDI0487A.b) */ -static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - CPUState *cs = ENV_GET_CPU(env); - - if (arm_is_secure_below_el3(env)) { - tlb_flush_by_mmuidx(cs, - ARMMMUIdxBit_S1SE1 | - ARMMMUIdxBit_S1SE0); - } else { - tlb_flush_by_mmuidx(cs, - ARMMMUIdxBit_S12NSE1 | - ARMMMUIdxBit_S12NSE0); - } -} - static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { @@ -3116,6 +3147,27 @@ static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri, } } +static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + CPUState *cs = ENV_GET_CPU(env); + + if (tlb_force_broadcast(env)) { + tlbi_aa64_vmalle1is_write(env, NULL, value); + return; + } + + if (arm_is_secure_below_el3(env)) { + tlb_flush_by_mmuidx(cs, + ARMMMUIdxBit_S1SE1 | + ARMMMUIdxBit_S1SE0); + } else { + tlb_flush_by_mmuidx(cs, + ARMMMUIdxBit_S12NSE1 | + ARMMMUIdxBit_S12NSE0); + } +} + static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { @@ -3205,29 +3257,6 @@ static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri, tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E3); } -static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - /* Invalidate by VA, EL1&0 (AArch64 version). - * Currently handles all of VAE1, VAAE1, VAALE1 and VALE1, - * since we don't support flush-for-specific-ASID-only or - * flush-last-level-only.
- */ - ARMCPU *cpu = arm_env_get_cpu(env); - CPUState *cs = CPU(cpu); - uint64_t pageaddr = sextract64(value << 12, 0, 56); - - if (arm_is_secure_below_el3(env)) { - tlb_flush_page_by_mmuidx(cs, pageaddr, - ARMMMUIdxBit_S1SE1 | - ARMMMUIdxBit_S1SE0); - } else { - tlb_flush_page_by_mmuidx(cs, pageaddr, - ARMMMUIdxBit_S12NSE1 | - ARMMMUIdxBit_S12NSE0); - } -} - static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { @@ -3275,6 +3304,34 @@ static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri, } } +static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + /* Invalidate by VA, EL1&0 (AArch64 version). + * Currently handles all of VAE1, VAAE1, VAALE1 and VALE1, + * since we don't support flush-for-specific-ASID-only or + * flush-last-level-only. + */ + ARMCPU *cpu = arm_env_get_cpu(env); + CPUState *cs = CPU(cpu); + uint64_t pageaddr = sextract64(value << 12, 0, 56); + + if (tlb_force_broadcast(env)) { + tlbi_aa64_vae1is_write(env, NULL, value); + return; + } + + if (arm_is_secure_below_el3(env)) { + tlb_flush_page_by_mmuidx(cs, pageaddr, + ARMMMUIdxBit_S1SE1 | + ARMMMUIdxBit_S1SE0); + } else { + tlb_flush_page_by_mmuidx(cs, pageaddr, + ARMMMUIdxBit_S12NSE1 | + ARMMMUIdxBit_S12NSE0); + } +} + static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { @@ -3872,6 +3929,7 @@ static const ARMCPRegInfo el3_no_el2_v8_cp_reginfo[] = { static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { ARMCPU *cpu = arm_env_get_cpu(env); + CPUState *cs = ENV_GET_CPU(env); uint64_t valid_mask = HCR_MASK; if (arm_feature(env, ARM_FEATURE_EL3)) { @@ -3890,6 +3948,28 @@ static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) /* Clear RES0 bits. */ value &= valid_mask; + /* + * VI and VF are kept in cs->interrupt_request. Modifying that + * requires that we have the iothread lock, which is done by + * marking the reginfo structs as ARM_CP_IO. + * Note that if a write to HCR pends a VIRQ or VFIQ it is never + * possible for it to be taken immediately, because VIRQ and + * VFIQ are masked unless running at EL0 or EL1, and HCR + * can only be written at EL2. 
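Because HCR_EL2.VI/VF now live in cs->interrupt_request rather than in the stored register value, a read has to reassemble them. A self-contained model of that round-trip; the types and interrupt flags are invented stand-ins, only the VI (bit 7) and VF (bit 6) positions follow the architecture:

#include <assert.h>
#include <stdint.h>

#define HCR_VF (1ULL << 6)
#define HCR_VI (1ULL << 7)
#define IRQ_VIRQ (1u << 0)   /* stand-ins for CPU_INTERRUPT_VIRQ/VFIQ */
#define IRQ_VFIQ (1u << 1)

struct toy_cpu {
    uint64_t hcr_el2;        /* stored with VI/VF masked out */
    unsigned irq_pending;    /* stand-in for cs->interrupt_request */
};

static void hcr_write(struct toy_cpu *c, uint64_t value)
{
    c->irq_pending &= ~(IRQ_VIRQ | IRQ_VFIQ);
    if (value & HCR_VI) c->irq_pending |= IRQ_VIRQ;
    if (value & HCR_VF) c->irq_pending |= IRQ_VFIQ;
    c->hcr_el2 = value & ~(HCR_VI | HCR_VF);  /* don't store them twice */
}

static uint64_t hcr_read(const struct toy_cpu *c)
{
    uint64_t ret = c->hcr_el2;
    if (c->irq_pending & IRQ_VIRQ) ret |= HCR_VI;
    if (c->irq_pending & IRQ_VFIQ) ret |= HCR_VF;
    return ret;
}

int main(void)
{
    struct toy_cpu c = { 0, 0 };
    hcr_write(&c, HCR_VI | 0x1);             /* VI plus an unrelated bit */
    assert(hcr_read(&c) == (HCR_VI | 0x1));  /* round-trips exactly */
    assert((c.hcr_el2 & HCR_VI) == 0);       /* but VI lives elsewhere */
    return 0;
}

Keeping a single home for the pending-interrupt state is the point of the design: the GIC and the HCR write path both feed interrupt_request, so neither can go stale.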
+ */ + g_assert(qemu_mutex_iothread_locked()); + if (value & HCR_VI) { + cs->interrupt_request |= CPU_INTERRUPT_VIRQ; + } else { + cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ; + } + if (value & HCR_VF) { + cs->interrupt_request |= CPU_INTERRUPT_VFIQ; + } else { + cs->interrupt_request &= ~CPU_INTERRUPT_VFIQ; + } + value &= ~(HCR_VI | HCR_VF); + /* These bits change the MMU setup: * HCR_VM enables stage 2 translation * HCR_PTW forbids certain page-table setups @@ -3917,16 +3997,32 @@ static void hcr_writelow(CPUARMState *env, const ARMCPRegInfo *ri, hcr_write(env, NULL, value); } +static uint64_t hcr_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + /* The VI and VF bits live in cs->interrupt_request */ + uint64_t ret = env->cp15.hcr_el2 & ~(HCR_VI | HCR_VF); + CPUState *cs = ENV_GET_CPU(env); + + if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) { + ret |= HCR_VI; + } + if (cs->interrupt_request & CPU_INTERRUPT_VFIQ) { + ret |= HCR_VF; + } + return ret; +} + static const ARMCPRegInfo el2_cp_reginfo[] = { { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64, + .type = ARM_CP_IO, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0, .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2), - .writefn = hcr_write }, + .writefn = hcr_write, .readfn = hcr_read }, { .name = "HCR", .state = ARM_CP_STATE_AA32, - .type = ARM_CP_ALIAS, + .type = ARM_CP_ALIAS | ARM_CP_IO, .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0, .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2), - .writefn = hcr_writelow }, + .writefn = hcr_writelow, .readfn = hcr_read }, { .name = "ELR_EL2", .state = ARM_CP_STATE_AA64, .type = ARM_CP_ALIAS, .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1, @@ -4163,7 +4259,7 @@ static const ARMCPRegInfo el2_cp_reginfo[] = { static const ARMCPRegInfo el2_v8_cp_reginfo[] = { { .name = "HCR2", .state = ARM_CP_STATE_AA32, - .type = ARM_CP_ALIAS, + .type = ARM_CP_ALIAS | ARM_CP_IO, .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4, .access = PL2_RW, .fieldoffset = offsetofhigh32(CPUARMState, cp15.hcr_el2), @@ -4214,7 +4310,7 @@ static const ARMCPRegInfo el3_cp_reginfo[] = { .fieldoffset = offsetof(CPUARMState, cp15.mvbar) }, { .name = "TTBR0_EL3", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 0, - .access = PL3_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0, + .access = PL3_RW, .resetvalue = 0, .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[3]) }, { .name = "TCR_EL3", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 2, @@ -4873,7 +4969,7 @@ static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri) static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri) { ARMCPU *cpu = arm_env_get_cpu(env); - uint64_t pfr0 = cpu->id_aa64pfr0; + uint64_t pfr0 = cpu->isar.id_aa64pfr0; if (env->gicv3state) { pfr0 |= 1 << 24; @@ -4940,27 +5036,27 @@ void register_cp_regs_for_features(ARMCPU *cpu) { .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0, .access = PL1_R, .type = ARM_CP_CONST, - .resetvalue = cpu->id_isar0 }, + .resetvalue = cpu->isar.id_isar0 }, { .name = "ID_ISAR1", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 1, .access = PL1_R, .type = ARM_CP_CONST, - .resetvalue = cpu->id_isar1 }, + .resetvalue = cpu->isar.id_isar1 }, { .name = "ID_ISAR2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2, .access = PL1_R, .type = ARM_CP_CONST, - .resetvalue = cpu->id_isar2 }, + 
.resetvalue = cpu->isar.id_isar2 }, { .name = "ID_ISAR3", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 3, .access = PL1_R, .type = ARM_CP_CONST, - .resetvalue = cpu->id_isar3 }, + .resetvalue = cpu->isar.id_isar3 }, { .name = "ID_ISAR4", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 4, .access = PL1_R, .type = ARM_CP_CONST, - .resetvalue = cpu->id_isar4 }, + .resetvalue = cpu->isar.id_isar4 }, { .name = "ID_ISAR5", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5, .access = PL1_R, .type = ARM_CP_CONST, - .resetvalue = cpu->id_isar5 }, + .resetvalue = cpu->isar.id_isar5 }, { .name = "ID_MMFR4", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 6, .access = PL1_R, .type = ARM_CP_CONST, @@ -4968,7 +5064,7 @@ void register_cp_regs_for_features(ARMCPU *cpu) { .name = "ID_ISAR6", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 7, .access = PL1_R, .type = ARM_CP_CONST, - .resetvalue = cpu->id_isar6 }, + .resetvalue = cpu->isar.id_isar6 }, REGINFO_SENTINEL }; define_arm_cp_regs(cpu, v6_idregs); @@ -5039,7 +5135,7 @@ void register_cp_regs_for_features(ARMCPU *cpu) { .name = "ID_AA64PFR1_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1, .access = PL1_R, .type = ARM_CP_CONST, - .resetvalue = cpu->id_aa64pfr1}, + .resetvalue = cpu->isar.id_aa64pfr1}, { .name = "ID_AA64PFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 2, .access = PL1_R, .type = ARM_CP_CONST, @@ -5100,11 +5196,11 @@ void register_cp_regs_for_features(ARMCPU *cpu) { .name = "ID_AA64ISAR0_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0, .access = PL1_R, .type = ARM_CP_CONST, - .resetvalue = cpu->id_aa64isar0 }, + .resetvalue = cpu->isar.id_aa64isar0 }, { .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1, .access = PL1_R, .type = ARM_CP_CONST, - .resetvalue = cpu->id_aa64isar1 }, + .resetvalue = cpu->isar.id_aa64isar1 }, { .name = "ID_AA64ISAR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2, .access = PL1_R, .type = ARM_CP_CONST, @@ -5164,15 +5260,15 @@ void register_cp_regs_for_features(ARMCPU *cpu) { .name = "MVFR0_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0, .access = PL1_R, .type = ARM_CP_CONST, - .resetvalue = cpu->mvfr0 }, + .resetvalue = cpu->isar.mvfr0 }, { .name = "MVFR1_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1, .access = PL1_R, .type = ARM_CP_CONST, - .resetvalue = cpu->mvfr1 }, + .resetvalue = cpu->isar.mvfr1 }, { .name = "MVFR2_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2, .access = PL1_R, .type = ARM_CP_CONST, - .resetvalue = cpu->mvfr2 }, + .resetvalue = cpu->isar.mvfr2 }, { .name = "MVFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 3, .access = PL1_R, .type = ARM_CP_CONST, @@ -5618,7 +5714,7 @@ void register_cp_regs_for_features(ARMCPU *cpu) define_one_arm_cp_reg(cpu, &sctlr); } - if (arm_feature(env, ARM_FEATURE_SVE)) { + if (cpu_isar_feature(aa64_sve, cpu)) { define_one_arm_cp_reg(cpu, &zcr_el1_reginfo); if (arm_feature(env, ARM_FEATURE_EL2)) { define_one_arm_cp_reg(cpu, &zcr_el2_reginfo); @@ -6208,7 +6304,17 @@ void cpsr_write(CPUARMState *env, uint32_t val, uint32_t 
mask, mask |= CPSR_IL; val |= CPSR_IL; } + qemu_log_mask(LOG_GUEST_ERROR, + "Illegal AArch32 mode switch attempt from %s to %s\n", + aarch32_mode_name(env->uncached_cpsr), + aarch32_mode_name(val)); } else { + qemu_log_mask(CPU_LOG_INT, "%s %s to %s PC 0x%" PRIx32 "\n", + write_type == CPSRWriteExceptionReturn ? + "Exception return from AArch32" : + "AArch32 mode switch from", + aarch32_mode_name(env->uncached_cpsr), + aarch32_mode_name(val), env->regs[15]); switch_mode(env, val & CPSR_M); } } @@ -6306,7 +6412,7 @@ uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op) return 0; } -void switch_mode(CPUARMState *env, int mode) +static void switch_mode(CPUARMState *env, int mode) { ARMCPU *cpu = arm_env_get_cpu(env); @@ -6328,7 +6434,7 @@ void aarch64_sync_64_to_32(CPUARMState *env) #else -void switch_mode(CPUARMState *env, int mode) +static void switch_mode(CPUARMState *env, int mode) { int old_mode; int i; @@ -8194,6 +8300,19 @@ static void arm_cpu_do_interrupt_aarch32_hyp(CPUState *cs) } if (cs->exception_index != EXCP_IRQ && cs->exception_index != EXCP_FIQ) { + if (!arm_feature(env, ARM_FEATURE_V8)) { + /* + * QEMU syndrome values are v8-style. v7 has the IL bit + * UNK/SBZP for "field not valid" cases, where v8 uses RES1. + * If this is a v7 CPU, squash the IL bit in those cases. + */ + if (cs->exception_index == EXCP_PREFETCH_ABORT || + (cs->exception_index == EXCP_DATA_ABORT && + !(env->exception.syndrome & ARM_EL_ISV)) || + syn_get_ec(env->exception.syndrome) == EC_UNCATEGORIZED) { + env->exception.syndrome &= ~ARM_EL_IL; + } + } env->cp15.esr_el[2] = env->exception.syndrome; } @@ -8228,7 +8347,7 @@ static void arm_cpu_do_interrupt_aarch32(CPUState *cs) uint32_t moe; /* If this is a debug exception we must update the DBGDSCR.MOE bits */ - switch (env->exception.syndrome >> ARM_EL_EC_SHIFT) { + switch (syn_get_ec(env->exception.syndrome)) { case EC_BREAKPOINT: case EC_BREAKPOINT_SAME_EL: moe = 1; @@ -8425,6 +8544,15 @@ static void arm_cpu_do_interrupt_aarch64(CPUState *cs) case EXCP_HVC: case EXCP_HYP_TRAP: case EXCP_SMC: + if (syn_get_ec(env->exception.syndrome) == EC_ADVSIMDFPACCESSTRAP) { + /* + * QEMU internal FP/SIMD syndromes from AArch32 include the + * TA and coproc fields which are only exposed if the exception + * is taken to AArch32 Hyp mode. Mask them out to get a valid + * AArch64 format syndrome. 
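The constants involved in this masking are small enough to check in isolation. This standalone sketch re-derives the AArch32 FP-access syndrome and the low-20-bit stripping applied on the EXCP_HVC/SMC path; the macros are local re-definitions mirroring the internals.h values quoted later in this patch:

#include <assert.h>
#include <stdint.h>

#define EC_ADVSIMDFPACCESSTRAP 0x07
#define ARM_EL_EC_SHIFT 26
#define ARM_EL_IL (1u << 25)
#define MAKE_64BIT_MASK(shift, length) \
    (((~0ULL) >> (64 - (length))) << (shift))

/* AArch32 FP trap: EC, IL, CV, COND, plus coproc == 0xa in the low bits */
static uint32_t syn_fp_access_trap(int cv, int cond, int is_16bit)
{
    return (EC_ADVSIMDFPACCESSTRAP << ARM_EL_EC_SHIFT)
        | (is_16bit ? 0 : ARM_EL_IL)
        | (cv << 24) | (cond << 20) | 0xa;
}

int main(void)
{
    uint32_t syn = syn_fp_access_trap(1, 0xe, 0);
    /* Taking the trap to AArch64 strips the AArch32-only TA/coproc
     * information out of the bottom 20 bits of the syndrome. */
    uint32_t aa64 = syn & ~(uint32_t)MAKE_64BIT_MASK(0, 20);
    assert((aa64 & 0xfffff) == 0);
    assert(aa64 >> ARM_EL_EC_SHIFT == EC_ADVSIMDFPACCESSTRAP);
    return 0;
}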
+ */ + env->exception.syndrome &= ~MAKE_64BIT_MASK(0, 20); + } env->cp15.esr_el[new_el] = env->exception.syndrome; break; case EXCP_IRQ: @@ -8568,7 +8696,7 @@ void arm_cpu_do_interrupt(CPUState *cs) if (qemu_loglevel_mask(CPU_LOG_INT) && !excp_is_internal(cs->exception_index)) { qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%x/0x%" PRIx32 "\n", - env->exception.syndrome >> ARM_EL_EC_SHIFT, + syn_get_ec(env->exception.syndrome), env->exception.syndrome); } @@ -8665,7 +8793,8 @@ static inline bool regime_translation_disabled(CPUARMState *env, } if (mmu_idx == ARMMMUIdx_S2NS) { - return (env->cp15.hcr_el2 & HCR_VM) == 0; + /* HCR.DC means HCR.VM behaves as 1 */ + return (env->cp15.hcr_el2 & (HCR_DC | HCR_VM)) == 0; } if (env->cp15.hcr_el2 & HCR_TGE) { @@ -8675,6 +8804,12 @@ static inline bool regime_translation_disabled(CPUARMState *env, } } + if ((env->cp15.hcr_el2 & HCR_DC) && + (mmu_idx == ARMMMUIdx_S1NSE0 || mmu_idx == ARMMMUIdx_S1NSE1)) { + /* HCR.DC means SCTLR_EL1.M behaves as 0 */ + return true; + } + return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0; } @@ -9026,9 +9161,20 @@ static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx, hwaddr s2pa; int s2prot; int ret; + ARMCacheAttrs cacheattrs = {}; + ARMCacheAttrs *pcacheattrs = NULL; + + if (env->cp15.hcr_el2 & HCR_PTW) { + /* + * PTW means we must fault if this S1 walk touches S2 Device + * memory; otherwise we don't care about the attributes and can + * save the S2 translation the effort of computing them. + */ + pcacheattrs = &cacheattrs; + } ret = get_phys_addr_lpae(env, addr, 0, ARMMMUIdx_S2NS, &s2pa, - &txattrs, &s2prot, &s2size, fi, NULL); + &txattrs, &s2prot, &s2size, fi, pcacheattrs); if (ret) { assert(fi->type != ARMFault_None); fi->s2addr = addr; @@ -9036,6 +9182,14 @@ static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx, fi->s1ptw = true; return ~0; } + if (pcacheattrs && (pcacheattrs->attrs & 0xf0) == 0) { + /* Access was to Device memory: generate Permission fault */ + fi->type = ARMFault_Permission; + fi->s2addr = addr; + fi->stage2 = true; + fi->s1ptw = true; + return ~0; + } addr = s2pa; } return addr; @@ -10655,6 +10809,16 @@ static bool get_phys_addr(CPUARMState *env, target_ulong address, /* Combine the S1 and S2 cache attributes, if needed */ if (!ret && cacheattrs != NULL) { + if (env->cp15.hcr_el2 & HCR_DC) { + /* + * HCR.DC forces the first stage attributes to + * Normal Non-Shareable, + * Inner Write-Back Read-Allocate Write-Allocate, + * Outer Write-Back Read-Allocate Write-Allocate. + */ + cacheattrs->attrs = 0xff; + cacheattrs->shareability = 0; + } *cacheattrs = combine_cacheattrs(*cacheattrs, cacheattrs2); } @@ -11612,7 +11776,7 @@ void HELPER(vfp_set_fpscr)(CPUARMState *env, uint32_t val) uint32_t changed; /* When ARMv8.2-FP16 is not supported, FZ16 is RES0. 
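The Device-memory test used on the stage-1 walk path, (attrs & 0xf0) == 0, and the 0xff attribute byte forced by HCR.DC both follow the MAIR attribute encoding; a small self-contained check of that convention (the encodings named in the comments are from the ARM ARM, the helper itself is illustrative):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* In MAIR-style attribute encoding, a zero upper nibble means Device
 * memory; anything else is Normal, with inner/outer cacheability nibbles. */
static bool attrs_are_device(uint8_t attrs)
{
    return (attrs & 0xf0) == 0;
}

int main(void)
{
    assert(attrs_are_device(0x00));   /* Device-nGnRnE */
    assert(attrs_are_device(0x04));   /* Device-nGnRE */
    assert(!attrs_are_device(0xff));  /* Normal WB RA WA: what HCR.DC forces */
    return 0;
}

This is why the S1_ptw_translate() change only bothers computing S2 cache attributes when HCR.PTW is set: the attribute byte is needed solely to apply this Device test and fault the walk.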
*/ - if (!arm_feature(env, ARM_FEATURE_V8_FP16)) { + if (!cpu_isar_feature(aa64_fp16, arm_env_get_cpu(env))) { val &= ~FPCR_FZ16; } @@ -12671,13 +12835,15 @@ void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc, uint32_t flags; if (is_a64(env)) { + ARMCPU *cpu = arm_env_get_cpu(env); + *pc = env->pc; flags = ARM_TBFLAG_AARCH64_STATE_MASK; /* Get control bits for tagged addresses */ flags |= (arm_regime_tbi0(env, mmu_idx) << ARM_TBFLAG_TBI0_SHIFT); flags |= (arm_regime_tbi1(env, mmu_idx) << ARM_TBFLAG_TBI1_SHIFT); - if (arm_feature(env, ARM_FEATURE_SVE)) { + if (cpu_isar_feature(aa64_sve, cpu)) { int sve_el = sve_exception_el(env, current_el); uint32_t zcr_len; @@ -12801,11 +12967,12 @@ void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq) void aarch64_sve_change_el(CPUARMState *env, int old_el, int new_el, bool el0_a64) { + ARMCPU *cpu = arm_env_get_cpu(env); int old_len, new_len; bool old_a64, new_a64; /* Nothing to do if no SVE. */ - if (!arm_feature(env, ARM_FEATURE_SVE)) { + if (!cpu_isar_feature(aa64_sve, cpu)) { return; } diff --git a/target/arm/internals.h b/target/arm/internals.h index a4fc709bcc..6c2bb2deeb 100644 --- a/target/arm/internals.h +++ b/target/arm/internals.h @@ -145,7 +145,6 @@ static inline int bank_number(int mode) g_assert_not_reached(); } -void switch_mode(CPUARMState *, int); void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu); void arm_translate_init(void); @@ -279,14 +278,19 @@ enum arm_exception_class { #define ARM_EL_IL (1 << ARM_EL_IL_SHIFT) #define ARM_EL_ISV (1 << ARM_EL_ISV_SHIFT) +static inline uint32_t syn_get_ec(uint32_t syn) +{ + return syn >> ARM_EL_EC_SHIFT; +} + /* Utility functions for constructing various kinds of syndrome value. * Note that in general we follow the AArch64 syndrome values; in a * few cases the value in HSR for exceptions taken to AArch32 Hyp - * mode differs slightly, so if we ever implemented Hyp mode then the - * syndrome value would need some massaging on exception entry. - * (One example of this is that AArch64 defaults to IL bit set for - * exceptions which don't specifically indicate information about the - * trapping instruction, whereas AArch32 defaults to IL bit clear.) + * mode differs slightly, and we fix this up when populating HSR in + * arm_cpu_do_interrupt_aarch32_hyp(). + * The exception is FP/SIMD access traps -- these report extra information + * when taking an exception to AArch32. For those we include the extra coproc + * and TA fields, and mask them out when taking the exception to AArch64. */ static inline uint32_t syn_uncategorized(void) { @@ -386,9 +390,18 @@ static inline uint32_t syn_cp15_rrt_trap(int cv, int cond, int opc1, int crm, static inline uint32_t syn_fp_access_trap(int cv, int cond, bool is_16bit) { + /* AArch32 FP trap or any AArch64 FP/SIMD trap: TA == 0 coproc == 0xa */ + return (EC_ADVSIMDFPACCESSTRAP << ARM_EL_EC_SHIFT) + | (is_16bit ? 0 : ARM_EL_IL) + | (cv << 24) | (cond << 20) | 0xa; +} + +static inline uint32_t syn_simd_access_trap(int cv, int cond, bool is_16bit) +{ + /* AArch32 SIMD trap: TA == 1 coproc == 0 */ return (EC_ADVSIMDFPACCESSTRAP << ARM_EL_EC_SHIFT) | (is_16bit ? 
0 : ARM_EL_IL) - | (cv << 24) | (cond << 20); + | (cv << 24) | (cond << 20) | (1 << 5); } static inline uint32_t syn_sve_access_trap(void) @@ -840,4 +853,22 @@ static inline uint32_t v7m_sp_limit(CPUARMState *env) } } +/** + * aarch32_mode_name(): Return name of the AArch32 CPU mode + * @psr: Program Status Register indicating CPU mode + * + * Returns, for debug logging purposes, a printable representation + * of the AArch32 CPU mode ("svc", "usr", etc) as indicated by + * the low bits of the specified PSR. + */ +static inline const char *aarch32_mode_name(uint32_t psr) +{ + static const char cpu_mode_names[16][4] = { + "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt", + "???", "???", "hyp", "und", "???", "???", "???", "sys" + }; + + return cpu_mode_names[psr & 0xf]; +} + #endif diff --git a/target/arm/kvm.c b/target/arm/kvm.c index 54ef5f711b..09a86e2820 100644 --- a/target/arm/kvm.c +++ b/target/arm/kvm.c @@ -34,6 +34,7 @@ const KVMCapabilityInfo kvm_arch_required_capabilities[] = { }; static bool cap_has_mp_state; +static bool cap_has_inject_serror_esr; static ARMHostCPUFeatures arm_host_cpu_features; @@ -48,6 +49,12 @@ int kvm_arm_vcpu_init(CPUState *cs) return kvm_vcpu_ioctl(cs, KVM_ARM_VCPU_INIT, &init); } +void kvm_arm_init_serror_injection(CPUState *cs) +{ + cap_has_inject_serror_esr = kvm_check_extension(cs->kvm_state, + KVM_CAP_ARM_INJECT_SERROR_ESR); +} + bool kvm_arm_create_scratch_host_vcpu(const uint32_t *cpus_to_try, int *fdarray, struct kvm_vcpu_init *init) @@ -522,6 +529,59 @@ int kvm_arm_sync_mpstate_to_qemu(ARMCPU *cpu) return 0; } +int kvm_put_vcpu_events(ARMCPU *cpu) +{ + CPUARMState *env = &cpu->env; + struct kvm_vcpu_events events; + int ret; + + if (!kvm_has_vcpu_events()) { + return 0; + } + + memset(&events, 0, sizeof(events)); + events.exception.serror_pending = env->serror.pending; + + /* Inject SError to guest with specified syndrome if host kernel + * supports it, otherwise inject SError without syndrome. + */ + if (cap_has_inject_serror_esr) { + events.exception.serror_has_esr = env->serror.has_esr; + events.exception.serror_esr = env->serror.esr; + } + + ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_VCPU_EVENTS, &events); + if (ret) { + error_report("failed to put vcpu events"); + } + + return ret; +} + +int kvm_get_vcpu_events(ARMCPU *cpu) +{ + CPUARMState *env = &cpu->env; + struct kvm_vcpu_events events; + int ret; + + if (!kvm_has_vcpu_events()) { + return 0; + } + + memset(&events, 0, sizeof(events)); + ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_VCPU_EVENTS, &events); + if (ret) { + error_report("failed to get vcpu events"); + return ret; + } + + env->serror.pending = events.exception.serror_pending; + env->serror.has_esr = events.exception.serror_has_esr; + env->serror.esr = events.exception.serror_esr; + + return 0; +} + void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run) { } diff --git a/target/arm/kvm32.c b/target/arm/kvm32.c index 4e91c11796..0f1e94c7b5 100644 --- a/target/arm/kvm32.c +++ b/target/arm/kvm32.c @@ -217,6 +217,9 @@ int kvm_arch_init_vcpu(CPUState *cs) } cpu->mp_affinity = mpidr & ARM32_AFFINITY_MASK; + /* Check whether userspace can specify guest syndrome value */ + kvm_arm_init_serror_injection(cs); + return kvm_arm_init_cpreg_list(cpu); } @@ -358,6 +361,11 @@ int kvm_arch_put_registers(CPUState *cs, int level) return ret; } + ret = kvm_put_vcpu_events(cpu); + if (ret) { + return ret; + } + /* Note that we do not call write_cpustate_to_list() * here, so we are only writing the tuple list back to * KVM. 
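The put side of the SError state only forwards a syndrome when the host kernel advertised KVM_CAP_ARM_INJECT_SERROR_ESR. A reduced model of that gating, with toy structs standing in for the kvm_vcpu_events and CPUARMState fields (names here are invented, not the kernel ABI):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

struct toy_events { uint8_t serror_pending, serror_has_esr; uint64_t serror_esr; };
struct toy_env    { uint8_t pending, has_esr; uint64_t esr; };

static bool cap_has_inject_serror_esr; /* probed once at vcpu init */

static void put_vcpu_events(const struct toy_env *env, struct toy_events *ev)
{
    memset(ev, 0, sizeof(*ev));
    ev->serror_pending = env->pending;
    /* Only pass a syndrome if the host kernel can accept one; otherwise
     * the SError is injected with an unspecified syndrome. */
    if (cap_has_inject_serror_esr) {
        ev->serror_has_esr = env->has_esr;
        ev->serror_esr = env->esr;
    }
}

int main(void)
{
    struct toy_env env = { .pending = 1, .has_esr = 1, .esr = 0xdead };
    struct toy_events ev;

    cap_has_inject_serror_esr = false;
    put_vcpu_events(&env, &ev);
    assert(ev.serror_pending && !ev.serror_has_esr); /* syndrome dropped */

    cap_has_inject_serror_esr = true;
    put_vcpu_events(&env, &ev);
    assert(ev.serror_has_esr && ev.serror_esr == 0xdead);
    return 0;
}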
This is safe because nothing can change the @@ -445,6 +453,11 @@ int kvm_arch_get_registers(CPUState *cs) } vfp_set_fpscr(env, fpscr); + ret = kvm_get_vcpu_events(cpu); + if (ret) { + return ret; + } + if (!write_kvmstate_to_list(cpu)) { return EINVAL; } diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c index e0b8246283..5de8ff0ac5 100644 --- a/target/arm/kvm64.c +++ b/target/arm/kvm64.c @@ -546,6 +546,9 @@ int kvm_arch_init_vcpu(CPUState *cs) kvm_arm_init_debug(cs); + /* Check whether user space can specify guest syndrome value */ + kvm_arm_init_serror_injection(cs); + return kvm_arm_init_cpreg_list(cpu); } @@ -727,6 +730,11 @@ int kvm_arch_put_registers(CPUState *cs, int level) return ret; } + ret = kvm_put_vcpu_events(cpu); + if (ret) { + return ret; + } + if (!write_list_to_kvmstate(cpu, level)) { return EINVAL; } @@ -863,6 +871,11 @@ int kvm_arch_get_registers(CPUState *cs) } vfp_set_fpcr(env, fpr); + ret = kvm_get_vcpu_events(cpu); + if (ret) { + return ret; + } + if (!write_kvmstate_to_list(cpu)) { return EINVAL; } @@ -920,7 +933,7 @@ int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp) bool kvm_arm_handle_debug(CPUState *cs, struct kvm_debug_exit_arch *debug_exit) { - int hsr_ec = debug_exit->hsr >> ARM_EL_EC_SHIFT; + int hsr_ec = syn_get_ec(debug_exit->hsr); ARMCPU *cpu = ARM_CPU(cs); CPUClass *cc = CPU_GET_CLASS(cs); CPUARMState *env = &cpu->env; diff --git a/target/arm/kvm_arm.h b/target/arm/kvm_arm.h index 5948e8b560..21c0129da2 100644 --- a/target/arm/kvm_arm.h +++ b/target/arm/kvm_arm.h @@ -121,6 +121,30 @@ bool write_kvmstate_to_list(ARMCPU *cpu); */ void kvm_arm_reset_vcpu(ARMCPU *cpu); +/** + * kvm_arm_init_serror_injection: + * @cs: CPUState + * + * Check whether KVM can set guest SError syndrome. + */ +void kvm_arm_init_serror_injection(CPUState *cs); + +/** + * kvm_get_vcpu_events: + * @cpu: ARMCPU + * + * Get VCPU related state from kvm. + */ +int kvm_get_vcpu_events(ARMCPU *cpu); + +/** + * kvm_put_vcpu_events: + * @cpu: ARMCPU + * + * Put VCPU related state to kvm. + */ +int kvm_put_vcpu_events(ARMCPU *cpu); + #ifdef CONFIG_KVM /** * kvm_arm_create_scratch_host_vcpu: diff --git a/target/arm/machine.c b/target/arm/machine.c index ff4ec22bf7..239fe4e84d 100644 --- a/target/arm/machine.c +++ b/target/arm/machine.c @@ -131,9 +131,8 @@ static const VMStateDescription vmstate_iwmmxt = { static bool sve_needed(void *opaque) { ARMCPU *cpu = opaque; - CPUARMState *env = &cpu->env; - return arm_feature(env, ARM_FEATURE_SVE); + return cpu_isar_feature(aa64_sve, cpu); } /* The first two words of each Zreg is stored in VFP state. 
*/ @@ -172,6 +171,27 @@ static const VMStateDescription vmstate_sve = { }; #endif /* AARCH64 */ +static bool serror_needed(void *opaque) +{ + ARMCPU *cpu = opaque; + CPUARMState *env = &cpu->env; + + return env->serror.pending != 0; +} + +static const VMStateDescription vmstate_serror = { + .name = "cpu/serror", + .version_id = 1, + .minimum_version_id = 1, + .needed = serror_needed, + .fields = (VMStateField[]) { + VMSTATE_UINT8(env.serror.pending, ARMCPU), + VMSTATE_UINT8(env.serror.has_esr, ARMCPU), + VMSTATE_UINT64(env.serror.esr, ARMCPU), + VMSTATE_END_OF_LIST() + } +}; + static bool m_needed(void *opaque) { ARMCPU *cpu = opaque; @@ -726,6 +746,7 @@ const VMStateDescription vmstate_arm_cpu = { #ifdef TARGET_AARCH64 &vmstate_sve, #endif + &vmstate_serror, NULL } }; diff --git a/target/arm/op_helper.c b/target/arm/op_helper.c index d915579712..90741f6331 100644 --- a/target/arm/op_helper.c +++ b/target/arm/op_helper.c @@ -42,7 +42,7 @@ void raise_exception(CPUARMState *env, uint32_t excp, * (see DDI0478C.a D1.10.4) */ target_el = 2; - if (syndrome >> ARM_EL_EC_SHIFT == EC_ADVSIMDFPACCESSTRAP) { + if (syn_get_ec(syndrome) == EC_ADVSIMDFPACCESSTRAP) { syndrome = syn_uncategorized(); } } diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c index 8a24278d79..88195ab949 100644 --- a/target/arm/translate-a64.c +++ b/target/arm/translate-a64.c @@ -37,6 +37,7 @@ #include "trace-tcg.h" #include "translate-a64.h" +#include "qemu/atomic128.h" static TCGv_i64 cpu_X[32]; static TCGv_i64 cpu_pc; @@ -173,7 +174,7 @@ void aarch64_cpu_dump_state(CPUState *cs, FILE *f, cpu_fprintf(f, " FPCR=%08x FPSR=%08x\n", vfp_get_fpcr(env), vfp_get_fpsr(env)); - if (arm_feature(env, ARM_FEATURE_SVE) && sve_exception_el(env, el) == 0) { + if (cpu_isar_feature(aa64_sve, cpu) && sve_exception_el(env, el) == 0) { int j, zcr_len = sve_zcr_len_for_el(env, el); for (i = 0; i <= FFR_PRED_NUM; i++) { @@ -1200,25 +1201,23 @@ static void write_vec_element_i32(DisasContext *s, TCGv_i32 tcg_src, /* Store from vector register to memory */ static void do_vec_st(DisasContext *s, int srcidx, int element, - TCGv_i64 tcg_addr, int size) + TCGv_i64 tcg_addr, int size, TCGMemOp endian) { - TCGMemOp memop = s->be_data + size; TCGv_i64 tcg_tmp = tcg_temp_new_i64(); read_vec_element(s, tcg_tmp, srcidx, element, size); - tcg_gen_qemu_st_i64(tcg_tmp, tcg_addr, get_mem_index(s), memop); + tcg_gen_qemu_st_i64(tcg_tmp, tcg_addr, get_mem_index(s), endian | size); tcg_temp_free_i64(tcg_tmp); } /* Load from memory to vector register */ static void do_vec_ld(DisasContext *s, int destidx, int element, - TCGv_i64 tcg_addr, int size) + TCGv_i64 tcg_addr, int size, TCGMemOp endian) { - TCGMemOp memop = s->be_data + size; TCGv_i64 tcg_tmp = tcg_temp_new_i64(); - tcg_gen_qemu_ld_i64(tcg_tmp, tcg_addr, get_mem_index(s), memop); + tcg_gen_qemu_ld_i64(tcg_tmp, tcg_addr, get_mem_index(s), endian | size); write_vec_element(s, tcg_tmp, destidx, element, size); tcg_temp_free_i64(tcg_tmp); @@ -2086,26 +2085,27 @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2, get_mem_index(s), MO_64 | MO_ALIGN | s->be_data); tcg_gen_setcond_i64(TCG_COND_NE, tmp, tmp, cpu_exclusive_val); - } else if (s->be_data == MO_LE) { - if (tb_cflags(s->base.tb) & CF_PARALLEL) { + } else if (tb_cflags(s->base.tb) & CF_PARALLEL) { + if (!HAVE_CMPXCHG128) { + gen_helper_exit_atomic(cpu_env); + s->base.is_jmp = DISAS_NORETURN; + } else if (s->be_data == MO_LE) { gen_helper_paired_cmpxchg64_le_parallel(tmp, cpu_env, cpu_exclusive_addr, cpu_reg(s, rt), 
cpu_reg(s, rt2)); } else { - gen_helper_paired_cmpxchg64_le(tmp, cpu_env, cpu_exclusive_addr, - cpu_reg(s, rt), cpu_reg(s, rt2)); - } - } else { - if (tb_cflags(s->base.tb) & CF_PARALLEL) { gen_helper_paired_cmpxchg64_be_parallel(tmp, cpu_env, cpu_exclusive_addr, cpu_reg(s, rt), cpu_reg(s, rt2)); - } else { - gen_helper_paired_cmpxchg64_be(tmp, cpu_env, cpu_exclusive_addr, - cpu_reg(s, rt), cpu_reg(s, rt2)); } + } else if (s->be_data == MO_LE) { + gen_helper_paired_cmpxchg64_le(tmp, cpu_env, cpu_exclusive_addr, + cpu_reg(s, rt), cpu_reg(s, rt2)); + } else { + gen_helper_paired_cmpxchg64_be(tmp, cpu_env, cpu_exclusive_addr, + cpu_reg(s, rt), cpu_reg(s, rt2)); } } else { tcg_gen_atomic_cmpxchg_i64(tmp, cpu_exclusive_addr, cpu_exclusive_val, @@ -2175,14 +2175,18 @@ static void gen_compare_and_swap_pair(DisasContext *s, int rs, int rt, } tcg_temp_free_i64(cmp); } else if (tb_cflags(s->base.tb) & CF_PARALLEL) { - TCGv_i32 tcg_rs = tcg_const_i32(rs); - - if (s->be_data == MO_LE) { - gen_helper_casp_le_parallel(cpu_env, tcg_rs, addr, t1, t2); + if (HAVE_CMPXCHG128) { + TCGv_i32 tcg_rs = tcg_const_i32(rs); + if (s->be_data == MO_LE) { + gen_helper_casp_le_parallel(cpu_env, tcg_rs, addr, t1, t2); + } else { + gen_helper_casp_be_parallel(cpu_env, tcg_rs, addr, t1, t2); + } + tcg_temp_free_i32(tcg_rs); } else { - gen_helper_casp_be_parallel(cpu_env, tcg_rs, addr, t1, t2); + gen_helper_exit_atomic(cpu_env); + s->base.is_jmp = DISAS_NORETURN; } - tcg_temp_free_i32(tcg_rs); } else { TCGv_i64 d1 = tcg_temp_new_i64(); TCGv_i64 d2 = tcg_temp_new_i64(); @@ -2322,7 +2326,7 @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn) } if (rt2 == 31 && ((rt | rs) & 1) == 0 - && arm_dc_feature(s, ARM_FEATURE_V8_ATOMICS)) { + && dc_isar_feature(aa64_atomics, s)) { /* CASP / CASPL */ gen_compare_and_swap_pair(s, rs, rt, rn, size | 2); return; @@ -2344,7 +2348,7 @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn) } if (rt2 == 31 && ((rt | rs) & 1) == 0 - && arm_dc_feature(s, ARM_FEATURE_V8_ATOMICS)) { + && dc_isar_feature(aa64_atomics, s)) { /* CASPA / CASPAL */ gen_compare_and_swap_pair(s, rs, rt, rn, size | 2); return; @@ -2355,7 +2359,7 @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn) case 0xb: /* CASL */ case 0xe: /* CASA */ case 0xf: /* CASAL */ - if (rt2 == 31 && arm_dc_feature(s, ARM_FEATURE_V8_ATOMICS)) { + if (rt2 == 31 && dc_isar_feature(aa64_atomics, s)) { gen_compare_and_swap(s, rs, rt, rn, size); return; } @@ -2894,11 +2898,10 @@ static void disas_ldst_atomic(DisasContext *s, uint32_t insn, int rs = extract32(insn, 16, 5); int rn = extract32(insn, 5, 5); int o3_opc = extract32(insn, 12, 4); - int feature = ARM_FEATURE_V8_ATOMICS; TCGv_i64 tcg_rn, tcg_rs; AtomicThreeOpFn *fn; - if (is_vector) { + if (is_vector || !dc_isar_feature(aa64_atomics, s)) { unallocated_encoding(s); return; } @@ -2934,10 +2937,6 @@ static void disas_ldst_atomic(DisasContext *s, uint32_t insn, unallocated_encoding(s); return; } - if (!arm_dc_feature(s, feature)) { - unallocated_encoding(s); - return; - } if (rn == 31) { gen_check_sp_alignment(s); @@ -3017,10 +3016,11 @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn) bool is_store = !extract32(insn, 22, 1); bool is_postidx = extract32(insn, 23, 1); bool is_q = extract32(insn, 30, 1); - TCGv_i64 tcg_addr, tcg_rn; + TCGv_i64 tcg_addr, tcg_rn, tcg_ebytes; + TCGMemOp endian = s->be_data; - int ebytes = 1 << size; - int elements = (is_q ? 
128 : 64) / (8 << size); + int ebytes; /* bytes per element */ + int elements; /* elements per vector */ int rpt; /* num iterations */ int selem; /* structure elements */ int r; @@ -3079,39 +3079,55 @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn) gen_check_sp_alignment(s); } + /* For our purposes, bytes are always little-endian. */ + if (size == 0) { + endian = MO_LE; + } + + /* Consecutive little-endian elements from a single register + * can be promoted to a larger little-endian operation. + */ + if (selem == 1 && endian == MO_LE) { + size = 3; + } + ebytes = 1 << size; + elements = (is_q ? 16 : 8) / ebytes; + tcg_rn = cpu_reg_sp(s, rn); tcg_addr = tcg_temp_new_i64(); tcg_gen_mov_i64(tcg_addr, tcg_rn); + tcg_ebytes = tcg_const_i64(ebytes); for (r = 0; r < rpt; r++) { int e; for (e = 0; e < elements; e++) { - int tt = (rt + r) % 32; int xs; for (xs = 0; xs < selem; xs++) { + int tt = (rt + r + xs) % 32; if (is_store) { - do_vec_st(s, tt, e, tcg_addr, size); + do_vec_st(s, tt, e, tcg_addr, size, endian); } else { - do_vec_ld(s, tt, e, tcg_addr, size); - - /* For non-quad operations, setting a slice of the low - * 64 bits of the register clears the high 64 bits (in - * the ARM ARM pseudocode this is implicit in the fact - * that 'rval' is a 64 bit wide variable). - * For quad operations, we might still need to zero the - * high bits of SVE. We optimize by noticing that we only - * need to do this the first time we touch a register. - */ - if (e == 0 && (r == 0 || xs == selem - 1)) { - clear_vec_high(s, is_q, tt); - } + do_vec_ld(s, tt, e, tcg_addr, size, endian); } - tcg_gen_addi_i64(tcg_addr, tcg_addr, ebytes); - tt = (tt + 1) % 32; + tcg_gen_add_i64(tcg_addr, tcg_addr, tcg_ebytes); } } } + if (!is_store) { + /* For non-quad operations, setting a slice of the low + * 64 bits of the register clears the high 64 bits (in + * the ARM ARM pseudocode this is implicit in the fact + * that 'rval' is a 64 bit wide variable). + * For quad operations, we might still need to zero the + * high bits of SVE. 
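The promotion arithmetic above is easy to sanity-check by hand. Assuming a little-endian access and LD1 {v0.16b} as the example (one structure element, Q register), the sketch below reproduces the ebytes/elements computation:

#include <stdio.h>

/* Reproduces the element-promotion arithmetic for one operand shape.
 * size is log2(element bytes); a Q register is 16 bytes, a D register 8. */
int main(void)
{
    int is_q = 1, selem = 1, size = 0;       /* e.g. LD1 {v0.16b}, [x0] */
    int little_endian = 1;

    if (selem == 1 && little_endian) {
        size = 3;                            /* promote to 64-bit accesses */
    }
    int ebytes = 1 << size;
    int elements = (is_q ? 16 : 8) / ebytes;

    /* 16 one-byte loads collapse into 2 eight-byte loads */
    printf("ebytes=%d elements=%d accesses=%d\n",
           ebytes, elements, elements * selem);
    return 0;
}

The promotion is legal precisely because consecutive little-endian elements of a single register are also consecutive, same-order bytes in memory; with selem > 1 the registers interleave and no such merge is possible.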
+ */ + for (r = 0; r < rpt * selem; r++) { + int tt = (rt + r) % 32; + clear_vec_high(s, is_q, tt); + } + } + if (is_postidx) { int rm = extract32(insn, 16, 5); if (rm == 31) { @@ -3120,6 +3136,7 @@ static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn) tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, rm)); } } + tcg_temp_free_i64(tcg_ebytes); tcg_temp_free_i64(tcg_addr); } @@ -3162,7 +3179,7 @@ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn) bool replicate = false; int index = is_q << 3 | S << 2 | size; int ebytes, xs; - TCGv_i64 tcg_addr, tcg_rn; + TCGv_i64 tcg_addr, tcg_rn, tcg_ebytes; switch (scale) { case 3: @@ -3215,49 +3232,28 @@ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn) tcg_rn = cpu_reg_sp(s, rn); tcg_addr = tcg_temp_new_i64(); tcg_gen_mov_i64(tcg_addr, tcg_rn); + tcg_ebytes = tcg_const_i64(ebytes); for (xs = 0; xs < selem; xs++) { if (replicate) { /* Load and replicate to all elements */ - uint64_t mulconst; TCGv_i64 tcg_tmp = tcg_temp_new_i64(); tcg_gen_qemu_ld_i64(tcg_tmp, tcg_addr, get_mem_index(s), s->be_data + scale); - switch (scale) { - case 0: - mulconst = 0x0101010101010101ULL; - break; - case 1: - mulconst = 0x0001000100010001ULL; - break; - case 2: - mulconst = 0x0000000100000001ULL; - break; - case 3: - mulconst = 0; - break; - default: - g_assert_not_reached(); - } - if (mulconst) { - tcg_gen_muli_i64(tcg_tmp, tcg_tmp, mulconst); - } - write_vec_element(s, tcg_tmp, rt, 0, MO_64); - if (is_q) { - write_vec_element(s, tcg_tmp, rt, 1, MO_64); - } + tcg_gen_gvec_dup_i64(scale, vec_full_reg_offset(s, rt), + (is_q + 1) * 8, vec_full_reg_size(s), + tcg_tmp); tcg_temp_free_i64(tcg_tmp); - clear_vec_high(s, is_q, rt); } else { /* Load/store one element per register */ if (is_load) { - do_vec_ld(s, rt, index, tcg_addr, scale); + do_vec_ld(s, rt, index, tcg_addr, scale, s->be_data); } else { - do_vec_st(s, rt, index, tcg_addr, scale); + do_vec_st(s, rt, index, tcg_addr, scale, s->be_data); } } - tcg_gen_addi_i64(tcg_addr, tcg_addr, ebytes); + tcg_gen_add_i64(tcg_addr, tcg_addr, tcg_ebytes); rt = (rt + 1) % 32; } @@ -3269,6 +3265,7 @@ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn) tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, rm)); } } + tcg_temp_free_i64(tcg_ebytes); tcg_temp_free_i64(tcg_addr); } @@ -4568,7 +4565,7 @@ static void handle_crc32(DisasContext *s, TCGv_i64 tcg_acc, tcg_val; TCGv_i32 tcg_bytes; - if (!arm_dc_feature(s, ARM_FEATURE_CRC) + if (!dc_isar_feature(aa64_crc32, s) || (sf == 1 && sz != 3) || (sf == 0 && sz == 3)) { unallocated_encoding(s); @@ -4810,7 +4807,7 @@ static void disas_fp_compare(DisasContext *s, uint32_t insn) break; case 3: size = MO_16; - if (arm_dc_feature(s, ARM_FEATURE_V8_FP16)) { + if (dc_isar_feature(aa64_fp16, s)) { break; } /* fallthru */ @@ -4861,7 +4858,7 @@ static void disas_fp_ccomp(DisasContext *s, uint32_t insn) break; case 3: size = MO_16; - if (arm_dc_feature(s, ARM_FEATURE_V8_FP16)) { + if (dc_isar_feature(aa64_fp16, s)) { break; } /* fallthru */ @@ -4927,7 +4924,7 @@ static void disas_fp_csel(DisasContext *s, uint32_t insn) break; case 3: sz = MO_16; - if (arm_dc_feature(s, ARM_FEATURE_V8_FP16)) { + if (dc_isar_feature(aa64_fp16, s)) { break; } /* fallthru */ @@ -5260,7 +5257,7 @@ static void disas_fp_1src(DisasContext *s, uint32_t insn) handle_fp_1src_double(s, opcode, rd, rn); break; case 3: - if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) { + if (!dc_isar_feature(aa64_fp16, s)) { unallocated_encoding(s); return; } @@ -5475,7 +5472,7 @@ static void 
disas_fp_2src(DisasContext *s, uint32_t insn) handle_fp_2src_double(s, opcode, rd, rn, rm); break; case 3: - if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) { + if (!dc_isar_feature(aa64_fp16, s)) { unallocated_encoding(s); return; } @@ -5633,7 +5630,7 @@ static void disas_fp_3src(DisasContext *s, uint32_t insn) handle_fp_3src_double(s, o0, o1, rd, rn, rm, ra); break; case 3: - if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) { + if (!dc_isar_feature(aa64_fp16, s)) { unallocated_encoding(s); return; } @@ -5703,7 +5700,7 @@ static void disas_fp_imm(DisasContext *s, uint32_t insn) break; case 3: sz = MO_16; - if (arm_dc_feature(s, ARM_FEATURE_V8_FP16)) { + if (dc_isar_feature(aa64_fp16, s)) { break; } /* fallthru */ @@ -5928,7 +5925,7 @@ static void disas_fp_fixed_conv(DisasContext *s, uint32_t insn) case 1: /* float64 */ break; case 3: /* float16 */ - if (arm_dc_feature(s, ARM_FEATURE_V8_FP16)) { + if (dc_isar_feature(aa64_fp16, s)) { break; } /* fallthru */ @@ -6058,7 +6055,7 @@ static void disas_fp_int_conv(DisasContext *s, uint32_t insn) break; case 0x6: /* 16-bit float, 32-bit int */ case 0xe: /* 16-bit float, 64-bit int */ - if (arm_dc_feature(s, ARM_FEATURE_V8_FP16)) { + if (dc_isar_feature(aa64_fp16, s)) { break; } /* fallthru */ @@ -6085,7 +6082,7 @@ static void disas_fp_int_conv(DisasContext *s, uint32_t insn) case 1: /* float64 */ break; case 3: /* float16 */ - if (arm_dc_feature(s, ARM_FEATURE_V8_FP16)) { + if (dc_isar_feature(aa64_fp16, s)) { break; } /* fallthru */ @@ -6522,7 +6519,7 @@ static void disas_simd_across_lanes(DisasContext *s, uint32_t insn) */ is_min = extract32(size, 1, 1); is_fp = true; - if (!is_u && arm_dc_feature(s, ARM_FEATURE_V8_FP16)) { + if (!is_u && dc_isar_feature(aa64_fp16, s)) { size = 1; } else if (!is_u || !is_q || extract32(size, 0, 1)) { unallocated_encoding(s); @@ -6918,7 +6915,7 @@ static void disas_simd_mod_imm(DisasContext *s, uint32_t insn) if (o2 != 0 || ((cmode == 0xf) && is_neg && !is_q)) { /* Check for FMOV (vector, immediate) - half-precision */ - if (!(arm_dc_feature(s, ARM_FEATURE_V8_FP16) && o2 && cmode == 0xf)) { + if (!(dc_isar_feature(aa64_fp16, s) && o2 && cmode == 0xf)) { unallocated_encoding(s); return; } @@ -7085,7 +7082,7 @@ static void disas_simd_scalar_pairwise(DisasContext *s, uint32_t insn) case 0x2f: /* FMINP */ /* FP op, size[0] is 32 or 64 bit*/ if (!u) { - if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) { + if (!dc_isar_feature(aa64_fp16, s)) { unallocated_encoding(s); return; } else { @@ -7730,7 +7727,7 @@ static void handle_simd_shift_intfp_conv(DisasContext *s, bool is_scalar, size = MO_32; } else if (immh & 2) { size = MO_16; - if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) { + if (!dc_isar_feature(aa64_fp16, s)) { unallocated_encoding(s); return; } @@ -7775,7 +7772,7 @@ static void handle_simd_shift_fpint_conv(DisasContext *s, bool is_scalar, size = MO_32; } else if (immh & 0x2) { size = MO_16; - if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) { + if (!dc_isar_feature(aa64_fp16, s)) { unallocated_encoding(s); return; } @@ -8040,28 +8037,6 @@ static void disas_simd_scalar_three_reg_diff(DisasContext *s, uint32_t insn) } } -/* CMTST : test is "if (X & Y != 0)". 
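The CMTST expanders removed just below (presumably moved to translate.c for sharing with the AArch32 decoder, which this excerpt does not show) all implement the same per-element predicate; a scalar model, checkable standalone:

#include <assert.h>
#include <stdint.h>

/* Scalar model of CMTST: all-ones if the operands share any set bit,
 * all-zeroes otherwise. The gen_cmtst_* expanders emit exactly this,
 * element by element. */
static uint64_t cmtst64(uint64_t a, uint64_t b)
{
    return (a & b) != 0 ? UINT64_MAX : 0;
}

int main(void)
{
    assert(cmtst64(0x0f00, 0x0010) == 0);           /* disjoint bits */
    assert(cmtst64(0x0f00, 0x0100) == UINT64_MAX);  /* overlap -> mask */
    return 0;
}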
*/ -static void gen_cmtst_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) -{ - tcg_gen_and_i32(d, a, b); - tcg_gen_setcondi_i32(TCG_COND_NE, d, d, 0); - tcg_gen_neg_i32(d, d); -} - -static void gen_cmtst_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) -{ - tcg_gen_and_i64(d, a, b); - tcg_gen_setcondi_i64(TCG_COND_NE, d, d, 0); - tcg_gen_neg_i64(d, d); -} - -static void gen_cmtst_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b) -{ - tcg_gen_and_vec(vece, d, a, b); - tcg_gen_dupi_vec(vece, a, 0); - tcg_gen_cmp_vec(TCG_COND_NE, vece, d, d, a); -} - static void handle_3same_64(DisasContext *s, int opcode, bool u, TCGv_i64 tcg_rd, TCGv_i64 tcg_rn, TCGv_i64 tcg_rm) { @@ -8539,7 +8514,7 @@ static void disas_simd_scalar_three_reg_same_fp16(DisasContext *s, return; } - if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) { + if (!dc_isar_feature(aa64_fp16, s)) { unallocated_encoding(s); } @@ -8612,7 +8587,7 @@ static void disas_simd_scalar_three_reg_same_extra(DisasContext *s, bool u = extract32(insn, 29, 1); TCGv_i32 ele1, ele2, ele3; TCGv_i64 res; - int feature; + bool feature; switch (u * 16 + opcode) { case 0x10: /* SQRDMLAH (vector) */ @@ -8621,13 +8596,13 @@ static void disas_simd_scalar_three_reg_same_extra(DisasContext *s, unallocated_encoding(s); return; } - feature = ARM_FEATURE_V8_RDM; + feature = dc_isar_feature(aa64_rdm, s); break; default: unallocated_encoding(s); return; } - if (!arm_dc_feature(s, feature)) { + if (!feature) { unallocated_encoding(s); return; } @@ -9401,191 +9376,10 @@ static void disas_simd_scalar_two_reg_misc(DisasContext *s, uint32_t insn) } } -static void gen_ssra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift) -{ - tcg_gen_vec_sar8i_i64(a, a, shift); - tcg_gen_vec_add8_i64(d, d, a); -} - -static void gen_ssra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift) -{ - tcg_gen_vec_sar16i_i64(a, a, shift); - tcg_gen_vec_add16_i64(d, d, a); -} - -static void gen_ssra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift) -{ - tcg_gen_sari_i32(a, a, shift); - tcg_gen_add_i32(d, d, a); -} - -static void gen_ssra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift) -{ - tcg_gen_sari_i64(a, a, shift); - tcg_gen_add_i64(d, d, a); -} - -static void gen_ssra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh) -{ - tcg_gen_sari_vec(vece, a, a, sh); - tcg_gen_add_vec(vece, d, d, a); -} - -static void gen_usra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift) -{ - tcg_gen_vec_shr8i_i64(a, a, shift); - tcg_gen_vec_add8_i64(d, d, a); -} - -static void gen_usra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift) -{ - tcg_gen_vec_shr16i_i64(a, a, shift); - tcg_gen_vec_add16_i64(d, d, a); -} - -static void gen_usra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift) -{ - tcg_gen_shri_i32(a, a, shift); - tcg_gen_add_i32(d, d, a); -} - -static void gen_usra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift) -{ - tcg_gen_shri_i64(a, a, shift); - tcg_gen_add_i64(d, d, a); -} - -static void gen_usra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh) -{ - tcg_gen_shri_vec(vece, a, a, sh); - tcg_gen_add_vec(vece, d, d, a); -} - -static void gen_shr8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift) -{ - uint64_t mask = dup_const(MO_8, 0xff >> shift); - TCGv_i64 t = tcg_temp_new_i64(); - - tcg_gen_shri_i64(t, a, shift); - tcg_gen_andi_i64(t, t, mask); - tcg_gen_andi_i64(d, d, ~mask); - tcg_gen_or_i64(d, d, t); - tcg_temp_free_i64(t); -} - -static void gen_shr16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift) -{ - uint64_t mask = dup_const(MO_16, 0xffff >> shift); - TCGv_i64 t = tcg_temp_new_i64(); - - tcg_gen_shri_i64(t, a, shift); - tcg_gen_andi_i64(t, t, 
mask); - tcg_gen_andi_i64(d, d, ~mask); - tcg_gen_or_i64(d, d, t); - tcg_temp_free_i64(t); -} - -static void gen_shr32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift) -{ - tcg_gen_shri_i32(a, a, shift); - tcg_gen_deposit_i32(d, d, a, 0, 32 - shift); -} - -static void gen_shr64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift) -{ - tcg_gen_shri_i64(a, a, shift); - tcg_gen_deposit_i64(d, d, a, 0, 64 - shift); -} - -static void gen_shr_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh) -{ - uint64_t mask = (2ull << ((8 << vece) - 1)) - 1; - TCGv_vec t = tcg_temp_new_vec_matching(d); - TCGv_vec m = tcg_temp_new_vec_matching(d); - - tcg_gen_dupi_vec(vece, m, mask ^ (mask >> sh)); - tcg_gen_shri_vec(vece, t, a, sh); - tcg_gen_and_vec(vece, d, d, m); - tcg_gen_or_vec(vece, d, d, t); - - tcg_temp_free_vec(t); - tcg_temp_free_vec(m); -} - /* SSHR[RA]/USHR[RA] - Vector shift right (optional rounding/accumulate) */ static void handle_vec_simd_shri(DisasContext *s, bool is_q, bool is_u, int immh, int immb, int opcode, int rn, int rd) { - static const GVecGen2i ssra_op[4] = { - { .fni8 = gen_ssra8_i64, - .fniv = gen_ssra_vec, - .load_dest = true, - .opc = INDEX_op_sari_vec, - .vece = MO_8 }, - { .fni8 = gen_ssra16_i64, - .fniv = gen_ssra_vec, - .load_dest = true, - .opc = INDEX_op_sari_vec, - .vece = MO_16 }, - { .fni4 = gen_ssra32_i32, - .fniv = gen_ssra_vec, - .load_dest = true, - .opc = INDEX_op_sari_vec, - .vece = MO_32 }, - { .fni8 = gen_ssra64_i64, - .fniv = gen_ssra_vec, - .prefer_i64 = TCG_TARGET_REG_BITS == 64, - .load_dest = true, - .opc = INDEX_op_sari_vec, - .vece = MO_64 }, - }; - static const GVecGen2i usra_op[4] = { - { .fni8 = gen_usra8_i64, - .fniv = gen_usra_vec, - .load_dest = true, - .opc = INDEX_op_shri_vec, - .vece = MO_8, }, - { .fni8 = gen_usra16_i64, - .fniv = gen_usra_vec, - .load_dest = true, - .opc = INDEX_op_shri_vec, - .vece = MO_16, }, - { .fni4 = gen_usra32_i32, - .fniv = gen_usra_vec, - .load_dest = true, - .opc = INDEX_op_shri_vec, - .vece = MO_32, }, - { .fni8 = gen_usra64_i64, - .fniv = gen_usra_vec, - .prefer_i64 = TCG_TARGET_REG_BITS == 64, - .load_dest = true, - .opc = INDEX_op_shri_vec, - .vece = MO_64, }, - }; - static const GVecGen2i sri_op[4] = { - { .fni8 = gen_shr8_ins_i64, - .fniv = gen_shr_ins_vec, - .load_dest = true, - .opc = INDEX_op_shri_vec, - .vece = MO_8 }, - { .fni8 = gen_shr16_ins_i64, - .fniv = gen_shr_ins_vec, - .load_dest = true, - .opc = INDEX_op_shri_vec, - .vece = MO_16 }, - { .fni4 = gen_shr32_ins_i32, - .fniv = gen_shr_ins_vec, - .load_dest = true, - .opc = INDEX_op_shri_vec, - .vece = MO_32 }, - { .fni8 = gen_shr64_ins_i64, - .fniv = gen_shr_ins_vec, - .prefer_i64 = TCG_TARGET_REG_BITS == 64, - .load_dest = true, - .opc = INDEX_op_shri_vec, - .vece = MO_64 }, - }; - int size = 32 - clz32(immh) - 1; int immhb = immh << 3 | immb; int shift = 2 * (8 << size) - immhb; @@ -9681,85 +9475,10 @@ static void handle_vec_simd_shri(DisasContext *s, bool is_q, bool is_u, clear_vec_high(s, is_q, rd); } -static void gen_shl8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift) -{ - uint64_t mask = dup_const(MO_8, 0xff << shift); - TCGv_i64 t = tcg_temp_new_i64(); - - tcg_gen_shli_i64(t, a, shift); - tcg_gen_andi_i64(t, t, mask); - tcg_gen_andi_i64(d, d, ~mask); - tcg_gen_or_i64(d, d, t); - tcg_temp_free_i64(t); -} - -static void gen_shl16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift) -{ - uint64_t mask = dup_const(MO_16, 0xffff << shift); - TCGv_i64 t = tcg_temp_new_i64(); - - tcg_gen_shli_i64(t, a, shift); - tcg_gen_andi_i64(t, t, mask); - 
tcg_gen_andi_i64(d, d, ~mask); - tcg_gen_or_i64(d, d, t); - tcg_temp_free_i64(t); -} - -static void gen_shl32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift) -{ - tcg_gen_deposit_i32(d, d, a, shift, 32 - shift); -} - -static void gen_shl64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift) -{ - tcg_gen_deposit_i64(d, d, a, shift, 64 - shift); -} - -static void gen_shl_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh) -{ - uint64_t mask = (1ull << sh) - 1; - TCGv_vec t = tcg_temp_new_vec_matching(d); - TCGv_vec m = tcg_temp_new_vec_matching(d); - - tcg_gen_dupi_vec(vece, m, mask); - tcg_gen_shli_vec(vece, t, a, sh); - tcg_gen_and_vec(vece, d, d, m); - tcg_gen_or_vec(vece, d, d, t); - - tcg_temp_free_vec(t); - tcg_temp_free_vec(m); -} - /* SHL/SLI - Vector shift left */ static void handle_vec_simd_shli(DisasContext *s, bool is_q, bool insert, int immh, int immb, int opcode, int rn, int rd) { - static const GVecGen2i shi_op[4] = { - { .fni8 = gen_shl8_ins_i64, - .fniv = gen_shl_ins_vec, - .opc = INDEX_op_shli_vec, - .prefer_i64 = TCG_TARGET_REG_BITS == 64, - .load_dest = true, - .vece = MO_8 }, - { .fni8 = gen_shl16_ins_i64, - .fniv = gen_shl_ins_vec, - .opc = INDEX_op_shli_vec, - .prefer_i64 = TCG_TARGET_REG_BITS == 64, - .load_dest = true, - .vece = MO_16 }, - { .fni4 = gen_shl32_ins_i32, - .fniv = gen_shl_ins_vec, - .opc = INDEX_op_shli_vec, - .prefer_i64 = TCG_TARGET_REG_BITS == 64, - .load_dest = true, - .vece = MO_32 }, - { .fni8 = gen_shl64_ins_i64, - .fniv = gen_shl_ins_vec, - .opc = INDEX_op_shli_vec, - .prefer_i64 = TCG_TARGET_REG_BITS == 64, - .load_dest = true, - .vece = MO_64 }, - }; int size = 32 - clz32(immh) - 1; int immhb = immh << 3 | immb; int shift = immhb - (8 << size); @@ -9779,7 +9498,7 @@ static void handle_vec_simd_shli(DisasContext *s, bool is_q, bool insert, } if (insert) { - gen_gvec_op2i(s, is_q, rd, rn, shift, &shi_op[size]); + gen_gvec_op2i(s, is_q, rd, rn, shift, &sli_op[size]); } else { gen_gvec_fn2i(s, is_q, rd, rn, shift, tcg_gen_gvec_shli, size); } @@ -10356,7 +10075,7 @@ static void disas_simd_three_reg_diff(DisasContext *s, uint32_t insn) return; } if (size == 3) { - if (!arm_dc_feature(s, ARM_FEATURE_V8_PMULL)) { + if (!dc_isar_feature(aa64_pmull, s)) { unallocated_encoding(s); return; } @@ -10401,70 +10120,9 @@ static void disas_simd_three_reg_diff(DisasContext *s, uint32_t insn) } } -static void gen_bsl_i64(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm) -{ - tcg_gen_xor_i64(rn, rn, rm); - tcg_gen_and_i64(rn, rn, rd); - tcg_gen_xor_i64(rd, rm, rn); -} - -static void gen_bit_i64(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm) -{ - tcg_gen_xor_i64(rn, rn, rd); - tcg_gen_and_i64(rn, rn, rm); - tcg_gen_xor_i64(rd, rd, rn); -} - -static void gen_bif_i64(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm) -{ - tcg_gen_xor_i64(rn, rn, rd); - tcg_gen_andc_i64(rn, rn, rm); - tcg_gen_xor_i64(rd, rd, rn); -} - -static void gen_bsl_vec(unsigned vece, TCGv_vec rd, TCGv_vec rn, TCGv_vec rm) -{ - tcg_gen_xor_vec(vece, rn, rn, rm); - tcg_gen_and_vec(vece, rn, rn, rd); - tcg_gen_xor_vec(vece, rd, rm, rn); -} - -static void gen_bit_vec(unsigned vece, TCGv_vec rd, TCGv_vec rn, TCGv_vec rm) -{ - tcg_gen_xor_vec(vece, rn, rn, rd); - tcg_gen_and_vec(vece, rn, rn, rm); - tcg_gen_xor_vec(vece, rd, rd, rn); -} - -static void gen_bif_vec(unsigned vece, TCGv_vec rd, TCGv_vec rn, TCGv_vec rm) -{ - tcg_gen_xor_vec(vece, rn, rn, rd); - tcg_gen_andc_vec(vece, rn, rn, rm); - tcg_gen_xor_vec(vece, rd, rd, rn); -} - /* Logic op (opcode == 3) subgroup of C3.6.16. 
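The insert-shift masks built with dup_const in the expanders removed above are just a lane pattern replicated across a 64-bit word. A standalone model of the byte-lane SRI case; dup8 is a local stand-in for dup_const(MO_8, x):

#include <assert.h>
#include <stdint.h>

/* Replicate an 8-bit pattern into every byte lane of a 64-bit word. */
static uint64_t dup8(uint8_t x)
{
    return x * 0x0101010101010101ULL;
}

/* Shift-right-and-insert on byte lanes: keep the destination bits that
 * the shifted-in field does not cover, then OR the field in. */
static uint64_t sri8(uint64_t d, uint64_t a, int shift)
{
    uint64_t mask = dup8(0xff >> shift);
    uint64_t t = (a >> shift) & mask;   /* mask clears cross-lane spill */
    return (d & ~mask) | t;
}

int main(void)
{
    /* Shifting each 0xff byte right by 4 inserts 0x0f into every lane,
     * leaving the top nibble of each destination lane intact. */
    assert(sri8(0xaaaaaaaaaaaaaaaaULL, 0xffffffffffffffffULL, 4)
           == 0xafafafafafafafafULL);
    return 0;
}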
*/ static void disas_simd_3same_logic(DisasContext *s, uint32_t insn) { - static const GVecGen3 bsl_op = { - .fni8 = gen_bsl_i64, - .fniv = gen_bsl_vec, - .prefer_i64 = TCG_TARGET_REG_BITS == 64, - .load_dest = true - }; - static const GVecGen3 bit_op = { - .fni8 = gen_bit_i64, - .fniv = gen_bit_vec, - .prefer_i64 = TCG_TARGET_REG_BITS == 64, - .load_dest = true - }; - static const GVecGen3 bif_op = { - .fni8 = gen_bif_i64, - .fniv = gen_bif_vec, - .prefer_i64 = TCG_TARGET_REG_BITS == 64, - .load_dest = true - }; - int rd = extract32(insn, 0, 5); int rn = extract32(insn, 5, 5); int rm = extract32(insn, 16, 5); @@ -10736,131 +10394,9 @@ static void disas_simd_3same_float(DisasContext *s, uint32_t insn) } } -static void gen_mla8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) -{ - gen_helper_neon_mul_u8(a, a, b); - gen_helper_neon_add_u8(d, d, a); -} - -static void gen_mla16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) -{ - gen_helper_neon_mul_u16(a, a, b); - gen_helper_neon_add_u16(d, d, a); -} - -static void gen_mla32_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) -{ - tcg_gen_mul_i32(a, a, b); - tcg_gen_add_i32(d, d, a); -} - -static void gen_mla64_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) -{ - tcg_gen_mul_i64(a, a, b); - tcg_gen_add_i64(d, d, a); -} - -static void gen_mla_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b) -{ - tcg_gen_mul_vec(vece, a, a, b); - tcg_gen_add_vec(vece, d, d, a); -} - -static void gen_mls8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) -{ - gen_helper_neon_mul_u8(a, a, b); - gen_helper_neon_sub_u8(d, d, a); -} - -static void gen_mls16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) -{ - gen_helper_neon_mul_u16(a, a, b); - gen_helper_neon_sub_u16(d, d, a); -} - -static void gen_mls32_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) -{ - tcg_gen_mul_i32(a, a, b); - tcg_gen_sub_i32(d, d, a); -} - -static void gen_mls64_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) -{ - tcg_gen_mul_i64(a, a, b); - tcg_gen_sub_i64(d, d, a); -} - -static void gen_mls_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b) -{ - tcg_gen_mul_vec(vece, a, a, b); - tcg_gen_sub_vec(vece, d, d, a); -} - /* Integer op subgroup of C3.6.16. 
*/ static void disas_simd_3same_int(DisasContext *s, uint32_t insn) { - static const GVecGen3 cmtst_op[4] = { - { .fni4 = gen_helper_neon_tst_u8, - .fniv = gen_cmtst_vec, - .vece = MO_8 }, - { .fni4 = gen_helper_neon_tst_u16, - .fniv = gen_cmtst_vec, - .vece = MO_16 }, - { .fni4 = gen_cmtst_i32, - .fniv = gen_cmtst_vec, - .vece = MO_32 }, - { .fni8 = gen_cmtst_i64, - .fniv = gen_cmtst_vec, - .prefer_i64 = TCG_TARGET_REG_BITS == 64, - .vece = MO_64 }, - }; - static const GVecGen3 mla_op[4] = { - { .fni4 = gen_mla8_i32, - .fniv = gen_mla_vec, - .opc = INDEX_op_mul_vec, - .load_dest = true, - .vece = MO_8 }, - { .fni4 = gen_mla16_i32, - .fniv = gen_mla_vec, - .opc = INDEX_op_mul_vec, - .load_dest = true, - .vece = MO_16 }, - { .fni4 = gen_mla32_i32, - .fniv = gen_mla_vec, - .opc = INDEX_op_mul_vec, - .load_dest = true, - .vece = MO_32 }, - { .fni8 = gen_mla64_i64, - .fniv = gen_mla_vec, - .opc = INDEX_op_mul_vec, - .prefer_i64 = TCG_TARGET_REG_BITS == 64, - .load_dest = true, - .vece = MO_64 }, - }; - static const GVecGen3 mls_op[4] = { - { .fni4 = gen_mls8_i32, - .fniv = gen_mls_vec, - .opc = INDEX_op_mul_vec, - .load_dest = true, - .vece = MO_8 }, - { .fni4 = gen_mls16_i32, - .fniv = gen_mls_vec, - .opc = INDEX_op_mul_vec, - .load_dest = true, - .vece = MO_16 }, - { .fni4 = gen_mls32_i32, - .fniv = gen_mls_vec, - .opc = INDEX_op_mul_vec, - .load_dest = true, - .vece = MO_32 }, - { .fni8 = gen_mls64_i64, - .fniv = gen_mls_vec, - .opc = INDEX_op_mul_vec, - .prefer_i64 = TCG_TARGET_REG_BITS == 64, - .load_dest = true, - .vece = MO_64 }, - }; - int is_q = extract32(insn, 30, 1); int u = extract32(insn, 29, 1); int size = extract32(insn, 22, 2); @@ -11220,7 +10756,7 @@ static void disas_simd_three_reg_same_fp16(DisasContext *s, uint32_t insn) TCGv_ptr fpst; bool pairwise = false; - if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) { + if (!dc_isar_feature(aa64_fp16, s)) { unallocated_encoding(s); return; } @@ -11408,7 +10944,8 @@ static void disas_simd_three_reg_same_extra(DisasContext *s, uint32_t insn) int size = extract32(insn, 22, 2); bool u = extract32(insn, 29, 1); bool is_q = extract32(insn, 30, 1); - int feature, rot; + bool feature; + int rot; switch (u * 16 + opcode) { case 0x10: /* SQRDMLAH (vector) */ @@ -11417,7 +10954,7 @@ static void disas_simd_three_reg_same_extra(DisasContext *s, uint32_t insn) unallocated_encoding(s); return; } - feature = ARM_FEATURE_V8_RDM; + feature = dc_isar_feature(aa64_rdm, s); break; case 0x02: /* SDOT (vector) */ case 0x12: /* UDOT (vector) */ @@ -11425,7 +10962,7 @@ static void disas_simd_three_reg_same_extra(DisasContext *s, uint32_t insn) unallocated_encoding(s); return; } - feature = ARM_FEATURE_V8_DOTPROD; + feature = dc_isar_feature(aa64_dp, s); break; case 0x18: /* FCMLA, #0 */ case 0x19: /* FCMLA, #90 */ @@ -11434,18 +10971,18 @@ static void disas_simd_three_reg_same_extra(DisasContext *s, uint32_t insn) case 0x1c: /* FCADD, #90 */ case 0x1e: /* FCADD, #270 */ if (size == 0 - || (size == 1 && !arm_dc_feature(s, ARM_FEATURE_V8_FP16)) + || (size == 1 && !dc_isar_feature(aa64_fp16, s)) || (size == 3 && !is_q)) { unallocated_encoding(s); return; } - feature = ARM_FEATURE_V8_FCMA; + feature = dc_isar_feature(aa64_fcma, s); break; default: unallocated_encoding(s); return; } - if (!arm_dc_feature(s, feature)) { + if (!feature) { unallocated_encoding(s); return; } @@ -12314,7 +11851,7 @@ static void disas_simd_two_reg_misc_fp16(DisasContext *s, uint32_t insn) bool need_fpst = true; int rmode; - if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) { + if 
(!dc_isar_feature(aa64_fp16, s)) { unallocated_encoding(s); return; } @@ -12659,14 +12196,14 @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn) break; case 0x1d: /* SQRDMLAH */ case 0x1f: /* SQRDMLSH */ - if (!arm_dc_feature(s, ARM_FEATURE_V8_RDM)) { + if (!dc_isar_feature(aa64_rdm, s)) { unallocated_encoding(s); return; } break; case 0x0e: /* SDOT */ case 0x1e: /* UDOT */ - if (size != MO_32 || !arm_dc_feature(s, ARM_FEATURE_V8_DOTPROD)) { + if (size != MO_32 || !dc_isar_feature(aa64_dp, s)) { unallocated_encoding(s); return; } @@ -12675,7 +12212,7 @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn) case 0x13: /* FCMLA #90 */ case 0x15: /* FCMLA #180 */ case 0x17: /* FCMLA #270 */ - if (!arm_dc_feature(s, ARM_FEATURE_V8_FCMA)) { + if (!dc_isar_feature(aa64_fcma, s)) { unallocated_encoding(s); return; } @@ -12731,7 +12268,7 @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn) } break; } - if (is_fp16 && !arm_dc_feature(s, ARM_FEATURE_V8_FP16)) { + if (is_fp16 && !dc_isar_feature(aa64_fp16, s)) { unallocated_encoding(s); return; } @@ -13202,8 +12739,7 @@ static void disas_crypto_aes(DisasContext *s, uint32_t insn) TCGv_i32 tcg_decrypt; CryptoThreeOpIntFn *genfn; - if (!arm_dc_feature(s, ARM_FEATURE_V8_AES) - || size != 0) { + if (!dc_isar_feature(aa64_aes, s) || size != 0) { unallocated_encoding(s); return; } @@ -13260,7 +12796,7 @@ static void disas_crypto_three_reg_sha(DisasContext *s, uint32_t insn) int rd = extract32(insn, 0, 5); CryptoThreeOpFn *genfn; TCGv_ptr tcg_rd_ptr, tcg_rn_ptr, tcg_rm_ptr; - int feature = ARM_FEATURE_V8_SHA256; + bool feature; if (size != 0) { unallocated_encoding(s); @@ -13273,23 +12809,26 @@ static void disas_crypto_three_reg_sha(DisasContext *s, uint32_t insn) case 2: /* SHA1M */ case 3: /* SHA1SU0 */ genfn = NULL; - feature = ARM_FEATURE_V8_SHA1; + feature = dc_isar_feature(aa64_sha1, s); break; case 4: /* SHA256H */ genfn = gen_helper_crypto_sha256h; + feature = dc_isar_feature(aa64_sha256, s); break; case 5: /* SHA256H2 */ genfn = gen_helper_crypto_sha256h2; + feature = dc_isar_feature(aa64_sha256, s); break; case 6: /* SHA256SU1 */ genfn = gen_helper_crypto_sha256su1; + feature = dc_isar_feature(aa64_sha256, s); break; default: unallocated_encoding(s); return; } - if (!arm_dc_feature(s, feature)) { + if (!feature) { unallocated_encoding(s); return; } @@ -13330,7 +12869,7 @@ static void disas_crypto_two_reg_sha(DisasContext *s, uint32_t insn) int rn = extract32(insn, 5, 5); int rd = extract32(insn, 0, 5); CryptoTwoOpFn *genfn; - int feature; + bool feature; TCGv_ptr tcg_rd_ptr, tcg_rn_ptr; if (size != 0) { @@ -13340,15 +12879,15 @@ static void disas_crypto_two_reg_sha(DisasContext *s, uint32_t insn) switch (opcode) { case 0: /* SHA1H */ - feature = ARM_FEATURE_V8_SHA1; + feature = dc_isar_feature(aa64_sha1, s); genfn = gen_helper_crypto_sha1h; break; case 1: /* SHA1SU1 */ - feature = ARM_FEATURE_V8_SHA1; + feature = dc_isar_feature(aa64_sha1, s); genfn = gen_helper_crypto_sha1su1; break; case 2: /* SHA256SU0 */ - feature = ARM_FEATURE_V8_SHA256; + feature = dc_isar_feature(aa64_sha256, s); genfn = gen_helper_crypto_sha256su0; break; default: @@ -13356,7 +12895,7 @@ static void disas_crypto_two_reg_sha(DisasContext *s, uint32_t insn) return; } - if (!arm_dc_feature(s, feature)) { + if (!feature) { unallocated_encoding(s); return; } @@ -13387,40 +12926,40 @@ static void disas_crypto_three_reg_sha512(DisasContext *s, uint32_t insn) int rm = extract32(insn, 16, 5); int rn = extract32(insn, 5, 5); int rd = 
extract32(insn, 0, 5); - int feature; + bool feature; CryptoThreeOpFn *genfn; if (o == 0) { switch (opcode) { case 0: /* SHA512H */ - feature = ARM_FEATURE_V8_SHA512; + feature = dc_isar_feature(aa64_sha512, s); genfn = gen_helper_crypto_sha512h; break; case 1: /* SHA512H2 */ - feature = ARM_FEATURE_V8_SHA512; + feature = dc_isar_feature(aa64_sha512, s); genfn = gen_helper_crypto_sha512h2; break; case 2: /* SHA512SU1 */ - feature = ARM_FEATURE_V8_SHA512; + feature = dc_isar_feature(aa64_sha512, s); genfn = gen_helper_crypto_sha512su1; break; case 3: /* RAX1 */ - feature = ARM_FEATURE_V8_SHA3; + feature = dc_isar_feature(aa64_sha3, s); genfn = NULL; break; } } else { switch (opcode) { case 0: /* SM3PARTW1 */ - feature = ARM_FEATURE_V8_SM3; + feature = dc_isar_feature(aa64_sm3, s); genfn = gen_helper_crypto_sm3partw1; break; case 1: /* SM3PARTW2 */ - feature = ARM_FEATURE_V8_SM3; + feature = dc_isar_feature(aa64_sm3, s); genfn = gen_helper_crypto_sm3partw2; break; case 2: /* SM4EKEY */ - feature = ARM_FEATURE_V8_SM4; + feature = dc_isar_feature(aa64_sm4, s); genfn = gen_helper_crypto_sm4ekey; break; default: @@ -13429,7 +12968,7 @@ static void disas_crypto_three_reg_sha512(DisasContext *s, uint32_t insn) } } - if (!arm_dc_feature(s, feature)) { + if (!feature) { unallocated_encoding(s); return; } @@ -13488,16 +13027,16 @@ static void disas_crypto_two_reg_sha512(DisasContext *s, uint32_t insn) int rn = extract32(insn, 5, 5); int rd = extract32(insn, 0, 5); TCGv_ptr tcg_rd_ptr, tcg_rn_ptr; - int feature; + bool feature; CryptoTwoOpFn *genfn; switch (opcode) { case 0: /* SHA512SU0 */ - feature = ARM_FEATURE_V8_SHA512; + feature = dc_isar_feature(aa64_sha512, s); genfn = gen_helper_crypto_sha512su0; break; case 1: /* SM4E */ - feature = ARM_FEATURE_V8_SM4; + feature = dc_isar_feature(aa64_sm4, s); genfn = gen_helper_crypto_sm4e; break; default: @@ -13505,7 +13044,7 @@ static void disas_crypto_two_reg_sha512(DisasContext *s, uint32_t insn) return; } - if (!arm_dc_feature(s, feature)) { + if (!feature) { unallocated_encoding(s); return; } @@ -13536,22 +13075,22 @@ static void disas_crypto_four_reg(DisasContext *s, uint32_t insn) int ra = extract32(insn, 10, 5); int rn = extract32(insn, 5, 5); int rd = extract32(insn, 0, 5); - int feature; + bool feature; switch (op0) { case 0: /* EOR3 */ case 1: /* BCAX */ - feature = ARM_FEATURE_V8_SHA3; + feature = dc_isar_feature(aa64_sha3, s); break; case 2: /* SM3SS1 */ - feature = ARM_FEATURE_V8_SM3; + feature = dc_isar_feature(aa64_sm3, s); break; default: unallocated_encoding(s); return; } - if (!arm_dc_feature(s, feature)) { + if (!feature) { unallocated_encoding(s); return; } @@ -13638,7 +13177,7 @@ static void disas_crypto_xar(DisasContext *s, uint32_t insn) TCGv_i64 tcg_op1, tcg_op2, tcg_res[2]; int pass; - if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA3)) { + if (!dc_isar_feature(aa64_sha3, s)) { unallocated_encoding(s); return; } @@ -13684,7 +13223,7 @@ static void disas_crypto_three_reg_imm2(DisasContext *s, uint32_t insn) TCGv_ptr tcg_rd_ptr, tcg_rn_ptr, tcg_rm_ptr; TCGv_i32 tcg_imm2, tcg_opcode; - if (!arm_dc_feature(s, ARM_FEATURE_V8_SM3)) { + if (!dc_isar_feature(aa64_sm3, s)) { unallocated_encoding(s); return; } @@ -13792,7 +13331,7 @@ static void disas_a64_insn(CPUARMState *env, DisasContext *s) unallocated_encoding(s); break; case 0x2: - if (!arm_dc_feature(s, ARM_FEATURE_SVE) || !disas_sve(s, insn)) { + if (!dc_isar_feature(aa64_sve, s) || !disas_sve(s, insn)) { unallocated_encoding(s); } break; @@ -13833,6 +13372,7 @@ static void 
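[Annotation: the crypto conversions above replace tests of synthetic ARM_FEATURE_* flags with direct reads of the guest's ID register fields. A rough standalone sketch of the underlying pattern follows; the struct and helper names are illustrative, not QEMU's actual declarations, though the SHA2 field of ID_AA64ISAR0_EL1 really is bits [15:12].]

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    typedef struct {
        uint64_t id_aa64isar0;   /* hypothetical cached copy of ID_AA64ISAR0_EL1 */
    } ISARegisters;

    static inline uint64_t extract_field(uint64_t reg, int pos, int len)
    {
        return (reg >> pos) & ((1ull << len) - 1);
    }

    /* SHA2 lives in ID_AA64ISAR0_EL1 bits [15:12]; a nonzero value means
     * the SHA256 instructions are implemented. */
    static inline bool isar_feature_aa64_sha256(const ISARegisters *id)
    {
        return extract_field(id->id_aa64isar0, 12, 4) != 0;
    }

    int main(void)
    {
        ISARegisters id = { .id_aa64isar0 = 1ull << 12 };  /* SHA2 = 1 */
        assert(isar_feature_aa64_sha256(&id));
        return 0;
    }

[One advantage visible in the diff: the test is evaluated where the opcode is recognized, so "feature" can become a plain bool instead of a deferred enum lookup.]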
aarch64_tr_init_disas_context(DisasContextBase *dcbase, ARMCPU *arm_cpu = arm_env_get_cpu(env); int bound; + dc->isar = &arm_cpu->isar; dc->pc = dc->base.pc_first; dc->condjmp = 0; @@ -13896,7 +13436,6 @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase, static void aarch64_tr_tb_start(DisasContextBase *db, CPUState *cpu) { - tcg_clear_temp_count(); } static void aarch64_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu) diff --git a/target/arm/translate.c b/target/arm/translate.c index 1b4bacb522..7c4675ffd8 100644 --- a/target/arm/translate.c +++ b/target/arm/translate.c @@ -42,7 +42,7 @@ #define ENABLE_ARCH_5 arm_dc_feature(s, ARM_FEATURE_V5) /* currently all emulated v5 cores are also v5TE, so don't bother */ #define ENABLE_ARCH_5TE arm_dc_feature(s, ARM_FEATURE_V5) -#define ENABLE_ARCH_5J arm_dc_feature(s, ARM_FEATURE_JAZELLE) +#define ENABLE_ARCH_5J dc_isar_feature(jazelle, s) #define ENABLE_ARCH_6 arm_dc_feature(s, ARM_FEATURE_V6) #define ENABLE_ARCH_6K arm_dc_feature(s, ARM_FEATURE_V6K) #define ENABLE_ARCH_6T2 arm_dc_feature(s, ARM_FEATURE_THUMB2) @@ -72,7 +72,7 @@ static TCGv_i64 cpu_F0d, cpu_F1d; #include "exec/gen-icount.h" -static const char *regnames[] = +static const char * const regnames[] = { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" }; @@ -1585,6 +1585,25 @@ neon_reg_offset (int reg, int n) return vfp_reg_offset(0, sreg); } +/* Return the offset of a 2**SIZE piece of a NEON register, at index ELE, + * where 0 is the least significant end of the register. + */ +static inline long +neon_element_offset(int reg, int element, TCGMemOp size) +{ + int element_size = 1 << size; + int ofs = element * element_size; +#ifdef HOST_WORDS_BIGENDIAN + /* Calculate the offset assuming fully little-endian, + * then XOR to account for the order of the 8-byte units. 
+ */ + if (element_size < 8) { + ofs ^= 8 - element_size; + } +#endif + return neon_reg_offset(reg, 0) + ofs; +} + static TCGv_i32 neon_load_reg(int reg, int pass) { TCGv_i32 tmp = tcg_temp_new_i32(); @@ -1592,12 +1611,94 @@ static TCGv_i32 neon_load_reg(int reg, int pass) return tmp; } +static void neon_load_element(TCGv_i32 var, int reg, int ele, TCGMemOp mop) +{ + long offset = neon_element_offset(reg, ele, mop & MO_SIZE); + + switch (mop) { + case MO_UB: + tcg_gen_ld8u_i32(var, cpu_env, offset); + break; + case MO_UW: + tcg_gen_ld16u_i32(var, cpu_env, offset); + break; + case MO_UL: + tcg_gen_ld_i32(var, cpu_env, offset); + break; + default: + g_assert_not_reached(); + } +} + +static void neon_load_element64(TCGv_i64 var, int reg, int ele, TCGMemOp mop) +{ + long offset = neon_element_offset(reg, ele, mop & MO_SIZE); + + switch (mop) { + case MO_UB: + tcg_gen_ld8u_i64(var, cpu_env, offset); + break; + case MO_UW: + tcg_gen_ld16u_i64(var, cpu_env, offset); + break; + case MO_UL: + tcg_gen_ld32u_i64(var, cpu_env, offset); + break; + case MO_Q: + tcg_gen_ld_i64(var, cpu_env, offset); + break; + default: + g_assert_not_reached(); + } +} + static void neon_store_reg(int reg, int pass, TCGv_i32 var) { tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass)); tcg_temp_free_i32(var); } +static void neon_store_element(int reg, int ele, TCGMemOp size, TCGv_i32 var) +{ + long offset = neon_element_offset(reg, ele, size); + + switch (size) { + case MO_8: + tcg_gen_st8_i32(var, cpu_env, offset); + break; + case MO_16: + tcg_gen_st16_i32(var, cpu_env, offset); + break; + case MO_32: + tcg_gen_st_i32(var, cpu_env, offset); + break; + default: + g_assert_not_reached(); + } +} + +static void neon_store_element64(int reg, int ele, TCGMemOp size, TCGv_i64 var) +{ + long offset = neon_element_offset(reg, ele, size); + + switch (size) { + case MO_8: + tcg_gen_st8_i64(var, cpu_env, offset); + break; + case MO_16: + tcg_gen_st16_i64(var, cpu_env, offset); + break; + case MO_32: + tcg_gen_st32_i64(var, cpu_env, offset); + break; + case MO_64: + tcg_gen_st_i64(var, cpu_env, offset); + break; + default: + g_assert_not_reached(); + } +} + static inline void neon_load_reg64(TCGv_i64 var, int reg) { tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg)); @@ -2974,19 +3075,6 @@ static void gen_vfp_msr(TCGv_i32 tmp) tcg_temp_free_i32(tmp); } -static void gen_neon_dup_u8(TCGv_i32 var, int shift) -{ - TCGv_i32 tmp = tcg_temp_new_i32(); - if (shift) - tcg_gen_shri_i32(var, var, shift); - tcg_gen_ext8u_i32(var, var); - tcg_gen_shli_i32(tmp, var, 8); - tcg_gen_or_i32(var, var, tmp); - tcg_gen_shli_i32(tmp, var, 16); - tcg_gen_or_i32(var, var, tmp); - tcg_temp_free_i32(tmp); -} - static void gen_neon_dup_low16(TCGv_i32 var) { TCGv_i32 tmp = tcg_temp_new_i32(); @@ -3005,28 +3093,6 @@ static void gen_neon_dup_high16(TCGv_i32 var) tcg_temp_free_i32(tmp); } -static TCGv_i32 gen_load_and_replicate(DisasContext *s, TCGv_i32 addr, int size) -{ - /* Load a single Neon element and replicate into a 32 bit TCG reg */ - TCGv_i32 tmp = tcg_temp_new_i32(); - switch (size) { - case 0: - gen_aa32_ld8u(s, tmp, addr, get_mem_index(s)); - gen_neon_dup_u8(tmp, 0); - break; - case 1: - gen_aa32_ld16u(s, tmp, addr, get_mem_index(s)); - gen_neon_dup_low16(tmp); - break; - case 2: - gen_aa32_ld32u(s, tmp, addr, get_mem_index(s)); - break; - default: /* Avoid compiler warnings. 
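[Annotation: the XOR in neon_element_offset() is the usual trick for addressing little-endian-numbered elements inside host-order 8-byte units, and it can be checked entirely host-side. A self-contained sketch; element_offset is a local stand-in, not the QEMU function.]

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    /* Offset of element ELE of ELEMENT_SIZE bytes, counting from the
     * least-significant end of an 8-byte unit, adjusted the same way
     * neon_element_offset() adjusts it on big-endian hosts. */
    static long element_offset(int ele, int element_size, int big_endian)
    {
        long ofs = ele * element_size;
        if (big_endian && element_size < 8) {
            ofs ^= 8 - element_size;
        }
        return ofs;
    }

    int main(void)
    {
        uint64_t v = 0x0706050403020100ull;   /* byte n of the value is n */
        uint8_t raw[8];
        uint16_t e;

        memcpy(raw, &v, 8);
        int big_endian = (raw[0] == 0x07);    /* detect host byte order */

        /* 16-bit element 3 is bits [63:48] of v, i.e. 0x0706, and the
         * adjusted offset recovers it wherever it sits in host memory. */
        memcpy(&e, raw + element_offset(3, 2, big_endian), 2);
        assert(e == 0x0706);
        return 0;
    }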
*/ - abort(); - } - return tmp; -} - static int handle_vsel(uint32_t insn, uint32_t rd, uint32_t rn, uint32_t rm, uint32_t dp) { @@ -3432,17 +3498,10 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn) tmp = load_reg(s, rd); if (insn & (1 << 23)) { /* VDUP */ - if (size == 0) { - gen_neon_dup_u8(tmp, 0); - } else if (size == 1) { - gen_neon_dup_low16(tmp); - } - for (n = 0; n <= pass * 2; n++) { - tmp2 = tcg_temp_new_i32(); - tcg_gen_mov_i32(tmp2, tmp); - neon_store_reg(rn, n, tmp2); - } - neon_store_reg(rn, n, tmp); + int vec_size = pass ? 16 : 8; + tcg_gen_gvec_dup_i32(size, neon_reg_offset(rn, 0), + vec_size, vec_size, tmp); + tcg_temp_free_i32(tmp); } else { /* VMOV */ switch (size) { @@ -4907,17 +4966,17 @@ static struct { int nregs; int interleave; int spacing; -} neon_ls_element_type[11] = { - {4, 4, 1}, - {4, 4, 2}, +} const neon_ls_element_type[11] = { + {1, 4, 1}, + {1, 4, 2}, {4, 1, 1}, - {4, 2, 1}, - {3, 3, 1}, - {3, 3, 2}, + {2, 2, 2}, + {1, 3, 1}, + {1, 3, 2}, {3, 1, 1}, {1, 1, 1}, - {2, 2, 1}, - {2, 2, 2}, + {1, 2, 1}, + {1, 2, 2}, {2, 1, 1} }; @@ -4933,10 +4992,11 @@ static int disas_neon_ls_insn(DisasContext *s, uint32_t insn) int stride; int size; int reg; - int pass; int load; - int shift; int n; + int vec_size; + int mmu_idx; + TCGMemOp endian; TCGv_i32 addr; TCGv_i32 tmp; TCGv_i32 tmp2; @@ -4948,7 +5008,7 @@ static int disas_neon_ls_insn(DisasContext *s, uint32_t insn) */ if (s->fp_excp_el) { gen_exception_insn(s, 4, EXCP_UDEF, - syn_fp_access_trap(1, 0xe, false), s->fp_excp_el); + syn_simd_access_trap(1, 0xe, false), s->fp_excp_el); return 0; } @@ -4958,6 +5018,8 @@ static int disas_neon_ls_insn(DisasContext *s, uint32_t insn) rn = (insn >> 16) & 0xf; rm = insn & 0xf; load = (insn & (1 << 21)) != 0; + endian = s->be_data; + mmu_idx = get_mem_index(s); if ((insn & (1 << 23)) == 0) { /* Load store all elements. */ op = (insn >> 8) & 0xf; @@ -4982,104 +5044,44 @@ static int disas_neon_ls_insn(DisasContext *s, uint32_t insn) nregs = neon_ls_element_type[op].nregs; interleave = neon_ls_element_type[op].interleave; spacing = neon_ls_element_type[op].spacing; - if (size == 3 && (interleave | spacing) != 1) + if (size == 3 && (interleave | spacing) != 1) { return 1; + } + /* For our purposes, bytes are always little-endian. */ + if (size == 0) { + endian = MO_LE; + } + /* Consecutive little-endian elements from a single register + * can be promoted to a larger little-endian operation. 
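[Annotation: the promotion of interleave == 1 little-endian accesses to size 3 just above is sound because N consecutive little-endian elements concatenate into one little-endian 2N-byte value. A quick host-side check of that identity:]

    #include <assert.h>
    #include <stdint.h>

    static uint64_t le64(const uint8_t *p)
    {
        uint64_t v = 0;
        for (int i = 0; i < 8; i++) {
            v |= (uint64_t)p[i] << (8 * i);
        }
        return v;
    }

    static uint16_t le16(const uint8_t *p)
    {
        return (uint16_t)(p[0] | (p[1] << 8));
    }

    int main(void)
    {
        const uint8_t mem[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
        uint64_t whole = le64(mem);   /* one promoted 64-bit load... */

        for (int e = 0; e < 4; e++) {
            /* ...equals four element-wise 16-bit little-endian loads. */
            assert(((whole >> (16 * e)) & 0xffff) == le16(mem + 2 * e));
        }
        return 0;
    }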
+ */ + if (interleave == 1 && endian == MO_LE) { + size = 3; + } + tmp64 = tcg_temp_new_i64(); addr = tcg_temp_new_i32(); + tmp2 = tcg_const_i32(1 << size); load_reg_var(s, addr, rn); - stride = (1 << size) * interleave; for (reg = 0; reg < nregs; reg++) { - if (interleave > 2 || (interleave == 2 && nregs == 2)) { - load_reg_var(s, addr, rn); - tcg_gen_addi_i32(addr, addr, (1 << size) * reg); - } else if (interleave == 2 && nregs == 4 && reg == 2) { - load_reg_var(s, addr, rn); - tcg_gen_addi_i32(addr, addr, 1 << size); - } - if (size == 3) { - tmp64 = tcg_temp_new_i64(); - if (load) { - gen_aa32_ld64(s, tmp64, addr, get_mem_index(s)); - neon_store_reg64(tmp64, rd); - } else { - neon_load_reg64(tmp64, rd); - gen_aa32_st64(s, tmp64, addr, get_mem_index(s)); - } - tcg_temp_free_i64(tmp64); - tcg_gen_addi_i32(addr, addr, stride); - } else { - for (pass = 0; pass < 2; pass++) { - if (size == 2) { - if (load) { - tmp = tcg_temp_new_i32(); - gen_aa32_ld32u(s, tmp, addr, get_mem_index(s)); - neon_store_reg(rd, pass, tmp); - } else { - tmp = neon_load_reg(rd, pass); - gen_aa32_st32(s, tmp, addr, get_mem_index(s)); - tcg_temp_free_i32(tmp); - } - tcg_gen_addi_i32(addr, addr, stride); - } else if (size == 1) { - if (load) { - tmp = tcg_temp_new_i32(); - gen_aa32_ld16u(s, tmp, addr, get_mem_index(s)); - tcg_gen_addi_i32(addr, addr, stride); - tmp2 = tcg_temp_new_i32(); - gen_aa32_ld16u(s, tmp2, addr, get_mem_index(s)); - tcg_gen_addi_i32(addr, addr, stride); - tcg_gen_shli_i32(tmp2, tmp2, 16); - tcg_gen_or_i32(tmp, tmp, tmp2); - tcg_temp_free_i32(tmp2); - neon_store_reg(rd, pass, tmp); - } else { - tmp = neon_load_reg(rd, pass); - tmp2 = tcg_temp_new_i32(); - tcg_gen_shri_i32(tmp2, tmp, 16); - gen_aa32_st16(s, tmp, addr, get_mem_index(s)); - tcg_temp_free_i32(tmp); - tcg_gen_addi_i32(addr, addr, stride); - gen_aa32_st16(s, tmp2, addr, get_mem_index(s)); - tcg_temp_free_i32(tmp2); - tcg_gen_addi_i32(addr, addr, stride); - } - } else /* size == 0 */ { - if (load) { - tmp2 = NULL; - for (n = 0; n < 4; n++) { - tmp = tcg_temp_new_i32(); - gen_aa32_ld8u(s, tmp, addr, get_mem_index(s)); - tcg_gen_addi_i32(addr, addr, stride); - if (n == 0) { - tmp2 = tmp; - } else { - tcg_gen_shli_i32(tmp, tmp, n * 8); - tcg_gen_or_i32(tmp2, tmp2, tmp); - tcg_temp_free_i32(tmp); - } - } - neon_store_reg(rd, pass, tmp2); - } else { - tmp2 = neon_load_reg(rd, pass); - for (n = 0; n < 4; n++) { - tmp = tcg_temp_new_i32(); - if (n == 0) { - tcg_gen_mov_i32(tmp, tmp2); - } else { - tcg_gen_shri_i32(tmp, tmp2, n * 8); - } - gen_aa32_st8(s, tmp, addr, get_mem_index(s)); - tcg_temp_free_i32(tmp); - tcg_gen_addi_i32(addr, addr, stride); - } - tcg_temp_free_i32(tmp2); - } + for (n = 0; n < 8 >> size; n++) { + int xs; + for (xs = 0; xs < interleave; xs++) { + int tt = rd + reg + spacing * xs; + + if (load) { + gen_aa32_ld_i64(s, tmp64, addr, mmu_idx, endian | size); + neon_store_element64(tt, n, size, tmp64); + } else { + neon_load_element64(tmp64, tt, n, size); + gen_aa32_st_i64(s, tmp64, addr, mmu_idx, endian | size); } + tcg_gen_add_i32(addr, addr, tmp2); } } - rd += spacing; } tcg_temp_free_i32(addr); - stride = nregs * 8; + tcg_temp_free_i32(tmp2); + tcg_temp_free_i64(tmp64); + stride = nregs * interleave * 8; } else { size = (insn >> 10) & 3; if (size == 3) { @@ -5106,45 +5108,50 @@ static int disas_neon_ls_insn(DisasContext *s, uint32_t insn) } addr = tcg_temp_new_i32(); load_reg_var(s, addr, rn); - if (nregs == 1) { - /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */ - tmp = gen_load_and_replicate(s, addr, 
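[Annotation: the rewritten load/store-multiple loop above walks memory in order and computes the destination register as rd + reg + spacing * xs, so nregs * interleave D registers are touched in total. A toy enumeration of the same loop structure, printing instead of generating code, for VLD4.8 {d0-d3}, which the new table encodes as {1, 4, 1}:]

    #include <stdio.h>

    /* size is log2 of the element size in bytes, as in the decoder. */
    static void enumerate(int rd, int nregs, int interleave, int spacing,
                          int size)
    {
        for (int reg = 0; reg < nregs; reg++) {
            for (int n = 0; n < 8 >> size; n++) {
                for (int xs = 0; xs < interleave; xs++) {
                    /* next element from memory -> this register, lane n */
                    printf("mem -> d%d[%d]\n", rd + reg + spacing * xs, n);
                }
            }
        }
    }

    int main(void)
    {
        enumerate(0, 1, 4, 1, 0);   /* VLD4.8 {d0,d1,d2,d3}: de-interleave */
        return 0;
    }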
size); - tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0)); - tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1)); - if (insn & (1 << 5)) { - tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0)); - tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1)); - } - tcg_temp_free_i32(tmp); - } else { - /* VLD2/3/4 to all lanes: bit 5 indicates register stride */ - stride = (insn & (1 << 5)) ? 2 : 1; - for (reg = 0; reg < nregs; reg++) { - tmp = gen_load_and_replicate(s, addr, size); - tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0)); - tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1)); - tcg_temp_free_i32(tmp); - tcg_gen_addi_i32(addr, addr, 1 << size); - rd += stride; + + /* VLD1 to all lanes: bit 5 indicates how many Dregs to write. + * VLD2/3/4 to all lanes: bit 5 indicates register stride. + */ + stride = (insn & (1 << 5)) ? 2 : 1; + vec_size = nregs == 1 ? stride * 8 : 8; + + tmp = tcg_temp_new_i32(); + for (reg = 0; reg < nregs; reg++) { + gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), + s->be_data | size); + if ((rd & 1) && vec_size == 16) { + /* We cannot write 16 bytes at once because the + * destination is unaligned. + */ + tcg_gen_gvec_dup_i32(size, neon_reg_offset(rd, 0), + 8, 8, tmp); + tcg_gen_gvec_mov(0, neon_reg_offset(rd + 1, 0), + neon_reg_offset(rd, 0), 8, 8); + } else { + tcg_gen_gvec_dup_i32(size, neon_reg_offset(rd, 0), + vec_size, vec_size, tmp); } + tcg_gen_addi_i32(addr, addr, 1 << size); + rd += stride; } + tcg_temp_free_i32(tmp); tcg_temp_free_i32(addr); stride = (1 << size) * nregs; } else { /* Single element. */ int idx = (insn >> 4) & 0xf; - pass = (insn >> 7) & 1; + int reg_idx; switch (size) { case 0: - shift = ((insn >> 5) & 3) * 8; + reg_idx = (insn >> 5) & 7; stride = 1; break; case 1: - shift = ((insn >> 6) & 1) * 16; + reg_idx = (insn >> 6) & 3; stride = (insn & (1 << 5)) ? 2 : 1; break; case 2: - shift = 0; + reg_idx = (insn >> 7) & 1; stride = (insn & (1 << 6)) ? 2 : 1; break; default: @@ -5184,52 +5191,24 @@ static int disas_neon_ls_insn(DisasContext *s, uint32_t insn) */ return 1; } + tmp = tcg_temp_new_i32(); addr = tcg_temp_new_i32(); load_reg_var(s, addr, rn); for (reg = 0; reg < nregs; reg++) { if (load) { - tmp = tcg_temp_new_i32(); - switch (size) { - case 0: - gen_aa32_ld8u(s, tmp, addr, get_mem_index(s)); - break; - case 1: - gen_aa32_ld16u(s, tmp, addr, get_mem_index(s)); - break; - case 2: - gen_aa32_ld32u(s, tmp, addr, get_mem_index(s)); - break; - default: /* Avoid compiler warnings. */ - abort(); - } - if (size != 2) { - tmp2 = neon_load_reg(rd, pass); - tcg_gen_deposit_i32(tmp, tmp2, tmp, - shift, size ? 16 : 8); - tcg_temp_free_i32(tmp2); - } - neon_store_reg(rd, pass, tmp); + gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), + s->be_data | size); + neon_store_element(rd, reg_idx, size, tmp); } else { /* Store */ - tmp = neon_load_reg(rd, pass); - if (shift) - tcg_gen_shri_i32(tmp, tmp, shift); - switch (size) { - case 0: - gen_aa32_st8(s, tmp, addr, get_mem_index(s)); - break; - case 1: - gen_aa32_st16(s, tmp, addr, get_mem_index(s)); - break; - case 2: - gen_aa32_st32(s, tmp, addr, get_mem_index(s)); - break; - } - tcg_temp_free_i32(tmp); + neon_load_element(tmp, rd, reg_idx, size); + gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), + s->be_data | size); } rd += stride; tcg_gen_addi_i32(addr, addr, 1 << size); } tcg_temp_free_i32(addr); + tcg_temp_free_i32(tmp); stride = nregs * (1 << size); } } @@ -5250,14 +5229,6 @@ static int disas_neon_ls_insn(DisasContext *s, uint32_t insn) return 0; } -/* Bitwise select. dest = c ? 
t : f. Clobbers T and F. */ -static void gen_neon_bsl(TCGv_i32 dest, TCGv_i32 t, TCGv_i32 f, TCGv_i32 c) -{ - tcg_gen_and_i32(t, t, c); - tcg_gen_andc_i32(f, f, c); - tcg_gen_or_i32(dest, t, f); -} - static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src) { switch (size) { @@ -5464,7 +5435,7 @@ static void gen_neon_narrow_op(int op, int u, int size, #define NEON_3R_VABA 15 #define NEON_3R_VADD_VSUB 16 #define NEON_3R_VTST_VCEQ 17 -#define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */ +#define NEON_3R_VML 18 /* VMLA, VMLS */ #define NEON_3R_VMUL 19 #define NEON_3R_VPMAX 20 #define NEON_3R_VPMIN 21 @@ -5689,7 +5660,7 @@ static const uint8_t neon_2rm_sizes[] = { static int do_v81_helper(DisasContext *s, gen_helper_gvec_3_ptr *fn, int q, int rd, int rn, int rm) { - if (arm_dc_feature(s, ARM_FEATURE_V8_RDM)) { + if (dc_isar_feature(aa32_rdm, s)) { int opr_sz = (1 + q) * 8; tcg_gen_gvec_3_ptr(vfp_reg_offset(1, rd), vfp_reg_offset(1, rn), @@ -5700,6 +5671,483 @@ static int do_v81_helper(DisasContext *s, gen_helper_gvec_3_ptr *fn, return 1; } +/* + * Expanders for VBitOps_VBIF, VBIT, VBSL. + */ +static void gen_bsl_i64(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm) +{ + tcg_gen_xor_i64(rn, rn, rm); + tcg_gen_and_i64(rn, rn, rd); + tcg_gen_xor_i64(rd, rm, rn); +} + +static void gen_bit_i64(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm) +{ + tcg_gen_xor_i64(rn, rn, rd); + tcg_gen_and_i64(rn, rn, rm); + tcg_gen_xor_i64(rd, rd, rn); +} + +static void gen_bif_i64(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm) +{ + tcg_gen_xor_i64(rn, rn, rd); + tcg_gen_andc_i64(rn, rn, rm); + tcg_gen_xor_i64(rd, rd, rn); +} + +static void gen_bsl_vec(unsigned vece, TCGv_vec rd, TCGv_vec rn, TCGv_vec rm) +{ + tcg_gen_xor_vec(vece, rn, rn, rm); + tcg_gen_and_vec(vece, rn, rn, rd); + tcg_gen_xor_vec(vece, rd, rm, rn); +} + +static void gen_bit_vec(unsigned vece, TCGv_vec rd, TCGv_vec rn, TCGv_vec rm) +{ + tcg_gen_xor_vec(vece, rn, rn, rd); + tcg_gen_and_vec(vece, rn, rn, rm); + tcg_gen_xor_vec(vece, rd, rd, rn); +} + +static void gen_bif_vec(unsigned vece, TCGv_vec rd, TCGv_vec rn, TCGv_vec rm) +{ + tcg_gen_xor_vec(vece, rn, rn, rd); + tcg_gen_andc_vec(vece, rn, rn, rm); + tcg_gen_xor_vec(vece, rd, rd, rn); +} + +const GVecGen3 bsl_op = { + .fni8 = gen_bsl_i64, + .fniv = gen_bsl_vec, + .prefer_i64 = TCG_TARGET_REG_BITS == 64, + .load_dest = true +}; + +const GVecGen3 bit_op = { + .fni8 = gen_bit_i64, + .fniv = gen_bit_vec, + .prefer_i64 = TCG_TARGET_REG_BITS == 64, + .load_dest = true +}; + +const GVecGen3 bif_op = { + .fni8 = gen_bif_i64, + .fniv = gen_bif_vec, + .prefer_i64 = TCG_TARGET_REG_BITS == 64, + .load_dest = true +}; + +static void gen_ssra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift) +{ + tcg_gen_vec_sar8i_i64(a, a, shift); + tcg_gen_vec_add8_i64(d, d, a); +} + +static void gen_ssra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift) +{ + tcg_gen_vec_sar16i_i64(a, a, shift); + tcg_gen_vec_add16_i64(d, d, a); +} + +static void gen_ssra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift) +{ + tcg_gen_sari_i32(a, a, shift); + tcg_gen_add_i32(d, d, a); +} + +static void gen_ssra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift) +{ + tcg_gen_sari_i64(a, a, shift); + tcg_gen_add_i64(d, d, a); +} + +static void gen_ssra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh) +{ + tcg_gen_sari_vec(vece, a, a, sh); + tcg_gen_add_vec(vece, d, d, a); +} + +const GVecGen2i ssra_op[4] = { + { .fni8 = gen_ssra8_i64, + .fniv = gen_ssra_vec, + .load_dest = true, + .opc = INDEX_op_sari_vec, + .vece = MO_8 }, + { .fni8 = gen_ssra16_i64, + .fniv = 
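[Annotation: the new VBSL/VBIT/VBIF expanders above all use the xor/and/xor selection identity instead of the and/andc/or form of the deleted gen_neon_bsl(). For VBSL the identity is f ^ ((t ^ f) & sel); VBIT and VBIF are the same identity with the three registers in different roles. A host-side check:]

    #include <assert.h>
    #include <stdint.h>

    /* Selects bits from t where sel is 1 and from f where sel is 0. */
    static uint64_t bsl_xor(uint64_t sel, uint64_t t, uint64_t f)
    {
        return f ^ ((t ^ f) & sel);
    }

    static uint64_t bsl_naive(uint64_t sel, uint64_t t, uint64_t f)
    {
        return (t & sel) | (f & ~sel);
    }

    int main(void)
    {
        uint64_t sel = 0xff00ff00ff00ff00ull;
        uint64_t t   = 0x0123456789abcdefull;
        uint64_t f   = 0xfedcba9876543210ull;
        assert(bsl_xor(sel, t, f) == bsl_naive(sel, t, f));
        return 0;
    }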
gen_ssra_vec, + .load_dest = true, + .opc = INDEX_op_sari_vec, + .vece = MO_16 }, + { .fni4 = gen_ssra32_i32, + .fniv = gen_ssra_vec, + .load_dest = true, + .opc = INDEX_op_sari_vec, + .vece = MO_32 }, + { .fni8 = gen_ssra64_i64, + .fniv = gen_ssra_vec, + .prefer_i64 = TCG_TARGET_REG_BITS == 64, + .load_dest = true, + .opc = INDEX_op_sari_vec, + .vece = MO_64 }, +}; + +static void gen_usra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift) +{ + tcg_gen_vec_shr8i_i64(a, a, shift); + tcg_gen_vec_add8_i64(d, d, a); +} + +static void gen_usra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift) +{ + tcg_gen_vec_shr16i_i64(a, a, shift); + tcg_gen_vec_add16_i64(d, d, a); +} + +static void gen_usra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift) +{ + tcg_gen_shri_i32(a, a, shift); + tcg_gen_add_i32(d, d, a); +} + +static void gen_usra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift) +{ + tcg_gen_shri_i64(a, a, shift); + tcg_gen_add_i64(d, d, a); +} + +static void gen_usra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh) +{ + tcg_gen_shri_vec(vece, a, a, sh); + tcg_gen_add_vec(vece, d, d, a); +} + +const GVecGen2i usra_op[4] = { + { .fni8 = gen_usra8_i64, + .fniv = gen_usra_vec, + .load_dest = true, + .opc = INDEX_op_shri_vec, + .vece = MO_8, }, + { .fni8 = gen_usra16_i64, + .fniv = gen_usra_vec, + .load_dest = true, + .opc = INDEX_op_shri_vec, + .vece = MO_16, }, + { .fni4 = gen_usra32_i32, + .fniv = gen_usra_vec, + .load_dest = true, + .opc = INDEX_op_shri_vec, + .vece = MO_32, }, + { .fni8 = gen_usra64_i64, + .fniv = gen_usra_vec, + .prefer_i64 = TCG_TARGET_REG_BITS == 64, + .load_dest = true, + .opc = INDEX_op_shri_vec, + .vece = MO_64, }, +}; + +static void gen_shr8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift) +{ + uint64_t mask = dup_const(MO_8, 0xff >> shift); + TCGv_i64 t = tcg_temp_new_i64(); + + tcg_gen_shri_i64(t, a, shift); + tcg_gen_andi_i64(t, t, mask); + tcg_gen_andi_i64(d, d, ~mask); + tcg_gen_or_i64(d, d, t); + tcg_temp_free_i64(t); +} + +static void gen_shr16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift) +{ + uint64_t mask = dup_const(MO_16, 0xffff >> shift); + TCGv_i64 t = tcg_temp_new_i64(); + + tcg_gen_shri_i64(t, a, shift); + tcg_gen_andi_i64(t, t, mask); + tcg_gen_andi_i64(d, d, ~mask); + tcg_gen_or_i64(d, d, t); + tcg_temp_free_i64(t); +} + +static void gen_shr32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift) +{ + tcg_gen_shri_i32(a, a, shift); + tcg_gen_deposit_i32(d, d, a, 0, 32 - shift); +} + +static void gen_shr64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift) +{ + tcg_gen_shri_i64(a, a, shift); + tcg_gen_deposit_i64(d, d, a, 0, 64 - shift); +} + +static void gen_shr_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh) +{ + if (sh == 0) { + tcg_gen_mov_vec(d, a); + } else { + TCGv_vec t = tcg_temp_new_vec_matching(d); + TCGv_vec m = tcg_temp_new_vec_matching(d); + + tcg_gen_dupi_vec(vece, m, MAKE_64BIT_MASK((8 << vece) - sh, sh)); + tcg_gen_shri_vec(vece, t, a, sh); + tcg_gen_and_vec(vece, d, d, m); + tcg_gen_or_vec(vece, d, d, t); + + tcg_temp_free_vec(t); + tcg_temp_free_vec(m); + } +} + +const GVecGen2i sri_op[4] = { + { .fni8 = gen_shr8_ins_i64, + .fniv = gen_shr_ins_vec, + .load_dest = true, + .opc = INDEX_op_shri_vec, + .vece = MO_8 }, + { .fni8 = gen_shr16_ins_i64, + .fniv = gen_shr_ins_vec, + .load_dest = true, + .opc = INDEX_op_shri_vec, + .vece = MO_16 }, + { .fni4 = gen_shr32_ins_i32, + .fniv = gen_shr_ins_vec, + .load_dest = true, + .opc = INDEX_op_shri_vec, + .vece = MO_32 }, + { .fni8 = gen_shr64_ins_i64, + .fniv = gen_shr_ins_vec, + .prefer_i64 = 
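[Annotation: in the SRI expanders the 64-bit integer variants process several lanes per host operation: the whole register is shifted once, a replicated per-lane mask discards the bits that crossed a lane boundary, and the kept destination bits are merged back in. A host-side version of the 8-bit-lane case:]

    #include <assert.h>
    #include <stdint.h>

    /* Replicate an 8-bit pattern into all eight lanes, as dup_const(MO_8, b). */
    static uint64_t dup8(uint8_t b)
    {
        return 0x0101010101010101ull * b;
    }

    /* SRI on eight 8-bit lanes at once: keep the top SHIFT bits of each
     * destination lane, insert the shifted source bits below them. */
    static uint64_t sri8x8(uint64_t d, uint64_t a, int shift)
    {
        uint64_t mask = dup8((uint8_t)(0xff >> shift));
        return (d & ~mask) | ((a >> shift) & mask);
    }

    int main(void)
    {
        uint64_t d = 0xffffffffffffffffull;
        uint64_t a = 0x8040201008040201ull;
        uint64_t r = sri8x8(d, a, 4);

        for (int i = 0; i < 8; i++) {    /* per-lane reference */
            uint8_t dl = d >> (8 * i), al = a >> (8 * i);
            assert(((r >> (8 * i)) & 0xff) == ((dl & 0xf0) | (al >> 4)));
        }
        return 0;
    }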
TCG_TARGET_REG_BITS == 64, + .load_dest = true, + .opc = INDEX_op_shri_vec, + .vece = MO_64 }, +}; + +static void gen_shl8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift) +{ + uint64_t mask = dup_const(MO_8, 0xff << shift); + TCGv_i64 t = tcg_temp_new_i64(); + + tcg_gen_shli_i64(t, a, shift); + tcg_gen_andi_i64(t, t, mask); + tcg_gen_andi_i64(d, d, ~mask); + tcg_gen_or_i64(d, d, t); + tcg_temp_free_i64(t); +} + +static void gen_shl16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift) +{ + uint64_t mask = dup_const(MO_16, 0xffff << shift); + TCGv_i64 t = tcg_temp_new_i64(); + + tcg_gen_shli_i64(t, a, shift); + tcg_gen_andi_i64(t, t, mask); + tcg_gen_andi_i64(d, d, ~mask); + tcg_gen_or_i64(d, d, t); + tcg_temp_free_i64(t); +} + +static void gen_shl32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift) +{ + tcg_gen_deposit_i32(d, d, a, shift, 32 - shift); +} + +static void gen_shl64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift) +{ + tcg_gen_deposit_i64(d, d, a, shift, 64 - shift); +} + +static void gen_shl_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh) +{ + if (sh == 0) { + tcg_gen_mov_vec(d, a); + } else { + TCGv_vec t = tcg_temp_new_vec_matching(d); + TCGv_vec m = tcg_temp_new_vec_matching(d); + + tcg_gen_dupi_vec(vece, m, MAKE_64BIT_MASK(0, sh)); + tcg_gen_shli_vec(vece, t, a, sh); + tcg_gen_and_vec(vece, d, d, m); + tcg_gen_or_vec(vece, d, d, t); + + tcg_temp_free_vec(t); + tcg_temp_free_vec(m); + } +} + +const GVecGen2i sli_op[4] = { + { .fni8 = gen_shl8_ins_i64, + .fniv = gen_shl_ins_vec, + .load_dest = true, + .opc = INDEX_op_shli_vec, + .vece = MO_8 }, + { .fni8 = gen_shl16_ins_i64, + .fniv = gen_shl_ins_vec, + .load_dest = true, + .opc = INDEX_op_shli_vec, + .vece = MO_16 }, + { .fni4 = gen_shl32_ins_i32, + .fniv = gen_shl_ins_vec, + .load_dest = true, + .opc = INDEX_op_shli_vec, + .vece = MO_32 }, + { .fni8 = gen_shl64_ins_i64, + .fniv = gen_shl_ins_vec, + .prefer_i64 = TCG_TARGET_REG_BITS == 64, + .load_dest = true, + .opc = INDEX_op_shli_vec, + .vece = MO_64 }, +}; + +static void gen_mla8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) +{ + gen_helper_neon_mul_u8(a, a, b); + gen_helper_neon_add_u8(d, d, a); +} + +static void gen_mls8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) +{ + gen_helper_neon_mul_u8(a, a, b); + gen_helper_neon_sub_u8(d, d, a); +} + +static void gen_mla16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) +{ + gen_helper_neon_mul_u16(a, a, b); + gen_helper_neon_add_u16(d, d, a); +} + +static void gen_mls16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) +{ + gen_helper_neon_mul_u16(a, a, b); + gen_helper_neon_sub_u16(d, d, a); +} + +static void gen_mla32_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) +{ + tcg_gen_mul_i32(a, a, b); + tcg_gen_add_i32(d, d, a); +} + +static void gen_mls32_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) +{ + tcg_gen_mul_i32(a, a, b); + tcg_gen_sub_i32(d, d, a); +} + +static void gen_mla64_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) +{ + tcg_gen_mul_i64(a, a, b); + tcg_gen_add_i64(d, d, a); +} + +static void gen_mls64_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) +{ + tcg_gen_mul_i64(a, a, b); + tcg_gen_sub_i64(d, d, a); +} + +static void gen_mla_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b) +{ + tcg_gen_mul_vec(vece, a, a, b); + tcg_gen_add_vec(vece, d, d, a); +} + +static void gen_mls_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b) +{ + tcg_gen_mul_vec(vece, a, a, b); + tcg_gen_sub_vec(vece, d, d, a); +} + +/* Note that while NEON does not support VMLA and VMLS as 64-bit ops, + * these tables are shared with AArch64 which does support them. 
+ */ +const GVecGen3 mla_op[4] = { + { .fni4 = gen_mla8_i32, + .fniv = gen_mla_vec, + .opc = INDEX_op_mul_vec, + .load_dest = true, + .vece = MO_8 }, + { .fni4 = gen_mla16_i32, + .fniv = gen_mla_vec, + .opc = INDEX_op_mul_vec, + .load_dest = true, + .vece = MO_16 }, + { .fni4 = gen_mla32_i32, + .fniv = gen_mla_vec, + .opc = INDEX_op_mul_vec, + .load_dest = true, + .vece = MO_32 }, + { .fni8 = gen_mla64_i64, + .fniv = gen_mla_vec, + .opc = INDEX_op_mul_vec, + .prefer_i64 = TCG_TARGET_REG_BITS == 64, + .load_dest = true, + .vece = MO_64 }, +}; + +const GVecGen3 mls_op[4] = { + { .fni4 = gen_mls8_i32, + .fniv = gen_mls_vec, + .opc = INDEX_op_mul_vec, + .load_dest = true, + .vece = MO_8 }, + { .fni4 = gen_mls16_i32, + .fniv = gen_mls_vec, + .opc = INDEX_op_mul_vec, + .load_dest = true, + .vece = MO_16 }, + { .fni4 = gen_mls32_i32, + .fniv = gen_mls_vec, + .opc = INDEX_op_mul_vec, + .load_dest = true, + .vece = MO_32 }, + { .fni8 = gen_mls64_i64, + .fniv = gen_mls_vec, + .opc = INDEX_op_mul_vec, + .prefer_i64 = TCG_TARGET_REG_BITS == 64, + .load_dest = true, + .vece = MO_64 }, +}; + +/* CMTST : test is "if (X & Y != 0)". */ +static void gen_cmtst_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b) +{ + tcg_gen_and_i32(d, a, b); + tcg_gen_setcondi_i32(TCG_COND_NE, d, d, 0); + tcg_gen_neg_i32(d, d); +} + +void gen_cmtst_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b) +{ + tcg_gen_and_i64(d, a, b); + tcg_gen_setcondi_i64(TCG_COND_NE, d, d, 0); + tcg_gen_neg_i64(d, d); +} + +static void gen_cmtst_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b) +{ + tcg_gen_and_vec(vece, d, a, b); + tcg_gen_dupi_vec(vece, a, 0); + tcg_gen_cmp_vec(TCG_COND_NE, vece, d, d, a); +} + +const GVecGen3 cmtst_op[4] = { + { .fni4 = gen_helper_neon_tst_u8, + .fniv = gen_cmtst_vec, + .vece = MO_8 }, + { .fni4 = gen_helper_neon_tst_u16, + .fniv = gen_cmtst_vec, + .vece = MO_16 }, + { .fni4 = gen_cmtst_i32, + .fniv = gen_cmtst_vec, + .vece = MO_32 }, + { .fni8 = gen_cmtst_i64, + .fniv = gen_cmtst_vec, + .prefer_i64 = TCG_TARGET_REG_BITS == 64, + .vece = MO_64 }, +}; + /* Translate a NEON data processing instruction. Return nonzero if the instruction is invalid. We process data in a mixture of 32-bit and 64-bit chunks. @@ -5709,14 +6157,15 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) { int op; int q; - int rd, rn, rm; + int rd, rn, rm, rd_ofs, rn_ofs, rm_ofs; int size; int shift; int pass; int count; int pairwise; int u; - uint32_t imm, mask; + int vec_size; + uint32_t imm; TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5; TCGv_ptr ptr1, ptr2, ptr3; TCGv_i64 tmp64; @@ -5727,7 +6176,7 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) */ if (s->fp_excp_el) { gen_exception_insn(s, 4, EXCP_UDEF, - syn_fp_access_trap(1, 0xe, false), s->fp_excp_el); + syn_simd_access_trap(1, 0xe, false), s->fp_excp_el); return 0; } @@ -5739,6 +6188,11 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) VFP_DREG_N(rn, insn); VFP_DREG_M(rm, insn); size = (insn >> 20) & 3; + vec_size = q ? 16 : 8; + rd_ofs = neon_reg_offset(rd, 0); + rn_ofs = neon_reg_offset(rn, 0); + rm_ofs = neon_reg_offset(rm, 0); + if ((insn & (1 << 23)) == 0) { /* Three register same length. 
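[Annotation: gen_cmtst_i32() above builds the all-ones/all-zeros lane result that CMTST requires from a compare yielding 0 or 1 followed by a negate, since -1 is all bits set in two's complement. A host-side equivalent:]

    #include <assert.h>
    #include <stdint.h>

    /* CMTST: all-ones where (a & b) != 0, all-zeros elsewhere. */
    static uint32_t cmtst32(uint32_t a, uint32_t b)
    {
        uint32_t d = a & b;
        d = (d != 0);        /* setcond: 0 or 1 */
        return -d;           /* neg: 0 or 0xffffffff */
    }

    int main(void)
    {
        assert(cmtst32(0x10, 0x30) == 0xffffffffu);
        assert(cmtst32(0x0f, 0xf0) == 0);
        return 0;
    }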
*/ op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1); @@ -5763,7 +6217,7 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) return 1; } if (!u) { /* SHA-1 */ - if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) { + if (!dc_isar_feature(aa32_sha1, s)) { return 1; } ptr1 = vfp_reg_ptr(true, rd); @@ -5773,7 +6227,7 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) gen_helper_crypto_sha1_3reg(ptr1, ptr2, ptr3, tmp4); tcg_temp_free_i32(tmp4); } else { /* SHA-256 */ - if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256) || size == 3) { + if (!dc_isar_feature(aa32_sha2, s) || size == 3) { return 1; } ptr1 = vfp_reg_ptr(true, rd); @@ -5829,8 +6283,100 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) q, rd, rn, rm); } return 1; + + case NEON_3R_LOGIC: /* Logic ops. */ + switch ((u << 2) | size) { + case 0: /* VAND */ + tcg_gen_gvec_and(0, rd_ofs, rn_ofs, rm_ofs, + vec_size, vec_size); + break; + case 1: /* VBIC */ + tcg_gen_gvec_andc(0, rd_ofs, rn_ofs, rm_ofs, + vec_size, vec_size); + break; + case 2: + if (rn == rm) { + /* VMOV */ + tcg_gen_gvec_mov(0, rd_ofs, rn_ofs, vec_size, vec_size); + } else { + /* VORR */ + tcg_gen_gvec_or(0, rd_ofs, rn_ofs, rm_ofs, + vec_size, vec_size); + } + break; + case 3: /* VORN */ + tcg_gen_gvec_orc(0, rd_ofs, rn_ofs, rm_ofs, + vec_size, vec_size); + break; + case 4: /* VEOR */ + tcg_gen_gvec_xor(0, rd_ofs, rn_ofs, rm_ofs, + vec_size, vec_size); + break; + case 5: /* VBSL */ + tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, + vec_size, vec_size, &bsl_op); + break; + case 6: /* VBIT */ + tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, + vec_size, vec_size, &bit_op); + break; + case 7: /* VBIF */ + tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, + vec_size, vec_size, &bif_op); + break; + } + return 0; + + case NEON_3R_VADD_VSUB: + if (u) { + tcg_gen_gvec_sub(size, rd_ofs, rn_ofs, rm_ofs, + vec_size, vec_size); + } else { + tcg_gen_gvec_add(size, rd_ofs, rn_ofs, rm_ofs, + vec_size, vec_size); + } + return 0; + + case NEON_3R_VMUL: /* VMUL */ + if (u) { + /* Polynomial case allows only P8 and is handled below. */ + if (size != 0) { + return 1; + } + } else { + tcg_gen_gvec_mul(size, rd_ofs, rn_ofs, rm_ofs, + vec_size, vec_size); + return 0; + } + break; + + case NEON_3R_VML: /* VMLA, VMLS */ + tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size, + u ? &mls_op[size] : &mla_op[size]); + return 0; + + case NEON_3R_VTST_VCEQ: + if (u) { /* VCEQ */ + tcg_gen_gvec_cmp(TCG_COND_EQ, size, rd_ofs, rn_ofs, rm_ofs, + vec_size, vec_size); + } else { /* VTST */ + tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, + vec_size, vec_size, &cmtst_op[size]); + } + return 0; + + case NEON_3R_VCGT: + tcg_gen_gvec_cmp(u ? TCG_COND_GTU : TCG_COND_GT, size, + rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size); + return 0; + + case NEON_3R_VCGE: + tcg_gen_gvec_cmp(u ? TCG_COND_GEU : TCG_COND_GE, size, + rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size); + return 0; } - if (size == 3 && op != NEON_3R_LOGIC) { + + if (size == 3) { /* 64-bit element instructions. */ for (pass = 0; pass < (q ? 
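[Annotation: the tcg_gen_gvec_* calls introduced here take byte offsets into the vCPU state (rd_ofs, rn_ofs, rm_ofs) plus an operation size, rather than loaded temporaries; conceptually they are memory-to-memory operations over oprsz bytes of the register file. A host-side caricature of that calling convention, assuming nothing about the real gvec internals:]

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    /* Toy analogue of tcg_gen_gvec_and: d[0..oprsz) = a & b, all three
     * operands named by byte offsets into the CPU state block. */
    static void gvec_and(void *env, long dofs, long aofs, long bofs,
                         long oprsz)
    {
        uint8_t *base = env;
        for (long i = 0; i < oprsz; i++) {
            base[dofs + i] = base[aofs + i] & base[bofs + i];
        }
    }

    int main(void)
    {
        uint8_t env[24];
        memset(env, 0xff, 8);        /* "d" register at offset 0 */
        memset(env + 8, 0x0f, 8);    /* "a" at offset 8 */
        memset(env + 16, 0x3c, 8);   /* "b" at offset 16 */
        gvec_and(env, 0, 8, 16, 8);
        assert(env[0] == 0x0c);
        return 0;
    }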
2 : 1); pass++) { neon_load_reg64(cpu_V0, rn + pass); @@ -5886,13 +6432,6 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) cpu_V1, cpu_V0); } break; - case NEON_3R_VADD_VSUB: - if (u) { - tcg_gen_sub_i64(CPU_V001); - } else { - tcg_gen_add_i64(CPU_V001); - } - break; default: abort(); } @@ -5942,12 +6481,6 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) return 1; } break; - case NEON_3R_VMUL: - if (u && (size != 0)) { - /* UNDEF on invalid size for polynomial subcase */ - return 1; - } - break; case NEON_3R_VFM_VQRDMLSH: if (!arm_dc_feature(s, ARM_FEATURE_VFP4)) { return 1; @@ -5988,52 +6521,12 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) case NEON_3R_VRHADD: GEN_NEON_INTEGER_OP(rhadd); break; - case NEON_3R_LOGIC: /* Logic ops. */ - switch ((u << 2) | size) { - case 0: /* VAND */ - tcg_gen_and_i32(tmp, tmp, tmp2); - break; - case 1: /* BIC */ - tcg_gen_andc_i32(tmp, tmp, tmp2); - break; - case 2: /* VORR */ - tcg_gen_or_i32(tmp, tmp, tmp2); - break; - case 3: /* VORN */ - tcg_gen_orc_i32(tmp, tmp, tmp2); - break; - case 4: /* VEOR */ - tcg_gen_xor_i32(tmp, tmp, tmp2); - break; - case 5: /* VBSL */ - tmp3 = neon_load_reg(rd, pass); - gen_neon_bsl(tmp, tmp, tmp2, tmp3); - tcg_temp_free_i32(tmp3); - break; - case 6: /* VBIT */ - tmp3 = neon_load_reg(rd, pass); - gen_neon_bsl(tmp, tmp, tmp3, tmp2); - tcg_temp_free_i32(tmp3); - break; - case 7: /* VBIF */ - tmp3 = neon_load_reg(rd, pass); - gen_neon_bsl(tmp, tmp3, tmp, tmp2); - tcg_temp_free_i32(tmp3); - break; - } - break; case NEON_3R_VHSUB: GEN_NEON_INTEGER_OP(hsub); break; case NEON_3R_VQSUB: GEN_NEON_INTEGER_OP_ENV(qsub); break; - case NEON_3R_VCGT: - GEN_NEON_INTEGER_OP(cgt); - break; - case NEON_3R_VCGE: - GEN_NEON_INTEGER_OP(cge); - break; case NEON_3R_VSHL: GEN_NEON_INTEGER_OP(shl); break; @@ -6061,61 +6554,9 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) tmp2 = neon_load_reg(rd, pass); gen_neon_add(size, tmp, tmp2); break; - case NEON_3R_VADD_VSUB: - if (!u) { /* VADD */ - gen_neon_add(size, tmp, tmp2); - } else { /* VSUB */ - switch (size) { - case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break; - case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break; - case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break; - default: abort(); - } - } - break; - case NEON_3R_VTST_VCEQ: - if (!u) { /* VTST */ - switch (size) { - case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break; - case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break; - case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break; - default: abort(); - } - } else { /* VCEQ */ - switch (size) { - case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break; - case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break; - case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break; - default: abort(); - } - } - break; - case NEON_3R_VML: /* VMLA, VMLAL, VMLS,VMLSL */ - switch (size) { - case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break; - case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break; - case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break; - default: abort(); - } - tcg_temp_free_i32(tmp2); - tmp2 = neon_load_reg(rd, pass); - if (u) { /* VMLS */ - gen_neon_rsb(size, tmp, tmp2); - } else { /* VMLA */ - gen_neon_add(size, tmp, tmp2); - } - break; case NEON_3R_VMUL: - if (u) { /* polynomial */ - gen_helper_neon_mul_p8(tmp, tmp, tmp2); - } else { /* Integer */ - switch (size) { - case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break; - case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break; - case 2: tcg_gen_mul_i32(tmp, tmp, 
tmp2); break; - default: abort(); - } - } + /* VMUL.P8; other cases already eliminated. */ + gen_helper_neon_mul_p8(tmp, tmp, tmp2); break; case NEON_3R_VPMAX: GEN_NEON_INTEGER_OP(pmax); @@ -6297,8 +6738,6 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) size--; } shift = (insn >> 16) & ((1 << (3 + size)) - 1); - /* To avoid excessive duplication of ops we implement shift - by immediate using the variable shift operations. */ if (op < 8) { /* Shift by immediate: VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */ @@ -6310,43 +6749,99 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) } /* Right shifts are encoded as N - shift, where N is the element size in bits. */ - if (op <= 4) + if (op <= 4) { shift = shift - (1 << (size + 3)); + } + + switch (op) { + case 0: /* VSHR */ + /* Right shift comes here negative. */ + shift = -shift; + /* Shifts larger than the element size are architecturally + * valid. Unsigned results in all zeros; signed results + * in all sign bits. + */ + if (!u) { + tcg_gen_gvec_sari(size, rd_ofs, rm_ofs, + MIN(shift, (8 << size) - 1), + vec_size, vec_size); + } else if (shift >= 8 << size) { + tcg_gen_gvec_dup8i(rd_ofs, vec_size, vec_size, 0); + } else { + tcg_gen_gvec_shri(size, rd_ofs, rm_ofs, shift, + vec_size, vec_size); + } + return 0; + + case 1: /* VSRA */ + /* Right shift comes here negative. */ + shift = -shift; + /* Shifts larger than the element size are architecturally + * valid. Unsigned results in all zeros; signed results + * in all sign bits. + */ + if (!u) { + tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size, vec_size, + MIN(shift, (8 << size) - 1), + &ssra_op[size]); + } else if (shift >= 8 << size) { + /* rd += 0 */ + } else { + tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size, vec_size, + shift, &usra_op[size]); + } + return 0; + + case 4: /* VSRI */ + if (!u) { + return 1; + } + /* Right shift comes here negative. */ + shift = -shift; + /* Shift out of range leaves destination unchanged. */ + if (shift < 8 << size) { + tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size, vec_size, + shift, &sri_op[size]); + } + return 0; + + case 5: /* VSHL, VSLI */ + if (u) { /* VSLI */ + /* Shift out of range leaves destination unchanged. */ + if (shift < 8 << size) { + tcg_gen_gvec_2i(rd_ofs, rm_ofs, vec_size, + vec_size, shift, &sli_op[size]); + } + } else { /* VSHL */ + /* Shifts larger than the element size are + * architecturally valid and results in zero. + */ + if (shift >= 8 << size) { + tcg_gen_gvec_dup8i(rd_ofs, vec_size, vec_size, 0); + } else { + tcg_gen_gvec_shli(size, rd_ofs, rm_ofs, shift, + vec_size, vec_size); + } + } + return 0; + } + if (size == 3) { count = q + 1; } else { count = q ? 4: 2; } - switch (size) { - case 0: - imm = (uint8_t) shift; - imm |= imm << 8; - imm |= imm << 16; - break; - case 1: - imm = (uint16_t) shift; - imm |= imm << 16; - break; - case 2: - case 3: - imm = shift; - break; - default: - abort(); - } + + /* To avoid excessive duplication of ops we implement shift + * by immediate using the variable shift operations. 
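[Annotation: the VSHR case above depends on two architectural facts spelled out in its comment: an immediate shift may equal the element width, which C shifts cannot express directly, so an unsigned full-width shift becomes an explicit zeroing and a signed one is clamped to width-1, which yields the same all-sign-bits result. A host-side model, assuming the usual arithmetic right shift of negative values:]

    #include <assert.h>
    #include <stdint.h>

    static int32_t vshr_s32(int32_t x, int shift)    /* 1 <= shift <= 32 */
    {
        /* Clamping to 31 matches the all-sign-bits result that a shift
         * of 32 produces architecturally (assumes arithmetic >> for
         * negative values, true of common compilers). */
        return x >> (shift > 31 ? 31 : shift);
    }

    static uint32_t vshr_u32(uint32_t x, int shift)  /* 1 <= shift <= 32 */
    {
        return shift > 31 ? 0 : x >> shift;
    }

    int main(void)
    {
        assert(vshr_s32(-8, 32) == -1);              /* all sign bits */
        assert(vshr_s32(8, 32) == 0);
        assert(vshr_u32(0x80000000u, 32) == 0);
        return 0;
    }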
+ */ + imm = dup_const(size, shift); for (pass = 0; pass < count; pass++) { if (size == 3) { neon_load_reg64(cpu_V0, rm + pass); tcg_gen_movi_i64(cpu_V1, imm); switch (op) { - case 0: /* VSHR */ - case 1: /* VSRA */ - if (u) - gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1); - else - gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1); - break; case 2: /* VRSHR */ case 3: /* VRSRA */ if (u) @@ -6354,10 +6849,6 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) else gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1); break; - case 4: /* VSRI */ - case 5: /* VSHL, VSLI */ - gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1); - break; case 6: /* VQSHLU */ gen_helper_neon_qshlu_s64(cpu_V0, cpu_env, cpu_V0, cpu_V1); @@ -6371,26 +6862,13 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) cpu_V0, cpu_V1); } break; + default: + g_assert_not_reached(); } - if (op == 1 || op == 3) { + if (op == 3) { /* Accumulate. */ neon_load_reg64(cpu_V1, rd + pass); tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1); - } else if (op == 4 || (op == 5 && u)) { - /* Insert */ - neon_load_reg64(cpu_V1, rd + pass); - uint64_t mask; - if (shift < -63 || shift > 63) { - mask = 0; - } else { - if (op == 4) { - mask = 0xffffffffffffffffull >> -shift; - } else { - mask = 0xffffffffffffffffull << shift; - } - } - tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask); - tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1); } neon_store_reg64(cpu_V0, rd + pass); } else { /* size < 3 */ @@ -6399,23 +6877,10 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) tmp2 = tcg_temp_new_i32(); tcg_gen_movi_i32(tmp2, imm); switch (op) { - case 0: /* VSHR */ - case 1: /* VSRA */ - GEN_NEON_INTEGER_OP(shl); - break; case 2: /* VRSHR */ case 3: /* VRSRA */ GEN_NEON_INTEGER_OP(rshl); break; - case 4: /* VSRI */ - case 5: /* VSHL, VSLI */ - switch (size) { - case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break; - case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break; - case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break; - default: abort(); - } - break; case 6: /* VQSHLU */ switch (size) { case 0: @@ -6437,50 +6902,16 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) case 7: /* VQSHL */ GEN_NEON_INTEGER_OP_ENV(qshl); break; + default: + g_assert_not_reached(); } tcg_temp_free_i32(tmp2); - if (op == 1 || op == 3) { + if (op == 3) { /* Accumulate. 
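[Annotation: dup_const() above replaces the removed hand-rolled imm |= imm << 8 ... replication. Its effect is easy to model with lane-pattern multiplications; the sketch below is a stand-in rather than QEMU's definition:]

    #include <assert.h>
    #include <stdint.h>

    /* Replicate c across all 2**vece-byte lanes of a 64-bit word. */
    static uint64_t dup_const_sketch(int vece, uint64_t c)
    {
        switch (vece) {
        case 0:  return 0x0101010101010101ull * (uint8_t)c;   /* MO_8  */
        case 1:  return 0x0001000100010001ull * (uint16_t)c;  /* MO_16 */
        case 2:  return 0x0000000100000001ull * (uint32_t)c;  /* MO_32 */
        default: return c;                                    /* MO_64 */
        }
    }

    int main(void)
    {
        assert(dup_const_sketch(0, 0xab) == 0xababababababababull);
        assert(dup_const_sketch(1, 0x1234) == 0x1234123412341234ull);
        return 0;
    }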
*/ tmp2 = neon_load_reg(rd, pass); gen_neon_add(size, tmp, tmp2); tcg_temp_free_i32(tmp2); - } else if (op == 4 || (op == 5 && u)) { - /* Insert */ - switch (size) { - case 0: - if (op == 4) - mask = 0xff >> -shift; - else - mask = (uint8_t)(0xff << shift); - mask |= mask << 8; - mask |= mask << 16; - break; - case 1: - if (op == 4) - mask = 0xffff >> -shift; - else - mask = (uint16_t)(0xffff << shift); - mask |= mask << 16; - break; - case 2: - if (shift < -31 || shift > 31) { - mask = 0; - } else { - if (op == 4) - mask = 0xffffffffu >> -shift; - else - mask = 0xffffffffu << shift; - } - break; - default: - abort(); - } - tmp2 = neon_load_reg(rd, pass); - tcg_gen_andi_i32(tmp, tmp, mask); - tcg_gen_andi_i32(tmp2, tmp2, ~mask); - tcg_gen_or_i32(tmp, tmp, tmp2); - tcg_temp_free_i32(tmp2); } neon_store_reg(rd, pass, tmp); } @@ -6629,7 +7060,8 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) return 1; } } else { /* (insn & 0x00380080) == 0 */ - int invert; + int invert, reg_ofs, vec_size; + if (q && (rd & 1)) { return 1; } @@ -6669,8 +7101,9 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) break; case 14: imm |= (imm << 8) | (imm << 16) | (imm << 24); - if (invert) + if (invert) { imm = ~imm; + } break; case 15: if (invert) { @@ -6680,36 +7113,45 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) | ((imm & 0x40) ? (0x1f << 25) : (1 << 30)); break; } - if (invert) + if (invert) { imm = ~imm; + } - for (pass = 0; pass < (q ? 4 : 2); pass++) { - if (op & 1 && op < 12) { - tmp = neon_load_reg(rd, pass); - if (invert) { - /* The immediate value has already been inverted, so - BIC becomes AND. */ - tcg_gen_andi_i32(tmp, tmp, imm); - } else { - tcg_gen_ori_i32(tmp, tmp, imm); - } + reg_ofs = neon_reg_offset(rd, 0); + vec_size = q ? 16 : 8; + + if (op & 1 && op < 12) { + if (invert) { + /* The immediate value has already been inverted, + * so BIC becomes AND. + */ + tcg_gen_gvec_andi(MO_32, reg_ofs, reg_ofs, imm, + vec_size, vec_size); } else { - /* VMOV, VMVN. */ - tmp = tcg_temp_new_i32(); - if (op == 14 && invert) { + tcg_gen_gvec_ori(MO_32, reg_ofs, reg_ofs, imm, + vec_size, vec_size); + } + } else { + /* VMOV, VMVN. 
*/ + if (op == 14 && invert) { + TCGv_i64 t64 = tcg_temp_new_i64(); + + for (pass = 0; pass <= q; ++pass) { + uint64_t val = 0; int n; - uint32_t val; - val = 0; - for (n = 0; n < 4; n++) { - if (imm & (1 << (n + (pass & 1) * 4))) - val |= 0xff << (n * 8); + + for (n = 0; n < 8; n++) { + if (imm & (1 << (n + pass * 8))) { + val |= 0xffull << (n * 8); + } } - tcg_gen_movi_i32(tmp, val); - } else { - tcg_gen_movi_i32(tmp, imm); + tcg_gen_movi_i64(t64, val); + neon_store_reg64(t64, rd + pass); } + tcg_temp_free_i64(t64); + } else { + tcg_gen_gvec_dup32i(reg_ofs, vec_size, vec_size, imm); } - neon_store_reg(rd, pass, tmp); } } } else { /* (insn & 0x00800010 == 0x00800000) */ @@ -6768,7 +7210,7 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) if (op == 14 && size == 2) { TCGv_i64 tcg_rn, tcg_rm, tcg_rd; - if (!arm_dc_feature(s, ARM_FEATURE_V8_PMULL)) { + if (!dc_isar_feature(aa32_pmull, s)) { return 1; } tcg_rn = tcg_temp_new_i64(); @@ -7085,7 +7527,7 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) { NeonGenThreeOpEnvFn *fn; - if (!arm_dc_feature(s, ARM_FEATURE_V8_RDM)) { + if (!dc_isar_feature(aa32_rdm, s)) { return 1; } if (u && ((rd | rn) & 1)) { @@ -7359,8 +7801,7 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) break; } case NEON_2RM_AESE: case NEON_2RM_AESMC: - if (!arm_dc_feature(s, ARM_FEATURE_V8_AES) - || ((rm | rd) & 1)) { + if (!dc_isar_feature(aa32_aes, s) || ((rm | rd) & 1)) { return 1; } ptr1 = vfp_reg_ptr(true, rd); @@ -7381,8 +7822,7 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) tcg_temp_free_i32(tmp3); break; case NEON_2RM_SHA1H: - if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1) - || ((rm | rd) & 1)) { + if (!dc_isar_feature(aa32_sha1, s) || ((rm | rd) & 1)) { return 1; } ptr1 = vfp_reg_ptr(true, rd); @@ -7399,10 +7839,10 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) } /* bit 6 (q): set -> SHA256SU0, cleared -> SHA1SU1 */ if (q) { - if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256)) { + if (!dc_isar_feature(aa32_sha2, s)) { return 1; } - } else if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) { + } else if (!dc_isar_feature(aa32_sha1, s)) { return 1; } ptr1 = vfp_reg_ptr(true, rd); @@ -7415,6 +7855,14 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) tcg_temp_free_ptr(ptr1); tcg_temp_free_ptr(ptr2); break; + + case NEON_2RM_VMVN: + tcg_gen_gvec_not(0, rd_ofs, rm_ofs, vec_size, vec_size); + break; + case NEON_2RM_VNEG: + tcg_gen_gvec_neg(size, rd_ofs, rm_ofs, vec_size, vec_size); + break; + default: elementwise: for (pass = 0; pass < (q ? 
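[Annotation: the op == 14 && invert branch above implements the cmode 1110, op 1 form of the Advanced SIMD modified immediate: each bit of the 8-bit immediate expands to a full 0x00 or 0xff byte, which is why the constant is now built per 64-bit half. The expansion in isolation:]

    #include <assert.h>
    #include <stdint.h>

    /* Bit n of imm set -> byte n of the result is 0xff. */
    static uint64_t expand_imm8_to_bytes(uint8_t imm)
    {
        uint64_t val = 0;
        for (int n = 0; n < 8; n++) {
            if (imm & (1 << n)) {
                val |= 0xffull << (n * 8);
            }
        }
        return val;
    }

    int main(void)
    {
        assert(expand_imm8_to_bytes(0x81) == 0xff000000000000ffull);
        assert(expand_imm8_to_bytes(0x00) == 0);
        return 0;
    }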
4 : 2); pass++) { @@ -7455,9 +7903,6 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) case NEON_2RM_VCNT: gen_helper_neon_cnt_u8(tmp, tmp); break; - case NEON_2RM_VMVN: - tcg_gen_not_i32(tmp, tmp); - break; case NEON_2RM_VQABS: switch (size) { case 0: @@ -7530,11 +7975,6 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) default: abort(); } break; - case NEON_2RM_VNEG: - tmp2 = tcg_const_i32(0); - gen_neon_rsb(size, tmp, tmp2); - tcg_temp_free_i32(tmp2); - break; case NEON_2RM_VCGT0_F: { TCGv_ptr fpstatus = get_fpstatus_ptr(1); @@ -7757,28 +8197,25 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) tcg_temp_free_i32(tmp); } else if ((insn & 0x380) == 0) { /* VDUP */ + int element; + TCGMemOp size; + if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) { return 1; } - if (insn & (1 << 19)) { - tmp = neon_load_reg(rm, 1); - } else { - tmp = neon_load_reg(rm, 0); - } if (insn & (1 << 16)) { - gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8); + size = MO_8; + element = (insn >> 17) & 7; } else if (insn & (1 << 17)) { - if ((insn >> 18) & 1) - gen_neon_dup_high16(tmp); - else - gen_neon_dup_low16(tmp); - } - for (pass = 0; pass < (q ? 4 : 2); pass++) { - tmp2 = tcg_temp_new_i32(); - tcg_gen_mov_i32(tmp2, tmp); - neon_store_reg(rd, pass, tmp2); + size = MO_16; + element = (insn >> 18) & 3; + } else { + size = MO_32; + element = (insn >> 19) & 1; } - tcg_temp_free_i32(tmp); + tcg_gen_gvec_dup_mem(size, neon_reg_offset(rd, 0), + neon_element_offset(rm, element, size), + q ? 16 : 8, q ? 16 : 8); } else { return 1; } @@ -7813,8 +8250,8 @@ static int disas_neon_insn_3same_ext(DisasContext *s, uint32_t insn) /* VCMLA -- 1111 110R R.1S .... .... 1000 ...0 .... */ int size = extract32(insn, 20, 1); data = extract32(insn, 23, 2); /* rot */ - if (!arm_dc_feature(s, ARM_FEATURE_V8_FCMA) - || (!size && !arm_dc_feature(s, ARM_FEATURE_V8_FP16))) { + if (!dc_isar_feature(aa32_vcma, s) + || (!size && !dc_isar_feature(aa32_fp16_arith, s))) { return 1; } fn_gvec_ptr = size ? gen_helper_gvec_fcmlas : gen_helper_gvec_fcmlah; @@ -7822,15 +8259,15 @@ static int disas_neon_insn_3same_ext(DisasContext *s, uint32_t insn) /* VCADD -- 1111 110R 1.0S .... .... 1000 ...0 .... */ int size = extract32(insn, 20, 1); data = extract32(insn, 24, 1); /* rot */ - if (!arm_dc_feature(s, ARM_FEATURE_V8_FCMA) - || (!size && !arm_dc_feature(s, ARM_FEATURE_V8_FP16))) { + if (!dc_isar_feature(aa32_vcma, s) + || (!size && !dc_isar_feature(aa32_fp16_arith, s))) { return 1; } fn_gvec_ptr = size ? gen_helper_gvec_fcadds : gen_helper_gvec_fcaddh; } else if ((insn & 0xfeb00f00) == 0xfc200d00) { /* V[US]DOT -- 1111 1100 0.10 .... .... 1101 .Q.U .... */ bool u = extract32(insn, 4, 1); - if (!arm_dc_feature(s, ARM_FEATURE_V8_DOTPROD)) { + if (!dc_isar_feature(aa32_dp, s)) { return 1; } fn_gvec = u ? 
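[Annotation: the rewritten VDUP (scalar) path above decodes the element size and index jointly from imm4, insn bits [19:16]: the position of the lowest set bit selects the size and the bits above it give the index. The same decode as a small function:]

    #include <assert.h>
    #include <stdint.h>

    static void decode_vdup_scalar(uint32_t insn, int *size_log2, int *element)
    {
        if (insn & (1 << 16)) {            /* imm4 = xxx1: bytes */
            *size_log2 = 0;
            *element = (insn >> 17) & 7;
        } else if (insn & (1 << 17)) {     /* imm4 = xx10: halfwords */
            *size_log2 = 1;
            *element = (insn >> 18) & 3;
        } else {                           /* imm4 = x100: words */
            *size_log2 = 2;
            *element = (insn >> 19) & 1;
        }
    }

    int main(void)
    {
        int size, elem;
        decode_vdup_scalar(5u << 16, &size, &elem);   /* imm4 = 0101 */
        assert(size == 0 && elem == 2);
        decode_vdup_scalar(6u << 16, &size, &elem);   /* imm4 = 0110 */
        assert(size == 1 && elem == 1);
        return 0;
    }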
gen_helper_gvec_udot_b : gen_helper_gvec_sdot_b; @@ -7840,7 +8277,7 @@ static int disas_neon_insn_3same_ext(DisasContext *s, uint32_t insn) if (s->fp_excp_el) { gen_exception_insn(s, 4, EXCP_UDEF, - syn_fp_access_trap(1, 0xe, false), s->fp_excp_el); + syn_simd_access_trap(1, 0xe, false), s->fp_excp_el); return 0; } if (!s->vfp_enabled) { @@ -7892,11 +8329,11 @@ static int disas_neon_insn_2reg_scalar_ext(DisasContext *s, uint32_t insn) int size = extract32(insn, 23, 1); int index; - if (!arm_dc_feature(s, ARM_FEATURE_V8_FCMA)) { + if (!dc_isar_feature(aa32_vcma, s)) { return 1; } if (size == 0) { - if (!arm_dc_feature(s, ARM_FEATURE_V8_FP16)) { + if (!dc_isar_feature(aa32_fp16_arith, s)) { return 1; } /* For fp16, rm is just Vm, and index is M. */ @@ -7913,7 +8350,7 @@ static int disas_neon_insn_2reg_scalar_ext(DisasContext *s, uint32_t insn) } else if ((insn & 0xffb00f00) == 0xfe200d00) { /* V[US]DOT -- 1111 1110 0.10 .... .... 1101 .Q.U .... */ int u = extract32(insn, 4, 1); - if (!arm_dc_feature(s, ARM_FEATURE_V8_DOTPROD)) { + if (!dc_isar_feature(aa32_dp, s)) { return 1; } fn_gvec = u ? gen_helper_gvec_udot_idx_b : gen_helper_gvec_sdot_idx_b; @@ -7926,7 +8363,7 @@ static int disas_neon_insn_2reg_scalar_ext(DisasContext *s, uint32_t insn) if (s->fp_excp_el) { gen_exception_insn(s, 4, EXCP_UDEF, - syn_fp_access_trap(1, 0xe, false), s->fp_excp_el); + syn_simd_access_trap(1, 0xe, false), s->fp_excp_el); return 0; } if (!s->vfp_enabled) { @@ -8889,8 +9326,7 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn) * op1 == 3 is UNPREDICTABLE but handle as UNDEFINED. * Bits 8, 10 and 11 should be zero. */ - if (!arm_dc_feature(s, ARM_FEATURE_CRC) || op1 == 0x3 || - (c & 0xd) != 0) { + if (!dc_isar_feature(aa32_crc32, s) || op1 == 0x3 || (c & 0xd) != 0) { goto illegal_op; } @@ -9758,7 +10194,7 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn) case 1: case 3: /* SDIV, UDIV */ - if (!arm_dc_feature(s, ARM_FEATURE_ARM_DIV)) { + if (!dc_isar_feature(arm_div, s)) { goto illegal_op; } if (((insn >> 5) & 7) || (rd != 15)) { @@ -10785,7 +11221,7 @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn) case 0x28: case 0x29: case 0x2a: - if (!arm_dc_feature(s, ARM_FEATURE_CRC)) { + if (!dc_isar_feature(aa32_crc32, s)) { goto illegal_op; } break; @@ -10966,7 +11402,7 @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn) tmp2 = load_reg(s, rm); if ((op & 0x50) == 0x10) { /* sdiv, udiv */ - if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DIV)) { + if (!dc_isar_feature(thumb_div, s)) { goto illegal_op; } if (op & 0x20) @@ -12586,6 +13022,7 @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) CPUARMState *env = cs->env_ptr; ARMCPU *cpu = arm_env_get_cpu(env); + dc->isar = &cpu->isar; dc->pc = dc->base.pc_first; dc->condjmp = 0; @@ -12703,7 +13140,6 @@ static void arm_tr_tb_start(DisasContextBase *dcbase, CPUState *cpu) tcg_gen_movi_i32(tmp, 0); store_cpu_field(tmp, condexec_bits); } - tcg_clear_temp_count(); } static void arm_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu) @@ -13092,11 +13528,6 @@ void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb) translator_loop(ops, &dc.base, cpu, tb); } -static const char *cpu_mode_names[16] = { - "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt", - "???", "???", "hyp", "und", "???", "???", "???", "sys" -}; - void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf, int flags) { @@ -13162,7 +13593,7 @@ void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function 
cpu_fprintf, psr & CPSR_V ? 'V' : '-', psr & CPSR_T ? 'T' : 'A', ns_status, - cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26); + aarch32_mode_name(psr), (psr & 0x10) ? 32 : 26); } if (flags & CPU_DUMP_FPU) { diff --git a/target/arm/translate.h b/target/arm/translate.h index c1b65f3efb..1550aa8bc7 100644 --- a/target/arm/translate.h +++ b/target/arm/translate.h @@ -7,6 +7,7 @@ /* internal defines */ typedef struct DisasContext { DisasContextBase base; + const ARMISARegisters *isar; target_ulong pc; target_ulong page_start; @@ -190,4 +191,24 @@ static inline TCGv_i32 get_ahp_flag(void) return ret; } + +/* Vector operations shared between ARM and AArch64. */ +extern const GVecGen3 bsl_op; +extern const GVecGen3 bit_op; +extern const GVecGen3 bif_op; +extern const GVecGen3 mla_op[4]; +extern const GVecGen3 mls_op[4]; +extern const GVecGen3 cmtst_op[4]; +extern const GVecGen2i ssra_op[4]; +extern const GVecGen2i usra_op[4]; +extern const GVecGen2i sri_op[4]; +extern const GVecGen2i sli_op[4]; +void gen_cmtst_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b); + +/* + * Forward to the isar_feature_* tests given a DisasContext pointer. + */ +#define dc_isar_feature(name, ctx) \ + ({ DisasContext *ctx_ = (ctx); isar_feature_##name(ctx_->isar); }) + #endif /* TARGET_ARM_TRANSLATE_H */ diff --git a/target/cris/translate.c b/target/cris/translate.c index 4ae1c04daf..11b2c11174 100644 --- a/target/cris/translate.c +++ b/target/cris/translate.c @@ -137,11 +137,7 @@ typedef struct DisasContext { static void gen_BUG(DisasContext *dc, const char *file, int line) { - fprintf(stderr, "BUG: pc=%x %s %d\n", dc->pc, file, line); - if (qemu_log_separate()) { - qemu_log("BUG: pc=%x %s %d\n", dc->pc, file, line); - } - cpu_abort(CPU(dc->cpu), "%s:%d\n", file, line); + cpu_abort(CPU(dc->cpu), "%s:%d pc=%x\n", file, line, dc->pc); } static const char *regnames_v32[] = diff --git a/target/hppa/mem_helper.c b/target/hppa/mem_helper.c index ab160c2a74..aecf3075f6 100644 --- a/target/hppa/mem_helper.c +++ b/target/hppa/mem_helper.c @@ -137,7 +137,8 @@ int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx, if (unlikely(!(prot & type))) { /* The access isn't allowed -- Inst/Data Memory Protection Fault. */ - ret = (type & PAGE_EXEC ? EXCP_IMP : EXCP_DMP); + ret = (type & PAGE_EXEC ? EXCP_IMP : + prot & PAGE_READ ? 
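[Annotation: the dc_isar_feature() macro added to translate.h uses a GNU C statement expression (a GCC/Clang extension QEMU already relies on) so that token pasting selects the right isar_feature_* predicate while the context argument is evaluated exactly once. A minimal standalone analogue of the pattern, with hypothetical names:]

    #include <assert.h>

    typedef struct {
        int flags;
    } Ctx;

    static inline int feature_foo(const Ctx *c)   /* isar_feature_* stand-in */
    {
        return c->flags & 1;
    }

    #define ctx_feature(name, ctx) \
        ({ Ctx *ctx_ = (ctx); feature_##name(ctx_); })

    int main(void)
    {
        Ctx c = { .flags = 1 };
        assert(ctx_feature(foo, &c));
        return 0;
    }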
EXCP_DMP : EXCP_DMAR); goto egress; } diff --git a/target/i386/Makefile.objs b/target/i386/Makefile.objs index 04678f5503..32bf966300 100644 --- a/target/i386/Makefile.objs +++ b/target/i386/Makefile.objs @@ -3,17 +3,20 @@ obj-$(CONFIG_TCG) += translate.o obj-$(CONFIG_TCG) += bpt_helper.o cc_helper.o excp_helper.o fpu_helper.o obj-$(CONFIG_TCG) += int_helper.o mem_helper.o misc_helper.o mpx_helper.o obj-$(CONFIG_TCG) += seg_helper.o smm_helper.o svm_helper.o -obj-$(CONFIG_SOFTMMU) += machine.o arch_memory_mapping.o arch_dump.o monitor.o -obj-$(CONFIG_KVM) += kvm.o hyperv.o -obj-$(CONFIG_SEV) += sev.o +ifeq ($(CONFIG_SOFTMMU),y) +obj-y += machine.o arch_memory_mapping.o arch_dump.o monitor.o +obj-$(CONFIG_KVM) += kvm.o obj-$(call lnot,$(CONFIG_KVM)) += kvm-stub.o -obj-$(call lnot,$(CONFIG_SEV)) += sev-stub.o -# HAX support -ifdef CONFIG_WIN32 +obj-$(CONFIG_HYPERV) += hyperv.o +obj-$(call lnot,$(CONFIG_HYPERV)) += hyperv-stub.o +ifeq ($(CONFIG_WIN32),y) obj-$(CONFIG_HAX) += hax-all.o hax-mem.o hax-windows.o endif -ifdef CONFIG_DARWIN +ifeq ($(CONFIG_DARWIN),y) obj-$(CONFIG_HAX) += hax-all.o hax-mem.o hax-darwin.o obj-$(CONFIG_HVF) += hvf/ endif obj-$(CONFIG_WHPX) += whpx-all.o +endif +obj-$(CONFIG_SEV) += sev.o +obj-$(call lnot,$(CONFIG_SEV)) += sev-stub.o diff --git a/target/i386/cpu.c b/target/i386/cpu.c index c88876dfe3..1469a1be01 100644 --- a/target/i386/cpu.c +++ b/target/i386/cpu.c @@ -5123,14 +5123,15 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp) * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise * cs->nr_threads hasn't be populated yet and the checking is incorrect. */ - if (IS_AMD_CPU(env) && - !(env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_TOPOEXT) && - cs->nr_threads > 1 && !ht_warned) { - error_report("This family of AMD CPU doesn't support " - "hyperthreading(%d). Please configure -smp " - "options properly or try enabling topoext feature.", - cs->nr_threads); - ht_warned = true; + if (IS_AMD_CPU(env) && + !(env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_TOPOEXT) && + cs->nr_threads > 1 && !ht_warned) { + warn_report("This family of AMD CPU doesn't support " + "hyperthreading(%d)", + cs->nr_threads); + error_printf("Please configure -smp options properly" + " or try enabling topoext feature.\n"); + ht_warned = true; } x86_cpu_apic_realize(cpu, &local_err); @@ -5564,6 +5565,7 @@ static Property x86_cpu_properties[] = { DEFINE_PROP_BOOL("hv-frequencies", X86CPU, hyperv_frequencies, false), DEFINE_PROP_BOOL("hv-reenlightenment", X86CPU, hyperv_reenlightenment, false), DEFINE_PROP_BOOL("hv-tlbflush", X86CPU, hyperv_tlbflush, false), + DEFINE_PROP_BOOL("hv-ipi", X86CPU, hyperv_ipi, false), DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true), DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false), DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true), @@ -5606,6 +5608,8 @@ static Property x86_cpu_properties[] = { * to the specific Windows version being used." 
*/ DEFINE_PROP_INT32("x-hv-max-vps", X86CPU, hv_max_vps, -1), + DEFINE_PROP_BOOL("x-hv-synic-kvm-only", X86CPU, hyperv_synic_kvm_only, + false), DEFINE_PROP_END_OF_LIST() }; diff --git a/target/i386/cpu.h b/target/i386/cpu.h index 730c06f80a..663f3a5e67 100644 --- a/target/i386/cpu.h +++ b/target/i386/cpu.h @@ -1378,10 +1378,12 @@ struct X86CPU { bool hyperv_vpindex; bool hyperv_runtime; bool hyperv_synic; + bool hyperv_synic_kvm_only; bool hyperv_stimer; bool hyperv_frequencies; bool hyperv_reenlightenment; bool hyperv_tlbflush; + bool hyperv_ipi; bool check_cpuid; bool enforce_cpuid; bool expose_kvm; diff --git a/target/i386/hvf/hvf.c b/target/i386/hvf/hvf.c index 9f52bc413a..e193022c03 100644 --- a/target/i386/hvf/hvf.c +++ b/target/i386/hvf/hvf.c @@ -73,7 +73,6 @@ #include "target/i386/cpu.h" HVFState *hvf_state; -int hvf_disabled = 1; static void assert_hvf_ok(hv_return_t ret) { @@ -604,11 +603,6 @@ int hvf_init_vcpu(CPUState *cpu) return 0; } -void hvf_disable(int shouldDisable) -{ - hvf_disabled = shouldDisable; -} - static void hvf_store_events(CPUState *cpu, uint32_t ins_len, uint64_t idtvec_info) { X86CPU *x86_cpu = X86_CPU(cpu); @@ -934,7 +928,7 @@ int hvf_vcpu_exec(CPUState *cpu) return ret; } -static bool hvf_allowed; +bool hvf_allowed; static int hvf_accel_init(MachineState *ms) { @@ -942,7 +936,6 @@ static int hvf_accel_init(MachineState *ms) hv_return_t ret; HVFState *s; - hvf_disable(0); ret = hv_vm_create(HV_VM_DEFAULT); assert_hvf_ok(ret); diff --git a/target/i386/hvf/x86_decode.c b/target/i386/hvf/x86_decode.c index 2d7540fe7c..2e33b69541 100644 --- a/target/i386/hvf/x86_decode.c +++ b/target/i386/hvf/x86_decode.c @@ -113,7 +113,8 @@ static void decode_modrm_reg(CPUX86State *env, struct x86_decode *decode, { op->type = X86_VAR_REG; op->reg = decode->modrm.reg; - op->ptr = get_reg_ref(env, op->reg, decode->rex.r, decode->operand_size); + op->ptr = get_reg_ref(env, op->reg, decode->rex.rex, decode->rex.r, + decode->operand_size); } static void decode_rax(CPUX86State *env, struct x86_decode *decode, @@ -121,7 +122,8 @@ static void decode_rax(CPUX86State *env, struct x86_decode *decode, { op->type = X86_VAR_REG; op->reg = R_EAX; - op->ptr = get_reg_ref(env, op->reg, 0, decode->operand_size); + op->ptr = get_reg_ref(env, op->reg, decode->rex.rex, 0, + decode->operand_size); } static inline void decode_immediate(CPUX86State *env, struct x86_decode *decode, @@ -263,16 +265,16 @@ static void decode_incgroup(CPUX86State *env, struct x86_decode *decode) { decode->op[0].type = X86_VAR_REG; decode->op[0].reg = decode->opcode[0] - 0x40; - decode->op[0].ptr = get_reg_ref(env, decode->op[0].reg, decode->rex.b, - decode->operand_size); + decode->op[0].ptr = get_reg_ref(env, decode->op[0].reg, decode->rex.rex, + decode->rex.b, decode->operand_size); } static void decode_decgroup(CPUX86State *env, struct x86_decode *decode) { decode->op[0].type = X86_VAR_REG; decode->op[0].reg = decode->opcode[0] - 0x48; - decode->op[0].ptr = get_reg_ref(env, decode->op[0].reg, decode->rex.b, - decode->operand_size); + decode->op[0].ptr = get_reg_ref(env, decode->op[0].reg, decode->rex.rex, + decode->rex.b, decode->operand_size); } static void decode_incgroup2(CPUX86State *env, struct x86_decode *decode) @@ -288,16 +290,16 @@ static void decode_pushgroup(CPUX86State *env, struct x86_decode *decode) { decode->op[0].type = X86_VAR_REG; decode->op[0].reg = decode->opcode[0] - 0x50; - decode->op[0].ptr = get_reg_ref(env, decode->op[0].reg, decode->rex.b, - decode->operand_size); + decode->op[0].ptr = 
get_reg_ref(env, decode->op[0].reg, decode->rex.rex, + decode->rex.b, decode->operand_size); } static void decode_popgroup(CPUX86State *env, struct x86_decode *decode) { decode->op[0].type = X86_VAR_REG; decode->op[0].reg = decode->opcode[0] - 0x58; - decode->op[0].ptr = get_reg_ref(env, decode->op[0].reg, decode->rex.b, - decode->operand_size); + decode->op[0].ptr = get_reg_ref(env, decode->op[0].reg, decode->rex.rex, + decode->rex.b, decode->operand_size); } static void decode_jxx(CPUX86State *env, struct x86_decode *decode) @@ -378,16 +380,16 @@ static void decode_xchgroup(CPUX86State *env, struct x86_decode *decode) { decode->op[0].type = X86_VAR_REG; decode->op[0].reg = decode->opcode[0] - 0x90; - decode->op[0].ptr = get_reg_ref(env, decode->op[0].reg, decode->rex.b, - decode->operand_size); + decode->op[0].ptr = get_reg_ref(env, decode->op[0].reg, decode->rex.rex, + decode->rex.b, decode->operand_size); } static void decode_movgroup(CPUX86State *env, struct x86_decode *decode) { decode->op[0].type = X86_VAR_REG; decode->op[0].reg = decode->opcode[0] - 0xb8; - decode->op[0].ptr = get_reg_ref(env, decode->op[0].reg, decode->rex.b, - decode->operand_size); + decode->op[0].ptr = get_reg_ref(env, decode->op[0].reg, decode->rex.rex, + decode->rex.b, decode->operand_size); decode_immediate(env, decode, &decode->op[1], decode->operand_size); } @@ -402,8 +404,8 @@ static void decode_movgroup8(CPUX86State *env, struct x86_decode *decode) { decode->op[0].type = X86_VAR_REG; decode->op[0].reg = decode->opcode[0] - 0xb0; - decode->op[0].ptr = get_reg_ref(env, decode->op[0].reg, decode->rex.b, - decode->operand_size); + decode->op[0].ptr = get_reg_ref(env, decode->op[0].reg, decode->rex.rex, + decode->rex.b, decode->operand_size); decode_immediate(env, decode, &decode->op[1], decode->operand_size); } @@ -412,7 +414,8 @@ static void decode_rcx(CPUX86State *env, struct x86_decode *decode, { op->type = X86_VAR_REG; op->reg = R_ECX; - op->ptr = get_reg_ref(env, op->reg, decode->rex.b, decode->operand_size); + op->ptr = get_reg_ref(env, op->reg, decode->rex.rex, decode->rex.b, + decode->operand_size); } struct decode_tbl { @@ -639,8 +642,8 @@ static void decode_bswap(CPUX86State *env, struct x86_decode *decode) { decode->op[0].type = X86_VAR_REG; decode->op[0].reg = decode->opcode[1] - 0xc8; - decode->op[0].ptr = get_reg_ref(env, decode->op[0].reg, decode->rex.b, - decode->operand_size); + decode->op[0].ptr = get_reg_ref(env, decode->op[0].reg, decode->rex.rex, + decode->rex.b, decode->operand_size); } static void decode_d9_4(CPUX86State *env, struct x86_decode *decode) @@ -1686,7 +1689,8 @@ calc_addr: } } -target_ulong get_reg_ref(CPUX86State *env, int reg, int is_extended, int size) +target_ulong get_reg_ref(CPUX86State *env, int reg, int rex, int is_extended, + int size) { target_ulong ptr = 0; int which = 0; @@ -1698,7 +1702,7 @@ target_ulong get_reg_ref(CPUX86State *env, int reg, int is_extended, int size) switch (size) { case 1: - if (is_extended || reg < 4) { + if (is_extended || reg < 4 || rex) { which = 1; ptr = (target_ulong)&RL(env, reg); } else { @@ -1714,10 +1718,11 @@ target_ulong get_reg_ref(CPUX86State *env, int reg, int is_extended, int size) return ptr; } -target_ulong get_reg_val(CPUX86State *env, int reg, int is_extended, int size) +target_ulong get_reg_val(CPUX86State *env, int reg, int rex, int is_extended, + int size) { target_ulong val = 0; - memcpy(&val, (void *)get_reg_ref(env, reg, is_extended, size), size); + memcpy(&val, (void *)get_reg_ref(env, reg, rex, is_extended, 
size), size); return val; } @@ -1739,7 +1744,8 @@ static target_ulong get_sib_val(CPUX86State *env, struct x86_decode *decode, if (base_reg == R_ESP || base_reg == R_EBP) { *sel = R_SS; } - base = get_reg_val(env, decode->sib.base, decode->rex.b, addr_size); + base = get_reg_val(env, decode->sib.base, decode->rex.rex, + decode->rex.b, addr_size); } if (decode->rex.x) { @@ -1747,7 +1753,8 @@ static target_ulong get_sib_val(CPUX86State *env, struct x86_decode *decode, } if (index_reg != R_ESP) { - scaled_index = get_reg_val(env, index_reg, decode->rex.x, addr_size) << + scaled_index = get_reg_val(env, index_reg, decode->rex.rex, + decode->rex.x, addr_size) << decode->sib.scale; } return base + scaled_index; @@ -1776,7 +1783,8 @@ void calc_modrm_operand32(CPUX86State *env, struct x86_decode *decode, if (decode->modrm.rm == R_EBP || decode->modrm.rm == R_ESP) { seg = R_SS; } - ptr += get_reg_val(env, decode->modrm.rm, decode->rex.b, addr_size); + ptr += get_reg_val(env, decode->modrm.rm, decode->rex.rex, + decode->rex.b, addr_size); } if (X86_DECODE_CMD_LEA == decode->cmd) { @@ -1805,7 +1813,8 @@ void calc_modrm_operand64(CPUX86State *env, struct x86_decode *decode, } else if (0 == mod && 5 == rm) { ptr = RIP(env) + decode->len + (int32_t) offset; } else { - ptr = get_reg_val(env, src, decode->rex.b, 8) + (int64_t) offset; + ptr = get_reg_val(env, src, decode->rex.rex, decode->rex.b, 8) + + (int64_t) offset; } if (X86_DECODE_CMD_LEA == decode->cmd) { @@ -1822,8 +1831,8 @@ void calc_modrm_operand(CPUX86State *env, struct x86_decode *decode, if (3 == decode->modrm.mod) { op->reg = decode->modrm.reg; op->type = X86_VAR_REG; - op->ptr = get_reg_ref(env, decode->modrm.rm, decode->rex.b, - decode->operand_size); + op->ptr = get_reg_ref(env, decode->modrm.rm, decode->rex.rex, + decode->rex.b, decode->operand_size); return; } diff --git a/target/i386/hvf/x86_decode.h b/target/i386/hvf/x86_decode.h index 5ab6f31fa5..ef4bcab310 100644 --- a/target/i386/hvf/x86_decode.h +++ b/target/i386/hvf/x86_decode.h @@ -303,8 +303,10 @@ uint64_t sign(uint64_t val, int size); uint32_t decode_instruction(CPUX86State *env, struct x86_decode *decode); -target_ulong get_reg_ref(CPUX86State *env, int reg, int is_extended, int size); -target_ulong get_reg_val(CPUX86State *env, int reg, int is_extended, int size); +target_ulong get_reg_ref(CPUX86State *env, int reg, int rex, int is_extended, + int size); +target_ulong get_reg_val(CPUX86State *env, int reg, int rex, int is_extended, + int size); void calc_modrm_operand(CPUX86State *env, struct x86_decode *decode, struct x86_decode_op *op); target_ulong decode_linear_addr(CPUX86State *env, struct x86_decode *decode, diff --git a/target/i386/hyperv-proto.h b/target/i386/hyperv-proto.h index d6d5a79293..8c572cd7c2 100644 --- a/target/i386/hyperv-proto.h +++ b/target/i386/hyperv-proto.h @@ -1,7 +1,7 @@ /* - * Definitions for Hyper-V guest/hypervisor interaction + * Definitions for Hyper-V guest/hypervisor interaction - x86-specific part * - * Copyright (C) 2017 Parallels International GmbH + * Copyright (c) 2017-2018 Virtuozzo International GmbH. * * This work is licensed under the terms of the GNU GPL, version 2 or later. * See the COPYING file in the top-level directory. 
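For reference, the hypercall definitions being moved out of this header into hw/hyperv/hyperv-proto.h encode the guest's hypercall input word as: call code in bits 15:0 and the fast-call flag at bit 16 (HV_HYPERCALL_FAST), which is exactly the decode used by the dispatch rewrite in target/i386/hyperv.c further down. A minimal standalone sketch of that decode, illustrative only and not part of the patch:

#include <stdbool.h>
#include <stdint.h>

#define HV_HYPERCALL_FAST (1u << 16)  /* params passed in registers, not memory */

/* Split a Hyper-V hypercall input word into call code and fast flag,
 * mirroring the "input & 0xffff" / "input & HV_HYPERCALL_FAST" decode
 * performed in kvm_hv_handle_exit(). */
static void decode_hcall_input(uint64_t input, uint16_t *code, bool *fast)
{
    *code = input & 0xffff;
    *fast = (input & HV_HYPERCALL_FAST) != 0;
}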
@@ -10,7 +10,7 @@ #ifndef TARGET_I386_HYPERV_PROTO_H #define TARGET_I386_HYPERV_PROTO_H -#include "qemu/bitmap.h" +#include "hw/hyperv/hyperv-proto.h" #define HV_CPUID_VENDOR_AND_MAX_FUNCTIONS 0x40000000 #define HV_CPUID_INTERFACE 0x40000001 @@ -58,6 +58,7 @@ #define HV_APIC_ACCESS_RECOMMENDED (1u << 3) #define HV_SYSTEM_RESET_RECOMMENDED (1u << 4) #define HV_RELAXED_TIMING_RECOMMENDED (1u << 5) +#define HV_CLUSTER_IPI_RECOMMENDED (1u << 10) #define HV_EX_PROCESSOR_MASKS_RECOMMENDED (1u << 11) /* @@ -138,25 +139,6 @@ #define HV_X64_MSR_TSC_EMULATION_STATUS 0x40000108 /* - * Hypercall status code - */ -#define HV_STATUS_SUCCESS 0 -#define HV_STATUS_INVALID_HYPERCALL_CODE 2 -#define HV_STATUS_INVALID_HYPERCALL_INPUT 3 -#define HV_STATUS_INVALID_ALIGNMENT 4 -#define HV_STATUS_INVALID_PARAMETER 5 -#define HV_STATUS_INSUFFICIENT_MEMORY 11 -#define HV_STATUS_INVALID_CONNECTION_ID 18 -#define HV_STATUS_INSUFFICIENT_BUFFERS 19 - -/* - * Hypercall numbers - */ -#define HV_POST_MESSAGE 0x005c -#define HV_SIGNAL_EVENT 0x005d -#define HV_HYPERCALL_FAST (1u << 16) - -/* * Hypercall MSR bits */ #define HV_HYPERCALL_ENABLE (1u << 0) @@ -165,7 +147,6 @@ * Synthetic interrupt controller definitions */ #define HV_SYNIC_VERSION 1 -#define HV_SINT_COUNT 16 #define HV_SYNIC_ENABLE (1u << 0) #define HV_SIMP_ENABLE (1u << 0) #define HV_SIEFP_ENABLE (1u << 0) @@ -175,94 +156,5 @@ #define HV_STIMER_COUNT 4 -/* - * Message size - */ -#define HV_MESSAGE_PAYLOAD_SIZE 240 - -/* - * Message types - */ -#define HV_MESSAGE_NONE 0x00000000 -#define HV_MESSAGE_VMBUS 0x00000001 -#define HV_MESSAGE_UNMAPPED_GPA 0x80000000 -#define HV_MESSAGE_GPA_INTERCEPT 0x80000001 -#define HV_MESSAGE_TIMER_EXPIRED 0x80000010 -#define HV_MESSAGE_INVALID_VP_REGISTER_VALUE 0x80000020 -#define HV_MESSAGE_UNRECOVERABLE_EXCEPTION 0x80000021 -#define HV_MESSAGE_UNSUPPORTED_FEATURE 0x80000022 -#define HV_MESSAGE_EVENTLOG_BUFFERCOMPLETE 0x80000040 -#define HV_MESSAGE_X64_IOPORT_INTERCEPT 0x80010000 -#define HV_MESSAGE_X64_MSR_INTERCEPT 0x80010001 -#define HV_MESSAGE_X64_CPUID_INTERCEPT 0x80010002 -#define HV_MESSAGE_X64_EXCEPTION_INTERCEPT 0x80010003 -#define HV_MESSAGE_X64_APIC_EOI 0x80010004 -#define HV_MESSAGE_X64_LEGACY_FP_ERROR 0x80010005 - -/* - * Message flags - */ -#define HV_MESSAGE_FLAG_PENDING 0x1 - -/* - * Event flags number per SINT - */ -#define HV_EVENT_FLAGS_COUNT (256 * 8) - -/* - * Connection id valid bits - */ -#define HV_CONNECTION_ID_MASK 0x00ffffff - -/* - * Input structure for POST_MESSAGE hypercall - */ -struct hyperv_post_message_input { - uint32_t connection_id; - uint32_t _reserved; - uint32_t message_type; - uint32_t payload_size; - uint8_t payload[HV_MESSAGE_PAYLOAD_SIZE]; -}; - -/* - * Input structure for SIGNAL_EVENT hypercall - */ -struct hyperv_signal_event_input { - uint32_t connection_id; - uint16_t flag_number; - uint16_t _reserved_zero; -}; - -/* - * SynIC message structures - */ -struct hyperv_message_header { - uint32_t message_type; - uint8_t payload_size; - uint8_t message_flags; /* HV_MESSAGE_FLAG_XX */ - uint8_t _reserved[2]; - uint64_t sender; -}; - -struct hyperv_message { - struct hyperv_message_header header; - uint8_t payload[HV_MESSAGE_PAYLOAD_SIZE]; -}; - -struct hyperv_message_page { - struct hyperv_message slot[HV_SINT_COUNT]; -}; - -/* - * SynIC event flags structures - */ -struct hyperv_event_flags { - DECLARE_BITMAP(flags, HV_EVENT_FLAGS_COUNT); -}; - -struct hyperv_event_flags_page { - struct hyperv_event_flags slot[HV_SINT_COUNT]; -}; #endif diff --git a/target/i386/hyperv-stub.c 
b/target/i386/hyperv-stub.c new file mode 100644 index 0000000000..fe548cbae2 --- /dev/null +++ b/target/i386/hyperv-stub.c @@ -0,0 +1,48 @@ +/* + * Stubs for CONFIG_HYPERV=n + * + * Copyright (c) 2015-2018 Virtuozzo International GmbH. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "hyperv.h" + +#ifdef CONFIG_KVM +int kvm_hv_handle_exit(X86CPU *cpu, struct kvm_hyperv_exit *exit) +{ + switch (exit->type) { + case KVM_EXIT_HYPERV_SYNIC: + if (!cpu->hyperv_synic) { + return -1; + } + + /* + * Tracking the changes in the MSRs is unnecessary as there are no + * users for them beside save/load, which is handled nicely by the + * generic MSR save/load code + */ + return 0; + case KVM_EXIT_HYPERV_HCALL: + exit->u.hcall.result = HV_STATUS_INVALID_HYPERCALL_CODE; + return 0; + default: + return -1; + } +} +#endif + +int hyperv_x86_synic_add(X86CPU *cpu) +{ + return -ENOSYS; +} + +void hyperv_x86_synic_reset(X86CPU *cpu) +{ +} + +void hyperv_x86_synic_update(X86CPU *cpu) +{ +} diff --git a/target/i386/hyperv.c b/target/i386/hyperv.c index 3065d765ed..b264a28620 100644 --- a/target/i386/hyperv.c +++ b/target/i386/hyperv.c @@ -14,16 +14,36 @@ #include "qemu/osdep.h" #include "qemu/main-loop.h" #include "hyperv.h" +#include "hw/hyperv/hyperv.h" #include "hyperv-proto.h" -uint32_t hyperv_vp_index(X86CPU *cpu) +int hyperv_x86_synic_add(X86CPU *cpu) { - return CPU(cpu)->cpu_index; + hyperv_synic_add(CPU(cpu)); + return 0; } -X86CPU *hyperv_find_vcpu(uint32_t vp_index) +void hyperv_x86_synic_reset(X86CPU *cpu) { - return X86_CPU(qemu_get_cpu(vp_index)); + hyperv_synic_reset(CPU(cpu)); +} + +void hyperv_x86_synic_update(X86CPU *cpu) +{ + CPUX86State *env = &cpu->env; + bool enable = env->msr_hv_synic_control & HV_SYNIC_ENABLE; + hwaddr msg_page_addr = (env->msr_hv_synic_msg_page & HV_SIMP_ENABLE) ? + (env->msr_hv_synic_msg_page & TARGET_PAGE_MASK) : 0; + hwaddr event_page_addr = (env->msr_hv_synic_evt_page & HV_SIEFP_ENABLE) ? + (env->msr_hv_synic_evt_page & TARGET_PAGE_MASK) : 0; + hyperv_synic_update(CPU(cpu), enable, msg_page_addr, event_page_addr); +} + +static void async_synic_update(CPUState *cs, run_on_cpu_data data) +{ + qemu_mutex_lock_iothread(); + hyperv_x86_synic_update(X86_CPU(cs)); + qemu_mutex_unlock_iothread(); } int kvm_hv_handle_exit(X86CPU *cpu, struct kvm_hyperv_exit *exit) @@ -36,11 +56,6 @@ int kvm_hv_handle_exit(X86CPU *cpu, struct kvm_hyperv_exit *exit) return -1; } - /* - * For now just track changes in SynIC control and msg/evt pages msr's. - * When SynIC messaging/events processing will be added in future - * here we will do messages queues flushing and pages remapping. - */ switch (exit->u.synic.msr) { case HV_X64_MSR_SCONTROL: env->msr_hv_synic_control = exit->u.synic.control; @@ -54,98 +69,33 @@ int kvm_hv_handle_exit(X86CPU *cpu, struct kvm_hyperv_exit *exit) default: return -1; } + + /* + * this will run in this cpu thread before it returns to KVM, but in a + * safe environment (i.e. 
when all cpus are quiescent) -- this is + * necessary because memory hierarchy is being changed + */ + async_safe_run_on_cpu(CPU(cpu), async_synic_update, RUN_ON_CPU_NULL); + return 0; case KVM_EXIT_HYPERV_HCALL: { - uint16_t code; + uint16_t code = exit->u.hcall.input & 0xffff; + bool fast = exit->u.hcall.input & HV_HYPERCALL_FAST; + uint64_t param = exit->u.hcall.params[0]; - code = exit->u.hcall.input & 0xffff; switch (code) { case HV_POST_MESSAGE: + exit->u.hcall.result = hyperv_hcall_post_message(param, fast); + break; case HV_SIGNAL_EVENT: + exit->u.hcall.result = hyperv_hcall_signal_event(param, fast); + break; default: exit->u.hcall.result = HV_STATUS_INVALID_HYPERCALL_CODE; - return 0; } + return 0; } default: return -1; } } - -static void kvm_hv_sint_ack_handler(EventNotifier *notifier) -{ - HvSintRoute *sint_route = container_of(notifier, HvSintRoute, - sint_ack_notifier); - event_notifier_test_and_clear(notifier); - if (sint_route->sint_ack_clb) { - sint_route->sint_ack_clb(sint_route); - } -} - -HvSintRoute *kvm_hv_sint_route_create(uint32_t vp_index, uint32_t sint, - HvSintAckClb sint_ack_clb) -{ - HvSintRoute *sint_route; - int r, gsi; - - sint_route = g_malloc0(sizeof(*sint_route)); - r = event_notifier_init(&sint_route->sint_set_notifier, false); - if (r) { - goto err; - } - - r = event_notifier_init(&sint_route->sint_ack_notifier, false); - if (r) { - goto err_sint_set_notifier; - } - - event_notifier_set_handler(&sint_route->sint_ack_notifier, - kvm_hv_sint_ack_handler); - - gsi = kvm_irqchip_add_hv_sint_route(kvm_state, vp_index, sint); - if (gsi < 0) { - goto err_gsi; - } - - r = kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, - &sint_route->sint_set_notifier, - &sint_route->sint_ack_notifier, gsi); - if (r) { - goto err_irqfd; - } - sint_route->gsi = gsi; - sint_route->sint_ack_clb = sint_ack_clb; - sint_route->vp_index = vp_index; - sint_route->sint = sint; - - return sint_route; - -err_irqfd: - kvm_irqchip_release_virq(kvm_state, gsi); -err_gsi: - event_notifier_set_handler(&sint_route->sint_ack_notifier, NULL); - event_notifier_cleanup(&sint_route->sint_ack_notifier); -err_sint_set_notifier: - event_notifier_cleanup(&sint_route->sint_set_notifier); -err: - g_free(sint_route); - - return NULL; -} - -void kvm_hv_sint_route_destroy(HvSintRoute *sint_route) -{ - kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, - &sint_route->sint_set_notifier, - sint_route->gsi); - kvm_irqchip_release_virq(kvm_state, sint_route->gsi); - event_notifier_set_handler(&sint_route->sint_ack_notifier, NULL); - event_notifier_cleanup(&sint_route->sint_ack_notifier); - event_notifier_cleanup(&sint_route->sint_set_notifier); - g_free(sint_route); -} - -int kvm_hv_sint_route_set_sint(HvSintRoute *sint_route) -{ - return event_notifier_set(&sint_route->sint_set_notifier); -} diff --git a/target/i386/hyperv.h b/target/i386/hyperv.h index 00c9b454bb..67543296c3 100644 --- a/target/i386/hyperv.h +++ b/target/i386/hyperv.h @@ -16,30 +16,14 @@ #include "cpu.h" #include "sysemu/kvm.h" -#include "qemu/event_notifier.h" - -typedef struct HvSintRoute HvSintRoute; -typedef void (*HvSintAckClb)(HvSintRoute *sint_route); - -struct HvSintRoute { - uint32_t sint; - uint32_t vp_index; - int gsi; - EventNotifier sint_set_notifier; - EventNotifier sint_ack_notifier; - HvSintAckClb sint_ack_clb; -}; +#include "hw/hyperv/hyperv.h" +#ifdef CONFIG_KVM int kvm_hv_handle_exit(X86CPU *cpu, struct kvm_hyperv_exit *exit); +#endif -HvSintRoute *kvm_hv_sint_route_create(uint32_t vp_index, uint32_t sint, - HvSintAckClb 
sint_ack_clb); - -void kvm_hv_sint_route_destroy(HvSintRoute *sint_route); - -int kvm_hv_sint_route_set_sint(HvSintRoute *sint_route); - -uint32_t hyperv_vp_index(X86CPU *cpu); -X86CPU *hyperv_find_vcpu(uint32_t vp_index); +int hyperv_x86_synic_add(X86CPU *cpu); +void hyperv_x86_synic_reset(X86CPU *cpu); +void hyperv_x86_synic_update(X86CPU *cpu); #endif diff --git a/target/i386/kvm.c b/target/i386/kvm.c index dc4047b02f..115d8b4c14 100644 --- a/target/i386/kvm.c +++ b/target/i386/kvm.c @@ -608,7 +608,8 @@ static bool hyperv_enabled(X86CPU *cpu) cpu->hyperv_synic || cpu->hyperv_stimer || cpu->hyperv_reenlightenment || - cpu->hyperv_tlbflush); + cpu->hyperv_tlbflush || + cpu->hyperv_ipi); } static int kvm_arch_set_tsc_khz(CPUState *cs) @@ -733,9 +734,20 @@ static int hyperv_handle_properties(CPUState *cs) env->features[FEAT_HYPERV_EAX] |= HV_VP_RUNTIME_AVAILABLE; } if (cpu->hyperv_synic) { - if (!has_msr_hv_synic || - kvm_vcpu_enable_cap(cs, KVM_CAP_HYPERV_SYNIC, 0)) { - fprintf(stderr, "Hyper-V SynIC is not supported by kernel\n"); + unsigned int cap = KVM_CAP_HYPERV_SYNIC; + if (!cpu->hyperv_synic_kvm_only) { + if (!cpu->hyperv_vpindex) { + fprintf(stderr, "Hyper-V SynIC " + "(requested by 'hv-synic' cpu flag) " + "requires Hyper-V VP_INDEX ('hv-vpindex')\n"); + return -ENOSYS; + } + cap = KVM_CAP_HYPERV_SYNIC2; + } + + if (!has_msr_hv_synic || !kvm_check_extension(cs->kvm_state, cap)) { + fprintf(stderr, "Hyper-V SynIC (requested by 'hv-synic' cpu flag) " + "is not supported by kernel\n"); return -ENOSYS; } @@ -753,12 +765,14 @@ static int hyperv_handle_properties(CPUState *cs) static int hyperv_init_vcpu(X86CPU *cpu) { + CPUState *cs = CPU(cpu); + int ret; + if (cpu->hyperv_vpindex && !hv_vpindex_settable) { /* * the kernel doesn't support setting vp_index; assert that its value * is in sync */ - int ret; struct { struct kvm_msrs info; struct kvm_msr_entry entries[1]; @@ -767,18 +781,38 @@ static int hyperv_init_vcpu(X86CPU *cpu) .entries[0].index = HV_X64_MSR_VP_INDEX, }; - ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, &msr_data); + ret = kvm_vcpu_ioctl(cs, KVM_GET_MSRS, &msr_data); if (ret < 0) { return ret; } assert(ret == 1); - if (msr_data.entries[0].data != hyperv_vp_index(cpu)) { + if (msr_data.entries[0].data != hyperv_vp_index(CPU(cpu))) { error_report("kernel's vp_index != QEMU's vp_index"); return -ENXIO; } } + if (cpu->hyperv_synic) { + uint32_t synic_cap = cpu->hyperv_synic_kvm_only ? 
+ KVM_CAP_HYPERV_SYNIC : KVM_CAP_HYPERV_SYNIC2; + ret = kvm_vcpu_enable_cap(cs, synic_cap, 0); + if (ret < 0) { + error_report("failed to turn on HyperV SynIC in KVM: %s", + strerror(-ret)); + return ret; + } + + if (!cpu->hyperv_synic_kvm_only) { + ret = hyperv_x86_synic_add(cpu); + if (ret < 0) { + error_report("failed to create HyperV SynIC: %s", + strerror(-ret)); + return ret; + } + } + } + return 0; } @@ -888,6 +922,17 @@ int kvm_arch_init_vcpu(CPUState *cs) c->eax |= HV_REMOTE_TLB_FLUSH_RECOMMENDED; c->eax |= HV_EX_PROCESSOR_MASKS_RECOMMENDED; } + if (cpu->hyperv_ipi) { + if (kvm_check_extension(cs->kvm_state, + KVM_CAP_HYPERV_SEND_IPI) <= 0) { + fprintf(stderr, "Hyper-V IPI send support " + "(requested by 'hv-ipi' cpu flag) " + " is not supported by kernel\n"); + return -ENOSYS; + } + c->eax |= HV_CLUSTER_IPI_RECOMMENDED; + c->eax |= HV_EX_PROCESSOR_MASKS_RECOMMENDED; + } c->ebx = cpu->hyperv_spinlock_attempts; @@ -1153,7 +1198,7 @@ int kvm_arch_init_vcpu(CPUState *cs) if (local_err) { error_report_err(local_err); error_free(invtsc_mig_blocker); - goto fail; + return r; } /* for savevm */ vmstate_x86_cpu.unmigratable = 1; @@ -1226,6 +1271,8 @@ void kvm_arch_reset_vcpu(X86CPU *cpu) for (i = 0; i < ARRAY_SIZE(env->msr_hv_synic_sint); i++) { env->msr_hv_synic_sint[i] = HV_SINT_MASKED; } + + hyperv_x86_synic_reset(cpu); } } @@ -1937,7 +1984,8 @@ static int kvm_put_msrs(X86CPU *cpu, int level) kvm_msr_entry_add(cpu, HV_X64_MSR_VP_RUNTIME, env->msr_hv_runtime); } if (cpu->hyperv_vpindex && hv_vpindex_settable) { - kvm_msr_entry_add(cpu, HV_X64_MSR_VP_INDEX, hyperv_vp_index(cpu)); + kvm_msr_entry_add(cpu, HV_X64_MSR_VP_INDEX, + hyperv_vp_index(CPU(cpu))); } if (cpu->hyperv_synic) { int j; @@ -2686,7 +2734,6 @@ static int kvm_put_vcpu_events(X86CPU *cpu, int level) events.exception.nr = env->exception_injected; events.exception.has_error_code = env->has_error_code; events.exception.error_code = env->error_code; - events.exception.pad = 0; events.interrupt.injected = (env->interrupt_injected >= 0); events.interrupt.nr = env->interrupt_injected; @@ -2695,7 +2742,6 @@ static int kvm_put_vcpu_events(X86CPU *cpu, int level) events.nmi.injected = env->nmi_injected; events.nmi.pending = env->nmi_pending; events.nmi.masked = !!(env->hflags2 & HF2_NMI_MASK); - events.nmi.pad = 0; events.sipi_vector = env->sipi_vector; events.flags = 0; diff --git a/target/i386/machine.c b/target/i386/machine.c index 084c2c73a8..225b5d433b 100644 --- a/target/i386/machine.c +++ b/target/i386/machine.c @@ -7,6 +7,7 @@ #include "hw/i386/pc.h" #include "hw/isa/isa.h" #include "migration/cpu.h" +#include "hyperv.h" #include "sysemu/kvm.h" @@ -672,11 +673,19 @@ static bool hyperv_synic_enable_needed(void *opaque) return false; } +static int hyperv_synic_post_load(void *opaque, int version_id) +{ + X86CPU *cpu = opaque; + hyperv_x86_synic_update(cpu); + return 0; +} + static const VMStateDescription vmstate_msr_hyperv_synic = { .name = "cpu/msr_hyperv_synic", .version_id = 1, .minimum_version_id = 1, .needed = hyperv_synic_enable_needed, + .post_load = hyperv_synic_post_load, .fields = (VMStateField[]) { VMSTATE_UINT64(env.msr_hv_synic_control, X86CPU), VMSTATE_UINT64(env.msr_hv_synic_evt_page, X86CPU), diff --git a/target/i386/mem_helper.c b/target/i386/mem_helper.c index 30c26b9d9c..6cc53bcb40 100644 --- a/target/i386/mem_helper.c +++ b/target/i386/mem_helper.c @@ -23,6 +23,7 @@ #include "exec/exec-all.h" #include "exec/cpu_ldst.h" #include "qemu/int128.h" +#include "qemu/atomic128.h" #include "tcg.h" void 
helper_cmpxchg8b_unlocked(CPUX86State *env, target_ulong a0) @@ -137,10 +138,7 @@ void helper_cmpxchg16b(CPUX86State *env, target_ulong a0) if ((a0 & 0xf) != 0) { raise_exception_ra(env, EXCP0D_GPF, ra); - } else { -#ifndef CONFIG_ATOMIC128 - cpu_loop_exit_atomic(ENV_GET_CPU(env), ra); -#else + } else if (HAVE_CMPXCHG128) { int eflags = cpu_cc_compute_all(env, CC_OP); Int128 cmpv = int128_make128(env->regs[R_EAX], env->regs[R_EDX]); @@ -159,7 +157,8 @@ void helper_cmpxchg16b(CPUX86State *env, target_ulong a0) eflags &= ~CC_Z; } CC_SRC = eflags; -#endif + } else { + cpu_loop_exit_atomic(ENV_GET_CPU(env), ra); } } #endif diff --git a/target/mips/cpu.h b/target/mips/cpu.h index 28af4d191c..e48be4b334 100644 --- a/target/mips/cpu.h +++ b/target/mips/cpu.h @@ -195,10 +195,125 @@ struct CPUMIPSState { #define MSAIR_ProcID 8 #define MSAIR_Rev 0 +/* + * Summary of CP0 registers + * ======================== + * + * + * Register 0 Register 1 Register 2 Register 3 + * ---------- ---------- ---------- ---------- + * + * 0 Index Random EntryLo0 EntryLo1 + * 1 MVPControl VPEControl TCStatus GlobalNumber + * 2 MVPConf0 VPEConf0 TCBind + * 3 MVPConf1 VPEConf1 TCRestart + * 4 VPControl YQMask TCHalt + * 5 VPESchedule TCContext + * 6 VPEScheFBack TCSchedule + * 7 VPEOpt TCScheFBack TCOpt + * + * + * Register 4 Register 5 Register 6 Register 7 + * ---------- ---------- ---------- ---------- + * + * 0 Context PageMask Wired HWREna + * 1 ContextConfig PageGrain SRSConf0 + * 2 UserLocal SegCtl0 SRSConf1 + * 3 XContextConfig SegCtl1 SRSConf2 + * 4 DebugContextID SegCtl2 SRSConf3 + * 5 MemoryMapID PWBase SRSConf4 + * 6 PWField PWCtl + * 7 PWSize + * + * + * Register 8 Register 9 Register 10 Register 11 + * ---------- ---------- ----------- ----------- + * + * 0 BadVAddr Count EntryHi Compare + * 1 BadInstr + * 2 BadInstrP + * 3 BadInstrX + * 4 GuestCtl1 GuestCtl0Ext + * 5 GuestCtl2 + * 6 GuestCtl3 + * 7 + * + * + * Register 12 Register 13 Register 14 Register 15 + * ----------- ----------- ----------- ----------- + * + * 0 Status Cause EPC PRId + * 1 IntCtl EBase + * 2 SRSCtl NestedEPC CDMMBase + * 3 SRSMap CMGCRBase + * 4 View_IPL View_RIPL BEVVA + * 5 SRSMap2 NestedExc + * 6 GuestCtl0 + * 7 GTOffset + * + * + * Register 16 Register 17 Register 18 Register 19 + * ----------- ----------- ----------- ----------- + * + * 0 Config LLAddr WatchLo WatchHi + * 1 Config1 MAAR WatchLo WatchHi + * 2 Config2 MAARI WatchLo WatchHi + * 3 Config3 WatchLo WatchHi + * 4 Config4 WatchLo WatchHi + * 5 Config5 WatchLo WatchHi + * 6 WatchLo WatchHi + * 7 WatchLo WatchHi + * + * + * Register 20 Register 21 Register 22 Register 23 + * ----------- ----------- ----------- ----------- + * + * 0 XContext Debug + * 1 TraceControl + * 2 TraceControl2 + * 3 UserTraceData1 + * 4 TraceIBPC + * 5 TraceDBPC + * 6 Debug2 + * 7 + * + * + * Register 24 Register 25 Register 26 Register 27 + * ----------- ----------- ----------- ----------- + * + * 0 DEPC PerfCnt ErrCtl CacheErr + * 1 PerfCnt + * 2 TraceControl3 PerfCnt + * 3 UserTraceData2 PerfCnt + * 4 PerfCnt + * 5 PerfCnt + * 6 PerfCnt + * 7 PerfCnt + * + * + * Register 28 Register 29 Register 30 Register 31 + * ----------- ----------- ----------- ----------- + * + * 0 DataLo DataHi ErrorEPC DESAVE + * 1 TagLo TagHi + * 2 DataLo DataHi KScratch<n> + * 3 TagLo TagHi KScratch<n> + * 4 DataLo DataHi KScratch<n> + * 5 TagLo TagHi KScratch<n> + * 6 DataLo DataHi KScratch<n> + * 7 TagLo TagHi KScratch<n> + * + */ +/* + * CP0 Register 0 + */ int32_t CP0_Index; /* CP0_MVP* are per MVP registers. 
*/ int32_t CP0_VPControl; #define CP0VPCtl_DIS 0 +/* + * CP0 Register 1 + */ int32_t CP0_Random; int32_t CP0_VPEControl; #define CP0VPECo_YSI 21 @@ -239,7 +354,13 @@ struct CPUMIPSState { #define CP0VPEOpt_DWX2 2 #define CP0VPEOpt_DWX1 1 #define CP0VPEOpt_DWX0 0 +/* + * CP0 Register 2 + */ uint64_t CP0_EntryLo0; +/* + * CP0 Register 3 + */ uint64_t CP0_EntryLo1; #if defined(TARGET_MIPS64) # define CP0EnLo_RI 63 @@ -250,8 +371,14 @@ struct CPUMIPSState { #endif int32_t CP0_GlobalNumber; #define CP0GN_VPId 0 +/* + * CP0 Register 4 + */ target_ulong CP0_Context; target_ulong CP0_KScratch[MIPS_KSCRATCH_NUM]; +/* + * CP0 Register 5 + */ int32_t CP0_PageMask; int32_t CP0_PageGrain_rw_bitmask; int32_t CP0_PageGrain; @@ -289,7 +416,47 @@ struct CPUMIPSState { #define CP0SC2_XR 56 #define CP0SC2_XR_MASK (0xFFULL << CP0SC2_XR) #define CP0SC2_MASK (CP0SC_1GMASK | (CP0SC_1GMASK << 16) | CP0SC2_XR_MASK) + target_ulong CP0_PWBase; + target_ulong CP0_PWField; +#if defined(TARGET_MIPS64) +#define CP0PF_BDI 32 /* 37..32 */ +#define CP0PF_GDI 24 /* 29..24 */ +#define CP0PF_UDI 18 /* 23..18 */ +#define CP0PF_MDI 12 /* 17..12 */ +#define CP0PF_PTI 6 /* 11..6 */ +#define CP0PF_PTEI 0 /* 5..0 */ +#else +#define CP0PF_GDW 24 /* 29..24 */ +#define CP0PF_UDW 18 /* 23..18 */ +#define CP0PF_MDW 12 /* 17..12 */ +#define CP0PF_PTW 6 /* 11..6 */ +#define CP0PF_PTEW 0 /* 5..0 */ +#endif + target_ulong CP0_PWSize; +#if defined(TARGET_MIPS64) +#define CP0PS_BDW 32 /* 37..32 */ +#endif +#define CP0PS_PS 30 +#define CP0PS_GDW 24 /* 29..24 */ +#define CP0PS_UDW 18 /* 23..18 */ +#define CP0PS_MDW 12 /* 17..12 */ +#define CP0PS_PTW 6 /* 11..6 */ +#define CP0PS_PTEW 0 /* 5..0 */ +/* + * CP0 Register 6 + */ int32_t CP0_Wired; + int32_t CP0_PWCtl; +#define CP0PC_PWEN 31 +#if defined(TARGET_MIPS64) +#define CP0PC_PWDIREXT 30 +#define CP0PC_XK 28 +#define CP0PC_XS 27 +#define CP0PC_XU 26 +#endif +#define CP0PC_DPH 7 +#define CP0PC_HUGEPG 6 +#define CP0PC_PSN 0 /* 5..0 */ int32_t CP0_SRSConf0_rw_bitmask; int32_t CP0_SRSConf0; #define CP0SRSC0_M 31 @@ -319,16 +486,34 @@ struct CPUMIPSState { #define CP0SRSC4_SRS15 20 #define CP0SRSC4_SRS14 10 #define CP0SRSC4_SRS13 0 +/* + * CP0 Register 7 + */ int32_t CP0_HWREna; +/* + * CP0 Register 8 + */ target_ulong CP0_BadVAddr; uint32_t CP0_BadInstr; uint32_t CP0_BadInstrP; uint32_t CP0_BadInstrX; +/* + * CP0 Register 9 + */ int32_t CP0_Count; +/* + * CP0 Register 10 + */ target_ulong CP0_EntryHi; #define CP0EnHi_EHINV 10 target_ulong CP0_EntryHi_ASID_mask; +/* + * CP0 Register 11 + */ int32_t CP0_Compare; +/* + * CP0 Register 12 + */ int32_t CP0_Status; #define CP0St_CU3 31 #define CP0St_CU2 30 @@ -370,6 +555,9 @@ struct CPUMIPSState { #define CP0SRSMap_SSV2 8 #define CP0SRSMap_SSV1 4 #define CP0SRSMap_SSV0 0 +/* + * CP0 Register 13 + */ int32_t CP0_Cause; #define CP0Ca_BD 31 #define CP0Ca_TI 30 @@ -381,12 +569,21 @@ struct CPUMIPSState { #define CP0Ca_IP 8 #define CP0Ca_IP_mask 0x0000FF00 #define CP0Ca_EC 2 +/* + * CP0 Register 14 + */ target_ulong CP0_EPC; +/* + * CP0 Register 15 + */ int32_t CP0_PRid; target_ulong CP0_EBase; target_ulong CP0_EBaseWG_rw_bitmask; #define CP0EBase_WG 11 target_ulong CP0_CMGCRBase; +/* + * CP0 Register 16 + */ int32_t CP0_Config0; #define CP0C0_M 31 #define CP0C0_K23 28 /* 30..28 */ @@ -503,6 +700,9 @@ struct CPUMIPSState { uint64_t CP0_MAAR[MIPS_MAAR_MAX]; int32_t CP0_MAARI; /* XXX: Maybe make LLAddr per-TC? 
*/ +/* + * CP0 Register 17 + */ uint64_t lladdr; target_ulong llval; target_ulong llnewval; @@ -511,11 +711,23 @@ struct CPUMIPSState { target_ulong llreg; uint64_t CP0_LLAddr_rw_bitmask; int CP0_LLAddr_shift; +/* + * CP0 Register 18 + */ target_ulong CP0_WatchLo[8]; +/* + * CP0 Register 19 + */ int32_t CP0_WatchHi[8]; #define CP0WH_ASID 16 +/* + * CP0 Register 20 + */ target_ulong CP0_XContext; int32_t CP0_Framemask; +/* + * CP0 Register 23 + */ int32_t CP0_Debug; #define CP0DB_DBD 31 #define CP0DB_DM 30 @@ -535,18 +747,40 @@ struct CPUMIPSState { #define CP0DB_DDBL 2 #define CP0DB_DBp 1 #define CP0DB_DSS 0 +/* + * CP0 Register 24 + */ target_ulong CP0_DEPC; +/* + * CP0 Register 25 + */ int32_t CP0_Performance0; +/* + * CP0 Register 26 + */ int32_t CP0_ErrCtl; #define CP0EC_WST 29 #define CP0EC_SPR 28 #define CP0EC_ITC 26 +/* + * CP0 Register 28 + */ uint64_t CP0_TagLo; int32_t CP0_DataLo; +/* + * CP0 Register 29 + */ int32_t CP0_TagHi; int32_t CP0_DataHi; +/* + * CP0 Register 30 + */ target_ulong CP0_ErrorEPC; +/* + * CP0 Register 31 + */ int32_t CP0_DESAVE; + /* We waste some space so we can handle shadow registers like TCs. */ TCState tcs[MIPS_SHADOW_SET_MAX]; CPUMIPSFPUContext fpus[MIPS_FPU_MAX]; @@ -596,8 +830,9 @@ struct CPUMIPSState { #define MIPS_HFLAG_BX 0x40000 /* branch exchanges execution mode */ #define MIPS_HFLAG_BMASK (MIPS_HFLAG_BMASK_BASE | MIPS_HFLAG_BMASK_EXT) /* MIPS DSP resources access. */ -#define MIPS_HFLAG_DSP 0x080000 /* Enable access to MIPS DSP resources. */ -#define MIPS_HFLAG_DSPR2 0x100000 /* Enable access to MIPS DSPR2 resources. */ +#define MIPS_HFLAG_DSP 0x080000 /* Enable access to DSP resources. */ +#define MIPS_HFLAG_DSP_R2 0x100000 /* Enable access to DSP R2 resources. */ +#define MIPS_HFLAG_DSP_R3 0x20000000 /* Enable access to DSP R3 resources. */ /* Extra flag about HWREna register. */ #define MIPS_HFLAG_HWRENA_ULR 0x200000 /* ULR bit from HWREna is set. */ #define MIPS_HFLAG_SBRI 0x400000 /* R6 SDBBP causes RI excpt. in user mode */ @@ -614,7 +849,7 @@ struct CPUMIPSState { int CCRes; /* Cycle count resolution/divisor */ uint32_t CP0_Status_rw_bitmask; /* Read/write bits in CP0_Status */ uint32_t CP0_TCStatus_rw_bitmask; /* Read/write bits in CP0_TCStatus */ - int insn_flags; /* Supported instruction set */ + uint64_t insn_flags; /* Supported instruction set */ /* Fields up to this point are cleared by a CPU reset */ struct {} end_reset_fields; diff --git a/target/mips/helper.c b/target/mips/helper.c index f0c268b83c..8988452dbd 100644 --- a/target/mips/helper.c +++ b/target/mips/helper.c @@ -537,6 +537,342 @@ hwaddr mips_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) } #endif +#if !defined(CONFIG_USER_ONLY) +#if !defined(TARGET_MIPS64) + +/* + * Perform hardware page table walk + * + * Memory accesses are performed using the KERNEL privilege level. + * Synchronous exceptions detected on memory accesses cause a silent exit + * from page table walking, resulting in a TLB or XTLB Refill exception. + * + * Implementations are not required to support page table walk memory + * accesses from mapped memory regions. When an unsupported access is + * attempted, a silent exit is taken, resulting in a TLB or XTLB Refill + * exception. + * + * Note that if an exception is caused by AddressTranslation or LoadMemory + * functions, the exception is not taken, a silent exit is taken, + * resulting in a TLB or XTLB Refill exception. 
+ */ + +static bool get_pte(CPUMIPSState *env, uint64_t vaddr, int entry_size, + uint64_t *pte) +{ + if ((vaddr & ((entry_size >> 3) - 1)) != 0) { + return false; + } + if (entry_size == 64) { + *pte = cpu_ldq_code(env, vaddr); + } else { + *pte = cpu_ldl_code(env, vaddr); + } + return true; +} + +static uint64_t get_tlb_entry_layout(CPUMIPSState *env, uint64_t entry, + int entry_size, int ptei) +{ + uint64_t result = entry; + uint64_t rixi; + if (ptei > entry_size) { + ptei -= 32; + } + result >>= (ptei - 2); + rixi = result & 3; + result >>= 2; + result |= rixi << CP0EnLo_XI; + return result; +} + +static int walk_directory(CPUMIPSState *env, uint64_t *vaddr, + int directory_index, bool *huge_page, bool *hgpg_directory_hit, + uint64_t *pw_entrylo0, uint64_t *pw_entrylo1) +{ + int dph = (env->CP0_PWCtl >> CP0PC_DPH) & 0x1; + int psn = (env->CP0_PWCtl >> CP0PC_PSN) & 0x3F; + int hugepg = (env->CP0_PWCtl >> CP0PC_HUGEPG) & 0x1; + int pf_ptew = (env->CP0_PWField >> CP0PF_PTEW) & 0x3F; + int ptew = (env->CP0_PWSize >> CP0PS_PTEW) & 0x3F; + int native_shift = (((env->CP0_PWSize >> CP0PS_PS) & 1) == 0) ? 2 : 3; + int directory_shift = (ptew > 1) ? -1 : + (hugepg && (ptew == 1)) ? native_shift + 1 : native_shift; + int leaf_shift = (ptew > 1) ? -1 : + (ptew == 1) ? native_shift + 1 : native_shift; + uint32_t direntry_size = 1 << (directory_shift + 3); + uint32_t leafentry_size = 1 << (leaf_shift + 3); + uint64_t entry; + uint64_t paddr; + int prot; + uint64_t lsb = 0; + uint64_t w = 0; + + if (get_physical_address(env, &paddr, &prot, *vaddr, MMU_DATA_LOAD, + ACCESS_INT, cpu_mmu_index(env, false)) != + TLBRET_MATCH) { + /* wrong base address */ + return 0; + } + if (!get_pte(env, *vaddr, direntry_size, &entry)) { + return 0; + } + + if ((entry & (1 << psn)) && hugepg) { + *huge_page = true; + *hgpg_directory_hit = true; + entry = get_tlb_entry_layout(env, entry, leafentry_size, pf_ptew); + w = directory_index - 1; + if (directory_index & 0x1) { + /* Generate adjacent page from same PTE for odd TLB page */ + lsb = (1 << w) >> 6; + *pw_entrylo0 = entry & ~lsb; /* even page */ + *pw_entrylo1 = entry | lsb; /* odd page */ + } else if (dph) { + int oddpagebit = 1 << leaf_shift; + uint64_t vaddr2 = *vaddr ^ oddpagebit; + if (*vaddr & oddpagebit) { + *pw_entrylo1 = entry; + } else { + *pw_entrylo0 = entry; + } + if (get_physical_address(env, &paddr, &prot, vaddr2, MMU_DATA_LOAD, + ACCESS_INT, cpu_mmu_index(env, false)) != + TLBRET_MATCH) { + return 0; + } + if (!get_pte(env, vaddr2, leafentry_size, &entry)) { + return 0; + } + entry = get_tlb_entry_layout(env, entry, leafentry_size, pf_ptew); + if (*vaddr & oddpagebit) { + *pw_entrylo0 = entry; + } else { + *pw_entrylo1 = entry; + } + } else { + return 0; + } + return 1; + } else { + *vaddr = entry; + return 2; + } +} + +static bool page_table_walk_refill(CPUMIPSState *env, vaddr address, int rw, + int mmu_idx) +{ + int gdw = (env->CP0_PWSize >> CP0PS_GDW) & 0x3F; + int udw = (env->CP0_PWSize >> CP0PS_UDW) & 0x3F; + int mdw = (env->CP0_PWSize >> CP0PS_MDW) & 0x3F; + int ptw = (env->CP0_PWSize >> CP0PS_PTW) & 0x3F; + int ptew = (env->CP0_PWSize >> CP0PS_PTEW) & 0x3F; + + /* Initial values */ + bool huge_page = false; + bool hgpg_bdhit = false; + bool hgpg_gdhit = false; + bool hgpg_udhit = false; + bool hgpg_mdhit = false; + + int32_t pw_pagemask = 0; + target_ulong pw_entryhi = 0; + uint64_t pw_entrylo0 = 0; + uint64_t pw_entrylo1 = 0; + + /* Native pointer size */ + /*For the 32-bit architectures, this bit is fixed to 0.*/ + int native_shift = 
(((env->CP0_PWSize >> CP0PS_PS) & 1) == 0) ? 2 : 3; + + /* Indices from PWField */ + int pf_gdw = (env->CP0_PWField >> CP0PF_GDW) & 0x3F; + int pf_udw = (env->CP0_PWField >> CP0PF_UDW) & 0x3F; + int pf_mdw = (env->CP0_PWField >> CP0PF_MDW) & 0x3F; + int pf_ptw = (env->CP0_PWField >> CP0PF_PTW) & 0x3F; + int pf_ptew = (env->CP0_PWField >> CP0PF_PTEW) & 0x3F; + + /* Indices computed from faulting address */ + int gindex = (address >> pf_gdw) & ((1 << gdw) - 1); + int uindex = (address >> pf_udw) & ((1 << udw) - 1); + int mindex = (address >> pf_mdw) & ((1 << mdw) - 1); + int ptindex = (address >> pf_ptw) & ((1 << ptw) - 1); + + /* Other HTW configs */ + int hugepg = (env->CP0_PWCtl >> CP0PC_HUGEPG) & 0x1; + + /* HTW Shift values (depend on entry size) */ + int directory_shift = (ptew > 1) ? -1 : + (hugepg && (ptew == 1)) ? native_shift + 1 : native_shift; + int leaf_shift = (ptew > 1) ? -1 : + (ptew == 1) ? native_shift + 1 : native_shift; + + /* Offsets into tables */ + int goffset = gindex << directory_shift; + int uoffset = uindex << directory_shift; + int moffset = mindex << directory_shift; + int ptoffset0 = (ptindex >> 1) << (leaf_shift + 1); + int ptoffset1 = ptoffset0 | (1 << (leaf_shift)); + + uint32_t leafentry_size = 1 << (leaf_shift + 3); + + /* Starting address - Page Table Base */ + uint64_t vaddr = env->CP0_PWBase; + + uint64_t dir_entry; + uint64_t paddr; + int prot; + int m; + + if (!(env->CP0_Config3 & (1 << CP0C3_PW))) { + /* walker is unimplemented */ + return false; + } + if (!(env->CP0_PWCtl & (1 << CP0PC_PWEN))) { + /* walker is disabled */ + return false; + } + if (!(gdw > 0 || udw > 0 || mdw > 0)) { + /* no structure to walk */ + return false; + } + if ((directory_shift == -1) || (leaf_shift == -1)) { + return false; + } + + /* Global Directory */ + if (gdw > 0) { + vaddr |= goffset; + switch (walk_directory(env, &vaddr, pf_gdw, &huge_page, &hgpg_gdhit, + &pw_entrylo0, &pw_entrylo1)) + { + case 0: + return false; + case 1: + goto refill; + case 2: + default: + break; + } + } + + /* Upper directory */ + if (udw > 0) { + vaddr |= uoffset; + switch (walk_directory(env, &vaddr, pf_udw, &huge_page, &hgpg_udhit, + &pw_entrylo0, &pw_entrylo1)) + { + case 0: + return false; + case 1: + goto refill; + case 2: + default: + break; + } + } + + /* Middle directory */ + if (mdw > 0) { + vaddr |= moffset; + switch (walk_directory(env, &vaddr, pf_mdw, &huge_page, &hgpg_mdhit, + &pw_entrylo0, &pw_entrylo1)) + { + case 0: + return false; + case 1: + goto refill; + case 2: + default: + break; + } + } + + /* Leaf Level Page Table - First half of PTE pair */ + vaddr |= ptoffset0; + if (get_physical_address(env, &paddr, &prot, vaddr, MMU_DATA_LOAD, + ACCESS_INT, cpu_mmu_index(env, false)) != + TLBRET_MATCH) { + return false; + } + if (!get_pte(env, vaddr, leafentry_size, &dir_entry)) { + return false; + } + dir_entry = get_tlb_entry_layout(env, dir_entry, leafentry_size, pf_ptew); + pw_entrylo0 = dir_entry; + + /* Leaf Level Page Table - Second half of PTE pair */ + vaddr |= ptoffset1; + if (get_physical_address(env, &paddr, &prot, vaddr, MMU_DATA_LOAD, + ACCESS_INT, cpu_mmu_index(env, false)) != + TLBRET_MATCH) { + return false; + } + if (!get_pte(env, vaddr, leafentry_size, &dir_entry)) { + return false; + } + dir_entry = get_tlb_entry_layout(env, dir_entry, leafentry_size, pf_ptew); + pw_entrylo1 = dir_entry; + +refill: + + m = (1 << pf_ptw) - 1; + + if (huge_page) { + switch (hgpg_bdhit << 3 | hgpg_gdhit << 2 | hgpg_udhit << 1 | + hgpg_mdhit) + { + case 4: + m = (1 << pf_gdw) - 1; + 
if (pf_gdw & 1) { + m >>= 1; + } + break; + case 2: + m = (1 << pf_udw) - 1; + if (pf_udw & 1) { + m >>= 1; + } + break; + case 1: + m = (1 << pf_mdw) - 1; + if (pf_mdw & 1) { + m >>= 1; + } + break; + } + } + pw_pagemask = m >> 12; + update_pagemask(env, pw_pagemask << 13, &pw_pagemask); + pw_entryhi = (address & ~0x1fff) | (env->CP0_EntryHi & 0xFF); + { + target_ulong tmp_entryhi = env->CP0_EntryHi; + int32_t tmp_pagemask = env->CP0_PageMask; + uint64_t tmp_entrylo0 = env->CP0_EntryLo0; + uint64_t tmp_entrylo1 = env->CP0_EntryLo1; + + env->CP0_EntryHi = pw_entryhi; + env->CP0_PageMask = pw_pagemask; + env->CP0_EntryLo0 = pw_entrylo0; + env->CP0_EntryLo1 = pw_entrylo1; + + /* + * The hardware page walker inserts a page into the TLB in a manner + * identical to a TLBWR instruction as executed by the software refill + * handler. + */ + r4k_helper_tlbwr(env); + + env->CP0_EntryHi = tmp_entryhi; + env->CP0_PageMask = tmp_pagemask; + env->CP0_EntryLo0 = tmp_entrylo0; + env->CP0_EntryLo1 = tmp_entrylo1; + } + return true; +} +#endif +#endif + int mips_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw, int mmu_idx) { @@ -558,8 +894,7 @@ int mips_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw, /* data access */ #if !defined(CONFIG_USER_ONLY) - /* XXX: put correct access by using cpu_restore_state() - correctly */ + /* XXX: put correct access by using cpu_restore_state() correctly */ access_type = ACCESS_INT; ret = get_physical_address(env, &physical, &prot, address, rw, access_type, mmu_idx); @@ -583,6 +918,32 @@ int mips_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw, } else if (ret < 0) #endif { +#if !defined(CONFIG_USER_ONLY) +#if !defined(TARGET_MIPS64) + if ((ret == TLBRET_NOMATCH) && (env->tlb->nb_tlb > 1)) { + /* + * Memory reads during hardware page table walking are performed + * as if they were kernel-mode load instructions. 
+ */ + int mode = (env->hflags & MIPS_HFLAG_KSU); + bool ret_walker; + env->hflags &= ~MIPS_HFLAG_KSU; + ret_walker = page_table_walk_refill(env, address, rw, mmu_idx); + env->hflags |= mode; + if (ret_walker) { + ret = get_physical_address(env, &physical, &prot, + address, rw, access_type, mmu_idx); + if (ret == TLBRET_MATCH) { + tlb_set_page(cs, address & TARGET_PAGE_MASK, + physical & TARGET_PAGE_MASK, prot | PAGE_EXEC, + mmu_idx, TARGET_PAGE_SIZE); + ret = 0; + return ret; + } + } + } +#endif +#endif raise_mmu_exception(env, address, rw, ret); ret = 1; } diff --git a/target/mips/helper.h b/target/mips/helper.h index b2a780a6f2..c23e4e5d97 100644 --- a/target/mips/helper.h +++ b/target/mips/helper.h @@ -120,6 +120,8 @@ DEF_HELPER_2(mtc0_pagegrain, void, env, tl) DEF_HELPER_2(mtc0_segctl0, void, env, tl) DEF_HELPER_2(mtc0_segctl1, void, env, tl) DEF_HELPER_2(mtc0_segctl2, void, env, tl) +DEF_HELPER_2(mtc0_pwfield, void, env, tl) +DEF_HELPER_2(mtc0_pwsize, void, env, tl) DEF_HELPER_2(mtc0_wired, void, env, tl) DEF_HELPER_2(mtc0_srsconf0, void, env, tl) DEF_HELPER_2(mtc0_srsconf1, void, env, tl) @@ -127,6 +129,7 @@ DEF_HELPER_2(mtc0_srsconf2, void, env, tl) DEF_HELPER_2(mtc0_srsconf3, void, env, tl) DEF_HELPER_2(mtc0_srsconf4, void, env, tl) DEF_HELPER_2(mtc0_hwrena, void, env, tl) +DEF_HELPER_2(mtc0_pwctl, void, env, tl) DEF_HELPER_2(mtc0_count, void, env, tl) DEF_HELPER_2(mtc0_entryhi, void, env, tl) DEF_HELPER_2(mttc0_entryhi, void, env, tl) diff --git a/target/mips/internal.h b/target/mips/internal.h index e41051f8e6..8b1b2456af 100644 --- a/target/mips/internal.h +++ b/target/mips/internal.h @@ -59,7 +59,7 @@ struct mips_def_t { int32_t CP0_PageGrain_rw_bitmask; int32_t CP0_PageGrain; target_ulong CP0_EBaseWG_rw_bitmask; - int insn_flags; + uint64_t insn_flags; enum mips_mmu_types mmu_type; }; @@ -211,6 +211,7 @@ uint64_t float_class_d(uint64_t arg, float_status *fst); extern unsigned int ieee_rm[]; int ieee_ex_to_mips(int xcpt); +void update_pagemask(CPUMIPSState *env, target_ulong arg1, int32_t *pagemask); static inline void restore_rounding_mode(CPUMIPSState *env) { @@ -306,9 +307,9 @@ static inline void compute_hflags(CPUMIPSState *env) { env->hflags &= ~(MIPS_HFLAG_COP1X | MIPS_HFLAG_64 | MIPS_HFLAG_CP0 | MIPS_HFLAG_F64 | MIPS_HFLAG_FPU | MIPS_HFLAG_KSU | - MIPS_HFLAG_AWRAP | MIPS_HFLAG_DSP | MIPS_HFLAG_DSPR2 | - MIPS_HFLAG_SBRI | MIPS_HFLAG_MSA | MIPS_HFLAG_FRE | - MIPS_HFLAG_ELPA | MIPS_HFLAG_ERL); + MIPS_HFLAG_AWRAP | MIPS_HFLAG_DSP | MIPS_HFLAG_DSP_R2 | + MIPS_HFLAG_DSP_R3 | MIPS_HFLAG_SBRI | MIPS_HFLAG_MSA | + MIPS_HFLAG_FRE | MIPS_HFLAG_ELPA | MIPS_HFLAG_ERL); if (env->CP0_Status & (1 << CP0St_ERL)) { env->hflags |= MIPS_HFLAG_ERL; } @@ -355,16 +356,29 @@ static inline void compute_hflags(CPUMIPSState *env) (env->CP0_Config5 & (1 << CP0C5_SBRI))) { env->hflags |= MIPS_HFLAG_SBRI; } - if (env->insn_flags & ASE_DSPR2) { - /* Enables access MIPS DSP resources, now our cpu is DSP ASER2, - so enable to access DSPR2 resources. */ + if (env->insn_flags & ASE_DSP_R3) { + /* + * Our cpu supports DSP R3 ASE, so enable + * access to DSP R3 resources. + */ if (env->CP0_Status & (1 << CP0St_MX)) { - env->hflags |= MIPS_HFLAG_DSP | MIPS_HFLAG_DSPR2; + env->hflags |= MIPS_HFLAG_DSP | MIPS_HFLAG_DSP_R2 | + MIPS_HFLAG_DSP_R3; + } + } else if (env->insn_flags & ASE_DSP_R2) { + /* + * Our cpu supports DSP R2 ASE, so enable + * access to DSP R2 resources. 
+ */ + if (env->CP0_Status & (1 << CP0St_MX)) { + env->hflags |= MIPS_HFLAG_DSP | MIPS_HFLAG_DSP_R2; } } else if (env->insn_flags & ASE_DSP) { - /* Enables access MIPS DSP resources, now our cpu is DSP ASE, - so enable to access DSP resources. */ + /* + * Our cpu supports DSP ASE, so enable + * access to DSP resources. + */ if (env->CP0_Status & (1 << CP0St_MX)) { env->hflags |= MIPS_HFLAG_DSP; } diff --git a/target/mips/machine.c b/target/mips/machine.c index 5ba78acd6d..70a8909b90 100644 --- a/target/mips/machine.c +++ b/target/mips/machine.c @@ -212,8 +212,8 @@ const VMStateDescription vmstate_tlb = { const VMStateDescription vmstate_mips_cpu = { .name = "cpu", - .version_id = 11, - .minimum_version_id = 11, + .version_id = 15, + .minimum_version_id = 15, .post_load = cpu_post_load, .fields = (VMStateField[]) { /* Active TC */ @@ -256,7 +256,11 @@ const VMStateDescription vmstate_mips_cpu = { VMSTATE_UINTTL(env.CP0_SegCtl0, MIPSCPU), VMSTATE_UINTTL(env.CP0_SegCtl1, MIPSCPU), VMSTATE_UINTTL(env.CP0_SegCtl2, MIPSCPU), + VMSTATE_UINTTL(env.CP0_PWBase, MIPSCPU), + VMSTATE_UINTTL(env.CP0_PWField, MIPSCPU), + VMSTATE_UINTTL(env.CP0_PWSize, MIPSCPU), VMSTATE_INT32(env.CP0_Wired, MIPSCPU), + VMSTATE_INT32(env.CP0_PWCtl, MIPSCPU), VMSTATE_INT32(env.CP0_SRSConf0, MIPSCPU), VMSTATE_INT32(env.CP0_SRSConf1, MIPSCPU), VMSTATE_INT32(env.CP0_SRSConf2, MIPSCPU), diff --git a/target/mips/mips-defs.h b/target/mips/mips-defs.h index c8e99791ad..5177618615 100644 --- a/target/mips/mips-defs.h +++ b/target/mips/mips-defs.h @@ -22,40 +22,53 @@ #endif #endif -/* Masks used to mark instructions to indicate which ISA level they - were introduced in. */ -#define ISA_MIPS1 0x00000001 -#define ISA_MIPS2 0x00000002 -#define ISA_MIPS3 0x00000004 -#define ISA_MIPS4 0x00000008 -#define ISA_MIPS5 0x00000010 -#define ISA_MIPS32 0x00000020 -#define ISA_MIPS32R2 0x00000040 -#define ISA_MIPS64 0x00000080 -#define ISA_MIPS64R2 0x00000100 -#define ISA_MIPS32R3 0x00000200 -#define ISA_MIPS64R3 0x00000400 -#define ISA_MIPS32R5 0x00000800 -#define ISA_MIPS64R5 0x00001000 -#define ISA_MIPS32R6 0x00002000 -#define ISA_MIPS64R6 0x00004000 -#define ISA_NANOMIPS32 0x00008000 - -/* MIPS ASEs. */ -#define ASE_MIPS16 0x00010000 -#define ASE_MIPS3D 0x00020000 -#define ASE_MDMX 0x00040000 -#define ASE_DSP 0x00080000 -#define ASE_DSPR2 0x00100000 -#define ASE_MT 0x00200000 -#define ASE_SMARTMIPS 0x00400000 -#define ASE_MICROMIPS 0x00800000 -#define ASE_MSA 0x01000000 - -/* Chip specific instructions. 
*/ -#define INSN_LOONGSON2E 0x20000000 -#define INSN_LOONGSON2F 0x40000000 -#define INSN_VR54XX 0x80000000 +/* + * bit definitions for insn_flags (ISAs/ASEs flags) + * ------------------------------------------------ + */ +/* + * bits 0-31: MIPS base instruction sets + */ +#define ISA_MIPS1 0x0000000000000001ULL +#define ISA_MIPS2 0x0000000000000002ULL +#define ISA_MIPS3 0x0000000000000004ULL +#define ISA_MIPS4 0x0000000000000008ULL +#define ISA_MIPS5 0x0000000000000010ULL +#define ISA_MIPS32 0x0000000000000020ULL +#define ISA_MIPS32R2 0x0000000000000040ULL +#define ISA_MIPS64 0x0000000000000080ULL +#define ISA_MIPS64R2 0x0000000000000100ULL +#define ISA_MIPS32R3 0x0000000000000200ULL +#define ISA_MIPS64R3 0x0000000000000400ULL +#define ISA_MIPS32R5 0x0000000000000800ULL +#define ISA_MIPS64R5 0x0000000000001000ULL +#define ISA_MIPS32R6 0x0000000000002000ULL +#define ISA_MIPS64R6 0x0000000000004000ULL +#define ISA_NANOMIPS32 0x0000000000008000ULL +/* + * bits 32-47: MIPS ASEs + */ +#define ASE_MIPS16 0x0000000100000000ULL +#define ASE_MIPS3D 0x0000000200000000ULL +#define ASE_MDMX 0x0000000400000000ULL +#define ASE_DSP 0x0000000800000000ULL +#define ASE_DSP_R2 0x0000001000000000ULL +#define ASE_DSP_R3 0x0000002000000000ULL +#define ASE_MT 0x0000004000000000ULL +#define ASE_SMARTMIPS 0x0000008000000000ULL +#define ASE_MICROMIPS 0x0000010000000000ULL +#define ASE_MSA 0x0000020000000000ULL +/* + * bits 48-55: vendor-specific base instruction sets + */ +#define INSN_LOONGSON2E 0x0001000000000000ULL +#define INSN_LOONGSON2F 0x0002000000000000ULL +#define INSN_VR54XX 0x0004000000000000ULL +#define INSN_R5900 0x0008000000000000ULL +/* + * bits 56-63: vendor-specific ASEs + */ +#define ASE_MMI 0x0100000000000000ULL /* MIPS CPU defines. */ #define CPU_MIPS1 (ISA_MIPS1) @@ -63,6 +76,7 @@ #define CPU_MIPS3 (CPU_MIPS2 | ISA_MIPS3) #define CPU_MIPS4 (CPU_MIPS3 | ISA_MIPS4) #define CPU_VR54XX (CPU_MIPS4 | INSN_VR54XX) +#define CPU_R5900 (CPU_MIPS3 | INSN_R5900) #define CPU_LOONGSON2E (CPU_MIPS3 | INSN_LOONGSON2E) #define CPU_LOONGSON2F (CPU_MIPS3 | INSN_LOONGSON2F) diff --git a/target/mips/op_helper.c b/target/mips/op_helper.c index c148b310cd..d1f1d1aa35 100644 --- a/target/mips/op_helper.c +++ b/target/mips/op_helper.c @@ -1400,7 +1400,7 @@ void helper_mtc0_context(CPUMIPSState *env, target_ulong arg1) env->CP0_Context = (env->CP0_Context & 0x007FFFFF) | (arg1 & ~0x007FFFFF); } -void helper_mtc0_pagemask(CPUMIPSState *env, target_ulong arg1) +void update_pagemask(CPUMIPSState *env, target_ulong arg1, int32_t *pagemask) { uint64_t mask = arg1 >> (TARGET_PAGE_BITS + 1); if (!(env->insn_flags & ISA_MIPS32R6) || (arg1 == ~0) || @@ -1411,6 +1411,11 @@ void helper_mtc0_pagemask(CPUMIPSState *env, target_ulong arg1) } } +void helper_mtc0_pagemask(CPUMIPSState *env, target_ulong arg1) +{ + update_pagemask(env, arg1, &env->CP0_PageMask); +} + void helper_mtc0_pagegrain(CPUMIPSState *env, target_ulong arg1) { /* SmartMIPS not implemented */ @@ -1445,6 +1450,77 @@ void helper_mtc0_segctl2(CPUMIPSState *env, target_ulong arg1) tlb_flush(cs); } +void helper_mtc0_pwfield(CPUMIPSState *env, target_ulong arg1) +{ +#if defined(TARGET_MIPS64) + uint64_t mask = 0x3F3FFFFFFFULL; + uint32_t old_ptei = (env->CP0_PWField >> CP0PF_PTEI) & 0x3FULL; + uint32_t new_ptei = (arg1 >> CP0PF_PTEI) & 0x3FULL; + + if ((env->insn_flags & ISA_MIPS32R6)) { + if (((arg1 >> CP0PF_BDI) & 0x3FULL) < 12) { + mask &= ~(0x3FULL << CP0PF_BDI); + } + if (((arg1 >> CP0PF_GDI) & 0x3FULL) < 12) { + mask &= ~(0x3FULL << CP0PF_GDI); + } + if (((arg1 >> 
CP0PF_UDI) & 0x3FULL) < 12) { + mask &= ~(0x3FULL << CP0PF_UDI); + } + if (((arg1 >> CP0PF_MDI) & 0x3FULL) < 12) { + mask &= ~(0x3FULL << CP0PF_MDI); + } + if (((arg1 >> CP0PF_PTI) & 0x3FULL) < 12) { + mask &= ~(0x3FULL << CP0PF_PTI); + } + } + env->CP0_PWField = arg1 & mask; + + if ((new_ptei >= 32) || + ((env->insn_flags & ISA_MIPS32R6) && + (new_ptei == 0 || new_ptei == 1))) { + env->CP0_PWField = (env->CP0_PWField & ~0x3FULL) | + (old_ptei << CP0PF_PTEI); + } +#else + uint32_t mask = 0x3FFFFFFF; + uint32_t old_ptew = (env->CP0_PWField >> CP0PF_PTEW) & 0x3F; + uint32_t new_ptew = (arg1 >> CP0PF_PTEW) & 0x3F; + + if ((env->insn_flags & ISA_MIPS32R6)) { + if (((arg1 >> CP0PF_GDW) & 0x3F) < 12) { + mask &= ~(0x3F << CP0PF_GDW); + } + if (((arg1 >> CP0PF_UDW) & 0x3F) < 12) { + mask &= ~(0x3F << CP0PF_UDW); + } + if (((arg1 >> CP0PF_MDW) & 0x3F) < 12) { + mask &= ~(0x3F << CP0PF_MDW); + } + if (((arg1 >> CP0PF_PTW) & 0x3F) < 12) { + mask &= ~(0x3F << CP0PF_PTW); + } + } + env->CP0_PWField = arg1 & mask; + + if ((new_ptew >= 32) || + ((env->insn_flags & ISA_MIPS32R6) && + (new_ptew == 0 || new_ptew == 1))) { + env->CP0_PWField = (env->CP0_PWField & ~0x3F) | + (old_ptew << CP0PF_PTEW); + } +#endif +} + +void helper_mtc0_pwsize(CPUMIPSState *env, target_ulong arg1) +{ +#if defined(TARGET_MIPS64) + env->CP0_PWSize = arg1 & 0x3F7FFFFFFFULL; +#else + env->CP0_PWSize = arg1 & 0x3FFFFFFF; +#endif +} + void helper_mtc0_wired(CPUMIPSState *env, target_ulong arg1) { if (env->insn_flags & ISA_MIPS32R6) { @@ -1456,6 +1532,16 @@ void helper_mtc0_wired(CPUMIPSState *env, target_ulong arg1) } } +void helper_mtc0_pwctl(CPUMIPSState *env, target_ulong arg1) +{ +#if defined(TARGET_MIPS64) + /* PWEn = 0. Hardware page table walking is not implemented. */ + env->CP0_PWCtl = (env->CP0_PWCtl & 0x000000C0) | (arg1 & 0x5C00003F); +#else + env->CP0_PWCtl = (arg1 & 0x800000FF); +#endif +} + void helper_mtc0_srsconf0(CPUMIPSState *env, target_ulong arg1) { env->CP0_SRSConf0 |= arg1 & env->CP0_SRSConf0_rw_bitmask; diff --git a/target/mips/translate.c b/target/mips/translate.c index ab16cdb911..c44a751be9 100644 --- a/target/mips/translate.c +++ b/target/mips/translate.c @@ -1,5 +1,5 @@ /* - * MIPS32 emulation for qemu: main translation routines. 
+ * MIPS emulation for QEMU - main translation routines
  *
  * Copyright (c) 2004-2005 Jocelyn Mayer
  * Copyright (c) 2006 Marius Groeger (FPU operations)
@@ -463,8 +463,10 @@ enum {
     OPC_WSBH = (0x02 << 6) | OPC_BSHFL,
     OPC_SEB = (0x10 << 6) | OPC_BSHFL,
     OPC_SEH = (0x18 << 6) | OPC_BSHFL,
-    OPC_ALIGN = (0x08 << 6) | OPC_BSHFL, /* 010.bp */
-    OPC_ALIGN_END = (0x0B << 6) | OPC_BSHFL, /* 010.00 to 010.11 */
+    OPC_ALIGN = (0x08 << 6) | OPC_BSHFL, /* 010.bp (010.00 to 010.11) */
+    OPC_ALIGN_1 = (0x09 << 6) | OPC_BSHFL,
+    OPC_ALIGN_2 = (0x0A << 6) | OPC_BSHFL,
+    OPC_ALIGN_3 = (0x0B << 6) | OPC_BSHFL,
     OPC_BITSWAP = (0x00 << 6) | OPC_BSHFL  /* 00000 */
 };
 
@@ -474,8 +476,14 @@ enum {
     OPC_DSBH = (0x02 << 6) | OPC_DBSHFL,
     OPC_DSHD = (0x05 << 6) | OPC_DBSHFL,
-    OPC_DALIGN = (0x08 << 6) | OPC_DBSHFL, /* 01.bp */
-    OPC_DALIGN_END = (0x0F << 6) | OPC_DBSHFL, /* 01.000 to 01.111 */
+    OPC_DALIGN = (0x08 << 6) | OPC_DBSHFL, /* 01.bp (01.000 to 01.111) */
+    OPC_DALIGN_1 = (0x09 << 6) | OPC_DBSHFL,
+    OPC_DALIGN_2 = (0x0A << 6) | OPC_DBSHFL,
+    OPC_DALIGN_3 = (0x0B << 6) | OPC_DBSHFL,
+    OPC_DALIGN_4 = (0x0C << 6) | OPC_DBSHFL,
+    OPC_DALIGN_5 = (0x0D << 6) | OPC_DBSHFL,
+    OPC_DALIGN_6 = (0x0E << 6) | OPC_DBSHFL,
+    OPC_DALIGN_7 = (0x0F << 6) | OPC_DBSHFL,
     OPC_DBITSWAP = (0x00 << 6) | OPC_DBSHFL, /* 00000 */
 };
 
@@ -1389,6 +1397,979 @@ enum {
     OPC_BINSRI_df   = (0x7 << 23) | OPC_MSA_BIT_09,
 };
 
+
+/*
+ * AN OVERVIEW OF MXU EXTENSION INSTRUCTION SET
+ * ============================================
+ *
+ * MXU (full name: MIPS eXtension/enhanced Unit) is a SIMD extension of the
+ * MIPS32 instruction set. It is designed to fit the needs of signal,
+ * graphical and video processing applications. The MXU instruction set is
+ * used in the XBurst family of microprocessors by Ingenic.
+ *
+ * The MXU unit contains 17 registers called X0-X16. X0 is always zero, and
+ * X16 is the control register.
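+ *
+ * A minimal usage sketch (illustrative only; the register choice and the
+ * transfer direction are assumptions based on the move-instruction list
+ * below, not taken from the manual excerpt):
+ *
+ *     S32I2M XR1, Rb     # assumed: copy GPR Rb into MXU register XR1
+ *     S32M2I XR1, Rb     # assumed: copy MXU register XR1 back into GPR Rb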
+ *
+ * The notation used in MXU assembler mnemonics:
+ *
+ *   XRa, XRb, XRc, XRd - MXU registers
+ *   Rb, Rc, Rd, Rs, Rt - general purpose MIPS registers
+ *   s12                - a subfield of an instruction code
+ *   strd2              - a subfield of an instruction code
+ *   eptn2              - a subfield of an instruction code
+ *   eptn3              - a subfield of an instruction code
+ *   optn2              - a subfield of an instruction code
+ *   optn3              - a subfield of an instruction code
+ *   sft4               - a subfield of an instruction code
+ *
+ *  Load/Store instructions          Multiplication instructions
+ *  -----------------------          ---------------------------
+ *
+ *  S32LDD XRa, Rb, s12              S32MADD XRa, XRd, Rs, Rt
+ *  S32STD XRa, Rb, s12              S32MADDU XRa, XRd, Rs, Rt
+ *  S32LDDV XRa, Rb, Rc, strd2       S32MSUB XRa, XRd, Rs, Rt
+ *  S32STDV XRa, Rb, Rc, strd2       S32MSUBU XRa, XRd, Rs, Rt
+ *  S32LDI XRa, Rb, s12              S32MUL XRa, XRd, Rs, Rt
+ *  S32SDI XRa, Rb, s12              S32MULU XRa, XRd, Rs, Rt
+ *  S32LDIV XRa, Rb, Rc, strd2       D16MUL XRa, XRb, XRc, XRd, optn2
+ *  S32SDIV XRa, Rb, Rc, strd2       D16MULE XRa, XRb, XRc, optn2
+ *  S32LDDR XRa, Rb, s12             D16MULF XRa, XRb, XRc, optn2
+ *  S32STDR XRa, Rb, s12             D16MAC XRa, XRb, XRc, XRd, aptn2, optn2
+ *  S32LDDVR XRa, Rb, Rc, strd2      D16MACE XRa, XRb, XRc, XRd, aptn2, optn2
+ *  S32STDVR XRa, Rb, Rc, strd2      D16MACF XRa, XRb, XRc, XRd, aptn2, optn2
+ *  S32LDIR XRa, Rb, s12             D16MADL XRa, XRb, XRc, XRd, aptn2, optn2
+ *  S32SDIR XRa, Rb, s12             S16MAD XRa, XRb, XRc, XRd, aptn1, optn2
+ *  S32LDIVR XRa, Rb, Rc, strd2      Q8MUL XRa, XRb, XRc, XRd
+ *  S32SDIVR XRa, Rb, Rc, strd2      Q8MULSU XRa, XRb, XRc, XRd
+ *  S16LDD XRa, Rb, s10, eptn2       Q8MAC XRa, XRb, XRc, XRd, aptn2
+ *  S16STD XRa, Rb, s10, eptn2       Q8MACSU XRa, XRb, XRc, XRd, aptn2
+ *  S16LDI XRa, Rb, s10, eptn2       Q8MADL XRa, XRb, XRc, XRd, aptn2
+ *  S16SDI XRa, Rb, s10, eptn2
+ *  S8LDD XRa, Rb, s8, eptn3
+ *  S8STD XRa, Rb, s8, eptn3         Addition and subtraction instructions
+ *  S8LDI XRa, Rb, s8, eptn3         -------------------------------------
+ *  S8SDI XRa, Rb, s8, eptn3
+ *  LXW Rd, Rs, Rt, strd2            D32ADD XRa, XRb, XRc, XRd, eptn2
+ *  LXH Rd, Rs, Rt, strd2            D32ADDC XRa, XRb, XRc, XRd
+ *  LXHU Rd, Rs, Rt, strd2           D32ACC XRa, XRb, XRc, XRd, eptn2
+ *  LXB Rd, Rs, Rt, strd2            D32ACCM XRa, XRb, XRc, XRd, eptn2
+ *  LXBU Rd, Rs, Rt, strd2           D32ASUM XRa, XRb, XRc, XRd, eptn2
+ *                                   S32CPS XRa, XRb, XRc
+ *                                   Q16ADD XRa, XRb, XRc, XRd, eptn2, optn2
+ *  Comparison instructions          Q16ACC XRa, XRb, XRc, XRd, eptn2
+ *  -----------------------          Q16ACCM XRa, XRb, XRc, XRd, eptn2
+ *                                   D16ASUM XRa, XRb, XRc, XRd, eptn2
+ *  S32MAX XRa, XRb, XRc             D16CPS XRa, XRb
+ *  S32MIN XRa, XRb, XRc             D16AVG XRa, XRb, XRc
+ *  S32SLT XRa, XRb, XRc             D16AVGR XRa, XRb, XRc
+ *  S32MOVZ XRa, XRb, XRc            Q8ADD XRa, XRb, XRc, eptn2
+ *  S32MOVN XRa, XRb, XRc            Q8ADDE XRa, XRb, XRc, XRd, eptn2
+ *  D16MAX XRa, XRb, XRc             Q8ACCE XRa, XRb, XRc, XRd, eptn2
+ *  D16MIN XRa, XRb, XRc             Q8ABD XRa, XRb, XRc
+ *  D16SLT XRa, XRb, XRc             Q8SAD XRa, XRb, XRc, XRd
+ *  D16MOVZ XRa, XRb, XRc            Q8AVG XRa, XRb, XRc
+ *  D16MOVN XRa, XRb, XRc            Q8AVGR XRa, XRb, XRc
+ *  Q8MAX XRa, XRb, XRc              D8SUM XRa, XRb, XRc, XRd
+ *  Q8MIN XRa, XRb, XRc              D8SUMC XRa, XRb, XRc, XRd
+ *  Q8SLT XRa, XRb, XRc
+ *  Q8SLTU XRa, XRb, XRc
+ *  Q8MOVZ XRa, XRb, XRc             Shift instructions
+ *  Q8MOVN XRa, XRb, XRc             ------------------
+ *
+ *                                   D32SLL XRa, XRb, XRc, XRd, sft4
+ *  Bitwise instructions             D32SLR XRa, XRb, XRc, XRd, sft4
+ *  --------------------             D32SAR XRa, XRb, XRc, XRd, sft4
+ *                                   D32SARL XRa, XRb, XRc, sft4
+ *  S32NOR XRa, XRb, XRc             D32SLLV XRa, XRb, Rb
+ *  S32AND XRa, XRb, XRc             D32SLRV XRa, XRb, Rb
+ *  S32XOR XRa, XRb, XRc             D32SARV XRa, XRb, Rb
+ *  S32OR XRa, XRb, XRc              D32SARW XRa, XRb, XRc, Rb
+ *                                   Q16SLL XRa, XRb, XRc, XRd, sft4
+ *                                   Q16SLR XRa, XRb, XRc, XRd, sft4
+ *  Miscellaneous instructions       Q16SAR XRa, XRb, XRc, XRd, sft4
+ *  --------------------------       Q16SLLV XRa, XRb, Rb
+ *                                   Q16SLRV XRa, XRb, Rb
+ *  S32SFL XRa, XRb, XRc, XRd, optn2 Q16SARV XRa, XRb, Rb
+ *  S32ALN XRa, XRb, XRc, Rb
+ *  S32ALNI XRa, XRb, XRc, s3
+ *  S32LUI XRa, s8, optn3            Move instructions
+ *  S32EXTR XRa, XRb, Rb, bits5      -----------------
+ *  S32EXTRV XRa, XRb, Rs, Rt
+ *  Q16SCOP XRa, XRb, XRc, XRd       S32M2I XRa, Rb
+ *  Q16SAT XRa, XRb, XRc             S32I2M XRa, Rb
+ *
+ *
+ *              bits
+ *             05..00
+ *
+ *          ┌─ 000000 ─ OPC_MXU_S32MADD
+ *          ├─ 000001 ─ OPC_MXU_S32MADDU
+ *          ├─ 000010 ─ <not assigned>
+ *          │                               20..18
+ *          ├─ 000011 ─ OPC_MXU__POOL00 ─┬─ 000 ─ OPC_MXU_S32MAX
+ *          │                            ├─ 001 ─ OPC_MXU_S32MIN
+ *          │                            ├─ 010 ─ OPC_MXU_D16MAX
+ *          │                            ├─ 011 ─ OPC_MXU_D16MIN
+ *          │                            ├─ 100 ─ OPC_MXU_Q8MAX
+ *          │                            ├─ 101 ─ OPC_MXU_Q8MIN
+ *          │                            ├─ 110 ─ OPC_MXU_Q8SLT
+ *          │                            └─ 111 ─ OPC_MXU_Q8SLTU
+ *          ├─ 000100 ─ OPC_MXU_S32MSUB
+ *          ├─ 000101 ─ OPC_MXU_S32MSUBU    20..18
+ *          ├─ 000110 ─ OPC_MXU__POOL01 ─┬─ 000 ─ OPC_MXU_S32SLT
+ *          │                            ├─ 001 ─ OPC_MXU_D16SLT
+ *          │                            ├─ 010 ─ OPC_MXU_D16AVG
+ *          │                            ├─ 011 ─ OPC_MXU_D16AVGR
+ *          │                            ├─ 100 ─ OPC_MXU_Q8AVG
+ *          │                            ├─ 101 ─ OPC_MXU_Q8AVGR
+ *          │                            └─ 111 ─ OPC_MXU_Q8ADD
+ *          │
+ *          │                               20..18
+ *          ├─ 000111 ─ OPC_MXU__POOL02 ─┬─ 000 ─ OPC_MXU_S32CPS
+ *          │                            ├─ 010 ─ OPC_MXU_D16CPS
+ *          │                            ├─ 100 ─ OPC_MXU_Q8ABD
+ *          │                            └─ 110 ─ OPC_MXU_Q16SAT
+ *          ├─ 001000 ─ OPC_MXU_D16MUL
+ *          │                               25..24
+ *          ├─ 001001 ─ OPC_MXU__POOL03 ─┬─ 00 ─ OPC_MXU_D16MULF
+ *          │                            └─ 01 ─ OPC_MXU_D16MULE
+ *          ├─ 001010 ─ OPC_MXU_D16MAC
+ *          ├─ 001011 ─ OPC_MXU_D16MACF
+ *          ├─ 001100 ─ OPC_MXU_D16MADL
+ *          │                               25..24
+ *          ├─ 001101 ─ OPC_MXU__POOL04 ─┬─ 00 ─ OPC_MXU_S16MAD
+ *          │                            └─ 01 ─ OPC_MXU_S16MAD_1
+ *          ├─ 001110 ─ OPC_MXU_Q16ADD
+ *          ├─ 001111 ─ OPC_MXU_D16MACE
+ *          │                               23
+ *          ├─ 010000 ─ OPC_MXU__POOL05 ─┬─ 0 ─ OPC_MXU_S32LDD
+ *          │                            └─ 1 ─ OPC_MXU_S32LDDR
+ *          │
+ *          │                               23
+ *          ├─ 010001 ─ OPC_MXU__POOL06 ─┬─ 0 ─ OPC_MXU_S32STD
+ *          │                            └─ 1 ─ OPC_MXU_S32STDR
+ *          │
+ *          │                               13..10
+ *          ├─ 010010 ─ OPC_MXU__POOL07 ─┬─ 0000 ─ OPC_MXU_S32LDDV
+ *          │                            └─ 0001 ─ OPC_MXU_S32LDDVR
+ *          │
+ *          │                               13..10
+ *          ├─ 010011 ─ OPC_MXU__POOL08 ─┬─ 0000 ─ OPC_MXU_S32STDV
+ *          │                            └─ 0001 ─ OPC_MXU_S32STDVR
+ *          │
+ *          │                               23
+ *          ├─ 010100 ─ OPC_MXU__POOL09 ─┬─ 0 ─ OPC_MXU_S32LDI
+ *          │                            └─ 1 ─ OPC_MXU_S32LDIR
+ *          │
+ *          │                               23
+ *          ├─ 010101 ─ OPC_MXU__POOL10 ─┬─ 0 ─ OPC_MXU_S32SDI
+ *          │                            └─ 1 ─ OPC_MXU_S32SDIR
+ *          │
+ *          │                               13..10
+ *          ├─ 010110 ─ OPC_MXU__POOL11 ─┬─ 0000 ─ OPC_MXU_S32LDIV
+ *          │                            └─ 0001 ─ OPC_MXU_S32LDIVR
+ *          │
+ *          │                               13..10
+ *          ├─ 010111 ─ OPC_MXU__POOL12 ─┬─ 0000 ─ OPC_MXU_S32SDIV
+ *          │                            └─ 0001 ─ OPC_MXU_S32SDIVR
+ *          ├─ 011000 ─ OPC_MXU_D32ADD
+ *          │                               23..22
+ *   MXU    ├─ 011001 ─ OPC_MXU__POOL13 ─┬─ 00 ─ OPC_MXU_D32ACC
+ * opcodes ─┤                            ├─ 01 ─ OPC_MXU_D32ACCM
+ *          │                            └─ 10 ─ OPC_MXU_D32ASUM
+ *          ├─ 011010 ─ <not assigned>
+ *          │                               23..22
+ *          ├─ 011011 ─ OPC_MXU__POOL14 ─┬─ 00 ─ OPC_MXU_Q16ACC
+ *          │                            ├─ 01 ─ OPC_MXU_Q16ACCM
+ *          │                            └─ 10 ─ OPC_MXU_Q16ASUM
+ *          │
+ *          │                               23..22
+ *          ├─ 011100 ─ OPC_MXU__POOL15 ─┬─ 00 ─ OPC_MXU_Q8ADDE
+ *          │                            ├─ 01 ─ OPC_MXU_D8SUM
+ *          ├─ 011101 ─ OPC_MXU_Q8ACCE   └─ 10 ─ OPC_MXU_D8SUMC
+ *          ├─ 011110 ─ <not assigned>
+ *          ├─ 011111 ─ <not assigned>
+ *          ├─ 100000 ─ <not assigned>
+ *          ├─ 100001 ─ <not assigned>
+ *          ├─ 100010 ─ OPC_MXU_S8LDD
+ *          ├─ 100011 ─ OPC_MXU_S8STD
+ *          ├─ 100100 ─ OPC_MXU_S8LDI
+ *          ├─ 100101 ─ OPC_MXU_S8SDI
+ *          │                               15..14
+ *          ├─ 100110 ─ OPC_MXU__POOL16 ─┬─ 00 ─ OPC_MXU_S32MUL
+ *          │                            ├─ 01 ─ OPC_MXU_S32MULU
+ *          │                            ├─ 10 ─ OPC_MXU_S32EXTR
+ *          │                            └─ 11 ─ OPC_MXU_S32EXTRV
+ *          │
+ *          │                               20..18
+ *          ├─ 100111 ─ OPC_MXU__POOL17 ─┬─ 000 ─ OPC_MXU_D32SARW
+ *          │                            ├─ 001 ─ OPC_MXU_S32ALN
+ *          ├─ 101000 ─ OPC_MXU_LXB      ├─ 010 ─ OPC_MXU_S32ALNI
+ *          ├─ 101001 ─ <not assigned>   ├─ 011 ─ OPC_MXU_S32NOR
+ *          ├─ 101010 ─ OPC_MXU_S16LDD   ├─ 100 ─ OPC_MXU_S32AND
+ *          ├─ 101011 ─ OPC_MXU_S16STD   ├─ 101 ─ OPC_MXU_S32OR
+ *          ├─ 101100 ─ OPC_MXU_S16LDI   ├─ 110 ─ OPC_MXU_S32XOR
+ *          ├─ 101101 ─ OPC_MXU_S16SDI   └─ 111 ─ OPC_MXU_S32LUI
+ *          ├─ 101110 ─ OPC_MXU_S32M2I
+ *          ├─ 101111 ─ OPC_MXU_S32I2M
+ *          ├─ 110000 ─ OPC_MXU_D32SLL
+ *          ├─ 110001 ─ OPC_MXU_D32SLR
+ *          ├─ 110010 ─ OPC_MXU_D32SARL
+ *          ├─ 110011 ─ OPC_MXU_D32SAR
+ *          ├─ 110100 ─ OPC_MXU_Q16SLL
+ *          ├─ 110101 ─ OPC_MXU_Q16SLR      20..18
+ *          ├─ 110110 ─ OPC_MXU__POOL18 ─┬─ 000 ─ OPC_MXU_D32SLLV
+ *          │                            ├─ 001 ─ OPC_MXU_D32SLRV
+ *          │                            ├─ 011 ─ OPC_MXU_D32SARV
+ *          │                            ├─ 100 ─ OPC_MXU_Q16SLLV
+ *          │                            ├─ 101 ─ OPC_MXU_Q16SLRV
+ *          │                            └─ 111 ─ OPC_MXU_Q16SARV
+ *          ├─ 110111 ─ OPC_MXU_Q16SAR
+ *          │                               23..22
+ *          ├─ 111000 ─ OPC_MXU__POOL19 ─┬─ 00 ─ OPC_MXU_Q8MUL
+ *          │                            └─ 01 ─ OPC_MXU_Q8MULSU
+ *          │
+ *          │                               20..18
+ *          ├─ 111001 ─ OPC_MXU__POOL20 ─┬─ 000 ─ OPC_MXU_Q8MOVZ
+ *          │                            ├─ 001 ─ OPC_MXU_Q8MOVN
+ *          │                            ├─ 010 ─ OPC_MXU_D16MOVZ
+ *          │                            ├─ 011 ─ OPC_MXU_D16MOVN
+ *          │                            ├─ 100 ─ OPC_MXU_S32MOVZ
+ *          │                            └─ 101 ─ OPC_MXU_S32MOVN
+ *          │
+ *          │                               23..22
+ *          ├─ 111010 ─ OPC_MXU__POOL21 ─┬─ 00 ─ OPC_MXU_Q8MAC
+ *          │                            └─ 10 ─ OPC_MXU_Q8MACSU
+ *          ├─ 111011 ─ OPC_MXU_Q16SCOP
+ *          ├─ 111100 ─ OPC_MXU_Q8MADL
+ *          ├─ 111101 ─ OPC_MXU_S32SFL
+ *          ├─ 111110 ─ OPC_MXU_Q8SAD
+ *          └─ 111111 ─ <not assigned>
+ *
+ *
+ * Compiled after:
+ *
+ *   "XBurst® Instruction Set Architecture MIPS eXtension/enhanced Unit
+ *   Programming Manual", Ingenic Semiconductor Co., Ltd., 2017
+ */
+
+enum {
+    OPC_MXU_S32MADD  = 0x00,
+    OPC_MXU_S32MADDU = 0x01,
+    /* not assigned  0x02 */
+    OPC_MXU__POOL00  = 0x03,
+    OPC_MXU_S32MSUB  = 0x04,
+    OPC_MXU_S32MSUBU = 0x05,
+    OPC_MXU__POOL01  = 0x06,
+    OPC_MXU__POOL02  = 0x07,
+    OPC_MXU_D16MUL   = 0x08,
+    OPC_MXU__POOL03  = 0x09,
+    OPC_MXU_D16MAC   = 0x0A,
+    OPC_MXU_D16MACF  = 0x0B,
+    OPC_MXU_D16MADL  = 0x0C,
+    OPC_MXU__POOL04  = 0x0D,
+    OPC_MXU_Q16ADD   = 0x0E,
+    OPC_MXU_D16MACE  = 0x0F,
+    OPC_MXU__POOL05  = 0x10,
+    OPC_MXU__POOL06  = 0x11,
+    OPC_MXU__POOL07  = 0x12,
+    OPC_MXU__POOL08  = 0x13,
+    OPC_MXU__POOL09  = 0x14,
+    OPC_MXU__POOL10  = 0x15,
+    OPC_MXU__POOL11  = 0x16,
+    OPC_MXU__POOL12  = 0x17,
+    OPC_MXU_D32ADD   = 0x18,
+    OPC_MXU__POOL13  = 0x19,
+    /* not assigned  0x1A */
+    OPC_MXU__POOL14  = 0x1B,
+    OPC_MXU__POOL15  = 0x1C,
+    OPC_MXU_Q8ACCE   = 0x1D,
+    /* not assigned  0x1E */
+    /* not assigned  0x1F */
+    /* not assigned  0x20 */
+    /* not assigned  0x21 */
+    OPC_MXU_S8LDD    = 0x22,
+    OPC_MXU_S8STD    = 0x23,
+    OPC_MXU_S8LDI    = 0x24,
+    OPC_MXU_S8SDI    = 0x25,
+    OPC_MXU__POOL16  = 0x26,
+    OPC_MXU__POOL17  = 0x27,
+    OPC_MXU_LXB      = 0x28,
+    /* not assigned  0x29 */
+    OPC_MXU_S16LDD   = 0x2A,
+    OPC_MXU_S16STD   = 0x2B,
+    OPC_MXU_S16LDI   = 0x2C,
+    OPC_MXU_S16SDI   = 0x2D,
+    OPC_MXU_S32M2I   = 0x2E,
+    OPC_MXU_S32I2M   = 0x2F,
+    OPC_MXU_D32SLL   = 0x30,
+    OPC_MXU_D32SLR   = 0x31,
+    OPC_MXU_D32SARL  = 0x32,
+    OPC_MXU_D32SAR   = 0x33,
+    OPC_MXU_Q16SLL   = 0x34,
+    OPC_MXU_Q16SLR   = 0x35,
+    OPC_MXU__POOL18  = 0x36,
+    OPC_MXU_Q16SAR   = 0x37,
+    OPC_MXU__POOL19  = 0x38,
+    OPC_MXU__POOL20  = 0x39,
+    OPC_MXU__POOL21  = 0x3A,
+    OPC_MXU_Q16SCOP  = 0x3B,
+    OPC_MXU_Q8MADL   = 0x3C,
+    OPC_MXU_S32SFL   = 0x3D,
+    OPC_MXU_Q8SAD    = 0x3E,
+    /* not assigned  0x3F */
+};
+
+
+/*
+ * 
MXU pool 00 + */ +enum { + OPC_MXU_S32MAX = 0x00, + OPC_MXU_S32MIN = 0x01, + OPC_MXU_D16MAX = 0x02, + OPC_MXU_D16MIN = 0x03, + OPC_MXU_Q8MAX = 0x04, + OPC_MXU_Q8MIN = 0x05, + OPC_MXU_Q8SLT = 0x06, + OPC_MXU_Q8SLTU = 0x07, +}; + +/* + * MXU pool 01 + */ +enum { + OPC_MXU_S32SLT = 0x00, + OPC_MXU_D16SLT = 0x01, + OPC_MXU_D16AVG = 0x02, + OPC_MXU_D16AVGR = 0x03, + OPC_MXU_Q8AVG = 0x04, + OPC_MXU_Q8AVGR = 0x05, + OPC_MXU_Q8ADD = 0x07, +}; + +/* + * MXU pool 02 + */ +enum { + OPC_MXU_S32CPS = 0x00, + OPC_MXU_D16CPS = 0x02, + OPC_MXU_Q8ABD = 0x04, + OPC_MXU_Q16SAT = 0x06, +}; + +/* + * MXU pool 03 + */ +enum { + OPC_MXU_D16MULF = 0x00, + OPC_MXU_D16MULE = 0x01, +}; + +/* + * MXU pool 04 + */ +enum { + OPC_MXU_S16MAD = 0x00, + OPC_MXU_S16MAD_1 = 0x01, +}; + +/* + * MXU pool 05 + */ +enum { + OPC_MXU_S32LDD = 0x00, + OPC_MXU_S32LDDR = 0x01, +}; + +/* + * MXU pool 06 + */ +enum { + OPC_MXU_S32STD = 0x00, + OPC_MXU_S32STDR = 0x01, +}; + +/* + * MXU pool 07 + */ +enum { + OPC_MXU_S32LDDV = 0x00, + OPC_MXU_S32LDDVR = 0x01, +}; + +/* + * MXU pool 08 + */ +enum { + OPC_MXU_S32STDV = 0x00, + OPC_MXU_S32STDVR = 0x01, +}; + +/* + * MXU pool 09 + */ +enum { + OPC_MXU_S32LDI = 0x00, + OPC_MXU_S32LDIR = 0x01, +}; + +/* + * MXU pool 10 + */ +enum { + OPC_MXU_S32SDI = 0x00, + OPC_MXU_S32SDIR = 0x01, +}; + +/* + * MXU pool 11 + */ +enum { + OPC_MXU_S32LDIV = 0x00, + OPC_MXU_S32LDIVR = 0x01, +}; + +/* + * MXU pool 12 + */ +enum { + OPC_MXU_S32SDIV = 0x00, + OPC_MXU_S32SDIVR = 0x01, +}; + +/* + * MXU pool 13 + */ +enum { + OPC_MXU_D32ACC = 0x00, + OPC_MXU_D32ACCM = 0x01, + OPC_MXU_D32ASUM = 0x02, +}; + +/* + * MXU pool 14 + */ +enum { + OPC_MXU_Q16ACC = 0x00, + OPC_MXU_Q16ACCM = 0x01, + OPC_MXU_Q16ASUM = 0x02, +}; + +/* + * MXU pool 15 + */ +enum { + OPC_MXU_Q8ADDE = 0x00, + OPC_MXU_D8SUM = 0x01, + OPC_MXU_D8SUMC = 0x02, +}; + +/* + * MXU pool 16 + */ +enum { + OPC_MXU_S32MUL = 0x00, + OPC_MXU_S32MULU = 0x01, + OPC_MXU_S32EXTR = 0x02, + OPC_MXU_S32EXTRV = 0x03, +}; + +/* + * MXU pool 17 + */ +enum { + OPC_MXU_D32SARW = 0x00, + OPC_MXU_S32ALN = 0x01, + OPC_MXU_S32ALNI = 0x02, + OPC_MXU_S32NOR = 0x03, + OPC_MXU_S32AND = 0x04, + OPC_MXU_S32OR = 0x05, + OPC_MXU_S32XOR = 0x06, + OPC_MXU_S32LUI = 0x07, +}; + +/* + * MXU pool 18 + */ +enum { + OPC_MXU_D32SLLV = 0x00, + OPC_MXU_D32SLRV = 0x01, + OPC_MXU_D32SARV = 0x03, + OPC_MXU_Q16SLLV = 0x04, + OPC_MXU_Q16SLRV = 0x05, + OPC_MXU_Q16SARV = 0x07, +}; + +/* + * MXU pool 19 + */ +enum { + OPC_MXU_Q8MUL = 0x00, + OPC_MXU_Q8MULSU = 0x01, +}; + +/* + * MXU pool 20 + */ +enum { + OPC_MXU_Q8MOVZ = 0x00, + OPC_MXU_Q8MOVN = 0x01, + OPC_MXU_D16MOVZ = 0x02, + OPC_MXU_D16MOVN = 0x03, + OPC_MXU_S32MOVZ = 0x04, + OPC_MXU_S32MOVN = 0x05, +}; + +/* + * MXU pool 21 + */ +enum { + OPC_MXU_Q8MAC = 0x00, + OPC_MXU_Q8MACSU = 0x01, +}; + +/* + * Overview of the TX79-specific instruction set + * ============================================= + * + * The R5900 and the C790 have 128-bit wide GPRs, where the upper 64 bits + * are only used by the specific quadword (128-bit) LQ/SQ load/store + * instructions and certain multimedia instructions (MMIs). These MMIs + * configure the 128-bit data path as two 64-bit, four 32-bit, eight 16-bit + * or sixteen 8-bit paths. 
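+ *
+ * As a worked illustration (the values are invented for this sketch), a
+ * parallel halfword add such as PADDH rd, rs, rt, listed below, performs
+ * eight independent 16-bit additions, one per lane, with no carry between
+ * lanes:
+ *
+ *     rs: 0001 0002 0003 0004 0005 0006 0007 0008   (hex halfwords)
+ *     rt: 0010 0020 0030 0040 0050 0060 0070 0080
+ *     rd: 0011 0022 0033 0044 0055 0066 0077 0088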
+ *
+ * Reference:
+ *
+ *   The Toshiba TX System RISC TX79 Core Architecture manual,
+ *   https://wiki.qemu.org/File:C790.pdf
+ *
+ * Three-Operand Multiply and Multiply-Add (4 instructions)
+ * --------------------------------------------------------
+ *   MADD    [rd,] rs, rt      Multiply/Add
+ *   MADDU   [rd,] rs, rt      Multiply/Add Unsigned
+ *   MULT    [rd,] rs, rt      Multiply (3-operand)
+ *   MULTU   [rd,] rs, rt      Multiply Unsigned (3-operand)
+ *
+ * Multiply Instructions for Pipeline 1 (10 instructions)
+ * ------------------------------------------------------
+ *   MULT1   [rd,] rs, rt      Multiply Pipeline 1
+ *   MULTU1  [rd,] rs, rt      Multiply Unsigned Pipeline 1
+ *   DIV1    rs, rt            Divide Pipeline 1
+ *   DIVU1   rs, rt            Divide Unsigned Pipeline 1
+ *   MADD1   [rd,] rs, rt      Multiply-Add Pipeline 1
+ *   MADDU1  [rd,] rs, rt      Multiply-Add Unsigned Pipeline 1
+ *   MFHI1   rd                Move From HI1 Register
+ *   MFLO1   rd                Move From LO1 Register
+ *   MTHI1   rs                Move To HI1 Register
+ *   MTLO1   rs                Move To LO1 Register
+ *
+ * Arithmetic (19 instructions)
+ * ----------------------------
+ *   PADDB   rd, rs, rt        Parallel Add Byte
+ *   PSUBB   rd, rs, rt        Parallel Subtract Byte
+ *   PADDH   rd, rs, rt        Parallel Add Halfword
+ *   PSUBH   rd, rs, rt        Parallel Subtract Halfword
+ *   PADDW   rd, rs, rt        Parallel Add Word
+ *   PSUBW   rd, rs, rt        Parallel Subtract Word
+ *   PADSBH  rd, rs, rt        Parallel Add/Subtract Halfword
+ *   PADDSB  rd, rs, rt        Parallel Add with Signed Saturation Byte
+ *   PSUBSB  rd, rs, rt        Parallel Subtract with Signed Saturation Byte
+ *   PADDSH  rd, rs, rt        Parallel Add with Signed Saturation Halfword
+ *   PSUBSH  rd, rs, rt        Parallel Subtract with Signed Saturation Halfword
+ *   PADDSW  rd, rs, rt        Parallel Add with Signed Saturation Word
+ *   PSUBSW  rd, rs, rt        Parallel Subtract with Signed Saturation Word
+ *   PADDUB  rd, rs, rt        Parallel Add with Unsigned Saturation Byte
+ *   PSUBUB  rd, rs, rt        Parallel Subtract with Unsigned Saturation Byte
+ *   PADDUH  rd, rs, rt        Parallel Add with Unsigned Saturation Halfword
+ *   PSUBUH  rd, rs, rt        Parallel Subtract with Unsigned Saturation Halfword
+ *   PADDUW  rd, rs, rt        Parallel Add with Unsigned Saturation Word
+ *   PSUBUW  rd, rs, rt        Parallel Subtract with Unsigned Saturation Word
+ *
+ * Min/Max (4 instructions)
+ * ------------------------
+ *   PMAXH   rd, rs, rt        Parallel Maximum Halfword
+ *   PMINH   rd, rs, rt        Parallel Minimum Halfword
+ *   PMAXW   rd, rs, rt        Parallel Maximum Word
+ *   PMINW   rd, rs, rt        Parallel Minimum Word
+ *
+ * Absolute (2 instructions)
+ * -------------------------
+ *   PABSH   rd, rt            Parallel Absolute Halfword
+ *   PABSW   rd, rt            Parallel Absolute Word
+ *
+ * Logical (4 instructions)
+ * ------------------------
+ *   PAND    rd, rs, rt        Parallel AND
+ *   POR     rd, rs, rt        Parallel OR
+ *   PXOR    rd, rs, rt        Parallel XOR
+ *   PNOR    rd, rs, rt        Parallel NOR
+ *
+ * Shift (9 instructions)
+ * ----------------------
+ *   PSLLH   rd, rt, sa        Parallel Shift Left Logical Halfword
+ *   PSRLH   rd, rt, sa        Parallel Shift Right Logical Halfword
+ *   PSRAH   rd, rt, sa        Parallel Shift Right Arithmetic Halfword
+ *   PSLLW   rd, rt, sa        Parallel Shift Left Logical Word
+ *   PSRLW   rd, rt, sa        Parallel Shift Right Logical Word
+ *   PSRAW   rd, rt, sa        Parallel Shift Right Arithmetic Word
+ *   PSLLVW  rd, rt, rs        Parallel Shift Left Logical Variable Word
+ *   PSRLVW  rd, rt, rs        Parallel Shift Right Logical Variable Word
+ *   PSRAVW  rd, rt, rs        Parallel Shift Right Arithmetic Variable Word
+ *
+ * Compare (6 instructions)
+ * ------------------------
+ *   PCGTB   rd, rs, rt        Parallel Compare for Greater Than Byte
+ *   PCEQB   rd, rs, rt        Parallel Compare for Equal Byte
+ *   PCGTH   rd, rs, rt        Parallel
Compare for Greater Than Halfword + * PCEQH rd, rs, rt Parallel Compare for Equal Halfword + * PCGTW rd, rs, rt Parallel Compare for Greater Than Word + * PCEQW rd, rs, rt Parallel Compare for Equal Word + * + * LZC (1 instruction) + * ------------------- + * PLZCW rd, rs Parallel Leading Zero or One Count Word + * + * Quadword Load and Store (2 instructions) + * ---------------------------------------- + * LQ rt, offset(base) Load Quadword + * SQ rt, offset(base) Store Quadword + * + * Multiply and Divide (19 instructions) + * ------------------------------------- + * PMULTW rd, rs, rt Parallel Multiply Word + * PMULTUW rd, rs, rt Parallel Multiply Unsigned Word + * PDIVW rs, rt Parallel Divide Word + * PDIVUW rs, rt Parallel Divide Unsigned Word + * PMADDW rd, rs, rt Parallel Multiply-Add Word + * PMADDUW rd, rs, rt Parallel Multiply-Add Unsigned Word + * PMSUBW rd, rs, rt Parallel Multiply-Subtract Word + * PMULTH rd, rs, rt Parallel Multiply Halfword + * PMADDH rd, rs, rt Parallel Multiply-Add Halfword + * PMSUBH rd, rs, rt Parallel Multiply-Subtract Halfword + * PHMADH rd, rs, rt Parallel Horizontal Multiply-Add Halfword + * PHMSBH rd, rs, rt Parallel Horizontal Multiply-Subtract Halfword + * PDIVBW rs, rt Parallel Divide Broadcast Word + * PMFHI rd Parallel Move From HI Register + * PMFLO rd Parallel Move From LO Register + * PMTHI rs Parallel Move To HI Register + * PMTLO rs Parallel Move To LO Register + * PMFHL rd Parallel Move From HI/LO Register + * PMTHL rs Parallel Move To HI/LO Register + * + * Pack/Extend (11 instructions) + * ----------------------------- + * PPAC5 rd, rt Parallel Pack to 5 bits + * PPACB rd, rs, rt Parallel Pack to Byte + * PPACH rd, rs, rt Parallel Pack to Halfword + * PPACW rd, rs, rt Parallel Pack to Word + * PEXT5 rd, rt Parallel Extend Upper from 5 bits + * PEXTUB rd, rs, rt Parallel Extend Upper from Byte + * PEXTLB rd, rs, rt Parallel Extend Lower from Byte + * PEXTUH rd, rs, rt Parallel Extend Upper from Halfword + * PEXTLH rd, rs, rt Parallel Extend Lower from Halfword + * PEXTUW rd, rs, rt Parallel Extend Upper from Word + * PEXTLW rd, rs, rt Parallel Extend Lower from Word + * + * Others (16 instructions) + * ------------------------ + * PCPYH rd, rt Parallel Copy Halfword + * PCPYLD rd, rs, rt Parallel Copy Lower Doubleword + * PCPYUD rd, rs, rt Parallel Copy Upper Doubleword + * PREVH rd, rt Parallel Reverse Halfword + * PINTH rd, rs, rt Parallel Interleave Halfword + * PINTEH rd, rs, rt Parallel Interleave Even Halfword + * PEXEH rd, rt Parallel Exchange Even Halfword + * PEXCH rd, rt Parallel Exchange Center Halfword + * PEXEW rd, rt Parallel Exchange Even Word + * PEXCW rd, rt Parallel Exchange Center Word + * QFSRV rd, rs, rt Quadword Funnel Shift Right Variable + * MFSA rd Move from Shift Amount Register + * MTSA rs Move to Shift Amount Register + * MTSAB rs, immediate Move Byte Count to Shift Amount Register + * MTSAH rs, immediate Move Halfword Count to Shift Amount Register + * PROT3W rd, rt Parallel Rotate 3 Words + * + * The TX79-specific Multimedia Instruction encodings + * ================================================== + * + * TX79 Multimedia Instruction encoding table keys: + * + * * This code is reserved for future use. An attempt to execute it + * causes a Reserved Instruction exception. + * % This code indicates an instruction class. The instruction word + * must be further decoded by examining additional tables that show + * the values for other instruction fields. 
+ * # This code is reserved for the unsupported instructions DMULT, + * DMULTU, DDIV, DDIVU, LL, LLD, SC, SCD, LWC2 and SWC2. An attempt + * to execute it causes a Reserved Instruction exception. + * + * TX79 Multimedia Instructions encoded by opcode field (MMI, LQ, SQ): + * + * 31 26 0 + * +--------+----------------------------------------+ + * | opcode | | + * +--------+----------------------------------------+ + * + * opcode bits 28..26 + * bits | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 + * 31..29 | 000 | 001 | 010 | 011 | 100 | 101 | 110 | 111 + * -------+-------+-------+-------+-------+-------+-------+-------+------- + * 0 000 |SPECIAL| REGIMM| J | JAL | BEQ | BNE | BLEZ | BGTZ + * 1 001 | ADDI | ADDIU | SLTI | SLTIU | ANDI | ORI | XORI | LUI + * 2 010 | COP0 | COP1 | * | * | BEQL | BNEL | BLEZL | BGTZL + * 3 011 | DADDI | DADDIU| LDL | LDR | MMI% | * | LQ | SQ + * 4 100 | LB | LH | LWL | LW | LBU | LHU | LWR | LWU + * 5 101 | SB | SH | SWL | SW | SDL | SDR | SWR | CACHE + * 6 110 | # | LWC1 | # | PREF | # | LDC1 | # | LD + * 7 111 | # | SWC1 | # | * | # | SDC1 | # | SD + */ + +enum { + TX79_CLASS_MMI = 0x1C << 26, /* Same as OPC_SPECIAL2 */ + TX79_LQ = 0x1E << 26, /* Same as OPC_MSA */ + TX79_SQ = 0x1F << 26, /* Same as OPC_SPECIAL3 */ +}; + +/* + * TX79 Multimedia Instructions with opcode field = MMI: + * + * 31 26 5 0 + * +--------+-------------------------------+--------+ + * | MMI | |function| + * +--------+-------------------------------+--------+ + * + * function bits 2..0 + * bits | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 + * 5..3 | 000 | 001 | 010 | 011 | 100 | 101 | 110 | 111 + * -------+-------+-------+-------+-------+-------+-------+-------+------- + * 0 000 | MADD | MADDU | * | * | PLZCW | * | * | * + * 1 001 | MMI0% | MMI2% | * | * | * | * | * | * + * 2 010 | MFHI1 | MTHI1 | MFLO1 | MTLO1 | * | * | * | * + * 3 011 | MULT1 | MULTU1| DIV1 | DIVU1 | * | * | * | * + * 4 100 | MADD1 | MADDU1| * | * | * | * | * | * + * 5 101 | MMI1% | MMI3% | * | * | * | * | * | * + * 6 110 | PMFHL | PMTHL | * | * | PSLLH | * | PSRLH | PSRAH + * 7 111 | * | * | * | * | PSLLW | * | PSRLW | PSRAW + */ + +#define MASK_TX79_MMI(op) (MASK_OP_MAJOR(op) | ((op) & 0x3F)) +enum { + TX79_MMI_MADD = 0x00 | TX79_CLASS_MMI, /* Same as OPC_MADD */ + TX79_MMI_MADDU = 0x01 | TX79_CLASS_MMI, /* Same as OPC_MADDU */ + TX79_MMI_PLZCW = 0x04 | TX79_CLASS_MMI, + TX79_MMI_CLASS_MMI0 = 0x08 | TX79_CLASS_MMI, + TX79_MMI_CLASS_MMI2 = 0x09 | TX79_CLASS_MMI, + TX79_MMI_MFHI1 = 0x10 | TX79_CLASS_MMI, /* Same minor as OPC_MFHI */ + TX79_MMI_MTHI1 = 0x11 | TX79_CLASS_MMI, /* Same minor as OPC_MTHI */ + TX79_MMI_MFLO1 = 0x12 | TX79_CLASS_MMI, /* Same minor as OPC_MFLO */ + TX79_MMI_MTLO1 = 0x13 | TX79_CLASS_MMI, /* Same minor as OPC_MTLO */ + TX79_MMI_MULT1 = 0x18 | TX79_CLASS_MMI, /* Same minor as OPC_MULT */ + TX79_MMI_MULTU1 = 0x19 | TX79_CLASS_MMI, /* Same minor as OPC_MULTU */ + TX79_MMI_DIV1 = 0x1A | TX79_CLASS_MMI, /* Same minor as OPC_DIV */ + TX79_MMI_DIVU1 = 0x1B | TX79_CLASS_MMI, /* Same minor as OPC_DIVU */ + TX79_MMI_MADD1 = 0x20 | TX79_CLASS_MMI, + TX79_MMI_MADDU1 = 0x21 | TX79_CLASS_MMI, + TX79_MMI_CLASS_MMI1 = 0x28 | TX79_CLASS_MMI, + TX79_MMI_CLASS_MMI3 = 0x29 | TX79_CLASS_MMI, + TX79_MMI_PMFHL = 0x30 | TX79_CLASS_MMI, + TX79_MMI_PMTHL = 0x31 | TX79_CLASS_MMI, + TX79_MMI_PSLLH = 0x34 | TX79_CLASS_MMI, + TX79_MMI_PSRLH = 0x36 | TX79_CLASS_MMI, + TX79_MMI_PSRAH = 0x37 | TX79_CLASS_MMI, + TX79_MMI_PSLLW = 0x3C | TX79_CLASS_MMI, + TX79_MMI_PSRLW = 0x3E | TX79_CLASS_MMI, + TX79_MMI_PSRAW = 0x3F | TX79_CLASS_MMI, +}; + +/* + * TX79 
Multimedia Instructions with opcode field = MMI and bits 5..0 = MMI0: + * + * 31 26 10 6 5 0 + * +--------+----------------------+--------+--------+ + * | MMI | |function| MMI0 | + * +--------+----------------------+--------+--------+ + * + * function bits 7..6 + * bits | 0 | 1 | 2 | 3 + * 10..8 | 00 | 01 | 10 | 11 + * -------+-------+-------+-------+------- + * 0 000 | PADDW | PSUBW | PCGTW | PMAXW + * 1 001 | PADDH | PSUBH | PCGTH | PMAXH + * 2 010 | PADDB | PSUBB | PCGTB | * + * 3 011 | * | * | * | * + * 4 100 | PADDSW| PSUBSW| PEXTLW| PPACW + * 5 101 | PADDSH| PSUBSH| PEXTLH| PPACH + * 6 110 | PADDSB| PSUBSB| PEXTLB| PPACB + * 7 111 | * | * | PEXT5 | PPAC5 + */ + +#define MASK_TX79_MMI0(op) (MASK_OP_MAJOR(op) | ((op) & 0x7FF)) +enum { + TX79_MMI0_PADDW = (0x00 << 6) | TX79_MMI_CLASS_MMI0, + TX79_MMI0_PSUBW = (0x01 << 6) | TX79_MMI_CLASS_MMI0, + TX79_MMI0_PCGTW = (0x02 << 6) | TX79_MMI_CLASS_MMI0, + TX79_MMI0_PMAXW = (0x03 << 6) | TX79_MMI_CLASS_MMI0, + TX79_MMI0_PADDH = (0x04 << 6) | TX79_MMI_CLASS_MMI0, + TX79_MMI0_PSUBH = (0x05 << 6) | TX79_MMI_CLASS_MMI0, + TX79_MMI0_PCGTH = (0x06 << 6) | TX79_MMI_CLASS_MMI0, + TX79_MMI0_PMAXH = (0x07 << 6) | TX79_MMI_CLASS_MMI0, + TX79_MMI0_PADDB = (0x08 << 6) | TX79_MMI_CLASS_MMI0, + TX79_MMI0_PSUBB = (0x09 << 6) | TX79_MMI_CLASS_MMI0, + TX79_MMI0_PCGTB = (0x0A << 6) | TX79_MMI_CLASS_MMI0, + TX79_MMI0_PADDSW = (0x10 << 6) | TX79_MMI_CLASS_MMI0, + TX79_MMI0_PSUBSW = (0x11 << 6) | TX79_MMI_CLASS_MMI0, + TX79_MMI0_PEXTLW = (0x12 << 6) | TX79_MMI_CLASS_MMI0, + TX79_MMI0_PPACW = (0x13 << 6) | TX79_MMI_CLASS_MMI0, + TX79_MMI0_PADDSH = (0x14 << 6) | TX79_MMI_CLASS_MMI0, + TX79_MMI0_PSUBSH = (0x15 << 6) | TX79_MMI_CLASS_MMI0, + TX79_MMI0_PEXTLH = (0x16 << 6) | TX79_MMI_CLASS_MMI0, + TX79_MMI0_PPACH = (0x17 << 6) | TX79_MMI_CLASS_MMI0, + TX79_MMI0_PADDSB = (0x18 << 6) | TX79_MMI_CLASS_MMI0, + TX79_MMI0_PSUBSB = (0x19 << 6) | TX79_MMI_CLASS_MMI0, + TX79_MMI0_PEXTLB = (0x1A << 6) | TX79_MMI_CLASS_MMI0, + TX79_MMI0_PPACB = (0x1B << 6) | TX79_MMI_CLASS_MMI0, + TX79_MMI0_PEXT5 = (0x1E << 6) | TX79_MMI_CLASS_MMI0, + TX79_MMI0_PPAC5 = (0x1F << 6) | TX79_MMI_CLASS_MMI0, +}; + +/* + * TX79 Multimedia Instructions with opcode field = MMI and bits 5..0 = MMI1: + * + * 31 26 10 6 5 0 + * +--------+----------------------+--------+--------+ + * | MMI | |function| MMI1 | + * +--------+----------------------+--------+--------+ + * + * function bits 7..6 + * bits | 0 | 1 | 2 | 3 + * 10..8 | 00 | 01 | 10 | 11 + * -------+-------+-------+-------+------- + * 0 000 | * | PABSW | PCEQW | PMINW + * 1 001 | PADSBH| PABSH | PCEQH | PMINH + * 2 010 | * | * | PCEQB | * + * 3 011 | * | * | * | * + * 4 100 | PADDUW| PSUBUW| PEXTUW| * + * 5 101 | PADDUH| PSUBUH| PEXTUH| * + * 6 110 | PADDUB| PSUBUB| PEXTUB| QFSRV + * 7 111 | * | * | * | * + */ + +#define MASK_TX79_MMI1(op) (MASK_OP_MAJOR(op) | ((op) & 0x7FF)) +enum { + TX79_MMI1_PABSW = (0x01 << 6) | TX79_MMI_CLASS_MMI1, + TX79_MMI1_PCEQW = (0x02 << 6) | TX79_MMI_CLASS_MMI1, + TX79_MMI1_PMINW = (0x03 << 6) | TX79_MMI_CLASS_MMI1, + TX79_MMI1_PADSBH = (0x04 << 6) | TX79_MMI_CLASS_MMI1, + TX79_MMI1_PABSH = (0x05 << 6) | TX79_MMI_CLASS_MMI1, + TX79_MMI1_PCEQH = (0x06 << 6) | TX79_MMI_CLASS_MMI1, + TX79_MMI1_PMINH = (0x07 << 6) | TX79_MMI_CLASS_MMI1, + TX79_MMI1_PCEQB = (0x0A << 6) | TX79_MMI_CLASS_MMI1, + TX79_MMI1_PADDUW = (0x10 << 6) | TX79_MMI_CLASS_MMI1, + TX79_MMI1_PSUBUW = (0x11 << 6) | TX79_MMI_CLASS_MMI1, + TX79_MMI1_PEXTUW = (0x12 << 6) | TX79_MMI_CLASS_MMI1, + TX79_MMI1_PADDUH = (0x14 << 6) | TX79_MMI_CLASS_MMI1, + 
TX79_MMI1_PSUBUH = (0x15 << 6) | TX79_MMI_CLASS_MMI1, + TX79_MMI1_PEXTUH = (0x16 << 6) | TX79_MMI_CLASS_MMI1, + TX79_MMI1_PADDUB = (0x18 << 6) | TX79_MMI_CLASS_MMI1, + TX79_MMI1_PSUBUB = (0x19 << 6) | TX79_MMI_CLASS_MMI1, + TX79_MMI1_PEXTUB = (0x1A << 6) | TX79_MMI_CLASS_MMI1, + TX79_MMI1_QFSRV = (0x1B << 6) | TX79_MMI_CLASS_MMI1, +}; + +/* + * TX79 Multimedia Instructions with opcode field = MMI and bits 5..0 = MMI2: + * + * 31 26 10 6 5 0 + * +--------+----------------------+--------+--------+ + * | MMI | |function| MMI2 | + * +--------+----------------------+--------+--------+ + * + * function bits 7..6 + * bits | 0 | 1 | 2 | 3 + * 10..8 | 00 | 01 | 10 | 11 + * -------+-------+-------+-------+------- + * 0 000 | PMADDW| * | PSLLVW| PSRLVW + * 1 001 | PMSUBW| * | * | * + * 2 010 | PMFHI | PMFLO | PINTH | * + * 3 011 | PMULTW| PDIVW | PCPYLD| * + * 4 100 | PMADDH| PHMADH| PAND | PXOR + * 5 101 | PMSUBH| PHMSBH| * | * + * 6 110 | * | * | PEXEH | PREVH + * 7 111 | PMULTH| PDIVBW| PEXEW | PROT3W + */ + +#define MASK_TX79_MMI2(op) (MASK_OP_MAJOR(op) | ((op) & 0x7FF)) +enum { + TX79_MMI2_PMADDW = (0x00 << 6) | TX79_MMI_CLASS_MMI2, + TX79_MMI2_PSLLVW = (0x02 << 6) | TX79_MMI_CLASS_MMI2, + TX79_MMI2_PSRLVW = (0x03 << 6) | TX79_MMI_CLASS_MMI2, + TX79_MMI2_PMSUBW = (0x04 << 6) | TX79_MMI_CLASS_MMI2, + TX79_MMI2_PMFHI = (0x08 << 6) | TX79_MMI_CLASS_MMI2, + TX79_MMI2_PMFLO = (0x09 << 6) | TX79_MMI_CLASS_MMI2, + TX79_MMI2_PINTH = (0x0A << 6) | TX79_MMI_CLASS_MMI2, + TX79_MMI2_PMULTW = (0x0C << 6) | TX79_MMI_CLASS_MMI2, + TX79_MMI2_PDIVW = (0x0D << 6) | TX79_MMI_CLASS_MMI2, + TX79_MMI2_PCPYLD = (0x0E << 6) | TX79_MMI_CLASS_MMI2, + TX79_MMI2_PMADDH = (0x10 << 6) | TX79_MMI_CLASS_MMI2, + TX79_MMI2_PHMADH = (0x11 << 6) | TX79_MMI_CLASS_MMI2, + TX79_MMI2_PAND = (0x12 << 6) | TX79_MMI_CLASS_MMI2, + TX79_MMI2_PXOR = (0x13 << 6) | TX79_MMI_CLASS_MMI2, + TX79_MMI2_PMSUBH = (0x14 << 6) | TX79_MMI_CLASS_MMI2, + TX79_MMI2_PHMSBH = (0x15 << 6) | TX79_MMI_CLASS_MMI2, + TX79_MMI2_PEXEH = (0x1A << 6) | TX79_MMI_CLASS_MMI2, + TX79_MMI2_PREVH = (0x1B << 6) | TX79_MMI_CLASS_MMI2, + TX79_MMI2_PMULTH = (0x1C << 6) | TX79_MMI_CLASS_MMI2, + TX79_MMI2_PDIVBW = (0x1D << 6) | TX79_MMI_CLASS_MMI2, + TX79_MMI2_PEXEW = (0x1E << 6) | TX79_MMI_CLASS_MMI2, + TX79_MMI2_PROT3W = (0x1F << 6) | TX79_MMI_CLASS_MMI2, +}; + +/* + * TX79 Multimedia Instructions with opcode field = MMI and bits 5..0 = MMI3: + * + * 31 26 10 6 5 0 + * +--------+----------------------+--------+--------+ + * | MMI | |function| MMI3 | + * +--------+----------------------+--------+--------+ + * + * function bits 7..6 + * bits | 0 | 1 | 2 | 3 + * 10..8 | 00 | 01 | 10 | 11 + * -------+-------+-------+-------+------- + * 0 000 |PMADDUW| * | * | PSRAVW + * 1 001 | * | * | * | * + * 2 010 | PMTHI | PMTLO | PINTEH| * + * 3 011 |PMULTUW| PDIVUW| PCPYUD| * + * 4 100 | * | * | POR | PNOR + * 5 101 | * | * | * | * + * 6 110 | * | * | PEXCH | PCPYH + * 7 111 | * | * | PEXCW | * + */ + +#define MASK_TX79_MMI3(op) (MASK_OP_MAJOR(op) | ((op) & 0x7FF)) +enum { + TX79_MMI3_PMADDUW = (0x00 << 6) | TX79_MMI_CLASS_MMI3, + TX79_MMI3_PSRAVW = (0x03 << 6) | TX79_MMI_CLASS_MMI3, + TX79_MMI3_PMTHI = (0x08 << 6) | TX79_MMI_CLASS_MMI3, + TX79_MMI3_PMTLO = (0x09 << 6) | TX79_MMI_CLASS_MMI3, + TX79_MMI3_PINTEH = (0x0A << 6) | TX79_MMI_CLASS_MMI3, + TX79_MMI3_PMULTUW = (0x0C << 6) | TX79_MMI_CLASS_MMI3, + TX79_MMI3_PDIVUW = (0x0D << 6) | TX79_MMI_CLASS_MMI3, + TX79_MMI3_PCPYUD = (0x0E << 6) | TX79_MMI_CLASS_MMI3, + TX79_MMI3_POR = (0x12 << 6) | TX79_MMI_CLASS_MMI3, + TX79_MMI3_PNOR = (0x13 
<< 6) | TX79_MMI_CLASS_MMI3, + TX79_MMI3_PEXCH = (0x1A << 6) | TX79_MMI_CLASS_MMI3, + TX79_MMI3_PCPYH = (0x1B << 6) | TX79_MMI_CLASS_MMI3, + TX79_MMI3_PEXCW = (0x1E << 6) | TX79_MMI_CLASS_MMI3, +}; + /* global register indices */ static TCGv cpu_gpr[32], cpu_PC; static TCGv cpu_HI[MIPS_DSP_ACC], cpu_LO[MIPS_DSP_ACC]; @@ -1447,8 +2428,9 @@ typedef struct DisasContext { target_ulong saved_pc; target_ulong page_start; uint32_t opcode; - int insn_flags; + uint64_t insn_flags; int32_t CP0_Config1; + int32_t CP0_Config2; int32_t CP0_Config3; int32_t CP0_Config5; /* Routine used to access memory */ @@ -1857,9 +2839,20 @@ static inline void check_dsp(DisasContext *ctx) } } -static inline void check_dspr2(DisasContext *ctx) +static inline void check_dsp_r2(DisasContext *ctx) { - if (unlikely(!(ctx->hflags & MIPS_HFLAG_DSPR2))) { + if (unlikely(!(ctx->hflags & MIPS_HFLAG_DSP_R2))) { + if (ctx->insn_flags & ASE_DSP) { + generate_exception_end(ctx, EXCP_DSPDIS); + } else { + generate_exception_end(ctx, EXCP_RI); + } + } +} + +static inline void check_dsp_r3(DisasContext *ctx) +{ + if (unlikely(!(ctx->hflags & MIPS_HFLAG_DSP_R3))) { if (ctx->insn_flags & ASE_DSP) { generate_exception_end(ctx, EXCP_DSPDIS); } else { @@ -1870,7 +2863,7 @@ static inline void check_dspr2(DisasContext *ctx) /* This code generates a "reserved instruction" exception if the CPU does not support the instruction set corresponding to flags. */ -static inline void check_insn(DisasContext *ctx, int flags) +static inline void check_insn(DisasContext *ctx, uint64_t flags) { if (unlikely(!(ctx->insn_flags & flags))) { generate_exception_end(ctx, EXCP_RI); @@ -1880,13 +2873,28 @@ static inline void check_insn(DisasContext *ctx, int flags) /* This code generates a "reserved instruction" exception if the CPU has corresponding flag set which indicates that the instruction has been removed. */ -static inline void check_insn_opc_removed(DisasContext *ctx, int flags) +static inline void check_insn_opc_removed(DisasContext *ctx, uint64_t flags) { if (unlikely(ctx->insn_flags & flags)) { generate_exception_end(ctx, EXCP_RI); } } +/* + * The Linux kernel traps certain reserved instruction exceptions to + * emulate the corresponding instructions. QEMU is the kernel in user + * mode, so those traps are emulated by accepting the instructions. + * + * A reserved instruction exception is generated for flagged CPUs if + * QEMU runs in system mode. + */ +static inline void check_insn_opc_user_only(DisasContext *ctx, uint64_t flags) +{ +#ifndef CONFIG_USER_ONLY + check_insn_opc_removed(ctx, flags); +#endif +} + /* This code generates a "reserved instruction" exception if the CPU does not support 64-bit paired-single (PS) floating point data type */ static inline void check_ps(DisasContext *ctx) @@ -1927,6 +2935,19 @@ static inline void check_xnp(DisasContext *ctx) } } +#ifndef CONFIG_USER_ONLY +/* + * This code generates a "reserved instruction" exception if the + * Config3 PW bit is NOT set. + */ +static inline void check_pw(DisasContext *ctx) +{ + if (unlikely(!(ctx->CP0_Config3 & (1 << CP0C3_PW)))) { + generate_exception_end(ctx, EXCP_RI); + } +} +#endif + /* * This code generates a "reserved instruction" exception if the * Config3 MT bit is NOT set. 
@@ -3231,17 +4252,21 @@ static void gen_shift(DisasContext *ctx, uint32_t opc,
 /* Arithmetic on HI/LO registers */
 static void gen_HILO(DisasContext *ctx, uint32_t opc, int acc, int reg)
 {
-    if (reg == 0 && (opc == OPC_MFHI || opc == OPC_MFLO)) {
+    if (reg == 0 && (opc == OPC_MFHI || opc == TX79_MMI_MFHI1 ||
+                     opc == OPC_MFLO || opc == TX79_MMI_MFLO1)) {
         /* Treat as NOP. */
         return;
     }
 
     if (acc != 0) {
-        check_dsp(ctx);
+        if (!(ctx->insn_flags & INSN_R5900)) {
+            check_dsp(ctx);
+        }
     }
 
     switch (opc) {
     case OPC_MFHI:
+    case TX79_MMI_MFHI1:
 #if defined(TARGET_MIPS64)
         if (acc != 0) {
             tcg_gen_ext32s_tl(cpu_gpr[reg], cpu_HI[acc]);
@@ -3252,6 +4277,7 @@ static void gen_HILO(DisasContext *ctx, uint32_t opc, int acc, int reg)
         }
         break;
     case OPC_MFLO:
+    case TX79_MMI_MFLO1:
 #if defined(TARGET_MIPS64)
         if (acc != 0) {
             tcg_gen_ext32s_tl(cpu_gpr[reg], cpu_LO[acc]);
@@ -3262,6 +4288,7 @@ static void gen_HILO(DisasContext *ctx, uint32_t opc, int acc, int reg)
         }
         break;
    case OPC_MTHI:
+    case TX79_MMI_MTHI1:
         if (reg != 0) {
 #if defined(TARGET_MIPS64)
             if (acc != 0) {
@@ -3276,6 +4303,7 @@ static void gen_HILO(DisasContext *ctx, uint32_t opc, int acc, int reg)
         }
         break;
     case OPC_MTLO:
+    case TX79_MMI_MTLO1:
         if (reg != 0) {
 #if defined(TARGET_MIPS64)
             if (acc != 0) {
@@ -3588,11 +4616,14 @@ static void gen_muldiv(DisasContext *ctx, uint32_t opc,
     gen_load_gpr(t1, rt);
 
     if (acc != 0) {
-        check_dsp(ctx);
+        if (!(ctx->insn_flags & INSN_R5900)) {
+            check_dsp(ctx);
+        }
     }
 
     switch (opc) {
     case OPC_DIV:
+    case TX79_MMI_DIV1:
         {
             TCGv t2 = tcg_temp_new();
             TCGv t3 = tcg_temp_new();
@@ -3614,6 +4645,7 @@ static void gen_muldiv(DisasContext *ctx, uint32_t opc,
         }
         break;
     case OPC_DIVU:
+    case TX79_MMI_DIVU1:
         {
             TCGv t2 = tcg_const_tl(0);
             TCGv t3 = tcg_const_tl(1);
@@ -3768,6 +4800,84 @@ static void gen_muldiv(DisasContext *ctx, uint32_t opc,
     tcg_temp_free(t1);
 }
 
+/*
+ * The MULT and MULTU instructions implemented in, for example, the
+ * Toshiba/Sony R5900 and the Toshiba TX19, TX39 and TX79 core
+ * architectures are special three-operand variants with the syntax
+ *
+ *     MULT[U][1] rd, rs, rt
+ *
+ * such that
+ *
+ *     (rd, LO, HI) <- rs * rt
+ *
+ * where the low-order 32 bits of the result are placed into both the
+ * GPR rd and the special register LO. The high-order 32 bits of the
+ * result are placed into the special register HI.
+ *
+ * If the GPR rd is omitted in assembly language, it is taken to be 0,
+ * which is the zero register that always reads as 0.
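+ *
+ * A worked example (register numbers invented for this sketch): on the
+ * R5900, MULT1 $3, $4, $5 computes the 64-bit product of $4 and $5 on
+ * multiply pipeline 1, writing the low 32 bits of the result to both $3
+ * and LO1 and the high 32 bits to HI1. With rd omitted (rd = 0),
+ * MULT $0, $4, $5 discards the GPR copy and behaves like the classic
+ * two-operand MULT.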
+ */ +static void gen_mul_txx9(DisasContext *ctx, uint32_t opc, + int rd, int rs, int rt) +{ + TCGv t0 = tcg_temp_new(); + TCGv t1 = tcg_temp_new(); + int acc = 0; + + gen_load_gpr(t0, rs); + gen_load_gpr(t1, rt); + + switch (opc) { + case TX79_MMI_MULT1: + acc = 1; + /* Fall through */ + case OPC_MULT: + { + TCGv_i32 t2 = tcg_temp_new_i32(); + TCGv_i32 t3 = tcg_temp_new_i32(); + tcg_gen_trunc_tl_i32(t2, t0); + tcg_gen_trunc_tl_i32(t3, t1); + tcg_gen_muls2_i32(t2, t3, t2, t3); + if (rd) { + tcg_gen_ext_i32_tl(cpu_gpr[rd], t2); + } + tcg_gen_ext_i32_tl(cpu_LO[acc], t2); + tcg_gen_ext_i32_tl(cpu_HI[acc], t3); + tcg_temp_free_i32(t2); + tcg_temp_free_i32(t3); + } + break; + case TX79_MMI_MULTU1: + acc = 1; + /* Fall through */ + case OPC_MULTU: + { + TCGv_i32 t2 = tcg_temp_new_i32(); + TCGv_i32 t3 = tcg_temp_new_i32(); + tcg_gen_trunc_tl_i32(t2, t0); + tcg_gen_trunc_tl_i32(t3, t1); + tcg_gen_mulu2_i32(t2, t3, t2, t3); + if (rd) { + tcg_gen_ext_i32_tl(cpu_gpr[rd], t2); + } + tcg_gen_ext_i32_tl(cpu_LO[acc], t2); + tcg_gen_ext_i32_tl(cpu_HI[acc], t3); + tcg_temp_free_i32(t2); + tcg_temp_free_i32(t3); + } + break; + default: + MIPS_INVAL("mul TXx9"); + generate_exception_end(ctx, EXCP_RI); + goto out; + } + + out: + tcg_temp_free(t0); + tcg_temp_free(t1); +} + static void gen_mul_vr54xx (DisasContext *ctx, uint32_t opc, int rd, int rs, int rt) { @@ -5537,6 +6647,21 @@ static void gen_mfc0(DisasContext *ctx, TCGv arg, int reg, int sel) tcg_gen_ext32s_tl(arg, arg); rn = "SegCtl2"; break; + case 5: + check_pw(ctx); + gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_PWBase)); + rn = "PWBase"; + break; + case 6: + check_pw(ctx); + gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_PWField)); + rn = "PWField"; + break; + case 7: + check_pw(ctx); + gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_PWSize)); + rn = "PWSize"; + break; default: goto cp0_unimplemented; } @@ -5572,6 +6697,11 @@ static void gen_mfc0(DisasContext *ctx, TCGv arg, int reg, int sel) gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_SRSConf4)); rn = "SRSConf4"; break; + case 6: + check_pw(ctx); + gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_PWCtl)); + rn = "PWCtl"; + break; default: goto cp0_unimplemented; } @@ -6238,6 +7368,21 @@ static void gen_mtc0(DisasContext *ctx, TCGv arg, int reg, int sel) gen_helper_mtc0_segctl2(cpu_env, arg); rn = "SegCtl2"; break; + case 5: + check_pw(ctx); + gen_mtc0_store32(arg, offsetof(CPUMIPSState, CP0_PWBase)); + rn = "PWBase"; + break; + case 6: + check_pw(ctx); + gen_helper_mtc0_pwfield(cpu_env, arg); + rn = "PWField"; + break; + case 7: + check_pw(ctx); + gen_helper_mtc0_pwsize(cpu_env, arg); + rn = "PWSize"; + break; default: goto cp0_unimplemented; } @@ -6273,6 +7418,11 @@ static void gen_mtc0(DisasContext *ctx, TCGv arg, int reg, int sel) gen_helper_mtc0_srsconf4(cpu_env, arg); rn = "SRSConf4"; break; + case 6: + check_pw(ctx); + gen_helper_mtc0_pwctl(cpu_env, arg); + rn = "PWCtl"; + break; default: goto cp0_unimplemented; } @@ -6948,6 +8098,21 @@ static void gen_dmfc0(DisasContext *ctx, TCGv arg, int reg, int sel) tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_SegCtl2)); rn = "SegCtl2"; break; + case 5: + check_pw(ctx); + tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_PWBase)); + rn = "PWBase"; + break; + case 6: + check_pw(ctx); + tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_PWField)); + rn = "PWField"; + break; + case 7: + check_pw(ctx); + tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_PWSize)); + rn = "PWSize"; + break; default: goto cp0_unimplemented; } @@ 
-6983,6 +8148,11 @@ static void gen_dmfc0(DisasContext *ctx, TCGv arg, int reg, int sel) gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_SRSConf4)); rn = "SRSConf4"; break; + case 6: + check_pw(ctx); + gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_PWCtl)); + rn = "PWCtl"; + break; default: goto cp0_unimplemented; } @@ -7631,6 +8801,21 @@ static void gen_dmtc0(DisasContext *ctx, TCGv arg, int reg, int sel) gen_helper_mtc0_segctl2(cpu_env, arg); rn = "SegCtl2"; break; + case 5: + check_pw(ctx); + tcg_gen_st_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_PWBase)); + rn = "PWBase"; + break; + case 6: + check_pw(ctx); + gen_helper_mtc0_pwfield(cpu_env, arg); + rn = "PWField"; + break; + case 7: + check_pw(ctx); + gen_helper_mtc0_pwsize(cpu_env, arg); + rn = "PWSize"; + break; default: goto cp0_unimplemented; } @@ -7666,6 +8851,11 @@ static void gen_dmtc0(DisasContext *ctx, TCGv arg, int reg, int sel) gen_helper_mtc0_srsconf4(cpu_env, arg); rn = "SRSConf4"; break; + case 6: + check_pw(ctx); + gen_helper_mtc0_pwctl(cpu_env, arg); + rn = "PWCtl"; + break; default: goto cp0_unimplemented; } @@ -14999,15 +16189,15 @@ static void decode_micromips32_opc(CPUMIPSState *env, DisasContext *ctx) case 0x38: /* cmovs */ switch ((ctx->opcode >> 6) & 0x7) { - case MOVN_FMT: /* SELNEZ_FMT */ + case MOVN_FMT: /* SELEQZ_FMT */ if (ctx->insn_flags & ISA_MIPS32R6) { - /* SELNEZ_FMT */ + /* SELEQZ_FMT */ switch ((ctx->opcode >> 9) & 0x3) { case FMT_SDPS_S: - gen_sel_s(ctx, OPC_SELNEZ_S, rd, rt, rs); + gen_sel_s(ctx, OPC_SELEQZ_S, rd, rt, rs); break; case FMT_SDPS_D: - gen_sel_d(ctx, OPC_SELNEZ_D, rd, rt, rs); + gen_sel_d(ctx, OPC_SELEQZ_D, rd, rt, rs); break; default: goto pool32f_invalid; @@ -15021,15 +16211,15 @@ static void decode_micromips32_opc(CPUMIPSState *env, DisasContext *ctx) check_insn_opc_removed(ctx, ISA_MIPS32R6); FINSN_3ARG_SDPS(MOVN); break; - case MOVZ_FMT: /* SELEQZ_FMT */ + case MOVZ_FMT: /* SELNEZ_FMT */ if (ctx->insn_flags & ISA_MIPS32R6) { - /* SELEQZ_FMT */ + /* SELNEZ_FMT */ switch ((ctx->opcode >> 9) & 0x3) { case FMT_SDPS_S: - gen_sel_s(ctx, OPC_SELEQZ_S, rd, rt, rs); + gen_sel_s(ctx, OPC_SELNEZ_S, rd, rt, rs); break; case FMT_SDPS_D: - gen_sel_d(ctx, OPC_SELEQZ_D, rd, rt, rs); + gen_sel_d(ctx, OPC_SELNEZ_D, rd, rt, rs); break; default: goto pool32f_invalid; @@ -16488,6 +17678,40 @@ enum { NM_P_SC = 0x0b, }; +/* P.LS.E0 instruction pool */ +enum { + NM_LBE = 0x00, + NM_SBE = 0x01, + NM_LBUE = 0x02, + NM_P_PREFE = 0x03, + NM_LHE = 0x04, + NM_SHE = 0x05, + NM_LHUE = 0x06, + NM_CACHEE = 0x07, + NM_LWE = 0x08, + NM_SWE = 0x09, + NM_P_LLE = 0x0a, + NM_P_SCE = 0x0b, +}; + +/* P.PREFE instruction pool */ +enum { + NM_SYNCIE = 0x00, + NM_PREFE = 0x01, +}; + +/* P.LLE instruction pool */ +enum { + NM_LLE = 0x00, + NM_LLWPE = 0x01, +}; + +/* P.SCE instruction pool */ +enum { + NM_SCE = 0x00, + NM_SCWPE = 0x01, +}; + /* P.LS.WM instruction pool */ enum { NM_LWM = 0x00, @@ -17444,7 +18668,7 @@ static void gen_pool32axf_2_multiply(DisasContext *ctx, uint32_t opc, case NM_POOL32AXF_2_0_7: switch (extract32(ctx->opcode, 9, 3)) { case NM_DPA_W_PH: - check_dspr2(ctx); + check_dsp_r2(ctx); gen_helper_dpa_w_ph(t0, v1, v0, cpu_env); break; case NM_DPAQ_S_W_PH: @@ -17452,7 +18676,7 @@ static void gen_pool32axf_2_multiply(DisasContext *ctx, uint32_t opc, gen_helper_dpaq_s_w_ph(t0, v1, v0, cpu_env); break; case NM_DPS_W_PH: - check_dspr2(ctx); + check_dsp_r2(ctx); gen_helper_dps_w_ph(t0, v1, v0, cpu_env); break; case NM_DPSQ_S_W_PH: @@ -17467,7 +18691,7 @@ static void gen_pool32axf_2_multiply(DisasContext *ctx, 
uint32_t opc, case NM_POOL32AXF_2_8_15: switch (extract32(ctx->opcode, 9, 3)) { case NM_DPAX_W_PH: - check_dspr2(ctx); + check_dsp_r2(ctx); gen_helper_dpax_w_ph(t0, v0, v1, cpu_env); break; case NM_DPAQ_SA_L_W: @@ -17475,7 +18699,7 @@ static void gen_pool32axf_2_multiply(DisasContext *ctx, uint32_t opc, gen_helper_dpaq_sa_l_w(t0, v0, v1, cpu_env); break; case NM_DPSX_W_PH: - check_dspr2(ctx); + check_dsp_r2(ctx); gen_helper_dpsx_w_ph(t0, v0, v1, cpu_env); break; case NM_DPSQ_SA_L_W: @@ -17494,7 +18718,7 @@ static void gen_pool32axf_2_multiply(DisasContext *ctx, uint32_t opc, gen_helper_dpau_h_qbl(t0, v0, v1, cpu_env); break; case NM_DPAQX_S_W_PH: - check_dspr2(ctx); + check_dsp_r2(ctx); gen_helper_dpaqx_s_w_ph(t0, v0, v1, cpu_env); break; case NM_DPSU_H_QBL: @@ -17502,11 +18726,11 @@ static void gen_pool32axf_2_multiply(DisasContext *ctx, uint32_t opc, gen_helper_dpsu_h_qbl(t0, v0, v1, cpu_env); break; case NM_DPSQX_S_W_PH: - check_dspr2(ctx); + check_dsp_r2(ctx); gen_helper_dpsqx_s_w_ph(t0, v0, v1, cpu_env); break; case NM_MULSA_W_PH: - check_dspr2(ctx); + check_dsp_r2(ctx); gen_helper_mulsa_w_ph(t0, v0, v1, cpu_env); break; default: @@ -17521,7 +18745,7 @@ static void gen_pool32axf_2_multiply(DisasContext *ctx, uint32_t opc, gen_helper_dpau_h_qbr(t0, v1, v0, cpu_env); break; case NM_DPAQX_SA_W_PH: - check_dspr2(ctx); + check_dsp_r2(ctx); gen_helper_dpaqx_sa_w_ph(t0, v1, v0, cpu_env); break; case NM_DPSU_H_QBR: @@ -17529,7 +18753,7 @@ static void gen_pool32axf_2_multiply(DisasContext *ctx, uint32_t opc, gen_helper_dpsu_h_qbr(t0, v1, v0, cpu_env); break; case NM_DPSQX_SA_W_PH: - check_dspr2(ctx); + check_dsp_r2(ctx); gen_helper_dpsqx_sa_w_ph(t0, v1, v0, cpu_env); break; case NM_MULSAQ_S_W_PH: @@ -17571,7 +18795,7 @@ static void gen_pool32axf_2_nanomips_insn(DisasContext *ctx, uint32_t opc, gen_pool32axf_2_multiply(ctx, opc, v0_t, v1_t, rd); break; case NM_BALIGN: - check_dspr2(ctx); + check_dsp_r2(ctx); if (rt != 0) { gen_load_gpr(t0, rs); rd &= 3; @@ -17801,7 +19025,7 @@ static void gen_pool32axf_4_nanomips_insn(DisasContext *ctx, uint32_t opc, switch (opc) { case NM_ABSQ_S_QB: - check_dspr2(ctx); + check_dsp_r2(ctx); gen_helper_absq_s_qb(v0_t, v0_t, cpu_env); gen_store_gpr(v0_t, ret); break; @@ -17940,7 +19164,7 @@ static void gen_pool32axf_7_nanomips_insn(DisasContext *ctx, uint32_t opc, switch (opc) { case NM_SHRA_R_QB: - check_dspr2(ctx); + check_dsp_r2(ctx); tcg_gen_movi_tl(t0, rd >> 2); switch (extract32(ctx->opcode, 12, 1)) { case 0: @@ -17956,7 +19180,7 @@ static void gen_pool32axf_7_nanomips_insn(DisasContext *ctx, uint32_t opc, } break; case NM_SHRL_PH: - check_dspr2(ctx); + check_dsp_r2(ctx); tcg_gen_movi_tl(t0, rd >> 1); gen_helper_shrl_ph(t0, t0, rs_t); gen_store_gpr(t0, rt); @@ -18881,19 +20105,19 @@ static void gen_pool32a5_nanomips_insn(DisasContext *ctx, int opc, gen_store_gpr(v1_t, ret); break; case NM_CMPGDU_EQ_QB: - check_dspr2(ctx); + check_dsp_r2(ctx); gen_helper_cmpgu_eq_qb(v1_t, v1_t, v2_t); tcg_gen_deposit_tl(cpu_dspctrl, cpu_dspctrl, v1_t, 24, 4); gen_store_gpr(v1_t, ret); break; case NM_CMPGDU_LT_QB: - check_dspr2(ctx); + check_dsp_r2(ctx); gen_helper_cmpgu_lt_qb(v1_t, v1_t, v2_t); tcg_gen_deposit_tl(cpu_dspctrl, cpu_dspctrl, v1_t, 24, 4); gen_store_gpr(v1_t, ret); break; case NM_CMPGDU_LE_QB: - check_dspr2(ctx); + check_dsp_r2(ctx); gen_helper_cmpgu_le_qb(v1_t, v1_t, v2_t); tcg_gen_deposit_tl(cpu_dspctrl, cpu_dspctrl, v1_t, 24, 4); gen_store_gpr(v1_t, ret); @@ -18949,7 +20173,7 @@ static void gen_pool32a5_nanomips_insn(DisasContext *ctx, int opc, } break; case 
NM_ADDQH_R_PH: - check_dspr2(ctx); + check_dsp_r2(ctx); switch (extract32(ctx->opcode, 10, 1)) { case 0: /* ADDQH_PH */ @@ -18964,7 +20188,7 @@ static void gen_pool32a5_nanomips_insn(DisasContext *ctx, int opc, } break; case NM_ADDQH_R_W: - check_dspr2(ctx); + check_dsp_r2(ctx); switch (extract32(ctx->opcode, 10, 1)) { case 0: /* ADDQH_W */ @@ -18994,7 +20218,7 @@ static void gen_pool32a5_nanomips_insn(DisasContext *ctx, int opc, } break; case NM_ADDU_S_PH: - check_dspr2(ctx); + check_dsp_r2(ctx); switch (extract32(ctx->opcode, 10, 1)) { case 0: /* ADDU_PH */ @@ -19009,7 +20233,7 @@ static void gen_pool32a5_nanomips_insn(DisasContext *ctx, int opc, } break; case NM_ADDUH_R_QB: - check_dspr2(ctx); + check_dsp_r2(ctx); switch (extract32(ctx->opcode, 10, 1)) { case 0: /* ADDUH_QB */ @@ -19039,7 +20263,7 @@ static void gen_pool32a5_nanomips_insn(DisasContext *ctx, int opc, } break; case NM_SHRAV_R_QB: - check_dspr2(ctx); + check_dsp_r2(ctx); switch (extract32(ctx->opcode, 10, 1)) { case 0: /* SHRAV_QB */ @@ -19069,7 +20293,7 @@ static void gen_pool32a5_nanomips_insn(DisasContext *ctx, int opc, } break; case NM_SUBQH_R_PH: - check_dspr2(ctx); + check_dsp_r2(ctx); switch (extract32(ctx->opcode, 10, 1)) { case 0: /* SUBQH_PH */ @@ -19084,7 +20308,7 @@ static void gen_pool32a5_nanomips_insn(DisasContext *ctx, int opc, } break; case NM_SUBQH_R_W: - check_dspr2(ctx); + check_dsp_r2(ctx); switch (extract32(ctx->opcode, 10, 1)) { case 0: /* SUBQH_W */ @@ -19114,7 +20338,7 @@ static void gen_pool32a5_nanomips_insn(DisasContext *ctx, int opc, } break; case NM_SUBU_S_PH: - check_dspr2(ctx); + check_dsp_r2(ctx); switch (extract32(ctx->opcode, 10, 1)) { case 0: /* SUBU_PH */ @@ -19129,7 +20353,7 @@ static void gen_pool32a5_nanomips_insn(DisasContext *ctx, int opc, } break; case NM_SUBUH_R_QB: - check_dspr2(ctx); + check_dsp_r2(ctx); switch (extract32(ctx->opcode, 10, 1)) { case 0: /* SUBUH_QB */ @@ -19159,7 +20383,7 @@ static void gen_pool32a5_nanomips_insn(DisasContext *ctx, int opc, } break; case NM_PRECR_SRA_R_PH_W: - check_dspr2(ctx); + check_dsp_r2(ctx); switch (extract32(ctx->opcode, 10, 1)) { case 0: /* PRECR_SRA_PH_W */ @@ -19199,22 +20423,22 @@ static void gen_pool32a5_nanomips_insn(DisasContext *ctx, int opc, gen_store_gpr(v1_t, ret); break; case NM_MULQ_S_PH: - check_dspr2(ctx); + check_dsp_r2(ctx); gen_helper_mulq_s_ph(v1_t, v1_t, v2_t, cpu_env); gen_store_gpr(v1_t, ret); break; case NM_MULQ_RS_W: - check_dspr2(ctx); + check_dsp_r2(ctx); gen_helper_mulq_rs_w(v1_t, v1_t, v2_t, cpu_env); gen_store_gpr(v1_t, ret); break; case NM_MULQ_S_W: - check_dspr2(ctx); + check_dsp_r2(ctx); gen_helper_mulq_s_w(v1_t, v1_t, v2_t, cpu_env); gen_store_gpr(v1_t, ret); break; case NM_APPEND: - check_dspr2(ctx); + check_dsp_r2(ctx); gen_load_gpr(t0, rs); if (rd != 0) { tcg_gen_deposit_tl(cpu_gpr[rt], t0, cpu_gpr[rt], rd, 32 - rd); @@ -19232,7 +20456,7 @@ static void gen_pool32a5_nanomips_insn(DisasContext *ctx, int opc, gen_store_gpr(v1_t, ret); break; case NM_SHRLV_PH: - check_dspr2(ctx); + check_dsp_r2(ctx); gen_helper_shrl_ph(v1_t, v1_t, v2_t); gen_store_gpr(v1_t, ret); break; @@ -19274,7 +20498,7 @@ static void gen_pool32a5_nanomips_insn(DisasContext *ctx, int opc, gen_store_gpr(v1_t, ret); break; case NM_MUL_S_PH: - check_dspr2(ctx); + check_dsp_r2(ctx); switch (extract32(ctx->opcode, 10, 1)) { case 0: /* MUL_PH */ @@ -19289,7 +20513,7 @@ static void gen_pool32a5_nanomips_insn(DisasContext *ctx, int opc, } break; case NM_PRECR_QB_PH: - check_dspr2(ctx); + check_dsp_r2(ctx); gen_helper_precr_qb_ph(v1_t, v1_t, 
v2_t); gen_store_gpr(v1_t, ret); break; @@ -19326,8 +20550,8 @@ static void gen_pool32a5_nanomips_insn(DisasContext *ctx, int opc, case 0: /* SHRA_PH */ gen_helper_shra_ph(v1_t, t0, v1_t); - break; gen_store_gpr(v1_t, rt); + break; case 1: /* SHRA_R_PH */ gen_helper_shra_r_ph(v1_t, t0, v1_t); @@ -20098,7 +21322,7 @@ static int decode_nanomips_32_48_opc(CPUMIPSState *env, DisasContext *ctx) gen_compute_branch_cp1_nm(ctx, OPC_BC1NEZ, rt, s); break; case NM_BPOSGE32C: - check_dspr2(ctx); + check_dsp_r3(ctx); { int32_t imm = extract32(ctx->opcode, 1, 13) | extract32(ctx->opcode, 0, 1) << 13; @@ -20607,7 +21831,7 @@ static void gen_mipsdsp_arith(DisasContext *ctx, uint32_t op1, uint32_t op2, switch (op1) { /* OPC_MULT_G_2E is equal OPC_ADDUH_QB_DSP */ case OPC_MULT_G_2E: - check_dspr2(ctx); + check_dsp_r2(ctx); switch (op2) { case OPC_ADDUH_QB: gen_helper_adduh_qb(cpu_gpr[ret], v1_t, v2_t); @@ -20650,7 +21874,7 @@ static void gen_mipsdsp_arith(DisasContext *ctx, uint32_t op1, uint32_t op2, case OPC_ABSQ_S_PH_DSP: switch (op2) { case OPC_ABSQ_S_QB: - check_dspr2(ctx); + check_dsp_r2(ctx); gen_helper_absq_s_qb(cpu_gpr[ret], v2_t, cpu_env); break; case OPC_ABSQ_S_PH: @@ -20729,11 +21953,11 @@ static void gen_mipsdsp_arith(DisasContext *ctx, uint32_t op1, uint32_t op2, gen_helper_addu_s_qb(cpu_gpr[ret], v1_t, v2_t, cpu_env); break; case OPC_ADDU_PH: - check_dspr2(ctx); + check_dsp_r2(ctx); gen_helper_addu_ph(cpu_gpr[ret], v1_t, v2_t, cpu_env); break; case OPC_ADDU_S_PH: - check_dspr2(ctx); + check_dsp_r2(ctx); gen_helper_addu_s_ph(cpu_gpr[ret], v1_t, v2_t, cpu_env); break; case OPC_SUBQ_PH: @@ -20757,11 +21981,11 @@ static void gen_mipsdsp_arith(DisasContext *ctx, uint32_t op1, uint32_t op2, gen_helper_subu_s_qb(cpu_gpr[ret], v1_t, v2_t, cpu_env); break; case OPC_SUBU_PH: - check_dspr2(ctx); + check_dsp_r2(ctx); gen_helper_subu_ph(cpu_gpr[ret], v1_t, v2_t, cpu_env); break; case OPC_SUBU_S_PH: - check_dspr2(ctx); + check_dsp_r2(ctx); gen_helper_subu_s_ph(cpu_gpr[ret], v1_t, v2_t, cpu_env); break; case OPC_ADDSC: @@ -20785,7 +22009,7 @@ static void gen_mipsdsp_arith(DisasContext *ctx, uint32_t op1, uint32_t op2, case OPC_CMPU_EQ_QB_DSP: switch (op2) { case OPC_PRECR_QB_PH: - check_dspr2(ctx); + check_dsp_r2(ctx); gen_helper_precr_qb_ph(cpu_gpr[ret], v1_t, v2_t); break; case OPC_PRECRQ_QB_PH: @@ -20793,7 +22017,7 @@ static void gen_mipsdsp_arith(DisasContext *ctx, uint32_t op1, uint32_t op2, gen_helper_precrq_qb_ph(cpu_gpr[ret], v1_t, v2_t); break; case OPC_PRECR_SRA_PH_W: - check_dspr2(ctx); + check_dsp_r2(ctx); { TCGv_i32 sa_t = tcg_const_i32(v2); gen_helper_precr_sra_ph_w(cpu_gpr[ret], sa_t, v1_t, @@ -20802,7 +22026,7 @@ static void gen_mipsdsp_arith(DisasContext *ctx, uint32_t op1, uint32_t op2, break; } case OPC_PRECR_SRA_R_PH_W: - check_dspr2(ctx); + check_dsp_r2(ctx); { TCGv_i32 sa_t = tcg_const_i32(v2); gen_helper_precr_sra_r_ph_w(cpu_gpr[ret], sa_t, v1_t, @@ -20884,7 +22108,7 @@ static void gen_mipsdsp_arith(DisasContext *ctx, uint32_t op1, uint32_t op2, gen_helper_preceu_qh_obra(cpu_gpr[ret], v2_t); break; case OPC_ABSQ_S_OB: - check_dspr2(ctx); + check_dsp_r2(ctx); gen_helper_absq_s_ob(cpu_gpr[ret], v2_t, cpu_env); break; case OPC_ABSQ_S_PW: @@ -20928,19 +22152,19 @@ static void gen_mipsdsp_arith(DisasContext *ctx, uint32_t op1, uint32_t op2, gen_helper_subu_s_ob(cpu_gpr[ret], v1_t, v2_t, cpu_env); break; case OPC_SUBU_QH: - check_dspr2(ctx); + check_dsp_r2(ctx); gen_helper_subu_qh(cpu_gpr[ret], v1_t, v2_t, cpu_env); break; case OPC_SUBU_S_QH: - check_dspr2(ctx); + check_dsp_r2(ctx); 
gen_helper_subu_s_qh(cpu_gpr[ret], v1_t, v2_t, cpu_env); break; case OPC_SUBUH_OB: - check_dspr2(ctx); + check_dsp_r2(ctx); gen_helper_subuh_ob(cpu_gpr[ret], v1_t, v2_t); break; case OPC_SUBUH_R_OB: - check_dspr2(ctx); + check_dsp_r2(ctx); gen_helper_subuh_r_ob(cpu_gpr[ret], v1_t, v2_t); break; case OPC_ADDQ_PW: @@ -20968,19 +22192,19 @@ static void gen_mipsdsp_arith(DisasContext *ctx, uint32_t op1, uint32_t op2, gen_helper_addu_s_ob(cpu_gpr[ret], v1_t, v2_t, cpu_env); break; case OPC_ADDU_QH: - check_dspr2(ctx); + check_dsp_r2(ctx); gen_helper_addu_qh(cpu_gpr[ret], v1_t, v2_t, cpu_env); break; case OPC_ADDU_S_QH: - check_dspr2(ctx); + check_dsp_r2(ctx); gen_helper_addu_s_qh(cpu_gpr[ret], v1_t, v2_t, cpu_env); break; case OPC_ADDUH_OB: - check_dspr2(ctx); + check_dsp_r2(ctx); gen_helper_adduh_ob(cpu_gpr[ret], v1_t, v2_t); break; case OPC_ADDUH_R_OB: - check_dspr2(ctx); + check_dsp_r2(ctx); gen_helper_adduh_r_ob(cpu_gpr[ret], v1_t, v2_t); break; } @@ -20988,11 +22212,11 @@ static void gen_mipsdsp_arith(DisasContext *ctx, uint32_t op1, uint32_t op2, case OPC_CMPU_EQ_OB_DSP: switch (op2) { case OPC_PRECR_OB_QH: - check_dspr2(ctx); + check_dsp_r2(ctx); gen_helper_precr_ob_qh(cpu_gpr[ret], v1_t, v2_t); break; case OPC_PRECR_SRA_QH_PW: - check_dspr2(ctx); + check_dsp_r2(ctx); { TCGv_i32 ret_t = tcg_const_i32(ret); gen_helper_precr_sra_qh_pw(v2_t, v1_t, v2_t, ret_t); @@ -21000,7 +22224,7 @@ static void gen_mipsdsp_arith(DisasContext *ctx, uint32_t op1, uint32_t op2, break; } case OPC_PRECR_SRA_R_QH_PW: - check_dspr2(ctx); + check_dsp_r2(ctx); { TCGv_i32 sa_v = tcg_const_i32(ret); gen_helper_precr_sra_r_qh_pw(v2_t, v1_t, v2_t, sa_v); @@ -21103,27 +22327,27 @@ static void gen_mipsdsp_shift(DisasContext *ctx, uint32_t opc, gen_helper_shrl_qb(cpu_gpr[ret], v1_t, v2_t); break; case OPC_SHRL_PH: - check_dspr2(ctx); + check_dsp_r2(ctx); gen_helper_shrl_ph(cpu_gpr[ret], t0, v2_t); break; case OPC_SHRLV_PH: - check_dspr2(ctx); + check_dsp_r2(ctx); gen_helper_shrl_ph(cpu_gpr[ret], v1_t, v2_t); break; case OPC_SHRA_QB: - check_dspr2(ctx); + check_dsp_r2(ctx); gen_helper_shra_qb(cpu_gpr[ret], t0, v2_t); break; case OPC_SHRA_R_QB: - check_dspr2(ctx); + check_dsp_r2(ctx); gen_helper_shra_r_qb(cpu_gpr[ret], t0, v2_t); break; case OPC_SHRAV_QB: - check_dspr2(ctx); + check_dsp_r2(ctx); gen_helper_shra_qb(cpu_gpr[ret], v1_t, v2_t); break; case OPC_SHRAV_R_QB: - check_dspr2(ctx); + check_dsp_r2(ctx); gen_helper_shra_r_qb(cpu_gpr[ret], v1_t, v2_t); break; case OPC_SHRA_PH: @@ -21202,19 +22426,19 @@ static void gen_mipsdsp_shift(DisasContext *ctx, uint32_t opc, gen_helper_shll_s_qh(cpu_gpr[ret], v2_t, v1_t, cpu_env); break; case OPC_SHRA_OB: - check_dspr2(ctx); + check_dsp_r2(ctx); gen_helper_shra_ob(cpu_gpr[ret], v2_t, t0); break; case OPC_SHRAV_OB: - check_dspr2(ctx); + check_dsp_r2(ctx); gen_helper_shra_ob(cpu_gpr[ret], v2_t, v1_t); break; case OPC_SHRA_R_OB: - check_dspr2(ctx); + check_dsp_r2(ctx); gen_helper_shra_r_ob(cpu_gpr[ret], v2_t, t0); break; case OPC_SHRAV_R_OB: - check_dspr2(ctx); + check_dsp_r2(ctx); gen_helper_shra_r_ob(cpu_gpr[ret], v2_t, v1_t); break; case OPC_SHRA_PW: @@ -21258,11 +22482,11 @@ static void gen_mipsdsp_shift(DisasContext *ctx, uint32_t opc, gen_helper_shrl_ob(cpu_gpr[ret], v2_t, v1_t); break; case OPC_SHRL_QH: - check_dspr2(ctx); + check_dsp_r2(ctx); gen_helper_shrl_qh(cpu_gpr[ret], v2_t, t0); break; case OPC_SHRLV_QH: - check_dspr2(ctx); + check_dsp_r2(ctx); gen_helper_shrl_qh(cpu_gpr[ret], v2_t, v1_t); break; default: /* Invalid */ @@ -21303,7 +22527,7 @@ static void 
gen_mipsdsp_multiply(DisasContext *ctx, uint32_t op1, uint32_t op2, /* OPC_MULT_G_2E, OPC_ADDUH_QB_DSP, OPC_MUL_PH_DSP have * the same mask and op1. */ case OPC_MULT_G_2E: - check_dspr2(ctx); + check_dsp_r2(ctx); switch (op2) { case OPC_MUL_PH: gen_helper_mul_ph(cpu_gpr[ret], v1_t, v2_t, cpu_env); @@ -21338,11 +22562,11 @@ static void gen_mipsdsp_multiply(DisasContext *ctx, uint32_t op1, uint32_t op2, gen_helper_dpsu_h_qbr(t0, v1_t, v2_t, cpu_env); break; case OPC_DPA_W_PH: - check_dspr2(ctx); + check_dsp_r2(ctx); gen_helper_dpa_w_ph(t0, v1_t, v2_t, cpu_env); break; case OPC_DPAX_W_PH: - check_dspr2(ctx); + check_dsp_r2(ctx); gen_helper_dpax_w_ph(t0, v1_t, v2_t, cpu_env); break; case OPC_DPAQ_S_W_PH: @@ -21350,19 +22574,19 @@ static void gen_mipsdsp_multiply(DisasContext *ctx, uint32_t op1, uint32_t op2, gen_helper_dpaq_s_w_ph(t0, v1_t, v2_t, cpu_env); break; case OPC_DPAQX_S_W_PH: - check_dspr2(ctx); + check_dsp_r2(ctx); gen_helper_dpaqx_s_w_ph(t0, v1_t, v2_t, cpu_env); break; case OPC_DPAQX_SA_W_PH: - check_dspr2(ctx); + check_dsp_r2(ctx); gen_helper_dpaqx_sa_w_ph(t0, v1_t, v2_t, cpu_env); break; case OPC_DPS_W_PH: - check_dspr2(ctx); + check_dsp_r2(ctx); gen_helper_dps_w_ph(t0, v1_t, v2_t, cpu_env); break; case OPC_DPSX_W_PH: - check_dspr2(ctx); + check_dsp_r2(ctx); gen_helper_dpsx_w_ph(t0, v1_t, v2_t, cpu_env); break; case OPC_DPSQ_S_W_PH: @@ -21370,11 +22594,11 @@ static void gen_mipsdsp_multiply(DisasContext *ctx, uint32_t op1, uint32_t op2, gen_helper_dpsq_s_w_ph(t0, v1_t, v2_t, cpu_env); break; case OPC_DPSQX_S_W_PH: - check_dspr2(ctx); + check_dsp_r2(ctx); gen_helper_dpsqx_s_w_ph(t0, v1_t, v2_t, cpu_env); break; case OPC_DPSQX_SA_W_PH: - check_dspr2(ctx); + check_dsp_r2(ctx); gen_helper_dpsqx_sa_w_ph(t0, v1_t, v2_t, cpu_env); break; case OPC_MULSAQ_S_W_PH: @@ -21406,7 +22630,7 @@ static void gen_mipsdsp_multiply(DisasContext *ctx, uint32_t op1, uint32_t op2, gen_helper_maq_sa_w_phr(t0, v1_t, v2_t, cpu_env); break; case OPC_MULSA_W_PH: - check_dspr2(ctx); + check_dsp_r2(ctx); gen_helper_mulsa_w_ph(t0, v1_t, v2_t, cpu_env); break; } @@ -21435,7 +22659,7 @@ static void gen_mipsdsp_multiply(DisasContext *ctx, uint32_t op1, uint32_t op2, gen_helper_dmsubu(v1_t, v2_t, t0, cpu_env); break; case OPC_DPA_W_QH: - check_dspr2(ctx); + check_dsp_r2(ctx); gen_helper_dpa_w_qh(v1_t, v2_t, t0, cpu_env); break; case OPC_DPAQ_S_W_QH: @@ -21455,7 +22679,7 @@ static void gen_mipsdsp_multiply(DisasContext *ctx, uint32_t op1, uint32_t op2, gen_helper_dpau_h_obr(v1_t, v2_t, t0, cpu_env); break; case OPC_DPS_W_QH: - check_dspr2(ctx); + check_dsp_r2(ctx); gen_helper_dps_w_qh(v1_t, v2_t, t0, cpu_env); break; case OPC_DPSQ_S_W_QH: @@ -21549,7 +22773,7 @@ static void gen_mipsdsp_multiply(DisasContext *ctx, uint32_t op1, uint32_t op2, gen_helper_muleq_s_w_phr(cpu_gpr[ret], v1_t, v2_t, cpu_env); break; case OPC_MULQ_S_PH: - check_dspr2(ctx); + check_dsp_r2(ctx); gen_helper_mulq_s_ph(cpu_gpr[ret], v1_t, v2_t, cpu_env); break; } @@ -21773,7 +22997,7 @@ static void gen_mipsdsp_add_cmp_pick(DisasContext *ctx, gen_helper_cmpgu_le_qb(cpu_gpr[ret], v1_t, v2_t); break; case OPC_CMPGDU_EQ_QB: - check_dspr2(ctx); + check_dsp_r2(ctx); gen_helper_cmpgu_eq_qb(t1, v1_t, v2_t); tcg_gen_mov_tl(cpu_gpr[ret], t1); tcg_gen_andi_tl(cpu_dspctrl, cpu_dspctrl, 0xF0FFFFFF); @@ -21781,7 +23005,7 @@ static void gen_mipsdsp_add_cmp_pick(DisasContext *ctx, tcg_gen_or_tl(cpu_dspctrl, cpu_dspctrl, t1); break; case OPC_CMPGDU_LT_QB: - check_dspr2(ctx); + check_dsp_r2(ctx); gen_helper_cmpgu_lt_qb(t1, v1_t, v2_t); tcg_gen_mov_tl(cpu_gpr[ret], 
t1); tcg_gen_andi_tl(cpu_dspctrl, cpu_dspctrl, 0xF0FFFFFF); @@ -21789,7 +23013,7 @@ static void gen_mipsdsp_add_cmp_pick(DisasContext *ctx, tcg_gen_or_tl(cpu_dspctrl, cpu_dspctrl, t1); break; case OPC_CMPGDU_LE_QB: - check_dspr2(ctx); + check_dsp_r2(ctx); gen_helper_cmpgu_le_qb(t1, v1_t, v2_t); tcg_gen_mov_tl(cpu_gpr[ret], t1); tcg_gen_andi_tl(cpu_dspctrl, cpu_dspctrl, 0xF0FFFFFF); @@ -21850,15 +23074,15 @@ static void gen_mipsdsp_add_cmp_pick(DisasContext *ctx, gen_helper_cmp_le_qh(v1_t, v2_t, cpu_env); break; case OPC_CMPGDU_EQ_OB: - check_dspr2(ctx); + check_dsp_r2(ctx); gen_helper_cmpgdu_eq_ob(cpu_gpr[ret], v1_t, v2_t, cpu_env); break; case OPC_CMPGDU_LT_OB: - check_dspr2(ctx); + check_dsp_r2(ctx); gen_helper_cmpgdu_lt_ob(cpu_gpr[ret], v1_t, v2_t, cpu_env); break; case OPC_CMPGDU_LE_OB: - check_dspr2(ctx); + check_dsp_r2(ctx); gen_helper_cmpgdu_le_ob(cpu_gpr[ret], v1_t, v2_t, cpu_env); break; case OPC_CMPGU_EQ_OB: @@ -21916,7 +23140,7 @@ static void gen_mipsdsp_append(CPUMIPSState *env, DisasContext *ctx, { TCGv t0; - check_dspr2(ctx); + check_dsp_r2(ctx); if (rt == 0) { /* Treat as NOP. */ @@ -22351,7 +23575,7 @@ static void decode_opc_special_legacy(CPUMIPSState *env, DisasContext *ctx) case OPC_MOVN: /* Conditional move */ case OPC_MOVZ: check_insn(ctx, ISA_MIPS4 | ISA_MIPS32 | - INSN_LOONGSON2E | INSN_LOONGSON2F); + INSN_LOONGSON2E | INSN_LOONGSON2F | INSN_R5900); gen_cond_move(ctx, op1, rd, rs, rt); break; case OPC_MFHI: /* Move from HI/LO */ @@ -22378,6 +23602,8 @@ static void decode_opc_special_legacy(CPUMIPSState *env, DisasContext *ctx) check_insn(ctx, INSN_VR54XX); op1 = MASK_MUL_VR54XX(ctx->opcode); gen_mul_vr54xx(ctx, op1, rd, rs, rt); + } else if (ctx->insn_flags & INSN_R5900) { + gen_mul_txx9(ctx, op1, rd, rs, rt); } else { gen_muldiv(ctx, op1, rd & 3, rs, rt); } @@ -22392,6 +23618,7 @@ static void decode_opc_special_legacy(CPUMIPSState *env, DisasContext *ctx) case OPC_DDIV: case OPC_DDIVU: check_insn(ctx, ISA_MIPS3); + check_insn_opc_user_only(ctx, INSN_R5900); check_mips_64(ctx); gen_muldiv(ctx, op1, 0, rs, rt); break; @@ -22738,7 +23965,9 @@ static void decode_opc_special3_r6(CPUMIPSState *env, DisasContext *ctx) op2 = MASK_BSHFL(ctx->opcode); switch (op2) { case OPC_ALIGN: - case OPC_ALIGN_END: + case OPC_ALIGN_1: + case OPC_ALIGN_2: + case OPC_ALIGN_3: gen_align(ctx, 32, rd, rs, rt, sa & 3); break; case OPC_BITSWAP: @@ -22764,7 +23993,13 @@ static void decode_opc_special3_r6(CPUMIPSState *env, DisasContext *ctx) op2 = MASK_DBSHFL(ctx->opcode); switch (op2) { case OPC_DALIGN: - case OPC_DALIGN_END: + case OPC_DALIGN_1: + case OPC_DALIGN_2: + case OPC_DALIGN_3: + case OPC_DALIGN_4: + case OPC_DALIGN_5: + case OPC_DALIGN_6: + case OPC_DALIGN_7: gen_align(ctx, 64, rd, rs, rt, sa & 7); break; case OPC_DBITSWAP: @@ -22801,7 +24036,7 @@ static void decode_opc_special3_legacy(CPUMIPSState *env, DisasContext *ctx) case OPC_MULTU_G_2E: /* OPC_MULT_G_2E, OPC_ADDUH_QB_DSP, OPC_MUL_PH_DSP have * the same mask and op1. 
*/ - if ((ctx->insn_flags & ASE_DSPR2) && (op1 == OPC_MULT_G_2E)) { + if ((ctx->insn_flags & ASE_DSP_R2) && (op1 == OPC_MULT_G_2E)) { op2 = MASK_ADDUH_QB(ctx->opcode); switch (op2) { case OPC_ADDUH_QB: @@ -23308,6 +24543,250 @@ static void decode_opc_special3_legacy(CPUMIPSState *env, DisasContext *ctx) } } +static void decode_tx79_mmi0(CPUMIPSState *env, DisasContext *ctx) +{ + uint32_t opc = MASK_TX79_MMI0(ctx->opcode); + + switch (opc) { + case TX79_MMI0_PADDW: /* TODO: TX79_MMI0_PADDW */ + case TX79_MMI0_PSUBW: /* TODO: TX79_MMI0_PSUBW */ + case TX79_MMI0_PCGTW: /* TODO: TX79_MMI0_PCGTW */ + case TX79_MMI0_PMAXW: /* TODO: TX79_MMI0_PMAXW */ + case TX79_MMI0_PADDH: /* TODO: TX79_MMI0_PADDH */ + case TX79_MMI0_PSUBH: /* TODO: TX79_MMI0_PSUBH */ + case TX79_MMI0_PCGTH: /* TODO: TX79_MMI0_PCGTH */ + case TX79_MMI0_PMAXH: /* TODO: TX79_MMI0_PMAXH */ + case TX79_MMI0_PADDB: /* TODO: TX79_MMI0_PADDB */ + case TX79_MMI0_PSUBB: /* TODO: TX79_MMI0_PSUBB */ + case TX79_MMI0_PCGTB: /* TODO: TX79_MMI0_PCGTB */ + case TX79_MMI0_PADDSW: /* TODO: TX79_MMI0_PADDSW */ + case TX79_MMI0_PSUBSW: /* TODO: TX79_MMI0_PSUBSW */ + case TX79_MMI0_PEXTLW: /* TODO: TX79_MMI0_PEXTLW */ + case TX79_MMI0_PPACW: /* TODO: TX79_MMI0_PPACW */ + case TX79_MMI0_PADDSH: /* TODO: TX79_MMI0_PADDSH */ + case TX79_MMI0_PSUBSH: /* TODO: TX79_MMI0_PSUBSH */ + case TX79_MMI0_PEXTLH: /* TODO: TX79_MMI0_PEXTLH */ + case TX79_MMI0_PPACH: /* TODO: TX79_MMI0_PPACH */ + case TX79_MMI0_PADDSB: /* TODO: TX79_MMI0_PADDSB */ + case TX79_MMI0_PSUBSB: /* TODO: TX79_MMI0_PSUBSB */ + case TX79_MMI0_PEXTLB: /* TODO: TX79_MMI0_PEXTLB */ + case TX79_MMI0_PPACB: /* TODO: TX79_MMI0_PPACB */ + case TX79_MMI0_PEXT5: /* TODO: TX79_MMI0_PEXT5 */ + case TX79_MMI0_PPAC5: /* TODO: TX79_MMI0_PPAC5 */ + generate_exception_end(ctx, EXCP_RI); /* TODO: TX79_MMI_CLASS_MMI0 */ + break; + default: + MIPS_INVAL("TX79 MMI class MMI0"); + generate_exception_end(ctx, EXCP_RI); + break; + } +} + +static void decode_tx79_mmi1(CPUMIPSState *env, DisasContext *ctx) +{ + uint32_t opc = MASK_TX79_MMI1(ctx->opcode); + + switch (opc) { + case TX79_MMI1_PABSW: /* TODO: TX79_MMI1_PABSW */ + case TX79_MMI1_PCEQW: /* TODO: TX79_MMI1_PCEQW */ + case TX79_MMI1_PMINW: /* TODO: TX79_MMI1_PMINW */ + case TX79_MMI1_PADSBH: /* TODO: TX79_MMI1_PADSBH */ + case TX79_MMI1_PABSH: /* TODO: TX79_MMI1_PABSH */ + case TX79_MMI1_PCEQH: /* TODO: TX79_MMI1_PCEQH */ + case TX79_MMI1_PMINH: /* TODO: TX79_MMI1_PMINH */ + case TX79_MMI1_PCEQB: /* TODO: TX79_MMI1_PCEQB */ + case TX79_MMI1_PADDUW: /* TODO: TX79_MMI1_PADDUW */ + case TX79_MMI1_PSUBUW: /* TODO: TX79_MMI1_PSUBUW */ + case TX79_MMI1_PEXTUW: /* TODO: TX79_MMI1_PEXTUW */ + case TX79_MMI1_PADDUH: /* TODO: TX79_MMI1_PADDUH */ + case TX79_MMI1_PSUBUH: /* TODO: TX79_MMI1_PSUBUH */ + case TX79_MMI1_PEXTUH: /* TODO: TX79_MMI1_PEXTUH */ + case TX79_MMI1_PADDUB: /* TODO: TX79_MMI1_PADDUB */ + case TX79_MMI1_PSUBUB: /* TODO: TX79_MMI1_PSUBUB */ + case TX79_MMI1_PEXTUB: /* TODO: TX79_MMI1_PEXTUB */ + case TX79_MMI1_QFSRV: /* TODO: TX79_MMI1_QFSRV */ + generate_exception_end(ctx, EXCP_RI); /* TODO: TX79_MMI_CLASS_MMI1 */ + break; + default: + MIPS_INVAL("TX79 MMI class MMI1"); + generate_exception_end(ctx, EXCP_RI); + break; + } +} + +static void decode_tx79_mmi2(CPUMIPSState *env, DisasContext *ctx) +{ + uint32_t opc = MASK_TX79_MMI2(ctx->opcode); + + switch (opc) { + case TX79_MMI2_PMADDW: /* TODO: TX79_MMI2_PMADDW */ + case TX79_MMI2_PSLLVW: /* TODO: TX79_MMI2_PSLLVW */ + case TX79_MMI2_PSRLVW: /* TODO: TX79_MMI2_PSRLVW */ + case TX79_MMI2_PMSUBW: /* 
TODO: TX79_MMI2_PMSUBW */ + case TX79_MMI2_PMFHI: /* TODO: TX79_MMI2_PMFHI */ + case TX79_MMI2_PMFLO: /* TODO: TX79_MMI2_PMFLO */ + case TX79_MMI2_PINTH: /* TODO: TX79_MMI2_PINTH */ + case TX79_MMI2_PMULTW: /* TODO: TX79_MMI2_PMULTW */ + case TX79_MMI2_PDIVW: /* TODO: TX79_MMI2_PDIVW */ + case TX79_MMI2_PCPYLD: /* TODO: TX79_MMI2_PCPYLD */ + case TX79_MMI2_PMADDH: /* TODO: TX79_MMI2_PMADDH */ + case TX79_MMI2_PHMADH: /* TODO: TX79_MMI2_PHMADH */ + case TX79_MMI2_PAND: /* TODO: TX79_MMI2_PAND */ + case TX79_MMI2_PXOR: /* TODO: TX79_MMI2_PXOR */ + case TX79_MMI2_PMSUBH: /* TODO: TX79_MMI2_PMSUBH */ + case TX79_MMI2_PHMSBH: /* TODO: TX79_MMI2_PHMSBH */ + case TX79_MMI2_PEXEH: /* TODO: TX79_MMI2_PEXEH */ + case TX79_MMI2_PREVH: /* TODO: TX79_MMI2_PREVH */ + case TX79_MMI2_PMULTH: /* TODO: TX79_MMI2_PMULTH */ + case TX79_MMI2_PDIVBW: /* TODO: TX79_MMI2_PDIVBW */ + case TX79_MMI2_PEXEW: /* TODO: TX79_MMI2_PEXEW */ + case TX79_MMI2_PROT3W: /* TODO: TX79_MMI2_PROT3W */ + generate_exception_end(ctx, EXCP_RI); /* TODO: TX79_MMI_CLASS_MMI2 */ + break; + default: + MIPS_INVAL("TX79 MMI class MMI2"); + generate_exception_end(ctx, EXCP_RI); + break; + } +} + +static void decode_tx79_mmi3(CPUMIPSState *env, DisasContext *ctx) +{ + uint32_t opc = MASK_TX79_MMI3(ctx->opcode); + + switch (opc) { + case TX79_MMI3_PMADDUW: /* TODO: TX79_MMI3_PMADDUW */ + case TX79_MMI3_PSRAVW: /* TODO: TX79_MMI3_PSRAVW */ + case TX79_MMI3_PMTHI: /* TODO: TX79_MMI3_PMTHI */ + case TX79_MMI3_PMTLO: /* TODO: TX79_MMI3_PMTLO */ + case TX79_MMI3_PINTEH: /* TODO: TX79_MMI3_PINTEH */ + case TX79_MMI3_PMULTUW: /* TODO: TX79_MMI3_PMULTUW */ + case TX79_MMI3_PDIVUW: /* TODO: TX79_MMI3_PDIVUW */ + case TX79_MMI3_PCPYUD: /* TODO: TX79_MMI3_PCPYUD */ + case TX79_MMI3_POR: /* TODO: TX79_MMI3_POR */ + case TX79_MMI3_PNOR: /* TODO: TX79_MMI3_PNOR */ + case TX79_MMI3_PEXCH: /* TODO: TX79_MMI3_PEXCH */ + case TX79_MMI3_PCPYH: /* TODO: TX79_MMI3_PCPYH */ + case TX79_MMI3_PEXCW: /* TODO: TX79_MMI3_PEXCW */ + generate_exception_end(ctx, EXCP_RI); /* TODO: TX79_MMI_CLASS_MMI3 */ + break; + default: + MIPS_INVAL("TX79 MMI class MMI3"); + generate_exception_end(ctx, EXCP_RI); + break; + } +} + +static void decode_tx79_mmi(CPUMIPSState *env, DisasContext *ctx) +{ + uint32_t opc = MASK_TX79_MMI(ctx->opcode); + int rs = extract32(ctx->opcode, 21, 5); + int rt = extract32(ctx->opcode, 16, 5); + int rd = extract32(ctx->opcode, 11, 5); + + switch (opc) { + case TX79_MMI_CLASS_MMI0: + decode_tx79_mmi0(env, ctx); + break; + case TX79_MMI_CLASS_MMI1: + decode_tx79_mmi1(env, ctx); + break; + case TX79_MMI_CLASS_MMI2: + decode_tx79_mmi2(env, ctx); + break; + case TX79_MMI_CLASS_MMI3: + decode_tx79_mmi3(env, ctx); + break; + case TX79_MMI_MULT1: + case TX79_MMI_MULTU1: + gen_mul_txx9(ctx, opc, rd, rs, rt); + break; + case TX79_MMI_DIV1: + case TX79_MMI_DIVU1: + gen_muldiv(ctx, opc, 1, rs, rt); + break; + case TX79_MMI_MTLO1: + case TX79_MMI_MTHI1: + gen_HILO(ctx, opc, 1, rs); + break; + case TX79_MMI_MFLO1: + case TX79_MMI_MFHI1: + gen_HILO(ctx, opc, 1, rd); + break; + case TX79_MMI_MADD: /* TODO: TX79_MMI_MADD */ + case TX79_MMI_MADDU: /* TODO: TX79_MMI_MADDU */ + case TX79_MMI_PLZCW: /* TODO: TX79_MMI_PLZCW */ + case TX79_MMI_MADD1: /* TODO: TX79_MMI_MADD1 */ + case TX79_MMI_MADDU1: /* TODO: TX79_MMI_MADDU1 */ + case TX79_MMI_PMFHL: /* TODO: TX79_MMI_PMFHL */ + case TX79_MMI_PMTHL: /* TODO: TX79_MMI_PMTHL */ + case TX79_MMI_PSLLH: /* TODO: TX79_MMI_PSLLH */ + case TX79_MMI_PSRLH: /* TODO: TX79_MMI_PSRLH */ + case TX79_MMI_PSRAH: /* TODO: TX79_MMI_PSRAH */ + 
case TX79_MMI_PSLLW: /* TODO: TX79_MMI_PSLLW */ + case TX79_MMI_PSRLW: /* TODO: TX79_MMI_PSRLW */ + case TX79_MMI_PSRAW: /* TODO: TX79_MMI_PSRAW */ + generate_exception_end(ctx, EXCP_RI); /* TODO: TX79_CLASS_MMI */ + break; + default: + MIPS_INVAL("TX79 MMI class"); + generate_exception_end(ctx, EXCP_RI); + break; + } +} + +static void decode_tx79_lq(CPUMIPSState *env, DisasContext *ctx) +{ + generate_exception_end(ctx, EXCP_RI); /* TODO: TX79_LQ */ +} + +static void gen_tx79_sq(DisasContext *ctx, int base, int rt, int offset) +{ + generate_exception_end(ctx, EXCP_RI); /* TODO: TX79_SQ */ +} + +/* + * The TX79-specific instruction Store Quadword + * + * +--------+-------+-------+------------------------+ + * | 011111 | base | rt | offset | SQ + * +--------+-------+-------+------------------------+ + * 6 5 5 16 + * + * has the same opcode as the Read Hardware Register instruction + * + * +--------+-------+-------+-------+-------+--------+ + * | 011111 | 00000 | rt | rd | 00000 | 111011 | RDHWR + * +--------+-------+-------+-------+-------+--------+ + * 6 5 5 5 5 6 + * + * that is required, trapped and emulated by the Linux kernel. However, all + * RDHWR encodings yield address error exceptions on the TX79 since the SQ + * offset is odd. Therefore all valid SQ instructions can execute normally. + * In user mode, QEMU must verify the upper and lower 11 bits to distinguish + * between SQ and RDHWR, as the Linux kernel does. + */ +static void decode_tx79_sq(CPUMIPSState *env, DisasContext *ctx) +{ + int base = extract32(ctx->opcode, 21, 5); + int rt = extract32(ctx->opcode, 16, 5); + int offset = extract32(ctx->opcode, 0, 16); + +#ifdef CONFIG_USER_ONLY + uint32_t op1 = MASK_SPECIAL3(ctx->opcode); + uint32_t op2 = extract32(ctx->opcode, 6, 5); + + if (base == 0 && op2 == 0 && op1 == OPC_RDHWR) { + int rd = extract32(ctx->opcode, 11, 5); + + gen_rdhwr(ctx, rt, rd, 0); + return; + } +#endif + + gen_tx79_sq(ctx, base, rt, offset); +} + static void decode_opc_special3(CPUMIPSState *env, DisasContext *ctx) { int rs, rt, rd, sa; @@ -23380,7 +24859,9 @@ static void decode_opc_special3(CPUMIPSState *env, DisasContext *ctx) op2 = MASK_BSHFL(ctx->opcode); switch (op2) { case OPC_ALIGN: - case OPC_ALIGN_END: + case OPC_ALIGN_1: + case OPC_ALIGN_2: + case OPC_ALIGN_3: case OPC_BITSWAP: check_insn(ctx, ISA_MIPS32R6); decode_opc_special3_r6(env, ctx); @@ -23406,7 +24887,13 @@ static void decode_opc_special3(CPUMIPSState *env, DisasContext *ctx) op2 = MASK_DBSHFL(ctx->opcode); switch (op2) { case OPC_DALIGN: - case OPC_DALIGN_END: + case OPC_DALIGN_1: + case OPC_DALIGN_2: + case OPC_DALIGN_3: + case OPC_DALIGN_4: + case OPC_DALIGN_5: + case OPC_DALIGN_6: + case OPC_DALIGN_7: case OPC_DBITSWAP: check_insn(ctx, ISA_MIPS32R6); decode_opc_special3_r6(env, ctx); @@ -24605,10 +26092,18 @@ static void decode_opc(CPUMIPSState *env, DisasContext *ctx) decode_opc_special(env, ctx); break; case OPC_SPECIAL2: - decode_opc_special2_legacy(env, ctx); + if ((ctx->insn_flags & INSN_R5900) && (ctx->insn_flags & ASE_MMI)) { + decode_tx79_mmi(env, ctx); + } else { + decode_opc_special2_legacy(env, ctx); + } break; case OPC_SPECIAL3: - decode_opc_special3(env, ctx); + if (ctx->insn_flags & INSN_R5900) { + decode_tx79_sq(env, ctx); /* TX79_SQ */ + } else { + decode_opc_special3(env, ctx); + } break; case OPC_REGIMM: op1 = MASK_REGIMM(ctx->opcode); @@ -24895,6 +26390,7 @@ static void decode_opc(CPUMIPSState *env, DisasContext *ctx) break; case OPC_LL: /* Load and stores */ check_insn(ctx, ISA_MIPS2); + 
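The Store Quadword comment above rests on a bit-level fact worth spelling out: RDHWR's function field is 111011, and when such a word is decoded as SQ those bits land in the low end of the 16-bit offset field, so the offset is always odd and the access faults on real hardware. A tiny standalone check of that claim, built only from the field layout shown in the comment (a hypothetical test program, not QEMU code):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        /* opcode 011111 | rs 00000 | rt | rd | sa 00000 | function 111011 */
        for (uint32_t rt = 0; rt < 32; rt++) {
            for (uint32_t rd = 0; rd < 32; rd++) {
                uint32_t rdhwr = (0x1fu << 26) | (rt << 16) | (rd << 11) | 0x3b;
                /* Read as SQ, the low 16 bits form the offset field. */
                assert((rdhwr & 0xffff) & 1); /* odd => address error */
            }
        }
        return 0;
    }

This is why decode_tx79_sq() needs the extra base/op2/op1 test only under CONFIG_USER_ONLY, where there is no guest kernel present to emulate the trapped RDHWR.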
check_insn_opc_user_only(ctx, INSN_R5900); /* Fallthrough */ case OPC_LWL: case OPC_LWR: @@ -24920,6 +26416,7 @@ static void decode_opc(CPUMIPSState *env, DisasContext *ctx) case OPC_SC: check_insn(ctx, ISA_MIPS2); check_insn_opc_removed(ctx, ISA_MIPS32R6); + check_insn_opc_user_only(ctx, INSN_R5900); gen_st_cond(ctx, op, rt, rs, imm); break; case OPC_CACHE: @@ -24933,7 +26430,8 @@ static void decode_opc(CPUMIPSState *env, DisasContext *ctx) break; case OPC_PREF: check_insn_opc_removed(ctx, ISA_MIPS32R6); - check_insn(ctx, ISA_MIPS4 | ISA_MIPS32); + check_insn(ctx, ISA_MIPS4 | ISA_MIPS32 | + INSN_R5900); /* Treat as NOP. */ break; @@ -25185,9 +26683,11 @@ static void decode_opc(CPUMIPSState *env, DisasContext *ctx) #if defined(TARGET_MIPS64) /* MIPS64 opcodes */ + case OPC_LLD: + check_insn_opc_user_only(ctx, INSN_R5900); + /* fall through */ case OPC_LDL: case OPC_LDR: - case OPC_LLD: check_insn_opc_removed(ctx, ISA_MIPS32R6); /* fall through */ case OPC_LWU: @@ -25208,6 +26708,7 @@ static void decode_opc(CPUMIPSState *env, DisasContext *ctx) case OPC_SCD: check_insn_opc_removed(ctx, ISA_MIPS32R6); check_insn(ctx, ISA_MIPS3); + check_insn_opc_user_only(ctx, INSN_R5900); check_mips_64(ctx); gen_st_cond(ctx, op, rt, rs, imm); break; @@ -25262,8 +26763,12 @@ static void decode_opc(CPUMIPSState *env, DisasContext *ctx) } break; case OPC_MSA: /* OPC_MDMX */ - /* MDMX: Not implemented. */ - gen_msa(env, ctx); + if (ctx->insn_flags & INSN_R5900) { + decode_tx79_lq(env, ctx); /* TX79_LQ */ + } else { + /* MDMX: Not implemented. */ + gen_msa(env, ctx); + } break; case OPC_PCREL: check_insn(ctx, ISA_MIPS32R6); @@ -25285,6 +26790,7 @@ static void mips_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) ctx->saved_pc = -1; ctx->insn_flags = env->insn_flags; ctx->CP0_Config1 = env->CP0_Config1; + ctx->CP0_Config2 = env->CP0_Config2; ctx->CP0_Config3 = env->CP0_Config3; ctx->CP0_Config5 = env->CP0_Config5; ctx->btarget = 0; @@ -25799,6 +27305,24 @@ void cpu_state_reset(CPUMIPSState *env) env->CP0_Status |= (1 << CP0St_FR); } + if (env->insn_flags & ISA_MIPS32R6) { + /* PTW = 1 */ + env->CP0_PWSize = 0x40; + /* GDI = 12 */ + /* UDI = 12 */ + /* MDI = 12 */ + /* PRI = 12 */ + /* PTEI = 2 */ + env->CP0_PWField = 0x0C30C302; + } else { + /* GDI = 0 */ + /* UDI = 0 */ + /* MDI = 0 */ + /* PRI = 0 */ + /* PTEI = 2 */ + env->CP0_PWField = 0x02; + } + if (env->CP0_Config3 & (1 << CP0C3_ISA) & (1 << (CP0C3_ISA + 1))) { /* microMIPS on reset when Config3.ISA is 3 */ env->hflags |= MIPS_HFLAG_M16; diff --git a/target/mips/translate_init.inc.c b/target/mips/translate_init.inc.c index b3320b9dc7..85da4a269c 100644 --- a/target/mips/translate_init.inc.c +++ b/target/mips/translate_init.inc.c @@ -320,7 +320,7 @@ const mips_def_t mips_defs[] = .CP1_fcr31_rw_bitmask = 0xFF83FFFF, .SEGBITS = 32, .PABITS = 32, - .insn_flags = CPU_MIPS32R2 | ASE_MIPS16 | ASE_DSP | ASE_DSPR2, + .insn_flags = CPU_MIPS32R2 | ASE_MIPS16 | ASE_DSP | ASE_DSP_R2, .mmu_type = MMU_TYPE_R4000, }, { @@ -411,6 +411,65 @@ const mips_def_t mips_defs[] = .mmu_type = MMU_TYPE_R4000, }, { + /* + * The Toshiba TX System RISC TX79 Core Architecture manual + * + * https://wiki.qemu.org/File:C790.pdf + * + * describes the C790 processor that is a follow-up to the R5900. + * There are a few notable differences in that the R5900 FPU + * + * - is not IEEE 754-1985 compliant, + * - does not implement double format, and + * - its machine code is nonstandard. 
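Several of the R5900 hunks in this patch guard opcodes with check_insn_opc_user_only(), whose body does not appear in the diff. Given the "QEMU is the kernel" reasoning in the CONFIG_USER_ONLY comment of this CPU definition, a plausible reading (an assumption, modeled on the check_insn_opc_removed() helper that the diff does use elsewhere) is:

    /* Assumed sketch: in system emulation these opcodes trap on real
     * R5900 hardware and are emulated by the kernel, so treat them as
     * unavailable; in user mode QEMU emulates them directly. */
    static inline void check_insn_opc_user_only(DisasContext *ctx, uint64_t flags)
    {
    #ifndef CONFIG_USER_ONLY
        check_insn_opc_removed(ctx, flags);
    #endif
    }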
+ */ + .name = "R5900", + .CP0_PRid = 0x00002E00, + /* No L2 cache, icache size 32k, dcache size 32k, uncached coherency. */ + .CP0_Config0 = (0x3 << 9) | (0x3 << 6) | (0x2 << CP0C0_K0), + .CP0_Status_rw_bitmask = 0xF4C79C1F, +#ifdef CONFIG_USER_ONLY + /* + * R5900 hardware traps to the Linux kernel for IEEE 754-1985 and LL/SC + * emulation. For user only, QEMU is the kernel, so we emulate the traps + * by simply emulating the instructions directly. + * + * Note: Config1 is only used internally, the R5900 has only Config0. + */ + .CP0_Config1 = (1 << CP0C1_FP) | (47 << CP0C1_MMU), + .CP0_LLAddr_rw_bitmask = 0xFFFFFFFF, + .CP0_LLAddr_shift = 4, + .CP1_fcr0 = (0x38 << FCR0_PRID) | (0x0 << FCR0_REV), + .CP1_fcr31 = 0, + .CP1_fcr31_rw_bitmask = 0x0183FFFF, +#else + /* + * The R5900 COP1 FPU implements single-precision floating-point + * operations but is not entirely IEEE 754-1985 compatible. In + * particular, + * + * - NaN (not a number) and +/- infinities are not supported; + * - exception mechanisms are not fully supported; + * - denormalized numbers are not supported; + * - rounding towards nearest and +/- infinities are not supported; + * - computed results usually differ in the least significant bit; + * - saturations can differ by more than the least significant bit. + * + * Since only rounding towards zero is supported, the two least + * significant bits of FCR31 are hardwired to 01. + * + * FPU emulation is disabled here until it is implemented. + * + * Note: Config1 is only used internally, the R5900 has only Config0. + */ + .CP0_Config1 = (47 << CP0C1_MMU), +#endif /* !CONFIG_USER_ONLY */ + .SEGBITS = 32, + .PABITS = 32, + .insn_flags = CPU_R5900 | ASE_MMI, + .mmu_type = MMU_TYPE_R4000, + }, + { /* A generic CPU supporting MIPS32 Release 6 ISA. FIXME: Support IEEE 754-2008 FP. Eventually this should be replaced by a real CPU model. */ @@ -485,7 +544,8 @@ const mips_def_t mips_defs[] = .CP1_fcr31 = (1 << FCR31_ABS2008) | (1 << FCR31_NAN2008), .SEGBITS = 32, .PABITS = 32, - .insn_flags = CPU_NANOMIPS32 | ASE_DSP | ASE_DSPR2 | ASE_MT, + .insn_flags = CPU_NANOMIPS32 | ASE_DSP | ASE_DSP_R2 | ASE_DSP_R3 | + ASE_MT, .mmu_type = MMU_TYPE_R4000, }, #if defined(TARGET_MIPS64) @@ -761,7 +821,7 @@ const mips_def_t mips_defs[] = .mmu_type = MMU_TYPE_R4000, }, { - /* A generic CPU providing MIPS64 ASE DSP 2 features. + /* A generic CPU providing MIPS64 DSP R2 ASE features. FIXME: Eventually this should be replaced by a real CPU model.
*/ .name = "mips64dspr2", .CP0_PRid = 0x00010000, @@ -786,7 +846,7 @@ const mips_def_t mips_defs[] = .CP1_fcr31_rw_bitmask = 0xFF83FFFF, .SEGBITS = 42, .PABITS = 36, - .insn_flags = CPU_MIPS64R2 | ASE_DSP | ASE_DSPR2, + .insn_flags = CPU_MIPS64R2 | ASE_DSP | ASE_DSP_R2, .mmu_type = MMU_TYPE_R4000, }, diff --git a/target/ppc/helper.h b/target/ppc/helper.h index ef64248bc4..7a1481fd0b 100644 --- a/target/ppc/helper.h +++ b/target/ppc/helper.h @@ -800,7 +800,7 @@ DEF_HELPER_4(dscliq, void, env, fprp, fprp, i32) DEF_HELPER_1(tbegin, void, env) DEF_HELPER_FLAGS_1(fixup_thrm, TCG_CALL_NO_RWG, void, env) -#if defined(TARGET_PPC64) && defined(CONFIG_ATOMIC128) +#ifdef TARGET_PPC64 DEF_HELPER_FLAGS_3(lq_le_parallel, TCG_CALL_NO_WG, i64, env, tl, i32) DEF_HELPER_FLAGS_3(lq_be_parallel, TCG_CALL_NO_WG, i64, env, tl, i32) DEF_HELPER_FLAGS_5(stq_le_parallel, TCG_CALL_NO_WG, diff --git a/target/ppc/mem_helper.c b/target/ppc/mem_helper.c index 8f0d86d104..a1485fad9b 100644 --- a/target/ppc/mem_helper.c +++ b/target/ppc/mem_helper.c @@ -25,6 +25,7 @@ #include "exec/cpu_ldst.h" #include "tcg.h" #include "internal.h" +#include "qemu/atomic128.h" //#define DEBUG_OP @@ -215,11 +216,15 @@ target_ulong helper_lscbx(CPUPPCState *env, target_ulong addr, uint32_t reg, return i; } -#if defined(TARGET_PPC64) && defined(CONFIG_ATOMIC128) +#ifdef TARGET_PPC64 uint64_t helper_lq_le_parallel(CPUPPCState *env, target_ulong addr, uint32_t opidx) { - Int128 ret = helper_atomic_ldo_le_mmu(env, addr, opidx, GETPC()); + Int128 ret; + + /* We will have raised EXCP_ATOMIC from the translator. */ + assert(HAVE_ATOMIC128); + ret = helper_atomic_ldo_le_mmu(env, addr, opidx, GETPC()); env->retxh = int128_gethi(ret); return int128_getlo(ret); } @@ -227,7 +232,11 @@ uint64_t helper_lq_le_parallel(CPUPPCState *env, target_ulong addr, uint64_t helper_lq_be_parallel(CPUPPCState *env, target_ulong addr, uint32_t opidx) { - Int128 ret = helper_atomic_ldo_be_mmu(env, addr, opidx, GETPC()); + Int128 ret; + + /* We will have raised EXCP_ATOMIC from the translator. */ + assert(HAVE_ATOMIC128); + ret = helper_atomic_ldo_be_mmu(env, addr, opidx, GETPC()); env->retxh = int128_gethi(ret); return int128_getlo(ret); } @@ -235,14 +244,22 @@ uint64_t helper_lq_be_parallel(CPUPPCState *env, target_ulong addr, void helper_stq_le_parallel(CPUPPCState *env, target_ulong addr, uint64_t lo, uint64_t hi, uint32_t opidx) { - Int128 val = int128_make128(lo, hi); + Int128 val; + + /* We will have raised EXCP_ATOMIC from the translator. */ + assert(HAVE_ATOMIC128); + val = int128_make128(lo, hi); helper_atomic_sto_le_mmu(env, addr, val, opidx, GETPC()); } void helper_stq_be_parallel(CPUPPCState *env, target_ulong addr, uint64_t lo, uint64_t hi, uint32_t opidx) { - Int128 val = int128_make128(lo, hi); + Int128 val; + + /* We will have raised EXCP_ATOMIC from the translator. */ + assert(HAVE_ATOMIC128); + val = int128_make128(lo, hi); helper_atomic_sto_be_mmu(env, addr, val, opidx, GETPC()); } @@ -252,6 +269,9 @@ uint32_t helper_stqcx_le_parallel(CPUPPCState *env, target_ulong addr, { bool success = false; + /* We will have raised EXCP_ATOMIC from the translator. */ + assert(HAVE_CMPXCHG128); + if (likely(addr == env->reserve_addr)) { Int128 oldv, cmpv, newv; @@ -271,6 +291,9 @@ uint32_t helper_stqcx_be_parallel(CPUPPCState *env, target_ulong addr, { bool success = false; + /* We will have raised EXCP_ATOMIC from the translator. 
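From this point on, the same pattern repeats across the ppc and s390x changes: translators test HAVE_ATOMIC128 or HAVE_CMPXCHG128 in ordinary if() conditions, and helpers that can only be reached when the capability exists simply assert it. A rough model of what the new qemu/atomic128.h header presumably provides (an assumption; the real header may derive these values from more than one configure probe):

    /* Assumed sketch of <qemu/atomic128.h>: compile-time-constant
     * predicates usable in plain C conditions, so both branches stay
     * compiled and the dead one is optimized away. */
    #ifdef CONFIG_ATOMIC128
    #define HAVE_ATOMIC128  1
    #else
    #define HAVE_ATOMIC128  0
    #endif

    #ifdef CONFIG_CMPXCHG128
    #define HAVE_CMPXCHG128 1
    #else
    #define HAVE_CMPXCHG128 0
    #endif

Compared with the #ifdef CONFIG_ATOMIC128 blocks being deleted in these hunks, this keeps the fallback path (gen_helper_exit_atomic() followed by DISAS_NORETURN) compiled on every host, so it cannot silently bit-rot.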
*/ + assert(HAVE_CMPXCHG128); + if (likely(addr == env->reserve_addr)) { Int128 oldv, cmpv, newv; diff --git a/target/ppc/translate.c b/target/ppc/translate.c index 881743571b..4e59dd5f42 100644 --- a/target/ppc/translate.c +++ b/target/ppc/translate.c @@ -33,6 +33,7 @@ #include "trace-tcg.h" #include "exec/translator.h" #include "exec/log.h" +#include "qemu/atomic128.h" #define CPU_SINGLE_STEP 0x1 @@ -2654,22 +2655,22 @@ static void gen_lq(DisasContext *ctx) hi = cpu_gpr[rd]; if (tb_cflags(ctx->base.tb) & CF_PARALLEL) { -#ifdef CONFIG_ATOMIC128 - TCGv_i32 oi = tcg_temp_new_i32(); - if (ctx->le_mode) { - tcg_gen_movi_i32(oi, make_memop_idx(MO_LEQ, ctx->mem_idx)); - gen_helper_lq_le_parallel(lo, cpu_env, EA, oi); + if (HAVE_ATOMIC128) { + TCGv_i32 oi = tcg_temp_new_i32(); + if (ctx->le_mode) { + tcg_gen_movi_i32(oi, make_memop_idx(MO_LEQ, ctx->mem_idx)); + gen_helper_lq_le_parallel(lo, cpu_env, EA, oi); + } else { + tcg_gen_movi_i32(oi, make_memop_idx(MO_BEQ, ctx->mem_idx)); + gen_helper_lq_be_parallel(lo, cpu_env, EA, oi); + } + tcg_temp_free_i32(oi); + tcg_gen_ld_i64(hi, cpu_env, offsetof(CPUPPCState, retxh)); } else { - tcg_gen_movi_i32(oi, make_memop_idx(MO_BEQ, ctx->mem_idx)); - gen_helper_lq_be_parallel(lo, cpu_env, EA, oi); + /* Restart with exclusive lock. */ + gen_helper_exit_atomic(cpu_env); + ctx->base.is_jmp = DISAS_NORETURN; } - tcg_temp_free_i32(oi); - tcg_gen_ld_i64(hi, cpu_env, offsetof(CPUPPCState, retxh)); -#else - /* Restart with exclusive lock. */ - gen_helper_exit_atomic(cpu_env); - ctx->base.is_jmp = DISAS_NORETURN; -#endif } else if (ctx->le_mode) { tcg_gen_qemu_ld_i64(lo, EA, ctx->mem_idx, MO_LEQ); gen_addr_add(ctx, EA, EA, 8); @@ -2805,21 +2806,21 @@ static void gen_std(DisasContext *ctx) hi = cpu_gpr[rs]; if (tb_cflags(ctx->base.tb) & CF_PARALLEL) { -#ifdef CONFIG_ATOMIC128 - TCGv_i32 oi = tcg_temp_new_i32(); - if (ctx->le_mode) { - tcg_gen_movi_i32(oi, make_memop_idx(MO_LEQ, ctx->mem_idx)); - gen_helper_stq_le_parallel(cpu_env, EA, lo, hi, oi); + if (HAVE_ATOMIC128) { + TCGv_i32 oi = tcg_temp_new_i32(); + if (ctx->le_mode) { + tcg_gen_movi_i32(oi, make_memop_idx(MO_LEQ, ctx->mem_idx)); + gen_helper_stq_le_parallel(cpu_env, EA, lo, hi, oi); + } else { + tcg_gen_movi_i32(oi, make_memop_idx(MO_BEQ, ctx->mem_idx)); + gen_helper_stq_be_parallel(cpu_env, EA, lo, hi, oi); + } + tcg_temp_free_i32(oi); } else { - tcg_gen_movi_i32(oi, make_memop_idx(MO_BEQ, ctx->mem_idx)); - gen_helper_stq_be_parallel(cpu_env, EA, lo, hi, oi); + /* Restart with exclusive lock. */ + gen_helper_exit_atomic(cpu_env); + ctx->base.is_jmp = DISAS_NORETURN; } - tcg_temp_free_i32(oi); -#else - /* Restart with exclusive lock. 
*/ - gen_helper_exit_atomic(cpu_env); - ctx->base.is_jmp = DISAS_NORETURN; -#endif } else if (ctx->le_mode) { tcg_gen_qemu_st_i64(lo, EA, ctx->mem_idx, MO_LEQ); gen_addr_add(ctx, EA, EA, 8); @@ -3404,26 +3405,26 @@ static void gen_lqarx(DisasContext *ctx) hi = cpu_gpr[rd]; if (tb_cflags(ctx->base.tb) & CF_PARALLEL) { -#ifdef CONFIG_ATOMIC128 - TCGv_i32 oi = tcg_temp_new_i32(); - if (ctx->le_mode) { - tcg_gen_movi_i32(oi, make_memop_idx(MO_LEQ | MO_ALIGN_16, - ctx->mem_idx)); - gen_helper_lq_le_parallel(lo, cpu_env, EA, oi); + if (HAVE_ATOMIC128) { + TCGv_i32 oi = tcg_temp_new_i32(); + if (ctx->le_mode) { + tcg_gen_movi_i32(oi, make_memop_idx(MO_LEQ | MO_ALIGN_16, + ctx->mem_idx)); + gen_helper_lq_le_parallel(lo, cpu_env, EA, oi); + } else { + tcg_gen_movi_i32(oi, make_memop_idx(MO_BEQ | MO_ALIGN_16, + ctx->mem_idx)); + gen_helper_lq_be_parallel(lo, cpu_env, EA, oi); + } + tcg_temp_free_i32(oi); + tcg_gen_ld_i64(hi, cpu_env, offsetof(CPUPPCState, retxh)); } else { - tcg_gen_movi_i32(oi, make_memop_idx(MO_BEQ | MO_ALIGN_16, - ctx->mem_idx)); - gen_helper_lq_be_parallel(lo, cpu_env, EA, oi); + /* Restart with exclusive lock. */ + gen_helper_exit_atomic(cpu_env); + ctx->base.is_jmp = DISAS_NORETURN; + tcg_temp_free(EA); + return; } - tcg_temp_free_i32(oi); - tcg_gen_ld_i64(hi, cpu_env, offsetof(CPUPPCState, retxh)); -#else - /* Restart with exclusive lock. */ - gen_helper_exit_atomic(cpu_env); - ctx->base.is_jmp = DISAS_NORETURN; - tcg_temp_free(EA); - return; -#endif } else if (ctx->le_mode) { tcg_gen_qemu_ld_i64(lo, EA, ctx->mem_idx, MO_LEQ | MO_ALIGN_16); tcg_gen_mov_tl(cpu_reserve, EA); @@ -3461,20 +3462,22 @@ static void gen_stqcx_(DisasContext *ctx) hi = cpu_gpr[rs]; if (tb_cflags(ctx->base.tb) & CF_PARALLEL) { - TCGv_i32 oi = tcg_const_i32(DEF_MEMOP(MO_Q) | MO_ALIGN_16); -#ifdef CONFIG_ATOMIC128 - if (ctx->le_mode) { - gen_helper_stqcx_le_parallel(cpu_crf[0], cpu_env, EA, lo, hi, oi); + if (HAVE_CMPXCHG128) { + TCGv_i32 oi = tcg_const_i32(DEF_MEMOP(MO_Q) | MO_ALIGN_16); + if (ctx->le_mode) { + gen_helper_stqcx_le_parallel(cpu_crf[0], cpu_env, + EA, lo, hi, oi); + } else { + gen_helper_stqcx_be_parallel(cpu_crf[0], cpu_env, + EA, lo, hi, oi); + } + tcg_temp_free_i32(oi); } else { - gen_helper_stqcx_le_parallel(cpu_crf[0], cpu_env, EA, lo, hi, oi); + /* Restart with exclusive lock. */ + gen_helper_exit_atomic(cpu_env); + ctx->base.is_jmp = DISAS_NORETURN; } -#else - /* Restart with exclusive lock. 
*/ - gen_helper_exit_atomic(cpu_env); - ctx->base.is_jmp = DISAS_NORETURN; -#endif tcg_temp_free(EA); - tcg_temp_free_i32(oi); } else { TCGLabel *lab_fail = gen_new_label(); TCGLabel *lab_over = gen_new_label(); diff --git a/target/ppc/translate_init.inc.c b/target/ppc/translate_init.inc.c index 263e63cb03..ee9432eb15 100644 --- a/target/ppc/translate_init.inc.c +++ b/target/ppc/translate_init.inc.c @@ -8381,8 +8381,8 @@ static void getset_compat_deprecated(Object *obj, Visitor *v, const char *name, QNull *null = NULL; if (!qtest_enabled()) { - error_report("CPU 'compat' property is deprecated and has no effect; " - "use max-cpu-compat machine property instead"); + warn_report("CPU 'compat' property is deprecated and has no effect; " + "use max-cpu-compat machine property instead"); } visit_type_null(v, name, &null, NULL); qobject_unref(null); diff --git a/target/s390x/mem_helper.c b/target/s390x/mem_helper.c index bacae4f503..490c43e6e6 100644 --- a/target/s390x/mem_helper.c +++ b/target/s390x/mem_helper.c @@ -25,6 +25,7 @@ #include "exec/exec-all.h" #include "exec/cpu_ldst.h" #include "qemu/int128.h" +#include "qemu/atomic128.h" #if !defined(CONFIG_USER_ONLY) #include "hw/s390x/storage-keys.h" @@ -1379,65 +1380,62 @@ uint32_t HELPER(trXX)(CPUS390XState *env, uint32_t r1, uint32_t r2, return cc; } -static void do_cdsg(CPUS390XState *env, uint64_t addr, - uint32_t r1, uint32_t r3, bool parallel) +void HELPER(cdsg)(CPUS390XState *env, uint64_t addr, + uint32_t r1, uint32_t r3) { uintptr_t ra = GETPC(); Int128 cmpv = int128_make128(env->regs[r1 + 1], env->regs[r1]); Int128 newv = int128_make128(env->regs[r3 + 1], env->regs[r3]); Int128 oldv; + uint64_t oldh, oldl; bool fail; - if (parallel) { -#ifndef CONFIG_ATOMIC128 - cpu_loop_exit_atomic(ENV_GET_CPU(env), ra); -#else - int mem_idx = cpu_mmu_index(env, false); - TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx); - oldv = helper_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv, oi, ra); - fail = !int128_eq(oldv, cmpv); -#endif - } else { - uint64_t oldh, oldl; + check_alignment(env, addr, 16, ra); - check_alignment(env, addr, 16, ra); + oldh = cpu_ldq_data_ra(env, addr + 0, ra); + oldl = cpu_ldq_data_ra(env, addr + 8, ra); - oldh = cpu_ldq_data_ra(env, addr + 0, ra); - oldl = cpu_ldq_data_ra(env, addr + 8, ra); - - oldv = int128_make128(oldl, oldh); - fail = !int128_eq(oldv, cmpv); - if (fail) { - newv = oldv; - } - - cpu_stq_data_ra(env, addr + 0, int128_gethi(newv), ra); - cpu_stq_data_ra(env, addr + 8, int128_getlo(newv), ra); + oldv = int128_make128(oldl, oldh); + fail = !int128_eq(oldv, cmpv); + if (fail) { + newv = oldv; } + cpu_stq_data_ra(env, addr + 0, int128_gethi(newv), ra); + cpu_stq_data_ra(env, addr + 8, int128_getlo(newv), ra); + env->cc_op = fail; env->regs[r1] = int128_gethi(oldv); env->regs[r1 + 1] = int128_getlo(oldv); } -void HELPER(cdsg)(CPUS390XState *env, uint64_t addr, - uint32_t r1, uint32_t r3) -{ - do_cdsg(env, addr, r1, r3, false); -} - void HELPER(cdsg_parallel)(CPUS390XState *env, uint64_t addr, uint32_t r1, uint32_t r3) { - do_cdsg(env, addr, r1, r3, true); + uintptr_t ra = GETPC(); + Int128 cmpv = int128_make128(env->regs[r1 + 1], env->regs[r1]); + Int128 newv = int128_make128(env->regs[r3 + 1], env->regs[r3]); + int mem_idx; + TCGMemOpIdx oi; + Int128 oldv; + bool fail; + + assert(HAVE_CMPXCHG128); + + mem_idx = cpu_mmu_index(env, false); + oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx); + oldv = helper_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv, oi, ra); + fail = !int128_eq(oldv, cmpv); + + 
env->cc_op = fail; + env->regs[r1] = int128_gethi(oldv); + env->regs[r1 + 1] = int128_getlo(oldv); } static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1, uint64_t a2, bool parallel) { -#if !defined(CONFIG_USER_ONLY) || defined(CONFIG_ATOMIC128) uint32_t mem_idx = cpu_mmu_index(env, false); -#endif uintptr_t ra = GETPC(); uint32_t fc = extract32(env->regs[0], 0, 8); uint32_t sc = extract32(env->regs[0], 8, 8); @@ -1465,18 +1463,20 @@ static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1, probe_write(env, a2, 0, mem_idx, ra); #endif - /* Note that the compare-and-swap is atomic, and the store is atomic, but - the complete operation is not. Therefore we do not need to assert serial - context in order to implement this. That said, restart early if we can't - support either operation that is supposed to be atomic. */ + /* + * Note that the compare-and-swap is atomic, and the store is atomic, + * but the complete operation is not. Therefore we do not need to + * assert serial context in order to implement this. That said, + * restart early if we can't support either operation that is supposed + * to be atomic. + */ if (parallel) { - int mask = 0; -#if !defined(CONFIG_ATOMIC64) - mask = -8; -#elif !defined(CONFIG_ATOMIC128) - mask = -16; + uint32_t max = 2; +#ifdef CONFIG_ATOMIC64 + max = 3; #endif - if (((4 << fc) | (1 << sc)) & mask) { + if ((HAVE_CMPXCHG128 ? 0 : fc + 2 > max) || + (HAVE_ATOMIC128 ? 0 : sc > max)) { cpu_loop_exit_atomic(ENV_GET_CPU(env), ra); } } @@ -1546,16 +1546,7 @@ static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1, Int128 cv = int128_make128(env->regs[r3 + 1], env->regs[r3]); Int128 ov; - if (parallel) { -#ifdef CONFIG_ATOMIC128 - TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx); - ov = helper_atomic_cmpxchgo_be_mmu(env, a1, cv, nv, oi, ra); - cc = !int128_eq(ov, cv); -#else - /* Note that we asserted !parallel above. */ - g_assert_not_reached(); -#endif - } else { + if (!parallel) { uint64_t oh = cpu_ldq_data_ra(env, a1 + 0, ra); uint64_t ol = cpu_ldq_data_ra(env, a1 + 8, ra); @@ -1567,6 +1558,13 @@ static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1, cpu_stq_data_ra(env, a1 + 0, int128_gethi(nv), ra); cpu_stq_data_ra(env, a1 + 8, int128_getlo(nv), ra); + } else if (HAVE_CMPXCHG128) { + TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx); + ov = helper_atomic_cmpxchgo_be_mmu(env, a1, cv, nv, oi, ra); + cc = !int128_eq(ov, cv); + } else { + /* Note that we asserted !parallel above. */ + g_assert_not_reached(); } env->regs[r3 + 0] = int128_gethi(ov); @@ -1596,18 +1594,16 @@ static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1, cpu_stq_data_ra(env, a2, svh, ra); break; case 4: - if (parallel) { -#ifdef CONFIG_ATOMIC128 + if (!parallel) { + cpu_stq_data_ra(env, a2 + 0, svh, ra); + cpu_stq_data_ra(env, a2 + 8, svl, ra); + } else if (HAVE_ATOMIC128) { TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx); Int128 sv = int128_make128(svl, svh); helper_atomic_sto_be_mmu(env, a2, sv, oi, ra); -#else + } else { /* Note that we asserted !parallel above. 
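The rewritten size gate in do_csst() above is easier to follow with numbers. The old test ((4 << fc) | (1 << sc)) & mask treated 4 << fc as the compare-and-swap width in bytes and 1 << sc as the store width; the new form compares log2 widths against max, the largest natively atomic log2 size (2, or 3 with CONFIG_ATOMIC64). A worked instance under that reading (a hypothetical check, not QEMU code):

    #include <assert.h>

    int main(void)
    {
        int max = 3;   /* host provides CONFIG_ATOMIC64 */
        int fc = 2;    /* 16-byte compare-and-swap: log2 = fc + 2 = 4 */
        int sc = 4;    /* 16-byte store: log2 = sc = 4 */

        /* Without HAVE_CMPXCHG128/HAVE_ATOMIC128 both tests fire and
         * the helper restarts the operation in serial context. */
        assert(fc + 2 > max);
        assert(sc > max);
        return 0;
    }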
*/ g_assert_not_reached(); -#endif - } else { - cpu_stq_data_ra(env, a2 + 0, svh, ra); - cpu_stq_data_ra(env, a2 + 8, svl, ra); } break; default: @@ -2100,76 +2096,64 @@ uint64_t HELPER(lra)(CPUS390XState *env, uint64_t addr) #endif /* load pair from quadword */ -static uint64_t do_lpq(CPUS390XState *env, uint64_t addr, bool parallel) +uint64_t HELPER(lpq)(CPUS390XState *env, uint64_t addr) { uintptr_t ra = GETPC(); uint64_t hi, lo; - if (parallel) { -#ifndef CONFIG_ATOMIC128 - cpu_loop_exit_atomic(ENV_GET_CPU(env), ra); -#else - int mem_idx = cpu_mmu_index(env, false); - TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx); - Int128 v = helper_atomic_ldo_be_mmu(env, addr, oi, ra); - hi = int128_gethi(v); - lo = int128_getlo(v); -#endif - } else { - check_alignment(env, addr, 16, ra); - - hi = cpu_ldq_data_ra(env, addr + 0, ra); - lo = cpu_ldq_data_ra(env, addr + 8, ra); - } + check_alignment(env, addr, 16, ra); + hi = cpu_ldq_data_ra(env, addr + 0, ra); + lo = cpu_ldq_data_ra(env, addr + 8, ra); env->retxl = lo; return hi; } -uint64_t HELPER(lpq)(CPUS390XState *env, uint64_t addr) -{ - return do_lpq(env, addr, false); -} - uint64_t HELPER(lpq_parallel)(CPUS390XState *env, uint64_t addr) { - return do_lpq(env, addr, true); -} - -/* store pair to quadword */ -static void do_stpq(CPUS390XState *env, uint64_t addr, - uint64_t low, uint64_t high, bool parallel) -{ uintptr_t ra = GETPC(); + uint64_t hi, lo; + int mem_idx; + TCGMemOpIdx oi; + Int128 v; - if (parallel) { -#ifndef CONFIG_ATOMIC128 - cpu_loop_exit_atomic(ENV_GET_CPU(env), ra); -#else - int mem_idx = cpu_mmu_index(env, false); - TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx); + assert(HAVE_ATOMIC128); - Int128 v = int128_make128(low, high); - helper_atomic_sto_be_mmu(env, addr, v, oi, ra); -#endif - } else { - check_alignment(env, addr, 16, ra); + mem_idx = cpu_mmu_index(env, false); + oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx); + v = helper_atomic_ldo_be_mmu(env, addr, oi, ra); + hi = int128_gethi(v); + lo = int128_getlo(v); - cpu_stq_data_ra(env, addr + 0, high, ra); - cpu_stq_data_ra(env, addr + 8, low, ra); - } + env->retxl = lo; + return hi; } +/* store pair to quadword */ void HELPER(stpq)(CPUS390XState *env, uint64_t addr, uint64_t low, uint64_t high) { - do_stpq(env, addr, low, high, false); + uintptr_t ra = GETPC(); + + check_alignment(env, addr, 16, ra); + cpu_stq_data_ra(env, addr + 0, high, ra); + cpu_stq_data_ra(env, addr + 8, low, ra); } void HELPER(stpq_parallel)(CPUS390XState *env, uint64_t addr, uint64_t low, uint64_t high) { - do_stpq(env, addr, low, high, true); + uintptr_t ra = GETPC(); + int mem_idx; + TCGMemOpIdx oi; + Int128 v; + + assert(HAVE_ATOMIC128); + + mem_idx = cpu_mmu_index(env, false); + oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx); + v = int128_make128(low, high); + helper_atomic_sto_be_mmu(env, addr, v, oi, ra); } /* Execute instruction. This instruction executes an insn modified with diff --git a/target/s390x/translate.c b/target/s390x/translate.c index 18861cd186..b5bd56b7ee 100644 --- a/target/s390x/translate.c +++ b/target/s390x/translate.c @@ -44,6 +44,7 @@ #include "trace-tcg.h" #include "exec/translator.h" #include "exec/log.h" +#include "qemu/atomic128.h" /* Information that (most) every instruction needs to manipulate. */ @@ -1128,11 +1129,19 @@ struct DisasInsn { const char *name; + /* Pre-process arguments before HELP_OP. 
*/ void (*help_in1)(DisasContext *, DisasFields *, DisasOps *); void (*help_in2)(DisasContext *, DisasFields *, DisasOps *); void (*help_prep)(DisasContext *, DisasFields *, DisasOps *); + + /* + * Post-process output after HELP_OP. + * Note that these are not called if HELP_OP returns DISAS_NORETURN. + */ void (*help_wout)(DisasContext *, DisasFields *, DisasOps *); void (*help_cout)(DisasContext *, DisasOps *); + + /* Implement the operation itself. */ DisasJumpType (*help_op)(DisasContext *, DisasOps *); uint64_t data; @@ -2032,6 +2041,7 @@ static DisasJumpType op_cdsg(DisasContext *s, DisasOps *o) int r3 = get_field(s->fields, r3); int d2 = get_field(s->fields, d2); int b2 = get_field(s->fields, b2); + DisasJumpType ret = DISAS_NEXT; TCGv_i64 addr; TCGv_i32 t_r1, t_r3; @@ -2039,17 +2049,20 @@ static DisasJumpType op_cdsg(DisasContext *s, DisasOps *o) addr = get_address(s, 0, b2, d2); t_r1 = tcg_const_i32(r1); t_r3 = tcg_const_i32(r3); - if (tb_cflags(s->base.tb) & CF_PARALLEL) { + if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) { + gen_helper_cdsg(cpu_env, addr, t_r1, t_r3); + } else if (HAVE_CMPXCHG128) { gen_helper_cdsg_parallel(cpu_env, addr, t_r1, t_r3); } else { - gen_helper_cdsg(cpu_env, addr, t_r1, t_r3); + gen_helper_exit_atomic(cpu_env); + ret = DISAS_NORETURN; } tcg_temp_free_i64(addr); tcg_temp_free_i32(t_r1); tcg_temp_free_i32(t_r3); set_cc_static(s); - return DISAS_NEXT; + return ret; } static DisasJumpType op_csst(DisasContext *s, DisasOps *o) @@ -3026,10 +3039,13 @@ static DisasJumpType op_lpd(DisasContext *s, DisasOps *o) static DisasJumpType op_lpq(DisasContext *s, DisasOps *o) { - if (tb_cflags(s->base.tb) & CF_PARALLEL) { + if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) { + gen_helper_lpq(o->out, cpu_env, o->in2); + } else if (HAVE_ATOMIC128) { gen_helper_lpq_parallel(o->out, cpu_env, o->in2); } else { - gen_helper_lpq(o->out, cpu_env, o->in2); + gen_helper_exit_atomic(cpu_env); + return DISAS_NORETURN; } return_low128(o->out2); return DISAS_NEXT; @@ -4406,10 +4422,13 @@ static DisasJumpType op_stmh(DisasContext *s, DisasOps *o) static DisasJumpType op_stpq(DisasContext *s, DisasOps *o) { - if (tb_cflags(s->base.tb) & CF_PARALLEL) { + if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) { + gen_helper_stpq(cpu_env, o->in2, o->out2, o->out); + } else if (HAVE_ATOMIC128) { gen_helper_stpq_parallel(cpu_env, o->in2, o->out2, o->out); } else { - gen_helper_stpq(cpu_env, o->in2, o->out2, o->out); + gen_helper_exit_atomic(cpu_env); + return DISAS_NORETURN; } return DISAS_NEXT; } @@ -6125,11 +6144,13 @@ static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s) if (insn->help_op) { ret = insn->help_op(s, &o); } - if (insn->help_wout) { - insn->help_wout(s, &f, &o); - } - if (insn->help_cout) { - insn->help_cout(s, &o); + if (ret != DISAS_NORETURN) { + if (insn->help_wout) { + insn->help_wout(s, &f, &o); + } + if (insn->help_cout) { + insn->help_cout(s, &o); + } } /* Free any temporaries created by the helpers. 
*/ diff --git a/target/unicore32/cpu.c b/target/unicore32/cpu.c index 68f978d80b..2b49d1ca40 100644 --- a/target/unicore32/cpu.c +++ b/target/unicore32/cpu.c @@ -116,8 +116,6 @@ static void uc32_cpu_initfn(Object *obj) env->uncached_asr = ASR_MODE_PRIV; env->regs[31] = 0x03000000; #endif - - tlb_flush(cs); } static const VMStateDescription vmstate_uc32_cpu = { diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c index daa416a143..7a8015c5a9 100644 --- a/tcg/tcg-op.c +++ b/tcg/tcg-op.c @@ -2586,6 +2586,10 @@ void tcg_gen_exit_tb(TranslationBlock *tb, unsigned idx) seen this numbered exit before, via tcg_gen_goto_tb. */ tcg_debug_assert(tcg_ctx->goto_tb_issue_mask & (1 << idx)); #endif + /* When not chaining, exit without indicating a link. */ + if (qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) { + val = 0; + } } else { /* This is an exit via the exitreq label. */ tcg_debug_assert(idx == TB_EXIT_REQUESTED); @@ -2603,7 +2607,10 @@ void tcg_gen_goto_tb(unsigned idx) tcg_debug_assert((tcg_ctx->goto_tb_issue_mask & (1 << idx)) == 0); tcg_ctx->goto_tb_issue_mask |= 1 << idx; #endif - tcg_gen_op1i(INDEX_op_goto_tb, idx); + /* When not chaining, we simply fall through to the "fallback" exit. */ + if (!qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) { + tcg_gen_op1i(INDEX_op_goto_tb, idx); + } } void tcg_gen_lookup_and_goto_ptr(void) @@ -30,6 +30,7 @@ /* Define to dump the ELF file used to communicate with GDB. */ #undef DEBUG_JIT +#include "qemu/error-report.h" #include "qemu/cutils.h" #include "qemu/host-utils.h" #include "qemu/timer.h" @@ -3361,6 +3362,7 @@ void tcg_profile_snapshot(TCGProfile *prof, bool counters, bool table) const TCGProfile *orig = &s->prof; if (counters) { + PROF_ADD(prof, orig, cpu_exec_time); PROF_ADD(prof, orig, tb_count1); PROF_ADD(prof, orig, tb_count); PROF_ADD(prof, orig, op_count); @@ -3412,11 +3414,32 @@ void tcg_dump_op_count(FILE *f, fprintf_function cpu_fprintf) prof.table_op_count[i]); } } + +int64_t tcg_cpu_exec_time(void) +{ + unsigned int n_ctxs = atomic_read(&n_tcg_ctxs); + unsigned int i; + int64_t ret = 0; + + for (i = 0; i < n_ctxs; i++) { + const TCGContext *s = atomic_read(&tcg_ctxs[i]); + const TCGProfile *prof = &s->prof; + + ret += atomic_read(&prof->cpu_exec_time); + } + return ret; +} #else void tcg_dump_op_count(FILE *f, fprintf_function cpu_fprintf) { cpu_fprintf(f, "[TCG profiler not compiled]\n"); } + +int64_t tcg_cpu_exec_time(void) +{ + error_report("%s: TCG profiler not compiled", __func__); + exit(EXIT_FAILURE); +} #endif @@ -3430,7 +3453,7 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb) #ifdef CONFIG_PROFILER { - int n; + int n = 0; QTAILQ_FOREACH(op, &s->ops, link) { n++; @@ -32,6 +32,7 @@ #include "qemu/queue.h" #include "tcg-mo.h" #include "tcg-target.h" +#include "qemu/int128.h" /* XXX: make safe guess about sizes */ #define MAX_OP_PER_INSTR 266 @@ -629,12 +630,13 @@ typedef struct TCGOp { QEMU_BUILD_BUG_ON(NB_OPS > (1 << 8)); typedef struct TCGProfile { + int64_t cpu_exec_time; int64_t tb_count1; int64_t tb_count; int64_t op_count; /* total insn count */ int op_count_max; /* max insn per TB */ - int64_t temp_count; int temp_count_max; + int64_t temp_count; int64_t del_op_count; int64_t code_in_len; int64_t code_out_len; @@ -1002,6 +1004,7 @@ int tcg_check_temp_count(void); #define tcg_check_temp_count() 0 #endif +int64_t tcg_cpu_exec_time(void); void tcg_dump_info(FILE *f, fprintf_function cpu_fprintf); void tcg_dump_op_count(FILE *f, fprintf_function cpu_fprintf); @@ -1454,11 +1457,14 @@ GEN_ATOMIC_HELPER_ALL(xchg) #undef GEN_ATOMIC_HELPER #endif /*
CONFIG_SOFTMMU */ -#ifdef CONFIG_ATOMIC128 -#include "qemu/int128.h" - -/* These aren't really a "proper" helpers because TCG cannot manage Int128. - However, use the same format as the others, for use by the backends. */ +/* + * These aren't really "proper" helpers because TCG cannot manage Int128. + * However, use the same format as the others, for use by the backends. + * + * The cmpxchg functions are only defined if HAVE_CMPXCHG128; + * the ld/st functions are only defined if HAVE_ATOMIC128, + * as defined by <qemu/atomic128.h>. + */ Int128 helper_atomic_cmpxchgo_le_mmu(CPUArchState *env, target_ulong addr, Int128 cmpv, Int128 newv, TCGMemOpIdx oi, uintptr_t retaddr); @@ -1475,6 +1481,4 @@ void helper_atomic_sto_le_mmu(CPUArchState *env, target_ulong addr, Int128 val, TCGMemOpIdx oi, uintptr_t retaddr); void helper_atomic_sto_be_mmu(CPUArchState *env, target_ulong addr, Int128 val, TCGMemOpIdx oi, uintptr_t retaddr); -#endif /* CONFIG_ATOMIC128 */ - #endif /* TCG_H */ diff --git a/tests/Makefile.include b/tests/Makefile.include index 5eadfd52f9..f77a495109 100644 --- a/tests/Makefile.include +++ b/tests/Makefile.include @@ -38,107 +38,62 @@ $(SRC_PATH)/scripts/qapi-gen.py SYSEMU_TARGET_LIST := $(subst -softmmu.mak,,$(notdir \ $(wildcard $(SRC_PATH)/default-configs/*-softmmu.mak))) -check-unit-y = tests/check-qdict$(EXESUF) -gcov-files-check-qdict-y = qobject/qdict.c +check-unit-y += tests/check-qdict$(EXESUF) check-unit-y += tests/check-block-qdict$(EXESUF) -gcov-files-check-block-qdict-y = qobject/block-qdict.c check-unit-y += tests/test-char$(EXESUF) -gcov-files-check-qdict-y = chardev/char.c check-unit-y += tests/check-qnum$(EXESUF) -gcov-files-check-qnum-y = qobject/qnum.c check-unit-y += tests/check-qstring$(EXESUF) -gcov-files-check-qstring-y = qobject/qstring.c check-unit-y += tests/check-qlist$(EXESUF) -gcov-files-check-qlist-y = qobject/qlist.c check-unit-y += tests/check-qnull$(EXESUF) -gcov-files-check-qnull-y = qobject/qnull.c check-unit-y += tests/check-qobject$(EXESUF) check-unit-y += tests/check-qjson$(EXESUF) -gcov-files-check-qjson-y = qobject/qjson.c check-unit-y += tests/check-qlit$(EXESUF) -gcov-files-check-qlit-y = qobject/qlit.c check-unit-y += tests/test-qobject-output-visitor$(EXESUF) -gcov-files-test-qobject-output-visitor-y = qapi/qobject-output-visitor.c check-unit-y += tests/test-clone-visitor$(EXESUF) -gcov-files-test-clone-visitor-y = qapi/qapi-clone-visitor.c check-unit-y += tests/test-qobject-input-visitor$(EXESUF) -gcov-files-test-qobject-input-visitor-y = qapi/qobject-input-visitor.c check-unit-y += tests/test-qmp-cmds$(EXESUF) -gcov-files-test-qmp-cmds-y = qapi/qmp-dispatch.c check-unit-y += tests/test-string-input-visitor$(EXESUF) -gcov-files-test-string-input-visitor-y = qapi/string-input-visitor.c check-unit-y += tests/test-string-output-visitor$(EXESUF) -gcov-files-test-string-output-visitor-y = qapi/string-output-visitor.c check-unit-y += tests/test-qmp-event$(EXESUF) -gcov-files-test-qmp-event-y += qapi/qmp-event.c check-unit-y += tests/test-opts-visitor$(EXESUF) -gcov-files-test-opts-visitor-y = qapi/opts-visitor.c check-unit-y += tests/test-coroutine$(EXESUF) -gcov-files-test-coroutine-y = coroutine-$(CONFIG_COROUTINE_BACKEND).c check-unit-y += tests/test-visitor-serialization$(EXESUF) check-unit-y += tests/test-iov$(EXESUF) -gcov-files-test-iov-y = util/iov.c check-unit-y += tests/test-aio$(EXESUF) -gcov-files-test-aio-y = util/async.c util/qemu-timer.o -gcov-files-test-aio-$(CONFIG_WIN32) += util/aio-win32.c -gcov-files-test-aio-$(CONFIG_POSIX) += util/aio-posix.c
check-unit-y += tests/test-aio-multithread$(EXESUF) -gcov-files-test-aio-multithread-y = $(gcov-files-test-aio-y) -gcov-files-test-aio-multithread-y += util/qemu-coroutine.c tests/iothread.c check-unit-y += tests/test-throttle$(EXESUF) check-unit-y += tests/test-thread-pool$(EXESUF) -gcov-files-test-thread-pool-y = thread-pool.c -gcov-files-test-hbitmap-y = util/hbitmap.c check-unit-y += tests/test-hbitmap$(EXESUF) -gcov-files-test-hbitmap-y = blockjob.c check-unit-y += tests/test-bdrv-drain$(EXESUF) check-unit-y += tests/test-blockjob$(EXESUF) check-unit-y += tests/test-blockjob-txn$(EXESUF) check-unit-y += tests/test-block-backend$(EXESUF) check-unit-y += tests/test-x86-cpuid$(EXESUF) # all code tested by test-x86-cpuid is inside topology.h -gcov-files-test-x86-cpuid-y = ifeq ($(CONFIG_SOFTMMU),y) check-unit-y += tests/test-xbzrle$(EXESUF) -gcov-files-test-xbzrle-y = migration/xbzrle.c check-unit-$(CONFIG_POSIX) += tests/test-vmstate$(EXESUF) endif check-unit-y += tests/test-cutils$(EXESUF) -gcov-files-test-cutils-y += util/cutils.c check-unit-y += tests/test-shift128$(EXESUF) -gcov-files-test-shift128-y = util/host-utils.c check-unit-y += tests/test-mul64$(EXESUF) -gcov-files-test-mul64-y = util/host-utils.c check-unit-y += tests/test-int128$(EXESUF) # all code tested by test-int128 is inside int128.h -gcov-files-test-int128-y = check-unit-y += tests/rcutorture$(EXESUF) -gcov-files-rcutorture-y = util/rcu.c check-unit-y += tests/test-rcu-list$(EXESUF) -gcov-files-test-rcu-list-y = util/rcu.c check-unit-y += tests/test-rcu-simpleq$(EXESUF) -gcov-files-test-rcu-simpleq-y = util/rcu.c check-unit-y += tests/test-rcu-tailq$(EXESUF) -gcov-files-test-rcu-tailq-y = util/rcu.c check-unit-y += tests/test-qdist$(EXESUF) -gcov-files-test-qdist-y = util/qdist.c check-unit-y += tests/test-qht$(EXESUF) -gcov-files-test-qht-y = util/qht.c check-unit-y += tests/test-qht-par$(EXESUF) -gcov-files-test-qht-par-y = util/qht.c check-unit-y += tests/test-bitops$(EXESUF) check-unit-y += tests/test-bitcnt$(EXESUF) check-unit-y += tests/test-qdev-global-props$(EXESUF) check-unit-y += tests/check-qom-interface$(EXESUF) -gcov-files-check-qom-interface-y = qom/object.c check-unit-y += tests/check-qom-proplist$(EXESUF) -gcov-files-check-qom-proplist-y = qom/object.c check-unit-y += tests/test-qemu-opts$(EXESUF) -gcov-files-test-qemu-opts-y = util/qemu-option.c check-unit-y += tests/test-keyval$(EXESUF) -gcov-files-test-keyval-y = util/keyval.c check-unit-y += tests/test-write-threshold$(EXESUF) -gcov-files-test-write-threshold-y = block/write-threshold.c check-unit-y += tests/test-crypto-hash$(EXESUF) check-speed-y += tests/benchmark-crypto-hash$(EXESUF) check-unit-y += tests/test-crypto-hmac$(EXESUF) @@ -160,109 +115,66 @@ check-unit-$(CONFIG_GNUTLS) += tests/test-io-channel-tls$(EXESUF) check-unit-y += tests/test-io-channel-command$(EXESUF) check-unit-y += tests/test-io-channel-buffer$(EXESUF) check-unit-y += tests/test-base64$(EXESUF) -check-unit-$(if $(CONFIG_NETTLE_KDF),y,$(CONFIG_GCRYPT_KDF)) += tests/test-crypto-pbkdf$(EXESUF) +check-unit-$(if $(CONFIG_NETTLE),y,$(CONFIG_GCRYPT)) += tests/test-crypto-pbkdf$(EXESUF) check-unit-y += tests/test-crypto-ivgen$(EXESUF) check-unit-y += tests/test-crypto-afsplit$(EXESUF) check-unit-y += tests/test-crypto-xts$(EXESUF) check-unit-y += tests/test-crypto-block$(EXESUF) check-unit-y += tests/test-logging$(EXESUF) -gcov-files-test-logging-y = util/log.c check-unit-$(CONFIG_REPLICATION) += tests/test-replication$(EXESUF) check-unit-y += tests/test-bufferiszero$(EXESUF) 
-gcov-files-check-bufferiszero-y = util/bufferiszero.c check-unit-y += tests/test-uuid$(EXESUF) check-unit-y += tests/ptimer-test$(EXESUF) -gcov-files-ptimer-test-y = hw/core/ptimer.c check-unit-y += tests/test-qapi-util$(EXESUF) -gcov-files-test-qapi-util-y = qapi/qapi-util.c check-block-$(CONFIG_POSIX) += tests/qemu-iotests-quick.sh # All QTests for now are POSIX-only, but the dependencies are # really in libqtest, not in the testcases themselves. -check-qtest-generic-y = tests/qmp-test$(EXESUF) -gcov-files-generic-y = monitor.c qapi/qmp-dispatch.c +check-qtest-generic-y += tests/qmp-test$(EXESUF) check-qtest-generic-y += tests/qmp-cmd-test$(EXESUF) check-qtest-generic-y += tests/device-introspect-test$(EXESUF) -gcov-files-generic-y = qdev-monitor.c qmp.c check-qtest-generic-y += tests/cdrom-test$(EXESUF) -gcov-files-ipack-y += hw/ipack/ipack.c check-qtest-ipack-y += tests/ipoctal232-test$(EXESUF) -gcov-files-ipack-y += hw/char/ipoctal232.c check-qtest-virtioserial-y += tests/virtio-console-test$(EXESUF) -gcov-files-virtioserial-y += hw/char/virtio-console.c -gcov-files-virtio-y += i386-softmmu/hw/virtio/virtio.c check-qtest-virtio-y += tests/virtio-net-test$(EXESUF) -gcov-files-virtio-y += i386-softmmu/hw/net/virtio-net.c check-qtest-virtio-y += tests/virtio-balloon-test$(EXESUF) -gcov-files-virtio-y += i386-softmmu/hw/virtio/virtio-balloon.c check-qtest-virtio-y += tests/virtio-blk-test$(EXESUF) -gcov-files-virtio-y += i386-softmmu/hw/block/virtio-blk.c check-qtest-virtio-y += tests/virtio-rng-test$(EXESUF) -gcov-files-virtio-y += hw/virtio/virtio-rng.c check-qtest-virtio-y += tests/virtio-scsi-test$(EXESUF) -gcov-files-virtio-y += i386-softmmu/hw/scsi/virtio-scsi.c ifeq ($(CONFIG_VIRTIO)$(CONFIG_VIRTFS)$(CONFIG_PCI),yyy) check-qtest-virtio-y += tests/virtio-9p-test$(EXESUF) -gcov-files-virtio-y += hw/9pfs/virtio-9p.c -gcov-files-virtio-y += i386-softmmu/hw/9pfs/virtio-9p-device.c endif check-qtest-virtio-y += tests/virtio-serial-test$(EXESUF) -gcov-files-virtio-y += i386-softmmu/hw/char/virtio-serial-bus.c check-qtest-virtio-y += $(check-qtest-virtioserial-y) -gcov-files-virtio-y += $(gcov-files-virtioserial-y) check-qtest-pci-y += tests/e1000-test$(EXESUF) -gcov-files-pci-y += hw/net/e1000.c check-qtest-pci-y += tests/e1000e-test$(EXESUF) -gcov-files-pci-y += hw/net/e1000e.c hw/net/e1000e_core.c check-qtest-pci-$(CONFIG_RTL8139_PCI) += tests/rtl8139-test$(EXESUF) -gcov-files-pci-$(CONFIG_RTL8139_PCI) += hw/net/rtl8139.c check-qtest-pci-$(CONFIG_PCNET_PCI) += tests/pcnet-test$(EXESUF) -gcov-files-pci-$(CONFIG_PCNET_PCI) += hw/net/pcnet.c -gcov-files-pci-$(CONFIG_PCNET_PCI) += hw/net/pcnet-pci.c check-qtest-pci-$(CONFIG_EEPRO100_PCI) += tests/eepro100-test$(EXESUF) -gcov-files-pci-$(CONFIG_EEPRO100_PCI) += hw/net/eepro100.c check-qtest-pci-$(CONFIG_NE2000_PCI) += tests/ne2000-test$(EXESUF) -gcov-files-pci-$(CONFIG_NE2000_PCI) += hw/net/ne2000.c check-qtest-pci-$(CONFIG_NVME_PCI) += tests/nvme-test$(EXESUF) -gcov-files-pci-$(CONFIG_NVME_PCI) += hw/block/nvme.c check-qtest-pci-$(CONFIG_AC97) += tests/ac97-test$(EXESUF) -gcov-files-pci-$(CONFIG_AC97) += hw/audio/ac97.c check-qtest-pci-$(CONFIG_ES1370) += tests/es1370-test$(EXESUF) -gcov-files-pci-$(CONFIG_ES1370) += hw/audio/es1370.c check-qtest-pci-y += $(check-qtest-virtio-y) -gcov-files-pci-y += $(gcov-files-virtio-y) hw/virtio/virtio-pci.c check-qtest-pci-$(CONFIG_IPACK) += tests/tpci200-test$(EXESUF) -gcov-files-pci-$(CONFIG_IPACK) += hw/ipack/tpci200.c check-qtest-pci-$(CONFIG_IPACK) += $(check-qtest-ipack-y) 
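The Makefile comment above notes that the POSIX-only dependency lives in libqtest rather than in the testcases themselves. For context, a test listed in these check-qtest-* variables is typically a small GLib test driving QEMU through libqtest; a rough skeleton of that shape, assuming the 2018-era libqtest API (the test path, callback, and -machine argument are illustrative only):

    #include "libqtest.h"

    static void test_nop(void)
    {
        /* would drive the guest via qtest_* helpers here */
    }

    int main(int argc, char **argv)
    {
        int ret;

        g_test_init(&argc, &argv, NULL);
        qtest_add_func("/hypothetical/nop", test_nop);

        qtest_start("-machine none");  /* spawns QEMU with -qtest */
        ret = g_test_run();
        qtest_end();

        return ret;
    }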
-gcov-files-pci-$(CONFIG_IPACK) += $(gcov-files-ipack-y) check-qtest-pci-y += tests/display-vga-test$(EXESUF) -gcov-files-pci-y += hw/display/vga.c -gcov-files-pci-y += hw/display/cirrus_vga.c -gcov-files-pci-y += hw/display/vga-pci.c -gcov-files-pci-y += hw/display/virtio-gpu.c -gcov-files-pci-y += hw/display/virtio-gpu-pci.c -gcov-files-pci-$(CONFIG_VIRTIO_VGA) += hw/display/virtio-vga.c check-qtest-pci-$(CONFIG_HDA) += tests/intel-hda-test$(EXESUF) -gcov-files-pci-$(CONFIG_HDA) += hw/audio/intel-hda.c hw/audio/hda-codec.c check-qtest-pci-$(CONFIG_IVSHMEM_DEVICE) += tests/ivshmem-test$(EXESUF) -gcov-files-pci-$(CONFIG_IVSHMEM_DEVICE) += hw/misc/ivshmem.c check-qtest-pci-y += tests/megasas-test$(EXESUF) -gcov-files-pci-y += hw/scsi/megasas.c check-qtest-i386-$(CONFIG_ISA_TESTDEV) = tests/endianness-test$(EXESUF) check-qtest-i386-y += tests/fdc-test$(EXESUF) -gcov-files-i386-y = hw/block/fdc.c check-qtest-i386-y += tests/ide-test$(EXESUF) check-qtest-i386-y += tests/ahci-test$(EXESUF) check-qtest-i386-y += tests/hd-geo-test$(EXESUF) -gcov-files-i386-y += hw/block/hd-geometry.c check-qtest-i386-y += tests/boot-order-test$(EXESUF) check-qtest-i386-y += tests/bios-tables-test$(EXESUF) check-qtest-i386-$(CONFIG_SGA) += tests/boot-serial-test$(EXESUF) @@ -274,37 +186,21 @@ check-qtest-i386-y += tests/i440fx-test$(EXESUF) check-qtest-i386-y += tests/fw_cfg-test$(EXESUF) check-qtest-i386-y += tests/drive_del-test$(EXESUF) check-qtest-i386-$(CONFIG_WDT_IB700) += tests/wdt_ib700-test$(EXESUF) -gcov-files-i386-$(CONFIG_WDT_IB700) += hw/watchdog/watchdog.c hw/watchdog/wdt_ib700.c check-qtest-i386-y += tests/tco-test$(EXESUF) check-qtest-i386-y += $(check-qtest-pci-y) -gcov-files-i386-y += $(gcov-files-pci-y) check-qtest-i386-$(CONFIG_VMXNET3_PCI) += tests/vmxnet3-test$(EXESUF) -gcov-files-i386-$(CONFIG_VMXNET3_PCI) += hw/net/vmxnet3.c -gcov-files-i386-y += hw/net/net_rx_pkt.c -gcov-files-i386-y += hw/net/net_tx_pkt.c check-qtest-i386-$(CONFIG_PVPANIC) += tests/pvpanic-test$(EXESUF) -gcov-files-i386-$(CONFIG_PVPANIC) += i386-softmmu/hw/misc/pvpanic.c check-qtest-i386-$(CONFIG_I82801B11) += tests/i82801b11-test$(EXESUF) -gcov-files-i386-$(CONFIG_I82801B11) += hw/pci-bridge/i82801b11.c check-qtest-i386-$(CONFIG_IOH3420) += tests/ioh3420-test$(EXESUF) -gcov-files-i386-$(CONFIG_IOH3420) += hw/pci-bridge/ioh3420.c check-qtest-i386-$(CONFIG_USB_OHCI) += tests/usb-hcd-ohci-test$(EXESUF) -gcov-files-i386-$(CONFIG_USB_OHCI) += hw/usb/hcd-ohci.c check-qtest-i386-$(CONFIG_USB_UHCI) += tests/usb-hcd-uhci-test$(EXESUF) -gcov-files-i386-$(CONFIG_USB_UHCI) += hw/usb/hcd-uhci.c ifeq ($(CONFIG_USB_ECHI)$(CONFIG_USB_UHCI),yy) check-qtest-i386-y += tests/usb-hcd-ehci-test$(EXESUF) endif -gcov-files-i386-$(CONFIG_USB_EHCI) += hw/usb/hcd-ehci.c -gcov-files-i386-y += hw/usb/dev-hid.c -gcov-files-i386-y += hw/usb/dev-storage.c check-qtest-i386-$(CONFIG_USB_XHCI_NEC) += tests/usb-hcd-xhci-test$(EXESUF) -gcov-files-i386-$(CONFIG_USB_XHCI) += hw/usb/hcd-xhci.c -gcov-files-i386-$(CONFIG_USB_XHCI) += hw/usb/hcd-xhci-nec.c check-qtest-i386-y += tests/cpu-plug-test$(EXESUF) check-qtest-i386-y += tests/q35-test$(EXESUF) check-qtest-i386-y += tests/vmgenid-test$(EXESUF) -gcov-files-i386-y += hw/pci-host/q35.c check-qtest-i386-$(CONFIG_VHOST_USER_NET_TEST_i386) += tests/vhost-user-test$(EXESUF) ifeq ($(CONFIG_VHOST_USER_NET_TEST_i386),) check-qtest-x86_64-$(CONFIG_VHOST_USER_NET_TEST_x86_64) += tests/vhost-user-test$(EXESUF) @@ -321,16 +217,14 @@ check-qtest-i386-y += tests/test-x86-cpuid-compat$(EXESUF) check-qtest-i386-y += 
tests/numa-test$(EXESUF) check-qtest-x86_64-y += $(check-qtest-i386-y) check-qtest-x86_64-$(CONFIG_SDHCI) += tests/sdhci-test$(EXESUF) -gcov-files-i386-y += i386-softmmu/hw/timer/mc146818rtc.c -gcov-files-x86_64-y = $(subst i386-softmmu/,x86_64-softmmu/,$(gcov-files-i386-y)) -check-qtest-alpha-y = tests/boot-serial-test$(EXESUF) +check-qtest-alpha-y += tests/boot-serial-test$(EXESUF) -check-qtest-hppa-y = tests/boot-serial-test$(EXESUF) +check-qtest-hppa-y += tests/boot-serial-test$(EXESUF) check-qtest-m68k-y = tests/boot-serial-test$(EXESUF) -check-qtest-microblaze-y = tests/boot-serial-test$(EXESUF) +check-qtest-microblaze-y += tests/boot-serial-test$(EXESUF) check-qtest-mips-$(CONFIG_ISA_TESTDEV) = tests/endianness-test$(EXESUF) @@ -338,7 +232,7 @@ check-qtest-mips64-$(CONFIG_ISA_TESTDEV) = tests/endianness-test$(EXESUF) check-qtest-mips64el-$(CONFIG_ISA_TESTDEV) = tests/endianness-test$(EXESUF) -check-qtest-moxie-y = tests/boot-serial-test$(EXESUF) +check-qtest-moxie-y += tests/boot-serial-test$(EXESUF) check-qtest-ppc-$(CONFIG_ISA_TESTDEV) = tests/endianness-test$(EXESUF) check-qtest-ppc-y += tests/boot-order-test$(EXESUF) @@ -346,23 +240,16 @@ check-qtest-ppc-y += tests/prom-env-test$(EXESUF) check-qtest-ppc-y += tests/drive_del-test$(EXESUF) check-qtest-ppc-y += tests/boot-serial-test$(EXESUF) check-qtest-ppc-y += tests/m48t59-test$(EXESUF) -gcov-files-ppc-y += hw/timer/m48t59.c -check-qtest-ppc64-y = $(check-qtest-ppc-y) -gcov-files-ppc64-y = $(subst ppc-softmmu/,ppc64-softmmu/,$(gcov-files-ppc-y)) +check-qtest-ppc64-y += $(check-qtest-ppc-y) check-qtest-ppc64-y += tests/spapr-phb-test$(EXESUF) -gcov-files-ppc64-y += ppc64-softmmu/hw/ppc/spapr_pci.c check-qtest-ppc64-y += tests/pnv-xscom-test$(EXESUF) check-qtest-ppc64-y += tests/migration-test$(EXESUF) check-qtest-ppc64-y += tests/rtas-test$(EXESUF) check-qtest-ppc64-$(CONFIG_SLIRP) += tests/pxe-test$(EXESUF) check-qtest-ppc64-$(CONFIG_USB_OHCI) += tests/usb-hcd-ohci-test$(EXESUF) -gcov-files-ppc64-$(CONFIG_USB_OHCI) += hw/usb/hcd-ohci.c check-qtest-ppc64-$(CONFIG_USB_UHCI) += tests/usb-hcd-uhci-test$(EXESUF) -gcov-files-ppc64-$(CONFIG_USB_UHCI) += hw/usb/hcd-uhci.c check-qtest-ppc64-$(CONFIG_USB_XHCI_NEC) += tests/usb-hcd-xhci-test$(EXESUF) -gcov-files-ppc64-$(CONFIG_USB_XHCI) += hw/usb/hcd-xhci.c -gcov-files-ppc64-$(CONFIG_USB_XHCI) += hw/usb/hcd-xhci-nec.c check-qtest-ppc64-y += $(check-qtest-virtio-y) check-qtest-ppc64-$(CONFIG_SLIRP) += tests/test-netfilter$(EXESUF) check-qtest-ppc64-$(CONFIG_POSIX) += tests/test-filter-mirror$(EXESUF) @@ -370,31 +257,26 @@ check-qtest-ppc64-$(CONFIG_RTL8139_PCI) += tests/test-filter-redirector$(EXESUF) check-qtest-ppc64-y += tests/display-vga-test$(EXESUF) check-qtest-ppc64-y += tests/numa-test$(EXESUF) check-qtest-ppc64-$(CONFIG_IVSHMEM_DEVICE) += tests/ivshmem-test$(EXESUF) -gcov-files-ppc64-$(CONFIG_IVSHMEM_DEVICE) += hw/misc/ivshmem.c check-qtest-ppc64-y += tests/cpu-plug-test$(EXESUF) check-qtest-sh4-$(CONFIG_ISA_TESTDEV) = tests/endianness-test$(EXESUF) check-qtest-sh4eb-$(CONFIG_ISA_TESTDEV) = tests/endianness-test$(EXESUF) -check-qtest-sparc-y = tests/prom-env-test$(EXESUF) +check-qtest-sparc-y += tests/prom-env-test$(EXESUF) check-qtest-sparc-y += tests/m48t59-test$(EXESUF) -gcov-files-sparc-y = hw/timer/m48t59.c check-qtest-sparc-y += tests/boot-serial-test$(EXESUF) check-qtest-sparc64-$(CONFIG_ISA_TESTDEV) = tests/endianness-test$(EXESUF) check-qtest-sparc64-y += tests/prom-env-test$(EXESUF) check-qtest-sparc64-y += tests/boot-serial-test$(EXESUF) -check-qtest-arm-y = 
tests/tmp105-test$(EXESUF) +check-qtest-arm-y += tests/tmp105-test$(EXESUF) check-qtest-arm-y += tests/pca9552-test$(EXESUF) check-qtest-arm-y += tests/ds1338-test$(EXESUF) check-qtest-arm-y += tests/m25p80-test$(EXESUF) -gcov-files-arm-y += hw/misc/tmp105.c check-qtest-arm-y += tests/virtio-blk-test$(EXESUF) -gcov-files-arm-y += arm-softmmu/hw/block/virtio-blk.c check-qtest-arm-y += tests/test-arm-mptimer$(EXESUF) -gcov-files-arm-y += hw/timer/arm_mptimer.c check-qtest-arm-y += tests/boot-serial-test$(EXESUF) check-qtest-arm-$(CONFIG_SDHCI) += tests/sdhci-test$(EXESUF) check-qtest-arm-y += tests/hexloader-test$(EXESUF) @@ -404,9 +286,9 @@ check-qtest-aarch64-$(CONFIG_SDHCI) += tests/sdhci-test$(EXESUF) check-qtest-aarch64-y += tests/boot-serial-test$(EXESUF) check-qtest-aarch64-y += tests/migration-test$(EXESUF) -check-qtest-microblazeel-y = $(check-qtest-microblaze-y) +check-qtest-microblazeel-y += $(check-qtest-microblaze-y) -check-qtest-xtensaeb-y = $(check-qtest-xtensa-y) +check-qtest-xtensaeb-y += $(check-qtest-xtensa-y) check-qtest-s390x-y = tests/boot-serial-test$(EXESUF) check-qtest-s390x-$(CONFIG_SLIRP) += tests/pxe-test$(EXESUF) diff --git a/tests/benchmark-crypto-cipher.c b/tests/benchmark-crypto-cipher.c index f5a0d0bc32..67fdf8c31d 100644 --- a/tests/benchmark-crypto-cipher.c +++ b/tests/benchmark-crypto-cipher.c @@ -15,17 +15,27 @@ #include "crypto/init.h" #include "crypto/cipher.h" -static void test_cipher_speed(const void *opaque) +static void test_cipher_speed(size_t chunk_size, + QCryptoCipherMode mode, + QCryptoCipherAlgorithm alg) { QCryptoCipher *cipher; Error *err = NULL; double total = 0.0; - size_t chunk_size = (size_t)opaque; uint8_t *key = NULL, *iv = NULL; uint8_t *plaintext = NULL, *ciphertext = NULL; - size_t nkey = qcrypto_cipher_get_key_len(QCRYPTO_CIPHER_ALG_AES_128); - size_t niv = qcrypto_cipher_get_iv_len(QCRYPTO_CIPHER_ALG_AES_128, - QCRYPTO_CIPHER_MODE_CBC); + size_t nkey; + size_t niv; + + if (!qcrypto_cipher_supports(alg, mode)) { + return; + } + + nkey = qcrypto_cipher_get_key_len(alg); + niv = qcrypto_cipher_get_iv_len(alg, mode); + if (mode == QCRYPTO_CIPHER_MODE_XTS) { + nkey *= 2; + } key = g_new0(uint8_t, nkey); memset(key, g_test_rand_int(), nkey); @@ -38,14 +48,14 @@ static void test_cipher_speed(const void *opaque) plaintext = g_new0(uint8_t, chunk_size); memset(plaintext, g_test_rand_int(), chunk_size); - cipher = qcrypto_cipher_new(QCRYPTO_CIPHER_ALG_AES_128, - QCRYPTO_CIPHER_MODE_CBC, + cipher = qcrypto_cipher_new(alg, mode, key, nkey, &err); g_assert(cipher != NULL); - g_assert(qcrypto_cipher_setiv(cipher, - iv, niv, - &err) == 0); + if (mode != QCRYPTO_CIPHER_MODE_ECB) + g_assert(qcrypto_cipher_setiv(cipher, + iv, niv, + &err) == 0); g_test_timer_start(); do { @@ -55,13 +65,26 @@ static void test_cipher_speed(const void *opaque) chunk_size, &err) == 0); total += chunk_size; - } while (g_test_timer_elapsed() < 5.0); + } while (g_test_timer_elapsed() < 1.0); total /= MiB; - g_print("cbc(aes128): "); - g_print("Testing chunk_size %zu bytes ", chunk_size); - g_print("done: %.2f MB in %.2f secs: ", total, g_test_timer_last()); - g_print("%.2f MB/sec\n", total / g_test_timer_last()); + g_print("Enc chunk %zu bytes ", chunk_size); + g_print("%.2f MB/sec ", total / g_test_timer_last()); + + total = 0.0; + g_test_timer_start(); + do { + g_assert(qcrypto_cipher_decrypt(cipher, + plaintext, + ciphertext, + chunk_size, + &err) == 0); + total += chunk_size; + } while (g_test_timer_elapsed() < 1.0); + + total /= MiB; + g_print("Dec chunk %zu bytes ", 
chunk_size); + g_print("%.2f MB/sec ", total / g_test_timer_last()); qcrypto_cipher_free(cipher); g_free(plaintext); @@ -70,19 +93,99 @@ static void test_cipher_speed(const void *opaque) g_free(key); } -int main(int argc, char **argv) + +static void test_cipher_speed_ecb_aes_128(const void *opaque) +{ + size_t chunk_size = (size_t)opaque; + test_cipher_speed(chunk_size, + QCRYPTO_CIPHER_MODE_ECB, + QCRYPTO_CIPHER_ALG_AES_128); +} + +static void test_cipher_speed_ecb_aes_256(const void *opaque) { - size_t i; - char name[64]; + size_t chunk_size = (size_t)opaque; + test_cipher_speed(chunk_size, + QCRYPTO_CIPHER_MODE_ECB, + QCRYPTO_CIPHER_ALG_AES_256); +} + +static void test_cipher_speed_cbc_aes_128(const void *opaque) +{ + size_t chunk_size = (size_t)opaque; + test_cipher_speed(chunk_size, + QCRYPTO_CIPHER_MODE_CBC, + QCRYPTO_CIPHER_ALG_AES_128); +} +static void test_cipher_speed_cbc_aes_256(const void *opaque) +{ + size_t chunk_size = (size_t)opaque; + test_cipher_speed(chunk_size, + QCRYPTO_CIPHER_MODE_CBC, + QCRYPTO_CIPHER_ALG_AES_256); +} + +static void test_cipher_speed_ctr_aes_128(const void *opaque) +{ + size_t chunk_size = (size_t)opaque; + test_cipher_speed(chunk_size, + QCRYPTO_CIPHER_MODE_CTR, + QCRYPTO_CIPHER_ALG_AES_128); +} + +static void test_cipher_speed_ctr_aes_256(const void *opaque) +{ + size_t chunk_size = (size_t)opaque; + test_cipher_speed(chunk_size, + QCRYPTO_CIPHER_MODE_CTR, + QCRYPTO_CIPHER_ALG_AES_256); +} + +static void test_cipher_speed_xts_aes_128(const void *opaque) +{ + size_t chunk_size = (size_t)opaque; + test_cipher_speed(chunk_size, + QCRYPTO_CIPHER_MODE_XTS, + QCRYPTO_CIPHER_ALG_AES_128); +} + +static void test_cipher_speed_xts_aes_256(const void *opaque) +{ + size_t chunk_size = (size_t)opaque; + test_cipher_speed(chunk_size, + QCRYPTO_CIPHER_MODE_XTS, + QCRYPTO_CIPHER_ALG_AES_256); +} + + +int main(int argc, char **argv) +{ g_test_init(&argc, &argv, NULL); g_assert(qcrypto_init(NULL) == 0); - for (i = 512; i <= 64 * KiB; i *= 2) { - memset(name, 0 , sizeof(name)); - snprintf(name, sizeof(name), "/crypto/cipher/speed-%zu", i); - g_test_add_data_func(name, (void *)i, test_cipher_speed); - } +#define ADD_TEST(mode, cipher, keysize, chunk) \ + g_test_add_data_func( \ + "/crypto/cipher/" #mode "-" #cipher "-" #keysize "/chunk-" #chunk, \ + (void *)chunk, \ + test_cipher_speed_ ## mode ## _ ## cipher ## _ ## keysize) + +#define ADD_TESTS(chunk) \ + do { \ + ADD_TEST(ecb, aes, 128, chunk); \ + ADD_TEST(ecb, aes, 256, chunk); \ + ADD_TEST(cbc, aes, 128, chunk); \ + ADD_TEST(cbc, aes, 256, chunk); \ + ADD_TEST(ctr, aes, 128, chunk); \ + ADD_TEST(ctr, aes, 256, chunk); \ + ADD_TEST(xts, aes, 128, chunk); \ + ADD_TEST(xts, aes, 256, chunk); \ + } while (0) + + ADD_TESTS(512); + ADD_TESTS(4096); + ADD_TESTS(16384); + ADD_TESTS(65536); return g_test_run(); } diff --git a/tests/crypto-tls-x509-helpers.h b/tests/crypto-tls-x509-helpers.h index 921341c649..88c30d7c94 100644 --- a/tests/crypto-tls-x509-helpers.h +++ b/tests/crypto-tls-x509-helpers.h @@ -22,8 +22,7 @@ #include <gnutls/x509.h> #if !(defined WIN32) && \ - defined(CONFIG_TASN1) && \ - (LIBGNUTLS_VERSION_NUMBER >= 0x020600) + defined(CONFIG_TASN1) # define QCRYPTO_HAVE_TLS_TEST_SUPPORT #endif diff --git a/tests/docker/dockerfiles/debian-bootstrap.pre b/tests/docker/dockerfiles/debian-bootstrap.pre index 3b0ef95374..c164778c30 100755 --- a/tests/docker/dockerfiles/debian-bootstrap.pre +++ b/tests/docker/dockerfiles/debian-bootstrap.pre @@ -2,7 +2,7 @@ # # Simple wrapper for debootstrap, run in the docker 
build context # -FAKEROOT=`which fakeroot 2> /dev/null` +FAKEROOT=$(which fakeroot 2> /dev/null) # debootstrap < 1.0.67 generates empty sources.list, see Debian#732255 MIN_DEBOOTSTRAP_VERSION=1.0.67 @@ -52,7 +52,7 @@ fi if [ -z $DEBOOTSTRAP_DIR ]; then NEED_DEBOOTSTRAP=false - DEBOOTSTRAP=`which debootstrap 2> /dev/null` + DEBOOTSTRAP=$(which debootstrap 2> /dev/null) if [ -z $DEBOOTSTRAP ]; then echo "No debootstrap installed, attempting to install from SCM" NEED_DEBOOTSTRAP=true diff --git a/tests/migration-test.c b/tests/migration-test.c index b7920255c5..06ca5068d8 100644 --- a/tests/migration-test.c +++ b/tests/migration-test.c @@ -803,6 +803,22 @@ int main(int argc, char **argv) return 0; } + /* + * Similar to ppc64, s390x seems to be touchy with TCG, so disable it + * there until the problems are resolved + */ + if (g_str_equal(qtest_get_arch(), "s390x")) { +#if defined(HOST_S390X) + if (access("/dev/kvm", R_OK | W_OK)) { + g_test_message("Skipping test: kvm not available"); + return 0; + } +#else + g_test_message("Skipping test: Need s390x host to work properly"); + return 0; +#endif + } + tmpfs = mkdtemp(template); if (!tmpfs) { g_test_message("mkdtemp on path (%s): %s\n", template, strerror(errno)); diff --git a/tests/ptimer-test-stubs.c b/tests/ptimer-test-stubs.c index ca5cc3b13b..54b3fd26f6 100644 --- a/tests/ptimer-test-stubs.c +++ b/tests/ptimer-test-stubs.c @@ -34,14 +34,19 @@ int64_t ptimer_test_time_ns; int use_icount = 1; bool qtest_allowed; -void timer_init_tl(QEMUTimer *ts, - QEMUTimerList *timer_list, int scale, - QEMUTimerCB *cb, void *opaque) +void timer_init_full(QEMUTimer *ts, + QEMUTimerListGroup *timer_list_group, QEMUClockType type, + int scale, int attributes, + QEMUTimerCB *cb, void *opaque) { - ts->timer_list = timer_list; + if (!timer_list_group) { + timer_list_group = &main_loop_tlg; + } + ts->timer_list = timer_list_group->tl[type]; ts->cb = cb; ts->opaque = opaque; ts->scale = scale; + ts->attributes = attributes; ts->expire_time = -1; } diff --git a/tests/qemu-iotests/049.out b/tests/qemu-iotests/049.out index 0871bff564..6b505408dd 100644 --- a/tests/qemu-iotests/049.out +++ b/tests/qemu-iotests/049.out @@ -95,35 +95,31 @@ qemu-img create -f qcow2 TEST_DIR/t.qcow2 -- -1024 qemu-img: Image size must be less than 8 EiB! qemu-img create -f qcow2 -o size=-1024 TEST_DIR/t.qcow2 -qemu-img: Value '-1024' is out of range for parameter 'size' -qemu-img: TEST_DIR/t.qcow2: Invalid options for file format 'qcow2' +qemu-img: TEST_DIR/t.qcow2: Value '-1024' is out of range for parameter 'size' qemu-img create -f qcow2 TEST_DIR/t.qcow2 -- -1k qemu-img: Image size must be less than 8 EiB! qemu-img create -f qcow2 -o size=-1k TEST_DIR/t.qcow2 -qemu-img: Value '-1k' is out of range for parameter 'size' -qemu-img: TEST_DIR/t.qcow2: Invalid options for file format 'qcow2' +qemu-img: TEST_DIR/t.qcow2: Value '-1k' is out of range for parameter 'size' qemu-img create -f qcow2 TEST_DIR/t.qcow2 -- 1kilobyte qemu-img: Invalid image size specified! You may use k, M, G, T, P or E suffixes for qemu-img: kilobytes, megabytes, gigabytes, terabytes, petabytes and exabytes. qemu-img create -f qcow2 -o size=1kilobyte TEST_DIR/t.qcow2 -qemu-img: Parameter 'size' expects a non-negative number below 2^64 +qemu-img: TEST_DIR/t.qcow2: Parameter 'size' expects a non-negative number below 2^64 Optional suffix k, M, G, T, P or E means kilo-, mega-, giga-, tera-, peta- and exabytes, respectively. 
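Returning to the ptimer-test-stubs.c hunk above: the stub mirrors the real timer_init_full() added to util/qemu-timer.c later in this series, where a NULL timer list group falls back to main_loop_tlg and the new attributes field is stored on the timer. A minimal sketch of the resulting caller pattern, reusing only names introduced by this series (the callback and its use are placeholders):

    static void example_cb(void *opaque)
    {
        /* hypothetical timer callback */
    }

    static void example_setup(void)
    {
        /* NULL group selects main_loop_tlg; QEMU_TIMER_ATTR_EXTERNAL marks
         * a timer that does not change guest state, letting record/replay
         * skip its virtual-clock checkpoint. */
        QEMUTimer *t = timer_new_full(NULL, QEMU_CLOCK_VIRTUAL,
                                      SCALE_MS, QEMU_TIMER_ATTR_EXTERNAL,
                                      example_cb, NULL);
        timer_mod(t, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 100);
    }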
-qemu-img: TEST_DIR/t.qcow2: Invalid options for file format 'qcow2' qemu-img create -f qcow2 TEST_DIR/t.qcow2 -- foobar qemu-img: Invalid image size specified! You may use k, M, G, T, P or E suffixes for qemu-img: kilobytes, megabytes, gigabytes, terabytes, petabytes and exabytes. qemu-img create -f qcow2 -o size=foobar TEST_DIR/t.qcow2 -qemu-img: Parameter 'size' expects a non-negative number below 2^64 +qemu-img: TEST_DIR/t.qcow2: Parameter 'size' expects a non-negative number below 2^64 Optional suffix k, M, G, T, P or E means kilo-, mega-, giga-, tera-, peta- and exabytes, respectively. -qemu-img: TEST_DIR/t.qcow2: Invalid options for file format 'qcow2' == Check correct interpretation of suffixes for cluster size == diff --git a/tests/tcg/mips/mipsr5900/Makefile b/tests/tcg/mips/mipsr5900/Makefile new file mode 100644 index 0000000000..a1c388bc3c --- /dev/null +++ b/tests/tcg/mips/mipsr5900/Makefile @@ -0,0 +1,30 @@ +-include ../../config-host.mak + +CROSS=mipsr5900el-unknown-linux-gnu- + +SIM=qemu-mipsel +SIM_FLAGS=-cpu R5900 + +CC = $(CROSS)gcc +CFLAGS = -Wall -mabi=32 -march=r5900 -static + +TESTCASES = div1.tst +TESTCASES += divu1.tst +TESTCASES += mflohi1.tst +TESTCASES += mtlohi1.tst +TESTCASES += mult.tst +TESTCASES += multu.tst + +all: $(TESTCASES) + +%.tst: %.c + $(CC) $(CFLAGS) $< -o $@ + +check: $(TESTCASES) + @for case in $(TESTCASES); do \ + echo $(SIM) $(SIM_FLAGS) ./$$case;\ + $(SIM) $(SIM_FLAGS) ./$$case; \ + done + +clean: + $(RM) -rf $(TESTCASES) diff --git a/tests/tcg/mips/mipsr5900/div1.c b/tests/tcg/mips/mipsr5900/div1.c new file mode 100644 index 0000000000..83dafa018b --- /dev/null +++ b/tests/tcg/mips/mipsr5900/div1.c @@ -0,0 +1,73 @@ +/* + * Test R5900-specific DIV1. + */ + +#include <stdio.h> +#include <inttypes.h> +#include <assert.h> + +struct quotient_remainder { int32_t quotient, remainder; }; + +static struct quotient_remainder div1(int32_t rs, int32_t rt) +{ + int32_t lo, hi; + + __asm__ __volatile__ ( + " div1 $0, %2, %3\n" + " mflo1 %0\n" + " mfhi1 %1\n" + : "=r" (lo), "=r" (hi) + : "r" (rs), "r" (rt)); + + assert(rs / rt == lo); + assert(rs % rt == hi); + + return (struct quotient_remainder) { .quotient = lo, .remainder = hi }; +} + +static void verify_div1(int32_t rs, int32_t rt, + int32_t expected_quotient, + int32_t expected_remainder) +{ + struct quotient_remainder qr = div1(rs, rt); + + assert(qr.quotient == expected_quotient); + assert(qr.remainder == expected_remainder); +} + +static void verify_div1_negations(int32_t rs, int32_t rt, + int32_t expected_quotient, + int32_t expected_remainder) +{ + verify_div1(rs, rt, expected_quotient, expected_remainder); + verify_div1(rs, -rt, -expected_quotient, expected_remainder); + verify_div1(-rs, rt, -expected_quotient, -expected_remainder); + verify_div1(-rs, -rt, expected_quotient, -expected_remainder); +} + +int main() +{ + verify_div1_negations(0, 1, 0, 0); + verify_div1_negations(1, 1, 1, 0); + verify_div1_negations(1, 2, 0, 1); + verify_div1_negations(17, 19, 0, 17); + verify_div1_negations(19, 17, 1, 2); + verify_div1_negations(77773, 101, 770, 3); + + verify_div1(-0x80000000, 1, -0x80000000, 0); + + /* + * Supplementary explanation from the Toshiba TX System RISC TX79 Core + * Architecture manual, A-38 and B-7, https://wiki.qemu.org/File:C790.pdf + * + * Normally, when 0x80000000 (-2147483648) the signed minimum value is + * divided by 0xFFFFFFFF (-1), the operation will result in an overflow. 
+ * However, in this instruction an overflow exception doesn't occur and + * the result will be as follows: + * + * Quotient is 0x80000000 (-2147483648), and remainder is 0x00000000 (0). + */ + verify_div1(-0x80000000, -1, -0x80000000, 0); + + return 0; +} diff --git a/tests/tcg/mips/mipsr5900/divu1.c b/tests/tcg/mips/mipsr5900/divu1.c new file mode 100644 index 0000000000..72aeed31de --- /dev/null +++ b/tests/tcg/mips/mipsr5900/divu1.c @@ -0,0 +1,48 @@ +/* + * Test R5900-specific DIVU1. + */ + +#include <stdio.h> +#include <inttypes.h> +#include <assert.h> + +struct quotient_remainder { uint32_t quotient, remainder; }; + +static struct quotient_remainder divu1(uint32_t rs, uint32_t rt) +{ + uint32_t lo, hi; + + __asm__ __volatile__ ( + " divu1 $0, %2, %3\n" + " mflo1 %0\n" + " mfhi1 %1\n" + : "=r" (lo), "=r" (hi) + : "r" (rs), "r" (rt)); + + assert(rs / rt == lo); + assert(rs % rt == hi); + + return (struct quotient_remainder) { .quotient = lo, .remainder = hi }; +} + +static void verify_divu1(uint32_t rs, uint32_t rt, + uint32_t expected_quotient, + uint32_t expected_remainder) +{ + struct quotient_remainder qr = divu1(rs, rt); + + assert(qr.quotient == expected_quotient); + assert(qr.remainder == expected_remainder); +} + +int main() +{ + verify_divu1(0, 1, 0, 0); + verify_divu1(1, 1, 1, 0); + verify_divu1(1, 2, 0, 1); + verify_divu1(17, 19, 0, 17); + verify_divu1(19, 17, 1, 2); + verify_divu1(77773, 101, 770, 3); + + return 0; +} diff --git a/tests/tcg/mips/mipsr5900/mflohi1.c b/tests/tcg/mips/mipsr5900/mflohi1.c new file mode 100644 index 0000000000..eed3683dc5 --- /dev/null +++ b/tests/tcg/mips/mipsr5900/mflohi1.c @@ -0,0 +1,35 @@ +/* + * Test R5900-specific MFLO1 and MFHI1. + */ + +#include <stdio.h> +#include <inttypes.h> +#include <assert.h> + +int main() +{ + int32_t rs = 12207031, rt = 305175781; + int32_t rs1 = 32452867, rt1 = 49979687; + int64_t lo, hi, lo1, hi1; + int64_t r, r1; + + /* Test both LO/HI and LO1/HI1 to verify separation. */ + __asm__ __volatile__ ( + " mult $0, %4, %5\n" + " mult1 $0, %6, %7\n" + " mflo %0\n" + " mfhi %1\n" + " mflo1 %2\n" + " mfhi1 %3\n" + : "=r" (lo), "=r" (hi), + "=r" (lo1), "=r" (hi1) + : "r" (rs), "r" (rt), + "r" (rs1), "r" (rt1)); + r = ((int64_t)hi << 32) | (uint32_t)lo; + r1 = ((int64_t)hi1 << 32) | (uint32_t)lo1; + + assert(r == 3725290219116211); + assert(r1 == 1621984134912629); + + return 0; +} diff --git a/tests/tcg/mips/mipsr5900/mtlohi1.c b/tests/tcg/mips/mipsr5900/mtlohi1.c new file mode 100644 index 0000000000..7f3e72835a --- /dev/null +++ b/tests/tcg/mips/mipsr5900/mtlohi1.c @@ -0,0 +1,40 @@ +/* + * Test R5900-specific MTLO1 and MTHI1. + */ + +#include <stdio.h> +#include <inttypes.h> +#include <assert.h> + +int main() +{ + int32_t tlo = 12207031, thi = 305175781; + int32_t tlo1 = 32452867, thi1 = 49979687; + int32_t flo, fhi, flo1, fhi1; + + /* Test both LO/HI and LO1/HI1 to verify separation. 
*/ + __asm__ __volatile__ ( + " mtlo %4\n" + " mthi %5\n" + " mtlo1 %6\n" + " mthi1 %7\n" + " move %0, $0\n" + " move %1, $0\n" + " move %2, $0\n" + " move %3, $0\n" + " mflo %0\n" + " mfhi %1\n" + " mflo1 %2\n" + " mfhi1 %3\n" + : "=r" (flo), "=r" (fhi), + "=r" (flo1), "=r" (fhi1) + : "r" (tlo), "r" (thi), + "r" (tlo1), "r" (thi1)); + + assert(flo == 12207031); + assert(fhi == 305175781); + assert(flo1 == 32452867); + assert(fhi1 == 49979687); + + return 0; +} diff --git a/tests/tcg/mips/mipsr5900/mult.c b/tests/tcg/mips/mipsr5900/mult.c new file mode 100644 index 0000000000..5710b395e6 --- /dev/null +++ b/tests/tcg/mips/mipsr5900/mult.c @@ -0,0 +1,76 @@ +/* + * Test R5900-specific three-operand MULT and MULT1. + */ + +#include <stdio.h> +#include <inttypes.h> +#include <assert.h> + +static int64_t mult(int32_t rs, int32_t rt) +{ + int32_t rd, lo, hi; + int64_t r; + + __asm__ __volatile__ ( + " mult %0, %3, %4\n" + " mflo %1\n" + " mfhi %2\n" + : "=r" (rd), "=r" (lo), "=r" (hi) + : "r" (rs), "r" (rt)); + r = ((int64_t)hi << 32) | (uint32_t)lo; + + assert((int64_t)rs * rt == r); + assert(rd == lo); + + return r; +} + +static int64_t mult1(int32_t rs, int32_t rt) +{ + int32_t rd, lo, hi; + int64_t r; + + __asm__ __volatile__ ( + " mult1 %0, %3, %4\n" + " mflo1 %1\n" + " mfhi1 %2\n" + : "=r" (rd), "=r" (lo), "=r" (hi) + : "r" (rs), "r" (rt)); + r = ((int64_t)hi << 32) | (uint32_t)lo; + + assert((int64_t)rs * rt == r); + assert(rd == lo); + + return r; +} + +static int64_t mult_variants(int32_t rs, int32_t rt) +{ + int64_t rd = mult(rs, rt); + int64_t rd1 = mult1(rs, rt); + + assert(rd == rd1); + + return rd; +} + +static void verify_mult_negations(int32_t rs, int32_t rt, int64_t expected) +{ + assert(mult_variants(rs, rt) == expected); + assert(mult_variants(-rs, rt) == -expected); + assert(mult_variants(rs, -rt) == -expected); + assert(mult_variants(-rs, -rt) == expected); +} + +int main() +{ + verify_mult_negations(17, 19, 323); + verify_mult_negations(77773, 99991, 7776600043); + verify_mult_negations(12207031, 305175781, 3725290219116211); + + assert(mult_variants(-0x80000000, 0x7FFFFFFF) == -0x3FFFFFFF80000000); + assert(mult_variants(-0x80000000, -0x7FFFFFFF) == 0x3FFFFFFF80000000); + assert(mult_variants(-0x80000000, -0x80000000) == 0x4000000000000000); + + return 0; +} diff --git a/tests/tcg/mips/mipsr5900/multu.c b/tests/tcg/mips/mipsr5900/multu.c new file mode 100644 index 0000000000..f043904d69 --- /dev/null +++ b/tests/tcg/mips/mipsr5900/multu.c @@ -0,0 +1,68 @@ +/* + * Test R5900-specific three-operand MULTU and MULTU1. 
+ */ + +#include <stdio.h> +#include <inttypes.h> +#include <assert.h> + +static uint64_t multu(uint32_t rs, uint32_t rt) +{ + uint32_t rd, lo, hi; + uint64_t r; + + __asm__ __volatile__ ( + " multu %0, %3, %4\n" + " mflo %1\n" + " mfhi %2\n" + : "=r" (rd), "=r" (lo), "=r" (hi) + : "r" (rs), "r" (rt)); + r = ((uint64_t)hi << 32) | (uint32_t)lo; + + assert((uint64_t)rs * rt == r); + assert(rd == lo); + + return r; +} + +static uint64_t multu1(uint32_t rs, uint32_t rt) +{ + uint32_t rd, lo, hi; + uint64_t r; + + __asm__ __volatile__ ( + " multu1 %0, %3, %4\n" + " mflo1 %1\n" + " mfhi1 %2\n" + : "=r" (rd), "=r" (lo), "=r" (hi) + : "r" (rs), "r" (rt)); + r = ((uint64_t)hi << 32) | (uint32_t)lo; + + assert((uint64_t)rs * rt == r); + assert(rd == lo); + + return r; +} + +static uint64_t multu_variants(uint32_t rs, uint32_t rt) +{ + uint64_t rd = multu(rs, rt); + uint64_t rd1 = multu1(rs, rt); + + assert(rd == rd1); + + return rd; +} + +int main() +{ + assert(multu_variants(17, 19) == 323); + assert(multu_variants(77773, 99991) == 7776600043); + assert(multu_variants(12207031, 305175781) == 3725290219116211); + + assert(multu_variants(0x80000000U, 0x7FFFFFFF) == 0x3FFFFFFF80000000); + assert(multu_variants(0x80000000U, 0x80000000U) == 0x4000000000000000); + assert(multu_variants(0xFFFFFFFFU, 0xFFFFFFFFU) == 0xFFFFFFFE00000001U); + + return 0; +} diff --git a/tests/test-crypto-block.c b/tests/test-crypto-block.c index fd29a045d2..fae4ffc453 100644 --- a/tests/test-crypto-block.c +++ b/tests/test-crypto-block.c @@ -29,7 +29,7 @@ #endif #if (defined(_WIN32) || defined RUSAGE_THREAD) && \ - (defined(CONFIG_NETTLE_KDF) || defined(CONFIG_GCRYPT_KDF)) + (defined(CONFIG_NETTLE) || defined(CONFIG_GCRYPT)) #define TEST_LUKS #else #undef TEST_LUKS diff --git a/tests/test-crypto-tlscredsx509.c b/tests/test-crypto-tlscredsx509.c index 30f9ac4bbf..940a026c6e 100644 --- a/tests/test-crypto-tlscredsx509.c +++ b/tests/test-crypto-tlscredsx509.c @@ -283,14 +283,8 @@ int main(int argc, char **argv) true, true, GNUTLS_KP_TLS_WWW_SERVER, NULL, 0, 0); - /* Technically a CA cert with basic constraints - * key purpose == key signing + non-critical should - * be rejected. GNUTLS < 3.1 does not reject it and - * we don't anticipate them changing this behaviour - */ TLS_TEST_REG(badca1, true, cacert4req.filename, servercert4req.filename, - (GNUTLS_VERSION_MAJOR == 3 && GNUTLS_VERSION_MINOR >= 1) || - GNUTLS_VERSION_MAJOR > 3); + true); TLS_TEST_REG(badca2, true, cacert5req.filename, servercert5req.filename, true); TLS_TEST_REG(badca3, true, diff --git a/tests/test-crypto-xts.c b/tests/test-crypto-xts.c index 1f1412c45a..6fb61cf635 100644 --- a/tests/test-crypto-xts.c +++ b/tests/test-crypto-xts.c @@ -1,7 +1,7 @@ /* * QEMU Crypto XTS cipher mode * - * Copyright (c) 2015-2016 Red Hat, Inc. + * Copyright (c) 2015-2018 Red Hat, Inc. 
* * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public @@ -340,70 +340,161 @@ static void test_xts_aes_decrypt(const void *ctx, static void test_xts(const void *opaque) { const QCryptoXTSTestData *data = opaque; - unsigned char out[512], Torg[16], T[16]; + uint8_t out[512], Torg[16], T[16]; uint64_t seq; - int j; - unsigned long len; struct TestAES aesdata; struct TestAES aestweak; - for (j = 0; j < 2; j++) { - /* skip the cases where - * the length is smaller than 2*blocklen - * or the length is not a multiple of 32 - */ - if ((j == 1) && ((data->PTLEN < 32) || (data->PTLEN % 32))) { - continue; - } - len = data->PTLEN / 2; - - AES_set_encrypt_key(data->key1, data->keylen / 2 * 8, &aesdata.enc); - AES_set_decrypt_key(data->key1, data->keylen / 2 * 8, &aesdata.dec); - AES_set_encrypt_key(data->key2, data->keylen / 2 * 8, &aestweak.enc); - AES_set_decrypt_key(data->key2, data->keylen / 2 * 8, &aestweak.dec); - - seq = data->seqnum; - STORE64L(seq, Torg); - memset(Torg + 8, 0, 8); - - memcpy(T, Torg, sizeof(T)); - if (j == 0) { - xts_encrypt(&aesdata, &aestweak, - test_xts_aes_encrypt, - test_xts_aes_decrypt, - T, data->PTLEN, out, data->PTX); - } else { - xts_encrypt(&aesdata, &aestweak, - test_xts_aes_encrypt, - test_xts_aes_decrypt, - T, len, out, data->PTX); - xts_encrypt(&aesdata, &aestweak, - test_xts_aes_encrypt, - test_xts_aes_decrypt, - T, len, &out[len], &data->PTX[len]); - } + AES_set_encrypt_key(data->key1, data->keylen / 2 * 8, &aesdata.enc); + AES_set_decrypt_key(data->key1, data->keylen / 2 * 8, &aesdata.dec); + AES_set_encrypt_key(data->key2, data->keylen / 2 * 8, &aestweak.enc); + AES_set_decrypt_key(data->key2, data->keylen / 2 * 8, &aestweak.dec); - g_assert(memcmp(out, data->CTX, data->PTLEN) == 0); - - memcpy(T, Torg, sizeof(T)); - if (j == 0) { - xts_decrypt(&aesdata, &aestweak, - test_xts_aes_encrypt, - test_xts_aes_decrypt, - T, data->PTLEN, out, data->CTX); - } else { - xts_decrypt(&aesdata, &aestweak, - test_xts_aes_encrypt, - test_xts_aes_decrypt, - T, len, out, data->CTX); - xts_decrypt(&aesdata, &aestweak, - test_xts_aes_encrypt, - test_xts_aes_decrypt, - T, len, &out[len], &data->CTX[len]); - } + seq = data->seqnum; + STORE64L(seq, Torg); + memset(Torg + 8, 0, 8); - g_assert(memcmp(out, data->PTX, data->PTLEN) == 0); - } + memcpy(T, Torg, sizeof(T)); + xts_encrypt(&aesdata, &aestweak, + test_xts_aes_encrypt, + test_xts_aes_decrypt, + T, data->PTLEN, out, data->PTX); + + g_assert(memcmp(out, data->CTX, data->PTLEN) == 0); + + memcpy(T, Torg, sizeof(T)); + xts_decrypt(&aesdata, &aestweak, + test_xts_aes_encrypt, + test_xts_aes_decrypt, + T, data->PTLEN, out, data->CTX); + + g_assert(memcmp(out, data->PTX, data->PTLEN) == 0); +} + + +static void test_xts_split(const void *opaque) +{ + const QCryptoXTSTestData *data = opaque; + uint8_t out[512], Torg[16], T[16]; + uint64_t seq; + unsigned long len = data->PTLEN / 2; + struct TestAES aesdata; + struct TestAES aestweak; + + AES_set_encrypt_key(data->key1, data->keylen / 2 * 8, &aesdata.enc); + AES_set_decrypt_key(data->key1, data->keylen / 2 * 8, &aesdata.dec); + AES_set_encrypt_key(data->key2, data->keylen / 2 * 8, &aestweak.enc); + AES_set_decrypt_key(data->key2, data->keylen / 2 * 8, &aestweak.dec); + + seq = data->seqnum; + STORE64L(seq, Torg); + memset(Torg + 8, 0, 8); + + memcpy(T, Torg, sizeof(T)); + xts_encrypt(&aesdata, &aestweak, + test_xts_aes_encrypt, + test_xts_aes_decrypt, + T, len, out, data->PTX); + xts_encrypt(&aesdata, &aestweak, 
+ test_xts_aes_encrypt, + test_xts_aes_decrypt, + T, len, &out[len], &data->PTX[len]); + + g_assert(memcmp(out, data->CTX, data->PTLEN) == 0); + + memcpy(T, Torg, sizeof(T)); + xts_decrypt(&aesdata, &aestweak, + test_xts_aes_encrypt, + test_xts_aes_decrypt, + T, len, out, data->CTX); + xts_decrypt(&aesdata, &aestweak, + test_xts_aes_encrypt, + test_xts_aes_decrypt, + T, len, &out[len], &data->CTX[len]); + + g_assert(memcmp(out, data->PTX, data->PTLEN) == 0); +} + + +static void test_xts_unaligned(const void *opaque) +{ +#define BAD_ALIGN 3 + const QCryptoXTSTestData *data = opaque; + uint8_t in[512 + BAD_ALIGN], out[512 + BAD_ALIGN]; + uint8_t Torg[16], T[16 + BAD_ALIGN]; + uint64_t seq; + struct TestAES aesdata; + struct TestAES aestweak; + + AES_set_encrypt_key(data->key1, data->keylen / 2 * 8, &aesdata.enc); + AES_set_decrypt_key(data->key1, data->keylen / 2 * 8, &aesdata.dec); + AES_set_encrypt_key(data->key2, data->keylen / 2 * 8, &aestweak.enc); + AES_set_decrypt_key(data->key2, data->keylen / 2 * 8, &aestweak.dec); + + seq = data->seqnum; + STORE64L(seq, Torg); + memset(Torg + 8, 0, 8); + + /* IV not aligned */ + memcpy(T + BAD_ALIGN, Torg, 16); + memcpy(in, data->PTX, data->PTLEN); + xts_encrypt(&aesdata, &aestweak, + test_xts_aes_encrypt, + test_xts_aes_decrypt, + T + BAD_ALIGN, data->PTLEN, out, in); + + g_assert(memcmp(out, data->CTX, data->PTLEN) == 0); + + /* plain text not aligned */ + memcpy(T, Torg, 16); + memcpy(in + BAD_ALIGN, data->PTX, data->PTLEN); + xts_encrypt(&aesdata, &aestweak, + test_xts_aes_encrypt, + test_xts_aes_decrypt, + T, data->PTLEN, out, in + BAD_ALIGN); + + g_assert(memcmp(out, data->CTX, data->PTLEN) == 0); + + /* cipher text not aligned */ + memcpy(T, Torg, 16); + memcpy(in, data->PTX, data->PTLEN); + xts_encrypt(&aesdata, &aestweak, + test_xts_aes_encrypt, + test_xts_aes_decrypt, + T, data->PTLEN, out + BAD_ALIGN, in); + + g_assert(memcmp(out + BAD_ALIGN, data->CTX, data->PTLEN) == 0); + + + /* IV not aligned */ + memcpy(T + BAD_ALIGN, Torg, 16); + memcpy(in, data->CTX, data->PTLEN); + xts_decrypt(&aesdata, &aestweak, + test_xts_aes_encrypt, + test_xts_aes_decrypt, + T + BAD_ALIGN, data->PTLEN, out, in); + + g_assert(memcmp(out, data->PTX, data->PTLEN) == 0); + + /* cipher text not aligned */ + memcpy(T, Torg, 16); + memcpy(in + BAD_ALIGN, data->CTX, data->PTLEN); + xts_decrypt(&aesdata, &aestweak, + test_xts_aes_encrypt, + test_xts_aes_decrypt, + T, data->PTLEN, out, in + BAD_ALIGN); + + g_assert(memcmp(out, data->PTX, data->PTLEN) == 0); + + /* plain text not aligned */ + memcpy(T, Torg, 16); + memcpy(in, data->CTX, data->PTLEN); + xts_decrypt(&aesdata, &aestweak, + test_xts_aes_encrypt, + test_xts_aes_decrypt, + T, data->PTLEN, out + BAD_ALIGN, in); + + g_assert(memcmp(out + BAD_ALIGN, data->PTX, data->PTLEN) == 0); } @@ -416,7 +507,22 @@ int main(int argc, char **argv) g_assert(qcrypto_init(NULL) == 0); for (i = 0; i < G_N_ELEMENTS(test_data); i++) { - g_test_add_data_func(test_data[i].path, &test_data[i], test_xts); + gchar *path = g_strdup_printf("%s/basic", test_data[i].path); + g_test_add_data_func(path, &test_data[i], test_xts); + g_free(path); + + /* skip the cases where the length is smaller than 2*blocklen + * or the length is not a multiple of 32 + */ + if ((test_data[i].PTLEN >= 32) && !(test_data[i].PTLEN % 32)) { + path = g_strdup_printf("%s/split", test_data[i].path); + g_test_add_data_func(path, &test_data[i], test_xts_split); + g_free(path); + } + + path = g_strdup_printf("%s/unaligned", test_data[i].path); + 
g_test_add_data_func(path, &test_data[i], test_xts_unaligned); + g_free(path); } return g_test_run(); @@ -89,19 +89,19 @@ static int tpm_init_tpmdev(void *dummy, QemuOpts *opts, Error **errp) int i; if (!QLIST_EMPTY(&tpm_backends)) { - error_report("Only one TPM is allowed."); + error_setg(errp, "Only one TPM is allowed."); return 1; } id = qemu_opts_id(opts); if (id == NULL) { - error_report(QERR_MISSING_PARAMETER, "id"); + error_setg(errp, QERR_MISSING_PARAMETER, "id"); return 1; } value = qemu_opt_get(opts, "type"); if (!value) { - error_report(QERR_MISSING_PARAMETER, "type"); + error_setg(errp, QERR_MISSING_PARAMETER, "type"); tpm_display_backend_drivers(); return 1; } @@ -109,8 +109,8 @@ static int tpm_init_tpmdev(void *dummy, QemuOpts *opts, Error **errp) i = qapi_enum_parse(&TpmType_lookup, value, -1, NULL); be = i >= 0 ? tpm_be_find_by_type(i) : NULL; if (be == NULL) { - error_report(QERR_INVALID_PARAMETER_VALUE, - "type", "a TPM backend type"); + error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "type", + "a TPM backend type"); tpm_display_backend_drivers(); return 1; } @@ -118,7 +118,7 @@ static int tpm_init_tpmdev(void *dummy, QemuOpts *opts, Error **errp) /* validate backend specific opts */ qemu_opts_validate(opts, be->opts, &local_err); if (local_err) { - error_report_err(local_err); + error_propagate(errp, local_err); return 1; } @@ -151,14 +151,10 @@ void tpm_cleanup(void) * Initialize the TPM. Process the tpmdev command line options describing the * TPM backend. */ -int tpm_init(void) +void tpm_init(void) { - if (qemu_opts_foreach(qemu_find_opts("tpmdev"), - tpm_init_tpmdev, NULL, NULL)) { - return -1; - } - - return 0; + qemu_opts_foreach(qemu_find_opts("tpmdev"), + tpm_init_tpmdev, NULL, &error_fatal); } /* diff --git a/ui/curses.c b/ui/curses.c index 59d819fd4d..f4e7a12f74 100644 --- a/ui/curses.c +++ b/ui/curses.c @@ -28,6 +28,7 @@ #include <termios.h> #endif +#include "qapi/error.h" #include "qemu-common.h" #include "ui/console.h" #include "ui/input.h" @@ -421,9 +422,8 @@ static void curses_keyboard_setup(void) keyboard_layout = "en-us"; #endif if(keyboard_layout) { - kbd_layout = init_keyboard_layout(name2keysym, keyboard_layout); - if (!kbd_layout) - exit(1); + kbd_layout = init_keyboard_layout(name2keysym, keyboard_layout, + &error_fatal); } } diff --git a/ui/input.c b/ui/input.c index dd7f6d7f21..7c9a4109c4 100644 --- a/ui/input.c +++ b/ui/input.c @@ -271,7 +271,7 @@ static void qemu_input_queue_process(void *opaque) item = QTAILQ_FIRST(queue); switch (item->type) { case QEMU_INPUT_QUEUE_DELAY: - timer_mod(item->timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_EXT) + timer_mod(item->timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + item->delay_ms); return; case QEMU_INPUT_QUEUE_EVENT: @@ -301,7 +301,7 @@ static void qemu_input_queue_delay(struct QemuInputEventQueueHead *queue, queue_count++; if (start_timer) { - timer_mod(item->timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_EXT) + timer_mod(item->timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + item->delay_ms); } } @@ -448,8 +448,9 @@ void qemu_input_event_send_key_delay(uint32_t delay_ms) } if (!kbd_timer) { - kbd_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL_EXT, - qemu_input_queue_process, &kbd_queue); + kbd_timer = timer_new_full(NULL, QEMU_CLOCK_VIRTUAL, + SCALE_MS, QEMU_TIMER_ATTR_EXTERNAL, + qemu_input_queue_process, &kbd_queue); } if (queue_count < queue_limit) { qemu_input_queue_delay(&kbd_queue, kbd_timer, diff --git a/ui/keymaps.c b/ui/keymaps.c index 43fe604724..085889b555 100644 --- a/ui/keymaps.c +++ b/ui/keymaps.c @@ 
-27,6 +27,7 @@ #include "sysemu/sysemu.h" #include "trace.h" #include "qemu/error-report.h" +#include "qapi/error.h" struct keysym2code { uint32_t count; @@ -79,10 +80,11 @@ static void add_keysym(char *line, int keysym, int keycode, kbd_layout_t *k) trace_keymap_add(keysym, keycode, line); } -static kbd_layout_t *parse_keyboard_layout(const name2keysym_t *table, - const char *language, - kbd_layout_t *k) +static int parse_keyboard_layout(kbd_layout_t *k, + const name2keysym_t *table, + const char *language, Error **errp) { + int ret; FILE *f; char * filename; char line[1024]; @@ -94,13 +96,8 @@ static kbd_layout_t *parse_keyboard_layout(const name2keysym_t *table, f = filename ? fopen(filename, "r") : NULL; g_free(filename); if (!f) { - fprintf(stderr, "Could not read keymap file: '%s'\n", language); - return NULL; - } - - if (!k) { - k = g_new0(kbd_layout_t, 1); - k->hash = g_hash_table_new(NULL, NULL); + error_setg(errp, "could not read keymap file: '%s'", language); + return -1; } for(;;) { @@ -118,7 +115,10 @@ static kbd_layout_t *parse_keyboard_layout(const name2keysym_t *table, continue; } if (!strncmp(line, "include ", 8)) { - parse_keyboard_layout(table, line + 8, k); + if (parse_keyboard_layout(k, table, line + 8, errp) < 0) { + ret = -1; + goto out; + } } else { int offset = 0; while (line[offset] != 0 && @@ -164,15 +164,27 @@ static kbd_layout_t *parse_keyboard_layout(const name2keysym_t *table, } } } + + ret = 0; +out: fclose(f); - return k; + return ret; } kbd_layout_t *init_keyboard_layout(const name2keysym_t *table, - const char *language) + const char *language, Error **errp) { - return parse_keyboard_layout(table, language, NULL); + kbd_layout_t *k; + + k = g_new0(kbd_layout_t, 1); + k->hash = g_hash_table_new(NULL, NULL); + if (parse_keyboard_layout(k, table, language, errp) < 0) { + g_hash_table_unref(k->hash); + g_free(k); + return NULL; + } + return k; } diff --git a/ui/keymaps.h b/ui/keymaps.h index 0693588225..98213a4191 100644 --- a/ui/keymaps.h +++ b/ui/keymaps.h @@ -53,7 +53,7 @@ typedef struct { typedef struct kbd_layout_t kbd_layout_t; kbd_layout_t *init_keyboard_layout(const name2keysym_t *table, - const char *language); + const char *language, Error **errp); int keysym2scancode(kbd_layout_t *k, int keysym, bool shift, bool altgr, bool ctrl); int keycode_is_keypad(kbd_layout_t *k, int keycode); @@ -29,6 +29,7 @@ #include <SDL.h> #include <SDL_syswm.h> +#include "qapi/error.h" #include "qemu-common.h" #include "qemu/cutils.h" #include "ui/console.h" @@ -917,9 +918,8 @@ static void sdl1_display_init(DisplayState *ds, DisplayOptions *o) keyboard_layout = "en-us"; #endif if(keyboard_layout) { - kbd_layout = init_keyboard_layout(name2keysym, keyboard_layout); - if (!kbd_layout) - exit(1); + kbd_layout = init_keyboard_layout(name2keysym, keyboard_layout, + &error_fatal); } g_printerr("Running QEMU with SDL 1.2 is deprecated, and will be removed\n" diff --git a/ui/spice-core.c b/ui/spice-core.c index a4fbbc3898..ebaae24643 100644 --- a/ui/spice-core.c +++ b/ui/spice-core.c @@ -597,9 +597,9 @@ static int add_channel(void *opaque, const char *name, const char *value, if (strcmp(name, "tls-channel") == 0) { int *tls_port = opaque; if (!*tls_port) { - error_report("spice: tried to setup tls-channel" - " without specifying a TLS port"); - exit(1); + error_setg(errp, "spice: tried to setup tls-channel" + " without specifying a TLS port"); + return -1; } security = SPICE_CHANNEL_SECURITY_SSL; } @@ -615,8 +615,9 @@ static int add_channel(void *opaque, const char *name, const 
char *value, rc = spice_server_set_channel_security(spice_server, value, security); } if (rc != 0) { - error_report("spice: failed to set channel security for %s", value); - exit(1); + error_setg(errp, "spice: failed to set channel security for %s", + value); + return -1; } return 0; } @@ -787,7 +788,7 @@ void qemu_spice_init(void) spice_server_set_playback_compression (spice_server, qemu_opt_get_bool(opts, "playback-compression", 1)); - qemu_opt_foreach(opts, add_channel, &tls_port, NULL); + qemu_opt_foreach(opts, add_channel, &tls_port, &error_fatal); spice_server_set_name(spice_server, qemu_name); spice_server_set_uuid(spice_server, (unsigned char *)&qemu_uuid); @@ -3205,7 +3205,7 @@ static const DisplayChangeListenerOps dcl_ops = { .dpy_cursor_define = vnc_dpy_cursor_define, }; -void vnc_display_init(const char *id) +void vnc_display_init(const char *id, Error **errp) { VncDisplay *vd; @@ -3222,13 +3222,14 @@ void vnc_display_init(const char *id) if (keyboard_layout) { trace_vnc_key_map_init(keyboard_layout); - vd->kbd_layout = init_keyboard_layout(name2keysym, keyboard_layout); + vd->kbd_layout = init_keyboard_layout(name2keysym, + keyboard_layout, errp); } else { - vd->kbd_layout = init_keyboard_layout(name2keysym, "en-us"); + vd->kbd_layout = init_keyboard_layout(name2keysym, "en-us", errp); } if (!vd->kbd_layout) { - exit(1); + return; } vd->share_policy = VNC_SHARE_POLICY_ALLOW_EXCLUSIVE; @@ -4079,11 +4080,15 @@ int vnc_init_func(void *opaque, QemuOpts *opts, Error **errp) char *id = (char *)qemu_opts_id(opts); assert(id); - vnc_display_init(id); + vnc_display_init(id, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return -1; + } vnc_display_open(id, &local_err); if (local_err != NULL) { - error_reportf_err(local_err, "Failed to start VNC server: "); - exit(1); + error_propagate(errp, local_err); + return -1; } return 0; } diff --git a/util/error.c b/util/error.c index 3efdd69162..b5ccbd8eac 100644 --- a/util/error.c +++ b/util/error.c @@ -292,3 +292,16 @@ void error_propagate(Error **dst_errp, Error *local_err) error_free(local_err); } } + +void error_propagate_prepend(Error **dst_errp, Error *err, + const char *fmt, ...) +{ + va_list ap; + + if (dst_errp && !*dst_errp) { + va_start(ap, fmt); + error_vprepend(&err, fmt, ap); + va_end(ap); + } /* else error is being ignored, don't bother with prepending */ + error_propagate(dst_errp, err); +} diff --git a/util/qemu-error.c b/util/qemu-error.c index 4ab428f7e4..fcbe8a1f74 100644 --- a/util/qemu-error.c +++ b/util/qemu-error.c @@ -194,7 +194,6 @@ bool enable_timestamp_msg; * Format arguments like vsprintf(). The resulting message should be * a single phrase, with no newline or trailing punctuation. * Prepend the current location and append a newline. - * It's wrong to call this in a QMP monitor. Use error_setg() there. */ static void vreport(report_type type, const char *fmt, va_list ap) { @@ -242,7 +241,6 @@ void error_vreport(const char *fmt, va_list ap) * Format arguments like vsprintf(). The resulting message should be * a single phrase, with no newline or trailing punctuation. * Prepend the current location and append a newline. - * It's wrong to call this in a QMP monitor. Use error_setg() there. */ void warn_vreport(const char *fmt, va_list ap) { @@ -255,7 +253,6 @@ void warn_vreport(const char *fmt, va_list ap) * Format arguments like vsprintf(). The resulting message should be * a single phrase, with no newline or trailing punctuation. * Prepend the current location and append a newline. 
- * It's wrong to call this in a QMP monitor. Use error_setg() there. */ void info_vreport(const char *fmt, va_list ap) { @@ -283,7 +280,6 @@ void error_report(const char *fmt, ...) * Format arguments like sprintf(). The resulting message should be a * single phrase, with no newline or trailing punctuation. * Prepend the current location and append a newline. - * It's wrong to call this in a QMP monitor. Use error_setg() there. */ void warn_report(const char *fmt, ...) { @@ -300,7 +296,6 @@ void info_report(const char *fmt, ...) * Format arguments like sprintf(). The resulting message should be a * single phrase, with no newline or trailing punctuation. * Prepend the current location and append a newline. - * It's wrong to call this in a QMP monitor. Use error_setg() there. */ void info_report(const char *fmt, ...) { diff --git a/util/qemu-timer.c b/util/qemu-timer.c index eb60d8f73a..1cc1b2f2c3 100644 --- a/util/qemu-timer.c +++ b/util/qemu-timer.c @@ -339,14 +339,19 @@ int qemu_poll_ns(GPollFD *fds, guint nfds, int64_t timeout) } -void timer_init_tl(QEMUTimer *ts, - QEMUTimerList *timer_list, int scale, - QEMUTimerCB *cb, void *opaque) +void timer_init_full(QEMUTimer *ts, + QEMUTimerListGroup *timer_list_group, QEMUClockType type, + int scale, int attributes, + QEMUTimerCB *cb, void *opaque) { - ts->timer_list = timer_list; + if (!timer_list_group) { + timer_list_group = &main_loop_tlg; + } + ts->timer_list = timer_list_group->tl[type]; ts->cb = cb; ts->opaque = opaque; ts->scale = scale; + ts->attributes = attributes; ts->expire_time = -1; } @@ -484,6 +489,7 @@ bool timerlist_run_timers(QEMUTimerList *timer_list) bool progress = false; QEMUTimerCB *cb; void *opaque; + bool need_replay_checkpoint = false; if (!atomic_read(&timer_list->active_timers)) { return false; } @@ -496,12 +502,18 @@ bool timerlist_run_timers(QEMUTimerList *timer_list) switch (timer_list->clock->type) { case QEMU_CLOCK_REALTIME: - case QEMU_CLOCK_VIRTUAL_EXT: break; default: case QEMU_CLOCK_VIRTUAL: - if (!replay_checkpoint(CHECKPOINT_CLOCK_VIRTUAL)) { - goto out; + if (replay_mode != REPLAY_MODE_NONE) { + /* Checkpoint for virtual clock is redundant in cases where + * it's being triggered with only EXTERNAL timers, because + * these timers don't change guest state directly. + * Since it has conditional dependence on specific timers, it is + * subject to race conditions and requires special handling. + * See below. + */ + need_replay_checkpoint = true; } break; case QEMU_CLOCK_HOST: @@ -516,14 +528,39 @@ bool timerlist_run_timers(QEMUTimerList *timer_list) break; } + /* + * Extract expired timers from the active timers list and process them. + * + * In rr mode we need "filtered" checkpointing for virtual clock. The + * checkpoint must be recorded/replayed before processing any non-EXTERNAL timer, + * and that must only be done once since the clock value stays the same. Because + * non-EXTERNAL timers may appear in the timers list while it is being processed, + * the checkpoint is issued just before the first non-EXTERNAL timer runs, and + * is skipped when no such timer fires before the list is drained. + */ current_time = qemu_clock_get_ns(timer_list->clock->type); - for(;;) { - qemu_mutex_lock(&timer_list->active_timers_lock); - ts = timer_list->active_timers; + qemu_mutex_lock(&timer_list->active_timers_lock); + while ((ts = timer_list->active_timers)) { if (!timer_expired_ns(ts, current_time)) { - qemu_mutex_unlock(&timer_list->active_timers_lock); + /* No expired timers left. The checkpoint can be skipped + * if no timers fired or they were all external.
+ */ break; } + if (need_replay_checkpoint + && !(ts->attributes & QEMU_TIMER_ATTR_EXTERNAL)) { + /* once we got here, checkpoint clock only once */ + need_replay_checkpoint = false; + qemu_mutex_unlock(&timer_list->active_timers_lock); + if (!replay_checkpoint(CHECKPOINT_CLOCK_VIRTUAL)) { + goto out; + } + qemu_mutex_lock(&timer_list->active_timers_lock); + /* The lock was released; start over again in case the list was + * modified. + */ + continue; + } /* remove timer from the list before calling the callback */ timer_list->active_timers = ts->next; @@ -531,12 +568,15 @@ bool timerlist_run_timers(QEMUTimerList *timer_list) ts->expire_time = -1; cb = ts->cb; opaque = ts->opaque; - qemu_mutex_unlock(&timer_list->active_timers_lock); /* run the callback (the timer list can be modified) */ + qemu_mutex_unlock(&timer_list->active_timers_lock); cb(opaque); + qemu_mutex_lock(&timer_list->active_timers_lock); + progress = true; } + qemu_mutex_unlock(&timer_list->active_timers_lock); out: qemu_event_set(&timer_list->timers_done_ev); @@ -598,7 +638,6 @@ int64_t qemu_clock_get_ns(QEMUClockType type) return get_clock(); default: case QEMU_CLOCK_VIRTUAL: - case QEMU_CLOCK_VIRTUAL_EXT: if (use_icount) { return cpu_get_icount(); } else { @@ -147,8 +147,15 @@ bool enable_cpu_pm = false; int nb_nics; NICInfo nd_table[MAX_NICS]; int autostart; -static int rtc_utc = 1; -static int rtc_date_offset = -1; /* -1 means no change */ +static enum { + RTC_BASE_UTC, + RTC_BASE_LOCALTIME, + RTC_BASE_DATETIME, +} rtc_base_type = RTC_BASE_UTC; +static time_t rtc_ref_start_datetime; +static int rtc_realtime_clock_offset; /* used only with QEMU_CLOCK_REALTIME */ +static int rtc_host_datetime_offset = -1; /* valid & used only with + RTC_BASE_DATETIME */ QEMUClockType rtc_clock; int vga_interface_type = VGA_NONE; static DisplayOptions dpy; @@ -242,6 +249,7 @@ static struct { static QemuOptsList qemu_rtc_opts = { .name = "rtc", .head = QTAILQ_HEAD_INITIALIZER(qemu_rtc_opts.head), + .merge_lists = true, .desc = { { .name = "base", @@ -780,28 +788,42 @@ void qemu_system_vmstop_request(RunState state) } /***********************************************************/ -/* real time host monotonic timer */ - -static time_t qemu_time(void) +/* RTC reference time/date access */ +static time_t qemu_ref_timedate(QEMUClockType clock) { - return qemu_clock_get_ms(QEMU_CLOCK_HOST) / 1000; + time_t value = qemu_clock_get_ms(clock) / 1000; + switch (clock) { + case QEMU_CLOCK_REALTIME: + value -= rtc_realtime_clock_offset; + /* no break */ + case QEMU_CLOCK_VIRTUAL: + value += rtc_ref_start_datetime; + break; + case QEMU_CLOCK_HOST: + if (rtc_base_type == RTC_BASE_DATETIME) { + value -= rtc_host_datetime_offset; + } + break; + default: + assert(0); + } + return value; } -/***********************************************************/ -/* host time/date access */ void qemu_get_timedate(struct tm *tm, int offset) { - time_t ti = qemu_time(); + time_t ti = qemu_ref_timedate(rtc_clock); ti += offset; - if (rtc_date_offset == -1) { - if (rtc_utc) - gmtime_r(&ti, tm); - else - localtime_r(&ti, tm); - } else { - ti -= rtc_date_offset; + + switch (rtc_base_type) { + case RTC_BASE_DATETIME: + case RTC_BASE_UTC: gmtime_r(&ti, tm); + break; + case RTC_BASE_LOCALTIME: + localtime_r(&ti, tm); + break; } } @@ -809,23 +831,28 @@ int qemu_timedate_diff(struct tm *tm) { time_t seconds; - if (rtc_date_offset == -1) - if (rtc_utc) - seconds = mktimegm(tm); - else { - struct tm tmp = *tm; - tmp.tm_isdst = -1; /* use timezone to figure it out */ - seconds = 
@@ -1059,12 +1093,12 @@ static int parse_add_fd(void *opaque, QemuOpts *opts, Error **errp)
     fd_opaque = qemu_opt_get(opts, "opaque");
 
     if (fd < 0) {
-        error_report("fd option is required and must be non-negative");
+        error_setg(errp, "fd option is required and must be non-negative");
         return -1;
     }
 
     if (fd <= STDERR_FILENO) {
-        error_report("fd cannot be a standard I/O stream");
+        error_setg(errp, "fd cannot be a standard I/O stream");
         return -1;
     }
 
@@ -1074,12 +1108,12 @@ static int parse_add_fd(void *opaque, QemuOpts *opts, Error **errp)
      */
     flags = fcntl(fd, F_GETFD);
     if (flags == -1 || (flags & FD_CLOEXEC)) {
-        error_report("fd is not valid or already in use");
+        error_setg(errp, "fd is not valid or already in use");
         return -1;
     }
 
     if (fdset_id < 0) {
-        error_report("set option is required and must be non-negative");
+        error_setg(errp, "set option is required and must be non-negative");
         return -1;
     }
 
@@ -1092,7 +1126,7 @@ static int parse_add_fd(void *opaque, QemuOpts *opts, Error **errp)
     }
 #endif
     if (dupfd == -1) {
-        error_report("error duplicating fd: %s", strerror(errno));
+        error_setg(errp, "error duplicating fd: %s", strerror(errno));
         return -1;
     }
 
@@ -1129,7 +1163,7 @@ static int drive_init_func(void *opaque, QemuOpts *opts, Error **errp)
 {
     BlockInterfaceType *block_default_type = opaque;
 
-    return drive_new(opts, *block_default_type) == NULL;
+    return drive_new(opts, *block_default_type, errp) == NULL;
 }
 
 static int drive_enable_snapshot(void *opaque, QemuOpts *opts, Error **errp)
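These hunks establish the pattern the rest of the patch repeats: a qemu_opts_foreach() callback no longer prints a message and exits itself; it fills *errp and returns -1, leaving the policy (report, propagate, or abort) to the Error ** the caller passes in. A minimal sketch of the convention, using a made-up option group "example" and callback parse_example():

    #include "qemu/osdep.h"
    #include "qapi/error.h"
    #include "qemu/option.h"

    /* Hypothetical callback following the patch's convention: describe
     * the failure via errp and return -1; never call error_report() or
     * exit() directly. */
    static int parse_example(void *opaque, QemuOpts *opts, Error **errp)
    {
        const char *name = qemu_opt_get(opts, "name");

        if (!name) {
            error_setg(errp, "name option is required");
            return -1;
        }
        return 0;
    }

    /* The caller chooses the policy.  Passing &error_fatal makes any
     * callback error print and exit(1), replacing the old explicit
     * "if (qemu_opts_foreach(...)) { exit(1); }" dance. */
    static void apply_example_opts(void)
    {
        qemu_opts_foreach(qemu_find_opts("example"),
                          parse_example, NULL, &error_fatal);
    }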
@@ -1155,10 +1189,7 @@ static void default_drive(int enable, int snapshot, BlockInterfaceType type,
         drive_enable_snapshot(NULL, opts, NULL);
     }
 
-    dinfo = drive_new(opts, type);
-    if (!dinfo) {
-        exit(1);
-    }
+    dinfo = drive_new(opts, type, &error_abort);
     dinfo->is_default = true;
 }
 
@@ -2002,15 +2033,10 @@ static void select_vgahw(const char *p)
 
 static void parse_display_qapi(const char *optarg)
 {
-    Error *err = NULL;
     DisplayOptions *opts;
     Visitor *v;
 
-    v = qobject_input_visitor_new_str(optarg, "type", &err);
-    if (!v) {
-        error_report_err(err);
-        exit(1);
-    }
+    v = qobject_input_visitor_new_str(optarg, "type", &error_fatal);
 
     visit_type_DisplayOptions(v, NULL, &opts, &error_fatal);
     QAPI_CLONE_MEMBERS(DisplayOptions, &dpy, opts);
@@ -2179,7 +2205,7 @@ static int parse_fw_cfg(void *opaque, QemuOpts *opts, Error **errp)
     FWCfgState *fw_cfg = (FWCfgState *) opaque;
 
     if (fw_cfg == NULL) {
-        error_report("fw_cfg device not available");
+        error_setg(errp, "fw_cfg device not available");
         return -1;
     }
     name = qemu_opt_get(opts, "name");
@@ -2188,15 +2214,16 @@ static int parse_fw_cfg(void *opaque, QemuOpts *opts, Error **errp)
 
     /* we need name and either a file or the content string */
     if (!(nonempty_str(name) && (nonempty_str(file) || nonempty_str(str)))) {
-        error_report("invalid argument(s)");
+        error_setg(errp, "invalid argument(s)");
         return -1;
     }
     if (nonempty_str(file) && nonempty_str(str)) {
-        error_report("file and string are mutually exclusive");
+        error_setg(errp, "file and string are mutually exclusive");
         return -1;
     }
     if (strlen(name) > FW_CFG_MAX_FILE_PATH - 1) {
-        error_report("name too long (max. %d char)", FW_CFG_MAX_FILE_PATH - 1);
+        error_setg(errp, "name too long (max. %d char)",
+                   FW_CFG_MAX_FILE_PATH - 1);
         return -1;
     }
     if (strncmp(name, "opt/", 4) != 0) {
@@ -2208,7 +2235,7 @@ static int parse_fw_cfg(void *opaque, QemuOpts *opts, Error **errp)
         buf = g_memdup(str, size);
     } else {
         if (!g_file_get_contents(file, &buf, &size, NULL)) {
-            error_report("can't load %s", file);
+            error_setg(errp, "can't load %s", file);
             return -1;
         }
     }
@@ -2226,12 +2253,10 @@ static int device_help_func(void *opaque, QemuOpts *opts, Error **errp)
 
 static int device_init_func(void *opaque, QemuOpts *opts, Error **errp)
 {
-    Error *err = NULL;
     DeviceState *dev;
 
-    dev = qdev_device_add(opts, &err);
+    dev = qdev_device_add(opts, errp);
     if (!dev) {
-        error_report_err(err);
         return -1;
     }
     object_unref(OBJECT(dev));
@@ -2244,7 +2269,7 @@ static int chardev_init_func(void *opaque, QemuOpts *opts, Error **errp)
 
     if (!qemu_chr_new_from_opts(opts, &local_err)) {
         if (local_err) {
-            error_report_err(local_err);
+            error_propagate(errp, local_err);
             return -1;
         }
         exit(0);
@@ -2255,7 +2280,7 @@ static int chardev_init_func(void *opaque, QemuOpts *opts, Error **errp)
 #ifdef CONFIG_VIRTFS
 static int fsdev_init_func(void *opaque, QemuOpts *opts, Error **errp)
 {
-    return qemu_fsdev_add(opts);
+    return qemu_fsdev_add(opts, errp);
 }
 #endif
 
@@ -2275,8 +2300,8 @@ static int mon_init_func(void *opaque, QemuOpts *opts, Error **errp)
     } else if (strcmp(mode, "control") == 0) {
         flags = MONITOR_USE_CONTROL;
     } else {
-        error_report("unknown monitor mode \"%s\"", mode);
-        exit(1);
+        error_setg(errp, "unknown monitor mode \"%s\"", mode);
+        return -1;
     }
 
     if (qemu_opt_get_bool(opts, "pretty", 0))
@@ -2290,8 +2315,8 @@ static int mon_init_func(void *opaque, QemuOpts *opts, Error **errp)
     chardev = qemu_opt_get(opts, "chardev");
     chr = qemu_chr_find(chardev);
     if (chr == NULL) {
-        error_report("chardev \"%s\" not found", chardev);
-        exit(1);
+        error_setg(errp, "chardev \"%s\" not found", chardev);
+        return -1;
     }
 
     monitor_init(chr, flags);
@@ -2466,6 +2491,7 @@ static int debugcon_parse(const char *devname)
     QemuOpts *opts;
 
     if (!qemu_chr_new_mux_mon("debugcon", devname)) {
+        error_report("invalid character backend '%s'", devname);
         exit(1);
     }
     opts = qemu_opts_create(qemu_find_opts("device"), "debugcon", 1, NULL);
@@ -2681,7 +2707,7 @@ static int machine_set_property(void *opaque,
     g_free(qom_name);
 
     if (local_err) {
-        error_report_err(local_err);
+        error_propagate(errp, local_err);
         return -1;
     }
 
@@ -3017,7 +3043,6 @@ int main(int argc, char **argv, char **envp)
         error_reportf_err(err, "cannot initialize crypto: ");
         exit(1);
     }
-    rtc_clock = QEMU_CLOCK_HOST;
 
     QLIST_INIT (&vm_change_state_head);
     os_setup_early_signal_handling();
@@ -3737,7 +3762,6 @@ int main(int argc, char **argv, char **envp)
                 if (!opts) {
                     exit(1);
                 }
-                configure_rtc(opts);
                 break;
             case QEMU_OPTION_tb_size:
 #ifndef CONFIG_TCG
@@ -3955,6 +3979,8 @@ int main(int argc, char **argv, char **envp)
         exit(EXIT_FAILURE);
     }
 
+    configure_rtc(qemu_find_opts_singleton("rtc"));
+
     machine_class = select_machine();
 
     set_memory_options(&ram_slots, &maxram_size, machine_class);
@@ -3977,26 +4003,20 @@ int main(int argc, char **argv, char **envp)
 
 #ifdef CONFIG_SECCOMP
     olist = qemu_find_opts_err("sandbox", NULL);
-    if (olist && qemu_opts_foreach(olist, parse_sandbox, NULL, NULL)) {
-        exit(1);
+    if (olist) {
+        qemu_opts_foreach(olist, parse_sandbox, NULL, &error_fatal);
     }
 #endif
 
-    if (qemu_opts_foreach(qemu_find_opts("name"),
-                          parse_name, NULL, NULL)) {
-        exit(1);
-    }
+    qemu_opts_foreach(qemu_find_opts("name"),
+                      parse_name, NULL, &error_fatal);
 
 #ifndef _WIN32
-    if (qemu_opts_foreach(qemu_find_opts("add-fd"),
-                          parse_add_fd, NULL, NULL)) {
-        exit(1);
-    }
+    qemu_opts_foreach(qemu_find_opts("add-fd"),
+                      parse_add_fd, NULL, &error_fatal);
 
-    if (qemu_opts_foreach(qemu_find_opts("add-fd"),
-                          cleanup_add_fd, NULL, NULL)) {
-        exit(1);
-    }
+    qemu_opts_foreach(qemu_find_opts("add-fd"),
+                      cleanup_add_fd, NULL, &error_fatal);
 #endif
 
     current_machine = MACHINE(object_new(object_class_get_name(
@@ -4237,22 +4257,16 @@ int main(int argc, char **argv, char **envp)
     page_size_init();
     socket_init();
 
-    if (qemu_opts_foreach(qemu_find_opts("object"),
-                          user_creatable_add_opts_foreach,
-                          object_create_initial, NULL)) {
-        exit(1);
-    }
+    qemu_opts_foreach(qemu_find_opts("object"),
+                      user_creatable_add_opts_foreach,
+                      object_create_initial, &error_fatal);
 
-    if (qemu_opts_foreach(qemu_find_opts("chardev"),
-                          chardev_init_func, NULL, NULL)) {
-        exit(1);
-    }
+    qemu_opts_foreach(qemu_find_opts("chardev"),
+                      chardev_init_func, NULL, &error_fatal);
 
 #ifdef CONFIG_VIRTFS
-    if (qemu_opts_foreach(qemu_find_opts("fsdev"),
-                          fsdev_init_func, NULL, NULL)) {
-        exit(1);
-    }
+    qemu_opts_foreach(qemu_find_opts("fsdev"),
+                      fsdev_init_func, NULL, &error_fatal);
 #endif
 
     if (qemu_opts_foreach(qemu_find_opts("device"),
@@ -4261,11 +4275,8 @@ int main(int argc, char **argv, char **envp)
     }
 
     machine_opts = qemu_get_machine_opts();
-    if (qemu_opt_foreach(machine_opts, machine_set_property, current_machine,
-                         NULL)) {
-        object_unref(OBJECT(current_machine));
-        exit(1);
-    }
+    qemu_opt_foreach(machine_opts, machine_set_property, current_machine,
+                     &error_fatal);
 
     configure_accelerator(current_machine);
 
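One property of &error_fatal in these call sites is worth spelling out: qemu_opts_foreach() propagates a callback's error into its errp argument, so with &error_fatal the process exits before the loop ever returns non-zero with an error set. Any non-zero return that does reach the caller therefore carries no error, which is what the drive hunk below exploits to turn "help was printed" into exit(0). A small model of that contract, with made-up names (demo_cb, the "demo" group, and the bad/help options are all hypothetical):

    #include "qemu/osdep.h"
    #include "qapi/error.h"
    #include "qemu/option.h"

    /* Hypothetical callback modelling the two distinct outcomes. */
    static int demo_cb(void *opaque, QemuOpts *opts, Error **errp)
    {
        if (qemu_opt_get_bool(opts, "bad", false)) {
            error_setg(errp, "bad option"); /* &error_fatal => exit(1) */
            return -1;
        }
        if (qemu_opt_get_bool(opts, "help", false)) {
            return 1; /* non-zero but no error: survives &error_fatal */
        }
        return 0;
    }

    static void demo(void)
    {
        /* With &error_fatal, reaching the line after the call means no
         * callback set an error; a non-zero result can only be the
         * "help" case. */
        if (qemu_opts_foreach(qemu_find_opts("demo"), demo_cb, NULL,
                              &error_fatal)) {
            exit(0);
        }
    }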
@@ -4365,22 +4376,16 @@ int main(int argc, char **argv, char **envp)
 #endif
     }
 
-    colo_info_init();
-
     if (net_init_clients(&err) < 0) {
         error_report_err(err);
         exit(1);
     }
 
-    if (qemu_opts_foreach(qemu_find_opts("object"),
-                          user_creatable_add_opts_foreach,
-                          object_create_delayed, NULL)) {
-        exit(1);
-    }
+    qemu_opts_foreach(qemu_find_opts("object"),
+                      user_creatable_add_opts_foreach,
+                      object_create_delayed, &error_fatal);
 
-    if (tpm_init() < 0) {
-        exit(1);
-    }
+    tpm_init();
 
     /* init the bluetooth world */
     if (foreach_device_config(DEV_BT, bt_parse))
@@ -4421,8 +4426,9 @@ int main(int argc, char **argv, char **envp)
                           NULL, NULL);
     }
     if (qemu_opts_foreach(qemu_find_opts("drive"), drive_init_func,
-                          &machine_class->block_default_type, NULL)) {
-        exit(1);
+                          &machine_class->block_default_type, &error_fatal)) {
+        /* We printed help */
+        exit(0);
     }
     default_drive(default_cdrom, snapshot, machine_class->block_default_type, 2,
@@ -4430,10 +4436,8 @@ int main(int argc, char **argv, char **envp)
     default_drive(default_floppy, snapshot, IF_FLOPPY, 0, FD_OPTS);
     default_drive(default_sdcard, snapshot, IF_SD, 0, SD_OPTS);
 
-    if (qemu_opts_foreach(qemu_find_opts("mon"),
-                          mon_init_func, NULL, NULL)) {
-        exit(1);
-    }
+    qemu_opts_foreach(qemu_find_opts("mon"),
+                      mon_init_func, NULL, &error_fatal);
 
     if (foreach_device_config(DEV_SERIAL, serial_parse) < 0)
         exit(1);
@@ -4496,10 +4500,8 @@ int main(int argc, char **argv, char **envp)
         hax_sync_vcpus();
     }
 
-    if (qemu_opts_foreach(qemu_find_opts("fw_cfg"),
-                          parse_fw_cfg, fw_cfg_find(), NULL) != 0) {
-        exit(1);
-    }
+    qemu_opts_foreach(qemu_find_opts("fw_cfg"),
+                      parse_fw_cfg, fw_cfg_find(), &error_fatal);
 
     /* init USB devices */
     if (machine_usb(current_machine)) {
@@ -4512,10 +4514,8 @@ int main(int argc, char **argv, char **envp)
 
     /* init generic devices */
     rom_set_order_override(FW_CFG_ORDER_OVERRIDE_DEVICE);
-    if (qemu_opts_foreach(qemu_find_opts("device"),
-                          device_init_func, NULL, NULL)) {
-        exit(1);
-    }
+    qemu_opts_foreach(qemu_find_opts("device"),
+                      device_init_func, NULL, &error_fatal);
 
     cpu_synchronize_all_post_init();
 
@@ -4551,7 +4551,7 @@ int main(int argc, char **argv, char **envp)
     /* init remote displays */
 #ifdef CONFIG_VNC
     qemu_opts_foreach(qemu_find_opts("vnc"),
-                      vnc_init_func, NULL, NULL);
+                      vnc_init_func, NULL, &error_fatal);
 #endif
 
     if (using_spice) {