commit     20d695a9254c1b086a456d3b79a3c311236643ba
tree       085c1d82173a98f6192bca36ee4ae2c8be3cd611
parent     cpu: Move kvm_fd into CPUState
author     Andreas Färber   2012-10-31 06:57:49 +0100
committer  Andreas Färber   2012-12-19 14:09:31 +0100
kvm: Pass CPUState to kvm_arch_*
Move kvm_vcpu_dirty field into CPUState to simplify things and change
its type to bool while at it.
Signed-off-by: Andreas Färber <afaerber@suse.de>
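
Note (not part of the patch): the conversion pattern is the same in every target. The kvm_arch_* hooks now take a CPUState, and each target recovers its private register file through a QOM cast (X86_CPU(), POWERPC_CPU(), S390_CPU()) instead of being handed a CPUArchState directly, while generic code derives the CPUState via ENV_GET_CPU(). The following is a minimal, self-contained sketch of that derivation using simplified stand-in types and a container_of-style cast; QEMU's real X86_CPU() is an OBJECT_CHECK-based QOM cast and its structs carry far more state.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins for QEMU's CPUState / X86CPU / CPUX86State. */
typedef struct CPUState {
    int kvm_fd;
    bool kvm_vcpu_dirty;    /* field moved here from CPUArchState by this patch */
} CPUState;

typedef struct CPUX86State {
    unsigned long cr0;
} CPUX86State;

typedef struct X86CPU {
    CPUState parent_obj;    /* QOM-style: the common CPUState is embedded first */
    CPUX86State env;
} X86CPU;

/* Stand-in for the X86_CPU() QOM cast: recover the container from the base. */
#define X86_CPU(cs) ((X86CPU *)((char *)(cs) - offsetof(X86CPU, parent_obj)))

/* New-style arch hook: takes CPUState, derives the arch env internally. */
static bool kvm_arch_stop_on_emulation_error(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    /* Simplified stand-in for the real check (protected-mode bit of CR0). */
    return !(env->cr0 & 1);
}

int main(void)
{
    X86CPU cpu = { .env.cr0 = 0 };

    /* Callers only hold a CPUState; the target code does the downcast. */
    printf("stop: %d\n", kvm_arch_stop_on_emulation_error(&cpu.parent_obj));
    return 0;
}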
-rw-r--r--  cpu-defs.h          |  3
-rw-r--r--  include/qemu/cpu.h  |  1
-rw-r--r--  kvm-all.c           | 75
-rw-r--r--  kvm.h               | 26
-rw-r--r--  target-i386/kvm.c   | 59
-rw-r--r--  target-ppc/kvm.c    | 36
-rw-r--r--  target-s390x/kvm.c  | 38
7 files changed, 146 insertions(+), 92 deletions(-)
diff --git a/cpu-defs.h b/cpu-defs.h
index 6373a808f9..a382e35248 100644
--- a/cpu-defs.h
+++ b/cpu-defs.h
@@ -206,7 +206,6 @@ typedef struct CPUWatchpoint {
                                             \
     const char *cpu_model_str;              \
     struct KVMState *kvm_state;             \
-    struct kvm_run *kvm_run;                \
-    int kvm_vcpu_dirty;
+    struct kvm_run *kvm_run;
 #endif
diff --git a/include/qemu/cpu.h b/include/qemu/cpu.h
index 6049a20a4d..b8f8dd1d13 100644
--- a/include/qemu/cpu.h
+++ b/include/qemu/cpu.h
@@ -80,6 +80,7 @@ struct CPUState {
 #if !defined(CONFIG_USER_ONLY)
     int kvm_fd;
+    bool kvm_vcpu_dirty;
 #endif
     /* TODO Move common fields from CPUArchState here. */
diff --git a/kvm-all.c b/kvm-all.c
--- a/kvm-all.c
+++ b/kvm-all.c
@@ -209,9 +209,9 @@ static int kvm_set_user_memory_region(KVMState *s, KVMSlot *slot)
 static void kvm_reset_vcpu(void *opaque)
 {
-    CPUArchState *env = opaque;
+    CPUState *cpu = opaque;
-    kvm_arch_reset_vcpu(env);
+    kvm_arch_reset_vcpu(cpu);
 }
 int kvm_init_vcpu(CPUArchState *env)
@@ -231,7 +231,7 @@ int kvm_init_vcpu(CPUArchState *env)
     cpu->kvm_fd = ret;
     env->kvm_state = s;
-    env->kvm_vcpu_dirty = 1;
+    cpu->kvm_vcpu_dirty = true;
     mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
     if (mmap_size < 0) {
@@ -253,10 +253,10 @@ int kvm_init_vcpu(CPUArchState *env)
             (void *)env->kvm_run + s->coalesced_mmio * PAGE_SIZE;
     }
-    ret = kvm_arch_init_vcpu(env);
+    ret = kvm_arch_init_vcpu(cpu);
     if (ret == 0) {
-        qemu_register_reset(kvm_reset_vcpu, env);
-        kvm_arch_reset_vcpu(env);
+        qemu_register_reset(kvm_reset_vcpu, cpu);
+        kvm_arch_reset_vcpu(cpu);
     }
 err:
     return ret;
@@ -1438,6 +1438,8 @@ static void kvm_handle_io(uint16_t port, void *data, int direction, int size,
 static int kvm_handle_internal_error(CPUArchState *env, struct kvm_run *run)
 {
+    CPUState *cpu = ENV_GET_CPU(env);
+
     fprintf(stderr, "KVM internal error.");
     if (kvm_check_extension(kvm_state, KVM_CAP_INTERNAL_ERROR_DATA)) {
         int i;
@@ -1452,7 +1454,7 @@ static int kvm_handle_internal_error(CPUArchState *env, struct kvm_run *run)
     }
     if (run->internal.suberror == KVM_INTERNAL_ERROR_EMULATION) {
         fprintf(stderr, "emulation failure\n");
-        if (!kvm_arch_stop_on_emulation_error(env)) {
+        if (!kvm_arch_stop_on_emulation_error(cpu)) {
             cpu_dump_state(env, stderr, fprintf, CPU_DUMP_CODE);
             return EXCP_INTERRUPT;
         }
@@ -1489,13 +1491,13 @@ void kvm_flush_coalesced_mmio_buffer(void)
     s->coalesced_flush_in_progress = false;
 }
-static void do_kvm_cpu_synchronize_state(void *_env)
+static void do_kvm_cpu_synchronize_state(void *arg)
 {
-    CPUArchState *env = _env;
+    CPUState *cpu = arg;
-    if (!env->kvm_vcpu_dirty) {
-        kvm_arch_get_registers(env);
-        env->kvm_vcpu_dirty = 1;
+    if (!cpu->kvm_vcpu_dirty) {
+        kvm_arch_get_registers(cpu);
+        cpu->kvm_vcpu_dirty = true;
     }
 }
@@ -1503,42 +1505,47 @@ void kvm_cpu_synchronize_state(CPUArchState *env)
 {
     CPUState *cpu = ENV_GET_CPU(env);
-    if (!env->kvm_vcpu_dirty) {
-        run_on_cpu(cpu, do_kvm_cpu_synchronize_state, env);
+    if (!cpu->kvm_vcpu_dirty) {
+        run_on_cpu(cpu, do_kvm_cpu_synchronize_state, cpu);
     }
 }
 void kvm_cpu_synchronize_post_reset(CPUArchState *env)
 {
-    kvm_arch_put_registers(env, KVM_PUT_RESET_STATE);
-    env->kvm_vcpu_dirty = 0;
+    CPUState *cpu = ENV_GET_CPU(env);
+
+    kvm_arch_put_registers(cpu, KVM_PUT_RESET_STATE);
+    cpu->kvm_vcpu_dirty = false;
 }
 void kvm_cpu_synchronize_post_init(CPUArchState *env)
 {
-    kvm_arch_put_registers(env, KVM_PUT_FULL_STATE);
-    env->kvm_vcpu_dirty = 0;
+    CPUState *cpu = ENV_GET_CPU(env);
+
+    kvm_arch_put_registers(cpu, KVM_PUT_FULL_STATE);
+    cpu->kvm_vcpu_dirty = false;
 }
 int kvm_cpu_exec(CPUArchState *env)
 {
+    CPUState *cpu = ENV_GET_CPU(env);
     struct kvm_run *run = env->kvm_run;
     int ret, run_ret;
     DPRINTF("kvm_cpu_exec()\n");
-    if (kvm_arch_process_async_events(env)) {
+    if (kvm_arch_process_async_events(cpu)) {
         env->exit_request = 0;
         return EXCP_HLT;
     }
     do {
-        if (env->kvm_vcpu_dirty) {
-            kvm_arch_put_registers(env, KVM_PUT_RUNTIME_STATE);
-            env->kvm_vcpu_dirty = 0;
+        if (cpu->kvm_vcpu_dirty) {
+            kvm_arch_put_registers(cpu, KVM_PUT_RUNTIME_STATE);
+            cpu->kvm_vcpu_dirty = false;
         }
-        kvm_arch_pre_run(env, run);
+        kvm_arch_pre_run(cpu, run);
         if (env->exit_request) {
             DPRINTF("interrupt exit requested\n");
             /*
@@ -1553,7 +1560,7 @@ int kvm_cpu_exec(CPUArchState *env)
         run_ret = kvm_vcpu_ioctl(env, KVM_RUN, 0);
         qemu_mutex_lock_iothread();
-        kvm_arch_post_run(env, run);
+        kvm_arch_post_run(cpu, run);
         if (run_ret < 0) {
             if (run_ret == -EINTR || run_ret == -EAGAIN) {
@@ -1603,7 +1610,7 @@ int kvm_cpu_exec(CPUArchState *env)
             break;
         default:
             DPRINTF("kvm_arch_handle_exit\n");
-            ret = kvm_arch_handle_exit(env, run);
+            ret = kvm_arch_handle_exit(cpu, run);
             break;
         }
     } while (ret == 0);
@@ -1799,7 +1806,7 @@ int kvm_update_guest_debug(CPUArchState *env, unsigned long reinject_trap)
     if (env->singlestep_enabled) {
         data.dbg.control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
     }
-    kvm_arch_update_guest_debug(env, &data.dbg);
+    kvm_arch_update_guest_debug(cpu, &data.dbg);
     data.env = env;
     run_on_cpu(cpu, kvm_invoke_set_guest_debug, &data);
@@ -1809,6 +1816,7 @@ int kvm_update_guest_debug(CPUArchState *env, unsigned long reinject_trap)
 int kvm_insert_breakpoint(CPUArchState *current_env, target_ulong addr,
                           target_ulong len, int type)
 {
+    CPUState *current_cpu = ENV_GET_CPU(current_env);
     struct kvm_sw_breakpoint *bp;
     CPUArchState *env;
     int err;
@@ -1827,7 +1835,7 @@ int kvm_insert_breakpoint(CPUArchState *current_env, target_ulong addr,
         bp->pc = addr;
         bp->use_count = 1;
-        err = kvm_arch_insert_sw_breakpoint(current_env, bp);
+        err = kvm_arch_insert_sw_breakpoint(current_cpu, bp);
         if (err) {
             g_free(bp);
             return err;
@@ -1854,6 +1862,7 @@ int kvm_insert_breakpoint(CPUArchState *current_env, target_ulong addr,
 int kvm_remove_breakpoint(CPUArchState *current_env, target_ulong addr,
                           target_ulong len, int type)
 {
+    CPUState *current_cpu = ENV_GET_CPU(current_env);
     struct kvm_sw_breakpoint *bp;
     CPUArchState *env;
     int err;
@@ -1869,7 +1878,7 @@ int kvm_remove_breakpoint(CPUArchState *current_env, target_ulong addr,
             return 0;
         }
-        err = kvm_arch_remove_sw_breakpoint(current_env, bp);
+        err = kvm_arch_remove_sw_breakpoint(current_cpu, bp);
         if (err) {
             return err;
         }
@@ -1894,15 +1903,18 @@ int kvm_remove_breakpoint(CPUArchState *current_env, target_ulong addr,
 void kvm_remove_all_breakpoints(CPUArchState *current_env)
 {
+    CPUState *current_cpu = ENV_GET_CPU(current_env);
     struct kvm_sw_breakpoint *bp, *next;
     KVMState *s = current_env->kvm_state;
     CPUArchState *env;
+    CPUState *cpu;
     QTAILQ_FOREACH_SAFE(bp, &s->kvm_sw_breakpoints, entry, next) {
-        if (kvm_arch_remove_sw_breakpoint(current_env, bp) != 0) {
+        if (kvm_arch_remove_sw_breakpoint(current_cpu, bp) != 0) {
             /* Try harder to find a CPU that currently sees the breakpoint. */
             for (env = first_cpu; env != NULL; env = env->next_cpu) {
-                if (kvm_arch_remove_sw_breakpoint(env, bp) == 0) {
+                cpu = ENV_GET_CPU(env);
+                if (kvm_arch_remove_sw_breakpoint(cpu, bp) == 0) {
                     break;
                 }
             }
@@ -2014,7 +2026,8 @@ int kvm_set_ioeventfd_pio_word(int fd, uint16_t addr, uint16_t val, bool assign)
 int kvm_on_sigbus_vcpu(CPUArchState *env, int code, void *addr)
 {
-    return kvm_arch_on_sigbus_vcpu(env, code, addr);
+    CPUState *cpu = ENV_GET_CPU(env);
+    return kvm_arch_on_sigbus_vcpu(cpu, code, addr);
 }
 int kvm_on_sigbus(int code, void *addr)
diff --git a/kvm.h b/kvm.h
--- a/kvm.h
+++ b/kvm.h
@@ -158,14 +158,14 @@ int kvm_vcpu_ioctl(CPUArchState *env, int type, ...);
 extern const KVMCapabilityInfo kvm_arch_required_capabilities[];
-void kvm_arch_pre_run(CPUArchState *env, struct kvm_run *run);
-void kvm_arch_post_run(CPUArchState *env, struct kvm_run *run);
+void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run);
+void kvm_arch_post_run(CPUState *cpu, struct kvm_run *run);
-int kvm_arch_handle_exit(CPUArchState *env, struct kvm_run *run);
+int kvm_arch_handle_exit(CPUState *cpu, struct kvm_run *run);
-int kvm_arch_process_async_events(CPUArchState *env);
+int kvm_arch_process_async_events(CPUState *cpu);
-int kvm_arch_get_registers(CPUArchState *env);
+int kvm_arch_get_registers(CPUState *cpu);
 /* state subset only touched by the VCPU itself during runtime */
 #define KVM_PUT_RUNTIME_STATE   1
@@ -174,15 +174,15 @@ int kvm_arch_get_registers(CPUArchState *env);
 /* full state set, modified during initialization or on vmload */
 #define KVM_PUT_FULL_STATE      3
-int kvm_arch_put_registers(CPUArchState *env, int level);
+int kvm_arch_put_registers(CPUState *cpu, int level);
 int kvm_arch_init(KVMState *s);
-int kvm_arch_init_vcpu(CPUArchState *env);
+int kvm_arch_init_vcpu(CPUState *cpu);
-void kvm_arch_reset_vcpu(CPUArchState *env);
+void kvm_arch_reset_vcpu(CPUState *cpu);
-int kvm_arch_on_sigbus_vcpu(CPUArchState *env, int code, void *addr);
+int kvm_arch_on_sigbus_vcpu(CPUState *cpu, int code, void *addr);
 int kvm_arch_on_sigbus(int code, void *addr);
 void kvm_arch_init_irq_routing(KVMState *s);
@@ -212,9 +212,9 @@ struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUArchState *env,
 int kvm_sw_breakpoints_active(CPUArchState *env);
-int kvm_arch_insert_sw_breakpoint(CPUArchState *current_env,
+int kvm_arch_insert_sw_breakpoint(CPUState *current_cpu,
                                   struct kvm_sw_breakpoint *bp);
-int kvm_arch_remove_sw_breakpoint(CPUArchState *current_env,
+int kvm_arch_remove_sw_breakpoint(CPUState *current_cpu,
                                   struct kvm_sw_breakpoint *bp);
 int kvm_arch_insert_hw_breakpoint(target_ulong addr,
                                   target_ulong len, int type);
@@ -222,9 +222,9 @@ int kvm_arch_remove_hw_breakpoint(target_ulong addr,
                                   target_ulong len, int type);
 void kvm_arch_remove_all_hw_breakpoints(void);
-void kvm_arch_update_guest_debug(CPUArchState *env, struct kvm_guest_debug *dbg);
+void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg);
-bool kvm_arch_stop_on_emulation_error(CPUArchState *env);
+bool kvm_arch_stop_on_emulation_error(CPUState *cpu);
 int kvm_check_extension(KVMState *s, unsigned int extension);
diff --git a/target-i386/kvm.c b/target-i386/kvm.c
index f669281e13..80cacf3764 100644
--- a/target-i386/kvm.c
+++ b/target-i386/kvm.c
@@ -306,9 +306,10 @@ static void hardware_memory_error(void)
     exit(1);
 }
-int kvm_arch_on_sigbus_vcpu(CPUX86State *env, int code, void *addr)
+int kvm_arch_on_sigbus_vcpu(CPUState *c, int code, void *addr)
 {
-    X86CPU *cpu = x86_env_get_cpu(env);
+    X86CPU *cpu = X86_CPU(c);
+    CPUX86State *env = &cpu->env;
     ram_addr_t ram_addr;
     hwaddr paddr;
@@ -406,12 +407,14 @@ static void cpu_update_state(void *opaque, int running, RunState state)
     }
 }
-int kvm_arch_init_vcpu(CPUX86State *env)
+int kvm_arch_init_vcpu(CPUState *cs)
 {
     struct {
         struct kvm_cpuid2 cpuid;
         struct kvm_cpuid_entry2 entries[100];
     } QEMU_PACKED cpuid_data;
+    X86CPU *cpu = X86_CPU(cs);
+    CPUX86State *env = &cpu->env;
     uint32_t limit, i, j, cpuid_i;
     uint32_t unused;
     struct kvm_cpuid_entry2 *c;
@@ -623,9 +626,10 @@ int kvm_arch_init_vcpu(CPUX86State *env)
     return 0;
 }
-void kvm_arch_reset_vcpu(CPUX86State *env)
+void kvm_arch_reset_vcpu(CPUState *cs)
 {
-    X86CPU *cpu = x86_env_get_cpu(env);
+    X86CPU *cpu = X86_CPU(cs);
+    CPUX86State *env = &cpu->env;
     env->exception_injected = -1;
     env->interrupt_injected = -1;
@@ -1582,9 +1586,10 @@ static int kvm_get_debugregs(CPUX86State *env)
     return 0;
 }
-int kvm_arch_put_registers(CPUX86State *env, int level)
+int kvm_arch_put_registers(CPUState *cpu, int level)
 {
-    CPUState *cpu = ENV_GET_CPU(env);
+    X86CPU *x86_cpu = X86_CPU(cpu);
+    CPUX86State *env = &x86_cpu->env;
     int ret;
     assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));
@@ -1640,12 +1645,13 @@ int kvm_arch_put_registers(CPUX86State *env, int level)
     return 0;
 }
-int kvm_arch_get_registers(CPUX86State *env)
+int kvm_arch_get_registers(CPUState *cs)
 {
-    X86CPU *cpu = x86_env_get_cpu(env);
+    X86CPU *cpu = X86_CPU(cs);
+    CPUX86State *env = &cpu->env;
     int ret;
-    assert(cpu_is_stopped(CPU(cpu)) || qemu_cpu_is_self(CPU(cpu)));
+    assert(cpu_is_stopped(cs) || qemu_cpu_is_self(cs));
     ret = kvm_getput_regs(env, 0);
     if (ret < 0) {
@@ -1686,8 +1692,10 @@ int kvm_arch_get_registers(CPUX86State *env)
     return 0;
 }
-void kvm_arch_pre_run(CPUX86State *env, struct kvm_run *run)
+void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
 {
+    X86CPU *x86_cpu = X86_CPU(cpu);
+    CPUX86State *env = &x86_cpu->env;
     int ret;
     /* Inject NMI */
@@ -1746,8 +1754,11 @@ void kvm_arch_pre_run(CPUX86State *env, struct kvm_run *run)
     }
 }
-void kvm_arch_post_run(CPUX86State *env, struct kvm_run *run)
+void kvm_arch_post_run(CPUState *cpu, struct kvm_run *run)
 {
+    X86CPU *x86_cpu = X86_CPU(cpu);
+    CPUX86State *env = &x86_cpu->env;
+
     if (run->if_flag) {
         env->eflags |= IF_MASK;
     } else {
@@ -1757,9 +1768,10 @@ void kvm_arch_post_run(CPUX86State *env, struct kvm_run *run)
     cpu_set_apic_base(env->apic_state, run->apic_base);
 }
-int kvm_arch_process_async_events(CPUX86State *env)
+int kvm_arch_process_async_events(CPUState *cs)
 {
-    X86CPU *cpu = x86_env_get_cpu(env);
+    X86CPU *cpu = X86_CPU(cs);
+    CPUX86State *env = &cpu->env;
     if (env->interrupt_request & CPU_INTERRUPT_MCE) {
         /* We must not raise CPU_INTERRUPT_MCE if it's not supported. */
@@ -1839,8 +1851,9 @@ static int kvm_handle_tpr_access(CPUX86State *env)
     return 1;
 }
-int kvm_arch_insert_sw_breakpoint(CPUX86State *env, struct kvm_sw_breakpoint *bp)
+int kvm_arch_insert_sw_breakpoint(CPUState *cpu, struct kvm_sw_breakpoint *bp)
 {
+    CPUX86State *env = &X86_CPU(cpu)->env;
     static const uint8_t int3 = 0xcc;
     if (cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&bp->saved_insn, 1, 0) ||
@@ -1850,8 +1863,9 @@ int kvm_arch_insert_sw_breakpoint(CPUX86State *env, struct kvm_sw_breakpoint *bp
     return 0;
 }
-int kvm_arch_remove_sw_breakpoint(CPUX86State *env, struct kvm_sw_breakpoint *bp)
+int kvm_arch_remove_sw_breakpoint(CPUState *cpu, struct kvm_sw_breakpoint *bp)
 {
+    CPUX86State *env = &X86_CPU(cpu)->env;
     uint8_t int3;
     if (cpu_memory_rw_debug(env, bp->pc, &int3, 1, 0) || int3 != 0xcc ||
@@ -1994,8 +2008,9 @@ static int kvm_handle_debug(CPUX86State *env,
     return ret;
 }
-void kvm_arch_update_guest_debug(CPUX86State *env, struct kvm_guest_debug *dbg)
+void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg)
 {
+    CPUX86State *env = &X86_CPU(cpu)->env;
     const uint8_t type_code[] = {
         [GDB_BREAKPOINT_HW] = 0x0,
         [GDB_WATCHPOINT_WRITE] = 0x1,
@@ -2031,9 +2046,10 @@ static bool host_supports_vmx(void)
 #define VMX_INVALID_GUEST_STATE 0x80000021
-int kvm_arch_handle_exit(CPUX86State *env, struct kvm_run *run)
+int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
 {
-    X86CPU *cpu = x86_env_get_cpu(env);
+    X86CPU *cpu = X86_CPU(cs);
+    CPUX86State *env = &cpu->env;
     uint64_t code;
     int ret;
@@ -2083,8 +2099,11 @@ int kvm_arch_handle_exit(CPUX86State *env, struct kvm_run *run)
     return ret;
 }
-bool kvm_arch_stop_on_emulation_error(CPUX86State *env)
+bool kvm_arch_stop_on_emulation_error(CPUState *cs)
 {
+    X86CPU *cpu = X86_CPU(cs);
+    CPUX86State *env = &cpu->env;
+
     kvm_cpu_synchronize_state(env);
     return !(env->cr[0] & CR0_PE_MASK) ||
            ((env->segs[R_CS].selector & 3) != 3);
diff --git a/target-ppc/kvm.c b/target-ppc/kvm.c
index 3f5df5772f..8a59b706da 100644
--- a/target-ppc/kvm.c
+++ b/target-ppc/kvm.c
@@ -375,9 +375,10 @@ static inline void kvm_fixup_page_sizes(CPUPPCState *env)
 #endif /* !defined (TARGET_PPC64) */
-int kvm_arch_init_vcpu(CPUPPCState *cenv)
+int kvm_arch_init_vcpu(CPUState *cs)
 {
-    PowerPCCPU *cpu = ppc_env_get_cpu(cenv);
+    PowerPCCPU *cpu = POWERPC_CPU(cs);
+    CPUPPCState *cenv = &cpu->env;
     int ret;
     /* Gather server mmu info from KVM and update the CPU state */
@@ -403,7 +404,7 @@ int kvm_arch_init_vcpu(CPUPPCState *cenv)
     return ret;
 }
-void kvm_arch_reset_vcpu(CPUPPCState *env)
+void kvm_arch_reset_vcpu(CPUState *cpu)
 {
 }
@@ -432,8 +433,10 @@ static void kvm_sw_tlb_put(CPUPPCState *env)
     g_free(bitmap);
 }
-int kvm_arch_put_registers(CPUPPCState *env, int level)
+int kvm_arch_put_registers(CPUState *cs, int level)
 {
+    PowerPCCPU *cpu = POWERPC_CPU(cs);
+    CPUPPCState *env = &cpu->env;
     struct kvm_regs regs;
     int ret;
     int i;
@@ -525,8 +528,10 @@ int kvm_arch_put_registers(CPUPPCState *env, int level)
     return ret;
 }
-int kvm_arch_get_registers(CPUPPCState *env)
+int kvm_arch_get_registers(CPUState *cs)
 {
+    PowerPCCPU *cpu = POWERPC_CPU(cs);
+    CPUPPCState *env = &cpu->env;
     struct kvm_regs regs;
     struct kvm_sregs sregs;
     uint32_t cr;
@@ -727,8 +732,10 @@ int kvmppc_set_interrupt(CPUPPCState *env, int irq, int level)
 #define PPC_INPUT_INT PPC6xx_INPUT_INT
 #endif
-void kvm_arch_pre_run(CPUPPCState *env, struct kvm_run *run)
+void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
 {
+    PowerPCCPU *cpu = POWERPC_CPU(cs);
+    CPUPPCState *env = &cpu->env;
     int r;
     unsigned irq;
@@ -760,13 +767,14 @@ void kvm_arch_pre_run(CPUPPCState *env, struct kvm_run *run)
      * anyways, so we will get a chance to deliver the rest. */
 }
-void kvm_arch_post_run(CPUPPCState *env, struct kvm_run *run)
+void kvm_arch_post_run(CPUState *cpu, struct kvm_run *run)
 {
 }
-int kvm_arch_process_async_events(CPUPPCState *env)
+int kvm_arch_process_async_events(CPUState *cs)
 {
-    return env->halted;
+    PowerPCCPU *cpu = POWERPC_CPU(cs);
+    return cpu->env.halted;
 }
 static int kvmppc_handle_halt(CPUPPCState *env)
@@ -796,8 +804,10 @@ static int kvmppc_handle_dcr_write(CPUPPCState *env, uint32_t dcrn, uint32_t dat
     return 0;
 }
-int kvm_arch_handle_exit(CPUPPCState *env, struct kvm_run *run)
+int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
 {
+    PowerPCCPU *cpu = POWERPC_CPU(cs);
+    CPUPPCState *env = &cpu->env;
     int ret;
     switch (run->exit_reason) {
@@ -817,7 +827,7 @@ int kvm_arch_handle_exit(CPUPPCState *env, struct kvm_run *run)
 #ifdef CONFIG_PSERIES
     case KVM_EXIT_PAPR_HCALL:
         dprintf("handle PAPR hypercall\n");
-        run->papr_hcall.ret = spapr_hypercall(ppc_env_get_cpu(env),
+        run->papr_hcall.ret = spapr_hypercall(cpu,
                                               run->papr_hcall.nr,
                                               run->papr_hcall.args);
         ret = 0;
@@ -1225,12 +1235,12 @@ int kvmppc_fixup_cpu(CPUPPCState *env)
 }
-bool kvm_arch_stop_on_emulation_error(CPUPPCState *env)
+bool kvm_arch_stop_on_emulation_error(CPUState *cpu)
 {
     return true;
 }
-int kvm_arch_on_sigbus_vcpu(CPUPPCState *env, int code, void *addr)
+int kvm_arch_on_sigbus_vcpu(CPUState *cpu, int code, void *addr)
 {
     return 1;
 }
diff --git a/target-s390x/kvm.c b/target-s390x/kvm.c
index 94de764264..d4e6ab2db8 100644
--- a/target-s390x/kvm.c
+++ b/target-s390x/kvm.c
@@ -72,8 +72,9 @@ int kvm_arch_init(KVMState *s)
     return 0;
 }
-int kvm_arch_init_vcpu(CPUS390XState *env)
+int kvm_arch_init_vcpu(CPUState *cpu)
 {
+    CPUS390XState *env = &S390_CPU(cpu)->env;
     int ret = 0;
     if (kvm_vcpu_ioctl(env, KVM_S390_INITIAL_RESET, NULL) < 0) {
@@ -83,13 +84,15 @@ int kvm_arch_init_vcpu(CPUS390XState *env)
     return ret;
 }
-void kvm_arch_reset_vcpu(CPUS390XState *env)
+void kvm_arch_reset_vcpu(CPUState *cpu)
 {
     /* FIXME: add code to reset vcpu. */
 }
-int kvm_arch_put_registers(CPUS390XState *env, int level)
+int kvm_arch_put_registers(CPUState *cs, int level)
 {
+    S390CPU *cpu = S390_CPU(cs);
+    CPUS390XState *env = &cpu->env;
     struct kvm_sregs sregs;
     struct kvm_regs regs;
     int ret;
@@ -149,8 +152,10 @@ int kvm_arch_put_registers(CPUS390XState *env, int level)
     return 0;
 }
-int kvm_arch_get_registers(CPUS390XState *env)
+int kvm_arch_get_registers(CPUState *cs)
 {
+    S390CPU *cpu = S390_CPU(cs);
+    CPUS390XState *env = &cpu->env;
     struct kvm_sregs sregs;
     struct kvm_regs regs;
     int ret;
@@ -239,8 +244,10 @@ void *kvm_arch_vmalloc(ram_addr_t size)
     }
 }
-int kvm_arch_insert_sw_breakpoint(CPUS390XState *env, struct kvm_sw_breakpoint *bp)
+int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
 {
+    S390CPU *cpu = S390_CPU(cs);
+    CPUS390XState *env = &cpu->env;
     static const uint8_t diag_501[] = {0x83, 0x24, 0x05, 0x01};
     if (cpu_memory_rw_debug(env, bp->pc, (uint8_t *)&bp->saved_insn, 4, 0) ||
@@ -250,8 +257,10 @@ int kvm_arch_insert_sw_breakpoint(CPUS390XState *env, struct kvm_sw_breakpoint *
     return 0;
 }
-int kvm_arch_remove_sw_breakpoint(CPUS390XState *env, struct kvm_sw_breakpoint *bp)
+int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
 {
+    S390CPU *cpu = S390_CPU(cs);
+    CPUS390XState *env = &cpu->env;
     uint8_t t[4];
     static const uint8_t diag_501[] = {0x83, 0x24, 0x05, 0x01};
@@ -266,17 +275,18 @@ int kvm_arch_remove_sw_breakpoint(CPUS390XState *env, struct kvm_sw_breakpoint *
     return 0;
 }
-void kvm_arch_pre_run(CPUS390XState *env, struct kvm_run *run)
+void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
 {
 }
-void kvm_arch_post_run(CPUS390XState *env, struct kvm_run *run)
+void kvm_arch_post_run(CPUState *cpu, struct kvm_run *run)
 {
 }
-int kvm_arch_process_async_events(CPUS390XState *env)
+int kvm_arch_process_async_events(CPUState *cs)
 {
-    return env->halted;
+    S390CPU *cpu = S390_CPU(cs);
+    return cpu->env.halted;
 }
 void kvm_s390_interrupt_internal(CPUS390XState *env, int type, uint32_t parm,
@@ -565,8 +575,10 @@ static int handle_intercept(CPUS390XState *env)
     return r;
 }
-int kvm_arch_handle_exit(CPUS390XState *env, struct kvm_run *run)
+int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
 {
+    S390CPU *cpu = S390_CPU(cs);
+    CPUS390XState *env = &cpu->env;
     int ret = 0;
     switch (run->exit_reason) {
@@ -587,12 +599,12 @@ int kvm_arch_handle_exit(CPUS390XState *env, struct kvm_run *run)
     return ret;
 }
-bool kvm_arch_stop_on_emulation_error(CPUS390XState *env)
+bool kvm_arch_stop_on_emulation_error(CPUState *cpu)
 {
     return true;
 }
-int kvm_arch_on_sigbus_vcpu(CPUS390XState *env, int code, void *addr)
+int kvm_arch_on_sigbus_vcpu(CPUState *cpu, int code, void *addr)
 {
     return 1;
 }