From 64ddecc88bcf2689d2df5df3bea7f85856dc6519 Mon Sep 17 00:00:00 2001 From: Jon Doron Date: Wed, 16 Feb 2022 12:24:57 +0200 Subject: hyperv: SControl is optional to enable SynIc SynIC can be enabled regardless of the SControl mechanism, which can register a GSI for a given SintRoute. This behaviour can be achieved by enabling SIMP, after which the guest will poll on the message slot. When another message is pending, the host sets the pending flag in the message slot. When the guest reads the message slot and finds the pending flag set, it writes to HV_X64_MSR_EOM, indicating it has cleared the slot and that we can try to push our message again. Signed-off-by: Jon Doron Reviewed-by: Emanuele Giuseppe Esposito Message-Id: <20220216102500.692781-2-arilou@gmail.com> Signed-off-by: Paolo Bonzini --- hw/hyperv/hyperv.c | 109 +++++++++++++++++++++++++++++++++++++---------------- 1 file changed, 76 insertions(+), 33 deletions(-) (limited to 'hw/hyperv/hyperv.c') diff --git a/hw/hyperv/hyperv.c b/hw/hyperv/hyperv.c index cb1074f234..8b832566c1 100644 --- a/hw/hyperv/hyperv.c +++ b/hw/hyperv/hyperv.c @@ -27,13 +27,16 @@ struct SynICState { CPUState *cs; - bool enabled; + bool sctl_enabled; hwaddr msg_page_addr; hwaddr event_page_addr; MemoryRegion msg_page_mr; MemoryRegion event_page_mr; struct hyperv_message_page *msg_page; struct hyperv_event_flags_page *event_page; + + QemuMutex sint_routes_mutex; + QLIST_HEAD(, HvSintRoute) sint_routes; }; #define TYPE_SYNIC "hyperv-synic" @@ -51,11 +54,11 @@ static SynICState *get_synic(CPUState *cs) return SYNIC(object_resolve_path_component(OBJECT(cs), "synic")); } -static void synic_update(SynICState *synic, bool enable, +static void synic_update(SynICState *synic, bool sctl_enable, hwaddr msg_page_addr, hwaddr event_page_addr) { - synic->enabled = enable; + synic->sctl_enabled = sctl_enable; if (synic->msg_page_addr != msg_page_addr) { if (synic->msg_page_addr) { memory_region_del_subregion(get_system_memory(), @@ -80,7 +83,7 @@ static void synic_update(SynICState *synic, bool enable, } } -void hyperv_synic_update(CPUState *cs, bool enable, +void hyperv_synic_update(CPUState *cs, bool sctl_enable, hwaddr msg_page_addr, hwaddr event_page_addr) { SynICState *synic = get_synic(cs); @@ -89,7 +92,7 @@ void hyperv_synic_update(CPUState *cs, bool enable, return; } - synic_update(synic, enable, msg_page_addr, event_page_addr); + synic_update(synic, sctl_enable, msg_page_addr, event_page_addr); } static void synic_realize(DeviceState *dev, Error **errp) @@ -110,16 +113,20 @@ static void synic_realize(DeviceState *dev, Error **errp) sizeof(*synic->event_page), &error_abort); synic->msg_page = memory_region_get_ram_ptr(&synic->msg_page_mr); synic->event_page = memory_region_get_ram_ptr(&synic->event_page_mr); + qemu_mutex_init(&synic->sint_routes_mutex); + QLIST_INIT(&synic->sint_routes); g_free(msgp_name); g_free(eventp_name); } + static void synic_reset(DeviceState *dev) { SynICState *synic = SYNIC(dev); memset(synic->msg_page, 0, sizeof(*synic->msg_page)); memset(synic->event_page, 0, sizeof(*synic->event_page)); synic_update(synic, false, 0, 0); + assert(QLIST_EMPTY(&synic->sint_routes)); } static void synic_class_init(ObjectClass *klass, void *data) @@ -214,6 +221,7 @@ struct HvSintRoute { HvSintStagedMessage *staged_msg; unsigned refcount; + QLIST_ENTRY(HvSintRoute) link; }; static CPUState *hyperv_find_vcpu(uint32_t vp_index) @@ -259,7 +267,7 @@ static void cpu_post_msg(CPUState *cs, run_on_cpu_data data) assert(staged_msg->state ==
HV_STAGED_MSG_BUSY); - if (!synic->enabled || !synic->msg_page_addr) { + if (!synic->msg_page_addr) { staged_msg->status = -ENXIO; goto posted; } @@ -343,7 +351,7 @@ int hyperv_set_event_flag(HvSintRoute *sint_route, unsigned eventno) if (eventno > HV_EVENT_FLAGS_COUNT) { return -EINVAL; } - if (!synic->enabled || !synic->event_page_addr) { + if (!synic->sctl_enabled || !synic->event_page_addr) { return -ENXIO; } @@ -364,11 +372,12 @@ int hyperv_set_event_flag(HvSintRoute *sint_route, unsigned eventno) HvSintRoute *hyperv_sint_route_new(uint32_t vp_index, uint32_t sint, HvSintMsgCb cb, void *cb_data) { - HvSintRoute *sint_route; - EventNotifier *ack_notifier; + HvSintRoute *sint_route = NULL; + EventNotifier *ack_notifier = NULL; int r, gsi; CPUState *cs; SynICState *synic; + bool ack_event_initialized = false; cs = hyperv_find_vcpu(vp_index); if (!cs) { @@ -381,57 +390,77 @@ HvSintRoute *hyperv_sint_route_new(uint32_t vp_index, uint32_t sint, } sint_route = g_new0(HvSintRoute, 1); - r = event_notifier_init(&sint_route->sint_set_notifier, false); - if (r) { - goto err; + if (!sint_route) { + return NULL; } + sint_route->synic = synic; + sint_route->sint = sint; + sint_route->refcount = 1; ack_notifier = cb ? &sint_route->sint_ack_notifier : NULL; if (ack_notifier) { sint_route->staged_msg = g_new0(HvSintStagedMessage, 1); + if (!sint_route->staged_msg) { + goto cleanup_err_sint; + } sint_route->staged_msg->cb = cb; sint_route->staged_msg->cb_data = cb_data; r = event_notifier_init(ack_notifier, false); if (r) { - goto err_sint_set_notifier; + goto cleanup_err_sint; } - event_notifier_set_handler(ack_notifier, sint_ack_handler); + ack_event_initialized = true; + } + + /* See if we are done or we need to setup a GSI for this SintRoute */ + if (!synic->sctl_enabled) { + goto cleanup; + } + + /* We need to setup a GSI for this SintRoute */ + r = event_notifier_init(&sint_route->sint_set_notifier, false); + if (r) { + goto cleanup_err_sint; } gsi = kvm_irqchip_add_hv_sint_route(kvm_state, vp_index, sint); if (gsi < 0) { - goto err_gsi; + goto cleanup_err_sint_notifier; } r = kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, &sint_route->sint_set_notifier, ack_notifier, gsi); if (r) { - goto err_irqfd; + goto cleanup_err_irqfd; } sint_route->gsi = gsi; - sint_route->synic = synic; - sint_route->sint = sint; - sint_route->refcount = 1; - +cleanup: + qemu_mutex_lock(&synic->sint_routes_mutex); + QLIST_INSERT_HEAD(&synic->sint_routes, sint_route, link); + qemu_mutex_unlock(&synic->sint_routes_mutex); return sint_route; -err_irqfd: +cleanup_err_irqfd: kvm_irqchip_release_virq(kvm_state, gsi); -err_gsi: + +cleanup_err_sint_notifier: + event_notifier_cleanup(&sint_route->sint_set_notifier); + +cleanup_err_sint: if (ack_notifier) { - event_notifier_set_handler(ack_notifier, NULL); - event_notifier_cleanup(ack_notifier); + if (ack_event_initialized) { + event_notifier_set_handler(ack_notifier, NULL); + event_notifier_cleanup(ack_notifier); + } + g_free(sint_route->staged_msg); } -err_sint_set_notifier: - event_notifier_cleanup(&sint_route->sint_set_notifier); -err: - g_free(sint_route); + g_free(sint_route); return NULL; } @@ -442,6 +471,8 @@ void hyperv_sint_route_ref(HvSintRoute *sint_route) void hyperv_sint_route_unref(HvSintRoute *sint_route) { + SynICState *synic; + if (!sint_route) { return; } @@ -452,21 +483,33 @@ void hyperv_sint_route_unref(HvSintRoute *sint_route) return; } - kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, - &sint_route->sint_set_notifier, - sint_route->gsi); - 
kvm_irqchip_release_virq(kvm_state, sint_route->gsi); + synic = sint_route->synic; + qemu_mutex_lock(&synic->sint_routes_mutex); + QLIST_REMOVE(sint_route, link); + qemu_mutex_unlock(&synic->sint_routes_mutex); + + if (sint_route->gsi) { + kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, + &sint_route->sint_set_notifier, + sint_route->gsi); + kvm_irqchip_release_virq(kvm_state, sint_route->gsi); + event_notifier_cleanup(&sint_route->sint_set_notifier); + } + if (sint_route->staged_msg) { event_notifier_set_handler(&sint_route->sint_ack_notifier, NULL); event_notifier_cleanup(&sint_route->sint_ack_notifier); g_free(sint_route->staged_msg); } - event_notifier_cleanup(&sint_route->sint_set_notifier); g_free(sint_route); } int hyperv_sint_route_set_sint(HvSintRoute *sint_route) { + if (!sint_route->gsi) { + return 0; + } + return event_notifier_set(&sint_route->sint_set_notifier); } -- cgit v1.2.3-55-g7522 From 73d24074078a2cefb5305047e3bf50b73daa3f98 Mon Sep 17 00:00:00 2001 From: Jon Doron Date: Wed, 16 Feb 2022 12:24:59 +0200 Subject: hyperv: Add support to process syndbg commands SynDbg commands can come from two different flows: 1. Hypercalls: in this mode the data being sent is fully encapsulated network packets. 2. SynDbg-specific MSRs: in this mode only the data that needs to be transferred is passed. Signed-off-by: Jon Doron Reviewed-by: Emanuele Giuseppe Esposito Message-Id: <20220216102500.692781-4-arilou@gmail.com> Signed-off-by: Paolo Bonzini --- docs/hyperv.txt | 15 +++ hw/hyperv/hyperv.c | 243 ++++++++++++++++++++++++++++++++++++++++++ include/hw/hyperv/hyperv.h | 58 ++++++++++ target/i386/cpu.c | 2 + target/i386/cpu.h | 7 ++ target/i386/kvm/hyperv-stub.c | 6 ++ target/i386/kvm/hyperv.c | 52 ++++++++- target/i386/kvm/kvm.c | 76 ++++++++++++- 8 files changed, 451 insertions(+), 8 deletions(-) (limited to 'hw/hyperv/hyperv.c') diff --git a/docs/hyperv.txt b/docs/hyperv.txt index 0417c183a3..33588a0396 100644 --- a/docs/hyperv.txt +++ b/docs/hyperv.txt @@ -225,6 +225,21 @@ default (WS2016). Note: hv-version-id-* are not enlightenments and thus don't enable Hyper-V identification when specified without any other enlightenments. +3.21. hv-syndbg +=============== +Enables the Hyper-V synthetic debugger interface, a special interface used +by the Windows kernel debugger to send packets through, rather than sending +them via serial/network. +When enabled, this enlightenment provides additional communication facilities +to the guest: SynDbg messages. +This channel is used by the Windows kernel debugger instead of serial/network, +adding a significant performance boost over the other +communication channels. +This enlightenment requires a VMBus device (-device vmbus-bridge,irq=15) +and the following enlightenments to work: +hv-relaxed,hv_time,hv-vapic,hv-vpindex,hv-synic,hv-runtime,hv-stimer + + 4.
Supplementary features ========================= diff --git a/hw/hyperv/hyperv.c b/hw/hyperv/hyperv.c index 8b832566c1..4a1b59cb9d 100644 --- a/hw/hyperv/hyperv.c +++ b/hw/hyperv/hyperv.c @@ -704,3 +704,246 @@ uint16_t hyperv_hcall_signal_event(uint64_t param, bool fast) } return HV_STATUS_INVALID_CONNECTION_ID; } + +static HvSynDbgHandler hv_syndbg_handler; +static void *hv_syndbg_context; + +void hyperv_set_syndbg_handler(HvSynDbgHandler handler, void *context) +{ + assert(!hv_syndbg_handler); + hv_syndbg_handler = handler; + hv_syndbg_context = context; +} + +uint16_t hyperv_hcall_reset_dbg_session(uint64_t outgpa) +{ + uint16_t ret; + HvSynDbgMsg msg; + struct hyperv_reset_debug_session_output *reset_dbg_session = NULL; + hwaddr len; + + if (!hv_syndbg_handler) { + ret = HV_STATUS_INVALID_HYPERCALL_CODE; + goto cleanup; + } + + len = sizeof(*reset_dbg_session); + reset_dbg_session = cpu_physical_memory_map(outgpa, &len, 1); + if (!reset_dbg_session || len < sizeof(*reset_dbg_session)) { + ret = HV_STATUS_INSUFFICIENT_MEMORY; + goto cleanup; + } + + msg.type = HV_SYNDBG_MSG_CONNECTION_INFO; + ret = hv_syndbg_handler(hv_syndbg_context, &msg); + if (ret) { + goto cleanup; + } + + reset_dbg_session->host_ip = msg.u.connection_info.host_ip; + reset_dbg_session->host_port = msg.u.connection_info.host_port; + /* The following fields are only used as validation for KDVM */ + memset(&reset_dbg_session->host_mac, 0, + sizeof(reset_dbg_session->host_mac)); + reset_dbg_session->target_ip = msg.u.connection_info.host_ip; + reset_dbg_session->target_port = msg.u.connection_info.host_port; + memset(&reset_dbg_session->target_mac, 0, + sizeof(reset_dbg_session->target_mac)); +cleanup: + if (reset_dbg_session) { + cpu_physical_memory_unmap(reset_dbg_session, + sizeof(*reset_dbg_session), 1, len); + } + + return ret; +} + +uint16_t hyperv_hcall_retreive_dbg_data(uint64_t ingpa, uint64_t outgpa, + bool fast) +{ + uint16_t ret; + struct hyperv_retrieve_debug_data_input *debug_data_in = NULL; + struct hyperv_retrieve_debug_data_output *debug_data_out = NULL; + hwaddr in_len, out_len; + HvSynDbgMsg msg; + + if (fast || !hv_syndbg_handler) { + ret = HV_STATUS_INVALID_HYPERCALL_CODE; + goto cleanup; + } + + in_len = sizeof(*debug_data_in); + debug_data_in = cpu_physical_memory_map(ingpa, &in_len, 0); + if (!debug_data_in || in_len < sizeof(*debug_data_in)) { + ret = HV_STATUS_INSUFFICIENT_MEMORY; + goto cleanup; + } + + out_len = sizeof(*debug_data_out); + debug_data_out = cpu_physical_memory_map(outgpa, &out_len, 1); + if (!debug_data_out || out_len < sizeof(*debug_data_out)) { + ret = HV_STATUS_INSUFFICIENT_MEMORY; + goto cleanup; + } + + msg.type = HV_SYNDBG_MSG_RECV; + msg.u.recv.buf_gpa = outgpa + sizeof(*debug_data_out); + msg.u.recv.count = TARGET_PAGE_SIZE - sizeof(*debug_data_out); + msg.u.recv.options = debug_data_in->options; + msg.u.recv.timeout = debug_data_in->timeout; + msg.u.recv.is_raw = true; + ret = hv_syndbg_handler(hv_syndbg_context, &msg); + if (ret == HV_STATUS_NO_DATA) { + debug_data_out->retrieved_count = 0; + debug_data_out->remaining_count = debug_data_in->count; + goto cleanup; + } else if (ret != HV_STATUS_SUCCESS) { + goto cleanup; + } + + debug_data_out->retrieved_count = msg.u.recv.retrieved_count; + debug_data_out->remaining_count = + debug_data_in->count - msg.u.recv.retrieved_count; +cleanup: + if (debug_data_out) { + cpu_physical_memory_unmap(debug_data_out, sizeof(*debug_data_out), 1, + out_len); + } + + if (debug_data_in) { + cpu_physical_memory_unmap(debug_data_in, 
sizeof(*debug_data_in), 0, + in_len); + } + + return ret; +} + +uint16_t hyperv_hcall_post_dbg_data(uint64_t ingpa, uint64_t outgpa, bool fast) +{ + uint16_t ret; + struct hyperv_post_debug_data_input *post_data_in = NULL; + struct hyperv_post_debug_data_output *post_data_out = NULL; + hwaddr in_len, out_len; + HvSynDbgMsg msg; + + if (fast || !hv_syndbg_handler) { + ret = HV_STATUS_INVALID_HYPERCALL_CODE; + goto cleanup; + } + + in_len = sizeof(*post_data_in); + post_data_in = cpu_physical_memory_map(ingpa, &in_len, 0); + if (!post_data_in || in_len < sizeof(*post_data_in)) { + ret = HV_STATUS_INSUFFICIENT_MEMORY; + goto cleanup; + } + + if (post_data_in->count > TARGET_PAGE_SIZE - sizeof(*post_data_in)) { + ret = HV_STATUS_INVALID_PARAMETER; + goto cleanup; + } + + out_len = sizeof(*post_data_out); + post_data_out = cpu_physical_memory_map(outgpa, &out_len, 1); + if (!post_data_out || out_len < sizeof(*post_data_out)) { + ret = HV_STATUS_INSUFFICIENT_MEMORY; + goto cleanup; + } + + msg.type = HV_SYNDBG_MSG_SEND; + msg.u.send.buf_gpa = ingpa + sizeof(*post_data_in); + msg.u.send.count = post_data_in->count; + msg.u.send.is_raw = true; + ret = hv_syndbg_handler(hv_syndbg_context, &msg); + if (ret != HV_STATUS_SUCCESS) { + goto cleanup; + } + + post_data_out->pending_count = msg.u.send.pending_count; + ret = post_data_out->pending_count ? HV_STATUS_INSUFFICIENT_BUFFERS : + HV_STATUS_SUCCESS; +cleanup: + if (post_data_out) { + cpu_physical_memory_unmap(post_data_out, + sizeof(*post_data_out), 1, out_len); + } + + if (post_data_in) { + cpu_physical_memory_unmap(post_data_in, + sizeof(*post_data_in), 0, in_len); + } + + return ret; +} + +uint32_t hyperv_syndbg_send(uint64_t ingpa, uint32_t count) +{ + HvSynDbgMsg msg; + + if (!hv_syndbg_handler) { + return HV_SYNDBG_STATUS_INVALID; + } + + msg.type = HV_SYNDBG_MSG_SEND; + msg.u.send.buf_gpa = ingpa; + msg.u.send.count = count; + msg.u.send.is_raw = false; + if (hv_syndbg_handler(hv_syndbg_context, &msg)) { + return HV_SYNDBG_STATUS_INVALID; + } + + return HV_SYNDBG_STATUS_SEND_SUCCESS; +} + +uint32_t hyperv_syndbg_recv(uint64_t ingpa, uint32_t count) +{ + uint16_t ret; + HvSynDbgMsg msg; + + if (!hv_syndbg_handler) { + return HV_SYNDBG_STATUS_INVALID; + } + + msg.type = HV_SYNDBG_MSG_RECV; + msg.u.recv.buf_gpa = ingpa; + msg.u.recv.count = count; + msg.u.recv.options = 0; + msg.u.recv.timeout = 0; + msg.u.recv.is_raw = false; + ret = hv_syndbg_handler(hv_syndbg_context, &msg); + if (ret != HV_STATUS_SUCCESS) { + return 0; + } + + return HV_SYNDBG_STATUS_SET_SIZE(HV_SYNDBG_STATUS_RECV_SUCCESS, + msg.u.recv.retrieved_count); +} + +void hyperv_syndbg_set_pending_page(uint64_t ingpa) +{ + HvSynDbgMsg msg; + + if (!hv_syndbg_handler) { + return; + } + + msg.type = HV_SYNDBG_MSG_SET_PENDING_PAGE; + msg.u.pending_page.buf_gpa = ingpa; + hv_syndbg_handler(hv_syndbg_context, &msg); +} + +uint64_t hyperv_syndbg_query_options(void) +{ + HvSynDbgMsg msg; + + if (!hv_syndbg_handler) { + return 0; + } + + msg.type = HV_SYNDBG_MSG_QUERY_OPTIONS; + if (hv_syndbg_handler(hv_syndbg_context, &msg) != HV_STATUS_SUCCESS) { + return 0; + } + + return msg.u.query_options.options; +} diff --git a/include/hw/hyperv/hyperv.h b/include/hw/hyperv/hyperv.h index a63ee0003c..015c3524b1 100644 --- a/include/hw/hyperv/hyperv.h +++ b/include/hw/hyperv/hyperv.h @@ -81,4 +81,62 @@ void hyperv_synic_update(CPUState *cs, bool enable, hwaddr msg_page_addr, hwaddr event_page_addr); bool hyperv_is_synic_enabled(void); +/* + * Process HVCALL_RESET_DEBUG_SESSION hypercall. 
+ */ +uint16_t hyperv_hcall_reset_dbg_session(uint64_t outgpa); +/* + * Process HVCALL_RETREIVE_DEBUG_DATA hypercall. + */ +uint16_t hyperv_hcall_retreive_dbg_data(uint64_t ingpa, uint64_t outgpa, + bool fast); +/* + * Process HVCALL_POST_DEBUG_DATA hypercall. + */ +uint16_t hyperv_hcall_post_dbg_data(uint64_t ingpa, uint64_t outgpa, bool fast); + +uint32_t hyperv_syndbg_send(uint64_t ingpa, uint32_t count); +uint32_t hyperv_syndbg_recv(uint64_t ingpa, uint32_t count); +void hyperv_syndbg_set_pending_page(uint64_t ingpa); +uint64_t hyperv_syndbg_query_options(void); + +typedef enum HvSynthDbgMsgType { + HV_SYNDBG_MSG_CONNECTION_INFO, + HV_SYNDBG_MSG_SEND, + HV_SYNDBG_MSG_RECV, + HV_SYNDBG_MSG_SET_PENDING_PAGE, + HV_SYNDBG_MSG_QUERY_OPTIONS +} HvDbgSynthMsgType; + +typedef struct HvSynDbgMsg { + HvDbgSynthMsgType type; + union { + struct { + uint32_t host_ip; + uint16_t host_port; + } connection_info; + struct { + uint64_t buf_gpa; + uint32_t count; + uint32_t pending_count; + bool is_raw; + } send; + struct { + uint64_t buf_gpa; + uint32_t count; + uint32_t options; + uint64_t timeout; + uint32_t retrieved_count; + bool is_raw; + } recv; + struct { + uint64_t buf_gpa; + } pending_page; + struct { + uint64_t options; + } query_options; + } u; +} HvSynDbgMsg; +typedef uint16_t (*HvSynDbgHandler)(void *context, HvSynDbgMsg *msg); +void hyperv_set_syndbg_handler(HvSynDbgHandler handler, void *context); #endif diff --git a/target/i386/cpu.c b/target/i386/cpu.c index cb6b5467d0..99343be926 100644 --- a/target/i386/cpu.c +++ b/target/i386/cpu.c @@ -6927,6 +6927,8 @@ static Property x86_cpu_properties[] = { HYPERV_FEAT_AVIC, 0), DEFINE_PROP_ON_OFF_AUTO("hv-no-nonarch-coresharing", X86CPU, hyperv_no_nonarch_cs, ON_OFF_AUTO_OFF), + DEFINE_PROP_BIT64("hv-syndbg", X86CPU, hyperv_features, + HYPERV_FEAT_SYNDBG, 0), DEFINE_PROP_BOOL("hv-passthrough", X86CPU, hyperv_passthrough, false), DEFINE_PROP_BOOL("hv-enforce-cpuid", X86CPU, hyperv_enforce_cpuid, false), diff --git a/target/i386/cpu.h b/target/i386/cpu.h index 8422f6c18e..6b61124887 100644 --- a/target/i386/cpu.h +++ b/target/i386/cpu.h @@ -1085,6 +1085,7 @@ uint64_t x86_cpu_get_supported_feature_word(FeatureWord w, #define HYPERV_FEAT_IPI 13 #define HYPERV_FEAT_STIMER_DIRECT 14 #define HYPERV_FEAT_AVIC 15 +#define HYPERV_FEAT_SYNDBG 16 #ifndef HYPERV_SPINLOCK_NEVER_NOTIFY #define HYPERV_SPINLOCK_NEVER_NOTIFY 0xFFFFFFFF @@ -1601,6 +1602,12 @@ typedef struct CPUArchState { uint64_t msr_hv_hypercall; uint64_t msr_hv_guest_os_id; uint64_t msr_hv_tsc; + uint64_t msr_hv_syndbg_control; + uint64_t msr_hv_syndbg_status; + uint64_t msr_hv_syndbg_send_page; + uint64_t msr_hv_syndbg_recv_page; + uint64_t msr_hv_syndbg_pending_page; + uint64_t msr_hv_syndbg_options; /* Per-VCPU HV MSRs */ uint64_t msr_hv_vapic; diff --git a/target/i386/kvm/hyperv-stub.c b/target/i386/kvm/hyperv-stub.c index 0028527e79..778ed782e6 100644 --- a/target/i386/kvm/hyperv-stub.c +++ b/target/i386/kvm/hyperv-stub.c @@ -27,6 +27,12 @@ int kvm_hv_handle_exit(X86CPU *cpu, struct kvm_hyperv_exit *exit) return 0; case KVM_EXIT_HYPERV_HCALL: exit->u.hcall.result = HV_STATUS_INVALID_HYPERCALL_CODE; + return 0; + case KVM_EXIT_HYPERV_SYNDBG: + if (!hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNDBG)) { + return -1; + } + return 0; default: return -1; diff --git a/target/i386/kvm/hyperv.c b/target/i386/kvm/hyperv.c index 26efc1e0e6..9026ef3a81 100644 --- a/target/i386/kvm/hyperv.c +++ b/target/i386/kvm/hyperv.c @@ -81,20 +81,66 @@ int kvm_hv_handle_exit(X86CPU *cpu, struct kvm_hyperv_exit *exit) 
case KVM_EXIT_HYPERV_HCALL: { uint16_t code = exit->u.hcall.input & 0xffff; bool fast = exit->u.hcall.input & HV_HYPERCALL_FAST; - uint64_t param = exit->u.hcall.params[0]; + uint64_t in_param = exit->u.hcall.params[0]; + uint64_t out_param = exit->u.hcall.params[1]; switch (code) { case HV_POST_MESSAGE: - exit->u.hcall.result = hyperv_hcall_post_message(param, fast); + exit->u.hcall.result = hyperv_hcall_post_message(in_param, fast); break; case HV_SIGNAL_EVENT: - exit->u.hcall.result = hyperv_hcall_signal_event(param, fast); + exit->u.hcall.result = hyperv_hcall_signal_event(in_param, fast); + break; + case HV_POST_DEBUG_DATA: + exit->u.hcall.result = + hyperv_hcall_post_dbg_data(in_param, out_param, fast); + break; + case HV_RETRIEVE_DEBUG_DATA: + exit->u.hcall.result = + hyperv_hcall_retreive_dbg_data(in_param, out_param, fast); + break; + case HV_RESET_DEBUG_SESSION: + exit->u.hcall.result = + hyperv_hcall_reset_dbg_session(out_param); break; default: exit->u.hcall.result = HV_STATUS_INVALID_HYPERCALL_CODE; } return 0; } + + case KVM_EXIT_HYPERV_SYNDBG: + if (!hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNDBG)) { + return -1; + } + + switch (exit->u.syndbg.msr) { + case HV_X64_MSR_SYNDBG_CONTROL: { + uint64_t control = exit->u.syndbg.control; + env->msr_hv_syndbg_control = control; + env->msr_hv_syndbg_send_page = exit->u.syndbg.send_page; + env->msr_hv_syndbg_recv_page = exit->u.syndbg.recv_page; + exit->u.syndbg.status = HV_STATUS_SUCCESS; + if (control & HV_SYNDBG_CONTROL_SEND) { + exit->u.syndbg.status = + hyperv_syndbg_send(env->msr_hv_syndbg_send_page, + HV_SYNDBG_CONTROL_SEND_SIZE(control)); + } else if (control & HV_SYNDBG_CONTROL_RECV) { + exit->u.syndbg.status = + hyperv_syndbg_recv(env->msr_hv_syndbg_recv_page, + TARGET_PAGE_SIZE); + } + break; + } + case HV_X64_MSR_SYNDBG_PENDING_BUFFER: + env->msr_hv_syndbg_pending_page = exit->u.syndbg.pending_page; + hyperv_syndbg_set_pending_page(env->msr_hv_syndbg_pending_page); + break; + default: + return -1; + } + + return 0; default: return -1; } diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c index 9cf8e03669..0bb3176edc 100644 --- a/target/i386/kvm/kvm.c +++ b/target/i386/kvm/kvm.c @@ -104,6 +104,7 @@ static bool has_msr_hv_synic; static bool has_msr_hv_stimer; static bool has_msr_hv_frequencies; static bool has_msr_hv_reenlightenment; +static bool has_msr_hv_syndbg_options; static bool has_msr_xss; static bool has_msr_umwait; static bool has_msr_spec_ctrl; @@ -964,6 +965,14 @@ static struct { .bits = HV_DEPRECATING_AEOI_RECOMMENDED} } }, + [HYPERV_FEAT_SYNDBG] = { + .desc = "Enable synthetic kernel debugger channel (hv-syndbg)", + .flags = { + {.func = HV_CPUID_FEATURES, .reg = R_EDX, + .bits = HV_FEATURE_DEBUG_MSRS_AVAILABLE} + }, + .dependencies = BIT(HYPERV_FEAT_SYNIC) | BIT(HYPERV_FEAT_RELAXED) + }, }; static struct kvm_cpuid2 *try_get_hv_cpuid(CPUState *cs, int max, @@ -1004,8 +1013,8 @@ static struct kvm_cpuid2 *try_get_hv_cpuid(CPUState *cs, int max, static struct kvm_cpuid2 *get_supported_hv_cpuid(CPUState *cs) { struct kvm_cpuid2 *cpuid; - /* 0x40000000..0x40000005, 0x4000000A, 0x40000080..0x40000080 leaves */ - int max = 10; + /* 0x40000000..0x40000005, 0x4000000A, 0x40000080..0x40000082 leaves */ + int max = 11; int i; bool do_sys_ioctl; @@ -1118,6 +1127,12 @@ static struct kvm_cpuid2 *get_supported_hv_cpuid_legacy(CPUState *cs) entry_feat->eax |= HV_SYNTIMERS_AVAILABLE; } + if (has_msr_hv_syndbg_options) { + entry_feat->edx |= HV_GUEST_DEBUGGING_AVAILABLE; + entry_feat->edx |= HV_FEATURE_DEBUG_MSRS_AVAILABLE; + 
entry_feat->ebx |= HV_PARTITION_DEBUGGING_ALLOWED; + } + if (kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV_TLBFLUSH) > 0) { entry_recomm->eax |= HV_REMOTE_TLB_FLUSH_RECOMMENDED; @@ -1369,12 +1384,22 @@ static int hyperv_fill_cpuids(CPUState *cs, { X86CPU *cpu = X86_CPU(cs); struct kvm_cpuid_entry2 *c; - uint32_t cpuid_i = 0; + uint32_t signature[3]; + uint32_t cpuid_i = 0, max_cpuid_leaf = 0; + + max_cpuid_leaf = HV_CPUID_IMPLEMENT_LIMITS; + if (hyperv_feat_enabled(cpu, HYPERV_FEAT_EVMCS)) { + max_cpuid_leaf = MAX(max_cpuid_leaf, HV_CPUID_NESTED_FEATURES); + } + + if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNDBG)) { + max_cpuid_leaf = + MAX(max_cpuid_leaf, HV_CPUID_SYNDBG_PLATFORM_CAPABILITIES); + } c = &cpuid_ent[cpuid_i++]; c->function = HV_CPUID_VENDOR_AND_MAX_FUNCTIONS; - c->eax = hyperv_feat_enabled(cpu, HYPERV_FEAT_EVMCS) ? - HV_CPUID_NESTED_FEATURES : HV_CPUID_IMPLEMENT_LIMITS; + c->eax = max_cpuid_leaf; c->ebx = cpu->hyperv_vendor_id[0]; c->ecx = cpu->hyperv_vendor_id[1]; c->edx = cpu->hyperv_vendor_id[2]; @@ -1453,6 +1478,33 @@ static int hyperv_fill_cpuids(CPUState *cs, c->eax = cpu->hyperv_nested[0]; } + if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNDBG)) { + c = &cpuid_ent[cpuid_i++]; + c->function = HV_CPUID_SYNDBG_VENDOR_AND_MAX_FUNCTIONS; + c->eax = hyperv_feat_enabled(cpu, HYPERV_FEAT_EVMCS) ? + HV_CPUID_NESTED_FEATURES : HV_CPUID_IMPLEMENT_LIMITS; + memcpy(signature, "Microsoft VS", 12); + c->eax = 0; + c->ebx = signature[0]; + c->ecx = signature[1]; + c->edx = signature[2]; + + c = &cpuid_ent[cpuid_i++]; + c->function = HV_CPUID_SYNDBG_INTERFACE; + memcpy(signature, "VS#1\0\0\0\0\0\0\0\0", 12); + c->eax = signature[0]; + c->ebx = 0; + c->ecx = 0; + c->edx = 0; + + c = &cpuid_ent[cpuid_i++]; + c->function = HV_CPUID_SYNDBG_PLATFORM_CAPABILITIES; + c->eax = HV_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING; + c->ebx = 0; + c->ecx = 0; + c->edx = 0; + } + return cpuid_i; } @@ -2261,6 +2313,9 @@ static int kvm_get_supported_msrs(KVMState *s) case HV_X64_MSR_REENLIGHTENMENT_CONTROL: has_msr_hv_reenlightenment = true; break; + case HV_X64_MSR_SYNDBG_OPTIONS: + has_msr_hv_syndbg_options = true; + break; case MSR_IA32_SPEC_CTRL: has_msr_spec_ctrl = true; break; @@ -3178,6 +3233,11 @@ static int kvm_put_msrs(X86CPU *cpu, int level) kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_STATUS, env->msr_hv_tsc_emulation_status); } + if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNDBG) && + has_msr_hv_syndbg_options) { + kvm_msr_entry_add(cpu, HV_X64_MSR_SYNDBG_OPTIONS, + hyperv_syndbg_query_options()); + } } if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VAPIC)) { kvm_msr_entry_add(cpu, HV_X64_MSR_APIC_ASSIST_PAGE, @@ -3619,6 +3679,9 @@ static int kvm_get_msrs(X86CPU *cpu) kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_CONTROL, 0); kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_STATUS, 0); } + if (has_msr_hv_syndbg_options) { + kvm_msr_entry_add(cpu, HV_X64_MSR_SYNDBG_OPTIONS, 0); + } if (has_msr_hv_crash) { int j; @@ -3910,6 +3973,9 @@ static int kvm_get_msrs(X86CPU *cpu) case HV_X64_MSR_TSC_EMULATION_STATUS: env->msr_hv_tsc_emulation_status = msrs[i].data; break; + case HV_X64_MSR_SYNDBG_OPTIONS: + env->msr_hv_syndbg_options = msrs[i].data; + break; case MSR_MTRRdefType: env->mtrr_deftype = msrs[i].data; break; -- cgit v1.2.3-55-g7522
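For orientation, the contract these patches establish between hw/hyperv/hyperv.c and a debugger backend is small: the backend registers a single HvSynDbgHandler through hyperv_set_syndbg_handler(), and every SynDbg hypercall or MSR access handled above is funnelled into that callback as an HvSynDbgMsg whose type field selects a member of the union. The sketch below is illustrative only and is not code from this series; the loopback behaviour, the syndbg_demo_* names, the fixed 4096-byte buffer, and the use of cpu_physical_memory_read()/write() to access the guest buffers are assumptions made for the example.

/*
 * Minimal SynDbg backend sketch (hypothetical, for illustration only):
 * data "sent" by the guest is buffered and handed straight back on the
 * next receive request.  The HvSynDbgMsg layout and message types are
 * the ones added to include/hw/hyperv/hyperv.h by this patch.
 */
#include "qemu/osdep.h"
#include "exec/cpu-common.h"        /* cpu_physical_memory_read()/write() */
#include "hw/hyperv/hyperv.h"       /* HvSynDbgMsg, hyperv_set_syndbg_handler() */
#include "hw/hyperv/hyperv-proto.h" /* HV_STATUS_* (assumed to be defined here) */

typedef struct SynDbgDemoState {
    uint8_t buf[4096];     /* one x86 page; holds data waiting to be received */
    uint32_t buf_len;
    uint32_t host_ip;      /* advertised through HV_SYNDBG_MSG_CONNECTION_INFO */
    uint16_t host_port;
} SynDbgDemoState;

static uint16_t syndbg_demo_handler(void *context, HvSynDbgMsg *msg)
{
    SynDbgDemoState *s = context;

    switch (msg->type) {
    case HV_SYNDBG_MSG_CONNECTION_INFO:
        msg->u.connection_info.host_ip = s->host_ip;
        msg->u.connection_info.host_port = s->host_port;
        return HV_STATUS_SUCCESS;

    case HV_SYNDBG_MSG_QUERY_OPTIONS:
        msg->u.query_options.options = 0;       /* no special options */
        return HV_STATUS_SUCCESS;

    case HV_SYNDBG_MSG_SEND:
        /* Copy the guest buffer out; a real backend would forward it. */
        if (msg->u.send.count > sizeof(s->buf)) {
            return HV_STATUS_INVALID_PARAMETER;
        }
        cpu_physical_memory_read(msg->u.send.buf_gpa, s->buf,
                                 msg->u.send.count);
        s->buf_len = msg->u.send.count;
        msg->u.send.pending_count = 0;          /* everything was consumed */
        return HV_STATUS_SUCCESS;

    case HV_SYNDBG_MSG_RECV: {
        uint32_t n = MIN(msg->u.recv.count, s->buf_len);

        if (!n) {
            /* The callers above report zero retrieved bytes on this status. */
            return HV_STATUS_NO_DATA;
        }
        cpu_physical_memory_write(msg->u.recv.buf_gpa, s->buf, n);
        msg->u.recv.retrieved_count = n;
        s->buf_len = 0;
        return HV_STATUS_SUCCESS;
    }

    case HV_SYNDBG_MSG_SET_PENDING_PAGE:
        /* Where to flag pending data for the guest; unused in this sketch. */
        (void)msg->u.pending_page.buf_gpa;
        return HV_STATUS_SUCCESS;

    default:
        return HV_STATUS_INVALID_PARAMETER;
    }
}

static void syndbg_demo_register(SynDbgDemoState *s)
{
    /* hyperv_set_syndbg_handler() asserts that only one handler is set. */
    hyperv_set_syndbg_handler(syndbg_demo_handler, s);
}

The single static handler pointer keeps the debugger backend decoupled from the hypercall and MSR plumbing shown above: hw/hyperv/hyperv.c never needs to know which device implements the transport.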