Diffstat (limited to 'hw')
35 files changed, 1000 insertions, 330 deletions
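The headline change in this series is a new "sgx-epc" memory device plus its supporting glue (hw/i386/sgx-epc.c, hw/i386/sgx.c, hw/i386/sgx-stub.c). As an editor's illustration only, not part of the commit, here is a minimal C sketch of how a caller could walk the configured EPC sections through the sgx_epc_get_section() helper the series introduces. The helper's signature and return convention (fills addr/size and returns 0 while section_nr is in range, non-zero otherwise) are taken from the diff below; the dump_epc_sections() wrapper is assumed. Elsewhere in the series, outside hw/, the expected consumer of this helper is the CPUID leaf 0x12 code in target/i386.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Declared by this series in hw/i386/sgx.h; stubbed when CONFIG_SGX is off. */
    int sgx_epc_get_section(int section_nr, uint64_t *addr, uint64_t *size);

    /* Illustrative wrapper (not in the commit): print every EPC section. */
    static void dump_epc_sections(void)
    {
        uint64_t addr, size;

        for (int i = 0; sgx_epc_get_section(i, &addr, &size) == 0; i++) {
            printf("EPC section %d: base 0x%" PRIx64 " size 0x%" PRIx64 "\n",
                   i, addr, size);
        }
    }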
diff --git a/hw/acpi/memory_hotplug.c b/hw/acpi/memory_hotplug.c index af37889423..d0fffcf787 100644 --- a/hw/acpi/memory_hotplug.c +++ b/hw/acpi/memory_hotplug.c @@ -8,6 +8,7 @@ #include "qapi/error.h" #include "qapi/qapi-events-acpi.h" #include "qapi/qapi-events-machine.h" +#include "qapi/qapi-events-qdev.h" #define MEMORY_SLOTS_NUMBER "MDNR" #define MEMORY_HOTPLUG_IO_REGION "HPMR" @@ -178,8 +179,16 @@ static void acpi_memory_hotplug_write(void *opaque, hwaddr addr, uint64_t data, hotplug_handler_unplug(hotplug_ctrl, dev, &local_err); if (local_err) { trace_mhp_acpi_pc_dimm_delete_failed(mem_st->selector); - qapi_event_send_mem_unplug_error(dev->id, + + /* + * Send both MEM_UNPLUG_ERROR and DEVICE_UNPLUG_GUEST_ERROR + * while the deprecation of MEM_UNPLUG_ERROR is + * pending. + */ + qapi_event_send_mem_unplug_error(dev->id ? : "", error_get_pretty(local_err)); + qapi_event_send_device_unplug_guest_error(!!dev->id, dev->id, + dev->canonical_path); error_free(local_err); break; } diff --git a/hw/i386/Kconfig b/hw/i386/Kconfig index ddedcef0b2..962d2c981b 100644 --- a/hw/i386/Kconfig +++ b/hw/i386/Kconfig @@ -6,6 +6,10 @@ config SEV select X86_FW_OVMF depends on KVM +config SGX + bool + depends on KVM + config PC bool imply APPLESMC @@ -21,6 +25,7 @@ config PC imply PVPANIC_ISA imply QXL imply SEV + imply SGX imply SGA imply TEST_DEVICES imply TPM_CRB diff --git a/hw/i386/acpi-build.c b/hw/i386/acpi-build.c index dfaa47cdc2..f4d6ae3d02 100644 --- a/hw/i386/acpi-build.c +++ b/hw/i386/acpi-build.c @@ -1841,6 +1841,28 @@ build_dsdt(GArray *table_data, BIOSLinker *linker, } #endif + if (pcms->sgx_epc.size != 0) { + uint64_t epc_base = pcms->sgx_epc.base; + uint64_t epc_size = pcms->sgx_epc.size; + + dev = aml_device("EPC"); + aml_append(dev, aml_name_decl("_HID", aml_eisaid("INT0E0C"))); + aml_append(dev, aml_name_decl("_STR", + aml_unicode("Enclave Page Cache 1.0"))); + crs = aml_resource_template(); + aml_append(crs, + aml_qword_memory(AML_POS_DECODE, AML_MIN_FIXED, + AML_MAX_FIXED, AML_NON_CACHEABLE, + AML_READ_WRITE, 0, epc_base, + epc_base + epc_size - 1, 0, epc_size)); + aml_append(dev, aml_name_decl("_CRS", crs)); + + method = aml_method("_STA", 0, AML_NOTSERIALIZED); + aml_append(method, aml_return(aml_int(0x0f))); + aml_append(dev, method); + + aml_append(sb_scope, dev); + } aml_append(dsdt, sb_scope); /* copy AML table into ACPI tables blob and patch header there */ diff --git a/hw/i386/fw_cfg.c b/hw/i386/fw_cfg.c index 4e68d5dea4..a283785a8d 100644 --- a/hw/i386/fw_cfg.c +++ b/hw/i386/fw_cfg.c @@ -159,7 +159,7 @@ void fw_cfg_build_feature_control(MachineState *ms, FWCfgState *fw_cfg) { X86CPU *cpu = X86_CPU(ms->possible_cpus->cpus[0].cpu); CPUX86State *env = &cpu->env; - uint32_t unused, ecx, edx; + uint32_t unused, ebx, ecx, edx; uint64_t feature_control_bits = 0; uint64_t *val; @@ -174,6 +174,16 @@ void fw_cfg_build_feature_control(MachineState *ms, FWCfgState *fw_cfg) feature_control_bits |= FEATURE_CONTROL_LMCE; } + if (env->cpuid_level >= 7) { + cpu_x86_cpuid(env, 0x7, 0, &unused, &ebx, &ecx, &unused); + if (ebx & CPUID_7_0_EBX_SGX) { + feature_control_bits |= FEATURE_CONTROL_SGX; + } + if (ecx & CPUID_7_0_ECX_SGX_LC) { + feature_control_bits |= FEATURE_CONTROL_SGX_LC; + } + } + if (!feature_control_bits) { return; } diff --git a/hw/i386/kvm/i8254.c b/hw/i386/kvm/i8254.c index fa68669e8a..191a26fa57 100644 --- a/hw/i386/kvm/i8254.c +++ b/hw/i386/kvm/i8254.c @@ -59,11 +59,6 @@ struct KVMPITClass { DeviceRealize parent_realize; }; -static int64_t abs64(int64_t v) -{ - return v < 
0 ? -v : v; -} - static void kvm_pit_update_clock_offset(KVMPITState *s) { int64_t offset, clock_offset; @@ -81,7 +76,7 @@ static void kvm_pit_update_clock_offset(KVMPITState *s) clock_gettime(CLOCK_MONOTONIC, &ts); offset -= ts.tv_nsec; offset -= (int64_t)ts.tv_sec * 1000000000; - if (abs64(offset) < abs64(clock_offset)) { + if (uabs64(offset) < uabs64(clock_offset)) { clock_offset = offset; } } diff --git a/hw/i386/meson.build b/hw/i386/meson.build index 80dad29f2b..c502965219 100644 --- a/hw/i386/meson.build +++ b/hw/i386/meson.build @@ -16,6 +16,8 @@ i386_ss.add(when: 'CONFIG_Q35', if_true: files('pc_q35.c')) i386_ss.add(when: 'CONFIG_VMMOUSE', if_true: files('vmmouse.c')) i386_ss.add(when: 'CONFIG_VMPORT', if_true: files('vmport.c')) i386_ss.add(when: 'CONFIG_VTD', if_true: files('intel_iommu.c')) +i386_ss.add(when: 'CONFIG_SGX', if_true: files('sgx-epc.c','sgx.c'), + if_false: files('sgx-stub.c')) i386_ss.add(when: 'CONFIG_ACPI', if_true: files('acpi-common.c')) i386_ss.add(when: 'CONFIG_ACPI_HW_REDUCED', if_true: files('generic_event_device_x86.c')) diff --git a/hw/i386/pc.c b/hw/i386/pc.c index 557d49c9f8..df457eceba 100644 --- a/hw/i386/pc.c +++ b/hw/i386/pc.c @@ -889,6 +889,10 @@ void pc_memory_init(PCMachineState *pcms, e820_add_entry(0x100000000ULL, x86ms->above_4g_mem_size, E820_RAM); } + if (pcms->sgx_epc.size != 0) { + e820_add_entry(pcms->sgx_epc.base, pcms->sgx_epc.size, E820_RESERVED); + } + if (!pcmc->has_reserved_memory && (machine->ram_slots || (machine->maxram_size > machine->ram_size))) { @@ -919,8 +923,15 @@ void pc_memory_init(PCMachineState *pcms, exit(EXIT_FAILURE); } + if (pcms->sgx_epc.size != 0) { + machine->device_memory->base = sgx_epc_above_4g_end(&pcms->sgx_epc); + } else { + machine->device_memory->base = + 0x100000000ULL + x86ms->above_4g_mem_size; + } + machine->device_memory->base = - ROUND_UP(0x100000000ULL + x86ms->above_4g_mem_size, 1 * GiB); + ROUND_UP(machine->device_memory->base, 1 * GiB); if (pcmc->enforce_aligned_dimm) { /* size device region assuming 1G page max alignment per slot */ @@ -1005,6 +1016,8 @@ uint64_t pc_pci_hole64_start(void) if (!pcmc->broken_reserved_end) { hole64_start += memory_region_size(&ms->device_memory->mr); } + } else if (pcms->sgx_epc.size != 0) { + hole64_start = sgx_epc_above_4g_end(&pcms->sgx_epc); } else { hole64_start = 0x100000000ULL + x86ms->above_4g_mem_size; } diff --git a/hw/i386/pc_piix.c b/hw/i386/pc_piix.c index c5da7739ce..6cc834aff6 100644 --- a/hw/i386/pc_piix.c +++ b/hw/i386/pc_piix.c @@ -153,6 +153,7 @@ static void pc_init1(MachineState *machine, } } + pc_machine_init_sgx_epc(pcms); x86_cpus_init(x86ms, pcmc->default_cpu_version); if (pcmc->kvmclock_enabled) { diff --git a/hw/i386/pc_q35.c b/hw/i386/pc_q35.c index 46cd542d17..5481d5c965 100644 --- a/hw/i386/pc_q35.c +++ b/hw/i386/pc_q35.c @@ -177,6 +177,7 @@ static void pc_q35_init(MachineState *machine) x86ms->below_4g_mem_size = machine->ram_size; } + pc_machine_init_sgx_epc(pcms); x86_cpus_init(x86ms, pcmc->default_cpu_version); kvmclock_create(pcmc->kvmclock_create_always); diff --git a/hw/i386/sgx-epc.c b/hw/i386/sgx-epc.c new file mode 100644 index 0000000000..55e2217eae --- /dev/null +++ b/hw/i386/sgx-epc.c @@ -0,0 +1,184 @@ +/* + * SGX EPC device + * + * Copyright (C) 2019 Intel Corporation + * + * Authors: + * Sean Christopherson <sean.j.christopherson@intel.com> + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ */ +#include "qemu/osdep.h" +#include "hw/i386/pc.h" +#include "hw/i386/sgx-epc.h" +#include "hw/mem/memory-device.h" +#include "hw/qdev-properties.h" +#include "qapi/error.h" +#include "qapi/visitor.h" +#include "target/i386/cpu.h" +#include "exec/address-spaces.h" + +static Property sgx_epc_properties[] = { + DEFINE_PROP_UINT64(SGX_EPC_ADDR_PROP, SGXEPCDevice, addr, 0), + DEFINE_PROP_LINK(SGX_EPC_MEMDEV_PROP, SGXEPCDevice, hostmem, + TYPE_MEMORY_BACKEND_EPC, HostMemoryBackendEpc *), + DEFINE_PROP_END_OF_LIST(), +}; + +static void sgx_epc_get_size(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + Error *local_err = NULL; + uint64_t value; + + value = memory_device_get_region_size(MEMORY_DEVICE(obj), &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + + visit_type_uint64(v, name, &value, errp); +} + +static void sgx_epc_init(Object *obj) +{ + object_property_add(obj, SGX_EPC_SIZE_PROP, "uint64", sgx_epc_get_size, + NULL, NULL, NULL); +} + +static void sgx_epc_realize(DeviceState *dev, Error **errp) +{ + PCMachineState *pcms = PC_MACHINE(qdev_get_machine()); + X86MachineState *x86ms = X86_MACHINE(pcms); + MemoryDeviceState *md = MEMORY_DEVICE(dev); + SGXEPCState *sgx_epc = &pcms->sgx_epc; + SGXEPCDevice *epc = SGX_EPC(dev); + HostMemoryBackend *hostmem; + const char *path; + + if (x86ms->boot_cpus != 0) { + error_setg(errp, "'" TYPE_SGX_EPC "' can't be created after vCPUs," + "e.g. via -device"); + return; + } + + if (!epc->hostmem) { + error_setg(errp, "'" SGX_EPC_MEMDEV_PROP "' property is not set"); + return; + } + hostmem = MEMORY_BACKEND(epc->hostmem); + if (host_memory_backend_is_mapped(hostmem)) { + path = object_get_canonical_path_component(OBJECT(hostmem)); + error_setg(errp, "can't use already busy memdev: %s", path); + return; + } + + epc->addr = sgx_epc->base + sgx_epc->size; + + memory_region_add_subregion(&sgx_epc->mr, epc->addr - sgx_epc->base, + host_memory_backend_get_memory(hostmem)); + + host_memory_backend_set_mapped(hostmem, true); + + sgx_epc->sections = g_renew(SGXEPCDevice *, sgx_epc->sections, + sgx_epc->nr_sections + 1); + sgx_epc->sections[sgx_epc->nr_sections++] = epc; + + sgx_epc->size += memory_device_get_region_size(md, errp); +} + +static void sgx_epc_unrealize(DeviceState *dev) +{ + SGXEPCDevice *epc = SGX_EPC(dev); + HostMemoryBackend *hostmem = MEMORY_BACKEND(epc->hostmem); + + host_memory_backend_set_mapped(hostmem, false); +} + +static uint64_t sgx_epc_md_get_addr(const MemoryDeviceState *md) +{ + const SGXEPCDevice *epc = SGX_EPC(md); + + return epc->addr; +} + +static void sgx_epc_md_set_addr(MemoryDeviceState *md, uint64_t addr, + Error **errp) +{ + object_property_set_uint(OBJECT(md), SGX_EPC_ADDR_PROP, addr, errp); +} + +static uint64_t sgx_epc_md_get_plugged_size(const MemoryDeviceState *md, + Error **errp) +{ + return 0; +} + +static MemoryRegion *sgx_epc_md_get_memory_region(MemoryDeviceState *md, + Error **errp) +{ + SGXEPCDevice *epc = SGX_EPC(md); + HostMemoryBackend *hostmem; + + if (!epc->hostmem) { + error_setg(errp, "'" SGX_EPC_MEMDEV_PROP "' property must be set"); + return NULL; + } + + hostmem = MEMORY_BACKEND(epc->hostmem); + return host_memory_backend_get_memory(hostmem); +} + +static void sgx_epc_md_fill_device_info(const MemoryDeviceState *md, + MemoryDeviceInfo *info) +{ + SgxEPCDeviceInfo *se = g_new0(SgxEPCDeviceInfo, 1); + SGXEPCDevice *epc = SGX_EPC(md); + + se->memaddr = epc->addr; + se->size = object_property_get_uint(OBJECT(epc), SGX_EPC_SIZE_PROP, + NULL); + 
se->memdev = object_get_canonical_path(OBJECT(epc->hostmem)); + + info->u.sgx_epc.data = se; + info->type = MEMORY_DEVICE_INFO_KIND_SGX_EPC; +} + +static void sgx_epc_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + MemoryDeviceClass *mdc = MEMORY_DEVICE_CLASS(oc); + + dc->hotpluggable = false; + dc->realize = sgx_epc_realize; + dc->unrealize = sgx_epc_unrealize; + dc->desc = "SGX EPC section"; + device_class_set_props(dc, sgx_epc_properties); + + mdc->get_addr = sgx_epc_md_get_addr; + mdc->set_addr = sgx_epc_md_set_addr; + mdc->get_plugged_size = sgx_epc_md_get_plugged_size; + mdc->get_memory_region = sgx_epc_md_get_memory_region; + mdc->fill_device_info = sgx_epc_md_fill_device_info; +} + +static TypeInfo sgx_epc_info = { + .name = TYPE_SGX_EPC, + .parent = TYPE_DEVICE, + .instance_size = sizeof(SGXEPCDevice), + .instance_init = sgx_epc_init, + .class_init = sgx_epc_class_init, + .class_size = sizeof(DeviceClass), + .interfaces = (InterfaceInfo[]) { + { TYPE_MEMORY_DEVICE }, + { } + }, +}; + +static void sgx_epc_register_types(void) +{ + type_register_static(&sgx_epc_info); +} + +type_init(sgx_epc_register_types) diff --git a/hw/i386/sgx-stub.c b/hw/i386/sgx-stub.c new file mode 100644 index 0000000000..3be9f5ca32 --- /dev/null +++ b/hw/i386/sgx-stub.c @@ -0,0 +1,26 @@ +#include "qemu/osdep.h" +#include "hw/i386/pc.h" +#include "hw/i386/sgx-epc.h" +#include "hw/i386/sgx.h" + +SGXInfo *sgx_get_info(Error **errp) +{ + error_setg(errp, "SGX support is not compiled in"); + return NULL; +} + +SGXInfo *sgx_get_capabilities(Error **errp) +{ + error_setg(errp, "SGX support is not compiled in"); + return NULL; +} + +void pc_machine_init_sgx_epc(PCMachineState *pcms) +{ + memset(&pcms->sgx_epc, 0, sizeof(SGXEPCState)); +} + +int sgx_epc_get_section(int section_nr, uint64_t *addr, uint64_t *size) +{ + g_assert_not_reached(); +} diff --git a/hw/i386/sgx.c b/hw/i386/sgx.c new file mode 100644 index 0000000000..e481e9358f --- /dev/null +++ b/hw/i386/sgx.c @@ -0,0 +1,170 @@ +/* + * SGX common code + * + * Copyright (C) 2021 Intel Corporation + * + * Authors: + * Yang Zhong<yang.zhong@intel.com> + * Sean Christopherson <sean.j.christopherson@intel.com> + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ +#include "qemu/osdep.h" +#include "hw/i386/pc.h" +#include "hw/i386/sgx-epc.h" +#include "hw/mem/memory-device.h" +#include "monitor/qdev.h" +#include "qapi/error.h" +#include "exec/address-spaces.h" +#include "hw/i386/sgx.h" +#include "sysemu/hw_accel.h" + +#define SGX_MAX_EPC_SECTIONS 8 +#define SGX_CPUID_EPC_INVALID 0x0 + +/* A valid EPC section. 
*/ +#define SGX_CPUID_EPC_SECTION 0x1 +#define SGX_CPUID_EPC_MASK 0xF + +static uint64_t sgx_calc_section_metric(uint64_t low, uint64_t high) +{ + return (low & MAKE_64BIT_MASK(12, 20)) + + ((high & MAKE_64BIT_MASK(0, 20)) << 32); +} + +static uint64_t sgx_calc_host_epc_section_size(void) +{ + uint32_t i, type; + uint32_t eax, ebx, ecx, edx; + uint64_t size = 0; + + for (i = 0; i < SGX_MAX_EPC_SECTIONS; i++) { + host_cpuid(0x12, i + 2, &eax, &ebx, &ecx, &edx); + + type = eax & SGX_CPUID_EPC_MASK; + if (type == SGX_CPUID_EPC_INVALID) { + break; + } + + if (type != SGX_CPUID_EPC_SECTION) { + break; + } + + size += sgx_calc_section_metric(ecx, edx); + } + + return size; +} + +SGXInfo *sgx_get_capabilities(Error **errp) +{ + SGXInfo *info = NULL; + uint32_t eax, ebx, ecx, edx; + + int fd = qemu_open_old("/dev/sgx_vepc", O_RDWR); + if (fd < 0) { + error_setg(errp, "SGX is not enabled in KVM"); + return NULL; + } + + info = g_new0(SGXInfo, 1); + host_cpuid(0x7, 0, &eax, &ebx, &ecx, &edx); + + info->sgx = ebx & (1U << 2) ? true : false; + info->flc = ecx & (1U << 30) ? true : false; + + host_cpuid(0x12, 0, &eax, &ebx, &ecx, &edx); + info->sgx1 = eax & (1U << 0) ? true : false; + info->sgx2 = eax & (1U << 1) ? true : false; + + info->section_size = sgx_calc_host_epc_section_size(); + + close(fd); + + return info; +} + +SGXInfo *sgx_get_info(Error **errp) +{ + SGXInfo *info = NULL; + X86MachineState *x86ms; + PCMachineState *pcms = + (PCMachineState *)object_dynamic_cast(qdev_get_machine(), + TYPE_PC_MACHINE); + if (!pcms) { + error_setg(errp, "SGX is only supported on PC machines"); + return NULL; + } + + x86ms = X86_MACHINE(pcms); + if (!x86ms->sgx_epc_list) { + error_setg(errp, "No EPC regions defined, SGX not available"); + return NULL; + } + + SGXEPCState *sgx_epc = &pcms->sgx_epc; + info = g_new0(SGXInfo, 1); + + info->sgx = true; + info->sgx1 = true; + info->sgx2 = true; + info->flc = true; + info->section_size = sgx_epc->size; + + return info; +} + +int sgx_epc_get_section(int section_nr, uint64_t *addr, uint64_t *size) +{ + PCMachineState *pcms = PC_MACHINE(qdev_get_machine()); + SGXEPCDevice *epc; + + if (pcms->sgx_epc.size == 0 || pcms->sgx_epc.nr_sections <= section_nr) { + return 1; + } + + epc = pcms->sgx_epc.sections[section_nr]; + + *addr = epc->addr; + *size = memory_device_get_region_size(MEMORY_DEVICE(epc), &error_fatal); + + return 0; +} + +void pc_machine_init_sgx_epc(PCMachineState *pcms) +{ + SGXEPCState *sgx_epc = &pcms->sgx_epc; + X86MachineState *x86ms = X86_MACHINE(pcms); + SgxEPCList *list = NULL; + Object *obj; + + memset(sgx_epc, 0, sizeof(SGXEPCState)); + if (!x86ms->sgx_epc_list) { + return; + } + + sgx_epc->base = 0x100000000ULL + x86ms->above_4g_mem_size; + + memory_region_init(&sgx_epc->mr, OBJECT(pcms), "sgx-epc", UINT64_MAX); + memory_region_add_subregion(get_system_memory(), sgx_epc->base, + &sgx_epc->mr); + + for (list = x86ms->sgx_epc_list; list; list = list->next) { + obj = object_new("sgx-epc"); + + /* set the memdev link with memory backend */ + object_property_parse(obj, SGX_EPC_MEMDEV_PROP, list->value->memdev, + &error_fatal); + object_property_set_bool(obj, "realized", true, &error_fatal); + object_unref(obj); + } + + if ((sgx_epc->base + sgx_epc->size) < sgx_epc->base) { + error_report("Size of all 'sgx-epc' =0x%"PRIu64" causes EPC to wrap", + sgx_epc->size); + exit(EXIT_FAILURE); + } + + memory_region_set_size(&sgx_epc->mr, sgx_epc->size); +} diff --git a/hw/i386/x86.c b/hw/i386/x86.c index 00448ed55a..41ef9a84a9 100644 --- a/hw/i386/x86.c +++ 
b/hw/i386/x86.c @@ -30,6 +30,8 @@ #include "qapi/error.h" #include "qapi/qmp/qerror.h" #include "qapi/qapi-visit-common.h" +#include "qapi/clone-visitor.h" +#include "qapi/qapi-visit-machine.h" #include "qapi/visitor.h" #include "sysemu/qtest.h" #include "sysemu/whpx.h" @@ -1263,6 +1265,27 @@ static void x86_machine_set_bus_lock_ratelimit(Object *obj, Visitor *v, visit_type_uint64(v, name, &x86ms->bus_lock_ratelimit, errp); } +static void machine_get_sgx_epc(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + X86MachineState *x86ms = X86_MACHINE(obj); + SgxEPCList *list = x86ms->sgx_epc_list; + + visit_type_SgxEPCList(v, name, &list, errp); +} + +static void machine_set_sgx_epc(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + X86MachineState *x86ms = X86_MACHINE(obj); + SgxEPCList *list; + + list = x86ms->sgx_epc_list; + visit_type_SgxEPCList(v, name, &x86ms->sgx_epc_list, errp); + + qapi_free_SgxEPCList(list); +} + static void x86_machine_initfn(Object *obj) { X86MachineState *x86ms = X86_MACHINE(obj); @@ -1322,6 +1345,12 @@ static void x86_machine_class_init(ObjectClass *oc, void *data) x86_machine_set_bus_lock_ratelimit, NULL, NULL); object_class_property_set_description(oc, X86_MACHINE_BUS_LOCK_RATELIMIT, "Set the ratelimit for the bus locks acquired in VMs"); + + object_class_property_add(oc, "sgx-epc", "SgxEPC", + machine_get_sgx_epc, machine_set_sgx_epc, + NULL, NULL); + object_class_property_set_description(oc, "sgx-epc", + "SGX EPC device"); } static const TypeInfo x86_machine_info = { diff --git a/hw/i386/xen/xen-hvm.c b/hw/i386/xen/xen-hvm.c index 9b432773f0..e3d3d5cf89 100644 --- a/hw/i386/xen/xen-hvm.c +++ b/hw/i386/xen/xen-hvm.c @@ -721,6 +721,7 @@ static void xen_log_global_stop(MemoryListener *listener) } static MemoryListener xen_memory_listener = { + .name = "xen-memory", .region_add = xen_region_add, .region_del = xen_region_del, .log_start = xen_log_start, @@ -732,6 +733,7 @@ static MemoryListener xen_memory_listener = { }; static MemoryListener xen_io_listener = { + .name = "xen-io", .region_add = xen_io_add, .region_del = xen_io_del, .priority = 10, diff --git a/hw/intc/openpic.c b/hw/intc/openpic.c index 9b4c17854d..49504e740f 100644 --- a/hw/intc/openpic.c +++ b/hw/intc/openpic.c @@ -25,12 +25,8 @@ /* * * Based on OpenPic implementations: - * - Intel GW80314 I/O companion chip developer's manual * - Motorola MPC8245 & MPC8540 user manuals. - * - Motorola MCP750 (aka Raven) programmer manual. - * - Motorola Harrier programmer manuel - * - * Serial interrupts, as implemented in Raven chipset are not supported yet. + * - Motorola Harrier programmer manual * */ @@ -51,7 +47,7 @@ #include "qemu/timer.h" #include "qemu/error-report.h" -//#define DEBUG_OPENPIC +/* #define DEBUG_OPENPIC */ #ifdef DEBUG_OPENPIC static const int debug_openpic = 1; @@ -122,7 +118,8 @@ static FslMpicInfo fsl_mpic_42 = { #define ILR_INTTGT_CINT 0x01 /* critical */ #define ILR_INTTGT_MCP 0x02 /* machine check */ -/* The currently supported INTTGT values happen to be the same as QEMU's +/* + * The currently supported INTTGT values happen to be the same as QEMU's * openpic output codes, but don't depend on this. The output codes * could change (unlikely, but...) or support could be added for * more INTTGT values. @@ -181,10 +178,11 @@ static void openpic_cpu_write_internal(void *opaque, hwaddr addr, uint32_t val, int idx); static void openpic_reset(DeviceState *d); -/* Convert between openpic clock ticks and nanosecs. 
In the hardware the clock - frequency is driven by board inputs to the PIC which the PIC would then - divide by 4 or 8. For now hard code to 25MZ. -*/ +/* + * Convert between openpic clock ticks and nanosecs. In the hardware the clock + * frequency is driven by board inputs to the PIC which the PIC would then + * divide by 4 or 8. For now hard code to 25MZ. + */ #define OPENPIC_TIMER_FREQ_MHZ 25 #define OPENPIC_TIMER_NS_PER_TICK (1000 / OPENPIC_TIMER_FREQ_MHZ) static inline uint64_t ns_to_ticks(uint64_t ns) @@ -257,7 +255,8 @@ static void IRQ_local_pipe(OpenPICState *opp, int n_CPU, int n_IRQ, __func__, src->output, n_IRQ, active, was_active, dst->outputs_active[src->output]); - /* On Freescale MPIC, critical interrupts ignore priority, + /* + * On Freescale MPIC, critical interrupts ignore priority, * IACK, EOI, etc. Before MPIC v4.1 they also ignore * masking. */ @@ -280,7 +279,8 @@ static void IRQ_local_pipe(OpenPICState *opp, int n_CPU, int n_IRQ, priority = IVPR_PRIORITY(src->ivpr); - /* Even if the interrupt doesn't have enough priority, + /* + * Even if the interrupt doesn't have enough priority, * it is still raised, in case ctpr is lowered later. */ if (active) { @@ -412,7 +412,8 @@ static void openpic_set_irq(void *opaque, int n_IRQ, int level) } if (src->output != OPENPIC_OUTPUT_INT) { - /* Edge-triggered interrupts shouldn't be used + /* + * Edge-triggered interrupts shouldn't be used * with non-INT delivery, but just in case, * try to make it do something sane rather than * cause an interrupt storm. This is close to @@ -505,7 +506,8 @@ static inline void write_IRQreg_ivpr(OpenPICState *opp, int n_IRQ, uint32_t val) { uint32_t mask; - /* NOTE when implementing newer FSL MPIC models: starting with v4.0, + /* + * NOTE when implementing newer FSL MPIC models: starting with v4.0, * the polarity bit is read-only on internal interrupts. */ mask = IVPR_MASK_MASK | IVPR_PRIORITY_MASK | IVPR_SENSE_MASK | @@ -515,7 +517,8 @@ static inline void write_IRQreg_ivpr(OpenPICState *opp, int n_IRQ, uint32_t val) opp->src[n_IRQ].ivpr = (opp->src[n_IRQ].ivpr & IVPR_ACTIVITY_MASK) | (val & mask); - /* For FSL internal interrupts, The sense bit is reserved and zero, + /* + * For FSL internal interrupts, The sense bit is reserved and zero, * and the interrupt is always level-triggered. Timers and IPIs * have no sense or polarity bits, and are edge-triggered. */ @@ -699,16 +702,20 @@ static void qemu_timer_cb(void *opaque) openpic_set_irq(opp, n_IRQ, 0); } -/* If enabled is true, arranges for an interrupt to be raised val clocks into - the future, if enabled is false cancels the timer. */ +/* + * If enabled is true, arranges for an interrupt to be raised val clocks into + * the future, if enabled is false cancels the timer. + */ static void openpic_tmr_set_tmr(OpenPICTimer *tmr, uint32_t val, bool enabled) { uint64_t ns = ticks_to_ns(val & ~TCCR_TOG); - /* A count of zero causes a timer to be set to expire immediately. This - effectively stops the simulation since the timer is constantly expiring - which prevents guest code execution, so we don't honor that - configuration. On real hardware, this situation would generate an - interrupt on every clock cycle if the interrupt was unmasked. */ + /* + * A count of zero causes a timer to be set to expire immediately. This + * effectively stops the simulation since the timer is constantly expiring + * which prevents guest code execution, so we don't honor that + * configuration. 
On real hardware, this situation would generate an + * interrupt on every clock cycle if the interrupt was unmasked. + */ if ((ns == 0) || !enabled) { tmr->qemu_timer_active = false; tmr->tccr = tmr->tccr & TCCR_TOG; @@ -721,8 +728,10 @@ static void openpic_tmr_set_tmr(OpenPICTimer *tmr, uint32_t val, bool enabled) } } -/* Returns the currrent tccr value, i.e., timer value (in clocks) with - appropriate TOG. */ +/* + * Returns the currrent tccr value, i.e., timer value (in clocks) with + * appropriate TOG. + */ static uint64_t openpic_tmr_get_timer(OpenPICTimer *tmr) { uint64_t retval; @@ -1276,6 +1285,15 @@ static void openpic_reset(DeviceState *d) break; } + /* Mask all IPI interrupts for Freescale OpenPIC */ + if ((opp->model == OPENPIC_MODEL_FSL_MPIC_20) || + (opp->model == OPENPIC_MODEL_FSL_MPIC_42)) { + if (i >= opp->irq_ipi0 && i < opp->irq_tim0) { + write_IRQreg_idr(opp, i, 0); + continue; + } + } + write_IRQreg_idr(opp, i, opp->idr_reset); } /* Initialise IRQ destinations */ @@ -1304,7 +1322,7 @@ static void openpic_reset(DeviceState *d) typedef struct MemReg { const char *name; MemoryRegionOps const *ops; - hwaddr start_addr; + hwaddr start_addr; ram_addr_t size; } MemReg; @@ -1555,28 +1573,6 @@ static void openpic_realize(DeviceState *dev, Error **errp) break; - case OPENPIC_MODEL_RAVEN: - opp->nb_irqs = RAVEN_MAX_EXT; - opp->vid = VID_REVISION_1_3; - opp->vir = VIR_GENERIC; - opp->vector_mask = 0xFF; - opp->tfrr_reset = 4160000; - opp->ivpr_reset = IVPR_MASK_MASK | IVPR_MODE_MASK; - opp->idr_reset = 0; - opp->max_irq = RAVEN_MAX_IRQ; - opp->irq_ipi0 = RAVEN_IPI_IRQ; - opp->irq_tim0 = RAVEN_TMR_IRQ; - opp->brr1 = -1; - opp->mpic_mode_mask = GCR_MODE_MIXED; - - if (opp->nb_cpus != 1) { - error_setg(errp, "Only UP supported today"); - return; - } - - map_list(opp, list_le, &list_count); - break; - case OPENPIC_MODEL_KEYLARGO: opp->nb_irqs = KEYLARGO_MAX_EXT; opp->vid = VID_REVISION_1_2; diff --git a/hw/intc/openpic_kvm.c b/hw/intc/openpic_kvm.c index 21da680389..557dd0c2bf 100644 --- a/hw/intc/openpic_kvm.c +++ b/hw/intc/openpic_kvm.c @@ -234,6 +234,7 @@ static void kvm_openpic_realize(DeviceState *dev, Error **errp) opp->mem_listener.region_add = kvm_openpic_region_add; opp->mem_listener.region_del = kvm_openpic_region_del; + opp->mem_listener.name = "openpic-kvm"; memory_listener_register(&opp->mem_listener, &address_space_memory); /* indicate pic capabilities */ diff --git a/hw/intc/spapr_xive_kvm.c b/hw/intc/spapr_xive_kvm.c index 3e534b9685..6d4909d0a8 100644 --- a/hw/intc/spapr_xive_kvm.c +++ b/hw/intc/spapr_xive_kvm.c @@ -236,6 +236,8 @@ int kvmppc_xive_source_reset_one(XiveSource *xsrc, int srcno, Error **errp) SpaprXive *xive = SPAPR_XIVE(xsrc->xive); uint64_t state = 0; + trace_kvm_xive_source_reset(srcno); + assert(xive->fd != -1); if (xive_source_irq_is_lsi(xsrc, srcno)) { @@ -311,8 +313,6 @@ uint64_t kvmppc_xive_esb_rw(XiveSource *xsrc, int srcno, uint32_t offset, return xive_esb_rw(xsrc, srcno, offset, data, 1); } - trace_kvm_xive_source_reset(srcno); - /* * Special Load EOI handling for LSI sources. 
Q bit is never set * and the interrupt should be re-triggered if the level is still diff --git a/hw/intc/xive.c b/hw/intc/xive.c index b817ee8e37..6c82326ec7 100644 --- a/hw/intc/xive.c +++ b/hw/intc/xive.c @@ -28,17 +28,6 @@ */ /* - * Convert a priority number to an Interrupt Pending Buffer (IPB) - * register, which indicates a pending interrupt at the priority - * corresponding to the bit number - */ -static uint8_t priority_to_ipb(uint8_t priority) -{ - return priority > XIVE_PRIORITY_MAX ? - 0 : 1 << (XIVE_PRIORITY_MAX - priority); -} - -/* * Convert an Interrupt Pending Buffer (IPB) register to a Pending * Interrupt Priority Register (PIPR), which contains the priority of * the most favored pending notification. @@ -89,7 +78,7 @@ static uint64_t xive_tctx_accept(XiveTCTX *tctx, uint8_t ring) regs[TM_CPPR] = cppr; /* Reset the pending buffer bit */ - regs[TM_IPB] &= ~priority_to_ipb(cppr); + regs[TM_IPB] &= ~xive_priority_to_ipb(cppr); regs[TM_PIPR] = ipb_to_pipr(regs[TM_IPB]); /* Drop Exception bit */ @@ -152,11 +141,6 @@ void xive_tctx_ipb_update(XiveTCTX *tctx, uint8_t ring, uint8_t ipb) xive_tctx_notify(tctx, ring); } -static inline uint32_t xive_tctx_word2(uint8_t *ring) -{ - return *((uint32_t *) &ring[TM_WORD2]); -} - /* * XIVE Thread Interrupt Management Area (TIMA) */ @@ -353,7 +337,7 @@ static void xive_tm_set_os_cppr(XivePresenter *xptr, XiveTCTX *tctx, static void xive_tm_set_os_pending(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset, uint64_t value, unsigned size) { - xive_tctx_ipb_update(tctx, TM_QW1_OS, priority_to_ipb(value & 0xff)); + xive_tctx_ipb_update(tctx, TM_QW1_OS, xive_priority_to_ipb(value & 0xff)); } static void xive_os_cam_decode(uint32_t cam, uint8_t *nvt_blk, @@ -1535,7 +1519,8 @@ bool xive_presenter_notify(XiveFabric *xfb, uint8_t format, /* handle CPU exception delivery */ if (count) { trace_xive_presenter_notify(nvt_blk, nvt_idx, match.ring); - xive_tctx_ipb_update(match.tctx, match.ring, priority_to_ipb(priority)); + xive_tctx_ipb_update(match.tctx, match.ring, + xive_priority_to_ipb(priority)); } return !!count; @@ -1682,7 +1667,8 @@ static void xive_router_end_notify(XiveRouter *xrtr, uint8_t end_blk, * use. The presenter will resend the interrupt when the vCPU * is dispatched again on a HW thread. */ - ipb = xive_get_field32(NVT_W4_IPB, nvt.w4) | priority_to_ipb(priority); + ipb = xive_get_field32(NVT_W4_IPB, nvt.w4) | + xive_priority_to_ipb(priority); nvt.w4 = xive_set_field32(NVT_W4_IPB, nvt.w4, ipb); xive_router_write_nvt(xrtr, nvt_blk, nvt_idx, &nvt, 4); diff --git a/hw/ppc/pnv.c b/hw/ppc/pnv.c index 2f5358b70c..71e45515f1 100644 --- a/hw/ppc/pnv.c +++ b/hw/ppc/pnv.c @@ -723,6 +723,8 @@ static uint64_t pnv_chip_get_ram_size(PnvMachineState *pnv, int chip_id) return QEMU_ALIGN_DOWN(ram_per_chip, 1 * MiB); } + assert(pnv->num_chips > 1); + ram_per_chip = (machine->ram_size - 1 * GiB) / (pnv->num_chips - 1); return chip_id == 0 ? 
1 * GiB : QEMU_ALIGN_DOWN(ram_per_chip, 1 * MiB); } @@ -838,8 +840,7 @@ static void pnv_init(MachineState *machine) for (i = 0; i < pnv->num_chips; i++) { char chip_name[32]; Object *chip = OBJECT(qdev_new(chip_typename)); - int chip_id = i; - uint64_t chip_ram_size = pnv_chip_get_ram_size(pnv, chip_id); + uint64_t chip_ram_size = pnv_chip_get_ram_size(pnv, i); pnv->chips[i] = PNV_CHIP(chip); @@ -850,9 +851,9 @@ static void pnv_init(MachineState *machine) &error_fatal); chip_ram_start += chip_ram_size; - snprintf(chip_name, sizeof(chip_name), "chip[%d]", chip_id); + snprintf(chip_name, sizeof(chip_name), "chip[%d]", i); object_property_add_child(OBJECT(pnv), chip_name, chip); - object_property_set_int(chip, "chip-id", chip_id, &error_fatal); + object_property_set_int(chip, "chip-id", i, &error_fatal); object_property_set_int(chip, "nr-cores", machine->smp.cores, &error_fatal); object_property_set_int(chip, "nr-threads", machine->smp.threads, @@ -1369,10 +1370,10 @@ static void pnv_chip_quad_realize(Pnv9Chip *chip9, Error **errp) sizeof(*eq), TYPE_PNV_QUAD, &error_fatal, NULL); - object_property_set_int(OBJECT(eq), "id", core_id, &error_fatal); + object_property_set_int(OBJECT(eq), "quad-id", core_id, &error_fatal); qdev_realize(DEVICE(eq), NULL, &error_fatal); - pnv_xscom_add_subregion(chip, PNV9_XSCOM_EQ_BASE(eq->id), + pnv_xscom_add_subregion(chip, PNV9_XSCOM_EQ_BASE(eq->quad_id), &eq->xscom_regs); } } diff --git a/hw/ppc/pnv_core.c b/hw/ppc/pnv_core.c index 4de8414df2..19e8eb885f 100644 --- a/hw/ppc/pnv_core.c +++ b/hw/ppc/pnv_core.c @@ -407,13 +407,13 @@ static void pnv_quad_realize(DeviceState *dev, Error **errp) PnvQuad *eq = PNV_QUAD(dev); char name[32]; - snprintf(name, sizeof(name), "xscom-quad.%d", eq->id); + snprintf(name, sizeof(name), "xscom-quad.%d", eq->quad_id); pnv_xscom_region_init(&eq->xscom_regs, OBJECT(dev), &pnv_quad_xscom_ops, eq, name, PNV9_XSCOM_EQ_SIZE); } static Property pnv_quad_properties[] = { - DEFINE_PROP_UINT32("id", PnvQuad, id, 0), + DEFINE_PROP_UINT32("quad-id", PnvQuad, quad_id, 0), DEFINE_PROP_END_OF_LIST(), }; diff --git a/hw/ppc/pnv_xscom.c b/hw/ppc/pnv_xscom.c index faa488e311..9ce018dbc2 100644 --- a/hw/ppc/pnv_xscom.c +++ b/hw/ppc/pnv_xscom.c @@ -284,6 +284,10 @@ int pnv_dt_xscom(PnvChip *chip, void *fdt, int root_offset, _FDT(xscom_offset); g_free(name); _FDT((fdt_setprop_cell(fdt, xscom_offset, "ibm,chip-id", chip->chip_id))); + /* + * On P10, the xscom bus id has been deprecated and the chip id is + * calculated from the "Primary topology table index". See skiboot. + */ _FDT((fdt_setprop_cell(fdt, xscom_offset, "ibm,primary-topology-index", chip->chip_id))); _FDT((fdt_setprop_cell(fdt, xscom_offset, "#address-cells", 1))); diff --git a/hw/ppc/ppc.c b/hw/ppc/ppc.c index 7375bf4fa9..f5d012f860 100644 --- a/hw/ppc/ppc.c +++ b/hw/ppc/ppc.c @@ -37,22 +37,6 @@ #include "migration/vmstate.h" #include "trace.h" -//#define PPC_DEBUG_IRQ -//#define PPC_DEBUG_TB - -#ifdef PPC_DEBUG_IRQ -# define LOG_IRQ(...) qemu_log_mask(CPU_LOG_INT, ## __VA_ARGS__) -#else -# define LOG_IRQ(...) do { } while (0) -#endif - - -#ifdef PPC_DEBUG_TB -# define LOG_TB(...) qemu_log(__VA_ARGS__) -#else -# define LOG_TB(...) 
do { } while (0) -#endif - static void cpu_ppc_tb_stop (CPUPPCState *env); static void cpu_ppc_tb_start (CPUPPCState *env); @@ -86,9 +70,8 @@ void ppc_set_irq(PowerPCCPU *cpu, int n_IRQ, int level) } - LOG_IRQ("%s: %p n_IRQ %d level %d => pending %08" PRIx32 - "req %08x\n", __func__, env, n_IRQ, level, - env->pending_interrupts, CPU(cpu)->interrupt_request); + trace_ppc_irq_set_exit(env, n_IRQ, level, env->pending_interrupts, + CPU(cpu)->interrupt_request); if (locked) { qemu_mutex_unlock_iothread(); @@ -102,8 +85,8 @@ static void ppc6xx_set_irq(void *opaque, int pin, int level) CPUPPCState *env = &cpu->env; int cur_level; - LOG_IRQ("%s: env %p pin %d level %d\n", __func__, - env, pin, level); + trace_ppc_irq_set(env, pin, level); + cur_level = (env->irq_input_state >> pin) & 1; /* Don't generate spurious events */ if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) { @@ -112,8 +95,7 @@ static void ppc6xx_set_irq(void *opaque, int pin, int level) switch (pin) { case PPC6xx_INPUT_TBEN: /* Level sensitive - active high */ - LOG_IRQ("%s: %s the time base\n", - __func__, level ? "start" : "stop"); + trace_ppc_irq_set_state("time base", level); if (level) { cpu_ppc_tb_start(env); } else { @@ -122,14 +104,12 @@ static void ppc6xx_set_irq(void *opaque, int pin, int level) break; case PPC6xx_INPUT_INT: /* Level sensitive - active high */ - LOG_IRQ("%s: set the external IRQ state to %d\n", - __func__, level); + trace_ppc_irq_set_state("external IRQ", level); ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level); break; case PPC6xx_INPUT_SMI: /* Level sensitive - active high */ - LOG_IRQ("%s: set the SMI IRQ state to %d\n", - __func__, level); + trace_ppc_irq_set_state("SMI IRQ", level); ppc_set_irq(cpu, PPC_INTERRUPT_SMI, level); break; case PPC6xx_INPUT_MCP: @@ -138,8 +118,7 @@ static void ppc6xx_set_irq(void *opaque, int pin, int level) * 603/604/740/750: check HID0[EMCP] */ if (cur_level == 1 && level == 0) { - LOG_IRQ("%s: raise machine check state\n", - __func__); + trace_ppc_irq_set_state("machine check", 1); ppc_set_irq(cpu, PPC_INTERRUPT_MCK, 1); } break; @@ -148,26 +127,23 @@ static void ppc6xx_set_irq(void *opaque, int pin, int level) /* XXX: TODO: relay the signal to CKSTP_OUT pin */ /* XXX: Note that the only way to restart the CPU is to reset it */ if (level) { - LOG_IRQ("%s: stop the CPU\n", __func__); + trace_ppc_irq_cpu("stop"); cs->halted = 1; } break; case PPC6xx_INPUT_HRESET: /* Level sensitive - active low */ if (level) { - LOG_IRQ("%s: reset the CPU\n", __func__); + trace_ppc_irq_reset("CPU"); cpu_interrupt(cs, CPU_INTERRUPT_RESET); } break; case PPC6xx_INPUT_SRESET: - LOG_IRQ("%s: set the RESET IRQ state to %d\n", - __func__, level); + trace_ppc_irq_set_state("RESET IRQ", level); ppc_set_irq(cpu, PPC_INTERRUPT_RESET, level); break; default: - /* Unknown pin - do nothing */ - LOG_IRQ("%s: unknown IRQ pin %d\n", __func__, pin); - return; + g_assert_not_reached(); } if (level) env->irq_input_state |= 1 << pin; @@ -192,8 +168,8 @@ static void ppc970_set_irq(void *opaque, int pin, int level) CPUPPCState *env = &cpu->env; int cur_level; - LOG_IRQ("%s: env %p pin %d level %d\n", __func__, - env, pin, level); + trace_ppc_irq_set(env, pin, level); + cur_level = (env->irq_input_state >> pin) & 1; /* Don't generate spurious events */ if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) { @@ -202,14 +178,12 @@ static void ppc970_set_irq(void *opaque, int pin, int level) switch (pin) { case PPC970_INPUT_INT: /* Level sensitive - active high */ - LOG_IRQ("%s: set the 
external IRQ state to %d\n", - __func__, level); + trace_ppc_irq_set_state("external IRQ", level); ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level); break; case PPC970_INPUT_THINT: /* Level sensitive - active high */ - LOG_IRQ("%s: set the SMI IRQ state to %d\n", __func__, - level); + trace_ppc_irq_set_state("SMI IRQ", level); ppc_set_irq(cpu, PPC_INTERRUPT_THERM, level); break; case PPC970_INPUT_MCP: @@ -218,8 +192,7 @@ static void ppc970_set_irq(void *opaque, int pin, int level) * 603/604/740/750: check HID0[EMCP] */ if (cur_level == 1 && level == 0) { - LOG_IRQ("%s: raise machine check state\n", - __func__); + trace_ppc_irq_set_state("machine check", 1); ppc_set_irq(cpu, PPC_INTERRUPT_MCK, 1); } break; @@ -227,10 +200,10 @@ static void ppc970_set_irq(void *opaque, int pin, int level) /* Level sensitive - active low */ /* XXX: TODO: relay the signal to CKSTP_OUT pin */ if (level) { - LOG_IRQ("%s: stop the CPU\n", __func__); + trace_ppc_irq_cpu("stop"); cs->halted = 1; } else { - LOG_IRQ("%s: restart the CPU\n", __func__); + trace_ppc_irq_cpu("restart"); cs->halted = 0; qemu_cpu_kick(cs); } @@ -242,19 +215,15 @@ static void ppc970_set_irq(void *opaque, int pin, int level) } break; case PPC970_INPUT_SRESET: - LOG_IRQ("%s: set the RESET IRQ state to %d\n", - __func__, level); + trace_ppc_irq_set_state("RESET IRQ", level); ppc_set_irq(cpu, PPC_INTERRUPT_RESET, level); break; case PPC970_INPUT_TBEN: - LOG_IRQ("%s: set the TBEN state to %d\n", __func__, - level); + trace_ppc_irq_set_state("TBEN IRQ", level); /* XXX: TODO */ break; default: - /* Unknown pin - do nothing */ - LOG_IRQ("%s: unknown IRQ pin %d\n", __func__, pin); - return; + g_assert_not_reached(); } if (level) env->irq_input_state |= 1 << pin; @@ -276,20 +245,16 @@ static void power7_set_irq(void *opaque, int pin, int level) { PowerPCCPU *cpu = opaque; - LOG_IRQ("%s: env %p pin %d level %d\n", __func__, - &cpu->env, pin, level); + trace_ppc_irq_set(&cpu->env, pin, level); switch (pin) { case POWER7_INPUT_INT: /* Level sensitive - active high */ - LOG_IRQ("%s: set the external IRQ state to %d\n", - __func__, level); + trace_ppc_irq_set_state("external IRQ", level); ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level); break; default: - /* Unknown pin - do nothing */ - LOG_IRQ("%s: unknown IRQ pin %d\n", __func__, pin); - return; + g_assert_not_reached(); } } @@ -306,25 +271,21 @@ static void power9_set_irq(void *opaque, int pin, int level) { PowerPCCPU *cpu = opaque; - LOG_IRQ("%s: env %p pin %d level %d\n", __func__, - &cpu->env, pin, level); + trace_ppc_irq_set(&cpu->env, pin, level); switch (pin) { case POWER9_INPUT_INT: /* Level sensitive - active high */ - LOG_IRQ("%s: set the external IRQ state to %d\n", - __func__, level); + trace_ppc_irq_set_state("external IRQ", level); ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level); break; case POWER9_INPUT_HINT: /* Level sensitive - active high */ - LOG_IRQ("%s: set the external IRQ state to %d\n", - __func__, level); + trace_ppc_irq_set_state("HV external IRQ", level); ppc_set_irq(cpu, PPC_INTERRUPT_HVIRT, level); break; default: - /* Unknown pin - do nothing */ - LOG_IRQ("%s: unknown IRQ pin %d\n", __func__, pin); + g_assert_not_reached(); return; } } @@ -401,8 +362,8 @@ static void ppc40x_set_irq(void *opaque, int pin, int level) CPUPPCState *env = &cpu->env; int cur_level; - LOG_IRQ("%s: env %p pin %d level %d\n", __func__, - env, pin, level); + trace_ppc_irq_set(env, pin, level); + cur_level = (env->irq_input_state >> pin) & 1; /* Don't generate spurious events */ if ((cur_level == 1 && level == 0) || 
(cur_level == 0 && level != 0)) { @@ -411,57 +372,51 @@ static void ppc40x_set_irq(void *opaque, int pin, int level) switch (pin) { case PPC40x_INPUT_RESET_SYS: if (level) { - LOG_IRQ("%s: reset the PowerPC system\n", - __func__); + trace_ppc_irq_reset("system"); ppc40x_system_reset(cpu); } break; case PPC40x_INPUT_RESET_CHIP: if (level) { - LOG_IRQ("%s: reset the PowerPC chip\n", __func__); + trace_ppc_irq_reset("chip"); ppc40x_chip_reset(cpu); } break; case PPC40x_INPUT_RESET_CORE: /* XXX: TODO: update DBSR[MRR] */ if (level) { - LOG_IRQ("%s: reset the PowerPC core\n", __func__); + trace_ppc_irq_reset("core"); ppc40x_core_reset(cpu); } break; case PPC40x_INPUT_CINT: /* Level sensitive - active high */ - LOG_IRQ("%s: set the critical IRQ state to %d\n", - __func__, level); + trace_ppc_irq_set_state("critical IRQ", level); ppc_set_irq(cpu, PPC_INTERRUPT_CEXT, level); break; case PPC40x_INPUT_INT: /* Level sensitive - active high */ - LOG_IRQ("%s: set the external IRQ state to %d\n", - __func__, level); + trace_ppc_irq_set_state("external IRQ", level); ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level); break; case PPC40x_INPUT_HALT: /* Level sensitive - active low */ if (level) { - LOG_IRQ("%s: stop the CPU\n", __func__); + trace_ppc_irq_cpu("stop"); cs->halted = 1; } else { - LOG_IRQ("%s: restart the CPU\n", __func__); + trace_ppc_irq_cpu("restart"); cs->halted = 0; qemu_cpu_kick(cs); } break; case PPC40x_INPUT_DEBUG: /* Level sensitive - active high */ - LOG_IRQ("%s: set the debug pin state to %d\n", - __func__, level); + trace_ppc_irq_set_state("debug pin", level); ppc_set_irq(cpu, PPC_INTERRUPT_DEBUG, level); break; default: - /* Unknown pin - do nothing */ - LOG_IRQ("%s: unknown IRQ pin %d\n", __func__, pin); - return; + g_assert_not_reached(); } if (level) env->irq_input_state |= 1 << pin; @@ -485,47 +440,41 @@ static void ppce500_set_irq(void *opaque, int pin, int level) CPUPPCState *env = &cpu->env; int cur_level; - LOG_IRQ("%s: env %p pin %d level %d\n", __func__, - env, pin, level); + trace_ppc_irq_set(env, pin, level); + cur_level = (env->irq_input_state >> pin) & 1; /* Don't generate spurious events */ if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) { switch (pin) { case PPCE500_INPUT_MCK: if (level) { - LOG_IRQ("%s: reset the PowerPC system\n", - __func__); + trace_ppc_irq_reset("system"); qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); } break; case PPCE500_INPUT_RESET_CORE: if (level) { - LOG_IRQ("%s: reset the PowerPC core\n", __func__); + trace_ppc_irq_reset("core"); ppc_set_irq(cpu, PPC_INTERRUPT_MCK, level); } break; case PPCE500_INPUT_CINT: /* Level sensitive - active high */ - LOG_IRQ("%s: set the critical IRQ state to %d\n", - __func__, level); + trace_ppc_irq_set_state("critical IRQ", level); ppc_set_irq(cpu, PPC_INTERRUPT_CEXT, level); break; case PPCE500_INPUT_INT: /* Level sensitive - active high */ - LOG_IRQ("%s: set the core IRQ state to %d\n", - __func__, level); + trace_ppc_irq_set_state("core IRQ", level); ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level); break; case PPCE500_INPUT_DEBUG: /* Level sensitive - active high */ - LOG_IRQ("%s: set the debug pin state to %d\n", - __func__, level); + trace_ppc_irq_set_state("debug pin", level); ppc_set_irq(cpu, PPC_INTERRUPT_DEBUG, level); break; default: - /* Unknown pin - do nothing */ - LOG_IRQ("%s: unknown IRQ pin %d\n", __func__, pin); - return; + g_assert_not_reached(); } if (level) env->irq_input_state |= 1 << pin; @@ -576,7 +525,7 @@ uint64_t cpu_ppc_load_tbl (CPUPPCState *env) } tb = 
cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->tb_offset); - LOG_TB("%s: tb %016" PRIx64 "\n", __func__, tb); + trace_ppc_tb_load(tb); return tb; } @@ -587,7 +536,7 @@ static inline uint32_t _cpu_ppc_load_tbu(CPUPPCState *env) uint64_t tb; tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->tb_offset); - LOG_TB("%s: tb %016" PRIx64 "\n", __func__, tb); + trace_ppc_tb_load(tb); return tb >> 32; } @@ -607,8 +556,7 @@ static inline void cpu_ppc_store_tb(ppc_tb_t *tb_env, uint64_t vmclk, *tb_offsetp = value - muldiv64(vmclk, tb_env->tb_freq, NANOSECONDS_PER_SECOND); - LOG_TB("%s: tb %016" PRIx64 " offset %08" PRIx64 "\n", - __func__, value, *tb_offsetp); + trace_ppc_tb_store(value, *tb_offsetp); } void cpu_ppc_store_tbl (CPUPPCState *env, uint32_t value) @@ -644,7 +592,7 @@ uint64_t cpu_ppc_load_atbl (CPUPPCState *env) uint64_t tb; tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->atb_offset); - LOG_TB("%s: tb %016" PRIx64 "\n", __func__, tb); + trace_ppc_tb_load(tb); return tb; } @@ -655,7 +603,7 @@ uint32_t cpu_ppc_load_atbu (CPUPPCState *env) uint64_t tb; tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->atb_offset); - LOG_TB("%s: tb %016" PRIx64 "\n", __func__, tb); + trace_ppc_tb_load(tb); return tb >> 32; } @@ -774,7 +722,7 @@ static inline int64_t _cpu_ppc_load_decr(CPUPPCState *env, uint64_t next) } else { decr = -muldiv64(-diff, tb_env->decr_freq, NANOSECONDS_PER_SECOND); } - LOG_TB("%s: %016" PRIx64 "\n", __func__, decr); + trace_ppc_decr_load(decr); return decr; } @@ -833,7 +781,7 @@ uint64_t cpu_ppc_load_purr (CPUPPCState *env) static inline void cpu_ppc_decr_excp(PowerPCCPU *cpu) { /* Raise it */ - LOG_TB("raise decrementer exception\n"); + trace_ppc_decr_excp("raise"); ppc_set_irq(cpu, PPC_INTERRUPT_DECR, 1); } @@ -847,7 +795,7 @@ static inline void cpu_ppc_hdecr_excp(PowerPCCPU *cpu) CPUPPCState *env = &cpu->env; /* Raise it */ - LOG_TB("raise hv decrementer exception\n"); + trace_ppc_decr_excp("raise HV"); /* The architecture specifies that we don't deliver HDEC * interrupts in a PM state. Not only they don't cause a @@ -873,17 +821,14 @@ static void __cpu_ppc_store_decr(PowerPCCPU *cpu, uint64_t *nextp, CPUPPCState *env = &cpu->env; ppc_tb_t *tb_env = env->tb_env; uint64_t now, next; - bool negative; + int64_t signed_value; + int64_t signed_decr; /* Truncate value to decr_width and sign extend for simplicity */ - value &= ((1ULL << nr_bits) - 1); - negative = !!(value & (1ULL << (nr_bits - 1))); - if (negative) { - value |= (0xFFFFFFFFULL << nr_bits); - } + signed_value = sextract64(value, 0, nr_bits); + signed_decr = sextract64(decr, 0, nr_bits); - LOG_TB("%s: " TARGET_FMT_lx " => " TARGET_FMT_lx "\n", __func__, - decr, value); + trace_ppc_decr_store(nr_bits, decr, value); if (kvm_enabled()) { /* KVM handles decrementer exceptions, we don't need our own timer */ @@ -903,16 +848,16 @@ static void __cpu_ppc_store_decr(PowerPCCPU *cpu, uint64_t *nextp, * On MSB edge based DEC implementations the MSB going from 0 -> 1 triggers * an edge interrupt, so raise it here too. 
*/ - if ((value < 3) || - ((tb_env->flags & PPC_DECR_UNDERFLOW_LEVEL) && negative) || - ((tb_env->flags & PPC_DECR_UNDERFLOW_TRIGGERED) && negative - && !(decr & (1ULL << (nr_bits - 1))))) { + if ((signed_value < 3) || + ((tb_env->flags & PPC_DECR_UNDERFLOW_LEVEL) && signed_value < 0) || + ((tb_env->flags & PPC_DECR_UNDERFLOW_TRIGGERED) && signed_value < 0 + && signed_decr >= 0)) { (*raise_excp)(cpu); return; } /* On MSB level based systems a 0 for the MSB stops interrupt delivery */ - if (!negative && (tb_env->flags & PPC_DECR_UNDERFLOW_LEVEL)) { + if (signed_value >= 0 && (tb_env->flags & PPC_DECR_UNDERFLOW_LEVEL)) { (*lower_excp)(cpu); } @@ -1211,9 +1156,8 @@ static void cpu_4xx_fit_cb (void *opaque) if ((env->spr[SPR_40x_TCR] >> 23) & 0x1) { ppc_set_irq(cpu, PPC_INTERRUPT_FIT, 1); } - LOG_TB("%s: ir %d TCR " TARGET_FMT_lx " TSR " TARGET_FMT_lx "\n", __func__, - (int)((env->spr[SPR_40x_TCR] >> 23) & 0x1), - env->spr[SPR_40x_TCR], env->spr[SPR_40x_TSR]); + trace_ppc4xx_fit((int)((env->spr[SPR_40x_TCR] >> 23) & 0x1), + env->spr[SPR_40x_TCR], env->spr[SPR_40x_TSR]); } /* Programmable interval timer */ @@ -1227,11 +1171,10 @@ static void start_stop_pit (CPUPPCState *env, ppc_tb_t *tb_env, int is_excp) !((env->spr[SPR_40x_TCR] >> 26) & 0x1) || (is_excp && !((env->spr[SPR_40x_TCR] >> 22) & 0x1))) { /* Stop PIT */ - LOG_TB("%s: stop PIT\n", __func__); + trace_ppc4xx_pit_stop(); timer_del(tb_env->decr_timer); } else { - LOG_TB("%s: start PIT %016" PRIx64 "\n", - __func__, ppc40x_timer->pit_reload); + trace_ppc4xx_pit_start(ppc40x_timer->pit_reload); now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); next = now + muldiv64(ppc40x_timer->pit_reload, NANOSECONDS_PER_SECOND, tb_env->decr_freq); @@ -1260,9 +1203,7 @@ static void cpu_4xx_pit_cb (void *opaque) ppc_set_irq(cpu, ppc40x_timer->decr_excp, 1); } start_stop_pit(env, tb_env, 1); - LOG_TB("%s: ar %d ir %d TCR " TARGET_FMT_lx " TSR " TARGET_FMT_lx " " - "%016" PRIx64 "\n", __func__, - (int)((env->spr[SPR_40x_TCR] >> 22) & 0x1), + trace_ppc4xx_pit((int)((env->spr[SPR_40x_TCR] >> 22) & 0x1), (int)((env->spr[SPR_40x_TCR] >> 26) & 0x1), env->spr[SPR_40x_TCR], env->spr[SPR_40x_TSR], ppc40x_timer->pit_reload); @@ -1302,8 +1243,7 @@ static void cpu_4xx_wdt_cb (void *opaque) next = now + muldiv64(next, NANOSECONDS_PER_SECOND, tb_env->decr_freq); if (next == now) next++; - LOG_TB("%s: TCR " TARGET_FMT_lx " TSR " TARGET_FMT_lx "\n", __func__, - env->spr[SPR_40x_TCR], env->spr[SPR_40x_TSR]); + trace_ppc4xx_wdt(env->spr[SPR_40x_TCR], env->spr[SPR_40x_TSR]); switch ((env->spr[SPR_40x_TSR] >> 30) & 0x3) { case 0x0: case 0x1: @@ -1346,7 +1286,7 @@ void store_40x_pit (CPUPPCState *env, target_ulong val) tb_env = env->tb_env; ppc40x_timer = tb_env->opaque; - LOG_TB("%s val" TARGET_FMT_lx "\n", __func__, val); + trace_ppc40x_store_pit(val); ppc40x_timer->pit_reload = val; start_stop_pit(env, tb_env, 0); } @@ -1361,8 +1301,7 @@ static void ppc_40x_set_tb_clk (void *opaque, uint32_t freq) CPUPPCState *env = opaque; ppc_tb_t *tb_env = env->tb_env; - LOG_TB("%s set new frequency to %" PRIu32 "\n", __func__, - freq); + trace_ppc40x_set_tb_clk(freq); tb_env->tb_freq = freq; tb_env->decr_freq = freq; /* XXX: we should also update all timers */ @@ -1381,7 +1320,7 @@ clk_setup_cb ppc_40x_timers_init (CPUPPCState *env, uint32_t freq, tb_env->tb_freq = freq; tb_env->decr_freq = freq; tb_env->opaque = ppc40x_timer; - LOG_TB("%s freq %" PRIu32 "\n", __func__, freq); + trace_ppc40x_timers_init(freq); if (ppc40x_timer != NULL) { /* We use decr timer for PIT */ tb_env->decr_timer = 
timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_4xx_pit_cb, env); diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c index d39fd4e644..b7bee5f4ff 100644 --- a/hw/ppc/spapr.c +++ b/hw/ppc/spapr.c @@ -29,6 +29,7 @@ #include "qemu/datadir.h" #include "qapi/error.h" #include "qapi/qapi-events-machine.h" +#include "qapi/qapi-events-qdev.h" #include "qapi/visitor.h" #include "sysemu/sysemu.h" #include "sysemu/hostmem.h" @@ -2752,6 +2753,11 @@ static void spapr_machine_init(MachineState *machine) spapr_ovec_set(spapr->ov5, OV5_FORM1_AFFINITY); + /* Do not advertise FORM2 NUMA support for pseries-6.1 and older */ + if (!smc->pre_6_2_numa_affinity) { + spapr_ovec_set(spapr->ov5, OV5_FORM2_AFFINITY); + } + /* advertise support for dedicated HP event source to guests */ if (spapr->use_hotplug_event_source) { spapr_ovec_set(spapr->ov5, OV5_HP_EVT); @@ -2773,39 +2779,6 @@ static void spapr_machine_init(MachineState *machine) /* init CPUs */ spapr_init_cpus(spapr); - /* - * check we don't have a memory-less/cpu-less NUMA node - * Firmware relies on the existing memory/cpu topology to provide the - * NUMA topology to the kernel. - * And the linux kernel needs to know the NUMA topology at start - * to be able to hotplug CPUs later. - */ - if (machine->numa_state->num_nodes) { - for (i = 0; i < machine->numa_state->num_nodes; ++i) { - /* check for memory-less node */ - if (machine->numa_state->nodes[i].node_mem == 0) { - CPUState *cs; - int found = 0; - /* check for cpu-less node */ - CPU_FOREACH(cs) { - PowerPCCPU *cpu = POWERPC_CPU(cs); - if (cpu->node_id == i) { - found = 1; - break; - } - } - /* memory-less and cpu-less node */ - if (!found) { - error_report( - "Memory-less/cpu-less nodes are not supported (node %d)", - i); - exit(1); - } - } - } - - } - spapr->gpu_numa_id = spapr_numa_initial_nvgpu_numa_id(machine); /* Init numa_assoc_array */ @@ -3686,11 +3659,18 @@ void spapr_memory_unplug_rollback(SpaprMachineState *spapr, DeviceState *dev) /* * Tell QAPI that something happened and the memory - * hotunplug wasn't successful. + * hotunplug wasn't successful. Keep sending + * MEM_UNPLUG_ERROR even while sending + * DEVICE_UNPLUG_GUEST_ERROR until the deprecation of + * MEM_UNPLUG_ERROR is due. */ qapi_error = g_strdup_printf("Memory hotunplug rejected by the guest " "for device %s", dev->id); - qapi_event_send_mem_unplug_error(dev->id, qapi_error); + + qapi_event_send_mem_unplug_error(dev->id ? : "", qapi_error); + + qapi_event_send_device_unplug_guest_error(!!dev->id, dev->id, + dev->canonical_path); } /* Callback to be called during DRC release. 
*/ @@ -4700,8 +4680,11 @@ DEFINE_SPAPR_MACHINE(6_2, "6.2", true); */ static void spapr_machine_6_1_class_options(MachineClass *mc) { + SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); + spapr_machine_6_2_class_options(mc); compat_props_add(mc->compat_props, hw_compat_6_1, hw_compat_6_1_len); + smc->pre_6_2_numa_affinity = true; } DEFINE_SPAPR_MACHINE(6_1, "6.1", false); diff --git a/hw/ppc/spapr_cpu_core.c b/hw/ppc/spapr_cpu_core.c index 4f316a6f9d..58e7341cb7 100644 --- a/hw/ppc/spapr_cpu_core.c +++ b/hw/ppc/spapr_cpu_core.c @@ -382,6 +382,7 @@ static const TypeInfo spapr_cpu_core_type_infos[] = { DEFINE_SPAPR_CPU_CORE_TYPE("power9_v1.0"), DEFINE_SPAPR_CPU_CORE_TYPE("power9_v2.0"), DEFINE_SPAPR_CPU_CORE_TYPE("power10_v1.0"), + DEFINE_SPAPR_CPU_CORE_TYPE("power10_v2.0"), #ifdef CONFIG_KVM DEFINE_SPAPR_CPU_CORE_TYPE("host"), #endif diff --git a/hw/ppc/spapr_drc.c b/hw/ppc/spapr_drc.c index a2f2634601..f8ac0a10df 100644 --- a/hw/ppc/spapr_drc.c +++ b/hw/ppc/spapr_drc.c @@ -17,6 +17,8 @@ #include "hw/ppc/spapr_drc.h" #include "qom/object.h" #include "migration/vmstate.h" +#include "qapi/error.h" +#include "qapi/qapi-events-qdev.h" #include "qapi/visitor.h" #include "qemu/error-report.h" #include "hw/ppc/spapr.h" /* for RTAS return codes */ @@ -167,13 +169,15 @@ static uint32_t drc_unisolate_logical(SpaprDrc *drc) } drc->unplug_requested = false; - error_report("Device hotunplug rejected by the guest " - "for device %s", drc->dev->id); - /* - * TODO: send a QAPI DEVICE_UNPLUG_ERROR event when - * it is implemented. - */ + if (drc->dev->id) { + error_report("Device hotunplug rejected by the guest " + "for device %s", drc->dev->id); + } + + qapi_event_send_device_unplug_guest_error(!!drc->dev->id, + drc->dev->id, + drc->dev->canonical_path); } return RTAS_OUT_SUCCESS; /* Nothing to do */ diff --git a/hw/ppc/spapr_hcall.c b/hw/ppc/spapr_hcall.c index 0e9a5b2e40..222c1b6bbd 100644 --- a/hw/ppc/spapr_hcall.c +++ b/hw/ppc/spapr_hcall.c @@ -17,6 +17,7 @@ #include "kvm_ppc.h" #include "hw/ppc/fdt.h" #include "hw/ppc/spapr_ovec.h" +#include "hw/ppc/spapr_numa.h" #include "mmu-book3s-v3.h" #include "hw/mem/memory-device.h" @@ -1198,6 +1199,12 @@ target_ulong do_client_architecture_support(PowerPCCPU *cpu, spapr_ovec_cleanup(ov1_guest); /* + * Check for NUMA affinity conditions now that we know which NUMA + * affinity the guest will use. + */ + spapr_numa_associativity_check(spapr); + + /* * Ensure the guest asks for an interrupt mode we support; * otherwise terminate the boot. */ diff --git a/hw/ppc/spapr_numa.c b/hw/ppc/spapr_numa.c index 779f18b994..5822938448 100644 --- a/hw/ppc/spapr_numa.c +++ b/hw/ppc/spapr_numa.c @@ -19,13 +19,51 @@ /* Moved from hw/ppc/spapr_pci_nvlink2.c */ #define SPAPR_GPU_NUMA_ID (cpu_to_be32(1)) -static bool spapr_machine_using_legacy_numa(SpaprMachineState *spapr) +/* + * Retrieves max_dist_ref_points of the current NUMA affinity. + */ +static int get_max_dist_ref_points(SpaprMachineState *spapr) { - MachineState *machine = MACHINE(spapr); - SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine); + if (spapr_ovec_test(spapr->ov5_cas, OV5_FORM2_AFFINITY)) { + return FORM2_DIST_REF_POINTS; + } + + return FORM1_DIST_REF_POINTS; +} + +/* + * Retrieves numa_assoc_size of the current NUMA affinity. + */ +static int get_numa_assoc_size(SpaprMachineState *spapr) +{ + if (spapr_ovec_test(spapr->ov5_cas, OV5_FORM2_AFFINITY)) { + return FORM2_NUMA_ASSOC_SIZE; + } + + return FORM1_NUMA_ASSOC_SIZE; +} + +/* + * Retrieves vcpu_assoc_size of the current NUMA affinity. 
+ * + * vcpu_assoc_size is the size of ibm,associativity array + * for CPUs, which has an extra element (vcpu_id) in the end. + */ +static int get_vcpu_assoc_size(SpaprMachineState *spapr) +{ + return get_numa_assoc_size(spapr) + 1; +} - return smc->pre_5_2_numa_associativity || - machine->numa_state->num_nodes <= 1; +/* + * Retrieves the ibm,associativity array of NUMA node 'node_id' + * for the current NUMA affinity. + */ +static const uint32_t *get_associativity(SpaprMachineState *spapr, int node_id) +{ + if (spapr_ovec_test(spapr->ov5_cas, OV5_FORM2_AFFINITY)) { + return spapr->FORM2_assoc_array[node_id]; + } + return spapr->FORM1_assoc_array[node_id]; } static bool spapr_numa_is_symmetrical(MachineState *ms) @@ -92,12 +130,23 @@ static uint8_t spapr_numa_get_numa_level(uint8_t distance) return 0; } -static void spapr_numa_define_associativity_domains(SpaprMachineState *spapr) +static void spapr_numa_define_FORM1_domains(SpaprMachineState *spapr) { MachineState *ms = MACHINE(spapr); NodeInfo *numa_info = ms->numa_state->nodes; int nb_numa_nodes = ms->numa_state->num_nodes; - int src, dst, i; + int src, dst, i, j; + + /* + * Fill all associativity domains of non-zero NUMA nodes with + * node_id. This is required because the default value (0) is + * considered a match with associativity domains of node 0. + */ + for (i = 1; i < nb_numa_nodes; i++) { + for (j = 1; j < FORM1_DIST_REF_POINTS; j++) { + spapr->FORM1_assoc_array[i][j] = cpu_to_be32(i); + } + } for (src = 0; src < nb_numa_nodes; src++) { for (dst = src; dst < nb_numa_nodes; dst++) { @@ -132,7 +181,7 @@ static void spapr_numa_define_associativity_domains(SpaprMachineState *spapr) * * The Linux kernel will assume that the distance between src and * dst, in this case of no match, is 10 (local distance) doubled - * for each NUMA it didn't match. We have MAX_DISTANCE_REF_POINTS + * for each NUMA it didn't match. We have FORM1_DIST_REF_POINTS * levels (4), so this gives us 10*2*2*2*2 = 160. * * This logic can be seen in the Linux kernel source code, as of @@ -147,25 +196,69 @@ static void spapr_numa_define_associativity_domains(SpaprMachineState *spapr) * and going up to 0x1. */ for (i = n_level; i > 0; i--) { - assoc_src = spapr->numa_assoc_array[src][i]; - spapr->numa_assoc_array[dst][i] = assoc_src; + assoc_src = spapr->FORM1_assoc_array[src][i]; + spapr->FORM1_assoc_array[dst][i] = assoc_src; } } } } -void spapr_numa_associativity_init(SpaprMachineState *spapr, - MachineState *machine) +static void spapr_numa_FORM1_affinity_check(MachineState *machine) +{ + int i; + + /* + * Check we don't have a memory-less/cpu-less NUMA node + * Firmware relies on the existing memory/cpu topology to provide the + * NUMA topology to the kernel. + * And the linux kernel needs to know the NUMA topology at start + * to be able to hotplug CPUs later. 
+ */ + if (machine->numa_state->num_nodes) { + for (i = 0; i < machine->numa_state->num_nodes; ++i) { + /* check for memory-less node */ + if (machine->numa_state->nodes[i].node_mem == 0) { + CPUState *cs; + int found = 0; + /* check for cpu-less node */ + CPU_FOREACH(cs) { + PowerPCCPU *cpu = POWERPC_CPU(cs); + if (cpu->node_id == i) { + found = 1; + break; + } + } + /* memory-less and cpu-less node */ + if (!found) { + error_report( +"Memory-less/cpu-less nodes are not supported with FORM1 NUMA (node %d)", i); + exit(EXIT_FAILURE); + } + } + } + } + + if (!spapr_numa_is_symmetrical(machine)) { + error_report( +"Asymmetrical NUMA topologies aren't supported in the pSeries machine using FORM1 NUMA"); + exit(EXIT_FAILURE); + } +} + +/* + * Set NUMA machine state data based on FORM1 affinity semantics. + */ +static void spapr_numa_FORM1_affinity_init(SpaprMachineState *spapr, + MachineState *machine) { SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr); int nb_numa_nodes = machine->numa_state->num_nodes; int i, j, max_nodes_with_gpus; - bool using_legacy_numa = spapr_machine_using_legacy_numa(spapr); /* * For all associativity arrays: first position is the size, - * position MAX_DISTANCE_REF_POINTS is always the numa_id, + * position FORM1_DIST_REF_POINTS is always the numa_id, * represented by the index 'i'. * * This will break on sparse NUMA setups, when/if QEMU starts @@ -173,19 +266,8 @@ void spapr_numa_associativity_init(SpaprMachineState *spapr, * 'i' will be a valid node_id set by the user. */ for (i = 0; i < nb_numa_nodes; i++) { - spapr->numa_assoc_array[i][0] = cpu_to_be32(MAX_DISTANCE_REF_POINTS); - spapr->numa_assoc_array[i][MAX_DISTANCE_REF_POINTS] = cpu_to_be32(i); - - /* - * Fill all associativity domains of non-zero NUMA nodes with - * node_id. This is required because the default value (0) is - * considered a match with associativity domains of node 0. - */ - if (!using_legacy_numa && i != 0) { - for (j = 1; j < MAX_DISTANCE_REF_POINTS; j++) { - spapr->numa_assoc_array[i][j] = cpu_to_be32(i); - } - } + spapr->FORM1_assoc_array[i][0] = cpu_to_be32(FORM1_DIST_REF_POINTS); + spapr->FORM1_assoc_array[i][FORM1_DIST_REF_POINTS] = cpu_to_be32(i); } /* @@ -199,47 +281,95 @@ void spapr_numa_associativity_init(SpaprMachineState *spapr, max_nodes_with_gpus = nb_numa_nodes + NVGPU_MAX_NUM; for (i = nb_numa_nodes; i < max_nodes_with_gpus; i++) { - spapr->numa_assoc_array[i][0] = cpu_to_be32(MAX_DISTANCE_REF_POINTS); + spapr->FORM1_assoc_array[i][0] = cpu_to_be32(FORM1_DIST_REF_POINTS); - for (j = 1; j < MAX_DISTANCE_REF_POINTS; j++) { + for (j = 1; j < FORM1_DIST_REF_POINTS; j++) { uint32_t gpu_assoc = smc->pre_5_1_assoc_refpoints ? SPAPR_GPU_NUMA_ID : cpu_to_be32(i); - spapr->numa_assoc_array[i][j] = gpu_assoc; + spapr->FORM1_assoc_array[i][j] = gpu_assoc; } - spapr->numa_assoc_array[i][MAX_DISTANCE_REF_POINTS] = cpu_to_be32(i); + spapr->FORM1_assoc_array[i][FORM1_DIST_REF_POINTS] = cpu_to_be32(i); } /* - * Legacy NUMA guests (pseries-5.1 and older, or guests with only - * 1 NUMA node) will not benefit from anything we're going to do - * after this point. + * Guests pseries-5.1 and older use zeroed associativity domains, + * i.e. no domain definition based on NUMA distance input. + * + * The same applies to guests that have only one NUMA node.
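
The 10*2*2*2*2 = 160 fallback mentioned in the FORM1 domain-matching comments above is easy to sanity-check outside QEMU. The following stand-alone sketch reproduces the kernel-side rule as the comment states it (local distance 10, doubled once per unmatched FORM1 reference-point level); the helper name and the program itself are illustrative, not part of this series:

    #include <stdio.h>

    #define LOCAL_DISTANCE         10  /* kernel's local NUMA distance */
    #define FORM1_DIST_REF_POINTS  4   /* associativity levels, per the comment above */

    /* Fallback distance when 'unmatched' associativity levels failed to match */
    static int form1_fallback_distance(int unmatched)
    {
        int distance = LOCAL_DISTANCE;

        while (unmatched--) {
            distance *= 2;
        }
        return distance;
    }

    int main(void)
    {
        /* no level matches at all: 10 * 2^4 = 160, as in the comment above */
        printf("%d\n", form1_fallback_distance(FORM1_DIST_REF_POINTS));
        return 0;
    }
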
*/ - if (using_legacy_numa) { + if (smc->pre_5_2_numa_associativity || + machine->numa_state->num_nodes <= 1) { return; } - if (!spapr_numa_is_symmetrical(machine)) { - error_report("Asymmetrical NUMA topologies aren't supported " - "in the pSeries machine"); - exit(EXIT_FAILURE); + spapr_numa_define_FORM1_domains(spapr); +} + +/* + * Init NUMA FORM2 machine state data + */ +static void spapr_numa_FORM2_affinity_init(SpaprMachineState *spapr) +{ + int i; + + /* + * For all resources but CPUs, FORM2 associativity arrays will + * be a size 2 array with the following format: + * + * ibm,associativity = {1, numa_id} + * + * CPUs will write an additional 'vcpu_id' on top of the arrays + * being initialized here. 'numa_id' is represented by the + * index 'i' of the loop. + * + * Given that this initialization is also valid for GPU associativity + * arrays, handle everything in one single step by populating the + * arrays up to NUMA_NODES_MAX_NUM. + */ + for (i = 0; i < NUMA_NODES_MAX_NUM; i++) { + spapr->FORM2_assoc_array[i][0] = cpu_to_be32(1); + spapr->FORM2_assoc_array[i][1] = cpu_to_be32(i); } +} - spapr_numa_define_associativity_domains(spapr); +void spapr_numa_associativity_init(SpaprMachineState *spapr, + MachineState *machine) +{ + spapr_numa_FORM1_affinity_init(spapr, machine); + spapr_numa_FORM2_affinity_init(spapr); +} + +void spapr_numa_associativity_check(SpaprMachineState *spapr) +{ + /* + * FORM2 does not have any restrictions we need to handle + * at CAS time, for now. + */ + if (spapr_ovec_test(spapr->ov5_cas, OV5_FORM2_AFFINITY)) { + return; + } + + spapr_numa_FORM1_affinity_check(MACHINE(spapr)); } void spapr_numa_write_associativity_dt(SpaprMachineState *spapr, void *fdt, int offset, int nodeid) { + const uint32_t *associativity = get_associativity(spapr, nodeid); + _FDT((fdt_setprop(fdt, offset, "ibm,associativity", - spapr->numa_assoc_array[nodeid], - sizeof(spapr->numa_assoc_array[nodeid])))); + associativity, + get_numa_assoc_size(spapr) * sizeof(uint32_t)))); } static uint32_t *spapr_numa_get_vcpu_assoc(SpaprMachineState *spapr, PowerPCCPU *cpu) { - uint32_t *vcpu_assoc = g_new(uint32_t, VCPU_ASSOC_SIZE); + const uint32_t *associativity = get_associativity(spapr, cpu->node_id); + int max_distance_ref_points = get_max_dist_ref_points(spapr); + int vcpu_assoc_size = get_vcpu_assoc_size(spapr); + uint32_t *vcpu_assoc = g_new(uint32_t, vcpu_assoc_size); int index = spapr_get_vcpu_id(cpu); /* @@ -248,10 +378,10 @@ static uint32_t *spapr_numa_get_vcpu_assoc(SpaprMachineState *spapr, * 0, put cpu_id last, then copy the remaining associativity * domains. 
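
Putting the two comments above together: under FORM2 a per-vCPU ibm,associativity property ends up as three big-endian cells, {2, numa_id, vcpu_id}, i.e. the {1, numa_id} node array with the vcpu_id appended and the length cell bumped. A stand-alone sketch of that layout, with htonl() standing in for QEMU's cpu_to_be32() and a hypothetical helper name:

    #include <stdint.h>
    #include <stdio.h>
    #include <arpa/inet.h>

    /* Build the FORM2 per-vCPU ibm,associativity cells: length, numa_id, vcpu_id */
    static void form2_vcpu_assoc(uint32_t out[3], uint32_t numa_id, uint32_t vcpu_id)
    {
        out[0] = htonl(2);       /* two entries follow: numa_id + vcpu_id */
        out[1] = htonl(numa_id); /* the primary domain is the numa_id itself */
        out[2] = htonl(vcpu_id); /* vcpu_id always goes last */
    }

    int main(void)
    {
        uint32_t assoc[3];

        form2_vcpu_assoc(assoc, 1, 8); /* vCPU 8 on NUMA node 1 */
        printf("{ %u, %u, %u }\n", (unsigned)ntohl(assoc[0]),
               (unsigned)ntohl(assoc[1]), (unsigned)ntohl(assoc[2]));
        return 0;
    }
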
*/ - vcpu_assoc[0] = cpu_to_be32(MAX_DISTANCE_REF_POINTS + 1); - vcpu_assoc[VCPU_ASSOC_SIZE - 1] = cpu_to_be32(index); - memcpy(vcpu_assoc + 1, spapr->numa_assoc_array[cpu->node_id] + 1, - (VCPU_ASSOC_SIZE - 2) * sizeof(uint32_t)); + vcpu_assoc[0] = cpu_to_be32(max_distance_ref_points + 1); + vcpu_assoc[vcpu_assoc_size - 1] = cpu_to_be32(index); + memcpy(vcpu_assoc + 1, associativity + 1, + (vcpu_assoc_size - 2) * sizeof(uint32_t)); return vcpu_assoc; } @@ -260,12 +390,13 @@ int spapr_numa_fixup_cpu_dt(SpaprMachineState *spapr, void *fdt, int offset, PowerPCCPU *cpu) { g_autofree uint32_t *vcpu_assoc = NULL; + int vcpu_assoc_size = get_vcpu_assoc_size(spapr); vcpu_assoc = spapr_numa_get_vcpu_assoc(spapr, cpu); /* Advertise NUMA via ibm,associativity */ return fdt_setprop(fdt, offset, "ibm,associativity", vcpu_assoc, - VCPU_ASSOC_SIZE * sizeof(uint32_t)); + vcpu_assoc_size * sizeof(uint32_t)); } @@ -273,27 +404,28 @@ int spapr_numa_write_assoc_lookup_arrays(SpaprMachineState *spapr, void *fdt, int offset) { MachineState *machine = MACHINE(spapr); + int max_distance_ref_points = get_max_dist_ref_points(spapr); int nb_numa_nodes = machine->numa_state->num_nodes; int nr_nodes = nb_numa_nodes ? nb_numa_nodes : 1; uint32_t *int_buf, *cur_index, buf_len; int ret, i; /* ibm,associativity-lookup-arrays */ - buf_len = (nr_nodes * MAX_DISTANCE_REF_POINTS + 2) * sizeof(uint32_t); + buf_len = (nr_nodes * max_distance_ref_points + 2) * sizeof(uint32_t); cur_index = int_buf = g_malloc0(buf_len); int_buf[0] = cpu_to_be32(nr_nodes); /* Number of entries per associativity list */ - int_buf[1] = cpu_to_be32(MAX_DISTANCE_REF_POINTS); + int_buf[1] = cpu_to_be32(max_distance_ref_points); cur_index += 2; for (i = 0; i < nr_nodes; i++) { /* - * For the lookup-array we use the ibm,associativity array, - * from numa_assoc_array. without the first element (size). + * For the lookup-array we use the ibm,associativity array of the + * current NUMA affinity, without the first element (size). */ - uint32_t *associativity = spapr->numa_assoc_array[i]; + const uint32_t *associativity = get_associativity(spapr, i); memcpy(cur_index, ++associativity, - sizeof(uint32_t) * MAX_DISTANCE_REF_POINTS); - cur_index += MAX_DISTANCE_REF_POINTS; + sizeof(uint32_t) * max_distance_ref_points); + cur_index += max_distance_ref_points; } ret = fdt_setprop(fdt, offset, "ibm,associativity-lookup-arrays", int_buf, (cur_index - int_buf) * sizeof(uint32_t)); @@ -302,12 +434,8 @@ int spapr_numa_write_assoc_lookup_arrays(SpaprMachineState *spapr, void *fdt, return ret; } -/* - * Helper that writes ibm,associativity-reference-points and - * max-associativity-domains in the RTAS pointed by @rtas - * in the DT @fdt. 
- */ -void spapr_numa_write_rtas_dt(SpaprMachineState *spapr, void *fdt, int rtas) +static void spapr_numa_FORM1_write_rtas_dt(SpaprMachineState *spapr, + void *fdt, int rtas) { MachineState *ms = MACHINE(spapr); SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr); @@ -329,7 +457,8 @@ void spapr_numa_write_rtas_dt(SpaprMachineState *spapr, void *fdt, int rtas) cpu_to_be32(maxdomain) }; - if (spapr_machine_using_legacy_numa(spapr)) { + if (smc->pre_5_2_numa_associativity || + ms->numa_state->num_nodes <= 1) { uint32_t legacy_refpoints[] = { cpu_to_be32(0x4), cpu_to_be32(0x4), @@ -365,6 +494,125 @@ void spapr_numa_write_rtas_dt(SpaprMachineState *spapr, void *fdt, int rtas) maxdomains, sizeof(maxdomains))); } +static void spapr_numa_FORM2_write_rtas_tables(SpaprMachineState *spapr, + void *fdt, int rtas) +{ + MachineState *ms = MACHINE(spapr); + NodeInfo *numa_info = ms->numa_state->nodes; + int nb_numa_nodes = ms->numa_state->num_nodes; + int distance_table_entries = nb_numa_nodes * nb_numa_nodes; + g_autofree uint32_t *lookup_index_table = NULL; + g_autofree uint8_t *distance_table = NULL; + int src, dst, i, distance_table_size; + + /* + * ibm,numa-lookup-index-table: array with length and a + * list of NUMA ids present in the guest. + */ + lookup_index_table = g_new0(uint32_t, nb_numa_nodes + 1); + lookup_index_table[0] = cpu_to_be32(nb_numa_nodes); + + for (i = 0; i < nb_numa_nodes; i++) { + lookup_index_table[i + 1] = cpu_to_be32(i); + } + + _FDT(fdt_setprop(fdt, rtas, "ibm,numa-lookup-index-table", + lookup_index_table, + (nb_numa_nodes + 1) * sizeof(uint32_t))); + + /* + * ibm,numa-distance-table: contains all node distances. The first + * element is the size of the table as uint32, followed + * by all the uint8 distances from the first NUMA node, then all + * distances from the second NUMA node and so on. + * + * ibm,numa-lookup-index-table is used by the guest to navigate this + * array because NUMA ids can be sparse (node 0 is the first, + * node 8 is the second ...). + */ + distance_table_size = distance_table_entries * sizeof(uint8_t) + + sizeof(uint32_t); + distance_table = g_new0(uint8_t, distance_table_size); + stl_be_p(distance_table, distance_table_entries); + + /* Skip the uint32_t array length at the start */ + i = sizeof(uint32_t); + + for (src = 0; src < nb_numa_nodes; src++) { + for (dst = 0; dst < nb_numa_nodes; dst++) { + /* + * We need to be explicit with the local distance + * value to cover the case where the user didn't add any + * NUMA nodes, but QEMU adds the default NUMA node without + * adding the numa_info to retrieve distance info from. + */ + if (src == dst) { + distance_table[i++] = NUMA_DISTANCE_MIN; + continue; + } + + distance_table[i++] = numa_info[src].distance[dst]; + } + } + + _FDT(fdt_setprop(fdt, rtas, "ibm,numa-distance-table", + distance_table, distance_table_size)); +} + +/* + * This helper could be folded into a single function with the + * FORM1 logic since we're setting the same DT values, with the + * difference being a call to spapr_numa_FORM2_write_rtas_tables() + * at the end. The separation was made to avoid clogging FORM1 code, + * which already has to deal with compat modes from previous + * QEMU machine types.
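
For reference, the ibm,numa-distance-table layout produced above (a big-endian uint32 entry count followed by row-major uint8 distances) can be decoded as sketched below. This is an illustrative reader that assumes dense node ids, leaving out the ibm,numa-lookup-index-table indirection a real guest would use for sparse ids:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <arpa/inet.h>

    /* Return the distance from src to dst, or 0 if the table is malformed */
    static unsigned numa_distance(const uint8_t *table, unsigned nb_nodes,
                                  unsigned src, unsigned dst)
    {
        uint32_t entries;

        memcpy(&entries, table, sizeof(entries));
        entries = ntohl(entries);

        if (src >= nb_nodes || dst >= nb_nodes || entries != nb_nodes * nb_nodes) {
            return 0;
        }

        /* skip the uint32 length cell, then index row-major */
        return table[sizeof(uint32_t) + src * nb_nodes + dst];
    }

    int main(void)
    {
        /* two nodes: local distance 10, remote distance 20 */
        const uint8_t table[] = {
            0x00, 0x00, 0x00, 0x04, /* cpu_to_be32(4) entries */
            10, 20,
            20, 10,
        };

        printf("d(0,1) = %u\n", numa_distance(table, 2, 0, 1)); /* prints 20 */
        return 0;
    }
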
+ */ +static void spapr_numa_FORM2_write_rtas_dt(SpaprMachineState *spapr, + void *fdt, int rtas) +{ + MachineState *ms = MACHINE(spapr); + uint32_t number_nvgpus_nodes = spapr->gpu_numa_id - + spapr_numa_initial_nvgpu_numa_id(ms); + + /* + * In FORM2, ibm,associativity-reference-points will point to + * the element in the ibm,associativity array that contains the + * primary domain index (for FORM2, the first element). + * + * This value (in our case, the numa-id) is then used as an index + * to retrieve all other attributes of the node (distance, + * bandwidth, latency) via ibm,numa-lookup-index-table and other + * ibm,numa-*-table properties. + */ + uint32_t refpoints[] = { cpu_to_be32(1) }; + + uint32_t maxdomain = ms->numa_state->num_nodes + number_nvgpus_nodes; + uint32_t maxdomains[] = { cpu_to_be32(1), cpu_to_be32(maxdomain) }; + + _FDT(fdt_setprop(fdt, rtas, "ibm,associativity-reference-points", + refpoints, sizeof(refpoints))); + + _FDT(fdt_setprop(fdt, rtas, "ibm,max-associativity-domains", + maxdomains, sizeof(maxdomains))); + + spapr_numa_FORM2_write_rtas_tables(spapr, fdt, rtas); +} + +/* + * Helper that writes ibm,associativity-reference-points and + * max-associativity-domains in the RTAS pointed by @rtas + * in the DT @fdt. + */ +void spapr_numa_write_rtas_dt(SpaprMachineState *spapr, void *fdt, int rtas) +{ + if (spapr_ovec_test(spapr->ov5_cas, OV5_FORM2_AFFINITY)) { + spapr_numa_FORM2_write_rtas_dt(spapr, fdt, rtas); + return; + } + + spapr_numa_FORM1_write_rtas_dt(spapr, fdt, rtas); +} + static target_ulong h_home_node_associativity(PowerPCCPU *cpu, SpaprMachineState *spapr, target_ulong opcode, @@ -375,6 +623,7 @@ static target_ulong h_home_node_associativity(PowerPCCPU *cpu, target_ulong procno = args[1]; PowerPCCPU *tcpu; int idx, assoc_idx; + int vcpu_assoc_size = get_vcpu_assoc_size(spapr); /* only support procno from H_REGISTER_VPA */ if (flags != 0x1) { @@ -393,7 +642,7 @@ static target_ulong h_home_node_associativity(PowerPCCPU *cpu, * 12 associativity domains for vcpus. Assert and bail if that's * not the case. */ - G_STATIC_ASSERT((VCPU_ASSOC_SIZE - 1) <= 12); + g_assert((vcpu_assoc_size - 1) <= 12); vcpu_assoc = spapr_numa_get_vcpu_assoc(spapr, tcpu); /* assoc_idx starts at 1 to skip associativity size */ @@ -414,9 +663,9 @@ static target_ulong h_home_node_associativity(PowerPCCPU *cpu, * macro. The ternary will fill the remaining registers with -1 * after we went through vcpu_assoc[]. */ - a = assoc_idx < VCPU_ASSOC_SIZE ? + a = assoc_idx < vcpu_assoc_size ? be32_to_cpu(vcpu_assoc[assoc_idx++]) : -1; - b = assoc_idx < VCPU_ASSOC_SIZE ? + b = assoc_idx < vcpu_assoc_size ? 
be32_to_cpu(vcpu_assoc[assoc_idx++]) : -1; args[idx] = ASSOCIATIVITY(a, b); diff --git a/hw/ppc/trace-events b/hw/ppc/trace-events index da6e74b80d..3bf43fa340 100644 --- a/hw/ppc/trace-events +++ b/hw/ppc/trace-events @@ -97,7 +97,27 @@ vof_claimed(uint64_t start, uint64_t end, uint64_t size) "0x%"PRIx64"..0x%"PRIx6 # ppc.c ppc_tb_adjust(uint64_t offs1, uint64_t offs2, int64_t diff, int64_t seconds) "adjusted from 0x%"PRIx64" to 0x%"PRIx64", diff %"PRId64" (%"PRId64"s)" - +ppc_tb_load(uint64_t tb) "tb 0x%016" PRIx64 +ppc_tb_store(uint64_t tb, uint64_t offset) "tb 0x%016" PRIx64 " offset 0x%08" PRIx64 + +ppc_decr_load(uint64_t tb) "decr 0x%016" PRIx64 +ppc_decr_excp(const char *action) "%s decrementer" +ppc_decr_store(uint32_t nr_bits, uint64_t decr, uint64_t value) "%d-bit 0x%016" PRIx64 " => 0x%016" PRIx64 + +ppc4xx_fit(uint32_t ir, uint64_t tcr, uint64_t tsr) "ir %d TCR 0x%" PRIx64 " TSR 0x%" PRIx64 +ppc4xx_pit_stop(void) "" +ppc4xx_pit_start(uint64_t reload) "PIT 0x%016" PRIx64 +ppc4xx_pit(uint32_t ar, uint32_t ir, uint64_t tcr, uint64_t tsr, uint64_t reload) "ar %d ir %d TCR 0x%" PRIx64 " TSR 0x%" PRIx64 " PIT 0x%016" PRIx64 +ppc4xx_wdt(uint64_t tcr, uint64_t tsr) "TCR 0x%" PRIx64 " TSR 0x%" PRIx64 +ppc40x_store_pit(uint64_t value) "val 0x%" PRIx64 +ppc40x_set_tb_clk(uint32_t value) "new frequency %" PRIu32 +ppc40x_timers_init(uint32_t value) "frequency %" PRIu32 + +ppc_irq_set(void *env, uint32_t pin, uint32_t level) "env [%p] pin %d level %d" +ppc_irq_set_exit(void *env, uint32_t n_IRQ, uint32_t level, uint32_t pending, uint32_t request) "env [%p] n_IRQ %d level %d => pending 0x%08" PRIx32 " req 0x%08" PRIx32 +ppc_irq_set_state(const char *name, uint32_t level) "\"%s\" level %d" +ppc_irq_reset(const char *name) "%s" +ppc_irq_cpu(const char *action) "%s" # prep_systemio.c prep_systemio_read(uint32_t addr, uint32_t val) "read addr=0x%x val=0x%x" diff --git a/hw/remote/proxy-memory-listener.c b/hw/remote/proxy-memory-listener.c index 901dbf1357..882c9b4854 100644 --- a/hw/remote/proxy-memory-listener.c +++ b/hw/remote/proxy-memory-listener.c @@ -219,6 +219,7 @@ void proxy_memory_listener_configure(ProxyMemoryListener *proxy_listener, proxy_listener->listener.region_add = proxy_memory_listener_region_addnop; proxy_listener->listener.region_nop = proxy_memory_listener_region_addnop; proxy_listener->listener.priority = 10; + proxy_listener->listener.name = "proxy"; memory_listener_register(&proxy_listener->listener, &address_space_memory); diff --git a/hw/vfio/common.c b/hw/vfio/common.c index 8728d4d5c2..a784b219e6 100644 --- a/hw/vfio/common.c +++ b/hw/vfio/common.c @@ -562,6 +562,7 @@ static bool vfio_listener_skipped_section(MemoryRegionSection *section) { return (!memory_region_is_ram(section->mr) && !memory_region_is_iommu(section->mr)) || + memory_region_is_protected(section->mr) || /* * Sizing an enabled 64-bit BAR can cause spurious mappings to * addresses in the upper part of the 64-bit address space. 
These @@ -1434,6 +1435,7 @@ static void vfio_listener_log_sync(MemoryListener *listener, } static const MemoryListener vfio_memory_listener = { + .name = "vfio", .region_add = vfio_listener_region_add, .region_del = vfio_listener_region_del, .log_global_start = vfio_listener_log_global_start, diff --git a/hw/vfio/spapr.c b/hw/vfio/spapr.c index ea3f70bd2f..04c6e67f8f 100644 --- a/hw/vfio/spapr.c +++ b/hw/vfio/spapr.c @@ -136,6 +136,7 @@ static void vfio_prereg_listener_region_del(MemoryListener *listener, } const MemoryListener vfio_prereg_listener = { + .name = "vfio-pre-reg", .region_add = vfio_prereg_listener_region_add, .region_del = vfio_prereg_listener_region_del, }; diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c index 7633ea66d1..47d7a5a23d 100644 --- a/hw/virtio/vhost-vdpa.c +++ b/hw/virtio/vhost-vdpa.c @@ -246,6 +246,7 @@ static void vhost_vdpa_listener_region_del(MemoryListener *listener, * depends on the addnop(). */ static const MemoryListener vhost_vdpa_memory_listener = { + .name = "vhost-vdpa", .commit = vhost_vdpa_listener_commit, .region_add = vhost_vdpa_listener_region_add, .region_del = vhost_vdpa_listener_region_del, diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c index b4b29413e6..437347ad01 100644 --- a/hw/virtio/vhost.c +++ b/hw/virtio/vhost.c @@ -1366,6 +1366,7 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque, hdev->features = features; hdev->memory_listener = (MemoryListener) { + .name = "vhost", .begin = vhost_begin, .commit = vhost_commit, .region_add = vhost_region_addnop, @@ -1381,6 +1382,7 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque, }; hdev->iommu_listener = (MemoryListener) { + .name = "vhost-iommu", .region_add = vhost_iommu_region_add, .region_del = vhost_iommu_region_del, }; diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c index 3a1f6c520c..240759ff0b 100644 --- a/hw/virtio/virtio.c +++ b/hw/virtio/virtio.c @@ -3670,6 +3670,7 @@ static void virtio_device_realize(DeviceState *dev, Error **errp) } vdev->listener.commit = virtio_memory_listener_commit; + vdev->listener.name = "virtio"; memory_listener_register(&vdev->listener, vdev->dma_as); } diff --git a/hw/xen/xen_pt.c b/hw/xen/xen_pt.c index 232482d65f..ca0a98187e 100644 --- a/hw/xen/xen_pt.c +++ b/hw/xen/xen_pt.c @@ -689,12 +689,14 @@ static void xen_pt_io_region_del(MemoryListener *l, MemoryRegionSection *sec) } static const MemoryListener xen_pt_memory_listener = { + .name = "xen-pt-mem", .region_add = xen_pt_region_add, .region_del = xen_pt_region_del, .priority = 10, }; static const MemoryListener xen_pt_io_listener = { + .name = "xen-pt-io", .region_add = xen_pt_io_region_add, .region_del = xen_pt_io_region_del, .priority = 10,
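
The listener hunks above all follow one pattern: each MemoryListener gains a human-readable .name so that registered listeners can be told apart when debugging or tracing. The following self-contained illustration mirrors the idea with stand-in types; it is not QEMU's MemoryListener API:

    #include <stdio.h>

    /* Stand-in for a named listener; QEMU's real struct has many callbacks */
    typedef struct Listener {
        const char *name;
        void (*region_add)(struct Listener *l);
        struct Listener *next;
    } Listener;

    static Listener *listeners;

    static void listener_register(Listener *l)
    {
        l->next = listeners;
        listeners = l;
    }

    static void vfio_region_add(Listener *l)
    {
        printf("region_add handled by \"%s\"\n", l->name);
    }

    int main(void)
    {
        Listener vfio_listener = {
            .name = "vfio", /* mirrors vfio_memory_listener above */
            .region_add = vfio_region_add,
        };
        listener_register(&vfio_listener);

        /* when walking the list, the name identifies each listener */
        for (Listener *l = listeners; l; l = l->next) {
            l->region_add(l);
        }
        return 0;
    }
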