From 02e3ff548d2379c16990bac9cb84833231e0d20f Mon Sep 17 00:00:00 2001
From: Cédric Le Goater
Date: Thu, 6 Dec 2018 00:22:15 +0100
Subject: ppc/xive: introduce a XIVE interrupt source model

The first sub-engine of the overall XIVE architecture is the Interrupt
Virtualization Source Engine (IVSE). An IVSE can be integrated into
other logic, such as a PCI PHB, or into the main interrupt controller
to manage IPIs.

Each IVSE instance is associated with an Event State Buffer (ESB) that
contains a two-bit state entry for each possible event source. When an
event is signaled to the IVSE, by MMIO or some other means, the
associated interrupt state bits are fetched from the ESB and modified.
Depending on the resulting ESB state, the event is forwarded to the
IVRE sub-engine of the controller doing the routing.

Each supported ESB entry is associated with either a single page or an
even/odd pair of pages which provide commands to manage the source: to
EOI or to turn off the source, for instance.

On a sPAPR machine, the O/S obtains the page address of the ESB entry
associated with a source, and its characteristics, using the
H_INT_GET_SOURCE_INFO hcall. On PowerNV, a similar OPAL call is used.

The xive_source_notify() routine is in charge of forwarding the source
event notification to the routing engine. It will be filled in later
on.

Signed-off-by: Cédric Le Goater
Signed-off-by: David Gibson
---
 hw/intc/Makefile.objs |   1 +
 hw/intc/xive.c        | 382 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 383 insertions(+)
 create mode 100644 hw/intc/xive.c

diff --git a/hw/intc/Makefile.objs b/hw/intc/Makefile.objs
index 0e9963f5ee..72a46ed91c 100644
--- a/hw/intc/Makefile.objs
+++ b/hw/intc/Makefile.objs
@@ -37,6 +37,7 @@ obj-$(CONFIG_SH4) += sh_intc.o
 obj-$(CONFIG_XICS) += xics.o
 obj-$(CONFIG_XICS_SPAPR) += xics_spapr.o
 obj-$(CONFIG_XICS_KVM) += xics_kvm.o
+obj-$(CONFIG_XIVE) += xive.o
 obj-$(CONFIG_POWERNV) += xics_pnv.o
 obj-$(CONFIG_ALLWINNER_A10_PIC) += allwinner-a10-pic.o
 obj-$(CONFIG_S390_FLIC) += s390_flic.o
diff --git a/hw/intc/xive.c b/hw/intc/xive.c
new file mode 100644
index 0000000000..6389bd8323
--- /dev/null
+++ b/hw/intc/xive.c
@@ -0,0 +1,382 @@
+/*
+ * QEMU PowerPC XIVE interrupt controller model
+ *
+ * Copyright (c) 2017-2018, IBM Corporation.
+ *
+ * This code is licensed under the GPL version 2 or later. See the
+ * COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "qapi/error.h"
+#include "target/ppc/cpu.h"
+#include "sysemu/cpus.h"
+#include "sysemu/dma.h"
+#include "hw/qdev-properties.h"
+#include "monitor/monitor.h"
+#include "hw/ppc/xive.h"
+
+/*
+ * XIVE ESB helpers
+ */
+
+static uint8_t xive_esb_set(uint8_t *pq, uint8_t value)
+{
+    uint8_t old_pq = *pq & 0x3;
+
+    *pq &= ~0x3;
+    *pq |= value & 0x3;
+
+    return old_pq;
+}
+
+static bool xive_esb_trigger(uint8_t *pq)
+{
+    uint8_t old_pq = *pq & 0x3;
+
+    switch (old_pq) {
+    case XIVE_ESB_RESET:
+        xive_esb_set(pq, XIVE_ESB_PENDING);
+        return true;
+    case XIVE_ESB_PENDING:
+    case XIVE_ESB_QUEUED:
+        xive_esb_set(pq, XIVE_ESB_QUEUED);
+        return false;
+    case XIVE_ESB_OFF:
+        xive_esb_set(pq, XIVE_ESB_OFF);
+        return false;
+    default:
+        g_assert_not_reached();
+    }
+}
+
+static bool xive_esb_eoi(uint8_t *pq)
+{
+    uint8_t old_pq = *pq & 0x3;
+
+    switch (old_pq) {
+    case XIVE_ESB_RESET:
+    case XIVE_ESB_PENDING:
+        xive_esb_set(pq, XIVE_ESB_RESET);
+        return false;
+    case XIVE_ESB_QUEUED:
+        xive_esb_set(pq, XIVE_ESB_PENDING);
+        return true;
+    case XIVE_ESB_OFF:
+        xive_esb_set(pq, XIVE_ESB_OFF);
+        return false;
+    default:
+        g_assert_not_reached();
+    }
+}
+
+/*
+ * XIVE Interrupt Source (or IVSE)
+ */
+
+uint8_t xive_source_esb_get(XiveSource *xsrc, uint32_t srcno)
+{
+    assert(srcno < xsrc->nr_irqs);
+
+    return xsrc->status[srcno] & 0x3;
+}
+
+uint8_t xive_source_esb_set(XiveSource *xsrc, uint32_t srcno, uint8_t pq)
+{
+    assert(srcno < xsrc->nr_irqs);
+
+    return xive_esb_set(&xsrc->status[srcno], pq);
+}
+
+/*
+ * Returns whether the event notification should be forwarded.
+ */
+static bool xive_source_esb_trigger(XiveSource *xsrc, uint32_t srcno)
+{
+    assert(srcno < xsrc->nr_irqs);
+
+    return xive_esb_trigger(&xsrc->status[srcno]);
+}
+
+/*
+ * Returns whether the event notification should be forwarded.
+ */
+static bool xive_source_esb_eoi(XiveSource *xsrc, uint32_t srcno)
+{
+    assert(srcno < xsrc->nr_irqs);
+
+    return xive_esb_eoi(&xsrc->status[srcno]);
+}
+
+/*
+ * Forward the source event notification to the Router
+ */
+static void xive_source_notify(XiveSource *xsrc, int srcno)
+{
+
+}
+
+/*
+ * In a two-page ESB MMIO setting, the even page is the trigger page
+ * and the odd page is for management
+ */
+static inline bool addr_is_even(hwaddr addr, uint32_t shift)
+{
+    return !((addr >> shift) & 1);
+}
+
+static inline bool xive_source_is_trigger_page(XiveSource *xsrc, hwaddr addr)
+{
+    return xive_source_esb_has_2page(xsrc) &&
+        addr_is_even(addr, xsrc->esb_shift - 1);
+}
+
+/*
+ * ESB MMIO loads
+ *                      Trigger page    Management/EOI page
+ *
+ * ESB MMIO setting     2 pages         1 or 2 pages
+ *
+ * 0x000 .. 0x3FF       -1              EOI and return 0|1
+ * 0x400 .. 0x7FF       -1              EOI and return 0|1
+ * 0x800 .. 0xBFF       -1              return PQ
+ * 0xC00 .. 0xCFF       -1              return PQ and atomically PQ=00
+ * 0xD00 .. 0xDFF       -1              return PQ and atomically PQ=01
+ * 0xE00 .. 0xEFF       -1              return PQ and atomically PQ=10
+ * 0xF00 .. 0xFFF       -1              return PQ and atomically PQ=11
+ */
+static uint64_t xive_source_esb_read(void *opaque, hwaddr addr, unsigned size)
+{
+    XiveSource *xsrc = XIVE_SOURCE(opaque);
+    uint32_t offset = addr & 0xFFF;
+    uint32_t srcno = addr >> xsrc->esb_shift;
+    uint64_t ret = -1;
+
+    /* In a two-page ESB MMIO setting, the trigger page should not be read */
+    if (xive_source_is_trigger_page(xsrc, addr)) {
+        qemu_log_mask(LOG_GUEST_ERROR,
+                      "XIVE: invalid load on IRQ %d trigger page at "
+                      "0x%"HWADDR_PRIx"\n", srcno, addr);
+        return -1;
+    }
+
+    switch (offset) {
+    case XIVE_ESB_LOAD_EOI ... XIVE_ESB_LOAD_EOI + 0x7FF:
+        ret = xive_source_esb_eoi(xsrc, srcno);
+
+        /* Forward the source event notification for routing */
+        if (ret) {
+            xive_source_notify(xsrc, srcno);
+        }
+        break;
+
+    case XIVE_ESB_GET ... XIVE_ESB_GET + 0x3FF:
+        ret = xive_source_esb_get(xsrc, srcno);
+        break;
+
+    case XIVE_ESB_SET_PQ_00 ... XIVE_ESB_SET_PQ_00 + 0x0FF:
+    case XIVE_ESB_SET_PQ_01 ... XIVE_ESB_SET_PQ_01 + 0x0FF:
+    case XIVE_ESB_SET_PQ_10 ... XIVE_ESB_SET_PQ_10 + 0x0FF:
+    case XIVE_ESB_SET_PQ_11 ... XIVE_ESB_SET_PQ_11 + 0x0FF:
+        ret = xive_source_esb_set(xsrc, srcno, (offset >> 8) & 0x3);
+        break;
+    default:
+        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid ESB load addr %x\n",
+                      offset);
+    }
+
+    return ret;
+}
+
+/*
+ * ESB MMIO stores
+ *                      Trigger page    Management/EOI page
+ *
+ * ESB MMIO setting     2 pages         1 or 2 pages
+ *
+ * 0x000 .. 0x3FF       Trigger         Trigger
+ * 0x400 .. 0x7FF       Trigger         EOI
+ * 0x800 .. 0xBFF       Trigger         undefined
+ * 0xC00 .. 0xCFF       Trigger         PQ=00
+ * 0xD00 .. 0xDFF       Trigger         PQ=01
+ * 0xE00 .. 0xEFF       Trigger         PQ=10
+ * 0xF00 .. 0xFFF       Trigger         PQ=11
+ */
+static void xive_source_esb_write(void *opaque, hwaddr addr,
+                                  uint64_t value, unsigned size)
+{
+    XiveSource *xsrc = XIVE_SOURCE(opaque);
+    uint32_t offset = addr & 0xFFF;
+    uint32_t srcno = addr >> xsrc->esb_shift;
+    bool notify = false;
+
+    /* In a two-page ESB MMIO setting, the trigger page only triggers */
+    if (xive_source_is_trigger_page(xsrc, addr)) {
+        notify = xive_source_esb_trigger(xsrc, srcno);
+        goto out;
+    }
+
+    switch (offset) {
+    case 0 ... 0x3FF:
+        notify = xive_source_esb_trigger(xsrc, srcno);
+        break;
+
+    case XIVE_ESB_STORE_EOI ... XIVE_ESB_STORE_EOI + 0x3FF:
+        if (!(xsrc->esb_flags & XIVE_SRC_STORE_EOI)) {
+            qemu_log_mask(LOG_GUEST_ERROR,
+                          "XIVE: invalid Store EOI for IRQ %d\n", srcno);
+            return;
+        }
+
+        notify = xive_source_esb_eoi(xsrc, srcno);
+        break;
+
+    case XIVE_ESB_SET_PQ_00 ... XIVE_ESB_SET_PQ_00 + 0x0FF:
+    case XIVE_ESB_SET_PQ_01 ... XIVE_ESB_SET_PQ_01 + 0x0FF:
+    case XIVE_ESB_SET_PQ_10 ... XIVE_ESB_SET_PQ_10 + 0x0FF:
+    case XIVE_ESB_SET_PQ_11 ... XIVE_ESB_SET_PQ_11 + 0x0FF:
+        xive_source_esb_set(xsrc, srcno, (offset >> 8) & 0x3);
+        break;
+
+    default:
+        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid ESB write addr %x\n",
+                      offset);
+        return;
+    }
+
+out:
+    /* Forward the source event notification for routing */
+    if (notify) {
+        xive_source_notify(xsrc, srcno);
+    }
+}
+
+static const MemoryRegionOps xive_source_esb_ops = {
+    .read = xive_source_esb_read,
+    .write = xive_source_esb_write,
+    .endianness = DEVICE_BIG_ENDIAN,
+    .valid = {
+        .min_access_size = 8,
+        .max_access_size = 8,
+    },
+    .impl = {
+        .min_access_size = 8,
+        .max_access_size = 8,
+    },
+};
+
+static void xive_source_set_irq(void *opaque, int srcno, int val)
+{
+    XiveSource *xsrc = XIVE_SOURCE(opaque);
+    bool notify = false;
+
+    if (val) {
+        notify = xive_source_esb_trigger(xsrc, srcno);
+    }
+
+    /* Forward the source event notification for routing */
+    if (notify) {
+        xive_source_notify(xsrc, srcno);
+    }
+}
+
+void xive_source_pic_print_info(XiveSource *xsrc, uint32_t offset, Monitor *mon)
+{
+    int i;
+
+    for (i = 0; i < xsrc->nr_irqs; i++) {
+        uint8_t pq = xive_source_esb_get(xsrc, i);
+
+        if (pq == XIVE_ESB_OFF) {
+            continue;
+        }
+
+        monitor_printf(mon, "  %08x %c%c\n", i + offset,
+                       pq & XIVE_ESB_VAL_P ? 'P' : '-',
+                       pq & XIVE_ESB_VAL_Q ?
'Q' : '-'); + } +} + +static void xive_source_reset(void *dev) +{ + XiveSource *xsrc = XIVE_SOURCE(dev); + + /* PQs are initialized to 0b01 (Q=1) which corresponds to "ints off" */ + memset(xsrc->status, XIVE_ESB_OFF, xsrc->nr_irqs); +} + +static void xive_source_realize(DeviceState *dev, Error **errp) +{ + XiveSource *xsrc = XIVE_SOURCE(dev); + + if (!xsrc->nr_irqs) { + error_setg(errp, "Number of interrupt needs to be greater than 0"); + return; + } + + if (xsrc->esb_shift != XIVE_ESB_4K && + xsrc->esb_shift != XIVE_ESB_4K_2PAGE && + xsrc->esb_shift != XIVE_ESB_64K && + xsrc->esb_shift != XIVE_ESB_64K_2PAGE) { + error_setg(errp, "Invalid ESB shift setting"); + return; + } + + xsrc->status = g_malloc0(xsrc->nr_irqs); + + memory_region_init_io(&xsrc->esb_mmio, OBJECT(xsrc), + &xive_source_esb_ops, xsrc, "xive.esb", + (1ull << xsrc->esb_shift) * xsrc->nr_irqs); + + xsrc->qirqs = qemu_allocate_irqs(xive_source_set_irq, xsrc, + xsrc->nr_irqs); + + qemu_register_reset(xive_source_reset, dev); +} + +static const VMStateDescription vmstate_xive_source = { + .name = TYPE_XIVE_SOURCE, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32_EQUAL(nr_irqs, XiveSource, NULL), + VMSTATE_VBUFFER_UINT32(status, XiveSource, 1, NULL, nr_irqs), + VMSTATE_END_OF_LIST() + }, +}; + +/* + * The default XIVE interrupt source setting for the ESB MMIOs is two + * 64k pages without Store EOI, to be in sync with KVM. + */ +static Property xive_source_properties[] = { + DEFINE_PROP_UINT64("flags", XiveSource, esb_flags, 0), + DEFINE_PROP_UINT32("nr-irqs", XiveSource, nr_irqs, 0), + DEFINE_PROP_UINT32("shift", XiveSource, esb_shift, XIVE_ESB_64K_2PAGE), + DEFINE_PROP_END_OF_LIST(), +}; + +static void xive_source_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->desc = "XIVE Interrupt Source"; + dc->props = xive_source_properties; + dc->realize = xive_source_realize; + dc->vmsd = &vmstate_xive_source; +} + +static const TypeInfo xive_source_info = { + .name = TYPE_XIVE_SOURCE, + .parent = TYPE_DEVICE, + .instance_size = sizeof(XiveSource), + .class_init = xive_source_class_init, +}; + +static void xive_register_types(void) +{ + type_register_static(&xive_source_info); +} + +type_init(xive_register_types) -- cgit v1.2.3-55-g7522 From 5fd9ef18a9707c17d0f1d4262a76fa878edb65c3 Mon Sep 17 00:00:00 2001 From: Cédric Le Goater Date: Thu, 6 Dec 2018 00:22:16 +0100 Subject: ppc/xive: add support for the LSI interrupt sources The 'sent' status of the LSI interrupt source is modeled with the 'P' bit of the ESB and the assertion status of the source is maintained with an extra bit under the main XiveSource object. The type of the source is stored in the same array for practical reasons. Signed-off-by: Cédric Le Goater [dwg: Fix style nit] Signed-off-by: David Gibson --- hw/intc/xive.c | 67 ++++++++++++++++++++++++++++++++++++++++++++++----- include/hw/ppc/xive.h | 19 ++++++++++++++- 2 files changed, 79 insertions(+), 7 deletions(-) (limited to 'hw/intc') diff --git a/hw/intc/xive.c b/hw/intc/xive.c index 6389bd8323..4998f128e7 100644 --- a/hw/intc/xive.c +++ b/hw/intc/xive.c @@ -89,14 +89,42 @@ uint8_t xive_source_esb_set(XiveSource *xsrc, uint32_t srcno, uint8_t pq) return xive_esb_set(&xsrc->status[srcno], pq); } +/* + * Returns whether the event notification should be forwarded. 
+ */ +static bool xive_source_lsi_trigger(XiveSource *xsrc, uint32_t srcno) +{ + uint8_t old_pq = xive_source_esb_get(xsrc, srcno); + + xsrc->status[srcno] |= XIVE_STATUS_ASSERTED; + + switch (old_pq) { + case XIVE_ESB_RESET: + xive_source_esb_set(xsrc, srcno, XIVE_ESB_PENDING); + return true; + default: + return false; + } +} + /* * Returns whether the event notification should be forwarded. */ static bool xive_source_esb_trigger(XiveSource *xsrc, uint32_t srcno) { + bool ret; + assert(srcno < xsrc->nr_irqs); - return xive_esb_trigger(&xsrc->status[srcno]); + ret = xive_esb_trigger(&xsrc->status[srcno]); + + if (xive_source_irq_is_lsi(xsrc, srcno) && + xive_source_esb_get(xsrc, srcno) == XIVE_ESB_QUEUED) { + qemu_log_mask(LOG_GUEST_ERROR, + "XIVE: queued an event on LSI IRQ %d\n", srcno); + } + + return ret; } /* @@ -104,9 +132,23 @@ static bool xive_source_esb_trigger(XiveSource *xsrc, uint32_t srcno) */ static bool xive_source_esb_eoi(XiveSource *xsrc, uint32_t srcno) { + bool ret; + assert(srcno < xsrc->nr_irqs); - return xive_esb_eoi(&xsrc->status[srcno]); + ret = xive_esb_eoi(&xsrc->status[srcno]); + + /* + * LSI sources do not set the Q bit but they can still be + * asserted, in which case we should forward a new event + * notification + */ + if (xive_source_irq_is_lsi(xsrc, srcno) && + xsrc->status[srcno] & XIVE_STATUS_ASSERTED) { + ret = xive_source_lsi_trigger(xsrc, srcno); + } + + return ret; } /* @@ -271,8 +313,16 @@ static void xive_source_set_irq(void *opaque, int srcno, int val) XiveSource *xsrc = XIVE_SOURCE(opaque); bool notify = false; - if (val) { - notify = xive_source_esb_trigger(xsrc, srcno); + if (xive_source_irq_is_lsi(xsrc, srcno)) { + if (val) { + notify = xive_source_lsi_trigger(xsrc, srcno); + } else { + xsrc->status[srcno] &= ~XIVE_STATUS_ASSERTED; + } + } else { + if (val) { + notify = xive_source_esb_trigger(xsrc, srcno); + } } /* Forward the source event notification for routing */ @@ -292,9 +342,11 @@ void xive_source_pic_print_info(XiveSource *xsrc, uint32_t offset, Monitor *mon) continue; } - monitor_printf(mon, " %08x %c%c\n", i + offset, + monitor_printf(mon, " %08x %s %c%c%c\n", i + offset, + xive_source_irq_is_lsi(xsrc, i) ? "LSI" : "MSI", pq & XIVE_ESB_VAL_P ? 'P' : '-', - pq & XIVE_ESB_VAL_Q ? 'Q' : '-'); + pq & XIVE_ESB_VAL_Q ? 'Q' : '-', + xsrc->status[i] & XIVE_STATUS_ASSERTED ? 'A' : ' '); } } @@ -302,6 +354,8 @@ static void xive_source_reset(void *dev) { XiveSource *xsrc = XIVE_SOURCE(dev); + /* Do not clear the LSI bitmap */ + /* PQs are initialized to 0b01 (Q=1) which corresponds to "ints off" */ memset(xsrc->status, XIVE_ESB_OFF, xsrc->nr_irqs); } @@ -324,6 +378,7 @@ static void xive_source_realize(DeviceState *dev, Error **errp) } xsrc->status = g_malloc0(xsrc->nr_irqs); + xsrc->lsi_map = bitmap_new(xsrc->nr_irqs); memory_region_init_io(&xsrc->esb_mmio, OBJECT(xsrc), &xive_source_esb_ops, xsrc, "xive.esb", diff --git a/include/hw/ppc/xive.h b/include/hw/ppc/xive.h index 7aa2e38012..7cebc32eba 100644 --- a/include/hw/ppc/xive.h +++ b/include/hw/ppc/xive.h @@ -162,8 +162,9 @@ typedef struct XiveSource { /* IRQs */ uint32_t nr_irqs; qemu_irq *qirqs; + unsigned long *lsi_map; - /* PQ bits */ + /* PQ bits and LSI assertion bit */ uint8_t *status; /* ESB memory region */ @@ -219,6 +220,7 @@ static inline hwaddr xive_source_esb_mgmt(XiveSource *xsrc, int srcno) * When doing an EOI, the Q bit will indicate if the interrupt * needs to be re-triggered. 
*/ +#define XIVE_STATUS_ASSERTED 0x4 /* Extra bit for LSI */ #define XIVE_ESB_VAL_P 0x2 #define XIVE_ESB_VAL_Q 0x1 @@ -257,4 +259,19 @@ static inline qemu_irq xive_source_qirq(XiveSource *xsrc, uint32_t srcno) return xsrc->qirqs[srcno]; } +static inline bool xive_source_irq_is_lsi(XiveSource *xsrc, uint32_t srcno) +{ + assert(srcno < xsrc->nr_irqs); + return test_bit(srcno, xsrc->lsi_map); +} + +static inline void xive_source_irq_set(XiveSource *xsrc, uint32_t srcno, + bool lsi) +{ + assert(srcno < xsrc->nr_irqs); + if (lsi) { + bitmap_set(xsrc->lsi_map, srcno, 1); + } +} + #endif /* PPC_XIVE_H */ -- cgit v1.2.3-55-g7522 From 5e79b155a8ca342cb6ccfcd2a779e200d34f2a9f Mon Sep 17 00:00:00 2001 From: Cédric Le Goater Date: Thu, 6 Dec 2018 00:22:17 +0100 Subject: ppc/xive: introduce the XiveNotifier interface The XiveNotifier offers a simple interface, between the XiveSource object and the main interrupt controller of the machine. It will forward event notifications to the XIVE Interrupt Virtualization Routing Engine (IVRE). Signed-off-by: Cédric Le Goater [dwg: Adjust type name string for XiveNotifier] Signed-off-by: David Gibson --- hw/intc/xive.c | 25 +++++++++++++++++++++++++ include/hw/ppc/xive.h | 23 +++++++++++++++++++++++ 2 files changed, 48 insertions(+) (limited to 'hw/intc') diff --git a/hw/intc/xive.c b/hw/intc/xive.c index 4998f128e7..8d5434d6bd 100644 --- a/hw/intc/xive.c +++ b/hw/intc/xive.c @@ -156,7 +156,11 @@ static bool xive_source_esb_eoi(XiveSource *xsrc, uint32_t srcno) */ static void xive_source_notify(XiveSource *xsrc, int srcno) { + XiveNotifierClass *xnc = XIVE_NOTIFIER_GET_CLASS(xsrc->xive); + if (xnc->notify) { + xnc->notify(xsrc->xive, srcno); + } } /* @@ -363,6 +367,17 @@ static void xive_source_reset(void *dev) static void xive_source_realize(DeviceState *dev, Error **errp) { XiveSource *xsrc = XIVE_SOURCE(dev); + Object *obj; + Error *local_err = NULL; + + obj = object_property_get_link(OBJECT(dev), "xive", &local_err); + if (!obj) { + error_propagate(errp, local_err); + error_prepend(errp, "required link 'xive' not found: "); + return; + } + + xsrc->xive = XIVE_NOTIFIER(obj); if (!xsrc->nr_irqs) { error_setg(errp, "Number of interrupt needs to be greater than 0"); @@ -429,9 +444,19 @@ static const TypeInfo xive_source_info = { .class_init = xive_source_class_init, }; +/* + * XIVE Fabric + */ +static const TypeInfo xive_fabric_info = { + .name = TYPE_XIVE_NOTIFIER, + .parent = TYPE_INTERFACE, + .class_size = sizeof(XiveNotifierClass), +}; + static void xive_register_types(void) { type_register_static(&xive_source_info); + type_register_static(&xive_fabric_info); } type_init(xive_register_types) diff --git a/include/hw/ppc/xive.h b/include/hw/ppc/xive.h index 7cebc32eba..436f1bf756 100644 --- a/include/hw/ppc/xive.h +++ b/include/hw/ppc/xive.h @@ -142,6 +142,27 @@ #include "hw/qdev-core.h" +/* + * XIVE Fabric (Interface between Source and Router) + */ + +typedef struct XiveNotifier { + Object parent; +} XiveNotifier; + +#define TYPE_XIVE_NOTIFIER "xive-notifier" +#define XIVE_NOTIFIER(obj) \ + OBJECT_CHECK(XiveNotifier, (obj), TYPE_XIVE_NOTIFIER) +#define XIVE_NOTIFIER_CLASS(klass) \ + OBJECT_CLASS_CHECK(XiveNotifierClass, (klass), TYPE_XIVE_NOTIFIER) +#define XIVE_NOTIFIER_GET_CLASS(obj) \ + OBJECT_GET_CLASS(XiveNotifierClass, (obj), TYPE_XIVE_NOTIFIER) + +typedef struct XiveNotifierClass { + InterfaceClass parent; + void (*notify)(XiveNotifier *xn, uint32_t lisn); +} XiveNotifierClass; + /* * XIVE Interrupt Source */ @@ -171,6 +192,8 @@ typedef struct XiveSource 
{ uint64_t esb_flags; uint32_t esb_shift; MemoryRegion esb_mmio; + + XiveNotifier *xive; } XiveSource; /* -- cgit v1.2.3-55-g7522 From 7ff7ea928039e418dfa584c91f3f78512284a79a Mon Sep 17 00:00:00 2001 From: Cédric Le Goater Date: Thu, 6 Dec 2018 00:22:18 +0100 Subject: ppc/xive: introduce the XiveRouter model The XiveRouter models the second sub-engine of the XIVE architecture : the Interrupt Virtualization Routing Engine (IVRE). The IVRE handles event notifications of the IVSE and performs the interrupt routing process. For this purpose, it uses a set of tables stored in system memory, the first of which being the Event Assignment Structure (EAS) table. The EAT associates an interrupt source number with an Event Notification Descriptor (END) which will be used in a second phase of the routing process to identify a Notification Virtual Target. The XiveRouter is an abstract class which needs to be inherited from to define a storage for the EAT, and other upcoming tables. Signed-off-by: Cédric Le Goater [dwg: Folded in parts of a later fix by Cédric fixing field access] [dwg: Fix style nits] Signed-off-by: David Gibson --- hw/intc/xive.c | 77 ++++++++++++++++++++++++++++++++++++++++++++++ include/hw/ppc/xive.h | 31 +++++++++++++++++++ include/hw/ppc/xive_regs.h | 62 +++++++++++++++++++++++++++++++++++++ 3 files changed, 170 insertions(+) create mode 100644 include/hw/ppc/xive_regs.h (limited to 'hw/intc') diff --git a/hw/intc/xive.c b/hw/intc/xive.c index 8d5434d6bd..8878abc317 100644 --- a/hw/intc/xive.c +++ b/hw/intc/xive.c @@ -444,6 +444,82 @@ static const TypeInfo xive_source_info = { .class_init = xive_source_class_init, }; +/* + * XIVE Router (aka. Virtualization Controller or IVRE) + */ + +int xive_router_get_eas(XiveRouter *xrtr, uint8_t eas_blk, uint32_t eas_idx, + XiveEAS *eas) +{ + XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr); + + return xrc->get_eas(xrtr, eas_blk, eas_idx, eas); +} + +static void xive_router_notify(XiveNotifier *xn, uint32_t lisn) +{ + XiveRouter *xrtr = XIVE_ROUTER(xn); + uint8_t eas_blk = XIVE_SRCNO_BLOCK(lisn); + uint32_t eas_idx = XIVE_SRCNO_INDEX(lisn); + XiveEAS eas; + + /* EAS cache lookup */ + if (xive_router_get_eas(xrtr, eas_blk, eas_idx, &eas)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN %x\n", lisn); + return; + } + + /* + * The IVRE checks the State Bit Cache at this point. We skip the + * SBC lookup because the state bits of the sources are modeled + * internally in QEMU. + */ + + if (!xive_eas_is_valid(&eas)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid LISN %x\n", lisn); + return; + } + + if (xive_eas_is_masked(&eas)) { + /* Notification completed */ + return; + } +} + +static void xive_router_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass); + + dc->desc = "XIVE Router Engine"; + xnc->notify = xive_router_notify; +} + +static const TypeInfo xive_router_info = { + .name = TYPE_XIVE_ROUTER, + .parent = TYPE_SYS_BUS_DEVICE, + .abstract = true, + .class_size = sizeof(XiveRouterClass), + .class_init = xive_router_class_init, + .interfaces = (InterfaceInfo[]) { + { TYPE_XIVE_NOTIFIER }, + { } + } +}; + +void xive_eas_pic_print_info(XiveEAS *eas, uint32_t lisn, Monitor *mon) +{ + if (!xive_eas_is_valid(eas)) { + return; + } + + monitor_printf(mon, " %08x %s end:%02x/%04x data:%08x\n", + lisn, xive_eas_is_masked(eas) ? 
"M" : " ", + (uint8_t) xive_get_field64(EAS_END_BLOCK, eas->w), + (uint32_t) xive_get_field64(EAS_END_INDEX, eas->w), + (uint32_t) xive_get_field64(EAS_END_DATA, eas->w)); +} + /* * XIVE Fabric */ @@ -457,6 +533,7 @@ static void xive_register_types(void) { type_register_static(&xive_source_info); type_register_static(&xive_fabric_info); + type_register_static(&xive_router_info); } type_init(xive_register_types) diff --git a/include/hw/ppc/xive.h b/include/hw/ppc/xive.h index 436f1bf756..527aa73366 100644 --- a/include/hw/ppc/xive.h +++ b/include/hw/ppc/xive.h @@ -141,6 +141,8 @@ #define PPC_XIVE_H #include "hw/qdev-core.h" +#include "hw/sysbus.h" +#include "hw/ppc/xive_regs.h" /* * XIVE Fabric (Interface between Source and Router) @@ -297,4 +299,33 @@ static inline void xive_source_irq_set(XiveSource *xsrc, uint32_t srcno, } } +/* + * XIVE Router + */ + +typedef struct XiveRouter { + SysBusDevice parent; +} XiveRouter; + +#define TYPE_XIVE_ROUTER "xive-router" +#define XIVE_ROUTER(obj) \ + OBJECT_CHECK(XiveRouter, (obj), TYPE_XIVE_ROUTER) +#define XIVE_ROUTER_CLASS(klass) \ + OBJECT_CLASS_CHECK(XiveRouterClass, (klass), TYPE_XIVE_ROUTER) +#define XIVE_ROUTER_GET_CLASS(obj) \ + OBJECT_GET_CLASS(XiveRouterClass, (obj), TYPE_XIVE_ROUTER) + +typedef struct XiveRouterClass { + SysBusDeviceClass parent; + + /* XIVE table accessors */ + int (*get_eas)(XiveRouter *xrtr, uint8_t eas_blk, uint32_t eas_idx, + XiveEAS *eas); +} XiveRouterClass; + +void xive_eas_pic_print_info(XiveEAS *eas, uint32_t lisn, Monitor *mon); + +int xive_router_get_eas(XiveRouter *xrtr, uint8_t eas_blk, uint32_t eas_idx, + XiveEAS *eas); + #endif /* PPC_XIVE_H */ diff --git a/include/hw/ppc/xive_regs.h b/include/hw/ppc/xive_regs.h new file mode 100644 index 0000000000..e1193fb3e4 --- /dev/null +++ b/include/hw/ppc/xive_regs.h @@ -0,0 +1,62 @@ +/* + * QEMU PowerPC XIVE internal structure definitions + * + * + * The XIVE structures are accessed by the HW and their format is + * architected to be big-endian. Some macros are provided to ease + * access to the different fields. + * + * + * Copyright (c) 2016-2018, IBM Corporation. + * + * This code is licensed under the GPL version 2 or later. See the + * COPYING file in the top-level directory. + */ + +#ifndef PPC_XIVE_REGS_H +#define PPC_XIVE_REGS_H + +/* + * Interrupt source number encoding on PowerBUS + */ +#define XIVE_SRCNO_BLOCK(srcno) (((srcno) >> 28) & 0xf) +#define XIVE_SRCNO_INDEX(srcno) ((srcno) & 0x0fffffff) +#define XIVE_SRCNO(blk, idx) ((uint32_t)(blk) << 28 | (idx)) + +/* + * EAS (Event Assignment Structure) + * + * One per interrupt source. 
Targets an interrupt to a given Event + * Notification Descriptor (END) and provides the corresponding + * logical interrupt number (END data) + */ +typedef struct XiveEAS { + /* + * Use a single 64-bit definition to make it easier to perform + * atomic updates + */ + uint64_t w; +#define EAS_VALID PPC_BIT(0) +#define EAS_END_BLOCK PPC_BITMASK(4, 7) /* Destination END block# */ +#define EAS_END_INDEX PPC_BITMASK(8, 31) /* Destination END index */ +#define EAS_MASKED PPC_BIT(32) /* Masked */ +#define EAS_END_DATA PPC_BITMASK(33, 63) /* Data written to the END */ +} XiveEAS; + +#define xive_eas_is_valid(eas) (be64_to_cpu((eas)->w) & EAS_VALID) +#define xive_eas_is_masked(eas) (be64_to_cpu((eas)->w) & EAS_MASKED) + +static inline uint64_t xive_get_field64(uint64_t mask, uint64_t word) +{ + return (be64_to_cpu(word) & mask) >> ctz64(mask); +} + +static inline uint64_t xive_set_field64(uint64_t mask, uint64_t word, + uint64_t value) +{ + uint64_t tmp = + (be64_to_cpu(word) & ~mask) | ((value << ctz64(mask)) & mask); + return cpu_to_be64(tmp); +} + +#endif /* PPC_XIVE_REGS_H */ -- cgit v1.2.3-55-g7522 From e4ddaac67f1fbdeea207fe28d71dca744832377b Mon Sep 17 00:00:00 2001 From: Cédric Le Goater Date: Thu, 6 Dec 2018 00:22:19 +0100 Subject: ppc/xive: introduce the XIVE Event Notification Descriptors To complete the event routing, the IVRE sub-engine uses a second table containing Event Notification Descriptor (END) structures. An END specifies on which Event Queue (EQ) the event notification data, defined in the associated EAS, should be posted when an exception occurs. It also defines which Notification Virtual Target (NVT) should be notified. The Event Queue is a memory page provided by the O/S defining a circular buffer, one per server and priority couple, containing Event Queue entries. These are 4 bytes long, the first bit being a 'generation' bit and the 31 following bits the END Data field. They are pulled by the O/S when the exception occurs. The END Data field is a way to set an invariant logical event source number for an IRQ. On sPAPR machines, it is set with the H_INT_SET_SOURCE_CONFIG hcall when the EISN flag is used. Signed-off-by: Cédric Le Goater [dwg: Fold in a later fix from Cédric fixing field accessors] Signed-off-by: David Gibson --- hw/intc/xive.c | 174 +++++++++++++++++++++++++++++++++++++++++++++ include/hw/ppc/xive.h | 18 +++++ include/hw/ppc/xive_regs.h | 67 +++++++++++++++++ 3 files changed, 259 insertions(+) (limited to 'hw/intc') diff --git a/hw/intc/xive.c b/hw/intc/xive.c index 8878abc317..9ec841f741 100644 --- a/hw/intc/xive.c +++ b/hw/intc/xive.c @@ -444,6 +444,95 @@ static const TypeInfo xive_source_info = { .class_init = xive_source_class_init, }; +/* + * XiveEND helpers + */ + +void xive_end_queue_pic_print_info(XiveEND *end, uint32_t width, Monitor *mon) +{ + uint64_t qaddr_base = (uint64_t) be32_to_cpu(end->w2 & 0x0fffffff) << 32 + | be32_to_cpu(end->w3); + uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0); + uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1); + uint32_t qentries = 1 << (qsize + 10); + int i; + + /* + * print out the [ (qindex - (width - 1)) .. 
(qindex + 1)] window + */ + monitor_printf(mon, " [ "); + qindex = (qindex - (width - 1)) & (qentries - 1); + for (i = 0; i < width; i++) { + uint64_t qaddr = qaddr_base + (qindex << 2); + uint32_t qdata = -1; + + if (dma_memory_read(&address_space_memory, qaddr, &qdata, + sizeof(qdata))) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to read EQ @0x%" + HWADDR_PRIx "\n", qaddr); + return; + } + monitor_printf(mon, "%s%08x ", i == width - 1 ? "^" : "", + be32_to_cpu(qdata)); + qindex = (qindex + 1) & (qentries - 1); + } +} + +void xive_end_pic_print_info(XiveEND *end, uint32_t end_idx, Monitor *mon) +{ + uint64_t qaddr_base = (uint64_t) be32_to_cpu(end->w2 & 0x0fffffff) << 32 + | be32_to_cpu(end->w3); + uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1); + uint32_t qgen = xive_get_field32(END_W1_GENERATION, end->w1); + uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0); + uint32_t qentries = 1 << (qsize + 10); + + uint32_t nvt = xive_get_field32(END_W6_NVT_INDEX, end->w6); + uint8_t priority = xive_get_field32(END_W7_F0_PRIORITY, end->w7); + + if (!xive_end_is_valid(end)) { + return; + } + + monitor_printf(mon, " %08x %c%c%c%c%c prio:%d nvt:%04x eq:@%08"PRIx64 + "% 6d/%5d ^%d", end_idx, + xive_end_is_valid(end) ? 'v' : '-', + xive_end_is_enqueue(end) ? 'q' : '-', + xive_end_is_notify(end) ? 'n' : '-', + xive_end_is_backlog(end) ? 'b' : '-', + xive_end_is_escalate(end) ? 'e' : '-', + priority, nvt, qaddr_base, qindex, qentries, qgen); + + xive_end_queue_pic_print_info(end, 6, mon); + monitor_printf(mon, "]\n"); +} + +static void xive_end_enqueue(XiveEND *end, uint32_t data) +{ + uint64_t qaddr_base = (uint64_t) be32_to_cpu(end->w2 & 0x0fffffff) << 32 + | be32_to_cpu(end->w3); + uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0); + uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1); + uint32_t qgen = xive_get_field32(END_W1_GENERATION, end->w1); + + uint64_t qaddr = qaddr_base + (qindex << 2); + uint32_t qdata = cpu_to_be32((qgen << 31) | (data & 0x7fffffff)); + uint32_t qentries = 1 << (qsize + 10); + + if (dma_memory_write(&address_space_memory, qaddr, &qdata, sizeof(qdata))) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to write END data @0x%" + HWADDR_PRIx "\n", qaddr); + return; + } + + qindex = (qindex + 1) & (qentries - 1); + if (qindex == 0) { + qgen ^= 1; + end->w1 = xive_set_field32(END_W1_GENERATION, end->w1, qgen); + } + end->w1 = xive_set_field32(END_W1_PAGE_OFF, end->w1, qindex); +} + /* * XIVE Router (aka. Virtualization Controller or IVRE) */ @@ -456,6 +545,83 @@ int xive_router_get_eas(XiveRouter *xrtr, uint8_t eas_blk, uint32_t eas_idx, return xrc->get_eas(xrtr, eas_blk, eas_idx, eas); } +int xive_router_get_end(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx, + XiveEND *end) +{ + XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr); + + return xrc->get_end(xrtr, end_blk, end_idx, end); +} + +int xive_router_write_end(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx, + XiveEND *end, uint8_t word_number) +{ + XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr); + + return xrc->write_end(xrtr, end_blk, end_idx, end, word_number); +} + +/* + * An END trigger can come from an event trigger (IPI or HW) or from + * another chip. We don't model the PowerBus but the END trigger + * message has the same parameters than in the function below. 
+ */ +static void xive_router_end_notify(XiveRouter *xrtr, uint8_t end_blk, + uint32_t end_idx, uint32_t end_data) +{ + XiveEND end; + uint8_t priority; + uint8_t format; + + /* END cache lookup */ + if (xive_router_get_end(xrtr, end_blk, end_idx, &end)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk, + end_idx); + return; + } + + if (!xive_end_is_valid(&end)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n", + end_blk, end_idx); + return; + } + + if (xive_end_is_enqueue(&end)) { + xive_end_enqueue(&end, end_data); + /* Enqueuing event data modifies the EQ toggle and index */ + xive_router_write_end(xrtr, end_blk, end_idx, &end, 1); + } + + /* + * The W7 format depends on the F bit in W6. It defines the type + * of the notification : + * + * F=0 : single or multiple NVT notification + * F=1 : User level Event-Based Branch (EBB) notification, no + * priority + */ + format = xive_get_field32(END_W6_FORMAT_BIT, end.w6); + priority = xive_get_field32(END_W7_F0_PRIORITY, end.w7); + + /* The END is masked */ + if (format == 0 && priority == 0xff) { + return; + } + + /* + * Check the END ESn (Event State Buffer for notification) for + * even futher coalescing in the Router + */ + if (!xive_end_is_notify(&end)) { + qemu_log_mask(LOG_UNIMP, "XIVE: !UCOND_NOTIFY not implemented\n"); + return; + } + + /* + * Follows IVPE notification + */ +} + static void xive_router_notify(XiveNotifier *xn, uint32_t lisn) { XiveRouter *xrtr = XIVE_ROUTER(xn); @@ -484,6 +650,14 @@ static void xive_router_notify(XiveNotifier *xn, uint32_t lisn) /* Notification completed */ return; } + + /* + * The event trigger becomes an END trigger + */ + xive_router_end_notify(xrtr, + xive_get_field64(EAS_END_BLOCK, eas.w), + xive_get_field64(EAS_END_INDEX, eas.w), + xive_get_field64(EAS_END_DATA, eas.w)); } static void xive_router_class_init(ObjectClass *klass, void *data) diff --git a/include/hw/ppc/xive.h b/include/hw/ppc/xive.h index 527aa73366..4851d3b3a4 100644 --- a/include/hw/ppc/xive.h +++ b/include/hw/ppc/xive.h @@ -321,11 +321,29 @@ typedef struct XiveRouterClass { /* XIVE table accessors */ int (*get_eas)(XiveRouter *xrtr, uint8_t eas_blk, uint32_t eas_idx, XiveEAS *eas); + int (*get_end)(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx, + XiveEND *end); + int (*write_end)(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx, + XiveEND *end, uint8_t word_number); } XiveRouterClass; void xive_eas_pic_print_info(XiveEAS *eas, uint32_t lisn, Monitor *mon); int xive_router_get_eas(XiveRouter *xrtr, uint8_t eas_blk, uint32_t eas_idx, XiveEAS *eas); +int xive_router_get_end(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx, + XiveEND *end); +int xive_router_write_end(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx, + XiveEND *end, uint8_t word_number); + +/* + * For legacy compatibility, the exceptions define up to 256 different + * priorities. P9 implements only 9 levels : 8 active levels [0 - 7] + * and the least favored level 0xFF. 
+ */ +#define XIVE_PRIORITY_MAX 7 + +void xive_end_pic_print_info(XiveEND *end, uint32_t end_idx, Monitor *mon); +void xive_end_queue_pic_print_info(XiveEND *end, uint32_t width, Monitor *mon); #endif /* PPC_XIVE_H */ diff --git a/include/hw/ppc/xive_regs.h b/include/hw/ppc/xive_regs.h index e1193fb3e4..b1d55ecf94 100644 --- a/include/hw/ppc/xive_regs.h +++ b/include/hw/ppc/xive_regs.h @@ -59,4 +59,71 @@ static inline uint64_t xive_set_field64(uint64_t mask, uint64_t word, return cpu_to_be64(tmp); } +static inline uint32_t xive_get_field32(uint32_t mask, uint32_t word) +{ + return (be32_to_cpu(word) & mask) >> ctz32(mask); +} + +static inline uint32_t xive_set_field32(uint32_t mask, uint32_t word, + uint32_t value) +{ + uint32_t tmp = + (be32_to_cpu(word) & ~mask) | ((value << ctz32(mask)) & mask); + return cpu_to_be32(tmp); +} + +/* Event Notification Descriptor (END) */ +typedef struct XiveEND { + uint32_t w0; +#define END_W0_VALID PPC_BIT32(0) /* "v" bit */ +#define END_W0_ENQUEUE PPC_BIT32(1) /* "q" bit */ +#define END_W0_UCOND_NOTIFY PPC_BIT32(2) /* "n" bit */ +#define END_W0_BACKLOG PPC_BIT32(3) /* "b" bit */ +#define END_W0_PRECL_ESC_CTL PPC_BIT32(4) /* "p" bit */ +#define END_W0_ESCALATE_CTL PPC_BIT32(5) /* "e" bit */ +#define END_W0_UNCOND_ESCALATE PPC_BIT32(6) /* "u" bit - DD2.0 */ +#define END_W0_SILENT_ESCALATE PPC_BIT32(7) /* "s" bit - DD2.0 */ +#define END_W0_QSIZE PPC_BITMASK32(12, 15) +#define END_W0_SW0 PPC_BIT32(16) +#define END_W0_FIRMWARE END_W0_SW0 /* Owned by FW */ +#define END_QSIZE_4K 0 +#define END_QSIZE_64K 4 +#define END_W0_HWDEP PPC_BITMASK32(24, 31) + uint32_t w1; +#define END_W1_ESn PPC_BITMASK32(0, 1) +#define END_W1_ESn_P PPC_BIT32(0) +#define END_W1_ESn_Q PPC_BIT32(1) +#define END_W1_ESe PPC_BITMASK32(2, 3) +#define END_W1_ESe_P PPC_BIT32(2) +#define END_W1_ESe_Q PPC_BIT32(3) +#define END_W1_GENERATION PPC_BIT32(9) +#define END_W1_PAGE_OFF PPC_BITMASK32(10, 31) + uint32_t w2; +#define END_W2_MIGRATION_REG PPC_BITMASK32(0, 3) +#define END_W2_OP_DESC_HI PPC_BITMASK32(4, 31) + uint32_t w3; +#define END_W3_OP_DESC_LO PPC_BITMASK32(0, 31) + uint32_t w4; +#define END_W4_ESC_END_BLOCK PPC_BITMASK32(4, 7) +#define END_W4_ESC_END_INDEX PPC_BITMASK32(8, 31) + uint32_t w5; +#define END_W5_ESC_END_DATA PPC_BITMASK32(1, 31) + uint32_t w6; +#define END_W6_FORMAT_BIT PPC_BIT32(8) +#define END_W6_NVT_BLOCK PPC_BITMASK32(9, 12) +#define END_W6_NVT_INDEX PPC_BITMASK32(13, 31) + uint32_t w7; +#define END_W7_F0_IGNORE PPC_BIT32(0) +#define END_W7_F0_BLK_GROUPING PPC_BIT32(1) +#define END_W7_F0_PRIORITY PPC_BITMASK32(8, 15) +#define END_W7_F1_WAKEZ PPC_BIT32(0) +#define END_W7_F1_LOG_SERVER_ID PPC_BITMASK32(1, 31) +} XiveEND; + +#define xive_end_is_valid(end) (be32_to_cpu((end)->w0) & END_W0_VALID) +#define xive_end_is_enqueue(end) (be32_to_cpu((end)->w0) & END_W0_ENQUEUE) +#define xive_end_is_notify(end) (be32_to_cpu((end)->w0) & END_W0_UCOND_NOTIFY) +#define xive_end_is_backlog(end) (be32_to_cpu((end)->w0) & END_W0_BACKLOG) +#define xive_end_is_escalate(end) (be32_to_cpu((end)->w0) & END_W0_ESCALATE_CTL) + #endif /* PPC_XIVE_REGS_H */ -- cgit v1.2.3-55-g7522 From 002686be42784fdce4c1c8ecd1987ddf740cab77 Mon Sep 17 00:00:00 2001 From: Cédric Le Goater Date: Sun, 9 Dec 2018 20:45:52 +0100 Subject: ppc/xive: add support for the END Event State Buffers The Event Notification Descriptor (END) XIVE structure also contains two Event State Buffers providing further coalescing of interrupts, one for the notification event (ESn) and one for the escalation events (ESe). 
An MMIO page is assigned to each to control the EOI through loads
only. Stores are not allowed.

The END ESBs are modeled through an object resembling the
'XiveSource'. It is stateless, as the END state bits are kept in the
XiveEND structure under the XiveRouter, and the MMIO accesses follow
the same rules as for the XiveSource ESBs.

END ESBs are not supported by the Linux drivers, either on OPAL or on
sPAPR. Nevertheless, they provide a means to study the question in the
future and further validate the XIVE model.

Signed-off-by: Cédric Le Goater
[dwg: Fold in a later fix for field access]
Signed-off-by: David Gibson
---
 hw/intc/xive.c        | 160 +++++++++++++++++++++++++++++++++++++++++++++++-
 include/hw/ppc/xive.h |  21 +++++++
 2 files changed, 179 insertions(+), 2 deletions(-)

diff --git a/hw/intc/xive.c b/hw/intc/xive.c
index 9ec841f741..7b2ef7480d 100644
--- a/hw/intc/xive.c
+++ b/hw/intc/xive.c
@@ -613,8 +613,18 @@ static void xive_router_end_notify(XiveRouter *xrtr, uint8_t end_blk,
      * even futher coalescing in the Router
      */
     if (!xive_end_is_notify(&end)) {
-        qemu_log_mask(LOG_UNIMP, "XIVE: !UCOND_NOTIFY not implemented\n");
-        return;
+        uint8_t pq = xive_get_field32(END_W1_ESn, end.w1);
+        bool notify = xive_esb_trigger(&pq);
+
+        if (pq != xive_get_field32(END_W1_ESn, end.w1)) {
+            end.w1 = xive_set_field32(END_W1_ESn, end.w1, pq);
+            xive_router_write_end(xrtr, end_blk, end_idx, &end, 1);
+        }
+
+        /* ESn[Q]=1 : end of notification */
+        if (!notify) {
+            return;
+        }
     }
 
     /*
@@ -694,6 +704,151 @@ void xive_eas_pic_print_info(XiveEAS *eas, uint32_t lisn, Monitor *mon)
                    (uint32_t) xive_get_field64(EAS_END_DATA, eas->w));
 }
 
+/*
+ * END ESB MMIO loads
+ */
+static uint64_t xive_end_source_read(void *opaque, hwaddr addr, unsigned size)
+{
+    XiveENDSource *xsrc = XIVE_END_SOURCE(opaque);
+    uint32_t offset = addr & 0xFFF;
+    uint8_t end_blk;
+    uint32_t end_idx;
+    XiveEND end;
+    uint32_t end_esmask;
+    uint8_t pq;
+    uint64_t ret = -1;
+
+    end_blk = xsrc->block_id;
+    end_idx = addr >> (xsrc->esb_shift + 1);
+
+    if (xive_router_get_end(xsrc->xrtr, end_blk, end_idx, &end)) {
+        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk,
+                      end_idx);
+        return -1;
+    }
+
+    if (!xive_end_is_valid(&end)) {
+        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n",
+                      end_blk, end_idx);
+        return -1;
+    }
+
+    end_esmask = addr_is_even(addr, xsrc->esb_shift) ? END_W1_ESn : END_W1_ESe;
+    pq = xive_get_field32(end_esmask, end.w1);
+
+    switch (offset) {
+    case XIVE_ESB_LOAD_EOI ... XIVE_ESB_LOAD_EOI + 0x7FF:
+        ret = xive_esb_eoi(&pq);
+
+        /* Forward the source event notification for routing ?? */
+        break;
+
+    case XIVE_ESB_GET ... XIVE_ESB_GET + 0x3FF:
+        ret = pq;
+        break;
+
+    case XIVE_ESB_SET_PQ_00 ... XIVE_ESB_SET_PQ_00 + 0x0FF:
+    case XIVE_ESB_SET_PQ_01 ... XIVE_ESB_SET_PQ_01 + 0x0FF:
+    case XIVE_ESB_SET_PQ_10 ... XIVE_ESB_SET_PQ_10 + 0x0FF:
+    case XIVE_ESB_SET_PQ_11 ...
XIVE_ESB_SET_PQ_11 + 0x0FF: + ret = xive_esb_set(&pq, (offset >> 8) & 0x3); + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid END ESB load addr %d\n", + offset); + return -1; + } + + if (pq != xive_get_field32(end_esmask, end.w1)) { + end.w1 = xive_set_field32(end_esmask, end.w1, pq); + xive_router_write_end(xsrc->xrtr, end_blk, end_idx, &end, 1); + } + + return ret; +} + +/* + * END ESB MMIO stores are invalid + */ +static void xive_end_source_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size) +{ + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid ESB write addr 0x%" + HWADDR_PRIx"\n", addr); +} + +static const MemoryRegionOps xive_end_source_ops = { + .read = xive_end_source_read, + .write = xive_end_source_write, + .endianness = DEVICE_BIG_ENDIAN, + .valid = { + .min_access_size = 8, + .max_access_size = 8, + }, + .impl = { + .min_access_size = 8, + .max_access_size = 8, + }, +}; + +static void xive_end_source_realize(DeviceState *dev, Error **errp) +{ + XiveENDSource *xsrc = XIVE_END_SOURCE(dev); + Object *obj; + Error *local_err = NULL; + + obj = object_property_get_link(OBJECT(dev), "xive", &local_err); + if (!obj) { + error_propagate(errp, local_err); + error_prepend(errp, "required link 'xive' not found: "); + return; + } + + xsrc->xrtr = XIVE_ROUTER(obj); + + if (!xsrc->nr_ends) { + error_setg(errp, "Number of interrupt needs to be greater than 0"); + return; + } + + if (xsrc->esb_shift != XIVE_ESB_4K && + xsrc->esb_shift != XIVE_ESB_64K) { + error_setg(errp, "Invalid ESB shift setting"); + return; + } + + /* + * Each END is assigned an even/odd pair of MMIO pages, the even page + * manages the ESn field while the odd page manages the ESe field. + */ + memory_region_init_io(&xsrc->esb_mmio, OBJECT(xsrc), + &xive_end_source_ops, xsrc, "xive.end", + (1ull << (xsrc->esb_shift + 1)) * xsrc->nr_ends); +} + +static Property xive_end_source_properties[] = { + DEFINE_PROP_UINT8("block-id", XiveENDSource, block_id, 0), + DEFINE_PROP_UINT32("nr-ends", XiveENDSource, nr_ends, 0), + DEFINE_PROP_UINT32("shift", XiveENDSource, esb_shift, XIVE_ESB_64K), + DEFINE_PROP_END_OF_LIST(), +}; + +static void xive_end_source_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->desc = "XIVE END Source"; + dc->props = xive_end_source_properties; + dc->realize = xive_end_source_realize; +} + +static const TypeInfo xive_end_source_info = { + .name = TYPE_XIVE_END_SOURCE, + .parent = TYPE_DEVICE, + .instance_size = sizeof(XiveENDSource), + .class_init = xive_end_source_class_init, +}; + /* * XIVE Fabric */ @@ -708,6 +863,7 @@ static void xive_register_types(void) type_register_static(&xive_source_info); type_register_static(&xive_fabric_info); type_register_static(&xive_router_info); + type_register_static(&xive_end_source_info); } type_init(xive_register_types) diff --git a/include/hw/ppc/xive.h b/include/hw/ppc/xive.h index 4851d3b3a4..014f64aa98 100644 --- a/include/hw/ppc/xive.h +++ b/include/hw/ppc/xive.h @@ -336,6 +336,27 @@ int xive_router_get_end(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx, int xive_router_write_end(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx, XiveEND *end, uint8_t word_number); +/* + * XIVE END ESBs + */ + +#define TYPE_XIVE_END_SOURCE "xive-end-source" +#define XIVE_END_SOURCE(obj) \ + OBJECT_CHECK(XiveENDSource, (obj), TYPE_XIVE_END_SOURCE) + +typedef struct XiveENDSource { + DeviceState parent; + + uint32_t nr_ends; + uint8_t block_id; + + /* ESB memory region */ + uint32_t esb_shift; + 
MemoryRegion esb_mmio; + + XiveRouter *xrtr; +} XiveENDSource; + /* * For legacy compatibility, the exceptions define up to 256 different * priorities. P9 implements only 9 levels : 8 active levels [0 - 7] -- cgit v1.2.3-55-g7522 From 207d9fe98510eaac575bfde8d1be58137e9a22ff Mon Sep 17 00:00:00 2001 From: Cédric Le Goater Date: Sun, 9 Dec 2018 20:45:53 +0100 Subject: ppc/xive: introduce the XIVE interrupt thread context Each POWER9 processor chip has a XIVE presenter that can generate four different exceptions to its threads: - hypervisor exception, - O/S exception - Event-Based Branch (EBB) - msgsnd (doorbell). Each exception has a state independent from the others called a Thread Interrupt Management context. This context is a set of registers which lets the thread handle priority management and interrupt acknowledgment among other things. The most important ones being : - Interrupt Priority Register (PIPR) - Interrupt Pending Buffer (IPB) - Current Processor Priority (CPPR) - Notification Source Register (NSR) These registers are accessible through a specific MMIO region, called the Thread Interrupt Management Area (TIMA), four aligned pages, each exposing a different view of the registers. First page (page address ending in 0b00) gives access to the entire context and is reserved for the ring 0 view for the physical thread context. The second (page address ending in 0b01) is for the hypervisor, ring 1 view. The third (page address ending in 0b10) is for the operating system, ring 2 view. The fourth (page address ending in 0b11) is for user level, ring 3 view. The thread interrupt context is modeled with a XiveTCTX object containing the values of the different exception registers. The TIMA region is mapped at the same address for each CPU. Signed-off-by: Cédric Le Goater Reviewed-by: David Gibson Signed-off-by: David Gibson --- hw/intc/xive.c | 424 +++++++++++++++++++++++++++++++++++++++++++++ include/hw/ppc/xive.h | 44 +++++ include/hw/ppc/xive_regs.h | 82 +++++++++ 3 files changed, 550 insertions(+) (limited to 'hw/intc') diff --git a/hw/intc/xive.c b/hw/intc/xive.c index 7b2ef7480d..06a835c454 100644 --- a/hw/intc/xive.c +++ b/hw/intc/xive.c @@ -16,6 +16,429 @@ #include "hw/qdev-properties.h" #include "monitor/monitor.h" #include "hw/ppc/xive.h" +#include "hw/ppc/xive_regs.h" + +/* + * XIVE Thread Interrupt Management context + */ + +static uint64_t xive_tctx_accept(XiveTCTX *tctx, uint8_t ring) +{ + return 0; +} + +static void xive_tctx_set_cppr(XiveTCTX *tctx, uint8_t ring, uint8_t cppr) +{ + if (cppr > XIVE_PRIORITY_MAX) { + cppr = 0xff; + } + + tctx->regs[ring + TM_CPPR] = cppr; +} + +/* + * XIVE Thread Interrupt Management Area (TIMA) + */ + +/* + * Define an access map for each page of the TIMA that we will use in + * the memory region ops to filter values when doing loads and stores + * of raw registers values + * + * Registers accessibility bits : + * + * 0x0 - no access + * 0x1 - write only + * 0x2 - read only + * 0x3 - read/write + */ + +static const uint8_t xive_tm_hw_view[] = { + /* QW-0 User */ 3, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, + /* QW-1 OS */ 3, 3, 3, 3, 3, 3, 0, 3, 3, 3, 3, 3, 0, 0, 0, 0, + /* QW-2 POOL */ 0, 0, 3, 3, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, + /* QW-3 PHYS */ 3, 3, 3, 3, 0, 3, 0, 3, 3, 0, 0, 3, 3, 3, 3, 0, +}; + +static const uint8_t xive_tm_hv_view[] = { + /* QW-0 User */ 3, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, + /* QW-1 OS */ 3, 3, 3, 3, 3, 3, 0, 3, 3, 3, 3, 3, 0, 0, 0, 0, + /* QW-2 POOL */ 0, 0, 3, 3, 0, 0, 0, 0, 0, 3, 3, 3, 0, 0, 
0, 0, + /* QW-3 PHYS */ 3, 3, 3, 3, 0, 3, 0, 3, 3, 0, 0, 3, 0, 0, 0, 0, +}; + +static const uint8_t xive_tm_os_view[] = { + /* QW-0 User */ 3, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, + /* QW-1 OS */ 2, 3, 2, 2, 2, 2, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, + /* QW-2 POOL */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + /* QW-3 PHYS */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +}; + +static const uint8_t xive_tm_user_view[] = { + /* QW-0 User */ 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + /* QW-1 OS */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + /* QW-2 POOL */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + /* QW-3 PHYS */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +}; + +/* + * Overall TIMA access map for the thread interrupt management context + * registers + */ +static const uint8_t *xive_tm_views[] = { + [XIVE_TM_HW_PAGE] = xive_tm_hw_view, + [XIVE_TM_HV_PAGE] = xive_tm_hv_view, + [XIVE_TM_OS_PAGE] = xive_tm_os_view, + [XIVE_TM_USER_PAGE] = xive_tm_user_view, +}; + +/* + * Computes a register access mask for a given offset in the TIMA + */ +static uint64_t xive_tm_mask(hwaddr offset, unsigned size, bool write) +{ + uint8_t page_offset = (offset >> TM_SHIFT) & 0x3; + uint8_t reg_offset = offset & 0x3F; + uint8_t reg_mask = write ? 0x1 : 0x2; + uint64_t mask = 0x0; + int i; + + for (i = 0; i < size; i++) { + if (xive_tm_views[page_offset][reg_offset + i] & reg_mask) { + mask |= (uint64_t) 0xff << (8 * (size - i - 1)); + } + } + + return mask; +} + +static void xive_tm_raw_write(XiveTCTX *tctx, hwaddr offset, uint64_t value, + unsigned size) +{ + uint8_t ring_offset = offset & 0x30; + uint8_t reg_offset = offset & 0x3F; + uint64_t mask = xive_tm_mask(offset, size, true); + int i; + + /* + * Only 4 or 8 bytes stores are allowed and the User ring is + * excluded + */ + if (size < 4 || !mask || ring_offset == TM_QW0_USER) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid write access at TIMA @%" + HWADDR_PRIx"\n", offset); + return; + } + + /* + * Use the register offset for the raw values and filter out + * reserved values + */ + for (i = 0; i < size; i++) { + uint8_t byte_mask = (mask >> (8 * (size - i - 1))); + if (byte_mask) { + tctx->regs[reg_offset + i] = (value >> (8 * (size - i - 1))) & + byte_mask; + } + } +} + +static uint64_t xive_tm_raw_read(XiveTCTX *tctx, hwaddr offset, unsigned size) +{ + uint8_t ring_offset = offset & 0x30; + uint8_t reg_offset = offset & 0x3F; + uint64_t mask = xive_tm_mask(offset, size, false); + uint64_t ret; + int i; + + /* + * Only 4 or 8 bytes loads are allowed and the User ring is + * excluded + */ + if (size < 4 || !mask || ring_offset == TM_QW0_USER) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid read access at TIMA @%" + HWADDR_PRIx"\n", offset); + return -1; + } + + /* Use the register offset for the raw values */ + ret = 0; + for (i = 0; i < size; i++) { + ret |= (uint64_t) tctx->regs[reg_offset + i] << (8 * (size - i - 1)); + } + + /* filter out reserved values */ + return ret & mask; +} + +/* + * The TM context is mapped twice within each page. Stores and loads + * to the first mapping below 2K write and read the specified values + * without modification. The second mapping above 2K performs specific + * state changes (side effects) in addition to setting/returning the + * interrupt management area context of the processor thread. 
+ */ +static uint64_t xive_tm_ack_os_reg(XiveTCTX *tctx, hwaddr offset, unsigned size) +{ + return xive_tctx_accept(tctx, TM_QW1_OS); +} + +static void xive_tm_set_os_cppr(XiveTCTX *tctx, hwaddr offset, + uint64_t value, unsigned size) +{ + xive_tctx_set_cppr(tctx, TM_QW1_OS, value & 0xff); +} + +/* + * Define a mapping of "special" operations depending on the TIMA page + * offset and the size of the operation. + */ +typedef struct XiveTmOp { + uint8_t page_offset; + uint32_t op_offset; + unsigned size; + void (*write_handler)(XiveTCTX *tctx, hwaddr offset, uint64_t value, + unsigned size); + uint64_t (*read_handler)(XiveTCTX *tctx, hwaddr offset, unsigned size); +} XiveTmOp; + +static const XiveTmOp xive_tm_operations[] = { + /* + * MMIOs below 2K : raw values and special operations without side + * effects + */ + { XIVE_TM_OS_PAGE, TM_QW1_OS + TM_CPPR, 1, xive_tm_set_os_cppr, NULL }, + + /* MMIOs above 2K : special operations with side effects */ + { XIVE_TM_OS_PAGE, TM_SPC_ACK_OS_REG, 2, NULL, xive_tm_ack_os_reg }, +}; + +static const XiveTmOp *xive_tm_find_op(hwaddr offset, unsigned size, bool write) +{ + uint8_t page_offset = (offset >> TM_SHIFT) & 0x3; + uint32_t op_offset = offset & 0xFFF; + int i; + + for (i = 0; i < ARRAY_SIZE(xive_tm_operations); i++) { + const XiveTmOp *xto = &xive_tm_operations[i]; + + /* Accesses done from a more privileged TIMA page is allowed */ + if (xto->page_offset >= page_offset && + xto->op_offset == op_offset && + xto->size == size && + ((write && xto->write_handler) || (!write && xto->read_handler))) { + return xto; + } + } + return NULL; +} + +/* + * TIMA MMIO handlers + */ +static void xive_tm_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + PowerPCCPU *cpu = POWERPC_CPU(current_cpu); + XiveTCTX *tctx = XIVE_TCTX(cpu->intc); + const XiveTmOp *xto; + + /* + * TODO: check V bit in Q[0-3]W2, check PTER bit associated with CPU + */ + + /* + * First, check for special operations in the 2K region + */ + if (offset & 0x800) { + xto = xive_tm_find_op(offset, size, true); + if (!xto) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid write access at TIMA" + "@%"HWADDR_PRIx"\n", offset); + } else { + xto->write_handler(tctx, offset, value, size); + } + return; + } + + /* + * Then, for special operations in the region below 2K. + */ + xto = xive_tm_find_op(offset, size, true); + if (xto) { + xto->write_handler(tctx, offset, value, size); + return; + } + + /* + * Finish with raw access to the register values + */ + xive_tm_raw_write(tctx, offset, value, size); +} + +static uint64_t xive_tm_read(void *opaque, hwaddr offset, unsigned size) +{ + PowerPCCPU *cpu = POWERPC_CPU(current_cpu); + XiveTCTX *tctx = XIVE_TCTX(cpu->intc); + const XiveTmOp *xto; + + /* + * TODO: check V bit in Q[0-3]W2, check PTER bit associated with CPU + */ + + /* + * First, check for special operations in the 2K region + */ + if (offset & 0x800) { + xto = xive_tm_find_op(offset, size, false); + if (!xto) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid read access to TIMA" + "@%"HWADDR_PRIx"\n", offset); + return -1; + } + return xto->read_handler(tctx, offset, size); + } + + /* + * Then, for special operations in the region below 2K. 
+ */ + xto = xive_tm_find_op(offset, size, false); + if (xto) { + return xto->read_handler(tctx, offset, size); + } + + /* + * Finish with raw access to the register values + */ + return xive_tm_raw_read(tctx, offset, size); +} + +const MemoryRegionOps xive_tm_ops = { + .read = xive_tm_read, + .write = xive_tm_write, + .endianness = DEVICE_BIG_ENDIAN, + .valid = { + .min_access_size = 1, + .max_access_size = 8, + }, + .impl = { + .min_access_size = 1, + .max_access_size = 8, + }, +}; + +static inline uint32_t xive_tctx_word2(uint8_t *ring) +{ + return *((uint32_t *) &ring[TM_WORD2]); +} + +static char *xive_tctx_ring_print(uint8_t *ring) +{ + uint32_t w2 = xive_tctx_word2(ring); + + return g_strdup_printf("%02x %02x %02x %02x %02x " + "%02x %02x %02x %08x", + ring[TM_NSR], ring[TM_CPPR], ring[TM_IPB], ring[TM_LSMFB], + ring[TM_ACK_CNT], ring[TM_INC], ring[TM_AGE], ring[TM_PIPR], + be32_to_cpu(w2)); +} + +static const char * const xive_tctx_ring_names[] = { + "USER", "OS", "POOL", "PHYS", +}; + +void xive_tctx_pic_print_info(XiveTCTX *tctx, Monitor *mon) +{ + int cpu_index = tctx->cs ? tctx->cs->cpu_index : -1; + int i; + + monitor_printf(mon, "CPU[%04x]: QW NSR CPPR IPB LSMFB ACK# INC AGE PIPR" + " W2\n", cpu_index); + + for (i = 0; i < XIVE_TM_RING_COUNT; i++) { + char *s = xive_tctx_ring_print(&tctx->regs[i * XIVE_TM_RING_SIZE]); + monitor_printf(mon, "CPU[%04x]: %4s %s\n", cpu_index, + xive_tctx_ring_names[i], s); + g_free(s); + } +} + +static void xive_tctx_reset(void *dev) +{ + XiveTCTX *tctx = XIVE_TCTX(dev); + + memset(tctx->regs, 0, sizeof(tctx->regs)); + + /* Set some defaults */ + tctx->regs[TM_QW1_OS + TM_LSMFB] = 0xFF; + tctx->regs[TM_QW1_OS + TM_ACK_CNT] = 0xFF; + tctx->regs[TM_QW1_OS + TM_AGE] = 0xFF; +} + +static void xive_tctx_realize(DeviceState *dev, Error **errp) +{ + XiveTCTX *tctx = XIVE_TCTX(dev); + PowerPCCPU *cpu; + CPUPPCState *env; + Object *obj; + Error *local_err = NULL; + + obj = object_property_get_link(OBJECT(dev), "cpu", &local_err); + if (!obj) { + error_propagate(errp, local_err); + error_prepend(errp, "required link 'cpu' not found: "); + return; + } + + cpu = POWERPC_CPU(obj); + tctx->cs = CPU(obj); + + env = &cpu->env; + switch (PPC_INPUT(env)) { + case PPC_FLAGS_INPUT_POWER7: + tctx->output = env->irq_inputs[POWER7_INPUT_INT]; + break; + + default: + error_setg(errp, "XIVE interrupt controller does not support " + "this CPU bus model"); + return; + } + + qemu_register_reset(xive_tctx_reset, dev); +} + +static void xive_tctx_unrealize(DeviceState *dev, Error **errp) +{ + qemu_unregister_reset(xive_tctx_reset, dev); +} + +static const VMStateDescription vmstate_xive_tctx = { + .name = TYPE_XIVE_TCTX, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_BUFFER(regs, XiveTCTX), + VMSTATE_END_OF_LIST() + }, +}; + +static void xive_tctx_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->desc = "XIVE Interrupt Thread Context"; + dc->realize = xive_tctx_realize; + dc->unrealize = xive_tctx_unrealize; + dc->vmsd = &vmstate_xive_tctx; +} + +static const TypeInfo xive_tctx_info = { + .name = TYPE_XIVE_TCTX, + .parent = TYPE_DEVICE, + .instance_size = sizeof(XiveTCTX), + .class_init = xive_tctx_class_init, +}; /* * XIVE ESB helpers @@ -864,6 +1287,7 @@ static void xive_register_types(void) type_register_static(&xive_fabric_info); type_register_static(&xive_router_info); type_register_static(&xive_end_source_info); + type_register_static(&xive_tctx_info); } type_init(xive_register_types) 
diff --git a/include/hw/ppc/xive.h b/include/hw/ppc/xive.h
index 014f64aa98..1e823a4c64 100644
--- a/include/hw/ppc/xive.h
+++ b/include/hw/ppc/xive.h
@@ -367,4 +367,48 @@ typedef struct XiveENDSource {
 void xive_end_pic_print_info(XiveEND *end, uint32_t end_idx, Monitor *mon);
 void xive_end_queue_pic_print_info(XiveEND *end, uint32_t width, Monitor *mon);
 
+/*
+ * XIVE Thread Interrupt Management (TM) context
+ */
+
+#define TYPE_XIVE_TCTX "xive-tctx"
+#define XIVE_TCTX(obj) OBJECT_CHECK(XiveTCTX, (obj), TYPE_XIVE_TCTX)
+
+/*
+ * XIVE Thread Interrupt Management register rings :
+ *
+ *   QW-0  User       event-based exception state
+ *   QW-1  O/S        OS context for priority management, interrupt acks
+ *   QW-2  Pool       hypervisor pool context for virtual processors dispatched
+ *   QW-3  Physical   physical thread context and security context
+ */
+#define XIVE_TM_RING_COUNT  4
+#define XIVE_TM_RING_SIZE   0x10
+
+typedef struct XiveTCTX {
+    DeviceState parent_obj;
+
+    CPUState    *cs;
+    qemu_irq    output;
+
+    uint8_t     regs[XIVE_TM_RING_COUNT * XIVE_TM_RING_SIZE];
+} XiveTCTX;
+
+/*
+ * XIVE Thread Interrupt Management Area (TIMA)
+ *
+ * This region gives access to the registers of the thread interrupt
+ * management context. It is four pages wide, each page providing a
+ * different view of the registers. The page with the lower offset is
+ * the most privileged and gives access to the entire context.
+ */
+#define XIVE_TM_HW_PAGE   0x0
+#define XIVE_TM_HV_PAGE   0x1
+#define XIVE_TM_OS_PAGE   0x2
+#define XIVE_TM_USER_PAGE 0x3
+
+extern const MemoryRegionOps xive_tm_ops;
+
+void xive_tctx_pic_print_info(XiveTCTX *tctx, Monitor *mon);
+
 #endif /* PPC_XIVE_H */
diff --git a/include/hw/ppc/xive_regs.h b/include/hw/ppc/xive_regs.h
index b1d55ecf94..8b3cc6c9b9 100644
--- a/include/hw/ppc/xive_regs.h
+++ b/include/hw/ppc/xive_regs.h
@@ -23,6 +23,88 @@
 #define XIVE_SRCNO_INDEX(srcno) ((srcno) & 0x0fffffff)
 #define XIVE_SRCNO(blk, idx)    ((uint32_t)(blk) << 28 | (idx))
 
+#define TM_SHIFT            16
+
+/* TM register offsets */
+#define TM_QW0_USER         0x000 /* All rings */
+#define TM_QW1_OS           0x010 /* Ring 0..2 */
+#define TM_QW2_HV_POOL      0x020 /* Ring 0..1 */
+#define TM_QW3_HV_PHYS      0x030 /* Ring 0..1 */
+
+/* Byte offsets inside a QW  QW0 QW1 QW2 QW3 */
+#define TM_NSR              0x0  /*  +   +   -   +  */
+#define TM_CPPR             0x1  /*  -   +   -   +  */
+#define TM_IPB              0x2  /*  -   +   +   +  */
+#define TM_LSMFB            0x3  /*  -   +   +   +  */
+#define TM_ACK_CNT          0x4  /*  -   +   -   -  */
+#define TM_INC              0x5  /*  -   +   -   +  */
+#define TM_AGE              0x6  /*  -   +   -   +  */
+#define TM_PIPR             0x7  /*  -   +   -   +  */
+
+#define TM_WORD0            0x0
+#define TM_WORD1            0x4
+
+/*
+ * QW word 2 contains the valid bit at the top and other fields
+ * depending on the QW.
+ */
+#define TM_WORD2            0x8
+#define   TM_QW0W2_VU           PPC_BIT32(0)
+#define   TM_QW0W2_LOGIC_SERV   PPC_BITMASK32(1, 31) /* XX 2,31 ? */
+#define   TM_QW1W2_VO           PPC_BIT32(0)
+#define   TM_QW1W2_OS_CAM       PPC_BITMASK32(8, 31)
+#define   TM_QW2W2_VP           PPC_BIT32(0)
+#define   TM_QW2W2_POOL_CAM     PPC_BITMASK32(8, 31)
+#define   TM_QW3W2_VT           PPC_BIT32(0)
+#define   TM_QW3W2_LP           PPC_BIT32(6)
+#define   TM_QW3W2_LE           PPC_BIT32(7)
+#define   TM_QW3W2_T            PPC_BIT32(31)
+
+/*
+ * In addition to normal loads to "peek" and writes (only when invalid)
+ * using 4 and 8 byte accesses, the above registers support these
+ * "special" byte operations:
+ *
+ *   - Byte load from QW0[NSR] - User level NSR (EBB)
+ *   - Byte store to QW0[NSR] - User level NSR (EBB)
+ *   - Byte load/store to QW1[CPPR] and QW3[CPPR] - CPPR access
+ *   - Byte load from QW3[TM_WORD2] - Read VT||00000||LP||LE on thrd 0
+ *     otherwise VT||0000000
+ *   - Byte store to QW3[TM_WORD2] - Set VT bit (and LP/LE if present)
+ *
+ * Then we have all these "special" CI ops at these offsets that trigger
+ * all sorts of side effects:
+ */
+#define TM_SPC_ACK_EBB          0x800   /* Load8 ack EBB to reg */
+#define TM_SPC_ACK_OS_REG       0x810   /* Load16 ack OS irq to reg */
+#define TM_SPC_PUSH_USR_CTX     0x808   /* Store32 Push/Validate user context */
+#define TM_SPC_PULL_USR_CTX     0x808   /* Load32 Pull/Invalidate user
+                                         * context */
+#define TM_SPC_SET_OS_PENDING   0x812   /* Store8 Set OS irq pending bit */
+#define TM_SPC_PULL_OS_CTX      0x818   /* Load32/Load64 Pull/Invalidate OS
+                                         * context to reg */
+#define TM_SPC_PULL_POOL_CTX    0x828   /* Load32/Load64 Pull/Invalidate Pool
+                                         * context to reg */
+#define TM_SPC_ACK_HV_REG       0x830   /* Load16 ack HV irq to reg */
+#define TM_SPC_PULL_USR_CTX_OL  0xc08   /* Store8 Pull/Inval usr ctx to odd
+                                         * line */
+#define TM_SPC_ACK_OS_EL        0xc10   /* Store8 ack OS irq to even line */
+#define TM_SPC_ACK_HV_POOL_EL   0xc20   /* Store8 ack HV evt pool to even
+                                         * line */
+#define TM_SPC_ACK_HV_EL        0xc30   /* Store8 ack HV irq to even line */
+/* XXX more... */
+
+/* NSR fields for the various QW ack types */
+#define TM_QW0_NSR_EB           PPC_BIT8(0)
+#define TM_QW1_NSR_EO           PPC_BIT8(0)
+#define TM_QW3_NSR_HE           PPC_BITMASK8(0, 1)
+#define  TM_QW3_NSR_HE_NONE     0
+#define  TM_QW3_NSR_HE_POOL     1
+#define  TM_QW3_NSR_HE_PHYS     2
+#define  TM_QW3_NSR_HE_LSI      3
+#define TM_QW3_NSR_I            PPC_BIT8(2)
+#define TM_QW3_NSR_GRP_LVL      PPC_BITMASK8(3, 7)
+
 /*
  * EAS (Event Assignment Structure)
  *
--
cgit v1.2.3-55-g7522


From af53dbf6227a78a25ead654998fd8caf46639810 Mon Sep 17 00:00:00 2001
From: Cédric Le Goater
Date: Sun, 9 Dec 2018 20:45:54 +0100
Subject: ppc/xive: introduce a simplified XIVE presenter

The last sub-engine of the XIVE architecture is the Interrupt
Virtualization Presentation Engine (IVPE). On HW, the IVRE and the
IVPE share elements, the Power Bus interface (CQ), the routing table
descriptors, and they can be combined in the same HW logic. We do the
same in QEMU and combine both engines in the XiveRouter for
simplicity.

When the IVRE has completed its job of matching an event source with
a Notification Virtual Target (NVT) to notify, it forwards the event
notification to the IVPE sub-engine. The IVPE scans the thread
interrupt contexts of the Notification Virtual Targets (NVT)
dispatched on the HW processor threads and, if a match is found,
signals the thread. If not, the IVPE escalates the notification to
some other targets and records the notification in a backlog queue.

The IVPE maintains the thread interrupt context state for each of its
NVTs not dispatched on HW processor threads in the Notification
Virtual Target table (NVTT).

The model currently only supports single NVT notifications.
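As an illustration of the CAM line compare described above, a sketch (not part of the patch) using the xive_nvt_cam_line() helper added to xive.h below:

    /* The value matched against the OS CAM field of every dispatched
     * thread context; e.g. block 0, index 0x31 yields CAM line 0x31: */
    uint32_t cam = xive_nvt_cam_line(nvt_blk, nvt_idx); /* (blk << 19) | idx */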
Signed-off-by: Cédric Le Goater [dwg: Folded in fix for field accessors] Signed-off-by: David Gibson --- hw/intc/xive.c | 190 +++++++++++++++++++++++++++++++++++++++++++++ include/hw/ppc/xive.h | 14 ++++ include/hw/ppc/xive_regs.h | 24 ++++++ 3 files changed, 228 insertions(+) (limited to 'hw/intc') diff --git a/hw/intc/xive.c b/hw/intc/xive.c index 06a835c454..1d737346c3 100644 --- a/hw/intc/xive.c +++ b/hw/intc/xive.c @@ -984,6 +984,188 @@ int xive_router_write_end(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx, return xrc->write_end(xrtr, end_blk, end_idx, end, word_number); } +int xive_router_get_nvt(XiveRouter *xrtr, uint8_t nvt_blk, uint32_t nvt_idx, + XiveNVT *nvt) +{ + XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr); + + return xrc->get_nvt(xrtr, nvt_blk, nvt_idx, nvt); +} + +int xive_router_write_nvt(XiveRouter *xrtr, uint8_t nvt_blk, uint32_t nvt_idx, + XiveNVT *nvt, uint8_t word_number) +{ + XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr); + + return xrc->write_nvt(xrtr, nvt_blk, nvt_idx, nvt, word_number); +} + +/* + * The thread context register words are in big-endian format. + */ +static int xive_presenter_tctx_match(XiveTCTX *tctx, uint8_t format, + uint8_t nvt_blk, uint32_t nvt_idx, + bool cam_ignore, uint32_t logic_serv) +{ + uint32_t cam = xive_nvt_cam_line(nvt_blk, nvt_idx); + uint32_t qw2w2 = xive_tctx_word2(&tctx->regs[TM_QW2_HV_POOL]); + uint32_t qw1w2 = xive_tctx_word2(&tctx->regs[TM_QW1_OS]); + uint32_t qw0w2 = xive_tctx_word2(&tctx->regs[TM_QW0_USER]); + + /* + * TODO (PowerNV): ignore mode. The low order bits of the NVT + * identifier are ignored in the "CAM" match. + */ + + if (format == 0) { + if (cam_ignore == true) { + /* + * F=0 & i=1: Logical server notification (bits ignored at + * the end of the NVT identifier) + */ + qemu_log_mask(LOG_UNIMP, "XIVE: no support for LS NVT %x/%x\n", + nvt_blk, nvt_idx); + return -1; + } + + /* F=0 & i=0: Specific NVT notification */ + + /* TODO (PowerNV) : PHYS ring */ + + /* HV POOL ring */ + if ((be32_to_cpu(qw2w2) & TM_QW2W2_VP) && + cam == xive_get_field32(TM_QW2W2_POOL_CAM, qw2w2)) { + return TM_QW2_HV_POOL; + } + + /* OS ring */ + if ((be32_to_cpu(qw1w2) & TM_QW1W2_VO) && + cam == xive_get_field32(TM_QW1W2_OS_CAM, qw1w2)) { + return TM_QW1_OS; + } + } else { + /* F=1 : User level Event-Based Branch (EBB) notification */ + + /* USER ring */ + if ((be32_to_cpu(qw1w2) & TM_QW1W2_VO) && + (cam == xive_get_field32(TM_QW1W2_OS_CAM, qw1w2)) && + (be32_to_cpu(qw0w2) & TM_QW0W2_VU) && + (logic_serv == xive_get_field32(TM_QW0W2_LOGIC_SERV, qw0w2))) { + return TM_QW0_USER; + } + } + return -1; +} + +typedef struct XiveTCTXMatch { + XiveTCTX *tctx; + uint8_t ring; +} XiveTCTXMatch; + +static bool xive_presenter_match(XiveRouter *xrtr, uint8_t format, + uint8_t nvt_blk, uint32_t nvt_idx, + bool cam_ignore, uint8_t priority, + uint32_t logic_serv, XiveTCTXMatch *match) +{ + CPUState *cs; + + /* + * TODO (PowerNV): handle chip_id overwrite of block field for + * hardwired CAM compares + */ + + CPU_FOREACH(cs) { + PowerPCCPU *cpu = POWERPC_CPU(cs); + XiveTCTX *tctx = XIVE_TCTX(cpu->intc); + int ring; + + /* + * HW checks that the CPU is enabled in the Physical Thread + * Enable Register (PTER). + */ + + /* + * Check the thread context CAM lines and record matches. We + * will handle CPU exception delivery later + */ + ring = xive_presenter_tctx_match(tctx, format, nvt_blk, nvt_idx, + cam_ignore, logic_serv); + /* + * Save the context and follow on to catch duplicates, that we + * don't support yet. 
+ */ + if (ring != -1) { + if (match->tctx) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: already found a thread " + "context NVT %x/%x\n", nvt_blk, nvt_idx); + return false; + } + + match->ring = ring; + match->tctx = tctx; + } + } + + if (!match->tctx) { + qemu_log_mask(LOG_UNIMP, "XIVE: NVT %x/%x is not dispatched\n", + nvt_blk, nvt_idx); + return false; + } + + return true; +} + +/* + * This is our simple Xive Presenter Engine model. It is merged in the + * Router as it does not require an extra object. + * + * It receives notification requests sent by the IVRE to find one + * matching NVT (or more) dispatched on the processor threads. In case + * of a single NVT notification, the process is abreviated and the + * thread is signaled if a match is found. In case of a logical server + * notification (bits ignored at the end of the NVT identifier), the + * IVPE and IVRE select a winning thread using different filters. This + * involves 2 or 3 exchanges on the PowerBus that the model does not + * support. + * + * The parameters represent what is sent on the PowerBus + */ +static void xive_presenter_notify(XiveRouter *xrtr, uint8_t format, + uint8_t nvt_blk, uint32_t nvt_idx, + bool cam_ignore, uint8_t priority, + uint32_t logic_serv) +{ + XiveNVT nvt; + XiveTCTXMatch match = { .tctx = NULL, .ring = 0 }; + bool found; + + /* NVT cache lookup */ + if (xive_router_get_nvt(xrtr, nvt_blk, nvt_idx, &nvt)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: no NVT %x/%x\n", + nvt_blk, nvt_idx); + return; + } + + if (!xive_nvt_is_valid(&nvt)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVT %x/%x is invalid\n", + nvt_blk, nvt_idx); + return; + } + + found = xive_presenter_match(xrtr, format, nvt_blk, nvt_idx, cam_ignore, + priority, logic_serv, &match); + if (found) { + return; + } + + /* + * If no matching NVT is dispatched on a HW thread : + * - update the NVT structure if backlog is activated + * - escalate (ESe PQ bits and EAS in w4-5) if escalation is + * activated + */ +} + /* * An END trigger can come from an event trigger (IPI or HW) or from * another chip. We don't model the PowerBus but the END trigger @@ -1053,6 +1235,14 @@ static void xive_router_end_notify(XiveRouter *xrtr, uint8_t end_blk, /* * Follows IVPE notification */ + xive_presenter_notify(xrtr, format, + xive_get_field32(END_W6_NVT_BLOCK, end.w6), + xive_get_field32(END_W6_NVT_INDEX, end.w6), + xive_get_field32(END_W7_F0_IGNORE, end.w7), + priority, + xive_get_field32(END_W7_F1_LOG_SERVER_ID, end.w7)); + + /* TODO: Auto EOI. 
*/
 }
 
 static void xive_router_notify(XiveNotifier *xn, uint32_t lisn)
diff --git a/include/hw/ppc/xive.h b/include/hw/ppc/xive.h
index 1e823a4c64..19309d1d65 100644
--- a/include/hw/ppc/xive.h
+++ b/include/hw/ppc/xive.h
@@ -325,6 +325,10 @@ typedef struct XiveRouterClass {
                     XiveEND *end);
     int (*write_end)(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx,
                      XiveEND *end, uint8_t word_number);
+    int (*get_nvt)(XiveRouter *xrtr, uint8_t nvt_blk, uint32_t nvt_idx,
+                   XiveNVT *nvt);
+    int (*write_nvt)(XiveRouter *xrtr, uint8_t nvt_blk, uint32_t nvt_idx,
+                     XiveNVT *nvt, uint8_t word_number);
 } XiveRouterClass;
 
 void xive_eas_pic_print_info(XiveEAS *eas, uint32_t lisn, Monitor *mon);
@@ -335,6 +339,11 @@ int xive_router_get_end(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx,
                         XiveEND *end);
 int xive_router_write_end(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx,
                           XiveEND *end, uint8_t word_number);
+int xive_router_get_nvt(XiveRouter *xrtr, uint8_t nvt_blk, uint32_t nvt_idx,
+                        XiveNVT *nvt);
+int xive_router_write_nvt(XiveRouter *xrtr, uint8_t nvt_blk, uint32_t nvt_idx,
+                          XiveNVT *nvt, uint8_t word_number);
+
 
 /*
  * XIVE END ESBs
@@ -411,4 +420,9 @@ extern const MemoryRegionOps xive_tm_ops;
 
 void xive_tctx_pic_print_info(XiveTCTX *tctx, Monitor *mon);
 
+static inline uint32_t xive_nvt_cam_line(uint8_t nvt_blk, uint32_t nvt_idx)
+{
+    return (nvt_blk << 19) | nvt_idx;
+}
+
 #endif /* PPC_XIVE_H */
diff --git a/include/hw/ppc/xive_regs.h b/include/hw/ppc/xive_regs.h
index 8b3cc6c9b9..bf36678a24 100644
--- a/include/hw/ppc/xive_regs.h
+++ b/include/hw/ppc/xive_regs.h
@@ -208,4 +208,28 @@ typedef struct XiveEND {
 #define xive_end_is_backlog(end)  (be32_to_cpu((end)->w0) & END_W0_BACKLOG)
 #define xive_end_is_escalate(end) (be32_to_cpu((end)->w0) & END_W0_ESCALATE_CTL)
 
+/* Notification Virtual Target (NVT) */
+typedef struct XiveNVT {
+    uint32_t        w0;
+#define NVT_W0_VALID            PPC_BIT32(0)
+    uint32_t        w1;
+    uint32_t        w2;
+    uint32_t        w3;
+    uint32_t        w4;
+    uint32_t        w5;
+    uint32_t        w6;
+    uint32_t        w7;
+    uint32_t        w8;
+#define NVT_W8_GRP_VALID        PPC_BIT32(0)
+    uint32_t        w9;
+    uint32_t        wa;
+    uint32_t        wb;
+    uint32_t        wc;
+    uint32_t        wd;
+    uint32_t        we;
+    uint32_t        wf;
+} XiveNVT;
+
+#define xive_nvt_is_valid(nvt)    (be32_to_cpu((nvt)->w0) & NVT_W0_VALID)
+
 #endif /* PPC_XIVE_REGS_H */
--
cgit v1.2.3-55-g7522


From cdd4de68edb6745d35e2a9e14c32f9a588c1fee7 Mon Sep 17 00:00:00 2001
From: Cédric Le Goater
Date: Sun, 9 Dec 2018 20:45:55 +0100
Subject: ppc/xive: notify the CPU when the interrupt priority is more privileged

After the event data has been enqueued in the O/S Event Queue, the
IVPE raises the bit corresponding to the priority of the pending
interrupt in the register IPB (Interrupt Pending Buffer) to indicate
there is an event pending in one of the 8 priority queues. The Pending
Interrupt Priority Register (PIPR) is also updated using the IPB. This
register represents the priority of the most favored pending
notification.

The PIPR is then compared to the Current Processor Priority Register
(CPPR). If it is more favored (numerically less than), the CPU
interrupt line is raised and the EO bit of the Notification Source
Register (NSR) is updated to notify the presence of an exception for
the O/S. The check needs to be done whenever the PIPR or the CPPR
changes.

The O/S acknowledges the interrupt with a special load in the Thread
Interrupt Management Area. If the EO bit of the NSR is set, the CPPR
takes the value of PIPR.
The bit number in the IPB corresponding to the priority of the pending
interrupt is reset and so is the EO bit of the NSR.

Signed-off-by: Cédric Le Goater
Reviewed-by: David Gibson
[dwg: Fix style nits]
Signed-off-by: David Gibson
---
 hw/intc/xive.c | 96 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 95 insertions(+), 1 deletion(-)

(limited to 'hw/intc')

diff --git a/hw/intc/xive.c b/hw/intc/xive.c
index 1d737346c3..607e74acd2 100644
--- a/hw/intc/xive.c
+++ b/hw/intc/xive.c
@@ -22,9 +22,75 @@
  * XIVE Thread Interrupt Management context
  */
 
+/*
+ * Convert a priority number to an Interrupt Pending Buffer (IPB)
+ * register, which indicates a pending interrupt at the priority
+ * corresponding to the bit number
+ */
+static uint8_t priority_to_ipb(uint8_t priority)
+{
+    return priority > XIVE_PRIORITY_MAX ?
+        0 : 1 << (XIVE_PRIORITY_MAX - priority);
+}
+
+/*
+ * Convert an Interrupt Pending Buffer (IPB) register to a Pending
+ * Interrupt Priority Register (PIPR), which contains the priority of
+ * the most favored pending notification.
+ */
+static uint8_t ipb_to_pipr(uint8_t ipb)
+{
+    return ipb ? clz32((uint32_t)ipb << 24) : 0xff;
+}
+
+static void ipb_update(uint8_t *regs, uint8_t priority)
+{
+    regs[TM_IPB] |= priority_to_ipb(priority);
+    regs[TM_PIPR] = ipb_to_pipr(regs[TM_IPB]);
+}
+
+static uint8_t exception_mask(uint8_t ring)
+{
+    switch (ring) {
+    case TM_QW1_OS:
+        return TM_QW1_NSR_EO;
+    default:
+        g_assert_not_reached();
+    }
+}
+
 static uint64_t xive_tctx_accept(XiveTCTX *tctx, uint8_t ring)
 {
-    return 0;
+    uint8_t *regs = &tctx->regs[ring];
+    uint8_t nsr = regs[TM_NSR];
+    uint8_t mask = exception_mask(ring);
+
+    qemu_irq_lower(tctx->output);
+
+    if (regs[TM_NSR] & mask) {
+        uint8_t cppr = regs[TM_PIPR];
+
+        regs[TM_CPPR] = cppr;
+
+        /* Reset the pending buffer bit */
+        regs[TM_IPB] &= ~priority_to_ipb(cppr);
+        regs[TM_PIPR] = ipb_to_pipr(regs[TM_IPB]);
+
+        /* Drop Exception bit */
+        regs[TM_NSR] &= ~mask;
+    }
+
+    return (nsr << 8) | regs[TM_CPPR];
+}
+
+static void xive_tctx_notify(XiveTCTX *tctx, uint8_t ring)
+{
+    uint8_t *regs = &tctx->regs[ring];
+
+    if (regs[TM_PIPR] < regs[TM_CPPR]) {
+        regs[TM_NSR] |= exception_mask(ring);
+        qemu_irq_raise(tctx->output);
+    }
 }
 
 static void xive_tctx_set_cppr(XiveTCTX *tctx, uint8_t ring, uint8_t cppr)
@@ -34,6 +100,9 @@ static void xive_tctx_set_cppr(XiveTCTX *tctx, uint8_t ring, uint8_t cppr)
     }
 
     tctx->regs[ring + TM_CPPR] = cppr;
+
+    /* CPPR has changed, check if we need to raise a pending exception */
+    xive_tctx_notify(tctx, ring);
 }
 
 /*
@@ -189,6 +258,17 @@ static void xive_tm_set_os_cppr(XiveTCTX *tctx, hwaddr offset,
     xive_tctx_set_cppr(tctx, TM_QW1_OS, value & 0xff);
 }
 
+/*
+ * Adjust the IPB to allow a CPU to process event queues of other
+ * priorities during one physical interrupt cycle.
+ */
+static void xive_tm_set_os_pending(XiveTCTX *tctx, hwaddr offset,
+                                   uint64_t value, unsigned size)
+{
+    ipb_update(&tctx->regs[TM_QW1_OS], value & 0xff);
+    xive_tctx_notify(tctx, TM_QW1_OS);
+}
+
 /*
  * Define a mapping of "special" operations depending on the TIMA page
  * offset and the size of the operation.
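A worked example of the two conversion helpers above, assuming XIVE_PRIORITY_MAX is 7 (one IPB bit per priority queue):

    priority_to_ipb(3);   /* 0x10, i.e. 1 << (7 - 3) */
    priority_to_ipb(5);   /* 0x04 */
    ipb_to_pipr(0x14);    /* 3, since clz32(0x14000000) == 3: priority 3
                           * is the most favored of the two pending */
    ipb_to_pipr(0x00);    /* 0xff, nothing pending */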
@@ -211,6 +291,7 @@ static const XiveTmOp xive_tm_operations[] = {
 
     /* MMIOs above 2K : special operations with side effects */
     { XIVE_TM_OS_PAGE, TM_SPC_ACK_OS_REG,     2, NULL, xive_tm_ack_os_reg },
+    { XIVE_TM_OS_PAGE, TM_SPC_SET_OS_PENDING, 1, xive_tm_set_os_pending, NULL },
 };
 
 static const XiveTmOp *xive_tm_find_op(hwaddr offset, unsigned size, bool write)
@@ -373,6 +454,13 @@ static void xive_tctx_reset(void *dev)
     tctx->regs[TM_QW1_OS + TM_LSMFB] = 0xFF;
     tctx->regs[TM_QW1_OS + TM_ACK_CNT] = 0xFF;
     tctx->regs[TM_QW1_OS + TM_AGE] = 0xFF;
+
+    /*
+     * Initialize PIPR to 0xFF to avoid phantom interrupts when the
+     * CPPR is first set.
+     */
+    tctx->regs[TM_QW1_OS + TM_PIPR] =
+        ipb_to_pipr(tctx->regs[TM_QW1_OS + TM_IPB]);
 }
 
 static void xive_tctx_realize(DeviceState *dev, Error **errp)
@@ -1155,9 +1243,15 @@ static void xive_presenter_notify(XiveRouter *xrtr, uint8_t format,
     found = xive_presenter_match(xrtr, format, nvt_blk, nvt_idx, cam_ignore,
                                  priority, logic_serv, &match);
     if (found) {
+        ipb_update(&match.tctx->regs[match.ring], priority);
+        xive_tctx_notify(match.tctx, match.ring);
         return;
     }
 
+    /* Record the IPB in the associated NVT structure */
+    ipb_update((uint8_t *) &nvt.w4, priority);
+    xive_router_write_nvt(xrtr, nvt_blk, nvt_idx, &nvt, 4);
+
     /*
      * If no matching NVT is dispatched on a HW thread :
      * - update the NVT structure if backlog is activated
--
cgit v1.2.3-55-g7522


From 3aa597f6505b4d7b62a1b77ab95a233dd5c7c5f0 Mon Sep 17 00:00:00 2001
From: Cédric Le Goater
Date: Sun, 9 Dec 2018 20:45:56 +0100
Subject: spapr/xive: introduce a XIVE interrupt controller

sPAPRXive models the XIVE interrupt controller of the sPAPR machine.
It inherits from the XiveRouter and provisions storage for the routing
tables :

  - Event Assignment Structure (EAS)
  - Event Notification Descriptor (END)

The sPAPRXive model incorporates an internal XiveSource for the IPIs
and for the interrupts of the virtual devices of the guest. This model
is consistent with the XIVE architecture which also incorporates an
internal IVSE for IPIs and accelerator interrupts in the IVRE
sub-engine.

The sPAPRXive model exports two memory regions, one for the ESB
trigger and management pages used to control the sources and one for
the TIMA pages. They are mapped by default at the addresses found on
chip 0 of a baremetal system. This is also consistent with the XIVE
architecture which defines a Virtualization Controller BAR for the
internal IVSE ESB pages and a Thread Management BAR for the TIMA.
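The resulting default layout can be sketched as follows; the ESB page shift is a property of the embedded XiveSource, so the 64K entry size used here is an assumption for illustration only:

    /*
     * 0x0006010000000000   vc_base    source ESB pages
     * vc_base + nr_irqs * 0x10000     end_base, END ESB pages
     * 0x0006030203180000   tm_base    4 TIMA pages of 2^TM_SHIFT bytes
     */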
Signed-off-by: Cédric Le Goater Reviewed-by: David Gibson [dwg: Fold in field accessor fixes] Signed-off-by: David Gibson --- default-configs/ppc64-softmmu.mak | 1 + hw/intc/Makefile.objs | 1 + hw/intc/spapr_xive.c | 366 ++++++++++++++++++++++++++++++++++++++ include/hw/ppc/spapr_xive.h | 45 +++++ 4 files changed, 413 insertions(+) create mode 100644 hw/intc/spapr_xive.c create mode 100644 include/hw/ppc/spapr_xive.h (limited to 'hw/intc') diff --git a/default-configs/ppc64-softmmu.mak b/default-configs/ppc64-softmmu.mak index 2d1e7c5c46..7f34ad0528 100644 --- a/default-configs/ppc64-softmmu.mak +++ b/default-configs/ppc64-softmmu.mak @@ -17,6 +17,7 @@ CONFIG_XICS=$(CONFIG_PSERIES) CONFIG_XICS_SPAPR=$(CONFIG_PSERIES) CONFIG_XICS_KVM=$(call land,$(CONFIG_PSERIES),$(CONFIG_KVM)) CONFIG_XIVE=$(CONFIG_PSERIES) +CONFIG_XIVE_SPAPR=$(CONFIG_PSERIES) CONFIG_MEM_DEVICE=y CONFIG_DIMM=y CONFIG_SPAPR_RNG=y diff --git a/hw/intc/Makefile.objs b/hw/intc/Makefile.objs index 72a46ed91c..301a8e972d 100644 --- a/hw/intc/Makefile.objs +++ b/hw/intc/Makefile.objs @@ -38,6 +38,7 @@ obj-$(CONFIG_XICS) += xics.o obj-$(CONFIG_XICS_SPAPR) += xics_spapr.o obj-$(CONFIG_XICS_KVM) += xics_kvm.o obj-$(CONFIG_XIVE) += xive.o +obj-$(CONFIG_XIVE_SPAPR) += spapr_xive.o obj-$(CONFIG_POWERNV) += xics_pnv.o obj-$(CONFIG_ALLWINNER_A10_PIC) += allwinner-a10-pic.o obj-$(CONFIG_S390_FLIC) += s390_flic.o diff --git a/hw/intc/spapr_xive.c b/hw/intc/spapr_xive.c new file mode 100644 index 0000000000..5f03adca56 --- /dev/null +++ b/hw/intc/spapr_xive.c @@ -0,0 +1,366 @@ +/* + * QEMU PowerPC sPAPR XIVE interrupt controller model + * + * Copyright (c) 2017-2018, IBM Corporation. + * + * This code is licensed under the GPL version 2 or later. See the + * COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qapi/error.h" +#include "qemu/error-report.h" +#include "target/ppc/cpu.h" +#include "sysemu/cpus.h" +#include "monitor/monitor.h" +#include "hw/ppc/spapr.h" +#include "hw/ppc/spapr_xive.h" +#include "hw/ppc/xive.h" +#include "hw/ppc/xive_regs.h" + +/* + * XIVE Virtualization Controller BAR and Thread Managment BAR that we + * use for the ESB pages and the TIMA pages + */ +#define SPAPR_XIVE_VC_BASE 0x0006010000000000ull +#define SPAPR_XIVE_TM_BASE 0x0006030203180000ull + +/* + * On sPAPR machines, use a simplified output for the XIVE END + * structure dumping only the information related to the OS EQ. + */ +static void spapr_xive_end_pic_print_info(sPAPRXive *xive, XiveEND *end, + Monitor *mon) +{ + uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1); + uint32_t qgen = xive_get_field32(END_W1_GENERATION, end->w1); + uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0); + uint32_t qentries = 1 << (qsize + 10); + uint32_t nvt = xive_get_field32(END_W6_NVT_INDEX, end->w6); + uint8_t priority = xive_get_field32(END_W7_F0_PRIORITY, end->w7); + + monitor_printf(mon, "%3d/%d % 6d/%5d ^%d", nvt, + priority, qindex, qentries, qgen); + + xive_end_queue_pic_print_info(end, 6, mon); + monitor_printf(mon, "]"); +} + +void spapr_xive_pic_print_info(sPAPRXive *xive, Monitor *mon) +{ + XiveSource *xsrc = &xive->source; + int i; + + monitor_printf(mon, " LSIN PQ EISN CPU/PRIO EQ\n"); + + for (i = 0; i < xive->nr_irqs; i++) { + uint8_t pq = xive_source_esb_get(xsrc, i); + XiveEAS *eas = &xive->eat[i]; + + if (!xive_eas_is_valid(eas)) { + continue; + } + + monitor_printf(mon, " %08x %s %c%c%c %s %08x ", i, + xive_source_irq_is_lsi(xsrc, i) ? "LSI" : "MSI", + pq & XIVE_ESB_VAL_P ? 
'P' : '-', + pq & XIVE_ESB_VAL_Q ? 'Q' : '-', + xsrc->status[i] & XIVE_STATUS_ASSERTED ? 'A' : ' ', + xive_eas_is_masked(eas) ? "M" : " ", + (int) xive_get_field64(EAS_END_DATA, eas->w)); + + if (!xive_eas_is_masked(eas)) { + uint32_t end_idx = xive_get_field64(EAS_END_INDEX, eas->w); + XiveEND *end; + + assert(end_idx < xive->nr_ends); + end = &xive->endt[end_idx]; + + if (xive_end_is_valid(end)) { + spapr_xive_end_pic_print_info(xive, end, mon); + } + } + monitor_printf(mon, "\n"); + } +} + +static void spapr_xive_map_mmio(sPAPRXive *xive) +{ + sysbus_mmio_map(SYS_BUS_DEVICE(xive), 0, xive->vc_base); + sysbus_mmio_map(SYS_BUS_DEVICE(xive), 1, xive->end_base); + sysbus_mmio_map(SYS_BUS_DEVICE(xive), 2, xive->tm_base); +} + +static void spapr_xive_end_reset(XiveEND *end) +{ + memset(end, 0, sizeof(*end)); + + /* switch off the escalation and notification ESBs */ + end->w1 = cpu_to_be32(END_W1_ESe_Q | END_W1_ESn_Q); +} + +static void spapr_xive_reset(void *dev) +{ + sPAPRXive *xive = SPAPR_XIVE(dev); + int i; + + /* + * The XiveSource has its own reset handler, which mask off all + * IRQs (!P|Q) + */ + + /* Mask all valid EASs in the IRQ number space. */ + for (i = 0; i < xive->nr_irqs; i++) { + XiveEAS *eas = &xive->eat[i]; + if (xive_eas_is_valid(eas)) { + eas->w = cpu_to_be64(EAS_VALID | EAS_MASKED); + } else { + eas->w = 0; + } + } + + /* Clear all ENDs */ + for (i = 0; i < xive->nr_ends; i++) { + spapr_xive_end_reset(&xive->endt[i]); + } +} + +static void spapr_xive_instance_init(Object *obj) +{ + sPAPRXive *xive = SPAPR_XIVE(obj); + + object_initialize(&xive->source, sizeof(xive->source), TYPE_XIVE_SOURCE); + object_property_add_child(obj, "source", OBJECT(&xive->source), NULL); + + object_initialize(&xive->end_source, sizeof(xive->end_source), + TYPE_XIVE_END_SOURCE); + object_property_add_child(obj, "end_source", OBJECT(&xive->end_source), + NULL); +} + +static void spapr_xive_realize(DeviceState *dev, Error **errp) +{ + sPAPRXive *xive = SPAPR_XIVE(dev); + XiveSource *xsrc = &xive->source; + XiveENDSource *end_xsrc = &xive->end_source; + Error *local_err = NULL; + + if (!xive->nr_irqs) { + error_setg(errp, "Number of interrupt needs to be greater 0"); + return; + } + + if (!xive->nr_ends) { + error_setg(errp, "Number of interrupt needs to be greater 0"); + return; + } + + /* + * Initialize the internal sources, for IPIs and virtual devices. 
+ */ + object_property_set_int(OBJECT(xsrc), xive->nr_irqs, "nr-irqs", + &error_fatal); + object_property_add_const_link(OBJECT(xsrc), "xive", OBJECT(xive), + &error_fatal); + object_property_set_bool(OBJECT(xsrc), true, "realized", &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + + /* + * Initialize the END ESB source + */ + object_property_set_int(OBJECT(end_xsrc), xive->nr_irqs, "nr-ends", + &error_fatal); + object_property_add_const_link(OBJECT(end_xsrc), "xive", OBJECT(xive), + &error_fatal); + object_property_set_bool(OBJECT(end_xsrc), true, "realized", &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + + /* Set the mapping address of the END ESB pages after the source ESBs */ + xive->end_base = xive->vc_base + (1ull << xsrc->esb_shift) * xsrc->nr_irqs; + + /* + * Allocate the routing tables + */ + xive->eat = g_new0(XiveEAS, xive->nr_irqs); + xive->endt = g_new0(XiveEND, xive->nr_ends); + + /* TIMA initialization */ + memory_region_init_io(&xive->tm_mmio, OBJECT(xive), &xive_tm_ops, xive, + "xive.tima", 4ull << TM_SHIFT); + + /* Define all XIVE MMIO regions on SysBus */ + sysbus_init_mmio(SYS_BUS_DEVICE(xive), &xsrc->esb_mmio); + sysbus_init_mmio(SYS_BUS_DEVICE(xive), &end_xsrc->esb_mmio); + sysbus_init_mmio(SYS_BUS_DEVICE(xive), &xive->tm_mmio); + + /* Map all regions */ + spapr_xive_map_mmio(xive); + + qemu_register_reset(spapr_xive_reset, dev); +} + +static int spapr_xive_get_eas(XiveRouter *xrtr, uint8_t eas_blk, + uint32_t eas_idx, XiveEAS *eas) +{ + sPAPRXive *xive = SPAPR_XIVE(xrtr); + + if (eas_idx >= xive->nr_irqs) { + return -1; + } + + *eas = xive->eat[eas_idx]; + return 0; +} + +static int spapr_xive_get_end(XiveRouter *xrtr, + uint8_t end_blk, uint32_t end_idx, XiveEND *end) +{ + sPAPRXive *xive = SPAPR_XIVE(xrtr); + + if (end_idx >= xive->nr_ends) { + return -1; + } + + memcpy(end, &xive->endt[end_idx], sizeof(XiveEND)); + return 0; +} + +static int spapr_xive_write_end(XiveRouter *xrtr, uint8_t end_blk, + uint32_t end_idx, XiveEND *end, + uint8_t word_number) +{ + sPAPRXive *xive = SPAPR_XIVE(xrtr); + + if (end_idx >= xive->nr_ends) { + return -1; + } + + memcpy(&xive->endt[end_idx], end, sizeof(XiveEND)); + return 0; +} + +static const VMStateDescription vmstate_spapr_xive_end = { + .name = TYPE_SPAPR_XIVE "/end", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField []) { + VMSTATE_UINT32(w0, XiveEND), + VMSTATE_UINT32(w1, XiveEND), + VMSTATE_UINT32(w2, XiveEND), + VMSTATE_UINT32(w3, XiveEND), + VMSTATE_UINT32(w4, XiveEND), + VMSTATE_UINT32(w5, XiveEND), + VMSTATE_UINT32(w6, XiveEND), + VMSTATE_UINT32(w7, XiveEND), + VMSTATE_END_OF_LIST() + }, +}; + +static const VMStateDescription vmstate_spapr_xive_eas = { + .name = TYPE_SPAPR_XIVE "/eas", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField []) { + VMSTATE_UINT64(w, XiveEAS), + VMSTATE_END_OF_LIST() + }, +}; + +static const VMStateDescription vmstate_spapr_xive = { + .name = TYPE_SPAPR_XIVE, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32_EQUAL(nr_irqs, sPAPRXive, NULL), + VMSTATE_STRUCT_VARRAY_POINTER_UINT32(eat, sPAPRXive, nr_irqs, + vmstate_spapr_xive_eas, XiveEAS), + VMSTATE_STRUCT_VARRAY_POINTER_UINT32(endt, sPAPRXive, nr_ends, + vmstate_spapr_xive_end, XiveEND), + VMSTATE_END_OF_LIST() + }, +}; + +static Property spapr_xive_properties[] = { + DEFINE_PROP_UINT32("nr-irqs", sPAPRXive, nr_irqs, 0), + DEFINE_PROP_UINT32("nr-ends", sPAPRXive, nr_ends, 0), + 
DEFINE_PROP_UINT64("vc-base", sPAPRXive, vc_base, SPAPR_XIVE_VC_BASE), + DEFINE_PROP_UINT64("tm-base", sPAPRXive, tm_base, SPAPR_XIVE_TM_BASE), + DEFINE_PROP_END_OF_LIST(), +}; + +static void spapr_xive_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + XiveRouterClass *xrc = XIVE_ROUTER_CLASS(klass); + + dc->desc = "sPAPR XIVE Interrupt Controller"; + dc->props = spapr_xive_properties; + dc->realize = spapr_xive_realize; + dc->vmsd = &vmstate_spapr_xive; + + xrc->get_eas = spapr_xive_get_eas; + xrc->get_end = spapr_xive_get_end; + xrc->write_end = spapr_xive_write_end; +} + +static const TypeInfo spapr_xive_info = { + .name = TYPE_SPAPR_XIVE, + .parent = TYPE_XIVE_ROUTER, + .instance_init = spapr_xive_instance_init, + .instance_size = sizeof(sPAPRXive), + .class_init = spapr_xive_class_init, +}; + +static void spapr_xive_register_types(void) +{ + type_register_static(&spapr_xive_info); +} + +type_init(spapr_xive_register_types) + +bool spapr_xive_irq_claim(sPAPRXive *xive, uint32_t lisn, bool lsi) +{ + XiveSource *xsrc = &xive->source; + + if (lisn >= xive->nr_irqs) { + return false; + } + + xive->eat[lisn].w |= cpu_to_be64(EAS_VALID); + xive_source_irq_set(xsrc, lisn, lsi); + return true; +} + +bool spapr_xive_irq_free(sPAPRXive *xive, uint32_t lisn) +{ + XiveSource *xsrc = &xive->source; + + if (lisn >= xive->nr_irqs) { + return false; + } + + xive->eat[lisn].w &= cpu_to_be64(~EAS_VALID); + xive_source_irq_set(xsrc, lisn, false); + return true; +} + +qemu_irq spapr_xive_qirq(sPAPRXive *xive, uint32_t lisn) +{ + XiveSource *xsrc = &xive->source; + + if (lisn >= xive->nr_irqs) { + return NULL; + } + + /* The sPAPR machine/device should have claimed the IRQ before */ + assert(xive_eas_is_valid(&xive->eat[lisn])); + + return xive_source_qirq(xsrc, lisn); +} diff --git a/include/hw/ppc/spapr_xive.h b/include/hw/ppc/spapr_xive.h new file mode 100644 index 0000000000..f087959b99 --- /dev/null +++ b/include/hw/ppc/spapr_xive.h @@ -0,0 +1,45 @@ +/* + * QEMU PowerPC sPAPR XIVE interrupt controller model + * + * Copyright (c) 2017-2018, IBM Corporation. + * + * This code is licensed under the GPL version 2 or later. See the + * COPYING file in the top-level directory. + */ + +#ifndef PPC_SPAPR_XIVE_H +#define PPC_SPAPR_XIVE_H + +#include "hw/ppc/xive.h" + +#define TYPE_SPAPR_XIVE "spapr-xive" +#define SPAPR_XIVE(obj) OBJECT_CHECK(sPAPRXive, (obj), TYPE_SPAPR_XIVE) + +typedef struct sPAPRXive { + XiveRouter parent; + + /* Internal interrupt source for IPIs and virtual devices */ + XiveSource source; + hwaddr vc_base; + + /* END ESB MMIOs */ + XiveENDSource end_source; + hwaddr end_base; + + /* Routing table */ + XiveEAS *eat; + uint32_t nr_irqs; + XiveEND *endt; + uint32_t nr_ends; + + /* TIMA mapping address */ + hwaddr tm_base; + MemoryRegion tm_mmio; +} sPAPRXive; + +bool spapr_xive_irq_claim(sPAPRXive *xive, uint32_t lisn, bool lsi); +bool spapr_xive_irq_free(sPAPRXive *xive, uint32_t lisn); +void spapr_xive_pic_print_info(sPAPRXive *xive, Monitor *mon); +qemu_irq spapr_xive_qirq(sPAPRXive *xive, uint32_t lisn); + +#endif /* PPC_SPAPR_XIVE_H */ -- cgit v1.2.3-55-g7522 From 0cddee8d488667a7de60e75f76ead8cffe613d75 Mon Sep 17 00:00:00 2001 From: Cédric Le Goater Date: Sun, 9 Dec 2018 20:45:57 +0100 Subject: spapr/xive: use the VCPU id as a NVT identifier The IVPE scans the O/S CAM line of the XIVE thread interrupt contexts to find a matching Notification Virtual Target (NVT) among the NVTs dispatched on the HW processor threads. 
On a real system, the thread interrupt contexts are updated by the hypervisor when a Virtual Processor is scheduled to run on a HW thread. Under QEMU, the model will emulate the same behavior by hardwiring the NVT identifier in the thread context registers at reset. The NVT identifier used by the sPAPRXive model is the VCPU id. The END identifier is also derived from the VCPU id. A set of helpers doing the conversion between identifiers are provided for the hcalls configuring the sources and the ENDs. The model does not need a NVT table but the XiveRouter NVT operations are provided to perform some extra checks in the routing algorithm. Signed-off-by: Cédric Le Goater Signed-off-by: David Gibson --- hw/intc/spapr_xive.c | 56 +++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 55 insertions(+), 1 deletion(-) (limited to 'hw/intc') diff --git a/hw/intc/spapr_xive.c b/hw/intc/spapr_xive.c index 5f03adca56..d6291c6470 100644 --- a/hw/intc/spapr_xive.c +++ b/hw/intc/spapr_xive.c @@ -26,6 +26,26 @@ #define SPAPR_XIVE_VC_BASE 0x0006010000000000ull #define SPAPR_XIVE_TM_BASE 0x0006030203180000ull +/* + * The allocation of VP blocks is a complex operation in OPAL and the + * VP identifiers have a relation with the number of HW chips, the + * size of the VP blocks, VP grouping, etc. The QEMU sPAPR XIVE + * controller model does not have the same constraints and can use a + * simple mapping scheme of the CPU vcpu_id + * + * These identifiers are never returned to the OS. + */ + +#define SPAPR_XIVE_NVT_BASE 0x400 + +/* + * sPAPR NVT and END indexing helpers + */ +static uint32_t spapr_xive_nvt_to_target(uint8_t nvt_blk, uint32_t nvt_idx) +{ + return nvt_idx - SPAPR_XIVE_NVT_BASE; +} + /* * On sPAPR machines, use a simplified output for the XIVE END * structure dumping only the information related to the OS EQ. @@ -40,7 +60,8 @@ static void spapr_xive_end_pic_print_info(sPAPRXive *xive, XiveEND *end, uint32_t nvt = xive_get_field32(END_W6_NVT_INDEX, end->w6); uint8_t priority = xive_get_field32(END_W7_F0_PRIORITY, end->w7); - monitor_printf(mon, "%3d/%d % 6d/%5d ^%d", nvt, + monitor_printf(mon, "%3d/%d % 6d/%5d ^%d", + spapr_xive_nvt_to_target(0, nvt), priority, qindex, qentries, qgen); xive_end_queue_pic_print_info(end, 6, mon); @@ -246,6 +267,37 @@ static int spapr_xive_write_end(XiveRouter *xrtr, uint8_t end_blk, return 0; } +static int spapr_xive_get_nvt(XiveRouter *xrtr, + uint8_t nvt_blk, uint32_t nvt_idx, XiveNVT *nvt) +{ + uint32_t vcpu_id = spapr_xive_nvt_to_target(nvt_blk, nvt_idx); + PowerPCCPU *cpu = spapr_find_cpu(vcpu_id); + + if (!cpu) { + /* TODO: should we assert() if we can find a NVT ? */ + return -1; + } + + /* + * sPAPR does not maintain a NVT table. Return that the NVT is + * valid if we have found a matching CPU + */ + nvt->w0 = cpu_to_be32(NVT_W0_VALID); + return 0; +} + +static int spapr_xive_write_nvt(XiveRouter *xrtr, uint8_t nvt_blk, + uint32_t nvt_idx, XiveNVT *nvt, + uint8_t word_number) +{ + /* + * We don't need to write back to the NVTs because the sPAPR + * machine should never hit a non-scheduled NVT. It should never + * get called. 
+ */ + g_assert_not_reached(); +} + static const VMStateDescription vmstate_spapr_xive_end = { .name = TYPE_SPAPR_XIVE "/end", .version_id = 1, @@ -308,6 +360,8 @@ static void spapr_xive_class_init(ObjectClass *klass, void *data) xrc->get_eas = spapr_xive_get_eas; xrc->get_end = spapr_xive_get_end; xrc->write_end = spapr_xive_write_end; + xrc->get_nvt = spapr_xive_get_nvt; + xrc->write_nvt = spapr_xive_write_nvt; } static const TypeInfo spapr_xive_info = { -- cgit v1.2.3-55-g7522 From 23bcd5eb9a472cc7bd147403d9ba18e293ee6adc Mon Sep 17 00:00:00 2001 From: Cédric Le Goater Date: Tue, 11 Dec 2018 23:38:13 +0100 Subject: spapr: add hcalls support for the XIVE exploitation interrupt mode The different XIVE virtualization structures (sources and event queues) are configured with a set of Hypervisor calls : - H_INT_GET_SOURCE_INFO used to obtain the address of the MMIO page of the Event State Buffer (ESB) entry associated with the source. - H_INT_SET_SOURCE_CONFIG assigns a source to a "target". - H_INT_GET_SOURCE_CONFIG determines which "target" and "priority" is assigned to a source - H_INT_GET_QUEUE_INFO returns the address of the notification management page associated with the specified "target" and "priority". - H_INT_SET_QUEUE_CONFIG sets or resets the event queue for a given "target" and "priority". It is also used to set the notification configuration associated with the queue, only unconditional notification is supported for the moment. Reset is performed with a queue size of 0 and queueing is disabled in that case. - H_INT_GET_QUEUE_CONFIG returns the queue settings for a given "target" and "priority". - H_INT_RESET resets all of the guest's internal interrupt structures to their initial state, losing all configuration set via the hcalls H_INT_SET_SOURCE_CONFIG and H_INT_SET_QUEUE_CONFIG. - H_INT_SYNC issue a synchronisation on a source to make sure all notifications have reached their queue. Calls that still need to be addressed : H_INT_SET_OS_REPORTING_LINE H_INT_GET_OS_REPORTING_LINE See the code for more documentation on each hcall. Signed-off-by: Cédric Le Goater Reviewed-by: David Gibson [dwg: Folded in fix for field accessors] Signed-off-by: David Gibson --- hw/intc/spapr_xive.c | 982 ++++++++++++++++++++++++++++++++++++++++++++ hw/ppc/spapr_irq.c | 2 + include/hw/ppc/spapr.h | 15 +- include/hw/ppc/spapr_xive.h | 4 + 4 files changed, 1002 insertions(+), 1 deletion(-) (limited to 'hw/intc') diff --git a/hw/intc/spapr_xive.c b/hw/intc/spapr_xive.c index d6291c6470..9f2820039c 100644 --- a/hw/intc/spapr_xive.c +++ b/hw/intc/spapr_xive.c @@ -38,6 +38,13 @@ #define SPAPR_XIVE_NVT_BASE 0x400 +/* + * The sPAPR machine has a unique XIVE IC device. Assign a fixed value + * to the controller block id value. It can nevertheless be changed + * for testing purpose. 
+ */ +#define SPAPR_XIVE_BLOCK_ID 0x0 + /* * sPAPR NVT and END indexing helpers */ @@ -46,6 +53,64 @@ static uint32_t spapr_xive_nvt_to_target(uint8_t nvt_blk, uint32_t nvt_idx) return nvt_idx - SPAPR_XIVE_NVT_BASE; } +static void spapr_xive_cpu_to_nvt(PowerPCCPU *cpu, + uint8_t *out_nvt_blk, uint32_t *out_nvt_idx) +{ + assert(cpu); + + if (out_nvt_blk) { + *out_nvt_blk = SPAPR_XIVE_BLOCK_ID; + } + + if (out_nvt_blk) { + *out_nvt_idx = SPAPR_XIVE_NVT_BASE + cpu->vcpu_id; + } +} + +static int spapr_xive_target_to_nvt(uint32_t target, + uint8_t *out_nvt_blk, uint32_t *out_nvt_idx) +{ + PowerPCCPU *cpu = spapr_find_cpu(target); + + if (!cpu) { + return -1; + } + + spapr_xive_cpu_to_nvt(cpu, out_nvt_blk, out_nvt_idx); + return 0; +} + +/* + * sPAPR END indexing uses a simple mapping of the CPU vcpu_id, 8 + * priorities per CPU + */ +static void spapr_xive_cpu_to_end(PowerPCCPU *cpu, uint8_t prio, + uint8_t *out_end_blk, uint32_t *out_end_idx) +{ + assert(cpu); + + if (out_end_blk) { + *out_end_blk = SPAPR_XIVE_BLOCK_ID; + } + + if (out_end_idx) { + *out_end_idx = (cpu->vcpu_id << 3) + prio; + } +} + +static int spapr_xive_target_to_end(uint32_t target, uint8_t prio, + uint8_t *out_end_blk, uint32_t *out_end_idx) +{ + PowerPCCPU *cpu = spapr_find_cpu(target); + + if (!cpu) { + return -1; + } + + spapr_xive_cpu_to_end(cpu, prio, out_end_blk, out_end_idx); + return 0; +} + /* * On sPAPR machines, use a simplified output for the XIVE END * structure dumping only the information related to the OS EQ. @@ -418,3 +483,920 @@ qemu_irq spapr_xive_qirq(sPAPRXive *xive, uint32_t lisn) return xive_source_qirq(xsrc, lisn); } + +/* + * XIVE hcalls + * + * The terminology used by the XIVE hcalls is the following : + * + * TARGET vCPU number + * EQ Event Queue assigned by OS to receive event data + * ESB page for source interrupt management + * LISN Logical Interrupt Source Number identifying a source in the + * machine + * EISN Effective Interrupt Source Number used by guest OS to + * identify source in the guest + * + * The EAS, END, NVT structures are not exposed. + */ + +/* + * Linux hosts under OPAL reserve priority 7 for their own escalation + * interrupts (DD2.X POWER9). So we only allow the guest to use + * priorities [0..6]. + */ +static bool spapr_xive_priority_is_reserved(uint8_t priority) +{ + switch (priority) { + case 0 ... 6: + return false; + case 7: /* OPAL escalation queue */ + default: + return true; + } +} + +/* + * The H_INT_GET_SOURCE_INFO hcall() is used to obtain the logical + * real address of the MMIO page through which the Event State Buffer + * entry associated with the value of the "lisn" parameter is managed. + * + * Parameters: + * Input + * - R4: "flags" + * Bits 0-63 reserved + * - R5: "lisn" is per "interrupts", "interrupt-map", or + * "ibm,xive-lisn-ranges" properties, or as returned by the + * ibm,query-interrupt-source-number RTAS call, or as returned + * by the H_ALLOCATE_VAS_WINDOW hcall + * + * Output + * - R4: "flags" + * Bits 0-59: Reserved + * Bit 60: H_INT_ESB must be used for Event State Buffer + * management + * Bit 61: 1 == LSI 0 == MSI + * Bit 62: the full function page supports trigger + * Bit 63: Store EOI Supported + * - R5: Logical Real address of full function Event State Buffer + * management page, -1 if H_INT_ESB hcall flag is set to 1. + * - R6: Logical Real Address of trigger only Event State Buffer + * management page or -1. + * - R7: Power of 2 page size for the ESB management pages returned in + * R5 and R6. 
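+ *
+ * An illustrative decode (not from the hcall specification above): for
+ * a virtual LSI the implementation below sets bits 60 (H_INT_ESB) and
+ * 61 (LSI) in R4, returns -1 in both R5 and R6 since the ESB must then
+ * be driven through the H_INT_ESB hcall, and still reports the ESB
+ * page size in R7.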
+ */ + +#define SPAPR_XIVE_SRC_H_INT_ESB PPC_BIT(60) /* ESB manage with H_INT_ESB */ +#define SPAPR_XIVE_SRC_LSI PPC_BIT(61) /* Virtual LSI type */ +#define SPAPR_XIVE_SRC_TRIGGER PPC_BIT(62) /* Trigger and management + on same page */ +#define SPAPR_XIVE_SRC_STORE_EOI PPC_BIT(63) /* Store EOI support */ + +static target_ulong h_int_get_source_info(PowerPCCPU *cpu, + sPAPRMachineState *spapr, + target_ulong opcode, + target_ulong *args) +{ + sPAPRXive *xive = spapr->xive; + XiveSource *xsrc = &xive->source; + target_ulong flags = args[0]; + target_ulong lisn = args[1]; + + if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) { + return H_FUNCTION; + } + + if (flags) { + return H_PARAMETER; + } + + if (lisn >= xive->nr_irqs) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n", + lisn); + return H_P2; + } + + if (!xive_eas_is_valid(&xive->eat[lisn])) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n", + lisn); + return H_P2; + } + + /* + * All sources are emulated under the main XIVE object and share + * the same characteristics. + */ + args[0] = 0; + if (!xive_source_esb_has_2page(xsrc)) { + args[0] |= SPAPR_XIVE_SRC_TRIGGER; + } + if (xsrc->esb_flags & XIVE_SRC_STORE_EOI) { + args[0] |= SPAPR_XIVE_SRC_STORE_EOI; + } + + /* + * Force the use of the H_INT_ESB hcall in case of an LSI + * interrupt. This is necessary under KVM to re-trigger the + * interrupt if the level is still asserted + */ + if (xive_source_irq_is_lsi(xsrc, lisn)) { + args[0] |= SPAPR_XIVE_SRC_H_INT_ESB | SPAPR_XIVE_SRC_LSI; + } + + if (!(args[0] & SPAPR_XIVE_SRC_H_INT_ESB)) { + args[1] = xive->vc_base + xive_source_esb_mgmt(xsrc, lisn); + } else { + args[1] = -1; + } + + if (xive_source_esb_has_2page(xsrc) && + !(args[0] & SPAPR_XIVE_SRC_H_INT_ESB)) { + args[2] = xive->vc_base + xive_source_esb_page(xsrc, lisn); + } else { + args[2] = -1; + } + + if (xive_source_esb_has_2page(xsrc)) { + args[3] = xsrc->esb_shift - 1; + } else { + args[3] = xsrc->esb_shift; + } + + return H_SUCCESS; +} + +/* + * The H_INT_SET_SOURCE_CONFIG hcall() is used to assign a Logical + * Interrupt Source to a target. The Logical Interrupt Source is + * designated with the "lisn" parameter and the target is designated + * with the "target" and "priority" parameters. Upon return from the + * hcall(), no additional interrupts will be directed to the old EQ. + * + * Parameters: + * Input: + * - R4: "flags" + * Bits 0-61: Reserved + * Bit 62: set the "eisn" in the EAS + * Bit 63: masks the interrupt source in the hardware interrupt + * control structure. An interrupt masked by this mechanism will + * be dropped, but it's source state bits will still be + * set. There is no race-free way of unmasking and restoring the + * source. 
Thus this should only be used in interrupts that are + * also masked at the source, and only in cases where the + * interrupt is not meant to be used for a large amount of time + * because no valid target exists for it for example + * - R5: "lisn" is per "interrupts", "interrupt-map", or + * "ibm,xive-lisn-ranges" properties, or as returned by the + * ibm,query-interrupt-source-number RTAS call, or as returned by + * the H_ALLOCATE_VAS_WINDOW hcall + * - R6: "target" is per "ibm,ppc-interrupt-server#s" or + * "ibm,ppc-interrupt-gserver#s" + * - R7: "priority" is a valid priority not in + * "ibm,plat-res-int-priorities" + * - R8: "eisn" is the guest EISN associated with the "lisn" + * + * Output: + * - None + */ + +#define SPAPR_XIVE_SRC_SET_EISN PPC_BIT(62) +#define SPAPR_XIVE_SRC_MASK PPC_BIT(63) + +static target_ulong h_int_set_source_config(PowerPCCPU *cpu, + sPAPRMachineState *spapr, + target_ulong opcode, + target_ulong *args) +{ + sPAPRXive *xive = spapr->xive; + XiveEAS eas, new_eas; + target_ulong flags = args[0]; + target_ulong lisn = args[1]; + target_ulong target = args[2]; + target_ulong priority = args[3]; + target_ulong eisn = args[4]; + uint8_t end_blk; + uint32_t end_idx; + + if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) { + return H_FUNCTION; + } + + if (flags & ~(SPAPR_XIVE_SRC_SET_EISN | SPAPR_XIVE_SRC_MASK)) { + return H_PARAMETER; + } + + if (lisn >= xive->nr_irqs) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n", + lisn); + return H_P2; + } + + eas = xive->eat[lisn]; + if (!xive_eas_is_valid(&eas)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n", + lisn); + return H_P2; + } + + /* priority 0xff is used to reset the EAS */ + if (priority == 0xff) { + new_eas.w = cpu_to_be64(EAS_VALID | EAS_MASKED); + goto out; + } + + if (flags & SPAPR_XIVE_SRC_MASK) { + new_eas.w = eas.w | cpu_to_be64(EAS_MASKED); + } else { + new_eas.w = eas.w & cpu_to_be64(~EAS_MASKED); + } + + if (spapr_xive_priority_is_reserved(priority)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: priority " TARGET_FMT_ld + " is reserved\n", priority); + return H_P4; + } + + /* + * Validate that "target" is part of the list of threads allocated + * to the partition. For that, find the END corresponding to the + * target. + */ + if (spapr_xive_target_to_end(target, priority, &end_blk, &end_idx)) { + return H_P3; + } + + new_eas.w = xive_set_field64(EAS_END_BLOCK, new_eas.w, end_blk); + new_eas.w = xive_set_field64(EAS_END_INDEX, new_eas.w, end_idx); + + if (flags & SPAPR_XIVE_SRC_SET_EISN) { + new_eas.w = xive_set_field64(EAS_END_DATA, new_eas.w, eisn); + } + +out: + xive->eat[lisn] = new_eas; + return H_SUCCESS; +} + +/* + * The H_INT_GET_SOURCE_CONFIG hcall() is used to determine to which + * target/priority pair is assigned to the specified Logical Interrupt + * Source. 
+ * + * Parameters: + * Input: + * - R4: "flags" + * Bits 0-63 Reserved + * - R5: "lisn" is per "interrupts", "interrupt-map", or + * "ibm,xive-lisn-ranges" properties, or as returned by the + * ibm,query-interrupt-source-number RTAS call, or as + * returned by the H_ALLOCATE_VAS_WINDOW hcall + * + * Output: + * - R4: Target to which the specified Logical Interrupt Source is + * assigned + * - R5: Priority to which the specified Logical Interrupt Source is + * assigned + * - R6: EISN for the specified Logical Interrupt Source (this will be + * equivalent to the LISN if not changed by H_INT_SET_SOURCE_CONFIG) + */ +static target_ulong h_int_get_source_config(PowerPCCPU *cpu, + sPAPRMachineState *spapr, + target_ulong opcode, + target_ulong *args) +{ + sPAPRXive *xive = spapr->xive; + target_ulong flags = args[0]; + target_ulong lisn = args[1]; + XiveEAS eas; + XiveEND *end; + uint8_t nvt_blk; + uint32_t end_idx, nvt_idx; + + if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) { + return H_FUNCTION; + } + + if (flags) { + return H_PARAMETER; + } + + if (lisn >= xive->nr_irqs) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n", + lisn); + return H_P2; + } + + eas = xive->eat[lisn]; + if (!xive_eas_is_valid(&eas)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n", + lisn); + return H_P2; + } + + /* EAS_END_BLOCK is unused on sPAPR */ + end_idx = xive_get_field64(EAS_END_INDEX, eas.w); + + assert(end_idx < xive->nr_ends); + end = &xive->endt[end_idx]; + + nvt_blk = xive_get_field32(END_W6_NVT_BLOCK, end->w6); + nvt_idx = xive_get_field32(END_W6_NVT_INDEX, end->w6); + args[0] = spapr_xive_nvt_to_target(nvt_blk, nvt_idx); + + if (xive_eas_is_masked(&eas)) { + args[1] = 0xff; + } else { + args[1] = xive_get_field32(END_W7_F0_PRIORITY, end->w7); + } + + args[2] = xive_get_field64(EAS_END_DATA, eas.w); + + return H_SUCCESS; +} + +/* + * The H_INT_GET_QUEUE_INFO hcall() is used to get the logical real + * address of the notification management page associated with the + * specified target and priority. + * + * Parameters: + * Input: + * - R4: "flags" + * Bits 0-63 Reserved + * - R5: "target" is per "ibm,ppc-interrupt-server#s" or + * "ibm,ppc-interrupt-gserver#s" + * - R6: "priority" is a valid priority not in + * "ibm,plat-res-int-priorities" + * + * Output: + * - R4: Logical real address of notification page + * - R5: Power of 2 page size of the notification page + */ +static target_ulong h_int_get_queue_info(PowerPCCPU *cpu, + sPAPRMachineState *spapr, + target_ulong opcode, + target_ulong *args) +{ + sPAPRXive *xive = spapr->xive; + XiveENDSource *end_xsrc = &xive->end_source; + target_ulong flags = args[0]; + target_ulong target = args[1]; + target_ulong priority = args[2]; + XiveEND *end; + uint8_t end_blk; + uint32_t end_idx; + + if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) { + return H_FUNCTION; + } + + if (flags) { + return H_PARAMETER; + } + + /* + * H_STATE should be returned if a H_INT_RESET is in progress. + * This is not needed when running the emulation under QEMU + */ + + if (spapr_xive_priority_is_reserved(priority)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: priority " TARGET_FMT_ld + " is reserved\n", priority); + return H_P3; + } + + /* + * Validate that "target" is part of the list of threads allocated + * to the partition. For that, find the END corresponding to the + * target. 
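+     * (With the vcpu_id based mapping used by this model, this amounts
+     * to checking that spapr_find_cpu(target) finds a matching vCPU.)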
+ */ + if (spapr_xive_target_to_end(target, priority, &end_blk, &end_idx)) { + return H_P2; + } + + assert(end_idx < xive->nr_ends); + end = &xive->endt[end_idx]; + + args[0] = xive->end_base + (1ull << (end_xsrc->esb_shift + 1)) * end_idx; + if (xive_end_is_enqueue(end)) { + args[1] = xive_get_field32(END_W0_QSIZE, end->w0) + 12; + } else { + args[1] = 0; + } + + return H_SUCCESS; +} + +/* + * The H_INT_SET_QUEUE_CONFIG hcall() is used to set or reset a EQ for + * a given "target" and "priority". It is also used to set the + * notification config associated with the EQ. An EQ size of 0 is + * used to reset the EQ config for a given target and priority. If + * resetting the EQ config, the END associated with the given "target" + * and "priority" will be changed to disable queueing. + * + * Upon return from the hcall(), no additional interrupts will be + * directed to the old EQ (if one was set). The old EQ (if one was + * set) should be investigated for interrupts that occurred prior to + * or during the hcall(). + * + * Parameters: + * Input: + * - R4: "flags" + * Bits 0-62: Reserved + * Bit 63: Unconditional Notify (n) per the XIVE spec + * - R5: "target" is per "ibm,ppc-interrupt-server#s" or + * "ibm,ppc-interrupt-gserver#s" + * - R6: "priority" is a valid priority not in + * "ibm,plat-res-int-priorities" + * - R7: "eventQueue": The logical real address of the start of the EQ + * - R8: "eventQueueSize": The power of 2 EQ size per "ibm,xive-eq-sizes" + * + * Output: + * - None + */ + +#define SPAPR_XIVE_END_ALWAYS_NOTIFY PPC_BIT(63) + +static target_ulong h_int_set_queue_config(PowerPCCPU *cpu, + sPAPRMachineState *spapr, + target_ulong opcode, + target_ulong *args) +{ + sPAPRXive *xive = spapr->xive; + target_ulong flags = args[0]; + target_ulong target = args[1]; + target_ulong priority = args[2]; + target_ulong qpage = args[3]; + target_ulong qsize = args[4]; + XiveEND end; + uint8_t end_blk, nvt_blk; + uint32_t end_idx, nvt_idx; + + if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) { + return H_FUNCTION; + } + + if (flags & ~SPAPR_XIVE_END_ALWAYS_NOTIFY) { + return H_PARAMETER; + } + + /* + * H_STATE should be returned if a H_INT_RESET is in progress. + * This is not needed when running the emulation under QEMU + */ + + if (spapr_xive_priority_is_reserved(priority)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: priority " TARGET_FMT_ld + " is reserved\n", priority); + return H_P3; + } + + /* + * Validate that "target" is part of the list of threads allocated + * to the partition. For that, find the END corresponding to the + * target. + */ + + if (spapr_xive_target_to_end(target, priority, &end_blk, &end_idx)) { + return H_P2; + } + + assert(end_idx < xive->nr_ends); + memcpy(&end, &xive->endt[end_idx], sizeof(XiveEND)); + + switch (qsize) { + case 12: + case 16: + case 21: + case 24: + end.w2 = cpu_to_be32((qpage >> 32) & 0x0fffffff); + end.w3 = cpu_to_be32(qpage & 0xffffffff); + end.w0 |= cpu_to_be32(END_W0_ENQUEUE); + end.w0 = xive_set_field32(END_W0_QSIZE, end.w0, qsize - 12); + break; + case 0: + /* reset queue and disable queueing */ + spapr_xive_end_reset(&end); + goto out; + + default: + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid EQ size %"PRIx64"\n", + qsize); + return H_P5; + } + + if (qsize) { + hwaddr plen = 1 << qsize; + void *eq; + + /* + * Validate the guest EQ. We should also check that the queue + * has been zeroed by the OS. 
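+         * The length check on the mapping below also catches an EQ
+         * that is not entirely backed by contiguous guest RAM, since
+         * address_space_map() then returns a shorter mapping.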
+ */ + eq = address_space_map(CPU(cpu)->as, qpage, &plen, true, + MEMTXATTRS_UNSPECIFIED); + if (plen != 1 << qsize) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to map EQ @0x%" + HWADDR_PRIx "\n", qpage); + return H_P4; + } + address_space_unmap(CPU(cpu)->as, eq, plen, true, plen); + } + + /* "target" should have been validated above */ + if (spapr_xive_target_to_nvt(target, &nvt_blk, &nvt_idx)) { + g_assert_not_reached(); + } + + /* + * Ensure the priority and target are correctly set (they will not + * be right after allocation) + */ + end.w6 = xive_set_field32(END_W6_NVT_BLOCK, 0ul, nvt_blk) | + xive_set_field32(END_W6_NVT_INDEX, 0ul, nvt_idx); + end.w7 = xive_set_field32(END_W7_F0_PRIORITY, 0ul, priority); + + if (flags & SPAPR_XIVE_END_ALWAYS_NOTIFY) { + end.w0 |= cpu_to_be32(END_W0_UCOND_NOTIFY); + } else { + end.w0 &= cpu_to_be32((uint32_t)~END_W0_UCOND_NOTIFY); + } + + /* + * The generation bit for the END starts at 1 and The END page + * offset counter starts at 0. + */ + end.w1 = cpu_to_be32(END_W1_GENERATION) | + xive_set_field32(END_W1_PAGE_OFF, 0ul, 0ul); + end.w0 |= cpu_to_be32(END_W0_VALID); + + /* + * TODO: issue syncs required to ensure all in-flight interrupts + * are complete on the old END + */ + +out: + /* Update END */ + memcpy(&xive->endt[end_idx], &end, sizeof(XiveEND)); + return H_SUCCESS; +} + +/* + * The H_INT_GET_QUEUE_CONFIG hcall() is used to get a EQ for a given + * target and priority. + * + * Parameters: + * Input: + * - R4: "flags" + * Bits 0-62: Reserved + * Bit 63: Debug: Return debug data + * - R5: "target" is per "ibm,ppc-interrupt-server#s" or + * "ibm,ppc-interrupt-gserver#s" + * - R6: "priority" is a valid priority not in + * "ibm,plat-res-int-priorities" + * + * Output: + * - R4: "flags": + * Bits 0-61: Reserved + * Bit 62: The value of Event Queue Generation Number (g) per + * the XIVE spec if "Debug" = 1 + * Bit 63: The value of Unconditional Notify (n) per the XIVE spec + * - R5: The logical real address of the start of the EQ + * - R6: The power of 2 EQ size per "ibm,xive-eq-sizes" + * - R7: The value of Event Queue Offset Counter per XIVE spec + * if "Debug" = 1, else 0 + * + */ + +#define SPAPR_XIVE_END_DEBUG PPC_BIT(63) + +static target_ulong h_int_get_queue_config(PowerPCCPU *cpu, + sPAPRMachineState *spapr, + target_ulong opcode, + target_ulong *args) +{ + sPAPRXive *xive = spapr->xive; + target_ulong flags = args[0]; + target_ulong target = args[1]; + target_ulong priority = args[2]; + XiveEND *end; + uint8_t end_blk; + uint32_t end_idx; + + if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) { + return H_FUNCTION; + } + + if (flags & ~SPAPR_XIVE_END_DEBUG) { + return H_PARAMETER; + } + + /* + * H_STATE should be returned if a H_INT_RESET is in progress. + * This is not needed when running the emulation under QEMU + */ + + if (spapr_xive_priority_is_reserved(priority)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: priority " TARGET_FMT_ld + " is reserved\n", priority); + return H_P3; + } + + /* + * Validate that "target" is part of the list of threads allocated + * to the partition. For that, find the END corresponding to the + * target. 
+ */ + if (spapr_xive_target_to_end(target, priority, &end_blk, &end_idx)) { + return H_P2; + } + + assert(end_idx < xive->nr_ends); + end = &xive->endt[end_idx]; + + args[0] = 0; + if (xive_end_is_notify(end)) { + args[0] |= SPAPR_XIVE_END_ALWAYS_NOTIFY; + } + + if (xive_end_is_enqueue(end)) { + args[1] = (uint64_t) be32_to_cpu(end->w2 & 0x0fffffff) << 32 + | be32_to_cpu(end->w3); + args[2] = xive_get_field32(END_W0_QSIZE, end->w0) + 12; + } else { + args[1] = 0; + args[2] = 0; + } + + /* TODO: do we need any locking on the END ? */ + if (flags & SPAPR_XIVE_END_DEBUG) { + /* Load the event queue generation number into the return flags */ + args[0] |= (uint64_t)xive_get_field32(END_W1_GENERATION, end->w1) << 62; + + /* Load R7 with the event queue offset counter */ + args[3] = xive_get_field32(END_W1_PAGE_OFF, end->w1); + } else { + args[3] = 0; + } + + return H_SUCCESS; +} + +/* + * The H_INT_SET_OS_REPORTING_LINE hcall() is used to set the + * reporting cache line pair for the calling thread. The reporting + * cache lines will contain the OS interrupt context when the OS + * issues a CI store byte to @TIMA+0xC10 to acknowledge the OS + * interrupt. The reporting cache lines can be reset by inputting -1 + * in "reportingLine". Issuing the CI store byte without reporting + * cache lines registered will result in the data not being accessible + * to the OS. + * + * Parameters: + * Input: + * - R4: "flags" + * Bits 0-63: Reserved + * - R5: "reportingLine": The logical real address of the reporting cache + * line pair + * + * Output: + * - None + */ +static target_ulong h_int_set_os_reporting_line(PowerPCCPU *cpu, + sPAPRMachineState *spapr, + target_ulong opcode, + target_ulong *args) +{ + if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) { + return H_FUNCTION; + } + + /* + * H_STATE should be returned if a H_INT_RESET is in progress. + * This is not needed when running the emulation under QEMU + */ + + /* TODO: H_INT_SET_OS_REPORTING_LINE */ + return H_FUNCTION; +} + +/* + * The H_INT_GET_OS_REPORTING_LINE hcall() is used to get the logical + * real address of the reporting cache line pair set for the input + * "target". If no reporting cache line pair has been set, -1 is + * returned. + * + * Parameters: + * Input: + * - R4: "flags" + * Bits 0-63: Reserved + * - R5: "target" is per "ibm,ppc-interrupt-server#s" or + * "ibm,ppc-interrupt-gserver#s" + * - R6: "reportingLine": The logical real address of the reporting + * cache line pair + * + * Output: + * - R4: The logical real address of the reporting line if set, else -1 + */ +static target_ulong h_int_get_os_reporting_line(PowerPCCPU *cpu, + sPAPRMachineState *spapr, + target_ulong opcode, + target_ulong *args) +{ + if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) { + return H_FUNCTION; + } + + /* + * H_STATE should be returned if a H_INT_RESET is in progress. + * This is not needed when running the emulation under QEMU + */ + + /* TODO: H_INT_GET_OS_REPORTING_LINE */ + return H_FUNCTION; +} + +/* + * The H_INT_ESB hcall() is used to issue a load or store to the ESB + * page for the input "lisn". This hcall is only supported for LISNs + * that have the ESB hcall flag set to 1 when returned from hcall() + * H_INT_GET_SOURCE_INFO. 
+ * + * Parameters: + * Input: + * - R4: "flags" + * Bits 0-62: Reserved + * bit 63: Store: Store=1, store operation, else load operation + * - R5: "lisn" is per "interrupts", "interrupt-map", or + * "ibm,xive-lisn-ranges" properties, or as returned by the + * ibm,query-interrupt-source-number RTAS call, or as + * returned by the H_ALLOCATE_VAS_WINDOW hcall + * - R6: "esbOffset" is the offset into the ESB page for the load or + * store operation + * - R7: "storeData" is the data to write for a store operation + * + * Output: + * - R4: The value of the load if load operation, else -1 + */ + +#define SPAPR_XIVE_ESB_STORE PPC_BIT(63) + +static target_ulong h_int_esb(PowerPCCPU *cpu, + sPAPRMachineState *spapr, + target_ulong opcode, + target_ulong *args) +{ + sPAPRXive *xive = spapr->xive; + XiveEAS eas; + target_ulong flags = args[0]; + target_ulong lisn = args[1]; + target_ulong offset = args[2]; + target_ulong data = args[3]; + hwaddr mmio_addr; + XiveSource *xsrc = &xive->source; + + if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) { + return H_FUNCTION; + } + + if (flags & ~SPAPR_XIVE_ESB_STORE) { + return H_PARAMETER; + } + + if (lisn >= xive->nr_irqs) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n", + lisn); + return H_P2; + } + + eas = xive->eat[lisn]; + if (!xive_eas_is_valid(&eas)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n", + lisn); + return H_P2; + } + + if (offset > (1ull << xsrc->esb_shift)) { + return H_P3; + } + + mmio_addr = xive->vc_base + xive_source_esb_mgmt(xsrc, lisn) + offset; + + if (dma_memory_rw(&address_space_memory, mmio_addr, &data, 8, + (flags & SPAPR_XIVE_ESB_STORE))) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to access ESB @0x%" + HWADDR_PRIx "\n", mmio_addr); + return H_HARDWARE; + } + args[0] = (flags & SPAPR_XIVE_ESB_STORE) ? -1 : data; + return H_SUCCESS; +} + +/* + * The H_INT_SYNC hcall() is used to issue hardware syncs that will + * ensure any in flight events for the input lisn are in the event + * queue. + * + * Parameters: + * Input: + * - R4: "flags" + * Bits 0-63: Reserved + * - R5: "lisn" is per "interrupts", "interrupt-map", or + * "ibm,xive-lisn-ranges" properties, or as returned by the + * ibm,query-interrupt-source-number RTAS call, or as + * returned by the H_ALLOCATE_VAS_WINDOW hcall + * + * Output: + * - None + */ +static target_ulong h_int_sync(PowerPCCPU *cpu, + sPAPRMachineState *spapr, + target_ulong opcode, + target_ulong *args) +{ + sPAPRXive *xive = spapr->xive; + XiveEAS eas; + target_ulong flags = args[0]; + target_ulong lisn = args[1]; + + if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) { + return H_FUNCTION; + } + + if (flags) { + return H_PARAMETER; + } + + if (lisn >= xive->nr_irqs) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n", + lisn); + return H_P2; + } + + eas = xive->eat[lisn]; + if (!xive_eas_is_valid(&eas)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n", + lisn); + return H_P2; + } + + /* + * H_STATE should be returned if a H_INT_RESET is in progress. + * This is not needed when running the emulation under QEMU + */ + + /* This is not real hardware. Nothing to be done */ + return H_SUCCESS; +} + +/* + * The H_INT_RESET hcall() is used to reset all of the partition's + * interrupt exploitation structures to their initial state. This + * means losing all previously set interrupt state set via + * H_INT_SET_SOURCE_CONFIG and H_INT_SET_QUEUE_CONFIG. 
+ * + * Parameters: + * Input: + * - R4: "flags" + * Bits 0-63: Reserved + * + * Output: + * - None + */ +static target_ulong h_int_reset(PowerPCCPU *cpu, + sPAPRMachineState *spapr, + target_ulong opcode, + target_ulong *args) +{ + sPAPRXive *xive = spapr->xive; + target_ulong flags = args[0]; + + if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) { + return H_FUNCTION; + } + + if (flags) { + return H_PARAMETER; + } + + device_reset(DEVICE(xive)); + return H_SUCCESS; +} + +void spapr_xive_hcall_init(sPAPRMachineState *spapr) +{ + spapr_register_hypercall(H_INT_GET_SOURCE_INFO, h_int_get_source_info); + spapr_register_hypercall(H_INT_SET_SOURCE_CONFIG, h_int_set_source_config); + spapr_register_hypercall(H_INT_GET_SOURCE_CONFIG, h_int_get_source_config); + spapr_register_hypercall(H_INT_GET_QUEUE_INFO, h_int_get_queue_info); + spapr_register_hypercall(H_INT_SET_QUEUE_CONFIG, h_int_set_queue_config); + spapr_register_hypercall(H_INT_GET_QUEUE_CONFIG, h_int_get_queue_config); + spapr_register_hypercall(H_INT_SET_OS_REPORTING_LINE, + h_int_set_os_reporting_line); + spapr_register_hypercall(H_INT_GET_OS_REPORTING_LINE, + h_int_get_os_reporting_line); + spapr_register_hypercall(H_INT_ESB, h_int_esb); + spapr_register_hypercall(H_INT_SYNC, h_int_sync); + spapr_register_hypercall(H_INT_RESET, h_int_reset); +} diff --git a/hw/ppc/spapr_irq.c b/hw/ppc/spapr_irq.c index 1f5aac55d3..9eca8a4c8c 100644 --- a/hw/ppc/spapr_irq.c +++ b/hw/ppc/spapr_irq.c @@ -239,6 +239,8 @@ static void spapr_irq_init_xive(sPAPRMachineState *spapr, Error **errp) for (i = 0; i < nr_servers; ++i) { spapr_xive_irq_claim(spapr->xive, SPAPR_IRQ_IPI + i, false); } + + spapr_xive_hcall_init(spapr); } static int spapr_irq_claim_xive(sPAPRMachineState *spapr, int irq, bool lsi, diff --git a/include/hw/ppc/spapr.h b/include/hw/ppc/spapr.h index cb3082d319..6bf028a02f 100644 --- a/include/hw/ppc/spapr.h +++ b/include/hw/ppc/spapr.h @@ -452,7 +452,20 @@ struct sPAPRMachineState { #define H_INVALIDATE_PID 0x378 #define H_REGISTER_PROC_TBL 0x37C #define H_SIGNAL_SYS_RESET 0x380 -#define MAX_HCALL_OPCODE H_SIGNAL_SYS_RESET + +#define H_INT_GET_SOURCE_INFO 0x3A8 +#define H_INT_SET_SOURCE_CONFIG 0x3AC +#define H_INT_GET_SOURCE_CONFIG 0x3B0 +#define H_INT_GET_QUEUE_INFO 0x3B4 +#define H_INT_SET_QUEUE_CONFIG 0x3B8 +#define H_INT_GET_QUEUE_CONFIG 0x3BC +#define H_INT_SET_OS_REPORTING_LINE 0x3C0 +#define H_INT_GET_OS_REPORTING_LINE 0x3C4 +#define H_INT_ESB 0x3C8 +#define H_INT_SYNC 0x3CC +#define H_INT_RESET 0x3D0 + +#define MAX_HCALL_OPCODE H_INT_RESET /* The hcalls above are standardized in PAPR and implemented by pHyp * as well. diff --git a/include/hw/ppc/spapr_xive.h b/include/hw/ppc/spapr_xive.h index f087959b99..9506a8f4d1 100644 --- a/include/hw/ppc/spapr_xive.h +++ b/include/hw/ppc/spapr_xive.h @@ -42,4 +42,8 @@ bool spapr_xive_irq_free(sPAPRXive *xive, uint32_t lisn); void spapr_xive_pic_print_info(sPAPRXive *xive, Monitor *mon); qemu_irq spapr_xive_qirq(sPAPRXive *xive, uint32_t lisn); +typedef struct sPAPRMachineState sPAPRMachineState; + +void spapr_xive_hcall_init(sPAPRMachineState *spapr); + #endif /* PPC_SPAPR_XIVE_H */ -- cgit v1.2.3-55-g7522 From 6e21de4a50fa1caf163e12a6c90424b750119f96 Mon Sep 17 00:00:00 2001 From: Cédric Le Goater Date: Tue, 11 Dec 2018 23:38:14 +0100 Subject: spapr: add device tree support for the XIVE exploitation mode The XIVE interface for the guest is described in the device tree under the "interrupt-controller" node. 
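Purely as an illustration (the unit address, the "reg" values and the
LISN range all depend on the machine configuration, so every number in
this sketch is a placeholder except the EQ size and the interrupt cell
count), the node built by this patch is expected to look roughly like:

  interrupt-controller@<tima-user-page> {
      device_type = "power-ivpe";
      compatible = "ibm,power-ivpe";
      reg = <tima-user-page-addr size tima-os-page-addr size>;
      ibm,xive-eq-sizes = <0x10>;           /* 64K event queues */
      ibm,xive-lisn-ranges = <0x0 nr-servers>;
      interrupt-controller;
      #interrupt-cells = <0x2>;
  };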
A couple of new properties are specific to XIVE:

- "reg" contains the base address and size of the thread interrupt
management areas (TIMA), for the User level and for the Guest OS
level. Only the Guest OS level is taken into account today.

- "ibm,xive-eq-sizes" the size of the event queues. One cell per
size supported, contains log2 of size, in ascending order.

- "ibm,xive-lisn-ranges" the IRQ interrupt number ranges assigned
to the guest for the IPIs.

and also under the root node:

- "ibm,plat-res-int-priorities" contains a list of priorities that
the hypervisor has reserved for its own use. OPAL uses the
priority 7 queue to automatically escalate interrupts for all
other queues (DD2.X POWER9). So only priorities [0..6] are allowed
for the guest.

Extend the sPAPR IRQ backend with a new handler to populate the DT
with the appropriate "interrupt-controller" node.

Signed-off-by: Cédric Le Goater
[dwg: Fix style nits]
Signed-off-by: David Gibson
---
hw/intc/spapr_xive.c | 67 +++++++++++++++++++++++++++++++++++++++++++++
hw/intc/xics_spapr.c | 3 +-
hw/ppc/spapr.c | 3 +-
hw/ppc/spapr_irq.c | 3 ++
include/hw/ppc/spapr_irq.h | 2 ++
include/hw/ppc/spapr_xive.h | 2 ++
include/hw/ppc/xics.h | 4 +--
7 files changed, 80 insertions(+), 4 deletions(-)
(limited to 'hw/intc')

diff --git a/hw/intc/spapr_xive.c b/hw/intc/spapr_xive.c
index 9f2820039c..682c192268 100644
--- a/hw/intc/spapr_xive.c
+++ b/hw/intc/spapr_xive.c
@@ -14,6 +14,7 @@
#include "target/ppc/cpu.h"
#include "sysemu/cpus.h"
#include "monitor/monitor.h"
+#include "hw/ppc/fdt.h"
#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_xive.h"
#include "hw/ppc/xive.h"
@@ -1400,3 +1401,69 @@ void spapr_xive_hcall_init(sPAPRMachineState *spapr)
spapr_register_hypercall(H_INT_SYNC, h_int_sync);
spapr_register_hypercall(H_INT_RESET, h_int_reset);
}
+
+void spapr_dt_xive(sPAPRMachineState *spapr, uint32_t nr_servers, void *fdt,
+ uint32_t phandle)
+{
+ sPAPRXive *xive = spapr->xive;
+ int node;
+ uint64_t timas[2 * 2];
+ /* Interrupt number ranges for the IPIs */
+ uint32_t lisn_ranges[] = {
+ cpu_to_be32(0),
+ cpu_to_be32(nr_servers),
+ };
+ /*
+ * EQ size - the sizes of pages supported by the system 4K, 64K,
+ * 2M, 16M. We only advertise 64K for the moment.
+ */
+ uint32_t eq_sizes[] = {
+ cpu_to_be32(16), /* 64K */
+ };
+ /*
+ * The following array is in sync with the reserved priorities
+ * defined by the 'spapr_xive_priority_is_reserved' routine.
+ */
+ uint32_t plat_res_int_priorities[] = {
+ cpu_to_be32(7), /* start */
+ cpu_to_be32(0xf8), /* count */
+ };
+ gchar *nodename;
+
+ /* Thread Interrupt Management Area : User (ring 3) and OS (ring 2) */
+ timas[0] = cpu_to_be64(xive->tm_base +
+ XIVE_TM_USER_PAGE * (1ull << TM_SHIFT));
+ timas[1] = cpu_to_be64(1ull << TM_SHIFT);
+ timas[2] = cpu_to_be64(xive->tm_base +
+ XIVE_TM_OS_PAGE * (1ull << TM_SHIFT));
+ timas[3] = cpu_to_be64(1ull << TM_SHIFT);
+
+ nodename = g_strdup_printf("interrupt-controller@%" PRIx64,
+ xive->tm_base + XIVE_TM_USER_PAGE * (1 << TM_SHIFT));
+ _FDT(node = fdt_add_subnode(fdt, 0, nodename));
+ g_free(nodename);
+
+ _FDT(fdt_setprop_string(fdt, node, "device_type", "power-ivpe"));
+ _FDT(fdt_setprop(fdt, node, "reg", timas, sizeof(timas)));
+
+ _FDT(fdt_setprop_string(fdt, node, "compatible", "ibm,power-ivpe"));
+ _FDT(fdt_setprop(fdt, node, "ibm,xive-eq-sizes", eq_sizes,
+ sizeof(eq_sizes)));
+ _FDT(fdt_setprop(fdt, node, "ibm,xive-lisn-ranges", lisn_ranges,
+ sizeof(lisn_ranges)));
+
+ /* For Linux to link the LSIs to the interrupt controller.
*/ + _FDT(fdt_setprop(fdt, node, "interrupt-controller", NULL, 0)); + _FDT(fdt_setprop_cell(fdt, node, "#interrupt-cells", 2)); + + /* For SLOF */ + _FDT(fdt_setprop_cell(fdt, node, "linux,phandle", phandle)); + _FDT(fdt_setprop_cell(fdt, node, "phandle", phandle)); + + /* + * The "ibm,plat-res-int-priorities" property defines the priority + * ranges reserved by the hypervisor + */ + _FDT(fdt_setprop(fdt, 0, "ibm,plat-res-int-priorities", + plat_res_int_priorities, sizeof(plat_res_int_priorities))); +} diff --git a/hw/intc/xics_spapr.c b/hw/intc/xics_spapr.c index 2e27b92b87..f67d3c80bf 100644 --- a/hw/intc/xics_spapr.c +++ b/hw/intc/xics_spapr.c @@ -244,7 +244,8 @@ void xics_spapr_init(sPAPRMachineState *spapr) spapr_register_hypercall(H_IPOLL, h_ipoll); } -void spapr_dt_xics(int nr_servers, void *fdt, uint32_t phandle) +void spapr_dt_xics(sPAPRMachineState *spapr, uint32_t nr_servers, void *fdt, + uint32_t phandle) { uint32_t interrupt_server_ranges_prop[] = { 0, cpu_to_be32(nr_servers), diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c index fc47a058dd..dfb617e580 100644 --- a/hw/ppc/spapr.c +++ b/hw/ppc/spapr.c @@ -1268,7 +1268,8 @@ static void *spapr_build_fdt(sPAPRMachineState *spapr, _FDT(fdt_setprop_cell(fdt, 0, "#size-cells", 2)); /* /interrupt controller */ - spapr_dt_xics(spapr_max_server_number(spapr), fdt, PHANDLE_XICP); + smc->irq->dt_populate(spapr, spapr_max_server_number(spapr), fdt, + PHANDLE_XICP); ret = spapr_populate_memory(spapr, fdt); if (ret < 0) { diff --git a/hw/ppc/spapr_irq.c b/hw/ppc/spapr_irq.c index 9eca8a4c8c..975954dc27 100644 --- a/hw/ppc/spapr_irq.c +++ b/hw/ppc/spapr_irq.c @@ -204,6 +204,7 @@ sPAPRIrq spapr_irq_xics = { .free = spapr_irq_free_xics, .qirq = spapr_qirq_xics, .print_info = spapr_irq_print_info_xics, + .dt_populate = spapr_dt_xics, }; /* @@ -298,6 +299,7 @@ sPAPRIrq spapr_irq_xive = { .free = spapr_irq_free_xive, .qirq = spapr_qirq_xive, .print_info = spapr_irq_print_info_xive, + .dt_populate = spapr_dt_xive, }; /* @@ -402,4 +404,5 @@ sPAPRIrq spapr_irq_xics_legacy = { .free = spapr_irq_free_xics, .qirq = spapr_qirq_xics, .print_info = spapr_irq_print_info_xics, + .dt_populate = spapr_dt_xics, }; diff --git a/include/hw/ppc/spapr_irq.h b/include/hw/ppc/spapr_irq.h index 23cdb51b87..e51e9f052f 100644 --- a/include/hw/ppc/spapr_irq.h +++ b/include/hw/ppc/spapr_irq.h @@ -39,6 +39,8 @@ typedef struct sPAPRIrq { void (*free)(sPAPRMachineState *spapr, int irq, int num); qemu_irq (*qirq)(sPAPRMachineState *spapr, int irq); void (*print_info)(sPAPRMachineState *spapr, Monitor *mon); + void (*dt_populate)(sPAPRMachineState *spapr, uint32_t nr_servers, + void *fdt, uint32_t phandle); } sPAPRIrq; extern sPAPRIrq spapr_irq_xics; diff --git a/include/hw/ppc/spapr_xive.h b/include/hw/ppc/spapr_xive.h index 9506a8f4d1..728a5e8dc1 100644 --- a/include/hw/ppc/spapr_xive.h +++ b/include/hw/ppc/spapr_xive.h @@ -45,5 +45,7 @@ qemu_irq spapr_xive_qirq(sPAPRXive *xive, uint32_t lisn); typedef struct sPAPRMachineState sPAPRMachineState; void spapr_xive_hcall_init(sPAPRMachineState *spapr); +void spapr_dt_xive(sPAPRMachineState *spapr, uint32_t nr_servers, void *fdt, + uint32_t phandle); #endif /* PPC_SPAPR_XIVE_H */ diff --git a/include/hw/ppc/xics.h b/include/hw/ppc/xics.h index 9958443d19..14afda198c 100644 --- a/include/hw/ppc/xics.h +++ b/include/hw/ppc/xics.h @@ -181,8 +181,6 @@ typedef struct XICSFabricClass { ICPState *(*icp_get)(XICSFabric *xi, int server); } XICSFabricClass; -void spapr_dt_xics(int nr_servers, void *fdt, uint32_t phandle); - ICPState 
*xics_icp_get(XICSFabric *xi, int server);

/* Internal XICS interfaces */
@@ -204,6 +202,8 @@ void icp_resend(ICPState *ss);

typedef struct sPAPRMachineState sPAPRMachineState;

+void spapr_dt_xics(sPAPRMachineState *spapr, uint32_t nr_servers, void *fdt,
+ uint32_t phandle);
int xics_kvm_init(sPAPRMachineState *spapr, Error **errp);
void xics_spapr_init(sPAPRMachineState *spapr);
-- cgit v1.2.3-55-g7522

From 1a937ad7e7a1b4eef37c967cbaeeda5ec5b90855 Mon Sep 17 00:00:00 2001
From: Cédric Le Goater
Date: Tue, 11 Dec 2018 23:38:15 +0100
Subject: spapr: allocate the interrupt thread context under the CPU core

Each interrupt mode has its own specific interrupt presenter object,
which we store under the CPU object: one for XICS and one for XIVE.

Extend the sPAPR IRQ backend with a new handler to support them both.

Signed-off-by: Cédric Le Goater
Reviewed-by: David Gibson
Signed-off-by: David Gibson
---
hw/intc/xive.c | 22 ++++++++++++++++++++++
hw/ppc/spapr_cpu_core.c | 5 ++---
hw/ppc/spapr_irq.c | 15 +++++++++++++++
include/hw/ppc/spapr_irq.h | 2 ++
include/hw/ppc/xive.h | 1 +
5 files changed, 42 insertions(+), 3 deletions(-)
(limited to 'hw/intc')

diff --git a/hw/intc/xive.c b/hw/intc/xive.c
index 607e74acd2..ea33494338 100644
--- a/hw/intc/xive.c
+++ b/hw/intc/xive.c
@@ -528,6 +528,28 @@ static const TypeInfo xive_tctx_info = {
.class_init = xive_tctx_class_init,
};

+Object *xive_tctx_create(Object *cpu, XiveRouter *xrtr, Error **errp)
+{
+ Error *local_err = NULL;
+ Object *obj;
+
+ obj = object_new(TYPE_XIVE_TCTX);
+ object_property_add_child(cpu, TYPE_XIVE_TCTX, obj, &error_abort);
+ object_unref(obj);
+ object_property_add_const_link(obj, "cpu", cpu, &error_abort);
+ object_property_set_bool(obj, true, "realized", &local_err);
+ if (local_err) {
+ goto error;
+ }
+
+ return obj;
+
+error:
+ object_unparent(obj);
+ error_propagate(errp, local_err);
+ return NULL;
+}
+
/*
* XIVE ESB helpers
*/
diff --git a/hw/ppc/spapr_cpu_core.c b/hw/ppc/spapr_cpu_core.c
index 2398ce62c0..1811cd48db 100644
--- a/hw/ppc/spapr_cpu_core.c
+++ b/hw/ppc/spapr_cpu_core.c
@@ -11,7 +11,6 @@
#include "hw/ppc/spapr_cpu_core.h"
#include "target/ppc/cpu.h"
#include "hw/ppc/spapr.h"
-#include "hw/ppc/xics.h" /* for icp_create() - to be removed */
#include "hw/boards.h"
#include "qapi/error.h"
#include "sysemu/cpus.h"
@@ -215,6 +214,7 @@ static void spapr_cpu_core_unrealize(DeviceState *dev, Error **errp)
static void spapr_realize_vcpu(PowerPCCPU *cpu, sPAPRMachineState *spapr,
sPAPRCPUCore *sc, Error **errp)
{
+ sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
CPUPPCState *env = &cpu->env;
CPUState *cs = CPU(cpu);
Error *local_err = NULL;
@@ -233,8 +233,7 @@ static void spapr_realize_vcpu(PowerPCCPU *cpu, sPAPRMachineState *spapr,
qemu_register_reset(spapr_cpu_reset, cpu);
spapr_cpu_reset(cpu);

- cpu->intc = icp_create(OBJECT(cpu), spapr->icp_type, XICS_FABRIC(spapr),
- &local_err);
+ cpu->intc = smc->irq->cpu_intc_create(spapr, OBJECT(cpu), &local_err);
if (local_err) {
goto error_unregister;
}
diff --git a/hw/ppc/spapr_irq.c b/hw/ppc/spapr_irq.c
index 975954dc27..fdcc7795e4 100644
--- a/hw/ppc/spapr_irq.c
+++ b/hw/ppc/spapr_irq.c
@@ -191,6 +191,12 @@ static void spapr_irq_print_info_xics(sPAPRMachineState *spapr, Monitor *mon)
ics_pic_print_info(spapr->ics, mon);
}

+static Object *spapr_irq_cpu_intc_create_xics(sPAPRMachineState *spapr,
+ Object *cpu, Error **errp)
+{
+ return icp_create(cpu, spapr->icp_type, XICS_FABRIC(spapr), errp);
+}
+
#define SPAPR_IRQ_XICS_NR_IRQS 0x1000
#define SPAPR_IRQ_XICS_NR_MSIS \
(XICS_IRQ_BASE + SPAPR_IRQ_XICS_NR_IRQS - SPAPR_IRQ_MSI) @@ -205,6 +211,7 @@ sPAPRIrq spapr_irq_xics = { .qirq = spapr_qirq_xics, .print_info = spapr_irq_print_info_xics, .dt_populate = spapr_dt_xics, + .cpu_intc_create = spapr_irq_cpu_intc_create_xics, }; /* @@ -282,6 +289,12 @@ static void spapr_irq_print_info_xive(sPAPRMachineState *spapr, spapr_xive_pic_print_info(spapr->xive, mon); } +static Object *spapr_irq_cpu_intc_create_xive(sPAPRMachineState *spapr, + Object *cpu, Error **errp) +{ + return xive_tctx_create(cpu, XIVE_ROUTER(spapr->xive), errp); +} + /* * XIVE uses the full IRQ number space. Set it to 8K to be compatible * with XICS. @@ -300,6 +313,7 @@ sPAPRIrq spapr_irq_xive = { .qirq = spapr_qirq_xive, .print_info = spapr_irq_print_info_xive, .dt_populate = spapr_dt_xive, + .cpu_intc_create = spapr_irq_cpu_intc_create_xive, }; /* @@ -405,4 +419,5 @@ sPAPRIrq spapr_irq_xics_legacy = { .qirq = spapr_qirq_xics, .print_info = spapr_irq_print_info_xics, .dt_populate = spapr_dt_xics, + .cpu_intc_create = spapr_irq_cpu_intc_create_xics, }; diff --git a/include/hw/ppc/spapr_irq.h b/include/hw/ppc/spapr_irq.h index e51e9f052f..13db0428ab 100644 --- a/include/hw/ppc/spapr_irq.h +++ b/include/hw/ppc/spapr_irq.h @@ -41,6 +41,8 @@ typedef struct sPAPRIrq { void (*print_info)(sPAPRMachineState *spapr, Monitor *mon); void (*dt_populate)(sPAPRMachineState *spapr, uint32_t nr_servers, void *fdt, uint32_t phandle); + Object *(*cpu_intc_create)(sPAPRMachineState *spapr, Object *cpu, + Error **errp); } sPAPRIrq; extern sPAPRIrq spapr_irq_xics; diff --git a/include/hw/ppc/xive.h b/include/hw/ppc/xive.h index 19309d1d65..18cd114eb2 100644 --- a/include/hw/ppc/xive.h +++ b/include/hw/ppc/xive.h @@ -419,6 +419,7 @@ typedef struct XiveTCTX { extern const MemoryRegionOps xive_tm_ops; void xive_tctx_pic_print_info(XiveTCTX *tctx, Monitor *mon); +Object *xive_tctx_create(Object *cpu, XiveRouter *xrtr, Error **errp); static inline uint32_t xive_nvt_cam_line(uint8_t nvt_blk, uint32_t nvt_idx) { -- cgit v1.2.3-55-g7522 From b2e22477166a7f8a32b95317dea747f8af7a807f Mon Sep 17 00:00:00 2001 From: Cédric Le Goater Date: Tue, 11 Dec 2018 23:38:17 +0100 Subject: spapr: add a 'reset' method to the sPAPR IRQ backend For the time being, the XIVE reset handler updates the OS CAM line of the vCPU as it is done under a real hypervisor when a vCPU is scheduled to run on a HW thread. This will let the XIVE presenter engine find a match among the NVTs dispatched on the HW threads. This handler will become even more useful when we introduce the machine supporting both interrupt modes, XIVE and XICS. In this machine, the interrupt mode is chosen by the CAS negotiation process and activated after a reset. Signed-off-by: Cédric Le Goater [dwg: Fix style nits] Signed-off-by: David Gibson --- hw/intc/spapr_xive.c | 17 +++++++++++++++++ hw/ppc/spapr.c | 6 ++++++ hw/ppc/spapr_irq.c | 31 ++++++++++++++++++++++++++++++- include/hw/ppc/spapr_irq.h | 2 ++ include/hw/ppc/spapr_xive.h | 1 + 5 files changed, 56 insertions(+), 1 deletion(-) (limited to 'hw/intc') diff --git a/hw/intc/spapr_xive.c b/hw/intc/spapr_xive.c index 682c192268..0e39c90cbd 100644 --- a/hw/intc/spapr_xive.c +++ b/hw/intc/spapr_xive.c @@ -179,6 +179,23 @@ static void spapr_xive_map_mmio(sPAPRXive *xive) sysbus_mmio_map(SYS_BUS_DEVICE(xive), 2, xive->tm_base); } +/* + * When a Virtual Processor is scheduled to run on a HW thread, the + * hypervisor pushes its identifier in the OS CAM line. Emulate the + * same behavior under QEMU. 
+ */
+void spapr_xive_set_tctx_os_cam(XiveTCTX *tctx)
+{
+ uint8_t nvt_blk;
+ uint32_t nvt_idx;
+ uint32_t nvt_cam;
+
+ spapr_xive_cpu_to_nvt(POWERPC_CPU(tctx->cs), &nvt_blk, &nvt_idx);
+
+ nvt_cam = cpu_to_be32(TM_QW1W2_VO | xive_nvt_cam_line(nvt_blk, nvt_idx));
+ memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &nvt_cam, 4);
+}
+
static void spapr_xive_end_reset(XiveEND *end)
{
memset(end, 0, sizeof(*end));
diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
index 0b09a88753..487f80e940 100644
--- a/hw/ppc/spapr.c
+++ b/hw/ppc/spapr.c
@@ -1619,6 +1619,12 @@ static void spapr_machine_reset(void)
qemu_devices_reset();
+ /*
+ * This fixes some of the default configuration of the XIVE
+ * devices. To be called after the reset of the machine devices.
+ */
+ spapr_irq_reset(spapr, &error_fatal);
+
/* DRC reset may cause a device to be unplugged. This will cause troubles
* if this device is used by another device (eg, a running vhost backend
* will crash QEMU if the DIMM holding the vring goes away). To avoid such
diff --git a/hw/ppc/spapr_irq.c b/hw/ppc/spapr_irq.c
index 292c448a15..9ecbf47329 100644
--- a/hw/ppc/spapr_irq.c
+++ b/hw/ppc/spapr_irq.c
@@ -305,7 +305,14 @@ static void spapr_irq_print_info_xive(sPAPRMachineState *spapr,
static Object *spapr_irq_cpu_intc_create_xive(sPAPRMachineState *spapr,
Object *cpu, Error **errp)
{
- return xive_tctx_create(cpu, XIVE_ROUTER(spapr->xive), errp);
+ Object *obj = xive_tctx_create(cpu, XIVE_ROUTER(spapr->xive), errp);
+
+ /*
+ * (TCG) Set the OS CAM line early for hotplugged CPUs as they
+ * don't benefit from the reset of the XIVE IRQ backend
+ */
+ spapr_xive_set_tctx_os_cam(XIVE_TCTX(obj));
+ return obj;
}
static int spapr_irq_post_load_xive(sPAPRMachineState *spapr, int version_id)
@@ -313,6 +320,18 @@ static int spapr_irq_post_load_xive(sPAPRMachineState *spapr, int version_id)
return 0;
}
+
+static void spapr_irq_reset_xive(sPAPRMachineState *spapr, Error **errp)
+{
+ CPUState *cs;
+
+ CPU_FOREACH(cs) {
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+
+ /* (TCG) Set the OS CAM line of the thread interrupt context. */
+ spapr_xive_set_tctx_os_cam(XIVE_TCTX(cpu->intc));
+ }
+}
+
/*
* XIVE uses the full IRQ number space. Set it to 8K to be compatible
* with XICS.
@@ -333,6 +352,7 @@ sPAPRIrq spapr_irq_xive = { .dt_populate = spapr_dt_xive, .cpu_intc_create = spapr_irq_cpu_intc_create_xive, .post_load = spapr_irq_post_load_xive, + .reset = spapr_irq_reset_xive, }; /* @@ -378,6 +398,15 @@ int spapr_irq_post_load(sPAPRMachineState *spapr, int version_id) return smc->irq->post_load(spapr, version_id); } +void spapr_irq_reset(sPAPRMachineState *spapr, Error **errp) +{ + sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr); + + if (smc->irq->reset) { + smc->irq->reset(spapr, errp); + } +} + /* * XICS legacy routines - to deprecate one day */ diff --git a/include/hw/ppc/spapr_irq.h b/include/hw/ppc/spapr_irq.h index 84a25ffb6c..63061a009b 100644 --- a/include/hw/ppc/spapr_irq.h +++ b/include/hw/ppc/spapr_irq.h @@ -44,6 +44,7 @@ typedef struct sPAPRIrq { Object *(*cpu_intc_create)(sPAPRMachineState *spapr, Object *cpu, Error **errp); int (*post_load)(sPAPRMachineState *spapr, int version_id); + void (*reset)(sPAPRMachineState *spapr, Error **errp); } sPAPRIrq; extern sPAPRIrq spapr_irq_xics; @@ -55,6 +56,7 @@ int spapr_irq_claim(sPAPRMachineState *spapr, int irq, bool lsi, Error **errp); void spapr_irq_free(sPAPRMachineState *spapr, int irq, int num); qemu_irq spapr_qirq(sPAPRMachineState *spapr, int irq); int spapr_irq_post_load(sPAPRMachineState *spapr, int version_id); +void spapr_irq_reset(sPAPRMachineState *spapr, Error **errp); /* * XICS legacy routines diff --git a/include/hw/ppc/spapr_xive.h b/include/hw/ppc/spapr_xive.h index 728a5e8dc1..728735dbcf 100644 --- a/include/hw/ppc/spapr_xive.h +++ b/include/hw/ppc/spapr_xive.h @@ -47,5 +47,6 @@ typedef struct sPAPRMachineState sPAPRMachineState; void spapr_xive_hcall_init(sPAPRMachineState *spapr); void spapr_dt_xive(sPAPRMachineState *spapr, uint32_t nr_servers, void *fdt, uint32_t phandle); +void spapr_xive_set_tctx_os_cam(XiveTCTX *tctx); #endif /* PPC_SPAPR_XIVE_H */ -- cgit v1.2.3-55-g7522
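
To illustrate how a guest is expected to drive the queue configuration
hcalls added by this series, here is a minimal guest-side sketch. It is
not part of the series: it is modeled on the hcall interface documented
in the comments above, it assumes a Linux guest where plpar_hcall() and
plpar_hcall_norets() are the usual pseries hcall veneers, and error
unwinding plus the CAS negotiation of the XIVE exploitation mode are
elided. The hypothetical helper enables a 64K event queue for a given
target and priority, then reads the configuration back:

static int xive_sketch_enable_queue(unsigned long target, unsigned long prio)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	unsigned long qpage;
	long rc;

	/* The EQ must be zeroed and naturally aligned; qsize 16 is 64K */
	qpage = __get_free_pages(GFP_KERNEL | __GFP_ZERO, get_order(SZ_64K));
	if (!qpage)
		return -ENOMEM;

	/* R4=flags, R5=target, R6=priority, R7=EQ address, R8=log2 size */
	rc = plpar_hcall_norets(H_INT_SET_QUEUE_CONFIG, 0, target, prio,
				__pa(qpage), 16);
	if (rc != H_SUCCESS) {
		free_pages(qpage, get_order(SZ_64K));
		return -EIO;
	}

	/* Read back: retbuf[0]=flags, [1]=EQ address, [2]=log2 size */
	rc = plpar_hcall(H_INT_GET_QUEUE_CONFIG, retbuf, 0, target, prio);
	return rc == H_SUCCESS ? 0 : -EIO;
}

Passing qsize = 0 instead of 16 would reset the queue and disable
queueing for that target/priority pair, as described in the
H_INT_SET_QUEUE_CONFIG comment above.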