From 260cabed718c4d3137553a4a98de268b0fb166b7 Mon Sep 17 00:00:00 2001 From: Paul Durrant Date: Tue, 7 Mar 2017 10:55:30 +0000 Subject: xen: make use of xen_xc implicit in xen_common.h inlines Doing this will make the transition to using the new libxendevicemodel interface less intrusive on the callers of these functions, since using the new library will require a change of handle. NOTE: The patch also moves the 'externs' for xen_xc and xen_fmem from xen_backend.h to xen_common.h, and the declarations from xen_backend.c to xen-common.c, which is where they belong. Signed-off-by: Paul Durrant Reviewed-by: Anthony Perard Reviewed-by: Stefano Stabellini --- hw/xen/xen_backend.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'hw') diff --git a/hw/xen/xen_backend.c b/hw/xen/xen_backend.c index 6c21c37d68..d34c49e152 100644 --- a/hw/xen/xen_backend.c +++ b/hw/xen/xen_backend.c @@ -43,8 +43,6 @@ BusState *xen_sysbus; /* ------------------------------------------------------------- */ /* public */ -xc_interface *xen_xc = NULL; -xenforeignmemory_handle *xen_fmem = NULL; struct xs_handle *xenstore = NULL; const char *xen_protocol; -- cgit v1.2.3-55-g7522 From 8f25e7544150abd4bfd4ef35fe152c049b5ea5e7 Mon Sep 17 00:00:00 2001 From: Paul Durrant Date: Tue, 7 Mar 2017 10:55:32 +0000 Subject: xen: create wrappers for all other uses of xc_hvm_XXX() functions This patch creates inline wrapper functions in xen_common.h for all open coded calls to xc_hvm_XXX() functions outside of xen_common.h so that use of xen_xc can be made implicit. This again is in preparation for the move to using libxendevicemodel. Signed-off-by: Paul Durrant Reviewed-by: Anthony Perard Reviewed-by: Stefano Stabellini --- hw/i386/xen/xen_platform.c | 2 +- include/hw/xen/xen_common.h | 44 ++++++++++++++++++++++++++++++++++++++++++++ xen-hvm.c | 27 +++++++++++++-------------- 3 files changed, 58 insertions(+), 15 deletions(-) (limited to 'hw') diff --git a/hw/i386/xen/xen_platform.c b/hw/i386/xen/xen_platform.c index 6010f35266..1419fc96d2 100644 --- a/hw/i386/xen/xen_platform.c +++ b/hw/i386/xen/xen_platform.c @@ -195,7 +195,7 @@ static void platform_fixed_ioport_writeb(void *opaque, uint32_t addr, uint32_t v case 0: /* Platform flags */ { hvmmem_type_t mem_type = (val & PFFLAG_ROM_LOCK) ? HVMMEM_ram_ro : HVMMEM_ram_rw; - if (xc_hvm_set_mem_type(xen_xc, xen_domid, mem_type, 0xc0, 0x40)) { + if (xen_set_mem_type(xen_domid, mem_type, 0xc0, 0x40)) { DPRINTF("unable to change ro/rw state of ROM memory area!\n"); } else { s->flags = val & PFFLAG_ROM_LOCK; diff --git a/include/hw/xen/xen_common.h b/include/hw/xen/xen_common.h index 1e08b98617..31cf25f846 100644 --- a/include/hw/xen/xen_common.h +++ b/include/hw/xen/xen_common.h @@ -26,6 +26,50 @@ extern xc_interface *xen_xc; * We don't support Xen prior to 4.2.0. 
*/ +static inline int xen_set_mem_type(domid_t domid, hvmmem_type_t type, + uint64_t first_pfn, uint32_t nr) +{ + return xc_hvm_set_mem_type(xen_xc, domid, type, first_pfn, nr); +} + +static inline int xen_set_pci_intx_level(domid_t domid, uint16_t segment, + uint8_t bus, uint8_t device, + uint8_t intx, unsigned int level) +{ + return xc_hvm_set_pci_intx_level(xen_xc, domid, segment, bus, device, + intx, level); +} + +static inline int xen_set_pci_link_route(domid_t domid, uint8_t link, + uint8_t irq) +{ + return xc_hvm_set_pci_link_route(xen_xc, domid, link, irq); +} + +static inline int xen_inject_msi(domid_t domid, uint64_t msi_addr, + uint32_t msi_data) +{ + return xc_hvm_inject_msi(xen_xc, domid, msi_addr, msi_data); +} + +static inline int xen_set_isa_irq_level(domid_t domid, uint8_t irq, + unsigned int level) +{ + return xc_hvm_set_isa_irq_level(xen_xc, domid, irq, level); +} + +static inline int xen_track_dirty_vram(domid_t domid, uint64_t first_pfn, + uint32_t nr, unsigned long *bitmap) +{ + return xc_hvm_track_dirty_vram(xen_xc, domid, first_pfn, nr, bitmap); +} + +static inline int xen_modified_memory(domid_t domid, uint64_t first_pfn, + uint32_t nr) +{ + return xc_hvm_modified_memory(xen_xc, domid, first_pfn, nr); +} + /* Xen 4.2 through 4.6 */ #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 471 diff --git a/xen-hvm.c b/xen-hvm.c index edf4983702..4b928cfb30 100644 --- a/xen-hvm.c +++ b/xen-hvm.c @@ -125,8 +125,8 @@ int xen_pci_slot_get_pirq(PCIDevice *pci_dev, int irq_num) void xen_piix3_set_irq(void *opaque, int irq_num, int level) { - xc_hvm_set_pci_intx_level(xen_xc, xen_domid, 0, 0, irq_num >> 2, - irq_num & 3, level); + xen_set_pci_intx_level(xen_domid, 0, 0, irq_num >> 2, + irq_num & 3, level); } void xen_piix_pci_write_config_client(uint32_t address, uint32_t val, int len) @@ -141,7 +141,7 @@ void xen_piix_pci_write_config_client(uint32_t address, uint32_t val, int len) } v &= 0xf; if (((address + i) >= 0x60) && ((address + i) <= 0x63)) { - xc_hvm_set_pci_link_route(xen_xc, xen_domid, address + i - 0x60, v); + xen_set_pci_link_route(xen_domid, address + i - 0x60, v); } } } @@ -156,7 +156,7 @@ int xen_is_pirq_msi(uint32_t msi_data) void xen_hvm_inject_msi(uint64_t addr, uint32_t data) { - xc_hvm_inject_msi(xen_xc, xen_domid, addr, data); + xen_inject_msi(xen_domid, addr, data); } static void xen_suspend_notifier(Notifier *notifier, void *data) @@ -168,7 +168,7 @@ static void xen_suspend_notifier(Notifier *notifier, void *data) static void xen_set_irq(void *opaque, int irq, int level) { - xc_hvm_set_isa_irq_level(xen_xc, xen_domid, irq, level); + xen_set_isa_irq_level(xen_domid, irq, level); } qemu_irq *xen_interrupt_controller_init(void) @@ -481,10 +481,10 @@ static void xen_set_memory(struct MemoryListener *listener, section->mr, section->offset_within_region); } else { mem_type = HVMMEM_ram_ro; - if (xc_hvm_set_mem_type(xen_xc, xen_domid, mem_type, - start_addr >> TARGET_PAGE_BITS, - size >> TARGET_PAGE_BITS)) { - DPRINTF("xc_hvm_set_mem_type error, addr: "TARGET_FMT_plx"\n", + if (xen_set_mem_type(xen_domid, mem_type, + start_addr >> TARGET_PAGE_BITS, + size >> TARGET_PAGE_BITS)) { + DPRINTF("xen_set_mem_type error, addr: "TARGET_FMT_plx"\n", start_addr); } } @@ -586,9 +586,8 @@ static void xen_sync_dirty_bitmap(XenIOState *state, return; } - rc = xc_hvm_track_dirty_vram(xen_xc, xen_domid, - start_addr >> TARGET_PAGE_BITS, npages, - bitmap); + rc = xen_track_dirty_vram(xen_domid, start_addr >> TARGET_PAGE_BITS, + npages, bitmap); if (rc < 0) { #ifndef ENODATA #define ENODATA 
ENOENT @@ -634,7 +633,7 @@ static void xen_log_stop(MemoryListener *listener, MemoryRegionSection *section, if (old & ~new & (1 << DIRTY_MEMORY_VGA)) { state->log_for_dirtybit = NULL; /* Disable dirty bit tracking */ - xc_hvm_track_dirty_vram(xen_xc, xen_domid, 0, 0, NULL); + xen_track_dirty_vram(xen_domid, 0, 0, NULL); } } @@ -1403,7 +1402,7 @@ void xen_hvm_modified_memory(ram_addr_t start, ram_addr_t length) start_pfn = start >> TARGET_PAGE_BITS; nb_pages = ((start + length + TARGET_PAGE_SIZE - 1) >> TARGET_PAGE_BITS) - start_pfn; - rc = xc_hvm_modified_memory(xen_xc, xen_domid, start_pfn, nb_pages); + rc = xen_modified_memory(xen_domid, start_pfn, nb_pages); if (rc) { fprintf(stderr, "%s failed for "RAM_ADDR_FMT" ("RAM_ADDR_FMT"): %i, %s\n", -- cgit v1.2.3-55-g7522 From f1167ee684279bffabe7bb3ab23eff87577fe427 Mon Sep 17 00:00:00 2001 From: Juergen Gross Date: Thu, 16 Mar 2017 15:19:52 +0100 Subject: xen: use 5 digit xen versions Today qemu is using e.g. the value 480 for Xen version 4.8.0. As some Xen version tests are using ">" relations this scheme will lead to problems when Xen version 4.10.0 is being reached. Instead of the 3 digit schem use a 5 digit scheme (e.g. 40800 for version 4.8.0). Signed-off-by: Juergen Gross Signed-off-by: Stefano Stabellini Reviewed-by: Stefano Stabellini --- configure | 16 ++++++++-------- hw/block/xen_disk.c | 2 +- include/hw/xen/xen_common.h | 22 +++++++++++----------- 3 files changed, 20 insertions(+), 20 deletions(-) (limited to 'hw') diff --git a/configure b/configure index e3335478cc..271bea8058 100755 --- a/configure +++ b/configure @@ -2014,7 +2014,7 @@ EOF compile_prog "" "$xen_libs -lxendevicemodel $xen_stable_libs" then xen_stable_libs="-lxendevicemodel $xen_stable_libs" - xen_ctrl_version=490 + xen_ctrl_version=40900 xen=yes elif cat > $TMPC < $TMPC < $TMPC <= 480 +#if CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40800 static void ioreq_free_copy_buffers(struct ioreq *ioreq) { diff --git a/include/hw/xen/xen_common.h b/include/hw/xen/xen_common.h index b1f5f53e35..fa990a07c0 100644 --- a/include/hw/xen/xen_common.h +++ b/include/hw/xen/xen_common.h @@ -26,7 +26,7 @@ extern xc_interface *xen_xc; * We don't support Xen prior to 4.2.0. 
*/ -#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 490 +#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40900 typedef xc_interface xendevicemodel_handle; @@ -36,7 +36,7 @@ static inline xendevicemodel_handle *xendevicemodel_open( return xen_xc; } -#if CONFIG_XEN_CTRL_INTERFACE_VERSION >= 450 +#if CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40500 static inline int xendevicemodel_create_ioreq_server( xendevicemodel_handle *dmod, domid_t domid, int handle_bufioreq, @@ -99,7 +99,7 @@ static inline int xendevicemodel_set_ioreq_server_state( return xc_hvm_set_ioreq_server_state(dmod, domid, id, enabled); } -#endif /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 450 */ +#endif /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40500 */ static inline int xendevicemodel_set_pci_intx_level( xendevicemodel_handle *dmod, domid_t domid, uint16_t segment, @@ -151,7 +151,7 @@ static inline int xendevicemodel_set_mem_type( return xc_hvm_set_mem_type(dmod, domid, mem_type, first_pfn, nr); } -#else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 490 */ +#else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40900 */ #undef XC_WANT_COMPAT_DEVICEMODEL_API #include @@ -207,7 +207,7 @@ static inline int xen_modified_memory(domid_t domid, uint64_t first_pfn, } /* Xen 4.2 through 4.6 */ -#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 471 +#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40701 typedef xc_interface xenforeignmemory_handle; typedef xc_evtchn xenevtchn_handle; @@ -248,7 +248,7 @@ static inline void *xenforeignmemory_map(xc_interface *h, uint32_t dom, #define xenforeignmemory_unmap(h, p, s) munmap(p, s * XC_PAGE_SIZE) -#else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 471 */ +#else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40701 */ #include #include @@ -284,7 +284,7 @@ static inline int xen_get_vmport_regs_pfn(xc_interface *xc, domid_t dom, #endif /* Xen before 4.6 */ -#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 460 +#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40600 #ifndef HVM_IOREQSRV_BUFIOREQ_ATOMIC #define HVM_IOREQSRV_BUFIOREQ_ATOMIC 2 @@ -330,7 +330,7 @@ static inline int xen_get_default_ioreq_server_info(domid_t dom, } /* Xen before 4.5 */ -#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 450 +#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40500 #ifndef HVM_PARAM_BUFIOREQ_EVTCHN #define HVM_PARAM_BUFIOREQ_EVTCHN 26 @@ -569,7 +569,7 @@ static inline int xen_set_ioreq_server_state(domid_t dom, #endif -#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 460 +#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40600 static inline int xen_xc_domain_add_to_physmap(xc_interface *xch, uint32_t domid, unsigned int space, unsigned long idx, @@ -592,7 +592,7 @@ static inline int xen_xc_domain_add_to_physmap(xc_interface *xch, uint32_t domid #endif #ifdef CONFIG_XEN_PV_DOMAIN_BUILD -#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 470 +#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40700 static inline int xen_domain_create(xc_interface *xc, uint32_t ssidref, xen_domain_handle_t handle, uint32_t flags, uint32_t *pdomid) @@ -611,7 +611,7 @@ static inline int xen_domain_create(xc_interface *xc, uint32_t ssidref, /* Xen before 4.8 */ -#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 480 +#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40800 typedef void *xengnttab_grant_copy_segment_t; -- cgit v1.2.3-55-g7522 From 1c599472b02783ee80691bfdaa465af9fbf25c8a Mon Sep 17 00:00:00 2001 From: Paul Durrant Date: Wed, 22 Mar 2017 09:39:15 +0000 Subject: xen: use libxendevice model to restrict operations This patch adds a command-line option (-xen-domid-restrict) which will use the new libxendevicemodel API to restrict devicemodel [1] operations to the specified domid. 
(Such operations are not applicable to the xenpv machine type). This patch also adds a tracepoint to allow successful enabling of the restriction to be monitored. [1] I.e. operations issued by libxendevicemodel. Operation issued by other xen libraries (e.g. libxenforeignmemory) are currently still unrestricted but this will be rectified by subsequent patches. Signed-off-by: Paul Durrant Reviewed-by: Stefano Stabellini --- hw/xen/trace-events | 1 + include/hw/xen/xen.h | 1 + include/hw/xen/xen_common.h | 20 ++++++++++++++++++++ qemu-options.hx | 7 +++++++ vl.c | 8 ++++++++ xen-hvm.c | 8 ++++++++ 6 files changed, 45 insertions(+) (limited to 'hw') diff --git a/hw/xen/trace-events b/hw/xen/trace-events index c4fb6f1aea..5615dce2c1 100644 --- a/hw/xen/trace-events +++ b/hw/xen/trace-events @@ -11,3 +11,4 @@ xen_map_portio_range(uint32_t id, uint64_t start_addr, uint64_t end_addr) "id: % xen_unmap_portio_range(uint32_t id, uint64_t start_addr, uint64_t end_addr) "id: %u start: %#"PRIx64" end: %#"PRIx64 xen_map_pcidev(uint32_t id, uint8_t bus, uint8_t dev, uint8_t func) "id: %u bdf: %02x.%02x.%02x" xen_unmap_pcidev(uint32_t id, uint8_t bus, uint8_t dev, uint8_t func) "id: %u bdf: %02x.%02x.%02x" +xen_domid_restrict(int err) "err: %u" diff --git a/include/hw/xen/xen.h b/include/hw/xen/xen.h index 2b1733b747..7efcdaa8fe 100644 --- a/include/hw/xen/xen.h +++ b/include/hw/xen/xen.h @@ -21,6 +21,7 @@ enum xen_mode { extern uint32_t xen_domid; extern enum xen_mode xen_mode; +extern bool xen_domid_restrict; extern bool xen_allowed; diff --git a/include/hw/xen/xen_common.h b/include/hw/xen/xen_common.h index fa990a07c0..0fcbba8c54 100644 --- a/include/hw/xen/xen_common.h +++ b/include/hw/xen/xen_common.h @@ -151,6 +151,13 @@ static inline int xendevicemodel_set_mem_type( return xc_hvm_set_mem_type(dmod, domid, mem_type, first_pfn, nr); } +static inline int xendevicemodel_restrict( + xendevicemodel_handle *dmod, domid_t domid) +{ + errno = ENOTTY; + return -1; +} + #else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40900 */ #undef XC_WANT_COMPAT_DEVICEMODEL_API @@ -206,6 +213,19 @@ static inline int xen_modified_memory(domid_t domid, uint64_t first_pfn, return xendevicemodel_modified_memory(xen_dmod, domid, first_pfn, nr); } +static inline int xen_restrict(domid_t domid) +{ + int rc = xendevicemodel_restrict(xen_dmod, domid); + + trace_xen_domid_restrict(errno); + + if (errno == ENOTTY) { + return 0; + } + + return rc; +} + /* Xen 4.2 through 4.6 */ #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40701 diff --git a/qemu-options.hx b/qemu-options.hx index 99af8edf5f..2043371260 100644 --- a/qemu-options.hx +++ b/qemu-options.hx @@ -3354,6 +3354,11 @@ DEF("xen-attach", 0, QEMU_OPTION_xen_attach, "-xen-attach attach to existing xen domain\n" " xend will use this when starting QEMU\n", QEMU_ARCH_ALL) +DEF("xen-domid-restrict", 0, QEMU_OPTION_xen_domid_restrict, + "-xen-domid-restrict restrict set of available xen operations\n" + " to specified domain id. (Does not affect\n" + " xenpv machine type).\n", + QEMU_ARCH_ALL) STEXI @item -xen-domid @var{id} @findex -xen-domid @@ -3366,6 +3371,8 @@ Warning: should not be used when xend is in use (XEN only). @findex -xen-attach Attach to existing xen domain. xend will use this when starting QEMU (XEN only). +@findex -xen-domid-restrict +Restrict set of available xen operations to specified domain id (XEN only). 
ETEXI DEF("no-reboot", 0, QEMU_OPTION_no_reboot, \ diff --git a/vl.c b/vl.c index 0b4ed5241c..f46e070e0d 100644 --- a/vl.c +++ b/vl.c @@ -205,6 +205,7 @@ static NotifierList machine_init_done_notifiers = bool xen_allowed; uint32_t xen_domid; enum xen_mode xen_mode = XEN_EMULATE; +bool xen_domid_restrict; static int has_defaults = 1; static int default_serial = 1; @@ -3933,6 +3934,13 @@ int main(int argc, char **argv, char **envp) } xen_mode = XEN_ATTACH; break; + case QEMU_OPTION_xen_domid_restrict: + if (!(xen_available())) { + error_report("Option not supported for this target"); + exit(1); + } + xen_domid_restrict = true; + break; case QEMU_OPTION_trace: g_free(trace_file); trace_file = trace_opt_parse(optarg); diff --git a/xen-hvm.c b/xen-hvm.c index 4b928cfb30..335e263834 100644 --- a/xen-hvm.c +++ b/xen-hvm.c @@ -1226,6 +1226,14 @@ void xen_hvm_init(PCMachineState *pcms, MemoryRegion **ram_memory) goto err; } + if (xen_domid_restrict) { + rc = xen_restrict(xen_domid); + if (rc < 0) { + error_report("failed to restrict: error %d", errno); + goto err; + } + } + xen_create_ioreq_server(xen_domid, &state->ioservid); state->exit.notify = xen_exit_notifier; -- cgit v1.2.3-55-g7522 From f65eadb6394340ccf7d23533c7844b6e21c056e9 Mon Sep 17 00:00:00 2001 From: Stefano Stabellini Date: Tue, 21 Mar 2017 13:51:25 -0700 Subject: xen: import ring.h from xen Do not use the ring.h header installed on the system. Instead, import the header into the QEMU codebase. This avoids problems when QEMU is built against a Xen version too old to provide all the ring macros. Signed-off-by: Stefano Stabellini Reviewed-by: Greg Kurz CC: anthony.perard@citrix.com CC: jgross@suse.com --- hw/block/xen_blkif.h | 2 +- hw/usb/xen-usb.c | 2 +- include/hw/xen/io/ring.h | 482 +++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 484 insertions(+), 2 deletions(-) create mode 100644 include/hw/xen/io/ring.h (limited to 'hw') diff --git a/hw/block/xen_blkif.h b/hw/block/xen_blkif.h index 3300b6fc0a..3e6e1ea365 100644 --- a/hw/block/xen_blkif.h +++ b/hw/block/xen_blkif.h @@ -1,7 +1,7 @@ #ifndef XEN_BLKIF_H #define XEN_BLKIF_H -#include +#include "hw/xen/io/ring.h" #include #include diff --git a/hw/usb/xen-usb.c b/hw/usb/xen-usb.c index 8e676e6c96..370b3d9387 100644 --- a/hw/usb/xen-usb.c +++ b/hw/usb/xen-usb.c @@ -33,7 +33,7 @@ #include "qapi/qmp/qint.h" #include "qapi/qmp/qstring.h" -#include +#include "hw/xen/io/ring.h" #include /* diff --git a/include/hw/xen/io/ring.h b/include/hw/xen/io/ring.h new file mode 100644 index 0000000000..abbca47687 --- /dev/null +++ b/include/hw/xen/io/ring.h @@ -0,0 +1,482 @@ +/****************************************************************************** + * ring.h + * + * Shared producer-consumer ring macros. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Tim Deegan and Andrew Warfield November 2004. + */ + +#ifndef __XEN_PUBLIC_IO_RING_H__ +#define __XEN_PUBLIC_IO_RING_H__ + +/* + * When #include'ing this header, you need to provide the following + * declaration upfront: + * - standard integers types (uint8_t, uint16_t, etc) + * They are provided by stdint.h of the standard headers. + * + * In addition, if you intend to use the FLEX macros, you also need to + * provide the following, before invoking the FLEX macros: + * - size_t + * - memcpy + * - grant_ref_t + * These declarations are provided by string.h of the standard headers, + * and grant_table.h from the Xen public headers. + */ + +#if __XEN_INTERFACE_VERSION__ < 0x00030208 +#define xen_mb() mb() +#define xen_rmb() rmb() +#define xen_wmb() wmb() +#endif + +typedef unsigned int RING_IDX; + +/* Round a 32-bit unsigned constant down to the nearest power of two. */ +#define __RD2(_x) (((_x) & 0x00000002) ? 0x2 : ((_x) & 0x1)) +#define __RD4(_x) (((_x) & 0x0000000c) ? __RD2((_x)>>2)<<2 : __RD2(_x)) +#define __RD8(_x) (((_x) & 0x000000f0) ? __RD4((_x)>>4)<<4 : __RD4(_x)) +#define __RD16(_x) (((_x) & 0x0000ff00) ? __RD8((_x)>>8)<<8 : __RD8(_x)) +#define __RD32(_x) (((_x) & 0xffff0000) ? __RD16((_x)>>16)<<16 : __RD16(_x)) + +/* + * Calculate size of a shared ring, given the total available space for the + * ring and indexes (_sz), and the name tag of the request/response structure. + * A ring contains as many entries as will fit, rounded down to the nearest + * power of two (so we can mask with (size-1) to loop around). + */ +#define __CONST_RING_SIZE(_s, _sz) \ + (__RD32(((_sz) - offsetof(struct _s##_sring, ring)) / \ + sizeof(((struct _s##_sring *)0)->ring[0]))) +/* + * The same for passing in an actual pointer instead of a name tag. + */ +#define __RING_SIZE(_s, _sz) \ + (__RD32(((_sz) - (long)(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0]))) + +/* + * Macros to make the correct C datatypes for a new kind of ring. + * + * To make a new ring datatype, you need to have two message structures, + * let's say request_t, and response_t already defined. + * + * In a header where you want the ring datatype declared, you then do: + * + * DEFINE_RING_TYPES(mytag, request_t, response_t); + * + * These expand out to give you a set of types, as you can see below. + * The most important of these are: + * + * mytag_sring_t - The shared ring. + * mytag_front_ring_t - The 'front' half of the ring. + * mytag_back_ring_t - The 'back' half of the ring. + * + * To initialize a ring in your code you need to know the location and size + * of the shared memory area (PAGE_SIZE, for instance). 
To initialise + * the front half: + * + * mytag_front_ring_t front_ring; + * SHARED_RING_INIT((mytag_sring_t *)shared_page); + * FRONT_RING_INIT(&front_ring, (mytag_sring_t *)shared_page, PAGE_SIZE); + * + * Initializing the back follows similarly (note that only the front + * initializes the shared ring): + * + * mytag_back_ring_t back_ring; + * BACK_RING_INIT(&back_ring, (mytag_sring_t *)shared_page, PAGE_SIZE); + */ + +#define DEFINE_RING_TYPES(__name, __req_t, __rsp_t) \ + \ +/* Shared ring entry */ \ +union __name##_sring_entry { \ + __req_t req; \ + __rsp_t rsp; \ +}; \ + \ +/* Shared ring page */ \ +struct __name##_sring { \ + RING_IDX req_prod, req_event; \ + RING_IDX rsp_prod, rsp_event; \ + union { \ + struct { \ + uint8_t smartpoll_active; \ + } netif; \ + struct { \ + uint8_t msg; \ + } tapif_user; \ + uint8_t pvt_pad[4]; \ + } pvt; \ + uint8_t __pad[44]; \ + union __name##_sring_entry ring[1]; /* variable-length */ \ +}; \ + \ +/* "Front" end's private variables */ \ +struct __name##_front_ring { \ + RING_IDX req_prod_pvt; \ + RING_IDX rsp_cons; \ + unsigned int nr_ents; \ + struct __name##_sring *sring; \ +}; \ + \ +/* "Back" end's private variables */ \ +struct __name##_back_ring { \ + RING_IDX rsp_prod_pvt; \ + RING_IDX req_cons; \ + unsigned int nr_ents; \ + struct __name##_sring *sring; \ +}; \ + \ +/* Syntactic sugar */ \ +typedef struct __name##_sring __name##_sring_t; \ +typedef struct __name##_front_ring __name##_front_ring_t; \ +typedef struct __name##_back_ring __name##_back_ring_t + +/* + * Macros for manipulating rings. + * + * FRONT_RING_whatever works on the "front end" of a ring: here + * requests are pushed on to the ring and responses taken off it. + * + * BACK_RING_whatever works on the "back end" of a ring: here + * requests are taken off the ring and responses put on. + * + * N.B. these macros do NO INTERLOCKS OR FLOW CONTROL. + * This is OK in 1-for-1 request-response situations where the + * requestor (front end) never has more than RING_SIZE()-1 + * outstanding requests. + */ + +/* Initialising empty rings */ +#define SHARED_RING_INIT(_s) do { \ + (_s)->req_prod = (_s)->rsp_prod = 0; \ + (_s)->req_event = (_s)->rsp_event = 1; \ + (void)memset((_s)->pvt.pvt_pad, 0, sizeof((_s)->pvt.pvt_pad)); \ + (void)memset((_s)->__pad, 0, sizeof((_s)->__pad)); \ +} while(0) + +#define FRONT_RING_INIT(_r, _s, __size) do { \ + (_r)->req_prod_pvt = 0; \ + (_r)->rsp_cons = 0; \ + (_r)->nr_ents = __RING_SIZE(_s, __size); \ + (_r)->sring = (_s); \ +} while (0) + +#define BACK_RING_INIT(_r, _s, __size) do { \ + (_r)->rsp_prod_pvt = 0; \ + (_r)->req_cons = 0; \ + (_r)->nr_ents = __RING_SIZE(_s, __size); \ + (_r)->sring = (_s); \ +} while (0) + +/* How big is this ring? */ +#define RING_SIZE(_r) \ + ((_r)->nr_ents) + +/* Number of free requests (for use on front side only). */ +#define RING_FREE_REQUESTS(_r) \ + (RING_SIZE(_r) - ((_r)->req_prod_pvt - (_r)->rsp_cons)) + +/* Test if there is an empty slot available on the front ring. + * (This is only meaningful from the front. ) + */ +#define RING_FULL(_r) \ + (RING_FREE_REQUESTS(_r) == 0) + +/* Test if there are outstanding messages to be processed on a ring. */ +#define RING_HAS_UNCONSUMED_RESPONSES(_r) \ + ((_r)->sring->rsp_prod - (_r)->rsp_cons) + +#ifdef __GNUC__ +#define RING_HAS_UNCONSUMED_REQUESTS(_r) ({ \ + unsigned int req = (_r)->sring->req_prod - (_r)->req_cons; \ + unsigned int rsp = RING_SIZE(_r) - \ + ((_r)->req_cons - (_r)->rsp_prod_pvt); \ + req < rsp ? 
req : rsp; \ +}) +#else +/* Same as above, but without the nice GCC ({ ... }) syntax. */ +#define RING_HAS_UNCONSUMED_REQUESTS(_r) \ + ((((_r)->sring->req_prod - (_r)->req_cons) < \ + (RING_SIZE(_r) - ((_r)->req_cons - (_r)->rsp_prod_pvt))) ? \ + ((_r)->sring->req_prod - (_r)->req_cons) : \ + (RING_SIZE(_r) - ((_r)->req_cons - (_r)->rsp_prod_pvt))) +#endif + +/* Direct access to individual ring elements, by index. */ +#define RING_GET_REQUEST(_r, _idx) \ + (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req)) + +/* + * Get a local copy of a request. + * + * Use this in preference to RING_GET_REQUEST() so all processing is + * done on a local copy that cannot be modified by the other end. + * + * Note that https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145 may cause this + * to be ineffective where _req is a struct which consists of only bitfields. + */ +#define RING_COPY_REQUEST(_r, _idx, _req) do { \ + /* Use volatile to force the copy into _req. */ \ + *(_req) = *(volatile typeof(_req))RING_GET_REQUEST(_r, _idx); \ +} while (0) + +#define RING_GET_RESPONSE(_r, _idx) \ + (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp)) + +/* Loop termination condition: Would the specified index overflow the ring? */ +#define RING_REQUEST_CONS_OVERFLOW(_r, _cons) \ + (((_cons) - (_r)->rsp_prod_pvt) >= RING_SIZE(_r)) + +/* Ill-behaved frontend determination: Can there be this many requests? */ +#define RING_REQUEST_PROD_OVERFLOW(_r, _prod) \ + (((_prod) - (_r)->rsp_prod_pvt) > RING_SIZE(_r)) + +#define RING_PUSH_REQUESTS(_r) do { \ + xen_wmb(); /* back sees requests /before/ updated producer index */ \ + (_r)->sring->req_prod = (_r)->req_prod_pvt; \ +} while (0) + +#define RING_PUSH_RESPONSES(_r) do { \ + xen_wmb(); /* front sees resps /before/ updated producer index */ \ + (_r)->sring->rsp_prod = (_r)->rsp_prod_pvt; \ +} while (0) + +/* + * Notification hold-off (req_event and rsp_event): + * + * When queueing requests or responses on a shared ring, it may not always be + * necessary to notify the remote end. For example, if requests are in flight + * in a backend, the front may be able to queue further requests without + * notifying the back (if the back checks for new requests when it queues + * responses). + * + * When enqueuing requests or responses: + * + * Use RING_PUSH_{REQUESTS,RESPONSES}_AND_CHECK_NOTIFY(). The second argument + * is a boolean return value. True indicates that the receiver requires an + * asynchronous notification. + * + * After dequeuing requests or responses (before sleeping the connection): + * + * Use RING_FINAL_CHECK_FOR_REQUESTS() or RING_FINAL_CHECK_FOR_RESPONSES(). + * The second argument is a boolean return value. True indicates that there + * are pending messages on the ring (i.e., the connection should not be put + * to sleep). + * + * These macros will set the req_event/rsp_event field to trigger a + * notification on the very next message that is enqueued. If you want to + * create batches of work (i.e., only receive a notification after several + * messages have been enqueued) then you will need to create a customised + * version of the FINAL_CHECK macro in your own code, which sets the event + * field appropriately. 
+ */ + +#define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do { \ + RING_IDX __old = (_r)->sring->req_prod; \ + RING_IDX __new = (_r)->req_prod_pvt; \ + xen_wmb(); /* back sees requests /before/ updated producer index */ \ + (_r)->sring->req_prod = __new; \ + xen_mb(); /* back sees new requests /before/ we check req_event */ \ + (_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) < \ + (RING_IDX)(__new - __old)); \ +} while (0) + +#define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do { \ + RING_IDX __old = (_r)->sring->rsp_prod; \ + RING_IDX __new = (_r)->rsp_prod_pvt; \ + xen_wmb(); /* front sees resps /before/ updated producer index */ \ + (_r)->sring->rsp_prod = __new; \ + xen_mb(); /* front sees new resps /before/ we check rsp_event */ \ + (_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) < \ + (RING_IDX)(__new - __old)); \ +} while (0) + +#define RING_FINAL_CHECK_FOR_REQUESTS(_r, _work_to_do) do { \ + (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \ + if (_work_to_do) break; \ + (_r)->sring->req_event = (_r)->req_cons + 1; \ + xen_mb(); \ + (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \ +} while (0) + +#define RING_FINAL_CHECK_FOR_RESPONSES(_r, _work_to_do) do { \ + (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \ + if (_work_to_do) break; \ + (_r)->sring->rsp_event = (_r)->rsp_cons + 1; \ + xen_mb(); \ + (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \ +} while (0) + + +/* + * DEFINE_XEN_FLEX_RING_AND_INTF defines two monodirectional rings and + * functions to check if there is data on the ring, and to read and + * write to them. + * + * DEFINE_XEN_FLEX_RING is similar to DEFINE_XEN_FLEX_RING_AND_INTF, but + * does not define the indexes page. As different protocols can have + * extensions to the basic format, this macro allow them to define their + * own struct. + * + * XEN_FLEX_RING_SIZE + * Convenience macro to calculate the size of one of the two rings + * from the overall order. + * + * $NAME_mask + * Function to apply the size mask to an index, to reduce the index + * within the range [0-size]. + * + * $NAME_read_packet + * Function to read data from the ring. The amount of data to read is + * specified by the "size" argument. + * + * $NAME_write_packet + * Function to write data to the ring. The amount of data to write is + * specified by the "size" argument. + * + * $NAME_get_ring_ptr + * Convenience function that returns a pointer to read/write to the + * ring at the right location. + * + * $NAME_data_intf + * Indexes page, shared between frontend and backend. It also + * contains the array of grant refs. + * + * $NAME_queued + * Function to calculate how many bytes are currently on the ring, + * ready to be read. It can also be used to calculate how much free + * space is currently on the ring (XEN_FLEX_RING_SIZE() - + * $NAME_queued()). + */ + +#ifndef XEN_PAGE_SHIFT +/* The PAGE_SIZE for ring protocols and hypercall interfaces is always + * 4K, regardless of the architecture, and page granularity chosen by + * operating systems. 
+ */ +#define XEN_PAGE_SHIFT 12 +#endif +#define XEN_FLEX_RING_SIZE(order) \ + (1UL << ((order) + XEN_PAGE_SHIFT - 1)) + +#define DEFINE_XEN_FLEX_RING(name) \ +static inline RING_IDX name##_mask(RING_IDX idx, RING_IDX ring_size) \ +{ \ + return idx & (ring_size - 1); \ +} \ + \ +static inline unsigned char *name##_get_ring_ptr(unsigned char *buf, \ + RING_IDX idx, \ + RING_IDX ring_size) \ +{ \ + return buf + name##_mask(idx, ring_size); \ +} \ + \ +static inline void name##_read_packet(void *opaque, \ + const unsigned char *buf, \ + size_t size, \ + RING_IDX masked_prod, \ + RING_IDX *masked_cons, \ + RING_IDX ring_size) \ +{ \ + if (*masked_cons < masked_prod || \ + size <= ring_size - *masked_cons) { \ + memcpy(opaque, buf + *masked_cons, size); \ + } else { \ + memcpy(opaque, buf + *masked_cons, ring_size - *masked_cons); \ + memcpy((unsigned char *)opaque + ring_size - *masked_cons, buf, \ + size - (ring_size - *masked_cons)); \ + } \ + *masked_cons = name##_mask(*masked_cons + size, ring_size); \ +} \ + \ +static inline void name##_write_packet(unsigned char *buf, \ + const void *opaque, \ + size_t size, \ + RING_IDX *masked_prod, \ + RING_IDX masked_cons, \ + RING_IDX ring_size) \ +{ \ + if (*masked_prod < masked_cons || \ + size <= ring_size - *masked_prod) { \ + memcpy(buf + *masked_prod, opaque, size); \ + } else { \ + memcpy(buf + *masked_prod, opaque, ring_size - *masked_prod); \ + memcpy(buf, (unsigned char *)opaque + (ring_size - *masked_prod), \ + size - (ring_size - *masked_prod)); \ + } \ + *masked_prod = name##_mask(*masked_prod + size, ring_size); \ +} \ + \ +static inline RING_IDX name##_queued(RING_IDX prod, \ + RING_IDX cons, \ + RING_IDX ring_size) \ +{ \ + RING_IDX size; \ + \ + if (prod == cons) \ + return 0; \ + \ + prod = name##_mask(prod, ring_size); \ + cons = name##_mask(cons, ring_size); \ + \ + if (prod == cons) \ + return ring_size; \ + \ + if (prod > cons) \ + size = prod - cons; \ + else \ + size = ring_size - (cons - prod); \ + return size; \ +} \ + \ +struct name##_data { \ + unsigned char *in; /* half of the allocation */ \ + unsigned char *out; /* half of the allocation */ \ +} + +#define DEFINE_XEN_FLEX_RING_AND_INTF(name) \ +struct name##_data_intf { \ + RING_IDX in_cons, in_prod; \ + \ + uint8_t pad1[56]; \ + \ + RING_IDX out_cons, out_prod; \ + \ + uint8_t pad2[56]; \ + \ + RING_IDX ring_order; \ + grant_ref_t ref[]; \ +}; \ +DEFINE_XEN_FLEX_RING(name) + +#endif /* __XEN_PUBLIC_IO_RING_H__ */ + +/* + * Local variables: + * mode: C + * c-file-style: "BSD" + * c-basic-offset: 4 + * tab-width: 4 + * indent-tabs-mode: nil + * End: + */ -- cgit v1.2.3-55-g7522 From c9fb47e7d028b7c76655625880eb30666619f636 Mon Sep 17 00:00:00 2001 From: Stefano Stabellini Date: Tue, 21 Mar 2017 13:51:34 -0700 Subject: 9p: introduce a type for the 9p header Use the new type in virtio-9p-device. 
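For illustration, a minimal standalone sketch (not part of this patch; the
helper names and byte values are invented for the example) of how the packed
7-byte header lines up with the 9P wire layout size[4] type[1] tag[2], all
little-endian:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    typedef struct __attribute__((packed)) {
        uint32_t size_le;   /* total message size, little-endian */
        uint8_t  id;        /* message type, e.g. 100 == Tversion */
        uint16_t tag_le;    /* request tag, little-endian */
    } P9MsgHeader;

    /* Read little-endian fields byte by byte, so the sketch gives the
     * same result on hosts of either endianness. */
    static uint32_t get_le32(const uint8_t *p)
    {
        return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
               (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
    }

    static uint16_t get_le16(const uint8_t *p)
    {
        return (uint16_t)(p[0] | p[1] << 8);
    }

    int main(void)
    {
        /* A Tversion header: size=19, type=100, tag=0xffff (NOTAG). */
        const uint8_t wire[7] = { 19, 0, 0, 0, 100, 0xff, 0xff };
        P9MsgHeader h;

        memcpy(&h, wire, sizeof(h));   /* packed, so sizeof(h) == 7 */
        printf("size=%u id=%u tag=0x%x\n",
               get_le32((const uint8_t *)&h.size_le), h.id,
               get_le16((const uint8_t *)&h.tag_le));
        return 0;
    }

The point of the packed struct is that it can be copied to and from the
transport buffer as raw bytes, with the _le suffix reminding callers that
the multi-byte fields still need byte-order conversion (le32_to_cpu() and
le16_to_cpu() in QEMU proper, as the later xen/9pfs patches do).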
Signed-off-by: Stefano Stabellini Reviewed-by: Greg Kurz Reviewed-by: Philippe Mathieu-Daudé CC: anthony.perard@citrix.com CC: jgross@suse.com CC: Aneesh Kumar K.V CC: Greg Kurz --- hw/9pfs/9p.h | 6 ++++++ hw/9pfs/virtio-9p-device.c | 6 +----- 2 files changed, 7 insertions(+), 5 deletions(-) (limited to 'hw') diff --git a/hw/9pfs/9p.h b/hw/9pfs/9p.h index b7e836251e..5312d8a424 100644 --- a/hw/9pfs/9p.h +++ b/hw/9pfs/9p.h @@ -119,6 +119,12 @@ static inline char *rpath(FsContext *ctx, const char *path) typedef struct V9fsPDU V9fsPDU; struct V9fsState; +typedef struct { + uint32_t size_le; + uint8_t id; + uint16_t tag_le; +} QEMU_PACKED P9MsgHeader; + struct V9fsPDU { uint32_t size; diff --git a/hw/9pfs/virtio-9p-device.c b/hw/9pfs/virtio-9p-device.c index 27a4a32f5c..3782f43702 100644 --- a/hw/9pfs/virtio-9p-device.c +++ b/hw/9pfs/virtio-9p-device.c @@ -46,11 +46,7 @@ static void handle_9p_output(VirtIODevice *vdev, VirtQueue *vq) VirtQueueElement *elem; while ((pdu = pdu_alloc(s))) { - struct { - uint32_t size_le; - uint8_t id; - uint16_t tag_le; - } QEMU_PACKED out; + P9MsgHeader out; elem = virtqueue_pop(vq, sizeof(VirtQueueElement)); if (!elem) { -- cgit v1.2.3-55-g7522 From b37eeb020123d16a2ba10b16b9923088f727b005 Mon Sep 17 00:00:00 2001 From: Stefano Stabellini Date: Tue, 21 Mar 2017 13:51:34 -0700 Subject: xen/9pfs: introduce Xen 9pfs backend Introduce the Xen 9pfs backend: add struct XenDevOps to register as a Xen backend and add struct V9fsTransport to register as v9fs transport. All functions are empty stubs for now. Signed-off-by: Stefano Stabellini Reviewed-by: Greg Kurz CC: anthony.perard@citrix.com CC: jgross@suse.com CC: Aneesh Kumar K.V CC: Greg Kurz --- hw/9pfs/xen-9p-backend.c | 96 ++++++++++++++++++++++++++++++++++++++++++++++++ hw/9pfs/xen-9pfs.h | 21 +++++++++++ 2 files changed, 117 insertions(+) create mode 100644 hw/9pfs/xen-9p-backend.c create mode 100644 hw/9pfs/xen-9pfs.h (limited to 'hw') diff --git a/hw/9pfs/xen-9p-backend.c b/hw/9pfs/xen-9p-backend.c new file mode 100644 index 0000000000..da8ae185d7 --- /dev/null +++ b/hw/9pfs/xen-9p-backend.c @@ -0,0 +1,96 @@ +/* + * Xen 9p backend + * + * Copyright Aporeto 2017 + * + * Authors: + * Stefano Stabellini + * + */ + +#include "qemu/osdep.h" + +#include "hw/hw.h" +#include "hw/9pfs/9p.h" +#include "hw/xen/xen_backend.h" +#include "hw/9pfs/xen-9pfs.h" +#include "qemu/config-file.h" +#include "fsdev/qemu-fsdev.h" + +typedef struct Xen9pfsDev { + struct XenDevice xendev; /* must be first */ +} Xen9pfsDev; + +static ssize_t xen_9pfs_pdu_vmarshal(V9fsPDU *pdu, + size_t offset, + const char *fmt, + va_list ap) +{ + return 0; +} + +static ssize_t xen_9pfs_pdu_vunmarshal(V9fsPDU *pdu, + size_t offset, + const char *fmt, + va_list ap) +{ + return 0; +} + +static void xen_9pfs_init_out_iov_from_pdu(V9fsPDU *pdu, + struct iovec **piov, + unsigned int *pniov) +{ +} + +static void xen_9pfs_init_in_iov_from_pdu(V9fsPDU *pdu, + struct iovec **piov, + unsigned int *pniov, + size_t size) +{ +} + +static void xen_9pfs_push_and_notify(V9fsPDU *pdu) +{ +} + +static const struct V9fsTransport xen_9p_transport = { + .pdu_vmarshal = xen_9pfs_pdu_vmarshal, + .pdu_vunmarshal = xen_9pfs_pdu_vunmarshal, + .init_in_iov_from_pdu = xen_9pfs_init_in_iov_from_pdu, + .init_out_iov_from_pdu = xen_9pfs_init_out_iov_from_pdu, + .push_and_notify = xen_9pfs_push_and_notify, +}; + +static int xen_9pfs_init(struct XenDevice *xendev) +{ + return 0; +} + +static int xen_9pfs_free(struct XenDevice *xendev) +{ + return -1; +} + +static int 
xen_9pfs_connect(struct XenDevice *xendev) +{ + return 0; +} + +static void xen_9pfs_alloc(struct XenDevice *xendev) +{ +} + +static void xen_9pfs_disconnect(struct XenDevice *xendev) +{ +} + +struct XenDevOps xen_9pfs_ops = { + .size = sizeof(Xen9pfsDev), + .flags = DEVOPS_FLAG_NEED_GNTDEV, + .alloc = xen_9pfs_alloc, + .init = xen_9pfs_init, + .initialise = xen_9pfs_connect, + .disconnect = xen_9pfs_disconnect, + .free = xen_9pfs_free, +}; diff --git a/hw/9pfs/xen-9pfs.h b/hw/9pfs/xen-9pfs.h new file mode 100644 index 0000000000..2d6ef7828c --- /dev/null +++ b/hw/9pfs/xen-9pfs.h @@ -0,0 +1,21 @@ +/* + * Xen 9p backend + * + * Copyright Aporeto 2017 + * + * Authors: + * Stefano Stabellini + * + * This work is licensed under the terms of the GNU GPL version 2 or + * later. See the COPYING file in the top-level directory. + * + */ + +#include +#include "hw/xen/io/ring.h" + +/* + * Do not merge into xen-9p-backend.c: clang doesn't allow unused static + * inline functions in c files. + */ +DEFINE_XEN_FLEX_RING_AND_INTF(xen_9pfs); -- cgit v1.2.3-55-g7522 From f23ef34a5dec56103e1348a622a6adf7c87c821f Mon Sep 17 00:00:00 2001 From: Stefano Stabellini Date: Wed, 22 Mar 2017 10:15:59 -0700 Subject: xen/9pfs: connect to the frontend Write the limits of the backend to xenstore. Connect to the frontend. Upon connection, allocate the rings according to the protocol specification. Initialize a QEMUBH to schedule work upon receiving an event channel notification from the frontend. Signed-off-by: Stefano Stabellini CC: anthony.perard@citrix.com CC: jgross@suse.com CC: Aneesh Kumar K.V CC: Greg Kurz --- hw/9pfs/xen-9p-backend.c | 182 ++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 181 insertions(+), 1 deletion(-) (limited to 'hw') diff --git a/hw/9pfs/xen-9p-backend.c b/hw/9pfs/xen-9p-backend.c index da8ae185d7..03dd88126d 100644 --- a/hw/9pfs/xen-9p-backend.c +++ b/hw/9pfs/xen-9p-backend.c @@ -17,8 +17,41 @@ #include "qemu/config-file.h" #include "fsdev/qemu-fsdev.h" +#define VERSIONS "1" +#define MAX_RINGS 8 +#define MAX_RING_ORDER 8 + +typedef struct Xen9pfsRing { + struct Xen9pfsDev *priv; + + int ref; + xenevtchn_handle *evtchndev; + int evtchn; + int local_port; + int ring_order; + struct xen_9pfs_data_intf *intf; + unsigned char *data; + struct xen_9pfs_data ring; + + struct iovec *sg; + QEMUBH *bh; + + /* local copies, so that we can read/write PDU data directly from + * the ring */ + RING_IDX out_cons, out_size, in_cons; + bool inprogress; +} Xen9pfsRing; + typedef struct Xen9pfsDev { struct XenDevice xendev; /* must be first */ + V9fsState state; + char *path; + char *security_model; + char *tag; + char *id; + + int num_rings; + Xen9pfsRing *rings; } Xen9pfsDev; static ssize_t xen_9pfs_pdu_vmarshal(V9fsPDU *pdu, @@ -67,22 +100,169 @@ static int xen_9pfs_init(struct XenDevice *xendev) return 0; } +static void xen_9pfs_bh(void *opaque) +{ +} + +static void xen_9pfs_evtchn_event(void *opaque) +{ +} + static int xen_9pfs_free(struct XenDevice *xendev) { - return -1; + int i; + Xen9pfsDev *xen_9pdev = container_of(xendev, Xen9pfsDev, xendev); + + g_free(xen_9pdev->id); + g_free(xen_9pdev->tag); + g_free(xen_9pdev->path); + g_free(xen_9pdev->security_model); + + for (i = 0; i < xen_9pdev->num_rings; i++) { + if (xen_9pdev->rings[i].data != NULL) { + xengnttab_unmap(xen_9pdev->xendev.gnttabdev, + xen_9pdev->rings[i].data, + (1 << xen_9pdev->rings[i].ring_order)); + } + if (xen_9pdev->rings[i].intf != NULL) { + xengnttab_unmap(xen_9pdev->xendev.gnttabdev, + xen_9pdev->rings[i].intf, + 1); 
+ } + if (xen_9pdev->rings[i].evtchndev > 0) { + qemu_set_fd_handler(xenevtchn_fd(xen_9pdev->rings[i].evtchndev), + NULL, NULL, NULL); + xenevtchn_unbind(xen_9pdev->rings[i].evtchndev, + xen_9pdev->rings[i].local_port); + } + if (xen_9pdev->rings[i].bh != NULL) { + qemu_bh_delete(xen_9pdev->rings[i].bh); + } + } + g_free(xen_9pdev->rings); + return 0; } static int xen_9pfs_connect(struct XenDevice *xendev) { + int i; + Xen9pfsDev *xen_9pdev = container_of(xendev, Xen9pfsDev, xendev); + V9fsState *s = &xen_9pdev->state; + QemuOpts *fsdev; + + if (xenstore_read_fe_int(&xen_9pdev->xendev, "num-rings", + &xen_9pdev->num_rings) == -1 || + xen_9pdev->num_rings > MAX_RINGS || xen_9pdev->num_rings < 1) { + return -1; + } + + xen_9pdev->rings = g_malloc0(xen_9pdev->num_rings * sizeof(Xen9pfsRing)); + for (i = 0; i < xen_9pdev->num_rings; i++) { + char *str; + int ring_order; + + xen_9pdev->rings[i].priv = xen_9pdev; + xen_9pdev->rings[i].evtchn = -1; + xen_9pdev->rings[i].local_port = -1; + + str = g_strdup_printf("ring-ref%u", i); + if (xenstore_read_fe_int(&xen_9pdev->xendev, str, + &xen_9pdev->rings[i].ref) == -1) { + goto out; + } + g_free(str); + str = g_strdup_printf("event-channel-%u", i); + if (xenstore_read_fe_int(&xen_9pdev->xendev, str, + &xen_9pdev->rings[i].evtchn) == -1) { + goto out; + } + g_free(str); + + xen_9pdev->rings[i].intf = xengnttab_map_grant_ref( + xen_9pdev->xendev.gnttabdev, + xen_9pdev->xendev.dom, + xen_9pdev->rings[i].ref, + PROT_READ | PROT_WRITE); + if (!xen_9pdev->rings[i].intf) { + goto out; + } + ring_order = xen_9pdev->rings[i].intf->ring_order; + if (ring_order > MAX_RING_ORDER) { + goto out; + } + xen_9pdev->rings[i].ring_order = ring_order; + xen_9pdev->rings[i].data = xengnttab_map_domain_grant_refs( + xen_9pdev->xendev.gnttabdev, + (1 << ring_order), + xen_9pdev->xendev.dom, + xen_9pdev->rings[i].intf->ref, + PROT_READ | PROT_WRITE); + if (!xen_9pdev->rings[i].data) { + goto out; + } + xen_9pdev->rings[i].ring.in = xen_9pdev->rings[i].data; + xen_9pdev->rings[i].ring.out = xen_9pdev->rings[i].data + + XEN_FLEX_RING_SIZE(ring_order); + + xen_9pdev->rings[i].bh = qemu_bh_new(xen_9pfs_bh, &xen_9pdev->rings[i]); + xen_9pdev->rings[i].out_cons = 0; + xen_9pdev->rings[i].out_size = 0; + xen_9pdev->rings[i].inprogress = false; + + + xen_9pdev->rings[i].evtchndev = xenevtchn_open(NULL, 0); + if (xen_9pdev->rings[i].evtchndev == NULL) { + goto out; + } + fcntl(xenevtchn_fd(xen_9pdev->rings[i].evtchndev), F_SETFD, FD_CLOEXEC); + xen_9pdev->rings[i].local_port = xenevtchn_bind_interdomain + (xen_9pdev->rings[i].evtchndev, + xendev->dom, + xen_9pdev->rings[i].evtchn); + if (xen_9pdev->rings[i].local_port == -1) { + xen_pv_printf(xendev, 0, + "xenevtchn_bind_interdomain failed port=%d\n", + xen_9pdev->rings[i].evtchn); + goto out; + } + xen_pv_printf(xendev, 2, "bind evtchn port %d\n", xendev->local_port); + qemu_set_fd_handler(xenevtchn_fd(xen_9pdev->rings[i].evtchndev), + xen_9pfs_evtchn_event, NULL, &xen_9pdev->rings[i]); + } + + xen_9pdev->security_model = xenstore_read_be_str(xendev, "security_model"); + xen_9pdev->path = xenstore_read_be_str(xendev, "path"); + xen_9pdev->id = s->fsconf.fsdev_id = + g_strdup_printf("xen9p%d", xendev->dev); + xen_9pdev->tag = s->fsconf.tag = xenstore_read_fe_str(xendev, "tag"); + v9fs_register_transport(s, &xen_9p_transport); + fsdev = qemu_opts_create(qemu_find_opts("fsdev"), + s->fsconf.tag, + 1, NULL); + qemu_opt_set(fsdev, "fsdriver", "local", NULL); + qemu_opt_set(fsdev, "path", xen_9pdev->path, NULL); + qemu_opt_set(fsdev, 
"security_model", xen_9pdev->security_model, NULL); + qemu_opts_set_id(fsdev, s->fsconf.fsdev_id); + qemu_fsdev_add(fsdev); + v9fs_device_realize_common(s, NULL); + return 0; + +out: + xen_9pfs_free(xendev); + return -1; } static void xen_9pfs_alloc(struct XenDevice *xendev) { + xenstore_write_be_str(xendev, "versions", VERSIONS); + xenstore_write_be_int(xendev, "max-rings", MAX_RINGS); + xenstore_write_be_int(xendev, "max-ring-page-order", MAX_RING_ORDER); } static void xen_9pfs_disconnect(struct XenDevice *xendev) { + /* Dynamic hotplug of PV filesystems at runtime is not supported. */ } struct XenDevOps xen_9pfs_ops = { -- cgit v1.2.3-55-g7522 From 47b70fb1e4b619c9d6de74776a6c7c8e5c7719ee Mon Sep 17 00:00:00 2001 From: Stefano Stabellini Date: Wed, 22 Mar 2017 10:16:03 -0700 Subject: xen/9pfs: receive requests from the frontend Upon receiving an event channel notification from the frontend, schedule the bottom half. From the bottom half, read one request from the ring, create a pdu and call pdu_submit to handle it. For now, only handle one request per ring at a time. Signed-off-by: Stefano Stabellini CC: anthony.perard@citrix.com CC: jgross@suse.com CC: Aneesh Kumar K.V CC: Greg Kurz --- hw/9pfs/xen-9p-backend.c | 50 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 50 insertions(+) (limited to 'hw') diff --git a/hw/9pfs/xen-9p-backend.c b/hw/9pfs/xen-9p-backend.c index 03dd88126d..8820e8fdab 100644 --- a/hw/9pfs/xen-9p-backend.c +++ b/hw/9pfs/xen-9p-backend.c @@ -100,12 +100,62 @@ static int xen_9pfs_init(struct XenDevice *xendev) return 0; } +static int xen_9pfs_receive(Xen9pfsRing *ring) +{ + P9MsgHeader h; + RING_IDX cons, prod, masked_prod, masked_cons; + V9fsPDU *pdu; + + if (ring->inprogress) { + return 0; + } + + cons = ring->intf->out_cons; + prod = ring->intf->out_prod; + xen_rmb(); + + if (xen_9pfs_queued(prod, cons, XEN_FLEX_RING_SIZE(ring->ring_order)) < + sizeof(h)) { + return 0; + } + ring->inprogress = true; + + masked_prod = xen_9pfs_mask(prod, XEN_FLEX_RING_SIZE(ring->ring_order)); + masked_cons = xen_9pfs_mask(cons, XEN_FLEX_RING_SIZE(ring->ring_order)); + + xen_9pfs_read_packet((uint8_t *) &h, ring->ring.out, sizeof(h), + masked_prod, &masked_cons, + XEN_FLEX_RING_SIZE(ring->ring_order)); + + /* cannot fail, because we only handle one request per ring at a time */ + pdu = pdu_alloc(&ring->priv->state); + pdu->size = le32_to_cpu(h.size_le); + pdu->id = h.id; + pdu->tag = le32_to_cpu(h.tag_le); + ring->out_size = le32_to_cpu(h.size_le); + ring->out_cons = cons + le32_to_cpu(h.size_le); + + qemu_co_queue_init(&pdu->complete); + pdu_submit(pdu); + + return 0; +} + static void xen_9pfs_bh(void *opaque) { + Xen9pfsRing *ring = opaque; + xen_9pfs_receive(ring); } static void xen_9pfs_evtchn_event(void *opaque) { + Xen9pfsRing *ring = opaque; + evtchn_port_t port; + + port = xenevtchn_pending(ring->evtchndev); + xenevtchn_unmask(ring->evtchndev, port); + + qemu_bh_schedule(ring->bh); } static int xen_9pfs_free(struct XenDevice *xendev) -- cgit v1.2.3-55-g7522 From 40a2389207fb1a7ee179a9eba1b2fe9a5584bbc3 Mon Sep 17 00:00:00 2001 From: Stefano Stabellini Date: Wed, 22 Mar 2017 10:16:06 -0700 Subject: xen/9pfs: implement in/out_iov_from_pdu and vmarshal/vunmarshal Implement xen_9pfs_init_in/out_iov_from_pdu and xen_9pfs_pdu_vmarshal/vunmarshall by creating new sg pointing to the data on the ring. This is safe as we only handle one request per ring at any given time. 
Signed-off-by: Stefano Stabellini CC: anthony.perard@citrix.com CC: jgross@suse.com CC: Aneesh Kumar K.V CC: Greg Kurz --- hw/9pfs/xen-9p-backend.c | 99 +++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 97 insertions(+), 2 deletions(-) (limited to 'hw') diff --git a/hw/9pfs/xen-9p-backend.c b/hw/9pfs/xen-9p-backend.c index 8820e8fdab..9068703697 100644 --- a/hw/9pfs/xen-9p-backend.c +++ b/hw/9pfs/xen-9p-backend.c @@ -54,12 +54,81 @@ typedef struct Xen9pfsDev { Xen9pfsRing *rings; } Xen9pfsDev; +static void xen_9pfs_in_sg(Xen9pfsRing *ring, + struct iovec *in_sg, + int *num, + uint32_t idx, + uint32_t size) +{ + RING_IDX cons, prod, masked_prod, masked_cons; + + cons = ring->intf->in_cons; + prod = ring->intf->in_prod; + xen_rmb(); + masked_prod = xen_9pfs_mask(prod, XEN_FLEX_RING_SIZE(ring->ring_order)); + masked_cons = xen_9pfs_mask(cons, XEN_FLEX_RING_SIZE(ring->ring_order)); + + if (masked_prod < masked_cons) { + in_sg[0].iov_base = ring->ring.in + masked_prod; + in_sg[0].iov_len = masked_cons - masked_prod; + *num = 1; + } else { + in_sg[0].iov_base = ring->ring.in + masked_prod; + in_sg[0].iov_len = XEN_FLEX_RING_SIZE(ring->ring_order) - masked_prod; + in_sg[1].iov_base = ring->ring.in; + in_sg[1].iov_len = masked_cons; + *num = 2; + } +} + +static void xen_9pfs_out_sg(Xen9pfsRing *ring, + struct iovec *out_sg, + int *num, + uint32_t idx) +{ + RING_IDX cons, prod, masked_prod, masked_cons; + + cons = ring->intf->out_cons; + prod = ring->intf->out_prod; + xen_rmb(); + masked_prod = xen_9pfs_mask(prod, XEN_FLEX_RING_SIZE(ring->ring_order)); + masked_cons = xen_9pfs_mask(cons, XEN_FLEX_RING_SIZE(ring->ring_order)); + + if (masked_cons < masked_prod) { + out_sg[0].iov_base = ring->ring.out + masked_cons; + out_sg[0].iov_len = ring->out_size; + *num = 1; + } else { + if (ring->out_size > + (XEN_FLEX_RING_SIZE(ring->ring_order) - masked_cons)) { + out_sg[0].iov_base = ring->ring.out + masked_cons; + out_sg[0].iov_len = XEN_FLEX_RING_SIZE(ring->ring_order) - + masked_cons; + out_sg[1].iov_base = ring->ring.out; + out_sg[1].iov_len = ring->out_size - + (XEN_FLEX_RING_SIZE(ring->ring_order) - + masked_cons); + *num = 2; + } else { + out_sg[0].iov_base = ring->ring.out + masked_cons; + out_sg[0].iov_len = ring->out_size; + *num = 1; + } + } +} + static ssize_t xen_9pfs_pdu_vmarshal(V9fsPDU *pdu, size_t offset, const char *fmt, va_list ap) { - return 0; + Xen9pfsDev *xen_9pfs = container_of(pdu->s, Xen9pfsDev, state); + struct iovec in_sg[2]; + int num; + + xen_9pfs_in_sg(&xen_9pfs->rings[pdu->tag % xen_9pfs->num_rings], + in_sg, &num, pdu->idx, ROUND_UP(offset + 128, 512)); + return v9fs_iov_vmarshal(in_sg, num, offset, 0, fmt, ap); } static ssize_t xen_9pfs_pdu_vunmarshal(V9fsPDU *pdu, @@ -67,13 +136,29 @@ static ssize_t xen_9pfs_pdu_vunmarshal(V9fsPDU *pdu, const char *fmt, va_list ap) { - return 0; + Xen9pfsDev *xen_9pfs = container_of(pdu->s, Xen9pfsDev, state); + struct iovec out_sg[2]; + int num; + + xen_9pfs_out_sg(&xen_9pfs->rings[pdu->tag % xen_9pfs->num_rings], + out_sg, &num, pdu->idx); + return v9fs_iov_vunmarshal(out_sg, num, offset, 0, fmt, ap); } static void xen_9pfs_init_out_iov_from_pdu(V9fsPDU *pdu, struct iovec **piov, unsigned int *pniov) { + Xen9pfsDev *xen_9pfs = container_of(pdu->s, Xen9pfsDev, state); + Xen9pfsRing *ring = &xen_9pfs->rings[pdu->tag % xen_9pfs->num_rings]; + int num; + + g_free(ring->sg); + + ring->sg = g_malloc0(sizeof(*ring->sg) * 2); + xen_9pfs_out_sg(ring, ring->sg, &num, pdu->idx); + *piov = ring->sg; + *pniov = num; } static void 
xen_9pfs_init_in_iov_from_pdu(V9fsPDU *pdu, @@ -81,6 +166,16 @@ static void xen_9pfs_init_in_iov_from_pdu(V9fsPDU *pdu, unsigned int *pniov, size_t size) { + Xen9pfsDev *xen_9pfs = container_of(pdu->s, Xen9pfsDev, state); + Xen9pfsRing *ring = &xen_9pfs->rings[pdu->tag % xen_9pfs->num_rings]; + int num; + + g_free(ring->sg); + + ring->sg = g_malloc0(sizeof(*ring->sg) * 2); + xen_9pfs_in_sg(ring, ring->sg, &num, pdu->idx, size); + *piov = ring->sg; + *pniov = num; } static void xen_9pfs_push_and_notify(V9fsPDU *pdu) -- cgit v1.2.3-55-g7522 From 4476e09e34d4257d2bfbdb70d106a154f42c928b Mon Sep 17 00:00:00 2001 From: Stefano Stabellini Date: Wed, 22 Mar 2017 10:16:09 -0700 Subject: xen/9pfs: send responses back to the frontend Once a request is completed, xen_9pfs_push_and_notify gets called. In xen_9pfs_push_and_notify, update the indexes (data has already been copied to the sg by the common code) and send a notification to the frontend. Schedule the bottom-half to check if we already have any other requests pending. Signed-off-by: Stefano Stabellini CC: anthony.perard@citrix.com CC: jgross@suse.com CC: Aneesh Kumar K.V CC: Greg Kurz --- hw/9pfs/xen-9p-backend.c | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) (limited to 'hw') diff --git a/hw/9pfs/xen-9p-backend.c b/hw/9pfs/xen-9p-backend.c index 9068703697..9c7f41af99 100644 --- a/hw/9pfs/xen-9p-backend.c +++ b/hw/9pfs/xen-9p-backend.c @@ -180,6 +180,25 @@ static void xen_9pfs_init_in_iov_from_pdu(V9fsPDU *pdu, static void xen_9pfs_push_and_notify(V9fsPDU *pdu) { + RING_IDX prod; + Xen9pfsDev *priv = container_of(pdu->s, Xen9pfsDev, state); + Xen9pfsRing *ring = &priv->rings[pdu->tag % priv->num_rings]; + + g_free(ring->sg); + ring->sg = NULL; + + ring->intf->out_cons = ring->out_cons; + xen_wmb(); + + prod = ring->intf->in_prod; + xen_rmb(); + ring->intf->in_prod = prod + pdu->size; + xen_wmb(); + + ring->inprogress = false; + xenevtchn_notify(ring->evtchndev, ring->local_port); + + qemu_bh_schedule(ring->bh); } static const struct V9fsTransport xen_9p_transport = { -- cgit v1.2.3-55-g7522 From e737b6d5c3d69bde91c8cc554a8ce6d20e14feaa Mon Sep 17 00:00:00 2001 From: Stefano Stabellini Date: Wed, 22 Mar 2017 10:17:09 -0700 Subject: xen/9pfs: build and register Xen 9pfs backend Signed-off-by: Stefano Stabellini Reviewed-by: Greg Kurz CC: anthony.perard@citrix.com CC: jgross@suse.com CC: Aneesh Kumar K.V CC: Greg Kurz --- hw/9pfs/Makefile.objs | 1 + hw/xen/xen_backend.c | 3 +++ include/hw/xen/xen_backend.h | 3 +++ 3 files changed, 7 insertions(+) (limited to 'hw') diff --git a/hw/9pfs/Makefile.objs b/hw/9pfs/Makefile.objs index 32197e6671..cab5e942ed 100644 --- a/hw/9pfs/Makefile.objs +++ b/hw/9pfs/Makefile.objs @@ -5,5 +5,6 @@ common-obj-y += coth.o cofs.o codir.o cofile.o common-obj-y += coxattr.o 9p-synth.o common-obj-$(CONFIG_OPEN_BY_HANDLE) += 9p-handle.o common-obj-y += 9p-proxy.o +common-obj-$(CONFIG_XEN) += xen-9p-backend.o obj-y += virtio-9p-device.o diff --git a/hw/xen/xen_backend.c b/hw/xen/xen_backend.c index d34c49e152..c85f1637e4 100644 --- a/hw/xen/xen_backend.c +++ b/hw/xen/xen_backend.c @@ -583,6 +583,9 @@ void xen_be_register_common(void) xen_be_register("console", &xen_console_ops); xen_be_register("vkbd", &xen_kbdmouse_ops); xen_be_register("qdisk", &xen_blkdev_ops); +#ifdef CONFIG_VIRTFS + xen_be_register("9pfs", &xen_9pfs_ops); +#endif #ifdef CONFIG_USB_LIBUSB xen_be_register("qusb", &xen_usb_ops); #endif diff --git a/include/hw/xen/xen_backend.h b/include/hw/xen/xen_backend.h index 30811a1e5d..852c2ea64c 100644 
--- a/include/hw/xen/xen_backend.h +++ b/include/hw/xen/xen_backend.h @@ -47,6 +47,9 @@ extern struct XenDevOps xen_console_ops; /* xen_console.c */ extern struct XenDevOps xen_kbdmouse_ops; /* xen_framebuffer.c */ extern struct XenDevOps xen_framebuffer_ops; /* xen_framebuffer.c */ extern struct XenDevOps xen_blkdev_ops; /* xen_disk.c */ +#ifdef CONFIG_VIRTFS +extern struct XenDevOps xen_9pfs_ops; /* xen-9p-backend.c */ +#endif extern struct XenDevOps xen_netdev_ops; /* xen_nic.c */ #ifdef CONFIG_USB_LIBUSB extern struct XenDevOps xen_usb_ops; /* xen-usb.c */ -- cgit v1.2.3-55-g7522 From 56e2cd24527867ac65aa86fc1820e5b700ccfa03 Mon Sep 17 00:00:00 2001 From: Anthony Xu Date: Wed, 5 Apr 2017 16:21:29 -0700 Subject: move xen-common.c to hw/xen/ move xen-common.c to hw/xen/ Signed-off -by: Anthony Xu Reviewed-by: Stefano Stabellini --- Makefile.target | 2 - hw/xen/Makefile.objs | 2 +- hw/xen/xen-common.c | 169 +++++++++++++++++++++++++++++++++++++++++++++++++++ stubs/Makefile.objs | 1 + stubs/xen-common.c | 14 +++++ xen-common-stub.c | 14 ----- xen-common.c | 169 --------------------------------------------------- 7 files changed, 185 insertions(+), 186 deletions(-) create mode 100644 hw/xen/xen-common.c create mode 100644 stubs/xen-common.c delete mode 100644 xen-common-stub.c delete mode 100644 xen-common.c (limited to 'hw') diff --git a/Makefile.target b/Makefile.target index 7df2b8c149..48c027f4b7 100644 --- a/Makefile.target +++ b/Makefile.target @@ -150,9 +150,7 @@ obj-y += migration/ram.o migration/savevm.o LIBS := $(libs_softmmu) $(LIBS) # xen support -obj-$(CONFIG_XEN) += xen-common.o obj-$(CONFIG_XEN_I386) += xen-hvm.o xen-mapcache.o -obj-$(call lnot,$(CONFIG_XEN)) += xen-common-stub.o obj-$(call lnot,$(CONFIG_XEN_I386)) += xen-hvm-stub.o # Hardware support diff --git a/hw/xen/Makefile.objs b/hw/xen/Makefile.objs index 4be3ec9c77..64a70bc6cb 100644 --- a/hw/xen/Makefile.objs +++ b/hw/xen/Makefile.objs @@ -1,5 +1,5 @@ # xen backend driver support -common-obj-$(CONFIG_XEN) += xen_backend.o xen_devconfig.o xen_pvdev.o +common-obj-$(CONFIG_XEN) += xen_backend.o xen_devconfig.o xen_pvdev.o xen-common.o obj-$(CONFIG_XEN_PCI_PASSTHROUGH) += xen-host-pci-device.o obj-$(CONFIG_XEN_PCI_PASSTHROUGH) += xen_pt.o xen_pt_config_init.o xen_pt_graphics.o xen_pt_msi.o diff --git a/hw/xen/xen-common.c b/hw/xen/xen-common.c new file mode 100644 index 0000000000..ae76150e8a --- /dev/null +++ b/hw/xen/xen-common.c @@ -0,0 +1,169 @@ +/* + * Copyright (C) 2014 Citrix Systems UK Ltd. + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + * + * Contributions after 2012-01-13 are licensed under the terms of the + * GNU GPL, version 2 or (at your option) any later version. + */ + +#include "qemu/osdep.h" +#include "hw/xen/xen_backend.h" +#include "qmp-commands.h" +#include "sysemu/char.h" +#include "sysemu/accel.h" +#include "migration/migration.h" + +//#define DEBUG_XEN + +#ifdef DEBUG_XEN +#define DPRINTF(fmt, ...) \ + do { fprintf(stderr, "xen: " fmt, ## __VA_ARGS__); } while (0) +#else +#define DPRINTF(fmt, ...) \ + do { } while (0) +#endif + +xc_interface *xen_xc; +xenforeignmemory_handle *xen_fmem; +xendevicemodel_handle *xen_dmod; + +static int store_dev_info(int domid, Chardev *cs, const char *string) +{ + struct xs_handle *xs = NULL; + char *path = NULL; + char *newpath = NULL; + char *pts = NULL; + int ret = -1; + + /* Only continue if we're talking to a pty. 
*/ + if (strncmp(cs->filename, "pty:", 4)) { + return 0; + } + pts = cs->filename + 4; + + /* We now have everything we need to set the xenstore entry. */ + xs = xs_open(0); + if (xs == NULL) { + fprintf(stderr, "Could not contact XenStore\n"); + goto out; + } + + path = xs_get_domain_path(xs, domid); + if (path == NULL) { + fprintf(stderr, "xs_get_domain_path() error\n"); + goto out; + } + newpath = realloc(path, (strlen(path) + strlen(string) + + strlen("/tty") + 1)); + if (newpath == NULL) { + fprintf(stderr, "realloc error\n"); + goto out; + } + path = newpath; + + strcat(path, string); + strcat(path, "/tty"); + if (!xs_write(xs, XBT_NULL, path, pts, strlen(pts))) { + fprintf(stderr, "xs_write for '%s' fail", string); + goto out; + } + ret = 0; + +out: + free(path); + xs_close(xs); + + return ret; +} + +void xenstore_store_pv_console_info(int i, Chardev *chr) +{ + if (i == 0) { + store_dev_info(xen_domid, chr, "/console"); + } else { + char buf[32]; + snprintf(buf, sizeof(buf), "/device/console/%d", i); + store_dev_info(xen_domid, chr, buf); + } +} + + +static void xenstore_record_dm_state(struct xs_handle *xs, const char *state) +{ + char path[50]; + + if (xs == NULL) { + fprintf(stderr, "xenstore connection not initialized\n"); + exit(1); + } + + snprintf(path, sizeof (path), "device-model/%u/state", xen_domid); + if (!xs_write(xs, XBT_NULL, path, state, strlen(state))) { + fprintf(stderr, "error recording dm state\n"); + exit(1); + } +} + + +static void xen_change_state_handler(void *opaque, int running, + RunState state) +{ + if (running) { + /* record state running */ + xenstore_record_dm_state(xenstore, "running"); + } +} + +static int xen_init(MachineState *ms) +{ + xen_xc = xc_interface_open(0, 0, 0); + if (xen_xc == NULL) { + xen_pv_printf(NULL, 0, "can't open xen interface\n"); + return -1; + } + xen_fmem = xenforeignmemory_open(0, 0); + if (xen_fmem == NULL) { + xen_pv_printf(NULL, 0, "can't open xen fmem interface\n"); + xc_interface_close(xen_xc); + return -1; + } + xen_dmod = xendevicemodel_open(0, 0); + if (xen_dmod == NULL) { + xen_pv_printf(NULL, 0, "can't open xen devicemodel interface\n"); + xenforeignmemory_close(xen_fmem); + xc_interface_close(xen_xc); + return -1; + } + qemu_add_vm_change_state_handler(xen_change_state_handler, NULL); + + global_state_set_optional(); + savevm_skip_configuration(); + savevm_skip_section_footers(); + + return 0; +} + +static void xen_accel_class_init(ObjectClass *oc, void *data) +{ + AccelClass *ac = ACCEL_CLASS(oc); + ac->name = "Xen"; + ac->init_machine = xen_init; + ac->allowed = &xen_allowed; +} + +#define TYPE_XEN_ACCEL ACCEL_CLASS_NAME("xen") + +static const TypeInfo xen_accel_type = { + .name = TYPE_XEN_ACCEL, + .parent = TYPE_ACCEL, + .class_init = xen_accel_class_init, +}; + +static void xen_type_init(void) +{ + type_register_static(&xen_accel_type); +} + +type_init(xen_type_init); diff --git a/stubs/Makefile.objs b/stubs/Makefile.objs index 224f04ba69..6c80613dd7 100644 --- a/stubs/Makefile.objs +++ b/stubs/Makefile.objs @@ -37,3 +37,4 @@ stub-obj-y += target-monitor-defs.o stub-obj-y += target-get-monitor-def.o stub-obj-y += pc_madt_cpu_entry.o stub-obj-y += vmgenid.o +stub-obj-y += xen-common.o diff --git a/stubs/xen-common.c b/stubs/xen-common.c new file mode 100644 index 0000000000..09fce2dd36 --- /dev/null +++ b/stubs/xen-common.c @@ -0,0 +1,14 @@ +/* + * Copyright (C) 2014 Citrix Systems UK Ltd. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. 
+ * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "hw/xen/xen.h" + +void xenstore_store_pv_console_info(int i, Chardev *chr) +{ +} diff --git a/xen-common-stub.c b/xen-common-stub.c deleted file mode 100644 index 09fce2dd36..0000000000 --- a/xen-common-stub.c +++ /dev/null @@ -1,14 +0,0 @@ -/* - * Copyright (C) 2014 Citrix Systems UK Ltd. - * - * This work is licensed under the terms of the GNU GPL, version 2 or later. - * See the COPYING file in the top-level directory. - */ - -#include "qemu/osdep.h" -#include "qemu-common.h" -#include "hw/xen/xen.h" - -void xenstore_store_pv_console_info(int i, Chardev *chr) -{ -} diff --git a/xen-common.c b/xen-common.c deleted file mode 100644 index ae76150e8a..0000000000 --- a/xen-common.c +++ /dev/null @@ -1,169 +0,0 @@ -/* - * Copyright (C) 2014 Citrix Systems UK Ltd. - * - * This work is licensed under the terms of the GNU GPL, version 2. See - * the COPYING file in the top-level directory. - * - * Contributions after 2012-01-13 are licensed under the terms of the - * GNU GPL, version 2 or (at your option) any later version. - */ - -#include "qemu/osdep.h" -#include "hw/xen/xen_backend.h" -#include "qmp-commands.h" -#include "sysemu/char.h" -#include "sysemu/accel.h" -#include "migration/migration.h" - -//#define DEBUG_XEN - -#ifdef DEBUG_XEN -#define DPRINTF(fmt, ...) \ - do { fprintf(stderr, "xen: " fmt, ## __VA_ARGS__); } while (0) -#else -#define DPRINTF(fmt, ...) \ - do { } while (0) -#endif - -xc_interface *xen_xc; -xenforeignmemory_handle *xen_fmem; -xendevicemodel_handle *xen_dmod; - -static int store_dev_info(int domid, Chardev *cs, const char *string) -{ - struct xs_handle *xs = NULL; - char *path = NULL; - char *newpath = NULL; - char *pts = NULL; - int ret = -1; - - /* Only continue if we're talking to a pty. */ - if (strncmp(cs->filename, "pty:", 4)) { - return 0; - } - pts = cs->filename + 4; - - /* We now have everything we need to set the xenstore entry. 
*/
-    xs = xs_open(0);
-    if (xs == NULL) {
-        fprintf(stderr, "Could not contact XenStore\n");
-        goto out;
-    }
-
-    path = xs_get_domain_path(xs, domid);
-    if (path == NULL) {
-        fprintf(stderr, "xs_get_domain_path() error\n");
-        goto out;
-    }
-    newpath = realloc(path, (strlen(path) + strlen(string) +
-                             strlen("/tty") + 1));
-    if (newpath == NULL) {
-        fprintf(stderr, "realloc error\n");
-        goto out;
-    }
-    path = newpath;
-
-    strcat(path, string);
-    strcat(path, "/tty");
-    if (!xs_write(xs, XBT_NULL, path, pts, strlen(pts))) {
-        fprintf(stderr, "xs_write for '%s' fail", string);
-        goto out;
-    }
-    ret = 0;
-
-out:
-    free(path);
-    xs_close(xs);
-
-    return ret;
-}
-
-void xenstore_store_pv_console_info(int i, Chardev *chr)
-{
-    if (i == 0) {
-        store_dev_info(xen_domid, chr, "/console");
-    } else {
-        char buf[32];
-        snprintf(buf, sizeof(buf), "/device/console/%d", i);
-        store_dev_info(xen_domid, chr, buf);
-    }
-}
-
-
-static void xenstore_record_dm_state(struct xs_handle *xs, const char *state)
-{
-    char path[50];
-
-    if (xs == NULL) {
-        fprintf(stderr, "xenstore connection not initialized\n");
-        exit(1);
-    }
-
-    snprintf(path, sizeof (path), "device-model/%u/state", xen_domid);
-    if (!xs_write(xs, XBT_NULL, path, state, strlen(state))) {
-        fprintf(stderr, "error recording dm state\n");
-        exit(1);
-    }
-}
-
-
-static void xen_change_state_handler(void *opaque, int running,
-                                     RunState state)
-{
-    if (running) {
-        /* record state running */
-        xenstore_record_dm_state(xenstore, "running");
-    }
-}
-
-static int xen_init(MachineState *ms)
-{
-    xen_xc = xc_interface_open(0, 0, 0);
-    if (xen_xc == NULL) {
-        xen_pv_printf(NULL, 0, "can't open xen interface\n");
-        return -1;
-    }
-    xen_fmem = xenforeignmemory_open(0, 0);
-    if (xen_fmem == NULL) {
-        xen_pv_printf(NULL, 0, "can't open xen fmem interface\n");
-        xc_interface_close(xen_xc);
-        return -1;
-    }
-    xen_dmod = xendevicemodel_open(0, 0);
-    if (xen_dmod == NULL) {
-        xen_pv_printf(NULL, 0, "can't open xen devicemodel interface\n");
-        xenforeignmemory_close(xen_fmem);
-        xc_interface_close(xen_xc);
-        return -1;
-    }
-    qemu_add_vm_change_state_handler(xen_change_state_handler, NULL);
-
-    global_state_set_optional();
-    savevm_skip_configuration();
-    savevm_skip_section_footers();
-
-    return 0;
-}
-
-static void xen_accel_class_init(ObjectClass *oc, void *data)
-{
-    AccelClass *ac = ACCEL_CLASS(oc);
-    ac->name = "Xen";
-    ac->init_machine = xen_init;
-    ac->allowed = &xen_allowed;
-}
-
-#define TYPE_XEN_ACCEL ACCEL_CLASS_NAME("xen")
-
-static const TypeInfo xen_accel_type = {
-    .name = TYPE_XEN_ACCEL,
-    .parent = TYPE_ACCEL,
-    .class_init = xen_accel_class_init,
-};
-
-static void xen_type_init(void)
-{
-    type_register_static(&xen_accel_type);
-}
-
-type_init(xen_type_init);
-- cgit v1.2.3-55-g7522

From 93d43e7e11ad43f7aa1e648319385ecf289b1884 Mon Sep 17 00:00:00 2001
From: Anthony Xu
Date: Wed, 5 Apr 2017 16:21:30 -0700
Subject: move xen-hvm.c to hw/i386/xen/

move xen-hvm.c to hw/i386/xen/

Signed-off-by: Anthony Xu
Reviewed-by: Stefano Stabellini
---
 Makefile.target | 3 +-
 hw/i386/xen/Makefile.objs | 2 +-
 hw/i386/xen/trace-events | 11 +
 hw/i386/xen/xen-hvm.c | 1429 +++++++++++++++++++++++++++++++++++++++
 stubs/Makefile.objs | 1 +
 stubs/xen-hvm.c | 63 ++
 trace-events | 11 -
 xen-hvm-stub.c | 63 --
 xen-hvm.c | 1429 ---------------------------------------------
 9 files changed, 1506 insertions(+), 1506 deletions(-)
 create mode 100644 hw/i386/xen/xen-hvm.c
 create mode 100644 stubs/xen-hvm.c
 delete mode 100644 xen-hvm-stub.c
 delete mode 100644 xen-hvm.c
(limited
to 'hw') diff --git a/Makefile.target b/Makefile.target index 48c027f4b7..d5ff0c736d 100644 --- a/Makefile.target +++ b/Makefile.target @@ -150,8 +150,7 @@ obj-y += migration/ram.o migration/savevm.o LIBS := $(libs_softmmu) $(LIBS) # xen support -obj-$(CONFIG_XEN_I386) += xen-hvm.o xen-mapcache.o -obj-$(call lnot,$(CONFIG_XEN_I386)) += xen-hvm-stub.o +obj-$(CONFIG_XEN_I386) += xen-mapcache.o # Hardware support ifeq ($(TARGET_NAME), sparc64) diff --git a/hw/i386/xen/Makefile.objs b/hw/i386/xen/Makefile.objs index 801a68d326..daf4f53fb0 100644 --- a/hw/i386/xen/Makefile.objs +++ b/hw/i386/xen/Makefile.objs @@ -1 +1 @@ -obj-y += xen_platform.o xen_apic.o xen_pvdevice.o +obj-y += xen_platform.o xen_apic.o xen_pvdevice.o xen-hvm.o diff --git a/hw/i386/xen/trace-events b/hw/i386/xen/trace-events index 321fe60fed..f25d622d09 100644 --- a/hw/i386/xen/trace-events +++ b/hw/i386/xen/trace-events @@ -4,3 +4,14 @@ xen_platform_log(char *s) "xen platform: %s" # hw/i386/xen/xen_pvdevice.c xen_pv_mmio_read(uint64_t addr) "WARNING: read from Xen PV Device MMIO space (address %"PRIx64")" xen_pv_mmio_write(uint64_t addr) "WARNING: write to Xen PV Device MMIO space (address %"PRIx64")" + +# xen-hvm.c +xen_ram_alloc(unsigned long ram_addr, unsigned long size) "requested: %#lx, size %#lx" +xen_client_set_memory(uint64_t start_addr, unsigned long size, bool log_dirty) "%#"PRIx64" size %#lx, log_dirty %i" +handle_ioreq(void *req, uint32_t type, uint32_t dir, uint32_t df, uint32_t data_is_ptr, uint64_t addr, uint64_t data, uint32_t count, uint32_t size) "I/O=%p type=%d dir=%d df=%d ptr=%d port=%#"PRIx64" data=%#"PRIx64" count=%d size=%d" +handle_ioreq_read(void *req, uint32_t type, uint32_t df, uint32_t data_is_ptr, uint64_t addr, uint64_t data, uint32_t count, uint32_t size) "I/O=%p read type=%d df=%d ptr=%d port=%#"PRIx64" data=%#"PRIx64" count=%d size=%d" +handle_ioreq_write(void *req, uint32_t type, uint32_t df, uint32_t data_is_ptr, uint64_t addr, uint64_t data, uint32_t count, uint32_t size) "I/O=%p write type=%d df=%d ptr=%d port=%#"PRIx64" data=%#"PRIx64" count=%d size=%d" +cpu_ioreq_pio(void *req, uint32_t dir, uint32_t df, uint32_t data_is_ptr, uint64_t addr, uint64_t data, uint32_t count, uint32_t size) "I/O=%p pio dir=%d df=%d ptr=%d port=%#"PRIx64" data=%#"PRIx64" count=%d size=%d" +cpu_ioreq_pio_read_reg(void *req, uint64_t data, uint64_t addr, uint32_t size) "I/O=%p pio read reg data=%#"PRIx64" port=%#"PRIx64" size=%d" +cpu_ioreq_pio_write_reg(void *req, uint64_t data, uint64_t addr, uint32_t size) "I/O=%p pio write reg data=%#"PRIx64" port=%#"PRIx64" size=%d" +cpu_ioreq_move(void *req, uint32_t dir, uint32_t df, uint32_t data_is_ptr, uint64_t addr, uint64_t data, uint32_t count, uint32_t size) "I/O=%p copy dir=%d df=%d ptr=%d port=%#"PRIx64" data=%#"PRIx64" count=%d size=%d" diff --git a/hw/i386/xen/xen-hvm.c b/hw/i386/xen/xen-hvm.c new file mode 100644 index 0000000000..b1c05ffb86 --- /dev/null +++ b/hw/i386/xen/xen-hvm.c @@ -0,0 +1,1429 @@ +/* + * Copyright (C) 2010 Citrix Ltd. + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + * + * Contributions after 2012-01-13 are licensed under the terms of the + * GNU GPL, version 2 or (at your option) any later version. 
+ */
+
+#include "qemu/osdep.h"
+
+#include "cpu.h"
+#include "hw/pci/pci.h"
+#include "hw/i386/pc.h"
+#include "hw/i386/apic-msidef.h"
+#include "hw/xen/xen_common.h"
+#include "hw/xen/xen_backend.h"
+#include "qmp-commands.h"
+
+#include "sysemu/char.h"
+#include "qemu/error-report.h"
+#include "qemu/range.h"
+#include "sysemu/xen-mapcache.h"
+#include "trace.h"
+#include "exec/address-spaces.h"
+
+#include <xen/hvm/ioreq.h>
+#include <xen/hvm/params.h>
+#include <xen/hvm/e820.h>
+
+//#define DEBUG_XEN_HVM
+
+#ifdef DEBUG_XEN_HVM
+#define DPRINTF(fmt, ...) \
+    do { fprintf(stderr, "xen: " fmt, ## __VA_ARGS__); } while (0)
+#else
+#define DPRINTF(fmt, ...) \
+    do { } while (0)
+#endif
+
+static MemoryRegion ram_memory, ram_640k, ram_lo, ram_hi;
+static MemoryRegion *framebuffer;
+static bool xen_in_migration;
+
+/* Compatibility with older version */
+
+/* This allows QEMU to build on a system that has Xen 4.5 or earlier
+ * installed. This is here (not in hw/xen/xen_common.h) because xen/hvm/ioreq.h
+ * needs to be included before this block and hw/xen/xen_common.h needs to
+ * be included before xen/hvm/ioreq.h
+ */
+#ifndef IOREQ_TYPE_VMWARE_PORT
+#define IOREQ_TYPE_VMWARE_PORT 3
+struct vmware_regs {
+    uint32_t esi;
+    uint32_t edi;
+    uint32_t ebx;
+    uint32_t ecx;
+    uint32_t edx;
+};
+typedef struct vmware_regs vmware_regs_t;
+
+struct shared_vmport_iopage {
+    struct vmware_regs vcpu_vmport_regs[1];
+};
+typedef struct shared_vmport_iopage shared_vmport_iopage_t;
+#endif
+
+static inline uint32_t xen_vcpu_eport(shared_iopage_t *shared_page, int i)
+{
+    return shared_page->vcpu_ioreq[i].vp_eport;
+}
+static inline ioreq_t *xen_vcpu_ioreq(shared_iopage_t *shared_page, int vcpu)
+{
+    return &shared_page->vcpu_ioreq[vcpu];
+}
+
+#define BUFFER_IO_MAX_DELAY 100
+
+typedef struct XenPhysmap {
+    hwaddr start_addr;
+    ram_addr_t size;
+    const char *name;
+    hwaddr phys_offset;
+
+    QLIST_ENTRY(XenPhysmap) list;
+} XenPhysmap;
+
+typedef struct XenIOState {
+    ioservid_t ioservid;
+    shared_iopage_t *shared_page;
+    shared_vmport_iopage_t *shared_vmport_page;
+    buffered_iopage_t *buffered_io_page;
+    QEMUTimer *buffered_io_timer;
+    CPUState **cpu_by_vcpu_id;
+    /* the evtchn port for polling the notification, */
+    evtchn_port_t *ioreq_local_port;
+    /* evtchn local port for buffered io */
+    evtchn_port_t bufioreq_local_port;
+    /* the evtchn fd for polling */
+    xenevtchn_handle *xce_handle;
+    /* which vcpu we are serving */
+    int send_vcpu;
+
+    struct xs_handle *xenstore;
+    MemoryListener memory_listener;
+    MemoryListener io_listener;
+    DeviceListener device_listener;
+    QLIST_HEAD(, XenPhysmap) physmap;
+    hwaddr free_phys_offset;
+    const XenPhysmap *log_for_dirtybit;
+
+    Notifier exit;
+    Notifier suspend;
+    Notifier wakeup;
+} XenIOState;
+
+/* Xen specific function for piix pci */
+
+int xen_pci_slot_get_pirq(PCIDevice *pci_dev, int irq_num)
+{
+    return irq_num + ((pci_dev->devfn >> 3) << 2);
+}
+
+void xen_piix3_set_irq(void *opaque, int irq_num, int level)
+{
+    xen_set_pci_intx_level(xen_domid, 0, 0, irq_num >> 2,
+                           irq_num & 3, level);
+}
+
+void xen_piix_pci_write_config_client(uint32_t address, uint32_t val, int len)
+{
+    int i;
+
+    /* Scan for updates to PCI link routes (0x60-0x63). */
+    for (i = 0; i < len; i++) {
+        uint8_t v = (val >> (8 * i)) & 0xff;
+        if (v & 0x80) {
+            v = 0;
+        }
+        v &= 0xf;
+        if (((address + i) >= 0x60) && ((address + i) <= 0x63)) {
+            xen_set_pci_link_route(xen_domid, address + i - 0x60, v);
+        }
+    }
+}
+
+int xen_is_pirq_msi(uint32_t msi_data)
+{
+    /* If vector is 0, the msi is remapped into a pirq, passed as
+     * dest_id.
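+     * For example, data 0x4041 carries vector 0x41 and is delivered as a
+     * normal MSI, while data with a zero low byte (vector == 0) is routed
+     * as a pirq.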
+ */ + return ((msi_data & MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT) == 0; +} + +void xen_hvm_inject_msi(uint64_t addr, uint32_t data) +{ + xen_inject_msi(xen_domid, addr, data); +} + +static void xen_suspend_notifier(Notifier *notifier, void *data) +{ + xc_set_hvm_param(xen_xc, xen_domid, HVM_PARAM_ACPI_S_STATE, 3); +} + +/* Xen Interrupt Controller */ + +static void xen_set_irq(void *opaque, int irq, int level) +{ + xen_set_isa_irq_level(xen_domid, irq, level); +} + +qemu_irq *xen_interrupt_controller_init(void) +{ + return qemu_allocate_irqs(xen_set_irq, NULL, 16); +} + +/* Memory Ops */ + +static void xen_ram_init(PCMachineState *pcms, + ram_addr_t ram_size, MemoryRegion **ram_memory_p) +{ + MemoryRegion *sysmem = get_system_memory(); + ram_addr_t block_len; + uint64_t user_lowmem = object_property_get_int(qdev_get_machine(), + PC_MACHINE_MAX_RAM_BELOW_4G, + &error_abort); + + /* Handle the machine opt max-ram-below-4g. It is basically doing + * min(xen limit, user limit). + */ + if (!user_lowmem) { + user_lowmem = HVM_BELOW_4G_RAM_END; /* default */ + } + if (HVM_BELOW_4G_RAM_END <= user_lowmem) { + user_lowmem = HVM_BELOW_4G_RAM_END; + } + + if (ram_size >= user_lowmem) { + pcms->above_4g_mem_size = ram_size - user_lowmem; + pcms->below_4g_mem_size = user_lowmem; + } else { + pcms->above_4g_mem_size = 0; + pcms->below_4g_mem_size = ram_size; + } + if (!pcms->above_4g_mem_size) { + block_len = ram_size; + } else { + /* + * Xen does not allocate the memory continuously, it keeps a + * hole of the size computed above or passed in. + */ + block_len = (1ULL << 32) + pcms->above_4g_mem_size; + } + memory_region_init_ram(&ram_memory, NULL, "xen.ram", block_len, + &error_fatal); + *ram_memory_p = &ram_memory; + vmstate_register_ram_global(&ram_memory); + + memory_region_init_alias(&ram_640k, NULL, "xen.ram.640k", + &ram_memory, 0, 0xa0000); + memory_region_add_subregion(sysmem, 0, &ram_640k); + /* Skip of the VGA IO memory space, it will be registered later by the VGA + * emulated device. + * + * The area between 0xc0000 and 0x100000 will be used by SeaBIOS to load + * the Options ROM, so it is registered here as RAM. 
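+     *
+     * The resulting layout: xen.ram.640k aliases 0x0-0xa0000, the VGA hole
+     * 0xa0000-0xc0000 stays unmapped here, and xen.ram.lo covers 0xc0000 up
+     * to below_4g_mem_size.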
+ */ + memory_region_init_alias(&ram_lo, NULL, "xen.ram.lo", + &ram_memory, 0xc0000, + pcms->below_4g_mem_size - 0xc0000); + memory_region_add_subregion(sysmem, 0xc0000, &ram_lo); + if (pcms->above_4g_mem_size > 0) { + memory_region_init_alias(&ram_hi, NULL, "xen.ram.hi", + &ram_memory, 0x100000000ULL, + pcms->above_4g_mem_size); + memory_region_add_subregion(sysmem, 0x100000000ULL, &ram_hi); + } +} + +void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size, MemoryRegion *mr, + Error **errp) +{ + unsigned long nr_pfn; + xen_pfn_t *pfn_list; + int i; + + if (runstate_check(RUN_STATE_INMIGRATE)) { + /* RAM already populated in Xen */ + fprintf(stderr, "%s: do not alloc "RAM_ADDR_FMT + " bytes of ram at "RAM_ADDR_FMT" when runstate is INMIGRATE\n", + __func__, size, ram_addr); + return; + } + + if (mr == &ram_memory) { + return; + } + + trace_xen_ram_alloc(ram_addr, size); + + nr_pfn = size >> TARGET_PAGE_BITS; + pfn_list = g_malloc(sizeof (*pfn_list) * nr_pfn); + + for (i = 0; i < nr_pfn; i++) { + pfn_list[i] = (ram_addr >> TARGET_PAGE_BITS) + i; + } + + if (xc_domain_populate_physmap_exact(xen_xc, xen_domid, nr_pfn, 0, 0, pfn_list)) { + error_setg(errp, "xen: failed to populate ram at " RAM_ADDR_FMT, + ram_addr); + } + + g_free(pfn_list); +} + +static XenPhysmap *get_physmapping(XenIOState *state, + hwaddr start_addr, ram_addr_t size) +{ + XenPhysmap *physmap = NULL; + + start_addr &= TARGET_PAGE_MASK; + + QLIST_FOREACH(physmap, &state->physmap, list) { + if (range_covers_byte(physmap->start_addr, physmap->size, start_addr)) { + return physmap; + } + } + return NULL; +} + +static hwaddr xen_phys_offset_to_gaddr(hwaddr start_addr, + ram_addr_t size, void *opaque) +{ + hwaddr addr = start_addr & TARGET_PAGE_MASK; + XenIOState *xen_io_state = opaque; + XenPhysmap *physmap = NULL; + + QLIST_FOREACH(physmap, &xen_io_state->physmap, list) { + if (range_covers_byte(physmap->phys_offset, physmap->size, addr)) { + return physmap->start_addr; + } + } + + return start_addr; +} + +static int xen_add_to_physmap(XenIOState *state, + hwaddr start_addr, + ram_addr_t size, + MemoryRegion *mr, + hwaddr offset_within_region) +{ + unsigned long i = 0; + int rc = 0; + XenPhysmap *physmap = NULL; + hwaddr pfn, start_gpfn; + hwaddr phys_offset = memory_region_get_ram_addr(mr); + char path[80], value[17]; + const char *mr_name; + + if (get_physmapping(state, start_addr, size)) { + return 0; + } + if (size <= 0) { + return -1; + } + + /* Xen can only handle a single dirty log region for now and we want + * the linear framebuffer to be that region. + * Avoid tracking any regions that is not videoram and avoid tracking + * the legacy vga region. 
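+     * Concretely: only the region registered via xen_register_framebuffer()
+     * with a start address above 0xbffff is added to the physmap; any other
+     * section returns -1 and stays untracked.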
*/ + if (mr == framebuffer && start_addr > 0xbffff) { + goto go_physmap; + } + return -1; + +go_physmap: + DPRINTF("mapping vram to %"HWADDR_PRIx" - %"HWADDR_PRIx"\n", + start_addr, start_addr + size); + + pfn = phys_offset >> TARGET_PAGE_BITS; + start_gpfn = start_addr >> TARGET_PAGE_BITS; + for (i = 0; i < size >> TARGET_PAGE_BITS; i++) { + unsigned long idx = pfn + i; + xen_pfn_t gpfn = start_gpfn + i; + + rc = xen_xc_domain_add_to_physmap(xen_xc, xen_domid, XENMAPSPACE_gmfn, idx, gpfn); + if (rc) { + DPRINTF("add_to_physmap MFN %"PRI_xen_pfn" to PFN %" + PRI_xen_pfn" failed: %d (errno: %d)\n", idx, gpfn, rc, errno); + return -rc; + } + } + + mr_name = memory_region_name(mr); + + physmap = g_malloc(sizeof (XenPhysmap)); + + physmap->start_addr = start_addr; + physmap->size = size; + physmap->name = mr_name; + physmap->phys_offset = phys_offset; + + QLIST_INSERT_HEAD(&state->physmap, physmap, list); + + xc_domain_pin_memory_cacheattr(xen_xc, xen_domid, + start_addr >> TARGET_PAGE_BITS, + (start_addr + size - 1) >> TARGET_PAGE_BITS, + XEN_DOMCTL_MEM_CACHEATTR_WB); + + snprintf(path, sizeof(path), + "/local/domain/0/device-model/%d/physmap/%"PRIx64"/start_addr", + xen_domid, (uint64_t)phys_offset); + snprintf(value, sizeof(value), "%"PRIx64, (uint64_t)start_addr); + if (!xs_write(state->xenstore, 0, path, value, strlen(value))) { + return -1; + } + snprintf(path, sizeof(path), + "/local/domain/0/device-model/%d/physmap/%"PRIx64"/size", + xen_domid, (uint64_t)phys_offset); + snprintf(value, sizeof(value), "%"PRIx64, (uint64_t)size); + if (!xs_write(state->xenstore, 0, path, value, strlen(value))) { + return -1; + } + if (mr_name) { + snprintf(path, sizeof(path), + "/local/domain/0/device-model/%d/physmap/%"PRIx64"/name", + xen_domid, (uint64_t)phys_offset); + if (!xs_write(state->xenstore, 0, path, mr_name, strlen(mr_name))) { + return -1; + } + } + + return 0; +} + +static int xen_remove_from_physmap(XenIOState *state, + hwaddr start_addr, + ram_addr_t size) +{ + unsigned long i = 0; + int rc = 0; + XenPhysmap *physmap = NULL; + hwaddr phys_offset = 0; + + physmap = get_physmapping(state, start_addr, size); + if (physmap == NULL) { + return -1; + } + + phys_offset = physmap->phys_offset; + size = physmap->size; + + DPRINTF("unmapping vram to %"HWADDR_PRIx" - %"HWADDR_PRIx", at " + "%"HWADDR_PRIx"\n", start_addr, start_addr + size, phys_offset); + + size >>= TARGET_PAGE_BITS; + start_addr >>= TARGET_PAGE_BITS; + phys_offset >>= TARGET_PAGE_BITS; + for (i = 0; i < size; i++) { + xen_pfn_t idx = start_addr + i; + xen_pfn_t gpfn = phys_offset + i; + + rc = xen_xc_domain_add_to_physmap(xen_xc, xen_domid, XENMAPSPACE_gmfn, idx, gpfn); + if (rc) { + fprintf(stderr, "add_to_physmap MFN %"PRI_xen_pfn" to PFN %" + PRI_xen_pfn" failed: %d (errno: %d)\n", idx, gpfn, rc, errno); + return -rc; + } + } + + QLIST_REMOVE(physmap, list); + if (state->log_for_dirtybit == physmap) { + state->log_for_dirtybit = NULL; + } + g_free(physmap); + + return 0; +} + +static void xen_set_memory(struct MemoryListener *listener, + MemoryRegionSection *section, + bool add) +{ + XenIOState *state = container_of(listener, XenIOState, memory_listener); + hwaddr start_addr = section->offset_within_address_space; + ram_addr_t size = int128_get64(section->size); + bool log_dirty = memory_region_is_logging(section->mr, DIRTY_MEMORY_VGA); + hvmmem_type_t mem_type; + + if (section->mr == &ram_memory) { + return; + } else { + if (add) { + xen_map_memory_section(xen_domid, state->ioservid, + section); + } else { + 
xen_unmap_memory_section(xen_domid, state->ioservid, + section); + } + } + + if (!memory_region_is_ram(section->mr)) { + return; + } + + if (log_dirty != add) { + return; + } + + trace_xen_client_set_memory(start_addr, size, log_dirty); + + start_addr &= TARGET_PAGE_MASK; + size = TARGET_PAGE_ALIGN(size); + + if (add) { + if (!memory_region_is_rom(section->mr)) { + xen_add_to_physmap(state, start_addr, size, + section->mr, section->offset_within_region); + } else { + mem_type = HVMMEM_ram_ro; + if (xen_set_mem_type(xen_domid, mem_type, + start_addr >> TARGET_PAGE_BITS, + size >> TARGET_PAGE_BITS)) { + DPRINTF("xen_set_mem_type error, addr: "TARGET_FMT_plx"\n", + start_addr); + } + } + } else { + if (xen_remove_from_physmap(state, start_addr, size) < 0) { + DPRINTF("physmapping does not exist at "TARGET_FMT_plx"\n", start_addr); + } + } +} + +static void xen_region_add(MemoryListener *listener, + MemoryRegionSection *section) +{ + memory_region_ref(section->mr); + xen_set_memory(listener, section, true); +} + +static void xen_region_del(MemoryListener *listener, + MemoryRegionSection *section) +{ + xen_set_memory(listener, section, false); + memory_region_unref(section->mr); +} + +static void xen_io_add(MemoryListener *listener, + MemoryRegionSection *section) +{ + XenIOState *state = container_of(listener, XenIOState, io_listener); + MemoryRegion *mr = section->mr; + + if (mr->ops == &unassigned_io_ops) { + return; + } + + memory_region_ref(mr); + + xen_map_io_section(xen_domid, state->ioservid, section); +} + +static void xen_io_del(MemoryListener *listener, + MemoryRegionSection *section) +{ + XenIOState *state = container_of(listener, XenIOState, io_listener); + MemoryRegion *mr = section->mr; + + if (mr->ops == &unassigned_io_ops) { + return; + } + + xen_unmap_io_section(xen_domid, state->ioservid, section); + + memory_region_unref(mr); +} + +static void xen_device_realize(DeviceListener *listener, + DeviceState *dev) +{ + XenIOState *state = container_of(listener, XenIOState, device_listener); + + if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) { + PCIDevice *pci_dev = PCI_DEVICE(dev); + + xen_map_pcidev(xen_domid, state->ioservid, pci_dev); + } +} + +static void xen_device_unrealize(DeviceListener *listener, + DeviceState *dev) +{ + XenIOState *state = container_of(listener, XenIOState, device_listener); + + if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) { + PCIDevice *pci_dev = PCI_DEVICE(dev); + + xen_unmap_pcidev(xen_domid, state->ioservid, pci_dev); + } +} + +static void xen_sync_dirty_bitmap(XenIOState *state, + hwaddr start_addr, + ram_addr_t size) +{ + hwaddr npages = size >> TARGET_PAGE_BITS; + const int width = sizeof(unsigned long) * 8; + unsigned long bitmap[DIV_ROUND_UP(npages, width)]; + int rc, i, j; + const XenPhysmap *physmap = NULL; + + physmap = get_physmapping(state, start_addr, size); + if (physmap == NULL) { + /* not handled */ + return; + } + + if (state->log_for_dirtybit == NULL) { + state->log_for_dirtybit = physmap; + } else if (state->log_for_dirtybit != physmap) { + /* Only one range for dirty bitmap can be tracked. 
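+         * log_for_dirtybit latches the first physmap that requested dirty
+         * logging; ranges that do not match it are ignored here until
+         * xen_log_stop() clears the pointer.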
*/ + return; + } + + rc = xen_track_dirty_vram(xen_domid, start_addr >> TARGET_PAGE_BITS, + npages, bitmap); + if (rc < 0) { +#ifndef ENODATA +#define ENODATA ENOENT +#endif + if (errno == ENODATA) { + memory_region_set_dirty(framebuffer, 0, size); + DPRINTF("xen: track_dirty_vram failed (0x" TARGET_FMT_plx + ", 0x" TARGET_FMT_plx "): %s\n", + start_addr, start_addr + size, strerror(errno)); + } + return; + } + + for (i = 0; i < ARRAY_SIZE(bitmap); i++) { + unsigned long map = bitmap[i]; + while (map != 0) { + j = ctzl(map); + map &= ~(1ul << j); + memory_region_set_dirty(framebuffer, + (i * width + j) * TARGET_PAGE_SIZE, + TARGET_PAGE_SIZE); + }; + } +} + +static void xen_log_start(MemoryListener *listener, + MemoryRegionSection *section, + int old, int new) +{ + XenIOState *state = container_of(listener, XenIOState, memory_listener); + + if (new & ~old & (1 << DIRTY_MEMORY_VGA)) { + xen_sync_dirty_bitmap(state, section->offset_within_address_space, + int128_get64(section->size)); + } +} + +static void xen_log_stop(MemoryListener *listener, MemoryRegionSection *section, + int old, int new) +{ + XenIOState *state = container_of(listener, XenIOState, memory_listener); + + if (old & ~new & (1 << DIRTY_MEMORY_VGA)) { + state->log_for_dirtybit = NULL; + /* Disable dirty bit tracking */ + xen_track_dirty_vram(xen_domid, 0, 0, NULL); + } +} + +static void xen_log_sync(MemoryListener *listener, MemoryRegionSection *section) +{ + XenIOState *state = container_of(listener, XenIOState, memory_listener); + + xen_sync_dirty_bitmap(state, section->offset_within_address_space, + int128_get64(section->size)); +} + +static void xen_log_global_start(MemoryListener *listener) +{ + if (xen_enabled()) { + xen_in_migration = true; + } +} + +static void xen_log_global_stop(MemoryListener *listener) +{ + xen_in_migration = false; +} + +static MemoryListener xen_memory_listener = { + .region_add = xen_region_add, + .region_del = xen_region_del, + .log_start = xen_log_start, + .log_stop = xen_log_stop, + .log_sync = xen_log_sync, + .log_global_start = xen_log_global_start, + .log_global_stop = xen_log_global_stop, + .priority = 10, +}; + +static MemoryListener xen_io_listener = { + .region_add = xen_io_add, + .region_del = xen_io_del, + .priority = 10, +}; + +static DeviceListener xen_device_listener = { + .realize = xen_device_realize, + .unrealize = xen_device_unrealize, +}; + +/* get the ioreq packets from share mem */ +static ioreq_t *cpu_get_ioreq_from_shared_memory(XenIOState *state, int vcpu) +{ + ioreq_t *req = xen_vcpu_ioreq(state->shared_page, vcpu); + + if (req->state != STATE_IOREQ_READY) { + DPRINTF("I/O request not ready: " + "%x, ptr: %x, port: %"PRIx64", " + "data: %"PRIx64", count: %u, size: %u\n", + req->state, req->data_is_ptr, req->addr, + req->data, req->count, req->size); + return NULL; + } + + xen_rmb(); /* see IOREQ_READY /then/ read contents of ioreq */ + + req->state = STATE_IOREQ_INPROCESS; + return req; +} + +/* use poll to get the port notification */ +/* ioreq_vec--out,the */ +/* retval--the number of ioreq packet */ +static ioreq_t *cpu_get_ioreq(XenIOState *state) +{ + int i; + evtchn_port_t port; + + port = xenevtchn_pending(state->xce_handle); + if (port == state->bufioreq_local_port) { + timer_mod(state->buffered_io_timer, + BUFFER_IO_MAX_DELAY + qemu_clock_get_ms(QEMU_CLOCK_REALTIME)); + return NULL; + } + + if (port != -1) { + for (i = 0; i < max_cpus; i++) { + if (state->ioreq_local_port[i] == port) { + break; + } + } + + if (i == max_cpus) { + hw_error("Fatal error while 
trying to get io event!\n"); + } + + /* unmask the wanted port again */ + xenevtchn_unmask(state->xce_handle, port); + + /* get the io packet from shared memory */ + state->send_vcpu = i; + return cpu_get_ioreq_from_shared_memory(state, i); + } + + /* read error or read nothing */ + return NULL; +} + +static uint32_t do_inp(uint32_t addr, unsigned long size) +{ + switch (size) { + case 1: + return cpu_inb(addr); + case 2: + return cpu_inw(addr); + case 4: + return cpu_inl(addr); + default: + hw_error("inp: bad size: %04x %lx", addr, size); + } +} + +static void do_outp(uint32_t addr, + unsigned long size, uint32_t val) +{ + switch (size) { + case 1: + return cpu_outb(addr, val); + case 2: + return cpu_outw(addr, val); + case 4: + return cpu_outl(addr, val); + default: + hw_error("outp: bad size: %04x %lx", addr, size); + } +} + +/* + * Helper functions which read/write an object from/to physical guest + * memory, as part of the implementation of an ioreq. + * + * Equivalent to + * cpu_physical_memory_rw(addr + (req->df ? -1 : +1) * req->size * i, + * val, req->size, 0/1) + * except without the integer overflow problems. + */ +static void rw_phys_req_item(hwaddr addr, + ioreq_t *req, uint32_t i, void *val, int rw) +{ + /* Do everything unsigned so overflow just results in a truncated result + * and accesses to undesired parts of guest memory, which is up + * to the guest */ + hwaddr offset = (hwaddr)req->size * i; + if (req->df) { + addr -= offset; + } else { + addr += offset; + } + cpu_physical_memory_rw(addr, val, req->size, rw); +} + +static inline void read_phys_req_item(hwaddr addr, + ioreq_t *req, uint32_t i, void *val) +{ + rw_phys_req_item(addr, req, i, val, 0); +} +static inline void write_phys_req_item(hwaddr addr, + ioreq_t *req, uint32_t i, void *val) +{ + rw_phys_req_item(addr, req, i, val, 1); +} + + +static void cpu_ioreq_pio(ioreq_t *req) +{ + uint32_t i; + + trace_cpu_ioreq_pio(req, req->dir, req->df, req->data_is_ptr, req->addr, + req->data, req->count, req->size); + + if (req->size > sizeof(uint32_t)) { + hw_error("PIO: bad size (%u)", req->size); + } + + if (req->dir == IOREQ_READ) { + if (!req->data_is_ptr) { + req->data = do_inp(req->addr, req->size); + trace_cpu_ioreq_pio_read_reg(req, req->data, req->addr, + req->size); + } else { + uint32_t tmp; + + for (i = 0; i < req->count; i++) { + tmp = do_inp(req->addr, req->size); + write_phys_req_item(req->data, req, i, &tmp); + } + } + } else if (req->dir == IOREQ_WRITE) { + if (!req->data_is_ptr) { + trace_cpu_ioreq_pio_write_reg(req, req->data, req->addr, + req->size); + do_outp(req->addr, req->size, req->data); + } else { + for (i = 0; i < req->count; i++) { + uint32_t tmp = 0; + + read_phys_req_item(req->data, req, i, &tmp); + do_outp(req->addr, req->size, tmp); + } + } + } +} + +static void cpu_ioreq_move(ioreq_t *req) +{ + uint32_t i; + + trace_cpu_ioreq_move(req, req->dir, req->df, req->data_is_ptr, req->addr, + req->data, req->count, req->size); + + if (req->size > sizeof(req->data)) { + hw_error("MMIO: bad size (%u)", req->size); + } + + if (!req->data_is_ptr) { + if (req->dir == IOREQ_READ) { + for (i = 0; i < req->count; i++) { + read_phys_req_item(req->addr, req, i, &req->data); + } + } else if (req->dir == IOREQ_WRITE) { + for (i = 0; i < req->count; i++) { + write_phys_req_item(req->addr, req, i, &req->data); + } + } + } else { + uint64_t tmp; + + if (req->dir == IOREQ_READ) { + for (i = 0; i < req->count; i++) { + read_phys_req_item(req->addr, req, i, &tmp); + write_phys_req_item(req->data, req, i, &tmp); + } 
+ } else if (req->dir == IOREQ_WRITE) { + for (i = 0; i < req->count; i++) { + read_phys_req_item(req->data, req, i, &tmp); + write_phys_req_item(req->addr, req, i, &tmp); + } + } + } +} + +static void regs_to_cpu(vmware_regs_t *vmport_regs, ioreq_t *req) +{ + X86CPU *cpu; + CPUX86State *env; + + cpu = X86_CPU(current_cpu); + env = &cpu->env; + env->regs[R_EAX] = req->data; + env->regs[R_EBX] = vmport_regs->ebx; + env->regs[R_ECX] = vmport_regs->ecx; + env->regs[R_EDX] = vmport_regs->edx; + env->regs[R_ESI] = vmport_regs->esi; + env->regs[R_EDI] = vmport_regs->edi; +} + +static void regs_from_cpu(vmware_regs_t *vmport_regs) +{ + X86CPU *cpu = X86_CPU(current_cpu); + CPUX86State *env = &cpu->env; + + vmport_regs->ebx = env->regs[R_EBX]; + vmport_regs->ecx = env->regs[R_ECX]; + vmport_regs->edx = env->regs[R_EDX]; + vmport_regs->esi = env->regs[R_ESI]; + vmport_regs->edi = env->regs[R_EDI]; +} + +static void handle_vmport_ioreq(XenIOState *state, ioreq_t *req) +{ + vmware_regs_t *vmport_regs; + + assert(state->shared_vmport_page); + vmport_regs = + &state->shared_vmport_page->vcpu_vmport_regs[state->send_vcpu]; + QEMU_BUILD_BUG_ON(sizeof(*req) < sizeof(*vmport_regs)); + + current_cpu = state->cpu_by_vcpu_id[state->send_vcpu]; + regs_to_cpu(vmport_regs, req); + cpu_ioreq_pio(req); + regs_from_cpu(vmport_regs); + current_cpu = NULL; +} + +static void handle_ioreq(XenIOState *state, ioreq_t *req) +{ + trace_handle_ioreq(req, req->type, req->dir, req->df, req->data_is_ptr, + req->addr, req->data, req->count, req->size); + + if (!req->data_is_ptr && (req->dir == IOREQ_WRITE) && + (req->size < sizeof (target_ulong))) { + req->data &= ((target_ulong) 1 << (8 * req->size)) - 1; + } + + if (req->dir == IOREQ_WRITE) + trace_handle_ioreq_write(req, req->type, req->df, req->data_is_ptr, + req->addr, req->data, req->count, req->size); + + switch (req->type) { + case IOREQ_TYPE_PIO: + cpu_ioreq_pio(req); + break; + case IOREQ_TYPE_COPY: + cpu_ioreq_move(req); + break; + case IOREQ_TYPE_VMWARE_PORT: + handle_vmport_ioreq(state, req); + break; + case IOREQ_TYPE_TIMEOFFSET: + break; + case IOREQ_TYPE_INVALIDATE: + xen_invalidate_map_cache(); + break; + case IOREQ_TYPE_PCI_CONFIG: { + uint32_t sbdf = req->addr >> 32; + uint32_t val; + + /* Fake a write to port 0xCF8 so that + * the config space access will target the + * correct device model. 
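+     * For example, a config access to register 0x04 of device 00:03.0
+     * (bdf 0x0018) yields the CF8 value 0x80001804.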
+ */
+        val = (1u << 31) |
+              ((req->addr & 0x0f00) << 16) |
+              ((sbdf & 0xffff) << 8) |
+              (req->addr & 0xfc);
+        do_outp(0xcf8, 4, val);
+
+        /* Now issue the config space access via
+         * port 0xCFC
+         */
+        req->addr = 0xcfc | (req->addr & 0x03);
+        cpu_ioreq_pio(req);
+        break;
+    }
+    default:
+        hw_error("Invalid ioreq type 0x%x\n", req->type);
+    }
+    if (req->dir == IOREQ_READ) {
+        trace_handle_ioreq_read(req, req->type, req->df, req->data_is_ptr,
+                                req->addr, req->data, req->count, req->size);
+    }
+}
+
+static int handle_buffered_iopage(XenIOState *state)
+{
+    buffered_iopage_t *buf_page = state->buffered_io_page;
+    buf_ioreq_t *buf_req = NULL;
+    ioreq_t req;
+    int qw;
+
+    if (!buf_page) {
+        return 0;
+    }
+
+    memset(&req, 0x00, sizeof(req));
+    req.state = STATE_IOREQ_READY;
+    req.count = 1;
+    req.dir = IOREQ_WRITE;
+
+    for (;;) {
+        uint32_t rdptr = buf_page->read_pointer, wrptr;
+
+        xen_rmb();
+        wrptr = buf_page->write_pointer;
+        xen_rmb();
+        if (rdptr != buf_page->read_pointer) {
+            continue;
+        }
+        if (rdptr == wrptr) {
+            break;
+        }
+        buf_req = &buf_page->buf_ioreq[rdptr % IOREQ_BUFFER_SLOT_NUM];
+        req.size = 1U << buf_req->size;
+        req.addr = buf_req->addr;
+        req.data = buf_req->data;
+        req.type = buf_req->type;
+        xen_rmb();
+        qw = (req.size == 8);
+        if (qw) {
+            if (rdptr + 1 == wrptr) {
+                hw_error("Incomplete quad word buffered ioreq");
+            }
+            buf_req = &buf_page->buf_ioreq[(rdptr + 1) %
+                                           IOREQ_BUFFER_SLOT_NUM];
+            req.data |= ((uint64_t)buf_req->data) << 32;
+            xen_rmb();
+        }
+
+        handle_ioreq(state, &req);
+
+        /* Only req.data may get updated by handle_ioreq(), albeit even that
+         * should not happen as such data would never make it to the guest (we
+         * can only usefully see writes here after all).
+         */
+        assert(req.state == STATE_IOREQ_READY);
+        assert(req.count == 1);
+        assert(req.dir == IOREQ_WRITE);
+        assert(!req.data_is_ptr);
+
+        atomic_add(&buf_page->read_pointer, qw + 1);
+    }
+
+    return req.count;
+}
+
+static void handle_buffered_io(void *opaque)
+{
+    XenIOState *state = opaque;
+
+    if (handle_buffered_iopage(state)) {
+        timer_mod(state->buffered_io_timer,
+                BUFFER_IO_MAX_DELAY + qemu_clock_get_ms(QEMU_CLOCK_REALTIME));
+    } else {
+        timer_del(state->buffered_io_timer);
+        xenevtchn_unmask(state->xce_handle, state->bufioreq_local_port);
+    }
+}
+
+static void cpu_handle_ioreq(void *opaque)
+{
+    XenIOState *state = opaque;
+    ioreq_t *req = cpu_get_ioreq(state);
+
+    handle_buffered_iopage(state);
+    if (req) {
+        ioreq_t copy = *req;
+
+        xen_rmb();
+        handle_ioreq(state, &copy);
+        req->data = copy.data;
+
+        if (req->state != STATE_IOREQ_INPROCESS) {
+            fprintf(stderr, "Badness in I/O request ... not in service?!: "
+                    "%x, ptr: %x, port: %"PRIx64", "
+                    "data: %"PRIx64", count: %u, size: %u, type: %u\n",
+                    req->state, req->data_is_ptr, req->addr,
+                    req->data, req->count, req->size, req->type);
+            destroy_hvm_domain(false);
+            return;
+        }
+
+        xen_wmb(); /* Update ioreq contents /then/ update state. */
+
+        /*
+         * We do this before we send the response so that the tools
+         * have the opportunity to pick up on the reset before the
+         * guest resumes and does a hlt with interrupts disabled which
+         * causes Xen to powerdown the domain.
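+         * Checking the run state here gives a pending shutdown or reset
+         * request a chance to be acted on before the response is sent.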
+ */ + if (runstate_is_running()) { + if (qemu_shutdown_requested_get()) { + destroy_hvm_domain(false); + } + if (qemu_reset_requested_get()) { + qemu_system_reset(VMRESET_REPORT); + destroy_hvm_domain(true); + } + } + + req->state = STATE_IORESP_READY; + xenevtchn_notify(state->xce_handle, + state->ioreq_local_port[state->send_vcpu]); + } +} + +static void xen_main_loop_prepare(XenIOState *state) +{ + int evtchn_fd = -1; + + if (state->xce_handle != NULL) { + evtchn_fd = xenevtchn_fd(state->xce_handle); + } + + state->buffered_io_timer = timer_new_ms(QEMU_CLOCK_REALTIME, handle_buffered_io, + state); + + if (evtchn_fd != -1) { + CPUState *cpu_state; + + DPRINTF("%s: Init cpu_by_vcpu_id\n", __func__); + CPU_FOREACH(cpu_state) { + DPRINTF("%s: cpu_by_vcpu_id[%d]=%p\n", + __func__, cpu_state->cpu_index, cpu_state); + state->cpu_by_vcpu_id[cpu_state->cpu_index] = cpu_state; + } + qemu_set_fd_handler(evtchn_fd, cpu_handle_ioreq, NULL, state); + } +} + + +static void xen_hvm_change_state_handler(void *opaque, int running, + RunState rstate) +{ + XenIOState *state = opaque; + + if (running) { + xen_main_loop_prepare(state); + } + + xen_set_ioreq_server_state(xen_domid, + state->ioservid, + (rstate == RUN_STATE_RUNNING)); +} + +static void xen_exit_notifier(Notifier *n, void *data) +{ + XenIOState *state = container_of(n, XenIOState, exit); + + xenevtchn_close(state->xce_handle); + xs_daemon_close(state->xenstore); +} + +static void xen_read_physmap(XenIOState *state) +{ + XenPhysmap *physmap = NULL; + unsigned int len, num, i; + char path[80], *value = NULL; + char **entries = NULL; + + snprintf(path, sizeof(path), + "/local/domain/0/device-model/%d/physmap", xen_domid); + entries = xs_directory(state->xenstore, 0, path, &num); + if (entries == NULL) + return; + + for (i = 0; i < num; i++) { + physmap = g_malloc(sizeof (XenPhysmap)); + physmap->phys_offset = strtoull(entries[i], NULL, 16); + snprintf(path, sizeof(path), + "/local/domain/0/device-model/%d/physmap/%s/start_addr", + xen_domid, entries[i]); + value = xs_read(state->xenstore, 0, path, &len); + if (value == NULL) { + g_free(physmap); + continue; + } + physmap->start_addr = strtoull(value, NULL, 16); + free(value); + + snprintf(path, sizeof(path), + "/local/domain/0/device-model/%d/physmap/%s/size", + xen_domid, entries[i]); + value = xs_read(state->xenstore, 0, path, &len); + if (value == NULL) { + g_free(physmap); + continue; + } + physmap->size = strtoull(value, NULL, 16); + free(value); + + snprintf(path, sizeof(path), + "/local/domain/0/device-model/%d/physmap/%s/name", + xen_domid, entries[i]); + physmap->name = xs_read(state->xenstore, 0, path, &len); + + QLIST_INSERT_HEAD(&state->physmap, physmap, list); + } + free(entries); +} + +static void xen_wakeup_notifier(Notifier *notifier, void *data) +{ + xc_set_hvm_param(xen_xc, xen_domid, HVM_PARAM_ACPI_S_STATE, 0); +} + +void xen_hvm_init(PCMachineState *pcms, MemoryRegion **ram_memory) +{ + int i, rc; + xen_pfn_t ioreq_pfn; + xen_pfn_t bufioreq_pfn; + evtchn_port_t bufioreq_evtchn; + XenIOState *state; + + state = g_malloc0(sizeof (XenIOState)); + + state->xce_handle = xenevtchn_open(NULL, 0); + if (state->xce_handle == NULL) { + perror("xen: event channel open"); + goto err; + } + + state->xenstore = xs_daemon_open(); + if (state->xenstore == NULL) { + perror("xen: xenstore open"); + goto err; + } + + if (xen_domid_restrict) { + rc = xen_restrict(xen_domid); + if (rc < 0) { + error_report("failed to restrict: error %d", errno); + goto err; + } + } + + 
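+    /*
+     * Create an ioreq server so that Xen routes this domain's emulated
+     * I/O requests to this QEMU process.
+     */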
xen_create_ioreq_server(xen_domid, &state->ioservid); + + state->exit.notify = xen_exit_notifier; + qemu_add_exit_notifier(&state->exit); + + state->suspend.notify = xen_suspend_notifier; + qemu_register_suspend_notifier(&state->suspend); + + state->wakeup.notify = xen_wakeup_notifier; + qemu_register_wakeup_notifier(&state->wakeup); + + rc = xen_get_ioreq_server_info(xen_domid, state->ioservid, + &ioreq_pfn, &bufioreq_pfn, + &bufioreq_evtchn); + if (rc < 0) { + error_report("failed to get ioreq server info: error %d handle=%p", + errno, xen_xc); + goto err; + } + + DPRINTF("shared page at pfn %lx\n", ioreq_pfn); + DPRINTF("buffered io page at pfn %lx\n", bufioreq_pfn); + DPRINTF("buffered io evtchn is %x\n", bufioreq_evtchn); + + state->shared_page = xenforeignmemory_map(xen_fmem, xen_domid, + PROT_READ|PROT_WRITE, + 1, &ioreq_pfn, NULL); + if (state->shared_page == NULL) { + error_report("map shared IO page returned error %d handle=%p", + errno, xen_xc); + goto err; + } + + rc = xen_get_vmport_regs_pfn(xen_xc, xen_domid, &ioreq_pfn); + if (!rc) { + DPRINTF("shared vmport page at pfn %lx\n", ioreq_pfn); + state->shared_vmport_page = + xenforeignmemory_map(xen_fmem, xen_domid, PROT_READ|PROT_WRITE, + 1, &ioreq_pfn, NULL); + if (state->shared_vmport_page == NULL) { + error_report("map shared vmport IO page returned error %d handle=%p", + errno, xen_xc); + goto err; + } + } else if (rc != -ENOSYS) { + error_report("get vmport regs pfn returned error %d, rc=%d", + errno, rc); + goto err; + } + + state->buffered_io_page = xenforeignmemory_map(xen_fmem, xen_domid, + PROT_READ|PROT_WRITE, + 1, &bufioreq_pfn, NULL); + if (state->buffered_io_page == NULL) { + error_report("map buffered IO page returned error %d", errno); + goto err; + } + + /* Note: cpus is empty at this point in init */ + state->cpu_by_vcpu_id = g_malloc0(max_cpus * sizeof(CPUState *)); + + rc = xen_set_ioreq_server_state(xen_domid, state->ioservid, true); + if (rc < 0) { + error_report("failed to enable ioreq server info: error %d handle=%p", + errno, xen_xc); + goto err; + } + + state->ioreq_local_port = g_malloc0(max_cpus * sizeof (evtchn_port_t)); + + /* FIXME: how about if we overflow the page here? 
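+     * (The shared page provides one ioreq slot per vcpu; assuming the
+     * 32-byte ioreq_t layout, a 4 KiB page tops out at 128 vcpus.)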
*/ + for (i = 0; i < max_cpus; i++) { + rc = xenevtchn_bind_interdomain(state->xce_handle, xen_domid, + xen_vcpu_eport(state->shared_page, i)); + if (rc == -1) { + error_report("shared evtchn %d bind error %d", i, errno); + goto err; + } + state->ioreq_local_port[i] = rc; + } + + rc = xenevtchn_bind_interdomain(state->xce_handle, xen_domid, + bufioreq_evtchn); + if (rc == -1) { + error_report("buffered evtchn bind error %d", errno); + goto err; + } + state->bufioreq_local_port = rc; + + /* Init RAM management */ + xen_map_cache_init(xen_phys_offset_to_gaddr, state); + xen_ram_init(pcms, ram_size, ram_memory); + + qemu_add_vm_change_state_handler(xen_hvm_change_state_handler, state); + + state->memory_listener = xen_memory_listener; + QLIST_INIT(&state->physmap); + memory_listener_register(&state->memory_listener, &address_space_memory); + state->log_for_dirtybit = NULL; + + state->io_listener = xen_io_listener; + memory_listener_register(&state->io_listener, &address_space_io); + + state->device_listener = xen_device_listener; + device_listener_register(&state->device_listener); + + /* Initialize backend core & drivers */ + if (xen_be_init() != 0) { + error_report("xen backend core setup failed"); + goto err; + } + xen_be_register_common(); + xen_read_physmap(state); + + /* Disable ACPI build because Xen handles it */ + pcms->acpi_build_enabled = false; + + return; + +err: + error_report("xen hardware virtual machine initialisation failed"); + exit(1); +} + +void destroy_hvm_domain(bool reboot) +{ + xc_interface *xc_handle; + int sts; + + xc_handle = xc_interface_open(0, 0, 0); + if (xc_handle == NULL) { + fprintf(stderr, "Cannot acquire xenctrl handle\n"); + } else { + sts = xc_domain_shutdown(xc_handle, xen_domid, + reboot ? SHUTDOWN_reboot : SHUTDOWN_poweroff); + if (sts != 0) { + fprintf(stderr, "xc_domain_shutdown failed to issue %s, " + "sts %d, %s\n", reboot ? "reboot" : "poweroff", + sts, strerror(errno)); + } else { + fprintf(stderr, "Issued domain %d %s\n", xen_domid, + reboot ? "reboot" : "poweroff"); + } + xc_interface_close(xc_handle); + } +} + +void xen_register_framebuffer(MemoryRegion *mr) +{ + framebuffer = mr; +} + +void xen_shutdown_fatal_error(const char *fmt, ...) 
+{ + va_list ap; + + va_start(ap, fmt); + vfprintf(stderr, fmt, ap); + va_end(ap); + fprintf(stderr, "Will destroy the domain.\n"); + /* destroy the domain */ + qemu_system_shutdown_request(); +} + +void xen_hvm_modified_memory(ram_addr_t start, ram_addr_t length) +{ + if (unlikely(xen_in_migration)) { + int rc; + ram_addr_t start_pfn, nb_pages; + + if (length == 0) { + length = TARGET_PAGE_SIZE; + } + start_pfn = start >> TARGET_PAGE_BITS; + nb_pages = ((start + length + TARGET_PAGE_SIZE - 1) >> TARGET_PAGE_BITS) + - start_pfn; + rc = xen_modified_memory(xen_domid, start_pfn, nb_pages); + if (rc) { + fprintf(stderr, + "%s failed for "RAM_ADDR_FMT" ("RAM_ADDR_FMT"): %i, %s\n", + __func__, start, nb_pages, rc, strerror(-rc)); + } + } +} + +void qmp_xen_set_global_dirty_log(bool enable, Error **errp) +{ + if (enable) { + memory_global_dirty_log_start(); + } else { + memory_global_dirty_log_stop(); + } +} diff --git a/stubs/Makefile.objs b/stubs/Makefile.objs index 6c80613dd7..f5b47bfd74 100644 --- a/stubs/Makefile.objs +++ b/stubs/Makefile.objs @@ -38,3 +38,4 @@ stub-obj-y += target-get-monitor-def.o stub-obj-y += pc_madt_cpu_entry.o stub-obj-y += vmgenid.o stub-obj-y += xen-common.o +stub-obj-y += xen-hvm.o diff --git a/stubs/xen-hvm.c b/stubs/xen-hvm.c new file mode 100644 index 0000000000..3ca6c51b21 --- /dev/null +++ b/stubs/xen-hvm.c @@ -0,0 +1,63 @@ +/* + * Copyright (C) 2010 Citrix Ltd. + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + * + * Contributions after 2012-01-13 are licensed under the terms of the + * GNU GPL, version 2 or (at your option) any later version. + */ + +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "hw/xen/xen.h" +#include "exec/memory.h" +#include "qmp-commands.h" + +int xen_pci_slot_get_pirq(PCIDevice *pci_dev, int irq_num) +{ + return -1; +} + +void xen_piix3_set_irq(void *opaque, int irq_num, int level) +{ +} + +void xen_piix_pci_write_config_client(uint32_t address, uint32_t val, int len) +{ +} + +void xen_hvm_inject_msi(uint64_t addr, uint32_t data) +{ +} + +int xen_is_pirq_msi(uint32_t msi_data) +{ + return 0; +} + +void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size, MemoryRegion *mr, + Error **errp) +{ +} + +qemu_irq *xen_interrupt_controller_init(void) +{ + return NULL; +} + +void xen_register_framebuffer(MemoryRegion *mr) +{ +} + +void xen_hvm_modified_memory(ram_addr_t start, ram_addr_t length) +{ +} + +void xen_hvm_init(PCMachineState *pcms, MemoryRegion **ram_memory) +{ +} + +void qmp_xen_set_global_dirty_log(bool enable, Error **errp) +{ +} diff --git a/trace-events b/trace-events index b07a09ba95..4e1448786f 100644 --- a/trace-events +++ b/trace-events @@ -48,17 +48,6 @@ spice_vmc_register_interface(void *scd) "spice vmc registered interface %p" spice_vmc_unregister_interface(void *scd) "spice vmc unregistered interface %p" spice_vmc_event(int event) "spice vmc event %d" -# xen-hvm.c -xen_ram_alloc(unsigned long ram_addr, unsigned long size) "requested: %#lx, size %#lx" -xen_client_set_memory(uint64_t start_addr, unsigned long size, bool log_dirty) "%#"PRIx64" size %#lx, log_dirty %i" -handle_ioreq(void *req, uint32_t type, uint32_t dir, uint32_t df, uint32_t data_is_ptr, uint64_t addr, uint64_t data, uint32_t count, uint32_t size) "I/O=%p type=%d dir=%d df=%d ptr=%d port=%#"PRIx64" data=%#"PRIx64" count=%d size=%d" -handle_ioreq_read(void *req, uint32_t type, uint32_t df, uint32_t data_is_ptr, uint64_t addr, uint64_t data, uint32_t count, uint32_t 
size) "I/O=%p read type=%d df=%d ptr=%d port=%#"PRIx64" data=%#"PRIx64" count=%d size=%d" -handle_ioreq_write(void *req, uint32_t type, uint32_t df, uint32_t data_is_ptr, uint64_t addr, uint64_t data, uint32_t count, uint32_t size) "I/O=%p write type=%d df=%d ptr=%d port=%#"PRIx64" data=%#"PRIx64" count=%d size=%d" -cpu_ioreq_pio(void *req, uint32_t dir, uint32_t df, uint32_t data_is_ptr, uint64_t addr, uint64_t data, uint32_t count, uint32_t size) "I/O=%p pio dir=%d df=%d ptr=%d port=%#"PRIx64" data=%#"PRIx64" count=%d size=%d" -cpu_ioreq_pio_read_reg(void *req, uint64_t data, uint64_t addr, uint32_t size) "I/O=%p pio read reg data=%#"PRIx64" port=%#"PRIx64" size=%d" -cpu_ioreq_pio_write_reg(void *req, uint64_t data, uint64_t addr, uint32_t size) "I/O=%p pio write reg data=%#"PRIx64" port=%#"PRIx64" size=%d" -cpu_ioreq_move(void *req, uint32_t dir, uint32_t df, uint32_t data_is_ptr, uint64_t addr, uint64_t data, uint32_t count, uint32_t size) "I/O=%p copy dir=%d df=%d ptr=%d port=%#"PRIx64" data=%#"PRIx64" count=%d size=%d" - # xen-mapcache.c xen_map_cache(uint64_t phys_addr) "want %#"PRIx64 xen_remap_bucket(uint64_t index) "index %#"PRIx64 diff --git a/xen-hvm-stub.c b/xen-hvm-stub.c deleted file mode 100644 index 3ca6c51b21..0000000000 --- a/xen-hvm-stub.c +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Copyright (C) 2010 Citrix Ltd. - * - * This work is licensed under the terms of the GNU GPL, version 2. See - * the COPYING file in the top-level directory. - * - * Contributions after 2012-01-13 are licensed under the terms of the - * GNU GPL, version 2 or (at your option) any later version. - */ - -#include "qemu/osdep.h" -#include "qemu-common.h" -#include "hw/xen/xen.h" -#include "exec/memory.h" -#include "qmp-commands.h" - -int xen_pci_slot_get_pirq(PCIDevice *pci_dev, int irq_num) -{ - return -1; -} - -void xen_piix3_set_irq(void *opaque, int irq_num, int level) -{ -} - -void xen_piix_pci_write_config_client(uint32_t address, uint32_t val, int len) -{ -} - -void xen_hvm_inject_msi(uint64_t addr, uint32_t data) -{ -} - -int xen_is_pirq_msi(uint32_t msi_data) -{ - return 0; -} - -void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size, MemoryRegion *mr, - Error **errp) -{ -} - -qemu_irq *xen_interrupt_controller_init(void) -{ - return NULL; -} - -void xen_register_framebuffer(MemoryRegion *mr) -{ -} - -void xen_hvm_modified_memory(ram_addr_t start, ram_addr_t length) -{ -} - -void xen_hvm_init(PCMachineState *pcms, MemoryRegion **ram_memory) -{ -} - -void qmp_xen_set_global_dirty_log(bool enable, Error **errp) -{ -} diff --git a/xen-hvm.c b/xen-hvm.c deleted file mode 100644 index 335e263834..0000000000 --- a/xen-hvm.c +++ /dev/null @@ -1,1429 +0,0 @@ -/* - * Copyright (C) 2010 Citrix Ltd. - * - * This work is licensed under the terms of the GNU GPL, version 2. See - * the COPYING file in the top-level directory. - * - * Contributions after 2012-01-13 are licensed under the terms of the - * GNU GPL, version 2 or (at your option) any later version. - */ - -#include "qemu/osdep.h" - -#include "cpu.h" -#include "hw/pci/pci.h" -#include "hw/i386/pc.h" -#include "hw/i386/apic-msidef.h" -#include "hw/xen/xen_common.h" -#include "hw/xen/xen_backend.h" -#include "qmp-commands.h" - -#include "sysemu/char.h" -#include "qemu/error-report.h" -#include "qemu/range.h" -#include "sysemu/xen-mapcache.h" -#include "trace-root.h" -#include "exec/address-spaces.h" - -#include -#include -#include - -//#define DEBUG_XEN_HVM - -#ifdef DEBUG_XEN_HVM -#define DPRINTF(fmt, ...) 
\
-    do { fprintf(stderr, "xen: " fmt, ## __VA_ARGS__); } while (0)
-#else
-#define DPRINTF(fmt, ...) \
-    do { } while (0)
-#endif
-
-static MemoryRegion ram_memory, ram_640k, ram_lo, ram_hi;
-static MemoryRegion *framebuffer;
-static bool xen_in_migration;
-
-/* Compatibility with older version */
-
-/* This allows QEMU to build on a system that has Xen 4.5 or earlier
- * installed. This is here (not in hw/xen/xen_common.h) because xen/hvm/ioreq.h
- * needs to be included before this block and hw/xen/xen_common.h needs to
- * be included before xen/hvm/ioreq.h
- */
-#ifndef IOREQ_TYPE_VMWARE_PORT
-#define IOREQ_TYPE_VMWARE_PORT 3
-struct vmware_regs {
-    uint32_t esi;
-    uint32_t edi;
-    uint32_t ebx;
-    uint32_t ecx;
-    uint32_t edx;
-};
-typedef struct vmware_regs vmware_regs_t;
-
-struct shared_vmport_iopage {
-    struct vmware_regs vcpu_vmport_regs[1];
-};
-typedef struct shared_vmport_iopage shared_vmport_iopage_t;
-#endif
-
-static inline uint32_t xen_vcpu_eport(shared_iopage_t *shared_page, int i)
-{
-    return shared_page->vcpu_ioreq[i].vp_eport;
-}
-static inline ioreq_t *xen_vcpu_ioreq(shared_iopage_t *shared_page, int vcpu)
-{
-    return &shared_page->vcpu_ioreq[vcpu];
-}
-
-#define BUFFER_IO_MAX_DELAY 100
-
-typedef struct XenPhysmap {
-    hwaddr start_addr;
-    ram_addr_t size;
-    const char *name;
-    hwaddr phys_offset;
-
-    QLIST_ENTRY(XenPhysmap) list;
-} XenPhysmap;
-
-typedef struct XenIOState {
-    ioservid_t ioservid;
-    shared_iopage_t *shared_page;
-    shared_vmport_iopage_t *shared_vmport_page;
-    buffered_iopage_t *buffered_io_page;
-    QEMUTimer *buffered_io_timer;
-    CPUState **cpu_by_vcpu_id;
-    /* the evtchn port for polling the notification, */
-    evtchn_port_t *ioreq_local_port;
-    /* evtchn local port for buffered io */
-    evtchn_port_t bufioreq_local_port;
-    /* the evtchn fd for polling */
-    xenevtchn_handle *xce_handle;
-    /* which vcpu we are serving */
-    int send_vcpu;
-
-    struct xs_handle *xenstore;
-    MemoryListener memory_listener;
-    MemoryListener io_listener;
-    DeviceListener device_listener;
-    QLIST_HEAD(, XenPhysmap) physmap;
-    hwaddr free_phys_offset;
-    const XenPhysmap *log_for_dirtybit;
-
-    Notifier exit;
-    Notifier suspend;
-    Notifier wakeup;
-} XenIOState;
-
-/* Xen specific function for piix pci */
-
-int xen_pci_slot_get_pirq(PCIDevice *pci_dev, int irq_num)
-{
-    return irq_num + ((pci_dev->devfn >> 3) << 2);
-}
-
-void xen_piix3_set_irq(void *opaque, int irq_num, int level)
-{
-    xen_set_pci_intx_level(xen_domid, 0, 0, irq_num >> 2,
-                           irq_num & 3, level);
-}
-
-void xen_piix_pci_write_config_client(uint32_t address, uint32_t val, int len)
-{
-    int i;
-
-    /* Scan for updates to PCI link routes (0x60-0x63). */
-    for (i = 0; i < len; i++) {
-        uint8_t v = (val >> (8 * i)) & 0xff;
-        if (v & 0x80) {
-            v = 0;
-        }
-        v &= 0xf;
-        if (((address + i) >= 0x60) && ((address + i) <= 0x63)) {
-            xen_set_pci_link_route(xen_domid, address + i - 0x60, v);
-        }
-    }
-}
-
-int xen_is_pirq_msi(uint32_t msi_data)
-{
-    /* If vector is 0, the msi is remapped into a pirq, passed as
-     * dest_id.
- */ - return ((msi_data & MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT) == 0; -} - -void xen_hvm_inject_msi(uint64_t addr, uint32_t data) -{ - xen_inject_msi(xen_domid, addr, data); -} - -static void xen_suspend_notifier(Notifier *notifier, void *data) -{ - xc_set_hvm_param(xen_xc, xen_domid, HVM_PARAM_ACPI_S_STATE, 3); -} - -/* Xen Interrupt Controller */ - -static void xen_set_irq(void *opaque, int irq, int level) -{ - xen_set_isa_irq_level(xen_domid, irq, level); -} - -qemu_irq *xen_interrupt_controller_init(void) -{ - return qemu_allocate_irqs(xen_set_irq, NULL, 16); -} - -/* Memory Ops */ - -static void xen_ram_init(PCMachineState *pcms, - ram_addr_t ram_size, MemoryRegion **ram_memory_p) -{ - MemoryRegion *sysmem = get_system_memory(); - ram_addr_t block_len; - uint64_t user_lowmem = object_property_get_int(qdev_get_machine(), - PC_MACHINE_MAX_RAM_BELOW_4G, - &error_abort); - - /* Handle the machine opt max-ram-below-4g. It is basically doing - * min(xen limit, user limit). - */ - if (!user_lowmem) { - user_lowmem = HVM_BELOW_4G_RAM_END; /* default */ - } - if (HVM_BELOW_4G_RAM_END <= user_lowmem) { - user_lowmem = HVM_BELOW_4G_RAM_END; - } - - if (ram_size >= user_lowmem) { - pcms->above_4g_mem_size = ram_size - user_lowmem; - pcms->below_4g_mem_size = user_lowmem; - } else { - pcms->above_4g_mem_size = 0; - pcms->below_4g_mem_size = ram_size; - } - if (!pcms->above_4g_mem_size) { - block_len = ram_size; - } else { - /* - * Xen does not allocate the memory continuously, it keeps a - * hole of the size computed above or passed in. - */ - block_len = (1ULL << 32) + pcms->above_4g_mem_size; - } - memory_region_init_ram(&ram_memory, NULL, "xen.ram", block_len, - &error_fatal); - *ram_memory_p = &ram_memory; - vmstate_register_ram_global(&ram_memory); - - memory_region_init_alias(&ram_640k, NULL, "xen.ram.640k", - &ram_memory, 0, 0xa0000); - memory_region_add_subregion(sysmem, 0, &ram_640k); - /* Skip of the VGA IO memory space, it will be registered later by the VGA - * emulated device. - * - * The area between 0xc0000 and 0x100000 will be used by SeaBIOS to load - * the Options ROM, so it is registered here as RAM. 
- */ - memory_region_init_alias(&ram_lo, NULL, "xen.ram.lo", - &ram_memory, 0xc0000, - pcms->below_4g_mem_size - 0xc0000); - memory_region_add_subregion(sysmem, 0xc0000, &ram_lo); - if (pcms->above_4g_mem_size > 0) { - memory_region_init_alias(&ram_hi, NULL, "xen.ram.hi", - &ram_memory, 0x100000000ULL, - pcms->above_4g_mem_size); - memory_region_add_subregion(sysmem, 0x100000000ULL, &ram_hi); - } -} - -void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size, MemoryRegion *mr, - Error **errp) -{ - unsigned long nr_pfn; - xen_pfn_t *pfn_list; - int i; - - if (runstate_check(RUN_STATE_INMIGRATE)) { - /* RAM already populated in Xen */ - fprintf(stderr, "%s: do not alloc "RAM_ADDR_FMT - " bytes of ram at "RAM_ADDR_FMT" when runstate is INMIGRATE\n", - __func__, size, ram_addr); - return; - } - - if (mr == &ram_memory) { - return; - } - - trace_xen_ram_alloc(ram_addr, size); - - nr_pfn = size >> TARGET_PAGE_BITS; - pfn_list = g_malloc(sizeof (*pfn_list) * nr_pfn); - - for (i = 0; i < nr_pfn; i++) { - pfn_list[i] = (ram_addr >> TARGET_PAGE_BITS) + i; - } - - if (xc_domain_populate_physmap_exact(xen_xc, xen_domid, nr_pfn, 0, 0, pfn_list)) { - error_setg(errp, "xen: failed to populate ram at " RAM_ADDR_FMT, - ram_addr); - } - - g_free(pfn_list); -} - -static XenPhysmap *get_physmapping(XenIOState *state, - hwaddr start_addr, ram_addr_t size) -{ - XenPhysmap *physmap = NULL; - - start_addr &= TARGET_PAGE_MASK; - - QLIST_FOREACH(physmap, &state->physmap, list) { - if (range_covers_byte(physmap->start_addr, physmap->size, start_addr)) { - return physmap; - } - } - return NULL; -} - -static hwaddr xen_phys_offset_to_gaddr(hwaddr start_addr, - ram_addr_t size, void *opaque) -{ - hwaddr addr = start_addr & TARGET_PAGE_MASK; - XenIOState *xen_io_state = opaque; - XenPhysmap *physmap = NULL; - - QLIST_FOREACH(physmap, &xen_io_state->physmap, list) { - if (range_covers_byte(physmap->phys_offset, physmap->size, addr)) { - return physmap->start_addr; - } - } - - return start_addr; -} - -static int xen_add_to_physmap(XenIOState *state, - hwaddr start_addr, - ram_addr_t size, - MemoryRegion *mr, - hwaddr offset_within_region) -{ - unsigned long i = 0; - int rc = 0; - XenPhysmap *physmap = NULL; - hwaddr pfn, start_gpfn; - hwaddr phys_offset = memory_region_get_ram_addr(mr); - char path[80], value[17]; - const char *mr_name; - - if (get_physmapping(state, start_addr, size)) { - return 0; - } - if (size <= 0) { - return -1; - } - - /* Xen can only handle a single dirty log region for now and we want - * the linear framebuffer to be that region. - * Avoid tracking any regions that is not videoram and avoid tracking - * the legacy vga region. 
*/ - if (mr == framebuffer && start_addr > 0xbffff) { - goto go_physmap; - } - return -1; - -go_physmap: - DPRINTF("mapping vram to %"HWADDR_PRIx" - %"HWADDR_PRIx"\n", - start_addr, start_addr + size); - - pfn = phys_offset >> TARGET_PAGE_BITS; - start_gpfn = start_addr >> TARGET_PAGE_BITS; - for (i = 0; i < size >> TARGET_PAGE_BITS; i++) { - unsigned long idx = pfn + i; - xen_pfn_t gpfn = start_gpfn + i; - - rc = xen_xc_domain_add_to_physmap(xen_xc, xen_domid, XENMAPSPACE_gmfn, idx, gpfn); - if (rc) { - DPRINTF("add_to_physmap MFN %"PRI_xen_pfn" to PFN %" - PRI_xen_pfn" failed: %d (errno: %d)\n", idx, gpfn, rc, errno); - return -rc; - } - } - - mr_name = memory_region_name(mr); - - physmap = g_malloc(sizeof (XenPhysmap)); - - physmap->start_addr = start_addr; - physmap->size = size; - physmap->name = mr_name; - physmap->phys_offset = phys_offset; - - QLIST_INSERT_HEAD(&state->physmap, physmap, list); - - xc_domain_pin_memory_cacheattr(xen_xc, xen_domid, - start_addr >> TARGET_PAGE_BITS, - (start_addr + size - 1) >> TARGET_PAGE_BITS, - XEN_DOMCTL_MEM_CACHEATTR_WB); - - snprintf(path, sizeof(path), - "/local/domain/0/device-model/%d/physmap/%"PRIx64"/start_addr", - xen_domid, (uint64_t)phys_offset); - snprintf(value, sizeof(value), "%"PRIx64, (uint64_t)start_addr); - if (!xs_write(state->xenstore, 0, path, value, strlen(value))) { - return -1; - } - snprintf(path, sizeof(path), - "/local/domain/0/device-model/%d/physmap/%"PRIx64"/size", - xen_domid, (uint64_t)phys_offset); - snprintf(value, sizeof(value), "%"PRIx64, (uint64_t)size); - if (!xs_write(state->xenstore, 0, path, value, strlen(value))) { - return -1; - } - if (mr_name) { - snprintf(path, sizeof(path), - "/local/domain/0/device-model/%d/physmap/%"PRIx64"/name", - xen_domid, (uint64_t)phys_offset); - if (!xs_write(state->xenstore, 0, path, mr_name, strlen(mr_name))) { - return -1; - } - } - - return 0; -} - -static int xen_remove_from_physmap(XenIOState *state, - hwaddr start_addr, - ram_addr_t size) -{ - unsigned long i = 0; - int rc = 0; - XenPhysmap *physmap = NULL; - hwaddr phys_offset = 0; - - physmap = get_physmapping(state, start_addr, size); - if (physmap == NULL) { - return -1; - } - - phys_offset = physmap->phys_offset; - size = physmap->size; - - DPRINTF("unmapping vram to %"HWADDR_PRIx" - %"HWADDR_PRIx", at " - "%"HWADDR_PRIx"\n", start_addr, start_addr + size, phys_offset); - - size >>= TARGET_PAGE_BITS; - start_addr >>= TARGET_PAGE_BITS; - phys_offset >>= TARGET_PAGE_BITS; - for (i = 0; i < size; i++) { - xen_pfn_t idx = start_addr + i; - xen_pfn_t gpfn = phys_offset + i; - - rc = xen_xc_domain_add_to_physmap(xen_xc, xen_domid, XENMAPSPACE_gmfn, idx, gpfn); - if (rc) { - fprintf(stderr, "add_to_physmap MFN %"PRI_xen_pfn" to PFN %" - PRI_xen_pfn" failed: %d (errno: %d)\n", idx, gpfn, rc, errno); - return -rc; - } - } - - QLIST_REMOVE(physmap, list); - if (state->log_for_dirtybit == physmap) { - state->log_for_dirtybit = NULL; - } - g_free(physmap); - - return 0; -} - -static void xen_set_memory(struct MemoryListener *listener, - MemoryRegionSection *section, - bool add) -{ - XenIOState *state = container_of(listener, XenIOState, memory_listener); - hwaddr start_addr = section->offset_within_address_space; - ram_addr_t size = int128_get64(section->size); - bool log_dirty = memory_region_is_logging(section->mr, DIRTY_MEMORY_VGA); - hvmmem_type_t mem_type; - - if (section->mr == &ram_memory) { - return; - } else { - if (add) { - xen_map_memory_section(xen_domid, state->ioservid, - section); - } else { - 
xen_unmap_memory_section(xen_domid, state->ioservid, - section); - } - } - - if (!memory_region_is_ram(section->mr)) { - return; - } - - if (log_dirty != add) { - return; - } - - trace_xen_client_set_memory(start_addr, size, log_dirty); - - start_addr &= TARGET_PAGE_MASK; - size = TARGET_PAGE_ALIGN(size); - - if (add) { - if (!memory_region_is_rom(section->mr)) { - xen_add_to_physmap(state, start_addr, size, - section->mr, section->offset_within_region); - } else { - mem_type = HVMMEM_ram_ro; - if (xen_set_mem_type(xen_domid, mem_type, - start_addr >> TARGET_PAGE_BITS, - size >> TARGET_PAGE_BITS)) { - DPRINTF("xen_set_mem_type error, addr: "TARGET_FMT_plx"\n", - start_addr); - } - } - } else { - if (xen_remove_from_physmap(state, start_addr, size) < 0) { - DPRINTF("physmapping does not exist at "TARGET_FMT_plx"\n", start_addr); - } - } -} - -static void xen_region_add(MemoryListener *listener, - MemoryRegionSection *section) -{ - memory_region_ref(section->mr); - xen_set_memory(listener, section, true); -} - -static void xen_region_del(MemoryListener *listener, - MemoryRegionSection *section) -{ - xen_set_memory(listener, section, false); - memory_region_unref(section->mr); -} - -static void xen_io_add(MemoryListener *listener, - MemoryRegionSection *section) -{ - XenIOState *state = container_of(listener, XenIOState, io_listener); - MemoryRegion *mr = section->mr; - - if (mr->ops == &unassigned_io_ops) { - return; - } - - memory_region_ref(mr); - - xen_map_io_section(xen_domid, state->ioservid, section); -} - -static void xen_io_del(MemoryListener *listener, - MemoryRegionSection *section) -{ - XenIOState *state = container_of(listener, XenIOState, io_listener); - MemoryRegion *mr = section->mr; - - if (mr->ops == &unassigned_io_ops) { - return; - } - - xen_unmap_io_section(xen_domid, state->ioservid, section); - - memory_region_unref(mr); -} - -static void xen_device_realize(DeviceListener *listener, - DeviceState *dev) -{ - XenIOState *state = container_of(listener, XenIOState, device_listener); - - if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) { - PCIDevice *pci_dev = PCI_DEVICE(dev); - - xen_map_pcidev(xen_domid, state->ioservid, pci_dev); - } -} - -static void xen_device_unrealize(DeviceListener *listener, - DeviceState *dev) -{ - XenIOState *state = container_of(listener, XenIOState, device_listener); - - if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) { - PCIDevice *pci_dev = PCI_DEVICE(dev); - - xen_unmap_pcidev(xen_domid, state->ioservid, pci_dev); - } -} - -static void xen_sync_dirty_bitmap(XenIOState *state, - hwaddr start_addr, - ram_addr_t size) -{ - hwaddr npages = size >> TARGET_PAGE_BITS; - const int width = sizeof(unsigned long) * 8; - unsigned long bitmap[DIV_ROUND_UP(npages, width)]; - int rc, i, j; - const XenPhysmap *physmap = NULL; - - physmap = get_physmapping(state, start_addr, size); - if (physmap == NULL) { - /* not handled */ - return; - } - - if (state->log_for_dirtybit == NULL) { - state->log_for_dirtybit = physmap; - } else if (state->log_for_dirtybit != physmap) { - /* Only one range for dirty bitmap can be tracked. 
*/ - return; - } - - rc = xen_track_dirty_vram(xen_domid, start_addr >> TARGET_PAGE_BITS, - npages, bitmap); - if (rc < 0) { -#ifndef ENODATA -#define ENODATA ENOENT -#endif - if (errno == ENODATA) { - memory_region_set_dirty(framebuffer, 0, size); - DPRINTF("xen: track_dirty_vram failed (0x" TARGET_FMT_plx - ", 0x" TARGET_FMT_plx "): %s\n", - start_addr, start_addr + size, strerror(errno)); - } - return; - } - - for (i = 0; i < ARRAY_SIZE(bitmap); i++) { - unsigned long map = bitmap[i]; - while (map != 0) { - j = ctzl(map); - map &= ~(1ul << j); - memory_region_set_dirty(framebuffer, - (i * width + j) * TARGET_PAGE_SIZE, - TARGET_PAGE_SIZE); - }; - } -} - -static void xen_log_start(MemoryListener *listener, - MemoryRegionSection *section, - int old, int new) -{ - XenIOState *state = container_of(listener, XenIOState, memory_listener); - - if (new & ~old & (1 << DIRTY_MEMORY_VGA)) { - xen_sync_dirty_bitmap(state, section->offset_within_address_space, - int128_get64(section->size)); - } -} - -static void xen_log_stop(MemoryListener *listener, MemoryRegionSection *section, - int old, int new) -{ - XenIOState *state = container_of(listener, XenIOState, memory_listener); - - if (old & ~new & (1 << DIRTY_MEMORY_VGA)) { - state->log_for_dirtybit = NULL; - /* Disable dirty bit tracking */ - xen_track_dirty_vram(xen_domid, 0, 0, NULL); - } -} - -static void xen_log_sync(MemoryListener *listener, MemoryRegionSection *section) -{ - XenIOState *state = container_of(listener, XenIOState, memory_listener); - - xen_sync_dirty_bitmap(state, section->offset_within_address_space, - int128_get64(section->size)); -} - -static void xen_log_global_start(MemoryListener *listener) -{ - if (xen_enabled()) { - xen_in_migration = true; - } -} - -static void xen_log_global_stop(MemoryListener *listener) -{ - xen_in_migration = false; -} - -static MemoryListener xen_memory_listener = { - .region_add = xen_region_add, - .region_del = xen_region_del, - .log_start = xen_log_start, - .log_stop = xen_log_stop, - .log_sync = xen_log_sync, - .log_global_start = xen_log_global_start, - .log_global_stop = xen_log_global_stop, - .priority = 10, -}; - -static MemoryListener xen_io_listener = { - .region_add = xen_io_add, - .region_del = xen_io_del, - .priority = 10, -}; - -static DeviceListener xen_device_listener = { - .realize = xen_device_realize, - .unrealize = xen_device_unrealize, -}; - -/* get the ioreq packets from share mem */ -static ioreq_t *cpu_get_ioreq_from_shared_memory(XenIOState *state, int vcpu) -{ - ioreq_t *req = xen_vcpu_ioreq(state->shared_page, vcpu); - - if (req->state != STATE_IOREQ_READY) { - DPRINTF("I/O request not ready: " - "%x, ptr: %x, port: %"PRIx64", " - "data: %"PRIx64", count: %u, size: %u\n", - req->state, req->data_is_ptr, req->addr, - req->data, req->count, req->size); - return NULL; - } - - xen_rmb(); /* see IOREQ_READY /then/ read contents of ioreq */ - - req->state = STATE_IOREQ_INPROCESS; - return req; -} - -/* use poll to get the port notification */ -/* ioreq_vec--out,the */ -/* retval--the number of ioreq packet */ -static ioreq_t *cpu_get_ioreq(XenIOState *state) -{ - int i; - evtchn_port_t port; - - port = xenevtchn_pending(state->xce_handle); - if (port == state->bufioreq_local_port) { - timer_mod(state->buffered_io_timer, - BUFFER_IO_MAX_DELAY + qemu_clock_get_ms(QEMU_CLOCK_REALTIME)); - return NULL; - } - - if (port != -1) { - for (i = 0; i < max_cpus; i++) { - if (state->ioreq_local_port[i] == port) { - break; - } - } - - if (i == max_cpus) { - hw_error("Fatal error while 
trying to get io event!\n"); - } - - /* unmask the wanted port again */ - xenevtchn_unmask(state->xce_handle, port); - - /* get the io packet from shared memory */ - state->send_vcpu = i; - return cpu_get_ioreq_from_shared_memory(state, i); - } - - /* read error or read nothing */ - return NULL; -} - -static uint32_t do_inp(uint32_t addr, unsigned long size) -{ - switch (size) { - case 1: - return cpu_inb(addr); - case 2: - return cpu_inw(addr); - case 4: - return cpu_inl(addr); - default: - hw_error("inp: bad size: %04x %lx", addr, size); - } -} - -static void do_outp(uint32_t addr, - unsigned long size, uint32_t val) -{ - switch (size) { - case 1: - return cpu_outb(addr, val); - case 2: - return cpu_outw(addr, val); - case 4: - return cpu_outl(addr, val); - default: - hw_error("outp: bad size: %04x %lx", addr, size); - } -} - -/* - * Helper functions which read/write an object from/to physical guest - * memory, as part of the implementation of an ioreq. - * - * Equivalent to - * cpu_physical_memory_rw(addr + (req->df ? -1 : +1) * req->size * i, - * val, req->size, 0/1) - * except without the integer overflow problems. - */ -static void rw_phys_req_item(hwaddr addr, - ioreq_t *req, uint32_t i, void *val, int rw) -{ - /* Do everything unsigned so overflow just results in a truncated result - * and accesses to undesired parts of guest memory, which is up - * to the guest */ - hwaddr offset = (hwaddr)req->size * i; - if (req->df) { - addr -= offset; - } else { - addr += offset; - } - cpu_physical_memory_rw(addr, val, req->size, rw); -} - -static inline void read_phys_req_item(hwaddr addr, - ioreq_t *req, uint32_t i, void *val) -{ - rw_phys_req_item(addr, req, i, val, 0); -} -static inline void write_phys_req_item(hwaddr addr, - ioreq_t *req, uint32_t i, void *val) -{ - rw_phys_req_item(addr, req, i, val, 1); -} - - -static void cpu_ioreq_pio(ioreq_t *req) -{ - uint32_t i; - - trace_cpu_ioreq_pio(req, req->dir, req->df, req->data_is_ptr, req->addr, - req->data, req->count, req->size); - - if (req->size > sizeof(uint32_t)) { - hw_error("PIO: bad size (%u)", req->size); - } - - if (req->dir == IOREQ_READ) { - if (!req->data_is_ptr) { - req->data = do_inp(req->addr, req->size); - trace_cpu_ioreq_pio_read_reg(req, req->data, req->addr, - req->size); - } else { - uint32_t tmp; - - for (i = 0; i < req->count; i++) { - tmp = do_inp(req->addr, req->size); - write_phys_req_item(req->data, req, i, &tmp); - } - } - } else if (req->dir == IOREQ_WRITE) { - if (!req->data_is_ptr) { - trace_cpu_ioreq_pio_write_reg(req, req->data, req->addr, - req->size); - do_outp(req->addr, req->size, req->data); - } else { - for (i = 0; i < req->count; i++) { - uint32_t tmp = 0; - - read_phys_req_item(req->data, req, i, &tmp); - do_outp(req->addr, req->size, tmp); - } - } - } -} - -static void cpu_ioreq_move(ioreq_t *req) -{ - uint32_t i; - - trace_cpu_ioreq_move(req, req->dir, req->df, req->data_is_ptr, req->addr, - req->data, req->count, req->size); - - if (req->size > sizeof(req->data)) { - hw_error("MMIO: bad size (%u)", req->size); - } - - if (!req->data_is_ptr) { - if (req->dir == IOREQ_READ) { - for (i = 0; i < req->count; i++) { - read_phys_req_item(req->addr, req, i, &req->data); - } - } else if (req->dir == IOREQ_WRITE) { - for (i = 0; i < req->count; i++) { - write_phys_req_item(req->addr, req, i, &req->data); - } - } - } else { - uint64_t tmp; - - if (req->dir == IOREQ_READ) { - for (i = 0; i < req->count; i++) { - read_phys_req_item(req->addr, req, i, &tmp); - write_phys_req_item(req->data, req, i, &tmp); - } 
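The rep loops above and below step guest-memory addresses with rw_phys_req_item(), whose offset rule was spelled out earlier: element i of a repeated access lives at addr plus or minus i * size, with df set walking addresses downward (matching the x86 direction flag) and the unsigned arithmetic allowed to wrap. A standalone sketch of that address computation, illustrative only and not part of the patch, with uint64_t standing in for hwaddr:

    #include <stdint.h>
    #include <stdio.h>

    /* Same rule as rw_phys_req_item(): element i of a rep access lives at
     * addr +/- i * size, using unsigned (wrapping) arithmetic throughout. */
    static uint64_t rep_item_addr(uint64_t addr, uint32_t size, uint32_t i, int df)
    {
        uint64_t offset = (uint64_t)size * i;
        return df ? addr - offset : addr + offset;
    }

    int main(void)
    {
        uint32_t i;

        /* A 4-byte access repeated 3 times starting at 0x1000. */
        for (i = 0; i < 3; i++) {
            printf("i=%u  df=0 -> %#llx  df=1 -> %#llx\n", i,
                   (unsigned long long)rep_item_addr(0x1000, 4, i, 0),
                   (unsigned long long)rep_item_addr(0x1000, 4, i, 1));
        }
        return 0;
    }
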
- } else if (req->dir == IOREQ_WRITE) { - for (i = 0; i < req->count; i++) { - read_phys_req_item(req->data, req, i, &tmp); - write_phys_req_item(req->addr, req, i, &tmp); - } - } - } -} - -static void regs_to_cpu(vmware_regs_t *vmport_regs, ioreq_t *req) -{ - X86CPU *cpu; - CPUX86State *env; - - cpu = X86_CPU(current_cpu); - env = &cpu->env; - env->regs[R_EAX] = req->data; - env->regs[R_EBX] = vmport_regs->ebx; - env->regs[R_ECX] = vmport_regs->ecx; - env->regs[R_EDX] = vmport_regs->edx; - env->regs[R_ESI] = vmport_regs->esi; - env->regs[R_EDI] = vmport_regs->edi; -} - -static void regs_from_cpu(vmware_regs_t *vmport_regs) -{ - X86CPU *cpu = X86_CPU(current_cpu); - CPUX86State *env = &cpu->env; - - vmport_regs->ebx = env->regs[R_EBX]; - vmport_regs->ecx = env->regs[R_ECX]; - vmport_regs->edx = env->regs[R_EDX]; - vmport_regs->esi = env->regs[R_ESI]; - vmport_regs->edi = env->regs[R_EDI]; -} - -static void handle_vmport_ioreq(XenIOState *state, ioreq_t *req) -{ - vmware_regs_t *vmport_regs; - - assert(state->shared_vmport_page); - vmport_regs = - &state->shared_vmport_page->vcpu_vmport_regs[state->send_vcpu]; - QEMU_BUILD_BUG_ON(sizeof(*req) < sizeof(*vmport_regs)); - - current_cpu = state->cpu_by_vcpu_id[state->send_vcpu]; - regs_to_cpu(vmport_regs, req); - cpu_ioreq_pio(req); - regs_from_cpu(vmport_regs); - current_cpu = NULL; -} - -static void handle_ioreq(XenIOState *state, ioreq_t *req) -{ - trace_handle_ioreq(req, req->type, req->dir, req->df, req->data_is_ptr, - req->addr, req->data, req->count, req->size); - - if (!req->data_is_ptr && (req->dir == IOREQ_WRITE) && - (req->size < sizeof (target_ulong))) { - req->data &= ((target_ulong) 1 << (8 * req->size)) - 1; - } - - if (req->dir == IOREQ_WRITE) - trace_handle_ioreq_write(req, req->type, req->df, req->data_is_ptr, - req->addr, req->data, req->count, req->size); - - switch (req->type) { - case IOREQ_TYPE_PIO: - cpu_ioreq_pio(req); - break; - case IOREQ_TYPE_COPY: - cpu_ioreq_move(req); - break; - case IOREQ_TYPE_VMWARE_PORT: - handle_vmport_ioreq(state, req); - break; - case IOREQ_TYPE_TIMEOFFSET: - break; - case IOREQ_TYPE_INVALIDATE: - xen_invalidate_map_cache(); - break; - case IOREQ_TYPE_PCI_CONFIG: { - uint32_t sbdf = req->addr >> 32; - uint32_t val; - - /* Fake a write to port 0xCF8 so that - * the config space access will target the - * correct device model. 
- */ - val = (1u << 31) | - ((req->addr & 0x0f00) << 16) | - ((sbdf & 0xffff) << 8) | - (req->addr & 0xfc); - do_outp(0xcf8, 4, val); - - /* Now issue the config space access via - * port 0xCFC - */ - req->addr = 0xcfc | (req->addr & 0x03); - cpu_ioreq_pio(req); - break; - } - default: - hw_error("Invalid ioreq type 0x%x\n", req->type); - } - if (req->dir == IOREQ_READ) { - trace_handle_ioreq_read(req, req->type, req->df, req->data_is_ptr, - req->addr, req->data, req->count, req->size); - } -} - -static int handle_buffered_iopage(XenIOState *state) -{ - buffered_iopage_t *buf_page = state->buffered_io_page; - buf_ioreq_t *buf_req = NULL; - ioreq_t req; - int qw; - - if (!buf_page) { - return 0; - } - - memset(&req, 0x00, sizeof(req)); - req.state = STATE_IOREQ_READY; - req.count = 1; - req.dir = IOREQ_WRITE; - - for (;;) { - uint32_t rdptr = buf_page->read_pointer, wrptr; - - xen_rmb(); - wrptr = buf_page->write_pointer; - xen_rmb(); - if (rdptr != buf_page->read_pointer) { - continue; - } - if (rdptr == wrptr) { - break; - } - buf_req = &buf_page->buf_ioreq[rdptr % IOREQ_BUFFER_SLOT_NUM]; - req.size = 1U << buf_req->size; - req.addr = buf_req->addr; - req.data = buf_req->data; - req.type = buf_req->type; - xen_rmb(); - qw = (req.size == 8); - if (qw) { - if (rdptr + 1 == wrptr) { - hw_error("Incomplete quad word buffered ioreq"); - } - buf_req = &buf_page->buf_ioreq[(rdptr + 1) % - IOREQ_BUFFER_SLOT_NUM]; - req.data |= ((uint64_t)buf_req->data) << 32; - xen_rmb(); - } - - handle_ioreq(state, &req); - - /* Only req.data may get updated by handle_ioreq(), albeit even that - * should not happen as such data would never make it to the guest (we - * can only usefully see writes here after all). - */ - assert(req.state == STATE_IOREQ_READY); - assert(req.count == 1); - assert(req.dir == IOREQ_WRITE); - assert(!req.data_is_ptr); - - atomic_add(&buf_page->read_pointer, qw + 1); - } - - return req.count; -} - -static void handle_buffered_io(void *opaque) -{ - XenIOState *state = opaque; - - if (handle_buffered_iopage(state)) { - timer_mod(state->buffered_io_timer, - BUFFER_IO_MAX_DELAY + qemu_clock_get_ms(QEMU_CLOCK_REALTIME)); - } else { - timer_del(state->buffered_io_timer); - xenevtchn_unmask(state->xce_handle, state->bufioreq_local_port); - } -} - -static void cpu_handle_ioreq(void *opaque) -{ - XenIOState *state = opaque; - ioreq_t *req = cpu_get_ioreq(state); - - handle_buffered_iopage(state); - if (req) { - ioreq_t copy = *req; - - xen_rmb(); - handle_ioreq(state, ©); - req->data = copy.data; - - if (req->state != STATE_IOREQ_INPROCESS) { - fprintf(stderr, "Badness in I/O request ... not in service?!: " - "%x, ptr: %x, port: %"PRIx64", " - "data: %"PRIx64", count: %u, size: %u, type: %u\n", - req->state, req->data_is_ptr, req->addr, - req->data, req->count, req->size, req->type); - destroy_hvm_domain(false); - return; - } - - xen_wmb(); /* Update ioreq contents /then/ update state. */ - - /* - * We do this before we send the response so that the tools - * have the opportunity to pick up on the reset before the - * guest resumes and does a hlt with interrupts disabled which - * causes Xen to powerdown the domain. 
- */ - if (runstate_is_running()) { - if (qemu_shutdown_requested_get()) { - destroy_hvm_domain(false); - } - if (qemu_reset_requested_get()) { - qemu_system_reset(VMRESET_REPORT); - destroy_hvm_domain(true); - } - } - - req->state = STATE_IORESP_READY; - xenevtchn_notify(state->xce_handle, - state->ioreq_local_port[state->send_vcpu]); - } -} - -static void xen_main_loop_prepare(XenIOState *state) -{ - int evtchn_fd = -1; - - if (state->xce_handle != NULL) { - evtchn_fd = xenevtchn_fd(state->xce_handle); - } - - state->buffered_io_timer = timer_new_ms(QEMU_CLOCK_REALTIME, handle_buffered_io, - state); - - if (evtchn_fd != -1) { - CPUState *cpu_state; - - DPRINTF("%s: Init cpu_by_vcpu_id\n", __func__); - CPU_FOREACH(cpu_state) { - DPRINTF("%s: cpu_by_vcpu_id[%d]=%p\n", - __func__, cpu_state->cpu_index, cpu_state); - state->cpu_by_vcpu_id[cpu_state->cpu_index] = cpu_state; - } - qemu_set_fd_handler(evtchn_fd, cpu_handle_ioreq, NULL, state); - } -} - - -static void xen_hvm_change_state_handler(void *opaque, int running, - RunState rstate) -{ - XenIOState *state = opaque; - - if (running) { - xen_main_loop_prepare(state); - } - - xen_set_ioreq_server_state(xen_domid, - state->ioservid, - (rstate == RUN_STATE_RUNNING)); -} - -static void xen_exit_notifier(Notifier *n, void *data) -{ - XenIOState *state = container_of(n, XenIOState, exit); - - xenevtchn_close(state->xce_handle); - xs_daemon_close(state->xenstore); -} - -static void xen_read_physmap(XenIOState *state) -{ - XenPhysmap *physmap = NULL; - unsigned int len, num, i; - char path[80], *value = NULL; - char **entries = NULL; - - snprintf(path, sizeof(path), - "/local/domain/0/device-model/%d/physmap", xen_domid); - entries = xs_directory(state->xenstore, 0, path, &num); - if (entries == NULL) - return; - - for (i = 0; i < num; i++) { - physmap = g_malloc(sizeof (XenPhysmap)); - physmap->phys_offset = strtoull(entries[i], NULL, 16); - snprintf(path, sizeof(path), - "/local/domain/0/device-model/%d/physmap/%s/start_addr", - xen_domid, entries[i]); - value = xs_read(state->xenstore, 0, path, &len); - if (value == NULL) { - g_free(physmap); - continue; - } - physmap->start_addr = strtoull(value, NULL, 16); - free(value); - - snprintf(path, sizeof(path), - "/local/domain/0/device-model/%d/physmap/%s/size", - xen_domid, entries[i]); - value = xs_read(state->xenstore, 0, path, &len); - if (value == NULL) { - g_free(physmap); - continue; - } - physmap->size = strtoull(value, NULL, 16); - free(value); - - snprintf(path, sizeof(path), - "/local/domain/0/device-model/%d/physmap/%s/name", - xen_domid, entries[i]); - physmap->name = xs_read(state->xenstore, 0, path, &len); - - QLIST_INSERT_HEAD(&state->physmap, physmap, list); - } - free(entries); -} - -static void xen_wakeup_notifier(Notifier *notifier, void *data) -{ - xc_set_hvm_param(xen_xc, xen_domid, HVM_PARAM_ACPI_S_STATE, 0); -} - -void xen_hvm_init(PCMachineState *pcms, MemoryRegion **ram_memory) -{ - int i, rc; - xen_pfn_t ioreq_pfn; - xen_pfn_t bufioreq_pfn; - evtchn_port_t bufioreq_evtchn; - XenIOState *state; - - state = g_malloc0(sizeof (XenIOState)); - - state->xce_handle = xenevtchn_open(NULL, 0); - if (state->xce_handle == NULL) { - perror("xen: event channel open"); - goto err; - } - - state->xenstore = xs_daemon_open(); - if (state->xenstore == NULL) { - perror("xen: xenstore open"); - goto err; - } - - if (xen_domid_restrict) { - rc = xen_restrict(xen_domid); - if (rc < 0) { - error_report("failed to restrict: error %d", errno); - goto err; - } - } - - 
xen_create_ioreq_server(xen_domid, &state->ioservid); - - state->exit.notify = xen_exit_notifier; - qemu_add_exit_notifier(&state->exit); - - state->suspend.notify = xen_suspend_notifier; - qemu_register_suspend_notifier(&state->suspend); - - state->wakeup.notify = xen_wakeup_notifier; - qemu_register_wakeup_notifier(&state->wakeup); - - rc = xen_get_ioreq_server_info(xen_domid, state->ioservid, - &ioreq_pfn, &bufioreq_pfn, - &bufioreq_evtchn); - if (rc < 0) { - error_report("failed to get ioreq server info: error %d handle=%p", - errno, xen_xc); - goto err; - } - - DPRINTF("shared page at pfn %lx\n", ioreq_pfn); - DPRINTF("buffered io page at pfn %lx\n", bufioreq_pfn); - DPRINTF("buffered io evtchn is %x\n", bufioreq_evtchn); - - state->shared_page = xenforeignmemory_map(xen_fmem, xen_domid, - PROT_READ|PROT_WRITE, - 1, &ioreq_pfn, NULL); - if (state->shared_page == NULL) { - error_report("map shared IO page returned error %d handle=%p", - errno, xen_xc); - goto err; - } - - rc = xen_get_vmport_regs_pfn(xen_xc, xen_domid, &ioreq_pfn); - if (!rc) { - DPRINTF("shared vmport page at pfn %lx\n", ioreq_pfn); - state->shared_vmport_page = - xenforeignmemory_map(xen_fmem, xen_domid, PROT_READ|PROT_WRITE, - 1, &ioreq_pfn, NULL); - if (state->shared_vmport_page == NULL) { - error_report("map shared vmport IO page returned error %d handle=%p", - errno, xen_xc); - goto err; - } - } else if (rc != -ENOSYS) { - error_report("get vmport regs pfn returned error %d, rc=%d", - errno, rc); - goto err; - } - - state->buffered_io_page = xenforeignmemory_map(xen_fmem, xen_domid, - PROT_READ|PROT_WRITE, - 1, &bufioreq_pfn, NULL); - if (state->buffered_io_page == NULL) { - error_report("map buffered IO page returned error %d", errno); - goto err; - } - - /* Note: cpus is empty at this point in init */ - state->cpu_by_vcpu_id = g_malloc0(max_cpus * sizeof(CPUState *)); - - rc = xen_set_ioreq_server_state(xen_domid, state->ioservid, true); - if (rc < 0) { - error_report("failed to enable ioreq server info: error %d handle=%p", - errno, xen_xc); - goto err; - } - - state->ioreq_local_port = g_malloc0(max_cpus * sizeof (evtchn_port_t)); - - /* FIXME: how about if we overflow the page here? 
*/ - for (i = 0; i < max_cpus; i++) { - rc = xenevtchn_bind_interdomain(state->xce_handle, xen_domid, - xen_vcpu_eport(state->shared_page, i)); - if (rc == -1) { - error_report("shared evtchn %d bind error %d", i, errno); - goto err; - } - state->ioreq_local_port[i] = rc; - } - - rc = xenevtchn_bind_interdomain(state->xce_handle, xen_domid, - bufioreq_evtchn); - if (rc == -1) { - error_report("buffered evtchn bind error %d", errno); - goto err; - } - state->bufioreq_local_port = rc; - - /* Init RAM management */ - xen_map_cache_init(xen_phys_offset_to_gaddr, state); - xen_ram_init(pcms, ram_size, ram_memory); - - qemu_add_vm_change_state_handler(xen_hvm_change_state_handler, state); - - state->memory_listener = xen_memory_listener; - QLIST_INIT(&state->physmap); - memory_listener_register(&state->memory_listener, &address_space_memory); - state->log_for_dirtybit = NULL; - - state->io_listener = xen_io_listener; - memory_listener_register(&state->io_listener, &address_space_io); - - state->device_listener = xen_device_listener; - device_listener_register(&state->device_listener); - - /* Initialize backend core & drivers */ - if (xen_be_init() != 0) { - error_report("xen backend core setup failed"); - goto err; - } - xen_be_register_common(); - xen_read_physmap(state); - - /* Disable ACPI build because Xen handles it */ - pcms->acpi_build_enabled = false; - - return; - -err: - error_report("xen hardware virtual machine initialisation failed"); - exit(1); -} - -void destroy_hvm_domain(bool reboot) -{ - xc_interface *xc_handle; - int sts; - - xc_handle = xc_interface_open(0, 0, 0); - if (xc_handle == NULL) { - fprintf(stderr, "Cannot acquire xenctrl handle\n"); - } else { - sts = xc_domain_shutdown(xc_handle, xen_domid, - reboot ? SHUTDOWN_reboot : SHUTDOWN_poweroff); - if (sts != 0) { - fprintf(stderr, "xc_domain_shutdown failed to issue %s, " - "sts %d, %s\n", reboot ? "reboot" : "poweroff", - sts, strerror(errno)); - } else { - fprintf(stderr, "Issued domain %d %s\n", xen_domid, - reboot ? "reboot" : "poweroff"); - } - xc_interface_close(xc_handle); - } -} - -void xen_register_framebuffer(MemoryRegion *mr) -{ - framebuffer = mr; -} - -void xen_shutdown_fatal_error(const char *fmt, ...) 
-{ - va_list ap; - - va_start(ap, fmt); - vfprintf(stderr, fmt, ap); - va_end(ap); - fprintf(stderr, "Will destroy the domain.\n"); - /* destroy the domain */ - qemu_system_shutdown_request(); -} - -void xen_hvm_modified_memory(ram_addr_t start, ram_addr_t length) -{ - if (unlikely(xen_in_migration)) { - int rc; - ram_addr_t start_pfn, nb_pages; - - if (length == 0) { - length = TARGET_PAGE_SIZE; - } - start_pfn = start >> TARGET_PAGE_BITS; - nb_pages = ((start + length + TARGET_PAGE_SIZE - 1) >> TARGET_PAGE_BITS) - - start_pfn; - rc = xen_modified_memory(xen_domid, start_pfn, nb_pages); - if (rc) { - fprintf(stderr, - "%s failed for "RAM_ADDR_FMT" ("RAM_ADDR_FMT"): %i, %s\n", - __func__, start, nb_pages, rc, strerror(-rc)); - } - } -} - -void qmp_xen_set_global_dirty_log(bool enable, Error **errp) -{ - if (enable) { - memory_global_dirty_log_start(); - } else { - memory_global_dirty_log_stop(); - } -} -- cgit v1.2.3-55-g7522 From 28b99f473bda682385da944b0404aedbe11ea0dc Mon Sep 17 00:00:00 2001 From: Anthony Xu Date: Wed, 5 Apr 2017 16:21:31 -0700 Subject: move xen-mapcache.c to hw/i386/xen/ move xen-mapcache.c to hw/i386/xen/ Signed-off -by: Anthony Xu Reviewed-by: Stefano Stabellini --- Makefile.target | 3 - default-configs/i386-softmmu.mak | 1 - default-configs/x86_64-softmmu.mak | 1 - hw/i386/xen/Makefile.objs | 2 +- hw/i386/xen/trace-events | 6 + hw/i386/xen/xen-mapcache.c | 459 +++++++++++++++++++++++++++++++++++++ trace-events | 5 - xen-mapcache.c | 459 ------------------------------------- 8 files changed, 466 insertions(+), 470 deletions(-) create mode 100644 hw/i386/xen/xen-mapcache.c delete mode 100644 xen-mapcache.c (limited to 'hw') diff --git a/Makefile.target b/Makefile.target index d5ff0c736d..a535980110 100644 --- a/Makefile.target +++ b/Makefile.target @@ -149,9 +149,6 @@ obj-y += dump.o obj-y += migration/ram.o migration/savevm.o LIBS := $(libs_softmmu) $(LIBS) -# xen support -obj-$(CONFIG_XEN_I386) += xen-mapcache.o - # Hardware support ifeq ($(TARGET_NAME), sparc64) obj-y += hw/sparc64/ diff --git a/default-configs/i386-softmmu.mak b/default-configs/i386-softmmu.mak index 029e95202a..d2ab2f6655 100644 --- a/default-configs/i386-softmmu.mak +++ b/default-configs/i386-softmmu.mak @@ -39,7 +39,6 @@ CONFIG_TPM_TIS=$(CONFIG_TPM) CONFIG_MC146818RTC=y CONFIG_PCI_PIIX=y CONFIG_WDT_IB700=y -CONFIG_XEN_I386=$(CONFIG_XEN) CONFIG_ISA_DEBUG=y CONFIG_ISA_TESTDEV=y CONFIG_VMPORT=y diff --git a/default-configs/x86_64-softmmu.mak b/default-configs/x86_64-softmmu.mak index d1d7432f74..9bde2f1c4b 100644 --- a/default-configs/x86_64-softmmu.mak +++ b/default-configs/x86_64-softmmu.mak @@ -39,7 +39,6 @@ CONFIG_TPM_TIS=$(CONFIG_TPM) CONFIG_MC146818RTC=y CONFIG_PCI_PIIX=y CONFIG_WDT_IB700=y -CONFIG_XEN_I386=$(CONFIG_XEN) CONFIG_ISA_DEBUG=y CONFIG_ISA_TESTDEV=y CONFIG_VMPORT=y diff --git a/hw/i386/xen/Makefile.objs b/hw/i386/xen/Makefile.objs index daf4f53fb0..be9d10cf2a 100644 --- a/hw/i386/xen/Makefile.objs +++ b/hw/i386/xen/Makefile.objs @@ -1 +1 @@ -obj-y += xen_platform.o xen_apic.o xen_pvdevice.o xen-hvm.o +obj-y += xen_platform.o xen_apic.o xen_pvdevice.o xen-hvm.o xen-mapcache.o diff --git a/hw/i386/xen/trace-events b/hw/i386/xen/trace-events index f25d622d09..547438db13 100644 --- a/hw/i386/xen/trace-events +++ b/hw/i386/xen/trace-events @@ -15,3 +15,9 @@ cpu_ioreq_pio(void *req, uint32_t dir, uint32_t df, uint32_t data_is_ptr, uint64 cpu_ioreq_pio_read_reg(void *req, uint64_t data, uint64_t addr, uint32_t size) "I/O=%p pio read reg data=%#"PRIx64" port=%#"PRIx64" size=%d" 
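Each line in a trace-events file declares one trace point: a name, a C prototype for its arguments, and a printf-style format string. QEMU's tracetool generates a trace_<name>() helper from every declaration, which is what the C code calls (trace_cpu_ioreq_pio(), trace_xen_map_cache(), and so on). A hand-written approximation of the helper generated for one of the declarations added below, assuming the simple stderr backend; the real generated code depends on the backend selected at configure time:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Approximates the generated helper for the declaration:
     *   xen_map_cache(uint64_t phys_addr) "want %#"PRIx64          */
    static inline void trace_xen_map_cache(uint64_t phys_addr)
    {
        fprintf(stderr, "xen_map_cache want %#" PRIx64 "\n", phys_addr);
    }
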
cpu_ioreq_pio_write_reg(void *req, uint64_t data, uint64_t addr, uint32_t size) "I/O=%p pio write reg data=%#"PRIx64" port=%#"PRIx64" size=%d" cpu_ioreq_move(void *req, uint32_t dir, uint32_t df, uint32_t data_is_ptr, uint64_t addr, uint64_t data, uint32_t count, uint32_t size) "I/O=%p copy dir=%d df=%d ptr=%d port=%#"PRIx64" data=%#"PRIx64" count=%d size=%d" + +# xen-mapcache.c +xen_map_cache(uint64_t phys_addr) "want %#"PRIx64 +xen_remap_bucket(uint64_t index) "index %#"PRIx64 +xen_map_cache_return(void* ptr) "%p" + diff --git a/hw/i386/xen/xen-mapcache.c b/hw/i386/xen/xen-mapcache.c new file mode 100644 index 0000000000..31debdfb2c --- /dev/null +++ b/hw/i386/xen/xen-mapcache.c @@ -0,0 +1,459 @@ +/* + * Copyright (C) 2011 Citrix Ltd. + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + * + * Contributions after 2012-01-13 are licensed under the terms of the + * GNU GPL, version 2 or (at your option) any later version. + */ + +#include "qemu/osdep.h" + +#include + +#include "hw/xen/xen_backend.h" +#include "sysemu/blockdev.h" +#include "qemu/bitmap.h" + +#include + +#include "sysemu/xen-mapcache.h" +#include "trace.h" + + +//#define MAPCACHE_DEBUG + +#ifdef MAPCACHE_DEBUG +# define DPRINTF(fmt, ...) do { \ + fprintf(stderr, "xen_mapcache: " fmt, ## __VA_ARGS__); \ +} while (0) +#else +# define DPRINTF(fmt, ...) do { } while (0) +#endif + +#if HOST_LONG_BITS == 32 +# define MCACHE_BUCKET_SHIFT 16 +# define MCACHE_MAX_SIZE (1UL<<31) /* 2GB Cap */ +#else +# define MCACHE_BUCKET_SHIFT 20 +# define MCACHE_MAX_SIZE (1UL<<35) /* 32GB Cap */ +#endif +#define MCACHE_BUCKET_SIZE (1UL << MCACHE_BUCKET_SHIFT) + +/* This is the size of the virtual address space reserve to QEMU that will not + * be use by MapCache. + * From empirical tests I observed that qemu use 75MB more than the + * max_mcache_size. + */ +#define NON_MCACHE_MEMORY_SIZE (80 * 1024 * 1024) + +typedef struct MapCacheEntry { + hwaddr paddr_index; + uint8_t *vaddr_base; + unsigned long *valid_mapping; + uint8_t lock; + hwaddr size; + struct MapCacheEntry *next; +} MapCacheEntry; + +typedef struct MapCacheRev { + uint8_t *vaddr_req; + hwaddr paddr_index; + hwaddr size; + QTAILQ_ENTRY(MapCacheRev) next; +} MapCacheRev; + +typedef struct MapCache { + MapCacheEntry *entry; + unsigned long nr_buckets; + QTAILQ_HEAD(map_cache_head, MapCacheRev) locked_entries; + + /* For most cases (>99.9%), the page address is the same. 
*/ + MapCacheEntry *last_entry; + unsigned long max_mcache_size; + unsigned int mcache_bucket_shift; + + phys_offset_to_gaddr_t phys_offset_to_gaddr; + QemuMutex lock; + void *opaque; +} MapCache; + +static MapCache *mapcache; + +static inline void mapcache_lock(void) +{ + qemu_mutex_lock(&mapcache->lock); +} + +static inline void mapcache_unlock(void) +{ + qemu_mutex_unlock(&mapcache->lock); +} + +static inline int test_bits(int nr, int size, const unsigned long *addr) +{ + unsigned long res = find_next_zero_bit(addr, size + nr, nr); + if (res >= nr + size) + return 1; + else + return 0; +} + +void xen_map_cache_init(phys_offset_to_gaddr_t f, void *opaque) +{ + unsigned long size; + struct rlimit rlimit_as; + + mapcache = g_malloc0(sizeof (MapCache)); + + mapcache->phys_offset_to_gaddr = f; + mapcache->opaque = opaque; + qemu_mutex_init(&mapcache->lock); + + QTAILQ_INIT(&mapcache->locked_entries); + + if (geteuid() == 0) { + rlimit_as.rlim_cur = RLIM_INFINITY; + rlimit_as.rlim_max = RLIM_INFINITY; + mapcache->max_mcache_size = MCACHE_MAX_SIZE; + } else { + getrlimit(RLIMIT_AS, &rlimit_as); + rlimit_as.rlim_cur = rlimit_as.rlim_max; + + if (rlimit_as.rlim_max != RLIM_INFINITY) { + fprintf(stderr, "Warning: QEMU's maximum size of virtual" + " memory is not infinity.\n"); + } + if (rlimit_as.rlim_max < MCACHE_MAX_SIZE + NON_MCACHE_MEMORY_SIZE) { + mapcache->max_mcache_size = rlimit_as.rlim_max - + NON_MCACHE_MEMORY_SIZE; + } else { + mapcache->max_mcache_size = MCACHE_MAX_SIZE; + } + } + + setrlimit(RLIMIT_AS, &rlimit_as); + + mapcache->nr_buckets = + (((mapcache->max_mcache_size >> XC_PAGE_SHIFT) + + (1UL << (MCACHE_BUCKET_SHIFT - XC_PAGE_SHIFT)) - 1) >> + (MCACHE_BUCKET_SHIFT - XC_PAGE_SHIFT)); + + size = mapcache->nr_buckets * sizeof (MapCacheEntry); + size = (size + XC_PAGE_SIZE - 1) & ~(XC_PAGE_SIZE - 1); + DPRINTF("%s, nr_buckets = %lx size %lu\n", __func__, + mapcache->nr_buckets, size); + mapcache->entry = g_malloc0(size); +} + +static void xen_remap_bucket(MapCacheEntry *entry, + hwaddr size, + hwaddr address_index) +{ + uint8_t *vaddr_base; + xen_pfn_t *pfns; + int *err; + unsigned int i; + hwaddr nb_pfn = size >> XC_PAGE_SHIFT; + + trace_xen_remap_bucket(address_index); + + pfns = g_malloc0(nb_pfn * sizeof (xen_pfn_t)); + err = g_malloc0(nb_pfn * sizeof (int)); + + if (entry->vaddr_base != NULL) { + ram_block_notify_remove(entry->vaddr_base, entry->size); + if (munmap(entry->vaddr_base, entry->size) != 0) { + perror("unmap fails"); + exit(-1); + } + } + g_free(entry->valid_mapping); + entry->valid_mapping = NULL; + + for (i = 0; i < nb_pfn; i++) { + pfns[i] = (address_index << (MCACHE_BUCKET_SHIFT-XC_PAGE_SHIFT)) + i; + } + + vaddr_base = xenforeignmemory_map(xen_fmem, xen_domid, PROT_READ|PROT_WRITE, + nb_pfn, pfns, err); + if (vaddr_base == NULL) { + perror("xenforeignmemory_map"); + exit(-1); + } + + entry->vaddr_base = vaddr_base; + entry->paddr_index = address_index; + entry->size = size; + entry->valid_mapping = (unsigned long *) g_malloc0(sizeof(unsigned long) * + BITS_TO_LONGS(size >> XC_PAGE_SHIFT)); + + ram_block_notify_add(entry->vaddr_base, entry->size); + bitmap_zero(entry->valid_mapping, nb_pfn); + for (i = 0; i < nb_pfn; i++) { + if (!err[i]) { + bitmap_set(entry->valid_mapping, i, 1); + } + } + + g_free(pfns); + g_free(err); +} + +static uint8_t *xen_map_cache_unlocked(hwaddr phys_addr, hwaddr size, + uint8_t lock) +{ + MapCacheEntry *entry, *pentry = NULL; + hwaddr address_index; + hwaddr address_offset; + hwaddr cache_size = size; + hwaddr test_bit_size; + bool 
translated = false; + +tryagain: + address_index = phys_addr >> MCACHE_BUCKET_SHIFT; + address_offset = phys_addr & (MCACHE_BUCKET_SIZE - 1); + + trace_xen_map_cache(phys_addr); + + /* test_bit_size is always a multiple of XC_PAGE_SIZE */ + if (size) { + test_bit_size = size + (phys_addr & (XC_PAGE_SIZE - 1)); + + if (test_bit_size % XC_PAGE_SIZE) { + test_bit_size += XC_PAGE_SIZE - (test_bit_size % XC_PAGE_SIZE); + } + } else { + test_bit_size = XC_PAGE_SIZE; + } + + if (mapcache->last_entry != NULL && + mapcache->last_entry->paddr_index == address_index && + !lock && !size && + test_bits(address_offset >> XC_PAGE_SHIFT, + test_bit_size >> XC_PAGE_SHIFT, + mapcache->last_entry->valid_mapping)) { + trace_xen_map_cache_return(mapcache->last_entry->vaddr_base + address_offset); + return mapcache->last_entry->vaddr_base + address_offset; + } + + /* size is always a multiple of MCACHE_BUCKET_SIZE */ + if (size) { + cache_size = size + address_offset; + if (cache_size % MCACHE_BUCKET_SIZE) { + cache_size += MCACHE_BUCKET_SIZE - (cache_size % MCACHE_BUCKET_SIZE); + } + } else { + cache_size = MCACHE_BUCKET_SIZE; + } + + entry = &mapcache->entry[address_index % mapcache->nr_buckets]; + + while (entry && entry->lock && entry->vaddr_base && + (entry->paddr_index != address_index || entry->size != cache_size || + !test_bits(address_offset >> XC_PAGE_SHIFT, + test_bit_size >> XC_PAGE_SHIFT, + entry->valid_mapping))) { + pentry = entry; + entry = entry->next; + } + if (!entry) { + entry = g_malloc0(sizeof (MapCacheEntry)); + pentry->next = entry; + xen_remap_bucket(entry, cache_size, address_index); + } else if (!entry->lock) { + if (!entry->vaddr_base || entry->paddr_index != address_index || + entry->size != cache_size || + !test_bits(address_offset >> XC_PAGE_SHIFT, + test_bit_size >> XC_PAGE_SHIFT, + entry->valid_mapping)) { + xen_remap_bucket(entry, cache_size, address_index); + } + } + + if(!test_bits(address_offset >> XC_PAGE_SHIFT, + test_bit_size >> XC_PAGE_SHIFT, + entry->valid_mapping)) { + mapcache->last_entry = NULL; + if (!translated && mapcache->phys_offset_to_gaddr) { + phys_addr = mapcache->phys_offset_to_gaddr(phys_addr, size, mapcache->opaque); + translated = true; + goto tryagain; + } + trace_xen_map_cache_return(NULL); + return NULL; + } + + mapcache->last_entry = entry; + if (lock) { + MapCacheRev *reventry = g_malloc0(sizeof(MapCacheRev)); + entry->lock++; + reventry->vaddr_req = mapcache->last_entry->vaddr_base + address_offset; + reventry->paddr_index = mapcache->last_entry->paddr_index; + reventry->size = entry->size; + QTAILQ_INSERT_HEAD(&mapcache->locked_entries, reventry, next); + } + + trace_xen_map_cache_return(mapcache->last_entry->vaddr_base + address_offset); + return mapcache->last_entry->vaddr_base + address_offset; +} + +uint8_t *xen_map_cache(hwaddr phys_addr, hwaddr size, + uint8_t lock) +{ + uint8_t *p; + + mapcache_lock(); + p = xen_map_cache_unlocked(phys_addr, size, lock); + mapcache_unlock(); + return p; +} + +ram_addr_t xen_ram_addr_from_mapcache(void *ptr) +{ + MapCacheEntry *entry = NULL; + MapCacheRev *reventry; + hwaddr paddr_index; + hwaddr size; + ram_addr_t raddr; + int found = 0; + + mapcache_lock(); + QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) { + if (reventry->vaddr_req == ptr) { + paddr_index = reventry->paddr_index; + size = reventry->size; + found = 1; + break; + } + } + if (!found) { + fprintf(stderr, "%s, could not find %p\n", __func__, ptr); + QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) { + DPRINTF(" 
"TARGET_FMT_plx" -> %p is present\n", reventry->paddr_index, + reventry->vaddr_req); + } + abort(); + return 0; + } + + entry = &mapcache->entry[paddr_index % mapcache->nr_buckets]; + while (entry && (entry->paddr_index != paddr_index || entry->size != size)) { + entry = entry->next; + } + if (!entry) { + DPRINTF("Trying to find address %p that is not in the mapcache!\n", ptr); + raddr = 0; + } else { + raddr = (reventry->paddr_index << MCACHE_BUCKET_SHIFT) + + ((unsigned long) ptr - (unsigned long) entry->vaddr_base); + } + mapcache_unlock(); + return raddr; +} + +static void xen_invalidate_map_cache_entry_unlocked(uint8_t *buffer) +{ + MapCacheEntry *entry = NULL, *pentry = NULL; + MapCacheRev *reventry; + hwaddr paddr_index; + hwaddr size; + int found = 0; + + QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) { + if (reventry->vaddr_req == buffer) { + paddr_index = reventry->paddr_index; + size = reventry->size; + found = 1; + break; + } + } + if (!found) { + DPRINTF("%s, could not find %p\n", __func__, buffer); + QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) { + DPRINTF(" "TARGET_FMT_plx" -> %p is present\n", reventry->paddr_index, reventry->vaddr_req); + } + return; + } + QTAILQ_REMOVE(&mapcache->locked_entries, reventry, next); + g_free(reventry); + + if (mapcache->last_entry != NULL && + mapcache->last_entry->paddr_index == paddr_index) { + mapcache->last_entry = NULL; + } + + entry = &mapcache->entry[paddr_index % mapcache->nr_buckets]; + while (entry && (entry->paddr_index != paddr_index || entry->size != size)) { + pentry = entry; + entry = entry->next; + } + if (!entry) { + DPRINTF("Trying to unmap address %p that is not in the mapcache!\n", buffer); + return; + } + entry->lock--; + if (entry->lock > 0 || pentry == NULL) { + return; + } + + pentry->next = entry->next; + ram_block_notify_remove(entry->vaddr_base, entry->size); + if (munmap(entry->vaddr_base, entry->size) != 0) { + perror("unmap fails"); + exit(-1); + } + g_free(entry->valid_mapping); + g_free(entry); +} + +void xen_invalidate_map_cache_entry(uint8_t *buffer) +{ + mapcache_lock(); + xen_invalidate_map_cache_entry_unlocked(buffer); + mapcache_unlock(); +} + +void xen_invalidate_map_cache(void) +{ + unsigned long i; + MapCacheRev *reventry; + + /* Flush pending AIO before destroying the mapcache */ + bdrv_drain_all(); + + mapcache_lock(); + + QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) { + DPRINTF("There should be no locked mappings at this time, " + "but "TARGET_FMT_plx" -> %p is present\n", + reventry->paddr_index, reventry->vaddr_req); + } + + for (i = 0; i < mapcache->nr_buckets; i++) { + MapCacheEntry *entry = &mapcache->entry[i]; + + if (entry->vaddr_base == NULL) { + continue; + } + if (entry->lock > 0) { + continue; + } + + if (munmap(entry->vaddr_base, entry->size) != 0) { + perror("unmap fails"); + exit(-1); + } + + entry->paddr_index = 0; + entry->vaddr_base = NULL; + entry->size = 0; + g_free(entry->valid_mapping); + entry->valid_mapping = NULL; + } + + mapcache->last_entry = NULL; + + mapcache_unlock(); +} diff --git a/trace-events b/trace-events index 4e1448786f..e582d6315d 100644 --- a/trace-events +++ b/trace-events @@ -48,11 +48,6 @@ spice_vmc_register_interface(void *scd) "spice vmc registered interface %p" spice_vmc_unregister_interface(void *scd) "spice vmc unregistered interface %p" spice_vmc_event(int event) "spice vmc event %d" -# xen-mapcache.c -xen_map_cache(uint64_t phys_addr) "want %#"PRIx64 -xen_remap_bucket(uint64_t index) "index %#"PRIx64 
-xen_map_cache_return(void* ptr) "%p" - # monitor.c monitor_protocol_event_handler(uint32_t event, void *qdict) "event=%d data=%p" monitor_protocol_event_emit(uint32_t event, void *data) "event=%d data=%p" diff --git a/xen-mapcache.c b/xen-mapcache.c deleted file mode 100644 index 1a96d2e5db..0000000000 --- a/xen-mapcache.c +++ /dev/null @@ -1,459 +0,0 @@ -/* - * Copyright (C) 2011 Citrix Ltd. - * - * This work is licensed under the terms of the GNU GPL, version 2. See - * the COPYING file in the top-level directory. - * - * Contributions after 2012-01-13 are licensed under the terms of the - * GNU GPL, version 2 or (at your option) any later version. - */ - -#include "qemu/osdep.h" - -#include - -#include "hw/xen/xen_backend.h" -#include "sysemu/blockdev.h" -#include "qemu/bitmap.h" - -#include - -#include "sysemu/xen-mapcache.h" -#include "trace-root.h" - - -//#define MAPCACHE_DEBUG - -#ifdef MAPCACHE_DEBUG -# define DPRINTF(fmt, ...) do { \ - fprintf(stderr, "xen_mapcache: " fmt, ## __VA_ARGS__); \ -} while (0) -#else -# define DPRINTF(fmt, ...) do { } while (0) -#endif - -#if HOST_LONG_BITS == 32 -# define MCACHE_BUCKET_SHIFT 16 -# define MCACHE_MAX_SIZE (1UL<<31) /* 2GB Cap */ -#else -# define MCACHE_BUCKET_SHIFT 20 -# define MCACHE_MAX_SIZE (1UL<<35) /* 32GB Cap */ -#endif -#define MCACHE_BUCKET_SIZE (1UL << MCACHE_BUCKET_SHIFT) - -/* This is the size of the virtual address space reserve to QEMU that will not - * be use by MapCache. - * From empirical tests I observed that qemu use 75MB more than the - * max_mcache_size. - */ -#define NON_MCACHE_MEMORY_SIZE (80 * 1024 * 1024) - -typedef struct MapCacheEntry { - hwaddr paddr_index; - uint8_t *vaddr_base; - unsigned long *valid_mapping; - uint8_t lock; - hwaddr size; - struct MapCacheEntry *next; -} MapCacheEntry; - -typedef struct MapCacheRev { - uint8_t *vaddr_req; - hwaddr paddr_index; - hwaddr size; - QTAILQ_ENTRY(MapCacheRev) next; -} MapCacheRev; - -typedef struct MapCache { - MapCacheEntry *entry; - unsigned long nr_buckets; - QTAILQ_HEAD(map_cache_head, MapCacheRev) locked_entries; - - /* For most cases (>99.9%), the page address is the same. 
*/ - MapCacheEntry *last_entry; - unsigned long max_mcache_size; - unsigned int mcache_bucket_shift; - - phys_offset_to_gaddr_t phys_offset_to_gaddr; - QemuMutex lock; - void *opaque; -} MapCache; - -static MapCache *mapcache; - -static inline void mapcache_lock(void) -{ - qemu_mutex_lock(&mapcache->lock); -} - -static inline void mapcache_unlock(void) -{ - qemu_mutex_unlock(&mapcache->lock); -} - -static inline int test_bits(int nr, int size, const unsigned long *addr) -{ - unsigned long res = find_next_zero_bit(addr, size + nr, nr); - if (res >= nr + size) - return 1; - else - return 0; -} - -void xen_map_cache_init(phys_offset_to_gaddr_t f, void *opaque) -{ - unsigned long size; - struct rlimit rlimit_as; - - mapcache = g_malloc0(sizeof (MapCache)); - - mapcache->phys_offset_to_gaddr = f; - mapcache->opaque = opaque; - qemu_mutex_init(&mapcache->lock); - - QTAILQ_INIT(&mapcache->locked_entries); - - if (geteuid() == 0) { - rlimit_as.rlim_cur = RLIM_INFINITY; - rlimit_as.rlim_max = RLIM_INFINITY; - mapcache->max_mcache_size = MCACHE_MAX_SIZE; - } else { - getrlimit(RLIMIT_AS, &rlimit_as); - rlimit_as.rlim_cur = rlimit_as.rlim_max; - - if (rlimit_as.rlim_max != RLIM_INFINITY) { - fprintf(stderr, "Warning: QEMU's maximum size of virtual" - " memory is not infinity.\n"); - } - if (rlimit_as.rlim_max < MCACHE_MAX_SIZE + NON_MCACHE_MEMORY_SIZE) { - mapcache->max_mcache_size = rlimit_as.rlim_max - - NON_MCACHE_MEMORY_SIZE; - } else { - mapcache->max_mcache_size = MCACHE_MAX_SIZE; - } - } - - setrlimit(RLIMIT_AS, &rlimit_as); - - mapcache->nr_buckets = - (((mapcache->max_mcache_size >> XC_PAGE_SHIFT) + - (1UL << (MCACHE_BUCKET_SHIFT - XC_PAGE_SHIFT)) - 1) >> - (MCACHE_BUCKET_SHIFT - XC_PAGE_SHIFT)); - - size = mapcache->nr_buckets * sizeof (MapCacheEntry); - size = (size + XC_PAGE_SIZE - 1) & ~(XC_PAGE_SIZE - 1); - DPRINTF("%s, nr_buckets = %lx size %lu\n", __func__, - mapcache->nr_buckets, size); - mapcache->entry = g_malloc0(size); -} - -static void xen_remap_bucket(MapCacheEntry *entry, - hwaddr size, - hwaddr address_index) -{ - uint8_t *vaddr_base; - xen_pfn_t *pfns; - int *err; - unsigned int i; - hwaddr nb_pfn = size >> XC_PAGE_SHIFT; - - trace_xen_remap_bucket(address_index); - - pfns = g_malloc0(nb_pfn * sizeof (xen_pfn_t)); - err = g_malloc0(nb_pfn * sizeof (int)); - - if (entry->vaddr_base != NULL) { - ram_block_notify_remove(entry->vaddr_base, entry->size); - if (munmap(entry->vaddr_base, entry->size) != 0) { - perror("unmap fails"); - exit(-1); - } - } - g_free(entry->valid_mapping); - entry->valid_mapping = NULL; - - for (i = 0; i < nb_pfn; i++) { - pfns[i] = (address_index << (MCACHE_BUCKET_SHIFT-XC_PAGE_SHIFT)) + i; - } - - vaddr_base = xenforeignmemory_map(xen_fmem, xen_domid, PROT_READ|PROT_WRITE, - nb_pfn, pfns, err); - if (vaddr_base == NULL) { - perror("xenforeignmemory_map"); - exit(-1); - } - - entry->vaddr_base = vaddr_base; - entry->paddr_index = address_index; - entry->size = size; - entry->valid_mapping = (unsigned long *) g_malloc0(sizeof(unsigned long) * - BITS_TO_LONGS(size >> XC_PAGE_SHIFT)); - - ram_block_notify_add(entry->vaddr_base, entry->size); - bitmap_zero(entry->valid_mapping, nb_pfn); - for (i = 0; i < nb_pfn; i++) { - if (!err[i]) { - bitmap_set(entry->valid_mapping, i, 1); - } - } - - g_free(pfns); - g_free(err); -} - -static uint8_t *xen_map_cache_unlocked(hwaddr phys_addr, hwaddr size, - uint8_t lock) -{ - MapCacheEntry *entry, *pentry = NULL; - hwaddr address_index; - hwaddr address_offset; - hwaddr cache_size = size; - hwaddr test_bit_size; - bool 
translated = false; - -tryagain: - address_index = phys_addr >> MCACHE_BUCKET_SHIFT; - address_offset = phys_addr & (MCACHE_BUCKET_SIZE - 1); - - trace_xen_map_cache(phys_addr); - - /* test_bit_size is always a multiple of XC_PAGE_SIZE */ - if (size) { - test_bit_size = size + (phys_addr & (XC_PAGE_SIZE - 1)); - - if (test_bit_size % XC_PAGE_SIZE) { - test_bit_size += XC_PAGE_SIZE - (test_bit_size % XC_PAGE_SIZE); - } - } else { - test_bit_size = XC_PAGE_SIZE; - } - - if (mapcache->last_entry != NULL && - mapcache->last_entry->paddr_index == address_index && - !lock && !size && - test_bits(address_offset >> XC_PAGE_SHIFT, - test_bit_size >> XC_PAGE_SHIFT, - mapcache->last_entry->valid_mapping)) { - trace_xen_map_cache_return(mapcache->last_entry->vaddr_base + address_offset); - return mapcache->last_entry->vaddr_base + address_offset; - } - - /* size is always a multiple of MCACHE_BUCKET_SIZE */ - if (size) { - cache_size = size + address_offset; - if (cache_size % MCACHE_BUCKET_SIZE) { - cache_size += MCACHE_BUCKET_SIZE - (cache_size % MCACHE_BUCKET_SIZE); - } - } else { - cache_size = MCACHE_BUCKET_SIZE; - } - - entry = &mapcache->entry[address_index % mapcache->nr_buckets]; - - while (entry && entry->lock && entry->vaddr_base && - (entry->paddr_index != address_index || entry->size != cache_size || - !test_bits(address_offset >> XC_PAGE_SHIFT, - test_bit_size >> XC_PAGE_SHIFT, - entry->valid_mapping))) { - pentry = entry; - entry = entry->next; - } - if (!entry) { - entry = g_malloc0(sizeof (MapCacheEntry)); - pentry->next = entry; - xen_remap_bucket(entry, cache_size, address_index); - } else if (!entry->lock) { - if (!entry->vaddr_base || entry->paddr_index != address_index || - entry->size != cache_size || - !test_bits(address_offset >> XC_PAGE_SHIFT, - test_bit_size >> XC_PAGE_SHIFT, - entry->valid_mapping)) { - xen_remap_bucket(entry, cache_size, address_index); - } - } - - if(!test_bits(address_offset >> XC_PAGE_SHIFT, - test_bit_size >> XC_PAGE_SHIFT, - entry->valid_mapping)) { - mapcache->last_entry = NULL; - if (!translated && mapcache->phys_offset_to_gaddr) { - phys_addr = mapcache->phys_offset_to_gaddr(phys_addr, size, mapcache->opaque); - translated = true; - goto tryagain; - } - trace_xen_map_cache_return(NULL); - return NULL; - } - - mapcache->last_entry = entry; - if (lock) { - MapCacheRev *reventry = g_malloc0(sizeof(MapCacheRev)); - entry->lock++; - reventry->vaddr_req = mapcache->last_entry->vaddr_base + address_offset; - reventry->paddr_index = mapcache->last_entry->paddr_index; - reventry->size = entry->size; - QTAILQ_INSERT_HEAD(&mapcache->locked_entries, reventry, next); - } - - trace_xen_map_cache_return(mapcache->last_entry->vaddr_base + address_offset); - return mapcache->last_entry->vaddr_base + address_offset; -} - -uint8_t *xen_map_cache(hwaddr phys_addr, hwaddr size, - uint8_t lock) -{ - uint8_t *p; - - mapcache_lock(); - p = xen_map_cache_unlocked(phys_addr, size, lock); - mapcache_unlock(); - return p; -} - -ram_addr_t xen_ram_addr_from_mapcache(void *ptr) -{ - MapCacheEntry *entry = NULL; - MapCacheRev *reventry; - hwaddr paddr_index; - hwaddr size; - ram_addr_t raddr; - int found = 0; - - mapcache_lock(); - QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) { - if (reventry->vaddr_req == ptr) { - paddr_index = reventry->paddr_index; - size = reventry->size; - found = 1; - break; - } - } - if (!found) { - fprintf(stderr, "%s, could not find %p\n", __func__, ptr); - QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) { - DPRINTF(" 
"TARGET_FMT_plx" -> %p is present\n", reventry->paddr_index, - reventry->vaddr_req); - } - abort(); - return 0; - } - - entry = &mapcache->entry[paddr_index % mapcache->nr_buckets]; - while (entry && (entry->paddr_index != paddr_index || entry->size != size)) { - entry = entry->next; - } - if (!entry) { - DPRINTF("Trying to find address %p that is not in the mapcache!\n", ptr); - raddr = 0; - } else { - raddr = (reventry->paddr_index << MCACHE_BUCKET_SHIFT) + - ((unsigned long) ptr - (unsigned long) entry->vaddr_base); - } - mapcache_unlock(); - return raddr; -} - -static void xen_invalidate_map_cache_entry_unlocked(uint8_t *buffer) -{ - MapCacheEntry *entry = NULL, *pentry = NULL; - MapCacheRev *reventry; - hwaddr paddr_index; - hwaddr size; - int found = 0; - - QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) { - if (reventry->vaddr_req == buffer) { - paddr_index = reventry->paddr_index; - size = reventry->size; - found = 1; - break; - } - } - if (!found) { - DPRINTF("%s, could not find %p\n", __func__, buffer); - QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) { - DPRINTF(" "TARGET_FMT_plx" -> %p is present\n", reventry->paddr_index, reventry->vaddr_req); - } - return; - } - QTAILQ_REMOVE(&mapcache->locked_entries, reventry, next); - g_free(reventry); - - if (mapcache->last_entry != NULL && - mapcache->last_entry->paddr_index == paddr_index) { - mapcache->last_entry = NULL; - } - - entry = &mapcache->entry[paddr_index % mapcache->nr_buckets]; - while (entry && (entry->paddr_index != paddr_index || entry->size != size)) { - pentry = entry; - entry = entry->next; - } - if (!entry) { - DPRINTF("Trying to unmap address %p that is not in the mapcache!\n", buffer); - return; - } - entry->lock--; - if (entry->lock > 0 || pentry == NULL) { - return; - } - - pentry->next = entry->next; - ram_block_notify_remove(entry->vaddr_base, entry->size); - if (munmap(entry->vaddr_base, entry->size) != 0) { - perror("unmap fails"); - exit(-1); - } - g_free(entry->valid_mapping); - g_free(entry); -} - -void xen_invalidate_map_cache_entry(uint8_t *buffer) -{ - mapcache_lock(); - xen_invalidate_map_cache_entry_unlocked(buffer); - mapcache_unlock(); -} - -void xen_invalidate_map_cache(void) -{ - unsigned long i; - MapCacheRev *reventry; - - /* Flush pending AIO before destroying the mapcache */ - bdrv_drain_all(); - - mapcache_lock(); - - QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) { - DPRINTF("There should be no locked mappings at this time, " - "but "TARGET_FMT_plx" -> %p is present\n", - reventry->paddr_index, reventry->vaddr_req); - } - - for (i = 0; i < mapcache->nr_buckets; i++) { - MapCacheEntry *entry = &mapcache->entry[i]; - - if (entry->vaddr_base == NULL) { - continue; - } - if (entry->lock > 0) { - continue; - } - - if (munmap(entry->vaddr_base, entry->size) != 0) { - perror("unmap fails"); - exit(-1); - } - - entry->paddr_index = 0; - entry->vaddr_base = NULL; - entry->size = 0; - g_free(entry->valid_mapping); - entry->valid_mapping = NULL; - } - - mapcache->last_entry = NULL; - - mapcache_unlock(); -} -- cgit v1.2.3-55-g7522