Diffstat (limited to 'include')

 include/block/aio.h                        |   4
 include/exec/cpu-all.h                     |  23
 include/exec/cpu-common.h                  |  15
 include/exec/memory.h                      | 166
 include/hw/i386/pc.h                       |  12
 include/qemu/main-loop.h                   |   4
 include/qemu/timer.h                       |   2
 include/qemu/typedefs.h                    |   1
 include/standard-headers/linux/input.h     |   1
 include/standard-headers/linux/pci_regs.h  |  15

 10 files changed, 222 insertions(+), 21 deletions(-)
diff --git a/include/block/aio.h b/include/block/aio.h
index c7ae27c91c..ca551e346f 100644
--- a/include/block/aio.h
+++ b/include/block/aio.h
@@ -195,8 +195,8 @@ QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque);
  * aio_notify: Force processing of pending events.
  *
  * Similar to signaling a condition variable, aio_notify forces
- * aio_wait to exit, so that the next call will re-examine pending events.
- * The caller of aio_notify will usually call aio_wait again very soon,
+ * aio_poll to exit, so that the next call will re-examine pending events.
+ * The caller of aio_notify will usually call aio_poll again very soon,
  * or go through another iteration of the GLib main loop. Hence, aio_notify
  * also has the side effect of recalculating the sets of file descriptors
  * that the main loop waits for.
diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
index e9004e5798..ffe43d5654 100644
--- a/include/exec/cpu-all.h
+++ b/include/exec/cpu-all.h
@@ -186,6 +186,29 @@ void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
                        MemTxAttrs attrs, MemTxResult *result);
 void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
                        MemTxAttrs attrs, MemTxResult *result);
+
+uint32_t lduw_phys_cached(MemoryRegionCache *cache, hwaddr addr);
+uint32_t ldl_phys_cached(MemoryRegionCache *cache, hwaddr addr);
+uint64_t ldq_phys_cached(MemoryRegionCache *cache, hwaddr addr);
+void stl_phys_notdirty_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val);
+void stw_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val);
+void stl_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val);
+void stq_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint64_t val);
+
+uint32_t address_space_lduw_cached(MemoryRegionCache *cache, hwaddr addr,
+                                   MemTxAttrs attrs, MemTxResult *result);
+uint32_t address_space_ldl_cached(MemoryRegionCache *cache, hwaddr addr,
+                                  MemTxAttrs attrs, MemTxResult *result);
+uint64_t address_space_ldq_cached(MemoryRegionCache *cache, hwaddr addr,
+                                  MemTxAttrs attrs, MemTxResult *result);
+void address_space_stl_notdirty_cached(MemoryRegionCache *cache, hwaddr addr,
+                                       uint32_t val, MemTxAttrs attrs, MemTxResult *result);
+void address_space_stw_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val,
+                              MemTxAttrs attrs, MemTxResult *result);
+void address_space_stl_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val,
+                              MemTxAttrs attrs, MemTxResult *result);
+void address_space_stq_cached(MemoryRegionCache *cache, hwaddr addr, uint64_t val,
+                              MemTxAttrs attrs, MemTxResult *result);
 #endif
 
 /* page related stuff */
diff --git a/include/exec/cpu-common.h b/include/exec/cpu-common.h
index cffdc130e6..bd15853e51 100644
--- a/include/exec/cpu-common.h
+++ b/include/exec/cpu-common.h
@@ -94,21 +94,6 @@ bool cpu_physical_memory_is_io(hwaddr phys_addr);
  */
 void qemu_flush_coalesced_mmio_buffer(void);
 
-uint32_t ldub_phys(AddressSpace *as, hwaddr addr);
-uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr);
-uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr);
-uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr);
-uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr);
-uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr);
-uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr);
-void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val);
-void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val);
-void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val);
-void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val);
-void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val);
-void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val);
-void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val);
-
 void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
                                    const uint8_t *buf, int len);
 void cpu_flush_icache_range(hwaddr start, int len);
diff --git a/include/exec/memory.h b/include/exec/memory.h
index 9728a2fb1a..64560f61b4 100644
--- a/include/exec/memory.h
+++ b/include/exec/memory.h
@@ -1404,6 +1404,140 @@ void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
                             MemTxAttrs attrs, MemTxResult *result);
 void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
                             MemTxAttrs attrs, MemTxResult *result);
+uint32_t ldub_phys(AddressSpace *as, hwaddr addr);
+uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr);
+uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr);
+uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr);
+uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr);
+uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr);
+uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr);
+void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val);
+void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val);
+void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val);
+void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val);
+void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val);
+void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val);
+void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val);
+
+struct MemoryRegionCache {
+    hwaddr xlat;
+    void *ptr;
+    hwaddr len;
+    MemoryRegion *mr;
+    bool is_write;
+};
+
+/* address_space_cache_init: prepare for repeated access to a physical
+ * memory region
+ *
+ * @cache: #MemoryRegionCache to be filled
+ * @as: #AddressSpace to be accessed
+ * @addr: address within that address space
+ * @len: length of buffer
+ * @is_write: indicates the transfer direction
+ *
+ * Will only work with RAM, and may map a subset of the requested range by
+ * returning a value that is less than @len. On failure, return a negative
+ * errno value.
+ *
+ * Because it only works with RAM, this function can be used for
+ * read-modify-write operations. In this case, is_write should be %true.
+ *
+ * Note that addresses passed to the address_space_*_cached functions
+ * are relative to @addr.
+ */
+int64_t address_space_cache_init(MemoryRegionCache *cache,
+                                 AddressSpace *as,
+                                 hwaddr addr,
+                                 hwaddr len,
+                                 bool is_write);
+
+/**
+ * address_space_cache_invalidate: complete a write to a #MemoryRegionCache
+ *
+ * @cache: The #MemoryRegionCache to operate on.
+ * @addr: The first physical address that was written, relative to the
+ * address that was passed to @address_space_cache_init.
+ * @access_len: The number of bytes that were written starting at @addr.
+ */
+void address_space_cache_invalidate(MemoryRegionCache *cache,
+                                    hwaddr addr,
+                                    hwaddr access_len);
+
+/**
+ * address_space_cache_destroy: free a #MemoryRegionCache
+ *
+ * @cache: The #MemoryRegionCache whose memory should be released.
+ */
+void address_space_cache_destroy(MemoryRegionCache *cache);
+
+/* address_space_ld*_cached: load from a cached #MemoryRegion
+ * address_space_st*_cached: store into a cached #MemoryRegion
+ *
+ * These functions perform a load or store of the byte, word,
+ * longword or quad to the specified address. The address is
+ * a physical address in the AddressSpace, but it must lie within
+ * a #MemoryRegion that was mapped with address_space_cache_init.
+ *
+ * The _le suffixed functions treat the data as little endian;
+ * _be indicates big endian; no suffix indicates "same endianness
+ * as guest CPU".
+ *
+ * The "guest CPU endianness" accessors are deprecated for use outside
+ * target-* code; devices should be CPU-agnostic and use either the LE
+ * or the BE accessors.
+ *
+ * @cache: previously initialized #MemoryRegionCache to be accessed
+ * @addr: address within the address space
+ * @val: data value, for stores
+ * @attrs: memory transaction attributes
+ * @result: location to write the success/failure of the transaction;
+ *   if NULL, this information is discarded
+ */
+uint32_t address_space_ldub_cached(MemoryRegionCache *cache, hwaddr addr,
+                                   MemTxAttrs attrs, MemTxResult *result);
+uint32_t address_space_lduw_le_cached(MemoryRegionCache *cache, hwaddr addr,
+                                      MemTxAttrs attrs, MemTxResult *result);
+uint32_t address_space_lduw_be_cached(MemoryRegionCache *cache, hwaddr addr,
+                                      MemTxAttrs attrs, MemTxResult *result);
+uint32_t address_space_ldl_le_cached(MemoryRegionCache *cache, hwaddr addr,
+                                     MemTxAttrs attrs, MemTxResult *result);
+uint32_t address_space_ldl_be_cached(MemoryRegionCache *cache, hwaddr addr,
+                                     MemTxAttrs attrs, MemTxResult *result);
+uint64_t address_space_ldq_le_cached(MemoryRegionCache *cache, hwaddr addr,
+                                     MemTxAttrs attrs, MemTxResult *result);
+uint64_t address_space_ldq_be_cached(MemoryRegionCache *cache, hwaddr addr,
+                                     MemTxAttrs attrs, MemTxResult *result);
+void address_space_stb_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val,
+                              MemTxAttrs attrs, MemTxResult *result);
+void address_space_stw_le_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val,
+                                 MemTxAttrs attrs, MemTxResult *result);
+void address_space_stw_be_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val,
+                                 MemTxAttrs attrs, MemTxResult *result);
+void address_space_stl_le_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val,
+                                 MemTxAttrs attrs, MemTxResult *result);
+void address_space_stl_be_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val,
+                                 MemTxAttrs attrs, MemTxResult *result);
+void address_space_stq_le_cached(MemoryRegionCache *cache, hwaddr addr, uint64_t val,
+                                 MemTxAttrs attrs, MemTxResult *result);
+void address_space_stq_be_cached(MemoryRegionCache *cache, hwaddr addr, uint64_t val,
+                                 MemTxAttrs attrs, MemTxResult *result);
+
+uint32_t ldub_phys_cached(MemoryRegionCache *cache, hwaddr addr);
+uint32_t lduw_le_phys_cached(MemoryRegionCache *cache, hwaddr addr);
+uint32_t lduw_be_phys_cached(MemoryRegionCache *cache, hwaddr addr);
+uint32_t ldl_le_phys_cached(MemoryRegionCache *cache, hwaddr addr);
+uint32_t ldl_be_phys_cached(MemoryRegionCache *cache, hwaddr addr);
+uint64_t ldq_le_phys_cached(MemoryRegionCache *cache, hwaddr addr);
+uint64_t ldq_be_phys_cached(MemoryRegionCache *cache, hwaddr addr);
+void stb_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val);
+void stw_le_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val);
+void stw_be_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val);
+void stl_le_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val);
+void stl_be_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val);
+void stq_le_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint64_t val);
+void stq_be_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint64_t val);
+
 /* address_space_translate: translate an address range into an address space
  * into a MemoryRegion and an address range into that section. Should be
 * called from an RCU critical section, to avoid that the last reference
@@ -1529,6 +1663,38 @@ MemTxResult address_space_read(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
     return result;
 }
 
+/**
+ * address_space_read_cached: read from a cached RAM region
+ *
+ * @cache: Cached region to be addressed
+ * @addr: address relative to the base of the RAM region
+ * @buf: buffer with the data transferred
+ * @len: length of the data transferred
+ */
+static inline void
+address_space_read_cached(MemoryRegionCache *cache, hwaddr addr,
+                          void *buf, int len)
+{
+    assert(addr < cache->len && len <= cache->len - addr);
+    memcpy(buf, cache->ptr + addr, len);
+}
+
+/**
+ * address_space_write_cached: write to a cached RAM region
+ *
+ * @cache: Cached region to be addressed
+ * @addr: address relative to the base of the RAM region
+ * @buf: buffer with the data transferred
+ * @len: length of the data transferred
+ */
+static inline void
+address_space_write_cached(MemoryRegionCache *cache, hwaddr addr,
+                           void *buf, int len)
+{
+    assert(addr < cache->len && len <= cache->len - addr);
+    memcpy(cache->ptr + addr, buf, len);
+}
+
 #endif
 
 #endif
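Editorial note, not part of the patch: the two memory.h hunks above are the heart of this series. They declare the MemoryRegionCache structure, its init/invalidate/destroy lifecycle, and the cached load/store accessors, so that the address-space translation is paid once in address_space_cache_init and later accesses work on the pre-translated pointer (as the inline memcpy-based read/write helpers show). The sketch below is only an orientation aid; the address space, the 0x100 length and the offsets are invented for illustration.

/* Minimal usage sketch of the MemoryRegionCache API declared above.
 * All concrete values here are hypothetical. */
#include "qemu/osdep.h"
#include "exec/memory.h"

static void cache_example(AddressSpace *as, hwaddr base)
{
    MemoryRegionCache cache;
    uint8_t buf[16];
    uint16_t flags;
    int64_t mapped;

    /* Map up to 0x100 bytes of guest RAM for repeated read-modify-write. */
    mapped = address_space_cache_init(&cache, as, base, 0x100, true);
    if (mapped < 0x100) {
        return; /* negative errno, or only part of the range is RAM */
    }

    /* Offsets passed to the *_cached accessors are relative to @base. */
    flags = lduw_le_phys_cached(&cache, 0x0c);
    stw_le_phys_cached(&cache, 0x0c, flags | 1);

    /* Bulk data goes through the inline memcpy-based helpers. */
    address_space_read_cached(&cache, 0x10, buf, sizeof(buf));

    /* Flush the write before tearing the cache down. */
    address_space_cache_invalidate(&cache, 0x0c, sizeof(uint16_t));
    address_space_cache_destroy(&cache);
}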
diff --git a/include/hw/i386/pc.h b/include/hw/i386/pc.h
index 4b74130559..b22e699c46 100644
--- a/include/hw/i386/pc.h
+++ b/include/hw/i386/pc.h
@@ -63,6 +63,9 @@ struct PCMachineState {
     AcpiNVDIMMState acpi_nvdimm_state;
 
     bool acpi_build_enabled;
+    bool smbus;
+    bool sata;
+    bool pit;
 
     /* RAM information (sizes, addresses, configuration): */
     ram_addr_t below_4g_mem_size, above_4g_mem_size;
@@ -88,6 +91,9 @@ struct PCMachineState {
 #define PC_MACHINE_VMPORT           "vmport"
 #define PC_MACHINE_SMM              "smm"
 #define PC_MACHINE_NVDIMM           "nvdimm"
+#define PC_MACHINE_SMBUS            "smbus"
+#define PC_MACHINE_SATA             "sata"
+#define PC_MACHINE_PIT              "pit"
 
 /**
  * PCMachineClass:
@@ -260,6 +266,7 @@ void pc_basic_device_init(ISABus *isa_bus, qemu_irq *gsi,
                           ISADevice **rtc_state,
                           bool create_fdctrl,
                           bool no_vmport,
+                          bool has_pit,
                           uint32_t hpet_irqs);
 void pc_init_ne2k_isa(ISABus *bus, NICInfo *nd);
 void pc_cmos_init(PCMachineState *pcms,
@@ -372,6 +379,11 @@ bool e820_get_entry(int, uint32_t, uint64_t *, uint64_t *);
 #define PC_COMPAT_2_7 \
     HW_COMPAT_2_7 \
     {\
+        .driver = "kvmclock",\
+        .property = "x-mach-use-reliable-get-clock",\
+        .value = "off",\
+    },\
+    {\
         .driver = TYPE_X86_CPU,\
         .property = "l3-cache",\
         .value = "off",\
diff --git a/include/qemu/main-loop.h b/include/qemu/main-loop.h
index 470f600bbc..a9d4f23cd9 100644
--- a/include/qemu/main-loop.h
+++ b/include/qemu/main-loop.h
@@ -238,7 +238,7 @@ bool qemu_mutex_iothread_locked(void);
  * qemu_mutex_lock_iothread: Lock the main loop mutex.
  *
  * This function locks the main loop mutex. The mutex is taken by
- * qemu_init_main_loop and always taken except while waiting on
+ * main() in vl.c and always taken except while waiting on
  * external events (such as with select). The mutex should be taken
  * by threads other than the main loop thread when calling
  * qemu_bh_new(), qemu_set_fd_handler() and basically all other
@@ -253,7 +253,7 @@ void qemu_mutex_lock_iothread(void);
  * qemu_mutex_unlock_iothread: Unlock the main loop mutex.
  *
  * This function unlocks the main loop mutex. The mutex is taken by
- * qemu_init_main_loop and always taken except while waiting on
+ * main() in vl.c and always taken except while waiting on
  * external events (such as with select). The mutex should be unlocked
  * as soon as possible by threads other than the main loop thread,
  * because it prevents the main loop from processing callbacks,
diff --git a/include/qemu/timer.h b/include/qemu/timer.h
index bdfae004e4..9abed51ae8 100644
--- a/include/qemu/timer.h
+++ b/include/qemu/timer.h
@@ -133,7 +133,7 @@ bool qemu_clock_has_timers(QEMUClockType type);
  * @type: the clock type
  *
  * Determines whether a clock's default timer list
- * has an expired clock.
+ * has an expired timer.
 *
 * Returns: true if the clock's default timer list has
 * an expired timer
diff --git a/include/qemu/typedefs.h b/include/qemu/typedefs.h
index 1b8c30a7a0..9a8bcbde36 100644
--- a/include/qemu/typedefs.h
+++ b/include/qemu/typedefs.h
@@ -45,6 +45,7 @@ typedef struct MachineState MachineState;
 typedef struct MemoryListener MemoryListener;
 typedef struct MemoryMappingList MemoryMappingList;
 typedef struct MemoryRegion MemoryRegion;
+typedef struct MemoryRegionCache MemoryRegionCache;
 typedef struct MemoryRegionSection MemoryRegionSection;
 typedef struct MigrationIncomingState MigrationIncomingState;
 typedef struct MigrationParams MigrationParams;
diff --git a/include/standard-headers/linux/input.h b/include/standard-headers/linux/input.h
index 7361a16b50..b472b8530c 100644
--- a/include/standard-headers/linux/input.h
+++ b/include/standard-headers/linux/input.h
@@ -245,6 +245,7 @@ struct input_mask {
 #define BUS_SPI         0x1C
 #define BUS_RMI         0x1D
 #define BUS_CEC         0x1E
+#define BUS_INTEL_ISHTP 0x1F
 
 /*
  * MT_TOOL types
diff --git a/include/standard-headers/linux/pci_regs.h b/include/standard-headers/linux/pci_regs.h
index 404095124a..e5a2e68b22 100644
--- a/include/standard-headers/linux/pci_regs.h
+++ b/include/standard-headers/linux/pci_regs.h
@@ -612,6 +612,8 @@
  */
 #define PCI_EXP_DEVCAP2                36         /* Device Capabilities 2 */
 #define PCI_EXP_DEVCAP2_ARI            0x00000020 /* Alternative Routing-ID */
+#define PCI_EXP_DEVCAP2_ATOMIC_ROUTE   0x00000040 /* Atomic Op routing */
+#define PCI_EXP_DEVCAP2_ATOMIC_COMP64  0x00000100 /* Atomic 64-bit compare */
 #define PCI_EXP_DEVCAP2_LTR            0x00000800 /* Latency tolerance reporting */
 #define PCI_EXP_DEVCAP2_OBFF_MASK      0x000c0000 /* OBFF support mechanism */
 #define PCI_EXP_DEVCAP2_OBFF_MSG       0x00040000 /* New message signaling */
@@ -619,6 +621,7 @@
 #define PCI_EXP_DEVCTL2                40      /* Device Control 2 */
 #define PCI_EXP_DEVCTL2_COMP_TIMEOUT   0x000f  /* Completion Timeout Value */
 #define PCI_EXP_DEVCTL2_ARI            0x0020  /* Alternative Routing-ID */
+#define PCI_EXP_DEVCTL2_ATOMIC_REQ     0x0040  /* Set Atomic requests */
 #define PCI_EXP_DEVCTL2_IDO_REQ_EN     0x0100  /* Allow IDO for requests */
 #define PCI_EXP_DEVCTL2_IDO_CMP_EN     0x0200  /* Allow IDO for completions */
 #define PCI_EXP_DEVCTL2_LTR_EN         0x0400  /* Enable LTR mechanism */
@@ -671,7 +674,8 @@
 #define PCI_EXT_CAP_ID_PMUX    0x1A    /* Protocol Multiplexing */
 #define PCI_EXT_CAP_ID_PASID   0x1B    /* Process Address Space ID */
 #define PCI_EXT_CAP_ID_DPC     0x1D    /* Downstream Port Containment */
-#define PCI_EXT_CAP_ID_MAX     PCI_EXT_CAP_ID_DPC
+#define PCI_EXT_CAP_ID_PTM     0x1F    /* Precision Time Measurement */
+#define PCI_EXT_CAP_ID_MAX     PCI_EXT_CAP_ID_PTM
 
 #define PCI_EXT_CAP_DSN_SIZEOF 12
 #define PCI_EXT_CAP_MCAST_ENDPOINT_SIZEOF 40
@@ -964,4 +968,13 @@
 #define PCI_EXP_DPC_SOURCE_ID          10          /* DPC Source Identifier */
 
+/* Precision Time Measurement */
+#define PCI_PTM_CAP                    0x04        /* PTM Capability */
+#define PCI_PTM_CAP_REQ                0x00000001  /* Requester capable */
+#define PCI_PTM_CAP_ROOT               0x00000004  /* Root capable */
+#define PCI_PTM_GRANULARITY_MASK       0x0000FF00  /* Clock granularity */
+#define PCI_PTM_CTRL                   0x08        /* PTM Control */
+#define PCI_PTM_CTRL_ENABLE            0x00000001  /* PTM enable */
+#define PCI_PTM_CTRL_ROOT              0x00000002  /* Root select */
+
 #endif /* LINUX_PCI_REGS_H */
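Editorial note, not part of the patch: the new PCI_PTM_* constants above describe the Precision Time Measurement extended capability as two registers, a capability dword at offset 0x04 and a control dword at offset 0x08 within the capability. A hedged sketch of decoding them follows; read_config_dword() is a hypothetical stand-in for whatever configuration-space accessor the caller already has.

#include <stdint.h>
#include <stdio.h>
#include "standard-headers/linux/pci_regs.h"

/* Hypothetical helper: returns the dword at @offset inside the PTM
 * extended capability; any real config-space read routine would do. */
extern uint32_t read_config_dword(uint16_t offset);

static void dump_ptm(void)
{
    uint32_t cap  = read_config_dword(PCI_PTM_CAP);
    uint32_t ctrl = read_config_dword(PCI_PTM_CTRL);

    printf("PTM: requester=%d root=%d granularity=%u enabled=%d\n",
           !!(cap & PCI_PTM_CAP_REQ),
           !!(cap & PCI_PTM_CAP_ROOT),
           (unsigned)((cap & PCI_PTM_GRANULARITY_MASK) >> 8),
           !!(ctrl & PCI_PTM_CTRL_ENABLE));
}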