Diffstat (limited to 'include')
-rw-r--r--  include/block/aio-wait.h               |  17
-rw-r--r--  include/block/blockjob.h               |  59
-rw-r--r--  include/block/nbd.h                    |   2
-rw-r--r--  include/hw/core/sysemu-cpu-ops.h       |   8
-rw-r--r--  include/hw/cxl/cxl_device.h            |   1
-rw-r--r--  include/hw/firmware/smbios.h           |  10
-rw-r--r--  include/hw/pci/pci.h                   |  48
-rw-r--r--  include/hw/virtio/vhost-user-blk.h     |   1
-rw-r--r--  include/hw/virtio/vhost-user-gpio.h    |  35
-rw-r--r--  include/hw/virtio/vhost.h              |  18
-rw-r--r--  include/hw/virtio/virtio-blk-common.h  |  20
-rw-r--r--  include/hw/virtio/virtio.h             |  28
-rw-r--r--  include/monitor/hmp.h                  |   5
-rw-r--r--  include/qemu/coroutine.h               |   4
-rw-r--r--  include/qemu/job.h                     | 306
-rw-r--r--  include/qemu/typedefs.h                |   1
-rw-r--r--  include/sysemu/dump.h                  |  15
17 files changed, 433 insertions, 145 deletions
diff --git a/include/block/aio-wait.h b/include/block/aio-wait.h
index 54840f8622..dd9a7f6461 100644
--- a/include/block/aio-wait.h
+++ b/include/block/aio-wait.h
@@ -59,10 +59,13 @@ typedef struct {
extern AioWait global_aio_wait;
/**
- * AIO_WAIT_WHILE:
+ * AIO_WAIT_WHILE_INTERNAL:
* @ctx: the aio context, or NULL if multiple aio contexts (for which the
* caller does not hold a lock) are involved in the polling condition.
* @cond: wait while this conditional expression is true
+ * @unlock: whether to unlock and then lock again @ctx. This applies
+ * only when waiting for another AioContext from the main loop.
+ * Otherwise it's ignored.
*
* Wait while a condition is true. Use this to implement synchronous
* operations that require event loop activity.
@@ -75,7 +78,7 @@ extern AioWait global_aio_wait;
* wait on conditions between two IOThreads since that could lead to deadlock,
* go via the main loop instead.
*/
-#define AIO_WAIT_WHILE(ctx, cond) ({ \
+#define AIO_WAIT_WHILE_INTERNAL(ctx, cond, unlock) ({ \
bool waited_ = false; \
AioWait *wait_ = &global_aio_wait; \
AioContext *ctx_ = (ctx); \
@@ -92,11 +95,11 @@ extern AioWait global_aio_wait;
assert(qemu_get_current_aio_context() == \
qemu_get_aio_context()); \
while ((cond)) { \
- if (ctx_) { \
+ if (unlock && ctx_) { \
aio_context_release(ctx_); \
} \
aio_poll(qemu_get_aio_context(), true); \
- if (ctx_) { \
+ if (unlock && ctx_) { \
aio_context_acquire(ctx_); \
} \
waited_ = true; \
@@ -105,6 +108,12 @@ extern AioWait global_aio_wait;
qatomic_dec(&wait_->num_waiters); \
waited_; })
+#define AIO_WAIT_WHILE(ctx, cond) \
+ AIO_WAIT_WHILE_INTERNAL(ctx, cond, true)
+
+#define AIO_WAIT_WHILE_UNLOCKED(ctx, cond) \
+ AIO_WAIT_WHILE_INTERNAL(ctx, cond, false)
+
/**
* aio_wait_kick:
* Wake up the main thread if it is waiting on AIO_WAIT_WHILE(). During
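
As a usage sketch (hypothetical helpers, not part of the patch): AIO_WAIT_WHILE() assumes the caller holds the lock on @ctx and drops it around each poll, while the new AIO_WAIT_WHILE_UNLOCKED() assumes the lock is not held and never touches it.

    /* Sketch: wait for a caller-managed 'done' flag set by a BH or request. */
    static void wait_for_done_locked(AioContext *ctx, bool *done)
    {
        /* Caller holds ctx's lock; it is released around each aio_poll(). */
        AIO_WAIT_WHILE(ctx, !*done);
    }

    static void wait_for_done_unlocked(AioContext *ctx, bool *done)
    {
        /* Caller does not hold ctx's lock; it is left alone. */
        AIO_WAIT_WHILE_UNLOCKED(ctx, !*done);
    }
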
diff --git a/include/block/blockjob.h b/include/block/blockjob.h
index 6525e16fd5..03032b2eca 100644
--- a/include/block/blockjob.h
+++ b/include/block/blockjob.h
@@ -40,21 +40,38 @@ typedef struct BlockJobDriver BlockJobDriver;
* Long-running operation on a BlockDriverState.
*/
typedef struct BlockJob {
- /** Data belonging to the generic Job infrastructure */
+ /**
+ * Data belonging to the generic Job infrastructure.
+ * Protected by job mutex.
+ */
Job job;
- /** Status that is published by the query-block-jobs QMP API */
+ /**
+ * Status that is published by the query-block-jobs QMP API.
+ * Protected by job mutex.
+ */
BlockDeviceIoStatus iostatus;
- /** Speed that was set with @block_job_set_speed. */
+ /**
+ * Speed that was set with @block_job_set_speed.
+ * Always modified and read under QEMU global mutex (GLOBAL_STATE_CODE).
+ */
int64_t speed;
- /** Rate limiting data structure for implementing @speed. */
+ /**
+ * Rate limiting data structure for implementing @speed.
+ * RateLimit API is thread-safe.
+ */
RateLimit limit;
- /** Block other operations when block job is running */
+ /**
+ * Block other operations when block job is running.
+ * Always modified and read under QEMU global mutex (GLOBAL_STATE_CODE).
+ */
Error *blocker;
+ /** All notifiers are set once in block_job_create() and never modified. */
+
/** Called when a cancelled job is finalised. */
Notifier finalize_cancelled_notifier;
@@ -70,7 +87,10 @@ typedef struct BlockJob {
/** Called when the job coroutine yields or terminates */
Notifier idle_notifier;
- /** BlockDriverStates that are involved in this block job */
+ /**
+ * BlockDriverStates that are involved in this block job.
+ * Always modified and read under QEMU global mutex (GLOBAL_STATE_CODE).
+ */
GSList *nodes;
} BlockJob;
@@ -82,15 +102,16 @@ typedef struct BlockJob {
*/
/**
- * block_job_next:
+ * block_job_next_locked:
* @job: A block job, or %NULL.
*
* Get the next element from the list of block jobs after @job, or the
* first one if @job is %NULL.
*
* Returns the requested job, or %NULL if there are no more jobs left.
+ * Called with job lock held.
*/
-BlockJob *block_job_next(BlockJob *job);
+BlockJob *block_job_next_locked(BlockJob *job);
/**
* block_job_get:
@@ -99,9 +120,13 @@ BlockJob *block_job_next(BlockJob *job);
* Get the block job identified by @id (which must not be %NULL).
*
* Returns the requested job, or %NULL if it doesn't exist.
+ * Called with job lock *not* held.
*/
BlockJob *block_job_get(const char *id);
+/* Same as block_job_get(), but called with job lock held. */
+BlockJob *block_job_get_locked(const char *id);
+
/**
* block_job_add_bdrv:
* @job: A block job
@@ -135,32 +160,38 @@ void block_job_remove_all_bdrv(BlockJob *job);
bool block_job_has_bdrv(BlockJob *job, BlockDriverState *bs);
/**
- * block_job_set_speed:
+ * block_job_set_speed_locked:
* @job: The job to set the speed for.
* @speed: The new value
* @errp: Error object.
*
* Set a rate-limiting parameter for the job; the actual meaning may
* vary depending on the job type.
+ *
+ * Called with job lock held, but might release it temporarily.
*/
-bool block_job_set_speed(BlockJob *job, int64_t speed, Error **errp);
+bool block_job_set_speed_locked(BlockJob *job, int64_t speed, Error **errp);
/**
- * block_job_query:
+ * block_job_query_locked:
* @job: The job to get information about.
*
* Return information about a job.
+ *
+ * Called with job lock held.
*/
-BlockJobInfo *block_job_query(BlockJob *job, Error **errp);
+BlockJobInfo *block_job_query_locked(BlockJob *job, Error **errp);
/**
- * block_job_iostatus_reset:
+ * block_job_iostatus_reset_locked:
* @job: The job whose I/O status should be reset.
*
* Reset I/O status on @job and on BlockDriverState objects it uses,
* other than job->blk.
+ *
+ * Called with job lock held.
*/
-void block_job_iostatus_reset(BlockJob *job);
+void block_job_iostatus_reset_locked(BlockJob *job);
/*
* block_job_get_aio_context:
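
A usage sketch for the renamed API (hypothetical function; JOB_LOCK_GUARD() is introduced by the qemu/job.h changes below): monitor-side code now takes the job lock once and calls the _locked variants inside it.

    static BlockJobInfo *example_query_job(const char *id, Error **errp)
    {
        JOB_LOCK_GUARD(); /* hold job_mutex across lookup and query */

        BlockJob *job = block_job_get_locked(id);
        if (!job) {
            error_setg(errp, "block job '%s' not found", id);
            return NULL;
        }
        return block_job_query_locked(job, errp);
    }
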
diff --git a/include/block/nbd.h b/include/block/nbd.h
index c74b7a9d2e..4ede3b2bd0 100644
--- a/include/block/nbd.h
+++ b/include/block/nbd.h
@@ -424,6 +424,6 @@ QIOChannel *coroutine_fn
nbd_co_establish_connection(NBDClientConnection *conn, NBDExportInfo *info,
bool blocking, Error **errp);
-void coroutine_fn nbd_co_establish_connection_cancel(NBDClientConnection *conn);
+void nbd_co_establish_connection_cancel(NBDClientConnection *conn);
#endif
diff --git a/include/hw/core/sysemu-cpu-ops.h b/include/hw/core/sysemu-cpu-ops.h
index a9ba39e5f2..ee169b872c 100644
--- a/include/hw/core/sysemu-cpu-ops.h
+++ b/include/hw/core/sysemu-cpu-ops.h
@@ -53,25 +53,25 @@ typedef struct SysemuCPUOps {
* 32-bit VM coredump.
*/
int (*write_elf32_note)(WriteCoreDumpFunction f, CPUState *cpu,
- int cpuid, void *opaque);
+ int cpuid, DumpState *s);
/**
* @write_elf64_note: Callback for writing a CPU-specific ELF note to a
* 64-bit VM coredump.
*/
int (*write_elf64_note)(WriteCoreDumpFunction f, CPUState *cpu,
- int cpuid, void *opaque);
+ int cpuid, DumpState *s);
/**
* @write_elf32_qemunote: Callback for writing a CPU- and QEMU-specific ELF
* note to a 32-bit VM coredump.
*/
int (*write_elf32_qemunote)(WriteCoreDumpFunction f, CPUState *cpu,
- void *opaque);
+ DumpState *s);
/**
* @write_elf64_qemunote: Callback for writing a CPU- and QEMU-specific ELF
* note to a 64-bit VM coredump.
*/
int (*write_elf64_qemunote)(WriteCoreDumpFunction f, CPUState *cpu,
- void *opaque);
+ DumpState *s);
/**
* @virtio_is_big_endian: Callback to return %true if a CPU which supports
* runtime configurable endianness is currently big-endian.
diff --git a/include/hw/cxl/cxl_device.h b/include/hw/cxl/cxl_device.h
index 1e141b6621..e4d221cdb3 100644
--- a/include/hw/cxl/cxl_device.h
+++ b/include/hw/cxl/cxl_device.h
@@ -237,6 +237,7 @@ struct CXLType3Dev {
/* Properties */
HostMemoryBackend *hostmem;
HostMemoryBackend *lsa;
+ uint64_t sn;
/* State */
AddressSpace hostmem_as;
diff --git a/include/hw/firmware/smbios.h b/include/hw/firmware/smbios.h
index 4b7ad77a44..e7d386f7c8 100644
--- a/include/hw/firmware/smbios.h
+++ b/include/hw/firmware/smbios.h
@@ -189,6 +189,16 @@ struct smbios_type_4 {
uint16_t processor_family2;
} QEMU_PACKED;
+/* SMBIOS type 8 - Port Connector Information */
+struct smbios_type_8 {
+ struct smbios_structure_header header;
+ uint8_t internal_reference_str;
+ uint8_t internal_connector_type;
+ uint8_t external_reference_str;
+ uint8_t external_connector_type;
+ uint8_t port_type;
+} QEMU_PACKED;
+
/* SMBIOS type 11 - OEM strings */
struct smbios_type_11 {
struct smbios_structure_header header;
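
Illustrative only (connector and port type values taken from the SMBIOS specification's Type 8 tables; the header fields are filled in by the table builder): a USB port entry might look like this.

    struct smbios_type_8 t8 = {
        .internal_reference_str  = 0,    /* no internal designator string */
        .internal_connector_type = 0x00, /* None */
        .external_reference_str  = 1,    /* first string in the string-set */
        .external_connector_type = 0x12, /* USB */
        .port_type               = 0x10, /* USB */
    };
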
diff --git a/include/hw/pci/pci.h b/include/hw/pci/pci.h
index b54b6ef88f..97937cc922 100644
--- a/include/hw/pci/pci.h
+++ b/include/hw/pci/pci.h
@@ -688,60 +688,44 @@ static inline void
pci_set_byte_by_mask(uint8_t *config, uint8_t mask, uint8_t reg)
{
uint8_t val = pci_get_byte(config);
- uint8_t rval = reg << ctz32(mask);
- pci_set_byte(config, (~mask & val) | (mask & rval));
-}
+ uint8_t rval;
-static inline uint8_t
-pci_get_byte_by_mask(uint8_t *config, uint8_t mask)
-{
- uint8_t val = pci_get_byte(config);
- return (val & mask) >> ctz32(mask);
+ assert(mask);
+ rval = reg << ctz32(mask);
+ pci_set_byte(config, (~mask & val) | (mask & rval));
}
static inline void
pci_set_word_by_mask(uint8_t *config, uint16_t mask, uint16_t reg)
{
uint16_t val = pci_get_word(config);
- uint16_t rval = reg << ctz32(mask);
- pci_set_word(config, (~mask & val) | (mask & rval));
-}
+ uint16_t rval;
-static inline uint16_t
-pci_get_word_by_mask(uint8_t *config, uint16_t mask)
-{
- uint16_t val = pci_get_word(config);
- return (val & mask) >> ctz32(mask);
+ assert(mask);
+ rval = reg << ctz32(mask);
+ pci_set_word(config, (~mask & val) | (mask & rval));
}
static inline void
pci_set_long_by_mask(uint8_t *config, uint32_t mask, uint32_t reg)
{
uint32_t val = pci_get_long(config);
- uint32_t rval = reg << ctz32(mask);
- pci_set_long(config, (~mask & val) | (mask & rval));
-}
+ uint32_t rval;
-static inline uint32_t
-pci_get_long_by_mask(uint8_t *config, uint32_t mask)
-{
- uint32_t val = pci_get_long(config);
- return (val & mask) >> ctz32(mask);
+ assert(mask);
+ rval = reg << ctz32(mask);
+ pci_set_long(config, (~mask & val) | (mask & rval));
}
static inline void
pci_set_quad_by_mask(uint8_t *config, uint64_t mask, uint64_t reg)
{
uint64_t val = pci_get_quad(config);
- uint64_t rval = reg << ctz32(mask);
- pci_set_quad(config, (~mask & val) | (mask & rval));
-}
+ uint64_t rval;
-static inline uint64_t
-pci_get_quad_by_mask(uint8_t *config, uint64_t mask)
-{
- uint64_t val = pci_get_quad(config);
- return (val & mask) >> ctz32(mask);
+ assert(mask);
+ rval = reg << ctz32(mask);
+ pci_set_quad(config, (~mask & val) | (mask & rval));
}
PCIDevice *pci_new_multifunction(int devfn, bool multifunction,
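
A short sketch of the surviving setters (illustrative values): the mask both selects the field and, via ctz32(), supplies the shift, which is why a zero mask is now rejected up front instead of producing an out-of-range shift count.

    uint8_t cfg[PCI_CONFIG_SPACE_SIZE] = { 0 };

    /*
     * Mask 0x0600 selects bits 9..10; the value 0x3 is shifted into that
     * field, so the word reads back as 0x0600. assert(mask) fires before
     * ctz32(0) could be used as a shift.
     */
    pci_set_word_by_mask(cfg, 0x0600, 0x3);
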
diff --git a/include/hw/virtio/vhost-user-blk.h b/include/hw/virtio/vhost-user-blk.h
index 7c91f15040..ea085ee1ed 100644
--- a/include/hw/virtio/vhost-user-blk.h
+++ b/include/hw/virtio/vhost-user-blk.h
@@ -34,7 +34,6 @@ struct VHostUserBlk {
struct virtio_blk_config blkcfg;
uint16_t num_queues;
uint32_t queue_size;
- uint32_t config_wce;
struct vhost_dev dev;
struct vhost_inflight *inflight;
VhostUserState vhost_user;
diff --git a/include/hw/virtio/vhost-user-gpio.h b/include/hw/virtio/vhost-user-gpio.h
new file mode 100644
index 0000000000..4fe9aeecc0
--- /dev/null
+++ b/include/hw/virtio/vhost-user-gpio.h
@@ -0,0 +1,35 @@
+/*
+ * Vhost-user GPIO virtio device
+ *
+ * Copyright (c) 2021 Viresh Kumar <viresh.kumar@linaro.org>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef _QEMU_VHOST_USER_GPIO_H
+#define _QEMU_VHOST_USER_GPIO_H
+
+#include "hw/virtio/virtio.h"
+#include "hw/virtio/vhost.h"
+#include "hw/virtio/vhost-user.h"
+#include "standard-headers/linux/virtio_gpio.h"
+#include "chardev/char-fe.h"
+
+#define TYPE_VHOST_USER_GPIO "vhost-user-gpio-device"
+OBJECT_DECLARE_SIMPLE_TYPE(VHostUserGPIO, VHOST_USER_GPIO);
+
+struct VHostUserGPIO {
+ /*< private >*/
+ VirtIODevice parent_obj;
+ CharBackend chardev;
+ struct virtio_gpio_config config;
+ struct vhost_virtqueue *vhost_vq;
+ struct vhost_dev vhost_dev;
+ VhostUserState vhost_user;
+ VirtQueue *command_vq;
+ VirtQueue *interrupt_vq;
+ bool connected;
+ /*< public >*/
+};
+
+#endif /* _QEMU_VHOST_USER_GPIO_H */
diff --git a/include/hw/virtio/vhost.h b/include/hw/virtio/vhost.h
index a346f23d13..d7eb557885 100644
--- a/include/hw/virtio/vhost.h
+++ b/include/hw/virtio/vhost.h
@@ -5,6 +5,9 @@
#include "hw/virtio/virtio.h"
#include "exec/memory.h"
+#define VHOST_F_DEVICE_IOTLB 63
+#define VHOST_USER_F_PROTOCOL_FEATURES 30
+
/* Generic structures common for any vhost based device. */
struct vhost_inflight {
@@ -86,11 +89,15 @@ struct vhost_dev {
/* if non-zero, minimum required value for max_queues */
int num_queues;
uint64_t features;
+ /** @acked_features: final set of negotiated features */
uint64_t acked_features;
+ /** @backend_features: backend specific feature bits */
uint64_t backend_features;
+ /** @protocol_features: final negotiated protocol features */
uint64_t protocol_features;
uint64_t max_queues;
uint64_t backend_cap;
+ /* @started: is the vhost device started? */
bool started;
bool log_enabled;
uint64_t log_size;
@@ -163,6 +170,17 @@ int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev);
void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev);
/**
+ * vhost_dev_is_started() - report status of vhost device
+ * @hdev: common vhost_dev structure
+ *
+ * Return the started status of the vhost device
+ */
+static inline bool vhost_dev_is_started(struct vhost_dev *hdev)
+{
+ return hdev->started;
+}
+
+/**
* vhost_dev_start() - start the vhost device
* @hdev: common vhost_dev structure
* @vdev: the VirtIODevice structure
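
A usage sketch (hypothetical callback): device models are expected to go through the new accessor rather than reading hdev->started directly.

    static void example_on_config_change(struct vhost_dev *hdev)
    {
        if (!vhost_dev_is_started(hdev)) {
            return; /* nothing to do before vhost_dev_start() */
        }
        /* ... refresh the device config, notify the guest, etc. ... */
    }
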
diff --git a/include/hw/virtio/virtio-blk-common.h b/include/hw/virtio/virtio-blk-common.h
new file mode 100644
index 0000000000..31daada3e3
--- /dev/null
+++ b/include/hw/virtio/virtio-blk-common.h
@@ -0,0 +1,20 @@
+/*
+ * Virtio Block Device common helpers
+ *
+ * Copyright IBM, Corp. 2007
+ *
+ * Authors:
+ * Anthony Liguori <aliguori@us.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ */
+
+#ifndef VIRTIO_BLK_COMMON_H
+#define VIRTIO_BLK_COMMON_H
+
+#include "hw/virtio/virtio.h"
+
+extern const VirtIOConfigSizeParams virtio_blk_cfg_size_params;
+
+#endif
diff --git a/include/hw/virtio/virtio.h b/include/hw/virtio/virtio.h
index db1c0ddf6b..f41b4a7e64 100644
--- a/include/hw/virtio/virtio.h
+++ b/include/hw/virtio/virtio.h
@@ -24,7 +24,12 @@
#include "qom/object.h"
#include "hw/virtio/vhost.h"
-/* A guest should never accept this. It implies negotiation is broken. */
+/*
+ * A guest should never accept this. It implies negotiation is broken
+ * between the driver frontend and the device. This bit is re-used for
+ * vhost-user to advertise VHOST_USER_F_PROTOCOL_FEATURES between QEMU
+ * and a vhost-user backend.
+ */
#define VIRTIO_F_BAD_FEATURE 30
#define VIRTIO_LEGACY_FEATURES ((0x1ULL << VIRTIO_F_BAD_FEATURE) | \
@@ -44,8 +49,14 @@ typedef struct VirtIOFeature {
size_t end;
} VirtIOFeature;
-size_t virtio_feature_get_config_size(const VirtIOFeature *features,
- uint64_t host_features);
+typedef struct VirtIOConfigSizeParams {
+ size_t min_size;
+ size_t max_size;
+ const VirtIOFeature *feature_sizes;
+} VirtIOConfigSizeParams;
+
+size_t virtio_get_config_size(const VirtIOConfigSizeParams *params,
+ uint64_t host_features);
typedef struct VirtQueue VirtQueue;
@@ -71,6 +82,11 @@ typedef struct VirtQueueElement
#define TYPE_VIRTIO_DEVICE "virtio-device"
OBJECT_DECLARE_TYPE(VirtIODevice, VirtioDeviceClass, VIRTIO_DEVICE)
+typedef struct {
+ int virtio_bit;
+ const char *feature_desc;
+} qmp_virtio_feature_map_t;
+
enum virtio_device_endian {
VIRTIO_DEVICE_ENDIAN_UNKNOWN,
VIRTIO_DEVICE_ENDIAN_LITTLE,
@@ -95,6 +111,7 @@ struct VirtIODevice
VirtQueue *vq;
MemoryListener listener;
uint16_t device_id;
+ /* @vm_running: current VM running state via virtio_vmstate_change() */
bool vm_running;
bool broken; /* device in invalid state, needs reset */
bool use_disabled_flag; /* allow use of 'disable' flag when needed */
@@ -110,6 +127,7 @@ struct VirtIODevice
bool use_guest_notifier_mask;
AddressSpace *dma_as;
QLIST_HEAD(, VirtQueue) *vector_queues;
+ QTAILQ_ENTRY(VirtIODevice) next;
};
struct VirtioDeviceClass {
@@ -371,6 +389,10 @@ static inline bool virtio_device_started(VirtIODevice *vdev, uint8_t status)
return vdev->started;
}
+ if (!vdev->vm_running) {
+ return false;
+ }
+
return status & VIRTIO_CONFIG_S_DRIVER_OK;
}
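
A sketch of the reworked config-size API (the FOO_* names and struct foo_config are hypothetical): a device bundles its minimum size, maximum size, and feature-dependent sizes into one VirtIOConfigSizeParams, as virtio_blk_cfg_size_params does in the new virtio-blk-common.h above.

    static const VirtIOFeature foo_feature_sizes[] = {
        {.flags = 1ULL << FOO_F_EXTRA,          /* assumed feature bit */
         .end = endof(struct foo_config, extra)},
        {}
    };

    static const VirtIOConfigSizeParams foo_cfg_size_params = {
        .min_size = FOO_CFG_BASE_SIZE,          /* assumed constant */
        .max_size = sizeof(struct foo_config),
        .feature_sizes = foo_feature_sizes,
    };

    static size_t foo_get_cfg_size(uint64_t host_features)
    {
        return virtio_get_config_size(&foo_cfg_size_params, host_features);
    }
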
diff --git a/include/monitor/hmp.h b/include/monitor/hmp.h
index a618eb1e4e..a9cf064ee8 100644
--- a/include/monitor/hmp.h
+++ b/include/monitor/hmp.h
@@ -95,6 +95,11 @@ void hmp_qom_list(Monitor *mon, const QDict *qdict);
void hmp_qom_get(Monitor *mon, const QDict *qdict);
void hmp_qom_set(Monitor *mon, const QDict *qdict);
void hmp_info_qom_tree(Monitor *mon, const QDict *dict);
+void hmp_virtio_query(Monitor *mon, const QDict *qdict);
+void hmp_virtio_status(Monitor *mon, const QDict *qdict);
+void hmp_virtio_queue_status(Monitor *mon, const QDict *qdict);
+void hmp_vhost_queue_status(Monitor *mon, const QDict *qdict);
+void hmp_virtio_queue_element(Monitor *mon, const QDict *qdict);
void object_add_completion(ReadLineState *rs, int nb_args, const char *str);
void object_del_completion(ReadLineState *rs, int nb_args, const char *str);
void device_add_completion(ReadLineState *rs, int nb_args, const char *str);
diff --git a/include/qemu/coroutine.h b/include/qemu/coroutine.h
index 08c5bb3c76..aae33cce17 100644
--- a/include/qemu/coroutine.h
+++ b/include/qemu/coroutine.h
@@ -92,12 +92,12 @@ void coroutine_fn qemu_coroutine_yield(void);
/**
* Get the AioContext of the given coroutine
*/
-AioContext *coroutine_fn qemu_coroutine_get_aio_context(Coroutine *co);
+AioContext *qemu_coroutine_get_aio_context(Coroutine *co);
/**
* Get the currently executing coroutine
*/
-Coroutine *coroutine_fn qemu_coroutine_self(void);
+Coroutine *qemu_coroutine_self(void);
/**
* Return whether or not currently inside a coroutine
diff --git a/include/qemu/job.h b/include/qemu/job.h
index c105b31076..e502787dd8 100644
--- a/include/qemu/job.h
+++ b/include/qemu/job.h
@@ -40,27 +40,62 @@ typedef struct JobTxn JobTxn;
* Long-running operation.
*/
typedef struct Job {
+
+ /* Fields set at initialization (job_create), and never modified */
+
/** The ID of the job. May be NULL for internal jobs. */
char *id;
- /** The type of this job. */
+ /**
+ * The type of this job.
+ * All callbacks are called with job_mutex *not* held.
+ */
const JobDriver *driver;
- /** Reference count of the block job */
- int refcnt;
-
- /** Current state; See @JobStatus for details. */
- JobStatus status;
-
- /** AioContext to run the job coroutine in */
- AioContext *aio_context;
-
/**
* The coroutine that executes the job. If not NULL, it is reentered when
* busy is false and the job is cancelled.
+ * Initialized in job_start().
*/
Coroutine *co;
+ /** True if this job should automatically finalize itself */
+ bool auto_finalize;
+
+ /** True if this job should automatically dismiss itself */
+ bool auto_dismiss;
+
+ /**
+ * The completion function that will be called when the job completes.
+ * Called with AioContext lock held, since many callback implementations
+ * use bdrv_* functions that require holding the lock.
+ */
+ BlockCompletionFunc *cb;
+
+ /** The opaque value that is passed to the completion function. */
+ void *opaque;
+
+ /* ProgressMeter API is thread-safe */
+ ProgressMeter progress;
+
+ /**
+ * AioContext to run the job coroutine in.
+ * The job AioContext can be read when holding *either*
+ * the BQL (so we are in the main loop) or the job_mutex.
+ * It can only be written when we hold *both* BQL
+ * and the job_mutex.
+ */
+ AioContext *aio_context;
+
+
+ /** Protected by job_mutex */
+
+ /** Reference count of the block job */
+ int refcnt;
+
+ /** Current state; See @JobStatus for details. */
+ JobStatus status;
+
/**
* Timer that is used by @job_sleep_ns. Accessed under job_mutex (in
* job.c).
@@ -76,7 +111,7 @@ typedef struct Job {
/**
* Set to false by the job while the coroutine has yielded and may be
* re-entered by job_enter(). There may still be I/O or event loop activity
- * pending. Accessed under block_job_mutex (in blockjob.c).
+ * pending. Accessed under job_mutex.
*
* When the job is deferred to the main loop, busy is true as long as the
* bottom half is still pending.
@@ -112,14 +147,6 @@ typedef struct Job {
/** Set to true when the job has deferred work to the main loop. */
bool deferred_to_main_loop;
- /** True if this job should automatically finalize itself */
- bool auto_finalize;
-
- /** True if this job should automatically dismiss itself */
- bool auto_dismiss;
-
- ProgressMeter progress;
-
/**
* Return code from @run and/or @prepare callback(s).
* Not final until the job has reached the CONCLUDED status.
@@ -134,12 +161,6 @@ typedef struct Job {
*/
Error *err;
- /** The completion function that will be called when the job completes. */
- BlockCompletionFunc *cb;
-
- /** The opaque value that is passed to the completion function. */
- void *opaque;
-
/** Notifiers called when a cancelled job is finalised */
NotifierList on_finalize_cancelled;
@@ -167,6 +188,7 @@ typedef struct Job {
/**
* Callbacks and other information about a Job driver.
+ * All callbacks are invoked with job_mutex *not* held.
*/
struct JobDriver {
@@ -242,6 +264,9 @@ struct JobDriver {
*
* This callback will not be invoked if the job has already failed.
* If it fails, abort and then clean will be called.
+ *
+ * Called with AioContext lock held, since many callback implementations
+ * use bdrv_* functions that require holding the lock.
*/
int (*prepare)(Job *job);
@@ -252,6 +277,9 @@ struct JobDriver {
*
* All jobs will complete with a call to either .commit() or .abort() but
* never both.
+ *
+ * Called with AioContext lock held, since many callback implementations
+ * use bdrv_* functions that require holding the lock.
*/
void (*commit)(Job *job);
@@ -262,6 +290,9 @@ struct JobDriver {
*
* All jobs will complete with a call to either .commit() or .abort() but
* never both.
+ *
+ * Called with AioContext lock held, since many callback implementations
+ * use bdrv_* functions that require holding the lock.
*/
void (*abort)(Job *job);
@@ -270,6 +301,9 @@ struct JobDriver {
* .commit() or .abort(). Regardless of which callback is invoked after
* completion, .clean() will always be called, even if the job does not
* belong to a transaction group.
+ *
+ * Called with AioContext lock held, since many callback implementations
+ * use bdrv_* functions that require holding the lock.
*/
void (*clean)(Job *job);
@@ -284,11 +318,18 @@ struct JobDriver {
* READY).
* (If the callback is NULL, the job is assumed to terminate
* without I/O.)
+ *
+ * Called with AioContext lock held, since many callback implementations
+ * use bdrv_* functions that require holding the lock.
*/
bool (*cancel)(Job *job, bool force);
- /** Called when the job is freed */
+ /**
+ * Called when the job is freed.
+ * Called with AioContext lock held, since many callback implementations
+ * use bdrv_* functions that require holding the lock.
+ */
void (*free)(Job *job);
};
@@ -303,6 +344,30 @@ typedef enum JobCreateFlags {
JOB_MANUAL_DISMISS = 0x04,
} JobCreateFlags;
+extern QemuMutex job_mutex;
+
+#define JOB_LOCK_GUARD() QEMU_LOCK_GUARD(&job_mutex)
+
+#define WITH_JOB_LOCK_GUARD() WITH_QEMU_LOCK_GUARD(&job_mutex)
+
+/**
+ * job_lock:
+ *
+ * Take the mutex protecting the list of jobs and their status.
+ * Most functions called by the monitor need to call job_lock
+ * and job_unlock manually. On the other hand, functions called
+ * by the block jobs themselves and by the block layer will take the
+ * lock for you.
+ */
+void job_lock(void);
+
+/**
+ * job_unlock:
+ *
+ * Release the mutex protecting the list of jobs and their status.
+ */
+void job_unlock(void);
+
/**
* Allocate and return a new job transaction. Jobs can be added to the
* transaction using job_txn_add_job().
@@ -319,23 +384,20 @@ JobTxn *job_txn_new(void);
/**
* Release a reference that was previously acquired with job_txn_add_job or
* job_txn_new. If it's the last reference to the object, it will be freed.
+ *
+ * Called with job lock *not* held.
*/
void job_txn_unref(JobTxn *txn);
-/**
- * @txn: The transaction (may be NULL)
- * @job: Job to add to the transaction
- *
- * Add @job to the transaction. The @job must not already be in a transaction.
- * The caller must call either job_txn_unref() or job_completed() to release
- * the reference that is automatically grabbed here.
- *
- * If @txn is NULL, the function does nothing.
+/*
+ * Same as job_txn_unref(), but called with job lock held.
+ * Might release the lock temporarily.
*/
-void job_txn_add_job(JobTxn *txn, Job *job);
+void job_txn_unref_locked(JobTxn *txn);
/**
* Create a new long-running job and return it.
+ * Called with job_mutex *not* held.
*
* @job_id: The id of the newly-created job, or %NULL for internal jobs
* @driver: The class object for the newly-created job.
@@ -353,20 +415,27 @@ void *job_create(const char *job_id, const JobDriver *driver, JobTxn *txn,
/**
* Add a reference to Job refcnt, it will be decreased with job_unref, and then
* be freed if it comes to be the last reference.
+ *
+ * Called with job lock held.
*/
-void job_ref(Job *job);
+void job_ref_locked(Job *job);
/**
- * Release a reference that was previously acquired with job_ref() or
+ * Release a reference that was previously acquired with job_ref_locked() or
* job_create(). If it's the last reference to the object, it will be freed.
+ *
+ * Takes AioContext lock internally to invoke a job->driver callback.
+ * Called with job lock held.
*/
-void job_unref(Job *job);
+void job_unref_locked(Job *job);
/**
* @job: The job that has made progress
* @done: How much progress the job made since the last call
*
* Updates the progress counter of the job.
+ *
+ * May be called with mutex held or not held.
*/
void job_progress_update(Job *job, uint64_t done);
@@ -377,6 +446,8 @@ void job_progress_update(Job *job, uint64_t done);
*
* Sets the expected end value of the progress counter of a job so that a
* completion percentage can be calculated when the progress is updated.
+ *
+ * May be called with mutex held or not held.
*/
void job_progress_set_remaining(Job *job, uint64_t remaining);
@@ -392,27 +463,27 @@ void job_progress_set_remaining(Job *job, uint64_t remaining);
* length before, and job_progress_update() afterwards.
* (So the operation acts as a parenthesis in regards to the main job
* operation running in background.)
+ *
+ * May be called with mutex held or not held.
*/
void job_progress_increase_remaining(Job *job, uint64_t delta);
-/** To be called when a cancelled job is finalised. */
-void job_event_cancelled(Job *job);
-
-/** To be called when a successfully completed job is finalised. */
-void job_event_completed(Job *job);
-
/**
* Conditionally enter the job coroutine if the job is ready to run, not
* already busy and fn() returns true. fn() is called while under the job_lock
* critical section.
+ *
+ * Called with job lock held, but might release it temporarily.
*/
-void job_enter_cond(Job *job, bool(*fn)(Job *job));
+void job_enter_cond_locked(Job *job, bool(*fn)(Job *job));
/**
* @job: A job that has not yet been started.
*
* Begins execution of a job.
* Takes ownership of one reference to the job object.
+ *
+ * Called with job_mutex *not* held.
*/
void job_start(Job *job);
@@ -420,6 +491,7 @@ void job_start(Job *job);
* @job: The job to enter.
*
* Continue the specified job by entering the coroutine.
+ * Called with job_mutex *not* held.
*/
void job_enter(Job *job);
@@ -428,6 +500,8 @@ void job_enter(Job *job);
*
* Pause now if job_pause() has been called. Jobs that perform lots of I/O
* must call this between requests so that the job can be paused.
+ *
+ * Called with job_mutex *not* held.
*/
void coroutine_fn job_pause_point(Job *job);
@@ -435,8 +509,9 @@ void coroutine_fn job_pause_point(Job *job);
* @job: The job that calls the function.
*
* Yield the job coroutine.
+ * Called with job_mutex *not* held.
*/
-void job_yield(Job *job);
+void coroutine_fn job_yield(Job *job);
/**
* @job: The job that calls the function.
@@ -445,10 +520,11 @@ void job_yield(Job *job);
* Put the job to sleep (assuming that it wasn't canceled) for @ns
* %QEMU_CLOCK_REALTIME nanoseconds. Canceling the job will immediately
* interrupt the wait.
+ *
+ * Called with job_mutex *not* held.
*/
void coroutine_fn job_sleep_ns(Job *job, int64_t ns);
-
/** Returns the JobType of a given Job. */
JobType job_type(const Job *job);
@@ -458,88 +534,138 @@ const char *job_type_str(const Job *job);
/** Returns true if the job should not be visible to the management layer. */
bool job_is_internal(Job *job);
-/** Returns whether the job is being cancelled. */
+/**
+ * Returns whether the job is being cancelled.
+ * Called with job_mutex *not* held.
+ */
bool job_is_cancelled(Job *job);
+/* Same as job_is_cancelled(), but called with job lock held. */
+bool job_is_cancelled_locked(Job *job);
+
/**
* Returns whether the job is scheduled for cancellation (at an
* indefinite point).
+ * Called with job_mutex *not* held.
*/
bool job_cancel_requested(Job *job);
-/** Returns whether the job is in a completed state. */
-bool job_is_completed(Job *job);
+/**
+ * Returns whether the job is in a completed state.
+ * Called with job lock held.
+ */
+bool job_is_completed_locked(Job *job);
-/** Returns whether the job is ready to be completed. */
+/**
+ * Returns whether the job is ready to be completed.
+ * Called with job_mutex *not* held.
+ */
bool job_is_ready(Job *job);
+/* Same as job_is_ready(), but called with job lock held. */
+bool job_is_ready_locked(Job *job);
+
/**
* Request @job to pause at the next pause point. Must be paired with
* job_resume(). If the job is supposed to be resumed by user action, call
- * job_user_pause() instead.
+ * job_user_pause_locked() instead.
+ *
+ * Called with job lock *not* held.
*/
void job_pause(Job *job);
-/** Resumes a @job paused with job_pause. */
+/* Same as job_pause(), but called with job lock held. */
+void job_pause_locked(Job *job);
+
+/** Resumes a @job paused with job_pause. Called with job lock *not* held. */
void job_resume(Job *job);
+/*
+ * Same as job_resume(), but called with job lock held.
+ * Might release the lock temporarily.
+ */
+void job_resume_locked(Job *job);
+
/**
* Asynchronously pause the specified @job.
* Do not allow a resume until a matching call to job_user_resume.
+ * Called with job lock held.
*/
-void job_user_pause(Job *job, Error **errp);
+void job_user_pause_locked(Job *job, Error **errp);
-/** Returns true if the job is user-paused. */
-bool job_user_paused(Job *job);
+/**
+ * Returns true if the job is user-paused.
+ * Called with job lock held.
+ */
+bool job_user_paused_locked(Job *job);
/**
* Resume the specified @job.
- * Must be paired with a preceding job_user_pause.
+ * Must be paired with a preceding job_user_pause_locked.
+ * Called with job lock held, but might release it temporarily.
*/
-void job_user_resume(Job *job, Error **errp);
+void job_user_resume_locked(Job *job, Error **errp);
/**
* Get the next element from the list of block jobs after @job, or the
* first one if @job is %NULL.
*
* Returns the requested job, or %NULL if there are no more jobs left.
+ * Called with job lock *not* held.
*/
Job *job_next(Job *job);
+/* Same as job_next(), but called with job lock held. */
+Job *job_next_locked(Job *job);
+
/**
* Get the job identified by @id (which must not be %NULL).
*
* Returns the requested job, or %NULL if it doesn't exist.
+ * Called with job lock held.
*/
-Job *job_get(const char *id);
+Job *job_get_locked(const char *id);
/**
* Check whether the verb @verb can be applied to @job in its current state.
* Returns 0 if the verb can be applied; otherwise errp is set and -EPERM
* returned.
+ *
+ * Called with job lock held.
*/
-int job_apply_verb(Job *job, JobVerb verb, Error **errp);
+int job_apply_verb_locked(Job *job, JobVerb verb, Error **errp);
-/** The @job could not be started, free it. */
+/**
+ * The @job could not be started, free it.
+ * Called with job_mutex *not* held.
+ */
void job_early_fail(Job *job);
-/** Moves the @job from RUNNING to READY */
+/**
+ * Moves the @job from RUNNING to READY.
+ * Called with job_mutex *not* held.
+ */
void job_transition_to_ready(Job *job);
-/** Asynchronously complete the specified @job. */
-void job_complete(Job *job, Error **errp);
+/**
+ * Asynchronously complete the specified @job.
+ * Called with job lock held, but might release it temporarily.
+ */
+void job_complete_locked(Job *job, Error **errp);
/**
* Asynchronously cancel the specified @job. If @force is true, the job should
* be cancelled immediately without waiting for a consistent state.
+ * Called with job lock held.
*/
-void job_cancel(Job *job, bool force);
+void job_cancel_locked(Job *job, bool force);
/**
- * Cancels the specified job like job_cancel(), but may refuse to do so if the
- * operation isn't meaningful in the current state of the job.
+ * Cancels the specified job like job_cancel_locked(), but may refuse
+ * to do so if the operation isn't meaningful in the current state of the job.
+ * Called with job lock held.
*/
-void job_user_cancel(Job *job, bool force, Error **errp);
+void job_user_cancel_locked(Job *job, bool force, Error **errp);
/**
* Synchronously cancel the @job. The completion callback is called
@@ -550,16 +676,23 @@ void job_user_cancel(Job *job, bool force, Error **errp);
* Returns the return value from the job if the job actually completed
* during the call, or -ECANCELED if it was canceled.
*
- * Callers must hold the AioContext lock of job->aio_context.
+ * Called with job_lock *not* held.
*/
int job_cancel_sync(Job *job, bool force);
-/** Synchronously force-cancels all jobs using job_cancel_sync(). */
+/* Same as job_cancel_sync(), but called with job lock held. */
+int job_cancel_sync_locked(Job *job, bool force);
+
+/**
+ * Synchronously force-cancels all jobs using job_cancel_sync_locked().
+ *
+ * Called with job_lock *not* held.
+ */
void job_cancel_sync_all(void);
/**
* @job: The job to be completed.
- * @errp: Error object which may be set by job_complete(); this is not
+ * @errp: Error object which may be set by job_complete_locked(); this is not
* necessarily set on every error, the job return value has to be
* checked as well.
*
@@ -568,10 +701,9 @@ void job_cancel_sync_all(void);
* function).
*
* Returns the return value from the job.
- *
- * Callers must hold the AioContext lock of job->aio_context.
+ * Called with job_lock held.
*/
-int job_complete_sync(Job *job, Error **errp);
+int job_complete_sync_locked(Job *job, Error **errp);
/**
* For a @job that has finished its work and is pending awaiting explicit
@@ -580,14 +712,18 @@ int job_complete_sync(Job *job, Error **errp);
* FIXME: Make the below statement universally true:
* For jobs that support the manual workflow mode, all graph changes that occur
* as a result will occur after this command and before a successful reply.
+ *
+ * Called with job lock held.
*/
-void job_finalize(Job *job, Error **errp);
+void job_finalize_locked(Job *job, Error **errp);
/**
* Remove the concluded @job from the query list and resets the passed pointer
* to %NULL. Returns an error if the job is not actually concluded.
+ *
+ * Called with job lock held.
*/
-void job_dismiss(Job **job, Error **errp);
+void job_dismiss_locked(Job **job, Error **errp);
/**
* Synchronously finishes the given @job. If @finish is given, it is called to
@@ -596,8 +732,20 @@ void job_dismiss(Job **job, Error **errp);
* Returns 0 if the job is successfully completed, -ECANCELED if the job was
* cancelled before completing, and -errno in other error cases.
*
- * Callers must hold the AioContext lock of job->aio_context.
+ * Called with job_lock held, but might release it temporarily.
+ */
+int job_finish_sync_locked(Job *job, void (*finish)(Job *, Error **errp),
+ Error **errp);
+
+/**
+ * Sets the @job->aio_context.
+ * Called with job_mutex *not* held.
+ *
+ * This function must run in the main thread to protect against the
+ * concurrent read in job_finish_sync_locked(), takes the job_mutex
+ * lock to protect against the read in job_do_yield_locked(), and must
+ * be called when the job is quiescent.
*/
-int job_finish_sync(Job *job, void (*finish)(Job *, Error **errp), Error **errp);
+void job_set_aio_context(Job *job, AioContext *ctx);
#endif
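
Pulling the new locking pieces together, a monitor-style iteration sketch (hypothetical function, not part of the patch):

    static void example_for_each_job(void (*fn)(Job *job))
    {
        JOB_LOCK_GUARD(); /* job list and status are under job_mutex */

        for (Job *job = job_next_locked(NULL); job;
             job = job_next_locked(job)) {
            if (!job_is_internal(job)) {
                fn(job);
            }
        }
    }
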
diff --git a/include/qemu/typedefs.h b/include/qemu/typedefs.h
index 5f95169827..6d4e6d9708 100644
--- a/include/qemu/typedefs.h
+++ b/include/qemu/typedefs.h
@@ -131,6 +131,7 @@ typedef struct VirtIODevice VirtIODevice;
typedef struct Visitor Visitor;
typedef struct VMChangeStateEntry VMChangeStateEntry;
typedef struct VMStateDescription VMStateDescription;
+typedef struct DumpState DumpState;
/*
* Pointer types
diff --git a/include/sysemu/dump.h b/include/sysemu/dump.h
index ffc2ea1072..b62513d87d 100644
--- a/include/sysemu/dump.h
+++ b/include/sysemu/dump.h
@@ -166,11 +166,16 @@ typedef struct DumpState {
hwaddr memory_offset;
int fd;
- GuestPhysBlock *next_block;
- ram_addr_t start;
- bool has_filter;
- int64_t begin;
- int64_t length;
+ /*
+ * Dump filter area variables
+ *
+ * A filtered dump only contains the guest memory designated by
+ * the start address and length variables defined below.
+ *
+ * If length is 0, no filtering is applied.
+ */
+ int64_t filter_area_begin; /* Start address of partial guest memory area */
+ int64_t filter_area_length; /* Length of partial guest memory area */
uint8_t *note_buf; /* buffer for notes */
size_t note_buf_offset; /* the writing place in note_buf */
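
The filtering rule the new comment describes, as a sketch (hypothetical helper, not part of the patch): a memory block is dumped iff no filter is set or the block overlaps the filter area.

    static bool example_block_selected(DumpState *s, int64_t addr,
                                       int64_t size)
    {
        if (s->filter_area_length == 0) {
            return true; /* length 0 means no filtering */
        }
        return addr < s->filter_area_begin + s->filter_area_length &&
               addr + size > s->filter_area_begin;
    }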