Diffstat (limited to 'blockjob.c')
-rw-r--r--  blockjob.c  132
1 file changed, 74 insertions, 58 deletions
diff --git a/blockjob.c b/blockjob.c
index 4868453d74..bdf20a0e35 100644
--- a/blockjob.c
+++ b/blockjob.c
@@ -36,21 +36,6 @@
#include "qemu/main-loop.h"
#include "qemu/timer.h"
-/*
- * The block job API is composed of two categories of functions.
- *
- * The first includes functions used by the monitor. The monitor is
- * peculiar in that it accesses the block job list with block_job_get, and
- * therefore needs consistency across block_job_get and the actual operation
- * (e.g. block_job_set_speed). The consistency is achieved with
- * aio_context_acquire/release. These functions are declared in blockjob.h.
- *
- * The second includes functions used by the block job drivers and sometimes
- * by the core block layer. These do not care about locking, because the
- * whole coroutine runs under the AioContext lock, and are declared in
- * blockjob_int.h.
- */
-
static bool is_block_job(Job *job)
{
return job_type(job) == JOB_TYPE_BACKUP ||
@@ -59,21 +44,21 @@ static bool is_block_job(Job *job)
job_type(job) == JOB_TYPE_STREAM;
}
-BlockJob *block_job_next(BlockJob *bjob)
+BlockJob *block_job_next_locked(BlockJob *bjob)
{
Job *job = bjob ? &bjob->job : NULL;
GLOBAL_STATE_CODE();
do {
- job = job_next(job);
+ job = job_next_locked(job);
} while (job && !is_block_job(job));
return job ? container_of(job, BlockJob, job) : NULL;
}
-BlockJob *block_job_get(const char *id)
+BlockJob *block_job_get_locked(const char *id)
{
- Job *job = job_get(id);
+ Job *job = job_get_locked(id);
GLOBAL_STATE_CODE();
if (job && is_block_job(job)) {
@@ -83,6 +68,12 @@ BlockJob *block_job_get(const char *id)
}
}
+BlockJob *block_job_get(const char *id)
+{
+ JOB_LOCK_GUARD();
+ return block_job_get_locked(id);
+}
+
void block_job_free(Job *job)
{
BlockJob *bjob = container_of(job, BlockJob, job);
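
Note: in the hunks above, block_job_next() is renamed to block_job_next_locked() and block_job_get() gains a _locked variant plus a thin wrapper that takes job_mutex itself. A caller that needs consistency across iteration/lookup and the subsequent operation is expected to hold the lock for the whole sequence. A minimal sketch, assuming the job_mutex API from "qemu/job.h" and the prototypes introduced here (the helper name is hypothetical):

#include "qemu/osdep.h"
#include "qemu/job.h"
#include "block/blockjob.h"

/* Hypothetical helper: count all block jobs while holding job_mutex. */
static int count_block_jobs(void)
{
    BlockJob *bjob;
    int n = 0;

    WITH_JOB_LOCK_GUARD() {
        for (bjob = block_job_next_locked(NULL); bjob;
             bjob = block_job_next_locked(bjob)) {
            n++;    /* bjob->job fields may be read here: job_mutex is held */
        }
    }
    return n;
}
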
@@ -114,8 +105,10 @@ static bool child_job_drained_poll(BdrvChild *c)
/* An inactive or completed job doesn't have any pending requests. Jobs
* with !job->busy are either already paused or have a pause point after
* being reentered, so no job driver code will run before they pause. */
- if (!job->busy || job_is_completed(job)) {
- return false;
+ WITH_JOB_LOCK_GUARD() {
+ if (!job->busy || job_is_completed_locked(job)) {
+ return false;
+ }
}
/* Otherwise, assume that it isn't fully stopped yet, but allow the job to
@@ -163,12 +156,13 @@ static void child_job_set_aio_ctx(BdrvChild *c, AioContext *ctx,
bdrv_set_aio_context_ignore(sibling->bs, ctx, ignore);
}
- job->job.aio_context = ctx;
+ job_set_aio_context(&job->job, ctx);
}
static AioContext *child_job_get_parent_aio_context(BdrvChild *c)
{
BlockJob *job = c->opaque;
+ GLOBAL_STATE_CODE();
return job->job.aio_context;
}
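
Note: the WITH_JOB_LOCK_GUARD() block added to child_job_drained_poll() above returns from inside the guarded section. QEMU's lock guards are scope-based (built on the cleanup attribute), so that early return still releases job_mutex. The shape, as a sketch only:

/* Sketch: an early return inside the guard still releases job_mutex,
 * because WITH_JOB_LOCK_GUARD() unlocks on scope exit. */
static bool is_quiesced_example(Job *job)
{
    WITH_JOB_LOCK_GUARD() {
        if (!job->busy || job_is_completed_locked(job)) {
            return true;     /* job_mutex released on this return */
        }
    }
    return false;            /* and released here as well */
}
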
@@ -250,7 +244,8 @@ int block_job_add_bdrv(BlockJob *job, const char *name, BlockDriverState *bs,
return 0;
}
-static void block_job_on_idle(Notifier *n, void *opaque)
+/* Called with job_mutex lock held. */
+static void block_job_on_idle_locked(Notifier *n, void *opaque)
{
aio_wait_kick();
}
@@ -271,14 +266,14 @@ static bool job_timer_pending(Job *job)
return timer_pending(&job->sleep_timer);
}
-bool block_job_set_speed(BlockJob *job, int64_t speed, Error **errp)
+bool block_job_set_speed_locked(BlockJob *job, int64_t speed, Error **errp)
{
const BlockJobDriver *drv = block_job_driver(job);
int64_t old_speed = job->speed;
GLOBAL_STATE_CODE();
- if (job_apply_verb(&job->job, JOB_VERB_SET_SPEED, errp) < 0) {
+ if (job_apply_verb_locked(&job->job, JOB_VERB_SET_SPEED, errp) < 0) {
return false;
}
if (speed < 0) {
@@ -292,7 +287,9 @@ bool block_job_set_speed(BlockJob *job, int64_t speed, Error **errp)
job->speed = speed;
if (drv->set_speed) {
+ job_unlock();
drv->set_speed(job, speed);
+ job_lock();
}
if (speed && speed <= old_speed) {
@@ -300,18 +297,24 @@ bool block_job_set_speed(BlockJob *job, int64_t speed, Error **errp)
}
/* kick only if a timer is pending */
- job_enter_cond(&job->job, job_timer_pending);
+ job_enter_cond_locked(&job->job, job_timer_pending);
return true;
}
+static bool block_job_set_speed(BlockJob *job, int64_t speed, Error **errp)
+{
+ JOB_LOCK_GUARD();
+ return block_job_set_speed_locked(job, speed, errp);
+}
+
int64_t block_job_ratelimit_get_delay(BlockJob *job, uint64_t n)
{
IO_CODE();
return ratelimit_calculate_delay(&job->limit, n);
}
-BlockJobInfo *block_job_query(BlockJob *job, Error **errp)
+BlockJobInfo *block_job_query_locked(BlockJob *job, Error **errp)
{
BlockJobInfo *info;
uint64_t progress_current, progress_total;
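
Note: in the set-speed hunks above, drv->set_speed() is called with job_mutex dropped (job_unlock()/job_lock() around the callback), so driver code never runs under the job lock. The general shape of that pattern, with a hypothetical wrapper name:

/* Sketch: call a driver hook without holding job_mutex.  The caller holds
 * the lock on entry and gets it back before touching job state again. */
static void call_set_speed_cb_locked(BlockJob *job, int64_t speed)
{
    const BlockJobDriver *drv = block_job_driver(job);

    if (drv->set_speed) {
        job_unlock();                  /* don't run driver code locked */
        drv->set_speed(job, speed);
        job_lock();                    /* reacquire before returning */
    }
}
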
@@ -329,13 +332,13 @@ BlockJobInfo *block_job_query(BlockJob *job, Error **errp)
info = g_new0(BlockJobInfo, 1);
info->type = g_strdup(job_type_str(&job->job));
info->device = g_strdup(job->job.id);
- info->busy = qatomic_read(&job->job.busy);
+ info->busy = job->job.busy;
info->paused = job->job.pause_count > 0;
info->offset = progress_current;
info->len = progress_total;
info->speed = job->speed;
info->io_status = job->iostatus;
- info->ready = job_is_ready(&job->job),
+ info->ready = job_is_ready_locked(&job->job),
info->status = job->job.status;
info->auto_finalize = job->job.auto_finalize;
info->auto_dismiss = job->job.auto_dismiss;
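
Note: block_job_query_locked() now reads job->job.busy directly instead of via qatomic_read(), since the caller holds job_mutex, which protects that field. A hedged sketch of the expected monitor-side caller, keeping lookup and query under one lock so the two stay consistent (helper name and error message are illustrative):

/* Illustrative only: look up and query a block job under a single lock. */
static BlockJobInfo *query_one_block_job(const char *id, Error **errp)
{
    JOB_LOCK_GUARD();
    BlockJob *bjob = block_job_get_locked(id);

    if (!bjob) {
        error_setg(errp, "Block job '%s' not found", id);
        return NULL;
    }
    return block_job_query_locked(bjob, errp);
}
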
@@ -348,7 +351,8 @@ BlockJobInfo *block_job_query(BlockJob *job, Error **errp)
return info;
}
-static void block_job_iostatus_set_err(BlockJob *job, int error)
+/* Called with job lock held */
+static void block_job_iostatus_set_err_locked(BlockJob *job, int error)
{
if (job->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
job->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
@@ -356,7 +360,8 @@ static void block_job_iostatus_set_err(BlockJob *job, int error)
}
}
-static void block_job_event_cancelled(Notifier *n, void *opaque)
+/* Called with job_mutex lock held. */
+static void block_job_event_cancelled_locked(Notifier *n, void *opaque)
{
BlockJob *job = opaque;
uint64_t progress_current, progress_total;
@@ -375,7 +380,8 @@ static void block_job_event_cancelled(Notifier *n, void *opaque)
job->speed);
}
-static void block_job_event_completed(Notifier *n, void *opaque)
+/* Called with job_mutex lock held. */
+static void block_job_event_completed_locked(Notifier *n, void *opaque)
{
BlockJob *job = opaque;
const char *msg = NULL;
@@ -401,7 +407,8 @@ static void block_job_event_completed(Notifier *n, void *opaque)
msg);
}
-static void block_job_event_pending(Notifier *n, void *opaque)
+/* Called with job_mutex lock held. */
+static void block_job_event_pending_locked(Notifier *n, void *opaque)
{
BlockJob *job = opaque;
@@ -413,7 +420,8 @@ static void block_job_event_pending(Notifier *n, void *opaque)
job->job.id);
}
-static void block_job_event_ready(Notifier *n, void *opaque)
+/* Called with job_mutex lock held. */
+static void block_job_event_ready_locked(Notifier *n, void *opaque)
{
BlockJob *job = opaque;
uint64_t progress_current, progress_total;
@@ -433,11 +441,6 @@ static void block_job_event_ready(Notifier *n, void *opaque)
}
-/*
- * API for block job drivers and the block layer. These functions are
- * declared in blockjob_int.h.
- */
-
void *block_job_create(const char *job_id, const BlockJobDriver *driver,
JobTxn *txn, BlockDriverState *bs, uint64_t perm,
uint64_t shared_perm, int64_t speed, int flags,
@@ -463,19 +466,21 @@ void *block_job_create(const char *job_id, const BlockJobDriver *driver,
ratelimit_init(&job->limit);
- job->finalize_cancelled_notifier.notify = block_job_event_cancelled;
- job->finalize_completed_notifier.notify = block_job_event_completed;
- job->pending_notifier.notify = block_job_event_pending;
- job->ready_notifier.notify = block_job_event_ready;
- job->idle_notifier.notify = block_job_on_idle;
-
- notifier_list_add(&job->job.on_finalize_cancelled,
- &job->finalize_cancelled_notifier);
- notifier_list_add(&job->job.on_finalize_completed,
- &job->finalize_completed_notifier);
- notifier_list_add(&job->job.on_pending, &job->pending_notifier);
- notifier_list_add(&job->job.on_ready, &job->ready_notifier);
- notifier_list_add(&job->job.on_idle, &job->idle_notifier);
+ job->finalize_cancelled_notifier.notify = block_job_event_cancelled_locked;
+ job->finalize_completed_notifier.notify = block_job_event_completed_locked;
+ job->pending_notifier.notify = block_job_event_pending_locked;
+ job->ready_notifier.notify = block_job_event_ready_locked;
+ job->idle_notifier.notify = block_job_on_idle_locked;
+
+ WITH_JOB_LOCK_GUARD() {
+ notifier_list_add(&job->job.on_finalize_cancelled,
+ &job->finalize_cancelled_notifier);
+ notifier_list_add(&job->job.on_finalize_completed,
+ &job->finalize_completed_notifier);
+ notifier_list_add(&job->job.on_pending, &job->pending_notifier);
+ notifier_list_add(&job->job.on_ready, &job->ready_notifier);
+ notifier_list_add(&job->job.on_idle, &job->idle_notifier);
+ }
error_setg(&job->blocker, "block device is in use by block job: %s",
job_type_str(&job->job));
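
Note: the notifier callbacks gain a _locked suffix because job.c invokes them with job_mutex already held, and their registration now happens inside WITH_JOB_LOCK_GUARD(). Any additional notifier would follow the same rule: use only *_locked helpers in its body and never take the lock itself. A hypothetical example:

/* Hypothetical notifier: runs with job_mutex held by the caller, so it
 * must not call job_lock()/JOB_LOCK_GUARD() and may use *_locked helpers
 * and read job fields directly. */
static void block_job_trace_ready_locked(Notifier *n, void *opaque)
{
    BlockJob *job = opaque;

    /* Reading job state is safe here: job_mutex is held by job.c. */
    if (job_is_ready_locked(&job->job)) {
        /* e.g. emit a trace point */
    }
}

Registration would mirror the notifier_list_add() calls above, inside the same WITH_JOB_LOCK_GUARD() block.
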
@@ -498,7 +503,7 @@ fail:
return NULL;
}
-void block_job_iostatus_reset(BlockJob *job)
+void block_job_iostatus_reset_locked(BlockJob *job)
{
GLOBAL_STATE_CODE();
if (job->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
@@ -508,6 +513,12 @@ void block_job_iostatus_reset(BlockJob *job)
job->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
}
+static void block_job_iostatus_reset(BlockJob *job)
+{
+ JOB_LOCK_GUARD();
+ block_job_iostatus_reset_locked(job);
+}
+
void block_job_user_resume(Job *job)
{
BlockJob *bjob = container_of(job, BlockJob, job);
@@ -546,12 +557,17 @@ BlockErrorAction block_job_error_action(BlockJob *job, BlockdevOnError on_err,
action);
}
if (action == BLOCK_ERROR_ACTION_STOP) {
- if (!job->job.user_paused) {
- job_pause(&job->job);
- /* make the pause user visible, which will be resumed from QMP. */
- job->job.user_paused = true;
+ WITH_JOB_LOCK_GUARD() {
+ if (!job->job.user_paused) {
+ job_pause_locked(&job->job);
+ /*
+ * make the pause user visible, which will be
+ * resumed from QMP.
+ */
+ job->job.user_paused = true;
+ }
+ block_job_iostatus_set_err_locked(job, error);
}
- block_job_iostatus_set_err(job, error);
}
return action;
}
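
Note: block_job_error_action() now pauses the job and records the iostatus inside a single WITH_JOB_LOCK_GUARD() section, while keeping its unlocked signature for driver code. A hedged sketch of a typical driver-side caller, loosely modelled on how the mirror/backup jobs react to an I/O error (the wrapper name is illustrative; ret is the failing request's negative errno):

/* Illustrative driver-side use: decide whether an I/O error fails the job. */
static int handle_io_error(BlockJob *job, BlockdevOnError on_err,
                           bool is_read, int ret)
{
    BlockErrorAction action = block_job_error_action(job, on_err,
                                                     is_read, -ret);

    if (action == BLOCK_ERROR_ACTION_REPORT) {
        return ret;      /* propagate the error and let the job fail */
    }
    return 0;            /* IGNORE, or STOP: user-paused, resumable via QMP */
}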