author     Emanuele Giuseppe Esposito    2022-09-26 11:32:11 +0200
committer  Kevin Wolf                    2022-10-07 12:11:41 +0200
commit     6f592e5aca1a27fe1c1f661cfe68b35b90850acf (patch)
tree       74f0dbf7c423bb4361cf0bb5c008a8b50de7fe6e /blockdev.c
parent     job.h: categorize JobDriver callbacks that need the AioContext lock (diff)
job.c: enable job lock/unlock and remove AioContext locks
Change the job_{lock/unlock} functions and macros to use job_mutex. Now that they
are no longer no-ops, remove the AioContext locking to avoid deadlocks. Therefore:

- where possible, remove the AioContext lock/unlock pair entirely;
- if the lock is also used by some other function, reduce the locking section as
  much as possible, leaving the job API outside of it;
- change AIO_WAIT_WHILE into AIO_WAIT_WHILE_UNLOCKED, since the AioContext lock
  is no longer taken.

The only functions that still need the AioContext lock are:

- the JobDriver callbacks, already documented in job.h;
- job_cancel_sync() in replication.c, which is called with the AioContext lock
  taken; now that the job layer uses AIO_WAIT_WHILE_UNLOCKED, the lock must be
  released there.

Reduce the locking section to cover only the callback invocation and document the
functions that take the AioContext lock, to avoid taking it twice.

Also remove real_job_{lock/unlock}, as they are replaced by the public functions.

Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
Message-Id: <20220926093214.506243-19-eesposit@redhat.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
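The effect on the QMP handlers in blockdev.c can be seen in the diff below: each
handler now takes only the job mutex via JOB_LOCK_GUARD() and never acquires an
AioContext. As a minimal sketch of the resulting pattern (mirroring
qmp_block_job_pause() from the hunks below; it assumes QEMU's job.h and
blockjob.h declarations and is not a standalone program):

    /* Sketch of the post-patch locking pattern in a QMP job handler. */
    void qmp_block_job_pause(const char *device, Error **errp)
    {
        BlockJob *job;

        JOB_LOCK_GUARD();                          /* takes job_mutex for the whole scope */
        job = find_block_job_locked(device, errp); /* lookup no longer returns an AioContext */
        if (!job) {
            return;
        }

        trace_qmp_block_job_pause(job);
        job_user_pause_locked(&job->job, errp);    /* job API runs under job_mutex only */
    }

The same shape applies to the cancel, resume, complete, finalize and dismiss
handlers below; only the job-specific call at the end differs.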
Diffstat (limited to 'blockdev.c')
-rw-r--r--  blockdev.c  72
1 file changed, 10 insertions(+), 62 deletions(-)
diff --git a/blockdev.c b/blockdev.c
index 46090bb0aa..a32bafc07a 100644
--- a/blockdev.c
+++ b/blockdev.c
@@ -155,12 +155,7 @@ void blockdev_mark_auto_del(BlockBackend *blk)
for (job = block_job_next_locked(NULL); job;
job = block_job_next_locked(job)) {
if (block_job_has_bdrv(job, blk_bs(blk))) {
- AioContext *aio_context = job->job.aio_context;
- aio_context_acquire(aio_context);
-
job_cancel_locked(&job->job, false);
-
- aio_context_release(aio_context);
}
}
@@ -1847,14 +1842,7 @@ static void drive_backup_abort(BlkActionState *common)
DriveBackupState *state = DO_UPCAST(DriveBackupState, common, common);
if (state->job) {
- AioContext *aio_context;
-
- aio_context = bdrv_get_aio_context(state->bs);
- aio_context_acquire(aio_context);
-
job_cancel_sync(&state->job->job, true);
-
- aio_context_release(aio_context);
}
}
@@ -1948,14 +1936,7 @@ static void blockdev_backup_abort(BlkActionState *common)
BlockdevBackupState *state = DO_UPCAST(BlockdevBackupState, common, common);
if (state->job) {
- AioContext *aio_context;
-
- aio_context = bdrv_get_aio_context(state->bs);
- aio_context_acquire(aio_context);
-
job_cancel_sync(&state->job->job, true);
-
- aio_context_release(aio_context);
}
}
@@ -3317,19 +3298,14 @@ out:
}
/*
- * Get a block job using its ID and acquire its AioContext.
- * Called with job_mutex held.
+ * Get a block job using its ID. Called with job_mutex held.
*/
-static BlockJob *find_block_job_locked(const char *id,
- AioContext **aio_context,
- Error **errp)
+static BlockJob *find_block_job_locked(const char *id, Error **errp)
{
BlockJob *job;
assert(id != NULL);
- *aio_context = NULL;
-
job = block_job_get_locked(id);
if (!job) {
@@ -3338,36 +3314,30 @@ static BlockJob *find_block_job_locked(const char *id,
return NULL;
}
- *aio_context = block_job_get_aio_context(job);
- aio_context_acquire(*aio_context);
-
return job;
}
void qmp_block_job_set_speed(const char *device, int64_t speed, Error **errp)
{
- AioContext *aio_context;
BlockJob *job;
JOB_LOCK_GUARD();
- job = find_block_job_locked(device, &aio_context, errp);
+ job = find_block_job_locked(device, errp);
if (!job) {
return;
}
block_job_set_speed_locked(job, speed, errp);
- aio_context_release(aio_context);
}
void qmp_block_job_cancel(const char *device,
bool has_force, bool force, Error **errp)
{
- AioContext *aio_context;
BlockJob *job;
JOB_LOCK_GUARD();
- job = find_block_job_locked(device, &aio_context, errp);
+ job = find_block_job_locked(device, errp);
if (!job) {
return;
@@ -3380,22 +3350,19 @@ void qmp_block_job_cancel(const char *device,
if (job_user_paused_locked(&job->job) && !force) {
error_setg(errp, "The block job for device '%s' is currently paused",
device);
- goto out;
+ return;
}
trace_qmp_block_job_cancel(job);
job_user_cancel_locked(&job->job, force, errp);
-out:
- aio_context_release(aio_context);
}
void qmp_block_job_pause(const char *device, Error **errp)
{
- AioContext *aio_context;
BlockJob *job;
JOB_LOCK_GUARD();
- job = find_block_job_locked(device, &aio_context, errp);
+ job = find_block_job_locked(device, errp);
if (!job) {
return;
@@ -3403,16 +3370,14 @@ void qmp_block_job_pause(const char *device, Error **errp)
trace_qmp_block_job_pause(job);
job_user_pause_locked(&job->job, errp);
- aio_context_release(aio_context);
}
void qmp_block_job_resume(const char *device, Error **errp)
{
- AioContext *aio_context;
BlockJob *job;
JOB_LOCK_GUARD();
- job = find_block_job_locked(device, &aio_context, errp);
+ job = find_block_job_locked(device, errp);
if (!job) {
return;
@@ -3420,16 +3385,14 @@ void qmp_block_job_resume(const char *device, Error **errp)
trace_qmp_block_job_resume(job);
job_user_resume_locked(&job->job, errp);
- aio_context_release(aio_context);
}
void qmp_block_job_complete(const char *device, Error **errp)
{
- AioContext *aio_context;
BlockJob *job;
JOB_LOCK_GUARD();
- job = find_block_job_locked(device, &aio_context, errp);
+ job = find_block_job_locked(device, errp);
if (!job) {
return;
@@ -3437,16 +3400,14 @@ void qmp_block_job_complete(const char *device, Error **errp)
trace_qmp_block_job_complete(job);
job_complete_locked(&job->job, errp);
- aio_context_release(aio_context);
}
void qmp_block_job_finalize(const char *id, Error **errp)
{
- AioContext *aio_context;
BlockJob *job;
JOB_LOCK_GUARD();
- job = find_block_job_locked(id, &aio_context, errp);
+ job = find_block_job_locked(id, errp);
if (!job) {
return;
@@ -3456,24 +3417,16 @@ void qmp_block_job_finalize(const char *id, Error **errp)
job_ref_locked(&job->job);
job_finalize_locked(&job->job, errp);
- /*
- * Job's context might have changed via job_finalize (and job_txn_apply
- * automatically acquires the new one), so make sure we release the correct
- * one.
- */
- aio_context = block_job_get_aio_context(job);
job_unref_locked(&job->job);
- aio_context_release(aio_context);
}
void qmp_block_job_dismiss(const char *id, Error **errp)
{
- AioContext *aio_context;
BlockJob *bjob;
Job *job;
JOB_LOCK_GUARD();
- bjob = find_block_job_locked(id, &aio_context, errp);
+ bjob = find_block_job_locked(id, errp);
if (!bjob) {
return;
@@ -3482,7 +3435,6 @@ void qmp_block_job_dismiss(const char *id, Error **errp)
trace_qmp_block_job_dismiss(bjob);
job = &bjob->job;
job_dismiss_locked(&job, errp);
- aio_context_release(aio_context);
}
void qmp_change_backing_file(const char *device,
@@ -3764,15 +3716,11 @@ BlockJobInfoList *qmp_query_block_jobs(Error **errp)
for (job = block_job_next_locked(NULL); job;
job = block_job_next_locked(job)) {
BlockJobInfo *value;
- AioContext *aio_context;
if (block_job_is_internal(job)) {
continue;
}
- aio_context = block_job_get_aio_context(job);
- aio_context_acquire(aio_context);
value = block_job_query_locked(job, errp);
- aio_context_release(aio_context);
if (!value) {
qapi_free_BlockJobInfoList(head);
return NULL;