author:    Emanuele Giuseppe Esposito   2022-09-26 11:32:11 +0200
committer: Kevin Wolf                   2022-10-07 12:11:41 +0200
commit:    6f592e5aca1a27fe1c1f661cfe68b35b90850acf
tree:      74f0dbf7c423bb4361cf0bb5c008a8b50de7fe6e /tests
parent:    job.h: categorize JobDriver callbacks that need the AioContext lock
job.c: enable job lock/unlock and remove AioContext locks
Change the job_{lock/unlock} functions and macros to use job_mutex.
Now that they are no longer no-ops, remove the AioContext lock
to avoid deadlocks.
Therefore:
- when possible, remove the AioContext lock/unlock pair completely
- if some other function also needs the lock, reduce the locking
  section as much as possible, leaving the job API outside of it
- change AIO_WAIT_WHILE to AIO_WAIT_WHILE_UNLOCKED, since we
  are not using the AioContext lock anymore (see the sketch below)
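
For illustration only (not part of the change itself), this is roughly
what a caller looks like before and after. WITH_JOB_LOCK_GUARD(),
job_complete_sync_locked() and error_abort are the existing QEMU APIs
used by the tests below; the wrapper function and its Job parameter are
made up for the example:

    #include "qemu/osdep.h"
    #include "qapi/error.h"   /* error_abort */
    #include "qemu/job.h"     /* WITH_JOB_LOCK_GUARD(), job_complete_sync_locked() */

    /* Hypothetical caller, only meant to show the locking pattern. */
    static int complete_job_example(Job *job)
    {
        int ret;

        /*
         * Before this patch the job lock was a no-op, so the job API also
         * had to run under the AioContext lock:
         *
         *     aio_context_acquire(job->aio_context);
         *     WITH_JOB_LOCK_GUARD() {
         *         ret = job_complete_sync_locked(job, &error_abort);
         *     }
         *     aio_context_release(job->aio_context);
         */

        /* After: job_mutex really protects the job state, so the
         * AioContext lock is not taken around the job API anymore.
         */
        WITH_JOB_LOCK_GUARD() {
            ret = job_complete_sync_locked(job, &error_abort);
        }
        return ret;
    }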
The only functions that still need the AioContext lock are:
- the JobDriver callbacks, already documented in job.h
- job_cancel_sync() in replication.c, which is called with the
  AioContext lock taken; but the job now uses AIO_WAIT_WHILE_UNLOCKED,
  so we need to release the lock around the call.

Reduce the locking section to cover only the callback invocation
and document the functions that take the AioContext lock,
to avoid taking it twice.
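
A rough sketch of what "cover only the callback invocation" means; the
wrapper name is hypothetical, but Job::aio_context, JobDriver::prepare
and aio_context_acquire()/aio_context_release() are the real fields and
functions involved:

    #include "qemu/osdep.h"
    #include "block/aio.h"    /* aio_context_acquire()/aio_context_release() */
    #include "qemu/job.h"     /* Job, JobDriver */

    /* Hypothetical helper: take the AioContext lock only around the driver
     * callback, which is still documented (in job.h) as needing it, and
     * keep everything else outside that lock.
     */
    static int job_prepare_example(Job *job)
    {
        int ret = 0;

        if (job->driver->prepare) {
            AioContext *ctx = job->aio_context;

            aio_context_acquire(ctx);
            ret = job->driver->prepare(job);
            aio_context_release(ctx);
        }
        return ret;
    }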
Also remove real_job_{lock/unlock}, as they are replaced by the
public job_{lock/unlock} functions.
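
For reference, a minimal sketch of what the public functions boil down
to once they are no longer no-ops; the actual job.c code and the
initialization of job_mutex may differ in detail:

    #include "qemu/osdep.h"
    #include "qemu/thread.h"  /* QemuMutex, qemu_mutex_lock()/unlock() */

    /* Sketch only: the real job_mutex lives in job.c and its
     * initialization is not shown here.
     */
    static QemuMutex job_mutex;

    void job_lock(void)
    {
        qemu_mutex_lock(&job_mutex);
    }

    void job_unlock(void)
    {
        qemu_mutex_unlock(&job_mutex);
    }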
Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
Message-Id: <20220926093214.506243-19-eesposit@redhat.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Diffstat (limited to 'tests')
 tests/unit/test-bdrv-drain.c     |  4
 tests/unit/test-block-iothread.c |  2
 tests/unit/test-blockjob.c       | 19
 3 files changed, 11 insertions, 14 deletions
diff --git a/tests/unit/test-bdrv-drain.c b/tests/unit/test-bdrv-drain.c
index 0db056ea63..4924ceb562 100644
--- a/tests/unit/test-bdrv-drain.c
+++ b/tests/unit/test-bdrv-drain.c
@@ -930,9 +930,9 @@ static void test_blockjob_common_drain_node(enum drain_type drain_type,
         tjob->prepare_ret = -EIO;
         break;
     }
+    aio_context_release(ctx);

     job_start(&job->job);
-    aio_context_release(ctx);

     if (use_iothread) {
         /* job_co_entry() is run in the I/O thread, wait for the actual job
@@ -1016,12 +1016,12 @@ static void test_blockjob_common_drain_node(enum drain_type drain_type,
         g_assert_true(job->job.busy); /* We're in qemu_co_sleep_ns() */
     }

-    aio_context_acquire(ctx);
     WITH_JOB_LOCK_GUARD() {
         ret = job_complete_sync_locked(&job->job, &error_abort);
     }
     g_assert_cmpint(ret, ==, (result == TEST_JOB_SUCCESS ? 0 : -EIO));

+    aio_context_acquire(ctx);
     if (use_iothread) {
         blk_set_aio_context(blk_src, qemu_get_aio_context(), &error_abort);
         assert(blk_get_aio_context(blk_target) == qemu_get_aio_context());
diff --git a/tests/unit/test-block-iothread.c b/tests/unit/test-block-iothread.c
index 96fd21c00a..def0709b2b 100644
--- a/tests/unit/test-block-iothread.c
+++ b/tests/unit/test-block-iothread.c
@@ -582,10 +582,10 @@ static void test_attach_blockjob(void)
         aio_poll(qemu_get_aio_context(), false);
     }

-    aio_context_acquire(ctx);
     WITH_JOB_LOCK_GUARD() {
         job_complete_sync_locked(&tjob->common.job, &error_abort);
     }
+    aio_context_acquire(ctx);
     blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);
     aio_context_release(ctx);

diff --git a/tests/unit/test-blockjob.c b/tests/unit/test-blockjob.c
index e4f126bb6d..f88e10e356 100644
--- a/tests/unit/test-blockjob.c
+++ b/tests/unit/test-blockjob.c
@@ -228,10 +228,7 @@ static void cancel_common(CancelJob *s)
     BlockJob *job = &s->common;
     BlockBackend *blk = s->blk;
     JobStatus sts = job->job.status;
-    AioContext *ctx;
-
-    ctx = job->job.aio_context;
-    aio_context_acquire(ctx);
+    AioContext *ctx = job->job.aio_context;

     job_cancel_sync(&job->job, true);
     WITH_JOB_LOCK_GUARD() {
@@ -242,9 +239,11 @@ static void cancel_common(CancelJob *s)
         assert(job->job.status == JOB_STATUS_NULL);
         job_unref_locked(&job->job);
     }
-    destroy_blk(blk);

+    aio_context_acquire(ctx);
+    destroy_blk(blk);
     aio_context_release(ctx);
+
 }

 static void test_cancel_created(void)
@@ -384,12 +383,10 @@ static void test_cancel_concluded(void)
     aio_poll(qemu_get_aio_context(), true);
     assert_job_status_is(job, JOB_STATUS_PENDING);

-    aio_context_acquire(job->aio_context);
     WITH_JOB_LOCK_GUARD() {
         job_finalize_locked(job, &error_abort);
+        assert(job->status == JOB_STATUS_CONCLUDED);
     }
-    aio_context_release(job->aio_context);
-    assert_job_status_is(job, JOB_STATUS_CONCLUDED);

     cancel_common(s);
 }
@@ -481,13 +478,11 @@ static void test_complete_in_standby(void)

     /* Wait for the job to become READY */
     job_start(job);
-    aio_context_acquire(ctx);
     /*
      * Here we are waiting for the status to change, so don't bother
      * protecting the read every time.
      */
-    AIO_WAIT_WHILE(ctx, job->status != JOB_STATUS_READY);
-    aio_context_release(ctx);
+    AIO_WAIT_WHILE_UNLOCKED(ctx, job->status != JOB_STATUS_READY);

     /* Begin the drained section, pausing the job */
     bdrv_drain_all_begin();
@@ -497,6 +492,7 @@ static void test_complete_in_standby(void)
     aio_context_acquire(ctx);
     /* This will schedule the job to resume it */
     bdrv_drain_all_end();
+    aio_context_release(ctx);

     WITH_JOB_LOCK_GUARD() {
         /* But the job cannot run, so it will remain on standby */
@@ -515,6 +511,7 @@ static void test_complete_in_standby(void)
         job_dismiss_locked(&job, &error_abort);
     }

+    aio_context_acquire(ctx);
     destroy_blk(blk);
     aio_context_release(ctx);
     iothread_join(iothread);