Diffstat (limited to 'block')
-rw-r--r--   block/backup.c      |  59
-rw-r--r--   block/commit.c      |  44
-rw-r--r--   block/mirror.c      | 113
-rw-r--r--   block/replication.c |  10
-rw-r--r--   block/sheepdog.c    |   4
-rw-r--r--   block/stream.c      |  39
-rw-r--r--   block/trace-events  |   5
7 files changed, 147 insertions, 127 deletions
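
Most of the hunks below are one mechanical conversion: job callbacks that used to take a BlockJob * now take the generic Job *, and each driver recovers its own state struct by walking back up through the embedded common.job field with container_of(). The following is a minimal, self-contained sketch of that embedding pattern; the struct layouts and the container_of() macro are simplified stand-ins for illustration, not QEMU's actual definitions.

#include <inttypes.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* container_of() in the same spirit as QEMU's macro: recover the outer
 * struct from a pointer to one of its members. */
#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

/* Simplified stand-ins for the Job / BlockJob / BackupBlockJob layering;
 * the real QEMU structs carry far more state. */
typedef struct Job {
    const char *id;
    bool cancelled;
} Job;

typedef struct BlockJob {
    Job job;                 /* generic part, embedded in the BlockJob */
} BlockJob;

typedef struct BackupBlockJob {
    BlockJob common;         /* driver state embeds the BlockJob */
    int64_t bytes_read;
} BackupBlockJob;

/* After the conversion, a driver callback receives the generic Job* and
 * goes back up through common.job, i.e. the
 * container_of(job, BackupBlockJob, common.job) pattern seen in the hunks
 * below. */
static void backup_commit(Job *job)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common.job);

    printf("commit %s: %" PRId64 " bytes read\n", job->id, s->bytes_read);
}

int main(void)
{
    BackupBlockJob s = { .common.job.id = "backup0", .bytes_read = 4096 };

    backup_commit(&s.common.job);   /* callers now pass &job->common.job */
    return 0;
}

Compiled stand-alone this prints the job id and byte count, which is the point of the conversion: handing &job->common.job to generic code loses nothing, because the driver can always recover its outer state struct.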
diff --git a/block/backup.c b/block/backup.c index e14d99560d..4e228e959b 100644 --- a/block/backup.c +++ b/block/backup.c @@ -160,7 +160,7 @@ static int coroutine_fn backup_do_cow(BackupBlockJob *job, * offset field is an opaque progress value, it is not a disk offset. */ job->bytes_read += n; - block_job_progress_update(&job->common, n); + job_progress_update(&job->common.job, n); } out: @@ -207,25 +207,25 @@ static void backup_cleanup_sync_bitmap(BackupBlockJob *job, int ret) } } -static void backup_commit(BlockJob *job) +static void backup_commit(Job *job) { - BackupBlockJob *s = container_of(job, BackupBlockJob, common); + BackupBlockJob *s = container_of(job, BackupBlockJob, common.job); if (s->sync_bitmap) { backup_cleanup_sync_bitmap(s, 0); } } -static void backup_abort(BlockJob *job) +static void backup_abort(Job *job) { - BackupBlockJob *s = container_of(job, BackupBlockJob, common); + BackupBlockJob *s = container_of(job, BackupBlockJob, common.job); if (s->sync_bitmap) { backup_cleanup_sync_bitmap(s, -1); } } -static void backup_clean(BlockJob *job) +static void backup_clean(Job *job) { - BackupBlockJob *s = container_of(job, BackupBlockJob, common); + BackupBlockJob *s = container_of(job, BackupBlockJob, common.job); assert(s->target); blk_unref(s->target); s->target = NULL; @@ -317,11 +317,11 @@ typedef struct { int ret; } BackupCompleteData; -static void backup_complete(BlockJob *job, void *opaque) +static void backup_complete(Job *job, void *opaque) { BackupCompleteData *data = opaque; - block_job_completed(job, data->ret); + job_completed(job, data->ret); g_free(data); } @@ -329,7 +329,7 @@ static bool coroutine_fn yield_and_check(BackupBlockJob *job) { uint64_t delay_ns; - if (block_job_is_cancelled(&job->common)) { + if (job_is_cancelled(&job->common.job)) { return true; } @@ -337,9 +337,9 @@ static bool coroutine_fn yield_and_check(BackupBlockJob *job) * return. Without a yield, the VM would not reboot. */ delay_ns = block_job_ratelimit_get_delay(&job->common, job->bytes_read); job->bytes_read = 0; - block_job_sleep_ns(&job->common, delay_ns); + job_sleep_ns(&job->common.job, delay_ns); - if (block_job_is_cancelled(&job->common)) { + if (job_is_cancelled(&job->common.job)) { return true; } @@ -406,8 +406,8 @@ static void backup_incremental_init_copy_bitmap(BackupBlockJob *job) bdrv_set_dirty_iter(dbi, next_cluster * job->cluster_size); } - /* TODO block_job_progress_set_remaining() would make more sense */ - block_job_progress_update(&job->common, + /* TODO job_progress_set_remaining() would make more sense */ + job_progress_update(&job->common.job, job->len - hbitmap_count(job->copy_bitmap) * job->cluster_size); bdrv_dirty_iter_free(dbi); @@ -425,7 +425,7 @@ static void coroutine_fn backup_run(void *opaque) qemu_co_rwlock_init(&job->flush_rwlock); nb_clusters = DIV_ROUND_UP(job->len, job->cluster_size); - block_job_progress_set_remaining(&job->common, job->len); + job_progress_set_remaining(&job->common.job, job->len); job->copy_bitmap = hbitmap_alloc(nb_clusters, 0); if (job->sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) { @@ -441,10 +441,10 @@ static void coroutine_fn backup_run(void *opaque) if (job->sync_mode == MIRROR_SYNC_MODE_NONE) { /* All bits are set in copy_bitmap to allow any cluster to be copied. * This does not actually require them to be copied. */ - while (!block_job_is_cancelled(&job->common)) { + while (!job_is_cancelled(&job->common.job)) { /* Yield until the job is cancelled. We just let our before_write * notify callback service CoW requests. 
*/ - block_job_yield(&job->common); + job_yield(&job->common.job); } } else if (job->sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) { ret = backup_run_incremental(job); @@ -519,16 +519,21 @@ static void coroutine_fn backup_run(void *opaque) data = g_malloc(sizeof(*data)); data->ret = ret; - block_job_defer_to_main_loop(&job->common, backup_complete, data); + job_defer_to_main_loop(&job->common.job, backup_complete, data); } static const BlockJobDriver backup_job_driver = { - .instance_size = sizeof(BackupBlockJob), - .job_type = BLOCK_JOB_TYPE_BACKUP, - .start = backup_run, - .commit = backup_commit, - .abort = backup_abort, - .clean = backup_clean, + .job_driver = { + .instance_size = sizeof(BackupBlockJob), + .job_type = JOB_TYPE_BACKUP, + .free = block_job_free, + .user_resume = block_job_user_resume, + .drain = block_job_drain, + .start = backup_run, + .commit = backup_commit, + .abort = backup_abort, + .clean = backup_clean, + }, .attached_aio_context = backup_attached_aio_context, .drain = backup_drain, }; @@ -541,7 +546,7 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs, BlockdevOnError on_target_error, int creation_flags, BlockCompletionFunc *cb, void *opaque, - BlockJobTxn *txn, Error **errp) + JobTxn *txn, Error **errp) { int64_t len; BlockDriverInfo bdi; @@ -673,8 +678,8 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs, bdrv_reclaim_dirty_bitmap(bs, sync_bitmap, NULL); } if (job) { - backup_clean(&job->common); - block_job_early_fail(&job->common); + backup_clean(&job->common.job); + job_early_fail(&job->common.job); } return NULL; diff --git a/block/commit.c b/block/commit.c index ba5df6aa0a..620666161b 100644 --- a/block/commit.c +++ b/block/commit.c @@ -72,9 +72,10 @@ typedef struct { int ret; } CommitCompleteData; -static void commit_complete(BlockJob *job, void *opaque) +static void commit_complete(Job *job, void *opaque) { - CommitBlockJob *s = container_of(job, CommitBlockJob, common); + CommitBlockJob *s = container_of(job, CommitBlockJob, common.job); + BlockJob *bjob = &s->common; CommitCompleteData *data = opaque; BlockDriverState *top = blk_bs(s->top); BlockDriverState *base = blk_bs(s->base); @@ -90,7 +91,7 @@ static void commit_complete(BlockJob *job, void *opaque) * the normal backing chain can be restored. */ blk_unref(s->base); - if (!block_job_is_cancelled(&s->common) && ret == 0) { + if (!job_is_cancelled(job) && ret == 0) { /* success */ ret = bdrv_drop_intermediate(s->commit_top_bs, base, s->backing_file_str); @@ -111,12 +112,12 @@ static void commit_complete(BlockJob *job, void *opaque) blk_unref(s->top); /* If there is more than one reference to the job (e.g. if called from - * block_job_finish_sync()), block_job_completed() won't free it and - * therefore the blockers on the intermediate nodes remain. This would - * cause bdrv_set_backing_hd() to fail. */ - block_job_remove_all_bdrv(job); + * job_finish_sync()), job_completed() won't free it and therefore the + * blockers on the intermediate nodes remain. This would cause + * bdrv_set_backing_hd() to fail. 
*/ + block_job_remove_all_bdrv(bjob); - block_job_completed(&s->common, ret); + job_completed(job, ret); g_free(data); /* If bdrv_drop_intermediate() didn't already do that, remove the commit @@ -149,7 +150,7 @@ static void coroutine_fn commit_run(void *opaque) if (len < 0) { goto out; } - block_job_progress_set_remaining(&s->common, len); + job_progress_set_remaining(&s->common.job, len); ret = base_len = blk_getlength(s->base); if (base_len < 0) { @@ -171,8 +172,8 @@ static void coroutine_fn commit_run(void *opaque) /* Note that even when no rate limit is applied we need to yield * with no pending I/O here so that bdrv_drain_all() returns. */ - block_job_sleep_ns(&s->common, delay_ns); - if (block_job_is_cancelled(&s->common)) { + job_sleep_ns(&s->common.job, delay_ns); + if (job_is_cancelled(&s->common.job)) { break; } /* Copy if allocated above the base */ @@ -195,7 +196,7 @@ static void coroutine_fn commit_run(void *opaque) } } /* Publish progress */ - block_job_progress_update(&s->common, n); + job_progress_update(&s->common.job, n); if (copy) { delay_ns = block_job_ratelimit_get_delay(&s->common, n); @@ -211,13 +212,18 @@ out: data = g_malloc(sizeof(*data)); data->ret = ret; - block_job_defer_to_main_loop(&s->common, commit_complete, data); + job_defer_to_main_loop(&s->common.job, commit_complete, data); } static const BlockJobDriver commit_job_driver = { - .instance_size = sizeof(CommitBlockJob), - .job_type = BLOCK_JOB_TYPE_COMMIT, - .start = commit_run, + .job_driver = { + .instance_size = sizeof(CommitBlockJob), + .job_type = JOB_TYPE_COMMIT, + .free = block_job_free, + .user_resume = block_job_user_resume, + .drain = block_job_drain, + .start = commit_run, + }, }; static int coroutine_fn bdrv_commit_top_preadv(BlockDriverState *bs, @@ -277,7 +283,7 @@ void commit_start(const char *job_id, BlockDriverState *bs, } s = block_job_create(job_id, &commit_job_driver, NULL, bs, 0, BLK_PERM_ALL, - speed, BLOCK_JOB_DEFAULT, NULL, NULL, errp); + speed, JOB_DEFAULT, NULL, NULL, errp); if (!s) { return; } @@ -367,7 +373,7 @@ void commit_start(const char *job_id, BlockDriverState *bs, s->on_error = on_error; trace_commit_start(bs, base, top, s); - block_job_start(&s->common); + job_start(&s->common.job); return; fail: @@ -380,7 +386,7 @@ fail: if (commit_top_bs) { bdrv_replace_node(commit_top_bs, top, &error_abort); } - block_job_early_fail(&s->common); + job_early_fail(&s->common.job); } diff --git a/block/mirror.c b/block/mirror.c index a4197bb975..dcb66ec3be 100644 --- a/block/mirror.c +++ b/block/mirror.c @@ -119,14 +119,14 @@ static void mirror_iteration_done(MirrorOp *op, int ret) bitmap_set(s->cow_bitmap, chunk_num, nb_chunks); } if (!s->initial_zeroing_ongoing) { - block_job_progress_update(&s->common, op->bytes); + job_progress_update(&s->common.job, op->bytes); } } qemu_iovec_destroy(&op->qiov); g_free(op); if (s->waiting_for_io) { - qemu_coroutine_enter(s->common.co); + qemu_coroutine_enter(s->common.job.co); } } @@ -345,7 +345,7 @@ static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s) mirror_wait_for_io(s); } - block_job_pause_point(&s->common); + job_pause_point(&s->common.job); /* Find the number of consective dirty chunks following the first dirty * one, and wait for in flight requests in them. 
*/ @@ -484,9 +484,10 @@ typedef struct { int ret; } MirrorExitData; -static void mirror_exit(BlockJob *job, void *opaque) +static void mirror_exit(Job *job, void *opaque) { - MirrorBlockJob *s = container_of(job, MirrorBlockJob, common); + MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job); + BlockJob *bjob = &s->common; MirrorExitData *data = opaque; AioContext *replace_aio_context = NULL; BlockDriverState *src = s->source; @@ -497,7 +498,7 @@ static void mirror_exit(BlockJob *job, void *opaque) bdrv_release_dirty_bitmap(src, s->dirty_bitmap); /* Make sure that the source BDS doesn't go away before we called - * block_job_completed(). */ + * job_completed(). */ bdrv_ref(src); bdrv_ref(mirror_top_bs); bdrv_ref(target_bs); @@ -568,7 +569,7 @@ static void mirror_exit(BlockJob *job, void *opaque) * the blockers on the intermediate nodes so that the resulting state is * valid. Also give up permissions on mirror_top_bs->backing, which might * block the removal. */ - block_job_remove_all_bdrv(job); + block_job_remove_all_bdrv(bjob); bdrv_child_try_set_perm(mirror_top_bs->backing, 0, BLK_PERM_ALL, &error_abort); bdrv_replace_node(mirror_top_bs, backing_bs(mirror_top_bs), &error_abort); @@ -576,11 +577,11 @@ static void mirror_exit(BlockJob *job, void *opaque) /* We just changed the BDS the job BB refers to (with either or both of the * bdrv_replace_node() calls), so switch the BB back so the cleanup does * the right thing. We don't need any permissions any more now. */ - blk_remove_bs(job->blk); - blk_set_perm(job->blk, 0, BLK_PERM_ALL, &error_abort); - blk_insert_bs(job->blk, mirror_top_bs, &error_abort); + blk_remove_bs(bjob->blk); + blk_set_perm(bjob->blk, 0, BLK_PERM_ALL, &error_abort); + blk_insert_bs(bjob->blk, mirror_top_bs, &error_abort); - block_job_completed(&s->common, data->ret); + job_completed(job, data->ret); g_free(data); bdrv_drained_end(src); @@ -594,9 +595,9 @@ static void mirror_throttle(MirrorBlockJob *s) if (now - s->last_pause_ns > BLOCK_JOB_SLICE_TIME) { s->last_pause_ns = now; - block_job_sleep_ns(&s->common, 0); + job_sleep_ns(&s->common.job, 0); } else { - block_job_pause_point(&s->common); + job_pause_point(&s->common.job); } } @@ -622,7 +623,7 @@ static int coroutine_fn mirror_dirty_init(MirrorBlockJob *s) mirror_throttle(s); - if (block_job_is_cancelled(&s->common)) { + if (job_is_cancelled(&s->common.job)) { s->initial_zeroing_ongoing = false; return 0; } @@ -650,7 +651,7 @@ static int coroutine_fn mirror_dirty_init(MirrorBlockJob *s) mirror_throttle(s); - if (block_job_is_cancelled(&s->common)) { + if (job_is_cancelled(&s->common.job)) { return 0; } @@ -695,7 +696,7 @@ static void coroutine_fn mirror_run(void *opaque) checking for a NULL string */ int ret = 0; - if (block_job_is_cancelled(&s->common)) { + if (job_is_cancelled(&s->common.job)) { goto immediate_exit; } @@ -726,13 +727,13 @@ static void coroutine_fn mirror_run(void *opaque) } if (s->bdev_length == 0) { - /* Report BLOCK_JOB_READY and wait for complete. */ - block_job_event_ready(&s->common); + /* Transition to the READY state and wait for complete. 
*/ + job_transition_to_ready(&s->common.job); s->synced = true; - while (!block_job_is_cancelled(&s->common) && !s->should_complete) { - block_job_yield(&s->common); + while (!job_is_cancelled(&s->common.job) && !s->should_complete) { + job_yield(&s->common.job); } - s->common.cancelled = false; + s->common.job.cancelled = false; goto immediate_exit; } @@ -768,7 +769,7 @@ static void coroutine_fn mirror_run(void *opaque) s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME); if (!s->is_none_mode) { ret = mirror_dirty_init(s); - if (ret < 0 || block_job_is_cancelled(&s->common)) { + if (ret < 0 || job_is_cancelled(&s->common.job)) { goto immediate_exit; } } @@ -785,13 +786,13 @@ static void coroutine_fn mirror_run(void *opaque) goto immediate_exit; } - block_job_pause_point(&s->common); + job_pause_point(&s->common.job); cnt = bdrv_get_dirty_count(s->dirty_bitmap); /* cnt is the number of dirty bytes remaining and s->bytes_in_flight is * the number of bytes currently being processed; together those are * the current remaining operation length */ - block_job_progress_set_remaining(&s->common, s->bytes_in_flight + cnt); + job_progress_set_remaining(&s->common.job, s->bytes_in_flight + cnt); /* Note that even when no rate limit is applied we need to yield * periodically with no pending I/O so that bdrv_drain_all() returns. @@ -823,12 +824,12 @@ static void coroutine_fn mirror_run(void *opaque) * report completion. This way, block-job-cancel will leave * the target in a consistent state. */ - block_job_event_ready(&s->common); + job_transition_to_ready(&s->common.job); s->synced = true; } should_complete = s->should_complete || - block_job_is_cancelled(&s->common); + job_is_cancelled(&s->common.job); cnt = bdrv_get_dirty_count(s->dirty_bitmap); } @@ -856,7 +857,7 @@ static void coroutine_fn mirror_run(void *opaque) * completion. */ assert(QLIST_EMPTY(&bs->tracked_requests)); - s->common.cancelled = false; + s->common.job.cancelled = false; need_drain = false; break; } @@ -868,9 +869,9 @@ static void coroutine_fn mirror_run(void *opaque) cnt == 0 ? BLOCK_JOB_SLICE_TIME : 0); } trace_mirror_before_sleep(s, cnt, s->synced, delay_ns); - block_job_sleep_ns(&s->common, delay_ns); - if (block_job_is_cancelled(&s->common) && - (!s->synced || s->common.force)) + job_sleep_ns(&s->common.job, delay_ns); + if (job_is_cancelled(&s->common.job) && + (!s->synced || s->common.job.force_cancel)) { break; } @@ -883,8 +884,8 @@ immediate_exit: * or it was cancelled prematurely so that we do not guarantee that * the target is a copy of the source. 
*/ - assert(ret < 0 || ((s->common.force || !s->synced) && - block_job_is_cancelled(&s->common))); + assert(ret < 0 || ((s->common.job.force_cancel || !s->synced) && + job_is_cancelled(&s->common.job))); assert(need_drain); mirror_wait_for_all_io(s); } @@ -901,12 +902,12 @@ immediate_exit: if (need_drain) { bdrv_drained_begin(bs); } - block_job_defer_to_main_loop(&s->common, mirror_exit, data); + job_defer_to_main_loop(&s->common.job, mirror_exit, data); } -static void mirror_complete(BlockJob *job, Error **errp) +static void mirror_complete(Job *job, Error **errp) { - MirrorBlockJob *s = container_of(job, MirrorBlockJob, common); + MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job); BlockDriverState *target; target = blk_bs(s->target); @@ -953,12 +954,12 @@ static void mirror_complete(BlockJob *job, Error **errp) } s->should_complete = true; - block_job_enter(&s->common); + job_enter(job); } -static void mirror_pause(BlockJob *job) +static void mirror_pause(Job *job) { - MirrorBlockJob *s = container_of(job, MirrorBlockJob, common); + MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job); mirror_wait_for_all_io(s); } @@ -986,21 +987,31 @@ static void mirror_drain(BlockJob *job) } static const BlockJobDriver mirror_job_driver = { - .instance_size = sizeof(MirrorBlockJob), - .job_type = BLOCK_JOB_TYPE_MIRROR, - .start = mirror_run, - .complete = mirror_complete, - .pause = mirror_pause, + .job_driver = { + .instance_size = sizeof(MirrorBlockJob), + .job_type = JOB_TYPE_MIRROR, + .free = block_job_free, + .user_resume = block_job_user_resume, + .drain = block_job_drain, + .start = mirror_run, + .pause = mirror_pause, + .complete = mirror_complete, + }, .attached_aio_context = mirror_attached_aio_context, .drain = mirror_drain, }; static const BlockJobDriver commit_active_job_driver = { - .instance_size = sizeof(MirrorBlockJob), - .job_type = BLOCK_JOB_TYPE_COMMIT, - .start = mirror_run, - .complete = mirror_complete, - .pause = mirror_pause, + .job_driver = { + .instance_size = sizeof(MirrorBlockJob), + .job_type = JOB_TYPE_COMMIT, + .free = block_job_free, + .user_resume = block_job_user_resume, + .drain = block_job_drain, + .start = mirror_run, + .pause = mirror_pause, + .complete = mirror_complete, + }, .attached_aio_context = mirror_attached_aio_context, .drain = mirror_drain, }; @@ -1237,7 +1248,7 @@ static void mirror_start_job(const char *job_id, BlockDriverState *bs, } trace_mirror_start(bs, s, opaque); - block_job_start(&s->common); + job_start(&s->common.job); return; fail: @@ -1248,7 +1259,7 @@ fail: g_free(s->replaces); blk_unref(s->target); - block_job_early_fail(&s->common); + job_early_fail(&s->common.job); } bdrv_child_try_set_perm(mirror_top_bs->backing, 0, BLK_PERM_ALL, @@ -1275,7 +1286,7 @@ void mirror_start(const char *job_id, BlockDriverState *bs, } is_none_mode = mode == MIRROR_SYNC_MODE_NONE; base = mode == MIRROR_SYNC_MODE_TOP ? 
backing_bs(bs) : NULL; - mirror_start_job(job_id, bs, BLOCK_JOB_DEFAULT, target, replaces, + mirror_start_job(job_id, bs, JOB_DEFAULT, target, replaces, speed, granularity, buf_size, backing_mode, on_source_error, on_target_error, unmap, NULL, NULL, &mirror_job_driver, is_none_mode, base, false, diff --git a/block/replication.c b/block/replication.c index 48148b884a..826db7b304 100644 --- a/block/replication.c +++ b/block/replication.c @@ -145,7 +145,7 @@ static void replication_close(BlockDriverState *bs) replication_stop(s->rs, false, NULL); } if (s->stage == BLOCK_REPLICATION_FAILOVER) { - block_job_cancel_sync(s->active_disk->bs->job); + job_cancel_sync(&s->active_disk->bs->job->job); } if (s->mode == REPLICATION_MODE_SECONDARY) { @@ -568,7 +568,7 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode, job = backup_job_create(NULL, s->secondary_disk->bs, s->hidden_disk->bs, 0, MIRROR_SYNC_MODE_NONE, NULL, false, BLOCKDEV_ON_ERROR_REPORT, - BLOCKDEV_ON_ERROR_REPORT, BLOCK_JOB_INTERNAL, + BLOCKDEV_ON_ERROR_REPORT, JOB_INTERNAL, backup_job_completed, bs, NULL, &local_err); if (local_err) { error_propagate(errp, local_err); @@ -576,7 +576,7 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode, aio_context_release(aio_context); return; } - block_job_start(job); + job_start(&job->job); break; default: aio_context_release(aio_context); @@ -681,7 +681,7 @@ static void replication_stop(ReplicationState *rs, bool failover, Error **errp) * disk, secondary disk in backup_job_completed(). */ if (s->secondary_disk->bs->job) { - block_job_cancel_sync(s->secondary_disk->bs->job); + job_cancel_sync(&s->secondary_disk->bs->job->job); } if (!failover) { @@ -693,7 +693,7 @@ static void replication_stop(ReplicationState *rs, bool failover, Error **errp) s->stage = BLOCK_REPLICATION_FAILOVER; commit_active_start(NULL, s->active_disk->bs, s->secondary_disk->bs, - BLOCK_JOB_INTERNAL, 0, BLOCKDEV_ON_ERROR_REPORT, + JOB_INTERNAL, 0, BLOCKDEV_ON_ERROR_REPORT, NULL, replication_done, bs, true, errp); break; default: diff --git a/block/sheepdog.c b/block/sheepdog.c index 4237132419..2a5bc0a59a 100644 --- a/block/sheepdog.c +++ b/block/sheepdog.c @@ -1859,9 +1859,7 @@ out: error_setg_errno(errp, -ret, "Can't pre-allocate"); } out_with_err_set: - if (blk) { - blk_unref(blk); - } + blk_unref(blk); g_free(buf); return ret; diff --git a/block/stream.c b/block/stream.c index df9660d2fc..a5d6e0cf8a 100644 --- a/block/stream.c +++ b/block/stream.c @@ -58,16 +58,16 @@ typedef struct { int ret; } StreamCompleteData; -static void stream_complete(BlockJob *job, void *opaque) +static void stream_complete(Job *job, void *opaque) { - StreamBlockJob *s = container_of(job, StreamBlockJob, common); + StreamBlockJob *s = container_of(job, StreamBlockJob, common.job); + BlockJob *bjob = &s->common; StreamCompleteData *data = opaque; - BlockDriverState *bs = blk_bs(job->blk); + BlockDriverState *bs = blk_bs(bjob->blk); BlockDriverState *base = s->base; Error *local_err = NULL; - if (!block_job_is_cancelled(&s->common) && bs->backing && - data->ret == 0) { + if (!job_is_cancelled(job) && bs->backing && data->ret == 0) { const char *base_id = NULL, *base_fmt = NULL; if (base) { base_id = s->backing_file_str; @@ -88,12 +88,12 @@ out: /* Reopen the image back in read-only mode if necessary */ if (s->bs_flags != bdrv_get_flags(bs)) { /* Give up write permissions before making it read-only */ - blk_set_perm(job->blk, 0, BLK_PERM_ALL, &error_abort); + blk_set_perm(bjob->blk, 0, BLK_PERM_ALL, 
&error_abort); bdrv_reopen(bs, s->bs_flags, NULL); } g_free(s->backing_file_str); - block_job_completed(&s->common, data->ret); + job_completed(job, data->ret); g_free(data); } @@ -121,7 +121,7 @@ static void coroutine_fn stream_run(void *opaque) ret = len; goto out; } - block_job_progress_set_remaining(&s->common, len); + job_progress_set_remaining(&s->common.job, len); buf = qemu_blockalign(bs, STREAM_BUFFER_SIZE); @@ -140,8 +140,8 @@ static void coroutine_fn stream_run(void *opaque) /* Note that even when no rate limit is applied we need to yield * with no pending I/O here so that bdrv_drain_all() returns. */ - block_job_sleep_ns(&s->common, delay_ns); - if (block_job_is_cancelled(&s->common)) { + job_sleep_ns(&s->common.job, delay_ns); + if (job_is_cancelled(&s->common.job)) { break; } @@ -184,7 +184,7 @@ static void coroutine_fn stream_run(void *opaque) ret = 0; /* Publish progress */ - block_job_progress_update(&s->common, n); + job_progress_update(&s->common.job, n); if (copy) { delay_ns = block_job_ratelimit_get_delay(&s->common, n); } else { @@ -205,13 +205,18 @@ out: /* Modify backing chain and close BDSes in main loop */ data = g_malloc(sizeof(*data)); data->ret = ret; - block_job_defer_to_main_loop(&s->common, stream_complete, data); + job_defer_to_main_loop(&s->common.job, stream_complete, data); } static const BlockJobDriver stream_job_driver = { - .instance_size = sizeof(StreamBlockJob), - .job_type = BLOCK_JOB_TYPE_STREAM, - .start = stream_run, + .job_driver = { + .instance_size = sizeof(StreamBlockJob), + .job_type = JOB_TYPE_STREAM, + .free = block_job_free, + .start = stream_run, + .user_resume = block_job_user_resume, + .drain = block_job_drain, + }, }; void stream_start(const char *job_id, BlockDriverState *bs, @@ -238,7 +243,7 @@ void stream_start(const char *job_id, BlockDriverState *bs, BLK_PERM_GRAPH_MOD, BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE, - speed, BLOCK_JOB_DEFAULT, NULL, NULL, errp); + speed, JOB_DEFAULT, NULL, NULL, errp); if (!s) { goto fail; } @@ -259,7 +264,7 @@ void stream_start(const char *job_id, BlockDriverState *bs, s->on_error = on_error; trace_stream_start(bs, base, s); - block_job_start(&s->common); + job_start(&s->common.job); return; fail: diff --git a/block/trace-events b/block/trace-events index f8c50b4063..2d59b53fd3 100644 --- a/block/trace-events +++ b/block/trace-events @@ -4,11 +4,6 @@ bdrv_open_common(void *bs, const char *filename, int flags, const char *format_name) "bs %p filename \"%s\" flags 0x%x format_name \"%s\"" bdrv_lock_medium(void *bs, bool locked) "bs %p locked %d" -# blockjob.c -block_job_completed(void *job, int ret, int jret) "job %p ret %d corrected ret %d" -block_job_state_transition(void *job, int ret, const char *legal, const char *s0, const char *s1) "job %p (ret: %d) attempting %s transition (%s-->%s)" -block_job_apply_verb(void *job, const char *state, const char *verb, const char *legal) "job %p in state %s; applying verb %s (%s)" - # block/block-backend.c blk_co_preadv(void *blk, void *bs, int64_t offset, unsigned int bytes, int flags) "blk %p bs %p offset %"PRId64" bytes %u flags 0x%x" blk_co_pwritev(void *blk, void *bs, int64_t offset, unsigned int bytes, int flags) "blk %p bs %p offset %"PRId64" bytes %u flags 0x%x" |
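
The other shape change repeated across backup.c, commit.c, mirror.c and stream.c above is in the driver tables: the generic callbacks (instance_size, job_type, start, pause, complete, free, user_resume, ...) move into a nested .job_driver sub-struct, while block-layer-only hooks such as attached_aio_context and the BlockJobDriver-level drain stay beside it. A rough, self-contained illustration of that nesting, again using simplified stand-in types rather than QEMU's real JobDriver/BlockJobDriver:

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-in state for one job instance. */
typedef struct BackupState { long bytes_read; } BackupState;

/* Generic driver table, mirroring the role of JobDriver. */
typedef struct JobDriver {
    size_t instance_size;
    const char *job_type;
    void (*start)(BackupState *s);
} JobDriver;

/* Block-layer wrapper that embeds the generic table, mirroring
 * BlockJobDriver after the conversion. */
typedef struct BlockJobDriver {
    JobDriver job_driver;              /* generic callbacks, nested */
    void (*drain)(BackupState *s);     /* block-layer-specific hook */
} BlockJobDriver;

static void backup_run(BackupState *s)   { printf("run: %ld bytes\n", s->bytes_read); }
static void backup_drain(BackupState *s) { printf("drain: %ld bytes\n", s->bytes_read); }

/* Converted table shape: generic entries grouped under .job_driver,
 * block-only entries kept alongside it. */
static const BlockJobDriver backup_job_driver = {
    .job_driver = {
        .instance_size = sizeof(BackupState),
        .job_type      = "backup",
        .start         = backup_run,
    },
    .drain = backup_drain,
};

int main(void)
{
    BackupState s = { .bytes_read = 4096 };

    backup_job_driver.job_driver.start(&s);  /* generic job layer entry */
    backup_job_driver.drain(&s);             /* block layer only */
    return 0;
}

The point of the split is that generic job code only ever dereferences the inner table, while block-layer callers can still reach the wrapper's own hooks.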