author     Vladimir Sementsov-Ogievskiy  2019-04-29 11:08:38 +0200
committer  Max Reitz                     2019-05-28 20:30:55 +0200
commit     c2da3413c021398152e98022261bb1643276a2fe
tree       a45a8cb3a6b380e78ba6210957653ed485551669 /block
parent     qcow2: do encryption in threads
block/backup: simplify backup_incremental_init_copy_bitmap
Simplify backup_incremental_init_copy_bitmap by using the function
bdrv_dirty_bitmap_next_dirty_area.

Note: we now iterate up to job->len instead of the bitmap size. It should
not matter, but it makes for less code.

Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Message-id: 20190429090842.57910-2-vsementsov@virtuozzo.com
Signed-off-by: Max Reitz <mreitz@redhat.com>
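For readers unfamiliar with the new helper, here is a minimal, self-contained
sketch of the iteration pattern the patch adopts. It is an illustration only:
next_dirty_area(), the bool array and the LEN/CLUSTER constants are made up
for this example and merely mimic the in/out semantics of
bdrv_dirty_bitmap_next_dirty_area() (search window in, dirty area out); they
are not QEMU code. The point is that each returned dirty area is rounded out
to cluster boundaries and the search restarts just past the last marked
cluster, which is exactly what the patched loop below does.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    /* Toy stand-in for bdrv_dirty_bitmap_next_dirty_area(): search the
     * byte-granular dirty[] array inside the window described by *offset
     * and *bytes; on success, rewrite *offset and *bytes to describe the
     * first dirty run found and return true. */
    static bool next_dirty_area(const bool *dirty, uint64_t len,
                                uint64_t *offset, uint64_t *bytes)
    {
        uint64_t end = *offset + *bytes;
        uint64_t start;

        if (end > len) {
            end = len;
        }
        for (start = *offset; start < end && !dirty[start]; start++) {
            /* skip clean bytes */
        }
        if (start == end) {
            return false;
        }
        *offset = start;
        while (start < end && dirty[start]) {
            start++;
        }
        *bytes = start - *offset;
        return true;
    }

    int main(void)
    {
        enum { LEN = 32, CLUSTER = 8 };
        bool dirty[LEN] = { [3] = true, [4] = true, [20] = true };
        uint64_t offset = 0, bytes = LEN;

        /* Same shape as the patched loop: take the next dirty area, round
         * it out to cluster boundaries, then restart the search just past
         * the last cluster that was marked. */
        while (next_dirty_area(dirty, LEN, &offset, &bytes)) {
            uint64_t cluster = offset / CLUSTER;
            uint64_t end_cluster = DIV_ROUND_UP(offset + bytes, CLUSTER);

            printf("mark clusters [%llu, %llu) for copying\n",
                   (unsigned long long)cluster,
                   (unsigned long long)end_cluster);

            offset = end_cluster * CLUSTER;
            if (offset >= LEN) {
                break;
            }
            bytes = LEN - offset;
        }
        return 0;
    }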
Diffstat (limited to 'block')
-rw-r--r--  block/backup.c | 40
1 file changed, 12 insertions(+), 28 deletions(-)
diff --git a/block/backup.c b/block/backup.c
index 916817d8b1..db83b09a0b 100644
--- a/block/backup.c
+++ b/block/backup.c
@@ -394,43 +394,27 @@ static int coroutine_fn backup_run_incremental(BackupBlockJob *job)
 /* init copy_bitmap from sync_bitmap */
 static void backup_incremental_init_copy_bitmap(BackupBlockJob *job)
 {
-    BdrvDirtyBitmapIter *dbi;
-    int64_t offset;
-    int64_t end = DIV_ROUND_UP(bdrv_dirty_bitmap_size(job->sync_bitmap),
-                               job->cluster_size);
-
-    dbi = bdrv_dirty_iter_new(job->sync_bitmap);
-    while ((offset = bdrv_dirty_iter_next(dbi)) != -1) {
-        int64_t cluster = offset / job->cluster_size;
-        int64_t next_cluster;
-
-        offset += bdrv_dirty_bitmap_granularity(job->sync_bitmap);
-        if (offset >= bdrv_dirty_bitmap_size(job->sync_bitmap)) {
-            hbitmap_set(job->copy_bitmap, cluster, end - cluster);
-            break;
-        }
+    uint64_t offset = 0;
+    uint64_t bytes = job->len;
 
-        offset = bdrv_dirty_bitmap_next_zero(job->sync_bitmap, offset,
-                                             UINT64_MAX);
-        if (offset == -1) {
-            hbitmap_set(job->copy_bitmap, cluster, end - cluster);
-            break;
-        }
+    while (bdrv_dirty_bitmap_next_dirty_area(job->sync_bitmap,
+                                             &offset, &bytes))
+    {
+        uint64_t cluster = offset / job->cluster_size;
+        uint64_t end_cluster = DIV_ROUND_UP(offset + bytes, job->cluster_size);
 
-        next_cluster = DIV_ROUND_UP(offset, job->cluster_size);
-        hbitmap_set(job->copy_bitmap, cluster, next_cluster - cluster);
-        if (next_cluster >= end) {
+        hbitmap_set(job->copy_bitmap, cluster, end_cluster - cluster);
+
+        offset = end_cluster * job->cluster_size;
+        if (offset >= job->len) {
             break;
         }
-
-        bdrv_set_dirty_iter(dbi, next_cluster * job->cluster_size);
+        bytes = job->len - offset;
     }
 
     /* TODO job_progress_set_remaining() would make more sense */
     job_progress_update(&job->common.job,
         job->len - hbitmap_count(job->copy_bitmap) * job->cluster_size);
-
-    bdrv_dirty_iter_free(dbi);
 }
 
 static int coroutine_fn backup_run(Job *job, Error **errp)
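For readability, here is the function as it reads after this patch, assembled
from the '+' lines of the hunk above (no code beyond what the diff shows):

    /* init copy_bitmap from sync_bitmap */
    static void backup_incremental_init_copy_bitmap(BackupBlockJob *job)
    {
        uint64_t offset = 0;
        uint64_t bytes = job->len;

        while (bdrv_dirty_bitmap_next_dirty_area(job->sync_bitmap,
                                                 &offset, &bytes))
        {
            uint64_t cluster = offset / job->cluster_size;
            uint64_t end_cluster = DIV_ROUND_UP(offset + bytes, job->cluster_size);

            hbitmap_set(job->copy_bitmap, cluster, end_cluster - cluster);

            offset = end_cluster * job->cluster_size;
            if (offset >= job->len) {
                break;
            }
            bytes = job->len - offset;
        }

        /* TODO job_progress_set_remaining() would make more sense */
        job_progress_update(&job->common.job,
            job->len - hbitmap_count(job->copy_bitmap) * job->cluster_size);
    }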