author     Vladimir Sementsov-Ogievskiy  2021-08-24 10:38:29 +0200
committer  Hanna Reitz  2021-09-01 12:57:31 +0200
commit     f8b9504bac3a658af81cb19aec9572aa086799e2 (patch)
tree       adcbc19043c224e663698eb416db975f07ec3149 /block
parent     block-copy: move detecting fleecing scheme to block-copy (diff)
block/block-copy: introduce block_copy_set_copy_opts()
We'll need the ability to set the compress and use_copy_range options
after initialization of the state, so make the corresponding part of
block_copy_state_new() separate and public.

Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Message-Id: <20210824083856.17408-8-vsementsov@virtuozzo.com>
Reviewed-by: Hanna Reitz <hreitz@redhat.com>
Signed-off-by: Hanna Reitz <hreitz@redhat.com>
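As a quick illustration of what the now-public setter enables (a
hypothetical call site, not part of this patch; source, target,
cluster_size and errp are assumed to exist in the caller), the state can
be created with conservative defaults and re-tuned once the desired
options are known:

    /* Hypothetical caller inside QEMU's block layer. */
    BlockCopyState *s = block_copy_state_new(source, target, cluster_size,
                                             false /* use_copy_range */,
                                             false /* compress */, errp);
    if (s) {
        /* Re-tune copy options after initialization, e.g. per job config. */
        block_copy_set_copy_opts(s, true /* use_copy_range */,
                                 false /* compress */);
    }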
Diffstat (limited to 'block')
-rw-r--r--  block/block-copy.c | 49 +++++++++++++++++++++++++++++--------------------
1 file changed, 29 insertions(+), 20 deletions(-)
diff --git a/block/block-copy.c b/block/block-copy.c
index 58b4345a5a..e83870ff81 100644
--- a/block/block-copy.c
+++ b/block/block-copy.c
@@ -315,6 +315,33 @@ static uint32_t block_copy_max_transfer(BdrvChild *source, BdrvChild *target)
                                      target->bs->bl.max_transfer));
 }
 
+void block_copy_set_copy_opts(BlockCopyState *s, bool use_copy_range,
+                              bool compress)
+{
+    /* Keep BDRV_REQ_SERIALISING set (or not set) in block_copy_state_new() */
+    s->write_flags = (s->write_flags & BDRV_REQ_SERIALISING) |
+        (compress ? BDRV_REQ_WRITE_COMPRESSED : 0);
+
+    if (s->max_transfer < s->cluster_size) {
+        /*
+         * copy_range does not respect max_transfer. We don't want to bother
+         * with requests smaller than block-copy cluster size, so fall back
+         * to buffered copying (read and write respect max_transfer on their
+         * behalf).
+         */
+        s->method = COPY_READ_WRITE_CLUSTER;
+    } else if (compress) {
+        /* Compression supports only cluster-size writes and no copy-range. */
+        s->method = COPY_READ_WRITE_CLUSTER;
+    } else {
+        /*
+         * If copy range is enabled, start with COPY_RANGE_SMALL until the
+         * first successful copy_range (see block_copy_do_copy).
+         */
+        s->method = use_copy_range ? COPY_RANGE_SMALL : COPY_READ_WRITE;
+    }
+}
+
 BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
                                      int64_t cluster_size, bool use_copy_range,
                                      bool compress, Error **errp)
@@ -353,32 +380,14 @@ BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
         .copy_bitmap = copy_bitmap,
         .cluster_size = cluster_size,
         .len = bdrv_dirty_bitmap_size(copy_bitmap),
-        .write_flags = (is_fleecing ? BDRV_REQ_SERIALISING : 0) |
-            (compress ? BDRV_REQ_WRITE_COMPRESSED : 0),
+        .write_flags = (is_fleecing ? BDRV_REQ_SERIALISING : 0),
         .mem = shres_create(BLOCK_COPY_MAX_MEM),
         .max_transfer = QEMU_ALIGN_DOWN(
                     block_copy_max_transfer(source, target),
                     cluster_size),
     };
 
-    if (s->max_transfer < cluster_size) {
-        /*
-         * copy_range does not respect max_transfer. We don't want to bother
-         * with requests smaller than block-copy cluster size, so fallback to
-         * buffered copying (read and write respect max_transfer on their
-         * behalf).
-         */
-        s->method = COPY_READ_WRITE_CLUSTER;
-    } else if (compress) {
-        /* Compression supports only cluster-size writes and no copy-range. */
-        s->method = COPY_READ_WRITE_CLUSTER;
-    } else {
-        /*
-         * If copy range enabled, start with COPY_RANGE_SMALL, until first
-         * successful copy_range (look at block_copy_do_copy).
-         */
-        s->method = use_copy_range ? COPY_RANGE_SMALL : COPY_READ_WRITE;
-    }
+    block_copy_set_copy_opts(s, use_copy_range, compress);
 
     ratelimit_init(&s->rate_limit);
     qemu_co_mutex_init(&s->lock);
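A note on the write_flags masking in block_copy_set_copy_opts(): only
BDRV_REQ_SERIALISING (decided once by the fleecing detection in
block_copy_state_new()) survives repeated calls, while
BDRV_REQ_WRITE_COMPRESSED is recomputed from the compress argument every
time. A self-contained sketch, using placeholder bit values rather than
QEMU's real flag definitions, shows the effect:

    #include <stdbool.h>
    #include <stdio.h>

    /* Placeholder bit values for illustration; not QEMU's definitions. */
    #define BDRV_REQ_SERIALISING      (1u << 0)
    #define BDRV_REQ_WRITE_COMPRESSED (1u << 1)

    /* Mirrors the masking logic of block_copy_set_copy_opts(). */
    static unsigned set_copy_opts(unsigned write_flags, bool compress)
    {
        return (write_flags & BDRV_REQ_SERIALISING) |
               (compress ? BDRV_REQ_WRITE_COMPRESSED : 0);
    }

    int main(void)
    {
        unsigned flags = BDRV_REQ_SERIALISING;  /* as in a fleecing setup */

        flags = set_copy_opts(flags, true);   /* SERIALISING | WRITE_COMPRESSED */
        flags = set_copy_opts(flags, false);  /* back to SERIALISING alone */
        printf("flags = %#x\n", flags);       /* prints 0x1 */
        return 0;
    }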