Diffstat (limited to 'block-migration.c')
-rw-r--r-- | block-migration.c | 235
1 file changed, 167 insertions, 68 deletions
diff --git a/block-migration.c b/block-migration.c
index 230d7ed59f..92349a2149 100644
--- a/block-migration.c
+++ b/block-migration.c
@@ -15,8 +15,10 @@
 #include "block_int.h"
 #include "hw/hw.h"
 #include "qemu-queue.h"
+#include "qemu-timer.h"
 #include "monitor.h"
 #include "block-migration.h"
+#include "migration.h"
 #include <assert.h>
 
 #define BLOCK_SIZE (BDRV_SECTORS_PER_DIRTY_CHUNK << BDRV_SECTOR_BITS)
@@ -26,9 +28,6 @@
 #define BLK_MIG_FLAG_PROGRESS 0x04
 
 #define MAX_IS_ALLOCATED_SEARCH 65536
-#define MAX_BLOCKS_READ 10000
-#define BLOCKS_READ_CHANGE 100
-#define INITIAL_BLOCKS_READ 100
 
 //#define DEBUG_BLK_MIGRATION
 
@@ -45,6 +44,7 @@ typedef struct BlkMigDevState {
     int bulk_completed;
     int shared_base;
     int64_t cur_sector;
+    int64_t cur_dirty;
     int64_t completed_sectors;
     int64_t total_sectors;
     int64_t dirty;
@@ -59,6 +59,7 @@ typedef struct BlkMigBlock {
     QEMUIOVector qiov;
     BlockDriverAIOCB *aiocb;
     int ret;
+    int64_t time;
     QSIMPLEQ_ENTRY(BlkMigBlock) entry;
 } BlkMigBlock;
 
@@ -72,6 +73,9 @@ typedef struct BlkMigState {
     int transferred;
     int64_t total_sector_sum;
     int prev_progress;
+    int bulk_completed;
+    long double total_time;
+    int reads;
 } BlkMigState;
 
 static BlkMigState block_mig_state;
@@ -124,12 +128,28 @@ uint64_t blk_mig_bytes_total(void)
     return sum << BDRV_SECTOR_BITS;
 }
 
+static inline void add_avg_read_time(int64_t time)
+{
+    block_mig_state.reads++;
+    block_mig_state.total_time += time;
+}
+
+static inline long double compute_read_bwidth(void)
+{
+    assert(block_mig_state.total_time != 0);
+    return (block_mig_state.reads * BLOCK_SIZE) / block_mig_state.total_time;
+}
+
 static void blk_mig_read_cb(void *opaque, int ret)
 {
     BlkMigBlock *blk = opaque;
 
     blk->ret = ret;
 
+    blk->time = qemu_get_clock_ns(rt_clock) - blk->time;
+
+    add_avg_read_time(blk->time);
+
     QSIMPLEQ_INSERT_TAIL(&block_mig_state.blk_list, blk, entry);
 
     block_mig_state.submitted--;
@@ -138,7 +158,7 @@ static void blk_mig_read_cb(void *opaque, int ret)
 }
 
 static int mig_save_device_bulk(Monitor *mon, QEMUFile *f,
-                                BlkMigDevState *bmds, int is_async)
+                                BlkMigDevState *bmds)
 {
     int64_t total_sectors = bmds->total_sectors;
     int64_t cur_sector = bmds->cur_sector;
@@ -175,26 +195,18 @@ static int mig_save_device_bulk(Monitor *mon, QEMUFile *f,
     blk->bmds = bmds;
     blk->sector = cur_sector;
 
-    if (is_async) {
-        blk->iov.iov_base = blk->buf;
-        blk->iov.iov_len = nr_sectors * BDRV_SECTOR_SIZE;
-        qemu_iovec_init_external(&blk->qiov, &blk->iov, 1);
+    blk->iov.iov_base = blk->buf;
+    blk->iov.iov_len = nr_sectors * BDRV_SECTOR_SIZE;
+    qemu_iovec_init_external(&blk->qiov, &blk->iov, 1);
 
-        blk->aiocb = bdrv_aio_readv(bs, cur_sector, &blk->qiov,
-                                    nr_sectors, blk_mig_read_cb, blk);
-        if (!blk->aiocb) {
-            goto error;
-        }
-        block_mig_state.submitted++;
-    } else {
-        if (bdrv_read(bs, cur_sector, blk->buf, nr_sectors) < 0) {
-            goto error;
-        }
-        blk_send(f, blk);
+    blk->time = qemu_get_clock_ns(rt_clock);
 
-        qemu_free(blk->buf);
-        qemu_free(blk);
+    blk->aiocb = bdrv_aio_readv(bs, cur_sector, &blk->qiov,
+                                nr_sectors, blk_mig_read_cb, blk);
+    if (!blk->aiocb) {
+        goto error;
     }
+    block_mig_state.submitted++;
 
     bdrv_reset_dirty(bs, cur_sector, nr_sectors);
     bmds->cur_sector = cur_sector + nr_sectors;
@@ -229,6 +241,9 @@ static void init_blk_migration(Monitor *mon, QEMUFile *f)
     block_mig_state.transferred = 0;
     block_mig_state.total_sector_sum = 0;
     block_mig_state.prev_progress = -1;
+    block_mig_state.bulk_completed = 0;
+    block_mig_state.total_time = 0;
+    block_mig_state.reads = 0;
 
     for (bs = bdrv_first; bs != NULL; bs = bs->next) {
         if (bs->type == BDRV_TYPE_HD) {
@@ -260,7 +275,7 @@ static void init_blk_migration(Monitor *mon, QEMUFile *f)
     }
 }
 
-static int blk_mig_save_bulked_block(Monitor *mon, QEMUFile *f, int is_async)
+static int blk_mig_save_bulked_block(Monitor *mon, QEMUFile *f)
 {
     int64_t completed_sector_sum = 0;
     BlkMigDevState *bmds;
@@ -269,7 +284,7 @@ static int blk_mig_save_bulked_block(Monitor *mon, QEMUFile *f, int is_async)
 
     QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
         if (bmds->bulk_completed == 0) {
-            if (mig_save_device_bulk(mon, f, bmds, is_async) == 1) {
+            if (mig_save_device_bulk(mon, f, bmds) == 1) {
                 /* completed bulk section for this device */
                 bmds->bulk_completed = 1;
             }
@@ -293,39 +308,90 @@ static int blk_mig_save_bulked_block(Monitor *mon, QEMUFile *f, int is_async)
     return ret;
 }
 
-#define MAX_NUM_BLOCKS 4
-
-static void blk_mig_save_dirty_blocks(Monitor *mon, QEMUFile *f)
+static void blk_mig_reset_dirty_cursor(void)
 {
     BlkMigDevState *bmds;
-    BlkMigBlock blk;
+
+    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
+        bmds->cur_dirty = 0;
+    }
+}
+
+static int mig_save_device_dirty(Monitor *mon, QEMUFile *f,
+                                 BlkMigDevState *bmds, int is_async)
+{
+    BlkMigBlock *blk;
+    int64_t total_sectors = bmds->total_sectors;
     int64_t sector;
+    int nr_sectors;
 
-    blk.buf = qemu_malloc(BLOCK_SIZE);
+    for (sector = bmds->cur_dirty; sector < bmds->total_sectors;) {
+        if (bdrv_get_dirty(bmds->bs, sector)) {
 
-    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
-        for (sector = 0; sector < bmds->cur_sector;) {
-            if (bdrv_get_dirty(bmds->bs, sector)) {
-                if (bdrv_read(bmds->bs, sector, blk.buf,
-                              BDRV_SECTORS_PER_DIRTY_CHUNK) < 0) {
-                    monitor_printf(mon, "Error reading sector %" PRId64 "\n",
-                                   sector);
-                    qemu_file_set_error(f);
-                    qemu_free(blk.buf);
-                    return;
+            if (total_sectors - sector < BDRV_SECTORS_PER_DIRTY_CHUNK) {
+                nr_sectors = total_sectors - sector;
+            } else {
+                nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;
+            }
+            blk = qemu_malloc(sizeof(BlkMigBlock));
+            blk->buf = qemu_malloc(BLOCK_SIZE);
+            blk->bmds = bmds;
+            blk->sector = sector;
+
+            if (is_async) {
+                blk->iov.iov_base = blk->buf;
+                blk->iov.iov_len = nr_sectors * BDRV_SECTOR_SIZE;
+                qemu_iovec_init_external(&blk->qiov, &blk->iov, 1);
+
+                blk->time = qemu_get_clock_ns(rt_clock);
+
+                blk->aiocb = bdrv_aio_readv(bmds->bs, sector, &blk->qiov,
+                                            nr_sectors, blk_mig_read_cb, blk);
+                if (!blk->aiocb) {
+                    goto error;
                 }
-                blk.bmds = bmds;
-                blk.sector = sector;
-                blk_send(f, &blk);
+                block_mig_state.submitted++;
+            } else {
+                if (bdrv_read(bmds->bs, sector, blk->buf,
+                              nr_sectors) < 0) {
+                    goto error;
+                }
+                blk_send(f, blk);
 
-                bdrv_reset_dirty(bmds->bs, sector,
-                                 BDRV_SECTORS_PER_DIRTY_CHUNK);
+                qemu_free(blk->buf);
+                qemu_free(blk);
             }
-            sector += BDRV_SECTORS_PER_DIRTY_CHUNK;
+
+            bdrv_reset_dirty(bmds->bs, sector, nr_sectors);
+            break;
+        }
+        sector += BDRV_SECTORS_PER_DIRTY_CHUNK;
+        bmds->cur_dirty = sector;
+    }
+
+    return (bmds->cur_dirty >= bmds->total_sectors);
+
+error:
+    monitor_printf(mon, "Error reading sector %" PRId64 "\n", sector);
+    qemu_file_set_error(f);
+    qemu_free(blk->buf);
+    qemu_free(blk);
+    return 0;
+}
+
+static int blk_mig_save_dirty_block(Monitor *mon, QEMUFile *f, int is_async)
+{
+    BlkMigDevState *bmds;
+    int ret = 0;
+
+    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
+        if (mig_save_device_dirty(mon, f, bmds, is_async) == 0) {
+            ret = 1;
+            break;
+        }
     }
-    qemu_free(blk.buf);
+
+    return ret;
 }
 
 static void flush_blks(QEMUFile* f)
@@ -360,21 +426,42 @@
             block_mig_state.transferred);
 }
 
-static int is_stage2_completed(void)
+static int64_t get_remaining_dirty(void)
 {
     BlkMigDevState *bmds;
+    int64_t dirty = 0;
 
-    if (block_mig_state.submitted > 0) {
-        return 0;
+    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
+        dirty += bdrv_get_dirty_count(bmds->bs);
     }
 
-    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
-        if (bmds->bulk_completed == 0) {
-            return 0;
+    return dirty * BLOCK_SIZE;
+}
+
+static int is_stage2_completed(void)
+{
+    int64_t remaining_dirty;
+    long double bwidth;
+
+    if (block_mig_state.bulk_completed == 1) {
+
+        remaining_dirty = get_remaining_dirty();
+        if (remaining_dirty == 0) {
+            return 1;
+        }
+
+        bwidth = compute_read_bwidth();
+
+        if ((remaining_dirty / bwidth) <=
+            migrate_max_downtime()) {
+            /* finish stage2 because we think that we can finish remaining
+               work below max_downtime */
+
+            return 1;
         }
     }
 
-    return 1;
+    return 0;
 }
 
 static void blk_mig_cleanup(Monitor *mon)
@@ -428,29 +515,41 @@ static int block_save_live(Monitor *mon, QEMUFile *f, int stage, void *opaque)
         return 0;
     }
 
-    /* control the rate of transfer */
-    while ((block_mig_state.submitted +
-            block_mig_state.read_done) * BLOCK_SIZE <
-           qemu_file_get_rate_limit(f)) {
-        if (blk_mig_save_bulked_block(mon, f, 1) == 0) {
-            /* no more bulk blocks for now */
-            break;
+    blk_mig_reset_dirty_cursor();
+
+    if (stage == 2) {
+        /* control the rate of transfer */
+        while ((block_mig_state.submitted +
+                block_mig_state.read_done) * BLOCK_SIZE <
+               qemu_file_get_rate_limit(f)) {
+            if (block_mig_state.bulk_completed == 0) {
+                /* first finish the bulk phase */
+                if (blk_mig_save_bulked_block(mon, f) == 0) {
+                    /* finished saving bulk on all devices */
+                    block_mig_state.bulk_completed = 1;
+                }
+            } else {
+                if (blk_mig_save_dirty_block(mon, f, 1) == 0) {
+                    /* no more dirty blocks */
+                    break;
+                }
+            }
         }
-    }
 
-    flush_blks(f);
+        flush_blks(f);
 
-    if (qemu_file_has_error(f)) {
-        blk_mig_cleanup(mon);
-        return 0;
+        if (qemu_file_has_error(f)) {
+            blk_mig_cleanup(mon);
+            return 0;
+        }
     }
 
     if (stage == 3) {
-        while (blk_mig_save_bulked_block(mon, f, 0) != 0) {
-            /* empty */
-        }
+        /* we know for sure that save bulk is completed and
+           all async reads completed */
+        assert(block_mig_state.submitted == 0);
 
-        blk_mig_save_dirty_blocks(mon, f);
+        while (blk_mig_save_dirty_block(mon, f, 0) != 0);
 
         blk_mig_cleanup(mon);
 
         /* report completion */
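
The heart of the patch is the downtime estimate in is_stage2_completed(): every completed chunk read feeds its latency into total_time, compute_read_bwidth() turns that into an average read bandwidth, and stage 2 ends once remaining_dirty / bwidth drops below migrate_max_downtime(). The standalone sketch below reproduces that arithmetic outside QEMU; only add_avg_read_time() and compute_read_bwidth() mirror the patch, while the constants and the main() driver are hypothetical.

/* A minimal sketch of the downtime heuristic. All durations are in
 * nanoseconds, so the bandwidth comes out in bytes/ns and
 * remaining_dirty / bwidth is again nanoseconds, directly comparable
 * to the migrate_max_downtime() threshold. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define BLOCK_SIZE (1 << 20)            /* one migration chunk, 1 MiB */

static int reads;                       /* completed chunk reads */
static long double total_time;          /* summed read latency, ns */

static void add_avg_read_time(int64_t time)
{
    reads++;
    total_time += time;
}

/* average bytes transferred per nanosecond over all completed reads */
static long double compute_read_bwidth(void)
{
    assert(total_time != 0);
    return (reads * (long double)BLOCK_SIZE) / total_time;
}

int main(void)
{
    int64_t max_downtime_ns = 30 * 1000000LL;    /* hypothetical 30 ms cap */
    int64_t remaining_dirty = 64LL * BLOCK_SIZE; /* 64 dirty chunks left */

    for (int i = 0; i < 100; i++) {     /* pretend 100 reads, 2 ms each */
        add_avg_read_time(2 * 1000000LL);
    }

    long double eta_ns = remaining_dirty / compute_read_bwidth();
    printf("estimated flush time %.1Lf ms -> %s\n", eta_ns / 1000000.0L,
           eta_ns <= max_downtime_ns ? "finish stage 2" : "keep iterating");
    return 0;
}

With these made-up numbers the estimated flush time is 128 ms, above the 30 ms cap, so migration would keep iterating over dirty blocks.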
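mig_save_device_dirty() introduces a per-device cursor (cur_dirty) so that each call transfers at most one dirty chunk and resumes where it left off instead of rescanning from sector 0. A toy model of that pattern, with a plain array standing in for the bitmap behind bdrv_get_dirty()/bdrv_reset_dirty() and all names illustrative:

#include <stdbool.h>
#include <stdio.h>

#define NR_CHUNKS 8                /* device size in dirty-tracking chunks */

static bool chunk_dirty[NR_CHUNKS];
static int cur_chunk;              /* per-device cursor, like cur_dirty */

/* Returns 1 once the cursor has swept past the end of the device. */
static int save_one_dirty_chunk(void)
{
    for (int chunk = cur_chunk; chunk < NR_CHUNKS;) {
        if (chunk_dirty[chunk]) {
            printf("send chunk %d\n", chunk);
            chunk_dirty[chunk] = false;   /* bdrv_reset_dirty() analogue */
            /* like the patch, stop after one chunk without advancing the
             * cursor; the next call re-tests this chunk and finds it clean */
            break;
        }
        chunk++;
        cur_chunk = chunk;
    }
    return cur_chunk >= NR_CHUNKS;
}

int main(void)
{
    chunk_dirty[1] = chunk_dirty[4] = chunk_dirty[7] = true;
    cur_chunk = 0;                 /* blk_mig_reset_dirty_cursor() analogue */

    while (save_one_dirty_chunk() == 0) {
        /* in QEMU this loop is bounded by the migration rate limit */
    }
    return 0;
}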
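Finally, the reworked stage-2 loop in block_save_live() drains the bulk phase first and only then streams single dirty chunks, all within the budget enforced by qemu_file_get_rate_limit(). A condensed model under assumed numbers, with stub phase functions standing in for blk_mig_save_bulked_block() and blk_mig_save_dirty_block():

#include <stdbool.h>
#include <stdio.h>

static bool bulk_completed;        /* block_mig_state.bulk_completed */
static int in_flight;              /* pending chunks, drained by "flushing" */

static int rate_limit(void) { return 4; }   /* budget: 4 pending chunks */

static int save_bulked_block(void)          /* 0 = bulk phase exhausted */
{
    static int chunks = 3;
    if (chunks == 0) return 0;
    chunks--; in_flight++;
    printf("queued bulk chunk\n");
    return 1;
}

static int save_dirty_block(void)           /* 0 = no dirty chunks left */
{
    static int chunks = 2;
    if (chunks == 0) return 0;
    chunks--; in_flight++;
    printf("queued dirty chunk\n");
    return 1;
}

/* one stage-2 iteration: bulk first, then dirty, within the budget */
static void stage2_iteration(void)
{
    while (in_flight < rate_limit()) {
        if (!bulk_completed) {
            if (save_bulked_block() == 0) {
                bulk_completed = true;      /* all devices finished bulk */
            }
        } else if (save_dirty_block() == 0) {
            break;                          /* no more dirty blocks for now */
        }
    }
}

int main(void)
{
    stage2_iteration();   /* queues 3 bulk chunks, flips phase, queues 1 dirty */
    in_flight = 0;        /* pretend flush_blks() drained the queue */
    stage2_iteration();   /* queues the last dirty chunk */
    return 0;
}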