-rw-r--r--  VERSION | 2
-rw-r--r--  block.c | 127
-rw-r--r--  block/block-backend.c | 114
-rw-r--r--  block/bochs.c | 51
-rw-r--r--  block/cloop.c | 38
-rw-r--r--  block/crypto.c | 16
-rw-r--r--  block/curl.c | 10
-rw-r--r--  block/dmg.c | 40
-rw-r--r--  block/io.c | 514
-rw-r--r--  block/iscsi.c | 19
-rw-r--r--  block/linux-aio.c | 57
-rw-r--r--  block/nbd-client.c | 11
-rw-r--r--  block/nbd-client.h | 2
-rw-r--r--  block/nbd.c | 37
-rw-r--r--  block/parallels.c | 5
-rw-r--r--  block/qcow.c | 8
-rw-r--r--  block/qcow2.c | 76
-rw-r--r--  block/qed.c | 12
-rw-r--r--  block/quorum.c | 94
-rw-r--r--  block/raw-aio.h | 15
-rw-r--r--  block/raw-posix.c | 21
-rw-r--r--  block/raw_bsd.c | 15
-rw-r--r--  block/sheepdog.c | 15
-rw-r--r--  block/throttle-groups.c | 18
-rw-r--r--  block/vdi.c | 131
-rw-r--r--  block/vhdx.c | 5
-rw-r--r--  block/vmdk.c | 367
-rw-r--r--  block/vpc.c | 175
-rw-r--r--  block/vvfat.c | 55
-rw-r--r--  blockdev.c | 57
-rw-r--r--  cpu-exec-common.c | 2
-rw-r--r--  cpu-exec.c | 519
-rw-r--r--  cputlb.c | 13
-rw-r--r--  default-configs/arm-softmmu.mak | 1
-rw-r--r--  dma-helpers.c | 14
-rw-r--r--  docs/qapi-code-gen.txt | 44
-rw-r--r--  exec.c | 2
-rw-r--r--  hw/acpi/aml-build.c | 11
-rw-r--r--  hw/arm/Makefile.objs | 1
-rw-r--r--  hw/arm/armv7m.c | 11
-rw-r--r--  hw/arm/boot.c | 43
-rw-r--r--  hw/arm/fsl-imx6.c | 449
-rw-r--r--  hw/arm/highbank.c | 12
-rw-r--r--  hw/arm/integratorcp.c | 32
-rw-r--r--  hw/arm/nseries.c | 3
-rw-r--r--  hw/arm/pxa2xx.c | 26
-rw-r--r--  hw/arm/pxa2xx_pic.c | 7
-rw-r--r--  hw/arm/sabrelite.c | 121
-rw-r--r--  hw/arm/spitz.c | 23
-rw-r--r--  hw/arm/stellaris.c | 48
-rw-r--r--  hw/arm/strongarm.c | 66
-rw-r--r--  hw/arm/versatilepb.c | 13
-rw-r--r--  hw/arm/virt-acpi-build.c | 52
-rw-r--r--  hw/arm/virt.c | 8
-rw-r--r--  hw/block/fdc.c | 25
-rw-r--r--  hw/block/hd-geometry.c | 2
-rw-r--r--  hw/block/m25p80.c | 23
-rw-r--r--  hw/block/nand.c | 36
-rw-r--r--  hw/block/onenand.c | 41
-rw-r--r--  hw/block/pflash_cfi01.c | 12
-rw-r--r--  hw/block/pflash_cfi02.c | 12
-rw-r--r--  hw/block/virtio-blk.c | 18
-rw-r--r--  hw/block/xen_disk.c | 10
-rw-r--r--  hw/display/blizzard.c | 120
-rw-r--r--  hw/display/blizzard_template.h | 146
-rw-r--r--  hw/display/exynos4210_fimd.c | 19
-rw-r--r--  hw/display/jazz_led.c | 18
-rw-r--r--  hw/display/omap_lcd_template.h | 10
-rw-r--r--  hw/display/omap_lcdc.c | 48
-rw-r--r--  hw/i386/acpi-build.c | 41
-rw-r--r--  hw/i386/kvmvapic.c | 3
-rw-r--r--  hw/ide/atapi.c | 19
-rw-r--r--  hw/ide/core.c | 10
-rw-r--r--  hw/ide/internal.h | 2
-rw-r--r--  hw/ide/macio.c | 13
-rw-r--r--  hw/intc/etraxfs_pic.c | 13
-rw-r--r--  hw/intc/exynos4210_combiner.c | 14
-rw-r--r--  hw/intc/exynos4210_gic.c | 39
-rw-r--r--  hw/intc/grlib_irqmp.c | 27
-rw-r--r--  hw/intc/imx_avic.c | 15
-rw-r--r--  hw/intc/omap_intc.c | 64
-rw-r--r--  hw/intc/pl190.c | 13
-rw-r--r--  hw/intc/slavio_intctl.c | 14
-rw-r--r--  hw/misc/Makefile.objs | 1
-rw-r--r--  hw/misc/bcm2835_property.c | 33
-rw-r--r--  hw/misc/imx6_src.c | 264
-rw-r--r--  hw/nvram/spapr_nvram.c | 4
-rw-r--r--  hw/ppc/spapr_drc.c | 11
-rw-r--r--  hw/scsi/scsi-disk.c | 45
-rw-r--r--  hw/sd/sd.c | 51
-rw-r--r--  hw/ssi/Makefile.objs | 1
-rw-r--r--  hw/ssi/imx_spi.c | 454
-rw-r--r--  hw/usb/dev-mtp.c | 4
-rw-r--r--  hw/usb/hcd-xhci.c | 5
-rw-r--r--  hw/usb/host-libusb.c | 13
-rw-r--r--  hw/virtio/virtio-balloon.c | 15
-rw-r--r--  include/block/block.h | 11
-rw-r--r--  include/block/block_int.h | 44
-rw-r--r--  include/block/throttle-groups.h | 1
-rw-r--r--  include/exec/exec-all.h | 108
-rw-r--r--  include/exec/gen-icount.h | 16
-rw-r--r--  include/hw/acpi/acpi-defs.h | 17
-rw-r--r--  include/hw/acpi/aml-build.h | 10
-rw-r--r--  include/hw/arm/fsl-imx6.h | 450
-rw-r--r--  include/hw/misc/imx6_src.h | 73
-rw-r--r--  include/hw/ssi/imx_spi.h | 103
-rw-r--r--  include/qapi/dealloc-visitor.h | 5
-rw-r--r--  include/qapi/opts-visitor.h | 5
-rw-r--r--  include/qapi/qmp-input-visitor.h | 9
-rw-r--r--  include/qapi/qmp/dispatch.h | 6
-rw-r--r--  include/qapi/string-input-visitor.h | 5
-rw-r--r--  include/qapi/string-output-visitor.h | 5
-rw-r--r--  include/qapi/visitor-impl.h | 81
-rw-r--r--  include/qapi/visitor.h | 493
-rw-r--r--  include/qemu/fifo32.h | 191
-rw-r--r--  include/qemu/osdep.h | 14
-rw-r--r--  include/qom/cpu.h | 4
-rw-r--r--  include/sysemu/block-backend.h | 35
-rw-r--r--  include/sysemu/dma.h | 4
-rw-r--r--  nbd/server.c | 2
-rw-r--r--  qapi/block-core.json | 32
-rw-r--r--  qapi/opts-visitor.c | 70
-rw-r--r--  qapi/qapi-dealloc-visitor.c | 43
-rw-r--r--  qapi/qapi-visit-core.c | 111
-rw-r--r--  qapi/qmp-dispatch.c | 18
-rw-r--r--  qapi/qmp-input-visitor.c | 187
-rw-r--r--  qapi/qmp-output-visitor.c | 63
-rw-r--r--  qapi/qmp-registry.c | 1
-rw-r--r--  qapi/string-input-visitor.c | 49
-rw-r--r--  qapi/string-output-visitor.c | 43
-rw-r--r--  qemu-doc.texi | 3
-rw-r--r--  qemu-img.c | 46
-rw-r--r--  qemu-io-cmds.c | 283
-rw-r--r--  qemu-io.c | 40
-rw-r--r--  qemu-nbd.c | 13
-rw-r--r--  qmp-commands.hx | 53
-rw-r--r--  qmp.c | 2
-rw-r--r--  qom/cpu.c | 1
-rw-r--r--  qom/object.c | 5
-rw-r--r--  qom/object_interfaces.c | 40
-rw-r--r--  qom/qom-qobject.c | 3
-rw-r--r--  replay/replay-input.c | 2
-rw-r--r--  scripts/qapi-commands.py | 12
-rw-r--r--  scripts/qapi-event.py | 5
-rw-r--r--  scripts/qapi-visit.py | 53
-rw-r--r--  target-alpha/cpu.h | 2
-rw-r--r--  target-alpha/translate.c | 4
-rw-r--r--  target-arm/Makefile.objs | 1
-rw-r--r--  target-arm/arm-powerctl.c | 224
-rw-r--r--  target-arm/arm-powerctl.h | 75
-rw-r--r--  target-arm/cpu.h | 2
-rw-r--r--  target-arm/helper.c | 45
-rw-r--r--  target-arm/internals.h | 24
-rw-r--r--  target-arm/op_helper.c | 6
-rw-r--r--  target-arm/psci.c | 70
-rw-r--r--  target-arm/translate-a64.c | 47
-rw-r--r--  target-arm/translate.c | 17
-rw-r--r--  target-cris/cpu.h | 2
-rw-r--r--  target-cris/translate.c | 16
-rw-r--r--  target-i386/cpu.h | 2
-rw-r--r--  target-i386/translate.c | 25
-rw-r--r--  target-lm32/cpu.h | 2
-rw-r--r--  target-lm32/translate.c | 21
-rw-r--r--  target-m68k/cpu.h | 2
-rw-r--r--  target-m68k/translate.c | 18
-rw-r--r--  target-microblaze/cpu.h | 2
-rw-r--r--  target-microblaze/translate.c | 15
-rw-r--r--  target-mips/cpu.h | 2
-rw-r--r--  target-mips/helper.c | 2
-rw-r--r--  target-mips/translate.c | 20
-rw-r--r--  target-moxie/cpu.h | 2
-rw-r--r--  target-moxie/translate.c | 21
-rw-r--r--  target-openrisc/cpu.h | 2
-rw-r--r--  target-openrisc/translate.c | 20
-rw-r--r--  target-ppc/cpu.h | 2
-rw-r--r--  target-ppc/translate.c | 20
-rw-r--r--  target-s390x/cpu.h | 2
-rw-r--r--  target-s390x/translate.c | 17
-rw-r--r--  target-sh4/cpu.h | 2
-rw-r--r--  target-sh4/translate.c | 21
-rw-r--r--  target-sparc/cpu.h | 2
-rw-r--r--  target-sparc/translate.c | 24
-rw-r--r--  target-tilegx/cpu.h | 2
-rw-r--r--  target-tricore/cpu.h | 2
-rw-r--r--  target-tricore/translate.c | 20
-rw-r--r--  target-unicore32/cpu.h | 2
-rw-r--r--  target-unicore32/translate.c | 16
-rw-r--r--  target-xtensa/cpu.h | 2
-rw-r--r--  target-xtensa/translate.c | 4
-rw-r--r--  tcg/aarch64/tcg-target.inc.c | 21
-rw-r--r--  tcg/arm/tcg-target.inc.c | 26
-rw-r--r--  tcg/i386/tcg-target.inc.c | 31
-rw-r--r--  tcg/ia64/tcg-target.inc.c | 6
-rw-r--r--  tcg/mips/tcg-target.inc.c | 11
-rw-r--r--  tcg/ppc/tcg-target.inc.c | 28
-rw-r--r--  tcg/s390/tcg-target.inc.c | 19
-rw-r--r--  tcg/sparc/tcg-target.inc.c | 11
-rw-r--r--  tcg/tcg-op.h | 13
-rw-r--r--  tcg/tcg.h | 31
-rw-r--r--  tcg/tci/tcg-target.inc.c | 12
-rw-r--r--  tci.c | 11
-rw-r--r--  tests/.gitignore | 1
-rw-r--r--  tests/Makefile | 6
-rw-r--r--  tests/check-qnull.c | 75
-rwxr-xr-x  tests/qemu-iotests/004 | 2
-rwxr-xr-x  tests/qemu-iotests/012 | 5
-rw-r--r--  tests/qemu-iotests/023.out | 2160
-rw-r--r--  tests/qemu-iotests/039.out | 20
-rwxr-xr-x  tests/qemu-iotests/048 | 26
-rw-r--r--  tests/qemu-iotests/048.out | 6
-rwxr-xr-x  tests/qemu-iotests/052 | 4
-rw-r--r--  tests/qemu-iotests/052.out | 4
-rw-r--r--  tests/qemu-iotests/061.out | 8
-rwxr-xr-x  tests/qemu-iotests/083 | 4
-rwxr-xr-x  tests/qemu-iotests/100 | 7
-rw-r--r--  tests/qemu-iotests/100.out | 14
-rw-r--r--  tests/qemu-iotests/137.out | 4
-rw-r--r--  tests/qemu-iotests/common | 15
-rw-r--r--  tests/qemu-iotests/common.config | 21
-rw-r--r--  tests/qemu-iotests/common.filter | 5
-rw-r--r--  tests/qemu-iotests/common.rc | 69
-rw-r--r--  tests/qemu-iotests/iotests.py | 10
-rw-r--r--  tests/test-qmp-commands.c | 15
-rw-r--r--  tests/test-qmp-input-strict.c | 21
-rw-r--r--  tests/test-qmp-input-visitor.c | 42
-rw-r--r--  tests/test-qmp-output-visitor.c | 35
-rw-r--r--  tests/test-string-input-visitor.c | 23
-rw-r--r--  tests/test-visitor-serialization.c | 2
-rw-r--r--  trace-events | 5
-rw-r--r--  translate-all.c | 354
-rw-r--r--  util/qemu-sockets.c | 2
231 files changed, 8441 insertions, 3799 deletions
diff --git a/VERSION b/VERSION
index f774a71ac5..373c4c9520 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-2.5.95
+2.6.50
diff --git a/block.c b/block.c
index d4939b49bf..18a497f69d 100644
--- a/block.c
+++ b/block.c
@@ -218,8 +218,6 @@ void bdrv_get_full_backing_filename(BlockDriverState *bs, char *dest, size_t sz,
void bdrv_register(BlockDriver *bdrv)
{
- bdrv_setup_io_funcs(bdrv);
-
QLIST_INSERT_HEAD(&bdrv_drivers, bdrv, list);
}
@@ -1176,10 +1174,10 @@ BdrvChild *bdrv_root_attach_child(BlockDriverState *child_bs,
return child;
}
-static BdrvChild *bdrv_attach_child(BlockDriverState *parent_bs,
- BlockDriverState *child_bs,
- const char *child_name,
- const BdrvChildRole *child_role)
+BdrvChild *bdrv_attach_child(BlockDriverState *parent_bs,
+ BlockDriverState *child_bs,
+ const char *child_name,
+ const BdrvChildRole *child_role)
{
BdrvChild *child = bdrv_root_attach_child(child_bs, child_name, child_role);
QLIST_INSERT_HEAD(&parent_bs->children, child, next);
@@ -2261,7 +2259,6 @@ static void swap_feature_fields(BlockDriverState *bs_top,
assert(!bs_new->throttle_state);
if (bs_top->throttle_state) {
- assert(bs_top->io_limits_enabled);
bdrv_io_limits_enable(bs_new, throttle_group_get_name(bs_top));
bdrv_io_limits_disable(bs_top);
}
@@ -3201,6 +3198,7 @@ void bdrv_init_with_whitelist(void)
void bdrv_invalidate_cache(BlockDriverState *bs, Error **errp)
{
+ BdrvChild *child;
Error *local_err = NULL;
int ret;
@@ -3215,13 +3213,20 @@ void bdrv_invalidate_cache(BlockDriverState *bs, Error **errp)
if (bs->drv->bdrv_invalidate_cache) {
bs->drv->bdrv_invalidate_cache(bs, &local_err);
- } else if (bs->file) {
- bdrv_invalidate_cache(bs->file->bs, &local_err);
+ if (local_err) {
+ bs->open_flags |= BDRV_O_INACTIVE;
+ error_propagate(errp, local_err);
+ return;
+ }
}
- if (local_err) {
- bs->open_flags |= BDRV_O_INACTIVE;
- error_propagate(errp, local_err);
- return;
+
+ QLIST_FOREACH(child, &bs->children, next) {
+ bdrv_invalidate_cache(child->bs, &local_err);
+ if (local_err) {
+ bs->open_flags |= BDRV_O_INACTIVE;
+ error_propagate(errp, local_err);
+ return;
+ }
}
ret = refresh_total_sectors(bs, bs->total_sectors);
@@ -3250,38 +3255,63 @@ void bdrv_invalidate_cache_all(Error **errp)
}
}
-static int bdrv_inactivate(BlockDriverState *bs)
+static int bdrv_inactivate_recurse(BlockDriverState *bs,
+ bool setting_flag)
{
+ BdrvChild *child;
int ret;
- if (bs->drv->bdrv_inactivate) {
+ if (!setting_flag && bs->drv->bdrv_inactivate) {
ret = bs->drv->bdrv_inactivate(bs);
if (ret < 0) {
return ret;
}
}
- bs->open_flags |= BDRV_O_INACTIVE;
+ QLIST_FOREACH(child, &bs->children, next) {
+ ret = bdrv_inactivate_recurse(child->bs, setting_flag);
+ if (ret < 0) {
+ return ret;
+ }
+ }
+
+ if (setting_flag) {
+ bs->open_flags |= BDRV_O_INACTIVE;
+ }
return 0;
}
int bdrv_inactivate_all(void)
{
BlockDriverState *bs = NULL;
- int ret;
+ int ret = 0;
+ int pass;
while ((bs = bdrv_next(bs)) != NULL) {
- AioContext *aio_context = bdrv_get_aio_context(bs);
+ aio_context_acquire(bdrv_get_aio_context(bs));
+ }
- aio_context_acquire(aio_context);
- ret = bdrv_inactivate(bs);
- aio_context_release(aio_context);
- if (ret < 0) {
- return ret;
+ /* We do two passes of inactivation. The first pass calls to drivers'
+ * .bdrv_inactivate callbacks recursively so all cache is flushed to disk;
+ * the second pass sets the BDRV_O_INACTIVE flag so that no further write
+ * is allowed. */
+ for (pass = 0; pass < 2; pass++) {
+ bs = NULL;
+ while ((bs = bdrv_next(bs)) != NULL) {
+ ret = bdrv_inactivate_recurse(bs, pass);
+ if (ret < 0) {
+ goto out;
+ }
}
}
- return 0;
+out:
+ bs = NULL;
+ while ((bs = bdrv_next(bs)) != NULL) {
+ aio_context_release(bdrv_get_aio_context(bs));
+ }
+
+ return ret;
}
/**************************************************************/
@@ -3981,3 +4011,52 @@ void bdrv_refresh_filename(BlockDriverState *bs)
QDECREF(json);
}
}
+
+/*
+ * Hot add/remove a BDS's child. So the user can take a child offline when
+ * it is broken and take a new child online
+ */
+void bdrv_add_child(BlockDriverState *parent_bs, BlockDriverState *child_bs,
+ Error **errp)
+{
+
+ if (!parent_bs->drv || !parent_bs->drv->bdrv_add_child) {
+ error_setg(errp, "The node %s does not support adding a child",
+ bdrv_get_device_or_node_name(parent_bs));
+ return;
+ }
+
+ if (!QLIST_EMPTY(&child_bs->parents)) {
+ error_setg(errp, "The node %s already has a parent",
+ child_bs->node_name);
+ return;
+ }
+
+ parent_bs->drv->bdrv_add_child(parent_bs, child_bs, errp);
+}
+
+void bdrv_del_child(BlockDriverState *parent_bs, BdrvChild *child, Error **errp)
+{
+ BdrvChild *tmp;
+
+ if (!parent_bs->drv || !parent_bs->drv->bdrv_del_child) {
+ error_setg(errp, "The node %s does not support removing a child",
+ bdrv_get_device_or_node_name(parent_bs));
+ return;
+ }
+
+ QLIST_FOREACH(tmp, &parent_bs->children, next) {
+ if (tmp == child) {
+ break;
+ }
+ }
+
+ if (!tmp) {
+ error_setg(errp, "The node %s does not have a child named %s",
+ bdrv_get_device_or_node_name(parent_bs),
+ bdrv_get_device_or_node_name(child->bs));
+ return;
+ }
+
+ parent_bs->drv->bdrv_del_child(parent_bs, child, errp);
+}
diff --git a/block/block-backend.c b/block/block-backend.c
index 16c9d5e0f2..a1e2c7fa20 100644
--- a/block/block-backend.c
+++ b/block/block-backend.c
@@ -1,7 +1,7 @@
/*
* QEMU Block backends
*
- * Copyright (C) 2014 Red Hat, Inc.
+ * Copyright (C) 2014-2016 Red Hat, Inc.
*
* Authors:
* Markus Armbruster <armbru@redhat.com>,
@@ -692,7 +692,7 @@ static int coroutine_fn blk_co_preadv(BlockBackend *blk, int64_t offset,
return ret;
}
- return bdrv_co_do_preadv(blk_bs(blk), offset, bytes, qiov, flags);
+ return bdrv_co_preadv(blk_bs(blk), offset, bytes, qiov, flags);
}
static int coroutine_fn blk_co_pwritev(BlockBackend *blk, int64_t offset,
@@ -710,7 +710,7 @@ static int coroutine_fn blk_co_pwritev(BlockBackend *blk, int64_t offset,
flags |= BDRV_REQ_FUA;
}
- return bdrv_co_do_pwritev(blk_bs(blk), offset, bytes, qiov, flags);
+ return bdrv_co_pwritev(blk_bs(blk), offset, bytes, qiov, flags);
}
typedef struct BlkRwCo {
@@ -772,55 +772,28 @@ static int blk_prw(BlockBackend *blk, int64_t offset, uint8_t *buf,
return rwco.ret;
}
-static int blk_rw(BlockBackend *blk, int64_t sector_num, uint8_t *buf,
- int nb_sectors, CoroutineEntry co_entry,
- BdrvRequestFlags flags)
-{
- if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
- return -EINVAL;
- }
-
- return blk_prw(blk, sector_num << BDRV_SECTOR_BITS, buf,
- nb_sectors << BDRV_SECTOR_BITS, co_entry, flags);
-}
-
-int blk_read(BlockBackend *blk, int64_t sector_num, uint8_t *buf,
- int nb_sectors)
-{
- return blk_rw(blk, sector_num, buf, nb_sectors, blk_read_entry, 0);
-}
-
-int blk_read_unthrottled(BlockBackend *blk, int64_t sector_num, uint8_t *buf,
- int nb_sectors)
+int blk_pread_unthrottled(BlockBackend *blk, int64_t offset, uint8_t *buf,
+ int count)
{
BlockDriverState *bs = blk_bs(blk);
- bool enabled;
int ret;
- ret = blk_check_request(blk, sector_num, nb_sectors);
+ ret = blk_check_byte_request(blk, offset, count);
if (ret < 0) {
return ret;
}
- enabled = bs->io_limits_enabled;
- bs->io_limits_enabled = false;
- ret = blk_read(blk, sector_num, buf, nb_sectors);
- bs->io_limits_enabled = enabled;
+ bdrv_no_throttling_begin(bs);
+ ret = blk_pread(blk, offset, buf, count);
+ bdrv_no_throttling_end(bs);
return ret;
}
-int blk_write(BlockBackend *blk, int64_t sector_num, const uint8_t *buf,
- int nb_sectors)
-{
- return blk_rw(blk, sector_num, (uint8_t*) buf, nb_sectors,
- blk_write_entry, 0);
-}
-
-int blk_write_zeroes(BlockBackend *blk, int64_t sector_num,
- int nb_sectors, BdrvRequestFlags flags)
+int blk_write_zeroes(BlockBackend *blk, int64_t offset,
+ int count, BdrvRequestFlags flags)
{
- return blk_rw(blk, sector_num, NULL, nb_sectors, blk_write_entry,
- flags | BDRV_REQ_ZERO_WRITE);
+ return blk_prw(blk, offset, NULL, count, blk_write_entry,
+ flags | BDRV_REQ_ZERO_WRITE);
}
static void error_callback_bh(void *opaque)
@@ -932,18 +905,12 @@ static void blk_aio_write_entry(void *opaque)
blk_aio_complete(acb);
}
-BlockAIOCB *blk_aio_write_zeroes(BlockBackend *blk, int64_t sector_num,
- int nb_sectors, BdrvRequestFlags flags,
+BlockAIOCB *blk_aio_write_zeroes(BlockBackend *blk, int64_t offset,
+ int count, BdrvRequestFlags flags,
BlockCompletionFunc *cb, void *opaque)
{
- if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
- return blk_abort_aio_request(blk, cb, opaque, -EINVAL);
- }
-
- return blk_aio_prwv(blk, sector_num << BDRV_SECTOR_BITS,
- nb_sectors << BDRV_SECTOR_BITS, NULL,
- blk_aio_write_entry, flags | BDRV_REQ_ZERO_WRITE,
- cb, opaque);
+ return blk_aio_prwv(blk, offset, count, NULL, blk_aio_write_entry,
+ flags | BDRV_REQ_ZERO_WRITE, cb, opaque);
}
int blk_pread(BlockBackend *blk, int64_t offset, void *buf, int count)
@@ -955,9 +922,11 @@ int blk_pread(BlockBackend *blk, int64_t offset, void *buf, int count)
return count;
}
-int blk_pwrite(BlockBackend *blk, int64_t offset, const void *buf, int count)
+int blk_pwrite(BlockBackend *blk, int64_t offset, const void *buf, int count,
+ BdrvRequestFlags flags)
{
- int ret = blk_prw(blk, offset, (void*) buf, count, blk_write_entry, 0);
+ int ret = blk_prw(blk, offset, (void *) buf, count, blk_write_entry,
+ flags);
if (ret < 0) {
return ret;
}
@@ -991,30 +960,20 @@ int64_t blk_nb_sectors(BlockBackend *blk)
return bdrv_nb_sectors(blk_bs(blk));
}
-BlockAIOCB *blk_aio_readv(BlockBackend *blk, int64_t sector_num,
- QEMUIOVector *iov, int nb_sectors,
- BlockCompletionFunc *cb, void *opaque)
+BlockAIOCB *blk_aio_preadv(BlockBackend *blk, int64_t offset,
+ QEMUIOVector *qiov, BdrvRequestFlags flags,
+ BlockCompletionFunc *cb, void *opaque)
{
- if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
- return blk_abort_aio_request(blk, cb, opaque, -EINVAL);
- }
-
- assert(nb_sectors << BDRV_SECTOR_BITS == iov->size);
- return blk_aio_prwv(blk, sector_num << BDRV_SECTOR_BITS, iov->size, iov,
- blk_aio_read_entry, 0, cb, opaque);
+ return blk_aio_prwv(blk, offset, qiov->size, qiov,
+ blk_aio_read_entry, flags, cb, opaque);
}
-BlockAIOCB *blk_aio_writev(BlockBackend *blk, int64_t sector_num,
- QEMUIOVector *iov, int nb_sectors,
- BlockCompletionFunc *cb, void *opaque)
+BlockAIOCB *blk_aio_pwritev(BlockBackend *blk, int64_t offset,
+ QEMUIOVector *qiov, BdrvRequestFlags flags,
+ BlockCompletionFunc *cb, void *opaque)
{
- if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
- return blk_abort_aio_request(blk, cb, opaque, -EINVAL);
- }
-
- assert(nb_sectors << BDRV_SECTOR_BITS == iov->size);
- return blk_aio_prwv(blk, sector_num << BDRV_SECTOR_BITS, iov->size, iov,
- blk_aio_write_entry, 0, cb, opaque);
+ return blk_aio_prwv(blk, offset, qiov->size, qiov,
+ blk_aio_write_entry, flags, cb, opaque);
}
BlockAIOCB *blk_aio_flush(BlockBackend *blk,
@@ -1444,15 +1403,10 @@ void *blk_aio_get(const AIOCBInfo *aiocb_info, BlockBackend *blk,
return qemu_aio_get(aiocb_info, blk_bs(blk), cb, opaque);
}
-int coroutine_fn blk_co_write_zeroes(BlockBackend *blk, int64_t sector_num,
- int nb_sectors, BdrvRequestFlags flags)
+int coroutine_fn blk_co_write_zeroes(BlockBackend *blk, int64_t offset,
+ int count, BdrvRequestFlags flags)
{
- if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
- return -EINVAL;
- }
-
- return blk_co_pwritev(blk, sector_num << BDRV_SECTOR_BITS,
- nb_sectors << BDRV_SECTOR_BITS, NULL,
+ return blk_co_pwritev(blk, offset, count, NULL,
flags | BDRV_REQ_ZERO_WRITE);
}
diff --git a/block/bochs.c b/block/bochs.c
index af8b7abdfd..f0e18c0b84 100644
--- a/block/bochs.c
+++ b/block/bochs.c
@@ -104,6 +104,7 @@ static int bochs_open(BlockDriverState *bs, QDict *options, int flags,
int ret;
bs->read_only = 1; // no write support yet
+ bs->request_alignment = BDRV_SECTOR_SIZE; /* No sub-sector I/O supported */
ret = bdrv_pread(bs->file->bs, 0, &bochs, sizeof(bochs));
if (ret < 0) {
@@ -221,38 +222,52 @@ static int64_t seek_to_sector(BlockDriverState *bs, int64_t sector_num)
return bitmap_offset + (512 * (s->bitmap_blocks + extent_offset));
}
-static int bochs_read(BlockDriverState *bs, int64_t sector_num,
- uint8_t *buf, int nb_sectors)
+static int coroutine_fn
+bochs_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
+ QEMUIOVector *qiov, int flags)
{
+ BDRVBochsState *s = bs->opaque;
+ uint64_t sector_num = offset >> BDRV_SECTOR_BITS;
+ int nb_sectors = bytes >> BDRV_SECTOR_BITS;
+ uint64_t bytes_done = 0;
+ QEMUIOVector local_qiov;
int ret;
+ assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
+ assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
+
+ qemu_iovec_init(&local_qiov, qiov->niov);
+ qemu_co_mutex_lock(&s->lock);
+
while (nb_sectors > 0) {
int64_t block_offset = seek_to_sector(bs, sector_num);
if (block_offset < 0) {
- return block_offset;
- } else if (block_offset > 0) {
- ret = bdrv_pread(bs->file->bs, block_offset, buf, 512);
+ ret = block_offset;
+ goto fail;
+ }
+
+ qemu_iovec_reset(&local_qiov);
+ qemu_iovec_concat(&local_qiov, qiov, bytes_done, 512);
+
+ if (block_offset > 0) {
+ ret = bdrv_co_preadv(bs->file->bs, block_offset, 512,
+ &local_qiov, 0);
if (ret < 0) {
- return ret;
+ goto fail;
}
} else {
- memset(buf, 0, 512);
+ qemu_iovec_memset(&local_qiov, 0, 0, 512);
}
nb_sectors--;
sector_num++;
- buf += 512;
+ bytes_done += 512;
}
- return 0;
-}
-static coroutine_fn int bochs_co_read(BlockDriverState *bs, int64_t sector_num,
- uint8_t *buf, int nb_sectors)
-{
- int ret;
- BDRVBochsState *s = bs->opaque;
- qemu_co_mutex_lock(&s->lock);
- ret = bochs_read(bs, sector_num, buf, nb_sectors);
+ ret = 0;
+fail:
qemu_co_mutex_unlock(&s->lock);
+ qemu_iovec_destroy(&local_qiov);
+
return ret;
}
@@ -267,7 +282,7 @@ static BlockDriver bdrv_bochs = {
.instance_size = sizeof(BDRVBochsState),
.bdrv_probe = bochs_probe,
.bdrv_open = bochs_open,
- .bdrv_read = bochs_co_read,
+ .bdrv_co_preadv = bochs_co_preadv,
.bdrv_close = bochs_close,
};
diff --git a/block/cloop.c b/block/cloop.c
index a84f14019c..fc1ca3a05a 100644
--- a/block/cloop.c
+++ b/block/cloop.c
@@ -66,6 +66,7 @@ static int cloop_open(BlockDriverState *bs, QDict *options, int flags,
int ret;
bs->read_only = 1;
+ bs->request_alignment = BDRV_SECTOR_SIZE; /* No sub-sector I/O supported */
/* read header */
ret = bdrv_pread(bs->file->bs, 128, &s->block_size, 4);
@@ -229,33 +230,38 @@ static inline int cloop_read_block(BlockDriverState *bs, int block_num)
return 0;
}
-static int cloop_read(BlockDriverState *bs, int64_t sector_num,
- uint8_t *buf, int nb_sectors)
+static int coroutine_fn
+cloop_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
+ QEMUIOVector *qiov, int flags)
{
BDRVCloopState *s = bs->opaque;
- int i;
+ uint64_t sector_num = offset >> BDRV_SECTOR_BITS;
+ int nb_sectors = bytes >> BDRV_SECTOR_BITS;
+ int ret, i;
+
+ assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
+ assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
+
+ qemu_co_mutex_lock(&s->lock);
for (i = 0; i < nb_sectors; i++) {
+ void *data;
uint32_t sector_offset_in_block =
((sector_num + i) % s->sectors_per_block),
block_num = (sector_num + i) / s->sectors_per_block;
if (cloop_read_block(bs, block_num) != 0) {
- return -1;
+ ret = -EIO;
+ goto fail;
}
- memcpy(buf + i * 512,
- s->uncompressed_block + sector_offset_in_block * 512, 512);
+
+ data = s->uncompressed_block + sector_offset_in_block * 512;
+ qemu_iovec_from_buf(qiov, i * 512, data, 512);
}
- return 0;
-}
-static coroutine_fn int cloop_co_read(BlockDriverState *bs, int64_t sector_num,
- uint8_t *buf, int nb_sectors)
-{
- int ret;
- BDRVCloopState *s = bs->opaque;
- qemu_co_mutex_lock(&s->lock);
- ret = cloop_read(bs, sector_num, buf, nb_sectors);
+ ret = 0;
+fail:
qemu_co_mutex_unlock(&s->lock);
+
return ret;
}
@@ -273,7 +279,7 @@ static BlockDriver bdrv_cloop = {
.instance_size = sizeof(BDRVCloopState),
.bdrv_probe = cloop_probe,
.bdrv_open = cloop_open,
- .bdrv_read = cloop_co_read,
+ .bdrv_co_preadv = cloop_co_preadv,
.bdrv_close = cloop_close,
};
diff --git a/block/crypto.c b/block/crypto.c
index 1903e84fbd..758e14e032 100644
--- a/block/crypto.c
+++ b/block/crypto.c
@@ -91,7 +91,7 @@ static ssize_t block_crypto_write_func(QCryptoBlock *block,
struct BlockCryptoCreateData *data = opaque;
ssize_t ret;
- ret = blk_pwrite(data->blk, offset, buf, buflen);
+ ret = blk_pwrite(data->blk, offset, buf, buflen, 0);
if (ret < 0) {
error_setg_errno(errp, -ret, "Could not write encryption header");
return ret;
@@ -196,7 +196,6 @@ block_crypto_open_opts_init(QCryptoBlockFormat format,
OptsVisitor *ov;
QCryptoBlockOpenOptions *ret = NULL;
Error *local_err = NULL;
- Error *end_err = NULL;
ret = g_new0(QCryptoBlockOpenOptions, 1);
ret->format = format;
@@ -219,9 +218,11 @@ block_crypto_open_opts_init(QCryptoBlockFormat format,
error_setg(&local_err, "Unsupported block format %d", format);
break;
}
+ if (!local_err) {
+ visit_check_struct(opts_get_visitor(ov), &local_err);
+ }
- visit_end_struct(opts_get_visitor(ov), &end_err);
- error_propagate(&local_err, end_err);
+ visit_end_struct(opts_get_visitor(ov));
out:
if (local_err) {
@@ -242,7 +243,6 @@ block_crypto_create_opts_init(QCryptoBlockFormat format,
OptsVisitor *ov;
QCryptoBlockCreateOptions *ret = NULL;
Error *local_err = NULL;
- Error *end_err = NULL;
ret = g_new0(QCryptoBlockCreateOptions, 1);
ret->format = format;
@@ -265,9 +265,11 @@ block_crypto_create_opts_init(QCryptoBlockFormat format,
error_setg(&local_err, "Unsupported block format %d", format);
break;
}
+ if (!local_err) {
+ visit_check_struct(opts_get_visitor(ov), &local_err);
+ }
- visit_end_struct(opts_get_visitor(ov), &end_err);
- error_propagate(&local_err, end_err);
+ visit_end_struct(opts_get_visitor(ov));
out:
if (local_err) {
diff --git a/block/curl.c b/block/curl.c
index 5a8f8b6239..da9f5e85de 100644
--- a/block/curl.c
+++ b/block/curl.c
@@ -36,10 +36,16 @@
// #define DEBUG_VERBOSE
#ifdef DEBUG_CURL
-#define DPRINTF(fmt, ...) do { printf(fmt, ## __VA_ARGS__); } while (0)
+#define DEBUG_CURL_PRINT 1
#else
-#define DPRINTF(fmt, ...) do { } while (0)
+#define DEBUG_CURL_PRINT 0
#endif
+#define DPRINTF(fmt, ...) \
+ do { \
+ if (DEBUG_CURL_PRINT) { \
+ fprintf(stderr, fmt, ## __VA_ARGS__); \
+ } \
+ } while (0)
#if LIBCURL_VERSION_NUM >= 0x071000
/* The multi interface timer callback was introduced in 7.16.0 */
diff --git a/block/dmg.c b/block/dmg.c
index a496eb7c9b..1ea5f22d82 100644
--- a/block/dmg.c
+++ b/block/dmg.c
@@ -440,6 +440,8 @@ static int dmg_open(BlockDriverState *bs, QDict *options, int flags,
int ret;
bs->read_only = 1;
+ bs->request_alignment = BDRV_SECTOR_SIZE; /* No sub-sector I/O supported */
+
s->n_chunks = 0;
s->offsets = s->lengths = s->sectors = s->sectorcounts = NULL;
/* used by dmg_read_mish_block to keep track of the current I/O position */
@@ -659,38 +661,42 @@ static inline int dmg_read_chunk(BlockDriverState *bs, uint64_t sector_num)
return 0;
}
-static int dmg_read(BlockDriverState *bs, int64_t sector_num,
- uint8_t *buf, int nb_sectors)
+static int coroutine_fn
+dmg_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
+ QEMUIOVector *qiov, int flags)
{
BDRVDMGState *s = bs->opaque;
- int i;
+ uint64_t sector_num = offset >> BDRV_SECTOR_BITS;
+ int nb_sectors = bytes >> BDRV_SECTOR_BITS;
+ int ret, i;
+
+ assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
+ assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
+
+ qemu_co_mutex_lock(&s->lock);
for (i = 0; i < nb_sectors; i++) {
uint32_t sector_offset_in_chunk;
+ void *data;
+
if (dmg_read_chunk(bs, sector_num + i) != 0) {
- return -1;
+ ret = -EIO;
+ goto fail;
}
/* Special case: current chunk is all zeroes. Do not perform a memcpy as
* s->uncompressed_chunk may be too small to cover the large all-zeroes
* section. dmg_read_chunk is called to find s->current_chunk */
if (s->types[s->current_chunk] == 2) { /* all zeroes block entry */
- memset(buf + i * 512, 0, 512);
+ qemu_iovec_memset(qiov, i * 512, 0, 512);
continue;
}
sector_offset_in_chunk = sector_num + i - s->sectors[s->current_chunk];
- memcpy(buf + i * 512,
- s->uncompressed_chunk + sector_offset_in_chunk * 512, 512);
+ data = s->uncompressed_chunk + sector_offset_in_chunk * 512;
+ qemu_iovec_from_buf(qiov, i * 512, data, 512);
}
- return 0;
-}
-static coroutine_fn int dmg_co_read(BlockDriverState *bs, int64_t sector_num,
- uint8_t *buf, int nb_sectors)
-{
- int ret;
- BDRVDMGState *s = bs->opaque;
- qemu_co_mutex_lock(&s->lock);
- ret = dmg_read(bs, sector_num, buf, nb_sectors);
+ ret = 0;
+fail:
qemu_co_mutex_unlock(&s->lock);
return ret;
}
@@ -715,7 +721,7 @@ static BlockDriver bdrv_dmg = {
.instance_size = sizeof(BDRVDMGState),
.bdrv_probe = dmg_probe,
.bdrv_open = dmg_open,
- .bdrv_read = dmg_co_read,
+ .bdrv_co_preadv = dmg_co_preadv,
.bdrv_close = dmg_close,
};
diff --git a/block/io.c b/block/io.c
index a7dbf85b19..cd6d71a503 100644
--- a/block/io.c
+++ b/block/io.c
@@ -34,18 +34,6 @@
#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */
-static BlockAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
- int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
- BlockCompletionFunc *cb, void *opaque);
-static BlockAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
- int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
- BlockCompletionFunc *cb, void *opaque);
-static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
- int64_t sector_num, int nb_sectors,
- QEMUIOVector *iov);
-static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
- int64_t sector_num, int nb_sectors,
- QEMUIOVector *iov);
static BlockAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
int64_t sector_num,
QEMUIOVector *qiov,
@@ -62,48 +50,35 @@ static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
void bdrv_set_io_limits(BlockDriverState *bs,
ThrottleConfig *cfg)
{
- int i;
-
throttle_group_config(bs, cfg);
-
- for (i = 0; i < 2; i++) {
- qemu_co_enter_next(&bs->throttled_reqs[i]);
- }
}
-/* this function drain all the throttled IOs */
-static bool bdrv_start_throttled_reqs(BlockDriverState *bs)
+void bdrv_no_throttling_begin(BlockDriverState *bs)
{
- bool drained = false;
- bool enabled = bs->io_limits_enabled;
- int i;
-
- bs->io_limits_enabled = false;
-
- for (i = 0; i < 2; i++) {
- while (qemu_co_enter_next(&bs->throttled_reqs[i])) {
- drained = true;
- }
+ if (bs->io_limits_disabled++ == 0) {
+ throttle_group_restart_bs(bs);
}
+}
- bs->io_limits_enabled = enabled;
-
- return drained;
+void bdrv_no_throttling_end(BlockDriverState *bs)
+{
+ assert(bs->io_limits_disabled);
+ --bs->io_limits_disabled;
}
void bdrv_io_limits_disable(BlockDriverState *bs)
{
- bs->io_limits_enabled = false;
- bdrv_start_throttled_reqs(bs);
+ assert(bs->throttle_state);
+ bdrv_no_throttling_begin(bs);
throttle_group_unregister_bs(bs);
+ bdrv_no_throttling_end(bs);
}
/* should be called before bdrv_set_io_limits if a limit is set */
void bdrv_io_limits_enable(BlockDriverState *bs, const char *group)
{
- assert(!bs->io_limits_enabled);
+ assert(!bs->throttle_state);
throttle_group_register_bs(bs, group);
- bs->io_limits_enabled = true;
}
void bdrv_io_limits_update_group(BlockDriverState *bs, const char *group)
@@ -123,24 +98,6 @@ void bdrv_io_limits_update_group(BlockDriverState *bs, const char *group)
bdrv_io_limits_enable(bs, group);
}
-void bdrv_setup_io_funcs(BlockDriver *bdrv)
-{
- /* Block drivers without coroutine functions need emulation */
- if (!bdrv->bdrv_co_readv) {
- bdrv->bdrv_co_readv = bdrv_co_readv_em;
- bdrv->bdrv_co_writev = bdrv_co_writev_em;
-
- /* bdrv_co_readv_em()/brdv_co_writev_em() work in terms of aio, so if
- * the block driver lacks aio we need to emulate that too.
- */
- if (!bdrv->bdrv_aio_readv) {
- /* add AIO emulation layer */
- bdrv->bdrv_aio_readv = bdrv_aio_readv_em;
- bdrv->bdrv_aio_writev = bdrv_aio_writev_em;
- }
- }
-}
-
void bdrv_refresh_limits(BlockDriverState *bs, Error **errp)
{
BlockDriver *drv = bs->drv;
@@ -260,18 +217,29 @@ typedef struct {
bool done;
} BdrvCoDrainData;
+static void bdrv_drain_poll(BlockDriverState *bs)
+{
+ bool busy = true;
+
+ while (busy) {
+ /* Keep iterating */
+ busy = bdrv_requests_pending(bs);
+ busy |= aio_poll(bdrv_get_aio_context(bs), busy);
+ }
+}
+
static void bdrv_co_drain_bh_cb(void *opaque)
{
BdrvCoDrainData *data = opaque;
Coroutine *co = data->co;
qemu_bh_delete(data->bh);
- bdrv_drain(data->bs);
+ bdrv_drain_poll(data->bs);
data->done = true;
qemu_coroutine_enter(co, NULL);
}
-void coroutine_fn bdrv_co_drain(BlockDriverState *bs)
+static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs)
{
BdrvCoDrainData data;
@@ -305,21 +273,28 @@ void coroutine_fn bdrv_co_drain(BlockDriverState *bs)
* not depend on events in other AioContexts. In that case, use
* bdrv_drain_all() instead.
*/
-void bdrv_drain(BlockDriverState *bs)
+void coroutine_fn bdrv_co_drain(BlockDriverState *bs)
{
- bool busy = true;
+ bdrv_no_throttling_begin(bs);
+ bdrv_io_unplugged_begin(bs);
+ bdrv_drain_recurse(bs);
+ bdrv_co_yield_to_drain(bs);
+ bdrv_io_unplugged_end(bs);
+ bdrv_no_throttling_end(bs);
+}
+void bdrv_drain(BlockDriverState *bs)
+{
+ bdrv_no_throttling_begin(bs);
+ bdrv_io_unplugged_begin(bs);
bdrv_drain_recurse(bs);
if (qemu_in_coroutine()) {
- bdrv_co_drain(bs);
- return;
- }
- while (busy) {
- /* Keep iterating */
- bdrv_flush_io_queue(bs);
- busy = bdrv_requests_pending(bs);
- busy |= aio_poll(bdrv_get_aio_context(bs), busy);
+ bdrv_co_yield_to_drain(bs);
+ } else {
+ bdrv_drain_poll(bs);
}
+ bdrv_io_unplugged_end(bs);
+ bdrv_no_throttling_end(bs);
}
/*
@@ -342,6 +317,8 @@ void bdrv_drain_all(void)
if (bs->job) {
block_job_pause(bs->job);
}
+ bdrv_no_throttling_begin(bs);
+ bdrv_io_unplugged_begin(bs);
bdrv_drain_recurse(bs);
aio_context_release(aio_context);
@@ -366,7 +343,6 @@ void bdrv_drain_all(void)
aio_context_acquire(aio_context);
while ((bs = bdrv_next(bs))) {
if (aio_context == bdrv_get_aio_context(bs)) {
- bdrv_flush_io_queue(bs);
if (bdrv_requests_pending(bs)) {
busy = true;
aio_poll(aio_context, busy);
@@ -383,6 +359,8 @@ void bdrv_drain_all(void)
AioContext *aio_context = bdrv_get_aio_context(bs);
aio_context_acquire(aio_context);
+ bdrv_io_unplugged_end(bs);
+ bdrv_no_throttling_end(bs);
if (bs->job) {
block_job_resume(bs->job);
}
@@ -581,13 +559,13 @@ static void coroutine_fn bdrv_rw_co_entry(void *opaque)
RwCo *rwco = opaque;
if (!rwco->is_write) {
- rwco->ret = bdrv_co_do_preadv(rwco->bs, rwco->offset,
- rwco->qiov->size, rwco->qiov,
- rwco->flags);
+ rwco->ret = bdrv_co_preadv(rwco->bs, rwco->offset,
+ rwco->qiov->size, rwco->qiov,
+ rwco->flags);
} else {
- rwco->ret = bdrv_co_do_pwritev(rwco->bs, rwco->offset,
- rwco->qiov->size, rwco->qiov,
- rwco->flags);
+ rwco->ret = bdrv_co_pwritev(rwco->bs, rwco->offset,
+ rwco->qiov->size, rwco->qiov,
+ rwco->flags);
}
}
@@ -608,17 +586,6 @@ static int bdrv_prwv_co(BlockDriverState *bs, int64_t offset,
.flags = flags,
};
- /**
- * In sync call context, when the vcpu is blocked, this throttling timer
- * will not fire; so the I/O throttling function has to be disabled here
- * if it has been enabled.
- */
- if (bs->io_limits_enabled) {
- fprintf(stderr, "Disabling I/O throttling on '%s' due "
- "to synchronous I/O.\n", bdrv_get_device_name(bs));
- bdrv_io_limits_disable(bs);
- }
-
if (qemu_in_coroutine()) {
/* Fast-path if already in coroutine context */
bdrv_rw_co_entry(&rwco);
@@ -685,7 +652,8 @@ int bdrv_write_zeroes(BlockDriverState *bs, int64_t sector_num,
* Completely zero out a block device with the help of bdrv_write_zeroes.
* The operation is sped up by checking the block status and only writing
* zeroes to the device if they currently do not return zeroes. Optional
- * flags are passed through to bdrv_write_zeroes (e.g. BDRV_REQ_MAY_UNMAP).
+ * flags are passed through to bdrv_write_zeroes (e.g. BDRV_REQ_MAY_UNMAP,
+ * BDRV_REQ_FUA).
*
* Returns < 0 on error, 0 on success. For error codes see bdrv_write().
*/
@@ -800,6 +768,109 @@ int bdrv_pwrite_sync(BlockDriverState *bs, int64_t offset,
return 0;
}
+typedef struct CoroutineIOCompletion {
+ Coroutine *coroutine;
+ int ret;
+} CoroutineIOCompletion;
+
+static void bdrv_co_io_em_complete(void *opaque, int ret)
+{
+ CoroutineIOCompletion *co = opaque;
+
+ co->ret = ret;
+ qemu_coroutine_enter(co->coroutine, NULL);
+}
+
+static int coroutine_fn bdrv_driver_preadv(BlockDriverState *bs,
+ uint64_t offset, uint64_t bytes,
+ QEMUIOVector *qiov, int flags)
+{
+ BlockDriver *drv = bs->drv;
+ int64_t sector_num;
+ unsigned int nb_sectors;
+
+ if (drv->bdrv_co_preadv) {
+ return drv->bdrv_co_preadv(bs, offset, bytes, qiov, flags);
+ }
+
+ sector_num = offset >> BDRV_SECTOR_BITS;
+ nb_sectors = bytes >> BDRV_SECTOR_BITS;
+
+ assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
+ assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
+ assert((bytes >> BDRV_SECTOR_BITS) <= BDRV_REQUEST_MAX_SECTORS);
+
+ if (drv->bdrv_co_readv) {
+ return drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
+ } else {
+ BlockAIOCB *acb;
+ CoroutineIOCompletion co = {
+ .coroutine = qemu_coroutine_self(),
+ };
+
+ acb = bs->drv->bdrv_aio_readv(bs, sector_num, qiov, nb_sectors,
+ bdrv_co_io_em_complete, &co);
+ if (acb == NULL) {
+ return -EIO;
+ } else {
+ qemu_coroutine_yield();
+ return co.ret;
+ }
+ }
+}
+
+static int coroutine_fn bdrv_driver_pwritev(BlockDriverState *bs,
+ uint64_t offset, uint64_t bytes,
+ QEMUIOVector *qiov, int flags)
+{
+ BlockDriver *drv = bs->drv;
+ int64_t sector_num;
+ unsigned int nb_sectors;
+ int ret;
+
+ if (drv->bdrv_co_pwritev) {
+ ret = drv->bdrv_co_pwritev(bs, offset, bytes, qiov, flags);
+ goto emulate_flags;
+ }
+
+ sector_num = offset >> BDRV_SECTOR_BITS;
+ nb_sectors = bytes >> BDRV_SECTOR_BITS;
+
+ assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
+ assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
+ assert((bytes >> BDRV_SECTOR_BITS) <= BDRV_REQUEST_MAX_SECTORS);
+
+ if (drv->bdrv_co_writev_flags) {
+ ret = drv->bdrv_co_writev_flags(bs, sector_num, nb_sectors, qiov,
+ flags & bs->supported_write_flags);
+ flags &= ~bs->supported_write_flags;
+ } else if (drv->bdrv_co_writev) {
+ assert(!bs->supported_write_flags);
+ ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov);
+ } else {
+ BlockAIOCB *acb;
+ CoroutineIOCompletion co = {
+ .coroutine = qemu_coroutine_self(),
+ };
+
+ acb = bs->drv->bdrv_aio_writev(bs, sector_num, qiov, nb_sectors,
+ bdrv_co_io_em_complete, &co);
+ if (acb == NULL) {
+ ret = -EIO;
+ } else {
+ qemu_coroutine_yield();
+ ret = co.ret;
+ }
+ }
+
+emulate_flags:
+ if (ret == 0 && (flags & BDRV_REQ_FUA)) {
+ ret = bdrv_co_flush(bs);
+ }
+
+ return ret;
+}
+
static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs,
int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
@@ -836,8 +907,9 @@ static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs,
qemu_iovec_init_external(&bounce_qiov, &iov, 1);
- ret = drv->bdrv_co_readv(bs, cluster_sector_num, cluster_nb_sectors,
- &bounce_qiov);
+ ret = bdrv_driver_preadv(bs, cluster_sector_num * BDRV_SECTOR_SIZE,
+ cluster_nb_sectors * BDRV_SECTOR_SIZE,
+ &bounce_qiov, 0);
if (ret < 0) {
goto err;
}
@@ -850,8 +922,9 @@ static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs,
/* This does not change the data on the disk, it is not necessary
* to flush even in cache=writethrough mode.
*/
- ret = drv->bdrv_co_writev(bs, cluster_sector_num, cluster_nb_sectors,
- &bounce_qiov);
+ ret = bdrv_driver_pwritev(bs, cluster_sector_num * BDRV_SECTOR_SIZE,
+ cluster_nb_sectors * BDRV_SECTOR_SIZE,
+ &bounce_qiov, 0);
}
if (ret < 0) {
@@ -880,7 +953,6 @@ static int coroutine_fn bdrv_aligned_preadv(BlockDriverState *bs,
BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
int64_t align, QEMUIOVector *qiov, int flags)
{
- BlockDriver *drv = bs->drv;
int ret;
int64_t sector_num = offset >> BDRV_SECTOR_BITS;
@@ -921,7 +993,7 @@ static int coroutine_fn bdrv_aligned_preadv(BlockDriverState *bs,
/* Forward the request to the BlockDriver */
if (!bs->zero_beyond_eof) {
- ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
+ ret = bdrv_driver_preadv(bs, offset, bytes, qiov, 0);
} else {
/* Read zeros after EOF */
int64_t total_sectors, max_nb_sectors;
@@ -935,7 +1007,7 @@ static int coroutine_fn bdrv_aligned_preadv(BlockDriverState *bs,
max_nb_sectors = ROUND_UP(MAX(0, total_sectors - sector_num),
align >> BDRV_SECTOR_BITS);
if (nb_sectors < max_nb_sectors) {
- ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
+ ret = bdrv_driver_preadv(bs, offset, bytes, qiov, 0);
} else if (max_nb_sectors > 0) {
QEMUIOVector local_qiov;
@@ -943,8 +1015,9 @@ static int coroutine_fn bdrv_aligned_preadv(BlockDriverState *bs,
qemu_iovec_concat(&local_qiov, qiov, 0,
max_nb_sectors * BDRV_SECTOR_SIZE);
- ret = drv->bdrv_co_readv(bs, sector_num, max_nb_sectors,
- &local_qiov);
+ ret = bdrv_driver_preadv(bs, offset,
+ max_nb_sectors * BDRV_SECTOR_SIZE,
+ &local_qiov, 0);
qemu_iovec_destroy(&local_qiov);
} else {
@@ -967,7 +1040,7 @@ out:
/*
* Handle a read request in coroutine context
*/
-int coroutine_fn bdrv_co_do_preadv(BlockDriverState *bs,
+int coroutine_fn bdrv_co_preadv(BlockDriverState *bs,
int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
BdrvRequestFlags flags)
{
@@ -997,7 +1070,7 @@ int coroutine_fn bdrv_co_do_preadv(BlockDriverState *bs,
}
/* throttling disk I/O */
- if (bs->io_limits_enabled) {
+ if (bs->throttle_state) {
throttle_group_co_io_limits_intercept(bs, bytes, false);
}
@@ -1049,8 +1122,8 @@ static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
return -EINVAL;
}
- return bdrv_co_do_preadv(bs, sector_num << BDRV_SECTOR_BITS,
- nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
+ return bdrv_co_preadv(bs, sector_num << BDRV_SECTOR_BITS,
+ nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
}
int coroutine_fn bdrv_co_readv(BlockDriverState *bs, int64_t sector_num,
@@ -1088,6 +1161,7 @@ static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
QEMUIOVector qiov;
struct iovec iov = {0};
int ret = 0;
+ bool need_flush = false;
int max_write_zeroes = MIN_NON_ZERO(bs->bl.max_write_zeroes,
BDRV_REQUEST_MAX_SECTORS);
@@ -1120,13 +1194,29 @@ static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
ret = -ENOTSUP;
/* First try the efficient write zeroes operation */
if (drv->bdrv_co_write_zeroes) {
- ret = drv->bdrv_co_write_zeroes(bs, sector_num, num, flags);
+ ret = drv->bdrv_co_write_zeroes(bs, sector_num, num,
+ flags & bs->supported_zero_flags);
+ if (ret != -ENOTSUP && (flags & BDRV_REQ_FUA) &&
+ !(bs->supported_zero_flags & BDRV_REQ_FUA)) {
+ need_flush = true;
+ }
+ } else {
+ assert(!bs->supported_zero_flags);
}
if (ret == -ENOTSUP) {
/* Fall back to bounce buffer if write zeroes is unsupported */
int max_xfer_len = MIN_NON_ZERO(bs->bl.max_transfer_length,
MAX_WRITE_ZEROES_BOUNCE_BUFFER);
+ BdrvRequestFlags write_flags = flags & ~BDRV_REQ_ZERO_WRITE;
+
+ if ((flags & BDRV_REQ_FUA) &&
+ !(bs->supported_write_flags & BDRV_REQ_FUA)) {
+ /* No need for bdrv_driver_pwrite() to do a fallback
+ * flush on each chunk; use just one at the end */
+ write_flags &= ~BDRV_REQ_FUA;
+ need_flush = true;
+ }
num = MIN(num, max_xfer_len);
iov.iov_len = num * BDRV_SECTOR_SIZE;
if (iov.iov_base == NULL) {
@@ -1139,7 +1229,9 @@ static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
}
qemu_iovec_init_external(&qiov, &iov, 1);
- ret = drv->bdrv_co_writev(bs, sector_num, num, &qiov);
+ ret = bdrv_driver_pwritev(bs, sector_num * BDRV_SECTOR_SIZE,
+ num * BDRV_SECTOR_SIZE, &qiov,
+ write_flags);
/* Keep bounce buffer around if it is big enough for all
* all future requests.
@@ -1155,6 +1247,9 @@ static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
}
fail:
+ if (ret == 0 && need_flush) {
+ ret = bdrv_co_flush(bs);
+ }
qemu_vfree(iov.iov_base);
return ret;
}
@@ -1199,23 +1294,12 @@ static int coroutine_fn bdrv_aligned_pwritev(BlockDriverState *bs,
} else if (flags & BDRV_REQ_ZERO_WRITE) {
bdrv_debug_event(bs, BLKDBG_PWRITEV_ZERO);
ret = bdrv_co_do_write_zeroes(bs, sector_num, nb_sectors, flags);
- } else if (drv->bdrv_co_writev_flags) {
- bdrv_debug_event(bs, BLKDBG_PWRITEV);
- ret = drv->bdrv_co_writev_flags(bs, sector_num, nb_sectors, qiov,
- flags);
} else {
- assert(drv->supported_write_flags == 0);
bdrv_debug_event(bs, BLKDBG_PWRITEV);
- ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov);
+ ret = bdrv_driver_pwritev(bs, offset, bytes, qiov, flags);
}
bdrv_debug_event(bs, BLKDBG_PWRITEV_DONE);
- if (ret == 0 && (flags & BDRV_REQ_FUA) &&
- !(drv->supported_write_flags & BDRV_REQ_FUA))
- {
- ret = bdrv_co_flush(bs);
- }
-
bdrv_set_dirty(bs, sector_num, nb_sectors);
if (bs->wr_highest_offset < offset + bytes) {
@@ -1320,7 +1404,7 @@ fail:
/*
* Handle a write request in coroutine context
*/
-int coroutine_fn bdrv_co_do_pwritev(BlockDriverState *bs,
+int coroutine_fn bdrv_co_pwritev(BlockDriverState *bs,
int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
BdrvRequestFlags flags)
{
@@ -1347,7 +1431,7 @@ int coroutine_fn bdrv_co_do_pwritev(BlockDriverState *bs,
}
/* throttling disk I/O */
- if (bs->io_limits_enabled) {
+ if (bs->throttle_state) {
throttle_group_co_io_limits_intercept(bs, bytes, true);
}
@@ -1455,8 +1539,8 @@ static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
return -EINVAL;
}
- return bdrv_co_do_pwritev(bs, sector_num << BDRV_SECTOR_BITS,
- nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
+ return bdrv_co_pwritev(bs, sector_num << BDRV_SECTOR_BITS,
+ nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
}
int coroutine_fn bdrv_co_writev(BlockDriverState *bs, int64_t sector_num,
@@ -2064,80 +2148,6 @@ void bdrv_aio_cancel_async(BlockAIOCB *acb)
/**************************************************************/
/* async block device emulation */
-typedef struct BlockAIOCBSync {
- BlockAIOCB common;
- QEMUBH *bh;
- int ret;
- /* vector translation state */
- QEMUIOVector *qiov;
- uint8_t *bounce;
- int is_write;
-} BlockAIOCBSync;
-
-static const AIOCBInfo bdrv_em_aiocb_info = {
- .aiocb_size = sizeof(BlockAIOCBSync),
-};
-
-static void bdrv_aio_bh_cb(void *opaque)
-{
- BlockAIOCBSync *acb = opaque;
-
- if (!acb->is_write && acb->ret >= 0) {
- qemu_iovec_from_buf(acb->qiov, 0, acb->bounce, acb->qiov->size);
- }
- qemu_vfree(acb->bounce);
- acb->common.cb(acb->common.opaque, acb->ret);
- qemu_bh_delete(acb->bh);
- acb->bh = NULL;
- qemu_aio_unref(acb);
-}
-
-static BlockAIOCB *bdrv_aio_rw_vector(BlockDriverState *bs,
- int64_t sector_num,
- QEMUIOVector *qiov,
- int nb_sectors,
- BlockCompletionFunc *cb,
- void *opaque,
- int is_write)
-
-{
- BlockAIOCBSync *acb;
-
- acb = qemu_aio_get(&bdrv_em_aiocb_info, bs, cb, opaque);
- acb->is_write = is_write;
- acb->qiov = qiov;
- acb->bounce = qemu_try_blockalign(bs, qiov->size);
- acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_aio_bh_cb, acb);
-
- if (acb->bounce == NULL) {
- acb->ret = -ENOMEM;
- } else if (is_write) {
- qemu_iovec_to_buf(acb->qiov, 0, acb->bounce, qiov->size);
- acb->ret = bs->drv->bdrv_write(bs, sector_num, acb->bounce, nb_sectors);
- } else {
- acb->ret = bs->drv->bdrv_read(bs, sector_num, acb->bounce, nb_sectors);
- }
-
- qemu_bh_schedule(acb->bh);
-
- return &acb->common;
-}
-
-static BlockAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
- int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
- BlockCompletionFunc *cb, void *opaque)
-{
- return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 0);
-}
-
-static BlockAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
- int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
- BlockCompletionFunc *cb, void *opaque)
-{
- return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 1);
-}
-
-
typedef struct BlockAIOCBCoroutine {
BlockAIOCB common;
BlockRequest req;
@@ -2314,59 +2324,6 @@ void qemu_aio_unref(void *p)
/**************************************************************/
/* Coroutine block device emulation */
-typedef struct CoroutineIOCompletion {
- Coroutine *coroutine;
- int ret;
-} CoroutineIOCompletion;
-
-static void bdrv_co_io_em_complete(void *opaque, int ret)
-{
- CoroutineIOCompletion *co = opaque;
-
- co->ret = ret;
- qemu_coroutine_enter(co->coroutine, NULL);
-}
-
-static int coroutine_fn bdrv_co_io_em(BlockDriverState *bs, int64_t sector_num,
- int nb_sectors, QEMUIOVector *iov,
- bool is_write)
-{
- CoroutineIOCompletion co = {
- .coroutine = qemu_coroutine_self(),
- };
- BlockAIOCB *acb;
-
- if (is_write) {
- acb = bs->drv->bdrv_aio_writev(bs, sector_num, iov, nb_sectors,
- bdrv_co_io_em_complete, &co);
- } else {
- acb = bs->drv->bdrv_aio_readv(bs, sector_num, iov, nb_sectors,
- bdrv_co_io_em_complete, &co);
- }
-
- trace_bdrv_co_io_em(bs, sector_num, nb_sectors, is_write, acb);
- if (!acb) {
- return -EIO;
- }
- qemu_coroutine_yield();
-
- return co.ret;
-}
-
-static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
- int64_t sector_num, int nb_sectors,
- QEMUIOVector *iov)
-{
- return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, false);
-}
-
-static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
- int64_t sector_num, int nb_sectors,
- QEMUIOVector *iov)
-{
- return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, true);
-}
-
static void coroutine_fn bdrv_flush_co_entry(void *opaque)
{
RwCo *rwco = opaque;
@@ -2763,33 +2720,68 @@ void bdrv_add_before_write_notifier(BlockDriverState *bs,
void bdrv_io_plug(BlockDriverState *bs)
{
- BlockDriver *drv = bs->drv;
- if (drv && drv->bdrv_io_plug) {
- drv->bdrv_io_plug(bs);
- } else if (bs->file) {
- bdrv_io_plug(bs->file->bs);
+ BdrvChild *child;
+
+ QLIST_FOREACH(child, &bs->children, next) {
+ bdrv_io_plug(child->bs);
+ }
+
+ if (bs->io_plugged++ == 0 && bs->io_plug_disabled == 0) {
+ BlockDriver *drv = bs->drv;
+ if (drv && drv->bdrv_io_plug) {
+ drv->bdrv_io_plug(bs);
+ }
}
}
void bdrv_io_unplug(BlockDriverState *bs)
{
- BlockDriver *drv = bs->drv;
- if (drv && drv->bdrv_io_unplug) {
- drv->bdrv_io_unplug(bs);
- } else if (bs->file) {
- bdrv_io_unplug(bs->file->bs);
+ BdrvChild *child;
+
+ assert(bs->io_plugged);
+ if (--bs->io_plugged == 0 && bs->io_plug_disabled == 0) {
+ BlockDriver *drv = bs->drv;
+ if (drv && drv->bdrv_io_unplug) {
+ drv->bdrv_io_unplug(bs);
+ }
+ }
+
+ QLIST_FOREACH(child, &bs->children, next) {
+ bdrv_io_unplug(child->bs);
}
}
-void bdrv_flush_io_queue(BlockDriverState *bs)
+void bdrv_io_unplugged_begin(BlockDriverState *bs)
{
- BlockDriver *drv = bs->drv;
- if (drv && drv->bdrv_flush_io_queue) {
- drv->bdrv_flush_io_queue(bs);
- } else if (bs->file) {
- bdrv_flush_io_queue(bs->file->bs);
+ BdrvChild *child;
+
+ if (bs->io_plug_disabled++ == 0 && bs->io_plugged > 0) {
+ BlockDriver *drv = bs->drv;
+ if (drv && drv->bdrv_io_unplug) {
+ drv->bdrv_io_unplug(bs);
+ }
+ }
+
+ QLIST_FOREACH(child, &bs->children, next) {
+ bdrv_io_unplugged_begin(child->bs);
+ }
+}
+
+void bdrv_io_unplugged_end(BlockDriverState *bs)
+{
+ BdrvChild *child;
+
+ assert(bs->io_plug_disabled);
+ QLIST_FOREACH(child, &bs->children, next) {
+ bdrv_io_unplugged_end(child->bs);
+ }
+
+ if (--bs->io_plug_disabled == 0 && bs->io_plugged > 0) {
+ BlockDriver *drv = bs->drv;
+ if (drv && drv->bdrv_io_plug) {
+ drv->bdrv_io_plug(bs);
+ }
}
- bdrv_start_throttled_reqs(bs);
}
void bdrv_drained_begin(BlockDriverState *bs)
diff --git a/block/iscsi.c b/block/iscsi.c
index 302baf84c1..10f3906bcc 100644
--- a/block/iscsi.c
+++ b/block/iscsi.c
@@ -456,8 +456,11 @@ iscsi_co_writev_flags(BlockDriverState *bs, int64_t sector_num, int nb_sectors,
struct IscsiTask iTask;
uint64_t lba;
uint32_t num_sectors;
- bool fua;
+ bool fua = flags & BDRV_REQ_FUA;
+ if (fua) {
+ assert(iscsilun->dpofua);
+ }
if (!is_request_lun_aligned(sector_num, nb_sectors, iscsilun)) {
return -EINVAL;
}
@@ -472,7 +475,6 @@ iscsi_co_writev_flags(BlockDriverState *bs, int64_t sector_num, int nb_sectors,
num_sectors = sector_qemu2lun(nb_sectors, iscsilun);
iscsi_co_init_iscsitask(iscsilun, &iTask);
retry:
- fua = iscsilun->dpofua && (flags & BDRV_REQ_FUA);
if (iscsilun->use_16_for_rw) {
iTask.task = iscsi_write16_task(iscsilun->iscsi, iscsilun->lun, lba,
NULL, num_sectors * iscsilun->block_size,
@@ -513,13 +515,6 @@ retry:
return 0;
}
-static int coroutine_fn
-iscsi_co_writev(BlockDriverState *bs, int64_t sector_num, int nb_sectors,
- QEMUIOVector *iov)
-{
- return iscsi_co_writev_flags(bs, sector_num, nb_sectors, iov, 0);
-}
-
static bool iscsi_allocationmap_is_allocated(IscsiLun *iscsilun,
int64_t sector_num, int nb_sectors)
@@ -1555,6 +1550,10 @@ static int iscsi_open(BlockDriverState *bs, QDict *options, int flags,
task = NULL;
iscsi_modesense_sync(iscsilun);
+ if (iscsilun->dpofua) {
+ bs->supported_write_flags = BDRV_REQ_FUA;
+ }
+ bs->supported_zero_flags = BDRV_REQ_MAY_UNMAP;
/* Check the write protect flag of the LUN if we want to write */
if (iscsilun->type == TYPE_DISK && (flags & BDRV_O_RDWR) &&
@@ -1847,9 +1846,7 @@ static BlockDriver bdrv_iscsi = {
.bdrv_co_discard = iscsi_co_discard,
.bdrv_co_write_zeroes = iscsi_co_write_zeroes,
.bdrv_co_readv = iscsi_co_readv,
- .bdrv_co_writev = iscsi_co_writev,
.bdrv_co_writev_flags = iscsi_co_writev_flags,
- .supported_write_flags = BDRV_REQ_FUA,
.bdrv_co_flush_to_disk = iscsi_co_flush,
#ifdef __linux__
diff --git a/block/linux-aio.c b/block/linux-aio.c
index 805757e02e..90ec98ee23 100644
--- a/block/linux-aio.c
+++ b/block/linux-aio.c
@@ -30,7 +30,7 @@
struct qemu_laiocb {
BlockAIOCB common;
- struct qemu_laio_state *ctx;
+ LinuxAioState *ctx;
struct iocb iocb;
ssize_t ret;
size_t nbytes;
@@ -46,7 +46,7 @@ typedef struct {
QSIMPLEQ_HEAD(, qemu_laiocb) pending;
} LaioQueue;
-struct qemu_laio_state {
+struct LinuxAioState {
io_context_t ctx;
EventNotifier e;
@@ -60,7 +60,7 @@ struct qemu_laio_state {
int event_max;
};
-static void ioq_submit(struct qemu_laio_state *s);
+static void ioq_submit(LinuxAioState *s);
static inline ssize_t io_event_ret(struct io_event *ev)
{
@@ -70,8 +70,7 @@ static inline ssize_t io_event_ret(struct io_event *ev)
/*
* Completes an AIO request (calls the callback and frees the ACB).
*/
-static void qemu_laio_process_completion(struct qemu_laio_state *s,
- struct qemu_laiocb *laiocb)
+static void qemu_laio_process_completion(struct qemu_laiocb *laiocb)
{
int ret;
@@ -99,7 +98,7 @@ static void qemu_laio_process_completion(struct qemu_laio_state *s,
*
* The function is somewhat tricky because it supports nested event loops, for
* example when a request callback invokes aio_poll(). In order to do this,
- * the completion events array and index are kept in qemu_laio_state. The BH
+ * the completion events array and index are kept in LinuxAioState. The BH
* reschedules itself as long as there are completions pending so it will
* either be called again in a nested event loop or will be called after all
* events have been completed. When there are no events left to complete, the
@@ -107,7 +106,7 @@ static void qemu_laio_process_completion(struct qemu_laio_state *s,
*/
static void qemu_laio_completion_bh(void *opaque)
{
- struct qemu_laio_state *s = opaque;
+ LinuxAioState *s = opaque;
/* Fetch more completion events when empty */
if (s->event_idx == s->event_max) {
@@ -136,7 +135,7 @@ static void qemu_laio_completion_bh(void *opaque)
laiocb->ret = io_event_ret(&s->events[s->event_idx]);
s->event_idx++;
- qemu_laio_process_completion(s, laiocb);
+ qemu_laio_process_completion(laiocb);
}
if (!s->io_q.plugged && !QSIMPLEQ_EMPTY(&s->io_q.pending)) {
@@ -146,7 +145,7 @@ static void qemu_laio_completion_bh(void *opaque)
static void qemu_laio_completion_cb(EventNotifier *e)
{
- struct qemu_laio_state *s = container_of(e, struct qemu_laio_state, e);
+ LinuxAioState *s = container_of(e, LinuxAioState, e);
if (event_notifier_test_and_clear(&s->e)) {
qemu_bh_schedule(s->completion_bh);
@@ -185,7 +184,7 @@ static void ioq_init(LaioQueue *io_q)
io_q->blocked = false;
}
-static void ioq_submit(struct qemu_laio_state *s)
+static void ioq_submit(LinuxAioState *s)
{
int ret, len;
struct qemu_laiocb *aiocb;
@@ -216,33 +215,25 @@ static void ioq_submit(struct qemu_laio_state *s)
s->io_q.blocked = (s->io_q.n > 0);
}
-void laio_io_plug(BlockDriverState *bs, void *aio_ctx)
+void laio_io_plug(BlockDriverState *bs, LinuxAioState *s)
{
- struct qemu_laio_state *s = aio_ctx;
-
- s->io_q.plugged++;
+ assert(!s->io_q.plugged);
+ s->io_q.plugged = 1;
}
-void laio_io_unplug(BlockDriverState *bs, void *aio_ctx, bool unplug)
+void laio_io_unplug(BlockDriverState *bs, LinuxAioState *s)
{
- struct qemu_laio_state *s = aio_ctx;
-
- assert(s->io_q.plugged > 0 || !unplug);
-
- if (unplug && --s->io_q.plugged > 0) {
- return;
- }
-
+ assert(s->io_q.plugged);
+ s->io_q.plugged = 0;
if (!s->io_q.blocked && !QSIMPLEQ_EMPTY(&s->io_q.pending)) {
ioq_submit(s);
}
}
-BlockAIOCB *laio_submit(BlockDriverState *bs, void *aio_ctx, int fd,
+BlockAIOCB *laio_submit(BlockDriverState *bs, LinuxAioState *s, int fd,
int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
BlockCompletionFunc *cb, void *opaque, int type)
{
- struct qemu_laio_state *s = aio_ctx;
struct qemu_laiocb *laiocb;
struct iocb *iocbs;
off_t offset = sector_num * 512;
@@ -284,26 +275,22 @@ out_free_aiocb:
return NULL;
}
-void laio_detach_aio_context(void *s_, AioContext *old_context)
+void laio_detach_aio_context(LinuxAioState *s, AioContext *old_context)
{
- struct qemu_laio_state *s = s_;
-
aio_set_event_notifier(old_context, &s->e, false, NULL);
qemu_bh_delete(s->completion_bh);
}
-void laio_attach_aio_context(void *s_, AioContext *new_context)
+void laio_attach_aio_context(LinuxAioState *s, AioContext *new_context)
{
- struct qemu_laio_state *s = s_;
-
s->completion_bh = aio_bh_new(new_context, qemu_laio_completion_bh, s);
aio_set_event_notifier(new_context, &s->e, false,
qemu_laio_completion_cb);
}
-void *laio_init(void)
+LinuxAioState *laio_init(void)
{
- struct qemu_laio_state *s;
+ LinuxAioState *s;
s = g_malloc0(sizeof(*s));
if (event_notifier_init(&s->e, false) < 0) {
@@ -325,10 +312,8 @@ out_free_state:
return NULL;
}
-void laio_cleanup(void *s_)
+void laio_cleanup(LinuxAioState *s)
{
- struct qemu_laio_state *s = s_;
-
event_notifier_cleanup(&s->e);
if (io_destroy(s->ctx) != 0) {
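
The linux-aio changes above also drop the nested plug counter: laio_io_plug()
may now be called at most once before the matching laio_io_unplug(), which
always submits whatever was batched. A minimal stand-in for the new semantics,
assuming nothing beyond what the patch shows (the *_sketch names and the
LaioQueueSketch struct are invented for illustration):

    #include <assert.h>
    #include <stdbool.h>

    /* Simplified stand-in for the patched LaioQueue state. */
    typedef struct {
        bool plugged;      /* at most one level of plugging now */
        bool blocked;
        int pending;       /* queued-but-unsubmitted requests */
    } LaioQueueSketch;

    static void ioq_submit_sketch(LaioQueueSketch *q)
    {
        q->pending = 0;    /* the real code calls io_submit() here */
    }

    static void laio_io_plug_sketch(LaioQueueSketch *q)
    {
        assert(!q->plugged);          /* nesting is no longer allowed */
        q->plugged = true;
    }

    static void laio_io_unplug_sketch(LaioQueueSketch *q)
    {
        assert(q->plugged);
        q->plugged = false;
        if (!q->blocked && q->pending > 0) {
            ioq_submit_sketch(q);     /* flush the batched requests */
        }
    }
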
diff --git a/block/nbd-client.c b/block/nbd-client.c
index 878e879ace..4d13444409 100644
--- a/block/nbd-client.c
+++ b/block/nbd-client.c
@@ -243,15 +243,15 @@ static int nbd_co_readv_1(BlockDriverState *bs, int64_t sector_num,
static int nbd_co_writev_1(BlockDriverState *bs, int64_t sector_num,
int nb_sectors, QEMUIOVector *qiov,
- int offset, int *flags)
+ int offset, int flags)
{
NbdClientSession *client = nbd_get_client_session(bs);
struct nbd_request request = { .type = NBD_CMD_WRITE };
struct nbd_reply reply;
ssize_t ret;
- if ((*flags & BDRV_REQ_FUA) && (client->nbdflags & NBD_FLAG_SEND_FUA)) {
- *flags &= ~BDRV_REQ_FUA;
+ if (flags & BDRV_REQ_FUA) {
+ assert(client->nbdflags & NBD_FLAG_SEND_FUA);
request.type |= NBD_CMD_FLAG_FUA;
}
@@ -291,7 +291,7 @@ int nbd_client_co_readv(BlockDriverState *bs, int64_t sector_num,
}
int nbd_client_co_writev(BlockDriverState *bs, int64_t sector_num,
- int nb_sectors, QEMUIOVector *qiov, int *flags)
+ int nb_sectors, QEMUIOVector *qiov, int flags)
{
int offset = 0;
int ret;
@@ -414,6 +414,9 @@ int nbd_client_init(BlockDriverState *bs,
logout("Failed to negotiate with the NBD server\n");
return ret;
}
+ if (client->nbdflags & NBD_FLAG_SEND_FUA) {
+ bs->supported_write_flags = BDRV_REQ_FUA;
+ }
qemu_co_mutex_init(&client->send_mutex);
qemu_co_mutex_init(&client->free_sema);
diff --git a/block/nbd-client.h b/block/nbd-client.h
index bc7aec0795..c618dadc39 100644
--- a/block/nbd-client.h
+++ b/block/nbd-client.h
@@ -48,7 +48,7 @@ int nbd_client_co_discard(BlockDriverState *bs, int64_t sector_num,
int nb_sectors);
int nbd_client_co_flush(BlockDriverState *bs);
int nbd_client_co_writev(BlockDriverState *bs, int64_t sector_num,
- int nb_sectors, QEMUIOVector *qiov, int *flags);
+ int nb_sectors, QEMUIOVector *qiov, int flags);
int nbd_client_co_readv(BlockDriverState *bs, int64_t sector_num,
int nb_sectors, QEMUIOVector *qiov);
diff --git a/block/nbd.c b/block/nbd.c
index f7ea3b3608..6015e8b537 100644
--- a/block/nbd.c
+++ b/block/nbd.c
@@ -355,31 +355,6 @@ static int nbd_co_readv(BlockDriverState *bs, int64_t sector_num,
return nbd_client_co_readv(bs, sector_num, nb_sectors, qiov);
}
-static int nbd_co_writev_flags(BlockDriverState *bs, int64_t sector_num,
- int nb_sectors, QEMUIOVector *qiov, int flags)
-{
- int ret;
-
- ret = nbd_client_co_writev(bs, sector_num, nb_sectors, qiov, &flags);
- if (ret < 0) {
- return ret;
- }
-
- /* The flag wasn't sent to the server, so we need to emulate it with an
- * explicit flush */
- if (flags & BDRV_REQ_FUA) {
- ret = nbd_client_co_flush(bs);
- }
-
- return ret;
-}
-
-static int nbd_co_writev(BlockDriverState *bs, int64_t sector_num,
- int nb_sectors, QEMUIOVector *qiov)
-{
- return nbd_co_writev_flags(bs, sector_num, nb_sectors, qiov, 0);
-}
-
static int nbd_co_flush(BlockDriverState *bs)
{
return nbd_client_co_flush(bs);
@@ -476,9 +451,7 @@ static BlockDriver bdrv_nbd = {
.bdrv_parse_filename = nbd_parse_filename,
.bdrv_file_open = nbd_open,
.bdrv_co_readv = nbd_co_readv,
- .bdrv_co_writev = nbd_co_writev,
- .bdrv_co_writev_flags = nbd_co_writev_flags,
- .supported_write_flags = BDRV_REQ_FUA,
+ .bdrv_co_writev_flags = nbd_client_co_writev,
.bdrv_close = nbd_close,
.bdrv_co_flush_to_os = nbd_co_flush,
.bdrv_co_discard = nbd_co_discard,
@@ -496,9 +469,7 @@ static BlockDriver bdrv_nbd_tcp = {
.bdrv_parse_filename = nbd_parse_filename,
.bdrv_file_open = nbd_open,
.bdrv_co_readv = nbd_co_readv,
- .bdrv_co_writev = nbd_co_writev,
- .bdrv_co_writev_flags = nbd_co_writev_flags,
- .supported_write_flags = BDRV_REQ_FUA,
+ .bdrv_co_writev_flags = nbd_client_co_writev,
.bdrv_close = nbd_close,
.bdrv_co_flush_to_os = nbd_co_flush,
.bdrv_co_discard = nbd_co_discard,
@@ -516,9 +487,7 @@ static BlockDriver bdrv_nbd_unix = {
.bdrv_parse_filename = nbd_parse_filename,
.bdrv_file_open = nbd_open,
.bdrv_co_readv = nbd_co_readv,
- .bdrv_co_writev = nbd_co_writev,
- .bdrv_co_writev_flags = nbd_co_writev_flags,
- .supported_write_flags = BDRV_REQ_FUA,
+ .bdrv_co_writev_flags = nbd_client_co_writev,
.bdrv_close = nbd_close,
.bdrv_co_flush_to_os = nbd_co_flush,
.bdrv_co_discard = nbd_co_discard,
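
With this series the NBD client advertises FUA per BDS at open time
(bs->supported_write_flags = BDRV_REQ_FUA when the server sent
NBD_FLAG_SEND_FUA) and no longer emulates the flag with its own flush;
presumably the generic write path now performs that fallback for drivers that
do not claim the flag, which is what the deleted nbd_co_writev_flags() used to
do. A rough, self-contained sketch of that split; the *_sketch names, the
DriverSketch struct and the BDRV_REQ_FUA_SKETCH value are illustrative only:

    #include <stdbool.h>

    #define BDRV_REQ_FUA_SKETCH (1 << 0)   /* illustrative value */

    typedef struct {
        unsigned supported_write_flags;    /* set by the driver at open */
        int (*co_writev_flags)(void *opaque, int flags);
        int (*co_flush)(void *opaque);
        void *opaque;
    } DriverSketch;

    /* Generic write path: pass through only the flags the driver claims,
     * and emulate FUA with an explicit flush for everyone else. */
    static int pwritev_sketch(DriverSketch *drv, int flags)
    {
        int ret = drv->co_writev_flags(drv->opaque,
                                       flags & drv->supported_write_flags);

        if (ret == 0 && (flags & BDRV_REQ_FUA_SKETCH) &&
            !(drv->supported_write_flags & BDRV_REQ_FUA_SKETCH)) {
            ret = drv->co_flush(drv->opaque);
        }
        return ret;
    }
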
diff --git a/block/parallels.c b/block/parallels.c
index 324ed43ac4..cddbfc4012 100644
--- a/block/parallels.c
+++ b/block/parallels.c
@@ -512,11 +512,12 @@ static int parallels_create(const char *filename, QemuOpts *opts, Error **errp)
memset(tmp, 0, sizeof(tmp));
memcpy(tmp, &header, sizeof(header));
- ret = blk_pwrite(file, 0, tmp, BDRV_SECTOR_SIZE);
+ ret = blk_pwrite(file, 0, tmp, BDRV_SECTOR_SIZE, 0);
if (ret < 0) {
goto exit;
}
- ret = blk_write_zeroes(file, 1, bat_sectors - 1, 0);
+ ret = blk_write_zeroes(file, BDRV_SECTOR_SIZE,
+ (bat_sectors - 1) << BDRV_SECTOR_BITS, 0);
if (ret < 0) {
goto exit;
}
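
The parallels (and qcow, below) hunks show the two interface changes that
recur through the rest of the series: blk_pwrite() gains a trailing
BdrvRequestFlags argument, passed as 0 at these call sites, and
blk_write_zeroes() switches from sector to byte granularity, so sector
arguments are simply shifted by BDRV_SECTOR_BITS. A trivial stand-in for the
conversion (the constant and function names are invented):

    #include <stdint.h>

    #define BDRV_SECTOR_BITS_SKETCH 9

    /* "start at sector 1, clear bat_sectors - 1 sectors" becomes a byte
     * offset/count pair, exactly as in the patched parallels_create(). */
    static void sectors_to_byte_range(int64_t start_sector, int64_t nb_sectors,
                                      int64_t *offset, int64_t *count)
    {
        *offset = start_sector << BDRV_SECTOR_BITS_SKETCH;
        *count  = nb_sectors << BDRV_SECTOR_BITS_SKETCH;
    }
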
diff --git a/block/qcow.c b/block/qcow.c
index 60ddb12eca..d6dc1b05b3 100644
--- a/block/qcow.c
+++ b/block/qcow.c
@@ -853,14 +853,14 @@ static int qcow_create(const char *filename, QemuOpts *opts, Error **errp)
}
/* write all the data */
- ret = blk_pwrite(qcow_blk, 0, &header, sizeof(header));
+ ret = blk_pwrite(qcow_blk, 0, &header, sizeof(header), 0);
if (ret != sizeof(header)) {
goto exit;
}
if (backing_file) {
ret = blk_pwrite(qcow_blk, sizeof(header),
- backing_file, backing_filename_len);
+ backing_file, backing_filename_len, 0);
if (ret != backing_filename_len) {
goto exit;
}
@@ -869,8 +869,8 @@ static int qcow_create(const char *filename, QemuOpts *opts, Error **errp)
tmp = g_malloc0(BDRV_SECTOR_SIZE);
for (i = 0; i < ((sizeof(uint64_t)*l1_size + BDRV_SECTOR_SIZE - 1)/
BDRV_SECTOR_SIZE); i++) {
- ret = blk_pwrite(qcow_blk, header_size +
- BDRV_SECTOR_SIZE*i, tmp, BDRV_SECTOR_SIZE);
+ ret = blk_pwrite(qcow_blk, header_size + BDRV_SECTOR_SIZE * i,
+ tmp, BDRV_SECTOR_SIZE, 0);
if (ret != BDRV_SECTOR_SIZE) {
g_free(tmp);
goto exit;
diff --git a/block/qcow2.c b/block/qcow2.c
index 470734be9f..62febfc386 100644
--- a/block/qcow2.c
+++ b/block/qcow2.c
@@ -1757,13 +1757,6 @@ static void qcow2_invalidate_cache(BlockDriverState *bs, Error **errp)
qcow2_close(bs);
- bdrv_invalidate_cache(bs->file->bs, &local_err);
- if (local_err) {
- error_propagate(errp, local_err);
- bs->drv = NULL;
- return;
- }
-
memset(s, 0, sizeof(BDRVQcow2State));
options = qdict_clone_shallow(bs->options);
@@ -2207,7 +2200,7 @@ static int qcow2_create2(const char *filename, int64_t total_size,
cpu_to_be64(QCOW2_COMPAT_LAZY_REFCOUNTS);
}
- ret = blk_pwrite(blk, 0, header, cluster_size);
+ ret = blk_pwrite(blk, 0, header, cluster_size, 0);
g_free(header);
if (ret < 0) {
error_setg_errno(errp, -ret, "Could not write qcow2 header");
@@ -2217,7 +2210,7 @@ static int qcow2_create2(const char *filename, int64_t total_size,
/* Write a refcount table with one refcount block */
refcount_table = g_malloc0(2 * cluster_size);
refcount_table[0] = cpu_to_be64(2 * cluster_size);
- ret = blk_pwrite(blk, cluster_size, refcount_table, 2 * cluster_size);
+ ret = blk_pwrite(blk, cluster_size, refcount_table, 2 * cluster_size, 0);
g_free(refcount_table);
if (ret < 0) {
@@ -2411,21 +2404,74 @@ finish:
return ret;
}
+
+static bool is_zero_cluster(BlockDriverState *bs, int64_t start)
+{
+ BDRVQcow2State *s = bs->opaque;
+ int nr;
+ BlockDriverState *file;
+ int64_t res = bdrv_get_block_status_above(bs, NULL, start,
+ s->cluster_sectors, &nr, &file);
+ return res >= 0 && ((res & BDRV_BLOCK_ZERO) || !(res & BDRV_BLOCK_DATA));
+}
+
+static bool is_zero_cluster_top_locked(BlockDriverState *bs, int64_t start)
+{
+ BDRVQcow2State *s = bs->opaque;
+ int nr = s->cluster_sectors;
+ uint64_t off;
+ int ret;
+
+ ret = qcow2_get_cluster_offset(bs, start << BDRV_SECTOR_BITS, &nr, &off);
+ return ret == QCOW2_CLUSTER_UNALLOCATED || ret == QCOW2_CLUSTER_ZERO;
+}
+
static coroutine_fn int qcow2_co_write_zeroes(BlockDriverState *bs,
int64_t sector_num, int nb_sectors, BdrvRequestFlags flags)
{
int ret;
BDRVQcow2State *s = bs->opaque;
- /* Emulate misaligned zero writes */
- if (sector_num % s->cluster_sectors || nb_sectors % s->cluster_sectors) {
- return -ENOTSUP;
+ int head = sector_num % s->cluster_sectors;
+ int tail = (sector_num + nb_sectors) % s->cluster_sectors;
+
+ if (head != 0 || tail != 0) {
+ int64_t cl_end = -1;
+
+ sector_num -= head;
+ nb_sectors += head;
+
+ if (tail != 0) {
+ nb_sectors += s->cluster_sectors - tail;
+ }
+
+ if (!is_zero_cluster(bs, sector_num)) {
+ return -ENOTSUP;
+ }
+
+ if (nb_sectors > s->cluster_sectors) {
+            /* Technically the request can cover 2 clusters, e.g. a 4k write
+               at an offset of s->cluster_sectors - 2k. One of these clusters
+               can be zeroed, the other unallocated */
+ cl_end = sector_num + nb_sectors - s->cluster_sectors;
+ if (!is_zero_cluster(bs, cl_end)) {
+ return -ENOTSUP;
+ }
+ }
+
+ qemu_co_mutex_lock(&s->lock);
+        /* A new write may have arrived since the check above */
+ if (!is_zero_cluster_top_locked(bs, sector_num) ||
+ (cl_end > 0 && !is_zero_cluster_top_locked(bs, cl_end))) {
+ qemu_co_mutex_unlock(&s->lock);
+ return -ENOTSUP;
+ }
+ } else {
+ qemu_co_mutex_lock(&s->lock);
}
/* Whatever is left can use real zero clusters */
- qemu_co_mutex_lock(&s->lock);
- ret = qcow2_zero_clusters(bs, sector_num << BDRV_SECTOR_BITS,
- nb_sectors);
+ ret = qcow2_zero_clusters(bs, sector_num << BDRV_SECTOR_BITS, nb_sectors);
qemu_co_mutex_unlock(&s->lock);
return ret;
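
qcow2_co_write_zeroes() no longer rejects misaligned requests outright: the
request is widened to cluster boundaries, the widened clusters must already
read as zero, and the check is repeated under s->lock in case a write landed
in between. A stand-in for just the widening arithmetic (expand_to_clusters is
an invented name):

    #include <stdint.h>

    /* With cluster_sectors = 128, a request for sectors [126, 130) is
     * widened to [0, 256), i.e. both clusters it touches. */
    static void expand_to_clusters(int64_t *sector_num, int *nb_sectors,
                                   int cluster_sectors)
    {
        int head = *sector_num % cluster_sectors;
        int tail = (*sector_num + *nb_sectors) % cluster_sectors;

        *sector_num -= head;              /* down to the cluster start */
        *nb_sectors += head;
        if (tail != 0) {
            *nb_sectors += cluster_sectors - tail;   /* up to the cluster end */
        }
    }
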
diff --git a/block/qed.c b/block/qed.c
index 0af52741df..10ce18eb66 100644
--- a/block/qed.c
+++ b/block/qed.c
@@ -601,18 +601,18 @@ static int qed_create(const char *filename, uint32_t cluster_size,
}
qed_header_cpu_to_le(&header, &le_header);
- ret = blk_pwrite(blk, 0, &le_header, sizeof(le_header));
+ ret = blk_pwrite(blk, 0, &le_header, sizeof(le_header), 0);
if (ret < 0) {
goto out;
}
ret = blk_pwrite(blk, sizeof(le_header), backing_file,
- header.backing_filename_size);
+ header.backing_filename_size, 0);
if (ret < 0) {
goto out;
}
l1_table = g_malloc0(l1_size);
- ret = blk_pwrite(blk, header.l1_table_offset, l1_table, l1_size);
+ ret = blk_pwrite(blk, header.l1_table_offset, l1_table, l1_size, 0);
if (ret < 0) {
goto out;
}
@@ -1594,12 +1594,6 @@ static void bdrv_qed_invalidate_cache(BlockDriverState *bs, Error **errp)
bdrv_qed_close(bs);
- bdrv_invalidate_cache(bs->file->bs, &local_err);
- if (local_err) {
- error_propagate(errp, local_err);
- return;
- }
-
memset(s, 0, sizeof(BDRVQEDState));
ret = bdrv_qed_open(bs, NULL, bs->open_flags, &local_err);
if (local_err) {
diff --git a/block/quorum.c b/block/quorum.c
index da15465a9a..1ec3511528 100644
--- a/block/quorum.c
+++ b/block/quorum.c
@@ -14,6 +14,7 @@
*/
#include "qemu/osdep.h"
+#include "qemu/cutils.h"
#include "block/block_int.h"
#include "qapi/qmp/qbool.h"
#include "qapi/qmp/qdict.h"
@@ -67,6 +68,9 @@ typedef struct QuorumVotes {
typedef struct BDRVQuorumState {
BdrvChild **children; /* children BlockDriverStates */
int num_children; /* children count */
+ unsigned next_child_index; /* the index of the next child that should
+ * be added
+ */
int threshold; /* if less than threshold children reads gave the
* same result a quorum error occurs.
*/
@@ -747,21 +751,6 @@ static int64_t quorum_getlength(BlockDriverState *bs)
return result;
}
-static void quorum_invalidate_cache(BlockDriverState *bs, Error **errp)
-{
- BDRVQuorumState *s = bs->opaque;
- Error *local_err = NULL;
- int i;
-
- for (i = 0; i < s->num_children; i++) {
- bdrv_invalidate_cache(s->children[i]->bs, &local_err);
- if (local_err) {
- error_propagate(errp, local_err);
- return;
- }
- }
-}
-
static coroutine_fn int quorum_co_flush(BlockDriverState *bs)
{
BDRVQuorumState *s = bs->opaque;
@@ -898,9 +887,9 @@ static int quorum_open(BlockDriverState *bs, QDict *options, int flags,
ret = -EINVAL;
goto exit;
}
- if (s->num_children < 2) {
+ if (s->num_children < 1) {
error_setg(&local_err,
- "Number of provided children must be greater than 1");
+ "Number of provided children must be 1 or more");
ret = -EINVAL;
goto exit;
}
@@ -964,6 +953,7 @@ static int quorum_open(BlockDriverState *bs, QDict *options, int flags,
opened[i] = true;
}
+ s->next_child_index = s->num_children;
g_free(opened);
goto exit;
@@ -1020,6 +1010,72 @@ static void quorum_attach_aio_context(BlockDriverState *bs,
}
}
+static void quorum_add_child(BlockDriverState *bs, BlockDriverState *child_bs,
+ Error **errp)
+{
+ BDRVQuorumState *s = bs->opaque;
+ BdrvChild *child;
+ char indexstr[32];
+ int ret;
+
+ assert(s->num_children <= INT_MAX / sizeof(BdrvChild *));
+ if (s->num_children == INT_MAX / sizeof(BdrvChild *) ||
+ s->next_child_index == UINT_MAX) {
+ error_setg(errp, "Too many children");
+ return;
+ }
+
+ ret = snprintf(indexstr, 32, "children.%u", s->next_child_index);
+ if (ret < 0 || ret >= 32) {
+ error_setg(errp, "cannot generate child name");
+ return;
+ }
+ s->next_child_index++;
+
+ bdrv_drained_begin(bs);
+
+ /* We can safely add the child now */
+ bdrv_ref(child_bs);
+ child = bdrv_attach_child(bs, child_bs, indexstr, &child_format);
+ s->children = g_renew(BdrvChild *, s->children, s->num_children + 1);
+ s->children[s->num_children++] = child;
+
+ bdrv_drained_end(bs);
+}
+
+static void quorum_del_child(BlockDriverState *bs, BdrvChild *child,
+ Error **errp)
+{
+ BDRVQuorumState *s = bs->opaque;
+ int i;
+
+ for (i = 0; i < s->num_children; i++) {
+ if (s->children[i] == child) {
+ break;
+ }
+ }
+
+ /* we have checked it in bdrv_del_child() */
+ assert(i < s->num_children);
+
+ if (s->num_children <= s->threshold) {
+ error_setg(errp,
+ "The number of children cannot be lower than the vote threshold %d",
+ s->threshold);
+ return;
+ }
+
+ bdrv_drained_begin(bs);
+
+ /* We can safely remove this child now */
+ memmove(&s->children[i], &s->children[i + 1],
+ (s->num_children - i - 1) * sizeof(BdrvChild *));
+ s->children = g_renew(BdrvChild *, s->children, --s->num_children);
+ bdrv_unref_child(bs, child);
+
+ bdrv_drained_end(bs);
+}
+
static void quorum_refresh_filename(BlockDriverState *bs, QDict *options)
{
BDRVQuorumState *s = bs->opaque;
@@ -1070,11 +1126,13 @@ static BlockDriver bdrv_quorum = {
.bdrv_aio_readv = quorum_aio_readv,
.bdrv_aio_writev = quorum_aio_writev,
- .bdrv_invalidate_cache = quorum_invalidate_cache,
.bdrv_detach_aio_context = quorum_detach_aio_context,
.bdrv_attach_aio_context = quorum_attach_aio_context,
+ .bdrv_add_child = quorum_add_child,
+ .bdrv_del_child = quorum_del_child,
+
.is_filter = true,
.bdrv_recurse_is_first_non_filter = quorum_recurse_is_first_non_filter,
};
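
Quorum gains runtime child management: quorum_add_child() grows the BdrvChild
array with g_renew() and attaches the node under a generated "children.%u"
name, quorum_del_child() closes the gap and shrinks the array, and both run
inside a bdrv_drained_begin()/bdrv_drained_end() section so no requests are in
flight while the array changes. A stand-in for the compaction step
(close_child_gap is an invented name):

    #include <string.h>

    /* Shift the tail of the pointer array down over slot i; the real code
     * then shrinks the allocation with g_renew() and calls
     * bdrv_unref_child() on the removed child. */
    static void close_child_gap(void **children, int num_children, int i)
    {
        memmove(&children[i], &children[i + 1],
                (num_children - i - 1) * sizeof(children[0]));
    }
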
diff --git a/block/raw-aio.h b/block/raw-aio.h
index 811e375018..714714e016 100644
--- a/block/raw-aio.h
+++ b/block/raw-aio.h
@@ -35,15 +35,16 @@
/* linux-aio.c - Linux native implementation */
#ifdef CONFIG_LINUX_AIO
-void *laio_init(void);
-void laio_cleanup(void *s);
-BlockAIOCB *laio_submit(BlockDriverState *bs, void *aio_ctx, int fd,
+typedef struct LinuxAioState LinuxAioState;
+LinuxAioState *laio_init(void);
+void laio_cleanup(LinuxAioState *s);
+BlockAIOCB *laio_submit(BlockDriverState *bs, LinuxAioState *s, int fd,
int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
BlockCompletionFunc *cb, void *opaque, int type);
-void laio_detach_aio_context(void *s, AioContext *old_context);
-void laio_attach_aio_context(void *s, AioContext *new_context);
-void laio_io_plug(BlockDriverState *bs, void *aio_ctx);
-void laio_io_unplug(BlockDriverState *bs, void *aio_ctx, bool unplug);
+void laio_detach_aio_context(LinuxAioState *s, AioContext *old_context);
+void laio_attach_aio_context(LinuxAioState *s, AioContext *new_context);
+void laio_io_plug(BlockDriverState *bs, LinuxAioState *s);
+void laio_io_unplug(BlockDriverState *bs, LinuxAioState *s);
#endif
#ifdef _WIN32
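
raw-aio.h replaces the void * handles with an opaque LinuxAioState typedef;
the struct body stays private to linux-aio.c, so callers gain type checking
without seeing the layout. A self-contained sketch of the opaque-handle
pattern, with both the "header" and "implementation" halves in one listing
(all *_sketch names are invented):

    /* header half: forward-declare the type, never its contents */
    typedef struct LinuxAioStateSketch LinuxAioStateSketch;

    LinuxAioStateSketch *laio_init_sketch(void);
    void laio_cleanup_sketch(LinuxAioStateSketch *s);

    /* implementation half: the only place the layout is visible */
    #include <stdlib.h>

    struct LinuxAioStateSketch {
        int event_fd;   /* the real struct holds the io_context, BH, ... */
    };

    LinuxAioStateSketch *laio_init_sketch(void)
    {
        return calloc(1, sizeof(LinuxAioStateSketch));
    }

    void laio_cleanup_sketch(LinuxAioStateSketch *s)
    {
        free(s);
    }
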
diff --git a/block/raw-posix.c b/block/raw-posix.c
index 906d5c9411..a4f5a1ba5f 100644
--- a/block/raw-posix.c
+++ b/block/raw-posix.c
@@ -139,7 +139,7 @@ typedef struct BDRVRawState {
#ifdef CONFIG_LINUX_AIO
int use_aio;
- void *aio_ctx;
+ LinuxAioState *aio_ctx;
#endif
#ifdef CONFIG_XFS
bool is_xfs:1;
@@ -398,7 +398,7 @@ static void raw_attach_aio_context(BlockDriverState *bs,
}
#ifdef CONFIG_LINUX_AIO
-static int raw_set_aio(void **aio_ctx, int *use_aio, int bdrv_flags)
+static int raw_set_aio(LinuxAioState **aio_ctx, int *use_aio, int bdrv_flags)
{
int ret = -1;
assert(aio_ctx != NULL);
@@ -517,6 +517,7 @@ static int raw_open_common(BlockDriverState *bs, QDict *options,
s->has_discard = true;
s->has_write_zeroes = true;
+ bs->supported_zero_flags = BDRV_REQ_MAY_UNMAP;
if ((bs->open_flags & BDRV_O_NOCACHE) != 0) {
s->needs_alignment = true;
}
@@ -1345,17 +1346,7 @@ static void raw_aio_unplug(BlockDriverState *bs)
#ifdef CONFIG_LINUX_AIO
BDRVRawState *s = bs->opaque;
if (s->use_aio) {
- laio_io_unplug(bs, s->aio_ctx, true);
- }
-#endif
-}
-
-static void raw_aio_flush_io_queue(BlockDriverState *bs)
-{
-#ifdef CONFIG_LINUX_AIO
- BDRVRawState *s = bs->opaque;
- if (s->use_aio) {
- laio_io_unplug(bs, s->aio_ctx, false);
+ laio_io_unplug(bs, s->aio_ctx);
}
#endif
}
@@ -1949,7 +1940,6 @@ BlockDriver bdrv_file = {
.bdrv_refresh_limits = raw_refresh_limits,
.bdrv_io_plug = raw_aio_plug,
.bdrv_io_unplug = raw_aio_unplug,
- .bdrv_flush_io_queue = raw_aio_flush_io_queue,
.bdrv_truncate = raw_truncate,
.bdrv_getlength = raw_getlength,
@@ -2398,7 +2388,6 @@ static BlockDriver bdrv_host_device = {
.bdrv_refresh_limits = raw_refresh_limits,
.bdrv_io_plug = raw_aio_plug,
.bdrv_io_unplug = raw_aio_unplug,
- .bdrv_flush_io_queue = raw_aio_flush_io_queue,
.bdrv_truncate = raw_truncate,
.bdrv_getlength = raw_getlength,
@@ -2528,7 +2517,6 @@ static BlockDriver bdrv_host_cdrom = {
.bdrv_refresh_limits = raw_refresh_limits,
.bdrv_io_plug = raw_aio_plug,
.bdrv_io_unplug = raw_aio_unplug,
- .bdrv_flush_io_queue = raw_aio_flush_io_queue,
.bdrv_truncate = raw_truncate,
.bdrv_getlength = raw_getlength,
@@ -2664,7 +2652,6 @@ static BlockDriver bdrv_host_cdrom = {
.bdrv_refresh_limits = raw_refresh_limits,
.bdrv_io_plug = raw_aio_plug,
.bdrv_io_unplug = raw_aio_unplug,
- .bdrv_flush_io_queue = raw_aio_flush_io_queue,
.bdrv_truncate = raw_truncate,
.bdrv_getlength = raw_getlength,
diff --git a/block/raw_bsd.c b/block/raw_bsd.c
index a6cc7e9918..3385ed448d 100644
--- a/block/raw_bsd.c
+++ b/block/raw_bsd.c
@@ -105,8 +105,8 @@ raw_co_writev_flags(BlockDriverState *bs, int64_t sector_num, int nb_sectors,
}
BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO);
- ret = bdrv_co_do_pwritev(bs->file->bs, sector_num * BDRV_SECTOR_SIZE,
- nb_sectors * BDRV_SECTOR_SIZE, qiov, flags);
+ ret = bdrv_co_pwritev(bs->file->bs, sector_num * BDRV_SECTOR_SIZE,
+ nb_sectors * BDRV_SECTOR_SIZE, qiov, flags);
fail:
if (qiov == &local_qiov) {
@@ -116,13 +116,6 @@ fail:
return ret;
}
-static int coroutine_fn
-raw_co_writev(BlockDriverState *bs, int64_t sector_num, int nb_sectors,
- QEMUIOVector *qiov)
-{
- return raw_co_writev_flags(bs, sector_num, nb_sectors, qiov, 0);
-}
-
static int64_t coroutine_fn raw_co_get_block_status(BlockDriverState *bs,
int64_t sector_num,
int nb_sectors, int *pnum,
@@ -211,6 +204,8 @@ static int raw_open(BlockDriverState *bs, QDict *options, int flags,
Error **errp)
{
bs->sg = bs->file->bs->sg;
+ bs->supported_write_flags = BDRV_REQ_FUA;
+ bs->supported_zero_flags = BDRV_REQ_FUA | BDRV_REQ_MAY_UNMAP;
if (bs->probed && !bdrv_is_read_only(bs)) {
fprintf(stderr,
@@ -256,9 +251,7 @@ BlockDriver bdrv_raw = {
.bdrv_close = &raw_close,
.bdrv_create = &raw_create,
.bdrv_co_readv = &raw_co_readv,
- .bdrv_co_writev = &raw_co_writev,
.bdrv_co_writev_flags = &raw_co_writev_flags,
- .supported_write_flags = BDRV_REQ_FUA,
.bdrv_co_write_zeroes = &raw_co_write_zeroes,
.bdrv_co_discard = &raw_co_discard,
.bdrv_co_get_block_status = &raw_co_get_block_status,
diff --git a/block/sheepdog.c b/block/sheepdog.c
index 33e0a33824..23fbace1f9 100644
--- a/block/sheepdog.c
+++ b/block/sheepdog.c
@@ -294,13 +294,16 @@ static inline size_t count_data_objs(const struct SheepdogInode *inode)
#undef DPRINTF
#ifdef DEBUG_SDOG
-#define DPRINTF(fmt, args...) \
- do { \
- fprintf(stdout, "%s %d: " fmt, __func__, __LINE__, ##args); \
- } while (0)
+#define DEBUG_SDOG_PRINT 1
#else
-#define DPRINTF(fmt, args...)
+#define DEBUG_SDOG_PRINT 0
#endif
+#define DPRINTF(fmt, args...) \
+ do { \
+ if (DEBUG_SDOG_PRINT) { \
+ fprintf(stderr, "%s %d: " fmt, __func__, __LINE__, ##args); \
+ } \
+ } while (0)
typedef struct SheepdogAIOCB SheepdogAIOCB;
@@ -1678,7 +1681,7 @@ static int sd_prealloc(const char *filename, Error **errp)
if (ret < 0) {
goto out;
}
- ret = blk_pwrite(blk, idx * buf_size, buf, buf_size);
+ ret = blk_pwrite(blk, idx * buf_size, buf, buf_size, 0);
if (ret < 0) {
goto out;
}
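
The sheepdog DPRINTF rework keeps the fprintf() visible to the compiler even
when DEBUG_SDOG is off, so format strings and arguments are always checked,
and the output moves from stdout to stderr. The same pattern in isolation
(the DEBUG_EXAMPLE* and EXAMPLE_DPRINTF names are invented; the real macro
uses the GNU "args..." spelling of the variadic arguments):

    #include <stdio.h>

    #ifdef DEBUG_EXAMPLE
    #define DEBUG_EXAMPLE_PRINT 1
    #else
    #define DEBUG_EXAMPLE_PRINT 0
    #endif

    /* Always compiled, but the branch is dead code when the flag is 0. */
    #define EXAMPLE_DPRINTF(fmt, ...)                                   \
        do {                                                            \
            if (DEBUG_EXAMPLE_PRINT) {                                  \
                fprintf(stderr, "%s %d: " fmt, __func__, __LINE__,      \
                        ##__VA_ARGS__);                                 \
            }                                                           \
        } while (0)
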
diff --git a/block/throttle-groups.c b/block/throttle-groups.c
index 4920e09495..9ac063a0cd 100644
--- a/block/throttle-groups.c
+++ b/block/throttle-groups.c
@@ -219,6 +219,10 @@ static bool throttle_group_schedule_timer(BlockDriverState *bs,
ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
bool must_wait;
+ if (bs->io_limits_disabled) {
+ return false;
+ }
+
/* Check if any of the timers in this group is already armed */
if (tg->any_timer_armed[is_write]) {
return true;
@@ -313,6 +317,17 @@ void coroutine_fn throttle_group_co_io_limits_intercept(BlockDriverState *bs,
qemu_mutex_unlock(&tg->lock);
}
+void throttle_group_restart_bs(BlockDriverState *bs)
+{
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ while (qemu_co_enter_next(&bs->throttled_reqs[i])) {
+ ;
+ }
+ }
+}
+
/* Update the throttle configuration for a particular group. Similar
* to throttle_config(), but guarantees atomicity within the
* throttling group.
@@ -335,6 +350,9 @@ void throttle_group_config(BlockDriverState *bs, ThrottleConfig *cfg)
}
throttle_config(ts, tt, cfg);
qemu_mutex_unlock(&tg->lock);
+
+ qemu_co_enter_next(&bs->throttled_reqs[0]);
+ qemu_co_enter_next(&bs->throttled_reqs[1]);
}
/* Get the throttle configuration from a particular group. Similar to
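
throttle_group_restart_bs() re-enters every coroutine queued in both
throttled_reqs[] directions, and throttle_group_config() now kicks one waiter
per direction, so a new configuration (or limits disabled via
io_limits_disabled) takes effect without waiting for a throttle timer. A
stand-in for the drain loop; CoQueueSketch and the *_sketch functions are
invented, and the array is indexed by is_write in the real code:

    #include <stdbool.h>

    typedef struct {
        int waiting;                /* queued coroutines */
    } CoQueueSketch;

    static bool enter_next_sketch(CoQueueSketch *q)
    {
        if (q->waiting == 0) {
            return false;
        }
        q->waiting--;               /* the real code re-enters one coroutine */
        return true;
    }

    /* one queue per direction, drained until both are empty */
    static void restart_all_sketch(CoQueueSketch throttled_reqs[2])
    {
        for (int i = 0; i < 2; i++) {
            while (enter_next_sketch(&throttled_reqs[i])) {
                ;
            }
        }
    }
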
diff --git a/block/vdi.c b/block/vdi.c
index 75d4819edb..54e11447c3 100644
--- a/block/vdi.c
+++ b/block/vdi.c
@@ -557,98 +557,109 @@ static int64_t coroutine_fn vdi_co_get_block_status(BlockDriverState *bs,
return BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID | offset;
}
-static int vdi_co_read(BlockDriverState *bs,
- int64_t sector_num, uint8_t *buf, int nb_sectors)
+static int coroutine_fn
+vdi_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
+ QEMUIOVector *qiov, int flags)
{
BDRVVdiState *s = bs->opaque;
+ QEMUIOVector local_qiov;
uint32_t bmap_entry;
uint32_t block_index;
- uint32_t sector_in_block;
- uint32_t n_sectors;
+ uint32_t offset_in_block;
+ uint32_t n_bytes;
+ uint64_t bytes_done = 0;
int ret = 0;
logout("\n");
- while (ret >= 0 && nb_sectors > 0) {
- block_index = sector_num / s->block_sectors;
- sector_in_block = sector_num % s->block_sectors;
- n_sectors = s->block_sectors - sector_in_block;
- if (n_sectors > nb_sectors) {
- n_sectors = nb_sectors;
- }
+ qemu_iovec_init(&local_qiov, qiov->niov);
+
+ while (ret >= 0 && bytes > 0) {
+ block_index = offset / s->block_size;
+ offset_in_block = offset % s->block_size;
+ n_bytes = MIN(bytes, s->block_size - offset_in_block);
- logout("will read %u sectors starting at sector %" PRIu64 "\n",
- n_sectors, sector_num);
+ logout("will read %u bytes starting at offset %" PRIu64 "\n",
+ n_bytes, offset);
/* prepare next AIO request */
bmap_entry = le32_to_cpu(s->bmap[block_index]);
if (!VDI_IS_ALLOCATED(bmap_entry)) {
/* Block not allocated, return zeros, no need to wait. */
- memset(buf, 0, n_sectors * SECTOR_SIZE);
+ qemu_iovec_memset(qiov, bytes_done, 0, n_bytes);
ret = 0;
} else {
- uint64_t offset = s->header.offset_data / SECTOR_SIZE +
- (uint64_t)bmap_entry * s->block_sectors +
- sector_in_block;
- ret = bdrv_read(bs->file->bs, offset, buf, n_sectors);
+ uint64_t data_offset = s->header.offset_data +
+ (uint64_t)bmap_entry * s->block_size +
+ offset_in_block;
+
+ qemu_iovec_reset(&local_qiov);
+ qemu_iovec_concat(&local_qiov, qiov, bytes_done, n_bytes);
+
+ ret = bdrv_co_preadv(bs->file->bs, data_offset, n_bytes,
+ &local_qiov, 0);
}
- logout("%u sectors read\n", n_sectors);
+ logout("%u bytes read\n", n_bytes);
- nb_sectors -= n_sectors;
- sector_num += n_sectors;
- buf += n_sectors * SECTOR_SIZE;
+ bytes -= n_bytes;
+ offset += n_bytes;
+ bytes_done += n_bytes;
}
+ qemu_iovec_destroy(&local_qiov);
+
return ret;
}
-static int vdi_co_write(BlockDriverState *bs,
- int64_t sector_num, const uint8_t *buf, int nb_sectors)
+static int coroutine_fn
+vdi_co_pwritev(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
+ QEMUIOVector *qiov, int flags)
{
BDRVVdiState *s = bs->opaque;
+ QEMUIOVector local_qiov;
uint32_t bmap_entry;
uint32_t block_index;
- uint32_t sector_in_block;
- uint32_t n_sectors;
+ uint32_t offset_in_block;
+ uint32_t n_bytes;
uint32_t bmap_first = VDI_UNALLOCATED;
uint32_t bmap_last = VDI_UNALLOCATED;
uint8_t *block = NULL;
+ uint64_t bytes_done = 0;
int ret = 0;
logout("\n");
- while (ret >= 0 && nb_sectors > 0) {
- block_index = sector_num / s->block_sectors;
- sector_in_block = sector_num % s->block_sectors;
- n_sectors = s->block_sectors - sector_in_block;
- if (n_sectors > nb_sectors) {
- n_sectors = nb_sectors;
- }
+ qemu_iovec_init(&local_qiov, qiov->niov);
+
+ while (ret >= 0 && bytes > 0) {
+ block_index = offset / s->block_size;
+ offset_in_block = offset % s->block_size;
+ n_bytes = MIN(bytes, s->block_size - offset_in_block);
- logout("will write %u sectors starting at sector %" PRIu64 "\n",
- n_sectors, sector_num);
+ logout("will write %u bytes starting at offset %" PRIu64 "\n",
+ n_bytes, offset);
/* prepare next AIO request */
bmap_entry = le32_to_cpu(s->bmap[block_index]);
if (!VDI_IS_ALLOCATED(bmap_entry)) {
/* Allocate new block and write to it. */
- uint64_t offset;
+ uint64_t data_offset;
bmap_entry = s->header.blocks_allocated;
s->bmap[block_index] = cpu_to_le32(bmap_entry);
s->header.blocks_allocated++;
- offset = s->header.offset_data / SECTOR_SIZE +
- (uint64_t)bmap_entry * s->block_sectors;
+ data_offset = s->header.offset_data +
+ (uint64_t)bmap_entry * s->block_size;
if (block == NULL) {
block = g_malloc(s->block_size);
bmap_first = block_index;
}
bmap_last = block_index;
/* Copy data to be written to new block and zero unused parts. */
- memset(block, 0, sector_in_block * SECTOR_SIZE);
- memcpy(block + sector_in_block * SECTOR_SIZE,
- buf, n_sectors * SECTOR_SIZE);
- memset(block + (sector_in_block + n_sectors) * SECTOR_SIZE, 0,
- (s->block_sectors - n_sectors - sector_in_block) * SECTOR_SIZE);
+ memset(block, 0, offset_in_block);
+ qemu_iovec_to_buf(qiov, bytes_done, block + offset_in_block,
+ n_bytes);
+ memset(block + offset_in_block + n_bytes, 0,
+ s->block_size - n_bytes - offset_in_block);
/* Note that this coroutine does not yield anywhere from reading the
* bmap entry until here, so in regards to all the coroutines trying
@@ -658,12 +669,12 @@ static int vdi_co_write(BlockDriverState *bs,
* acquire the lock and thus the padded cluster is written before
* the other coroutines can write to the affected area. */
qemu_co_mutex_lock(&s->write_lock);
- ret = bdrv_write(bs->file->bs, offset, block, s->block_sectors);
+ ret = bdrv_pwrite(bs->file->bs, data_offset, block, s->block_size);
qemu_co_mutex_unlock(&s->write_lock);
} else {
- uint64_t offset = s->header.offset_data / SECTOR_SIZE +
- (uint64_t)bmap_entry * s->block_sectors +
- sector_in_block;
+ uint64_t data_offset = s->header.offset_data +
+ (uint64_t)bmap_entry * s->block_size +
+ offset_in_block;
qemu_co_mutex_lock(&s->write_lock);
/* This lock is only used to make sure the following write operation
* is executed after the write issued by the coroutine allocating
@@ -674,16 +685,23 @@ static int vdi_co_write(BlockDriverState *bs,
* that that write operation has returned (there may be other writes
* in flight, but they do not concern this very operation). */
qemu_co_mutex_unlock(&s->write_lock);
- ret = bdrv_write(bs->file->bs, offset, buf, n_sectors);
+
+ qemu_iovec_reset(&local_qiov);
+ qemu_iovec_concat(&local_qiov, qiov, bytes_done, n_bytes);
+
+ ret = bdrv_co_pwritev(bs->file->bs, data_offset, n_bytes,
+ &local_qiov, 0);
}
- nb_sectors -= n_sectors;
- sector_num += n_sectors;
- buf += n_sectors * SECTOR_SIZE;
+ bytes -= n_bytes;
+ offset += n_bytes;
+ bytes_done += n_bytes;
- logout("%u sectors written\n", n_sectors);
+ logout("%u bytes written\n", n_bytes);
}
+ qemu_iovec_destroy(&local_qiov);
+
logout("finished data write\n");
if (ret < 0) {
return ret;
@@ -694,6 +712,7 @@ static int vdi_co_write(BlockDriverState *bs,
VdiHeader *header = (VdiHeader *) block;
uint8_t *base;
uint64_t offset;
+ uint32_t n_sectors;
logout("now writing modified header\n");
assert(VDI_IS_ALLOCATED(bmap_first));
@@ -808,7 +827,7 @@ static int vdi_create(const char *filename, QemuOpts *opts, Error **errp)
vdi_header_print(&header);
#endif
vdi_header_to_le(&header);
- ret = blk_pwrite(blk, offset, &header, sizeof(header));
+ ret = blk_pwrite(blk, offset, &header, sizeof(header), 0);
if (ret < 0) {
error_setg(errp, "Error writing header to %s", filename);
goto exit;
@@ -829,7 +848,7 @@ static int vdi_create(const char *filename, QemuOpts *opts, Error **errp)
bmap[i] = VDI_UNALLOCATED;
}
}
- ret = blk_pwrite(blk, offset, bmap, bmap_size);
+ ret = blk_pwrite(blk, offset, bmap, bmap_size, 0);
if (ret < 0) {
error_setg(errp, "Error writing bmap to %s", filename);
goto exit;
@@ -903,9 +922,9 @@ static BlockDriver bdrv_vdi = {
.bdrv_co_get_block_status = vdi_co_get_block_status,
.bdrv_make_empty = vdi_make_empty,
- .bdrv_read = vdi_co_read,
+ .bdrv_co_preadv = vdi_co_preadv,
#if defined(CONFIG_VDI_WRITE)
- .bdrv_write = vdi_co_write,
+ .bdrv_co_pwritev = vdi_co_pwritev,
#endif
.bdrv_get_info = vdi_get_info,
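
vdi moves from the sector-based .bdrv_read/.bdrv_write callbacks to the
byte-based .bdrv_co_preadv/.bdrv_co_pwritev interface; each loop iteration now
works out which VDI block the current byte offset falls in and how much of the
request fits before that block ends. A stand-in for the per-iteration split
(all names here are invented):

    #include <stdint.h>

    #define MIN_SKETCH(a, b) ((a) < (b) ? (a) : (b))

    static void vdi_split_request(uint64_t offset, uint64_t bytes,
                                  uint32_t block_size,
                                  uint32_t *block_index,
                                  uint32_t *offset_in_block,
                                  uint32_t *n_bytes)
    {
        *block_index     = offset / block_size;
        *offset_in_block = offset % block_size;
        /* serve at most up to the end of this block */
        *n_bytes = MIN_SKETCH(bytes, block_size - *offset_in_block);
    }
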
diff --git a/block/vhdx.c b/block/vhdx.c
index 2b7b332404..ec778fe2a7 100644
--- a/block/vhdx.c
+++ b/block/vhdx.c
@@ -1856,13 +1856,14 @@ static int vhdx_create(const char *filename, QemuOpts *opts, Error **errp)
creator = g_utf8_to_utf16("QEMU v" QEMU_VERSION, -1, NULL,
&creator_items, NULL);
signature = cpu_to_le64(VHDX_FILE_SIGNATURE);
- ret = blk_pwrite(blk, VHDX_FILE_ID_OFFSET, &signature, sizeof(signature));
+ ret = blk_pwrite(blk, VHDX_FILE_ID_OFFSET, &signature, sizeof(signature),
+ 0);
if (ret < 0) {
goto delete_and_exit;
}
if (creator) {
ret = blk_pwrite(blk, VHDX_FILE_ID_OFFSET + sizeof(signature),
- creator, creator_items * sizeof(gunichar2));
+ creator, creator_items * sizeof(gunichar2), 0);
if (ret < 0) {
goto delete_and_exit;
}
diff --git a/block/vmdk.c b/block/vmdk.c
index 45f9d3c5b9..e6c97c25a6 100644
--- a/block/vmdk.c
+++ b/block/vmdk.c
@@ -1016,27 +1016,26 @@ static void vmdk_refresh_limits(BlockDriverState *bs, Error **errp)
*/
static int get_whole_cluster(BlockDriverState *bs,
VmdkExtent *extent,
- uint64_t cluster_sector_num,
- uint64_t sector_num,
- uint64_t skip_start_sector,
- uint64_t skip_end_sector)
+ uint64_t cluster_offset,
+ uint64_t offset,
+ uint64_t skip_start_bytes,
+ uint64_t skip_end_bytes)
{
int ret = VMDK_OK;
int64_t cluster_bytes;
uint8_t *whole_grain;
/* For COW, align request sector_num to cluster start */
- sector_num = QEMU_ALIGN_DOWN(sector_num, extent->cluster_sectors);
cluster_bytes = extent->cluster_sectors << BDRV_SECTOR_BITS;
+ offset = QEMU_ALIGN_DOWN(offset, cluster_bytes);
whole_grain = qemu_blockalign(bs, cluster_bytes);
if (!bs->backing) {
- memset(whole_grain, 0, skip_start_sector << BDRV_SECTOR_BITS);
- memset(whole_grain + (skip_end_sector << BDRV_SECTOR_BITS), 0,
- cluster_bytes - (skip_end_sector << BDRV_SECTOR_BITS));
+ memset(whole_grain, 0, skip_start_bytes);
+ memset(whole_grain + skip_end_bytes, 0, cluster_bytes - skip_end_bytes);
}
- assert(skip_end_sector <= extent->cluster_sectors);
+ assert(skip_end_bytes <= cluster_bytes);
/* we will be here if it's first write on non-exist grain(cluster).
* try to read from parent image, if exist */
if (bs->backing && !vmdk_is_cid_valid(bs)) {
@@ -1045,42 +1044,43 @@ static int get_whole_cluster(BlockDriverState *bs,
}
/* Read backing data before skip range */
- if (skip_start_sector > 0) {
+ if (skip_start_bytes > 0) {
if (bs->backing) {
- ret = bdrv_read(bs->backing->bs, sector_num,
- whole_grain, skip_start_sector);
+ ret = bdrv_pread(bs->backing->bs, offset, whole_grain,
+ skip_start_bytes);
if (ret < 0) {
ret = VMDK_ERROR;
goto exit;
}
}
- ret = bdrv_write(extent->file->bs, cluster_sector_num, whole_grain,
- skip_start_sector);
+ ret = bdrv_pwrite(extent->file->bs, cluster_offset, whole_grain,
+ skip_start_bytes);
if (ret < 0) {
ret = VMDK_ERROR;
goto exit;
}
}
/* Read backing data after skip range */
- if (skip_end_sector < extent->cluster_sectors) {
+ if (skip_end_bytes < cluster_bytes) {
if (bs->backing) {
- ret = bdrv_read(bs->backing->bs, sector_num + skip_end_sector,
- whole_grain + (skip_end_sector << BDRV_SECTOR_BITS),
- extent->cluster_sectors - skip_end_sector);
+ ret = bdrv_pread(bs->backing->bs, offset + skip_end_bytes,
+ whole_grain + skip_end_bytes,
+ cluster_bytes - skip_end_bytes);
if (ret < 0) {
ret = VMDK_ERROR;
goto exit;
}
}
- ret = bdrv_write(extent->file->bs, cluster_sector_num + skip_end_sector,
- whole_grain + (skip_end_sector << BDRV_SECTOR_BITS),
- extent->cluster_sectors - skip_end_sector);
+ ret = bdrv_pwrite(extent->file->bs, cluster_offset + skip_end_bytes,
+ whole_grain + skip_end_bytes,
+ cluster_bytes - skip_end_bytes);
if (ret < 0) {
ret = VMDK_ERROR;
goto exit;
}
}
+ ret = VMDK_OK;
exit:
qemu_vfree(whole_grain);
return ret;
@@ -1142,8 +1142,8 @@ static int get_cluster_offset(BlockDriverState *bs,
uint64_t offset,
bool allocate,
uint64_t *cluster_offset,
- uint64_t skip_start_sector,
- uint64_t skip_end_sector)
+ uint64_t skip_start_bytes,
+ uint64_t skip_end_bytes)
{
unsigned int l1_index, l2_offset, l2_index;
int min_index, i, j;
@@ -1230,10 +1230,8 @@ static int get_cluster_offset(BlockDriverState *bs,
* This problem may occur because of insufficient space on host disk
* or inappropriate VM shutdown.
*/
- ret = get_whole_cluster(bs, extent,
- cluster_sector,
- offset >> BDRV_SECTOR_BITS,
- skip_start_sector, skip_end_sector);
+ ret = get_whole_cluster(bs, extent, cluster_sector * BDRV_SECTOR_SIZE,
+ offset, skip_start_bytes, skip_end_bytes);
if (ret) {
return ret;
}
@@ -1259,15 +1257,26 @@ static VmdkExtent *find_extent(BDRVVmdkState *s,
return NULL;
}
+static inline uint64_t vmdk_find_offset_in_cluster(VmdkExtent *extent,
+ int64_t offset)
+{
+ uint64_t offset_in_cluster, extent_begin_offset, extent_relative_offset;
+ uint64_t cluster_size = extent->cluster_sectors * BDRV_SECTOR_SIZE;
+
+ extent_begin_offset =
+ (extent->end_sector - extent->sectors) * BDRV_SECTOR_SIZE;
+ extent_relative_offset = offset - extent_begin_offset;
+ offset_in_cluster = extent_relative_offset % cluster_size;
+
+ return offset_in_cluster;
+}
+
static inline uint64_t vmdk_find_index_in_cluster(VmdkExtent *extent,
int64_t sector_num)
{
- uint64_t index_in_cluster, extent_begin_sector, extent_relative_sector_num;
-
- extent_begin_sector = extent->end_sector - extent->sectors;
- extent_relative_sector_num = sector_num - extent_begin_sector;
- index_in_cluster = extent_relative_sector_num % extent->cluster_sectors;
- return index_in_cluster;
+ uint64_t offset;
+ offset = vmdk_find_offset_in_cluster(extent, sector_num * BDRV_SECTOR_SIZE);
+ return offset / BDRV_SECTOR_SIZE;
}
static int64_t coroutine_fn vmdk_co_get_block_status(BlockDriverState *bs,
@@ -1319,38 +1328,57 @@ static int64_t coroutine_fn vmdk_co_get_block_status(BlockDriverState *bs,
}
static int vmdk_write_extent(VmdkExtent *extent, int64_t cluster_offset,
- int64_t offset_in_cluster, const uint8_t *buf,
- int nb_sectors, int64_t sector_num)
+ int64_t offset_in_cluster, QEMUIOVector *qiov,
+ uint64_t qiov_offset, uint64_t n_bytes,
+ uint64_t offset)
{
int ret;
VmdkGrainMarker *data = NULL;
uLongf buf_len;
- const uint8_t *write_buf = buf;
- int write_len = nb_sectors * 512;
+ QEMUIOVector local_qiov;
+ struct iovec iov;
int64_t write_offset;
int64_t write_end_sector;
if (extent->compressed) {
+ void *compressed_data;
+
if (!extent->has_marker) {
ret = -EINVAL;
goto out;
}
buf_len = (extent->cluster_sectors << 9) * 2;
data = g_malloc(buf_len + sizeof(VmdkGrainMarker));
- if (compress(data->data, &buf_len, buf, nb_sectors << 9) != Z_OK ||
- buf_len == 0) {
+
+ compressed_data = g_malloc(n_bytes);
+ qemu_iovec_to_buf(qiov, qiov_offset, compressed_data, n_bytes);
+ ret = compress(data->data, &buf_len, compressed_data, n_bytes);
+ g_free(compressed_data);
+
+ if (ret != Z_OK || buf_len == 0) {
ret = -EINVAL;
goto out;
}
- data->lba = sector_num;
+
+ data->lba = offset >> BDRV_SECTOR_BITS;
data->size = buf_len;
- write_buf = (uint8_t *)data;
- write_len = buf_len + sizeof(VmdkGrainMarker);
+
+ n_bytes = buf_len + sizeof(VmdkGrainMarker);
+ iov = (struct iovec) {
+ .iov_base = data,
+ .iov_len = n_bytes,
+ };
+ qemu_iovec_init_external(&local_qiov, &iov, 1);
+ } else {
+ qemu_iovec_init(&local_qiov, qiov->niov);
+ qemu_iovec_concat(&local_qiov, qiov, qiov_offset, n_bytes);
}
+
write_offset = cluster_offset + offset_in_cluster,
- ret = bdrv_pwrite(extent->file->bs, write_offset, write_buf, write_len);
+ ret = bdrv_co_pwritev(extent->file->bs, write_offset, n_bytes,
+ &local_qiov, 0);
- write_end_sector = DIV_ROUND_UP(write_offset + write_len, BDRV_SECTOR_SIZE);
+ write_end_sector = DIV_ROUND_UP(write_offset + n_bytes, BDRV_SECTOR_SIZE);
if (extent->compressed) {
extent->next_cluster_sector = write_end_sector;
@@ -1359,19 +1387,21 @@ static int vmdk_write_extent(VmdkExtent *extent, int64_t cluster_offset,
write_end_sector);
}
- if (ret != write_len) {
- ret = ret < 0 ? ret : -EIO;
+ if (ret < 0) {
goto out;
}
ret = 0;
out:
g_free(data);
+ if (!extent->compressed) {
+ qemu_iovec_destroy(&local_qiov);
+ }
return ret;
}
static int vmdk_read_extent(VmdkExtent *extent, int64_t cluster_offset,
- int64_t offset_in_cluster, uint8_t *buf,
- int nb_sectors)
+ int64_t offset_in_cluster, QEMUIOVector *qiov,
+ int bytes)
{
int ret;
int cluster_bytes, buf_bytes;
@@ -1383,14 +1413,13 @@ static int vmdk_read_extent(VmdkExtent *extent, int64_t cluster_offset,
if (!extent->compressed) {
- ret = bdrv_pread(extent->file->bs,
- cluster_offset + offset_in_cluster,
- buf, nb_sectors * 512);
- if (ret == nb_sectors * 512) {
- return 0;
- } else {
- return -EIO;
+ ret = bdrv_co_preadv(extent->file->bs,
+ cluster_offset + offset_in_cluster, bytes,
+ qiov, 0);
+ if (ret < 0) {
+ return ret;
}
+ return 0;
}
cluster_bytes = extent->cluster_sectors * 512;
/* Read two clusters in case GrainMarker + compressed data > one cluster */
@@ -1422,11 +1451,11 @@ static int vmdk_read_extent(VmdkExtent *extent, int64_t cluster_offset,
}
if (offset_in_cluster < 0 ||
- offset_in_cluster + nb_sectors * 512 > buf_len) {
+ offset_in_cluster + bytes > buf_len) {
ret = -EINVAL;
goto out;
}
- memcpy(buf, uncomp_buf + offset_in_cluster, nb_sectors * 512);
+ qemu_iovec_from_buf(qiov, 0, uncomp_buf + offset_in_cluster, bytes);
ret = 0;
out:
@@ -1435,64 +1464,73 @@ static int vmdk_read_extent(VmdkExtent *extent, int64_t cluster_offset,
return ret;
}
-static int vmdk_read(BlockDriverState *bs, int64_t sector_num,
- uint8_t *buf, int nb_sectors)
+static int coroutine_fn
+vmdk_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
+ QEMUIOVector *qiov, int flags)
{
BDRVVmdkState *s = bs->opaque;
int ret;
- uint64_t n, index_in_cluster;
+ uint64_t n_bytes, offset_in_cluster;
VmdkExtent *extent = NULL;
+ QEMUIOVector local_qiov;
uint64_t cluster_offset;
+ uint64_t bytes_done = 0;
- while (nb_sectors > 0) {
- extent = find_extent(s, sector_num, extent);
+ qemu_iovec_init(&local_qiov, qiov->niov);
+ qemu_co_mutex_lock(&s->lock);
+
+ while (bytes > 0) {
+ extent = find_extent(s, offset >> BDRV_SECTOR_BITS, extent);
if (!extent) {
- return -EIO;
+ ret = -EIO;
+ goto fail;
}
ret = get_cluster_offset(bs, extent, NULL,
- sector_num << 9, false, &cluster_offset,
- 0, 0);
- index_in_cluster = vmdk_find_index_in_cluster(extent, sector_num);
- n = extent->cluster_sectors - index_in_cluster;
- if (n > nb_sectors) {
- n = nb_sectors;
- }
+ offset, false, &cluster_offset, 0, 0);
+ offset_in_cluster = vmdk_find_offset_in_cluster(extent, offset);
+
+ n_bytes = MIN(bytes, extent->cluster_sectors * BDRV_SECTOR_SIZE
+ - offset_in_cluster);
+
if (ret != VMDK_OK) {
/* if not allocated, try to read from parent image, if exist */
if (bs->backing && ret != VMDK_ZEROED) {
if (!vmdk_is_cid_valid(bs)) {
- return -EINVAL;
+ ret = -EINVAL;
+ goto fail;
}
- ret = bdrv_read(bs->backing->bs, sector_num, buf, n);
+
+ qemu_iovec_reset(&local_qiov);
+ qemu_iovec_concat(&local_qiov, qiov, bytes_done, n_bytes);
+
+ ret = bdrv_co_preadv(bs->backing->bs, offset, n_bytes,
+ &local_qiov, 0);
if (ret < 0) {
- return ret;
+ goto fail;
}
} else {
- memset(buf, 0, 512 * n);
+ qemu_iovec_memset(qiov, bytes_done, 0, n_bytes);
}
} else {
- ret = vmdk_read_extent(extent,
- cluster_offset, index_in_cluster * 512,
- buf, n);
+ qemu_iovec_reset(&local_qiov);
+ qemu_iovec_concat(&local_qiov, qiov, bytes_done, n_bytes);
+
+ ret = vmdk_read_extent(extent, cluster_offset, offset_in_cluster,
+ &local_qiov, n_bytes);
if (ret) {
- return ret;
+ goto fail;
}
}
- nb_sectors -= n;
- sector_num += n;
- buf += n * 512;
+ bytes -= n_bytes;
+ offset += n_bytes;
+ bytes_done += n_bytes;
}
- return 0;
-}
-static coroutine_fn int vmdk_co_read(BlockDriverState *bs, int64_t sector_num,
- uint8_t *buf, int nb_sectors)
-{
- int ret;
- BDRVVmdkState *s = bs->opaque;
- qemu_co_mutex_lock(&s->lock);
- ret = vmdk_read(bs, sector_num, buf, nb_sectors);
+ ret = 0;
+fail:
qemu_co_mutex_unlock(&s->lock);
+ qemu_iovec_destroy(&local_qiov);
+
return ret;
}
@@ -1506,38 +1544,38 @@ static coroutine_fn int vmdk_co_read(BlockDriverState *bs, int64_t sector_num,
*
* Returns: error code with 0 for success.
*/
-static int vmdk_write(BlockDriverState *bs, int64_t sector_num,
- const uint8_t *buf, int nb_sectors,
- bool zeroed, bool zero_dry_run)
+static int vmdk_pwritev(BlockDriverState *bs, uint64_t offset,
+ uint64_t bytes, QEMUIOVector *qiov,
+ bool zeroed, bool zero_dry_run)
{
BDRVVmdkState *s = bs->opaque;
VmdkExtent *extent = NULL;
int ret;
- int64_t index_in_cluster, n;
+ int64_t offset_in_cluster, n_bytes;
uint64_t cluster_offset;
+ uint64_t bytes_done = 0;
VmdkMetaData m_data;
- if (sector_num > bs->total_sectors) {
- error_report("Wrong offset: sector_num=0x%" PRIx64
+ if (DIV_ROUND_UP(offset, BDRV_SECTOR_SIZE) > bs->total_sectors) {
+ error_report("Wrong offset: offset=0x%" PRIx64
" total_sectors=0x%" PRIx64,
- sector_num, bs->total_sectors);
+ offset, bs->total_sectors);
return -EIO;
}
- while (nb_sectors > 0) {
- extent = find_extent(s, sector_num, extent);
+ while (bytes > 0) {
+ extent = find_extent(s, offset >> BDRV_SECTOR_BITS, extent);
if (!extent) {
return -EIO;
}
- index_in_cluster = vmdk_find_index_in_cluster(extent, sector_num);
- n = extent->cluster_sectors - index_in_cluster;
- if (n > nb_sectors) {
- n = nb_sectors;
- }
- ret = get_cluster_offset(bs, extent, &m_data, sector_num << 9,
+ offset_in_cluster = vmdk_find_offset_in_cluster(extent, offset);
+ n_bytes = MIN(bytes, extent->cluster_sectors * BDRV_SECTOR_SIZE
+ - offset_in_cluster);
+
+ ret = get_cluster_offset(bs, extent, &m_data, offset,
!(extent->compressed || zeroed),
- &cluster_offset,
- index_in_cluster, index_in_cluster + n);
+ &cluster_offset, offset_in_cluster,
+ offset_in_cluster + n_bytes);
if (extent->compressed) {
if (ret == VMDK_OK) {
/* Refuse write to allocated cluster for streamOptimized */
@@ -1546,7 +1584,7 @@ static int vmdk_write(BlockDriverState *bs, int64_t sector_num,
return -EIO;
} else {
/* allocate */
- ret = get_cluster_offset(bs, extent, &m_data, sector_num << 9,
+ ret = get_cluster_offset(bs, extent, &m_data, offset,
true, &cluster_offset, 0, 0);
}
}
@@ -1556,9 +1594,9 @@ static int vmdk_write(BlockDriverState *bs, int64_t sector_num,
if (zeroed) {
/* Do zeroed write, buf is ignored */
if (extent->has_zero_grain &&
- index_in_cluster == 0 &&
- n >= extent->cluster_sectors) {
- n = extent->cluster_sectors;
+ offset_in_cluster == 0 &&
+ n_bytes >= extent->cluster_sectors * BDRV_SECTOR_SIZE) {
+ n_bytes = extent->cluster_sectors * BDRV_SECTOR_SIZE;
if (!zero_dry_run) {
/* update L2 tables */
if (vmdk_L2update(extent, &m_data, VMDK_GTE_ZEROED)
@@ -1570,9 +1608,8 @@ static int vmdk_write(BlockDriverState *bs, int64_t sector_num,
return -ENOTSUP;
}
} else {
- ret = vmdk_write_extent(extent,
- cluster_offset, index_in_cluster * 512,
- buf, n, sector_num);
+ ret = vmdk_write_extent(extent, cluster_offset, offset_in_cluster,
+ qiov, bytes_done, n_bytes, offset);
if (ret) {
return ret;
}
@@ -1585,9 +1622,9 @@ static int vmdk_write(BlockDriverState *bs, int64_t sector_num,
}
}
}
- nb_sectors -= n;
- sector_num += n;
- buf += n * 512;
+ bytes -= n_bytes;
+ offset += n_bytes;
+ bytes_done += n_bytes;
/* update CID on the first write every time the virtual disk is
* opened */
@@ -1602,25 +1639,65 @@ static int vmdk_write(BlockDriverState *bs, int64_t sector_num,
return 0;
}
-static coroutine_fn int vmdk_co_write(BlockDriverState *bs, int64_t sector_num,
- const uint8_t *buf, int nb_sectors)
+static int coroutine_fn
+vmdk_co_pwritev(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
+ QEMUIOVector *qiov, int flags)
{
int ret;
BDRVVmdkState *s = bs->opaque;
qemu_co_mutex_lock(&s->lock);
- ret = vmdk_write(bs, sector_num, buf, nb_sectors, false, false);
+ ret = vmdk_pwritev(bs, offset, bytes, qiov, false, false);
qemu_co_mutex_unlock(&s->lock);
return ret;
}
+typedef struct VmdkWriteCompressedCo {
+ BlockDriverState *bs;
+ int64_t sector_num;
+ const uint8_t *buf;
+ int nb_sectors;
+ int ret;
+} VmdkWriteCompressedCo;
+
+static void vmdk_co_write_compressed(void *opaque)
+{
+ VmdkWriteCompressedCo *co = opaque;
+ QEMUIOVector local_qiov;
+ uint64_t offset = co->sector_num * BDRV_SECTOR_SIZE;
+ uint64_t bytes = co->nb_sectors * BDRV_SECTOR_SIZE;
+
+ struct iovec iov = (struct iovec) {
+        .iov_base = (uint8_t *) co->buf,
+ .iov_len = bytes,
+ };
+ qemu_iovec_init_external(&local_qiov, &iov, 1);
+
+ co->ret = vmdk_pwritev(co->bs, offset, bytes, &local_qiov, false, false);
+}
+
static int vmdk_write_compressed(BlockDriverState *bs,
int64_t sector_num,
const uint8_t *buf,
int nb_sectors)
{
BDRVVmdkState *s = bs->opaque;
+
if (s->num_extents == 1 && s->extents[0].compressed) {
- return vmdk_write(bs, sector_num, buf, nb_sectors, false, false);
+ Coroutine *co;
+ AioContext *aio_context = bdrv_get_aio_context(bs);
+ VmdkWriteCompressedCo data = {
+ .bs = bs,
+ .sector_num = sector_num,
+ .buf = buf,
+ .nb_sectors = nb_sectors,
+ .ret = -EINPROGRESS,
+ };
+ co = qemu_coroutine_create(vmdk_co_write_compressed);
+ qemu_coroutine_enter(co, &data);
+ while (data.ret == -EINPROGRESS) {
+ aio_poll(aio_context, true);
+ }
+ return data.ret;
} else {
return -ENOTSUP;
}
@@ -1633,12 +1710,15 @@ static int coroutine_fn vmdk_co_write_zeroes(BlockDriverState *bs,
{
int ret;
BDRVVmdkState *s = bs->opaque;
+ uint64_t offset = sector_num * BDRV_SECTOR_SIZE;
+ uint64_t bytes = nb_sectors * BDRV_SECTOR_SIZE;
+
qemu_co_mutex_lock(&s->lock);
/* write zeroes could fail if sectors not aligned to cluster, test it with
* dry_run == true before really updating image */
- ret = vmdk_write(bs, sector_num, NULL, nb_sectors, true, true);
+ ret = vmdk_pwritev(bs, offset, bytes, NULL, true, true);
if (!ret) {
- ret = vmdk_write(bs, sector_num, NULL, nb_sectors, true, false);
+ ret = vmdk_pwritev(bs, offset, bytes, NULL, true, false);
}
qemu_co_mutex_unlock(&s->lock);
return ret;
@@ -1728,12 +1808,12 @@ static int vmdk_create_extent(const char *filename, int64_t filesize,
header.check_bytes[3] = 0xa;
/* write all the data */
- ret = blk_pwrite(blk, 0, &magic, sizeof(magic));
+ ret = blk_pwrite(blk, 0, &magic, sizeof(magic), 0);
if (ret < 0) {
error_setg(errp, QERR_IO_ERROR);
goto exit;
}
- ret = blk_pwrite(blk, sizeof(magic), &header, sizeof(header));
+ ret = blk_pwrite(blk, sizeof(magic), &header, sizeof(header), 0);
if (ret < 0) {
error_setg(errp, QERR_IO_ERROR);
goto exit;
@@ -1753,7 +1833,7 @@ static int vmdk_create_extent(const char *filename, int64_t filesize,
gd_buf[i] = cpu_to_le32(tmp);
}
ret = blk_pwrite(blk, le64_to_cpu(header.rgd_offset) * BDRV_SECTOR_SIZE,
- gd_buf, gd_buf_size);
+ gd_buf, gd_buf_size, 0);
if (ret < 0) {
error_setg(errp, QERR_IO_ERROR);
goto exit;
@@ -1765,7 +1845,7 @@ static int vmdk_create_extent(const char *filename, int64_t filesize,
gd_buf[i] = cpu_to_le32(tmp);
}
ret = blk_pwrite(blk, le64_to_cpu(header.gd_offset) * BDRV_SECTOR_SIZE,
- gd_buf, gd_buf_size);
+ gd_buf, gd_buf_size, 0);
if (ret < 0) {
error_setg(errp, QERR_IO_ERROR);
goto exit;
@@ -1829,8 +1909,8 @@ static int vmdk_create(const char *filename, QemuOpts *opts, Error **errp)
int64_t total_size = 0, filesize;
char *adapter_type = NULL;
char *backing_file = NULL;
+ char *hw_version = NULL;
char *fmt = NULL;
- int flags = 0;
int ret = 0;
bool flat, split, compress;
GString *ext_desc_lines;
@@ -1861,7 +1941,7 @@ static int vmdk_create(const char *filename, QemuOpts *opts, Error **errp)
"# The Disk Data Base\n"
"#DDB\n"
"\n"
- "ddb.virtualHWVersion = \"%d\"\n"
+ "ddb.virtualHWVersion = \"%s\"\n"
"ddb.geometry.cylinders = \"%" PRId64 "\"\n"
"ddb.geometry.heads = \"%" PRIu32 "\"\n"
"ddb.geometry.sectors = \"63\"\n"
@@ -1878,8 +1958,20 @@ static int vmdk_create(const char *filename, QemuOpts *opts, Error **errp)
BDRV_SECTOR_SIZE);
adapter_type = qemu_opt_get_del(opts, BLOCK_OPT_ADAPTER_TYPE);
backing_file = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FILE);
+ hw_version = qemu_opt_get_del(opts, BLOCK_OPT_HWVERSION);
if (qemu_opt_get_bool_del(opts, BLOCK_OPT_COMPAT6, false)) {
- flags |= BLOCK_FLAG_COMPAT6;
+ if (strcmp(hw_version, "undefined")) {
+ error_setg(errp,
+ "compat6 cannot be enabled with hwversion set");
+ ret = -EINVAL;
+ goto exit;
+ }
+ g_free(hw_version);
+ hw_version = g_strdup("6");
+ }
+ if (strcmp(hw_version, "undefined") == 0) {
+ g_free(hw_version);
+ hw_version = g_strdup("4");
}
fmt = qemu_opt_get_del(opts, BLOCK_OPT_SUBFMT);
if (qemu_opt_get_bool_del(opts, BLOCK_OPT_ZEROED_GRAIN, false)) {
@@ -2001,7 +2093,7 @@ static int vmdk_create(const char *filename, QemuOpts *opts, Error **errp)
fmt,
parent_desc_line,
ext_desc_lines->str,
- (flags & BLOCK_FLAG_COMPAT6 ? 6 : 4),
+ hw_version,
total_size /
(int64_t)(63 * number_heads * BDRV_SECTOR_SIZE),
number_heads,
@@ -2028,7 +2120,7 @@ static int vmdk_create(const char *filename, QemuOpts *opts, Error **errp)
blk_set_allow_write_beyond_eof(new_blk, true);
- ret = blk_pwrite(new_blk, desc_offset, desc, desc_len);
+ ret = blk_pwrite(new_blk, desc_offset, desc, desc_len, 0);
if (ret < 0) {
error_setg_errno(errp, -ret, "Could not write description");
goto exit;
@@ -2047,6 +2139,7 @@ exit:
}
g_free(adapter_type);
g_free(backing_file);
+ g_free(hw_version);
g_free(fmt);
g_free(desc);
g_free(path);
@@ -2298,6 +2391,12 @@ static QemuOptsList vmdk_create_opts = {
.def_value_str = "off"
},
{
+ .name = BLOCK_OPT_HWVERSION,
+ .type = QEMU_OPT_STRING,
+ .help = "VMDK hardware version",
+ .def_value_str = "undefined"
+ },
+ {
.name = BLOCK_OPT_SUBFMT,
.type = QEMU_OPT_STRING,
.help =
@@ -2321,8 +2420,8 @@ static BlockDriver bdrv_vmdk = {
.bdrv_open = vmdk_open,
.bdrv_check = vmdk_check,
.bdrv_reopen_prepare = vmdk_reopen_prepare,
- .bdrv_read = vmdk_co_read,
- .bdrv_write = vmdk_co_write,
+ .bdrv_co_preadv = vmdk_co_preadv,
+ .bdrv_co_pwritev = vmdk_co_pwritev,
.bdrv_write_compressed = vmdk_write_compressed,
.bdrv_co_write_zeroes = vmdk_co_write_zeroes,
.bdrv_close = vmdk_close,
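
vmdk_write_compressed() keeps its synchronous byte-buffer interface by
spawning a coroutine that runs the new vectored vmdk_pwritev() and polling the
AioContext until the -EINPROGRESS sentinel is replaced by the real result; the
caller's buffer is wrapped as a one-element QEMUIOVector on the way in. A
stand-in for just the wrapping step (wrap_buffer is an invented name):

    #include <stddef.h>
    #include <sys/uio.h>

    /* Wrap a plain (buf, len) pair as a single iovec, the same thing
     * qemu_iovec_init_external() is fed in the patched code; the const is
     * cast away just as the patch does for co->buf. */
    static struct iovec wrap_buffer(const void *buf, size_t len)
    {
        struct iovec iov = {
            .iov_base = (void *)buf,
            .iov_len  = len,
        };
        return iov;
    }
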
diff --git a/block/vpc.c b/block/vpc.c
index 3e2ea698d9..0379813e2f 100644
--- a/block/vpc.c
+++ b/block/vpc.c
@@ -454,22 +454,21 @@ static int vpc_reopen_prepare(BDRVReopenState *state,
* The parameter write must be 1 if the offset will be used for a write
* operation (the block bitmaps is updated then), 0 otherwise.
*/
-static inline int64_t get_sector_offset(BlockDriverState *bs,
- int64_t sector_num, int write)
+static inline int64_t get_image_offset(BlockDriverState *bs, uint64_t offset,
+ bool write)
{
BDRVVPCState *s = bs->opaque;
- uint64_t offset = sector_num * 512;
uint64_t bitmap_offset, block_offset;
- uint32_t pagetable_index, pageentry_index;
+ uint32_t pagetable_index, offset_in_block;
pagetable_index = offset / s->block_size;
- pageentry_index = (offset % s->block_size) / 512;
+ offset_in_block = offset % s->block_size;
if (pagetable_index >= s->max_table_entries || s->pagetable[pagetable_index] == 0xffffffff)
return -1; /* not allocated */
bitmap_offset = 512 * (uint64_t) s->pagetable[pagetable_index];
- block_offset = bitmap_offset + s->bitmap_size + (512 * pageentry_index);
+ block_offset = bitmap_offset + s->bitmap_size + offset_in_block;
/* We must ensure that we don't write to any sectors which are marked as
unused in the bitmap. We get away with setting all bits in the block
@@ -487,6 +486,12 @@ static inline int64_t get_sector_offset(BlockDriverState *bs,
return block_offset;
}
+static inline int64_t get_sector_offset(BlockDriverState *bs,
+ int64_t sector_num, bool write)
+{
+ return get_image_offset(bs, sector_num * BDRV_SECTOR_SIZE, write);
+}
+
/*
* Writes the footer to the end of the image file. This is needed when the
* file grows as it overwrites the old footer
@@ -513,7 +518,7 @@ static int rewrite_footer(BlockDriverState* bs)
*
* Returns the sectors' offset in the image file on success and < 0 on error
*/
-static int64_t alloc_block(BlockDriverState* bs, int64_t sector_num)
+static int64_t alloc_block(BlockDriverState* bs, int64_t offset)
{
BDRVVPCState *s = bs->opaque;
int64_t bat_offset;
@@ -522,14 +527,13 @@ static int64_t alloc_block(BlockDriverState* bs, int64_t sector_num)
uint8_t bitmap[s->bitmap_size];
/* Check if sector_num is valid */
- if ((sector_num < 0) || (sector_num > bs->total_sectors))
- return -1;
+ if ((offset < 0) || (offset > bs->total_sectors * BDRV_SECTOR_SIZE)) {
+ return -EINVAL;
+ }
/* Write entry into in-memory BAT */
- index = (sector_num * 512) / s->block_size;
- if (s->pagetable[index] != 0xFFFFFFFF)
- return -1;
-
+ index = offset / s->block_size;
+ assert(s->pagetable[index] == 0xFFFFFFFF);
s->pagetable[index] = s->free_data_block_offset / 512;
/* Initialize the block's bitmap */
@@ -553,11 +557,11 @@ static int64_t alloc_block(BlockDriverState* bs, int64_t sector_num)
if (ret < 0)
goto fail;
- return get_sector_offset(bs, sector_num, 0);
+ return get_image_offset(bs, offset, false);
fail:
s->free_data_block_offset -= (s->block_size + s->bitmap_size);
- return -1;
+ return ret;
}
static int vpc_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
@@ -573,104 +577,105 @@ static int vpc_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
return 0;
}
-static int vpc_read(BlockDriverState *bs, int64_t sector_num,
- uint8_t *buf, int nb_sectors)
+static int coroutine_fn
+vpc_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
+ QEMUIOVector *qiov, int flags)
{
BDRVVPCState *s = bs->opaque;
int ret;
- int64_t offset;
- int64_t sectors, sectors_per_block;
+ int64_t image_offset;
+ int64_t n_bytes;
+ int64_t bytes_done = 0;
VHDFooter *footer = (VHDFooter *) s->footer_buf;
+ QEMUIOVector local_qiov;
if (be32_to_cpu(footer->type) == VHD_FIXED) {
- return bdrv_read(bs->file->bs, sector_num, buf, nb_sectors);
+ return bdrv_co_preadv(bs->file->bs, offset, bytes, qiov, 0);
}
- while (nb_sectors > 0) {
- offset = get_sector_offset(bs, sector_num, 0);
- sectors_per_block = s->block_size >> BDRV_SECTOR_BITS;
- sectors = sectors_per_block - (sector_num % sectors_per_block);
- if (sectors > nb_sectors) {
- sectors = nb_sectors;
- }
+ qemu_co_mutex_lock(&s->lock);
+ qemu_iovec_init(&local_qiov, qiov->niov);
+
+ while (bytes > 0) {
+ image_offset = get_image_offset(bs, offset, false);
+ n_bytes = MIN(bytes, s->block_size - (offset % s->block_size));
- if (offset == -1) {
- memset(buf, 0, sectors * BDRV_SECTOR_SIZE);
+ if (image_offset == -1) {
+ qemu_iovec_memset(qiov, bytes_done, 0, n_bytes);
} else {
- ret = bdrv_pread(bs->file->bs, offset, buf,
- sectors * BDRV_SECTOR_SIZE);
- if (ret != sectors * BDRV_SECTOR_SIZE) {
- return -1;
+ qemu_iovec_reset(&local_qiov);
+ qemu_iovec_concat(&local_qiov, qiov, bytes_done, n_bytes);
+
+ ret = bdrv_co_preadv(bs->file->bs, image_offset, n_bytes,
+ &local_qiov, 0);
+ if (ret < 0) {
+ goto fail;
}
}
- nb_sectors -= sectors;
- sector_num += sectors;
- buf += sectors * BDRV_SECTOR_SIZE;
+ bytes -= n_bytes;
+ offset += n_bytes;
+ bytes_done += n_bytes;
}
- return 0;
-}
-static coroutine_fn int vpc_co_read(BlockDriverState *bs, int64_t sector_num,
- uint8_t *buf, int nb_sectors)
-{
- int ret;
- BDRVVPCState *s = bs->opaque;
- qemu_co_mutex_lock(&s->lock);
- ret = vpc_read(bs, sector_num, buf, nb_sectors);
+ ret = 0;
+fail:
+ qemu_iovec_destroy(&local_qiov);
qemu_co_mutex_unlock(&s->lock);
+
return ret;
}
-static int vpc_write(BlockDriverState *bs, int64_t sector_num,
- const uint8_t *buf, int nb_sectors)
+static int coroutine_fn
+vpc_co_pwritev(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
+ QEMUIOVector *qiov, int flags)
{
BDRVVPCState *s = bs->opaque;
- int64_t offset;
- int64_t sectors, sectors_per_block;
+ int64_t image_offset;
+ int64_t n_bytes;
+ int64_t bytes_done = 0;
int ret;
VHDFooter *footer = (VHDFooter *) s->footer_buf;
+ QEMUIOVector local_qiov;
if (be32_to_cpu(footer->type) == VHD_FIXED) {
- return bdrv_write(bs->file->bs, sector_num, buf, nb_sectors);
+ return bdrv_co_pwritev(bs->file->bs, offset, bytes, qiov, 0);
}
- while (nb_sectors > 0) {
- offset = get_sector_offset(bs, sector_num, 1);
- sectors_per_block = s->block_size >> BDRV_SECTOR_BITS;
- sectors = sectors_per_block - (sector_num % sectors_per_block);
- if (sectors > nb_sectors) {
- sectors = nb_sectors;
- }
+ qemu_co_mutex_lock(&s->lock);
+ qemu_iovec_init(&local_qiov, qiov->niov);
+
+ while (bytes > 0) {
+ image_offset = get_image_offset(bs, offset, true);
+ n_bytes = MIN(bytes, s->block_size - (offset % s->block_size));
- if (offset == -1) {
- offset = alloc_block(bs, sector_num);
- if (offset < 0)
- return -1;
+ if (image_offset == -1) {
+ image_offset = alloc_block(bs, offset);
+ if (image_offset < 0) {
+ ret = image_offset;
+ goto fail;
+ }
}
- ret = bdrv_pwrite(bs->file->bs, offset, buf,
- sectors * BDRV_SECTOR_SIZE);
- if (ret != sectors * BDRV_SECTOR_SIZE) {
- return -1;
+ qemu_iovec_reset(&local_qiov);
+ qemu_iovec_concat(&local_qiov, qiov, bytes_done, n_bytes);
+
+ ret = bdrv_co_pwritev(bs->file->bs, image_offset, n_bytes,
+ &local_qiov, 0);
+ if (ret < 0) {
+ goto fail;
}
- nb_sectors -= sectors;
- sector_num += sectors;
- buf += sectors * BDRV_SECTOR_SIZE;
+ bytes -= n_bytes;
+ offset += n_bytes;
+ bytes_done += n_bytes;
}
- return 0;
-}
-
-static coroutine_fn int vpc_co_write(BlockDriverState *bs, int64_t sector_num,
- const uint8_t *buf, int nb_sectors)
-{
- int ret;
- BDRVVPCState *s = bs->opaque;
- qemu_co_mutex_lock(&s->lock);
- ret = vpc_write(bs, sector_num, buf, nb_sectors);
+ ret = 0;
+fail:
+ qemu_iovec_destroy(&local_qiov);
qemu_co_mutex_unlock(&s->lock);
+
return ret;
}
@@ -783,13 +788,13 @@ static int create_dynamic_disk(BlockBackend *blk, uint8_t *buf,
block_size = 0x200000;
num_bat_entries = (total_sectors + block_size / 512) / (block_size / 512);
- ret = blk_pwrite(blk, offset, buf, HEADER_SIZE);
+ ret = blk_pwrite(blk, offset, buf, HEADER_SIZE, 0);
if (ret < 0) {
goto fail;
}
offset = 1536 + ((num_bat_entries * 4 + 511) & ~511);
- ret = blk_pwrite(blk, offset, buf, HEADER_SIZE);
+ ret = blk_pwrite(blk, offset, buf, HEADER_SIZE, 0);
if (ret < 0) {
goto fail;
}
@@ -799,7 +804,7 @@ static int create_dynamic_disk(BlockBackend *blk, uint8_t *buf,
memset(buf, 0xFF, 512);
for (i = 0; i < (num_bat_entries * 4 + 511) / 512; i++) {
- ret = blk_pwrite(blk, offset, buf, 512);
+ ret = blk_pwrite(blk, offset, buf, 512, 0);
if (ret < 0) {
goto fail;
}
@@ -826,7 +831,7 @@ static int create_dynamic_disk(BlockBackend *blk, uint8_t *buf,
/* Write the header */
offset = 512;
- ret = blk_pwrite(blk, offset, buf, 1024);
+ ret = blk_pwrite(blk, offset, buf, 1024, 0);
if (ret < 0) {
goto fail;
}
@@ -848,7 +853,7 @@ static int create_fixed_disk(BlockBackend *blk, uint8_t *buf,
return ret;
}
- ret = blk_pwrite(blk, total_size - HEADER_SIZE, buf, HEADER_SIZE);
+ ret = blk_pwrite(blk, total_size - HEADER_SIZE, buf, HEADER_SIZE, 0);
if (ret < 0) {
return ret;
}
@@ -1056,8 +1061,8 @@ static BlockDriver bdrv_vpc = {
.bdrv_reopen_prepare = vpc_reopen_prepare,
.bdrv_create = vpc_create,
- .bdrv_read = vpc_co_read,
- .bdrv_write = vpc_co_write,
+ .bdrv_co_preadv = vpc_co_preadv,
+ .bdrv_co_pwritev = vpc_co_pwritev,
.bdrv_co_get_block_status = vpc_co_get_block_status,
.bdrv_get_info = vpc_get_info,
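The vpc change above replaces the sector-based vpc_read()/vpc_write() callbacks with byte-based coroutine handlers; each loop iteration is clamped so a request never crosses the 2 MiB block that contains the current offset (n_bytes = MIN(bytes, s->block_size - (offset % s->block_size))). A minimal standalone sketch of that splitting pattern, in plain C with hypothetical names rather than QEMU APIs:

    #include <stdint.h>
    #include <stdio.h>

    #define BLOCK_SIZE (2 * 1024 * 1024)   /* dynamic VHD block size used above */

    /* Split a byte range into chunks that never cross a block boundary. */
    static void split_request(uint64_t offset, uint64_t bytes)
    {
        while (bytes > 0) {
            uint64_t n = BLOCK_SIZE - (offset % BLOCK_SIZE);
            if (n > bytes) {
                n = bytes;
            }
            printf("chunk at %llu, %llu bytes\n",
                   (unsigned long long)offset, (unsigned long long)n);
            offset += n;
            bytes -= n;
        }
    }

    int main(void)
    {
        split_request(BLOCK_SIZE - 512, 4096);   /* crosses one block boundary */
        return 0;
    }

Unallocated chunks are zero-filled on read (qemu_iovec_memset) and trigger alloc_block() on write, exactly as in the loops above.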
diff --git a/block/vvfat.c b/block/vvfat.c
index 183fc4f049..5b0c8dd639 100644
--- a/block/vvfat.c
+++ b/block/vvfat.c
@@ -1179,6 +1179,7 @@ static int vvfat_open(BlockDriverState *bs, QDict *options, int flags,
bs->read_only = 0;
}
+ bs->request_alignment = BDRV_SECTOR_SIZE; /* No sub-sector I/O supported */
bs->total_sectors = cyls * heads * secs;
if (init_directories(s, dirname, heads, secs, errp)) {
@@ -1421,14 +1422,31 @@ DLOG(fprintf(stderr, "sector %d not allocated\n", (int)sector_num));
return 0;
}
-static coroutine_fn int vvfat_co_read(BlockDriverState *bs, int64_t sector_num,
- uint8_t *buf, int nb_sectors)
+static int coroutine_fn
+vvfat_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
+ QEMUIOVector *qiov, int flags)
{
int ret;
BDRVVVFATState *s = bs->opaque;
+ uint64_t sector_num = offset >> BDRV_SECTOR_BITS;
+ int nb_sectors = bytes >> BDRV_SECTOR_BITS;
+ void *buf;
+
+ assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
+ assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
+
+ buf = g_try_malloc(bytes);
+ if (bytes && buf == NULL) {
+ return -ENOMEM;
+ }
+
qemu_co_mutex_lock(&s->lock);
ret = vvfat_read(bs, sector_num, buf, nb_sectors);
qemu_co_mutex_unlock(&s->lock);
+
+ qemu_iovec_from_buf(qiov, 0, buf, bytes);
+ g_free(buf);
+
return ret;
}
@@ -2880,14 +2898,31 @@ DLOG(checkpoint());
return 0;
}
-static coroutine_fn int vvfat_co_write(BlockDriverState *bs, int64_t sector_num,
- const uint8_t *buf, int nb_sectors)
+static int coroutine_fn
+vvfat_co_pwritev(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
+ QEMUIOVector *qiov, int flags)
{
int ret;
BDRVVVFATState *s = bs->opaque;
+ uint64_t sector_num = offset >> BDRV_SECTOR_BITS;
+ int nb_sectors = bytes >> BDRV_SECTOR_BITS;
+ void *buf;
+
+ assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
+ assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
+
+ buf = g_try_malloc(bytes);
+ if (bytes && buf == NULL) {
+ return -ENOMEM;
+ }
+ qemu_iovec_to_buf(qiov, 0, buf, bytes);
+
qemu_co_mutex_lock(&s->lock);
ret = vvfat_write(bs, sector_num, buf, nb_sectors);
qemu_co_mutex_unlock(&s->lock);
+
+ g_free(buf);
+
return ret;
}
@@ -2904,8 +2939,10 @@ static int64_t coroutine_fn vvfat_co_get_block_status(BlockDriverState *bs,
return BDRV_BLOCK_DATA;
}
-static int write_target_commit(BlockDriverState *bs, int64_t sector_num,
- const uint8_t* buffer, int nb_sectors) {
+static int coroutine_fn
+write_target_commit(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
+ QEMUIOVector *qiov, int flags)
+{
BDRVVVFATState* s = *((BDRVVVFATState**) bs->opaque);
return try_commit(s);
}
@@ -2918,7 +2955,7 @@ static void write_target_close(BlockDriverState *bs) {
static BlockDriver vvfat_write_target = {
.format_name = "vvfat_write_target",
- .bdrv_write = write_target_commit,
+ .bdrv_co_pwritev = write_target_commit,
.bdrv_close = write_target_close,
};
@@ -3014,8 +3051,8 @@ static BlockDriver bdrv_vvfat = {
.bdrv_file_open = vvfat_open,
.bdrv_close = vvfat_close,
- .bdrv_read = vvfat_co_read,
- .bdrv_write = vvfat_co_write,
+ .bdrv_co_preadv = vvfat_co_preadv,
+ .bdrv_co_pwritev = vvfat_co_pwritev,
.bdrv_co_get_block_status = vvfat_co_get_block_status,
};
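vvfat keeps its internal sector-based read/write paths, so the new byte-based callbacks above bounce each request through a linear buffer with qemu_iovec_to_buf()/qemu_iovec_from_buf(). A rough standalone illustration of that bounce-buffer idea (hypothetical types and names, not the QEMUIOVector API):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct iov { void *base; size_t len; };

    /* Gather a scatter/gather list into one linear bounce buffer. */
    static void *gather(const struct iov *v, int cnt, size_t *total)
    {
        size_t n = 0, off = 0;
        for (int i = 0; i < cnt; i++) {
            n += v[i].len;
        }
        uint8_t *buf = malloc(n ? n : 1);
        for (int i = 0; buf && i < cnt; i++) {
            memcpy(buf + off, v[i].base, v[i].len);
            off += v[i].len;
        }
        *total = n;
        return buf;
    }

    /* Scatter the linear buffer back into the caller's vectors. */
    static void scatter(const void *buf, const struct iov *v, int cnt)
    {
        size_t off = 0;
        for (int i = 0; i < cnt; i++) {
            memcpy(v[i].base, (const uint8_t *)buf + off, v[i].len);
            off += v[i].len;
        }
    }

    int main(void)
    {
        char a[4] = "abcd", b[3] = "efg";
        struct iov v[2] = { { a, 4 }, { b, 3 } };
        size_t total;
        void *bounce = gather(v, 2, &total);   /* like qemu_iovec_to_buf() */
        /* ... a sector-based backend would read into or write from 'bounce' ... */
        scatter(bounce, v, 2);                 /* like qemu_iovec_from_buf() */
        printf("%zu bytes bounced\n", total);
        free(bounce);
        return 0;
    }

The g_try_malloc()/-ENOMEM check in the patch covers the case where the bounce buffer cannot be allocated for a large guest request.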
diff --git a/blockdev.c b/blockdev.c
index f1f520a265..1892b8ec8e 100644
--- a/blockdev.c
+++ b/blockdev.c
@@ -73,7 +73,7 @@ static int if_max_devs[IF_COUNT] = {
* Do not change these numbers! They govern how drive option
* index maps to unit and bus. That mapping is ABI.
*
- * All controllers used to imlement if=T drives need to support
+ * All controllers used to implement if=T drives need to support
* if_max_devs[T] units, for any T with if_max_devs[T] != 0.
* Otherwise, some index values map to "impossible" bus, unit
* values.
@@ -4092,6 +4092,61 @@ out:
aio_context_release(aio_context);
}
+static BdrvChild *bdrv_find_child(BlockDriverState *parent_bs,
+ const char *child_name)
+{
+ BdrvChild *child;
+
+ QLIST_FOREACH(child, &parent_bs->children, next) {
+ if (strcmp(child->name, child_name) == 0) {
+ return child;
+ }
+ }
+
+ return NULL;
+}
+
+void qmp_x_blockdev_change(const char *parent, bool has_child,
+ const char *child, bool has_node,
+ const char *node, Error **errp)
+{
+ BlockDriverState *parent_bs, *new_bs = NULL;
+ BdrvChild *p_child;
+
+ parent_bs = bdrv_lookup_bs(parent, parent, errp);
+ if (!parent_bs) {
+ return;
+ }
+
+ if (has_child == has_node) {
+ if (has_child) {
+ error_setg(errp, "The parameters child and node are in conflict");
+ } else {
+ error_setg(errp, "Either child or node must be specified");
+ }
+ return;
+ }
+
+ if (has_child) {
+ p_child = bdrv_find_child(parent_bs, child);
+ if (!p_child) {
+ error_setg(errp, "Node '%s' does not have child '%s'",
+ parent, child);
+ return;
+ }
+ bdrv_del_child(parent_bs, p_child, errp);
+ }
+
+ if (has_node) {
+ new_bs = bdrv_find_node(node);
+ if (!new_bs) {
+ error_setg(errp, "Node '%s' not found", node);
+ return;
+ }
+ bdrv_add_child(parent_bs, new_bs, errp);
+ }
+}
+
BlockJobInfoList *qmp_query_block_jobs(Error **errp)
{
BlockJobInfoList *head = NULL, **p_next = &head;
diff --git a/cpu-exec-common.c b/cpu-exec-common.c
index 1b1731cd83..6bdda6b6b0 100644
--- a/cpu-exec-common.c
+++ b/cpu-exec-common.c
@@ -68,7 +68,6 @@ void cpu_reloading_memory_map(void)
void cpu_loop_exit(CPUState *cpu)
{
- cpu->current_tb = NULL;
siglongjmp(cpu->jmp_env, 1);
}
@@ -77,6 +76,5 @@ void cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc)
if (pc) {
cpu_restore_state(cpu, pc);
}
- cpu->current_tb = NULL;
siglongjmp(cpu->jmp_env, 1);
}
diff --git a/cpu-exec.c b/cpu-exec.c
index bbfcbfb543..14df1aacf4 100644
--- a/cpu-exec.c
+++ b/cpu-exec.c
@@ -136,7 +136,9 @@ static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, TranslationBlock *itb)
{
CPUArchState *env = cpu->env_ptr;
- uintptr_t next_tb;
+ uintptr_t ret;
+ TranslationBlock *last_tb;
+ int tb_exit;
uint8_t *tb_ptr = itb->tc_ptr;
qemu_log_mask_and_addr(CPU_LOG_EXEC, itb->pc,
@@ -160,118 +162,125 @@ static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, TranslationBlock *itb)
#endif /* DEBUG_DISAS */
cpu->can_do_io = !use_icount;
- next_tb = tcg_qemu_tb_exec(env, tb_ptr);
+ ret = tcg_qemu_tb_exec(env, tb_ptr);
cpu->can_do_io = 1;
- trace_exec_tb_exit((void *) (next_tb & ~TB_EXIT_MASK),
- next_tb & TB_EXIT_MASK);
+ last_tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
+ tb_exit = ret & TB_EXIT_MASK;
+ trace_exec_tb_exit(last_tb, tb_exit);
- if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
+ if (tb_exit > TB_EXIT_IDX1) {
/* We didn't start executing this TB (eg because the instruction
* counter hit zero); we must restore the guest PC to the address
* of the start of the TB.
*/
CPUClass *cc = CPU_GET_CLASS(cpu);
- TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
- qemu_log_mask_and_addr(CPU_LOG_EXEC, itb->pc,
+ qemu_log_mask_and_addr(CPU_LOG_EXEC, last_tb->pc,
"Stopped execution of TB chain before %p ["
TARGET_FMT_lx "] %s\n",
- itb->tc_ptr, itb->pc, lookup_symbol(itb->pc));
+ last_tb->tc_ptr, last_tb->pc,
+ lookup_symbol(last_tb->pc));
if (cc->synchronize_from_tb) {
- cc->synchronize_from_tb(cpu, tb);
+ cc->synchronize_from_tb(cpu, last_tb);
} else {
assert(cc->set_pc);
- cc->set_pc(cpu, tb->pc);
+ cc->set_pc(cpu, last_tb->pc);
}
}
- if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) {
+ if (tb_exit == TB_EXIT_REQUESTED) {
/* We were asked to stop executing TBs (probably a pending
* interrupt. We've now stopped, so clear the flag.
*/
cpu->tcg_exit_req = 0;
}
- return next_tb;
+ return ret;
}
+#ifndef CONFIG_USER_ONLY
/* Execute the code without caching the generated code. An interpreter
could be used if available. */
static void cpu_exec_nocache(CPUState *cpu, int max_cycles,
TranslationBlock *orig_tb, bool ignore_icount)
{
TranslationBlock *tb;
+ bool old_tb_flushed;
/* Should never happen.
We only end up here when an existing TB is too long. */
if (max_cycles > CF_COUNT_MASK)
max_cycles = CF_COUNT_MASK;
+ old_tb_flushed = cpu->tb_flushed;
+ cpu->tb_flushed = false;
tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
max_cycles | CF_NOCACHE
| (ignore_icount ? CF_IGNORE_ICOUNT : 0));
- tb->orig_tb = tcg_ctx.tb_ctx.tb_invalidated_flag ? NULL : orig_tb;
- cpu->current_tb = tb;
+ tb->orig_tb = cpu->tb_flushed ? NULL : orig_tb;
+ cpu->tb_flushed |= old_tb_flushed;
/* execute the generated code */
trace_exec_tb_nocache(tb, tb->pc);
cpu_tb_exec(cpu, tb);
- cpu->current_tb = NULL;
tb_phys_invalidate(tb, -1);
tb_free(tb);
}
+#endif
static TranslationBlock *tb_find_physical(CPUState *cpu,
target_ulong pc,
target_ulong cs_base,
- uint64_t flags)
+ uint32_t flags)
{
CPUArchState *env = (CPUArchState *)cpu->env_ptr;
- TranslationBlock *tb, **ptb1;
+ TranslationBlock *tb, **tb_hash_head, **ptb1;
unsigned int h;
tb_page_addr_t phys_pc, phys_page1;
- target_ulong virt_page2;
-
- tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
/* find translated block using physical mappings */
phys_pc = get_page_addr_code(env, pc);
phys_page1 = phys_pc & TARGET_PAGE_MASK;
h = tb_phys_hash_func(phys_pc);
- ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
- for(;;) {
- tb = *ptb1;
- if (!tb) {
- return NULL;
- }
+
+ /* Start at head of the hash entry */
+ ptb1 = tb_hash_head = &tcg_ctx.tb_ctx.tb_phys_hash[h];
+ tb = *ptb1;
+
+ while (tb) {
if (tb->pc == pc &&
tb->page_addr[0] == phys_page1 &&
tb->cs_base == cs_base &&
tb->flags == flags) {
- /* check next page if needed */
- if (tb->page_addr[1] != -1) {
- tb_page_addr_t phys_page2;
- virt_page2 = (pc & TARGET_PAGE_MASK) +
- TARGET_PAGE_SIZE;
- phys_page2 = get_page_addr_code(env, virt_page2);
+ if (tb->page_addr[1] == -1) {
+ /* done, we have a match */
+ break;
+ } else {
+ /* check next page if needed */
+ target_ulong virt_page2 = (pc & TARGET_PAGE_MASK) +
+ TARGET_PAGE_SIZE;
+ tb_page_addr_t phys_page2 = get_page_addr_code(env, virt_page2);
+
if (tb->page_addr[1] == phys_page2) {
break;
}
- } else {
- break;
}
}
+
ptb1 = &tb->phys_hash_next;
+ tb = *ptb1;
}
- /* Move the TB to the head of the list */
- *ptb1 = tb->phys_hash_next;
- tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
- tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
+ if (tb) {
+ /* Move the TB to the head of the list */
+ *ptb1 = tb->phys_hash_next;
+ tb->phys_hash_next = *tb_hash_head;
+ *tb_hash_head = tb;
+ }
return tb;
}
static TranslationBlock *tb_find_slow(CPUState *cpu,
target_ulong pc,
target_ulong cs_base,
- uint64_t flags)
+ uint32_t flags)
{
TranslationBlock *tb;
@@ -309,26 +318,63 @@ found:
return tb;
}
-static inline TranslationBlock *tb_find_fast(CPUState *cpu)
+static inline TranslationBlock *tb_find_fast(CPUState *cpu,
+ TranslationBlock **last_tb,
+ int tb_exit)
{
CPUArchState *env = (CPUArchState *)cpu->env_ptr;
TranslationBlock *tb;
target_ulong cs_base, pc;
- int flags;
+ uint32_t flags;
/* we record a subset of the CPU state. It will
always be the same before a given translated block
is executed. */
cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
+ tb_lock();
tb = cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
tb->flags != flags)) {
tb = tb_find_slow(cpu, pc, cs_base, flags);
}
+ if (cpu->tb_flushed) {
+ /* Ensure that no TB jump will be modified as the
+ * translation buffer has been flushed.
+ */
+ *last_tb = NULL;
+ cpu->tb_flushed = false;
+ }
+ /* See if we can patch the calling TB. */
+ if (*last_tb && !qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
+ tb_add_jump(*last_tb, tb_exit, tb);
+ }
+ tb_unlock();
return tb;
}
-static void cpu_handle_debug_exception(CPUState *cpu)
+static inline bool cpu_handle_halt(CPUState *cpu)
+{
+ if (cpu->halted) {
+#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
+ if ((cpu->interrupt_request & CPU_INTERRUPT_POLL)
+ && replay_interrupt()) {
+ X86CPU *x86_cpu = X86_CPU(cpu);
+ apic_poll_irq(x86_cpu->apic_state);
+ cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
+ }
+#endif
+ if (!cpu_has_work(cpu)) {
+ current_cpu = NULL;
+ return true;
+ }
+
+ cpu->halted = 0;
+ }
+
+ return false;
+}
+
+static inline void cpu_handle_debug_exception(CPUState *cpu)
{
CPUClass *cc = CPU_GET_CLASS(cpu);
CPUWatchpoint *wp;
@@ -342,37 +388,197 @@ static void cpu_handle_debug_exception(CPUState *cpu)
cc->debug_excp_handler(cpu);
}
+static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
+{
+ if (cpu->exception_index >= 0) {
+ if (cpu->exception_index >= EXCP_INTERRUPT) {
+ /* exit request from the cpu execution loop */
+ *ret = cpu->exception_index;
+ if (*ret == EXCP_DEBUG) {
+ cpu_handle_debug_exception(cpu);
+ }
+ cpu->exception_index = -1;
+ return true;
+ } else {
+#if defined(CONFIG_USER_ONLY)
+ /* if user mode only, we simulate a fake exception
+ which will be handled outside the cpu execution
+ loop */
+#if defined(TARGET_I386)
+ CPUClass *cc = CPU_GET_CLASS(cpu);
+ cc->do_interrupt(cpu);
+#endif
+ *ret = cpu->exception_index;
+ cpu->exception_index = -1;
+ return true;
+#else
+ if (replay_exception()) {
+ CPUClass *cc = CPU_GET_CLASS(cpu);
+ cc->do_interrupt(cpu);
+ cpu->exception_index = -1;
+ } else if (!replay_has_interrupt()) {
+ /* give a chance to iothread in replay mode */
+ *ret = EXCP_INTERRUPT;
+ return true;
+ }
+#endif
+ }
+#ifndef CONFIG_USER_ONLY
+ } else if (replay_has_exception()
+ && cpu->icount_decr.u16.low + cpu->icount_extra == 0) {
+ /* try to cause an exception pending in the log */
+ TranslationBlock *last_tb = NULL; /* Avoid chaining TBs */
+ cpu_exec_nocache(cpu, 1, tb_find_fast(cpu, &last_tb, 0), true);
+ *ret = -1;
+ return true;
+#endif
+ }
+
+ return false;
+}
+
+static inline void cpu_handle_interrupt(CPUState *cpu,
+ TranslationBlock **last_tb)
+{
+ CPUClass *cc = CPU_GET_CLASS(cpu);
+ int interrupt_request = cpu->interrupt_request;
+
+ if (unlikely(interrupt_request)) {
+ if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
+ /* Mask out external interrupts for this step. */
+ interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
+ }
+ if (interrupt_request & CPU_INTERRUPT_DEBUG) {
+ cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
+ cpu->exception_index = EXCP_DEBUG;
+ cpu_loop_exit(cpu);
+ }
+ if (replay_mode == REPLAY_MODE_PLAY && !replay_has_interrupt()) {
+ /* Do nothing */
+ } else if (interrupt_request & CPU_INTERRUPT_HALT) {
+ replay_interrupt();
+ cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
+ cpu->halted = 1;
+ cpu->exception_index = EXCP_HLT;
+ cpu_loop_exit(cpu);
+ }
+#if defined(TARGET_I386)
+ else if (interrupt_request & CPU_INTERRUPT_INIT) {
+ X86CPU *x86_cpu = X86_CPU(cpu);
+ CPUArchState *env = &x86_cpu->env;
+ replay_interrupt();
+ cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0);
+ do_cpu_init(x86_cpu);
+ cpu->exception_index = EXCP_HALTED;
+ cpu_loop_exit(cpu);
+ }
+#else
+ else if (interrupt_request & CPU_INTERRUPT_RESET) {
+ replay_interrupt();
+ cpu_reset(cpu);
+ cpu_loop_exit(cpu);
+ }
+#endif
+ /* The target hook has 3 exit conditions:
+ False when the interrupt isn't processed,
+ True when it is, and we should restart on a new TB,
+ and via longjmp via cpu_loop_exit. */
+ else {
+ replay_interrupt();
+ if (cc->cpu_exec_interrupt(cpu, interrupt_request)) {
+ *last_tb = NULL;
+ }
+ /* The target hook may have updated the 'cpu->interrupt_request';
+ * reload the 'interrupt_request' value */
+ interrupt_request = cpu->interrupt_request;
+ }
+ if (interrupt_request & CPU_INTERRUPT_EXITTB) {
+ cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
+ /* ensure that no TB jump will be modified as
+ the program flow was changed */
+ *last_tb = NULL;
+ }
+ }
+ if (unlikely(cpu->exit_request || replay_has_interrupt())) {
+ cpu->exit_request = 0;
+ cpu->exception_index = EXCP_INTERRUPT;
+ cpu_loop_exit(cpu);
+ }
+}
+
+static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
+ TranslationBlock **last_tb, int *tb_exit,
+ SyncClocks *sc)
+{
+ uintptr_t ret;
+
+ if (unlikely(cpu->exit_request)) {
+ return;
+ }
+
+ trace_exec_tb(tb, tb->pc);
+ ret = cpu_tb_exec(cpu, tb);
+ *last_tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
+ *tb_exit = ret & TB_EXIT_MASK;
+ switch (*tb_exit) {
+ case TB_EXIT_REQUESTED:
+ /* Something asked us to stop executing
+ * chained TBs; just continue round the main
+ * loop. Whatever requested the exit will also
+ * have set something else (eg exit_request or
+ * interrupt_request) which we will handle
+ * next time around the loop. But we need to
+ * ensure the tcg_exit_req read in generated code
+ * comes before the next read of cpu->exit_request
+ * or cpu->interrupt_request.
+ */
+ smp_rmb();
+ *last_tb = NULL;
+ break;
+ case TB_EXIT_ICOUNT_EXPIRED:
+ {
+ /* Instruction counter expired. */
+#ifdef CONFIG_USER_ONLY
+ abort();
+#else
+ int insns_left = cpu->icount_decr.u32;
+ if (cpu->icount_extra && insns_left >= 0) {
+ /* Refill decrementer and continue execution. */
+ cpu->icount_extra += insns_left;
+ insns_left = MIN(0xffff, cpu->icount_extra);
+ cpu->icount_extra -= insns_left;
+ cpu->icount_decr.u16.low = insns_left;
+ } else {
+ if (insns_left > 0) {
+ /* Execute remaining instructions. */
+ cpu_exec_nocache(cpu, insns_left, *last_tb, false);
+ align_clocks(sc, cpu);
+ }
+ cpu->exception_index = EXCP_INTERRUPT;
+ *last_tb = NULL;
+ cpu_loop_exit(cpu);
+ }
+ break;
+#endif
+ }
+ default:
+ break;
+ }
+}
+
/* main execution loop */
int cpu_exec(CPUState *cpu)
{
CPUClass *cc = CPU_GET_CLASS(cpu);
-#ifdef TARGET_I386
- X86CPU *x86_cpu = X86_CPU(cpu);
- CPUArchState *env = &x86_cpu->env;
-#endif
- int ret, interrupt_request;
- TranslationBlock *tb;
- uintptr_t next_tb;
+ int ret;
SyncClocks sc;
/* replay_interrupt may need current_cpu */
current_cpu = cpu;
- if (cpu->halted) {
-#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
- if ((cpu->interrupt_request & CPU_INTERRUPT_POLL)
- && replay_interrupt()) {
- apic_poll_irq(x86_cpu->apic_state);
- cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
- }
-#endif
- if (!cpu_has_work(cpu)) {
- current_cpu = NULL;
- return EXCP_HALTED;
- }
-
- cpu->halted = 0;
+ if (cpu_handle_halt(cpu)) {
+ return EXCP_HALTED;
}
atomic_mb_set(&tcg_current_cpu, cpu);
@@ -391,185 +597,26 @@ int cpu_exec(CPUState *cpu)
*/
init_delay_params(&sc, cpu);
- /* prepare setjmp context for exception handling */
for(;;) {
+ TranslationBlock *tb, *last_tb;
+ int tb_exit = 0;
+
+ /* prepare setjmp context for exception handling */
if (sigsetjmp(cpu->jmp_env, 0) == 0) {
/* if an exception is pending, we execute it here */
- if (cpu->exception_index >= 0) {
- if (cpu->exception_index >= EXCP_INTERRUPT) {
- /* exit request from the cpu execution loop */
- ret = cpu->exception_index;
- if (ret == EXCP_DEBUG) {
- cpu_handle_debug_exception(cpu);
- }
- cpu->exception_index = -1;
- break;
- } else {
-#if defined(CONFIG_USER_ONLY)
- /* if user mode only, we simulate a fake exception
- which will be handled outside the cpu execution
- loop */
-#if defined(TARGET_I386)
- cc->do_interrupt(cpu);
-#endif
- ret = cpu->exception_index;
- cpu->exception_index = -1;
- break;
-#else
- if (replay_exception()) {
- cc->do_interrupt(cpu);
- cpu->exception_index = -1;
- } else if (!replay_has_interrupt()) {
- /* give a chance to iothread in replay mode */
- ret = EXCP_INTERRUPT;
- break;
- }
-#endif
- }
- } else if (replay_has_exception()
- && cpu->icount_decr.u16.low + cpu->icount_extra == 0) {
- /* try to cause an exception pending in the log */
- cpu_exec_nocache(cpu, 1, tb_find_fast(cpu), true);
- ret = -1;
+ if (cpu_handle_exception(cpu, &ret)) {
break;
}
- next_tb = 0; /* force lookup of first TB */
+ last_tb = NULL; /* forget the last executed TB after exception */
+ cpu->tb_flushed = false; /* reset before first TB lookup */
for(;;) {
- interrupt_request = cpu->interrupt_request;
- if (unlikely(interrupt_request)) {
- if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
- /* Mask out external interrupts for this step. */
- interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
- }
- if (interrupt_request & CPU_INTERRUPT_DEBUG) {
- cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
- cpu->exception_index = EXCP_DEBUG;
- cpu_loop_exit(cpu);
- }
- if (replay_mode == REPLAY_MODE_PLAY
- && !replay_has_interrupt()) {
- /* Do nothing */
- } else if (interrupt_request & CPU_INTERRUPT_HALT) {
- replay_interrupt();
- cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
- cpu->halted = 1;
- cpu->exception_index = EXCP_HLT;
- cpu_loop_exit(cpu);
- }
-#if defined(TARGET_I386)
- else if (interrupt_request & CPU_INTERRUPT_INIT) {
- replay_interrupt();
- cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0);
- do_cpu_init(x86_cpu);
- cpu->exception_index = EXCP_HALTED;
- cpu_loop_exit(cpu);
- }
-#else
- else if (interrupt_request & CPU_INTERRUPT_RESET) {
- replay_interrupt();
- cpu_reset(cpu);
- cpu_loop_exit(cpu);
- }
-#endif
- /* The target hook has 3 exit conditions:
- False when the interrupt isn't processed,
- True when it is, and we should restart on a new TB,
- and via longjmp via cpu_loop_exit. */
- else {
- replay_interrupt();
- if (cc->cpu_exec_interrupt(cpu, interrupt_request)) {
- next_tb = 0;
- }
- }
- /* Don't use the cached interrupt_request value,
- do_interrupt may have updated the EXITTB flag. */
- if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
- cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
- /* ensure that no TB jump will be modified as
- the program flow was changed */
- next_tb = 0;
- }
- }
- if (unlikely(cpu->exit_request
- || replay_has_interrupt())) {
- cpu->exit_request = 0;
- cpu->exception_index = EXCP_INTERRUPT;
- cpu_loop_exit(cpu);
- }
- tb_lock();
- tb = tb_find_fast(cpu);
- /* Note: we do it here to avoid a gcc bug on Mac OS X when
- doing it in tb_find_slow */
- if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
- /* as some TB could have been invalidated because
- of memory exceptions while generating the code, we
- must recompute the hash index here */
- next_tb = 0;
- tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
- }
- /* see if we can patch the calling TB. When the TB
- spans two pages, we cannot safely do a direct
- jump. */
- if (next_tb != 0 && tb->page_addr[1] == -1
- && !qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
- tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
- next_tb & TB_EXIT_MASK, tb);
- }
- tb_unlock();
- if (likely(!cpu->exit_request)) {
- trace_exec_tb(tb, tb->pc);
- /* execute the generated code */
- cpu->current_tb = tb;
- next_tb = cpu_tb_exec(cpu, tb);
- cpu->current_tb = NULL;
- switch (next_tb & TB_EXIT_MASK) {
- case TB_EXIT_REQUESTED:
- /* Something asked us to stop executing
- * chained TBs; just continue round the main
- * loop. Whatever requested the exit will also
- * have set something else (eg exit_request or
- * interrupt_request) which we will handle
- * next time around the loop. But we need to
- * ensure the tcg_exit_req read in generated code
- * comes before the next read of cpu->exit_request
- * or cpu->interrupt_request.
- */
- smp_rmb();
- next_tb = 0;
- break;
- case TB_EXIT_ICOUNT_EXPIRED:
- {
- /* Instruction counter expired. */
- int insns_left = cpu->icount_decr.u32;
- if (cpu->icount_extra && insns_left >= 0) {
- /* Refill decrementer and continue execution. */
- cpu->icount_extra += insns_left;
- insns_left = MIN(0xffff, cpu->icount_extra);
- cpu->icount_extra -= insns_left;
- cpu->icount_decr.u16.low = insns_left;
- } else {
- if (insns_left > 0) {
- /* Execute remaining instructions. */
- tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
- cpu_exec_nocache(cpu, insns_left, tb, false);
- align_clocks(&sc, cpu);
- }
- cpu->exception_index = EXCP_INTERRUPT;
- next_tb = 0;
- cpu_loop_exit(cpu);
- }
- break;
- }
- default:
- break;
- }
- }
+ cpu_handle_interrupt(cpu, &last_tb);
+ tb = tb_find_fast(cpu, &last_tb, tb_exit);
+ cpu_loop_exec_tb(cpu, tb, &last_tb, &tb_exit, &sc);
/* Try to align the host and virtual clocks
if the guest is in advance */
align_clocks(&sc, cpu);
- /* reset soft MMU for next block (it can currently
- only be set by a memory fault) */
} /* for(;;) */
} else {
#if defined(__clang__) || !QEMU_GNUC_PREREQ(4, 6)
@@ -579,18 +626,10 @@ int cpu_exec(CPUState *cpu)
* Newer versions of gcc would complain about this code (-Wclobbered). */
cpu = current_cpu;
cc = CPU_GET_CLASS(cpu);
-#ifdef TARGET_I386
- x86_cpu = X86_CPU(cpu);
- env = &x86_cpu->env;
-#endif
#else /* buggy compiler */
/* Assert that the compiler does not smash local variables. */
g_assert(cpu == current_cpu);
g_assert(cc == CPU_GET_CLASS(cpu));
-#ifdef TARGET_I386
- g_assert(x86_cpu == X86_CPU(cpu));
- g_assert(env == &x86_cpu->env);
-#endif
#endif /* buggy compiler */
cpu->can_do_io = 1;
tb_lock_reset();
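One detail worth calling out in the cpu-exec.c rework: tcg_qemu_tb_exec() returns the last executed TB pointer with the exit reason packed into its low bits, and the refactored code unpacks that once into last_tb/tb_exit instead of threading the raw next_tb value through the loop. A tiny standalone sketch of the tagged-pointer convention (illustrative stand-in types; in QEMU the mask is TB_EXIT_MASK and the pointer is a TranslationBlock *):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define EXIT_MASK 3              /* low two bits carry the exit reason */

    struct tb { int dummy; };        /* stand-in for TranslationBlock */

    int main(void)
    {
        static struct tb some_tb;    /* at least 4-byte aligned on common ABIs */
        uintptr_t ret = (uintptr_t)&some_tb | 3;   /* pointer ORed with exit code */

        struct tb *last_tb = (struct tb *)(ret & ~(uintptr_t)EXIT_MASK);
        int tb_exit = ret & EXIT_MASK;

        assert(last_tb == &some_tb);
        printf("tb=%p exit=%d\n", (void *)last_tb, tb_exit);
        return 0;
    }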
diff --git a/cputlb.c b/cputlb.c
index 466663b56c..167280ae96 100644
--- a/cputlb.c
+++ b/cputlb.c
@@ -76,10 +76,6 @@ void tlb_flush(CPUState *cpu, int flush_global)
tlb_debug("(%d)\n", flush_global);
- /* must reset current TB so that interrupts cannot modify the
- links while we are modifying them */
- cpu->current_tb = NULL;
-
memset(env->tlb_table, -1, sizeof(env->tlb_table));
memset(env->tlb_v_table, -1, sizeof(env->tlb_v_table));
memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
@@ -95,9 +91,6 @@ static inline void v_tlb_flush_by_mmuidx(CPUState *cpu, va_list argp)
CPUArchState *env = cpu->env_ptr;
tlb_debug("start\n");
- /* must reset current TB so that interrupts cannot modify the
- links while we are modifying them */
- cpu->current_tb = NULL;
for (;;) {
int mmu_idx = va_arg(argp, int);
@@ -152,9 +145,6 @@ void tlb_flush_page(CPUState *cpu, target_ulong addr)
tlb_flush(cpu, 1);
return;
}
- /* must reset current TB so that interrupts cannot modify the
- links while we are modifying them */
- cpu->current_tb = NULL;
addr &= TARGET_PAGE_MASK;
i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
@@ -193,9 +183,6 @@ void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, ...)
va_end(argp);
return;
}
- /* must reset current TB so that interrupts cannot modify the
- links while we are modifying them */
- cpu->current_tb = NULL;
addr &= TARGET_PAGE_MASK;
i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
diff --git a/default-configs/arm-softmmu.mak b/default-configs/arm-softmmu.mak
index c63cdd073d..c5bcba754a 100644
--- a/default-configs/arm-softmmu.mak
+++ b/default-configs/arm-softmmu.mak
@@ -100,6 +100,7 @@ CONFIG_ALLWINNER_A10_PIT=y
CONFIG_ALLWINNER_A10_PIC=y
CONFIG_ALLWINNER_A10=y
+CONFIG_FSL_IMX6=y
CONFIG_FSL_IMX31=y
CONFIG_FSL_IMX25=y
diff --git a/dma-helpers.c b/dma-helpers.c
index 4ad0bca67e..a6cc15f534 100644
--- a/dma-helpers.c
+++ b/dma-helpers.c
@@ -73,7 +73,7 @@ typedef struct {
BlockBackend *blk;
BlockAIOCB *acb;
QEMUSGList *sg;
- uint64_t sector_num;
+ uint64_t offset;
DMADirection dir;
int sg_cur_index;
dma_addr_t sg_cur_byte;
@@ -130,7 +130,7 @@ static void dma_blk_cb(void *opaque, int ret)
trace_dma_blk_cb(dbs, ret);
dbs->acb = NULL;
- dbs->sector_num += dbs->iov.size / 512;
+ dbs->offset += dbs->iov.size;
if (dbs->sg_cur_index == dbs->sg->nsg || ret < 0) {
dma_complete(dbs, ret);
@@ -164,8 +164,8 @@ static void dma_blk_cb(void *opaque, int ret)
qemu_iovec_discard_back(&dbs->iov, dbs->iov.size & ~BDRV_SECTOR_MASK);
}
- dbs->acb = dbs->io_func(dbs->blk, dbs->sector_num, &dbs->iov,
- dbs->iov.size / 512, dma_blk_cb, dbs);
+ dbs->acb = dbs->io_func(dbs->blk, dbs->offset, &dbs->iov, 0,
+ dma_blk_cb, dbs);
assert(dbs->acb);
}
@@ -203,7 +203,7 @@ BlockAIOCB *dma_blk_io(
dbs->acb = NULL;
dbs->blk = blk;
dbs->sg = sg;
- dbs->sector_num = sector_num;
+ dbs->offset = sector_num << BDRV_SECTOR_BITS;
dbs->sg_cur_index = 0;
dbs->sg_cur_byte = 0;
dbs->dir = dir;
@@ -219,7 +219,7 @@ BlockAIOCB *dma_blk_read(BlockBackend *blk,
QEMUSGList *sg, uint64_t sector,
void (*cb)(void *opaque, int ret), void *opaque)
{
- return dma_blk_io(blk, sg, sector, blk_aio_readv, cb, opaque,
+ return dma_blk_io(blk, sg, sector, blk_aio_preadv, cb, opaque,
DMA_DIRECTION_FROM_DEVICE);
}
@@ -227,7 +227,7 @@ BlockAIOCB *dma_blk_write(BlockBackend *blk,
QEMUSGList *sg, uint64_t sector,
void (*cb)(void *opaque, int ret), void *opaque)
{
- return dma_blk_io(blk, sg, sector, blk_aio_writev, cb, opaque,
+ return dma_blk_io(blk, sg, sector, blk_aio_pwritev, cb, opaque,
DMA_DIRECTION_TO_DEVICE);
}
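dma-helpers now carries a byte offset instead of a sector number; the only sector arithmetic left is at the dma_blk_io() call boundary, where the caller's sector index is shifted by BDRV_SECTOR_BITS (9, i.e. 512-byte sectors). A trivial standalone reminder of that conversion:

    #include <stdint.h>
    #include <stdio.h>

    #define BDRV_SECTOR_BITS 9
    #define BDRV_SECTOR_SIZE (1ULL << BDRV_SECTOR_BITS)   /* 512 */

    int main(void)
    {
        uint64_t sector = 2048;
        uint64_t offset = sector << BDRV_SECTOR_BITS;     /* what dbs->offset stores */
        printf("sector %llu -> byte offset %llu\n",
               (unsigned long long)sector, (unsigned long long)offset);
        return 0;
    }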
diff --git a/docs/qapi-code-gen.txt b/docs/qapi-code-gen.txt
index 0e4bafff08..d7d6987821 100644
--- a/docs/qapi-code-gen.txt
+++ b/docs/qapi-code-gen.txt
@@ -899,10 +899,16 @@ Example:
goto out_obj;
}
visit_type_UserDefOne_members(v, *obj, &err);
- error_propagate(errp, err);
- err = NULL;
+ if (err) {
+ goto out_obj;
+ }
+ visit_check_struct(v, &err);
out_obj:
- visit_end_struct(v, &err);
+ visit_end_struct(v);
+ if (err && visit_is_input(v)) {
+ qapi_free_UserDefOne(*obj);
+ *obj = NULL;
+ }
out:
error_propagate(errp, err);
}
@@ -910,21 +916,27 @@ Example:
void visit_type_UserDefOneList(Visitor *v, const char *name, UserDefOneList **obj, Error **errp)
{
Error *err = NULL;
- GenericList *i, **prev;
+ UserDefOneList *tail;
+ size_t size = sizeof(**obj);
- visit_start_list(v, name, &err);
+ visit_start_list(v, name, (GenericList **)obj, size, &err);
if (err) {
goto out;
}
- for (prev = (GenericList **)obj;
- !err && (i = visit_next_list(v, prev, sizeof(**obj))) != NULL;
- prev = &i) {
- UserDefOneList *native_i = (UserDefOneList *)i;
- visit_type_UserDefOne(v, NULL, &native_i->value, &err);
+ for (tail = *obj; tail;
+ tail = (UserDefOneList *)visit_next_list(v, (GenericList *)tail, size)) {
+ visit_type_UserDefOne(v, NULL, &tail->value, &err);
+ if (err) {
+ break;
+ }
}
visit_end_list(v);
+ if (err && visit_is_input(v)) {
+ qapi_free_UserDefOneList(*obj);
+ *obj = NULL;
+ }
out:
error_propagate(errp, err);
}
@@ -996,13 +1008,21 @@ Example:
{
Error *err = NULL;
UserDefOne *retval;
- QmpInputVisitor *qiv = qmp_input_visitor_new_strict(QOBJECT(args));
+ QmpInputVisitor *qiv = qmp_input_visitor_new(QOBJECT(args), true);
QapiDeallocVisitor *qdv;
Visitor *v;
UserDefOneList *arg1 = NULL;
v = qmp_input_get_visitor(qiv);
+ visit_start_struct(v, NULL, NULL, 0, &err);
+ if (err) {
+ goto out;
+ }
visit_type_UserDefOneList(v, "arg1", &arg1, &err);
+ if (!err) {
+ visit_check_struct(v, &err);
+ }
+ visit_end_struct(v);
if (err) {
goto out;
}
@@ -1019,7 +1039,9 @@ Example:
qmp_input_visitor_cleanup(qiv);
qdv = qapi_dealloc_visitor_new();
v = qapi_dealloc_get_visitor(qdv);
+ visit_start_struct(v, NULL, NULL, 0, NULL);
visit_type_UserDefOneList(v, "arg1", &arg1, NULL);
+ visit_end_struct(v);
qapi_dealloc_visitor_cleanup(qdv);
}
diff --git a/exec.c b/exec.c
index c4f9036184..ee45472cab 100644
--- a/exec.c
+++ b/exec.c
@@ -2087,7 +2087,7 @@ static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
target_ulong pc, cs_base;
target_ulong vaddr;
CPUWatchpoint *wp;
- int cpu_flags;
+ uint32_t cpu_flags;
if (cpu->watchpoint_hit) {
/* We re-entered the check after replacing the TB. Now raise
diff --git a/hw/acpi/aml-build.c b/hw/acpi/aml-build.c
index ab89ca6380..cedb74e7cf 100644
--- a/hw/acpi/aml-build.c
+++ b/hw/acpi/aml-build.c
@@ -1563,3 +1563,14 @@ build_rsdt(GArray *table_data, GArray *linker, GArray *table_offsets,
build_header(linker, table_data,
(void *)rsdt, "RSDT", rsdt_len, 1, oem_id, oem_table_id);
}
+
+void build_srat_memory(AcpiSratMemoryAffinity *numamem, uint64_t base,
+ uint64_t len, int node, MemoryAffinityFlags flags)
+{
+ numamem->type = ACPI_SRAT_MEMORY;
+ numamem->length = sizeof(*numamem);
+ numamem->proximity = cpu_to_le32(node);
+ numamem->flags = cpu_to_le32(flags);
+ numamem->base_addr = cpu_to_le64(base);
+ numamem->range_length = cpu_to_le64(len);
+}
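build_srat_memory() above fills one ACPI SRAT Memory Affinity structure, converting every multi-byte field to little-endian so the table layout is host-independent. A standalone sketch of that idea with explicit little-endian stores; the field offsets follow the usual SRAT memory-affinity layout but are illustration only (the authoritative definition is AcpiSratMemoryAffinity in acpi-defs.h):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Hypothetical stand-ins for cpu_to_le32()/cpu_to_le64(). */
    static void store_le32(uint8_t *p, uint32_t v)
    {
        for (int i = 0; i < 4; i++) {
            p[i] = v >> (8 * i);
        }
    }

    static void store_le64(uint8_t *p, uint64_t v)
    {
        for (int i = 0; i < 8; i++) {
            p[i] = v >> (8 * i);
        }
    }

    int main(void)
    {
        uint8_t entry[40];                   /* one memory affinity entry */
        memset(entry, 0, sizeof(entry));
        entry[0] = 1;                        /* type: memory affinity */
        entry[1] = sizeof(entry);            /* length */
        store_le32(entry + 2, 0);            /* proximity domain (node 0) */
        store_le64(entry + 8, 0x40000000);   /* base address */
        store_le64(entry + 16, 0x40000000);  /* range length: 1 GiB */
        store_le32(entry + 28, 1);           /* flags: enabled */

        for (unsigned i = 0; i < sizeof(entry); i++) {
            printf("%02x%c", entry[i], (i % 16 == 15) ? '\n' : ' ');
        }
        printf("\n");
        return 0;
    }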
diff --git a/hw/arm/Makefile.objs b/hw/arm/Makefile.objs
index 954c9fe15e..12764ef2b7 100644
--- a/hw/arm/Makefile.objs
+++ b/hw/arm/Makefile.objs
@@ -16,4 +16,5 @@ obj-$(CONFIG_STM32F205_SOC) += stm32f205_soc.o
obj-$(CONFIG_XLNX_ZYNQMP) += xlnx-zynqmp.o xlnx-ep108.o
obj-$(CONFIG_FSL_IMX25) += fsl-imx25.o imx25_pdk.o
obj-$(CONFIG_FSL_IMX31) += fsl-imx31.o kzm.o
+obj-$(CONFIG_FSL_IMX6) += fsl-imx6.o sabrelite.o
obj-$(CONFIG_ASPEED_SOC) += ast2400.o palmetto-bmc.o
diff --git a/hw/arm/armv7m.c b/hw/arm/armv7m.c
index bb2a22d967..49d30782c8 100644
--- a/hw/arm/armv7m.c
+++ b/hw/arm/armv7m.c
@@ -132,14 +132,14 @@ typedef struct {
uint32_t base;
} BitBandState;
-static int bitband_init(SysBusDevice *dev)
+static void bitband_init(Object *obj)
{
- BitBandState *s = BITBAND(dev);
+ BitBandState *s = BITBAND(obj);
+ SysBusDevice *dev = SYS_BUS_DEVICE(obj);
- memory_region_init_io(&s->iomem, OBJECT(s), &bitband_ops, &s->base,
+ memory_region_init_io(&s->iomem, obj, &bitband_ops, &s->base,
"bitband", 0x02000000);
sysbus_init_mmio(dev, &s->iomem);
- return 0;
}
static void armv7m_bitband_init(void)
@@ -244,9 +244,7 @@ static Property bitband_properties[] = {
static void bitband_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);
- k->init = bitband_init;
dc->props = bitband_properties;
}
@@ -254,6 +252,7 @@ static const TypeInfo bitband_info = {
.name = TYPE_BITBAND,
.parent = TYPE_SYS_BUS_DEVICE,
.instance_size = sizeof(BitBandState),
+ .instance_init = bitband_init,
.class_init = bitband_class_init,
};
diff --git a/hw/arm/boot.c b/hw/arm/boot.c
index 5876945575..1b913a43ca 100644
--- a/hw/arm/boot.c
+++ b/hw/arm/boot.c
@@ -14,6 +14,7 @@
#include "hw/arm/linux-boot-if.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
+#include "sysemu/numa.h"
#include "hw/boards.h"
#include "hw/loader.h"
#include "elf.h"
@@ -405,6 +406,9 @@ static int load_dtb(hwaddr addr, const struct arm_boot_info *binfo,
void *fdt = NULL;
int size, rc;
uint32_t acells, scells;
+ char *nodename;
+ unsigned int i;
+ hwaddr mem_base, mem_len;
if (binfo->dtb_filename) {
char *filename;
@@ -456,12 +460,39 @@ static int load_dtb(hwaddr addr, const struct arm_boot_info *binfo,
goto fail;
}
- rc = qemu_fdt_setprop_sized_cells(fdt, "/memory", "reg",
- acells, binfo->loader_start,
- scells, binfo->ram_size);
- if (rc < 0) {
- fprintf(stderr, "couldn't set /memory/reg\n");
- goto fail;
+ if (nb_numa_nodes > 0) {
+ /*
+ * Turn the /memory node created before into a NOP node, then create
+ * /memory@addr nodes for each NUMA node.
+ */
+ qemu_fdt_nop_node(fdt, "/memory");
+ mem_base = binfo->loader_start;
+ for (i = 0; i < nb_numa_nodes; i++) {
+ mem_len = numa_info[i].node_mem;
+ nodename = g_strdup_printf("/memory@%" PRIx64, mem_base);
+ qemu_fdt_add_subnode(fdt, nodename);
+ qemu_fdt_setprop_string(fdt, nodename, "device_type", "memory");
+ rc = qemu_fdt_setprop_sized_cells(fdt, nodename, "reg",
+ acells, mem_base,
+ scells, mem_len);
+ if (rc < 0) {
+ fprintf(stderr, "couldn't set %s/reg for node %d\n", nodename,
+ i);
+ goto fail;
+ }
+
+ qemu_fdt_setprop_cell(fdt, nodename, "numa-node-id", i);
+ mem_base += mem_len;
+ g_free(nodename);
+ }
+ } else {
+ rc = qemu_fdt_setprop_sized_cells(fdt, "/memory", "reg",
+ acells, binfo->loader_start,
+ scells, binfo->ram_size);
+ if (rc < 0) {
+ fprintf(stderr, "couldn't set /memory/reg\n");
+ goto fail;
+ }
}
if (binfo->kernel_cmdline && *binfo->kernel_cmdline) {
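With the boot.c change above, a machine started with NUMA nodes gets one /memory@<base> node per node, each carrying a numa-node-id property; the bases simply accumulate the per-node sizes starting at binfo->loader_start. A standalone sketch of that address layout with made-up example sizes (the real code takes them from numa_info[i].node_mem and writes the properties with qemu_fdt_setprop_*):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Example only: three 1 GiB nodes, RAM starting at 1 GiB. */
        uint64_t loader_start = 0x40000000;
        uint64_t node_mem[3] = { 0x40000000, 0x40000000, 0x40000000 };
        uint64_t mem_base = loader_start;

        for (unsigned i = 0; i < 3; i++) {
            printf("/memory@%llx: reg = <0x%llx 0x%llx>, numa-node-id = %u\n",
                   (unsigned long long)mem_base,
                   (unsigned long long)mem_base,
                   (unsigned long long)node_mem[i], i);
            mem_base += node_mem[i];
        }
        return 0;
    }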
diff --git a/hw/arm/fsl-imx6.c b/hw/arm/fsl-imx6.c
new file mode 100644
index 0000000000..a5331bfd33
--- /dev/null
+++ b/hw/arm/fsl-imx6.c
@@ -0,0 +1,449 @@
+/*
+ * Copyright (c) 2015 Jean-Christophe Dubois <jcd@tribudubois.net>
+ *
+ * i.MX6 SOC emulation.
+ *
+ * Based on hw/arm/fsl-imx31.c
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "qemu-common.h"
+#include "hw/arm/fsl-imx6.h"
+#include "sysemu/sysemu.h"
+#include "sysemu/char.h"
+#include "qemu/error-report.h"
+
+#define NAME_SIZE 20
+
+static void fsl_imx6_init(Object *obj)
+{
+ FslIMX6State *s = FSL_IMX6(obj);
+ char name[NAME_SIZE];
+ int i;
+
+ if (smp_cpus > FSL_IMX6_NUM_CPUS) {
+ error_report("%s: Only %d CPUs are supported (%d requested)",
+ TYPE_FSL_IMX6, FSL_IMX6_NUM_CPUS, smp_cpus);
+ exit(1);
+ }
+
+ for (i = 0; i < smp_cpus; i++) {
+ object_initialize(&s->cpu[i], sizeof(s->cpu[i]),
+ "cortex-a9-" TYPE_ARM_CPU);
+ snprintf(name, NAME_SIZE, "cpu%d", i);
+ object_property_add_child(obj, name, OBJECT(&s->cpu[i]), NULL);
+ }
+
+ object_initialize(&s->a9mpcore, sizeof(s->a9mpcore), TYPE_A9MPCORE_PRIV);
+ qdev_set_parent_bus(DEVICE(&s->a9mpcore), sysbus_get_default());
+ object_property_add_child(obj, "a9mpcore", OBJECT(&s->a9mpcore), NULL);
+
+ object_initialize(&s->ccm, sizeof(s->ccm), TYPE_IMX6_CCM);
+ qdev_set_parent_bus(DEVICE(&s->ccm), sysbus_get_default());
+ object_property_add_child(obj, "ccm", OBJECT(&s->ccm), NULL);
+
+ object_initialize(&s->src, sizeof(s->src), TYPE_IMX6_SRC);
+ qdev_set_parent_bus(DEVICE(&s->src), sysbus_get_default());
+ object_property_add_child(obj, "src", OBJECT(&s->src), NULL);
+
+ for (i = 0; i < FSL_IMX6_NUM_UARTS; i++) {
+ object_initialize(&s->uart[i], sizeof(s->uart[i]), TYPE_IMX_SERIAL);
+ qdev_set_parent_bus(DEVICE(&s->uart[i]), sysbus_get_default());
+ snprintf(name, NAME_SIZE, "uart%d", i + 1);
+ object_property_add_child(obj, name, OBJECT(&s->uart[i]), NULL);
+ }
+
+ object_initialize(&s->gpt, sizeof(s->gpt), TYPE_IMX_GPT);
+ qdev_set_parent_bus(DEVICE(&s->gpt), sysbus_get_default());
+ object_property_add_child(obj, "gpt", OBJECT(&s->gpt), NULL);
+
+ for (i = 0; i < FSL_IMX6_NUM_EPITS; i++) {
+ object_initialize(&s->epit[i], sizeof(s->epit[i]), TYPE_IMX_EPIT);
+ qdev_set_parent_bus(DEVICE(&s->epit[i]), sysbus_get_default());
+ snprintf(name, NAME_SIZE, "epit%d", i + 1);
+ object_property_add_child(obj, name, OBJECT(&s->epit[i]), NULL);
+ }
+
+ for (i = 0; i < FSL_IMX6_NUM_I2CS; i++) {
+ object_initialize(&s->i2c[i], sizeof(s->i2c[i]), TYPE_IMX_I2C);
+ qdev_set_parent_bus(DEVICE(&s->i2c[i]), sysbus_get_default());
+ snprintf(name, NAME_SIZE, "i2c%d", i + 1);
+ object_property_add_child(obj, name, OBJECT(&s->i2c[i]), NULL);
+ }
+
+ for (i = 0; i < FSL_IMX6_NUM_GPIOS; i++) {
+ object_initialize(&s->gpio[i], sizeof(s->gpio[i]), TYPE_IMX_GPIO);
+ qdev_set_parent_bus(DEVICE(&s->gpio[i]), sysbus_get_default());
+ snprintf(name, NAME_SIZE, "gpio%d", i + 1);
+ object_property_add_child(obj, name, OBJECT(&s->gpio[i]), NULL);
+ }
+
+ for (i = 0; i < FSL_IMX6_NUM_ESDHCS; i++) {
+ object_initialize(&s->esdhc[i], sizeof(s->esdhc[i]), TYPE_SYSBUS_SDHCI);
+ qdev_set_parent_bus(DEVICE(&s->esdhc[i]), sysbus_get_default());
+ snprintf(name, NAME_SIZE, "sdhc%d", i + 1);
+ object_property_add_child(obj, name, OBJECT(&s->esdhc[i]), NULL);
+ }
+
+ for (i = 0; i < FSL_IMX6_NUM_ECSPIS; i++) {
+ object_initialize(&s->spi[i], sizeof(s->spi[i]), TYPE_IMX_SPI);
+ qdev_set_parent_bus(DEVICE(&s->spi[i]), sysbus_get_default());
+ snprintf(name, NAME_SIZE, "spi%d", i + 1);
+ object_property_add_child(obj, name, OBJECT(&s->spi[i]), NULL);
+ }
+}
+
+static void fsl_imx6_realize(DeviceState *dev, Error **errp)
+{
+ FslIMX6State *s = FSL_IMX6(dev);
+ uint16_t i;
+ Error *err = NULL;
+
+ for (i = 0; i < smp_cpus; i++) {
+
+ /* On uniprocessor, the CBAR is set to 0 */
+ if (smp_cpus > 1) {
+ object_property_set_int(OBJECT(&s->cpu[i]), FSL_IMX6_A9MPCORE_ADDR,
+ "reset-cbar", &error_abort);
+ }
+
+ /* All CPUs but CPU 0 start in power-off mode */
+ if (i) {
+ object_property_set_bool(OBJECT(&s->cpu[i]), true,
+ "start-powered-off", &error_abort);
+ }
+
+ object_property_set_bool(OBJECT(&s->cpu[i]), true, "realized", &err);
+ if (err) {
+ error_propagate(errp, err);
+ return;
+ }
+ }
+
+ object_property_set_int(OBJECT(&s->a9mpcore), smp_cpus, "num-cpu",
+ &error_abort);
+
+ object_property_set_int(OBJECT(&s->a9mpcore),
+ FSL_IMX6_MAX_IRQ + GIC_INTERNAL, "num-irq",
+ &error_abort);
+
+ object_property_set_bool(OBJECT(&s->a9mpcore), true, "realized", &err);
+ if (err) {
+ error_propagate(errp, err);
+ return;
+ }
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->a9mpcore), 0, FSL_IMX6_A9MPCORE_ADDR);
+
+ for (i = 0; i < smp_cpus; i++) {
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->a9mpcore), i,
+ qdev_get_gpio_in(DEVICE(&s->cpu[i]), ARM_CPU_IRQ));
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->a9mpcore), i + smp_cpus,
+ qdev_get_gpio_in(DEVICE(&s->cpu[i]), ARM_CPU_FIQ));
+ }
+
+ object_property_set_bool(OBJECT(&s->ccm), true, "realized", &err);
+ if (err) {
+ error_propagate(errp, err);
+ return;
+ }
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->ccm), 0, FSL_IMX6_CCM_ADDR);
+
+ object_property_set_bool(OBJECT(&s->src), true, "realized", &err);
+ if (err) {
+ error_propagate(errp, err);
+ return;
+ }
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->src), 0, FSL_IMX6_SRC_ADDR);
+
+ /* Initialize all UARTs */
+ for (i = 0; i < FSL_IMX6_NUM_UARTS; i++) {
+ static const struct {
+ hwaddr addr;
+ unsigned int irq;
+ } serial_table[FSL_IMX6_NUM_UARTS] = {
+ { FSL_IMX6_UART1_ADDR, FSL_IMX6_UART1_IRQ },
+ { FSL_IMX6_UART2_ADDR, FSL_IMX6_UART2_IRQ },
+ { FSL_IMX6_UART3_ADDR, FSL_IMX6_UART3_IRQ },
+ { FSL_IMX6_UART4_ADDR, FSL_IMX6_UART4_IRQ },
+ { FSL_IMX6_UART5_ADDR, FSL_IMX6_UART5_IRQ },
+ };
+
+ if (i < MAX_SERIAL_PORTS) {
+ CharDriverState *chr;
+
+ chr = serial_hds[i];
+
+ if (!chr) {
+ char *label = g_strdup_printf("imx6.uart%d", i + 1);
+ chr = qemu_chr_new(label, "null", NULL);
+ g_free(label);
+ serial_hds[i] = chr;
+ }
+
+ qdev_prop_set_chr(DEVICE(&s->uart[i]), "chardev", chr);
+ }
+
+ object_property_set_bool(OBJECT(&s->uart[i]), true, "realized", &err);
+ if (err) {
+ error_propagate(errp, err);
+ return;
+ }
+
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->uart[i]), 0, serial_table[i].addr);
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->uart[i]), 0,
+ qdev_get_gpio_in(DEVICE(&s->a9mpcore),
+ serial_table[i].irq));
+ }
+
+ s->gpt.ccm = IMX_CCM(&s->ccm);
+
+ object_property_set_bool(OBJECT(&s->gpt), true, "realized", &err);
+ if (err) {
+ error_propagate(errp, err);
+ return;
+ }
+
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->gpt), 0, FSL_IMX6_GPT_ADDR);
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->gpt), 0,
+ qdev_get_gpio_in(DEVICE(&s->a9mpcore),
+ FSL_IMX6_GPT_IRQ));
+
+ /* Initialize all EPIT timers */
+ for (i = 0; i < FSL_IMX6_NUM_EPITS; i++) {
+ static const struct {
+ hwaddr addr;
+ unsigned int irq;
+ } epit_table[FSL_IMX6_NUM_EPITS] = {
+ { FSL_IMX6_EPIT1_ADDR, FSL_IMX6_EPIT1_IRQ },
+ { FSL_IMX6_EPIT2_ADDR, FSL_IMX6_EPIT2_IRQ },
+ };
+
+ s->epit[i].ccm = IMX_CCM(&s->ccm);
+
+ object_property_set_bool(OBJECT(&s->epit[i]), true, "realized", &err);
+ if (err) {
+ error_propagate(errp, err);
+ return;
+ }
+
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->epit[i]), 0, epit_table[i].addr);
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->epit[i]), 0,
+ qdev_get_gpio_in(DEVICE(&s->a9mpcore),
+ epit_table[i].irq));
+ }
+
+ /* Initialize all I2C */
+ for (i = 0; i < FSL_IMX6_NUM_I2CS; i++) {
+ static const struct {
+ hwaddr addr;
+ unsigned int irq;
+ } i2c_table[FSL_IMX6_NUM_I2CS] = {
+ { FSL_IMX6_I2C1_ADDR, FSL_IMX6_I2C1_IRQ },
+ { FSL_IMX6_I2C2_ADDR, FSL_IMX6_I2C2_IRQ },
+ { FSL_IMX6_I2C3_ADDR, FSL_IMX6_I2C3_IRQ }
+ };
+
+ object_property_set_bool(OBJECT(&s->i2c[i]), true, "realized", &err);
+ if (err) {
+ error_propagate(errp, err);
+ return;
+ }
+
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->i2c[i]), 0, i2c_table[i].addr);
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->i2c[i]), 0,
+ qdev_get_gpio_in(DEVICE(&s->a9mpcore),
+ i2c_table[i].irq));
+ }
+
+ /* Initialize all GPIOs */
+ for (i = 0; i < FSL_IMX6_NUM_GPIOS; i++) {
+ static const struct {
+ hwaddr addr;
+ unsigned int irq_low;
+ unsigned int irq_high;
+ } gpio_table[FSL_IMX6_NUM_GPIOS] = {
+ {
+ FSL_IMX6_GPIO1_ADDR,
+ FSL_IMX6_GPIO1_LOW_IRQ,
+ FSL_IMX6_GPIO1_HIGH_IRQ
+ },
+ {
+ FSL_IMX6_GPIO2_ADDR,
+ FSL_IMX6_GPIO2_LOW_IRQ,
+ FSL_IMX6_GPIO2_HIGH_IRQ
+ },
+ {
+ FSL_IMX6_GPIO3_ADDR,
+ FSL_IMX6_GPIO3_LOW_IRQ,
+ FSL_IMX6_GPIO3_HIGH_IRQ
+ },
+ {
+ FSL_IMX6_GPIO4_ADDR,
+ FSL_IMX6_GPIO4_LOW_IRQ,
+ FSL_IMX6_GPIO4_HIGH_IRQ
+ },
+ {
+ FSL_IMX6_GPIO5_ADDR,
+ FSL_IMX6_GPIO5_LOW_IRQ,
+ FSL_IMX6_GPIO5_HIGH_IRQ
+ },
+ {
+ FSL_IMX6_GPIO6_ADDR,
+ FSL_IMX6_GPIO6_LOW_IRQ,
+ FSL_IMX6_GPIO6_HIGH_IRQ
+ },
+ {
+ FSL_IMX6_GPIO7_ADDR,
+ FSL_IMX6_GPIO7_LOW_IRQ,
+ FSL_IMX6_GPIO7_HIGH_IRQ
+ },
+ };
+
+ object_property_set_bool(OBJECT(&s->gpio[i]), true, "has-edge-sel",
+ &error_abort);
+ object_property_set_bool(OBJECT(&s->gpio[i]), true, "has-upper-pin-irq",
+ &error_abort);
+ object_property_set_bool(OBJECT(&s->gpio[i]), true, "realized", &err);
+ if (err) {
+ error_propagate(errp, err);
+ return;
+ }
+
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->gpio[i]), 0, gpio_table[i].addr);
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->gpio[i]), 0,
+ qdev_get_gpio_in(DEVICE(&s->a9mpcore),
+ gpio_table[i].irq_low));
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->gpio[i]), 1,
+ qdev_get_gpio_in(DEVICE(&s->a9mpcore),
+ gpio_table[i].irq_high));
+ }
+
+ /* Initialize all SDHC */
+ for (i = 0; i < FSL_IMX6_NUM_ESDHCS; i++) {
+ static const struct {
+ hwaddr addr;
+ unsigned int irq;
+ } esdhc_table[FSL_IMX6_NUM_ESDHCS] = {
+ { FSL_IMX6_uSDHC1_ADDR, FSL_IMX6_uSDHC1_IRQ },
+ { FSL_IMX6_uSDHC2_ADDR, FSL_IMX6_uSDHC2_IRQ },
+ { FSL_IMX6_uSDHC3_ADDR, FSL_IMX6_uSDHC3_IRQ },
+ { FSL_IMX6_uSDHC4_ADDR, FSL_IMX6_uSDHC4_IRQ },
+ };
+
+ object_property_set_bool(OBJECT(&s->esdhc[i]), true, "realized", &err);
+ if (err) {
+ error_propagate(errp, err);
+ return;
+ }
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->esdhc[i]), 0, esdhc_table[i].addr);
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->esdhc[i]), 0,
+ qdev_get_gpio_in(DEVICE(&s->a9mpcore),
+ esdhc_table[i].irq));
+ }
+
+ /* Initialize all ECSPI */
+ for (i = 0; i < FSL_IMX6_NUM_ECSPIS; i++) {
+ static const struct {
+ hwaddr addr;
+ unsigned int irq;
+ } spi_table[FSL_IMX6_NUM_ECSPIS] = {
+ { FSL_IMX6_eCSPI1_ADDR, FSL_IMX6_ECSPI1_IRQ },
+ { FSL_IMX6_eCSPI2_ADDR, FSL_IMX6_ECSPI2_IRQ },
+ { FSL_IMX6_eCSPI3_ADDR, FSL_IMX6_ECSPI3_IRQ },
+ { FSL_IMX6_eCSPI4_ADDR, FSL_IMX6_ECSPI4_IRQ },
+ { FSL_IMX6_eCSPI5_ADDR, FSL_IMX6_ECSPI5_IRQ },
+ };
+
+ /* Initialize the SPI */
+ object_property_set_bool(OBJECT(&s->spi[i]), true, "realized", &err);
+ if (err) {
+ error_propagate(errp, err);
+ return;
+ }
+
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->spi[i]), 0, spi_table[i].addr);
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->spi[i]), 0,
+ qdev_get_gpio_in(DEVICE(&s->a9mpcore),
+ spi_table[i].irq));
+ }
+
+ /* ROM memory */
+ memory_region_init_rom_device(&s->rom, NULL, NULL, NULL, "imx6.rom",
+ FSL_IMX6_ROM_SIZE, &err);
+ if (err) {
+ error_propagate(errp, err);
+ return;
+ }
+ memory_region_add_subregion(get_system_memory(), FSL_IMX6_ROM_ADDR,
+ &s->rom);
+
+ /* CAAM memory */
+ memory_region_init_rom_device(&s->caam, NULL, NULL, NULL, "imx6.caam",
+ FSL_IMX6_CAAM_MEM_SIZE, &err);
+ if (err) {
+ error_propagate(errp, err);
+ return;
+ }
+ memory_region_add_subregion(get_system_memory(), FSL_IMX6_CAAM_MEM_ADDR,
+ &s->caam);
+
+ /* OCRAM memory */
+ memory_region_init_ram(&s->ocram, NULL, "imx6.ocram", FSL_IMX6_OCRAM_SIZE,
+ &err);
+ if (err) {
+ error_propagate(errp, err);
+ return;
+ }
+ memory_region_add_subregion(get_system_memory(), FSL_IMX6_OCRAM_ADDR,
+ &s->ocram);
+ vmstate_register_ram_global(&s->ocram);
+
+ /* internal OCRAM (256 KB) is aliased over 1 MB */
+ memory_region_init_alias(&s->ocram_alias, NULL, "imx6.ocram_alias",
+ &s->ocram, 0, FSL_IMX6_OCRAM_ALIAS_SIZE);
+ memory_region_add_subregion(get_system_memory(), FSL_IMX6_OCRAM_ALIAS_ADDR,
+ &s->ocram_alias);
+}
+
+static void fsl_imx6_class_init(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+
+ dc->realize = fsl_imx6_realize;
+
+ /*
+ * Reason: creates an ARM CPU, thus use after free(), see
+ * arm_cpu_class_init()
+ */
+ dc->cannot_destroy_with_object_finalize_yet = true;
+ dc->desc = "i.MX6 SOC";
+}
+
+static const TypeInfo fsl_imx6_type_info = {
+ .name = TYPE_FSL_IMX6,
+ .parent = TYPE_DEVICE,
+ .instance_size = sizeof(FslIMX6State),
+ .instance_init = fsl_imx6_init,
+ .class_init = fsl_imx6_class_init,
+};
+
+static void fsl_imx6_register_types(void)
+{
+ type_register_static(&fsl_imx6_type_info);
+}
+
+type_init(fsl_imx6_register_types)
diff --git a/hw/arm/highbank.c b/hw/arm/highbank.c
index d9930c0d34..41029a651d 100644
--- a/hw/arm/highbank.c
+++ b/hw/arm/highbank.c
@@ -168,23 +168,20 @@ static void highbank_regs_reset(DeviceState *dev)
s->regs[0x43] = 0x05F40121;
}
-static int highbank_regs_init(SysBusDevice *dev)
+static void highbank_regs_init(Object *obj)
{
- HighbankRegsState *s = HIGHBANK_REGISTERS(dev);
+ HighbankRegsState *s = HIGHBANK_REGISTERS(obj);
+ SysBusDevice *dev = SYS_BUS_DEVICE(obj);
- memory_region_init_io(&s->iomem, OBJECT(s), &hb_mem_ops, s->regs,
+ memory_region_init_io(&s->iomem, obj, &hb_mem_ops, s->regs,
"highbank_regs", 0x1000);
sysbus_init_mmio(dev, &s->iomem);
-
- return 0;
}
static void highbank_regs_class_init(ObjectClass *klass, void *data)
{
- SysBusDeviceClass *sbc = SYS_BUS_DEVICE_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
- sbc->init = highbank_regs_init;
dc->desc = "Calxeda Highbank registers";
dc->vmsd = &vmstate_highbank_regs;
dc->reset = highbank_regs_reset;
@@ -194,6 +191,7 @@ static const TypeInfo highbank_regs_info = {
.name = TYPE_HIGHBANK_REGISTERS,
.parent = TYPE_SYS_BUS_DEVICE,
.instance_size = sizeof(HighbankRegsState),
+ .instance_init = highbank_regs_init,
.class_init = highbank_regs_class_init,
};
diff --git a/hw/arm/integratorcp.c b/hw/arm/integratorcp.c
index e31bca6e72..24f16874f9 100644
--- a/hw/arm/integratorcp.c
+++ b/hw/arm/integratorcp.c
@@ -242,9 +242,10 @@ static const MemoryRegionOps integratorcm_ops = {
.endianness = DEVICE_NATIVE_ENDIAN,
};
-static int integratorcm_init(SysBusDevice *dev)
+static void integratorcm_init(Object *obj)
{
- IntegratorCMState *s = INTEGRATOR_CM(dev);
+ IntegratorCMState *s = INTEGRATOR_CM(obj);
+ SysBusDevice *dev = SYS_BUS_DEVICE(obj);
s->cm_osc = 0x01000048;
/* ??? What should the high bits of this value be? */
@@ -269,17 +270,16 @@ static int integratorcm_init(SysBusDevice *dev)
s->cm_init = 0x00000112;
s->cm_refcnt_offset = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), 24,
1000);
- memory_region_init_ram(&s->flash, OBJECT(s), "integrator.flash", 0x100000,
+ memory_region_init_ram(&s->flash, obj, "integrator.flash", 0x100000,
&error_fatal);
vmstate_register_ram_global(&s->flash);
- memory_region_init_io(&s->iomem, OBJECT(s), &integratorcm_ops, s,
+ memory_region_init_io(&s->iomem, obj, &integratorcm_ops, s,
"integratorcm", 0x00800000);
sysbus_init_mmio(dev, &s->iomem);
integratorcm_do_remap(s);
/* ??? Save/restore. */
- return 0;
}
/* Integrator/CP hardware emulation. */
@@ -394,18 +394,18 @@ static const MemoryRegionOps icp_pic_ops = {
.endianness = DEVICE_NATIVE_ENDIAN,
};
-static int icp_pic_init(SysBusDevice *sbd)
+static void icp_pic_init(Object *obj)
{
- DeviceState *dev = DEVICE(sbd);
- icp_pic_state *s = INTEGRATOR_PIC(dev);
+ DeviceState *dev = DEVICE(obj);
+ icp_pic_state *s = INTEGRATOR_PIC(obj);
+ SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
qdev_init_gpio_in(dev, icp_pic_set_irq, 32);
sysbus_init_irq(sbd, &s->parent_irq);
sysbus_init_irq(sbd, &s->parent_fiq);
- memory_region_init_io(&s->iomem, OBJECT(s), &icp_pic_ops, s,
+ memory_region_init_io(&s->iomem, obj, &icp_pic_ops, s,
"icp-pic", 0x00800000);
sysbus_init_mmio(sbd, &s->iomem);
- return 0;
}
/* CP control registers. */
@@ -630,9 +630,7 @@ static Property core_properties[] = {
static void core_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);
- k->init = integratorcm_init;
dc->props = core_properties;
}
@@ -640,21 +638,15 @@ static const TypeInfo core_info = {
.name = TYPE_INTEGRATOR_CM,
.parent = TYPE_SYS_BUS_DEVICE,
.instance_size = sizeof(IntegratorCMState),
+ .instance_init = integratorcm_init,
.class_init = core_class_init,
};
-static void icp_pic_class_init(ObjectClass *klass, void *data)
-{
- SysBusDeviceClass *sdc = SYS_BUS_DEVICE_CLASS(klass);
-
- sdc->init = icp_pic_init;
-}
-
static const TypeInfo icp_pic_info = {
.name = TYPE_INTEGRATOR_PIC,
.parent = TYPE_SYS_BUS_DEVICE,
.instance_size = sizeof(icp_pic_state),
- .class_init = icp_pic_class_init,
+ .instance_init = icp_pic_init,
};
static const TypeInfo icp_ctrl_regs_info = {
diff --git a/hw/arm/nseries.c b/hw/arm/nseries.c
index 5382505559..c7068c0d38 100644
--- a/hw/arm/nseries.c
+++ b/hw/arm/nseries.c
@@ -1364,7 +1364,7 @@ static void n8x0_init(MachineState *machine,
if (option_rom[0].name &&
(machine->boot_order[0] == 'n' || !machine->kernel_filename)) {
- uint8_t nolo_tags[0x10000];
+ uint8_t *nolo_tags = g_new(uint8_t, 0x10000);
/* No, wait, better start at the ROM. */
s->mpu->cpu->env.regs[15] = OMAP2_Q2_BASE + 0x400000;
@@ -1383,6 +1383,7 @@ static void n8x0_init(MachineState *machine,
n800_setup_nolo_tags(nolo_tags);
cpu_physical_memory_write(OMAP2_SRAM_BASE, nolo_tags, 0x10000);
+ g_free(nolo_tags);
}
}
diff --git a/hw/arm/pxa2xx.c b/hw/arm/pxa2xx.c
index 1a8c36033a..e41a7c92ab 100644
--- a/hw/arm/pxa2xx.c
+++ b/hw/arm/pxa2xx.c
@@ -1107,9 +1107,10 @@ static const MemoryRegionOps pxa2xx_rtc_ops = {
.endianness = DEVICE_NATIVE_ENDIAN,
};
-static int pxa2xx_rtc_init(SysBusDevice *dev)
+static void pxa2xx_rtc_init(Object *obj)
{
- PXA2xxRTCState *s = PXA2XX_RTC(dev);
+ PXA2xxRTCState *s = PXA2XX_RTC(obj);
+ SysBusDevice *dev = SYS_BUS_DEVICE(obj);
struct tm tm;
int wom;
@@ -1138,11 +1139,9 @@ static int pxa2xx_rtc_init(SysBusDevice *dev)
sysbus_init_irq(dev, &s->rtc_irq);
- memory_region_init_io(&s->iomem, OBJECT(s), &pxa2xx_rtc_ops, s,
+ memory_region_init_io(&s->iomem, obj, &pxa2xx_rtc_ops, s,
"pxa2xx-rtc", 0x10000);
sysbus_init_mmio(dev, &s->iomem);
-
- return 0;
}
static void pxa2xx_rtc_pre_save(void *opaque)
@@ -1195,9 +1194,7 @@ static const VMStateDescription vmstate_pxa2xx_rtc_regs = {
static void pxa2xx_rtc_sysbus_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);
- k->init = pxa2xx_rtc_init;
dc->desc = "PXA2xx RTC Controller";
dc->vmsd = &vmstate_pxa2xx_rtc_regs;
}
@@ -1206,6 +1203,7 @@ static const TypeInfo pxa2xx_rtc_sysbus_info = {
.name = TYPE_PXA2XX_RTC,
.parent = TYPE_SYS_BUS_DEVICE,
.instance_size = sizeof(PXA2xxRTCState),
+ .instance_init = pxa2xx_rtc_init,
.class_init = pxa2xx_rtc_sysbus_class_init,
};
@@ -1501,19 +1499,18 @@ PXA2xxI2CState *pxa2xx_i2c_init(hwaddr base,
return s;
}
-static int pxa2xx_i2c_initfn(SysBusDevice *sbd)
+static void pxa2xx_i2c_initfn(Object *obj)
{
- DeviceState *dev = DEVICE(sbd);
- PXA2xxI2CState *s = PXA2XX_I2C(dev);
+ DeviceState *dev = DEVICE(obj);
+ PXA2xxI2CState *s = PXA2XX_I2C(obj);
+ SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
s->bus = i2c_init_bus(dev, "i2c");
- memory_region_init_io(&s->iomem, OBJECT(s), &pxa2xx_i2c_ops, s,
+ memory_region_init_io(&s->iomem, obj, &pxa2xx_i2c_ops, s,
"pxa2xx-i2c", s->region_size);
sysbus_init_mmio(sbd, &s->iomem);
sysbus_init_irq(sbd, &s->irq);
-
- return 0;
}
I2CBus *pxa2xx_i2c_bus(PXA2xxI2CState *s)
@@ -1530,9 +1527,7 @@ static Property pxa2xx_i2c_properties[] = {
static void pxa2xx_i2c_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);
- k->init = pxa2xx_i2c_initfn;
dc->desc = "PXA2xx I2C Bus Controller";
dc->vmsd = &vmstate_pxa2xx_i2c;
dc->props = pxa2xx_i2c_properties;
@@ -1542,6 +1537,7 @@ static const TypeInfo pxa2xx_i2c_info = {
.name = TYPE_PXA2XX_I2C,
.parent = TYPE_SYS_BUS_DEVICE,
.instance_size = sizeof(PXA2xxI2CState),
+ .instance_init = pxa2xx_i2c_initfn,
.class_init = pxa2xx_i2c_class_init,
};
diff --git a/hw/arm/pxa2xx_pic.c b/hw/arm/pxa2xx_pic.c
index 7e51532cde..b516ced8c0 100644
--- a/hw/arm/pxa2xx_pic.c
+++ b/hw/arm/pxa2xx_pic.c
@@ -310,17 +310,10 @@ static VMStateDescription vmstate_pxa2xx_pic_regs = {
},
};
-static int pxa2xx_pic_initfn(SysBusDevice *dev)
-{
- return 0;
-}
-
static void pxa2xx_pic_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);
- k->init = pxa2xx_pic_initfn;
dc->desc = "PXA2xx PIC";
dc->vmsd = &vmstate_pxa2xx_pic_regs;
}
diff --git a/hw/arm/sabrelite.c b/hw/arm/sabrelite.c
new file mode 100644
index 0000000000..776c51e398
--- /dev/null
+++ b/hw/arm/sabrelite.c
@@ -0,0 +1,121 @@
+/*
+ * SABRELITE Board System emulation.
+ *
+ * Copyright (c) 2015 Jean-Christophe Dubois <jcd@tribudubois.net>
+ *
+ * This code is licensed under the GPL, version 2 or later.
+ * See the file `COPYING' in the top level directory.
+ *
+ * It (partially) emulates a sabrelite board, with a Freescale
+ * i.MX6 SoC
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "qemu-common.h"
+#include "hw/arm/fsl-imx6.h"
+#include "hw/boards.h"
+#include "sysemu/sysemu.h"
+#include "qemu/error-report.h"
+#include "sysemu/qtest.h"
+
+typedef struct IMX6Sabrelite {
+ FslIMX6State soc;
+ MemoryRegion ram;
+} IMX6Sabrelite;
+
+static struct arm_boot_info sabrelite_binfo = {
+ /* DDR memory start */
+ .loader_start = FSL_IMX6_MMDC_ADDR,
+ /* No board ID, we boot from DT tree */
+ .board_id = -1,
+};
+
+/* No need to do any particular setup for secondary boot */
+static void sabrelite_write_secondary(ARMCPU *cpu,
+ const struct arm_boot_info *info)
+{
+}
+
+/* Secondary cores are reset through SRC device */
+static void sabrelite_reset_secondary(ARMCPU *cpu,
+ const struct arm_boot_info *info)
+{
+}
+
+static void sabrelite_init(MachineState *machine)
+{
+ IMX6Sabrelite *s = g_new0(IMX6Sabrelite, 1);
+ Error *err = NULL;
+
+ /* Check the amount of memory is compatible with the SOC */
+ if (machine->ram_size > FSL_IMX6_MMDC_SIZE) {
+ error_report("RAM size " RAM_ADDR_FMT " above max supported (%08x)",
+ machine->ram_size, FSL_IMX6_MMDC_SIZE);
+ exit(1);
+ }
+
+ object_initialize(&s->soc, sizeof(s->soc), TYPE_FSL_IMX6);
+ object_property_add_child(OBJECT(machine), "soc", OBJECT(&s->soc),
+ &error_abort);
+
+ object_property_set_bool(OBJECT(&s->soc), true, "realized", &err);
+ if (err != NULL) {
+ error_report("%s", error_get_pretty(err));
+ exit(1);
+ }
+
+ memory_region_allocate_system_memory(&s->ram, NULL, "sabrelite.ram",
+ machine->ram_size);
+ memory_region_add_subregion(get_system_memory(), FSL_IMX6_MMDC_ADDR,
+ &s->ram);
+
+ {
+ /*
+ * TODO: Ideally we would expose the chip select and spi bus on the
+ * SoC object using alias properties; then we would not need to
+ * directly access the underlying spi device object.
+ */
+ /* Add the sst25vf016b NOR FLASH memory to first SPI */
+ Object *spi_dev;
+
+ spi_dev = object_resolve_path_component(OBJECT(&s->soc), "spi1");
+ if (spi_dev) {
+ SSIBus *spi_bus;
+
+ spi_bus = (SSIBus *)qdev_get_child_bus(DEVICE(spi_dev), "spi");
+ if (spi_bus) {
+ DeviceState *flash_dev;
+
+ flash_dev = ssi_create_slave(spi_bus, "sst25vf016b");
+ if (flash_dev) {
+ qemu_irq cs_line = qdev_get_gpio_in_named(flash_dev,
+ SSI_GPIO_CS, 0);
+ sysbus_connect_irq(SYS_BUS_DEVICE(spi_dev), 1, cs_line);
+ }
+ }
+ }
+ }
+
+ sabrelite_binfo.ram_size = machine->ram_size;
+ sabrelite_binfo.kernel_filename = machine->kernel_filename;
+ sabrelite_binfo.kernel_cmdline = machine->kernel_cmdline;
+ sabrelite_binfo.initrd_filename = machine->initrd_filename;
+ sabrelite_binfo.nb_cpus = smp_cpus;
+ sabrelite_binfo.secure_boot = true;
+ sabrelite_binfo.write_secondary_boot = sabrelite_write_secondary;
+ sabrelite_binfo.secondary_cpu_reset_hook = sabrelite_reset_secondary;
+
+ if (!qtest_enabled()) {
+ arm_load_kernel(&s->soc.cpu[0], &sabrelite_binfo);
+ }
+}
+
+static void sabrelite_machine_init(MachineClass *mc)
+{
+ mc->desc = "Freescale i.MX6 Quad SABRE Lite Board (Cortex A9)";
+ mc->init = sabrelite_init;
+ mc->max_cpus = FSL_IMX6_NUM_CPUS;
+}
+
+DEFINE_MACHINE("sabrelite", sabrelite_machine_init)
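A hedged usage sketch for the new board: only the machine name, CPU count and RAM cap come from the code above; the kernel and device-tree file names are placeholders, not part of this patch.

qemu-system-arm -M sabrelite -smp 4 -m 1G \
    -kernel zImage -dtb imx6q-sabrelite.dtb -serial stdio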
diff --git a/hw/arm/spitz.c b/hw/arm/spitz.c
index bf61d63b58..ba40f8302b 100644
--- a/hw/arm/spitz.c
+++ b/hw/arm/spitz.c
@@ -164,9 +164,10 @@ static void sl_flash_register(PXA2xxState *cpu, int size)
sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, FLASH_BASE);
}
-static int sl_nand_init(SysBusDevice *dev)
+static void sl_nand_init(Object *obj)
{
- SLNANDState *s = SL_NAND(dev);
+ SLNANDState *s = SL_NAND(obj);
+ SysBusDevice *dev = SYS_BUS_DEVICE(obj);
DriveInfo *nand;
s->ctl = 0;
@@ -175,10 +176,8 @@ static int sl_nand_init(SysBusDevice *dev)
s->nand = nand_init(nand ? blk_by_legacy_dinfo(nand) : NULL,
s->manf_id, s->chip_id);
- memory_region_init_io(&s->iomem, OBJECT(s), &sl_ops, s, "sl", 0x40);
+ memory_region_init_io(&s->iomem, obj, &sl_ops, s, "sl", 0x40);
sysbus_init_mmio(dev, &s->iomem);
-
- return 0;
}
/* Spitz Keyboard */
@@ -501,10 +500,10 @@ static void spitz_keyboard_register(PXA2xxState *cpu)
qemu_add_kbd_event_handler(spitz_keyboard_handler, s);
}
-static int spitz_keyboard_init(SysBusDevice *sbd)
+static void spitz_keyboard_init(Object *obj)
{
- DeviceState *dev = DEVICE(sbd);
- SpitzKeyboardState *s = SPITZ_KEYBOARD(dev);
+ DeviceState *dev = DEVICE(obj);
+ SpitzKeyboardState *s = SPITZ_KEYBOARD(obj);
int i, j;
for (i = 0; i < 0x80; i ++)
@@ -519,8 +518,6 @@ static int spitz_keyboard_init(SysBusDevice *sbd)
s->kbdtimer = timer_new_ns(QEMU_CLOCK_VIRTUAL, spitz_keyboard_tick, s);
qdev_init_gpio_in(dev, spitz_keyboard_strobe, SPITZ_KEY_STROBE_NUM);
qdev_init_gpio_out(dev, s->sense, SPITZ_KEY_SENSE_NUM);
-
- return 0;
}
/* LCD backlight controller */
@@ -1065,9 +1062,7 @@ static Property sl_nand_properties[] = {
static void sl_nand_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);
- k->init = sl_nand_init;
dc->vmsd = &vmstate_sl_nand_info;
dc->props = sl_nand_properties;
/* Reason: init() method uses drive_get() */
@@ -1078,6 +1073,7 @@ static const TypeInfo sl_nand_info = {
.name = TYPE_SL_NAND,
.parent = TYPE_SYS_BUS_DEVICE,
.instance_size = sizeof(SLNANDState),
+ .instance_init = sl_nand_init,
.class_init = sl_nand_class_init,
};
@@ -1097,9 +1093,7 @@ static VMStateDescription vmstate_spitz_kbd = {
static void spitz_keyboard_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);
- k->init = spitz_keyboard_init;
dc->vmsd = &vmstate_spitz_kbd;
}
@@ -1107,6 +1101,7 @@ static const TypeInfo spitz_keyboard_info = {
.name = TYPE_SPITZ_KEYBOARD,
.parent = TYPE_SYS_BUS_DEVICE,
.instance_size = sizeof(SpitzKeyboardState),
+ .instance_init = spitz_keyboard_init,
.class_init = spitz_keyboard_class_init,
};
diff --git a/hw/arm/stellaris.c b/hw/arm/stellaris.c
index c1766f856a..f90b9fd190 100644
--- a/hw/arm/stellaris.c
+++ b/hw/arm/stellaris.c
@@ -316,23 +316,22 @@ static const VMStateDescription vmstate_stellaris_gptm = {
}
};
-static int stellaris_gptm_init(SysBusDevice *sbd)
+static void stellaris_gptm_init(Object *obj)
{
- DeviceState *dev = DEVICE(sbd);
- gptm_state *s = STELLARIS_GPTM(dev);
+ DeviceState *dev = DEVICE(obj);
+ gptm_state *s = STELLARIS_GPTM(obj);
+ SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
sysbus_init_irq(sbd, &s->irq);
qdev_init_gpio_out(dev, &s->trigger, 1);
- memory_region_init_io(&s->iomem, OBJECT(s), &gptm_ops, s,
+ memory_region_init_io(&s->iomem, obj, &gptm_ops, s,
"gptm", 0x1000);
sysbus_init_mmio(sbd, &s->iomem);
s->opaque[0] = s->opaque[1] = s;
s->timer[0] = timer_new_ns(QEMU_CLOCK_VIRTUAL, gptm_tick, &s->opaque[0]);
s->timer[1] = timer_new_ns(QEMU_CLOCK_VIRTUAL, gptm_tick, &s->opaque[1]);
- vmstate_register(dev, -1, &vmstate_stellaris_gptm, s);
- return 0;
}
@@ -873,23 +872,22 @@ static const VMStateDescription vmstate_stellaris_i2c = {
}
};
-static int stellaris_i2c_init(SysBusDevice *sbd)
+static void stellaris_i2c_init(Object *obj)
{
- DeviceState *dev = DEVICE(sbd);
- stellaris_i2c_state *s = STELLARIS_I2C(dev);
+ DeviceState *dev = DEVICE(obj);
+ stellaris_i2c_state *s = STELLARIS_I2C(obj);
+ SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
I2CBus *bus;
sysbus_init_irq(sbd, &s->irq);
bus = i2c_init_bus(dev, "i2c");
s->bus = bus;
- memory_region_init_io(&s->iomem, OBJECT(s), &stellaris_i2c_ops, s,
+ memory_region_init_io(&s->iomem, obj, &stellaris_i2c_ops, s,
"i2c", 0x1000);
sysbus_init_mmio(sbd, &s->iomem);
/* ??? For now we only implement the master interface. */
stellaris_i2c_reset(s);
- vmstate_register(dev, -1, &vmstate_stellaris_i2c, s);
- return 0;
}
/* Analogue to Digital Converter. This is only partially implemented,
@@ -1160,23 +1158,22 @@ static const VMStateDescription vmstate_stellaris_adc = {
}
};
-static int stellaris_adc_init(SysBusDevice *sbd)
+static void stellaris_adc_init(Object *obj)
{
- DeviceState *dev = DEVICE(sbd);
- stellaris_adc_state *s = STELLARIS_ADC(dev);
+ DeviceState *dev = DEVICE(obj);
+ stellaris_adc_state *s = STELLARIS_ADC(obj);
+ SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
int n;
for (n = 0; n < 4; n++) {
sysbus_init_irq(sbd, &s->irq[n]);
}
- memory_region_init_io(&s->iomem, OBJECT(s), &stellaris_adc_ops, s,
+ memory_region_init_io(&s->iomem, obj, &stellaris_adc_ops, s,
"adc", 0x1000);
sysbus_init_mmio(sbd, &s->iomem);
stellaris_adc_reset(s);
qdev_init_gpio_in(dev, stellaris_adc_trigger, 1);
- vmstate_register(dev, -1, &vmstate_stellaris_adc, s);
- return 0;
}
static
@@ -1425,43 +1422,46 @@ type_init(stellaris_machine_init)
static void stellaris_i2c_class_init(ObjectClass *klass, void *data)
{
- SysBusDeviceClass *sdc = SYS_BUS_DEVICE_CLASS(klass);
+ DeviceClass *dc = DEVICE_CLASS(klass);
- sdc->init = stellaris_i2c_init;
+ dc->vmsd = &vmstate_stellaris_i2c;
}
static const TypeInfo stellaris_i2c_info = {
.name = TYPE_STELLARIS_I2C,
.parent = TYPE_SYS_BUS_DEVICE,
.instance_size = sizeof(stellaris_i2c_state),
+ .instance_init = stellaris_i2c_init,
.class_init = stellaris_i2c_class_init,
};
static void stellaris_gptm_class_init(ObjectClass *klass, void *data)
{
- SysBusDeviceClass *sdc = SYS_BUS_DEVICE_CLASS(klass);
+ DeviceClass *dc = DEVICE_CLASS(klass);
- sdc->init = stellaris_gptm_init;
+ dc->vmsd = &vmstate_stellaris_gptm;
}
static const TypeInfo stellaris_gptm_info = {
.name = TYPE_STELLARIS_GPTM,
.parent = TYPE_SYS_BUS_DEVICE,
.instance_size = sizeof(gptm_state),
+ .instance_init = stellaris_gptm_init,
.class_init = stellaris_gptm_class_init,
};
static void stellaris_adc_class_init(ObjectClass *klass, void *data)
{
- SysBusDeviceClass *sdc = SYS_BUS_DEVICE_CLASS(klass);
+ DeviceClass *dc = DEVICE_CLASS(klass);
- sdc->init = stellaris_adc_init;
+ dc->vmsd = &vmstate_stellaris_adc;
}
static const TypeInfo stellaris_adc_info = {
.name = TYPE_STELLARIS_ADC,
.parent = TYPE_SYS_BUS_DEVICE,
.instance_size = sizeof(stellaris_adc_state),
+ .instance_init = stellaris_adc_init,
.class_init = stellaris_adc_class_init,
};
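Besides the instance_init conversion, the stellaris hunks drop the explicit vmstate_register() calls; assigning DeviceClass::vmsd in class_init gives the equivalent registration when the device is realized. A minimal sketch (hypothetical names):

static void foo_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->vmsd = &vmstate_foo;   /* qdev core registers this; no vmstate_register() call */
}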
diff --git a/hw/arm/strongarm.c b/hw/arm/strongarm.c
index 1eeb1ab391..3bc8a98150 100644
--- a/hw/arm/strongarm.c
+++ b/hw/arm/strongarm.c
@@ -179,19 +179,18 @@ static const MemoryRegionOps strongarm_pic_ops = {
.endianness = DEVICE_NATIVE_ENDIAN,
};
-static int strongarm_pic_initfn(SysBusDevice *sbd)
+static void strongarm_pic_initfn(Object *obj)
{
- DeviceState *dev = DEVICE(sbd);
- StrongARMPICState *s = STRONGARM_PIC(dev);
+ DeviceState *dev = DEVICE(obj);
+ StrongARMPICState *s = STRONGARM_PIC(obj);
+ SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
qdev_init_gpio_in(dev, strongarm_pic_set_irq, SA_PIC_SRCS);
- memory_region_init_io(&s->iomem, OBJECT(s), &strongarm_pic_ops, s,
+ memory_region_init_io(&s->iomem, obj, &strongarm_pic_ops, s,
"pic", 0x1000);
sysbus_init_mmio(sbd, &s->iomem);
sysbus_init_irq(sbd, &s->irq);
sysbus_init_irq(sbd, &s->fiq);
-
- return 0;
}
static int strongarm_pic_post_load(void *opaque, int version_id)
@@ -217,9 +216,7 @@ static VMStateDescription vmstate_strongarm_pic_regs = {
static void strongarm_pic_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);
- k->init = strongarm_pic_initfn;
dc->desc = "StrongARM PIC";
dc->vmsd = &vmstate_strongarm_pic_regs;
}
@@ -228,6 +225,7 @@ static const TypeInfo strongarm_pic_info = {
.name = TYPE_STRONGARM_PIC,
.parent = TYPE_SYS_BUS_DEVICE,
.instance_size = sizeof(StrongARMPICState),
+ .instance_init = strongarm_pic_initfn,
.class_init = strongarm_pic_class_init,
};
@@ -381,9 +379,10 @@ static const MemoryRegionOps strongarm_rtc_ops = {
.endianness = DEVICE_NATIVE_ENDIAN,
};
-static int strongarm_rtc_init(SysBusDevice *dev)
+static void strongarm_rtc_init(Object *obj)
{
- StrongARMRTCState *s = STRONGARM_RTC(dev);
+ StrongARMRTCState *s = STRONGARM_RTC(obj);
+ SysBusDevice *dev = SYS_BUS_DEVICE(obj);
struct tm tm;
s->rttr = 0x0;
@@ -400,11 +399,9 @@ static int strongarm_rtc_init(SysBusDevice *dev)
sysbus_init_irq(dev, &s->rtc_irq);
sysbus_init_irq(dev, &s->rtc_hz_irq);
- memory_region_init_io(&s->iomem, OBJECT(s), &strongarm_rtc_ops, s,
+ memory_region_init_io(&s->iomem, obj, &strongarm_rtc_ops, s,
"rtc", 0x10000);
sysbus_init_mmio(dev, &s->iomem);
-
- return 0;
}
static void strongarm_rtc_pre_save(void *opaque)
@@ -443,9 +440,7 @@ static const VMStateDescription vmstate_strongarm_rtc_regs = {
static void strongarm_rtc_sysbus_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);
- k->init = strongarm_rtc_init;
dc->desc = "StrongARM RTC Controller";
dc->vmsd = &vmstate_strongarm_rtc_regs;
}
@@ -454,6 +449,7 @@ static const TypeInfo strongarm_rtc_sysbus_info = {
.name = TYPE_STRONGARM_RTC,
.parent = TYPE_SYS_BUS_DEVICE,
.instance_size = sizeof(StrongARMRTCState),
+ .instance_init = strongarm_rtc_init,
.class_init = strongarm_rtc_sysbus_class_init,
};
@@ -646,16 +642,17 @@ static DeviceState *strongarm_gpio_init(hwaddr base,
return dev;
}
-static int strongarm_gpio_initfn(SysBusDevice *sbd)
+static void strongarm_gpio_initfn(Object *obj)
{
- DeviceState *dev = DEVICE(sbd);
- StrongARMGPIOInfo *s = STRONGARM_GPIO(dev);
+ DeviceState *dev = DEVICE(obj);
+ StrongARMGPIOInfo *s = STRONGARM_GPIO(obj);
+ SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
int i;
qdev_init_gpio_in(dev, strongarm_gpio_set, 28);
qdev_init_gpio_out(dev, s->handler, 28);
- memory_region_init_io(&s->iomem, OBJECT(s), &strongarm_gpio_ops, s,
+ memory_region_init_io(&s->iomem, obj, &strongarm_gpio_ops, s,
"gpio", 0x1000);
sysbus_init_mmio(sbd, &s->iomem);
@@ -663,8 +660,6 @@ static int strongarm_gpio_initfn(SysBusDevice *sbd)
sysbus_init_irq(sbd, &s->irqs[i]);
}
sysbus_init_irq(sbd, &s->irqX);
-
- return 0;
}
static const VMStateDescription vmstate_strongarm_gpio_regs = {
@@ -687,9 +682,7 @@ static const VMStateDescription vmstate_strongarm_gpio_regs = {
static void strongarm_gpio_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);
- k->init = strongarm_gpio_initfn;
dc->desc = "StrongARM GPIO controller";
dc->vmsd = &vmstate_strongarm_gpio_regs;
}
@@ -698,6 +691,7 @@ static const TypeInfo strongarm_gpio_info = {
.name = TYPE_STRONGARM_GPIO,
.parent = TYPE_SYS_BUS_DEVICE,
.instance_size = sizeof(StrongARMGPIOInfo),
+ .instance_init = strongarm_gpio_initfn,
.class_init = strongarm_gpio_class_init,
};
@@ -824,20 +818,19 @@ static const MemoryRegionOps strongarm_ppc_ops = {
.endianness = DEVICE_NATIVE_ENDIAN,
};
-static int strongarm_ppc_init(SysBusDevice *sbd)
+static void strongarm_ppc_init(Object *obj)
{
- DeviceState *dev = DEVICE(sbd);
- StrongARMPPCInfo *s = STRONGARM_PPC(dev);
+ DeviceState *dev = DEVICE(obj);
+ StrongARMPPCInfo *s = STRONGARM_PPC(obj);
+ SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
qdev_init_gpio_in(dev, strongarm_ppc_set, 22);
qdev_init_gpio_out(dev, s->handler, 22);
- memory_region_init_io(&s->iomem, OBJECT(s), &strongarm_ppc_ops, s,
+ memory_region_init_io(&s->iomem, obj, &strongarm_ppc_ops, s,
"ppc", 0x1000);
sysbus_init_mmio(sbd, &s->iomem);
-
- return 0;
}
static const VMStateDescription vmstate_strongarm_ppc_regs = {
@@ -859,9 +852,7 @@ static const VMStateDescription vmstate_strongarm_ppc_regs = {
static void strongarm_ppc_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);
- k->init = strongarm_ppc_init;
dc->desc = "StrongARM PPC controller";
dc->vmsd = &vmstate_strongarm_ppc_regs;
}
@@ -870,6 +861,7 @@ static const TypeInfo strongarm_ppc_info = {
.name = TYPE_STRONGARM_PPC,
.parent = TYPE_SYS_BUS_DEVICE,
.instance_size = sizeof(StrongARMPPCInfo),
+ .instance_init = strongarm_ppc_init,
.class_init = strongarm_ppc_class_init,
};
@@ -1231,11 +1223,12 @@ static const MemoryRegionOps strongarm_uart_ops = {
.endianness = DEVICE_NATIVE_ENDIAN,
};
-static int strongarm_uart_init(SysBusDevice *dev)
+static void strongarm_uart_init(Object *obj)
{
- StrongARMUARTState *s = STRONGARM_UART(dev);
+ StrongARMUARTState *s = STRONGARM_UART(obj);
+ SysBusDevice *dev = SYS_BUS_DEVICE(obj);
- memory_region_init_io(&s->iomem, OBJECT(s), &strongarm_uart_ops, s,
+ memory_region_init_io(&s->iomem, obj, &strongarm_uart_ops, s,
"uart", 0x10000);
sysbus_init_mmio(dev, &s->iomem);
sysbus_init_irq(dev, &s->irq);
@@ -1250,8 +1243,6 @@ static int strongarm_uart_init(SysBusDevice *dev)
strongarm_uart_event,
s);
}
-
- return 0;
}
static void strongarm_uart_reset(DeviceState *dev)
@@ -1321,9 +1312,7 @@ static Property strongarm_uart_properties[] = {
static void strongarm_uart_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);
- k->init = strongarm_uart_init;
dc->desc = "StrongARM UART controller";
dc->reset = strongarm_uart_reset;
dc->vmsd = &vmstate_strongarm_uart_regs;
@@ -1334,6 +1323,7 @@ static const TypeInfo strongarm_uart_info = {
.name = TYPE_STRONGARM_UART,
.parent = TYPE_SYS_BUS_DEVICE,
.instance_size = sizeof(StrongARMUARTState),
+ .instance_init = strongarm_uart_init,
.class_init = strongarm_uart_class_init,
};
diff --git a/hw/arm/versatilepb.c b/hw/arm/versatilepb.c
index e5a80c2d2c..d079bc9e82 100644
--- a/hw/arm/versatilepb.c
+++ b/hw/arm/versatilepb.c
@@ -153,10 +153,11 @@ static const MemoryRegionOps vpb_sic_ops = {
.endianness = DEVICE_NATIVE_ENDIAN,
};
-static int vpb_sic_init(SysBusDevice *sbd)
+static void vpb_sic_init(Object *obj)
{
- DeviceState *dev = DEVICE(sbd);
- vpb_sic_state *s = VERSATILE_PB_SIC(dev);
+ DeviceState *dev = DEVICE(obj);
+ vpb_sic_state *s = VERSATILE_PB_SIC(obj);
+ SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
int i;
qdev_init_gpio_in(dev, vpb_sic_set_irq, 32);
@@ -164,10 +165,9 @@ static int vpb_sic_init(SysBusDevice *sbd)
sysbus_init_irq(sbd, &s->parent[i]);
}
s->irq = 31;
- memory_region_init_io(&s->iomem, OBJECT(s), &vpb_sic_ops, s,
+ memory_region_init_io(&s->iomem, obj, &vpb_sic_ops, s,
"vpb-sic", 0x1000);
sysbus_init_mmio(sbd, &s->iomem);
- return 0;
}
/* Board init. */
@@ -427,9 +427,7 @@ type_init(versatile_machine_init)
static void vpb_sic_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);
- k->init = vpb_sic_init;
dc->vmsd = &vmstate_vpb_sic;
}
@@ -437,6 +435,7 @@ static const TypeInfo vpb_sic_info = {
.name = TYPE_VERSATILE_PB_SIC,
.parent = TYPE_SYS_BUS_DEVICE,
.instance_size = sizeof(vpb_sic_state),
+ .instance_init = vpb_sic_init,
.class_init = vpb_sic_class_init,
};
diff --git a/hw/arm/virt-acpi-build.c b/hw/arm/virt-acpi-build.c
index f51fe396ce..26a7bac48f 100644
--- a/hw/arm/virt-acpi-build.c
+++ b/hw/arm/virt-acpi-build.c
@@ -43,6 +43,7 @@
#include "hw/acpi/aml-build.h"
#include "hw/pci/pcie_host.h"
#include "hw/pci/pci.h"
+#include "sysemu/numa.h"
#define ARM_SPI_BASE 32
#define ACPI_POWER_BUTTON_DEVICE "PWRB"
@@ -414,6 +415,52 @@ build_spcr(GArray *table_data, GArray *linker, VirtGuestInfo *guest_info)
}
static void
+build_srat(GArray *table_data, GArray *linker, VirtGuestInfo *guest_info)
+{
+ AcpiSystemResourceAffinityTable *srat;
+ AcpiSratProcessorGiccAffinity *core;
+ AcpiSratMemoryAffinity *numamem;
+ int i, j, srat_start;
+ uint64_t mem_base;
+ uint32_t *cpu_node = g_malloc0(guest_info->smp_cpus * sizeof(uint32_t));
+
+ for (i = 0; i < guest_info->smp_cpus; i++) {
+ for (j = 0; j < nb_numa_nodes; j++) {
+ if (test_bit(i, numa_info[j].node_cpu)) {
+ cpu_node[i] = j;
+ break;
+ }
+ }
+ }
+
+ srat_start = table_data->len;
+ srat = acpi_data_push(table_data, sizeof(*srat));
+ srat->reserved1 = cpu_to_le32(1);
+
+ for (i = 0; i < guest_info->smp_cpus; ++i) {
+ core = acpi_data_push(table_data, sizeof(*core));
+ core->type = ACPI_SRAT_PROCESSOR_GICC;
+ core->length = sizeof(*core);
+ core->proximity = cpu_to_le32(cpu_node[i]);
+ core->acpi_processor_uid = cpu_to_le32(i);
+ core->flags = cpu_to_le32(1);
+ }
+ g_free(cpu_node);
+
+ mem_base = guest_info->memmap[VIRT_MEM].base;
+ for (i = 0; i < nb_numa_nodes; ++i) {
+ numamem = acpi_data_push(table_data, sizeof(*numamem));
+ build_srat_memory(numamem, mem_base, numa_info[i].node_mem, i,
+ MEM_AFFINITY_ENABLED);
+ mem_base += numa_info[i].node_mem;
+ }
+
+ build_header(linker, table_data,
+ (void *)(table_data->data + srat_start), "SRAT",
+ table_data->len - srat_start, 3, NULL, NULL);
+}
+
+static void
build_mcfg(GArray *table_data, GArray *linker, VirtGuestInfo *guest_info)
{
AcpiTableMcfg *mcfg;
@@ -638,6 +685,11 @@ void virt_acpi_build(VirtGuestInfo *guest_info, AcpiBuildTables *tables)
acpi_add_table(table_offsets, tables_blob);
build_spcr(tables_blob, tables->linker, guest_info);
+ if (nb_numa_nodes > 0) {
+ acpi_add_table(table_offsets, tables_blob);
+ build_srat(tables_blob, tables->linker, guest_info);
+ }
+
/* RSDT is pointed to by RSDP */
rsdt = tables_blob->len;
build_rsdt(tables_blob, tables->linker, table_offsets, NULL, NULL);
diff --git a/hw/arm/virt.c b/hw/arm/virt.c
index 56d35c7716..fe6b11d24e 100644
--- a/hw/arm/virt.c
+++ b/hw/arm/virt.c
@@ -38,6 +38,7 @@
#include "net/net.h"
#include "sysemu/block-backend.h"
#include "sysemu/device_tree.h"
+#include "sysemu/numa.h"
#include "sysemu/sysemu.h"
#include "sysemu/kvm.h"
#include "hw/boards.h"
@@ -329,6 +330,7 @@ static void fdt_add_cpu_nodes(const VirtBoardInfo *vbi)
{
int cpu;
int addr_cells = 1;
+ unsigned int i;
/*
* From Documentation/devicetree/bindings/arm/cpus.txt
@@ -378,6 +380,12 @@ static void fdt_add_cpu_nodes(const VirtBoardInfo *vbi)
armcpu->mp_affinity);
}
+ for (i = 0; i < nb_numa_nodes; i++) {
+ if (test_bit(cpu, numa_info[i].node_cpu)) {
+ qemu_fdt_setprop_cell(vbi->fdt, nodename, "numa-node-id", i);
+ }
+ }
+
g_free(nodename);
}
}
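Together, the SRAT generation above and the numa-node-id cells added to the device tree here describe the topology the user requests on the command line. An illustrative invocation (syntax of the era, not part of this patch) would be:

qemu-system-aarch64 -M virt -cpu cortex-a57 -smp 4 -m 2G \
    -numa node,cpus=0-1,mem=1G -numa node,cpus=2-3,mem=1G

which should yield one GICC affinity entry per vCPU and one memory affinity entry per node in the SRAT, plus a matching numa-node-id property in each CPU DT node.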
diff --git a/hw/block/fdc.c b/hw/block/fdc.c
index 372227569e..f73af7db46 100644
--- a/hw/block/fdc.c
+++ b/hw/block/fdc.c
@@ -223,6 +223,13 @@ static int fd_sector(FDrive *drv)
NUM_SIDES(drv));
}
+/* Returns current position, in bytes, for given drive */
+static int fd_offset(FDrive *drv)
+{
+ g_assert(fd_sector(drv) < INT_MAX >> BDRV_SECTOR_BITS);
+ return fd_sector(drv) << BDRV_SECTOR_BITS;
+}
+
/* Seek to a new position:
* returns 0 if already on right track
* returns 1 if track changed
@@ -1629,8 +1636,8 @@ static int fdctrl_transfer_handler (void *opaque, int nchan,
if (fdctrl->data_dir != FD_DIR_WRITE ||
len < FD_SECTOR_LEN || rel_pos != 0) {
/* READ & SCAN commands and realign to a sector for WRITE */
- if (blk_read(cur_drv->blk, fd_sector(cur_drv),
- fdctrl->fifo, 1) < 0) {
+ if (blk_pread(cur_drv->blk, fd_offset(cur_drv),
+ fdctrl->fifo, BDRV_SECTOR_SIZE) < 0) {
FLOPPY_DPRINTF("Floppy: error getting sector %d\n",
fd_sector(cur_drv));
/* Sure, image size is too small... */
@@ -1657,8 +1664,8 @@ static int fdctrl_transfer_handler (void *opaque, int nchan,
k->read_memory(fdctrl->dma, nchan, fdctrl->fifo + rel_pos,
fdctrl->data_pos, len);
- if (blk_write(cur_drv->blk, fd_sector(cur_drv),
- fdctrl->fifo, 1) < 0) {
+ if (blk_pwrite(cur_drv->blk, fd_offset(cur_drv),
+ fdctrl->fifo, BDRV_SECTOR_SIZE, 0) < 0) {
FLOPPY_DPRINTF("error writing sector %d\n",
fd_sector(cur_drv));
fdctrl_stop_transfer(fdctrl, FD_SR0_ABNTERM | FD_SR0_SEEK, 0x00, 0x00);
@@ -1741,7 +1748,8 @@ static uint32_t fdctrl_read_data(FDCtrl *fdctrl)
fd_sector(cur_drv));
return 0;
}
- if (blk_read(cur_drv->blk, fd_sector(cur_drv), fdctrl->fifo, 1)
+ if (blk_pread(cur_drv->blk, fd_offset(cur_drv), fdctrl->fifo,
+ BDRV_SECTOR_SIZE)
< 0) {
FLOPPY_DPRINTF("error getting sector %d\n",
fd_sector(cur_drv));
@@ -1820,7 +1828,8 @@ static void fdctrl_format_sector(FDCtrl *fdctrl)
}
memset(fdctrl->fifo, 0, FD_SECTOR_LEN);
if (cur_drv->blk == NULL ||
- blk_write(cur_drv->blk, fd_sector(cur_drv), fdctrl->fifo, 1) < 0) {
+ blk_pwrite(cur_drv->blk, fd_offset(cur_drv), fdctrl->fifo,
+ BDRV_SECTOR_SIZE, 0) < 0) {
FLOPPY_DPRINTF("error formatting sector %d\n", fd_sector(cur_drv));
fdctrl_stop_transfer(fdctrl, FD_SR0_ABNTERM | FD_SR0_SEEK, 0x00, 0x00);
} else {
@@ -2243,8 +2252,8 @@ static void fdctrl_write_data(FDCtrl *fdctrl, uint32_t value)
if (pos == FD_SECTOR_LEN - 1 ||
fdctrl->data_pos == fdctrl->data_len) {
cur_drv = get_cur_drv(fdctrl);
- if (blk_write(cur_drv->blk, fd_sector(cur_drv), fdctrl->fifo, 1)
- < 0) {
+ if (blk_pwrite(cur_drv->blk, fd_offset(cur_drv), fdctrl->fifo,
+ BDRV_SECTOR_SIZE, 0) < 0) {
FLOPPY_DPRINTF("error writing sector %d\n",
fd_sector(cur_drv));
break;
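fd_offset() simply rescales the sector number into bytes, since the byte-based blk_pread()/blk_pwrite() interface replaces the old sector-based blk_read()/blk_write(). A worked example (the sector value is arbitrary; blk and fifo stand for the backend and buffer):

/* BDRV_SECTOR_BITS is 9, i.e. BDRV_SECTOR_SIZE is 512 bytes */
int sector = 37;
int offset = sector << BDRV_SECTOR_BITS;          /* 37 * 512 = 18944 */
blk_pread(blk, offset, fifo, BDRV_SECTOR_SIZE);   /* reads exactly one sector */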
diff --git a/hw/block/hd-geometry.c b/hw/block/hd-geometry.c
index 6d02192dbb..d388f13e9d 100644
--- a/hw/block/hd-geometry.c
+++ b/hw/block/hd-geometry.c
@@ -66,7 +66,7 @@ static int guess_disk_lchs(BlockBackend *blk,
* but also in async I/O mode. So the I/O throttling function has to
* be disabled temporarily here, not permanently.
*/
- if (blk_read_unthrottled(blk, 0, buf, 1) < 0) {
+ if (blk_pread_unthrottled(blk, 0, buf, BDRV_SECTOR_SIZE) < 0) {
return -1;
}
/* test msdos magic */
diff --git a/hw/block/m25p80.c b/hw/block/m25p80.c
index 906b71257e..5d308637df 100644
--- a/hw/block/m25p80.c
+++ b/hw/block/m25p80.c
@@ -358,25 +358,21 @@ static void blk_sync_complete(void *opaque, int ret)
static void flash_sync_page(Flash *s, int page)
{
- int blk_sector, nb_sectors;
QEMUIOVector iov;
if (!s->blk || blk_is_read_only(s->blk)) {
return;
}
- blk_sector = (page * s->pi->page_size) / BDRV_SECTOR_SIZE;
- nb_sectors = DIV_ROUND_UP(s->pi->page_size, BDRV_SECTOR_SIZE);
qemu_iovec_init(&iov, 1);
- qemu_iovec_add(&iov, s->storage + blk_sector * BDRV_SECTOR_SIZE,
- nb_sectors * BDRV_SECTOR_SIZE);
- blk_aio_writev(s->blk, blk_sector, &iov, nb_sectors, blk_sync_complete,
- NULL);
+ qemu_iovec_add(&iov, s->storage + page * s->pi->page_size,
+ s->pi->page_size);
+ blk_aio_pwritev(s->blk, page * s->pi->page_size, &iov, 0,
+ blk_sync_complete, NULL);
}
static inline void flash_sync_area(Flash *s, int64_t off, int64_t len)
{
- int64_t start, end, nb_sectors;
QEMUIOVector iov;
if (!s->blk || blk_is_read_only(s->blk)) {
@@ -384,13 +380,9 @@ static inline void flash_sync_area(Flash *s, int64_t off, int64_t len)
}
assert(!(len % BDRV_SECTOR_SIZE));
- start = off / BDRV_SECTOR_SIZE;
- end = (off + len) / BDRV_SECTOR_SIZE;
- nb_sectors = end - start;
qemu_iovec_init(&iov, 1);
- qemu_iovec_add(&iov, s->storage + (start * BDRV_SECTOR_SIZE),
- nb_sectors * BDRV_SECTOR_SIZE);
- blk_aio_writev(s->blk, start, &iov, nb_sectors, blk_sync_complete, NULL);
+ qemu_iovec_add(&iov, s->storage + off, len);
+ blk_aio_pwritev(s->blk, off, &iov, 0, blk_sync_complete, NULL);
}
static void flash_erase(Flash *s, int offset, FlashCMD cmd)
@@ -907,8 +899,7 @@ static int m25p80_init(SSISlave *ss)
s->storage = blk_blockalign(s->blk, s->size);
/* FIXME: Move to late init */
- if (blk_read(s->blk, 0, s->storage,
- DIV_ROUND_UP(s->size, BDRV_SECTOR_SIZE))) {
+ if (blk_pread(s->blk, 0, s->storage, s->size)) {
fprintf(stderr, "Failed to initialize SPI flash!\n");
return 1;
}
diff --git a/hw/block/nand.c b/hw/block/nand.c
index 29c6596810..c69e6755d9 100644
--- a/hw/block/nand.c
+++ b/hw/block/nand.c
@@ -663,7 +663,8 @@ static void glue(nand_blk_write_, PAGE_SIZE)(NANDFlashState *s)
sector = SECTOR(s->addr);
off = (s->addr & PAGE_MASK) + s->offset;
soff = SECTOR_OFFSET(s->addr);
- if (blk_read(s->blk, sector, iobuf, PAGE_SECTORS) < 0) {
+ if (blk_pread(s->blk, sector << BDRV_SECTOR_BITS, iobuf,
+ PAGE_SECTORS << BDRV_SECTOR_BITS) < 0) {
printf("%s: read error in sector %" PRIu64 "\n", __func__, sector);
return;
}
@@ -675,21 +676,24 @@ static void glue(nand_blk_write_, PAGE_SIZE)(NANDFlashState *s)
MIN(OOB_SIZE, off + s->iolen - PAGE_SIZE));
}
- if (blk_write(s->blk, sector, iobuf, PAGE_SECTORS) < 0) {
+ if (blk_pwrite(s->blk, sector << BDRV_SECTOR_BITS, iobuf,
+ PAGE_SECTORS << BDRV_SECTOR_BITS, 0) < 0) {
printf("%s: write error in sector %" PRIu64 "\n", __func__, sector);
}
} else {
off = PAGE_START(s->addr) + (s->addr & PAGE_MASK) + s->offset;
sector = off >> 9;
soff = off & 0x1ff;
- if (blk_read(s->blk, sector, iobuf, PAGE_SECTORS + 2) < 0) {
+ if (blk_pread(s->blk, sector << BDRV_SECTOR_BITS, iobuf,
+ (PAGE_SECTORS + 2) << BDRV_SECTOR_BITS) < 0) {
printf("%s: read error in sector %" PRIu64 "\n", __func__, sector);
return;
}
mem_and(iobuf + soff, s->io, s->iolen);
- if (blk_write(s->blk, sector, iobuf, PAGE_SECTORS + 2) < 0) {
+ if (blk_pwrite(s->blk, sector << BDRV_SECTOR_BITS, iobuf,
+ (PAGE_SECTORS + 2) << BDRV_SECTOR_BITS, 0) < 0) {
printf("%s: write error in sector %" PRIu64 "\n", __func__, sector);
}
}
@@ -716,17 +720,20 @@ static void glue(nand_blk_erase_, PAGE_SIZE)(NANDFlashState *s)
i = SECTOR(addr);
page = SECTOR(addr + (1 << (ADDR_SHIFT + s->erase_shift)));
for (; i < page; i ++)
- if (blk_write(s->blk, i, iobuf, 1) < 0) {
+ if (blk_pwrite(s->blk, i << BDRV_SECTOR_BITS, iobuf,
+ BDRV_SECTOR_SIZE, 0) < 0) {
printf("%s: write error in sector %" PRIu64 "\n", __func__, i);
}
} else {
addr = PAGE_START(addr);
page = addr >> 9;
- if (blk_read(s->blk, page, iobuf, 1) < 0) {
+ if (blk_pread(s->blk, page << BDRV_SECTOR_BITS, iobuf,
+ BDRV_SECTOR_SIZE) < 0) {
printf("%s: read error in sector %" PRIu64 "\n", __func__, page);
}
memset(iobuf + (addr & 0x1ff), 0xff, (~addr & 0x1ff) + 1);
- if (blk_write(s->blk, page, iobuf, 1) < 0) {
+ if (blk_pwrite(s->blk, page << BDRV_SECTOR_BITS, iobuf,
+ BDRV_SECTOR_SIZE, 0) < 0) {
printf("%s: write error in sector %" PRIu64 "\n", __func__, page);
}
@@ -734,18 +741,20 @@ static void glue(nand_blk_erase_, PAGE_SIZE)(NANDFlashState *s)
i = (addr & ~0x1ff) + 0x200;
for (addr += ((PAGE_SIZE + OOB_SIZE) << s->erase_shift) - 0x200;
i < addr; i += 0x200) {
- if (blk_write(s->blk, i >> 9, iobuf, 1) < 0) {
+ if (blk_pwrite(s->blk, i, iobuf, BDRV_SECTOR_SIZE, 0) < 0) {
printf("%s: write error in sector %" PRIu64 "\n",
__func__, i >> 9);
}
}
page = i >> 9;
- if (blk_read(s->blk, page, iobuf, 1) < 0) {
+ if (blk_pread(s->blk, page << BDRV_SECTOR_BITS, iobuf,
+ BDRV_SECTOR_SIZE) < 0) {
printf("%s: read error in sector %" PRIu64 "\n", __func__, page);
}
memset(iobuf, 0xff, ((addr - 1) & 0x1ff) + 1);
- if (blk_write(s->blk, page, iobuf, 1) < 0) {
+ if (blk_pwrite(s->blk, page << BDRV_SECTOR_BITS, iobuf,
+ BDRV_SECTOR_SIZE, 0) < 0) {
printf("%s: write error in sector %" PRIu64 "\n", __func__, page);
}
}
@@ -760,7 +769,8 @@ static void glue(nand_blk_load_, PAGE_SIZE)(NANDFlashState *s,
if (s->blk) {
if (s->mem_oob) {
- if (blk_read(s->blk, SECTOR(addr), s->io, PAGE_SECTORS) < 0) {
+ if (blk_pread(s->blk, SECTOR(addr) << BDRV_SECTOR_BITS, s->io,
+ PAGE_SECTORS << BDRV_SECTOR_BITS) < 0) {
printf("%s: read error in sector %" PRIu64 "\n",
__func__, SECTOR(addr));
}
@@ -769,8 +779,8 @@ static void glue(nand_blk_load_, PAGE_SIZE)(NANDFlashState *s,
OOB_SIZE);
s->ioaddr = s->io + SECTOR_OFFSET(s->addr) + offset;
} else {
- if (blk_read(s->blk, PAGE_START(addr) >> 9,
- s->io, (PAGE_SECTORS + 2)) < 0) {
+ if (blk_pread(s->blk, PAGE_START(addr), s->io,
+ (PAGE_SECTORS + 2) << BDRV_SECTOR_BITS) < 0) {
printf("%s: read error in sector %" PRIu64 "\n",
__func__, PAGE_START(addr) >> 9);
}
diff --git a/hw/block/onenand.c b/hw/block/onenand.c
index 883f4b1faa..8d8422739e 100644
--- a/hw/block/onenand.c
+++ b/hw/block/onenand.c
@@ -224,7 +224,8 @@ static void onenand_reset(OneNANDState *s, int cold)
/* Lock the whole flash */
memset(s->blockwp, ONEN_LOCK_LOCKED, s->blocks);
- if (s->blk_cur && blk_read(s->blk_cur, 0, s->boot[0], 8) < 0) {
+ if (s->blk_cur && blk_pread(s->blk_cur, 0, s->boot[0],
+ 8 << BDRV_SECTOR_BITS) < 0) {
hw_error("%s: Loading the BootRAM failed.\n", __func__);
}
}
@@ -240,8 +241,11 @@ static void onenand_system_reset(DeviceState *dev)
static inline int onenand_load_main(OneNANDState *s, int sec, int secn,
void *dest)
{
+ assert(UINT32_MAX >> BDRV_SECTOR_BITS > sec);
+ assert(UINT32_MAX >> BDRV_SECTOR_BITS > secn);
if (s->blk_cur) {
- return blk_read(s->blk_cur, sec, dest, secn) < 0;
+ return blk_pread(s->blk_cur, sec << BDRV_SECTOR_BITS, dest,
+ secn << BDRV_SECTOR_BITS) < 0;
} else if (sec + secn > s->secs_cur) {
return 1;
}
@@ -257,19 +261,22 @@ static inline int onenand_prog_main(OneNANDState *s, int sec, int secn,
int result = 0;
if (secn > 0) {
- uint32_t size = (uint32_t)secn * 512;
+ uint32_t size = secn << BDRV_SECTOR_BITS;
+ uint32_t offset = sec << BDRV_SECTOR_BITS;
+ assert(UINT32_MAX >> BDRV_SECTOR_BITS > sec);
+ assert(UINT32_MAX >> BDRV_SECTOR_BITS > secn);
const uint8_t *sp = (const uint8_t *)src;
uint8_t *dp = 0;
if (s->blk_cur) {
dp = g_malloc(size);
- if (!dp || blk_read(s->blk_cur, sec, dp, secn) < 0) {
+ if (!dp || blk_pread(s->blk_cur, offset, dp, size) < 0) {
result = 1;
}
} else {
if (sec + secn > s->secs_cur) {
result = 1;
} else {
- dp = (uint8_t *)s->current + (sec << 9);
+ dp = (uint8_t *)s->current + offset;
}
}
if (!result) {
@@ -278,7 +285,7 @@ static inline int onenand_prog_main(OneNANDState *s, int sec, int secn,
dp[i] &= sp[i];
}
if (s->blk_cur) {
- result = blk_write(s->blk_cur, sec, dp, secn) < 0;
+ result = blk_pwrite(s->blk_cur, offset, dp, size, 0) < 0;
}
}
if (dp && s->blk_cur) {
@@ -295,7 +302,8 @@ static inline int onenand_load_spare(OneNANDState *s, int sec, int secn,
uint8_t buf[512];
if (s->blk_cur) {
- if (blk_read(s->blk_cur, s->secs_cur + (sec >> 5), buf, 1) < 0) {
+ uint32_t offset = (s->secs_cur + (sec >> 5)) << BDRV_SECTOR_BITS;
+ if (blk_pread(s->blk_cur, offset, buf, BDRV_SECTOR_SIZE) < 0) {
return 1;
}
memcpy(dest, buf + ((sec & 31) << 4), secn << 4);
@@ -304,7 +312,7 @@ static inline int onenand_load_spare(OneNANDState *s, int sec, int secn,
} else {
memcpy(dest, s->current + (s->secs_cur << 9) + (sec << 4), secn << 4);
}
-
+
return 0;
}
@@ -315,10 +323,12 @@ static inline int onenand_prog_spare(OneNANDState *s, int sec, int secn,
if (secn > 0) {
const uint8_t *sp = (const uint8_t *)src;
uint8_t *dp = 0, *dpp = 0;
+ uint32_t offset = (s->secs_cur + (sec >> 5)) << BDRV_SECTOR_BITS;
+ assert(UINT32_MAX >> BDRV_SECTOR_BITS > s->secs_cur + (sec >> 5));
if (s->blk_cur) {
dp = g_malloc(512);
if (!dp
- || blk_read(s->blk_cur, s->secs_cur + (sec >> 5), dp, 1) < 0) {
+ || blk_pread(s->blk_cur, offset, dp, BDRV_SECTOR_SIZE) < 0) {
result = 1;
} else {
dpp = dp + ((sec & 31) << 4);
@@ -336,8 +346,8 @@ static inline int onenand_prog_spare(OneNANDState *s, int sec, int secn,
dpp[i] &= sp[i];
}
if (s->blk_cur) {
- result = blk_write(s->blk_cur, s->secs_cur + (sec >> 5),
- dp, 1) < 0;
+ result = blk_pwrite(s->blk_cur, offset, dp,
+ BDRV_SECTOR_SIZE, 0) < 0;
}
}
g_free(dp);
@@ -355,14 +365,17 @@ static inline int onenand_erase(OneNANDState *s, int sec, int num)
for (; num > 0; num--, sec++) {
if (s->blk_cur) {
int erasesec = s->secs_cur + (sec >> 5);
- if (blk_write(s->blk_cur, sec, blankbuf, 1) < 0) {
+ if (blk_pwrite(s->blk_cur, sec << BDRV_SECTOR_BITS, blankbuf,
+ BDRV_SECTOR_SIZE, 0) < 0) {
goto fail;
}
- if (blk_read(s->blk_cur, erasesec, tmpbuf, 1) < 0) {
+ if (blk_pread(s->blk_cur, erasesec << BDRV_SECTOR_BITS, tmpbuf,
+ BDRV_SECTOR_SIZE) < 0) {
goto fail;
}
memcpy(tmpbuf + ((sec & 31) << 4), blankbuf, 1 << 4);
- if (blk_write(s->blk_cur, erasesec, tmpbuf, 1) < 0) {
+ if (blk_pwrite(s->blk_cur, erasesec << BDRV_SECTOR_BITS, tmpbuf,
+ BDRV_SECTOR_SIZE, 0) < 0) {
goto fail;
}
} else {
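The spare (out-of-band) area handled above packs 16 bytes per main sector, so 32 entries fit in each 512-byte block stored after the main area; that is where the (sec >> 5) and (sec & 31) terms come from. Worked numbers (the sector value is arbitrary):

int sec = 70;
/* block holding its spare data: secs_cur + (70 >> 5) = secs_cur + 2    */
/* byte offset of that block:    (secs_cur + 2) << BDRV_SECTOR_BITS     */
/* entry within the block:       (70 & 31) << 4 = 6 * 16 = 96 bytes in  */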
diff --git a/hw/block/pflash_cfi01.c b/hw/block/pflash_cfi01.c
index 106a775232..3a1f85d279 100644
--- a/hw/block/pflash_cfi01.c
+++ b/hw/block/pflash_cfi01.c
@@ -413,11 +413,11 @@ static void pflash_update(pflash_t *pfl, int offset,
int offset_end;
if (pfl->blk) {
offset_end = offset + size;
- /* round to sectors */
- offset = offset >> 9;
- offset_end = (offset_end + 511) >> 9;
- blk_write(pfl->blk, offset, pfl->storage + (offset << 9),
- offset_end - offset);
+ /* widen to sector boundaries */
+ offset = QEMU_ALIGN_DOWN(offset, BDRV_SECTOR_SIZE);
+ offset_end = QEMU_ALIGN_UP(offset_end, BDRV_SECTOR_SIZE);
+ blk_pwrite(pfl->blk, offset, pfl->storage + offset,
+ offset_end - offset, 0);
}
}
@@ -739,7 +739,7 @@ static void pflash_cfi01_realize(DeviceState *dev, Error **errp)
if (pfl->blk) {
/* read the initial flash content */
- ret = blk_read(pfl->blk, 0, pfl->storage, total_len >> 9);
+ ret = blk_pread(pfl->blk, 0, pfl->storage, total_len);
if (ret < 0) {
vmstate_unregister_ram(&pfl->mem, DEVICE(pfl));
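pflash_update() now widens the dirty byte range to sector boundaries itself rather than rounding sector numbers. With illustrative values (the pflash_cfi02.c hunk below is the same change):

/* a 0x20-byte write at offset 0x1234 */
offset     = QEMU_ALIGN_DOWN(0x1234, BDRV_SECTOR_SIZE);          /* 0x1200 */
offset_end = QEMU_ALIGN_UP(0x1234 + 0x20, BDRV_SECTOR_SIZE);     /* 0x1400 */
/* blk_pwrite() then writes back 0x200 bytes covering the modified span */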
diff --git a/hw/block/pflash_cfi02.c b/hw/block/pflash_cfi02.c
index b13172c6e1..5f106102c5 100644
--- a/hw/block/pflash_cfi02.c
+++ b/hw/block/pflash_cfi02.c
@@ -253,11 +253,11 @@ static void pflash_update(pflash_t *pfl, int offset,
int offset_end;
if (pfl->blk) {
offset_end = offset + size;
- /* round to sectors */
- offset = offset >> 9;
- offset_end = (offset_end + 511) >> 9;
- blk_write(pfl->blk, offset, pfl->storage + (offset << 9),
- offset_end - offset);
+ /* widen to sector boundaries */
+ offset = QEMU_ALIGN_DOWN(offset, BDRV_SECTOR_SIZE);
+ offset_end = QEMU_ALIGN_UP(offset_end, BDRV_SECTOR_SIZE);
+ blk_pwrite(pfl->blk, offset, pfl->storage + offset,
+ offset_end - offset, 0);
}
}
@@ -622,7 +622,7 @@ static void pflash_cfi02_realize(DeviceState *dev, Error **errp)
pfl->chip_len = chip_len;
if (pfl->blk) {
/* read the initial flash content */
- ret = blk_read(pfl->blk, 0, pfl->storage, chip_len >> 9);
+ ret = blk_pread(pfl->blk, 0, pfl->storage, chip_len);
if (ret < 0) {
vmstate_unregister_ram(&pfl->orig_mem, DEVICE(pfl));
error_setg(errp, "failed to read the initial flash content");
diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c
index 3f88f8cf59..284e64667c 100644
--- a/hw/block/virtio-blk.c
+++ b/hw/block/virtio-blk.c
@@ -322,7 +322,6 @@ static inline void submit_requests(BlockBackend *blk, MultiReqBuffer *mrb,
{
QEMUIOVector *qiov = &mrb->reqs[start]->qiov;
int64_t sector_num = mrb->reqs[start]->sector_num;
- int nb_sectors = mrb->reqs[start]->qiov.size / BDRV_SECTOR_SIZE;
bool is_write = mrb->is_write;
if (num_reqs > 1) {
@@ -331,7 +330,7 @@ static inline void submit_requests(BlockBackend *blk, MultiReqBuffer *mrb,
int tmp_niov = qiov->niov;
/* mrb->reqs[start]->qiov was initialized from external so we can't
- * modifiy it here. We need to initialize it locally and then add the
+ * modify it here. We need to initialize it locally and then add the
* external iovecs. */
qemu_iovec_init(qiov, niov);
@@ -343,23 +342,22 @@ static inline void submit_requests(BlockBackend *blk, MultiReqBuffer *mrb,
qemu_iovec_concat(qiov, &mrb->reqs[i]->qiov, 0,
mrb->reqs[i]->qiov.size);
mrb->reqs[i - 1]->mr_next = mrb->reqs[i];
- nb_sectors += mrb->reqs[i]->qiov.size / BDRV_SECTOR_SIZE;
}
- assert(nb_sectors == qiov->size / BDRV_SECTOR_SIZE);
- trace_virtio_blk_submit_multireq(mrb, start, num_reqs, sector_num,
- nb_sectors, is_write);
+ trace_virtio_blk_submit_multireq(mrb, start, num_reqs,
+ sector_num << BDRV_SECTOR_BITS,
+ qiov->size, is_write);
block_acct_merge_done(blk_get_stats(blk),
is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ,
num_reqs - 1);
}
if (is_write) {
- blk_aio_writev(blk, sector_num, qiov, nb_sectors,
- virtio_blk_rw_complete, mrb->reqs[start]);
+ blk_aio_pwritev(blk, sector_num << BDRV_SECTOR_BITS, qiov, 0,
+ virtio_blk_rw_complete, mrb->reqs[start]);
} else {
- blk_aio_readv(blk, sector_num, qiov, nb_sectors,
- virtio_blk_rw_complete, mrb->reqs[start]);
+ blk_aio_preadv(blk, sector_num << BDRV_SECTOR_BITS, qiov, 0,
+ virtio_blk_rw_complete, mrb->reqs[start]);
}
}
diff --git a/hw/block/xen_disk.c b/hw/block/xen_disk.c
index d4ce380fee..064c116a7c 100644
--- a/hw/block/xen_disk.c
+++ b/hw/block/xen_disk.c
@@ -554,9 +554,8 @@ static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
block_acct_start(blk_get_stats(blkdev->blk), &ioreq->acct,
ioreq->v.size, BLOCK_ACCT_READ);
ioreq->aio_inflight++;
- blk_aio_readv(blkdev->blk, ioreq->start / BLOCK_SIZE,
- &ioreq->v, ioreq->v.size / BLOCK_SIZE,
- qemu_aio_complete, ioreq);
+ blk_aio_preadv(blkdev->blk, ioreq->start, &ioreq->v, 0,
+ qemu_aio_complete, ioreq);
break;
case BLKIF_OP_WRITE:
case BLKIF_OP_FLUSH_DISKCACHE:
@@ -569,9 +568,8 @@ static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
ioreq->req.operation == BLKIF_OP_WRITE ?
BLOCK_ACCT_WRITE : BLOCK_ACCT_FLUSH);
ioreq->aio_inflight++;
- blk_aio_writev(blkdev->blk, ioreq->start / BLOCK_SIZE,
- &ioreq->v, ioreq->v.size / BLOCK_SIZE,
- qemu_aio_complete, ioreq);
+ blk_aio_pwritev(blkdev->blk, ioreq->start, &ioreq->v, 0,
+ qemu_aio_complete, ioreq);
break;
case BLKIF_OP_DISCARD:
{
diff --git a/hw/display/blizzard.c b/hw/display/blizzard.c
index c231960d96..cbf07d14d9 100644
--- a/hw/display/blizzard.c
+++ b/hw/display/blizzard.c
@@ -925,16 +925,83 @@ static void blizzard_update_display(void *opaque)
s->my[1] = 0;
}
-#define DEPTH 8
-#include "blizzard_template.h"
-#define DEPTH 15
-#include "blizzard_template.h"
-#define DEPTH 16
-#include "blizzard_template.h"
-#define DEPTH 24
-#include "blizzard_template.h"
-#define DEPTH 32
-#include "blizzard_template.h"
+static void blizzard_draw_line16_32(uint32_t *dest,
+ const uint16_t *src, unsigned int width)
+{
+ uint16_t data;
+ unsigned int r, g, b;
+ const uint16_t *end = (const void *) src + width;
+ while (src < end) {
+ data = *src ++;
+ b = (data & 0x1f) << 3;
+ data >>= 5;
+ g = (data & 0x3f) << 2;
+ data >>= 6;
+ r = (data & 0x1f) << 3;
+ data >>= 5;
+ *dest++ = rgb_to_pixel32(r, g, b);
+ }
+}
+
+static void blizzard_draw_line24mode1_32(uint32_t *dest,
+ const uint8_t *src, unsigned int width)
+{
+ /* TODO: check if SDL 24-bit planes are not in the same format and
+ * if so, use memcpy */
+ unsigned int r[2], g[2], b[2];
+ const uint8_t *end = src + width;
+ while (src < end) {
+ g[0] = *src ++;
+ r[0] = *src ++;
+ r[1] = *src ++;
+ b[0] = *src ++;
+ *dest++ = rgb_to_pixel32(r[0], g[0], b[0]);
+ b[1] = *src ++;
+ g[1] = *src ++;
+ *dest++ = rgb_to_pixel32(r[1], g[1], b[1]);
+ }
+}
+
+static void blizzard_draw_line24mode2_32(uint32_t *dest,
+ const uint8_t *src, unsigned int width)
+{
+ unsigned int r, g, b;
+ const uint8_t *end = src + width;
+ while (src < end) {
+ r = *src ++;
+ src ++;
+ b = *src ++;
+ g = *src ++;
+ *dest++ = rgb_to_pixel32(r, g, b);
+ }
+}
+
+/* No rotation */
+static blizzard_fn_t blizzard_draw_fn_32[0x10] = {
+ NULL,
+ /* RGB 5:6:5*/
+ (blizzard_fn_t) blizzard_draw_line16_32,
+ /* RGB 6:6:6 mode 1 */
+ (blizzard_fn_t) blizzard_draw_line24mode1_32,
+ /* RGB 8:8:8 mode 1 */
+ (blizzard_fn_t) blizzard_draw_line24mode1_32,
+ NULL, NULL,
+ /* RGB 6:6:6 mode 2 */
+ (blizzard_fn_t) blizzard_draw_line24mode2_32,
+ /* RGB 8:8:8 mode 2 */
+ (blizzard_fn_t) blizzard_draw_line24mode2_32,
+ /* YUV 4:2:2 */
+ NULL,
+ /* YUV 4:2:0 */
+ NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL,
+};
+
+/* 90deg, 180deg and 270deg rotation */
+static blizzard_fn_t blizzard_draw_fn_r_32[0x10] = {
+ /* TODO */
+ [0 ... 0xf] = NULL,
+};
static const GraphicHwOps blizzard_ops = {
.invalidate = blizzard_invalidate_display,
@@ -951,35 +1018,10 @@ void *s1d13745_init(qemu_irq gpio_int)
s->con = graphic_console_init(NULL, 0, &blizzard_ops, s);
surface = qemu_console_surface(s->con);
- switch (surface_bits_per_pixel(surface)) {
- case 0:
- s->line_fn_tab[0] = s->line_fn_tab[1] =
- g_malloc0(sizeof(blizzard_fn_t) * 0x10);
- break;
- case 8:
- s->line_fn_tab[0] = blizzard_draw_fn_8;
- s->line_fn_tab[1] = blizzard_draw_fn_r_8;
- break;
- case 15:
- s->line_fn_tab[0] = blizzard_draw_fn_15;
- s->line_fn_tab[1] = blizzard_draw_fn_r_15;
- break;
- case 16:
- s->line_fn_tab[0] = blizzard_draw_fn_16;
- s->line_fn_tab[1] = blizzard_draw_fn_r_16;
- break;
- case 24:
- s->line_fn_tab[0] = blizzard_draw_fn_24;
- s->line_fn_tab[1] = blizzard_draw_fn_r_24;
- break;
- case 32:
- s->line_fn_tab[0] = blizzard_draw_fn_32;
- s->line_fn_tab[1] = blizzard_draw_fn_r_32;
- break;
- default:
- fprintf(stderr, "%s: Bad color depth\n", __FUNCTION__);
- exit(1);
- }
+ assert(surface_bits_per_pixel(surface) == 32);
+
+ s->line_fn_tab[0] = blizzard_draw_fn_32;
+ s->line_fn_tab[1] = blizzard_draw_fn_r_32;
blizzard_reset(s);
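With only 32 bpp surfaces left, the 5:6:5 expansion in blizzard_draw_line16_32() is easy to check by hand (input value arbitrary, assuming the usual 0x00RRGGBB packing of rgb_to_pixel32()):

uint16_t data = 0xF81F;        /* B = 0x1F, G = 0x00, R = 0x1F in 5:6:5 */
/* b = (data & 0x1f) << 3 = 0xF8;  g = 0x00 << 2 = 0x00;  r = 0x1F << 3 = 0xF8 */
/* rgb_to_pixel32(0xF8, 0x00, 0xF8) == 0x00F800F8                              */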
diff --git a/hw/display/blizzard_template.h b/hw/display/blizzard_template.h
deleted file mode 100644
index b7ef27c808..0000000000
--- a/hw/display/blizzard_template.h
+++ /dev/null
@@ -1,146 +0,0 @@
-/*
- * QEMU Epson S1D13744/S1D13745 templates
- *
- * Copyright (C) 2008 Nokia Corporation
- * Written by Andrzej Zaborowski <andrew@openedhand.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 or
- * (at your option) version 3 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#define SKIP_PIXEL(to) (to += deststep)
-#if DEPTH == 8
-# define PIXEL_TYPE uint8_t
-# define COPY_PIXEL(to, from) do { *to = from; SKIP_PIXEL(to); } while (0)
-# define COPY_PIXEL1(to, from) (*to++ = from)
-#elif DEPTH == 15 || DEPTH == 16
-# define PIXEL_TYPE uint16_t
-# define COPY_PIXEL(to, from) do { *to = from; SKIP_PIXEL(to); } while (0)
-# define COPY_PIXEL1(to, from) (*to++ = from)
-#elif DEPTH == 24
-# define PIXEL_TYPE uint8_t
-# define COPY_PIXEL(to, from) \
- do { \
- to[0] = from; \
- to[1] = (from) >> 8; \
- to[2] = (from) >> 16; \
- SKIP_PIXEL(to); \
- } while (0)
-
-# define COPY_PIXEL1(to, from) \
- do { \
- *to++ = from; \
- *to++ = (from) >> 8; \
- *to++ = (from) >> 16; \
- } while (0)
-#elif DEPTH == 32
-# define PIXEL_TYPE uint32_t
-# define COPY_PIXEL(to, from) do { *to = from; SKIP_PIXEL(to); } while (0)
-# define COPY_PIXEL1(to, from) (*to++ = from)
-#else
-# error unknown bit depth
-#endif
-
-#ifdef HOST_WORDS_BIGENDIAN
-# define SWAP_WORDS 1
-#endif
-
-static void glue(blizzard_draw_line16_, DEPTH)(PIXEL_TYPE *dest,
- const uint16_t *src, unsigned int width)
-{
-#if !defined(SWAP_WORDS) && DEPTH == 16
- memcpy(dest, src, width);
-#else
- uint16_t data;
- unsigned int r, g, b;
- const uint16_t *end = (const void *) src + width;
- while (src < end) {
- data = *src ++;
- b = (data & 0x1f) << 3;
- data >>= 5;
- g = (data & 0x3f) << 2;
- data >>= 6;
- r = (data & 0x1f) << 3;
- data >>= 5;
- COPY_PIXEL1(dest, glue(rgb_to_pixel, DEPTH)(r, g, b));
- }
-#endif
-}
-
-static void glue(blizzard_draw_line24mode1_, DEPTH)(PIXEL_TYPE *dest,
- const uint8_t *src, unsigned int width)
-{
- /* TODO: check if SDL 24-bit planes are not in the same format and
- * if so, use memcpy */
- unsigned int r[2], g[2], b[2];
- const uint8_t *end = src + width;
- while (src < end) {
- g[0] = *src ++;
- r[0] = *src ++;
- r[1] = *src ++;
- b[0] = *src ++;
- COPY_PIXEL1(dest, glue(rgb_to_pixel, DEPTH)(r[0], g[0], b[0]));
- b[1] = *src ++;
- g[1] = *src ++;
- COPY_PIXEL1(dest, glue(rgb_to_pixel, DEPTH)(r[1], g[1], b[1]));
- }
-}
-
-static void glue(blizzard_draw_line24mode2_, DEPTH)(PIXEL_TYPE *dest,
- const uint8_t *src, unsigned int width)
-{
- unsigned int r, g, b;
- const uint8_t *end = src + width;
- while (src < end) {
- r = *src ++;
- src ++;
- b = *src ++;
- g = *src ++;
- COPY_PIXEL1(dest, glue(rgb_to_pixel, DEPTH)(r, g, b));
- }
-}
-
-/* No rotation */
-static blizzard_fn_t glue(blizzard_draw_fn_, DEPTH)[0x10] = {
- NULL,
- /* RGB 5:6:5*/
- (blizzard_fn_t) glue(blizzard_draw_line16_, DEPTH),
- /* RGB 6:6:6 mode 1 */
- (blizzard_fn_t) glue(blizzard_draw_line24mode1_, DEPTH),
- /* RGB 8:8:8 mode 1 */
- (blizzard_fn_t) glue(blizzard_draw_line24mode1_, DEPTH),
- NULL, NULL,
- /* RGB 6:6:6 mode 2 */
- (blizzard_fn_t) glue(blizzard_draw_line24mode2_, DEPTH),
- /* RGB 8:8:8 mode 2 */
- (blizzard_fn_t) glue(blizzard_draw_line24mode2_, DEPTH),
- /* YUV 4:2:2 */
- NULL,
- /* YUV 4:2:0 */
- NULL,
- NULL, NULL, NULL, NULL, NULL, NULL,
-};
-
-/* 90deg, 180deg and 270deg rotation */
-static blizzard_fn_t glue(blizzard_draw_fn_r_, DEPTH)[0x10] = {
- /* TODO */
- [0 ... 0xf] = NULL,
-};
-
-#undef DEPTH
-#undef SKIP_PIXEL
-#undef COPY_PIXEL
-#undef COPY_PIXEL1
-#undef PIXEL_TYPE
-
-#undef SWAP_WORDS
diff --git a/hw/display/exynos4210_fimd.c b/hw/display/exynos4210_fimd.c
index 728eb214a4..e5be713406 100644
--- a/hw/display/exynos4210_fimd.c
+++ b/hw/display/exynos4210_fimd.c
@@ -1909,9 +1909,10 @@ static const GraphicHwOps exynos4210_fimd_ops = {
.gfx_update = exynos4210_fimd_update,
};
-static int exynos4210_fimd_init(SysBusDevice *dev)
+static void exynos4210_fimd_init(Object *obj)
{
- Exynos4210fimdState *s = EXYNOS4210_FIMD(dev);
+ Exynos4210fimdState *s = EXYNOS4210_FIMD(obj);
+ SysBusDevice *dev = SYS_BUS_DEVICE(obj);
s->ifb = NULL;
@@ -1919,28 +1920,32 @@ static int exynos4210_fimd_init(SysBusDevice *dev)
sysbus_init_irq(dev, &s->irq[1]);
sysbus_init_irq(dev, &s->irq[2]);
- memory_region_init_io(&s->iomem, OBJECT(s), &exynos4210_fimd_mmio_ops, s,
+ memory_region_init_io(&s->iomem, obj, &exynos4210_fimd_mmio_ops, s,
"exynos4210.fimd", FIMD_REGS_SIZE);
sysbus_init_mmio(dev, &s->iomem);
- s->console = graphic_console_init(DEVICE(dev), 0, &exynos4210_fimd_ops, s);
+}
- return 0;
+static void exynos4210_fimd_realize(DeviceState *dev, Error **errp)
+{
+ Exynos4210fimdState *s = EXYNOS4210_FIMD(dev);
+
+ s->console = graphic_console_init(dev, 0, &exynos4210_fimd_ops, s);
}
static void exynos4210_fimd_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);
dc->vmsd = &exynos4210_fimd_vmstate;
dc->reset = exynos4210_fimd_reset;
- k->init = exynos4210_fimd_init;
+ dc->realize = exynos4210_fimd_realize;
}
static const TypeInfo exynos4210_fimd_info = {
.name = TYPE_EXYNOS4210_FIMD,
.parent = TYPE_SYS_BUS_DEVICE,
.instance_size = sizeof(Exynos4210fimdState),
+ .instance_init = exynos4210_fimd_init,
.class_init = exynos4210_fimd_class_init,
};
diff --git a/hw/display/jazz_led.c b/hw/display/jazz_led.c
index 09dcdb46a3..b72fdb1717 100644
--- a/hw/display/jazz_led.c
+++ b/hw/display/jazz_led.c
@@ -267,16 +267,20 @@ static const GraphicHwOps jazz_led_ops = {
.text_update = jazz_led_text_update,
};
-static int jazz_led_init(SysBusDevice *dev)
+static void jazz_led_init(Object *obj)
{
- LedState *s = JAZZ_LED(dev);
+ LedState *s = JAZZ_LED(obj);
+ SysBusDevice *dev = SYS_BUS_DEVICE(obj);
- memory_region_init_io(&s->iomem, OBJECT(s), &led_ops, s, "led", 1);
+ memory_region_init_io(&s->iomem, obj, &led_ops, s, "led", 1);
sysbus_init_mmio(dev, &s->iomem);
+}
- s->con = graphic_console_init(DEVICE(dev), 0, &jazz_led_ops, s);
+static void jazz_led_realize(DeviceState *dev, Error **errp)
+{
+ LedState *s = JAZZ_LED(dev);
- return 0;
+ s->con = graphic_console_init(dev, 0, &jazz_led_ops, s);
}
static void jazz_led_reset(DeviceState *d)
@@ -291,18 +295,18 @@ static void jazz_led_reset(DeviceState *d)
static void jazz_led_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);
- k->init = jazz_led_init;
dc->desc = "Jazz LED display",
dc->vmsd = &vmstate_jazz_led;
dc->reset = jazz_led_reset;
+ dc->realize = jazz_led_realize;
}
static const TypeInfo jazz_led_info = {
.name = TYPE_JAZZ_LED,
.parent = TYPE_SYS_BUS_DEVICE,
.instance_size = sizeof(LedState),
+ .instance_init = jazz_led_init,
.class_init = jazz_led_class_init,
};
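Both the exynos4210 FIMD and jazz_led conversions split the old init into an instance_init for MMIO/IRQ setup plus a DeviceClass::realize hook, moving graphic_console_init() to realize time. The shape, with hypothetical names:

static void foo_realize(DeviceState *dev, Error **errp)
{
    FooState *s = FOO(dev);

    s->con = graphic_console_init(dev, 0, &foo_gfx_ops, s);
}

static void foo_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = foo_realize;
}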
diff --git a/hw/display/omap_lcd_template.h b/hw/display/omap_lcd_template.h
index f0ce71fd66..1025ff3825 100644
--- a/hw/display/omap_lcd_template.h
+++ b/hw/display/omap_lcd_template.h
@@ -27,13 +27,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#if DEPTH == 8
-# define BPP 1
-# define PIXEL_TYPE uint8_t
-#elif DEPTH == 15 || DEPTH == 16
-# define BPP 2
-# define PIXEL_TYPE uint16_t
-#elif DEPTH == 32
+#if DEPTH == 32
# define BPP 4
# define PIXEL_TYPE uint32_t
#else
@@ -152,7 +146,7 @@ static void glue(draw_line12_, DEPTH)(void *opaque,
static void glue(draw_line16_, DEPTH)(void *opaque,
uint8_t *d, const uint8_t *s, int width, int deststep)
{
-#if DEPTH == 16 && defined(HOST_WORDS_BIGENDIAN) == defined(TARGET_WORDS_BIGENDIAN)
+#if defined(HOST_WORDS_BIGENDIAN) == defined(TARGET_WORDS_BIGENDIAN)
memcpy(d, s, width * 2);
#else
uint16_t v;
diff --git a/hw/display/omap_lcdc.c b/hw/display/omap_lcdc.c
index ce1058bf85..07a5effe04 100644
--- a/hw/display/omap_lcdc.c
+++ b/hw/display/omap_lcdc.c
@@ -71,47 +71,9 @@ static void omap_lcd_interrupts(struct omap_lcd_panel_s *s)
#define draw_line_func drawfn
-#define DEPTH 8
-#include "omap_lcd_template.h"
-#define DEPTH 15
-#include "omap_lcd_template.h"
-#define DEPTH 16
-#include "omap_lcd_template.h"
#define DEPTH 32
#include "omap_lcd_template.h"
-static draw_line_func draw_line_table2[33] = {
- [0 ... 32] = NULL,
- [8] = draw_line2_8,
- [15] = draw_line2_15,
- [16] = draw_line2_16,
- [32] = draw_line2_32,
-}, draw_line_table4[33] = {
- [0 ... 32] = NULL,
- [8] = draw_line4_8,
- [15] = draw_line4_15,
- [16] = draw_line4_16,
- [32] = draw_line4_32,
-}, draw_line_table8[33] = {
- [0 ... 32] = NULL,
- [8] = draw_line8_8,
- [15] = draw_line8_15,
- [16] = draw_line8_16,
- [32] = draw_line8_32,
-}, draw_line_table12[33] = {
- [0 ... 32] = NULL,
- [8] = draw_line12_8,
- [15] = draw_line12_15,
- [16] = draw_line12_16,
- [32] = draw_line12_32,
-}, draw_line_table16[33] = {
- [0 ... 32] = NULL,
- [8] = draw_line16_8,
- [15] = draw_line16_15,
- [16] = draw_line16_16,
- [32] = draw_line16_32,
-};
-
static void omap_update_display(void *opaque)
{
struct omap_lcd_panel_s *omap_lcd = (struct omap_lcd_panel_s *) opaque;
@@ -143,25 +105,25 @@ static void omap_update_display(void *opaque)
/* Colour depth */
switch ((omap_lcd->palette[0] >> 12) & 7) {
case 1:
- draw_line = draw_line_table2[surface_bits_per_pixel(surface)];
+ draw_line = draw_line2_32;
bpp = 2;
break;
case 2:
- draw_line = draw_line_table4[surface_bits_per_pixel(surface)];
+ draw_line = draw_line4_32;
bpp = 4;
break;
case 3:
- draw_line = draw_line_table8[surface_bits_per_pixel(surface)];
+ draw_line = draw_line8_32;
bpp = 8;
break;
case 4 ... 7:
if (!omap_lcd->tft)
- draw_line = draw_line_table12[surface_bits_per_pixel(surface)];
+ draw_line = draw_line12_32;
else
- draw_line = draw_line_table16[surface_bits_per_pixel(surface)];
+ draw_line = draw_line16_32;
bpp = 16;
break;
diff --git a/hw/i386/acpi-build.c b/hw/i386/acpi-build.c
index 64770034ff..279f0d7d05 100644
--- a/hw/i386/acpi-build.c
+++ b/hw/i386/acpi-build.c
@@ -2427,26 +2427,6 @@ build_tpm2(GArray *table_data, GArray *linker)
(void *)tpm2_ptr, "TPM2", sizeof(*tpm2_ptr), 4, NULL, NULL);
}
-typedef enum {
- MEM_AFFINITY_NOFLAGS = 0,
- MEM_AFFINITY_ENABLED = (1 << 0),
- MEM_AFFINITY_HOTPLUGGABLE = (1 << 1),
- MEM_AFFINITY_NON_VOLATILE = (1 << 2),
-} MemoryAffinityFlags;
-
-static void
-acpi_build_srat_memory(AcpiSratMemoryAffinity *numamem, uint64_t base,
- uint64_t len, int node, MemoryAffinityFlags flags)
-{
- numamem->type = ACPI_SRAT_MEMORY;
- numamem->length = sizeof(*numamem);
- memset(numamem->proximity, 0, 4);
- numamem->proximity[0] = node;
- numamem->flags = cpu_to_le32(flags);
- numamem->base_addr = cpu_to_le64(base);
- numamem->range_length = cpu_to_le64(len);
-}
-
static void
build_srat(GArray *table_data, GArray *linker, MachineState *machine)
{
@@ -2474,7 +2454,7 @@ build_srat(GArray *table_data, GArray *linker, MachineState *machine)
int apic_id = apic_ids->cpus[i].arch_id;
core = acpi_data_push(table_data, sizeof *core);
- core->type = ACPI_SRAT_PROCESSOR;
+ core->type = ACPI_SRAT_PROCESSOR_APIC;
core->length = sizeof(*core);
core->local_apic_id = apic_id;
curnode = pcms->node_cpu[apic_id];
@@ -2492,7 +2472,7 @@ build_srat(GArray *table_data, GArray *linker, MachineState *machine)
numa_start = table_data->len;
numamem = acpi_data_push(table_data, sizeof *numamem);
- acpi_build_srat_memory(numamem, 0, 640*1024, 0, MEM_AFFINITY_ENABLED);
+ build_srat_memory(numamem, 0, 640 * 1024, 0, MEM_AFFINITY_ENABLED);
next_base = 1024 * 1024;
for (i = 1; i < pcms->numa_nodes + 1; ++i) {
mem_base = next_base;
@@ -2508,21 +2488,21 @@ build_srat(GArray *table_data, GArray *linker, MachineState *machine)
mem_len -= next_base - pcms->below_4g_mem_size;
if (mem_len > 0) {
numamem = acpi_data_push(table_data, sizeof *numamem);
- acpi_build_srat_memory(numamem, mem_base, mem_len, i - 1,
- MEM_AFFINITY_ENABLED);
+ build_srat_memory(numamem, mem_base, mem_len, i - 1,
+ MEM_AFFINITY_ENABLED);
}
mem_base = 1ULL << 32;
mem_len = next_base - pcms->below_4g_mem_size;
next_base += (1ULL << 32) - pcms->below_4g_mem_size;
}
numamem = acpi_data_push(table_data, sizeof *numamem);
- acpi_build_srat_memory(numamem, mem_base, mem_len, i - 1,
- MEM_AFFINITY_ENABLED);
+ build_srat_memory(numamem, mem_base, mem_len, i - 1,
+ MEM_AFFINITY_ENABLED);
}
slots = (table_data->len - numa_start) / sizeof *numamem;
for (; slots < pcms->numa_nodes + 2; slots++) {
numamem = acpi_data_push(table_data, sizeof *numamem);
- acpi_build_srat_memory(numamem, 0, 0, 0, MEM_AFFINITY_NOFLAGS);
+ build_srat_memory(numamem, 0, 0, 0, MEM_AFFINITY_NOFLAGS);
}
/*
@@ -2532,10 +2512,9 @@ build_srat(GArray *table_data, GArray *linker, MachineState *machine)
*/
if (hotplugabble_address_space_size) {
numamem = acpi_data_push(table_data, sizeof *numamem);
- acpi_build_srat_memory(numamem, pcms->hotplug_memory.base,
- hotplugabble_address_space_size, 0,
- MEM_AFFINITY_HOTPLUGGABLE |
- MEM_AFFINITY_ENABLED);
+ build_srat_memory(numamem, pcms->hotplug_memory.base,
+ hotplugabble_address_space_size, 0,
+ MEM_AFFINITY_HOTPLUGGABLE | MEM_AFFINITY_ENABLED);
}
build_header(linker, table_data,
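The calls above switch from the file-local acpi_build_srat_memory() removed earlier in this hunk to a shared build_srat_memory(); given that hw/acpi/aml-build.c and include/hw/acpi/aml-build.h also change in this series, the helper and the MemoryAffinityFlags enum presumably moved there. A sketch of the shared helper, reconstructed from the removed static body:

/* Assumed to live in hw/acpi/aml-build.c; the body mirrors the removed
 * static acpi_build_srat_memory(). */
void build_srat_memory(AcpiSratMemoryAffinity *numamem, uint64_t base,
                       uint64_t len, int node, MemoryAffinityFlags flags)
{
    numamem->type = ACPI_SRAT_MEMORY;
    numamem->length = sizeof(*numamem);
    memset(numamem->proximity, 0, 4);
    numamem->proximity[0] = node;
    numamem->flags = cpu_to_le32(flags);
    numamem->base_addr = cpu_to_le64(base);
    numamem->range_length = cpu_to_le64(len);
}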
diff --git a/hw/i386/kvmvapic.c b/hw/i386/kvmvapic.c
index c69f374049..f14445d4fb 100644
--- a/hw/i386/kvmvapic.c
+++ b/hw/i386/kvmvapic.c
@@ -397,7 +397,7 @@ static void patch_instruction(VAPICROMState *s, X86CPU *cpu, target_ulong ip)
uint32_t imm32;
target_ulong current_pc = 0;
target_ulong current_cs_base = 0;
- int current_flags = 0;
+ uint32_t current_flags = 0;
if (smp_cpus == 1) {
handlers = &s->rom_state.up;
@@ -446,7 +446,6 @@ static void patch_instruction(VAPICROMState *s, X86CPU *cpu, target_ulong ip)
resume_all_vcpus();
if (!kvm_enabled()) {
- cs->current_tb = NULL;
tb_gen_code(cs, current_pc, current_cs_base, current_flags, 1);
cpu_resume_from_signal(cs, NULL);
}
diff --git a/hw/ide/atapi.c b/hw/ide/atapi.c
index 2bb606c1c5..95056d92e7 100644
--- a/hw/ide/atapi.c
+++ b/hw/ide/atapi.c
@@ -28,6 +28,9 @@
#include "hw/scsi/scsi.h"
#include "sysemu/block-backend.h"
+#define ATAPI_SECTOR_BITS (2 + BDRV_SECTOR_BITS)
+#define ATAPI_SECTOR_SIZE (1 << ATAPI_SECTOR_BITS)
+
static void ide_atapi_cmd_read_dma_cb(void *opaque, int ret);
static void padstr8(uint8_t *buf, int buf_size, const char *src)
@@ -111,7 +114,7 @@ cd_read_sector_sync(IDEState *s)
{
int ret;
block_acct_start(blk_get_stats(s->blk), &s->acct,
- 4 * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
+ ATAPI_SECTOR_SIZE, BLOCK_ACCT_READ);
#ifdef DEBUG_IDE_ATAPI
printf("cd_read_sector_sync: lba=%d\n", s->lba);
@@ -119,12 +122,12 @@ cd_read_sector_sync(IDEState *s)
switch (s->cd_sector_size) {
case 2048:
- ret = blk_read(s->blk, (int64_t)s->lba << 2,
- s->io_buffer, 4);
+ ret = blk_pread(s->blk, (int64_t)s->lba << ATAPI_SECTOR_BITS,
+ s->io_buffer, ATAPI_SECTOR_SIZE);
break;
case 2352:
- ret = blk_read(s->blk, (int64_t)s->lba << 2,
- s->io_buffer + 16, 4);
+ ret = blk_pread(s->blk, (int64_t)s->lba << ATAPI_SECTOR_BITS,
+ s->io_buffer + 16, ATAPI_SECTOR_SIZE);
if (ret >= 0) {
cd_data_to_raw(s->io_buffer, s->lba);
}
@@ -182,7 +185,7 @@ static int cd_read_sector(IDEState *s)
s->iov.iov_base = (s->cd_sector_size == 2352) ?
s->io_buffer + 16 : s->io_buffer;
- s->iov.iov_len = 4 * BDRV_SECTOR_SIZE;
+ s->iov.iov_len = ATAPI_SECTOR_SIZE;
qemu_iovec_init_external(&s->qiov, &s->iov, 1);
#ifdef DEBUG_IDE_ATAPI
@@ -190,7 +193,7 @@ static int cd_read_sector(IDEState *s)
#endif
block_acct_start(blk_get_stats(s->blk), &s->acct,
- 4 * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
+ ATAPI_SECTOR_SIZE, BLOCK_ACCT_READ);
ide_buffered_readv(s, (int64_t)s->lba << 2, &s->qiov, 4,
cd_read_sector_cb, s);
@@ -435,7 +438,7 @@ static void ide_atapi_cmd_read_dma_cb(void *opaque, int ret)
#endif
s->bus->dma->iov.iov_base = (void *)(s->io_buffer + data_offset);
- s->bus->dma->iov.iov_len = n * 4 * 512;
+ s->bus->dma->iov.iov_len = n * ATAPI_SECTOR_SIZE;
qemu_iovec_init_external(&s->bus->dma->qiov, &s->bus->dma->iov, 1);
s->bus->dma->aiocb = ide_buffered_readv(s, (int64_t)s->lba << 2,
diff --git a/hw/ide/core.c b/hw/ide/core.c
index 41e6a2dc45..fe2bfba489 100644
--- a/hw/ide/core.c
+++ b/hw/ide/core.c
@@ -442,7 +442,7 @@ static void ide_issue_trim_cb(void *opaque, int ret)
}
BlockAIOCB *ide_issue_trim(BlockBackend *blk,
- int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
+ int64_t offset, QEMUIOVector *qiov, BdrvRequestFlags flags,
BlockCompletionFunc *cb, void *opaque)
{
TrimAIOCB *iocb;
@@ -616,8 +616,8 @@ BlockAIOCB *ide_buffered_readv(IDEState *s, int64_t sector_num,
req->iov.iov_len = iov->size;
qemu_iovec_init_external(&req->qiov, &req->iov, 1);
- aioreq = blk_aio_readv(s->blk, sector_num, &req->qiov, nb_sectors,
- ide_buffered_readv_cb, req);
+ aioreq = blk_aio_preadv(s->blk, sector_num << BDRV_SECTOR_BITS,
+ &req->qiov, 0, ide_buffered_readv_cb, req);
QLIST_INSERT_HEAD(&s->buffered_requests, req, list);
return aioreq;
@@ -1006,8 +1006,8 @@ static void ide_sector_write(IDEState *s)
block_acct_start(blk_get_stats(s->blk), &s->acct,
n * BDRV_SECTOR_SIZE, BLOCK_ACCT_WRITE);
- s->pio_aiocb = blk_aio_writev(s->blk, sector_num, &s->qiov, n,
- ide_sector_write_cb, s);
+ s->pio_aiocb = blk_aio_pwritev(s->blk, sector_num << BDRV_SECTOR_BITS,
+ &s->qiov, 0, ide_sector_write_cb, s);
}
static void ide_flush_cb(void *opaque, int ret)
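These ide/core.c hunks (and the macio and scsi-disk ones further down) follow the same byte-based AIO conversion: blk_aio_readv()/blk_aio_writev() took a sector number plus sector count, while blk_aio_preadv()/blk_aio_pwritev() take a byte offset, derive the transfer length from the qiov, and gain a BdrvRequestFlags argument. A minimal sketch of the pattern (the wrapper name is illustrative):

#include "sysemu/block-backend.h"

/* Read the sectors described by qiov starting at sector_num, using the
 * byte-based AIO interface. */
static BlockAIOCB *read_sectors_aio(BlockBackend *blk, int64_t sector_num,
                                    QEMUIOVector *qiov,
                                    BlockCompletionFunc *cb, void *opaque)
{
    /* the offset is now in bytes; the length is qiov->size, so the old
     * nb_sectors argument disappears */
    return blk_aio_preadv(blk, sector_num << BDRV_SECTOR_BITS, qiov, 0,
                          cb, opaque);
}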
diff --git a/hw/ide/internal.h b/hw/ide/internal.h
index d2c458f579..ceb9e5994a 100644
--- a/hw/ide/internal.h
+++ b/hw/ide/internal.h
@@ -614,7 +614,7 @@ void ide_transfer_start(IDEState *s, uint8_t *buf, int size,
void ide_transfer_stop(IDEState *s);
void ide_set_inactive(IDEState *s, bool more);
BlockAIOCB *ide_issue_trim(BlockBackend *blk,
- int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
+ int64_t offset, QEMUIOVector *qiov, BdrvRequestFlags flags,
BlockCompletionFunc *cb, void *opaque);
BlockAIOCB *ide_buffered_readv(IDEState *s, int64_t sector_num,
QEMUIOVector *iov, int nb_sectors,
diff --git a/hw/ide/macio.c b/hw/ide/macio.c
index 76256eb8a8..d7d9c0ff3a 100644
--- a/hw/ide/macio.c
+++ b/hw/ide/macio.c
@@ -55,8 +55,8 @@ static const int debug_macio = 0;
/*
* Unaligned DMA read/write access functions required for OS X/Darwin which
* don't perform DMA transactions on sector boundaries. These functions are
- * modelled on bdrv_co_do_preadv()/bdrv_co_do_pwritev() and so should be
- * easy to remove if the unaligned block APIs are ever exposed.
+ * modelled on bdrv_co_preadv()/bdrv_co_pwritev() and so should be easy to
+ * remove if the unaligned block APIs are ever exposed.
*/
static void pmac_dma_read(BlockBackend *blk,
@@ -120,8 +120,7 @@ static void pmac_dma_read(BlockBackend *blk,
MACIO_DPRINTF("--- Block read transfer - sector_num: %" PRIx64 " "
"nsector: %x\n", (offset >> 9), (bytes >> 9));
- s->bus->dma->aiocb = blk_aio_readv(blk, (offset >> 9), &io->iov,
- (bytes >> 9), cb, io);
+ s->bus->dma->aiocb = blk_aio_preadv(blk, offset, &io->iov, 0, cb, io);
}
static void pmac_dma_write(BlockBackend *blk,
@@ -205,8 +204,7 @@ static void pmac_dma_write(BlockBackend *blk,
MACIO_DPRINTF("--- Block write transfer - sector_num: %" PRIx64 " "
"nsector: %x\n", (offset >> 9), (bytes >> 9));
- s->bus->dma->aiocb = blk_aio_writev(blk, (offset >> 9), &io->iov,
- (bytes >> 9), cb, io);
+ s->bus->dma->aiocb = blk_aio_pwritev(blk, offset, &io->iov, 0, cb, io);
}
static void pmac_dma_trim(BlockBackend *blk,
@@ -232,8 +230,7 @@ static void pmac_dma_trim(BlockBackend *blk,
s->io_buffer_index += io->len;
io->len = 0;
- s->bus->dma->aiocb = ide_issue_trim(blk, (offset >> 9), &io->iov,
- (bytes >> 9), cb, io);
+ s->bus->dma->aiocb = ide_issue_trim(blk, offset, &io->iov, 0, cb, io);
}
static void pmac_ide_atapi_transfer_cb(void *opaque, int ret)
diff --git a/hw/intc/etraxfs_pic.c b/hw/intc/etraxfs_pic.c
index 48f9477065..64a6f4b4ba 100644
--- a/hw/intc/etraxfs_pic.c
+++ b/hw/intc/etraxfs_pic.c
@@ -146,19 +146,19 @@ static void irq_handler(void *opaque, int irq, int level)
pic_update(fs);
}
-static int etraxfs_pic_init(SysBusDevice *sbd)
+static void etraxfs_pic_init(Object *obj)
{
- DeviceState *dev = DEVICE(sbd);
- struct etrax_pic *s = ETRAX_FS_PIC(dev);
+ DeviceState *dev = DEVICE(obj);
+ struct etrax_pic *s = ETRAX_FS_PIC(obj);
+ SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
qdev_init_gpio_in(dev, irq_handler, 32);
sysbus_init_irq(sbd, &s->parent_irq);
sysbus_init_irq(sbd, &s->parent_nmi);
- memory_region_init_io(&s->mmio, OBJECT(s), &pic_ops, s,
+ memory_region_init_io(&s->mmio, obj, &pic_ops, s,
"etraxfs-pic", R_MAX * 4);
sysbus_init_mmio(sbd, &s->mmio);
- return 0;
}
static Property etraxfs_pic_properties[] = {
@@ -169,9 +169,7 @@ static Property etraxfs_pic_properties[] = {
static void etraxfs_pic_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);
- k->init = etraxfs_pic_init;
dc->props = etraxfs_pic_properties;
/*
* Note: pointer property "interrupt_vector" may remain null, thus
@@ -183,6 +181,7 @@ static const TypeInfo etraxfs_pic_info = {
.name = TYPE_ETRAX_FS_PIC,
.parent = TYPE_SYS_BUS_DEVICE,
.instance_size = sizeof(struct etrax_pic),
+ .instance_init = etraxfs_pic_init,
.class_init = etraxfs_pic_class_init,
};
diff --git a/hw/intc/exynos4210_combiner.c b/hw/intc/exynos4210_combiner.c
index dc0c903266..f19a7062be 100644
--- a/hw/intc/exynos4210_combiner.c
+++ b/hw/intc/exynos4210_combiner.c
@@ -406,10 +406,11 @@ static const MemoryRegionOps exynos4210_combiner_ops = {
/*
* Internal Combiner initialization.
*/
-static int exynos4210_combiner_init(SysBusDevice *sbd)
+static void exynos4210_combiner_init(Object *obj)
{
- DeviceState *dev = DEVICE(sbd);
- Exynos4210CombinerState *s = EXYNOS4210_COMBINER(dev);
+ DeviceState *dev = DEVICE(obj);
+ Exynos4210CombinerState *s = EXYNOS4210_COMBINER(obj);
+ SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
unsigned int i;
/* Allocate general purpose input signals and connect a handler to each of
@@ -421,11 +422,9 @@ static int exynos4210_combiner_init(SysBusDevice *sbd)
sysbus_init_irq(sbd, &s->output_irq[i]);
}
- memory_region_init_io(&s->iomem, OBJECT(s), &exynos4210_combiner_ops, s,
+ memory_region_init_io(&s->iomem, obj, &exynos4210_combiner_ops, s,
"exynos4210-combiner", IIC_REGION_SIZE);
sysbus_init_mmio(sbd, &s->iomem);
-
- return 0;
}
static Property exynos4210_combiner_properties[] = {
@@ -436,9 +435,7 @@ static Property exynos4210_combiner_properties[] = {
static void exynos4210_combiner_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);
- k->init = exynos4210_combiner_init;
dc->reset = exynos4210_combiner_reset;
dc->props = exynos4210_combiner_properties;
dc->vmsd = &vmstate_exynos4210_combiner;
@@ -448,6 +445,7 @@ static const TypeInfo exynos4210_combiner_info = {
.name = TYPE_EXYNOS4210_COMBINER,
.parent = TYPE_SYS_BUS_DEVICE,
.instance_size = sizeof(Exynos4210CombinerState),
+ .instance_init = exynos4210_combiner_init,
.class_init = exynos4210_combiner_class_init,
};
diff --git a/hw/intc/exynos4210_gic.c b/hw/intc/exynos4210_gic.c
index 4f7e89f7b8..fd7a8f3058 100644
--- a/hw/intc/exynos4210_gic.c
+++ b/hw/intc/exynos4210_gic.c
@@ -281,10 +281,11 @@ static void exynos4210_gic_set_irq(void *opaque, int irq, int level)
qemu_set_irq(qdev_get_gpio_in(s->gic, irq), level);
}
-static int exynos4210_gic_init(SysBusDevice *sbd)
+static void exynos4210_gic_init(Object *obj)
{
- DeviceState *dev = DEVICE(sbd);
- Exynos4210GicState *s = EXYNOS4210_GIC(dev);
+ DeviceState *dev = DEVICE(obj);
+ Exynos4210GicState *s = EXYNOS4210_GIC(obj);
+ SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
uint32_t i;
const char cpu_prefix[] = "exynos4210-gic-alias_cpu";
const char dist_prefix[] = "exynos4210-gic-alias_dist";
@@ -305,15 +306,15 @@ static int exynos4210_gic_init(SysBusDevice *sbd)
qdev_init_gpio_in(dev, exynos4210_gic_set_irq,
EXYNOS4210_GIC_NIRQ - 32);
- memory_region_init(&s->cpu_container, OBJECT(s), "exynos4210-cpu-container",
+ memory_region_init(&s->cpu_container, obj, "exynos4210-cpu-container",
EXYNOS4210_EXT_GIC_CPU_REGION_SIZE);
- memory_region_init(&s->dist_container, OBJECT(s), "exynos4210-dist-container",
+ memory_region_init(&s->dist_container, obj, "exynos4210-dist-container",
EXYNOS4210_EXT_GIC_DIST_REGION_SIZE);
for (i = 0; i < s->num_cpu; i++) {
/* Map CPU interface per SMP Core */
sprintf(cpu_alias_name, "%s%x", cpu_prefix, i);
- memory_region_init_alias(&s->cpu_alias[i], OBJECT(s),
+ memory_region_init_alias(&s->cpu_alias[i], obj,
cpu_alias_name,
sysbus_mmio_get_region(busdev, 1),
0,
@@ -323,7 +324,7 @@ static int exynos4210_gic_init(SysBusDevice *sbd)
/* Map Distributor per SMP Core */
sprintf(dist_alias_name, "%s%x", dist_prefix, i);
- memory_region_init_alias(&s->dist_alias[i], OBJECT(s),
+ memory_region_init_alias(&s->dist_alias[i], obj,
dist_alias_name,
sysbus_mmio_get_region(busdev, 0),
0,
@@ -334,8 +335,6 @@ static int exynos4210_gic_init(SysBusDevice *sbd)
sysbus_init_mmio(sbd, &s->cpu_container);
sysbus_init_mmio(sbd, &s->dist_container);
-
- return 0;
}
static Property exynos4210_gic_properties[] = {
@@ -346,9 +345,7 @@ static Property exynos4210_gic_properties[] = {
static void exynos4210_gic_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);
- k->init = exynos4210_gic_init;
dc->props = exynos4210_gic_properties;
}
@@ -356,6 +353,7 @@ static const TypeInfo exynos4210_gic_info = {
.name = TYPE_EXYNOS4210_GIC,
.parent = TYPE_SYS_BUS_DEVICE,
.instance_size = sizeof(Exynos4210GicState),
+ .instance_init = exynos4210_gic_init,
.class_init = exynos4210_gic_class_init,
};
@@ -430,9 +428,16 @@ static void exynos4210_irq_gate_reset(DeviceState *d)
/*
* IRQ Gate initialization.
*/
-static int exynos4210_irq_gate_init(SysBusDevice *sbd)
+static void exynos4210_irq_gate_init(Object *obj)
+{
+ Exynos4210IRQGateState *s = EXYNOS4210_IRQ_GATE(obj);
+ SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
+
+ sysbus_init_irq(sbd, &s->out);
+}
+
+static void exynos4210_irq_gate_realize(DeviceState *dev, Error **errp)
{
- DeviceState *dev = DEVICE(sbd);
Exynos4210IRQGateState *s = EXYNOS4210_IRQ_GATE(dev);
/* Allocate general purpose input signals and connect a handler to each of
@@ -440,27 +445,23 @@ static int exynos4210_irq_gate_init(SysBusDevice *sbd)
qdev_init_gpio_in(dev, exynos4210_irq_gate_handler, s->n_in);
s->level = g_malloc0(s->n_in * sizeof(*s->level));
-
- sysbus_init_irq(sbd, &s->out);
-
- return 0;
}
static void exynos4210_irq_gate_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);
- k->init = exynos4210_irq_gate_init;
dc->reset = exynos4210_irq_gate_reset;
dc->vmsd = &vmstate_exynos4210_irq_gate;
dc->props = exynos4210_irq_gate_properties;
+ dc->realize = exynos4210_irq_gate_realize;
}
static const TypeInfo exynos4210_irq_gate_info = {
.name = TYPE_EXYNOS4210_IRQ_GATE,
.parent = TYPE_SYS_BUS_DEVICE,
.instance_size = sizeof(Exynos4210IRQGateState),
+ .instance_init = exynos4210_irq_gate_init,
.class_init = exynos4210_irq_gate_class_init,
};
diff --git a/hw/intc/grlib_irqmp.c b/hw/intc/grlib_irqmp.c
index f5ca8f752b..ac7e63f38b 100644
--- a/hw/intc/grlib_irqmp.c
+++ b/hw/intc/grlib_irqmp.c
@@ -31,6 +31,7 @@
#include "hw/sparc/grlib.h"
#include "trace.h"
+#include "qapi/error.h"
#define IRQMP_MAX_CPU 16
#define IRQMP_REG_SIZE 256 /* Size of memory mapped registers */
@@ -323,23 +324,27 @@ static void grlib_irqmp_reset(DeviceState *d)
irqmp->state->parent = irqmp;
}
-static int grlib_irqmp_init(SysBusDevice *dev)
+static void grlib_irqmp_init(Object *obj)
{
- IRQMP *irqmp = GRLIB_IRQMP(dev);
-
- /* Check parameters */
- if (irqmp->set_pil_in == NULL) {
- return -1;
- }
+ IRQMP *irqmp = GRLIB_IRQMP(obj);
+ SysBusDevice *dev = SYS_BUS_DEVICE(obj);
- memory_region_init_io(&irqmp->iomem, OBJECT(dev), &grlib_irqmp_ops, irqmp,
+ memory_region_init_io(&irqmp->iomem, obj, &grlib_irqmp_ops, irqmp,
"irqmp", IRQMP_REG_SIZE);
irqmp->state = g_malloc0(sizeof *irqmp->state);
sysbus_init_mmio(dev, &irqmp->iomem);
+}
- return 0;
+static void grlib_irqmp_realize(DeviceState *dev, Error **errp)
+{
+ IRQMP *irqmp = GRLIB_IRQMP(dev);
+
+ /* Check parameters */
+ if (irqmp->set_pil_in == NULL) {
+ error_setg(errp, "set_pil_in cannot be NULL.");
+ }
}
static Property grlib_irqmp_properties[] = {
@@ -351,19 +356,19 @@ static Property grlib_irqmp_properties[] = {
static void grlib_irqmp_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);
- k->init = grlib_irqmp_init;
dc->reset = grlib_irqmp_reset;
dc->props = grlib_irqmp_properties;
/* Reason: pointer properties "set_pil_in", "set_pil_in_opaque" */
dc->cannot_instantiate_with_device_add_yet = true;
+ dc->realize = grlib_irqmp_realize;
}
static const TypeInfo grlib_irqmp_info = {
.name = TYPE_GRLIB_IRQMP,
.parent = TYPE_SYS_BUS_DEVICE,
.instance_size = sizeof(IRQMP),
+ .instance_init = grlib_irqmp_init,
.class_init = grlib_irqmp_class_init,
};
diff --git a/hw/intc/imx_avic.c b/hw/intc/imx_avic.c
index 7027655774..d21cb97451 100644
--- a/hw/intc/imx_avic.c
+++ b/hw/intc/imx_avic.c
@@ -321,28 +321,26 @@ static void imx_avic_reset(DeviceState *dev)
memset(s->prio, 0, sizeof s->prio);
}
-static int imx_avic_init(SysBusDevice *sbd)
+static void imx_avic_init(Object *obj)
{
- DeviceState *dev = DEVICE(sbd);
- IMXAVICState *s = IMX_AVIC(dev);
+ DeviceState *dev = DEVICE(obj);
+ IMXAVICState *s = IMX_AVIC(obj);
+ SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
- memory_region_init_io(&s->iomem, OBJECT(s), &imx_avic_ops, s,
+ memory_region_init_io(&s->iomem, obj, &imx_avic_ops, s,
TYPE_IMX_AVIC, 0x1000);
sysbus_init_mmio(sbd, &s->iomem);
qdev_init_gpio_in(dev, imx_avic_set_irq, IMX_AVIC_NUM_IRQS);
sysbus_init_irq(sbd, &s->irq);
sysbus_init_irq(sbd, &s->fiq);
-
- return 0;
}
static void imx_avic_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);
- k->init = imx_avic_init;
+
dc->vmsd = &vmstate_imx_avic;
dc->reset = imx_avic_reset;
dc->desc = "i.MX Advanced Vector Interrupt Controller";
@@ -352,6 +350,7 @@ static const TypeInfo imx_avic_info = {
.name = TYPE_IMX_AVIC,
.parent = TYPE_SYS_BUS_DEVICE,
.instance_size = sizeof(IMXAVICState),
+ .instance_init = imx_avic_init,
.class_init = imx_avic_class_init,
};
diff --git a/hw/intc/omap_intc.c b/hw/intc/omap_intc.c
index 336882510b..877be67971 100644
--- a/hw/intc/omap_intc.c
+++ b/hw/intc/omap_intc.c
@@ -22,6 +22,7 @@
#include "hw/arm/omap.h"
#include "hw/sysbus.h"
#include "qemu/error-report.h"
+#include "qapi/error.h"
/* Interrupt Handlers */
struct omap_intr_handler_bank_s {
@@ -363,23 +364,28 @@ static void omap_inth_reset(DeviceState *dev)
qemu_set_irq(s->parent_intr[1], 0);
}
-static int omap_intc_init(SysBusDevice *sbd)
+static void omap_intc_init(Object *obj)
{
- DeviceState *dev = DEVICE(sbd);
- struct omap_intr_handler_s *s = OMAP_INTC(dev);
+ DeviceState *dev = DEVICE(obj);
+ struct omap_intr_handler_s *s = OMAP_INTC(obj);
+ SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
- if (!s->iclk) {
- error_report("omap-intc: clk not connected");
- return -1;
- }
s->nbanks = 1;
sysbus_init_irq(sbd, &s->parent_intr[0]);
sysbus_init_irq(sbd, &s->parent_intr[1]);
qdev_init_gpio_in(dev, omap_set_intr, s->nbanks * 32);
- memory_region_init_io(&s->mmio, OBJECT(s), &omap_inth_mem_ops, s,
+ memory_region_init_io(&s->mmio, obj, &omap_inth_mem_ops, s,
"omap-intc", s->size);
sysbus_init_mmio(sbd, &s->mmio);
- return 0;
+}
+
+static void omap_intc_realize(DeviceState *dev, Error **errp)
+{
+ struct omap_intr_handler_s *s = OMAP_INTC(dev);
+
+ if (!s->iclk) {
+ error_setg(errp, "omap-intc: clk not connected");
+ }
}
static Property omap_intc_properties[] = {
@@ -391,18 +397,18 @@ static Property omap_intc_properties[] = {
static void omap_intc_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);
- k->init = omap_intc_init;
dc->reset = omap_inth_reset;
dc->props = omap_intc_properties;
/* Reason: pointer property "clk" */
dc->cannot_instantiate_with_device_add_yet = true;
+ dc->realize = omap_intc_realize;
}
static const TypeInfo omap_intc_info = {
.name = "omap-intc",
.parent = TYPE_OMAP_INTC,
+ .instance_init = omap_intc_init,
.class_init = omap_intc_class_init,
};
@@ -605,28 +611,34 @@ static const MemoryRegionOps omap2_inth_mem_ops = {
},
};
-static int omap2_intc_init(SysBusDevice *sbd)
+static void omap2_intc_init(Object *obj)
{
- DeviceState *dev = DEVICE(sbd);
- struct omap_intr_handler_s *s = OMAP_INTC(dev);
+ DeviceState *dev = DEVICE(obj);
+ struct omap_intr_handler_s *s = OMAP_INTC(obj);
+ SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
- if (!s->iclk) {
- error_report("omap2-intc: iclk not connected");
- return -1;
- }
- if (!s->fclk) {
- error_report("omap2-intc: fclk not connected");
- return -1;
- }
s->level_only = 1;
s->nbanks = 3;
sysbus_init_irq(sbd, &s->parent_intr[0]);
sysbus_init_irq(sbd, &s->parent_intr[1]);
qdev_init_gpio_in(dev, omap_set_intr_noedge, s->nbanks * 32);
- memory_region_init_io(&s->mmio, OBJECT(s), &omap2_inth_mem_ops, s,
+ memory_region_init_io(&s->mmio, obj, &omap2_inth_mem_ops, s,
"omap2-intc", 0x1000);
sysbus_init_mmio(sbd, &s->mmio);
- return 0;
+}
+
+static void omap2_intc_realize(DeviceState *dev, Error **errp)
+{
+ struct omap_intr_handler_s *s = OMAP_INTC(dev);
+
+ if (!s->iclk) {
+ error_setg(errp, "omap2-intc: iclk not connected");
+ return;
+ }
+ if (!s->fclk) {
+ error_setg(errp, "omap2-intc: fclk not connected");
+ return;
+ }
}
static Property omap2_intc_properties[] = {
@@ -640,18 +652,18 @@ static Property omap2_intc_properties[] = {
static void omap2_intc_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);
- k->init = omap2_intc_init;
dc->reset = omap_inth_reset;
dc->props = omap2_intc_properties;
/* Reason: pointer property "iclk", "fclk" */
dc->cannot_instantiate_with_device_add_yet = true;
+ dc->realize = omap2_intc_realize;
}
static const TypeInfo omap2_intc_info = {
.name = "omap2-intc",
.parent = TYPE_OMAP_INTC,
+ .instance_init = omap2_intc_init,
.class_init = omap2_intc_class_init,
};
diff --git a/hw/intc/pl190.c b/hw/intc/pl190.c
index 5ecbc4a485..1e50baf237 100644
--- a/hw/intc/pl190.c
+++ b/hw/intc/pl190.c
@@ -236,17 +236,17 @@ static void pl190_reset(DeviceState *d)
pl190_update_vectors(s);
}
-static int pl190_init(SysBusDevice *sbd)
+static void pl190_init(Object *obj)
{
- DeviceState *dev = DEVICE(sbd);
- PL190State *s = PL190(dev);
+ DeviceState *dev = DEVICE(obj);
+ PL190State *s = PL190(obj);
+ SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
- memory_region_init_io(&s->iomem, OBJECT(s), &pl190_ops, s, "pl190", 0x1000);
+ memory_region_init_io(&s->iomem, obj, &pl190_ops, s, "pl190", 0x1000);
sysbus_init_mmio(sbd, &s->iomem);
qdev_init_gpio_in(dev, pl190_set_irq, 32);
sysbus_init_irq(sbd, &s->irq);
sysbus_init_irq(sbd, &s->fiq);
- return 0;
}
static const VMStateDescription vmstate_pl190 = {
@@ -271,9 +271,7 @@ static const VMStateDescription vmstate_pl190 = {
static void pl190_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);
- k->init = pl190_init;
dc->reset = pl190_reset;
dc->vmsd = &vmstate_pl190;
}
@@ -282,6 +280,7 @@ static const TypeInfo pl190_info = {
.name = TYPE_PL190,
.parent = TYPE_SYS_BUS_DEVICE,
.instance_size = sizeof(PL190State),
+ .instance_init = pl190_init,
.class_init = pl190_class_init,
};
diff --git a/hw/intc/slavio_intctl.c b/hw/intc/slavio_intctl.c
index c9486ed999..e82e893628 100644
--- a/hw/intc/slavio_intctl.c
+++ b/hw/intc/slavio_intctl.c
@@ -418,15 +418,16 @@ static void slavio_intctl_reset(DeviceState *d)
slavio_check_interrupts(s, 0);
}
-static int slavio_intctl_init1(SysBusDevice *sbd)
+static void slavio_intctl_init(Object *obj)
{
- DeviceState *dev = DEVICE(sbd);
- SLAVIO_INTCTLState *s = SLAVIO_INTCTL(dev);
+ DeviceState *dev = DEVICE(obj);
+ SLAVIO_INTCTLState *s = SLAVIO_INTCTL(obj);
+ SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
unsigned int i, j;
char slave_name[45];
qdev_init_gpio_in(dev, slavio_set_irq_all, 32 + MAX_CPUS);
- memory_region_init_io(&s->iomem, OBJECT(s), &slavio_intctlm_mem_ops, s,
+ memory_region_init_io(&s->iomem, obj, &slavio_intctlm_mem_ops, s,
"master-interrupt-controller", INTCTLM_SIZE);
sysbus_init_mmio(sbd, &s->iomem);
@@ -443,16 +444,12 @@ static int slavio_intctl_init1(SysBusDevice *sbd)
s->slaves[i].cpu = i;
s->slaves[i].master = s;
}
-
- return 0;
}
static void slavio_intctl_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);
- k->init = slavio_intctl_init1;
dc->reset = slavio_intctl_reset;
dc->vmsd = &vmstate_intctl;
}
@@ -461,6 +458,7 @@ static const TypeInfo slavio_intctl_info = {
.name = TYPE_SLAVIO_INTCTL,
.parent = TYPE_SYS_BUS_DEVICE,
.instance_size = sizeof(SLAVIO_INTCTLState),
+ .instance_init = slavio_intctl_init,
.class_init = slavio_intctl_class_init,
};
diff --git a/hw/misc/Makefile.objs b/hw/misc/Makefile.objs
index 93f952880a..bc0dd2cc75 100644
--- a/hw/misc/Makefile.objs
+++ b/hw/misc/Makefile.objs
@@ -29,6 +29,7 @@ obj-$(CONFIG_IMX) += imx_ccm.o
obj-$(CONFIG_IMX) += imx31_ccm.o
obj-$(CONFIG_IMX) += imx25_ccm.o
obj-$(CONFIG_IMX) += imx6_ccm.o
+obj-$(CONFIG_IMX) += imx6_src.o
obj-$(CONFIG_MILKYMIST) += milkymist-hpdmc.o
obj-$(CONFIG_MILKYMIST) += milkymist-pfpu.o
obj-$(CONFIG_MAINSTONE) += mst_fpga.o
diff --git a/hw/misc/bcm2835_property.c b/hw/misc/bcm2835_property.c
index 530411f841..34473469d4 100644
--- a/hw/misc/bcm2835_property.c
+++ b/hw/misc/bcm2835_property.c
@@ -21,6 +21,8 @@ static void bcm2835_property_mbox_push(BCM2835PropertyState *s, uint32_t value)
int n;
uint32_t offset, length, color;
uint32_t xres, yres, xoffset, yoffset, bpp, pixo, alpha;
+ uint32_t tmp_xres, tmp_yres, tmp_xoffset, tmp_yoffset;
+ uint32_t tmp_bpp, tmp_pixo, tmp_alpha;
uint32_t *newxres = NULL, *newyres = NULL, *newxoffset = NULL,
*newyoffset = NULL, *newbpp = NULL, *newpixo = NULL, *newalpha = NULL;
@@ -139,7 +141,11 @@ static void bcm2835_property_mbox_push(BCM2835PropertyState *s, uint32_t value)
case 0x00040001: /* Allocate buffer */
stl_le_phys(&s->dma_as, value + 12, s->fbdev->base);
- stl_le_phys(&s->dma_as, value + 16, s->fbdev->size);
+ tmp_xres = newxres != NULL ? *newxres : s->fbdev->xres;
+ tmp_yres = newyres != NULL ? *newyres : s->fbdev->yres;
+ tmp_bpp = newbpp != NULL ? *newbpp : s->fbdev->bpp;
+ stl_le_phys(&s->dma_as, value + 16,
+ tmp_xres * tmp_yres * tmp_bpp / 8);
resplen = 8;
break;
case 0x00048001: /* Release buffer */
@@ -150,8 +156,10 @@ static void bcm2835_property_mbox_push(BCM2835PropertyState *s, uint32_t value)
break;
case 0x00040003: /* Get display width/height */
case 0x00040004:
- stl_le_phys(&s->dma_as, value + 12, s->fbdev->xres);
- stl_le_phys(&s->dma_as, value + 16, s->fbdev->yres);
+ tmp_xres = newxres != NULL ? *newxres : s->fbdev->xres;
+ tmp_yres = newyres != NULL ? *newyres : s->fbdev->yres;
+ stl_le_phys(&s->dma_as, value + 12, tmp_xres);
+ stl_le_phys(&s->dma_as, value + 16, tmp_yres);
resplen = 8;
break;
case 0x00044003: /* Test display width/height */
@@ -167,7 +175,8 @@ static void bcm2835_property_mbox_push(BCM2835PropertyState *s, uint32_t value)
resplen = 8;
break;
case 0x00040005: /* Get depth */
- stl_le_phys(&s->dma_as, value + 12, s->fbdev->bpp);
+ tmp_bpp = newbpp != NULL ? *newbpp : s->fbdev->bpp;
+ stl_le_phys(&s->dma_as, value + 12, tmp_bpp);
resplen = 4;
break;
case 0x00044005: /* Test depth */
@@ -179,7 +188,8 @@ static void bcm2835_property_mbox_push(BCM2835PropertyState *s, uint32_t value)
resplen = 4;
break;
case 0x00040006: /* Get pixel order */
- stl_le_phys(&s->dma_as, value + 12, s->fbdev->pixo);
+ tmp_pixo = newpixo != NULL ? *newpixo : s->fbdev->pixo;
+ stl_le_phys(&s->dma_as, value + 12, tmp_pixo);
resplen = 4;
break;
case 0x00044006: /* Test pixel order */
@@ -191,7 +201,8 @@ static void bcm2835_property_mbox_push(BCM2835PropertyState *s, uint32_t value)
resplen = 4;
break;
case 0x00040007: /* Get alpha */
- stl_le_phys(&s->dma_as, value + 12, s->fbdev->alpha);
+ tmp_alpha = newalpha != NULL ? *newalpha : s->fbdev->alpha;
+ stl_le_phys(&s->dma_as, value + 12, tmp_alpha);
resplen = 4;
break;
case 0x00044007: /* Test pixel alpha */
@@ -203,12 +214,16 @@ static void bcm2835_property_mbox_push(BCM2835PropertyState *s, uint32_t value)
resplen = 4;
break;
case 0x00040008: /* Get pitch */
- stl_le_phys(&s->dma_as, value + 12, s->fbdev->pitch);
+ tmp_xres = newxres != NULL ? *newxres : s->fbdev->xres;
+ tmp_bpp = newbpp != NULL ? *newbpp : s->fbdev->bpp;
+ stl_le_phys(&s->dma_as, value + 12, tmp_xres * tmp_bpp / 8);
resplen = 4;
break;
case 0x00040009: /* Get virtual offset */
- stl_le_phys(&s->dma_as, value + 12, s->fbdev->xoffset);
- stl_le_phys(&s->dma_as, value + 16, s->fbdev->yoffset);
+ tmp_xoffset = newxoffset != NULL ? *newxoffset : s->fbdev->xoffset;
+ tmp_yoffset = newyoffset != NULL ? *newyoffset : s->fbdev->yoffset;
+ stl_le_phys(&s->dma_as, value + 12, tmp_xoffset);
+ stl_le_phys(&s->dma_as, value + 16, tmp_yoffset);
resplen = 8;
break;
case 0x00044009: /* Test virtual offset */
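The repeated `new* != NULL ? *new* : s->fbdev->*` expressions make the "get" queries report a value set earlier in the same mailbox transaction rather than the stale framebuffer state; pitch and buffer size are then derived from those values as xres * bpp / 8 and xres * yres * bpp / 8. A tiny illustrative helper (not part of the patch) capturing the fallback:

/* Return the value pending in the current mailbox transaction, or the
 * currently-programmed framebuffer value if none is pending. */
static uint32_t pending_or_current(const uint32_t *pending, uint32_t current)
{
    return pending ? *pending : current;
}

/* e.g. the "Get pitch" reply becomes:
 *   pending_or_current(newxres, s->fbdev->xres)
 *       * pending_or_current(newbpp, s->fbdev->bpp) / 8
 */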
diff --git a/hw/misc/imx6_src.c b/hw/misc/imx6_src.c
new file mode 100644
index 0000000000..6b026b459f
--- /dev/null
+++ b/hw/misc/imx6_src.c
@@ -0,0 +1,264 @@
+/*
+ * IMX6 System Reset Controller
+ *
+ * Copyright (c) 2015 Jean-Christophe Dubois <jcd@tribudubois.net>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "hw/misc/imx6_src.h"
+#include "sysemu/sysemu.h"
+#include "qemu/bitops.h"
+#include "arm-powerctl.h"
+
+#ifndef DEBUG_IMX6_SRC
+#define DEBUG_IMX6_SRC 0
+#endif
+
+#define DPRINTF(fmt, args...) \
+ do { \
+ if (DEBUG_IMX6_SRC) { \
+ fprintf(stderr, "[%s]%s: " fmt , TYPE_IMX6_SRC, \
+ __func__, ##args); \
+ } \
+ } while (0)
+
+static char const *imx6_src_reg_name(uint32_t reg)
+{
+ static char unknown[20];
+
+ switch (reg) {
+ case SRC_SCR:
+ return "SRC_SCR";
+ case SRC_SBMR1:
+ return "SRC_SBMR1";
+ case SRC_SRSR:
+ return "SRC_SRSR";
+ case SRC_SISR:
+ return "SRC_SISR";
+ case SRC_SIMR:
+ return "SRC_SIMR";
+ case SRC_SBMR2:
+ return "SRC_SBMR2";
+ case SRC_GPR1:
+ return "SRC_GPR1";
+ case SRC_GPR2:
+ return "SRC_GPR2";
+ case SRC_GPR3:
+ return "SRC_GPR3";
+ case SRC_GPR4:
+ return "SRC_GPR4";
+ case SRC_GPR5:
+ return "SRC_GPR5";
+ case SRC_GPR6:
+ return "SRC_GPR6";
+ case SRC_GPR7:
+ return "SRC_GPR7";
+ case SRC_GPR8:
+ return "SRC_GPR8";
+ case SRC_GPR9:
+ return "SRC_GPR9";
+ case SRC_GPR10:
+ return "SRC_GPR10";
+ default:
+ sprintf(unknown, "%d ?", reg);
+ return unknown;
+ }
+}
+
+static const VMStateDescription vmstate_imx6_src = {
+ .name = TYPE_IMX6_SRC,
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32_ARRAY(regs, IMX6SRCState, SRC_MAX),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
+static void imx6_src_reset(DeviceState *dev)
+{
+ IMX6SRCState *s = IMX6_SRC(dev);
+
+ DPRINTF("\n");
+
+ memset(s->regs, 0, sizeof(s->regs));
+
+ /* Set reset values */
+ s->regs[SRC_SCR] = 0x521;
+ s->regs[SRC_SRSR] = 0x1;
+ s->regs[SRC_SIMR] = 0x1F;
+}
+
+static uint64_t imx6_src_read(void *opaque, hwaddr offset, unsigned size)
+{
+ uint32_t value = 0;
+ IMX6SRCState *s = (IMX6SRCState *)opaque;
+ uint32_t index = offset >> 2;
+
+ if (index < SRC_MAX) {
+ value = s->regs[index];
+ } else {
+ qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Bad register at offset 0x%"
+ HWADDR_PRIx "\n", TYPE_IMX6_SRC, __func__, offset);
+
+ }
+
+ DPRINTF("reg[%s] => 0x%" PRIx32 "\n", imx6_src_reg_name(index), value);
+
+ return value;
+}
+
+static void imx6_src_write(void *opaque, hwaddr offset, uint64_t value,
+ unsigned size)
+{
+ IMX6SRCState *s = (IMX6SRCState *)opaque;
+ uint32_t index = offset >> 2;
+ unsigned long change_mask;
+ unsigned long current_value = value;
+
+ if (index >= SRC_MAX) {
+ qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Bad register at offset 0x%"
+ HWADDR_PRIx "\n", TYPE_IMX6_SRC, __func__, offset);
+ return;
+ }
+
+ DPRINTF("reg[%s] <= 0x%" PRIx32 "\n", imx6_src_reg_name(index),
+ (uint32_t)current_value);
+
+ change_mask = s->regs[index] ^ (uint32_t)current_value;
+
+ switch (index) {
+ case SRC_SCR:
+ /*
+ * On real hardware when the system reset controller starts a
+ * secondary CPU it runs through some boot ROM code which reads
+ * the SRC_GPRX registers controlling the start address and branches
+ * to it.
+ * Here we are taking a short cut and branching directly to the
+ * requested address (we don't want to run the boot ROM code inside
+ * QEMU)
+ */
+ if (EXTRACT(change_mask, CORE3_ENABLE)) {
+ if (EXTRACT(current_value, CORE3_ENABLE)) {
+ /* CORE 3 is brought up */
+ arm_set_cpu_on(3, s->regs[SRC_GPR7], s->regs[SRC_GPR8],
+ 3, false);
+ } else {
+ /* CORE 3 is shut down */
+ arm_set_cpu_off(3);
+ }
+ /* We clear the reset bits as the processor changed state */
+ clear_bit(CORE3_RST_SHIFT, &current_value);
+ clear_bit(CORE3_RST_SHIFT, &change_mask);
+ }
+ if (EXTRACT(change_mask, CORE2_ENABLE)) {
+ if (EXTRACT(current_value, CORE2_ENABLE)) {
+ /* CORE 2 is brought up */
+ arm_set_cpu_on(2, s->regs[SRC_GPR5], s->regs[SRC_GPR6],
+ 3, false);
+ } else {
+ /* CORE 2 is shut down */
+ arm_set_cpu_off(2);
+ }
+ /* We clear the reset bits as the processor changed state */
+ clear_bit(CORE2_RST_SHIFT, &current_value);
+ clear_bit(CORE2_RST_SHIFT, &change_mask);
+ }
+ if (EXTRACT(change_mask, CORE1_ENABLE)) {
+ if (EXTRACT(current_value, CORE1_ENABLE)) {
+ /* CORE 1 is brought up */
+ arm_set_cpu_on(1, s->regs[SRC_GPR3], s->regs[SRC_GPR4],
+ 3, false);
+ } else {
+ /* CORE 1 is shut down */
+ arm_set_cpu_off(1);
+ }
+ /* We clear the reset bits as the processor changed state */
+ clear_bit(CORE1_RST_SHIFT, &current_value);
+ clear_bit(CORE1_RST_SHIFT, &change_mask);
+ }
+ if (EXTRACT(change_mask, CORE0_RST)) {
+ arm_reset_cpu(0);
+ clear_bit(CORE0_RST_SHIFT, &current_value);
+ }
+ if (EXTRACT(change_mask, CORE1_RST)) {
+ arm_reset_cpu(1);
+ clear_bit(CORE1_RST_SHIFT, &current_value);
+ }
+ if (EXTRACT(change_mask, CORE2_RST)) {
+ arm_reset_cpu(2);
+ clear_bit(CORE2_RST_SHIFT, &current_value);
+ }
+ if (EXTRACT(change_mask, CORE3_RST)) {
+ arm_reset_cpu(3);
+ clear_bit(CORE3_RST_SHIFT, &current_value);
+ }
+ if (EXTRACT(change_mask, SW_IPU2_RST)) {
+ /* We pretend the IPU2 is reset */
+ clear_bit(SW_IPU2_RST_SHIFT, &current_value);
+ }
+ if (EXTRACT(change_mask, SW_IPU1_RST)) {
+ /* We pretend the IPU1 is reset */
+ clear_bit(SW_IPU1_RST_SHIFT, &current_value);
+ }
+ s->regs[index] = current_value;
+ break;
+ default:
+ s->regs[index] = current_value;
+ break;
+ }
+}
+
+static const struct MemoryRegionOps imx6_src_ops = {
+ .read = imx6_src_read,
+ .write = imx6_src_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+ .valid = {
+ /*
+ * Our device would not work correctly if the guest was doing
+ * unaligned access. This might not be a limitation on the real
+ * device but in practice there is no reason for a guest to access
+ * this device unaligned.
+ */
+ .min_access_size = 4,
+ .max_access_size = 4,
+ .unaligned = false,
+ },
+};
+
+static void imx6_src_realize(DeviceState *dev, Error **errp)
+{
+ IMX6SRCState *s = IMX6_SRC(dev);
+
+ memory_region_init_io(&s->iomem, OBJECT(dev), &imx6_src_ops, s,
+ TYPE_IMX6_SRC, 0x1000);
+ sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->iomem);
+}
+
+static void imx6_src_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->realize = imx6_src_realize;
+ dc->reset = imx6_src_reset;
+ dc->vmsd = &vmstate_imx6_src;
+ dc->desc = "i.MX6 System Reset Controller";
+}
+
+static const TypeInfo imx6_src_info = {
+ .name = TYPE_IMX6_SRC,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(IMX6SRCState),
+ .class_init = imx6_src_class_init,
+};
+
+static void imx6_src_register_types(void)
+{
+ type_register_static(&imx6_src_info);
+}
+
+type_init(imx6_src_register_types)
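The SRC_SCR handler above shortcuts the boot-ROM dance: guest firmware normally stores a secondary core's entry point and argument in that core's SRC_GPRx pair and then sets the core's enable bit in SRC_SCR, which the model turns directly into arm_set_cpu_on(). A guest-side sketch of that sequence for core 1, using the word indices from imx6_src.h; the enable-bit position is a placeholder, the real value comes from the i.MX6 reference manual:

#include <stdint.h>

#define CORE1_ENABLE_BIT  22   /* placeholder, see the SRC_SCR layout */

/* src points at the SRC register block, indexed by 32-bit word as in
 * the model above (SRC_SCR, SRC_GPR3, SRC_GPR4, ...). */
static void start_core1(volatile uint32_t *src, uint32_t entry, uint32_t arg)
{
    src[SRC_GPR3] = entry;   /* the handler passes these to arm_set_cpu_on() */
    src[SRC_GPR4] = arg;
    src[SRC_SCR] |= 1u << CORE1_ENABLE_BIT;
}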
diff --git a/hw/nvram/spapr_nvram.c b/hw/nvram/spapr_nvram.c
index 802636ef35..019f25dc58 100644
--- a/hw/nvram/spapr_nvram.c
+++ b/hw/nvram/spapr_nvram.c
@@ -124,7 +124,7 @@ static void rtas_nvram_store(PowerPCCPU *cpu, sPAPRMachineState *spapr,
alen = len;
if (nvram->blk) {
- alen = blk_pwrite(nvram->blk, offset, membuf, len);
+ alen = blk_pwrite(nvram->blk, offset, membuf, len, 0);
}
assert(nvram->buf);
@@ -190,7 +190,7 @@ static int spapr_nvram_post_load(void *opaque, int version_id)
sPAPRNVRAM *nvram = VIO_SPAPR_NVRAM(opaque);
if (nvram->blk) {
- int alen = blk_pwrite(nvram->blk, 0, nvram->buf, nvram->size);
+ int alen = blk_pwrite(nvram->blk, 0, nvram->buf, nvram->size, 0);
if (alen < 0) {
return alen;
diff --git a/hw/ppc/spapr_drc.c b/hw/ppc/spapr_drc.c
index 1f5f1d790a..94c875d752 100644
--- a/hw/ppc/spapr_drc.c
+++ b/hw/ppc/spapr_drc.c
@@ -269,11 +269,7 @@ static void prop_get_fdt(Object *obj, Visitor *v, const char *name,
void *fdt;
if (!drc->fdt) {
- visit_start_struct(v, name, NULL, 0, &err);
- if (!err) {
- visit_end_struct(v, &err);
- }
- error_propagate(errp, err);
+ visit_type_null(v, NULL, errp);
return;
}
@@ -301,7 +297,8 @@ static void prop_get_fdt(Object *obj, Visitor *v, const char *name,
case FDT_END_NODE:
/* shouldn't ever see an FDT_END_NODE before FDT_BEGIN_NODE */
g_assert(fdt_depth > 0);
- visit_end_struct(v, &err);
+ visit_check_struct(v, &err);
+ visit_end_struct(v);
if (err) {
error_propagate(errp, err);
return;
@@ -312,7 +309,7 @@ static void prop_get_fdt(Object *obj, Visitor *v, const char *name,
int i;
prop = fdt_get_property_by_offset(fdt, fdt_offset, &prop_len);
name = fdt_string(fdt, fdt32_to_cpu(prop->nameoff));
- visit_start_list(v, name, &err);
+ visit_start_list(v, name, NULL, 0, &err);
if (err) {
error_propagate(errp, err);
return;
diff --git a/hw/scsi/scsi-disk.c b/hw/scsi/scsi-disk.c
index c3ce54a203..ce89c98b4e 100644
--- a/hw/scsi/scsi-disk.c
+++ b/hw/scsi/scsi-disk.c
@@ -108,7 +108,7 @@ static void scsi_check_condition(SCSIDiskReq *r, SCSISense sense)
scsi_req_complete(&r->req, CHECK_CONDITION);
}
-static uint32_t scsi_init_iovec(SCSIDiskReq *r, size_t size)
+static void scsi_init_iovec(SCSIDiskReq *r, size_t size)
{
SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
@@ -118,7 +118,6 @@ static uint32_t scsi_init_iovec(SCSIDiskReq *r, size_t size)
}
r->iov.iov_len = MIN(r->sector_count * 512, r->buflen);
qemu_iovec_init_external(&r->qiov, &r->iov, 1);
- return r->qiov.size / 512;
}
static void scsi_disk_save_request(QEMUFile *f, SCSIRequest *req)
@@ -316,7 +315,6 @@ done:
static void scsi_do_read(SCSIDiskReq *r, int ret)
{
SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
- uint32_t n;
assert (r->req.aiocb == NULL);
@@ -340,11 +338,12 @@ static void scsi_do_read(SCSIDiskReq *r, int ret)
r->req.aiocb = dma_blk_read(s->qdev.conf.blk, r->req.sg, r->sector,
scsi_dma_complete, r);
} else {
- n = scsi_init_iovec(r, SCSI_DMA_BUF_SIZE);
+ scsi_init_iovec(r, SCSI_DMA_BUF_SIZE);
block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
- n * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
- r->req.aiocb = blk_aio_readv(s->qdev.conf.blk, r->sector, &r->qiov, n,
- scsi_read_complete, r);
+ r->qiov.size, BLOCK_ACCT_READ);
+ r->req.aiocb = blk_aio_preadv(s->qdev.conf.blk,
+ r->sector << BDRV_SECTOR_BITS, &r->qiov,
+ 0, scsi_read_complete, r);
}
done:
@@ -504,7 +503,6 @@ static void scsi_write_data(SCSIRequest *req)
{
SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
- uint32_t n;
/* No data transfer may already be in progress */
assert(r->req.aiocb == NULL);
@@ -544,11 +542,11 @@ static void scsi_write_data(SCSIRequest *req)
r->req.aiocb = dma_blk_write(s->qdev.conf.blk, r->req.sg, r->sector,
scsi_dma_complete, r);
} else {
- n = r->qiov.size / 512;
block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
- n * BDRV_SECTOR_SIZE, BLOCK_ACCT_WRITE);
- r->req.aiocb = blk_aio_writev(s->qdev.conf.blk, r->sector, &r->qiov, n,
- scsi_write_complete, r);
+ r->qiov.size, BLOCK_ACCT_WRITE);
+ r->req.aiocb = blk_aio_pwritev(s->qdev.conf.blk,
+ r->sector << BDRV_SECTOR_BITS, &r->qiov,
+ 0, scsi_write_complete, r);
}
}
@@ -1730,13 +1728,13 @@ static void scsi_write_same_complete(void *opaque, int ret)
if (data->iov.iov_len) {
block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
data->iov.iov_len, BLOCK_ACCT_WRITE);
- /* blk_aio_write doesn't like the qiov size being different from
- * nb_sectors, make sure they match.
- */
+ /* Reinitialize the qiov to handle an unaligned WRITE SAME request
+ * where the final qiov may need a smaller size */
qemu_iovec_init_external(&data->qiov, &data->iov, 1);
- r->req.aiocb = blk_aio_writev(s->qdev.conf.blk, data->sector,
- &data->qiov, data->iov.iov_len / 512,
- scsi_write_same_complete, data);
+ r->req.aiocb = blk_aio_pwritev(s->qdev.conf.blk,
+ data->sector << BDRV_SECTOR_BITS,
+ &data->qiov, 0,
+ scsi_write_same_complete, data);
return;
}
@@ -1781,8 +1779,8 @@ static void scsi_disk_emulate_write_same(SCSIDiskReq *r, uint8_t *inbuf)
nb_sectors * s->qdev.blocksize,
BLOCK_ACCT_WRITE);
r->req.aiocb = blk_aio_write_zeroes(s->qdev.conf.blk,
- r->req.cmd.lba * (s->qdev.blocksize / 512),
- nb_sectors * (s->qdev.blocksize / 512),
+ r->req.cmd.lba * s->qdev.blocksize,
+ nb_sectors * s->qdev.blocksize,
flags, scsi_aio_complete, r);
return;
}
@@ -1803,9 +1801,10 @@ static void scsi_disk_emulate_write_same(SCSIDiskReq *r, uint8_t *inbuf)
scsi_req_ref(&r->req);
block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
data->iov.iov_len, BLOCK_ACCT_WRITE);
- r->req.aiocb = blk_aio_writev(s->qdev.conf.blk, data->sector,
- &data->qiov, data->iov.iov_len / 512,
- scsi_write_same_complete, data);
+ r->req.aiocb = blk_aio_pwritev(s->qdev.conf.blk,
+ data->sector << BDRV_SECTOR_BITS,
+ &data->qiov, 0,
+ scsi_write_same_complete, data);
}
static void scsi_disk_emulate_write_data(SCSIRequest *req)
diff --git a/hw/sd/sd.c b/hw/sd/sd.c
index b66e5d2dba..87e3d23a3d 100644
--- a/hw/sd/sd.c
+++ b/hw/sd/sd.c
@@ -123,7 +123,6 @@ struct SDState {
qemu_irq readonly_cb;
qemu_irq inserted_cb;
BlockBackend *blk;
- uint8_t *buf;
bool enable;
};
@@ -551,7 +550,7 @@ static const VMStateDescription sd_vmstate = {
VMSTATE_UINT64(data_start, SDState),
VMSTATE_UINT32(data_offset, SDState),
VMSTATE_UINT8_ARRAY(data, SDState, 512),
- VMSTATE_BUFFER_POINTER_UNSAFE(buf, SDState, 1, 512),
+ VMSTATE_UNUSED_V(1, 512),
VMSTATE_BOOL(enable, SDState),
VMSTATE_END_OF_LIST()
},
@@ -1577,57 +1576,17 @@ send_response:
static void sd_blk_read(SDState *sd, uint64_t addr, uint32_t len)
{
- uint64_t end = addr + len;
-
DPRINTF("sd_blk_read: addr = 0x%08llx, len = %d\n",
(unsigned long long) addr, len);
- if (!sd->blk || blk_read(sd->blk, addr >> 9, sd->buf, 1) < 0) {
+ if (!sd->blk || blk_pread(sd->blk, addr, sd->data, len) < 0) {
fprintf(stderr, "sd_blk_read: read error on host side\n");
- return;
}
-
- if (end > (addr & ~511) + 512) {
- memcpy(sd->data, sd->buf + (addr & 511), 512 - (addr & 511));
-
- if (blk_read(sd->blk, end >> 9, sd->buf, 1) < 0) {
- fprintf(stderr, "sd_blk_read: read error on host side\n");
- return;
- }
- memcpy(sd->data + 512 - (addr & 511), sd->buf, end & 511);
- } else
- memcpy(sd->data, sd->buf + (addr & 511), len);
}
static void sd_blk_write(SDState *sd, uint64_t addr, uint32_t len)
{
- uint64_t end = addr + len;
-
- if ((addr & 511) || len < 512)
- if (!sd->blk || blk_read(sd->blk, addr >> 9, sd->buf, 1) < 0) {
- fprintf(stderr, "sd_blk_write: read error on host side\n");
- return;
- }
-
- if (end > (addr & ~511) + 512) {
- memcpy(sd->buf + (addr & 511), sd->data, 512 - (addr & 511));
- if (blk_write(sd->blk, addr >> 9, sd->buf, 1) < 0) {
- fprintf(stderr, "sd_blk_write: write error on host side\n");
- return;
- }
-
- if (blk_read(sd->blk, end >> 9, sd->buf, 1) < 0) {
- fprintf(stderr, "sd_blk_write: read error on host side\n");
- return;
- }
- memcpy(sd->buf, sd->data + 512 - (addr & 511), end & 511);
- if (blk_write(sd->blk, end >> 9, sd->buf, 1) < 0) {
- fprintf(stderr, "sd_blk_write: write error on host side\n");
- }
- } else {
- memcpy(sd->buf + (addr & 511), sd->data, len);
- if (!sd->blk || blk_write(sd->blk, addr >> 9, sd->buf, 1) < 0) {
- fprintf(stderr, "sd_blk_write: write error on host side\n");
- }
+ if (!sd->blk || blk_pwrite(sd->blk, addr, sd->data, len, 0) < 0) {
+ fprintf(stderr, "sd_blk_write: write error on host side\n");
}
}
@@ -1925,8 +1884,6 @@ static void sd_realize(DeviceState *dev, Error **errp)
return;
}
- sd->buf = blk_blockalign(sd->blk, 512);
-
if (sd->blk) {
blk_set_dev_ops(sd->blk, &sd_block_ops, sd);
}
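The sd.c rewrite drops the 512-byte bounce buffer because blk_pread()/blk_pwrite() are byte-addressed: the block layer handles sub-sector offsets and lengths internally, so the old read-modify-write splitting across sector boundaries is no longer needed. The signatures, as assumed from the call sites here and in spapr_nvram.c:

int blk_pread(BlockBackend *blk, int64_t offset, void *buf, int count);
int blk_pwrite(BlockBackend *blk, int64_t offset, const void *buf, int count,
               BdrvRequestFlags flags);

Both return the number of bytes transferred or a negative errno, so the `< 0` checks above remain valid.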
diff --git a/hw/ssi/Makefile.objs b/hw/ssi/Makefile.objs
index 9555825aca..fcbb79ef01 100644
--- a/hw/ssi/Makefile.objs
+++ b/hw/ssi/Makefile.objs
@@ -4,3 +4,4 @@ common-obj-$(CONFIG_XILINX_SPI) += xilinx_spi.o
common-obj-$(CONFIG_XILINX_SPIPS) += xilinx_spips.o
obj-$(CONFIG_OMAP) += omap_spi.o
+obj-$(CONFIG_IMX) += imx_spi.o
diff --git a/hw/ssi/imx_spi.c b/hw/ssi/imx_spi.c
new file mode 100644
index 0000000000..d5dd42aca6
--- /dev/null
+++ b/hw/ssi/imx_spi.c
@@ -0,0 +1,454 @@
+/*
+ * IMX SPI Controller
+ *
+ * Copyright (c) 2016 Jean-Christophe Dubois <jcd@tribudubois.net>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "hw/ssi/imx_spi.h"
+#include "sysemu/sysemu.h"
+
+#ifndef DEBUG_IMX_SPI
+#define DEBUG_IMX_SPI 0
+#endif
+
+#define DPRINTF(fmt, args...) \
+ do { \
+ if (DEBUG_IMX_SPI) { \
+ fprintf(stderr, "[%s]%s: " fmt , TYPE_IMX_SPI, \
+ __func__, ##args); \
+ } \
+ } while (0)
+
+static char const *imx_spi_reg_name(uint32_t reg)
+{
+ static char unknown[20];
+
+ switch (reg) {
+ case ECSPI_RXDATA:
+ return "ECSPI_RXDATA";
+ case ECSPI_TXDATA:
+ return "ECSPI_TXDATA";
+ case ECSPI_CONREG:
+ return "ECSPI_CONREG";
+ case ECSPI_CONFIGREG:
+ return "ECSPI_CONFIGREG";
+ case ECSPI_INTREG:
+ return "ECSPI_INTREG";
+ case ECSPI_DMAREG:
+ return "ECSPI_DMAREG";
+ case ECSPI_STATREG:
+ return "ECSPI_STATREG";
+ case ECSPI_PERIODREG:
+ return "ECSPI_PERIODREG";
+ case ECSPI_TESTREG:
+ return "ECSPI_TESTREG";
+ case ECSPI_MSGDATA:
+ return "ECSPI_MSGDATA";
+ default:
+ sprintf(unknown, "%d ?", reg);
+ return unknown;
+ }
+}
+
+static const VMStateDescription vmstate_imx_spi = {
+ .name = TYPE_IMX_SPI,
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_FIFO32(tx_fifo, IMXSPIState),
+ VMSTATE_FIFO32(rx_fifo, IMXSPIState),
+ VMSTATE_INT16(burst_length, IMXSPIState),
+ VMSTATE_UINT32_ARRAY(regs, IMXSPIState, ECSPI_MAX),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
+static void imx_spi_txfifo_reset(IMXSPIState *s)
+{
+ fifo32_reset(&s->tx_fifo);
+ s->regs[ECSPI_STATREG] |= ECSPI_STATREG_TE;
+ s->regs[ECSPI_STATREG] &= ~ECSPI_STATREG_TF;
+}
+
+static void imx_spi_rxfifo_reset(IMXSPIState *s)
+{
+ fifo32_reset(&s->rx_fifo);
+ s->regs[ECSPI_STATREG] &= ~ECSPI_STATREG_RR;
+ s->regs[ECSPI_STATREG] &= ~ECSPI_STATREG_RF;
+ s->regs[ECSPI_STATREG] &= ~ECSPI_STATREG_RO;
+}
+
+static void imx_spi_update_irq(IMXSPIState *s)
+{
+ int level;
+
+ if (fifo32_is_empty(&s->rx_fifo)) {
+ s->regs[ECSPI_STATREG] &= ~ECSPI_STATREG_RR;
+ } else {
+ s->regs[ECSPI_STATREG] |= ECSPI_STATREG_RR;
+ }
+
+ if (fifo32_is_full(&s->rx_fifo)) {
+ s->regs[ECSPI_STATREG] |= ECSPI_STATREG_RF;
+ } else {
+ s->regs[ECSPI_STATREG] &= ~ECSPI_STATREG_RF;
+ }
+
+ if (fifo32_is_empty(&s->tx_fifo)) {
+ s->regs[ECSPI_STATREG] |= ECSPI_STATREG_TE;
+ } else {
+ s->regs[ECSPI_STATREG] &= ~ECSPI_STATREG_TE;
+ }
+
+ if (fifo32_is_full(&s->tx_fifo)) {
+ s->regs[ECSPI_STATREG] |= ECSPI_STATREG_TF;
+ } else {
+ s->regs[ECSPI_STATREG] &= ~ECSPI_STATREG_TF;
+ }
+
+ level = s->regs[ECSPI_STATREG] & s->regs[ECSPI_INTREG] ? 1 : 0;
+
+ qemu_set_irq(s->irq, level);
+
+ DPRINTF("IRQ level is %d\n", level);
+}
+
+static uint8_t imx_spi_selected_channel(IMXSPIState *s)
+{
+ return EXTRACT(s->regs[ECSPI_CONREG], ECSPI_CONREG_CHANNEL_SELECT);
+}
+
+static uint32_t imx_spi_burst_length(IMXSPIState *s)
+{
+ return EXTRACT(s->regs[ECSPI_CONREG], ECSPI_CONREG_BURST_LENGTH) + 1;
+}
+
+static bool imx_spi_is_enabled(IMXSPIState *s)
+{
+ return s->regs[ECSPI_CONREG] & ECSPI_CONREG_EN;
+}
+
+static bool imx_spi_channel_is_master(IMXSPIState *s)
+{
+ uint8_t mode = EXTRACT(s->regs[ECSPI_CONREG], ECSPI_CONREG_CHANNEL_MODE);
+
+ return (mode & (1 << imx_spi_selected_channel(s))) ? true : false;
+}
+
+static bool imx_spi_is_multiple_master_burst(IMXSPIState *s)
+{
+ uint8_t wave = EXTRACT(s->regs[ECSPI_CONFIGREG], ECSPI_CONFIGREG_SS_CTL);
+
+ return imx_spi_channel_is_master(s) &&
+ !(s->regs[ECSPI_CONREG] & ECSPI_CONREG_SMC) &&
+ ((wave & (1 << imx_spi_selected_channel(s))) ? true : false);
+}
+
+static void imx_spi_flush_txfifo(IMXSPIState *s)
+{
+ uint32_t tx;
+ uint32_t rx;
+
+ DPRINTF("Begin: TX Fifo Size = %d, RX Fifo Size = %d\n",
+ fifo32_num_used(&s->tx_fifo), fifo32_num_used(&s->rx_fifo));
+
+ while (!fifo32_is_empty(&s->tx_fifo)) {
+ int tx_burst = 0;
+ int index = 0;
+
+ if (s->burst_length <= 0) {
+ s->burst_length = imx_spi_burst_length(s);
+
+ DPRINTF("Burst length = %d\n", s->burst_length);
+
+ if (imx_spi_is_multiple_master_burst(s)) {
+ s->regs[ECSPI_CONREG] |= ECSPI_CONREG_XCH;
+ }
+ }
+
+ tx = fifo32_pop(&s->tx_fifo);
+
+ DPRINTF("data tx:0x%08x\n", tx);
+
+ tx_burst = MIN(s->burst_length, 32);
+
+ rx = 0;
+
+ while (tx_burst) {
+ uint8_t byte = tx & 0xff;
+
+ DPRINTF("writing 0x%02x\n", (uint32_t)byte);
+
+ /* We need to write one byte at a time */
+ byte = ssi_transfer(s->bus, byte);
+
+ DPRINTF("0x%02x read\n", (uint32_t)byte);
+
+ tx = tx >> 8;
+ rx |= (byte << (index * 8));
+
+ /* Remove 8 bits from the actual burst */
+ tx_burst -= 8;
+ s->burst_length -= 8;
+ index++;
+ }
+
+ DPRINTF("data rx:0x%08x\n", rx);
+
+ if (fifo32_is_full(&s->rx_fifo)) {
+ s->regs[ECSPI_STATREG] |= ECSPI_STATREG_RO;
+ } else {
+ fifo32_push(&s->rx_fifo, rx);
+ }
+
+ if (s->burst_length <= 0) {
+ s->regs[ECSPI_CONREG] &= ~ECSPI_CONREG_XCH;
+
+ if (!imx_spi_is_multiple_master_burst(s)) {
+ s->regs[ECSPI_STATREG] |= ECSPI_STATREG_TC;
+ break;
+ }
+ }
+ }
+
+ if (fifo32_is_empty(&s->tx_fifo)) {
+ s->regs[ECSPI_STATREG] |= ECSPI_STATREG_TC;
+ }
+
+ /* TODO: We should also use TDR and RDR bits */
+
+ DPRINTF("End: TX Fifo Size = %d, RX Fifo Size = %d\n",
+ fifo32_num_used(&s->tx_fifo), fifo32_num_used(&s->rx_fifo));
+}
+
+static void imx_spi_reset(DeviceState *dev)
+{
+ IMXSPIState *s = IMX_SPI(dev);
+
+ DPRINTF("\n");
+
+ memset(s->regs, 0, sizeof(s->regs));
+
+ s->regs[ECSPI_STATREG] = 0x00000003;
+
+ imx_spi_rxfifo_reset(s);
+ imx_spi_txfifo_reset(s);
+
+ imx_spi_update_irq(s);
+
+ s->burst_length = 0;
+}
+
+static uint64_t imx_spi_read(void *opaque, hwaddr offset, unsigned size)
+{
+ uint32_t value = 0;
+ IMXSPIState *s = opaque;
+ uint32_t index = offset >> 2;
+
+ if (index >= ECSPI_MAX) {
+ qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Bad register at offset 0x%"
+ HWADDR_PRIx "\n", TYPE_IMX_SPI, __func__, offset);
+ return 0;
+ }
+
+ switch (index) {
+ case ECSPI_RXDATA:
+ if (!imx_spi_is_enabled(s)) {
+ value = 0;
+ } else if (fifo32_is_empty(&s->rx_fifo)) {
+ /* value is undefined */
+ value = 0xdeadbeef;
+ } else {
+ /* read from the RX FIFO */
+ value = fifo32_pop(&s->rx_fifo);
+ }
+
+ break;
+ case ECSPI_TXDATA:
+ qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Trying to read from TX FIFO\n",
+ TYPE_IMX_SPI, __func__);
+
+ /* Reading from TXDATA gives 0 */
+
+ break;
+ case ECSPI_MSGDATA:
+ qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Trying to read from MSG FIFO\n",
+ TYPE_IMX_SPI, __func__);
+
+ /* Reading from MSGDATA gives 0 */
+
+ break;
+ default:
+ value = s->regs[index];
+ break;
+ }
+
+ DPRINTF("reg[%s] => 0x%" PRIx32 "\n", imx_spi_reg_name(index), value);
+
+ imx_spi_update_irq(s);
+
+ return (uint64_t)value;
+}
+
+static void imx_spi_write(void *opaque, hwaddr offset, uint64_t value,
+ unsigned size)
+{
+ IMXSPIState *s = opaque;
+ uint32_t index = offset >> 2;
+ uint32_t change_mask;
+
+ if (index >= ECSPI_MAX) {
+ qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Bad register at offset 0x%"
+ HWADDR_PRIx "\n", TYPE_IMX_SPI, __func__, offset);
+ return;
+ }
+
+ DPRINTF("reg[%s] <= 0x%" PRIx32 "\n", imx_spi_reg_name(index),
+ (uint32_t)value);
+
+ change_mask = s->regs[index] ^ value;
+
+ switch (index) {
+ case ECSPI_RXDATA:
+ qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Trying to write to RX FIFO\n",
+ TYPE_IMX_SPI, __func__);
+ break;
+ case ECSPI_TXDATA:
+ case ECSPI_MSGDATA:
+ /* TODO: check the Linux driver for any difference between TXDATA
+ * and MSGDATA handling; they are treated identically here. */
+ if (!imx_spi_is_enabled(s)) {
+ /* Ignore writes if device is disabled */
+ break;
+ } else if (fifo32_is_full(&s->tx_fifo)) {
+ /* Ignore writes if queue is full */
+ break;
+ }
+
+ fifo32_push(&s->tx_fifo, (uint32_t)value);
+
+ if (imx_spi_channel_is_master(s) &&
+ (s->regs[ECSPI_CONREG] & ECSPI_CONREG_SMC)) {
+ /*
+ * Start emitting if current channel is master and SMC bit is
+ * set.
+ */
+ imx_spi_flush_txfifo(s);
+ }
+
+ break;
+ case ECSPI_STATREG:
+ /* the RO and TC bits are write-one-to-clear */
+ value &= ECSPI_STATREG_RO | ECSPI_STATREG_TC;
+ s->regs[ECSPI_STATREG] &= ~value;
+
+ break;
+ case ECSPI_CONREG:
+ s->regs[ECSPI_CONREG] = value;
+
+ if (!imx_spi_is_enabled(s)) {
+ /* device is disabled, so this is a reset */
+ imx_spi_reset(DEVICE(s));
+ return;
+ }
+
+ if (imx_spi_channel_is_master(s)) {
+ int i;
+
+ /* We are in master mode */
+
+ for (i = 0; i < 4; i++) {
+ qemu_set_irq(s->cs_lines[i],
+ i == imx_spi_selected_channel(s) ? 0 : 1);
+ }
+
+ if ((value & change_mask & ECSPI_CONREG_SMC) &&
+ !fifo32_is_empty(&s->tx_fifo)) {
+ /* SMC bit is set and TX FIFO has some slots filled in */
+ imx_spi_flush_txfifo(s);
+ } else if ((value & change_mask & ECSPI_CONREG_XCH) &&
+ !(value & ECSPI_CONREG_SMC)) {
+ /* This is a request to start emitting */
+ imx_spi_flush_txfifo(s);
+ }
+ }
+
+ break;
+ default:
+ s->regs[index] = value;
+
+ break;
+ }
+
+ imx_spi_update_irq(s);
+}
+
+static const struct MemoryRegionOps imx_spi_ops = {
+ .read = imx_spi_read,
+ .write = imx_spi_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+ .valid = {
+ /*
+ * Our device would not work correctly if the guest made unaligned
+ * accesses. This might not be a limitation of the real device, but
+ * in practice there is no reason for a guest to access this device
+ * with anything other than aligned 32-bit accesses.
+ */
+ .min_access_size = 4,
+ .max_access_size = 4,
+ .unaligned = false,
+ },
+};
+
+static void imx_spi_realize(DeviceState *dev, Error **errp)
+{
+ IMXSPIState *s = IMX_SPI(dev);
+ int i;
+
+ s->bus = ssi_create_bus(dev, "spi");
+
+ memory_region_init_io(&s->iomem, OBJECT(dev), &imx_spi_ops, s,
+ TYPE_IMX_SPI, 0x1000);
+ sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->iomem);
+ sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->irq);
+
+ ssi_auto_connect_slaves(dev, s->cs_lines, s->bus);
+
+ for (i = 0; i < 4; ++i) {
+ sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->cs_lines[i]);
+ }
+
+ s->burst_length = 0;
+
+ fifo32_create(&s->tx_fifo, ECSPI_FIFO_SIZE);
+ fifo32_create(&s->rx_fifo, ECSPI_FIFO_SIZE);
+}
+
+static void imx_spi_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->realize = imx_spi_realize;
+ dc->vmsd = &vmstate_imx_spi;
+ dc->reset = imx_spi_reset;
+ dc->desc = "i.MX SPI Controller";
+}
+
+static const TypeInfo imx_spi_info = {
+ .name = TYPE_IMX_SPI,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(IMXSPIState),
+ .class_init = imx_spi_class_init,
+};
+
+static void imx_spi_register_types(void)
+{
+ type_register_static(&imx_spi_info);
+}
+
+type_init(imx_spi_register_types)
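For context, a board or SoC model is expected to instantiate and map this new device itself; the following is only a rough wiring sketch, assuming the IMXSPIState instance is embedded in the SoC state and that FSL_IMX6_eCSPI1_ADDR comes from the new fsl-imx6.h header added further down (sketch_wire_ecspi1 and the irq argument are placeholders, not part of the patch):

    #include "qemu/osdep.h"
    #include "qapi/error.h"
    #include "hw/sysbus.h"
    #include "hw/ssi/imx_spi.h"
    #include "hw/arm/fsl-imx6.h"

    static void sketch_wire_ecspi1(Object *soc, IMXSPIState *spi, qemu_irq irq)
    {
        object_initialize(spi, sizeof(*spi), TYPE_IMX_SPI);
        object_property_add_child(soc, "spi1", OBJECT(spi), &error_abort);
        qdev_set_parent_bus(DEVICE(spi), sysbus_get_default());

        /* realizing runs imx_spi_realize(): it creates the "spi" SSI bus,
         * the MMIO region, the interrupt and the chip-select lines */
        object_property_set_bool(OBJECT(spi), true, "realized", &error_abort);

        sysbus_mmio_map(SYS_BUS_DEVICE(spi), 0, FSL_IMX6_eCSPI1_ADDR);
        sysbus_connect_irq(SYS_BUS_DEVICE(spi), 0, irq);
    }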
diff --git a/hw/usb/dev-mtp.c b/hw/usb/dev-mtp.c
index bda84a64bd..1be85ae75a 100644
--- a/hw/usb/dev-mtp.c
+++ b/hw/usb/dev-mtp.c
@@ -788,8 +788,8 @@ static MTPData *usb_mtp_get_device_info(MTPState *s, MTPControl *c)
trace_usb_mtp_op_get_device_info(s->dev.addr);
usb_mtp_add_u16(d, 100);
- usb_mtp_add_u32(d, 0xffffffff);
- usb_mtp_add_u16(d, 0x0101);
+ usb_mtp_add_u32(d, 0x00000006);
+ usb_mtp_add_u16(d, 0x0064);
usb_mtp_add_wstr(d, L"");
usb_mtp_add_u16(d, 0x0000);
diff --git a/hw/usb/hcd-xhci.c b/hw/usb/hcd-xhci.c
index bcde8a2f48..43ba61599a 100644
--- a/hw/usb/hcd-xhci.c
+++ b/hw/usb/hcd-xhci.c
@@ -1531,7 +1531,10 @@ static TRBCCode xhci_disable_ep(XHCIState *xhci, unsigned int slotid,
usb_packet_cleanup(&epctx->transfers[i].packet);
}
- xhci_set_ep_state(xhci, epctx, NULL, EP_DISABLED);
+ /* only touch guest RAM if we're not resetting the HC */
+ if (xhci->dcbaap_low || xhci->dcbaap_high) {
+ xhci_set_ep_state(xhci, epctx, NULL, EP_DISABLED);
+ }
timer_free(epctx->kick_timer);
g_free(epctx);
diff --git a/hw/usb/host-libusb.c b/hw/usb/host-libusb.c
index 6458a94485..8b774f4939 100644
--- a/hw/usb/host-libusb.c
+++ b/hw/usb/host-libusb.c
@@ -34,7 +34,9 @@
*/
#include "qemu/osdep.h"
+#ifndef CONFIG_WIN32
#include <poll.h>
+#endif
#include <libusb.h>
#include "qapi/error.h"
@@ -204,6 +206,8 @@ static const char *err_names[] = {
static libusb_context *ctx;
static uint32_t loglevel;
+#ifndef CONFIG_WIN32
+
static void usb_host_handle_fd(void *opaque)
{
struct timeval tv = { 0, 0 };
@@ -223,9 +227,13 @@ static void usb_host_del_fd(int fd, void *user_data)
qemu_set_fd_handler(fd, NULL, NULL, NULL);
}
+#endif /* !CONFIG_WIN32 */
+
static int usb_host_init(void)
{
+#ifndef CONFIG_WIN32
const struct libusb_pollfd **poll;
+#endif
int i, rc;
if (ctx) {
@@ -236,7 +244,9 @@ static int usb_host_init(void)
return -1;
}
libusb_set_debug(ctx, loglevel);
-
+#ifdef CONFIG_WIN32
+ /* FIXME: add support for Windows. */
+#else
libusb_set_pollfd_notifiers(ctx, usb_host_add_fd,
usb_host_del_fd,
ctx);
@@ -247,6 +257,7 @@ static int usb_host_init(void)
}
}
free(poll);
+#endif
return 0;
}
diff --git a/hw/virtio/virtio-balloon.c b/hw/virtio/virtio-balloon.c
index 9dbe681790..8c15e09470 100644
--- a/hw/virtio/virtio-balloon.c
+++ b/hw/virtio/virtio-balloon.c
@@ -138,17 +138,18 @@ static void balloon_stats_get_all(Object *obj, Visitor *v, const char *name,
for (i = 0; i < VIRTIO_BALLOON_S_NR; i++) {
visit_type_uint64(v, balloon_stat_names[i], &s->stats[i], &err);
if (err) {
- break;
+ goto out_nested;
}
}
- error_propagate(errp, err);
- err = NULL;
- visit_end_struct(v, &err);
+ visit_check_struct(v, &err);
+out_nested:
+ visit_end_struct(v);
+ if (!err) {
+ visit_check_struct(v, &err);
+ }
out_end:
- error_propagate(errp, err);
- err = NULL;
- visit_end_struct(v, &err);
+ visit_end_struct(v);
out:
error_propagate(errp, err);
}
diff --git a/include/block/block.h b/include/block/block.h
index 3a731377db..b210832778 100644
--- a/include/block/block.h
+++ b/include/block/block.h
@@ -476,6 +476,10 @@ void bdrv_disable_copy_on_read(BlockDriverState *bs);
void bdrv_ref(BlockDriverState *bs);
void bdrv_unref(BlockDriverState *bs);
void bdrv_unref_child(BlockDriverState *parent, BdrvChild *child);
+BdrvChild *bdrv_attach_child(BlockDriverState *parent_bs,
+ BlockDriverState *child_bs,
+ const char *child_name,
+ const BdrvChildRole *child_role);
bool bdrv_op_is_blocked(BlockDriverState *bs, BlockOpType op, Error **errp);
void bdrv_op_block(BlockDriverState *bs, BlockOpType op, Error *reason);
@@ -520,7 +524,8 @@ int bdrv_probe_geometry(BlockDriverState *bs, HDGeometry *geo);
void bdrv_io_plug(BlockDriverState *bs);
void bdrv_io_unplug(BlockDriverState *bs);
-void bdrv_flush_io_queue(BlockDriverState *bs);
+void bdrv_io_unplugged_begin(BlockDriverState *bs);
+void bdrv_io_unplugged_end(BlockDriverState *bs);
/**
* bdrv_drained_begin:
@@ -541,4 +546,8 @@ void bdrv_drained_begin(BlockDriverState *bs);
*/
void bdrv_drained_end(BlockDriverState *bs);
+void bdrv_add_child(BlockDriverState *parent, BlockDriverState *child,
+ Error **errp);
+void bdrv_del_child(BlockDriverState *parent, BdrvChild *child, Error **errp);
+
#endif
diff --git a/include/block/block_int.h b/include/block/block_int.h
index 10d87595be..a029c2003f 100644
--- a/include/block/block_int.h
+++ b/include/block/block_int.h
@@ -38,12 +38,12 @@
#include "qemu/throttle.h"
#define BLOCK_FLAG_ENCRYPT 1
-#define BLOCK_FLAG_COMPAT6 4
#define BLOCK_FLAG_LAZY_REFCOUNTS 8
#define BLOCK_OPT_SIZE "size"
#define BLOCK_OPT_ENCRYPT "encryption"
#define BLOCK_OPT_COMPAT6 "compat6"
+#define BLOCK_OPT_HWVERSION "hwversion"
#define BLOCK_OPT_BACKING_FILE "backing_file"
#define BLOCK_OPT_BACKING_FMT "backing_fmt"
#define BLOCK_OPT_CLUSTER_SIZE "cluster_size"
@@ -127,10 +127,6 @@ struct BlockDriver {
Error **errp);
int (*bdrv_file_open)(BlockDriverState *bs, QDict *options, int flags,
Error **errp);
- int (*bdrv_read)(BlockDriverState *bs, int64_t sector_num,
- uint8_t *buf, int nb_sectors);
- int (*bdrv_write)(BlockDriverState *bs, int64_t sector_num,
- const uint8_t *buf, int nb_sectors);
void (*bdrv_close)(BlockDriverState *bs);
int (*bdrv_create)(const char *filename, QemuOpts *opts, Error **errp);
int (*bdrv_set_key)(BlockDriverState *bs, const char *key);
@@ -153,18 +149,20 @@ struct BlockDriver {
int coroutine_fn (*bdrv_co_readv)(BlockDriverState *bs,
int64_t sector_num, int nb_sectors, QEMUIOVector *qiov);
+ int coroutine_fn (*bdrv_co_preadv)(BlockDriverState *bs,
+ uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags);
int coroutine_fn (*bdrv_co_writev)(BlockDriverState *bs,
int64_t sector_num, int nb_sectors, QEMUIOVector *qiov);
int coroutine_fn (*bdrv_co_writev_flags)(BlockDriverState *bs,
int64_t sector_num, int nb_sectors, QEMUIOVector *qiov, int flags);
-
- int supported_write_flags;
+ int coroutine_fn (*bdrv_co_pwritev)(BlockDriverState *bs,
+ uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags);
/*
* Efficiently zero a region of the disk image. Typically an image format
* would use a compact metadata representation to implement this. This
- * function pointer may be NULL and .bdrv_co_writev() will be called
- * instead.
+ * function pointer may be NULL or return -ENOTSUP and .bdrv_co_writev()
+ * will be called instead.
*/
int coroutine_fn (*bdrv_co_write_zeroes)(BlockDriverState *bs,
int64_t sector_num, int nb_sectors, BdrvRequestFlags flags);
@@ -294,7 +292,6 @@ struct BlockDriver {
/* io queue for linux-aio */
void (*bdrv_io_plug)(BlockDriverState *bs);
void (*bdrv_io_unplug)(BlockDriverState *bs);
- void (*bdrv_flush_io_queue)(BlockDriverState *bs);
/**
* Try to get @bs's logical and physical block size.
@@ -317,6 +314,11 @@ struct BlockDriver {
*/
void (*bdrv_drain)(BlockDriverState *bs);
+ void (*bdrv_add_child)(BlockDriverState *parent, BlockDriverState *child,
+ Error **errp);
+ void (*bdrv_del_child)(BlockDriverState *parent, BdrvChild *child,
+ Error **errp);
+
QLIST_ENTRY(BlockDriver) list;
};
@@ -424,10 +426,10 @@ struct BlockDriverState {
/* I/O throttling.
* throttle_state tells us if this BDS has I/O limits configured.
- * io_limits_enabled tells us if they are currently being
- * enforced, but it can be temporarily set to false */
+ * io_limits_disabled tells us if enforcement is temporarily disabled */
CoQueue throttled_reqs[2];
- bool io_limits_enabled;
+ unsigned int io_limits_disabled;
+
/* The following fields are protected by the ThrottleGroup lock.
* See the ThrottleGroup documentation for details. */
ThrottleState *throttle_state;
@@ -446,6 +448,11 @@ struct BlockDriverState {
/* Alignment requirement for offset/length of I/O requests */
unsigned int request_alignment;
+ /* Flags honored during pwrite (so far: BDRV_REQ_FUA) */
+ unsigned int supported_write_flags;
+ /* Flags honored during write_zeroes (so far: BDRV_REQ_FUA,
+ * BDRV_REQ_MAY_UNMAP) */
+ unsigned int supported_zero_flags;
/* the following member gives a name to every node on the bs graph. */
char node_name[32];
@@ -484,6 +491,10 @@ struct BlockDriverState {
uint64_t write_threshold_offset;
NotifierWithReturn write_threshold_notifier;
+ /* counters for nested bdrv_io_plug and bdrv_io_unplugged_begin */
+ unsigned io_plugged;
+ unsigned io_plug_disabled;
+
int quiesce_counter;
};
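The two new flag fields are meant to be filled in by a driver when it opens an image; a minimal sketch, assuming a driver whose .bdrv_co_pwritev and .bdrv_co_write_zeroes honor FUA and unmap natively (my_driver_open is an assumed name):

    static int my_driver_open(BlockDriverState *bs, QDict *options, int flags,
                              Error **errp)
    {
        /* advertise the flags so the generic block layer does not have to
         * emulate them with a separate flush or a full write */
        bs->supported_write_flags = BDRV_REQ_FUA;
        bs->supported_zero_flags = BDRV_REQ_FUA | BDRV_REQ_MAY_UNMAP;
        return 0;
    }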
@@ -517,10 +528,10 @@ extern BlockDriver bdrv_qcow2;
*/
void bdrv_setup_io_funcs(BlockDriver *bdrv);
-int coroutine_fn bdrv_co_do_preadv(BlockDriverState *bs,
+int coroutine_fn bdrv_co_preadv(BlockDriverState *bs,
int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
BdrvRequestFlags flags);
-int coroutine_fn bdrv_co_do_pwritev(BlockDriverState *bs,
+int coroutine_fn bdrv_co_pwritev(BlockDriverState *bs,
int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
BdrvRequestFlags flags);
@@ -713,6 +724,9 @@ BdrvChild *bdrv_root_attach_child(BlockDriverState *child_bs,
const BdrvChildRole *child_role);
void bdrv_root_unref_child(BdrvChild *child);
+void bdrv_no_throttling_begin(BlockDriverState *bs);
+void bdrv_no_throttling_end(BlockDriverState *bs);
+
void blk_dev_change_media_cb(BlockBackend *blk, bool load);
bool blk_dev_has_removable_media(BlockBackend *blk);
bool blk_dev_has_tray(BlockBackend *blk);
diff --git a/include/block/throttle-groups.h b/include/block/throttle-groups.h
index aba28f30b6..395f72d444 100644
--- a/include/block/throttle-groups.h
+++ b/include/block/throttle-groups.h
@@ -38,6 +38,7 @@ void throttle_group_get_config(BlockDriverState *bs, ThrottleConfig *cfg);
void throttle_group_register_bs(BlockDriverState *bs, const char *groupname);
void throttle_group_unregister_bs(BlockDriverState *bs);
+void throttle_group_restart_bs(BlockDriverState *bs);
void coroutine_fn throttle_group_co_io_limits_intercept(BlockDriverState *bs,
unsigned int bytes,
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index 736209505a..85528f9941 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -76,7 +76,8 @@ bool cpu_restore_state(CPUState *cpu, uintptr_t searched_pc);
void QEMU_NORETURN cpu_resume_from_signal(CPUState *cpu, void *puc);
void QEMU_NORETURN cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
TranslationBlock *tb_gen_code(CPUState *cpu,
- target_ulong pc, target_ulong cs_base, int flags,
+ target_ulong pc, target_ulong cs_base,
+ uint32_t flags,
int cflags);
void cpu_exec_init(CPUState *cpu, Error **errp);
void QEMU_NORETURN cpu_loop_exit(CPUState *cpu);
@@ -229,13 +230,14 @@ static inline void tlb_flush_by_mmuidx(CPUState *cpu, ...)
|| defined(__sparc__) || defined(__aarch64__) \
|| defined(__s390x__) || defined(__mips__) \
|| defined(CONFIG_TCG_INTERPRETER)
+/* NOTE: Direct jump patching must be atomic to be thread-safe. */
#define USE_DIRECT_JUMP
#endif
struct TranslationBlock {
target_ulong pc; /* simulated PC corresponding to this block (EIP + CS base) */
target_ulong cs_base; /* CS base for this block */
- uint64_t flags; /* flags defining in which context the code was generated */
+ uint32_t flags; /* flags defining in which context the code was generated */
uint16_t size; /* size of target code for this block (1 <=
size <= TARGET_PAGE_SIZE) */
uint16_t icount;
@@ -257,20 +259,34 @@ struct TranslationBlock {
struct TranslationBlock *page_next[2];
tb_page_addr_t page_addr[2];
- /* the following data are used to directly call another TB from
- the code of this one. */
- uint16_t tb_next_offset[2]; /* offset of original jump target */
+ /* The following data are used to directly call another TB from
+ * the code of this one. This can be done either by emitting direct or
+ * indirect native jump instructions. These jumps are reset so that the TB
+ * just continues its execution. The TB can be linked to another one by
+ * setting one of the jump targets (or patching the jump instruction). Only
+ * two such jumps are supported.
+ */
+ uint16_t jmp_reset_offset[2]; /* offset of original jump target */
+#define TB_JMP_RESET_OFFSET_INVALID 0xffff /* indicates no jump generated */
#ifdef USE_DIRECT_JUMP
- uint16_t tb_jmp_offset[2]; /* offset of jump instruction */
+ uint16_t jmp_insn_offset[2]; /* offset of native jump instruction */
#else
- uintptr_t tb_next[2]; /* address of jump generated code */
+ uintptr_t jmp_target_addr[2]; /* target address for indirect jump */
#endif
- /* list of TBs jumping to this one. This is a circular list using
- the two least significant bits of the pointers to tell what is
- the next pointer: 0 = jmp_next[0], 1 = jmp_next[1], 2 =
- jmp_first */
- struct TranslationBlock *jmp_next[2];
- struct TranslationBlock *jmp_first;
+ /* Each TB has an associated circular list of TBs jumping to this one.
+ * jmp_list_first points to the first TB jumping to this one.
+ * jmp_list_next is used to point to the next TB in a list.
+ * Since each TB can have two jumps, it can participate in two lists.
+ * jmp_list_first and jmp_list_next are 4-byte aligned pointers to a
+ * TranslationBlock structure, but the two least significant bits of
+ * them are used to encode which data field of the pointed TB should
+ * be used to traverse the list further from that TB:
+ * 0 => jmp_list_next[0], 1 => jmp_list_next[1], 2 => jmp_list_first.
+ * In other words, 0/1 tells which jump is used in the pointed TB,
+ * and 2 means that this is a pointer back to the target TB of this list.
+ */
+ uintptr_t jmp_list_next[2];
+ uintptr_t jmp_list_first;
};
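The tagged-pointer encoding described in the comment can be decoded with two bit operations; a sketch of following one link of such a list (tb_jmp_list_step is an assumed helper name, not part of the patch):

    /* Return the TB a jmp-list entry points at, and store in *n which of its
     * fields continues the list: 0/1 selects jmp_list_next[n], while 2 means
     * the entry points back at the list's target TB (jmp_list_first). */
    static inline TranslationBlock *tb_jmp_list_step(uintptr_t entry, int *n)
    {
        *n = entry & 3;
        return (TranslationBlock *)(entry & ~(uintptr_t)3);
    }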
#include "qemu/thread.h"
@@ -288,8 +304,6 @@ struct TBContext {
/* statistics */
int tb_flush_count;
int tb_phys_invalidate_count;
-
- int tb_invalidated_flag;
};
void tb_free(TranslationBlock *tb);
@@ -302,7 +316,7 @@ void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
/* patch the branch destination */
- *(uint32_t *)jmp_addr = addr - (jmp_addr + 4);
+ atomic_set((int32_t *)jmp_addr, addr - (jmp_addr + 4));
/* no need to flush icache explicitly */
}
#elif defined(_ARCH_PPC)
@@ -312,7 +326,7 @@ void ppc_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr);
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
/* patch the branch destination */
- stl_le_p((void*)jmp_addr, addr - (jmp_addr + 4));
+ atomic_set((int32_t *)jmp_addr, addr - (jmp_addr + 4));
/* no need to flush icache explicitly */
}
#elif defined(__s390x__)
@@ -320,36 +334,15 @@ static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
/* patch the branch destination */
intptr_t disp = addr - (jmp_addr - 2);
- stl_be_p((void*)jmp_addr, disp / 2);
+ atomic_set((int32_t *)jmp_addr, disp / 2);
/* no need to flush icache explicitly */
}
#elif defined(__aarch64__)
void aarch64_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr);
#define tb_set_jmp_target1 aarch64_tb_set_jmp_target
#elif defined(__arm__)
-static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
-{
-#if !QEMU_GNUC_PREREQ(4, 1)
- register unsigned long _beg __asm ("a1");
- register unsigned long _end __asm ("a2");
- register unsigned long _flg __asm ("a3");
-#endif
-
- /* we could use a ldr pc, [pc, #-4] kind of branch and avoid the flush */
- *(uint32_t *)jmp_addr =
- (*(uint32_t *)jmp_addr & ~0xffffff)
- | (((addr - (jmp_addr + 8)) >> 2) & 0xffffff);
-
-#if QEMU_GNUC_PREREQ(4, 1)
- __builtin___clear_cache((char *) jmp_addr, (char *) jmp_addr + 4);
-#else
- /* flush icache */
- _beg = jmp_addr;
- _end = jmp_addr + 4;
- _flg = 0;
- __asm __volatile__ ("swi 0x9f0002" : : "r" (_beg), "r" (_end), "r" (_flg));
-#endif
-}
+void arm_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr);
+#define tb_set_jmp_target1 arm_tb_set_jmp_target
#elif defined(__sparc__) || defined(__mips__)
void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr);
#else
@@ -359,7 +352,7 @@ void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr);
static inline void tb_set_jmp_target(TranslationBlock *tb,
int n, uintptr_t addr)
{
- uint16_t offset = tb->tb_jmp_offset[n];
+ uint16_t offset = tb->jmp_insn_offset[n];
tb_set_jmp_target1((uintptr_t)(tb->tc_ptr + offset), addr);
}
@@ -369,7 +362,7 @@ static inline void tb_set_jmp_target(TranslationBlock *tb,
static inline void tb_set_jmp_target(TranslationBlock *tb,
int n, uintptr_t addr)
{
- tb->tb_next[n] = addr;
+ tb->jmp_target_addr[n] = addr;
}
#endif
@@ -377,20 +370,23 @@ static inline void tb_set_jmp_target(TranslationBlock *tb,
static inline void tb_add_jump(TranslationBlock *tb, int n,
TranslationBlock *tb_next)
{
- /* NOTE: this test is only needed for thread safety */
- if (!tb->jmp_next[n]) {
- qemu_log_mask_and_addr(CPU_LOG_EXEC, tb->pc,
- "Linking TBs %p [" TARGET_FMT_lx
- "] index %d -> %p [" TARGET_FMT_lx "]\n",
- tb->tc_ptr, tb->pc, n,
- tb_next->tc_ptr, tb_next->pc);
- /* patch the native jump address */
- tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc_ptr);
-
- /* add in TB jmp circular list */
- tb->jmp_next[n] = tb_next->jmp_first;
- tb_next->jmp_first = (TranslationBlock *)((uintptr_t)(tb) | (n));
+ if (tb->jmp_list_next[n]) {
+ /* Another thread has already done this while we were
+ * outside of the lock; nothing to do in this case */
+ return;
}
+ qemu_log_mask_and_addr(CPU_LOG_EXEC, tb->pc,
+ "Linking TBs %p [" TARGET_FMT_lx
+ "] index %d -> %p [" TARGET_FMT_lx "]\n",
+ tb->tc_ptr, tb->pc, n,
+ tb_next->tc_ptr, tb_next->pc);
+
+ /* patch the native jump address */
+ tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc_ptr);
+
+ /* add in TB jmp circular list */
+ tb->jmp_list_next[n] = tb_next->jmp_list_first;
+ tb_next->jmp_list_first = (uintptr_t)tb | n;
}
/* GETRA is the true target of the return instruction that we'll execute,
diff --git a/include/exec/gen-icount.h b/include/exec/gen-icount.h
index 05d89d358f..a011324b92 100644
--- a/include/exec/gen-icount.h
+++ b/include/exec/gen-icount.h
@@ -5,14 +5,13 @@
/* Helpers for instruction counting code generation. */
-static TCGArg *icount_arg;
+static int icount_start_insn_idx;
static TCGLabel *icount_label;
static TCGLabel *exitreq_label;
static inline void gen_tb_start(TranslationBlock *tb)
{
TCGv_i32 count, flag, imm;
- int i;
exitreq_label = gen_new_label();
flag = tcg_temp_new_i32();
@@ -31,13 +30,12 @@ static inline void gen_tb_start(TranslationBlock *tb)
-ENV_OFFSET + offsetof(CPUState, icount_decr.u32));
imm = tcg_temp_new_i32();
+ /* We emit a movi with a dummy immediate argument. Keep the insn index
+ * of the movi so that, once the actual insn count is known at the end
+ * of translation, we can update the immediate argument with it. */
+ icount_start_insn_idx = tcg_op_buf_count();
tcg_gen_movi_i32(imm, 0xdeadbeef);
- /* This is a horrid hack to allow fixing up the value later. */
- i = tcg_ctx.gen_last_op_idx;
- i = tcg_ctx.gen_op_buf[i].args;
- icount_arg = &tcg_ctx.gen_opparam_buf[i + 1];
-
tcg_gen_sub_i32(count, count, imm);
tcg_temp_free_i32(imm);
@@ -53,7 +51,9 @@ static void gen_tb_end(TranslationBlock *tb, int num_insns)
tcg_gen_exit_tb((uintptr_t)tb + TB_EXIT_REQUESTED);
if (tb->cflags & CF_USE_ICOUNT) {
- *icount_arg = num_insns;
+ /* Update the num_insn immediate parameter now that we know
+ * the actual insn count. */
+ tcg_set_insn_param(icount_start_insn_idx, 1, num_insns);
gen_set_label(icount_label);
tcg_gen_exit_tb((uintptr_t)tb + TB_EXIT_ICOUNT_EXPIRED);
}
diff --git a/include/hw/acpi/acpi-defs.h b/include/hw/acpi/acpi-defs.h
index c7a03d43b9..850a9626b7 100644
--- a/include/hw/acpi/acpi-defs.h
+++ b/include/hw/acpi/acpi-defs.h
@@ -455,8 +455,10 @@ struct AcpiSystemResourceAffinityTable
} QEMU_PACKED;
typedef struct AcpiSystemResourceAffinityTable AcpiSystemResourceAffinityTable;
-#define ACPI_SRAT_PROCESSOR 0
+#define ACPI_SRAT_PROCESSOR_APIC 0
#define ACPI_SRAT_MEMORY 1
+#define ACPI_SRAT_PROCESSOR_x2APIC 2
+#define ACPI_SRAT_PROCESSOR_GICC 3
struct AcpiSratProcessorAffinity
{
@@ -473,7 +475,7 @@ typedef struct AcpiSratProcessorAffinity AcpiSratProcessorAffinity;
struct AcpiSratMemoryAffinity
{
ACPI_SUB_HEADER_DEF
- uint8_t proximity[4];
+ uint32_t proximity;
uint16_t reserved1;
uint64_t base_addr;
uint64_t range_length;
@@ -483,6 +485,17 @@ struct AcpiSratMemoryAffinity
} QEMU_PACKED;
typedef struct AcpiSratMemoryAffinity AcpiSratMemoryAffinity;
+struct AcpiSratProcessorGiccAffinity
+{
+ ACPI_SUB_HEADER_DEF
+ uint32_t proximity;
+ uint32_t acpi_processor_uid;
+ uint32_t flags;
+ uint32_t clock_domain;
+} QEMU_PACKED;
+
+typedef struct AcpiSratProcessorGiccAffinity AcpiSratProcessorGiccAffinity;
+
/* PCI fw r3.0 MCFG table. */
/* Subtable */
struct AcpiMcfgAllocation {
diff --git a/include/hw/acpi/aml-build.h b/include/hw/acpi/aml-build.h
index 2c994b351a..7eb51c7885 100644
--- a/include/hw/acpi/aml-build.h
+++ b/include/hw/acpi/aml-build.h
@@ -198,6 +198,13 @@ typedef enum {
AML_PULL_NONE = 3,
} AmlPinConfig;
+typedef enum {
+ MEM_AFFINITY_NOFLAGS = 0,
+ MEM_AFFINITY_ENABLED = (1 << 0),
+ MEM_AFFINITY_HOTPLUGGABLE = (1 << 1),
+ MEM_AFFINITY_NON_VOLATILE = (1 << 2),
+} MemoryAffinityFlags;
+
typedef
struct AcpiBuildTables {
GArray *table_data;
@@ -372,4 +379,7 @@ int
build_append_named_dword(GArray *array, const char *name_format, ...)
GCC_FMT_ATTR(2, 3);
+void build_srat_memory(AcpiSratMemoryAffinity *numamem, uint64_t base,
+ uint64_t len, int node, MemoryAffinityFlags flags);
+
#endif
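A rough usage sketch for the new helper, reserving one SRAT memory affinity structure per NUMA node while the table is being built (table_data, mem_base, mem_len and i are assumed locals of the calling table builder):

    AcpiSratMemoryAffinity *numamem;

    numamem = acpi_data_push(table_data, sizeof(*numamem));
    build_srat_memory(numamem, mem_base, mem_len, i, MEM_AFFINITY_ENABLED);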
diff --git a/include/hw/arm/fsl-imx6.h b/include/hw/arm/fsl-imx6.h
new file mode 100644
index 0000000000..d24aaee1c1
--- /dev/null
+++ b/include/hw/arm/fsl-imx6.h
@@ -0,0 +1,450 @@
+/*
+ * Freescale i.MX6 SoC emulation
+ *
+ * Copyright (C) 2015 Jean-Christophe Dubois <jcd@tribudubois.net>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#ifndef FSL_IMX6_H
+#define FSL_IMX6_H
+
+#include "hw/arm/arm.h"
+#include "hw/cpu/a9mpcore.h"
+#include "hw/misc/imx6_ccm.h"
+#include "hw/misc/imx6_src.h"
+#include "hw/char/imx_serial.h"
+#include "hw/timer/imx_gpt.h"
+#include "hw/timer/imx_epit.h"
+#include "hw/i2c/imx_i2c.h"
+#include "hw/gpio/imx_gpio.h"
+#include "hw/sd/sdhci.h"
+#include "hw/ssi/imx_spi.h"
+#include "exec/memory.h"
+
+#define TYPE_FSL_IMX6 "fsl,imx6"
+#define FSL_IMX6(obj) OBJECT_CHECK(FslIMX6State, (obj), TYPE_FSL_IMX6)
+
+#define FSL_IMX6_NUM_CPUS 4
+#define FSL_IMX6_NUM_UARTS 5
+#define FSL_IMX6_NUM_EPITS 2
+#define FSL_IMX6_NUM_I2CS 3
+#define FSL_IMX6_NUM_GPIOS 7
+#define FSL_IMX6_NUM_ESDHCS 4
+#define FSL_IMX6_NUM_ECSPIS 5
+
+typedef struct FslIMX6State {
+ /*< private >*/
+ DeviceState parent_obj;
+
+ /*< public >*/
+ ARMCPU cpu[FSL_IMX6_NUM_CPUS];
+ A9MPPrivState a9mpcore;
+ IMX6CCMState ccm;
+ IMX6SRCState src;
+ IMXSerialState uart[FSL_IMX6_NUM_UARTS];
+ IMXGPTState gpt;
+ IMXEPITState epit[FSL_IMX6_NUM_EPITS];
+ IMXI2CState i2c[FSL_IMX6_NUM_I2CS];
+ IMXGPIOState gpio[FSL_IMX6_NUM_GPIOS];
+ SDHCIState esdhc[FSL_IMX6_NUM_ESDHCS];
+ IMXSPIState spi[FSL_IMX6_NUM_ECSPIS];
+ MemoryRegion rom;
+ MemoryRegion caam;
+ MemoryRegion ocram;
+ MemoryRegion ocram_alias;
+} FslIMX6State;
+
+
+#define FSL_IMX6_MMDC_ADDR 0x10000000
+#define FSL_IMX6_MMDC_SIZE 0xF0000000
+#define FSL_IMX6_EIM_MEM_ADDR 0x08000000
+#define FSL_IMX6_EIM_MEM_SIZE 0x8000000
+#define FSL_IMX6_IPU_2_ADDR 0x02800000
+#define FSL_IMX6_IPU_2_SIZE 0x400000
+#define FSL_IMX6_IPU_1_ADDR 0x02400000
+#define FSL_IMX6_IPU_1_SIZE 0x400000
+#define FSL_IMX6_MIPI_HSI_ADDR 0x02208000
+#define FSL_IMX6_MIPI_HSI_SIZE 0x4000
+#define FSL_IMX6_OPENVG_ADDR 0x02204000
+#define FSL_IMX6_OPENVG_SIZE 0x4000
+#define FSL_IMX6_SATA_ADDR 0x02200000
+#define FSL_IMX6_SATA_SIZE 0x4000
+#define FSL_IMX6_AIPS_2_ADDR 0x02100000
+#define FSL_IMX6_AIPS_2_SIZE 0x100000
+/* AIPS2 */
+#define FSL_IMX6_UART5_ADDR 0x021F4000
+#define FSL_IMX6_UART5_SIZE 0x4000
+#define FSL_IMX6_UART4_ADDR 0x021F0000
+#define FSL_IMX6_UART4_SIZE 0x4000
+#define FSL_IMX6_UART3_ADDR 0x021EC000
+#define FSL_IMX6_UART3_SIZE 0x4000
+#define FSL_IMX6_UART2_ADDR 0x021E8000
+#define FSL_IMX6_UART2_SIZE 0x4000
+#define FSL_IMX6_VDOA_ADDR 0x021E4000
+#define FSL_IMX6_VDOA_SIZE 0x4000
+#define FSL_IMX6_MIPI_DSI_ADDR 0x021E0000
+#define FSL_IMX6_MIPI_DSI_SIZE 0x4000
+#define FSL_IMX6_MIPI_CSI_ADDR 0x021DC000
+#define FSL_IMX6_MIPI_CSI_SIZE 0x4000
+#define FSL_IMX6_AUDMUX_ADDR 0x021D8000
+#define FSL_IMX6_AUDMUX_SIZE 0x4000
+#define FSL_IMX6_TZASC2_ADDR 0x021D4000
+#define FSL_IMX6_TZASC2_SIZE 0x4000
+#define FSL_IMX6_TZASC1_ADDR 0x021D0000
+#define FSL_IMX6_TZASC1_SIZE 0x4000
+#define FSL_IMX6_CSU_ADDR 0x021C0000
+#define FSL_IMX6_CSU_SIZE 0x4000
+#define FSL_IMX6_OCOTPCTRL_ADDR 0x021BC000
+#define FSL_IMX6_OCOTPCTRL_SIZE 0x4000
+#define FSL_IMX6_EIM_ADDR 0x021B8000
+#define FSL_IMX6_EIM_SIZE 0x4000
+#define FSL_IMX6_MMDC1_ADDR 0x021B4000
+#define FSL_IMX6_MMDC1_SIZE 0x4000
+#define FSL_IMX6_MMDC0_ADDR 0x021B0000
+#define FSL_IMX6_MMDC0_SIZE 0x4000
+#define FSL_IMX6_ROMCP_ADDR 0x021AC000
+#define FSL_IMX6_ROMCP_SIZE 0x4000
+#define FSL_IMX6_I2C3_ADDR 0x021A8000
+#define FSL_IMX6_I2C3_SIZE 0x4000
+#define FSL_IMX6_I2C2_ADDR 0x021A4000
+#define FSL_IMX6_I2C2_SIZE 0x4000
+#define FSL_IMX6_I2C1_ADDR 0x021A0000
+#define FSL_IMX6_I2C1_SIZE 0x4000
+#define FSL_IMX6_uSDHC4_ADDR 0x0219C000
+#define FSL_IMX6_uSDHC4_SIZE 0x4000
+#define FSL_IMX6_uSDHC3_ADDR 0x02198000
+#define FSL_IMX6_uSDHC3_SIZE 0x4000
+#define FSL_IMX6_uSDHC2_ADDR 0x02194000
+#define FSL_IMX6_uSDHC2_SIZE 0x4000
+#define FSL_IMX6_uSDHC1_ADDR 0x02190000
+#define FSL_IMX6_uSDHC1_SIZE 0x4000
+#define FSL_IMX6_MLB150_ADDR 0x0218C000
+#define FSL_IMX6_MLB150_SIZE 0x4000
+#define FSL_IMX6_ENET_ADDR 0x02188000
+#define FSL_IMX6_ENET_SIZE 0x4000
+#define FSL_IMX6_USBOH3_USB_ADDR 0x02184000
+#define FSL_IMX6_USBOH3_USB_SIZE 0x4000
+#define FSL_IMX6_AIPS2_CFG_ADDR 0x0217C000
+#define FSL_IMX6_AIPS2_CFG_SIZE 0x4000
+/* DAP */
+#define FSL_IMX6_PTF_CTRL_ADDR 0x02160000
+#define FSL_IMX6_PTF_CTRL_SIZE 0x1000
+#define FSL_IMX6_PTM3_ADDR 0x0215F000
+#define FSL_IMX6_PTM3_SIZE 0x1000
+#define FSL_IMX6_PTM2_ADDR 0x0215E000
+#define FSL_IMX6_PTM2_SIZE 0x1000
+#define FSL_IMX6_PTM1_ADDR 0x0215D000
+#define FSL_IMX6_PTM1_SIZE 0x1000
+#define FSL_IMX6_PTM0_ADDR 0x0215C000
+#define FSL_IMX6_PTM0_SIZE 0x1000
+#define FSL_IMX6_CTI3_ADDR 0x0215B000
+#define FSL_IMX6_CTI3_SIZE 0x1000
+#define FSL_IMX6_CTI2_ADDR 0x0215A000
+#define FSL_IMX6_CTI2_SIZE 0x1000
+#define FSL_IMX6_CTI1_ADDR 0x02159000
+#define FSL_IMX6_CTI1_SIZE 0x1000
+#define FSL_IMX6_CTI0_ADDR 0x02158000
+#define FSL_IMX6_CTI0_SIZE 0x1000
+#define FSL_IMX6_CPU3_PMU_ADDR 0x02157000
+#define FSL_IMX6_CPU3_PMU_SIZE 0x1000
+#define FSL_IMX6_CPU3_DEBUG_IF_ADDR 0x02156000
+#define FSL_IMX6_CPU3_DEBUG_IF_SIZE 0x1000
+#define FSL_IMX6_CPU2_PMU_ADDR 0x02155000
+#define FSL_IMX6_CPU2_PMU_SIZE 0x1000
+#define FSL_IMX6_CPU2_DEBUG_IF_ADDR 0x02154000
+#define FSL_IMX6_CPU2_DEBUG_IF_SIZE 0x1000
+#define FSL_IMX6_CPU1_PMU_ADDR 0x02153000
+#define FSL_IMX6_CPU1_PMU_SIZE 0x1000
+#define FSL_IMX6_CPU1_DEBUG_IF_ADDR 0x02152000
+#define FSL_IMX6_CPU1_DEBUG_IF_SIZE 0x1000
+#define FSL_IMX6_CPU0_PMU_ADDR 0x02151000
+#define FSL_IMX6_CPU0_PMU_SIZE 0x1000
+#define FSL_IMX6_CPU0_DEBUG_IF_ADDR 0x02150000
+#define FSL_IMX6_CPU0_DEBUG_IF_SIZE 0x1000
+#define FSL_IMX6_CA9_INTEG_ADDR 0x0214F000
+#define FSL_IMX6_CA9_INTEG_SIZE 0x1000
+#define FSL_IMX6_FUNNEL_ADDR 0x02144000
+#define FSL_IMX6_FUNNEL_SIZE 0x1000
+#define FSL_IMX6_TPIU_ADDR 0x02143000
+#define FSL_IMX6_TPIU_SIZE 0x1000
+#define FSL_IMX6_EXT_CTI_ADDR 0x02142000
+#define FSL_IMX6_EXT_CTI_SIZE 0x1000
+#define FSL_IMX6_ETB_ADDR 0x02141000
+#define FSL_IMX6_ETB_SIZE 0x1000
+#define FSL_IMX6_DAP_ROM_TABLE_ADDR 0x02140000
+#define FSL_IMX6_DAP_ROM_TABLE_SIZE 0x1000
+/* DAP end */
+#define FSL_IMX6_CAAM_ADDR 0x02100000
+#define FSL_IMX6_CAAM_SIZE 0x10000
+/* AIPS2 end */
+#define FSL_IMX6_AIPS_1_ADDR 0x02000000
+#define FSL_IMX6_AIPS_1_SIZE 0x100000
+/* AIPS1 */
+#define FSL_IMX6_SDMA_ADDR 0x020EC000
+#define FSL_IMX6_SDMA_SIZE 0x4000
+#define FSL_IMX6_DCIC2_ADDR 0x020E8000
+#define FSL_IMX6_DCIC2_SIZE 0x4000
+#define FSL_IMX6_DCIC1_ADDR 0x020E4000
+#define FSL_IMX6_DCIC1_SIZE 0x4000
+#define FSL_IMX6_IOMUXC_ADDR 0x020E0000
+#define FSL_IMX6_IOMUXC_SIZE 0x4000
+#define FSL_IMX6_PGCARM_ADDR 0x020DCA00
+#define FSL_IMX6_PGCARM_SIZE 0x20
+#define FSL_IMX6_PGCPU_ADDR 0x020DC260
+#define FSL_IMX6_PGCPU_SIZE 0x20
+#define FSL_IMX6_GPC_ADDR 0x020DC000
+#define FSL_IMX6_GPC_SIZE 0x4000
+#define FSL_IMX6_SRC_ADDR 0x020D8000
+#define FSL_IMX6_SRC_SIZE 0x4000
+#define FSL_IMX6_EPIT2_ADDR 0x020D4000
+#define FSL_IMX6_EPIT2_SIZE 0x4000
+#define FSL_IMX6_EPIT1_ADDR 0x020D0000
+#define FSL_IMX6_EPIT1_SIZE 0x4000
+#define FSL_IMX6_SNVSHP_ADDR 0x020CC000
+#define FSL_IMX6_SNVSHP_SIZE 0x4000
+#define FSL_IMX6_USBPHY2_ADDR 0x020CA000
+#define FSL_IMX6_USBPHY2_SIZE 0x1000
+#define FSL_IMX6_USBPHY1_ADDR 0x020C9000
+#define FSL_IMX6_USBPHY1_SIZE 0x1000
+#define FSL_IMX6_ANALOG_ADDR 0x020C8000
+#define FSL_IMX6_ANALOG_SIZE 0x1000
+#define FSL_IMX6_CCM_ADDR 0x020C4000
+#define FSL_IMX6_CCM_SIZE 0x4000
+#define FSL_IMX6_WDOG2_ADDR 0x020C0000
+#define FSL_IMX6_WDOG2_SIZE 0x4000
+#define FSL_IMX6_WDOG1_ADDR 0x020BC000
+#define FSL_IMX6_WDOG1_SIZE 0x4000
+#define FSL_IMX6_KPP_ADDR 0x020B8000
+#define FSL_IMX6_KPP_SIZE 0x4000
+#define FSL_IMX6_GPIO7_ADDR 0x020B4000
+#define FSL_IMX6_GPIO7_SIZE 0x4000
+#define FSL_IMX6_GPIO6_ADDR 0x020B0000
+#define FSL_IMX6_GPIO6_SIZE 0x4000
+#define FSL_IMX6_GPIO5_ADDR 0x020AC000
+#define FSL_IMX6_GPIO5_SIZE 0x4000
+#define FSL_IMX6_GPIO4_ADDR 0x020A8000
+#define FSL_IMX6_GPIO4_SIZE 0x4000
+#define FSL_IMX6_GPIO3_ADDR 0x020A4000
+#define FSL_IMX6_GPIO3_SIZE 0x4000
+#define FSL_IMX6_GPIO2_ADDR 0x020A0000
+#define FSL_IMX6_GPIO2_SIZE 0x4000
+#define FSL_IMX6_GPIO1_ADDR 0x0209C000
+#define FSL_IMX6_GPIO1_SIZE 0x4000
+#define FSL_IMX6_GPT_ADDR 0x02098000
+#define FSL_IMX6_GPT_SIZE 0x4000
+#define FSL_IMX6_CAN2_ADDR 0x02094000
+#define FSL_IMX6_CAN2_SIZE 0x4000
+#define FSL_IMX6_CAN1_ADDR 0x02090000
+#define FSL_IMX6_CAN1_SIZE 0x4000
+#define FSL_IMX6_PWM4_ADDR 0x0208C000
+#define FSL_IMX6_PWM4_SIZE 0x4000
+#define FSL_IMX6_PWM3_ADDR 0x02088000
+#define FSL_IMX6_PWM3_SIZE 0x4000
+#define FSL_IMX6_PWM2_ADDR 0x02084000
+#define FSL_IMX6_PWM2_SIZE 0x4000
+#define FSL_IMX6_PWM1_ADDR 0x02080000
+#define FSL_IMX6_PWM1_SIZE 0x4000
+#define FSL_IMX6_AIPS1_CFG_ADDR 0x0207C000
+#define FSL_IMX6_AIPS1_CFG_SIZE 0x4000
+#define FSL_IMX6_VPU_ADDR 0x02040000
+#define FSL_IMX6_VPU_SIZE 0x3C000
+#define FSL_IMX6_AIPS1_SPBA_ADDR 0x0203C000
+#define FSL_IMX6_AIPS1_SPBA_SIZE 0x4000
+#define FSL_IMX6_ASRC_ADDR 0x02034000
+#define FSL_IMX6_ASRC_SIZE 0x4000
+#define FSL_IMX6_SSI3_ADDR 0x02030000
+#define FSL_IMX6_SSI3_SIZE 0x4000
+#define FSL_IMX6_SSI2_ADDR 0x0202C000
+#define FSL_IMX6_SSI2_SIZE 0x4000
+#define FSL_IMX6_SSI1_ADDR 0x02028000
+#define FSL_IMX6_SSI1_SIZE 0x4000
+#define FSL_IMX6_ESAI_ADDR 0x02024000
+#define FSL_IMX6_ESAI_SIZE 0x4000
+#define FSL_IMX6_UART1_ADDR 0x02020000
+#define FSL_IMX6_UART1_SIZE 0x4000
+#define FSL_IMX6_eCSPI5_ADDR 0x02018000
+#define FSL_IMX6_eCSPI5_SIZE 0x4000
+#define FSL_IMX6_eCSPI4_ADDR 0x02014000
+#define FSL_IMX6_eCSPI4_SIZE 0x4000
+#define FSL_IMX6_eCSPI3_ADDR 0x02010000
+#define FSL_IMX6_eCSPI3_SIZE 0x4000
+#define FSL_IMX6_eCSPI2_ADDR 0x0200C000
+#define FSL_IMX6_eCSPI2_SIZE 0x4000
+#define FSL_IMX6_eCSPI1_ADDR 0x02008000
+#define FSL_IMX6_eCSPI1_SIZE 0x4000
+#define FSL_IMX6_SPDIF_ADDR 0x02004000
+#define FSL_IMX6_SPDIF_SIZE 0x4000
+/* AIPS1 end */
+#define FSL_IMX6_PCIe_REG_ADDR 0x01FFC000
+#define FSL_IMX6_PCIe_REG_SIZE 0x4000
+#define FSL_IMX6_PCIe_ADDR 0x01000000
+#define FSL_IMX6_PCIe_SIZE 0xFFC000
+#define FSL_IMX6_GPV_1_PL301_CFG_ADDR 0x00C00000
+#define FSL_IMX6_GPV_1_PL301_CFG_SIZE 0x100000
+#define FSL_IMX6_GPV_0_PL301_CFG_ADDR 0x00B00000
+#define FSL_IMX6_GPV_0_PL301_CFG_SIZE 0x100000
+#define FSL_IMX6_PL310_ADDR 0x00A02000
+#define FSL_IMX6_PL310_SIZE 0x1000
+#define FSL_IMX6_A9MPCORE_ADDR 0x00A00000
+#define FSL_IMX6_A9MPCORE_SIZE 0x2000
+#define FSL_IMX6_OCRAM_ALIAS_ADDR 0x00940000
+#define FSL_IMX6_OCRAM_ALIAS_SIZE 0xC0000
+#define FSL_IMX6_OCRAM_ADDR 0x00900000
+#define FSL_IMX6_OCRAM_SIZE 0x40000
+#define FSL_IMX6_GPV_4_PL301_CFG_ADDR 0x00800000
+#define FSL_IMX6_GPV_4_PL301_CFG_SIZE 0x100000
+#define FSL_IMX6_GPV_3_PL301_CFG_ADDR 0x00300000
+#define FSL_IMX6_GPV_3_PL301_CFG_SIZE 0x100000
+#define FSL_IMX6_GPV_2_PL301_CFG_ADDR 0x00200000
+#define FSL_IMX6_GPV_2_PL301_CFG_SIZE 0x100000
+#define FSL_IMX6_DTCP_ADDR 0x00138000
+#define FSL_IMX6_DTCP_SIZE 0x4000
+#define FSL_IMX6_GPU_2D_ADDR 0x00134000
+#define FSL_IMX6_GPU_2D_SIZE 0x4000
+#define FSL_IMX6_GPU_3D_ADDR 0x00130000
+#define FSL_IMX6_GPU_3D_SIZE 0x4000
+#define FSL_IMX6_HDMI_ADDR 0x00120000
+#define FSL_IMX6_HDMI_SIZE 0x9000
+#define FSL_IMX6_BCH_ADDR 0x00114000
+#define FSL_IMX6_BCH_SIZE 0x4000
+#define FSL_IMX6_GPMI_ADDR 0x00112000
+#define FSL_IMX6_GPMI_SIZE 0x2000
+#define FSL_IMX6_APBH_BRIDGE_DMA_ADDR 0x00110000
+#define FSL_IMX6_APBH_BRIDGE_DMA_SIZE 0x2000
+#define FSL_IMX6_CAAM_MEM_ADDR 0x00100000
+#define FSL_IMX6_CAAM_MEM_SIZE 0x4000
+#define FSL_IMX6_ROM_ADDR 0x00000000
+#define FSL_IMX6_ROM_SIZE 0x18000
+
+#define FSL_IMX6_IOMUXC_IRQ 0
+#define FSL_IMX6_DAP_IRQ 1
+#define FSL_IMX6_SDMA_IRQ 2
+#define FSL_IMX6_VPU_JPEG_IRQ 3
+#define FSL_IMX6_SNVS_PMIC_IRQ 4
+#define FSL_IMX6_IPU1_ERROR_IRQ 5
+#define FSL_IMX6_IPU1_SYNC_IRQ 6
+#define FSL_IMX6_IPU2_ERROR_IRQ 7
+#define FSL_IMX6_IPU2_SYNC_IRQ 8
+#define FSL_IMX6_GPU3D_IRQ 9
+#define FSL_IMX6_R2D_IRQ 10
+#define FSL_IMX6_V2D_IRQ 11
+#define FSL_IMX6_VPU_IRQ 12
+#define FSL_IMX6_APBH_BRIDGE_DMA_IRQ 13
+#define FSL_IMX6_EIM_IRQ 14
+#define FSL_IMX6_BCH_IRQ 15
+#define FSL_IMX6_GPMI_IRQ 16
+#define FSL_IMX6_DTCP_IRQ 17
+#define FSL_IMX6_VDOA_IRQ 18
+#define FSL_IMX6_SNVS_CONS_IRQ 19
+#define FSL_IMX6_SNVS_SEC_IRQ 20
+#define FSL_IMX6_CSU_IRQ 21
+#define FSL_IMX6_uSDHC1_IRQ 22
+#define FSL_IMX6_uSDHC2_IRQ 23
+#define FSL_IMX6_uSDHC3_IRQ 24
+#define FSL_IMX6_uSDHC4_IRQ 25
+#define FSL_IMX6_UART1_IRQ 26
+#define FSL_IMX6_UART2_IRQ 27
+#define FSL_IMX6_UART3_IRQ 28
+#define FSL_IMX6_UART4_IRQ 29
+#define FSL_IMX6_UART5_IRQ 30
+#define FSL_IMX6_ECSPI1_IRQ 31
+#define FSL_IMX6_ECSPI2_IRQ 32
+#define FSL_IMX6_ECSPI3_IRQ 33
+#define FSL_IMX6_ECSPI4_IRQ 34
+#define FSL_IMX6_ECSPI5_IRQ 35
+#define FSL_IMX6_I2C1_IRQ 36
+#define FSL_IMX6_I2C2_IRQ 37
+#define FSL_IMX6_I2C3_IRQ 38
+#define FSL_IMX6_SATA_IRQ 39
+#define FSL_IMX6_USB_HOST1_IRQ 40
+#define FSL_IMX6_USB_HOST2_IRQ 41
+#define FSL_IMX6_USB_HOST3_IRQ 42
+#define FSL_IMX6_USB_OTG_IRQ 43
+#define FSL_IMX6_USB_PHY_UTMI0_IRQ 44
+#define FSL_IMX6_USB_PHY_UTMI1_IRQ 45
+#define FSL_IMX6_SSI1_IRQ 46
+#define FSL_IMX6_SSI2_IRQ 47
+#define FSL_IMX6_SSI3_IRQ 48
+#define FSL_IMX6_TEMP_IRQ 49
+#define FSL_IMX6_ASRC_IRQ 50
+#define FSL_IMX6_ESAI_IRQ 51
+#define FSL_IMX6_SPDIF_IRQ 52
+#define FSL_IMX6_MLB150_IRQ 53
+#define FSL_IMX6_PMU1_IRQ 54
+#define FSL_IMX6_GPT_IRQ 55
+#define FSL_IMX6_EPIT1_IRQ 56
+#define FSL_IMX6_EPIT2_IRQ 57
+#define FSL_IMX6_GPIO1_INT7_IRQ 58
+#define FSL_IMX6_GPIO1_INT6_IRQ 59
+#define FSL_IMX6_GPIO1_INT5_IRQ 60
+#define FSL_IMX6_GPIO1_INT4_IRQ 61
+#define FSL_IMX6_GPIO1_INT3_IRQ 62
+#define FSL_IMX6_GPIO1_INT2_IRQ 63
+#define FSL_IMX6_GPIO1_INT1_IRQ 64
+#define FSL_IMX6_GPIO1_INT0_IRQ 65
+#define FSL_IMX6_GPIO1_LOW_IRQ 66
+#define FSL_IMX6_GPIO1_HIGH_IRQ 67
+#define FSL_IMX6_GPIO2_LOW_IRQ 68
+#define FSL_IMX6_GPIO2_HIGH_IRQ 69
+#define FSL_IMX6_GPIO3_LOW_IRQ 70
+#define FSL_IMX6_GPIO3_HIGH_IRQ 71
+#define FSL_IMX6_GPIO4_LOW_IRQ 72
+#define FSL_IMX6_GPIO4_HIGH_IRQ 73
+#define FSL_IMX6_GPIO5_LOW_IRQ 74
+#define FSL_IMX6_GPIO5_HIGH_IRQ 75
+#define FSL_IMX6_GPIO6_LOW_IRQ 76
+#define FSL_IMX6_GPIO6_HIGH_IRQ 77
+#define FSL_IMX6_GPIO7_LOW_IRQ 78
+#define FSL_IMX6_GPIO7_HIGH_IRQ 79
+#define FSL_IMX6_WDOG1_IRQ 80
+#define FSL_IMX6_WDOG2_IRQ 81
+#define FSL_IMX6_KPP_IRQ 82
+#define FSL_IMX6_PWM1_IRQ 83
+#define FSL_IMX6_PWM2_IRQ 84
+#define FSL_IMX6_PWM3_IRQ 85
+#define FSL_IMX6_PWM4_IRQ 86
+#define FSL_IMX6_CCM1_IRQ 87
+#define FSL_IMX6_CCM2_IRQ 88
+#define FSL_IMX6_GPC_IRQ 89
+#define FSL_IMX6_SRC_IRQ 91
+#define FSL_IMX6_CPU_L2_IRQ 92
+#define FSL_IMX6_CPU_PARITY_IRQ 93
+#define FSL_IMX6_CPU_PERF_IRQ 94
+#define FSL_IMX6_CPU_CTI_IRQ 95
+#define FSL_IMX6_SRC_COMB_IRQ 96
+#define FSL_IMX6_MIPI_CSI1_IRQ 100
+#define FSL_IMX6_MIPI_CSI2_IRQ 101
+#define FSL_IMX6_MIPI_DSI_IRQ 102
+#define FSL_IMX6_MIPI_HSI_IRQ 103
+#define FSL_IMX6_SJC_IRQ 104
+#define FSL_IMX6_CAAM0_IRQ 105
+#define FSL_IMX6_CAAM1_IRQ 106
+#define FSL_IMX6_ASC1_IRQ 108
+#define FSL_IMX6_ASC2_IRQ 109
+#define FSL_IMX6_FLEXCAN1_IRQ 110
+#define FSL_IMX6_FLEXCAN2_IRQ 111
+#define FSL_IMX6_HDMI_MASTER_IRQ 115
+#define FSL_IMX6_HDMI_CEC_IRQ 116
+#define FSL_IMX6_MLB150_LOW_IRQ 117
+#define FSL_IMX6_ENET_MAC_IRQ 118
+#define FSL_IMX6_ENET_MAC_1588_IRQ 119
+#define FSL_IMX6_PCIE1_IRQ 120
+#define FSL_IMX6_PCIE2_IRQ 121
+#define FSL_IMX6_PCIE3_IRQ 122
+#define FSL_IMX6_PCIE4_IRQ 123
+#define FSL_IMX6_DCIC1_IRQ 124
+#define FSL_IMX6_DCIC2_IRQ 125
+#define FSL_IMX6_MLB150_HIGH_IRQ 126
+#define FSL_IMX6_PMU2_IRQ 127
+#define FSL_IMX6_MAX_IRQ 128
+
+#endif /* FSL_IMX6_H */
diff --git a/include/hw/misc/imx6_src.h b/include/hw/misc/imx6_src.h
new file mode 100644
index 0000000000..eb3640732e
--- /dev/null
+++ b/include/hw/misc/imx6_src.h
@@ -0,0 +1,73 @@
+/*
+ * IMX6 System Reset Controller
+ *
+ * Copyright (C) 2012 NICTA
+ * Updated by Jean-Christophe Dubois <jcd@tribudubois.net>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef IMX6_SRC_H
+#define IMX6_SRC_H
+
+#include "hw/sysbus.h"
+#include "qemu/bitops.h"
+
+#define SRC_SCR 0
+#define SRC_SBMR1 1
+#define SRC_SRSR 2
+#define SRC_SISR 5
+#define SRC_SIMR 6
+#define SRC_SBMR2 7
+#define SRC_GPR1 8
+#define SRC_GPR2 9
+#define SRC_GPR3 10
+#define SRC_GPR4 11
+#define SRC_GPR5 12
+#define SRC_GPR6 13
+#define SRC_GPR7 14
+#define SRC_GPR8 15
+#define SRC_GPR9 16
+#define SRC_GPR10 17
+#define SRC_MAX 18
+
+/* SRC_SCR */
+#define CORE3_ENABLE_SHIFT 24
+#define CORE3_ENABLE_LENGTH 1
+#define CORE2_ENABLE_SHIFT 23
+#define CORE2_ENABLE_LENGTH 1
+#define CORE1_ENABLE_SHIFT 22
+#define CORE1_ENABLE_LENGTH 1
+#define CORE3_RST_SHIFT 16
+#define CORE3_RST_LENGTH 1
+#define CORE2_RST_SHIFT 15
+#define CORE2_RST_LENGTH 1
+#define CORE1_RST_SHIFT 14
+#define CORE1_RST_LENGTH 1
+#define CORE0_RST_SHIFT 13
+#define CORE0_RST_LENGTH 1
+#define SW_IPU1_RST_SHIFT 3
+#define SW_IPU1_RST_LENGTH 1
+#define SW_IPU2_RST_SHIFT 12
+#define SW_IPU2_RST_LENGTH 1
+#define WARM_RST_ENABLE_SHIFT 0
+#define WARM_RST_ENABLE_LENGTH 1
+
+#define EXTRACT(value, name) extract32(value, name##_SHIFT, name##_LENGTH)
+
+#define TYPE_IMX6_SRC "imx6.src"
+#define IMX6_SRC(obj) OBJECT_CHECK(IMX6SRCState, (obj), TYPE_IMX6_SRC)
+
+typedef struct IMX6SRCState {
+ /* <private> */
+ SysBusDevice parent_obj;
+
+ /* <public> */
+ MemoryRegion iomem;
+
+ uint32_t regs[SRC_MAX];
+
+} IMX6SRCState;
+
+#endif /* IMX6_SRC_H */
diff --git a/include/hw/ssi/imx_spi.h b/include/hw/ssi/imx_spi.h
new file mode 100644
index 0000000000..7103953581
--- /dev/null
+++ b/include/hw/ssi/imx_spi.h
@@ -0,0 +1,103 @@
+/*
+ * IMX SPI Controller
+ *
+ * Copyright 2016 Jean-Christophe Dubois <jcd@tribudubois.net>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef IMX_SPI_H
+#define IMX_SPI_H
+
+#include "hw/sysbus.h"
+#include "hw/ssi/ssi.h"
+#include "qemu/bitops.h"
+#include "qemu/fifo32.h"
+
+#define ECSPI_FIFO_SIZE 64
+
+#define ECSPI_RXDATA 0
+#define ECSPI_TXDATA 1
+#define ECSPI_CONREG 2
+#define ECSPI_CONFIGREG 3
+#define ECSPI_INTREG 4
+#define ECSPI_DMAREG 5
+#define ECSPI_STATREG 6
+#define ECSPI_PERIODREG 7
+#define ECSPI_TESTREG 8
+#define ECSPI_MSGDATA 16
+#define ECSPI_MAX 17
+
+/* ECSPI_CONREG */
+#define ECSPI_CONREG_EN (1 << 0)
+#define ECSPI_CONREG_HT (1 << 1)
+#define ECSPI_CONREG_XCH (1 << 2)
+#define ECSPI_CONREG_SMC (1 << 3)
+#define ECSPI_CONREG_CHANNEL_MODE_SHIFT 4
+#define ECSPI_CONREG_CHANNEL_MODE_LENGTH 4
+#define ECSPI_CONREG_DRCTL_SHIFT 16
+#define ECSPI_CONREG_DRCTL_LENGTH 2
+#define ECSPI_CONREG_CHANNEL_SELECT_SHIFT 18
+#define ECSPI_CONREG_CHANNEL_SELECT_LENGTH 2
+#define ECSPI_CONREG_BURST_LENGTH_SHIFT 20
+#define ECSPI_CONREG_BURST_LENGTH_LENGTH 12
+
+/* ECSPI_CONFIGREG */
+#define ECSPI_CONFIGREG_SS_CTL_SHIFT 8
+#define ECSPI_CONFIGREG_SS_CTL_LENGTH 4
+
+/* ECSPI_INTREG */
+#define ECSPI_INTREG_TEEN (1 << 0)
+#define ECSPI_INTREG_TDREN (1 << 1)
+#define ECSPI_INTREG_TFEN (1 << 2)
+#define ECSPI_INTREG_RREN (1 << 3)
+#define ECSPI_INTREG_RDREN (1 << 4)
+#define ECSPI_INTREG_RFEN (1 << 5)
+#define ECSPI_INTREG_ROEN (1 << 6)
+#define ECSPI_INTREG_TCEN (1 << 7)
+
+/* ECSPI_DMAREG */
+#define ECSPI_DMAREG_RXTDEN (1 << 31)
+#define ECSPI_DMAREG_RXDEN (1 << 23)
+#define ECSPI_DMAREG_TEDEN (1 << 7)
+#define ECSPI_DMAREG_RX_THRESHOLD_SHIFT 16
+#define ECSPI_DMAREG_RX_THRESHOLD_LENGTH 6
+
+/* ECSPI_STATREG */
+#define ECSPI_STATREG_TE (1 << 0)
+#define ECSPI_STATREG_TDR (1 << 1)
+#define ECSPI_STATREG_TF (1 << 2)
+#define ECSPI_STATREG_RR (1 << 3)
+#define ECSPI_STATREG_RDR (1 << 4)
+#define ECSPI_STATREG_RF (1 << 5)
+#define ECSPI_STATREG_RO (1 << 6)
+#define ECSPI_STATREG_TC (1 << 7)
+
+#define EXTRACT(value, name) extract32(value, name##_SHIFT, name##_LENGTH)
+
+#define TYPE_IMX_SPI "imx.spi"
+#define IMX_SPI(obj) OBJECT_CHECK(IMXSPIState, (obj), TYPE_IMX_SPI)
+
+typedef struct IMXSPIState {
+ /* <private> */
+ SysBusDevice parent_obj;
+
+ /* <public> */
+ MemoryRegion iomem;
+
+ qemu_irq irq;
+
+ qemu_irq cs_lines[4];
+
+ SSIBus *bus;
+
+ uint32_t regs[ECSPI_MAX];
+
+ Fifo32 rx_fifo;
+ Fifo32 tx_fifo;
+
+ int16_t burst_length;
+} IMXSPIState;
+
+#endif /* IMX_SPI_H */
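The EXTRACT() helper is how the device model pulls multi-bit fields out of these registers; for example, a sketch of reading the configured burst length (imx_spi_burst_length_bits is an assumed name; the +1 assumes the field encodes the length in bits minus one):

    static inline uint32_t imx_spi_burst_length_bits(IMXSPIState *s)
    {
        return EXTRACT(s->regs[ECSPI_CONREG], ECSPI_CONREG_BURST_LENGTH) + 1;
    }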
diff --git a/include/qapi/dealloc-visitor.h b/include/qapi/dealloc-visitor.h
index cf4c36d2d3..45b06b248c 100644
--- a/include/qapi/dealloc-visitor.h
+++ b/include/qapi/dealloc-visitor.h
@@ -18,6 +18,11 @@
typedef struct QapiDeallocVisitor QapiDeallocVisitor;
+/*
+ * The dealloc visitor is used primarily by the generated
+ * qapi_free_FOO() functions, and is the only visitor designed to work
+ * correctly in the face of a partially-constructed QAPI tree.
+ */
QapiDeallocVisitor *qapi_dealloc_visitor_new(void);
void qapi_dealloc_visitor_cleanup(QapiDeallocVisitor *d);
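For reference, the generated qapi_free_FOO() functions mentioned above drive this visitor in roughly the following way (Foo and visit_type_Foo stand in for a concrete generated type; the exact generated code may differ):

    void qapi_free_Foo(Foo *obj)
    {
        QapiDeallocVisitor *qdv;
        Visitor *v;

        if (!obj) {
            return;
        }
        qdv = qapi_dealloc_visitor_new();
        v = qapi_dealloc_get_visitor(qdv);
        visit_type_Foo(v, NULL, &obj, NULL);
        qapi_dealloc_visitor_cleanup(qdv);
    }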
diff --git a/include/qapi/opts-visitor.h b/include/qapi/opts-visitor.h
index fd48c14ec8..ae1bf7cf51 100644
--- a/include/qapi/opts-visitor.h
+++ b/include/qapi/opts-visitor.h
@@ -29,6 +29,11 @@ typedef struct OptsVisitor OptsVisitor;
* - string representations of negative numbers yield negative values,
* - values below INT64_MIN or LLONG_MIN are rejected,
* - values above INT64_MAX or LLONG_MAX are rejected.
+ *
+ * The Opts input visitor does not implement support for visiting QAPI
+ * alternates, numbers (other than integers), null, or arbitrary
+ * QTypes. It also requires a non-null list argument to
+ * visit_start_list().
*/
OptsVisitor *opts_visitor_new(const QemuOpts *opts);
void opts_visitor_cleanup(OptsVisitor *nv);
diff --git a/include/qapi/qmp-input-visitor.h b/include/qapi/qmp-input-visitor.h
index 3ed499cc42..b0624d8683 100644
--- a/include/qapi/qmp-input-visitor.h
+++ b/include/qapi/qmp-input-visitor.h
@@ -19,8 +19,13 @@
typedef struct QmpInputVisitor QmpInputVisitor;
-QmpInputVisitor *qmp_input_visitor_new(QObject *obj);
-QmpInputVisitor *qmp_input_visitor_new_strict(QObject *obj);
+/*
+ * Return a new input visitor that converts QMP to QAPI.
+ *
+ * Set @strict to reject a parse that doesn't consume all keys of a
+ * dictionary; otherwise excess input is ignored.
+ */
+QmpInputVisitor *qmp_input_visitor_new(QObject *obj, bool strict);
void qmp_input_visitor_cleanup(QmpInputVisitor *v);
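A hedged caller-side sketch of the new signature, with @strict set so that unconsumed dictionary keys are reported as errors (SomeArgs and visit_type_SomeArgs are placeholders for a generated QAPI type):

    QmpInputVisitor *qiv = qmp_input_visitor_new(QOBJECT(args), true);
    SomeArgs *arg = NULL;
    Error *err = NULL;

    visit_type_SomeArgs(qmp_input_get_visitor(qiv), NULL, &arg, &err);
    qmp_input_visitor_cleanup(qiv);
    if (err) {
        /* ...report the parse error... */
    }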
diff --git a/include/qapi/qmp/dispatch.h b/include/qapi/qmp/dispatch.h
index 495520994c..5609946a16 100644
--- a/include/qapi/qmp/dispatch.h
+++ b/include/qapi/qmp/dispatch.h
@@ -19,11 +19,6 @@
typedef void (QmpCommandFunc)(QDict *, QObject **, Error **);
-typedef enum QmpCommandType
-{
- QCT_NORMAL,
-} QmpCommandType;
-
typedef enum QmpCommandOptions
{
QCO_NO_OPTIONS = 0x0,
@@ -33,7 +28,6 @@ typedef enum QmpCommandOptions
typedef struct QmpCommand
{
const char *name;
- QmpCommandType type;
QmpCommandFunc *fn;
QmpCommandOptions options;
QTAILQ_ENTRY(QmpCommand) node;
diff --git a/include/qapi/string-input-visitor.h b/include/qapi/string-input-visitor.h
index 089243c09e..7b76c2b9e3 100644
--- a/include/qapi/string-input-visitor.h
+++ b/include/qapi/string-input-visitor.h
@@ -17,6 +17,11 @@
typedef struct StringInputVisitor StringInputVisitor;
+/*
+ * The string input visitor does not implement support for visiting
+ * QAPI structs, alternates, null, or arbitrary QTypes. It also
+ * requires a non-null list argument to visit_start_list().
+ */
StringInputVisitor *string_input_visitor_new(const char *str);
void string_input_visitor_cleanup(StringInputVisitor *v);
diff --git a/include/qapi/string-output-visitor.h b/include/qapi/string-output-visitor.h
index d99717f650..e10522a35b 100644
--- a/include/qapi/string-output-visitor.h
+++ b/include/qapi/string-output-visitor.h
@@ -17,6 +17,11 @@
typedef struct StringOutputVisitor StringOutputVisitor;
+/*
+ * The string output visitor does not implement support for visiting
+ * QAPI structs, alternates, null, or arbitrary QTypes. It also
+ * requires a non-null list argument to visit_start_list().
+ */
StringOutputVisitor *string_output_visitor_new(bool human);
void string_output_visitor_cleanup(StringOutputVisitor *v);
diff --git a/include/qapi/visitor-impl.h b/include/qapi/visitor-impl.h
index 2bd8f292b2..145afd03e7 100644
--- a/include/qapi/visitor-impl.h
+++ b/include/qapi/visitor-impl.h
@@ -14,55 +14,96 @@
#include "qapi/visitor.h"
+/*
+ * This file describes the callback interface for implementing a QAPI
+ * visitor. For the client interface, see visitor.h. When
+ * implementing the callbacks, it is easiest to declare a struct with
+ * 'Visitor visitor;' as the first member. A callback's contract
+ * matches the corresponding public function's contract unless stated
+ * otherwise. In the comments below, some callbacks are marked "must
+ * be set for $TYPE visits to work"; if a visitor implementation omits
+ * that callback, it should also document that it is only useful for a
+ * subset of QAPI.
+ */
+
+/*
+ * There are three classes of visitors; setting the class determines
+ * how QAPI enums are visited, as well as what additional restrictions
+ * can be asserted.
+ */
+typedef enum VisitorType {
+ VISITOR_INPUT,
+ VISITOR_OUTPUT,
+ VISITOR_DEALLOC,
+} VisitorType;
+
struct Visitor
{
- /* Must be set */
+ /* Must be set to visit structs */
void (*start_struct)(Visitor *v, const char *name, void **obj,
size_t size, Error **errp);
- void (*end_struct)(Visitor *v, Error **errp);
- void (*start_list)(Visitor *v, const char *name, Error **errp);
+ /* Optional; intended for input visitors */
+ void (*check_struct)(Visitor *v, Error **errp);
+
+ /* Must be set to visit structs */
+ void (*end_struct)(Visitor *v);
+
+ /* Must be set; implementations may require @list to be non-null,
+ * but must document it. */
+ void (*start_list)(Visitor *v, const char *name, GenericList **list,
+ size_t size, Error **errp);
+
/* Must be set */
- GenericList *(*next_list)(Visitor *v, GenericList **list, size_t size);
+ GenericList *(*next_list)(Visitor *v, GenericList *tail, size_t size);
+
/* Must be set */
void (*end_list)(Visitor *v);
- /* Optional, needed for input and dealloc visitors. */
+ /* Must be set by input and dealloc visitors to visit alternates;
+ * optional for output visitors. */
void (*start_alternate)(Visitor *v, const char *name,
GenericAlternate **obj, size_t size,
bool promote_int, Error **errp);
- /* Optional, needed for dealloc visitor. */
+ /* Optional, needed for dealloc visitor */
void (*end_alternate)(Visitor *v);
- /* Must be set. */
- void (*type_enum)(Visitor *v, const char *name, int *obj,
- const char *const strings[], Error **errp);
-
- /* Must be set. */
+ /* Must be set */
void (*type_int64)(Visitor *v, const char *name, int64_t *obj,
Error **errp);
- /* Must be set. */
+
+ /* Must be set */
void (*type_uint64)(Visitor *v, const char *name, uint64_t *obj,
Error **errp);
- /* Optional; fallback is type_uint64(). */
+
+ /* Optional; fallback is type_uint64() */
void (*type_size)(Visitor *v, const char *name, uint64_t *obj,
Error **errp);
- /* Must be set. */
+
+ /* Must be set */
void (*type_bool)(Visitor *v, const char *name, bool *obj, Error **errp);
+
+ /* Must be set */
void (*type_str)(Visitor *v, const char *name, char **obj, Error **errp);
+
+ /* Must be set to visit numbers */
void (*type_number)(Visitor *v, const char *name, double *obj,
Error **errp);
+
+ /* Must be set to visit arbitrary QTypes */
void (*type_any)(Visitor *v, const char *name, QObject **obj,
Error **errp);
- /* May be NULL; most useful for input visitors. */
+ /* Must be set to visit explicit null values. */
+ void (*type_null)(Visitor *v, const char *name, Error **errp);
+
+ /* Must be set for input visitors, optional otherwise. The core
+ * takes care of the return type in the public interface. */
void (*optional)(Visitor *v, const char *name, bool *present);
-};
-void input_type_enum(Visitor *v, const char *name, int *obj,
- const char *const strings[], Error **errp);
-void output_type_enum(Visitor *v, const char *name, int *obj,
- const char *const strings[], Error **errp);
+ /* Must be set */
+ VisitorType type;
+};
#endif
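Following the convention described at the top of this header, a visitor implementation typically looks roughly like this (MyInputVisitor, my_type_int64 and my_input_visitor_new are assumed names, not part of the patch):

    typedef struct MyInputVisitor {
        Visitor visitor;  /* must be first so callbacks can use container_of() */
        /* ...visitor-specific parse state... */
    } MyInputVisitor;

    static void my_type_int64(Visitor *v, const char *name, int64_t *obj,
                              Error **errp)
    {
        MyInputVisitor *miv = container_of(v, MyInputVisitor, visitor);

        /* ...consult miv's parse state and store the parsed value in *obj... */
    }

    Visitor *my_input_visitor_new(void)
    {
        MyInputVisitor *v = g_malloc0(sizeof(*v));

        v->visitor.type = VISITOR_INPUT;
        v->visitor.type_int64 = my_type_int64;
        /* ...fill in the remaining mandatory callbacks... */
        return &v->visitor;
    }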
diff --git a/include/qapi/visitor.h b/include/qapi/visitor.h
index 9a8d0105fb..4d12167bdc 100644
--- a/include/qapi/visitor.h
+++ b/include/qapi/visitor.h
@@ -16,8 +16,199 @@
#include "qapi/qmp/qobject.h"
+/*
+ * The QAPI schema defines both a set of C data types, and a QMP wire
+ * format. QAPI objects can contain references to other QAPI objects,
+ * resulting in a directed acyclic graph. QAPI also generates visitor
+ * functions to walk these graphs. This file represents the interface
+ * for doing work at each node of a QAPI graph; it can also be used
+ * for a virtual walk, where there is no actual QAPI C struct.
+ *
+ * There are three kinds of visitor classes: input visitors (QMP,
+ * string, and QemuOpts) parse an external representation and build
+ * the corresponding QAPI graph, output visitors (QMP and string) take
+ * a completed QAPI graph and generate an external representation, and
+ * the dealloc visitor can take a QAPI graph (possibly partially
+ * constructed) and recursively free its resources. While the dealloc
+ * and QMP input/output visitors are general, the string and QemuOpts
+ * visitors have some implementation limitations; see the
+ * documentation for each visitor for more details on what it
+ * supports. Also, see visitor-impl.h for the callback contracts
+ * implemented by each visitor, and docs/qapi-code-gen.txt for more
+ * about the QAPI code generator.
+ *
+ * All QAPI types have a corresponding function with a signature
+ * roughly compatible with this:
+ *
+ * void visit_type_FOO(Visitor *v, const char *name, T obj, Error **errp);
+ *
+ * where T is FOO for scalar types, and FOO * otherwise. The scalar
+ * visitors are declared here; the remaining visitors are generated in
+ * qapi-visit.h.
+ *
+ * The @name parameter of visit_type_FOO() describes the relation
+ * between this QAPI value and its parent container. When visiting
+ * the root of a tree, @name is ignored; when visiting a member of an
+ * object, @name is the key associated with the value; and when
+ * visiting a member of a list, @name is NULL.
+ *
+ * FIXME: Clients must pass NULL for @name when visiting a member of a
+ * list, but this leads to poor error messages; it might be nicer to
+ * require a non-NULL name such as "key.0" for '{ "key": [ "value" ]
+ * }' if an error is encountered on "value" (or to have the visitor
+ * core auto-generate the nicer name).
+ *
+ * The visit_type_FOO() functions expect a non-null @obj argument;
+ * they allocate *@obj during input visits, leave it unchanged on
+ * output visits, and recursively free any resources during a dealloc
+ * visit. Each function also takes the customary @errp argument (see
+ * qapi/error.h for details), for reporting any errors (such as if a
+ * member @name is not present, or is present but not the specified
+ * type).
+ *
+ * If an error is detected during visit_type_FOO() with an input
+ * visitor, then *@obj will be NULL for pointer types, and left
+ * unchanged for scalar types. Using an output visitor with an
+ * incomplete object has undefined behavior (other than a special case
+ * for visit_type_str() treating NULL like ""), while the dealloc
+ * visitor safely handles incomplete objects. Since input visitors
+ * never produce an incomplete object, such an object is possible only
+ * by manual construction.
+ *
+ * For the QAPI object types (structs, unions, and alternates), there
+ * is an additional generated function in qapi-visit.h compatible
+ * with:
+ *
+ * void visit_type_FOO_members(Visitor *v, FOO *obj, Error **errp);
+ *
+ * for visiting the members of a type without also allocating the QAPI
+ * struct.
+ *
+ * Additionally, in qapi-types.h, all QAPI pointer types (structs,
+ * unions, alternates, and lists) have a generated function compatible
+ * with:
+ *
+ * void qapi_free_FOO(FOO *obj);
+ *
+ * which behaves like free() in that @obj may be NULL. Because of
+ * these functions, the dealloc visitor is seldom used directly
+ * outside of generated code. QAPI types can also inherit from a base
+ * class; when this happens, a function is generated for easily going
+ * from the derived type to the base type:
+ *
+ * BASE *qapi_CHILD_base(CHILD *obj);
+ *
+ * For a real QAPI struct, typical input usage involves:
+ *
+ * <example>
+ * Foo *f;
+ * Error *err = NULL;
+ * Visitor *v;
+ *
+ * v = ...obtain input visitor...
+ * visit_type_Foo(v, NULL, &f, &err);
+ * if (err) {
+ * ...handle error...
+ * } else {
+ * ...use f...
+ * }
+ * ...clean up v...
+ * qapi_free_Foo(f);
+ * </example>
+ *
+ * For a list, it is:
+ * <example>
+ * FooList *l;
+ * Error *err = NULL;
+ * Visitor *v;
+ *
+ * v = ...obtain input visitor...
+ * visit_type_FooList(v, NULL, &l, &err);
+ * if (err) {
+ * ...handle error...
+ * } else {
+ * for ( ; l; l = l->next) {
+ * ...use l->value...
+ * }
+ * }
+ * ...clean up v...
+ * qapi_free_FooList(l);
+ * </example>
+ *
+ * Similarly, typical output usage is:
+ *
+ * <example>
+ * Foo *f = ...obtain populated object...
+ * Error *err = NULL;
+ * Visitor *v;
+ *
+ * v = ...obtain output visitor...
+ * visit_type_Foo(v, NULL, &f, &err);
+ * if (err) {
+ * ...handle error...
+ * }
+ * ...clean up v...
+ * </example>
+ *
+ * When visiting a real QAPI struct, this file provides several
+ * helpers that rely on in-tree information to control the walk:
+ * visit_optional() for the 'has_member' field associated with
+ * optional 'member' in the C struct; and visit_next_list() for
+ * advancing through a FooList linked list. Similarly, the
+ * visit_is_input() helper makes it possible to write code that is
+ * visitor-agnostic everywhere except for cleanup. Only the generated
+ * visit_type functions need to use these helpers.
+ *
+ * It is also possible to use the visitors to do a virtual walk, where
+ * no actual QAPI struct is present. In this situation, decisions
+ * about what needs to be walked are made by the calling code, and
+ * structured visits are split between pairs of start and end methods
+ * (where the end method must be called if the start function
+ * succeeded, even if an intermediate visit encounters an error).
+ * Thus, a virtual walk corresponding to '{ "list": [1, 2] }' looks
+ * like:
+ *
+ * <example>
+ * Visitor *v;
+ * Error *err = NULL;
+ * int value;
+ *
+ * v = ...obtain visitor...
+ * visit_start_struct(v, NULL, NULL, 0, &err);
+ * if (err) {
+ * goto out;
+ * }
+ * visit_start_list(v, "list", NULL, 0, &err);
+ * if (err) {
+ * goto outobj;
+ * }
+ * value = 1;
+ * visit_type_int(v, NULL, &value, &err);
+ * if (err) {
+ * goto outlist;
+ * }
+ * value = 2;
+ * visit_type_int(v, NULL, &value, &err);
+ * if (err) {
+ * goto outlist;
+ * }
+ * outlist:
+ * visit_end_list(v);
+ * if (!err) {
+ * visit_check_struct(v, &err);
+ * }
+ * outobj:
+ * visit_end_struct(v);
+ * out:
+ * error_propagate(errp, err);
+ * ...clean up v...
+ * </example>
+ */
+
+/*** Useful types ***/
+
/* This struct is layout-compatible with all other *List structs
- * created by the qapi generator. It is used as a typical
+ * created by the QAPI generator. It is used as a typical
* singly-linked list. */
typedef struct GenericList {
struct GenericList *next;
@@ -25,35 +216,139 @@ typedef struct GenericList {
} GenericList;
/* This struct is layout-compatible with all Alternate types
- * created by the qapi generator. */
+ * created by the QAPI generator. */
typedef struct GenericAlternate {
QType type;
char padding[];
} GenericAlternate;
+/*** Visiting structures ***/
+
+/*
+ * Start visiting an object @obj (struct or union).
+ *
+ * @name expresses the relationship of this object to its parent
+ * container; see the general description of @name above.
+ *
+ * @obj must be non-NULL for a real walk, in which case @size
+ * determines how much memory an input visitor will allocate into
+ * *@obj. @obj may also be NULL for a virtual walk, in which case
+ * @size is ignored.
+ *
+ * @errp obeys typical error usage, and reports failures such as a
+ * member @name that is not present, or is present but not an object.
+ * On error, input visitors set *@obj to NULL.
+ *
+ * After visit_start_struct() succeeds, the caller may visit its
+ * members one after the other, passing the member's name and address
+ * within the struct. Finally, visit_end_struct() needs to be called
+ * to clean up, even if intermediate visits fail. See the examples
+ * above.
+ *
+ * FIXME: Should this be named visit_start_object, since it is also
+ * used for QAPI unions, and maps to JSON objects?
+ */
void visit_start_struct(Visitor *v, const char *name, void **obj,
size_t size, Error **errp);
-void visit_end_struct(Visitor *v, Error **errp);
-void visit_start_list(Visitor *v, const char *name, Error **errp);
-GenericList *visit_next_list(Visitor *v, GenericList **list, size_t size);
+/*
+ * Prepare for completing an object visit.
+ *
+ * @errp obeys typical error usage, and reports failures such as
+ * unparsed keys remaining in the input stream.
+ *
+ * Should be called prior to visit_end_struct() if all other
+ * intermediate visit steps were successful, to allow the visitor one
+ * last chance to report errors. May be skipped on a cleanup path,
+ * where there is no need to check for further errors.
+ */
+void visit_check_struct(Visitor *v, Error **errp);
+
+/*
+ * Complete an object visit started earlier.
+ *
+ * Must be called after any successful use of visit_start_struct(),
+ * even if intermediate processing was skipped due to errors, to allow
+ * the backend to release any resources. Destroying the visitor early
+ * behaves as if this was implicitly called.
+ */
+void visit_end_struct(Visitor *v);
+
+
+/*** Visiting lists ***/
+
+/*
+ * Start visiting a list.
+ *
+ * @name expresses the relationship of this list to its parent
+ * container; see the general description of @name above.
+ *
+ * @list must be non-NULL for a real walk, in which case @size
+ * determines how much memory an input visitor will allocate into
+ * *@list (at least sizeof(GenericList)). Some visitors also allow
+ * @list to be NULL for a virtual walk, in which case @size is
+ * ignored.
+ *
+ * @errp obeys typical error usage, and reports failures such as a
+ * member @name that is not present, or is present but not a list.
+ * On error, input visitors set *@list to NULL.
+ *
+ * After visit_start_list() succeeds, the caller may visit its members
+ * one after the other. A real visit (where @list is non-NULL) uses
+ * visit_next_list() for traversing the linked list, while a virtual
+ * visit (where @list is NULL) uses other means. For each list
+ * element, call the appropriate visit_type_FOO() with name set to
+ * NULL and obj set to the address of the value member of the list
+ * element. Finally, visit_end_list() needs to be called to clean up,
+ * even if intermediate visits fail. See the examples above.
+ */
+void visit_start_list(Visitor *v, const char *name, GenericList **list,
+ size_t size, Error **errp);
+
+/*
+ * Iterate over a GenericList during a non-virtual list visit.
+ *
+ * @size represents the size of a linked list node (at least
+ * sizeof(GenericList)).
+ *
+ * @tail must not be NULL; on the first call, @tail is the value of
+ * *list after visit_start_list(), and on subsequent calls @tail must
+ * be the previously returned value. Should be called in a loop until
+ * a NULL return or error occurs; for each non-NULL return, the caller
+ * then calls the appropriate visit_type_*() for the element type of
+ * the list, with that function's name parameter set to NULL and obj
+ * set to the address of @tail->value.
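+ *
+ * A sketch of the traversal loop in a generated list visit function
+ * (FooList and visit_type_Foo() are illustrative names):
+ *
+ * <example>
+ *  for (tail = *list; tail;
+ *       tail = (FooList *)visit_next_list(v, (GenericList *)tail,
+ *                                         sizeof(*tail))) {
+ *      visit_type_Foo(v, NULL, &tail->value, &err);
+ *      if (err) {
+ *          break;
+ *      }
+ *  }
+ * </example>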
+ */
+GenericList *visit_next_list(Visitor *v, GenericList *tail, size_t size);
+
+/*
+ * Complete a list visit started earlier.
+ *
+ * Must be called after any successful use of visit_start_list(), even
+ * if intermediate processing was skipped due to errors, to allow the
+ * backend to release any resources. Destroying the visitor early
+ * behaves as if this was implicitly called.
+ */
void visit_end_list(Visitor *v);
+
+/*** Visiting alternates ***/
+
/*
- * Start the visit of an alternate @obj with the given @size.
+ * Start the visit of an alternate @obj.
*
- * @name specifies the relationship to the containing struct (ignored
- * for a top level visit, the name of the key if this alternate is
- * part of an object, or NULL if this alternate is part of a list).
+ * @name expresses the relationship of this alternate to its parent
+ * container; see the general description of @name above.
*
- * @obj must not be NULL. Input visitors will allocate @obj and
- * determine the qtype of the next thing to be visited, stored in
- * (*@obj)->type. Other visitors will leave @obj unchanged.
+ * @obj must not be NULL. Input visitors use @size to determine how
+ * much memory to allocate into *@obj, then determine the qtype of the
+ * next thing to be visited, stored in (*@obj)->type. Other visitors
+ * will leave @obj unchanged.
*
* If @promote_int, treat integers as QTYPE_FLOAT.
*
- * If successful, this must be paired with visit_end_alternate(), even
- * if visiting the contents of the alternate fails.
+ * If successful, this must be paired with visit_end_alternate() to
+ * clean up, even if visiting the contents of the alternate fails.
*/
void visit_start_alternate(Visitor *v, const char *name,
GenericAlternate **obj, size_t size,
@@ -62,46 +357,202 @@ void visit_start_alternate(Visitor *v, const char *name,
/*
* Finish visiting an alternate type.
*
- * Must be called after a successful visit_start_alternate(), even if
- * an error occurred in the meantime.
+ * Must be called after any successful use of visit_start_alternate(),
+ * even if intermediate processing was skipped due to errors, to allow
+ * the backend to release any resources. Destroying the visitor early
+ * behaves as if this was implicitly called.
*
* TODO: Should all the visit_end_* interfaces take obj parameter, so
* that dealloc visitor need not track what was passed in visit_start?
*/
void visit_end_alternate(Visitor *v);
-/**
- * Check if an optional member @name of an object needs visiting.
- * For input visitors, set *@present according to whether the
- * corresponding visit_type_*() needs calling; for other visitors,
- * leave *@present unchanged. Return *@present for convenience.
+
+/*** Other helpers ***/
+
+/*
+ * Does optional struct member @name need visiting?
+ *
+ * @name must not be NULL. This function is only useful between
+ * visit_start_struct() and visit_end_struct(), since only objects
+ * have optional keys.
+ *
+ * @present points to the optional member's has_ flag in the C struct.
+ *
+ * Input visitors set *@present according to input; other visitors
+ * leave it unchanged. In either case, return *@present for
+ * convenience.
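+ *
+ * A sketch of how generated code typically uses it (the member name
+ * and type are illustrative):
+ *
+ * <example>
+ *  if (visit_optional(v, "member", &obj->has_member)) {
+ *      visit_type_str(v, "member", &obj->member, &err);
+ *  }
+ * </example>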
*/
bool visit_optional(Visitor *v, const char *name, bool *present);
+/*
+ * Visit an enum value.
+ *
+ * @name expresses the relationship of this enum to its parent
+ * container; see the general description of @name above.
+ *
+ * @obj must be non-NULL. Input visitors parse input and set *@obj to
+ * the enumeration value, leaving @obj unchanged on error; other
+ * visitors use *@obj but leave it unchanged.
+ *
+ * Currently, all input visitors parse text input, and all output
+ * visitors produce text output. The mapping between enumeration
+ * values and strings is done by the visitor core, using @strings; it
+ * should be the ENUM_lookup array from qapi-types.h.
+ *
+ * May call visit_type_str() under the hood, and the enum visit may
+ * fail even if the corresponding string visit succeeded; this implies
+ * that visit_type_str() must have no unwelcome side effects.
+ */
void visit_type_enum(Visitor *v, const char *name, int *obj,
const char *const strings[], Error **errp);
+
+/*
+ * Check if visitor is an input visitor.
+ */
+bool visit_is_input(Visitor *v);
+
+/*** Visiting built-in types ***/
+
+/*
+ * Visit an integer value.
+ *
+ * @name expresses the relationship of this integer to its parent
+ * container; see the general description of @name above.
+ *
+ * @obj must be non-NULL. Input visitors set *@obj to the value;
+ * other visitors will leave *@obj unchanged.
+ */
void visit_type_int(Visitor *v, const char *name, int64_t *obj, Error **errp);
+
+/*
+ * Visit a uint8_t value.
+ * Like visit_type_int(), except clamps the value to uint8_t range.
+ */
void visit_type_uint8(Visitor *v, const char *name, uint8_t *obj,
Error **errp);
+
+/*
+ * Visit a uint16_t value.
+ * Like visit_type_int(), except clamps the value to uint16_t range.
+ */
void visit_type_uint16(Visitor *v, const char *name, uint16_t *obj,
Error **errp);
+
+/*
+ * Visit a uint32_t value.
+ * Like visit_type_int(), except clamps the value to uint32_t range.
+ */
void visit_type_uint32(Visitor *v, const char *name, uint32_t *obj,
Error **errp);
+
+/*
+ * Visit a uint64_t value.
+ * Like visit_type_int(), except clamps the value to uint64_t range,
+ * that is, ensures it is unsigned.
+ */
void visit_type_uint64(Visitor *v, const char *name, uint64_t *obj,
Error **errp);
+
+/*
+ * Visit an int8_t value.
+ * Like visit_type_int(), except clamps the value to int8_t range.
+ */
void visit_type_int8(Visitor *v, const char *name, int8_t *obj, Error **errp);
+
+/*
+ * Visit an int16_t value.
+ * Like visit_type_int(), except clamps the value to int16_t range.
+ */
void visit_type_int16(Visitor *v, const char *name, int16_t *obj,
Error **errp);
+
+/*
+ * Visit an int32_t value.
+ * Like visit_type_int(), except clamps the value to int32_t range.
+ */
void visit_type_int32(Visitor *v, const char *name, int32_t *obj,
Error **errp);
+
+/*
+ * Visit an int64_t value.
+ * Identical to visit_type_int().
+ */
void visit_type_int64(Visitor *v, const char *name, int64_t *obj,
Error **errp);
+
+/*
+ * Visit a uint64_t value.
+ * Like visit_type_uint64(), except that some visitors may choose to
+ * recognize additional syntax, such as suffixes for easily scaling
+ * values.
+ */
void visit_type_size(Visitor *v, const char *name, uint64_t *obj,
Error **errp);
+
+/*
+ * Visit a boolean value.
+ *
+ * @name expresses the relationship of this boolean to its parent
+ * container; see the general description of @name above.
+ *
+ * @obj must be non-NULL. Input visitors set *@obj to the value;
+ * other visitors will leave *@obj unchanged.
+ */
void visit_type_bool(Visitor *v, const char *name, bool *obj, Error **errp);
+
+/*
+ * Visit a string value.
+ *
+ * @name expresses the relationship of this string to its parent
+ * container; see the general description of @name above.
+ *
+ * @obj must be non-NULL. Input visitors set *@obj to the value
+ * (never NULL). Other visitors leave *@obj unchanged, and commonly
+ * treat NULL like "".
+ *
+ * It is safe to cast away const when preparing a (const char *) value
+ * into @obj for use by an output visitor.
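+ *
+ * A minimal output-visit sketch of that cast (the member name is
+ * illustrative):
+ *
+ * <example>
+ *  char *value = (char *)"read-only string";
+ *  visit_type_str(v, "name", &value, &err);
+ * </example>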
+ *
+ * FIXME: Callers that try to output NULL *obj should not be allowed.
+ */
void visit_type_str(Visitor *v, const char *name, char **obj, Error **errp);
+
+/*
+ * Visit a number (i.e. double) value.
+ *
+ * @name expresses the relationship of this number to its parent
+ * container; see the general description of @name above.
+ *
+ * @obj must be non-NULL. Input visitors set *@obj to the value;
+ * other visitors will leave *@obj unchanged. Visitors should
+ * document if infinity or NaN are not permitted.
+ */
void visit_type_number(Visitor *v, const char *name, double *obj,
Error **errp);
+
+/*
+ * Visit an arbitrary value.
+ *
+ * @name expresses the relationship of this value to its parent
+ * container; see the general description of @name above.
+ *
+ * @obj must be non-NULL. Input visitors set *@obj to the value;
+ * other visitors will leave *@obj unchanged. *@obj must be non-NULL
+ * for output visitors.
+ */
void visit_type_any(Visitor *v, const char *name, QObject **obj, Error **errp);
+/*
+ * Visit a JSON null value.
+ *
+ * @name expresses the relationship of the null value to its parent
+ * container; see the general description of @name above.
+ *
+ * Unlike all other visit_type_* functions, no obj parameter is
+ * needed; rather, this is a witness that an explicit null value is
+ * expected rather than any other type.
+ */
+void visit_type_null(Visitor *v, const char *name, Error **errp);
+
#endif
diff --git a/include/qemu/fifo32.h b/include/qemu/fifo32.h
new file mode 100644
index 0000000000..2e5a0ccddf
--- /dev/null
+++ b/include/qemu/fifo32.h
@@ -0,0 +1,191 @@
+/*
+ * Generic FIFO32 component, based on FIFO8.
+ *
+ * Copyright (c) 2016 Jean-Christophe Dubois
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef FIFO32_H
+#define FIFO32_H
+
+#include "qemu/osdep.h"
+#include "qemu/fifo8.h"
+
+typedef struct {
+ Fifo8 fifo;
+} Fifo32;
+
+/**
+ * fifo32_create:
+ * @fifo: struct Fifo32 to initialise with new FIFO
+ * @capacity: capacity of the newly created FIFO, expressed in 32-bit words
+ *
+ * Create a FIFO of the specified capacity. Clients should call fifo32_destroy()
+ * when finished using the FIFO. The FIFO is initially empty.
+ */
+
+static inline void fifo32_create(Fifo32 *fifo, uint32_t capacity)
+{
+ fifo8_create(&fifo->fifo, capacity * sizeof(uint32_t));
+}
+
+/**
+ * fifo32_destroy:
+ * @fifo: FIFO to cleanup
+ *
+ * Cleanup a FIFO created with fifo32_create(). Frees memory created for FIFO
+ * storage. The FIFO is no longer usable after this has been called.
+ */
+
+static inline void fifo32_destroy(Fifo32 *fifo)
+{
+ fifo8_destroy(&fifo->fifo);
+}
+
+/**
+ * fifo32_num_free:
+ * @fifo: FIFO to check
+ *
+ * Return the number of free uint32_t slots in the FIFO.
+ *
+ * Returns: Number of free 32-bit words.
+ */
+
+static inline uint32_t fifo32_num_free(Fifo32 *fifo)
+{
+ return DIV_ROUND_UP(fifo8_num_free(&fifo->fifo), sizeof(uint32_t));
+}
+
+/**
+ * fifo32_num_used:
+ * @fifo: FIFO to check
+ *
+ * Return the number of used uint32_t slots in the FIFO.
+ *
+ * Returns: Number of used 32-bit words.
+ */
+
+static inline uint32_t fifo32_num_used(Fifo32 *fifo)
+{
+ return DIV_ROUND_UP(fifo8_num_used(&fifo->fifo), sizeof(uint32_t));
+}
+
+/**
+ * fifo32_push:
+ * @fifo: FIFO to push to
+ * @data: 32-bit data word to push
+ *
+ * Push a 32-bit data word to the FIFO. Behaviour is undefined if the FIFO
+ * is full. Clients are responsible for checking for fullness using
+ * fifo32_is_full().
+ */
+
+static inline void fifo32_push(Fifo32 *fifo, uint32_t data)
+{
+ int i;
+
+ for (i = 0; i < sizeof(data); i++) {
+ fifo8_push(&fifo->fifo, data & 0xff);
+ data >>= 8;
+ }
+}
+
+/**
+ * fifo32_push_all:
+ * @fifo: FIFO to push to
+ * @data: data to push
+ * @num: number of 32-bit words to push
+ *
+ * Push an array of 32-bit words to the FIFO. Behaviour is undefined if the FIFO
+ * is full. Clients are responsible for checking the space left in the FIFO
+ * using fifo32_num_free().
+ */
+
+static inline void fifo32_push_all(Fifo32 *fifo, const uint32_t *data,
+ uint32_t num)
+{
+ int i;
+
+ for (i = 0; i < num; i++) {
+ fifo32_push(fifo, data[i]);
+ }
+}
+
+/**
+ * fifo32_pop:
+ * @fifo: fifo to pop from
+ *
+ * Pop a 32-bit data word from the FIFO. Behaviour is undefined if the FIFO
+ * is empty. Clients are responsible for checking for emptiness using
+ * fifo32_is_empty().
+ *
+ * Returns: The popped 32-bit data word.
+ */
+
+static inline uint32_t fifo32_pop(Fifo32 *fifo)
+{
+ uint32_t ret = 0;
+ int i;
+
+ for (i = 0; i < sizeof(uint32_t); i++) {
+ ret |= (fifo8_pop(&fifo->fifo) << (i * 8));
+ }
+
+ return ret;
+}
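+
+/*
+ * A typical round trip (a minimal sketch; real callers must do their
+ * own fullness/emptiness checks as documented above):
+ *
+ *   Fifo32 f;
+ *   uint32_t v;
+ *
+ *   fifo32_create(&f, 16);
+ *   fifo32_push(&f, 0xdeadbeef);
+ *   if (!fifo32_is_empty(&f)) {
+ *       v = fifo32_pop(&f);
+ *   }
+ *   fifo32_destroy(&f);
+ */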
+
+/**
+ * There is no fifo32_pop_buf() because the data is not stored in the buffer
+ * as a set of native-order words.
+ */
+
+/**
+ * fifo32_reset:
+ * @fifo: FIFO to reset
+ *
+ * Reset a FIFO. All data is discarded and the FIFO is emptied.
+ */
+
+static inline void fifo32_reset(Fifo32 *fifo)
+{
+ fifo8_reset(&fifo->fifo);
+}
+
+/**
+ * fifo32_is_empty:
+ * @fifo: FIFO to check
+ *
+ * Check if a FIFO is empty.
+ *
+ * Returns: True if the fifo is empty, false otherwise.
+ */
+
+static inline bool fifo32_is_empty(Fifo32 *fifo)
+{
+ return fifo8_is_empty(&fifo->fifo);
+}
+
+/**
+ * fifo32_is_full:
+ * @fifo: FIFO to check
+ *
+ * Check if a FIFO is full.
+ *
+ * Returns: True if the fifo is full, false otherwise.
+ */
+
+static inline bool fifo32_is_full(Fifo32 *fifo)
+{
+ return fifo8_num_free(&fifo->fifo) < sizeof(uint32_t);
+}
+
+#define VMSTATE_FIFO32(_field, _state) VMSTATE_FIFO8(_field.fifo, _state)
+
+#endif /* FIFO32_H */
diff --git a/include/qemu/osdep.h b/include/qemu/osdep.h
index 408783f532..1e3221cbec 100644
--- a/include/qemu/osdep.h
+++ b/include/qemu/osdep.h
@@ -158,6 +158,20 @@ extern int daemon(int, int);
/* Round number up to multiple */
#define QEMU_ALIGN_UP(n, m) QEMU_ALIGN_DOWN((n) + (m) - 1, (m))
+/* Check if n is a multiple of m */
+#define QEMU_IS_ALIGNED(n, m) (((n) % (m)) == 0)
+
+/* n-byte align pointer down */
+#define QEMU_ALIGN_PTR_DOWN(p, n) \
+ ((typeof(p))QEMU_ALIGN_DOWN((uintptr_t)(p), (n)))
+
+/* n-byte align pointer up */
+#define QEMU_ALIGN_PTR_UP(p, n) \
+ ((typeof(p))QEMU_ALIGN_UP((uintptr_t)(p), (n)))
+
+/* Check if pointer p is n-bytes aligned */
+#define QEMU_PTR_IS_ALIGNED(p, n) QEMU_IS_ALIGNED((uintptr_t)(p), (n))
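+
+/*
+ * Usage sketch (illustrative values):
+ *   QEMU_ALIGN_UP(10, 8)          -> 16
+ *   QEMU_IS_ALIGNED(4096, 512)    -> true
+ *   QEMU_ALIGN_PTR_DOWN(p, 4096)  -> p rounded down to its 4 KiB page
+ */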
+
#ifndef ROUND_UP
#define ROUND_UP(n,d) (((n) + (d) - 1) & -(d))
#endif
diff --git a/include/qom/cpu.h b/include/qom/cpu.h
index b7a10f791a..4349c465c5 100644
--- a/include/qom/cpu.h
+++ b/include/qom/cpu.h
@@ -238,6 +238,7 @@ struct kvm_run;
* @crash_occurred: Indicates the OS reported a crash (panic) for this CPU
* @tcg_exit_req: Set to force TCG to stop executing linked TBs for this
* CPU and return to its top level loop.
+ * @tb_flushed: Indicates the translation buffer has been flushed.
* @singlestep_enabled: Flags for single-stepping.
* @icount_extra: Instructions until next timer event.
* @icount_decr: Number of cycles left, with interrupt flag in high bit.
@@ -252,7 +253,6 @@ struct kvm_run;
* @as: Pointer to the first AddressSpace, for the convenience of targets which
* only have a single AddressSpace
* @env_ptr: Pointer to subclass-specific CPUArchState field.
- * @current_tb: Currently executing TB.
* @gdb_regs: Additional GDB registers.
* @gdb_num_regs: Number of total registers accessible to GDB.
* @gdb_num_g_regs: Number of registers in GDB 'g' packets.
@@ -289,6 +289,7 @@ struct CPUState {
bool stopped;
bool crash_occurred;
bool exit_request;
+ bool tb_flushed;
uint32_t interrupt_request;
int singlestep_enabled;
int64_t icount_extra;
@@ -303,7 +304,6 @@ struct CPUState {
MemoryRegion *memory;
void *env_ptr; /* CPUArchState */
- struct TranslationBlock *current_tb;
struct TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE];
struct GDBRegisterState *gdb_regs;
int gdb_num_regs;
diff --git a/include/sysemu/block-backend.h b/include/sysemu/block-backend.h
index c62b6fe96d..26736ed84e 100644
--- a/include/sysemu/block-backend.h
+++ b/include/sysemu/block-backend.h
@@ -1,7 +1,7 @@
/*
* QEMU Block backends
*
- * Copyright (C) 2014 Red Hat, Inc.
+ * Copyright (C) 2014-2016 Red Hat, Inc.
*
* Authors:
* Markus Armbruster <armbru@redhat.com>,
@@ -90,28 +90,25 @@ void blk_attach_dev_nofail(BlockBackend *blk, void *dev);
void blk_detach_dev(BlockBackend *blk, void *dev);
void *blk_get_attached_dev(BlockBackend *blk);
void blk_set_dev_ops(BlockBackend *blk, const BlockDevOps *ops, void *opaque);
-int blk_read(BlockBackend *blk, int64_t sector_num, uint8_t *buf,
- int nb_sectors);
-int blk_read_unthrottled(BlockBackend *blk, int64_t sector_num, uint8_t *buf,
- int nb_sectors);
-int blk_write(BlockBackend *blk, int64_t sector_num, const uint8_t *buf,
- int nb_sectors);
-int blk_write_zeroes(BlockBackend *blk, int64_t sector_num,
- int nb_sectors, BdrvRequestFlags flags);
-BlockAIOCB *blk_aio_write_zeroes(BlockBackend *blk, int64_t sector_num,
- int nb_sectors, BdrvRequestFlags flags,
+int blk_pread_unthrottled(BlockBackend *blk, int64_t offset, uint8_t *buf,
+ int count);
+int blk_write_zeroes(BlockBackend *blk, int64_t offset,
+ int count, BdrvRequestFlags flags);
+BlockAIOCB *blk_aio_write_zeroes(BlockBackend *blk, int64_t offset,
+ int count, BdrvRequestFlags flags,
BlockCompletionFunc *cb, void *opaque);
int blk_pread(BlockBackend *blk, int64_t offset, void *buf, int count);
-int blk_pwrite(BlockBackend *blk, int64_t offset, const void *buf, int count);
+int blk_pwrite(BlockBackend *blk, int64_t offset, const void *buf, int count,
+ BdrvRequestFlags flags);
int64_t blk_getlength(BlockBackend *blk);
void blk_get_geometry(BlockBackend *blk, uint64_t *nb_sectors_ptr);
int64_t blk_nb_sectors(BlockBackend *blk);
-BlockAIOCB *blk_aio_readv(BlockBackend *blk, int64_t sector_num,
- QEMUIOVector *iov, int nb_sectors,
- BlockCompletionFunc *cb, void *opaque);
-BlockAIOCB *blk_aio_writev(BlockBackend *blk, int64_t sector_num,
- QEMUIOVector *iov, int nb_sectors,
+BlockAIOCB *blk_aio_preadv(BlockBackend *blk, int64_t offset,
+ QEMUIOVector *qiov, BdrvRequestFlags flags,
BlockCompletionFunc *cb, void *opaque);
+BlockAIOCB *blk_aio_pwritev(BlockBackend *blk, int64_t offset,
+ QEMUIOVector *qiov, BdrvRequestFlags flags,
+ BlockCompletionFunc *cb, void *opaque);
BlockAIOCB *blk_aio_flush(BlockBackend *blk,
BlockCompletionFunc *cb, void *opaque);
BlockAIOCB *blk_aio_discard(BlockBackend *blk,
@@ -178,8 +175,8 @@ int blk_get_open_flags_from_root_state(BlockBackend *blk);
void *blk_aio_get(const AIOCBInfo *aiocb_info, BlockBackend *blk,
BlockCompletionFunc *cb, void *opaque);
-int coroutine_fn blk_co_write_zeroes(BlockBackend *blk, int64_t sector_num,
- int nb_sectors, BdrvRequestFlags flags);
+int coroutine_fn blk_co_write_zeroes(BlockBackend *blk, int64_t offset,
+ int count, BdrvRequestFlags flags);
int blk_write_compressed(BlockBackend *blk, int64_t sector_num,
const uint8_t *buf, int nb_sectors);
int blk_truncate(BlockBackend *blk, int64_t offset);
diff --git a/include/sysemu/dma.h b/include/sysemu/dma.h
index b0fbb9bb35..0f7cd4d3ce 100644
--- a/include/sysemu/dma.h
+++ b/include/sysemu/dma.h
@@ -197,8 +197,8 @@ void qemu_sglist_add(QEMUSGList *qsg, dma_addr_t base, dma_addr_t len);
void qemu_sglist_destroy(QEMUSGList *qsg);
#endif
-typedef BlockAIOCB *DMAIOFunc(BlockBackend *blk, int64_t sector_num,
- QEMUIOVector *iov, int nb_sectors,
+typedef BlockAIOCB *DMAIOFunc(BlockBackend *blk, int64_t offset,
+ QEMUIOVector *iov, BdrvRequestFlags flags,
BlockCompletionFunc *cb, void *opaque);
BlockAIOCB *dma_blk_io(BlockBackend *blk,
diff --git a/nbd/server.c b/nbd/server.c
index 2184c64fef..fa862cd622 100644
--- a/nbd/server.c
+++ b/nbd/server.c
@@ -1115,7 +1115,7 @@ static void nbd_trip(void *opaque)
TRACE("Writing to device");
ret = blk_pwrite(exp->blk, request.from + exp->dev_offset,
- req->data, request.len);
+ req->data, request.len, 0);
if (ret < 0) {
LOG("writing to file failed");
reply.error = -ret;
diff --git a/qapi/block-core.json b/qapi/block-core.json
index 1d09079cc1..98a20d22f4 100644
--- a/qapi/block-core.json
+++ b/qapi/block-core.json
@@ -2556,3 +2556,35 @@
##
{ 'command': 'block-set-write-threshold',
'data': { 'node-name': 'str', 'write-threshold': 'uint64' } }
+
+##
+# @x-blockdev-change
+#
+# Dynamically reconfigure the block driver state graph. It can be used
+# to add, remove, insert or replace a graph node. Currently only the
+# Quorum driver implements this feature, to add or remove one of its
+# children. This is useful for fixing a broken quorum child.
+#
+# If @node is specified, it will be inserted under @parent. @child
+# may not be specified in this case. If both @parent and @child are
+# specified but @node is not, @child will be detached from @parent.
+#
+# @parent: the id or name of the parent node.
+#
+# @child: #optional the name of a child under the given parent node.
+#
+# @node: #optional the name of the node that will be added.
+#
+# Note: this command is experimental, and its API is not stable. It
+# does not support all kinds of operations, all kinds of children, nor
+# all block drivers.
+#
+# Warning: The data in a new quorum child MUST be consistent with that of
+# the rest of the array.
+#
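+# Example (parent and node names are illustrative):
+#
+# -> { "execute": "x-blockdev-change",
+#      "arguments": { "parent": "disk1",
+#                     "node": "new_quorum_child" } }
+# <- { "return": {} }
+#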
+# Since: 2.7
+##
+{ 'command': 'x-blockdev-change',
+ 'data' : { 'parent': 'str',
+ '*child': 'str',
+ '*node': 'str' } }
diff --git a/qapi/opts-visitor.c b/qapi/opts-visitor.c
index 602f2609cc..4cf1cf885b 100644
--- a/qapi/opts-visitor.c
+++ b/qapi/opts-visitor.c
@@ -23,9 +23,8 @@
enum ListMode
{
LM_NONE, /* not traversing a list of repeated options */
- LM_STARTED, /* opts_start_list() succeeded */
- LM_IN_PROGRESS, /* opts_next_list() has been called.
+ LM_IN_PROGRESS, /* opts_next_list() ready to be called.
*
* Generating the next list link will consume the most
* recently parsed QemuOpt instance of the repeated
@@ -133,7 +132,7 @@ opts_start_struct(Visitor *v, const char *name, void **obj,
const QemuOpt *opt;
if (obj) {
- *obj = g_malloc0(size > 0 ? size : 1);
+ *obj = g_malloc0(size);
}
if (ov->depth++ > 0) {
return;
@@ -159,13 +158,13 @@ opts_start_struct(Visitor *v, const char *name, void **obj,
static void
-opts_end_struct(Visitor *v, Error **errp)
+opts_check_struct(Visitor *v, Error **errp)
{
OptsVisitor *ov = to_ov(v);
GHashTableIter iter;
GQueue *any;
- if (--ov->depth > 0) {
+ if (ov->depth > 0) {
return;
}
@@ -177,6 +176,18 @@ opts_end_struct(Visitor *v, Error **errp)
first = g_queue_peek_head(any);
error_setg(errp, QERR_INVALID_PARAMETER, first->name);
}
+}
+
+
+static void
+opts_end_struct(Visitor *v)
+{
+ OptsVisitor *ov = to_ov(v);
+
+ if (--ov->depth > 0) {
+ return;
+ }
+
g_hash_table_destroy(ov->unprocessed_opts);
ov->unprocessed_opts = NULL;
if (ov->fake_id_opt) {
@@ -202,35 +213,33 @@ lookup_distinct(const OptsVisitor *ov, const char *name, Error **errp)
static void
-opts_start_list(Visitor *v, const char *name, Error **errp)
+opts_start_list(Visitor *v, const char *name, GenericList **list, size_t size,
+ Error **errp)
{
OptsVisitor *ov = to_ov(v);
/* we can't traverse a list in a list */
assert(ov->list_mode == LM_NONE);
+ /* we don't support visits without a list */
+ assert(list);
ov->repeated_opts = lookup_distinct(ov, name, errp);
- if (ov->repeated_opts != NULL) {
- ov->list_mode = LM_STARTED;
+ if (ov->repeated_opts) {
+ ov->list_mode = LM_IN_PROGRESS;
+ *list = g_malloc0(size);
+ } else {
+ *list = NULL;
}
}
static GenericList *
-opts_next_list(Visitor *v, GenericList **list, size_t size)
+opts_next_list(Visitor *v, GenericList *tail, size_t size)
{
OptsVisitor *ov = to_ov(v);
- GenericList **link;
switch (ov->list_mode) {
- case LM_STARTED:
- ov->list_mode = LM_IN_PROGRESS;
- link = list;
- break;
-
case LM_SIGNED_INTERVAL:
case LM_UNSIGNED_INTERVAL:
- link = &(*list)->next;
-
if (ov->list_mode == LM_SIGNED_INTERVAL) {
if (ov->range_next.s < ov->range_limit.s) {
++ov->range_next.s;
@@ -251,7 +260,6 @@ opts_next_list(Visitor *v, GenericList **list, size_t size)
g_hash_table_remove(ov->unprocessed_opts, opt->name);
return NULL;
}
- link = &(*list)->next;
break;
}
@@ -259,8 +267,8 @@ opts_next_list(Visitor *v, GenericList **list, size_t size)
abort();
}
- *link = g_malloc0(size);
- return *link;
+ tail->next = g_malloc0(size);
+ return tail->next;
}
@@ -269,8 +277,7 @@ opts_end_list(Visitor *v)
{
OptsVisitor *ov = to_ov(v);
- assert(ov->list_mode == LM_STARTED ||
- ov->list_mode == LM_IN_PROGRESS ||
+ assert(ov->list_mode == LM_IN_PROGRESS ||
ov->list_mode == LM_SIGNED_INTERVAL ||
ov->list_mode == LM_UNSIGNED_INTERVAL);
ov->repeated_opts = NULL;
@@ -314,9 +321,15 @@ opts_type_str(Visitor *v, const char *name, char **obj, Error **errp)
opt = lookup_scalar(ov, name, errp);
if (!opt) {
+ *obj = NULL;
return;
}
*obj = g_strdup(opt->str ? opt->str : "");
+ /* Note that we consume a string even if this is called as part of
+ * an enum visit that later fails because the string is not a
+ * valid enum value; this is harmless because tracking what gets
+ * consumed only matters to visit_check_struct() as the final error
+ * check if there were no other failures during the visit. */
processed(ov, name);
}
@@ -507,23 +520,16 @@ opts_visitor_new(const QemuOpts *opts)
ov = g_malloc0(sizeof *ov);
+ ov->visitor.type = VISITOR_INPUT;
+
ov->visitor.start_struct = &opts_start_struct;
+ ov->visitor.check_struct = &opts_check_struct;
ov->visitor.end_struct = &opts_end_struct;
ov->visitor.start_list = &opts_start_list;
ov->visitor.next_list = &opts_next_list;
ov->visitor.end_list = &opts_end_list;
- /* input_type_enum() covers both "normal" enums and union discriminators.
- * The union discriminator field is always generated as "type"; it should
- * match the "type" QemuOpt child of any QemuOpts.
- *
- * input_type_enum() will remove the looked-up key from the
- * "unprocessed_opts" hash even if the lookup fails, because the removal is
- * done earlier in opts_type_str(). This should be harmless.
- */
- ov->visitor.type_enum = &input_type_enum;
-
ov->visitor.type_int64 = &opts_type_int64;
ov->visitor.type_uint64 = &opts_type_uint64;
ov->visitor.type_size = &opts_type_size;
diff --git a/qapi/qapi-dealloc-visitor.c b/qapi/qapi-dealloc-visitor.c
index 69221794ec..cd68b55a1a 100644
--- a/qapi/qapi-dealloc-visitor.c
+++ b/qapi/qapi-dealloc-visitor.c
@@ -22,7 +22,6 @@
typedef struct StackEntry
{
void *value;
- bool is_list_head;
QTAILQ_ENTRY(StackEntry) node;
} StackEntry;
@@ -43,10 +42,6 @@ static void qapi_dealloc_push(QapiDeallocVisitor *qov, void *value)
e->value = value;
- /* see if we're just pushing a list head tracker */
- if (value == NULL) {
- e->is_list_head = true;
- }
QTAILQ_INSERT_HEAD(&qov->stack, e, node);
}
@@ -67,7 +62,7 @@ static void qapi_dealloc_start_struct(Visitor *v, const char *name, void **obj,
qapi_dealloc_push(qov, obj);
}
-static void qapi_dealloc_end_struct(Visitor *v, Error **errp)
+static void qapi_dealloc_end_struct(Visitor *v)
{
QapiDeallocVisitor *qov = to_qov(v);
void **obj = qapi_dealloc_pop(qov);
@@ -93,38 +88,22 @@ static void qapi_dealloc_end_alternate(Visitor *v)
}
}
-static void qapi_dealloc_start_list(Visitor *v, const char *name, Error **errp)
+static void qapi_dealloc_start_list(Visitor *v, const char *name,
+ GenericList **list, size_t size,
+ Error **errp)
{
- QapiDeallocVisitor *qov = to_qov(v);
- qapi_dealloc_push(qov, NULL);
}
-static GenericList *qapi_dealloc_next_list(Visitor *v, GenericList **listp,
+static GenericList *qapi_dealloc_next_list(Visitor *v, GenericList *tail,
size_t size)
{
- GenericList *list = *listp;
- QapiDeallocVisitor *qov = to_qov(v);
- StackEntry *e = QTAILQ_FIRST(&qov->stack);
-
- if (e && e->is_list_head) {
- e->is_list_head = false;
- return list;
- }
-
- if (list) {
- list = list->next;
- g_free(*listp);
- return list;
- }
-
- return NULL;
+ GenericList *next = tail->next;
+ g_free(tail);
+ return next;
}
static void qapi_dealloc_end_list(Visitor *v)
{
- QapiDeallocVisitor *qov = to_qov(v);
- void *obj = qapi_dealloc_pop(qov);
- assert(obj == NULL); /* should've been list head tracker with no payload */
}
static void qapi_dealloc_type_str(Visitor *v, const char *name, char **obj,
@@ -163,8 +142,7 @@ static void qapi_dealloc_type_anything(Visitor *v, const char *name,
}
}
-static void qapi_dealloc_type_enum(Visitor *v, const char *name, int *obj,
- const char * const strings[], Error **errp)
+static void qapi_dealloc_type_null(Visitor *v, const char *name, Error **errp)
{
}
@@ -184,6 +162,7 @@ QapiDeallocVisitor *qapi_dealloc_visitor_new(void)
v = g_malloc0(sizeof(*v));
+ v->visitor.type = VISITOR_DEALLOC;
v->visitor.start_struct = qapi_dealloc_start_struct;
v->visitor.end_struct = qapi_dealloc_end_struct;
v->visitor.start_alternate = qapi_dealloc_start_alternate;
@@ -191,13 +170,13 @@ QapiDeallocVisitor *qapi_dealloc_visitor_new(void)
v->visitor.start_list = qapi_dealloc_start_list;
v->visitor.next_list = qapi_dealloc_next_list;
v->visitor.end_list = qapi_dealloc_end_list;
- v->visitor.type_enum = qapi_dealloc_type_enum;
v->visitor.type_int64 = qapi_dealloc_type_int64;
v->visitor.type_uint64 = qapi_dealloc_type_uint64;
v->visitor.type_bool = qapi_dealloc_type_bool;
v->visitor.type_str = qapi_dealloc_type_str;
v->visitor.type_number = qapi_dealloc_type_number;
v->visitor.type_any = qapi_dealloc_type_anything;
+ v->visitor.type_null = qapi_dealloc_type_null;
QTAILQ_INIT(&v->stack);
diff --git a/qapi/qapi-visit-core.c b/qapi/qapi-visit-core.c
index fa680c9991..eada4676a2 100644
--- a/qapi/qapi-visit-core.c
+++ b/qapi/qapi-visit-core.c
@@ -23,23 +23,48 @@
void visit_start_struct(Visitor *v, const char *name, void **obj,
size_t size, Error **errp)
{
- v->start_struct(v, name, obj, size, errp);
+ Error *err = NULL;
+
+ if (obj) {
+ assert(size);
+ assert(v->type != VISITOR_OUTPUT || *obj);
+ }
+ v->start_struct(v, name, obj, size, &err);
+ if (obj && v->type == VISITOR_INPUT) {
+ assert(!err != !*obj);
+ }
+ error_propagate(errp, err);
}
-void visit_end_struct(Visitor *v, Error **errp)
+void visit_check_struct(Visitor *v, Error **errp)
{
- v->end_struct(v, errp);
+ if (v->check_struct) {
+ v->check_struct(v, errp);
+ }
}
-void visit_start_list(Visitor *v, const char *name, Error **errp)
+void visit_end_struct(Visitor *v)
{
- v->start_list(v, name, errp);
+ v->end_struct(v);
}
-GenericList *visit_next_list(Visitor *v, GenericList **list, size_t size)
+void visit_start_list(Visitor *v, const char *name, GenericList **list,
+ size_t size, Error **errp)
{
- assert(list && size >= sizeof(GenericList));
- return v->next_list(v, list, size);
+ Error *err = NULL;
+
+ assert(!list || size >= sizeof(GenericList));
+ v->start_list(v, name, list, size, &err);
+ if (list && v->type == VISITOR_INPUT) {
+ assert(!(err && *list));
+ }
+ error_propagate(errp, err);
+}
+
+GenericList *visit_next_list(Visitor *v, GenericList *tail, size_t size)
+{
+ assert(tail && size >= sizeof(GenericList));
+ return v->next_list(v, tail, size);
}
void visit_end_list(Visitor *v)
@@ -51,10 +76,17 @@ void visit_start_alternate(Visitor *v, const char *name,
GenericAlternate **obj, size_t size,
bool promote_int, Error **errp)
{
+ Error *err = NULL;
+
assert(obj && size >= sizeof(GenericAlternate));
+ assert(v->type != VISITOR_OUTPUT || *obj);
if (v->start_alternate) {
- v->start_alternate(v, name, obj, size, promote_int, errp);
+ v->start_alternate(v, name, obj, size, promote_int, &err);
}
+ if (v->type == VISITOR_INPUT) {
+ assert(v->start_alternate && !err != !*obj);
+ }
+ error_propagate(errp, err);
}
void visit_end_alternate(Visitor *v)
@@ -72,14 +104,14 @@ bool visit_optional(Visitor *v, const char *name, bool *present)
return *present;
}
-void visit_type_enum(Visitor *v, const char *name, int *obj,
- const char *const strings[], Error **errp)
+bool visit_is_input(Visitor *v)
{
- v->type_enum(v, name, obj, strings, errp);
+ return v->type == VISITOR_INPUT;
}
void visit_type_int(Visitor *v, const char *name, int64_t *obj, Error **errp)
{
+ assert(obj);
v->type_int64(v, name, obj, errp);
}
@@ -127,6 +159,7 @@ void visit_type_uint32(Visitor *v, const char *name, uint32_t *obj,
void visit_type_uint64(Visitor *v, const char *name, uint64_t *obj,
Error **errp)
{
+ assert(obj);
v->type_uint64(v, name, obj, errp);
}
@@ -174,12 +207,14 @@ void visit_type_int32(Visitor *v, const char *name, int32_t *obj,
void visit_type_int64(Visitor *v, const char *name, int64_t *obj,
Error **errp)
{
+ assert(obj);
v->type_int64(v, name, obj, errp);
}
void visit_type_size(Visitor *v, const char *name, uint64_t *obj,
Error **errp)
{
+ assert(obj);
if (v->type_size) {
v->type_size(v, name, obj, errp);
} else {
@@ -189,33 +224,58 @@ void visit_type_size(Visitor *v, const char *name, uint64_t *obj,
void visit_type_bool(Visitor *v, const char *name, bool *obj, Error **errp)
{
+ assert(obj);
v->type_bool(v, name, obj, errp);
}
void visit_type_str(Visitor *v, const char *name, char **obj, Error **errp)
{
- v->type_str(v, name, obj, errp);
+ Error *err = NULL;
+
+ assert(obj);
+ /* TODO: Fix callers to not pass NULL when they mean "", so that we
+ * can enable:
+ assert(v->type != VISITOR_OUTPUT || *obj);
+ */
+ v->type_str(v, name, obj, &err);
+ if (v->type == VISITOR_INPUT) {
+ assert(!err != !*obj);
+ }
+ error_propagate(errp, err);
}
void visit_type_number(Visitor *v, const char *name, double *obj,
Error **errp)
{
+ assert(obj);
v->type_number(v, name, obj, errp);
}
void visit_type_any(Visitor *v, const char *name, QObject **obj, Error **errp)
{
- v->type_any(v, name, obj, errp);
+ Error *err = NULL;
+
+ assert(obj);
+ assert(v->type != VISITOR_OUTPUT || *obj);
+ v->type_any(v, name, obj, &err);
+ if (v->type == VISITOR_INPUT) {
+ assert(!err != !*obj);
+ }
+ error_propagate(errp, err);
+}
+
+void visit_type_null(Visitor *v, const char *name, Error **errp)
+{
+ v->type_null(v, name, errp);
}
-void output_type_enum(Visitor *v, const char *name, int *obj,
- const char *const strings[], Error **errp)
+static void output_type_enum(Visitor *v, const char *name, int *obj,
+ const char *const strings[], Error **errp)
{
int i = 0;
int value = *obj;
char *enum_str;
- assert(strings);
while (strings[i++] != NULL);
if (value < 0 || value >= i - 1) {
error_setg(errp, QERR_INVALID_PARAMETER, name ? name : "null");
@@ -226,15 +286,13 @@ void output_type_enum(Visitor *v, const char *name, int *obj,
visit_type_str(v, name, &enum_str, errp);
}
-void input_type_enum(Visitor *v, const char *name, int *obj,
- const char *const strings[], Error **errp)
+static void input_type_enum(Visitor *v, const char *name, int *obj,
+ const char *const strings[], Error **errp)
{
Error *local_err = NULL;
int64_t value = 0;
char *enum_str;
- assert(strings);
-
visit_type_str(v, name, &enum_str, &local_err);
if (local_err) {
error_propagate(errp, local_err);
@@ -257,3 +315,14 @@ void input_type_enum(Visitor *v, const char *name, int *obj,
g_free(enum_str);
*obj = value;
}
+
+void visit_type_enum(Visitor *v, const char *name, int *obj,
+ const char *const strings[], Error **errp)
+{
+ assert(obj && strings);
+ if (v->type == VISITOR_INPUT) {
+ input_type_enum(v, name, obj, strings, errp);
+ } else if (v->type == VISITOR_OUTPUT) {
+ output_type_enum(v, name, obj, strings, errp);
+ }
+}
diff --git a/qapi/qmp-dispatch.c b/qapi/qmp-dispatch.c
index 510a1aead8..08faf853ac 100644
--- a/qapi/qmp-dispatch.c
+++ b/qapi/qmp-dispatch.c
@@ -94,17 +94,13 @@ static QObject *do_qmp_dispatch(QObject *request, Error **errp)
QINCREF(args);
}
- switch (cmd->type) {
- case QCT_NORMAL:
- cmd->fn(args, &ret, &local_err);
- if (local_err) {
- error_propagate(errp, local_err);
- } else if (cmd->options & QCO_NO_SUCCESS_RESP) {
- g_assert(!ret);
- } else if (!ret) {
- ret = QOBJECT(qdict_new());
- }
- break;
+ cmd->fn(args, &ret, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ } else if (cmd->options & QCO_NO_SUCCESS_RESP) {
+ g_assert(!ret);
+ } else if (!ret) {
+ ret = QOBJECT(qdict_new());
}
QDECREF(args);
diff --git a/qapi/qmp-input-visitor.c b/qapi/qmp-input-visitor.c
index 7cd1b777a0..aea90a1378 100644
--- a/qapi/qmp-input-visitor.c
+++ b/qapi/qmp-input-visitor.c
@@ -25,16 +25,25 @@
typedef struct StackObject
{
- QObject *obj;
- const QListEntry *entry;
- GHashTable *h;
+ QObject *obj; /* Object being visited */
+
+ GHashTable *h; /* If obj is dict: unvisited keys */
+ const QListEntry *entry; /* If obj is list: unvisited tail */
} StackObject;
struct QmpInputVisitor
{
Visitor visitor;
+
+ /* Root of visit at visitor creation. */
+ QObject *root;
+
+ /* Stack of objects being visited (all entries will be either
+ * QDict or QList). */
StackObject stack[QIV_STACK_SIZE];
int nb_stack;
+
+ /* True to reject parse in visit_check_struct() if unvisited keys remain. */
bool strict;
};
@@ -47,20 +56,37 @@ static QObject *qmp_input_get_object(QmpInputVisitor *qiv,
const char *name,
bool consume)
{
- QObject *qobj = qiv->stack[qiv->nb_stack - 1].obj;
+ StackObject *tos;
+ QObject *qobj;
+ QObject *ret;
- if (qobj) {
- if (name && qobject_type(qobj) == QTYPE_QDICT) {
- if (qiv->stack[qiv->nb_stack - 1].h && consume) {
- g_hash_table_remove(qiv->stack[qiv->nb_stack - 1].h, name);
- }
- return qdict_get(qobject_to_qdict(qobj), name);
- } else if (qiv->stack[qiv->nb_stack - 1].entry) {
- return qlist_entry_obj(qiv->stack[qiv->nb_stack - 1].entry);
+ if (!qiv->nb_stack) {
+ /* Starting at root, name is ignored. */
+ return qiv->root;
+ }
+
+ /* We are in a container; find the next element. */
+ tos = &qiv->stack[qiv->nb_stack - 1];
+ qobj = tos->obj;
+ assert(qobj);
+
+ if (qobject_type(qobj) == QTYPE_QDICT) {
+ assert(name);
+ ret = qdict_get(qobject_to_qdict(qobj), name);
+ if (tos->h && consume && ret) {
+ bool removed = g_hash_table_remove(tos->h, name);
+ assert(removed);
+ }
+ } else {
+ assert(qobject_type(qobj) == QTYPE_QLIST);
+ assert(!name);
+ ret = qlist_entry_obj(tos->entry);
+ if (consume) {
+ tos->entry = qlist_next(tos->entry);
}
}
- return qobj;
+ return ret;
}
static void qdict_add_key(const char *key, QObject *obj, void *opaque)
@@ -69,35 +95,44 @@ static void qdict_add_key(const char *key, QObject *obj, void *opaque)
g_hash_table_insert(h, (gpointer) key, NULL);
}
-static void qmp_input_push(QmpInputVisitor *qiv, QObject *obj, Error **errp)
+static const QListEntry *qmp_input_push(QmpInputVisitor *qiv, QObject *obj,
+ Error **errp)
{
GHashTable *h;
+ StackObject *tos = &qiv->stack[qiv->nb_stack];
+ assert(obj);
if (qiv->nb_stack >= QIV_STACK_SIZE) {
error_setg(errp, "An internal buffer overran");
- return;
+ return NULL;
}
- qiv->stack[qiv->nb_stack].obj = obj;
- qiv->stack[qiv->nb_stack].entry = NULL;
- qiv->stack[qiv->nb_stack].h = NULL;
+ tos->obj = obj;
+ assert(!tos->h);
+ assert(!tos->entry);
if (qiv->strict && qobject_type(obj) == QTYPE_QDICT) {
h = g_hash_table_new(g_str_hash, g_str_equal);
qdict_iter(qobject_to_qdict(obj), qdict_add_key, h);
- qiv->stack[qiv->nb_stack].h = h;
+ tos->h = h;
+ } else if (qobject_type(obj) == QTYPE_QLIST) {
+ tos->entry = qlist_first(qobject_to_qlist(obj));
}
qiv->nb_stack++;
+ return tos->entry;
}
-static void qmp_input_pop(QmpInputVisitor *qiv, Error **errp)
+static void qmp_input_check_struct(Visitor *v, Error **errp)
{
+ QmpInputVisitor *qiv = to_qiv(v);
+ StackObject *tos = &qiv->stack[qiv->nb_stack - 1];
+
assert(qiv->nb_stack > 0);
if (qiv->strict) {
- GHashTable * const top_ht = qiv->stack[qiv->nb_stack - 1].h;
+ GHashTable *const top_ht = tos->h;
if (top_ht) {
GHashTableIter iter;
const char *key;
@@ -106,8 +141,23 @@ static void qmp_input_pop(QmpInputVisitor *qiv, Error **errp)
if (g_hash_table_iter_next(&iter, (void **)&key, NULL)) {
error_setg(errp, QERR_QMP_EXTRA_MEMBER, key);
}
+ }
+ }
+}
+
+static void qmp_input_pop(Visitor *v)
+{
+ QmpInputVisitor *qiv = to_qiv(v);
+ StackObject *tos = &qiv->stack[qiv->nb_stack - 1];
+
+ assert(qiv->nb_stack > 0);
+
+ if (qiv->strict) {
+ GHashTable * const top_ht = qiv->stack[qiv->nb_stack - 1].h;
+ if (top_ht) {
g_hash_table_unref(top_ht);
}
+ tos->h = NULL;
}
qiv->nb_stack--;
@@ -120,6 +170,9 @@ static void qmp_input_start_struct(Visitor *v, const char *name, void **obj,
QObject *qobj = qmp_input_get_object(qiv, name, true);
Error *err = NULL;
+ if (obj) {
+ *obj = NULL;
+ }
if (!qobj || qobject_type(qobj) != QTYPE_QDICT) {
error_setg(errp, QERR_INVALID_PARAMETER_TYPE, name ? name : "null",
"QDict");
@@ -137,63 +190,46 @@ static void qmp_input_start_struct(Visitor *v, const char *name, void **obj,
}
}
-static void qmp_input_end_struct(Visitor *v, Error **errp)
-{
- QmpInputVisitor *qiv = to_qiv(v);
-
- qmp_input_pop(qiv, errp);
-}
-static void qmp_input_start_list(Visitor *v, const char *name, Error **errp)
+static void qmp_input_start_list(Visitor *v, const char *name,
+ GenericList **list, size_t size, Error **errp)
{
QmpInputVisitor *qiv = to_qiv(v);
QObject *qobj = qmp_input_get_object(qiv, name, true);
+ const QListEntry *entry;
if (!qobj || qobject_type(qobj) != QTYPE_QLIST) {
+ if (list) {
+ *list = NULL;
+ }
error_setg(errp, QERR_INVALID_PARAMETER_TYPE, name ? name : "null",
"list");
return;
}
- qmp_input_push(qiv, qobj, errp);
+ entry = qmp_input_push(qiv, qobj, errp);
+ if (list) {
+ if (entry) {
+ *list = g_malloc0(size);
+ } else {
+ *list = NULL;
+ }
+ }
}
-static GenericList *qmp_input_next_list(Visitor *v, GenericList **list,
+static GenericList *qmp_input_next_list(Visitor *v, GenericList *tail,
size_t size)
{
QmpInputVisitor *qiv = to_qiv(v);
- GenericList *entry;
StackObject *so = &qiv->stack[qiv->nb_stack - 1];
- bool first;
- if (so->entry == NULL) {
- so->entry = qlist_first(qobject_to_qlist(so->obj));
- first = true;
- } else {
- so->entry = qlist_next(so->entry);
- first = false;
- }
-
- if (so->entry == NULL) {
+ if (!so->entry) {
return NULL;
}
-
- entry = g_malloc0(size);
- if (first) {
- *list = entry;
- } else {
- (*list)->next = entry;
- }
-
- return entry;
+ tail->next = g_malloc0(size);
+ return tail->next;
}
-static void qmp_input_end_list(Visitor *v)
-{
- QmpInputVisitor *qiv = to_qiv(v);
-
- qmp_input_pop(qiv, &error_abort);
-}
static void qmp_input_start_alternate(Visitor *v, const char *name,
GenericAlternate **obj, size_t size,
@@ -267,6 +303,7 @@ static void qmp_input_type_str(Visitor *v, const char *name, char **obj,
QString *qstr = qobject_to_qstring(qmp_input_get_object(qiv, name, true));
if (!qstr) {
+ *obj = NULL;
error_setg(errp, QERR_INVALID_PARAMETER_TYPE, name ? name : "null",
"string");
return;
@@ -309,11 +346,22 @@ static void qmp_input_type_any(Visitor *v, const char *name, QObject **obj,
*obj = qobj;
}
-static void qmp_input_optional(Visitor *v, const char *name, bool *present)
+static void qmp_input_type_null(Visitor *v, const char *name, Error **errp)
{
QmpInputVisitor *qiv = to_qiv(v);
QObject *qobj = qmp_input_get_object(qiv, name, true);
+ if (qobject_type(qobj) != QTYPE_QNULL) {
+ error_setg(errp, QERR_INVALID_PARAMETER_TYPE, name ? name : "null",
+ "null");
+ }
+}
+
+static void qmp_input_optional(Visitor *v, const char *name, bool *present)
+{
+ QmpInputVisitor *qiv = to_qiv(v);
+ QObject *qobj = qmp_input_get_object(qiv, name, false);
+
if (!qobj) {
*present = false;
return;
@@ -329,43 +377,36 @@ Visitor *qmp_input_get_visitor(QmpInputVisitor *v)
void qmp_input_visitor_cleanup(QmpInputVisitor *v)
{
- qobject_decref(v->stack[0].obj);
+ qobject_decref(v->root);
g_free(v);
}
-QmpInputVisitor *qmp_input_visitor_new(QObject *obj)
+QmpInputVisitor *qmp_input_visitor_new(QObject *obj, bool strict)
{
QmpInputVisitor *v;
v = g_malloc0(sizeof(*v));
+ v->visitor.type = VISITOR_INPUT;
v->visitor.start_struct = qmp_input_start_struct;
- v->visitor.end_struct = qmp_input_end_struct;
+ v->visitor.check_struct = qmp_input_check_struct;
+ v->visitor.end_struct = qmp_input_pop;
v->visitor.start_list = qmp_input_start_list;
v->visitor.next_list = qmp_input_next_list;
- v->visitor.end_list = qmp_input_end_list;
+ v->visitor.end_list = qmp_input_pop;
v->visitor.start_alternate = qmp_input_start_alternate;
- v->visitor.type_enum = input_type_enum;
v->visitor.type_int64 = qmp_input_type_int64;
v->visitor.type_uint64 = qmp_input_type_uint64;
v->visitor.type_bool = qmp_input_type_bool;
v->visitor.type_str = qmp_input_type_str;
v->visitor.type_number = qmp_input_type_number;
v->visitor.type_any = qmp_input_type_any;
+ v->visitor.type_null = qmp_input_type_null;
v->visitor.optional = qmp_input_optional;
+ v->strict = strict;
- qmp_input_push(v, obj, NULL);
+ v->root = obj;
qobject_incref(obj);
return v;
}
-
-QmpInputVisitor *qmp_input_visitor_new_strict(QObject *obj)
-{
- QmpInputVisitor *v;
-
- v = qmp_input_visitor_new(obj);
- v->strict = true;
-
- return v;
-}
diff --git a/qapi/qmp-output-visitor.c b/qapi/qmp-output-visitor.c
index d44c676317..4d3cf78333 100644
--- a/qapi/qmp-output-visitor.c
+++ b/qapi/qmp-output-visitor.c
@@ -22,7 +22,6 @@
typedef struct QStackEntry
{
QObject *value;
- bool is_list_head;
QTAILQ_ENTRY(QStackEntry) node;
} QStackEntry;
@@ -52,9 +51,6 @@ static void qmp_output_push_obj(QmpOutputVisitor *qov, QObject *value)
assert(qov->root);
assert(value);
e->value = value;
- if (qobject_type(e->value) == QTYPE_QLIST) {
- e->is_list_head = true;
- }
QTAILQ_INSERT_HEAD(&qov->stack, e, node);
}
@@ -82,9 +78,8 @@ static void qmp_output_add_obj(QmpOutputVisitor *qov, const char *name,
QObject *cur = e ? e->value : NULL;
if (!cur) {
- /* FIXME we should require the user to reset the visitor, rather
- * than throwing away the previous root */
- qobject_decref(qov->root);
+ /* Don't allow reuse of visitor on more than one root */
+ assert(!qov->root);
qov->root = value;
} else {
switch (qobject_type(cur)) {
@@ -93,6 +88,7 @@ static void qmp_output_add_obj(QmpOutputVisitor *qov, const char *name,
qdict_put_obj(qobject_to_qdict(cur), name, value);
break;
case QTYPE_QLIST:
+ assert(!name);
qlist_append_obj(qobject_to_qlist(cur), value);
break;
default:
@@ -111,13 +107,16 @@ static void qmp_output_start_struct(Visitor *v, const char *name, void **obj,
qmp_output_push(qov, dict);
}
-static void qmp_output_end_struct(Visitor *v, Error **errp)
+static void qmp_output_end_struct(Visitor *v)
{
QmpOutputVisitor *qov = to_qov(v);
- qmp_output_pop(qov);
+ QObject *value = qmp_output_pop(qov);
+ assert(qobject_type(value) == QTYPE_QDICT);
}
-static void qmp_output_start_list(Visitor *v, const char *name, Error **errp)
+static void qmp_output_start_list(Visitor *v, const char *name,
+ GenericList **listp, size_t size,
+ Error **errp)
{
QmpOutputVisitor *qov = to_qov(v);
QList *list = qlist_new();
@@ -126,26 +125,17 @@ static void qmp_output_start_list(Visitor *v, const char *name, Error **errp)
qmp_output_push(qov, list);
}
-static GenericList *qmp_output_next_list(Visitor *v, GenericList **listp,
+static GenericList *qmp_output_next_list(Visitor *v, GenericList *tail,
size_t size)
{
- GenericList *list = *listp;
- QmpOutputVisitor *qov = to_qov(v);
- QStackEntry *e = QTAILQ_FIRST(&qov->stack);
-
- assert(e);
- if (e->is_list_head) {
- e->is_list_head = false;
- return list;
- }
-
- return list ? list->next : NULL;
+ return tail->next;
}
static void qmp_output_end_list(Visitor *v)
{
QmpOutputVisitor *qov = to_qov(v);
- qmp_output_pop(qov);
+ QObject *value = qmp_output_pop(qov);
+ assert(qobject_type(value) == QTYPE_QLIST);
}
static void qmp_output_type_int64(Visitor *v, const char *name, int64_t *obj,
@@ -196,18 +186,22 @@ static void qmp_output_type_any(Visitor *v, const char *name, QObject **obj,
qmp_output_add_obj(qov, name, *obj);
}
-/* Finish building, and return the root object. Will not be NULL. */
+static void qmp_output_type_null(Visitor *v, const char *name, Error **errp)
+{
+ QmpOutputVisitor *qov = to_qov(v);
+ qmp_output_add_obj(qov, name, qnull());
+}
+
+/* Finish building, and return the root object.
+ * The root object is never null. The caller becomes the object's
+ * owner, and should use qobject_decref() when done with it. */
QObject *qmp_output_get_qobject(QmpOutputVisitor *qov)
{
- /* FIXME: we should require that a visit occurred, and that it is
- * complete (no starts without a matching end) */
- QObject *obj = qov->root;
- if (obj) {
- qobject_incref(obj);
- } else {
- obj = qnull();
- }
- return obj;
+ /* A visit must have occurred, with each start paired with end. */
+ assert(qov->root && QTAILQ_EMPTY(&qov->stack));
+
+ qobject_incref(qov->root);
+ return qov->root;
}
Visitor *qmp_output_get_visitor(QmpOutputVisitor *v)
@@ -234,18 +228,19 @@ QmpOutputVisitor *qmp_output_visitor_new(void)
v = g_malloc0(sizeof(*v));
+ v->visitor.type = VISITOR_OUTPUT;
v->visitor.start_struct = qmp_output_start_struct;
v->visitor.end_struct = qmp_output_end_struct;
v->visitor.start_list = qmp_output_start_list;
v->visitor.next_list = qmp_output_next_list;
v->visitor.end_list = qmp_output_end_list;
- v->visitor.type_enum = output_type_enum;
v->visitor.type_int64 = qmp_output_type_int64;
v->visitor.type_uint64 = qmp_output_type_uint64;
v->visitor.type_bool = qmp_output_type_bool;
v->visitor.type_str = qmp_output_type_str;
v->visitor.type_number = qmp_output_type_number;
v->visitor.type_any = qmp_output_type_any;
+ v->visitor.type_null = qmp_output_type_null;
QTAILQ_INIT(&v->stack);
diff --git a/qapi/qmp-registry.c b/qapi/qmp-registry.c
index 4ebfbccd46..4332a6818d 100644
--- a/qapi/qmp-registry.c
+++ b/qapi/qmp-registry.c
@@ -25,7 +25,6 @@ void qmp_register_command(const char *name, QmpCommandFunc *fn,
QmpCommand *cmd = g_malloc0(sizeof(*cmd));
cmd->name = name;
- cmd->type = QCT_NORMAL;
cmd->fn = fn;
cmd->enabled = true;
cmd->options = options;
diff --git a/qapi/string-input-visitor.c b/qapi/string-input-visitor.c
index 5ea2d77b5a..30b58791c9 100644
--- a/qapi/string-input-visitor.c
+++ b/qapi/string-input-visitor.c
@@ -25,8 +25,6 @@ struct StringInputVisitor
{
Visitor visitor;
- bool head;
-
GList *ranges;
GList *cur_range;
int64_t cur;
@@ -44,7 +42,7 @@ static void free_range(void *range, void *dummy)
g_free(range);
}
-static void parse_str(StringInputVisitor *siv, Error **errp)
+static int parse_str(StringInputVisitor *siv, const char *name, Error **errp)
{
char *str = (char *) siv->string;
long long start, end;
@@ -52,7 +50,7 @@ static void parse_str(StringInputVisitor *siv, Error **errp)
char *endptr;
if (siv->ranges) {
- return;
+ return 0;
}
do {
@@ -117,19 +115,29 @@ static void parse_str(StringInputVisitor *siv, Error **errp)
}
} while (str);
- return;
+ return 0;
error:
g_list_foreach(siv->ranges, free_range, NULL);
g_list_free(siv->ranges);
siv->ranges = NULL;
+ error_setg(errp, QERR_INVALID_PARAMETER_VALUE, name ? name : "null",
+ "an int64 value or range");
+ return -1;
}
static void
-start_list(Visitor *v, const char *name, Error **errp)
+start_list(Visitor *v, const char *name, GenericList **list, size_t size,
+ Error **errp)
{
StringInputVisitor *siv = to_siv(v);
- parse_str(siv, errp);
+ /* We don't support visits without a list */
+ assert(list);
+
+ if (parse_str(siv, name, errp) < 0) {
+ *list = NULL;
+ return;
+ }
siv->cur_range = g_list_first(siv->ranges);
if (siv->cur_range) {
@@ -137,13 +145,15 @@ start_list(Visitor *v, const char *name, Error **errp)
if (r) {
siv->cur = r->begin;
}
+ *list = g_malloc0(size);
+ } else {
+ *list = NULL;
}
}
-static GenericList *next_list(Visitor *v, GenericList **list, size_t size)
+static GenericList *next_list(Visitor *v, GenericList *tail, size_t size)
{
StringInputVisitor *siv = to_siv(v);
- GenericList **link;
Range *r;
if (!siv->ranges || !siv->cur_range) {
@@ -167,21 +177,12 @@ static GenericList *next_list(Visitor *v, GenericList **list, size_t size)
siv->cur = r->begin;
}
- if (siv->head) {
- link = list;
- siv->head = false;
- } else {
- link = &(*list)->next;
- }
-
- *link = g_malloc0(size);
- return *link;
+ tail->next = g_malloc0(size);
+ return tail->next;
}
static void end_list(Visitor *v)
{
- StringInputVisitor *siv = to_siv(v);
- siv->head = true;
}
static void parse_type_int64(Visitor *v, const char *name, int64_t *obj,
@@ -195,7 +196,9 @@ static void parse_type_int64(Visitor *v, const char *name, int64_t *obj,
return;
}
- parse_str(siv, errp);
+ if (parse_str(siv, name, errp) < 0) {
+ return;
+ }
if (!siv->ranges) {
goto error;
@@ -293,6 +296,7 @@ static void parse_type_str(Visitor *v, const char *name, char **obj,
if (siv->string) {
*obj = g_strdup(siv->string);
} else {
+ *obj = NULL;
error_setg(errp, QERR_INVALID_PARAMETER_TYPE, name ? name : "null",
"string");
}
@@ -348,7 +352,7 @@ StringInputVisitor *string_input_visitor_new(const char *str)
v = g_malloc0(sizeof(*v));
- v->visitor.type_enum = input_type_enum;
+ v->visitor.type = VISITOR_INPUT;
v->visitor.type_int64 = parse_type_int64;
v->visitor.type_uint64 = parse_type_uint64;
v->visitor.type_size = parse_type_size;
@@ -361,6 +365,5 @@ StringInputVisitor *string_input_visitor_new(const char *str)
v->visitor.optional = parse_optional;
v->string = str;
- v->head = true;
return v;
}
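
A rough sketch of how the reworked list visit looks from a caller's point of view (assuming the QAPI-generated visit_type_int64List() and the usual cleanup helpers; not taken from the patch):

    /* Parse "0-2,5" into an int64 list; on a parse error the reworked
     * start_list() leaves *list set to NULL rather than untouched. */
    StringInputVisitor *siv = string_input_visitor_new("0-2,5");
    int64List *list = NULL;
    Error *err = NULL;

    visit_type_int64List(string_input_get_visitor(siv), NULL, &list, &err);
    if (!err) {
        for (int64List *tail = list; tail; tail = tail->next) {
            printf("%" PRId64 "\n", tail->value);   /* 0, 1, 2, 5 */
        }
    }
    error_free(err);
    qapi_free_int64List(list);
    string_input_visitor_cleanup(siv);
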
diff --git a/qapi/string-output-visitor.c b/qapi/string-output-visitor.c
index c2e5c5b92b..d01319628b 100644
--- a/qapi/string-output-visitor.c
+++ b/qapi/string-output-visitor.c
@@ -20,7 +20,7 @@
enum ListMode {
LM_NONE, /* not traversing a list of repeated options */
- LM_STARTED, /* start_list() succeeded */
+ LM_STARTED, /* next_list() ready to be called */
LM_IN_PROGRESS, /* next_list() has been called.
*
@@ -48,7 +48,7 @@ enum ListMode {
LM_UNSIGNED_INTERVAL,/* Same as above, only for an unsigned interval. */
- LM_END
+ LM_END, /* next_list() called, about to see last element. */
};
typedef enum ListMode ListMode;
@@ -58,7 +58,6 @@ struct StringOutputVisitor
Visitor visitor;
bool human;
GString *string;
- bool head;
ListMode list_mode;
union {
int64_t s;
@@ -266,39 +265,29 @@ static void print_type_number(Visitor *v, const char *name, double *obj,
}
static void
-start_list(Visitor *v, const char *name, Error **errp)
+start_list(Visitor *v, const char *name, GenericList **list, size_t size,
+ Error **errp)
{
StringOutputVisitor *sov = to_sov(v);
/* we can't traverse a list in a list */
assert(sov->list_mode == LM_NONE);
- sov->list_mode = LM_STARTED;
- sov->head = true;
+ /* We don't support visits without a list */
+ assert(list);
+ /* List handling is only needed if there are at least two elements */
+ if (*list && (*list)->next) {
+ sov->list_mode = LM_STARTED;
+ }
}
-static GenericList *next_list(Visitor *v, GenericList **list, size_t size)
+static GenericList *next_list(Visitor *v, GenericList *tail, size_t size)
{
StringOutputVisitor *sov = to_sov(v);
- GenericList *ret = NULL;
- if (*list) {
- if (sov->head) {
- ret = *list;
- } else {
- ret = (*list)->next;
- }
+ GenericList *ret = tail->next;
- if (sov->head) {
- if (ret && ret->next == NULL) {
- sov->list_mode = LM_NONE;
- }
- sov->head = false;
- } else {
- if (ret && ret->next == NULL) {
- sov->list_mode = LM_END;
- }
- }
+ if (ret && !ret->next) {
+ sov->list_mode = LM_END;
}
-
return ret;
}
@@ -311,8 +300,6 @@ static void end_list(Visitor *v)
sov->list_mode == LM_NONE ||
sov->list_mode == LM_IN_PROGRESS);
sov->list_mode = LM_NONE;
- sov->head = true;
-
}
char *string_output_get_string(StringOutputVisitor *sov)
@@ -351,7 +338,7 @@ StringOutputVisitor *string_output_visitor_new(bool human)
v->string = g_string_new(NULL);
v->human = human;
- v->visitor.type_enum = output_type_enum;
+ v->visitor.type = VISITOR_OUTPUT;
v->visitor.type_int64 = print_type_int64;
v->visitor.type_uint64 = print_type_uint64;
v->visitor.type_size = print_type_size;
diff --git a/qemu-doc.texi b/qemu-doc.texi
index 79141d3582..f37fd3130e 100644
--- a/qemu-doc.texi
+++ b/qemu-doc.texi
@@ -693,6 +693,9 @@ Supported options:
File name of a base image (see @option{create} subcommand).
@item compat6
Create a VMDK version 6 image (instead of version 4)
+@item hwversion
+Specify the VMDK virtual hardware version. The compat6 flag cannot be
+enabled if hwversion is specified.
@item subformat
Specifies which VMDK subformat to use. Valid options are
@code{monolithicSparse} (default),
diff --git a/qemu-img.c b/qemu-img.c
index 46f2a6def4..47923663be 100644
--- a/qemu-img.c
+++ b/qemu-img.c
@@ -1088,7 +1088,8 @@ static int check_empty_sectors(BlockBackend *blk, int64_t sect_num,
uint8_t *buffer, bool quiet)
{
int pnum, ret = 0;
- ret = blk_read(blk, sect_num, buffer, sect_count);
+ ret = blk_pread(blk, sect_num << BDRV_SECTOR_BITS, buffer,
+ sect_count << BDRV_SECTOR_BITS);
if (ret < 0) {
error_report("Error while reading offset %" PRId64 " of %s: %s",
sectors_to_bytes(sect_num), filename, strerror(-ret));
@@ -1301,7 +1302,8 @@ static int img_compare(int argc, char **argv)
nb_sectors = MIN(pnum1, pnum2);
} else if (allocated1 == allocated2) {
if (allocated1) {
- ret = blk_read(blk1, sector_num, buf1, nb_sectors);
+ ret = blk_pread(blk1, sector_num << BDRV_SECTOR_BITS, buf1,
+ nb_sectors << BDRV_SECTOR_BITS);
if (ret < 0) {
error_report("Error while reading offset %" PRId64 " of %s:"
" %s", sectors_to_bytes(sector_num), filename1,
@@ -1309,7 +1311,8 @@ static int img_compare(int argc, char **argv)
ret = 4;
goto out;
}
- ret = blk_read(blk2, sector_num, buf2, nb_sectors);
+ ret = blk_pread(blk2, sector_num << BDRV_SECTOR_BITS, buf2,
+ nb_sectors << BDRV_SECTOR_BITS);
if (ret < 0) {
error_report("Error while reading offset %" PRId64
" of %s: %s", sectors_to_bytes(sector_num),
@@ -1472,10 +1475,21 @@ static int convert_iteration_sectors(ImgConvertState *s, int64_t sector_num)
} else if (!s->target_has_backing) {
/* Without a target backing file we must copy over the contents of
* the backing file as well. */
- /* TODO Check block status of the backing file chain to avoid
+ /* Check block status of the backing file chain to avoid
* needlessly reading zeroes and limiting the iteration to the
* buffer size */
- s->status = BLK_DATA;
+ ret = bdrv_get_block_status_above(blk_bs(s->src[s->src_cur]), NULL,
+ sector_num - s->src_cur_offset,
+ n, &n, &file);
+ if (ret < 0) {
+ return ret;
+ }
+
+ if (ret & BDRV_BLOCK_ZERO) {
+ s->status = BLK_ZERO;
+ } else {
+ s->status = BLK_DATA;
+ }
} else {
s->status = BLK_BACKING_FILE;
}
@@ -1522,7 +1536,9 @@ static int convert_read(ImgConvertState *s, int64_t sector_num, int nb_sectors,
bs_sectors = s->src_sectors[s->src_cur];
n = MIN(nb_sectors, bs_sectors - (sector_num - s->src_cur_offset));
- ret = blk_read(blk, sector_num - s->src_cur_offset, buf, n);
+ ret = blk_pread(blk,
+ (sector_num - s->src_cur_offset) << BDRV_SECTOR_BITS,
+ buf, n << BDRV_SECTOR_BITS);
if (ret < 0) {
return ret;
}
@@ -1577,7 +1593,8 @@ static int convert_write(ImgConvertState *s, int64_t sector_num, int nb_sectors,
if (!s->min_sparse ||
is_allocated_sectors_min(buf, n, &n, s->min_sparse))
{
- ret = blk_write(s->target, sector_num, buf, n);
+ ret = blk_pwrite(s->target, sector_num << BDRV_SECTOR_BITS,
+ buf, n << BDRV_SECTOR_BITS, 0);
if (ret < 0) {
return ret;
}
@@ -1589,7 +1606,8 @@ static int convert_write(ImgConvertState *s, int64_t sector_num, int nb_sectors,
if (s->has_zero_init) {
break;
}
- ret = blk_write_zeroes(s->target, sector_num, n, 0);
+ ret = blk_write_zeroes(s->target, sector_num << BDRV_SECTOR_BITS,
+ n << BDRV_SECTOR_BITS, 0);
if (ret < 0) {
return ret;
}
@@ -3023,7 +3041,8 @@ static int img_rebase(int argc, char **argv)
n = old_backing_num_sectors - sector;
}
- ret = blk_read(blk_old_backing, sector, buf_old, n);
+ ret = blk_pread(blk_old_backing, sector << BDRV_SECTOR_BITS,
+ buf_old, n << BDRV_SECTOR_BITS);
if (ret < 0) {
error_report("error while reading from old backing file");
goto out;
@@ -3037,7 +3056,8 @@ static int img_rebase(int argc, char **argv)
n = new_backing_num_sectors - sector;
}
- ret = blk_read(blk_new_backing, sector, buf_new, n);
+ ret = blk_pread(blk_new_backing, sector << BDRV_SECTOR_BITS,
+ buf_new, n << BDRV_SECTOR_BITS);
if (ret < 0) {
error_report("error while reading from new backing file");
goto out;
@@ -3053,8 +3073,10 @@ static int img_rebase(int argc, char **argv)
if (compare_sectors(buf_old + written * 512,
buf_new + written * 512, n - written, &pnum))
{
- ret = blk_write(blk, sector + written,
- buf_old + written * 512, pnum);
+ ret = blk_pwrite(blk,
+ (sector + written) << BDRV_SECTOR_BITS,
+ buf_old + written * 512,
+ pnum << BDRV_SECTOR_BITS, 0);
if (ret < 0) {
error_report("Error while writing to COW image: %s",
strerror(-ret));
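
All of the qemu-img conversions above follow the same sector-to-byte translation; a minimal sketch of the pattern (relying on block.h's BDRV_SECTOR_BITS == 9, i.e. 512-byte sectors):

    /* before: sector-based */
    ret = blk_read(blk, sector_num, buf, nb_sectors);

    /* after: byte-based; blk_pwrite() additionally gains a flags argument */
    ret = blk_pread(blk, sector_num << BDRV_SECTOR_BITS,
                    buf, nb_sectors << BDRV_SECTOR_BITS);
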
diff --git a/qemu-io-cmds.c b/qemu-io-cmds.c
index e34f777118..4a00bc604d 100644
--- a/qemu-io-cmds.c
+++ b/qemu-io-cmds.c
@@ -1,7 +1,7 @@
/*
* Command line utility to exercise the QEMU I/O path.
*
- * Copyright (C) 2009 Red Hat, Inc.
+ * Copyright (C) 2009-2016 Red Hat, Inc.
* Copyright (c) 2003-2005 Silicon Graphics, Inc.
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
@@ -345,7 +345,7 @@ static void dump_buffer(const void *buffer, int64_t offset, int64_t len)
}
static void print_report(const char *op, struct timeval *t, int64_t offset,
- int64_t count, int64_t total, int cnt, int Cflag)
+ int64_t count, int64_t total, int cnt, bool Cflag)
{
char s1[64], s2[64], ts[64];
@@ -395,12 +395,6 @@ create_iovec(BlockBackend *blk, QEMUIOVector *qiov, char **argv, int nr_iov,
goto fail;
}
- if (len & 0x1ff) {
- printf("length argument %" PRId64
- " is not sector aligned\n", len);
- goto fail;
- }
-
sizes[i] = len;
count += len;
}
@@ -419,40 +413,6 @@ fail:
return buf;
}
-static int do_read(BlockBackend *blk, char *buf, int64_t offset, int64_t count,
- int64_t *total)
-{
- int ret;
-
- if (count >> 9 > INT_MAX) {
- return -ERANGE;
- }
-
- ret = blk_read(blk, offset >> 9, (uint8_t *)buf, count >> 9);
- if (ret < 0) {
- return ret;
- }
- *total = count;
- return 1;
-}
-
-static int do_write(BlockBackend *blk, char *buf, int64_t offset, int64_t count,
- int64_t *total)
-{
- int ret;
-
- if (count >> 9 > INT_MAX) {
- return -ERANGE;
- }
-
- ret = blk_write(blk, offset >> 9, (uint8_t *)buf, count >> 9);
- if (ret < 0) {
- return ret;
- }
- *total = count;
- return 1;
-}
-
static int do_pread(BlockBackend *blk, char *buf, int64_t offset,
int64_t count, int64_t *total)
{
@@ -468,13 +428,13 @@ static int do_pread(BlockBackend *blk, char *buf, int64_t offset,
}
static int do_pwrite(BlockBackend *blk, char *buf, int64_t offset,
- int64_t count, int64_t *total)
+ int64_t count, int flags, int64_t *total)
{
if (count > INT_MAX) {
return -ERANGE;
}
- *total = blk_pwrite(blk, offset, (uint8_t *)buf, count);
+ *total = blk_pwrite(blk, offset, (uint8_t *)buf, count, flags);
if (*total < 0) {
return *total;
}
@@ -486,6 +446,7 @@ typedef struct {
int64_t offset;
int64_t count;
int64_t *total;
+ int flags;
int ret;
bool done;
} CoWriteZeroes;
@@ -494,8 +455,8 @@ static void coroutine_fn co_write_zeroes_entry(void *opaque)
{
CoWriteZeroes *data = opaque;
- data->ret = blk_co_write_zeroes(data->blk, data->offset / BDRV_SECTOR_SIZE,
- data->count / BDRV_SECTOR_SIZE, 0);
+ data->ret = blk_co_write_zeroes(data->blk, data->offset, data->count,
+ data->flags);
data->done = true;
if (data->ret < 0) {
*data->total = data->ret;
@@ -506,7 +467,7 @@ static void coroutine_fn co_write_zeroes_entry(void *opaque)
}
static int do_co_write_zeroes(BlockBackend *blk, int64_t offset, int64_t count,
- int64_t *total)
+ int flags, int64_t *total)
{
Coroutine *co;
CoWriteZeroes data = {
@@ -514,6 +475,7 @@ static int do_co_write_zeroes(BlockBackend *blk, int64_t offset, int64_t count,
.offset = offset,
.count = count,
.total = total,
+ .flags = flags,
.done = false,
};
@@ -589,8 +551,7 @@ static int do_aio_readv(BlockBackend *blk, QEMUIOVector *qiov,
{
int async_ret = NOT_DONE;
- blk_aio_readv(blk, offset >> 9, qiov, qiov->size >> 9,
- aio_rw_done, &async_ret);
+ blk_aio_preadv(blk, offset, qiov, 0, aio_rw_done, &async_ret);
while (async_ret == NOT_DONE) {
main_loop_wait(false);
}
@@ -600,12 +561,11 @@ static int do_aio_readv(BlockBackend *blk, QEMUIOVector *qiov,
}
static int do_aio_writev(BlockBackend *blk, QEMUIOVector *qiov,
- int64_t offset, int *total)
+ int64_t offset, int flags, int *total)
{
int async_ret = NOT_DONE;
- blk_aio_writev(blk, offset >> 9, qiov, qiov->size >> 9,
- aio_rw_done, &async_ret);
+ blk_aio_pwritev(blk, offset, qiov, flags, aio_rw_done, &async_ret);
while (async_ret == NOT_DONE) {
main_loop_wait(false);
}
@@ -671,7 +631,7 @@ static void read_help(void)
" -b, -- read from the VM state rather than the virtual disk\n"
" -C, -- report statistics in a machine parsable format\n"
" -l, -- length for pattern verification (only with -P)\n"
-" -p, -- use blk_pread to read the file\n"
+" -p, -- ignored for backwards compatibility\n"
" -P, -- use a pattern to verify read data\n"
" -q, -- quiet mode, do not show I/O statistics\n"
" -s, -- start offset for pattern verification (only with -P)\n"
@@ -687,7 +647,7 @@ static const cmdinfo_t read_cmd = {
.cfunc = read_f,
.argmin = 2,
.argmax = -1,
- .args = "[-abCpqv] [-P pattern [-s off] [-l len]] off len",
+ .args = "[-abCqv] [-P pattern [-s off] [-l len]] off len",
.oneline = "reads a number of bytes at a specified offset",
.help = read_help,
};
@@ -695,8 +655,8 @@ static const cmdinfo_t read_cmd = {
static int read_f(BlockBackend *blk, int argc, char **argv)
{
struct timeval t1, t2;
- int Cflag = 0, pflag = 0, qflag = 0, vflag = 0;
- int Pflag = 0, sflag = 0, lflag = 0, bflag = 0;
+ bool Cflag = false, qflag = false, vflag = false;
+ bool Pflag = false, sflag = false, lflag = false, bflag = false;
int c, cnt;
char *buf;
int64_t offset;
@@ -709,13 +669,13 @@ static int read_f(BlockBackend *blk, int argc, char **argv)
while ((c = getopt(argc, argv, "bCl:pP:qs:v")) != -1) {
switch (c) {
case 'b':
- bflag = 1;
+ bflag = true;
break;
case 'C':
- Cflag = 1;
+ Cflag = true;
break;
case 'l':
- lflag = 1;
+ lflag = true;
pattern_count = cvtnum(optarg);
if (pattern_count < 0) {
print_cvtnum_err(pattern_count, optarg);
@@ -723,20 +683,20 @@ static int read_f(BlockBackend *blk, int argc, char **argv)
}
break;
case 'p':
- pflag = 1;
+ /* Ignored for backwards compatibility */
break;
case 'P':
- Pflag = 1;
+ Pflag = true;
pattern = parse_pattern(optarg);
if (pattern < 0) {
return 0;
}
break;
case 'q':
- qflag = 1;
+ qflag = true;
break;
case 's':
- sflag = 1;
+ sflag = true;
pattern_offset = cvtnum(optarg);
if (pattern_offset < 0) {
print_cvtnum_err(pattern_offset, optarg);
@@ -744,7 +704,7 @@ static int read_f(BlockBackend *blk, int argc, char **argv)
}
break;
case 'v':
- vflag = 1;
+ vflag = true;
break;
default:
return qemuio_command_usage(&read_cmd);
@@ -755,11 +715,6 @@ static int read_f(BlockBackend *blk, int argc, char **argv)
return qemuio_command_usage(&read_cmd);
}
- if (bflag && pflag) {
- printf("-b and -p cannot be specified at the same time\n");
- return 0;
- }
-
offset = cvtnum(argv[optind]);
if (offset < 0) {
print_cvtnum_err(offset, argv[optind]);
@@ -790,7 +745,7 @@ static int read_f(BlockBackend *blk, int argc, char **argv)
return 0;
}
- if (!pflag) {
+ if (bflag) {
if (offset & 0x1ff) {
printf("offset %" PRId64 " is not sector aligned\n",
offset);
@@ -806,12 +761,10 @@ static int read_f(BlockBackend *blk, int argc, char **argv)
buf = qemu_io_alloc(blk, count, 0xab);
gettimeofday(&t1, NULL);
- if (pflag) {
- cnt = do_pread(blk, buf, offset, count, &total);
- } else if (bflag) {
+ if (bflag) {
cnt = do_load_vmstate(blk, buf, offset, count, &total);
} else {
- cnt = do_read(blk, buf, offset, count, &total);
+ cnt = do_pread(blk, buf, offset, count, &total);
}
gettimeofday(&t2, NULL);
@@ -875,7 +828,7 @@ static const cmdinfo_t readv_cmd = {
.cfunc = readv_f,
.argmin = 2,
.argmax = -1,
- .args = "[-Cqv] [-P pattern ] off len [len..]",
+ .args = "[-Cqv] [-P pattern] off len [len..]",
.oneline = "reads a number of bytes at a specified offset",
.help = readv_help,
};
@@ -883,7 +836,7 @@ static const cmdinfo_t readv_cmd = {
static int readv_f(BlockBackend *blk, int argc, char **argv)
{
struct timeval t1, t2;
- int Cflag = 0, qflag = 0, vflag = 0;
+ bool Cflag = false, qflag = false, vflag = false;
int c, cnt;
char *buf;
int64_t offset;
@@ -892,25 +845,25 @@ static int readv_f(BlockBackend *blk, int argc, char **argv)
int nr_iov;
QEMUIOVector qiov;
int pattern = 0;
- int Pflag = 0;
+ bool Pflag = false;
while ((c = getopt(argc, argv, "CP:qv")) != -1) {
switch (c) {
case 'C':
- Cflag = 1;
+ Cflag = true;
break;
case 'P':
- Pflag = 1;
+ Pflag = true;
pattern = parse_pattern(optarg);
if (pattern < 0) {
return 0;
}
break;
case 'q':
- qflag = 1;
+ qflag = true;
break;
case 'v':
- vflag = 1;
+ vflag = true;
break;
default:
return qemuio_command_usage(&readv_cmd);
@@ -929,12 +882,6 @@ static int readv_f(BlockBackend *blk, int argc, char **argv)
}
optind++;
- if (offset & 0x1ff) {
- printf("offset %" PRId64 " is not sector aligned\n",
- offset);
- return 0;
- }
-
nr_iov = argc - optind;
buf = create_iovec(blk, &qiov, &argv[optind], nr_iov, 0xab);
if (buf == NULL) {
@@ -991,10 +938,12 @@ static void write_help(void)
" filled with a set pattern (0xcdcdcdcd).\n"
" -b, -- write to the VM state rather than the virtual disk\n"
" -c, -- write compressed data with blk_write_compressed\n"
-" -p, -- use blk_pwrite to write the file\n"
+" -f, -- use Force Unit Access semantics\n"
+" -p, -- ignored for backwards compatibility\n"
" -P, -- use different pattern to fill file\n"
" -C, -- report statistics in a machine parsable format\n"
" -q, -- quiet mode, do not show I/O statistics\n"
+" -u, -- with -z, allow unmapping\n"
" -z, -- write zeroes using blk_co_write_zeroes\n"
"\n");
}
@@ -1007,7 +956,7 @@ static const cmdinfo_t write_cmd = {
.cfunc = write_f,
.argmin = 2,
.argmax = -1,
- .args = "[-bcCpqz] [-P pattern ] off len",
+ .args = "[-bcCfquz] [-P pattern] off len",
.oneline = "writes a number of bytes at a specified offset",
.help = write_help,
};
@@ -1015,8 +964,9 @@ static const cmdinfo_t write_cmd = {
static int write_f(BlockBackend *blk, int argc, char **argv)
{
struct timeval t1, t2;
- int Cflag = 0, pflag = 0, qflag = 0, bflag = 0, Pflag = 0, zflag = 0;
- int cflag = 0;
+ bool Cflag = false, qflag = false, bflag = false;
+ bool Pflag = false, zflag = false, cflag = false;
+ int flags = 0;
int c, cnt;
char *buf = NULL;
int64_t offset;
@@ -1025,32 +975,38 @@ static int write_f(BlockBackend *blk, int argc, char **argv)
int64_t total = 0;
int pattern = 0xcd;
- while ((c = getopt(argc, argv, "bcCpP:qz")) != -1) {
+ while ((c = getopt(argc, argv, "bcCfpP:quz")) != -1) {
switch (c) {
case 'b':
- bflag = 1;
+ bflag = true;
break;
case 'c':
- cflag = 1;
+ cflag = true;
break;
case 'C':
- Cflag = 1;
+ Cflag = true;
+ break;
+ case 'f':
+ flags |= BDRV_REQ_FUA;
break;
case 'p':
- pflag = 1;
+ /* Ignored for backwards compatibility */
break;
case 'P':
- Pflag = 1;
+ Pflag = true;
pattern = parse_pattern(optarg);
if (pattern < 0) {
return 0;
}
break;
case 'q':
- qflag = 1;
+ qflag = true;
+ break;
+ case 'u':
+ flags |= BDRV_REQ_MAY_UNMAP;
break;
case 'z':
- zflag = 1;
+ zflag = true;
break;
default:
return qemuio_command_usage(&write_cmd);
@@ -1061,8 +1017,18 @@ static int write_f(BlockBackend *blk, int argc, char **argv)
return qemuio_command_usage(&write_cmd);
}
- if (bflag + pflag + zflag > 1) {
- printf("-b, -p, or -z cannot be specified at the same time\n");
+ if (bflag && zflag) {
+ printf("-b and -z cannot be specified at the same time\n");
+ return 0;
+ }
+
+ if ((flags & BDRV_REQ_FUA) && (bflag || cflag)) {
+ printf("-f and -b or -c cannot be specified at the same time\n");
+ return 0;
+ }
+
+ if ((flags & BDRV_REQ_MAY_UNMAP) && !zflag) {
+ printf("-u requires -z to be specified\n");
return 0;
}
@@ -1088,7 +1054,7 @@ static int write_f(BlockBackend *blk, int argc, char **argv)
return 0;
}
- if (!pflag) {
+ if (bflag || cflag) {
if (offset & 0x1ff) {
printf("offset %" PRId64 " is not sector aligned\n",
offset);
@@ -1107,16 +1073,14 @@ static int write_f(BlockBackend *blk, int argc, char **argv)
}
gettimeofday(&t1, NULL);
- if (pflag) {
- cnt = do_pwrite(blk, buf, offset, count, &total);
- } else if (bflag) {
+ if (bflag) {
cnt = do_save_vmstate(blk, buf, offset, count, &total);
} else if (zflag) {
- cnt = do_co_write_zeroes(blk, offset, count, &total);
+ cnt = do_co_write_zeroes(blk, offset, count, flags, &total);
} else if (cflag) {
cnt = do_write_compressed(blk, buf, offset, count, &total);
} else {
- cnt = do_write(blk, buf, offset, count, &total);
+ cnt = do_pwrite(blk, buf, offset, count, flags, &total);
}
gettimeofday(&t2, NULL);
@@ -1155,6 +1119,7 @@ writev_help(void)
" filled with a set pattern (0xcdcdcdcd).\n"
" -P, -- use different pattern to fill file\n"
" -C, -- report statistics in a machine parsable format\n"
+" -f, -- use Force Unit Access semantics\n"
" -q, -- quiet mode, do not show I/O statistics\n"
"\n");
}
@@ -1166,7 +1131,7 @@ static const cmdinfo_t writev_cmd = {
.cfunc = writev_f,
.argmin = 2,
.argmax = -1,
- .args = "[-Cq] [-P pattern ] off len [len..]",
+ .args = "[-Cfq] [-P pattern] off len [len..]",
.oneline = "writes a number of bytes at a specified offset",
.help = writev_help,
};
@@ -1174,7 +1139,8 @@ static const cmdinfo_t writev_cmd = {
static int writev_f(BlockBackend *blk, int argc, char **argv)
{
struct timeval t1, t2;
- int Cflag = 0, qflag = 0;
+ bool Cflag = false, qflag = false;
+ int flags = 0;
int c, cnt;
char *buf;
int64_t offset;
@@ -1187,10 +1153,13 @@ static int writev_f(BlockBackend *blk, int argc, char **argv)
while ((c = getopt(argc, argv, "CqP:")) != -1) {
switch (c) {
case 'C':
- Cflag = 1;
+ Cflag = true;
+ break;
+ case 'f':
+ flags |= BDRV_REQ_FUA;
break;
case 'q':
- qflag = 1;
+ qflag = true;
break;
case 'P':
pattern = parse_pattern(optarg);
@@ -1214,12 +1183,6 @@ static int writev_f(BlockBackend *blk, int argc, char **argv)
}
optind++;
- if (offset & 0x1ff) {
- printf("offset %" PRId64 " is not sector aligned\n",
- offset);
- return 0;
- }
-
nr_iov = argc - optind;
buf = create_iovec(blk, &qiov, &argv[optind], nr_iov, pattern);
if (buf == NULL) {
@@ -1227,7 +1190,7 @@ static int writev_f(BlockBackend *blk, int argc, char **argv)
}
gettimeofday(&t1, NULL);
- cnt = do_aio_writev(blk, &qiov, offset, &total);
+ cnt = do_aio_writev(blk, &qiov, offset, flags, &total);
gettimeofday(&t2, NULL);
if (cnt < 0) {
@@ -1283,7 +1246,7 @@ static const cmdinfo_t multiwrite_cmd = {
static int multiwrite_f(BlockBackend *blk, int argc, char **argv)
{
struct timeval t1, t2;
- int Cflag = 0, qflag = 0;
+ bool Cflag = false, qflag = false;
int c, cnt;
char **buf;
int64_t offset, first_offset = 0;
@@ -1299,10 +1262,10 @@ static int multiwrite_f(BlockBackend *blk, int argc, char **argv)
while ((c = getopt(argc, argv, "CqP:")) != -1) {
switch (c) {
case 'C':
- Cflag = 1;
+ Cflag = true;
break;
case 'q':
- qflag = 1;
+ qflag = true;
break;
case 'P':
pattern = parse_pattern(optarg);
@@ -1412,11 +1375,11 @@ struct aio_ctx {
QEMUIOVector qiov;
int64_t offset;
char *buf;
- int qflag;
- int vflag;
- int Cflag;
- int Pflag;
- int zflag;
+ bool qflag;
+ bool vflag;
+ bool Cflag;
+ bool Pflag;
+ bool zflag;
BlockAcctCookie acct;
int pattern;
struct timeval t1;
@@ -1525,7 +1488,7 @@ static const cmdinfo_t aio_read_cmd = {
.cfunc = aio_read_f,
.argmin = 2,
.argmax = -1,
- .args = "[-Cqv] [-P pattern ] off len [len..]",
+ .args = "[-Cqv] [-P pattern] off len [len..]",
.oneline = "asynchronously reads a number of bytes",
.help = aio_read_help,
};
@@ -1539,10 +1502,10 @@ static int aio_read_f(BlockBackend *blk, int argc, char **argv)
while ((c = getopt(argc, argv, "CP:qv")) != -1) {
switch (c) {
case 'C':
- ctx->Cflag = 1;
+ ctx->Cflag = true;
break;
case 'P':
- ctx->Pflag = 1;
+ ctx->Pflag = true;
ctx->pattern = parse_pattern(optarg);
if (ctx->pattern < 0) {
g_free(ctx);
@@ -1550,10 +1513,10 @@ static int aio_read_f(BlockBackend *blk, int argc, char **argv)
}
break;
case 'q':
- ctx->qflag = 1;
+ ctx->qflag = true;
break;
case 'v':
- ctx->vflag = 1;
+ ctx->vflag = true;
break;
default:
g_free(ctx);
@@ -1574,14 +1537,6 @@ static int aio_read_f(BlockBackend *blk, int argc, char **argv)
}
optind++;
- if (ctx->offset & 0x1ff) {
- printf("offset %" PRId64 " is not sector aligned\n",
- ctx->offset);
- block_acct_invalid(blk_get_stats(blk), BLOCK_ACCT_READ);
- g_free(ctx);
- return 0;
- }
-
nr_iov = argc - optind;
ctx->buf = create_iovec(blk, &ctx->qiov, &argv[optind], nr_iov, 0xab);
if (ctx->buf == NULL) {
@@ -1593,8 +1548,7 @@ static int aio_read_f(BlockBackend *blk, int argc, char **argv)
gettimeofday(&ctx->t1, NULL);
block_acct_start(blk_get_stats(blk), &ctx->acct, ctx->qiov.size,
BLOCK_ACCT_READ);
- blk_aio_readv(blk, ctx->offset >> 9, &ctx->qiov,
- ctx->qiov.size >> 9, aio_read_done, ctx);
+ blk_aio_preadv(blk, ctx->offset, &ctx->qiov, 0, aio_read_done, ctx);
return 0;
}
@@ -1614,7 +1568,9 @@ static void aio_write_help(void)
" used to ensure all outstanding aio requests have been completed.\n"
" -P, -- use different pattern to fill file\n"
" -C, -- report statistics in a machine parsable format\n"
+" -f, -- use Force Unit Access semantics\n"
" -q, -- quiet mode, do not show I/O statistics\n"
+" -u, -- with -z, allow unmapping\n"
" -z, -- write zeroes using blk_aio_write_zeroes\n"
"\n");
}
@@ -1626,7 +1582,7 @@ static const cmdinfo_t aio_write_cmd = {
.cfunc = aio_write_f,
.argmin = 2,
.argmax = -1,
- .args = "[-Cqz] [-P pattern ] off len [len..]",
+ .args = "[-Cfquz] [-P pattern] off len [len..]",
.oneline = "asynchronously writes a number of bytes",
.help = aio_write_help,
};
@@ -1636,15 +1592,22 @@ static int aio_write_f(BlockBackend *blk, int argc, char **argv)
int nr_iov, c;
int pattern = 0xcd;
struct aio_ctx *ctx = g_new0(struct aio_ctx, 1);
+ int flags = 0;
ctx->blk = blk;
- while ((c = getopt(argc, argv, "CqP:z")) != -1) {
+ while ((c = getopt(argc, argv, "CfqP:z")) != -1) {
switch (c) {
case 'C':
- ctx->Cflag = 1;
+ ctx->Cflag = true;
+ break;
+ case 'f':
+ flags |= BDRV_REQ_FUA;
break;
case 'q':
- ctx->qflag = 1;
+ ctx->qflag = true;
+ break;
+ case 'u':
+ flags |= BDRV_REQ_MAY_UNMAP;
break;
case 'P':
pattern = parse_pattern(optarg);
@@ -1654,7 +1617,7 @@ static int aio_write_f(BlockBackend *blk, int argc, char **argv)
}
break;
case 'z':
- ctx->zflag = 1;
+ ctx->zflag = true;
break;
default:
g_free(ctx);
@@ -1673,6 +1636,11 @@ static int aio_write_f(BlockBackend *blk, int argc, char **argv)
return 0;
}
+ if ((flags & BDRV_REQ_MAY_UNMAP) && !ctx->zflag) {
+ printf("-u requires -z to be specified\n");
+ return 0;
+ }
+
if (ctx->zflag && ctx->Pflag) {
printf("-z and -P cannot be specified at the same time\n");
g_free(ctx);
@@ -1687,24 +1655,17 @@ static int aio_write_f(BlockBackend *blk, int argc, char **argv)
}
optind++;
- if (ctx->offset & 0x1ff) {
- printf("offset %" PRId64 " is not sector aligned\n",
- ctx->offset);
- block_acct_invalid(blk_get_stats(blk), BLOCK_ACCT_WRITE);
- g_free(ctx);
- return 0;
- }
-
if (ctx->zflag) {
int64_t count = cvtnum(argv[optind]);
if (count < 0) {
print_cvtnum_err(count, argv[optind]);
+ g_free(ctx);
return 0;
}
ctx->qiov.size = count;
- blk_aio_write_zeroes(blk, ctx->offset >> 9, count >> 9, 0,
- aio_write_done, ctx);
+ blk_aio_write_zeroes(blk, ctx->offset, count, flags, aio_write_done,
+ ctx);
} else {
nr_iov = argc - optind;
ctx->buf = create_iovec(blk, &ctx->qiov, &argv[optind], nr_iov,
@@ -1719,8 +1680,8 @@ static int aio_write_f(BlockBackend *blk, int argc, char **argv)
block_acct_start(blk_get_stats(blk), &ctx->acct, ctx->qiov.size,
BLOCK_ACCT_WRITE);
- blk_aio_writev(blk, ctx->offset >> 9, &ctx->qiov,
- ctx->qiov.size >> 9, aio_write_done, ctx);
+ blk_aio_pwritev(blk, ctx->offset, &ctx->qiov, flags, aio_write_done,
+ ctx);
}
return 0;
}
@@ -1884,17 +1845,17 @@ static const cmdinfo_t discard_cmd = {
static int discard_f(BlockBackend *blk, int argc, char **argv)
{
struct timeval t1, t2;
- int Cflag = 0, qflag = 0;
+ bool Cflag = false, qflag = false;
int c, ret;
int64_t offset, count;
while ((c = getopt(argc, argv, "Cq")) != -1) {
switch (c) {
case 'C':
- Cflag = 1;
+ Cflag = true;
break;
case 'q':
- qflag = 1;
+ qflag = true;
break;
default:
return qemuio_command_usage(&discard_cmd);
diff --git a/qemu-io.c b/qemu-io.c
index 0598251e7c..5ef3ef7f35 100644
--- a/qemu-io.c
+++ b/qemu-io.c
@@ -101,12 +101,15 @@ static void open_help(void)
" opens a new file in the requested mode\n"
"\n"
" Example:\n"
-" 'open -Cn /tmp/data' - creates/opens data file read-write and uncached\n"
+" 'open -n -o driver=raw /tmp/data' - opens raw data file read-write, uncached\n"
"\n"
" Opens a file for subsequent use by all of the other qemu-io commands.\n"
" -r, -- open file read-only\n"
" -s, -- use snapshot file\n"
-" -n, -- disable host cache\n"
+" -n, -- disable host cache, short for -t none\n"
+" -k, -- use kernel AIO implementation (on Linux only)\n"
+" -t, -- use the given cache mode for the image\n"
+" -d, -- use the given discard mode for the image\n"
" -o, -- options to be given to the block driver"
"\n");
}
@@ -120,7 +123,7 @@ static const cmdinfo_t open_cmd = {
.argmin = 1,
.argmax = -1,
.flags = CMD_NOFILE_OK,
- .args = "[-Crsn] [-o options] [path]",
+ .args = "[-rsnk] [-t cache] [-d discard] [-o options] [path]",
.oneline = "open the file specified by path",
.help = open_help,
};
@@ -137,14 +140,14 @@ static QemuOptsList empty_opts = {
static int open_f(BlockBackend *blk, int argc, char **argv)
{
- int flags = 0;
+ int flags = BDRV_O_UNMAP;
int readonly = 0;
bool writethrough = true;
int c;
QemuOpts *qopts;
QDict *opts;
- while ((c = getopt(argc, argv, "snrgo:")) != -1) {
+ while ((c = getopt(argc, argv, "snro:kt:d:")) != -1) {
switch (c) {
case 's':
flags |= BDRV_O_SNAPSHOT;
@@ -156,9 +159,27 @@ static int open_f(BlockBackend *blk, int argc, char **argv)
case 'r':
readonly = 1;
break;
+ case 'k':
+ flags |= BDRV_O_NATIVE_AIO;
+ break;
+ case 't':
+ if (bdrv_parse_cache_mode(optarg, &flags, &writethrough) < 0) {
+ error_report("Invalid cache option: %s", optarg);
+ qemu_opts_reset(&empty_opts);
+ return 0;
+ }
+ break;
+ case 'd':
+ if (bdrv_parse_discard_flags(optarg, &flags) < 0) {
+ error_report("Invalid discard option: %s", optarg);
+ qemu_opts_reset(&empty_opts);
+ return 0;
+ }
+ break;
case 'o':
if (imageOpts) {
printf("--image-opts and 'open -o' are mutually exclusive\n");
+ qemu_opts_reset(&empty_opts);
return 0;
}
if (!qemu_opts_parse_noisily(&empty_opts, optarg, false)) {
@@ -216,20 +237,22 @@ static const cmdinfo_t quit_cmd = {
static void usage(const char *name)
{
printf(
-"Usage: %s [-h] [-V] [-rsnm] [-f FMT] [-c STRING] ... [file]\n"
+"Usage: %s [OPTIONS]... [-c STRING]... [file]\n"
"QEMU Disk exerciser\n"
"\n"
" --object OBJECTDEF define an object such as 'secret' for\n"
" passwords and/or encryption keys\n"
+" --image-opts treat file as option string\n"
" -c, --cmd STRING execute command with its arguments\n"
" from the given string\n"
" -f, --format FMT specifies the block driver to use\n"
" -r, --read-only export read-only\n"
" -s, --snapshot use snapshot file\n"
-" -n, --nocache disable host cache\n"
+" -n, --nocache disable host cache, short for -t none\n"
" -m, --misalign misalign allocations for O_DIRECT\n"
" -k, --native-aio use kernel AIO implementation (on Linux only)\n"
" -t, --cache=MODE use the given cache mode for the image\n"
+" -d, --discard=MODE use the given discard mode for the image\n"
" -T, --trace FILE enable trace events listed in the given file\n"
" -h, --help display this help and exit\n"
" -V, --version output version information and exit\n"
@@ -410,11 +433,10 @@ static QemuOptsList file_opts = {
int main(int argc, char **argv)
{
int readonly = 0;
- const char *sopt = "hVc:d:f:rsnmgkt:T:";
+ const char *sopt = "hVc:d:f:rsnmkt:T:";
const struct option lopt[] = {
{ "help", no_argument, NULL, 'h' },
{ "version", no_argument, NULL, 'V' },
- { "offset", required_argument, NULL, 'o' },
{ "cmd", required_argument, NULL, 'c' },
{ "format", required_argument, NULL, 'f' },
{ "read-only", no_argument, NULL, 'r' },
diff --git a/qemu-nbd.c b/qemu-nbd.c
index c55b40ffc8..3e541131f4 100644
--- a/qemu-nbd.c
+++ b/qemu-nbd.c
@@ -46,6 +46,8 @@
#define QEMU_NBD_OPT_TLSCREDS 261
#define QEMU_NBD_OPT_IMAGE_OPTS 262
+#define MBR_SIZE 512
+
static NBDExport *exp;
static bool newproto;
static int verbose;
@@ -159,12 +161,13 @@ static int find_partition(BlockBackend *blk, int partition,
off_t *offset, off_t *size)
{
struct partition_record mbr[4];
- uint8_t data[512];
+ uint8_t data[MBR_SIZE];
int i;
int ext_partnum = 4;
int ret;
- if ((ret = blk_read(blk, 0, data, 1)) < 0) {
+ ret = blk_pread(blk, 0, data, sizeof(data));
+ if (ret < 0) {
error_report("error while reading: %s", strerror(-ret));
exit(EXIT_FAILURE);
}
@@ -182,10 +185,12 @@ static int find_partition(BlockBackend *blk, int partition,
if (mbr[i].system == 0xF || mbr[i].system == 0x5) {
struct partition_record ext[4];
- uint8_t data1[512];
+ uint8_t data1[MBR_SIZE];
int j;
- if ((ret = blk_read(blk, mbr[i].start_sector_abs, data1, 1)) < 0) {
+ ret = blk_pread(blk, mbr[i].start_sector_abs * MBR_SIZE,
+ data1, sizeof(data1));
+ if (ret < 0) {
error_report("error while reading: %s", strerror(-ret));
exit(EXIT_FAILURE);
}
diff --git a/qmp-commands.hx b/qmp-commands.hx
index de896a5a31..94847e5b48 100644
--- a/qmp-commands.hx
+++ b/qmp-commands.hx
@@ -4398,6 +4398,59 @@ Example:
EQMP
{
+ .name = "x-blockdev-change",
+ .args_type = "parent:B,child:B?,node:B?",
+ .mhandler.cmd_new = qmp_marshal_x_blockdev_change,
+ },
+
+SQMP
+x-blockdev-change
+-----------------
+
+Dynamically reconfigure the block driver state graph. It can be used
+to add, remove, insert or replace a graph node. Currently only the
+Quorum driver implements this feature to add or remove its child. This
+is useful to fix a broken quorum child.
+
+If @node is specified, it will be inserted under @parent. @child
+may not be specified in this case. If both @parent and @child are
+specified but @node is not, @child will be detached from @parent.
+
+Arguments:
+- "parent": the id or name of the parent node (json-string)
+- "child": the name of a child under the given parent node (json-string, optional)
+- "node": the name of the node that will be added (json-string, optional)
+
+Note: this command is experimental, and not a stable API. It doesn't
+support all kinds of operations, all kinds of children, or all block
+drivers.
+
+Warning: The data in a new quorum child MUST be consistent with that of
+the rest of the array.
+
+Example:
+
+Add a new node to a quorum
+-> { "execute": "blockdev-add",
+ "arguments": { "options": { "driver": "raw",
+ "node-name": "new_node",
+ "file": { "driver": "file",
+ "filename": "test.raw" } } } }
+<- { "return": {} }
+-> { "execute": "x-blockdev-change",
+ "arguments": { "parent": "disk1",
+ "node": "new_node" } }
+<- { "return": {} }
+
+Delete a quorum's node
+-> { "execute": "x-blockdev-change",
+ "arguments": { "parent": "disk1",
+ "child": "children.1" } }
+<- { "return": {} }
+
+EQMP
+
+ {
.name = "query-named-block-nodes",
.args_type = "",
.mhandler.cmd_new = qmp_marshal_query_named_block_nodes,
diff --git a/qmp.c b/qmp.c
index 9d0953bc29..e784a67631 100644
--- a/qmp.c
+++ b/qmp.c
@@ -663,7 +663,7 @@ void qmp_object_add(const char *type, const char *id,
}
}
- qiv = qmp_input_visitor_new(props);
+ qiv = qmp_input_visitor_new(props, true);
obj = user_creatable_add_type(type, id, pdict,
qmp_input_get_visitor(qiv), errp);
qmp_input_visitor_cleanup(qiv);
diff --git a/qom/cpu.c b/qom/cpu.c
index c9007d3d06..751e992de8 100644
--- a/qom/cpu.c
+++ b/qom/cpu.c
@@ -254,7 +254,6 @@ static void cpu_common_reset(CPUState *cpu)
}
cpu->interrupt_request = 0;
- cpu->current_tb = NULL;
cpu->halted = 0;
cpu->mem_io_pc = 0;
cpu->mem_io_vaddr = 0;
diff --git a/qom/object.c b/qom/object.c
index 8e6e68dffc..3bc8a009bb 100644
--- a/qom/object.c
+++ b/qom/object.c
@@ -2036,10 +2036,9 @@ static void property_get_tm(Object *obj, Visitor *v, const char *name,
if (err) {
goto out_end;
}
+ visit_check_struct(v, &err);
out_end:
- error_propagate(errp, err);
- err = NULL;
- visit_end_struct(v, errp);
+ visit_end_struct(v);
out:
error_propagate(errp, err);
diff --git a/qom/object_interfaces.c b/qom/object_interfaces.c
index 393189024f..51e62e29d6 100644
--- a/qom/object_interfaces.c
+++ b/qom/object_interfaces.c
@@ -42,7 +42,7 @@ Object *user_creatable_add(const QDict *qdict,
char *type = NULL;
char *id = NULL;
Object *obj = NULL;
- Error *local_err = NULL, *end_err = NULL;
+ Error *local_err = NULL;
QDict *pdict;
pdict = qdict_clone_shallow(qdict);
@@ -63,21 +63,15 @@ Object *user_creatable_add(const QDict *qdict,
if (local_err) {
goto out_visit;
}
-
- obj = user_creatable_add_type(type, id, pdict, v, &local_err);
+ visit_check_struct(v, &local_err);
if (local_err) {
goto out_visit;
}
- out_visit:
- visit_end_struct(v, &end_err);
- if (end_err) {
- error_propagate(&local_err, end_err);
- if (obj) {
- user_creatable_del(id, NULL);
- }
- goto out;
- }
+ obj = user_creatable_add_type(type, id, pdict, v, &local_err);
+
+out_visit:
+ visit_end_struct(v);
out:
QDECREF(pdict);
@@ -118,15 +112,25 @@ Object *user_creatable_add_type(const char *type, const char *id,
return NULL;
}
+ assert(qdict);
obj = object_new(type);
- if (qdict) {
- for (e = qdict_first(qdict); e; e = qdict_next(qdict, e)) {
- object_property_set(obj, v, e->key, &local_err);
- if (local_err) {
- goto out;
- }
+ visit_start_struct(v, NULL, NULL, 0, &local_err);
+ if (local_err) {
+ goto out;
+ }
+ for (e = qdict_first(qdict); e; e = qdict_next(qdict, e)) {
+ object_property_set(obj, v, e->key, &local_err);
+ if (local_err) {
+ break;
}
}
+ if (!local_err) {
+ visit_check_struct(v, &local_err);
+ }
+ visit_end_struct(v);
+ if (local_err) {
+ goto out;
+ }
object_property_add_child(object_get_objects_root(),
id, obj, &local_err);
diff --git a/qom/qom-qobject.c b/qom/qom-qobject.c
index e6b17c1f1b..b66088d730 100644
--- a/qom/qom-qobject.c
+++ b/qom/qom-qobject.c
@@ -22,7 +22,8 @@ void object_property_set_qobject(Object *obj, QObject *value,
const char *name, Error **errp)
{
QmpInputVisitor *qiv;
- qiv = qmp_input_visitor_new(value);
+ /* TODO: Should we reject, rather than ignore, excess input? */
+ qiv = qmp_input_visitor_new(value, false);
object_property_set(obj, qmp_input_get_visitor(qiv), name, errp);
qmp_input_visitor_cleanup(qiv);
diff --git a/replay/replay-input.c b/replay/replay-input.c
index 06babe0ecc..03e99d5aba 100644
--- a/replay/replay-input.c
+++ b/replay/replay-input.c
@@ -37,7 +37,7 @@ static InputEvent *qapi_clone_InputEvent(InputEvent *src)
return NULL;
}
- qiv = qmp_input_visitor_new(obj);
+ qiv = qmp_input_visitor_new(obj, true);
iv = qmp_input_get_visitor(qiv);
visit_type_InputEvent(iv, NULL, &dst, &error_abort);
qmp_input_visitor_cleanup(qiv);
diff --git a/scripts/qapi-commands.py b/scripts/qapi-commands.py
index b570069faa..8c6acb3f3f 100644
--- a/scripts/qapi-commands.py
+++ b/scripts/qapi-commands.py
@@ -115,13 +115,21 @@ def gen_marshal(name, arg_type, ret_type):
if arg_type and arg_type.members:
ret += mcgen('''
- QmpInputVisitor *qiv = qmp_input_visitor_new_strict(QOBJECT(args));
+ QmpInputVisitor *qiv = qmp_input_visitor_new(QOBJECT(args), true);
QapiDeallocVisitor *qdv;
Visitor *v;
%(c_name)s arg = {0};
v = qmp_input_get_visitor(qiv);
+ visit_start_struct(v, NULL, NULL, 0, &err);
+ if (err) {
+ goto out;
+ }
visit_type_%(c_name)s_members(v, &arg, &err);
+ if (!err) {
+ visit_check_struct(v, &err);
+ }
+ visit_end_struct(v);
if (err) {
goto out;
}
@@ -150,7 +158,9 @@ out:
qmp_input_visitor_cleanup(qiv);
qdv = qapi_dealloc_visitor_new();
v = qapi_dealloc_get_visitor(qdv);
+ visit_start_struct(v, NULL, NULL, 0, NULL);
visit_type_%(c_name)s_members(v, &arg, NULL);
+ visit_end_struct(v);
qapi_dealloc_visitor_cleanup(qdv);
''',
c_name=arg_type.c_name())
diff --git a/scripts/qapi-event.py b/scripts/qapi-event.py
index 9b5c5b535d..21fb16744d 100644
--- a/scripts/qapi-event.py
+++ b/scripts/qapi-event.py
@@ -98,7 +98,10 @@ def gen_event_send(name, arg_type):
goto out;
}
visit_type_%(c_name)s_members(v, &param, &err);
- visit_end_struct(v, err ? NULL : &err);
+ if (!err) {
+ visit_check_struct(v, &err);
+ }
+ visit_end_struct(v);
if (err) {
goto out;
}
diff --git a/scripts/qapi-visit.py b/scripts/qapi-visit.py
index 31d2330356..70ea8caef5 100644
--- a/scripts/qapi-visit.py
+++ b/scripts/qapi-visit.py
@@ -108,30 +108,32 @@ out:
def gen_visit_list(name, element_type):
- # FIXME: if *obj is NULL on entry, and the first visit_next_list()
- # assigns to *obj, while a later one fails, we should clean up *obj
- # rather than leaving it non-NULL. As currently written, the caller must
- # call qapi_free_FOOList() to avoid a memory leak of the partial FOOList.
return mcgen('''
void visit_type_%(c_name)s(Visitor *v, const char *name, %(c_name)s **obj, Error **errp)
{
Error *err = NULL;
- GenericList *i, **prev;
+ %(c_name)s *tail;
+ size_t size = sizeof(**obj);
- visit_start_list(v, name, &err);
+ visit_start_list(v, name, (GenericList **)obj, size, &err);
if (err) {
goto out;
}
- for (prev = (GenericList **)obj;
- !err && (i = visit_next_list(v, prev, sizeof(**obj))) != NULL;
- prev = &i) {
- %(c_name)s *native_i = (%(c_name)s *)i;
- visit_type_%(c_elt_type)s(v, NULL, &native_i->value, &err);
+ for (tail = *obj; tail;
+ tail = (%(c_name)s *)visit_next_list(v, (GenericList *)tail, size)) {
+ visit_type_%(c_elt_type)s(v, NULL, &tail->value, &err);
+ if (err) {
+ break;
+ }
}
visit_end_list(v);
+ if (err && visit_is_input(v)) {
+ qapi_free_%(c_name)s(*obj);
+ *obj = NULL;
+ }
out:
error_propagate(errp, err);
}
@@ -186,9 +188,10 @@ void visit_type_%(c_name)s(Visitor *v, const char *name, %(c_name)s **obj, Error
break;
}
visit_type_%(c_type)s_members(v, &(*obj)->u.%(c_name)s, &err);
- error_propagate(errp, err);
- err = NULL;
- visit_end_struct(v, &err);
+ if (!err) {
+ visit_check_struct(v, &err);
+ }
+ visit_end_struct(v);
''',
c_type=var.type.c_name(),
c_name=c_name(var.name))
@@ -208,20 +211,20 @@ void visit_type_%(c_name)s(Visitor *v, const char *name, %(c_name)s **obj, Error
"%(name)s");
}
visit_end_alternate(v);
+ if (err && visit_is_input(v)) {
+ qapi_free_%(c_name)s(*obj);
+ *obj = NULL;
+ }
out:
error_propagate(errp, err);
}
''',
- name=name)
+ name=name, c_name=c_name(name))
return ret
def gen_visit_object(name, base, members, variants):
- # FIXME: if *obj is NULL on entry, and visit_start_struct() assigns to
- # *obj, but then visit_type_FOO_members() fails, we should clean up *obj
- # rather than leaving it non-NULL. As currently written, the caller must
- # call qapi_free_FOO() to avoid a memory leak of the partial FOO.
return mcgen('''
void visit_type_%(c_name)s(Visitor *v, const char *name, %(c_name)s **obj, Error **errp)
@@ -236,10 +239,16 @@ void visit_type_%(c_name)s(Visitor *v, const char *name, %(c_name)s **obj, Error
goto out_obj;
}
visit_type_%(c_name)s_members(v, *obj, &err);
- error_propagate(errp, err);
- err = NULL;
+ if (err) {
+ goto out_obj;
+ }
+ visit_check_struct(v, &err);
out_obj:
- visit_end_struct(v, &err);
+ visit_end_struct(v);
+ if (err && visit_is_input(v)) {
+ qapi_free_%(c_name)s(*obj);
+ *obj = NULL;
+ }
out:
error_propagate(errp, err);
}
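
For a hypothetical FooList element type, the reworked gen_visit_list() template above expands roughly to the following (names illustrative; the real code is emitted by qapi-visit.py):

    void visit_type_FooList(Visitor *v, const char *name, FooList **obj,
                            Error **errp)
    {
        Error *err = NULL;
        FooList *tail;
        size_t size = sizeof(**obj);

        visit_start_list(v, name, (GenericList **)obj, size, &err);
        if (err) {
            goto out;
        }

        /* Walk the tail-linked list; input visitors allocate each element. */
        for (tail = *obj; tail;
             tail = (FooList *)visit_next_list(v, (GenericList *)tail, size)) {
            visit_type_Foo(v, NULL, &tail->value, &err);
            if (err) {
                break;
            }
        }

        visit_end_list(v);
        /* New: on input error, free the partial list instead of leaking it. */
        if (err && visit_is_input(v)) {
            qapi_free_FooList(*obj);
            *obj = NULL;
        }

    out:
        error_propagate(errp, err);
    }
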
diff --git a/target-alpha/cpu.h b/target-alpha/cpu.h
index 420f2a53fe..b25d7d09d0 100644
--- a/target-alpha/cpu.h
+++ b/target-alpha/cpu.h
@@ -465,7 +465,7 @@ enum {
};
static inline void cpu_get_tb_cpu_state(CPUAlphaState *env, target_ulong *pc,
- target_ulong *cs_base, int *pflags)
+ target_ulong *cs_base, uint32_t *pflags)
{
int flags = 0;
diff --git a/target-alpha/translate.c b/target-alpha/translate.c
index 5b86992dd3..8c2183a418 100644
--- a/target-alpha/translate.c
+++ b/target-alpha/translate.c
@@ -460,12 +460,16 @@ static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
|| ctx->singlestep_enabled || singlestep) {
return false;
}
+#ifndef CONFIG_USER_ONLY
/* If the destination is in the superpage, the page perms can't change. */
if (in_superpage(ctx, dest)) {
return true;
}
/* Check for the dest on the same page as the start of the TB. */
return ((ctx->tb->pc ^ dest) & TARGET_PAGE_MASK) == 0;
+#else
+ return true;
+#endif
}
static ExitStatus gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
diff --git a/target-arm/Makefile.objs b/target-arm/Makefile.objs
index 82cbe6bbad..f20641163c 100644
--- a/target-arm/Makefile.objs
+++ b/target-arm/Makefile.objs
@@ -9,3 +9,4 @@ obj-y += neon_helper.o iwmmxt_helper.o
obj-y += gdbstub.o
obj-$(TARGET_AARCH64) += cpu64.o translate-a64.o helper-a64.o gdbstub64.o
obj-y += crypto_helper.o
+obj-y += arm-powerctl.o
diff --git a/target-arm/arm-powerctl.c b/target-arm/arm-powerctl.c
new file mode 100644
index 0000000000..cb9919b465
--- /dev/null
+++ b/target-arm/arm-powerctl.c
@@ -0,0 +1,224 @@
+/*
+ * QEMU support -- ARM Power Control specific functions.
+ *
+ * Copyright (c) 2016 Jean-Christophe Dubois
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include <cpu.h>
+#include <cpu-qom.h>
+#include "internals.h"
+#include "arm-powerctl.h"
+
+#ifndef DEBUG_ARM_POWERCTL
+#define DEBUG_ARM_POWERCTL 0
+#endif
+
+#define DPRINTF(fmt, args...) \
+ do { \
+ if (DEBUG_ARM_POWERCTL) { \
+ fprintf(stderr, "[ARM]%s: " fmt , __func__, ##args); \
+ } \
+ } while (0)
+
+CPUState *arm_get_cpu_by_id(uint64_t id)
+{
+ CPUState *cpu;
+
+ DPRINTF("cpu %" PRId64 "\n", id);
+
+ CPU_FOREACH(cpu) {
+ ARMCPU *armcpu = ARM_CPU(cpu);
+
+ if (armcpu->mp_affinity == id) {
+ return cpu;
+ }
+ }
+
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "[ARM]%s: Requesting unknown CPU %" PRId64 "\n",
+ __func__, id);
+
+ return NULL;
+}
+
+int arm_set_cpu_on(uint64_t cpuid, uint64_t entry, uint64_t context_id,
+ uint32_t target_el, bool target_aa64)
+{
+ CPUState *target_cpu_state;
+ ARMCPU *target_cpu;
+
+ DPRINTF("cpu %" PRId64 " (EL %d, %s) @ 0x%" PRIx64 " with R0 = 0x%" PRIx64
+ "\n", cpuid, target_el, target_aa64 ? "aarch64" : "aarch32", entry,
+ context_id);
+
+ /* The requested EL needs to be in the 1 to 3 range */
+ assert((target_el > 0) && (target_el < 4));
+
+ if (target_aa64 && (entry & 3)) {
+ /*
+ * If we are booting in AArch64 mode then "entry" needs to be
+ * 4-byte aligned.
+ */
+ return QEMU_ARM_POWERCTL_INVALID_PARAM;
+ }
+
+ /* Retrieve the cpu we are powering up */
+ target_cpu_state = arm_get_cpu_by_id(cpuid);
+ if (!target_cpu_state) {
+ /* The cpu was not found */
+ return QEMU_ARM_POWERCTL_INVALID_PARAM;
+ }
+
+ target_cpu = ARM_CPU(target_cpu_state);
+ if (!target_cpu->powered_off) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "[ARM]%s: CPU %" PRId64 " is already on\n",
+ __func__, cpuid);
+ return QEMU_ARM_POWERCTL_ALREADY_ON;
+ }
+
+ /*
+ * The newly brought-up CPU is requested to enter the exception level
+ * "target_el" and be in the requested mode (AArch64 or AArch32).
+ */
+
+ if (((target_el == 3) && !arm_feature(&target_cpu->env, ARM_FEATURE_EL3)) ||
+ ((target_el == 2) && !arm_feature(&target_cpu->env, ARM_FEATURE_EL2))) {
+ /*
+ * The CPU does not support the requested exception level
+ */
+ return QEMU_ARM_POWERCTL_INVALID_PARAM;
+ }
+
+ if (!target_aa64 && arm_feature(&target_cpu->env, ARM_FEATURE_AARCH64)) {
+ /*
+ * For now we don't support booting an AArch64 CPU in AArch32 mode
+ * TODO: We should add this support later
+ */
+ qemu_log_mask(LOG_UNIMP,
+ "[ARM]%s: Starting AArch64 CPU %" PRId64
+ " in AArch32 mode is not supported yet\n",
+ __func__, cpuid);
+ return QEMU_ARM_POWERCTL_INVALID_PARAM;
+ }
+
+ /* Initialize the cpu we are turning on */
+ cpu_reset(target_cpu_state);
+ target_cpu->powered_off = false;
+ target_cpu_state->halted = 0;
+
+ if (target_aa64) {
+ if ((target_el < 3) && arm_feature(&target_cpu->env, ARM_FEATURE_EL3)) {
+ /*
+ * As target mode is AArch64, we need to set lower
+ * exception level (the requested level 2) to AArch64
+ */
+ target_cpu->env.cp15.scr_el3 |= SCR_RW;
+ }
+
+ if ((target_el < 2) && arm_feature(&target_cpu->env, ARM_FEATURE_EL2)) {
+ /*
+ * As target mode is AArch64, we need to set lower
+ * exception level (the requested level 1) to AArch64
+ */
+ target_cpu->env.cp15.hcr_el2 |= HCR_RW;
+ }
+
+ target_cpu->env.pstate = aarch64_pstate_mode(target_el, true);
+ } else {
+ /* We are requested to boot in AArch32 mode */
+ static uint32_t mode_for_el[] = { 0,
+ ARM_CPU_MODE_SVC,
+ ARM_CPU_MODE_HYP,
+ ARM_CPU_MODE_SVC };
+
+ cpsr_write(&target_cpu->env, mode_for_el[target_el], CPSR_M,
+ CPSRWriteRaw);
+ }
+
+ if (target_el == 3) {
+ /* Processor is in secure mode */
+ target_cpu->env.cp15.scr_el3 &= ~SCR_NS;
+ } else {
+ /* Processor is not in secure mode */
+ target_cpu->env.cp15.scr_el3 |= SCR_NS;
+ }
+
+ /* We check if the started CPU is now at the correct level */
+ assert(target_el == arm_current_el(&target_cpu->env));
+
+ if (target_aa64) {
+ target_cpu->env.xregs[0] = context_id;
+ target_cpu->env.thumb = false;
+ } else {
+ target_cpu->env.regs[0] = context_id;
+ target_cpu->env.thumb = entry & 1;
+ entry &= 0xfffffffe;
+ }
+
+ /* Start the new CPU at the requested address */
+ cpu_set_pc(target_cpu_state, entry);
+
+ /* We are good to go */
+ return QEMU_ARM_POWERCTL_RET_SUCCESS;
+}
+
+int arm_set_cpu_off(uint64_t cpuid)
+{
+ CPUState *target_cpu_state;
+ ARMCPU *target_cpu;
+
+ DPRINTF("cpu %" PRId64 "\n", cpuid);
+
+ /* change to the cpu we are powering down */
+ target_cpu_state = arm_get_cpu_by_id(cpuid);
+ if (!target_cpu_state) {
+ return QEMU_ARM_POWERCTL_INVALID_PARAM;
+ }
+ target_cpu = ARM_CPU(target_cpu_state);
+ if (target_cpu->powered_off) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "[ARM]%s: CPU %" PRId64 " is already off\n",
+ __func__, cpuid);
+ return QEMU_ARM_POWERCTL_IS_OFF;
+ }
+
+ target_cpu->powered_off = true;
+ target_cpu_state->halted = 1;
+ target_cpu_state->exception_index = EXCP_HLT;
+ cpu_loop_exit(target_cpu_state);
+ /* notreached */
+
+ return QEMU_ARM_POWERCTL_RET_SUCCESS;
+}
+
+int arm_reset_cpu(uint64_t cpuid)
+{
+ CPUState *target_cpu_state;
+ ARMCPU *target_cpu;
+
+ DPRINTF("cpu %" PRId64 "\n", cpuid);
+
+ /* change to the cpu we are resetting */
+ target_cpu_state = arm_get_cpu_by_id(cpuid);
+ if (!target_cpu_state) {
+ return QEMU_ARM_POWERCTL_INVALID_PARAM;
+ }
+ target_cpu = ARM_CPU(target_cpu_state);
+ if (target_cpu->powered_off) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "[ARM]%s: CPU %" PRId64 " is off\n",
+ __func__, cpuid);
+ return QEMU_ARM_POWERCTL_IS_OFF;
+ }
+
+ /* Reset the cpu */
+ cpu_reset(target_cpu_state);
+
+ return QEMU_ARM_POWERCTL_RET_SUCCESS;
+}
diff --git a/target-arm/arm-powerctl.h b/target-arm/arm-powerctl.h
new file mode 100644
index 0000000000..98ee04989b
--- /dev/null
+++ b/target-arm/arm-powerctl.h
@@ -0,0 +1,75 @@
+/*
+ * QEMU support -- ARM Power Control specific functions.
+ *
+ * Copyright (c) 2016 Jean-Christophe Dubois
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef QEMU_ARM_POWERCTL_H
+#define QEMU_ARM_POWERCTL_H
+
+#include "kvm-consts.h"
+
+#define QEMU_ARM_POWERCTL_RET_SUCCESS QEMU_PSCI_RET_SUCCESS
+#define QEMU_ARM_POWERCTL_INVALID_PARAM QEMU_PSCI_RET_INVALID_PARAMS
+#define QEMU_ARM_POWERCTL_ALREADY_ON QEMU_PSCI_RET_ALREADY_ON
+#define QEMU_ARM_POWERCTL_IS_OFF QEMU_PSCI_RET_DENIED
+
+/*
+ * arm_get_cpu_by_id:
+ * @cpuid: the id of the CPU whose state we want to retrieve
+ *
+ * Retrieve a CPUState object from its CPU ID provided in @cpuid.
+ *
+ * Returns: a pointer to the CPUState structure of the requested CPU.
+ */
+CPUState *arm_get_cpu_by_id(uint64_t cpuid);
+
+/*
+ * arm_set_cpu_on:
+ * @cpuid: the id of the CPU we want to start/wake up.
+ * @entry: the address the CPU shall start from.
+ * @context_id: the value to put in r0/x0.
+ * @target_el: The desired exception level.
+ * @target_aa64: 1 if the requested mode is AArch64. 0 otherwise.
+ *
+ * Start the cpu designated by @cpuid in @target_el exception level. The mode
+ * shall be AArch64 if @target_aa64 is set to 1. Otherwise the mode is
+ * AArch32. The CPU shall start at @entry with @context_id in r0/x0.
+ *
+ * Returns: QEMU_ARM_POWERCTL_RET_SUCCESS on success.
+ * QEMU_ARM_POWERCTL_INVALID_PARAM if bad parameters are provided.
+ * QEMU_ARM_POWERCTL_ALREADY_ON if the CPU was already started.
+ */
+int arm_set_cpu_on(uint64_t cpuid, uint64_t entry, uint64_t context_id,
+ uint32_t target_el, bool target_aa64);
+
+/*
+ * arm_set_cpu_off:
+ * @cpuid: the id of the CPU we want to stop/shut down.
+ *
+ * Stop the cpu designated by @cpuid.
+ *
+ * Returns: QEMU_ARM_POWERCTL_RET_SUCCESS on success.
+ * QEMU_ARM_POWERCTL_INVALID_PARAM if bad parameters are provided.
+ * QEMU_ARM_POWERCTL_IS_OFF if CPU is already off
+ */
+
+int arm_set_cpu_off(uint64_t cpuid);
+
+/*
+ * arm_reset_cpu:
+ * @cpuid: the id of the CPU we want to reset.
+ *
+ * Reset the cpu designated by @cpuid.
+ *
+ * Returns: QEMU_ARM_POWERCTL_RET_SUCCESS on success.
+ * QEMU_ARM_POWERCTL_INVALID_PARAM if bad parameters are provided.
+ * QEMU_ARM_POWERCTL_IS_OFF if CPU is off
+ */
+int arm_reset_cpu(uint64_t cpuid);
+
+#endif
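
A rough usage sketch of the interface declared above (entry point and logging are illustrative placeholders; the actual callers are the PSCI / board power-control emulation):

    /* Start secondary CPU 1 at EL1 in AArch64 state, then check the result. */
    uint64_t entry = 0x40080000;   /* guest entry point, placeholder value */
    uint64_t context_id = 0;       /* delivered to the new CPU in x0 */
    int ret;

    ret = arm_set_cpu_on(1, entry, context_id, 1 /* target_el */,
                         true /* target_aa64 */);
    if (ret != QEMU_ARM_POWERCTL_RET_SUCCESS) {
        qemu_log_mask(LOG_GUEST_ERROR, "CPU 1 power-on failed: %d\n", ret);
    }
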
diff --git a/target-arm/cpu.h b/target-arm/cpu.h
index 066ff678dc..9deef86786 100644
--- a/target-arm/cpu.h
+++ b/target-arm/cpu.h
@@ -2117,7 +2117,7 @@ static inline bool arm_cpu_bswap_data(CPUARMState *env)
#endif
static inline void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
- target_ulong *cs_base, int *flags)
+ target_ulong *cs_base, uint32_t *flags)
{
if (is_a64(env)) {
*pc = env->pc;
diff --git a/target-arm/helper.c b/target-arm/helper.c
index 09638b2e7d..a2ab701ca5 100644
--- a/target-arm/helper.c
+++ b/target-arm/helper.c
@@ -3559,8 +3559,10 @@ static const ARMCPRegInfo el2_cp_reginfo[] = {
.resetvalue = 0 },
{ .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
- .access = PL2_RW, .writefn = vmsa_tcr_el1_write,
- .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write,
+ .access = PL2_RW,
+ /* no .writefn needed as this can't cause an ASID change;
+ * no .raw_writefn or .resetfn needed as we never use mask/base_mask
+ */
.fieldoffset = offsetof(CPUARMState, cp15.tcr_el[2]) },
{ .name = "VTCR", .state = ARM_CP_STATE_AA32,
.cp = 15, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
@@ -3753,8 +3755,10 @@ static const ARMCPRegInfo el3_cp_reginfo[] = {
.fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[3]) },
{ .name = "TCR_EL3", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 2,
- .access = PL3_RW, .writefn = vmsa_tcr_el1_write,
- .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write,
+ .access = PL3_RW,
+ /* no .writefn needed as this can't cause an ASID change;
+ * no .raw_writefn or .resetfn needed as we never use mask/base_mask
+ */
.fieldoffset = offsetof(CPUARMState, cp15.tcr_el[3]) },
{ .name = "ELR_EL3", .state = ARM_CP_STATE_AA64,
.type = ARM_CP_ALIAS,
@@ -6708,7 +6712,9 @@ static int get_S2prot(CPUARMState *env, int s2ap, int xn)
prot |= PAGE_WRITE;
}
if (!xn) {
- prot |= PAGE_EXEC;
+ if (arm_el_is_aa64(env, 2) || prot & PAGE_READ) {
+ prot |= PAGE_EXEC;
+ }
}
return prot;
}
@@ -7248,7 +7254,7 @@ static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
uint32_t tg;
uint64_t ttbr;
int ttbr_select;
- hwaddr descaddr, descmask;
+ hwaddr descaddr, indexmask, indexmask_grainsize;
uint32_t tableattrs;
target_ulong page_size;
uint32_t attrs;
@@ -7437,28 +7443,20 @@ static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
level = startlevel;
}
- /* Clear the vaddr bits which aren't part of the within-region address,
- * so that we don't have to special case things when calculating the
- * first descriptor address.
- */
- if (va_size != inputsize) {
- address &= (1ULL << inputsize) - 1;
- }
-
- descmask = (1ULL << (stride + 3)) - 1;
+ indexmask_grainsize = (1ULL << (stride + 3)) - 1;
+ indexmask = (1ULL << (inputsize - (stride * (4 - level)))) - 1;
/* Now we can extract the actual base address from the TTBR */
descaddr = extract64(ttbr, 0, 48);
- descaddr &= ~((1ULL << (inputsize - (stride * (4 - level)))) - 1);
+ descaddr &= ~indexmask;
/* The address field in the descriptor goes up to bit 39 for ARMv7
- * but up to bit 47 for ARMv8.
+ * but up to bit 47 for ARMv8; however, we only use descaddrmask
+ * up to bit 39 for AArch32, because the other bits are not needed
+ * to construct the next descriptor address (they should all be zero).
*/
- if (arm_feature(env, ARM_FEATURE_V8)) {
- descaddrmask = 0xfffffffff000ULL;
- } else {
- descaddrmask = 0xfffffff000ULL;
- }
+ descaddrmask = ((1ull << (va_size == 64 ? 48 : 40)) - 1) &
+ ~indexmask_grainsize;
/* Secure accesses start with the page table in secure memory and
* can be downgraded to non-secure at any step. Non-secure accesses
@@ -7470,7 +7468,7 @@ static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
uint64_t descriptor;
bool nstable;
- descaddr |= (address >> (stride * (4 - level))) & descmask;
+ descaddr |= (address >> (stride * (4 - level))) & indexmask;
descaddr &= ~7ULL;
nstable = extract32(tableattrs, 4, 1);
descriptor = arm_ldq_ptw(cs, descaddr, !nstable, mmu_idx, fsr, fi);
@@ -7493,6 +7491,7 @@ static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
*/
tableattrs |= extract64(descriptor, 59, 5);
level++;
+ indexmask = indexmask_grainsize;
continue;
}
/* Block entry at level 1 or 2, or page entry at level 3.
diff --git a/target-arm/internals.h b/target-arm/internals.h
index 2e70272be2..54a0fb1db7 100644
--- a/target-arm/internals.h
+++ b/target-arm/internals.h
@@ -263,7 +263,9 @@ enum arm_exception_class {
#define ARM_EL_EC_SHIFT 26
#define ARM_EL_IL_SHIFT 25
+#define ARM_EL_ISV_SHIFT 24
#define ARM_EL_IL (1 << ARM_EL_IL_SHIFT)
+#define ARM_EL_ISV (1 << ARM_EL_ISV_SHIFT)
/* Utility functions for constructing various kinds of syndrome value.
* Note that in general we follow the AArch64 syndrome values; in a
@@ -383,11 +385,27 @@ static inline uint32_t syn_insn_abort(int same_el, int ea, int s1ptw, int fsc)
| (ea << 9) | (s1ptw << 7) | fsc;
}
-static inline uint32_t syn_data_abort(int same_el, int ea, int cm, int s1ptw,
- int wnr, int fsc)
+static inline uint32_t syn_data_abort_no_iss(int same_el,
+ int ea, int cm, int s1ptw,
+ int wnr, int fsc)
{
return (EC_DATAABORT << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT)
- | (ea << 9) | (cm << 8) | (s1ptw << 7) | (wnr << 6) | fsc;
+ | ARM_EL_IL
+ | (ea << 9) | (cm << 8) | (s1ptw << 7) | (wnr << 6) | fsc;
+}
+
+static inline uint32_t syn_data_abort_with_iss(int same_el,
+ int sas, int sse, int srt,
+ int sf, int ar,
+ int ea, int cm, int s1ptw,
+ int wnr, int fsc,
+ bool is_16bit)
+{
+ return (EC_DATAABORT << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT)
+ | (is_16bit ? 0 : ARM_EL_IL)
+ | ARM_EL_ISV | (sas << 22) | (sse << 21) | (srt << 16)
+ | (sf << 15) | (ar << 14)
+ | (ea << 9) | (cm << 8) | (s1ptw << 7) | (wnr << 6) | fsc;
}
static inline uint32_t syn_swstep(int same_el, int isv, int ex)
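
For orientation, a worked example of the value the new no-ISS helper produces (illustrative only; EC_DATAABORT is 0x24 as defined in this header, and fsc 0x21 is the alignment-fault status code used by arm_cpu_do_unaligned_access() below):

/* An unaligned write taken from a lower exception level:
 *
 *   syn_data_abort_no_iss(0, 0, 0, 0, 1, 0x21)
 *     == (0x24 << 26)   EC: data abort from a lower EL
 *      | (1 << 25)      IL: 32-bit instruction length
 *      | (1 << 6)       WnR: write, not read
 *      | 0x21           FSC: alignment fault
 *     == 0x92000061
 */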
diff --git a/target-arm/op_helper.c b/target-arm/op_helper.c
index d626ff1a20..c7fba8526c 100644
--- a/target-arm/op_helper.c
+++ b/target-arm/op_helper.c
@@ -115,7 +115,8 @@ void tlb_fill(CPUState *cs, target_ulong addr, int is_write, int mmu_idx,
syn = syn_insn_abort(same_el, 0, fi.s1ptw, syn);
exc = EXCP_PREFETCH_ABORT;
} else {
- syn = syn_data_abort(same_el, 0, 0, fi.s1ptw, is_write == 1, syn);
+ syn = syn_data_abort_no_iss(same_el,
+ 0, 0, fi.s1ptw, is_write == 1, syn);
if (is_write == 1 && arm_feature(env, ARM_FEATURE_V6)) {
fsr |= (1 << 11);
}
@@ -161,7 +162,8 @@ void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr, int is_write,
}
raise_exception(env, EXCP_DATA_ABORT,
- syn_data_abort(same_el, 0, 0, 0, is_write == 1, 0x21),
+ syn_data_abort_no_iss(same_el,
+ 0, 0, 0, is_write == 1, 0x21),
target_el);
}
diff --git a/target-arm/psci.c b/target-arm/psci.c
index c55487f872..ce2e0dca39 100644
--- a/target-arm/psci.c
+++ b/target-arm/psci.c
@@ -22,6 +22,7 @@
#include <kvm-consts.h>
#include <sysemu/sysemu.h>
#include "internals.h"
+#include "arm-powerctl.h"
bool arm_is_psci_call(ARMCPU *cpu, int excp_type)
{
@@ -73,21 +74,6 @@ bool arm_is_psci_call(ARMCPU *cpu, int excp_type)
}
}
-static CPUState *get_cpu_by_id(uint64_t id)
-{
- CPUState *cpu;
-
- CPU_FOREACH(cpu) {
- ARMCPU *armcpu = ARM_CPU(cpu);
-
- if (armcpu->mp_affinity == id) {
- return cpu;
- }
- }
-
- return NULL;
-}
-
void arm_handle_psci_call(ARMCPU *cpu)
{
/*
@@ -98,7 +84,6 @@ void arm_handle_psci_call(ARMCPU *cpu)
* Additional information about the calling convention used is available in
* the document 'SMC Calling Convention' (ARM DEN 0028)
*/
- CPUState *cs = CPU(cpu);
CPUARMState *env = &cpu->env;
uint64_t param[4];
uint64_t context_id, mpidr;
@@ -123,7 +108,6 @@ void arm_handle_psci_call(ARMCPU *cpu)
switch (param[0]) {
CPUState *target_cpu_state;
ARMCPU *target_cpu;
- CPUClass *target_cpu_class;
case QEMU_PSCI_0_2_FN_PSCI_VERSION:
ret = QEMU_PSCI_0_2_RET_VERSION_0_2;
@@ -137,7 +121,7 @@ void arm_handle_psci_call(ARMCPU *cpu)
switch (param[2]) {
case 0:
- target_cpu_state = get_cpu_by_id(mpidr);
+ target_cpu_state = arm_get_cpu_by_id(mpidr);
if (!target_cpu_state) {
ret = QEMU_PSCI_RET_INVALID_PARAMS;
break;
@@ -167,52 +151,13 @@ void arm_handle_psci_call(ARMCPU *cpu)
mpidr = param[1];
entry = param[2];
context_id = param[3];
-
- /* change to the cpu we are powering up */
- target_cpu_state = get_cpu_by_id(mpidr);
- if (!target_cpu_state) {
- ret = QEMU_PSCI_RET_INVALID_PARAMS;
- break;
- }
- target_cpu = ARM_CPU(target_cpu_state);
- if (!target_cpu->powered_off) {
- ret = QEMU_PSCI_RET_ALREADY_ON;
- break;
- }
- target_cpu_class = CPU_GET_CLASS(target_cpu);
-
- /* Initialize the cpu we are turning on */
- cpu_reset(target_cpu_state);
- target_cpu->powered_off = false;
- target_cpu_state->halted = 0;
-
/*
* The PSCI spec mandates that newly brought up CPUs enter the
* exception level of the caller in the same execution mode as
* the caller, with context_id in x0/r0, respectively.
- *
- * For now, it is sufficient to assert() that CPUs come out of
- * reset in the same mode as the calling CPU, since we only
- * implement EL1, which means that
- * (a) there is no EL2 for the calling CPU to trap into to change
- * its state
- * (b) the newly brought up CPU enters EL1 immediately after coming
- * out of reset in the default state
*/
- assert(is_a64(env) == is_a64(&target_cpu->env));
- if (is_a64(env)) {
- if (entry & 1) {
- ret = QEMU_PSCI_RET_INVALID_PARAMS;
- break;
- }
- target_cpu->env.xregs[0] = context_id;
- } else {
- target_cpu->env.regs[0] = context_id;
- target_cpu->env.thumb = entry & 1;
- }
- target_cpu_class->set_pc(target_cpu_state, entry);
-
- ret = 0;
+ ret = arm_set_cpu_on(mpidr, entry, context_id, arm_current_el(env),
+ is_a64(env));
break;
case QEMU_PSCI_0_1_FN_CPU_OFF:
case QEMU_PSCI_0_2_FN_CPU_OFF:
@@ -250,9 +195,8 @@ err:
return;
cpu_off:
- cpu->powered_off = true;
- cs->halted = 1;
- cs->exception_index = EXCP_HLT;
- cpu_loop_exit(cs);
+ ret = arm_set_cpu_off(cpu->mp_affinity);
/* notreached */
+ /* sanity check in case something failed */
+ assert(ret == QEMU_ARM_POWERCTL_RET_SUCCESS);
}
diff --git a/target-arm/translate-a64.c b/target-arm/translate-a64.c
index b13cff756a..5526bbda2c 100644
--- a/target-arm/translate-a64.c
+++ b/target-arm/translate-a64.c
@@ -274,10 +274,12 @@ static inline bool use_goto_tb(DisasContext *s, int n, uint64_t dest)
return false;
}
+#ifndef CONFIG_USER_ONLY
/* Only link tbs from inside the same guest page */
if ((s->tb->pc & TARGET_PAGE_MASK) != (dest & TARGET_PAGE_MASK)) {
return false;
}
+#endif
return true;
}
@@ -2086,19 +2088,19 @@ static void disas_ldst_pair(DisasContext *s, uint32_t insn)
* size: 00 -> 8 bit, 01 -> 16 bit, 10 -> 32 bit, 11 -> 64bit
* opc: 00 -> store, 01 -> loadu, 10 -> loads 64, 11 -> loads 32
*/
-static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn)
+static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn,
+ int opc,
+ int size,
+ int rt,
+ bool is_vector)
{
- int rt = extract32(insn, 0, 5);
int rn = extract32(insn, 5, 5);
int imm9 = sextract32(insn, 12, 9);
- int opc = extract32(insn, 22, 2);
- int size = extract32(insn, 30, 2);
int idx = extract32(insn, 10, 2);
bool is_signed = false;
bool is_store = false;
bool is_extended = false;
bool is_unpriv = (idx == 2);
- bool is_vector = extract32(insn, 26, 1);
bool post_index;
bool writeback;
@@ -2128,8 +2130,8 @@ static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn)
return;
}
is_store = (opc == 0);
- is_signed = opc & (1<<1);
- is_extended = (size < 3) && (opc & 1);
+ is_signed = extract32(opc, 1, 1);
+ is_extended = (size < 3) && extract32(opc, 0, 1);
}
switch (idx) {
@@ -2205,19 +2207,19 @@ static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn)
* Rn: address register or SP for base
* Rm: offset register or ZR for offset
*/
-static void disas_ldst_reg_roffset(DisasContext *s, uint32_t insn)
+static void disas_ldst_reg_roffset(DisasContext *s, uint32_t insn,
+ int opc,
+ int size,
+ int rt,
+ bool is_vector)
{
- int rt = extract32(insn, 0, 5);
int rn = extract32(insn, 5, 5);
int shift = extract32(insn, 12, 1);
int rm = extract32(insn, 16, 5);
- int opc = extract32(insn, 22, 2);
int opt = extract32(insn, 13, 3);
- int size = extract32(insn, 30, 2);
bool is_signed = false;
bool is_store = false;
bool is_extended = false;
- bool is_vector = extract32(insn, 26, 1);
TCGv_i64 tcg_rm;
TCGv_i64 tcg_addr;
@@ -2294,14 +2296,14 @@ static void disas_ldst_reg_roffset(DisasContext *s, uint32_t insn)
* Rn: base address register (inc SP)
* Rt: target register
*/
-static void disas_ldst_reg_unsigned_imm(DisasContext *s, uint32_t insn)
+static void disas_ldst_reg_unsigned_imm(DisasContext *s, uint32_t insn,
+ int opc,
+ int size,
+ int rt,
+ bool is_vector)
{
- int rt = extract32(insn, 0, 5);
int rn = extract32(insn, 5, 5);
unsigned int imm12 = extract32(insn, 10, 12);
- bool is_vector = extract32(insn, 26, 1);
- int size = extract32(insn, 30, 2);
- int opc = extract32(insn, 22, 2);
unsigned int offset;
TCGv_i64 tcg_addr;
@@ -2360,20 +2362,25 @@ static void disas_ldst_reg_unsigned_imm(DisasContext *s, uint32_t insn)
/* Load/store register (all forms) */
static void disas_ldst_reg(DisasContext *s, uint32_t insn)
{
+ int rt = extract32(insn, 0, 5);
+ int opc = extract32(insn, 22, 2);
+ bool is_vector = extract32(insn, 26, 1);
+ int size = extract32(insn, 30, 2);
+
switch (extract32(insn, 24, 2)) {
case 0:
if (extract32(insn, 21, 1) == 1 && extract32(insn, 10, 2) == 2) {
- disas_ldst_reg_roffset(s, insn);
+ disas_ldst_reg_roffset(s, insn, opc, size, rt, is_vector);
} else {
/* Load/store register (unscaled immediate)
* Load/store immediate pre/post-indexed
* Load/store register unprivileged
*/
- disas_ldst_reg_imm9(s, insn);
+ disas_ldst_reg_imm9(s, insn, opc, size, rt, is_vector);
}
break;
case 1:
- disas_ldst_reg_unsigned_imm(s, insn);
+ disas_ldst_reg_unsigned_imm(s, insn, opc, size, rt, is_vector);
break;
default:
unallocated_encoding(s);
diff --git a/target-arm/translate.c b/target-arm/translate.c
index 940ec8d981..a43b1f61cf 100644
--- a/target-arm/translate.c
+++ b/target-arm/translate.c
@@ -4049,15 +4049,22 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
return 0;
}
-static inline void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
+static inline bool use_goto_tb(DisasContext *s, target_ulong dest)
{
- TranslationBlock *tb;
+#ifndef CONFIG_USER_ONLY
+ return (s->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
+ ((s->pc - 1) & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
+#else
+ return true;
+#endif
+}
- tb = s->tb;
- if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
+static inline void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
+{
+ if (use_goto_tb(s, dest)) {
tcg_gen_goto_tb(n);
gen_set_pc_im(s, dest);
- tcg_gen_exit_tb((uintptr_t)tb + n);
+ tcg_gen_exit_tb((uintptr_t)s->tb + n);
} else {
gen_set_pc_im(s, dest);
tcg_gen_exit_tb(0);
diff --git a/target-cris/cpu.h b/target-cris/cpu.h
index 415cf91436..a492fc687f 100644
--- a/target-cris/cpu.h
+++ b/target-cris/cpu.h
@@ -249,7 +249,7 @@ int cris_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw,
#include "exec/cpu-all.h"
static inline void cpu_get_tb_cpu_state(CPUCRISState *env, target_ulong *pc,
- target_ulong *cs_base, int *flags)
+ target_ulong *cs_base, uint32_t *flags)
{
*pc = env->pc;
*cs_base = 0;
diff --git a/target-cris/translate.c b/target-cris/translate.c
index a73176c118..f28b1999a7 100644
--- a/target-cris/translate.c
+++ b/target-cris/translate.c
@@ -520,14 +520,22 @@ static void t_gen_cc_jmp(TCGv pc_true, TCGv pc_false)
gen_set_label(l1);
}
+static inline bool use_goto_tb(DisasContext *dc, target_ulong dest)
+{
+#ifndef CONFIG_USER_ONLY
+ return (dc->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
+ (dc->ppc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
+#else
+ return true;
+#endif
+}
+
static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
- TranslationBlock *tb;
- tb = dc->tb;
- if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
+ if (use_goto_tb(dc, dest)) {
tcg_gen_goto_tb(n);
tcg_gen_movi_tl(env_pc, dest);
- tcg_gen_exit_tb((uintptr_t)tb + n);
+ tcg_gen_exit_tb((uintptr_t)dc->tb + n);
} else {
tcg_gen_movi_tl(env_pc, dest);
tcg_gen_exit_tb(0);
diff --git a/target-i386/cpu.h b/target-i386/cpu.h
index 732eb6d7ec..444fda9ce6 100644
--- a/target-i386/cpu.h
+++ b/target-i386/cpu.h
@@ -1269,7 +1269,7 @@ void tcg_x86_init(void);
#include "exec/exec-all.h"
static inline void cpu_get_tb_cpu_state(CPUX86State *env, target_ulong *pc,
- target_ulong *cs_base, int *flags)
+ target_ulong *cs_base, uint32_t *flags)
{
*cs_base = env->segs[R_CS].base;
*pc = *cs_base + env->eip;
diff --git a/target-i386/translate.c b/target-i386/translate.c
index 1a1214dcb1..868c26244b 100644
--- a/target-i386/translate.c
+++ b/target-i386/translate.c
@@ -2085,20 +2085,25 @@ static inline int insn_const_size(TCGMemOp ot)
}
}
+static inline bool use_goto_tb(DisasContext *s, target_ulong pc)
+{
+#ifndef CONFIG_USER_ONLY
+ return (pc & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK) ||
+ (pc & TARGET_PAGE_MASK) == (s->pc_start & TARGET_PAGE_MASK);
+#else
+ return true;
+#endif
+}
+
static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong eip)
{
- TranslationBlock *tb;
- target_ulong pc;
-
- pc = s->cs_base + eip;
- tb = s->tb;
- /* NOTE: we handle the case where the TB spans two pages here */
- if ((pc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) ||
- (pc & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK)) {
+ target_ulong pc = s->cs_base + eip;
+
+ if (use_goto_tb(s, pc)) {
/* jump to same page: we can use a direct jump */
tcg_gen_goto_tb(tb_num);
gen_jmp_im(eip);
- tcg_gen_exit_tb((uintptr_t)tb + tb_num);
+ tcg_gen_exit_tb((uintptr_t)s->tb + tb_num);
} else {
/* jump to another page: currently not optimized */
gen_jmp_im(eip);
@@ -8178,7 +8183,7 @@ void gen_intermediate_code(CPUX86State *env, TranslationBlock *tb)
CPUState *cs = CPU(cpu);
DisasContext dc1, *dc = &dc1;
target_ulong pc_ptr;
- uint64_t flags;
+ uint32_t flags;
target_ulong pc_start;
target_ulong cs_base;
int num_insns;
diff --git a/target-lm32/cpu.h b/target-lm32/cpu.h
index f220fc0bb9..6a0d297b30 100644
--- a/target-lm32/cpu.h
+++ b/target-lm32/cpu.h
@@ -226,7 +226,7 @@ int lm32_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw,
#include "exec/cpu-all.h"
static inline void cpu_get_tb_cpu_state(CPULM32State *env, target_ulong *pc,
- target_ulong *cs_base, int *flags)
+ target_ulong *cs_base, uint32_t *flags)
{
*pc = env->pc;
*cs_base = 0;
diff --git a/target-lm32/translate.c b/target-lm32/translate.c
index 256a51f849..dd972f5b8c 100644
--- a/target-lm32/translate.c
+++ b/target-lm32/translate.c
@@ -133,16 +133,25 @@ static inline void t_gen_illegal_insn(DisasContext *dc)
gen_helper_ill(cpu_env);
}
-static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
+static inline bool use_goto_tb(DisasContext *dc, target_ulong dest)
{
- TranslationBlock *tb;
+ if (unlikely(dc->singlestep_enabled)) {
+ return false;
+ }
+
+#ifndef CONFIG_USER_ONLY
+ return (dc->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
+#else
+ return true;
+#endif
+}
- tb = dc->tb;
- if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) &&
- likely(!dc->singlestep_enabled)) {
+static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
+{
+ if (use_goto_tb(dc, dest)) {
tcg_gen_goto_tb(n);
tcg_gen_movi_tl(cpu_pc, dest);
- tcg_gen_exit_tb((uintptr_t)tb + n);
+ tcg_gen_exit_tb((uintptr_t)dc->tb + n);
} else {
tcg_gen_movi_tl(cpu_pc, dest);
if (dc->singlestep_enabled) {
diff --git a/target-m68k/cpu.h b/target-m68k/cpu.h
index 48b4c872fc..d2f467ca9b 100644
--- a/target-m68k/cpu.h
+++ b/target-m68k/cpu.h
@@ -230,7 +230,7 @@ int m68k_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw,
#include "exec/cpu-all.h"
static inline void cpu_get_tb_cpu_state(CPUM68KState *env, target_ulong *pc,
- target_ulong *cs_base, int *flags)
+ target_ulong *cs_base, uint32_t *flags)
{
*pc = env->pc;
*cs_base = 0;
diff --git a/target-m68k/translate.c b/target-m68k/translate.c
index 7560c3a808..e46356e44c 100644
--- a/target-m68k/translate.c
+++ b/target-m68k/translate.c
@@ -852,19 +852,25 @@ static inline void gen_addr_fault(DisasContext *s)
} \
} while (0)
+static inline bool use_goto_tb(DisasContext *s, uint32_t dest)
+{
+#ifndef CONFIG_USER_ONLY
+ return (s->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
+ (s->insn_pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
+#else
+ return true;
+#endif
+}
+
/* Generate a jump to an immediate address. */
static void gen_jmp_tb(DisasContext *s, int n, uint32_t dest)
{
- TranslationBlock *tb;
-
- tb = s->tb;
if (unlikely(s->singlestep_enabled)) {
gen_exception(s, dest, EXCP_DEBUG);
- } else if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
- (s->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
+ } else if (use_goto_tb(s, dest)) {
tcg_gen_goto_tb(n);
tcg_gen_movi_i32(QREG_PC, dest);
- tcg_gen_exit_tb((uintptr_t)tb + n);
+ tcg_gen_exit_tb((uintptr_t)s->tb + n);
} else {
gen_jmp_im(s, dest);
tcg_gen_exit_tb(0);
diff --git a/target-microblaze/cpu.h b/target-microblaze/cpu.h
index 2f7335eaa7..bf74e2c032 100644
--- a/target-microblaze/cpu.h
+++ b/target-microblaze/cpu.h
@@ -322,7 +322,7 @@ int mb_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw,
#include "exec/cpu-all.h"
static inline void cpu_get_tb_cpu_state(CPUMBState *env, target_ulong *pc,
- target_ulong *cs_base, int *flags)
+ target_ulong *cs_base, uint32_t *flags)
{
*pc = env->sregs[SR_PC];
*cs_base = 0;
diff --git a/target-microblaze/translate.c b/target-microblaze/translate.c
index f944965a14..a7a8ac8f99 100644
--- a/target-microblaze/translate.c
+++ b/target-microblaze/translate.c
@@ -124,14 +124,21 @@ static inline void t_gen_raise_exception(DisasContext *dc, uint32_t index)
dc->is_jmp = DISAS_UPDATE;
}
+static inline bool use_goto_tb(DisasContext *dc, target_ulong dest)
+{
+#ifndef CONFIG_USER_ONLY
+ return (dc->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
+#else
+ return true;
+#endif
+}
+
static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
- TranslationBlock *tb;
- tb = dc->tb;
- if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
+ if (use_goto_tb(dc, dest)) {
tcg_gen_goto_tb(n);
tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
- tcg_gen_exit_tb((uintptr_t)tb + n);
+ tcg_gen_exit_tb((uintptr_t)dc->tb + n);
} else {
tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
tcg_gen_exit_tb(0);
diff --git a/target-mips/cpu.h b/target-mips/cpu.h
index 866924d188..53e826223f 100644
--- a/target-mips/cpu.h
+++ b/target-mips/cpu.h
@@ -839,7 +839,7 @@ static inline void restore_pamask(CPUMIPSState *env)
}
static inline void cpu_get_tb_cpu_state(CPUMIPSState *env, target_ulong *pc,
- target_ulong *cs_base, int *flags)
+ target_ulong *cs_base, uint32_t *flags)
{
*pc = env->active_tc.PC;
*cs_base = 0;
diff --git a/target-mips/helper.c b/target-mips/helper.c
index 1004edee05..cfea177ee5 100644
--- a/target-mips/helper.c
+++ b/target-mips/helper.c
@@ -539,7 +539,7 @@ void mips_cpu_do_interrupt(CPUState *cs)
break;
case EXCP_SRESET:
env->CP0_Status |= (1 << CP0St_SR);
- memset(env->CP0_WatchLo, 0, sizeof(*env->CP0_WatchLo));
+ memset(env->CP0_WatchLo, 0, sizeof(env->CP0_WatchLo));
goto set_error_EPC;
case EXCP_NMI:
env->CP0_Status |= (1 << CP0St_NMI);
diff --git a/target-mips/translate.c b/target-mips/translate.c
index a3a05ec66d..ddfb9244d7 100644
--- a/target-mips/translate.c
+++ b/target-mips/translate.c
@@ -4191,15 +4191,25 @@ static void gen_trap (DisasContext *ctx, uint32_t opc,
tcg_temp_free(t1);
}
+static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest)
+{
+ if (unlikely(ctx->singlestep_enabled)) {
+ return false;
+ }
+
+#ifndef CONFIG_USER_ONLY
+ return (ctx->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
+#else
+ return true;
+#endif
+}
+
static inline void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
{
- TranslationBlock *tb;
- tb = ctx->tb;
- if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) &&
- likely(!ctx->singlestep_enabled)) {
+ if (use_goto_tb(ctx, dest)) {
tcg_gen_goto_tb(n);
gen_save_pc(dest);
- tcg_gen_exit_tb((uintptr_t)tb + n);
+ tcg_gen_exit_tb((uintptr_t)ctx->tb + n);
} else {
gen_save_pc(dest);
if (ctx->singlestep_enabled) {
diff --git a/target-moxie/cpu.h b/target-moxie/cpu.h
index 4ee207796c..1b46e52c37 100644
--- a/target-moxie/cpu.h
+++ b/target-moxie/cpu.h
@@ -132,7 +132,7 @@ static inline int cpu_mmu_index(CPUMoxieState *env, bool ifetch)
#include "exec/exec-all.h"
static inline void cpu_get_tb_cpu_state(CPUMoxieState *env, target_ulong *pc,
- target_ulong *cs_base, int *flags)
+ target_ulong *cs_base, uint32_t *flags)
{
*pc = env->pc;
*cs_base = 0;
diff --git a/target-moxie/translate.c b/target-moxie/translate.c
index a437e2ab60..58200c25d3 100644
--- a/target-moxie/translate.c
+++ b/target-moxie/translate.c
@@ -121,17 +121,26 @@ void moxie_translate_init(void)
done_init = 1;
}
+static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest)
+{
+ if (unlikely(ctx->singlestep_enabled)) {
+ return false;
+ }
+
+#ifndef CONFIG_USER_ONLY
+ return (ctx->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
+#else
+ return true;
+#endif
+}
+
static inline void gen_goto_tb(CPUMoxieState *env, DisasContext *ctx,
int n, target_ulong dest)
{
- TranslationBlock *tb;
- tb = ctx->tb;
-
- if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) &&
- !ctx->singlestep_enabled) {
+ if (use_goto_tb(ctx, dest)) {
tcg_gen_goto_tb(n);
tcg_gen_movi_i32(cpu_pc, dest);
- tcg_gen_exit_tb((uintptr_t)tb + n);
+ tcg_gen_exit_tb((uintptr_t)ctx->tb + n);
} else {
tcg_gen_movi_i32(cpu_pc, dest);
if (ctx->singlestep_enabled) {
diff --git a/target-openrisc/cpu.h b/target-openrisc/cpu.h
index 4b63f25804..ed818af0cf 100644
--- a/target-openrisc/cpu.h
+++ b/target-openrisc/cpu.h
@@ -392,7 +392,7 @@ int cpu_openrisc_get_phys_data(OpenRISCCPU *cpu,
static inline void cpu_get_tb_cpu_state(CPUOpenRISCState *env,
target_ulong *pc,
- target_ulong *cs_base, int *flags)
+ target_ulong *cs_base, uint32_t *flags)
{
*pc = env->pc;
*cs_base = 0;
diff --git a/target-openrisc/translate.c b/target-openrisc/translate.c
index 5d0ab442a8..d4f1f260e4 100644
--- a/target-openrisc/translate.c
+++ b/target-openrisc/translate.c
@@ -190,15 +190,25 @@ static void check_ov64s(DisasContext *dc)
}
#endif*/
+static inline bool use_goto_tb(DisasContext *dc, target_ulong dest)
+{
+ if (unlikely(dc->singlestep_enabled)) {
+ return false;
+ }
+
+#ifndef CONFIG_USER_ONLY
+ return (dc->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
+#else
+ return true;
+#endif
+}
+
static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
- TranslationBlock *tb;
- tb = dc->tb;
- if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) &&
- likely(!dc->singlestep_enabled)) {
+ if (use_goto_tb(dc, dest)) {
tcg_gen_movi_tl(cpu_pc, dest);
tcg_gen_goto_tb(n);
- tcg_gen_exit_tb((uintptr_t)tb + n);
+ tcg_gen_exit_tb((uintptr_t)dc->tb + n);
} else {
tcg_gen_movi_tl(cpu_pc, dest);
if (dc->singlestep_enabled) {
diff --git a/target-ppc/cpu.h b/target-ppc/cpu.h
index 5282533b38..508f03b74d 100644
--- a/target-ppc/cpu.h
+++ b/target-ppc/cpu.h
@@ -2303,7 +2303,7 @@ static inline void cpu_write_xer(CPUPPCState *env, target_ulong xer)
}
static inline void cpu_get_tb_cpu_state(CPUPPCState *env, target_ulong *pc,
- target_ulong *cs_base, int *flags)
+ target_ulong *cs_base, uint32_t *flags)
{
*pc = env->nip;
*cs_base = 0;
diff --git a/target-ppc/translate.c b/target-ppc/translate.c
index b3860ecdea..d485d7c7cb 100644
--- a/target-ppc/translate.c
+++ b/target-ppc/translate.c
@@ -3822,19 +3822,29 @@ static inline void gen_update_cfar(DisasContext *ctx, target_ulong nip)
#endif
}
+static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest)
+{
+ if (unlikely(ctx->singlestep_enabled)) {
+ return false;
+ }
+
+#ifndef CONFIG_USER_ONLY
+ return (ctx->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
+#else
+ return true;
+#endif
+}
+
/*** Branch ***/
static inline void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
{
- TranslationBlock *tb;
- tb = ctx->tb;
if (NARROW_MODE(ctx)) {
dest = (uint32_t) dest;
}
- if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) &&
- likely(!ctx->singlestep_enabled)) {
+ if (use_goto_tb(ctx, dest)) {
tcg_gen_goto_tb(n);
tcg_gen_movi_tl(cpu_nip, dest & ~3);
- tcg_gen_exit_tb((uintptr_t)tb + n);
+ tcg_gen_exit_tb((uintptr_t)ctx->tb + n);
} else {
tcg_gen_movi_tl(cpu_nip, dest & ~3);
if (unlikely(ctx->singlestep_enabled)) {
diff --git a/target-s390x/cpu.h b/target-s390x/cpu.h
index 6d97c089a4..07f76ad884 100644
--- a/target-s390x/cpu.h
+++ b/target-s390x/cpu.h
@@ -338,7 +338,7 @@ static inline uint64_t cpu_mmu_idx_to_asc(int mmu_idx)
}
static inline void cpu_get_tb_cpu_state(CPUS390XState* env, target_ulong *pc,
- target_ulong *cs_base, int *flags)
+ target_ulong *cs_base, uint32_t *flags)
{
*pc = env->psw.addr;
*cs_base = 0;
diff --git a/target-s390x/translate.c b/target-s390x/translate.c
index c871ef2bb3..e99eb5cb01 100644
--- a/target-s390x/translate.c
+++ b/target-s390x/translate.c
@@ -608,12 +608,17 @@ static void gen_op_calc_cc(DisasContext *s)
static int use_goto_tb(DisasContext *s, uint64_t dest)
{
- /* NOTE: we handle the case where the TB spans two pages here */
- return (((dest & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK)
- || (dest & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK))
- && !s->singlestep_enabled
- && !(s->tb->cflags & CF_LAST_IO)
- && !(s->tb->flags & FLAG_MASK_PER));
+ if (unlikely(s->singlestep_enabled) ||
+ (s->tb->cflags & CF_LAST_IO) ||
+ (s->tb->flags & FLAG_MASK_PER)) {
+ return false;
+ }
+#ifndef CONFIG_USER_ONLY
+ return (dest & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK) ||
+ (dest & TARGET_PAGE_MASK) == (s->pc & TARGET_PAGE_MASK);
+#else
+ return true;
+#endif
}
static void account_noninline_branch(DisasContext *s, int cc_op)
diff --git a/target-sh4/cpu.h b/target-sh4/cpu.h
index 3b23e967bb..10c0191795 100644
--- a/target-sh4/cpu.h
+++ b/target-sh4/cpu.h
@@ -347,7 +347,7 @@ static inline void cpu_write_sr(CPUSH4State *env, target_ulong sr)
}
static inline void cpu_get_tb_cpu_state(CPUSH4State *env, target_ulong *pc,
- target_ulong *cs_base, int *flags)
+ target_ulong *cs_base, uint32_t *flags)
{
*pc = env->pc;
*cs_base = 0;
diff --git a/target-sh4/translate.c b/target-sh4/translate.c
index 7c189680a7..53f782c054 100644
--- a/target-sh4/translate.c
+++ b/target-sh4/translate.c
@@ -205,17 +205,26 @@ static void gen_write_sr(TCGv src)
tcg_gen_andi_i32(cpu_sr_t, cpu_sr_t, 1);
}
-static void gen_goto_tb(DisasContext * ctx, int n, target_ulong dest)
+static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest)
{
- TranslationBlock *tb;
- tb = ctx->tb;
+ if (unlikely(ctx->singlestep_enabled)) {
+ return false;
+ }
+
+#ifndef CONFIG_USER_ONLY
+ return (ctx->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
+#else
+ return true;
+#endif
+}
- if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) &&
- !ctx->singlestep_enabled) {
+static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
+{
+ if (use_goto_tb(ctx, dest)) {
/* Use a direct jump if in same page and singlestep not enabled */
tcg_gen_goto_tb(n);
tcg_gen_movi_i32(cpu_pc, dest);
- tcg_gen_exit_tb((uintptr_t)tb + n);
+ tcg_gen_exit_tb((uintptr_t)ctx->tb + n);
} else {
tcg_gen_movi_i32(cpu_pc, dest);
if (ctx->singlestep_enabled)
diff --git a/target-sparc/cpu.h b/target-sparc/cpu.h
index dc46122758..59ec7cafbe 100644
--- a/target-sparc/cpu.h
+++ b/target-sparc/cpu.h
@@ -688,7 +688,7 @@ trap_state* cpu_tsptr(CPUSPARCState* env);
#define TB_FLAG_AM_ENABLED (1 << 5)
static inline void cpu_get_tb_cpu_state(CPUSPARCState *env, target_ulong *pc,
- target_ulong *cs_base, int *flags)
+ target_ulong *cs_base, uint32_t *flags)
{
*pc = env->pc;
*cs_base = env->npc;
diff --git a/target-sparc/translate.c b/target-sparc/translate.c
index 7998ff57bf..d154e3f7b6 100644
--- a/target-sparc/translate.c
+++ b/target-sparc/translate.c
@@ -303,20 +303,30 @@ static inline TCGv gen_dest_gpr(DisasContext *dc, int reg)
}
}
+static inline bool use_goto_tb(DisasContext *s, target_ulong pc,
+ target_ulong npc)
+{
+ if (unlikely(s->singlestep)) {
+ return false;
+ }
+
+#ifndef CONFIG_USER_ONLY
+ return (pc & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK) &&
+ (npc & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK);
+#else
+ return true;
+#endif
+}
+
static inline void gen_goto_tb(DisasContext *s, int tb_num,
target_ulong pc, target_ulong npc)
{
- TranslationBlock *tb;
-
- tb = s->tb;
- if ((pc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) &&
- (npc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) &&
- !s->singlestep) {
+ if (use_goto_tb(s, pc, npc)) {
/* jump to same page: we can use a direct jump */
tcg_gen_goto_tb(tb_num);
tcg_gen_movi_tl(cpu_pc, pc);
tcg_gen_movi_tl(cpu_npc, npc);
- tcg_gen_exit_tb((uintptr_t)tb + tb_num);
+ tcg_gen_exit_tb((uintptr_t)s->tb + tb_num);
} else {
/* jump to another page: currently not optimized */
tcg_gen_movi_tl(cpu_pc, pc);
diff --git a/target-tilegx/cpu.h b/target-tilegx/cpu.h
index 022cad186a..c9dda127c9 100644
--- a/target-tilegx/cpu.h
+++ b/target-tilegx/cpu.h
@@ -169,7 +169,7 @@ TileGXCPU *cpu_tilegx_init(const char *cpu_model);
#define cpu_signal_handler cpu_tilegx_signal_handler
static inline void cpu_get_tb_cpu_state(CPUTLGState *env, target_ulong *pc,
- target_ulong *cs_base, int *flags)
+ target_ulong *cs_base, uint32_t *flags)
{
*pc = env->pc;
*cs_base = 0;
diff --git a/target-tricore/cpu.h b/target-tricore/cpu.h
index 90045a93d2..eaebdd28e6 100644
--- a/target-tricore/cpu.h
+++ b/target-tricore/cpu.h
@@ -377,7 +377,7 @@ void tricore_tcg_init(void);
int cpu_tricore_signal_handler(int host_signum, void *pinfo, void *puc);
static inline void cpu_get_tb_cpu_state(CPUTriCoreState *env, target_ulong *pc,
- target_ulong *cs_base, int *flags)
+ target_ulong *cs_base, uint32_t *flags)
{
*pc = env->PC;
*cs_base = 0;
diff --git a/target-tricore/translate.c b/target-tricore/translate.c
index 912bf226be..0237e7bea8 100644
--- a/target-tricore/translate.c
+++ b/target-tricore/translate.c
@@ -3236,15 +3236,25 @@ static inline void gen_save_pc(target_ulong pc)
tcg_gen_movi_tl(cpu_PC, pc);
}
+static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest)
+{
+ if (unlikely(ctx->singlestep_enabled)) {
+ return false;
+ }
+
+#ifndef CONFIG_USER_ONLY
+ return (ctx->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
+#else
+ return true;
+#endif
+}
+
static inline void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
{
- TranslationBlock *tb;
- tb = ctx->tb;
- if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) &&
- likely(!ctx->singlestep_enabled)) {
+ if (use_goto_tb(ctx, dest)) {
tcg_gen_goto_tb(n);
gen_save_pc(dest);
- tcg_gen_exit_tb((uintptr_t)tb + n);
+ tcg_gen_exit_tb((uintptr_t)ctx->tb + n);
} else {
gen_save_pc(dest);
if (ctx->singlestep_enabled) {
diff --git a/target-unicore32/cpu.h b/target-unicore32/cpu.h
index 9c1fbf9b77..e64cb7ecdd 100644
--- a/target-unicore32/cpu.h
+++ b/target-unicore32/cpu.h
@@ -144,7 +144,7 @@ UniCore32CPU *uc32_cpu_init(const char *cpu_model);
#define cpu_init(cpu_model) CPU(uc32_cpu_init(cpu_model))
static inline void cpu_get_tb_cpu_state(CPUUniCore32State *env, target_ulong *pc,
- target_ulong *cs_base, int *flags)
+ target_ulong *cs_base, uint32_t *flags)
{
*pc = env->regs[31];
*cs_base = 0;
diff --git a/target-unicore32/translate.c b/target-unicore32/translate.c
index 39af3af05f..307f7b2059 100644
--- a/target-unicore32/translate.c
+++ b/target-unicore32/translate.c
@@ -1089,15 +1089,21 @@ static void disas_ucf64_insn(CPUUniCore32State *env, DisasContext *s, uint32_t i
}
}
-static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
+static inline bool use_goto_tb(DisasContext *s, uint32_t dest)
{
- TranslationBlock *tb;
+#ifndef CONFIG_USER_ONLY
+ return (s->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
+#else
+ return true;
+#endif
+}
- tb = s->tb;
- if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
+static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
+{
+ if (use_goto_tb(s, dest)) {
tcg_gen_goto_tb(n);
gen_set_pc_im(dest);
- tcg_gen_exit_tb((uintptr_t)tb + n);
+ tcg_gen_exit_tb((uintptr_t)s->tb + n);
} else {
gen_set_pc_im(dest);
tcg_gen_exit_tb(0);
diff --git a/target-xtensa/cpu.h b/target-xtensa/cpu.h
index d0bd9dada8..7bfc9c841d 100644
--- a/target-xtensa/cpu.h
+++ b/target-xtensa/cpu.h
@@ -507,7 +507,7 @@ static inline int cpu_mmu_index(CPUXtensaState *env, bool ifetch)
#define XTENSA_TBFLAG_WINDOW_SHIFT 15
static inline void cpu_get_tb_cpu_state(CPUXtensaState *env, target_ulong *pc,
- target_ulong *cs_base, int *flags)
+ target_ulong *cs_base, uint32_t *flags)
{
CPUState *cs = CPU(xtensa_env_get_cpu(env));
diff --git a/target-xtensa/translate.c b/target-xtensa/translate.c
index 9894488469..9eac56e2a5 100644
--- a/target-xtensa/translate.c
+++ b/target-xtensa/translate.c
@@ -418,9 +418,11 @@ static void gen_jump(DisasContext *dc, TCGv dest)
static void gen_jumpi(DisasContext *dc, uint32_t dest, int slot)
{
TCGv_i32 tmp = tcg_const_i32(dest);
+#ifndef CONFIG_USER_ONLY
if (((dc->tb->pc ^ dest) & TARGET_PAGE_MASK) != 0) {
slot = -1;
}
+#endif
gen_jump_slot(dc, tmp, slot);
tcg_temp_free(tmp);
}
@@ -446,9 +448,11 @@ static void gen_callw(DisasContext *dc, int callinc, TCGv_i32 dest)
static void gen_callwi(DisasContext *dc, int callinc, uint32_t dest, int slot)
{
TCGv_i32 tmp = tcg_const_i32(dest);
+#ifndef CONFIG_USER_ONLY
if (((dc->tb->pc ^ dest) & TARGET_PAGE_MASK) != 0) {
slot = -1;
}
+#endif
gen_callw_slot(dc, callinc, tmp, slot);
tcg_temp_free(tmp);
}
diff --git a/tcg/aarch64/tcg-target.inc.c b/tcg/aarch64/tcg-target.inc.c
index a8fb4420de..1447f7c216 100644
--- a/tcg/aarch64/tcg-target.inc.c
+++ b/tcg/aarch64/tcg-target.inc.c
@@ -73,6 +73,18 @@ static inline void reloc_pc26(tcg_insn_unit *code_ptr, tcg_insn_unit *target)
*code_ptr = deposit32(*code_ptr, 0, 26, offset);
}
+static inline void reloc_pc26_atomic(tcg_insn_unit *code_ptr,
+ tcg_insn_unit *target)
+{
+ ptrdiff_t offset = target - code_ptr;
+ tcg_insn_unit insn;
+ tcg_debug_assert(offset == sextract64(offset, 0, 26));
+ /* read instruction, mask away previous PC_REL26 parameter contents,
+ set the proper offset, then write back the instruction. */
+ insn = atomic_read(code_ptr);
+ atomic_set(code_ptr, deposit32(insn, 0, 26, offset));
+}
+
static inline void reloc_pc19(tcg_insn_unit *code_ptr, tcg_insn_unit *target)
{
ptrdiff_t offset = target - code_ptr;
@@ -835,7 +847,7 @@ void aarch64_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr)
tcg_insn_unit *code_ptr = (tcg_insn_unit *)jmp_addr;
tcg_insn_unit *target = (tcg_insn_unit *)addr;
- reloc_pc26(code_ptr, target);
+ reloc_pc26_atomic(code_ptr, target);
flush_icache_range(jmp_addr, jmp_addr + 4);
}
@@ -1294,12 +1306,13 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
#ifndef USE_DIRECT_JUMP
#error "USE_DIRECT_JUMP required for aarch64"
#endif
- tcg_debug_assert(s->tb_jmp_offset != NULL); /* consistency for USE_DIRECT_JUMP */
- s->tb_jmp_offset[a0] = tcg_current_code_size(s);
+ /* consistency for USE_DIRECT_JUMP */
+ tcg_debug_assert(s->tb_jmp_insn_offset != NULL);
+ s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
/* actual branch destination will be patched by
aarch64_tb_set_jmp_target later, beware retranslation. */
tcg_out_goto_noaddr(s);
- s->tb_next_offset[a0] = tcg_current_code_size(s);
+ s->tb_jmp_reset_offset[a0] = tcg_current_code_size(s);
break;
case INDEX_op_br:
diff --git a/tcg/arm/tcg-target.inc.c b/tcg/arm/tcg-target.inc.c
index 2b7fbddbf0..f9f54c64c6 100644
--- a/tcg/arm/tcg-target.inc.c
+++ b/tcg/arm/tcg-target.inc.c
@@ -121,6 +121,14 @@ static inline void reloc_pc24(tcg_insn_unit *code_ptr, tcg_insn_unit *target)
*code_ptr = (*code_ptr & ~0xffffff) | (offset & 0xffffff);
}
+static inline void reloc_pc24_atomic(tcg_insn_unit *code_ptr, tcg_insn_unit *target)
+{
+ ptrdiff_t offset = (tcg_ptr_byte_diff(target, code_ptr) - 8) >> 2;
+ tcg_insn_unit insn = atomic_read(code_ptr);
+ tcg_debug_assert(offset == sextract32(offset, 0, 24));
+ atomic_set(code_ptr, deposit32(insn, 0, 24, offset));
+}
+
static void patch_reloc(tcg_insn_unit *code_ptr, int type,
intptr_t value, intptr_t addend)
{
@@ -1038,6 +1046,16 @@ static void tcg_out_call(TCGContext *s, tcg_insn_unit *addr)
}
}
+void arm_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr)
+{
+ tcg_insn_unit *code_ptr = (tcg_insn_unit *)jmp_addr;
+ tcg_insn_unit *target = (tcg_insn_unit *)addr;
+
+ /* we could use a ldr pc, [pc, #-4] kind of branch and avoid the flush */
+ reloc_pc24_atomic(code_ptr, target);
+ flush_icache_range(jmp_addr, jmp_addr + 4);
+}
+
static inline void tcg_out_goto_label(TCGContext *s, int cond, TCGLabel *l)
{
if (l->has_value) {
@@ -1647,17 +1665,17 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
tcg_out_goto(s, COND_AL, tb_ret_addr);
break;
case INDEX_op_goto_tb:
- if (s->tb_jmp_offset) {
+ if (s->tb_jmp_insn_offset) {
/* Direct jump method */
- s->tb_jmp_offset[args[0]] = tcg_current_code_size(s);
+ s->tb_jmp_insn_offset[args[0]] = tcg_current_code_size(s);
tcg_out_b_noaddr(s, COND_AL);
} else {
/* Indirect jump method */
- intptr_t ptr = (intptr_t)(s->tb_next + args[0]);
+ intptr_t ptr = (intptr_t)(s->tb_jmp_target_addr + args[0]);
tcg_out_movi32(s, COND_AL, TCG_REG_R0, ptr & ~0xfff);
tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_R0, ptr & 0xfff);
}
- s->tb_next_offset[args[0]] = tcg_current_code_size(s);
+ s->tb_jmp_reset_offset[args[0]] = tcg_current_code_size(s);
break;
case INDEX_op_br:
tcg_out_goto_label(s, COND_AL, arg_label(args[0]));
diff --git a/tcg/i386/tcg-target.inc.c b/tcg/i386/tcg-target.inc.c
index 007407c3fc..317484cb5d 100644
--- a/tcg/i386/tcg-target.inc.c
+++ b/tcg/i386/tcg-target.inc.c
@@ -1123,6 +1123,21 @@ static void tcg_out_jmp(TCGContext *s, tcg_insn_unit *dest)
tcg_out_branch(s, 0, dest);
}
+static void tcg_out_nopn(TCGContext *s, int n)
+{
+ int i;
+ /* Emit 1 or 2 operand size prefixes for the standard one byte nop,
+ * "xchg %eax,%eax", forming "xchg %ax,%ax". All cores accept the
+ * duplicate prefix, and all of the interesting recent cores can
+ * decode and discard the duplicates in a single cycle.
+ */
+ tcg_debug_assert(n >= 1);
+ for (i = 1; i < n; ++i) {
+ tcg_out8(s, 0x66);
+ }
+ tcg_out8(s, 0x90);
+}
+
#if defined(CONFIG_SOFTMMU)
/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
* int mmu_idx, uintptr_t ra)
@@ -1775,17 +1790,25 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
tcg_out_jmp(s, tb_ret_addr);
break;
case INDEX_op_goto_tb:
- if (s->tb_jmp_offset) {
+ if (s->tb_jmp_insn_offset) {
/* direct jump method */
+ int gap;
+ /* jump displacement must be aligned for atomic patching;
+ * see if we need to add extra nops before jump
+ */
+ gap = tcg_pcrel_diff(s, QEMU_ALIGN_PTR_UP(s->code_ptr + 1, 4));
+ if (gap != 1) {
+ tcg_out_nopn(s, gap - 1);
+ }
tcg_out8(s, OPC_JMP_long); /* jmp im */
- s->tb_jmp_offset[args[0]] = tcg_current_code_size(s);
+ s->tb_jmp_insn_offset[args[0]] = tcg_current_code_size(s);
tcg_out32(s, 0);
} else {
/* indirect jump method */
tcg_out_modrm_offset(s, OPC_GRP5, EXT5_JMPN_Ev, -1,
- (intptr_t)(s->tb_next + args[0]));
+ (intptr_t)(s->tb_jmp_target_addr + args[0]));
}
- s->tb_next_offset[args[0]] = tcg_current_code_size(s);
+ s->tb_jmp_reset_offset[args[0]] = tcg_current_code_size(s);
break;
case INDEX_op_br:
tcg_out_jxx(s, JCC_JMP, arg_label(args[0]), 0);
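
A worked example of the padding logic above (a sketch, not from the patch); the point is that the 32-bit displacement of the JMP must end up naturally aligned so it can be patched with a single atomic store:

/* Suppose s->code_ptr % 4 == 1 when INDEX_op_goto_tb is emitted:
 *   QEMU_ALIGN_PTR_UP(s->code_ptr + 1, 4) == s->code_ptr + 3, so gap == 3
 *   tcg_out_nopn(s, 2) emits 66 90 ("xchg %ax,%ax"), i.e. 2 bytes
 *   the JMP opcode 0xe9 then lands at an address with addr % 4 == 3
 *   and its 4-byte displacement starts at addr % 4 == 0, where it can
 *   be rewritten atomically when the TB is (re)linked.
 */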
diff --git a/tcg/ia64/tcg-target.inc.c b/tcg/ia64/tcg-target.inc.c
index 7557e6a9d4..395223e340 100644
--- a/tcg/ia64/tcg-target.inc.c
+++ b/tcg/ia64/tcg-target.inc.c
@@ -881,13 +881,13 @@ static void tcg_out_exit_tb(TCGContext *s, tcg_target_long arg)
static inline void tcg_out_goto_tb(TCGContext *s, TCGArg arg)
{
- if (s->tb_jmp_offset) {
+ if (s->tb_jmp_insn_offset) {
/* direct jump method */
tcg_abort();
} else {
/* indirect jump method */
tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2,
- (tcg_target_long)(s->tb_next + arg));
+ (tcg_target_long)(s->tb_jmp_target_addr + arg));
tcg_out_bundle(s, MmI,
tcg_opc_m1 (TCG_REG_P0, OPC_LD8_M1,
TCG_REG_R2, TCG_REG_R2),
@@ -900,7 +900,7 @@ static inline void tcg_out_goto_tb(TCGContext *s, TCGArg arg)
tcg_opc_b4 (TCG_REG_P0, OPC_BR_SPTK_MANY_B4,
TCG_REG_B6));
}
- s->tb_next_offset[arg] = tcg_current_code_size(s);
+ s->tb_jmp_reset_offset[arg] = tcg_current_code_size(s);
}
static inline void tcg_out_jmp(TCGContext *s, TCGArg addr)
diff --git a/tcg/mips/tcg-target.inc.c b/tcg/mips/tcg-target.inc.c
index aaf881cfd0..50e98ea63a 100644
--- a/tcg/mips/tcg-target.inc.c
+++ b/tcg/mips/tcg-target.inc.c
@@ -1397,19 +1397,19 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
}
break;
case INDEX_op_goto_tb:
- if (s->tb_jmp_offset) {
+ if (s->tb_jmp_insn_offset) {
/* direct jump method */
- s->tb_jmp_offset[a0] = tcg_current_code_size(s);
+ s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
/* Avoid clobbering the address during retranslation. */
tcg_out32(s, OPC_J | (*(uint32_t *)s->code_ptr & 0x3ffffff));
} else {
/* indirect jump method */
tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP0, TCG_REG_ZERO,
- (uintptr_t)(s->tb_next + a0));
+ (uintptr_t)(s->tb_jmp_target_addr + a0));
tcg_out_opc_reg(s, OPC_JR, 0, TCG_TMP0, 0);
}
tcg_out_nop(s);
- s->tb_next_offset[a0] = tcg_current_code_size(s);
+ s->tb_jmp_reset_offset[a0] = tcg_current_code_size(s);
break;
case INDEX_op_br:
tcg_out_brcond(s, TCG_COND_EQ, TCG_REG_ZERO, TCG_REG_ZERO,
@@ -1885,7 +1885,6 @@ static void tcg_target_init(TCGContext *s)
void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
- uint32_t *ptr = (uint32_t *)jmp_addr;
- *ptr = deposit32(*ptr, 0, 26, addr >> 2);
+ atomic_set((uint32_t *)jmp_addr, deposit32(OPC_J, 0, 26, addr >> 2));
flush_icache_range(jmp_addr, jmp_addr + 4);
}
diff --git a/tcg/ppc/tcg-target.inc.c b/tcg/ppc/tcg-target.inc.c
index 00bb90fc25..da100528ab 100644
--- a/tcg/ppc/tcg-target.inc.c
+++ b/tcg/ppc/tcg-target.inc.c
@@ -1237,6 +1237,7 @@ static void tcg_out_brcond2 (TCGContext *s, const TCGArg *args,
tcg_out_bc(s, BC | BI(7, CR_EQ) | BO_COND_TRUE, arg_label(args[5]));
}
+#ifdef __powerpc64__
void ppc_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr)
{
tcg_insn_unit i1, i2;
@@ -1265,11 +1266,18 @@ void ppc_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr)
pair = (uint64_t)i2 << 32 | i1;
#endif
- /* ??? __atomic_store_8, presuming there's some way to do that
- for 32-bit, otherwise this is good enough for 64-bit. */
- *(uint64_t *)jmp_addr = pair;
+ atomic_set((uint64_t *)jmp_addr, pair);
flush_icache_range(jmp_addr, jmp_addr + 8);
}
+#else
+void ppc_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr)
+{
+ intptr_t diff = addr - jmp_addr;
+ tcg_debug_assert(in_range_b(diff));
+ atomic_set((uint32_t *)jmp_addr, B | (diff & 0x3fffffc));
+ flush_icache_range(jmp_addr, jmp_addr + 4);
+}
+#endif
static void tcg_out_call(TCGContext *s, tcg_insn_unit *target)
{
@@ -1894,17 +1902,23 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
tcg_out_b(s, 0, tb_ret_addr);
break;
case INDEX_op_goto_tb:
- tcg_debug_assert(s->tb_jmp_offset);
- /* Direct jump. Ensure the next insns are 8-byte aligned. */
+ tcg_debug_assert(s->tb_jmp_insn_offset);
+ /* Direct jump. */
+#ifdef __powerpc64__
+ /* Ensure the next insns are 8-byte aligned. */
if ((uintptr_t)s->code_ptr & 7) {
tcg_out32(s, NOP);
}
- s->tb_jmp_offset[args[0]] = tcg_current_code_size(s);
+ s->tb_jmp_insn_offset[args[0]] = tcg_current_code_size(s);
/* To be replaced by either a branch+nop or a load into TMP1. */
s->code_ptr += 2;
tcg_out32(s, MTSPR | RS(TCG_REG_TMP1) | CTR);
tcg_out32(s, BCCTR | BO_ALWAYS);
- s->tb_next_offset[args[0]] = tcg_current_code_size(s);
+#else
+ /* To be replaced by a branch. */
+ s->code_ptr++;
+#endif
+ s->tb_jmp_reset_offset[args[0]] = tcg_current_code_size(s);
break;
case INDEX_op_br:
{
diff --git a/tcg/s390/tcg-target.inc.c b/tcg/s390/tcg-target.inc.c
index 5805532398..e0a60e618c 100644
--- a/tcg/s390/tcg-target.inc.c
+++ b/tcg/s390/tcg-target.inc.c
@@ -219,6 +219,8 @@ typedef enum S390Opcode {
RX_ST = 0x50,
RX_STC = 0x42,
RX_STH = 0x40,
+
+ NOP = 0x0707,
} S390Opcode;
#ifdef CONFIG_DEBUG_TCG
@@ -1715,17 +1717,24 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
break;
case INDEX_op_goto_tb:
- if (s->tb_jmp_offset) {
+ if (s->tb_jmp_insn_offset) {
+ /* branch displacement must be aligned for atomic patching;
+ * see if we need to add extra nop before branch
+ */
+ if (!QEMU_PTR_IS_ALIGNED(s->code_ptr + 1, 4)) {
+ tcg_out16(s, NOP);
+ }
tcg_out16(s, RIL_BRCL | (S390_CC_ALWAYS << 4));
- s->tb_jmp_offset[args[0]] = tcg_current_code_size(s);
+ s->tb_jmp_insn_offset[args[0]] = tcg_current_code_size(s);
s->code_ptr += 2;
} else {
- /* load address stored at s->tb_next + args[0] */
- tcg_out_ld_abs(s, TCG_TYPE_PTR, TCG_TMP0, s->tb_next + args[0]);
+ /* load address stored at s->tb_jmp_target_addr + args[0] */
+ tcg_out_ld_abs(s, TCG_TYPE_PTR, TCG_TMP0,
+ s->tb_jmp_target_addr + args[0]);
/* and go there */
tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, TCG_TMP0);
}
- s->tb_next_offset[args[0]] = tcg_current_code_size(s);
+ s->tb_jmp_reset_offset[args[0]] = tcg_current_code_size(s);
break;
OP_32_64(ld8u):
diff --git a/tcg/sparc/tcg-target.inc.c b/tcg/sparc/tcg-target.inc.c
index d641cfd8c5..9938a5085e 100644
--- a/tcg/sparc/tcg-target.inc.c
+++ b/tcg/sparc/tcg-target.inc.c
@@ -1229,18 +1229,19 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
}
break;
case INDEX_op_goto_tb:
- if (s->tb_jmp_offset) {
+ if (s->tb_jmp_insn_offset) {
/* direct jump method */
- s->tb_jmp_offset[a0] = tcg_current_code_size(s);
+ s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
/* Make sure to preserve links during retranslation. */
tcg_out32(s, CALL | (*s->code_ptr & ~INSN_OP(-1)));
} else {
/* indirect jump method */
- tcg_out_ld_ptr(s, TCG_REG_T1, (uintptr_t)(s->tb_next + a0));
+ tcg_out_ld_ptr(s, TCG_REG_T1,
+ (uintptr_t)(s->tb_jmp_target_addr + a0));
tcg_out_arithi(s, TCG_REG_G0, TCG_REG_T1, 0, JMPL);
}
tcg_out_nop(s);
- s->tb_next_offset[a0] = tcg_current_code_size(s);
+ s->tb_jmp_reset_offset[a0] = tcg_current_code_size(s);
break;
case INDEX_op_br:
tcg_out_bpcc(s, COND_A, BPCC_PT, arg_label(a0));
@@ -1647,6 +1648,6 @@ void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
the code_gen_buffer can't be larger than 2GB. */
tcg_debug_assert(disp == (int32_t)disp);
- *ptr = CALL | (uint32_t)disp >> 2;
+ atomic_set(ptr, deposit32(CALL, 0, 30, disp >> 2));
flush_icache_range(jmp_addr, jmp_addr + 4);
}
diff --git a/tcg/tcg-op.h b/tcg/tcg-op.h
index c446d3dc72..f217e80747 100644
--- a/tcg/tcg-op.h
+++ b/tcg/tcg-op.h
@@ -753,6 +753,19 @@ static inline void tcg_gen_exit_tb(uintptr_t val)
tcg_gen_op1i(INDEX_op_exit_tb, val);
}
+/**
+ * tcg_gen_goto_tb() - output goto_tb TCG operation
+ * @idx: Direct jump slot index (0 or 1)
+ *
+ * See tcg/README for more info about this TCG operation.
+ *
+ * NOTE: In softmmu emulation, direct jumps with goto_tb are only safe within
+ * the pages this TB resides in because we don't take care of direct jumps when
+ * address mapping changes, e.g. in tlb_flush(). In user mode, there's only a
+ * static address translation, so the destination address is always valid, TBs
+ * are always invalidated properly, and direct jumps are reset when mapping
+ * changes.
+ */
void tcg_gen_goto_tb(unsigned idx);
#if TARGET_LONG_BITS == 32
diff --git a/tcg/tcg.h b/tcg/tcg.h
index 40c8fbe2ae..a013d77a33 100644
--- a/tcg/tcg.h
+++ b/tcg/tcg.h
@@ -510,9 +510,9 @@ struct TCGContext {
/* goto_tb support */
tcg_insn_unit *code_buf;
- uintptr_t *tb_next;
- uint16_t *tb_next_offset;
- uint16_t *tb_jmp_offset; /* != NULL if USE_DIRECT_JUMP */
+ uint16_t *tb_jmp_reset_offset; /* tb->jmp_reset_offset */
+ uint16_t *tb_jmp_insn_offset; /* tb->jmp_insn_offset if USE_DIRECT_JUMP */
+ uintptr_t *tb_jmp_target_addr; /* tb->jmp_target_addr if !USE_DIRECT_JUMP */
/* liveness analysis */
uint16_t *op_dead_args; /* for each operation, each bit tells if the
@@ -595,6 +595,12 @@ struct TCGContext {
extern TCGContext tcg_ctx;
+static inline void tcg_set_insn_param(int op_idx, int arg, TCGArg v)
+{
+ int op_argi = tcg_ctx.gen_op_buf[op_idx].args;
+ tcg_ctx.gen_opparam_buf[op_argi + arg] = v;
+}
+
/* The number of opcodes emitted so far. */
static inline int tcg_op_buf_count(void)
{
@@ -919,7 +925,7 @@ static inline unsigned get_mmuidx(TCGMemOpIdx oi)
/**
* tcg_qemu_tb_exec:
- * @env: CPUArchState * for the CPU
+ * @env: pointer to CPUArchState for the CPU
* @tb_ptr: address of generated code for the TB to execute
*
* Start executing code from a given translation block.
@@ -930,30 +936,31 @@ static inline unsigned get_mmuidx(TCGMemOpIdx oi)
* which has not yet been directly linked, or an asynchronous
* event such as an interrupt needs handling.
*
- * The return value is a pointer to the next TB to execute
- * (if known; otherwise zero). This pointer is assumed to be
- * 4-aligned, and the bottom two bits are used to return further
- * information:
+ * Return: The return value is the value passed to the corresponding
+ * tcg_gen_exit_tb() at translation time of the last TB we attempted to execute.
+ * The value is either zero or a 4-byte aligned pointer to that TB combined
+ * with additional information in its two least significant bits. The
+ * additional information is encoded as follows:
* 0, 1: the link between this TB and the next is via the specified
* TB index (0 or 1). That is, we left the TB via (the equivalent
* of) "goto_tb <index>". The main loop uses this to determine
* how to link the TB just executed to the next.
* 2: we are using instruction counting code generation, and we
* did not start executing this TB because the instruction counter
- * would hit zero midway through it. In this case the next-TB pointer
+ * would hit zero midway through it. In this case the pointer
* returned is the TB we were about to execute, and the caller must
* arrange to execute the remaining count of instructions.
* 3: we stopped because the CPU's exit_request flag was set
* (usually meaning that there is an interrupt that needs to be
- * handled). The next-TB pointer returned is the TB we were
- * about to execute when we noticed the pending exit request.
+ * handled). The pointer returned is the TB we were about to execute
+ * when we noticed the pending exit request.
*
* If the bottom two bits indicate an exit-via-index then the CPU
* state is correctly synchronised and ready for execution of the next
* TB (and in particular the guest PC is the address to execute next).
* Otherwise, we gave up on execution of this TB before it started, and
* the caller must fix up the CPU state by calling the CPU's
- * synchronize_from_tb() method with the next-TB pointer we return (falling
+ * synchronize_from_tb() method with the TB pointer we return (falling
* back to calling the CPU's set_pc method with tb->pc if no
* synchronize_from_tb() method exists).
*
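
A decode sketch for the return value described above (illustrative only; the TB_EXIT_* masks are assumed to come from this header, and the surrounding cpu-exec plumbing is elided):

static inline void example_exec_one_tb(CPUArchState *env, uint8_t *tb_ptr)
{
    uintptr_t ret = tcg_qemu_tb_exec(env, tb_ptr);
    TranslationBlock *last_tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
    int tb_exit = ret & TB_EXIT_MASK;

    if (tb_exit == 0 || tb_exit == 1) {
        /* Left via goto_tb <tb_exit>: CPU state is in sync and last_tb
         * may be chained to its successor through that jump slot. */
    } else {
        /* 2 (icount expired) or 3 (exit requested): last_tb did not run,
         * so the caller must resynchronise the CPU state from it first. */
    }
    (void)last_tb;
}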
diff --git a/tcg/tci/tcg-target.inc.c b/tcg/tci/tcg-target.inc.c
index e2fc52a167..fa74d5278e 100644
--- a/tcg/tci/tcg-target.inc.c
+++ b/tcg/tci/tcg-target.inc.c
@@ -553,17 +553,19 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
tcg_out64(s, args[0]);
break;
case INDEX_op_goto_tb:
- if (s->tb_jmp_offset) {
+ if (s->tb_jmp_insn_offset) {
/* Direct jump method. */
- tcg_debug_assert(args[0] < ARRAY_SIZE(s->tb_jmp_offset));
- s->tb_jmp_offset[args[0]] = tcg_current_code_size(s);
+ tcg_debug_assert(args[0] < ARRAY_SIZE(s->tb_jmp_insn_offset));
+ /* Align for atomic patching and thread safety */
+ s->code_ptr = QEMU_ALIGN_PTR_UP(s->code_ptr, 4);
+ s->tb_jmp_insn_offset[args[0]] = tcg_current_code_size(s);
tcg_out32(s, 0);
} else {
/* Indirect jump method. */
TODO();
}
- tcg_debug_assert(args[0] < ARRAY_SIZE(s->tb_next_offset));
- s->tb_next_offset[args[0]] = tcg_current_code_size(s);
+ tcg_debug_assert(args[0] < ARRAY_SIZE(s->tb_jmp_reset_offset));
+ s->tb_jmp_reset_offset[args[0]] = tcg_current_code_size(s);
break;
case INDEX_op_br:
tci_out_label(s, arg_label(args[0]));
diff --git a/tci.c b/tci.c
index 82705fe772..0fdc4e2c12 100644
--- a/tci.c
+++ b/tci.c
@@ -467,7 +467,7 @@ uintptr_t tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr)
{
long tcg_temps[CPU_TEMP_BUF_NLONGS];
uintptr_t sp_value = (uintptr_t)(tcg_temps + CPU_TEMP_BUF_NLONGS);
- uintptr_t next_tb = 0;
+ uintptr_t ret = 0;
tci_reg[TCG_AREG0] = (tcg_target_ulong)env;
tci_reg[TCG_REG_CALL_STACK] = sp_value;
@@ -1085,11 +1085,14 @@ uintptr_t tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr)
/* QEMU specific operations. */
case INDEX_op_exit_tb:
- next_tb = *(uint64_t *)tb_ptr;
+ ret = *(uint64_t *)tb_ptr;
goto exit;
break;
case INDEX_op_goto_tb:
- t0 = tci_read_i32(&tb_ptr);
+ /* Jump address is aligned */
+ tb_ptr = QEMU_ALIGN_PTR_UP(tb_ptr, 4);
+ t0 = atomic_read((int32_t *)tb_ptr);
+ tb_ptr += sizeof(int32_t);
tci_assert(tb_ptr == old_code_ptr + op_size);
tb_ptr += (int32_t)t0;
continue;
@@ -1240,5 +1243,5 @@ uintptr_t tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr)
tci_assert(tb_ptr == old_code_ptr + op_size);
}
exit:
- return next_tb;
+ return ret;
}
diff --git a/tests/.gitignore b/tests/.gitignore
index 9eed22988b..a06a8ba26f 100644
--- a/tests/.gitignore
+++ b/tests/.gitignore
@@ -3,6 +3,7 @@ check-qfloat
check-qint
check-qjson
check-qlist
+check-qnull
check-qstring
check-qom-interface
check-qom-proplist
diff --git a/tests/Makefile b/tests/Makefile
index 9194f1850b..9dddde6589 100644
--- a/tests/Makefile
+++ b/tests/Makefile
@@ -16,6 +16,8 @@ check-unit-y += tests/check-qstring$(EXESUF)
gcov-files-check-qstring-y = qobject/qstring.c
check-unit-y += tests/check-qlist$(EXESUF)
gcov-files-check-qlist-y = qobject/qlist.c
+check-unit-y += tests/check-qnull$(EXESUF)
+gcov-files-check-qnull-y = qobject/qnull.c
check-unit-y += tests/check-qjson$(EXESUF)
gcov-files-check-qjson-y = qobject/qjson.c
check-unit-y += tests/test-qmp-output-visitor$(EXESUF)
@@ -382,7 +384,8 @@ GENERATED_HEADERS += tests/test-qapi-types.h tests/test-qapi-visit.h \
tests/test-qmp-introspect.h
test-obj-y = tests/check-qint.o tests/check-qstring.o tests/check-qdict.o \
- tests/check-qlist.o tests/check-qfloat.o tests/check-qjson.o \
+ tests/check-qlist.o tests/check-qfloat.o tests/check-qnull.o \
+ tests/check-qjson.o \
tests/test-coroutine.o tests/test-string-output-visitor.o \
tests/test-string-input-visitor.o tests/test-qmp-output-visitor.o \
tests/test-qmp-input-visitor.o tests/test-qmp-input-strict.o \
@@ -410,6 +413,7 @@ tests/check-qstring$(EXESUF): tests/check-qstring.o $(test-util-obj-y)
tests/check-qdict$(EXESUF): tests/check-qdict.o $(test-util-obj-y)
tests/check-qlist$(EXESUF): tests/check-qlist.o $(test-util-obj-y)
tests/check-qfloat$(EXESUF): tests/check-qfloat.o $(test-util-obj-y)
+tests/check-qnull$(EXESUF): tests/check-qnull.o $(test-util-obj-y)
tests/check-qjson$(EXESUF): tests/check-qjson.o $(test-util-obj-y)
tests/check-qom-interface$(EXESUF): tests/check-qom-interface.o $(test-qom-obj-y)
tests/check-qom-proplist$(EXESUF): tests/check-qom-proplist.o $(test-qom-obj-y)
diff --git a/tests/check-qnull.c b/tests/check-qnull.c
new file mode 100644
index 0000000000..fd9c68f7e1
--- /dev/null
+++ b/tests/check-qnull.c
@@ -0,0 +1,75 @@
+/*
+ * QNull unit-tests.
+ *
+ * Copyright (C) 2016 Red Hat Inc.
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
+ * See the COPYING.LIB file in the top-level directory.
+ */
+#include "qemu/osdep.h"
+#include <glib.h>
+
+#include "qapi/qmp/qobject.h"
+#include "qemu-common.h"
+#include "qapi/qmp-input-visitor.h"
+#include "qapi/qmp-output-visitor.h"
+#include "qapi/error.h"
+
+/*
+ * Public Interface test-cases
+ *
+ * (with some violations to access 'private' data)
+ */
+
+static void qnull_ref_test(void)
+{
+ QObject *obj;
+
+ g_assert(qnull_.refcnt == 1);
+ obj = qnull();
+ g_assert(obj);
+ g_assert(obj == &qnull_);
+ g_assert(qnull_.refcnt == 2);
+ g_assert(qobject_type(obj) == QTYPE_QNULL);
+ qobject_decref(obj);
+ g_assert(qnull_.refcnt == 1);
+}
+
+static void qnull_visit_test(void)
+{
+ QObject *obj;
+ QmpOutputVisitor *qov;
+ QmpInputVisitor *qiv;
+
+ /*
+ * Most tests of interactions between QObject and visitors are in
+ * test-qmp-*-visitor; but these tests live here because they
+ * depend on layering violations to check qnull_ refcnt.
+ */
+
+ g_assert(qnull_.refcnt == 1);
+ obj = qnull();
+ qiv = qmp_input_visitor_new(obj, true);
+ qobject_decref(obj);
+ visit_type_null(qmp_input_get_visitor(qiv), NULL, &error_abort);
+ qmp_input_visitor_cleanup(qiv);
+
+ qov = qmp_output_visitor_new();
+ visit_type_null(qmp_output_get_visitor(qov), NULL, &error_abort);
+ obj = qmp_output_get_qobject(qov);
+ g_assert(obj == &qnull_);
+ qobject_decref(obj);
+ qmp_output_visitor_cleanup(qov);
+
+ g_assert(qnull_.refcnt == 1);
+}
+
+int main(int argc, char **argv)
+{
+ g_test_init(&argc, &argv, NULL);
+
+ g_test_add_func("/public/qnull_ref", qnull_ref_test);
+ g_test_add_func("/public/qnull_visit", qnull_visit_test);
+
+ return g_test_run();
+}
diff --git a/tests/qemu-iotests/004 b/tests/qemu-iotests/004
index 67e1beb209..6f2aa3d9a2 100755
--- a/tests/qemu-iotests/004
+++ b/tests/qemu-iotests/004
@@ -37,7 +37,7 @@ trap "_cleanup; exit \$status" 0 1 2 3 15
. ./common.rc
. ./common.filter

-_supported_fmt raw qcow qcow2 qed vdi vmdk vhdx
+_supported_fmt raw qcow qcow2 qed vdi vmdk vhdx luks
_supported_proto generic
_supported_os Linux

diff --git a/tests/qemu-iotests/012 b/tests/qemu-iotests/012
index d1d3f22093..01a770d59c 100755
--- a/tests/qemu-iotests/012
+++ b/tests/qemu-iotests/012
@@ -43,13 +43,16 @@ _supported_fmt generic
_supported_proto file
_supported_os Linux
+# Remove once all tests are fixed to use TEST_IMG_FILE
+# correctly and common.rc sets it unconditionally
+test -z "$TEST_IMG_FILE" && TEST_IMG_FILE=$TEST_IMG
size=128M
_make_test_img $size

echo
echo "== mark image read-only"
-chmod a-w "$TEST_IMG"
+chmod a-w "$TEST_IMG_FILE"

echo
echo "== read from read-only image"
diff --git a/tests/qemu-iotests/023.out b/tests/qemu-iotests/023.out
index d4e9be25e1..664871b30a 100644
--- a/tests/qemu-iotests/023.out
+++ b/tests/qemu-iotests/023.out
@@ -225,42 +225,78 @@ wrote 512/512 bytes at offset 108544
wrote 512/512 bytes at offset 109568
512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
=== IO: pattern 216
-offset 110848 is not sector aligned
-offset 111872 is not sector aligned
-offset 112896 is not sector aligned
-offset 113920 is not sector aligned
-offset 114944 is not sector aligned
-offset 115968 is not sector aligned
-offset 116992 is not sector aligned
-offset 118016 is not sector aligned
-offset 119040 is not sector aligned
-offset 120064 is not sector aligned
-offset 121088 is not sector aligned
-offset 122112 is not sector aligned
-offset 123136 is not sector aligned
-offset 124160 is not sector aligned
-offset 125184 is not sector aligned
-offset 126208 is not sector aligned
-offset 127232 is not sector aligned
-offset 128256 is not sector aligned
-offset 129280 is not sector aligned
-offset 130304 is not sector aligned
-offset 131328 is not sector aligned
-offset 132352 is not sector aligned
-offset 133376 is not sector aligned
-offset 134400 is not sector aligned
-offset 135424 is not sector aligned
-offset 136448 is not sector aligned
-offset 137472 is not sector aligned
-offset 138496 is not sector aligned
-offset 139520 is not sector aligned
-offset 140544 is not sector aligned
-offset 141568 is not sector aligned
-offset 142592 is not sector aligned
-offset 143616 is not sector aligned
-offset 144640 is not sector aligned
-offset 145664 is not sector aligned
-offset 146688 is not sector aligned
+wrote 512/512 bytes at offset 110848
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 111872
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 112896
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 113920
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 114944
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 115968
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 116992
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 118016
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 119040
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 120064
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 121088
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 122112
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 123136
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 124160
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 125184
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 126208
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 127232
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 128256
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 129280
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 130304
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 131328
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 132352
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 133376
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 134400
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 135424
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 136448
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 137472
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 138496
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 139520
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 140544
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 141568
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 142592
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 143616
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 144640
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 145664
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 146688
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
=== IO: pattern 33
wrote 2048/2048 bytes at offset 147968
2 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
@@ -507,42 +543,78 @@ read 512/512 bytes at offset 108544
read 512/512 bytes at offset 109568
512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
=== IO: pattern 216
-offset 110848 is not sector aligned
-offset 111872 is not sector aligned
-offset 112896 is not sector aligned
-offset 113920 is not sector aligned
-offset 114944 is not sector aligned
-offset 115968 is not sector aligned
-offset 116992 is not sector aligned
-offset 118016 is not sector aligned
-offset 119040 is not sector aligned
-offset 120064 is not sector aligned
-offset 121088 is not sector aligned
-offset 122112 is not sector aligned
-offset 123136 is not sector aligned
-offset 124160 is not sector aligned
-offset 125184 is not sector aligned
-offset 126208 is not sector aligned
-offset 127232 is not sector aligned
-offset 128256 is not sector aligned
-offset 129280 is not sector aligned
-offset 130304 is not sector aligned
-offset 131328 is not sector aligned
-offset 132352 is not sector aligned
-offset 133376 is not sector aligned
-offset 134400 is not sector aligned
-offset 135424 is not sector aligned
-offset 136448 is not sector aligned
-offset 137472 is not sector aligned
-offset 138496 is not sector aligned
-offset 139520 is not sector aligned
-offset 140544 is not sector aligned
-offset 141568 is not sector aligned
-offset 142592 is not sector aligned
-offset 143616 is not sector aligned
-offset 144640 is not sector aligned
-offset 145664 is not sector aligned
-offset 146688 is not sector aligned
+read 512/512 bytes at offset 110848
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 111872
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 112896
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 113920
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 114944
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 115968
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 116992
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 118016
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 119040
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 120064
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 121088
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 122112
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 123136
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 124160
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 125184
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 126208
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 127232
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 128256
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 129280
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 130304
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 131328
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 132352
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 133376
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 134400
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 135424
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 136448
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 137472
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 138496
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 139520
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 140544
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 141568
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 142592
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 143616
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 144640
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 145664
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 146688
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
=== IO: pattern 33
read 2048/2048 bytes at offset 147968
2 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
@@ -789,42 +861,78 @@ wrote 512/512 bytes at offset 108544
wrote 512/512 bytes at offset 109568
512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
=== IO: pattern 216
-offset 110848 is not sector aligned
-offset 111872 is not sector aligned
-offset 112896 is not sector aligned
-offset 113920 is not sector aligned
-offset 114944 is not sector aligned
-offset 115968 is not sector aligned
-offset 116992 is not sector aligned
-offset 118016 is not sector aligned
-offset 119040 is not sector aligned
-offset 120064 is not sector aligned
-offset 121088 is not sector aligned
-offset 122112 is not sector aligned
-offset 123136 is not sector aligned
-offset 124160 is not sector aligned
-offset 125184 is not sector aligned
-offset 126208 is not sector aligned
-offset 127232 is not sector aligned
-offset 128256 is not sector aligned
-offset 129280 is not sector aligned
-offset 130304 is not sector aligned
-offset 131328 is not sector aligned
-offset 132352 is not sector aligned
-offset 133376 is not sector aligned
-offset 134400 is not sector aligned
-offset 135424 is not sector aligned
-offset 136448 is not sector aligned
-offset 137472 is not sector aligned
-offset 138496 is not sector aligned
-offset 139520 is not sector aligned
-offset 140544 is not sector aligned
-offset 141568 is not sector aligned
-offset 142592 is not sector aligned
-offset 143616 is not sector aligned
-offset 144640 is not sector aligned
-offset 145664 is not sector aligned
-offset 146688 is not sector aligned
+wrote 512/512 bytes at offset 110848
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 111872
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 112896
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 113920
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 114944
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 115968
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 116992
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 118016
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 119040
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 120064
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 121088
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 122112
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 123136
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 124160
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 125184
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 126208
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 127232
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 128256
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 129280
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 130304
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 131328
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 132352
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 133376
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 134400
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 135424
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 136448
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 137472
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 138496
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 139520
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 140544
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 141568
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 142592
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 143616
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 144640
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 145664
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 146688
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
=== IO: pattern 33
wrote 2048/2048 bytes at offset 147968
2 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
@@ -1071,42 +1179,78 @@ read 512/512 bytes at offset 108544
read 512/512 bytes at offset 109568
512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
=== IO: pattern 216
-offset 110848 is not sector aligned
-offset 111872 is not sector aligned
-offset 112896 is not sector aligned
-offset 113920 is not sector aligned
-offset 114944 is not sector aligned
-offset 115968 is not sector aligned
-offset 116992 is not sector aligned
-offset 118016 is not sector aligned
-offset 119040 is not sector aligned
-offset 120064 is not sector aligned
-offset 121088 is not sector aligned
-offset 122112 is not sector aligned
-offset 123136 is not sector aligned
-offset 124160 is not sector aligned
-offset 125184 is not sector aligned
-offset 126208 is not sector aligned
-offset 127232 is not sector aligned
-offset 128256 is not sector aligned
-offset 129280 is not sector aligned
-offset 130304 is not sector aligned
-offset 131328 is not sector aligned
-offset 132352 is not sector aligned
-offset 133376 is not sector aligned
-offset 134400 is not sector aligned
-offset 135424 is not sector aligned
-offset 136448 is not sector aligned
-offset 137472 is not sector aligned
-offset 138496 is not sector aligned
-offset 139520 is not sector aligned
-offset 140544 is not sector aligned
-offset 141568 is not sector aligned
-offset 142592 is not sector aligned
-offset 143616 is not sector aligned
-offset 144640 is not sector aligned
-offset 145664 is not sector aligned
-offset 146688 is not sector aligned
+read 512/512 bytes at offset 110848
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 111872
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 112896
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 113920
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 114944
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 115968
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 116992
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 118016
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 119040
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 120064
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 121088
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 122112
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 123136
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 124160
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 125184
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 126208
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 127232
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 128256
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 129280
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 130304
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 131328
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 132352
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 133376
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 134400
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 135424
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 136448
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 137472
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 138496
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 139520
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 140544
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 141568
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 142592
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 143616
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 144640
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 145664
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 146688
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
=== IO: pattern 33
read 2048/2048 bytes at offset 147968
2 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
@@ -1355,42 +1499,78 @@ wrote 512/512 bytes at offset 4295075840
wrote 512/512 bytes at offset 4295076864
512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
=== IO: pattern 216
-offset 4295078144 is not sector aligned
-offset 4295079168 is not sector aligned
-offset 4295080192 is not sector aligned
-offset 4295081216 is not sector aligned
-offset 4295082240 is not sector aligned
-offset 4295083264 is not sector aligned
-offset 4295084288 is not sector aligned
-offset 4295085312 is not sector aligned
-offset 4295086336 is not sector aligned
-offset 4295087360 is not sector aligned
-offset 4295088384 is not sector aligned
-offset 4295089408 is not sector aligned
-offset 4295090432 is not sector aligned
-offset 4295091456 is not sector aligned
-offset 4295092480 is not sector aligned
-offset 4295093504 is not sector aligned
-offset 4295094528 is not sector aligned
-offset 4295095552 is not sector aligned
-offset 4295096576 is not sector aligned
-offset 4295097600 is not sector aligned
-offset 4295098624 is not sector aligned
-offset 4295099648 is not sector aligned
-offset 4295100672 is not sector aligned
-offset 4295101696 is not sector aligned
-offset 4295102720 is not sector aligned
-offset 4295103744 is not sector aligned
-offset 4295104768 is not sector aligned
-offset 4295105792 is not sector aligned
-offset 4295106816 is not sector aligned
-offset 4295107840 is not sector aligned
-offset 4295108864 is not sector aligned
-offset 4295109888 is not sector aligned
-offset 4295110912 is not sector aligned
-offset 4295111936 is not sector aligned
-offset 4295112960 is not sector aligned
-offset 4295113984 is not sector aligned
+wrote 512/512 bytes at offset 4295078144
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295079168
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295080192
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295081216
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295082240
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295083264
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295084288
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295085312
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295086336
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295087360
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295088384
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295089408
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295090432
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295091456
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295092480
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295093504
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295094528
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295095552
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295096576
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295097600
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295098624
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295099648
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295100672
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295101696
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295102720
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295103744
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295104768
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295105792
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295106816
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295107840
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295108864
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295109888
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295110912
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295111936
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295112960
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295113984
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
=== IO: pattern 33
wrote 2048/2048 bytes at offset 4295115264
2 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
@@ -1637,42 +1817,78 @@ read 512/512 bytes at offset 4295075840
read 512/512 bytes at offset 4295076864
512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
=== IO: pattern 216
-offset 4295078144 is not sector aligned
-offset 4295079168 is not sector aligned
-offset 4295080192 is not sector aligned
-offset 4295081216 is not sector aligned
-offset 4295082240 is not sector aligned
-offset 4295083264 is not sector aligned
-offset 4295084288 is not sector aligned
-offset 4295085312 is not sector aligned
-offset 4295086336 is not sector aligned
-offset 4295087360 is not sector aligned
-offset 4295088384 is not sector aligned
-offset 4295089408 is not sector aligned
-offset 4295090432 is not sector aligned
-offset 4295091456 is not sector aligned
-offset 4295092480 is not sector aligned
-offset 4295093504 is not sector aligned
-offset 4295094528 is not sector aligned
-offset 4295095552 is not sector aligned
-offset 4295096576 is not sector aligned
-offset 4295097600 is not sector aligned
-offset 4295098624 is not sector aligned
-offset 4295099648 is not sector aligned
-offset 4295100672 is not sector aligned
-offset 4295101696 is not sector aligned
-offset 4295102720 is not sector aligned
-offset 4295103744 is not sector aligned
-offset 4295104768 is not sector aligned
-offset 4295105792 is not sector aligned
-offset 4295106816 is not sector aligned
-offset 4295107840 is not sector aligned
-offset 4295108864 is not sector aligned
-offset 4295109888 is not sector aligned
-offset 4295110912 is not sector aligned
-offset 4295111936 is not sector aligned
-offset 4295112960 is not sector aligned
-offset 4295113984 is not sector aligned
+read 512/512 bytes at offset 4295078144
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295079168
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295080192
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295081216
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295082240
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295083264
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295084288
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295085312
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295086336
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295087360
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295088384
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295089408
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295090432
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295091456
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295092480
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295093504
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295094528
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295095552
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295096576
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295097600
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295098624
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295099648
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295100672
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295101696
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295102720
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295103744
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295104768
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295105792
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295106816
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295107840
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295108864
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295109888
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295110912
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295111936
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295112960
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295113984
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
=== IO: pattern 33
read 2048/2048 bytes at offset 4295115264
2 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
@@ -1919,42 +2135,78 @@ wrote 512/512 bytes at offset 4295075840
wrote 512/512 bytes at offset 4295076864
512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
=== IO: pattern 216
-offset 4295078144 is not sector aligned
-offset 4295079168 is not sector aligned
-offset 4295080192 is not sector aligned
-offset 4295081216 is not sector aligned
-offset 4295082240 is not sector aligned
-offset 4295083264 is not sector aligned
-offset 4295084288 is not sector aligned
-offset 4295085312 is not sector aligned
-offset 4295086336 is not sector aligned
-offset 4295087360 is not sector aligned
-offset 4295088384 is not sector aligned
-offset 4295089408 is not sector aligned
-offset 4295090432 is not sector aligned
-offset 4295091456 is not sector aligned
-offset 4295092480 is not sector aligned
-offset 4295093504 is not sector aligned
-offset 4295094528 is not sector aligned
-offset 4295095552 is not sector aligned
-offset 4295096576 is not sector aligned
-offset 4295097600 is not sector aligned
-offset 4295098624 is not sector aligned
-offset 4295099648 is not sector aligned
-offset 4295100672 is not sector aligned
-offset 4295101696 is not sector aligned
-offset 4295102720 is not sector aligned
-offset 4295103744 is not sector aligned
-offset 4295104768 is not sector aligned
-offset 4295105792 is not sector aligned
-offset 4295106816 is not sector aligned
-offset 4295107840 is not sector aligned
-offset 4295108864 is not sector aligned
-offset 4295109888 is not sector aligned
-offset 4295110912 is not sector aligned
-offset 4295111936 is not sector aligned
-offset 4295112960 is not sector aligned
-offset 4295113984 is not sector aligned
+wrote 512/512 bytes at offset 4295078144
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295079168
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295080192
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295081216
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295082240
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295083264
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295084288
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295085312
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295086336
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295087360
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295088384
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295089408
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295090432
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295091456
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295092480
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295093504
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295094528
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295095552
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295096576
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295097600
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295098624
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295099648
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295100672
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295101696
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295102720
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295103744
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295104768
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295105792
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295106816
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295107840
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295108864
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295109888
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295110912
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295111936
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295112960
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295113984
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
=== IO: pattern 33
wrote 2048/2048 bytes at offset 4295115264
2 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
@@ -2201,42 +2453,78 @@ read 512/512 bytes at offset 4295075840
read 512/512 bytes at offset 4295076864
512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
=== IO: pattern 216
-offset 4295078144 is not sector aligned
-offset 4295079168 is not sector aligned
-offset 4295080192 is not sector aligned
-offset 4295081216 is not sector aligned
-offset 4295082240 is not sector aligned
-offset 4295083264 is not sector aligned
-offset 4295084288 is not sector aligned
-offset 4295085312 is not sector aligned
-offset 4295086336 is not sector aligned
-offset 4295087360 is not sector aligned
-offset 4295088384 is not sector aligned
-offset 4295089408 is not sector aligned
-offset 4295090432 is not sector aligned
-offset 4295091456 is not sector aligned
-offset 4295092480 is not sector aligned
-offset 4295093504 is not sector aligned
-offset 4295094528 is not sector aligned
-offset 4295095552 is not sector aligned
-offset 4295096576 is not sector aligned
-offset 4295097600 is not sector aligned
-offset 4295098624 is not sector aligned
-offset 4295099648 is not sector aligned
-offset 4295100672 is not sector aligned
-offset 4295101696 is not sector aligned
-offset 4295102720 is not sector aligned
-offset 4295103744 is not sector aligned
-offset 4295104768 is not sector aligned
-offset 4295105792 is not sector aligned
-offset 4295106816 is not sector aligned
-offset 4295107840 is not sector aligned
-offset 4295108864 is not sector aligned
-offset 4295109888 is not sector aligned
-offset 4295110912 is not sector aligned
-offset 4295111936 is not sector aligned
-offset 4295112960 is not sector aligned
-offset 4295113984 is not sector aligned
+read 512/512 bytes at offset 4295078144
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295079168
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295080192
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295081216
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295082240
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295083264
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295084288
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295085312
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295086336
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295087360
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295088384
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295089408
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295090432
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295091456
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295092480
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295093504
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295094528
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295095552
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295096576
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295097600
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295098624
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295099648
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295100672
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295101696
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295102720
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295103744
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295104768
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295105792
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295106816
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295107840
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295108864
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295109888
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295110912
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295111936
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295112960
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295113984
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
=== IO: pattern 33
read 2048/2048 bytes at offset 4295115264
2 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
@@ -2489,42 +2777,78 @@ read 512/512 bytes at offset 108544
read 512/512 bytes at offset 109568
512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
=== IO: pattern 216
-offset 110848 is not sector aligned
-offset 111872 is not sector aligned
-offset 112896 is not sector aligned
-offset 113920 is not sector aligned
-offset 114944 is not sector aligned
-offset 115968 is not sector aligned
-offset 116992 is not sector aligned
-offset 118016 is not sector aligned
-offset 119040 is not sector aligned
-offset 120064 is not sector aligned
-offset 121088 is not sector aligned
-offset 122112 is not sector aligned
-offset 123136 is not sector aligned
-offset 124160 is not sector aligned
-offset 125184 is not sector aligned
-offset 126208 is not sector aligned
-offset 127232 is not sector aligned
-offset 128256 is not sector aligned
-offset 129280 is not sector aligned
-offset 130304 is not sector aligned
-offset 131328 is not sector aligned
-offset 132352 is not sector aligned
-offset 133376 is not sector aligned
-offset 134400 is not sector aligned
-offset 135424 is not sector aligned
-offset 136448 is not sector aligned
-offset 137472 is not sector aligned
-offset 138496 is not sector aligned
-offset 139520 is not sector aligned
-offset 140544 is not sector aligned
-offset 141568 is not sector aligned
-offset 142592 is not sector aligned
-offset 143616 is not sector aligned
-offset 144640 is not sector aligned
-offset 145664 is not sector aligned
-offset 146688 is not sector aligned
+read 512/512 bytes at offset 110848
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 111872
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 112896
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 113920
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 114944
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 115968
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 116992
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 118016
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 119040
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 120064
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 121088
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 122112
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 123136
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 124160
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 125184
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 126208
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 127232
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 128256
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 129280
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 130304
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 131328
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 132352
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 133376
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 134400
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 135424
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 136448
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 137472
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 138496
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 139520
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 140544
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 141568
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 142592
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 143616
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 144640
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 145664
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 146688
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
=== IO: pattern 33
read 2048/2048 bytes at offset 147968
2 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
@@ -2771,42 +3095,78 @@ read 512/512 bytes at offset 108544
read 512/512 bytes at offset 109568
512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
=== IO: pattern 216
-offset 110848 is not sector aligned
-offset 111872 is not sector aligned
-offset 112896 is not sector aligned
-offset 113920 is not sector aligned
-offset 114944 is not sector aligned
-offset 115968 is not sector aligned
-offset 116992 is not sector aligned
-offset 118016 is not sector aligned
-offset 119040 is not sector aligned
-offset 120064 is not sector aligned
-offset 121088 is not sector aligned
-offset 122112 is not sector aligned
-offset 123136 is not sector aligned
-offset 124160 is not sector aligned
-offset 125184 is not sector aligned
-offset 126208 is not sector aligned
-offset 127232 is not sector aligned
-offset 128256 is not sector aligned
-offset 129280 is not sector aligned
-offset 130304 is not sector aligned
-offset 131328 is not sector aligned
-offset 132352 is not sector aligned
-offset 133376 is not sector aligned
-offset 134400 is not sector aligned
-offset 135424 is not sector aligned
-offset 136448 is not sector aligned
-offset 137472 is not sector aligned
-offset 138496 is not sector aligned
-offset 139520 is not sector aligned
-offset 140544 is not sector aligned
-offset 141568 is not sector aligned
-offset 142592 is not sector aligned
-offset 143616 is not sector aligned
-offset 144640 is not sector aligned
-offset 145664 is not sector aligned
-offset 146688 is not sector aligned
+read 512/512 bytes at offset 110848
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 111872
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 112896
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 113920
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 114944
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 115968
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 116992
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 118016
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 119040
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 120064
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 121088
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 122112
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 123136
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 124160
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 125184
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 126208
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 127232
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 128256
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 129280
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 130304
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 131328
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 132352
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 133376
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 134400
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 135424
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 136448
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 137472
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 138496
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 139520
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 140544
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 141568
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 142592
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 143616
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 144640
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 145664
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 146688
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
=== IO: pattern 33
read 2048/2048 bytes at offset 147968
2 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
@@ -3055,42 +3415,78 @@ read 512/512 bytes at offset 4295075840
read 512/512 bytes at offset 4295076864
512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
=== IO: pattern 216
-offset 4295078144 is not sector aligned
-offset 4295079168 is not sector aligned
-offset 4295080192 is not sector aligned
-offset 4295081216 is not sector aligned
-offset 4295082240 is not sector aligned
-offset 4295083264 is not sector aligned
-offset 4295084288 is not sector aligned
-offset 4295085312 is not sector aligned
-offset 4295086336 is not sector aligned
-offset 4295087360 is not sector aligned
-offset 4295088384 is not sector aligned
-offset 4295089408 is not sector aligned
-offset 4295090432 is not sector aligned
-offset 4295091456 is not sector aligned
-offset 4295092480 is not sector aligned
-offset 4295093504 is not sector aligned
-offset 4295094528 is not sector aligned
-offset 4295095552 is not sector aligned
-offset 4295096576 is not sector aligned
-offset 4295097600 is not sector aligned
-offset 4295098624 is not sector aligned
-offset 4295099648 is not sector aligned
-offset 4295100672 is not sector aligned
-offset 4295101696 is not sector aligned
-offset 4295102720 is not sector aligned
-offset 4295103744 is not sector aligned
-offset 4295104768 is not sector aligned
-offset 4295105792 is not sector aligned
-offset 4295106816 is not sector aligned
-offset 4295107840 is not sector aligned
-offset 4295108864 is not sector aligned
-offset 4295109888 is not sector aligned
-offset 4295110912 is not sector aligned
-offset 4295111936 is not sector aligned
-offset 4295112960 is not sector aligned
-offset 4295113984 is not sector aligned
+read 512/512 bytes at offset 4295078144
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295079168
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295080192
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295081216
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295082240
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295083264
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295084288
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295085312
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295086336
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295087360
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295088384
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295089408
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295090432
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295091456
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295092480
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295093504
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295094528
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295095552
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295096576
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295097600
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295098624
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295099648
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295100672
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295101696
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295102720
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295103744
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295104768
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295105792
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295106816
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295107840
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295108864
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295109888
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295110912
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295111936
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295112960
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295113984
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
=== IO: pattern 33
read 2048/2048 bytes at offset 4295115264
2 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
@@ -3337,42 +3733,78 @@ read 512/512 bytes at offset 4295075840
read 512/512 bytes at offset 4295076864
512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
=== IO: pattern 216
-offset 4295078144 is not sector aligned
-offset 4295079168 is not sector aligned
-offset 4295080192 is not sector aligned
-offset 4295081216 is not sector aligned
-offset 4295082240 is not sector aligned
-offset 4295083264 is not sector aligned
-offset 4295084288 is not sector aligned
-offset 4295085312 is not sector aligned
-offset 4295086336 is not sector aligned
-offset 4295087360 is not sector aligned
-offset 4295088384 is not sector aligned
-offset 4295089408 is not sector aligned
-offset 4295090432 is not sector aligned
-offset 4295091456 is not sector aligned
-offset 4295092480 is not sector aligned
-offset 4295093504 is not sector aligned
-offset 4295094528 is not sector aligned
-offset 4295095552 is not sector aligned
-offset 4295096576 is not sector aligned
-offset 4295097600 is not sector aligned
-offset 4295098624 is not sector aligned
-offset 4295099648 is not sector aligned
-offset 4295100672 is not sector aligned
-offset 4295101696 is not sector aligned
-offset 4295102720 is not sector aligned
-offset 4295103744 is not sector aligned
-offset 4295104768 is not sector aligned
-offset 4295105792 is not sector aligned
-offset 4295106816 is not sector aligned
-offset 4295107840 is not sector aligned
-offset 4295108864 is not sector aligned
-offset 4295109888 is not sector aligned
-offset 4295110912 is not sector aligned
-offset 4295111936 is not sector aligned
-offset 4295112960 is not sector aligned
-offset 4295113984 is not sector aligned
+read 512/512 bytes at offset 4295078144
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295079168
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295080192
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295081216
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295082240
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295083264
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295084288
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295085312
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295086336
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295087360
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295088384
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295089408
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295090432
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295091456
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295092480
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295093504
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295094528
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295095552
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295096576
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295097600
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295098624
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295099648
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295100672
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295101696
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295102720
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295103744
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295104768
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295105792
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295106816
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295107840
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295108864
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295109888
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295110912
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295111936
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295112960
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295113984
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
=== IO: pattern 33
read 2048/2048 bytes at offset 4295115264
2 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
@@ -3623,42 +4055,78 @@ wrote 512/512 bytes at offset 109056
wrote 512/512 bytes at offset 110080
512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
=== IO: pattern 217
-offset 111360 is not sector aligned
-offset 112384 is not sector aligned
-offset 113408 is not sector aligned
-offset 114432 is not sector aligned
-offset 115456 is not sector aligned
-offset 116480 is not sector aligned
-offset 117504 is not sector aligned
-offset 118528 is not sector aligned
-offset 119552 is not sector aligned
-offset 120576 is not sector aligned
-offset 121600 is not sector aligned
-offset 122624 is not sector aligned
-offset 123648 is not sector aligned
-offset 124672 is not sector aligned
-offset 125696 is not sector aligned
-offset 126720 is not sector aligned
-offset 127744 is not sector aligned
-offset 128768 is not sector aligned
-offset 129792 is not sector aligned
-offset 130816 is not sector aligned
-offset 131840 is not sector aligned
-offset 132864 is not sector aligned
-offset 133888 is not sector aligned
-offset 134912 is not sector aligned
-offset 135936 is not sector aligned
-offset 136960 is not sector aligned
-offset 137984 is not sector aligned
-offset 139008 is not sector aligned
-offset 140032 is not sector aligned
-offset 141056 is not sector aligned
-offset 142080 is not sector aligned
-offset 143104 is not sector aligned
-offset 144128 is not sector aligned
-offset 145152 is not sector aligned
-offset 146176 is not sector aligned
-offset 147200 is not sector aligned
+wrote 512/512 bytes at offset 111360
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 112384
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 113408
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 114432
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 115456
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 116480
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 117504
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 118528
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 119552
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 120576
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 121600
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 122624
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 123648
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 124672
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 125696
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 126720
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 127744
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 128768
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 129792
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 130816
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 131840
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 132864
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 133888
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 134912
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 135936
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 136960
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 137984
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 139008
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 140032
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 141056
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 142080
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 143104
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 144128
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 145152
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 146176
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 147200
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
=== IO: pattern 34
wrote 2048/2048 bytes at offset 148480
2 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
@@ -3905,42 +4373,78 @@ read 512/512 bytes at offset 109056
read 512/512 bytes at offset 110080
512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
=== IO: pattern 217
-offset 111360 is not sector aligned
-offset 112384 is not sector aligned
-offset 113408 is not sector aligned
-offset 114432 is not sector aligned
-offset 115456 is not sector aligned
-offset 116480 is not sector aligned
-offset 117504 is not sector aligned
-offset 118528 is not sector aligned
-offset 119552 is not sector aligned
-offset 120576 is not sector aligned
-offset 121600 is not sector aligned
-offset 122624 is not sector aligned
-offset 123648 is not sector aligned
-offset 124672 is not sector aligned
-offset 125696 is not sector aligned
-offset 126720 is not sector aligned
-offset 127744 is not sector aligned
-offset 128768 is not sector aligned
-offset 129792 is not sector aligned
-offset 130816 is not sector aligned
-offset 131840 is not sector aligned
-offset 132864 is not sector aligned
-offset 133888 is not sector aligned
-offset 134912 is not sector aligned
-offset 135936 is not sector aligned
-offset 136960 is not sector aligned
-offset 137984 is not sector aligned
-offset 139008 is not sector aligned
-offset 140032 is not sector aligned
-offset 141056 is not sector aligned
-offset 142080 is not sector aligned
-offset 143104 is not sector aligned
-offset 144128 is not sector aligned
-offset 145152 is not sector aligned
-offset 146176 is not sector aligned
-offset 147200 is not sector aligned
+read 512/512 bytes at offset 111360
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 112384
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 113408
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 114432
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 115456
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 116480
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 117504
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 118528
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 119552
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 120576
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 121600
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 122624
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 123648
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 124672
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 125696
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 126720
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 127744
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 128768
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 129792
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 130816
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 131840
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 132864
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 133888
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 134912
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 135936
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 136960
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 137984
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 139008
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 140032
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 141056
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 142080
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 143104
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 144128
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 145152
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 146176
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 147200
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
=== IO: pattern 34
read 2048/2048 bytes at offset 148480
2 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
@@ -4187,42 +4691,78 @@ wrote 512/512 bytes at offset 109056
wrote 512/512 bytes at offset 110080
512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
=== IO: pattern 217
-offset 111360 is not sector aligned
-offset 112384 is not sector aligned
-offset 113408 is not sector aligned
-offset 114432 is not sector aligned
-offset 115456 is not sector aligned
-offset 116480 is not sector aligned
-offset 117504 is not sector aligned
-offset 118528 is not sector aligned
-offset 119552 is not sector aligned
-offset 120576 is not sector aligned
-offset 121600 is not sector aligned
-offset 122624 is not sector aligned
-offset 123648 is not sector aligned
-offset 124672 is not sector aligned
-offset 125696 is not sector aligned
-offset 126720 is not sector aligned
-offset 127744 is not sector aligned
-offset 128768 is not sector aligned
-offset 129792 is not sector aligned
-offset 130816 is not sector aligned
-offset 131840 is not sector aligned
-offset 132864 is not sector aligned
-offset 133888 is not sector aligned
-offset 134912 is not sector aligned
-offset 135936 is not sector aligned
-offset 136960 is not sector aligned
-offset 137984 is not sector aligned
-offset 139008 is not sector aligned
-offset 140032 is not sector aligned
-offset 141056 is not sector aligned
-offset 142080 is not sector aligned
-offset 143104 is not sector aligned
-offset 144128 is not sector aligned
-offset 145152 is not sector aligned
-offset 146176 is not sector aligned
-offset 147200 is not sector aligned
+wrote 512/512 bytes at offset 111360
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 112384
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 113408
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 114432
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 115456
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 116480
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 117504
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 118528
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 119552
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 120576
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 121600
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 122624
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 123648
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 124672
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 125696
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 126720
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 127744
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 128768
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 129792
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 130816
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 131840
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 132864
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 133888
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 134912
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 135936
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 136960
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 137984
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 139008
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 140032
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 141056
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 142080
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 143104
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 144128
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 145152
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 146176
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 147200
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
=== IO: pattern 34
wrote 2048/2048 bytes at offset 148480
2 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
@@ -4469,42 +5009,78 @@ read 512/512 bytes at offset 109056
read 512/512 bytes at offset 110080
512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
=== IO: pattern 217
-offset 111360 is not sector aligned
-offset 112384 is not sector aligned
-offset 113408 is not sector aligned
-offset 114432 is not sector aligned
-offset 115456 is not sector aligned
-offset 116480 is not sector aligned
-offset 117504 is not sector aligned
-offset 118528 is not sector aligned
-offset 119552 is not sector aligned
-offset 120576 is not sector aligned
-offset 121600 is not sector aligned
-offset 122624 is not sector aligned
-offset 123648 is not sector aligned
-offset 124672 is not sector aligned
-offset 125696 is not sector aligned
-offset 126720 is not sector aligned
-offset 127744 is not sector aligned
-offset 128768 is not sector aligned
-offset 129792 is not sector aligned
-offset 130816 is not sector aligned
-offset 131840 is not sector aligned
-offset 132864 is not sector aligned
-offset 133888 is not sector aligned
-offset 134912 is not sector aligned
-offset 135936 is not sector aligned
-offset 136960 is not sector aligned
-offset 137984 is not sector aligned
-offset 139008 is not sector aligned
-offset 140032 is not sector aligned
-offset 141056 is not sector aligned
-offset 142080 is not sector aligned
-offset 143104 is not sector aligned
-offset 144128 is not sector aligned
-offset 145152 is not sector aligned
-offset 146176 is not sector aligned
-offset 147200 is not sector aligned
+read 512/512 bytes at offset 111360
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 112384
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 113408
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 114432
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 115456
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 116480
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 117504
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 118528
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 119552
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 120576
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 121600
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 122624
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 123648
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 124672
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 125696
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 126720
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 127744
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 128768
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 129792
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 130816
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 131840
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 132864
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 133888
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 134912
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 135936
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 136960
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 137984
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 139008
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 140032
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 141056
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 142080
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 143104
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 144128
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 145152
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 146176
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 147200
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
=== IO: pattern 34
read 2048/2048 bytes at offset 148480
2 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
@@ -4753,42 +5329,78 @@ wrote 512/512 bytes at offset 4295076352
wrote 512/512 bytes at offset 4295077376
512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
=== IO: pattern 217
-offset 4295078656 is not sector aligned
-offset 4295079680 is not sector aligned
-offset 4295080704 is not sector aligned
-offset 4295081728 is not sector aligned
-offset 4295082752 is not sector aligned
-offset 4295083776 is not sector aligned
-offset 4295084800 is not sector aligned
-offset 4295085824 is not sector aligned
-offset 4295086848 is not sector aligned
-offset 4295087872 is not sector aligned
-offset 4295088896 is not sector aligned
-offset 4295089920 is not sector aligned
-offset 4295090944 is not sector aligned
-offset 4295091968 is not sector aligned
-offset 4295092992 is not sector aligned
-offset 4295094016 is not sector aligned
-offset 4295095040 is not sector aligned
-offset 4295096064 is not sector aligned
-offset 4295097088 is not sector aligned
-offset 4295098112 is not sector aligned
-offset 4295099136 is not sector aligned
-offset 4295100160 is not sector aligned
-offset 4295101184 is not sector aligned
-offset 4295102208 is not sector aligned
-offset 4295103232 is not sector aligned
-offset 4295104256 is not sector aligned
-offset 4295105280 is not sector aligned
-offset 4295106304 is not sector aligned
-offset 4295107328 is not sector aligned
-offset 4295108352 is not sector aligned
-offset 4295109376 is not sector aligned
-offset 4295110400 is not sector aligned
-offset 4295111424 is not sector aligned
-offset 4295112448 is not sector aligned
-offset 4295113472 is not sector aligned
-offset 4295114496 is not sector aligned
+wrote 512/512 bytes at offset 4295078656
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295079680
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295080704
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295081728
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295082752
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295083776
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295084800
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295085824
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295086848
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295087872
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295088896
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295089920
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295090944
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295091968
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295092992
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295094016
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295095040
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295096064
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295097088
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295098112
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295099136
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295100160
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295101184
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295102208
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295103232
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295104256
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295105280
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295106304
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295107328
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295108352
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295109376
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295110400
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295111424
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295112448
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295113472
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295114496
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
=== IO: pattern 34
wrote 2048/2048 bytes at offset 4295115776
2 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
@@ -5035,42 +5647,78 @@ read 512/512 bytes at offset 4295076352
read 512/512 bytes at offset 4295077376
512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
=== IO: pattern 217
-offset 4295078656 is not sector aligned
-offset 4295079680 is not sector aligned
-offset 4295080704 is not sector aligned
-offset 4295081728 is not sector aligned
-offset 4295082752 is not sector aligned
-offset 4295083776 is not sector aligned
-offset 4295084800 is not sector aligned
-offset 4295085824 is not sector aligned
-offset 4295086848 is not sector aligned
-offset 4295087872 is not sector aligned
-offset 4295088896 is not sector aligned
-offset 4295089920 is not sector aligned
-offset 4295090944 is not sector aligned
-offset 4295091968 is not sector aligned
-offset 4295092992 is not sector aligned
-offset 4295094016 is not sector aligned
-offset 4295095040 is not sector aligned
-offset 4295096064 is not sector aligned
-offset 4295097088 is not sector aligned
-offset 4295098112 is not sector aligned
-offset 4295099136 is not sector aligned
-offset 4295100160 is not sector aligned
-offset 4295101184 is not sector aligned
-offset 4295102208 is not sector aligned
-offset 4295103232 is not sector aligned
-offset 4295104256 is not sector aligned
-offset 4295105280 is not sector aligned
-offset 4295106304 is not sector aligned
-offset 4295107328 is not sector aligned
-offset 4295108352 is not sector aligned
-offset 4295109376 is not sector aligned
-offset 4295110400 is not sector aligned
-offset 4295111424 is not sector aligned
-offset 4295112448 is not sector aligned
-offset 4295113472 is not sector aligned
-offset 4295114496 is not sector aligned
+read 512/512 bytes at offset 4295078656
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295079680
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295080704
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295081728
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295082752
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295083776
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295084800
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295085824
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295086848
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295087872
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295088896
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295089920
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295090944
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295091968
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295092992
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295094016
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295095040
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295096064
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295097088
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295098112
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295099136
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295100160
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295101184
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295102208
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295103232
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295104256
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295105280
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295106304
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295107328
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295108352
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295109376
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295110400
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295111424
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295112448
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295113472
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295114496
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
=== IO: pattern 34
read 2048/2048 bytes at offset 4295115776
2 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
@@ -5317,42 +5965,78 @@ wrote 512/512 bytes at offset 4295076352
wrote 512/512 bytes at offset 4295077376
512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
=== IO: pattern 217
-offset 4295078656 is not sector aligned
-offset 4295079680 is not sector aligned
-offset 4295080704 is not sector aligned
-offset 4295081728 is not sector aligned
-offset 4295082752 is not sector aligned
-offset 4295083776 is not sector aligned
-offset 4295084800 is not sector aligned
-offset 4295085824 is not sector aligned
-offset 4295086848 is not sector aligned
-offset 4295087872 is not sector aligned
-offset 4295088896 is not sector aligned
-offset 4295089920 is not sector aligned
-offset 4295090944 is not sector aligned
-offset 4295091968 is not sector aligned
-offset 4295092992 is not sector aligned
-offset 4295094016 is not sector aligned
-offset 4295095040 is not sector aligned
-offset 4295096064 is not sector aligned
-offset 4295097088 is not sector aligned
-offset 4295098112 is not sector aligned
-offset 4295099136 is not sector aligned
-offset 4295100160 is not sector aligned
-offset 4295101184 is not sector aligned
-offset 4295102208 is not sector aligned
-offset 4295103232 is not sector aligned
-offset 4295104256 is not sector aligned
-offset 4295105280 is not sector aligned
-offset 4295106304 is not sector aligned
-offset 4295107328 is not sector aligned
-offset 4295108352 is not sector aligned
-offset 4295109376 is not sector aligned
-offset 4295110400 is not sector aligned
-offset 4295111424 is not sector aligned
-offset 4295112448 is not sector aligned
-offset 4295113472 is not sector aligned
-offset 4295114496 is not sector aligned
+wrote 512/512 bytes at offset 4295078656
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295079680
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295080704
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295081728
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295082752
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295083776
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295084800
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295085824
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295086848
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295087872
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295088896
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295089920
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295090944
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295091968
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295092992
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295094016
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295095040
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295096064
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295097088
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295098112
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295099136
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295100160
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295101184
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295102208
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295103232
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295104256
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295105280
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295106304
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295107328
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295108352
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295109376
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295110400
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295111424
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295112448
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295113472
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+wrote 512/512 bytes at offset 4295114496
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
=== IO: pattern 34
wrote 2048/2048 bytes at offset 4295115776
2 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
@@ -5599,42 +6283,78 @@ read 512/512 bytes at offset 4295076352
read 512/512 bytes at offset 4295077376
512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
=== IO: pattern 217
-offset 4295078656 is not sector aligned
-offset 4295079680 is not sector aligned
-offset 4295080704 is not sector aligned
-offset 4295081728 is not sector aligned
-offset 4295082752 is not sector aligned
-offset 4295083776 is not sector aligned
-offset 4295084800 is not sector aligned
-offset 4295085824 is not sector aligned
-offset 4295086848 is not sector aligned
-offset 4295087872 is not sector aligned
-offset 4295088896 is not sector aligned
-offset 4295089920 is not sector aligned
-offset 4295090944 is not sector aligned
-offset 4295091968 is not sector aligned
-offset 4295092992 is not sector aligned
-offset 4295094016 is not sector aligned
-offset 4295095040 is not sector aligned
-offset 4295096064 is not sector aligned
-offset 4295097088 is not sector aligned
-offset 4295098112 is not sector aligned
-offset 4295099136 is not sector aligned
-offset 4295100160 is not sector aligned
-offset 4295101184 is not sector aligned
-offset 4295102208 is not sector aligned
-offset 4295103232 is not sector aligned
-offset 4295104256 is not sector aligned
-offset 4295105280 is not sector aligned
-offset 4295106304 is not sector aligned
-offset 4295107328 is not sector aligned
-offset 4295108352 is not sector aligned
-offset 4295109376 is not sector aligned
-offset 4295110400 is not sector aligned
-offset 4295111424 is not sector aligned
-offset 4295112448 is not sector aligned
-offset 4295113472 is not sector aligned
-offset 4295114496 is not sector aligned
+read 512/512 bytes at offset 4295078656
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295079680
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295080704
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295081728
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295082752
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295083776
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295084800
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295085824
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295086848
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295087872
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295088896
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295089920
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295090944
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295091968
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295092992
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295094016
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295095040
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295096064
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295097088
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295098112
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295099136
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295100160
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295101184
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295102208
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295103232
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295104256
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295105280
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295106304
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295107328
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295108352
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295109376
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295110400
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295111424
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295112448
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295113472
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+read 512/512 bytes at offset 4295114496
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
=== IO: pattern 34
read 2048/2048 bytes at offset 4295115776
2 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
diff --git a/tests/qemu-iotests/039.out b/tests/qemu-iotests/039.out
index 32c884694c..c6e0ac2da3 100644
--- a/tests/qemu-iotests/039.out
+++ b/tests/qemu-iotests/039.out
@@ -12,9 +12,9 @@ Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=134217728
wrote 512/512 bytes at offset 0
512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
./common.config: Killed ( if [ "${VALGRIND_QEMU}" == "y" ]; then
- exec valgrind --log-file="${VALGRIND_LOGFILE}" --error-exitcode=99 "$QEMU_IO_PROG" $QEMU_IO_OPTIONS "$@";
+ exec valgrind --log-file="${VALGRIND_LOGFILE}" --error-exitcode=99 "$QEMU_IO_PROG" $QEMU_IO_ARGS "$@";
else
- exec "$QEMU_IO_PROG" $QEMU_IO_OPTIONS "$@";
+ exec "$QEMU_IO_PROG" $QEMU_IO_ARGS "$@";
fi )
incompatible_features 0x1
ERROR cluster 5 refcount=0 reference=1
@@ -51,9 +51,9 @@ Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=134217728
wrote 512/512 bytes at offset 0
512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
./common.config: Killed ( if [ "${VALGRIND_QEMU}" == "y" ]; then
- exec valgrind --log-file="${VALGRIND_LOGFILE}" --error-exitcode=99 "$QEMU_IO_PROG" $QEMU_IO_OPTIONS "$@";
+ exec valgrind --log-file="${VALGRIND_LOGFILE}" --error-exitcode=99 "$QEMU_IO_PROG" $QEMU_IO_ARGS "$@";
else
- exec "$QEMU_IO_PROG" $QEMU_IO_OPTIONS "$@";
+ exec "$QEMU_IO_PROG" $QEMU_IO_ARGS "$@";
fi )
incompatible_features 0x1
ERROR cluster 5 refcount=0 reference=1
@@ -69,9 +69,9 @@ Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=134217728
wrote 512/512 bytes at offset 0
512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
./common.config: Killed ( if [ "${VALGRIND_QEMU}" == "y" ]; then
- exec valgrind --log-file="${VALGRIND_LOGFILE}" --error-exitcode=99 "$QEMU_IO_PROG" $QEMU_IO_OPTIONS "$@";
+ exec valgrind --log-file="${VALGRIND_LOGFILE}" --error-exitcode=99 "$QEMU_IO_PROG" $QEMU_IO_ARGS "$@";
else
- exec "$QEMU_IO_PROG" $QEMU_IO_OPTIONS "$@";
+ exec "$QEMU_IO_PROG" $QEMU_IO_ARGS "$@";
fi )
incompatible_features 0x0
No errors were found on the image.
@@ -92,9 +92,9 @@ Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=134217728
wrote 512/512 bytes at offset 0
512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
./common.config: Killed ( if [ "${VALGRIND_QEMU}" == "y" ]; then
- exec valgrind --log-file="${VALGRIND_LOGFILE}" --error-exitcode=99 "$QEMU_IO_PROG" $QEMU_IO_OPTIONS "$@";
+ exec valgrind --log-file="${VALGRIND_LOGFILE}" --error-exitcode=99 "$QEMU_IO_PROG" $QEMU_IO_ARGS "$@";
else
- exec "$QEMU_IO_PROG" $QEMU_IO_OPTIONS "$@";
+ exec "$QEMU_IO_PROG" $QEMU_IO_ARGS "$@";
fi )
incompatible_features 0x1
ERROR cluster 5 refcount=0 reference=1
@@ -106,9 +106,9 @@ Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=134217728
wrote 512/512 bytes at offset 0
512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
./common.config: Killed ( if [ "${VALGRIND_QEMU}" == "y" ]; then
- exec valgrind --log-file="${VALGRIND_LOGFILE}" --error-exitcode=99 "$QEMU_IO_PROG" $QEMU_IO_OPTIONS "$@";
+ exec valgrind --log-file="${VALGRIND_LOGFILE}" --error-exitcode=99 "$QEMU_IO_PROG" $QEMU_IO_ARGS "$@";
else
- exec "$QEMU_IO_PROG" $QEMU_IO_OPTIONS "$@";
+ exec "$QEMU_IO_PROG" $QEMU_IO_ARGS "$@";
fi )
incompatible_features 0x0
No errors were found on the image.
diff --git a/tests/qemu-iotests/048 b/tests/qemu-iotests/048
index e1eeac2a31..203c04fc7f 100755
--- a/tests/qemu-iotests/048
+++ b/tests/qemu-iotests/048
@@ -31,13 +31,13 @@ _cleanup()
{
echo "Cleanup"
_cleanup_test_img
- rm "${TEST_IMG2}"
+ rm "${TEST_IMG_FILE2}"
}
trap "_cleanup; exit \$status" 0 1 2 3 15
_compare()
{
- $QEMU_IMG compare "$@" "$TEST_IMG" "${TEST_IMG2}"
+ $QEMU_IMG compare $QEMU_IMG_EXTRA_ARGS "$@" "$TEST_IMG" "${TEST_IMG2}"
echo $?
}
@@ -46,25 +46,37 @@ _compare()
. ./common.filter
. ./common.pattern
-_supported_fmt raw qcow qcow2 qed
+_supported_fmt raw qcow qcow2 qed luks
_supported_proto file
_supported_os Linux
+# Remove once all tests are fixed to use TEST_IMG_FILE
+# correctly and common.rc sets it unconditionally
+test -z "$TEST_IMG_FILE" && TEST_IMG_FILE=$TEST_IMG
+
# Setup test basic parameters
TEST_IMG2=$TEST_IMG.2
+TEST_IMG_FILE2=$TEST_IMG_FILE.2
CLUSTER_SIZE=4096
-size=1024M
+size=128M
_make_test_img $size
io_pattern write 524288 $CLUSTER_SIZE $CLUSTER_SIZE 4 45
# Compare identical images
-cp "$TEST_IMG" "${TEST_IMG2}"
+cp "$TEST_IMG_FILE" "${TEST_IMG_FILE2}"
_compare
_compare -q
# Compare images with different size
-$QEMU_IMG resize -f $IMGFMT "$TEST_IMG" +512M
+if [ "$IMGOPTSSYNTAX" = "true" ]; then
+ $QEMU_IMG resize $QEMU_IMG_EXTRA_ARGS "$TEST_IMG" +32M
+else
+ $QEMU_IMG resize -f $IMGFMT "$TEST_IMG" +32M
+fi
+# Ensure extended space is zero-initialized
+$QEMU_IO "$TEST_IMG" -c "write -z $size 32M" | _filter_qemu_io
+
_compare
_compare -s
@@ -77,7 +89,7 @@ _compare
# Test unaligned case of mismatch offsets in allocated clusters
_make_test_img $size
io_pattern write 0 512 0 1 100
-cp "$TEST_IMG" "$TEST_IMG2"
+cp "$TEST_IMG_FILE" "$TEST_IMG_FILE2"
io_pattern write 512 512 0 1 101
_compare
diff --git a/tests/qemu-iotests/048.out b/tests/qemu-iotests/048.out
index 57100dc453..0bcf6635a1 100644
--- a/tests/qemu-iotests/048.out
+++ b/tests/qemu-iotests/048.out
@@ -1,5 +1,5 @@
QA output created by 048
-Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1073741824
+Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=134217728
=== IO: pattern 45
wrote 4096/4096 bytes at offset 524288
4 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
@@ -13,6 +13,8 @@ Images are identical.
0
0
Image resized.
+wrote 33554432/33554432 bytes at offset 134217728
+32 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
Warning: Image size mismatch!
Images are identical.
0
@@ -28,7 +30,7 @@ wrote 4096/4096 bytes at offset 0
4 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
Content mismatch at offset 0!
1
-Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1073741824
+Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=134217728
=== IO: pattern 100
wrote 512/512 bytes at offset 0
512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
diff --git a/tests/qemu-iotests/052 b/tests/qemu-iotests/052
index 4b647242d2..842eaced3b 100755
--- a/tests/qemu-iotests/052
+++ b/tests/qemu-iotests/052
@@ -48,6 +48,10 @@ size=128M
_make_test_img $size
echo
+echo "== initializing whole image =="
+$QEMU_IO -c "write -z 0 $size" "$TEST_IMG" | _filter_qemu_io
+
+echo
echo "== reading whole image =="
$QEMU_IO -s -c "read 0 $size" "$TEST_IMG" | _filter_qemu_io
diff --git a/tests/qemu-iotests/052.out b/tests/qemu-iotests/052.out
index 9dab51c0e8..a377d3028d 100644
--- a/tests/qemu-iotests/052.out
+++ b/tests/qemu-iotests/052.out
@@ -1,6 +1,10 @@
QA output created by 052
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=134217728
+== initializing whole image ==
+wrote 134217728/134217728 bytes at offset 0
+128 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+
== reading whole image ==
read 134217728/134217728 bytes at offset 0
128 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
diff --git a/tests/qemu-iotests/061.out b/tests/qemu-iotests/061.out
index a03732e19c..a431b7f305 100644
--- a/tests/qemu-iotests/061.out
+++ b/tests/qemu-iotests/061.out
@@ -58,9 +58,9 @@ Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864
wrote 131072/131072 bytes at offset 0
128 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
./common.config: Killed ( if [ "${VALGRIND_QEMU}" == "y" ]; then
- exec valgrind --log-file="${VALGRIND_LOGFILE}" --error-exitcode=99 "$QEMU_IO_PROG" $QEMU_IO_OPTIONS "$@";
+ exec valgrind --log-file="${VALGRIND_LOGFILE}" --error-exitcode=99 "$QEMU_IO_PROG" $QEMU_IO_ARGS "$@";
else
- exec "$QEMU_IO_PROG" $QEMU_IO_OPTIONS "$@";
+ exec "$QEMU_IO_PROG" $QEMU_IO_ARGS "$@";
fi )
magic 0x514649fb
version 3
@@ -220,9 +220,9 @@ Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864
wrote 131072/131072 bytes at offset 0
128 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
./common.config: Killed ( if [ "${VALGRIND_QEMU}" == "y" ]; then
- exec valgrind --log-file="${VALGRIND_LOGFILE}" --error-exitcode=99 "$QEMU_IO_PROG" $QEMU_IO_OPTIONS "$@";
+ exec valgrind --log-file="${VALGRIND_LOGFILE}" --error-exitcode=99 "$QEMU_IO_PROG" $QEMU_IO_ARGS "$@";
else
- exec "$QEMU_IO_PROG" $QEMU_IO_OPTIONS "$@";
+ exec "$QEMU_IO_PROG" $QEMU_IO_ARGS "$@";
fi )
magic 0x514649fb
version 3
diff --git a/tests/qemu-iotests/083 b/tests/qemu-iotests/083
index bc724ae058..bff9360048 100755
--- a/tests/qemu-iotests/083
+++ b/tests/qemu-iotests/083
@@ -43,7 +43,7 @@ choose_tcp_port() {
wait_for_tcp_port() {
while ! (netstat --tcp --listening --numeric | \
- grep "$1.*0\\.0\\.0\\.0:\\*.*LISTEN") 2>&1 >/dev/null; do
+ grep "$1.*0\\.0\\.0\\.0:\\*.*LISTEN") >/dev/null 2>&1; do
sleep 0.1
done
}
@@ -70,7 +70,7 @@ EOF
nbd_url="nbd:127.0.0.1:$port:exportname=foo"
fi
- $PYTHON nbd-fault-injector.py $extra_args "127.0.0.1:$port" "$TEST_DIR/nbd-fault-injector.conf" 2>&1 >/dev/null &
+ $PYTHON nbd-fault-injector.py $extra_args "127.0.0.1:$port" "$TEST_DIR/nbd-fault-injector.conf" >/dev/null 2>&1 &
wait_for_tcp_port "127\\.0\\.0\\.1:$port"
$QEMU_IO -c "read 0 512" "$nbd_url" 2>&1 | _filter_qemu_io | _filter_nbd
diff --git a/tests/qemu-iotests/100 b/tests/qemu-iotests/100
index 5b2fb33330..e66db07982 100755
--- a/tests/qemu-iotests/100
+++ b/tests/qemu-iotests/100
@@ -47,6 +47,7 @@ size=128M
echo
echo "== Single request =="
_make_test_img $size
+$QEMU_IO -c "write -z 0 8k" "$TEST_IMG" | _filter_qemu_io
$QEMU_IO -c "multiwrite 0 4k" "$TEST_IMG" | _filter_qemu_io
echo
@@ -59,6 +60,7 @@ _cleanup_test_img
echo
echo "== Sequential requests =="
_make_test_img $size
+$QEMU_IO -c "write -z 0 12k" "$TEST_IMG" | _filter_qemu_io
$QEMU_IO -c "multiwrite 0 4k ; 4k 4k" "$TEST_IMG" | _filter_qemu_io
echo
@@ -72,6 +74,7 @@ _cleanup_test_img
echo
echo "== Superset overlapping requests =="
_make_test_img $size
+$QEMU_IO -c "write -z 0 8k" "$TEST_IMG" | _filter_qemu_io
$QEMU_IO -c "multiwrite 0 4k ; 1k 2k" "$TEST_IMG" | _filter_qemu_io
echo
@@ -87,6 +90,7 @@ _cleanup_test_img
echo
echo "== Subset overlapping requests =="
_make_test_img $size
+$QEMU_IO -c "write -z 0 8k" "$TEST_IMG" | _filter_qemu_io
$QEMU_IO -c "multiwrite 1k 2k ; 0k 4k" "$TEST_IMG" | _filter_qemu_io
echo
@@ -102,6 +106,7 @@ _cleanup_test_img
echo
echo "== Head overlapping requests =="
_make_test_img $size
+$QEMU_IO -c "write -z 0 8k" "$TEST_IMG" | _filter_qemu_io
$QEMU_IO -c "multiwrite 0k 2k ; 0k 4k" "$TEST_IMG" | _filter_qemu_io
echo
@@ -116,6 +121,7 @@ _cleanup_test_img
echo
echo "== Tail overlapping requests =="
_make_test_img $size
+$QEMU_IO -c "write -z 0 8k" "$TEST_IMG" | _filter_qemu_io
$QEMU_IO -c "multiwrite 2k 2k ; 0k 4k" "$TEST_IMG" | _filter_qemu_io
echo
@@ -130,6 +136,7 @@ _cleanup_test_img
echo
echo "== Disjoint requests =="
_make_test_img $size
+$QEMU_IO -c "write -z 0 72k" "$TEST_IMG" | _filter_qemu_io
$QEMU_IO -c "multiwrite 0 4k ; 64k 4k" "$TEST_IMG" | _filter_qemu_io
echo
diff --git a/tests/qemu-iotests/100.out b/tests/qemu-iotests/100.out
index 05649038d9..a44cae40db 100644
--- a/tests/qemu-iotests/100.out
+++ b/tests/qemu-iotests/100.out
@@ -2,6 +2,8 @@ QA output created by 100
== Single request ==
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=134217728
+wrote 8192/8192 bytes at offset 0
+8 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
wrote 4096/4096 bytes at offset 0
4 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
@@ -13,6 +15,8 @@ read 4096/4096 bytes at offset 4096
== Sequential requests ==
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=134217728
+wrote 12288/12288 bytes at offset 0
+12 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
wrote 8192/8192 bytes at offset 0
8 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
@@ -26,6 +30,8 @@ read 4096/4096 bytes at offset 8192
== Superset overlapping requests ==
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=134217728
+wrote 8192/8192 bytes at offset 0
+8 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
wrote 6144/6144 bytes at offset 0
6 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
@@ -39,6 +45,8 @@ read 4096/4096 bytes at offset 4096
== Subset overlapping requests ==
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=134217728
+wrote 8192/8192 bytes at offset 0
+8 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
wrote 6144/6144 bytes at offset 1024
6 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
@@ -52,6 +60,8 @@ read 4096/4096 bytes at offset 4096
== Head overlapping requests ==
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=134217728
+wrote 8192/8192 bytes at offset 0
+8 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
wrote 6144/6144 bytes at offset 0
6 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
@@ -63,6 +73,8 @@ read 4096/4096 bytes at offset 4096
== Tail overlapping requests ==
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=134217728
+wrote 8192/8192 bytes at offset 0
+8 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
wrote 6144/6144 bytes at offset 2048
6 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
@@ -74,6 +86,8 @@ read 4096/4096 bytes at offset 4096
== Disjoint requests ==
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=134217728
+wrote 73728/73728 bytes at offset 0
+72 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
wrote 8192/8192 bytes at offset 0
8 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
diff --git a/tests/qemu-iotests/137.out b/tests/qemu-iotests/137.out
index 88c702cf77..c0e753483b 100644
--- a/tests/qemu-iotests/137.out
+++ b/tests/qemu-iotests/137.out
@@ -32,9 +32,9 @@ Unsupported value 'blubb' for qcow2 option 'overlap-check'. Allowed are any of t
wrote 512/512 bytes at offset 0
512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
./common.config: Killed ( if [ "${VALGRIND_QEMU}" == "y" ]; then
- exec valgrind --log-file="${VALGRIND_LOGFILE}" --error-exitcode=99 "$QEMU_IO_PROG" $QEMU_IO_OPTIONS "$@";
+ exec valgrind --log-file="${VALGRIND_LOGFILE}" --error-exitcode=99 "$QEMU_IO_PROG" $QEMU_IO_ARGS "$@";
else
- exec "$QEMU_IO_PROG" $QEMU_IO_OPTIONS "$@";
+ exec "$QEMU_IO_PROG" $QEMU_IO_ARGS "$@";
fi )
incompatible_features 0x0
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864
diff --git a/tests/qemu-iotests/common b/tests/qemu-iotests/common
index 49e1931129..d60ea2ce3c 100644
--- a/tests/qemu-iotests/common
+++ b/tests/qemu-iotests/common
@@ -53,6 +53,8 @@ export QEMU_IO_OPTIONS=""
export CACHEMODE_IS_DEFAULT=true
export QEMU_OPTIONS="-nodefaults"
export VALGRIND_QEMU=
+export IMGKEYSECRET=
+export IMGOPTSSYNTAX=false
for r
do
@@ -207,6 +209,13 @@ testlist options
xpand=false
;;
+ -luks)
+ IMGOPTSSYNTAX=true
+ IMGFMT=luks
+ IMGKEYSECRET=123456
+ xpand=false
+ ;;
+
-qed)
IMGFMT=qed
xpand=false
@@ -399,7 +408,11 @@ BEGIN { for (t='$start'; t<='$end'; t++) printf "%03d\n",t }' \
done
# Set qemu-io cache mode with $CACHEMODE we have
-QEMU_IO_OPTIONS="$QEMU_IO_OPTIONS -f $IMGFMT --cache $CACHEMODE"
+if [ "$IMGOPTSSYNTAX" = "true" ]; then
+ QEMU_IO_OPTIONS="$QEMU_IO_OPTIONS --cache $CACHEMODE"
+else
+ QEMU_IO_OPTIONS="$QEMU_IO_OPTIONS -f $IMGFMT --cache $CACHEMODE"
+fi
# Set default options for qemu-img create -o if they were not specified
_set_default_imgopts
diff --git a/tests/qemu-iotests/common.config b/tests/qemu-iotests/common.config
index f824651bac..f6384fbae7 100644
--- a/tests/qemu-iotests/common.config
+++ b/tests/qemu-iotests/common.config
@@ -123,12 +123,19 @@ _qemu_img_wrapper()
_qemu_io_wrapper()
{
local VALGRIND_LOGFILE="${TEST_DIR}"/$$.valgrind
+ local QEMU_IO_ARGS="$QEMU_IO_OPTIONS"
+ if [ "$IMGOPTSSYNTAX" = "true" ]; then
+ QEMU_IO_ARGS="--image-opts $QEMU_IO_ARGS"
+ if [ -n "$IMGKEYSECRET" ]; then
+ QEMU_IO_ARGS="--object secret,id=keysec0,data=$IMGKEYSECRET $QEMU_IO_ARGS"
+ fi
+ fi
local RETVAL
(
if [ "${VALGRIND_QEMU}" == "y" ]; then
- exec valgrind --log-file="${VALGRIND_LOGFILE}" --error-exitcode=99 "$QEMU_IO_PROG" $QEMU_IO_OPTIONS "$@"
+ exec valgrind --log-file="${VALGRIND_LOGFILE}" --error-exitcode=99 "$QEMU_IO_PROG" $QEMU_IO_ARGS "$@"
else
- exec "$QEMU_IO_PROG" $QEMU_IO_OPTIONS "$@"
+ exec "$QEMU_IO_PROG" $QEMU_IO_ARGS "$@"
fi
)
RETVAL=$?
@@ -154,6 +161,16 @@ export QEMU_IMG=_qemu_img_wrapper
export QEMU_IO=_qemu_io_wrapper
export QEMU_NBD=_qemu_nbd_wrapper
+QEMU_IMG_EXTRA_ARGS=
+if [ "$IMGOPTSSYNTAX" = "true" ]; then
+ QEMU_IMG_EXTRA_ARGS="--image-opts $QEMU_IMG_EXTRA_ARGS"
+ if [ -n "$IMGKEYSECRET" ]; then
+ QEMU_IMG_EXTRA_ARGS="--object secret,id=keysec0,data=$IMGKEYSECRET $QEMU_IMG_EXTRA_ARGS"
+ fi
+fi
+export QEMU_IMG_EXTRA_ARGS
+
+
default_machine=$($QEMU -machine help | sed -n '/(default)/ s/ .*//p')
default_alias_machine=$($QEMU -machine help | \
sed -n "/(alias of $default_machine)/ { s/ .*//p; q; }")
diff --git a/tests/qemu-iotests/common.filter b/tests/qemu-iotests/common.filter
index 8a6e1b57c1..7853dbbfdc 100644
--- a/tests/qemu-iotests/common.filter
+++ b/tests/qemu-iotests/common.filter
@@ -92,12 +92,14 @@ _filter_img_create()
-e "s# zeroed_grain=\\(on\\|off\\)##g" \
-e "s# subformat='[^']*'##g" \
-e "s# adapter_type='[^']*'##g" \
+ -e "s# hwversion=[^ ]*##g" \
-e "s# lazy_refcounts=\\(on\\|off\\)##g" \
-e "s# block_size=[0-9]\\+##g" \
-e "s# block_state_zero=\\(on\\|off\\)##g" \
-e "s# log_size=[0-9]\\+##g" \
-e "s/archipelago:a/TEST_DIR\//g" \
- -e "s# refcount_bits=[0-9]\\+##g"
+ -e "s# refcount_bits=[0-9]\\+##g" \
+ -e "s# key-secret=[a-zA-Z0-9]\\+##g"
}
_filter_img_info()
@@ -115,6 +117,7 @@ _filter_img_info()
-e "/zeroed_grain: \\(on\\|off\\)/d" \
-e "/subformat: '[^']*'/d" \
-e "/adapter_type: '[^']*'/d" \
+ -e "/hwversion: '[^']*'/d" \
-e "/lazy_refcounts: \\(on\\|off\\)/d" \
-e "/block_size: [0-9]\\+/d" \
-e "/block_state_zero: \\(on\\|off\\)/d" \
diff --git a/tests/qemu-iotests/common.rc b/tests/qemu-iotests/common.rc
index 5249ec5922..306b00c210 100644
--- a/tests/qemu-iotests/common.rc
+++ b/tests/qemu-iotests/common.rc
@@ -53,21 +53,45 @@ fi
# make sure we have a standard umask
umask 022
-if [ "$IMGPROTO" = "file" ]; then
- TEST_IMG=$TEST_DIR/t.$IMGFMT
-elif [ "$IMGPROTO" = "nbd" ]; then
- TEST_IMG_FILE=$TEST_DIR/t.$IMGFMT
- TEST_IMG="nbd:127.0.0.1:10810"
-elif [ "$IMGPROTO" = "ssh" ]; then
- TEST_IMG_FILE=$TEST_DIR/t.$IMGFMT
- TEST_IMG="ssh://127.0.0.1$TEST_IMG_FILE"
-elif [ "$IMGPROTO" = "nfs" ]; then
- TEST_DIR="nfs://127.0.0.1/$TEST_DIR"
- TEST_IMG=$TEST_DIR/t.$IMGFMT
-elif [ "$IMGPROTO" = "archipelago" ]; then
- TEST_IMG="archipelago:at.$IMGFMT"
+if [ "$IMGOPTSSYNTAX" = "true" ]; then
+ DRIVER="driver=$IMGFMT"
+ if [ "$IMGFMT" = "luks" ]; then
+ DRIVER="$DRIVER,key-secret=keysec0"
+ fi
+ if [ "$IMGPROTO" = "file" ]; then
+ TEST_IMG_FILE=$TEST_DIR/t.$IMGFMT
+ TEST_IMG="$DRIVER,file.filename=$TEST_DIR/t.$IMGFMT"
+ elif [ "$IMGPROTO" = "nbd" ]; then
+ TEST_IMG_FILE=$TEST_DIR/t.$IMGFMT
+ TEST_IMG="$DRIVER,file.driver=nbd,file.host=127.0.0.1,file.port=10810"
+ elif [ "$IMGPROTO" = "ssh" ]; then
+ TEST_IMG_FILE=$TEST_DIR/t.$IMGFMT
+ TEST_IMG="$DRIVER,file.driver=ssh,file.host=127.0.0.1,file.path=$TEST_IMG_FILE"
+ elif [ "$IMGPROTO" = "nfs" ]; then
+ TEST_DIR="$DRIVER,file.driver=nfs,file.filename=nfs://127.0.0.1/$TEST_DIR"
+ TEST_IMG=$TEST_DIR_OPTS/t.$IMGFMT
+ elif [ "$IMGPROTO" = "archipelago" ]; then
+ TEST_IMG="$DRIVER,file.driver=archipelago,file.volume=:at.$IMGFMT"
+ else
+ TEST_IMG="$DRIVER,file.driver=$IMGPROTO,file.filename=$TEST_DIR/t.$IMGFMT"
+ fi
else
- TEST_IMG=$IMGPROTO:$TEST_DIR/t.$IMGFMT
+ if [ "$IMGPROTO" = "file" ]; then
+ TEST_IMG=$TEST_DIR/t.$IMGFMT
+ elif [ "$IMGPROTO" = "nbd" ]; then
+ TEST_IMG_FILE=$TEST_DIR/t.$IMGFMT
+ TEST_IMG="nbd:127.0.0.1:10810"
+ elif [ "$IMGPROTO" = "ssh" ]; then
+ TEST_IMG_FILE=$TEST_DIR/t.$IMGFMT
+ TEST_IMG="ssh://127.0.0.1$TEST_IMG_FILE"
+ elif [ "$IMGPROTO" = "nfs" ]; then
+ TEST_DIR="nfs://127.0.0.1/$TEST_DIR"
+ TEST_IMG=$TEST_DIR/t.$IMGFMT
+ elif [ "$IMGPROTO" = "archipelago" ]; then
+ TEST_IMG="archipelago:at.$IMGFMT"
+ else
+ TEST_IMG=$IMGPROTO:$TEST_DIR/t.$IMGFMT
+ fi
fi
_optstr_add()
@@ -108,6 +132,7 @@ _make_test_img()
local img_name=""
local use_backing=0
local backing_file=""
+ local object_options=""
if [ -n "$TEST_IMG_FILE" ]; then
img_name=$TEST_IMG_FILE
@@ -118,6 +143,10 @@ _make_test_img()
if [ -n "$IMGOPTS" ]; then
optstr=$(_optstr_add "$optstr" "$IMGOPTS")
fi
+ if [ -n "$IMGKEYSECRET" ]; then
+ object_options="--object secret,id=keysec0,data=$IMGKEYSECRET"
+ optstr=$(_optstr_add "$optstr" "key-secret=keysec0")
+ fi
if [ "$1" = "-b" ]; then
use_backing=1
@@ -135,9 +164,9 @@ _make_test_img()
# XXX(hch): have global image options?
(
if [ $use_backing = 1 ]; then
- $QEMU_IMG create -f $IMGFMT $extra_img_options -b "$backing_file" "$img_name" $image_size 2>&1
+ $QEMU_IMG create $object_options -f $IMGFMT $extra_img_options -b "$backing_file" "$img_name" $image_size 2>&1
else
- $QEMU_IMG create -f $IMGFMT $extra_img_options "$img_name" $image_size 2>&1
+ $QEMU_IMG create $object_options -f $IMGFMT $extra_img_options "$img_name" $image_size 2>&1
fi
) | _filter_img_create
@@ -199,7 +228,13 @@ _cleanup_test_img()
_check_test_img()
{
- $QEMU_IMG check "$@" -f $IMGFMT "$TEST_IMG" 2>&1 | _filter_testdir | \
+ (
+ if [ "$IMGOPTSSYNTAX" = "true" ]; then
+ $QEMU_IMG check $QEMU_IMG_EXTRA_ARGS "$@" "$TEST_IMG" 2>&1
+ else
+ $QEMU_IMG check "$@" -f $IMGFMT "$TEST_IMG" 2>&1
+ fi
+ ) | _filter_testdir | \
sed -e '/allocated.*fragmented.*compressed clusters/d' \
-e 's/qemu-img: This image format does not support checks/No errors were found on the image./' \
-e '/Image end offset: [0-9]\+/d'
diff --git a/tests/qemu-iotests/iotests.py b/tests/qemu-iotests/iotests.py
index 56f988ab3d..1687c33efd 100644
--- a/tests/qemu-iotests/iotests.py
+++ b/tests/qemu-iotests/iotests.py
@@ -47,7 +47,7 @@ if os.environ.get('QEMU_OPTIONS'):
imgfmt = os.environ.get('IMGFMT', 'raw')
imgproto = os.environ.get('IMGPROTO', 'file')
-test_dir = os.environ.get('TEST_DIR', '/var/tmp')
+test_dir = os.environ.get('TEST_DIR')
output_dir = os.environ.get('OUTPUT_DIR', '.')
cachemode = os.environ.get('CACHEMODE')
qemu_default_machine = os.environ.get('QEMU_DEFAULT_MACHINE')
@@ -461,6 +461,14 @@ def verify_quorum():
def main(supported_fmts=[], supported_oses=['linux']):
'''Run tests'''
+ # We are using TEST_DIR and QEMU_DEFAULT_MACHINE as proxies to
+ # indicate that we're not being run via "check". There may be
+ # other things set up by "check" that individual test cases rely
+ # on.
+ if test_dir is None or qemu_default_machine is None:
+ sys.stderr.write('Please run this test via the "check" script\n')
+ sys.exit(os.EX_USAGE)
+
debug = '-d' in sys.argv
verbosity = 1
verify_image_format(supported_fmts)
diff --git a/tests/test-qmp-commands.c b/tests/test-qmp-commands.c
index 14a9ebbd5a..5c3edd753a 100644
--- a/tests/test-qmp-commands.c
+++ b/tests/test-qmp-commands.c
@@ -222,20 +222,19 @@ static void test_dealloc_partial(void)
ud2_dict = qdict_new();
qdict_put_obj(ud2_dict, "string0", QOBJECT(qstring_from_str(text)));
- qiv = qmp_input_visitor_new(QOBJECT(ud2_dict));
+ qiv = qmp_input_visitor_new(QOBJECT(ud2_dict), true);
visit_type_UserDefTwo(qmp_input_get_visitor(qiv), NULL, &ud2, &err);
qmp_input_visitor_cleanup(qiv);
QDECREF(ud2_dict);
}
- /* verify partial success */
- assert(ud2 != NULL);
- assert(ud2->string0 != NULL);
- assert(strcmp(ud2->string0, text) == 0);
- assert(ud2->dict1 == NULL);
-
- /* confirm & release construction error */
+ /* verify that visit_type_XXX() cleans up properly on error */
error_free_or_abort(&err);
+ assert(!ud2);
+
+ /* Manually create a partial object, leaving ud2->dict1 at NULL */
+ ud2 = g_new0(UserDefTwo, 1);
+ ud2->string0 = g_strdup(text);
/* tear down partial object */
qapi_free_UserDefTwo(ud2);
diff --git a/tests/test-qmp-input-strict.c b/tests/test-qmp-input-strict.c
index d71727e272..4602529ea0 100644
--- a/tests/test-qmp-input-strict.c
+++ b/tests/test-qmp-input-strict.c
@@ -55,7 +55,7 @@ static Visitor *validate_test_init_internal(TestInputVisitorData *data,
data->obj = qobject_from_jsonv(json_string, ap);
g_assert(data->obj);
- data->qiv = qmp_input_visitor_new_strict(data->obj);
+ data->qiv = qmp_input_visitor_new(data->obj, true);
g_assert(data->qiv);
v = qmp_input_get_visitor(data->qiv);
@@ -182,10 +182,7 @@ static void test_validate_fail_struct(TestInputVisitorData *data,
visit_type_TestStruct(v, NULL, &p, &err);
error_free_or_abort(&err);
- if (p) {
- g_free(p->string);
- }
- g_free(p);
+ g_assert(!p);
}
static void test_validate_fail_struct_nested(TestInputVisitorData *data,
@@ -199,7 +196,7 @@ static void test_validate_fail_struct_nested(TestInputVisitorData *data,
visit_type_UserDefTwo(v, NULL, &udp, &err);
error_free_or_abort(&err);
- qapi_free_UserDefTwo(udp);
+ g_assert(!udp);
}
static void test_validate_fail_list(TestInputVisitorData *data,
@@ -213,7 +210,7 @@ static void test_validate_fail_list(TestInputVisitorData *data,
visit_type_UserDefOneList(v, NULL, &head, &err);
error_free_or_abort(&err);
- qapi_free_UserDefOneList(head);
+ g_assert(!head);
}
static void test_validate_fail_union_native_list(TestInputVisitorData *data,
@@ -228,7 +225,7 @@ static void test_validate_fail_union_native_list(TestInputVisitorData *data,
visit_type_UserDefNativeListUnion(v, NULL, &tmp, &err);
error_free_or_abort(&err);
- qapi_free_UserDefNativeListUnion(tmp);
+ g_assert(!tmp);
}
static void test_validate_fail_union_flat(TestInputVisitorData *data,
@@ -242,7 +239,7 @@ static void test_validate_fail_union_flat(TestInputVisitorData *data,
visit_type_UserDefFlatUnion(v, NULL, &tmp, &err);
error_free_or_abort(&err);
- qapi_free_UserDefFlatUnion(tmp);
+ g_assert(!tmp);
}
static void test_validate_fail_union_flat_no_discrim(TestInputVisitorData *data,
@@ -257,13 +254,13 @@ static void test_validate_fail_union_flat_no_discrim(TestInputVisitorData *data,
visit_type_UserDefFlatUnion2(v, NULL, &tmp, &err);
error_free_or_abort(&err);
- qapi_free_UserDefFlatUnion2(tmp);
+ g_assert(!tmp);
}
static void test_validate_fail_alternate(TestInputVisitorData *data,
const void *unused)
{
- UserDefAlternate *tmp = NULL;
+ UserDefAlternate *tmp;
Visitor *v;
Error *err = NULL;
@@ -271,7 +268,7 @@ static void test_validate_fail_alternate(TestInputVisitorData *data,
visit_type_UserDefAlternate(v, NULL, &tmp, &err);
error_free_or_abort(&err);
- qapi_free_UserDefAlternate(tmp);
+ g_assert(!tmp);
}
static void do_test_validate_qmp_introspect(TestInputVisitorData *data,
diff --git a/tests/test-qmp-input-visitor.c b/tests/test-qmp-input-visitor.c
index 80527eb850..cee07ce8dd 100644
--- a/tests/test-qmp-input-visitor.c
+++ b/tests/test-qmp-input-visitor.c
@@ -51,7 +51,7 @@ static Visitor *visitor_input_test_init_internal(TestInputVisitorData *data,
data->obj = qobject_from_jsonv(json_string, ap);
g_assert(data->obj);
- data->qiv = qmp_input_visitor_new(data->obj);
+ data->qiv = qmp_input_visitor_new(data->obj, false);
g_assert(data->qiv);
v = qmp_input_get_visitor(data->qiv);
@@ -279,6 +279,34 @@ static void test_visitor_in_any(TestInputVisitorData *data,
qobject_decref(res);
}
+static void test_visitor_in_null(TestInputVisitorData *data,
+ const void *unused)
+{
+ Visitor *v;
+ Error *err = NULL;
+ char *tmp;
+
+ /*
+ * FIXME: Since QAPI doesn't know the 'null' type yet, we can't
+ * test visit_type_null() by reading into a QAPI struct then
+ * checking that it was populated correctly. The best we can do
+ * for now is ensure that we consumed null from the input, proven
+ * by the fact that we can't re-read the key; and that we detect
+ * when input is not null.
+ */
+
+ v = visitor_input_test_init(data, "{ 'a': null, 'b': '' }");
+ visit_start_struct(v, NULL, NULL, 0, &error_abort);
+ visit_type_null(v, "a", &error_abort);
+ visit_type_str(v, "a", &tmp, &err);
+ g_assert(!tmp);
+ error_free_or_abort(&err);
+ visit_type_null(v, "b", &err);
+ error_free_or_abort(&err);
+ visit_check_struct(v, &error_abort);
+ visit_end_struct(v);
+}
+
static void test_visitor_in_union_flat(TestInputVisitorData *data,
const void *unused)
{
@@ -745,18 +773,12 @@ static void test_visitor_in_errors(TestInputVisitorData *data,
visit_type_TestStruct(v, NULL, &p, &err);
error_free_or_abort(&err);
- /* FIXME - a failed parse should not leave a partially-allocated p
- * for us to clean up; this could cause callers to leak memory. */
- g_assert(p->string == NULL);
-
- g_free(p->string);
- g_free(p);
+ g_assert(!p);
v = visitor_input_test_init(data, "[ '1', '2', false, '3' ]");
visit_type_strList(v, NULL, &q, &err);
error_free_or_abort(&err);
- assert(q);
- qapi_free_strList(q);
+ assert(!q);
}
static void test_visitor_in_wrong_type(TestInputVisitorData *data,
@@ -829,6 +851,8 @@ int main(int argc, char **argv)
&in_visitor_data, test_visitor_in_list);
input_visitor_test_add("/visitor/input/any",
&in_visitor_data, test_visitor_in_any);
+ input_visitor_test_add("/visitor/input/null",
+ &in_visitor_data, test_visitor_in_null);
input_visitor_test_add("/visitor/input/union-flat",
&in_visitor_data, test_visitor_in_union_flat);
input_visitor_test_add("/visitor/input/alternate",
diff --git a/tests/test-qmp-output-visitor.c b/tests/test-qmp-output-visitor.c
index c70926793a..1f80e696ea 100644
--- a/tests/test-qmp-output-visitor.c
+++ b/tests/test-qmp-output-visitor.c
@@ -43,6 +43,12 @@ static void visitor_output_teardown(TestOutputVisitorData *data,
data->ov = NULL;
}
+static void visitor_reset(TestOutputVisitorData *data)
+{
+ visitor_output_teardown(data, NULL);
+ visitor_output_setup(data, NULL);
+}
+
static void test_visitor_out_int(TestOutputVisitorData *data,
const void *unused)
{
@@ -139,6 +145,7 @@ static void test_visitor_out_enum(TestOutputVisitorData *data,
g_assert_cmpstr(qstring_get_str(qobject_to_qstring(obj)), ==,
EnumOne_lookup[i]);
qobject_decref(obj);
+ visitor_reset(data);
}
}
@@ -153,6 +160,7 @@ static void test_visitor_out_enum_errors(TestOutputVisitorData *data,
visit_type_EnumOne(data->ov, "unused", &bad_values[i], &err);
g_assert(err);
error_free(err);
+ visitor_reset(data);
}
}
@@ -262,6 +270,7 @@ static void test_visitor_out_struct_errors(TestOutputVisitorData *data,
visit_type_UserDefOne(data->ov, "unused", &pu, &err);
g_assert(err);
error_free(err);
+ visitor_reset(data);
}
}
@@ -366,6 +375,7 @@ static void test_visitor_out_any(TestOutputVisitorData *data,
qobject_decref(obj);
qobject_decref(qobj);
+ visitor_reset(data);
qdict = qdict_new();
qdict_put(qdict, "integer", qint_from_int(-42));
qdict_put(qdict, "boolean", qbool_from_bool(true));
@@ -442,6 +452,7 @@ static void test_visitor_out_alternate(TestOutputVisitorData *data,
qapi_free_UserDefAlternate(tmp);
qobject_decref(arg);
+ visitor_reset(data);
tmp = g_new0(UserDefAlternate, 1);
tmp->type = QTYPE_QSTRING;
tmp->u.s = g_strdup("hello");
@@ -455,6 +466,7 @@ static void test_visitor_out_alternate(TestOutputVisitorData *data,
qapi_free_UserDefAlternate(tmp);
qobject_decref(arg);
+ visitor_reset(data);
tmp = g_new0(UserDefAlternate, 1);
tmp->type = QTYPE_QDICT;
tmp->u.udfu.integer = 1;
@@ -477,15 +489,24 @@ static void test_visitor_out_alternate(TestOutputVisitorData *data,
qobject_decref(arg);
}
-static void test_visitor_out_empty(TestOutputVisitorData *data,
- const void *unused)
+static void test_visitor_out_null(TestOutputVisitorData *data,
+ const void *unused)
{
QObject *arg;
+ QDict *qdict;
+ QObject *nil;
+ visit_start_struct(data->ov, NULL, NULL, 0, &error_abort);
+ visit_type_null(data->ov, "a", &error_abort);
+ visit_check_struct(data->ov, &error_abort);
+ visit_end_struct(data->ov);
arg = qmp_output_get_qobject(data->qov);
- g_assert(qobject_type(arg) == QTYPE_QNULL);
- /* Check that qnull reference counting is sane */
- g_assert(arg->refcnt == 2);
+ g_assert(qobject_type(arg) == QTYPE_QDICT);
+ qdict = qobject_to_qdict(arg);
+ g_assert_cmpint(qdict_size(qdict), ==, 1);
+ nil = qdict_get(qdict, "a");
+ g_assert(nil);
+ g_assert(qobject_type(nil) == QTYPE_QNULL);
qobject_decref(arg);
}
@@ -839,8 +860,8 @@ int main(int argc, char **argv)
&out_visitor_data, test_visitor_out_union_flat);
output_visitor_test_add("/visitor/output/alternate",
&out_visitor_data, test_visitor_out_alternate);
- output_visitor_test_add("/visitor/output/empty",
- &out_visitor_data, test_visitor_out_empty);
+ output_visitor_test_add("/visitor/output/null",
+ &out_visitor_data, test_visitor_out_null);
output_visitor_test_add("/visitor/output/native_list/int",
&out_visitor_data,
test_visitor_out_native_list_int);
diff --git a/tests/test-string-input-visitor.c b/tests/test-string-input-visitor.c
index 9e6906a567..5a56920222 100644
--- a/tests/test-string-input-visitor.c
+++ b/tests/test-string-input-visitor.c
@@ -63,6 +63,13 @@ static void test_visitor_in_int(TestInputVisitorData *data,
visit_type_int(v, NULL, &res, &err);
g_assert(!err);
g_assert_cmpint(res, ==, value);
+
+ visitor_input_teardown(data, unused);
+
+ v = visitor_input_test_init(data, "not an int");
+
+ visit_type_int(v, NULL, &res, &err);
+ error_free_or_abort(&err);
}
static void test_visitor_in_intList(TestInputVisitorData *data,
@@ -70,6 +77,7 @@ static void test_visitor_in_intList(TestInputVisitorData *data,
{
int64_t value[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 20};
int16List *res = NULL, *tmp;
+ Error *err = NULL;
Visitor *v;
int i = 0;
@@ -84,12 +92,15 @@ static void test_visitor_in_intList(TestInputVisitorData *data,
}
g_assert(!tmp);
- tmp = res;
- while (tmp) {
- res = res->next;
- g_free(tmp);
- tmp = res;
- }
+ qapi_free_int16List(res);
+
+ visitor_input_teardown(data, unused);
+
+ v = visitor_input_test_init(data, "not an int list");
+
+ visit_type_int16List(v, NULL, &res, &err);
+ error_free_or_abort(&err);
+ g_assert(!res);
}
static void test_visitor_in_bool(TestInputVisitorData *data,
diff --git a/tests/test-visitor-serialization.c b/tests/test-visitor-serialization.c
index 9adbc30a41..7b14b5a7af 100644
--- a/tests/test-visitor-serialization.c
+++ b/tests/test-visitor-serialization.c
@@ -1038,7 +1038,7 @@ static void qmp_deserialize(void **native_out, void *datap,
obj = qobject_from_json(qstring_get_str(output_json));
QDECREF(output_json);
- d->qiv = qmp_input_visitor_new(obj);
+ d->qiv = qmp_input_visitor_new(obj, true);
qobject_decref(obj_orig);
qobject_decref(obj);
visit(qmp_input_get_visitor(d->qiv), native_out, errp);
diff --git a/trace-events b/trace-events
index 8350743878..4fce005afd 100644
--- a/trace-events
+++ b/trace-events
@@ -74,7 +74,6 @@ bdrv_co_copy_on_readv(void *bs, int64_t sector_num, int nb_sector) "bs %p sector
bdrv_co_readv_no_serialising(void *bs, int64_t sector_num, int nb_sector) "bs %p sector_num %"PRId64" nb_sectors %d"
bdrv_co_writev(void *bs, int64_t sector_num, int nb_sector) "bs %p sector_num %"PRId64" nb_sectors %d"
bdrv_co_write_zeroes(void *bs, int64_t sector_num, int nb_sector, int flags) "bs %p sector_num %"PRId64" nb_sectors %d flags %#x"
-bdrv_co_io_em(void *bs, int64_t sector_num, int nb_sectors, int is_write, void *acb) "bs %p sector_num %"PRId64" nb_sectors %d is_write %d acb %p"
bdrv_co_do_copy_on_readv(void *bs, int64_t sector_num, int nb_sectors, int64_t cluster_sector_num, int cluster_nb_sectors) "bs %p sector_num %"PRId64" nb_sectors %d cluster_sector_num %"PRId64" cluster_nb_sectors %d"
# block/stream.c
@@ -119,7 +118,7 @@ virtio_blk_req_complete(void *req, int status) "req %p status %d"
virtio_blk_rw_complete(void *req, int ret) "req %p ret %d"
virtio_blk_handle_write(void *req, uint64_t sector, size_t nsectors) "req %p sector %"PRIu64" nsectors %zu"
virtio_blk_handle_read(void *req, uint64_t sector, size_t nsectors) "req %p sector %"PRIu64" nsectors %zu"
-virtio_blk_submit_multireq(void *mrb, int start, int num_reqs, uint64_t sector, size_t nsectors, bool is_write) "mrb %p start %d num_reqs %d sector %"PRIu64" nsectors %zu is_write %d"
+virtio_blk_submit_multireq(void *mrb, int start, int num_reqs, uint64_t offset, size_t size, bool is_write) "mrb %p start %d num_reqs %d offset %"PRIu64" size %zu is_write %d"
# hw/block/dataplane/virtio-blk.c
virtio_blk_data_plane_start(void *s) "dataplane %p"
@@ -1615,7 +1614,7 @@ kvm_failed_spr_get(int str, const char *msg) "Warning: Unable to retrieve SPR %d
# cpu-exec.c
disable exec_tb(void *tb, uintptr_t pc) "tb:%p pc=0x%"PRIxPTR
disable exec_tb_nocache(void *tb, uintptr_t pc) "tb:%p pc=0x%"PRIxPTR
-disable exec_tb_exit(void *next_tb, unsigned int flags) "tb:%p flags=%x"
+disable exec_tb_exit(void *last_tb, unsigned int flags) "tb:%p flags=%x"
# translate-all.c
translate_block(void *tb, uintptr_t pc, uint8_t *tb_code) "tb:%p, pc:0x%"PRIxPTR", tb_code:%p"
diff --git a/translate-all.c b/translate-all.c
index 8329ea60ee..b54f472531 100644
--- a/translate-all.c
+++ b/translate-all.c
@@ -72,11 +72,12 @@
typedef struct PageDesc {
/* list of TBs intersecting this ram page */
TranslationBlock *first_tb;
+#ifdef CONFIG_SOFTMMU
/* in order to optimize self modifying code, we count the number
of lookups we do to a given page to use a bitmap */
unsigned int code_write_count;
unsigned long *code_bitmap;
-#if defined(CONFIG_USER_ONLY)
+#else
unsigned long flags;
#endif
} PageDesc;
@@ -153,8 +154,6 @@ void tb_lock_reset(void)
#endif
}
-static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
- tb_page_addr_t phys_page2);
static TranslationBlock *tb_find_pc(uintptr_t tc_ptr);
void cpu_gen_init(void)
@@ -306,7 +305,6 @@ bool cpu_restore_state(CPUState *cpu, uintptr_t retaddr)
cpu_restore_state_from_tb(cpu, tb, retaddr);
if (tb->cflags & CF_NOCACHE) {
/* one-shot translation, invalidate it immediately */
- cpu->current_tb = NULL;
tb_phys_invalidate(tb, -1);
tb_free(tb);
}
@@ -464,6 +462,8 @@ static inline PageDesc *page_find(tb_page_addr_t index)
# define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024)
#elif defined(__powerpc64__)
# define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024)
+#elif defined(__powerpc__)
+# define MAX_CODE_GEN_BUFFER_SIZE (32u * 1024 * 1024)
#elif defined(__aarch64__)
# define MAX_CODE_GEN_BUFFER_SIZE (128ul * 1024 * 1024)
#elif defined(__arm__)
@@ -505,7 +505,6 @@ static inline size_t size_code_gen_buffer(size_t tb_size)
if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
tb_size = MAX_CODE_GEN_BUFFER_SIZE;
}
- tcg_ctx.code_gen_buffer_size = tb_size;
return tb_size;
}
@@ -514,7 +513,7 @@ static inline size_t size_code_gen_buffer(size_t tb_size)
that the buffer not cross a 256MB boundary. */
static inline bool cross_256mb(void *addr, size_t size)
{
- return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & 0xf0000000;
+ return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & ~0x0ffffffful;
}
/* We weren't able to allocate a buffer without crossing that boundary,
@@ -522,7 +521,7 @@ static inline bool cross_256mb(void *addr, size_t size)
Returns the new base of the buffer, and adjusts code_gen_buffer_size. */
static inline void *split_cross_256mb(void *buf1, size_t size1)
{
- void *buf2 = (void *)(((uintptr_t)buf1 + size1) & 0xf0000000);
+ void *buf2 = (void *)(((uintptr_t)buf1 + size1) & ~0x0ffffffful);
size_t size2 = buf1 + size1 - buf2;
size1 = buf2 - buf1;
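
Note: the two hunks above widen the 256 MiB boundary mask so that it also keeps the upper half of a 64-bit address, instead of truncating it with the old 0xf0000000 constant. The following is a minimal stand-alone sketch of the same arithmetic; crosses_256mb and the demo values are invented for illustration and are not QEMU code.

#include <stdint.h>
#include <stdio.h>

/* Two addresses lie in the same 256 MiB-aligned window exactly when all
 * bits above bit 27 agree, so XOR-ing the start and end addresses and
 * masking off the low 28 bits is non-zero only when a boundary is
 * crossed.  Using ~(uintptr_t)0x0fffffff keeps the high 32 bits of a
 * 64-bit address in the comparison, which 0xf0000000 silently dropped. */
static int crosses_256mb(uintptr_t addr, size_t size)
{
    return ((addr ^ (addr + size)) & ~(uintptr_t)0x0fffffff) != 0;
}

int main(void)
{
    uintptr_t base = 0x0ff00000;      /* 1 MiB below the 256 MiB mark */
    size_t    size = 2 * 1024 * 1024; /* 2 MiB buffer */

    if (crosses_256mb(base, size)) {
        /* Split point: first address of the next 256 MiB window, the same
         * value split_cross_256mb() computes in the patch above. */
        uintptr_t split = (base + size) & ~(uintptr_t)0x0fffffff;
        printf("crosses: keep [%#lx,%#lx) or [%#lx,%#lx)\n",
               (unsigned long)base, (unsigned long)split,
               (unsigned long)split, (unsigned long)(base + size));
    } else {
        printf("fits inside one 256 MiB window\n");
    }
    return 0;
}
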
@@ -683,11 +682,11 @@ static inline void *alloc_code_gen_buffer(void)
case 1:
if (!cross_256mb(buf2, size)) {
/* Success! Use the new buffer. */
- munmap(buf, size);
+ munmap(buf, size + qemu_real_host_page_size);
break;
}
/* Failure. Work with what we had. */
- munmap(buf2, size);
+ munmap(buf2, size + qemu_real_host_page_size);
/* fallthru */
default:
/* Split the original buffer. Free the smaller half. */
@@ -784,9 +783,11 @@ void tb_free(TranslationBlock *tb)
static inline void invalidate_page_bitmap(PageDesc *p)
{
+#ifdef CONFIG_SOFTMMU
g_free(p->code_bitmap);
p->code_bitmap = NULL;
p->code_write_count = 0;
+#endif
}
/* Set to NULL all the 'first_tb' fields in all PageDescs. */
@@ -841,6 +842,7 @@ void tb_flush(CPUState *cpu)
CPU_FOREACH(cpu) {
memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
+ cpu->tb_flushed = true;
}
memset(tcg_ctx.tb_ctx.tb_phys_hash, 0, sizeof(tcg_ctx.tb_ctx.tb_phys_hash));
@@ -925,32 +927,33 @@ static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
}
}
-static inline void tb_jmp_remove(TranslationBlock *tb, int n)
+/* remove the TB from a list of TBs jumping to the n-th jump target of the TB */
+static inline void tb_remove_from_jmp_list(TranslationBlock *tb, int n)
{
- TranslationBlock *tb1, **ptb;
+ TranslationBlock *tb1;
+ uintptr_t *ptb, ntb;
unsigned int n1;
- ptb = &tb->jmp_next[n];
- tb1 = *ptb;
- if (tb1) {
+ ptb = &tb->jmp_list_next[n];
+ if (*ptb) {
/* find tb(n) in circular list */
for (;;) {
- tb1 = *ptb;
- n1 = (uintptr_t)tb1 & 3;
- tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
+ ntb = *ptb;
+ n1 = ntb & 3;
+ tb1 = (TranslationBlock *)(ntb & ~3);
if (n1 == n && tb1 == tb) {
break;
}
if (n1 == 2) {
- ptb = &tb1->jmp_first;
+ ptb = &tb1->jmp_list_first;
} else {
- ptb = &tb1->jmp_next[n1];
+ ptb = &tb1->jmp_list_next[n1];
}
}
/* now we can suppress tb(n) from the list */
- *ptb = tb->jmp_next[n];
+ *ptb = tb->jmp_list_next[n];
- tb->jmp_next[n] = NULL;
+ tb->jmp_list_next[n] = (uintptr_t)NULL;
}
}
@@ -958,7 +961,29 @@ static inline void tb_jmp_remove(TranslationBlock *tb, int n)
another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
- tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
+ uintptr_t addr = (uintptr_t)(tb->tc_ptr + tb->jmp_reset_offset[n]);
+ tb_set_jmp_target(tb, n, addr);
+}
+
+/* remove any jumps to the TB */
+static inline void tb_jmp_unlink(TranslationBlock *tb)
+{
+ TranslationBlock *tb1;
+ uintptr_t *ptb, ntb;
+ unsigned int n1;
+
+ ptb = &tb->jmp_list_first;
+ for (;;) {
+ ntb = *ptb;
+ n1 = ntb & 3;
+ tb1 = (TranslationBlock *)(ntb & ~3);
+ if (n1 == 2) {
+ break;
+ }
+ tb_reset_jump(tb1, n1);
+ *ptb = tb1->jmp_list_next[n1];
+ tb1->jmp_list_next[n1] = (uintptr_t)NULL;
+ }
}
/* invalidate one TB */
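
Note: the rewritten tb_remove_from_jmp_list() and the new tb_jmp_unlink() above walk the same structure. Every TB that jumps to a destination is chained through jmp_list_next[n], the slot number n (0 or 1) is stored in the two low bits of each entry, and the destination's jmp_list_first terminates the chain with the destination's own address tagged 2. The following self-contained sketch models only that encoding; DemoTB, tb_add_jump and tb_dump_incoming are invented names for illustration, not QEMU code.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct DemoTB DemoTB;
struct DemoTB {
    const char *name;
    uintptr_t jmp_list_next[2]; /* next list entry, tagged with the slot used */
    uintptr_t jmp_list_first;   /* head of incoming-jump list, tag 2 = end */
};

static void tb_init(DemoTB *tb, const char *name)
{
    assert(((uintptr_t)tb & 3) == 0);       /* low bits must be free for tags */
    tb->name = name;
    tb->jmp_list_next[0] = tb->jmp_list_next[1] = 0;
    tb->jmp_list_first = (uintptr_t)tb | 2; /* empty list: own address, tag 2 */
}

/* Record that 'src' jumps to 'dst' through its exit slot n (0 or 1). */
static void tb_add_jump(DemoTB *src, int n, DemoTB *dst)
{
    src->jmp_list_next[n] = dst->jmp_list_first;
    dst->jmp_list_first = (uintptr_t)src | n;
}

/* Visit every TB that jumps into 'dst', as tb_jmp_unlink() does. */
static void tb_dump_incoming(DemoTB *dst)
{
    uintptr_t entry = dst->jmp_list_first;
    while ((entry & 3) != 2) {              /* tag 2 marks the list head */
        DemoTB *src = (DemoTB *)(entry & ~(uintptr_t)3);
        int n = entry & 3;
        printf("%s jumps to %s via slot %d\n", src->name, dst->name, n);
        entry = src->jmp_list_next[n];
    }
}

int main(void)
{
    DemoTB *a = malloc(sizeof(*a));
    DemoTB *b = malloc(sizeof(*b));
    DemoTB *c = malloc(sizeof(*c));
    tb_init(a, "A"); tb_init(b, "B"); tb_init(c, "C");

    tb_add_jump(a, 0, c);   /* A's first exit goes to C */
    tb_add_jump(b, 1, c);   /* B's second exit goes to C */
    tb_dump_incoming(c);

    free(a); free(b); free(c);
    return 0;
}
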
@@ -966,9 +991,8 @@ void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
CPUState *cpu;
PageDesc *p;
- unsigned int h, n1;
+ unsigned int h;
tb_page_addr_t phys_pc;
- TranslationBlock *tb1, *tb2;
/* remove the TB from the hash list */
phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
@@ -987,8 +1011,6 @@ void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
invalidate_page_bitmap(p);
}
- tcg_ctx.tb_ctx.tb_invalidated_flag = 1;
-
/* remove the TB from the hash list */
h = tb_jmp_cache_hash_func(tb->pc);
CPU_FOREACH(cpu) {
@@ -998,27 +1020,16 @@ void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
}
/* suppress this TB from the two jump lists */
- tb_jmp_remove(tb, 0);
- tb_jmp_remove(tb, 1);
+ tb_remove_from_jmp_list(tb, 0);
+ tb_remove_from_jmp_list(tb, 1);
/* suppress any remaining jumps to this TB */
- tb1 = tb->jmp_first;
- for (;;) {
- n1 = (uintptr_t)tb1 & 3;
- if (n1 == 2) {
- break;
- }
- tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
- tb2 = tb1->jmp_next[n1];
- tb_reset_jump(tb1, n1);
- tb1->jmp_next[n1] = NULL;
- tb1 = tb2;
- }
- tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); /* fail safe */
+ tb_jmp_unlink(tb);
tcg_ctx.tb_ctx.tb_phys_invalidate_count++;
}
+#ifdef CONFIG_SOFTMMU
static void build_page_bitmap(PageDesc *p)
{
int n, tb_start, tb_end;
@@ -1047,11 +1058,100 @@ static void build_page_bitmap(PageDesc *p)
tb = tb->page_next[n];
}
}
+#endif
+
+/* add the tb in the target page and protect it if necessary
+ *
+ * Called with mmap_lock held for user-mode emulation.
+ */
+static inline void tb_alloc_page(TranslationBlock *tb,
+ unsigned int n, tb_page_addr_t page_addr)
+{
+ PageDesc *p;
+#ifndef CONFIG_USER_ONLY
+ bool page_already_protected;
+#endif
+
+ tb->page_addr[n] = page_addr;
+ p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
+ tb->page_next[n] = p->first_tb;
+#ifndef CONFIG_USER_ONLY
+ page_already_protected = p->first_tb != NULL;
+#endif
+ p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
+ invalidate_page_bitmap(p);
+
+#if defined(CONFIG_USER_ONLY)
+ if (p->flags & PAGE_WRITE) {
+ target_ulong addr;
+ PageDesc *p2;
+ int prot;
+
+ /* force the host page as non writable (writes will have a
+ page fault + mprotect overhead) */
+ page_addr &= qemu_host_page_mask;
+ prot = 0;
+ for (addr = page_addr; addr < page_addr + qemu_host_page_size;
+ addr += TARGET_PAGE_SIZE) {
+
+ p2 = page_find(addr >> TARGET_PAGE_BITS);
+ if (!p2) {
+ continue;
+ }
+ prot |= p2->flags;
+ p2->flags &= ~PAGE_WRITE;
+ }
+ mprotect(g2h(page_addr), qemu_host_page_size,
+ (prot & PAGE_BITS) & ~PAGE_WRITE);
+#ifdef DEBUG_TB_INVALIDATE
+ printf("protecting code page: 0x" TARGET_FMT_lx "\n",
+ page_addr);
+#endif
+ }
+#else
+ /* if some code is already present, then the pages are already
+ protected. So we handle the case where only the first TB is
+ allocated in a physical page */
+ if (!page_already_protected) {
+ tlb_protect_code(page_addr);
+ }
+#endif
+}
+
+/* add a new TB and link it to the physical page tables. phys_page2 is
+ * (-1) to indicate that only one page contains the TB.
+ *
+ * Called with mmap_lock held for user-mode emulation.
+ */
+static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
+ tb_page_addr_t phys_page2)
+{
+ unsigned int h;
+ TranslationBlock **ptb;
+
+ /* add in the physical hash table */
+ h = tb_phys_hash_func(phys_pc);
+ ptb = &tcg_ctx.tb_ctx.tb_phys_hash[h];
+ tb->phys_hash_next = *ptb;
+ *ptb = tb;
+
+ /* add in the page list */
+ tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
+ if (phys_page2 != -1) {
+ tb_alloc_page(tb, 1, phys_page2);
+ } else {
+ tb->page_addr[1] = -1;
+ }
+
+#ifdef DEBUG_TB_CHECK
+ tb_page_check();
+#endif
+}
/* Called with mmap_lock held for user mode emulation. */
TranslationBlock *tb_gen_code(CPUState *cpu,
target_ulong pc, target_ulong cs_base,
- int flags, int cflags)
+ uint32_t flags, int cflags)
{
CPUArchState *env = cpu->env_ptr;
TranslationBlock *tb;
@@ -1076,8 +1176,6 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
/* cannot fail at this point */
tb = tb_alloc(pc);
assert(tb != NULL);
- /* Don't forget to invalidate previous TB info. */
- tcg_ctx.tb_ctx.tb_invalidated_flag = 1;
}
gen_code_buf = tcg_ctx.code_gen_ptr;
@@ -1099,15 +1197,15 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
trace_translate_block(tb, tb->pc, tb->tc_ptr);
/* generate machine code */
- tb->tb_next_offset[0] = 0xffff;
- tb->tb_next_offset[1] = 0xffff;
- tcg_ctx.tb_next_offset = tb->tb_next_offset;
+ tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID;
+ tb->jmp_reset_offset[1] = TB_JMP_RESET_OFFSET_INVALID;
+ tcg_ctx.tb_jmp_reset_offset = tb->jmp_reset_offset;
#ifdef USE_DIRECT_JUMP
- tcg_ctx.tb_jmp_offset = tb->tb_jmp_offset;
- tcg_ctx.tb_next = NULL;
+ tcg_ctx.tb_jmp_insn_offset = tb->jmp_insn_offset;
+ tcg_ctx.tb_jmp_target_addr = NULL;
#else
- tcg_ctx.tb_jmp_offset = NULL;
- tcg_ctx.tb_next = tb->tb_next;
+ tcg_ctx.tb_jmp_insn_offset = NULL;
+ tcg_ctx.tb_jmp_target_addr = tb->jmp_target_addr;
#endif
#ifdef CONFIG_PROFILER
@@ -1151,12 +1249,31 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
CODE_GEN_ALIGN);
+ /* init jump list */
+ assert(((uintptr_t)tb & 3) == 0);
+ tb->jmp_list_first = (uintptr_t)tb | 2;
+ tb->jmp_list_next[0] = (uintptr_t)NULL;
+ tb->jmp_list_next[1] = (uintptr_t)NULL;
+
+ /* init original jump addresses which have been set during tcg_gen_code() */
+ if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
+ tb_reset_jump(tb, 0);
+ }
+ if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
+ tb_reset_jump(tb, 1);
+ }
+
/* check next page if needed */
virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
phys_page2 = -1;
if ((pc & TARGET_PAGE_MASK) != virt_page2) {
phys_page2 = get_page_addr_code(env, virt_page2);
}
+ /* As long as consistency of the TB stuff is provided by tb_lock in user
+ * mode and is implicit in single-threaded softmmu emulation, no explicit
+ * memory barrier is required before tb_link_page() makes the TB visible
+ * through the physical hash table and physical page list.
+ */
tb_link_page(tb, phys_pc, phys_page2);
return tb;
}
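
Note: tb_gen_code() above now initializes the jump bookkeeping itself; jmp_reset_offset[] replaces the old tb_next_offset[] and TB_JMP_RESET_OFFSET_INVALID replaces the bare 0xffff magic number. The following tiny stand-alone model shows only that convention; DemoTB, demo_reset_jump and the offset values are invented for illustration, not QEMU code.

#include <stdint.h>
#include <stdio.h>

#define TB_JMP_RESET_OFFSET_INVALID 0xffff

typedef struct {
    uint8_t *tc_ptr;               /* start of generated host code */
    uint16_t jmp_reset_offset[2];  /* per-exit reset target, or INVALID */
} DemoTB;

/* A valid reset offset means the code generator emitted a patchable
 * direct jump for that exit; resetting the jump makes it fall through
 * to tc_ptr + jmp_reset_offset[n], i.e. back toward the TB epilogue. */
static void demo_reset_jump(DemoTB *tb, int n)
{
    uintptr_t addr = (uintptr_t)(tb->tc_ptr + tb->jmp_reset_offset[n]);
    printf("exit %d: patch direct jump to resume at %p\n", n, (void *)addr);
}

int main(void)
{
    static uint8_t code[64];
    DemoTB tb = { code, { 24, TB_JMP_RESET_OFFSET_INVALID } };

    for (int n = 0; n < 2; n++) {
        if (tb.jmp_reset_offset[n] != TB_JMP_RESET_OFFSET_INVALID) {
            demo_reset_jump(&tb, n);   /* only exits with a direct jump */
        }
    }
    return 0;
}
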
@@ -1191,9 +1308,9 @@ void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
int is_cpu_write_access)
{
- TranslationBlock *tb, *tb_next, *saved_tb;
- CPUState *cpu = current_cpu;
+ TranslationBlock *tb, *tb_next;
#if defined(TARGET_HAS_PRECISE_SMC)
+ CPUState *cpu = current_cpu;
CPUArchState *env = NULL;
#endif
tb_page_addr_t tb_start, tb_end;
@@ -1205,7 +1322,7 @@ void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
int current_tb_modified = 0;
target_ulong current_pc = 0;
target_ulong current_cs_base = 0;
- int current_flags = 0;
+ uint32_t current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */
p = page_find(start >> TARGET_PAGE_BITS);
@@ -1260,20 +1377,7 @@ void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
&current_flags);
}
#endif /* TARGET_HAS_PRECISE_SMC */
- /* we need to do that to handle the case where a signal
- occurs while doing tb_phys_invalidate() */
- saved_tb = NULL;
- if (cpu != NULL) {
- saved_tb = cpu->current_tb;
- cpu->current_tb = NULL;
- }
tb_phys_invalidate(tb, -1);
- if (cpu != NULL) {
- cpu->current_tb = saved_tb;
- if (cpu->interrupt_request && cpu->current_tb) {
- cpu_interrupt(cpu, cpu->interrupt_request);
- }
- }
}
tb = tb_next;
}
@@ -1289,13 +1393,13 @@ void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
/* we generate a block containing just the instruction
modifying the memory. It will ensure that it cannot modify
itself */
- cpu->current_tb = NULL;
tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
cpu_resume_from_signal(cpu, NULL);
}
#endif
}
+#ifdef CONFIG_SOFTMMU
/* len must be <= 8 and start must be a multiple of len */
void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
@@ -1333,8 +1437,7 @@ void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
tb_invalidate_phys_page_range(start, start + len, 1);
}
}
-
-#if !defined(CONFIG_SOFTMMU)
+#else
/* Called with mmap_lock held. */
static void tb_invalidate_phys_page(tb_page_addr_t addr,
uintptr_t pc, void *puc,
@@ -1350,7 +1453,7 @@ static void tb_invalidate_phys_page(tb_page_addr_t addr,
int current_tb_modified = 0;
target_ulong current_pc = 0;
target_ulong current_cs_base = 0;
- int current_flags = 0;
+ uint32_t current_flags = 0;
#endif
addr &= TARGET_PAGE_MASK;
@@ -1394,7 +1497,6 @@ static void tb_invalidate_phys_page(tb_page_addr_t addr,
/* we generate a block containing just the instruction
modifying the memory. It will ensure that it cannot modify
itself */
- cpu->current_tb = NULL;
tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
if (locked) {
mmap_unlock();
@@ -1405,106 +1507,6 @@ static void tb_invalidate_phys_page(tb_page_addr_t addr,
}
#endif
-/* add the tb in the target page and protect it if necessary
- *
- * Called with mmap_lock held for user-mode emulation.
- */
-static inline void tb_alloc_page(TranslationBlock *tb,
- unsigned int n, tb_page_addr_t page_addr)
-{
- PageDesc *p;
-#ifndef CONFIG_USER_ONLY
- bool page_already_protected;
-#endif
-
- tb->page_addr[n] = page_addr;
- p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
- tb->page_next[n] = p->first_tb;
-#ifndef CONFIG_USER_ONLY
- page_already_protected = p->first_tb != NULL;
-#endif
- p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
- invalidate_page_bitmap(p);
-
-#if defined(CONFIG_USER_ONLY)
- if (p->flags & PAGE_WRITE) {
- target_ulong addr;
- PageDesc *p2;
- int prot;
-
- /* force the host page as non writable (writes will have a
- page fault + mprotect overhead) */
- page_addr &= qemu_host_page_mask;
- prot = 0;
- for (addr = page_addr; addr < page_addr + qemu_host_page_size;
- addr += TARGET_PAGE_SIZE) {
-
- p2 = page_find(addr >> TARGET_PAGE_BITS);
- if (!p2) {
- continue;
- }
- prot |= p2->flags;
- p2->flags &= ~PAGE_WRITE;
- }
- mprotect(g2h(page_addr), qemu_host_page_size,
- (prot & PAGE_BITS) & ~PAGE_WRITE);
-#ifdef DEBUG_TB_INVALIDATE
- printf("protecting code page: 0x" TARGET_FMT_lx "\n",
- page_addr);
-#endif
- }
-#else
- /* if some code is already present, then the pages are already
- protected. So we handle the case where only the first TB is
- allocated in a physical page */
- if (!page_already_protected) {
- tlb_protect_code(page_addr);
- }
-#endif
-}
-
-/* add a new TB and link it to the physical page tables. phys_page2 is
- * (-1) to indicate that only one page contains the TB.
- *
- * Called with mmap_lock held for user-mode emulation.
- */
-static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
- tb_page_addr_t phys_page2)
-{
- unsigned int h;
- TranslationBlock **ptb;
-
- /* add in the physical hash table */
- h = tb_phys_hash_func(phys_pc);
- ptb = &tcg_ctx.tb_ctx.tb_phys_hash[h];
- tb->phys_hash_next = *ptb;
- *ptb = tb;
-
- /* add in the page list */
- tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
- if (phys_page2 != -1) {
- tb_alloc_page(tb, 1, phys_page2);
- } else {
- tb->page_addr[1] = -1;
- }
-
- tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2);
- tb->jmp_next[0] = NULL;
- tb->jmp_next[1] = NULL;
-
- /* init original jump addresses */
- if (tb->tb_next_offset[0] != 0xffff) {
- tb_reset_jump(tb, 0);
- }
- if (tb->tb_next_offset[1] != 0xffff) {
- tb_reset_jump(tb, 1);
- }
-
-#ifdef DEBUG_TB_CHECK
- tb_page_check();
-#endif
-}
-
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
tb[1].tc_ptr. Return NULL if not found */
static TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
@@ -1574,7 +1576,7 @@ void tb_check_watchpoint(CPUState *cpu)
CPUArchState *env = cpu->env_ptr;
target_ulong pc, cs_base;
tb_page_addr_t addr;
- int flags;
+ uint32_t flags;
cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
addr = get_page_addr_code(env, pc);
@@ -1593,7 +1595,7 @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
TranslationBlock *tb;
uint32_t n, cflags;
target_ulong pc, cs_base;
- uint64_t flags;
+ uint32_t flags;
tb = tb_find_pc(retaddr);
if (!tb) {
@@ -1689,9 +1691,9 @@ void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
if (tb->page_addr[1] != -1) {
cross_page++;
}
- if (tb->tb_next_offset[0] != 0xffff) {
+ if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
direct_jmp_count++;
- if (tb->tb_next_offset[1] != 0xffff) {
+ if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
direct_jmp2_count++;
}
}
diff --git a/util/qemu-sockets.c b/util/qemu-sockets.c
index 0d536911c9..2a2c5243a1 100644
--- a/util/qemu-sockets.c
+++ b/util/qemu-sockets.c
@@ -1145,7 +1145,7 @@ void qapi_copy_SocketAddress(SocketAddress **p_dest,
return;
}
- qiv = qmp_input_visitor_new(obj);
+ qiv = qmp_input_visitor_new(obj, true);
iv = qmp_input_get_visitor(qiv);
visit_type_SocketAddress(iv, NULL, p_dest, &error_abort);
qmp_input_visitor_cleanup(qiv);