author     Eric Auger                2020-10-29 10:33:03 +0100
committer  Stefan Hajnoczi           2020-11-03 20:06:22 +0100
commit     9e13d598843cca1cdab7b7bdcb9cc0868ebf7fed (patch)
tree       c7e89ec4f3b1dd522426b05a34fb081cd1285f52 /block
parent     block/nvme: Change size and alignment of prp_list_pages (diff)
block/nvme: Align iov's va and size on host page size
Make sure iov's va and size are properly aligned on the host page size.

Signed-off-by: Eric Auger <eric.auger@redhat.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Tested-by: Eric Auger <eric.auger@redhat.com>
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Message-id: 20201029093306.1063879-23-philmd@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Tested-by: Eric Auger <eric.auger@redhat.com>
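For illustration only, here is a standalone sketch (not part of the patch) of the alignment arithmetic the change relies on. The ALIGN_UP/IS_ALIGNED/PTR_IS_ALIGNED macros below are assumed to mirror the semantics of QEMU's QEMU_ALIGN_UP, QEMU_IS_ALIGNED and QEMU_PTR_IS_ALIGNED, sysconf(_SC_PAGESIZE) stands in for qemu_real_host_page_size, and the iovec values are made up:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/* These macros mirror QEMU's QEMU_ALIGN_UP / QEMU_IS_ALIGNED /
 * QEMU_PTR_IS_ALIGNED semantics for the purpose of this sketch. */
#define ALIGN_UP(n, m)        (((n) + (m) - 1) / (m) * (m))
#define IS_ALIGNED(n, m)      (((n) % (m)) == 0)
#define PTR_IS_ALIGNED(p, m)  IS_ALIGNED((uintptr_t)(p), (m))

int main(void)
{
    /* Stand-in for qemu_real_host_page_size. */
    size_t host_page_size = (size_t)sysconf(_SC_PAGESIZE);

    /* Made-up example values for one iovec entry. */
    void  *iov_base = (void *)(uintptr_t)0x7f0000001000ULL;
    size_t iov_len  = 2600;

    /* The qiov counts as aligned only if both the virtual address and
     * the length are multiples of the host page size. */
    bool aligned = PTR_IS_ALIGNED(iov_base, host_page_size) &&
                   IS_ALIGNED(iov_len, host_page_size);

    /* Otherwise the length handed to the DMA mapping (or used for the
     * bounce-buffer allocation) is rounded up to the next host page. */
    size_t map_len = ALIGN_UP(iov_len, host_page_size);

    printf("host page size: %zu\n", host_page_size);
    printf("aligned: %s, mapped length: %zu\n",
           aligned ? "yes" : "no", map_len);
    return 0;
}

With a 4 KiB host page, the 2600-byte vector above would be reported as unaligned and mapped with its length rounded up to 4096, which is the behaviour the hunks below introduce.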
Diffstat (limited to 'block')
-rw-r--r--  block/nvme.c  14
1 file changed, 8 insertions(+), 6 deletions(-)
diff --git a/block/nvme.c b/block/nvme.c
index e807dd56df..f1e2fd34cd 100644
--- a/block/nvme.c
+++ b/block/nvme.c
@@ -1015,11 +1015,12 @@ static coroutine_fn int nvme_cmd_map_qiov(BlockDriverState *bs, NvmeCmd *cmd,
     for (i = 0; i < qiov->niov; ++i) {
         bool retry = true;
         uint64_t iova;
+        size_t len = QEMU_ALIGN_UP(qiov->iov[i].iov_len,
+                                   qemu_real_host_page_size);
 try_map:
         r = qemu_vfio_dma_map(s->vfio,
                               qiov->iov[i].iov_base,
-                              qiov->iov[i].iov_len,
-                              true, &iova);
+                              len, true, &iova);
         if (r == -ENOMEM && retry) {
             retry = false;
             trace_nvme_dma_flush_queue_wait(s);
@@ -1163,8 +1164,9 @@ static inline bool nvme_qiov_aligned(BlockDriverState *bs,
     BDRVNVMeState *s = bs->opaque;
 
     for (i = 0; i < qiov->niov; ++i) {
-        if (!QEMU_PTR_IS_ALIGNED(qiov->iov[i].iov_base, s->page_size) ||
-            !QEMU_IS_ALIGNED(qiov->iov[i].iov_len, s->page_size)) {
+        if (!QEMU_PTR_IS_ALIGNED(qiov->iov[i].iov_base,
+                                 qemu_real_host_page_size) ||
+            !QEMU_IS_ALIGNED(qiov->iov[i].iov_len, qemu_real_host_page_size)) {
             trace_nvme_qiov_unaligned(qiov, i, qiov->iov[i].iov_base,
                                       qiov->iov[i].iov_len, s->page_size);
             return false;
@@ -1180,7 +1182,7 @@ static int nvme_co_prw(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
     int r;
     uint8_t *buf = NULL;
     QEMUIOVector local_qiov;
-
+    size_t len = QEMU_ALIGN_UP(bytes, qemu_real_host_page_size);
     assert(QEMU_IS_ALIGNED(offset, s->page_size));
     assert(QEMU_IS_ALIGNED(bytes, s->page_size));
     assert(bytes <= s->max_transfer);
@@ -1190,7 +1192,7 @@ static int nvme_co_prw(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
     }
     s->stats.unaligned_accesses++;
     trace_nvme_prw_buffered(s, offset, bytes, qiov->niov, is_write);
-    buf = qemu_try_memalign(s->page_size, bytes);
+    buf = qemu_try_memalign(qemu_real_host_page_size, len);
     if (!buf) {
         return -ENOMEM;