author     Keith Busch    2020-09-30 21:22:27 +0200
committer  Klaus Jensen   2020-10-27 11:29:20 +0100
commit     8c125590dfa33699a267c797a41939c1ac8b77bf (patch)
tree       8383817d3c0b9b8ae545ac218839d0d5b42ab544 /hw/block
parent     hw/block/nvme: update nsid when registered (diff)
hw/block/nvme: remove pointless rw indirection
The code switches on the opcode to invoke a function specific to that
opcode. There's no point in consolidating back to a common function that
just switches on that same opcode without any actual common code. Restore
the opcode specific behavior without going back through another level of
switches.

Signed-off-by: Keith Busch <kbusch@kernel.org>
Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
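To illustrate the kind of indirection the patch removes, here is a minimal,
self-contained C sketch. It is not the QEMU code: the names, the stub handlers,
and the puts() placeholders standing in for the blk_aio_* calls are invented
for illustration. The point is that the top-level dispatcher already selects an
opcode-specific handler, so routing every handler back through a helper that
switches on the same opcode again adds a layer without sharing any real work.

#include <stdint.h>
#include <stdio.h>

enum { CMD_FLUSH = 0x00, CMD_WRITE = 0x01, CMD_READ = 0x02 };

/* Before: each opcode handler funnels into a common helper ... */
static uint16_t do_aio(uint8_t opcode)
{
    switch (opcode) {              /* ... which switches on the opcode again */
    case CMD_FLUSH: puts("issue flush"); break;
    case CMD_WRITE: puts("issue write"); break;
    case CMD_READ:  puts("issue read");  break;
    }
    return 0;
}

static uint16_t handle_flush_old(void)
{
    return do_aio(CMD_FLUSH);      /* second switch, no shared code gained */
}

/* After: the opcode-specific handler issues its own I/O directly. */
static uint16_t handle_flush_new(void)
{
    puts("issue flush");           /* e.g. blk_aio_flush(...) in the real code */
    return 0;
}

int main(void)
{
    uint8_t opcode = CMD_FLUSH;

    switch (opcode) {              /* the top-level dispatch already exists */
    case CMD_FLUSH:
        handle_flush_old();        /* old path: switch -> helper -> switch */
        handle_flush_new();        /* new path: switch -> handler, done */
        break;
    }
    return 0;
}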
Diffstat (limited to 'hw/block')
-rw-r--r--  hw/block/nvme.c        | 91
-rw-r--r--  hw/block/trace-events  |  1
2 files changed, 29 insertions, 63 deletions
diff --git a/hw/block/nvme.c b/hw/block/nvme.c
index 2225b944f9..a168f0bf4a 100644
--- a/hw/block/nvme.c
+++ b/hw/block/nvme.c
@@ -927,68 +927,12 @@ static void nvme_rw_cb(void *opaque, int ret)
nvme_enqueue_req_completion(nvme_cq(req), req);
}
-static uint16_t nvme_do_aio(BlockBackend *blk, int64_t offset, size_t len,
- NvmeRequest *req)
-{
- BlockAcctCookie *acct = &req->acct;
- BlockAcctStats *stats = blk_get_stats(blk);
-
- bool is_write = false;
-
- trace_pci_nvme_do_aio(nvme_cid(req), req->cmd.opcode,
- nvme_io_opc_str(req->cmd.opcode), blk_name(blk),
- offset, len);
-
- switch (req->cmd.opcode) {
- case NVME_CMD_FLUSH:
- block_acct_start(stats, acct, 0, BLOCK_ACCT_FLUSH);
- req->aiocb = blk_aio_flush(blk, nvme_rw_cb, req);
- break;
-
- case NVME_CMD_WRITE_ZEROES:
- block_acct_start(stats, acct, len, BLOCK_ACCT_WRITE);
- req->aiocb = blk_aio_pwrite_zeroes(blk, offset, len,
- BDRV_REQ_MAY_UNMAP, nvme_rw_cb,
- req);
- break;
-
- case NVME_CMD_WRITE:
- is_write = true;
-
- /* fallthrough */
-
- case NVME_CMD_READ:
- block_acct_start(stats, acct, len,
- is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ);
-
- if (req->qsg.sg) {
- if (is_write) {
- req->aiocb = dma_blk_write(blk, &req->qsg, offset,
- BDRV_SECTOR_SIZE, nvme_rw_cb, req);
- } else {
- req->aiocb = dma_blk_read(blk, &req->qsg, offset,
- BDRV_SECTOR_SIZE, nvme_rw_cb, req);
- }
- } else {
- if (is_write) {
- req->aiocb = blk_aio_pwritev(blk, offset, &req->iov, 0,
- nvme_rw_cb, req);
- } else {
- req->aiocb = blk_aio_preadv(blk, offset, &req->iov, 0,
- nvme_rw_cb, req);
- }
- }
-
- break;
- }
-
- return NVME_NO_COMPLETE;
-}
-
static uint16_t nvme_flush(NvmeCtrl *n, NvmeRequest *req)
{
- NvmeNamespace *ns = req->ns;
- return nvme_do_aio(ns->blkconf.blk, 0, 0, req);
+ block_acct_start(blk_get_stats(req->ns->blkconf.blk), &req->acct, 0,
+ BLOCK_ACCT_FLUSH);
+ req->aiocb = blk_aio_flush(req->ns->blkconf.blk, nvme_rw_cb, req);
+ return NVME_NO_COMPLETE;
}
static uint16_t nvme_write_zeroes(NvmeCtrl *n, NvmeRequest *req)
@@ -1009,7 +953,11 @@ static uint16_t nvme_write_zeroes(NvmeCtrl *n, NvmeRequest *req)
return status;
}
- return nvme_do_aio(ns->blkconf.blk, offset, count, req);
+ block_acct_start(blk_get_stats(req->ns->blkconf.blk), &req->acct, 0,
+ BLOCK_ACCT_WRITE);
+ req->aiocb = blk_aio_pwrite_zeroes(req->ns->blkconf.blk, offset, count,
+ BDRV_REQ_MAY_UNMAP, nvme_rw_cb, req);
+ return NVME_NO_COMPLETE;
}
static uint16_t nvme_rw(NvmeCtrl *n, NvmeRequest *req)
@@ -1023,6 +971,7 @@ static uint16_t nvme_rw(NvmeCtrl *n, NvmeRequest *req)
uint64_t data_offset = nvme_l2b(ns, slba);
enum BlockAcctType acct = req->cmd.opcode == NVME_CMD_WRITE ?
BLOCK_ACCT_WRITE : BLOCK_ACCT_READ;
+ BlockBackend *blk = ns->blkconf.blk;
uint16_t status;
trace_pci_nvme_rw(nvme_cid(req), nvme_io_opc_str(rw->opcode),
@@ -1045,7 +994,25 @@ static uint16_t nvme_rw(NvmeCtrl *n, NvmeRequest *req)
goto invalid;
}
- return nvme_do_aio(ns->blkconf.blk, data_offset, data_size, req);
+ block_acct_start(blk_get_stats(blk), &req->acct, data_size, acct);
+ if (req->qsg.sg) {
+ if (acct == BLOCK_ACCT_WRITE) {
+ req->aiocb = dma_blk_write(blk, &req->qsg, data_offset,
+ BDRV_SECTOR_SIZE, nvme_rw_cb, req);
+ } else {
+ req->aiocb = dma_blk_read(blk, &req->qsg, data_offset,
+ BDRV_SECTOR_SIZE, nvme_rw_cb, req);
+ }
+ } else {
+ if (acct == BLOCK_ACCT_WRITE) {
+ req->aiocb = blk_aio_pwritev(blk, data_offset, &req->iov, 0,
+ nvme_rw_cb, req);
+ } else {
+ req->aiocb = blk_aio_preadv(blk, data_offset, &req->iov, 0,
+ nvme_rw_cb, req);
+ }
+ }
+ return NVME_NO_COMPLETE;
invalid:
block_acct_invalid(blk_get_stats(ns->blkconf.blk), acct);
diff --git a/hw/block/trace-events b/hw/block/trace-events
index 446cca08e9..e56d688b88 100644
--- a/hw/block/trace-events
+++ b/hw/block/trace-events
@@ -43,7 +43,6 @@ pci_nvme_admin_cmd(uint16_t cid, uint16_t sqid, uint8_t opcode, const char *opna
pci_nvme_rw(uint16_t cid, const char *verb, uint32_t nsid, uint32_t nlb, uint64_t count, uint64_t lba) "cid %"PRIu16" opname '%s' nsid %"PRIu32" nlb %"PRIu32" count %"PRIu64" lba 0x%"PRIx64""
pci_nvme_rw_cb(uint16_t cid, const char *blkname) "cid %"PRIu16" blk '%s'"
pci_nvme_write_zeroes(uint16_t cid, uint32_t nsid, uint64_t slba, uint32_t nlb) "cid %"PRIu16" nsid %"PRIu32" slba %"PRIu64" nlb %"PRIu32""
-pci_nvme_do_aio(uint16_t cid, uint8_t opc, const char *opname, const char *blkname, int64_t offset, size_t len) "cid %"PRIu16" opc 0x%"PRIx8" opname '%s' blk '%s' offset %"PRId64" len %zu"
pci_nvme_create_sq(uint64_t addr, uint16_t sqid, uint16_t cqid, uint16_t qsize, uint16_t qflags) "create submission queue, addr=0x%"PRIx64", sqid=%"PRIu16", cqid=%"PRIu16", qsize=%"PRIu16", qflags=%"PRIu16""
pci_nvme_create_cq(uint64_t addr, uint16_t cqid, uint16_t vector, uint16_t size, uint16_t qflags, int ien) "create completion queue, addr=0x%"PRIx64", cqid=%"PRIu16", vector=%"PRIu16", qsize=%"PRIu16", qflags=%"PRIu16", ien=%d"
pci_nvme_del_sq(uint16_t qid) "deleting submission queue sqid=%"PRIu16""