author    Klaus Jensen 2020-12-18 00:32:57 +0100
committer Klaus Jensen 2021-02-08 21:15:54 +0100
commit    38001f73403808dde35c523695ee895587bcc6ba (patch)
tree      a445adc5281788f8d6d45bcc27007039e807829c /hw/block/nvme.c
parent    hw/block/nvme: bump to v1.4 (diff)
hw/block/nvme: lift cmb restrictions
The controller now implements v1.4 and we can lift the restrictions on CMB Data Pointer and Command Independent Locations Support (CDPCILS) and CMB Data Pointer Mixed Locations Support (CDPMLS) since the device really does not care about mixed host/cmb pointers in those cases.

Reviewed-by: Keith Busch <kbusch@kernel.org>
Reviewed-by: Minwoo Im <minwoo.im.dev@gmail.com>
Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
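For reference, the two bits advertised here live in the CMBLOC controller register. Below is a minimal sketch of how they pack into the register, assuming the NVMe 1.4 bit positions (bits 2:0 BIR, bit 5 CDPMLS, bit 6 CDPCILS); the CMBLOC_* constants are illustrative stand-ins, not the actual QEMU macros from include/block/nvme.h:

/* Sketch only: packing CDPCILS/CDPMLS into CMBLOC.
 * Bit positions follow NVMe 1.4; names are illustrative. */
#include <stdint.h>
#include <stdio.h>

#define CMBLOC_BIR_SHIFT     0 /* bits 2:0, BAR Indicator Register */
#define CMBLOC_CDPMLS_SHIFT  5 /* Data Pointer Mixed Locations Support */
#define CMBLOC_CDPCILS_SHIFT 6 /* Data Pointer and Command Independent
                                * Locations Support */

int main(void)
{
    uint32_t cmbloc = 0;

    /* data pointers may mix host and CMB addresses within one command */
    cmbloc |= 1u << CMBLOC_CDPMLS_SHIFT;

    /* data pointers may reside in the CMB independently of where the
     * submission queue lives -- the checks this patch removes */
    cmbloc |= 1u << CMBLOC_CDPCILS_SHIFT;

    /* expose the CMB behind BAR 2 (QEMU's NVME_CMB_BIR) */
    cmbloc |= 2u << CMBLOC_BIR_SHIFT;

    printf("CMBLOC = 0x%08x\n", cmbloc); /* prints 0x00000062 */
    return 0;
}

With both bits set, the host may place PRP lists, SGL segments and data buffers in host memory or the CMB in any combination, which is why the per-command location checks in the diff below can be deleted.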
Diffstat (limited to 'hw/block/nvme.c')
-rw-r--r--  hw/block/nvme.c  33
1 file changed, 2 insertions, 31 deletions
diff --git a/hw/block/nvme.c b/hw/block/nvme.c
index c4c968f595..40784bd908 100644
--- a/hw/block/nvme.c
+++ b/hw/block/nvme.c
@@ -509,7 +509,6 @@ static uint16_t nvme_map_prp(NvmeCtrl *n, uint64_t prp1, uint64_t prp2,
     trans_len = MIN(len, trans_len);
     int num_prps = (len >> n->page_bits) + 1;
     uint16_t status;
-    bool prp_list_in_cmb = false;
     int ret;

     QEMUSGList *qsg = &req->qsg;
@@ -535,10 +534,6 @@ static uint16_t nvme_map_prp(NvmeCtrl *n, uint64_t prp1, uint64_t prp2,
         uint32_t nents, prp_trans;
         int i = 0;

-        if (nvme_addr_is_cmb(n, prp2)) {
-            prp_list_in_cmb = true;
-        }
-
         nents = (len + n->page_size - 1) >> n->page_bits;
         prp_trans = MIN(n->max_prp_ents, nents) * sizeof(uint64_t);
         ret = nvme_addr_read(n, prp2, (void *)prp_list, prp_trans);
@@ -555,10 +550,6 @@ static uint16_t nvme_map_prp(NvmeCtrl *n, uint64_t prp1, uint64_t prp2,
                         return NVME_INVALID_PRP_OFFSET | NVME_DNR;
                     }

-                    if (prp_list_in_cmb != nvme_addr_is_cmb(n, prp_ent)) {
-                        return NVME_INVALID_USE_OF_CMB | NVME_DNR;
-                    }
-
                     i = 0;
                     nents = (len + n->page_size - 1) >> n->page_bits;
                     prp_trans = MIN(n->max_prp_ents, nents) * sizeof(uint64_t);
@@ -692,7 +683,6 @@ static uint16_t nvme_map_sgl(NvmeCtrl *n, QEMUSGList *qsg, QEMUIOVector *iov,
     uint64_t nsgld;
     uint32_t seg_len;
     uint16_t status;
-    bool sgl_in_cmb = false;
     hwaddr addr;
     int ret;
@@ -714,18 +704,6 @@ static uint16_t nvme_map_sgl(NvmeCtrl *n, QEMUSGList *qsg, QEMUIOVector *iov,
         goto out;
     }

-    /*
-     * If the segment is located in the CMB, the submission queue of the
-     * request must also reside there.
-     */
-    if (nvme_addr_is_cmb(n, addr)) {
-        if (!nvme_addr_is_cmb(n, req->sq->dma_addr)) {
-            return NVME_INVALID_USE_OF_CMB | NVME_DNR;
-        }
-
-        sgl_in_cmb = true;
-    }
-
     for (;;) {
         switch (NVME_SGL_TYPE(sgld->type)) {
         case NVME_SGL_DESCR_TYPE_SEGMENT:
@@ -814,15 +792,6 @@ static uint16_t nvme_map_sgl(NvmeCtrl *n, QEMUSGList *qsg, QEMUIOVector *iov,
         if (status) {
             goto unmap;
         }
-
-        /*
-         * If the next segment is in the CMB, make sure that the sgl was
-         * already located there.
-         */
-        if (sgl_in_cmb != nvme_addr_is_cmb(n, addr)) {
-            status = NVME_INVALID_USE_OF_CMB | NVME_DNR;
-            goto unmap;
-        }
     }

 out:
@@ -3777,6 +3746,8 @@ static int nvme_start_ctrl(NvmeCtrl *n)

 static void nvme_cmb_enable_regs(NvmeCtrl *n)
 {
+    NVME_CMBLOC_SET_CDPCILS(n->bar.cmbloc, 1);
+    NVME_CMBLOC_SET_CDPMLS(n->bar.cmbloc, 1);
     NVME_CMBLOC_SET_BIR(n->bar.cmbloc, NVME_CMB_BIR);

     NVME_CMBSZ_SET_SQS(n->bar.cmbsz, 1);