author    Eric Auger       2021-03-09 11:27:40 +0100
committer Peter Maydell    2021-03-12 13:40:10 +0100
commit    6d9cd115b9dfee08faef0f64c3b90ac5c79ededc (patch)
tree      4584fc3f659b31a57a6e499d90baeb4e74b1fb6a /hw/arm/smmuv3.c
parent    hw/arm/smmu-common: Fix smmu_iotlb_inv_iova when asid is not set (diff)
hw/arm/smmuv3: Enforce invalidation on a power of two range
As of today, the driver can invalidate a number of pages that is not a power of 2. However, IOTLB unmap notifications and internal IOTLB invalidations work with masks, leading to erroneous invalidations. In case the range is not a power of 2, split the invalidation into power-of-2 invalidations.

When looking for a single page entry in the vSMMU internal IOTLB, make sure that if the entry is not found using g_hash_table_remove(), we iterate over all the entries to find a potential range that overlaps it.

Signed-off-by: Eric Auger <eric.auger@redhat.com>
Message-id: 20210309102742.30442-6-eric.auger@redhat.com
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
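To illustrate the splitting strategy described above, here is a minimal standalone sketch; it is not taken from the patch. aligned_pow2_mask() is a simplified stand-in for QEMU's dma_aligned_pow2_mask() (the real helper also caps the mask width at 64 - granule bits), and the driving loop mirrors the one added to smmuv3_s1_range_inval() in the diff below. All names and numbers in main() are illustrative only.

/*
 * Sketch only: split an arbitrary page range into power-of-2 chunks
 * that are aligned relative to the start of the range, in the spirit
 * of this patch.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Largest mask of the form 2^k - 1 such that "start" is 2^k-aligned
 * and start + mask does not go past "end" (inclusive page indices).
 * Simplified stand-in for QEMU's dma_aligned_pow2_mask().
 */
static uint64_t aligned_pow2_mask(uint64_t start, uint64_t end)
{
    uint64_t span = end - start;
    uint64_t align = start ? (start & -start) - 1 : UINT64_MAX;
    uint64_t limit = span < align ? span : align;
    uint64_t mask = 0;

    while (mask < limit) {
        uint64_t next = (mask << 1) | 1;
        if (next > limit) {
            break;
        }
        mask = next;
    }
    return mask;
}

int main(void)
{
    uint64_t num_pages = 7;            /* non-power-of-2 page count  */
    uint64_t first_page = 0, last_page = num_pages - 1;
    uint64_t addr = 0x10002000ULL;     /* arbitrary start IOVA       */
    unsigned granule = 12;             /* log2 of a 4KB page         */

    while (num_pages) {
        uint64_t count = aligned_pow2_mask(first_page, last_page) + 1;

        /* Each chunk is power-of-2 sized and aligned within the range. */
        printf("invalidate 0x%" PRIx64 "..0x%" PRIx64 " (%" PRIu64 " pages)\n",
               addr, addr + (count << granule) - 1, count);

        num_pages -= count;
        first_page += count;
        addr += count << granule;
    }
    return 0;
}

Run on the 7-page example, this prints a 4-page, then a 2-page, then a 1-page invalidation; count plays the role of mask + 1 in the patch, and addr advances by count * page_size just as addr += count * BIT_ULL(granule) does in the real loop.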
Diffstat (limited to 'hw/arm/smmuv3.c')
-rw-r--r--   hw/arm/smmuv3.c   24
1 file changed, 20 insertions(+), 4 deletions(-)
diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c
index bd1f97000d..fdd6332ce5 100644
--- a/hw/arm/smmuv3.c
+++ b/hw/arm/smmuv3.c
@@ -861,7 +861,8 @@ static void smmuv3_s1_range_inval(SMMUState *s, Cmd *cmd)
     uint16_t vmid = CMD_VMID(cmd);
     bool leaf = CMD_LEAF(cmd);
     uint8_t tg = CMD_TG(cmd);
-    hwaddr num_pages = 1;
+    uint64_t first_page = 0, last_page;
+    uint64_t num_pages = 1;
     int asid = -1;
 
     if (tg) {
@@ -874,9 +875,24 @@ static void smmuv3_s1_range_inval(SMMUState *s, Cmd *cmd)
     if (type == SMMU_CMD_TLBI_NH_VA) {
         asid = CMD_ASID(cmd);
     }
-    trace_smmuv3_s1_range_inval(vmid, asid, addr, tg, num_pages, ttl, leaf);
-    smmuv3_inv_notifiers_iova(s, asid, addr, tg, num_pages);
-    smmu_iotlb_inv_iova(s, asid, addr, tg, num_pages, ttl);
+
+    /* Split invalidations into ^2 range invalidations */
+    last_page = num_pages - 1;
+    while (num_pages) {
+        uint8_t granule = tg * 2 + 10;
+        uint64_t mask, count;
+
+        mask = dma_aligned_pow2_mask(first_page, last_page, 64 - granule);
+        count = mask + 1;
+
+        trace_smmuv3_s1_range_inval(vmid, asid, addr, tg, count, ttl, leaf);
+        smmuv3_inv_notifiers_iova(s, asid, addr, tg, count);
+        smmu_iotlb_inv_iova(s, asid, addr, tg, count, ttl);
+
+        num_pages -= count;
+        first_page += count;
+        addr += count * BIT_ULL(granule);
+    }
 }
 
 static int smmuv3_cmdq_consume(SMMUv3State *s)
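As a concrete example of the new behaviour (assuming tg == 1, i.e. a 4KB granule, so granule == 12): a TLBI covering 5 pages at IOVA 0x10002000 is now emitted as two power-of-2 invalidations, 4 pages covering 0x10002000..0x10005fff followed by 1 page at 0x10006000, so the unmap notifiers and the internal IOTLB only ever see mask-representable ranges.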