| author | Eric Auger | 2021-03-09 11:27:37 +0100 |
| --- | --- | --- |
| committer | Peter Maydell | 2021-03-12 13:40:10 +0100 |
| commit | f14fb6c2db961c3665a61b342ab329b7bd20d1e7 | |
| tree | 57f311ed8f4d445ad5c1474751e76206e25bf972 | |
| parent | intel_iommu: Fix mask may be uninitialized in vtd_context_device_invalidate | |
dma: Introduce dma_aligned_pow2_mask()
Currently get_naturally_aligned_size() is used by the intel iommu
to compute the maximum invalidation range: based on @size, it returns
a power of 2 that is aligned with the @start address and less than
the maximum range defined by @gaw.
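For instance (hypothetical numbers, not from the patch): with @start = 0x3000 and
@size = 0x5000, the lowest set bit of @start caps the result at 0x1000, so the first
invalidation covers [0x3000, 0x3fff]; the next iteration starts at 0x4000 and covers
the remaining 0x4000 bytes in a single naturally aligned step.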
This helper is also useful for other IOMMU devices (virtio-iommu,
SMMUv3) to make sure IOMMU UNMAP notifiers are only called with
power-of-2 range sizes.
Let's move the helper into dma-helpers.c and rename it to
dma_aligned_pow2_mask(). Also rewrite it so that it
accommodates UINT64_MAX values for the size mask and max mask.
It now returns a mask instead of a size. Change the caller accordingly.
Signed-off-by: Eric Auger <eric.auger@redhat.com>
Reviewed-by: Peter Xu <peterx@redhat.com>
Message-id: 20210309102742.30442-3-eric.auger@redhat.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
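
For orientation, here is a minimal, hypothetical caller sketch (not part of the patch;
walk_range() is an invented name) showing how the new mask-returning API is consumed,
assuming it is built inside the QEMU tree where "sysemu/dma.h" is available. The helper
returns an address mask, and the caller derives the chunk size as mask + 1, the same
pattern vtd_address_space_unmap() uses after this change:

```c
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>
#include "sysemu/dma.h"   /* declares dma_aligned_pow2_mask() after this patch */

/*
 * Illustrative only: walk [start, end] in power-of-2, start-aligned chunks.
 * Each chunk's size is mask + 1, so start stays aligned as it advances.
 */
static void walk_range(uint64_t start, uint64_t end, int max_addr_bits)
{
    uint64_t remain = end - start + 1;

    while (remain) {
        uint64_t mask = dma_aligned_pow2_mask(start, end, max_addr_bits);
        uint64_t size = mask + 1;

        /* In QEMU this is where an IOMMU_NOTIFIER_UNMAP event would be sent,
         * with entry.iova = start and entry.addr_mask = mask. */
        printf("chunk: iova=0x%" PRIx64 " addr_mask=0x%" PRIx64 "\n", start, mask);

        start += size;
        remain -= size;
    }
}
```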
-rw-r--r-- | hw/i386/intel_iommu.c | 30
-rw-r--r-- | include/sysemu/dma.h | 12
-rw-r--r-- | softmmu/dma-helpers.c | 26

3 files changed, 45 insertions, 23 deletions
```diff
diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c
index 3206f379f8..6be8f32918 100644
--- a/hw/i386/intel_iommu.c
+++ b/hw/i386/intel_iommu.c
@@ -35,6 +35,7 @@
 #include "hw/i386/x86-iommu.h"
 #include "hw/pci-host/q35.h"
 #include "sysemu/kvm.h"
+#include "sysemu/dma.h"
 #include "sysemu/sysemu.h"
 #include "hw/i386/apic_internal.h"
 #include "kvm/kvm_i386.h"
@@ -3455,24 +3456,6 @@ VTDAddressSpace *vtd_find_add_as(IntelIOMMUState *s, PCIBus *bus, int devfn)
     return vtd_dev_as;
 }
 
-static uint64_t get_naturally_aligned_size(uint64_t start,
-                                           uint64_t size, int gaw)
-{
-    uint64_t max_mask = 1ULL << gaw;
-    uint64_t alignment = start ? start & -start : max_mask;
-
-    alignment = MIN(alignment, max_mask);
-    size = MIN(size, max_mask);
-
-    if (alignment <= size) {
-        /* Increase the alignment of start */
-        return alignment;
-    } else {
-        /* Find the largest page mask from size */
-        return 1ULL << (63 - clz64(size));
-    }
-}
-
 /* Unmap the whole range in the notifier's scope. */
 static void vtd_address_space_unmap(VTDAddressSpace *as, IOMMUNotifier *n)
 {
@@ -3501,13 +3484,14 @@ static void vtd_address_space_unmap(VTDAddressSpace *as, IOMMUNotifier *n)
 
     while (remain >= VTD_PAGE_SIZE) {
         IOMMUTLBEvent event;
-        uint64_t mask = get_naturally_aligned_size(start, remain, s->aw_bits);
+        uint64_t mask = dma_aligned_pow2_mask(start, end, s->aw_bits);
+        uint64_t size = mask + 1;
 
-        assert(mask);
+        assert(size);
 
         event.type = IOMMU_NOTIFIER_UNMAP;
         event.entry.iova = start;
-        event.entry.addr_mask = mask - 1;
+        event.entry.addr_mask = mask;
         event.entry.target_as = &address_space_memory;
         event.entry.perm = IOMMU_NONE;
         /* This field is meaningless for unmap */
@@ -3515,8 +3499,8 @@ static void vtd_address_space_unmap(VTDAddressSpace *as, IOMMUNotifier *n)
 
         memory_region_notify_iommu_one(n, &event);
 
-        start += mask;
-        remain -= mask;
+        start += size;
+        remain -= size;
     }
 
     assert(!remain);
diff --git a/include/sysemu/dma.h b/include/sysemu/dma.h
index a052f7bca3..3201e7901d 100644
--- a/include/sysemu/dma.h
+++ b/include/sysemu/dma.h
@@ -296,4 +296,16 @@ uint64_t dma_buf_write(uint8_t *ptr, int32_t len, QEMUSGList *sg);
 void dma_acct_start(BlockBackend *blk, BlockAcctCookie *cookie,
                     QEMUSGList *sg, enum BlockAcctType type);
 
+/**
+ * dma_aligned_pow2_mask: Return the address bit mask of the largest
+ * power of 2 size less or equal than @end - @start + 1, aligned with @start,
+ * and bounded by 1 << @max_addr_bits bits.
+ *
+ * @start: range start address
+ * @end: range end address (greater than @start)
+ * @max_addr_bits: max address bits (<= 64)
+ */
+uint64_t dma_aligned_pow2_mask(uint64_t start, uint64_t end,
+                               int max_addr_bits);
+
 #endif
diff --git a/softmmu/dma-helpers.c b/softmmu/dma-helpers.c
index 29001b5459..7d766a5e89 100644
--- a/softmmu/dma-helpers.c
+++ b/softmmu/dma-helpers.c
@@ -330,3 +330,29 @@ void dma_acct_start(BlockBackend *blk, BlockAcctCookie *cookie,
 {
     block_acct_start(blk_get_stats(blk), cookie, sg->size, type);
 }
+
+uint64_t dma_aligned_pow2_mask(uint64_t start, uint64_t end, int max_addr_bits)
+{
+    uint64_t max_mask = UINT64_MAX, addr_mask = end - start;
+    uint64_t alignment_mask, size_mask;
+
+    if (max_addr_bits != 64) {
+        max_mask = (1ULL << max_addr_bits) - 1;
+    }
+
+    alignment_mask = start ? (start & -start) - 1 : max_mask;
+    alignment_mask = MIN(alignment_mask, max_mask);
+    size_mask = MIN(addr_mask, max_mask);
+
+    if (alignment_mask <= size_mask) {
+        /* Increase the alignment of start */
+        return alignment_mask;
+    } else {
+        /* Find the largest page mask from size */
+        if (addr_mask == UINT64_MAX) {
+            return UINT64_MAX;
+        }
+        return (1ULL << (63 - clz64(addr_mask + 1))) - 1;
+    }
+}
+
```
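
To sanity-check the new mask semantics outside of QEMU, the snippet below is a standalone
re-implementation of the same logic for experimentation only; it is not QEMU code, and it
substitutes a local MIN macro and GCC/Clang's __builtin_clzll for QEMU's MIN and clz64:

```c
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* Standalone copy of the patch's logic, for experimentation only. */
static uint64_t aligned_pow2_mask(uint64_t start, uint64_t end, int max_addr_bits)
{
    uint64_t max_mask = UINT64_MAX, addr_mask = end - start;
    uint64_t alignment_mask, size_mask;

    if (max_addr_bits != 64) {
        max_mask = (1ULL << max_addr_bits) - 1;
    }

    alignment_mask = start ? (start & -start) - 1 : max_mask;
    alignment_mask = MIN(alignment_mask, max_mask);
    size_mask = MIN(addr_mask, max_mask);

    if (alignment_mask <= size_mask) {
        return alignment_mask;          /* limited by the alignment of start */
    } else if (addr_mask == UINT64_MAX) {
        return UINT64_MAX;              /* whole 64-bit range */
    } else {
        /* largest power-of-2 size not exceeding end - start + 1 */
        return (1ULL << (63 - __builtin_clzll(addr_mask + 1))) - 1;
    }
}

int main(void)
{
    /* start aligned to 4K, 32K range: first chunk is capped at 4K (mask 0xfff) */
    printf("0x%" PRIx64 "\n", aligned_pow2_mask(0x3000, 0xafff, 39));
    /* full 64-bit range with start == 0: mask is UINT64_MAX */
    printf("0x%" PRIx64 "\n", aligned_pow2_mask(0, UINT64_MAX, 64));
    return 0;
}
```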