author     Joerg Roedel  2015-12-21 19:07:50 +0100
committer  Joerg Roedel  2015-12-28 17:18:53 +0100
commit     7bfa5bd2708d096c79fc2c9f32de478ade7a013f (patch)
tree       3afa9e8640a67ca1370d67a1bff364cad7151561
parent     iommu/amd: Allocate new aperture ranges in dma_ops_alloc_addresses (diff)
iommu/amd: Build io page-tables with cmpxchg64
This allows building up the page-tables without holding any locks. As a consequence, it removes the need to pre-populate dma_ops page-tables.

Signed-off-by: Joerg Roedel <jroedel@suse.de>
-rw-r--r--  drivers/iommu/amd_iommu.c | 16
1 file changed, 13 insertions(+), 3 deletions(-)
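The core trick of the patch is easy to reproduce outside the kernel: read the parent entry, allocate a zeroed page for the missing level, and publish it with a single 64-bit compare-and-swap; whoever loses the race frees its page and descends into the winner's table instead. The sketch below is a minimal userspace approximation, not the kernel code: install_table_level(), TABLE_ENTRIES and ENTRY_PRESENT are made-up names, and GCC's __atomic builtins stand in for cmpxchg64(), get_zeroed_page() and PM_LEVEL_PDE().

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define TABLE_ENTRIES 512    /* one 4K page worth of 64-bit entries */
#define ENTRY_PRESENT 1ULL   /* stand-in for the IOMMU "present" bit */

/*
 * Install a fresh, zeroed table below *slot.  If the slot is still empty,
 * publish the new table with a 64-bit compare-and-swap; if another thread
 * populated it in the meantime, free our page and use the existing table
 * instead.  This mirrors the cmpxchg64() path the patch adds to alloc_pte().
 */
static uint64_t *install_table_level(uint64_t *slot)
{
        for (;;) {
                uint64_t old = __atomic_load_n(slot, __ATOMIC_ACQUIRE);

                if (old & ENTRY_PRESENT)
                        return (uint64_t *)(uintptr_t)(old & ~ENTRY_PRESENT);

                uint64_t *page = calloc(TABLE_ENTRIES, sizeof(*page));
                if (!page)
                        return NULL;

                uint64_t new_entry = (uint64_t)(uintptr_t)page | ENTRY_PRESENT;

                /* Succeeds only if *slot still holds 'old', i.e. nobody raced us. */
                if (__atomic_compare_exchange_n(slot, &old, new_entry, false,
                                                __ATOMIC_RELEASE, __ATOMIC_ACQUIRE))
                        return page;

                free(page);     /* lost the race; retry and reuse the winner's table */
        }
}

int main(void)
{
        uint64_t slot = 0;
        uint64_t *table = install_table_level(&slot);

        printf("table at %p, slot entry = %#llx\n",
               (void *)table, (unsigned long long)slot);
        free(table);
        return 0;
}

The retry loop plays the role of the continue statement in the patch: a failed compare-and-swap simply re-reads the slot and keeps descending, so no spinlock is needed around the table walk.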
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 4c926dadb281..ecdd3f7dfb89 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -1206,11 +1206,21 @@ static u64 *alloc_pte(struct protection_domain *domain,
         end_lvl = PAGE_SIZE_LEVEL(page_size);

         while (level > end_lvl) {
-                if (!IOMMU_PTE_PRESENT(*pte)) {
+                u64 __pte, __npte;
+
+                __pte = *pte;
+
+                if (!IOMMU_PTE_PRESENT(__pte)) {
                         page = (u64 *)get_zeroed_page(gfp);
                         if (!page)
                                 return NULL;
-                        *pte = PM_LEVEL_PDE(level, virt_to_phys(page));
+
+                        __npte = PM_LEVEL_PDE(level, virt_to_phys(page));
+
+                        if (cmpxchg64(pte, __pte, __npte)) {
+                                free_page((unsigned long)page);
+                                continue;
+                        }
                 }

                 /* No level skipping support yet */
@@ -1607,7 +1617,7 @@ static unsigned long dma_ops_alloc_addresses(struct device *dev,
                 address = dma_ops_area_alloc(dev, dom, pages,
                                              align_mask, dma_mask);

-                if (address == -1 && alloc_new_range(dom, true, GFP_ATOMIC))
+                if (address == -1 && alloc_new_range(dom, false, GFP_ATOMIC))
                         break;
         }

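The second hunk is the consequence stated in the changelog: with alloc_pte() able to fill in missing levels from any context, dma_ops_alloc_addresses() now passes false instead of true for what appears to be alloc_new_range()'s populate argument, so newly allocated aperture ranges are no longer pre-populated. As a rough, hedged illustration that the lock-free install tolerates concurrency, here is a small self-contained pthread harness for the same made-up pattern sketched above: several threads race to publish a page into one shared slot, exactly one compare-and-swap succeeds, and every loser frees its allocation.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define NTHREADS 8

static uint64_t slot;   /* shared parent entry, initially empty */
static int losers;      /* how many threads had to free their page */

static void *racer(void *arg)
{
        (void)arg;
        uint64_t expected = 0;
        uint64_t *page = calloc(512, sizeof(*page));

        if (!page)
                return NULL;

        /* Publish our page only if the slot is still empty. */
        if (!__atomic_compare_exchange_n(&slot, &expected,
                                         (uint64_t)(uintptr_t)page | 1ULL,
                                         false, __ATOMIC_RELEASE,
                                         __ATOMIC_ACQUIRE)) {
                free(page);                             /* someone else won */
                __atomic_fetch_add(&losers, 1, __ATOMIC_RELAXED);
        }
        return NULL;
}

int main(void)
{
        pthread_t threads[NTHREADS];

        for (int i = 0; i < NTHREADS; i++)
                pthread_create(&threads[i], NULL, racer, NULL);
        for (int i = 0; i < NTHREADS; i++)
                pthread_join(threads[i], NULL);

        /* Exactly one allocation survives in the slot; the rest were freed. */
        printf("slot = %#llx, losing allocations freed = %d\n",
               (unsigned long long)slot, losers);

        free((void *)(uintptr_t)(slot & ~1ULL));
        return 0;
}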