author     Catalin Marinas  2015-03-18 12:28:06 +0100
committer  Will Deacon      2015-07-28 12:44:01 +0200
commit     4150e50bf5f2171fbe7dfdbc7f2cdf44676b79a4 (patch)
tree       6c016c8cd4a70b019a17d0e58ea01a22c98dd846 /arch/arm64/include/asm
parent     arm64: Clean up __flush_tlb(_kernel)_range functions (diff)
arm64: Use last level TLBI for user pte changes
The flush_tlb_page() function is used on user address ranges when PTEs (or
PMDs/PUDs for huge pages) were changed (attributes or clearing). For such
cases, it is more efficient to invalidate only the last level of the TLB
with the "tlbi vale1is" instruction.

In the TLB shoot-down case, the TLB caching of the intermediate page table
levels (pmd, pud, pgd) is handled by __flush_tlb_pgtable() via the
__(pte|pmd|pud)_free_tlb() functions and it is not deferred to
tlb_finish_mmu() (as of commit 285994a62c80 - "arm64: Invalidate the TLB
corresponding to intermediate page table levels"). The tlb_flush() function
only needs to invalidate the TLB for the last level of page tables; the
__flush_tlb_range() function gains a fourth argument for last level TLBI.

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
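As a rough illustration of how callers are expected to pick between the two
variants after this change (a hypothetical sketch, not part of the patch:
the example_* functions are invented for illustration; only
__flush_tlb_range() and flush_tlb_range() come from the patched header):

#include <linux/mm_types.h>     /* struct vm_area_struct */
#include <asm/tlbflush.h>       /* __flush_tlb_range(), flush_tlb_range() */

/*
 * Hypothetical caller: only the attributes of user PTEs changed, so the
 * cached pgd/pud/pmd walk entries are still valid and a last-level
 * ("tlbi vale1is") invalidation per page is sufficient and cheaper.
 */
static inline void example_change_pte_attrs(struct vm_area_struct *vma,
                                            unsigned long start,
                                            unsigned long end)
{
        __flush_tlb_range(vma, start, end, true);
}

/*
 * Hypothetical caller: intermediate levels may also need invalidating, so
 * keep the conservative non-leaf form ("tlbi vae1is"), which is what the
 * flush_tlb_range() wrapper does after this patch.
 */
static inline void example_generic_flush(struct vm_area_struct *vma,
                                         unsigned long start,
                                         unsigned long end)
{
        flush_tlb_range(vma, start, end); /* == __flush_tlb_range(..., false) */
}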
Diffstat (limited to 'arch/arm64/include/asm')
-rw-r--r--  arch/arm64/include/asm/tlb.h       |  7
-rw-r--r--  arch/arm64/include/asm/tlbflush.h  | 21
2 files changed, 22 insertions(+), 6 deletions(-)
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
index 3a0242c7eb8d..d6e6b6660380 100644
--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -41,7 +41,12 @@ static inline void tlb_flush(struct mmu_gather *tlb)
                 flush_tlb_mm(tlb->mm);
         } else {
                 struct vm_area_struct vma = { .vm_mm = tlb->mm, };
-                flush_tlb_range(&vma, tlb->start, tlb->end);
+                /*
+                 * The intermediate page table levels are already handled by
+                 * the __(pte|pmd|pud)_free_tlb() functions, so last level
+                 * TLBI is sufficient here.
+                 */
+                __flush_tlb_range(&vma, tlb->start, tlb->end, true);
         }
 }
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index e972bf456558..7bd2da021658 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -87,7 +87,7 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,
                 ((unsigned long)ASID(vma->vm_mm) << 48);

         dsb(ishst);
-        asm("tlbi vae1is, %0" : : "r" (addr));
+        asm("tlbi vale1is, %0" : : "r" (addr));
         dsb(ish);
 }
@@ -97,8 +97,9 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,
  */
 #define MAX_TLB_RANGE (1024UL << PAGE_SHIFT)

-static inline void flush_tlb_range(struct vm_area_struct *vma,
-                                   unsigned long start, unsigned long end)
+static inline void __flush_tlb_range(struct vm_area_struct *vma,
+                                     unsigned long start, unsigned long end,
+                                     bool last_level)
 {
         unsigned long asid = (unsigned long)ASID(vma->vm_mm) << 48;
         unsigned long addr;
@@ -112,11 +113,21 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
         end = asid | (end >> 12);

         dsb(ishst);
-        for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
-                asm("tlbi vae1is, %0" : : "r"(addr));
+        for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) {
+                if (last_level)
+                        asm("tlbi vale1is, %0" : : "r"(addr));
+                else
+                        asm("tlbi vae1is, %0" : : "r"(addr));
+        }
         dsb(ish);
 }

+static inline void flush_tlb_range(struct vm_area_struct *vma,
+                                   unsigned long start, unsigned long end)
+{
+        __flush_tlb_range(vma, start, end, false);
+}
+
 static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
         unsigned long addr;