diff options
author | Will Deacon | 2018-08-23 22:16:50 +0200 |
---|---|---|
committer | Catalin Marinas | 2018-09-11 17:49:12 +0200 |
commit | ace8cb754539077ed75f3f15b77b2b51b5b7a431 (patch) | |
tree | bab41795267bc907a837e48799405ee489672604 /arch/arm64/include/asm/tlbflush.h | |
parent | arm64: tlb: Adjust stride and type of TLBI according to mmu_gather (diff) | |
download | kernel-qcow2-linux-ace8cb754539077ed75f3f15b77b2b51b5b7a431.tar.gz kernel-qcow2-linux-ace8cb754539077ed75f3f15b77b2b51b5b7a431.tar.xz kernel-qcow2-linux-ace8cb754539077ed75f3f15b77b2b51b5b7a431.zip |
arm64: tlb: Avoid synchronous TLBIs when freeing page tables
By selecting HAVE_RCU_TABLE_INVALIDATE, we can rely on tlb_flush() being
called if we fail to batch table pages for freeing. This in turn allows
us to postpone walk-cache invalidation until tlb_finish_mmu(), which
avoids lots of unnecessary DSBs and means we can shoot down the ASID if
the range is large enough.
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Diffstat (limited to 'arch/arm64/include/asm/tlbflush.h')
-rw-r--r-- | arch/arm64/include/asm/tlbflush.h | 11 |
1 file changed, 0 insertions, 11 deletions
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h index 37ccdb246b20..c98ed8871030 100644 --- a/arch/arm64/include/asm/tlbflush.h +++ b/arch/arm64/include/asm/tlbflush.h @@ -215,17 +215,6 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end * Used to invalidate the TLB (walk caches) corresponding to intermediate page * table levels (pgd/pud/pmd). */ -static inline void __flush_tlb_pgtable(struct mm_struct *mm, - unsigned long uaddr) -{ - unsigned long addr = __TLBI_VADDR(uaddr, ASID(mm)); - - dsb(ishst); - __tlbi(vae1is, addr); - __tlbi_user(vae1is, addr); - dsb(ish); -} - static inline void __flush_tlb_kernel_pgtable(unsigned long kaddr) { unsigned long addr = __TLBI_VADDR(kaddr, 0); |