author      Ingo Molnar    2017-08-11 13:51:59 +0200
committer   Ingo Molnar    2017-08-11 13:51:59 +0200
commit      040cca3ab2f6f8b8d26e0e4965abea2b9aa14818 (patch)
tree        25709ba52ee06fccf4bfbfbf2897bb8cf86da828 /include/linux/mm_types.h
parent      locking/lockdep: Add 'crossrelease' feature documentation (diff)
parent      Merge tag 'drm-fixes-for-v4.13-rc5' of git://people.freedesktop.org/~airlied/... (diff)
Merge branch 'linus' into locking/core, to resolve conflicts
Conflicts:
include/linux/mm_types.h
mm/huge_memory.c
I removed the smp_mb__before_spinlock() call, as the following commit does:
  8b1b436dd1cc ("mm, locking: Rework {set,clear,mm}_tlb_flush_pending()")
and fixed up the affected commits.
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'include/linux/mm_types.h')
-rw-r--r--   include/linux/mm_types.h   66
1 file changed, 40 insertions(+), 26 deletions(-)
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 36ea3cf7d85e..dc1edec05a3f 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -487,14 +487,12 @@ struct mm_struct {
         /* numa_scan_seq prevents two threads setting pte_numa */
         int numa_scan_seq;
 #endif
-#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
         /*
          * An operation with batched TLB flushing is going on. Anything that
          * can move process memory needs to flush the TLB when moving a
          * PROT_NONE or PROT_NUMA mapped page.
          */
-        bool tlb_flush_pending;
-#endif
+        atomic_t tlb_flush_pending;
 #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
         /* See flush_tlb_batched_pending() */
         bool tlb_flush_batched;
@@ -522,12 +520,18 @@ static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
         return mm->cpu_vm_mask_var;
 }
 
-#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
+struct mmu_gather;
+extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+                                unsigned long start, unsigned long end);
+extern void tlb_finish_mmu(struct mmu_gather *tlb,
+                                unsigned long start, unsigned long end);
+
 /*
  * Memory barriers to keep this state in sync are graciously provided by
  * the page table locks, outside of which no page table modifications happen.
- * The barriers below prevent the compiler from re-ordering the instructions
- * around the memory barriers that are already present in the code.
+ * The barriers are used to ensure the order between tlb_flush_pending updates,
+ * which happen while the lock is not taken, and the PTE updates, which happen
+ * while the lock is taken, are serialized.
  */
 static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
 {
@@ -535,11 +539,26 @@ static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
          * Must be called with PTL held; such that our PTL acquire will have
          * observed the store from set_tlb_flush_pending().
          */
-        return mm->tlb_flush_pending;
+        return atomic_read(&mm->tlb_flush_pending) > 0;
+}
+
+/*
+ * Returns true if there are two above TLB batching threads in parallel.
+ */
+static inline bool mm_tlb_flush_nested(struct mm_struct *mm)
+{
+        return atomic_read(&mm->tlb_flush_pending) > 1;
+}
+
+static inline void init_tlb_flush_pending(struct mm_struct *mm)
+{
+        atomic_set(&mm->tlb_flush_pending, 0);
 }
-static inline void set_tlb_flush_pending(struct mm_struct *mm)
+
+static inline void inc_tlb_flush_pending(struct mm_struct *mm)
 {
-        mm->tlb_flush_pending = true;
+        atomic_inc(&mm->tlb_flush_pending);
+
         /*
          * The only time this value is relevant is when there are indeed pages
          * to flush. And we'll only flush pages after changing them, which
@@ -547,7 +566,7 @@ static inline void set_tlb_flush_pending(struct mm_struct *mm)
          *
          * So the ordering here is:
          *
-         *      mm->tlb_flush_pending = true;
+         *      atomic_inc(&mm->tlb_flush_pending);
          *      spin_lock(&ptl);
          *      ...
          *      set_pte_at();
@@ -559,30 +578,25 @@ static inline void set_tlb_flush_pending(struct mm_struct *mm)
          *      spin_unlock(&ptl);
          *
          *      flush_tlb_range();
-         *      mm->tlb_flush_pending = false;
+         *      atomic_dec(&mm->tlb_flush_pending);
          *
          * So the =true store is constrained by the PTL unlock, and the =false
          * store is constrained by the TLB invalidate.
          */
 }
+
 /* Clearing is done after a TLB flush, which also provides a barrier. */
-static inline void clear_tlb_flush_pending(struct mm_struct *mm)
-{
-        /* see set_tlb_flush_pending */
-        mm->tlb_flush_pending = false;
-}
-#else
-static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
-{
-        return false;
-}
-static inline void set_tlb_flush_pending(struct mm_struct *mm)
-{
-}
-static inline void clear_tlb_flush_pending(struct mm_struct *mm)
+static inline void dec_tlb_flush_pending(struct mm_struct *mm)
 {
+        /*
+         * Guarantee that the tlb_flush_pending does not not leak into the
+         * critical section, since we must order the PTE change and changes to
+         * the pending TLB flush indication. We could have relied on TLB flush
+         * as a memory barrier, but this behavior is not clearly documented.
+         */
+        smp_mb__before_atomic();
+        atomic_dec(&mm->tlb_flush_pending);
 }
-#endif
 
 struct vm_fault;
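The ordering comment inside inc_tlb_flush_pending() above spells out the protocol a batched-flush path is expected to follow: raise the counter before taking the page-table lock, change the PTEs under the lock, then flush the TLB and only afterwards drop the counter. The sketch below strings the helpers from this diff together in exactly that order. It is illustrative only and not part of this commit: example_batched_protect() and its parameter list are made up, and it assumes the caller already has the vma, the pte and its page-table lock in hand (real users of the pattern live in paths such as mprotect and NUMA page migration).

/*
 * Illustrative sketch only -- not from this commit. It follows the
 * ordering documented in inc_tlb_flush_pending(): the pending count is
 * raised before the PTE update and dropped only after the TLB flush.
 */
#include <linux/mm.h>
#include <linux/mm_types.h>

static void example_batched_protect(struct mm_struct *mm,
                                    struct vm_area_struct *vma,
                                    pte_t *ptep, pte_t newpte,
                                    unsigned long addr,
                                    unsigned long start, unsigned long end,
                                    spinlock_t *ptl)
{
        /* Raise the counter outside the PTL, before any PTE is touched. */
        inc_tlb_flush_pending(mm);

        spin_lock(ptl);
        /*
         * Anyone acquiring the same PTL after this point observes
         * mm_tlb_flush_pending() > 0 and knows a flush is still due.
         */
        set_pte_at(mm, addr, ptep, newpte);
        spin_unlock(ptl);

        /* Flush first; dec_tlb_flush_pending() orders the decrement after it. */
        flush_tlb_range(vma, start, end);
        dec_tlb_flush_pending(mm);
}

On the reader side, mm_tlb_flush_pending() must be evaluated with the same PTL held, as its comment requires, and mm_tlb_flush_nested() reports whether a second batching thread is running in parallel.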