From 6b4ebc3a9078c5b7b8c4cf495a0b1d2d0e0bfe7a Mon Sep 17 00:00:00 2001
From: Davidlohr Bueso
Date: Wed, 4 Jun 2014 16:06:47 -0700
Subject: mm,vmacache: optimize overflow system-wide flushing

For single threaded workloads, we can avoid flushing and iterating through
the entire list of tasks, making the whole function a lot faster and
requiring only a single atomic read of mm_users.

Signed-off-by: Davidlohr Bueso
Suggested-by: Oleg Nesterov
Cc: Aswin Chandramouleeswaran
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/vmacache.c | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/mm/vmacache.c b/mm/vmacache.c
index 658ed3b3e38d..9f25af825dec 100644
--- a/mm/vmacache.c
+++ b/mm/vmacache.c
@@ -17,6 +17,16 @@ void vmacache_flush_all(struct mm_struct *mm)
 {
 	struct task_struct *g, *p;
 
+	/*
+	 * Single threaded tasks need not iterate the entire
+	 * list of processes. We can avoid the flushing as well
+	 * since the mm's seqnum was increased and we don't have
+	 * to worry about other threads' seqnum. Current's
+	 * flush will occur upon the next lookup.
+	 */
+	if (atomic_read(&mm->mm_users) == 1)
+		return;
+
 	rcu_read_lock();
 	for_each_process_thread(g, p) {
 		/*
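
The idea behind the fast path is generic: when a reference count shows a
single user, there is no other thread whose cached state can be stale, so
the expensive invalidation walk can be skipped and the lone user simply
revalidates on its next lookup. Below is a minimal user-space sketch of
that pattern, assuming C11 atomics; the names (struct shared_cache,
cache_flush_all, NR_USERS) are hypothetical and not part of the kernel code
above.

	#include <stdatomic.h>
	#include <stdio.h>

	#define NR_USERS 8

	struct shared_cache {
		atomic_int users;           /* analogous to mm->mm_users          */
		int per_user_seq[NR_USERS]; /* per-user cached sequence numbers   */
		int seq;                    /* global sequence number             */
	};

	static void cache_flush_all(struct shared_cache *c)
	{
		/*
		 * Fast path: with a single user, nobody else's cached state
		 * can go stale, so skip the walk entirely; the lone user
		 * revalidates against c->seq on its next lookup.
		 */
		if (atomic_load(&c->users) == 1)
			return;

		/* Slow path: invalidate every user's cached sequence number. */
		for (int i = 0; i < NR_USERS; i++)
			c->per_user_seq[i] = -1;
	}

	int main(void)
	{
		struct shared_cache c = { .seq = 42 };

		atomic_store(&c.users, 1);
		cache_flush_all(&c);    /* single user: no walk performed */

		atomic_store(&c.users, 4);
		cache_flush_all(&c);    /* multiple users: full invalidation */

		printf("seq=%d first user seq=%d\n", c.seq, c.per_user_seq[0]);
		return 0;
	}

As in the patch, the check is a single atomic read with no locking; it only
has to be an optimization hint, since a user appearing after the check will
revalidate its own cache on the next lookup anyway.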