author     Johannes Weiner    2016-03-15 22:57:25 +0100
committer  Linus Torvalds     2016-03-16 00:55:16 +0100
commit     fdf1cdb91b6ab7a8a91df68c384f36b8a0909cab (patch)
tree       828ad38946acad9990dcc733a10ebdeed0c1fbd5 /include
parent     mm: simplify lock_page_memcg() (diff)
mm: remove unnecessary uses of lock_page_memcg()
There are several users that nest lock_page_memcg() inside lock_page()
to prevent page->mem_cgroup from changing.  But the page lock prevents
pages from moving between cgroups, so that is unnecessary overhead.

Remove lock_page_memcg() in contexts where the page is already locked,
and fix the debug code in the page stat functions to accept the page
lock as well.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Cc: Michal Hocko <mhocko@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
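For illustration only, a minimal sketch of the pattern this commit simplifies; the caller name and the choice of the dirty flag/stat index are hypothetical and not taken from the patch. With the page lock held, page->mem_cgroup cannot change, so a nested lock_page_memcg()/unlock_page_memcg() pair around the stat update is pure overhead and can be dropped:

/*
 * Hypothetical caller (not part of this patch): the page lock alone
 * pins page->mem_cgroup, so no lock_page_memcg() is taken here.
 */
#include <linux/pagemap.h>
#include <linux/page-flags.h>
#include <linux/memcontrol.h>

static void clear_dirty_stat_example(struct page *page)
{
	lock_page(page);	/* page lock pins page->mem_cgroup */
	if (TestClearPageDirty(page))
		mem_cgroup_update_page_stat(page, MEM_CGROUP_STAT_DIRTY, -1);
	unlock_page(page);	/* no unlock_page_memcg() needed */
}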
Diffstat (limited to 'include')
-rw-r--r--  include/linux/memcontrol.h | 12
1 file changed, 7 insertions(+), 5 deletions(-)
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index d560c9a3cadf..f0c4bec6565b 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -28,6 +28,7 @@
#include <linux/eventfd.h>
#include <linux/mmzone.h>
#include <linux/writeback.h>
+#include <linux/page-flags.h>

struct mem_cgroup;
struct page;
@@ -464,18 +465,19 @@ void unlock_page_memcg(struct page *page);
* @idx: page state item to account
* @val: number of pages (positive or negative)
*
- * Callers must use lock_page_memcg() to prevent double accounting
- * when the page is concurrently being moved to another memcg:
+ * The @page must be locked or the caller must use lock_page_memcg()
+ * to prevent double accounting when the page is concurrently being
+ * moved to another memcg:
*
- * lock_page_memcg(page);
+ * lock_page(page) or lock_page_memcg(page)
* if (TestClearPageState(page))
* mem_cgroup_update_page_stat(page, state, -1);
- * unlock_page_memcg(page);
+ * unlock_page(page) or unlock_page_memcg(page)
*/
static inline void mem_cgroup_update_page_stat(struct page *page,
enum mem_cgroup_stat_index idx, int val)
{
- VM_BUG_ON(!rcu_read_lock_held());
+ VM_BUG_ON(!(rcu_read_lock_held() || PageLocked(page)));
if (page->mem_cgroup)
this_cpu_add(page->mem_cgroup->stat->count[idx], val);
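
To round out the contract documented in the comment above, a hedged sketch of the other permitted pattern, for paths that do not hold the page lock; the caller name and the writeback stat index are hypothetical, not taken from the patch:

/*
 * Hypothetical caller (not part of this patch) that does not hold the
 * page lock: it pins page->mem_cgroup with lock_page_memcg(), which
 * keeps the RCU read lock held for the duration and so satisfies the
 * rcu_read_lock_held() half of the VM_BUG_ON() above.
 */
#include <linux/page-flags.h>
#include <linux/memcontrol.h>

static void clear_writeback_stat_example(struct page *page)
{
	lock_page_memcg(page);
	if (TestClearPageWriteback(page))
		mem_cgroup_update_page_stat(page, MEM_CGROUP_STAT_WRITEBACK, -1);
	unlock_page_memcg(page);
}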