 include/linux/memcontrol.h |  2 +-
 include/linux/mm_inline.h  |  5 ++---
 mm/memcontrol.c            |  5 +----
 mm/vmscan.c                | 40 +++++++++++++++++++++++++++++++++-------
 4 files changed, 37 insertions(+), 15 deletions(-)
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index b759827b2f1e..5147e650287a 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -430,7 +430,7 @@ static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
 int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
 
 void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
-                enum zone_type zid, int nr_pages);
+                int nr_pages);
 
 unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
                                            int nid, unsigned int lru_mask);
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index dd22b08c47be..bcc4ed07fa90 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -52,10 +52,9 @@ static __always_inline void update_lru_size(struct lruvec *lruvec,
                                 enum lru_list lru, enum zone_type zid,
                                 int nr_pages)
 {
-#ifdef CONFIG_MEMCG
-        mem_cgroup_update_lru_size(lruvec, lru, zid, nr_pages);
-#else
         __update_lru_size(lruvec, lru, zid, nr_pages);
+#ifdef CONFIG_MEMCG
+        mem_cgroup_update_lru_size(lruvec, lru, nr_pages);
 #endif
 }
 
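
For reference, update_lru_size() as it reads with the hunk above applied (reconstructed directly from the diff): the node-level counter is now updated unconditionally, and the memcg hook, which no longer takes a zone id, runs afterwards:

static __always_inline void update_lru_size(struct lruvec *lruvec,
                                enum lru_list lru, enum zone_type zid,
                                int nr_pages)
{
        __update_lru_size(lruvec, lru, zid, nr_pages);
#ifdef CONFIG_MEMCG
        mem_cgroup_update_lru_size(lruvec, lru, nr_pages);
#endif
}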
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 9cbd40ebccd1..13be30c3ea78 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -965,7 +965,6 @@ out:
  * mem_cgroup_update_lru_size - account for adding or removing an lru page
  * @lruvec: mem_cgroup per zone lru vector
  * @lru: index of lru list the page is sitting on
- * @zid: Zone ID of the zone pages have been added to
  * @nr_pages: positive when adding or negative when removing
  *
  * This function must be called under lru_lock, just before a page is added
@@ -973,15 +972,13 @@ out:
  * so as to allow it to check that lru_size 0 is consistent with list_empty).
  */
 void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
-                                enum zone_type zid, int nr_pages)
+                                int nr_pages)
 {
         struct mem_cgroup_per_node *mz;
         unsigned long *lru_size;
         long size;
         bool empty;
 
-        __update_lru_size(lruvec, lru, zid, nr_pages);
-
         if (mem_cgroup_disabled())
                 return;
 
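
The kernel-doc above pins down the calling convention this change preserves: the size update happens under lru_lock, immediately before the page joins the list, so the lru_size/list_empty cross-check stays meaningful. A typical caller in include/linux/mm_inline.h follows that contract roughly like this (a sketch of the era's code, not part of this patch):

static __always_inline void add_page_to_lru_list(struct page *page,
                                struct lruvec *lruvec, enum lru_list lru)
{
        /* account first, under lru_lock, then do the list insertion */
        update_lru_size(lruvec, lru, page_zonenum(page), hpage_nr_pages(page));
        list_add(&page->lru, &lruvec->lists[lru]);
}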
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 5625eccc0140..b3f5b359280d 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1350,6 +1350,38 @@ int __isolate_lru_page(struct page *page, isolate_mode_t mode)
         return ret;
 }
+
+/*
+ * Update LRU sizes after isolating pages. The LRU size updates must
+ * be complete before mem_cgroup_update_lru_size due to a sanity check.
+ */
+static __always_inline void update_lru_sizes(struct lruvec *lruvec,
+                        enum lru_list lru, unsigned long *nr_zone_taken,
+                        unsigned long nr_taken)
+{
+#ifdef CONFIG_HIGHMEM
+        int zid;
+
+        /*
+         * Highmem has separate accounting for highmem pages so each zone
+         * is updated separately.
+         */
+        for (zid = 0; zid < MAX_NR_ZONES; zid++) {
+                if (!nr_zone_taken[zid])
+                        continue;
+
+                __update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]);
+        }
+#else
+        /* Zone ID does not matter on !HIGHMEM */
+        __update_lru_size(lruvec, lru, 0, -nr_taken);
+#endif
+
+#ifdef CONFIG_MEMCG
+        mem_cgroup_update_lru_size(lruvec, lru, -nr_taken);
+#endif
+}
+
 /*
  * zone_lru_lock is heavily contended. Some of the functions that
  * shrink the lists perform better by taking out a batch of pages
  * and working on them outside the LRU lock.
@@ -1436,13 +1468,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
         *nr_scanned = scan;
         trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, nr_to_scan, scan,
                                     nr_taken, mode, is_file_lru(lru));
-        for (scan = 0; scan < MAX_NR_ZONES; scan++) {
-                nr_pages = nr_zone_taken[scan];
-                if (!nr_pages)
-                        continue;
-
-                update_lru_size(lruvec, lru, scan, -nr_pages);
-        }
+        update_lru_sizes(lruvec, lru, nr_zone_taken, nr_taken);
 
         return nr_taken;
 }
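
To make the ordering concrete, here is a small stand-alone user-space model of the pattern update_lru_sizes() introduces (all names here are hypothetical stand-ins, not kernel code): every per-zone counter is brought up to date first, and only then is the single node-wide memcg counter adjusted, so a list_empty-style sanity check in that final step observes fully updated sizes.

#include <assert.h>
#include <stdio.h>

#define MAX_NR_ZONES 4

/* Hypothetical stand-ins for the kernel's counters. */
static long zone_lru_size[MAX_NR_ZONES];   /* per-zone LRU sizes */
static long memcg_lru_size;                /* one node-wide memcg size */
static int lru_list_empty;                 /* mirrors list_empty() on the lru */

/* Models mem_cgroup_update_lru_size(): one update for the whole batch,
 * plus the sanity check that size 0 coincides with an empty list. */
static void model_memcg_update(long nr_pages)
{
        memcg_lru_size += nr_pages;
        assert((memcg_lru_size == 0) == lru_list_empty);
}

/* Models update_lru_sizes(): all per-zone counters first, memcg last. */
static void model_update_lru_sizes(const unsigned long *nr_zone_taken,
                                   unsigned long nr_taken)
{
        int zid;

        for (zid = 0; zid < MAX_NR_ZONES; zid++) {
                if (!nr_zone_taken[zid])
                        continue;
                zone_lru_size[zid] -= nr_zone_taken[zid];
        }
        model_memcg_update(-(long)nr_taken);
}

int main(void)
{
        unsigned long nr_zone_taken[MAX_NR_ZONES] = { 2, 0, 3, 0 };

        /* Populate: five pages spread across two zones. */
        zone_lru_size[0] = 2;
        zone_lru_size[2] = 3;
        memcg_lru_size = 5;
        lru_list_empty = 0;

        /* Isolate everything; the backing list is now empty. */
        lru_list_empty = 1;
        model_update_lru_sizes(nr_zone_taken, 5);

        printf("memcg size after isolation: %ld\n", memcg_lru_size);
        return 0;
}

Had the memcg update instead been issued once per zone, as before this patch, the check in the final step could observe an empty list paired with a non-zero intermediate size.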