author		Mel Gorman	2018-12-28 09:35:44 +0100
committer	Linus Torvalds	2018-12-28 21:11:48 +0100
commit		a921444382b49cc7fdeca3fba3e278bc09484a27 (patch)
tree		2437250398beef4bc4343ae123716a3d9d7f46ee /mm
parent		mm, page_alloc: spread allocations across zones before introducing fragmentation (diff)
mm: move zone watermark accesses behind an accessor
This is a preparation patch only, no functional change.

Link: http://lkml.kernel.org/r/20181123114528.28802-3-mgorman@techsingularity.net
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Zi Yan <zi.yan@cs.rutgers.edu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
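The accessors themselves are added in include/linux/mmzone.h, which falls outside the 'mm' diffstat shown below. As a sketch of what this series switches callers to, the zone->watermark[] array is renamed to _watermark[], the leading underscore flagging it as private to these macros:

	#define min_wmark_pages(z)	(z->_watermark[WMARK_MIN])
	#define low_wmark_pages(z)	(z->_watermark[WMARK_LOW])
	#define high_wmark_pages(z)	(z->_watermark[WMARK_HIGH])
	#define wmark_pages(z, i)	(z->_watermark[i])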
Diffstat (limited to 'mm')
-rw-r--r--	mm/compaction.c	 2
-rw-r--r--	mm/page_alloc.c	12
2 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/mm/compaction.c b/mm/compaction.c
index 7c607479de4a..ef29490b0f46 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -1431,7 +1431,7 @@ static enum compact_result __compaction_suitable(struct zone *zone, int order,
 	if (is_via_compact_memory(order))
 		return COMPACT_CONTINUE;
 
-	watermark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
+	watermark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
 	/*
 	 * If watermarks for high-order allocation are already met, there
 	 * should be no need for compaction at all.
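Both converted call sites in this patch index the watermark with the low bits of alloc_flags. For reference, a sketch of that encoding as it appears in mm/internal.h around this series (WMARK_MIN, WMARK_LOW and WMARK_HIGH are 0, 1 and 2):

	#define ALLOC_WMARK_MIN		WMARK_MIN
	#define ALLOC_WMARK_LOW		WMARK_LOW
	#define ALLOC_WMARK_HIGH	WMARK_HIGH
	#define ALLOC_NO_WATERMARKS	0x04 /* don't check watermarks at all */

	/* Mask to get the watermark bits */
	#define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)

So wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK) picks whichever of the min, low or high watermarks the caller requested.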
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 251b8a0c9c5d..2046e333ea8f 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3376,7 +3376,7 @@ retry:
 			}
 		}
 
-		mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
+		mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
 		if (!zone_watermark_fast(zone, order, mark,
 				       ac_classzone_idx(ac), alloc_flags)) {
 			int ret;
@@ -4793,7 +4793,7 @@ long si_mem_available(void)
 		pages[lru] = global_node_page_state(NR_LRU_BASE + lru);
 
 	for_each_zone(zone)
-		wmark_low += zone->watermark[WMARK_LOW];
+		wmark_low += low_wmark_pages(zone);
 
 	/*
 	 * Estimate the amount of memory available for userspace allocations,
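For context on why the low watermarks are summed here: si_mem_available() later discounts up to wmark_low when counting page cache and reclaimable slab as "available", since reclaiming all the way down to the watermark would start to hurt. A simplified sketch of that use, condensed from the body of the full function:

	/* Page cache is cheap to reclaim, but keep a cushion of up to
	 * wmark_low; the reclaimable-slab estimate is discounted alike. */
	pagecache = pages[LRU_ACTIVE_FILE] + pages[LRU_INACTIVE_FILE];
	pagecache -= min(pagecache / 2, wmark_low);
	available += pagecache;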
@@ -7431,13 +7431,13 @@ static void __setup_per_zone_wmarks(void)
 			min_pages = zone_managed_pages(zone) / 1024;
 			min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL);
-			zone->watermark[WMARK_MIN] = min_pages;
+			zone->_watermark[WMARK_MIN] = min_pages;
 		} else {
 			/*
 			 * If it's a lowmem zone, reserve a number of pages
 			 * proportionate to the zone's size.
 			 */
-			zone->watermark[WMARK_MIN] = tmp;
+			zone->_watermark[WMARK_MIN] = tmp;
 		}
 
 		/*
@@ -7449,8 +7449,8 @@ static void __setup_per_zone_wmarks(void)
 			    mult_frac(zone_managed_pages(zone),
 				      watermark_scale_factor, 10000));
 
-		zone->watermark[WMARK_LOW] = min_wmark_pages(zone) + tmp;
-		zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + tmp * 2;
+		zone->_watermark[WMARK_LOW] = min_wmark_pages(zone) + tmp;
+		zone->_watermark[WMARK_HIGH] = min_wmark_pages(zone) + tmp * 2;
 
 		spin_unlock_irqrestore(&zone->lock, flags);
 	}
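Putting the two __setup_per_zone_wmarks() hunks together: WMARK_MIN comes from the zone's share of min_free_kbytes (capped for highmem), and the low and high marks sit a gap above it, where the gap is the larger of min/4 and watermark_scale_factor hundredths of a percent of the zone's managed pages. A standalone sketch of that arithmetic; compute_wmarks() and its parameters are hypothetical, and the kernel does this per zone under zone->lock:

	#include <stdint.h>

	struct wmarks { uint64_t min, low, high; };

	/* Hypothetical standalone model of the two hunks above. */
	static struct wmarks compute_wmarks(uint64_t managed_pages,
					    uint64_t min_pages,
					    unsigned int watermark_scale_factor)
	{
		/* Gap between watermarks: at least a quarter of WMARK_MIN,
		 * raised to watermark_scale_factor * 0.01% of the zone. */
		uint64_t gap = min_pages >> 2;
		uint64_t scaled = managed_pages * watermark_scale_factor / 10000;

		if (scaled > gap)
			gap = scaled;

		return (struct wmarks){
			.min  = min_pages,		/* _watermark[WMARK_MIN]  */
			.low  = min_pages + gap,	/* _watermark[WMARK_LOW]  */
			.high = min_pages + gap * 2,	/* _watermark[WMARK_HIGH] */
		};
	}

For example, a 4 GiB zone (1,048,576 4 KiB pages) with the default watermark_scale_factor of 10 gets a gap of at least 1,048 pages (roughly 4 MiB) between consecutive watermarks.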