-rw-r--r--   mm/page_alloc.c    8
-rw-r--r--   mm/vmscan.c       13
2 files changed, 17 insertions, 4 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 0f92e04b58db..a34d9fcf1339 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3367,10 +3367,14 @@ static void wake_all_kswapds(unsigned int order, const struct alloc_context *ac)
 {
 	struct zoneref *z;
 	struct zone *zone;
+	pg_data_t *last_pgdat = NULL;
 
 	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
-					ac->high_zoneidx, ac->nodemask)
-		wakeup_kswapd(zone, order, ac_classzone_idx(ac));
+					ac->high_zoneidx, ac->nodemask) {
+		if (last_pgdat != zone->zone_pgdat)
+			wakeup_kswapd(zone, order, ac_classzone_idx(ac));
+		last_pgdat = zone->zone_pgdat;
+	}
 }
 
 static inline unsigned int
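
For illustration only (not part of the patch): a minimal userspace C sketch of the deduplication above, assuming the zonelist walks zones grouped by node so that remembering the last-seen node, analogous to last_pgdat, is enough to wake each node's kswapd at most once per call. struct fake_zone, node_id and wake_node() are illustrative stand-ins, not kernel API.

/*
 * Sketch: wake each node once even though it contributes several zones.
 * node_id stands in for zone->zone_pgdat; wake_node() stands in for
 * wakeup_kswapd().
 */
#include <stdio.h>

struct fake_zone {
	int node_id;		/* which node this zone belongs to */
	const char *name;
};

static void wake_node(int node_id)
{
	printf("wake kswapd on node %d\n", node_id);
}

static void wake_all_nodes(const struct fake_zone *zones, int nr)
{
	int last_node = -1;	/* plays the role of last_pgdat = NULL */
	int i;

	for (i = 0; i < nr; i++) {
		/* Only wake when we cross into a different node. */
		if (zones[i].node_id != last_node)
			wake_node(zones[i].node_id);
		last_node = zones[i].node_id;
	}
}

int main(void)
{
	const struct fake_zone zl[] = {
		{ 0, "Normal" },	/* node 0: woken once ... */
		{ 0, "DMA32" },		/* ... not a second time */
		{ 1, "Normal" },	/* node 1: woken once */
	};

	wake_all_nodes(zl, 3);
	return 0;
}
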
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 9b61a55b6e38..31edd7776289 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -3421,6 +3421,7 @@ kswapd_try_sleep:
 void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx)
 {
 	pg_data_t *pgdat;
+	int z;
 
 	if (!populated_zone(zone))
 		return;
@@ -3432,8 +3433,16 @@ void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx)
 	pgdat->kswapd_order = max(pgdat->kswapd_order, order);
 	if (!waitqueue_active(&pgdat->kswapd_wait))
 		return;
-	if (zone_balanced(zone, order, 0))
-		return;
+
+	/* Only wake kswapd if all zones are unbalanced */
+	for (z = 0; z <= classzone_idx; z++) {
+		zone = pgdat->node_zones + z;
+		if (!populated_zone(zone))
+			continue;
+
+		if (zone_balanced(zone, order, classzone_idx))
+			return;
+	}
 
 	trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, zone_idx(zone), order);
 	wake_up_interruptible(&pgdat->kswapd_wait);
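
Again for illustration only (not part of the patch): a standalone C sketch of the new wakeup check, which skips the wakeup as soon as any populated zone up to classzone_idx is balanced. zone_is_balanced(), the free/watermark fields and need_kswapd() are illustrative stand-ins for the kernel's zone_balanced() and the surrounding logic.

/*
 * Sketch: kswapd is only "woken" when none of the eligible zones on the
 * node are balanced. The watermark comparison is a simplified stand-in.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_zone {
	bool populated;
	long free_pages;
	long high_wmark;
};

static bool zone_is_balanced(const struct fake_zone *z)
{
	return z->free_pages >= z->high_wmark;
}

/* Returns true when kswapd should be woken for this node. */
static bool need_kswapd(const struct fake_zone *zones, int classzone_idx)
{
	int z;

	for (z = 0; z <= classzone_idx; z++) {
		if (!zones[z].populated)
			continue;
		/* One balanced eligible zone is enough to skip the wakeup. */
		if (zone_is_balanced(&zones[z]))
			return false;
	}
	return true;
}

int main(void)
{
	const struct fake_zone node[] = {
		{ true,  50, 100 },	/* low zone: below watermark */
		{ false,  0,   0 },	/* unpopulated zone: skipped */
		{ true, 200, 100 },	/* high zone: balanced */
	};

	/* One balanced zone exists, so the wakeup is skipped. */
	printf("wake kswapd: %s\n", need_kswapd(node, 2) ? "yes" : "no");
	return 0;
}
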