From 86a294a81f93d6f36d00ec3ff779d36d218f852d Mon Sep 17 00:00:00 2001
From: Michal Hocko
Date: Fri, 20 May 2016 16:57:12 -0700
Subject: mm, oom, compaction: prevent from should_compact_retry looping for ever for costly orders

"mm: consider compaction feedback also for costly allocation" has
removed the upper bound for the reclaim/compaction retries based on the
number of reclaimed pages for costly orders.  While this is desirable,
the patch missed an unfortunate interaction between reclaim, compaction
and the retry logic.  The direct reclaim tries to get zones over min
watermark while compaction backs off and returns COMPACT_SKIPPED when
all zones are below low watermark + 1<<order gap.

Acked-by: Hillf Danton
Acked-by: Vlastimil Babka
Cc: David Rientjes
Cc: Johannes Weiner
Cc: Joonsoo Kim
Cc: Mel Gorman
Cc: Tetsuo Handa
Cc: Vladimir Davydov
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/page_alloc.c | 23 +++++++++++++----------
 1 file changed, 13 insertions(+), 10 deletions(-)

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index dea406a62e3d..089f760ce64a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2750,10 +2750,9 @@ static inline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
  * one free page of a suitable size. Checking now avoids taking the zone lock
  * to check in the allocation paths if no pages are free.
  */
-static bool __zone_watermark_ok(struct zone *z, unsigned int order,
-			unsigned long mark, int classzone_idx,
-			unsigned int alloc_flags,
-			long free_pages)
+bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
+			 int classzone_idx, unsigned int alloc_flags,
+			 long free_pages)
 {
 	long min = mark;
 	int o;
@@ -3256,8 +3255,8 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 }
 
 static inline bool
-should_compact_retry(unsigned int order, enum compact_result compact_result,
-		     enum migrate_mode *migrate_mode,
+should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
+		     enum compact_result compact_result, enum migrate_mode *migrate_mode,
 		     int compaction_retries)
 {
 	int max_retries = MAX_COMPACT_RETRIES;
@@ -3281,9 +3280,11 @@ should_compact_retry(unsigned int order, enum compact_result compact_result,
 	/*
 	 * make sure the compaction wasn't deferred or didn't bail out early
 	 * due to locks contention before we declare that we should give up.
+	 * But do not retry if the given zonelist is not suitable for
+	 * compaction.
 	 */
 	if (compaction_withdrawn(compact_result))
-		return true;
+		return compaction_zonelist_suitable(ac, order, alloc_flags);
 
 	/*
 	 * !costly requests are much more important than __GFP_REPEAT
@@ -3311,7 +3312,8 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 }
 
 static inline bool
-should_compact_retry(unsigned int order, enum compact_result compact_result,
+should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags,
+		     enum compact_result compact_result,
 		     enum migrate_mode *migrate_mode,
 		     int compaction_retries)
 {
@@ -3706,8 +3708,9 @@ retry:
 	 * of free memory (see __compaction_suitable)
 	 */
 	if (did_some_progress > 0 &&
-			should_compact_retry(order, compact_result,
-				&migration_mode, compaction_retries))
+			should_compact_retry(ac, order, alloc_flags,
+				compact_result, &migration_mode,
+				compaction_retries))
 		goto retry;
 
 	/* Reclaim has failed us, start killing things */
-- 
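
The behavioural change can be illustrated outside the kernel as well. Below is a minimal,
standalone C sketch (userspace, with stubbed placeholder helpers; it is not the kernel
implementation): once compaction reports a withdrawn result, the retry decision is no
longer an unconditional true but whatever the zonelist-suitability check says, so a
hopeless zonelist lets the allocator stop retrying and fall through to the OOM path.

/*
 * Standalone sketch, not kernel code: only the control flow of the
 * should_compact_retry() hunk above is modelled. The enum values and
 * the hard-coded suitability result are simplified placeholders.
 */
#include <stdbool.h>
#include <stdio.h>

enum compact_result { COMPACT_SKIPPED, COMPACT_DEFERRED, COMPACT_PARTIAL };

/* Stand-in for compaction_withdrawn(): compaction backed off without trying. */
static bool compaction_withdrawn(enum compact_result result)
{
	return result == COMPACT_SKIPPED || result == COMPACT_DEFERRED;
}

/*
 * Stand-in for compaction_zonelist_suitable(): would compaction have a
 * chance if reclaim freed everything it still can? Hard-coded here to
 * model a near-OOM system where no zone can ever satisfy the request.
 */
static bool compaction_zonelist_suitable(void)
{
	return false;
}

/* Retry decision for a withdrawn compaction attempt, before and after the patch. */
static bool should_retry_withdrawn(enum compact_result result, bool patched)
{
	if (!compaction_withdrawn(result))
		return false;		/* other cases are decided elsewhere */
	if (!patched)
		return true;		/* old behaviour: retry unconditionally */
	return compaction_zonelist_suitable();
}

int main(void)
{
	printf("unpatched: retry=%d\n", should_retry_withdrawn(COMPACT_SKIPPED, false));
	printf("patched:   retry=%d\n", should_retry_withdrawn(COMPACT_SKIPPED, true));
	return 0;
}

With the suitability stub returning false, the unpatched decision keeps printing retry=1,
which corresponds to the endless reclaim/compaction loop described in the changelog; the
patched decision prints retry=0 and lets the allocator move on.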