[4/5] Revert "mm, oom, compaction: prevent from should_compact_retry looping for ever for costly orders"

Message ID: 1487864792-6491-5-git-send-email-paolo.pisati@canonical.com
State: New

Commit Message

Paolo Pisati Feb. 23, 2017, 3:46 p.m. UTC
BugLink: http://bugs.launchpad.net/bugs/1665280

This reverts commit 8f32a02e8fc47de4e963f97defacbac9f22d173f.

Signed-off-by: Paolo Pisati <paolo.pisati@canonical.com>
---
 include/linux/compaction.h |  4 ----
 include/linux/mmzone.h     |  3 ---
 mm/compaction.c            | 42 +++---------------------------------------
 mm/page_alloc.c            | 23 ++++++++++-------------
 4 files changed, 13 insertions(+), 59 deletions(-)

Patch

diff --git a/include/linux/compaction.h b/include/linux/compaction.h
index 25f82af..781fd9b 100644
--- a/include/linux/compaction.h
+++ b/include/linux/compaction.h
@@ -135,10 +135,6 @@ static inline bool compaction_withdrawn(enum compact_result result)
 
 	return false;
 }
-
-bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
-					int alloc_flags);
-
 #else
 static inline enum compact_result try_to_compact_pages(gfp_t gfp_mask,
 			unsigned int order, int alloc_flags,
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 2b79965..2896a0f 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -737,9 +737,6 @@ static inline bool is_dev_zone(const struct zone *zone)
 extern struct mutex zonelists_mutex;
 void build_all_zonelists(pg_data_t *pgdat, struct zone *zone);
 void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx);
-bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
-			 int classzone_idx, unsigned int alloc_flags,
-			 long free_pages);
 bool zone_watermark_ok(struct zone *z, unsigned int order,
 		unsigned long mark, int classzone_idx,
 		unsigned int alloc_flags);
diff --git a/mm/compaction.c b/mm/compaction.c
index 070f30d..3a4ec9f 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -1271,8 +1271,7 @@ static enum compact_result compact_finished(struct zone *zone,
  */
 static enum compact_result __compaction_suitable(struct zone *zone, int order,
 					unsigned int alloc_flags,
-					int classzone_idx,
-					unsigned long wmark_target)
+					int classzone_idx)
 {
 	int fragindex;
 	unsigned long watermark;
@@ -1295,8 +1294,7 @@ static enum compact_result __compaction_suitable(struct zone *zone, int order,
 	 * allocated and for a short time, the footprint is higher
 	 */
 	watermark += (2UL << order);
-	if (!__zone_watermark_ok(zone, 0, watermark, classzone_idx,
-				 alloc_flags, wmark_target))
+	if (!zone_watermark_ok(zone, 0, watermark, classzone_idx, alloc_flags))
 		return COMPACT_SKIPPED;
 
 	/*
@@ -1323,8 +1321,7 @@ enum compact_result compaction_suitable(struct zone *zone, int order,
 {
 	enum compact_result ret;
 
-	ret = __compaction_suitable(zone, order, alloc_flags, classzone_idx,
-				    zone_page_state(zone, NR_FREE_PAGES));
+	ret = __compaction_suitable(zone, order, alloc_flags, classzone_idx);
 	trace_mm_compaction_suitable(zone, order, ret);
 	if (ret == COMPACT_NOT_SUITABLE_ZONE)
 		ret = COMPACT_SKIPPED;
@@ -1332,39 +1329,6 @@ enum compact_result compaction_suitable(struct zone *zone, int order,
 	return ret;
 }
 
-bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
-		int alloc_flags)
-{
-	struct zone *zone;
-	struct zoneref *z;
-
-	/*
-	 * Make sure at least one zone would pass __compaction_suitable if we continue
-	 * retrying the reclaim.
-	 */
-	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
-					ac->nodemask) {
-		unsigned long available;
-		enum compact_result compact_result;
-
-		/*
-		 * Do not consider all the reclaimable memory because we do not
-		 * want to trash just for a single high order allocation which
-		 * is even not guaranteed to appear even if __compaction_suitable
-		 * is happy about the watermark check.
-		 */
-		available = zone_reclaimable_pages(zone) / order;
-		available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
-		compact_result = __compaction_suitable(zone, order, alloc_flags,
-				ac->classzone_idx, available);
-		if (compact_result != COMPACT_SKIPPED &&
-				compact_result != COMPACT_NOT_SUITABLE_ZONE)
-			return true;
-	}
-
-	return false;
-}
-
 static enum compact_result compact_zone(struct zone *zone, struct compact_control *cc)
 {
 	enum compact_result ret;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 0f5c3a6..689c35a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2373,9 +2373,10 @@ static inline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
  * one free page of a suitable size. Checking now avoids taking the zone lock
  * to check in the allocation paths if no pages are free.
  */
-bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
-			 int classzone_idx, unsigned int alloc_flags,
-			 long free_pages)
+static bool __zone_watermark_ok(struct zone *z, unsigned int order,
+			unsigned long mark, int classzone_idx,
+			unsigned int alloc_flags,
+			long free_pages)
 {
 	long min = mark;
 	int o;
@@ -2846,8 +2847,8 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 }
 
 static inline bool
-should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
-		     enum compact_result compact_result, enum migrate_mode *migrate_mode,
+should_compact_retry(unsigned int order, enum compact_result compact_result,
+		     enum migrate_mode *migrate_mode,
 		     int compaction_retries)
 {
 	int max_retries = MAX_COMPACT_RETRIES;
@@ -2871,11 +2872,9 @@ should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
 	/*
 	 * make sure the compaction wasn't deferred or didn't bail out early
 	 * due to locks contention before we declare that we should give up.
-	 * But do not retry if the given zonelist is not suitable for
-	 * compaction.
 	 */
 	if (compaction_withdrawn(compact_result))
-		return compaction_zonelist_suitable(ac, order, alloc_flags);
+		return true;
 
 	/*
 	 * !costly requests are much more important than __GFP_REPEAT
@@ -2903,8 +2902,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 }
 
 static inline bool
-should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags,
-		     enum compact_result compact_result,
+should_compact_retry(unsigned int order, enum compact_result compact_result,
 		     enum migrate_mode *migrate_mode,
 		     int compaction_retries)
 {
@@ -3332,9 +3330,8 @@ retry:
 	 * of free memory (see __compaction_suitable)
 	 */
 	if (did_some_progress > 0 &&
-			should_compact_retry(ac, order, alloc_flags,
-				compact_result, &migration_mode,
-				compaction_retries))
+			should_compact_retry(order, compact_result,
+				&migration_mode, compaction_retries))
 		goto retry;
 
 	/* Reclaim has failed us, start killing things */