[RFC,v4,34/40] mm: Restructure the compaction part of CMA for wider use

Message ID 20130925232120.26184.71686.stgit@srivatsabhat.in.ibm.com (mailing list archive)
State RFC, archived

Commit Message

Srivatsa S. Bhat Sept. 25, 2013, 11:21 p.m. UTC
CMA uses bits and pieces of the memory compaction algorithms to perform
large contiguous allocations. Those algorithms would be useful for
memory power management too, to evacuate entire regions of memory.
So restructure the code in a way that allows it to be easily reused for
both use-cases.

Signed-off-by: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
---

 mm/compaction.c |   81 +++++++++++++++++++++++++++++++++++++++++++++++++++++++
 mm/internal.h   |   40 +++++++++++++++++++++++++++
 mm/page_alloc.c |   51 +++++++++--------------------------
 3 files changed, 134 insertions(+), 38 deletions(-)
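
For illustration, here is a minimal sketch of how a second user, such as a
memory power management path that wants to evacuate a region, might drive the
new interface. Only compact_range(), struct compact_control,
struct aggression_control and struct free_page_control come from this patch
(the compact_control setup mirrors what alloc_contig_range() already does);
evacuate_mem_region(), evacuate_alloc_page() and the use of MR_MEMORY_HOTPLUG
as the migration reason are hypothetical placeholders, not part of the patch.

/*
 * Hypothetical target-page allocator for the sketch below. A real
 * evacuation path would allocate the destination page outside the
 * region being vacated; here we simply take any movable page.
 */
static struct page *evacuate_alloc_page(struct page *migratepage,
					unsigned long data, int **result)
{
	return alloc_page(GFP_HIGHUSER_MOVABLE);
}

/*
 * Hypothetical caller: migrate everything out of [start_pfn, end_pfn),
 * which must lie within a single zone. The aggression_control values are
 * just one plausible policy for a power-management evacuation.
 */
static int evacuate_mem_region(struct zone *zone, unsigned long start_pfn,
			       unsigned long end_pfn)
{
	struct compact_control cc = {
		.nr_migratepages = 0,
		.order = -1,
		.zone = zone,
		.sync = true,			/* use MIGRATE_SYNC */
		.ignore_skip_hint = true,
	};
	struct aggression_control ac = {
		.isolate_unevictable = false,
		.prep_all = false,		/* migrate_prep_local() only */
		.reclaim_clean = true,
		.max_tries = 5,
		.reason = MR_MEMORY_HOTPLUG,	/* stand-in reason */
	};
	struct free_page_control fc = {
		.free_page_alloc = evacuate_alloc_page,
		.alloc_data = 0,
		.release_freepages = NULL,	/* nothing to give back */
		.free_data = 0,
	};

	INIT_LIST_HEAD(&cc.migratepages);

	return compact_range(&cc, &ac, &fc, start_pfn, end_pfn);
}

The rewritten __alloc_contig_migrate_range() at the bottom of this patch is
the same pattern with CMA-specific policy (unevictable isolation, MR_CMA,
alloc_migrate_target()).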



Patch

diff --git a/mm/compaction.c b/mm/compaction.c
index 511b191..c775066 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -816,6 +816,87 @@  static isolate_migrate_t isolate_migratepages(struct zone *zone,
 	return ISOLATE_SUCCESS;
 }
 
+/*
+ * Make free pages available within the given range, using compaction to
+ * migrate used pages elsewhere.
+ *
+ * [start, end) must belong to a single zone.
+ *
+ * This function is roughly based on the logic inside compact_zone().
+ */
+int compact_range(struct compact_control *cc, struct aggression_control *ac,
+		  struct free_page_control *fc, unsigned long start,
+		  unsigned long end)
+{
+	unsigned long pfn = start;
+	int ret = 0, tries, migrate_mode;
+
+	if (ac->prep_all)
+		migrate_prep();
+	else
+		migrate_prep_local();
+
+	while (pfn < end || !list_empty(&cc->migratepages)) {
+		if (list_empty(&cc->migratepages)) {
+			cc->nr_migratepages = 0;
+			pfn = isolate_migratepages_range(cc->zone, cc,
+					pfn, end, ac->isolate_unevictable);
+
+			if (!pfn) {
+				ret = -EINTR;
+				break;
+			}
+		}
+
+		for (tries = 0; tries < ac->max_tries; tries++) {
+			unsigned long nr_migrate, nr_remaining;
+
+			if (fatal_signal_pending(current)) {
+				ret = -EINTR;
+				goto out;
+			}
+
+			if (ac->reclaim_clean) {
+				int nr_reclaimed;
+
+				nr_reclaimed =
+					reclaim_clean_pages_from_list(cc->zone,
+							&cc->migratepages);
+
+				cc->nr_migratepages -= nr_reclaimed;
+			}
+
+			migrate_mode = cc->sync ? MIGRATE_SYNC : MIGRATE_ASYNC;
+			nr_migrate = cc->nr_migratepages;
+			ret = migrate_pages(&cc->migratepages,
+					    fc->free_page_alloc, fc->alloc_data,
+					    migrate_mode, ac->reason);
+
+			update_nr_listpages(cc);
+			nr_remaining = cc->nr_migratepages;
+			trace_mm_compaction_migratepages(
+				nr_migrate - nr_remaining, nr_remaining);
+		}
+
+		if (tries == ac->max_tries) {
+			ret = ret < 0 ? ret : -EBUSY;
+			break;
+		}
+	}
+
+out:
+	if (ret < 0)
+		putback_movable_pages(&cc->migratepages);
+
+	/* Release free pages and check accounting */
+	if (fc->release_freepages)
+		cc->nr_freepages -= fc->release_freepages(fc->free_data);
+
+	VM_BUG_ON(cc->nr_freepages != 0);
+
+	return ret;
+}
+
 static int compact_finished(struct zone *zone,
 			    struct compact_control *cc)
 {
diff --git a/mm/internal.h b/mm/internal.h
index 684f7aa..acb50f8 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -107,6 +107,42 @@  extern bool is_free_buddy_page(struct page *page);
 /*
  * in mm/compaction.c
  */
+
+struct free_page_control {
+
+	/* Function used to allocate free pages as target of migration. */
+	struct page * (*free_page_alloc)(struct page *migratepage,
+					 unsigned long data,
+					 int **result);
+
+	unsigned long alloc_data;	/* Private data for free_page_alloc() */
+
+	/*
+	 * Function to release the accumulated free pages after the compaction
+	 * run.
+	 */
+	unsigned long (*release_freepages)(unsigned long info);
+	unsigned long free_data;	/* Private data for release_freepages() */
+};
+
+/*
+ * aggression_control gives us fine-grained control to specify how aggressively
+ * we want to compact memory.
+ */
+struct aggression_control {
+	bool isolate_unevictable;	/* Isolate unevictable pages too */
+	bool prep_all;			/* Use migrate_prep() instead of
+					 * migrate_prep_local().
+					 */
+	bool reclaim_clean;		/* Reclaim clean page-cache pages */
+	int max_tries;			/* No. of tries to migrate the
+					 * isolated pages before giving up.
+					 */
+	int reason;			/* Reason for compaction, passed on
+					 * as reason for migrate_pages().
+					 */
+};
+
 /*
  * compact_control is used to track pages being migrated and the free pages
  * they are being migrated to during memory compaction. The free_pfn starts
@@ -141,6 +177,10 @@  unsigned long
 isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
 	unsigned long low_pfn, unsigned long end_pfn, bool unevictable);
 
+int compact_range(struct compact_control *cc, struct aggression_control *ac,
+		  struct free_page_control *fc, unsigned long start,
+		  unsigned long end);
+
 #endif
 
 /*
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a15ac96..70c3d7a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6893,46 +6893,21 @@  static unsigned long pfn_max_align_up(unsigned long pfn)
 static int __alloc_contig_migrate_range(struct compact_control *cc,
 					unsigned long start, unsigned long end)
 {
-	/* This function is based on compact_zone() from compaction.c. */
-	unsigned long nr_reclaimed;
-	unsigned long pfn = start;
-	unsigned int tries = 0;
-	int ret = 0;
-
-	migrate_prep();
-
-	while (pfn < end || !list_empty(&cc->migratepages)) {
-		if (fatal_signal_pending(current)) {
-			ret = -EINTR;
-			break;
-		}
-
-		if (list_empty(&cc->migratepages)) {
-			cc->nr_migratepages = 0;
-			pfn = isolate_migratepages_range(cc->zone, cc,
-							 pfn, end, true);
-			if (!pfn) {
-				ret = -EINTR;
-				break;
-			}
-			tries = 0;
-		} else if (++tries == 5) {
-			ret = ret < 0 ? ret : -EBUSY;
-			break;
-		}
+	struct aggression_control ac = {
+		.isolate_unevictable = true,
+		.prep_all = true,
+		.reclaim_clean = true,
+		.max_tries = 5,
+		.reason = MR_CMA,
+	};
 
-		nr_reclaimed = reclaim_clean_pages_from_list(cc->zone,
-							&cc->migratepages);
-		cc->nr_migratepages -= nr_reclaimed;
+	struct free_page_control fc = {
+		.free_page_alloc = alloc_migrate_target,
+		.alloc_data = 0,
+		.release_freepages = NULL,
+	};
 
-		ret = migrate_pages(&cc->migratepages, alloc_migrate_target,
-				    0, MIGRATE_SYNC, MR_CMA);
-	}
-	if (ret < 0) {
-		putback_movable_pages(&cc->migratepages);
-		return ret;
-	}
-	return 0;
+	return compact_range(cc, &ac, &fc, start, end);
 }
 
 /**