
[v4,07/11] mm/gup: use a standard migration target allocation callback

Message ID 1594107889-32228-8-git-send-email-iamjoonsoo.kim@lge.com (mailing list archive)
State New, archived
Series clean-up the migration target allocation functions

Commit Message

Joonsoo Kim July 7, 2020, 7:44 a.m. UTC
From: Joonsoo Kim <iamjoonsoo.kim@lge.com>

There is a well-defined migration target allocation callback.  It is mostly
similar to new_non_cma_page() except for the handling of CMA pages.

This patch adds CMA handling to the standard migration target allocation
callback and uses it in gup.c.

Acked-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
---
 mm/gup.c      | 61 +++++++----------------------------------------------------
 mm/internal.h |  1 +
 mm/migrate.c  |  9 ++++++++-
 3 files changed, 16 insertions(+), 55 deletions(-)
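
For orientation, the pattern gup.c is converted to can be condensed from the
hunks below: the caller describes the migration target in a
migration_target_control (defined in mm/internal.h) and hands it to
migrate_pages() through the opaque 'private' argument, instead of carrying its
own allocation helper.

struct migration_target_control mtc = {
	.nid = NUMA_NO_NODE,			/* allocate on the source page's node */
	.gfp_mask = GFP_USER | __GFP_NOWARN,	/* don't warn if the target allocation fails */
	.skip_cma = true,			/* a long-term pin must not land in CMA */
};

/* alloc_migration_target() recovers &mtc from the 'private' argument */
migrate_pages(&cma_page_list, alloc_migration_target, NULL,
	      (unsigned long)&mtc, MIGRATE_SYNC, MR_CONTIG_RANGE);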

Comments

Michal Hocko July 7, 2020, 11:46 a.m. UTC | #1
On Tue 07-07-20 16:44:45, Joonsoo Kim wrote:
[...]
> @@ -1551,9 +1552,12 @@ struct page *alloc_migration_target(struct page *page, unsigned long private)
>  
>  		gfp_mask |= htlb_alloc_mask(h);
>  		return alloc_huge_page_nodemask(h, nid, mtc->nmask,
> -						gfp_mask, false);
> +						gfp_mask, mtc->skip_cma);
>  	}
>  
> +	if (mtc->skip_cma)
> +		flags = memalloc_nocma_save();
> +

As already mentioned in a previous email, this is a completely wrong usage
of the scope API. The scope should be defined by the caller, and this
should all be handled transparently by the allocator layer.

>  	if (PageTransHuge(page)) {
>  		/*
> >  		 * clear __GFP_RECLAIM since GFP_TRANSHUGE is the gfp_mask
> @@ -1572,6 +1576,9 @@ struct page *alloc_migration_target(struct page *page, unsigned long private)
>  	if (new_page && PageTransHuge(new_page))
>  		prep_transhuge_page(new_page);
>  
> +	if (mtc->skip_cma)
> +		memalloc_nocma_restore(flags);
> +
>  	return new_page;
>  }
>  
> -- 
> 2.7.4
Joonsoo Kim July 8, 2020, 7:21 a.m. UTC | #2
On Tue, Jul 07, 2020 at 01:46:14PM +0200, Michal Hocko wrote:
> On Tue 07-07-20 16:44:45, Joonsoo Kim wrote:
> [...]
> > @@ -1551,9 +1552,12 @@ struct page *alloc_migration_target(struct page *page, unsigned long private)
> >  
> >  		gfp_mask |= htlb_alloc_mask(h);
> >  		return alloc_huge_page_nodemask(h, nid, mtc->nmask,
> > -						gfp_mask, false);
> > +						gfp_mask, mtc->skip_cma);
> >  	}
> >  
> > +	if (mtc->skip_cma)
> > +		flags = memalloc_nocma_save();
> > +
> 
> As already mentioned in a previous email, this is a completely wrong usage
> of the scope API. The scope should be defined by the caller, and this
> should all be handled transparently by the allocator layer.

Okay. Like the newly sent patch for 04/11, this patch will also be changed.

Thanks.
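
For illustration only, the caller-defined scoping Michal describes would look
roughly like the sketch below (hypothetical placement in the gup.c caller,
reusing the mtc from the hunk above minus its CMA knowledge; this is not the
patch that was eventually merged). The hugetlb path is a separate question,
since a page dequeued from the hugetlb pool does not come from the page
allocator, which is why the quoted hunk passes an explicit flag to
alloc_huge_page_nodemask().

unsigned int flags;

/*
 * Caller-side scope: while it is open, every page allocation made by this
 * task avoids CMA pageblocks, so alloc_migration_target() itself needs no
 * CMA knowledge.
 */
flags = memalloc_nocma_save();
if (migrate_pages(&cma_page_list, alloc_migration_target, NULL,
		  (unsigned long)&mtc, MIGRATE_SYNC, MR_CONTIG_RANGE)) {
	/* some pages failed to migrate; fall back as before */
}
memalloc_nocma_restore(flags);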

Patch

diff --git a/mm/gup.c b/mm/gup.c
index 2c3dab4..6a74c30 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1608,58 +1608,6 @@  static bool check_dax_vmas(struct vm_area_struct **vmas, long nr_pages)
 }
 
 #ifdef CONFIG_CMA
-static struct page *new_non_cma_page(struct page *page, unsigned long private)
-{
-	/*
-	 * We want to make sure we allocate the new page from the same node
-	 * as the source page.
-	 */
-	int nid = page_to_nid(page);
-	/*
-	 * Trying to allocate a page for migration. Ignore allocation
-	 * failure warnings. We don't force __GFP_THISNODE here because
-	 * this node here is the node where we have CMA reservation and
-	 * in some case these nodes will have really less non movable
-	 * allocation memory.
-	 */
-	gfp_t gfp_mask = GFP_USER | __GFP_NOWARN;
-
-	if (PageHighMem(page))
-		gfp_mask |= __GFP_HIGHMEM;
-
-#ifdef CONFIG_HUGETLB_PAGE
-	if (PageHuge(page)) {
-		struct hstate *h = page_hstate(page);
-
-		/*
-		 * We don't want to dequeue from the pool because pool pages will
-		 * mostly be from the CMA region.
-		 */
-		return alloc_huge_page_nodemask(h, nid, NULL, gfp_mask, true);
-	}
-#endif
-	if (PageTransHuge(page)) {
-		struct page *thp;
-		/*
-		 * ignore allocation failure warnings
-		 */
-		gfp_t thp_gfpmask = GFP_TRANSHUGE | __GFP_NOWARN;
-
-		/*
-		 * Remove the movable mask so that we don't allocate from
-		 * CMA area again.
-		 */
-		thp_gfpmask &= ~__GFP_MOVABLE;
-		thp = __alloc_pages_node(nid, thp_gfpmask, HPAGE_PMD_ORDER);
-		if (!thp)
-			return NULL;
-		prep_transhuge_page(thp);
-		return thp;
-	}
-
-	return __alloc_pages_node(nid, gfp_mask, 0);
-}
-
 static long check_and_migrate_cma_pages(struct task_struct *tsk,
 					struct mm_struct *mm,
 					unsigned long start,
@@ -1674,6 +1622,11 @@  static long check_and_migrate_cma_pages(struct task_struct *tsk,
 	bool migrate_allow = true;
 	LIST_HEAD(cma_page_list);
 	long ret = nr_pages;
+	struct migration_target_control mtc = {
+		.nid = NUMA_NO_NODE,
+		.gfp_mask = GFP_USER | __GFP_NOWARN,
+		.skip_cma = true,
+	};
 
 check_again:
 	for (i = 0; i < nr_pages;) {
@@ -1719,8 +1672,8 @@  static long check_and_migrate_cma_pages(struct task_struct *tsk,
 		for (i = 0; i < nr_pages; i++)
 			put_page(pages[i]);
 
-		if (migrate_pages(&cma_page_list, new_non_cma_page,
-				  NULL, 0, MIGRATE_SYNC, MR_CONTIG_RANGE)) {
+		if (migrate_pages(&cma_page_list, alloc_migration_target, NULL,
+			(unsigned long)&mtc, MIGRATE_SYNC, MR_CONTIG_RANGE)) {
 			/*
 			 * some of the pages failed migration. Do get_user_pages
 			 * without migration.
diff --git a/mm/internal.h b/mm/internal.h
index 0beacf3..3236fef 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -619,6 +619,7 @@  struct migration_target_control {
 	int nid;		/* preferred node id */
 	nodemask_t *nmask;
 	gfp_t gfp_mask;
+	bool skip_cma;
 };
 
 #endif	/* __MM_INTERNAL_H */
diff --git a/mm/migrate.c b/mm/migrate.c
index 00cd81c..ab18b9c 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1539,6 +1539,7 @@  struct page *alloc_migration_target(struct page *page, unsigned long private)
 	struct page *new_page = NULL;
 	int nid;
 	int zidx;
+	unsigned int flags = 0;
 
 	mtc = (struct migration_target_control *)private;
 	gfp_mask = mtc->gfp_mask;
@@ -1551,9 +1552,12 @@  struct page *alloc_migration_target(struct page *page, unsigned long private)
 
 		gfp_mask |= htlb_alloc_mask(h);
 		return alloc_huge_page_nodemask(h, nid, mtc->nmask,
-						gfp_mask, false);
+						gfp_mask, mtc->skip_cma);
 	}
 
+	if (mtc->skip_cma)
+		flags = memalloc_nocma_save();
+
 	if (PageTransHuge(page)) {
 		/*
 		 * clear __GFP_RECLAIM since GFP_TRANSHUGE is the gfp_mask
@@ -1572,6 +1576,9 @@  struct page *alloc_migration_target(struct page *page, unsigned long private)
 	if (new_page && PageTransHuge(new_page))
 		prep_transhuge_page(new_page);
 
+	if (mtc->skip_cma)
+		memalloc_nocma_restore(flags);
+
 	return new_page;
 }
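
Independent of where the scope is placed, the save/restore pair is what lets
these scopes nest: memalloc_nocma_save() records whether the task was already
inside a no-CMA scope, and the matching restore() reinstates exactly that
recorded state. A minimal sketch of the contract, assuming the sched/mm.h
helpers as they existed at the time of this series:

unsigned int outer, inner;

outer = memalloc_nocma_save();	/* open the scope: allocations now avoid CMA */
inner = memalloc_nocma_save();	/* nested call records that the scope was already open */
memalloc_nocma_restore(inner);	/* still inside the outer scope */
memalloc_nocma_restore(outer);	/* the scope actually ends here */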