[4/4] mm/gup: use a standard migration target allocation callback

Message ID: 1594789529-6206-4-git-send-email-iamjoonsoo.kim@lge.com
State: New
Series:
  • [1/4] mm/page_alloc: fix non cma alloc context

Commit Message

Joonsoo Kim July 15, 2020, 5:05 a.m. UTC
From: Joonsoo Kim <iamjoonsoo.kim@lge.com>

There is a well-defined migration target allocation callback. Use it.

Acked-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
---
 mm/gup.c | 54 ++++++------------------------------------------------
 1 file changed, 6 insertions(+), 48 deletions(-)
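
For reference, the "well-defined callback" is alloc_migration_target(), added earlier in this series. It takes the page to be migrated plus an opaque private cookie that carries a struct migration_target_control, and it centralizes the node, highmem, hugetlb and THP handling that new_non_cma_page() open-codes. A simplified sketch of it (not the exact mm/migrate.c code; the nmask field is assumed from the rest of the series):

	/* Sketch only: condensed from the common callback this patch adopts. */
	struct page *alloc_migration_target(struct page *page, unsigned long private)
	{
		struct migration_target_control *mtc;
		struct page *newpage;
		gfp_t gfp_mask;
		unsigned int order = 0;
		int nid;

		mtc = (struct migration_target_control *)private;
		gfp_mask = mtc->gfp_mask;
		nid = mtc->nid;
		if (nid == NUMA_NO_NODE)
			nid = page_to_nid(page);	/* prefer the source page's node */

		if (PageHuge(page)) {
			struct hstate *h = page_hstate(page);

			gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
			return alloc_huge_page_nodemask(h, nid, mtc->nmask, gfp_mask);
		}

		if (PageTransHuge(page)) {
			gfp_mask |= GFP_TRANSHUGE;
			order = HPAGE_PMD_ORDER;
		}
		if (PageHighMem(page))
			gfp_mask |= __GFP_HIGHMEM;

		newpage = __alloc_pages_nodemask(gfp_mask, order, nid, mtc->nmask);
		if (newpage && PageTransHuge(newpage))
			prep_transhuge_page(newpage);	/* same THP prep the old helper did */

		return newpage;
	}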

Comments

Michal Hocko July 15, 2020, 8:36 a.m. UTC | #1
On Wed 15-07-20 14:05:29, Joonsoo Kim wrote:
> From: Joonsoo Kim <iamjoonsoo.kim@lge.com>
> 
> There is a well-defined migration target allocation callback. Use it.
> 
> Acked-by: Vlastimil Babka <vbabka@suse.cz>
> Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>

Acked-by: Michal Hocko <mhocko@suse.com>


Patch

diff --git a/mm/gup.c b/mm/gup.c
index 4ba822a..628ca4c 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1608,52 +1608,6 @@ static bool check_dax_vmas(struct vm_area_struct **vmas, long nr_pages)
 }
 
 #ifdef CONFIG_CMA
-static struct page *new_non_cma_page(struct page *page, unsigned long private)
-{
-	/*
-	 * We want to make sure we allocate the new page from the same node
-	 * as the source page.
-	 */
-	int nid = page_to_nid(page);
-	/*
-	 * Trying to allocate a page for migration. Ignore allocation
-	 * failure warnings. We don't force __GFP_THISNODE here because
-	 * this node here is the node where we have CMA reservation and
-	 * in some case these nodes will have really less non CMA
-	 * allocation memory.
-	 *
-	 * Note that CMA region is prohibited by allocation scope.
-	 */
-	gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_NOWARN;
-
-	if (PageHighMem(page))
-		gfp_mask |= __GFP_HIGHMEM;
-
-#ifdef CONFIG_HUGETLB_PAGE
-	if (PageHuge(page)) {
-		struct hstate *h = page_hstate(page);
-
-		gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
-		return alloc_huge_page_nodemask(h, nid, NULL, gfp_mask);
-	}
-#endif
-	if (PageTransHuge(page)) {
-		struct page *thp;
-		/*
-		 * ignore allocation failure warnings
-		 */
-		gfp_t thp_gfpmask = GFP_TRANSHUGE | __GFP_NOWARN;
-
-		thp = __alloc_pages_node(nid, thp_gfpmask, HPAGE_PMD_ORDER);
-		if (!thp)
-			return NULL;
-		prep_transhuge_page(thp);
-		return thp;
-	}
-
-	return __alloc_pages_node(nid, gfp_mask, 0);
-}
-
 static long check_and_migrate_cma_pages(struct task_struct *tsk,
 					struct mm_struct *mm,
 					unsigned long start,
@@ -1668,6 +1622,10 @@ static long check_and_migrate_cma_pages(struct task_struct *tsk,
 	bool migrate_allow = true;
 	LIST_HEAD(cma_page_list);
 	long ret = nr_pages;
+	struct migration_target_control mtc = {
+		.nid = NUMA_NO_NODE,
+		.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_NOWARN,
+	};
 
 check_again:
 	for (i = 0; i < nr_pages;) {
@@ -1713,8 +1671,8 @@ static long check_and_migrate_cma_pages(struct task_struct *tsk,
 		for (i = 0; i < nr_pages; i++)
 			put_page(pages[i]);
 
-		if (migrate_pages(&cma_page_list, new_non_cma_page,
-				  NULL, 0, MIGRATE_SYNC, MR_CONTIG_RANGE)) {
+		if (migrate_pages(&cma_page_list, alloc_migration_target, NULL,
+			(unsigned long)&mtc, MIGRATE_SYNC, MR_CONTIG_RANGE)) {
 			/*
 			 * some of the pages failed migration. Do get_user_pages
 			 * without migration.
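
The mechanical part of the conversion is visible in the last hunk: migrate_pages() forwards its unsigned long private argument untouched to whichever allocation callback it is given, so the per-caller policy now rides in a stack-local control struct instead of a bespoke function. The resulting pattern (illustrative, taken from the hunk above):

	struct migration_target_control mtc = {
		.nid = NUMA_NO_NODE,	/* callback falls back to page_to_nid() */
		.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_NOWARN,
	};

	/* A pointer rides through the unsigned long cookie via a cast. */
	migrate_pages(&cma_page_list, alloc_migration_target, NULL,
		      (unsigned long)&mtc, MIGRATE_SYNC, MR_CONTIG_RANGE);

With .nid == NUMA_NO_NODE the callback allocates on the source page's node, matching what new_non_cma_page() did via page_to_nid(), and the hugetlb and THP special cases move into the shared helper.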