
mm/gup: remove unnecessary check against CMA in __gup_longterm_locked()

Message ID 1559633160-14809-1-git-send-email-kernelfans@gmail.com (mailing list archive)
State New, archived
Series mm/gup: remove unnecessary check against CMA in __gup_longterm_locked()

Commit Message

Pingfan Liu June 4, 2019, 7:26 a.m. UTC
PF_MEMALLOC_NOCMA is set by memalloc_nocma_save(), which ultimately results
in __GFP_MOVABLE being masked off the allocation flags
(gfp_mask & ~__GFP_MOVABLE).  So __get_user_pages_locked() will get pages
from outside the CMA area and pin them, and there is no need to call
check_and_migrate_cma_pages().
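
For reference, a simplified sketch of how the flag takes effect (paraphrased
from the memalloc scope API around v5.2, not verbatim kernel source):
memalloc_nocma_save() sets PF_MEMALLOC_NOCMA on the current task, and
current_gfp_context() then masks __GFP_MOVABLE off the gfp flags used for the
faults taken under __get_user_pages_locked():

static inline unsigned int memalloc_nocma_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC_NOCMA;

	current->flags |= PF_MEMALLOC_NOCMA;
	return flags;
}

static inline gfp_t current_gfp_context(gfp_t flags)
{
	/*
	 * Simplified: the real helper also handles PF_MEMALLOC_NOIO and
	 * PF_MEMALLOC_NOFS.  With PF_MEMALLOC_NOCMA set, __GFP_MOVABLE is
	 * cleared so allocations stay out of CMA pageblocks.
	 */
	if (unlikely(current->flags & PF_MEMALLOC_NOCMA))
		flags &= ~__GFP_MOVABLE;
	return flags;
}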

Signed-off-by: Pingfan Liu <kernelfans@gmail.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Ira Weiny <ira.weiny@intel.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Mike Rapoport <rppt@linux.ibm.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: "Aneesh Kumar K.V" <aneesh.kumar@linux.ibm.com>
Cc: Keith Busch <keith.busch@intel.com>
Cc: linux-kernel@vger.kernel.org
---
 mm/gup.c | 146 ---------------------------------------------------------------
 1 file changed, 146 deletions(-)

Comments

Aneesh Kumar K.V June 4, 2019, 8:30 a.m. UTC | #1
On 6/4/19 12:56 PM, Pingfan Liu wrote:
> PF_MEMALLOC_NOCMA is set by memalloc_nocma_save(), which ultimately results
> in __GFP_MOVABLE being masked off the allocation flags
> (gfp_mask & ~__GFP_MOVABLE).  So __get_user_pages_locked() will get pages
> from outside the CMA area and pin them, and there is no need to call
> check_and_migrate_cma_pages().


That is not completely correct. We can fault those pages in outside of 
get_user_pages_longterm(), at which point they can be allocated from 
the CMA region. memalloc_nocma_save() was added as an optimization to 
avoid unnecessary page migration.
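
To make that concrete, a rough sketch of the FOLL_LONGTERM path as it stands
(simplified, not verbatim mm/gup.c): the no-CMA scope only constrains faults
taken inside this call, so pages that were faulted in earlier may already sit
in CMA and still have to be migrated out before they are pinned:

	flags = memalloc_nocma_save();
	/*
	 * Faults taken inside this call avoid CMA, because __GFP_MOVABLE
	 * is masked off for the task...
	 */
	rc = __get_user_pages_locked(tsk, mm, start, nr_pages, pages,
				     vmas_tmp, NULL, gup_flags);
	memalloc_nocma_restore(flags);
	/*
	 * ...but pages that were already mapped before this call may have
	 * been allocated from the CMA region, so they still need to be
	 * migrated out before a long-term pin is taken on them.
	 */
	if (rc > 0)
		rc = check_and_migrate_cma_pages(tsk, mm, start, rc, pages,
						 vmas_tmp, gup_flags);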


-aneesh
Pingfan Liu June 4, 2019, 8:50 a.m. UTC | #2
On Tue, Jun 4, 2019 at 4:30 PM Aneesh Kumar K.V
<aneesh.kumar@linux.ibm.com> wrote:
>
> On 6/4/19 12:56 PM, Pingfan Liu wrote:
> > PF_MEMALLOC_NOCMA is set by memalloc_nocma_save(), which ultimately results
> > in __GFP_MOVABLE being masked off the allocation flags
> > (gfp_mask & ~__GFP_MOVABLE).  So __get_user_pages_locked() will get pages
> > from outside the CMA area and pin them, and there is no need to call
> > check_and_migrate_cma_pages().
>
>
> That is not completely correct. We can fault those pages in outside of
> get_user_pages_longterm(), at which point they can be allocated from
> the CMA region. memalloc_nocma_save() was added as an optimization to
> avoid unnecessary page migration.
Yes, you are right.

Thanks,
  Pingfan

Patch

diff --git a/mm/gup.c b/mm/gup.c
index f173fcb..9d931d1 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1275,149 +1275,6 @@  static bool check_dax_vmas(struct vm_area_struct **vmas, long nr_pages)
 	return false;
 }
 
-#ifdef CONFIG_CMA
-static struct page *new_non_cma_page(struct page *page, unsigned long private)
-{
-	/*
-	 * We want to make sure we allocate the new page from the same node
-	 * as the source page.
-	 */
-	int nid = page_to_nid(page);
-	/*
-	 * Trying to allocate a page for migration. Ignore allocation
-	 * failure warnings. We don't force __GFP_THISNODE here because
-	 * this node here is the node where we have CMA reservation and
-	 * in some case these nodes will have really less non movable
-	 * allocation memory.
-	 */
-	gfp_t gfp_mask = GFP_USER | __GFP_NOWARN;
-
-	if (PageHighMem(page))
-		gfp_mask |= __GFP_HIGHMEM;
-
-#ifdef CONFIG_HUGETLB_PAGE
-	if (PageHuge(page)) {
-		struct hstate *h = page_hstate(page);
-		/*
-		 * We don't want to dequeue from the pool because pool pages will
-		 * mostly be from the CMA region.
-		 */
-		return alloc_migrate_huge_page(h, gfp_mask, nid, NULL);
-	}
-#endif
-	if (PageTransHuge(page)) {
-		struct page *thp;
-		/*
-		 * ignore allocation failure warnings
-		 */
-		gfp_t thp_gfpmask = GFP_TRANSHUGE | __GFP_NOWARN;
-
-		/*
-		 * Remove the movable mask so that we don't allocate from
-		 * CMA area again.
-		 */
-		thp_gfpmask &= ~__GFP_MOVABLE;
-		thp = __alloc_pages_node(nid, thp_gfpmask, HPAGE_PMD_ORDER);
-		if (!thp)
-			return NULL;
-		prep_transhuge_page(thp);
-		return thp;
-	}
-
-	return __alloc_pages_node(nid, gfp_mask, 0);
-}
-
-static long check_and_migrate_cma_pages(struct task_struct *tsk,
-					struct mm_struct *mm,
-					unsigned long start,
-					unsigned long nr_pages,
-					struct page **pages,
-					struct vm_area_struct **vmas,
-					unsigned int gup_flags)
-{
-	long i;
-	bool drain_allow = true;
-	bool migrate_allow = true;
-	LIST_HEAD(cma_page_list);
-
-check_again:
-	for (i = 0; i < nr_pages; i++) {
-		/*
-		 * If we get a page from the CMA zone, since we are going to
-		 * be pinning these entries, we might as well move them out
-		 * of the CMA zone if possible.
-		 */
-		if (is_migrate_cma_page(pages[i])) {
-
-			struct page *head = compound_head(pages[i]);
-
-			if (PageHuge(head)) {
-				isolate_huge_page(head, &cma_page_list);
-			} else {
-				if (!PageLRU(head) && drain_allow) {
-					lru_add_drain_all();
-					drain_allow = false;
-				}
-
-				if (!isolate_lru_page(head)) {
-					list_add_tail(&head->lru, &cma_page_list);
-					mod_node_page_state(page_pgdat(head),
-							    NR_ISOLATED_ANON +
-							    page_is_file_cache(head),
-							    hpage_nr_pages(head));
-				}
-			}
-		}
-	}
-
-	if (!list_empty(&cma_page_list)) {
-		/*
-		 * drop the above get_user_pages reference.
-		 */
-		for (i = 0; i < nr_pages; i++)
-			put_page(pages[i]);
-
-		if (migrate_pages(&cma_page_list, new_non_cma_page,
-				  NULL, 0, MIGRATE_SYNC, MR_CONTIG_RANGE)) {
-			/*
-			 * some of the pages failed migration. Do get_user_pages
-			 * without migration.
-			 */
-			migrate_allow = false;
-
-			if (!list_empty(&cma_page_list))
-				putback_movable_pages(&cma_page_list);
-		}
-		/*
-		 * We did migrate all the pages, Try to get the page references
-		 * again migrating any new CMA pages which we failed to isolate
-		 * earlier.
-		 */
-		nr_pages = __get_user_pages_locked(tsk, mm, start, nr_pages,
-						   pages, vmas, NULL,
-						   gup_flags);
-
-		if ((nr_pages > 0) && migrate_allow) {
-			drain_allow = true;
-			goto check_again;
-		}
-	}
-
-	return nr_pages;
-}
-#else
-static long check_and_migrate_cma_pages(struct task_struct *tsk,
-					struct mm_struct *mm,
-					unsigned long start,
-					unsigned long nr_pages,
-					struct page **pages,
-					struct vm_area_struct **vmas,
-					unsigned int gup_flags)
-{
-	return nr_pages;
-}
-#endif
-
 /*
  * __gup_longterm_locked() is a wrapper for __get_user_pages_locked which
  * allows us to process the FOLL_LONGTERM flag.
@@ -1462,9 +1319,6 @@  static long __gup_longterm_locked(struct task_struct *tsk,
 			rc = -EOPNOTSUPP;
 			goto out;
 		}
-
-		rc = check_and_migrate_cma_pages(tsk, mm, start, rc, pages,
-						 vmas_tmp, gup_flags);
 	}
 
 out: