diff mbox series

[2/2] mm: gup: do not call try_grab_folio() in slow path

Message ID 20240604234858.948986-2-yang@os.amperecomputing.com (mailing list archive)
State New
Headers show
Series [1/2] mm: page_ref: remove folio_try_get_rcu() | expand

Commit Message

Yang Shi June 4, 2024, 11:48 p.m. UTC
try_grab_folio() is supposed to be used in the fast path, and it elevates
the folio refcount by using add-ref-unless-zero.  In the slow path we are
guaranteed to have at least one stable reference, so a simple atomic add
can be used instead.  The performance difference should be trivial, but
the misuse may be confusing and misleading.

Signed-off-by: Yang Shi <yang@os.amperecomputing.com>
---
 mm/gup.c         | 112 +++++++++++++++++++++++++++--------------------
 mm/huge_memory.c |   2 +-
 mm/internal.h    |   3 +-
 3 files changed, 66 insertions(+), 51 deletions(-)

Comments

kernel test robot June 5, 2024, 2:57 a.m. UTC | #1
Hi Yang,

kernel test robot noticed the following build warnings:

[auto build test WARNING on akpm-mm/mm-everything]

url:    https://github.com/intel-lab-lkp/linux/commits/Yang-Shi/mm-gup-do-not-call-try_grab_folio-in-slow-path/20240605-075027
base:   https://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-everything
patch link:    https://lore.kernel.org/r/20240604234858.948986-2-yang%40os.amperecomputing.com
patch subject: [PATCH 2/2] mm: gup: do not call try_grab_folio() in slow path
config: openrisc-allnoconfig (https://download.01.org/0day-ci/archive/20240605/202406051039.9m00gwIx-lkp@intel.com/config)
compiler: or1k-linux-gcc (GCC) 13.2.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20240605/202406051039.9m00gwIx-lkp@intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202406051039.9m00gwIx-lkp@intel.com/

All warnings (new ones prefixed by >>):

>> mm/gup.c:131:22: warning: 'try_grab_folio_fast' defined but not used [-Wunused-function]
     131 | static struct folio *try_grab_folio_fast(struct page *page, int refs,
         |                      ^~~~~~~~~~~~~~~~~~~


vim +/try_grab_folio_fast +131 mm/gup.c

   101	
   102	/**
   103	 * try_grab_folio_fast() - Attempt to get or pin a folio in fast path.
   104	 * @page:  pointer to page to be grabbed
   105	 * @refs:  the value to (effectively) add to the folio's refcount
   106	 * @flags: gup flags: these are the FOLL_* flag values.
   107	 *
   108	 * "grab" names in this file mean, "look at flags to decide whether to use
   109	 * FOLL_PIN or FOLL_GET behavior, when incrementing the folio's refcount.
   110	 *
   111	 * Either FOLL_PIN or FOLL_GET (or neither) must be set, but not both at the
   112	 * same time. (That's true throughout the get_user_pages*() and
   113	 * pin_user_pages*() APIs.) Cases:
   114	 *
   115	 *    FOLL_GET: folio's refcount will be incremented by @refs.
   116	 *
   117	 *    FOLL_PIN on large folios: folio's refcount will be incremented by
   118	 *    @refs, and its pincount will be incremented by @refs.
   119	 *
   120	 *    FOLL_PIN on single-page folios: folio's refcount will be incremented by
   121	 *    @refs * GUP_PIN_COUNTING_BIAS.
   122	 *
   123	 * Return: The folio containing @page (with refcount appropriately
   124	 * incremented) for success, or NULL upon failure. If neither FOLL_GET
   125	 * nor FOLL_PIN was set, that's considered failure, and furthermore,
   126	 * a likely bug in the caller, so a warning is also emitted.
   127	 *
   128	 * It uses add ref unless zero to elevate the folio refcount and must be called
   129	 * in fast path only.
   130	 */
 > 131	static struct folio *try_grab_folio_fast(struct page *page, int refs,
   132						 unsigned int flags)
   133	{
   134		struct folio *folio;
   135	
   136		/* Raise warn if it is not called in fast GUP */
   137		VM_WARN_ON_ONCE(!irqs_disabled());
   138	
   139		if (WARN_ON_ONCE((flags & (FOLL_GET | FOLL_PIN)) == 0))
   140			return NULL;
   141	
   142		if (unlikely(!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(page)))
   143			return NULL;
   144	
   145		if (flags & FOLL_GET)
   146			return try_get_folio(page, refs);
   147	
   148		/* FOLL_PIN is set */
   149	
   150		/*
   151		 * Don't take a pin on the zero page - it's not going anywhere
   152		 * and it is used in a *lot* of places.
   153		 */
   154		if (is_zero_page(page))
   155			return page_folio(page);
   156	
   157		folio = try_get_folio(page, refs);
   158		if (!folio)
   159			return NULL;
   160	
   161		/*
   162		 * Can't do FOLL_LONGTERM + FOLL_PIN gup fast path if not in a
   163		 * right zone, so fail and let the caller fall back to the slow
   164		 * path.
   165		 */
   166		if (unlikely((flags & FOLL_LONGTERM) &&
   167			     !folio_is_longterm_pinnable(folio))) {
   168			if (!put_devmap_managed_folio_refs(folio, refs))
   169				folio_put_refs(folio, refs);
   170			return NULL;
   171		}
   172	
   173		/*
   174		 * When pinning a large folio, use an exact count to track it.
   175		 *
   176		 * However, be sure to *also* increment the normal folio
   177		 * refcount field at least once, so that the folio really
   178		 * is pinned.  That's why the refcount from the earlier
   179		 * try_get_folio() is left intact.
   180		 */
   181		if (folio_test_large(folio))
   182			atomic_add(refs, &folio->_pincount);
   183		else
   184			folio_ref_add(folio,
   185					refs * (GUP_PIN_COUNTING_BIAS - 1));
   186		/*
   187		 * Adjust the pincount before re-checking the PTE for changes.
   188		 * This is essentially a smp_mb() and is paired with a memory
   189		 * barrier in folio_try_share_anon_rmap_*().
   190		 */
   191		smp_mb__after_atomic();
   192	
   193		node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, refs);
   194	
   195		return folio;
   196	}
   197
Yang Shi June 5, 2024, 4:19 p.m. UTC | #2
On 6/4/24 7:57 PM, kernel test robot wrote:
> Hi Yang,
>
> kernel test robot noticed the following build warnings:
>
> [auto build test WARNING on akpm-mm/mm-everything]
>
> url:    https://github.com/intel-lab-lkp/linux/commits/Yang-Shi/mm-gup-do-not-call-try_grab_folio-in-slow-path/20240605-075027
> base:   https://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-everything
> patch link:    https://lore.kernel.org/r/20240604234858.948986-2-yang%40os.amperecomputing.com
> patch subject: [PATCH 2/2] mm: gup: do not call try_grab_folio() in slow path
> config: openrisc-allnoconfig (https://download.01.org/0day-ci/archive/20240605/202406051039.9m00gwIx-lkp@intel.com/config)
> compiler: or1k-linux-gcc (GCC) 13.2.0
> reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20240605/202406051039.9m00gwIx-lkp@intel.com/reproduce)
>
> If you fix the issue in a separate patch/commit (i.e. not just a new version of
> the same patch/commit), kindly add following tags
> | Reported-by: kernel test robot <lkp@intel.com>
> | Closes: https://lore.kernel.org/oe-kbuild-all/202406051039.9m00gwIx-lkp@intel.com/
>
> All warnings (new ones prefixed by >>):
>
>>> mm/gup.c:131:22: warning: 'try_grab_folio_fast' defined but not used [-Wunused-function]

Thanks for reporting the problem. It seems the try_grab_folio_fast()
definition should be protected by CONFIG_HAVE_FAST_GUP; I will fix it in v2.

>       131 | static struct folio *try_grab_folio_fast(struct page *page, int refs,
>           |                      ^~~~~~~~~~~~~~~~~~~
>
>
> vim +/try_grab_folio_fast +131 mm/gup.c
>
>     101	
>     102	/**
>     103	 * try_grab_folio_fast() - Attempt to get or pin a folio in fast path.
>     104	 * @page:  pointer to page to be grabbed
>     105	 * @refs:  the value to (effectively) add to the folio's refcount
>     106	 * @flags: gup flags: these are the FOLL_* flag values.
>     107	 *
>     108	 * "grab" names in this file mean, "look at flags to decide whether to use
>     109	 * FOLL_PIN or FOLL_GET behavior, when incrementing the folio's refcount.
>     110	 *
>     111	 * Either FOLL_PIN or FOLL_GET (or neither) must be set, but not both at the
>     112	 * same time. (That's true throughout the get_user_pages*() and
>     113	 * pin_user_pages*() APIs.) Cases:
>     114	 *
>     115	 *    FOLL_GET: folio's refcount will be incremented by @refs.
>     116	 *
>     117	 *    FOLL_PIN on large folios: folio's refcount will be incremented by
>     118	 *    @refs, and its pincount will be incremented by @refs.
>     119	 *
>     120	 *    FOLL_PIN on single-page folios: folio's refcount will be incremented by
>     121	 *    @refs * GUP_PIN_COUNTING_BIAS.
>     122	 *
>     123	 * Return: The folio containing @page (with refcount appropriately
>     124	 * incremented) for success, or NULL upon failure. If neither FOLL_GET
>     125	 * nor FOLL_PIN was set, that's considered failure, and furthermore,
>     126	 * a likely bug in the caller, so a warning is also emitted.
>     127	 *
>     128	 * It uses add ref unless zero to elevate the folio refcount and must be called
>     129	 * in fast path only.
>     130	 */
>   > 131	static struct folio *try_grab_folio_fast(struct page *page, int refs,
>     132						 unsigned int flags)
>     133	{
>     134		struct folio *folio;
>     135	
>     136		/* Raise warn if it is not called in fast GUP */
>     137		VM_WARN_ON_ONCE(!irqs_disabled());
>     138	
>     139		if (WARN_ON_ONCE((flags & (FOLL_GET | FOLL_PIN)) == 0))
>     140			return NULL;
>     141	
>     142		if (unlikely(!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(page)))
>     143			return NULL;
>     144	
>     145		if (flags & FOLL_GET)
>     146			return try_get_folio(page, refs);
>     147	
>     148		/* FOLL_PIN is set */
>     149	
>     150		/*
>     151		 * Don't take a pin on the zero page - it's not going anywhere
>     152		 * and it is used in a *lot* of places.
>     153		 */
>     154		if (is_zero_page(page))
>     155			return page_folio(page);
>     156	
>     157		folio = try_get_folio(page, refs);
>     158		if (!folio)
>     159			return NULL;
>     160	
>     161		/*
>     162		 * Can't do FOLL_LONGTERM + FOLL_PIN gup fast path if not in a
>     163		 * right zone, so fail and let the caller fall back to the slow
>     164		 * path.
>     165		 */
>     166		if (unlikely((flags & FOLL_LONGTERM) &&
>     167			     !folio_is_longterm_pinnable(folio))) {
>     168			if (!put_devmap_managed_folio_refs(folio, refs))
>     169				folio_put_refs(folio, refs);
>     170			return NULL;
>     171		}
>     172	
>     173		/*
>     174		 * When pinning a large folio, use an exact count to track it.
>     175		 *
>     176		 * However, be sure to *also* increment the normal folio
>     177		 * refcount field at least once, so that the folio really
>     178		 * is pinned.  That's why the refcount from the earlier
>     179		 * try_get_folio() is left intact.
>     180		 */
>     181		if (folio_test_large(folio))
>     182			atomic_add(refs, &folio->_pincount);
>     183		else
>     184			folio_ref_add(folio,
>     185					refs * (GUP_PIN_COUNTING_BIAS - 1));
>     186		/*
>     187		 * Adjust the pincount before re-checking the PTE for changes.
>     188		 * This is essentially a smp_mb() and is paired with a memory
>     189		 * barrier in folio_try_share_anon_rmap_*().
>     190		 */
>     191		smp_mb__after_atomic();
>     192	
>     193		node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, refs);
>     194	
>     195		return folio;
>     196	}
>     197	
>
diff mbox series

Patch

diff --git a/mm/gup.c b/mm/gup.c
index 17f89e8d31f1..a683e7ac47b5 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -100,7 +100,7 @@  static inline struct folio *try_get_folio(struct page *page, int refs)
 }
 
 /**
- * try_grab_folio() - Attempt to get or pin a folio.
+ * try_grab_folio_fast() - Attempt to get or pin a folio in fast path.
  * @page:  pointer to page to be grabbed
  * @refs:  the value to (effectively) add to the folio's refcount
  * @flags: gup flags: these are the FOLL_* flag values.
@@ -124,11 +124,18 @@  static inline struct folio *try_get_folio(struct page *page, int refs)
  * incremented) for success, or NULL upon failure. If neither FOLL_GET
  * nor FOLL_PIN was set, that's considered failure, and furthermore,
  * a likely bug in the caller, so a warning is also emitted.
+ *
+ * It uses add ref unless zero to elevate the folio refcount and must be called
+ * in fast path only.
  */
-struct folio *try_grab_folio(struct page *page, int refs, unsigned int flags)
+static struct folio *try_grab_folio_fast(struct page *page, int refs,
+					 unsigned int flags)
 {
 	struct folio *folio;
 
+	/* Raise warn if it is not called in fast GUP */
+	VM_WARN_ON_ONCE(!irqs_disabled());
+
 	if (WARN_ON_ONCE((flags & (FOLL_GET | FOLL_PIN)) == 0))
 		return NULL;
 
@@ -205,28 +212,31 @@  static void gup_put_folio(struct folio *folio, int refs, unsigned int flags)
 }
 
 /**
- * try_grab_page() - elevate a page's refcount by a flag-dependent amount
- * @page:    pointer to page to be grabbed
- * @flags:   gup flags: these are the FOLL_* flag values.
+ * try_grab_folio() - add a folio's refcount by a flag-dependent amount
+ * @folio:    pointer to folio to be grabbed
+ * @refs:     the value to (effectively) add to the folio's refcount
+ * @flags:    gup flags: these are the FOLL_* flag values.
  *
  * This might not do anything at all, depending on the flags argument.
  *
  * "grab" names in this file mean, "look at flags to decide whether to use
- * FOLL_PIN or FOLL_GET behavior, when incrementing the page's refcount.
+ * FOLL_PIN or FOLL_GET behavior, when incrementing the folio's refcount.
  *
  * Either FOLL_PIN or FOLL_GET (or neither) may be set, but not both at the same
- * time. Cases: please see the try_grab_folio() documentation, with
- * "refs=1".
+ * time.
  *
  * Return: 0 for success, or if no action was required (if neither FOLL_PIN
  * nor FOLL_GET was set, nothing is done). A negative error code for failure:
  *
- *   -ENOMEM		FOLL_GET or FOLL_PIN was set, but the page could not
+ *   -ENOMEM		FOLL_GET or FOLL_PIN was set, but the folio could not
  *			be grabbed.
+ *
+ * It is called when we have a stable reference for the folio, typically in
+ * GUP slow path.
  */
-int __must_check try_grab_page(struct page *page, unsigned int flags)
+int __must_check try_grab_folio(struct folio *folio, int refs, unsigned int flags)
 {
-	struct folio *folio = page_folio(page);
+	struct page *page = &folio->page;
 
 	if (WARN_ON_ONCE(folio_ref_count(folio) <= 0))
 		return -ENOMEM;
@@ -235,7 +245,7 @@  int __must_check try_grab_page(struct page *page, unsigned int flags)
 		return -EREMOTEIO;
 
 	if (flags & FOLL_GET)
-		folio_ref_inc(folio);
+		folio_ref_add(folio, refs);
 	else if (flags & FOLL_PIN) {
 		/*
 		 * Don't take a pin on the zero page - it's not going anywhere
@@ -245,18 +255,18 @@  int __must_check try_grab_page(struct page *page, unsigned int flags)
 			return 0;
 
 		/*
-		 * Similar to try_grab_folio(): be sure to *also*
-		 * increment the normal page refcount field at least once,
+		 * Increment the normal page refcount field at least once,
 		 * so that the page really is pinned.
 		 */
 		if (folio_test_large(folio)) {
-			folio_ref_add(folio, 1);
-			atomic_add(1, &folio->_pincount);
+			folio_ref_add(folio, refs);
+			atomic_add(refs, &folio->_pincount);
 		} else {
-			folio_ref_add(folio, GUP_PIN_COUNTING_BIAS);
+			folio_ref_add(folio,
+					refs * GUP_PIN_COUNTING_BIAS);
 		}
 
-		node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, 1);
+		node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, refs);
 	}
 
 	return 0;
@@ -584,7 +594,7 @@  static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
  */
 static int gup_hugepte(struct vm_area_struct *vma, pte_t *ptep, unsigned long sz,
 		       unsigned long addr, unsigned long end, unsigned int flags,
-		       struct page **pages, int *nr)
+		       struct page **pages, int *nr, bool fast)
 {
 	unsigned long pte_end;
 	struct page *page;
@@ -607,9 +617,15 @@  static int gup_hugepte(struct vm_area_struct *vma, pte_t *ptep, unsigned long sz
 	page = pte_page(pte);
 	refs = record_subpages(page, sz, addr, end, pages + *nr);
 
-	folio = try_grab_folio(page, refs, flags);
-	if (!folio)
-		return 0;
+	if (fast) {
+		folio = try_grab_folio_fast(page, refs, flags);
+		if (!folio)
+			return 0;
+	} else {
+		folio = page_folio(page);
+		if (try_grab_folio(folio, refs, flags))
+			return 0;
+	}
 
 	if (unlikely(pte_val(pte) != pte_val(ptep_get(ptep)))) {
 		gup_put_folio(folio, refs, flags);
@@ -637,7 +653,7 @@  static int gup_hugepte(struct vm_area_struct *vma, pte_t *ptep, unsigned long sz
 static int gup_hugepd(struct vm_area_struct *vma, hugepd_t hugepd,
 		      unsigned long addr, unsigned int pdshift,
 		      unsigned long end, unsigned int flags,
-		      struct page **pages, int *nr)
+		      struct page **pages, int *nr, bool fast)
 {
 	pte_t *ptep;
 	unsigned long sz = 1UL << hugepd_shift(hugepd);
@@ -647,7 +663,7 @@  static int gup_hugepd(struct vm_area_struct *vma, hugepd_t hugepd,
 	ptep = hugepte_offset(hugepd, addr, pdshift);
 	do {
 		next = hugepte_addr_end(addr, end, sz);
-		ret = gup_hugepte(vma, ptep, sz, addr, end, flags, pages, nr);
+		ret = gup_hugepte(vma, ptep, sz, addr, end, flags, pages, nr, fast);
 		if (ret != 1)
 			return ret;
 	} while (ptep++, addr = next, addr != end);
@@ -674,7 +690,7 @@  static struct page *follow_hugepd(struct vm_area_struct *vma, hugepd_t hugepd,
 	ptep = hugepte_offset(hugepd, addr, pdshift);
 	ptl = huge_pte_lock(h, vma->vm_mm, ptep);
 	ret = gup_hugepd(vma, hugepd, addr, pdshift, addr + PAGE_SIZE,
-			 flags, &page, &nr);
+			 flags, &page, &nr, false);
 	spin_unlock(ptl);
 
 	if (ret == 1) {
@@ -691,7 +707,7 @@  static struct page *follow_hugepd(struct vm_area_struct *vma, hugepd_t hugepd,
 static inline int gup_hugepd(struct vm_area_struct *vma, hugepd_t hugepd,
 			     unsigned long addr, unsigned int pdshift,
 			     unsigned long end, unsigned int flags,
-			     struct page **pages, int *nr)
+			     struct page **pages, int *nr, bool fast)
 {
 	return 0;
 }
@@ -778,7 +794,7 @@  static struct page *follow_huge_pud(struct vm_area_struct *vma,
 	    gup_must_unshare(vma, flags, page))
 		return ERR_PTR(-EMLINK);
 
-	ret = try_grab_page(page, flags);
+	ret = try_grab_folio(page_folio(page), 1, flags);
 	if (ret)
 		page = ERR_PTR(ret);
 	else
@@ -855,7 +871,7 @@  static struct page *follow_huge_pmd(struct vm_area_struct *vma,
 	VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) &&
 			!PageAnonExclusive(page), page);
 
-	ret = try_grab_page(page, flags);
+	ret = try_grab_folio(page_folio(page), 1, flags);
 	if (ret)
 		return ERR_PTR(ret);
 
@@ -1017,8 +1033,8 @@  static struct page *follow_page_pte(struct vm_area_struct *vma,
 	VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) &&
 		       !PageAnonExclusive(page), page);
 
-	/* try_grab_page() does nothing unless FOLL_GET or FOLL_PIN is set. */
-	ret = try_grab_page(page, flags);
+	/* try_grab_folio() does nothing unless FOLL_GET or FOLL_PIN is set. */
+	ret = try_grab_folio(page_folio(page), 1, flags);
 	if (unlikely(ret)) {
 		page = ERR_PTR(ret);
 		goto out;
@@ -1282,7 +1298,7 @@  static int get_gate_page(struct mm_struct *mm, unsigned long address,
 			goto unmap;
 		*page = pte_page(entry);
 	}
-	ret = try_grab_page(*page, gup_flags);
+	ret = try_grab_folio(page_folio(*page), 1, gup_flags);
 	if (unlikely(ret))
 		goto unmap;
 out:
@@ -1685,20 +1701,19 @@  static long __get_user_pages(struct mm_struct *mm,
 			 * pages.
 			 */
 			if (page_increm > 1) {
-				struct folio *folio;
+				struct folio *folio = page_folio(page);
 
 				/*
 				 * Since we already hold refcount on the
 				 * large folio, this should never fail.
 				 */
-				folio = try_grab_folio(page, page_increm - 1,
-						       foll_flags);
-				if (WARN_ON_ONCE(!folio)) {
+				if (try_grab_folio(folio, page_increm - 1,
+						   foll_flags)) {
 					/*
 					 * Release the 1st page ref if the
 					 * folio is problematic, fail hard.
 					 */
-					gup_put_folio(page_folio(page), 1,
+					gup_put_folio(folio, 1,
 						      foll_flags);
 					ret = -EFAULT;
 					goto out;
@@ -3041,7 +3056,7 @@  static int gup_fast_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr,
 		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
 		page = pte_page(pte);
 
-		folio = try_grab_folio(page, 1, flags);
+		folio = try_grab_folio_fast(page, 1, flags);
 		if (!folio)
 			goto pte_unmap;
 
@@ -3128,7 +3143,7 @@  static int gup_fast_devmap_leaf(unsigned long pfn, unsigned long addr,
 			break;
 		}
 
-		folio = try_grab_folio(page, 1, flags);
+		folio = try_grab_folio_fast(page, 1, flags);
 		if (!folio) {
 			gup_fast_undo_dev_pagemap(nr, nr_start, flags, pages);
 			break;
@@ -3217,7 +3232,7 @@  static int gup_fast_pmd_leaf(pmd_t orig, pmd_t *pmdp, unsigned long addr,
 	page = pmd_page(orig);
 	refs = record_subpages(page, PMD_SIZE, addr, end, pages + *nr);
 
-	folio = try_grab_folio(page, refs, flags);
+	folio = try_grab_folio_fast(page, refs, flags);
 	if (!folio)
 		return 0;
 
@@ -3261,7 +3276,7 @@  static int gup_fast_pud_leaf(pud_t orig, pud_t *pudp, unsigned long addr,
 	page = pud_page(orig);
 	refs = record_subpages(page, PUD_SIZE, addr, end, pages + *nr);
 
-	folio = try_grab_folio(page, refs, flags);
+	folio = try_grab_folio_fast(page, refs, flags);
 	if (!folio)
 		return 0;
 
@@ -3301,7 +3316,7 @@  static int gup_fast_pgd_leaf(pgd_t orig, pgd_t *pgdp, unsigned long addr,
 	page = pgd_page(orig);
 	refs = record_subpages(page, PGDIR_SIZE, addr, end, pages + *nr);
 
-	folio = try_grab_folio(page, refs, flags);
+	folio = try_grab_folio_fast(page, refs, flags);
 	if (!folio)
 		return 0;
 
@@ -3355,7 +3370,7 @@  static int gup_fast_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr,
 			 * pmd format and THP pmd format
 			 */
 			if (gup_hugepd(NULL, __hugepd(pmd_val(pmd)), addr,
-				       PMD_SHIFT, next, flags, pages, nr) != 1)
+				       PMD_SHIFT, next, flags, pages, nr, true) != 1)
 				return 0;
 		} else if (!gup_fast_pte_range(pmd, pmdp, addr, next, flags,
 					       pages, nr))
@@ -3385,7 +3400,7 @@  static int gup_fast_pud_range(p4d_t *p4dp, p4d_t p4d, unsigned long addr,
 				return 0;
 		} else if (unlikely(is_hugepd(__hugepd(pud_val(pud))))) {
 			if (gup_hugepd(NULL, __hugepd(pud_val(pud)), addr,
-				       PUD_SHIFT, next, flags, pages, nr) != 1)
+				       PUD_SHIFT, next, flags, pages, nr, true) != 1)
 				return 0;
 		} else if (!gup_fast_pmd_range(pudp, pud, addr, next, flags,
 					       pages, nr))
@@ -3412,7 +3427,7 @@  static int gup_fast_p4d_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr,
 		BUILD_BUG_ON(p4d_leaf(p4d));
 		if (unlikely(is_hugepd(__hugepd(p4d_val(p4d))))) {
 			if (gup_hugepd(NULL, __hugepd(p4d_val(p4d)), addr,
-				       P4D_SHIFT, next, flags, pages, nr) != 1)
+				       P4D_SHIFT, next, flags, pages, nr, true) != 1)
 				return 0;
 		} else if (!gup_fast_pud_range(p4dp, p4d, addr, next, flags,
 					       pages, nr))
@@ -3441,7 +3456,7 @@  static void gup_fast_pgd_range(unsigned long addr, unsigned long end,
 				return;
 		} else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) {
 			if (gup_hugepd(NULL, __hugepd(pgd_val(pgd)), addr,
-				       PGDIR_SHIFT, next, flags, pages, nr) != 1)
+				       PGDIR_SHIFT, next, flags, pages, nr, true) != 1)
 				return;
 		} else if (!gup_fast_p4d_range(pgdp, pgd, addr, next, flags,
 					       pages, nr))
@@ -3842,13 +3857,14 @@  long memfd_pin_folios(struct file *memfd, loff_t start, loff_t end,
 				    next_idx != folio_index(fbatch.folios[i]))
 					continue;
 
-				folio = try_grab_folio(&fbatch.folios[i]->page,
-						       1, FOLL_PIN);
-				if (!folio) {
+				if (try_grab_folio(fbatch.folios[i],
+						       1, FOLL_PIN)) {
 					folio_batch_release(&fbatch);
 					goto err;
 				}
 
+				folio = fbatch.folios[i];
+
 				if (nr_folios == 0)
 					*offset = offset_in_folio(folio, start);
 
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 8e49f402d7c7..b6280a01c5fd 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1331,7 +1331,7 @@  struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
 	if (!*pgmap)
 		return ERR_PTR(-EFAULT);
 	page = pfn_to_page(pfn);
-	ret = try_grab_page(page, flags);
+	ret = try_grab_folio(page_folio(page), 1, flags);
 	if (ret)
 		page = ERR_PTR(ret);
 
diff --git a/mm/internal.h b/mm/internal.h
index 3419c329b3bc..dc358cd51135 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -1215,8 +1215,7 @@  int migrate_device_coherent_page(struct page *page);
 /*
  * mm/gup.c
  */
-struct folio *try_grab_folio(struct page *page, int refs, unsigned int flags);
-int __must_check try_grab_page(struct page *page, unsigned int flags);
+int __must_check try_grab_folio(struct folio *folio, int refs, unsigned int flags);
 
 /*
  * mm/huge_memory.c