
[v2] mm: make folio_pte_batch available outside of mm/memory.c

Message ID 20240227104201.337988-1-21cnbao@gmail.com
State New
Series [v2] mm: make folio_pte_batch available outside of mm/memory.c

Commit Message

Barry Song Feb. 27, 2024, 10:42 a.m. UTC
From: Barry Song <v-songbaohua@oppo.com>

madvise, mprotect, and some other callers might need folio_pte_batch to
check whether a range of PTEs is completely mapped to a large folio with
contiguous physical addresses. Let's make it available in mm/internal.h.

Suggested-by: David Hildenbrand <david@redhat.com>
Cc: Lance Yang <ioworker0@gmail.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Yin Fengwei <fengwei.yin@intel.com>
[david@redhat.com: improve the doc for the exported func]
Signed-off-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Barry Song <v-songbaohua@oppo.com>
---
 -v2:
 * make folio_pte_batch a static inline, as suggested by Ryan and David;
 * improve the doc, thanks to David's work on it;
 * fix David's tags and add David's s-o-b;
 -v1:
 https://lore.kernel.org/all/20240227024050.244567-1-21cnbao@gmail.com/

 mm/internal.h | 90 +++++++++++++++++++++++++++++++++++++++++++++++++++
 mm/memory.c   | 76 -------------------------------------------
 2 files changed, 90 insertions(+), 76 deletions(-)
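
For illustration, here is a minimal sketch of how a prospective caller
(e.g. a madvise-style PTE scan) might use the helper once it lives in
mm/internal.h. The function name, locking context, and flag choice are
assumptions made for this example, not part of the patch; only the
folio_pte_batch() call itself reflects the exported interface below.

/*
 * Hypothetical caller sketch -- not from this patch.  Assumes the PTE
 * lock is held and that [addr, end) lies within a single page table,
 * as folio_pte_batch() requires.  Could live in e.g. mm/madvise.c,
 * which already includes "internal.h".
 */
static int example_scan_one(struct vm_area_struct *vma, pte_t *ptep,
			    unsigned long addr, unsigned long end)
{
	const fpb_t fpb_flags = FPB_IGNORE_DIRTY | FPB_IGNORE_SOFT_DIRTY;
	pte_t ptent = ptep_get(ptep);
	struct folio *folio;
	int max_nr, nr;

	if (!pte_present(ptent))
		return 1;	/* skip a single non-present entry */

	folio = vm_normal_folio(vma, addr, ptent);
	if (!folio || !folio_test_large(folio))
		return 1;	/* small folio: nothing to batch */

	/* Cap the batch so scanning cannot exceed this page table. */
	max_nr = (end - addr) >> PAGE_SHIFT;
	nr = folio_pte_batch(folio, addr, ptep, ptent, max_nr,
			     fpb_flags, NULL);

	/*
	 * 'nr' consecutive PTEs, starting at ptep, map consecutive
	 * pages of this folio and can be processed as one unit.
	 */
	return nr;
}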

Comments

Ryan Roberts Feb. 27, 2024, 10:48 a.m. UTC | #1
On 27/02/2024 10:42, Barry Song wrote:
> From: Barry Song <v-songbaohua@oppo.com>
> 
> madvise, mprotect, and some other callers might need folio_pte_batch to
> check whether a range of PTEs is completely mapped to a large folio with
> contiguous physical addresses. Let's make it available in mm/internal.h.
> 
> Suggested-by: David Hildenbrand <david@redhat.com>
> Cc: Lance Yang <ioworker0@gmail.com>
> Cc: Ryan Roberts <ryan.roberts@arm.com>
> Cc: Yin Fengwei <fengwei.yin@intel.com>
> [david@redhat.com: improve the doc for the exported func]
> Signed-off-by: David Hildenbrand <david@redhat.com>
> Signed-off-by: Barry Song <v-songbaohua@oppo.com>

Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>

> [...]
David Hildenbrand Feb. 27, 2024, 3:08 p.m. UTC | #2
On 27.02.24 11:42, Barry Song wrote:
> From: Barry Song <v-songbaohua@oppo.com>
> 
> madvise, mprotect, and some other callers might need folio_pte_batch to
> check whether a range of PTEs is completely mapped to a large folio with
> contiguous physical addresses. Let's make it available in mm/internal.h.
> 

Would have added

"
While at it, add proper kernel doc and sanity-check more input 
parameters using two additional VM_WARN_ON_FOLIO().
"

Acked-by: David Hildenbrand <david@redhat.com>
SeongJae Park Feb. 27, 2024, 9:54 p.m. UTC | #3
On Tue, 27 Feb 2024 23:42:01 +1300 Barry Song <21cnbao@gmail.com> wrote:

> From: Barry Song <v-songbaohua@oppo.com>
> 
> madvise, mprotect, and some other callers might need folio_pte_batch to
> check whether a range of PTEs is completely mapped to a large folio with
> contiguous physical addresses. Let's make it available in mm/internal.h.

Hi Barry,


I found this patch makes some of my build tests that do not set CONFIG_MMU
fail with multiple errors, including the one below:

    In file included from .../mm/nommu.c:43:
    .../mm/internal.h: In function '__pte_batch_clear_ignored':
    .../mm/internal.h:98:23: error: implicit declaration of function 'pte_mkclean'; did you mean 'page_mkclean'? [-Werror=implicit-function-declaration]
       98 |                 pte = pte_mkclean(pte);
          |                       ^~~~~~~~~~~
          |                       page_mkclean

Enabling CONFIG_MMU made the build succeed.  I haven't had time to look into
the code yet.  May I ask your opinion?


Thanks,
SJ

[...]
Barry Song Feb. 28, 2024, 12:10 a.m. UTC | #4
On Wed, Feb 28, 2024 at 10:54 AM SeongJae Park <sj@kernel.org> wrote:
>
> On Tue, 27 Feb 2024 23:42:01 +1300 Barry Song <21cnbao@gmail.com> wrote:
>
> > [...]
>
> Hi Barry,
>
>
> I found this patch makes some of my build tests that do not set CONFIG_MMU
> fail with multiple errors, including the one below:
>
>     In file included from .../mm/nommu.c:43:
>     .../mm/internal.h: In function '__pte_batch_clear_ignored':
>     .../mm/internal.h:98:23: error: implicit declaration of function 'pte_mkclean'; did you mean 'page_mkclean'? [-Werror=implicit-function-declaration]
>        98 |                 pte = pte_mkclean(pte);
>           |                       ^~~~~~~~~~~
>           |                       page_mkclean
>
> Enabling CONFIG_MMU made the build succeed.  I haven't had time to look into
> the code yet.  May I ask your opinion?

I guess it is because we have moved some functions from memory.c to the
header file, and this header file is included by lots of files in mm.
memory.c itself is only compiled when CONFIG_MMU is enabled:
mmu-$(CONFIG_MMU)       := highmem.o memory.o mincore.o \
                           mlock.o mmap.o mmu_gather.o mprotect.o mremap.o \
                           msync.o page_vma_mapped.o pagewalk.o \
                           pgtable-generic.o rmap.o vmalloc.o

Does the below fix your build?

diff --git a/mm/internal.h b/mm/internal.h
index fa9e2f7db506..4e57680b74e1 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -83,6 +83,8 @@ static inline void *folio_raw_mapping(struct folio *folio)
        return (void *)(mapping & ~PAGE_MAPPING_FLAGS);
 }

+#ifdef CONFIG_MMU
+
 /* Flags for folio_pte_batch(). */
 typedef int __bitwise fpb_t;

@@ -172,6 +174,7 @@ static inline int folio_pte_batch(struct folio
*folio, unsigned long addr,

        return min(ptep - start_ptep, max_nr);
 }
+#endif /* CONFIG_MMU */

 void __acct_reclaim_writeback(pg_data_t *pgdat, struct folio *folio,
                                                int nr_throttled);


> Thanks,
> SJ

Thanks
Barry
SeongJae Park Feb. 28, 2024, 12:15 a.m. UTC | #5
On Wed, 28 Feb 2024 13:10:06 +1300 Barry Song <21cnbao@gmail.com> wrote:

> [...]
> 
> I guess it is because we have moved some functions from memory.c to the
> header file, and this header file is included by lots of files in mm.
> memory.c itself is only compiled when CONFIG_MMU is enabled:
> mmu-$(CONFIG_MMU)       := highmem.o memory.o mincore.o \
>                            mlock.o mmap.o mmu_gather.o mprotect.o mremap.o \
>                            msync.o page_vma_mapped.o pagewalk.o \
>                            pgtable-generic.o rmap.o vmalloc.o
> 
> Does the below fix your build?
> 
> diff --git a/mm/internal.h b/mm/internal.h
> index fa9e2f7db506..4e57680b74e1 100644
> --- a/mm/internal.h
> +++ b/mm/internal.h
> @@ -83,6 +83,8 @@ static inline void *folio_raw_mapping(struct folio *folio)
>         return (void *)(mapping & ~PAGE_MAPPING_FLAGS);
>  }
> 
> +#ifdef CONFIG_MMU
> +
>  /* Flags for folio_pte_batch(). */
>  typedef int __bitwise fpb_t;
> 
> @@ -172,6 +174,7 @@ static inline int folio_pte_batch(struct folio
> *folio, unsigned long addr,
> 
>         return min(ptep - start_ptep, max_nr);
>  }
> +#endif /* CONFIG_MMU */
> 
>  void __acct_reclaim_writeback(pg_data_t *pgdat, struct folio *folio,
>                                                 int nr_throttled);

Yes, applying the above diff fixed mine :)


Thanks,
SJ

[...]

Patch

diff --git a/mm/internal.h b/mm/internal.h
index 13b59d384845..fa9e2f7db506 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -83,6 +83,96 @@ static inline void *folio_raw_mapping(struct folio *folio)
 	return (void *)(mapping & ~PAGE_MAPPING_FLAGS);
 }
 
+/* Flags for folio_pte_batch(). */
+typedef int __bitwise fpb_t;
+
+/* Compare PTEs after pte_mkclean(), ignoring the dirty bit. */
+#define FPB_IGNORE_DIRTY		((__force fpb_t)BIT(0))
+
+/* Compare PTEs after pte_clear_soft_dirty(), ignoring the soft-dirty bit. */
+#define FPB_IGNORE_SOFT_DIRTY		((__force fpb_t)BIT(1))
+
+static inline pte_t __pte_batch_clear_ignored(pte_t pte, fpb_t flags)
+{
+	if (flags & FPB_IGNORE_DIRTY)
+		pte = pte_mkclean(pte);
+	if (likely(flags & FPB_IGNORE_SOFT_DIRTY))
+		pte = pte_clear_soft_dirty(pte);
+	return pte_wrprotect(pte_mkold(pte));
+}
+
+/**
+ * folio_pte_batch - detect a PTE batch for a large folio
+ * @folio: The large folio to detect a PTE batch for.
+ * @addr: The user virtual address the first page is mapped at.
+ * @start_ptep: Page table pointer for the first entry.
+ * @pte: Page table entry for the first page.
+ * @max_nr: The maximum number of table entries to consider.
+ * @flags: Flags to modify the PTE batch semantics.
+ * @any_writable: Optional pointer to indicate whether any entry except the
+ *		  first one is writable.
+ *
+ * Detect a PTE batch: consecutive (present) PTEs that map consecutive
+ * pages of the same large folio.
+ *
+ * All PTEs inside a PTE batch have the same PTE bits set, excluding the PFN,
+ * the accessed bit, writable bit, dirty bit (with FPB_IGNORE_DIRTY) and
+ * soft-dirty bit (with FPB_IGNORE_SOFT_DIRTY).
+ *
+ * start_ptep must map any page of the folio. max_nr must be at least one and
+ * must be limited by the caller so scanning cannot exceed a single page table.
+ *
+ * Return: the number of table entries in the batch.
+ */
+static inline int folio_pte_batch(struct folio *folio, unsigned long addr,
+		pte_t *start_ptep, pte_t pte, int max_nr, fpb_t flags,
+		bool *any_writable)
+{
+	unsigned long folio_end_pfn = folio_pfn(folio) + folio_nr_pages(folio);
+	const pte_t *end_ptep = start_ptep + max_nr;
+	pte_t expected_pte, *ptep;
+	bool writable;
+	int nr;
+
+	if (any_writable)
+		*any_writable = false;
+
+	VM_WARN_ON_FOLIO(!pte_present(pte), folio);
+	VM_WARN_ON_FOLIO(!folio_test_large(folio) || max_nr < 1, folio);
+	VM_WARN_ON_FOLIO(page_folio(pfn_to_page(pte_pfn(pte))) != folio, folio);
+
+	nr = pte_batch_hint(start_ptep, pte);
+	expected_pte = __pte_batch_clear_ignored(pte_advance_pfn(pte, nr), flags);
+	ptep = start_ptep + nr;
+
+	while (ptep < end_ptep) {
+		pte = ptep_get(ptep);
+		if (any_writable)
+			writable = !!pte_write(pte);
+		pte = __pte_batch_clear_ignored(pte, flags);
+
+		if (!pte_same(pte, expected_pte))
+			break;
+
+		/*
+		 * Stop immediately once we reached the end of the folio. In
+		 * corner cases the next PFN might fall into a different
+		 * folio.
+		 */
+		if (pte_pfn(pte) >= folio_end_pfn)
+			break;
+
+		if (any_writable)
+			*any_writable |= writable;
+
+		nr = pte_batch_hint(ptep, pte);
+		expected_pte = pte_advance_pfn(expected_pte, nr);
+		ptep += nr;
+	}
+
+	return min(ptep - start_ptep, max_nr);
+}
+
 void __acct_reclaim_writeback(pg_data_t *pgdat, struct folio *folio,
 						int nr_throttled);
 static inline void acct_reclaim_writeback(struct folio *folio)
diff --git a/mm/memory.c b/mm/memory.c
index 1c45b6a42a1b..a7bcc39de56b 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -953,82 +953,6 @@ static __always_inline void __copy_present_ptes(struct vm_area_struct *dst_vma,
 	set_ptes(dst_vma->vm_mm, addr, dst_pte, pte, nr);
 }
 
-/* Flags for folio_pte_batch(). */
-typedef int __bitwise fpb_t;
-
-/* Compare PTEs after pte_mkclean(), ignoring the dirty bit. */
-#define FPB_IGNORE_DIRTY		((__force fpb_t)BIT(0))
-
-/* Compare PTEs after pte_clear_soft_dirty(), ignoring the soft-dirty bit. */
-#define FPB_IGNORE_SOFT_DIRTY		((__force fpb_t)BIT(1))
-
-static inline pte_t __pte_batch_clear_ignored(pte_t pte, fpb_t flags)
-{
-	if (flags & FPB_IGNORE_DIRTY)
-		pte = pte_mkclean(pte);
-	if (likely(flags & FPB_IGNORE_SOFT_DIRTY))
-		pte = pte_clear_soft_dirty(pte);
-	return pte_wrprotect(pte_mkold(pte));
-}
-
-/*
- * Detect a PTE batch: consecutive (present) PTEs that map consecutive
- * pages of the same folio.
- *
- * All PTEs inside a PTE batch have the same PTE bits set, excluding the PFN,
- * the accessed bit, writable bit, dirty bit (with FPB_IGNORE_DIRTY) and
- * soft-dirty bit (with FPB_IGNORE_SOFT_DIRTY).
- *
- * If "any_writable" is set, it will indicate if any other PTE besides the
- * first (given) PTE is writable.
- */
-static inline int folio_pte_batch(struct folio *folio, unsigned long addr,
-		pte_t *start_ptep, pte_t pte, int max_nr, fpb_t flags,
-		bool *any_writable)
-{
-	unsigned long folio_end_pfn = folio_pfn(folio) + folio_nr_pages(folio);
-	const pte_t *end_ptep = start_ptep + max_nr;
-	pte_t expected_pte, *ptep;
-	bool writable;
-	int nr;
-
-	if (any_writable)
-		*any_writable = false;
-
-	VM_WARN_ON_FOLIO(!pte_present(pte), folio);
-
-	nr = pte_batch_hint(start_ptep, pte);
-	expected_pte = __pte_batch_clear_ignored(pte_advance_pfn(pte, nr), flags);
-	ptep = start_ptep + nr;
-
-	while (ptep < end_ptep) {
-		pte = ptep_get(ptep);
-		if (any_writable)
-			writable = !!pte_write(pte);
-		pte = __pte_batch_clear_ignored(pte, flags);
-
-		if (!pte_same(pte, expected_pte))
-			break;
-
-		/*
-		 * Stop immediately once we reached the end of the folio. In
-		 * corner cases the next PFN might fall into a different
-		 * folio.
-		 */
-		if (pte_pfn(pte) >= folio_end_pfn)
-			break;
-
-		if (any_writable)
-			*any_writable |= writable;
-
-		nr = pte_batch_hint(ptep, pte);
-		expected_pte = pte_advance_pfn(expected_pte, nr);
-		ptep += nr;
-	}
-
-	return min(ptep - start_ptep, max_nr);
-}
-
 /*
  * Copy one present PTE, trying to batch-process subsequent PTEs that map
  * consecutive pages of the same folio by copying them as well.