[v2,4/7] mm/gup: remove gup_fast_pgd_leaf() and clean up the relevant code

Message ID: 20250331081327.256412-5-bhe@redhat.com
State: New
Series: mm/gup: Minor fix, cleanup and improvements

Commit Message

Baoquan He March 31, 2025, 8:13 a.m. UTC
In the current kernel, only pud huge pages are supported on some
architectures; p4d and pgd huge pages haven't been supported yet.
Furthermore, mm/gup.c has no pgd huge page handling in the
follow_page_mask() code path, so there is no reason to keep
gup_fast_pgd_leaf() in the gup_fast code path.

Remove gup_fast_pgd_leaf() and clean up the relevant code.

Signed-off-by: Baoquan He <bhe@redhat.com>
---
 mm/gup.c | 49 +++----------------------------------------------
 1 file changed, 3 insertions(+), 46 deletions(-)
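
For readers less familiar with fast GUP, the sketch below is a loose
userspace model (illustrative only, not kernel code) of the walk this
patch simplifies. The function names mirror mm/gup.c, but the bodies are
placeholders; the point is that leaf (huge) entries are handled at the
pud and pmd levels, while the p4d level, and after this patch the pgd
level as well, always descends.

#include <stdio.h>

/* Leaf (huge) handling survives at the pud and pmd levels, matching
 * gup_fast_pud_leaf() and gup_fast_pmd_leaf() in mm/gup.c; the pte
 * level maps base pages. */
static int gup_fast_pte_range(void)
{
	puts("pte level: grab base pages");
	return 1;
}

static int gup_fast_pmd_range(void)
{
	puts("pmd level: pmd_leaf() -> gup_fast_pmd_leaf(), else descend");
	return gup_fast_pte_range();
}

static int gup_fast_pud_range(void)
{
	puts("pud level: pud_leaf() -> gup_fast_pud_leaf(), else descend");
	return gup_fast_pmd_range();
}

static int gup_fast_p4d_range(void)
{
	/* No p4d leaf handler exists in mm/gup.c; it asserts this with
	 * BUILD_BUG_ON(p4d_leaf(p4d)). */
	puts("p4d level: no leaf case, always descend");
	return gup_fast_pud_range();
}

static void gup_fast_pgd_range(void)
{
	/* Before this patch: if (pgd_leaf(pgd)) gup_fast_pgd_leaf(...);
	 * after it: BUILD_BUG_ON(pgd_leaf(pgd)), then descend. */
	puts("pgd level: no leaf case, always descend");
	gup_fast_p4d_range();
}

int main(void)
{
	gup_fast_pgd_range();
	return 0;
}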

Comments

David Hildenbrand April 1, 2025, 8:19 a.m. UTC | #1
On 31.03.25 10:13, Baoquan He wrote:
> In the current kernel, only pud huge pages are supported on some
> architectures; p4d and pgd huge pages haven't been supported yet.
> Furthermore, mm/gup.c has no pgd huge page handling in the
> follow_page_mask() code path, so there is no reason to keep
> gup_fast_pgd_leaf() in the gup_fast code path.

I wonder whether that was put in place to handle (prepare for) very large
hugetlb folios. Until a while ago, follow_page_mask() did not have to
handle such hugetlb folios.

But I assume it never got used.

Acked-by: David Hildenbrand <david@redhat.com>

Oscar Salvador April 1, 2025, 2:11 p.m. UTC | #2
On Mon, Mar 31, 2025 at 04:13:24PM +0800, Baoquan He wrote:
> In the current kernel, only pud huge pages are supported on some
> architectures; p4d and pgd huge pages haven't been supported yet.
> Furthermore, mm/gup.c has no pgd huge page handling in the
> follow_page_mask() code path, so there is no reason to keep
> gup_fast_pgd_leaf() in the gup_fast code path.
> 
> Remove gup_fast_pgd_leaf() and clean up the relevant code.
> 
> Signed-off-by: Baoquan He <bhe@redhat.com>

Reviewed-by: Oscar Salvador <osalvador@suse.de>

Patch

diff --git a/mm/gup.c b/mm/gup.c
index a15317cf6641..58cdc5605a4a 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -3168,46 +3168,6 @@ static int gup_fast_pud_leaf(pud_t orig, pud_t *pudp, unsigned long addr,
 	return 1;
 }
 
-static int gup_fast_pgd_leaf(pgd_t orig, pgd_t *pgdp, unsigned long addr,
-		unsigned long end, unsigned int flags, struct page **pages,
-		int *nr)
-{
-	int refs;
-	struct page *page;
-	struct folio *folio;
-
-	if (!pgd_access_permitted(orig, flags & FOLL_WRITE))
-		return 0;
-
-	BUILD_BUG_ON(pgd_devmap(orig));
-
-	page = pgd_page(orig);
-	refs = record_subpages(page, PGDIR_SIZE, addr, end, pages + *nr);
-
-	folio = try_grab_folio_fast(page, refs, flags);
-	if (!folio)
-		return 0;
-
-	if (unlikely(pgd_val(orig) != pgd_val(*pgdp))) {
-		gup_put_folio(folio, refs, flags);
-		return 0;
-	}
-
-	if (!pgd_write(orig) && gup_must_unshare(NULL, flags, &folio->page)) {
-		gup_put_folio(folio, refs, flags);
-		return 0;
-	}
-
-	if (!gup_fast_folio_allowed(folio, flags)) {
-		gup_put_folio(folio, refs, flags);
-		return 0;
-	}
-
-	*nr += refs;
-	folio_set_referenced(folio);
-	return 1;
-}
-
 static int gup_fast_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr,
 		unsigned long end, unsigned int flags, struct page **pages,
 		int *nr)
@@ -3302,12 +3262,9 @@ static void gup_fast_pgd_range(unsigned long addr, unsigned long end,
 		next = pgd_addr_end(addr, end);
 		if (pgd_none(pgd))
 			return;
-		if (unlikely(pgd_leaf(pgd))) {
-			if (!gup_fast_pgd_leaf(pgd, pgdp, addr, next, flags,
-					       pages, nr))
-				return;
-		} else if (!gup_fast_p4d_range(pgdp, pgd, addr, next, flags,
-					       pages, nr))
+		BUILD_BUG_ON(pgd_leaf(pgd));
+		if (!gup_fast_p4d_range(pgdp, pgd, addr, next, flags,
+					pages, nr))
 			return;
 	} while (pgdp++, addr = next, addr != end);
 }
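
A note on the BUILD_BUG_ON(pgd_leaf(pgd)) that replaces the removed
branch: it compiles away because the generic fallback for pgd_leaf() is
a compile-time constant false (no architecture implements pgd-level
leaves), and it would break the build if that assumption ever changed.
The following is a small, self-contained userspace model of the
mechanism; MODEL_BUILD_BUG_ON and model_pgd_leaf are illustrative
stand-ins, not kernel APIs.

#include <stdio.h>

/* Simplified stand-in for the kernel's BUILD_BUG_ON(): the array has
 * size 1 when the condition is zero, and a negative size when the
 * condition is a nonzero compile-time constant, which fails to build. */
#define MODEL_BUILD_BUG_ON(cond) \
	((void)sizeof(char[1 - 2 * !!(cond)]))

/* Mirrors the generic fallback the kernel uses when an architecture
 * does not define its own pgd_leaf(): a compile-time constant false. */
#define model_pgd_leaf(x) 0

int main(void)
{
	unsigned long pgd_entry = 0x1234;	/* arbitrary stand-in value */

	/* Folds to sizeof(char[1]) and vanishes from the object code. */
	MODEL_BUILD_BUG_ON(model_pgd_leaf(pgd_entry));

	/* By contrast, MODEL_BUILD_BUG_ON(1) here would not compile. */
	printf("entry %#lx: pgd_leaf() is constant false, check is free\n",
	       pgd_entry);
	return 0;
}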