
[v1,11/11] mm/gup: grab head page refcount once for group of subpages

Message ID 20210325230938.30752-12-joao.m.martins@oracle.com (mailing list archive)
State New, archived
Series mm, sparse-vmemmap: Introduce compound pagemaps

Commit Message

Joao Martins March 25, 2021, 11:09 p.m. UTC
Much like hugetlbfs or THPs, treat device pagemaps with
compound pages like the rest of GUP handling of compound pages.

Rather than incrementing the refcount every 4K, we record
all sub pages and increment by @refs amount *once*.
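
In essence, the inner loop goes from one atomic refcount increment per
PAGE_SIZE step to a single atomic addition covering all the recorded
subpages; a condensed sketch of the change (extracted from the diff below):

        /* before: one try_grab_page() per base page */
        pages[*nr] = page;
        if (unlikely(!try_grab_page(page, flags)))
                ...
        (*nr)++;
        pfn++;

        /* after: record the subpages, then one atomic addition of @refs */
        refs = record_subpages(page, addr, next, pages + *nr);
        head = try_grab_compound_head(head, refs, flags);
        *nr += refs;
        pfn += refs;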

Performance measured by gup_benchmark improves considerably for
get_user_pages_fast() and pin_user_pages_fast() with NVDIMMs:

 $ gup_test -f /dev/dax1.0 -m 16384 -r 10 -S [-u,-a] -n 512 -w
(get_user_pages_fast 2M pages) ~59 ms -> ~6.1 ms
(pin_user_pages_fast 2M pages) ~87 ms -> ~6.2 ms
[altmap]
(get_user_pages_fast 2M pages) ~494 ms -> ~9 ms
(pin_user_pages_fast 2M pages) ~494 ms -> ~10 ms

 $ gup_test -f /dev/dax1.0 -m 129022 -r 10 -S [-u,-a] -n 512 -w
(get_user_pages_fast 2M pages) ~492 ms -> ~49 ms
(pin_user_pages_fast 2M pages) ~493 ms -> ~50 ms
[altmap with -m 127004]
(get_user_pages_fast 2M pages) ~3.91 sec -> ~70 ms
(pin_user_pages_fast 2M pages) ~3.97 sec -> ~74 ms

Signed-off-by: Joao Martins <joao.m.martins@oracle.com>
---
 mm/gup.c | 52 ++++++++++++++++++++++++++++++++--------------------
 1 file changed, 32 insertions(+), 20 deletions(-)

Comments

Dan Williams June 2, 2021, 1:05 a.m. UTC | #1
On Thu, Mar 25, 2021 at 4:10 PM Joao Martins <joao.m.martins@oracle.com> wrote:
>
> Much like hugetlbfs or THPs, treat device pagemaps with
> compound pages like the rest of GUP handling of compound pages.
>

How about:

"Use try_grab_compound_head() for device-dax GUP when configured with
a compound pagemap."

> Rather than incrementing the refcount every 4K, we record
> all sub pages and increment by @refs amount *once*.

"Rather than incrementing the refcount for each page, do one atomic
addition for all the pages to be pinned."

>
> Performance measured by gup_benchmark improves considerably for
> get_user_pages_fast() and pin_user_pages_fast() with NVDIMMs:
>
>  $ gup_test -f /dev/dax1.0 -m 16384 -r 10 -S [-u,-a] -n 512 -w
> (get_user_pages_fast 2M pages) ~59 ms -> ~6.1 ms
> (pin_user_pages_fast 2M pages) ~87 ms -> ~6.2 ms
> [altmap]
> (get_user_pages_fast 2M pages) ~494 ms -> ~9 ms
> (pin_user_pages_fast 2M pages) ~494 ms -> ~10 ms

Hmm what is altmap representing here? The altmap case does not support
compound geometry, so this last test is comparing pinning this amount
of memory without compound pages where the memmap is in PMEM to the
speed *with* compound pages and the memmap in DRAM?

>
>  $ gup_test -f /dev/dax1.0 -m 129022 -r 10 -S [-u,-a] -n 512 -w
> (get_user_pages_fast 2M pages) ~492 ms -> ~49 ms
> (pin_user_pages_fast 2M pages) ~493 ms -> ~50 ms
> [altmap with -m 127004]
> (get_user_pages_fast 2M pages) ~3.91 sec -> ~70 ms
> (pin_user_pages_fast 2M pages) ~3.97 sec -> ~74 ms
>
> Signed-off-by: Joao Martins <joao.m.martins@oracle.com>
> ---
>  mm/gup.c | 52 ++++++++++++++++++++++++++++++++--------------------
>  1 file changed, 32 insertions(+), 20 deletions(-)
>
> diff --git a/mm/gup.c b/mm/gup.c
> index b3e647c8b7ee..514f12157a0f 100644
> --- a/mm/gup.c
> +++ b/mm/gup.c
> @@ -2159,31 +2159,54 @@ static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
>  }
>  #endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */
>
> +
> +static int record_subpages(struct page *page, unsigned long addr,
> +                          unsigned long end, struct page **pages)
> +{
> +       int nr;
> +
> +       for (nr = 0; addr != end; addr += PAGE_SIZE)
> +               pages[nr++] = page++;
> +
> +       return nr;
> +}
> +
>  #if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
>  static int __gup_device_huge(unsigned long pfn, unsigned long addr,
>                              unsigned long end, unsigned int flags,
>                              struct page **pages, int *nr)
>  {
> -       int nr_start = *nr;
> +       int refs, nr_start = *nr;
>         struct dev_pagemap *pgmap = NULL;
>
>         do {
> -               struct page *page = pfn_to_page(pfn);
> +               struct page *head, *page = pfn_to_page(pfn);
> +               unsigned long next;
>
>                 pgmap = get_dev_pagemap(pfn, pgmap);
>                 if (unlikely(!pgmap)) {
>                         undo_dev_pagemap(nr, nr_start, flags, pages);
>                         return 0;
>                 }
> -               SetPageReferenced(page);
> -               pages[*nr] = page;
> -               if (unlikely(!try_grab_page(page, flags))) {
> -                       undo_dev_pagemap(nr, nr_start, flags, pages);
> +
> +               head = compound_head(page);
> +               next = PageCompound(head) ? end : addr + PAGE_SIZE;

This looks a tad messy, and makes assumptions that upper layers are
not sending this routine multiple huge pages to map. next should be
set to the next compound page, not end.
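
Something like the following, perhaps (untested sketch; assumes the usual
compound_head()/compound_nr() helpers):

        head = compound_head(page);
        /* clamp this iteration to the end of the current compound page,
         * instead of assuming @end never spans more than one of them
         */
        next = min(end, addr +
                   ((compound_nr(head) - (page - head)) << PAGE_SHIFT));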

> +               refs = record_subpages(page, addr, next, pages + *nr);
> +
> +               SetPageReferenced(head);
> +               head = try_grab_compound_head(head, refs, flags);
> +               if (!head) {
> +                       if (PageCompound(head)) {

@head is NULL here, I think you wanted to rename the result of
try_grab_compound_head() to something like pinned_head so that you
don't undo the work you did above. However I feel like there's one too
many PageCompound() checks.
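
i.e. something along these lines (sketch only):

        struct page *pinned_head;

        pinned_head = try_grab_compound_head(head, refs, flags);
        if (!pinned_head) {
                /* check the head computed earlier, not the NULL result */
                if (PageCompound(head)) {
                        ClearPageReferenced(head);
                        put_dev_pagemap(pgmap);
                } else {
                        undo_dev_pagemap(nr, nr_start, flags, pages);
                }
                return 0;
        }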


> +                               ClearPageReferenced(head);
> +                               put_dev_pagemap(pgmap);
> +                       } else {
> +                               undo_dev_pagemap(nr, nr_start, flags, pages);
> +                       }
>                         return 0;
>                 }
> -               (*nr)++;
> -               pfn++;
> -       } while (addr += PAGE_SIZE, addr != end);
> +               *nr += refs;
> +               pfn += refs;
> +       } while (addr += (refs << PAGE_SHIFT), addr != end);
>
>         if (pgmap)
>                 put_dev_pagemap(pgmap);
> @@ -2243,17 +2266,6 @@ static int __gup_device_huge_pud(pud_t pud, pud_t *pudp, unsigned long addr,
>  }
>  #endif
>
> -static int record_subpages(struct page *page, unsigned long addr,
> -                          unsigned long end, struct page **pages)
> -{
> -       int nr;
> -
> -       for (nr = 0; addr != end; addr += PAGE_SIZE)
> -               pages[nr++] = page++;
> -
> -       return nr;
> -}
> -
>  #ifdef CONFIG_ARCH_HAS_HUGEPD
>  static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
>                                       unsigned long sz)
> --
> 2.17.1
>
Joao Martins June 7, 2021, 3:21 p.m. UTC | #2
On 6/2/21 2:05 AM, Dan Williams wrote:
> On Thu, Mar 25, 2021 at 4:10 PM Joao Martins <joao.m.martins@oracle.com> wrote:
>>
>> Much like hugetlbfs or THPs, treat device pagemaps with
>> compound pages like the rest of GUP handling of compound pages.
>>
> 
> How about:
> 
> "Use try_grab_compound_head() for device-dax GUP when configured with
> a compound pagemap."
> 
Yeap, a bit clearer indeed.

>> Rather than incrementing the refcount every 4K, we record
>> all sub pages and increment by @refs amount *once*.
> 
> "Rather than incrementing the refcount for each page, do one atomic
> addition for all the pages to be pinned."
> 
ACK.

>>
>> Performance measured by gup_benchmark improves considerably for
>> get_user_pages_fast() and pin_user_pages_fast() with NVDIMMs:
>>
>>  $ gup_test -f /dev/dax1.0 -m 16384 -r 10 -S [-u,-a] -n 512 -w
>> (get_user_pages_fast 2M pages) ~59 ms -> ~6.1 ms
>> (pin_user_pages_fast 2M pages) ~87 ms -> ~6.2 ms
>> [altmap]
>> (get_user_pages_fast 2M pages) ~494 ms -> ~9 ms
>> (pin_user_pages_fast 2M pages) ~494 ms -> ~10 ms
> 
> Hmm what is altmap representing here? The altmap case does not support
> compound geometry, 

It does support compound geometry, and so we use compound pages with the altmap
case. What altmap doesn't support is the memory savings in the vmemmap that can
be achieved when using compound pages. That's what is represented here.

> so this last test is comparing pinning this amount
> of memory without compound pages where the memmap is in PMEM to the
> speed *with* compound pages and the memmap in DRAM?
> 
The test compares pinning this amount of memory with compound pages placed
in PMEM and in DRAM. It just exposes how inefficient this can get if huge
pages aren't represented with compound pages.

>>
>>  $ gup_test -f /dev/dax1.0 -m 129022 -r 10 -S [-u,-a] -n 512 -w
>> (get_user_pages_fast 2M pages) ~492 ms -> ~49 ms
>> (pin_user_pages_fast 2M pages) ~493 ms -> ~50 ms
>> [altmap with -m 127004]
>> (get_user_pages_fast 2M pages) ~3.91 sec -> ~70 ms
>> (pin_user_pages_fast 2M pages) ~3.97 sec -> ~74 ms
>>
>> Signed-off-by: Joao Martins <joao.m.martins@oracle.com>
>> ---
>>  mm/gup.c | 52 ++++++++++++++++++++++++++++++++--------------------
>>  1 file changed, 32 insertions(+), 20 deletions(-)
>>
>> diff --git a/mm/gup.c b/mm/gup.c
>> index b3e647c8b7ee..514f12157a0f 100644
>> --- a/mm/gup.c
>> +++ b/mm/gup.c
>> @@ -2159,31 +2159,54 @@ static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
>>  }
>>  #endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */
>>
>> +
>> +static int record_subpages(struct page *page, unsigned long addr,
>> +                          unsigned long end, struct page **pages)
>> +{
>> +       int nr;
>> +
>> +       for (nr = 0; addr != end; addr += PAGE_SIZE)
>> +               pages[nr++] = page++;
>> +
>> +       return nr;
>> +}
>> +
>>  #if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
>>  static int __gup_device_huge(unsigned long pfn, unsigned long addr,
>>                              unsigned long end, unsigned int flags,
>>                              struct page **pages, int *nr)
>>  {
>> -       int nr_start = *nr;
>> +       int refs, nr_start = *nr;
>>         struct dev_pagemap *pgmap = NULL;
>>
>>         do {
>> -               struct page *page = pfn_to_page(pfn);
>> +               struct page *head, *page = pfn_to_page(pfn);
>> +               unsigned long next;
>>
>>                 pgmap = get_dev_pagemap(pfn, pgmap);
>>                 if (unlikely(!pgmap)) {
>>                         undo_dev_pagemap(nr, nr_start, flags, pages);
>>                         return 0;
>>                 }
>> -               SetPageReferenced(page);
>> -               pages[*nr] = page;
>> -               if (unlikely(!try_grab_page(page, flags))) {
>> -                       undo_dev_pagemap(nr, nr_start, flags, pages);
>> +
>> +               head = compound_head(page);
>> +               next = PageCompound(head) ? end : addr + PAGE_SIZE;
> 
> This looks a tad messy, and makes assumptions that upper layers are
> not sending this routine multiple huge pages to map. next should be
> set to the next compound page, not end.

Although for devmap (and the same could be said for hugetlbfs), __gup_device_huge() (as
called by __gup_device_huge_{pud,pmd}) would only ever be called on a compound page
representing that same level, as opposed to many compound pages, i.e. @end already
represents the next compound page at the PMD or PUD level.

But of course, should we represent devmap pages in geometries with hpagesize/align values
other than PMD or PUD size, then it's true that relying on the @end value being the next
compound page is fragile. But so is the rest of the surrounding code.

> 
>> +               refs = record_subpages(page, addr, next, pages + *nr);
>> +
>> +               SetPageReferenced(head);
>> +               head = try_grab_compound_head(head, refs, flags);
>> +               if (!head) {
>> +                       if (PageCompound(head)) {
> 
> @head is NULL here, I think you wanted to rename the result of
> try_grab_compound_head() to something like pinned_head so that you
> don't undo the work you did above. 

Yes. pinned_head is what I actually should have written. Let me fix that.

> However I feel like there's one too many
> PageCompound() checks.
> 

I agree, but I am not fully sure how I can remove them :(

The previous approach was to separate the logic into two distinct helpers, namely
__gup_device_huge() and __gup_device_compound_huge(). But that sort of special casing
wasn't a good idea, so I tried merging both cases in __gup_device_huge(), differentiating
solely on PageCompound().

I could make this slightly less bad by moving the error-case PageCompound checks to
undo_dev_pagemap() and record_subpages().

But we still have the pagemap refcount to be taken until your other series removes the
need for it. So perhaps I should place the remaining PageCompound-based check inside
record_subpages() to accommodate the PAGE_SIZE geometry case (similarly hinted at by
Jason in the previous version, but which I didn't fully address).
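
Roughly along these lines (just a sketch of the idea, untested):

        static int record_subpages(struct page *page, unsigned long addr,
                                   unsigned long end, struct page **pages)
        {
                int nr;

                /* PAGE_SIZE geometry: not a compound page, record one entry */
                if (!PageCompound(page))
                        end = addr + PAGE_SIZE;

                for (nr = 0; addr != end; addr += PAGE_SIZE)
                        pages[nr++] = page++;

                return nr;
        }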

How does the above sound?

Long term, once we stop having devmap use non-compound struct pages on PMDs/PUDs and
the pgmap refcount on GUP is removed, then perhaps we can move to the existing regular
huge page path that is not devmap specific.

> 
>> +                               ClearPageReferenced(head);
>> +                               put_dev_pagemap(pgmap);
>> +                       } else {
>> +                               undo_dev_pagemap(nr, nr_start, flags, pages);
>> +                       }
>>                         return 0;
>>                 }
>> -               (*nr)++;
>> -               pfn++;
>> -       } while (addr += PAGE_SIZE, addr != end);
>> +               *nr += refs;
>> +               pfn += refs;
>> +       } while (addr += (refs << PAGE_SHIFT), addr != end);
>>
>>         if (pgmap)
>>                 put_dev_pagemap(pgmap);
>> @@ -2243,17 +2266,6 @@ static int __gup_device_huge_pud(pud_t pud, pud_t *pudp, unsigned long addr,
>>  }
>>  #endif
>>
>> -static int record_subpages(struct page *page, unsigned long addr,
>> -                          unsigned long end, struct page **pages)
>> -{
>> -       int nr;
>> -
>> -       for (nr = 0; addr != end; addr += PAGE_SIZE)
>> -               pages[nr++] = page++;
>> -
>> -       return nr;
>> -}
>> -
>>  #ifdef CONFIG_ARCH_HAS_HUGEPD
>>  static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
>>                                       unsigned long sz)
>> --
>> 2.17.1
>>
Dan Williams June 7, 2021, 7:22 p.m. UTC | #3
On Mon, Jun 7, 2021 at 8:22 AM Joao Martins <joao.m.martins@oracle.com> wrote:
>
> On 6/2/21 2:05 AM, Dan Williams wrote:
> > On Thu, Mar 25, 2021 at 4:10 PM Joao Martins <joao.m.martins@oracle.com> wrote:
> >>
> >> Much like hugetlbfs or THPs, treat device pagemaps with
> >> compound pages like the rest of GUP handling of compound pages.
> >>
> >
> > How about:
> >
> > "Use try_grab_compound_head() for device-dax GUP when configured with
> > a compound pagemap."
> >
> Yeap, a bit clearer indeed.
>
> >> Rather than incrementing the refcount every 4K, we record
> >> all sub pages and increment by @refs amount *once*.
> >
> > "Rather than incrementing the refcount for each page, do one atomic
> > addition for all the pages to be pinned."
> >
> ACK.
>
> >>
> >> Performance measured by gup_benchmark improves considerably for
> >> get_user_pages_fast() and pin_user_pages_fast() with NVDIMMs:
> >>
> >>  $ gup_test -f /dev/dax1.0 -m 16384 -r 10 -S [-u,-a] -n 512 -w
> >> (get_user_pages_fast 2M pages) ~59 ms -> ~6.1 ms
> >> (pin_user_pages_fast 2M pages) ~87 ms -> ~6.2 ms
> >> [altmap]
> >> (get_user_pages_fast 2M pages) ~494 ms -> ~9 ms
> >> (pin_user_pages_fast 2M pages) ~494 ms -> ~10 ms
> >
> > Hmm what is altmap representing here? The altmap case does not support
> > compound geometry,
>
> It does support compound geometry, and so we use compound pages with the altmap
> case. What altmap doesn't support is the memory savings in the vmemmap that can
> be achieved when using compound pages. That's what is represented here.

Ah, I missed that detail; it might be good to mention this in the
Documentation/vm/ overview doc for this capability.

>
> > so this last test is comparing pinning this amount
> > of memory without compound pages where the memmap is in PMEM to the
> > speed *with* compound pages and the memmap in DRAM?
> >
> The test compares pinning this amount of memory with compound pages placed
> in PMEM and in DRAM. It just exposes how inefficient this can get if huge
> pages aren't represented with compound pages.

Got it.

>
> >>
> >>  $ gup_test -f /dev/dax1.0 -m 129022 -r 10 -S [-u,-a] -n 512 -w
> >> (get_user_pages_fast 2M pages) ~492 ms -> ~49 ms
> >> (pin_user_pages_fast 2M pages) ~493 ms -> ~50 ms
> >> [altmap with -m 127004]
> >> (get_user_pages_fast 2M pages) ~3.91 sec -> ~70 ms
> >> (pin_user_pages_fast 2M pages) ~3.97 sec -> ~74 ms
> >>
> >> Signed-off-by: Joao Martins <joao.m.martins@oracle.com>
> >> ---
> >>  mm/gup.c | 52 ++++++++++++++++++++++++++++++++--------------------
> >>  1 file changed, 32 insertions(+), 20 deletions(-)
> >>
> >> diff --git a/mm/gup.c b/mm/gup.c
> >> index b3e647c8b7ee..514f12157a0f 100644
> >> --- a/mm/gup.c
> >> +++ b/mm/gup.c
> >> @@ -2159,31 +2159,54 @@ static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
> >>  }
> >>  #endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */
> >>
> >> +
> >> +static int record_subpages(struct page *page, unsigned long addr,
> >> +                          unsigned long end, struct page **pages)
> >> +{
> >> +       int nr;
> >> +
> >> +       for (nr = 0; addr != end; addr += PAGE_SIZE)
> >> +               pages[nr++] = page++;
> >> +
> >> +       return nr;
> >> +}
> >> +
> >>  #if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
> >>  static int __gup_device_huge(unsigned long pfn, unsigned long addr,
> >>                              unsigned long end, unsigned int flags,
> >>                              struct page **pages, int *nr)
> >>  {
> >> -       int nr_start = *nr;
> >> +       int refs, nr_start = *nr;
> >>         struct dev_pagemap *pgmap = NULL;
> >>
> >>         do {
> >> -               struct page *page = pfn_to_page(pfn);
> >> +               struct page *head, *page = pfn_to_page(pfn);
> >> +               unsigned long next;
> >>
> >>                 pgmap = get_dev_pagemap(pfn, pgmap);
> >>                 if (unlikely(!pgmap)) {
> >>                         undo_dev_pagemap(nr, nr_start, flags, pages);
> >>                         return 0;
> >>                 }
> >> -               SetPageReferenced(page);
> >> -               pages[*nr] = page;
> >> -               if (unlikely(!try_grab_page(page, flags))) {
> >> -                       undo_dev_pagemap(nr, nr_start, flags, pages);
> >> +
> >> +               head = compound_head(page);
> >> +               next = PageCompound(head) ? end : addr + PAGE_SIZE;
> >
> > This looks a tad messy, and makes assumptions that upper layers are
> > not sending this routine multiple huge pages to map. next should be
> > set to the next compound page, not end.
>
> Although for devmap (and the same could be said for hugetlbfs), __gup_device_huge() (as
> called by __gup_device_huge_{pud,pmd}) would only ever be called on a compound page
> representing that same level, as opposed to many compound pages, i.e. @end already
> represents the next compound page at the PMD or PUD level.
>
> But of course, should we represent devmap pages in geometries with hpagesize/align values
> other than PMD or PUD size, then it's true that relying on the @end value being the next
> compound page is fragile. But so is the rest of the surrounding code.

Ok, for now maybe a:

/* @end is assumed to be limited to at most 1 compound page */

...would remind whoever refactors this later about the assumption.

>
> >
> >> +               refs = record_subpages(page, addr, next, pages + *nr);
> >> +
> >> +               SetPageReferenced(head);
> >> +               head = try_grab_compound_head(head, refs, flags);
> >> +               if (!head) {
> >> +                       if (PageCompound(head)) {
> >
> > @head is NULL here, I think you wanted to rename the result of
> > try_grab_compound_head() to something like pinned_head so that you
> > don't undo the work you did above.
>
> Yes. pinned_head is what I actually should have written. Let me fix that.
>
> > However I feel like there's one too many
> > PageCompound() checks.
> >
>
> I agree, but I am not fully sure how I can remove them :(

If you fix the bug above, that's sufficient for me; I may be wishing
for something prettier that is not possible in practice...

>
> The previous approach was to separate the logic into two distinct helpers, namely
> __gup_device_huge() and __gup_device_compound_huge(). But that sort of special casing
> wasn't a good idea, so I tried merging both cases in __gup_device_huge(), differentiating
> solely on PageCompound().
>
> I could make this slightly less bad by moving the error-case PageCompound checks to
> undo_dev_pagemap() and record_subpages().
>
> But we still have the pagemap refcount to be taken until your other series removes the
> need for it. So perhaps I should place the remaining PageCompound-based check inside
> record_subpages() to accommodate the PAGE_SIZE geometry case (similarly hinted at by
> Jason in the previous version, but which I didn't fully address).
>
> How does the above sound?

Sounds worth a try, but not a hard requirement for this to move
forward from my perspective.

>
> Long term, once we stop having devmap use non-compound struct pages on PMDs/PUDs and
> the pgmap refcount on GUP is removed, then perhaps we can move to the existing regular
> huge page path that is not devmap specific.
>

Ok.

Patch

diff --git a/mm/gup.c b/mm/gup.c
index b3e647c8b7ee..514f12157a0f 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -2159,31 +2159,54 @@  static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
 }
 #endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */
 
+
+static int record_subpages(struct page *page, unsigned long addr,
+			   unsigned long end, struct page **pages)
+{
+	int nr;
+
+	for (nr = 0; addr != end; addr += PAGE_SIZE)
+		pages[nr++] = page++;
+
+	return nr;
+}
+
 #if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
 static int __gup_device_huge(unsigned long pfn, unsigned long addr,
 			     unsigned long end, unsigned int flags,
 			     struct page **pages, int *nr)
 {
-	int nr_start = *nr;
+	int refs, nr_start = *nr;
 	struct dev_pagemap *pgmap = NULL;
 
 	do {
-		struct page *page = pfn_to_page(pfn);
+		struct page *head, *page = pfn_to_page(pfn);
+		unsigned long next;
 
 		pgmap = get_dev_pagemap(pfn, pgmap);
 		if (unlikely(!pgmap)) {
 			undo_dev_pagemap(nr, nr_start, flags, pages);
 			return 0;
 		}
-		SetPageReferenced(page);
-		pages[*nr] = page;
-		if (unlikely(!try_grab_page(page, flags))) {
-			undo_dev_pagemap(nr, nr_start, flags, pages);
+
+		head = compound_head(page);
+		next = PageCompound(head) ? end : addr + PAGE_SIZE;
+		refs = record_subpages(page, addr, next, pages + *nr);
+
+		SetPageReferenced(head);
+		head = try_grab_compound_head(head, refs, flags);
+		if (!head) {
+			if (PageCompound(head)) {
+				ClearPageReferenced(head);
+				put_dev_pagemap(pgmap);
+			} else {
+				undo_dev_pagemap(nr, nr_start, flags, pages);
+			}
 			return 0;
 		}
-		(*nr)++;
-		pfn++;
-	} while (addr += PAGE_SIZE, addr != end);
+		*nr += refs;
+		pfn += refs;
+	} while (addr += (refs << PAGE_SHIFT), addr != end);
 
 	if (pgmap)
 		put_dev_pagemap(pgmap);
@@ -2243,17 +2266,6 @@  static int __gup_device_huge_pud(pud_t pud, pud_t *pudp, unsigned long addr,
 }
 #endif
 
-static int record_subpages(struct page *page, unsigned long addr,
-			   unsigned long end, struct page **pages)
-{
-	int nr;
-
-	for (nr = 0; addr != end; addr += PAGE_SIZE)
-		pages[nr++] = page++;
-
-	return nr;
-}
-
 #ifdef CONFIG_ARCH_HAS_HUGEPD
 static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
 				      unsigned long sz)