
[1/2] mm/hugetlb: grab head page refcount once per group of subpages

Message ID 20210125205744.10203-2-joao.m.martins@oracle.com (mailing list archive)
State New, archived
Series: mm/hugetlb: follow_hugetlb_page() improvements

Commit Message

Joao Martins Jan. 25, 2021, 8:57 p.m. UTC
Once follow_hugetlb_page() locks the pmd/pud, it walks all the
subpages of the huge page and grabs a reference for each one,
bounded by how many pages the caller can store and by the size
of the VA range. Similar to gup-fast, have follow_hugetlb_page()
grab the head page refcount only once, after counting all the
subpages that are part of the just-faulted huge page.

Consequently, we reduce the number of atomic operations needed to
pin the huge page, which improves non-fast gup() considerably:

 - 16G with 1G huge page size
 gup_test -f /mnt/huge/file -m 16384 -r 10 -L -S -n 512 -w

 PIN_LONGTERM_BENCHMARK: ~87.6k us -> ~11k us

Signed-off-by: Joao Martins <joao.m.martins@oracle.com>
---
 include/linux/mm.h |  3 +++
 mm/gup.c           |  5 ++---
 mm/hugetlb.c       | 43 ++++++++++++++++++++++++-------------------
 3 files changed, 29 insertions(+), 22 deletions(-)

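To make the change concrete outside the kernel context, here is a minimal
user-space C sketch of the idea described above: instead of one atomic
refcount operation per subpage, count the subpages covered while holding
the lock and do a single atomic add on the head page. This is illustrative
only, not kernel code; fake_page, grab_per_subpage() and grab_batched()
are made-up names for the example.

/* Build with: cc -std=c11 -o refs refs.c */
#include <stdatomic.h>
#include <stdio.h>

/* 1G huge page backed by 4K base pages */
#define SUBPAGES_PER_HUGE_PAGE 262144

struct fake_page {
	atomic_int refcount;	/* stands in for the head page refcount */
};

/* Old scheme: one atomic op per subpage, as try_grab_page() did per entry. */
static void grab_per_subpage(struct fake_page *head, int nr_subpages)
{
	for (int i = 0; i < nr_subpages; i++)
		atomic_fetch_add(&head->refcount, 1);
}

/* New scheme: count the subpages first, then one atomic add on the head. */
static void grab_batched(struct fake_page *head, int nr_subpages)
{
	int refs = 0;

	for (int i = 0; i < nr_subpages; i++)
		refs++;		/* record pages[i], vmas[i], etc. */

	atomic_fetch_add(&head->refcount, refs);
}

int main(void)
{
	struct fake_page head = { .refcount = 0 };

	grab_per_subpage(&head, SUBPAGES_PER_HUGE_PAGE);
	grab_batched(&head, SUBPAGES_PER_HUGE_PAGE);

	printf("refcount after both schemes: %d\n",
	       atomic_load(&head.refcount));
	return 0;
}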
Comments

Mike Kravetz Jan. 26, 2021, 2:14 a.m. UTC | #1
On 1/25/21 12:57 PM, Joao Martins wrote:
> Once follow_hugetlb_page() locks the pmd/pud, it walks all the
> subpages of the huge page and grabs a reference for each one,
> bounded by how many pages the caller can store and by the size
> of the VA range. Similar to gup-fast, have follow_hugetlb_page()
> grab the head page refcount only once, after counting all the
> subpages that are part of the just-faulted huge page.
> 
> Consequently, we reduce the number of atomic operations needed to
> pin the huge page, which improves non-fast gup() considerably:
> 
>  - 16G with 1G huge page size
>  gup_test -f /mnt/huge/file -m 16384 -r 10 -L -S -n 512 -w
> 
>  PIN_LONGTERM_BENCHMARK: ~87.6k us -> ~11k us
> 
> Signed-off-by: Joao Martins <joao.m.martins@oracle.com>
> ---
>  include/linux/mm.h |  3 +++
>  mm/gup.c           |  5 ++---
>  mm/hugetlb.c       | 43 ++++++++++++++++++++++++-------------------
>  3 files changed, 29 insertions(+), 22 deletions(-)

Thanks.  Nice, straightforward improvement.

Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>

Patch

diff --git a/include/linux/mm.h b/include/linux/mm.h
index a5d618d08506..0d793486822b 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1182,6 +1182,9 @@ static inline void get_page(struct page *page)
 }
 
 bool __must_check try_grab_page(struct page *page, unsigned int flags);
+__maybe_unused struct page *try_grab_compound_head(struct page *page, int refs,
+						   unsigned int flags);
+
 
 static inline __must_check bool try_get_page(struct page *page)
 {
diff --git a/mm/gup.c b/mm/gup.c
index 3e086b073624..ecadc80934b2 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -79,9 +79,8 @@ static inline struct page *try_get_compound_head(struct page *page, int refs)
  * considered failure, and furthermore, a likely bug in the caller, so a warning
  * is also emitted.
  */
-static __maybe_unused struct page *try_grab_compound_head(struct page *page,
-							  int refs,
-							  unsigned int flags)
+__maybe_unused struct page *try_grab_compound_head(struct page *page,
+						   int refs, unsigned int flags)
 {
 	if (flags & FOLL_GET)
 		return try_get_compound_head(page, refs);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index a6bad1f686c5..016addc8e413 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -4798,7 +4798,7 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	unsigned long vaddr = *position;
 	unsigned long remainder = *nr_pages;
 	struct hstate *h = hstate_vma(vma);
-	int err = -EFAULT;
+	int err = -EFAULT, refs;
 
 	while (vaddr < vma->vm_end && remainder) {
 		pte_t *pte;
@@ -4918,26 +4918,11 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			continue;
 		}
 
+		refs = 0;
+
 same_page:
-		if (pages) {
+		if (pages)
 			pages[i] = mem_map_offset(page, pfn_offset);
-			/*
-			 * try_grab_page() should always succeed here, because:
-			 * a) we hold the ptl lock, and b) we've just checked
-			 * that the huge page is present in the page tables. If
-			 * the huge page is present, then the tail pages must
-			 * also be present. The ptl prevents the head page and
-			 * tail pages from being rearranged in any way. So this
-			 * page must be available at this point, unless the page
-			 * refcount overflowed:
-			 */
-			if (WARN_ON_ONCE(!try_grab_page(pages[i], flags))) {
-				spin_unlock(ptl);
-				remainder = 0;
-				err = -ENOMEM;
-				break;
-			}
-		}
 
 		if (vmas)
 			vmas[i] = vma;
@@ -4946,6 +4931,7 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		++pfn_offset;
 		--remainder;
 		++i;
+		refs++;
 		if (vaddr < vma->vm_end && remainder &&
 				pfn_offset < pages_per_huge_page(h)) {
 			/*
@@ -4953,6 +4939,25 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			 * of this compound page.
 			 */
 			goto same_page;
+		} else if (pages) {
+			/*
+			 * try_grab_compound_head() should always succeed here,
+			 * because: a) we hold the ptl lock, and b) we've just
+			 * checked that the huge page is present in the page
+			 * tables. If the huge page is present, then the tail
+			 * pages must also be present. The ptl prevents the
+			 * head page and tail pages from being rearranged in
+			 * any way. So this page must be available at this
+			 * point, unless the page refcount overflowed:
+			 */
+			if (WARN_ON_ONCE(!try_grab_compound_head(pages[i-1],
+								 refs,
+								 flags))) {
+				spin_unlock(ptl);
+				remainder = 0;
+				err = -ENOMEM;
+				break;
+			}
 		}
 		spin_unlock(ptl);
 	}