[v3,2/8] mm/hugetlb: Prepare hugetlb_follow_page_mask() for FOLL_PIN

Message ID 20230623142936.268456-3-peterx@redhat.com (mailing list archive)
State New
State New
Series mm/gup: Unify hugetlb, speed up thp

Commit Message

Peter Xu June 23, 2023, 2:29 p.m. UTC
follow_page() doesn't use FOLL_PIN, and hugetlb doesn't seem to be a
target of FOLL_WRITE either.  However, add the checks anyway.

Namely, reject the followed page either when CoW is needed due to a
missing write bit, or when proper unsharing is required for R/O pins on
!AnonExclusive pages.  That brings this function closer to
follow_hugetlb_page().

Neither case mattered before, nor does it matter for now.  But it will
matter once slow-gup is switched over to use hugetlb_follow_page_mask().
It will then also matter to return -EMLINK properly, as that's the
GUP-internal API for "the caller should unshare".  The follow_page()
path doesn't really need it, though.

While at it, switch try_grab_page() to use WARN_ON_ONCE(), to make
clear that it should simply never fail here.  When an error does
happen, capture the errno instead of setting page==NULL.

Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Signed-off-by: Peter Xu <peterx@redhat.com>
---
 mm/hugetlb.c | 31 ++++++++++++++++++++-----------
 1 file changed, 20 insertions(+), 11 deletions(-)

Comments

David Hildenbrand June 26, 2023, 8:06 a.m. UTC | #1
On 23.06.23 16:29, Peter Xu wrote:
> [...]
>
> @@ -6478,8 +6472,21 @@ struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
>   	ptl = huge_pte_lock(h, mm, pte);
>   	entry = huge_ptep_get(pte);
>   	if (pte_present(entry)) {
> -		page = pte_page(entry) +
> -				((address & ~huge_page_mask(h)) >> PAGE_SHIFT);
> +		page = pte_page(entry);
> +
> +		if ((flags & FOLL_WRITE) && !huge_pte_write(entry)) {
> +			page = NULL;
> +			goto out;
> +		}
> +
> +		if (gup_must_unshare(vma, flags, page)) {
> +			/* Tell the caller to do unsharing */
> +			page = ERR_PTR(-EMLINK);
> +			goto out;
> +		}


No need to call gup_must_unshare() when the pte is writable.  Like all
other callers, and as gup_must_unshare() documents ("for which pages
that are write-protected in the page table"), gate the check on the pte
being write-protected:

if (!huge_pte_write(entry) && gup_must_unshare(vma, flags, page)) {
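
For comparison, the normal-page path already follows this pattern;
paraphrased from follow_page_pte() in mm/gup.c (a sketch, with context
lines trimmed):

	/* follow_page_pte(), paraphrased */
	if (!pte_write(pte) && gup_must_unshare(vma, flags, page)) {
		page = ERR_PTR(-EMLINK);
		goto out;
	}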


With that

Reviewed-by: David Hildenbrand <david@redhat.com>
Peter Xu June 26, 2023, 4:23 p.m. UTC | #2
On Mon, Jun 26, 2023 at 10:06:24AM +0200, David Hildenbrand wrote:
> [...]
>
> No need to call gup_must_unshare() when the pte is writable.  Like all
> other callers, and as gup_must_unshare() documents ("for which pages
> that are write-protected in the page table"), gate the check on the pte
> being write-protected:
>
> if (!huge_pte_write(entry) && gup_must_unshare(vma, flags, page)) {

Sure.

I was wondering whether we should just allow passing "write" into
gup_must_unshare().  It would be slightly weird that it returns false
directly when write is set, but hopefully it would make the callers
easier to keep consistent.  I'll leave that as-is for now, anyway.

For this one, I'll just merge the two checks into:

		if (!huge_pte_write(entry)) {
			if (flags & FOLL_WRITE) {
				page = NULL;
				goto out;
			}

			if (gup_must_unshare(vma, flags, page)) {
				/* Tell the caller to do unsharing */
				page = ERR_PTR(-EMLINK);
				goto out;
			}
		}
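
A sketch of what that hypothetical helper could look like (illustrative
only, not part of this series; the wrapper name is made up):

	/*
	 * Hypothetical wrapper, illustrative only: let callers pass the
	 * pte's write bit so the !write gating stays in one place.
	 */
	static inline bool gup_must_unshare_write(struct vm_area_struct *vma,
						  unsigned int flags,
						  struct page *page, bool write)
	{
		/* Unsharing is only a question for write-protected entries */
		if (write)
			return false;
		return gup_must_unshare(vma, flags, page);
	}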

> 
> 
> With that
> 
> Reviewed-by: David Hildenbrand <david@redhat.com>

Thanks,

Patch

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index f75f5e78ff0b..27367edf5c72 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -6462,13 +6462,7 @@  struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
 	struct page *page = NULL;
 	spinlock_t *ptl;
 	pte_t *pte, entry;
-
-	/*
-	 * FOLL_PIN is not supported for follow_page(). Ordinary GUP goes via
-	 * follow_hugetlb_page().
-	 */
-	if (WARN_ON_ONCE(flags & FOLL_PIN))
-		return NULL;
+	int ret;
 
 	hugetlb_vma_lock_read(vma);
 	pte = hugetlb_walk(vma, haddr, huge_page_size(h));
@@ -6478,8 +6472,21 @@  struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
 	ptl = huge_pte_lock(h, mm, pte);
 	entry = huge_ptep_get(pte);
 	if (pte_present(entry)) {
-		page = pte_page(entry) +
-				((address & ~huge_page_mask(h)) >> PAGE_SHIFT);
+		page = pte_page(entry);
+
+		if ((flags & FOLL_WRITE) && !huge_pte_write(entry)) {
+			page = NULL;
+			goto out;
+		}
+
+		if (gup_must_unshare(vma, flags, page)) {
+			/* Tell the caller to do unsharing */
+			page = ERR_PTR(-EMLINK);
+			goto out;
+		}
+
+		page += ((address & ~huge_page_mask(h)) >> PAGE_SHIFT);
+
 		/*
 		 * Note that page may be a sub-page, and with vmemmap
 		 * optimizations the page struct may be read only.
@@ -6489,8 +6496,10 @@  struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
 		 * try_grab_page() should always be able to get the page here,
 		 * because we hold the ptl lock and have verified pte_present().
 		 */
-		if (try_grab_page(page, flags)) {
-			page = NULL;
+		ret = try_grab_page(page, flags);
+
+		if (WARN_ON_ONCE(ret)) {
+			page = ERR_PTR(ret);
 			goto out;
 		}
 	}
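
Taken together, a caller of hugetlb_follow_page_mask() now has to
distinguish three outcomes.  A minimal sketch of the expected handling
(illustrative only; assumes the pre-series three-argument signature):

	page = hugetlb_follow_page_mask(vma, address, flags);
	if (!page) {
		/* Not mapped, or FOLL_WRITE requested on a R/O pte */
	} else if (IS_ERR(page)) {
		if (PTR_ERR(page) == -EMLINK) {
			/* R/O pin on a shared anon page: unshare first */
		} else {
			/* try_grab_page() failed; already WARNed once */
		}
	} else {
		/* Success: a reference was taken according to @flags */
	}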