
[v2,4/8] mm/gup: Cleanup next_page handling

Message ID: 20230619231044.112894-5-peterx@redhat.com
State: New
Series: mm/gup: Unify hugetlb, speed up thp

Commit Message

Peter Xu June 19, 2023, 11:10 p.m. UTC
The only path that doesn't use the generic "**pages" handling is the gate
vma.  Make it use the same path, and move the next_page label up so that
it also covers the "**pages" handling.  This prepares for THP handling of
"**pages".

Reviewed-by: Lorenzo Stoakes <lstoakes@gmail.com>
Signed-off-by: Peter Xu <peterx@redhat.com>
---
 mm/gup.c | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)
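
To make the resulting shape easier to see, here is a small userspace model of
the pattern the patch establishes.  This is not kernel code; every identifier
in it (fill_special(), fill_common(), out[], NENTRIES) is invented for the
illustration.  Both the special-case path and the common path produce a local
result and then converge on a single label that owns all output-array
bookkeeping, just as the gate-vma path and follow_page_mask() now converge on
next_page.

#include <stdio.h>
#include <stdlib.h>

#define NENTRIES 4

/* Stand-in for the gate-vma special case: fills a caller-provided slot. */
static int fill_special(unsigned long addr, unsigned long *slot)
{
	if (slot)
		*slot = addr | 1;	/* arbitrary marker value */
	return 0;
}

/* Stand-in for the common follow_page_mask() path. */
static unsigned long fill_common(unsigned long addr)
{
	return addr;
}

int main(void)
{
	unsigned long *out = calloc(NENTRIES, sizeof(*out)); /* like "**pages", may be NULL */
	unsigned long start = 0x1000;
	int i, ret = 0;

	for (i = 0; i < NENTRIES; i++, start += 0x1000) {
		unsigned long entry;

		if (i == 0) {
			/*
			 * Special path: as in the patch, fill a local
			 * variable (the kernel's "page") instead of the
			 * output array directly, then jump to the shared
			 * bookkeeping below.
			 */
			ret = fill_special(start, out ? &entry : NULL);
			if (ret)
				goto out;
			goto next_entry;
		}

		entry = fill_common(start);

next_entry:
		/* Single place that touches the output array (the moved next_page:). */
		if (out)
			out[i] = entry;
	}

	for (i = 0; i < NENTRIES; i++)
		printf("out[%d] = %#lx\n", i, out ? out[i] : 0);
out:
	free(out);
	return ret;
}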

Comments

David Hildenbrand June 20, 2023, 3:23 p.m. UTC | #1
On 20.06.23 01:10, Peter Xu wrote:
> The only path that doesn't use the generic "**pages" handling is the gate
> vma.  Make it use the same path, and move the next_page label up so that
> it also covers the "**pages" handling.  This prepares for THP handling of
> "**pages".
> 
> Reviewed-by: Lorenzo Stoakes <lstoakes@gmail.com>
> Signed-off-by: Peter Xu <peterx@redhat.com>
> ---
>   mm/gup.c | 7 +++----
>   1 file changed, 3 insertions(+), 4 deletions(-)
> 
> diff --git a/mm/gup.c b/mm/gup.c
> index 9fc9271cba8d..4a00d609033e 100644
> --- a/mm/gup.c
> +++ b/mm/gup.c
> @@ -1124,7 +1124,7 @@ static long __get_user_pages(struct mm_struct *mm,
>   			if (!vma && in_gate_area(mm, start)) {
>   				ret = get_gate_page(mm, start & PAGE_MASK,
>   						gup_flags, &vma,
> -						pages ? &pages[i] : NULL);
> +						pages ? &page : NULL);
>   				if (ret)
>   					goto out;
>   				ctx.page_mask = 0;
> @@ -1194,19 +1194,18 @@ static long __get_user_pages(struct mm_struct *mm,
>   				ret = PTR_ERR(page);
>   				goto out;
>   			}
> -
> -			goto next_page;
>   		} else if (IS_ERR(page)) {
>   			ret = PTR_ERR(page);
>   			goto out;
>   		}
> +next_page:
>   		if (pages) {
>   			pages[i] = page;
>   			flush_anon_page(vma, page, start);
>   			flush_dcache_page(page);
>   			ctx.page_mask = 0;
>   		}
> -next_page:
> +
>   		page_increm = 1 + (~(start >> PAGE_SHIFT) & ctx.page_mask);
>   		if (page_increm > nr_pages)
>   			page_increm = nr_pages;

Acked-by: David Hildenbrand <david@redhat.com>

Patch

diff --git a/mm/gup.c b/mm/gup.c
index 9fc9271cba8d..4a00d609033e 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1124,7 +1124,7 @@ static long __get_user_pages(struct mm_struct *mm,
 			if (!vma && in_gate_area(mm, start)) {
 				ret = get_gate_page(mm, start & PAGE_MASK,
 						gup_flags, &vma,
-						pages ? &pages[i] : NULL);
+						pages ? &page : NULL);
 				if (ret)
 					goto out;
 				ctx.page_mask = 0;
@@ -1194,19 +1194,18 @@ static long __get_user_pages(struct mm_struct *mm,
 				ret = PTR_ERR(page);
 				goto out;
 			}
-
-			goto next_page;
 		} else if (IS_ERR(page)) {
 			ret = PTR_ERR(page);
 			goto out;
 		}
+next_page:
 		if (pages) {
 			pages[i] = page;
 			flush_anon_page(vma, page, start);
 			flush_dcache_page(page);
 			ctx.page_mask = 0;
 		}
-next_page:
+
 		page_increm = 1 + (~(start >> PAGE_SHIFT) & ctx.page_mask);
 		if (page_increm > nr_pages)
 			page_increm = nr_pages;
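
A closing note on the "prepares for THP handling" part of the commit message:
once ctx.page_mask is non-zero, the page_increm line visible in the context
above advances over all remaining base pages of the large folio in one step.
Below is a minimal userspace check of that arithmetic, assuming 4 KiB base
pages and a PMD-sized 2 MiB THP (page_mask == 511); the numbers are
illustrative, not taken from the patch.

#include <stdio.h>

#define PAGE_SHIFT 12			/* assume 4 KiB base pages */

int main(void)
{
	/* page_mask 0 = normal page; 511 = 2 MiB THP built from 512 base pages */
	unsigned long masks[] = { 0, 511 };
	/* an address 5 base pages into a 2 MiB-aligned range */
	unsigned long start = 0x200000UL + 5 * 4096;

	for (int i = 0; i < 2; i++) {
		unsigned long page_increm =
			1 + (~(start >> PAGE_SHIFT) & masks[i]);

		printf("page_mask=%lu -> page_increm=%lu\n",
		       masks[i], page_increm);
	}
	/*
	 * Prints 1 for page_mask=0 and 507 (= 512 - 5) for page_mask=511:
	 * the number of base pages left in the huge page from "start" on.
	 */
	return 0;
}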