[v4,3/6] userfaultfd: convert copy_huge_page_from_user() to copy_folio_from_user()

Message ID 20230330134045.375163-4-zhangpeng362@huawei.com (mailing list archive)
State: New
Series: userfaultfd: convert userfaultfd functions to use folios

Commit Message

zhangpeng (AS) March 30, 2023, 1:40 p.m. UTC
From: ZhangPeng <zhangpeng362@huawei.com>

Replace copy_huge_page_from_user() with copy_folio_from_user().
copy_folio_from_user() does the same as copy_huge_page_from_user(), but
takes a folio instead of a page and derives the number of pages to copy
from the folio itself, so callers no longer pass an explicit page count.
Also rename page_kaddr to kaddr in copy_folio_from_user() as an
indentation cleanup.

Signed-off-by: ZhangPeng <zhangpeng362@huawei.com>
---
 include/linux/mm.h |  7 +++----
 mm/hugetlb.c       |  5 ++---
 mm/memory.c        | 26 ++++++++++++--------------
 mm/userfaultfd.c   |  6 ++----
 4 files changed, 19 insertions(+), 25 deletions(-)
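
For reference, a minimal before/after sketch of a caller. This is a
hypothetical helper for illustration only; the real call-site
conversions are in mm/hugetlb.c and mm/userfaultfd.c below.

#include <linux/errno.h>
#include <linux/mm.h>

/* Hypothetical caller, not part of the patch. */
static int fill_folio_from_user(struct folio *folio, unsigned long src_addr)
{
	long left;

	/* Old equivalent:
	 *   copy_huge_page_from_user(&folio->page,
	 *                            (const void __user *)src_addr,
	 *                            folio_nr_pages(folio), false);
	 */
	left = copy_folio_from_user(folio, (const void __user *)src_addr,
				    false);

	/* Like copy_from_user(): 0 on success, otherwise the number of
	 * bytes left uncopied.
	 */
	return left ? -EFAULT : 0;
}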

Comments

Sidhartha Kumar March 30, 2023, 5:18 p.m. UTC | #1
On 3/30/23 6:40 AM, Peng Zhang wrote:
> From: ZhangPeng <zhangpeng362@huawei.com>
> 
> Replace copy_huge_page_from_user() with copy_folio_from_user().
> copy_folio_from_user() does the same as copy_huge_page_from_user(), but
> takes a folio instead of a page and derives the number of pages to copy
> from the folio itself, so callers no longer pass an explicit page count.
> Also rename page_kaddr to kaddr in copy_folio_from_user() as an
> indentation cleanup.
> 
> Signed-off-by: ZhangPeng <zhangpeng362@huawei.com>

Reviewed-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>

Patch

diff --git a/include/linux/mm.h b/include/linux/mm.h
index e249208f8fbe..cf4d773ca506 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -3682,10 +3682,9 @@ extern void copy_user_huge_page(struct page *dst, struct page *src,
 				unsigned long addr_hint,
 				struct vm_area_struct *vma,
 				unsigned int pages_per_huge_page);
-extern long copy_huge_page_from_user(struct page *dst_page,
-				const void __user *usr_src,
-				unsigned int pages_per_huge_page,
-				bool allow_pagefault);
+long copy_folio_from_user(struct folio *dst_folio,
+			   const void __user *usr_src,
+			   bool allow_pagefault);
 
 /**
  * vma_is_special_huge - Are transhuge page-table entries considered special?
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 7e4a80769c9e..aade1b513474 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -6217,9 +6217,8 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
 			goto out;
 		}
 
-		ret = copy_huge_page_from_user(&folio->page,
-						(const void __user *) src_addr,
-						pages_per_huge_page(h), false);
+		ret = copy_folio_from_user(folio, (const void __user *) src_addr,
+					   false);
 
 		/* fallback to copy_from_user outside mmap_lock */
 		if (unlikely(ret)) {
diff --git a/mm/memory.c b/mm/memory.c
index c47b8991410a..9d59dad319b3 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -5854,35 +5854,33 @@ void copy_user_huge_page(struct page *dst, struct page *src,
 	process_huge_page(addr_hint, pages_per_huge_page, copy_subpage, &arg);
 }
 
-long copy_huge_page_from_user(struct page *dst_page,
-				const void __user *usr_src,
-				unsigned int pages_per_huge_page,
-				bool allow_pagefault)
+long copy_folio_from_user(struct folio *dst_folio,
+			   const void __user *usr_src,
+			   bool allow_pagefault)
 {
-	void *page_kaddr;
+	void *kaddr;
 	unsigned long i, rc = 0;
-	unsigned long ret_val = pages_per_huge_page * PAGE_SIZE;
+	unsigned int nr_pages = folio_nr_pages(dst_folio);
+	unsigned long ret_val = nr_pages * PAGE_SIZE;
 	struct page *subpage;
 
-	for (i = 0; i < pages_per_huge_page; i++) {
-		subpage = nth_page(dst_page, i);
-		page_kaddr = kmap_local_page(subpage);
+	for (i = 0; i < nr_pages; i++) {
+		subpage = folio_page(dst_folio, i);
+		kaddr = kmap_local_page(subpage);
 		if (!allow_pagefault)
 			pagefault_disable();
-		rc = copy_from_user(page_kaddr,
-				usr_src + i * PAGE_SIZE, PAGE_SIZE);
+		rc = copy_from_user(kaddr, usr_src + i * PAGE_SIZE, PAGE_SIZE);
 		if (!allow_pagefault)
 			pagefault_enable();
-		kunmap_local(page_kaddr);
+		kunmap_local(kaddr);
 
 		ret_val -= (PAGE_SIZE - rc);
 		if (rc)
 			break;
 
-		flush_dcache_page(subpage);
-
 		cond_resched();
 	}
+	flush_dcache_folio(dst_folio);
 	return ret_val;
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index efa9e1d681ee..b453a4d2a0d3 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -422,10 +422,8 @@ static __always_inline ssize_t mfill_atomic_hugetlb(
 			mmap_read_unlock(dst_mm);
 			BUG_ON(!page);
 
-			err = copy_huge_page_from_user(page,
-						(const void __user *)src_addr,
-						vma_hpagesize / PAGE_SIZE,
-						true);
+			err = copy_folio_from_user(page_folio(page),
+						   (const void __user *)src_addr, true);
 			if (unlikely(err)) {
 				err = -EFAULT;
 				goto out;
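
A closing note on the allow_pagefault argument, which keeps its meaning
across the conversion. A condensed sketch of the two calling conventions
above (illustration only, lifted from the hunks in this patch):

/* 1) With mmap_lock held (mm/hugetlb.c): pass false so the copy runs
 *    with page faults disabled; a nonzero return (bytes not copied)
 *    sends the caller to the fallback path.
 */
ret = copy_folio_from_user(folio, (const void __user *)src_addr, false);

/* 2) After mmap_read_unlock() (mm/userfaultfd.c): pass true so the
 *    copy may fault the source pages in and sleep.
 */
err = copy_folio_from_user(page_folio(page),
			   (const void __user *)src_addr, true);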