[v2,4/5] hugetlb: Use vmf_anon_prepare() instead of anon_vma_prepare()

Message ID 20240221234732.187629-5-vishal.moola@gmail.com (mailing list archive)
State New
Series Handle hugetlb faults under the VMA lock

Commit Message

Vishal Moola (Oracle) Feb. 21, 2024, 11:47 p.m. UTC
hugetlb_no_page() and hugetlb_wp() call anon_vma_prepare(). In
preparation for hugetlb to safely handle faults under the VMA lock,
use vmf_anon_prepare() here instead.

Additionally, passing hugetlb_wp() the vm_fault struct from hugetlb_fault()
works toward cleaning up the hugetlb code and function stack.

Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
---
 mm/hugetlb.c | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)
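
For context on why the callers get simpler: anon_vma_prepare() returns an errno that each caller had to translate into VM_FAULT_OOM, whereas vmf_anon_prepare() returns a vm_fault_t directly and also understands the per-VMA lock. A rough sketch of the helper, paraphrased from mm/memory.c of this era (not part of this patch):

vm_fault_t vmf_anon_prepare(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;

	/* Nothing to do if the anon_vma already exists. */
	if (likely(vma->anon_vma))
		return 0;
	/*
	 * Setting up a new anon_vma is not safe under the per-VMA
	 * lock; drop it and have the caller retry under the mmap lock.
	 */
	if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
		vma_end_read(vma);
		return VM_FAULT_RETRY;
	}
	if (__anon_vma_prepare(vma))
		return VM_FAULT_OOM;
	return 0;
}

Because the return value is already a vm_fault_t, the hugetlb callers below can propagate it unchanged, and a fault taken under the VMA lock falls back to a retry under the mmap lock instead of spuriously reporting OOM.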

Comments

Matthew Wilcox Feb. 22, 2024, 3:51 a.m. UTC | #1
On Wed, Feb 21, 2024 at 03:47:31PM -0800, Vishal Moola (Oracle) wrote:
> hugetlb_no_page() and hugetlb_wp() call anon_vma_prepare(). In
> preparation for hugetlb to safely handle faults under the VMA lock,
> use vmf_anon_prepare() here instead.
> 
> Additionally, passing hugetlb_wp() the vm_fault struct from hugetlb_fault()
> works toward cleaning up the hugetlb code and function stack.

Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>

>  static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
>  		       unsigned long address, pte_t *ptep, unsigned int flags,
> -		       struct folio *pagecache_folio, spinlock_t *ptl)
> +		       struct folio *pagecache_folio, spinlock_t *ptl,
> +		       struct vm_fault *vmf)

Is it worth removing vma, address and flags?
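
For illustration, a slimmed-down signature along those lines (hypothetical, not part of this series) might recover all three from the vm_fault:

static vm_fault_t hugetlb_wp(struct folio *pagecache_folio,
			     struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address = vmf->address;
	const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
	...
}

since vma, address, and flags all travel inside struct vm_fault already.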
Patch

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 70c5870e859e..ae8c8b3da981 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -5826,7 +5826,8 @@  static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
  */
 static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
 		       unsigned long address, pte_t *ptep, unsigned int flags,
-		       struct folio *pagecache_folio, spinlock_t *ptl)
+		       struct folio *pagecache_folio, spinlock_t *ptl,
+		       struct vm_fault *vmf)
 {
 	const bool unshare = flags & FAULT_FLAG_UNSHARE;
 	pte_t pte = huge_ptep_get(ptep);
@@ -5960,10 +5961,9 @@  static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * When the original hugepage is shared one, it does not have
 	 * anon_vma prepared.
 	 */
-	if (unlikely(anon_vma_prepare(vma))) {
-		ret = VM_FAULT_OOM;
+	ret = vmf_anon_prepare(vmf);
+	if (unlikely(ret))
 		goto out_release_all;
-	}
 
 	if (copy_user_large_folio(new_folio, old_folio, address, vma)) {
 		ret = VM_FAULT_HWPOISON_LARGE;
@@ -6203,10 +6203,10 @@  static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
 			new_pagecache_folio = true;
 		} else {
 			folio_lock(folio);
-			if (unlikely(anon_vma_prepare(vma))) {
-				ret = VM_FAULT_OOM;
+
+			ret = vmf_anon_prepare(vmf);
+			if (unlikely(ret))
 				goto backout_unlocked;
-			}
 			anon_rmap = 1;
 		}
 	} else {
@@ -6273,7 +6273,7 @@  static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
 	hugetlb_count_add(pages_per_huge_page(h), mm);
 	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
 		/* Optimization, do the COW without a second fault */
-		ret = hugetlb_wp(mm, vma, address, ptep, flags, folio, ptl);
+		ret = hugetlb_wp(mm, vma, address, ptep, flags, folio, ptl, vmf);
 	}
 
 	spin_unlock(ptl);
@@ -6496,7 +6496,7 @@  vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) {
 		if (!huge_pte_write(entry)) {
 			ret = hugetlb_wp(mm, vma, address, ptep, flags,
-					 pagecache_folio, ptl);
+					 pagecache_folio, ptl, &vmf);
 			goto out_put_page;
 		} else if (likely(flags & FAULT_FLAG_WRITE)) {
 			entry = huge_pte_mkdirty(entry);