mm/hugetlb: Unify clearing of RestoreReserve for private pages

Message ID 20221020193832.776173-1-peterx@redhat.com (mailing list archive)
State New
Series mm/hugetlb: Unify clearing of RestoreReserve for private pages

Commit Message

Peter Xu Oct. 20, 2022, 7:38 p.m. UTC
A trivial cleanup that moves clearing of RestoreReserve into the helper that
adds the anon rmap for private hugetlb mappings.  This matches the shared
case, where we only clear the bit when adding the page into the page cache,
rather than spreading the clearing around the code paths.

Cc: Mike Kravetz <mike.kravetz@oracle.com>
Signed-off-by: Peter Xu <peterx@redhat.com>
---
 mm/hugetlb.c | 14 ++++----------
 mm/rmap.c    |  2 +-
 2 files changed, 5 insertions(+), 11 deletions(-)
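
As a minimal sketch of the pattern (plain userspace C, not kernel code:
struct hpage and the field names below are illustrative stand-ins for the
real struct page and the HPageRestoreReserve machinery), the cleanup moves
flag clearing from every call site into the single insertion helper,
mirroring how shared mappings already clear the bit when the page enters
the page cache:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for struct page plus its restore-reserve flag. */
struct hpage {
	bool restore_reserve;
	bool anon;
};

/*
 * Old scheme: the helper only set up the rmap; every caller had to
 * remember to clear restore_reserve itself before calling it.
 */
static void add_new_anon_rmap_old(struct hpage *p)
{
	p->anon = true;
}

/*
 * New scheme: the insertion point owns the flag, as
 * hugepage_add_new_anon_rmap() does after this patch.
 */
static void add_new_anon_rmap_new(struct hpage *p)
{
	p->restore_reserve = false;	/* models ClearHPageRestoreReserve() */
	p->anon = true;			/* models __page_set_anon_rmap() */
}

int main(void)
{
	struct hpage a = { .restore_reserve = true };
	struct hpage b = { .restore_reserve = true };

	a.restore_reserve = false;	/* old: cleared by the caller... */
	add_new_anon_rmap_old(&a);	/* ...at every call site */

	add_new_anon_rmap_new(&b);	/* new: the helper clears it */

	printf("old: rr=%d anon=%d / new: rr=%d anon=%d\n",
	       a.restore_reserve, a.anon, b.restore_reserve, b.anon);
	return 0;
}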

Comments

Mike Kravetz Oct. 24, 2022, 8:50 p.m. UTC | #1
On 10/20/22 15:38, Peter Xu wrote:
> A trivial cleanup that moves clearing of RestoreReserve into the helper that
> adds the anon rmap for private hugetlb mappings.  This matches the shared
> case, where we only clear the bit when adding the page into the page cache,
> rather than spreading the clearing around the code paths.
> 
> Cc: Mike Kravetz <mike.kravetz@oracle.com>
> Signed-off-by: Peter Xu <peterx@redhat.com>
> ---
>  mm/hugetlb.c | 14 ++++----------
>  mm/rmap.c    |  2 +-
>  2 files changed, 5 insertions(+), 11 deletions(-)

Thanks Peter!

Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>

Patch

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 1a7dc7b2e16c..931789a8f734 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -4784,7 +4784,6 @@ hugetlb_install_page(struct vm_area_struct *vma, pte_t *ptep, unsigned long addr
 	hugepage_add_new_anon_rmap(new_page, vma, addr);
 	set_huge_pte_at(vma->vm_mm, addr, ptep, make_huge_pte(vma, new_page, 1));
 	hugetlb_count_add(pages_per_huge_page(hstate_vma(vma)), vma->vm_mm);
-	ClearHPageRestoreReserve(new_page);
 	SetHPageMigratable(new_page);
 }
 
@@ -5447,8 +5446,6 @@ static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
 	spin_lock(ptl);
 	ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
 	if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
-		ClearHPageRestoreReserve(new_page);
-
 		/* Break COW or unshare */
 		huge_ptep_clear_flush(vma, haddr, ptep);
 		mmu_notifier_invalidate_range(mm, range.start, range.end);
@@ -5743,10 +5740,9 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
 	if (!pte_same(huge_ptep_get(ptep), old_pte))
 		goto backout;
 
-	if (anon_rmap) {
-		ClearHPageRestoreReserve(page);
+	if (anon_rmap)
 		hugepage_add_new_anon_rmap(page, vma, haddr);
-	} else
+	else
 		page_dup_file_rmap(page, true);
 	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
 				&& (vma->vm_flags & VM_SHARED)));
@@ -6133,12 +6129,10 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
 	if (!huge_pte_none_mostly(huge_ptep_get(dst_pte)))
 		goto out_release_unlock;
 
-	if (page_in_pagecache) {
+	if (page_in_pagecache)
 		page_dup_file_rmap(page, true);
-	} else {
-		ClearHPageRestoreReserve(page);
+	else
 		hugepage_add_new_anon_rmap(page, dst_vma, dst_addr);
-	}
 
 	/*
 	 * For either: (1) CONTINUE on a non-shared VMA, or (2) UFFDIO_COPY
diff --git a/mm/rmap.c b/mm/rmap.c
index 9bba65b30e4d..3b2d18bbdc44 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -2571,7 +2571,7 @@ void hugepage_add_new_anon_rmap(struct page *page,
 	BUG_ON(address < vma->vm_start || address >= vma->vm_end);
 	atomic_set(compound_mapcount_ptr(page), 0);
 	atomic_set(compound_pincount_ptr(page), 0);
-
+	ClearHPageRestoreReserve(page);
 	__page_set_anon_rmap(page, vma, address, 1);
 }
 #endif /* CONFIG_HUGETLB_PAGE */
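
For reference, this is how hugepage_add_new_anon_rmap() reads once the hunk
above is applied (reconstructed from the diff; the parameter list past the
first line, which the hunk header truncates, follows mm/rmap.c of that
period).  It is now the single place where a newly mapped private hugetlb
page drops its restore-reserve flag:

void hugepage_add_new_anon_rmap(struct page *page,
				struct vm_area_struct *vma, unsigned long address)
{
	BUG_ON(address < vma->vm_start || address >= vma->vm_end);
	atomic_set(compound_mapcount_ptr(page), 0);
	atomic_set(compound_pincount_ptr(page), 0);
	ClearHPageRestoreReserve(page);
	__page_set_anon_rmap(page, vma, address, 1);
}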