[v1,3/4] mm/memory: pass folio and pte to restore_exclusive_pte()

Message ID 20250129115803.2084769-4-david@redhat.com
State New
Series mm: cleanups for device-exclusive entries (hmm)

Commit Message

David Hildenbrand Jan. 29, 2025, 11:58 a.m. UTC
Let's pass the folio and the pte to restore_exclusive_pte(), so we
can avoid repeated page_folio() and ptep_get(). To do that,
pass the pte to try_restore_exclusive_pte() and use a folio in there
already.

While at it, just avoid the "swp_entry_t entry" variable in
try_restore_exclusive_pte() and add a folio-locked check to
restore_exclusive_pte().

Signed-off-by: David Hildenbrand <david@redhat.com>
---
 mm/memory.c | 29 ++++++++++++++---------------
 1 file changed, 14 insertions(+), 15 deletions(-)
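
For orientation, a rough sketch of how the PTE value now flows through the
fork/copy path once it has been read a single time. This is a simplified
illustration pieced together from the hunks below; the
is_device_exclusive_entry() check is reproduced from the surrounding
copy_nonpresent_pte() code and is not part of this diff:

	/*
	 * Sketch only: the caller reads the PTE once and hands the value down,
	 * so try_restore_exclusive_pte() no longer calls ptep_get() a second
	 * time and restore_exclusive_pte() no longer re-derives the folio via
	 * page_folio().
	 */
	pte_t orig_pte = ptep_get(src_pte);
	swp_entry_t entry = pte_to_swp_entry(orig_pte);

	if (is_device_exclusive_entry(entry)) {
		/* Device-exclusive entries only exist for anonymous COW mappings. */
		VM_BUG_ON(!is_cow_mapping(src_vma->vm_flags));
		if (try_restore_exclusive_pte(src_vma, addr, src_pte, orig_pte))
			return -EBUSY;
		return -ENOENT;
	}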

Comments

Alistair Popple Jan. 30, 2025, 5:37 a.m. UTC | #1
On Wed, Jan 29, 2025 at 12:58:01PM +0100, David Hildenbrand wrote:
> Let's pass the folio and the pte to restore_exclusive_pte(), so we
> can avoid repeated page_folio() and ptep_get(). To do that,
> pass the pte to try_restore_exclusive_pte() and use a folio in there
> already.
> 
> While at it, just avoid the "swp_entry_t entry" variable in
> try_restore_exclusive_pte() and add a folio-locked check to
> restore_exclusive_pte().

Seems reasonable.

Reviewed-by: Alistair Popple <apopple@nvidia.com>
 

Patch

diff --git a/mm/memory.c b/mm/memory.c
index cd689cd8a7c8..46956994aaff 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -719,14 +719,13 @@ struct folio *vm_normal_folio_pmd(struct vm_area_struct *vma,
 #endif
 
 static void restore_exclusive_pte(struct vm_area_struct *vma,
-				  struct page *page, unsigned long address,
-				  pte_t *ptep)
+		struct folio *folio, struct page *page, unsigned long address,
+		pte_t *ptep, pte_t orig_pte)
 {
-	struct folio *folio = page_folio(page);
-	pte_t orig_pte;
 	pte_t pte;
 
-	orig_pte = ptep_get(ptep);
+	VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio);
+
 	pte = pte_mkold(mk_pte(page, READ_ONCE(vma->vm_page_prot)));
 	if (pte_swp_soft_dirty(orig_pte))
 		pte = pte_mksoft_dirty(pte);
@@ -756,16 +755,15 @@ static void restore_exclusive_pte(struct vm_area_struct *vma,
  * Tries to restore an exclusive pte if the page lock can be acquired without
  * sleeping.
  */
-static int
-try_restore_exclusive_pte(pte_t *src_pte, struct vm_area_struct *vma,
-			unsigned long addr)
+static int try_restore_exclusive_pte(struct vm_area_struct *vma,
+		unsigned long addr, pte_t *ptep, pte_t orig_pte)
 {
-	swp_entry_t entry = pte_to_swp_entry(ptep_get(src_pte));
-	struct page *page = pfn_swap_entry_to_page(entry);
+	struct page *page = pfn_swap_entry_to_page(pte_to_swp_entry(orig_pte));
+	struct folio *folio = page_folio(page);
 
-	if (trylock_page(page)) {
-		restore_exclusive_pte(vma, page, addr, src_pte);
-		unlock_page(page);
+	if (folio_trylock(folio)) {
+		restore_exclusive_pte(vma, folio, page, addr, ptep, orig_pte);
+		folio_unlock(folio);
 		return 0;
 	}
 
@@ -871,7 +869,7 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 		 * (ie. COW) mappings.
 		 */
 		VM_BUG_ON(!is_cow_mapping(src_vma->vm_flags));
-		if (try_restore_exclusive_pte(src_pte, src_vma, addr))
+		if (try_restore_exclusive_pte(src_vma, addr, src_pte, orig_pte))
 			return -EBUSY;
 		return -ENOENT;
 	} else if (is_pte_marker_entry(entry)) {
@@ -3979,7 +3977,8 @@ static vm_fault_t remove_device_exclusive_entry(struct vm_fault *vmf)
 	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
 				&vmf->ptl);
 	if (likely(vmf->pte && pte_same(ptep_get(vmf->pte), vmf->orig_pte)))
-		restore_exclusive_pte(vma, vmf->page, vmf->address, vmf->pte);
+		restore_exclusive_pte(vma, folio, vmf->page, vmf->address,
+				      vmf->pte, vmf->orig_pte);
 
 	if (vmf->pte)
 		pte_unmap_unlock(vmf->pte, vmf->ptl);
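
As context for the new VM_WARN_ON_FOLIO() check: both call sites hold the
folio lock around restore_exclusive_pte(). A condensed sketch of the two
callers follows; the copy-path part mirrors the hunk above, while the
fault-path locking is only paraphrased (the real remove_device_exclusive_entry()
takes a folio reference, uses a retry-capable folio lock helper and wraps the
restore in MMU notifier calls, all omitted here):

	/* Copy path: the folio lock is only taken opportunistically. */
	if (folio_trylock(folio)) {
		restore_exclusive_pte(vma, folio, page, addr, ptep, orig_pte);
		folio_unlock(folio);
		return 0;
	}
	return -EBUSY;	/* lock not available without sleeping */

	/* Fault path (paraphrased): lock the folio, then re-validate the PTE
	 * under the PTL before restoring the entry. */
	folio_lock(folio);
	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
				       &vmf->ptl);
	if (likely(vmf->pte && pte_same(ptep_get(vmf->pte), vmf->orig_pte)))
		restore_exclusive_pte(vma, folio, vmf->page, vmf->address,
				      vmf->pte, vmf->orig_pte);
	if (vmf->pte)
		pte_unmap_unlock(vmf->pte, vmf->ptl);
	folio_unlock(folio);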