[02/31] mm/migrate: remove cruft from migration_entry_wait()s

Message ID 1659568-468a-6d36-c26-6a52a335ab59@google.com (mailing list archive)
State New
Series mm: allow pte_offset_map[_lock]() to fail

Commit Message

Hugh Dickins May 22, 2023, 4:51 a.m. UTC
migration_entry_wait_on_locked() does not need to take a mapped pte
pointer, its callers can do the unmap first.  Annotate it with
__releases(ptl) to reduce sparse warnings.

Fold __migration_entry_wait_huge() into migration_entry_wait_huge().
Fold __migration_entry_wait() into migration_entry_wait(), preferring
the tighter pte_offset_map_lock() to pte_offset_map() and pte_lockptr().

Signed-off-by: Hugh Dickins <hughd@google.com>
---
 include/linux/migrate.h |  4 ++--
 include/linux/swapops.h | 17 +++--------------
 mm/filemap.c            | 13 ++++---------
 mm/migrate.c            | 37 +++++++++++++------------------------
 4 files changed, 22 insertions(+), 49 deletions(-)
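
For quick reference, the reworked migration_entry_wait() boils down to the
flow below: a condensed, comment-annotated sketch assembled from the
mm/migrate.c hunks in the patch further down, not code beyond it.

void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
			  unsigned long address)
{
	spinlock_t *ptl;
	pte_t *ptep;
	pte_t pte;
	swp_entry_t entry;

	/*
	 * Map the pte and take its lock in one call, replacing the old
	 * pte_lockptr() + pte_offset_map() + spin_lock() sequence.
	 */
	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
	pte = *ptep;
	pte_unmap(ptep);	/* last use of ptep; ptl is still held */

	if (!is_swap_pte(pte))
		goto out;
	entry = pte_to_swp_entry(pte);
	if (!is_migration_entry(entry))
		goto out;

	migration_entry_wait_on_locked(entry, ptl);	/* drops ptl */
	return;
out:
	spin_unlock(ptl);
}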

Comments

Alistair Popple May 23, 2023, 1:45 a.m. UTC | #1
Hugh Dickins <hughd@google.com> writes:

> migration_entry_wait_on_locked() does not need to take a mapped pte
> pointer, its callers can do the unmap first.  Annotate it with
> __releases(ptl) to reduce sparse warnings.

Thanks Hugh, I debated doing something similar when I reworked this to
not take a pageref. However I was unsure if the pte unmap/unlock
ordering mattered as this reverses the order of operations (from unlock
then unmap to unmap then unlock).

I never found any reason why that would be a problem though so please
add:

Reviewed-by: Alistair Popple <apopple@nvidia.com>

Hugh Dickins May 24, 2023, 1:57 a.m. UTC | #2
On Tue, 23 May 2023, Alistair Popple wrote:
> 
> Hugh Dickins <hughd@google.com> writes:
> 
> > migration_entry_wait_on_locked() does not need to take a mapped pte
> > pointer, its callers can do the unmap first.  Annotate it with
> > __releases(ptl) to reduce sparse warnings.
> 
> Thanks Hugh, I debated doing something similar when I reworked this to
> not take a pageref. However I was unsure if the pte unmap/unlock
> ordering mattered as this reverses the order of operations (from unlock
> then unmap to unmap then unlock).
> 
> I never found any reason why that would be a problem though so please
> add:
> 
> Reviewed-by: Alistair Popple <apopple@nvidia.com>

Thanks Alistair.  Right, the ordering doesn't matter: you might briefly
think that, because I'm going to put the rcu_read_unlock() into the
pte_unmap(), there's something wrong with doing that early; but you'd
quickly realize that holding on to the spinlock is good enough.

Hugh
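
To see the reordering concretely: pte_unmap_unlock() expands to
spin_unlock(ptl) followed by pte_unmap(ptep), so the old and new sequences
compare as in this illustrative sketch (names as in the patch; the "..."
elides the swap/migration entry checks):

	/* Old: via pte_unmap_unlock(), unlock first, then unmap */
	spin_lock(ptl);
	pte = *ptep;
	...
	spin_unlock(ptl);
	pte_unmap(ptep);

	/* New: unmap as soon as the pte value has been copied */
	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
	pte = *ptep;		/* last dereference of ptep */
	pte_unmap(ptep);	/* only the temporary mapping goes away */
	...
	spin_unlock(ptl);	/* ptl keeps the migration entry stable */

The temporary mapping (a kmap_local() on HIGHPTE configurations, plus an
RCU read lock later in this series) is only needed while ptep is being
dereferenced; once pte has been copied, holding the ptl is sufficient, so
unmapping early is safe.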

Patch

diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 6241a1596a75..affea3063473 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -75,8 +75,8 @@ bool isolate_movable_page(struct page *page, isolate_mode_t mode);
 
 int migrate_huge_page_move_mapping(struct address_space *mapping,
 		struct folio *dst, struct folio *src);
-void migration_entry_wait_on_locked(swp_entry_t entry, pte_t *ptep,
-				spinlock_t *ptl);
+void migration_entry_wait_on_locked(swp_entry_t entry, spinlock_t *ptl)
+		__releases(ptl);
 void folio_migrate_flags(struct folio *newfolio, struct folio *folio);
 void folio_migrate_copy(struct folio *newfolio, struct folio *folio);
 int folio_migrate_mapping(struct address_space *mapping,
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index 3a451b7afcb3..4c932cb45e0b 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -332,15 +332,9 @@ static inline bool is_migration_entry_dirty(swp_entry_t entry)
 	return false;
 }
 
-extern void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
-					spinlock_t *ptl);
 extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
 					unsigned long address);
-#ifdef CONFIG_HUGETLB_PAGE
-extern void __migration_entry_wait_huge(struct vm_area_struct *vma,
-					pte_t *ptep, spinlock_t *ptl);
 extern void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *pte);
-#endif	/* CONFIG_HUGETLB_PAGE */
 #else  /* CONFIG_MIGRATION */
 static inline swp_entry_t make_readable_migration_entry(pgoff_t offset)
 {
@@ -362,15 +356,10 @@ static inline int is_migration_entry(swp_entry_t swp)
 	return 0;
 }
 
-static inline void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
-					spinlock_t *ptl) { }
 static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
-					 unsigned long address) { }
-#ifdef CONFIG_HUGETLB_PAGE
-static inline void __migration_entry_wait_huge(struct vm_area_struct *vma,
-					       pte_t *ptep, spinlock_t *ptl) { }
-static inline void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *pte) { }
-#endif	/* CONFIG_HUGETLB_PAGE */
+					unsigned long address) { }
+static inline void migration_entry_wait_huge(struct vm_area_struct *vma,
+					pte_t *pte) { }
 static inline int is_writable_migration_entry(swp_entry_t entry)
 {
 	return 0;
diff --git a/mm/filemap.c b/mm/filemap.c
index b4c9bd368b7e..28b42ee848a4 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1359,8 +1359,6 @@ static inline int folio_wait_bit_common(struct folio *folio, int bit_nr,
 /**
  * migration_entry_wait_on_locked - Wait for a migration entry to be removed
  * @entry: migration swap entry.
- * @ptep: mapped pte pointer. Will return with the ptep unmapped. Only required
- *        for pte entries, pass NULL for pmd entries.
  * @ptl: already locked ptl. This function will drop the lock.
  *
  * Wait for a migration entry referencing the given page to be removed. This is
@@ -1369,13 +1367,13 @@ static inline int folio_wait_bit_common(struct folio *folio, int bit_nr,
  * should be called while holding the ptl for the migration entry referencing
  * the page.
  *
- * Returns after unmapping and unlocking the pte/ptl with pte_unmap_unlock().
+ * Returns after unlocking the ptl.
  *
  * This follows the same logic as folio_wait_bit_common() so see the comments
  * there.
  */
-void migration_entry_wait_on_locked(swp_entry_t entry, pte_t *ptep,
-				spinlock_t *ptl)
+void migration_entry_wait_on_locked(swp_entry_t entry, spinlock_t *ptl)
+	__releases(ptl)
 {
 	struct wait_page_queue wait_page;
 	wait_queue_entry_t *wait = &wait_page.wait;
@@ -1409,10 +1407,7 @@ void migration_entry_wait_on_locked(swp_entry_t entry, pte_t *ptep,
 	 * a valid reference to the page, and it must take the ptl to remove the
 	 * migration entry. So the page is valid until the ptl is dropped.
 	 */
-	if (ptep)
-		pte_unmap_unlock(ptep, ptl);
-	else
-		spin_unlock(ptl);
+	spin_unlock(ptl);
 
 	for (;;) {
 		unsigned int flags;
diff --git a/mm/migrate.c b/mm/migrate.c
index 01cac26a3127..3ecb7a40075f 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -296,14 +296,18 @@ void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked)
  * get to the page and wait until migration is finished.
  * When we return from this function the fault will be retried.
  */
-void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
-				spinlock_t *ptl)
+void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
+			  unsigned long address)
 {
+	spinlock_t *ptl;
+	pte_t *ptep;
 	pte_t pte;
 	swp_entry_t entry;
 
-	spin_lock(ptl);
+	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
 	pte = *ptep;
+	pte_unmap(ptep);
+
 	if (!is_swap_pte(pte))
 		goto out;
 
@@ -311,18 +315,10 @@ void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
 	if (!is_migration_entry(entry))
 		goto out;
 
-	migration_entry_wait_on_locked(entry, ptep, ptl);
+	migration_entry_wait_on_locked(entry, ptl);
 	return;
 out:
-	pte_unmap_unlock(ptep, ptl);
-}
-
-void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
-				unsigned long address)
-{
-	spinlock_t *ptl = pte_lockptr(mm, pmd);
-	pte_t *ptep = pte_offset_map(pmd, address);
-	__migration_entry_wait(mm, ptep, ptl);
+	spin_unlock(ptl);
 }
 
 #ifdef CONFIG_HUGETLB_PAGE
@@ -332,9 +328,9 @@ void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
  *
  * This function will release the vma lock before returning.
  */
-void __migration_entry_wait_huge(struct vm_area_struct *vma,
-				 pte_t *ptep, spinlock_t *ptl)
+void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *ptep)
 {
+	spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), vma->vm_mm, ptep);
 	pte_t pte;
 
 	hugetlb_vma_assert_locked(vma);
@@ -352,16 +348,9 @@ void __migration_entry_wait_huge(struct vm_area_struct *vma,
 		 * lock release in migration_entry_wait_on_locked().
 		 */
 		hugetlb_vma_unlock_read(vma);
-		migration_entry_wait_on_locked(pte_to_swp_entry(pte), NULL, ptl);
+		migration_entry_wait_on_locked(pte_to_swp_entry(pte), ptl);
 	}
 }
-
-void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *pte)
-{
-	spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), vma->vm_mm, pte);
-
-	__migration_entry_wait_huge(vma, pte, ptl);
-}
 #endif
 
 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
@@ -372,7 +361,7 @@ void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
 	ptl = pmd_lock(mm, pmd);
 	if (!is_pmd_migration_entry(*pmd))
 		goto unlock;
-	migration_entry_wait_on_locked(pmd_to_swp_entry(*pmd), NULL, ptl);
+	migration_entry_wait_on_locked(pmd_to_swp_entry(*pmd), ptl);
 	return;
 unlock:
 	spin_unlock(ptl);
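
As a footnote on the __releases(ptl) annotation mentioned in the commit
message: it only matters to sparse's lock-context checking, and compiles
away unless __CHECKER__ is defined. A minimal illustration, separate from
this patch (drop_it() is a hypothetical example function):

	/* Entered with 'lock' held; sparse expects it released on return. */
	void drop_it(spinlock_t *lock)
		__releases(lock)
	{
		spin_unlock(lock);	/* omitting this would trigger a
					 * sparse "context imbalance" warning */
	}

Putting the annotation on the prototype in migrate.h, as this patch does,
should also let sparse account for the lock at call sites.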