[RFC,1/4] mm/swap: Add swp_offset_pfn() to fetch PFN from swap entry

Message ID 20220729014041.21292-2-peterx@redhat.com (mailing list archive)
State New
Series mm: Remember young bit for migration entries

Commit Message

Peter Xu July 29, 2022, 1:40 a.m. UTC
We've got a bunch of special swap entries that store a PFN inside the
swap offset field.  To fetch the PFN, callers currently use swp_offset()
directly, assuming the offset is the PFN.

Add a helper swp_offset_pfn() to fetch the PFN instead.  It masks out
only the maximum possible width of a PFN on the host, while a
BUILD_BUG_ON() in is_pfn_swap_entry() checks against MAX_PHYSMEM_BITS
that the swap offset field is always wide enough to hold a full PFN.

One reason to do so is that we never sanitized whether the swap offset
can really fit a PFN.  Meanwhile, this patch also prepares for storing
more information inside the swp offset field in the future, after which
assuming "swp_offset(entry)" to be the PFN will no longer hold.
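
For illustration, the resulting layout is roughly the following (a
sketch only; the actual field widths are arch-dependent):

	/*
	 * swp_entry_t.val:  [ swp type ][ swp offset             ]
	 * swp offset:                   [ extra bits ][   PFN    ]
	 *                                             (SWP_PFN_BITS)
	 *
	 * SWP_PFN_BITS = MAX_PHYSMEM_BITS - PAGE_SHIFT is the widest
	 * PFN the host can have, so masking with SWP_PFN_MASK always
	 * recovers the full PFN and leaves the high offset bits free.
	 */
	pfn = swp_offset(entry) & SWP_PFN_MASK;	/* == swp_offset_pfn() */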

Convert the swp_offset() callers that fetch a PFN to swp_offset_pfn()
where appropriate.  Note that many of the existing users are not
candidates for the conversion, e.g.:

  (1) When the swap entry is not a pfn swap entry at all, or,
  (2) when we want to keep the whole swp_offset and only change the swp
      type.

The latter can happen when fork() runs into a write-migration swap entry
pte: we only want to change the migration type from write to read while
keeping the rest, so it's not "fetching the PFN" but "changing the swap
type only".  Those callers are left alone so that when more information
is stored within the swp offset it will be carried over naturally.
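
As a concrete example of (2), the fork path does roughly the following
(a sketch modeled on copy_nonpresent_pte() in mm/memory.c, trimmed):

	if (is_writable_migration_entry(entry) && is_cow_mapping(vm_flags)) {
		/*
		 * Only the swap type changes (write -> read); the whole
		 * swp_offset() is passed through untouched, so any extra
		 * bits stored in the offset are carried over as well.
		 */
		entry = make_readable_migration_entry(swp_offset(entry));
		pte = swp_entry_to_pte(entry);
		if (pte_swp_soft_dirty(*src_pte))
			pte = pte_swp_mksoft_dirty(pte);
		set_pte_at(src_mm, addr, src_pte, pte);
	}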

While at it, drop hwpoison_entry_to_pfn(), since it does exactly what
the new swp_offset_pfn() does.

Signed-off-by: Peter Xu <peterx@redhat.com>
---
 arch/arm64/mm/hugetlbpage.c |  2 +-
 include/linux/swapops.h     | 28 ++++++++++++++++++++++------
 mm/hmm.c                    |  2 +-
 mm/memory-failure.c         |  2 +-
 mm/page_vma_mapped.c        |  6 +++---
 5 files changed, 28 insertions(+), 12 deletions(-)

Comments

Huang, Ying Aug. 1, 2022, 3:13 a.m. UTC | #1
Peter Xu <peterx@redhat.com> writes:

> We've got a bunch of special swap entries that store a PFN inside the
> swap offset field.  To fetch the PFN, callers currently use swp_offset()
> directly, assuming the offset is the PFN.
>
> Add a helper swp_offset_pfn() to fetch the PFN instead.  It masks out
> only the maximum possible width of a PFN on the host, while a
> BUILD_BUG_ON() in is_pfn_swap_entry() checks against MAX_PHYSMEM_BITS
> that the swap offset field is always wide enough to hold a full PFN.
>
> One reason to do so is that we never sanitized whether the swap offset
> can really fit a PFN.  Meanwhile, this patch also prepares for storing
> more information inside the swp offset field in the future, after which
> assuming "swp_offset(entry)" to be the PFN will no longer hold.
>
> Convert the swp_offset() callers that fetch a PFN to swp_offset_pfn()
> where appropriate.  Note that many of the existing users are not
> candidates for the conversion, e.g.:
>
>   (1) When the swap entry is not a pfn swap entry at all, or,
>   (2) when we want to keep the whole swp_offset and only change the swp
>       type.
>
> The latter can happen when fork() runs into a write-migration swap entry
> pte: we only want to change the migration type from write to read while
> keeping the rest, so it's not "fetching the PFN" but "changing the swap
> type only".  Those callers are left alone so that when more information
> is stored within the swp offset it will be carried over naturally.
>
> While at it, drop hwpoison_entry_to_pfn(), since it does exactly what
> the new swp_offset_pfn() does.
>
> Signed-off-by: Peter Xu <peterx@redhat.com>
> ---
>  arch/arm64/mm/hugetlbpage.c |  2 +-
>  include/linux/swapops.h     | 28 ++++++++++++++++++++++------
>  mm/hmm.c                    |  2 +-
>  mm/memory-failure.c         |  2 +-
>  mm/page_vma_mapped.c        |  6 +++---
>  5 files changed, 28 insertions(+), 12 deletions(-)
>
> diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
> index 7430060cb0d6..f897d40821dd 100644
> --- a/arch/arm64/mm/hugetlbpage.c
> +++ b/arch/arm64/mm/hugetlbpage.c
> @@ -242,7 +242,7 @@ static inline struct folio *hugetlb_swap_entry_to_folio(swp_entry_t entry)
>  {
>  	VM_BUG_ON(!is_migration_entry(entry) && !is_hwpoison_entry(entry));
>  
> -	return page_folio(pfn_to_page(swp_offset(entry)));
> +	return page_folio(pfn_to_page(swp_offset_pfn(entry)));
>  }
>  
>  void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
> diff --git a/include/linux/swapops.h b/include/linux/swapops.h
> index a3d435bf9f97..5378f77860fb 100644
> --- a/include/linux/swapops.h
> +++ b/include/linux/swapops.h
> @@ -23,6 +23,14 @@
>  #define SWP_TYPE_SHIFT	(BITS_PER_XA_VALUE - MAX_SWAPFILES_SHIFT)
>  #define SWP_OFFSET_MASK	((1UL << SWP_TYPE_SHIFT) - 1)
>  
> +/*
> + * Definitions only for PFN swap entries (see is_pfn_swap_entry()).  To
> + * store PFN, we only need SWP_PFN_BITS bits.  Each of the pfn swap entries
> + * can use the extra bits to store other information besides PFN.
> + */
> +#define SWP_PFN_BITS			(MAX_PHYSMEM_BITS - PAGE_SHIFT)
> +#define SWP_PFN_MASK			((1UL << SWP_PFN_BITS) - 1)
> +
>  /* Clear all flags but only keep swp_entry_t related information */
>  static inline pte_t pte_swp_clear_flags(pte_t pte)
>  {
> @@ -64,6 +72,16 @@ static inline pgoff_t swp_offset(swp_entry_t entry)
>  	return entry.val & SWP_OFFSET_MASK;
>  }
>  
> +/*
> + * This should only be called upon a pfn swap entry to get the PFN stored
> + * in the swap entry.  Please refers to is_pfn_swap_entry() for definition
> + * of pfn swap entry.
> + */
> +static inline unsigned long swp_offset_pfn(swp_entry_t entry)
> +{

Would it be good to call is_pfn_swap_entry() here as a debug check that
can be compiled out of production kernels?

> +	return swp_offset(entry) & SWP_PFN_MASK;
> +}
> +
>  /* check whether a pte points to a swap entry */
>  static inline int is_swap_pte(pte_t pte)
>  {
> @@ -369,7 +387,7 @@ static inline int pte_none_mostly(pte_t pte)
>  
>  static inline struct page *pfn_swap_entry_to_page(swp_entry_t entry)
>  {
> -	struct page *p = pfn_to_page(swp_offset(entry));
> +	struct page *p = pfn_to_page(swp_offset_pfn(entry));
>  
>  	/*
>  	 * Any use of migration entries may only occur while the
> @@ -387,6 +405,9 @@ static inline struct page *pfn_swap_entry_to_page(swp_entry_t entry)
>   */
>  static inline bool is_pfn_swap_entry(swp_entry_t entry)
>  {
> +	/* Make sure the swp offset can always store the needed fields */
> +	BUILD_BUG_ON(SWP_TYPE_SHIFT < SWP_PFN_BITS);

	BUILD_BUG_ON(SWP_TYPE_SHIFT <= SWP_PFN_BITS);

?

Best Regards,
Huang, Ying

> +
>  	return is_migration_entry(entry) || is_device_private_entry(entry) ||
>  	       is_device_exclusive_entry(entry);
>  }
> @@ -475,11 +496,6 @@ static inline int is_hwpoison_entry(swp_entry_t entry)
>  	return swp_type(entry) == SWP_HWPOISON;
>  }
>  
> -static inline unsigned long hwpoison_entry_to_pfn(swp_entry_t entry)
> -{
> -	return swp_offset(entry);
> -}
> -
>  static inline void num_poisoned_pages_inc(void)
>  {
>  	atomic_long_inc(&num_poisoned_pages);
> diff --git a/mm/hmm.c b/mm/hmm.c
> index f2aa63b94d9b..3850fb625dda 100644
> --- a/mm/hmm.c
> +++ b/mm/hmm.c
> @@ -253,7 +253,7 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
>  			cpu_flags = HMM_PFN_VALID;
>  			if (is_writable_device_private_entry(entry))
>  				cpu_flags |= HMM_PFN_WRITE;
> -			*hmm_pfn = swp_offset(entry) | cpu_flags;
> +			*hmm_pfn = swp_offset_pfn(entry) | cpu_flags;
>  			return 0;
>  		}
>  
> diff --git a/mm/memory-failure.c b/mm/memory-failure.c
> index cc6fc9be8d22..e451219124dd 100644
> --- a/mm/memory-failure.c
> +++ b/mm/memory-failure.c
> @@ -632,7 +632,7 @@ static int check_hwpoisoned_entry(pte_t pte, unsigned long addr, short shift,
>  		swp_entry_t swp = pte_to_swp_entry(pte);
>  
>  		if (is_hwpoison_entry(swp))
> -			pfn = hwpoison_entry_to_pfn(swp);
> +			pfn = swp_offset_pfn(swp);
>  	}
>  
>  	if (!pfn || pfn != poisoned_pfn)
> diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
> index 8e9e574d535a..93e13fc17d3c 100644
> --- a/mm/page_vma_mapped.c
> +++ b/mm/page_vma_mapped.c
> @@ -86,7 +86,7 @@ static bool check_pte(struct page_vma_mapped_walk *pvmw)
>  		    !is_device_exclusive_entry(entry))
>  			return false;
>  
> -		pfn = swp_offset(entry);
> +		pfn = swp_offset_pfn(entry);
>  	} else if (is_swap_pte(*pvmw->pte)) {
>  		swp_entry_t entry;
>  
> @@ -96,7 +96,7 @@ static bool check_pte(struct page_vma_mapped_walk *pvmw)
>  		    !is_device_exclusive_entry(entry))
>  			return false;
>  
> -		pfn = swp_offset(entry);
> +		pfn = swp_offset_pfn(entry);
>  	} else {
>  		if (!pte_present(*pvmw->pte))
>  			return false;
> @@ -221,7 +221,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
>  					return not_found(pvmw);
>  				entry = pmd_to_swp_entry(pmde);
>  				if (!is_migration_entry(entry) ||
> -				    !check_pmd(swp_offset(entry), pvmw))
> +				    !check_pmd(swp_offset_pfn(entry), pvmw))
>  					return not_found(pvmw);
>  				return true;
>  			}
Peter Xu Aug. 1, 2022, 10:29 p.m. UTC | #2
On Mon, Aug 01, 2022 at 11:13:58AM +0800, Huang, Ying wrote:
> > +/*
> > + * This should only be called upon a pfn swap entry to get the PFN stored
> > + * in the swap entry.  Please refers to is_pfn_swap_entry() for definition
> > + * of pfn swap entry.
> > + */
> > +static inline unsigned long swp_offset_pfn(swp_entry_t entry)
> > +{
> 
> Would it be good to call is_pfn_swap_entry() here as a debug check that
> can be compiled out of production kernels?

Sure, I'll add a VM_BUG_ON() there in the next spin.
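
Something like this (a sketch of the next spin, untested):

	static inline unsigned long swp_offset_pfn(swp_entry_t entry)
	{
		/* Debug-only check; compiled out without CONFIG_DEBUG_VM */
		VM_BUG_ON(!is_pfn_swap_entry(entry));
		return swp_offset(entry) & SWP_PFN_MASK;
	}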

> 
> > +	return swp_offset(entry) & SWP_PFN_MASK;
> > +}
> > +
> >  /* check whether a pte points to a swap entry */
> >  static inline int is_swap_pte(pte_t pte)
> >  {
> > @@ -369,7 +387,7 @@ static inline int pte_none_mostly(pte_t pte)
> >  
> >  static inline struct page *pfn_swap_entry_to_page(swp_entry_t entry)
> >  {
> > -	struct page *p = pfn_to_page(swp_offset(entry));
> > +	struct page *p = pfn_to_page(swp_offset_pfn(entry));
> >  
> >  	/*
> >  	 * Any use of migration entries may only occur while the
> > @@ -387,6 +405,9 @@ static inline struct page *pfn_swap_entry_to_page(swp_entry_t entry)
> >   */
> >  static inline bool is_pfn_swap_entry(swp_entry_t entry)
> >  {
> > +	/* Make sure the swp offset can always store the needed fields */
> > +	BUILD_BUG_ON(SWP_TYPE_SHIFT < SWP_PFN_BITS);
> 
> 	BUILD_BUG_ON(SWP_TYPE_SHIFT <= SWP_PFN_BITS);

Logically it's okay to have SWP_TYPE_SHIFT==SWP_PFN_BITS?
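
For example, with common x86_64 values (numbers illustrative only):

	MAX_PHYSMEM_BITS = 46, PAGE_SHIFT = 12
	  -> SWP_PFN_BITS   = 46 - 12 = 34
	BITS_PER_XA_VALUE = 63, MAX_SWAPFILES_SHIFT = 5
	  -> SWP_TYPE_SHIFT = 63 - 5  = 58

The offset field is SWP_TYPE_SHIFT bits wide, so equality would mean the
offset stores exactly a full PFN with no spare bits, which is still
valid; hence '<' rather than '<=' in the BUILD_BUG_ON().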

Thanks,
Huang, Ying Aug. 2, 2022, 1:22 a.m. UTC | #3
Peter Xu <peterx@redhat.com> writes:

> On Mon, Aug 01, 2022 at 11:13:58AM +0800, Huang, Ying wrote:
>> > +/*
>> > + * This should only be called upon a pfn swap entry to get the PFN stored
>> > + * in the swap entry.  Please refers to is_pfn_swap_entry() for definition
>> > + * of pfn swap entry.
>> > + */
>> > +static inline unsigned long swp_offset_pfn(swp_entry_t entry)
>> > +{
>> 
>> Would it be good to call is_pfn_swap_entry() here as a debug check that
>> can be compiled out of production kernels?
>
> Sure, I'll add a VM_BUG_ON() there in the next spin.
>
>> 
>> > +	return swp_offset(entry) & SWP_PFN_MASK;
>> > +}
>> > +
>> >  /* check whether a pte points to a swap entry */
>> >  static inline int is_swap_pte(pte_t pte)
>> >  {
>> > @@ -369,7 +387,7 @@ static inline int pte_none_mostly(pte_t pte)
>> >  
>> >  static inline struct page *pfn_swap_entry_to_page(swp_entry_t entry)
>> >  {
>> > -	struct page *p = pfn_to_page(swp_offset(entry));
>> > +	struct page *p = pfn_to_page(swp_offset_pfn(entry));
>> >  
>> >  	/*
>> >  	 * Any use of migration entries may only occur while the
>> > @@ -387,6 +405,9 @@ static inline struct page *pfn_swap_entry_to_page(swp_entry_t entry)
>> >   */
>> >  static inline bool is_pfn_swap_entry(swp_entry_t entry)
>> >  {
>> > +	/* Make sure the swp offset can always store the needed fields */
>> > +	BUILD_BUG_ON(SWP_TYPE_SHIFT < SWP_PFN_BITS);
>> 
>> 	BUILD_BUG_ON(SWP_TYPE_SHIFT <= SWP_PFN_BITS);
>
> Logically it's okay to have SWP_TYPE_SHIFT==SWP_PFN_BITS?

Sorry, I misunderstood the original code.  Please ignore this comment.

Best Regards,
Huang, Ying