
[v3,01/15] arm64/mm: Make set_ptes() robust when OAs cross 48-bit boundary

Message ID 20240129124649.189745-2-david@redhat.com (mailing list archive)
State New, archived
Series mm/memory: optimize fork() with PTE-mapped THP

Commit Message

David Hildenbrand Jan. 29, 2024, 12:46 p.m. UTC
From: Ryan Roberts <ryan.roberts@arm.com>

Since the high bits [51:48] of an OA are not stored contiguously in the
PTE, there is a theoretical bug in set_ptes(), which just adds PAGE_SIZE
to the pte to get the pte with the next pfn. This works until the pfn
crosses the 48-bit boundary, at which point we overflow into the upper
attributes.

Of course one could argue (and Matthew Wilcox has :) that we will never
see a folio cross this boundary because we only allow naturally aligned
power-of-2 allocations, so this would require a half-petabyte folio. So
it's only a theoretical bug. But it's better that the code is robust
regardless.

As part of the fix, I've implemented pte_next_pfn() as an opt-in core-mm
interface. It is now available to the core-mm, where it will shortly be
needed to support the forthcoming fork()-batching optimizations.

Link: https://lkml.kernel.org/r/20240125173534.1659317-1-ryan.roberts@arm.com
Fixes: 4a169d61c2ed ("arm64: implement the new page table range API")
Closes: https://lore.kernel.org/linux-mm/fdaeb9a5-d890-499a-92c8-d171df43ad01@arm.com/
Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Signed-off-by: David Hildenbrand <david@redhat.com>
---
 arch/arm64/include/asm/pgtable.h | 28 +++++++++++++++++-----------
 1 file changed, 17 insertions(+), 11 deletions(-)
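
To make the failure mode concrete, here is a small userspace model of
the 64K-granule, 52-bit OA layout, in which OA[47:16] sit in PTE[47:16]
but OA[51:48] sit in PTE[15:12]. Everything below (model_pfn_pte(), the
attribute constant) is an illustrative stand-in rather than the kernel's
real macros; it only shows how a plain "+= PAGE_SIZE" carries past bit
47 into the upper attributes instead of into OA[51:48]:

#include <inttypes.h>
#include <stdio.h>

#define PAGE_SHIFT	16			/* 64K pages */
#define PAGE_SIZE	(1ULL << PAGE_SHIFT)

/* Model only: pack OA[47:16] into PTE[47:16], OA[51:48] into PTE[15:12]. */
static uint64_t model_pfn_pte(uint64_t pfn, uint64_t prot)
{
	uint64_t oa = pfn << PAGE_SHIFT;

	return (oa & 0x0000ffffffff0000ULL) |	/* OA[47:16] */
	       ((oa >> 36) & 0xf000ULL) |	/* OA[51:48] -> PTE[15:12] */
	       prot;
}

int main(void)
{
	uint64_t prot = 3ULL << 53;		/* stand-in attribute bits (PXN/UXN-like) */
	uint64_t pfn = (1ULL << 32) - 1;	/* last pfn below the 48-bit OA boundary */
	uint64_t pte = model_pfn_pte(pfn, prot);

	/* What the old set_ptes() did: the carry lands in bit 48. */
	printf("naive +PAGE_SIZE: %016" PRIx64 "\n", pte + PAGE_SIZE);
	/* Re-encoding the pfn instead: OA[48] lands in PTE[12]. */
	printf("re-encoded pfn+1: %016" PRIx64 "\n", model_pfn_pte(pfn + 1, prot));
	return 0;
}

In this model the naive result is 0x0061000000000000, with bit 48
(upper-attribute territory) corrupted; re-encoding yields
0x0060000000001000, the pfn increment correctly captured in PTE[15:12].
That re-encoding is exactly what pte_next_pfn() does via pfn_pte().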

Comments

Mike Rapoport Feb. 8, 2024, 6:10 a.m. UTC | #1
On Mon, Jan 29, 2024 at 01:46:35PM +0100, David Hildenbrand wrote:
> From: Ryan Roberts <ryan.roberts@arm.com>
> 
> Since the high bits [51:48] of an OA are not stored contiguously in the
> PTE, there is a theoretical bug in set_ptes(), which just adds PAGE_SIZE
> to the pte to get the pte with the next pfn. This works until the pfn
> crosses the 48-bit boundary, at which point we overflow into the upper
> attributes.
> 
> Of course one could argue (and Matthew Wilcox has :) that we will never
> see a folio cross this boundary because we only allow naturally aligned
> power-of-2 allocations, so this would require a half-petabyte folio. So
> it's only a theoretical bug. But it's better that the code is robust
> regardless.
> 
> As part of the fix, I've implemented pte_next_pfn() as an opt-in core-mm
> interface. It is now available to the core-mm, where it will shortly be
> needed to support the forthcoming fork()-batching optimizations.
> 
> Link: https://lkml.kernel.org/r/20240125173534.1659317-1-ryan.roberts@arm.com
> Fixes: 4a169d61c2ed ("arm64: implement the new page table range API")
> Closes: https://lore.kernel.org/linux-mm/fdaeb9a5-d890-499a-92c8-d171df43ad01@arm.com/
> Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
> Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
> Reviewed-by: David Hildenbrand <david@redhat.com>
> Signed-off-by: David Hildenbrand <david@redhat.com>

Reviewed-by: Mike Rapoport (IBM) <rppt@kernel.org>

> ---
>  arch/arm64/include/asm/pgtable.h | 28 +++++++++++++++++-----------
>  1 file changed, 17 insertions(+), 11 deletions(-)
> 
> diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
> index b50270107e2f..9428801c1040 100644
> --- a/arch/arm64/include/asm/pgtable.h
> +++ b/arch/arm64/include/asm/pgtable.h
> @@ -341,6 +341,22 @@ static inline void __sync_cache_and_tags(pte_t pte, unsigned int nr_pages)
>  		mte_sync_tags(pte, nr_pages);
>  }
>  
> +/*
> + * Select all bits except the pfn
> + */
> +static inline pgprot_t pte_pgprot(pte_t pte)
> +{
> +	unsigned long pfn = pte_pfn(pte);
> +
> +	return __pgprot(pte_val(pfn_pte(pfn, __pgprot(0))) ^ pte_val(pte));
> +}
> +
> +#define pte_next_pfn pte_next_pfn
> +static inline pte_t pte_next_pfn(pte_t pte)
> +{
> +	return pfn_pte(pte_pfn(pte) + 1, pte_pgprot(pte));
> +}
> +
>  static inline void set_ptes(struct mm_struct *mm,
>  			    unsigned long __always_unused addr,
>  			    pte_t *ptep, pte_t pte, unsigned int nr)
> @@ -354,7 +370,7 @@ static inline void set_ptes(struct mm_struct *mm,
>  		if (--nr == 0)
>  			break;
>  		ptep++;
> -		pte_val(pte) += PAGE_SIZE;
> +		pte = pte_next_pfn(pte);
>  	}
>  }
>  #define set_ptes set_ptes
> @@ -433,16 +449,6 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte)
>  	return clear_pte_bit(pte, __pgprot(PTE_SWP_EXCLUSIVE));
>  }
>  
> -/*
> - * Select all bits except the pfn
> - */
> -static inline pgprot_t pte_pgprot(pte_t pte)
> -{
> -	unsigned long pfn = pte_pfn(pte);
> -
> -	return __pgprot(pte_val(pfn_pte(pfn, __pgprot(0))) ^ pte_val(pte));
> -}
> -
>  #ifdef CONFIG_NUMA_BALANCING
>  /*
>   * See the comment in include/linux/pgtable.h
> -- 
> 2.43.0
> 
>
David Hildenbrand Feb. 9, 2024, 10:36 p.m. UTC | #2
On 08.02.24 07:10, Mike Rapoport wrote:
> On Mon, Jan 29, 2024 at 01:46:35PM +0100, David Hildenbrand wrote:
>> From: Ryan Roberts <ryan.roberts@arm.com>
>>
>> Since the high bits [51:48] of an OA are not stored contiguously in the
>> PTE, there is a theoretical bug in set_ptes(), which just adds PAGE_SIZE
>> to the pte to get the pte with the next pfn. This works until the pfn
>> crosses the 48-bit boundary, at which point we overflow into the upper
>> attributes.
>>
>> Of course one could argue (and Matthew Wilcox has :) that we will never
>> see a folio cross this boundary because we only allow naturally aligned
>> power-of-2 allocations, so this would require a half-petabyte folio. So
>> it's only a theoretical bug. But it's better that the code is robust
>> regardless.
>>
>> As part of the fix, I've implemented pte_next_pfn() as an opt-in core-mm
>> interface. It is now available to the core-mm, where it will shortly be
>> needed to support the forthcoming fork()-batching optimizations.
>>
>> Link: https://lkml.kernel.org/r/20240125173534.1659317-1-ryan.roberts@arm.com
>> Fixes: 4a169d61c2ed ("arm64: implement the new page table range API")
>> Closes: https://lore.kernel.org/linux-mm/fdaeb9a5-d890-499a-92c8-d171df43ad01@arm.com/
>> Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
>> Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
>> Reviewed-by: David Hildenbrand <david@redhat.com>
>> Signed-off-by: David Hildenbrand <david@redhat.com>
> 
> Reviewed-by: Mike Rapoport (IBM) <rppt@kernel.org>

Thanks for the review Mike, appreciated!

Patch

diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index b50270107e2f..9428801c1040 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -341,6 +341,22 @@ static inline void __sync_cache_and_tags(pte_t pte, unsigned int nr_pages)
 		mte_sync_tags(pte, nr_pages);
 }
 
+/*
+ * Select all bits except the pfn
+ */
+static inline pgprot_t pte_pgprot(pte_t pte)
+{
+	unsigned long pfn = pte_pfn(pte);
+
+	return __pgprot(pte_val(pfn_pte(pfn, __pgprot(0))) ^ pte_val(pte));
+}
+
+#define pte_next_pfn pte_next_pfn
+static inline pte_t pte_next_pfn(pte_t pte)
+{
+	return pfn_pte(pte_pfn(pte) + 1, pte_pgprot(pte));
+}
+
 static inline void set_ptes(struct mm_struct *mm,
 			    unsigned long __always_unused addr,
 			    pte_t *ptep, pte_t pte, unsigned int nr)
@@ -354,7 +370,7 @@ static inline void set_ptes(struct mm_struct *mm,
 		if (--nr == 0)
 			break;
 		ptep++;
-		pte_val(pte) += PAGE_SIZE;
+		pte = pte_next_pfn(pte);
 	}
 }
 #define set_ptes set_ptes
@@ -433,16 +449,6 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte)
 	return clear_pte_bit(pte, __pgprot(PTE_SWP_EXCLUSIVE));
 }
 
-/*
- * Select all bits except the pfn
- */
-static inline pgprot_t pte_pgprot(pte_t pte)
-{
-	unsigned long pfn = pte_pfn(pte);
-
-	return __pgprot(pte_val(pfn_pte(pfn, __pgprot(0))) ^ pte_val(pte));
-}
-
 #ifdef CONFIG_NUMA_BALANCING
 /*
  * See the comment in include/linux/pgtable.h
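
Two details of the patch above are worth spelling out. pte_pgprot()
isolates the attribute bits with an XOR rather than a fixed mask:
pfn_pte(pfn, __pgprot(0)) rebuilds a pte containing only the (possibly
non-contiguously encoded) pfn bits, and XOR-ing that against the
original pte clears exactly those bits, leaving the attributes. Since
the pfn bits are regenerated through pfn_pte(), the trick stays correct
no matter where the architecture places the OA bits.

The "#define pte_next_pfn pte_next_pfn" is the usual opt-in
arrangement: core-mm can guard a generic definition with #ifndef, so
only architectures with a non-contiguous pfn encoding need to override
it. As a sketch of what such a generic fallback could look like (an
assumption about the rest of the series, not a quote from it), using
PFN_PTE_SHIFT from include/linux/pgtable.h:

#ifndef pte_next_pfn
static inline pte_t pte_next_pfn(pte_t pte)
{
	/* Valid only where the pfn occupies one contiguous bit field. */
	return __pte(pte_val(pte) + (1UL << PFN_PTE_SHIFT));
}
#endif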