
[RFC,05/14] mm/slub: create folio_set/clear_slab helpers

Message ID: 20230915105933.495735-6-matteorizzo@google.com (mailing list archive)
State: Handled Elsewhere
Series: Prevent cross-cache attacks in the SLUB allocator

Commit Message

Matteo Rizzo Sept. 15, 2023, 10:59 a.m. UTC
From: Jann Horn <jannh@google.com>

This is a refactoring in preparation for SLAB_VIRTUAL. Extract this code
into separate functions so that it's not duplicated in the code that
allocates and frees pages with SLAB_VIRTUAL enabled.

Signed-off-by: Jann Horn <jannh@google.com>
Co-developed-by: Matteo Rizzo <matteorizzo@google.com>
Signed-off-by: Matteo Rizzo <matteorizzo@google.com>
---
 mm/slub.c | 32 ++++++++++++++++++++++----------
 1 file changed, 22 insertions(+), 10 deletions(-)

Comments

Kees Cook Sept. 15, 2023, 9:02 p.m. UTC | #1
On Fri, Sep 15, 2023 at 10:59:24AM +0000, Matteo Rizzo wrote:
> From: Jann Horn <jannh@google.com>
> 
> This is a refactoring in preparation for SLAB_VIRTUAL. Extract this code
> into separate functions so that it's not duplicated in the code that
> allocates and frees pages with SLAB_VIRTUAL enabled.
> 
> Signed-off-by: Jann Horn <jannh@google.com>
> Co-developed-by: Matteo Rizzo <matteorizzo@google.com>
> Signed-off-by: Matteo Rizzo <matteorizzo@google.com>
> ---
>  mm/slub.c | 32 ++++++++++++++++++++++----------
>  1 file changed, 22 insertions(+), 10 deletions(-)
> 
> diff --git a/mm/slub.c b/mm/slub.c
> index ad33d9e1601d..9b87afade125 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -1849,6 +1849,26 @@ static void *setup_object(struct kmem_cache *s, void *object)
>  /*
>   * Slab allocation and freeing
>   */
> +
> +static void folio_set_slab(struct folio *folio, struct slab *slab)
> +{
> +	__folio_set_slab(folio);
> +	/* Make the flag visible before any changes to folio->mapping */
> +	smp_wmb();
> +
> +	if (folio_is_pfmemalloc(folio))
> +		slab_set_pfmemalloc(slab);
> +}
> +
> +static void folio_clear_slab(struct folio *folio, struct slab *slab)
> +{
> +	__slab_clear_pfmemalloc(slab);
> +	folio->mapping = NULL;
> +	/* Make the mapping reset visible before clearing the flag */
> +	smp_wmb();
> +	__folio_clear_slab(folio);
> +}

Perhaps these should be explicitly marked as inlines?
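
[Editorial note: as a concrete illustration of that suggestion, the two
helpers with an explicit inline hint would read as follows. This is a
sketch only, not code from the posted series:]

	static inline void folio_set_slab(struct folio *folio, struct slab *slab)
	{
		__folio_set_slab(folio);
		/* Make the flag visible before any changes to folio->mapping */
		smp_wmb();

		if (folio_is_pfmemalloc(folio))
			slab_set_pfmemalloc(slab);
	}

	static inline void folio_clear_slab(struct folio *folio, struct slab *slab)
	{
		__slab_clear_pfmemalloc(slab);
		folio->mapping = NULL;
		/* Make the mapping reset visible before clearing the flag */
		smp_wmb();
		__folio_clear_slab(folio);
	}

[In practice GCC and Clang will typically inline small static functions
with few callers regardless, so the keyword mainly documents intent, as
alloc_slab_page() in the same file already does.]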

> +
>  static inline struct slab *alloc_slab_page(gfp_t flags, int node,
>  		struct kmem_cache_order_objects oo)
>  {
> @@ -1865,11 +1885,7 @@ static inline struct slab *alloc_slab_page(gfp_t flags, int node,
>  		return NULL;
>  
>  	slab = folio_slab(folio);
> -	__folio_set_slab(folio);
> -	/* Make the flag visible before any changes to folio->mapping */
> -	smp_wmb();
> -	if (folio_is_pfmemalloc(folio))
> -		slab_set_pfmemalloc(slab);
> +	folio_set_slab(folio, slab);
>  
>  	return slab;
>  }
> @@ -2067,11 +2083,7 @@ static void __free_slab(struct kmem_cache *s, struct slab *slab)
>  	int order = folio_order(folio);
>  	int pages = 1 << order;
>  
> -	__slab_clear_pfmemalloc(slab);
> -	folio->mapping = NULL;
> -	/* Make the mapping reset visible before clearing the flag */
> -	smp_wmb();
> -	__folio_clear_slab(folio);
> +	folio_clear_slab(folio, slab);
>  	mm_account_reclaimed_pages(pages);
>  	unaccount_slab(slab, order, s);
>  	__free_pages(&folio->page, order);
> -- 
> 2.42.0.459.ge4e396fd5e-goog

Otherwise this is a straight function extraction.

Reviewed-by: Kees Cook <keescook@chromium.org>
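
[Editorial note: for context on the barriers these helpers encapsulate,
folio_set_slab() publishes the PG_slab flag before folio->mapping is
repurposed as slab metadata, and folio_clear_slab() resets the mapping
before the flag is cleared. The following is an illustrative sketch of
the reader-side pairing, not actual kernel code; folio_mapping_sketch()
is a hypothetical inspection site invented for this example:]

	/*
	 * Illustrative only: a hypothetical reader pairing with the
	 * smp_wmb() in folio_set_slab().
	 * Writer order: set PG_slab, smp_wmb(), write folio->mapping.
	 * Reader order: read folio->mapping, smp_rmb(), test PG_slab.
	 * If the mapping value was stored after the writer's barrier,
	 * the reader is guaranteed to also see PG_slab set, so it will
	 * not treat slab metadata as a struct address_space pointer.
	 */
	static struct address_space *folio_mapping_sketch(struct folio *folio)
	{
		struct address_space *mapping = READ_ONCE(folio->mapping);

		smp_rmb();	/* pairs with smp_wmb() in folio_set_slab() */
		if (folio_test_slab(folio))
			return NULL;	/* ->mapping holds slab metadata */

		return mapping;
	}

[folio_clear_slab() gives the mirror-image guarantee on the free side:
a reader that tests the flag first, again with a matching smp_rmb(),
and sees PG_slab already cleared cannot observe stale slab metadata in
->mapping, because the NULL store is ordered before the flag clear.]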

Patch

diff --git a/mm/slub.c b/mm/slub.c
index ad33d9e1601d..9b87afade125 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1849,6 +1849,26 @@ static void *setup_object(struct kmem_cache *s, void *object)
 /*
  * Slab allocation and freeing
  */
+
+static void folio_set_slab(struct folio *folio, struct slab *slab)
+{
+	__folio_set_slab(folio);
+	/* Make the flag visible before any changes to folio->mapping */
+	smp_wmb();
+
+	if (folio_is_pfmemalloc(folio))
+		slab_set_pfmemalloc(slab);
+}
+
+static void folio_clear_slab(struct folio *folio, struct slab *slab)
+{
+	__slab_clear_pfmemalloc(slab);
+	folio->mapping = NULL;
+	/* Make the mapping reset visible before clearing the flag */
+	smp_wmb();
+	__folio_clear_slab(folio);
+}
+
 static inline struct slab *alloc_slab_page(gfp_t flags, int node,
 		struct kmem_cache_order_objects oo)
 {
@@ -1865,11 +1885,7 @@ static inline struct slab *alloc_slab_page(gfp_t flags, int node,
 		return NULL;
 
 	slab = folio_slab(folio);
-	__folio_set_slab(folio);
-	/* Make the flag visible before any changes to folio->mapping */
-	smp_wmb();
-	if (folio_is_pfmemalloc(folio))
-		slab_set_pfmemalloc(slab);
+	folio_set_slab(folio, slab);
 
 	return slab;
 }
@@ -2067,11 +2083,7 @@ static void __free_slab(struct kmem_cache *s, struct slab *slab)
 	int order = folio_order(folio);
 	int pages = 1 << order;
 
-	__slab_clear_pfmemalloc(slab);
-	folio->mapping = NULL;
-	/* Make the mapping reset visible before clearing the flag */
-	smp_wmb();
-	__folio_clear_slab(folio);
+	folio_clear_slab(folio, slab);
 	mm_account_reclaimed_pages(pages);
 	unaccount_slab(slab, order, s);
 	__free_pages(&folio->page, order);
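
[Editorial note: the payoff of the extraction is visible at the call
sites, where a second allocation path only needs the same two helper
calls. Below is a hypothetical sketch of how a SLAB_VIRTUAL-style path
could reuse them; alloc_slab_folio_virtual() is an invented name, and
the real code arrives in later patches of this series:]

	/*
	 * Hypothetical sketch: a SLAB_VIRTUAL-style allocation path
	 * reusing folio_set_slab() instead of duplicating the
	 * flag/barrier sequence. alloc_slab_folio_virtual() is an
	 * invented placeholder, not a function from this series.
	 */
	static struct slab *alloc_slab_page_virtual_sketch(gfp_t flags, int node,
			struct kmem_cache_order_objects oo)
	{
		struct folio *folio = alloc_slab_folio_virtual(flags, node, oo);
		struct slab *slab;

		if (unlikely(!folio))
			return NULL;

		slab = folio_slab(folio);
		folio_set_slab(folio, slab);	/* same helper as alloc_slab_page() */

		return slab;
	}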