
[RFC,1/2] mm/slub: perform free consistency checks before call_rcu

Message ID 20220826090912.11292-1-vbabka@suse.cz (mailing list archive)
State New, archived
Series [RFC,1/2] mm/slub: perform free consistency checks before call_rcu

Commit Message

Vlastimil Babka Aug. 26, 2022, 9:09 a.m. UTC
For SLAB_TYPESAFE_BY_RCU caches we use call_rcu() to perform empty slab
freeing. The rcu callback rcu_free_slab() calls __free_slab(), which
currently includes checking the slab consistency for caches with the
SLAB_CONSISTENCY_CHECKS flag. This check needs the slab->objects field
to be intact.

Because in the next patch we want to allow rcu_head in struct slab to
become larger in debug configurations, and thus potentially overwrite
more fields through the union than slab_list does, we want to limit the
fields used in rcu_free_slab(). Thus move the consistency checks to
free_slab(), before call_rcu() is issued. This can be done safely even
for SLAB_TYPESAFE_BY_RCU caches, where accesses to the objects can still
occur after they have been freed.

As a result, only the slab->slab_cache field has to be physically
separate from rcu_head for the freeing callback to work. We also save
some cycles in the rcu callback for caches with consistency checks
enabled.

Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
---
 mm/slub.c | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)
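
[Editor's note] For context, a simplified sketch of the SLUB part of struct slab
as defined in mm/slab.h around this series; abridged, and the exact layout
depends on kernel version and config options:

struct slab {
	unsigned long __page_flags;

	union {
		struct list_head slab_list;
		struct rcu_head rcu_head;
	};
	struct kmem_cache *slab_cache;
	/* Double-word boundary */
	void *freelist;		/* first free object */
	union {
		unsigned long counters;
		struct {
			unsigned inuse:16;
			unsigned objects:15;
			unsigned frozen:1;
		};
	};
	unsigned int __unused;
	/* ... */
};

A larger rcu_head would spill past slab_list and overwrite neighbouring fields.
With the checks moved out of the rcu callback, only slab->slab_cache (which
rcu_free_slab() needs in order to hand the slab back to __free_slab()) has to
be kept physically separate from rcu_head, while counters, and thus objects,
may already be clobbered by the time the callback runs.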

Comments

Hyeonggon Yoo Aug. 29, 2022, 2:50 a.m. UTC | #1
On Fri, Aug 26, 2022 at 11:09:11AM +0200, Vlastimil Babka wrote:
> For SLAB_TYPESAFE_BY_RCU caches we use call_rcu to perform empty slab
> freeing. The rcu callback rcu_free_slab() calls __free_slab() that
> currently includes checking the slab consistency for caches with
> SLAB_CONSISTENCY_CHECKS flags. This check needs the slab->objects field
> to be intact.
> 
> Because in the next patch we want to allow rcu_head in struct slab to
> become larger in debug configurations and thus potentially overwrite
> more fields through a union than slab_list, we want to limit the fields
> used in rcu_free_slab().  Thus move the consistency checks to
> free_slab() before call_rcu(). This can be done safely even for
> SLAB_TYPESAFE_BY_RCU caches where accesses to the objects can still
> occur after freeing them.
> 
> As a result, only the slab->slab_cache field has to be physically
> separate from rcu_head for the freeing callback to work. We also save
> some cycles in the rcu callback for caches with consistency checks
> enabled.
> 
> Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
> ---
>  mm/slub.c | 20 ++++++++++----------
>  1 file changed, 10 insertions(+), 10 deletions(-)
> 
> diff --git a/mm/slub.c b/mm/slub.c
> index 862dbd9af4f5..d86be1b0d09f 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -2036,14 +2036,6 @@ static void __free_slab(struct kmem_cache *s, struct slab *slab)
>  	int order = folio_order(folio);
>  	int pages = 1 << order;
>  
> -	if (kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS)) {
> -		void *p;
> -
> -		slab_pad_check(s, slab);
> -		for_each_object(p, s, slab_address(slab), slab->objects)
> -			check_object(s, slab, p, SLUB_RED_INACTIVE);
> -	}
> -
>  	__slab_clear_pfmemalloc(slab);
>  	__folio_clear_slab(folio);
>  	folio->mapping = NULL;
> @@ -2062,9 +2054,17 @@ static void rcu_free_slab(struct rcu_head *h)
>  
>  static void free_slab(struct kmem_cache *s, struct slab *slab)
>  {
> -	if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU)) {
> +	if (kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS)) {
> +		void *p;
> +
> +		slab_pad_check(s, slab);
> +		for_each_object(p, s, slab_address(slab), slab->objects)
> +			check_object(s, slab, p, SLUB_RED_INACTIVE);
> +	}
> +
> +	if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU))
>  		call_rcu(&slab->rcu_head, rcu_free_slab);
> -	} else
> +	else
>  		__free_slab(s, slab);
>  }

So this allows corrupting 'counters' with patch 2.

The code still looks safe to me, as we only do redzone checking
for SLAB_TYPESAFE_BY_RCU caches.

Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>

> -- 
> 2.37.2
>
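
[Editor's note] The rcu callback itself is not touched by this patch; in
mm/slub.c it is essentially just the following (shown for context, exact form
may differ slightly):

static void rcu_free_slab(struct rcu_head *h)
{
	struct slab *slab = container_of(h, struct slab, rcu_head);

	__free_slab(slab->slab_cache, slab);
}

This is why slab->slab_cache is the only field that still has to survive
until the grace period ends, and why the possible corruption of 'counters'
noted above does not affect correctness once the checks run earlier.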

Patch

diff --git a/mm/slub.c b/mm/slub.c
index 862dbd9af4f5..d86be1b0d09f 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2036,14 +2036,6 @@ static void __free_slab(struct kmem_cache *s, struct slab *slab)
 	int order = folio_order(folio);
 	int pages = 1 << order;
 
-	if (kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS)) {
-		void *p;
-
-		slab_pad_check(s, slab);
-		for_each_object(p, s, slab_address(slab), slab->objects)
-			check_object(s, slab, p, SLUB_RED_INACTIVE);
-	}
-
 	__slab_clear_pfmemalloc(slab);
 	__folio_clear_slab(folio);
 	folio->mapping = NULL;
@@ -2062,9 +2054,17 @@ static void rcu_free_slab(struct rcu_head *h)
 
 static void free_slab(struct kmem_cache *s, struct slab *slab)
 {
-	if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU)) {
+	if (kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS)) {
+		void *p;
+
+		slab_pad_check(s, slab);
+		for_each_object(p, s, slab_address(slab), slab->objects)
+			check_object(s, slab, p, SLUB_RED_INACTIVE);
+	}
+
+	if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU))
 		call_rcu(&slab->rcu_head, rcu_free_slab);
-	} else
+	else
 		__free_slab(s, slab);
 }
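
[Editor's note] Assembled from the hunk above, free_slab() with this patch
applied reads:

static void free_slab(struct kmem_cache *s, struct slab *slab)
{
	/* Run the consistency checks while slab->objects is still intact. */
	if (kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS)) {
		void *p;

		slab_pad_check(s, slab);
		for_each_object(p, s, slab_address(slab), slab->objects)
			check_object(s, slab, p, SLUB_RED_INACTIVE);
	}

	if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU))
		call_rcu(&slab->rcu_head, rcu_free_slab);
	else
		__free_slab(s, slab);
}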