
[v2,09/15] sl*b: remove rcu_freeptr_offset from struct kmem_cache

Message ID 20240903-work-kmem_cache_args-v2-9-76f97e9a4560@kernel.org (mailing list archive)
State New
Series slab: add struct kmem_cache_args

Commit Message

Christian Brauner Sept. 3, 2024, 2:20 p.m. UTC
Now that we pass down struct kmem_cache_args to calculate_sizes() we
don't need it anymore.

Signed-off-by: Christian Brauner <brauner@kernel.org>
---
 mm/slab.h |  2 --
 mm/slub.c | 25 +++++++------------------
 2 files changed, 7 insertions(+), 20 deletions(-)
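For context, the field removed here mediates a SLAB_TYPESAFE_BY_RCU optimization: with the kmem_cache_args API this series introduces, a caller can point the allocator at a free-pointer slot inside the object instead of letting SLUB append one. A minimal sketch of such a caller, assuming the four-argument kmem_cache_create() form from this series (struct my_obj and my_cache_init() are hypothetical):

#include <linux/init.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_obj {
	struct rcu_head rcu;
	unsigned long data;
	void *freeptr;	/* dedicated slot for SLUB's freelist pointer */
};

static struct kmem_cache *my_cache;

static int __init my_cache_init(void)
{
	struct kmem_cache_args args = {
		.freeptr_offset		= offsetof(struct my_obj, freeptr),
		.use_freeptr_offset	= true,
	};

	my_cache = kmem_cache_create("my_obj", sizeof(struct my_obj),
				     &args, SLAB_TYPESAFE_BY_RCU);
	return my_cache ? 0 : -ENOMEM;
}

Before this patch, those two args fields were copied into s->rcu_freeptr_offset (with UINT_MAX as the "not set" sentinel); after it, calculate_sizes() reads them from args directly.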

Comments

Mike Rapoport Sept. 4, 2024, 5:08 a.m. UTC | #1
On Tue, Sep 03, 2024 at 04:20:50PM +0200, Christian Brauner wrote:
> Now that we pass down struct kmem_cache_args to calculate_sizes() we
> don't need it anymore.
> 
> Signed-off-by: Christian Brauner <brauner@kernel.org>
> ---
>  mm/slab.h |  2 --
>  mm/slub.c | 25 +++++++------------------
>  2 files changed, 7 insertions(+), 20 deletions(-)
> 
> diff --git a/mm/slab.h b/mm/slab.h
> index c7a4e0fc3cf1..36ac38e21fcb 100644
> --- a/mm/slab.h
> +++ b/mm/slab.h
> @@ -261,8 +261,6 @@ struct kmem_cache {
>  	unsigned int object_size;	/* Object size without metadata */
>  	struct reciprocal_value reciprocal_size;
>  	unsigned int offset;		/* Free pointer offset */
> -	/* Specific free pointer requested (if not UINT_MAX) */
> -	unsigned int rcu_freeptr_offset;
>  #ifdef CONFIG_SLUB_CPU_PARTIAL
>  	/* Number of per cpu partial objects to keep around */
>  	unsigned int cpu_partial;
> diff --git a/mm/slub.c b/mm/slub.c
> index 4719b60215b8..a23c7036cd61 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -3916,8 +3916,7 @@ static void *__slab_alloc_node(struct kmem_cache *s,
>   * If the object has been wiped upon free, make sure it's fully initialized by
>   * zeroing out freelist pointer.
>   *
> - * Note that we also wipe custom freelist pointers specified via
> - * s->rcu_freeptr_offset.
> + * Note that we also wipe custom freelist pointers.
>   */
>  static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s,
>  						   void *obj)
> @@ -5141,17 +5140,11 @@ static void set_cpu_partial(struct kmem_cache *s)
>  #endif
>  }
>  
> -/* Was a valid freeptr offset requested? */
> -static inline bool has_freeptr_offset(const struct kmem_cache *s)
> -{
> -	return s->rcu_freeptr_offset != UINT_MAX;
> -}
> -
>  /*
>   * calculate_sizes() determines the order and the distribution of data within
>   * a slab object.
>   */
> -static int calculate_sizes(struct kmem_cache *s)
> +static int calculate_sizes(struct kmem_cache_args *args, struct kmem_cache *s)

I'd keep kmem_cache as the first argument.
As for the rest

Reviewed-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
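
For clarity, the suggested ordering would read like this (a sketch of the reviewer's suggestion, not code from the posted patch):

	/* Keep the cache as the first parameter, the args second. */
	static int calculate_sizes(struct kmem_cache *s, struct kmem_cache_args *args);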

Vlastimil Babka Sept. 4, 2024, 8:16 a.m. UTC | #2
On 9/3/24 16:20, Christian Brauner wrote:
> Now that we pass down struct kmem_cache_args to calculate_sizes() we
> don't need it anymore.

Nit: that sounds like a previous patch did the "pass down" part? It's fine
to do both at once, but maybe adjust the description to say that we do both here?

Christian Brauner Sept. 4, 2024, 8:58 a.m. UTC | #3
On Wed, Sep 04, 2024 at 10:16:17AM GMT, Vlastimil Babka wrote:
> On 9/3/24 16:20, Christian Brauner wrote:
> > Now that we pass down struct kmem_cache_args to calculate_sizes() we
> > don't need it anymore.
> 
> Nit: that sounds like a previous patch did the "pass down" part? It's fine
> to do both at once, but maybe adjust the description to say that we do both here?

Hm, maybe that was misleadingly phrased, but my intention was to express
that this patch passes it down and removes the now unneeded member from
struct kmem_cache. I've rephrased this to:

"Pass down struct kmem_cache_args to calculate_sizes() so we can use
args->{use}_freeptr_offset directly. This allows us to remove
->rcu_freeptr_offset from struct kmem_cache."

on the work.kmem_cache_args branch.
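
To make the resulting placement logic concrete, here is a small userspace model of the offset selection in calculate_sizes() after this patch (an illustrative sketch only: pick_freeptr_offset() and the _model types are hypothetical, and the real function also handles poisoning, red zoning and alignment that are elided here):

#include <stdbool.h>
#include <stddef.h>

struct kmem_cache_args_model {
	bool use_freeptr_offset;
	unsigned int freeptr_offset;
};

static unsigned int pick_freeptr_offset(bool rcu, bool needs_own_slot,
					const struct kmem_cache_args_model *args,
					unsigned int object_size,
					unsigned int *size)
{
	if ((rcu && !args->use_freeptr_offset) || needs_own_slot) {
		/* Append a dedicated pointer slot after the object. */
		unsigned int offset = *size;

		*size += sizeof(void *);
		return offset;
	}
	if (rcu && args->use_freeptr_offset) {
		/* Reuse the hole the caller reserved inside the object. */
		return args->freeptr_offset;
	}
	/* Otherwise overlay the pointer inside the object (simplified). */
	return object_size / 2;
}

The point of the patch is simply that this decision now consults args->use_freeptr_offset at the moment the layout is computed, so struct kmem_cache no longer needs to carry a copy guarded by a UINT_MAX sentinel.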

Patch

diff --git a/mm/slab.h b/mm/slab.h
index c7a4e0fc3cf1..36ac38e21fcb 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -261,8 +261,6 @@  struct kmem_cache {
 	unsigned int object_size;	/* Object size without metadata */
 	struct reciprocal_value reciprocal_size;
 	unsigned int offset;		/* Free pointer offset */
-	/* Specific free pointer requested (if not UINT_MAX) */
-	unsigned int rcu_freeptr_offset;
 #ifdef CONFIG_SLUB_CPU_PARTIAL
 	/* Number of per cpu partial objects to keep around */
 	unsigned int cpu_partial;
diff --git a/mm/slub.c b/mm/slub.c
index 4719b60215b8..a23c7036cd61 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3916,8 +3916,7 @@  static void *__slab_alloc_node(struct kmem_cache *s,
  * If the object has been wiped upon free, make sure it's fully initialized by
  * zeroing out freelist pointer.
  *
- * Note that we also wipe custom freelist pointers specified via
- * s->rcu_freeptr_offset.
+ * Note that we also wipe custom freelist pointers.
  */
 static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s,
 						   void *obj)
@@ -5141,17 +5140,11 @@  static void set_cpu_partial(struct kmem_cache *s)
 #endif
 }
 
-/* Was a valid freeptr offset requested? */
-static inline bool has_freeptr_offset(const struct kmem_cache *s)
-{
-	return s->rcu_freeptr_offset != UINT_MAX;
-}
-
 /*
  * calculate_sizes() determines the order and the distribution of data within
  * a slab object.
  */
-static int calculate_sizes(struct kmem_cache *s)
+static int calculate_sizes(struct kmem_cache_args *args, struct kmem_cache *s)
 {
 	slab_flags_t flags = s->flags;
 	unsigned int size = s->object_size;
@@ -5192,7 +5185,7 @@  static int calculate_sizes(struct kmem_cache *s)
 	 */
 	s->inuse = size;
 
-	if (((flags & SLAB_TYPESAFE_BY_RCU) && !has_freeptr_offset(s)) ||
+	if (((flags & SLAB_TYPESAFE_BY_RCU) && !args->use_freeptr_offset) ||
 	    (flags & SLAB_POISON) || s->ctor ||
 	    ((flags & SLAB_RED_ZONE) &&
 	     (s->object_size < sizeof(void *) || slub_debug_orig_size(s)))) {
@@ -5214,8 +5207,8 @@  static int calculate_sizes(struct kmem_cache *s)
 		 */
 		s->offset = size;
 		size += sizeof(void *);
-	} else if ((flags & SLAB_TYPESAFE_BY_RCU) && has_freeptr_offset(s)) {
-		s->offset = s->rcu_freeptr_offset;
+	} else if ((flags & SLAB_TYPESAFE_BY_RCU) && args->use_freeptr_offset) {
+		s->offset = args->freeptr_offset;
 	} else {
 		/*
 		 * Store freelist pointer near middle of object to keep
@@ -5856,10 +5849,6 @@  int do_kmem_cache_create(struct kmem_cache *s, const char *name,
 #ifdef CONFIG_SLAB_FREELIST_HARDENED
 	s->random = get_random_long();
 #endif
-	if (args->use_freeptr_offset)
-		s->rcu_freeptr_offset = args->freeptr_offset;
-	else
-		s->rcu_freeptr_offset = UINT_MAX;
 	s->align = args->align;
 	s->ctor = args->ctor;
 #ifdef CONFIG_HARDENED_USERCOPY
@@ -5867,7 +5856,7 @@  int do_kmem_cache_create(struct kmem_cache *s, const char *name,
 	s->usersize = args->usersize;
 #endif
 
-	if (!calculate_sizes(s))
+	if (!calculate_sizes(args, s))
 		goto out;
 	if (disable_higher_order_debug) {
 		/*
@@ -5877,7 +5866,7 @@  int do_kmem_cache_create(struct kmem_cache *s, const char *name,
 		if (get_order(s->size) > get_order(s->object_size)) {
 			s->flags &= ~DEBUG_METADATA_FLAGS;
 			s->offset = 0;
-			if (!calculate_sizes(s))
+			if (!calculate_sizes(args, s))
 				goto out;
 		}
 	}