[5/7] mm/slab: remove CONFIG_SLOB code from slab common code

Message ID 20230310103210.22372-6-vbabka@suse.cz (mailing list archive)
State Not Applicable
Series remove SLOB and allow kfree() with kmem_cache_alloc()

Checks

Context Check Description
netdev/tree_selection success Guessing tree name failed - patch did not apply

Commit Message

Vlastimil Babka March 10, 2023, 10:32 a.m. UTC
CONFIG_SLOB has been removed from Kconfig. Remove code and #ifdefs
specific to SLOB in the slab headers and common code.

Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
---
 include/linux/slab.h | 39 ----------------------------
 mm/slab.h            | 61 --------------------------------------------
 mm/slab_common.c     |  2 --
 3 files changed, 102 deletions(-)
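
For background, the end goal of the series (per its title above) is that
kfree() becomes valid on objects from kmem_cache_alloc(), not only from
kmalloc(). A minimal sketch of that usage; struct foo and foo_cache are
hypothetical names for illustration, not part of this patch:

    /* With SLOB gone, the remaining allocators (SLAB, SLUB) can resolve
     * an object's cache from its struct slab, so freeing a cache object
     * with kfree() works. Under SLOB it did not, because SLOB recorded
     * the allocation size only for kmalloc() objects. */
    struct foo *obj = kmem_cache_alloc(foo_cache, GFP_KERNEL);

    if (!obj)
            return -ENOMEM;
    /* ... use obj ... */
    kfree(obj);    /* previously required kmem_cache_free(foo_cache, obj) */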

Comments

Hyeonggon Yoo March 14, 2023, 9:28 a.m. UTC | #1
On Fri, Mar 10, 2023 at 11:32:07AM +0100, Vlastimil Babka wrote:
> CONFIG_SLOB has been removed from Kconfig. Remove code and #ifdefs
> specific to SLOB in the slab headers and common code.
> 
> Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
> ---
> [...]

Looks good to me,

Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>

Lorenzo Stoakes March 14, 2023, 10:16 p.m. UTC | #2
On Tue, Mar 14, 2023 at 09:28:32AM +0000, Hyeonggon Yoo wrote:
> On Fri, Mar 10, 2023 at 11:32:07AM +0100, Vlastimil Babka wrote:
> > CONFIG_SLOB has been removed from Kconfig. Remove code and #ifdefs
> > specific to SLOB in the slab headers and common code.
> >
> > Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
> > ---
> > [...]
>
> Looks good to me,
>
> Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>

Looks good to me too,

Acked-by: Lorenzo Stoakes <lstoakes@gmail.com>
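
One user-visible consequence shows up in the hunk below that drops the
kfree_bulk() comment: that restriction existed only because SLOB could not
free non-kmalloc() memory through this path. With SLOB removed,
kmem_cache_free_bulk(NULL, size, p) resolves each object's cache from the
object itself, so a sketch like the following (foo_cache is hypothetical,
NULL checks omitted) becomes legal:

    void *objs[2];

    objs[0] = kmalloc(32, GFP_KERNEL);
    objs[1] = kmem_cache_alloc(foo_cache, GFP_KERNEL);
    /* kfree_bulk() is a thin wrapper around kmem_cache_free_bulk(NULL, ...),
     * which looks up the cache of each object, so mixing kmalloc() and
     * kmem_cache_alloc() objects in one bulk free is fine here. */
    kfree_bulk(2, objs);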

Patch

diff --git a/include/linux/slab.h b/include/linux/slab.h
index 45af70315a94..7f645a4c1298 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -298,19 +298,6 @@ static inline unsigned int arch_slab_minalign(void)
 #endif
 #endif
 
-#ifdef CONFIG_SLOB
-/*
- * SLOB passes all requests larger than one page to the page allocator.
- * No kmalloc array is necessary since objects of different sizes can
- * be allocated from the same page.
- */
-#define KMALLOC_SHIFT_HIGH	PAGE_SHIFT
-#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT - 1)
-#ifndef KMALLOC_SHIFT_LOW
-#define KMALLOC_SHIFT_LOW	3
-#endif
-#endif
-
 /* Maximum allocatable size */
 #define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_MAX)
 /* Maximum size for which we actually use a slab cache */
@@ -366,7 +353,6 @@ enum kmalloc_cache_type {
 	NR_KMALLOC_TYPES
 };
 
-#ifndef CONFIG_SLOB
 extern struct kmem_cache *
 kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1];
 
@@ -458,7 +444,6 @@ static __always_inline unsigned int __kmalloc_index(size_t size,
 }
 static_assert(PAGE_SHIFT <= 20);
 #define kmalloc_index(s) __kmalloc_index(s, true)
-#endif /* !CONFIG_SLOB */
 
 void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __alloc_size(1);
 
@@ -487,10 +472,6 @@ void kmem_cache_free(struct kmem_cache *s, void *objp);
 void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p);
 int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, void **p);
 
-/*
- * Caller must not use kfree_bulk() on memory not originally allocated
- * by kmalloc(), because the SLOB allocator cannot handle this.
- */
 static __always_inline void kfree_bulk(size_t size, void **p)
 {
 	kmem_cache_free_bulk(NULL, size, p);
@@ -567,7 +548,6 @@ void *kmalloc_large_node(size_t size, gfp_t flags, int node) __assume_page_align
  *	Try really hard to succeed the allocation but fail
  *	eventually.
  */
-#ifndef CONFIG_SLOB
 static __always_inline __alloc_size(1) void *kmalloc(size_t size, gfp_t flags)
 {
 	if (__builtin_constant_p(size) && size) {
@@ -583,17 +563,7 @@ static __always_inline __alloc_size(1) void *kmalloc(size_t size, gfp_t flags)
 	}
 	return __kmalloc(size, flags);
 }
-#else
-static __always_inline __alloc_size(1) void *kmalloc(size_t size, gfp_t flags)
-{
-	if (__builtin_constant_p(size) && size > KMALLOC_MAX_CACHE_SIZE)
-		return kmalloc_large(size, flags);
-
-	return __kmalloc(size, flags);
-}
-#endif
 
-#ifndef CONFIG_SLOB
 static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	if (__builtin_constant_p(size) && size) {
@@ -609,15 +579,6 @@ static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t fla
 	}
 	return __kmalloc_node(size, flags, node);
 }
-#else
-static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t flags, int node)
-{
-	if (__builtin_constant_p(size) && size > KMALLOC_MAX_CACHE_SIZE)
-		return kmalloc_large_node(size, flags, node);
-
-	return __kmalloc_node(size, flags, node);
-}
-#endif
 
 /**
  * kmalloc_array - allocate memory for an array.
diff --git a/mm/slab.h b/mm/slab.h
index 43966aa5fadf..399966b3ce52 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -51,14 +51,6 @@ struct slab {
 	};
 	unsigned int __unused;
 
-#elif defined(CONFIG_SLOB)
-
-	struct list_head slab_list;
-	void *__unused_1;
-	void *freelist;		/* first free block */
-	long units;
-	unsigned int __unused_2;
-
 #else
 #error "Unexpected slab allocator configured"
 #endif
@@ -72,11 +64,7 @@ struct slab {
 #define SLAB_MATCH(pg, sl)						\
 	static_assert(offsetof(struct page, pg) == offsetof(struct slab, sl))
 SLAB_MATCH(flags, __page_flags);
-#ifndef CONFIG_SLOB
 SLAB_MATCH(compound_head, slab_cache);	/* Ensure bit 0 is clear */
-#else
-SLAB_MATCH(compound_head, slab_list);	/* Ensure bit 0 is clear */
-#endif
 SLAB_MATCH(_refcount, __page_refcount);
 #ifdef CONFIG_MEMCG
 SLAB_MATCH(memcg_data, memcg_data);
@@ -200,31 +188,6 @@ static inline size_t slab_size(const struct slab *slab)
 	return PAGE_SIZE << slab_order(slab);
 }
 
-#ifdef CONFIG_SLOB
-/*
- * Common fields provided in kmem_cache by all slab allocators
- * This struct is either used directly by the allocator (SLOB)
- * or the allocator must include definitions for all fields
- * provided in kmem_cache_common in their definition of kmem_cache.
- *
- * Once we can do anonymous structs (C11 standard) we could put a
- * anonymous struct definition in these allocators so that the
- * separate allocations in the kmem_cache structure of SLAB and
- * SLUB is no longer needed.
- */
-struct kmem_cache {
-	unsigned int object_size;/* The original size of the object */
-	unsigned int size;	/* The aligned/padded/added on size  */
-	unsigned int align;	/* Alignment as calculated */
-	slab_flags_t flags;	/* Active flags on the slab */
-	const char *name;	/* Slab name for sysfs */
-	int refcount;		/* Use counter */
-	void (*ctor)(void *);	/* Called on object slot creation */
-	struct list_head list;	/* List of all slab caches on the system */
-};
-
-#endif /* CONFIG_SLOB */
-
 #ifdef CONFIG_SLAB
 #include <linux/slab_def.h>
 #endif
@@ -274,7 +237,6 @@ extern const struct kmalloc_info_struct {
 	unsigned int size;
 } kmalloc_info[];
 
-#ifndef CONFIG_SLOB
 /* Kmalloc array related functions */
 void setup_kmalloc_cache_index_table(void);
 void create_kmalloc_caches(slab_flags_t);
@@ -286,7 +248,6 @@ void *__kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags,
 			      int node, size_t orig_size,
 			      unsigned long caller);
 void __kmem_cache_free(struct kmem_cache *s, void *x, unsigned long caller);
-#endif
 
 gfp_t kmalloc_fix_flags(gfp_t flags);
 
@@ -303,33 +264,16 @@ extern void create_boot_cache(struct kmem_cache *, const char *name,
 int slab_unmergeable(struct kmem_cache *s);
 struct kmem_cache *find_mergeable(unsigned size, unsigned align,
 		slab_flags_t flags, const char *name, void (*ctor)(void *));
-#ifndef CONFIG_SLOB
 struct kmem_cache *
 __kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
 		   slab_flags_t flags, void (*ctor)(void *));
 
 slab_flags_t kmem_cache_flags(unsigned int object_size,
 	slab_flags_t flags, const char *name);
-#else
-static inline struct kmem_cache *
-__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
-		   slab_flags_t flags, void (*ctor)(void *))
-{ return NULL; }
-
-static inline slab_flags_t kmem_cache_flags(unsigned int object_size,
-	slab_flags_t flags, const char *name)
-{
-	return flags;
-}
-#endif
 
 static inline bool is_kmalloc_cache(struct kmem_cache *s)
 {
-#ifndef CONFIG_SLOB
 	return (s->flags & SLAB_KMALLOC);
-#else
-	return false;
-#endif
 }
 
 /* Legal flag mask for kmem_cache_create(), for various configurations */
@@ -634,7 +578,6 @@ static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
 }
 #endif /* CONFIG_MEMCG_KMEM */
 
-#ifndef CONFIG_SLOB
 static inline struct kmem_cache *virt_to_cache(const void *obj)
 {
 	struct slab *slab;
@@ -684,8 +627,6 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
 
 void free_large_kmalloc(struct folio *folio, void *object);
 
-#endif /* CONFIG_SLOB */
-
 size_t __ksize(const void *objp);
 
 static inline size_t slab_ksize(const struct kmem_cache *s)
@@ -777,7 +718,6 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s,
 	memcg_slab_post_alloc_hook(s, objcg, flags, size, p);
 }
 
-#ifndef CONFIG_SLOB
 /*
  * The slab lists for all objects.
  */
@@ -824,7 +764,6 @@ static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
 	for (__node = 0; __node < nr_node_ids; __node++) \
 		 if ((__n = get_node(__s, __node)))
 
-#endif
 
 #if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
 void dump_unreclaimable_slab(void);
diff --git a/mm/slab_common.c b/mm/slab_common.c
index bf4e777cfe90..1522693295f5 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -625,7 +625,6 @@ void kmem_dump_obj(void *object)
 EXPORT_SYMBOL_GPL(kmem_dump_obj);
 #endif
 
-#ifndef CONFIG_SLOB
 /* Create a cache during boot when no slab services are available yet */
 void __init create_boot_cache(struct kmem_cache *s, const char *name,
 		unsigned int size, slab_flags_t flags,
@@ -1079,7 +1078,6 @@ void *kmalloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
 	return ret;
 }
 EXPORT_SYMBOL(kmalloc_node_trace);
-#endif /* !CONFIG_SLOB */
 
 gfp_t kmalloc_fix_flags(gfp_t flags)
 {