[v2] mm/slub, kunit: Make slub_kunit unaffected by user specified flags

Message ID Yk0sY9yoJhFEXWOg@hyeyoo (mailing list archive)
State New
Series [v2] mm/slub, kunit: Make slub_kunit unaffected by user specified flags

Commit Message

Hyeonggon Yoo April 6, 2022, 6 a.m. UTC
slub_kunit does not expect other debugging flags to be set when running
its tests. When the SLAB_RED_ZONE flag is set globally, the tests fail
because the flag changes the number of errors reported.

To make slub_kunit unaffected by user-specified debugging flags,
introduce SLAB_NO_USER_FLAGS to ignore them. With this flag set, only the
flags specified in the code are used and any others are ignored.
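
For example, the red-zone test cache in lib/slub_kunit.c below is now
created as:

	/* Only SLAB_RED_ZONE takes effect; slub_debug= boot flags are ignored. */
	struct kmem_cache *s = kmem_cache_create("TestSlub_RZ_alloc", 64, 0,
				SLAB_RED_ZONE|SLAB_NO_USER_FLAGS, NULL);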

Suggested-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
---
 include/linux/slab.h |  7 +++++++
 lib/slub_kunit.c     | 10 +++++-----
 mm/slab.h            |  5 +++--
 mm/slub.c            |  3 +++
 4 files changed, 18 insertions(+), 7 deletions(-)

Comments

Vlastimil Babka April 6, 2022, 8:17 a.m. UTC | #1
On 4/6/22 08:00, Hyeonggon Yoo wrote:
> slub_kunit does not expect other debugging flags to be set when running
> its tests. When the SLAB_RED_ZONE flag is set globally, the tests fail
> because the flag changes the number of errors reported.
> 
> To make slub_kunit unaffected by user-specified debugging flags,
> introduce SLAB_NO_USER_FLAGS to ignore them. With this flag set, only the
> flags specified in the code are used and any others are ignored.
> 
> Suggested-by: Vlastimil Babka <vbabka@suse.cz>
> Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>

Thanks, applied.

Patch

diff --git a/include/linux/slab.h b/include/linux/slab.h
index 373b3ef99f4e..11ceddcae9f4 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -112,6 +112,13 @@
 #define SLAB_KASAN		0
 #endif
 
+/*
+ * Ignore user specified debugging flags.
+ * Intended for caches created for self-tests so they have only flags
+ * specified in the code and other flags are ignored.
+ */
+#define SLAB_NO_USER_FLAGS	((slab_flags_t __force)0x10000000U)
+
 /* The following flags affect the page allocator grouping pages by mobility */
 /* Objects are reclaimable */
 #define SLAB_RECLAIM_ACCOUNT	((slab_flags_t __force)0x00020000U)
diff --git a/lib/slub_kunit.c b/lib/slub_kunit.c
index 8662dc6cb509..7a0564d7cb7a 100644
--- a/lib/slub_kunit.c
+++ b/lib/slub_kunit.c
@@ -12,7 +12,7 @@ static int slab_errors;
 static void test_clobber_zone(struct kunit *test)
 {
 	struct kmem_cache *s = kmem_cache_create("TestSlub_RZ_alloc", 64, 0,
-				SLAB_RED_ZONE, NULL);
+				SLAB_RED_ZONE|SLAB_NO_USER_FLAGS, NULL);
 	u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
 
 	kasan_disable_current();
@@ -30,7 +30,7 @@ static void test_clobber_zone(struct kunit *test)
 static void test_next_pointer(struct kunit *test)
 {
 	struct kmem_cache *s = kmem_cache_create("TestSlub_next_ptr_free", 64, 0,
-				SLAB_POISON, NULL);
+				SLAB_POISON|SLAB_NO_USER_FLAGS, NULL);
 	u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
 	unsigned long tmp;
 	unsigned long *ptr_addr;
@@ -75,7 +75,7 @@ static void test_next_pointer(struct kunit *test)
 static void test_first_word(struct kunit *test)
 {
 	struct kmem_cache *s = kmem_cache_create("TestSlub_1th_word_free", 64, 0,
-				SLAB_POISON, NULL);
+				SLAB_POISON|SLAB_NO_USER_FLAGS, NULL);
 	u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
 
 	kmem_cache_free(s, p);
@@ -90,7 +90,7 @@ static void test_first_word(struct kunit *test)
 static void test_clobber_50th_byte(struct kunit *test)
 {
 	struct kmem_cache *s = kmem_cache_create("TestSlub_50th_word_free", 64, 0,
-				SLAB_POISON, NULL);
+				SLAB_POISON|SLAB_NO_USER_FLAGS, NULL);
 	u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
 
 	kmem_cache_free(s, p);
@@ -106,7 +106,7 @@ static void test_clobber_50th_byte(struct kunit *test)
 static void test_clobber_redzone_free(struct kunit *test)
 {
 	struct kmem_cache *s = kmem_cache_create("TestSlub_RZ_free", 64, 0,
-				SLAB_RED_ZONE, NULL);
+				SLAB_RED_ZONE|SLAB_NO_USER_FLAGS, NULL);
 	u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
 
 	kasan_disable_current();
diff --git a/mm/slab.h b/mm/slab.h
index fd7ae2024897..f7d018100994 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -331,7 +331,7 @@ static inline slab_flags_t kmem_cache_flags(unsigned int object_size,
 			  SLAB_ACCOUNT)
 #elif defined(CONFIG_SLUB)
 #define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
-			  SLAB_TEMPORARY | SLAB_ACCOUNT)
+			  SLAB_TEMPORARY | SLAB_ACCOUNT | SLAB_NO_USER_FLAGS)
 #else
 #define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE)
 #endif
@@ -350,7 +350,8 @@ static inline slab_flags_t kmem_cache_flags(unsigned int object_size,
 			      SLAB_NOLEAKTRACE | \
 			      SLAB_RECLAIM_ACCOUNT | \
 			      SLAB_TEMPORARY | \
-			      SLAB_ACCOUNT)
+			      SLAB_ACCOUNT | \
+			      SLAB_NO_USER_FLAGS)
 
 bool __kmem_cache_empty(struct kmem_cache *);
 int __kmem_cache_shutdown(struct kmem_cache *);
diff --git a/mm/slub.c b/mm/slub.c
index 74d92aa4a3a2..4c78f5919356 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1584,6 +1584,9 @@ slab_flags_t kmem_cache_flags(unsigned int object_size,
 	slab_flags_t block_flags;
 	slab_flags_t slub_debug_local = slub_debug;
 
+	if (flags & SLAB_NO_USER_FLAGS)
+		return flags;
+
 	/*
 	 * If the slab cache is for debugging (e.g. kmemleak) then
 	 * don't store user (stack trace) information by default,