--- a/Documentation/mm/slub.rst
+++ b/Documentation/mm/slub.rst
@@ -53,6 +53,7 @@ Possible debug options are::
U User tracking (free and alloc)
T Trace (please only use on single slabs)
A Enable failslab filter mark for the cache
+ C Panic if object corruption is detected
O Switch debugging off for caches that would have
caused higher minimum slab orders
- Switch all debugging off (useful if the kernel is
@@ -113,6 +114,7 @@ options from the ``slab_debug`` parameter translate to the following files::
U store_user
T trace
A failslab
+ C corruption_panic
failslab file is writable, so writing 1 or 0 will enable or disable
the option at runtime. Write returns -EINVAL if cache is an alias.
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -31,6 +31,7 @@ enum _slab_flag_bits {
_SLAB_CACHE_DMA32,
_SLAB_STORE_USER,
_SLAB_PANIC,
+ _SLAB_CORRUPTION_PANIC,
_SLAB_TYPESAFE_BY_RCU,
_SLAB_TRACE,
#ifdef CONFIG_DEBUG_OBJECTS
@@ -97,6 +98,9 @@ enum _slab_flag_bits {
#define SLAB_STORE_USER __SLAB_FLAG_BIT(_SLAB_STORE_USER)
/* Panic if kmem_cache_create() fails */
#define SLAB_PANIC __SLAB_FLAG_BIT(_SLAB_PANIC)
+/* Panic if object corruption is detected */
+#define SLAB_CORRUPTION_PANIC __SLAB_FLAG_BIT(_SLAB_CORRUPTION_PANIC)
+
/**
* define SLAB_TYPESAFE_BY_RCU - **WARNING** READ THIS!
*
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1306,6 +1306,8 @@ slab_pad_check(struct kmem_cache *s, struct slab *slab)
fault, end - 1, fault - start);
print_section(KERN_ERR, "Padding ", pad, remainder);
+ BUG_ON(s->flags & SLAB_CORRUPTION_PANIC);
+
restore_bytes(s, "slab padding", POISON_INUSE, fault, end);
}
@@ -1389,6 +1391,8 @@ static int check_object(struct kmem_cache *s, struct slab *slab,
if (!ret && !slab_in_kunit_test()) {
print_trailer(s, slab, object);
add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
+
+ BUG_ON(s->flags & SLAB_CORRUPTION_PANIC);
}
return ret;
@@ -1689,6 +1693,9 @@ parse_slub_debug_flags(char *str, slab_flags_t *flags, char **slabs, bool init)
case 'a':
*flags |= SLAB_FAILSLAB;
break;
+ case 'c':
+ *flags |= SLAB_CORRUPTION_PANIC;
+ break;
case 'o':
/*
* Avoid enabling debugging on caches if its minimum
@@ -6874,6 +6881,12 @@ static ssize_t store_user_show(struct kmem_cache *s, char *buf)
SLAB_ATTR_RO(store_user);
+static ssize_t corruption_panic_show(struct kmem_cache *s, char *buf)
+{
+ return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_CORRUPTION_PANIC));
+}
+SLAB_ATTR_RO(corruption_panic);
+
static ssize_t validate_show(struct kmem_cache *s, char *buf)
{
return 0;
@@ -7092,6 +7105,7 @@ static struct attribute *slab_attrs[] = {
&red_zone_attr.attr,
&poison_attr.attr,
&store_user_attr.attr,
+ &corruption_panic_attr.attr,
&validate_attr.attr,
#endif
#ifdef CONFIG_ZONE_DMA
When a slab object is found corrupted or one of its internal values is wrong, restoring the damaged bytes and continuing can cause further side effects later on. By the time those show up, the original corruption happened in the past and is difficult to debug. Add a flag that panics the kernel as soon as object corruption is detected.

Signed-off-by: Hyesoo Yu <hyesoo.yu@samsung.com>
Change-Id: I4e7e5e0ec3421a7f6c84d591db052f79d3775493
---
 Documentation/mm/slub.rst |  2 ++
 include/linux/slab.h      |  4 ++++
 mm/slub.c                 | 14 ++++++++++++++
 3 files changed, 20 insertions(+)
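For readers wanting to try the change, a minimal, purely illustrative module might look like the sketch below. This is not part of the patch; it assumes the patch is applied (so SLAB_CORRUPTION_PANIC exists), and the module and cache names, size and license tag are made up:

	/*
	 * Illustrative only: opt a private cache into the new behaviour.
	 * Red zoning and poisoning provide the corruption checks;
	 * SLAB_CORRUPTION_PANIC turns a detected corruption into BUG()
	 * instead of restore-and-continue.
	 */
	#include <linux/module.h>
	#include <linux/slab.h>

	static struct kmem_cache *demo_cache;	/* hypothetical cache */

	static int __init demo_init(void)
	{
		demo_cache = kmem_cache_create("demo_cache", 128, 0,
					       SLAB_RED_ZONE | SLAB_POISON |
					       SLAB_CORRUPTION_PANIC, NULL);
		return demo_cache ? 0 : -ENOMEM;
	}

	static void __exit demo_exit(void)
	{
		kmem_cache_destroy(demo_cache);
	}

	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_LICENSE("GPL");

The same behaviour can also be requested at boot time with the new 'C' debug character, e.g. something like slab_debug=ZPC or slab_debug=ZPC,demo_cache (the cache name again being hypothetical).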