@@ -359,6 +359,25 @@ static void krealloc_pagealloc_less_oob(struct kunit *test)
KMALLOC_MAX_CACHE_SIZE + 201);
}

+/*
+ * Check that krealloc() detects a use-after-free, returns NULL,
+ * and doesn't unpoison the freed object.
+ */
+static void krealloc_uaf(struct kunit *test)
+{
+ char *ptr1, *ptr2;
+ int size1 = 201;
+ int size2 = 235;
+
+ ptr1 = kmalloc(size1, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
+ kfree(ptr1);
+
+ KUNIT_EXPECT_KASAN_FAIL(test, ptr2 = krealloc(ptr1, size2, GFP_KERNEL));
+ KUNIT_ASSERT_PTR_EQ(test, (void *)ptr2, NULL);
+ KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)ptr1);
+}
+
static void kmalloc_oob_16(struct kunit *test)
{
struct {
@@ -1056,6 +1075,7 @@ static struct kunit_case kasan_kunit_test_cases[] = {
KUNIT_CASE(krealloc_less_oob),
KUNIT_CASE(krealloc_pagealloc_more_oob),
KUNIT_CASE(krealloc_pagealloc_less_oob),
+ KUNIT_CASE(krealloc_uaf),
KUNIT_CASE(kmalloc_oob_16),
KUNIT_CASE(kmalloc_uaf_16),
KUNIT_CASE(kmalloc_oob_in_memset),
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -1140,6 +1140,10 @@ static __always_inline void *__do_krealloc(const void *p, size_t new_size,
void *ret;
size_t ks;

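+ /* Fail krealloc() if KASAN detects the object as invalid (e.g. freed). */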
+ if (likely(!ZERO_OR_NULL_PTR(p)) && !kasan_check_byte(p))
+ return NULL;
+
ks = ksize(p);
if (ks >= new_size) {