@@ -33,6 +33,20 @@ typedef unsigned int __bitwise kasan_vmalloc_flags_t;
#include <linux/pgtable.h>
+#ifndef kasan_mem_to_shadow
+static inline void *kasan_mem_to_shadow(const void *addr)
+{
+ void *scaled;
+
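+ /*
+ * Generic mode scales the address with a logical shift, while the
+ * tag-based modes use an arithmetic shift, sign-extending the
+ * upper bits of the address.
+ */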
+ if (IS_ENABLED(CONFIG_KASAN_GENERIC))
+ scaled = (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT);
+ else
+ scaled = (void *)((long)addr >> KASAN_SHADOW_SCALE_SHIFT);
+
+ return KASAN_SHADOW_OFFSET + scaled;
+}
+#endif
+
/* Software KASAN implementations use shadow memory. */
#ifdef CONFIG_KASAN_SW_TAGS_DENSE
@@ -53,6 +67,25 @@ static inline u8 kasan_dense_tag(u8 tag)
#define KASAN_GRANULE_SIZE (1UL << KASAN_GRANULE_SHIFT)
+#ifdef CONFIG_KASAN_SW_TAGS_DENSE
+static inline u8 kasan_get_shadow_tag(const void *ptr)
+{
+ u8 shadow_byte = *(u8 *)kasan_mem_to_shadow(ptr);
+ unsigned long addr = (unsigned long)ptr;
+ int shift;
+
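+ /*
+ * One shadow byte covers two granules: odd granules live in the
+ * high half-byte, even granules in the low one.
+ */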
+ shift = !!(addr & KASAN_GRANULE_SIZE) * KASAN_TAG_WIDTH;
+ shadow_byte >>= shift;
+
+ return shadow_byte & KASAN_TAG_KERNEL;
+}
+#else
+static inline u8 kasan_get_shadow_tag(const void *addr)
+{
+ return *(u8 *)kasan_mem_to_shadow(addr);
+}
+#endif
+
#ifdef CONFIG_KASAN_SW_TAGS
/* This matches KASAN_TAG_INVALID. */
#define KASAN_SHADOW_INIT 0xFE
@@ -73,20 +106,6 @@ extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];
int kasan_populate_early_shadow(const void *shadow_start,
const void *shadow_end);
-#ifndef kasan_mem_to_shadow
-static inline void *kasan_mem_to_shadow(const void *addr)
-{
- void *scaled;
-
- if (IS_ENABLED(CONFIG_KASAN_GENERIC))
- scaled = (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT);
- else
- scaled = (void *)((long)addr >> KASAN_SHADOW_SCALE_SHIFT);
-
- return KASAN_SHADOW_OFFSET + scaled;
-}
-#endif
-
int kasan_add_zero_shadow(void *start, unsigned long size);
void kasan_remove_zero_shadow(void *start, unsigned long size);
@@ -5,6 +5,7 @@ KCOV_INSTRUMENT := n
# Disable ftrace to avoid recursion.
CFLAGS_REMOVE_common.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_dense.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_generic.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_init.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_quarantine.o = $(CC_FLAGS_FTRACE)
@@ -24,6 +25,7 @@ CC_FLAGS_KASAN_RUNTIME += -fno-stack-protector
CC_FLAGS_KASAN_RUNTIME += -DDISABLE_BRANCH_PROFILING
CFLAGS_common.o := $(CC_FLAGS_KASAN_RUNTIME)
+CFLAGS_dense.o := $(CC_FLAGS_KASAN_RUNTIME)
CFLAGS_generic.o := $(CC_FLAGS_KASAN_RUNTIME)
CFLAGS_init.o := $(CC_FLAGS_KASAN_RUNTIME)
CFLAGS_quarantine.o := $(CC_FLAGS_KASAN_RUNTIME)
@@ -49,6 +51,7 @@ RUSTFLAGS_kasan_test_rust.o := $(RUSTFLAGS_KASAN)
CFLAGS_kasan_test_module.o := $(CFLAGS_KASAN_TEST)
obj-y := common.o report.o
+obj-$(CONFIG_KASAN_SW_TAGS_DENSE) += dense.o
obj-$(CONFIG_KASAN_GENERIC) += init.o generic.o report_generic.o shadow.o quarantine.o
obj-$(CONFIG_KASAN_HW_TAGS) += hw_tags.o report_hw_tags.o tags.o report_tags.o
obj-$(CONFIG_KASAN_SW_TAGS) += init.o report_sw_tags.o shadow.o sw_tags.o tags.o report_tags.o
new file mode 100644
@@ -0,0 +1,83 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "kasan.h"
+
+static __always_inline bool kasan_check_range_inline(const void *addr,
+ size_t size, bool write,
+ unsigned long ret_ip)
+{
+ u8 *shadow_first, *shadow_last, *shadow, *shadow_first_aligned, *shadow_last_aligned;
+ u64 addr_start_aligned, addr_end_aligned;
+ u8 tag, kasan_granule_offset;
+ size_t aligned_size;
+ void *untagged_addr;
+
+ if (unlikely(size == 0))
+ return true;
+
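+ /* Reject ranges that wrap around the top of the address space. */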
+ if (unlikely(addr + size < addr))
+ return !kasan_report(addr, size, write, ret_ip);
+
+ tag = get_tag(addr);
+
+ /*
+ * Ignore accesses for pointers tagged with native kernel
+ * pointer tag to suppress false positives caused by kmap.
+ *
+ * Some kernel code was written to account for archs that don't keep
+ * high memory mapped all the time, but rather map and unmap particular
+ * pages when needed. Instead of storing a pointer to the kernel memory,
+ * this code saves the address of the page structure and offset within
+ * that page for later use. Those pages are then mapped and unmapped
+ * with kmap/kunmap when necessary and virt_to_page is used to get the
+ * virtual address of the page. For arm64 (that keeps the high memory
+ * mapped all the time), kmap is turned into a page_address call.
+ *
+ * The issue is that with use of the page_address + virt_to_page
+ * sequence the top byte value of the original pointer gets lost (gets
+ * set to KASAN_TAG_KERNEL).
+ */
+ if (tag == KASAN_TAG_KERNEL)
+ return true;
+
+ untagged_addr = kasan_reset_tag((void *)round_down((u64)addr, KASAN_GRANULE_SIZE));
+ if (unlikely(!addr_has_metadata(untagged_addr)))
+ return !kasan_report(addr, size, write, ret_ip);
+
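+ /*
+ * Round the range out to whole granules: the unaligned first and
+ * last granules get checked separately below, while the aligned
+ * middle part is compared one packed shadow byte at a time.
+ */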
+ kasan_granule_offset = ((u64)addr & KASAN_GRANULE_MASK);
+ aligned_size = round_up(size + kasan_granule_offset, KASAN_GRANULE_SIZE);
+ shadow_first = kasan_mem_to_shadow(untagged_addr);
+ shadow_last = kasan_mem_to_shadow(untagged_addr + aligned_size);
+ addr_start_aligned = round_up((u64)untagged_addr, KASAN_SHADOW_SCALE_SIZE);
+ addr_end_aligned = round_down((u64)untagged_addr + aligned_size, KASAN_SHADOW_SCALE_SIZE);
+ shadow_first_aligned = kasan_mem_to_shadow((void *)addr_start_aligned);
+ shadow_last_aligned = kasan_mem_to_shadow((void *)addr_end_aligned);
+
+ /* Check the first unaligned tag in shadow memory. */
+ if ((u64)untagged_addr % KASAN_SHADOW_SCALE_SIZE) {
+ if (unlikely((*shadow_first >> KASAN_TAG_WIDTH) != tag))
+ return !kasan_report(addr, size, write, ret_ip);
+ }
+
+ /* Check the middle aligned part in shadow memory. */
+ for (shadow = shadow_first_aligned; shadow < shadow_last_aligned; shadow++) {
+ if (unlikely(*shadow != ((tag << KASAN_TAG_WIDTH) | tag)))
+ return !kasan_report(addr, size, write, ret_ip);
+ }
+
+ /* Check the last unaligned tag in shadow memory. */
+ if (((u64)untagged_addr + aligned_size) % KASAN_SHADOW_SCALE_SIZE) {
+ if (unlikely((*shadow_last & KASAN_TAG_MASK) != tag))
+ return !kasan_report(addr, size, write, ret_ip);
+ }
+
+ return true;
+}
+
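+/* Out-of-line kasan_check_range() entry point for the dense mode. */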
+#if IS_ENABLED(CONFIG_KASAN_SW_TAGS_DENSE)
+bool kasan_check_range(const void *addr, size_t size, bool write,
+ unsigned long ret_ip)
+{
+ return kasan_check_range_inline(addr, size, write, ret_ip);
+}
+#endif
@@ -183,7 +183,7 @@ static inline bool kasan_requires_meta(void)
#define META_BYTES_PER_BLOCK 1
#define META_BLOCKS_PER_ROW 16
#define META_BYTES_PER_ROW (META_BLOCKS_PER_ROW * META_BYTES_PER_BLOCK)
-#define META_MEM_BYTES_PER_ROW (META_BYTES_PER_ROW * KASAN_GRANULE_SIZE)
+#define META_MEM_BYTES_PER_ROW (META_BYTES_PER_ROW * KASAN_SHADOW_SCALE_SIZE)
#define META_ROWS_AROUND_ADDR 2
#define KASAN_STACK_DEPTH 64
@@ -436,7 +436,7 @@ static int meta_pointer_offset(const void *row, const void *addr)
* plus 1 byte for space.
*/
return 3 + (BITS_PER_LONG / 8) * 2 +
- (addr - row) / KASAN_GRANULE_SIZE * 3 + 1;
+ (addr - row) / KASAN_SHADOW_SCALE_SIZE * 3 + 1;
}
static void print_memory_metadata(const void *addr)
@@ -39,7 +39,7 @@ const void *kasan_find_first_bad_addr(const void *addr, size_t size)
if (!addr_has_metadata(p))
return p;
- while (p < end && tag == *(u8 *)kasan_mem_to_shadow(p))
+ while (p < end && tag == kasan_get_shadow_tag(p))
p += KASAN_GRANULE_SIZE;
return p;
@@ -48,7 +48,6 @@ const void *kasan_find_first_bad_addr(const void *addr, size_t size)
size_t kasan_get_alloc_size(void *object, struct kmem_cache *cache)
{
size_t size = 0;
- u8 *shadow;
/*
* Skip the addr_has_metadata check, as this function only operates on
@@ -59,13 +58,11 @@ size_t kasan_get_alloc_size(void *object, struct kmem_cache *cache)
* The loop below returns 0 for freed objects, for which KASAN cannot
* calculate the allocation size based on the metadata.
*/
- shadow = (u8 *)kasan_mem_to_shadow(object);
while (size < cache->object_size) {
- if (*shadow != KASAN_TAG_INVALID)
+ if (kasan_get_shadow_tag(object + size) != KASAN_TAG_INVALID)
size += KASAN_GRANULE_SIZE;
else
return size;
- shadow++;
}
return cache->object_size;
@@ -78,9 +75,8 @@ void kasan_metadata_fetch_row(char *buffer, void *row)
void kasan_print_tags(u8 addr_tag, const void *addr)
{
- u8 *shadow = (u8 *)kasan_mem_to_shadow(addr);
-
- pr_err("Pointer tag: [%02x], memory tag: [%02x]\n", addr_tag, *shadow);
+ pr_err("Pointer tag: [%02x], memory tag: [%02x]\n", addr_tag,
+ kasan_get_shadow_tag(addr));
}
#ifdef CONFIG_KASAN_STACK
@@ -79,6 +79,7 @@ u8 __hwasan_generate_tag(void)
}
EXPORT_SYMBOL(__hwasan_generate_tag);
+#if !IS_ENABLED(CONFIG_KASAN_SW_TAGS_DENSE)
bool kasan_check_range(const void *addr, size_t size, bool write,
unsigned long ret_ip)
{
@@ -127,17 +128,24 @@ bool kasan_check_range(const void *addr, size_t size, bool write,
return true;
}
+#endif
bool kasan_byte_accessible(const void *addr)
{
u8 tag = get_tag(addr);
void *untagged_addr = kasan_reset_tag(addr);
u8 shadow_byte;
+ int shift;
if (!addr_has_metadata(untagged_addr))
return false;
shadow_byte = READ_ONCE(*(u8 *)kasan_mem_to_shadow(untagged_addr));
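+ /*
+ * With dense tags, two KASAN_TAG_WIDTH-bit tags share each shadow
+ * byte: shift the half-byte covering this granule into place.
+ */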
+ if (IS_ENABLED(CONFIG_KASAN_SW_TAGS_DENSE)) {
+ shift = !!((u64)addr & KASAN_GRANULE_SIZE) * KASAN_TAG_WIDTH;
+ shadow_byte = (shadow_byte >> shift) & KASAN_TAG_KERNEL;
+ }
+
return tag == KASAN_TAG_KERNEL || tag == shadow_byte;
}
In KASAN's tag-based mode (arm64), when a memory access occurs, the tag
stored in the top 8 bits of the pointer is compared with the tags saved
in the region of shadow memory that maps to the memory the pointer
points to. If any of the tags in the shadow memory region do not match
the one stored in the pointer, an error report is generated.

With the introduction of the dense mode, tags won't necessarily occupy
whole bytes of shadow memory if the previously allocated memory wasn't
aligned to 32 bytes - the coverage of one shadow byte.

Add an alternative implementation of kasan_check_range() that performs
special checks on the first and last bytes of a shadow memory range
when the originally allocated memory isn't aligned to 32 bytes.

Signed-off-by: Maciej Wieczor-Retman <maciej.wieczor-retman@intel.com>
---
 include/linux/kasan.h     | 47 +++++++++++++++-------
 mm/kasan/Makefile         |  3 ++
 mm/kasan/dense.c          | 83 +++++++++++++++++++++++++++++++++++++++
 mm/kasan/kasan.h          |  2 +-
 mm/kasan/report.c         |  2 +-
 mm/kasan/report_sw_tags.c | 12 ++----
 mm/kasan/sw_tags.c        |  8 ++++
 7 files changed, 133 insertions(+), 24 deletions(-)
 create mode 100644 mm/kasan/dense.c
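For reviewers, a sketch of the dense tag lookup (not part of the patch;
it assumes the 16-byte granule, 32-byte shadow scale and 4-bit tag
width used by this series):

	u8 shadow_byte = *(u8 *)kasan_mem_to_shadow(addr);
	/* Odd granule -> high half-byte, even granule -> low half-byte. */
	int shift = !!((unsigned long)addr & 16) * 4;
	u8 tag = (shadow_byte >> shift) & 0xf;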