@@ -25,6 +25,12 @@ struct kunit_kasan_expectation {
#endif
+typedef unsigned int __bitwise kasan_vmalloc_flags_t;
+
+#define KASAN_VMALLOC_NONE ((__force kasan_vmalloc_flags_t)0x00u)
+#define KASAN_VMALLOC_INIT ((__force kasan_vmalloc_flags_t)0x01u)
+#define KASAN_VMALLOC_VM_ALLOC ((__force kasan_vmalloc_flags_t)0x02u)
+
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
#include <linux/pgtable.h>
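A minimal sketch of how a __bitwise flags type like this is used; illustrative only, with the sparse annotations stubbed out so the snippet compiles standalone (in the kernel they come from the compiler/type headers and are enforced by sparse):

    #include <stdbool.h>

    #define __bitwise
    #define __force

    typedef unsigned int __bitwise kasan_vmalloc_flags_t;

    #define KASAN_VMALLOC_NONE     ((__force kasan_vmalloc_flags_t)0x00u)
    #define KASAN_VMALLOC_INIT     ((__force kasan_vmalloc_flags_t)0x01u)
    #define KASAN_VMALLOC_VM_ALLOC ((__force kasan_vmalloc_flags_t)0x02u)

    /* Callers combine flags with |, consumers test them with &. Under a
     * real sparse run, mixing them with plain integers without a __force
     * cast produces a type warning. */
    static bool wants_init(kasan_vmalloc_flags_t flags)
    {
            return flags & KASAN_VMALLOC_INIT;
    }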
@@ -418,18 +424,39 @@ static inline void kasan_init_hw_tags(void) { }
#ifdef CONFIG_KASAN_VMALLOC
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
+
void kasan_populate_early_vm_area_shadow(void *start, unsigned long size);
int kasan_populate_vmalloc(unsigned long addr, unsigned long size);
void kasan_release_vmalloc(unsigned long start, unsigned long end,
unsigned long free_region_start,
unsigned long free_region_end);
-void *__kasan_unpoison_vmalloc(const void *start, unsigned long size);
+#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
+
+static inline void kasan_populate_early_vm_area_shadow(void *start,
+ unsigned long size)
+{ }
+static inline int kasan_populate_vmalloc(unsigned long start,
+ unsigned long size)
+{
+ return 0;
+}
+static inline void kasan_release_vmalloc(unsigned long start,
+ unsigned long end,
+ unsigned long free_region_start,
+ unsigned long free_region_end) { }
+
+#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
+
+void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
+ kasan_vmalloc_flags_t flags);
static __always_inline void *kasan_unpoison_vmalloc(const void *start,
- unsigned long size)
+ unsigned long size,
+ kasan_vmalloc_flags_t flags)
{
if (kasan_enabled())
- return __kasan_unpoison_vmalloc(start, size);
+ return __kasan_unpoison_vmalloc(start, size, flags);
return (void *)start;
}
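One consequence of the tag-based modes worth spelling out: __kasan_unpoison_vmalloc() may return a retagged copy of start, so callers have to keep using the returned pointer, as every caller converted below does. A hypothetical caller, sketched (the function and its context are made up; kasan_unpoison_vmalloc() and the flag are from this patch):

    static void *mark_mapped_area(struct vm_struct *area, unsigned long size)
    {
            /* Dropping the return value here would lose the pointer tag
             * assigned under the tag-based KASAN modes. */
            area->addr = kasan_unpoison_vmalloc(area->addr, size,
                                                KASAN_VMALLOC_VM_ALLOC);
            return area->addr;
    }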
@@ -456,7 +483,8 @@ static inline void kasan_release_vmalloc(unsigned long start,
unsigned long free_region_end) { }
static inline void *kasan_unpoison_vmalloc(const void *start,
- unsigned long size)
+ unsigned long size,
+ kasan_vmalloc_flags_t flags)
{
return (void *)start;
}
@@ -32,7 +32,7 @@ static void *__scs_alloc(int node)
for (i = 0; i < NR_CACHED_SCS; i++) {
s = this_cpu_xchg(scs_cache[i], NULL);
if (s) {
- kasan_unpoison_vmalloc(s, SCS_SIZE);
+ kasan_unpoison_vmalloc(s, SCS_SIZE, KASAN_VMALLOC_NONE);
memset(s, 0, SCS_SIZE);
return s;
}
@@ -78,7 +78,7 @@ void scs_free(void *s)
if (this_cpu_cmpxchg(scs_cache[i], 0, s) == NULL)
return;
- kasan_unpoison_vmalloc(s, SCS_SIZE);
+ kasan_unpoison_vmalloc(s, SCS_SIZE, KASAN_VMALLOC_NONE);
vfree_atomic(s);
}
@@ -192,6 +192,98 @@ void __init kasan_init_hw_tags(void)
kasan_stack_collection_enabled() ? "on" : "off");
}
+#ifdef CONFIG_KASAN_VMALLOC
+
+static void unpoison_vmalloc_pages(const void *addr, u8 tag)
+{
+ struct vm_struct *area;
+ int i;
+
+ /*
+ * As hardware tag-based KASAN only tags VM_ALLOC vmalloc allocations
+ * (see the comment in __kasan_unpoison_vmalloc), all of the pages
+ * should belong to a single area.
+ */
+ area = find_vm_area((void *)addr);
+ if (WARN_ON(!area))
+ return;
+
+ for (i = 0; i < area->nr_pages; i++) {
+ struct page *page = area->pages[i];
+
+ page_kasan_tag_set(page, tag);
+ }
+}
+
+void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
+ kasan_vmalloc_flags_t flags)
+{
+ u8 tag;
+ unsigned long redzone_start, redzone_size;
+
+ if (!is_vmalloc_or_module_addr(start))
+ return (void *)start;
+
+ /*
+ * Skip unpoisoning and assigning a pointer tag for non-VM_ALLOC
+ * mappings as:
+ *
+ * 1. Unlike the software KASAN modes, hardware tag-based KASAN only
+ * supports tagging physical memory. Therefore, it can only tag a
+ * single mapping of normal physical pages.
+ * 2. Hardware tag-based KASAN can only tag memory mapped with special
+ * mapping protection bits, see arch_vmap_pgprot_tagged().
+ * As non-VM_ALLOC mappings can be mapped outside of vmalloc code,
+ * providing these bits would require tracking all non-VM_ALLOC
+ * mappers.
+ *
+ * Thus, for VM_ALLOC mappings, hardware tag-based KASAN only tags
+ * the first virtual mapping, which is created by vmalloc().
+ * Tagging the page_alloc memory backing that vmalloc() allocation is
+ * skipped, see __GFP_SKIP_KASAN_UNPOISON.
+ *
+ * For non-VM_ALLOC allocations, page_alloc memory is tagged as usual.
+ */
+ if (!(flags & KASAN_VMALLOC_VM_ALLOC))
+ return (void *)start;
+
+ tag = kasan_random_tag();
+ start = set_tag(start, tag);
+
+ /* Unpoison and initialize memory up to size. */
+ kasan_unpoison(start, size, flags & KASAN_VMALLOC_INIT);
+
+ /*
+ * Explicitly poison and initialize the in-page vmalloc() redzone.
+ * Unlike software KASAN modes, hardware tag-based KASAN doesn't
+ * unpoison memory when populating shadow for vmalloc() space.
+ */
+ redzone_start = round_up((unsigned long)start + size,
+ KASAN_GRANULE_SIZE);
+ redzone_size = round_up(redzone_start, PAGE_SIZE) - redzone_start;
+ kasan_poison((void *)redzone_start, redzone_size, KASAN_TAG_INVALID,
+ flags & KASAN_VMALLOC_INIT);
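+
+ /*
+ * Worked example with illustrative numbers (not from the patch): for a
+ * page-aligned start, size == 300, KASAN_GRANULE_SIZE == 16 and
+ * PAGE_SIZE == 4096, redzone_start is start + 304 (300 rounded up to
+ * the granule) and redzone_size is 4096 - 304 == 3792, i.e. the
+ * redzone covers the remainder of the last page. It is empty when
+ * size is already a multiple of PAGE_SIZE.
+ */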
+
+ /*
+ * Set per-page tag flags to allow accessing physical memory for the
+ * vmalloc() mapping through page_address(vmalloc_to_page()).
+ */
+ unpoison_vmalloc_pages(start, tag);
+
+ return (void *)start;
+}
+
+void __kasan_poison_vmalloc(const void *start, unsigned long size)
+{
+ /*
+ * No tagging here.
+ * The physical pages backing the vmalloc() allocation are poisoned
+ * through the usual page_alloc paths.
+ */
+}
+
+#endif /* CONFIG_KASAN_VMALLOC */
+
#if IS_ENABLED(CONFIG_KASAN_KUNIT_TEST)
void kasan_enable_tagging_sync(void)
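Taken together with the __vmalloc_node_range() changes below, the hardware tag-based path keeps one tag consistent across the returned pointer, the vmalloc mapping, and the backing struct pages. A hedged sketch of the resulting lifecycle for a plain vmalloc() call (the tag value is made up; the helpers are real kernel APIs):

    static void vmalloc_tag_flow(void)
    {
            /* p carries a random pointer tag, say 0x3; the memory behind
             * it was set to the same tag by __kasan_unpoison_vmalloc(). */
            char *p = vmalloc(PAGE_SIZE);

            if (!p)
                    return;
            p[0] = 1;       /* tag check passes: pointer and memory match */

            /* unpoison_vmalloc_pages() stored the tag in the page flags,
             * so the page_address() alias matches as well. */
            ((char *)page_address(vmalloc_to_page(p)))[0] = 2;

            vfree(p);
    }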
@@ -475,8 +475,16 @@ void kasan_release_vmalloc(unsigned long start, unsigned long end,
}
}
-void *__kasan_unpoison_vmalloc(const void *start, unsigned long size)
+void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
+ kasan_vmalloc_flags_t flags)
{
+ /*
+ * Software KASAN modes unpoison both VM_ALLOC and non-VM_ALLOC
+ * mappings, so the KASAN_VMALLOC_VM_ALLOC flag is ignored.
+ * Software KASAN modes can't optimize zeroing memory by combining it
+ * with setting memory tags, so the KASAN_VMALLOC_INIT flag is ignored.
+ */
+
if (!is_vmalloc_or_module_addr(start))
return (void *)start;
@@ -2215,8 +2215,12 @@ void *vm_map_ram(struct page **pages, unsigned int count, int node)
return NULL;
}
- /* Mark the pages as accessible, now that they are mapped. */
- mem = kasan_unpoison_vmalloc(mem, size);
+ /*
+ * Mark the pages as accessible, now that they are mapped.
+ * With hardware tag-based KASAN, marking is skipped for
+ * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
+ */
+ mem = kasan_unpoison_vmalloc(mem, size, KASAN_VMALLOC_NONE);
return mem;
}
@@ -2450,9 +2454,12 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
* best-effort approach, as they can be mapped outside of vmalloc code.
* For VM_ALLOC mappings, the pages are marked as accessible after
* getting mapped in __vmalloc_node_range().
+ * With hardware tag-based KASAN, marking is skipped for
+ * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
*/
if (!(flags & VM_ALLOC))
- area->addr = kasan_unpoison_vmalloc(area->addr, requested_size);
+ area->addr = kasan_unpoison_vmalloc(area->addr, requested_size,
+ KASAN_VMALLOC_NONE);
return area;
}
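For contrast with the VM_ALLOC path, a non-VM_ALLOC mapping such as one created by vmap() goes through the best-effort branch above: the software modes unpoison it here, while hardware tag-based KASAN leaves both pointer and memory untagged. A hypothetical example using real APIs:

    static void vmap_example(void)
    {
            struct page *page = alloc_page(GFP_KERNEL);
            void *p;

            if (!page)
                    return;

            /* VM_MAP, not VM_ALLOC: under hardware tag-based KASAN, p
             * keeps the default 0xFF tag, since this mapping was not
             * created with arch_vmap_pgprot_tagged() and MTE does not
             * check accesses through it. */
            p = vmap(&page, 1, VM_MAP, PAGE_KERNEL);
            if (p)
                    vunmap(p);
            __free_page(page);
    }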
@@ -3058,6 +3065,7 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
{
struct vm_struct *area;
void *ret;
+ kasan_vmalloc_flags_t kasan_flags;
unsigned long real_size = size;
unsigned long real_align = align;
unsigned int shift = PAGE_SHIFT;
@@ -3110,21 +3118,39 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
goto fail;
}
- /*
- * Modify protection bits to allow tagging.
- * This must be done before mapping by __vmalloc_area_node().
- */
+ /* Prepare arguments for __vmalloc_area_node(). */
if (kasan_hw_tags_enabled() &&
- pgprot_val(prot) == pgprot_val(PAGE_KERNEL))
+ pgprot_val(prot) == pgprot_val(PAGE_KERNEL)) {
+ /*
+ * Modify protection bits to allow tagging.
+ * This must be done before mapping in __vmalloc_area_node().
+ */
prot = arch_vmap_pgprot_tagged(prot);
+ /*
+ * Skip page_alloc poisoning and zeroing for physical pages
+ * backing VM_ALLOC mapping. Memory is instead tagged and
+ * zeroed by kasan_unpoison_vmalloc().
+ */
+ gfp_mask |= __GFP_SKIP_KASAN_UNPOISON | __GFP_SKIP_ZERO;
+ }
+
/* Allocate physical pages and map them into vmalloc space. */
ret = __vmalloc_area_node(area, gfp_mask, prot, shift, node);
if (!ret)
goto fail;
- /* Mark the pages as accessible, now that they are mapped. */
- area->addr = kasan_unpoison_vmalloc(area->addr, real_size);
+ /*
+ * Mark the pages as accessible, now that they are mapped.
+ * The init condition should match the one in post_alloc_hook()
+ * (except for the should_skip_init() check) to make sure that memory
+ * is initialized under the same conditions regardless of the enabled
+ * KASAN mode.
+ */
+ kasan_flags = KASAN_VMALLOC_VM_ALLOC;
+ if (!want_init_on_free() && want_init_on_alloc(gfp_mask))
+ kasan_flags |= KASAN_VMALLOC_INIT;
+ area->addr = kasan_unpoison_vmalloc(area->addr, real_size, kasan_flags);
/*
* In this function, newly allocated vm_struct has VM_UNINITIALIZED
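The init condition above can be restated as a small predicate; a hedged sketch (the function name is made up; want_init_on_alloc()/want_init_on_free() are the real kernel helpers):

    /* Matches post_alloc_hook()'s init decision minus its
     * should_skip_init() check, which the __GFP_SKIP_ZERO set above
     * deliberately triggers for the backing pages. */
    static bool vmalloc_wants_kasan_init(gfp_t gfp_mask)
    {
            return !want_init_on_free() && want_init_on_alloc(gfp_mask);
    }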
@@ -3832,10 +3858,13 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
/*
* Mark allocated areas as accessible. Do it now as a best-effort
* approach, as they can be mapped outside of vmalloc code.
+ * With hardware tag-based KASAN, marking is skipped for
+ * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
*/
for (area = 0; area < nr_vms; area++)
vms[area]->addr = kasan_unpoison_vmalloc(vms[area]->addr,
- vms[area]->size);
+ vms[area]->size,
+ KASAN_VMALLOC_NONE);
kfree(vas);
return vms;