
[v11,19/19] kasan: enable instrumentation of global variables

Message ID 1422985392-28652-20-git-send-email-a.ryabinin@samsung.com (mailing list archive)
State New, archived

Commit Message

Andrey Ryabinin Feb. 3, 2015, 5:43 p.m. UTC
This feature lets us detect out-of-bounds accesses to global variables.
It works both for globals in the kernel image and for globals in modules.
Currently this won't work for symbols in user-specified sections
(e.g. __init, __read_mostly, ...).

The idea is simple. The compiler pads each global variable with a redzone
and adds constructors that invoke the __asan_register_globals() function.
Information about each global variable (address, size, size with redzone, ...)
is passed to __asan_register_globals() so we can poison the variable's
redzone.
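
For illustration, the registration amounts to the compiler emitting, per
compilation unit, a descriptor table plus a constructor along these lines
(a hand-written approximation of the generated code, with made-up names;
struct kasan_global is the kernel-side mirror added in mm/kasan/kasan.h below):

static struct kasan_global some_file_globals[] = {
	{
		.beg = &some_global,			/* start address of the variable */
		.size = sizeof(some_global),		/* original size */
		.size_with_redzone = 64,		/* size + redzone, 32-byte aligned */
		.name = "some_global",
		.module_name = "some_file.c",
		.has_dynamic_init = 0,
	},
};

static void __attribute__((constructor)) some_file_asan_ctor(void)
{
	__asan_register_globals(some_file_globals, ARRAY_SIZE(some_file_globals));
}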

This patch also forces module_alloc() to return an 8*PAGE_SIZE-aligned
address, which makes shadow memory handling (kasan_module_alloc()/
kasan_module_free()) simpler. Such alignment guarantees that each shadow
page backing the modules' address space corresponds to only one
module_alloc() allocation.
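
For the common configuration of 4K pages and KASAN_SHADOW_SCALE_SHIFT == 3
(one shadow byte per 8 bytes of memory) the numbers work out as:

	MODULE_ALIGN = PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT = 4096 * 8 = 32768

	one shadow page covers 4096 * 8 = 32768 bytes of module address space

so every module_alloc() result starts exactly on a shadow-page boundary and
no shadow page ever straddles two allocations.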

Signed-off-by: Andrey Ryabinin <a.ryabinin@samsung.com>
---
 arch/x86/kernel/module.c      | 12 +++++++++-
 arch/x86/mm/kasan_init_64.c   |  2 +-
 include/linux/compiler-gcc4.h |  4 ++++
 include/linux/compiler-gcc5.h |  2 ++
 include/linux/kasan.h         | 10 +++++++++
 kernel/module.c               |  2 ++
 lib/Kconfig.kasan             |  1 +
 mm/kasan/kasan.c              | 52 +++++++++++++++++++++++++++++++++++++++++++
 mm/kasan/kasan.h              | 25 +++++++++++++++++++++
 mm/kasan/report.c             | 22 ++++++++++++++++++
 scripts/Makefile.kasan        |  5 +++--
 11 files changed, 133 insertions(+), 4 deletions(-)

Comments

Rusty Russell Feb. 16, 2015, 2:58 a.m. UTC | #1
Andrey Ryabinin <a.ryabinin@samsung.com> writes:
> This feature let us to detect accesses out of bounds of
> global variables. This will work as for globals in kernel
> image, so for globals in modules. Currently this won't work
> for symbols in user-specified sections (e.g. __init, __read_mostly, ...)
>
> The idea of this is simple. Compiler increases each global variable
> by redzone size and add constructors invoking __asan_register_globals()
> function. Information about global variable (address, size,
> size with redzone ...) passed to __asan_register_globals() so we could
> poison variable's redzone.
>
> This patch also forces module_alloc() to return 8*PAGE_SIZE aligned
> address making shadow memory handling ( kasan_module_alloc()/kasan_module_free() )
> more simple. Such alignment guarantees that each shadow page backing
> modules address space correspond to only one module_alloc() allocation.

Hmm, I understand why you only fixed x86, but it's weird.

I think MODULE_ALIGN belongs in linux/moduleloader.h, and every arch
should be fixed up to use it (though you could leave that for later).

Might as well fix the default implementation at least.

> @@ -49,8 +49,15 @@ void kasan_krealloc(const void *object, size_t new_size);
>  void kasan_slab_alloc(struct kmem_cache *s, void *object);
>  void kasan_slab_free(struct kmem_cache *s, void *object);
>  
> +#define MODULE_ALIGN (PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
> +
> +int kasan_module_alloc(void *addr, size_t size);
> +void kasan_module_free(void *addr);
> +
>  #else /* CONFIG_KASAN */
>  
> +#define MODULE_ALIGN 1

Hmm, that should be PAGE_SIZE (we assume that in several places).
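
Taken together that would mean roughly this in include/linux/moduleloader.h
(a sketch of the suggestion only, not an actual patch):

#ifdef CONFIG_KASAN
#define MODULE_ALIGN	(PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
#else
#define MODULE_ALIGN	PAGE_SIZE	/* several places assume page alignment */
#endif

with each arch's module_alloc() then passing MODULE_ALIGN as the align
argument of __vmalloc_node_range(), as the x86 hunk in this patch already does.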

> @@ -1807,6 +1808,7 @@ static void unset_module_init_ro_nx(struct module *mod) { }
>  void __weak module_memfree(void *module_region)
>  {
>  	vfree(module_region);
> +	kasan_module_free(module_region);
>  }

This looks racy (memory reuse?).  Perhaps try other order?

Cheers,
Rusty.
Andrey Ryabinin Feb. 16, 2015, 2:44 p.m. UTC | #2
On 02/16/2015 05:58 AM, Rusty Russell wrote:
> Andrey Ryabinin <a.ryabinin@samsung.com> writes:
>> This feature let us to detect accesses out of bounds of
>> global variables. This will work as for globals in kernel
>> image, so for globals in modules. Currently this won't work
>> for symbols in user-specified sections (e.g. __init, __read_mostly, ...)
>>
>> The idea of this is simple. Compiler increases each global variable
>> by redzone size and add constructors invoking __asan_register_globals()
>> function. Information about global variable (address, size,
>> size with redzone ...) passed to __asan_register_globals() so we could
>> poison variable's redzone.
>>
>> This patch also forces module_alloc() to return 8*PAGE_SIZE aligned
>> address making shadow memory handling ( kasan_module_alloc()/kasan_module_free() )
>> more simple. Such alignment guarantees that each shadow page backing
>> modules address space correspond to only one module_alloc() allocation.
> 
> Hmm, I understand why you only fixed x86, but it's weird.
> 
> I think MODULE_ALIGN belongs in linux/moduleloader.h, and every arch
> should be fixed up to use it (though you could leave that for later).
> 
> Might as well fix the default implementation at least.
> 
>> @@ -49,8 +49,15 @@ void kasan_krealloc(const void *object, size_t new_size);
>>  void kasan_slab_alloc(struct kmem_cache *s, void *object);
>>  void kasan_slab_free(struct kmem_cache *s, void *object);
>>  
>> +#define MODULE_ALIGN (PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
>> +
>> +int kasan_module_alloc(void *addr, size_t size);
>> +void kasan_module_free(void *addr);
>> +
>>  #else /* CONFIG_KASAN */
>>  
>> +#define MODULE_ALIGN 1
> 
> Hmm, that should be PAGE_SIZE (we assume that in several places).
> 
>> @@ -1807,6 +1808,7 @@ static void unset_module_init_ro_nx(struct module *mod) { }
>>  void __weak module_memfree(void *module_region)
>>  {
>>  	vfree(module_region);
>> +	kasan_module_free(module_region);
>>  }
> 
> This looks racy (memory reuse?).  Perhaps try other order?
> 

You are right, it's racy. A concurrent kasan_module_alloc() could fail because
kasan_module_free() hasn't been called/finished yet, so the whole module_alloc()
will fail and module loading will fail.
However, I've just found out that this race is not the worst problem here.
When vfree(addr) is called in interrupt context, the memory at addr will be
reused for storing a 'struct llist_node':

void vfree(const void *addr)
{
...
	if (unlikely(in_interrupt())) {
		struct vfree_deferred *p = this_cpu_ptr(&vfree_deferred);
		if (llist_add((struct llist_node *)addr, &p->list))
			schedule_work(&p->wq);


In this case we have to free the shadow *after* freeing 'module_region', because
'module_region' is still used in llist_add() and later in free_work().
free_work() (in mm/vmalloc.c) processes the list in LIFO order, so to free the
shadow after freeing 'module_region', kasan_module_free(module_region) has to be
called before vfree(module_region).
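
That is, the stopgap ordering would be (sketch):

void __weak module_memfree(void *module_region)
{
	/*
	 * Queue the shadow vfree() first: on the deferred (in_interrupt)
	 * path the llist is processed in LIFO order, so the shadow is
	 * then actually released after module_region itself.
	 */
	kasan_module_free(module_region);
	vfree(module_region);
}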

It will still be racy, but that is not as bad as the potential crash that we
have now. Honestly, I have no idea how to fix this race nicely. Any suggestions?



Dmitry Vyukov Feb. 16, 2015, 2:47 p.m. UTC | #3
Can a module be freed in an interrupt?


On Mon, Feb 16, 2015 at 5:44 PM, Andrey Ryabinin <a.ryabinin@samsung.com> wrote:
> On 02/16/2015 05:58 AM, Rusty Russell wrote:
>> Andrey Ryabinin <a.ryabinin@samsung.com> writes:
>>> This feature let us to detect accesses out of bounds of
>>> global variables. This will work as for globals in kernel
>>> image, so for globals in modules. Currently this won't work
>>> for symbols in user-specified sections (e.g. __init, __read_mostly, ...)
>>>
>>> The idea of this is simple. Compiler increases each global variable
>>> by redzone size and add constructors invoking __asan_register_globals()
>>> function. Information about global variable (address, size,
>>> size with redzone ...) passed to __asan_register_globals() so we could
>>> poison variable's redzone.
>>>
>>> This patch also forces module_alloc() to return 8*PAGE_SIZE aligned
>>> address making shadow memory handling ( kasan_module_alloc()/kasan_module_free() )
>>> more simple. Such alignment guarantees that each shadow page backing
>>> modules address space correspond to only one module_alloc() allocation.
>>
>> Hmm, I understand why you only fixed x86, but it's weird.
>>
>> I think MODULE_ALIGN belongs in linux/moduleloader.h, and every arch
>> should be fixed up to use it (though you could leave that for later).
>>
>> Might as well fix the default implementation at least.
>>
>>> @@ -49,8 +49,15 @@ void kasan_krealloc(const void *object, size_t new_size);
>>>  void kasan_slab_alloc(struct kmem_cache *s, void *object);
>>>  void kasan_slab_free(struct kmem_cache *s, void *object);
>>>
>>> +#define MODULE_ALIGN (PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
>>> +
>>> +int kasan_module_alloc(void *addr, size_t size);
>>> +void kasan_module_free(void *addr);
>>> +
>>>  #else /* CONFIG_KASAN */
>>>
>>> +#define MODULE_ALIGN 1
>>
>> Hmm, that should be PAGE_SIZE (we assume that in several places).
>>
>>> @@ -1807,6 +1808,7 @@ static void unset_module_init_ro_nx(struct module *mod) { }
>>>  void __weak module_memfree(void *module_region)
>>>  {
>>>      vfree(module_region);
>>> +    kasan_module_free(module_region);
>>>  }
>>
>> This looks racy (memory reuse?).  Perhaps try other order?
>>
>
> You are right, it's racy. Concurrent kasan_module_alloc() could fail because
> kasan_module_free() wasn't called/finished yet, so whole module_alloc() will fail
> and module loading will fail.
> However, I just find out that this race is not the worst problem here.
> When vfree(addr) called in interrupt context, memory at addr will be reused for
> storing 'struct llist_node':
>
> void vfree(const void *addr)
> {
> ...
>         if (unlikely(in_interrupt())) {
>                 struct vfree_deferred *p = this_cpu_ptr(&vfree_deferred);
>                 if (llist_add((struct llist_node *)addr, &p->list))
>                         schedule_work(&p->wq);
>
>
> In this case we have to free shadow *after* freeing 'module_region', because 'module_region'
> is still used in llist_add() and in free_work() latter.
> free_work() (in mm/vmalloc.c) processes list in LIFO order, so to free shadow after freeing
> 'module_region' kasan_module_free(module_region); should be called before vfree(module_region);
>
> It will be racy still, but this is not so bad as potential crash that we have now.
> Honestly, I have no idea how to fix this race nicely. Any suggestions?
>
>
>
Andrey Ryabinin Feb. 16, 2015, 3:09 p.m. UTC | #4
On 02/16/2015 05:47 PM, Dmitry Vyukov wrote:
> Can a module be freed in an interrupt?
> 
> 

Since commit c749637909ee ("module: fix race in kallsyms resolution during module load success."),
a module's init section is always freed via an RCU callback (RCU callbacks are executed
from softirq context).

Currently, with DEBUG_PAGEALLOC and KASAN, module loading always causes a kernel crash.
It's harder to trigger without DEBUG_PAGEALLOC because of lazy TLB flushing in vmalloc.

BUG: unable to handle kernel paging request at fffffbfff4011000
IP: [<ffffffff811d8f7b>] __asan_load8+0x2b/0xa0
PGD 7ffa3063 PUD 7ffa2063 PMD 484ea067 PTE 0
Oops: 0000 [#1] SMP DEBUG_PAGEALLOC KASAN
Dumping ftrace buffer:
   (ftrace buffer empty)
Modules linked in: ipv6
CPU: 0 PID: 30 Comm: kworker/0:1 Tainted: G        W       3.19.0-rc7-next-20150209+ #209
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.7.5-0-ge51488c-20140602_164612-nilsson.home.kraxel.org 04/01/2014
Workqueue: events free_work
task: ffff88006c5a8870 ti: ffff88006c630000 task.ti: ffff88006c630000
RIP: 0010:[<ffffffff811d8f7b>]  [<ffffffff811d8f7b>] __asan_load8+0x2b/0xa0
RSP: 0018:ffff88006c637cd8  EFLAGS: 00010286
RAX: fffffbfff4011000 RBX: ffffffffa0088000 RCX: ffffed000da000a9
RDX: dffffc0000000000 RSI: 0000000000000001 RDI: ffffffffa0088000
RBP: ffff88006c637d08 R08: 0000000000000000 R09: ffff88006d007840
R10: ffff88006d000540 R11: ffffed000da000a9 R12: ffffffffa0088000
R13: ffff88006d61a5d8 R14: ffff88006d61a5d8 R15: ffff88006d61a5c0
FS:  0000000000000000(0000) GS:ffff88006d600000(0000) knlGS:0000000000000000
CS:  0010 DS: 0000 ES: 0000 CR0: 000000008005003b
CR2: fffffbfff4011000 CR3: 000000004d967000 CR4: 00000000000006b0
Stack:
 ffff88006c637ce8 fffffbfff4011000 ffffffffa0088000 ffff88006d61a5d8
 ffff88006d61a5d8 ffff88006d61a5c0 ffff88006c637d28 ffffffff811bb1b8
 ffff88006c5bc618 ffff88006d617b28 ffff88006c637db8 ffffffff8108e1b0
Call Trace:
 [<ffffffff811bb1b8>] free_work+0x38/0x60
 [<ffffffff8108e1b0>] process_one_work+0x2a0/0x7d0
 [<ffffffff8108f653>] worker_thread+0x93/0x840
 [<ffffffff8108f5c0>] ? init_pwq.part.11+0x10/0x10
 [<ffffffff81096f37>] kthread+0x177/0x1a0
 [<ffffffff81096dc0>] ? kthread_worker_fn+0x290/0x290
 [<ffffffff81096dc0>] ? kthread_worker_fn+0x290/0x290
 [<ffffffff8158cd7c>] ret_from_fork+0x7c/0xb0
 [<ffffffff81096dc0>] ? kthread_worker_fn+0x290/0x290
Code: 48 b8 ff ff ff ff ff 7f ff ff 55 48 89 e5 48 83 ec 30 48 39 c7 76 59 48 ba 00 00 00 00 00 fc ff df 48 89 f8 48 c1 e8 03 48 01 d0 <66> 83 38 00 75 07 c9 c3 0f 1f 44 00 00 48 8d 4f 07 48 89 ce 48
RIP  [<ffffffff811d8f7b>] __asan_load8+0x2b/0xa0
 RSP <ffff88006c637cd8>
CR2: fffffbfff4011000
---[ end trace b9411d841784b6cf ]---


Rusty Russell Feb. 16, 2015, 11:55 p.m. UTC | #5
Andrey Ryabinin <a.ryabinin@samsung.com> writes:
> On 02/16/2015 05:58 AM, Rusty Russell wrote:
>> Andrey Ryabinin <a.ryabinin@samsung.com> writes:
>>> This feature let us to detect accesses out of bounds of
>>> global variables. This will work as for globals in kernel
>>> image, so for globals in modules. Currently this won't work
>>> for symbols in user-specified sections (e.g. __init, __read_mostly, ...)
>>> @@ -1807,6 +1808,7 @@ static void unset_module_init_ro_nx(struct module *mod) { }
>>>  void __weak module_memfree(void *module_region)
>>>  {
>>>  	vfree(module_region);
>>> +	kasan_module_free(module_region);
>>>  }
>> 
>> This looks racy (memory reuse?).  Perhaps try other order?
>> 
>
> You are right, it's racy. Concurrent kasan_module_alloc() could fail because
> kasan_module_free() wasn't called/finished yet, so whole module_alloc() will fail
> and module loading will fail.
> However, I just find out that this race is not the worst problem here.
> When vfree(addr) called in interrupt context, memory at addr will be reused for
> storing 'struct llist_node':
>
> void vfree(const void *addr)
> {
> ...
> 	if (unlikely(in_interrupt())) {
> 		struct vfree_deferred *p = this_cpu_ptr(&vfree_deferred);
> 		if (llist_add((struct llist_node *)addr, &p->list))
> 			schedule_work(&p->wq);
>
>
> In this case we have to free shadow *after* freeing 'module_region', because 'module_region'
> is still used in llist_add() and in free_work() latter.
> free_work() (in mm/vmalloc.c) processes list in LIFO order, so to free shadow after freeing
> 'module_region' kasan_module_free(module_region); should be called before vfree(module_region);
>
> It will be racy still, but this is not so bad as potential crash that we have now.
> Honestly, I have no idea how to fix this race nicely. Any suggestions?

I think you need to take over the rcu callback for the kasan case.

Perhaps we rename that to __module_memfree(), and do:

void module_memfree(void *p)
{
#ifdef CONFIG_KASAN
        ...
#endif
        __module_memfree(p);        
}
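
For illustration only, the KASAN branch might be as small as this (purely
hypothetical; it keeps the shadow-before-region call order discussed above
but does not by itself solve the rcu-callback/interrupt-context problem):

void module_memfree(void *p)
{
#ifdef CONFIG_KASAN
	kasan_module_free(p);	/* drop the shadow mapping for this region */
#endif
	__module_memfree(p);
}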

Note: there are calls to module_memfree from other code (BPF and
kprobes).  I assume you looked at those too...

Cheers,
Rusty.

Patch

diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
index e830e61..d1ac80b 100644
--- a/arch/x86/kernel/module.c
+++ b/arch/x86/kernel/module.c
@@ -24,6 +24,7 @@ 
 #include <linux/fs.h>
 #include <linux/string.h>
 #include <linux/kernel.h>
+#include <linux/kasan.h>
 #include <linux/bug.h>
 #include <linux/mm.h>
 #include <linux/gfp.h>
@@ -83,13 +84,22 @@  static unsigned long int get_module_load_offset(void)
 
 void *module_alloc(unsigned long size)
 {
+	void *p;
+
 	if (PAGE_ALIGN(size) > MODULES_LEN)
 		return NULL;
-	return __vmalloc_node_range(size, 1,
+
+	p = __vmalloc_node_range(size, MODULE_ALIGN,
 				    MODULES_VADDR + get_module_load_offset(),
 				    MODULES_END, GFP_KERNEL | __GFP_HIGHMEM,
 				    PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
 				    __builtin_return_address(0));
+	if (p && (kasan_module_alloc(p, size) < 0)) {
+		vfree(p);
+		return NULL;
+	}
+
+	return p;
 }
 
 #ifdef CONFIG_X86_32
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
index 5350870..4860906 100644
--- a/arch/x86/mm/kasan_init_64.c
+++ b/arch/x86/mm/kasan_init_64.c
@@ -196,7 +196,7 @@  void __init kasan_init(void)
 			(unsigned long)kasan_mem_to_shadow(_end),
 			NUMA_NO_NODE);
 
-	populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_VADDR),
+	populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END),
 			(void *)KASAN_SHADOW_END);
 
 	memset(kasan_zero_page, 0, PAGE_SIZE);
diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
index d1a5582..769e198 100644
--- a/include/linux/compiler-gcc4.h
+++ b/include/linux/compiler-gcc4.h
@@ -85,3 +85,7 @@ 
 #define __HAVE_BUILTIN_BSWAP16__
 #endif
 #endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
+
+#if GCC_VERSION >= 40902
+#define KASAN_ABI_VERSION 3
+#endif
diff --git a/include/linux/compiler-gcc5.h b/include/linux/compiler-gcc5.h
index c8c5659..efee493 100644
--- a/include/linux/compiler-gcc5.h
+++ b/include/linux/compiler-gcc5.h
@@ -63,3 +63,5 @@ 
 #define __HAVE_BUILTIN_BSWAP64__
 #define __HAVE_BUILTIN_BSWAP16__
 #endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
+
+#define KASAN_ABI_VERSION 4
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index d5310ee..72ba725 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -49,8 +49,15 @@  void kasan_krealloc(const void *object, size_t new_size);
 void kasan_slab_alloc(struct kmem_cache *s, void *object);
 void kasan_slab_free(struct kmem_cache *s, void *object);
 
+#define MODULE_ALIGN (PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
+
+int kasan_module_alloc(void *addr, size_t size);
+void kasan_module_free(void *addr);
+
 #else /* CONFIG_KASAN */
 
+#define MODULE_ALIGN 1
+
 static inline void kasan_unpoison_shadow(const void *address, size_t size) {}
 
 static inline void kasan_enable_current(void) {}
@@ -74,6 +81,9 @@  static inline void kasan_krealloc(const void *object, size_t new_size) {}
 static inline void kasan_slab_alloc(struct kmem_cache *s, void *object) {}
 static inline void kasan_slab_free(struct kmem_cache *s, void *object) {}
 
+static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
+static inline void kasan_module_free(void *addr) {}
+
 #endif /* CONFIG_KASAN */
 
 #endif /* LINUX_KASAN_H */
diff --git a/kernel/module.c b/kernel/module.c
index d856e96..f842027 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -56,6 +56,7 @@ 
 #include <linux/async.h>
 #include <linux/percpu.h>
 #include <linux/kmemleak.h>
+#include <linux/kasan.h>
 #include <linux/jump_label.h>
 #include <linux/pfn.h>
 #include <linux/bsearch.h>
@@ -1807,6 +1808,7 @@  static void unset_module_init_ro_nx(struct module *mod) { }
 void __weak module_memfree(void *module_region)
 {
 	vfree(module_region);
+	kasan_module_free(module_region);
 }
 
 void __weak module_arch_cleanup(struct module *mod)
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
index 4d47d87..4fecaedc 100644
--- a/lib/Kconfig.kasan
+++ b/lib/Kconfig.kasan
@@ -6,6 +6,7 @@  if HAVE_ARCH_KASAN
 config KASAN
 	bool "KASan: runtime memory debugger"
 	depends on SLUB_DEBUG
+	select CONSTRUCTORS
 	help
 	  Enables kernel address sanitizer - runtime memory debugger,
 	  designed to find out-of-bounds accesses and use-after-free bugs.
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index 799c52b..78fee63 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -22,6 +22,7 @@ 
 #include <linux/memblock.h>
 #include <linux/memory.h>
 #include <linux/mm.h>
+#include <linux/module.h>
 #include <linux/printk.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
@@ -395,6 +396,57 @@  void kasan_kfree_large(const void *ptr)
 			KASAN_FREE_PAGE);
 }
 
+int kasan_module_alloc(void *addr, size_t size)
+{
+	void *ret;
+	size_t shadow_size;
+	unsigned long shadow_start;
+
+	shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
+	shadow_size = round_up(size >> KASAN_SHADOW_SCALE_SHIFT,
+			PAGE_SIZE);
+
+	if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
+		return -EINVAL;
+
+	ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
+			shadow_start + shadow_size,
+			GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
+			PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
+			__builtin_return_address(0));
+	return ret ? 0 : -ENOMEM;
+}
+
+void kasan_module_free(void *addr)
+{
+	vfree(kasan_mem_to_shadow(addr));
+}
+
+static void register_global(struct kasan_global *global)
+{
+	size_t aligned_size = round_up(global->size, KASAN_SHADOW_SCALE_SIZE);
+
+	kasan_unpoison_shadow(global->beg, global->size);
+
+	kasan_poison_shadow(global->beg + aligned_size,
+		global->size_with_redzone - aligned_size,
+		KASAN_GLOBAL_REDZONE);
+}
+
+void __asan_register_globals(struct kasan_global *globals, size_t size)
+{
+	int i;
+
+	for (i = 0; i < size; i++)
+		register_global(&globals[i]);
+}
+EXPORT_SYMBOL(__asan_register_globals);
+
+void __asan_unregister_globals(struct kasan_global *globals, size_t size)
+{
+}
+EXPORT_SYMBOL(__asan_unregister_globals);
+
 #define DEFINE_ASAN_LOAD_STORE(size)				\
 	void __asan_load##size(unsigned long addr)		\
 	{							\
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index 1fcc1d8..4986b0a 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -11,6 +11,7 @@ 
 #define KASAN_PAGE_REDZONE      0xFE  /* redzone for kmalloc_large allocations */
 #define KASAN_KMALLOC_REDZONE   0xFC  /* redzone inside slub object */
 #define KASAN_KMALLOC_FREE      0xFB  /* object was freed (kmem_cache_free/kfree) */
+#define KASAN_GLOBAL_REDZONE    0xFA  /* redzone for global variable */
 
 /*
  * Stack redzone shadow values
@@ -21,6 +22,10 @@ 
 #define KASAN_STACK_RIGHT       0xF3
 #define KASAN_STACK_PARTIAL     0xF4
 
+/* Don't break randconfig/all*config builds */
+#ifndef KASAN_ABI_VERSION
+#define KASAN_ABI_VERSION 1
+#endif
 
 struct kasan_access_info {
 	const void *access_addr;
@@ -30,6 +35,26 @@  struct kasan_access_info {
 	unsigned long ip;
 };
 
+/* The layout of struct dictated by compiler */
+struct kasan_source_location {
+	const char *filename;
+	int line_no;
+	int column_no;
+};
+
+/* The layout of struct dictated by compiler */
+struct kasan_global {
+	const void *beg;		/* Address of the beginning of the global variable. */
+	size_t size;			/* Size of the global variable. */
+	size_t size_with_redzone;	/* Size of the variable + size of the red zone. 32 bytes aligned */
+	const void *name;
+	const void *module_name;	/* Name of the module where the global variable is declared. */
+	unsigned long has_dynamic_init;	/* This needed for C++ */
+#if KASAN_ABI_VERSION >= 4
+	struct kasan_source_location *location;
+#endif
+};
+
 void kasan_report_error(struct kasan_access_info *info);
 void kasan_report_user_access(struct kasan_access_info *info);
 
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index 866732e..680ceed 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -23,6 +23,8 @@ 
 #include <linux/types.h>
 #include <linux/kasan.h>
 
+#include <asm/sections.h>
+
 #include "kasan.h"
 #include "../slab.h"
 
@@ -61,6 +63,7 @@  static void print_error_description(struct kasan_access_info *info)
 		break;
 	case KASAN_PAGE_REDZONE:
 	case KASAN_KMALLOC_REDZONE:
+	case KASAN_GLOBAL_REDZONE:
 	case 0 ... KASAN_SHADOW_SCALE_SIZE - 1:
 		bug_type = "out of bounds access";
 		break;
@@ -80,6 +83,20 @@  static void print_error_description(struct kasan_access_info *info)
 		info->access_size, current->comm, task_pid_nr(current));
 }
 
+static inline bool kernel_or_module_addr(const void *addr)
+{
+	return (addr >= (void *)_stext && addr < (void *)_end)
+		|| (addr >= (void *)MODULES_VADDR
+			&& addr < (void *)MODULES_END);
+}
+
+static inline bool init_task_stack_addr(const void *addr)
+{
+	return addr >= (void *)&init_thread_union.stack &&
+		(addr <= (void *)&init_thread_union.stack +
+			sizeof(init_thread_union.stack));
+}
+
 static void print_address_description(struct kasan_access_info *info)
 {
 	const void *addr = info->access_addr;
@@ -107,6 +124,11 @@  static void print_address_description(struct kasan_access_info *info)
 		dump_page(page, "kasan: bad access detected");
 	}
 
+	if (kernel_or_module_addr(addr)) {
+		if (!init_task_stack_addr(addr))
+			pr_err("Address belongs to variable %pS\n", addr);
+	}
+
 	dump_stack();
 }
 
diff --git a/scripts/Makefile.kasan b/scripts/Makefile.kasan
index 0ac7d1d..df302f8 100644
--- a/scripts/Makefile.kasan
+++ b/scripts/Makefile.kasan
@@ -5,11 +5,12 @@  else
 	call_threshold := 0
 endif
 
-CFLAGS_KASAN_MINIMAL := $(call cc-option, -fsanitize=kernel-address)
+CFLAGS_KASAN_MINIMAL := $(call cc-option, -fsanitize=kernel-address \
+				--param asan-globals=1)
 
 CFLAGS_KASAN := $(call cc-option, -fsanitize=kernel-address \
 		-fasan-shadow-offset=$(CONFIG_KASAN_SHADOW_OFFSET) \
-		--param asan-stack=1 \
+		--param asan-stack=1 --param asan-globals=1 \
 		--param asan-instrumentation-with-call-threshold=$(call_threshold))
 
 ifeq ($(CFLAGS_KASAN_MINIMAL),)