@@ -23,6 +23,7 @@
  */
 #include <linux/module.h>
 #include <linux/errno.h>
+#include <linux/kasan.h>
 #include <linux/mm.h>
 #include <linux/vmalloc.h>
 #include <linux/io.h>
@@ -115,16 +116,40 @@ int ioremap_page(unsigned long virt, unsigned long phys,
 }
 EXPORT_SYMBOL(ioremap_page);
 
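+/*
+ * kasan_mem_to_shadow() translates a kernel address into the address
+ * of the KASan shadow byte that tracks it. The !CONFIG_KASAN stub is
+ * never executed: its only caller below is guarded by
+ * IS_ENABLED(CONFIG_KASAN_VMALLOC), so the stub merely keeps that
+ * branch compilable before it is optimised away.
+ */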
+#ifdef CONFIG_KASAN
+static unsigned long arm_kasan_mem_to_shadow(unsigned long addr)
+{
+	return (unsigned long)kasan_mem_to_shadow((void *)addr);
+}
+#else
+static unsigned long arm_kasan_mem_to_shadow(unsigned long addr)
+{
+	return 0;
+}
+#endif
+
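+/*
+ * Copy the kernel's top-level page table entries covering [start, end)
+ * into mm's page tables. end is rounded up to a PGDIR_SIZE boundary so
+ * that a range ending part-way through a pgd entry still has that
+ * entry copied.
+ */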
+static void memcpy_pgd(struct mm_struct *mm, unsigned long start,
+		       unsigned long end)
+{
+	end = ALIGN(end, PGDIR_SIZE);
+	memcpy(pgd_offset(mm, start), pgd_offset_k(start),
+	       sizeof(pgd_t) * (pgd_index(end) - pgd_index(start)));
+}
+
 void __check_vmalloc_seq(struct mm_struct *mm)
 {
 	int seq;
 
 	do {
 		seq = atomic_read(&init_mm.context.vmalloc_seq);
-		memcpy(pgd_offset(mm, VMALLOC_START),
-		       pgd_offset_k(VMALLOC_START),
-		       sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
-					pgd_index(VMALLOC_START)));
+		memcpy_pgd(mm, VMALLOC_START, VMALLOC_END);
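+		/*
+		 * With CONFIG_KASAN_VMALLOC, shadow memory for the vmalloc
+		 * region is populated on demand, so the pgd entries covering
+		 * the shadow of the vmalloc space must be kept in sync in
+		 * the same way as the vmalloc space itself.
+		 */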
+		if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
+			unsigned long start =
+				arm_kasan_mem_to_shadow(VMALLOC_START);
+			unsigned long end =
+				arm_kasan_mem_to_shadow(VMALLOC_END);
+			memcpy_pgd(mm, start, end);
+		}
 		/*
 		 * Use a store-release so that other CPUs that observe the
 		 * counter's new value are guaranteed to see the results of the