Message ID | 20210809093750.131091-4-wangkefeng.wang@huawei.com (mailing list archive) |
---|---|
State | New, archived |
Series | arm64: support page mapping percpu first chunk allocator |
On 2021/8/9 17:37, Kefeng Wang wrote:
> With KASAN_VMALLOC and NEED_PER_CPU_PAGE_FIRST_CHUNK, it crashes:
>
> Unable to handle kernel paging request at virtual address ffff7000028f2000
> ...
> swapper pgtable: 64k pages, 48-bit VAs, pgdp=0000000042440000
> [ffff7000028f2000] pgd=000000063e7c0003, p4d=000000063e7c0003, pud=000000063e7c0003, pmd=000000063e7b0003, pte=0000000000000000
> Internal error: Oops: 96000007 [#1] PREEMPT SMP
> Modules linked in:
> CPU: 0 PID: 0 Comm: swapper Not tainted 5.13.0-rc4-00003-gc6e6e28f3f30-dirty #62
> Hardware name: linux,dummy-virt (DT)
> pstate: 200000c5 (nzCv daIF -PAN -UAO -TCO BTYPE=--)
> pc : kasan_check_range+0x90/0x1a0
> lr : memcpy+0x88/0xf4
> sp : ffff80001378fe20
> ...
> Call trace:
>  kasan_check_range+0x90/0x1a0
>  pcpu_page_first_chunk+0x3f0/0x568
>  setup_per_cpu_areas+0xb8/0x184
>  start_kernel+0x8c/0x328
>
> The vm area used in vm_area_register_early() has no KASAN shadow memory,
> so add a new kasan_populate_early_vm_area_shadow() function to populate
> the vm area shadow memory and fix the issue.

Should have added Acked-by: Marco Elver <elver@google.com> [for KASAN parts], missed it here :(

> Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
> [full patch quoted; see the diff below]
On Mon, 9 Aug 2021 at 13:10, Kefeng Wang <wangkefeng.wang@huawei.com> wrote:
>
> On 2021/8/9 17:37, Kefeng Wang wrote:
> > With KASAN_VMALLOC and NEED_PER_CPU_PAGE_FIRST_CHUNK, it crashes:
> > [...]
> > The vm area used in vm_area_register_early() has no KASAN shadow memory,
> > so add a new kasan_populate_early_vm_area_shadow() function to populate
> > the vm area shadow memory and fix the issue.
>
> Should have added Acked-by: Marco Elver <elver@google.com> [for KASAN parts],
> missed it here :(

My Ack is still valid, thanks for noting.

> > Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
> > [full patch quoted; see the diff below]
On 2021/8/9 19:21, Marco Elver wrote:
> On Mon, 9 Aug 2021 at 13:10, Kefeng Wang <wangkefeng.wang@huawei.com> wrote:
>>
>> On 2021/8/9 17:37, Kefeng Wang wrote:
>>> With KASAN_VMALLOC and NEED_PER_CPU_PAGE_FIRST_CHUNK, it crashes:
>>> [...]
>> Should have added Acked-by: Marco Elver <elver@google.com> [for KASAN parts],
> My Ack is still valid, thanks for noting.

Thanks, Marco ;)
On Mon, Aug 9, 2021 at 11:32 AM Kefeng Wang <wangkefeng.wang@huawei.com> wrote:
>
> With KASAN_VMALLOC and NEED_PER_CPU_PAGE_FIRST_CHUNK, it crashes:
>
> Unable to handle kernel paging request at virtual address ffff7000028f2000
> [...]
> Call trace:
>  kasan_check_range+0x90/0x1a0
>  pcpu_page_first_chunk+0x3f0/0x568
>  setup_per_cpu_areas+0xb8/0x184
>  start_kernel+0x8c/0x328
>
> The vm area used in vm_area_register_early() has no KASAN shadow memory,
> so add a new kasan_populate_early_vm_area_shadow() function to populate
> the vm area shadow memory and fix the issue.
>
> Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
> [full patch quoted; see the diff below]
> --
> 2.26.2

Acked-by: Andrey Konovalov <andreyknvl@gmail.com> for KASAN parts.

Thanks!
diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
index 61b52a92b8b6..5b996ca4d996 100644
--- a/arch/arm64/mm/kasan_init.c
+++ b/arch/arm64/mm/kasan_init.c
@@ -287,6 +287,22 @@ static void __init kasan_init_depth(void)
 	init_task.kasan_depth = 0;
 }
 
+#ifdef CONFIG_KASAN_VMALLOC
+void __init kasan_populate_early_vm_area_shadow(void *start, unsigned long size)
+{
+	unsigned long shadow_start, shadow_end;
+
+	if (!is_vmalloc_or_module_addr(start))
+		return;
+
+	shadow_start = (unsigned long)kasan_mem_to_shadow(start);
+	shadow_start = ALIGN_DOWN(shadow_start, PAGE_SIZE);
+	shadow_end = (unsigned long)kasan_mem_to_shadow(start + size);
+	shadow_end = ALIGN(shadow_end, PAGE_SIZE);
+	kasan_map_populate(shadow_start, shadow_end, NUMA_NO_NODE);
+}
+#endif
+
 void __init kasan_init(void)
 {
 	kasan_init_shadow();
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index dd874a1ee862..3f8c26d9ef82 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -133,6 +133,8 @@ struct kasan_cache {
 	bool is_kmalloc;
 };
 
+void kasan_populate_early_vm_area_shadow(void *start, unsigned long size);
+
 slab_flags_t __kasan_never_merge(void);
 static __always_inline slab_flags_t kasan_never_merge(void)
 {
@@ -303,6 +305,10 @@ void kasan_restore_multi_shot(bool enabled);
 
 #else /* CONFIG_KASAN */
 
+static inline void kasan_populate_early_vm_area_shadow(void *start,
+						       unsigned long size)
+{ }
+
 static inline slab_flags_t kasan_never_merge(void)
 {
 	return 0;
diff --git a/mm/kasan/init.c b/mm/kasan/init.c
index cc64ed6858c6..d39577d088a1 100644
--- a/mm/kasan/init.c
+++ b/mm/kasan/init.c
@@ -279,6 +279,11 @@ int __ref kasan_populate_early_shadow(const void *shadow_start,
 	return 0;
 }
 
+void __init __weak kasan_populate_early_vm_area_shadow(void *start,
+						       unsigned long size)
+{
+}
+
 static void kasan_free_pte(pte_t *pte_start, pmd_t *pmd)
 {
 	pte_t *pte;
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 1e8fe08725b8..66a7e1ea2561 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -2253,6 +2253,7 @@ void __init vm_area_register_early(struct vm_struct *vm, size_t align)
 	vm->addr = (void *)addr;
 
 	vm_area_add_early(vm);
+	kasan_populate_early_vm_area_shadow(vm->addr, vm->size);
 }
 
 static void vmap_init_free_space(void)
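For context on the arithmetic in the new arm64 helper: with generic KASAN, kasan_mem_to_shadow() maps every 8 bytes of kernel memory to one shadow byte, so the helper only has to page-align the translated range before handing it to kasan_map_populate(). A minimal standalone sketch of that range computation follows; the shadow offset, page size, and example addresses are illustrative assumptions, not the values of any particular kernel config.

#include <stdio.h>

#define KASAN_SHADOW_SCALE_SHIFT 3                      /* 8 bytes per shadow byte */
#define KASAN_SHADOW_OFFSET	0xdffffc0000000000UL   /* example value only */
#define PAGE_SIZE		0x10000UL              /* 64k pages, as in the oops */

#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))
#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))

/* Mirrors the shape of kasan_mem_to_shadow(): shift, then add the offset. */
static unsigned long mem_to_shadow(unsigned long addr)
{
	return (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET;
}

int main(void)
{
	/* Hypothetical early vm area, e.g. the percpu first chunk. */
	unsigned long start = 0xffff800010000000UL;
	unsigned long size  = 4 * PAGE_SIZE;

	/* Round outward so the whole shadow of [start, start+size) is covered. */
	unsigned long shadow_start = ALIGN_DOWN(mem_to_shadow(start), PAGE_SIZE);
	unsigned long shadow_end   = ALIGN(mem_to_shadow(start + size), PAGE_SIZE);

	/* This page-aligned range is what kasan_map_populate() would map. */
	printf("shadow range to map: [%#lx, %#lx)\n", shadow_start, shadow_end);
	return 0;
}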
With KASAN_VMALLOC and NEED_PER_CPU_PAGE_FIRST_CHUNK, it crashes:

Unable to handle kernel paging request at virtual address ffff7000028f2000
...
swapper pgtable: 64k pages, 48-bit VAs, pgdp=0000000042440000
[ffff7000028f2000] pgd=000000063e7c0003, p4d=000000063e7c0003, pud=000000063e7c0003, pmd=000000063e7b0003, pte=0000000000000000
Internal error: Oops: 96000007 [#1] PREEMPT SMP
Modules linked in:
CPU: 0 PID: 0 Comm: swapper Not tainted 5.13.0-rc4-00003-gc6e6e28f3f30-dirty #62
Hardware name: linux,dummy-virt (DT)
pstate: 200000c5 (nzCv daIF -PAN -UAO -TCO BTYPE=--)
pc : kasan_check_range+0x90/0x1a0
lr : memcpy+0x88/0xf4
sp : ffff80001378fe20
...
Call trace:
 kasan_check_range+0x90/0x1a0
 pcpu_page_first_chunk+0x3f0/0x568
 setup_per_cpu_areas+0xb8/0x184
 start_kernel+0x8c/0x328

The vm area used in vm_area_register_early() has no KASAN shadow memory,
so add a new kasan_populate_early_vm_area_shadow() function to populate
the vm area shadow memory and fix the issue.

Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
 arch/arm64/mm/kasan_init.c | 16 ++++++++++++++++
 include/linux/kasan.h      |  6 ++++++
 mm/kasan/init.c            |  5 +++++
 mm/vmalloc.c               |  1 +
 4 files changed, 28 insertions(+)
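The patch wires this up through the kernel's weak-symbol pattern: mm/kasan/init.c provides an empty __weak default so every KASAN-enabled architecture links, and the strong definition in arch/arm64/mm/kasan_init.c overrides it at link time, with no #ifdef at the call site in vm_area_register_early(). A standalone sketch of that mechanism, using a hypothetical populate_shadow() name and three files shown in one listing:

/* weak_default.c — generic fallback, analogous to mm/kasan/init.c:
 * the weak attribute lets another object file override this symbol
 * at link time. */
void __attribute__((__weak__)) populate_shadow(void *start, unsigned long size)
{
	/* no-op: builds without an arch override fall back here */
}

/* arch_override.c — strong definition, analogous to the arm64 one:
 * when this file is linked in, it silently wins over the weak default. */
#include <stdio.h>

void populate_shadow(void *start, unsigned long size)
{
	printf("mapping shadow for [%p, %p)\n", start, (char *)start + size);
}

/* caller.c — unconditional call site, analogous to vm_area_register_early() */
void populate_shadow(void *start, unsigned long size);

int main(void)
{
	char area[64];

	populate_shadow(area, sizeof(area));
	return 0;
}

For the remaining configuration, !CONFIG_KASAN, the static inline stub added to include/linux/kasan.h makes the call compile away entirely, so mm/vmalloc.c stays free of KASAN conditionals.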