
[v3,1/5] arm64: kasan: don't populate vmalloc area for CONFIG_KASAN_VMALLOC

Message ID 20210206083552.24394-2-lecopzer.chen@mediatek.com (mailing list archive)
State New, archived
Series arm64: kasan: support CONFIG_KASAN_VMALLOC

Commit Message

Lecopzer Chen Feb. 6, 2021, 8:35 a.m. UTC
Linux supports KASAN for the vmalloc area since commit 3c5c3cfb9ef4da9
("kasan: support backing vmalloc space with real shadow memory").

Like what is done for MODULES_VADDR now, just do not early populate
the shadow for the region between VMALLOC_START and VMALLOC_END.

Before:

MODULES_VADDR: no mapping, no zero shadow at init
VMALLOC area:  backed with zero shadow at init

After:

MODULES_VADDR: no mapping, no zero shadow at init
VMALLOC area:  no mapping, no zero shadow at init

Thus the shadow mapping will get allocated on demand by the core
functions of KASAN_VMALLOC.

  -----------  vmalloc_shadow_start
 |           |
 |           |
 |           | <= non-mapping
 |           |
 |           |
 |-----------|
 |///////////|<- kimage shadow with page table mapping.
 |-----------|
 |           |
 |           | <= non-mapping
 |           |
 ------------- vmalloc_shadow_end
 |00000000000|
 |00000000000| <= Zero shadow
 |00000000000|
 ------------- KASAN_SHADOW_END

Signed-off-by: Lecopzer Chen <lecopzer.chen@mediatek.com>
---
 arch/arm64/mm/kasan_init.c | 18 +++++++++++++-----
 1 file changed, 13 insertions(+), 5 deletions(-)
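The "core functions" the commit message refers to are driven by
kasan_populate_vmalloc(), which the vmalloc core invokes for each newly
allocated area. A simplified sketch of that path, loosely based on
mm/kasan around v5.11 (abridged; error handling and the release side are
omitted, details may differ from the exact tree this patch targets):

int kasan_populate_vmalloc(unsigned long addr, unsigned long size)
{
	unsigned long shadow_start, shadow_end;
	int ret;

	if (!is_vmalloc_or_module_addr((void *)addr))
		return 0;

	/* Translate the vmalloc range into its shadow range, page aligned. */
	shadow_start = ALIGN_DOWN((unsigned long)kasan_mem_to_shadow((void *)addr),
				  PAGE_SIZE);
	shadow_end = ALIGN((unsigned long)kasan_mem_to_shadow((void *)addr + size),
			   PAGE_SIZE);

	/* Walk the shadow range PTE by PTE and map pages on demand. */
	ret = apply_to_page_range(&init_mm, shadow_start,
				  shadow_end - shadow_start,
				  kasan_populate_vmalloc_pte, NULL);
	if (ret)
		return ret;

	flush_cache_vmap(shadow_start, shadow_end);
	return 0;
}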

Comments

Catalin Marinas March 19, 2021, 5:37 p.m. UTC | #1
On Sat, Feb 06, 2021 at 04:35:48PM +0800, Lecopzer Chen wrote:
> Linux support KAsan for VMALLOC since commit 3c5c3cfb9ef4da9
> ("kasan: support backing vmalloc space with real shadow memory")
> 
> Like how the MODULES_VADDR does now, just not to early populate
> the VMALLOC_START between VMALLOC_END.
> 
> Before:
> 
> MODULE_VADDR: no mapping, no zoreo shadow at init
> VMALLOC_VADDR: backed with zero shadow at init
> 
> After:
> 
> MODULE_VADDR: no mapping, no zoreo shadow at init
> VMALLOC_VADDR: no mapping, no zoreo shadow at init

s/zoreo/zero/

> Thus the mapping will get allocated on demand by the core function
> of KASAN_VMALLOC.
> 
>   -----------  vmalloc_shadow_start
>  |           |
>  |           |
>  |           | <= non-mapping
>  |           |
>  |           |
>  |-----------|
>  |///////////|<- kimage shadow with page table mapping.
>  |-----------|
>  |           |
>  |           | <= non-mapping
>  |           |
>  ------------- vmalloc_shadow_end
>  |00000000000|
>  |00000000000| <= Zero shadow
>  |00000000000|
>  ------------- KASAN_SHADOW_END
> 
> Signed-off-by: Lecopzer Chen <lecopzer.chen@mediatek.com>
> ---
>  arch/arm64/mm/kasan_init.c | 18 +++++++++++++-----
>  1 file changed, 13 insertions(+), 5 deletions(-)
> 
> diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
> index d8e66c78440e..20d06008785f 100644
> --- a/arch/arm64/mm/kasan_init.c
> +++ b/arch/arm64/mm/kasan_init.c
> @@ -214,6 +214,7 @@ static void __init kasan_init_shadow(void)
>  {
>  	u64 kimg_shadow_start, kimg_shadow_end;
>  	u64 mod_shadow_start, mod_shadow_end;
> +	u64 vmalloc_shadow_end;
>  	phys_addr_t pa_start, pa_end;
>  	u64 i;
>  
> @@ -223,6 +224,8 @@ static void __init kasan_init_shadow(void)
>  	mod_shadow_start = (u64)kasan_mem_to_shadow((void *)MODULES_VADDR);
>  	mod_shadow_end = (u64)kasan_mem_to_shadow((void *)MODULES_END);
>  
> +	vmalloc_shadow_end = (u64)kasan_mem_to_shadow((void *)VMALLOC_END);
> +
>  	/*
>  	 * We are going to perform proper setup of shadow memory.
>  	 * At first we should unmap early shadow (clear_pgds() call below).
> @@ -241,12 +244,17 @@ static void __init kasan_init_shadow(void)
>  
>  	kasan_populate_early_shadow(kasan_mem_to_shadow((void *)PAGE_END),
>  				   (void *)mod_shadow_start);
> -	kasan_populate_early_shadow((void *)kimg_shadow_end,
> -				   (void *)KASAN_SHADOW_END);
>  
> -	if (kimg_shadow_start > mod_shadow_end)
> -		kasan_populate_early_shadow((void *)mod_shadow_end,
> -					    (void *)kimg_shadow_start);

Not something introduced by this patch but what happens if this
condition is false? It means that kimg_shadow_end < mod_shadow_start and
the above kasan_populate_early_shadow(PAGE_END, mod_shadow_start)
overlaps with the earlier kasan_map_populate(kimg_shadow_start,
kimg_shadow_end).

> +	if (IS_ENABLED(CONFIG_KASAN_VMALLOC))
> +		kasan_populate_early_shadow((void *)vmalloc_shadow_end,
> +					    (void *)KASAN_SHADOW_END);
> +	else {
> +		kasan_populate_early_shadow((void *)kimg_shadow_end,
> +					    (void *)KASAN_SHADOW_END);
> +		if (kimg_shadow_start > mod_shadow_end)
> +			kasan_populate_early_shadow((void *)mod_shadow_end,
> +						    (void *)kimg_shadow_start);
> +	}
>  
>  	for_each_mem_range(i, &pa_start, &pa_end) {
>  		void *start = (void *)__phys_to_virt(pa_start);
> -- 
> 2.25.1
>
Lecopzer Chen March 20, 2021, 1:01 p.m. UTC | #2
On Sat, Mar 20, 2021 at 1:38 AM Catalin Marinas <catalin.marinas@arm.com> wrote:
>
> On Sat, Feb 06, 2021 at 04:35:48PM +0800, Lecopzer Chen wrote:
> > Linux support KAsan for VMALLOC since commit 3c5c3cfb9ef4da9
> > ("kasan: support backing vmalloc space with real shadow memory")
> >
> > Like how the MODULES_VADDR does now, just not to early populate
> > the VMALLOC_START between VMALLOC_END.
> >
> > Before:
> >
> > MODULE_VADDR: no mapping, no zoreo shadow at init
> > VMALLOC_VADDR: backed with zero shadow at init
> >
> > After:
> >
> > MODULE_VADDR: no mapping, no zoreo shadow at init
> > VMALLOC_VADDR: no mapping, no zoreo shadow at init
>
> s/zoreo/zero/
>

thanks!

> > Thus the mapping will get allocated on demand by the core function
> > of KASAN_VMALLOC.
> >
> >   -----------  vmalloc_shadow_start
> >  |           |
> >  |           |
> >  |           | <= non-mapping
> >  |           |
> >  |           |
> >  |-----------|
> >  |///////////|<- kimage shadow with page table mapping.
> >  |-----------|
> >  |           |
> >  |           | <= non-mapping
> >  |           |
> >  ------------- vmalloc_shadow_end
> >  |00000000000|
> >  |00000000000| <= Zero shadow
> >  |00000000000|
> >  ------------- KASAN_SHADOW_END
> >
> > Signed-off-by: Lecopzer Chen <lecopzer.chen@mediatek.com>
> > ---
> >  arch/arm64/mm/kasan_init.c | 18 +++++++++++++-----
> >  1 file changed, 13 insertions(+), 5 deletions(-)
> >
> > diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
> > index d8e66c78440e..20d06008785f 100644
> > --- a/arch/arm64/mm/kasan_init.c
> > +++ b/arch/arm64/mm/kasan_init.c
> > @@ -214,6 +214,7 @@ static void __init kasan_init_shadow(void)
> >  {
> >       u64 kimg_shadow_start, kimg_shadow_end;
> >       u64 mod_shadow_start, mod_shadow_end;
> > +     u64 vmalloc_shadow_end;
> >       phys_addr_t pa_start, pa_end;
> >       u64 i;
> >
> > @@ -223,6 +224,8 @@ static void __init kasan_init_shadow(void)
> >       mod_shadow_start = (u64)kasan_mem_to_shadow((void *)MODULES_VADDR);
> >       mod_shadow_end = (u64)kasan_mem_to_shadow((void *)MODULES_END);
> >
> > +     vmalloc_shadow_end = (u64)kasan_mem_to_shadow((void *)VMALLOC_END);
> > +
> >       /*
> >        * We are going to perform proper setup of shadow memory.
> >        * At first we should unmap early shadow (clear_pgds() call below).
> > @@ -241,12 +244,17 @@ static void __init kasan_init_shadow(void)
> >
> >       kasan_populate_early_shadow(kasan_mem_to_shadow((void *)PAGE_END),
> >                                  (void *)mod_shadow_start);
> > -     kasan_populate_early_shadow((void *)kimg_shadow_end,
> > -                                (void *)KASAN_SHADOW_END);
> >
> > -     if (kimg_shadow_start > mod_shadow_end)
> > -             kasan_populate_early_shadow((void *)mod_shadow_end,
> > -                                         (void *)kimg_shadow_start);
>
> Not something introduced by this patch but what happens if this
> condition is false? It means that kimg_shadow_end < mod_shadow_start and
> the above kasan_populate_early_shadow(PAGE_END, mod_shadow_start)
> overlaps with the earlier kasan_map_populate(kimg_shadow_start,
> kimg_shadow_end).

In this case, the area between mod_shadow_start and kimg_shadow_end
was already mapped at kasan init.

Thus the corner case is that module_alloc() allocates that range
(the area between mod_shadow_start and kimg_shadow_end) again.


With KASAN_VMALLOC,
module_alloc() ->
    ... ->
        kasan_populate_vmalloc() ->
            apply_to_page_range()
will check whether the mapping already exists and skip allocating a new
mapping if it does.
So the second allocation should be fine.
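That existence check sits in the PTE-level callback that
apply_to_page_range() invokes. Roughly (a sketch of
kasan_populate_vmalloc_pte() from mm/kasan of that era; simplified):

static int kasan_populate_vmalloc_pte(pte_t *ptep, unsigned long addr,
				      void *unused)
{
	unsigned long page;
	pte_t pte;

	/* Shadow already mapped here (e.g. since kasan init): nothing to do. */
	if (likely(!pte_none(*ptep)))
		return 0;

	page = __get_free_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	memset((void *)page, KASAN_VMALLOC_INVALID, PAGE_SIZE);
	pte = pfn_pte(PFN_DOWN(__pa(page)), PAGE_KERNEL);

	spin_lock(&init_mm.page_table_lock);
	if (likely(pte_none(*ptep)))
		set_pte_at(&init_mm, addr, ptep, pte);
	else
		free_page(page);	/* lost a race; mapping already exists */
	spin_unlock(&init_mm.page_table_lock);

	return 0;
}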

Without KASAN_VMALLOC,
module_alloc() ->
    kasan_module_alloc()
will map the shadow for that range twice: the first time via
kasan_map_populate() at init, and the second time via vmalloc(),
which may be a problem(?).
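For comparison, a rough sketch of that non-KASAN_VMALLOC path, abridged
from arch/arm64/kernel/module.c of that time (not verbatim; the gfp and
module-PLT details are dropped):

void *module_alloc(unsigned long size)
{
	u64 module_alloc_end = module_alloc_base + MODULES_VSIZE;
	void *p;

	/* Allocate the module's text/data from the module region. */
	p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
				 module_alloc_end, GFP_KERNEL, PAGE_KERNEL,
				 0, NUMA_NO_NODE, __builtin_return_address(0));

	/*
	 * kasan_module_alloc() maps the shadow for p with a second
	 * __vmalloc_node_range() call, unconditionally -- even if kasan
	 * init already mapped that shadow range.
	 */
	if (p && (kasan_module_alloc(p, size) < 0)) {
		vfree(p);
		return NULL;
	}
	return p;
}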

Now the only possibility for the module area to overlap with the
kimage should be with KASLR on.
I'm not sure whether this case really happens with KASLR; it depends on
how __relocate_kernel() calculates the kimage and how kaslr_early_init()
decides module_alloc_base.


> > +     if (IS_ENABLED(CONFIG_KASAN_VMALLOC))
> > +             kasan_populate_early_shadow((void *)vmalloc_shadow_end,
> > +                                         (void *)KASAN_SHADOW_END);
> > +     else {
> > +             kasan_populate_early_shadow((void *)kimg_shadow_end,
> > +                                         (void *)KASAN_SHADOW_END);
> > +             if (kimg_shadow_start > mod_shadow_end)
> > +                     kasan_populate_early_shadow((void *)mod_shadow_end,
> > +                                                 (void *)kimg_shadow_start);
> > +     }
> >
> >       for_each_mem_range(i, &pa_start, &pa_end) {
> >               void *start = (void *)__phys_to_virt(pa_start);
> > --
> > 2.25.1
> >

Patch

diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
index d8e66c78440e..20d06008785f 100644
--- a/arch/arm64/mm/kasan_init.c
+++ b/arch/arm64/mm/kasan_init.c
@@ -214,6 +214,7 @@  static void __init kasan_init_shadow(void)
 {
 	u64 kimg_shadow_start, kimg_shadow_end;
 	u64 mod_shadow_start, mod_shadow_end;
+	u64 vmalloc_shadow_end;
 	phys_addr_t pa_start, pa_end;
 	u64 i;
 
@@ -223,6 +224,8 @@  static void __init kasan_init_shadow(void)
 	mod_shadow_start = (u64)kasan_mem_to_shadow((void *)MODULES_VADDR);
 	mod_shadow_end = (u64)kasan_mem_to_shadow((void *)MODULES_END);
 
+	vmalloc_shadow_end = (u64)kasan_mem_to_shadow((void *)VMALLOC_END);
+
 	/*
 	 * We are going to perform proper setup of shadow memory.
 	 * At first we should unmap early shadow (clear_pgds() call below).
@@ -241,12 +244,17 @@  static void __init kasan_init_shadow(void)
 
 	kasan_populate_early_shadow(kasan_mem_to_shadow((void *)PAGE_END),
 				   (void *)mod_shadow_start);
-	kasan_populate_early_shadow((void *)kimg_shadow_end,
-				   (void *)KASAN_SHADOW_END);
 
-	if (kimg_shadow_start > mod_shadow_end)
-		kasan_populate_early_shadow((void *)mod_shadow_end,
-					    (void *)kimg_shadow_start);
+	if (IS_ENABLED(CONFIG_KASAN_VMALLOC))
+		kasan_populate_early_shadow((void *)vmalloc_shadow_end,
+					    (void *)KASAN_SHADOW_END);
+	else {
+		kasan_populate_early_shadow((void *)kimg_shadow_end,
+					    (void *)KASAN_SHADOW_END);
+		if (kimg_shadow_start > mod_shadow_end)
+			kasan_populate_early_shadow((void *)mod_shadow_end,
+						    (void *)kimg_shadow_start);
+	}
 
 	for_each_mem_range(i, &pa_start, &pa_end) {
 		void *start = (void *)__phys_to_virt(pa_start);