[v2,1/4] arm64: kasan: don't populate vmalloc area for CONFIG_KASAN_VMALLOC

Message ID 20210109103252.812517-2-lecopzer@gmail.com (mailing list archive)
State New, archived
Series arm64: kasan: support CONFIG_KASAN_VMALLOC

Commit Message

Lecopzer Chen Jan. 9, 2021, 10:32 a.m. UTC
Linux supports KASAN for vmalloc space since commit 3c5c3cfb9ef4da9
("kasan: support backing vmalloc space with real shadow memory").

Like how MODULES_VADDR does now, just do not early populate the shadow
for the region between VMALLOC_START and VMALLOC_END.
Similarly, the kernel code mapping is now in the vmalloc area, so its
shadow should be kept populated.

Signed-off-by: Lecopzer Chen <lecopzer.chen@mediatek.com>
---
 arch/arm64/mm/kasan_init.c | 23 ++++++++++++++++++-----
 1 file changed, 18 insertions(+), 5 deletions(-)

Comments

Ard Biesheuvel Feb. 3, 2021, 6:37 p.m. UTC | #1
On Sat, 9 Jan 2021 at 11:33, Lecopzer Chen <lecopzer@gmail.com> wrote:
>
> Linux support KAsan for VMALLOC since commit 3c5c3cfb9ef4da9
> ("kasan: support backing vmalloc space with real shadow memory")
>
> Like how the MODULES_VADDR does now, just not to early populate
> the VMALLOC_START between VMALLOC_END.
> similarly, the kernel code mapping is now in the VMALLOC area and
> should keep these area populated.
>
> Signed-off-by: Lecopzer Chen <lecopzer.chen@mediatek.com>


This commit log text is a bit hard to follow. You are saying that the
vmalloc region is *not* backed with zero shadow or any default mapping
at all, right, and everything gets allocated on demand, just as is
the case for modules?
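
(For reference: "allocated on demand" here is the mechanism that commit
3c5c3cfb9ef4da9 added to the core code; roughly, and simplified from
the v5.11-era mm/vmalloc.c rather than quoted verbatim, the vmalloc
path does:

	/* in alloc_vmap_area(), once a VA range has been reserved: */
	ret = kasan_populate_vmalloc(addr, size);	/* map real shadow pages */
	if (ret) {
		free_vmap_area(va);
		return ERR_PTR(ret);
	}

so nothing has to be mapped up front for the vmalloc region.)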

> ---
>  arch/arm64/mm/kasan_init.c | 23 ++++++++++++++++++-----
>  1 file changed, 18 insertions(+), 5 deletions(-)
>
> diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
> index d8e66c78440e..39b218a64279 100644
> --- a/arch/arm64/mm/kasan_init.c
> +++ b/arch/arm64/mm/kasan_init.c
> @@ -214,6 +214,7 @@ static void __init kasan_init_shadow(void)
>  {
>         u64 kimg_shadow_start, kimg_shadow_end;
>         u64 mod_shadow_start, mod_shadow_end;
> +       u64 vmalloc_shadow_start, vmalloc_shadow_end;
>         phys_addr_t pa_start, pa_end;
>         u64 i;
>
> @@ -223,6 +224,9 @@ static void __init kasan_init_shadow(void)
>         mod_shadow_start = (u64)kasan_mem_to_shadow((void *)MODULES_VADDR);
>         mod_shadow_end = (u64)kasan_mem_to_shadow((void *)MODULES_END);
>
> +       vmalloc_shadow_start = (u64)kasan_mem_to_shadow((void *)VMALLOC_START);
> +       vmalloc_shadow_end = (u64)kasan_mem_to_shadow((void *)VMALLOC_END);
> +


This and the below seem overly complicated, given that VMALLOC_START
== MODULES_END. Can we simplify this?
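
Given that equality, vmalloc_shadow_start == mod_shadow_end by
construction, so the new range check can never fire. One possible
simplification, sketched against this patch and untested (the thread
converges on dropping the check below):

	if (IS_ENABLED(CONFIG_KASAN_VMALLOC))
		/* leave the vmalloc shadow entirely to on-demand population */
		kasan_populate_early_shadow((void *)vmalloc_shadow_end,
					    (void *)KASAN_SHADOW_END);

with the !CONFIG_KASAN_VMALLOC else branch kept as in the patch.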

>         /*
>          * We are going to perform proper setup of shadow memory.
>          * At first we should unmap early shadow (clear_pgds() call below).
> @@ -241,12 +245,21 @@ static void __init kasan_init_shadow(void)
>
>         kasan_populate_early_shadow(kasan_mem_to_shadow((void *)PAGE_END),
>                                    (void *)mod_shadow_start);
> -       kasan_populate_early_shadow((void *)kimg_shadow_end,
> -                                  (void *)KASAN_SHADOW_END);
> +       if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
> +               kasan_populate_early_shadow((void *)vmalloc_shadow_end,
> +                                           (void *)KASAN_SHADOW_END);
> +               if (vmalloc_shadow_start > mod_shadow_end)
> +                       kasan_populate_early_shadow((void *)mod_shadow_end,
> +                                                   (void *)vmalloc_shadow_start);
> +
> +       } else {
> +               kasan_populate_early_shadow((void *)kimg_shadow_end,
> +                                           (void *)KASAN_SHADOW_END);
> +               if (kimg_shadow_start > mod_shadow_end)
> +                       kasan_populate_early_shadow((void *)mod_shadow_end,
> +                                                   (void *)kimg_shadow_start);
> +       }
>
> -       if (kimg_shadow_start > mod_shadow_end)
> -               kasan_populate_early_shadow((void *)mod_shadow_end,
> -                                           (void *)kimg_shadow_start);
>
>         for_each_mem_range(i, &pa_start, &pa_end) {
>                 void *start = (void *)__phys_to_virt(pa_start);
> --
> 2.25.1
>
Lecopzer Chen Feb. 4, 2021, 6:21 a.m. UTC | #2
> On Sat, 9 Jan 2021 at 11:33, Lecopzer Chen <lecopzer@gmail.com> wrote:
> >
> > Linux support KAsan for VMALLOC since commit 3c5c3cfb9ef4da9
> > ("kasan: support backing vmalloc space with real shadow memory")
> >
> > Like how the MODULES_VADDR does now, just not to early populate
> > the VMALLOC_START between VMALLOC_END.
> > similarly, the kernel code mapping is now in the VMALLOC area and
> > should keep these area populated.
> >
> > Signed-off-by: Lecopzer Chen <lecopzer.chen@mediatek.com>
> 
> 
> This commit log text is a bit hard to follow. You are saying that the
> vmalloc region is *not* backed with zero shadow or any default mapping
> at all, right, and everything gets allocated on demand, just like is
> the case for modules?

It's much more like:

before:

MODULES_VADDR: no mapping, no zero shadow at init
vmalloc area:  backed with zero shadow at init

after:

MODULES_VADDR: no mapping, no zero shadow at init
vmalloc area:  no mapping, no zero shadow at init

So it is both "not backed with zero shadow" and
"no mapping at all; everything gets allocated on demand".

And "not backed with zero shadow" is effectively a subset of "no mapping at all".


Would it be clearer if the commit message were revised to:

----------------------
Like how MODULES_VADDR does now, just do not early populate the shadow
for the region between VMALLOC_START and VMALLOC_END.

Before:

MODULES_VADDR: no mapping, no zero shadow at init
vmalloc area:  backed with zero shadow at init

After:

vmalloc area: no mapping, no zero shadow at init

Thus the mapping will be allocated on demand by the core KASAN
vmalloc functions.

Similarly, the kernel code mapping is now in the vmalloc area, so its
shadow should be kept populated.
--------------------

Or would you have any suggestion?


Thanks a lot for your review!

BRs,
Lecopzer
Will Deacon Feb. 4, 2021, 12:45 p.m. UTC | #3
On Sat, Jan 09, 2021 at 06:32:49PM +0800, Lecopzer Chen wrote:
> Linux support KAsan for VMALLOC since commit 3c5c3cfb9ef4da9
> ("kasan: support backing vmalloc space with real shadow memory")
> 
> Like how the MODULES_VADDR does now, just not to early populate
> the VMALLOC_START between VMALLOC_END.
> similarly, the kernel code mapping is now in the VMALLOC area and
> should keep these area populated.
> 
> Signed-off-by: Lecopzer Chen <lecopzer.chen@mediatek.com>
> ---
>  arch/arm64/mm/kasan_init.c | 23 ++++++++++++++++++-----
>  1 file changed, 18 insertions(+), 5 deletions(-)
> 
> diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
> index d8e66c78440e..39b218a64279 100644
> --- a/arch/arm64/mm/kasan_init.c
> +++ b/arch/arm64/mm/kasan_init.c
> @@ -214,6 +214,7 @@ static void __init kasan_init_shadow(void)
>  {
>  	u64 kimg_shadow_start, kimg_shadow_end;
>  	u64 mod_shadow_start, mod_shadow_end;
> +	u64 vmalloc_shadow_start, vmalloc_shadow_end;
>  	phys_addr_t pa_start, pa_end;
>  	u64 i;
>  
> @@ -223,6 +224,9 @@ static void __init kasan_init_shadow(void)
>  	mod_shadow_start = (u64)kasan_mem_to_shadow((void *)MODULES_VADDR);
>  	mod_shadow_end = (u64)kasan_mem_to_shadow((void *)MODULES_END);
>  
> +	vmalloc_shadow_start = (u64)kasan_mem_to_shadow((void *)VMALLOC_START);
> +	vmalloc_shadow_end = (u64)kasan_mem_to_shadow((void *)VMALLOC_END);
> +
>  	/*
>  	 * We are going to perform proper setup of shadow memory.
>  	 * At first we should unmap early shadow (clear_pgds() call below).
> @@ -241,12 +245,21 @@ static void __init kasan_init_shadow(void)
>  
>  	kasan_populate_early_shadow(kasan_mem_to_shadow((void *)PAGE_END),
>  				   (void *)mod_shadow_start);
> -	kasan_populate_early_shadow((void *)kimg_shadow_end,
> -				   (void *)KASAN_SHADOW_END);
> +	if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {

Do we really need yet another CONFIG option for KASAN? What's the use-case
for *not* enabling this if you're already enabling one of the KASAN
backends?

> +		kasan_populate_early_shadow((void *)vmalloc_shadow_end,
> +					    (void *)KASAN_SHADOW_END);
> +		if (vmalloc_shadow_start > mod_shadow_end)

To echo Ard's concern: when is the above 'if' condition true?

Will
Lecopzer Chen Feb. 4, 2021, 2:46 p.m. UTC | #4
> On Sat, Jan 09, 2021 at 06:32:49PM +0800, Lecopzer Chen wrote:
> > Linux support KAsan for VMALLOC since commit 3c5c3cfb9ef4da9
> > ("kasan: support backing vmalloc space with real shadow memory")
> > 
> > Like how the MODULES_VADDR does now, just not to early populate
> > the VMALLOC_START between VMALLOC_END.
> > similarly, the kernel code mapping is now in the VMALLOC area and
> > should keep these area populated.
> > 
> > Signed-off-by: Lecopzer Chen <lecopzer.chen@mediatek.com>
> > ---
> >  arch/arm64/mm/kasan_init.c | 23 ++++++++++++++++++-----
> >  1 file changed, 18 insertions(+), 5 deletions(-)
> > 
> > diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
> > index d8e66c78440e..39b218a64279 100644
> > --- a/arch/arm64/mm/kasan_init.c
> > +++ b/arch/arm64/mm/kasan_init.c
> > @@ -214,6 +214,7 @@ static void __init kasan_init_shadow(void)
> >  {
> >  	u64 kimg_shadow_start, kimg_shadow_end;
> >  	u64 mod_shadow_start, mod_shadow_end;
> > +	u64 vmalloc_shadow_start, vmalloc_shadow_end;
> >  	phys_addr_t pa_start, pa_end;
> >  	u64 i;
> >  
> > @@ -223,6 +224,9 @@ static void __init kasan_init_shadow(void)
> >  	mod_shadow_start = (u64)kasan_mem_to_shadow((void *)MODULES_VADDR);
> >  	mod_shadow_end = (u64)kasan_mem_to_shadow((void *)MODULES_END);
> >  
> > +	vmalloc_shadow_start = (u64)kasan_mem_to_shadow((void *)VMALLOC_START);
> > +	vmalloc_shadow_end = (u64)kasan_mem_to_shadow((void *)VMALLOC_END);
> > +
> >  	/*
> >  	 * We are going to perform proper setup of shadow memory.
> >  	 * At first we should unmap early shadow (clear_pgds() call below).
> > @@ -241,12 +245,21 @@ static void __init kasan_init_shadow(void)
> >  
> >  	kasan_populate_early_shadow(kasan_mem_to_shadow((void *)PAGE_END),
> >  				   (void *)mod_shadow_start);
> > -	kasan_populate_early_shadow((void *)kimg_shadow_end,
> > -				   (void *)KASAN_SHADOW_END);
> > +	if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
> 
> Do we really need yet another CONFIG option for KASAN? What's the use-case
> for *not* enabling this if you're already enabling one of the KASAN
> backends?

As far as I know, KASAN_VMALLOC currently only supports KASAN_GENERIC,
and KASAN_VMALLOC uses more memory because it maps real shadow memory
(1/8 of the vmalloc VA space).

There may be users who can enable KASAN_GENERIC but cannot afford
KASAN_VMALLOC due to memory constraints.
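
As a back-of-envelope for that cost: generic KASAN uses one shadow byte
per 8 bytes of address space (KASAN_SHADOW_SCALE_SHIFT == 3; SW_TAGS
uses a 16-byte granule instead), so fully backing a range with real
shadow costs roughly 1/8 of its size. Illustrative sketch only:

	/* shadow bytes needed to fully back a region with real shadow */
	#define KASAN_SHADOW_SCALE_SHIFT	3	/* generic KASAN */
	static inline unsigned long shadow_size(unsigned long region_size)
	{
		return region_size >> KASAN_SHADOW_SCALE_SHIFT;	/* 1/8 */
	}

(Since the shadow is populated only for ranges actually allocated,
this is a worst case; that is Will's point below.)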
 
> > +		kasan_populate_early_shadow((void *)vmalloc_shadow_end,
> > +					    (void *)KASAN_SHADOW_END);
> > +		if (vmalloc_shadow_start > mod_shadow_end)
> 
> To echo Ard's concern: when is the above 'if' condition true?

After reviewing this code:
since VMALLOC_START is simply defined as MODULES_END,
this if-condition can never be true.
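
For reference, the arm64 definition (arch/arm64/include/asm/memory.h in
the v5.11-era tree) is:

	#define VMALLOC_START		(MODULES_END)

so vmalloc_shadow_start == mod_shadow_end always holds here.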

I also tested with this check removed and it works fine.

I'll remove it in the next version of the patch;
thanks a lot for pointing this out.

BRs,
Lecopzer
Will Deacon Feb. 4, 2021, 3:01 p.m. UTC | #5
On Thu, Feb 04, 2021 at 10:46:12PM +0800, Lecopzer Chen wrote:
> > On Sat, Jan 09, 2021 at 06:32:49PM +0800, Lecopzer Chen wrote:
> > > Linux support KAsan for VMALLOC since commit 3c5c3cfb9ef4da9
> > > ("kasan: support backing vmalloc space with real shadow memory")
> > > 
> > > Like how the MODULES_VADDR does now, just not to early populate
> > > the VMALLOC_START between VMALLOC_END.
> > > similarly, the kernel code mapping is now in the VMALLOC area and
> > > should keep these area populated.
> > > 
> > > Signed-off-by: Lecopzer Chen <lecopzer.chen@mediatek.com>
> > > ---
> > >  arch/arm64/mm/kasan_init.c | 23 ++++++++++++++++++-----
> > >  1 file changed, 18 insertions(+), 5 deletions(-)
> > > 
> > > diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
> > > index d8e66c78440e..39b218a64279 100644
> > > --- a/arch/arm64/mm/kasan_init.c
> > > +++ b/arch/arm64/mm/kasan_init.c
> > > @@ -214,6 +214,7 @@ static void __init kasan_init_shadow(void)
> > >  {
> > >  	u64 kimg_shadow_start, kimg_shadow_end;
> > >  	u64 mod_shadow_start, mod_shadow_end;
> > > +	u64 vmalloc_shadow_start, vmalloc_shadow_end;
> > >  	phys_addr_t pa_start, pa_end;
> > >  	u64 i;
> > >  
> > > @@ -223,6 +224,9 @@ static void __init kasan_init_shadow(void)
> > >  	mod_shadow_start = (u64)kasan_mem_to_shadow((void *)MODULES_VADDR);
> > >  	mod_shadow_end = (u64)kasan_mem_to_shadow((void *)MODULES_END);
> > >  
> > > +	vmalloc_shadow_start = (u64)kasan_mem_to_shadow((void *)VMALLOC_START);
> > > +	vmalloc_shadow_end = (u64)kasan_mem_to_shadow((void *)VMALLOC_END);
> > > +
> > >  	/*
> > >  	 * We are going to perform proper setup of shadow memory.
> > >  	 * At first we should unmap early shadow (clear_pgds() call below).
> > > @@ -241,12 +245,21 @@ static void __init kasan_init_shadow(void)
> > >  
> > >  	kasan_populate_early_shadow(kasan_mem_to_shadow((void *)PAGE_END),
> > >  				   (void *)mod_shadow_start);
> > > -	kasan_populate_early_shadow((void *)kimg_shadow_end,
> > > -				   (void *)KASAN_SHADOW_END);
> > > +	if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
> > 
> > Do we really need yet another CONFIG option for KASAN? What's the use-case
> > for *not* enabling this if you're already enabling one of the KASAN
> > backends?
> 
> As I know, KASAN_VMALLOC now only supports KASAN_GENERIC and also
> KASAN_VMALLOC uses more memory to map real shadow memory (1/8 of vmalloc va).

The shadow is allocated dynamically though, isn't it?

> There should be someone can enable KASAN_GENERIC but can't use VMALLOC
> due to memory issue.

That doesn't sound particularly realistic to me. The reason I'm pushing here
is because I would _really_ like to move to VMAP stack unconditionally, and
that would effectively force KASAN_VMALLOC to be set if KASAN is in use.

So unless there's a really good reason not to do that, please can we make
this unconditional for arm64? Pretty please?

Will
Lecopzer Chen Feb. 4, 2021, 4:37 p.m. UTC | #6
> On Thu, Feb 04, 2021 at 10:46:12PM +0800, Lecopzer Chen wrote:
> > > On Sat, Jan 09, 2021 at 06:32:49PM +0800, Lecopzer Chen wrote:
> > > > Linux support KAsan for VMALLOC since commit 3c5c3cfb9ef4da9
> > > > ("kasan: support backing vmalloc space with real shadow memory")
> > > >
> > > > Like how the MODULES_VADDR does now, just not to early populate
> > > > the VMALLOC_START between VMALLOC_END.
> > > > similarly, the kernel code mapping is now in the VMALLOC area and
> > > > should keep these area populated.
> > > >
> > > > Signed-off-by: Lecopzer Chen <lecopzer.chen@mediatek.com>
> > > > ---
> > > >  arch/arm64/mm/kasan_init.c | 23 ++++++++++++++++++-----
> > > >  1 file changed, 18 insertions(+), 5 deletions(-)
> > > >
> > > > diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
> > > > index d8e66c78440e..39b218a64279 100644
> > > > --- a/arch/arm64/mm/kasan_init.c
> > > > +++ b/arch/arm64/mm/kasan_init.c
> > > > @@ -214,6 +214,7 @@ static void __init kasan_init_shadow(void)
> > > >  {
> > > >   u64 kimg_shadow_start, kimg_shadow_end;
> > > >   u64 mod_shadow_start, mod_shadow_end;
> > > > + u64 vmalloc_shadow_start, vmalloc_shadow_end;
> > > >   phys_addr_t pa_start, pa_end;
> > > >   u64 i;
> > > >
> > > > @@ -223,6 +224,9 @@ static void __init kasan_init_shadow(void)
> > > >   mod_shadow_start = (u64)kasan_mem_to_shadow((void *)MODULES_VADDR);
> > > >   mod_shadow_end = (u64)kasan_mem_to_shadow((void *)MODULES_END);
> > > >
> > > > + vmalloc_shadow_start = (u64)kasan_mem_to_shadow((void *)VMALLOC_START);
> > > > + vmalloc_shadow_end = (u64)kasan_mem_to_shadow((void *)VMALLOC_END);
> > > > +
> > > >   /*
> > > >    * We are going to perform proper setup of shadow memory.
> > > >    * At first we should unmap early shadow (clear_pgds() call below).
> > > > @@ -241,12 +245,21 @@ static void __init kasan_init_shadow(void)
> > > >
> > > >   kasan_populate_early_shadow(kasan_mem_to_shadow((void *)PAGE_END),
> > > >                              (void *)mod_shadow_start);
> > > > - kasan_populate_early_shadow((void *)kimg_shadow_end,
> > > > -                            (void *)KASAN_SHADOW_END);
> > > > + if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
> > >
> > > Do we really need yet another CONFIG option for KASAN? What's the use-case
> > > for *not* enabling this if you're already enabling one of the KASAN
> > > backends?
> >
> > As I know, KASAN_VMALLOC now only supports KASAN_GENERIC and also
> > KASAN_VMALLOC uses more memory to map real shadow memory (1/8 of vmalloc va).
>
> The shadow is allocated dynamically though, isn't it?

Yes, but it's still a cost.

> > There should be someone can enable KASAN_GENERIC but can't use VMALLOC
> > due to memory issue.
>
> That doesn't sound particularly realistic to me. The reason I'm pushing here
> is because I would _really_ like to move to VMAP stack unconditionally, and
> that would effectively force KASAN_VMALLOC to be set if KASAN is in use.
>
> So unless there's a really good reason not to do that, please can we make
> this unconditional for arm64? Pretty please?

I think it's fine since we have a good reason.
Also, if someone has memory constraints with KASAN_VMALLOC,
they can use SW_TAGS, right?

However, SW_TAGS/HW_TAGS do not support vmalloc yet,
so the code would look like:

	if (IS_ENABLED(CONFIG_KASAN_GENERIC)) {
		/*
		 * Explain the relationship between KASAN_GENERIC
		 * and KASAN_VMALLOC on arm64 here.
		 * XXX: because we want VMAP stack...
		 */
		kasan_populate_early_shadow((void *)vmalloc_shadow_end,
					    (void *)KASAN_SHADOW_END);
	} else {
		kasan_populate_early_shadow((void *)kimg_shadow_end,
					    (void *)KASAN_SHADOW_END);
		if (kimg_shadow_start > mod_shadow_end)
			kasan_populate_early_shadow((void *)mod_shadow_end,
						    (void *)kimg_shadow_start);
	}

and arch/arm64/Kconfig will add:
	select KASAN_VMALLOC if KASAN_GENERIC

Is this code the same as what you had in mind?

BRs,
Lecopzer
Will Deacon Feb. 5, 2021, 5:18 p.m. UTC | #7
On Fri, Feb 05, 2021 at 12:37:21AM +0800, Lecopzer Chen wrote:
> 
> > On Thu, Feb 04, 2021 at 10:46:12PM +0800, Lecopzer Chen wrote:
> > > > On Sat, Jan 09, 2021 at 06:32:49PM +0800, Lecopzer Chen wrote:
> > > > > Linux support KAsan for VMALLOC since commit 3c5c3cfb9ef4da9
> > > > > ("kasan: support backing vmalloc space with real shadow memory")
> > > > >
> > > > > Like how the MODULES_VADDR does now, just not to early populate
> > > > > the VMALLOC_START between VMALLOC_END.
> > > > > similarly, the kernel code mapping is now in the VMALLOC area and
> > > > > should keep these area populated.
> > > > >
> > > > > Signed-off-by: Lecopzer Chen <lecopzer.chen@mediatek.com>
> > > > > ---
> > > > >  arch/arm64/mm/kasan_init.c | 23 ++++++++++++++++++-----
> > > > >  1 file changed, 18 insertions(+), 5 deletions(-)
> > > > >
> > > > > diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
> > > > > index d8e66c78440e..39b218a64279 100644
> > > > > --- a/arch/arm64/mm/kasan_init.c
> > > > > +++ b/arch/arm64/mm/kasan_init.c
> > > > > @@ -214,6 +214,7 @@ static void __init kasan_init_shadow(void)
> > > > >  {
> > > > >   u64 kimg_shadow_start, kimg_shadow_end;
> > > > >   u64 mod_shadow_start, mod_shadow_end;
> > > > > + u64 vmalloc_shadow_start, vmalloc_shadow_end;
> > > > >   phys_addr_t pa_start, pa_end;
> > > > >   u64 i;
> > > > >
> > > > > @@ -223,6 +224,9 @@ static void __init kasan_init_shadow(void)
> > > > >   mod_shadow_start = (u64)kasan_mem_to_shadow((void *)MODULES_VADDR);
> > > > >   mod_shadow_end = (u64)kasan_mem_to_shadow((void *)MODULES_END);
> > > > >
> > > > > + vmalloc_shadow_start = (u64)kasan_mem_to_shadow((void *)VMALLOC_START);
> > > > > + vmalloc_shadow_end = (u64)kasan_mem_to_shadow((void *)VMALLOC_END);
> > > > > +
> > > > >   /*
> > > > >    * We are going to perform proper setup of shadow memory.
> > > > >    * At first we should unmap early shadow (clear_pgds() call below).
> > > > > @@ -241,12 +245,21 @@ static void __init kasan_init_shadow(void)
> > > > >
> > > > >   kasan_populate_early_shadow(kasan_mem_to_shadow((void *)PAGE_END),
> > > > >                              (void *)mod_shadow_start);
> > > > > - kasan_populate_early_shadow((void *)kimg_shadow_end,
> > > > > -                            (void *)KASAN_SHADOW_END);
> > > > > + if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
> > > >
> > > > Do we really need yet another CONFIG option for KASAN? What's the use-case
> > > > for *not* enabling this if you're already enabling one of the KASAN
> > > > backends?
> > >
> > > As I know, KASAN_VMALLOC now only supports KASAN_GENERIC and also
> > > KASAN_VMALLOC uses more memory to map real shadow memory (1/8 of vmalloc va).
> >
> > The shadow is allocated dynamically though, isn't it?
> 
> Yes, but It's still a cost.
> 
> > > There should be someone can enable KASAN_GENERIC but can't use VMALLOC
> > > due to memory issue.
> >
> > That doesn't sound particularly realistic to me. The reason I'm pushing here
> > is because I would _really_ like to move to VMAP stack unconditionally, and
> > that would effectively force KASAN_VMALLOC to be set if KASAN is in use.
> >
> > So unless there's a really good reason not to do that, please can we make
> > this unconditional for arm64? Pretty please?
> 
> I think it's fine since we have a good reason.
> Also if someone have memory issue in KASAN_VMALLOC,
> they can use SW_TAG, right?
> 
> However the SW_TAG/HW_TAG is not supported VMALLOC yet.
> So the code would be like
> 
> 	if (IS_ENABLED(CONFIG_KASAN_GENERIC))

Just make this CONFIG_KASAN_VMALLOC, since that depends on KASAN_GENERIC.

> 		/* explain the relationship between 
> 		 * KASAN_GENERIC and KASAN_VMALLOC in arm64
> 		 * XXX: because we want VMAP stack....
> 		 */

I don't understand the relation with SW_TAGS. The VMAP_STACK dependency is:

	depends on !KASAN || KASAN_HW_TAGS || KASAN_VMALLOC

which doesn't mention SW_TAGS at all. So that seems to imply that SW_TAGS
and VMAP_STACK are mutually exclusive :(

Will
Andrey Konovalov Feb. 5, 2021, 5:30 p.m. UTC | #8
On Fri, Feb 5, 2021 at 6:19 PM Will Deacon <will@kernel.org> wrote:
>
> On Fri, Feb 05, 2021 at 12:37:21AM +0800, Lecopzer Chen wrote:
> >
> > > On Thu, Feb 04, 2021 at 10:46:12PM +0800, Lecopzer Chen wrote:
> > > > > On Sat, Jan 09, 2021 at 06:32:49PM +0800, Lecopzer Chen wrote:
> > > > > > Linux support KAsan for VMALLOC since commit 3c5c3cfb9ef4da9
> > > > > > ("kasan: support backing vmalloc space with real shadow memory")
> > > > > >
> > > > > > Like how the MODULES_VADDR does now, just not to early populate
> > > > > > the VMALLOC_START between VMALLOC_END.
> > > > > > similarly, the kernel code mapping is now in the VMALLOC area and
> > > > > > should keep these area populated.
> > > > > >
> > > > > > Signed-off-by: Lecopzer Chen <lecopzer.chen@mediatek.com>
> > > > > > ---
> > > > > >  arch/arm64/mm/kasan_init.c | 23 ++++++++++++++++++-----
> > > > > >  1 file changed, 18 insertions(+), 5 deletions(-)
> > > > > >
> > > > > > diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
> > > > > > index d8e66c78440e..39b218a64279 100644
> > > > > > --- a/arch/arm64/mm/kasan_init.c
> > > > > > +++ b/arch/arm64/mm/kasan_init.c
> > > > > > @@ -214,6 +214,7 @@ static void __init kasan_init_shadow(void)
> > > > > >  {
> > > > > >   u64 kimg_shadow_start, kimg_shadow_end;
> > > > > >   u64 mod_shadow_start, mod_shadow_end;
> > > > > > + u64 vmalloc_shadow_start, vmalloc_shadow_end;
> > > > > >   phys_addr_t pa_start, pa_end;
> > > > > >   u64 i;
> > > > > >
> > > > > > @@ -223,6 +224,9 @@ static void __init kasan_init_shadow(void)
> > > > > >   mod_shadow_start = (u64)kasan_mem_to_shadow((void *)MODULES_VADDR);
> > > > > >   mod_shadow_end = (u64)kasan_mem_to_shadow((void *)MODULES_END);
> > > > > >
> > > > > > + vmalloc_shadow_start = (u64)kasan_mem_to_shadow((void *)VMALLOC_START);
> > > > > > + vmalloc_shadow_end = (u64)kasan_mem_to_shadow((void *)VMALLOC_END);
> > > > > > +
> > > > > >   /*
> > > > > >    * We are going to perform proper setup of shadow memory.
> > > > > >    * At first we should unmap early shadow (clear_pgds() call below).
> > > > > > @@ -241,12 +245,21 @@ static void __init kasan_init_shadow(void)
> > > > > >
> > > > > >   kasan_populate_early_shadow(kasan_mem_to_shadow((void *)PAGE_END),
> > > > > >                              (void *)mod_shadow_start);
> > > > > > - kasan_populate_early_shadow((void *)kimg_shadow_end,
> > > > > > -                            (void *)KASAN_SHADOW_END);
> > > > > > + if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
> > > > >
> > > > > Do we really need yet another CONFIG option for KASAN? What's the use-case
> > > > > for *not* enabling this if you're already enabling one of the KASAN
> > > > > backends?
> > > >
> > > > As I know, KASAN_VMALLOC now only supports KASAN_GENERIC and also
> > > > KASAN_VMALLOC uses more memory to map real shadow memory (1/8 of vmalloc va).
> > >
> > > The shadow is allocated dynamically though, isn't it?
> >
> > Yes, but It's still a cost.
> >
> > > > There should be someone can enable KASAN_GENERIC but can't use VMALLOC
> > > > due to memory issue.
> > >
> > > That doesn't sound particularly realistic to me. The reason I'm pushing here
> > > is because I would _really_ like to move to VMAP stack unconditionally, and
> > > that would effectively force KASAN_VMALLOC to be set if KASAN is in use.
> > >
> > > So unless there's a really good reason not to do that, please can we make
> > > this unconditional for arm64? Pretty please?
> >
> > I think it's fine since we have a good reason.
> > Also if someone have memory issue in KASAN_VMALLOC,
> > they can use SW_TAG, right?
> >
> > However the SW_TAG/HW_TAG is not supported VMALLOC yet.
> > So the code would be like
> >
> >       if (IS_ENABLED(CONFIG_KASAN_GENERIC))
>
> Just make this CONFIG_KASAN_VMALLOC, since that depends on KASAN_GENERIC.
>
> >               /* explain the relationship between
> >                * KASAN_GENERIC and KASAN_VMALLOC in arm64
> >                * XXX: because we want VMAP stack....
> >                */
>
> I don't understand the relation with SW_TAGS. The VMAP_STACK dependency is:
>
>         depends on !KASAN || KASAN_HW_TAGS || KASAN_VMALLOC

This means that VMAP_STACK can only be enabled if KASAN_HW_TAGS=y or
if KASAN_VMALLOC=y for the other modes.
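
Spelled out, as an illustrative reading of that expression:

	# depends on !KASAN || KASAN_HW_TAGS || KASAN_VMALLOC
	#
	# KASAN=n                                   -> VMAP_STACK selectable
	# KASAN=y, KASAN_HW_TAGS=y                  -> selectable
	# KASAN=y, KASAN_GENERIC=y, KASAN_VMALLOC=y -> selectable
	# KASAN=y (GENERIC or SW_TAGS), KASAN_VMALLOC=n -> blocked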

>
> which doesn't mention SW_TAGS at all. So that seems to imply that SW_TAGS
> and VMAP_STACK are mutually exclusive :(

SW_TAGS doesn't yet have vmalloc support, so it's not compatible with
VMAP_STACK. Once vmalloc support is added to SW_TAGS, KASAN_VMALLOC
should be allowed to be enabled with SW_TAGS. This series is a step
towards having that support, but doesn't implement it. That will be a
separate effort.
Will Deacon Feb. 5, 2021, 5:43 p.m. UTC | #9
On Fri, Feb 05, 2021 at 06:30:44PM +0100, Andrey Konovalov wrote:
> On Fri, Feb 5, 2021 at 6:19 PM Will Deacon <will@kernel.org> wrote:
> >
> > On Fri, Feb 05, 2021 at 12:37:21AM +0800, Lecopzer Chen wrote:
> > >
> > > > On Thu, Feb 04, 2021 at 10:46:12PM +0800, Lecopzer Chen wrote:
> > > > > > On Sat, Jan 09, 2021 at 06:32:49PM +0800, Lecopzer Chen wrote:
> > > > > > > Linux support KAsan for VMALLOC since commit 3c5c3cfb9ef4da9
> > > > > > > ("kasan: support backing vmalloc space with real shadow memory")
> > > > > > >
> > > > > > > Like how the MODULES_VADDR does now, just not to early populate
> > > > > > > the VMALLOC_START between VMALLOC_END.
> > > > > > > similarly, the kernel code mapping is now in the VMALLOC area and
> > > > > > > should keep these area populated.
> > > > > > >
> > > > > > > Signed-off-by: Lecopzer Chen <lecopzer.chen@mediatek.com>
> > > > > > > ---
> > > > > > >  arch/arm64/mm/kasan_init.c | 23 ++++++++++++++++++-----
> > > > > > >  1 file changed, 18 insertions(+), 5 deletions(-)
> > > > > > >
> > > > > > > diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
> > > > > > > index d8e66c78440e..39b218a64279 100644
> > > > > > > --- a/arch/arm64/mm/kasan_init.c
> > > > > > > +++ b/arch/arm64/mm/kasan_init.c
> > > > > > > @@ -214,6 +214,7 @@ static void __init kasan_init_shadow(void)
> > > > > > >  {
> > > > > > >   u64 kimg_shadow_start, kimg_shadow_end;
> > > > > > >   u64 mod_shadow_start, mod_shadow_end;
> > > > > > > + u64 vmalloc_shadow_start, vmalloc_shadow_end;
> > > > > > >   phys_addr_t pa_start, pa_end;
> > > > > > >   u64 i;
> > > > > > >
> > > > > > > @@ -223,6 +224,9 @@ static void __init kasan_init_shadow(void)
> > > > > > >   mod_shadow_start = (u64)kasan_mem_to_shadow((void *)MODULES_VADDR);
> > > > > > >   mod_shadow_end = (u64)kasan_mem_to_shadow((void *)MODULES_END);
> > > > > > >
> > > > > > > + vmalloc_shadow_start = (u64)kasan_mem_to_shadow((void *)VMALLOC_START);
> > > > > > > + vmalloc_shadow_end = (u64)kasan_mem_to_shadow((void *)VMALLOC_END);
> > > > > > > +
> > > > > > >   /*
> > > > > > >    * We are going to perform proper setup of shadow memory.
> > > > > > >    * At first we should unmap early shadow (clear_pgds() call below).
> > > > > > > @@ -241,12 +245,21 @@ static void __init kasan_init_shadow(void)
> > > > > > >
> > > > > > >   kasan_populate_early_shadow(kasan_mem_to_shadow((void *)PAGE_END),
> > > > > > >                              (void *)mod_shadow_start);
> > > > > > > - kasan_populate_early_shadow((void *)kimg_shadow_end,
> > > > > > > -                            (void *)KASAN_SHADOW_END);
> > > > > > > + if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
> > > > > >
> > > > > > Do we really need yet another CONFIG option for KASAN? What's the use-case
> > > > > > for *not* enabling this if you're already enabling one of the KASAN
> > > > > > backends?
> > > > >
> > > > > As I know, KASAN_VMALLOC now only supports KASAN_GENERIC and also
> > > > > KASAN_VMALLOC uses more memory to map real shadow memory (1/8 of vmalloc va).
> > > >
> > > > The shadow is allocated dynamically though, isn't it?
> > >
> > > Yes, but It's still a cost.
> > >
> > > > > There should be someone can enable KASAN_GENERIC but can't use VMALLOC
> > > > > due to memory issue.
> > > >
> > > > That doesn't sound particularly realistic to me. The reason I'm pushing here
> > > > is because I would _really_ like to move to VMAP stack unconditionally, and
> > > > that would effectively force KASAN_VMALLOC to be set if KASAN is in use.
> > > >
> > > > So unless there's a really good reason not to do that, please can we make
> > > > this unconditional for arm64? Pretty please?
> > >
> > > I think it's fine since we have a good reason.
> > > Also if someone have memory issue in KASAN_VMALLOC,
> > > they can use SW_TAG, right?
> > >
> > > However the SW_TAG/HW_TAG is not supported VMALLOC yet.
> > > So the code would be like
> > >
> > >       if (IS_ENABLED(CONFIG_KASAN_GENERIC))
> >
> > Just make this CONFIG_KASAN_VMALLOC, since that depends on KASAN_GENERIC.
> >
> > >               /* explain the relationship between
> > >                * KASAN_GENERIC and KASAN_VMALLOC in arm64
> > >                * XXX: because we want VMAP stack....
> > >                */
> >
> > I don't understand the relation with SW_TAGS. The VMAP_STACK dependency is:
> >
> >         depends on !KASAN || KASAN_HW_TAGS || KASAN_VMALLOC
> 
> This means that VMAP_STACK can be only enabled if KASAN_HW_TAGS=y or
> if KASAN_VMALLOC=y for other modes.
> 
> >
> > which doesn't mention SW_TAGS at all. So that seems to imply that SW_TAGS
> > and VMAP_STACK are mutually exclusive :(
> 
> SW_TAGS doesn't yet have vmalloc support, so it's not compatible with
> VMAP_STACK. Once vmalloc support is added to SW_TAGS, KASAN_VMALLOC
> should be allowed to be enabled with SW_TAGS. This series is a step
> towards having that support, but doesn't implement it. That will be a
> separate effort.

Ok, thanks. Then I think we should try to invert the dependency here, if
possible, so that the KASAN backends depend on !VMAP_STACK if they don't
support it, rather than silently disabling VMAP_STACK when they are
selected.
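
One shape that inversion could take (a sketch against the v5.11
lib/Kconfig.kasan, untested):

	config KASAN_SW_TAGS
		...
		# no vmalloc support yet, so a vmapped stack would have
		# no shadow backing it
		depends on !VMAP_STACK

with the "!KASAN || ..." clause on the VMAP_STACK side relaxed to
match.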

Will
Lecopzer Chen Feb. 5, 2021, 6:10 p.m. UTC | #10
On Sat, Feb 6, 2021 at 1:19 AM Will Deacon <will@kernel.org> wrote:
>
> On Fri, Feb 05, 2021 at 12:37:21AM +0800, Lecopzer Chen wrote:
> >
> > > On Thu, Feb 04, 2021 at 10:46:12PM +0800, Lecopzer Chen wrote:
> > > > > On Sat, Jan 09, 2021 at 06:32:49PM +0800, Lecopzer Chen wrote:
> > > > > > Linux support KAsan for VMALLOC since commit 3c5c3cfb9ef4da9
> > > > > > ("kasan: support backing vmalloc space with real shadow memory")
> > > > > >
> > > > > > Like how the MODULES_VADDR does now, just not to early populate
> > > > > > the VMALLOC_START between VMALLOC_END.
> > > > > > similarly, the kernel code mapping is now in the VMALLOC area and
> > > > > > should keep these area populated.
> > > > > >
> > > > > > Signed-off-by: Lecopzer Chen <lecopzer.chen@mediatek.com>
> > > > > > ---
> > > > > >  arch/arm64/mm/kasan_init.c | 23 ++++++++++++++++++-----
> > > > > >  1 file changed, 18 insertions(+), 5 deletions(-)
> > > > > >
> > > > > > diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
> > > > > > index d8e66c78440e..39b218a64279 100644
> > > > > > --- a/arch/arm64/mm/kasan_init.c
> > > > > > +++ b/arch/arm64/mm/kasan_init.c
> > > > > > @@ -214,6 +214,7 @@ static void __init kasan_init_shadow(void)
> > > > > >  {
> > > > > >   u64 kimg_shadow_start, kimg_shadow_end;
> > > > > >   u64 mod_shadow_start, mod_shadow_end;
> > > > > > + u64 vmalloc_shadow_start, vmalloc_shadow_end;
> > > > > >   phys_addr_t pa_start, pa_end;
> > > > > >   u64 i;
> > > > > >
> > > > > > @@ -223,6 +224,9 @@ static void __init kasan_init_shadow(void)
> > > > > >   mod_shadow_start = (u64)kasan_mem_to_shadow((void *)MODULES_VADDR);
> > > > > >   mod_shadow_end = (u64)kasan_mem_to_shadow((void *)MODULES_END);
> > > > > >
> > > > > > + vmalloc_shadow_start = (u64)kasan_mem_to_shadow((void *)VMALLOC_START);
> > > > > > + vmalloc_shadow_end = (u64)kasan_mem_to_shadow((void *)VMALLOC_END);
> > > > > > +
> > > > > >   /*
> > > > > >    * We are going to perform proper setup of shadow memory.
> > > > > >    * At first we should unmap early shadow (clear_pgds() call below).
> > > > > > @@ -241,12 +245,21 @@ static void __init kasan_init_shadow(void)
> > > > > >
> > > > > >   kasan_populate_early_shadow(kasan_mem_to_shadow((void *)PAGE_END),
> > > > > >                              (void *)mod_shadow_start);
> > > > > > - kasan_populate_early_shadow((void *)kimg_shadow_end,
> > > > > > -                            (void *)KASAN_SHADOW_END);
> > > > > > + if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
> > > > >
> > > > > Do we really need yet another CONFIG option for KASAN? What's the use-case
> > > > > for *not* enabling this if you're already enabling one of the KASAN
> > > > > backends?
> > > >
> > > > As I know, KASAN_VMALLOC now only supports KASAN_GENERIC and also
> > > > KASAN_VMALLOC uses more memory to map real shadow memory (1/8 of vmalloc va).
> > >
> > > The shadow is allocated dynamically though, isn't it?
> >
> > Yes, but It's still a cost.
> >
> > > > There should be someone can enable KASAN_GENERIC but can't use VMALLOC
> > > > due to memory issue.
> > >
> > > That doesn't sound particularly realistic to me. The reason I'm pushing here
> > > is because I would _really_ like to move to VMAP stack unconditionally, and
> > > that would effectively force KASAN_VMALLOC to be set if KASAN is in use.
> > >
> > > So unless there's a really good reason not to do that, please can we make
> > > this unconditional for arm64? Pretty please?
> >
> > I think it's fine since we have a good reason.
> > Also if someone have memory issue in KASAN_VMALLOC,
> > they can use SW_TAG, right?
> >
> > However the SW_TAG/HW_TAG is not supported VMALLOC yet.
> > So the code would be like
> >
> >       if (IS_ENABLED(CONFIG_KASAN_GENERIC))
>
> Just make this CONFIG_KASAN_VMALLOC, since that depends on KASAN_GENERIC.

OK, this also makes sense.
My first thought was that having KASAN_GENERIC imply KASAN_VMALLOC on
arm64 is a special case, so it needs to be well documented.
I'll document this in the commit message of the Kconfig patch to avoid
cluttering the code here.

I'm going to send the v3 patch; thanks again for your review.

BRs,
Lecopzer
Andrey Konovalov Feb. 5, 2021, 8:50 p.m. UTC | #11
On Fri, Feb 5, 2021 at 6:43 PM Will Deacon <will@kernel.org> wrote:
>
> On Fri, Feb 05, 2021 at 06:30:44PM +0100, Andrey Konovalov wrote:
> > On Fri, Feb 5, 2021 at 6:19 PM Will Deacon <will@kernel.org> wrote:
> > >
> > > On Fri, Feb 05, 2021 at 12:37:21AM +0800, Lecopzer Chen wrote:
> > > >
> > > > > On Thu, Feb 04, 2021 at 10:46:12PM +0800, Lecopzer Chen wrote:
> > > > > > > On Sat, Jan 09, 2021 at 06:32:49PM +0800, Lecopzer Chen wrote:
> > > > > > > > Linux support KAsan for VMALLOC since commit 3c5c3cfb9ef4da9
> > > > > > > > ("kasan: support backing vmalloc space with real shadow memory")
> > > > > > > >
> > > > > > > > Like how the MODULES_VADDR does now, just not to early populate
> > > > > > > > the VMALLOC_START between VMALLOC_END.
> > > > > > > > similarly, the kernel code mapping is now in the VMALLOC area and
> > > > > > > > should keep these area populated.
> > > > > > > >
> > > > > > > > Signed-off-by: Lecopzer Chen <lecopzer.chen@mediatek.com>
> > > > > > > > ---
> > > > > > > >  arch/arm64/mm/kasan_init.c | 23 ++++++++++++++++++-----
> > > > > > > >  1 file changed, 18 insertions(+), 5 deletions(-)
> > > > > > > >
> > > > > > > > diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
> > > > > > > > index d8e66c78440e..39b218a64279 100644
> > > > > > > > --- a/arch/arm64/mm/kasan_init.c
> > > > > > > > +++ b/arch/arm64/mm/kasan_init.c
> > > > > > > > @@ -214,6 +214,7 @@ static void __init kasan_init_shadow(void)
> > > > > > > >  {
> > > > > > > >   u64 kimg_shadow_start, kimg_shadow_end;
> > > > > > > >   u64 mod_shadow_start, mod_shadow_end;
> > > > > > > > + u64 vmalloc_shadow_start, vmalloc_shadow_end;
> > > > > > > >   phys_addr_t pa_start, pa_end;
> > > > > > > >   u64 i;
> > > > > > > >
> > > > > > > > @@ -223,6 +224,9 @@ static void __init kasan_init_shadow(void)
> > > > > > > >   mod_shadow_start = (u64)kasan_mem_to_shadow((void *)MODULES_VADDR);
> > > > > > > >   mod_shadow_end = (u64)kasan_mem_to_shadow((void *)MODULES_END);
> > > > > > > >
> > > > > > > > + vmalloc_shadow_start = (u64)kasan_mem_to_shadow((void *)VMALLOC_START);
> > > > > > > > + vmalloc_shadow_end = (u64)kasan_mem_to_shadow((void *)VMALLOC_END);
> > > > > > > > +
> > > > > > > >   /*
> > > > > > > >    * We are going to perform proper setup of shadow memory.
> > > > > > > >    * At first we should unmap early shadow (clear_pgds() call below).
> > > > > > > > @@ -241,12 +245,21 @@ static void __init kasan_init_shadow(void)
> > > > > > > >
> > > > > > > >   kasan_populate_early_shadow(kasan_mem_to_shadow((void *)PAGE_END),
> > > > > > > >                              (void *)mod_shadow_start);
> > > > > > > > - kasan_populate_early_shadow((void *)kimg_shadow_end,
> > > > > > > > -                            (void *)KASAN_SHADOW_END);
> > > > > > > > + if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
> > > > > > >
> > > > > > > Do we really need yet another CONFIG option for KASAN? What's the use-case
> > > > > > > for *not* enabling this if you're already enabling one of the KASAN
> > > > > > > backends?
> > > > > >
> > > > > > As I know, KASAN_VMALLOC now only supports KASAN_GENERIC and also
> > > > > > KASAN_VMALLOC uses more memory to map real shadow memory (1/8 of vmalloc va).
> > > > >
> > > > > The shadow is allocated dynamically though, isn't it?
> > > >
> > > > Yes, but It's still a cost.
> > > >
> > > > > > There should be someone can enable KASAN_GENERIC but can't use VMALLOC
> > > > > > due to memory issue.
> > > > >
> > > > > That doesn't sound particularly realistic to me. The reason I'm pushing here
> > > > > is because I would _really_ like to move to VMAP stack unconditionally, and
> > > > > that would effectively force KASAN_VMALLOC to be set if KASAN is in use.
> > > > >
> > > > > So unless there's a really good reason not to do that, please can we make
> > > > > this unconditional for arm64? Pretty please?
> > > >
> > > > I think it's fine since we have a good reason.
> > > > Also if someone have memory issue in KASAN_VMALLOC,
> > > > they can use SW_TAG, right?
> > > >
> > > > However the SW_TAG/HW_TAG is not supported VMALLOC yet.
> > > > So the code would be like
> > > >
> > > >       if (IS_ENABLED(CONFIG_KASAN_GENERIC))
> > >
> > > Just make this CONFIG_KASAN_VMALLOC, since that depends on KASAN_GENERIC.
> > >
> > > >               /* explain the relationship between
> > > >                * KASAN_GENERIC and KASAN_VMALLOC in arm64
> > > >                * XXX: because we want VMAP stack....
> > > >                */
> > >
> > > I don't understand the relation with SW_TAGS. The VMAP_STACK dependency is:
> > >
> > >         depends on !KASAN || KASAN_HW_TAGS || KASAN_VMALLOC
> >
> > This means that VMAP_STACK can be only enabled if KASAN_HW_TAGS=y or
> > if KASAN_VMALLOC=y for other modes.
> >
> > >
> > > which doesn't mention SW_TAGS at all. So that seems to imply that SW_TAGS
> > > and VMAP_STACK are mutually exclusive :(
> >
> > SW_TAGS doesn't yet have vmalloc support, so it's not compatible with
> > VMAP_STACK. Once vmalloc support is added to SW_TAGS, KASAN_VMALLOC
> > should be allowed to be enabled with SW_TAGS. This series is a step
> > towards having that support, but doesn't implement it. That will be a
> > separate effort.
>
> Ok, thanks. Then I think we should try to invert the dependency here, if
> possible, so that the KASAN backends depend on !VMAP_STACK if they don't
> support it, rather than silently disabling VMAP_STACK when they are
> selected.

SGTM. Not sure if I will get to this in the near future, so I filed
a bug: https://bugzilla.kernel.org/show_bug.cgi?id=211581

Patch

diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
index d8e66c78440e..39b218a64279 100644
--- a/arch/arm64/mm/kasan_init.c
+++ b/arch/arm64/mm/kasan_init.c
@@ -214,6 +214,7 @@  static void __init kasan_init_shadow(void)
 {
 	u64 kimg_shadow_start, kimg_shadow_end;
 	u64 mod_shadow_start, mod_shadow_end;
+	u64 vmalloc_shadow_start, vmalloc_shadow_end;
 	phys_addr_t pa_start, pa_end;
 	u64 i;
 
@@ -223,6 +224,9 @@  static void __init kasan_init_shadow(void)
 	mod_shadow_start = (u64)kasan_mem_to_shadow((void *)MODULES_VADDR);
 	mod_shadow_end = (u64)kasan_mem_to_shadow((void *)MODULES_END);
 
+	vmalloc_shadow_start = (u64)kasan_mem_to_shadow((void *)VMALLOC_START);
+	vmalloc_shadow_end = (u64)kasan_mem_to_shadow((void *)VMALLOC_END);
+
 	/*
 	 * We are going to perform proper setup of shadow memory.
 	 * At first we should unmap early shadow (clear_pgds() call below).
@@ -241,12 +245,21 @@  static void __init kasan_init_shadow(void)
 
 	kasan_populate_early_shadow(kasan_mem_to_shadow((void *)PAGE_END),
 				   (void *)mod_shadow_start);
-	kasan_populate_early_shadow((void *)kimg_shadow_end,
-				   (void *)KASAN_SHADOW_END);
+	if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
+		kasan_populate_early_shadow((void *)vmalloc_shadow_end,
+					    (void *)KASAN_SHADOW_END);
+		if (vmalloc_shadow_start > mod_shadow_end)
+			kasan_populate_early_shadow((void *)mod_shadow_end,
+						    (void *)vmalloc_shadow_start);
+
+	} else {
+		kasan_populate_early_shadow((void *)kimg_shadow_end,
+					    (void *)KASAN_SHADOW_END);
+		if (kimg_shadow_start > mod_shadow_end)
+			kasan_populate_early_shadow((void *)mod_shadow_end,
+						    (void *)kimg_shadow_start);
+	}
 
-	if (kimg_shadow_start > mod_shadow_end)
-		kasan_populate_early_shadow((void *)mod_shadow_end,
-					    (void *)kimg_shadow_start);
 
 	for_each_mem_range(i, &pa_start, &pa_end) {
 		void *start = (void *)__phys_to_virt(pa_start);