[3/4] KVM: arm64: Convert the host S2 over to __load_guest_stage2()

Message ID 20210806113109.2475-5-will@kernel.org (mailing list archive)
State New, archived
Series Fix racing TLBI with ASID/VMID reallocation

Commit Message

Will Deacon Aug. 6, 2021, 11:31 a.m. UTC
From: Marc Zyngier <maz@kernel.org>

Protected mode relies on a separate helper, __load_stage2(), to load the
host's stage-2 context. Move it over to the __load_guest_stage2() helper
instead, so a single helper is used for both the guest and host stage-2.

Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Jade Alglave <jade.alglave@arm.com>
Cc: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Signed-off-by: Will Deacon <will@kernel.org>
---
 arch/arm64/include/asm/kvm_mmu.h              | 11 +++--------
 arch/arm64/kvm/hyp/include/nvhe/mem_protect.h |  2 +-
 arch/arm64/kvm/hyp/nvhe/mem_protect.c         |  2 +-
 3 files changed, 5 insertions(+), 10 deletions(-)
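
For quick reference, the net effect of the patch is that the host's stage-2
is loaded through the same (mmu, arch) helper as the guest's; a condensed
sketch of the post-patch state, paraphrased from the diff below rather than
copied verbatim from the kernel sources:

/* arch/arm64/include/asm/kvm_mmu.h: the single remaining S2 load helper. */
static __always_inline void __load_guest_stage2(struct kvm_s2_mmu *mmu,
						struct kvm_arch *arch)
{
	write_sysreg(arch->vtcr, vtcr_el2);		/* VTCR taken from kvm_arch */
	write_sysreg(kvm_get_vttbr(mmu), vttbr_el2);	/* VTTBR/VMID from the S2 MMU */

	/* Speculative-AT workaround: ISB on affected CPUs so both writes take effect. */
	asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));
}

/* nVHE hyp: in protected mode the host's S2 now goes through the same helper. */
static __always_inline void __load_host_stage2(void)
{
	if (static_branch_likely(&kvm_protected_mode_initialized))
		__load_guest_stage2(&host_kvm.arch.mmu, &host_kvm.arch);
	else
		write_sysreg(0, vttbr_el2);
}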

Comments

Quentin Perret Aug. 6, 2021, 1:40 p.m. UTC | #1
On Friday 06 Aug 2021 at 12:31:07 (+0100), Will Deacon wrote:
> [...]
> diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
> index d938ce95d3bd..d4e74ca7f876 100644
> --- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
> +++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
> @@ -126,7 +126,7 @@ int __pkvm_prot_finalize(void)
>  	kvm_flush_dcache_to_poc(params, sizeof(*params));
>  
>  	write_sysreg(params->hcr_el2, hcr_el2);
> -	__load_stage2(&host_kvm.arch.mmu, host_kvm.arch.vtcr);
> +	__load_guest_stage2(&host_kvm.arch.mmu, &host_kvm.arch);

Nit: clearly we're not loading a guest stage-2 here, so maybe the
function should take a more generic name?

Thanks,
Quentin
Marc Zyngier Aug. 20, 2021, 8:01 a.m. UTC | #2
On Fri, 06 Aug 2021 14:40:00 +0100,
Quentin Perret <qperret@google.com> wrote:
> [...]
> 
> Nit: clearly we're not loading a guest stage-2 here, so maybe the
> function should take a more generic name?

How about we rename __load_guest_stage2() to __load_stage2() instead,
with the same parameters?

Thanks,

	M.
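
For illustration only, the rename Marc is suggesting would presumably look
like the sketch below (a name change only, with the parameters kept as they
are); this is not something posted as part of this series:

/* Hypothetical: __load_guest_stage2() renamed to __load_stage2(), same body. */
static __always_inline void __load_stage2(struct kvm_s2_mmu *mmu,
					  struct kvm_arch *arch)
{
	write_sysreg(arch->vtcr, vtcr_el2);
	write_sysreg(kvm_get_vttbr(mmu), vttbr_el2);

	asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));
}

/* Callers would then read naturally for guest and host alike, e.g.: */
/*	__load_stage2(&host_kvm.arch.mmu, &host_kvm.arch);		*/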
Quentin Perret Aug. 20, 2021, 9:08 a.m. UTC | #3
On Friday 20 Aug 2021 at 09:01:41 (+0100), Marc Zyngier wrote:
> On Fri, 06 Aug 2021 14:40:00 +0100,
> Quentin Perret <qperret@google.com> wrote:
> > [...]
> > 
> > Nit: clearly we're not loading a guest stage-2 here, so maybe the
> > function should take a more generic name?
> 
> How about we rename __load_guest_stage2() to __load_stage2() instead,
> with the same parameters?

Yep, that'd work for me.

Thanks,
Quentin

Patch

diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 05e089653a1a..934ef0deff9f 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -267,9 +267,10 @@  static __always_inline u64 kvm_get_vttbr(struct kvm_s2_mmu *mmu)
  * Must be called from hyp code running at EL2 with an updated VTTBR
  * and interrupts disabled.
  */
-static __always_inline void __load_stage2(struct kvm_s2_mmu *mmu, unsigned long vtcr)
+static __always_inline void __load_guest_stage2(struct kvm_s2_mmu *mmu,
+						struct kvm_arch *arch)
 {
-	write_sysreg(vtcr, vtcr_el2);
+	write_sysreg(arch->vtcr, vtcr_el2);
 	write_sysreg(kvm_get_vttbr(mmu), vttbr_el2);
 
 	/*
@@ -280,12 +281,6 @@  static __always_inline void __load_stage2(struct kvm_s2_mmu *mmu, unsigned long
 	asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));
 }
 
-static __always_inline void __load_guest_stage2(struct kvm_s2_mmu *mmu,
-						struct kvm_arch *arch)
-{
-	__load_stage2(mmu, arch->vtcr);
-}
-
 static inline struct kvm *kvm_s2_mmu_to_kvm(struct kvm_s2_mmu *mmu)
 {
 	return container_of(mmu->arch, struct kvm, arch);
diff --git a/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
index 9c227d87c36d..a910648bc71b 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
@@ -29,7 +29,7 @@  void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt);
 static __always_inline void __load_host_stage2(void)
 {
 	if (static_branch_likely(&kvm_protected_mode_initialized))
-		__load_stage2(&host_kvm.arch.mmu, host_kvm.arch.vtcr);
+		__load_guest_stage2(&host_kvm.arch.mmu, &host_kvm.arch);
 	else
 		write_sysreg(0, vttbr_el2);
 }
diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
index d938ce95d3bd..d4e74ca7f876 100644
--- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
+++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
@@ -126,7 +126,7 @@  int __pkvm_prot_finalize(void)
 	kvm_flush_dcache_to_poc(params, sizeof(*params));
 
 	write_sysreg(params->hcr_el2, hcr_el2);
-	__load_stage2(&host_kvm.arch.mmu, host_kvm.arch.vtcr);
+	__load_guest_stage2(&host_kvm.arch.mmu, &host_kvm.arch);
 
 	/*
 	 * Make sure to have an ISB before the TLB maintenance below but only