[v2,08/13] KVM: arm64: nv: Handle HCR_EL2.{API,APK} independently

Message ID: 20240226100601.2379693-9-maz@kernel.org
State: New, archived
Series: KVM/arm64: Add NV support for ERET and PAuth

Commit Message

Marc Zyngier Feb. 26, 2024, 10:05 a.m. UTC
Although KVM couples API and APK for simplicity, the architecture
makes no such requirement, and the two can be independently set or
cleared.

Check which of the two possible reasons we have trapped for, and if
the corresponding L1 control bit isn't set, bail out so that the trap
gets forwarded to the guest hypervisor.

Otherwise, set this exact bit in HCR_EL2 and resume the guest.
Of course, in the non-NV case, we keep setting both bits and are
done with it. Note that the entry code already saves/restores the
keys if either of the two control bits is set.

This results in a bit of rework, and the removal of the (trivial)
vcpu_ptrauth_enable() helper.

Signed-off-by: Marc Zyngier <maz@kernel.org>
---
 arch/arm64/include/asm/kvm_emulate.h    |  5 ----
 arch/arm64/kvm/hyp/include/hyp/switch.h | 32 +++++++++++++++++++++----
 2 files changed, 27 insertions(+), 10 deletions(-)
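
For reference, here is a minimal, self-contained sketch of the decision the
patch implements: pick HCR_API or HCR_APK depending on the exception class
that caused the trap, and refuse to handle the trap (so it can be forwarded
to L1) when the guest hypervisor's view of HCR_EL2 doesn't have the
corresponding bit set. This is a userspace stand-in, not the kernel code:
struct model_vcpu, ptrauth_bits_to_enable() and the simplified constants
below are illustrative placeholders, not the real kernel definitions.

/*
 * Stand-in model of the trap-handling decision. The constants carry the
 * architectural bit positions / exception classes, but everything else is
 * simplified for illustration.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define HCR_API           (UINT64_C(1) << 41)  /* trap PAuth instructions when clear */
#define HCR_APK           (UINT64_C(1) << 40)  /* trap key-register accesses when clear */
#define ESR_ELX_EC_PAC    0x09                 /* trapped PAuth instruction */
#define ESR_ELX_EC_SYS64  0x18                 /* trapped MSR/MRS (key registers) */

struct model_vcpu {
	bool     nested;        /* models vcpu_has_nv() && !is_hyp_ctxt() */
	uint64_t l1_hcr_el2;    /* the L1 guest hypervisor's HCR_EL2 */
};

/* Returns the bit(s) to enable, or 0 when the trap should go to L1 instead. */
static uint64_t ptrauth_bits_to_enable(const struct model_vcpu *vcpu, unsigned int ec)
{
	if (!vcpu->nested)
		return HCR_API | HCR_APK;       /* non-NV: keep coupling both bits */

	switch (ec) {
	case ESR_ELX_EC_PAC:
		return (vcpu->l1_hcr_el2 & HCR_API) ? HCR_API : 0;
	case ESR_ELX_EC_SYS64:
		return (vcpu->l1_hcr_el2 & HCR_APK) ? HCR_APK : 0;
	default:
		return 0;
	}
}

int main(void)
{
	/* L1 only allows key accesses: an instruction trap gets forwarded. */
	struct model_vcpu v = { .nested = true, .l1_hcr_el2 = HCR_APK };

	printf("PAC trap   -> %#llx\n",
	       (unsigned long long)ptrauth_bits_to_enable(&v, ESR_ELX_EC_PAC));
	printf("SYS64 trap -> %#llx\n",
	       (unsigned long long)ptrauth_bits_to_enable(&v, ESR_ELX_EC_SYS64));
	return 0;
}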

Comments

Joey Gouly March 7, 2024, 3:14 p.m. UTC | #1
On Mon, Feb 26, 2024 at 10:05:56AM +0000, Marc Zyngier wrote:
> Although KVM couples API and APK for simplicity, the architecture
> makes no such requirement, and the two can be independently set or
> cleared.
> 
> Check for which of the two possible reasons we have trapped here,
> and if the corresponding L1 control bit isn't set, delegate the
> handling for forwarding.
> 
> Otherwise, set this exact bit in HCR_EL2 and resume the guest.
> Of course, in the non-NV case, we keep setting both bits and
> be done with it. Note that the entry core already saves/restores
> the keys should any of the two control bits be set.
> 
> This results in a bit of rework, and the removal of the (trivial)
> vcpu_ptrauth_enable() helper.
> 
> Signed-off-by: Marc Zyngier <maz@kernel.org>
> ---
>  arch/arm64/include/asm/kvm_emulate.h    |  5 ----
>  arch/arm64/kvm/hyp/include/hyp/switch.h | 32 +++++++++++++++++++++----
>  2 files changed, 27 insertions(+), 10 deletions(-)
> 
> diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
> index debc3753d2ef..d2177bc77844 100644
> --- a/arch/arm64/include/asm/kvm_emulate.h
> +++ b/arch/arm64/include/asm/kvm_emulate.h
> @@ -125,11 +125,6 @@ static inline void vcpu_set_wfx_traps(struct kvm_vcpu *vcpu)
>  	vcpu->arch.hcr_el2 |= HCR_TWI;
>  }
>  
> -static inline void vcpu_ptrauth_enable(struct kvm_vcpu *vcpu)
> -{
> -	vcpu->arch.hcr_el2 |= (HCR_API | HCR_APK);
> -}
> -
>  static inline void vcpu_ptrauth_disable(struct kvm_vcpu *vcpu)
>  {
>  	vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK);
> diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
> index f5f701f309a9..a0908d7a8f56 100644
> --- a/arch/arm64/kvm/hyp/include/hyp/switch.h
> +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
> @@ -480,11 +480,35 @@ DECLARE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
>  static bool kvm_hyp_handle_ptrauth(struct kvm_vcpu *vcpu, u64 *exit_code)
>  {
>  	struct kvm_cpu_context *ctxt;
> -	u64 val;
> +	u64 enable = 0;
>  
>  	if (!vcpu_has_ptrauth(vcpu))
>  		return false;
>  
> +	/*
> +	 * NV requires us to handle API and APK independently, just in
> +	 * case the hypervisor is totally nuts. Please barf >here<.
> +	 */
> +	if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) {
> +		switch (ESR_ELx_EC(kvm_vcpu_get_esr(vcpu))) {
> +		case ESR_ELx_EC_PAC:
> +			if (!(__vcpu_sys_reg(vcpu, HCR_EL2) & HCR_API))
> +				return false;
> +
> +			enable |= HCR_API;
> +			break;
> +
> +		case ESR_ELx_EC_SYS64:
> +			if (!(__vcpu_sys_reg(vcpu, HCR_EL2) & HCR_APK))
> +				return false;
> +
> +			enable |= HCR_APK;
> +			break;
> +		}
> +	} else {
> +		enable = HCR_API | HCR_APK;
> +	}
> +
>  	ctxt = this_cpu_ptr(&kvm_hyp_ctxt);
>  	__ptrauth_save_key(ctxt, APIA);
>  	__ptrauth_save_key(ctxt, APIB);
> @@ -492,11 +516,9 @@ static bool kvm_hyp_handle_ptrauth(struct kvm_vcpu *vcpu, u64 *exit_code)
>  	__ptrauth_save_key(ctxt, APDB);
>  	__ptrauth_save_key(ctxt, APGA);
>  
> -	vcpu_ptrauth_enable(vcpu);
>  
> -	val = read_sysreg(hcr_el2);
> -	val |= (HCR_API | HCR_APK);
> -	write_sysreg(val, hcr_el2);
> +	vcpu->arch.hcr_el2 |= enable;
> +	sysreg_clear_set(hcr_el2, 0, enable);
>  
>  	return true;
>  }

A bit of sleuthing tells me you plan to delete kvm_hyp_handle_ptrauth() anyway,
so presumably it makes some sense to put that patch before this to avoid
modifying the code just to delete it!

Thanks,
Joey
Marc Zyngier March 7, 2024, 3:58 p.m. UTC | #2
On Thu, 07 Mar 2024 15:14:54 +0000,
Joey Gouly <joey.gouly@arm.com> wrote:
> 
> On Mon, Feb 26, 2024 at 10:05:56AM +0000, Marc Zyngier wrote:
> > [...]
> 
> A bit of sleuthing tells me you plan to delete kvm_hyp_handle_ptrauth() anyway,
> so presumably it makes some sense to put that patch before this to avoid
> modifying the code just to delete it!

Well, I haven't posted that patch yet (soon!), but it is also
important to show how these things interact overall. *if* we agree
that there is no point in the current approach, then I'll squash the
two.

But there is a lot to be said about:

- discussion on the list first
- minimal changes to track regressions

So I think there is still value in reviewing this patch on its own!

Thanks,

	M.

Patch

diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index debc3753d2ef..d2177bc77844 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -125,11 +125,6 @@  static inline void vcpu_set_wfx_traps(struct kvm_vcpu *vcpu)
 	vcpu->arch.hcr_el2 |= HCR_TWI;
 }
 
-static inline void vcpu_ptrauth_enable(struct kvm_vcpu *vcpu)
-{
-	vcpu->arch.hcr_el2 |= (HCR_API | HCR_APK);
-}
-
 static inline void vcpu_ptrauth_disable(struct kvm_vcpu *vcpu)
 {
 	vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK);
diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index f5f701f309a9..a0908d7a8f56 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -480,11 +480,35 @@  DECLARE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
 static bool kvm_hyp_handle_ptrauth(struct kvm_vcpu *vcpu, u64 *exit_code)
 {
 	struct kvm_cpu_context *ctxt;
-	u64 val;
+	u64 enable = 0;
 
 	if (!vcpu_has_ptrauth(vcpu))
 		return false;
 
+	/*
+	 * NV requires us to handle API and APK independently, just in
+	 * case the hypervisor is totally nuts. Please barf >here<.
+	 */
+	if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) {
+		switch (ESR_ELx_EC(kvm_vcpu_get_esr(vcpu))) {
+		case ESR_ELx_EC_PAC:
+			if (!(__vcpu_sys_reg(vcpu, HCR_EL2) & HCR_API))
+				return false;
+
+			enable |= HCR_API;
+			break;
+
+		case ESR_ELx_EC_SYS64:
+			if (!(__vcpu_sys_reg(vcpu, HCR_EL2) & HCR_APK))
+				return false;
+
+			enable |= HCR_APK;
+			break;
+		}
+	} else {
+		enable = HCR_API | HCR_APK;
+	}
+
 	ctxt = this_cpu_ptr(&kvm_hyp_ctxt);
 	__ptrauth_save_key(ctxt, APIA);
 	__ptrauth_save_key(ctxt, APIB);
@@ -492,11 +516,9 @@  static bool kvm_hyp_handle_ptrauth(struct kvm_vcpu *vcpu, u64 *exit_code)
 	__ptrauth_save_key(ctxt, APDB);
 	__ptrauth_save_key(ctxt, APGA);
 
-	vcpu_ptrauth_enable(vcpu);
 
-	val = read_sysreg(hcr_el2);
-	val |= (HCR_API | HCR_APK);
-	write_sysreg(val, hcr_el2);
+	vcpu->arch.hcr_el2 |= enable;
+	sysreg_clear_set(hcr_el2, 0, enable);
 
 	return true;
 }
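
A side note on the hcr_el2 update above: sysreg_clear_set(reg, clear, set)
is the arm64 read-modify-write helper for system registers, and (if I recall
correctly) it also skips the write when the value is unchanged. The sketch
below is a userspace stand-in for the effect of
sysreg_clear_set(hcr_el2, 0, enable), i.e. OR-ing the newly chosen bit(s)
into the live register, which replaces the open-coded
read_sysreg()/write_sysreg() sequence the patch removes. hcr_clear_set(),
read_hcr(), write_hcr() and fake_hcr_el2 are made-up names for illustration.

/*
 * Userspace stand-in for sysreg_clear_set(hcr_el2, 0, enable); the accessor
 * functions below are placeholders for the real sysreg read/write primitives.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t fake_hcr_el2;                    /* stands in for the live HCR_EL2 */

static uint64_t read_hcr(void)        { return fake_hcr_el2; }
static void     write_hcr(uint64_t v) { fake_hcr_el2 = v; }

/* Clear 'clear', then set 'set', writing back only if the value changed. */
static void hcr_clear_set(uint64_t clear, uint64_t set)
{
	uint64_t old = read_hcr();
	uint64_t new = (old & ~clear) | set;

	if (new != old)
		write_hcr(new);
}

int main(void)
{
	const uint64_t HCR_APK = UINT64_C(1) << 40;

	hcr_clear_set(0, HCR_APK);               /* same shape as the call in the patch */
	printf("hcr_el2 = %#llx\n", (unsigned long long)fake_hcr_el2);
	return 0;
}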