
[v2,12/13] KVM: arm64: nv: Handle ERETA[AB] instructions

Message ID 20240226100601.2379693-13-maz@kernel.org (mailing list archive)
State New, archived
Series KVM/arm64: Add NV support for ERET and PAuth

Commit Message

Marc Zyngier Feb. 26, 2024, 10:06 a.m. UTC
Now that we have some emulation in place for ERETA[AB], we can
plug it into the exception handling machinery.

As with a bare ERET, an "easy" ERETAx instruction is processed as
a fixup, while something that requires a translation regime
transition or an exception delivery is left to the slow path.

Signed-off-by: Marc Zyngier <maz@kernel.org>
---
 arch/arm64/kvm/emulate-nested.c | 22 ++++++++++++++++++++--
 arch/arm64/kvm/handle_exit.c    |  3 ++-
 arch/arm64/kvm/hyp/vhe/switch.c | 13 +++++++++++--
 3 files changed, 33 insertions(+), 5 deletions(-)
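
For readers joining the series at this patch: the ERETAx checks below key off
the ESR_EL2 ISS encoding for trapped ERET* instructions, using helpers added
earlier in the series. A minimal sketch of those helpers, modelled on the
arch/arm64/include/asm/esr.h additions (reproduced here for illustration only,
the earlier patches are authoritative):

#define ESR_ELx_ERET_ISS_ERET	0x2	/* set for ERETAA/ERETAB, clear for ERET */
#define ESR_ELx_ERET_ISS_ERETA	0x1	/* set for ERETAB, clear for ERETAA */

static inline bool esr_iss_is_eretax(unsigned long esr)
{
	/* true for either authenticated variant (ERETAA or ERETAB) */
	return esr & ESR_ELx_ERET_ISS_ERET;
}

static inline bool esr_iss_is_eretab(unsigned long esr)
{
	/* distinguishes ERETAB (B key) from ERETAA (A key) */
	return esr & ESR_ELx_ERET_ISS_ERETA;
}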

Comments

Joey Gouly March 12, 2024, 11:17 a.m. UTC | #1
On Mon, Feb 26, 2024 at 10:06:00AM +0000, Marc Zyngier wrote:
> Now that we have some emulation in place for ERETA[AB], we can
> plug it into the exception handling machinery.
> 
> As with a bare ERET, an "easy" ERETAx instruction is processed as
> a fixup, while something that requires a translation regime
> transition or an exception delivery is left to the slow path.
> 
> Signed-off-by: Marc Zyngier <maz@kernel.org>
> ---
>  arch/arm64/kvm/emulate-nested.c | 22 ++++++++++++++++++++--
>  arch/arm64/kvm/handle_exit.c    |  3 ++-
>  arch/arm64/kvm/hyp/vhe/switch.c | 13 +++++++++++--
>  3 files changed, 33 insertions(+), 5 deletions(-)
> 
> diff --git a/arch/arm64/kvm/emulate-nested.c b/arch/arm64/kvm/emulate-nested.c
> index 63a74c0330f1..72d733c74a38 100644
> --- a/arch/arm64/kvm/emulate-nested.c
> +++ b/arch/arm64/kvm/emulate-nested.c
> @@ -2172,7 +2172,7 @@ static u64 kvm_check_illegal_exception_return(struct kvm_vcpu *vcpu, u64 spsr)
>  
>  void kvm_emulate_nested_eret(struct kvm_vcpu *vcpu)
>  {
> -	u64 spsr, elr;
> +	u64 spsr, elr, esr;
>  
>  	/*
>  	 * Forward this trap to the virtual EL2 if the virtual
> @@ -2181,12 +2181,30 @@ void kvm_emulate_nested_eret(struct kvm_vcpu *vcpu)
>  	if (forward_traps(vcpu, HCR_NV))
>  		return;
>  
> +	/* Check for an ERETAx */
> +	esr = kvm_vcpu_get_esr(vcpu);
> +	if (esr_iss_is_eretax(esr) && !kvm_auth_eretax(vcpu, &elr)) {
> +		/*
> +		 * Oh no, ERETAx failed to authenticate.  If we have
> +		 * FPACCOMBINE, deliver an exception right away.  If we
> +		 * don't, then let the mangled ELR value trickle down the
> +		 * ERET handling, and the guest will have a little surprise.
> +		 */
> +		if (kvm_has_pauth(vcpu->kvm, FPACCOMBINE)) {
> +			esr &= ESR_ELx_ERET_ISS_ERETA;
> +			esr |= FIELD_PREP(ESR_ELx_EC_MASK, ESR_ELx_EC_FPAC);
> +			kvm_inject_nested_sync(vcpu, esr);
> +			return;
> +		}
> +	}
> +
>  	preempt_disable();
>  	kvm_arch_vcpu_put(vcpu);
>  
>  	spsr = __vcpu_sys_reg(vcpu, SPSR_EL2);
>  	spsr = kvm_check_illegal_exception_return(vcpu, spsr);
> -	elr = __vcpu_sys_reg(vcpu, ELR_EL2);
> +	if (!esr_iss_is_eretax(esr))
> +		elr = __vcpu_sys_reg(vcpu, ELR_EL2);
>  
>  	trace_kvm_nested_eret(vcpu, elr, spsr);
>  
> diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
> index 1ba2f788b2c3..407bdfbb572b 100644
> --- a/arch/arm64/kvm/handle_exit.c
> +++ b/arch/arm64/kvm/handle_exit.c
> @@ -248,7 +248,8 @@ static int kvm_handle_ptrauth(struct kvm_vcpu *vcpu)
>  
>  static int kvm_handle_eret(struct kvm_vcpu *vcpu)
>  {
> -	if (esr_iss_is_eretax(kvm_vcpu_get_esr(vcpu)))
> +	if (esr_iss_is_eretax(kvm_vcpu_get_esr(vcpu)) &&
> +	    !vcpu_has_ptrauth(vcpu))
>  		return kvm_handle_ptrauth(vcpu);
>  
>  	/*
> diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c
> index 3ea9bdf6b555..49d36666040e 100644
> --- a/arch/arm64/kvm/hyp/vhe/switch.c
> +++ b/arch/arm64/kvm/hyp/vhe/switch.c
> @@ -208,7 +208,8 @@ void kvm_vcpu_put_vhe(struct kvm_vcpu *vcpu)
>  
>  static bool kvm_hyp_handle_eret(struct kvm_vcpu *vcpu, u64 *exit_code)
>  {
> -	u64 spsr, mode;
> +	u64 esr = kvm_vcpu_get_esr(vcpu);
> +	u64 spsr, elr, mode;
>  
>  	/*
>  	 * Going through the whole put/load motions is a waste of time
> @@ -242,10 +243,18 @@ static bool kvm_hyp_handle_eret(struct kvm_vcpu *vcpu, u64 *exit_code)
>  		return false;
>  	}
>  
> +	/* If ERETAx fails, take the slow path */
> +	if (esr_iss_is_eretax(esr)) {
> +		if (!(vcpu_has_ptrauth(vcpu) && kvm_auth_eretax(vcpu, &elr)))
> +			return false;
> +	} else {
> +		elr = read_sysreg_el1(SYS_ELR);
> +	}
> +
>  	spsr = (spsr & ~(PSR_MODE_MASK | PSR_MODE32_BIT)) | mode;
>  
>  	write_sysreg_el2(spsr, SYS_SPSR);
> -	write_sysreg_el2(read_sysreg_el1(SYS_ELR), SYS_ELR);
> +	write_sysreg_el2(elr, SYS_ELR);
>  
>  	return true;
>  }
> 

Reviewed-by: Joey Gouly <joey.gouly@arm.com>

Thanks,
Joey

Patch

diff --git a/arch/arm64/kvm/emulate-nested.c b/arch/arm64/kvm/emulate-nested.c
index 63a74c0330f1..72d733c74a38 100644
--- a/arch/arm64/kvm/emulate-nested.c
+++ b/arch/arm64/kvm/emulate-nested.c
@@ -2172,7 +2172,7 @@  static u64 kvm_check_illegal_exception_return(struct kvm_vcpu *vcpu, u64 spsr)
 
 void kvm_emulate_nested_eret(struct kvm_vcpu *vcpu)
 {
-	u64 spsr, elr;
+	u64 spsr, elr, esr;
 
 	/*
 	 * Forward this trap to the virtual EL2 if the virtual
@@ -2181,12 +2181,30 @@  void kvm_emulate_nested_eret(struct kvm_vcpu *vcpu)
 	if (forward_traps(vcpu, HCR_NV))
 		return;
 
+	/* Check for an ERETAx */
+	esr = kvm_vcpu_get_esr(vcpu);
+	if (esr_iss_is_eretax(esr) && !kvm_auth_eretax(vcpu, &elr)) {
+		/*
+		 * Oh no, ERETAx failed to authenticate.  If we have
+		 * FPACCOMBINE, deliver an exception right away.  If we
+		 * don't, then let the mangled ELR value trickle down the
+		 * ERET handling, and the guest will have a little surprise.
+		 */
+		if (kvm_has_pauth(vcpu->kvm, FPACCOMBINE)) {
+			esr &= ESR_ELx_ERET_ISS_ERETA;
+			esr |= FIELD_PREP(ESR_ELx_EC_MASK, ESR_ELx_EC_FPAC);
+			kvm_inject_nested_sync(vcpu, esr);
+			return;
+		}
+	}
+
 	preempt_disable();
 	kvm_arch_vcpu_put(vcpu);
 
 	spsr = __vcpu_sys_reg(vcpu, SPSR_EL2);
 	spsr = kvm_check_illegal_exception_return(vcpu, spsr);
-	elr = __vcpu_sys_reg(vcpu, ELR_EL2);
+	if (!esr_iss_is_eretax(esr))
+		elr = __vcpu_sys_reg(vcpu, ELR_EL2);
 
 	trace_kvm_nested_eret(vcpu, elr, spsr);
 
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 1ba2f788b2c3..407bdfbb572b 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -248,7 +248,8 @@  static int kvm_handle_ptrauth(struct kvm_vcpu *vcpu)
 
 static int kvm_handle_eret(struct kvm_vcpu *vcpu)
 {
-	if (esr_iss_is_eretax(kvm_vcpu_get_esr(vcpu)))
+	if (esr_iss_is_eretax(kvm_vcpu_get_esr(vcpu)) &&
+	    !vcpu_has_ptrauth(vcpu))
 		return kvm_handle_ptrauth(vcpu);
 
 	/*
diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c
index 3ea9bdf6b555..49d36666040e 100644
--- a/arch/arm64/kvm/hyp/vhe/switch.c
+++ b/arch/arm64/kvm/hyp/vhe/switch.c
@@ -208,7 +208,8 @@  void kvm_vcpu_put_vhe(struct kvm_vcpu *vcpu)
 
 static bool kvm_hyp_handle_eret(struct kvm_vcpu *vcpu, u64 *exit_code)
 {
-	u64 spsr, mode;
+	u64 esr = kvm_vcpu_get_esr(vcpu);
+	u64 spsr, elr, mode;
 
 	/*
 	 * Going through the whole put/load motions is a waste of time
@@ -242,10 +243,18 @@  static bool kvm_hyp_handle_eret(struct kvm_vcpu *vcpu, u64 *exit_code)
 		return false;
 	}
 
+	/* If ERETAx fails, take the slow path */
+	if (esr_iss_is_eretax(esr)) {
+		if (!(vcpu_has_ptrauth(vcpu) && kvm_auth_eretax(vcpu, &elr)))
+			return false;
+	} else {
+		elr = read_sysreg_el1(SYS_ELR);
+	}
+
 	spsr = (spsr & ~(PSR_MODE_MASK | PSR_MODE32_BIT)) | mode;
 
 	write_sysreg_el2(spsr, SYS_SPSR);
-	write_sysreg_el2(read_sysreg_el1(SYS_ELR), SYS_ELR);
+	write_sysreg_el2(elr, SYS_ELR);
 
 	return true;
 }
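
Taken together, the three hunks give the following overall handling for an
exception return trapped from virtual EL2. This is a simplified flow sketch
based only on the code above and the commit message, not the literal call
chain:

/*
 * ERET / ERETAA / ERETAB trapped from vEL2 (HCR_EL2.NV behind the trap):
 *
 * 1. kvm_hyp_handle_eret()           -- VHE fast path, hyp/vhe/switch.c
 *      - return needs a translation regime change
 *        or an exception delivery                  -> fall back to slow path
 *      - ERETAx without PAuth enabled, or
 *        kvm_auth_eretax() fails                   -> fall back to slow path
 *      - otherwise: write SPSR_EL2/ELR_EL2 directly and resume ("fixup")
 *
 * 2. kvm_handle_eret()               -- exit handler, handle_exit.c
 *      - ERETAx but PAuth not enabled for the vcpu -> kvm_handle_ptrauth()
 *      - otherwise                                 -> kvm_emulate_nested_eret()
 *
 * 3. kvm_emulate_nested_eret()       -- full emulation, emulate-nested.c
 *      - ERETAx fails to authenticate and the
 *        guest has FPACCOMBINE                     -> inject a nested FPAC exception
 *      - ERETAx fails without FPACCOMBINE          -> proceed with the mangled ELR
 *      - otherwise: emulate the exception return with the computed ELR/SPSR
 */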