
[03/18] KVM: arm64: Handle trapping of FEAT_LS64* instructions

Message ID: 20250210184150.2145093-4-maz@kernel.org
State: New
Series: KVM: arm64: Revamp Fine Grained Trap handling

Commit Message

Marc Zyngier Feb. 10, 2025, 6:41 p.m. UTC
We generally don't expect FEAT_LS64* instructions to trap, unless
they are trapped by a guest hypervisor.

Otherwise, this is just the guest playing tricks on us by using
an instruction that isn't advertised, which we handle with a well
deserved UNDEF.

Signed-off-by: Marc Zyngier <maz@kernel.org>
---
 arch/arm64/kvm/handle_exit.c | 64 ++++++++++++++++++++++++++++++++++++
 1 file changed, 64 insertions(+)
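
As context for the "instruction that isn't advertised" case, here is an editorial sketch (not part of the patch) of the check a guest would normally perform before issuing LD64B/ST64B: read ID_AA64ISAR1_EL1 and test the LS64 field, which sits in bits [63:60] per the Arm ARM. The helper and macro names below are made up for illustration.

#include <linux/types.h>

/* ID_AA64ISAR1_EL1.LS64 lives in bits [63:60] (Arm ARM field layout). */
#define EXAMPLE_LS64_SHIFT	60
#define EXAMPLE_LS64_MASK	0xfULL

static inline u64 example_read_isar1(void)
{
	u64 val;

	asm volatile("mrs %0, ID_AA64ISAR1_EL1" : "=r" (val));
	return val;
}

static bool example_guest_has_ls64(void)
{
	u64 ls64 = (example_read_isar1() >> EXAMPLE_LS64_SHIFT) &
		   EXAMPLE_LS64_MASK;

	/* 0b0001: LS64, 0b0010: adds LS64_V, 0b0011: adds LS64_ACCDATA */
	return ls64 != 0;
}

A guest that skips this check and executes LD64B anyway is exactly the "playing tricks" case described above, and receives the UNDEF injected by this patch.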

Comments

Mark Rutland Feb. 11, 2025, 12:28 p.m. UTC | #1
On Mon, Feb 10, 2025 at 06:41:34PM +0000, Marc Zyngier wrote:
> We generally don't expect FEAT_LS64* instructions to trap, unless
> they are trapped by a guest hypervisor.
> 
> Otherwise, this is just the guest playing tricks on us by using
> an instruction that isn't advertised, which we handle with a well
> deserved UNDEF.
> 
> Signed-off-by: Marc Zyngier <maz@kernel.org>
> ---
>  arch/arm64/kvm/handle_exit.c | 64 ++++++++++++++++++++++++++++++++++++
>  1 file changed, 64 insertions(+)
> 
> diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
> index 512d152233ff2..4f8354bf7dc5f 100644
> --- a/arch/arm64/kvm/handle_exit.c
> +++ b/arch/arm64/kvm/handle_exit.c
> @@ -294,6 +294,69 @@ static int handle_svc(struct kvm_vcpu *vcpu)
>  	return 1;
>  }
>  
> +static int handle_ls64b(struct kvm_vcpu *vcpu)

Structurally this looks good. As noted on patch 2, I think that
naming-wise this should be more general, e.g. handle_other_insn().

Mark.
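
[For concreteness, the suggested rename would look roughly like the sketch below. This is an editorial illustration, not a posted change; whether the EC constant from patch 2 keeps the LS64B name or also becomes more generic is a separate question.]

-static int handle_ls64b(struct kvm_vcpu *vcpu)
+static int handle_other_insn(struct kvm_vcpu *vcpu)
 ...
-	[ESR_ELx_EC_LS64B]	= handle_ls64b,
+	[ESR_ELx_EC_LS64B]	= handle_other_insn,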


Patch

diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 512d152233ff2..4f8354bf7dc5f 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -294,6 +294,69 @@ static int handle_svc(struct kvm_vcpu *vcpu)
 	return 1;
 }
 
+static int handle_ls64b(struct kvm_vcpu *vcpu)
+{
+	struct kvm *kvm = vcpu->kvm;
+	u64 esr = kvm_vcpu_get_esr(vcpu);
+	u64 iss = ESR_ELx_ISS(esr);
+	bool allowed;
+
+	switch (iss) {
+	case ESR_ELx_ISS_ST64BV:
+		allowed = kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64_V);
+		break;
+	case ESR_ELx_ISS_ST64BV0:
+		allowed = kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64_ACCDATA);
+		break;
+	case ESR_ELx_ISS_LDST64B:
+		allowed = kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64);
+		break;
+	default:
+		/* Clearly, we're missing something. */
+		goto unknown_trap;
+	}
+
+	if (!allowed)
+		goto undef;
+
+	if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) {
+		u64 hcrx = __vcpu_sys_reg(vcpu, HCRX_EL2);
+		bool fwd;
+
+		switch (iss) {
+		case ESR_ELx_ISS_ST64BV:
+			fwd = !(hcrx & HCRX_EL2_EnASR);
+			break;
+		case ESR_ELx_ISS_ST64BV0:
+			fwd = !(hcrx & HCRX_EL2_EnAS0);
+			break;
+		case ESR_ELx_ISS_LDST64B:
+			fwd = !(hcrx & HCRX_EL2_EnALS);
+			break;
+		default:
+			/* We don't expect to be here */
+			fwd = false;
+		}
+
+		if (fwd) {
+			kvm_inject_nested_sync(vcpu, esr);
+			return 1;
+		}
+	}
+
+unknown_trap:
+	/*
+	 * If we land here, something must be very wrong, because we
+	 * have no idea why we trapped at all. Warn and undef as a
+	 * fallback.
+	 */
+	WARN_ON(1);
+
+undef:
+	kvm_inject_undefined(vcpu);
+	return 1;
+}
+
 static exit_handle_fn arm_exit_handlers[] = {
 	[0 ... ESR_ELx_EC_MAX]	= kvm_handle_unknown_ec,
 	[ESR_ELx_EC_WFx]	= kvm_handle_wfx,
@@ -303,6 +366,7 @@ static exit_handle_fn arm_exit_handlers[] = {
 	[ESR_ELx_EC_CP14_LS]	= kvm_handle_cp14_load_store,
 	[ESR_ELx_EC_CP10_ID]	= kvm_handle_cp10_id,
 	[ESR_ELx_EC_CP14_64]	= kvm_handle_cp14_64,
+	[ESR_ELx_EC_LS64B]	= handle_ls64b,
 	[ESR_ELx_EC_HVC32]	= handle_hvc,
 	[ESR_ELx_EC_SMC32]	= handle_smc,
 	[ESR_ELx_EC_HVC64]	= handle_hvc,
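
[For readers following along, the arm_exit_handlers[] table modified above is indexed by exception class, so the dispatch in handle_exit.c looks roughly like the sketch below (paraphrased from the existing file, not part of this patch).]

static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
{
	u8 esr_ec = ESR_ELx_EC(kvm_vcpu_get_esr(vcpu));

	return arm_exit_handlers[esr_ec];
}

[The new [ESR_ELx_EC_LS64B] entry therefore simply routes that EC to handle_ls64b() instead of the default kvm_handle_unknown_ec().]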