
[03/18] KVM: arm64: Handle trapping of FEAT_LS64* instructions

Message ID 20250210184150.2145093-4-maz@kernel.org
State New
Series KVM: arm64: Revamp Fine Grained Trap handling

Commit Message

Marc Zyngier Feb. 10, 2025, 6:41 p.m. UTC
We generally don't expect FEAT_LS64* instructions to trap, unless
they are trapped by a guest hypervisor.

Otherwise, this is just the guest playing tricks on us by using
an instruction that isn't advertised, which we handle with a well
deserved UNDEF.

Signed-off-by: Marc Zyngier <maz@kernel.org>
---
 arch/arm64/kvm/handle_exit.c | 64 ++++++++++++++++++++++++++++++++++++
 1 file changed, 64 insertions(+)
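
For context: these traps are normally unexpected because the HCRX_EL2 enable bits are only left clear when the corresponding LS64 level is hidden from the guest, so an advertised instruction never traps and a non-advertised one always does. A rough sketch of that host-side configuration (not part of this patch; the helper name and placement are purely illustrative) could look like this:

static void vcpu_setup_hcrx_ls64(struct kvm_vcpu *vcpu, u64 *hcrx)
{
	struct kvm *kvm = vcpu->kvm;

	/* Only enable the LS64 levels that are advertised to the guest */
	if (kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64))
		*hcrx |= HCRX_EL2_EnALS;	/* LD64B/ST64B */
	if (kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64_V))
		*hcrx |= HCRX_EL2_EnASR;	/* ST64BV */
	if (kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64_ACCDATA))
		*hcrx |= HCRX_EL2_EnAS0;	/* ST64BV0 */
}

Anything that still lands in the new handler is therefore either a guest hypervisor's own trap configuration at work, or a guest using an instruction it was never given.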

Comments

Mark Rutland Feb. 11, 2025, 12:28 p.m. UTC | #1
On Mon, Feb 10, 2025 at 06:41:34PM +0000, Marc Zyngier wrote:
> We generally don't expect FEAT_LS64* instructions to trap, unless
> they are trapped by a guest hypervisor.
> 
> Otherwise, this is just the guest playing tricks on us by using
> an instruction that isn't advertised, which we handle with a well
> deserved UNDEF.
> 
> Signed-off-by: Marc Zyngier <maz@kernel.org>
> ---
>  arch/arm64/kvm/handle_exit.c | 64 ++++++++++++++++++++++++++++++++++++
>  1 file changed, 64 insertions(+)
> 
> diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
> index 512d152233ff2..4f8354bf7dc5f 100644
> --- a/arch/arm64/kvm/handle_exit.c
> +++ b/arch/arm64/kvm/handle_exit.c
> @@ -294,6 +294,69 @@ static int handle_svc(struct kvm_vcpu *vcpu)
>  	return 1;
>  }
>  
> +static int handle_ls64b(struct kvm_vcpu *vcpu)

Structurally this looks good. As noted on patch 2, I think that
naming-wise this should be more general, e.g. handle_other_insn().

Mark.

> +{
> +	struct kvm *kvm = vcpu->kvm;
> +	u64 esr = kvm_vcpu_get_esr(vcpu);
> +	u64 iss = ESR_ELx_ISS(esr);
> +	bool allowed;
> +
> +	switch (iss) {
> +	case ESR_ELx_ISS_ST64BV:
> +		allowed = kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64_V);
> +		break;
> +	case ESR_ELx_ISS_ST64BV0:
> +		allowed = kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64_ACCDATA);
> +		break;
> +	case ESR_ELx_ISS_LDST64B:
> +		allowed = kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64);
> +		break;
> +	default:
> +		/* Clearly, we're missing something. */
> +		goto unknown_trap;
> +	}
> +
> +	if (!allowed)
> +		goto undef;
> +
> +	if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) {
> +		u64 hcrx = __vcpu_sys_reg(vcpu, HCRX_EL2);
> +		bool fwd;
> +
> +		switch (iss) {
> +		case ESR_ELx_ISS_ST64BV:
> +			fwd = !(hcrx & HCRX_EL2_EnASR);
> +			break;
> +		case ESR_ELx_ISS_ST64BV0:
> +			fwd = !(hcrx & HCRX_EL2_EnAS0);
> +			break;
> +		case ESR_ELx_ISS_LDST64B:
> +			fwd = !(hcrx & HCRX_EL2_EnALS);
> +			break;
> +		default:
> +			/* We don't expect to be here */
> +			fwd = false;
> +		}
> +
> +		if (fwd) {
> +			kvm_inject_nested_sync(vcpu, esr);
> +			return 1;
> +		}
> +	}
> +
> +unknown_trap:
> +	/*
> +	 * If we land here, something must be very wrong, because we
> +	 * have no idea why we trapped at all. Warn and undef as a
> +	 * fallback.
> +	 */
> +	WARN_ON(1);
> +
> +undef:
> +	kvm_inject_undefined(vcpu);
> +	return 1;
> +}
> +
>  static exit_handle_fn arm_exit_handlers[] = {
>  	[0 ... ESR_ELx_EC_MAX]	= kvm_handle_unknown_ec,
>  	[ESR_ELx_EC_WFx]	= kvm_handle_wfx,
> @@ -303,6 +366,7 @@ static exit_handle_fn arm_exit_handlers[] = {
>  	[ESR_ELx_EC_CP14_LS]	= kvm_handle_cp14_load_store,
>  	[ESR_ELx_EC_CP10_ID]	= kvm_handle_cp10_id,
>  	[ESR_ELx_EC_CP14_64]	= kvm_handle_cp14_64,
> +	[ESR_ELx_EC_LS64B]	= handle_ls64b,
>  	[ESR_ELx_EC_HVC32]	= handle_hvc,
>  	[ESR_ELx_EC_SMC32]	= handle_smc,
>  	[ESR_ELx_EC_HVC64]	= handle_hvc,
> -- 
> 2.39.2
>
Fuad Tabba March 4, 2025, 2:36 p.m. UTC | #2
Hi Marc,

On Mon, 10 Feb 2025 at 18:42, Marc Zyngier <maz@kernel.org> wrote:
>
> We generally don't expect FEAT_LS64* instructions to trap, unless
> they are trapped by a guest hypervisor.
>
> Otherwise, this is just the guest playing tricks on us by using
> an instruction that isn't advertised, which we handle with a well
> deserved UNDEF.
>
> Signed-off-by: Marc Zyngier <maz@kernel.org>
> ---
>  arch/arm64/kvm/handle_exit.c | 64 ++++++++++++++++++++++++++++++++++++
>  1 file changed, 64 insertions(+)
>
> diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
> index 512d152233ff2..4f8354bf7dc5f 100644
> --- a/arch/arm64/kvm/handle_exit.c
> +++ b/arch/arm64/kvm/handle_exit.c
> @@ -294,6 +294,69 @@ static int handle_svc(struct kvm_vcpu *vcpu)
>         return 1;
>  }
>
> +static int handle_ls64b(struct kvm_vcpu *vcpu)
> +{
> +       struct kvm *kvm = vcpu->kvm;
> +       u64 esr = kvm_vcpu_get_esr(vcpu);
> +       u64 iss = ESR_ELx_ISS(esr);
> +       bool allowed;
> +
> +       switch (iss) {
> +       case ESR_ELx_ISS_ST64BV:
> +               allowed = kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64_V);
> +               break;
> +       case ESR_ELx_ISS_ST64BV0:
> +               allowed = kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64_ACCDATA);
> +               break;
> +       case ESR_ELx_ISS_LDST64B:
> +               allowed = kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64);
> +               break;
> +       default:
> +               /* Clearly, we're missing something. */
> +               goto unknown_trap;
> +       }
> +
> +       if (!allowed)
> +               goto undef;
> +
> +       if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) {
> +               u64 hcrx = __vcpu_sys_reg(vcpu, HCRX_EL2);
> +               bool fwd;
> +
> +               switch (iss) {
> +               case ESR_ELx_ISS_ST64BV:
> +                       fwd = !(hcrx & HCRX_EL2_EnASR);
> +                       break;
> +               case ESR_ELx_ISS_ST64BV0:
> +                       fwd = !(hcrx & HCRX_EL2_EnAS0);
> +                       break;
> +               case ESR_ELx_ISS_LDST64B:
> +                       fwd = !(hcrx & HCRX_EL2_EnALS);
> +                       break;
> +               default:
> +                       /* We don't expect to be here */
> +                       fwd = false;
> +               }
> +
> +               if (fwd) {
> +                       kvm_inject_nested_sync(vcpu, esr);
> +                       return 1;
> +               }
> +       }
> +
> +unknown_trap:
> +       /*
> +        * If we land here, something must be very wrong, because we
> +        * have no idea why we trapped at all. Warn and undef as a
> +        * fallback.
> +        */
> +       WARN_ON(1);

nit: should this be WARN_ONCE() instead?

> +
> +undef:
> +       kvm_inject_undefined(vcpu);
> +       return 1;
> +}

I'm wondering if this can be simplified by having one switch()
statement that toggles both allowed and fwd (or maybe even only fwd),
and then inject depending on that, e.g.,

+static int handle_ls64b(struct kvm_vcpu *vcpu)
+{
+    struct kvm *kvm = vcpu->kvm;
+    bool is_nv = vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu);
+    u64 hcrx = __vcpu_sys_reg(vcpu, HCRX_EL2);
+    u64 esr = kvm_vcpu_get_esr(vcpu);
+    u64 iss = ESR_ELx_ISS(esr);
+    bool fwd = false;
+
+    switch (iss) {
+    case ESR_ELx_ISS_ST64BV:
+         fwd = kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64_V) &&
+                   !(hcrx & HCRX_EL2_EnASR);
+         break;
...
+    default:
+        WARN_ONCE(1);
+    }
+
+    if (is_nv && fwd)
+        kvm_inject_nested_sync(vcpu, esr);
+    else
+        kvm_inject_undefined(vcpu);
+
+    return 1;
+}

I think this has the same effect as the code above.

Cheers,
/fuad



> +
>  static exit_handle_fn arm_exit_handlers[] = {
>         [0 ... ESR_ELx_EC_MAX]  = kvm_handle_unknown_ec,
>         [ESR_ELx_EC_WFx]        = kvm_handle_wfx,
> @@ -303,6 +366,7 @@ static exit_handle_fn arm_exit_handlers[] = {
>         [ESR_ELx_EC_CP14_LS]    = kvm_handle_cp14_load_store,
>         [ESR_ELx_EC_CP10_ID]    = kvm_handle_cp10_id,
>         [ESR_ELx_EC_CP14_64]    = kvm_handle_cp14_64,
> +       [ESR_ELx_EC_LS64B]      = handle_ls64b,
>         [ESR_ELx_EC_HVC32]      = handle_hvc,
>         [ESR_ELx_EC_SMC32]      = handle_smc,
>         [ESR_ELx_EC_HVC64]      = handle_hvc,
> --
> 2.39.2
>
Marc Zyngier March 4, 2025, 3:25 p.m. UTC | #3
Hi Fuad,

On Tue, 04 Mar 2025 14:36:19 +0000,
Fuad Tabba <tabba@google.com> wrote:
> 
> Hi Marc,
> 
> On Mon, 10 Feb 2025 at 18:42, Marc Zyngier <maz@kernel.org> wrote:
> >
> > We generally don't expect FEAT_LS64* instructions to trap, unless
> > they are trapped by a guest hypervisor.
> >
> > Otherwise, this is just the guest playing tricks on us by using
> > an instruction that isn't advertised, which we handle with a well
> > deserved UNDEF.
> >
> > Signed-off-by: Marc Zyngier <maz@kernel.org>
> > ---
> >  arch/arm64/kvm/handle_exit.c | 64 ++++++++++++++++++++++++++++++++++++
> >  1 file changed, 64 insertions(+)
> >
> > diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
> > index 512d152233ff2..4f8354bf7dc5f 100644
> > --- a/arch/arm64/kvm/handle_exit.c
> > +++ b/arch/arm64/kvm/handle_exit.c
> > @@ -294,6 +294,69 @@ static int handle_svc(struct kvm_vcpu *vcpu)
> >         return 1;
> >  }
> >
> > +static int handle_ls64b(struct kvm_vcpu *vcpu)
> > +{
> > +       struct kvm *kvm = vcpu->kvm;
> > +       u64 esr = kvm_vcpu_get_esr(vcpu);
> > +       u64 iss = ESR_ELx_ISS(esr);
> > +       bool allowed;
> > +
> > +       switch (iss) {
> > +       case ESR_ELx_ISS_ST64BV:
> > +               allowed = kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64_V);
> > +               break;
> > +       case ESR_ELx_ISS_ST64BV0:
> > +               allowed = kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64_ACCDATA);
> > +               break;
> > +       case ESR_ELx_ISS_LDST64B:
> > +               allowed = kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64);
> > +               break;
> > +       default:
> > +               /* Clearly, we're missing something. */
> > +               goto unknown_trap;
> > +       }
> > +
> > +       if (!allowed)
> > +               goto undef;
> > +
> > +       if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) {
> > +               u64 hcrx = __vcpu_sys_reg(vcpu, HCRX_EL2);
> > +               bool fwd;
> > +
> > +               switch (iss) {
> > +               case ESR_ELx_ISS_ST64BV:
> > +                       fwd = !(hcrx & HCRX_EL2_EnASR);
> > +                       break;
> > +               case ESR_ELx_ISS_ST64BV0:
> > +                       fwd = !(hcrx & HCRX_EL2_EnAS0);
> > +                       break;
> > +               case ESR_ELx_ISS_LDST64B:
> > +                       fwd = !(hcrx & HCRX_EL2_EnALS);
> > +                       break;
> > +               default:
> > +                       /* We don't expect to be here */
> > +                       fwd = false;
> > +               }
> > +
> > +               if (fwd) {
> > +                       kvm_inject_nested_sync(vcpu, esr);
> > +                       return 1;
> > +               }
> > +       }
> > +
> > +unknown_trap:
> > +       /*
> > +        * If we land here, something must be very wrong, because we
> > +        * have no idea why we trapped at all. Warn and undef as a
> > +        * fallback.
> > +        */
> > +       WARN_ON(1);
> 
> nit: should this be WARN_ONCE() instead?
> 
> > +
> > +undef:
> > +       kvm_inject_undefined(vcpu);
> > +       return 1;
> > +}
> 
> I'm wondering if this can be simplified by having one switch()
> statement that toggles both allowed and fwd (or maybe even only fwd),
> and then inject depending on that, e.g.,
> 
> +static int handle_ls64b(struct kvm_vcpu *vcpu)
> +{
> +    struct kvm *kvm = vcpu->kvm;
> +    bool is_nv = vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu);
> +    u64 hcrx = __vcpu_sys_reg(vcpu, HCRX_EL2);
> +    u64 esr = kvm_vcpu_get_esr(vcpu);
> +    u64 iss = ESR_ELx_ISS(esr);
> +    bool fwd = false;
> +
> +    switch (iss) {
> +    case ESR_ELx_ISS_ST64BV:
> +         fwd = kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64_V) &&
> +                   !(hcrx & HCRX_EL2_EnASR)
> +         break;
> ...
> +    default:
> +        WARN_ONCE(1);
> + }
> +
> +    if (is_nv && fwd) {
> +        kvm_inject_nested_sync(vcpu, esr);
> +    else
> +        kvm_inject_undefined(vcpu);
> +
> +    return 1;
> +}
> 
> I think this has the same effect as the code above.

Yeah, I think something like that could work. I initially was trying
to handle all 4 states (allowed, fwd), but obviously some states do
not exist (you don't forward something that isn't allowed).

My only concern here is that we don't have clear rules on the init of
EL2 guest registers such as HCRX_EL2 when !NV, but that's something
that can be easily done (or even worked around locally).

Maybe we should introduce the dreaded notion of "effective value" for
EL2 registers when NV is not enabled...

Thanks,

	M.
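
One minimal way to express the "effective value" idea floated above (purely a sketch; the helper name is invented for illustration) would be to resolve HCRX_EL2 to a value with all the relevant enable bits set whenever there is no NV to honour:

static u64 effective_hcrx_el2(struct kvm_vcpu *vcpu)
{
	/* No guest hypervisor: behave as if all the enables were set */
	if (!vcpu_has_nv(vcpu))
		return HCRX_EL2_EnALS | HCRX_EL2_EnASR | HCRX_EL2_EnAS0;

	return __vcpu_sys_reg(vcpu, HCRX_EL2);
}

A single switch() along the lines Fuad suggested could then use this helper without caring how HCRX_EL2 was initialised for a !NV vcpu.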
Marc Zyngier March 4, 2025, 3:47 p.m. UTC | #4
On Tue, 04 Mar 2025 14:36:19 +0000,
Fuad Tabba <tabba@google.com> wrote:
> 
> Hi Marc,
> 
> On Mon, 10 Feb 2025 at 18:42, Marc Zyngier <maz@kernel.org> wrote:
> >
> > We generally don't expect FEAT_LS64* instructions to trap, unless
> > they are trapped by a guest hypervisor.
> >
> > Otherwise, this is just the guest playing tricks on us by using
> > an instruction that isn't advertised, which we handle with a well
> > deserved UNDEF.
> >
> > Signed-off-by: Marc Zyngier <maz@kernel.org>
> > ---
> >  arch/arm64/kvm/handle_exit.c | 64 ++++++++++++++++++++++++++++++++++++
> >  1 file changed, 64 insertions(+)
> >
> > diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
> > index 512d152233ff2..4f8354bf7dc5f 100644
> > --- a/arch/arm64/kvm/handle_exit.c
> > +++ b/arch/arm64/kvm/handle_exit.c
> > @@ -294,6 +294,69 @@ static int handle_svc(struct kvm_vcpu *vcpu)
> >         return 1;
> >  }
> >
> > +static int handle_ls64b(struct kvm_vcpu *vcpu)
> > +{
> > +       struct kvm *kvm = vcpu->kvm;
> > +       u64 esr = kvm_vcpu_get_esr(vcpu);
> > +       u64 iss = ESR_ELx_ISS(esr);
> > +       bool allowed;
> > +
> > +       switch (iss) {
> > +       case ESR_ELx_ISS_ST64BV:
> > +               allowed = kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64_V);
> > +               break;
> > +       case ESR_ELx_ISS_ST64BV0:
> > +               allowed = kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64_ACCDATA);
> > +               break;
> > +       case ESR_ELx_ISS_LDST64B:
> > +               allowed = kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64);
> > +               break;
> > +       default:
> > +               /* Clearly, we're missing something. */
> > +               goto unknown_trap;
> > +       }
> > +
> > +       if (!allowed)
> > +               goto undef;
> > +
> > +       if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) {
> > +               u64 hcrx = __vcpu_sys_reg(vcpu, HCRX_EL2);
> > +               bool fwd;
> > +
> > +               switch (iss) {
> > +               case ESR_ELx_ISS_ST64BV:
> > +                       fwd = !(hcrx & HCRX_EL2_EnASR);
> > +                       break;
> > +               case ESR_ELx_ISS_ST64BV0:
> > +                       fwd = !(hcrx & HCRX_EL2_EnAS0);
> > +                       break;
> > +               case ESR_ELx_ISS_LDST64B:
> > +                       fwd = !(hcrx & HCRX_EL2_EnALS);
> > +                       break;
> > +               default:
> > +                       /* We don't expect to be here */
> > +                       fwd = false;
> > +               }
> > +
> > +               if (fwd) {
> > +                       kvm_inject_nested_sync(vcpu, esr);
> > +                       return 1;
> > +               }
> > +       }
> > +
> > +unknown_trap:
> > +       /*
> > +        * If we land here, something must be very wrong, because we
> > +        * have no idea why we trapped at all. Warn and undef as a
> > +        * fallback.
> > +        */
> > +       WARN_ON(1);
> 
> nit: should this be WARN_ONCE() instead?
> 
> > +
> > +undef:
> > +       kvm_inject_undefined(vcpu);
> > +       return 1;
> > +}
> 
> I'm wondering if this can be simplified by having one switch()
> statement that toggles both allowed and fwd (or maybe even only fwd),
> and then inject depending on that, e.g.,
> 
> +static int handle_ls64b(struct kvm_vcpu *vcpu)
> +{
> +    struct kvm *kvm = vcpu->kvm;
> +    bool is_nv = vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu);
> +    u64 hcrx = __vcpu_sys_reg(vcpu, HCRX_EL2);
> +    u64 esr = kvm_vcpu_get_esr(vcpu);
> +    u64 iss = ESR_ELx_ISS(esr);
> +    bool fwd = false;
> +
> +    switch (iss) {
> +    case ESR_ELx_ISS_ST64BV:
> +         fwd = kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64_V) &&
> +                   !(hcrx & HCRX_EL2_EnASR)

Ah, I know what I dislike about this approach: If your L1 guest runs
at EL2, HCRX_EL2 doesn't apply (it is only for an L2 guest). Yet we
evaluate it.

I think this still works because you shouldn't have HCRX_EL2.EnASR
clear on the host if LS64V is advertised to the guest, but it feels a
bit fragile.

I'll have another think about refactoring that code.

Thanks,

	M.
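
For illustration only, here is a sketch of the kind of refactor being discussed: a single switch that only consults HCRX_EL2 for a vEL1/vEL0 context. It is not the code that was eventually merged, and it drops the "no idea why we trapped" warning of the original:

static int handle_ls64b(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	u64 esr = kvm_vcpu_get_esr(vcpu);
	u64 iss = ESR_ELx_ISS(esr);
	/* HCRX_EL2 is only relevant for a vEL1/vEL0 context of an NV guest */
	bool nested = vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu);
	u64 hcrx = nested ? __vcpu_sys_reg(vcpu, HCRX_EL2) : 0;
	bool allowed = false, fwd = false;

	switch (iss) {
	case ESR_ELx_ISS_ST64BV:
		allowed = kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64_V);
		fwd = nested && !(hcrx & HCRX_EL2_EnASR);
		break;
	case ESR_ELx_ISS_ST64BV0:
		allowed = kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64_ACCDATA);
		fwd = nested && !(hcrx & HCRX_EL2_EnAS0);
		break;
	case ESR_ELx_ISS_LDST64B:
		allowed = kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64);
		fwd = nested && !(hcrx & HCRX_EL2_EnALS);
		break;
	default:
		/* Unknown ISS value: fall through to the UNDEF below */
		WARN_ON_ONCE(1);
	}

	if (allowed && fwd)
		kvm_inject_nested_sync(vcpu, esr);
	else
		kvm_inject_undefined(vcpu);

	return 1;
}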

Patch

diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 512d152233ff2..4f8354bf7dc5f 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -294,6 +294,69 @@ static int handle_svc(struct kvm_vcpu *vcpu)
 	return 1;
 }
 
+static int handle_ls64b(struct kvm_vcpu *vcpu)
+{
+	struct kvm *kvm = vcpu->kvm;
+	u64 esr = kvm_vcpu_get_esr(vcpu);
+	u64 iss = ESR_ELx_ISS(esr);
+	bool allowed;
+
+	switch (iss) {
+	case ESR_ELx_ISS_ST64BV:
+		allowed = kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64_V);
+		break;
+	case ESR_ELx_ISS_ST64BV0:
+		allowed = kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64_ACCDATA);
+		break;
+	case ESR_ELx_ISS_LDST64B:
+		allowed = kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64);
+		break;
+	default:
+		/* Clearly, we're missing something. */
+		goto unknown_trap;
+	}
+
+	if (!allowed)
+		goto undef;
+
+	if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) {
+		u64 hcrx = __vcpu_sys_reg(vcpu, HCRX_EL2);
+		bool fwd;
+
+		switch (iss) {
+		case ESR_ELx_ISS_ST64BV:
+			fwd = !(hcrx & HCRX_EL2_EnASR);
+			break;
+		case ESR_ELx_ISS_ST64BV0:
+			fwd = !(hcrx & HCRX_EL2_EnAS0);
+			break;
+		case ESR_ELx_ISS_LDST64B:
+			fwd = !(hcrx & HCRX_EL2_EnALS);
+			break;
+		default:
+			/* We don't expect to be here */
+			fwd = false;
+		}
+
+		if (fwd) {
+			kvm_inject_nested_sync(vcpu, esr);
+			return 1;
+		}
+	}
+
+unknown_trap:
+	/*
+	 * If we land here, something must be very wrong, because we
+	 * have no idea why we trapped at all. Warn and undef as a
+	 * fallback.
+	 */
+	WARN_ON(1);
+
+undef:
+	kvm_inject_undefined(vcpu);
+	return 1;
+}
+
 static exit_handle_fn arm_exit_handlers[] = {
 	[0 ... ESR_ELx_EC_MAX]	= kvm_handle_unknown_ec,
 	[ESR_ELx_EC_WFx]	= kvm_handle_wfx,
@@ -303,6 +366,7 @@ static exit_handle_fn arm_exit_handlers[] = {
 	[ESR_ELx_EC_CP14_LS]	= kvm_handle_cp14_load_store,
 	[ESR_ELx_EC_CP10_ID]	= kvm_handle_cp10_id,
 	[ESR_ELx_EC_CP14_64]	= kvm_handle_cp14_64,
+	[ESR_ELx_EC_LS64B]	= handle_ls64b,
 	[ESR_ELx_EC_HVC32]	= handle_hvc,
 	[ESR_ELx_EC_SMC32]	= handle_smc,
 	[ESR_ELx_EC_HVC64]	= handle_hvc,