diff --git a/arch/arm64/kvm/emulate-nested.c b/arch/arm64/kvm/emulate-nested.c
@@ -2172,7 +2172,7 @@ static u64 kvm_check_illegal_exception_return(struct kvm_vcpu *vcpu, u64 spsr)
void kvm_emulate_nested_eret(struct kvm_vcpu *vcpu)
{
- u64 spsr, elr;
+ u64 spsr, elr, esr;
/*
* Forward this trap to the virtual EL2 if the virtual
@@ -2181,12 +2181,30 @@ void kvm_emulate_nested_eret(struct kvm_vcpu *vcpu)
if (forward_traps(vcpu, HCR_NV))
return;
+ /* Check for an ERETAx */
+ esr = kvm_vcpu_get_esr(vcpu);
+ if ((esr & ESR_ELx_ERET_ISS_ERETA) && !kvm_auth_eretax(vcpu, &elr)) {
+ /*
+ * Oh no, ERETAx failed to authenticate. If we have
+ * FPACCOMBINE, deliver an exception right away. If we
+ * don't, then let the mangled ELR value trickle down the
+ * ERET handling, and the guest will have a little surprise.
+ */
+ if (kvm_has_pauth(vcpu->kvm, FPACCOMBINE)) {
+ esr &= ESR_ELx_ERET_ISS_ERETAB;
+ esr |= FIELD_PREP(ESR_ELx_EC_MASK, ESR_ELx_EC_FPAC);
+ kvm_inject_nested_sync(vcpu, esr);
+ return;
+ }
+ }
+
preempt_disable();
kvm_arch_vcpu_put(vcpu);
spsr = __vcpu_sys_reg(vcpu, SPSR_EL2);
spsr = kvm_check_illegal_exception_return(vcpu, spsr);
- elr = __vcpu_sys_reg(vcpu, ELR_EL2);
+ if (!(esr & ESR_ELx_ERET_ISS_ERETA))
+ elr = __vcpu_sys_reg(vcpu, ELR_EL2);
trace_kvm_nested_eret(vcpu, elr, spsr);
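
A note on the two ISS bits used in this hunk, since the names are easy
to mix up: as used by this series, ESR_ELx_ERET_ISS_ERETA means "the
trapped instruction was ERETAA or ERETAB rather than a bare ERET",
while ESR_ELx_ERET_ISS_ERETAB selects the B key (ERETAB) over the A key
(ERETAA). A minimal sketch under that assumption (the helpers below are
made up for illustration and are not part of the patch):

	#include <linux/types.h>
	#include <asm/esr.h>

	/* Illustrative helpers only -- not part of the patch. */
	static inline bool iss_is_eretax(u64 esr)
	{
		/* ERETAA or ERETAB, as opposed to a bare ERET */
		return esr & ESR_ELx_ERET_ISS_ERETA;
	}

	static inline bool iss_is_eretab(u64 esr)
	{
		/* the B-key variant (ERETAB) rather than ERETAA */
		return esr & ESR_ELx_ERET_ISS_ERETAB;
	}

This is also why the failed-authentication path masks the syndrome with
ESR_ELx_ERET_ISS_ERETAB before overriding the EC with ESR_ELx_EC_FPAC:
only the key-selection bit is meant to survive into the injected FPAC
syndrome.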
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
@@ -248,7 +248,8 @@ static int kvm_handle_ptrauth(struct kvm_vcpu *vcpu)
static int kvm_handle_eret(struct kvm_vcpu *vcpu)
{
- if (kvm_vcpu_get_esr(vcpu) & ESR_ELx_ERET_ISS_ERETA)
+ if ((kvm_vcpu_get_esr(vcpu) & ESR_ELx_ERET_ISS_ERETA) &&
+ !vcpu_has_ptrauth(vcpu))
return kvm_handle_ptrauth(vcpu);
/*
diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c
@@ -207,7 +207,8 @@ void kvm_vcpu_put_vhe(struct kvm_vcpu *vcpu)
static bool kvm_hyp_handle_eret(struct kvm_vcpu *vcpu, u64 *exit_code)
{
- u64 spsr, mode;
+ u64 esr = kvm_vcpu_get_esr(vcpu);
+ u64 spsr, elr, mode;
/*
* Going through the whole put/load motions is a waste of time
@@ -241,10 +242,18 @@ static bool kvm_hyp_handle_eret(struct kvm_vcpu *vcpu, u64 *exit_code)
return false;
}
+ /* If ERETAx fails, take the slow path */
+ if (esr & ESR_ELx_ERET_ISS_ERETA) {
+ if (!(vcpu_has_ptrauth(vcpu) && kvm_auth_eretax(vcpu, &elr)))
+ return false;
+ } else {
+ elr = read_sysreg_el1(SYS_ELR);
+ }
+
spsr = (spsr & ~(PSR_MODE_MASK | PSR_MODE32_BIT)) | mode;
write_sysreg_el2(spsr, SYS_SPSR);
- write_sysreg_el2(read_sysreg_el1(SYS_ELR), SYS_ELR);
+ write_sysreg_el2(elr, SYS_ELR);
return true;
}
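
The ELR selection above is terse, so here is the same decision spelled
out as a hypothetical helper (illustrative only: neither the helper nor
its name exist in the patch, and kvm_auth_eretax() is assumed to behave
as in the previous patch of the series, returning true and filling in
the authenticated address on success):

	/* Hypothetical refactoring, for illustration only. */
	static bool pick_eret_elr(struct kvm_vcpu *vcpu, u64 esr, u64 *elr)
	{
		/* ERETAA/ERETAB: only fast-path a successful authentication */
		if (esr & ESR_ELx_ERET_ISS_ERETA)
			return vcpu_has_ptrauth(vcpu) && kvm_auth_eretax(vcpu, elr);

		/* Bare ERET: use the ELR the guest wrote, as before */
		*elr = read_sysreg_el1(SYS_ELR);
		return true;
	}

Returning false sends the exit down the slow path, where
kvm_handle_eret() and kvm_emulate_nested_eret() above take over.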
Now that we have some emulation in place for ERETA[AB], we can plug it
into the exception handling machinery.

As for a bare ERET, an "easy" ERETAx instruction is processed as a
fixup, while something that requires a translation regime transition
or an exception delivery is left to the slow path.

Signed-off-by: Marc Zyngier <maz@kernel.org>
---
 arch/arm64/kvm/emulate-nested.c | 22 ++++++++++++++++++++--
 arch/arm64/kvm/handle_exit.c    |  3 ++-
 arch/arm64/kvm/hyp/vhe/switch.c | 13 +++++++++++--
 3 files changed, 33 insertions(+), 5 deletions(-)
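
For completeness, a compact, self-contained model of how the possible
outcomes end up split across the three files touched here. It is purely
illustrative: the booleans stand in for the real checks and the enum
names are invented.

	#include <stdbool.h>

	enum eret_outcome {
		ERET_FIXUP,		/* handled entirely in the VHE fast path */
		ERET_SLOW_EMULATE,	/* full exit, kvm_emulate_nested_eret() */
		ERET_SLOW_FPAC,		/* full exit, FPAC exception injected */
		ERET_NO_PTRAUTH,	/* full exit, routed to kvm_handle_ptrauth() */
	};

	static enum eret_outcome classify_eret(bool is_eretax, bool has_ptrauth,
					       bool auth_ok, bool has_fpaccombine,
					       bool needs_full_emulation)
	{
		if (is_eretax && !has_ptrauth)
			return ERET_NO_PTRAUTH;

		if (is_eretax && !auth_ok)
			return has_fpaccombine ? ERET_SLOW_FPAC : ERET_SLOW_EMULATE;

		/* e.g. a translation regime change or a pending exception */
		return needs_full_emulation ? ERET_SLOW_EMULATE : ERET_FIXUP;
	}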