@@ -127,9 +127,11 @@ void kvm_emulate_nested_eret(struct kvm_vcpu *vcpu)
static void enter_el2_exception(struct kvm_vcpu *vcpu, u64 esr_el2,
enum exception_type type)
{
+ u64 spsr = *vcpu_cpsr(vcpu);
+
trace_kvm_inject_nested_exception(vcpu, esr_el2, type);
- vcpu_write_sys_reg(vcpu, *vcpu_cpsr(vcpu), SPSR_EL2);
+ vcpu_write_sys_reg(vcpu, spsr, SPSR_EL2);
vcpu_write_sys_reg(vcpu, *vcpu_pc(vcpu), ELR_EL2);
vcpu_write_sys_reg(vcpu, esr_el2, ESR_EL2);
@@ -137,6 +139,15 @@ static void enter_el2_exception(struct kvm_vcpu *vcpu, u64 esr_el2,
/* On an exception, PSTATE.SP becomes 1 */
*vcpu_cpsr(vcpu) = PSR_MODE_EL2h;
*vcpu_cpsr(vcpu) |= PSR_A_BIT | PSR_F_BIT | PSR_I_BIT | PSR_D_BIT;
+
+ /*
+ * If SPAN is clear, set the PAN bit on exception entry;
+ * if SPAN is set, copy the PAN bit across.
+ */
+ if (!(vcpu_read_sys_reg(vcpu, SCTLR_EL2) & SCTLR_EL1_SPAN))
+ *vcpu_cpsr(vcpu) |= PSR_PAN_BIT;
+ else
+ *vcpu_cpsr(vcpu) |= (spsr & PSR_PAN_BIT);
}
/*
On entering vEL2, we must honor the SCTLR_EL2.SPAN bit so that PSTATE.PAN reflects the expected setting. Signed-off-by: Marc Zyngier <marc.zyngier@arm.com> --- arch/arm64/kvm/emulate-nested.c | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-)