@@ -60,6 +60,7 @@ static inline u64 translate_ttbr0_el2_to_ttbr0_el1(u64 ttbr0)
return ttbr0 & ~GENMASK_ULL(63, 48);
}
+extern bool forward_smc_trap(struct kvm_vcpu *vcpu);
int kvm_init_nv_sysregs(struct kvm *kvm);
@@ -2117,6 +2117,37 @@ bool triage_sysreg_trap(struct kvm_vcpu *vcpu, int *sr_index)
return true;
}
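+/*
+ * Decide whether a trap taken from the guest should be forwarded to
+ * the guest hypervisor: this requires nested virt, a vcpu that is not
+ * already in a (virtual) EL2 context, and the matching control bit
+ * set in the guest hypervisor's view of HCR_EL2.
+ */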
+static bool forward_traps(struct kvm_vcpu *vcpu, u64 control_bit)
+{
+ bool control_bit_set;
+
+ if (!vcpu_has_nv(vcpu))
+ return false;
+
+ control_bit_set = __vcpu_sys_reg(vcpu, HCR_EL2) & control_bit;
+ if (!is_hyp_ctxt(vcpu) && control_bit_set) {
+ kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));
+ return true;
+ }
+ return false;
+}
+
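+/*
+ * HCR_EL2.TSC traps SMC instructions executed at NS-EL1 to EL2, so an
+ * SMC taken while the virtual HCR_EL2.TSC is set belongs to the guest
+ * hypervisor rather than to the host.
+ */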
+bool forward_smc_trap(struct kvm_vcpu *vcpu)
+{
+ return forward_traps(vcpu, HCR_TSC);
+}
+
static u64 kvm_check_illegal_exception_return(struct kvm_vcpu *vcpu, u64 spsr)
{
u64 mode = spsr & PSR_MODE_MASK;
@@ -2155,6 +2186,15 @@ void kvm_emulate_nested_eret(struct kvm_vcpu *vcpu)
u64 spsr, elr, mode;
bool direct_eret;
+ /*
+ * Forward this trap to the virtual EL2 if the virtual
+ * HCR_EL2.NV bit is set and this is coming from !EL2.
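+ * With NV set, an ERET executed at EL1 traps to EL2, which
+ * is how a guest hypervisor intercepts its own guests' ERETs.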
+ */
+ if (forward_traps(vcpu, HCR_NV))
+ return;
+
/*
* Going through the whole put/load motions is a waste of time
* if this is a VHE guest hypervisor returning to its own
@@ -55,6 +55,13 @@ static int handle_hvc(struct kvm_vcpu *vcpu)
static int handle_smc(struct kvm_vcpu *vcpu)
{
+ /*
+ * Forward this trapped SMC instruction to the virtual EL2 if
+ * the guest hypervisor has asked for it.
+ */
+ if (forward_smc_trap(vcpu))
+ return 1;
+
/*
* "If an SMC instruction executed at Non-secure EL1 is
* trapped to EL2 because HCR_EL2.TSC is 1, the exception is a