@@ -20,6 +20,7 @@
#define HCR_AMVOFFEN (UL(1) << 51)
#define HCR_FIEN (UL(1) << 47)
#define HCR_FWB (UL(1) << 46)
+#define HCR_NV (UL(1) << 42)
#define HCR_API (UL(1) << 41)
#define HCR_APK (UL(1) << 40)
#define HCR_TEA (UL(1) << 37)
@@ -59,4 +59,7 @@ static inline u64 translate_ttbr0_el2_to_ttbr0_el1(u64 ttbr0)
	return ttbr0 & ~GENMASK_ULL(63, 48);
}

+extern bool forward_traps(struct kvm_vcpu *vcpu, u64 control_bit);
+extern bool forward_nv_traps(struct kvm_vcpu *vcpu);
+
#endif /* __ARM64_KVM_NESTED_H */
@@ -14,6 +14,26 @@
#include "trace.h"

+bool forward_traps(struct kvm_vcpu *vcpu, u64 control_bit)
+{
+	bool control_bit_set;
+
+	if (!vcpu_has_nv(vcpu))
+		return false;
+
+	control_bit_set = __vcpu_sys_reg(vcpu, HCR_EL2) & control_bit;
+	if (!vcpu_is_el2(vcpu) && control_bit_set) {
+		kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));
+		return true;
+	}
+	return false;
+}
+
+bool forward_nv_traps(struct kvm_vcpu *vcpu)
+{
+	return forward_traps(vcpu, HCR_NV);
+}
+
static u64 kvm_check_illegal_exception_return(struct kvm_vcpu *vcpu, u64 spsr)
{
	u64 mode = spsr & PSR_MODE_MASK;
@@ -52,6 +72,13 @@ void kvm_emulate_nested_eret(struct kvm_vcpu *vcpu)
	u64 spsr, elr, mode;
	bool direct_eret;

+	/*
+	 * Forward this trap to the virtual EL2 if the virtual
+	 * HCR_EL2.NV bit is set and this is coming from !EL2.
+	 */
+	if (forward_nv_traps(vcpu))
+		return;
+
	/*
	 * Going through the whole put/load motions is a waste of time
	 * if this is a VHE guest hypervisor returning to its own
@@ -65,6 +65,13 @@ static int handle_smc(struct kvm_vcpu *vcpu)
{
	int ret;

+	/*
+	 * Forward this trapped smc instruction to the virtual EL2 if
+	 * the guest has asked for it.
+	 */
+	if (forward_traps(vcpu, HCR_TSC))
+		return 1;
+
	/*
	 * "If an SMC instruction executed at Non-secure EL1 is
	 * trapped to EL2 because HCR_EL2.TSC is 1, the exception is a
@@ -336,10 +336,19 @@ static int set_ccsidr(struct kvm_vcpu *vcpu, u32 csselr, u32 val)
	return 0;
}

+static bool el12_reg(struct sys_reg_params *p)
+{
+	/* All *_EL12 registers have Op1=5. */
+	return (p->Op1 == 5);
+}
+
static bool access_rw(struct kvm_vcpu *vcpu,
		      struct sys_reg_params *p,
		      const struct sys_reg_desc *r)
{
+	if (el12_reg(p) && forward_nv_traps(vcpu))
+		return false;
+
	if (p->is_write)
		vcpu_write_sys_reg(vcpu, p->regval, r->reg);
	else
@@ -408,6 +417,9 @@ static bool access_vm_reg(struct kvm_vcpu *vcpu,
	bool was_enabled = vcpu_has_cache_enabled(vcpu);
	u64 val, mask, shift;

+	if (el12_reg(p) && forward_nv_traps(vcpu))
+		return false;
+
	/* We don't expect TRVM on the host */
	BUG_ON(!vcpu_is_el2(vcpu) && !p->is_write);
@@ -1869,6 +1881,9 @@ static bool access_elr(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       const struct sys_reg_desc *r)
{
+	if (el12_reg(p) && forward_nv_traps(vcpu))
+		return false;
+
	if (p->is_write)
		vcpu_write_sys_reg(vcpu, p->regval, ELR_EL1);
	else
@@ -1881,6 +1896,9 @@ static bool access_spsr(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
+	if (el12_reg(p) && forward_nv_traps(vcpu))
+		return false;
+
	if (p->is_write)
		__vcpu_sys_reg(vcpu, SPSR_EL1) = p->regval;
	else
@@ -1893,6 +1911,9 @@ static bool access_spsr_el2(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *p,
			    const struct sys_reg_desc *r)
{
+	if (el12_reg(p) && forward_nv_traps(vcpu))
+		return false;
+
	if (p->is_write)
		vcpu_write_sys_reg(vcpu, p->regval, SPSR_EL2);
	else