--- a/arch/arm64/include/asm/kvm_nested.h
+++ b/arch/arm64/include/asm/kvm_nested.h
@@ -60,6 +60,7 @@ static inline u64 translate_ttbr0_el2_to_ttbr0_el1(u64 ttbr0)
return ttbr0 & ~GENMASK_ULL(63, 48);
}
+extern bool forward_smc_trap(struct kvm_vcpu *vcpu);
extern bool __check_nv_sr_forward(struct kvm_vcpu *vcpu);
int kvm_init_nv_sysregs(struct kvm *kvm);
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -498,6 +498,7 @@
#define SYS_TCR_EL2 sys_reg(3, 4, 2, 0, 2)
#define SYS_VTTBR_EL2 sys_reg(3, 4, 2, 1, 0)
#define SYS_VTCR_EL2 sys_reg(3, 4, 2, 1, 2)
+#define SYS_VNCR_EL2 sys_reg(3, 4, 2, 2, 0)
#define SYS_TRFCR_EL2 sys_reg(3, 4, 1, 2, 1)
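For readers unfamiliar with the encoding, sys_reg() packs the (Op0, Op1, CRn, CRm, Op2) tuple into the immediate used by read_sysreg_s()/write_sysreg_s(). A minimal sketch of the scheme, with the field shifts that sysreg.h defines written out as literals (EXAMPLE_SYS_REG is an illustrative stand-in, not part of this patch):

	/* Illustration only: mirrors the sys_reg() definition in
	 * arch/arm64/include/asm/sysreg.h, with the Op0/Op1/CRn/CRm/Op2
	 * shifts spelled out as literals.
	 */
	#define EXAMPLE_SYS_REG(op0, op1, crn, crm, op2)		\
		(((op0) << 19) | ((op1) << 16) | ((crn) << 12) |	\
		 ((crm) << 8) | ((op2) << 5))

	/* EXAMPLE_SYS_REG(3, 4, 2, 2, 0) == sys_reg(3, 4, 2, 2, 0), i.e. SYS_VNCR_EL2 */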
--- a/arch/arm64/kvm/emulate-nested.c
+++ b/arch/arm64/kvm/emulate-nested.c
@@ -1933,6 +1933,26 @@ bool __check_nv_sr_forward(struct kvm_vcpu *vcpu)
return true;
}
+static bool forward_traps(struct kvm_vcpu *vcpu, u64 control_bit)
+{
+ bool control_bit_set;
+
+ if (!vcpu_has_nv(vcpu))
+ return false;
+
+ control_bit_set = __vcpu_sys_reg(vcpu, HCR_EL2) & control_bit;
+ if (!vcpu_is_el2(vcpu) && control_bit_set) {
+ kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));
+ return true;
+ }
+ return false;
+}
+
+bool forward_smc_trap(struct kvm_vcpu *vcpu)
+{
+ return forward_traps(vcpu, HCR_TSC);
+}
+
static u64 kvm_check_illegal_exception_return(struct kvm_vcpu *vcpu, u64 spsr)
{
u64 mode = spsr & PSR_MODE_MASK;
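A note on the helper added above: forward_traps() is deliberately generic, so any trap gated by a single HCR_EL2 control bit can be forwarded to the virtual EL2 the same way. A hypothetical example (forward_wfi_trap() is illustrative and not part of this patch):

	/* Hypothetical: forward a trapped WFI to the virtual EL2 when
	 * the L1 hypervisor has set HCR_EL2.TWI for its L2 guest.
	 */
	static bool forward_wfi_trap(struct kvm_vcpu *vcpu)
	{
		return forward_traps(vcpu, HCR_TWI);
	}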
@@ -1971,6 +1991,13 @@ void kvm_emulate_nested_eret(struct kvm_vcpu *vcpu)
u64 spsr, elr, mode;
bool direct_eret;
+ /*
+ * Forward this trap to the virtual EL2 if the virtual
+ * HCR_EL2.NV bit is set and this is coming from !EL2.
+ */
+ if (forward_traps(vcpu, HCR_NV))
+ return;
+
/*
* Going through the whole put/load motions is a waste of time
* if this is a VHE guest hypervisor returning to its own
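Since kvm_inject_nested_sync() reuses the ESR that KVM itself received, the L1 hypervisor observes a synchronous exception whose exception class identifies the original ERET. A sketch of the L1-side view (illustrative only, not part of this patch):

	/* What an L1 guest hypervisor would see in its exception
	 * handler after the forwarded trap: EC == ERET.
	 */
	static void l1_handle_sync(void)
	{
		u64 esr = read_sysreg(esr_el2);

		if (ESR_ELx_EC(esr) == ESR_ELx_EC_ERET)
			;	/* L2 executed ERET while HCR_EL2.NV was set */
	}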
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -55,6 +55,13 @@ static int handle_hvc(struct kvm_vcpu *vcpu)
static int handle_smc(struct kvm_vcpu *vcpu)
{
+ /*
+ * Forward this trapped SMC instruction to the virtual EL2 if
+ * the guest has asked for it.
+ */
+ if (forward_smc_trap(vcpu))
+ return 1;
+
/*
* "If an SMC instruction executed at Non-secure EL1 is
* trapped to EL2 because HCR_EL2.TSC is 1, the exception is a
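From the guest hypervisor's side, all it takes to opt in is setting the (virtual) HCR_EL2.TSC bit; KVM then forwards L2's SMC calls instead of handling them. An illustrative L1-side snippet (not part of this patch):

	/* Illustrative: L1 asks for SMCs executed by its L2 guest to
	 * be trapped to (virtual) EL2. The register access is itself
	 * virtualised by the NV infrastructure.
	 */
	static void l1_enable_smc_trap(void)
	{
		write_sysreg(read_sysreg(hcr_el2) | HCR_TSC, hcr_el2);
		isb();
	}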
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -234,10 +234,8 @@ static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu)
__deactivate_traps_hfgxtr(vcpu);
}
-static inline void ___activate_traps(struct kvm_vcpu *vcpu)
+static inline void ___activate_traps(struct kvm_vcpu *vcpu, u64 hcr)
{
- u64 hcr = vcpu->arch.hcr_el2;
-
if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM))
hcr |= HCR_TVM;
--- a/arch/arm64/kvm/hyp/nvhe/switch.c
+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -40,7 +40,7 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
{
u64 val;
- ___activate_traps(vcpu);
+ ___activate_traps(vcpu, vcpu->arch.hcr_el2);
__activate_traps_common(vcpu);
val = vcpu->arch.cptr_el2;
--- a/arch/arm64/kvm/hyp/vhe/switch.c
+++ b/arch/arm64/kvm/hyp/vhe/switch.c
@@ -33,11 +33,62 @@ DEFINE_PER_CPU(struct kvm_host_data, kvm_host_data);
DEFINE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
DEFINE_PER_CPU(unsigned long, kvm_hyp_vector);
+/*
+ * Common set of HCR_EL2 bits that we do not want to set while running
+ * an NV guest, irrespective of the context the guest is in.
+ */
+#define __HCR_GUEST_NV_FILTER \
+ (HCR_TGE | HCR_ATA | HCR_API | HCR_APK | HCR_FIEN | \
+ HCR_BSU | HCR_NV | HCR_NV1 | HCR_NV2)
+
+/*
+ * Running as a host? Drop the HCR_EL2 settings that should not affect the
+ * execution of EL2, or that are guaranteed to be enforced by the L0
+ * host anyway. For the time being, we don't allow anything. At all.
+ */
+#define HCR_GUEST_NV_INHOST_FILTER (~(0ULL) | __HCR_GUEST_NV_FILTER)
+
+/*
+ * Running as a guest? Drop the HCR_EL2 settings that should not affect the
+ * execution of EL0/EL1, or that are guaranteed to be enforced by the
+ * L0 host anyway.
+ */
+#define HCR_GUEST_NV_INGUEST_FILTER __HCR_GUEST_NV_FILTER
+
+static u64 __compute_hcr(struct kvm_vcpu *vcpu)
+{
+ u64 hcr, vhcr_el2, mask;
+
+ hcr = vcpu->arch.hcr_el2;
+
+ if (!vcpu_has_nv(vcpu))
+ return hcr;
+
+ vhcr_el2 = __vcpu_sys_reg(vcpu, HCR_EL2);
+
+ if (is_hyp_ctxt(vcpu)) {
+ hcr |= HCR_NV | HCR_NV2 | HCR_AT | HCR_TTLB;
+
+ if (!vcpu_el2_e2h_is_set(vcpu))
+ hcr |= HCR_NV1;
+
+ mask = HCR_GUEST_NV_INHOST_FILTER;
+
+ write_sysreg_s(vcpu->arch.ctxt.vncr_array, SYS_VNCR_EL2);
+ } else {
+ mask = HCR_GUEST_NV_INGUEST_FILTER;
+ }
+
+ hcr |= vhcr_el2 & ~mask;
+
+ return hcr;
+}
+
static void __activate_traps(struct kvm_vcpu *vcpu)
{
u64 val;
- ___activate_traps(vcpu);
+ ___activate_traps(vcpu, __compute_hcr(vcpu));
if (has_cntpoff()) {
struct timer_map map;

Add the HCR_EL2 configuration for FEAT_NV2:

- when running a guest hypervisor, completely ignore the guest's
  HCR_EL2 (although this will eventually be relaxed as we improve
  the NV support)

- when running an L2 guest, fold in a limited number of the L1
  hypervisor's HCR_EL2 properties

Non-NV guests are completely unaffected by this.

Signed-off-by: Marc Zyngier <maz@kernel.org>
---
 arch/arm64/include/asm/kvm_nested.h     |  1 +
 arch/arm64/include/asm/sysreg.h         |  1 +
 arch/arm64/kvm/emulate-nested.c         | 27 +++++++++++++
 arch/arm64/kvm/handle_exit.c            |  7 ++++
 arch/arm64/kvm/hyp/include/hyp/switch.h |  4 +-
 arch/arm64/kvm/hyp/nvhe/switch.c        |  2 +-
 arch/arm64/kvm/hyp/vhe/switch.c         | 53 ++++++++++++++++++++++++-
 7 files changed, 90 insertions(+), 5 deletions(-)
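To make the HCR_EL2 merging in __compute_hcr() concrete, here is a small standalone sketch of the filtering math (userspace C, compiled independently of the kernel; the bit positions follow the Arm ARM, and the filter is reduced to a single bit purely for illustration):

	#include <stdint.h>
	#include <stdio.h>

	#define X_HCR_TSC	(1ULL << 19)	/* trap SMC */
	#define X_HCR_TGE	(1ULL << 27)	/* trap general exceptions */
	#define X_FILTER	X_HCR_TGE	/* stand-in for __HCR_GUEST_NV_FILTER */

	int main(void)
	{
		uint64_t vhcr = X_HCR_TSC | X_HCR_TGE;	/* what L1 wrote to HCR_EL2 */

		/* Running the L2 guest: TSC survives, TGE is filtered out */
		uint64_t l2 = vhcr & ~X_FILTER;

		/* Running the L1 hypervisor: everything is filtered out */
		uint64_t l1 = vhcr & ~(~0ULL);

		printf("L2 contribution: %#llx, L1 contribution: %#llx\n",
		       (unsigned long long)l2, (unsigned long long)l1);
		return 0;
	}

With the in-host filter being all ones, the guest hypervisor's own HCR_EL2 contributes nothing while it runs; when its L2 guest runs, bits such as TSC are folded into the hardware value, which is what lets handle_smc() above see and forward the trap.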
Add the HCR_EL2 configuration for FEAT_NV2: - when running a guest hypervisor, completely ignore the guest's HCR_EL2 (although this will eventually be relaxed as we improve the NV support) - when running a L2 guest, fold in a limited number of the L1 hypervisor's HCR_EL2 properties Non-NV guests are completely unaffected by this. Signed-off-by: Marc Zyngier <maz@kernel.org> --- arch/arm64/include/asm/kvm_nested.h | 1 + arch/arm64/include/asm/sysreg.h | 1 + arch/arm64/kvm/emulate-nested.c | 27 +++++++++++++ arch/arm64/kvm/handle_exit.c | 7 ++++ arch/arm64/kvm/hyp/include/hyp/switch.h | 4 +- arch/arm64/kvm/hyp/nvhe/switch.c | 2 +- arch/arm64/kvm/hyp/vhe/switch.c | 53 ++++++++++++++++++++++++- 7 files changed, 90 insertions(+), 5 deletions(-)