@@ -107,6 +107,22 @@ static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu)
return (unsigned long *)&vcpu->arch.hcr_el2;
}
+static inline void vcpu_reset_fgt(struct kvm_vcpu *vcpu)
+{
+ if (!cpus_have_final_cap(ARM64_HAS_FGT))
+ return;
+
+ /*
+ * Enable traps for the guest by default:
+ *
+ * ACCDATA_EL1, GCSPR_EL0, GCSCRE0_EL1, GCSPR_EL1, GCSCR_EL1,
+ * SMPRI_EL1, TPIDR2_EL0, RCWMASK_EL1, PIRE0_EL1, PIR_EL1,
+ * POR_EL0, POR_EL1, S2POR_EL1, MAIR2_EL1, and AMAIR_EL1.
+ */
+ vcpu->arch.ctxt.hfgrtr_el2 = 0;
+ vcpu->arch.ctxt.hfgwtr_el2 = 0;
+}
+
static inline void vcpu_clear_wfx_traps(struct kvm_vcpu *vcpu)
{
vcpu->arch.hcr_el2 &= ~HCR_TWE;
@@ -391,6 +391,9 @@ struct kvm_cpu_context {
u64 spsr_irq;
u64 spsr_fiq;
+ u64 hfgrtr_el2; /* HFGRTR_EL2: fine-grained read trap bits for this context */
+ u64 hfgwtr_el2; /* HFGWTR_EL2: fine-grained write trap bits for this context */
+
struct user_fpsimd_state fp_regs;
u64 sys_regs[NR_SYS_REGS];
@@ -1240,6 +1240,7 @@ static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
}
vcpu_reset_hcr(vcpu);
+ vcpu_reset_fgt(vcpu); /* set the default fine-grained trap configuration */
vcpu->arch.cptr_el2 = CPTR_EL2_DEFAULT;
/*
@@ -72,6 +72,8 @@ static inline void __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)
static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
{
+ struct kvm_cpu_context *host_ctxt;
+
/* Trap on AArch32 cp15 c15 (impdef sysregs) accesses (EL1 or EL0) */
write_sysreg(1 << 15, hstr_el2);
@@ -89,33 +91,36 @@ static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
vcpu->arch.mdcr_el2_host = read_sysreg(mdcr_el2);
write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
- if (cpus_have_final_cap(ARM64_SME)) {
- sysreg_clear_set_s(SYS_HFGRTR_EL2,
- HFGxTR_EL2_nSMPRI_EL1_MASK |
- HFGxTR_EL2_nTPIDR2_EL0_MASK,
- 0);
- sysreg_clear_set_s(SYS_HFGWTR_EL2,
- HFGxTR_EL2_nSMPRI_EL1_MASK |
- HFGxTR_EL2_nTPIDR2_EL0_MASK,
- 0);
+ if (cpus_have_final_cap(ARM64_HAS_FGT)) {
+ host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
+
+ host_ctxt->hfgrtr_el2 = read_sysreg_s(SYS_HFGRTR_EL2);
+ host_ctxt->hfgwtr_el2 = read_sysreg_s(SYS_HFGWTR_EL2);
+
+ write_sysreg_s(vcpu->arch.ctxt.hfgrtr_el2, SYS_HFGRTR_EL2);
+ write_sysreg_s(vcpu->arch.ctxt.hfgwtr_el2, SYS_HFGWTR_EL2);
}
}
static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu)
{
+ struct kvm_cpu_context *host_ctxt;
+
write_sysreg(vcpu->arch.mdcr_el2_host, mdcr_el2);
write_sysreg(0, hstr_el2);
if (kvm_arm_support_pmu_v3())
write_sysreg(0, pmuserenr_el0);
- if (cpus_have_final_cap(ARM64_SME)) {
- sysreg_clear_set_s(SYS_HFGRTR_EL2, 0,
- HFGxTR_EL2_nSMPRI_EL1_MASK |
- HFGxTR_EL2_nTPIDR2_EL0_MASK);
- sysreg_clear_set_s(SYS_HFGWTR_EL2, 0,
- HFGxTR_EL2_nSMPRI_EL1_MASK |
- HFGxTR_EL2_nTPIDR2_EL0_MASK);
+ /*
+ * Restore the host's fine-grained trap configuration, saved
+ * in __activate_traps_common(), now that the host is back in
+ * charge of managing traps.
+ */
+ if (cpus_have_final_cap(ARM64_HAS_FGT)) {
+ host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
+
+ write_sysreg_s(host_ctxt->hfgrtr_el2, SYS_HFGRTR_EL2);
+ write_sysreg_s(host_ctxt->hfgwtr_el2, SYS_HFGWTR_EL2);
}
}
Currently the only fine-grained traps we use are the SME ones, and we decide if we want to manage fine-grained traps for the guest, and which to enable, based on the presence of that feature. In order to support SME, PIE, and other features where we need fine-grained traps, we will need to select per guest which traps are enabled. Move to storing the traps to enable in the vCPU data, updating the registers if fine-grained traps are supported and any are enabled. In order to ensure that the fine-grained traps are restored along with other traps, there is a bit of asymmetry in where the registers are restored on guest exit. Currently we always set these registers to 0 when running the guest, so unconditionally use that value for guests; future patches will configure this. No functional change, though we will do additional saves of the guest FGT register configurations and will save and restore even if the host and guest states are identical. Signed-off-by: Mark Brown <broonie@kernel.org> --- arch/arm64/include/asm/kvm_emulate.h | 16 ++++++++++++++ arch/arm64/include/asm/kvm_host.h | 3 +++ arch/arm64/kvm/arm.c | 1 + arch/arm64/kvm/hyp/include/hyp/switch.h | 37 +++++++++++++++++++-------------- 4 files changed, 41 insertions(+), 16 deletions(-)