@@ -534,6 +534,10 @@ static void __init calculate_hvm_max_policy(void)
raw_cpuid_policy.basic.sep )
__set_bit(X86_FEATURE_SEP, hvm_featureset);
+ if ( !boot_cpu_has(X86_FEATURE_VIRT_SC_MSR_HVM) )
+ /* Clear VIRT_SSBD if VIRT_SPEC_CTRL is not exposed to guests. */
+ __clear_bit(X86_FEATURE_VIRT_SSBD, hvm_featureset);
+
/*
* If Xen isn't virtualising MSR_SPEC_CTRL for HVM guests (functional
* availability, or admin choice), hide the feature.
@@ -590,6 +594,13 @@ static void __init calculate_hvm_def_policy(void)
guest_common_feature_adjustments(hvm_featureset);
guest_common_default_feature_adjustments(hvm_featureset);
+ /*
+ * AMD_SSBD is preferred over VIRT_SSBD, so don't expose the latter by
+ * default if the former is available.
+ */
+ if ( boot_cpu_has(X86_FEATURE_AMD_SSBD) )
+ __clear_bit(X86_FEATURE_VIRT_SSBD, hvm_featureset);
+
sanitise_featureset(hvm_featureset);
cpuid_featureset_to_policy(hvm_featureset, p);
recalculate_xstate(p);
@@ -57,6 +57,9 @@ __UNLIKELY_END(nsvm_hap)
clgi
+ ALTERNATIVE "", STR(call vmentry_virt_spec_ctrl), \
+ X86_FEATURE_VIRT_SC_MSR_HVM
+
/* WARNING! `ret`, `call *`, `jmp *` not safe beyond this point. */
/* SPEC_CTRL_EXIT_TO_SVM Req: b=curr %rsp=regs/cpuinfo, Clob: acd */
.macro svm_vmentry_spec_ctrl
@@ -114,6 +117,9 @@ __UNLIKELY_END(nsvm_hap)
ALTERNATIVE "", svm_vmexit_spec_ctrl, X86_FEATURE_SC_MSR_HVM
/* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
+ ALTERNATIVE "", STR(call vmexit_virt_spec_ctrl), \
+ X86_FEATURE_VIRT_SC_MSR_HVM
+
stgi
GLOBAL(svm_stgi_label)
mov %rsp,%rdi
@@ -52,6 +52,7 @@
#include <asm/hvm/svm/svmdebug.h>
#include <asm/hvm/svm/nestedsvm.h>
#include <asm/hvm/nestedhvm.h>
+#include <asm/spec_ctrl.h>
#include <asm/x86_emulate.h>
#include <public/sched.h>
#include <asm/hvm/vpt.h>
@@ -610,6 +611,15 @@ static void cf_check svm_cpuid_policy_changed(struct vcpu *v)
svm_intercept_msr(v, MSR_SPEC_CTRL,
cp->extd.ibrs ? MSR_INTERCEPT_NONE : MSR_INTERCEPT_RW);
+ /*
+ * Give the guest access to MSR_VIRT_SPEC_CTRL if it has been told about it,
+ * the hardware implements it, and AMD_SSBD isn't being used in preference.
+ */
+ svm_intercept_msr(v, MSR_VIRT_SPEC_CTRL,
+ cp->extd.virt_ssbd && cpu_has_virt_ssbd &&
+ !cpu_has_amd_ssbd ?
+ MSR_INTERCEPT_NONE : MSR_INTERCEPT_RW);
+
/* Give access to MSR_PRED_CMD if the guest has been told about it. */
svm_intercept_msr(v, MSR_PRED_CMD,
cp->extd.ibpb ? MSR_INTERCEPT_NONE : MSR_INTERCEPT_RW);
@@ -3105,6 +3115,36 @@ void svm_vmexit_handler(struct cpu_user_regs *regs)
vmcb_set_vintr(vmcb, intr);
}
+/* Called with GIF=0. */
+void vmexit_virt_spec_ctrl(void)
+{
+ unsigned int val = opt_ssbd ? SPEC_CTRL_SSBD : 0;
+
+ if ( cpu_has_virt_ssbd )
+ {
+ unsigned int lo, hi;
+
+ /*
+ * Need to read from the hardware because VIRT_SPEC_CTRL is not context
+ * switched by the hardware, and we allow the guest untrapped access to
+ * the register.
+ */
+ rdmsr(MSR_VIRT_SPEC_CTRL, lo, hi);
+ if ( val != lo )
+ wrmsr(MSR_VIRT_SPEC_CTRL, val, 0);
+ current->arch.msrs->virt_spec_ctrl.raw = lo;
+ }
+}
+
+/* Called with GIF=0. */
+void vmentry_virt_spec_ctrl(void)
+{
+ unsigned int val = current->arch.msrs->virt_spec_ctrl.raw;
+
+ if ( val != (opt_ssbd ? SPEC_CTRL_SSBD : 0) )
+ wrmsr(MSR_VIRT_SPEC_CTRL, val, 0);
+}
+
/*
* Local variables:
* mode: C
@@ -40,6 +40,7 @@ XEN_CPUFEATURE(SC_VERW_HVM, X86_SYNTH(24)) /* VERW used by Xen for HVM */
XEN_CPUFEATURE(SC_VERW_IDLE, X86_SYNTH(25)) /* VERW used by Xen for idle */
XEN_CPUFEATURE(XEN_SHSTK, X86_SYNTH(26)) /* Xen uses CET Shadow Stacks */
XEN_CPUFEATURE(XEN_IBT, X86_SYNTH(27)) /* Xen uses CET Indirect Branch Tracking */
+XEN_CPUFEATURE(VIRT_SC_MSR_HVM, X86_SYNTH(28)) /* MSR_VIRT_SPEC_CTRL exposed to HVM */
/* Bug words follow the synthetic words. */
#define X86_NR_BUG 1
@@ -375,6 +375,16 @@ struct vcpu_msrs
*/
uint32_t tsc_aux;
+ /*
+ * 0xc001011f - MSR_VIRT_SPEC_CTRL (if !X86_FEATURE_AMD_SSBD)
+ *
+ * AMD only. Guest selected value, saved and restored on guest VM
+ * entry/exit.
+ */
+ struct {
+ uint32_t raw;
+ } virt_spec_ctrl;
+
/*
* 0xc00110{27,19-1b} MSR_AMD64_DR{0-3}_ADDRESS_MASK
*
@@ -385,7 +385,10 @@ int guest_rdmsr(struct vcpu *v, uint32_t msr, uint64_t *val)
if ( !cp->extd.virt_ssbd )
goto gp_fault;
- *val = msrs->spec_ctrl.raw & SPEC_CTRL_SSBD;
+ if ( cpu_has_amd_ssbd )
+ *val = msrs->spec_ctrl.raw & SPEC_CTRL_SSBD;
+ else
+ *val = msrs->virt_spec_ctrl.raw;
break;
case MSR_AMD64_DE_CFG:
@@ -678,10 +681,15 @@ int guest_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val)
goto gp_fault;
/* Only supports SSBD bit, the rest are ignored. */
- if ( val & SPEC_CTRL_SSBD )
- msrs->spec_ctrl.raw |= SPEC_CTRL_SSBD;
+ if ( cpu_has_amd_ssbd )
+ {
+ if ( val & SPEC_CTRL_SSBD )
+ msrs->spec_ctrl.raw |= SPEC_CTRL_SSBD;
+ else
+ msrs->spec_ctrl.raw &= ~SPEC_CTRL_SSBD;
+ }
else
- msrs->spec_ctrl.raw &= ~SPEC_CTRL_SSBD;
+ msrs->virt_spec_ctrl.raw = val & SPEC_CTRL_SSBD;
break;
case MSR_AMD64_DE_CFG:
@@ -406,9 +406,12 @@ static void __init print_details(enum ind_thunk thunk, uint64_t caps)
(boot_cpu_has(X86_FEATURE_SC_MSR_HVM) ||
boot_cpu_has(X86_FEATURE_SC_RSB_HVM) ||
boot_cpu_has(X86_FEATURE_MD_CLEAR) ||
+ boot_cpu_has(X86_FEATURE_VIRT_SC_MSR_HVM) ||
opt_eager_fpu) ? "" : " None",
boot_cpu_has(X86_FEATURE_SC_MSR_HVM) ? " MSR_SPEC_CTRL" : "",
- boot_cpu_has(X86_FEATURE_SC_MSR_HVM) ? " MSR_VIRT_SPEC_CTRL" : "",
+ (boot_cpu_has(X86_FEATURE_SC_MSR_HVM) ||
+ boot_cpu_has(X86_FEATURE_VIRT_SC_MSR_HVM)) ? " MSR_VIRT_SPEC_CTRL"
+ : "",
boot_cpu_has(X86_FEATURE_SC_RSB_HVM) ? " RSB" : "",
opt_eager_fpu ? " EAGER_FPU" : "",
boot_cpu_has(X86_FEATURE_MD_CLEAR) ? " MD_CLEAR" : "");
@@ -1069,6 +1072,10 @@ void __init init_speculation_mitigations(void)
setup_force_cpu_cap(X86_FEATURE_SC_MSR_HVM);
}
+ /* Support VIRT_SPEC_CTRL.SSBD if AMD_SSBD is not available. */
+ if ( opt_msr_sc_hvm && !cpu_has_amd_ssbd && cpu_has_virt_ssbd )
+ setup_force_cpu_cap(X86_FEATURE_VIRT_SC_MSR_HVM);
+
/* If we have IBRS available, see whether we should use it. */
if ( has_spec_ctrl && ibrs )
default_xen_spec_ctrl |= SPEC_CTRL_IBRS;
@@ -265,7 +265,7 @@ XEN_CPUFEATURE(IBRS_SAME_MODE, 8*32+19) /*S IBRS provides same-mode protection
XEN_CPUFEATURE(NO_LMSL, 8*32+20) /*S EFER.LMSLE no longer supported. */
XEN_CPUFEATURE(AMD_PPIN, 8*32+23) /* Protected Processor Inventory Number */
XEN_CPUFEATURE(AMD_SSBD, 8*32+24) /*S MSR_SPEC_CTRL.SSBD available */
-XEN_CPUFEATURE(VIRT_SSBD, 8*32+25) /*!s MSR_VIRT_SPEC_CTRL.SSBD */
+XEN_CPUFEATURE(VIRT_SSBD, 8*32+25) /*!S MSR_VIRT_SPEC_CTRL.SSBD */
XEN_CPUFEATURE(SSB_NO, 8*32+26) /*A Hardware not vulnerable to SSB */
XEN_CPUFEATURE(PSFD, 8*32+28) /*S MSR_SPEC_CTRL.PSFD */
Allow HVM guests untrapped access to MSR_VIRT_SPEC_CTRL if the hardware
has support for it.  This requires adding logic in the vm{entry,exit}
paths for SVM in order to context switch between the hypervisor value
and the guest one.  The added handlers for context switch will also be
used for the legacy SSBD support.

Introduce a new synthetic feature leaf (X86_FEATURE_VIRT_SC_MSR_HVM) to
signal whether VIRT_SPEC_CTRL needs to be handled on guest
vm{entry,exit}.

Note that the VIRT_SSBD annotation is changed from 's' to 'S' because
the patch introduces support for exposing VIRT_SSBD to guests by
default when the host (virtual) hardware also supports it.

Suggested-by: Andrew Cooper <andrew.cooper3@citrix.com>
Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
---
Changes since v2:
 - Reword part of the commit message regarding annotation change.
 - Fix MSR intercept.
 - Add handling of VIRT_SPEC_CTRL to guest_{rd,wr}msr when using
   VIRT_SSBD also.

Changes since v1:
 - Introduce virt_spec_ctrl vCPU field.
 - Context switch VIRT_SPEC_CTRL on vmentry/vmexit separately from
   SPEC_CTRL.
---
 xen/arch/x86/cpuid.c                        | 11 ++++++
 xen/arch/x86/hvm/svm/entry.S                |  6 ++++
 xen/arch/x86/hvm/svm/svm.c                  | 40 +++++++++++++++++++++
 xen/arch/x86/include/asm/cpufeatures.h      |  1 +
 xen/arch/x86/include/asm/msr.h              | 10 ++++++
 xen/arch/x86/msr.c                          | 16 ++++++---
 xen/arch/x86/spec_ctrl.c                    |  9 ++++-
 xen/include/public/arch-x86/cpufeatureset.h |  2 +-
 8 files changed, 89 insertions(+), 6 deletions(-)
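For reference, the guest-visible interface exposed here is the SSBD bit of
MSR_VIRT_SPEC_CTRL (0xc001011f), advertised via the VIRT_SSBD CPUID bit
(leaf 0x80000008, EBX bit 25).  Below is a minimal guest-side sketch, not
part of the patch, of how a guest kernel could toggle SSBD through that MSR
once VIRT_SSBD is visible.  The helper names (rdmsr64, wrmsr64,
has_virt_ssbd, set_ssbd) are illustrative only, and the snippet assumes
ring-0 guest context where RDMSR/WRMSR are permitted.

/*
 * Guest-side sketch (not part of the patch): toggle SSBD through
 * MSR_VIRT_SPEC_CTRL.  Assumes ring-0 guest context.
 */
#include <stdbool.h>
#include <stdint.h>

#define MSR_VIRT_SPEC_CTRL 0xc001011fU
#define SPEC_CTRL_SSBD     (1U << 2)

static inline uint64_t rdmsr64(uint32_t msr)
{
    uint32_t lo, hi;

    asm volatile ( "rdmsr" : "=a" (lo), "=d" (hi) : "c" (msr) );
    return ((uint64_t)hi << 32) | lo;
}

static inline void wrmsr64(uint32_t msr, uint64_t val)
{
    asm volatile ( "wrmsr" :: "c" (msr), "a" ((uint32_t)val),
                              "d" ((uint32_t)(val >> 32)) );
}

/* CPUID leaf 0x80000008, EBX bit 25 advertises VIRT_SSBD. */
static bool has_virt_ssbd(void)
{
    uint32_t eax, ebx, ecx, edx;

    asm volatile ( "cpuid"
                   : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
                   : "0" (0x80000008U), "2" (0) );
    return ebx & (1U << 25);
}

/* Only the SSBD bit is defined; other bits are ignored. */
static void set_ssbd(bool on)
{
    uint64_t val = rdmsr64(MSR_VIRT_SPEC_CTRL);

    if ( on )
        val |= SPEC_CTRL_SSBD;
    else
        val &= ~(uint64_t)SPEC_CTRL_SSBD;

    wrmsr64(MSR_VIRT_SPEC_CTRL, val);
}

With this patch, such an access is either passed straight through to hardware
(VIRT_SSBD without AMD_SSBD) or handled in guest_{rd,wr}msr() on top of the
existing SPEC_CTRL.SSBD state.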