diff mbox series

[2/3] amd/msr: allow passthrough of VIRT_SPEC_CTRL for HVM guests

Message ID 20220201164651.6369-3-roger.pau@citrix.com (mailing list archive)
State Superseded
Headers show
Series amd/msr: implement MSR_VIRT_SPEC_CTRL for HVM guests | expand

Commit Message

Roger Pau Monné Feb. 1, 2022, 4:46 p.m. UTC
Allow HVM guests untrapped access to MSR_VIRT_SPEC_CTRL if the
hardware has support for it. This requires adding logic in the
vm{entry,exit} paths for SVM in order to context switch between the
hypervisor value and the guest one. The added handlers for context
switch will also be used for the legacy SSBD support.

Note that the implementation relies on storing the guest value in the
spec_ctrl MSR per-vCPU variable, as the usage of VIRT_SPEC_CTRL
precludes the usage of SPEC_CTRL. Also store the current and
hypervisor states of VIRT_SPEC_CTRL in the per-pCPU spec_ctrl fields
at cpu_info in order to properly context switch the values between
Xen and HVM guests.

Suggested-by: Andrew Cooper <andrew.cooper3@citrix.com>
Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
---
 xen/arch/x86/cpu/amd.c                 |  7 +++-
 xen/arch/x86/cpuid.c                   | 11 ++++++
 xen/arch/x86/hvm/svm/entry.S           |  8 +++-
 xen/arch/x86/hvm/svm/svm.c             | 55 ++++++++++++++++++++++++++
 xen/arch/x86/include/asm/cpufeatures.h |  1 +
 xen/arch/x86/spec_ctrl.c               |  8 +++-
 6 files changed, 86 insertions(+), 4 deletions(-)

Comments

Jan Beulich Feb. 14, 2022, 4:02 p.m. UTC | #1
On 01.02.2022 17:46, Roger Pau Monne wrote:
> Allow HVM guests untrapped access to MSR_VIRT_SPEC_CTRL if the
> hardware has support for it. This requires adding logic in the
> vm{entry,exit} paths for SVM in order to context switch between the
> hypervisor value and the guest one. The added handlers for context
> switch will also be used for the legacy SSBD support.

So by "hardware" you mean virtual hardware here, when we run
virtualized ourselves? While the wording in AMD's whitepaper suggests
hardware could exist with both MSRs implemented, so far it was my
understanding that VIRT_SPEC_CTRL was rather left for hypervisors to
implement. Maybe I'm wrong with this, in which case some of the
further comments may also be wrong.

> --- a/xen/arch/x86/cpu/amd.c
> +++ b/xen/arch/x86/cpu/amd.c
> @@ -687,6 +687,7 @@ void amd_init_lfence(struct cpuinfo_x86 *c)
>   */
>  void amd_init_ssbd(const struct cpuinfo_x86 *c)
>  {
> +	struct cpu_info *info = get_cpu_info();
>  	int bit = -1;
>  
>  	if (cpu_has_ssb_no)
> @@ -699,7 +700,7 @@ void amd_init_ssbd(const struct cpuinfo_x86 *c)
>  
>  	if (cpu_has_virt_ssbd) {
>  		wrmsrl(MSR_VIRT_SPEC_CTRL, opt_ssbd ? SPEC_CTRL_SSBD : 0);
> -		return;
> +		goto out;
>  	}
>  
>  	switch (c->x86) {
> @@ -729,6 +730,10 @@ void amd_init_ssbd(const struct cpuinfo_x86 *c)
>  
>  	if (bit < 0)
>  		printk_once(XENLOG_ERR "No SSBD controls available\n");
> +
> + out:
> +	info->last_spec_ctrl = info->xen_spec_ctrl = opt_ssbd ? SPEC_CTRL_SSBD
> +							      : 0;
>  }

Besides me being uncertain about the placement of these (preferably
the writes would be where the other similar writes are), this re-use
of the values suggests that you mean to prefer VIRT_SPEC_CTRL use
over that of SPEC_CTRL (see below).

Additionally - the value you store isn't necessarily the value you
wrote to the MSR. It only is if you came here via the "goto out".

> --- a/xen/arch/x86/hvm/svm/entry.S
> +++ b/xen/arch/x86/hvm/svm/entry.S
> @@ -71,7 +71,9 @@ __UNLIKELY_END(nsvm_hap)
>              mov    %al, CPUINFO_last_spec_ctrl(%rsp)
>  1:          /* No Spectre v1 concerns.  Execution will hit VMRUN imminently. */
>          .endm
> -        ALTERNATIVE "", svm_vmentry_spec_ctrl, X86_FEATURE_SC_MSR_HVM
> +        ALTERNATIVE_2 "", STR(call vmentry_virt_spec_ctrl), \

I'm afraid this violates the "ret" part of the warning a few lines up,
while ...

> +                          X86_FEATURE_VIRT_SC_MSR_HVM, \
> +                      svm_vmentry_spec_ctrl, X86_FEATURE_SC_MSR_HVM
>  
>          pop  %r15
>          pop  %r14
> @@ -111,7 +113,9 @@ __UNLIKELY_END(nsvm_hap)
>              wrmsr
>              mov    %al, CPUINFO_last_spec_ctrl(%rsp)
>          .endm
> -        ALTERNATIVE "", svm_vmexit_spec_ctrl, X86_FEATURE_SC_MSR_HVM
> +        ALTERNATIVE_2 "", STR(call vmexit_virt_spec_ctrl), \

... this violates ...

> +                          X86_FEATURE_VIRT_SC_MSR_HVM, \
> +                      svm_vmexit_spec_ctrl, X86_FEATURE_SC_MSR_HVM
>          /* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */

... the "ret" part of this warning.

Furthermore, opposite to what the change to amd_init_ssbd() suggests,
the ordering of the alternatives here means you prefer SPEC_CTRL over
VIRT_SPEC_CTRL; see the comment near the top of _apply_alternatives().
Unless I've missed logic guaranteeing that both of the keyed to
features can't be active at the same time.

Jan
Roger Pau Monné March 10, 2022, 4:41 p.m. UTC | #2
On Mon, Feb 14, 2022 at 05:02:52PM +0100, Jan Beulich wrote:
> On 01.02.2022 17:46, Roger Pau Monne wrote:
> > --- a/xen/arch/x86/hvm/svm/entry.S
> > +++ b/xen/arch/x86/hvm/svm/entry.S
> > @@ -71,7 +71,9 @@ __UNLIKELY_END(nsvm_hap)
> >              mov    %al, CPUINFO_last_spec_ctrl(%rsp)
> >  1:          /* No Spectre v1 concerns.  Execution will hit VMRUN imminently. */
> >          .endm
> > -        ALTERNATIVE "", svm_vmentry_spec_ctrl, X86_FEATURE_SC_MSR_HVM
> > +        ALTERNATIVE_2 "", STR(call vmentry_virt_spec_ctrl), \
> 
> I'm afraid this violates the "ret" part of the warning a few lines up,
> while ...
> 
> > +                          X86_FEATURE_VIRT_SC_MSR_HVM, \
> > +                      svm_vmentry_spec_ctrl, X86_FEATURE_SC_MSR_HVM
> >  
> >          pop  %r15
> >          pop  %r14
> > @@ -111,7 +113,9 @@ __UNLIKELY_END(nsvm_hap)
> >              wrmsr
> >              mov    %al, CPUINFO_last_spec_ctrl(%rsp)
> >          .endm
> > -        ALTERNATIVE "", svm_vmexit_spec_ctrl, X86_FEATURE_SC_MSR_HVM
> > +        ALTERNATIVE_2 "", STR(call vmexit_virt_spec_ctrl), \
> 
> ... this violates ...
> 
> > +                          X86_FEATURE_VIRT_SC_MSR_HVM, \
> > +                      svm_vmexit_spec_ctrl, X86_FEATURE_SC_MSR_HVM
> >          /* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
> 
> ... the "ret" part of this warning.

Hm, so while I could load VIRT_SPEC_CTRL easily from assembly, loading
of the legacy non-architectural setting of SSBD for Fam18h and earlier
is likely not doable from assembly.

Since those helpers would only set SSBD, isn't it fine to execute a
`ret` after either having set or clear SSBD?

AFAICT the requirement would be to either have loaded SPEC_CTRL first
(if present) in the VM exit path, or to set SSBD before setting
SPEC_CTRL in the VM entry path.

> Furthermore, opposite to what the change to amd_init_ssbd() suggests,
> the ordering of the alternatives here means you prefer SPEC_CTRL over
> VIRT_SPEC_CTRL; see the comment near the top of _apply_alternatives().
> Unless I've missed logic guaranteeing that both of the keyed to
> features can't be active at the same time.

Xen itself will only use a single one (either SPEC_CTRL.SSBD or
VIRT_SPEC_CTRL.SSBD) in order to implement support on behalf of the
guest. amd_init_ssbd already prefers to use SPEC_CTRL.SSBD over
VIRT_SPEC_CTRL.SSBD when both are available, so we aim to do the same
here.

I think part of the confusion stems from using info->{last_spec_ctrl,
xen_spec_ctrl} even when SPEC_CTRL MSR is not used by Xen, I need to
clarify this somehow, maybe by not using those fields in the first
place.

Thanks, Roger.
Jan Beulich March 11, 2022, 7:31 a.m. UTC | #3
On 10.03.2022 17:41, Roger Pau Monné wrote:
> On Mon, Feb 14, 2022 at 05:02:52PM +0100, Jan Beulich wrote:
>> On 01.02.2022 17:46, Roger Pau Monne wrote:
>>> --- a/xen/arch/x86/hvm/svm/entry.S
>>> +++ b/xen/arch/x86/hvm/svm/entry.S
>>> @@ -71,7 +71,9 @@ __UNLIKELY_END(nsvm_hap)
>>>              mov    %al, CPUINFO_last_spec_ctrl(%rsp)
>>>  1:          /* No Spectre v1 concerns.  Execution will hit VMRUN imminently. */
>>>          .endm
>>> -        ALTERNATIVE "", svm_vmentry_spec_ctrl, X86_FEATURE_SC_MSR_HVM
>>> +        ALTERNATIVE_2 "", STR(call vmentry_virt_spec_ctrl), \
>>
>> I'm afraid this violates the "ret" part of the warning a few lines up,
>> while ...
>>
>>> +                          X86_FEATURE_VIRT_SC_MSR_HVM, \
>>> +                      svm_vmentry_spec_ctrl, X86_FEATURE_SC_MSR_HVM
>>>  
>>>          pop  %r15
>>>          pop  %r14
>>> @@ -111,7 +113,9 @@ __UNLIKELY_END(nsvm_hap)
>>>              wrmsr
>>>              mov    %al, CPUINFO_last_spec_ctrl(%rsp)
>>>          .endm
>>> -        ALTERNATIVE "", svm_vmexit_spec_ctrl, X86_FEATURE_SC_MSR_HVM
>>> +        ALTERNATIVE_2 "", STR(call vmexit_virt_spec_ctrl), \
>>
>> ... this violates ...
>>
>>> +                          X86_FEATURE_VIRT_SC_MSR_HVM, \
>>> +                      svm_vmexit_spec_ctrl, X86_FEATURE_SC_MSR_HVM
>>>          /* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
>>
>> ... the "ret" part of this warning.
> 
> Hm, so while I could load VIRT_SPEC_CTRL easily from assembly, loading
> of the legacy non-architectural setting of SSBD for Fam18h and earlier
> is likely not doable from assembly.
> 
> Since those helpers would only set SSBD, isn't it fine to execute a
> `ret` after either having set or clear SSBD?
> 
> AFAICT the requirement would be to either have loaded SPEC_CTRL first
> (if present) in the VM exit path, or to set SSBD before setting
> SPEC_CTRL in the VM entry path.

Yes, setting SSBD with SPEC_CTRL already / still set ought to be fine.

>> Furthermore, opposite to what the change to amd_init_ssbd() suggests,
>> the ordering of the alternatives here means you prefer SPEC_CTRL over
>> VIRT_SPEC_CTRL; see the comment near the top of _apply_alternatives().
>> Unless I've missed logic guaranteeing that both of the keyed to
>> features can't be active at the same time.
> 
> Xen itself will only use a single one (either SPEC_CTRL.SSBD or
> VIRT_SPEC_CTRL.SSBD) in order to implement support on behalf of the
> guest. amd_init_ssbd already prefers to use SPEC_CTRL.SSBD over
> VIRT_SPEC_CTRL.SSBD when both are available, so we aim to do the same
> here.

Hmm, I can't see the change to init_speculation_mitigations()
guaranteeing that at most one of the two would be enabled.

> I think part of the confusion stems from using info->{last_spec_ctrl,
> xen_spec_ctrl} even when SPEC_CTRL MSR is not used by Xen, I need to
> clarify this somehow, maybe by not using those fields in the first
> place.

I don't think this matters for this particular aspect of my reply.
It was possibly causing some confusion to me, but elsewhere.

Jan
diff mbox series

Patch

diff --git a/xen/arch/x86/cpu/amd.c b/xen/arch/x86/cpu/amd.c
index a8e37dbb1f..c3fcc0e558 100644
--- a/xen/arch/x86/cpu/amd.c
+++ b/xen/arch/x86/cpu/amd.c
@@ -687,6 +687,7 @@  void amd_init_lfence(struct cpuinfo_x86 *c)
  */
 void amd_init_ssbd(const struct cpuinfo_x86 *c)
 {
+	struct cpu_info *info = get_cpu_info();
 	int bit = -1;
 
 	if (cpu_has_ssb_no)
@@ -699,7 +700,7 @@  void amd_init_ssbd(const struct cpuinfo_x86 *c)
 
 	if (cpu_has_virt_ssbd) {
 		wrmsrl(MSR_VIRT_SPEC_CTRL, opt_ssbd ? SPEC_CTRL_SSBD : 0);
-		return;
+		goto out;
 	}
 
 	switch (c->x86) {
@@ -729,6 +730,10 @@  void amd_init_ssbd(const struct cpuinfo_x86 *c)
 
 	if (bit < 0)
 		printk_once(XENLOG_ERR "No SSBD controls available\n");
+
+ out:
+	info->last_spec_ctrl = info->xen_spec_ctrl = opt_ssbd ? SPEC_CTRL_SSBD
+							      : 0;
 }
 
 void __init detect_zen2_null_seg_behaviour(void)
diff --git a/xen/arch/x86/cpuid.c b/xen/arch/x86/cpuid.c
index 29b4cfc9e6..7b10fbf12f 100644
--- a/xen/arch/x86/cpuid.c
+++ b/xen/arch/x86/cpuid.c
@@ -551,6 +551,9 @@  static void __init calculate_hvm_max_policy(void)
          */
         __set_bit(X86_FEATURE_VIRT_SSBD, hvm_featureset);
 
+    if ( boot_cpu_has(X86_FEATURE_VIRT_SC_MSR_HVM) )
+        __set_bit(X86_FEATURE_VIRT_SSBD, hvm_featureset);
+
     /*
      * With VT-x, some features are only supported by Xen if dedicated
      * hardware support is also available.
@@ -590,6 +593,14 @@  static void __init calculate_hvm_def_policy(void)
     guest_common_feature_adjustments(hvm_featureset);
     guest_common_default_feature_adjustments(hvm_featureset);
 
+    /*
+     * Only expose VIRT_SPEC_CTRL support by default if SPEC_CTRL is not
+     * supported.
+     */
+    if ( boot_cpu_has(X86_FEATURE_VIRT_SC_MSR_HVM) &&
+         !boot_cpu_has(X86_FEATURE_SC_MSR_HVM) )
+        __set_bit(X86_FEATURE_VIRT_SSBD, hvm_featureset);
+
     sanitise_featureset(hvm_featureset);
     cpuid_featureset_to_policy(hvm_featureset, p);
     recalculate_xstate(p);
diff --git a/xen/arch/x86/hvm/svm/entry.S b/xen/arch/x86/hvm/svm/entry.S
index 4ae55a2ef6..2a0c41625b 100644
--- a/xen/arch/x86/hvm/svm/entry.S
+++ b/xen/arch/x86/hvm/svm/entry.S
@@ -71,7 +71,9 @@  __UNLIKELY_END(nsvm_hap)
             mov    %al, CPUINFO_last_spec_ctrl(%rsp)
 1:          /* No Spectre v1 concerns.  Execution will hit VMRUN imminently. */
         .endm
-        ALTERNATIVE "", svm_vmentry_spec_ctrl, X86_FEATURE_SC_MSR_HVM
+        ALTERNATIVE_2 "", STR(call vmentry_virt_spec_ctrl), \
+                          X86_FEATURE_VIRT_SC_MSR_HVM, \
+                      svm_vmentry_spec_ctrl, X86_FEATURE_SC_MSR_HVM
 
         pop  %r15
         pop  %r14
@@ -111,7 +113,9 @@  __UNLIKELY_END(nsvm_hap)
             wrmsr
             mov    %al, CPUINFO_last_spec_ctrl(%rsp)
         .endm
-        ALTERNATIVE "", svm_vmexit_spec_ctrl, X86_FEATURE_SC_MSR_HVM
+        ALTERNATIVE_2 "", STR(call vmexit_virt_spec_ctrl), \
+                          X86_FEATURE_VIRT_SC_MSR_HVM, \
+                      svm_vmexit_spec_ctrl, X86_FEATURE_SC_MSR_HVM
         /* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
 
         stgi
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index c4ce3f75ab..56c7b30b32 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -610,6 +610,14 @@  static void svm_cpuid_policy_changed(struct vcpu *v)
     svm_intercept_msr(v, MSR_SPEC_CTRL,
                       cp->extd.ibrs ? MSR_INTERCEPT_NONE : MSR_INTERCEPT_RW);
 
+    /*
+     * Give access to MSR_VIRT_SPEC_CTRL if the guest has been told about it
+     * and the hardware implements it.
+     */
+    svm_intercept_msr(v, MSR_VIRT_SPEC_CTRL,
+                      cp->extd.virt_ssbd && cpu_has_virt_ssbd ?
+                      MSR_INTERCEPT_NONE : MSR_INTERCEPT_RW);
+
     /* Give access to MSR_PRED_CMD if the guest has been told about it. */
     svm_intercept_msr(v, MSR_PRED_CMD,
                       cp->extd.ibpb ? MSR_INTERCEPT_NONE : MSR_INTERCEPT_RW);
@@ -3099,6 +3107,53 @@  void svm_vmexit_handler(struct cpu_user_regs *regs)
     vmcb_set_vintr(vmcb, intr);
 }
 
+/* Called with GIF=0. */
+void vmexit_virt_spec_ctrl(void)
+{
+    struct cpu_info *info = get_cpu_info();
+    unsigned int val = info->xen_spec_ctrl;
+
+    /*
+     * On AMD we will never use MSR_SPEC_CTRL together with MSR_VIRT_SPEC_CTRL
+     * or any legacy way of setting SSBD, so reuse the spec_ctrl fields in
+     * cpu_info for context switching the other means of setting SSBD.
+     */
+    ASSERT(!boot_cpu_has(X86_FEATURE_SC_MSR_HVM));
+    if ( cpu_has_virt_ssbd )
+    {
+        unsigned int lo, hi;
+        struct vcpu *curr = current;
+
+        /*
+         * Need to read from the hardware because VIRT_SPEC_CTRL is not context
+         * switched by the hardware, and we allow the guest untrapped access to
+         * the register.
+         */
+        rdmsr(MSR_VIRT_SPEC_CTRL, lo, hi);
+        if ( val != lo )
+            wrmsr(MSR_VIRT_SPEC_CTRL, val, 0);
+        curr->arch.msrs->spec_ctrl.raw = lo;
+        info->last_spec_ctrl = val;
+    }
+}
+
+/* Called with GIF=0. */
+void vmentry_virt_spec_ctrl(void)
+{
+    struct cpu_info *info = get_cpu_info();
+    const struct vcpu *curr = current;
+    unsigned int val = curr->arch.msrs->spec_ctrl.raw;
+
+    ASSERT(!boot_cpu_has(X86_FEATURE_SC_MSR_HVM));
+    if ( val != info->last_spec_ctrl )
+    {
+        wrmsr(MSR_VIRT_SPEC_CTRL, val, 0);
+        info->last_spec_ctrl = val;
+    }
+
+    /* No Spectre v1 concerns.  Execution is going to hit VMRUN imminently. */
+}
+
 /*
  * Local variables:
  * mode: C
diff --git a/xen/arch/x86/include/asm/cpufeatures.h b/xen/arch/x86/include/asm/cpufeatures.h
index b10154fc44..a2c37bfdd4 100644
--- a/xen/arch/x86/include/asm/cpufeatures.h
+++ b/xen/arch/x86/include/asm/cpufeatures.h
@@ -39,6 +39,7 @@  XEN_CPUFEATURE(SC_VERW_PV,        X86_SYNTH(23)) /* VERW used by Xen for PV */
 XEN_CPUFEATURE(SC_VERW_HVM,       X86_SYNTH(24)) /* VERW used by Xen for HVM */
 XEN_CPUFEATURE(SC_VERW_IDLE,      X86_SYNTH(25)) /* VERW used by Xen for idle */
 XEN_CPUFEATURE(XEN_SHSTK,         X86_SYNTH(26)) /* Xen uses CET Shadow Stacks */
+XEN_CPUFEATURE(VIRT_SC_MSR_HVM,   X86_SYNTH(27)) /* MSR_VIRT_SPEC_CTRL exposed to HVM */
 
 /* Bug words follow the synthetic words. */
 #define X86_NR_BUG 1
diff --git a/xen/arch/x86/spec_ctrl.c b/xen/arch/x86/spec_ctrl.c
index 64b154b2d3..2c46e1485f 100644
--- a/xen/arch/x86/spec_ctrl.c
+++ b/xen/arch/x86/spec_ctrl.c
@@ -399,9 +399,12 @@  static void __init print_details(enum ind_thunk thunk, uint64_t caps)
            (boot_cpu_has(X86_FEATURE_SC_MSR_HVM) ||
             boot_cpu_has(X86_FEATURE_SC_RSB_HVM) ||
             boot_cpu_has(X86_FEATURE_MD_CLEAR)   ||
+            boot_cpu_has(X86_FEATURE_VIRT_SC_MSR_HVM) ||
             opt_eager_fpu)                           ? ""               : " None",
            boot_cpu_has(X86_FEATURE_SC_MSR_HVM)      ? " MSR_SPEC_CTRL" : "",
-           boot_cpu_has(X86_FEATURE_SC_MSR_HVM)      ? " MSR_VIRT_SPEC_CTRL" : "",
+           (boot_cpu_has(X86_FEATURE_SC_MSR_HVM) ||
+            boot_cpu_has(X86_FEATURE_VIRT_SC_MSR_HVM)) ? " MSR_VIRT_SPEC_CTRL"
+                                                       : "",
            boot_cpu_has(X86_FEATURE_SC_RSB_HVM)      ? " RSB"           : "",
            opt_eager_fpu                             ? " EAGER_FPU"     : "",
            boot_cpu_has(X86_FEATURE_MD_CLEAR)        ? " MD_CLEAR"      : "");
@@ -1053,6 +1056,9 @@  void __init init_speculation_mitigations(void)
             setup_force_cpu_cap(X86_FEATURE_SC_MSR_HVM);
     }
 
+    if ( opt_msr_sc_hvm && cpu_has_virt_ssbd )
+        setup_force_cpu_cap(X86_FEATURE_VIRT_SC_MSR_HVM);
+
     /* If we have IBRS available, see whether we should use it. */
     if ( has_spec_ctrl && ibrs )
         default_xen_spec_ctrl |= SPEC_CTRL_IBRS;