diff mbox series

[7/8] x86/spec-ctrl: Issue VERW during IST exit to Xen

Message ID 20230913202758.508225-8-andrew.cooper3@citrix.com (mailing list archive)
State Superseded
Headers show
Series x86/spec-ctrl: AMD DIV fix, and VERW prerequisite bugfixes | expand

Commit Message

Andrew Cooper Sept. 13, 2023, 8:27 p.m. UTC
There is a corner case where e.g. an NMI hitting an exit-to-guest path after
SPEC_CTRL_EXIT_TO_* would have run the entire NMI handler *after* the VERW
flush to scrub potentially sensitive data from uarch buffers.

In order to compensate, issue VERW when exiting to Xen from an IST entry.

SPEC_CTRL_EXIT_TO_XEN already has two reads of spec_ctrl_flags off the stack,
and we're about to add a third.  Load the field into %ebx, and list the
register as clobbered.

%r12 has been arranged to be the ist_exit signal, so add this as an input
dependency and use it to identify when to issue a VERW.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
CC: Jan Beulich <JBeulich@suse.com>
CC: Roger Pau Monné <roger.pau@citrix.com>
CC: Wei Liu <wl@xen.org>
---
 xen/arch/x86/include/asm/spec_ctrl_asm.h | 20 +++++++++++++++-----
 xen/arch/x86/x86_64/entry.S              |  2 +-
 2 files changed, 16 insertions(+), 6 deletions(-)

Comments

Jan Beulich Sept. 14, 2023, 10:01 a.m. UTC | #1
On 13.09.2023 22:27, Andrew Cooper wrote:
> There is a corner case where e.g. an NMI hitting an exit-to-guest path after
> SPEC_CTRL_EXIT_TO_* would have run the entire NMI handler *after* the VERW
> flush to scrub potentially sensitive data from uarch buffers.
> 
> In order to compensate, issue VERW when exiting to Xen from an IST entry.
> 
> SPEC_CTRL_EXIT_TO_XEN already has two reads of spec_ctrl_flags off the stack,
> and we're about to add a third.  Load the field into %ebx, and list the
> register as clobbered.
> 
> %r12 has been arranged to be the ist_exit signal, so add this as an input
> dependency and use it to identify when to issue a VERW.
> 
> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>

While looking technically okay, I still have a couple of remarks:

> --- a/xen/arch/x86/include/asm/spec_ctrl_asm.h
> +++ b/xen/arch/x86/include/asm/spec_ctrl_asm.h
> @@ -344,10 +344,12 @@ UNLIKELY_DISPATCH_LABEL(\@_serialise):
>   */
>  .macro SPEC_CTRL_EXIT_TO_XEN
>  /*
> - * Requires %r14=stack_end
> - * Clobbers %rax, %rcx, %rdx
> + * Requires %r12=ist_exit, %r14=stack_end
> + * Clobbers %rax, %rbx, %rcx, %rdx

While I'd generally be a little concerned of the growing dependency and
clobber lists, there being a single use site makes this pretty okay. The
macro invocation being immediately followed by RESTORE_ALL effectively
means we can clobber almost everything here.

As to register usage and my comment on patch 5: There's no real need
to switch %rbx to %r14 there if I'm not mistaken; in particular you
don't re-use any of the other macros which require use of %r14. You
could as well use %r14b (or about any other register that isn't already
used for something, yet whichever was picked apparently wouldn't make a
difference) for the flags here, getting away with fewer new REX prefixes
overall.

>   */
> -    testb $SCF_ist_sc_msr, STACK_CPUINFO_FIELD(spec_ctrl_flags)(%r14)
> +    movzbl STACK_CPUINFO_FIELD(spec_ctrl_flags)(%r14), %ebx
> +
> +    testb $SCF_ist_sc_msr, %bl
>      jz .L\@_skip_sc_msr
>  
>      /*
> @@ -358,7 +360,7 @@ UNLIKELY_DISPATCH_LABEL(\@_serialise):
>       */
>      xor %edx, %edx
>  
> -    testb $SCF_use_shadow, STACK_CPUINFO_FIELD(spec_ctrl_flags)(%r14)
> +    testb $SCF_use_shadow, %bl
>      jz .L\@_skip_sc_msr
>  
>      mov STACK_CPUINFO_FIELD(shadow_spec_ctrl)(%r14), %eax
> @@ -367,8 +369,16 @@ UNLIKELY_DISPATCH_LABEL(\@_serialise):
>  
>  .L\@_skip_sc_msr:
>  
> -    /* TODO VERW */
> +    test %r12, %r12
> +    jz .L\@_skip_ist_exit
> +
> +    /* Logically DO_SPEC_CTRL_COND_VERW but without the %rsp=cpuinfo dependency */
> +    testb $SCF_verw, %bl
> +    jz .L\@_verw_skip
> +    verw STACK_CPUINFO_FIELD(verw_sel)(%r14)
> +.L\@_verw_skip:

Nit: .L\@_skip_verw would be more consistent with the other label names.

> +.L\@_skip_ist_exit:

I was going to ask why the separate label (and whether the two JZ above
couldn't sensibly be folded), but the answer to both lies in the next
patch.

Jan
Andrew Cooper Sept. 14, 2023, 7:49 p.m. UTC | #2
On 14/09/2023 11:01 am, Jan Beulich wrote:
> On 13.09.2023 22:27, Andrew Cooper wrote:
>> There is a corner case where e.g. an NMI hitting an exit-to-guest path after
>> SPEC_CTRL_EXIT_TO_* would have run the entire NMI handler *after* the VERW
>> flush to scrub potentially sensitive data from uarch buffers.
>>
>> In order to compensate, issue VERW when exiting to Xen from an IST entry.
>>
>> SPEC_CTRL_EXIT_TO_XEN already has two reads of spec_ctrl_flags off the stack,
>> and we're about to add a third.  Load the field into %ebx, and list the
>> register as clobbered.
>>
>> %r12 has been arranged to be the ist_exit signal, so add this as an input
>> dependency and use it to identify when to issue a VERW.
>>
>> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
> While looking technically okay, I still have a couple of remarks:
>
>> --- a/xen/arch/x86/include/asm/spec_ctrl_asm.h
>> +++ b/xen/arch/x86/include/asm/spec_ctrl_asm.h
>> @@ -344,10 +344,12 @@ UNLIKELY_DISPATCH_LABEL(\@_serialise):
>>   */
>>  .macro SPEC_CTRL_EXIT_TO_XEN
>>  /*
>> - * Requires %r14=stack_end
>> - * Clobbers %rax, %rcx, %rdx
>> + * Requires %r12=ist_exit, %r14=stack_end
>> + * Clobbers %rax, %rbx, %rcx, %rdx
> While I'd generally be a little concerned of the growing dependency and
> clobber lists, there being a single use site makes this pretty okay. The
> macro invocation being immediately followed by RESTORE_ALL effectively
> means we can clobber almost everything here.
>
> As to register usage and my comment on patch 5: There's no real need
> to switch %rbx to %r14 there if I'm not mistaken

As said, it's about consistency of the helpers.


>>   */
>> -    testb $SCF_ist_sc_msr, STACK_CPUINFO_FIELD(spec_ctrl_flags)(%r14)
>> +    movzbl STACK_CPUINFO_FIELD(spec_ctrl_flags)(%r14), %ebx
>> +
>> +    testb $SCF_ist_sc_msr, %bl
>>      jz .L\@_skip_sc_msr
>>  
>>      /*
>> @@ -358,7 +360,7 @@ UNLIKELY_DISPATCH_LABEL(\@_serialise):
>>       */
>>      xor %edx, %edx
>>  
>> -    testb $SCF_use_shadow, STACK_CPUINFO_FIELD(spec_ctrl_flags)(%r14)
>> +    testb $SCF_use_shadow, %bl
>>      jz .L\@_skip_sc_msr
>>  
>>      mov STACK_CPUINFO_FIELD(shadow_spec_ctrl)(%r14), %eax
>> @@ -367,8 +369,16 @@ UNLIKELY_DISPATCH_LABEL(\@_serialise):
>>  
>>  .L\@_skip_sc_msr:
>>  
>> -    /* TODO VERW */
>> +    test %r12, %r12
>> +    jz .L\@_skip_ist_exit
>> +
>> +    /* Logically DO_SPEC_CTRL_COND_VERW but without the %rsp=cpuinfo dependency */
>> +    testb $SCF_verw, %bl
>> +    jz .L\@_verw_skip
>> +    verw STACK_CPUINFO_FIELD(verw_sel)(%r14)
>> +.L\@_verw_skip:
> Nit: .L\@_skip_verw would be more consistent with the other label names.

So it would.  I'll tweak.

~Andrew
Jan Beulich Sept. 15, 2023, 7:15 a.m. UTC | #3
On 14.09.2023 21:49, Andrew Cooper wrote:
> On 14/09/2023 11:01 am, Jan Beulich wrote:
>> On 13.09.2023 22:27, Andrew Cooper wrote:
>>> There is a corner case where e.g. an NMI hitting an exit-to-guest path after
>>> SPEC_CTRL_EXIT_TO_* would have run the entire NMI handler *after* the VERW
>>> flush to scrub potentially sensitive data from uarch buffers.
>>>
>>> In order to compensate, issue VERW when exiting to Xen from an IST entry.
>>>
>>> SPEC_CTRL_EXIT_TO_XEN already has two reads of spec_ctrl_flags off the stack,
>>> and we're about to add a third.  Load the field into %ebx, and list the
>>> register as clobbered.
>>>
>>> %r12 has been arranged to be the ist_exit signal, so add this as an input
>>> dependency and use it to identify when to issue a VERW.
>>>
>>> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
>> While looking technically okay, I still have a couple of remarks:
>>
>>> --- a/xen/arch/x86/include/asm/spec_ctrl_asm.h
>>> +++ b/xen/arch/x86/include/asm/spec_ctrl_asm.h
>>> @@ -344,10 +344,12 @@ UNLIKELY_DISPATCH_LABEL(\@_serialise):
>>>   */
>>>  .macro SPEC_CTRL_EXIT_TO_XEN
>>>  /*
>>> - * Requires %r14=stack_end
>>> - * Clobbers %rax, %rcx, %rdx
>>> + * Requires %r12=ist_exit, %r14=stack_end
>>> + * Clobbers %rax, %rbx, %rcx, %rdx
>> While I'd generally be a little concerned of the growing dependency and
>> clobber lists, there being a single use site makes this pretty okay. The
>> macro invocation being immediately followed by RESTORE_ALL effectively
>> means we can clobber almost everything here.
>>
>> As to register usage and my comment on patch 5: There's no real need
>> to switch %rbx to %r14 there if I'm not mistaken
> 
> As said, it's about consistency of the helpers.

While I'm not entirely happy with this, ...

>>> @@ -367,8 +369,16 @@ UNLIKELY_DISPATCH_LABEL(\@_serialise):
>>>  
>>>  .L\@_skip_sc_msr:
>>>  
>>> -    /* TODO VERW */
>>> +    test %r12, %r12
>>> +    jz .L\@_skip_ist_exit
>>> +
>>> +    /* Logically DO_SPEC_CTRL_COND_VERW but without the %rsp=cpuinfo dependency */
>>> +    testb $SCF_verw, %bl
>>> +    jz .L\@_verw_skip
>>> +    verw STACK_CPUINFO_FIELD(verw_sel)(%r14)
>>> +.L\@_verw_skip:
>> Nit: .L\@_skip_verw would be more consistent with the other label names.
> 
> So it would.  I'll tweak.

... then
Reviewed-by: Jan Beulich <jbeulich@suse.com>

Jan
diff mbox series

Patch

diff --git a/xen/arch/x86/include/asm/spec_ctrl_asm.h b/xen/arch/x86/include/asm/spec_ctrl_asm.h
index acdb526d292d..9740697114ad 100644
--- a/xen/arch/x86/include/asm/spec_ctrl_asm.h
+++ b/xen/arch/x86/include/asm/spec_ctrl_asm.h
@@ -344,10 +344,12 @@  UNLIKELY_DISPATCH_LABEL(\@_serialise):
  */
 .macro SPEC_CTRL_EXIT_TO_XEN
 /*
- * Requires %r14=stack_end
- * Clobbers %rax, %rcx, %rdx
+ * Requires %r12=ist_exit, %r14=stack_end
+ * Clobbers %rax, %rbx, %rcx, %rdx
  */
-    testb $SCF_ist_sc_msr, STACK_CPUINFO_FIELD(spec_ctrl_flags)(%r14)
+    movzbl STACK_CPUINFO_FIELD(spec_ctrl_flags)(%r14), %ebx
+
+    testb $SCF_ist_sc_msr, %bl
     jz .L\@_skip_sc_msr
 
     /*
@@ -358,7 +360,7 @@  UNLIKELY_DISPATCH_LABEL(\@_serialise):
      */
     xor %edx, %edx
 
-    testb $SCF_use_shadow, STACK_CPUINFO_FIELD(spec_ctrl_flags)(%r14)
+    testb $SCF_use_shadow, %bl
     jz .L\@_skip_sc_msr
 
     mov STACK_CPUINFO_FIELD(shadow_spec_ctrl)(%r14), %eax
@@ -367,8 +369,16 @@  UNLIKELY_DISPATCH_LABEL(\@_serialise):
 
 .L\@_skip_sc_msr:
 
-    /* TODO VERW */
+    test %r12, %r12
+    jz .L\@_skip_ist_exit
+
+    /* Logically DO_SPEC_CTRL_COND_VERW but without the %rsp=cpuinfo dependency */
+    testb $SCF_verw, %bl
+    jz .L\@_verw_skip
+    verw STACK_CPUINFO_FIELD(verw_sel)(%r14)
+.L\@_verw_skip:
 
+.L\@_skip_ist_exit:
 .endm
 
 #endif /* __ASSEMBLY__ */
diff --git a/xen/arch/x86/x86_64/entry.S b/xen/arch/x86/x86_64/entry.S
index da084a7e8e54..f70752fa36c1 100644
--- a/xen/arch/x86/x86_64/entry.S
+++ b/xen/arch/x86/x86_64/entry.S
@@ -686,7 +686,7 @@  UNLIKELY_START(ne, exit_cr3)
 UNLIKELY_END(exit_cr3)
 
         /* WARNING! `ret`, `call *`, `jmp *` not safe beyond this point. */
-        SPEC_CTRL_EXIT_TO_XEN     /* Req: %r14=end, Clob: acd */
+        SPEC_CTRL_EXIT_TO_XEN     /* Req: %r12=ist_exit %r14=end, Clob: abcd */
 
         RESTORE_ALL adj=8
         iretq