
[5/8] x86/entry: Adjust restore_all_xen to hold stack_end in %r14

Message ID: 20230913202758.508225-6-andrew.cooper3@citrix.com
State: Superseded
Series: x86/spec-ctrl: AMD DIV fix, and VERW prerequisite bugfixes

Commit Message

Andrew Cooper Sept. 13, 2023, 8:27 p.m. UTC
All other SPEC_CTRL_{ENTRY,EXIT}_* helpers hold stack_end in %r14.  Adjust it
for consistency, freeing up %rbx to be used differently.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
CC: Jan Beulich <JBeulich@suse.com>
CC: Roger Pau Monné <roger.pau@citrix.com>
CC: Wei Liu <wl@xen.org>
---
 xen/arch/x86/include/asm/spec_ctrl_asm.h | 8 ++++----
 xen/arch/x86/x86_64/entry.S              | 8 ++++----
 2 files changed, 8 insertions(+), 8 deletions(-)

Comments

Jan Beulich Sept. 14, 2023, 8:51 a.m. UTC | #1
On 13.09.2023 22:27, Andrew Cooper wrote:
> All other SPEC_CTRL_{ENTRY,EXIT}_* helpers hold stack_end in %r14.  Adjust it
> for consistency, freeing up %rbx to be used differently.
> 
> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>

Reviewed-by: Jan Beulich <jbeulich@suse.com>

The choice of %rbx was, IIRC, for avoiding unnecessary code size increase
(due to extra REX prefixes needed in some of the affected insns). I
understand you view this as not a real concern, but I think saying so
explicitly in the description would at least be desirable. After all, there
would also be the alternative of further parametrizing the involved macros.

Jan
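
For concreteness, the code size point is the one-byte REX prefix that any
memory access through %r8-%r15 carries. A minimal illustration using
simplified byte-operand forms (the instructions in the patch additionally
carry displacements, but the prefix cost is the same):

    testb $1, (%rbx)    # f6 03 01    - 3 bytes, no REX prefix needed
    testb $1, (%r14)    # 41 f6 06 01 - 4 bytes, REX.B prefix required

Switching the helper from %rbx to %r14 therefore costs one extra byte per
affected instruction.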
Andrew Cooper Sept. 14, 2023, 7:28 p.m. UTC | #2
On 14/09/2023 9:51 am, Jan Beulich wrote:
> On 13.09.2023 22:27, Andrew Cooper wrote:
>> All other SPEC_CTRL_{ENTRY,EXIT}_* helpers hold stack_end in %r14.  Adjust it
>> for consistency, freeing up %rbx to be used differently.
>>
>> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
> Reviewed-by: Jan Beulich <jbeulich@suse.com>
>
> The choice of %rbx was, IIRC, for avoiding unnecessary code size increase
> (due to extra REX prefixes needed in some of the affected insns). I
> understand you view this as not a real concern, but I think saying so
> explicitly in the description would at least be desirable. After all, there
> would also be the alternative of further parametrizing the involved macros.

Yeah, I'm seriously not concerned about a REX prefix or two.

This is about making all the helpers more consistent, to cut down on the
number of errors involved in modifying this mess.

~Andrew
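
For reference, the parametrization Jan mentions would look roughly like the
sketch below. This is purely illustrative and not part of the series; the
"stk" parameter name is invented here, and only the first instruction of the
macro body is shown:

    .macro SPEC_CTRL_EXIT_TO_XEN stk:req
    /*
     * Requires %\stk=stack_end
     * Clobbers %rax, %rcx, %rdx
     */
        testb $SCF_ist_sc_msr, STACK_CPUINFO_FIELD(spec_ctrl_flags)(%\stk)
        jz .L\@_skip_sc_msr
        /* ... remainder of the body unchanged, with %\stk in place of the fixed register ... */
    .endm

        /* Caller names the register, e.g. in restore_all_xen: */
        SPEC_CTRL_EXIT_TO_XEN stk=r14     /* Req: %r14=end, Clob: acd */

The series instead standardises on %r14 so that all SPEC_CTRL_{ENTRY,EXIT}_*
helpers share the same fixed convention.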

Patch

diff --git a/xen/arch/x86/include/asm/spec_ctrl_asm.h b/xen/arch/x86/include/asm/spec_ctrl_asm.h
index 77f6c35bb9c5..acdb526d292d 100644
--- a/xen/arch/x86/include/asm/spec_ctrl_asm.h
+++ b/xen/arch/x86/include/asm/spec_ctrl_asm.h
@@ -344,10 +344,10 @@  UNLIKELY_DISPATCH_LABEL(\@_serialise):
  */
 .macro SPEC_CTRL_EXIT_TO_XEN
 /*
- * Requires %rbx=stack_end
+ * Requires %r14=stack_end
  * Clobbers %rax, %rcx, %rdx
  */
-    testb $SCF_ist_sc_msr, STACK_CPUINFO_FIELD(spec_ctrl_flags)(%rbx)
+    testb $SCF_ist_sc_msr, STACK_CPUINFO_FIELD(spec_ctrl_flags)(%r14)
     jz .L\@_skip_sc_msr
 
     /*
@@ -358,10 +358,10 @@  UNLIKELY_DISPATCH_LABEL(\@_serialise):
      */
     xor %edx, %edx
 
-    testb $SCF_use_shadow, STACK_CPUINFO_FIELD(spec_ctrl_flags)(%rbx)
+    testb $SCF_use_shadow, STACK_CPUINFO_FIELD(spec_ctrl_flags)(%r14)
     jz .L\@_skip_sc_msr
 
-    mov STACK_CPUINFO_FIELD(shadow_spec_ctrl)(%rbx), %eax
+    mov STACK_CPUINFO_FIELD(shadow_spec_ctrl)(%r14), %eax
     mov $MSR_SPEC_CTRL, %ecx
     wrmsr
 
diff --git a/xen/arch/x86/x86_64/entry.S b/xen/arch/x86/x86_64/entry.S
index a1c860f56949..525877e97330 100644
--- a/xen/arch/x86/x86_64/entry.S
+++ b/xen/arch/x86/x86_64/entry.S
@@ -665,15 +665,15 @@  restore_all_xen:
          * Check whether we need to switch to the per-CPU page tables, in
          * case we return to late PV exit code (from an NMI or #MC).
          */
-        GET_STACK_END(bx)
-        cmpb  $0, STACK_CPUINFO_FIELD(use_pv_cr3)(%rbx)
+        GET_STACK_END(14)
+        cmpb  $0, STACK_CPUINFO_FIELD(use_pv_cr3)(%r14)
 UNLIKELY_START(ne, exit_cr3)
-        mov   STACK_CPUINFO_FIELD(pv_cr3)(%rbx), %rax
+        mov   STACK_CPUINFO_FIELD(pv_cr3)(%r14), %rax
         mov   %rax, %cr3
 UNLIKELY_END(exit_cr3)
 
         /* WARNING! `ret`, `call *`, `jmp *` not safe beyond this point. */
-        SPEC_CTRL_EXIT_TO_XEN     /* Req: %rbx=end, Clob: acd */
+        SPEC_CTRL_EXIT_TO_XEN     /* Req: %r14=end, Clob: acd */
 
         RESTORE_ALL adj=8
         iretq