
[v2] Reduce assembly code size of exception entry points

Message ID: 20240214103558.13655-1-frediano.ziglio@cloud.com
State: Superseded

Commit Message

Frediano Ziglio Feb. 14, 2024, 10:35 a.m. UTC
We just pushed an 8-byte zero, and the exception constants are
small, so we can write just a single byte, saving 3 bytes per
instruction.
With ENDBR64 this reduces the size of many entry points from 32 to
16 bytes (due to alignment).
Similar code is already used in autogen_stubs.

Signed-off-by: Frediano Ziglio <frediano.ziglio@cloud.com>
--
v2:
- added missing entry points;
- added a mention of the autogen_stubs code, as suggested.
---
 xen/arch/x86/x86_64/entry.S | 40 ++++++++++++++++++-------------------
 1 file changed, 20 insertions(+), 20 deletions(-)
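
For reference, the 3-byte saving comes purely from the immediate
operand: movl carries a 4-byte immediate where movb carries a single
byte. A sketch of the expected encodings (the byte sequences assume
the standard opcode/ModRM/SIB/disp8 form; actual assembler output
should be checked with objdump):

        movl  $X86_EXC_PF, 4(%rsp)   # C7 44 24 04 0E 00 00 00 -> 8 bytes
        movb  $X86_EXC_PF, 4(%rsp)   # C6 44 24 04 0E          -> 5 bytes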

Comments

Jan Beulich Feb. 14, 2024, 1:56 p.m. UTC | #1
On 14.02.2024 11:35, Frediano Ziglio wrote:
> We just pushed an 8-byte zero,

This part is now somewhat stale.

> and the exception constants are
> small, so we can write just a single byte, saving 3 bytes per
> instruction.
> With ENDBR64 this reduces the size of many entry points from 32 to
> 16 bytes (due to alignment).
> Similar code is already used in autogen_stubs.
> 
> Signed-off-by: Frediano Ziglio <frediano.ziglio@cloud.com>
> --
> v2:
> - added missing entry points;

What about entry_int82?

> @@ -653,7 +653,7 @@ END(ret_from_intr)
>          .section .init.text, "ax", @progbits
>  FUNC(early_page_fault)
>          ENDBR64
> -        movl  $X86_EXC_PF, 4(%rsp)
> +        movb  $X86_EXC_PF, 4(%rsp)
>          SAVE_ALL
>          movq  %rsp, %rdi
>          call  do_early_page_fault

Between this and the next hunk there's also entry_PF.

Jan

> @@ -898,105 +898,105 @@ END(handle_exception)
>  FUNC(entry_DE)
>          ENDBR64
>          pushq $0
> -        movl  $X86_EXC_DE, 4(%rsp)
> +        movb  $X86_EXC_DE, 4(%rsp)
>          jmp   handle_exception
>  END(entry_DE)
>  
>  FUNC(entry_MF)
>          ENDBR64
>          pushq $0
> -        movl  $X86_EXC_MF, 4(%rsp)
> +        movb  $X86_EXC_MF, 4(%rsp)
>          jmp   handle_exception
>  END(entry_MF)
>  
>  FUNC(entry_XM)
>          ENDBR64
>          pushq $0
> -        movl  $X86_EXC_XM, 4(%rsp)
> +        movb  $X86_EXC_XM, 4(%rsp)
>          jmp   handle_exception
>  END(entry_XM)
>  
>  FUNC(entry_NM)
>          ENDBR64
>          pushq $0
> -        movl  $X86_EXC_NM, 4(%rsp)
> +        movb  $X86_EXC_NM, 4(%rsp)
>          jmp   handle_exception
>  END(entry_NM)
>  
>  FUNC(entry_DB)
>          ENDBR64
>          pushq $0
> -        movl  $X86_EXC_DB, 4(%rsp)
> +        movb  $X86_EXC_DB, 4(%rsp)
>          jmp   handle_ist_exception
>  END(entry_DB)
>  
>  FUNC(entry_BP)
>          ENDBR64
>          pushq $0
> -        movl  $X86_EXC_BP, 4(%rsp)
> +        movb  $X86_EXC_BP, 4(%rsp)
>          jmp   handle_exception
>  END(entry_BP)
>  
>  FUNC(entry_OF)
>          ENDBR64
>          pushq $0
> -        movl  $X86_EXC_OF, 4(%rsp)
> +        movb  $X86_EXC_OF, 4(%rsp)
>          jmp   handle_exception
>  END(entry_OF)
>  
>  FUNC(entry_BR)
>          ENDBR64
>          pushq $0
> -        movl  $X86_EXC_BR, 4(%rsp)
> +        movb  $X86_EXC_BR, 4(%rsp)
>          jmp   handle_exception
>  END(entry_BR)
>  
>  FUNC(entry_UD)
>          ENDBR64
>          pushq $0
> -        movl  $X86_EXC_UD, 4(%rsp)
> +        movb  $X86_EXC_UD, 4(%rsp)
>          jmp   handle_exception
>  END(entry_UD)
>  
>  FUNC(entry_TS)
>          ENDBR64
> -        movl  $X86_EXC_TS, 4(%rsp)
> +        movb  $X86_EXC_TS, 4(%rsp)
>          jmp   handle_exception
>  END(entry_TS)
>  
>  FUNC(entry_NP)
>          ENDBR64
> -        movl  $X86_EXC_NP, 4(%rsp)
> +        movb  $X86_EXC_NP, 4(%rsp)
>          jmp   handle_exception
>  END(entry_NP)
>  
>  FUNC(entry_SS)
>          ENDBR64
> -        movl  $X86_EXC_SS, 4(%rsp)
> +        movb  $X86_EXC_SS, 4(%rsp)
>          jmp   handle_exception
>  END(entry_SS)
>  
>  FUNC(entry_GP)
>          ENDBR64
> -        movl  $X86_EXC_GP, 4(%rsp)
> +        movb  $X86_EXC_GP, 4(%rsp)
>          jmp   handle_exception
>  END(entry_GP)
>  
>  FUNC(entry_AC)
>          ENDBR64
> -        movl  $X86_EXC_AC, 4(%rsp)
> +        movb  $X86_EXC_AC, 4(%rsp)
>          jmp   handle_exception
>  END(entry_AC)
>  
>  FUNC(entry_CP)
>          ENDBR64
> -        movl  $X86_EXC_CP, 4(%rsp)
> +        movb  $X86_EXC_CP, 4(%rsp)
>          jmp   handle_exception
>  END(entry_CP)
>  
>  FUNC(entry_DF)
>          ENDBR64
> -        movl  $X86_EXC_DF, 4(%rsp)
> +        movb  $X86_EXC_DF, 4(%rsp)
>          /* Set AC to reduce chance of further SMAP faults */
>          ALTERNATIVE "", stac, X86_FEATURE_XEN_SMAP
>          SAVE_ALL
> @@ -1022,7 +1022,7 @@ END(entry_DF)
>  FUNC(entry_NMI)
>          ENDBR64
>          pushq $0
> -        movl  $X86_EXC_NMI, 4(%rsp)
> +        movb  $X86_EXC_NMI, 4(%rsp)
>  END(entry_NMI)
>  
>  FUNC(handle_ist_exception)
> @@ -1158,7 +1158,7 @@ END(handle_ist_exception)
>  FUNC(entry_MC)
>          ENDBR64
>          pushq $0
> -        movl  $X86_EXC_MC, 4(%rsp)
> +        movb  $X86_EXC_MC, 4(%rsp)
>          jmp   handle_ist_exception
>  END(entry_MC)
>
Jan Beulich Feb. 14, 2024, 2:20 p.m. UTC | #2
On 14.02.2024 11:35, Frediano Ziglio wrote:
> @@ -898,105 +898,105 @@ END(handle_exception)
>  FUNC(entry_DE)
>          ENDBR64
>          pushq $0
> -        movl  $X86_EXC_DE, 4(%rsp)
> +        movb  $X86_EXC_DE, 4(%rsp)

As we're trying to compact things: This writes 0 over the previously
pushed 0. The insn therefore could be replaced by
"BUILD_BUG_ON X86_EXC_DE".

Jan
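
A minimal sketch of what that suggestion could look like for entry_DE,
assuming X86_EXC_DE is 0 and using plain assembler directives in place
of a BUILD_BUG_ON asm macro (whose exact form here is an assumption):

        FUNC(entry_DE)
                ENDBR64
                /* pushes 0 into both the error code and vector slots */
                pushq $0
                .if X86_EXC_DE != 0   /* assemble-time check */
                .error "X86_EXC_DE expected to be zero"
                .endif
                jmp   handle_exception
        END(entry_DE)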
Roger Pau Monné Feb. 14, 2024, 3:02 p.m. UTC | #3
On Wed, Feb 14, 2024 at 10:35:58AM +0000, Frediano Ziglio wrote:
> We just pushed an 8-byte zero, and the exception constants are
> small, so we can write just a single byte, saving 3 bytes per
> instruction.
> With ENDBR64 this reduces the size of many entry points from 32 to
> 16 bytes (due to alignment).
> Similar code is already used in autogen_stubs.

Will using movb instead of movl have any performance impact?  I don't
think we should trade speed for code size, so this needs to be
mentioned in the commit message.

Thanks, Roger.
Jan Beulich Feb. 14, 2024, 3:08 p.m. UTC | #4
On 14.02.2024 16:02, Roger Pau Monné wrote:
> On Wed, Feb 14, 2024 at 10:35:58AM +0000, Frediano Ziglio wrote:
>> We just pushed an 8-byte zero, and the exception constants are
>> small, so we can write just a single byte, saving 3 bytes per
>> instruction.
>> With ENDBR64 this reduces the size of many entry points from 32 to
>> 16 bytes (due to alignment).
>> Similar code is already used in autogen_stubs.
> 
> Will using movb instead of movl have any performance impact?  I don't
> think we should trade speed for code size, so this needs to be
> mentioned in the commit message.

That's really what the last sentence is about (it could have been said
more explicitly though): If doing so on interrupt paths is fine, it
ought to be fine on exception paths as well. Plus, no matter what, we
have two overlapping stores in every one of these places. But yes,
their sizes may still be relevant to the overall result.

Jan
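
To spell out the overlap, a sketch of the two stores (the field split
follows the pushed qword holding a 32-bit error code below a 32-bit
entry vector, as in cpu_user_regs):

        pushq $0                    # %rsp -= 8, zeroing the whole qword:
                                    #   bytes 0-3: error code = 0
                                    #   bytes 4-7: entry vector = 0
        movb  $X86_EXC_DE, 4(%rsp)  # rewrites byte 4 only; this store
                                    # lands inside the qword just pushed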
Roger Pau Monné Feb. 14, 2024, 3:29 p.m. UTC | #5
On Wed, Feb 14, 2024 at 04:08:12PM +0100, Jan Beulich wrote:
> On 14.02.2024 16:02, Roger Pau Monné wrote:
> > On Wed, Feb 14, 2024 at 10:35:58AM +0000, Frediano Ziglio wrote:
> >> We just pushed an 8-byte zero, and the exception constants are
> >> small, so we can write just a single byte, saving 3 bytes per
> >> instruction.
> >> With ENDBR64 this reduces the size of many entry points from 32 to
> >> 16 bytes (due to alignment).
> >> Similar code is already used in autogen_stubs.
> > 
> > Will using movb instead of movl have any performance impact?  I don't
> > think we should trade speed for code size, so this needs to be
> > mentioned in the commit message.
> 
> That's really what the last sentence is about (it could have been said
> more explicitly though): If doing so on interrupt paths is fine, it
> ought to be fine on exception paths as well.

I might view it the other way around: maybe it's autogen_stubs that
needs changing to use movl instead of movb for performance reasons?

I think this needs to be clearly stated, and ideally some kind of
benchmarks should be provided to demonstrate no performance change if
there are doubts whether movl and movb might perform differently.

Thanks, Roger.
Andrew Cooper Feb. 14, 2024, 3:53 p.m. UTC | #6
On 14/02/2024 3:29 pm, Roger Pau Monné wrote:
> On Wed, Feb 14, 2024 at 04:08:12PM +0100, Jan Beulich wrote:
>> On 14.02.2024 16:02, Roger Pau Monné wrote:
>>> On Wed, Feb 14, 2024 at 10:35:58AM +0000, Frediano Ziglio wrote:
>>>> We just pushed an 8-byte zero, and the exception constants are
>>>> small, so we can write just a single byte, saving 3 bytes per
>>>> instruction.
>>>> With ENDBR64 this reduces the size of many entry points from 32 to
>>>> 16 bytes (due to alignment).
>>>> Similar code is already used in autogen_stubs.
>>> Will using movb instead of movl have any performance impact?  I don't
>>> think we should trade speed for code size, so this needs to be
>>> mentioned in the commit message.
>> That's really what the last sentence is about (it could have been said
>> more explicitly though): If doing so on interrupt paths is fine, it
>> ought to be fine on exception paths as well.
> I might view it the other way around: maybe it's autogen_stubs that
> needs changing to use movl instead of movb for performance reasons?
>
> I think this needs to be clearly stated, and ideally some kind of
> benchmarks should be provided to demonstrate no performance change if
> there are doubts whether movl and movb might perform differently.

The push and the mov are overlapping stores either way.  Swapping
between movl and movb will make no difference at all.

However, the shorter instruction ends up halving the size of the entry
stub when alignment is considered, and that will make a marginal
difference.  Fewer cache misses (to a first approximation, even #PF will
be L1-cold), and better utilisation of branch prediction resource (~>
less likely to be BP-cold).

I doubt you'll be able to see a difference without perf counters
(whatever difference is uncovered here will be dwarfed by the speculation
workarounds), but a marginal win is still a win.

~Andrew
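
Spelling out that arithmetic for a typical no-error-code stub (a
sketch; the 16-byte function alignment and the rel32 form of the jmp
are inferred from the 32 -> 16 numbers in the commit message):

        # before: 4 + 2 + 8 + 5 = 19 bytes, padded to 32
        endbr64                     # 4 bytes
        pushq $0                    # 2 bytes
        movl  $X86_EXC_DE, 4(%rsp)  # 8 bytes
        jmp   handle_exception      # 5 bytes (rel32)

        # after: 4 + 2 + 5 + 5 = 16 bytes, exactly one aligned block
        endbr64                     # 4 bytes
        pushq $0                    # 2 bytes
        movb  $X86_EXC_DE, 4(%rsp)  # 5 bytes
        jmp   handle_exception      # 5 bytes (rel32)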
Roger Pau Monné Feb. 14, 2024, 4:05 p.m. UTC | #7
On Wed, Feb 14, 2024 at 03:53:24PM +0000, Andrew Cooper wrote:
> On 14/02/2024 3:29 pm, Roger Pau Monné wrote:
> > On Wed, Feb 14, 2024 at 04:08:12PM +0100, Jan Beulich wrote:
> >> On 14.02.2024 16:02, Roger Pau Monné wrote:
> >>> On Wed, Feb 14, 2024 at 10:35:58AM +0000, Frediano Ziglio wrote:
> >>>> We just pushed an 8-byte zero, and the exception constants are
> >>>> small, so we can write just a single byte, saving 3 bytes per
> >>>> instruction.
> >>>> With ENDBR64 this reduces the size of many entry points from 32 to
> >>>> 16 bytes (due to alignment).
> >>>> Similar code is already used in autogen_stubs.
> >>> Will using movb instead of movl have any performance impact?  I don't
> >>> think we should trade speed for code size, so this needs to be
> >>> mentioned in the commit message.
> >> That's really what the last sentence is about (it could have been said
> >> more explicitly though): If doing so on interrupt paths is fine, it
> >> ought to be fine on exception paths as well.
> > I might view it the other way around: maybe it's autogen_stubs that
> > needs changing to use movl instead of movb for performance reasons?
> >
> > I think this needs to be clearly stated, and ideally some kind of
> > benchmarks should be provided to demonstrate no performance change if
> > there are doubts whether movl and movb might perform differently.
> 
> The push and the mov are overlapping stores either way.  Swapping
> between movl and movb will make no difference at all.
> 
> However, the shorter instruction ends up halving the size of the entry
> stub when alignment is considered, and that will make a marginal
> difference.  Fewer cache misses (to a first approximation, even #PF will
> be L1-cold), and better utilisation of branch prediction resource (~>
> less likely to be BP-cold).
> 
> I doubt you'll be able to see a difference without perf counters
> (whatever difference is covered here will be dwarfed by the speculation
> workarounds), but a marginal win is still a win.

I'm happy just stating in the commit message that the change doesn't
make any performance difference.

Thanks, Roger.

Patch

diff --git a/xen/arch/x86/x86_64/entry.S b/xen/arch/x86/x86_64/entry.S
index ecdd6e5b47..a28a0d4044 100644
--- a/xen/arch/x86/x86_64/entry.S
+++ b/xen/arch/x86/x86_64/entry.S
@@ -389,7 +389,7 @@  FUNC(entry_int80)
         ENDBR64
         ALTERNATIVE "", clac, X86_FEATURE_XEN_SMAP
         pushq $0
-        movl  $0x80, 4(%rsp)
+        movb  $0x80, 4(%rsp)
         SAVE_ALL
 
         SPEC_CTRL_ENTRY_FROM_PV /* Req: %rsp=regs/cpuinfo, %rdx=0, Clob: acd */
@@ -653,7 +653,7 @@  END(ret_from_intr)
         .section .init.text, "ax", @progbits
 FUNC(early_page_fault)
         ENDBR64
-        movl  $X86_EXC_PF, 4(%rsp)
+        movb  $X86_EXC_PF, 4(%rsp)
         SAVE_ALL
         movq  %rsp, %rdi
         call  do_early_page_fault
@@ -898,105 +898,105 @@  END(handle_exception)
 FUNC(entry_DE)
         ENDBR64
         pushq $0
-        movl  $X86_EXC_DE, 4(%rsp)
+        movb  $X86_EXC_DE, 4(%rsp)
         jmp   handle_exception
 END(entry_DE)
 
 FUNC(entry_MF)
         ENDBR64
         pushq $0
-        movl  $X86_EXC_MF, 4(%rsp)
+        movb  $X86_EXC_MF, 4(%rsp)
         jmp   handle_exception
 END(entry_MF)
 
 FUNC(entry_XM)
         ENDBR64
         pushq $0
-        movl  $X86_EXC_XM, 4(%rsp)
+        movb  $X86_EXC_XM, 4(%rsp)
         jmp   handle_exception
 END(entry_XM)
 
 FUNC(entry_NM)
         ENDBR64
         pushq $0
-        movl  $X86_EXC_NM, 4(%rsp)
+        movb  $X86_EXC_NM, 4(%rsp)
         jmp   handle_exception
 END(entry_NM)
 
 FUNC(entry_DB)
         ENDBR64
         pushq $0
-        movl  $X86_EXC_DB, 4(%rsp)
+        movb  $X86_EXC_DB, 4(%rsp)
         jmp   handle_ist_exception
 END(entry_DB)
 
 FUNC(entry_BP)
         ENDBR64
         pushq $0
-        movl  $X86_EXC_BP, 4(%rsp)
+        movb  $X86_EXC_BP, 4(%rsp)
         jmp   handle_exception
 END(entry_BP)
 
 FUNC(entry_OF)
         ENDBR64
         pushq $0
-        movl  $X86_EXC_OF, 4(%rsp)
+        movb  $X86_EXC_OF, 4(%rsp)
         jmp   handle_exception
 END(entry_OF)
 
 FUNC(entry_BR)
         ENDBR64
         pushq $0
-        movl  $X86_EXC_BR, 4(%rsp)
+        movb  $X86_EXC_BR, 4(%rsp)
         jmp   handle_exception
 END(entry_BR)
 
 FUNC(entry_UD)
         ENDBR64
         pushq $0
-        movl  $X86_EXC_UD, 4(%rsp)
+        movb  $X86_EXC_UD, 4(%rsp)
         jmp   handle_exception
 END(entry_UD)
 
 FUNC(entry_TS)
         ENDBR64
-        movl  $X86_EXC_TS, 4(%rsp)
+        movb  $X86_EXC_TS, 4(%rsp)
         jmp   handle_exception
 END(entry_TS)
 
 FUNC(entry_NP)
         ENDBR64
-        movl  $X86_EXC_NP, 4(%rsp)
+        movb  $X86_EXC_NP, 4(%rsp)
         jmp   handle_exception
 END(entry_NP)
 
 FUNC(entry_SS)
         ENDBR64
-        movl  $X86_EXC_SS, 4(%rsp)
+        movb  $X86_EXC_SS, 4(%rsp)
         jmp   handle_exception
 END(entry_SS)
 
 FUNC(entry_GP)
         ENDBR64
-        movl  $X86_EXC_GP, 4(%rsp)
+        movb  $X86_EXC_GP, 4(%rsp)
         jmp   handle_exception
 END(entry_GP)
 
 FUNC(entry_AC)
         ENDBR64
-        movl  $X86_EXC_AC, 4(%rsp)
+        movb  $X86_EXC_AC, 4(%rsp)
         jmp   handle_exception
 END(entry_AC)
 
 FUNC(entry_CP)
         ENDBR64
-        movl  $X86_EXC_CP, 4(%rsp)
+        movb  $X86_EXC_CP, 4(%rsp)
         jmp   handle_exception
 END(entry_CP)
 
 FUNC(entry_DF)
         ENDBR64
-        movl  $X86_EXC_DF, 4(%rsp)
+        movb  $X86_EXC_DF, 4(%rsp)
         /* Set AC to reduce chance of further SMAP faults */
         ALTERNATIVE "", stac, X86_FEATURE_XEN_SMAP
         SAVE_ALL
@@ -1022,7 +1022,7 @@  END(entry_DF)
 FUNC(entry_NMI)
         ENDBR64
         pushq $0
-        movl  $X86_EXC_NMI, 4(%rsp)
+        movb  $X86_EXC_NMI, 4(%rsp)
 END(entry_NMI)
 
 FUNC(handle_ist_exception)
@@ -1158,7 +1158,7 @@  END(handle_ist_exception)
 FUNC(entry_MC)
         ENDBR64
         pushq $0
-        movl  $X86_EXC_MC, 4(%rsp)
+        movb  $X86_EXC_MC, 4(%rsp)
         jmp   handle_ist_exception
 END(entry_MC)