diff mbox series

[2/2] x86: annotate entry points with type and size

Message ID 531ab7f7-ce5a-12b2-e7e7-528c26f9ff7f@suse.com (mailing list archive)
State New, archived
Headers show
Series x86: aid debug info generation in assembly files | expand

Commit Message

Jan Beulich April 12, 2022, 10:28 a.m. UTC
Future gas versions will generate minimalistic Dwarf debug info for
items annotated as functions and having their sizes specified [1].
"Borrow" Arm's END() and ENDPROC() to avoid open-coding (and perhaps
typo-ing) the respective directives.

Signed-off-by: Jan Beulich <jbeulich@suse.com>

[1] https://sourceware.org/git?p=binutils-gdb.git;a=commitdiff;h=591cc9fbbfd6d51131c0f1d4a92e7893edcc7a28

Comments

Andrew Cooper April 14, 2022, 12:49 p.m. UTC | #1
On 12/04/2022 11:28, Jan Beulich wrote:
> Future gas versions will generate minimalistic Dwarf debug info for
> items annotated as functions and having their sizes specified [1].
> "Borrow" Arm's END() and ENDPROC() to avoid open-coding (and perhaps
> typo-ing) the respective directives.
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
>
> [1] https://sourceware.org/git?p=binutils-gdb.git;a=commitdiff;h=591cc9fbbfd6d51131c0f1d4a92e7893edcc7a28

I'm conflicted by this change.

You've clearly changed your mind since you rejected my patch introducing
this infrastructure and starting to use it.

Given that it is a recurring bug with livepatching which has been in
need of fixing since 2018, I'd organised some work to port Linux's
linkage.h as something more likely to have been acceptable.

~Andrew
Jan Beulich April 14, 2022, 12:59 p.m. UTC | #2
On 14.04.2022 14:49, Andrew Cooper wrote:
> On 12/04/2022 11:28, Jan Beulich wrote:
>> Future gas versions will generate minimalistic Dwarf debug info for
>> items annotated as functions and having their sizes specified [1].
>> "Borrow" Arm's END() and ENDPROC() to avoid open-coding (and perhaps
>> typo-ing) the respective directives.
>>
>> Signed-off-by: Jan Beulich <jbeulich@suse.com>
>>
>> [1] https://sourceware.org/git?p=binutils-gdb.git;a=commitdiff;h=591cc9fbbfd6d51131c0f1d4a92e7893edcc7a28
> 
> I'm conflicted by this change.
> 
> You've clearly changed your mind since you rejected my patch introducing
> this infrastructure and starting to use it.

Hmm, to be honest I don't recall me rejecting such work of yours.
In fact I have always been in favor of properly typing symbols,
where sensible and possible. I would therefore assume it was more
the "how" than the "that" which I wasn't happy with. If you have
a reference to the old thread to hand, I'd be interested in
looking up what made me oppose back at the time.

> Given that it is a recurring bug with livepatching which has been in
> need of fixing since 2018, I'd organised some work to port Linux's
> linkage.h as something more likely to have been acceptable.

Taking what they've got would likely be fine as well. At least in
a suitably stripped down manner (looking at their header they may
have gone a little overboard with this).

Jan
Jan Beulich June 23, 2022, 11:47 a.m. UTC | #3
On 14.04.2022 14:59, Jan Beulich wrote:
> On 14.04.2022 14:49, Andrew Cooper wrote:
>> On 12/04/2022 11:28, Jan Beulich wrote:
>>> Future gas versions will generate minimalistic Dwarf debug info for
>>> items annotated as functions and having their sizes specified [1].
>>> "Borrow" Arm's END() and ENDPROC() to avoid open-coding (and perhaps
>>> typo-ing) the respective directives.
>>>
>>> Signed-off-by: Jan Beulich <jbeulich@suse.com>
>>>
>>> [1] https://sourceware.org/git?p=binutils-gdb.git;a=commitdiff;h=591cc9fbbfd6d51131c0f1d4a92e7893edcc7a28
>>
>> I'm conflicted by this change.
>>
>> You've clearly changed your mind since you rejected my patch introducing
>> this infrastructure and starting to use it.
> 
> Hmm, to be honest I don't recall me rejecting such work of yours.
> In fact I have always been in favor of properly typing symbols,
> where sensible and possible. I would therefore assume it was more
> the "how" than the "that" which I wasn't happy with. If you have
> a reference to the old thread to hand, I'd be interested in
> looking up what made me oppose back at the time.
> 
>> Given that it is a recurring bug with livepatching which has been in
>> need of fixing since 2018, I'd organised some work to port Linux's
>> linkage.h as something more likely to have been acceptable.
> 
> Taking what they've got would likely be fine as well. At least in
> a suitably stripped down manner (looking at their header they may
> have gone a little overboard with this).

Over two months have passed. I wonder whether I had misunderstood your
reply: I took it to mean an alternative patch or series would be
posted. In the absence of that and considering that you say that you
did want such annotations anyway, I wonder what it is that stands in
the way of these two patches making it in.

Jan
diff mbox series

Patch

--- a/xen/arch/x86/include/asm/config.h
+++ b/xen/arch/x86/include/asm/config.h
@@ -53,6 +53,11 @@ 
 #define GLOBAL(name)                            \
   .globl name;                                  \
   name:
+#define END(name)                               \
+  .size name, . - name
+#define ENDPROC(name)                           \
+  .type name, @function;                        \
+  END(name)
 #endif
 
 #define NR_hypercalls 64
--- a/xen/arch/x86/x86_64/compat/entry.S
+++ b/xen/arch/x86/x86_64/compat/entry.S
@@ -27,6 +27,7 @@  ENTRY(entry_int82)
 
         mov   %rsp, %rdi
         call  do_entry_int82
+ENDPROC(entry_int82)
 
 /* %rbx: struct vcpu */
 ENTRY(compat_test_all_events)
@@ -116,6 +117,7 @@  compat_process_trap:
         leaq  VCPU_trap_bounce(%rbx),%rdx
         call  compat_create_bounce_frame
         jmp   compat_test_all_events
+ENDPROC(compat_test_all_events)
 
 /* %rbx: struct vcpu, interrupts disabled */
 ENTRY(compat_restore_all_guest)
@@ -161,6 +163,7 @@  ENTRY(compat_restore_all_guest)
         RESTORE_ALL adj=8 compat=1
 .Lft0:  iretq
         _ASM_PRE_EXTABLE(.Lft0, handle_exception)
+ENDPROC(compat_restore_all_guest)
 
 /* This mustn't modify registers other than %rax. */
 ENTRY(cr4_pv32_restore)
@@ -193,6 +196,7 @@  ENTRY(cr4_pv32_restore)
         pop   %rdx
         xor   %eax, %eax
         ret
+ENDPROC(cr4_pv32_restore)
 
 ENTRY(compat_syscall)
         /* Fix up reported %cs/%ss for compat domains. */
@@ -222,6 +226,7 @@  UNLIKELY_END(compat_syscall_gpf)
         movw  %si,TRAPBOUNCE_cs(%rdx)
         movb  %cl,TRAPBOUNCE_flags(%rdx)
         jmp   .Lcompat_bounce_exception
+ENDPROC(compat_syscall)
 
 ENTRY(compat_sysenter)
         CR4_PV32_RESTORE
@@ -236,11 +241,13 @@  ENTRY(compat_sysenter)
         movw  %ax,TRAPBOUNCE_cs(%rdx)
         call  compat_create_bounce_frame
         jmp   compat_test_all_events
+ENDPROC(compat_sysenter)
 
 ENTRY(compat_int80_direct_trap)
         CR4_PV32_RESTORE
         call  compat_create_bounce_frame
         jmp   compat_test_all_events
+ENDPROC(compat_int80_direct_trap)
 
 /* CREATE A BASIC EXCEPTION FRAME ON GUEST OS (RING-1) STACK:            */
 /*   {[ERRCODE,] EIP, CS, EFLAGS, [ESP, SS]}                             */
@@ -352,3 +359,4 @@  compat_crash_page_fault:
         jmp   .Lft14
 .previous
         _ASM_EXTABLE(.Lft14, .Lfx14)
+ENDPROC(compat_create_bounce_frame)
--- a/xen/arch/x86/x86_64/entry.S
+++ b/xen/arch/x86/x86_64/entry.S
@@ -139,6 +139,7 @@  process_trap:
         leaq VCPU_trap_bounce(%rbx), %rdx
         call create_bounce_frame
         jmp  test_all_events
+ENDPROC(switch_to_kernel)
 
         .section .text.entry, "ax", @progbits
 
@@ -230,6 +231,7 @@  iret_exit_to_guest:
         addq  $8,%rsp
 .Lft0:  iretq
         _ASM_PRE_EXTABLE(.Lft0, handle_exception)
+ENDPROC(restore_all_guest)
 
 /*
  * When entering SYSCALL from kernel mode:
@@ -281,6 +283,7 @@  ENTRY(lstar_enter)
         mov   %rsp, %rdi
         call  pv_hypercall
         jmp   test_all_events
+ENDPROC(lstar_enter)
 
 /* See lstar_enter for entry register state. */
 ENTRY(cstar_enter)
@@ -320,6 +323,7 @@  ENTRY(cstar_enter)
         jne   compat_syscall
 #endif
         jmp   switch_to_kernel
+ENDPROC(cstar_enter)
 
 ENTRY(sysenter_entry)
         ENDBR64
@@ -384,6 +388,7 @@  UNLIKELY_END(sysenter_gpf)
         jne   compat_sysenter
 #endif
         jmp   .Lbounce_exception
+ENDPROC(sysenter_entry)
 
 ENTRY(int80_direct_trap)
         ENDBR64
@@ -473,6 +478,7 @@  int80_slow_path:
          */
         GET_STACK_END(14)
         jmp   handle_exception_saved
+ENDPROC(int80_direct_trap)
 
         /* create_bounce_frame & helpers don't need to be in .text.entry */
         .text
@@ -617,6 +623,7 @@  ENTRY(dom_crash_sync_extable)
         xorl  %edi,%edi
         jmp   asm_domain_crash_synchronous /* Does not return */
         .popsection
+ENDPROC(create_bounce_frame)
 #endif /* CONFIG_PV */
 
 /* --- CODE BELOW THIS LINE (MOSTLY) NOT GUEST RELATED --- */
@@ -640,10 +647,12 @@  ret_from_intr:
 #else
         jmp   test_all_events
 #endif
+ENDPROC(continue_pv_domain)
 #else
 ret_from_intr:
         ASSERT_CONTEXT_IS_XEN
         jmp   restore_all_xen
+ENDPROC(ret_from_intr)
 #endif
 
         .section .text.entry, "ax", @progbits
@@ -667,6 +676,7 @@  UNLIKELY_END(exit_cr3)
 
         RESTORE_ALL adj=8
         iretq
+ENDPROC(restore_all_xen)
 
 ENTRY(common_interrupt)
         ALTERNATIVE "", clac, X86_FEATURE_XEN_SMAP
@@ -697,10 +707,12 @@  ENTRY(common_interrupt)
         mov   %r15, STACK_CPUINFO_FIELD(xen_cr3)(%r14)
         mov   %bl, STACK_CPUINFO_FIELD(use_pv_cr3)(%r14)
         jmp ret_from_intr
+ENDPROC(common_interrupt)
 
 ENTRY(page_fault)
         ENDBR64
         movl  $TRAP_page_fault,4(%rsp)
+ENDPROC(page_fault)
 /* No special register assumptions. */
 GLOBAL(handle_exception)
         ALTERNATIVE "", clac, X86_FEATURE_XEN_SMAP
@@ -872,12 +884,14 @@  FATAL_exception_with_ints_disabled:
         movq  %rsp,%rdi
         call  fatal_trap
         BUG   /* fatal_trap() shouldn't return. */
+ENDPROC(handle_exception)
 
 ENTRY(divide_error)
         ENDBR64
         pushq $0
         movl  $TRAP_divide_error,4(%rsp)
         jmp   handle_exception
+ENDPROC(divide_error)
 
 ENTRY(coprocessor_error)
         ENDBR64
@@ -890,72 +904,85 @@  ENTRY(simd_coprocessor_error)
         pushq $0
         movl  $TRAP_simd_error,4(%rsp)
         jmp   handle_exception
+ENDPROC(coprocessor_error)
 
 ENTRY(device_not_available)
         ENDBR64
         pushq $0
         movl  $TRAP_no_device,4(%rsp)
         jmp   handle_exception
+ENDPROC(device_not_available)
 
 ENTRY(debug)
         ENDBR64
         pushq $0
         movl  $TRAP_debug,4(%rsp)
         jmp   handle_ist_exception
+ENDPROC(debug)
 
 ENTRY(int3)
         ENDBR64
         pushq $0
         movl  $TRAP_int3,4(%rsp)
         jmp   handle_exception
+ENDPROC(int3)
 
 ENTRY(overflow)
         ENDBR64
         pushq $0
         movl  $TRAP_overflow,4(%rsp)
         jmp   handle_exception
+ENDPROC(overflow)
 
 ENTRY(bounds)
         ENDBR64
         pushq $0
         movl  $TRAP_bounds,4(%rsp)
         jmp   handle_exception
+ENDPROC(bounds)
 
 ENTRY(invalid_op)
         ENDBR64
         pushq $0
         movl  $TRAP_invalid_op,4(%rsp)
         jmp   handle_exception
+ENDPROC(invalid_op)
 
 ENTRY(invalid_TSS)
         ENDBR64
         movl  $TRAP_invalid_tss,4(%rsp)
         jmp   handle_exception
+ENDPROC(invalid_TSS)
 
 ENTRY(segment_not_present)
         ENDBR64
         movl  $TRAP_no_segment,4(%rsp)
         jmp   handle_exception
+ENDPROC(segment_not_present)
 
 ENTRY(stack_segment)
         ENDBR64
         movl  $TRAP_stack_error,4(%rsp)
         jmp   handle_exception
+ENDPROC(stack_segment)
 
 ENTRY(general_protection)
         ENDBR64
         movl  $TRAP_gp_fault,4(%rsp)
         jmp   handle_exception
+ENDPROC(general_protection)
 
 ENTRY(alignment_check)
         ENDBR64
         movl  $TRAP_alignment_check,4(%rsp)
         jmp   handle_exception
+ENDPROC(alignment_check)
 
 ENTRY(entry_CP)
         ENDBR64
         movl  $X86_EXC_CP, 4(%rsp)
         jmp   handle_exception
+ENDPROC(entry_CP)
 
 ENTRY(double_fault)
         ENDBR64
@@ -981,6 +1008,7 @@  ENTRY(double_fault)
         movq  %rsp,%rdi
         call  do_double_fault
         BUG   /* do_double_fault() shouldn't return. */
+ENDPROC(double_fault)
 
         .pushsection .init.text, "ax", @progbits
 ENTRY(early_page_fault)
@@ -990,6 +1018,7 @@  ENTRY(early_page_fault)
         movq  %rsp,%rdi
         call  do_early_page_fault
         jmp   restore_all_xen
+ENDPROC(early_page_fault)
         .popsection
 
 ENTRY(nmi)
@@ -1120,17 +1149,20 @@  handle_ist_exception:
         ASSERT_CONTEXT_IS_XEN
         jmp   restore_all_xen
 #endif
+ENDPROC(nmi)
 
 ENTRY(machine_check)
         ENDBR64
         pushq $0
         movl  $TRAP_machine_check,4(%rsp)
         jmp   handle_ist_exception
+ENDPROC(machine_check)
 
 /* No op trap handler.  Required for kexec crash path. */
 GLOBAL(trap_nop)
         ENDBR64
         iretq
+ENDPROC(trap_nop)
 
 /* Table of automatically generated entry points.  One per vector. */
         .pushsection .init.rodata, "a", @progbits
@@ -1187,6 +1219,7 @@  autogen_stubs: /* Automatically generate
 
         vec = vec + 1
         .endr
+ENDPROC(autogen_stubs)
 
         .section .init.rodata, "a", @progbits
-        .size autogen_entrypoints, . - autogen_entrypoints
+        END(autogen_entrypoints)