[v6,6/7] x86: convert misc assembly function annotations

Message ID: 4b0a581d-be2b-444d-a044-668b5e2e2279@suse.com (mailing list archive)
State: New
Series: [v6,1/7] common: honor CONFIG_CC_SPLIT_SECTIONS also for assembly functions

Commit Message

Jan Beulich Feb. 7, 2024, 1:38 p.m. UTC
Use the generic framework from xen/linkage.h.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
v6: New.
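
For reference, FUNC()/LABEL()/DATA()/END() come from xen/linkage.h, introduced earlier in this series. The sketch below shows, in simplified form, what these annotations are expected to emit, inferred from the open-coded directives that the hunks further down delete; it is not the literal header contents, and alignment/fill handling in particular is omitted.

        /* Simplified sketch only, not the verbatim xen/linkage.h definitions. */

        /* FUNC(name): global function entry point. */
        .globl name
        .type  name, @function
name:

        /* LABEL(name, 0): global label, no function type, no extra alignment. */
        .globl name
name:

        /*
         * DATA(name, align): global data object (STT_OBJECT); the second
         * argument appears to be the requested alignment, cf. the dropped
         * .p2align in the hypercall page hunk below.
         */
        .globl name
        .type  name, @object
name:

        /* END(name): closes any of the above by recording the symbol's size. */
        .size  name, . - name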

Comments

Andrew Cooper Feb. 7, 2024, 2:11 p.m. UTC | #1
On 07/02/2024 1:38 pm, Jan Beulich wrote:

> --- a/xen/arch/x86/x86_64/entry.S
> +++ b/xen/arch/x86/x86_64/entry.S
> @@ -599,7 +599,7 @@ domain_crash_page_fault_0x8:
>          ALTERNATIVE "", clac, X86_FEATURE_XEN_SMAP
>          movq  %rsi,%rdi
>          call  show_page_walk
> -ENTRY(dom_crash_sync_extable)
> +LABEL(dom_crash_sync_extable, 0)
>          ALTERNATIVE "", clac, X86_FEATURE_XEN_SMAP
>          # Get out of the guest-save area of the stack.
>          GET_STACK_END(ax)
>

This again is a function, and one even used across translation units.

Furthermore, it's a (domain) fatal error path.  It has the least excuse
of all to not conform to a regular function-like layout.

Everything else looks fine.  If you want to split this out into a
separate patch to address its function-ness, then consider the remainder
Acked-by: Andrew Cooper <andrew.cooper3@citrix.com>
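
For illustration, addressing the remark above would presumably mean giving the symbol a proper FUNC()/END() pair, roughly as sketched below. This is not part of the posted patch: the preceding page-fault error path falls through into this label, so any alignment/fill that FUNC() inserts would need to be taken into account (or the fall-through replaced by an explicit jump) in such a follow-up.

        /* Sketch only - a possible follow-up, not part of this patch. */
FUNC(dom_crash_sync_extable)
        ALTERNATIVE "", clac, X86_FEATURE_XEN_SMAP
        # Get out of the guest-save area of the stack.
        GET_STACK_END(ax)
        ...
END(dom_crash_sync_extable)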

Patch

--- a/xen/arch/x86/clear_page.S
+++ b/xen/arch/x86/clear_page.S
@@ -1,8 +1,9 @@ 
         .file __FILE__
 
+#include <xen/linkage.h>
 #include <asm/page.h>
 
-ENTRY(clear_page_sse2)
+FUNC(clear_page_sse2)
         mov     $PAGE_SIZE/32, %ecx
         xor     %eax,%eax
 
@@ -16,6 +17,4 @@  ENTRY(clear_page_sse2)
 
         sfence
         ret
-
-        .type clear_page_sse2, @function
-        .size clear_page_sse2, . - clear_page_sse2
+END(clear_page_sse2)
--- a/xen/arch/x86/copy_page.S
+++ b/xen/arch/x86/copy_page.S
@@ -1,5 +1,6 @@ 
         .file __FILE__
 
+#include <xen/linkage.h>
 #include <asm/page.h>
 
 #define src_reg %rsi
@@ -10,7 +11,7 @@ 
 #define tmp3_reg %r10
 #define tmp4_reg %r11
 
-ENTRY(copy_page_sse2)
+FUNC(copy_page_sse2)
         mov     $PAGE_SIZE/(4*WORD_SIZE)-3, %ecx
 
         prefetchnta 2*4*WORD_SIZE(src_reg)
@@ -41,6 +42,4 @@  ENTRY(copy_page_sse2)
 
         sfence
         ret
-
-        .type copy_page_sse2, @function
-        .size copy_page_sse2, . - copy_page_sse2
+END(copy_page_sse2)
--- a/xen/arch/x86/guest/xen/hypercall_page.S
+++ b/xen/arch/x86/guest/xen/hypercall_page.S
@@ -3,13 +3,11 @@ 
 #include <public/xen.h>
 
         .section ".text.page_aligned", "ax", @progbits
-        .p2align PAGE_SHIFT
 
-GLOBAL(hypercall_page)
+DATA(hypercall_page, PAGE_SIZE)
          /* Poisoned with `ret` for safety before hypercalls are set up. */
         .fill PAGE_SIZE, 1, 0xc3
-        .type hypercall_page, STT_OBJECT
-        .size hypercall_page, PAGE_SIZE
+END(hypercall_page)
 
 /*
  * Identify a specific hypercall in the hypercall page
--- a/xen/arch/x86/indirect-thunk.S
+++ b/xen/arch/x86/indirect-thunk.S
@@ -35,15 +35,13 @@ 
 .macro GEN_INDIRECT_THUNK reg:req
         .section .text.__x86_indirect_thunk_\reg, "ax", @progbits
 
-ENTRY(__x86_indirect_thunk_\reg)
+FUNC(__x86_indirect_thunk_\reg)
         ALTERNATIVE_2 __stringify(IND_THUNK_RETPOLINE \reg),              \
         __stringify(IND_THUNK_LFENCE \reg), X86_FEATURE_IND_THUNK_LFENCE, \
         __stringify(IND_THUNK_JMP \reg),    X86_FEATURE_IND_THUNK_JMP
 
         int3 /* Halt straight-line speculation */
-
-        .size __x86_indirect_thunk_\reg, . - __x86_indirect_thunk_\reg
-        .type __x86_indirect_thunk_\reg, @function
+END(__x86_indirect_thunk_\reg)
 .endm
 
 /* Instantiate GEN_INDIRECT_THUNK for each register except %rsp. */
--- a/xen/arch/x86/pv/gpr_switch.S
+++ b/xen/arch/x86/pv/gpr_switch.S
@@ -10,7 +10,7 @@ 
 #include <asm/asm_defns.h>
 
 /* Load guest GPRs.  Parameter in %rdi, clobbers all registers. */
-ENTRY(load_guest_gprs)
+FUNC(load_guest_gprs)
         movq  UREGS_rdx(%rdi), %rdx
         movq  UREGS_rax(%rdi), %rax
         movq  UREGS_rbx(%rdi), %rbx
@@ -27,13 +27,10 @@  ENTRY(load_guest_gprs)
         movq  UREGS_rcx(%rdi), %rcx
         movq  UREGS_rdi(%rdi), %rdi
         ret
-
-        .size load_guest_gprs, . - load_guest_gprs
-        .type load_guest_gprs, STT_FUNC
-
+END(load_guest_gprs)
 
 /* Save guest GPRs.  Parameter on the stack above the return address. */
-ENTRY(save_guest_gprs)
+FUNC(save_guest_gprs)
         pushq %rdi
         movq  2*8(%rsp), %rdi
         movq  %rax, UREGS_rax(%rdi)
@@ -52,6 +49,4 @@  ENTRY(save_guest_gprs)
         movq  %rdx, UREGS_rdx(%rdi)
         movq  %rcx, UREGS_rcx(%rdi)
         ret
-
-        .size save_guest_gprs, . - save_guest_gprs
-        .type save_guest_gprs, STT_FUNC
+END(save_guest_gprs)
--- a/xen/arch/x86/x86_64/entry.S
+++ b/xen/arch/x86/x86_64/entry.S
@@ -599,7 +599,7 @@  domain_crash_page_fault_0x8:
         ALTERNATIVE "", clac, X86_FEATURE_XEN_SMAP
         movq  %rsi,%rdi
         call  show_page_walk
-ENTRY(dom_crash_sync_extable)
+LABEL(dom_crash_sync_extable, 0)
         ALTERNATIVE "", clac, X86_FEATURE_XEN_SMAP
         # Get out of the guest-save area of the stack.
         GET_STACK_END(ax)