
[v2,1/1] xen/arm64: entry: Add missing code symbol annotations

Message ID 20240415231541.4140052-2-edgar.iglesias@gmail.com
State New
Series xen/arm: Annotate code symbols

Commit Message

Edgar E. Iglesias April 15, 2024, 11:15 p.m. UTC
From: "Edgar E. Iglesias" <edgar.iglesias@amd.com>

Use the generic xen/linkage.h macros when and add missing
code symbol annotations.

Signed-off-by: Edgar E. Iglesias <edgar.iglesias@amd.com>
---
 xen/arch/arm/arm64/entry.S | 72 +++++++++++++++++++++++++-------------
 1 file changed, 48 insertions(+), 24 deletions(-)

Comments

Stefano Stabellini April 25, 2024, 11:13 p.m. UTC | #1
On Tue, 16 Apr 2024, Edgar E. Iglesias wrote:
> From: "Edgar E. Iglesias" <edgar.iglesias@amd.com>
> 
> Use the generic xen/linkage.h macros when and add missing
                                        ^ when what?

> code symbol annotations.
> 
> Signed-off-by: Edgar E. Iglesias <edgar.iglesias@amd.com>

I am looking at the implementation of FUNC and as far as I can tell
there is no change compared to ENTRY. So from that point of view we are
good. I wonder if we should keep using "ENTRY" because it is nice to
mark the entry points explicitly as such, but at the same time I am also
OK with this. I'll let the other ARM maintainers decide.

On the other hand, FUNC_LOCAL does introduce a change: it adds a .align
everywhere. That should not be harmful, should it?
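
(Editorial aside: to make the FUNC/FUNC_LOCAL/END discussion above concrete,
here is a minimal sketch of what annotation macros of this general shape
typically expand to. It is illustrative only and not the actual xen/linkage.h
source; the .balign value, the macro bodies and the hyp_demo symbol are
assumptions made for the example. In this sketch the only difference between
FUNC and FUNC_LOCAL is the .globl, END records the symbol's ELF size, and the
alignment in front of a previously bare label merely inserts padding, which
only matters if code falls through into that label.)

/*
 * sketch.S -- illustrative only; assemble with e.g.
 *   aarch64-linux-gnu-gcc -c sketch.S
 */
#define SYM_ALIGN               .balign 16   /* alignment value is an assumption */

#define FUNC(name)              \
        SYM_ALIGN;              \
        .globl name;            \
        .type name, %function;  \
name:

#define FUNC_LOCAL(name)        \
        SYM_ALIGN;              \
        .type name, %function;  \
name:

#define END(name)               \
        .size name, . - name

FUNC_LOCAL(hyp_demo)            /* hypothetical local helper */
        ret
END(hyp_demo)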

With the commit message fixed:

Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>


> [...]

Jan Beulich April 26, 2024, 6:02 a.m. UTC | #2
On 26.04.2024 01:13, Stefano Stabellini wrote:
> On Tue, 16 Apr 2024, Edgar E. Iglesias wrote:
>> From: "Edgar E. Iglesias" <edgar.iglesias@amd.com>
>>
>> Use the generic xen/linkage.h macros when and add missing
>                                         ^ when what?
> 
>> code symbol annotations.
>>
>> Signed-off-by: Edgar E. Iglesias <edgar.iglesias@amd.com>
> 
> I am looking at the implementation of FUNC and as far as I can tell
> there is no change compared to ENTRY. So from that point of view we are
> good. I wonder if we should keep using "ENTRY" because it is nice to
> mark the entry points explicitly as such, but at the same time I am also
> OK with this. I'll let the other ARM maintainers decide.

Just to mention it: ENTRY should go away (which is why PPC and RISC-V have
already dropped it, while x86 has patches pending to reduce its scope
enough), not least to finally allow the oddity near the top of xen.lds.S
to go away.

Jan
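
(Editorial aside: a guess at the xen.lds.S oddity referred to above, offered as
an assumption rather than fact. The linker scripts are run through the C
preprocessor together with headers that define an assembler ENTRY() macro,
which collides with the linker-script ENTRY() command, so the scripts have to
undefine it near the top; once the assembler macro is gone, that workaround can
go away as well. A hypothetical fragment, not the actual xen.lds.S contents:)

/* Hypothetical fragment of a preprocessed linker script -- not from the tree. */
#undef ENTRY            /* drop the asm ENTRY() macro pulled in via headers */

ENTRY(start)            /* ld's ENTRY() command; the symbol name is illustrative */
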
Stefano Stabellini April 26, 2024, 7:13 p.m. UTC | #3
On Fri, 26 Apr 2024, Jan Beulich wrote:
> On 26.04.2024 01:13, Stefano Stabellini wrote:
> > On Tue, 16 Apr 2024, Edgar E. Iglesias wrote:
> >> From: "Edgar E. Iglesias" <edgar.iglesias@amd.com>
> >>
> >> Use the generic xen/linkage.h macros when and add missing
> >                                         ^ when what?
> > 
> >> code symbol annotations.
> >>
> >> Signed-off-by: Edgar E. Iglesias <edgar.iglesias@amd.com>
> > 
> > I am looking at the implementation of FUNC and as far as I can tell
> > there is no change compared to ENTRY. So from that point of view we are
> > good. I wonder if we should keep using "ENTRY" because it is nice to
> > mark the entry points explicitly as such, but at the same time I am also
> > OK with this. I'll let the other ARM maintainers decide.
> 
> Just to mention it: ENTRY should go away (which is why PPC and RISC-V have
> already dropped it, while x86 has patches pending to reduce its scope
> enough), not least to finally allow the oddity near the top of xen.lds.S
> to go away.

I didn't realize that. OK, understood.

Patch

diff --git a/xen/arch/arm/arm64/entry.S b/xen/arch/arm/arm64/entry.S
index f963c923bb..af9a592cae 100644
--- a/xen/arch/arm/arm64/entry.S
+++ b/xen/arch/arm/arm64/entry.S
@@ -289,21 +289,25 @@ 
         b       do_bad_mode
         .endm
 
-hyp_sync_invalid:
+FUNC_LOCAL(hyp_sync_invalid)
         entry   hyp=1
         invalid BAD_SYNC
+END(hyp_sync_invalid)
 
-hyp_irq_invalid:
+FUNC_LOCAL(hyp_irq_invalid)
         entry   hyp=1
         invalid BAD_IRQ
+END(hyp_irq_invalid)
 
-hyp_fiq_invalid:
+FUNC_LOCAL(hyp_fiq_invalid)
         entry   hyp=1
         invalid BAD_FIQ
+END(hyp_fiq_invalid)
 
-hyp_error_invalid:
+FUNC_LOCAL(hyp_error_invalid)
         entry   hyp=1
         invalid BAD_ERROR
+END(hyp_error_invalid)
 
 /*
  * SError received while running in the hypervisor mode.
@@ -313,11 +317,12 @@ hyp_error_invalid:
  * simplicity, as SError should be rare and potentially fatal,
  * all interrupts are kept masked.
  */
-hyp_error:
+FUNC_LOCAL(hyp_error)
         entry   hyp=1
         mov     x0, sp
         bl      do_trap_hyp_serror
         exit    hyp=1
+END(hyp_error)
 
 /*
  * Synchronous exception received while running in the hypervisor mode.
@@ -327,7 +332,7 @@ hyp_error:
  * some of them. So we want to inherit the state from the interrupted
  * context.
  */
-hyp_sync:
+FUNC_LOCAL(hyp_sync)
         entry   hyp=1
 
         /* Inherit interrupts */
@@ -338,6 +343,7 @@ hyp_sync:
         mov     x0, sp
         bl      do_trap_hyp_sync
         exit    hyp=1
+END(hyp_sync)
 
 /*
  * IRQ received while running in the hypervisor mode.
@@ -352,7 +358,7 @@ hyp_sync:
  * would require some rework in some paths (e.g. panic, livepatch) to
  * ensure the ordering is enforced everywhere.
  */
-hyp_irq:
+FUNC_LOCAL(hyp_irq)
         entry   hyp=1
 
         /* Inherit D, A, F interrupts and keep I masked */
@@ -365,8 +371,9 @@ hyp_irq:
         mov     x0, sp
         bl      do_trap_irq
         exit    hyp=1
+END(hyp_irq)
 
-guest_sync:
+FUNC_LOCAL(guest_sync)
         /*
          * Save x0, x1 in advance
          */
@@ -413,8 +420,9 @@ fastpath_out_workaround:
         mov     x1, xzr
         eret
         sb
+END(guest_sync)
 
-wa2_ssbd:
+FUNC_LOCAL(wa2_ssbd)
 #ifdef CONFIG_ARM_SSBD
 alternative_cb arm_enable_wa2_handling
         b       wa2_end
@@ -450,42 +458,55 @@ wa2_end:
         mov     x0, xzr
         eret
         sb
-guest_sync_slowpath:
+END(wa2_ssbd)
+
+FUNC_LOCAL(guest_sync_slowpath)
         /*
          * x0/x1 may have been scratch by the fast path above, so avoid
          * to save them.
          */
         guest_vector compat=0, iflags=IFLAGS__AI_, trap=guest_sync, save_x0_x1=0
+END(guest_sync_slowpath)
 
-guest_irq:
+FUNC_LOCAL(guest_irq)
         guest_vector compat=0, iflags=IFLAGS__A__, trap=irq
+END(guest_irq)
 
-guest_fiq_invalid:
+FUNC_LOCAL(guest_fiq_invalid)
         entry   hyp=0, compat=0
         invalid BAD_FIQ
+END(guest_fiq_invalid)
 
-guest_error:
+FUNC_LOCAL(guest_error)
         guest_vector compat=0, iflags=IFLAGS__AI_, trap=guest_serror
+END(guest_error)
 
-guest_sync_compat:
+FUNC_LOCAL(guest_sync_compat)
         guest_vector compat=1, iflags=IFLAGS__AI_, trap=guest_sync
+END(guest_sync_compat)
 
-guest_irq_compat:
+FUNC_LOCAL(guest_irq_compat)
         guest_vector compat=1, iflags=IFLAGS__A__, trap=irq
+END(guest_irq_compat)
 
-guest_fiq_invalid_compat:
+FUNC_LOCAL(guest_fiq_invalid_compat)
         entry   hyp=0, compat=1
         invalid BAD_FIQ
+END(guest_fiq_invalid_compat)
 
-guest_error_compat:
+FUNC_LOCAL(guest_error_compat)
         guest_vector compat=1, iflags=IFLAGS__AI_, trap=guest_serror
+END(guest_error_compat)
 
-ENTRY(return_to_new_vcpu32)
+FUNC(return_to_new_vcpu32)
         exit    hyp=0, compat=1
-ENTRY(return_to_new_vcpu64)
+END(return_to_new_vcpu32)
+
+FUNC(return_to_new_vcpu64)
         exit    hyp=0, compat=0
+END(return_to_new_vcpu64)
 
-return_from_trap:
+FUNC_LOCAL(return_from_trap)
         msr     daifset, #IFLAGS___I_ /* Mask interrupts */
 
         ldr     x21, [sp, #UREGS_PC]            /* load ELR */
@@ -524,6 +545,7 @@ return_from_trap:
 
         eret
         sb
+END(return_from_trap)
 
 /*
  * Consume pending SError generated by the guest if any.
@@ -536,7 +558,7 @@ return_from_trap:
  * it. So the function will unmask SError exception for a small window and
  * then mask it again.
  */
-check_pending_guest_serror:
+FUNC_LOCAL(check_pending_guest_serror)
         /*
          * Save elr_el2 to check whether the pending SError exception takes
          * place while we are doing this sync exception.
@@ -586,7 +608,7 @@ abort_guest_exit_end:
         cset    x19, ne
 
         ret
-ENDPROC(check_pending_guest_serror)
+END(check_pending_guest_serror)
 
 /*
  * Exception vectors.
@@ -597,7 +619,7 @@ ENDPROC(check_pending_guest_serror)
         .endm
 
         .align  11
-ENTRY(hyp_traps_vector)
+FUNC(hyp_traps_vector)
         ventry  hyp_sync_invalid            /* Synchronous EL2t */
         ventry  hyp_irq_invalid             /* IRQ EL2t */
         ventry  hyp_fiq_invalid             /* FIQ EL2t */
@@ -617,6 +639,7 @@ ENTRY(hyp_traps_vector)
         ventry  guest_irq_compat            /* IRQ 32-bit EL0/EL1 */
         ventry  guest_fiq_invalid_compat    /* FIQ 32-bit EL0/EL1 */
         ventry  guest_error_compat          /* Error 32-bit EL0/EL1 */
+END(hyp_traps_vector)
 
 /*
  * struct vcpu *__context_switch(struct vcpu *prev, struct vcpu *next)
@@ -626,7 +649,7 @@ ENTRY(hyp_traps_vector)
  *
  * Returns prev in x0
  */
-ENTRY(__context_switch)
+FUNC(__context_switch)
         add     x8, x0, #VCPU_arch_saved_context
         mov     x9, sp
         stp     x19, x20, [x8], #16         /* store callee-saved registers */
@@ -647,6 +670,7 @@ ENTRY(__context_switch)
         ldr     lr, [x8]
         mov     sp, x9
         ret
+END(__context_switch)
 
 /*
  * Local variables: