[v7,07/11] Arm32: use new-style entry annotations for library code

Message ID: 658f0a05-742e-44ed-bf3f-bd6bb8c694a7@suse.com (mailing list archive)
State: New
Series: (mostly) x86+Arm32: add/convert entry point annotations

Commit Message

Jan Beulich Oct. 1, 2024, 3:16 p.m. UTC
No functional change, albeit all globals now become hidden, and aliasing
symbols (__aeabi_{u,}idiv, __aeabi_llsr) as well as __memzero lose their
function-ness and size.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
If the function-ness is important, some new construct would need
inventing. Not setting size for the aliases may even be desirable, as
I'm uncertain whether it is really legal in ELF for two entities to
overlap in space.
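
For illustration, roughly what the two styles amount to in assembler
directives, using the __udivsi3 / __aeabi_uidiv pair as the example. This
is a deliberately simplified sketch (the real macros behind FUNC / LABEL /
END also deal with alignment and padding), only meant to show the
visibility / type / size point above:

	@ Old style: ENTRY(x) ... ENDPROC(x), done for both symbols,
	@ amounted to roughly:
	.globl	__aeabi_uidiv
__aeabi_uidiv:
	@ ... function body ...
	.type	__aeabi_uidiv, %function
	.size	__aeabi_uidiv, . - __aeabi_uidiv

	@ New style: FUNC(__udivsi3) / LABEL(__aeabi_uidiv) ... END(__udivsi3)
	@ becomes roughly:
	.globl	__udivsi3
	.hidden	__udivsi3
	.type	__udivsi3, %function
__udivsi3:
	.globl	__aeabi_uidiv
	.hidden	__aeabi_uidiv
__aeabi_uidiv:
	@ ... function body ...
	.size	__udivsi3, . - __udivsi3

I.e. only __udivsi3 keeps STT_FUNC type and a size (covering both entry
points), while the alias degrades to a plain hidden label without type or
size.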

I fear I'm breaking __memzero(), as I don't understand the purpose of
the ".word 0" next to where the FUNC_LOCAL() appears.
---
v7: New.

Patch

--- a/xen/arch/arm/arm32/lib/findbit.S
+++ b/xen/arch/arm/arm32/lib/findbit.S
@@ -20,7 +20,7 @@ 
  * Purpose  : Find a 'zero' bit
  * Prototype: int find_first_zero_bit(void *addr, unsigned int maxbit);
  */
-ENTRY(_find_first_zero_bit_le)
+FUNC(_find_first_zero_bit_le)
 		teq	r1, #0	
 		beq	3f
 		mov	r2, #0
@@ -35,13 +35,13 @@  ENTRY(_find_first_zero_bit_le)
 		blo	1b
 3:		mov	r0, r1			@ no free bits
 		mov	pc, lr
-ENDPROC(_find_first_zero_bit_le)
+END(_find_first_zero_bit_le)
 
 /*
  * Purpose  : Find next 'zero' bit
  * Prototype: int find_next_zero_bit(void *addr, unsigned int maxbit, int offset)
  */
-ENTRY(_find_next_zero_bit_le)
+FUNC(_find_next_zero_bit_le)
 		cmp	r1, r2
 		bls	3b
 		ands	ip, r2, #7
@@ -55,13 +55,13 @@  ENTRY(_find_next_zero_bit_le)
 		orr	r2, r2, #7		@ if zero, then no bits here
 		add	r2, r2, #1		@ align bit pointer
 		b	2b			@ loop for next bit
-ENDPROC(_find_next_zero_bit_le)
+END(_find_next_zero_bit_le)
 
 /*
  * Purpose  : Find a 'one' bit
  * Prototype: int find_first_bit(const unsigned long *addr, unsigned int maxbit);
  */
-ENTRY(_find_first_bit_le)
+FUNC(_find_first_bit_le)
 		teq	r1, #0	
 		beq	3f
 		mov	r2, #0
@@ -76,13 +76,13 @@  ENTRY(_find_first_bit_le)
 		blo	1b
 3:		mov	r0, r1			@ no free bits
 		mov	pc, lr
-ENDPROC(_find_first_bit_le)
+END(_find_first_bit_le)
 
 /*
  * Purpose  : Find next 'one' bit
  * Prototype: int find_next_zero_bit(void *addr, unsigned int maxbit, int offset)
  */
-ENTRY(_find_next_bit_le)
+FUNC(_find_next_bit_le)
 		cmp	r1, r2
 		bls	3b
 		ands	ip, r2, #7
@@ -95,11 +95,11 @@  ENTRY(_find_next_bit_le)
 		orr	r2, r2, #7		@ if zero, then no bits here
 		add	r2, r2, #1		@ align bit pointer
 		b	2b			@ loop for next bit
-ENDPROC(_find_next_bit_le)
+END(_find_next_bit_le)
 
 #ifdef __ARMEB__
 
-ENTRY(_find_first_zero_bit_be)
+FUNC(_find_first_zero_bit_be)
 		teq	r1, #0
 		beq	3f
 		mov	r2, #0
@@ -114,9 +114,9 @@  ENTRY(_find_first_zero_bit_be)
 		blo	1b
 3:		mov	r0, r1			@ no free bits
 		mov	pc, lr
-ENDPROC(_find_first_zero_bit_be)
+END(_find_first_zero_bit_be)
 
-ENTRY(_find_next_zero_bit_be)
+FUNC(_find_next_zero_bit_be)
 		cmp	r1, r2
 		bls	3b
 		ands	ip, r2, #7
@@ -131,9 +131,9 @@  ENTRY(_find_next_zero_bit_be)
 		orr	r2, r2, #7		@ if zero, then no bits here
 		add	r2, r2, #1		@ align bit pointer
 		b	2b			@ loop for next bit
-ENDPROC(_find_next_zero_bit_be)
+END(_find_next_zero_bit_be)
 
-ENTRY(_find_first_bit_be)
+FUNC(_find_first_bit_be)
 		teq	r1, #0
 		beq	3f
 		mov	r2, #0
@@ -148,9 +148,9 @@  ENTRY(_find_first_bit_be)
 		blo	1b
 3:		mov	r0, r1			@ no free bits
 		mov	pc, lr
-ENDPROC(_find_first_bit_be)
+END(_find_first_bit_be)
 
-ENTRY(_find_next_bit_be)
+FUNC(_find_next_bit_be)
 		cmp	r1, r2
 		bls	3b
 		ands	ip, r2, #7
@@ -164,7 +164,7 @@  ENTRY(_find_next_bit_be)
 		orr	r2, r2, #7		@ if zero, then no bits here
 		add	r2, r2, #1		@ align bit pointer
 		b	2b			@ loop for next bit
-ENDPROC(_find_next_bit_be)
+END(_find_next_bit_be)
 
 #endif
 
--- a/xen/arch/arm/arm32/lib/lib1funcs.S
+++ b/xen/arch/arm/arm32/lib/lib1funcs.S
@@ -201,8 +201,8 @@  along with this program; see the file CO
 .endm
 
 
-ENTRY(__udivsi3)
-ENTRY(__aeabi_uidiv)
+FUNC(__udivsi3)
+LABEL(__aeabi_uidiv)
 UNWIND(.fnstart)
 
 	subs	r2, r1, #1
@@ -228,10 +228,9 @@  UNWIND(.fnstart)
 	mov	pc, lr
 
 UNWIND(.fnend)
-ENDPROC(__udivsi3)
-ENDPROC(__aeabi_uidiv)
+END(__udivsi3)
 
-ENTRY(__umodsi3)
+FUNC(__umodsi3)
 UNWIND(.fnstart)
 
 	subs	r2, r1, #1			@ compare divisor with 1
@@ -247,10 +246,10 @@  UNWIND(.fnstart)
 	mov	pc, lr
 
 UNWIND(.fnend)
-ENDPROC(__umodsi3)
+END(__umodsi3)
 
-ENTRY(__divsi3)
-ENTRY(__aeabi_idiv)
+FUNC(__divsi3)
+LABEL(__aeabi_idiv)
 UNWIND(.fnstart)
 
 	cmp	r1, #0
@@ -289,10 +288,9 @@  UNWIND(.fnstart)
 	mov	pc, lr
 
 UNWIND(.fnend)
-ENDPROC(__divsi3)
-ENDPROC(__aeabi_idiv)
+END(__divsi3)
 
-ENTRY(__modsi3)
+FUNC(__modsi3)
 UNWIND(.fnstart)
 
 	cmp	r1, #0
@@ -314,11 +312,11 @@  UNWIND(.fnstart)
 	mov	pc, lr
 
 UNWIND(.fnend)
-ENDPROC(__modsi3)
+END(__modsi3)
 
 #ifdef CONFIG_AEABI
 
-ENTRY(__aeabi_uidivmod)
+FUNC(__aeabi_uidivmod)
 UNWIND(.fnstart)
 UNWIND(.save {r0, r1, ip, lr}	)
 
@@ -330,9 +328,9 @@  UNWIND(.save {r0, r1, ip, lr}	)
 	mov	pc, lr
 
 UNWIND(.fnend)
-ENDPROC(__aeabi_uidivmod)
+END(__aeabi_uidivmod)
 
-ENTRY(__aeabi_idivmod)
+FUNC(__aeabi_idivmod)
 UNWIND(.fnstart)
 UNWIND(.save {r0, r1, ip, lr}	)
 	stmfd	sp!, {r0, r1, ip, lr}
@@ -343,9 +341,9 @@  UNWIND(.save {r0, r1, ip, lr}	)
 	mov	pc, lr
 
 UNWIND(.fnend)
-ENDPROC(__aeabi_idivmod)
+END(__aeabi_idivmod)
 
-ENTRY(__aeabi_uldivmod)
+FUNC(__aeabi_uldivmod)
 UNWIND(.fnstart)
 UNWIND(.save {lr}	)
 	sub sp, sp, #8
@@ -357,9 +355,9 @@  UNWIND(.save {lr}	)
 	mov	pc, lr
 
 UNWIND(.fnend)
-ENDPROC(__aeabi_uldivmod)
+END(__aeabi_uldivmod)
 
-ENTRY(__aeabi_ldivmod)
+FUNC(__aeabi_ldivmod)
 UNWIND(.fnstart)
 UNWIND(.save {lr}	)
 	sub sp, sp, #16
@@ -371,10 +369,10 @@  UNWIND(.save {lr}	)
 	mov	pc, lr
 	
 UNWIND(.fnend)
-ENDPROC(__aeabi_ldivmod)
+END(__aeabi_ldivmod)
 #endif
 
-Ldiv0:
+FUNC_LOCAL(Ldiv0)
 UNWIND(.fnstart)
 UNWIND(.pad #4)
 UNWIND(.save {lr})
@@ -383,4 +381,4 @@  UNWIND(.save {lr})
 	mov	r0, #0			@ About as wrong as it could be.
 	ldr	pc, [sp], #8
 UNWIND(.fnend)
-ENDPROC(Ldiv0)
+END(Ldiv0)
--- a/xen/arch/arm/arm32/lib/lshrdi3.S
+++ b/xen/arch/arm/arm32/lib/lshrdi3.S
@@ -34,8 +34,8 @@  along with this program; see the file CO
 #define ah r1
 #endif
 
-ENTRY(__lshrdi3)
-ENTRY(__aeabi_llsr)
+FUNC(__lshrdi3)
+LABEL(__aeabi_llsr)
 
 	subs	r3, r2, #32
 	rsb	ip, r2, #32
@@ -47,5 +47,4 @@  ENTRY(__aeabi_llsr)
 	mov	ah, ah, lsr r2
 	mov	pc, lr
 
-ENDPROC(__lshrdi3)
-ENDPROC(__aeabi_llsr)
+END(__lshrdi3)
--- a/xen/arch/arm/arm32/lib/memchr.S
+++ b/xen/arch/arm/arm32/lib/memchr.S
@@ -12,8 +12,7 @@ 
 #include "assembler.h"
 
 	.text
-	.align	5
-ENTRY(memchr)
+FUNC(memchr, 32)
 	and	r1, r1, #0xff
 1:	subs	r2, r2, #1
 	bmi	2f
@@ -23,4 +22,4 @@  ENTRY(memchr)
 	sub	r0, r0, #1
 2:	movne	r0, #0
 	mov	pc, lr
-ENDPROC(memchr)
+END(memchr)
--- a/xen/arch/arm/arm32/lib/memcpy.S
+++ b/xen/arch/arm/arm32/lib/memcpy.S
@@ -54,8 +54,8 @@ 
 
 /* Prototype: void *memcpy(void *dest, const void *src, size_t n); */
 
-ENTRY(memcpy)
+FUNC(memcpy)
 
 #include "copy_template.S"
 
-ENDPROC(memcpy)
+END(memcpy)
--- a/xen/arch/arm/arm32/lib/memmove.S
+++ b/xen/arch/arm/arm32/lib/memmove.S
@@ -24,7 +24,7 @@ 
  * occurring in the opposite direction.
  */
 
-ENTRY(memmove)
+FUNC(memmove)
 
 		subs	ip, r0, r1
 		cmphi	r2, ip
@@ -194,4 +194,4 @@  ENTRY(memmove)
 
 18:		backward_copy_shift	push=24	pull=8
 
-ENDPROC(memmove)
+END(memmove)
--- a/xen/arch/arm/arm32/lib/memset.S
+++ b/xen/arch/arm/arm32/lib/memset.S
@@ -12,9 +12,8 @@ 
 #include "assembler.h"
 
 	.text
-	.align	5
 
-ENTRY(memset)
+FUNC(memset, 32)
 	and	r1, r1, #0xff
 	ands	r3, r0, #3		@ 1 unaligned?
 	mov	ip, r0			@ preserve r0 as return value
@@ -120,4 +119,4 @@  ENTRY(memset)
 	strb	r1, [ip], #1		@ 1
 	add	r2, r2, r3		@ 1 (r2 = r2 - (4 - r3))
 	b	1b
-ENDPROC(memset)
+END(memset)
--- a/xen/arch/arm/arm32/lib/memzero.S
+++ b/xen/arch/arm/arm32/lib/memzero.S
@@ -10,7 +10,7 @@ 
 #include "assembler.h"
 
 	.text
-	.align	5
+FUNC_LOCAL(_memzero_, 32)
 	.word	0
 /*
  * Align the pointer in r0.  r3 contains the number of bytes that we are
@@ -29,7 +29,7 @@ 
  * memzero again.
  */
 
-ENTRY(__memzero)
+LABEL(__memzero)
 	mov	r2, #0			@ 1
 	ands	r3, r0, #3		@ 1 unaligned?
 	bne	1b			@ 1
@@ -121,4 +121,4 @@  ENTRY(__memzero)
 	tst	r1, #1			@ 1 a byte left over
 	strneb	r2, [r0], #1		@ 1
 	mov	pc, lr			@ 1
-ENDPROC(__memzero)
+END(_memzero_)
--- a/xen/arch/arm/arm32/lib/strchr.S
+++ b/xen/arch/arm/arm32/lib/strchr.S
@@ -14,8 +14,7 @@ 
 #include "assembler.h"
 
 		.text
-		.align	5
-ENTRY(strchr)
+FUNC(strchr, 32)
 		and	r1, r1, #0xff
 1:		ldrb	r2, [r0], #1
 		teq	r2, r1
@@ -25,4 +24,4 @@  ENTRY(strchr)
 		movne	r0, #0
 		subeq	r0, r0, #1
 		mov	pc, lr
-ENDPROC(strchr)
+END(strchr)
--- a/xen/arch/arm/arm32/lib/strrchr.S
+++ b/xen/arch/arm/arm32/lib/strrchr.S
@@ -12,8 +12,7 @@ 
 #include "assembler.h"
 
 		.text
-		.align	5
-ENTRY(strrchr)
+FUNC(strrchr, 32)
 		and	r1, r1, #0xff
 		mov	r3, #0
 1:		ldrb	r2, [r0], #1
@@ -23,4 +22,4 @@  ENTRY(strrchr)
 		bne	1b
 		mov	r0, r3
 		mov	pc, lr
-ENDPROC(strrchr)
+END(strrchr)