[2/3] arm64: lib: Use modern annotations for assembly functions

Message ID 20200106195818.56351-3-broonie@kernel.org (mailing list archive)
State New, archived
Series arm64: Conversions to modern assembly annotations

Commit Message

Mark Brown Jan. 6, 2020, 7:58 p.m. UTC
In an effort to clarify and simplify the annotation of assembly functions
in the kernel, new macros have been introduced. These replace ENTRY and
ENDPROC and also add a new annotation for static functions, which previously
had no ENTRY equivalent. Update the annotations in the library code to the
new macros.

Signed-off-by: Mark Brown <broonie@kernel.org>
---
 arch/arm64/lib/clear_page.S     |  4 ++--
 arch/arm64/lib/clear_user.S     |  4 ++--
 arch/arm64/lib/copy_from_user.S |  4 ++--
 arch/arm64/lib/copy_in_user.S   |  4 ++--
 arch/arm64/lib/copy_page.S      |  4 ++--
 arch/arm64/lib/copy_to_user.S   |  4 ++--
 arch/arm64/lib/crc32.S          |  8 ++++----
 arch/arm64/lib/memchr.S         |  4 ++--
 arch/arm64/lib/memcmp.S         |  4 ++--
 arch/arm64/lib/memcpy.S         |  8 ++++----
 arch/arm64/lib/memmove.S        |  8 ++++----
 arch/arm64/lib/memset.S         |  8 ++++----
 arch/arm64/lib/strchr.S         |  4 ++--
 arch/arm64/lib/strcmp.S         |  4 ++--
 arch/arm64/lib/strlen.S         |  4 ++--
 arch/arm64/lib/strncmp.S        |  4 ++--
 arch/arm64/lib/strnlen.S        |  4 ++--
 arch/arm64/lib/strrchr.S        |  4 ++--
 arch/arm64/lib/tishift.S        | 12 ++++++------
 19 files changed, 50 insertions(+), 50 deletions(-)
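
For readers new to the SYM_* macros, the conversion pattern is mechanical;
a minimal sketch with a hypothetical my_func (the macros are defined in
include/linux/linkage.h):

	/* Before: classic annotations */
	ENTRY(my_func)
		ret
	ENDPROC(my_func)

	/* After: SYM_* annotations. For static functions, which had no
	 * ENTRY equivalent, SYM_FUNC_START_LOCAL is used instead of
	 * SYM_FUNC_START.
	 */
	SYM_FUNC_START(my_func)
		ret
	SYM_FUNC_END(my_func)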

Comments

Will Deacon Jan. 7, 2020, 2:44 p.m. UTC | #1
Hi Mark,

[+Jiri -- couple of questions below]

On Mon, Jan 06, 2020 at 07:58:17PM +0000, Mark Brown wrote:
> In an effort to clarify and simplify the annotation of assembly functions
> in the kernel, new macros have been introduced. These replace ENTRY and
> ENDPROC and also add a new annotation for static functions, which previously
> had no ENTRY equivalent. Update the annotations in the library code to the
> new macros.
> 
> Signed-off-by: Mark Brown <broonie@kernel.org>
> ---
>  arch/arm64/lib/clear_page.S     |  4 ++--
>  arch/arm64/lib/clear_user.S     |  4 ++--
>  arch/arm64/lib/copy_from_user.S |  4 ++--
>  arch/arm64/lib/copy_in_user.S   |  4 ++--
>  arch/arm64/lib/copy_page.S      |  4 ++--
>  arch/arm64/lib/copy_to_user.S   |  4 ++--
>  arch/arm64/lib/crc32.S          |  8 ++++----
>  arch/arm64/lib/memchr.S         |  4 ++--
>  arch/arm64/lib/memcmp.S         |  4 ++--
>  arch/arm64/lib/memcpy.S         |  8 ++++----
>  arch/arm64/lib/memmove.S        |  8 ++++----
>  arch/arm64/lib/memset.S         |  8 ++++----
>  arch/arm64/lib/strchr.S         |  4 ++--
>  arch/arm64/lib/strcmp.S         |  4 ++--
>  arch/arm64/lib/strlen.S         |  4 ++--
>  arch/arm64/lib/strncmp.S        |  4 ++--
>  arch/arm64/lib/strnlen.S        |  4 ++--
>  arch/arm64/lib/strrchr.S        |  4 ++--
>  arch/arm64/lib/tishift.S        | 12 ++++++------
>  19 files changed, 50 insertions(+), 50 deletions(-)
> 
> diff --git a/arch/arm64/lib/clear_page.S b/arch/arm64/lib/clear_page.S
> index 78a9ef66288a..073acbf02a7c 100644
> --- a/arch/arm64/lib/clear_page.S
> +++ b/arch/arm64/lib/clear_page.S
> @@ -14,7 +14,7 @@
>   * Parameters:
>   *	x0 - dest
>   */
> -ENTRY(clear_page)
> +SYM_FUNC_START(clear_page)
>  	mrs	x1, dczid_el0
>  	and	w1, w1, #0xf
>  	mov	x2, #4

Since this doesn't change behaviour, I think the patch is fine. However,
on reading Documentation/asm-annotations.rst it's not completely clear to
me when SYM_FUNC_START() should be used. In this case, for example, we are
*not* pushing a stackframe and that would appear to be at odds with the
documentation.

Jiri -- is it ok to omit the stack frame for leaf functions annotated with
SYM_FUNC_START? I'm guessing it should be, since the link register isn't
going to be clobbered. Could we update the documentation to reflect that?
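
To illustrate, with hypothetical leaf_func/nonleaf_func and the usual
AAPCS64 frame record sequence, the two shapes in question would be:

	/* Leaf: x30 (lr) is never clobbered, so no frame record */
	SYM_FUNC_START(leaf_func)
		mov	x0, #0
		ret
	SYM_FUNC_END(leaf_func)

	/* Non-leaf: create/destroy a frame record around the call */
	SYM_FUNC_START(nonleaf_func)
		stp	x29, x30, [sp, #-16]!
		mov	x29, sp
		bl	leaf_func
		ldp	x29, x30, [sp], #16
		ret
	SYM_FUNC_END(nonleaf_func)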

> diff --git a/arch/arm64/lib/crc32.S b/arch/arm64/lib/crc32.S
> index e6135f16649b..243e107e9896 100644
> --- a/arch/arm64/lib/crc32.S
> +++ b/arch/arm64/lib/crc32.S
> @@ -85,17 +85,17 @@ CPU_BE(	rev16		w3, w3		)
>  	.endm
>  
>  	.align		5
> -ENTRY(crc32_le)
> +SYM_FUNC_START(crc32_le)
>  alternative_if_not ARM64_HAS_CRC32
>  	b		crc32_le_base

Similar thing here -- I'm assuming we are ok to tail-call crc32_le_base()
from a function marked with SYM_FUNC_START like this?
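
(Stripped of the alternative_* plumbing, the shape here is just:

	SYM_FUNC_START(crc32_le)
		b	crc32_le_base	// 'b' rather than 'bl': x30 is left
					// intact, so crc32_le_base returns
					// straight to crc32_le's caller
	SYM_FUNC_END(crc32_le)

i.e. neither path ever sets up a frame record.)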

>  alternative_else_nop_endif
>  	__crc32
> -ENDPROC(crc32_le)
> +SYM_FUNC_END(crc32_le)
>  
>  	.align		5
> -ENTRY(__crc32c_le)
> +SYM_FUNC_START(__crc32c_le)
>  alternative_if_not ARM64_HAS_CRC32
>  	b		__crc32c_le_base
>  alternative_else_nop_endif
>  	__crc32		c
> -ENDPROC(__crc32c_le)
> +SYM_FUNC_END(__crc32c_le)
> diff --git a/arch/arm64/lib/memchr.S b/arch/arm64/lib/memchr.S
> index 48a3ab636e4f..99910c5d5db7 100644
> --- a/arch/arm64/lib/memchr.S
> +++ b/arch/arm64/lib/memchr.S
> @@ -19,7 +19,7 @@
>   * Returns:
>   *	x0 - address of first occurrence of 'c' or 0
>   */
> -WEAK(memchr)
> +SYM_FUNC_START_PI_WEAK(memchr)
>  	and	w1, w1, #0xff
>  1:	subs	x2, x2, #1
>  	b.mi	2f
> @@ -30,5 +30,5 @@ WEAK(memchr)
>  	ret
>  2:	mov	x0, #0
>  	ret
> -ENDPIPROC(memchr)
> +SYM_FUNC_END_PI(memchr)
>  EXPORT_SYMBOL_NOKASAN(memchr)
> diff --git a/arch/arm64/lib/memcmp.S b/arch/arm64/lib/memcmp.S
> index b297bdaaf549..b889f312bdb3 100644
> --- a/arch/arm64/lib/memcmp.S
> +++ b/arch/arm64/lib/memcmp.S
> @@ -46,7 +46,7 @@ pos		.req	x11
>  limit_wd	.req	x12
>  mask		.req	x13
>  
> -WEAK(memcmp)
> +SYM_FUNC_START_PI_WEAK(memcmp)
>  	cbz	limit, .Lret0
>  	eor	tmp1, src1, src2
>  	tst	tmp1, #7
> @@ -243,5 +243,5 @@ CPU_LE( rev	data2, data2 )
>  .Lret0:
>  	mov	result, #0
>  	ret
> -ENDPIPROC(memcmp)
> +SYM_FUNC_END_PI(memcmp)
>  EXPORT_SYMBOL_NOKASAN(memcmp)
> diff --git a/arch/arm64/lib/memcpy.S b/arch/arm64/lib/memcpy.S
> index d79f48994dbb..9f382adfa88a 100644
> --- a/arch/arm64/lib/memcpy.S
> +++ b/arch/arm64/lib/memcpy.S
> @@ -57,11 +57,11 @@
>  	.endm
>  
>  	.weak memcpy

Hmm, any idea why we use .weak explicitly here? Maybe better off using
the proper macros now? (same applies to many of the other lib/ functions
you're touching)
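
That is, something like the below (untested), reusing the WEAK variant
this series already uses for memchr and friends, instead of the bare
.weak directive:

-	.weak memcpy
-SYM_FUNC_START_ALIAS(__memcpy)
-SYM_FUNC_START_PI(memcpy)
+SYM_FUNC_START_ALIAS(__memcpy)
+SYM_FUNC_START_PI_WEAK(memcpy)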

Will
Mark Brown Jan. 7, 2020, 5:47 p.m. UTC | #2
On Tue, Jan 07, 2020 at 02:44:46PM +0000, Will Deacon wrote:
> On Mon, Jan 06, 2020 at 07:58:17PM +0000, Mark Brown wrote:

> > -ENTRY(clear_page)
> > +SYM_FUNC_START(clear_page)
> >  	mrs	x1, dczid_el0
> >  	and	w1, w1, #0xf
> >  	mov	x2, #4

> Since this doesn't change behaviour, I think the patch is fine. However,
> on reading Documentation/asm-annotations.rst it's not completely clear to
> me when SYM_FUNC_START() should be used. In this case, for example, we are
> *not* pushing a stackframe and that would appear to be at odds with the
> documentation.

> Jiri -- is it ok to omit the stack frame for leaf functions annotated with
> SYM_FUNC_START? I'm guessing it should be, since the link register isn't
> going to be clobbered. Could we update the documentation to reflect that?

Yeah, the documentation isn't great on that.  I was going on the basis
of both trying to minimize changes to the generated output as part of
the bulk change and looking at it from the point of view of the caller:
if, as in this case, the caller thinks it's a regular C function, it seems
sensible to annotate it as such.

> > --- a/arch/arm64/lib/memcpy.S
> > +++ b/arch/arm64/lib/memcpy.S
> > @@ -57,11 +57,11 @@
> >  	.endm
> >  
> >  	.weak memcpy

> Hmm, any idea why we use .weak explicitly here? Maybe better off using
> the proper macros now? (same applies to many of the other lib/ functions
> you're touching)

Nope, there's a whole bunch of stuff where what we're currently doing is
a bit interesting and I'm a bit worried that we might be relying on some
of it.  My theory here was to do the bulk of the changes as a 1:1
replacement so the generated output is as close as possible for any big
changes and then do anything more detailed that isn't actually *needed*
on top of that.  It's looking like there'll also be some stuff that
definitely changes the output going in as well; I was going to do those
as individual patches so that it's easier to find any breakages that get
introduced and so the big, repetitive changes don't have other stuff
mixed in.
Will Deacon Jan. 8, 2020, 12:29 p.m. UTC | #3
On Tue, Jan 07, 2020 at 05:47:41PM +0000, Mark Brown wrote:
> On Tue, Jan 07, 2020 at 02:44:46PM +0000, Will Deacon wrote:
> > On Mon, Jan 06, 2020 at 07:58:17PM +0000, Mark Brown wrote:
> 
> > > -ENTRY(clear_page)
> > > +SYM_FUNC_START(clear_page)
> > >  	mrs	x1, dczid_el0
> > >  	and	w1, w1, #0xf
> > >  	mov	x2, #4
> 
> > Since this doesn't change behaviour, I think the patch is fine. However,
> > on reading Documentation/asm-annotations.rst it's not completely clear to
> > me when SYM_FUNC_START() should be used. In this case, for example, we are
> > *not* pushing a stackframe and that would appear to be at odds with the
> > documentation.
> 
> > Jiri -- is it ok to omit the stack frame for leaf functions annotated with
> > SYM_FUNC_START? I'm guessing it should be, since the link register isn't
> > going to be clobbered. Could we update the documentation to reflect that?
> 
> Yeah, the documentation isn't great on that.  I was going on the basis
> of both trying to minimize changes to the generated output as part of
> the bulk change and looking at it from the point of view of the caller:
> if, as in this case, the caller thinks it's a regular C function, it seems
> sensible to annotate it as such.

Maybe a small tweak to the documentation as per below, indicating that the
stack stuff is just an x86-specific example?

Jiri?

diff --git a/Documentation/asm-annotations.rst b/Documentation/asm-annotations.rst
index f55c2bb74d00..32ea57483378 100644
--- a/Documentation/asm-annotations.rst
+++ b/Documentation/asm-annotations.rst
@@ -73,10 +73,11 @@ The new macros are prefixed with the ``SYM_`` prefix and can be divided into
 three main groups:
 
 1. ``SYM_FUNC_*`` -- to annotate C-like functions. This means functions with
-   standard C calling conventions, i.e. the stack contains a return address at
-   the predefined place and a return from the function can happen in a
-   standard way. When frame pointers are enabled, save/restore of frame
-   pointer shall happen at the start/end of a function, respectively, too.
+   standard C calling conventions. For example, on x86, this means that the
+   stack contains a return address at the predefined place and a return from
+   the function can happen in a standard way. When frame pointers are enabled,
+   save/restore of frame pointer shall happen at the start/end of a function,
+   respectively, too.
 
    Checking tools like ``objtool`` should ensure such marked functions conform
    to these rules. The tools can also easily annotate these functions with

> > > --- a/arch/arm64/lib/memcpy.S
> > > +++ b/arch/arm64/lib/memcpy.S
> > > @@ -57,11 +57,11 @@
> > >  	.endm
> > >  
> > >  	.weak memcpy
> 
> > Hmm, any idea why we use .weak explicitly here? Maybe better off using
> > the proper macros now? (same applies to many of the other lib/ functions
> > you're touching)
> 
> Nope, there's a whole bunch of stuff where what we're currently doing is
> a bit interesting and I'm a bit worried that we might be relying on some
> of it.  My theory here was to do the bulk of the changes as a 1:1
> replacement so the generated output is as close as possible for any big
> changes and then do anything more detailed that isn't actually *needed*
> on top of that.  It's looking like there'll also be some stuff that
> definitely changes the output going in as well; I was going to do those
> as individual patches so that it's easier to find any breakages that get
> introduced and so the big, repetitive changes don't have other stuff
> mixed in.

Fair enough, I'll queue these three with the minor naming change and you can
send extra stuff on top of that.

Cheers,

Will
Jiri Slaby Jan. 10, 2020, 4:56 p.m. UTC | #4
On 08. 01. 20, 13:29, Will Deacon wrote:
> On Tue, Jan 07, 2020 at 05:47:41PM +0000, Mark Brown wrote:
>> On Tue, Jan 07, 2020 at 02:44:46PM +0000, Will Deacon wrote:
>>> Jiri -- is it ok to omit the stack frame for leaf functions annotated with
>>> SYM_FUNC_START? I'm guessing it should be, since the link register isn't
>>> going to be clobbered. Could we update the documentation to reflect that?
>>
>> Yeah, the documentation isn't great on that.  I was going on the basis
>> of both trying to minimize changes to the generated output as part of
>> the bulk change and looking at it from the point of view of the caller:
>> if, as in this case, the caller thinks it's a regular C function, it seems
>> sensible to annotate it as such.
> 
> Maybe a small tweak to the documentation as per below, indicating that the
> stack stuff is just an x86-specific example?
> 
> Jiri?

Yes, the text in the documentation was too x86-specific. Could you send
the below as a proper patch? Thanks.

> diff --git a/Documentation/asm-annotations.rst b/Documentation/asm-annotations.rst
> index f55c2bb74d00..32ea57483378 100644
> --- a/Documentation/asm-annotations.rst
> +++ b/Documentation/asm-annotations.rst
> @@ -73,10 +73,11 @@ The new macros are prefixed with the ``SYM_`` prefix and can be divided into
>  three main groups:
>  
>  1. ``SYM_FUNC_*`` -- to annotate C-like functions. This means functions with
> -   standard C calling conventions, i.e. the stack contains a return address at
> -   the predefined place and a return from the function can happen in a
> -   standard way. When frame pointers are enabled, save/restore of frame
> -   pointer shall happen at the start/end of a function, respectively, too.
> +   standard C calling conventions. For example, on x86, this means that the
> +   stack contains a return address at the predefined place and a return from
> +   the function can happen in a standard way. When frame pointers are enabled,
> +   save/restore of frame pointer shall happen at the start/end of a function,
> +   respectively, too.
>  
>     Checking tools like ``objtool`` should ensure such marked functions conform
>     to these rules. The tools can also easily annotate these functions with

thanks,
Will Deacon Jan. 15, 2020, 6:43 p.m. UTC | #5
On Fri, Jan 10, 2020 at 05:56:22PM +0100, Jiri Slaby wrote:
> On 08. 01. 20, 13:29, Will Deacon wrote:
> > On Tue, Jan 07, 2020 at 05:47:41PM +0000, Mark Brown wrote:
> >> On Tue, Jan 07, 2020 at 02:44:46PM +0000, Will Deacon wrote:
> >>> Jiri -- is it ok to omit the stack frame for leaf functions annotated with
> >>> SYM_FUNC_START? I'm guessing it should be, since the link register isn't
> >>> going to be clobbered. Could we update the documentation to reflect that?
> >>
> >> Yeah, the documentation isn't great on that.  I was going on the basis
> >> of both trying to minimize changes to the generated output as part of
> >> the bulk change and looking at it from the point of view of the caller:
> >> if, as in this case, the caller thinks it's a regular C function, it seems
> >> sensible to annotate it as such.
> > 
> > Maybe a small tweak to the documentation as per below, indicating that the
> > stack stuff is just an x86-specific example?
> > 
> > Jiri?
> 
> Yes, the text in the documentation was too x86-specific. Could you send
> the below as a proper patch? Thanks.

Sorry it took so long:

https://lkml.kernel.org/r/20200115184305.1187-1-will@kernel.org

Will

Patch

diff --git a/arch/arm64/lib/clear_page.S b/arch/arm64/lib/clear_page.S
index 78a9ef66288a..073acbf02a7c 100644
--- a/arch/arm64/lib/clear_page.S
+++ b/arch/arm64/lib/clear_page.S
@@ -14,7 +14,7 @@ 
  * Parameters:
  *	x0 - dest
  */
-ENTRY(clear_page)
+SYM_FUNC_START(clear_page)
 	mrs	x1, dczid_el0
 	and	w1, w1, #0xf
 	mov	x2, #4
@@ -25,5 +25,5 @@  ENTRY(clear_page)
 	tst	x0, #(PAGE_SIZE - 1)
 	b.ne	1b
 	ret
-ENDPROC(clear_page)
+SYM_FUNC_END(clear_page)
 EXPORT_SYMBOL(clear_page)
diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S
index aeafc03e961a..48a3a26eff66 100644
--- a/arch/arm64/lib/clear_user.S
+++ b/arch/arm64/lib/clear_user.S
@@ -19,7 +19,7 @@ 
  *
  * Alignment fixed up by hardware.
  */
-ENTRY(__arch_clear_user)
+SYM_FUNC_START(__arch_clear_user)
 	mov	x2, x1			// save the size for fixup return
 	subs	x1, x1, #8
 	b.mi	2f
@@ -40,7 +40,7 @@  uao_user_alternative 9f, strh, sttrh, wzr, x0, 2
 uao_user_alternative 9f, strb, sttrb, wzr, x0, 0
 5:	mov	x0, #0
 	ret
-ENDPROC(__arch_clear_user)
+SYM_FUNC_END(__arch_clear_user)
 EXPORT_SYMBOL(__arch_clear_user)
 
 	.section .fixup,"ax"
diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S
index ebb3c06cbb5d..8e25e89ad01f 100644
--- a/arch/arm64/lib/copy_from_user.S
+++ b/arch/arm64/lib/copy_from_user.S
@@ -53,12 +53,12 @@ 
 	.endm
 
 end	.req	x5
-ENTRY(__arch_copy_from_user)
+SYM_FUNC_START(__arch_copy_from_user)
 	add	end, x0, x2
 #include "copy_template.S"
 	mov	x0, #0				// Nothing to copy
 	ret
-ENDPROC(__arch_copy_from_user)
+SYM_FUNC_END(__arch_copy_from_user)
 EXPORT_SYMBOL(__arch_copy_from_user)
 
 	.section .fixup,"ax"
diff --git a/arch/arm64/lib/copy_in_user.S b/arch/arm64/lib/copy_in_user.S
index 3d8153a1ebce..667139013ed1 100644
--- a/arch/arm64/lib/copy_in_user.S
+++ b/arch/arm64/lib/copy_in_user.S
@@ -55,12 +55,12 @@ 
 
 end	.req	x5
 
-ENTRY(__arch_copy_in_user)
+SYM_FUNC_START(__arch_copy_in_user)
 	add	end, x0, x2
 #include "copy_template.S"
 	mov	x0, #0
 	ret
-ENDPROC(__arch_copy_in_user)
+SYM_FUNC_END(__arch_copy_in_user)
 EXPORT_SYMBOL(__arch_copy_in_user)
 
 	.section .fixup,"ax"
diff --git a/arch/arm64/lib/copy_page.S b/arch/arm64/lib/copy_page.S
index bbb8562396af..e125a84eb400 100644
--- a/arch/arm64/lib/copy_page.S
+++ b/arch/arm64/lib/copy_page.S
@@ -17,7 +17,7 @@ 
  *	x0 - dest
  *	x1 - src
  */
-ENTRY(copy_page)
+SYM_FUNC_START(copy_page)
 alternative_if ARM64_HAS_NO_HW_PREFETCH
 	// Prefetch three cache lines ahead.
 	prfm	pldl1strm, [x1, #128]
@@ -75,5 +75,5 @@  alternative_else_nop_endif
 	stnp	x16, x17, [x0, #112]
 
 	ret
-ENDPROC(copy_page)
+SYM_FUNC_END(copy_page)
 EXPORT_SYMBOL(copy_page)
diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S
index 357eae2c18eb..1a104d0089f3 100644
--- a/arch/arm64/lib/copy_to_user.S
+++ b/arch/arm64/lib/copy_to_user.S
@@ -52,12 +52,12 @@ 
 	.endm
 
 end	.req	x5
-ENTRY(__arch_copy_to_user)
+SYM_FUNC_START(__arch_copy_to_user)
 	add	end, x0, x2
 #include "copy_template.S"
 	mov	x0, #0
 	ret
-ENDPROC(__arch_copy_to_user)
+SYM_FUNC_END(__arch_copy_to_user)
 EXPORT_SYMBOL(__arch_copy_to_user)
 
 	.section .fixup,"ax"
diff --git a/arch/arm64/lib/crc32.S b/arch/arm64/lib/crc32.S
index e6135f16649b..243e107e9896 100644
--- a/arch/arm64/lib/crc32.S
+++ b/arch/arm64/lib/crc32.S
@@ -85,17 +85,17 @@  CPU_BE(	rev16		w3, w3		)
 	.endm
 
 	.align		5
-ENTRY(crc32_le)
+SYM_FUNC_START(crc32_le)
 alternative_if_not ARM64_HAS_CRC32
 	b		crc32_le_base
 alternative_else_nop_endif
 	__crc32
-ENDPROC(crc32_le)
+SYM_FUNC_END(crc32_le)
 
 	.align		5
-ENTRY(__crc32c_le)
+SYM_FUNC_START(__crc32c_le)
 alternative_if_not ARM64_HAS_CRC32
 	b		__crc32c_le_base
 alternative_else_nop_endif
 	__crc32		c
-ENDPROC(__crc32c_le)
+SYM_FUNC_END(__crc32c_le)
diff --git a/arch/arm64/lib/memchr.S b/arch/arm64/lib/memchr.S
index 48a3ab636e4f..99910c5d5db7 100644
--- a/arch/arm64/lib/memchr.S
+++ b/arch/arm64/lib/memchr.S
@@ -19,7 +19,7 @@ 
  * Returns:
  *	x0 - address of first occurrence of 'c' or 0
  */
-WEAK(memchr)
+SYM_FUNC_START_PI_WEAK(memchr)
 	and	w1, w1, #0xff
 1:	subs	x2, x2, #1
 	b.mi	2f
@@ -30,5 +30,5 @@  WEAK(memchr)
 	ret
 2:	mov	x0, #0
 	ret
-ENDPIPROC(memchr)
+SYM_FUNC_END_PI(memchr)
 EXPORT_SYMBOL_NOKASAN(memchr)
diff --git a/arch/arm64/lib/memcmp.S b/arch/arm64/lib/memcmp.S
index b297bdaaf549..b889f312bdb3 100644
--- a/arch/arm64/lib/memcmp.S
+++ b/arch/arm64/lib/memcmp.S
@@ -46,7 +46,7 @@  pos		.req	x11
 limit_wd	.req	x12
 mask		.req	x13
 
-WEAK(memcmp)
+SYM_FUNC_START_PI_WEAK(memcmp)
 	cbz	limit, .Lret0
 	eor	tmp1, src1, src2
 	tst	tmp1, #7
@@ -243,5 +243,5 @@  CPU_LE( rev	data2, data2 )
 .Lret0:
 	mov	result, #0
 	ret
-ENDPIPROC(memcmp)
+SYM_FUNC_END_PI(memcmp)
 EXPORT_SYMBOL_NOKASAN(memcmp)
diff --git a/arch/arm64/lib/memcpy.S b/arch/arm64/lib/memcpy.S
index d79f48994dbb..9f382adfa88a 100644
--- a/arch/arm64/lib/memcpy.S
+++ b/arch/arm64/lib/memcpy.S
@@ -57,11 +57,11 @@ 
 	.endm
 
 	.weak memcpy
-ENTRY(__memcpy)
-ENTRY(memcpy)
+SYM_FUNC_START_ALIAS(__memcpy)
+SYM_FUNC_START_PI(memcpy)
 #include "copy_template.S"
 	ret
-ENDPIPROC(memcpy)
+SYM_FUNC_END_PI(memcpy)
 EXPORT_SYMBOL(memcpy)
-ENDPROC(__memcpy)
+SYM_FUNC_END_ALIAS(__memcpy)
 EXPORT_SYMBOL(__memcpy)
diff --git a/arch/arm64/lib/memmove.S b/arch/arm64/lib/memmove.S
index 784775136480..02cda2e33bde 100644
--- a/arch/arm64/lib/memmove.S
+++ b/arch/arm64/lib/memmove.S
@@ -46,8 +46,8 @@  D_l	.req	x13
 D_h	.req	x14
 
 	.weak memmove
-ENTRY(__memmove)
-ENTRY(memmove)
+SYM_FUNC_START_ALIAS(__memmove)
+SYM_FUNC_START_PI(memmove)
 	cmp	dstin, src
 	b.lo	__memcpy
 	add	tmp1, src, count
@@ -184,7 +184,7 @@  ENTRY(memmove)
 	tst	count, #0x3f
 	b.ne	.Ltail63
 	ret
-ENDPIPROC(memmove)
+SYM_FUNC_END_PI(memmove)
 EXPORT_SYMBOL(memmove)
-ENDPROC(__memmove)
+SYM_FUNC_END_ALIAS(__memmove)
 EXPORT_SYMBOL(__memmove)
diff --git a/arch/arm64/lib/memset.S b/arch/arm64/lib/memset.S
index 9fb97e6bc560..77c3c7ba0084 100644
--- a/arch/arm64/lib/memset.S
+++ b/arch/arm64/lib/memset.S
@@ -43,8 +43,8 @@  tmp3w		.req	w9
 tmp3		.req	x9
 
 	.weak memset
-ENTRY(__memset)
-ENTRY(memset)
+SYM_FUNC_START_ALIAS(__memset)
+SYM_FUNC_START_PI(memset)
 	mov	dst, dstin	/* Preserve return value.  */
 	and	A_lw, val, #255
 	orr	A_lw, A_lw, A_lw, lsl #8
@@ -203,7 +203,7 @@  ENTRY(memset)
 	ands	count, count, zva_bits_x
 	b.ne	.Ltail_maybe_long
 	ret
-ENDPIPROC(memset)
+SYM_FUNC_END_PI(memset)
 EXPORT_SYMBOL(memset)
-ENDPROC(__memset)
+SYM_FUNC_END_ALIAS(__memset)
 EXPORT_SYMBOL(__memset)
diff --git a/arch/arm64/lib/strchr.S b/arch/arm64/lib/strchr.S
index ca3ec18171a4..1f47eae3b0d6 100644
--- a/arch/arm64/lib/strchr.S
+++ b/arch/arm64/lib/strchr.S
@@ -18,7 +18,7 @@ 
  * Returns:
  *	x0 - address of first occurrence of 'c' or 0
  */
-WEAK(strchr)
+SYM_FUNC_START_WEAK(strchr)
 	and	w1, w1, #0xff
 1:	ldrb	w2, [x0], #1
 	cmp	w2, w1
@@ -28,5 +28,5 @@  WEAK(strchr)
 	cmp	w2, w1
 	csel	x0, x0, xzr, eq
 	ret
-ENDPROC(strchr)
+SYM_FUNC_END(strchr)
 EXPORT_SYMBOL_NOKASAN(strchr)
diff --git a/arch/arm64/lib/strcmp.S b/arch/arm64/lib/strcmp.S
index e9aefbe0b740..5f9bb9094d1f 100644
--- a/arch/arm64/lib/strcmp.S
+++ b/arch/arm64/lib/strcmp.S
@@ -48,7 +48,7 @@  tmp3		.req	x9
 zeroones	.req	x10
 pos		.req	x11
 
-WEAK(strcmp)
+SYM_FUNC_START_PI_WEAK(strcmp)
 	eor	tmp1, src1, src2
 	mov	zeroones, #REP8_01
 	tst	tmp1, #7
@@ -219,5 +219,5 @@  CPU_BE(	orr	syndrome, diff, has_nul )
 	lsr	data1, data1, #56
 	sub	result, data1, data2, lsr #56
 	ret
-ENDPIPROC(strcmp)
+SYM_FUNC_END_PI(strcmp)
 EXPORT_SYMBOL_NOKASAN(strcmp)
diff --git a/arch/arm64/lib/strlen.S b/arch/arm64/lib/strlen.S
index 87b0cb066915..f71b4b94d4b2 100644
--- a/arch/arm64/lib/strlen.S
+++ b/arch/arm64/lib/strlen.S
@@ -44,7 +44,7 @@  pos		.req	x12
 #define REP8_7f 0x7f7f7f7f7f7f7f7f
 #define REP8_80 0x8080808080808080
 
-WEAK(strlen)
+SYM_FUNC_START_PI_WEAK(strlen)
 	mov	zeroones, #REP8_01
 	bic	src, srcin, #15
 	ands	tmp1, srcin, #15
@@ -111,5 +111,5 @@  CPU_LE( lsr	tmp2, tmp2, tmp1 )	/* Shift (tmp1 & 63).  */
 	csinv	data1, data1, xzr, le
 	csel	data2, data2, data2a, le
 	b	.Lrealigned
-ENDPIPROC(strlen)
+SYM_FUNC_END_PI(strlen)
 EXPORT_SYMBOL_NOKASAN(strlen)
diff --git a/arch/arm64/lib/strncmp.S b/arch/arm64/lib/strncmp.S
index f571581888fa..6df88cd9c6f2 100644
--- a/arch/arm64/lib/strncmp.S
+++ b/arch/arm64/lib/strncmp.S
@@ -52,7 +52,7 @@  limit_wd	.req	x13
 mask		.req	x14
 endloop		.req	x15
 
-WEAK(strncmp)
+SYM_FUNC_START_PI_WEAK(strncmp)
 	cbz	limit, .Lret0
 	eor	tmp1, src1, src2
 	mov	zeroones, #REP8_01
@@ -295,5 +295,5 @@  CPU_BE( orr	syndrome, diff, has_nul )
 .Lret0:
 	mov	result, #0
 	ret
-ENDPIPROC(strncmp)
+SYM_FUNC_END_PI(strncmp)
 EXPORT_SYMBOL_NOKASAN(strncmp)
diff --git a/arch/arm64/lib/strnlen.S b/arch/arm64/lib/strnlen.S
index c0bac9493c68..2fadeeff6ff4 100644
--- a/arch/arm64/lib/strnlen.S
+++ b/arch/arm64/lib/strnlen.S
@@ -47,7 +47,7 @@  limit_wd	.req	x14
 #define REP8_7f 0x7f7f7f7f7f7f7f7f
 #define REP8_80 0x8080808080808080
 
-WEAK(strnlen)
+SYM_FUNC_START_PI_WEAK(strnlen)
 	cbz	limit, .Lhit_limit
 	mov	zeroones, #REP8_01
 	bic	src, srcin, #15
@@ -156,5 +156,5 @@  CPU_LE( lsr	tmp2, tmp2, tmp4 )	/* Shift (tmp1 & 63).  */
 .Lhit_limit:
 	mov	len, limit
 	ret
-ENDPIPROC(strnlen)
+SYM_FUNC_END_PI(strnlen)
 EXPORT_SYMBOL_NOKASAN(strnlen)
diff --git a/arch/arm64/lib/strrchr.S b/arch/arm64/lib/strrchr.S
index 794ac49ea433..9a9936e0c6f3 100644
--- a/arch/arm64/lib/strrchr.S
+++ b/arch/arm64/lib/strrchr.S
@@ -18,7 +18,7 @@ 
  * Returns:
  *	x0 - address of last occurrence of 'c' or 0
  */
-WEAK(strrchr)
+SYM_FUNC_START_PI_WEAK(strrchr)
 	mov	x3, #0
 	and	w1, w1, #0xff
 1:	ldrb	w2, [x0], #1
@@ -29,5 +29,5 @@  WEAK(strrchr)
 	b	1b
 2:	mov	x0, x3
 	ret
-ENDPIPROC(strrchr)
+SYM_FUNC_END_PI(strrchr)
 EXPORT_SYMBOL_NOKASAN(strrchr)
diff --git a/arch/arm64/lib/tishift.S b/arch/arm64/lib/tishift.S
index 047622536535..a88613834fb0 100644
--- a/arch/arm64/lib/tishift.S
+++ b/arch/arm64/lib/tishift.S
@@ -7,7 +7,7 @@ 
 
 #include <asm/assembler.h>
 
-ENTRY(__ashlti3)
+SYM_FUNC_START(__ashlti3)
 	cbz	x2, 1f
 	mov	x3, #64
 	sub	x3, x3, x2
@@ -26,10 +26,10 @@  ENTRY(__ashlti3)
 	lsl	x1, x0, x1
 	mov	x0, x2
 	ret
-ENDPROC(__ashlti3)
+SYM_FUNC_END(__ashlti3)
 EXPORT_SYMBOL(__ashlti3)
 
-ENTRY(__ashrti3)
+SYM_FUNC_START(__ashrti3)
 	cbz	x2, 1f
 	mov	x3, #64
 	sub	x3, x3, x2
@@ -48,10 +48,10 @@  ENTRY(__ashrti3)
 	asr	x0, x1, x0
 	mov	x1, x2
 	ret
-ENDPROC(__ashrti3)
+SYM_FUNC_END(__ashrti3)
 EXPORT_SYMBOL(__ashrti3)
 
-ENTRY(__lshrti3)
+SYM_FUNC_START(__lshrti3)
 	cbz	x2, 1f
 	mov	x3, #64
 	sub	x3, x3, x2
@@ -70,5 +70,5 @@  ENTRY(__lshrti3)
 	lsr	x0, x1, x0
 	mov	x1, x2
 	ret
-ENDPROC(__lshrti3)
+SYM_FUNC_END(__lshrti3)
 EXPORT_SYMBOL(__lshrti3)