
[v2,1/2] x86: annotate entry points with type and size

Message ID fd492a4a-11ba-b63a-daf4-99697db0db0e@suse.com (mailing list archive)
State New, archived
Series x86: annotate entry points with type and size

Commit Message

Jan Beulich May 23, 2023, 11:30 a.m. UTC
Recent gas versions generate minimalistic Dwarf debug info for items
annotated as functions and having their sizes specified [1]. "Borrow"
Arm's END() and (remotely) derive other annotation infrastructure from
Linux'es.

For switch_to_kernel() and restore_all_guest() the so far implicit
alignment (from being first in their respective sections) is being made
explicit (as in: using FUNC() without a 2nd argument). Whereas for
{,compat}create_bounce_frame() and autogen_entrypoints[] alignment is
newly arranged for.

Except for the added alignment padding (including its knock-on
effects) there is no change in generated code/data.

Signed-off-by: Jan Beulich <jbeulich@suse.com>

[1] https://sourceware.org/git?p=binutils-gdb.git;a=commitdiff;h=591cc9fbbfd6d51131c0f1d4a92e7893edcc7a28
---
v2: Full rework.
---
Only two of the assembly files are being converted for now. More could
be done right here or as follow-on in separate patches.

In principle the framework should be usable by other
architectures as well. If we want this, the main questions are going to
be:
- What header file name? (I don't really like Linux'es linkage.h, so I'd
  prefer e.g. asm-defns.h or asm_defns.h as we already have in x86.)
- How much per-arch customization do we want to permit up front (i.e.
  without knowing how much of it is going to be needed)? Initially I'd
  expect only the default function alignment (and padding) to require
  per-arch definitions (a sketch follows below).
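
A minimal sketch of what such a per-arch hook could look like, using
the SYM()/LAST() machinery introduced below (CODE_ALIGN and CODE_FILL
are hypothetical names, not part of this patch):

    /* arch header, e.g. the suggested asm-defns.h */
    #define CODE_ALIGN 16    /* default function alignment */
    #define CODE_FILL  0x90  /* NOP fill byte on x86 */

    /* common header */
    #define FUNC(name, algn...) \
            SYM(name, FUNC, GLOBAL, LAST(CODE_ALIGN, ## algn), CODE_FILL)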

Note that the FB-label in autogen_stubs() cannot be converted just yet:
Such labels (gas's numeric forward/backward labels, e.g. "1:" used via
"1f"/"1b" references) cannot be used with .type. We could further
diverge from Linux'es model and avoid setting STT_NOTYPE explicitly
(that's the type labels get by default anyway).

Note that we can't use ALIGN() (in place of SYM_ALIGN()) as long as we
still have ALIGN.
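
As a quick illustration, the resulting annotation style (taken from
the diff below) is:

    FUNC(entry_int82)
            ENDBR64
            ...
    END(entry_int82)

where FUNC() supplies the .type directive and END() the .size one,
which together let recent gas emit the Dwarf info mentioned above.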

Comments

Roger Pau Monné May 29, 2023, 1:34 p.m. UTC | #1
On Tue, May 23, 2023 at 01:30:51PM +0200, Jan Beulich wrote:
> Recent gas versions generate minimalistic Dwarf debug info for items
> annotated as functions and having their sizes specified [1]. "Borrow"
> Arm's END() and (remotely) derive other annotation infrastructure from
> Linux'es.
> 
> For switch_to_kernel() and restore_all_guest() the so far implicit
> alignment (from being first in their respective sections) is being made
> explicit (as in: using FUNC() without a 2nd argument). Whereas for
> {,compat}create_bounce_frame() and autogen_entrypoints[] alignment is
> newly arranged for.
> 
> Except for the added alignment padding (including its knock-on
> effects) there is no change in generated code/data.
> 
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> 
> [1] https://sourceware.org/git?p=binutils-gdb.git;a=commitdiff;h=591cc9fbbfd6d51131c0f1d4a92e7893edcc7a28
> ---
> v2: Full rework.
> ---
> Only two of the assembly files are being converted for now. More could
> be done right here or as follow-on in separate patches.
> 
> In principle the framework should be usable by other
> architectures as well. If we want this, the main questions are going to
> be:
> - What header file name? (I don't really like Linux'es linkage.h, so I'd
>   prefer e.g. asm-defns.h or asm_defns.h as we already have in x86.)
> - How much per-arch customization do we want to permit up front (i.e.
>   without knowing how much of it is going to be needed)? Initially I'd
>   expect only the default function alignment (and padding) to require
>   per-arch definitions.
> 
> Note that the FB-label in autogen_stubs() cannot be converted just yet:
> Such labels cannot be used with .type. We could further diverge from
> Linux'es model and avoid setting STT_NOTYPE explicitly (that's the type
> labels get by default anyway).
> 
> Note that we can't use ALIGN() (in place of SYM_ALIGN()) as long as we
> still have ALIGN.

FWIW, as I'm looking into using the newly added macros in order to add
annotations suitable for live-patching, I would need to switch some of
the LABEL usages into their own functions, as it's not possible to
livepatch a function that has labels jumped into from code paths
outside of the function.

> --- a/xen/arch/x86/include/asm/asm_defns.h
> +++ b/xen/arch/x86/include/asm/asm_defns.h
> @@ -81,6 +81,45 @@ register unsigned long current_stack_poi
>  
>  #ifdef __ASSEMBLY__
>  
> +#define SYM_ALIGN(algn...) .balign algn
> +
> +#define SYM_L_GLOBAL(name) .globl name
> +#define SYM_L_WEAK(name)   .weak name

Wouldn't this be better added when required?  I can't spot any weak
symbols in assembly ATM, and you don't introduce any _WEAK macro
variants below.

> +#define SYM_L_LOCAL(name)  /* nothing */
> +
> +#define SYM_T_FUNC         STT_FUNC
> +#define SYM_T_DATA         STT_OBJECT
> +#define SYM_T_NONE         STT_NOTYPE
> +
> +#define SYM(name, typ, linkage, algn...)          \
> +        .type name, SYM_T_ ## typ;                \
> +        SYM_L_ ## linkage(name);                  \
> +        SYM_ALIGN(algn);                          \
> +        name:
> +
> +#define END(name) .size name, . - name
> +
> +#define ARG1_(x, y...) (x)
> +#define ARG2_(x, y...) ARG1_(y)
> +
> +#define LAST__(nr) ARG ## nr ## _
> +#define LAST_(nr)  LAST__(nr)
> +#define LAST(x, y...) LAST_(count_args(x, ## y))(x, ## y)

I find LAST not very descriptive; wouldn't it be better named OPTIONAL()
or similar? (and maybe placed in lib.h?)

> +
> +#define FUNC(name, algn...) \
> +        SYM(name, FUNC, GLOBAL, LAST(16, ## algn), 0x90)

A rant: should the alignment of functions use a different padding?
(ie: ret or ud2?) In order to prevent stray jumps falling in the
padding and falling through into the next function.  That would also
prevent the implicit fall-through used in some places.

> +#define LABEL(name, algn...) \
> +        SYM(name, NONE, GLOBAL, LAST(16, ## algn), 0x90)
> +#define DATA(name, algn...) \
> +        SYM(name, DATA, GLOBAL, LAST(0, ## algn), 0xff)
> +
> +#define FUNC_LOCAL(name, algn...) \
> +        SYM(name, FUNC, LOCAL, LAST(16, ## algn), 0x90)
> +#define LABEL_LOCAL(name, algn...) \
> +        SYM(name, NONE, LOCAL, LAST(16, ## algn), 0x90)

Is there much value in adding local labels to the symbol table?

AFAICT the main purpose of this macro is to be used to declare aligned
labels, and here avoid the ALIGN + label name pair, but could likely
drop the .type directive?

> +#define DATA_LOCAL(name, algn...) \
> +        SYM(name, DATA, LOCAL, LAST(0, ## algn), 0xff)
> +
>  #ifdef HAVE_AS_QUOTED_SYM
>  #define SUBSECTION_LBL(tag)                        \
>          .ifndef .L.tag;                            \
> --- a/xen/arch/x86/x86_64/compat/entry.S
> +++ b/xen/arch/x86/x86_64/compat/entry.S
> @@ -8,10 +8,11 @@
>  #include <asm/page.h>
>  #include <asm/processor.h>
>  #include <asm/desc.h>
> +#include <xen/lib.h>

Shouldn't the inclusion of lib.h be in asm_defns.h, as that's where the
usage of count_args() resides? (I assume that's why lib.h is added
here).

>  #include <public/xen.h>
>  #include <irq_vectors.h>
>  
> -ENTRY(entry_int82)
> +FUNC(entry_int82)
>          ENDBR64
>          ALTERNATIVE "", clac, X86_FEATURE_XEN_SMAP
>          pushq $0
> @@ -27,9 +28,10 @@ ENTRY(entry_int82)
>  
>          mov   %rsp, %rdi
>          call  do_entry_int82
> +END(entry_int82)
>  
>  /* %rbx: struct vcpu */
> -ENTRY(compat_test_all_events)
> +FUNC(compat_test_all_events)
>          ASSERT_NOT_IN_ATOMIC
>          cli                             # tests must not race interrupts
>  /*compat_test_softirqs:*/
> @@ -66,24 +68,21 @@ compat_test_guest_events:
>          call  compat_create_bounce_frame
>          jmp   compat_test_all_events
>  
> -        ALIGN
>  /* %rbx: struct vcpu */
> -compat_process_softirqs:
> +LABEL_LOCAL(compat_process_softirqs)

Shouldn't this be a local function rather than a local label?  It's
fully isolated.  I guess it would create issues with
compat_process_trap, as we would then require a jump from the
preceding compat_process_nmi.

>          sti
>          call  do_softirq
>          jmp   compat_test_all_events
>  
> -        ALIGN
>  /* %rbx: struct vcpu, %rdx: struct trap_bounce */
> -.Lcompat_process_trapbounce:
> +LABEL_LOCAL(.Lcompat_process_trapbounce)

It's my understanding that here the '.L' prefix is pointless, since
LABEL_LOCAL() will forcefully create a symbol for the label due to the
usage of .type?

Thanks, Roger.
Jan Beulich May 30, 2023, 8:06 a.m. UTC | #2
On 29.05.2023 15:34, Roger Pau Monné wrote:
> On Tue, May 23, 2023 at 01:30:51PM +0200, Jan Beulich wrote:
>> Note that the FB-label in autogen_stubs() cannot be converted just yet:
>> Such labels cannot be used with .type. We could further diverge from
>> Linux'es model and avoid setting STT_NOTYPE explicitly (that's the type
>> labels get by default anyway).
>>
>> Note that we can't use ALIGN() (in place of SYM_ALIGN()) as long as we
>> still have ALIGN.
> 
> FWIW, as I'm looking into using the newly added macros in order to add
> annotations suitable for live-patching, I would need to switch some of
> the LABEL usages into their own functions, as it's not possible to
> livepatch a function that has labels jumped into from code paths
> outside of the function.

Hmm, I'm not sure what the best way is to overcome that restriction. I'm
not convinced we want to arbitrarily name things "functions".

>> --- a/xen/arch/x86/include/asm/asm_defns.h
>> +++ b/xen/arch/x86/include/asm/asm_defns.h
>> @@ -81,6 +81,45 @@ register unsigned long current_stack_poi
>>  
>>  #ifdef __ASSEMBLY__
>>  
>> +#define SYM_ALIGN(algn...) .balign algn
>> +
>> +#define SYM_L_GLOBAL(name) .globl name
>> +#define SYM_L_WEAK(name)   .weak name
> 
> Wouldn't this be better added when required?  I can't spot any weak
> symbols in assembly ATM, and you don't introduce any _WEAK macro
> variants below.

Well, Andrew specifically mentioned the desire to also have Linux'es
support for weak symbols. Hence I decided to add it here despite
(for now) being unused. I can certainly drop that again, but in
particular if we wanted to use the scheme globally, I think we may
want to make it "complete".

>> +#define SYM_L_LOCAL(name)  /* nothing */
>> +
>> +#define SYM_T_FUNC         STT_FUNC
>> +#define SYM_T_DATA         STT_OBJECT
>> +#define SYM_T_NONE         STT_NOTYPE
>> +
>> +#define SYM(name, typ, linkage, algn...)          \
>> +        .type name, SYM_T_ ## typ;                \
>> +        SYM_L_ ## linkage(name);                  \
>> +        SYM_ALIGN(algn);                          \
>> +        name:
>> +
>> +#define END(name) .size name, . - name
>> +
>> +#define ARG1_(x, y...) (x)
>> +#define ARG2_(x, y...) ARG1_(y)
>> +
>> +#define LAST__(nr) ARG ## nr ## _
>> +#define LAST_(nr)  LAST__(nr)
>> +#define LAST(x, y...) LAST_(count_args(x, ## y))(x, ## y)
> 
> I find LAST not very descriptive; wouldn't it be better named OPTIONAL()
> or similar? (and maybe placed in lib.h?)

I don't think OPTIONAL describes the purpose. I truly mean "last" here.
As to placing in lib.h - perhaps, but then we may want to have forms
with more than 2 arguments right away (and it would be a little unclear
how far up to go).
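
To spell out the expansion for the two cases (count_args() yielding 1
and 2 respectively):

    LAST(16)     -> ARG1_(16)                  -> (16)
    LAST(16, 64) -> ARG2_(16, 64) -> ARG1_(64) -> (64)

i.e. a caller-supplied alignment simply overrides the default passed
as the first argument.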

>> +
>> +#define FUNC(name, algn...) \
>> +        SYM(name, FUNC, GLOBAL, LAST(16, ## algn), 0x90)
> 
> A rant: should the alignment of functions use a different padding?
> (ie: ret or ud2?) In order to prevent stray jumps falling in the
> padding and falling through into the next function.  That would also
> prevent the implicit fall-through used in some places.

Yes, but that's a separate topic (for which iirc patches are pending
as well, just of course not integrated with the work here). There's
the slight risk of overlooking some "fall-through" case ...
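
Just to sketch the idea with the macros at hand (not those pending
patches; int3 rather than ud2, since plain .balign takes a single fill
byte while ud2 is two bytes):

    #define FUNC(name, algn...) \
            SYM(name, FUNC, GLOBAL, LAST(16, ## algn), 0xcc) /* int3 */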

>> +#define LABEL(name, algn...) \
>> +        SYM(name, NONE, GLOBAL, LAST(16, ## algn), 0x90)
>> +#define DATA(name, algn...) \
>> +        SYM(name, DATA, GLOBAL, LAST(0, ## algn), 0xff)
>> +
>> +#define FUNC_LOCAL(name, algn...) \
>> +        SYM(name, FUNC, LOCAL, LAST(16, ## algn), 0x90)
>> +#define LABEL_LOCAL(name, algn...) \
>> +        SYM(name, NONE, LOCAL, LAST(16, ## algn), 0x90)
> 
> Is there much value in adding local labels to the symbol table?
> 
> AFAICT the main purpose of this macro is to be used to declare aligned
> labels, and here avoid the ALIGN + label name pair, but could likely
> drop the .type directive?

Right, .type ... NOTYPE is kind of redundant, but it fits the model
better here.
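
For reference, LABEL_LOCAL(name) as defined above effectively expands
to just:

    .type name, STT_NOTYPE;
    .balign 16, 0x90;
    name:

so dropping the .type line would indeed leave no more than an aligned
label.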

>> --- a/xen/arch/x86/x86_64/compat/entry.S
>> +++ b/xen/arch/x86/x86_64/compat/entry.S
>> @@ -8,10 +8,11 @@
>>  #include <asm/page.h>
>>  #include <asm/processor.h>
>>  #include <asm/desc.h>
>> +#include <xen/lib.h>
> 
> Shouldn't the inclusion of lib.h be in asm_defns.h, as that's where the
> usage of count_args() resides? (I assume that's why lib.h is added
> here).

When the uses are in macros I'm always largely undecided, and I slightly
tend towards the (in general, perhaps not overly relevant here) "less
dependencies" solution. As in: Source files not using the macros which
use count_args() also don't need lib.h then.

>> @@ -66,24 +68,21 @@ compat_test_guest_events:
>>          call  compat_create_bounce_frame
>>          jmp   compat_test_all_events
>>  
>> -        ALIGN
>>  /* %rbx: struct vcpu */
>> -compat_process_softirqs:
>> +LABEL_LOCAL(compat_process_softirqs)
> 
> Shouldn't this be a local function rather than a local label?  It's
> fully isolated.  I guess it would create issues with
> compat_process_trap, as we would then require a jump from the
> preceding compat_process_nmi.

Alternatives are possible, but right now I consider this an inner label
of compat_test_all_events.

>>          sti
>>          call  do_softirq
>>          jmp   compat_test_all_events
>>  
>> -        ALIGN
>>  /* %rbx: struct vcpu, %rdx: struct trap_bounce */
>> -.Lcompat_process_trapbounce:
>> +LABEL_LOCAL(.Lcompat_process_trapbounce)
> 
> It's my understanding that here the '.L' prefix is pointless, since
> LABEL_LOCAL() will forcefully create a symbol for the label due to the
> usage of .type?

I don't think .type has this effect. There's certainly no such label in
the symbol table of the object file I have as a result.

Jan
Roger Pau Monné May 30, 2023, 1:21 p.m. UTC | #3
On Tue, May 30, 2023 at 10:06:27AM +0200, Jan Beulich wrote:
> On 29.05.2023 15:34, Roger Pau Monné wrote:
> > On Tue, May 23, 2023 at 01:30:51PM +0200, Jan Beulich wrote:
> >> Note that the FB-label in autogen_stubs() cannot be converted just yet:
> >> Such labels cannot be used with .type. We could further diverge from
> >> Linux'es model and avoid setting STT_NOTYPE explicitly (that's the type
> >> labels get by default anyway).
> >>
> >> Note that we can't use ALIGN() (in place of SYM_ALIGN()) as long as we
> >> still have ALIGN.
> > 
> > FWIW, as I'm looking into using the newly added macros in order to add
> > annotations suitable for live-patching, I would need to switch some of
> > the LABEL usages into their own functions, as it's not possible to
> > livepatch a function that has labels jumped into from code paths
> > outside of the function.
> 
> Hmm, I'm not sure what the best way is to overcome that restriction. I'm
> not convinced we want to arbitrarily name things "functions".

Any external entry point in the middle of a function-like block will
prevent it from being live patched.

If you want I can try to do a pass on top of your patch and see how
that would end up looking.  I'm attempting to think about other
solutions, but every other solution seems quite horrible.

> >> --- a/xen/arch/x86/include/asm/asm_defns.h
> >> +++ b/xen/arch/x86/include/asm/asm_defns.h
> >> @@ -81,6 +81,45 @@ register unsigned long current_stack_poi
> >>  
> >>  #ifdef __ASSEMBLY__
> >>  
> >> +#define SYM_ALIGN(algn...) .balign algn
> >> +
> >> +#define SYM_L_GLOBAL(name) .globl name
> >> +#define SYM_L_WEAK(name)   .weak name
> > 
> > Wouldn't this be better added when required?  I can't spot any weak
> > symbols in assembly ATM, and you don't introduce any _WEAK macro
> > variants below.
> 
> Well, Andrew specifically mentioned the desire to also have Linux'es
> support for weak symbols. Hence I decided to add it here despite
> (for now) being unused. I can certainly drop that again, but in
> particular if we wanted to use the scheme globally, I think we may
> want to make it "complete".

OK, as long as we know it's unused.

> >> +#define SYM_L_LOCAL(name)  /* nothing */
> >> +
> >> +#define SYM_T_FUNC         STT_FUNC
> >> +#define SYM_T_DATA         STT_OBJECT
> >> +#define SYM_T_NONE         STT_NOTYPE
> >> +
> >> +#define SYM(name, typ, linkage, algn...)          \
> >> +        .type name, SYM_T_ ## typ;                \
> >> +        SYM_L_ ## linkage(name);                  \
> >> +        SYM_ALIGN(algn);                          \
> >> +        name:
> >> +
> >> +#define END(name) .size name, . - name
> >> +
> >> +#define ARG1_(x, y...) (x)
> >> +#define ARG2_(x, y...) ARG1_(y)
> >> +
> >> +#define LAST__(nr) ARG ## nr ## _
> >> +#define LAST_(nr)  LAST__(nr)
> >> +#define LAST(x, y...) LAST_(count_args(x, ## y))(x, ## y)
> > 
> > I find LAST not very descriptive; wouldn't it be better named OPTIONAL()
> > or similar? (and maybe placed in lib.h?)
> 
> I don't think OPTIONAL describes the purpose. I truly mean "last" here.
> As to placing in lib.h - perhaps, but then we may want to have forms
> with more than 2 arguments right away (and it would be a little unclear
> how far up to go).

Hm, I would be fine with adding that version with just 2 arguments, as
it's better to have the helper in a generic place IMO.

> >> +
> >> +#define FUNC(name, algn...) \
> >> +        SYM(name, FUNC, GLOBAL, LAST(16, ## algn), 0x90)
> > 
> > A rant: should the alignment of functions use a different padding?
> > (ie: ret or ud2?) In order to prevent stray jumps falling in the
> > padding and falling through into the next function.  That would also
> > prevent the implicit fall-through used in some places.
> 
> Yes, but that's a separate topic (for which iirc patches are pending
> as well, just of course not integrated with the work here). There's
> the slight risk of overlooking some "fall-through" case ...

Oh, OK, wasn't aware patches are floating for this already, just came
across it while reviewing.

> >> --- a/xen/arch/x86/x86_64/compat/entry.S
> >> +++ b/xen/arch/x86/x86_64/compat/entry.S
> >> @@ -8,10 +8,11 @@
> >>  #include <asm/page.h>
> >>  #include <asm/processor.h>
> >>  #include <asm/desc.h>
> >> +#include <xen/lib.h>
> > 
> > Shouldn't the inclusion of lib.h be in asm_defns.h, as that's where the
> > usage of count_args() resides? (I assume that's why lib.h is added
> > here).
> 
> When the uses are in macros I'm always largely undecided, and I slightly
> tend towards the (in general, perhaps not overly relevant here) "less
> dependencies" solution. As in: Source files not using the macros which
> use count_args() also don't need lib.h then.

I tend to prefer headers to be self-contained, as it overall leads to
a clearer set of includes in source files.  It's not obvious why
entry.S needs lib.h unless the asm_defns.h usage is taken into
account.
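
A minimal sketch of that alternative (assuming xen/lib.h stays safe to
include from assembly, as its use by entry.S suggests):

    /* in asm/asm_defns.h */
    #include <xen/lib.h>    /* for count_args(), used by LAST() */

entry.S and friends would then pick it up transitively.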

> >>          sti
> >>          call  do_softirq
> >>          jmp   compat_test_all_events
> >>  
> >> -        ALIGN
> >>  /* %rbx: struct vcpu, %rdx: struct trap_bounce */
> >> -.Lcompat_process_trapbounce:
> >> +LABEL_LOCAL(.Lcompat_process_trapbounce)
> > 
> > It's my understanding that here the '.L' prefix is pointless, since
> > LABEL_LOCAL() will forcefully create a symbol for the label due to the
> > usage of .type?
> 
> I don't think .type has this effect. There's certainly no such label in
> the symbol table of the object file I have as a result.

I was expecting .type to force the creation of a symbol, so the '.L'
prefix does prevent the symbol from being created even if .type is
specified.

Shouldn't the assembler complain that we are attempting to set a type
for a symbol that isn't present?

Thanks, Roger.
Jan Beulich May 30, 2023, 2:23 p.m. UTC | #4
On 30.05.2023 15:21, Roger Pau Monné wrote:
> On Tue, May 30, 2023 at 10:06:27AM +0200, Jan Beulich wrote:
>> On 29.05.2023 15:34, Roger Pau Monné wrote:
>>> On Tue, May 23, 2023 at 01:30:51PM +0200, Jan Beulich wrote:
>>>> Note that the FB-label in autogen_stubs() cannot be converted just yet:
>>>> Such labels cannot be used with .type. We could further diverge from
>>>> Linux'es model and avoid setting STT_NOTYPE explicitly (that's the type
>>>> labels get by default anyway).
>>>>
>>>> Note that we can't use ALIGN() (in place of SYM_ALIGN()) as long as we
>>>> still have ALIGN.
>>>
>>> FWIW, as I'm looking into using the newly added macros in order to add
>>> annotations suitable for live-patching, I would need to switch some of
>>> the LABEL usages into their own functions, as it's not possible to
>>> livepatch a function that has labels jumped into from code paths
>>> outside of the function.
>>
>> Hmm, I'm not sure what the best way is to overcome that restriction. I'm
>> not convinced we want to arbitrarily name things "functions".
> 
> Any external entry point in the middle of a function-like block will
> prevent it from being live patched.

Is there actually any particular reason for this restriction? As long
as old and new code have the same external entry points, redirecting
all old ones to their new counterparts would seem feasible.

> If you want I can try to do a pass on top of your patch and see how
> that would end up looking.  I'm attempting to think about other
> solutions, but every other solution seems quite horrible.

Right, but splitting functions into piecemeal fragments isn't going
to be very nice either.

>>>> --- a/xen/arch/x86/include/asm/asm_defns.h
>>>> +++ b/xen/arch/x86/include/asm/asm_defns.h
>>>> @@ -81,6 +81,45 @@ register unsigned long current_stack_poi
>>>>  
>>>>  #ifdef __ASSEMBLY__
>>>>  
>>>> +#define SYM_ALIGN(algn...) .balign algn
>>>> +
>>>> +#define SYM_L_GLOBAL(name) .globl name
>>>> +#define SYM_L_WEAK(name)   .weak name
>>>
>>> Wouldn't this be better added when required?  I can't spot any weak
>>> symbols in assembly ATM, and you don't introduce any _WEAK macro
>>> variants below.
>>
>> Well, Andrew specifically mentioned the desire to also have Linux'es
>> support for weak symbols. Hence I decided to add it here despite
>> (for now) being unused. I can certainly drop that again, but in
>> particular if we wanted to use the scheme globally, I think we may
>> want to make it "complete".
> 
> OK, as long as we know it's unused.

I've added a sentence to this effect to the description.

>>>> +#define SYM_L_LOCAL(name)  /* nothing */
>>>> +
>>>> +#define SYM_T_FUNC         STT_FUNC
>>>> +#define SYM_T_DATA         STT_OBJECT
>>>> +#define SYM_T_NONE         STT_NOTYPE
>>>> +
>>>> +#define SYM(name, typ, linkage, algn...)          \
>>>> +        .type name, SYM_T_ ## typ;                \
>>>> +        SYM_L_ ## linkage(name);                  \
>>>> +        SYM_ALIGN(algn);                          \
>>>> +        name:
>>>> +
>>>> +#define END(name) .size name, . - name
>>>> +
>>>> +#define ARG1_(x, y...) (x)
>>>> +#define ARG2_(x, y...) ARG1_(y)
>>>> +
>>>> +#define LAST__(nr) ARG ## nr ## _
>>>> +#define LAST_(nr)  LAST__(nr)
>>>> +#define LAST(x, y...) LAST_(count_args(x, ## y))(x, ## y)
>>>
>>> I find LAST not very descriptive; wouldn't it be better named OPTIONAL()
>>> or similar? (and maybe placed in lib.h?)
>>
>> I don't think OPTIONAL describes the purpose. I truly mean "last" here.
>> As to placing in lib.h - perhaps, but then we may want to have forms
>> with more than 2 arguments right away (and it would be a little unclear
>> how far up to go).
> 
> Hm, I would be fine with adding that version with just 2 arguments, as
> it's better to have the helper in a generic place IMO.

I'll think about this some more.

>>>> +
>>>> +#define FUNC(name, algn...) \
>>>> +        SYM(name, FUNC, GLOBAL, LAST(16, ## algn), 0x90)
>>>
>>> A rant: should the alignment of functions use a different padding?
>>> (ie: ret or ud2?) In order to prevent stray jumps falling in the
>>> padding and falling through into the next function.  That would also
>>> prevent the implicit fall-through used in some places.
>>
>> Yes, but that's a separate topic (for which iirc patches are pending
>> as well, just of course not integrated with the work here). There's
>> the slight risk of overlooking some "fall-through" case ...
> 
> Oh, OK, wasn't aware patches are floating for this already, just came
> across it while reviewing.

Well, those don't cover padding yet, but they deal with straight-line
speculation past RET or JMP.

>>>>          sti
>>>>          call  do_softirq
>>>>          jmp   compat_test_all_events
>>>>  
>>>> -        ALIGN
>>>>  /* %rbx: struct vcpu, %rdx: struct trap_bounce */
>>>> -.Lcompat_process_trapbounce:
>>>> +LABEL_LOCAL(.Lcompat_process_trapbounce)
>>>
>>> It's my understanding that here the '.L' prefix is pointless, since
>>> LABEL_LOCAL() will forcefully create a symbol for the label due to the
>>> usage of .type?
>>
>> I don't think .type has this effect. There's certainly no such label in
>> the symbol table of the object file I have as a result.
> 
> I was expecting .type to force the creation of a symbol, so the '.L'
> prefix does prevent the symbol from being created even if .type is
> specified.
> 
> Shouldn't the assembler complain that we are attempting to set a type
> for a not present symbol?

But .L symbols are still normal symbols to gas, just that it knows to not
emit them to the symbol table (unless there's a need, e.g. through a use
in a relocation that cannot be expressed as a section-relative one). It
could flag the pointless use, but then it may get this wrong if in the
end the symbol does need emitting.
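
A small example of the effect (using the STT_NOTYPE spelling gas
accepts, as in the macros above):

    .type .Ldemo, STT_NOTYPE
    .Ldemo:
            nop

assembles without complaint, yet .Ldemo normally doesn't show up in
the resulting object's symbol table (e.g. in "objdump -t" output).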

Jan
Roger Pau Monné May 30, 2023, 3:15 p.m. UTC | #5
On Tue, May 30, 2023 at 04:23:21PM +0200, Jan Beulich wrote:
> On 30.05.2023 15:21, Roger Pau Monné wrote:
> > On Tue, May 30, 2023 at 10:06:27AM +0200, Jan Beulich wrote:
> >> On 29.05.2023 15:34, Roger Pau Monné wrote:
> >>> On Tue, May 23, 2023 at 01:30:51PM +0200, Jan Beulich wrote:
> >>>> Note that the FB-label in autogen_stubs() cannot be converted just yet:
> >>>> Such labels cannot be used with .type. We could further diverge from
> >>>> Linux'es model and avoid setting STT_NOTYPE explicitly (that's the type
> >>>> labels get by default anyway).
> >>>>
> >>>> Note that we can't use ALIGN() (in place of SYM_ALIGN()) as long as we
> >>>> still have ALIGN.
> >>>
> >>> FWIW, as I'm looking into using the newly added macros in order to add
> >>> annotations suitable for live-patching, I would need to switch some of
> >>> the LABEL usages into their own functions, as it's not possible to
> >>> livepatch a function that has labels jumped into from code paths
> >>> outside of the function.
> >>
> >> Hmm, I'm not sure what the best way is to overcome that restriction. I'm
> >> not convinced we want to arbitrarily name things "functions".
> > 
> > Any external entry point in the middle of a function-like block will
> > prevent it from being live patched.
> 
> Is there actually any particular reason for this restriction? As long
> as old and new code have the same external entry points, redirecting
> all old ones to their new counterparts would seem feasible.

Yes, that was another option: we could force asm patching to always be
done with a jump (instead of in-place) and then add jumps at the old
entry point addresses in order to redirect to the new addresses.

Or assert that the addresses of any symbols inside the function are not
changed in order to do in-place replacement of code.
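
To sketch that first option (hypothetical labels; the jmp would be
written at patch-apply time):

    old_entry:                       # pre-existing external entry point
            jmp   new_entry          # redirect into the replacement code
            # remainder of the old body becomes unreachable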

> > If you want I can try to do a pass on top of your patch and see how
> > that would end up looking.  I'm attempting to think about other
> > solutions, but every other solution seems quite horrible.
> 
> Right, but splitting functions into piecemeal fragments isn't going
> to be very nice either.

I'm not sure how much splitting would be required TBH.

> >>>> +
> >>>> +#define FUNC(name, algn...) \
> >>>> +        SYM(name, FUNC, GLOBAL, LAST(16, ## algn), 0x90)
> >>>
> >>> A rant: should the alignment of functions use a different padding?
> >>> (ie: ret or ud2?) In order to prevent stray jumps falling in the
> >>> padding and falling through into the next function.  That would also
> >>> prevent the implicit fall-through used in some places.
> >>
> >> Yes, but that's a separate topic (for which iirc patches are pending
> >> as well, just of course not integrated with the work here). There's
> >> the slight risk of overlooking some "fall-through" case ...
> > 
> > Oh, OK, wasn't aware patches are floating for this already, just came
> > across it while reviewing.
> 
> Well, those don't cover padding yet, but they deal with straight-line
> speculation past RET or JMP.

Introducing the helpers does make it easy to convert the padding for
all the existing users at least.

> >>>>          sti
> >>>>          call  do_softirq
> >>>>          jmp   compat_test_all_events
> >>>>  
> >>>> -        ALIGN
> >>>>  /* %rbx: struct vcpu, %rdx: struct trap_bounce */
> >>>> -.Lcompat_process_trapbounce:
> >>>> +LABEL_LOCAL(.Lcompat_process_trapbounce)
> >>>
> >>> It's my understanding that here the '.L' prefix is pointless, since
> >>> LABEL_LOCAL() will forcefully create a symbol for the label due to the
> >>> usage of .type?
> >>
> >> I don't think .type has this effect. There's certainly no such label in
> >> the symbol table of the object file I have as a result.
> > 
> > I was expecting .type to force the creation of a symbol, so the '.L'
> > prefix does prevent the symbol from being created even if .type is
> > specified.
> > 
> > Shouldn't the assembler complain that we are attempting to set a type
> > for a symbol that isn't present?
> 
> But .L symbols are still normal symbols to gas, just that it knows to not
> emit them to the symbol table (unless there's a need, e.g. through a use
> in a relocation that cannot be expressed as a section-relative one). It
> could flag the pointless use, but then it may get this wrong if in the
> end the symbol does need emitting.

Thanks for the explanation.

Roger.

Patch

--- a/xen/arch/x86/include/asm/asm_defns.h
+++ b/xen/arch/x86/include/asm/asm_defns.h
@@ -81,6 +81,45 @@  register unsigned long current_stack_poi
 
 #ifdef __ASSEMBLY__
 
+#define SYM_ALIGN(algn...) .balign algn
+
+#define SYM_L_GLOBAL(name) .globl name
+#define SYM_L_WEAK(name)   .weak name
+#define SYM_L_LOCAL(name)  /* nothing */
+
+#define SYM_T_FUNC         STT_FUNC
+#define SYM_T_DATA         STT_OBJECT
+#define SYM_T_NONE         STT_NOTYPE
+
+#define SYM(name, typ, linkage, algn...)          \
+        .type name, SYM_T_ ## typ;                \
+        SYM_L_ ## linkage(name);                  \
+        SYM_ALIGN(algn);                          \
+        name:
+
+#define END(name) .size name, . - name
+
+#define ARG1_(x, y...) (x)
+#define ARG2_(x, y...) ARG1_(y)
+
+#define LAST__(nr) ARG ## nr ## _
+#define LAST_(nr)  LAST__(nr)
+#define LAST(x, y...) LAST_(count_args(x, ## y))(x, ## y)
+
+#define FUNC(name, algn...) \
+        SYM(name, FUNC, GLOBAL, LAST(16, ## algn), 0x90)
+#define LABEL(name, algn...) \
+        SYM(name, NONE, GLOBAL, LAST(16, ## algn), 0x90)
+#define DATA(name, algn...) \
+        SYM(name, DATA, GLOBAL, LAST(0, ## algn), 0xff)
+
+#define FUNC_LOCAL(name, algn...) \
+        SYM(name, FUNC, LOCAL, LAST(16, ## algn), 0x90)
+#define LABEL_LOCAL(name, algn...) \
+        SYM(name, NONE, LOCAL, LAST(16, ## algn), 0x90)
+#define DATA_LOCAL(name, algn...) \
+        SYM(name, DATA, LOCAL, LAST(0, ## algn), 0xff)
+
 #ifdef HAVE_AS_QUOTED_SYM
 #define SUBSECTION_LBL(tag)                        \
         .ifndef .L.tag;                            \
--- a/xen/arch/x86/x86_64/compat/entry.S
+++ b/xen/arch/x86/x86_64/compat/entry.S
@@ -8,10 +8,11 @@ 
 #include <asm/page.h>
 #include <asm/processor.h>
 #include <asm/desc.h>
+#include <xen/lib.h>
 #include <public/xen.h>
 #include <irq_vectors.h>
 
-ENTRY(entry_int82)
+FUNC(entry_int82)
         ENDBR64
         ALTERNATIVE "", clac, X86_FEATURE_XEN_SMAP
         pushq $0
@@ -27,9 +28,10 @@  ENTRY(entry_int82)
 
         mov   %rsp, %rdi
         call  do_entry_int82
+END(entry_int82)
 
 /* %rbx: struct vcpu */
-ENTRY(compat_test_all_events)
+FUNC(compat_test_all_events)
         ASSERT_NOT_IN_ATOMIC
         cli                             # tests must not race interrupts
 /*compat_test_softirqs:*/
@@ -66,24 +68,21 @@  compat_test_guest_events:
         call  compat_create_bounce_frame
         jmp   compat_test_all_events
 
-        ALIGN
 /* %rbx: struct vcpu */
-compat_process_softirqs:
+LABEL_LOCAL(compat_process_softirqs)
         sti
         call  do_softirq
         jmp   compat_test_all_events
 
-        ALIGN
 /* %rbx: struct vcpu, %rdx: struct trap_bounce */
-.Lcompat_process_trapbounce:
+LABEL_LOCAL(.Lcompat_process_trapbounce)
         sti
 .Lcompat_bounce_exception:
         call  compat_create_bounce_frame
         jmp   compat_test_all_events
 
-	ALIGN
 /* %rbx: struct vcpu */
-compat_process_mce:
+LABEL_LOCAL(compat_process_mce)
         testb $1 << VCPU_TRAP_MCE,VCPU_async_exception_mask(%rbx)
         jnz   .Lcompat_test_guest_nmi
         sti
@@ -97,9 +96,8 @@  compat_process_mce:
         movb %dl,VCPU_async_exception_mask(%rbx)
         jmp   compat_process_trap
 
-	ALIGN
 /* %rbx: struct vcpu */
-compat_process_nmi:
+LABEL_LOCAL(compat_process_nmi)
         testb $1 << VCPU_TRAP_NMI,VCPU_async_exception_mask(%rbx)
         jnz   compat_test_guest_events
         sti
@@ -116,9 +114,10 @@  compat_process_trap:
         leaq  VCPU_trap_bounce(%rbx),%rdx
         call  compat_create_bounce_frame
         jmp   compat_test_all_events
+END(compat_test_all_events)
 
 /* %rbx: struct vcpu, interrupts disabled */
-ENTRY(compat_restore_all_guest)
+FUNC(compat_restore_all_guest)
         ASSERT_INTERRUPTS_DISABLED
         mov   $~(X86_EFLAGS_IOPL | X86_EFLAGS_VM), %r11d
         and   UREGS_eflags(%rsp),%r11d
@@ -161,9 +160,10 @@  ENTRY(compat_restore_all_guest)
         RESTORE_ALL adj=8 compat=1
 .Lft0:  iretq
         _ASM_PRE_EXTABLE(.Lft0, handle_exception)
+END(compat_restore_all_guest)
 
 /* This mustn't modify registers other than %rax. */
-ENTRY(cr4_pv32_restore)
+FUNC(cr4_pv32_restore)
         push  %rdx
         GET_CPUINFO_FIELD(cr4, dx)
         mov   (%rdx), %rax
@@ -193,8 +193,9 @@  ENTRY(cr4_pv32_restore)
         pop   %rdx
         xor   %eax, %eax
         ret
+END(cr4_pv32_restore)
 
-ENTRY(compat_syscall)
+FUNC(compat_syscall)
         /* Fix up reported %cs/%ss for compat domains. */
         movl  $FLAT_COMPAT_USER_SS, UREGS_ss(%rsp)
         movl  $FLAT_COMPAT_USER_CS, UREGS_cs(%rsp)
@@ -222,8 +223,9 @@  UNLIKELY_END(compat_syscall_gpf)
         movw  %si,TRAPBOUNCE_cs(%rdx)
         movb  %cl,TRAPBOUNCE_flags(%rdx)
         jmp   .Lcompat_bounce_exception
+END(compat_syscall)
 
-ENTRY(compat_sysenter)
+FUNC(compat_sysenter)
         CR4_PV32_RESTORE
         movq  VCPU_trap_ctxt(%rbx),%rcx
         cmpb  $X86_EXC_GP, UREGS_entry_vector(%rsp)
@@ -236,17 +238,19 @@  ENTRY(compat_sysenter)
         movw  %ax,TRAPBOUNCE_cs(%rdx)
         call  compat_create_bounce_frame
         jmp   compat_test_all_events
+END(compat_sysenter)
 
-ENTRY(compat_int80_direct_trap)
+FUNC(compat_int80_direct_trap)
         CR4_PV32_RESTORE
         call  compat_create_bounce_frame
         jmp   compat_test_all_events
+END(compat_int80_direct_trap)
 
 /* CREATE A BASIC EXCEPTION FRAME ON GUEST OS (RING-1) STACK:            */
 /*   {[ERRCODE,] EIP, CS, EFLAGS, [ESP, SS]}                             */
 /* %rdx: trap_bounce, %rbx: struct vcpu                                  */
 /* On return only %rbx and %rdx are guaranteed non-clobbered.            */
-compat_create_bounce_frame:
+FUNC_LOCAL(compat_create_bounce_frame)
         ASSERT_INTERRUPTS_ENABLED
         mov   %fs,%edi
         ALTERNATIVE "", stac, X86_FEATURE_XEN_SMAP
@@ -352,3 +356,4 @@  compat_crash_page_fault:
         jmp   .Lft14
 .previous
         _ASM_EXTABLE(.Lft14, .Lfx14)
+END(compat_create_bounce_frame)
--- a/xen/arch/x86/x86_64/entry.S
+++ b/xen/arch/x86/x86_64/entry.S
@@ -9,6 +9,7 @@ 
 #include <asm/asm_defns.h>
 #include <asm/page.h>
 #include <asm/processor.h>
+#include <xen/lib.h>
 #include <public/xen.h>
 #include <irq_vectors.h>
 
@@ -24,7 +25,7 @@ 
 
 #ifdef CONFIG_PV
 /* %rbx: struct vcpu */
-switch_to_kernel:
+FUNC_LOCAL(switch_to_kernel)
         leaq  VCPU_trap_bounce(%rbx),%rdx
 
         /* TB_eip = 32-bit syscall ? syscall32_addr : syscall_addr */
@@ -89,24 +90,21 @@  test_guest_events:
         call  create_bounce_frame
         jmp   test_all_events
 
-        ALIGN
 /* %rbx: struct vcpu */
-process_softirqs:
+LABEL_LOCAL(process_softirqs)
         sti
         call do_softirq
         jmp  test_all_events
 
-        ALIGN
 /* %rbx: struct vcpu, %rdx struct trap_bounce */
-.Lprocess_trapbounce:
+LABEL_LOCAL(.Lprocess_trapbounce)
         sti
 .Lbounce_exception:
         call  create_bounce_frame
         jmp   test_all_events
 
-        ALIGN
 /* %rbx: struct vcpu */
-process_mce:
+LABEL_LOCAL(process_mce)
         testb $1 << VCPU_TRAP_MCE, VCPU_async_exception_mask(%rbx)
         jnz  .Ltest_guest_nmi
         sti
@@ -120,9 +118,8 @@  process_mce:
         movb %dl, VCPU_async_exception_mask(%rbx)
         jmp  process_trap
 
-        ALIGN
 /* %rbx: struct vcpu */
-process_nmi:
+LABEL_LOCAL(process_nmi)
         testb $1 << VCPU_TRAP_NMI, VCPU_async_exception_mask(%rbx)
         jnz  test_guest_events
         sti
@@ -139,11 +136,12 @@  process_trap:
         leaq VCPU_trap_bounce(%rbx), %rdx
         call create_bounce_frame
         jmp  test_all_events
+END(switch_to_kernel)
 
         .section .text.entry, "ax", @progbits
 
 /* %rbx: struct vcpu, interrupts disabled */
-restore_all_guest:
+FUNC_LOCAL(restore_all_guest)
         ASSERT_INTERRUPTS_DISABLED
 
         /* Stash guest SPEC_CTRL value while we can read struct vcpu. */
@@ -220,8 +218,7 @@  restore_all_guest:
         sysretq
 1:      sysretl
 
-        ALIGN
-.Lrestore_rcx_iret_exit_to_guest:
+LABEL_LOCAL(.Lrestore_rcx_iret_exit_to_guest)
         movq  8(%rsp), %rcx           # RIP
 /* No special register assumptions. */
 iret_exit_to_guest:
@@ -230,6 +227,7 @@  iret_exit_to_guest:
         addq  $8,%rsp
 .Lft0:  iretq
         _ASM_PRE_EXTABLE(.Lft0, handle_exception)
+END(restore_all_guest)
 
 /*
  * When entering SYSCALL from kernel mode:
@@ -246,7 +244,7 @@  iret_exit_to_guest:
  *  - Guest %rsp stored in %rax
  *  - Xen stack loaded, pointing at the %ss slot
  */
-ENTRY(lstar_enter)
+FUNC(lstar_enter)
 #ifdef CONFIG_XEN_SHSTK
         ALTERNATIVE "", "setssbsy", X86_FEATURE_XEN_SHSTK
 #endif
@@ -281,9 +279,10 @@  ENTRY(lstar_enter)
         mov   %rsp, %rdi
         call  pv_hypercall
         jmp   test_all_events
+END(lstar_enter)
 
 /* See lstar_enter for entry register state. */
-ENTRY(cstar_enter)
+FUNC(cstar_enter)
 #ifdef CONFIG_XEN_SHSTK
         ALTERNATIVE "", "setssbsy", X86_FEATURE_XEN_SHSTK
 #endif
@@ -321,8 +320,9 @@  ENTRY(cstar_enter)
         jne   compat_syscall
 #endif
         jmp   switch_to_kernel
+END(cstar_enter)
 
-ENTRY(sysenter_entry)
+FUNC(sysenter_entry)
         ENDBR64
 #ifdef CONFIG_XEN_SHSTK
         ALTERNATIVE "", "setssbsy", X86_FEATURE_XEN_SHSTK
@@ -331,7 +331,7 @@  ENTRY(sysenter_entry)
         pushq $FLAT_USER_SS
         pushq $0
         pushfq
-GLOBAL(sysenter_eflags_saved)
+LABEL(sysenter_eflags_saved, 0)
         ALTERNATIVE "", clac, X86_FEATURE_XEN_SMAP
         pushq $3 /* ring 3 null cs */
         pushq $0 /* null rip */
@@ -385,8 +385,9 @@  UNLIKELY_END(sysenter_gpf)
         jne   compat_sysenter
 #endif
         jmp   .Lbounce_exception
+END(sysenter_entry)
 
-ENTRY(int80_direct_trap)
+FUNC(int80_direct_trap)
         ENDBR64
         ALTERNATIVE "", clac, X86_FEATURE_XEN_SMAP
         pushq $0
@@ -474,6 +475,7 @@  int80_slow_path:
          */
         GET_STACK_END(14)
         jmp   handle_exception_saved
+END(int80_direct_trap)
 
         /* create_bounce_frame & helpers don't need to be in .text.entry */
         .text
@@ -482,7 +484,7 @@  int80_slow_path:
 /*   { RCX, R11, [ERRCODE,] RIP, CS, RFLAGS, RSP, SS }                   */
 /* %rdx: trap_bounce, %rbx: struct vcpu                                  */
 /* On return only %rbx and %rdx are guaranteed non-clobbered.            */
-create_bounce_frame:
+FUNC_LOCAL(create_bounce_frame)
         ASSERT_INTERRUPTS_ENABLED
         testb $TF_kernel_mode,VCPU_thread_flags(%rbx)
         jnz   1f
@@ -618,6 +620,7 @@  ENTRY(dom_crash_sync_extable)
         xorl  %edi,%edi
         jmp   asm_domain_crash_synchronous /* Does not return */
         .popsection
+END(create_bounce_frame)
 #endif /* CONFIG_PV */
 
 /* --- CODE BELOW THIS LINE (MOSTLY) NOT GUEST RELATED --- */
@@ -626,7 +629,7 @@  ENTRY(dom_crash_sync_extable)
 
 /* No special register assumptions. */
 #ifdef CONFIG_PV
-ENTRY(continue_pv_domain)
+FUNC(continue_pv_domain)
         ENDBR64
         call  check_wakeup_from_wait
 ret_from_intr:
@@ -641,26 +644,28 @@  ret_from_intr:
 #else
         jmp   test_all_events
 #endif
+END(continue_pv_domain)
 #else
-ret_from_intr:
+FUNC(ret_from_intr, 0)
         ASSERT_CONTEXT_IS_XEN
         jmp   restore_all_xen
+END(ret_from_intr)
 #endif
 
         .section .init.text, "ax", @progbits
-ENTRY(early_page_fault)
+FUNC(early_page_fault)
         ENDBR64
         movl  $X86_EXC_PF, 4(%rsp)
         SAVE_ALL
         movq  %rsp, %rdi
         call  do_early_page_fault
         jmp   restore_all_xen
+END(early_page_fault)
 
         .section .text.entry, "ax", @progbits
 
-        ALIGN
 /* No special register assumptions. */
-restore_all_xen:
+FUNC_LOCAL(restore_all_xen)
         /*
          * Check whether we need to switch to the per-CPU page tables, in
          * case we return to late PV exit code (from an NMI or #MC).
@@ -677,8 +682,9 @@  UNLIKELY_END(exit_cr3)
 
         RESTORE_ALL adj=8
         iretq
+END(restore_all_xen)
 
-ENTRY(common_interrupt)
+FUNC(common_interrupt)
         ALTERNATIVE "", clac, X86_FEATURE_XEN_SMAP
         SAVE_ALL
 
@@ -707,12 +713,14 @@  ENTRY(common_interrupt)
         mov   %r15, STACK_CPUINFO_FIELD(xen_cr3)(%r14)
         mov   %bl, STACK_CPUINFO_FIELD(use_pv_cr3)(%r14)
         jmp ret_from_intr
+END(common_interrupt)
 
-ENTRY(page_fault)
+FUNC(page_fault)
         ENDBR64
         movl  $X86_EXC_PF, 4(%rsp)
+END(page_fault)
 /* No special register assumptions. */
-GLOBAL(handle_exception)
+FUNC(handle_exception, 0)
         ALTERNATIVE "", clac, X86_FEATURE_XEN_SMAP
         SAVE_ALL
 
@@ -882,92 +890,108 @@  FATAL_exception_with_ints_disabled:
         movq  %rsp,%rdi
         call  fatal_trap
         BUG   /* fatal_trap() shouldn't return. */
+END(handle_exception)
 
-ENTRY(divide_error)
+FUNC(divide_error)
         ENDBR64
         pushq $0
         movl  $X86_EXC_DE, 4(%rsp)
         jmp   handle_exception
+END(divide_error)
 
-ENTRY(coprocessor_error)
+FUNC(coprocessor_error)
         ENDBR64
         pushq $0
         movl  $X86_EXC_MF, 4(%rsp)
         jmp   handle_exception
+END(coprocessor_error)
 
-ENTRY(simd_coprocessor_error)
+FUNC(simd_coprocessor_error)
         ENDBR64
         pushq $0
         movl  $X86_EXC_XM, 4(%rsp)
         jmp   handle_exception
+END(simd_coprocessor_error)
 
-ENTRY(device_not_available)
+FUNC(device_not_available)
         ENDBR64
         pushq $0
         movl  $X86_EXC_NM, 4(%rsp)
         jmp   handle_exception
+END(device_not_available)
 
-ENTRY(debug)
+FUNC(debug)
         ENDBR64
         pushq $0
         movl  $X86_EXC_DB, 4(%rsp)
         jmp   handle_ist_exception
+END(debug)
 
-ENTRY(int3)
+FUNC(int3)
         ENDBR64
         pushq $0
         movl  $X86_EXC_BP, 4(%rsp)
         jmp   handle_exception
+END(int3)
 
-ENTRY(overflow)
+FUNC(overflow)
         ENDBR64
         pushq $0
         movl  $X86_EXC_OF, 4(%rsp)
         jmp   handle_exception
+END(overflow)
 
-ENTRY(bounds)
+FUNC(bounds)
         ENDBR64
         pushq $0
         movl  $X86_EXC_BR, 4(%rsp)
         jmp   handle_exception
+END(bounds)
 
-ENTRY(invalid_op)
+FUNC(invalid_op)
         ENDBR64
         pushq $0
         movl  $X86_EXC_UD, 4(%rsp)
         jmp   handle_exception
+END(invalid_op)
 
-ENTRY(invalid_TSS)
+FUNC(invalid_TSS)
         ENDBR64
         movl  $X86_EXC_TS, 4(%rsp)
         jmp   handle_exception
+END(invalid_TSS)
 
-ENTRY(segment_not_present)
+FUNC(segment_not_present)
         ENDBR64
         movl  $X86_EXC_NP, 4(%rsp)
         jmp   handle_exception
+END(segment_not_present)
 
-ENTRY(stack_segment)
+FUNC(stack_segment)
         ENDBR64
         movl  $X86_EXC_SS, 4(%rsp)
         jmp   handle_exception
+END(stack_segment)
 
-ENTRY(general_protection)
+FUNC(general_protection)
         ENDBR64
         movl  $X86_EXC_GP, 4(%rsp)
         jmp   handle_exception
+END(general_protection)
 
-ENTRY(alignment_check)
+FUNC(alignment_check)
         ENDBR64
         movl  $X86_EXC_AC, 4(%rsp)
         jmp   handle_exception
+END(alignment_check)
 
-ENTRY(entry_CP)
+FUNC(entry_CP)
         ENDBR64
         movl  $X86_EXC_CP, 4(%rsp)
         jmp   handle_exception
+END(entry_CP)
 
-ENTRY(double_fault)
+FUNC(double_fault)
         ENDBR64
         movl  $X86_EXC_DF, 4(%rsp)
         /* Set AC to reduce chance of further SMAP faults */
@@ -991,8 +1015,9 @@  ENTRY(double_fault)
         movq  %rsp,%rdi
         call  do_double_fault
         BUG   /* do_double_fault() shouldn't return. */
+END(double_fault)
 
-ENTRY(nmi)
+FUNC(nmi)
         ENDBR64
         pushq $0
         movl  $X86_EXC_NMI, 4(%rsp)
@@ -1120,21 +1145,24 @@  handle_ist_exception:
         ASSERT_CONTEXT_IS_XEN
         jmp   restore_all_xen
 #endif
+END(nmi)
 
-ENTRY(machine_check)
+FUNC(machine_check)
         ENDBR64
         pushq $0
         movl  $X86_EXC_MC, 4(%rsp)
         jmp   handle_ist_exception
+END(machine_check)
 
 /* No op trap handler.  Required for kexec crash path. */
-GLOBAL(trap_nop)
+FUNC(trap_nop, 0)
         ENDBR64
         iretq
+END(trap_nop)
 
 /* Table of automatically generated entry points.  One per vector. */
         .pushsection .init.rodata, "a", @progbits
-GLOBAL(autogen_entrypoints)
+DATA(autogen_entrypoints, 8)
         /* pop into the .init.rodata section and record an entry point. */
         .macro entrypoint ent
         .pushsection .init.rodata, "a", @progbits
@@ -1143,7 +1171,7 @@  GLOBAL(autogen_entrypoints)
         .endm
 
         .popsection
-autogen_stubs: /* Automatically generated stubs. */
+FUNC_LOCAL(autogen_stubs, 0) /* Automatically generated stubs. */
 
         vec = 0
         .rept X86_NR_VECTORS
@@ -1187,6 +1215,7 @@  autogen_stubs: /* Automatically generate
 
         vec = vec + 1
         .endr
+END(autogen_stubs)
 
         .section .init.rodata, "a", @progbits
-        .size autogen_entrypoints, . - autogen_entrypoints
+END(autogen_entrypoints)