diff mbox series

[10/13] arm64: extable: add `type` and `data` fields

Message ID 20211013110059.10324-11-mark.rutland@arm.com (mailing list archive)
State New, archived
Headers show
Series arm64: extable: remove anonymous out-of-line fixups | expand

Commit Message

Mark Rutland Oct. 13, 2021, 11 a.m. UTC
Subsequent patches will add specialized handlers for fixups, in addition
to the simple PC fixup and BPF handlers we have today. In preparation,
this patch adds a new `type` field to struct exception_table_entry, and
uses this to distinguish the fixup and BPF cases. A `data` field is also
added so that subsequent patches can associate data specific to each
exception site (e.g. register numbers).

Handlers are named ex_handler_*() for consistency, following the example
of x86. At the same time, get_ex_fixup() is split out into a helper so
that it can be used by other ex_handler_*() functions in subsequent
patches.

This patch will increase the size of the exception tables, which will be
remedied by subsequent patches removing redundant fixup code. There
should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Andrii Nakryiko <andrii@kernel.org>
Cc: Ard Biesheuvel <ardb@kernel.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Daniel Borkmann <daniel@iogearbox.net>
Cc: James Morse <james.morse@arm.com>
Cc: Jean-Philippe Brucker <jean-philippe@linaro.org>
Cc: Robin Murphy <robin.murphy@arm.com>
Cc: Will Deacon <will@kernel.org>
---
 arch/arm64/include/asm/asm-extable.h | 32 ++++++++++++++++++++------------
 arch/arm64/include/asm/extable.h     | 19 +++++++++++++++----
 arch/arm64/mm/extable.c              | 29 +++++++++++++++++++++++++----
 arch/arm64/net/bpf_jit_comp.c        |  7 +++++--
 scripts/sorttable.c                  | 30 ++++++++++++++++++++++++++++++
 5 files changed, 95 insertions(+), 22 deletions(-)

Comments

Will Deacon Oct. 19, 2021, 11:29 a.m. UTC | #1
On Wed, Oct 13, 2021 at 12:00:56PM +0100, Mark Rutland wrote:
> Subsequent patches will add specialized handlers for fixups, in addition
> to the simple PC fixup and BPF handlers we have today. In preparation,
> this patch adds a new `type` field to struct exception_table_entry, and
> uses this to distinguish the fixup and BPF cases. A `data` field is also
> added so that subsequent patches can associate data specific to each
> exception site (e.g. register numbers).
> 
> Handlers are named ex_handler_*() for consistency, following the example
> of x86. At the same time, get_ex_fixup() is split out into a helper so
> that it can be used by other ex_handler_*() functions in subsequent
> patches.
> 
> This patch will increase the size of the exception tables, which will be
> remedied by subsequent patches removing redundant fixup code. There
> should be no functional change as a result of this patch.
> 
> Signed-off-by: Mark Rutland <mark.rutland@arm.com>
> Cc: Alexei Starovoitov <ast@kernel.org>
> Cc: Andrii Nakryiko <andrii@kernel.org>
> Cc: Ard Biesheuvel <ardb@kernel.org>
> Cc: Catalin Marinas <catalin.marinas@arm.com>
> Cc: Daniel Borkmann <daniel@iogearbox.net>
> Cc: James Morse <james.morse@arm.com>
> Cc: Jean-Philippe Brucker <jean-philippe@linaro.org>
> Cc: Robin Murphy <robin.murphy@arm.com>
> Cc: Will Deacon <will@kernel.org>
> ---
>  arch/arm64/include/asm/asm-extable.h | 32 ++++++++++++++++++++------------
>  arch/arm64/include/asm/extable.h     | 19 +++++++++++++++----
>  arch/arm64/mm/extable.c              | 29 +++++++++++++++++++++++++----
>  arch/arm64/net/bpf_jit_comp.c        |  7 +++++--
>  scripts/sorttable.c                  | 30 ++++++++++++++++++++++++++++++
>  5 files changed, 95 insertions(+), 22 deletions(-)
> 
> diff --git a/arch/arm64/include/asm/asm-extable.h b/arch/arm64/include/asm/asm-extable.h
> index 986b4c0d4792..5ee748edaef1 100644
> --- a/arch/arm64/include/asm/asm-extable.h
> +++ b/arch/arm64/include/asm/asm-extable.h
> @@ -2,13 +2,19 @@
>  #ifndef __ASM_ASM_EXTABLE_H
>  #define __ASM_ASM_EXTABLE_H
>  
> +#define EX_TYPE_NONE			0
> +#define EX_TYPE_FIXUP			1
> +#define EX_TYPE_BPF			2
> +
>  #ifdef __ASSEMBLY__
>  
> -#define __ASM_EXTABLE_RAW(insn, fixup)		\
> -	.pushsection	__ex_table, "a";	\
> -	.align		3;			\
> -	.long		((insn) - .);		\
> -	.long		((fixup) - .);		\
> +#define __ASM_EXTABLE_RAW(insn, fixup, type, data)	\
> +	.pushsection	__ex_table, "a";		\
> +	.align		2;				\
> +	.long		((insn) - .);			\
> +	.long		((fixup) - .);			\
> +	.short		(type);				\
> +	.short		(data);				\

Why are you reducing the alignment here?

> diff --git a/scripts/sorttable.c b/scripts/sorttable.c
> index 6ee4fa882919..ee95bb47a50d 100644
> --- a/scripts/sorttable.c
> +++ b/scripts/sorttable.c
> @@ -231,6 +231,34 @@ static void sort_relative_table(char *extab_image, int image_size)
>  	}
>  }
>  
> +static void arm64_sort_relative_table(char *extab_image, int image_size)
> +{
> +	int i = 0;
> +
> +	while (i < image_size) {
> +		uint32_t *loc = (uint32_t *)(extab_image + i);
> +
> +		w(r(loc) + i, loc);
> +		w(r(loc + 1) + i + 4, loc + 1);
> +		/* Don't touch the fixup type or data */
> +
> +		i += sizeof(uint32_t) * 3;
> +	}
> +
> +	qsort(extab_image, image_size / 12, 12, compare_relative_table);
> +
> +	i = 0;
> +	while (i < image_size) {
> +		uint32_t *loc = (uint32_t *)(extab_image + i);
> +
> +		w(r(loc) - i, loc);
> +		w(r(loc + 1) - (i + 4), loc + 1);
> +		/* Don't touch the fixup type or data */
> +
> +		i += sizeof(uint32_t) * 3;
> +	}
> +}

This is very nearly a direct copy of x86_sort_relative_table() (magic
numbers and all). It would be nice to tidy that up, but I couldn't
immediately see a good way to do it :(

Will
Mark Rutland Oct. 19, 2021, 11:50 a.m. UTC | #2
On Tue, Oct 19, 2021 at 12:29:55PM +0100, Will Deacon wrote:
> On Wed, Oct 13, 2021 at 12:00:56PM +0100, Mark Rutland wrote:
> > Subsequent patches will add specialized handlers for fixups, in addition
> > to the simple PC fixup and BPF handlers we have today. In preparation,
> > this patch adds a new `type` field to struct exception_table_entry, and
> > uses this to distinguish the fixup and BPF cases. A `data` field is also
> > added so that subsequent patches can associate data specific to each
> > exception site (e.g. register numbers).
> > 
> > Handlers are named ex_handler_*() for consistency, following the example
> > of x86. At the same time, get_ex_fixup() is split out into a helper so
> > that it can be used by other ex_handler_*() functions in subsequent
> > patches.
> > 
> > This patch will increase the size of the exception tables, which will be
> > remedied by subsequent patches removing redundant fixup code. There
> > should be no functional change as a result of this patch.
> > 
> > Signed-off-by: Mark Rutland <mark.rutland@arm.com>
> > Cc: Alexei Starovoitov <ast@kernel.org>
> > Cc: Andrii Nakryiko <andrii@kernel.org>
> > Cc: Ard Biesheuvel <ardb@kernel.org>
> > Cc: Catalin Marinas <catalin.marinas@arm.com>
> > Cc: Daniel Borkmann <daniel@iogearbox.net>
> > Cc: James Morse <james.morse@arm.com>
> > Cc: Jean-Philippe Brucker <jean-philippe@linaro.org>
> > Cc: Robin Murphy <robin.murphy@arm.com>
> > Cc: Will Deacon <will@kernel.org>
> > ---
> >  arch/arm64/include/asm/asm-extable.h | 32 ++++++++++++++++++++------------
> >  arch/arm64/include/asm/extable.h     | 19 +++++++++++++++----
> >  arch/arm64/mm/extable.c              | 29 +++++++++++++++++++++++++----
> >  arch/arm64/net/bpf_jit_comp.c        |  7 +++++--
> >  scripts/sorttable.c                  | 30 ++++++++++++++++++++++++++++++
> >  5 files changed, 95 insertions(+), 22 deletions(-)
> > 
> > diff --git a/arch/arm64/include/asm/asm-extable.h b/arch/arm64/include/asm/asm-extable.h
> > index 986b4c0d4792..5ee748edaef1 100644
> > --- a/arch/arm64/include/asm/asm-extable.h
> > +++ b/arch/arm64/include/asm/asm-extable.h
> > @@ -2,13 +2,19 @@
> >  #ifndef __ASM_ASM_EXTABLE_H
> >  #define __ASM_ASM_EXTABLE_H
> >  
> > +#define EX_TYPE_NONE			0
> > +#define EX_TYPE_FIXUP			1
> > +#define EX_TYPE_BPF			2
> > +
> >  #ifdef __ASSEMBLY__
> >  
> > -#define __ASM_EXTABLE_RAW(insn, fixup)		\
> > -	.pushsection	__ex_table, "a";	\
> > -	.align		3;			\
> > -	.long		((insn) - .);		\
> > -	.long		((fixup) - .);		\
> > +#define __ASM_EXTABLE_RAW(insn, fixup, type, data)	\
> > +	.pushsection	__ex_table, "a";		\
> > +	.align		2;				\
> > +	.long		((insn) - .);			\
> > +	.long		((fixup) - .);			\
> > +	.short		(type);				\
> > +	.short		(data);				\
> 
> Why are you reducing the alignment here?

That's because the size of each entry is now 12 bytes, and 
`.align 3` aligns to 8 bytes, which would leave a gap between entries.
We only require the fields are naturally aligned, so `.align 2` is
sufficient, and doesn't waste space.

I'll update the commit message to call that out.

> > diff --git a/scripts/sorttable.c b/scripts/sorttable.c
> > index 6ee4fa882919..ee95bb47a50d 100644
> > --- a/scripts/sorttable.c
> > +++ b/scripts/sorttable.c
> > @@ -231,6 +231,34 @@ static void sort_relative_table(char *extab_image, int image_size)
> >  	}
> >  }
> >  
> > +static void arm64_sort_relative_table(char *extab_image, int image_size)
> > +{
> > +	int i = 0;
> > +
> > +	while (i < image_size) {
> > +		uint32_t *loc = (uint32_t *)(extab_image + i);
> > +
> > +		w(r(loc) + i, loc);
> > +		w(r(loc + 1) + i + 4, loc + 1);
> > +		/* Don't touch the fixup type or data */
> > +
> > +		i += sizeof(uint32_t) * 3;
> > +	}
> > +
> > +	qsort(extab_image, image_size / 12, 12, compare_relative_table);
> > +
> > +	i = 0;
> > +	while (i < image_size) {
> > +		uint32_t *loc = (uint32_t *)(extab_image + i);
> > +
> > +		w(r(loc) - i, loc);
> > +		w(r(loc + 1) - (i + 4), loc + 1);
> > +		/* Don't touch the fixup type or data */
> > +
> > +		i += sizeof(uint32_t) * 3;
> > +	}
> > +}
> 
> This is very nearly a direct copy of x86_sort_relative_table() (magic
> numbers and all). It would be nice to tidy that up, but I couldn't
> immediately see a good way to do it :(

Beware that's true in linux-next, but not mainline, as that changes in
commit:

  46d28947d9876fc0 ("x86/extable: Rework the exception table mechanics")

A patch to unify the two is trivial, but will cause a cross-tree
dependency, so I'd suggest having this separate for now and sending a
unification patch come -rc1.

I can note something to that effect in the commit message, if that
helps?

Thanks,
Mark.
Will Deacon Oct. 19, 2021, 12:05 p.m. UTC | #3
On Tue, Oct 19, 2021 at 12:50:22PM +0100, Mark Rutland wrote:
> On Tue, Oct 19, 2021 at 12:29:55PM +0100, Will Deacon wrote:
> > On Wed, Oct 13, 2021 at 12:00:56PM +0100, Mark Rutland wrote:
> > > Subsequent patches will add specialized handlers for fixups, in addition
> > > to the simple PC fixup and BPF handlers we have today. In preparation,
> > > this patch adds a new `type` field to struct exception_table_entry, and
> > > uses this to distinguish the fixup and BPF cases. A `data` field is also
> > > added so that subsequent patches can associate data specific to each
> > > exception site (e.g. register numbers).
> > > 
> > > Handlers are named ex_handler_*() for consistency, following the example
> > > of x86. At the same time, get_ex_fixup() is split out into a helper so
> > > that it can be used by other ex_handler_*() functions in subsequent
> > > patches.
> > > 
> > > This patch will increase the size of the exception tables, which will be
> > > remedied by subsequent patches removing redundant fixup code. There
> > > should be no functional change as a result of this patch.
> > > 
> > > Signed-off-by: Mark Rutland <mark.rutland@arm.com>
> > > Cc: Alexei Starovoitov <ast@kernel.org>
> > > Cc: Andrii Nakryiko <andrii@kernel.org>
> > > Cc: Ard Biesheuvel <ardb@kernel.org>
> > > Cc: Catalin Marinas <catalin.marinas@arm.com>
> > > Cc: Daniel Borkmann <daniel@iogearbox.net>
> > > Cc: James Morse <james.morse@arm.com>
> > > Cc: Jean-Philippe Brucker <jean-philippe@linaro.org>
> > > Cc: Robin Murphy <robin.murphy@arm.com>
> > > Cc: Will Deacon <will@kernel.org>
> > > ---
> > >  arch/arm64/include/asm/asm-extable.h | 32 ++++++++++++++++++++------------
> > >  arch/arm64/include/asm/extable.h     | 19 +++++++++++++++----
> > >  arch/arm64/mm/extable.c              | 29 +++++++++++++++++++++++++----
> > >  arch/arm64/net/bpf_jit_comp.c        |  7 +++++--
> > >  scripts/sorttable.c                  | 30 ++++++++++++++++++++++++++++++
> > >  5 files changed, 95 insertions(+), 22 deletions(-)
> > > 
> > > diff --git a/arch/arm64/include/asm/asm-extable.h b/arch/arm64/include/asm/asm-extable.h
> > > index 986b4c0d4792..5ee748edaef1 100644
> > > --- a/arch/arm64/include/asm/asm-extable.h
> > > +++ b/arch/arm64/include/asm/asm-extable.h
> > > @@ -2,13 +2,19 @@
> > >  #ifndef __ASM_ASM_EXTABLE_H
> > >  #define __ASM_ASM_EXTABLE_H
> > >  
> > > +#define EX_TYPE_NONE			0
> > > +#define EX_TYPE_FIXUP			1
> > > +#define EX_TYPE_BPF			2
> > > +
> > >  #ifdef __ASSEMBLY__
> > >  
> > > -#define __ASM_EXTABLE_RAW(insn, fixup)		\
> > > -	.pushsection	__ex_table, "a";	\
> > > -	.align		3;			\
> > > -	.long		((insn) - .);		\
> > > -	.long		((fixup) - .);		\
> > > +#define __ASM_EXTABLE_RAW(insn, fixup, type, data)	\
> > > +	.pushsection	__ex_table, "a";		\
> > > +	.align		2;				\
> > > +	.long		((insn) - .);			\
> > > +	.long		((fixup) - .);			\
> > > +	.short		(type);				\
> > > +	.short		(data);				\
> > 
> > Why are you reducing the alignment here?
> 
> That's because the size of each entry is now 12 bytes, and 
> `.align 3` aligns to 8 bytes, which would leave a gap between entries.
> We only require the fields are naturally aligned, so `.align 2` is
> sufficient, and doesn't waste space.
> 
> I'll update the commit message to call that out.

I think the part which is confusing me is that I would expect the alignment
here to match the alignment of the corresponding C type, but the old value
of '3' doesn't seem to do that, so is this patch fixing an earlier bug?

Without your patches in the picture, we're using a '.align 3' in
_asm_extable, but with:

struct exception_table_entry
{
	int insn, fixup;
};

I suppose it works out because that over-alignment doesn't result in any
additional padding, but I think we could reduce the current alignment
without any of these other changes, no?

> > > diff --git a/scripts/sorttable.c b/scripts/sorttable.c
> > > index 6ee4fa882919..ee95bb47a50d 100644
> > > --- a/scripts/sorttable.c
> > > +++ b/scripts/sorttable.c
> > > @@ -231,6 +231,34 @@ static void sort_relative_table(char *extab_image, int image_size)
> > >  	}
> > >  }
> > >  
> > > +static void arm64_sort_relative_table(char *extab_image, int image_size)
> > > +{
> > > +	int i = 0;
> > > +
> > > +	while (i < image_size) {
> > > +		uint32_t *loc = (uint32_t *)(extab_image + i);
> > > +
> > > +		w(r(loc) + i, loc);
> > > +		w(r(loc + 1) + i + 4, loc + 1);
> > > +		/* Don't touch the fixup type or data */
> > > +
> > > +		i += sizeof(uint32_t) * 3;
> > > +	}
> > > +
> > > +	qsort(extab_image, image_size / 12, 12, compare_relative_table);
> > > +
> > > +	i = 0;
> > > +	while (i < image_size) {
> > > +		uint32_t *loc = (uint32_t *)(extab_image + i);
> > > +
> > > +		w(r(loc) - i, loc);
> > > +		w(r(loc + 1) - (i + 4), loc + 1);
> > > +		/* Don't touch the fixup type or data */
> > > +
> > > +		i += sizeof(uint32_t) * 3;
> > > +	}
> > > +}
> > 
> > This is very nearly a direct copy of x86_sort_relative_table() (magic
> > numbers and all). It would be nice to tidy that up, but I couldn't
> > immediately see a good way to do it :(
> 
> Beware that's true in linux-next, but not mainline, as that changes in
> commit:
> 
>   46d28947d9876fc0 ("x86/extable: Rework the exception table mechanics")
> 
> A patch to unify the two is trivial, but will cause a cross-tree
> dependency, so I'd suggest having this separate for now and sending a
> unification patch come -rc1.
> 
> I can note something to that effect in the commit message, if that
> helps?

Yeah, I suppose. It's not worth tripping over the x86 changes, but we
should try to remember to come back and unify things.

Will
Ard Biesheuvel Oct. 19, 2021, 12:12 p.m. UTC | #4
On Tue, 19 Oct 2021 at 14:05, Will Deacon <will@kernel.org> wrote:
>
> On Tue, Oct 19, 2021 at 12:50:22PM +0100, Mark Rutland wrote:
> > On Tue, Oct 19, 2021 at 12:29:55PM +0100, Will Deacon wrote:
> > > On Wed, Oct 13, 2021 at 12:00:56PM +0100, Mark Rutland wrote:
> > > > Subsequent patches will add specialized handlers for fixups, in addition
> > > > to the simple PC fixup and BPF handlers we have today. In preparation,
> > > > this patch adds a new `type` field to struct exception_table_entry, and
> > > > uses this to distinguish the fixup and BPF cases. A `data` field is also
> > > > added so that subsequent patches can associate data specific to each
> > > > exception site (e.g. register numbers).
> > > >
> > > > Handlers are named ex_handler_*() for consistency, following the example
> > > > of x86. At the same time, get_ex_fixup() is split out into a helper so
> > > > that it can be used by other ex_handler_*() functions in subsequent
> > > > patches.
> > > >
> > > > This patch will increase the size of the exception tables, which will be
> > > > remedied by subsequent patches removing redundant fixup code. There
> > > > should be no functional change as a result of this patch.
> > > >
> > > > Signed-off-by: Mark Rutland <mark.rutland@arm.com>
> > > > Cc: Alexei Starovoitov <ast@kernel.org>
> > > > Cc: Andrii Nakryiko <andrii@kernel.org>
> > > > Cc: Ard Biesheuvel <ardb@kernel.org>
> > > > Cc: Catalin Marinas <catalin.marinas@arm.com>
> > > > Cc: Daniel Borkmann <daniel@iogearbox.net>
> > > > Cc: James Morse <james.morse@arm.com>
> > > > Cc: Jean-Philippe Brucker <jean-philippe@linaro.org>
> > > > Cc: Robin Murphy <robin.murphy@arm.com>
> > > > Cc: Will Deacon <will@kernel.org>
> > > > ---
> > > >  arch/arm64/include/asm/asm-extable.h | 32 ++++++++++++++++++++------------
> > > >  arch/arm64/include/asm/extable.h     | 19 +++++++++++++++----
> > > >  arch/arm64/mm/extable.c              | 29 +++++++++++++++++++++++++----
> > > >  arch/arm64/net/bpf_jit_comp.c        |  7 +++++--
> > > >  scripts/sorttable.c                  | 30 ++++++++++++++++++++++++++++++
> > > >  5 files changed, 95 insertions(+), 22 deletions(-)
> > > >
> > > > diff --git a/arch/arm64/include/asm/asm-extable.h b/arch/arm64/include/asm/asm-extable.h
> > > > index 986b4c0d4792..5ee748edaef1 100644
> > > > --- a/arch/arm64/include/asm/asm-extable.h
> > > > +++ b/arch/arm64/include/asm/asm-extable.h
> > > > @@ -2,13 +2,19 @@
> > > >  #ifndef __ASM_ASM_EXTABLE_H
> > > >  #define __ASM_ASM_EXTABLE_H
> > > >
> > > > +#define EX_TYPE_NONE                     0
> > > > +#define EX_TYPE_FIXUP                    1
> > > > +#define EX_TYPE_BPF                      2
> > > > +
> > > >  #ifdef __ASSEMBLY__
> > > >
> > > > -#define __ASM_EXTABLE_RAW(insn, fixup)           \
> > > > - .pushsection    __ex_table, "a";        \
> > > > - .align          3;                      \
> > > > - .long           ((insn) - .);           \
> > > > - .long           ((fixup) - .);          \
> > > > +#define __ASM_EXTABLE_RAW(insn, fixup, type, data)       \
> > > > + .pushsection    __ex_table, "a";                \
> > > > + .align          2;                              \
> > > > + .long           ((insn) - .);                   \
> > > > + .long           ((fixup) - .);                  \
> > > > + .short          (type);                         \
> > > > + .short          (data);                         \
> > >
> > > Why are you reducing the alignment here?
> >
> > That's because the size of each entry is now 12 bytes, and
> > `.align 3` aligns to 8 bytes, which would leave a gap between entries.
> > We only require the fields are naturally aligned, so `.align 2` is
> > sufficient, and doesn't waste space.
> >
> > I'll update the commit message to call that out.
>
> I think the part which is confusing me is that I would expect the alignment
> here to match the alignment of the corresponding C type, but the old value
> of '3' doesn't seem to do that, so is this patch fixing an earlier bug?
>
> Without your patches in the picture, we're using a '.align 3' in
> _asm_extable, but with:
>
> struct exception_table_entry
> {
>         int insn, fixup;
> };
>
> I suppose it works out because that over-alignment doesn't result in any
> additional padding, but I think we could reduce the current alignment
> without any of these other changes, no?
>

If a struct type's size happens to be a power of 2, it is perfectly
reasonable to align it to its size rather than use the minimum
alignment required by its members, as this will generally result in
more efficient placement wrt cachelines, pages, etc.

So the change is obviously needed here, but I wouldn't consider the
old alignment value to be a bug.
Mark Rutland Oct. 19, 2021, 1:01 p.m. UTC | #5
On Tue, Oct 19, 2021 at 01:05:06PM +0100, Will Deacon wrote:
> On Tue, Oct 19, 2021 at 12:50:22PM +0100, Mark Rutland wrote:
> > On Tue, Oct 19, 2021 at 12:29:55PM +0100, Will Deacon wrote:
> > > On Wed, Oct 13, 2021 at 12:00:56PM +0100, Mark Rutland wrote:
> > > > -#define __ASM_EXTABLE_RAW(insn, fixup)		\
> > > > -	.pushsection	__ex_table, "a";	\
> > > > -	.align		3;			\
> > > > -	.long		((insn) - .);		\
> > > > -	.long		((fixup) - .);		\
> > > > +#define __ASM_EXTABLE_RAW(insn, fixup, type, data)	\
> > > > +	.pushsection	__ex_table, "a";		\
> > > > +	.align		2;				\
> > > > +	.long		((insn) - .);			\
> > > > +	.long		((fixup) - .);			\
> > > > +	.short		(type);				\
> > > > +	.short		(data);				\
> > > 
> > > Why are you reducing the alignment here?
> > 
> > That's because the size of each entry is now 12 bytes, and 
> > `.align 3` aligns to 8 bytes, which would leave a gap between entries.
> > We only require the fields are naturally aligned, so `.align 2` is
> > sufficient, and doesn't waste space.
> > 
> > I'll update the commit message to call that out.
> 
> I think the part which is confusing me is that I would expect the alignment
> here to match the alignment of the corresponding C type, but the old value
> of '3' doesn't seem to do that, so is this patch fixing an earlier bug?
> 
> Without your patches in the picture, we're using a '.align 3' in
> _asm_extable, but with:
> 
> struct exception_table_entry
> {
> 	int insn, fixup;
> };
> 
> I suppose it works out because that over-alignment doesn't result in any
> additional padding, but I think we could reduce the current alignment
> without any of these other changes, no?

Yes, we could reduce that first, but no, it's not a bug -- there's no
functional issue today.

For context, today the `__ex_table` section as a whole and the
`__start___ex_table` symbol also got 8 byte alignment, since in
arch/arm64/kernel/vmlinux.lds.S we have:

| #define RO_EXCEPTION_TABLE_ALIGN        8

... and so in include/asm-generic/vmlinux.lds.h when the exception table
gets output with:

| EXCEPTION_TABLE(RO_EXCEPTION_TABLE_ALIGN)

| #define EXCEPTION_TABLE(align)                                          \
|         . = ALIGN(align);                                               \
|         __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {               \
|                 __start___ex_table = .;                                 \
|                 KEEP(*(__ex_table))                                     \
|                 __stop___ex_table = .;                                  \
|         }

If you want, I can split out a preparatory patch which drops the
alignment to the minimum necessary, both in the asm and for
RO_EXCEPTION_TABLE_ALIGN?

[...]

> > > > +static void arm64_sort_relative_table(char *extab_image, int image_size)
> > > > +{
> > > > +	int i = 0;
> > > > +
> > > > +	while (i < image_size) {
> > > > +		uint32_t *loc = (uint32_t *)(extab_image + i);
> > > > +
> > > > +		w(r(loc) + i, loc);
> > > > +		w(r(loc + 1) + i + 4, loc + 1);
> > > > +		/* Don't touch the fixup type or data */
> > > > +
> > > > +		i += sizeof(uint32_t) * 3;
> > > > +	}
> > > > +
> > > > +	qsort(extab_image, image_size / 12, 12, compare_relative_table);
> > > > +
> > > > +	i = 0;
> > > > +	while (i < image_size) {
> > > > +		uint32_t *loc = (uint32_t *)(extab_image + i);
> > > > +
> > > > +		w(r(loc) - i, loc);
> > > > +		w(r(loc + 1) - (i + 4), loc + 1);
> > > > +		/* Don't touch the fixup type or data */
> > > > +
> > > > +		i += sizeof(uint32_t) * 3;
> > > > +	}
> > > > +}
> > > 
> > > This is very nearly a direct copy of x86_sort_relative_table() (magic
> > > numbers and all). It would be nice to tidy that up, but I couldn't
> > > immediately see a good way to do it :(
> > 
> > Beware that's true in linux-next, but not mainline, as that changes in
> > commit:
> > 
> >   46d28947d9876fc0 ("x86/extable: Rework the exception table mechanics")
> > 
> > A patch to unify the two is trivial, but will cause a cross-tree
> > dependency, so I'd suggest having this separate for now and sending a
> > unification patch come -rc1.
> > 
> > I can note something to that effect in the commit message, if that
> > helps?
> 
> Yeah, I suppose. It's not worth tripping over the x86 changes, but we
> should try to remember to come back and unify things.

Sure; works for me.

Thanks,
Mark.
diff mbox series

Patch

diff --git a/arch/arm64/include/asm/asm-extable.h b/arch/arm64/include/asm/asm-extable.h
index 986b4c0d4792..5ee748edaef1 100644
--- a/arch/arm64/include/asm/asm-extable.h
+++ b/arch/arm64/include/asm/asm-extable.h
@@ -2,13 +2,19 @@ 
 #ifndef __ASM_ASM_EXTABLE_H
 #define __ASM_ASM_EXTABLE_H
 
+#define EX_TYPE_NONE			0
+#define EX_TYPE_FIXUP			1
+#define EX_TYPE_BPF			2
+
 #ifdef __ASSEMBLY__
 
-#define __ASM_EXTABLE_RAW(insn, fixup)		\
-	.pushsection	__ex_table, "a";	\
-	.align		3;			\
-	.long		((insn) - .);		\
-	.long		((fixup) - .);		\
+#define __ASM_EXTABLE_RAW(insn, fixup, type, data)	\
+	.pushsection	__ex_table, "a";		\
+	.align		2;				\
+	.long		((insn) - .);			\
+	.long		((fixup) - .);			\
+	.short		(type);				\
+	.short		(data);				\
 	.popsection;
 
 /*
@@ -16,7 +22,7 @@ 
  * when an unhandled fault is taken.
  */
 	.macro		_asm_extable, insn, fixup
-	__ASM_EXTABLE_RAW(\insn, \fixup)
+	__ASM_EXTABLE_RAW(\insn, \fixup, EX_TYPE_FIXUP, 0)
 	.endm
 
 /*
@@ -33,15 +39,17 @@ 
 
 #include <linux/stringify.h>
 
-#define __ASM_EXTABLE_RAW(insn, fixup)		\
-	".pushsection	__ex_table, \"a\"\n"	\
-	".align		3\n"			\
-	".long		((" insn ") - .)\n"	\
-	".long		((" fixup ") - .)\n"	\
+#define __ASM_EXTABLE_RAW(insn, fixup, type, data)	\
+	".pushsection	__ex_table, \"a\"\n"		\
+	".align		2\n"				\
+	".long		((" insn ") - .)\n"		\
+	".long		((" fixup ") - .)\n"		\
+	".short		(" type ")\n"			\
+	".short		(" data ")\n"			\
 	".popsection\n"
 
 #define _ASM_EXTABLE(insn, fixup) \
-	__ASM_EXTABLE_RAW(#insn, #fixup)
+	__ASM_EXTABLE_RAW(#insn, #fixup, __stringify(EX_TYPE_FIXUP), "0")
 
 #endif /* __ASSEMBLY__ */
 
diff --git a/arch/arm64/include/asm/extable.h b/arch/arm64/include/asm/extable.h
index 1859b9fd566f..8b300dd28def 100644
--- a/arch/arm64/include/asm/extable.h
+++ b/arch/arm64/include/asm/extable.h
@@ -18,10 +18,21 @@ 
 struct exception_table_entry
 {
 	int insn, fixup;
+	short type, data;
 };
 
 #define ARCH_HAS_RELATIVE_EXTABLE
 
+#define swap_ex_entry_fixup(a, b, tmp, delta)		\
+do {							\
+	(a)->fixup = (b)->fixup + (delta);		\
+	(b)->fixup = (tmp).fixup - (delta);		\
+	(a)->type = (b)->type;				\
+	(b)->type = (tmp).type;				\
+	(a)->data = (b)->data;				\
+	(b)->data = (tmp).data;				\
+} while (0)
+
 static inline bool in_bpf_jit(struct pt_regs *regs)
 {
 	if (!IS_ENABLED(CONFIG_BPF_JIT))
@@ -32,12 +43,12 @@  static inline bool in_bpf_jit(struct pt_regs *regs)
 }
 
 #ifdef CONFIG_BPF_JIT
-bool arm64_bpf_fixup_exception(const struct exception_table_entry *ex,
-			      struct pt_regs *regs);
+bool ex_handler_bpf(const struct exception_table_entry *ex,
+		    struct pt_regs *regs);
 #else /* !CONFIG_BPF_JIT */
 static inline
-bool arm64_bpf_fixup_exception(const struct exception_table_entry *ex,
-			       struct pt_regs *regs)
+bool ex_handler_bpf(const struct exception_table_entry *ex,
+		    struct pt_regs *regs)
 {
 	return false;
 }
diff --git a/arch/arm64/mm/extable.c b/arch/arm64/mm/extable.c
index dba3d59f3eca..c2951b963335 100644
--- a/arch/arm64/mm/extable.c
+++ b/arch/arm64/mm/extable.c
@@ -6,6 +6,24 @@ 
 #include <linux/extable.h>
 #include <linux/uaccess.h>
 
+#include <asm/asm-extable.h>
+
+typedef bool (*ex_handler_t)(const struct exception_table_entry *,
+			     struct pt_regs *);
+
+static inline unsigned long
+get_ex_fixup(const struct exception_table_entry *ex)
+{
+	return ((unsigned long)&ex->fixup + ex->fixup);
+}
+
+static bool ex_handler_fixup(const struct exception_table_entry *ex,
+			     struct pt_regs *regs)
+{
+	regs->pc = get_ex_fixup(ex);
+	return true;
+}
+
 bool fixup_exception(struct pt_regs *regs)
 {
 	const struct exception_table_entry *ex;
@@ -14,9 +32,12 @@  bool fixup_exception(struct pt_regs *regs)
 	if (!ex)
 		return false;
 
-	if (in_bpf_jit(regs))
-		return arm64_bpf_fixup_exception(ex, regs);
+	switch (ex->type) {
+	case EX_TYPE_FIXUP:
+		return ex_handler_fixup(ex, regs);
+	case EX_TYPE_BPF:
+		return ex_handler_bpf(ex, regs);
+	}
 
-	regs->pc = (unsigned long)&ex->fixup + ex->fixup;
-	return true;
+	BUG();
 }
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index 956c841ef346..7df7345e60d8 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -13,6 +13,7 @@ 
 #include <linux/printk.h>
 #include <linux/slab.h>
 
+#include <asm/asm-extable.h>
 #include <asm/byteorder.h>
 #include <asm/cacheflush.h>
 #include <asm/debug-monitors.h>
@@ -358,8 +359,8 @@  static void build_epilogue(struct jit_ctx *ctx)
 #define BPF_FIXUP_OFFSET_MASK	GENMASK(26, 0)
 #define BPF_FIXUP_REG_MASK	GENMASK(31, 27)
 
-bool arm64_bpf_fixup_exception(const struct exception_table_entry *ex,
-			       struct pt_regs *regs)
+bool ex_handler_bpf(const struct exception_table_entry *ex,
+		    struct pt_regs *regs)
 {
 	off_t offset = FIELD_GET(BPF_FIXUP_OFFSET_MASK, ex->fixup);
 	int dst_reg = FIELD_GET(BPF_FIXUP_REG_MASK, ex->fixup);
@@ -412,6 +413,8 @@  static int add_exception_handler(const struct bpf_insn *insn,
 	ex->fixup = FIELD_PREP(BPF_FIXUP_OFFSET_MASK, offset) |
 		    FIELD_PREP(BPF_FIXUP_REG_MASK, dst_reg);
 
+	ex->type = EX_TYPE_BPF;
+
 	ctx->exentry_idx++;
 	return 0;
 }
diff --git a/scripts/sorttable.c b/scripts/sorttable.c
index 6ee4fa882919..ee95bb47a50d 100644
--- a/scripts/sorttable.c
+++ b/scripts/sorttable.c
@@ -231,6 +231,34 @@  static void sort_relative_table(char *extab_image, int image_size)
 	}
 }
 
+static void arm64_sort_relative_table(char *extab_image, int image_size)
+{
+	int i = 0;
+
+	while (i < image_size) {
+		uint32_t *loc = (uint32_t *)(extab_image + i);
+
+		w(r(loc) + i, loc);
+		w(r(loc + 1) + i + 4, loc + 1);
+		/* Don't touch the fixup type or data */
+
+		i += sizeof(uint32_t) * 3;
+	}
+
+	qsort(extab_image, image_size / 12, 12, compare_relative_table);
+
+	i = 0;
+	while (i < image_size) {
+		uint32_t *loc = (uint32_t *)(extab_image + i);
+
+		w(r(loc) - i, loc);
+		w(r(loc + 1) - (i + 4), loc + 1);
+		/* Don't touch the fixup type or data */
+
+		i += sizeof(uint32_t) * 3;
+	}
+}
+
 static void x86_sort_relative_table(char *extab_image, int image_size)
 {
 	int i = 0;
@@ -343,6 +371,8 @@  static int do_file(char const *const fname, void *addr)
 		custom_sort = s390_sort_relative_table;
 		break;
 	case EM_AARCH64:
+		custom_sort = arm64_sort_relative_table;
+		break;
 	case EM_PARISC:
 	case EM_PPC:
 	case EM_PPC64: