@@ -629,6 +629,7 @@ SYM_CODE_START_LOCAL_NOALIGN(el1_irq)
kernel_exit 1
SYM_CODE_END(el1_irq)
+ .align 6
SYM_CODE_START_LOCAL_NOALIGN(el1_fiq)
kernel_entry 1
mov x0, sp
@@ -636,6 +637,14 @@ SYM_CODE_START_LOCAL_NOALIGN(el1_fiq)
kernel_exit 1
SYM_CODE_END(el1_fiq)
+ .align 6
+SYM_CODE_START_LOCAL_NOALIGN(el1_error)
+ kernel_entry 1
+ mov x0, sp
+ bl el1_error_handler
+ kernel_exit 1
+SYM_CODE_END(el1_error)
+
/*
* EL0 mode handlers.
*/
@@ -647,6 +656,30 @@ SYM_CODE_START_LOCAL_NOALIGN(el0_sync)
b ret_to_user
SYM_CODE_END(el0_sync)
+ .align 6
+SYM_CODE_START_LOCAL_NOALIGN(el0_irq)
+ kernel_entry 0
+ mov x0, sp
+ bl el0_irq_handler
+ b ret_to_user
+SYM_CODE_END(el0_irq)
+
+ .align 6
+SYM_CODE_START_LOCAL_NOALIGN(el0_fiq)
+ kernel_entry 0
+ mov x0, sp
+ bl el0_fiq_handler
+ b ret_to_user
+SYM_CODE_END(el0_fiq)
+
+ .align 6
+SYM_CODE_START_LOCAL_NOALIGN(el0_error)
+ kernel_entry 0
+ mov x0, sp
+ bl el0_error_handler
+ b ret_to_user
+SYM_CODE_END(el0_error)
+
#ifdef CONFIG_COMPAT
.align 6
SYM_CODE_START_LOCAL_NOALIGN(el0_sync_compat)
@@ -664,6 +697,7 @@ SYM_CODE_START_LOCAL_NOALIGN(el0_irq_compat)
b ret_to_user
SYM_CODE_END(el0_irq_compat)
+ .align 6
SYM_CODE_START_LOCAL_NOALIGN(el0_fiq_compat)
kernel_entry 0, 32
mov x0, sp
@@ -671,6 +705,7 @@ SYM_CODE_START_LOCAL_NOALIGN(el0_fiq_compat)
b ret_to_user
SYM_CODE_END(el0_fiq_compat)
+ .align 6
SYM_CODE_START_LOCAL_NOALIGN(el0_error_compat)
kernel_entry 0, 32
mov x0, sp
@@ -679,35 +714,6 @@ SYM_CODE_START_LOCAL_NOALIGN(el0_error_compat)
SYM_CODE_END(el0_error_compat)
#endif
- .align 6
-SYM_CODE_START_LOCAL_NOALIGN(el0_irq)
- kernel_entry 0
- mov x0, sp
- bl el0_irq_handler
- b ret_to_user
-SYM_CODE_END(el0_irq)
-
-SYM_CODE_START_LOCAL_NOALIGN(el0_fiq)
- kernel_entry 0
- mov x0, sp
- bl el0_fiq_handler
- b ret_to_user
-SYM_CODE_END(el0_fiq)
-
-SYM_CODE_START_LOCAL(el1_error)
- kernel_entry 1
- mov x0, sp
- bl el1_error_handler
- kernel_exit 1
-SYM_CODE_END(el1_error)
-
-SYM_CODE_START_LOCAL(el0_error)
- kernel_entry 0
- mov x0, sp
- bl el0_error_handler
- b ret_to_user
-SYM_CODE_END(el0_error)
-
/*
* "slow" syscall return path.
*/
In entry.S we have two comments which distinguish EL0 and EL1 exception handlers, but the code isn't actually laid out this way, and there are a few other inconsistencies that would be good to clear up. This patch organizes the entry handlers consistently: * The handlers are laid out in order of the vectors, to make them easier to navigate. * All handlers are given the same alignment, which was previously applied inconsistently. There should be no functional change as a result of this patch. Signed-off-by: Mark Rutland <mark.rutland@arm.com> Cc: Catalin Marinas <catalin.marinas@arm.com> Cc: James Morse <james.morse@arm.com> Cc: Marc Zyngier <maz@kernel.org> Cc: Will Deacon <will@kernel.org> --- arch/arm64/kernel/entry.S | 64 ++++++++++++++++++++++++++--------------------- 1 file changed, 35 insertions(+), 29 deletions(-)