--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -324,7 +324,7 @@ syscall_return_via_sysret:
opportunistic_sysret_failed:
SWAPGS
jmp restore_c_regs_and_iret
-END(entry_SYSCALL_64)
+ENDPROC(entry_SYSCALL_64)
ENTRY(stub_ptregs_64)
/*
@@ -350,13 +350,13 @@ ENTRY(stub_ptregs_64)
1:
jmp *%rax /* Called from C */
-END(stub_ptregs_64)
+ENDPROC(stub_ptregs_64)
.macro ptregs_stub func
ENTRY(ptregs_\func)
leaq \func(%rip), %rax
jmp stub_ptregs_64
-END(ptregs_\func)
+ENDPROC(ptregs_\func)
.endm
/* Instantiate ptregs_stub for each ptregs-using syscall */
@@ -399,7 +399,7 @@ ENTRY(__switch_to_asm)
popq %rbp
jmp __switch_to
-END(__switch_to_asm)
+ENDPROC(__switch_to_asm)
/*
* A newly forked process directly context switches into this address.
@@ -435,7 +435,7 @@ ENTRY(ret_from_fork)
*/
movq $0, RAX(%rsp)
jmp 2b
-END(ret_from_fork)
+ENDPROC(ret_from_fork)
/*
* Build the entry stubs with some assembler magic.
@@ -450,7 +450,7 @@ ENTRY(irq_entries_start)
jmp common_interrupt
.align 8
.endr
-END(irq_entries_start)
+ENDPROC(irq_entries_start)
/*
* Interrupt entry/exit.
@@ -652,7 +652,7 @@ native_irq_return_ldt:
*/
jmp native_irq_return_iret
#endif
-END(common_interrupt)
+ENDPROC(common_interrupt)
/*
* APIC interrupts.
@@ -664,7 +664,7 @@ ENTRY(\sym)
.Lcommon_\sym:
interrupt \do_sym
jmp ret_from_intr
-END(\sym)
+ENDPROC(\sym)
.endm
#ifdef CONFIG_TRACING
@@ -830,7 +830,7 @@ ENTRY(\sym)
jmp error_exit /* %ebx: no swapgs flag */
.endif
-END(\sym)
+ENDPROC(\sym)
.endm
#ifdef CONFIG_TRACING
@@ -873,7 +873,7 @@ ENTRY(native_load_gs_index)
SWAPGS
popfq
ret
-END(native_load_gs_index)
+ENDPROC(native_load_gs_index)
EXPORT_SYMBOL(native_load_gs_index)
_ASM_EXTABLE(.Lgs_change, bad_gs)
@@ -903,7 +903,7 @@ ENTRY(do_softirq_own_stack)
leaveq
decl PER_CPU_VAR(irq_count)
ret
-END(do_softirq_own_stack)
+ENDPROC(do_softirq_own_stack)
#ifdef CONFIG_XEN
idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0
@@ -939,7 +939,7 @@ ENTRY(xen_do_hypervisor_callback) /* do_hypervisor_callback(struct *pt_regs) */
call xen_maybe_preempt_hcall
#endif
jmp error_exit
-END(xen_do_hypervisor_callback)
+ENDPROC(xen_do_hypervisor_callback)
/*
* Hypervisor uses this for application faults while it executes.
@@ -985,7 +985,7 @@ ENTRY(xen_failsafe_callback)
SAVE_EXTRA_REGS
ENCODE_FRAME_POINTER
jmp error_exit
-END(xen_failsafe_callback)
+ENDPROC(xen_failsafe_callback)
apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
xen_hvm_callback_vector xen_evtchn_do_upcall
@@ -1036,7 +1036,7 @@ ENTRY(paranoid_entry)
SWAPGS
xorl %ebx, %ebx
1: ret
-END(paranoid_entry)
+ENDPROC(paranoid_entry)
/*
* "Paranoid" exit path from exception stack. This is invoked
@@ -1065,7 +1065,7 @@ paranoid_exit_restore:
RESTORE_C_REGS
REMOVE_PT_GPREGS_FROM_STACK 8
INTERRUPT_RETURN
-END(paranoid_exit)
+ENDPROC(paranoid_exit)
/*
* Save all registers in pt_regs, and switch gs if needed.
@@ -1147,7 +1147,7 @@ ENTRY(error_entry)
mov %rax, %rsp
decl %ebx
jmp .Lerror_entry_from_usermode_after_swapgs
-END(error_entry)
+ENDPROC(error_entry)
/*
@@ -1162,7 +1162,7 @@ ENTRY(error_exit)
testl %eax, %eax
jnz retint_kernel
jmp retint_user
-END(error_exit)
+ENDPROC(error_exit)
/* Runs on exception stack */
ENTRY(nmi)
@@ -1510,12 +1510,12 @@ nmi_restore:
* mode, so this cannot result in a fault.
*/
INTERRUPT_RETURN
-END(nmi)
+ENDPROC(nmi)
ENTRY(ignore_sysret)
mov $-ENOSYS, %eax
sysret
-END(ignore_sysret)
+ENDPROC(ignore_sysret)
ENTRY(rewind_stack_do_exit)
/* Prevent any naive code from trying to unwind to our caller. */
@@ -1526,4 +1526,4 @@ ENTRY(rewind_stack_do_exit)
call do_exit
1: jmp 1b
-END(rewind_stack_do_exit)
+ENDPROC(rewind_stack_do_exit)
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -262,7 +262,7 @@ sysret32_from_system_call:
movq RSP-ORIG_RAX(%rsp), %rsp
swapgs
sysretl
-END(entry_SYSCALL_compat)
+ENDPROC(entry_SYSCALL_compat)
/*
* 32-bit legacy system call entry.
@@ -340,7 +340,7 @@ ENTRY(entry_INT80_compat)
TRACE_IRQS_ON
SWAPGS
jmp restore_regs_and_iret
-END(entry_INT80_compat)
+ENDPROC(entry_INT80_compat)
ALIGN
GLOBAL(stub32_clone)
--- a/arch/x86/kernel/mcount_64.S
+++ b/arch/x86/kernel/mcount_64.S
@@ -145,7 +145,7 @@
ENTRY(fentry_hook)
retq
-END(fentry_hook)
+ENDPROC(fentry_hook)
ENTRY(ftrace_caller)
/* save_mcount_regs fills in first two parameters */
@@ -177,6 +177,7 @@ GLOBAL(ftrace_epilogue)
GLOBAL(ftrace_graph_call)
jmp ftrace_stub
#endif
+ENDPROC(ftrace_caller)
/* This is weak to keep gas from relaxing the jumps */
WEAK(ftrace_stub)
@@ -252,7 +253,7 @@ GLOBAL(ftrace_regs_caller_end)
jmp ftrace_epilogue
-END(ftrace_regs_caller)
+ENDPROC(ftrace_regs_caller)
#else /* ! CONFIG_DYNAMIC_FTRACE */
@@ -288,7 +289,7 @@ trace:
restore_mcount_regs
jmp fgraph_trace
-END(fentry_hook)
+ENDPROC(fentry_hook)
#endif /* CONFIG_DYNAMIC_FTRACE */
EXPORT_SYMBOL(fentry_hook)
#endif /* CONFIG_FUNCTION_TRACER */
@@ -312,7 +313,7 @@ ENTRY(ftrace_graph_caller)
restore_mcount_regs
retq
-END(ftrace_graph_caller)
+ENDPROC(ftrace_graph_caller)
ENTRY(return_to_handler)
subq $24, %rsp
@@ -329,4 +330,5 @@ ENTRY(return_to_handler)
movq (%rsp), %rax
addq $24, %rsp
jmp *%rdi
+ENDPROC(return_to_handler)
#endif
--- a/arch/x86/power/hibernate_asm_64.S
+++ b/arch/x86/power/hibernate_asm_64.S
@@ -68,6 +68,7 @@ ENTRY(restore_image)
/* jump to relocated restore code */
movq relocated_restore_code(%rip), %rcx
jmpq *%rcx
+ENDPROC(restore_image)
/* code below has been relocated to a safe page */
ENTRY(core_restore_code)
@@ -98,6 +99,7 @@ ENTRY(core_restore_code)
.Ldone:
/* jump to the restore_registers address from the image header */
jmpq *%r8
+ENDPROC(core_restore_code)
/* code below belongs to the image kernel */
.align PAGE_SIZE
--- a/arch/x86/realmode/rm/reboot.S
+++ b/arch/x86/realmode/rm/reboot.S
@@ -62,6 +62,7 @@ GLOBAL(machine_real_restart_paging_off)
movl %ecx, %gs
movl %ecx, %ss
ljmpw $8, $1f
+ENDPROC(machine_real_restart_asm)
/*
* This is 16-bit protected mode code to disable paging and the cache,
--- a/arch/x86/realmode/rm/trampoline_64.S
+++ b/arch/x86/realmode/rm/trampoline_64.S
@@ -79,6 +79,8 @@ ENTRY(trampoline_start)
no_longmode:
hlt
jmp no_longmode
+ENDPROC(trampoline_start)
+
#include "../kernel/verify_cpu.S"
.section ".text32","ax"
@@ -116,6 +118,7 @@ ENTRY(startup_32)
* the new gdt/idt that has __KERNEL_CS with CS.L = 1.
*/
ljmpl $__KERNEL_CS, $pa_startup_64
+ENDPROC(startup_32)
.section ".text64","ax"
.code64
@@ -123,6 +126,7 @@ ENTRY(startup_32)
ENTRY(startup_64)
# Now jump into the kernel using virtual addresses
jmpq *tr_start(%rip)
+ENDPROC(startup_64)
.section ".rodata","a"
# Duplicate the global descriptor table
--- a/arch/x86/realmode/rm/wakeup_asm.S
+++ b/arch/x86/realmode/rm/wakeup_asm.S
@@ -134,6 +134,7 @@ ENTRY(wakeup_start)
#else
jmp trampoline_start
#endif
+ENDPROC(wakeup_start)
bogus_real_magic:
1:
--- a/arch/x86/xen/xen-asm_64.S
+++ b/arch/x86/xen/xen-asm_64.S
@@ -49,6 +49,7 @@ ENTRY(xen_iret)
1: jmp hypercall_iret
ENDPATCH(xen_iret)
RELOC(xen_iret, 1b+1)
+ENDPROC(xen_iret)
ENTRY(xen_sysret64)
/*
@@ -68,6 +69,7 @@ ENTRY(xen_sysret64)
1: jmp hypercall_iret
ENDPATCH(xen_sysret64)
RELOC(xen_sysret64, 1b+1)
+ENDPROC(xen_sysret64)
/*
* Xen handles syscall callbacks much like ordinary exceptions, which
--- a/arch/x86/xen/xen-head.S
+++ b/arch/x86/xen/xen-head.S
@@ -32,7 +32,7 @@ ENTRY(startup_xen)
mov $init_thread_union+THREAD_SIZE, %_ASM_SP
jmp xen_start_kernel
-
+ENDPROC(startup_xen)
__FINIT
.pushsection .text
--- a/arch/x86/xen/xen-pvh.S
+++ b/arch/x86/xen/xen-pvh.S
@@ -133,7 +133,7 @@ ENTRY(pvh_start_xen)
ljmp $__BOOT_CS, $_pa(startup_32)
#endif
-END(pvh_start_xen)
+ENDPROC(pvh_start_xen)
.section ".init.data","aw"
.balign 8
In some places, END was used to mark the end of a function; in others, nothing was used at all. Unify this and mark all function ends with ENDPROC.

Signed-off-by: Jiri Slaby <jslaby@suse.cz>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: <x86@kernel.org>
Cc: "Rafael J. Wysocki" <rjw@rjwysocki.net>
Cc: Pavel Machek <pavel@ucw.cz>
Cc: <linux-pm@vger.kernel.org>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: <xen-devel@lists.xenproject.org>
---
 arch/x86/entry/entry_64.S            | 40 ++++++++++++++++++------------------
 arch/x86/entry/entry_64_compat.S     |  4 ++--
 arch/x86/kernel/mcount_64.S          | 10 ++++++----
 arch/x86/power/hibernate_asm_64.S    |  2 ++
 arch/x86/realmode/rm/reboot.S        |  1 +
 arch/x86/realmode/rm/trampoline_64.S |  4 ++++
 arch/x86/realmode/rm/wakeup_asm.S    |  1 +
 arch/x86/xen/xen-asm_64.S            |  2 ++
 arch/x86/xen/xen-head.S              |  2 +-
 arch/x86/xen/xen-pvh.S               |  2 +-
 10 files changed, 40 insertions(+), 28 deletions(-)
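For readers less familiar with these annotation macros, here is a minimal sketch of the pattern the patch converges on. The symbol my_func is made up for illustration only, and the expansions in the comments are paraphrased from include/linux/linkage.h rather than quoted verbatim:

	#include <linux/linkage.h>

	ENTRY(my_func)			/* roughly: .globl my_func; ALIGN; my_func: */
		xorl	%eax, %eax	/* trivial body: return 0 */
		ret
	ENDPROC(my_func)		/* roughly: .type my_func, @function; END(my_func) */

The practical difference is that a bare END() only emits .size for the symbol, while ENDPROC() additionally marks it as a function via .type, so consistently closing every function with ENDPROC gives tools that consume the symbol table uniform type and size information.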