@@ -310,6 +310,7 @@ ENTRY(startup_64)
*/
leaq relocated(%rbx), %rax
jmp *%rax
+ENDPROC(startup_64)
#ifdef CONFIG_EFI_STUB
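The remaining hunks apply the same pattern: each assembly routine opened with ENTRY(name) (or an equivalent global label) gains a matching ENDPROC(name) at its end. For reference, ENTRY() and ENDPROC() are defined in include/linux/linkage.h and expand roughly as sketched here; my_func is a made-up placeholder, not code from this patch:

ENTRY(my_func)          # .globl my_func ; ALIGN ; my_func:
	ret
ENDPROC(my_func)        # .type my_func, @function
                        # .size my_func, . - my_func

Pairing ENTRY with ENDPROC gives the symbol an explicit ELF type and size in the assembled object, which is the point of adding the closing marker to each routine below.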
@@ -356,6 +356,7 @@ GLOBAL(__begin_SYSENTER_singlestep_region)
ENTRY(xen_sysenter_target)
addl $5*4, %esp /* remove xen-provided frame */
jmp .Lsysenter_past_esp
+ENDPROC(xen_sysenter_target)
#endif
/*
@@ -353,3 +353,4 @@ GLOBAL(stub32_clone)
*/
xchg %r8, %rcx
jmp sys_clone
+ENDPROC(stub32_clone)
@@ -38,6 +38,7 @@ wakeup_pmode_return:
# jump to place where we left off
movl saved_eip, %eax
jmp *%eax
+ENDPROC(wakeup_pmode_return)
bogus_magic:
jmp bogus_magic
@@ -86,6 +87,7 @@ ret_point:
call restore_registers
call restore_processor_state
ret
+ENDPROC(do_suspend_lowlevel)
.data
ALIGN
@@ -168,6 +168,7 @@ GLOBAL(ftrace_regs_call)
lea 3*4(%esp), %esp /* Skip orig_ax, ip and cs */
jmp .Lftrace_ret
+ENDPROC(ftrace_regs_caller)
#else /* ! CONFIG_DYNAMIC_FTRACE */
ENTRY(function_hook)
@@ -329,4 +329,5 @@ GLOBAL(return_to_handler)
movq (%rsp), %rax
addq $24, %rsp
jmp *%rdi
+ENDPROC(return_to_handler)
#endif
@@ -173,6 +173,7 @@ num_subarch_entries = (. - subarch_entries) / 4
#else
jmp .Ldefault_entry
#endif /* CONFIG_PARAVIRT */
+ENDPROC(startup_32)
#ifdef CONFIG_HOTPLUG_CPU
/*
@@ -48,6 +48,7 @@ ENTRY(lguest_entry)
/* Jumps are relative: we're running __PAGE_OFFSET too low. */
jmp lguest_init+__PAGE_OFFSET
+ENDPROC(lguest_entry)
/*G:055
* We create a macro which puts the assembler code between lgstart_ and lgend_
@@ -110,6 +111,7 @@ send_interrupts:
/* Put eax back the way we found it. */
popl %eax
ret
+ENDPROC(lg_irq_enable)
/*
* Finally, the "popf" or "restore flags" routine. The %eax register holds the
@@ -131,6 +133,7 @@ ENTRY(lg_restore_fl)
jnz send_interrupts
/* Again, the normal path has used no extra registers. Clever, huh? */
ret
+ENDPROC(lg_restore_fl)
/*:*/
/* These demark the EIP where host should never deliver interrupts. */
@@ -190,3 +193,4 @@ ENTRY(lguest_iret)
popl %ss:lguest_data+LGUEST_DATA_irq_enabled
lguest_noirq_iret:
iret
+ENDPROC(lguest_iret)
@@ -363,3 +363,4 @@ L_bugged_2:
pop %ebx
jmp L_exit
#endif /* PARANOID */
+ENDPROC(div_Xsig)
@@ -44,4 +44,4 @@ ENTRY(FPU_div_small)
leave
ret
-
+ENDPROC(FPU_div_small)
@@ -62,6 +62,7 @@ ENTRY(mul32_Xsig)
popl %esi
leave
ret
+ENDPROC(mul32_Xsig)
ENTRY(mul64_Xsig)
@@ -114,6 +115,7 @@ ENTRY(mul64_Xsig)
popl %esi
leave
ret
+ENDPROC(mul64_Xsig)
@@ -173,4 +175,4 @@ ENTRY(mul_Xsig_Xsig)
popl %esi
leave
ret
-
+ENDPROC(mul_Xsig_Xsig)
@@ -133,3 +133,4 @@ L_accum_done:
popl %esi
leave
ret
+ENDPROC(polynomial_Xsig)
@@ -94,6 +94,7 @@ L_overflow:
call arith_overflow
pop %ebx
jmp L_exit
+ENDPROC(FPU_normalize)
@@ -145,3 +146,4 @@ L_exit_nuo_zero:
popl %ebx
leave
ret
+ENDPROC(FPU_normalize_nuo)
@@ -706,3 +706,5 @@ L_exception_exit:
mov $-1,%eax
jmp fpu_reg_round_special_exit
#endif /* PARANOID */
+
+ENDPROC(FPU_round)
@@ -165,3 +165,4 @@ L_exit:
leave
ret
#endif /* PARANOID */
+ENDPROC(FPU_u_add)
@@ -469,3 +469,5 @@ L_exit:
leave
ret
#endif /* PARANOID */
+
+ENDPROC(FPU_u_div)
@@ -146,3 +146,4 @@ L_exit:
ret
#endif /* PARANOID */
+ENDPROC(FPU_u_mul)
@@ -270,3 +270,4 @@ L_exit:
popl %esi
leave
ret
+ENDPROC(FPU_u_sub)
@@ -78,7 +78,7 @@ L_exit:
popl %ebx
leave
ret
-
+ENDPROC(round_Xsig)
@@ -138,4 +138,4 @@ L_n_exit:
popl %ebx
leave
ret
-
+ENDPROC(norm_Xsig)
@@ -85,3 +85,4 @@ L_more_than_95:
popl %esi
leave
ret
+ENDPROC(shr_Xsig)
@@ -92,6 +92,7 @@ L_more_than_95:
popl %esi
leave
ret
+ENDPROC(FPU_shrx)
/*---------------------------------------------------------------------------+
@@ -202,3 +203,4 @@ Ls_more_than_95:
popl %esi
leave
ret
+ENDPROC(FPU_shrxs)
@@ -468,3 +468,4 @@ sqrt_more_prec_large:
/* Our estimate is too large */
movl $0x7fffff00,%eax
jmp sqrt_round_result
+ENDPROC(wm_sqrt)
@@ -109,6 +109,7 @@ ret_point:
call restore_registers
call restore_processor_state
ret
+ENDPROC(do_olpc_suspend_lowlevel)
.data
saved_gdt: .long 0,0
@@ -25,6 +25,7 @@ ENTRY(swsusp_arch_suspend)
call swsusp_save
ret
+ENDPROC(swsusp_arch_suspend)
ENTRY(restore_image)
movl mmu_cr4_features, %ecx
@@ -82,3 +83,5 @@ done:
xorl %eax, %eax
ret
+ENDPROC(restore_image)
+
@@ -68,6 +68,7 @@ ENTRY(restore_image)
/* jump to relocated restore code */
movq relocated_restore_code(%rip), %rcx
jmpq *%rcx
+ENDPROC(restore_image)
/* code below has been relocated to a safe page */
ENTRY(core_restore_code)
@@ -98,6 +99,7 @@ ENTRY(core_restore_code)
.Ldone:
/* jump to the restore_registers address from the image header */
jmpq *%r8
+ENDPROC(core_restore_code)
/* code below belongs to the image kernel */
.align PAGE_SIZE
@@ -62,6 +62,7 @@ GLOBAL(machine_real_restart_paging_off)
movl %ecx, %gs
movl %ecx, %ss
ljmpw $8, $1f
+ENDPROC(machine_real_restart_asm)
/*
* This is 16-bit protected mode code to disable paging and the cache,
@@ -56,11 +56,13 @@ ENTRY(trampoline_start)
lmsw %dx # into protected mode
ljmpl $__BOOT_CS, $pa_startup_32
+ENDPROC(trampoline_start)
.section ".text32","ax"
.code32
ENTRY(startup_32) # note: also used from wakeup_asm.S
jmp *%eax
+ENDPROC(startup_32)
.bss
.balign 8
@@ -79,6 +79,8 @@ ENTRY(trampoline_start)
no_longmode:
hlt
jmp no_longmode
+ENDPROC(trampoline_start)
+
#include "../kernel/verify_cpu.S"
.section ".text32","ax"
@@ -116,6 +118,7 @@ ENTRY(startup_32)
* the new gdt/idt that has __KERNEL_CS with CS.L = 1.
*/
ljmpl $__KERNEL_CS, $pa_startup_64
+ENDPROC(startup_32)
.section ".text64","ax"
.code64
@@ -123,6 +126,7 @@ ENTRY(startup_32)
ENTRY(startup_64)
# Now jump into the kernel using virtual addresses
jmpq *tr_start(%rip)
+ENDPROC(startup_64)
.section ".rodata","a"
# Duplicate the global descriptor table
@@ -134,6 +134,7 @@ ENTRY(wakeup_start)
#else
jmp trampoline_start
#endif
+ENDPROC(wakeup_start)
bogus_real_magic:
1:
@@ -138,6 +138,7 @@ xen_iret_end_crit:
hyper_iret:
/* put this out of line since its very rarely used */
jmp hypercall_page + __HYPERVISOR_iret * 32
+ENDPROC(xen_iret)
.globl xen_iret_start_crit, xen_iret_end_crit
@@ -220,4 +221,4 @@ ENTRY(xen_iret_crit_fixup)
lea 4(%edi), %esp /* point esp to new frame */
2: jmp xen_do_upcall
-
+ENDPROC(xen_iret_crit_fixup)
@@ -49,6 +49,7 @@ ENTRY(xen_iret)
1: jmp hypercall_iret
ENDPATCH(xen_iret)
RELOC(xen_iret, 1b+1)
+ENDPROC(xen_iret)
ENTRY(xen_sysret64)
/*
@@ -68,6 +69,7 @@ ENTRY(xen_sysret64)
1: jmp hypercall_iret
ENDPATCH(xen_sysret64)
RELOC(xen_sysret64, 1b+1)
+ENDPROC(xen_sysret64)
/*
* Xen handles syscall callbacks much like ordinary exceptions, which
@@ -33,7 +33,7 @@ ENTRY(startup_xen)
mov $init_thread_union+THREAD_SIZE, %_ASM_SP
jmp xen_start_kernel
-
+ENDPROC(startup_xen)
__FINIT
#endif
@@ -41,6 +41,7 @@ ENTRY(startup_xen)
.balign PAGE_SIZE
ENTRY(hypercall_page)
.skip PAGE_SIZE
+ENDPROC(hypercall_page)
#define HYPERCALL(n) \
.equ xen_hypercall_##n, hypercall_page + __HYPERVISOR_##n * 32; \
@@ -265,6 +265,7 @@ ENTRY(switch_to_guest)
return_to_host:
SWITCH_TO_HOST
iret
+ENDPROC(switch_to_guest)
// We are lead to the second path like so:
// An interrupt, with some cause external