From patchwork Wed Oct 6 07:17:40 2010
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: Magnus Damm
X-Patchwork-Id: 234881
From: Magnus Damm
To: linux@arm.linux.org.uk
Cc: grant.likely@secretlab.ca, Magnus Damm, lethal@linux-sh.org,
	linux-arm-kernel@lists.infradead.org, linux-sh@vger.kernel.org
Date: Wed, 06 Oct 2010 16:17:40 +0900
Message-Id: <20101006071740.28048.45621.sendpatchset@t400s>
In-Reply-To: <20101006071731.28048.89938.sendpatchset@t400s>
References: <20101006071731.28048.89938.sendpatchset@t400s>
Subject: [PATCH 01/08] ARM: Move entry-header.S to asm/

--- /dev/null
+++ work/arch/arm/include/asm/entry-header.S	2010-09-16 17:22:01.000000000 +0900
@@ -0,0 +1,181 @@
+#include <linux/init.h>
+#include <linux/linkage.h>
+
+#include <asm/assembler.h>
+#include <asm/asm-offsets.h>
+#include <asm/errno.h>
+#include <asm/thread_info.h>
+
+@ Bad Abort numbers
+@ -----------------
+@
+#define BAD_PREFETCH		0
+#define BAD_DATA		1
+#define BAD_ADDREXCPTN		2
+#define BAD_IRQ			3
+#define BAD_UNDEFINSTR		4
+
+@
+@ Most of the stack format comes from struct pt_regs, but with
+@ the addition of 8 bytes for storing syscall args 5 and 6.
+@ This _must_ remain a multiple of 8 for EABI.
+@
+#define S_OFF			8
+
+/*
+ * The SWI code relies on the fact that R0 is at the bottom of the stack
+ * (due to slow/fast restore user regs).
+ */
+#if S_R0 != 0
+#error "Please fix"
+#endif
+
+	.macro	zero_fp
+#ifdef CONFIG_FRAME_POINTER
+	mov	fp, #0
+#endif
+	.endm
+
+	.macro	alignment_trap, rtemp
+#ifdef CONFIG_ALIGNMENT_TRAP
+	ldr	\rtemp, .LCcralign
+	ldr	\rtemp, [\rtemp]
+	mcr	p15, 0, \rtemp, c1, c0
+#endif
+	.endm
+
+	@
+	@ Store/load the USER SP and LR registers by switching to the SYS
+	@ mode. Useful in Thumb-2 mode where "stm/ldm rd, {sp, lr}^" is not
+	@ available. Should only be called from SVC mode
+	@
+	.macro	store_user_sp_lr, rd, rtemp, offset = 0
+	mrs	\rtemp, cpsr
+	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
+	msr	cpsr_c, \rtemp			@ switch to the SYS mode
+
+	str	sp, [\rd, #\offset]		@ save sp_usr
+	str	lr, [\rd, #\offset + 4]		@ save lr_usr
+
+	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
+	msr	cpsr_c, \rtemp			@ switch back to the SVC mode
+	.endm
+
+	.macro	load_user_sp_lr, rd, rtemp, offset = 0
+	mrs	\rtemp, cpsr
+	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
+	msr	cpsr_c, \rtemp			@ switch to the SYS mode
+
+	ldr	sp, [\rd, #\offset]		@ load sp_usr
+	ldr	lr, [\rd, #\offset + 4]		@ load lr_usr
+
+	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
+	msr	cpsr_c, \rtemp			@ switch back to the SVC mode
+	.endm
+
+#ifndef CONFIG_THUMB2_KERNEL
+	.macro	svc_exit, rpsr
+	msr	spsr_cxsf, \rpsr
+#if defined(CONFIG_CPU_32v6K)
+	clrex					@ clear the exclusive monitor
+	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
+#elif defined (CONFIG_CPU_V6)
+	ldr	r0, [sp]
+	strex	r1, r2, [sp]			@ clear the exclusive monitor
+	ldmib	sp, {r1 - pc}^			@ load r1 - pc, cpsr
+#else
+	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
+#endif
+	.endm
+
+	.macro	restore_user_regs, fast = 0, offset = 0
+	ldr	r1, [sp, #\offset + S_PSR]	@ get calling cpsr
+	ldr	lr, [sp, #\offset + S_PC]!	@ get pc
+	msr	spsr_cxsf, r1			@ save in spsr_svc
+#if defined(CONFIG_CPU_32v6K)
+	clrex					@ clear the exclusive monitor
+#elif defined (CONFIG_CPU_V6)
+	strex	r1, r2, [sp]			@ clear the exclusive monitor
+#endif
+	.if	\fast
+	ldmdb	sp, {r1 - lr}^			@ get calling r1 - lr
+	.else
+	ldmdb	sp, {r0 - lr}^			@ get calling r0 - lr
+	.endif
+	mov	r0, r0				@ ARMv5T and earlier require a nop
+						@ after ldm {}^
+	add	sp, sp, #S_FRAME_SIZE - S_PC
+	movs	pc, lr				@ return & move spsr_svc into cpsr
+	.endm
+
+	.macro	get_thread_info, rd
+	mov	\rd, sp, lsr #13
+	mov	\rd, \rd, lsl #13
+	.endm
+
+	@
+	@ 32-bit wide "mov pc, reg"
+	@
+	.macro	movw_pc, reg
+	mov	pc, \reg
+	.endm
+#else	/* CONFIG_THUMB2_KERNEL */
+	.macro	svc_exit, rpsr
+	clrex					@ clear the exclusive monitor
+	ldr	r0, [sp, #S_SP]			@ top of the stack
+	ldr	r1, [sp, #S_PC]			@ return address
+	tst	r0, #4				@ orig stack 8-byte aligned?
+	stmdb	r0, {r1, \rpsr}			@ rfe context
+	ldmia	sp, {r0 - r12}
+	ldr	lr, [sp, #S_LR]
+	addeq	sp, sp, #S_FRAME_SIZE - 8	@ aligned
+	addne	sp, sp, #S_FRAME_SIZE - 4	@ not aligned
+	rfeia	sp!
+	.endm
+
+	.macro	restore_user_regs, fast = 0, offset = 0
+	clrex					@ clear the exclusive monitor
+	mov	r2, sp
+	load_user_sp_lr r2, r3, \offset + S_SP	@ calling sp, lr
+	ldr	r1, [sp, #\offset + S_PSR]	@ get calling cpsr
+	ldr	lr, [sp, #\offset + S_PC]	@ get pc
+	add	sp, sp, #\offset + S_SP
+	msr	spsr_cxsf, r1			@ save in spsr_svc
+	.if	\fast
+	ldmdb	sp, {r1 - r12}			@ get calling r1 - r12
+	.else
+	ldmdb	sp, {r0 - r12}			@ get calling r0 - r12
+	.endif
+	add	sp, sp, #S_FRAME_SIZE - S_SP
+	movs	pc, lr				@ return & move spsr_svc into cpsr
+	.endm
+
+	.macro	get_thread_info, rd
+	mov	\rd, sp
+	lsr	\rd, \rd, #13
+	mov	\rd, \rd, lsl #13
+	.endm
+
+	@
+	@ 32-bit wide "mov pc, reg"
+	@
+	.macro	movw_pc, reg
+	mov	pc, \reg
+	nop
+	.endm
+#endif	/* !CONFIG_THUMB2_KERNEL */
+
+/*
+ * These are the registers used in the syscall handler, and allow us to
+ * have in theory up to 7 arguments to a function - r0 to r6.
+ *
+ * r7 is reserved for the system call number for thumb mode.
+ *
+ * Note that tbl == why is intentional.
+ *
+ * We must set at least "tsk" and "why" when calling ret_with_reschedule.
+ */
+scno	.req	r7		@ syscall number
+tbl	.req	r8		@ syscall table pointer
+why	.req	r8		@ Linux syscall (!= 0)
+tsk	.req	r9		@ current thread_info
--- 0001/arch/arm/kernel/entry-armv.S
+++ work/arch/arm/kernel/entry-armv.S	2010-10-05 20:03:00.000000000 +0900
@@ -23,8 +23,7 @@
 #include <asm/unwind.h>
 #include <asm/unistd.h>
 #include <asm/tls.h>
-
-#include "entry-header.S"
+#include <asm/entry-header.S>
 
 /*
  * Interrupt handling.  Preserves r7, r8, r9
--- 0001/arch/arm/kernel/entry-common.S
+++ work/arch/arm/kernel/entry-common.S	2010-10-05 20:03:14.000000000 +0900
@@ -12,8 +12,7 @@
 #include <asm/ftrace.h>
 #include <mach/entry-macro.S>
 #include <asm/unwind.h>
-
-#include "entry-header.S"
+#include <asm/entry-header.S>
 
 	.align	5
--- 0001/arch/arm/kernel/entry-header.S
+++ /dev/null	2010-09-30 18:32:39.608254290 +0900
@@ -1,181 +0,0 @@
-#include <linux/init.h>
-#include <linux/linkage.h>
-
-#include <asm/assembler.h>
-#include <asm/asm-offsets.h>
-#include <asm/errno.h>
-#include <asm/thread_info.h>
-
-@ Bad Abort numbers
-@ -----------------
-@
-#define BAD_PREFETCH		0
-#define BAD_DATA		1
-#define BAD_ADDREXCPTN		2
-#define BAD_IRQ			3
-#define BAD_UNDEFINSTR		4
-
-@
-@ Most of the stack format comes from struct pt_regs, but with
-@ the addition of 8 bytes for storing syscall args 5 and 6.
-@ This _must_ remain a multiple of 8 for EABI.
-@
-#define S_OFF			8
-
-/*
- * The SWI code relies on the fact that R0 is at the bottom of the stack
- * (due to slow/fast restore user regs).
- */
-#if S_R0 != 0
-#error "Please fix"
-#endif
-
-	.macro	zero_fp
-#ifdef CONFIG_FRAME_POINTER
-	mov	fp, #0
-#endif
-	.endm
-
-	.macro	alignment_trap, rtemp
-#ifdef CONFIG_ALIGNMENT_TRAP
-	ldr	\rtemp, .LCcralign
-	ldr	\rtemp, [\rtemp]
-	mcr	p15, 0, \rtemp, c1, c0
-#endif
-	.endm
-
-	@
-	@ Store/load the USER SP and LR registers by switching to the SYS
-	@ mode. Useful in Thumb-2 mode where "stm/ldm rd, {sp, lr}^" is not
-	@ available. Should only be called from SVC mode
-	@
-	.macro	store_user_sp_lr, rd, rtemp, offset = 0
-	mrs	\rtemp, cpsr
-	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
-	msr	cpsr_c, \rtemp			@ switch to the SYS mode
-
-	str	sp, [\rd, #\offset]		@ save sp_usr
-	str	lr, [\rd, #\offset + 4]		@ save lr_usr
-
-	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
-	msr	cpsr_c, \rtemp			@ switch back to the SVC mode
-	.endm
-
-	.macro	load_user_sp_lr, rd, rtemp, offset = 0
-	mrs	\rtemp, cpsr
-	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
-	msr	cpsr_c, \rtemp			@ switch to the SYS mode
-
-	ldr	sp, [\rd, #\offset]		@ load sp_usr
-	ldr	lr, [\rd, #\offset + 4]		@ load lr_usr
-
-	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
-	msr	cpsr_c, \rtemp			@ switch back to the SVC mode
-	.endm
-
-#ifndef CONFIG_THUMB2_KERNEL
-	.macro	svc_exit, rpsr
-	msr	spsr_cxsf, \rpsr
-#if defined(CONFIG_CPU_32v6K)
-	clrex					@ clear the exclusive monitor
-	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
-#elif defined (CONFIG_CPU_V6)
-	ldr	r0, [sp]
-	strex	r1, r2, [sp]			@ clear the exclusive monitor
-	ldmib	sp, {r1 - pc}^			@ load r1 - pc, cpsr
-#else
-	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
-#endif
-	.endm
-
-	.macro	restore_user_regs, fast = 0, offset = 0
-	ldr	r1, [sp, #\offset + S_PSR]	@ get calling cpsr
-	ldr	lr, [sp, #\offset + S_PC]!	@ get pc
-	msr	spsr_cxsf, r1			@ save in spsr_svc
-#if defined(CONFIG_CPU_32v6K)
-	clrex					@ clear the exclusive monitor
-#elif defined (CONFIG_CPU_V6)
-	strex	r1, r2, [sp]			@ clear the exclusive monitor
-#endif
-	.if	\fast
-	ldmdb	sp, {r1 - lr}^			@ get calling r1 - lr
-	.else
-	ldmdb	sp, {r0 - lr}^			@ get calling r0 - lr
-	.endif
-	mov	r0, r0				@ ARMv5T and earlier require a nop
-						@ after ldm {}^
-	add	sp, sp, #S_FRAME_SIZE - S_PC
-	movs	pc, lr				@ return & move spsr_svc into cpsr
-	.endm
-
-	.macro	get_thread_info, rd
-	mov	\rd, sp, lsr #13
-	mov	\rd, \rd, lsl #13
-	.endm
-
-	@
-	@ 32-bit wide "mov pc, reg"
-	@
-	.macro	movw_pc, reg
-	mov	pc, \reg
-	.endm
-#else	/* CONFIG_THUMB2_KERNEL */
-	.macro	svc_exit, rpsr
-	clrex					@ clear the exclusive monitor
-	ldr	r0, [sp, #S_SP]			@ top of the stack
-	ldr	r1, [sp, #S_PC]			@ return address
-	tst	r0, #4				@ orig stack 8-byte aligned?
-	stmdb	r0, {r1, \rpsr}			@ rfe context
-	ldmia	sp, {r0 - r12}
-	ldr	lr, [sp, #S_LR]
-	addeq	sp, sp, #S_FRAME_SIZE - 8	@ aligned
-	addne	sp, sp, #S_FRAME_SIZE - 4	@ not aligned
-	rfeia	sp!
-	.endm
-
-	.macro	restore_user_regs, fast = 0, offset = 0
-	clrex					@ clear the exclusive monitor
-	mov	r2, sp
-	load_user_sp_lr r2, r3, \offset + S_SP	@ calling sp, lr
-	ldr	r1, [sp, #\offset + S_PSR]	@ get calling cpsr
-	ldr	lr, [sp, #\offset + S_PC]	@ get pc
-	add	sp, sp, #\offset + S_SP
-	msr	spsr_cxsf, r1			@ save in spsr_svc
-	.if	\fast
-	ldmdb	sp, {r1 - r12}			@ get calling r1 - r12
-	.else
-	ldmdb	sp, {r0 - r12}			@ get calling r0 - r12
-	.endif
-	add	sp, sp, #S_FRAME_SIZE - S_SP
-	movs	pc, lr				@ return & move spsr_svc into cpsr
-	.endm
-
-	.macro	get_thread_info, rd
-	mov	\rd, sp
-	lsr	\rd, \rd, #13
-	mov	\rd, \rd, lsl #13
-	.endm
-
-	@
-	@ 32-bit wide "mov pc, reg"
-	@
-	.macro	movw_pc, reg
-	mov	pc, \reg
-	nop
-	.endm
-#endif	/* !CONFIG_THUMB2_KERNEL */
-
-/*
- * These are the registers used in the syscall handler, and allow us to
- * have in theory up to 7 arguments to a function - r0 to r6.
- *
- * r7 is reserved for the system call number for thumb mode.
- *
- * Note that tbl == why is intentional.
- *
- * We must set at least "tsk" and "why" when calling ret_with_reschedule.
- */
-scno	.req	r7		@ syscall number
-tbl	.req	r8		@ syscall table pointer
-why	.req	r8		@ Linux syscall (!= 0)
-tsk	.req	r9		@ current thread_info