From patchwork Wed Jul 16 08:47:43 2014
X-Patchwork-Submitter: Andrew Jones
X-Patchwork-Id: 4565551
From: Andrew Jones
To: kvmarm@lists.cs.columbia.edu, kvm@vger.kernel.org
Cc: christoffer.dall@linaro.org, pbonzini@redhat.com
Subject: [PATCH v7 14/14] arm: vectors support
Date: Wed, 16 Jul 2014 10:47:43 +0200
Message-Id: <1405500463-20713-15-git-send-email-drjones@redhat.com>
In-Reply-To: <1405500463-20713-1-git-send-email-drjones@redhat.com>
References: <1405500463-20713-1-git-send-email-drjones@redhat.com>

Add support for tests to install exception handlers with
install_exception_handler(). This patch also adds start_usr(), which
can be used to start a function in USR mode on a given stack.
start_usr() is used by a new selftest that exercises the new vector
support.

Signed-off-by: Andrew Jones
Reviewed-by: Christoffer Dall
---
v7: - selftest.c: s/alloc_aligned/memalign/
    - lib/arm/processor.c: remove unnecessary include "asm/setup.h"
v6: use alloc() for start_usr
v5: rebase change: replace __stringify with libcflat's new xstr macro
v4: a couple tweaks to fit changes in the other patches, vectors-usr
    test now has an 8K usr stack
v3: - squashed in 'arm: Simplify exceptions_init in cstart.S' [Christoffer Dall]
    - suggested function name changes and comment additions [Christoffer Dall]
    - fix a bug with stack restore from usr mode exceptions that
      Christoffer pointed out. Add a get_sp() accessor too.
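For readers new to the framework, intended usage looks roughly like the
sketch below. It is not part of the patch: the test flow and the names
my_und_handler/usr_mode_test are invented for illustration, while
install_exception_handler(), start_usr(), EXCPTN_UND, struct pt_regs,
report(), memalign() and PAGE_SIZE are provided by this series.

#include "libcflat.h"
#include "alloc.h"
#include "asm/processor.h"
#include "asm/page.h"

static bool und_taken;

static void my_und_handler(struct pt_regs *regs __unused)
{
	/* handlers always run in SVC mode; *regs is the saved frame */
	und_taken = true;
}

static void usr_mode_test(void *arg __unused)
{
	/*
	 * Trigger an undefined instruction exception from USR mode using
	 * the same trick as selftest.c: access a coprocessor we don't have.
	 */
	asm volatile("mcr p2, 0, r0, c0, c0");

	report("und exception taken from usr mode", und_taken);
	exit(report_summary());
}

int main(void)
{
	install_exception_handler(EXCPTN_UND, my_und_handler);

	/* run the test function in USR mode on its own 4K stack */
	void *stack = memalign(PAGE_SIZE, PAGE_SIZE);
	start_usr(usr_mode_test, NULL, (unsigned long)stack + PAGE_SIZE);

	return 0; /* not reached, usr_mode_test exits */
}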
---
 arm/cstart.S            | 174 ++++++++++++++++++++++++++++++++++++++++++++++++
 arm/flat.lds            |   7 +-
 arm/selftest.c          | 126 ++++++++++++++++++++++++++++++++++-
 arm/unittests.cfg       |  12 ++++
 config/config-arm.mak   |   3 +-
 lib/arm/asm/processor.h |  39 +++++++++++
 lib/arm/processor.c     | 111 ++++++++++++++++++++++++++++++
 7 files changed, 469 insertions(+), 3 deletions(-)
 create mode 100644 lib/arm/asm/processor.h
 create mode 100644 lib/arm/processor.c

diff --git a/arm/cstart.S b/arm/cstart.S
index e28251db2950d..cc87ece4b6b40 100644
--- a/arm/cstart.S
+++ b/arm/cstart.S
@@ -5,6 +5,10 @@
  *
  * This work is licensed under the terms of the GNU LGPL, version 2.
  */
+#define __ASSEMBLY__
+#include "asm/asm-offsets.h"
+#include "asm/ptrace.h"
+#include "asm/cp15.h"
 
 .arm
 
@@ -17,6 +21,13 @@ start:
 	 * See the kernel doc Documentation/arm/Booting
 	 */
 	ldr	sp, =stacktop
+	push	{r0-r3}
+
+	/* set up vector table and mode stacks */
+	bl	exceptions_init
+
+	/* complete setup */
+	pop	{r0-r3}
 	bl	setup
 
 	/* run the test */
@@ -27,9 +38,172 @@ start:
 	bl	exit
 	b	halt
 
+
+.macro set_mode_stack mode, stack
+	add	\stack, #S_FRAME_SIZE
+	msr	cpsr_c, #(\mode | PSR_I_BIT | PSR_F_BIT)
+	mov	sp, \stack
+.endm
+
+exceptions_init:
+	mrc	p15, 0, r2, c1, c0, 0	@ read SCTLR
+	bic	r2, #CR_V		@ SCTLR.V := 0
+	mcr	p15, 0, r2, c1, c0, 0	@ write SCTLR
+	ldr	r2, =vector_table
+	mcr	p15, 0, r2, c12, c0, 0	@ write VBAR
+
+	mrs	r2, cpsr
+	ldr	r1, =exception_stacks
+
+	/* first frame reserved for svc mode */
+	set_mode_stack	UND_MODE, r1
+	set_mode_stack	ABT_MODE, r1
+	set_mode_stack	IRQ_MODE, r1
+	set_mode_stack	FIQ_MODE, r1
+
+	msr	cpsr_cxsf, r2		@ back to svc mode
+	mov	pc, lr
+
 .text
 
 .globl halt
 halt:
 1:	wfi
 	b	1b
+
+/*
+ * Vector stubs
+ * Simplified version of the Linux kernel implementation
+ *   arch/arm/kernel/entry-armv.S
+ *
+ * Each mode has an S_FRAME_SIZE sized stack initialized
+ * in exceptions_init
+ */
+.macro vector_stub, name, vec, mode, correction=0
+.align 5
+vector_\name:
+.if \correction
+	sub	lr, lr, #\correction
+.endif
+	/*
+	 * Save r0, r1, lr_<exception> (parent PC)
+	 * and spsr_<exception> (parent CPSR)
+	 */
+	str	r0, [sp, #S_R0]
+	str	r1, [sp, #S_R1]
+	str	lr, [sp, #S_PC]
+	mrs	r0, spsr
+	str	r0, [sp, #S_PSR]
+
+	/* Prepare for SVC32 mode. */
+	mrs	r0, cpsr
+	bic	r0, #MODE_MASK
+	orr	r0, #SVC_MODE
+	msr	spsr_cxsf, r0
+
+	/* Branch to handler in SVC mode */
+	mov	r0, #\vec
+	mov	r1, sp
+	ldr	lr, =vector_common
+	movs	pc, lr
+.endm
+
+vector_stub	rst,	0, UND_MODE
+vector_stub	und,	1, UND_MODE
+vector_stub	pabt,	3, ABT_MODE, 4
+vector_stub	dabt,	4, ABT_MODE, 8
+vector_stub	irq,	6, IRQ_MODE, 4
+vector_stub	fiq,	7, FIQ_MODE, 4
+
+.align 5
+vector_svc:
+	/*
+	 * Save r0, r1, lr_<exception> (parent PC)
+	 * and spsr_<exception> (parent CPSR)
+	 */
+	push	{ r1 }
+	ldr	r1, =exception_stacks
+	str	r0, [r1, #S_R0]
+	pop	{ r0 }
+	str	r0, [r1, #S_R1]
+	str	lr, [r1, #S_PC]
+	mrs	r0, spsr
+	str	r0, [r1, #S_PSR]
+
+	/*
+	 * Branch to handler, still in SVC mode.
+	 * r0 := 2 is the svc vector number.
+	 */
+	mov	r0, #2
+	ldr	lr, =vector_common
+	mov	pc, lr
+
+vector_common:
+	/* make room for pt_regs */
+	sub	sp, #S_FRAME_SIZE
+	tst	sp, #4			@ check stack alignment
+	subne	sp, #4
+
+	/* store registers r0-r12 */
+	stmia	sp, { r0-r12 }		@ stored wrong r0 and r1, fix later
+
+	/* get registers saved in the stub */
+	ldr	r2, [r1, #S_R0]		@ r0
+	ldr	r3, [r1, #S_R1]		@ r1
+	ldr	r4, [r1, #S_PC]		@ lr_<exception> (parent PC)
+	ldr	r5, [r1, #S_PSR]	@ spsr_<exception> (parent CPSR)
+
+	/* fix r0 and r1 */
+	str	r2, [sp, #S_R0]
+	str	r3, [sp, #S_R1]
+
+	/* store sp_svc, if we were in usr mode we'll fix this later */
+	add	r6, sp, #S_FRAME_SIZE
+	addne	r6, #4			@ stack wasn't aligned
+	str	r6, [sp, #S_SP]
+
+	str	lr, [sp, #S_LR]		@ store lr_svc, fix later for usr mode
+	str	r4, [sp, #S_PC]		@ store lr_<exception>
+	str	r5, [sp, #S_PSR]	@ store spsr_<exception>
+
+	/* set ORIG_r0 */
+	mov	r2, #-1
+	str	r2, [sp, #S_OLD_R0]
+
+	/* if we were in usr mode then we need sp_usr and lr_usr instead */
+	and	r1, r5, #MODE_MASK
+	cmp	r1, #USR_MODE
+	bne	1f
+	add	r1, sp, #S_SP
+	stmia	r1, { sp,lr }^
+
+	/* Call the handler. r0 is the vector number, r1 := pt_regs */
+1:	mov	r1, sp
+	bl	do_handle_exception
+
+	/*
+	 * make sure we restore sp_svc on mode change. No need to
+	 * worry about lr_svc though, as that gets clobbered on
+	 * exception entry anyway.
+	 */
+	str	r6, [sp, #S_SP]
+
+	/* return from exception */
+	msr	spsr_cxsf, r5
+	ldmia	sp, { r0-pc }^
+
+.align 5
+vector_addrexcptn:
+	b	vector_addrexcptn
+
+.section .text.ex
+.align 5
+vector_table:
+	b	vector_rst
+	b	vector_und
+	b	vector_svc
+	b	vector_pabt
+	b	vector_dabt
+	b	vector_addrexcptn	@ should never happen
+	b	vector_irq
+	b	vector_fiq
diff --git a/arm/flat.lds b/arm/flat.lds
index 3e5d72e24989b..ee9fc0ab79abc 100644
--- a/arm/flat.lds
+++ b/arm/flat.lds
@@ -3,7 +3,12 @@ SECTIONS
 {
     .text : { *(.init) *(.text) *(.text.*) }
     . = ALIGN(4K);
-    .data : { *(.data) }
+    .data : {
+        exception_stacks = .;
+        . += 4K;
+        exception_stacks_end = .;
+        *(.data)
+    }
     . = ALIGN(16);
     .rodata : { *(.rodata) }
     . = ALIGN(16);
diff --git a/arm/selftest.c b/arm/selftest.c
index e674103379956..0f70e1dcb3b0e 100644
--- a/arm/selftest.c
+++ b/arm/selftest.c
@@ -6,7 +6,12 @@
  * This work is licensed under the terms of the GNU LGPL, version 2.
  */
 #include "libcflat.h"
+#include "alloc.h"
 #include "asm/setup.h"
+#include "asm/ptrace.h"
+#include "asm/asm-offsets.h"
+#include "asm/processor.h"
+#include "asm/page.h"
 
 #define TESTGRP "selftest"
 
@@ -73,14 +78,133 @@ static void check_setup(int argc, char **argv)
 	assert_args(nr_tests, 2);
 }
 
+static struct pt_regs expected_regs;
+/*
+ * Capture the current register state and execute an instruction
+ * that causes an exception. The test handler will check that its
+ * capture of the current register state matches the capture done here.
+ *
+ * NOTE: update clobber list if passed insns needs more than r0,r1
+ */
+#define test_exception(pre_insns, excptn_insn, post_insns)	\
+	asm volatile(						\
+		pre_insns "\n"					\
+		"mov r0, %0\n"					\
+		"stmia r0, { r0-lr }\n"				\
+		"mrs r1, cpsr\n"				\
+		"str r1, [r0, #" xstr(S_PSR) "]\n"		\
+		"mov r1, #-1\n"					\
+		"str r1, [r0, #" xstr(S_OLD_R0) "]\n"		\
+		"add r1, pc, #8\n"				\
+		"str r1, [r0, #" xstr(S_R1) "]\n"		\
+		"str r1, [r0, #" xstr(S_PC) "]\n"		\
+		excptn_insn "\n"				\
+		post_insns "\n"					\
+	:: "r" (&expected_regs) : "r0", "r1")
+
+static bool check_regs(struct pt_regs *regs)
+{
+	unsigned i;
+
+	/* exception handlers should always run in svc mode */
+	if (current_mode() != SVC_MODE)
+		return false;
+
+	for (i = 0; i < ARRAY_SIZE(regs->uregs); ++i) {
+		if (regs->uregs[i] != expected_regs.uregs[i])
+			return false;
+	}
+
+	return true;
+}
+
+static bool und_works;
+static void und_handler(struct pt_regs *regs)
+{
+	und_works = check_regs(regs);
+}
+
+static bool check_und(void)
+{
+	install_exception_handler(EXCPTN_UND, und_handler);
+
+	/* issue an instruction to a coprocessor we don't have */
+	test_exception("", "mcr p2, 0, r0, c0, c0", "");
+
+	install_exception_handler(EXCPTN_UND, NULL);
+
+	return und_works;
+}
+
+static bool svc_works;
+static void svc_handler(struct pt_regs *regs)
+{
+	u32 svc = *(u32 *)(regs->ARM_pc - 4) & 0xffffff;
+
+	if (processor_mode(regs) == SVC_MODE) {
+		/*
+		 * When issuing an svc from supervisor mode lr_svc will
+		 * get corrupted. So before issuing the svc, callers must
+		 * always push it on the stack. We pushed it to offset 4.
+		 */
+		regs->ARM_lr = *(unsigned long *)(regs->ARM_sp + 4);
+	}
+
+	svc_works = check_regs(regs) && svc == 123;
+}
+
+static bool check_svc(void)
+{
+	install_exception_handler(EXCPTN_SVC, svc_handler);
+
+	if (current_mode() == SVC_MODE) {
+		/*
+		 * An svc from supervisor mode will corrupt lr_svc and
+		 * spsr_svc. We need to save/restore them separately.
+		 */
+		test_exception(
+			"mrs r0, spsr\n"
+			"push { r0,lr }\n",
+			"svc #123\n",
+			"pop { r0,lr }\n"
+			"msr spsr_cxsf, r0\n"
+		);
+	} else {
+		test_exception("", "svc #123", "");
+	}
+
+	install_exception_handler(EXCPTN_SVC, NULL);
+
+	return svc_works;
+}
+
+static void check_vectors(void *arg __unused)
+{
+	report("%s", check_und() && check_svc(), testname);
+	exit(report_summary());
+}
+
 int main(int argc, char **argv)
 {
 	testname_set(NULL);
 	assert_args(argc, 1);
 	testname_set(argv[0]);
 
-	if (strcmp(argv[0], "setup") == 0)
+	if (strcmp(argv[0], "setup") == 0) {
+
 		check_setup(argc-1, &argv[1]);
+	} else if (strcmp(argv[0], "vectors-svc") == 0) {
+
+		check_vectors(NULL);
+
+	} else if (strcmp(argv[0], "vectors-usr") == 0) {
+
+		void *sp = memalign(PAGE_SIZE, PAGE_SIZE);
+		memset(sp, 0, PAGE_SIZE);
+		start_usr(check_vectors, NULL, (unsigned long)sp + PAGE_SIZE);
+	}
 
 	return report_summary();
 }
diff --git a/arm/unittests.cfg b/arm/unittests.cfg
index da9dfd7b1f118..57f5f90f3e808 100644
--- a/arm/unittests.cfg
+++ b/arm/unittests.cfg
@@ -16,3 +16,15 @@ file = selftest.flat
 smp = 1
 extra_params = -m 256 -append 'setup smp=1 mem=256'
 groups = selftest
+
+# Test vector setup and exception handling (svc mode).
+[selftest::vectors-svc]
+file = selftest.flat
+extra_params = -append 'vectors-svc'
+groups = selftest
+
+# Test vector setup and exception handling (usr mode).
+[selftest::vectors-usr]
+file = selftest.flat
+extra_params = -append 'vectors-usr'
+groups = selftest
diff --git a/config/config-arm.mak b/config/config-arm.mak
index f03b96d4c50c5..8a274c50332b0 100644
--- a/config/config-arm.mak
+++ b/config/config-arm.mak
@@ -41,7 +41,8 @@ cflatobjs += \
 	lib/chr-testdev.o \
 	lib/arm/io.o \
 	lib/arm/setup.o \
-	lib/arm/spinlock.o
+	lib/arm/spinlock.o \
+	lib/arm/processor.o
 
 libeabi = lib/arm/libeabi.a
 eabiobjs = lib/arm/eabi_compat.o
diff --git a/lib/arm/asm/processor.h b/lib/arm/asm/processor.h
new file mode 100644
index 0000000000000..883cab89622f7
--- /dev/null
+++ b/lib/arm/asm/processor.h
@@ -0,0 +1,39 @@
+#ifndef _ASMARM_PROCESSOR_H_
+#define _ASMARM_PROCESSOR_H_
+/*
+ * Copyright (C) 2014, Red Hat Inc, Andrew Jones
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.
+ */
+#include "ptrace.h"
+
+enum vector {
+	EXCPTN_RST,
+	EXCPTN_UND,
+	EXCPTN_SVC,
+	EXCPTN_PABT,
+	EXCPTN_DABT,
+	EXCPTN_ADDREXCPTN,
+	EXCPTN_IRQ,
+	EXCPTN_FIQ,
+	EXCPTN_MAX,
+};
+
+typedef void (*exception_fn)(struct pt_regs *);
+extern void install_exception_handler(enum vector v, exception_fn fn);
+
+extern void show_regs(struct pt_regs *regs);
+extern void *get_sp(void);
+
+static inline unsigned long current_cpsr(void)
+{
+	unsigned long cpsr;
+	asm volatile("mrs %0, cpsr" : "=r" (cpsr));
+	return cpsr;
+}
+
+#define current_mode() (current_cpsr() & MODE_MASK)
+
+extern void start_usr(void (*func)(void *arg), void *arg, unsigned long sp_usr);
+
+#endif /* _ASMARM_PROCESSOR_H_ */
diff --git a/lib/arm/processor.c b/lib/arm/processor.c
new file mode 100644
index 0000000000000..382a128edd415
--- /dev/null
+++ b/lib/arm/processor.c
@@ -0,0 +1,111 @@
+/*
+ * processor control and status functions
+ *
+ * Copyright (C) 2014, Red Hat Inc, Andrew Jones
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.
+ */
+#include "libcflat.h"
+#include "asm/ptrace.h"
+#include "asm/processor.h"
+
+static const char *processor_modes[] = {
+	"USER_26", "FIQ_26" , "IRQ_26" , "SVC_26" ,
+	"UK4_26" , "UK5_26" , "UK6_26" , "UK7_26" ,
+	"UK8_26" , "UK9_26" , "UK10_26", "UK11_26",
+	"UK12_26", "UK13_26", "UK14_26", "UK15_26",
+	"USER_32", "FIQ_32" , "IRQ_32" , "SVC_32" ,
+	"UK4_32" , "UK5_32" , "UK6_32" , "ABT_32" ,
+	"UK8_32" , "UK9_32" , "UK10_32", "UND_32" ,
+	"UK12_32", "UK13_32", "UK14_32", "SYS_32"
+};
+
+static char *vector_names[] = {
+	"rst", "und", "svc", "pabt", "dabt", "addrexcptn", "irq", "fiq"
+};
+
+void show_regs(struct pt_regs *regs)
+{
+	unsigned long flags;
+	char buf[64];
+
+	printf("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n"
+	       "sp : %08lx ip : %08lx fp : %08lx\n",
+		regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
+		regs->ARM_sp, regs->ARM_ip, regs->ARM_fp);
+	printf("r10: %08lx r9 : %08lx r8 : %08lx\n",
+		regs->ARM_r10, regs->ARM_r9, regs->ARM_r8);
+	printf("r7 : %08lx r6 : %08lx r5 : %08lx r4 : %08lx\n",
+		regs->ARM_r7, regs->ARM_r6, regs->ARM_r5, regs->ARM_r4);
+	printf("r3 : %08lx r2 : %08lx r1 : %08lx r0 : %08lx\n",
+		regs->ARM_r3, regs->ARM_r2, regs->ARM_r1, regs->ARM_r0);
+
+	flags = regs->ARM_cpsr;
+	buf[0] = flags & PSR_N_BIT ? 'N' : 'n';
+	buf[1] = flags & PSR_Z_BIT ? 'Z' : 'z';
+	buf[2] = flags & PSR_C_BIT ? 'C' : 'c';
+	buf[3] = flags & PSR_V_BIT ? 'V' : 'v';
+	buf[4] = '\0';
+
+	printf("Flags: %s IRQs o%s FIQs o%s Mode %s\n",
+		buf, interrupts_enabled(regs) ? "n" : "ff",
+		fast_interrupts_enabled(regs) ? "n" : "ff",
+		processor_modes[processor_mode(regs)]);
+
+	if (!user_mode(regs)) {
+		unsigned int ctrl, transbase, dac;
+		asm volatile(
+			"mrc p15, 0, %0, c1, c0\n"
+			"mrc p15, 0, %1, c2, c0\n"
+			"mrc p15, 0, %2, c3, c0\n"
+		: "=r" (ctrl), "=r" (transbase), "=r" (dac));
+		printf("Control: %08x Table: %08x DAC: %08x\n",
+			ctrl, transbase, dac);
+	}
+}
+
+void *get_sp(void)
+{
+	register unsigned long sp asm("sp");
+	return (void *)sp;
+}
+
+static exception_fn exception_handlers[EXCPTN_MAX];
+
+void install_exception_handler(enum vector v, exception_fn fn)
+{
+	if (v < EXCPTN_MAX)
+		exception_handlers[v] = fn;
+}
+
+void do_handle_exception(enum vector v, struct pt_regs *regs)
+{
+	if (v < EXCPTN_MAX && exception_handlers[v]) {
+		exception_handlers[v](regs);
+		return;
+	}
+
+	if (v < EXCPTN_MAX)
+		printf("Unhandled exception %d (%s)\n", v, vector_names[v]);
+	else
+		printf("%s called with vector=%d\n", __func__, v);
+
+	printf("Exception frame registers:\n");
+	show_regs(regs);
+	abort();
+}
+
+void start_usr(void (*func)(void *arg), void *arg, unsigned long sp_usr)
+{
+	sp_usr &= (~7UL); /* stack ptr needs 8-byte alignment */
+
+	asm volatile(
+		"mrs r0, cpsr\n"
+		"bic r0, #" xstr(MODE_MASK) "\n"
+		"orr r0, #" xstr(USR_MODE) "\n"
+		"msr cpsr_c, r0\n"
+		"mov r0, %0\n"
+		"mov sp, %1\n"
+		"mov pc, %2\n"
+	:: "r" (arg), "r" (sp_usr), "r" (func) : "r0");
+}