Message ID | 1380534251-4464-3-git-send-email-vijay.kilari@gmail.com (mailing list archive)
---|---
State | New, archived
On Mon, Sep 30, 2013 at 10:44:10AM +0100, vijay.kilari@gmail.com wrote: > From: Vijaya Kumar K <Vijaya.Kumar@caviumnetworks.com> > > Add KGDB debug support for kernel debugging. > With this patch, basic KGDB debugging is possible.GDB register > layout is updated and GDB tool can establish connection with > target and can set/clear breakpoints. > > Signed-off-by: Vijaya Kumar K <Vijaya.Kumar@caviumnetworks.com> > --- > arch/arm64/include/asm/debug-monitors.h | 10 ++ > arch/arm64/include/asm/kgdb.h | 81 +++++++++ > arch/arm64/kernel/Makefile | 1 + > arch/arm64/kernel/kgdb.c | 298 +++++++++++++++++++++++++++++++ > 4 files changed, 390 insertions(+) > > diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h > index 19e429e..ed376ce 100644 > --- a/arch/arm64/include/asm/debug-monitors.h > +++ b/arch/arm64/include/asm/debug-monitors.h > @@ -28,6 +28,16 @@ > #define DBG_ESR_EVT_HWWP 0x2 > #define DBG_ESR_EVT_BRK 0x6 > > +/* > + * Break point instruction encoding > + */ > +#define BREAK_INSTR_SIZE 4 > +#define KGDB_BREAKINST_ESR_VAL 0xf2000000 > +#define KGDB_COMPILED_BREAK_ESR_VAL 0xf2000001 > +#define KGDB_ARM64_COMPILE_BRK_IMM 1 > +#define CACHE_FLUSH_IS_SAFE 1 #define DBG_BRK_KGDB_DYN 0 #define DBG_BRK_KGDB_COMPILED 1 #define DBG_ESR_VAL_BRK(x) (0xf2000000 | ((x) & 0xffff)) Then you can use the latter to construct your ESR values dynamically and adding a new brk handler is straightforward. > diff --git a/arch/arm64/include/asm/kgdb.h b/arch/arm64/include/asm/kgdb.h > new file mode 100644 > index 0000000..15b36fd > --- /dev/null > +++ b/arch/arm64/include/asm/kgdb.h > @@ -0,0 +1,81 @@ > +/* > + * AArch64 KGDB support > + * > + * Based on arch/arm/include/kgdb.h > + * > + * Copyright (C) 2013 Cavium Inc. > + * Author: Vijaya Kumar K <vijaya.kumar@caviumnetworks.com> > + * > + * This program is free software; you can redistribute it and/or modify > + * it under the terms of the GNU General Public License version 2 as > + * published by the Free Software Foundation. > + * > + * This program is distributed in the hope that it will be useful, > + * but WITHOUT ANY WARRANTY; without even the implied warranty of > + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the > + * GNU General Public License for more details. > + * > + * You should have received a copy of the GNU General Public License > + * along with this program. If not, see <http://www.gnu.org/licenses/>. > + */ > + > +#ifndef __ARM_KGDB_H__ > +#define __ARM_KGDB_H__ > + > +#include <linux/ptrace.h> > +#include <asm/debug-monitors.h> > + > +#ifndef __ASSEMBLY__ > + > +static inline void arch_kgdb_breakpoint(void) > +{ > + asm ("brk %0" :: "I" (KGDB_ARM64_COMPILE_BRK_IMM)); > +} Hmm, I need to find me a compiler guy, but I don't have much faith in this function in the face of inlining. Sure a breakpoint needs to happen at a specific point in the program? If so, why can't GCC re-order this asm block as it likes? Or is there a restricted use-case for compiled breakpoints in kgdb? > +void > +sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task) > +{ > + struct pt_regs *thread_regs; > + int regno; > + int i; > + > + /* Just making sure... */ > + if (task == NULL) > + return; Erm... that comment just raises more questions. I appreciate ARM has the same check, but why doesn't x86? Can task *actually* be NULL? > + > + /* Initialize to zero */ > + for (regno = 0; regno < DBG_MAX_REG_NUM; regno++) > + gdb_regs[regno] = 0; memset? 
> + > + thread_regs = task_pt_regs(task); > + > + for(i = 0; i < 31; i++) > + gdb_regs[i] = thread_regs->regs[i]; memcpy? > +int kgdb_arch_handle_exception(int exception_vector, int signo, > + int err_code, char *remcom_in_buffer, > + char *remcom_out_buffer, > + struct pt_regs *linux_regs) > +{ > + unsigned long addr; > + char *ptr; > + int err; > + > + switch (remcom_in_buffer[0]) { > + case 'D': > + case 'k': > + case 'c': > + /* > + * Packet D (Detach), k (kill) & c (Continue) requires > + * to continue executing. Set pc to required address. > + * Try to read optional parameter, pc unchanged if no parm. > + * If this was a compiled breakpoint, we need to move > + * to the next instruction or we will just breakpoint > + * over and over again. > + */ > + ptr = &remcom_in_buffer[1]; > + if (kgdb_hex2long(&ptr, &addr)) > + kgdb_arch_set_pc(linux_regs, addr); > + else if (compiled_break == 1) > + kgdb_arch_set_pc(linux_regs, linux_regs->pc + 4); > + > + compiled_break = 0; > + > + err = 0; > + break; > + default: > + err = -1; > + } > + return err; > +} > + > +static int > +kgdb_brk_fn(struct pt_regs *regs, unsigned int esr, unsigned long addr) > +{ > + kgdb_handle_exception(1, SIGTRAP, 0, regs); > + return 0; > +} > + > +static int kgdb_compiled_brk_fn(struct pt_regs *regs, unsigned int esr, > + unsigned long addr) > +{ > + compiled_break = 1; > + kgdb_handle_exception(1, SIGTRAP, 0, regs); > + > + return 0; > +} > + > +static struct break_hook kgdb_brkpt_hook = { > + .esr_mask = 0xffffffff, > + .esr_val = KGDB_BREAKINST_ESR_VAL, > + .fn = kgdb_brk_fn > +}; > + > +static struct break_hook kgdb_compiled_brkpt_hook = { > + .esr_mask = 0xffffffff, > + .esr_val = KGDB_COMPILED_BREAK_ESR_VAL, > + .fn = kgdb_compiled_brk_fn > +}; > + > +static void kgdb_call_nmi_hook(void *ignored) > +{ > + kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs()); > +} > + > +void kgdb_roundup_cpus(unsigned long flags) > +{ > + local_irq_enable(); > + smp_call_function(kgdb_call_nmi_hook, NULL, 0); > + local_irq_disable(); > +} > + > +static int __kgdb_notify(struct die_args *args, unsigned long cmd) > +{ > + struct pt_regs *regs = args->regs; > + > + if (kgdb_handle_exception(1, args->signr, cmd, regs)) > + return NOTIFY_DONE; > + return NOTIFY_STOP; > +} > + > +static int > +kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr) > +{ > + unsigned long flags; > + int ret; > + > + local_irq_save(flags); > + ret = __kgdb_notify(ptr, cmd); > + local_irq_restore(flags); > + > + return ret; > +} > + > +static struct notifier_block kgdb_notifier = { > + .notifier_call = kgdb_notify, > + .priority = -INT_MAX, > +}; > + > + > +/* > + * kgdb_arch_init - Perform any architecture specific initalization. > + * > + * This function will handle the initalization of any architecture > + * specific callbacks. > + */ > +int kgdb_arch_init(void) > +{ > + int ret = register_die_notifier(&kgdb_notifier); > + > + if (ret != 0) > + return ret; > + > + register_break_hook(&kgdb_brkpt_hook); > + register_break_hook(&kgdb_compiled_brkpt_hook); > + > + return 0; > +} > + > +/* > + * kgdb_arch_exit - Perform any architecture specific uninitalization. > + * > + * This function will handle the uninitalization of any architecture > + * specific callbacks, for dynamic registration and unregistration. 
> + */ > +void kgdb_arch_exit(void) > +{ > + unregister_break_hook(&kgdb_brkpt_hook); > + unregister_break_hook(&kgdb_compiled_brkpt_hook); > + unregister_die_notifier(&kgdb_notifier); > +} > + > +/* > + * ARM instructions are always in LE. > + * Break instruction is encoded in LE format > + */ > +struct kgdb_arch arch_kgdb_ops = { > + .gdb_bpt_instr = {0x00, 0x00, 0x20, 0xd4} Again, you should probably try and encode the dynamic breakpoint immediate in here (which is sadly offset by 5 bits in the instruction encoding). Will
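[Editor's note: a minimal sketch of how Will's suggested DBG_ESR_VAL_BRK() macro could replace the hard-coded ESR values in the break hooks from the patch. The DBG_BRK_KGDB_DYN/DBG_BRK_KGDB_COMPILED names come from his comment; the rest mirrors the posted code and is not the final revision.]

/*
 * Sketch only: derive the KGDB ESR values from the brk immediates
 * instead of hard-coding KGDB_BREAKINST_ESR_VAL and
 * KGDB_COMPILED_BREAK_ESR_VAL.
 */
#define DBG_BRK_KGDB_DYN	0
#define DBG_BRK_KGDB_COMPILED	1
#define DBG_ESR_VAL_BRK(x)	(0xf2000000 | ((x) & 0xffff))

static struct break_hook kgdb_brkpt_hook = {
	.esr_mask	= 0xffffffff,
	.esr_val	= DBG_ESR_VAL_BRK(DBG_BRK_KGDB_DYN),
	.fn		= kgdb_brk_fn
};

static struct break_hook kgdb_compiled_brkpt_hook = {
	.esr_mask	= 0xffffffff,
	.esr_val	= DBG_ESR_VAL_BRK(DBG_BRK_KGDB_COMPILED),
	.fn		= kgdb_compiled_brk_fn
};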
On Mon, Sep 30, 2013 at 10:06 PM, Will Deacon <will.deacon@arm.com> wrote: > On Mon, Sep 30, 2013 at 10:44:10AM +0100, vijay.kilari@gmail.com wrote: >> From: Vijaya Kumar K <Vijaya.Kumar@caviumnetworks.com> >> >> Add KGDB debug support for kernel debugging. >> With this patch, basic KGDB debugging is possible.GDB register >> layout is updated and GDB tool can establish connection with >> target and can set/clear breakpoints. >> >> Signed-off-by: Vijaya Kumar K <Vijaya.Kumar@caviumnetworks.com> >> --- >> arch/arm64/include/asm/debug-monitors.h | 10 ++ >> arch/arm64/include/asm/kgdb.h | 81 +++++++++ >> arch/arm64/kernel/Makefile | 1 + >> arch/arm64/kernel/kgdb.c | 298 +++++++++++++++++++++++++++++++ >> 4 files changed, 390 insertions(+) >> >> diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h >> index 19e429e..ed376ce 100644 >> --- a/arch/arm64/include/asm/debug-monitors.h >> +++ b/arch/arm64/include/asm/debug-monitors.h >> @@ -28,6 +28,16 @@ >> #define DBG_ESR_EVT_HWWP 0x2 >> #define DBG_ESR_EVT_BRK 0x6 >> >> +/* >> + * Break point instruction encoding >> + */ >> +#define BREAK_INSTR_SIZE 4 >> +#define KGDB_BREAKINST_ESR_VAL 0xf2000000 >> +#define KGDB_COMPILED_BREAK_ESR_VAL 0xf2000001 >> +#define KGDB_ARM64_COMPILE_BRK_IMM 1 >> +#define CACHE_FLUSH_IS_SAFE 1 > > #define DBG_BRK_KGDB_DYN 0 > #define DBG_BRK_KGDB_COMPILED 1 > > #define DBG_ESR_VAL_BRK(x) (0xf2000000 | ((x) & 0xffff)) > > Then you can use the latter to construct your ESR values dynamically and > adding a new brk handler is straightforward. > OK, agreed >> diff --git a/arch/arm64/include/asm/kgdb.h b/arch/arm64/include/asm/kgdb.h >> new file mode 100644 >> index 0000000..15b36fd >> --- /dev/null >> +++ b/arch/arm64/include/asm/kgdb.h >> @@ -0,0 +1,81 @@ >> +/* >> + * AArch64 KGDB support >> + * >> + * Based on arch/arm/include/kgdb.h >> + * >> + * Copyright (C) 2013 Cavium Inc. >> + * Author: Vijaya Kumar K <vijaya.kumar@caviumnetworks.com> >> + * >> + * This program is free software; you can redistribute it and/or modify >> + * it under the terms of the GNU General Public License version 2 as >> + * published by the Free Software Foundation. >> + * >> + * This program is distributed in the hope that it will be useful, >> + * but WITHOUT ANY WARRANTY; without even the implied warranty of >> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the >> + * GNU General Public License for more details. >> + * >> + * You should have received a copy of the GNU General Public License >> + * along with this program. If not, see <http://www.gnu.org/licenses/>. >> + */ >> + >> +#ifndef __ARM_KGDB_H__ >> +#define __ARM_KGDB_H__ >> + >> +#include <linux/ptrace.h> >> +#include <asm/debug-monitors.h> >> + >> +#ifndef __ASSEMBLY__ >> + >> +static inline void arch_kgdb_breakpoint(void) >> +{ >> + asm ("brk %0" :: "I" (KGDB_ARM64_COMPILE_BRK_IMM)); >> +} > > Hmm, I need to find me a compiler guy, but I don't have much faith in this > function in the face of inlining. Sure a breakpoint needs to happen at a > specific point in the program? If so, why can't GCC re-order this asm block > as it likes? > > Or is there a restricted use-case for compiled breakpoints in kgdb? > As per Andrew, arch_kgdb_breakpoint is declared as static inline for the x86, arm, tile and powerpc. Since the inline-asm does not have any output constraints it is considered volatile just like if you declare the inline-asm as volatile. 
The compiler might reorder some stuff around it due to all uses being explicit just like any other volatile inline-asm. In fact, the arch_kgdb_breakpoint is called from only one place from kernel/debug/debug_core.c and there is no way for the compiler to reorder the block as it has two write barriers around it: wmb(); /* Sync point before breakpoint */ arch_kgdb_breakpoint(); wmb(); /* Sync point after breakpoint */ >> +void >> +sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task) >> +{ >> + struct pt_regs *thread_regs; >> + int regno; >> + int i; >> + >> + /* Just making sure... */ >> + if (task == NULL) >> + return; > > Erm... that comment just raises more questions. I appreciate ARM has the > same check, but why doesn't x86? Can task *actually* be NULL? > generally, the task will not be NULL. The check made because the caller (gdbstub.c) fetches task information from gdb global structure and calls this function. This helps in case the caller fails to pass task info. I think should not be problem with this check. In fact this is legacy code >> + >> + /* Initialize to zero */ >> + for (regno = 0; regno < DBG_MAX_REG_NUM; regno++) >> + gdb_regs[regno] = 0; > > memset? OK. agreed > >> + >> + thread_regs = task_pt_regs(task); >> + >> + for(i = 0; i < 31; i++) >> + gdb_regs[i] = thread_regs->regs[i]; > > memcpy? > OK. agreed >> +int kgdb_arch_handle_exception(int exception_vector, int signo, >> + int err_code, char *remcom_in_buffer, >> + char *remcom_out_buffer, >> + struct pt_regs *linux_regs) >> +{ >> + unsigned long addr; >> + char *ptr; >> + int err; >> + >> + switch (remcom_in_buffer[0]) { >> + case 'D': >> + case 'k': >> + case 'c': >> + /* >> + * Packet D (Detach), k (kill) & c (Continue) requires >> + * to continue executing. Set pc to required address. >> + * Try to read optional parameter, pc unchanged if no parm. >> + * If this was a compiled breakpoint, we need to move >> + * to the next instruction or we will just breakpoint >> + * over and over again. 
>> + */ >> + ptr = &remcom_in_buffer[1]; >> + if (kgdb_hex2long(&ptr, &addr)) >> + kgdb_arch_set_pc(linux_regs, addr); >> + else if (compiled_break == 1) >> + kgdb_arch_set_pc(linux_regs, linux_regs->pc + 4); >> + >> + compiled_break = 0; >> + >> + err = 0; >> + break; >> + default: >> + err = -1; >> + } >> + return err; >> +} >> + >> +static int >> +kgdb_brk_fn(struct pt_regs *regs, unsigned int esr, unsigned long addr) >> +{ >> + kgdb_handle_exception(1, SIGTRAP, 0, regs); >> + return 0; >> +} >> + >> +static int kgdb_compiled_brk_fn(struct pt_regs *regs, unsigned int esr, >> + unsigned long addr) >> +{ >> + compiled_break = 1; >> + kgdb_handle_exception(1, SIGTRAP, 0, regs); >> + >> + return 0; >> +} >> + >> +static struct break_hook kgdb_brkpt_hook = { >> + .esr_mask = 0xffffffff, >> + .esr_val = KGDB_BREAKINST_ESR_VAL, >> + .fn = kgdb_brk_fn >> +}; >> + >> +static struct break_hook kgdb_compiled_brkpt_hook = { >> + .esr_mask = 0xffffffff, >> + .esr_val = KGDB_COMPILED_BREAK_ESR_VAL, >> + .fn = kgdb_compiled_brk_fn >> +}; >> + >> +static void kgdb_call_nmi_hook(void *ignored) >> +{ >> + kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs()); >> +} >> + >> +void kgdb_roundup_cpus(unsigned long flags) >> +{ >> + local_irq_enable(); >> + smp_call_function(kgdb_call_nmi_hook, NULL, 0); >> + local_irq_disable(); >> +} >> + >> +static int __kgdb_notify(struct die_args *args, unsigned long cmd) >> +{ >> + struct pt_regs *regs = args->regs; >> + >> + if (kgdb_handle_exception(1, args->signr, cmd, regs)) >> + return NOTIFY_DONE; >> + return NOTIFY_STOP; >> +} >> + >> +static int >> +kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr) >> +{ >> + unsigned long flags; >> + int ret; >> + >> + local_irq_save(flags); >> + ret = __kgdb_notify(ptr, cmd); >> + local_irq_restore(flags); >> + >> + return ret; >> +} >> + >> +static struct notifier_block kgdb_notifier = { >> + .notifier_call = kgdb_notify, >> + .priority = -INT_MAX, >> +}; >> + >> + >> +/* >> + * kgdb_arch_init - Perform any architecture specific initalization. >> + * >> + * This function will handle the initalization of any architecture >> + * specific callbacks. >> + */ >> +int kgdb_arch_init(void) >> +{ >> + int ret = register_die_notifier(&kgdb_notifier); >> + >> + if (ret != 0) >> + return ret; >> + >> + register_break_hook(&kgdb_brkpt_hook); >> + register_break_hook(&kgdb_compiled_brkpt_hook); >> + >> + return 0; >> +} >> + >> +/* >> + * kgdb_arch_exit - Perform any architecture specific uninitalization. >> + * >> + * This function will handle the uninitalization of any architecture >> + * specific callbacks, for dynamic registration and unregistration. >> + */ >> +void kgdb_arch_exit(void) >> +{ >> + unregister_break_hook(&kgdb_brkpt_hook); >> + unregister_break_hook(&kgdb_compiled_brkpt_hook); >> + unregister_die_notifier(&kgdb_notifier); >> +} >> + >> +/* >> + * ARM instructions are always in LE. >> + * Break instruction is encoded in LE format >> + */ >> +struct kgdb_arch arch_kgdb_ops = { >> + .gdb_bpt_instr = {0x00, 0x00, 0x20, 0xd4} > > Again, you should probably try and encode the dynamic breakpoint immediate > in here (which is sadly offset by 5 bits in the instruction encoding). 
> Yes, I propose to change like this #define KGDB_DYN_BRK_INS_BYTE0 ((KGDB_DYN_DGB_BRK_IMM & 0x7) << 5) #define KGDB_DYN_BRK_INS_BYTE1 ((KGDB_DYN_DGB_BRK_IMM & 0x7f8) >> 3) #define KGDB_DYN_BRK_INS_BYTE2 (0x20 | \ ((KGDB_DYN_DGB_BRK_IMM & 0xf800) >> 11)) #define KGDB_DYN_BRK_INS_BYTE3 0xd4 struct kgdb_arch arch_kgdb_ops = { .gdb_bpt_instr = { KGDB_DYN_BRK_INS_BYTE0, KGDB_DYN_BRK_INS_BYTE1, KGDB_DYN_BRK_INS_BYTE2, KGDB_DYN_BRK_INS_BYTE3 } }; > Will
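[Editor's note: a minimal sketch of the memset()/memcpy() cleanup agreed above for sleeping_thread_to_gdb_regs(). Sizes assume the 64-bit register layout described in the patch; this is illustrative, not the final revision.]

void
sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task)
{
	struct pt_regs *thread_regs;

	/* Initialize to zero */
	memset(gdb_regs, 0, DBG_MAX_REG_NUM * sizeof(unsigned long));

	thread_regs = task_pt_regs(task);

	/* x0-x30 are laid out contiguously in both pt_regs and the gdb view */
	memcpy(gdb_regs, thread_regs->regs, 31 * sizeof(unsigned long));

	gdb_regs[31] = thread_regs->sp;
	gdb_regs[32] = thread_regs->pc;
	gdb_regs[33] = thread_regs->pstate;
}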
On Tue, Oct 01, 2013 at 11:53:01AM +0100, Vijay Kilari wrote: > On Mon, Sep 30, 2013 at 10:06 PM, Will Deacon <will.deacon@arm.com> wrote: > > On Mon, Sep 30, 2013 at 10:44:10AM +0100, vijay.kilari@gmail.com wrote: > >> +static inline void arch_kgdb_breakpoint(void) > >> +{ > >> + asm ("brk %0" :: "I" (KGDB_ARM64_COMPILE_BRK_IMM)); > >> +} > > > > Hmm, I need to find me a compiler guy, but I don't have much faith in this > > function in the face of inlining. Sure a breakpoint needs to happen at a > > specific point in the program? If so, why can't GCC re-order this asm block > > as it likes? > > > > Or is there a restricted use-case for compiled breakpoints in kgdb? > > > As per Andrew, > arch_kgdb_breakpoint is declared as static inline for the x86, arm, tile > and powerpc. Since the inline-asm does not have any output constraints > it is considered volatile just like if you declare the inline-asm as volatile. > The compiler might reorder some stuff around it due to all uses being explicit > just like any other volatile inline-asm. > > In fact, the arch_kgdb_breakpoint is called from only one place from > kernel/debug/debug_core.c and there is no way for the compiler to > reorder the block > as it has two write barriers around it: > wmb(); /* Sync point before breakpoint */ > arch_kgdb_breakpoint(); > wmb(); /* Sync point after breakpoint */ A write barrier just looks like a memory clobber to the compiler, which isn't enough to stop this being reordered wrt breakpoints. In fact, I don't think is actually solvable with GCC since there is no way to emit a full scheduling barrier using asm constraints (we'd need an intrinsic I think). You could try clobbering every register, but that's filthy, and will likely cause reload to generate some dreadful spills. > >> +void > >> +sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task) > >> +{ > >> + struct pt_regs *thread_regs; > >> + int regno; > >> + int i; > >> + > >> + /* Just making sure... */ > >> + if (task == NULL) > >> + return; > > > > Erm... that comment just raises more questions. I appreciate ARM has the > > same check, but why doesn't x86? Can task *actually* be NULL? > > > generally, the task will not be NULL. The check made because the > caller (gdbstub.c) > fetches task information from gdb global structure and calls this function. > This helps in case the caller fails to pass task info. I think should not be > problem with this check. In fact this is legacy code Generally or always? Afaict, either other architectures have a bug here or we have a redundant check. Regardless, it should be made consistent. > >> +/* > >> + * ARM instructions are always in LE. > >> + * Break instruction is encoded in LE format > >> + */ > >> +struct kgdb_arch arch_kgdb_ops = { > >> + .gdb_bpt_instr = {0x00, 0x00, 0x20, 0xd4} > > > > Again, you should probably try and encode the dynamic breakpoint immediate > > in here (which is sadly offset by 5 bits in the instruction encoding). > > > Yes, I propose to change like this > > #define KGDB_DYN_BRK_INS_BYTE0 ((KGDB_DYN_DGB_BRK_IMM & 0x7) << 5) > #define KGDB_DYN_BRK_INS_BYTE1 ((KGDB_DYN_DGB_BRK_IMM & 0x7f8) >> 3) > #define KGDB_DYN_BRK_INS_BYTE2 (0x20 | \ > ((KGDB_DYN_DGB_BRK_IMM & 0xf800) >> 11)) > #define KGDB_DYN_BRK_INS_BYTE3 0xd4 This would be cleaner if you #defined the complete instruction encoding: #define AARCH64_BREAK_MON 0xd4200000 #define KGDB_DYN_BRK_BYTE(x) \ (((AARCH64_BREAK_MON | KGDB_DYN_DGB_BRK_IMM) >> (x * 8)) & 0xff) Will
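[Editor's note: a sketch of the arch_kgdb_ops initialiser built from Will's AARCH64_BREAK_MON suggestion. His macro ORs the immediate in directly; since he also points out that the brk immediate sits 5 bits up in the encoding (bits [20:5]), the shift is folded in here. KGDB_DYN_DGB_BRK_IMM is an illustrative value, not one fixed by this thread.]

#define AARCH64_BREAK_MON	0xd4200000
#define KGDB_DYN_DGB_BRK_IMM	0x400	/* example immediate, hypothetical */
#define KGDB_DYN_BRK_INS	(AARCH64_BREAK_MON | (KGDB_DYN_DGB_BRK_IMM << 5))
#define KGDB_DYN_BRK_BYTE(x)	((KGDB_DYN_BRK_INS >> ((x) * 8)) & 0xff)

/* LE byte order, as in the original initialiser */
struct kgdb_arch arch_kgdb_ops = {
	.gdb_bpt_instr = {
		KGDB_DYN_BRK_BYTE(0),
		KGDB_DYN_BRK_BYTE(1),
		KGDB_DYN_BRK_BYTE(2),
		KGDB_DYN_BRK_BYTE(3),
	}
};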
On Wed, Oct 2, 2013 at 3:18 AM, Will Deacon <will.deacon@arm.com> wrote: > On Tue, Oct 01, 2013 at 11:53:01AM +0100, Vijay Kilari wrote: >> On Mon, Sep 30, 2013 at 10:06 PM, Will Deacon <will.deacon@arm.com> wrote: >> > On Mon, Sep 30, 2013 at 10:44:10AM +0100, vijay.kilari@gmail.com wrote: >> >> +static inline void arch_kgdb_breakpoint(void) >> >> +{ >> >> + asm ("brk %0" :: "I" (KGDB_ARM64_COMPILE_BRK_IMM)); >> >> +} >> > >> > Hmm, I need to find me a compiler guy, but I don't have much faith in this >> > function in the face of inlining. Sure a breakpoint needs to happen at a >> > specific point in the program? If so, why can't GCC re-order this asm block >> > as it likes? >> > >> > Or is there a restricted use-case for compiled breakpoints in kgdb? >> > >> As per Andrew, >> arch_kgdb_breakpoint is declared as static inline for the x86, arm, tile >> and powerpc. Since the inline-asm does not have any output constraints >> it is considered volatile just like if you declare the inline-asm as volatile. >> The compiler might reorder some stuff around it due to all uses being explicit >> just like any other volatile inline-asm. >> >> In fact, the arch_kgdb_breakpoint is called from only one place from >> kernel/debug/debug_core.c and there is no way for the compiler to >> reorder the block >> as it has two write barriers around it: >> wmb(); /* Sync point before breakpoint */ >> arch_kgdb_breakpoint(); >> wmb(); /* Sync point after breakpoint */ > > A write barrier just looks like a memory clobber to the compiler, which > isn't enough to stop this being reordered wrt breakpoints. Yes this will be a full schedule barrier due to the inline-asm being a volatile inline-asm (implicitly because there are no outputs). What I meant about the compiler could move around code only happens when you don't there are other implicit non schedule barrier instructions, in this case wmb is an volatile inline-asm which is also a scheduler barrier. > In fact, I don't > think is actually solvable with GCC since there is no way to emit a full > scheduling barrier using asm constraints (we'd need an intrinsic I think). > > You could try clobbering every register, but that's filthy, and will likely > cause reload to generate some dreadful spills. Actually this is enough since both inline-asms (the one for wmb and arch_kgdb_breakpoint) are going to be treated as volatile by the compiler so it will not schedule those two inline-asm with respect of each other. Note all other targets (ARM, PPC, x86, tile, etc.) all use a static inline function which does exactly the same as what is being added here. x86 for an example: static inline void arch_kgdb_breakpoint(void) { asm(" int $3"); } arm: static inline void arch_kgdb_breakpoint(void) { asm(".word 0xe7ffdeff"); } PPC: static inline void arch_kgdb_breakpoint(void) { asm(".long 0x7d821008"); /* twge r2, r2 */ } Yes they have don't have either input or output constraints while this is adding one with an input constraint but that does not change the volatilism of the inline-asm; only the output constraint does. Thanks, Andrew Pinski > >> >> +void >> >> +sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task) >> >> +{ >> >> + struct pt_regs *thread_regs; >> >> + int regno; >> >> + int i; >> >> + >> >> + /* Just making sure... */ >> >> + if (task == NULL) >> >> + return; >> > >> > Erm... that comment just raises more questions. I appreciate ARM has the >> > same check, but why doesn't x86? Can task *actually* be NULL? 
>> > >> generally, the task will not be NULL. The check made because the >> caller (gdbstub.c) >> fetches task information from gdb global structure and calls this function. >> This helps in case the caller fails to pass task info. I think should not be >> problem with this check. In fact this is legacy code > > Generally or always? Afaict, either other architectures have a bug here or > we have a redundant check. Regardless, it should be made consistent. > >> >> +/* >> >> + * ARM instructions are always in LE. >> >> + * Break instruction is encoded in LE format >> >> + */ >> >> +struct kgdb_arch arch_kgdb_ops = { >> >> + .gdb_bpt_instr = {0x00, 0x00, 0x20, 0xd4} >> > >> > Again, you should probably try and encode the dynamic breakpoint immediate >> > in here (which is sadly offset by 5 bits in the instruction encoding). >> > >> Yes, I propose to change like this >> >> #define KGDB_DYN_BRK_INS_BYTE0 ((KGDB_DYN_DGB_BRK_IMM & 0x7) << 5) >> #define KGDB_DYN_BRK_INS_BYTE1 ((KGDB_DYN_DGB_BRK_IMM & 0x7f8) >> 3) >> #define KGDB_DYN_BRK_INS_BYTE2 (0x20 | \ >> ((KGDB_DYN_DGB_BRK_IMM & 0xf800) >> 11)) >> #define KGDB_DYN_BRK_INS_BYTE3 0xd4 > > This would be cleaner if you #defined the complete instruction encoding: > > #define AARCH64_BREAK_MON 0xd4200000 > > #define KGDB_DYN_BRK_BYTE(x) \ > (((AARCH64_BREAK_MON | KGDB_DYN_DGB_BRK_IMM) >> (x * 8)) & 0xff) > > Will
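[Editor's note: a small aside on Andrew's point about implicit volatility. An asm statement with no output operands is already treated as volatile by GCC, so spelling the qualifier out changes nothing functionally, but makes the intent obvious to readers. Sketch only; nothing in the thread requires this.]

static inline void arch_kgdb_breakpoint(void)
{
	/* Behaviourally identical to the patch's version: the explicit
	 * volatile merely documents what the no-output asm already implies. */
	asm volatile ("brk %0" :: "I" (KGDB_ARM64_COMPILE_BRK_IMM));
}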
Hi Andrew, On Fri, Oct 04, 2013 at 10:51:02PM +0100, Andrew Pinski wrote: > On Wed, Oct 2, 2013 at 3:18 AM, Will Deacon <will.deacon@arm.com> wrote: > > On Tue, Oct 01, 2013 at 11:53:01AM +0100, Vijay Kilari wrote: > >> In fact, the arch_kgdb_breakpoint is called from only one place from > >> kernel/debug/debug_core.c and there is no way for the compiler to > >> reorder the block > >> as it has two write barriers around it: > >> wmb(); /* Sync point before breakpoint */ > >> arch_kgdb_breakpoint(); > >> wmb(); /* Sync point after breakpoint */ > > > > A write barrier just looks like a memory clobber to the compiler, which > > isn't enough to stop this being reordered wrt breakpoints. > > Yes this will be a full schedule barrier due to the inline-asm being a > volatile inline-asm (implicitly because there are no outputs). What I > meant about the compiler could move around code only happens when you > don't there are other implicit non schedule barrier instructions, in > this case wmb is an volatile inline-asm which is also a scheduler > barrier. volatile != scheduling barrier (example below). > > In fact, I don't > > think is actually solvable with GCC since there is no way to emit a full > > scheduling barrier using asm constraints (we'd need an intrinsic I think). > > > > You could try clobbering every register, but that's filthy, and will likely > > cause reload to generate some dreadful spills. > > Actually this is enough since both inline-asms (the one for wmb and > arch_kgdb_breakpoint) are going to be treated as volatile by the > compiler so it will not schedule those two inline-asm with respect of > each other. That's true: the asms will not be optimised away and/or moved across each other. However, other code *can* move across then, which strikes me as odd when we're dealing with breakpoints. E.g: 8<--- #define wmb() __asm__ __volatile__ ("dsb" ::: "memory") static inline void nop(void) { asm ("nop"); } int foo(int *bar) { int baz, i; baz = *bar; for (i = 0; i < 10; ++i) baz++; wmb(); nop(); wmb(); return baz; } --->8 Assembles to the following with GCC 4.9 for ARM: 00000000 <foo>: 0: e5900000 ldr r0, [r0] 4: f57ff04f dsb sy 8: e320f000 nop {0} c: f57ff04f dsb sy 10: e280000a add r0, r0, #10 14: e12fff1e bx lr So, whilst loading the parameter hazards against the memory clobber of the wmb()s, the loop (optimised to the single add) is now at the end of the function. If I wanted a breakpoint after that loop had finished, it's now no longer true. > Note all other targets (ARM, PPC, x86, tile, etc.) all use a static > inline function which does exactly the same as what is being added > here. Agreed, I didn't mean to imply this was an issue with this particular patch. I'm just curious as to why this has been deemed acceptable by other architectures, when it doesn't look especially useful to me. Will
On Mon, Oct 7, 2013 at 2:51 AM, Will Deacon <will.deacon@arm.com> wrote: > Hi Andrew, > > On Fri, Oct 04, 2013 at 10:51:02PM +0100, Andrew Pinski wrote: >> On Wed, Oct 2, 2013 at 3:18 AM, Will Deacon <will.deacon@arm.com> wrote: >> > On Tue, Oct 01, 2013 at 11:53:01AM +0100, Vijay Kilari wrote: >> >> In fact, the arch_kgdb_breakpoint is called from only one place from >> >> kernel/debug/debug_core.c and there is no way for the compiler to >> >> reorder the block >> >> as it has two write barriers around it: >> >> wmb(); /* Sync point before breakpoint */ >> >> arch_kgdb_breakpoint(); >> >> wmb(); /* Sync point after breakpoint */ >> > >> > A write barrier just looks like a memory clobber to the compiler, which >> > isn't enough to stop this being reordered wrt breakpoints. >> >> Yes this will be a full schedule barrier due to the inline-asm being a >> volatile inline-asm (implicitly because there are no outputs). What I >> meant about the compiler could move around code only happens when you >> don't there are other implicit non schedule barrier instructions, in >> this case wmb is an volatile inline-asm which is also a scheduler >> barrier. > > volatile != scheduling barrier (example below). Actually it does mean that but scheduling barrier != code motion barrier; at least in GCC. > >> > In fact, I don't >> > think is actually solvable with GCC since there is no way to emit a full >> > scheduling barrier using asm constraints (we'd need an intrinsic I think). >> > >> > You could try clobbering every register, but that's filthy, and will likely >> > cause reload to generate some dreadful spills. >> >> Actually this is enough since both inline-asms (the one for wmb and >> arch_kgdb_breakpoint) are going to be treated as volatile by the >> compiler so it will not schedule those two inline-asm with respect of >> each other. > > That's true: the asms will not be optimised away and/or moved across each > other. However, other code *can* move across then, which strikes me as odd > when we're dealing with breakpoints. > > E.g: > > 8<--- > > #define wmb() __asm__ __volatile__ ("dsb" ::: "memory") > > static inline void nop(void) > { > asm ("nop"); > } > > int foo(int *bar) > { > int baz, i; > > baz = *bar; > > for (i = 0; i < 10; ++i) > baz++; > > wmb(); > nop(); > wmb(); > > return baz; > } > > --->8 > > Assembles to the following with GCC 4.9 for ARM: > > 00000000 <foo>: > 0: e5900000 ldr r0, [r0] > 4: f57ff04f dsb sy > 8: e320f000 nop {0} > c: f57ff04f dsb sy > 10: e280000a add r0, r0, #10 > 14: e12fff1e bx lr Yes but this is not the scheduler doing the code motion (it is SCEV Constant prop followed by TER [temporary expression replacement] which is causing the code motion to happen). > > So, whilst loading the parameter hazards against the memory clobber of the > wmb()s, the loop (optimised to the single add) is now at the end of the > function. If I wanted a breakpoint after that loop had finished, it's now no > longer true. > >> Note all other targets (ARM, PPC, x86, tile, etc.) all use a static >> inline function which does exactly the same as what is being added >> here. > > Agreed, I didn't mean to imply this was an issue with this particular patch. > I'm just curious as to why this has been deemed acceptable by other > architectures, when it doesn't look especially useful to me. 
The main reason why it is acceptable by other architectures is due to the full definition of the function itself: void kgdb_breakpoint(void) { atomic_inc(&kgdb_setting_breakpoint); wmb(); /* Sync point before breakpoint */ arch_kgdb_breakpoint(); wmb(); /* Sync point after breakpoint */ atomic_dec(&kgdb_setting_breakpoint); } All of these function calls are all volatile inline-asm so there is not going to be any code motion happening. This is true in all architectures. So then you ask if kgdb_breakpoint can be inlined anywhere that we could get the breakpoint and not have the "correct" value, the answer is no and I audited each and every call to kgdb_breakpoint. If you want just add the attribute noinline to kgdb_breakpoint and the problem you mentioned about the code motion happening goes away. Thanks, Andrew Pinski
Hi Andrew, On Fri, Oct 11, 2013 at 01:24:29AM +0100, Andrew Pinski wrote: > On Mon, Oct 7, 2013 at 2:51 AM, Will Deacon <will.deacon@arm.com> wrote: > > On Fri, Oct 04, 2013 at 10:51:02PM +0100, Andrew Pinski wrote: > >> Yes this will be a full schedule barrier due to the inline-asm being a > >> volatile inline-asm (implicitly because there are no outputs). What I > >> meant about the compiler could move around code only happens when you > >> don't there are other implicit non schedule barrier instructions, in > >> this case wmb is an volatile inline-asm which is also a scheduler > >> barrier. > > > > volatile != scheduling barrier (example below). > > Actually it does mean that but scheduling barrier != code motion > barrier; at least in GCC. Bah, terminology. I can tell you're a compiler engineer ;) > > Assembles to the following with GCC 4.9 for ARM: > > > > 00000000 <foo>: > > 0: e5900000 ldr r0, [r0] > > 4: f57ff04f dsb sy > > 8: e320f000 nop {0} > > c: f57ff04f dsb sy > > 10: e280000a add r0, r0, #10 > > 14: e12fff1e bx lr > > > Yes but this is not the scheduler doing the code motion (it is SCEV > Constant prop followed by TER [temporary expression replacement] which > is causing the code motion to happen). Regardless, it's still not suitable for breakpoints in general. > >> Note all other targets (ARM, PPC, x86, tile, etc.) all use a static > >> inline function which does exactly the same as what is being added > >> here. > > > > Agreed, I didn't mean to imply this was an issue with this particular patch. > > I'm just curious as to why this has been deemed acceptable by other > > architectures, when it doesn't look especially useful to me. > > The main reason why it is acceptable by other architectures is due to > the full definition of the function itself: > void kgdb_breakpoint(void) > { > atomic_inc(&kgdb_setting_breakpoint); > wmb(); /* Sync point before breakpoint */ > arch_kgdb_breakpoint(); > wmb(); /* Sync point after breakpoint */ > atomic_dec(&kgdb_setting_breakpoint); > } > > All of these function calls are all volatile inline-asm so there is > not going to be any code motion happening. This is true in all > architectures. So then you ask if kgdb_breakpoint can be inlined > anywhere that we could get the breakpoint and not have the "correct" > value, the answer is no and I audited each and every call to > kgdb_breakpoint. If you want just add the attribute noinline to > kgdb_breakpoint and the problem you mentioned about the code motion > happening goes away. Thanks for taking a look at this. I think adding the noinline would be a good idea (as a seperate patch). Cheers, Will
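[Editor's note: a sketch of the separate follow-up patch Will suggests, applied to the generic kgdb_breakpoint() in kernel/debug/debug_core.c. The function body is quoted from the thread; only the noinline annotation is new.]

noinline void kgdb_breakpoint(void)
{
	atomic_inc(&kgdb_setting_breakpoint);
	wmb();			/* Sync point before breakpoint */
	arch_kgdb_breakpoint();
	wmb();			/* Sync point after breakpoint */
	atomic_dec(&kgdb_setting_breakpoint);
}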
diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h index 19e429e..ed376ce 100644 --- a/arch/arm64/include/asm/debug-monitors.h +++ b/arch/arm64/include/asm/debug-monitors.h @@ -28,6 +28,16 @@ #define DBG_ESR_EVT_HWWP 0x2 #define DBG_ESR_EVT_BRK 0x6 +/* + * Break point instruction encoding + */ +#define BREAK_INSTR_SIZE 4 +#define KGDB_BREAKINST_ESR_VAL 0xf2000000 +#define KGDB_COMPILED_BREAK_ESR_VAL 0xf2000001 +#define KGDB_ARM64_COMPILE_BRK_IMM 1 +#define CACHE_FLUSH_IS_SAFE 1 + + enum debug_el { DBG_ACTIVE_EL0 = 0, DBG_ACTIVE_EL1, diff --git a/arch/arm64/include/asm/kgdb.h b/arch/arm64/include/asm/kgdb.h new file mode 100644 index 0000000..15b36fd --- /dev/null +++ b/arch/arm64/include/asm/kgdb.h @@ -0,0 +1,81 @@ +/* + * AArch64 KGDB support + * + * Based on arch/arm/include/kgdb.h + * + * Copyright (C) 2013 Cavium Inc. + * Author: Vijaya Kumar K <vijaya.kumar@caviumnetworks.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#ifndef __ARM_KGDB_H__ +#define __ARM_KGDB_H__ + +#include <linux/ptrace.h> +#include <asm/debug-monitors.h> + +#ifndef __ASSEMBLY__ + +static inline void arch_kgdb_breakpoint(void) +{ + asm ("brk %0" :: "I" (KGDB_ARM64_COMPILE_BRK_IMM)); +} + +extern void kgdb_handle_bus_error(void); +extern int kgdb_fault_expected; + +#endif /* !__ASSEMBLY__ */ + +/* + * gdb is expecting the following registers layout. + * + * General purpose regs: + * r0-r30: 64 bit + * sp,pc : 64 bit + * pstate : 32 bit + * Total: 34 + * FPU regs: + * f0-f31: 128 bit + * Total: 32 + * Extra regs + * fpsr & fpcr: 32 bit + * Total: 2 + * + */ + +#define _GP_REGS 34 +#define _FP_REGS 32 +#define _EXTRA_REGS 2 +#define DBG_MAX_REG_NUM (_GP_REGS + _FP_REGS + _EXTRA_REGS) + +/* + * Size of I/O buffer for gdb packet. + * considering to hold all register contents, size is set to 1024 + */ + +#define BUFMAX 1024 + +/* + * Number of bytes required for gdb_regs buffer. + * _GP_REGS: 8 bytes, _FP_REGS: 16 bytes and _EXTRA_REGS: 4 bytes each + * pstate reg is only 4 bytes. subtract 4 from size contributed + * by _GP_REGS. 
+ * GDB fails to connect for size beyond this with error + * "'g' packet reply is too long" + */ + +#define NUMREGBYTES (((_GP_REGS * 8) - 4) + (_FP_REGS * 16) + \ + (_EXTRA_REGS * 4)) + +#endif /* __ASM_KGDB_H__ */ diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index 7b4b564..69613bb 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile @@ -18,6 +18,7 @@ arm64-obj-$(CONFIG_SMP) += smp.o smp_spin_table.o smp_psci.o arm64-obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o arm64-obj-$(CONFIG_HAVE_HW_BREAKPOINT)+= hw_breakpoint.o arm64-obj-$(CONFIG_EARLY_PRINTK) += early_printk.o +arm64-obj-$(CONFIG_KGDB) += kgdb.o obj-y += $(arm64-obj-y) vdso/ obj-m += $(arm64-obj-m) diff --git a/arch/arm64/kernel/kgdb.c b/arch/arm64/kernel/kgdb.c new file mode 100644 index 0000000..c99e04d --- /dev/null +++ b/arch/arm64/kernel/kgdb.c @@ -0,0 +1,298 @@ +/* + * AArch64 KGDB support + * + * Based on arch/arm/kernel/kgdb.c + * + * Copyright (C) 2013 Cavium Inc. + * Author: Vijaya Kumar K <vijaya.kumar@caviumnetworks.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include <linux/irq.h> +#include <linux/kdebug.h> +#include <linux/kgdb.h> +#include <asm/traps.h> + +struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = +{ + { "x0", 8, offsetof(struct pt_regs, regs[0])}, + { "x1", 8, offsetof(struct pt_regs, regs[1])}, + { "x2", 8, offsetof(struct pt_regs, regs[2])}, + { "x3", 8, offsetof(struct pt_regs, regs[3])}, + { "x4", 8, offsetof(struct pt_regs, regs[4])}, + { "x5", 8, offsetof(struct pt_regs, regs[5])}, + { "x6", 8, offsetof(struct pt_regs, regs[6])}, + { "x7", 8, offsetof(struct pt_regs, regs[7])}, + { "x8", 8, offsetof(struct pt_regs, regs[8])}, + { "x9", 8, offsetof(struct pt_regs, regs[9])}, + { "x10", 8, offsetof(struct pt_regs, regs[10])}, + { "x11", 8, offsetof(struct pt_regs, regs[11])}, + { "x12", 8, offsetof(struct pt_regs, regs[12])}, + { "x13", 8, offsetof(struct pt_regs, regs[13])}, + { "x14", 8, offsetof(struct pt_regs, regs[14])}, + { "x15", 8, offsetof(struct pt_regs, regs[15])}, + { "x16", 8, offsetof(struct pt_regs, regs[16])}, + { "x17", 8, offsetof(struct pt_regs, regs[17])}, + { "x18", 8, offsetof(struct pt_regs, regs[18])}, + { "x19", 8, offsetof(struct pt_regs, regs[19])}, + { "x20", 8, offsetof(struct pt_regs, regs[20])}, + { "x21", 8, offsetof(struct pt_regs, regs[21])}, + { "x22", 8, offsetof(struct pt_regs, regs[22])}, + { "x23", 8, offsetof(struct pt_regs, regs[23])}, + { "x24", 8, offsetof(struct pt_regs, regs[24])}, + { "x25", 8, offsetof(struct pt_regs, regs[25])}, + { "x26", 8, offsetof(struct pt_regs, regs[26])}, + { "x27", 8, offsetof(struct pt_regs, regs[27])}, + { "x28", 8, offsetof(struct pt_regs, regs[28])}, + { "x29", 8, offsetof(struct pt_regs, regs[29])}, + { "x30", 8, offsetof(struct pt_regs, regs[30])}, + { "sp", 8, offsetof(struct pt_regs, sp)}, + { "pc", 8, offsetof(struct pt_regs, pc)}, + { "pstate", 4, offsetof(struct pt_regs, pstate)}, + { "v0", 16, -1 }, + { "v1", 16, -1 }, + { 
"v2", 16, -1 }, + { "v3", 16, -1 }, + { "v4", 16, -1 }, + { "v5", 16, -1 }, + { "v6", 16, -1 }, + { "v7", 16, -1 }, + { "v8", 16, -1 }, + { "v9", 16, -1 }, + { "v10", 16, -1 }, + { "v11", 16, -1 }, + { "v12", 16, -1 }, + { "v13", 16, -1 }, + { "v14", 16, -1 }, + { "v15", 16, -1 }, + { "v16", 16, -1 }, + { "v17", 16, -1 }, + { "v18", 16, -1 }, + { "v19", 16, -1 }, + { "v20", 16, -1 }, + { "v21", 16, -1 }, + { "v22", 16, -1 }, + { "v23", 16, -1 }, + { "v24", 16, -1 }, + { "v25", 16, -1 }, + { "v26", 16, -1 }, + { "v27", 16, -1 }, + { "v28", 16, -1 }, + { "v29", 16, -1 }, + { "v30", 16, -1 }, + { "v31", 16, -1 }, + { "fpsr", 4, -1 }, + { "fpcr", 4, -1 }, +}; + +char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs) +{ + if (regno >= DBG_MAX_REG_NUM || regno < 0) + return NULL; + + if (dbg_reg_def[regno].offset != -1) + memcpy(mem, (void *)regs + dbg_reg_def[regno].offset, + dbg_reg_def[regno].size); + else + memset(mem, 0, dbg_reg_def[regno].size); + return dbg_reg_def[regno].name; +} + +int dbg_set_reg(int regno, void *mem, struct pt_regs *regs) +{ + if (regno >= DBG_MAX_REG_NUM || regno < 0) + return -EINVAL; + + if (dbg_reg_def[regno].offset != -1) + memcpy((void *)regs + dbg_reg_def[regno].offset, mem, + dbg_reg_def[regno].size); + return 0; +} + +void +sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task) +{ + struct pt_regs *thread_regs; + int regno; + int i; + + /* Just making sure... */ + if (task == NULL) + return; + + /* Initialize to zero */ + for (regno = 0; regno < DBG_MAX_REG_NUM; regno++) + gdb_regs[regno] = 0; + + thread_regs = task_pt_regs(task); + + for(i = 0; i < 31; i++) + gdb_regs[i] = thread_regs->regs[i]; + + gdb_regs[31] = thread_regs->sp; + gdb_regs[32] = thread_regs->pc; + gdb_regs[33] = thread_regs->pstate; +} + +void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc) +{ + regs->pc = pc; +} + +static int compiled_break; + +int kgdb_arch_handle_exception(int exception_vector, int signo, + int err_code, char *remcom_in_buffer, + char *remcom_out_buffer, + struct pt_regs *linux_regs) +{ + unsigned long addr; + char *ptr; + int err; + + switch (remcom_in_buffer[0]) { + case 'D': + case 'k': + case 'c': + /* + * Packet D (Detach), k (kill) & c (Continue) requires + * to continue executing. Set pc to required address. + * Try to read optional parameter, pc unchanged if no parm. + * If this was a compiled breakpoint, we need to move + * to the next instruction or we will just breakpoint + * over and over again. 
+ */ + ptr = &remcom_in_buffer[1]; + if (kgdb_hex2long(&ptr, &addr)) + kgdb_arch_set_pc(linux_regs, addr); + else if (compiled_break == 1) + kgdb_arch_set_pc(linux_regs, linux_regs->pc + 4); + + compiled_break = 0; + + err = 0; + break; + default: + err = -1; + } + return err; +} + +static int +kgdb_brk_fn(struct pt_regs *regs, unsigned int esr, unsigned long addr) +{ + kgdb_handle_exception(1, SIGTRAP, 0, regs); + return 0; +} + +static int kgdb_compiled_brk_fn(struct pt_regs *regs, unsigned int esr, + unsigned long addr) +{ + compiled_break = 1; + kgdb_handle_exception(1, SIGTRAP, 0, regs); + + return 0; +} + +static struct break_hook kgdb_brkpt_hook = { + .esr_mask = 0xffffffff, + .esr_val = KGDB_BREAKINST_ESR_VAL, + .fn = kgdb_brk_fn +}; + +static struct break_hook kgdb_compiled_brkpt_hook = { + .esr_mask = 0xffffffff, + .esr_val = KGDB_COMPILED_BREAK_ESR_VAL, + .fn = kgdb_compiled_brk_fn +}; + +static void kgdb_call_nmi_hook(void *ignored) +{ + kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs()); +} + +void kgdb_roundup_cpus(unsigned long flags) +{ + local_irq_enable(); + smp_call_function(kgdb_call_nmi_hook, NULL, 0); + local_irq_disable(); +} + +static int __kgdb_notify(struct die_args *args, unsigned long cmd) +{ + struct pt_regs *regs = args->regs; + + if (kgdb_handle_exception(1, args->signr, cmd, regs)) + return NOTIFY_DONE; + return NOTIFY_STOP; +} + +static int +kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr) +{ + unsigned long flags; + int ret; + + local_irq_save(flags); + ret = __kgdb_notify(ptr, cmd); + local_irq_restore(flags); + + return ret; +} + +static struct notifier_block kgdb_notifier = { + .notifier_call = kgdb_notify, + .priority = -INT_MAX, +}; + + +/* + * kgdb_arch_init - Perform any architecture specific initalization. + * + * This function will handle the initalization of any architecture + * specific callbacks. + */ +int kgdb_arch_init(void) +{ + int ret = register_die_notifier(&kgdb_notifier); + + if (ret != 0) + return ret; + + register_break_hook(&kgdb_brkpt_hook); + register_break_hook(&kgdb_compiled_brkpt_hook); + + return 0; +} + +/* + * kgdb_arch_exit - Perform any architecture specific uninitalization. + * + * This function will handle the uninitalization of any architecture + * specific callbacks, for dynamic registration and unregistration. + */ +void kgdb_arch_exit(void) +{ + unregister_break_hook(&kgdb_brkpt_hook); + unregister_break_hook(&kgdb_compiled_brkpt_hook); + unregister_die_notifier(&kgdb_notifier); +} + +/* + * ARM instructions are always in LE. + * Break instruction is encoded in LE format + */ +struct kgdb_arch arch_kgdb_ops = { + .gdb_bpt_instr = {0x00, 0x00, 0x20, 0xd4} +};
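[Editor's note: for completeness, exercising this support follows the generic kgdb flow; the console device names below are only examples and are not fixed by this thread.]

  # target: boot with a kgdboc console and wait for the debugger
  console=ttyAMA0,115200 kgdboc=ttyAMA0,115200 kgdbwait

  # host: attach gdb over the serial link
  $ aarch64-linux-gnu-gdb vmlinux
  (gdb) target remote /dev/ttyUSB0
  (gdb) break do_fork
  (gdb) continue

  # target: re-enter the debugger later via sysrq
  $ echo g > /proc/sysrq-trigger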