Message ID | 20190501200231.GA7087@ls3530.dellerweb.de (mailing list archive) |
---|---|
State | Superseded |
Headers | show |
Series | parisc: Add static branch / JUMP_LABEL feature | expand |
Hi Helge, On Wed, May 01, 2019 at 10:02:31PM +0200, Helge Deller wrote: > Signed-off-by: Helge Deller <deller@gmx.de> > > diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig > index 49212f31b461..bdd4fea0150b 100644 > --- a/arch/parisc/Kconfig > +++ b/arch/parisc/Kconfig > @@ -45,6 +45,8 @@ config PARISC > select HAVE_DEBUG_STACKOVERFLOW > select HAVE_ARCH_AUDITSYSCALL > select HAVE_ARCH_HASH > + select HAVE_ARCH_JUMP_LABEL > + select HAVE_ARCH_JUMP_LABEL_RELATIVE > select HAVE_ARCH_SECCOMP_FILTER > select HAVE_ARCH_TRACEHOOK > select HAVE_REGS_AND_STACK_ACCESS_API > diff --git a/arch/parisc/include/asm/jump_label.h b/arch/parisc/include/asm/jump_label.h > new file mode 100644 > index 000000000000..2d82368b6ed3 > --- /dev/null > +++ b/arch/parisc/include/asm/jump_label.h > @@ -0,0 +1,43 @@ > +/* SPDX-License-Identifier: GPL-2.0 */ > +#ifndef _ASM_PARISC_JUMP_LABEL_H > +#define _ASM_PARISC_JUMP_LABEL_H > + > +#ifndef __ASSEMBLY__ > + > +#include <linux/types.h> > +#include <asm/assembly.h> > + > +#define JUMP_LABEL_NOP_SIZE 4 > + > +static __always_inline bool arch_static_branch(struct static_key *key, bool branch) > +{ > + asm_volatile_goto("1:\n\t" > + "nop\n\t" > + ".pushsection __jump_table, \"aw\"\n\t" > + ".word 1b - ., %l[l_yes] - .\n\t" > + __stringify(ASM_ULONG_INSN) " %c0\n\t" > + ".popsection\n\t" > + : : "i" (&((char *)key)[branch]) : : l_yes); > + > + return false; > +l_yes: > + return true; > +} > + > +static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch) > +{ > + asm_volatile_goto("1:\n\t" > + "b,n %l[l_yes]\n\t" > + ".pushsection __jump_table, \"aw\"\n\t" > + ".word 1b - ., %l[l_yes] - .\n\t" > + __stringify(ASM_ULONG_INSN) " %c0\n\t" > + ".popsection\n\t" > + : : "i" (&((char *)key)[branch]) : : l_yes); > + > + return false; > +l_yes: > + return true; > +} > + > +#endif /* __ASSEMBLY__ */ > +#endif > diff --git a/arch/parisc/kernel/Makefile b/arch/parisc/kernel/Makefile > index b818b28c8a99..fc0df5c44468 100644 
> --- a/arch/parisc/kernel/Makefile > +++ b/arch/parisc/kernel/Makefile > @@ -33,5 +33,6 @@ obj-$(CONFIG_64BIT) += perf.o perf_asm.o $(obj64-y) > obj-$(CONFIG_PARISC_CPU_TOPOLOGY) += topology.o > obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o > obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o > +obj-$(CONFIG_JUMP_LABEL) += jump_label.o > obj-$(CONFIG_KGDB) += kgdb.o > obj-$(CONFIG_KPROBES) += kprobes.o > diff --git a/arch/parisc/kernel/jump_label.c b/arch/parisc/kernel/jump_label.c > new file mode 100644 > index 000000000000..0d077c6d3ca7 > --- /dev/null > +++ b/arch/parisc/kernel/jump_label.c > @@ -0,0 +1,37 @@ > +/* > + * Copyright (C) 2019 Helge Deller <deller@gmx.de> > + * > + * Based on arch/arm64/kernel/jump_label.c > + */ > +#include <linux/kernel.h> > +#include <linux/jump_label.h> > +#include <asm/alternative.h> > +#include <asm/patch.h> > + > +void arch_jump_label_transform(struct jump_entry *entry, > + enum jump_label_type type) > +{ > + void *addr = (void *)jump_entry_code(entry); > + u32 insn; > + > + if (type == JUMP_LABEL_JMP) { > + void *target = (void *)jump_entry_target(entry); > + unsigned len = target - addr; > + insn = 0xe8000002 + (len-2)*8; /* "b,n .+8" */ I don't know the maximum jump distance here that could be encountered, but this won't work for negative offsets or for offsets larger than 2^11, due to the weird immediate encoding PA-RISC uses. I think this should use proper bit shifting to match the immediate encoding and have a BUG_ON(len < $MAX_NEG_DISTANCE || len > $MAX_POS_DISTANCE) to make sure we're not patching invalid branch instructions into the Kernel. > + } else { > + insn = INSN_NOP; > + } > + > + patch_text(addr, insn); // nosync ? > +} > + > +void arch_jump_label_transform_static(struct jump_entry *entry, > + enum jump_label_type type) > +{ > + /* > + * We use the architected NOP in arch_static_branch, so there's no > + * need to patch an identical NOP over the top of it here.
The core > + * will call arch_jump_label_transform from a module notifier if the > + * NOP needs to be replaced by a branch. > + */ > +} > diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S > index c3b1b9c24ede..a8be7a47fcc0 100644 > --- a/arch/parisc/kernel/vmlinux.lds.S > +++ b/arch/parisc/kernel/vmlinux.lds.S > @@ -18,6 +18,9 @@ > *(.data..vm0.pgd) \ > *(.data..vm0.pte) > > +/* No __ro_after_init data in the .rodata section - which will always be ro */ > +#define RO_AFTER_INIT_DATA > + > #include <asm-generic/vmlinux.lds.h> > > /* needed for the processor specific cache alignment size */
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index 49212f31b461..bdd4fea0150b 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig @@ -45,6 +45,8 @@ config PARISC select HAVE_DEBUG_STACKOVERFLOW select HAVE_ARCH_AUDITSYSCALL select HAVE_ARCH_HASH + select HAVE_ARCH_JUMP_LABEL + select HAVE_ARCH_JUMP_LABEL_RELATIVE select HAVE_ARCH_SECCOMP_FILTER select HAVE_ARCH_TRACEHOOK select HAVE_REGS_AND_STACK_ACCESS_API diff --git a/arch/parisc/include/asm/jump_label.h b/arch/parisc/include/asm/jump_label.h new file mode 100644 index 000000000000..2d82368b6ed3 --- /dev/null +++ b/arch/parisc/include/asm/jump_label.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_PARISC_JUMP_LABEL_H +#define _ASM_PARISC_JUMP_LABEL_H + +#ifndef __ASSEMBLY__ + +#include <linux/types.h> +#include <asm/assembly.h> + +#define JUMP_LABEL_NOP_SIZE 4 + +static __always_inline bool arch_static_branch(struct static_key *key, bool branch) +{ + asm_volatile_goto("1:\n\t" + "nop\n\t" + ".pushsection __jump_table, \"aw\"\n\t" + ".word 1b - ., %l[l_yes] - .\n\t" + __stringify(ASM_ULONG_INSN) " %c0\n\t" + ".popsection\n\t" + : : "i" (&((char *)key)[branch]) : : l_yes); + + return false; +l_yes: + return true; +} + +static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch) +{ + asm_volatile_goto("1:\n\t" + "b,n %l[l_yes]\n\t" + ".pushsection __jump_table, \"aw\"\n\t" + ".word 1b - ., %l[l_yes] - .\n\t" + __stringify(ASM_ULONG_INSN) " %c0\n\t" + ".popsection\n\t" + : : "i" (&((char *)key)[branch]) : : l_yes); + + return false; +l_yes: + return true; +} + +#endif /* __ASSEMBLY__ */ +#endif diff --git a/arch/parisc/kernel/Makefile b/arch/parisc/kernel/Makefile index b818b28c8a99..fc0df5c44468 100644 --- a/arch/parisc/kernel/Makefile +++ b/arch/parisc/kernel/Makefile @@ -33,5 +33,6 @@ obj-$(CONFIG_64BIT) += perf.o perf_asm.o $(obj64-y) obj-$(CONFIG_PARISC_CPU_TOPOLOGY) += topology.o obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o 
obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o +obj-$(CONFIG_JUMP_LABEL) += jump_label.o obj-$(CONFIG_KGDB) += kgdb.o obj-$(CONFIG_KPROBES) += kprobes.o diff --git a/arch/parisc/kernel/jump_label.c b/arch/parisc/kernel/jump_label.c new file mode 100644 index 000000000000..0d077c6d3ca7 --- /dev/null +++ b/arch/parisc/kernel/jump_label.c @@ -0,0 +1,37 @@ +/* + * Copyright (C) 2019 Helge Deller <deller@gmx.de> + * + * Based on arch/arm64/kernel/jump_label.c + */ +#include <linux/kernel.h> +#include <linux/jump_label.h> +#include <asm/alternative.h> +#include <asm/patch.h> + +void arch_jump_label_transform(struct jump_entry *entry, + enum jump_label_type type) +{ + void *addr = (void *)jump_entry_code(entry); + u32 insn; + + if (type == JUMP_LABEL_JMP) { + void *target = (void *)jump_entry_target(entry); + unsigned len = target - addr; + insn = 0xe8000002 + (len-2)*8; /* "b,n .+8" */ + } else { + insn = INSN_NOP; + } + + patch_text(addr, insn); // nosync ? +} + +void arch_jump_label_transform_static(struct jump_entry *entry, + enum jump_label_type type) +{ + /* + * We use the architected NOP in arch_static_branch, so there's no + * need to patch an identical NOP over the top of it here. The core + * will call arch_jump_label_transform from a module notifier if the + * NOP needs to be replaced by a branch. + */ +} diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S index c3b1b9c24ede..a8be7a47fcc0 100644 --- a/arch/parisc/kernel/vmlinux.lds.S +++ b/arch/parisc/kernel/vmlinux.lds.S @@ -18,6 +18,9 @@ *(.data..vm0.pgd) \ *(.data..vm0.pte) +/* No __ro_after_init data in the .rodata section - which will always be ro */ +#define RO_AFTER_INIT_DATA + #include <asm-generic/vmlinux.lds.h> /* needed for the processor specific cache alignment size */
Signed-off-by: Helge Deller <deller@gmx.de>