[1/5] arm64: kprobe: protect/rename few definitions to be reused by uprobe

Message ID: bcc42a5866cd856a08075ca599adc78ed74b453f.1470114993.git.panand@redhat.com
State New, archived

Commit Message

Pratyush Anand Aug. 2, 2016, 5:30 a.m. UTC
The decode-insn code has to be reused by the arm64 uprobe implementation as well.
Therefore, this patch guards some portions of the kprobe code and renames a few
others, so that the decode-insn functionality can be reused by uprobe even when
CONFIG_KPROBES is not defined.

kprobe_opcode_t and struct arch_specific_insn are also defined by
linux/kprobes.h when CONFIG_KPROBES is not defined, so guard the arm64
definitions in asm/probes.h.
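
For context, when CONFIG_KPROBES is not set, linux/kprobes.h supplies its own
fallback definitions, roughly as below (quoted from memory, so treat the exact
fields as approximate); unguarded arm64 versions in asm/probes.h would clash
with them:

	/* include/linux/kprobes.h, !CONFIG_KPROBES case (approximate) */
	typedef int kprobe_opcode_t;
	struct arch_specific_insn {
		int dummy;
	};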

linux/kprobes.h already includes asm/kprobes.h, so remove the direct inclusion
of asm/kprobes.h from decode-insn.c.

Some definitions, such as kprobe_insn and kprobes_handler_t, can be reused by
uprobe, so drop the 'k' from their names.

struct arch_specific_insn is specific to kprobe. Therefore, introduce a new
struct arch_probe_insn that is common to both kprobe and uprobe, so that the
decode-insn code can be shared, and modify the kprobe code accordingly.
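
As a rough illustration of where this is heading (the uprobe structure below is
hypothetical and not part of this patch), the arm64 uprobe side can later embed
the shared struct instead of duplicating its fields:

	/* hypothetical arm64 arch_uprobe, for illustration only */
	#include <asm/probes.h>

	struct arch_uprobe {
		probe_opcode_t orig_insn;	/* original instruction (illustrative field) */
		struct arch_probe_insn api;	/* shared decode/simulate state */
	};

kprobe then reaches the shared fields through its own wrapper, e.g.
p->ainsn.api.insn instead of p->ainsn.insn, as the kprobes.c hunks below show.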

arm_probe_decode_insn() will be needed by uprobe as well, so make it global.
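
Once it is global, a uprobe decode path can hand its own arch_probe_insn to the
same decoder; a minimal, hypothetical caller might look like this
(analyze_insn is an illustrative name, not an existing function):

	/* hypothetical uprobe-side caller, for illustration only */
	#include <linux/errno.h>
	#include <asm/probes.h>
	#include "decode-insn.h"

	static int analyze_insn(probe_opcode_t insn, struct arch_probe_insn *api)
	{
		switch (arm_probe_decode_insn(insn, api)) {
		case INSN_REJECTED:		/* cannot be probed at all */
			return -EINVAL;
		case INSN_GOOD_NO_SLOT:		/* simulated via api->handler */
		case INSN_GOOD:			/* single-stepped out of line */
			return 0;
		}
		return -EINVAL;
	}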

Signed-off-by: Pratyush Anand <panand@redhat.com>
---
 arch/arm64/include/asm/probes.h        | 19 ++++++++++--------
 arch/arm64/kernel/probes/decode-insn.c | 31 +++++++++++++++--------------
 arch/arm64/kernel/probes/decode-insn.h |  8 ++++++--
 arch/arm64/kernel/probes/kprobes.c     | 36 +++++++++++++++++-----------------
 4 files changed, 51 insertions(+), 43 deletions(-)

Patch

diff --git a/arch/arm64/include/asm/probes.h b/arch/arm64/include/asm/probes.h
index 5af574d632fa..e175a825b187 100644
--- a/arch/arm64/include/asm/probes.h
+++ b/arch/arm64/include/asm/probes.h
@@ -17,19 +17,22 @@ 
 
 #include <asm/opcodes.h>
 
-struct kprobe;
-struct arch_specific_insn;
-
-typedef u32 kprobe_opcode_t;
-typedef void (kprobes_handler_t) (u32 opcode, long addr, struct pt_regs *);
+typedef u32 probe_opcode_t;
+typedef void (probes_handler_t) (u32 opcode, long addr, struct pt_regs *);
 
 /* architecture specific copy of original instruction */
-struct arch_specific_insn {
-	kprobe_opcode_t *insn;
+struct arch_probe_insn {
+	probe_opcode_t *insn;
 	pstate_check_t *pstate_cc;
-	kprobes_handler_t *handler;
+	probes_handler_t *handler;
 	/* restore address after step xol */
 	unsigned long restore;
 };
+#ifdef CONFIG_KPROBES
+typedef u32 kprobe_opcode_t;
+struct arch_specific_insn {
+	struct arch_probe_insn api;
+};
+#endif
 
 #endif
diff --git a/arch/arm64/kernel/probes/decode-insn.c b/arch/arm64/kernel/probes/decode-insn.c
index 37e47a9d617e..335984cd3a99 100644
--- a/arch/arm64/kernel/probes/decode-insn.c
+++ b/arch/arm64/kernel/probes/decode-insn.c
@@ -16,7 +16,6 @@ 
 #include <linux/kernel.h>
 #include <linux/kprobes.h>
 #include <linux/module.h>
-#include <asm/kprobes.h>
 #include <asm/insn.h>
 #include <asm/sections.h>
 
@@ -77,8 +76,8 @@  static bool __kprobes aarch64_insn_is_steppable(u32 insn)
  *   INSN_GOOD         If instruction is supported and uses instruction slot,
  *   INSN_GOOD_NO_SLOT If instruction is supported but doesn't use its slot.
  */
-static enum kprobe_insn __kprobes
-arm_probe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi)
+enum probe_insn __kprobes
+arm_probe_decode_insn(probe_opcode_t insn, struct arch_probe_insn *api)
 {
 	/*
 	 * Instructions reading or modifying the PC won't work from the XOL
@@ -88,26 +87,26 @@  arm_probe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi)
 		return INSN_GOOD;
 
 	if (aarch64_insn_is_bcond(insn)) {
-		asi->handler = simulate_b_cond;
+		api->handler = simulate_b_cond;
 	} else if (aarch64_insn_is_cbz(insn) ||
 	    aarch64_insn_is_cbnz(insn)) {
-		asi->handler = simulate_cbz_cbnz;
+		api->handler = simulate_cbz_cbnz;
 	} else if (aarch64_insn_is_tbz(insn) ||
 	    aarch64_insn_is_tbnz(insn)) {
-		asi->handler = simulate_tbz_tbnz;
+		api->handler = simulate_tbz_tbnz;
 	} else if (aarch64_insn_is_adr_adrp(insn)) {
-		asi->handler = simulate_adr_adrp;
+		api->handler = simulate_adr_adrp;
 	} else if (aarch64_insn_is_b(insn) ||
 	    aarch64_insn_is_bl(insn)) {
-		asi->handler = simulate_b_bl;
+		api->handler = simulate_b_bl;
 	} else if (aarch64_insn_is_br(insn) ||
 	    aarch64_insn_is_blr(insn) ||
 	    aarch64_insn_is_ret(insn)) {
-		asi->handler = simulate_br_blr_ret;
+		api->handler = simulate_br_blr_ret;
 	} else if (aarch64_insn_is_ldr_lit(insn)) {
-		asi->handler = simulate_ldr_literal;
+		api->handler = simulate_ldr_literal;
 	} else if (aarch64_insn_is_ldrsw_lit(insn)) {
-		asi->handler = simulate_ldrsw_literal;
+		api->handler = simulate_ldrsw_literal;
 	} else {
 		/*
 		 * Instruction cannot be stepped out-of-line and we don't
@@ -119,6 +118,7 @@  arm_probe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi)
 	return INSN_GOOD_NO_SLOT;
 }
 
+#ifdef CONFIG_KPROBES
 static bool __kprobes
 is_probed_address_atomic(kprobe_opcode_t *scan_start, kprobe_opcode_t *scan_end)
 {
@@ -137,11 +137,11 @@  is_probed_address_atomic(kprobe_opcode_t *scan_start, kprobe_opcode_t *scan_end)
 	return false;
 }
 
-enum kprobe_insn __kprobes
+enum probe_insn __kprobes
 arm_kprobe_decode_insn(kprobe_opcode_t *addr, struct arch_specific_insn *asi)
 {
-	enum kprobe_insn decoded;
-	kprobe_opcode_t insn = le32_to_cpu(*addr);
+	enum probe_insn decoded;
+	probe_opcode_t insn = le32_to_cpu(*addr);
 	kprobe_opcode_t *scan_start = addr - 1;
 	kprobe_opcode_t *scan_end = addr - MAX_ATOMIC_CONTEXT_SIZE;
 #if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
@@ -164,7 +164,7 @@  arm_kprobe_decode_insn(kprobe_opcode_t *addr, struct arch_specific_insn *asi)
 		preempt_enable();
 	}
 #endif
-	decoded = arm_probe_decode_insn(insn, asi);
+	decoded = arm_probe_decode_insn(insn, &asi->api);
 
 	if (decoded == INSN_REJECTED ||
 			is_probed_address_atomic(scan_start, scan_end))
@@ -172,3 +172,4 @@  arm_kprobe_decode_insn(kprobe_opcode_t *addr, struct arch_specific_insn *asi)
 
 	return decoded;
 }
+#endif
diff --git a/arch/arm64/kernel/probes/decode-insn.h b/arch/arm64/kernel/probes/decode-insn.h
index d438289646a6..76d3f315407f 100644
--- a/arch/arm64/kernel/probes/decode-insn.h
+++ b/arch/arm64/kernel/probes/decode-insn.h
@@ -23,13 +23,17 @@ 
  */
 #define MAX_ATOMIC_CONTEXT_SIZE	(128 / sizeof(kprobe_opcode_t))
 
-enum kprobe_insn {
+enum probe_insn {
 	INSN_REJECTED,
 	INSN_GOOD_NO_SLOT,
 	INSN_GOOD,
 };
 
-enum kprobe_insn __kprobes
+#ifdef CONFIG_KPROBES
+enum probe_insn __kprobes
 arm_kprobe_decode_insn(kprobe_opcode_t *addr, struct arch_specific_insn *asi);
+#endif
+enum probe_insn __kprobes
+arm_probe_decode_insn(probe_opcode_t insn, struct arch_probe_insn *asi);
 
 #endif /* _ARM_KERNEL_KPROBES_ARM64_H */
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
index bf9768588288..8e5923c04fef 100644
--- a/arch/arm64/kernel/probes/kprobes.c
+++ b/arch/arm64/kernel/probes/kprobes.c
@@ -56,31 +56,31 @@  static inline unsigned long min_stack_size(unsigned long addr)
 static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
 {
 	/* prepare insn slot */
-	p->ainsn.insn[0] = cpu_to_le32(p->opcode);
+	p->ainsn.api.insn[0] = cpu_to_le32(p->opcode);
 
-	flush_icache_range((uintptr_t) (p->ainsn.insn),
-			   (uintptr_t) (p->ainsn.insn) +
+	flush_icache_range((uintptr_t) (p->ainsn.api.insn),
+			   (uintptr_t) (p->ainsn.api.insn) +
 			   MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
 
 	/*
 	 * Needs restoring of return address after stepping xol.
 	 */
-	p->ainsn.restore = (unsigned long) p->addr +
+	p->ainsn.api.restore = (unsigned long) p->addr +
 	  sizeof(kprobe_opcode_t);
 }
 
 static void __kprobes arch_prepare_simulate(struct kprobe *p)
 {
 	/* This instructions is not executed xol. No need to adjust the PC */
-	p->ainsn.restore = 0;
+	p->ainsn.api.restore = 0;
 }
 
 static void __kprobes arch_simulate_insn(struct kprobe *p, struct pt_regs *regs)
 {
 	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
 
-	if (p->ainsn.handler)
-		p->ainsn.handler((u32)p->opcode, (long)p->addr, regs);
+	if (p->ainsn.api.handler)
+		p->ainsn.api.handler((u32)p->opcode, (long)p->addr, regs);
 
 	/* single step simulated, now go for post processing */
 	post_kprobe_handler(kcb, regs);
@@ -110,18 +110,18 @@  int __kprobes arch_prepare_kprobe(struct kprobe *p)
 		return -EINVAL;
 
 	case INSN_GOOD_NO_SLOT:	/* insn need simulation */
-		p->ainsn.insn = NULL;
+		p->ainsn.api.insn = NULL;
 		break;
 
 	case INSN_GOOD:	/* instruction uses slot */
-		p->ainsn.insn = get_insn_slot();
-		if (!p->ainsn.insn)
+		p->ainsn.api.insn = get_insn_slot();
+		if (!p->ainsn.api.insn)
 			return -ENOMEM;
 		break;
 	};
 
 	/* prepare the instruction */
-	if (p->ainsn.insn)
+	if (p->ainsn.api.insn)
 		arch_prepare_ss_slot(p);
 	else
 		arch_prepare_simulate(p);
@@ -154,9 +154,9 @@  void __kprobes arch_disarm_kprobe(struct kprobe *p)
 
 void __kprobes arch_remove_kprobe(struct kprobe *p)
 {
-	if (p->ainsn.insn) {
-		free_insn_slot(p->ainsn.insn, 0);
-		p->ainsn.insn = NULL;
+	if (p->ainsn.api.insn) {
+		free_insn_slot(p->ainsn.api.insn, 0);
+		p->ainsn.api.insn = NULL;
 	}
 }
 
@@ -251,9 +251,9 @@  static void __kprobes setup_singlestep(struct kprobe *p,
 	}
 
 
-	if (p->ainsn.insn) {
+	if (p->ainsn.api.insn) {
 		/* prepare for single stepping */
-		slot = (unsigned long)p->ainsn.insn;
+		slot = (unsigned long)p->ainsn.api.insn;
 
 		set_ss_context(kcb, slot);	/* mark pending ss */
 
@@ -305,8 +305,8 @@  post_kprobe_handler(struct kprobe_ctlblk *kcb, struct pt_regs *regs)
 		return;
 
 	/* return addr restore if non-branching insn */
-	if (cur->ainsn.restore != 0)
-		instruction_pointer_set(regs, cur->ainsn.restore);
+	if (cur->ainsn.api.restore != 0)
+		instruction_pointer_set(regs, cur->ainsn.api.restore);
 
 	/* restore back original saved kprobe variables and continue */
 	if (kcb->kprobe_status == KPROBE_REENTER) {