
[v2,3/3] arm64: implement KPROBES_ON_FTRACE

Message ID 20190820114314.685a3239@xhacker.debian (mailing list archive)
State New, archived
Series arm64: KPROBES_ON_FTRACE

Commit Message

Jisheng Zhang Aug. 20, 2019, 3:54 a.m. UTC
KPROBES_ON_FTRACE avoids much of the overhead of regular kprobes, as it
eliminates the need for a trap, as well as the need to emulate or
single-step instructions.
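
For context, a kprobe is typically registered from a module along the lines of
the minimal sketch below (illustrative only, not part of this patch; the
noop_pre name is made up). With KPROBES_ON_FTRACE, the same registration
transparently uses the ftrace call site when the probe address is a ftrace
location:

#include <linux/kprobes.h>
#include <linux/module.h>

/* No-op pre-handler: returning 0 lets the probed function run normally. */
static int noop_pre(struct kprobe *p, struct pt_regs *regs)
{
	return 0;
}

static struct kprobe kp = {
	.symbol_name = "_do_fork",
	.pre_handler = noop_pre,
};

static int __init kp_init(void)
{
	return register_kprobe(&kp);
}

static void __exit kp_exit(void)
{
	unregister_kprobe(&kp);
}

module_init(kp_init);
module_exit(kp_exit);
MODULE_LICENSE("GPL");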

This patch implements KPROBES_ON_FTRACE for arm64.

Tested on a Berlin arm64 platform.

~ # mount -t debugfs debugfs /sys/kernel/debug/
~ # cd /sys/kernel/debug/
/sys/kernel/debug # echo 'p _do_fork' > tracing/kprobe_events

before the patch:

/sys/kernel/debug # cat kprobes/list
ffffff801009fe28  k  _do_fork+0x0    [DISABLED]

after the patch:

/sys/kernel/debug # cat kprobes/list
ffffff801009ff54  k  _do_fork+0x4    [DISABLED][FTRACE]
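
The probe now sits on the ftrace call site, hence the +0x4 offset and the
[FTRACE] tag, instead of trapping on the function's first instruction.

For completeness, the event can be enabled and observed in the usual way
(illustrative transcript; the event name follows the default
kprobes/p_<symbol>_0 naming):

/sys/kernel/debug # echo 1 > tracing/events/kprobes/p__do_fork_0/enable
/sys/kernel/debug # cat tracing/trace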

Signed-off-by: Jisheng Zhang <Jisheng.Zhang@synaptics.com>
---
 .../debug/kprobes-on-ftrace/arch-support.txt  |  2 +-
 arch/arm64/Kconfig                            |  1 +
 arch/arm64/kernel/probes/Makefile             |  1 +
 arch/arm64/kernel/probes/ftrace.c             | 60 +++++++++++++++++++
 4 files changed, 63 insertions(+), 1 deletion(-)
 create mode 100644 arch/arm64/kernel/probes/ftrace.c

Comments

Jisheng Zhang Aug. 20, 2019, 7:17 a.m. UTC | #1
On Tue, 20 Aug 2019 03:54:20 +0000 Jisheng Zhang wrote:

> 
> 
> KPROBES_ON_FTRACE avoids much of the overhead of regular kprobes, as it
> eliminates the need for a trap, as well as the need to emulate or
> single-step instructions.
> 
> This patch implements KPROBES_ON_FTRACE for arm64.
> 
> Tested on a Berlin arm64 platform.

Some performance numbers may be interesting:

HW: Berlin arm64 platform, cpufreq forced to 800 MHz
SW: getppid syscall micro-benchmark; source code is at the end of this email.

A. Not probed
B. Probed at __arm64_sys_getppid with no-op probe handlers, without KPROBES_ON_FTRACE
C. Probed at __arm64_sys_getppid with no-op probe handlers, with KPROBES_ON_FTRACE

A: 1905 ns/call
B: 5833 ns/call
C: 2169 ns/call

The overhead of regular kprobes is 5833 - 1905 = 3928 ns/call.
The overhead of kprobes with KPROBES_ON_FTRACE is 2169 - 1905 = 264 ns/call.

As can be seen, KPROBES_ON_FTRACE significantly reduces the overhead of kprobes.

Thanks

---8<---
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <unistd.h>

int main (int argc, char *argv[])
{
	struct timeval tv;
	unsigned long count;
	struct rusage usage;

	for (count = 0; count < 10000000; count++)
		getppid();
	getrusage(RUSAGE_SELF, &usage);
	/* total CPU time consumed = system + user */
	tv = usage.ru_stime;
	tv.tv_sec += usage.ru_utime.tv_sec;
	tv.tv_usec += usage.ru_utime.tv_usec;
	/* total usec divided by (calls/1000) yields nsec per call */
	fprintf(stderr, "getppid was called %lu times: %lu nsec per call\n",
	       count, (tv.tv_sec*1000*1000 + tv.tv_usec)/(count/1000));

	return 0;
}
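
The benchmark was built and run natively on the target; something like the
following should reproduce it (bench.c is a stand-in file name):

~ # gcc -O2 -o getppid-bench bench.c
~ # ./getppid-bench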

> 
> ~ # mount -t debugfs debugfs /sys/kernel/debug/
> ~ # cd /sys/kernel/debug/
> /sys/kernel/debug # echo 'p _do_fork' > tracing/kprobe_events
> 
> before the patch:
> 
> /sys/kernel/debug # cat kprobes/list
> ffffff801009fe28  k  _do_fork+0x0    [DISABLED]
> 
> after the patch:
> 
> /sys/kernel/debug # cat kprobes/list
> ffffff801009ff54  k  _do_fork+0x4    [DISABLED][FTRACE]
> 
> Signed-off-by: Jisheng Zhang <Jisheng.Zhang@synaptics.com>
> ---
>  .../debug/kprobes-on-ftrace/arch-support.txt  |  2 +-
>  arch/arm64/Kconfig                            |  1 +
>  arch/arm64/kernel/probes/Makefile             |  1 +
>  arch/arm64/kernel/probes/ftrace.c             | 60 +++++++++++++++++++
>  4 files changed, 63 insertions(+), 1 deletion(-)
>  create mode 100644 arch/arm64/kernel/probes/ftrace.c
> 
> diff --git a/Documentation/features/debug/kprobes-on-ftrace/arch-support.txt b/Documentation/features/debug/kprobes-on-ftrace/arch-support.txt
> index 68f266944d5f..e8358a38981c 100644
> --- a/Documentation/features/debug/kprobes-on-ftrace/arch-support.txt
> +++ b/Documentation/features/debug/kprobes-on-ftrace/arch-support.txt
> @@ -9,7 +9,7 @@
>      |       alpha: | TODO |
>      |         arc: | TODO |
>      |         arm: | TODO |
> -    |       arm64: | TODO |
> +    |       arm64: |  ok  |
>      |         c6x: | TODO |
>      |        csky: | TODO |
>      |       h8300: | TODO |
> diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
> index 663392d1eae2..928700f15e23 100644
> --- a/arch/arm64/Kconfig
> +++ b/arch/arm64/Kconfig
> @@ -167,6 +167,7 @@ config ARM64
>         select HAVE_STACKPROTECTOR
>         select HAVE_SYSCALL_TRACEPOINTS
>         select HAVE_KPROBES
> +       select HAVE_KPROBES_ON_FTRACE
>         select HAVE_KRETPROBES
>         select HAVE_GENERIC_VDSO
>         select IOMMU_DMA if IOMMU_SUPPORT
> diff --git a/arch/arm64/kernel/probes/Makefile b/arch/arm64/kernel/probes/Makefile
> index 8e4be92e25b1..4020cfc66564 100644
> --- a/arch/arm64/kernel/probes/Makefile
> +++ b/arch/arm64/kernel/probes/Makefile
> @@ -4,3 +4,4 @@ obj-$(CONFIG_KPROBES)           += kprobes.o decode-insn.o      \
>                                    simulate-insn.o
>  obj-$(CONFIG_UPROBES)          += uprobes.o decode-insn.o      \
>                                    simulate-insn.o
> +obj-$(CONFIG_KPROBES_ON_FTRACE)        += ftrace.o
> diff --git a/arch/arm64/kernel/probes/ftrace.c b/arch/arm64/kernel/probes/ftrace.c
> new file mode 100644
> index 000000000000..52901ffff570
> --- /dev/null
> +++ b/arch/arm64/kernel/probes/ftrace.c
> @@ -0,0 +1,60 @@
> +// SPDX-License-Identifier: GPL-2.0-or-later
> +/*
> + * Dynamic Ftrace based Kprobes Optimization
> + *
> + * Copyright (C) Hitachi Ltd., 2012
> + * Copyright (C) 2019 Jisheng Zhang <jszhang@kernel.org>
> + *                   Synaptics Incorporated
> + */
> +
> +#include <linux/kprobes.h>
> +
> +/* Ftrace callback handler for kprobes -- called with preemption disabled */
> +void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
> +                          struct ftrace_ops *ops, struct pt_regs *regs)
> +{
> +       struct kprobe *p;
> +       struct kprobe_ctlblk *kcb;
> +
> +       /* Preempt is disabled by ftrace */
> +       p = get_kprobe((kprobe_opcode_t *)ip);
> +       if (unlikely(!p) || kprobe_disabled(p))
> +               return;
> +
> +       kcb = get_kprobe_ctlblk();
> +       if (kprobe_running()) {
> +               kprobes_inc_nmissed_count(p);
> +       } else {
> +               unsigned long orig_ip = instruction_pointer(regs);
> +       /* Kprobe handlers expect regs->pc to be one insn past the probe address, as on a breakpoint hit */
> +               instruction_pointer_set(regs, ip + sizeof(kprobe_opcode_t));
> +
> +               __this_cpu_write(current_kprobe, p);
> +               kcb->kprobe_status = KPROBE_HIT_ACTIVE;
> +               if (!p->pre_handler || !p->pre_handler(p, regs)) {
> +                       /*
> +                        * Emulate single-stepping (and also recover regs->pc)
> +                        * as if the probed instruction were a nop
> +                        */
> +                       instruction_pointer_set(regs,
> +                               (unsigned long)p->addr + MCOUNT_INSN_SIZE);
> +                       if (unlikely(p->post_handler)) {
> +                               kcb->kprobe_status = KPROBE_HIT_SSDONE;
> +                               p->post_handler(p, regs, 0);
> +                       }
> +                       instruction_pointer_set(regs, orig_ip);
> +               }
> +               /*
> +                * If pre_handler returns !0, it has changed regs->pc, so we
> +                * must skip emulating the post_handler.
> +                */
> +               __this_cpu_write(current_kprobe, NULL);
> +       }
> +}
> +NOKPROBE_SYMBOL(kprobe_ftrace_handler);
> +
> +int arch_prepare_kprobe_ftrace(struct kprobe *p)
> +{
> +       p->ainsn.api.insn = NULL;
> +       return 0;
> +}
> --
> 2.23.0.rc1
Thomas Gleixner Aug. 20, 2019, 8:53 a.m. UTC | #2
On Tue, 20 Aug 2019, Jisheng Zhang wrote:

> KPROBES_ON_FTRACE avoids much of the overhead of regular kprobes, as it
> eliminates the need for a trap, as well as the need to emulate or
> single-step instructions.
> 
> This patch implements KPROBES_ON_FTRACE for arm64.

 git grep 'This patch' Documentation/process/submitting-patches.rst
 
Thanks,

	tglx

Patch

diff --git a/Documentation/features/debug/kprobes-on-ftrace/arch-support.txt b/Documentation/features/debug/kprobes-on-ftrace/arch-support.txt
index 68f266944d5f..e8358a38981c 100644
--- a/Documentation/features/debug/kprobes-on-ftrace/arch-support.txt
+++ b/Documentation/features/debug/kprobes-on-ftrace/arch-support.txt
@@ -9,7 +9,7 @@ 
     |       alpha: | TODO |
     |         arc: | TODO |
     |         arm: | TODO |
-    |       arm64: | TODO |
+    |       arm64: |  ok  |
     |         c6x: | TODO |
     |        csky: | TODO |
     |       h8300: | TODO |
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 663392d1eae2..928700f15e23 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -167,6 +167,7 @@  config ARM64
 	select HAVE_STACKPROTECTOR
 	select HAVE_SYSCALL_TRACEPOINTS
 	select HAVE_KPROBES
+	select HAVE_KPROBES_ON_FTRACE
 	select HAVE_KRETPROBES
 	select HAVE_GENERIC_VDSO
 	select IOMMU_DMA if IOMMU_SUPPORT
diff --git a/arch/arm64/kernel/probes/Makefile b/arch/arm64/kernel/probes/Makefile
index 8e4be92e25b1..4020cfc66564 100644
--- a/arch/arm64/kernel/probes/Makefile
+++ b/arch/arm64/kernel/probes/Makefile
@@ -4,3 +4,4 @@  obj-$(CONFIG_KPROBES)		+= kprobes.o decode-insn.o	\
 				   simulate-insn.o
 obj-$(CONFIG_UPROBES)		+= uprobes.o decode-insn.o	\
 				   simulate-insn.o
+obj-$(CONFIG_KPROBES_ON_FTRACE)	+= ftrace.o
diff --git a/arch/arm64/kernel/probes/ftrace.c b/arch/arm64/kernel/probes/ftrace.c
new file mode 100644
index 000000000000..52901ffff570
--- /dev/null
+++ b/arch/arm64/kernel/probes/ftrace.c
@@ -0,0 +1,60 @@ 
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Dynamic Ftrace based Kprobes Optimization
+ *
+ * Copyright (C) Hitachi Ltd., 2012
+ * Copyright (C) 2019 Jisheng Zhang <jszhang@kernel.org>
+ *		      Synaptics Incorporated
+ */
+
+#include <linux/kprobes.h>
+
+/* Ftrace callback handler for kprobes -- called with preemption disabled */
+void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
+			   struct ftrace_ops *ops, struct pt_regs *regs)
+{
+	struct kprobe *p;
+	struct kprobe_ctlblk *kcb;
+
+	/* Preempt is disabled by ftrace */
+	p = get_kprobe((kprobe_opcode_t *)ip);
+	if (unlikely(!p) || kprobe_disabled(p))
+		return;
+
+	kcb = get_kprobe_ctlblk();
+	if (kprobe_running()) {
+		kprobes_inc_nmissed_count(p);
+	} else {
+		unsigned long orig_ip = instruction_pointer(regs);
+		/* Kprobe handlers expect regs->pc to be one insn past the probe address, as on a breakpoint hit */
+		instruction_pointer_set(regs, ip + sizeof(kprobe_opcode_t));
+
+		__this_cpu_write(current_kprobe, p);
+		kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+		if (!p->pre_handler || !p->pre_handler(p, regs)) {
+			/*
+			 * Emulate single-stepping (and also recover regs->pc)
+			 * as if the probed instruction were a nop
+			 */
+			instruction_pointer_set(regs,
+				(unsigned long)p->addr + MCOUNT_INSN_SIZE);
+			if (unlikely(p->post_handler)) {
+				kcb->kprobe_status = KPROBE_HIT_SSDONE;
+				p->post_handler(p, regs, 0);
+			}
+			instruction_pointer_set(regs, orig_ip);
+		}
+		/*
+		 * If pre_handler returns !0, it has changed regs->pc, so we
+		 * must skip emulating the post_handler.
+		 */
+		__this_cpu_write(current_kprobe, NULL);
+	}
+}
+NOKPROBE_SYMBOL(kprobe_ftrace_handler);
+
+int arch_prepare_kprobe_ftrace(struct kprobe *p)
+{
+	p->ainsn.api.insn = NULL;
+	return 0;
+}