[RFC,v2,1/1] trace,smp: Add tracepoints around remotely called functions

Message ID 20230510221513.93297-1-leobras@redhat.com (mailing list archive)
State Superseded
Series [RFC,v2,1/1] trace,smp: Add tracepoints around remotely called functions

Commit Message

Leonardo Bras May 10, 2023, 10:15 p.m. UTC
When running RT workloads on isolated CPUs, many deadline misses are
caused by remote CPU requests such as smp_call_function*().

For those cases, knowing which functions were running around the moment
of the deadline miss can help (a lot) in finding a target for the next
improvements.

Add tracepoints that record the function name & csd before entering and
after returning from the remotely requested function.

Also, add a tracepoint on the requesting CPU when a csd is queued for
remote execution.

Signed-off-by: Leonardo Bras <leobras@redhat.com>
---

Changes since RFCv1:
- Implemented trace_csd_queue_cpu() as suggested by Valentin Schneider
- Used DECLARE_EVENT_CLASS to avoid duplicating the entry/exit events
- Introduced a new helper: csd_do_func()
- Renamed the events from smp_call_function_* to csd_function_*
- Rebased on top of torvalds/master
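
As a minimal illustration of where the new events fire (hypothetical
caller code, not part of this patch; demo_func/demo and the target CPU
number are made up): any smp_call_function*() user emits csd_queue_cpu
on the requesting CPU when the csd is queued for a remote CPU, and
csd_function_entry/csd_function_exit around the callback on the
destination CPU.

#include <linux/smp.h>

static void demo_func(void *info)
{
	/*
	 * Runs on the destination CPU, bracketed by the
	 * csd_function_entry and csd_function_exit tracepoints.
	 */
}

static void demo(void)
{
	/* Queuing the csd below emits csd_queue_cpu on this CPU. */
	smp_call_function_single(1, demo_func, NULL, 1);
}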

 include/trace/events/smp.h | 72 ++++++++++++++++++++++++++++++++++++++
 kernel/smp.c               | 41 +++++++++++++---------
 2 files changed, 96 insertions(+), 17 deletions(-)
 create mode 100644 include/trace/events/smp.h

Comments

Leonardo Bras May 10, 2023, 11:05 p.m. UTC | #1
On Wed, May 10, 2023 at 7:16 PM Leonardo Bras <leobras@redhat.com> wrote:
>
> When running RT workloads on isolated CPUs, many deadline misses are
> caused by remote CPU requests such as smp_call_function*().
>
> [ rest of the quoted v2 trimmed; the full patch appears below ]
>

Argh, I accidentally sent an unfinished patch, sorry about that.
Please disregard the v2 above, and review the v3 instead:
https://lore.kernel.org/lkml/20230510230128.150384-1-leobras@redhat.com/

Leo
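
For reference, once a version of this patch is applied the new events
appear under the "smp" trace system in tracefs (e.g.
/sys/kernel/tracing/events/smp/csd_queue_cpu/) and can be enabled like
any other trace event. They can also be hooked with a probe. Below is a
minimal sketch of that (hypothetical module code; note that a module
could only link against these tracepoints if they were additionally
exported with EXPORT_TRACEPOINT_SYMBOL_GPL(), which this patch does not
do):

#include <linux/module.h>
#include <linux/smp.h>
#include <trace/events/smp.h>

/* Probe signature: void (*probe)(void *data, <TP_PROTO arguments>). */
static void probe_csd_entry(void *data, smp_call_func_t func,
			    call_single_data_t *csd)
{
	pr_info("csd func %ps entered, csd=%p\n", func, csd);
}

static int __init csd_probe_init(void)
{
	/* register_trace_<event>() is generated by the tracepoint machinery. */
	return register_trace_csd_function_entry(probe_csd_entry, NULL);
}

static void __exit csd_probe_exit(void)
{
	unregister_trace_csd_function_entry(probe_csd_entry, NULL);
	tracepoint_synchronize_unregister();
}

module_init(csd_probe_init);
module_exit(csd_probe_exit);
MODULE_LICENSE("GPL");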

Patch

diff --git a/include/trace/events/smp.h b/include/trace/events/smp.h
new file mode 100644
index 000000000000..5fd75399d3f1
--- /dev/null
+++ b/include/trace/events/smp.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM smp
+
+#if !defined(_TRACE_SMP_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_SMP_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(csd_queue_cpu,
+
+	TP_PROTO(const unsigned int cpu,
+		 unsigned long callsite,
+		 smp_call_func_t func,
+		 call_single_data_t *csd),
+
+	TP_ARGS(cpu, callsite, func, csd),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, cpu)
+		__field(void *, callsite)
+		__field(void *, func)
+		__field(void *, csd)
+	),
+
+	TP_fast_assign(
+		__entry->cpu = cpu;
+		__entry->callsite = (void *)callsite;
+		__entry->func = func;
+		__entry->csd  = csd;
+	),
+
+	TP_printk("cpu=%u callsite=%pS func=%pS csd=%p",
+		  __entry->cpu, __entry->callsite, __entry->func, __entry->csd)
+);
+
+/*
+ * Tracepoints for a function which is called as an effect of smp_call_function*().
+ */
+DECLARE_EVENT_CLASS(csd_function,
+
+	TP_PROTO(smp_call_func_t func, call_single_data_t *csd),
+
+	TP_ARGS(func, csd),
+
+	TP_STRUCT__entry(
+		__field(void *,	func)
+		__field(void *,	csd)
+	),
+
+	TP_fast_assign(
+		__entry->func	= func;
+		__entry->csd	= csd;
+	),
+
+	TP_printk("function %ps, csd = %p", __entry->func, __entry->csd)
+);
+
+DEFINE_EVENT(csd_function, csd_function_entry,
+	TP_PROTO(smp_call_func_t func, call_single_data_t *csd),
+	TP_ARGS(func, csd)
+);
+
+DEFINE_EVENT(csd_function, csd_function_exit,
+	TP_PROTO(smp_call_func_t func, call_single_data_t *csd),
+	TP_ARGS(func, csd)
+);
+
+#endif /* _TRACE_SMP_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/kernel/smp.c b/kernel/smp.c
index ab3e5dad6cfe..a34aa2b92050 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -27,6 +27,9 @@
 #include <linux/jump_label.h>
 
 #include <trace/events/ipi.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/smp.h>
+#undef CREATE_TRACE_POINTS
 
 #include "smpboot.h"
 #include "sched/smp.h"
@@ -121,6 +124,14 @@ send_call_function_ipi_mask(struct cpumask *mask)
 	arch_send_call_function_ipi_mask(mask);
 }
 
+static __always_inline void
+csd_do_func(smp_call_func_t func, void *info, call_single_data_t *csd)
+{
+	trace_csd_function_entry(func, csd);
+	func(info);
+	trace_csd_function_exit(func, csd);
+}
+
 #ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
 
 static DEFINE_STATIC_KEY_MAYBE(CONFIG_CSD_LOCK_WAIT_DEBUG_DEFAULT, csdlock_debug_enabled);
@@ -329,7 +340,7 @@ void __smp_call_single_queue(int cpu, struct llist_node *node)
 	 * even if we haven't sent the smp_call IPI yet (e.g. the stopper
 	 * executes migration_cpu_stop() on the remote CPU).
 	 */
-	if (trace_ipi_send_cpu_enabled()) {
+	if (trace_csd_queue_cpu_enabled()) {
 		call_single_data_t *csd;
 		smp_call_func_t func;
 
@@ -337,7 +348,7 @@ void __smp_call_single_queue(int cpu, struct llist_node *node)
 		func = CSD_TYPE(csd) == CSD_TYPE_TTWU ?
 			sched_ttwu_pending : csd->func;
 
-		trace_ipi_send_cpu(cpu, _RET_IP_, func);
+		trace_csd_queue_cpu(cpu, _RET_IP_, func, csd);
 	}
 
 	/*
@@ -375,7 +386,7 @@ static int generic_exec_single(int cpu, struct __call_single_data *csd)
 		csd_lock_record(csd);
 		csd_unlock(csd);
 		local_irq_save(flags);
-		func(info);
+		csd_do_func(func, info, csd);
 		csd_lock_record(NULL);
 		local_irq_restore(flags);
 		return 0;
@@ -477,7 +488,7 @@ static void __flush_smp_call_function_queue(bool warn_cpu_offline)
 			}
 
 			csd_lock_record(csd);
-			func(info);
+			csd_do_func(func, info, csd);
 			csd_unlock(csd);
 			csd_lock_record(NULL);
 		} else {
@@ -508,7 +519,7 @@ static void __flush_smp_call_function_queue(bool warn_cpu_offline)
 
 				csd_lock_record(csd);
 				csd_unlock(csd);
-				func(info);
+				csd_do_func(func, info, csd);
 				csd_lock_record(NULL);
 			} else if (type == CSD_TYPE_IRQ_WORK) {
 				irq_work_single(csd);
@@ -522,8 +533,10 @@ static void __flush_smp_call_function_queue(bool warn_cpu_offline)
 	/*
 	 * Third; only CSD_TYPE_TTWU is left, issue those.
 	 */
-	if (entry)
-		sched_ttwu_pending(entry);
+	if (entry) {
+		csd = llist_entry(entry, typeof(*csd), node.llist);
+		csd_do_func(sched_ttwu_pending, entry, csd);
+	}
 }
 
 
@@ -728,7 +741,7 @@ static void smp_call_function_many_cond(const struct cpumask *mask,
 	int cpu, last_cpu, this_cpu = smp_processor_id();
 	struct call_function_data *cfd;
 	bool wait = scf_flags & SCF_WAIT;
-	int nr_cpus = 0, nr_queued = 0;
+	int nr_cpus = 0;
 	bool run_remote = false;
 	bool run_local = false;
 
@@ -786,21 +799,15 @@ static void smp_call_function_many_cond(const struct cpumask *mask,
 			csd->node.src = smp_processor_id();
 			csd->node.dst = cpu;
 #endif
+			trace_csd_queue_cpu(cpu, _RET_IP_, func, csd);
+
 			if (llist_add(&csd->node.llist, &per_cpu(call_single_queue, cpu))) {
 				__cpumask_set_cpu(cpu, cfd->cpumask_ipi);
 				nr_cpus++;
 				last_cpu = cpu;
 			}
-			nr_queued++;
 		}
 
-		/*
-		 * Trace each smp_function_call_*() as an IPI, actual IPIs
-		 * will be traced with func==generic_smp_call_function_single_ipi().
-		 */
-		if (nr_queued)
-			trace_ipi_send_cpumask(cfd->cpumask, _RET_IP_, func);
-
 		/*
 		 * Choose the most efficient way to send an IPI. Note that the
 		 * number of CPUs might be zero due to concurrent changes to the
@@ -816,7 +823,7 @@ static void smp_call_function_many_cond(const struct cpumask *mask,
 		unsigned long flags;
 
 		local_irq_save(flags);
-		func(info);
+		csd_do_func(func, info, csd);
 		local_irq_restore(flags);
 	}