
[V6,5/6] IB/MAD: Add SMP details to MAD tracing

Message ID 20190317195950.2991-6-ira.weiny@intel.com (mailing list archive)
State Superseded
Series Add MAD stack trace points

Commit Message

Ira Weiny March 17, 2019, 7:59 p.m. UTC
From: Ira Weiny <ira.weiny@intel.com>

CC: Hal Rosenstock <hal@dev.mellanox.co.il>
CC: Alexei Starovoitov <ast@kernel.org>
CC: Leon Romanovsky <leon@kernel.org>
CC: Jason Gunthorpe <jgg@ziepe.ca>
CC: "Ruhl, Michael J" <michael.j.ruhl@intel.com>
CC: Steven Rostedt (VMware) <rostedt@goodmis.org>
Signed-off-by: Ira Weiny <ira.weiny@intel.com>

---
Changes from V2
	change field order for better ring buffer utilization

 drivers/infiniband/core/mad.c |  8 ++++
 include/trace/events/ib_mad.h | 90 +++++++++++++++++++++++++++++++++++
 2 files changed, 98 insertions(+)
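
The "change field order for better ring buffer utilization" note above refers to declaring the TP_STRUCT__entry() fields in descending size order: the macro expands into an ordinary C struct, so normal structure-packing rules decide how much ring buffer space each event record consumes, and placing wide fields before narrow ones avoids interior padding holes. A minimal, generic illustration of the effect (plain userspace C, with stdint types standing in for the kernel's u8/u32/u64; the struct and field names are illustrative, not the event's actual layout):

#include <stdint.h>
#include <stdio.h>

/* Narrow field first: the 8-byte-aligned u64 forces a 7-byte hole after 'flag'. */
struct narrow_first {
	uint8_t  flag;		/* offset 0             */
				/* 7 bytes of padding   */
	uint64_t key;		/* offset 8             */
	uint32_t lid;		/* offset 16            */
				/* 4 bytes tail padding */
};				/* sizeof == 24 on common 64-bit ABIs */

/* Descending size order, as the patch uses: no interior holes. */
struct wide_first {
	uint64_t key;		/* offset 0             */
	uint32_t lid;		/* offset 8             */
	uint8_t  flag;		/* offset 12            */
				/* 3 bytes tail padding */
};				/* sizeof == 16 on common 64-bit ABIs */

int main(void)
{
	printf("narrow_first: %zu bytes\n", sizeof(struct narrow_first));
	printf("wide_first:   %zu bytes\n", sizeof(struct wide_first));
	return 0;
}

The event classes in this patch follow the same rule: the u64 mkey comes first, then the two 32-bit DR LID fields, then the u8 hop fields and the byte arrays.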

Comments

Michael J. Ruhl March 18, 2019, 12:57 p.m. UTC | #1
>-----Original Message-----
>From: Weiny, Ira
>Sent: Sunday, March 17, 2019 4:00 PM
>To: Jason Gunthorpe <jgg@ziepe.ca>; Steven Rostedt
><rostedt@goodmis.org>
>Cc: Ingo Molnar <mingo@redhat.com>; linux-rdma@vger.kernel.org; Weiny,
>Ira <ira.weiny@intel.com>; Hal Rosenstock <hal@dev.mellanox.co.il>; Alexei
>Starovoitov <ast@kernel.org>; Leon Romanovsky <leon@kernel.org>; Ruhl,
>Michael J <michael.j.ruhl@intel.com>
>Subject: [PATCH V6 5/6] IB/MAD: Add SMP details to MAD tracing
>
>From: Ira Weiny <ira.weiny@intel.com>
>
>CC: Hal Rosenstock <hal@dev.mellanox.co.il>
>CC: Alexei Starovoitov <ast@kernel.org>
>CC: Leon Romanovsky <leon@kernel.org>
>CC: Jason Gunthorpe <jgg@ziepe.ca>
>CC: "Ruhl, Michael J" <michael.j.ruhl@intel.com>
>CC: Steven Rostedt (VMware) <rostedt@goodmis.org>
>Signed-off-by: Ira Weiny <ira.weiny@intel.com>
>
>---
>Changes from V2
>	change field order for better ring buffer utilization
>
> drivers/infiniband/core/mad.c |  8 ++++
> include/trace/events/ib_mad.h | 90 +++++++++++++++++++++++++++++++++++
> 2 files changed, 98 insertions(+)
>
>diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
>index e310c90a28b2..0060e066e752 100644
>--- a/drivers/infiniband/core/mad.c
>+++ b/drivers/infiniband/core/mad.c
>@@ -862,6 +862,8 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
> 	if (opa && smp->class_version == OPA_SM_CLASS_VERSION) {
> 		u32 opa_drslid;
>
>+		trace_ib_mad_handle_out_opa_smi(opa_smp);
>+
> 		if ((opa_get_smp_direction(opa_smp)
> 		     ? opa_smp->route.dr.dr_dlid : opa_smp->route.dr.dr_slid) ==
> 		     OPA_LID_PERMISSIVE &&
>@@ -887,6 +889,8 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
> 		    opa_smi_check_local_returning_smp(opa_smp, device) == IB_SMI_DISCARD)
> 			goto out;
> 	} else {
>+		trace_ib_mad_handle_out_ib_smi(smp);
>+
> 		if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
> 		     IB_LID_PERMISSIVE &&
> 		     smi_handle_dr_smp_send(smp, rdma_cap_ib_switch(device), port_num) ==
>@@ -2119,6 +2123,8 @@ static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv
> 	enum smi_forward_action retsmi;
> 	struct ib_smp *smp = (struct ib_smp *)recv->mad;
>
>+	trace_ib_mad_handle_ib_smi(smp);
>+
> 	if (smi_handle_dr_smp_recv(smp,
> 				   rdma_cap_ib_switch(port_priv->device),
> 				   port_num,
>@@ -2204,6 +2210,8 @@ handle_opa_smi(struct ib_mad_port_private *port_priv,
> 	enum smi_forward_action retsmi;
> 	struct opa_smp *smp = (struct opa_smp *)recv->mad;
>
>+	trace_ib_mad_handle_opa_smi(smp);
>+
> 	if (opa_smi_handle_dr_smp_recv(smp,
> 				   rdma_cap_ib_switch(port_priv->device),
> 				   port_num,
>diff --git a/include/trace/events/ib_mad.h b/include/trace/events/ib_mad.h
>index 6f504c2af935..59363a083ecb 100644
>--- a/include/trace/events/ib_mad.h
>+++ b/include/trace/events/ib_mad.h
>@@ -295,6 +295,96 @@ DEFINE_EVENT(ib_mad_agent_template, ib_mad_unregister_agent,
> 	TP_ARGS(agent));
>
>
>+

Lots of extra lines?

>+DECLARE_EVENT_CLASS(ib_mad_opa_smi_template,
>+	TP_PROTO(struct opa_smp *smp),
>+	TP_ARGS(smp),
>+
>+	TP_STRUCT__entry(
>+		__field(u64,            mkey)
>+		__field(u32,            dr_slid)
>+		__field(u32,            dr_dlid)
>+		__field(u8,             hop_ptr)
>+		__field(u8,             hop_cnt)
>+		__array(u8,             initial_path, OPA_SMP_MAX_PATH_HOPS)
>+		__array(u8,             return_path, OPA_SMP_MAX_PATH_HOPS)

Lots of extra spaces?

>+	),
>+
>+	TP_fast_assign(
>+		__entry->hop_ptr = smp->hop_ptr;
>+		__entry->hop_cnt = smp->hop_cnt;
>+		__entry->mkey = smp->mkey;
>+		__entry->dr_slid = smp->route.dr.dr_slid;
>+		__entry->dr_dlid = smp->route.dr.dr_dlid;
>+		memcpy(__entry->initial_path, smp->route.dr.initial_path,
>+			OPA_SMP_MAX_PATH_HOPS);
>+		memcpy(__entry->return_path, smp->route.dr.return_path,
>+			OPA_SMP_MAX_PATH_HOPS);
>+	),
>+
>+	TP_printk("OPA SMP: hop_ptr %d hop_cnt %d " \
>+		  "mkey 0x%016llx dr_slid 0x%08x dr_dlid 0x%08x " \
>+		  "initial_path %*ph return_path %*ph ",
>+		__entry->hop_ptr, __entry->hop_cnt,
>+		be64_to_cpu(__entry->mkey), be32_to_cpu(__entry->dr_slid),
>+		be32_to_cpu(__entry->dr_dlid),
>+		OPA_SMP_MAX_PATH_HOPS, __entry->initial_path,
>+		OPA_SMP_MAX_PATH_HOPS, __entry->return_path
>+	)
>+);
>+
>+DEFINE_EVENT(ib_mad_opa_smi_template, ib_mad_handle_opa_smi,
>+	TP_PROTO(struct opa_smp *smp),
>+	TP_ARGS(smp));
>+DEFINE_EVENT(ib_mad_opa_smi_template, ib_mad_handle_out_opa_smi,
>+	TP_PROTO(struct opa_smp *smp),
>+	TP_ARGS(smp));
>+
>+

Lines again... :)

Reviewed-by: Michael J. Ruhl <michael.j.ruhl@intel.com>

>+DECLARE_EVENT_CLASS(ib_mad_opa_ib_template,
>+	TP_PROTO(struct ib_smp *smp),
>+	TP_ARGS(smp),
>+
>+	TP_STRUCT__entry(
>+		__field(u64,            mkey)
>+		__field(u32,            dr_slid)
>+		__field(u32,            dr_dlid)
>+		__field(u8,             hop_ptr)
>+		__field(u8,             hop_cnt)
>+		__array(u8,             initial_path, IB_SMP_MAX_PATH_HOPS)
>+		__array(u8,             return_path, IB_SMP_MAX_PATH_HOPS)
>+	),
>+
>+	TP_fast_assign(
>+		__entry->hop_ptr = smp->hop_ptr;
>+		__entry->hop_cnt = smp->hop_cnt;
>+		__entry->mkey = smp->mkey;
>+		__entry->dr_slid = smp->dr_slid;
>+		__entry->dr_dlid = smp->dr_dlid;
>+		memcpy(__entry->initial_path, smp->initial_path,
>+			IB_SMP_MAX_PATH_HOPS);
>+		memcpy(__entry->return_path, smp->return_path,
>+			IB_SMP_MAX_PATH_HOPS);
>+	),
>+
>+	TP_printk("OPA SMP: hop_ptr %d hop_cnt %d " \
>+		  "mkey 0x%016llx dr_slid 0x%04x dr_dlid 0x%04x " \
>+		  "initial_path %*ph return_path %*ph ",
>+		__entry->hop_ptr, __entry->hop_cnt,
>+		be64_to_cpu(__entry->mkey), be16_to_cpu(__entry->dr_slid),
>+		be16_to_cpu(__entry->dr_dlid),
>+		IB_SMP_MAX_PATH_HOPS, __entry->initial_path,
>+		IB_SMP_MAX_PATH_HOPS, __entry->return_path
>+	)
>+);
>+
>+DEFINE_EVENT(ib_mad_opa_ib_template, ib_mad_handle_ib_smi,
>+	TP_PROTO(struct ib_smp *smp),
>+	TP_ARGS(smp));
>+DEFINE_EVENT(ib_mad_opa_ib_template, ib_mad_handle_out_ib_smi,
>+	TP_PROTO(struct ib_smp *smp),
>+	TP_ARGS(smp));
>+
> #endif /* _TRACE_IB_MAD_H */
>
> #include <trace/define_trace.h>
>--
>2.20.1
Steven Rostedt March 18, 2019, 1:44 p.m. UTC | #2
On Sun, 17 Mar 2019 12:59:49 -0700
ira.weiny@intel.com wrote:

> From: Ira Weiny <ira.weiny@intel.com>
> 
> CC: Hal Rosenstock <hal@dev.mellanox.co.il>
> CC: Alexei Starovoitov <ast@kernel.org>
> CC: Leon Romanovsky <leon@kernel.org>
> CC: Jason Gunthorpe <jgg@ziepe.ca>
> CC: "Ruhl, Michael J" <michael.j.ruhl@intel.com>
> CC: Steven Rostedt (VMware) <rostedt@goodmis.org>
> Signed-off-by: Ira Weiny <ira.weiny@intel.com>
> 
> ---
> Changes from V2
> 	change field order for better ring buffer utilization
> 
>  drivers/infiniband/core/mad.c |  8 ++++
>  include/trace/events/ib_mad.h | 90 +++++++++++++++++++++++++++++++++++
>  2 files changed, 98 insertions(+)

For the tracing aspect.

Reviewed-by: Steven Rostedt (VMware) <rostedt@goodmis.org>

-- Steve

Patch

diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index e310c90a28b2..0060e066e752 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -862,6 +862,8 @@  static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
 	if (opa && smp->class_version == OPA_SM_CLASS_VERSION) {
 		u32 opa_drslid;
 
+		trace_ib_mad_handle_out_opa_smi(opa_smp);
+
 		if ((opa_get_smp_direction(opa_smp)
 		     ? opa_smp->route.dr.dr_dlid : opa_smp->route.dr.dr_slid) ==
 		     OPA_LID_PERMISSIVE &&
@@ -887,6 +889,8 @@  static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
 		    opa_smi_check_local_returning_smp(opa_smp, device) == IB_SMI_DISCARD)
 			goto out;
 	} else {
+		trace_ib_mad_handle_out_ib_smi(smp);
+
 		if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
 		     IB_LID_PERMISSIVE &&
 		     smi_handle_dr_smp_send(smp, rdma_cap_ib_switch(device), port_num) ==
@@ -2119,6 +2123,8 @@  static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv
 	enum smi_forward_action retsmi;
 	struct ib_smp *smp = (struct ib_smp *)recv->mad;
 
+	trace_ib_mad_handle_ib_smi(smp);
+
 	if (smi_handle_dr_smp_recv(smp,
 				   rdma_cap_ib_switch(port_priv->device),
 				   port_num,
@@ -2204,6 +2210,8 @@  handle_opa_smi(struct ib_mad_port_private *port_priv,
 	enum smi_forward_action retsmi;
 	struct opa_smp *smp = (struct opa_smp *)recv->mad;
 
+	trace_ib_mad_handle_opa_smi(smp);
+
 	if (opa_smi_handle_dr_smp_recv(smp,
 				   rdma_cap_ib_switch(port_priv->device),
 				   port_num,
diff --git a/include/trace/events/ib_mad.h b/include/trace/events/ib_mad.h
index 6f504c2af935..59363a083ecb 100644
--- a/include/trace/events/ib_mad.h
+++ b/include/trace/events/ib_mad.h
@@ -295,6 +295,96 @@  DEFINE_EVENT(ib_mad_agent_template, ib_mad_unregister_agent,
 	TP_ARGS(agent));
 
 
+
+DECLARE_EVENT_CLASS(ib_mad_opa_smi_template,
+	TP_PROTO(struct opa_smp *smp),
+	TP_ARGS(smp),
+
+	TP_STRUCT__entry(
+		__field(u64,            mkey)
+		__field(u32,            dr_slid)
+		__field(u32,            dr_dlid)
+		__field(u8,             hop_ptr)
+		__field(u8,             hop_cnt)
+		__array(u8,             initial_path, OPA_SMP_MAX_PATH_HOPS)
+		__array(u8,             return_path, OPA_SMP_MAX_PATH_HOPS)
+	),
+
+	TP_fast_assign(
+		__entry->hop_ptr = smp->hop_ptr;
+		__entry->hop_cnt = smp->hop_cnt;
+		__entry->mkey = smp->mkey;
+		__entry->dr_slid = smp->route.dr.dr_slid;
+		__entry->dr_dlid = smp->route.dr.dr_dlid;
+		memcpy(__entry->initial_path, smp->route.dr.initial_path,
+			OPA_SMP_MAX_PATH_HOPS);
+		memcpy(__entry->return_path, smp->route.dr.return_path,
+			OPA_SMP_MAX_PATH_HOPS);
+	),
+
+	TP_printk("OPA SMP: hop_ptr %d hop_cnt %d " \
+		  "mkey 0x%016llx dr_slid 0x%08x dr_dlid 0x%08x " \
+		  "initial_path %*ph return_path %*ph ",
+		__entry->hop_ptr, __entry->hop_cnt,
+		be64_to_cpu(__entry->mkey), be32_to_cpu(__entry->dr_slid),
+		be32_to_cpu(__entry->dr_dlid),
+		OPA_SMP_MAX_PATH_HOPS, __entry->initial_path,
+		OPA_SMP_MAX_PATH_HOPS, __entry->return_path
+	)
+);
+
+DEFINE_EVENT(ib_mad_opa_smi_template, ib_mad_handle_opa_smi,
+	TP_PROTO(struct opa_smp *smp),
+	TP_ARGS(smp));
+DEFINE_EVENT(ib_mad_opa_smi_template, ib_mad_handle_out_opa_smi,
+	TP_PROTO(struct opa_smp *smp),
+	TP_ARGS(smp));
+
+
+DECLARE_EVENT_CLASS(ib_mad_opa_ib_template,
+	TP_PROTO(struct ib_smp *smp),
+	TP_ARGS(smp),
+
+	TP_STRUCT__entry(
+		__field(u64,            mkey)
+		__field(u32,            dr_slid)
+		__field(u32,            dr_dlid)
+		__field(u8,             hop_ptr)
+		__field(u8,             hop_cnt)
+		__array(u8,             initial_path, IB_SMP_MAX_PATH_HOPS)
+		__array(u8,             return_path, IB_SMP_MAX_PATH_HOPS)
+	),
+
+	TP_fast_assign(
+		__entry->hop_ptr = smp->hop_ptr;
+		__entry->hop_cnt = smp->hop_cnt;
+		__entry->mkey = smp->mkey;
+		__entry->dr_slid = smp->dr_slid;
+		__entry->dr_dlid = smp->dr_dlid;
+		memcpy(__entry->initial_path, smp->initial_path,
+			IB_SMP_MAX_PATH_HOPS);
+		memcpy(__entry->return_path, smp->return_path,
+			IB_SMP_MAX_PATH_HOPS);
+	),
+
+	TP_printk("OPA SMP: hop_ptr %d hop_cnt %d " \
+		  "mkey 0x%016llx dr_slid 0x%04x dr_dlid 0x%04x " \
+		  "initial_path %*ph return_path %*ph ",
+		__entry->hop_ptr, __entry->hop_cnt,
+		be64_to_cpu(__entry->mkey), be16_to_cpu(__entry->dr_slid),
+		be16_to_cpu(__entry->dr_dlid),
+		IB_SMP_MAX_PATH_HOPS, __entry->initial_path,
+		IB_SMP_MAX_PATH_HOPS, __entry->return_path
+	)
+);
+
+DEFINE_EVENT(ib_mad_opa_ib_template, ib_mad_handle_ib_smi,
+	TP_PROTO(struct ib_smp *smp),
+	TP_ARGS(smp));
+DEFINE_EVENT(ib_mad_opa_ib_template, ib_mad_handle_out_ib_smi,
+	TP_PROTO(struct ib_smp *smp),
+	TP_ARGS(smp));
+
 #endif /* _TRACE_IB_MAD_H */
 
 #include <trace/define_trace.h>