diff mbox series

[V6,3/6] IB/MAD: Add agent trace points

Message ID 20190317195950.2991-4-ira.weiny@intel.com (mailing list archive)
State Superseded
Headers show
Series Add MAD stack trace points | expand

Commit Message

Ira Weiny March 17, 2019, 7:59 p.m. UTC
From: Ira Weiny <ira.weiny@intel.com>

Trace agent details when agents are [un]registered.  In addition,
report agent details on send/recv.

CC: Hal Rosenstock <hal@dev.mellanox.co.il>
CC: Alexei Starovoitov <ast@kernel.org>
CC: Leon Romanovsky <leon@kernel.org>
CC: Jason Gunthorpe <jgg@ziepe.ca>
CC: "Ruhl, Michael J" <michael.j.ruhl@intel.com>
CC: Steven Rostedt (VMware) <rostedt@goodmis.org>
Signed-off-by: Ira Weiny <ira.weiny@intel.com>

---
Changes from V4
	Cleanup checkpatch

Changes from V3
	Change dev_name to index

Changes from V2
	Change dev_name to string
	Reorder fields for more efficient ring buffer utilization

 drivers/infiniband/core/mad.c |  4 +++
 include/trace/events/ib_mad.h | 46 +++++++++++++++++++++++++++++++++++
 2 files changed, 50 insertions(+)

Comments

Michael J. Ruhl March 18, 2019, 12:46 p.m. UTC | #1
>-----Original Message-----
>From: Weiny, Ira
>Sent: Sunday, March 17, 2019 4:00 PM
>To: Jason Gunthorpe <jgg@ziepe.ca>; Steven Rostedt
><rostedt@goodmis.org>
>Cc: Ingo Molnar <mingo@redhat.com>; linux-rdma@vger.kernel.org; Weiny,
>Ira <ira.weiny@intel.com>; Hal Rosenstock <hal@dev.mellanox.co.il>; Alexei
>Starovoitov <ast@kernel.org>; Leon Romanovsky <leon@kernel.org>; Ruhl,
>Michael J <michael.j.ruhl@intel.com>
>Subject: [PATCH V6 3/6] IB/MAD: Add agent trace points
>
>From: Ira Weiny <ira.weiny@intel.com>
>
>Trace agent details when agents are [un]registered.  In addition,
>report agent details on send/recv.
>
>CC: Hal Rosenstock <hal@dev.mellanox.co.il>
>CC: Alexei Starovoitov <ast@kernel.org>
>CC: Leon Romanovsky <leon@kernel.org>
>CC: Jason Gunthorpe <jgg@ziepe.ca>
>CC: "Ruhl, Michael J" <michael.j.ruhl@intel.com>
>CC: Steven Rostedt (VMware) <rostedt@goodmis.org>
>Signed-off-by: Ira Weiny <ira.weiny@intel.com>
>
>---
>Changes from V4
>	Cleanup checkpatch
>
>Changes from V3
>	Change dev_name to index
>
>Changes from V2
>	Change dev_name to string
>	Reorder fields for more efficient ring buffer utilization
>
> drivers/infiniband/core/mad.c |  4 +++
> include/trace/events/ib_mad.h | 46 +++++++++++++++++++++++++++++++++++
> 2 files changed, 50 insertions(+)
>
>diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
>index f91f7ce4c007..e310c90a28b2 100644
>--- a/drivers/infiniband/core/mad.c
>+++ b/drivers/infiniband/core/mad.c
>@@ -484,6 +484,7 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
> 	}
> 	spin_unlock_irq(&port_priv->reg_lock);
>
>+	trace_ib_mad_create_agent(mad_agent_priv);
> 	return &mad_agent_priv->agent;
> error6:
> 	spin_unlock_irq(&port_priv->reg_lock);
>@@ -641,6 +642,7 @@ static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
> 	struct ib_mad_port_private *port_priv;
>
> 	/* Note that we could still be handling received MADs */
>+	trace_ib_mad_unregister_agent(mad_agent_priv);
>
> 	/*
> 	 * Canceling all sends results in dropping received response
>@@ -2375,6 +2377,7 @@ static void ib_mad_recv_done(struct ib_cq *cq, struct ib_wc *wc)
>
> 	mad_agent = find_mad_agent(port_priv, (const struct ib_mad_hdr *)recv->mad);
> 	if (mad_agent) {
>+		trace_ib_mad_recv_done_agent(mad_agent);
> 		ib_mad_complete_recv(mad_agent, &recv->header.recv_wc);
> 		/*
> 		 * recv is freed up in error cases in ib_mad_complete_recv
>@@ -2539,6 +2542,7 @@ static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc)
> 	send_queue = mad_list->mad_queue;
> 	qp_info = send_queue->qp_info;
>
>+	trace_ib_mad_send_done_agent(mad_send_wr->mad_agent_priv);
> 	trace_ib_mad_send_done_handler(mad_send_wr, wc);
>
> retry:
>diff --git a/include/trace/events/ib_mad.h b/include/trace/events/ib_mad.h
>index 245a916bfd5a..6f504c2af935 100644
>--- a/include/trace/events/ib_mad.h
>+++ b/include/trace/events/ib_mad.h
>@@ -248,6 +248,52 @@ TRACE_EVENT(ib_mad_recv_done_handler,
> 	)
> );
>
>+DECLARE_EVENT_CLASS(ib_mad_agent_template,
>+	TP_PROTO(struct ib_mad_agent_private *agent),
>+	TP_ARGS(agent),
>+
>+	TP_STRUCT__entry(
>+		__field(u32,            dev_index)
>+		__field(u32,            hi_tid)
>+		__field(u8,             port_num)
>+		__field(u8,             mgmt_class)
>+		__field(u8,             mgmt_class_version)

Seems to be an awful lot of extra spaces between the type and the variable...

Is my email client adding them, or are they tabs?

Reviewed-by: Michael J. Ruhl <michael.j.ruhl@intel.com>

>+	),
>+
>+	TP_fast_assign(
>+		__entry->dev_index = agent->agent.device->index;
>+		__entry->port_num = agent->agent.port_num;
>+		__entry->hi_tid = agent->agent.hi_tid;
>+
>+		if (agent->reg_req) {
>			__entry->mgmt_class = agent->reg_req->mgmt_class;
>+			__entry->mgmt_class_version =
>+				agent->reg_req->mgmt_class_version;
>+		} else {
>+			__entry->mgmt_class = 0;
>+			__entry->mgmt_class_version = 0;
>+		}
>+	),
>+
>+	TP_printk("%d:%d mad agent : hi_tid 0x%08x class 0x%02x class_ver 0x%02x",
>+		__entry->dev_index, __entry->port_num,
>+		__entry->hi_tid, __entry->mgmt_class,
>+		__entry->mgmt_class_version
>+	)
>+);
>+DEFINE_EVENT(ib_mad_agent_template, ib_mad_recv_done_agent,
>+	TP_PROTO(struct ib_mad_agent_private *agent),
>+	TP_ARGS(agent));
>+DEFINE_EVENT(ib_mad_agent_template, ib_mad_send_done_agent,
>+	TP_PROTO(struct ib_mad_agent_private *agent),
>+	TP_ARGS(agent));
>+DEFINE_EVENT(ib_mad_agent_template, ib_mad_create_agent,
>+	TP_PROTO(struct ib_mad_agent_private *agent),
>+	TP_ARGS(agent));
>+DEFINE_EVENT(ib_mad_agent_template, ib_mad_unregister_agent,
>+	TP_PROTO(struct ib_mad_agent_private *agent),
>+	TP_ARGS(agent));
>+
>
> #endif /* _TRACE_IB_MAD_H */
>
>--
>2.20.1
Steven Rostedt March 18, 2019, 1:37 p.m. UTC | #2
On Sun, 17 Mar 2019 12:59:47 -0700
ira.weiny@intel.com wrote:

> From: Ira Weiny <ira.weiny@intel.com>
> 
> Trace agent details when agents are [un]registered.  In addition,
> report agent details on send/recv.
> 
> CC: Hal Rosenstock <hal@dev.mellanox.co.il>
> CC: Alexei Starovoitov <ast@kernel.org>
> CC: Leon Romanovsky <leon@kernel.org>
> CC: Jason Gunthorpe <jgg@ziepe.ca>
> CC: "Ruhl, Michael J" <michael.j.ruhl@intel.com>
> CC: Steven Rostedt (VMware) <rostedt@goodmis.org>
> Signed-off-by: Ira Weiny <ira.weiny@intel.com>

For the tracing aspect.

Reviewed-by: Steven Rostedt (VMware) <rostedt@goodmis.org>

-- Steve
diff mbox series

Patch

diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index f91f7ce4c007..e310c90a28b2 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -484,6 +484,7 @@  struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
 	}
 	spin_unlock_irq(&port_priv->reg_lock);
 
+	trace_ib_mad_create_agent(mad_agent_priv);
 	return &mad_agent_priv->agent;
 error6:
 	spin_unlock_irq(&port_priv->reg_lock);
@@ -641,6 +642,7 @@  static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
 	struct ib_mad_port_private *port_priv;
 
 	/* Note that we could still be handling received MADs */
+	trace_ib_mad_unregister_agent(mad_agent_priv);
 
 	/*
 	 * Canceling all sends results in dropping received response
@@ -2375,6 +2377,7 @@  static void ib_mad_recv_done(struct ib_cq *cq, struct ib_wc *wc)
 
 	mad_agent = find_mad_agent(port_priv, (const struct ib_mad_hdr *)recv->mad);
 	if (mad_agent) {
+		trace_ib_mad_recv_done_agent(mad_agent);
 		ib_mad_complete_recv(mad_agent, &recv->header.recv_wc);
 		/*
 		 * recv is freed up in error cases in ib_mad_complete_recv
@@ -2539,6 +2542,7 @@  static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc)
 	send_queue = mad_list->mad_queue;
 	qp_info = send_queue->qp_info;
 
+	trace_ib_mad_send_done_agent(mad_send_wr->mad_agent_priv);
 	trace_ib_mad_send_done_handler(mad_send_wr, wc);
 
 retry:
diff --git a/include/trace/events/ib_mad.h b/include/trace/events/ib_mad.h
index 245a916bfd5a..6f504c2af935 100644
--- a/include/trace/events/ib_mad.h
+++ b/include/trace/events/ib_mad.h
@@ -248,6 +248,52 @@  TRACE_EVENT(ib_mad_recv_done_handler,
 	)
 );
 
+DECLARE_EVENT_CLASS(ib_mad_agent_template,
+	TP_PROTO(struct ib_mad_agent_private *agent),
+	TP_ARGS(agent),
+
+	TP_STRUCT__entry(
+		__field(u32,            dev_index)
+		__field(u32,            hi_tid)
+		__field(u8,             port_num)
+		__field(u8,             mgmt_class)
+		__field(u8,             mgmt_class_version)
+	),
+
+	TP_fast_assign(
+		__entry->dev_index = agent->agent.device->index;
+		__entry->port_num = agent->agent.port_num;
+		__entry->hi_tid = agent->agent.hi_tid;
+
+		if (agent->reg_req) {
+			__entry->mgmt_class = agent->reg_req->mgmt_class;
+			__entry->mgmt_class_version =
+				agent->reg_req->mgmt_class_version;
+		} else {
+			__entry->mgmt_class = 0;
+			__entry->mgmt_class_version = 0;
+		}
+	),
+
+	TP_printk("%d:%d mad agent : hi_tid 0x%08x class 0x%02x class_ver 0x%02x",
+		__entry->dev_index, __entry->port_num,
+		__entry->hi_tid, __entry->mgmt_class,
+		__entry->mgmt_class_version
+	)
+);
+DEFINE_EVENT(ib_mad_agent_template, ib_mad_recv_done_agent,
+	TP_PROTO(struct ib_mad_agent_private *agent),
+	TP_ARGS(agent));
+DEFINE_EVENT(ib_mad_agent_template, ib_mad_send_done_agent,
+	TP_PROTO(struct ib_mad_agent_private *agent),
+	TP_ARGS(agent));
+DEFINE_EVENT(ib_mad_agent_template, ib_mad_create_agent,
+	TP_PROTO(struct ib_mad_agent_private *agent),
+	TP_ARGS(agent));
+DEFINE_EVENT(ib_mad_agent_template, ib_mad_unregister_agent,
+	TP_PROTO(struct ib_mad_agent_private *agent),
+	TP_ARGS(agent));
+
 
 #endif /* _TRACE_IB_MAD_H */