diff mbox

[v1,rdma-next,4/4] iw_cxgb4: dump detailed provider-specific QP information

Message ID 7ad718def54cdee1dfd095143fb13dd128eb69c9.1521827521.git.swise@opengridcomputing.com (mailing list archive)
State Superseded
Headers show

Commit Message

Steve Wise March 23, 2018, 4:49 p.m. UTC
Provide a cxgb4-specific function to fill in qp state details.
This allows dumping important c4iw_qp state useful for debugging.

Included in the dump are the t4_sq, t4_rq structs, plus a dump
of the t4_swsqe and t4_swrqe descriptors for the first and last
pending entries.

Signed-off-by: Steve Wise <swise@opengridcomputing.com>
---
 drivers/infiniband/hw/cxgb4/Makefile   |   3 +-
 drivers/infiniband/hw/cxgb4/iw_cxgb4.h |   6 +
 drivers/infiniband/hw/cxgb4/provider.c |   9 +
 drivers/infiniband/hw/cxgb4/restrack.c | 363 +++++++++++++++++++++++++++++++++
 4 files changed, 380 insertions(+), 1 deletion(-)
 create mode 100644 drivers/infiniband/hw/cxgb4/restrack.c

Comments

Leon Romanovsky March 24, 2018, 9:55 a.m. UTC | #1
On Fri, Mar 23, 2018 at 09:49:51AM -0700, Steve Wise wrote:
> Provide a cxgb4-specific function to fill in qp state details.
> This allows dumping important c4iw_qp state useful for debugging.
>
> Included in the dump are the t4_sq, t4_rq structs, plus a dump
> of the t4_swsqe and t4_swrqe descriptors for the first and last
> pending entries.
>
> Signed-off-by: Steve Wise <swise@opengridcomputing.com>
> ---
>  drivers/infiniband/hw/cxgb4/Makefile   |   3 +-
>  drivers/infiniband/hw/cxgb4/iw_cxgb4.h |   6 +
>  drivers/infiniband/hw/cxgb4/provider.c |   9 +
>  drivers/infiniband/hw/cxgb4/restrack.c | 363 +++++++++++++++++++++++++++++++++
>  4 files changed, 380 insertions(+), 1 deletion(-)
>  create mode 100644 drivers/infiniband/hw/cxgb4/restrack.c
>
> diff --git a/drivers/infiniband/hw/cxgb4/Makefile b/drivers/infiniband/hw/cxgb4/Makefile
> index fa40b68..9edd920 100644
> --- a/drivers/infiniband/hw/cxgb4/Makefile
> +++ b/drivers/infiniband/hw/cxgb4/Makefile
> @@ -3,4 +3,5 @@ ccflags-y += -Idrivers/net/ethernet/chelsio/libcxgb
>
>  obj-$(CONFIG_INFINIBAND_CXGB4) += iw_cxgb4.o
>
> -iw_cxgb4-y :=  device.o cm.o provider.o mem.o cq.o qp.o resource.o ev.o id_table.o
> +iw_cxgb4-y :=  device.o cm.o provider.o mem.o cq.o qp.o resource.o ev.o id_table.o \
> +	       restrack.o
> diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
> index cc92900..bbdc8dd 100644
> --- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
> +++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
> @@ -55,6 +55,7 @@
>  #include <rdma/iw_cm.h>
>  #include <rdma/rdma_netlink.h>
>  #include <rdma/iw_portmap.h>
> +#include <rdma/restrack.h>
>
>  #include "cxgb4.h"
>  #include "cxgb4_uld.h"
> @@ -1078,4 +1079,9 @@ void __iomem *c4iw_bar2_addrs(struct c4iw_rdev *rdev, unsigned int qid,
>  void c4iw_invalidate_mr(struct c4iw_dev *rhp, u32 rkey);
>  struct c4iw_wr_wait *c4iw_alloc_wr_wait(gfp_t gfp);
>
> +typedef int c4iw_restrack_func(struct sk_buff *msg,
> +			       struct netlink_callback *cb,
> +			       struct rdma_restrack_entry *res);
> +extern c4iw_restrack_func *c4iw_restrack_funcs[RDMA_RESTRACK_MAX];
> +
>  #endif
> diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
> index 0b9cc73..1dda32b 100644
> --- a/drivers/infiniband/hw/cxgb4/provider.c
> +++ b/drivers/infiniband/hw/cxgb4/provider.c
> @@ -551,6 +551,14 @@ static struct net_device *get_netdev(struct ib_device *dev, u8 port)
>  	return ndev;
>  }
>
> +static int fill_res_entry(struct sk_buff *msg, struct netlink_callback *cb,
> +			  struct rdma_restrack_entry *res)
> +{
> +	return (res->type < ARRAY_SIZE(c4iw_restrack_funcs) &&
> +		c4iw_restrack_funcs[res->type]) ?
> +		c4iw_restrack_funcs[res->type](msg, cb, res) : 0;
> +}
> +
>  void c4iw_register_device(struct work_struct *work)
>  {
>  	int ret;
> @@ -645,6 +653,7 @@ void c4iw_register_device(struct work_struct *work)
>  	dev->ibdev.iwcm->add_ref = c4iw_qp_add_ref;
>  	dev->ibdev.iwcm->rem_ref = c4iw_qp_rem_ref;
>  	dev->ibdev.iwcm->get_qp = c4iw_get_qp;
> +	dev->ibdev.res.fill_res_entry = fill_res_entry;
>  	memcpy(dev->ibdev.iwcm->ifname, dev->rdev.lldi.ports[0]->name,
>  	       sizeof(dev->ibdev.iwcm->ifname));
>
> diff --git a/drivers/infiniband/hw/cxgb4/restrack.c b/drivers/infiniband/hw/cxgb4/restrack.c
> new file mode 100644
> index 0000000..a67f509
> --- /dev/null
> +++ b/drivers/infiniband/hw/cxgb4/restrack.c
> @@ -0,0 +1,363 @@
> +/*
> + * Copyright (c) 2018 Chelsio, Inc. All rights reserved.
> + *
> + * This software is available to you under a choice of one of two
> + * licenses.  You may choose to be licensed under the terms of the GNU
> + * General Public License (GPL) Version 2, available from the file
> + * COPYING in the main directory of this source tree, or the
> + * OpenIB.org BSD license below:
> + *
> + *     Redistribution and use in source and binary forms, with or
> + *     without modification, are permitted provided that the following
> + *     conditions are met:
> + *
> + *      - Redistributions of source code must retain the above
> + *        copyright notice, this list of conditions and the following
> + *        disclaimer.
> + *
> + *      - Redistributions in binary form must reproduce the above
> + *        copyright notice, this list of conditions and the following
> + *        disclaimer in the documentation and/or other materials
> + *        provided with the distribution.
> + *
> + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
> + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
> + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
> + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
> + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
> + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
> + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
> + * SOFTWARE.
> + */
> +
> +#include "iw_cxgb4.h"
> +
> +static int fill_sq(struct sk_buff *msg, struct t4_wq *wq)
> +{
> +	struct nlattr *entry_attr;
> +
> +	/* WQ+SQ */
> +	entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_PROVIDER_ENTRY);
> +	if (!entry_attr)
> +		goto err;
> +
> +	if (nla_put_string(msg, RDMA_NLDEV_ATTR_PROVIDER_STRING, "sqid"))
> +		goto err_cancel_entry;
> +	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PROVIDER_U32, wq->sq.qid))
> +		goto err_cancel_entry;
> +	if (nla_put_string(msg, RDMA_NLDEV_ATTR_PROVIDER_STRING, "flushed"))
> +		goto err_cancel_entry;
> +	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PROVIDER_U32, wq->flushed))
> +		goto err_cancel_entry;
> +	if (nla_put_string(msg, RDMA_NLDEV_ATTR_PROVIDER_STRING, "memsize"))
> +		goto err_cancel_entry;
> +	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PROVIDER_U32, wq->sq.memsize))
> +		goto err_cancel_entry;
> +	if (nla_put_string(msg, RDMA_NLDEV_ATTR_PROVIDER_STRING, "cidx"))
> +		goto err_cancel_entry;
> +	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PROVIDER_U32, wq->sq.cidx))
> +		goto err_cancel_entry;
> +	if (nla_put_string(msg, RDMA_NLDEV_ATTR_PROVIDER_STRING, "pidx"))
> +		goto err_cancel_entry;
> +	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PROVIDER_U32, wq->sq.pidx))
> +		goto err_cancel_entry;
> +	if (nla_put_string(msg, RDMA_NLDEV_ATTR_PROVIDER_STRING, "wq_pidx"))
> +		goto err_cancel_entry;
> +	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PROVIDER_U32, wq->sq.wq_pidx))
> +		goto err_cancel_entry;
> +	if (nla_put_string(msg, RDMA_NLDEV_ATTR_PROVIDER_STRING,
> +			   "flush_cidx"))
> +		goto err_cancel_entry;
> +	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PROVIDER_U32,
> +			wq->sq.flush_cidx))
> +		goto err_cancel_entry;
> +	if (nla_put_string(msg, RDMA_NLDEV_ATTR_PROVIDER_STRING, "in_use"))
> +		goto err_cancel_entry;
> +	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PROVIDER_U32, wq->sq.in_use))
> +		goto err_cancel_entry;
> +	if (nla_put_string(msg, RDMA_NLDEV_ATTR_PROVIDER_STRING, "size"))
> +		goto err_cancel_entry;
> +	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PROVIDER_U32, wq->sq.size))
> +		goto err_cancel_entry;
> +	if (nla_put_string(msg, RDMA_NLDEV_ATTR_PROVIDER_STRING, "flags"))
> +		goto err_cancel_entry;
> +	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PROVIDER_X32, wq->sq.flags))
> +		goto err_cancel_entry;
> +
> +	nla_nest_end(msg, entry_attr);
> +	return 0;
> +
> +err_cancel_entry:
> +	nla_nest_cancel(msg, entry_attr);
> +err:
> +	return -EMSGSIZE;
> +}
> +
> +static int fill_rq(struct sk_buff *msg, struct t4_wq *wq)
> +{
> +	struct nlattr *entry_attr;
> +
> +	/* RQ */
> +	entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_PROVIDER_ENTRY);
> +	if (!entry_attr)
> +		goto err;
> +
> +	if (nla_put_string(msg, RDMA_NLDEV_ATTR_PROVIDER_STRING, "rqid"))
> +		goto err_cancel_entry;
> +	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PROVIDER_U32, wq->rq.qid))
> +		goto err_cancel_entry;
> +	if (nla_put_string(msg, RDMA_NLDEV_ATTR_PROVIDER_STRING, "memsize"))
> +		goto err_cancel_entry;
> +	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PROVIDER_U32, wq->rq.memsize))
> +		goto err_cancel_entry;
> +	if (nla_put_string(msg, RDMA_NLDEV_ATTR_PROVIDER_STRING, "cidx"))
> +		goto err_cancel_entry;
> +	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PROVIDER_U32, wq->rq.cidx))
> +		goto err_cancel_entry;
> +	if (nla_put_string(msg, RDMA_NLDEV_ATTR_PROVIDER_STRING, "pidx"))
> +		goto err_cancel_entry;
> +	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PROVIDER_U32, wq->rq.pidx))
> +		goto err_cancel_entry;
> +	if (nla_put_string(msg, RDMA_NLDEV_ATTR_PROVIDER_STRING, "wq_pidx"))
> +		goto err_cancel_entry;
> +	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PROVIDER_U32, wq->rq.wq_pidx))
> +		goto err_cancel_entry;
> +	if (nla_put_string(msg, RDMA_NLDEV_ATTR_PROVIDER_STRING, "msn"))
> +		goto err_cancel_entry;
> +	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PROVIDER_U32, wq->rq.msn))
> +		goto err_cancel_entry;
> +	if (nla_put_string(msg, RDMA_NLDEV_ATTR_PROVIDER_STRING,
> +			   "rqt_hwaddr"))
> +		goto err_cancel_entry;
> +	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PROVIDER_X32,
> +			wq->rq.rqt_hwaddr))
> +		goto err_cancel_entry;
> +	if (nla_put_string(msg, RDMA_NLDEV_ATTR_PROVIDER_STRING, "rqt_size"))
> +		goto err_cancel_entry;
> +	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PROVIDER_U32, wq->rq.rqt_size))
> +		goto err_cancel_entry;
> +	if (nla_put_string(msg, RDMA_NLDEV_ATTR_PROVIDER_STRING, "in_use"))
> +		goto err_cancel_entry;
> +	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PROVIDER_U32, wq->rq.in_use))
> +		goto err_cancel_entry;
> +	if (nla_put_string(msg, RDMA_NLDEV_ATTR_PROVIDER_STRING, "size"))
> +		goto err_cancel_entry;
> +	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PROVIDER_U32, wq->rq.size))
> +		goto err_cancel_entry;
> +
> +	nla_nest_end(msg, entry_attr);
> +	return 0;
> +
> +err_cancel_entry:
> +	nla_nest_cancel(msg, entry_attr);
> +err:
> +	return -EMSGSIZE;
> +}

The above is a perfect example of my claim that most of the data will be
simple u32 which is default in netlink. In such case, you won't need to
provide type identification at all.

Thanks
Steve Wise March 24, 2018, 7:47 p.m. UTC | #2
> -----Original Message-----
> From: Leon Romanovsky <leon@kernel.org>
> Sent: Saturday, March 24, 2018 4:56 AM
> To: Steve Wise <swise@opengridcomputing.com>
> Cc: jgg@mellanox.com; dledford@redhat.com; linux-rdma@vger.kernel.org
> Subject: Re: [PATCH v1 rdma-next 4/4] iw_cxgb4: dump detailed provider-
> specific QP information
> 
> On Fri, Mar 23, 2018 at 09:49:51AM -0700, Steve Wise wrote:
> > Provide a cxgb4-specific function to fill in qp state details.
> > This allows dumping important c4iw_qp state useful for debugging.
> >
> > Included in the dump are the t4_sq, t4_rq structs, plus a dump
> > of the t4_swsqe and t4_swrqe descriptors for the first and last
> > pending entries.
> >
> > Signed-off-by: Steve Wise <swise@opengridcomputing.com>
> > ---
> >  drivers/infiniband/hw/cxgb4/Makefile   |   3 +-
> >  drivers/infiniband/hw/cxgb4/iw_cxgb4.h |   6 +
> >  drivers/infiniband/hw/cxgb4/provider.c |   9 +
> >  drivers/infiniband/hw/cxgb4/restrack.c | 363
> +++++++++++++++++++++++++++++++++
> >  4 files changed, 380 insertions(+), 1 deletion(-)
> >  create mode 100644 drivers/infiniband/hw/cxgb4/restrack.c
> >
> > diff --git a/drivers/infiniband/hw/cxgb4/Makefile
> b/drivers/infiniband/hw/cxgb4/Makefile
> > index fa40b68..9edd920 100644
> > --- a/drivers/infiniband/hw/cxgb4/Makefile
> > +++ b/drivers/infiniband/hw/cxgb4/Makefile
> > @@ -3,4 +3,5 @@ ccflags-y += -Idrivers/net/ethernet/chelsio/libcxgb
> >
> >  obj-$(CONFIG_INFINIBAND_CXGB4) += iw_cxgb4.o
> >
> > -iw_cxgb4-y :=  device.o cm.o provider.o mem.o cq.o qp.o resource.o ev.o
> id_table.o
> > +iw_cxgb4-y :=  device.o cm.o provider.o mem.o cq.o qp.o resource.o ev.o
> id_table.o \
> > +	       restrack.o
> > diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
> b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
> > index cc92900..bbdc8dd 100644
> > --- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
> > +++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
> > @@ -55,6 +55,7 @@
> >  #include <rdma/iw_cm.h>
> >  #include <rdma/rdma_netlink.h>
> >  #include <rdma/iw_portmap.h>
> > +#include <rdma/restrack.h>
> >
> >  #include "cxgb4.h"
> >  #include "cxgb4_uld.h"
> > @@ -1078,4 +1079,9 @@ void __iomem *c4iw_bar2_addrs(struct
> c4iw_rdev *rdev, unsigned int qid,
> >  void c4iw_invalidate_mr(struct c4iw_dev *rhp, u32 rkey);
> >  struct c4iw_wr_wait *c4iw_alloc_wr_wait(gfp_t gfp);
> >
> > +typedef int c4iw_restrack_func(struct sk_buff *msg,
> > +			       struct netlink_callback *cb,
> > +			       struct rdma_restrack_entry *res);
> > +extern c4iw_restrack_func *c4iw_restrack_funcs[RDMA_RESTRACK_MAX];
> > +
> >  #endif
> > diff --git a/drivers/infiniband/hw/cxgb4/provider.c
> b/drivers/infiniband/hw/cxgb4/provider.c
> > index 0b9cc73..1dda32b 100644
> > --- a/drivers/infiniband/hw/cxgb4/provider.c
> > +++ b/drivers/infiniband/hw/cxgb4/provider.c
> > @@ -551,6 +551,14 @@ static struct net_device *get_netdev(struct
> ib_device *dev, u8 port)
> >  	return ndev;
> >  }
> >
> > +static int fill_res_entry(struct sk_buff *msg, struct netlink_callback
*cb,
> > +			  struct rdma_restrack_entry *res)
> > +{
> > +	return (res->type < ARRAY_SIZE(c4iw_restrack_funcs) &&
> > +		c4iw_restrack_funcs[res->type]) ?
> > +		c4iw_restrack_funcs[res->type](msg, cb, res) : 0;
> > +}
> > +
> >  void c4iw_register_device(struct work_struct *work)
> >  {
> >  	int ret;
> > @@ -645,6 +653,7 @@ void c4iw_register_device(struct work_struct
> *work)
> >  	dev->ibdev.iwcm->add_ref = c4iw_qp_add_ref;
> >  	dev->ibdev.iwcm->rem_ref = c4iw_qp_rem_ref;
> >  	dev->ibdev.iwcm->get_qp = c4iw_get_qp;
> > +	dev->ibdev.res.fill_res_entry = fill_res_entry;
> >  	memcpy(dev->ibdev.iwcm->ifname, dev->rdev.lldi.ports[0]->name,
> >  	       sizeof(dev->ibdev.iwcm->ifname));
> >
> > diff --git a/drivers/infiniband/hw/cxgb4/restrack.c
> b/drivers/infiniband/hw/cxgb4/restrack.c
> > new file mode 100644
> > index 0000000..a67f509
> > --- /dev/null
> > +++ b/drivers/infiniband/hw/cxgb4/restrack.c
> > @@ -0,0 +1,363 @@
> > +/*
> > + * Copyright (c) 2018 Chelsio, Inc. All rights reserved.
> > + *
> > + * This software is available to you under a choice of one of two
> > + * licenses.  You may choose to be licensed under the terms of the GNU
> > + * General Public License (GPL) Version 2, available from the file
> > + * COPYING in the main directory of this source tree, or the
> > + * OpenIB.org BSD license below:
> > + *
> > + *     Redistribution and use in source and binary forms, with or
> > + *     without modification, are permitted provided that the following
> > + *     conditions are met:
> > + *
> > + *      - Redistributions of source code must retain the above
> > + *        copyright notice, this list of conditions and the following
> > + *        disclaimer.
> > + *
> > + *      - Redistributions in binary form must reproduce the above
> > + *        copyright notice, this list of conditions and the following
> > + *        disclaimer in the documentation and/or other materials
> > + *        provided with the distribution.
> > + *
> > + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
> KIND,
> > + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
> WARRANTIES OF
> > + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
> > + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
> COPYRIGHT HOLDERS
> > + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
> IN AN
> > + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
> OR IN
> > + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
> IN THE
> > + * SOFTWARE.
> > + */
> > +
> > +#include "iw_cxgb4.h"
> > +
> > +static int fill_sq(struct sk_buff *msg, struct t4_wq *wq)
> > +{
> > +	struct nlattr *entry_attr;
> > +
> > +	/* WQ+SQ */
> > +	entry_attr = nla_nest_start(msg,
> RDMA_NLDEV_ATTR_PROVIDER_ENTRY);
> > +	if (!entry_attr)
> > +		goto err;
> > +
> > +	if (nla_put_string(msg, RDMA_NLDEV_ATTR_PROVIDER_STRING,
> "sqid"))
> > +		goto err_cancel_entry;
> > +	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PROVIDER_U32, wq-
> >sq.qid))
> > +		goto err_cancel_entry;
> > +	if (nla_put_string(msg, RDMA_NLDEV_ATTR_PROVIDER_STRING,
> "flushed"))
> > +		goto err_cancel_entry;
> > +	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PROVIDER_U32, wq-
> >flushed))
> > +		goto err_cancel_entry;
> > +	if (nla_put_string(msg, RDMA_NLDEV_ATTR_PROVIDER_STRING,
> "memsize"))
> > +		goto err_cancel_entry;
> > +	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PROVIDER_U32, wq-
> >sq.memsize))
> > +		goto err_cancel_entry;
> > +	if (nla_put_string(msg, RDMA_NLDEV_ATTR_PROVIDER_STRING,
> "cidx"))
> > +		goto err_cancel_entry;
> > +	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PROVIDER_U32, wq-
> >sq.cidx))
> > +		goto err_cancel_entry;
> > +	if (nla_put_string(msg, RDMA_NLDEV_ATTR_PROVIDER_STRING,
> "pidx"))
> > +		goto err_cancel_entry;
> > +	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PROVIDER_U32, wq-
> >sq.pidx))
> > +		goto err_cancel_entry;
> > +	if (nla_put_string(msg, RDMA_NLDEV_ATTR_PROVIDER_STRING,
> "wq_pidx"))
> > +		goto err_cancel_entry;
> > +	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PROVIDER_U32, wq-
> >sq.wq_pidx))
> > +		goto err_cancel_entry;
> > +	if (nla_put_string(msg, RDMA_NLDEV_ATTR_PROVIDER_STRING,
> > +			   "flush_cidx"))
> > +		goto err_cancel_entry;
> > +	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PROVIDER_U32,
> > +			wq->sq.flush_cidx))
> > +		goto err_cancel_entry;
> > +	if (nla_put_string(msg, RDMA_NLDEV_ATTR_PROVIDER_STRING,
> "in_use"))
> > +		goto err_cancel_entry;
> > +	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PROVIDER_U32, wq-
> >sq.in_use))
> > +		goto err_cancel_entry;
> > +	if (nla_put_string(msg, RDMA_NLDEV_ATTR_PROVIDER_STRING,
> "size"))
> > +		goto err_cancel_entry;
> > +	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PROVIDER_U32, wq-
> >sq.size))
> > +		goto err_cancel_entry;
> > +	if (nla_put_string(msg, RDMA_NLDEV_ATTR_PROVIDER_STRING,
> "flags"))
> > +		goto err_cancel_entry;
> > +	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PROVIDER_X32, wq-
> >sq.flags))
> > +		goto err_cancel_entry;
> > +
> > +	nla_nest_end(msg, entry_attr);
> > +	return 0;
> > +
> > +err_cancel_entry:
> > +	nla_nest_cancel(msg, entry_attr);
> > +err:
> > +	return -EMSGSIZE;
> > +}
> > +
> > +static int fill_rq(struct sk_buff *msg, struct t4_wq *wq)
> > +{
> > +	struct nlattr *entry_attr;
> > +
> > +	/* RQ */
> > +	entry_attr = nla_nest_start(msg,
> RDMA_NLDEV_ATTR_PROVIDER_ENTRY);
> > +	if (!entry_attr)
> > +		goto err;
> > +
> > +	if (nla_put_string(msg, RDMA_NLDEV_ATTR_PROVIDER_STRING,
> "rqid"))
> > +		goto err_cancel_entry;
> > +	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PROVIDER_U32, wq-
> >rq.qid))
> > +		goto err_cancel_entry;
> > +	if (nla_put_string(msg, RDMA_NLDEV_ATTR_PROVIDER_STRING,
> "memsize"))
> > +		goto err_cancel_entry;
> > +	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PROVIDER_U32, wq-
> >rq.memsize))
> > +		goto err_cancel_entry;
> > +	if (nla_put_string(msg, RDMA_NLDEV_ATTR_PROVIDER_STRING,
> "cidx"))
> > +		goto err_cancel_entry;
> > +	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PROVIDER_U32, wq-
> >rq.cidx))
> > +		goto err_cancel_entry;
> > +	if (nla_put_string(msg, RDMA_NLDEV_ATTR_PROVIDER_STRING,
> "pidx"))
> > +		goto err_cancel_entry;
> > +	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PROVIDER_U32, wq-
> >rq.pidx))
> > +		goto err_cancel_entry;
> > +	if (nla_put_string(msg, RDMA_NLDEV_ATTR_PROVIDER_STRING,
> "wq_pidx"))
> > +		goto err_cancel_entry;
> > +	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PROVIDER_U32, wq-
> >rq.wq_pidx))
> > +		goto err_cancel_entry;
> > +	if (nla_put_string(msg, RDMA_NLDEV_ATTR_PROVIDER_STRING,
> "msn"))
> > +		goto err_cancel_entry;
> > +	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PROVIDER_U32, wq-
> >rq.msn))
> > +		goto err_cancel_entry;
> > +	if (nla_put_string(msg, RDMA_NLDEV_ATTR_PROVIDER_STRING,
> > +			   "rqt_hwaddr"))
> > +		goto err_cancel_entry;
> > +	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PROVIDER_X32,
> > +			wq->rq.rqt_hwaddr))
> > +		goto err_cancel_entry;
> > +	if (nla_put_string(msg, RDMA_NLDEV_ATTR_PROVIDER_STRING,
> "rqt_size"))
> > +		goto err_cancel_entry;
> > +	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PROVIDER_U32, wq-
> >rq.rqt_size))
> > +		goto err_cancel_entry;
> > +	if (nla_put_string(msg, RDMA_NLDEV_ATTR_PROVIDER_STRING,
> "in_use"))
> > +		goto err_cancel_entry;
> > +	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PROVIDER_U32, wq-
> >rq.in_use))
> > +		goto err_cancel_entry;
> > +	if (nla_put_string(msg, RDMA_NLDEV_ATTR_PROVIDER_STRING,
> "size"))
> > +		goto err_cancel_entry;
> > +	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PROVIDER_U32, wq-
> >rq.size))
> > +		goto err_cancel_entry;
> > +
> > +	nla_nest_end(msg, entry_attr);
> > +	return 0;
> > +
> > +err_cancel_entry:
> > +	nla_nest_cancel(msg, entry_attr);
> > +err:
> > +	return -EMSGSIZE;
> > +}
> 
> The above is a perfect example of my claim that most of the data will be
> simple u32 which is default in netlink. In such case, you won't need to
> provide type identification at all.
> 

And my solution provides the type identification for free.


--
To unsubscribe from this list: send the line "unsubscribe linux-rdma" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Jason Gunthorpe March 27, 2018, 2:52 p.m. UTC | #3
On Sat, Mar 24, 2018 at 12:55:41PM +0300, Leon Romanovsky wrote:

> > +	if (nla_put_string(msg, RDMA_NLDEV_ATTR_PROVIDER_STRING, "sqid"))
> > +		goto err_cancel_entry;
> > +	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PROVIDER_U32, wq->sq.qid))
> > +		goto err_cancel_entry;

And if you are going to have this as a standard scheme, probably give
it a helper and drop half the lines..

rdma_nl_put_provider_u32(msg, "sqid", wq->sq.qid);

Jason
--
To unsubscribe from this list: send the line "unsubscribe linux-rdma" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Steve Wise March 27, 2018, 7:02 p.m. UTC | #4
> On Sat, Mar 24, 2018 at 12:55:41PM +0300, Leon Romanovsky wrote:
> 
> > > +	if (nla_put_string(msg, RDMA_NLDEV_ATTR_PROVIDER_STRING,
> "sqid"))
> > > +		goto err_cancel_entry;
> > > +	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PROVIDER_U32, wq-
> >sq.qid))
> > > +		goto err_cancel_entry;
> 
> And if you are going to have this as a standard scheme, probably give
> it a helper and drop half the lines..
> 
> rdma_nl_put_provider_u32(msg, "sqid", wq->sq.qid);
> 
> Jason

Good idea.  I'll do this...

--
To unsubscribe from this list: send the line "unsubscribe linux-rdma" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
diff mbox

Patch

diff --git a/drivers/infiniband/hw/cxgb4/Makefile b/drivers/infiniband/hw/cxgb4/Makefile
index fa40b68..9edd920 100644
--- a/drivers/infiniband/hw/cxgb4/Makefile
+++ b/drivers/infiniband/hw/cxgb4/Makefile
@@ -3,4 +3,5 @@  ccflags-y += -Idrivers/net/ethernet/chelsio/libcxgb
 
 obj-$(CONFIG_INFINIBAND_CXGB4) += iw_cxgb4.o
 
-iw_cxgb4-y :=  device.o cm.o provider.o mem.o cq.o qp.o resource.o ev.o id_table.o
+iw_cxgb4-y :=  device.o cm.o provider.o mem.o cq.o qp.o resource.o ev.o id_table.o \
+	       restrack.o
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index cc92900..bbdc8dd 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -55,6 +55,7 @@ 
 #include <rdma/iw_cm.h>
 #include <rdma/rdma_netlink.h>
 #include <rdma/iw_portmap.h>
+#include <rdma/restrack.h>
 
 #include "cxgb4.h"
 #include "cxgb4_uld.h"
@@ -1078,4 +1079,9 @@  void __iomem *c4iw_bar2_addrs(struct c4iw_rdev *rdev, unsigned int qid,
 void c4iw_invalidate_mr(struct c4iw_dev *rhp, u32 rkey);
 struct c4iw_wr_wait *c4iw_alloc_wr_wait(gfp_t gfp);
 
+typedef int c4iw_restrack_func(struct sk_buff *msg,
+			       struct netlink_callback *cb,
+			       struct rdma_restrack_entry *res);
+extern c4iw_restrack_func *c4iw_restrack_funcs[RDMA_RESTRACK_MAX];
+
 #endif
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index 0b9cc73..1dda32b 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -551,6 +551,14 @@  static struct net_device *get_netdev(struct ib_device *dev, u8 port)
 	return ndev;
 }
 
+static int fill_res_entry(struct sk_buff *msg, struct netlink_callback *cb,
+			  struct rdma_restrack_entry *res)
+{
+	return (res->type < ARRAY_SIZE(c4iw_restrack_funcs) &&
+		c4iw_restrack_funcs[res->type]) ?
+		c4iw_restrack_funcs[res->type](msg, cb, res) : 0;
+}
+
 void c4iw_register_device(struct work_struct *work)
 {
 	int ret;
@@ -645,6 +653,7 @@  void c4iw_register_device(struct work_struct *work)
 	dev->ibdev.iwcm->add_ref = c4iw_qp_add_ref;
 	dev->ibdev.iwcm->rem_ref = c4iw_qp_rem_ref;
 	dev->ibdev.iwcm->get_qp = c4iw_get_qp;
+	dev->ibdev.res.fill_res_entry = fill_res_entry;
 	memcpy(dev->ibdev.iwcm->ifname, dev->rdev.lldi.ports[0]->name,
 	       sizeof(dev->ibdev.iwcm->ifname));
 
diff --git a/drivers/infiniband/hw/cxgb4/restrack.c b/drivers/infiniband/hw/cxgb4/restrack.c
new file mode 100644
index 0000000..a67f509
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/restrack.c
@@ -0,0 +1,363 @@ 
+/*
+ * Copyright (c) 2018 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "iw_cxgb4.h"
+
+/*
+ * Emit the WQ+SQ state as a nested provider entry of alternating
+ * name-string / value attributes.  Returns 0 on success or -EMSGSIZE
+ * if the message ran out of room (the partial nest is cancelled).
+ */
+static int fill_sq(struct sk_buff *msg, struct t4_wq *wq)
+{
+	/* name/value/attr-type table, dumped in order */
+	const struct {
+		const char *name;
+		u32 val;
+		int attr;
+	} fields[] = {
+		{ "sqid",	wq->sq.qid,	RDMA_NLDEV_ATTR_PROVIDER_U32 },
+		{ "flushed",	wq->flushed,	RDMA_NLDEV_ATTR_PROVIDER_U32 },
+		{ "memsize",	wq->sq.memsize,	RDMA_NLDEV_ATTR_PROVIDER_U32 },
+		{ "cidx",	wq->sq.cidx,	RDMA_NLDEV_ATTR_PROVIDER_U32 },
+		{ "pidx",	wq->sq.pidx,	RDMA_NLDEV_ATTR_PROVIDER_U32 },
+		{ "wq_pidx",	wq->sq.wq_pidx,	RDMA_NLDEV_ATTR_PROVIDER_U32 },
+		{ "flush_cidx",	wq->sq.flush_cidx, RDMA_NLDEV_ATTR_PROVIDER_U32 },
+		{ "in_use",	wq->sq.in_use,	RDMA_NLDEV_ATTR_PROVIDER_U32 },
+		{ "size",	wq->sq.size,	RDMA_NLDEV_ATTR_PROVIDER_U32 },
+		{ "flags",	wq->sq.flags,	RDMA_NLDEV_ATTR_PROVIDER_X32 },
+	};
+	struct nlattr *entry_attr;
+	size_t i;
+
+	/* WQ+SQ */
+	entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_PROVIDER_ENTRY);
+	if (!entry_attr)
+		return -EMSGSIZE;
+
+	for (i = 0; i < ARRAY_SIZE(fields); i++) {
+		if (nla_put_string(msg, RDMA_NLDEV_ATTR_PROVIDER_STRING,
+				   fields[i].name) ||
+		    nla_put_u32(msg, fields[i].attr, fields[i].val))
+			goto err_cancel_entry;
+	}
+
+	nla_nest_end(msg, entry_attr);
+	return 0;
+
+err_cancel_entry:
+	nla_nest_cancel(msg, entry_attr);
+	return -EMSGSIZE;
+}
+
+/*
+ * Emit the RQ state as a nested provider entry of alternating
+ * name-string / value attributes.  Returns 0 on success or -EMSGSIZE
+ * if the message ran out of room (the partial nest is cancelled).
+ */
+static int fill_rq(struct sk_buff *msg, struct t4_wq *wq)
+{
+	/* name/value/attr-type table, dumped in order */
+	const struct {
+		const char *name;
+		u32 val;
+		int attr;
+	} fields[] = {
+		{ "rqid",	wq->rq.qid,	RDMA_NLDEV_ATTR_PROVIDER_U32 },
+		{ "memsize",	wq->rq.memsize,	RDMA_NLDEV_ATTR_PROVIDER_U32 },
+		{ "cidx",	wq->rq.cidx,	RDMA_NLDEV_ATTR_PROVIDER_U32 },
+		{ "pidx",	wq->rq.pidx,	RDMA_NLDEV_ATTR_PROVIDER_U32 },
+		{ "wq_pidx",	wq->rq.wq_pidx,	RDMA_NLDEV_ATTR_PROVIDER_U32 },
+		{ "msn",	wq->rq.msn,	RDMA_NLDEV_ATTR_PROVIDER_U32 },
+		{ "rqt_hwaddr",	wq->rq.rqt_hwaddr, RDMA_NLDEV_ATTR_PROVIDER_X32 },
+		{ "rqt_size",	wq->rq.rqt_size, RDMA_NLDEV_ATTR_PROVIDER_U32 },
+		{ "in_use",	wq->rq.in_use,	RDMA_NLDEV_ATTR_PROVIDER_U32 },
+		{ "size",	wq->rq.size,	RDMA_NLDEV_ATTR_PROVIDER_U32 },
+	};
+	struct nlattr *entry_attr;
+	size_t i;
+
+	/* RQ */
+	entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_PROVIDER_ENTRY);
+	if (!entry_attr)
+		return -EMSGSIZE;
+
+	for (i = 0; i < ARRAY_SIZE(fields); i++) {
+		if (nla_put_string(msg, RDMA_NLDEV_ATTR_PROVIDER_STRING,
+				   fields[i].name) ||
+		    nla_put_u32(msg, fields[i].attr, fields[i].val))
+			goto err_cancel_entry;
+	}
+
+	nla_nest_end(msg, entry_attr);
+	return 0;
+
+err_cancel_entry:
+	nla_nest_cancel(msg, entry_attr);
+	return -EMSGSIZE;
+}
+
+/*
+ * Dump one software SQ entry as a nested provider entry.  The leading
+ * spaces in "    idx" indent the per-WR rows under the SQ summary in
+ * the rdma tool output.  Returns 0 or -EMSGSIZE.
+ */
+static int fill_swsqe(struct sk_buff *msg, struct t4_sq *sq, u16 idx,
+		      struct t4_swsqe *sqe)
+{
+	struct nlattr *entry_attr;
+
+	entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_PROVIDER_ENTRY);
+	if (!entry_attr)
+		return -EMSGSIZE;
+
+	if (nla_put_string(msg, RDMA_NLDEV_ATTR_PROVIDER_STRING, "    idx") ||
+	    nla_put_u32(msg, RDMA_NLDEV_ATTR_PROVIDER_U32, idx) ||
+	    nla_put_string(msg, RDMA_NLDEV_ATTR_PROVIDER_STRING, "opcode") ||
+	    nla_put_u32(msg, RDMA_NLDEV_ATTR_PROVIDER_U32, sqe->opcode) ||
+	    nla_put_string(msg, RDMA_NLDEV_ATTR_PROVIDER_STRING, "wr_id") ||
+	    nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_PROVIDER_X64, sqe->wr_id,
+			      RDMA_NLDEV_ATTR_PAD) ||
+	    nla_put_string(msg, RDMA_NLDEV_ATTR_PROVIDER_STRING, "complete") ||
+	    nla_put_u32(msg, RDMA_NLDEV_ATTR_PROVIDER_U32, sqe->complete))
+		goto err_cancel_entry;
+
+	/* The CQE status only exists once the WR has completed. */
+	if (sqe->complete &&
+	    (nla_put_string(msg, RDMA_NLDEV_ATTR_PROVIDER_STRING,
+			    "cqe_status") ||
+	     nla_put_u32(msg, RDMA_NLDEV_ATTR_PROVIDER_U32,
+			 CQE_STATUS(&sqe->cqe))))
+		goto err_cancel_entry;
+
+	if (nla_put_string(msg, RDMA_NLDEV_ATTR_PROVIDER_STRING, "signaled") ||
+	    nla_put_u32(msg, RDMA_NLDEV_ATTR_PROVIDER_U32, sqe->signaled) ||
+	    nla_put_string(msg, RDMA_NLDEV_ATTR_PROVIDER_STRING, "flushed") ||
+	    nla_put_u32(msg, RDMA_NLDEV_ATTR_PROVIDER_U32, sqe->flushed))
+		goto err_cancel_entry;
+
+	nla_nest_end(msg, entry_attr);
+	return 0;
+
+err_cancel_entry:
+	nla_nest_cancel(msg, entry_attr);
+	return -EMSGSIZE;
+}
+
+/*
+ * Dump the first and, when distinct, last pending sqes.  A NULL
+ * first_sqe means the SQ is empty; a NULL last_sqe means only one
+ * entry is pending.
+ */
+static int fill_swsqes(struct sk_buff *msg, struct t4_sq *sq,
+		       u16 first_idx, struct t4_swsqe *first_sqe,
+		       u16 last_idx, struct t4_swsqe *last_sqe)
+{
+	if (first_sqe) {
+		if (fill_swsqe(msg, sq, first_idx, first_sqe))
+			return -EMSGSIZE;
+		if (last_sqe && fill_swsqe(msg, sq, last_idx, last_sqe))
+			return -EMSGSIZE;
+	}
+	return 0;
+}
+
+/*
+ * Dump one software RQ entry (index and wr_id) as a nested provider
+ * entry.  Returns 0 or -EMSGSIZE.
+ */
+static int fill_swrqe(struct sk_buff *msg, struct t4_rq *rq, u16 idx,
+		      struct t4_swrqe *rqe)
+{
+	struct nlattr *entry_attr;
+
+	entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_PROVIDER_ENTRY);
+	if (!entry_attr)
+		return -EMSGSIZE;
+
+	if (nla_put_string(msg, RDMA_NLDEV_ATTR_PROVIDER_STRING, "    idx") ||
+	    nla_put_u32(msg, RDMA_NLDEV_ATTR_PROVIDER_U32, idx) ||
+	    nla_put_string(msg, RDMA_NLDEV_ATTR_PROVIDER_STRING, "wr_id") ||
+	    nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_PROVIDER_X64, rqe->wr_id,
+			      RDMA_NLDEV_ATTR_PAD))
+		goto err_cancel_entry;
+
+	nla_nest_end(msg, entry_attr);
+	return 0;
+
+err_cancel_entry:
+	nla_nest_cancel(msg, entry_attr);
+	return -EMSGSIZE;
+}
+
+/*
+ * Dump the first and, when distinct, last pending rqes.  A NULL
+ * first_rqe means the RQ is empty; a NULL last_rqe means only one
+ * entry is pending.
+ */
+static int fill_swrqes(struct sk_buff *msg, struct t4_rq *rq,
+		       u16 first_idx, struct t4_swrqe *first_rqe,
+		       u16 last_idx, struct t4_swrqe *last_rqe)
+{
+	if (first_rqe) {
+		if (fill_swrqe(msg, rq, first_idx, first_rqe))
+			return -EMSGSIZE;
+		if (last_rqe && fill_swrqe(msg, rq, last_idx, last_rqe))
+			return -EMSGSIZE;
+	}
+	return 0;
+}
+
+/*
+ * Fill provider-specific QP state into a restrack dump message:
+ * the SQ and RQ summaries plus the first and last pending software
+ * SQ/RQ descriptors, snapshotted under the QP lock.
+ *
+ * Returns 0 on success (including the nothing-to-dump case) or
+ * -EMSGSIZE when the netlink message is full.
+ */
+static int fill_res_qp_entry(struct sk_buff *msg,
+			     struct netlink_callback *cb,
+			     struct rdma_restrack_entry *res)
+{
+	struct ib_qp *ibqp = container_of(res, struct ib_qp, res);
+	struct t4_swsqe *fsp = NULL, *lsp = NULL;
+	struct t4_swrqe *frp = NULL, *lrp = NULL;
+	struct c4iw_qp *qhp = to_c4iw_qp(ibqp);
+	struct t4_swsqe first_sqe, last_sqe;
+	struct t4_swrqe first_rqe, last_rqe;
+	/*
+	 * Initialize the indices: when a queue is empty they are never
+	 * assigned but are still passed to fill_swsqes()/fill_swrqes()
+	 * (which ignore them because fsp/frp stay NULL).  Passing
+	 * indeterminate values would trigger -Wmaybe-uninitialized.
+	 */
+	u16 first_sq_idx = 0, last_sq_idx = 0;
+	u16 first_rq_idx = 0, last_rq_idx = 0;
+	struct nlattr *table_attr;
+	struct t4_wq wq;
+
+	/* User qp state is not available, so don't dump user qps */
+	if (qhp->ucontext)
+		return 0;
+
+	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_PROVIDER);
+	if (!table_attr)
+		goto err;
+
+	/* Get a consistent snapshot */
+	spin_lock_irq(&qhp->lock);
+	wq = qhp->wq;
+
+	/* If there are any pending sqes, copy the first and last */
+	if (wq.sq.cidx != wq.sq.pidx) {
+		first_sq_idx = wq.sq.cidx;
+		first_sqe = qhp->wq.sq.sw_sq[first_sq_idx];
+		fsp = &first_sqe;
+		/* pidx points one past the last pending entry; wrap at 0 */
+		last_sq_idx = wq.sq.pidx;
+		if (last_sq_idx-- == 0)
+			last_sq_idx = wq.sq.size - 1;
+		if (last_sq_idx != first_sq_idx) {
+			last_sqe = qhp->wq.sq.sw_sq[last_sq_idx];
+			lsp = &last_sqe;
+		}
+	}
+
+	/* If there are any pending rqes, copy the first and last */
+	if (wq.rq.cidx != wq.rq.pidx) {
+		first_rq_idx = wq.rq.cidx;
+		first_rqe = qhp->wq.rq.sw_rq[first_rq_idx];
+		frp = &first_rqe;
+		/* pidx points one past the last pending entry; wrap at 0 */
+		last_rq_idx = wq.rq.pidx;
+		if (last_rq_idx-- == 0)
+			last_rq_idx = wq.rq.size - 1;
+		if (last_rq_idx != first_rq_idx) {
+			last_rqe = qhp->wq.rq.sw_rq[last_rq_idx];
+			lrp = &last_rqe;
+		}
+	}
+	spin_unlock_irq(&qhp->lock);
+
+	/* Build the message from the snapshot, outside the lock. */
+	if (fill_sq(msg, &wq))
+		goto err_cancel_table;
+
+	if (fill_swsqes(msg, &wq.sq, first_sq_idx, fsp, last_sq_idx, lsp))
+		goto err_cancel_table;
+
+	if (fill_rq(msg, &wq))
+		goto err_cancel_table;
+
+	if (fill_swrqes(msg, &wq.rq, first_rq_idx, frp, last_rq_idx, lrp))
+		goto err_cancel_table;
+
+	nla_nest_end(msg, table_attr);
+	return 0;
+
+err_cancel_table:
+	nla_nest_cancel(msg, table_attr);
+err:
+	return -EMSGSIZE;
+}
+
+/*
+ * Per-resource-type dump handlers, indexed by restrack resource type.
+ * Unset entries mean there is nothing provider-specific to dump for
+ * that type (fill_res_entry() in provider.c returns 0 for them).
+ */
+c4iw_restrack_func *c4iw_restrack_funcs[RDMA_RESTRACK_MAX] = {
+	[RDMA_RESTRACK_QP]	= fill_res_qp_entry,
+};