
[rdma-next,v1,5/6] RDMA/mlx5: Convert CTX to use restrack HW allocation scheme

Message ID 20190120130600.8788-6-leon@kernel.org (mailing list archive)
State Changes Requested
Series Annotate mlx5 driver to reuse restrack IDs

Commit Message

Leon Romanovsky Jan. 20, 2019, 1:05 p.m. UTC
From: Leon Romanovsky <leonro@mellanox.com>

ucontexts are limited by the number of transport domains, which are
counted and allocated by FW. Convert the mlx5 code to use the restrack
HW allocation scheme to manage TDNs.

Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
---
 drivers/infiniband/hw/mlx5/main.c    | 23 ++++++++++++++++++++---
 drivers/infiniband/hw/mlx5/mlx5_ib.h |  2 --
 drivers/infiniband/hw/mlx5/qp.c      |  5 ++---
 3 files changed, 22 insertions(+), 8 deletions(-)
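
For context, the restrack HW-ID flow this patch adopts can be summarized as
below. This is an illustrative sketch only: rdma_rt_set_id_range(),
rdma_rt_set_type(), rdma_rt_set_id() and rdma_res_to_id() are helpers
introduced earlier in this series, and their usage here is inferred from the
hunks that follow.

	/* At device registration: mark CTX restrack IDs as HW-managed. */
	rdma_rt_set_id_range(&dev->ib_dev, RDMA_RESTRACK_CTX, 0, 0);

	/* At ucontext allocation: reuse the FW-allocated TDN as the restrack ID. */
	err = mlx5_ib_alloc_transport_domain(dev, &tdn, context->devx_uid);
	if (err)
		goto out_devx;
	rdma_rt_set_type(&context->ibucontext.res, RDMA_RESTRACK_CTX);
	rdma_rt_set_id(&context->ibucontext.res, tdn);
	err = rdma_restrack_add(&context->ibucontext.res);
	if (err)
		goto out_mdev;

	/* At teardown: recover the TDN from the restrack entry instead of
	 * keeping a private copy in struct mlx5_ib_ucontext.
	 */
	rdma_restrack_del(&context->ibucontext.res);
	mlx5_ib_dealloc_transport_domain(dev,
					 rdma_res_to_id(&context->ibucontext.res),
					 context->devx_uid);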

Patch

diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 12a038146105..e662b575807c 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -1667,6 +1667,7 @@  static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
 	struct mlx5_core_dev *mdev = dev->mdev;
 	struct mlx5_ib_ucontext *context;
 	struct mlx5_bfreg_info *bfregi;
+	u32 tdn;
 	int ver;
 	int err;
 	size_t min_req_v2 = offsetof(struct mlx5_ib_alloc_ucontext_req_v2,
@@ -1774,7 +1775,7 @@  static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
 		context->devx_uid = err;
 	}
 
-	err = mlx5_ib_alloc_transport_domain(dev, &context->tdn,
+	err = mlx5_ib_alloc_transport_domain(dev, &tdn,
 					     context->devx_uid);
 	if (err)
 		goto out_devx;
@@ -1850,6 +1851,8 @@  static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
 		resp.response_length += sizeof(resp.dump_fill_mkey);
 	}
 
+
+
 	err = ib_copy_to_udata(udata, &resp, resp.response_length);
 	if (err)
 		goto out_mdev;
@@ -1868,10 +1871,19 @@  static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
 				   1, &dev->roce[port].tx_port_affinity));
 	}
 
+	context->ibucontext.device = ibdev;
+	rdma_rt_set_type(&context->ibucontext.res, RDMA_RESTRACK_CTX);
+	rdma_rt_set_id(&context->ibucontext.res, tdn);
+	err = rdma_restrack_add(&context->ibucontext.res);
+	if (err)
+		goto out_mdev;
+
 	return &context->ibucontext;
 
 out_mdev:
-	mlx5_ib_dealloc_transport_domain(dev, context->tdn, context->devx_uid);
+	mlx5_ib_dealloc_transport_domain(
+		dev, rdma_res_to_id(&context->ibucontext.res),
+		context->devx_uid);
 out_devx:
 	if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX)
 		mlx5_ib_devx_destroy(dev, context->devx_uid);
@@ -1903,7 +1915,10 @@  static int mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
 	mutex_unlock(&ibcontext->per_mm_list_lock);
 
 	bfregi = &context->bfregi;
-	mlx5_ib_dealloc_transport_domain(dev, context->tdn, context->devx_uid);
+	rdma_restrack_del(&context->ibucontext.res);
+	mlx5_ib_dealloc_transport_domain(
+		dev, rdma_res_to_id(&context->ibucontext.res),
+		context->devx_uid);
 
 	if (context->devx_uid)
 		mlx5_ib_devx_destroy(dev, context->devx_uid);
@@ -6531,6 +6546,8 @@  static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
 	rdma_rt_set_id_range(&dev->ib_dev, RDMA_RESTRACK_CQ, 0, 0);
 	/* QP ID managed by HW */
 	rdma_rt_set_id_range(&dev->ib_dev, RDMA_RESTRACK_QP, 0, 0);
+	/* CTX ID is managed by the TDN */
+	rdma_rt_set_id_range(&dev->ib_dev, RDMA_RESTRACK_CTX, 0, 0);
 
 	dev->mdev = mdev;
 	dev->num_ports = max(MLX5_CAP_GEN(mdev, num_ports),
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 8b235ceb9574..1f1dd19c9d85 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -126,8 +126,6 @@  struct mlx5_ib_ucontext {
 	struct mutex		db_page_mutex;
 	struct mlx5_bfreg_info	bfregi;
 	u8			cqe_version;
-	/* Transport Domain number */
-	u32			tdn;
 
 	u64			lib_caps;
 	DECLARE_BITMAP(dm_pages, MLX5_MAX_MEMIC_PAGES);
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index d18e2156d395..dba75b96f971 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -1360,9 +1360,8 @@  static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
 	struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
 	struct ib_uobject *uobj = pd->uobject;
 	struct ib_ucontext *ucontext = uobj->context;
-	struct mlx5_ib_ucontext *mucontext = to_mucontext(ucontext);
 	int err;
-	u32 tdn = mucontext->tdn;
+	u32 tdn = rdma_res_to_id(&ucontext->res);
 	u16 uid = to_mpd(pd)->uid;
 
 	if (qp->sq.wqe_cnt) {
@@ -1486,7 +1485,7 @@  static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
 	u32 selected_fields = 0;
 	u32 outer_l4;
 	size_t min_resp_len;
-	u32 tdn = mucontext->tdn;
+	u32 tdn = rdma_res_to_id(&ucontext->res);
 	struct mlx5_ib_create_qp_rss ucmd = {};
 	size_t required_cmd_sz;
 	u8 lb_flag = 0;