@@ -1667,6 +1667,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
struct mlx5_core_dev *mdev = dev->mdev;
struct mlx5_ib_ucontext *context;
struct mlx5_bfreg_info *bfregi;
+ u32 tdn;
int ver;
int err;
size_t min_req_v2 = offsetof(struct mlx5_ib_alloc_ucontext_req_v2,
@@ -1774,7 +1775,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
context->devx_uid = err;
}
- err = mlx5_ib_alloc_transport_domain(dev, &context->tdn,
+ err = mlx5_ib_alloc_transport_domain(dev, &tdn,
context->devx_uid);
if (err)
goto out_devx;
@@ -1850,6 +1851,8 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
resp.response_length += sizeof(resp.dump_fill_mkey);
}
+
+
err = ib_copy_to_udata(udata, &resp, resp.response_length);
if (err)
goto out_mdev;
@@ -1868,10 +1871,19 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
1, &dev->roce[port].tx_port_affinity));
}
+ context->ibucontext.device = ibdev;
+ rdma_rt_set_type(&context->ibucontext.res, RDMA_RESTRACK_CTX);
+ rdma_rt_set_id(&context->ibucontext.res, tdn);
+ err = rdma_restrack_add(&context->ibucontext.res);
+ if (err)
+ goto out_mdev;
+
return &context->ibucontext;
out_mdev:
- mlx5_ib_dealloc_transport_domain(dev, context->tdn, context->devx_uid);
+ mlx5_ib_dealloc_transport_domain(
+ dev, rdma_res_to_id(&context->ibucontext.res),
+ context->devx_uid);
out_devx:
if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX)
mlx5_ib_devx_destroy(dev, context->devx_uid);
@@ -1903,7 +1915,10 @@ static int mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
mutex_unlock(&ibcontext->per_mm_list_lock);
bfregi = &context->bfregi;
- mlx5_ib_dealloc_transport_domain(dev, context->tdn, context->devx_uid);
+ rdma_restrack_del(&context->ibucontext.res);
+ mlx5_ib_dealloc_transport_domain(
+ dev, rdma_res_to_id(&context->ibucontext.res),
+ context->devx_uid);
if (context->devx_uid)
mlx5_ib_devx_destroy(dev, context->devx_uid);
@@ -6531,6 +6546,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
rdma_rt_set_id_range(&dev->ib_dev, RDMA_RESTRACK_CQ, 0, 0);
/* QP ID managed by HW */
rdma_rt_set_id_range(&dev->ib_dev, RDMA_RESTRACK_QP, 0, 0);
+ /* contexts are managed by TDN */
+ rdma_rt_set_id_range(&dev->ib_dev, RDMA_RESTRACK_CTX, 0, 0);
dev->mdev = mdev;
dev->num_ports = max(MLX5_CAP_GEN(mdev, num_ports),
@@ -126,8 +126,6 @@ struct mlx5_ib_ucontext {
struct mutex db_page_mutex;
struct mlx5_bfreg_info bfregi;
u8 cqe_version;
- /* Transport Domain number */
- u32 tdn;
u64 lib_caps;
DECLARE_BITMAP(dm_pages, MLX5_MAX_MEMIC_PAGES);
@@ -1360,9 +1360,8 @@ static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
struct ib_uobject *uobj = pd->uobject;
struct ib_ucontext *ucontext = uobj->context;
- struct mlx5_ib_ucontext *mucontext = to_mucontext(ucontext);
int err;
- u32 tdn = mucontext->tdn;
+ u32 tdn = rdma_res_to_id(&ucontext->res);
u16 uid = to_mpd(pd)->uid;
if (qp->sq.wqe_cnt) {
@@ -1486,7 +1485,7 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
u32 selected_fields = 0;
u32 outer_l4;
size_t min_resp_len;
- u32 tdn = mucontext->tdn;
+ u32 tdn = rdma_res_to_id(&ucontext->res);
struct mlx5_ib_create_qp_rss ucmd = {};
size_t required_cmd_sz;
u8 lb_flag = 0;