@@ -2262,33 +2262,46 @@ static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev,
u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {};
u32 in[MLX5_ST_SZ_DW(alloc_pd_in)] = {};
u16 uid = 0;
+ u32 pdn;
pd = kzalloc(sizeof(*pd), GFP_KERNEL);
if (!pd)
return ERR_PTR(-ENOMEM);
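+ /* set the owning device early; the restrack registration below needs it */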
+ pd->ibpd.device = ibdev;
+
uid = context ? to_mucontext(context)->devx_uid : 0;
MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD);
MLX5_SET(alloc_pd_in, in, uid, uid);
err = mlx5_cmd_exec(to_mdev(ibdev)->mdev, in, sizeof(in),
out, sizeof(out));
- if (err) {
- kfree(pd);
- return ERR_PTR(err);
- }
+ if (err)
+ goto out;
- pd->pdn = MLX5_GET(alloc_pd_out, out, pd);
+ pdn = MLX5_GET(alloc_pd_out, out, pd);
pd->uid = uid;
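+ /* register the PD with restrack, using the FW-assigned PDN as its ID */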
+ rdma_rt_set_type(&pd->ibpd.res, RDMA_RESTRACK_PD);
+ rdma_rt_set_id(&pd->ibpd.res, pdn);
+ err = rdma_restrack_add(&pd->ibpd.res);
+ if (err)
+ goto err_dealloc;
+
if (context) {
- resp.pdn = pd->pdn;
+ resp.pdn = pdn;
if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
- mlx5_cmd_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn, uid);
- kfree(pd);
- return ERR_PTR(-EFAULT);
+ err = -EFAULT;
+ goto err_copy;
}
}
return &pd->ibpd;
+
+err_copy:
+ rdma_restrack_del(&pd->ibpd.res);
+err_dealloc:
+ mlx5_cmd_dealloc_pd(to_mdev(ibdev)->mdev, pdn, uid);
+out:
+ kfree(pd);
+ return ERR_PTR(err);
}
static int mlx5_ib_dealloc_pd(struct ib_pd *pd)
@@ -2296,7 +2309,8 @@ static int mlx5_ib_dealloc_pd(struct ib_pd *pd)
struct mlx5_ib_dev *mdev = to_mdev(pd->device);
struct mlx5_ib_pd *mpd = to_mpd(pd);
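+ /* fetch the PDN before the restrack entry is removed */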
+ u32 pdn = rdma_res_to_id(&pd->res);
+
- mlx5_cmd_dealloc_pd(mdev->mdev, mpd->pdn, mpd->uid);
+ rdma_restrack_del(&pd->res);
+ mlx5_cmd_dealloc_pd(mdev->mdev, pdn, mpd->uid);
kfree(mpd);
return 0;
@@ -4665,8 +4679,6 @@ static int create_dev_resources(struct mlx5_ib_resources *devr)
ret = PTR_ERR(devr->p0);
goto error0;
}
- devr->p0->device = &dev->ib_dev;
- devr->p0->uobject = NULL;
atomic_set(&devr->p0->usecnt, 0);
devr->c0 = mlx5_ib_create_cq(&dev->ib_dev, &cq_attr, NULL, NULL);
@@ -6513,6 +6525,9 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
if (!dev)
return NULL;
+ /* PD IDs are allocated by FW, not by restrack */
+ rdma_rt_set_id_range(&dev->ib_dev, RDMA_RESTRACK_PD, 0, 0);
+
dev->mdev = mdev;
dev->num_ports = max(MLX5_CAP_GEN(mdev, num_ports),
MLX5_CAP_GEN(mdev, num_vhca_ports));
@@ -143,7 +143,6 @@ static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibuconte
struct mlx5_ib_pd {
struct ib_pd ibpd;
- u32 pdn;
u16 uid;
};
@@ -800,7 +800,7 @@ struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
MLX5_SET(mkc, mkc, lr, 1);
MLX5_SET(mkc, mkc, length64, 1);
- MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
+ MLX5_SET(mkc, mkc, pd, rdma_res_to_id(&pd->res));
MLX5_SET(mkc, mkc, qpn, 0xffffff);
MLX5_SET64(mkc, mkc, start_addr, 0);
@@ -950,7 +950,7 @@ static struct mlx5_ib_mr *alloc_mr_from_cache(
mr->desc_size = sizeof(struct mlx5_mtt);
mr->mmkey.iova = virt_addr;
mr->mmkey.size = len;
- mr->mmkey.pd = to_mpd(pd)->pdn;
+ mr->mmkey.pd = rdma_res_to_id(&pd->res);
return mr;
}
@@ -1171,7 +1171,7 @@ static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
MLX5_SET64(mkc, mkc, start_addr, virt_addr);
MLX5_SET64(mkc, mkc, len, length);
- MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
+ MLX5_SET(mkc, mkc, pd, rdma_res_to_id(&pd->res));
MLX5_SET(mkc, mkc, bsf_octword_size, 0);
MLX5_SET(mkc, mkc, translations_octword_size,
get_octo_len(virt_addr, length, page_shift));
@@ -1250,7 +1250,7 @@ static struct ib_mr *mlx5_ib_get_memic_mr(struct ib_pd *pd, u64 memic_addr,
MLX5_SET(mkc, mkc, lr, 1);
MLX5_SET64(mkc, mkc, len, length);
- MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
+ MLX5_SET(mkc, mkc, pd, rdma_res_to_id(&pd->res));
MLX5_SET(mkc, mkc, qpn, 0xffffff);
MLX5_SET64(mkc, mkc, start_addr,
memic_addr - pci_resource_start(dev->mdev->pdev, 0));
@@ -1525,7 +1525,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
mr->access_flags = access_flags;
mr->mmkey.iova = addr;
mr->mmkey.size = len;
- mr->mmkey.pd = to_mpd(pd)->pdn;
+ mr->mmkey.pd = rdma_res_to_id(&pd->res);
if (flags & IB_MR_REREG_TRANS) {
upd_flags = MLX5_IB_UPD_XLT_ADDR;
@@ -1703,7 +1703,7 @@ struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
MLX5_SET(mkc, mkc, free, 1);
MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
MLX5_SET(mkc, mkc, qpn, 0xffffff);
- MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
+ MLX5_SET(mkc, mkc, pd, rdma_res_to_id(&pd->res));
if (mr_type == IB_MR_TYPE_MEM_REG) {
mr->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
@@ -1736,7 +1736,7 @@ struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
}
/* create mem & wire PSVs */
- err = mlx5_core_create_psv(dev->mdev, to_mpd(pd)->pdn,
+ err = mlx5_core_create_psv(dev->mdev, rdma_res_to_id(&pd->res),
2, psv_index);
if (err)
goto err_free_sig;
@@ -1834,7 +1834,7 @@ struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
MLX5_SET(mkc, mkc, free, 1);
MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
- MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
+ MLX5_SET(mkc, mkc, pd, rdma_res_to_id(&pd->res));
MLX5_SET(mkc, mkc, umr_en, 1);
MLX5_SET(mkc, mkc, lr, 1);
MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_KLMS);
@@ -2038,11 +2038,13 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
MLX5_SET(qpc, qpc, st, mlx5_st);
MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
- if (init_attr->qp_type != MLX5_IB_QPT_REG_UMR)
- MLX5_SET(qpc, qpc, pd, to_mpd(pd ? pd : devr->p0)->pdn);
- else
- MLX5_SET(qpc, qpc, latency_sensitive, 1);
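+ /* fall back to the device's internal PD (devr->p0) when no PD is given */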
+ if (init_attr->qp_type != MLX5_IB_QPT_REG_UMR) {
+ struct ib_pd *qp_pd = pd ? pd : devr->p0;
+
+ MLX5_SET(qpc, qpc, pd, rdma_res_to_id(&qp_pd->res));
+ } else {
+ MLX5_SET(qpc, qpc, latency_sensitive, 1);
+ }
if (qp->wq_sig)
MLX5_SET(qpc, qpc, wq_signature, 1);
@@ -2431,7 +2433,7 @@ static struct ib_qp *mlx5_ib_create_dct(struct ib_pd *pd,
MLX5_SET(create_dct_in, qp->dct.in, uid, to_mpd(pd)->uid);
dctc = MLX5_ADDR_OF(create_dct_in, qp->dct.in, dct_context_entry);
qp->qp_sub_type = MLX5_IB_QPT_DCT;
- MLX5_SET(dctc, dctc, pd, to_mpd(pd)->pdn);
+ MLX5_SET(dctc, dctc, pd, rdma_res_to_id(&pd->res));
MLX5_SET(dctc, dctc, srqn_xrqn, to_msrq(attr->srq)->msrq.srqn);
MLX5_SET(dctc, dctc, cqn, to_mcq(attr->recv_cq)->mcq.cqn);
MLX5_SET64(dctc, dctc, dc_access_key, ucmd->access_key);
@@ -3357,7 +3359,9 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
get_cqs(qp->ibqp.qp_type, qp->ibqp.send_cq, qp->ibqp.recv_cq,
&send_cq, &recv_cq);
- context->flags_pd = cpu_to_be32(pd ? pd->pdn : to_mpd(dev->devr.p0)->pdn);
+ context->flags_pd =
+ cpu_to_be32(pd ? rdma_res_to_id(&pd->ibpd.res) :
+ rdma_res_to_id(&dev->devr.p0->res));
context->cqn_send = send_cq ? cpu_to_be32(send_cq->mcq.cqn) : 0;
context->cqn_recv = recv_cq ? cpu_to_be32(recv_cq->mcq.cqn) : 0;
context->params1 = cpu_to_be32(MLX5_IB_ACK_REQ_FREQ << 28);
@@ -4148,7 +4152,7 @@ static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg,
seg->flags = convert_access(umrwr->access_flags);
if (umrwr->pd)
- seg->flags_pd = cpu_to_be32(to_mpd(umrwr->pd)->pdn);
+ seg->flags_pd = cpu_to_be32(rdma_res_to_id(&umrwr->pd->res));
if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_TRANSLATION &&
!umrwr->length)
seg->flags_pd |= cpu_to_be32(MLX5_MKEY_LEN64);
@@ -4495,7 +4499,7 @@ static int set_sig_umr_wr(const struct ib_send_wr *send_wr,
{
const struct ib_sig_handover_wr *wr = sig_handover_wr(send_wr);
struct mlx5_ib_mr *sig_mr = to_mmr(wr->sig_mr);
- u32 pdn = get_pd(qp)->pdn;
+ u32 pdn = rdma_res_to_id(&get_pd(qp)->ibpd.res);
u32 xlt_size;
int region_len, ret;
@@ -5647,7 +5651,7 @@ static int create_rq(struct mlx5_ib_rwq *rwq, struct ib_pd *pd,
MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES);
}
MLX5_SET(wq, wq, log_wq_sz, rwq->log_rq_size);
- MLX5_SET(wq, wq, pd, to_mpd(pd)->pdn);
+ MLX5_SET(wq, wq, pd, rdma_res_to_id(&pd->res));
MLX5_SET(wq, wq, page_offset, rwq->rq_page_offset);
MLX5_SET(wq, wq, log_wq_pg_sz, rwq->log_page_size);
MLX5_SET(wq, wq, wq_signature, rwq->wq_sig);
@@ -297,7 +297,7 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
else
in.cqn = to_mcq(dev->devr.c0)->mcq.cqn;
- in.pd = to_mpd(pd)->pdn;
+ in.pd = rdma_res_to_id(&pd->res);
in.db_record = srq->db.dma;
err = mlx5_cmd_create_srq(dev, &srq->msrq, &in);
kvfree(in.pas);