@@ -730,7 +730,7 @@ struct ib_ah *bnxt_re_create_ah(struct ib_pd *ib_pd,
/* Write AVID to shared page. */
if (rdma_is_user_pd(ib_pd)) {
- struct ib_ucontext *ib_uctx = ib_pd->uobject->context;
+ struct ib_ucontext *ib_uctx = rdma_udata_context(udata);
struct bnxt_re_ucontext *uctx;
unsigned long flag;
u32 *wrptr;
@@ -880,7 +880,7 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
struct bnxt_qplib_qp *qplib_qp = &qp->qplib_qp;
struct ib_umem *umem;
int bytes = 0;
- struct ib_ucontext *context = pd->ib_pd.uobject->context;
+ struct ib_ucontext *context = rdma_udata_context(udata);
struct bnxt_re_ucontext *cntx = container_of(context,
struct bnxt_re_ucontext,
ib_uctx);
@@ -1358,7 +1358,7 @@ static int bnxt_re_init_user_srq(struct bnxt_re_dev *rdev,
struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq;
struct ib_umem *umem;
int bytes = 0;
- struct ib_ucontext *context = pd->ib_pd.uobject->context;
+ struct ib_ucontext *context = rdma_udata_context(udata);
struct bnxt_re_ucontext *cntx = container_of(context,
struct bnxt_re_ucontext,
ib_uctx);
@@ -3585,7 +3585,7 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
/* The fixed portion of the rkey is the same as the lkey */
mr->ib_mr.rkey = mr->qplib_mr.rkey;
- umem = ib_umem_get(ib_pd->uobject->context, start, length,
+ umem = ib_umem_get(rdma_udata_context(udata), start, length,
mr_access_flags, 0);
if (IS_ERR(umem)) {
dev_err(rdev_to_dev(rdev), "Failed to get umem");
@@ -540,7 +540,8 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
mhp->rhp = rhp;
- mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
+ mhp->umem = ib_umem_get(rdma_udata_context(udata), start, length, acc,
+ 0);
if (IS_ERR(mhp->umem)) {
err = PTR_ERR(mhp->umem);
kfree(mhp);
@@ -837,7 +838,7 @@ static struct ib_qp *iwch_create_qp(struct ib_pd *pd,
* Kernel users need more wq space for fastreg WRs which can take
* 2 WR fragments.
*/
- ucontext = pd->uobject ? to_iwch_ucontext(pd->uobject->context) : NULL;
+ ucontext = udata ? to_iwch_ucontext(rdma_udata_context(udata)) : NULL;
if (!ucontext && wqsize < (rqsize + (2 * sqsize)))
wqsize = roundup_pow_of_two(rqsize +
roundup_pow_of_two(attrs->cap.max_send_wr * 2));
@@ -537,7 +537,8 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
mhp->rhp = rhp;
- mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
+ mhp->umem = ib_umem_get(rdma_udata_context(udata), start, length, acc,
+ 0);
if (IS_ERR(mhp->umem))
goto err_free_skb;
@@ -2163,7 +2163,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
if (sqsize < 8)
sqsize = 8;
- ucontext = pd->uobject ? to_c4iw_ucontext(pd->uobject->context) : NULL;
+ ucontext = udata ? to_c4iw_ucontext(rdma_udata_context(udata)) : NULL;
qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
if (!qhp)
@@ -2713,7 +2713,7 @@ struct ib_srq *c4iw_create_srq(struct ib_pd *pd, struct ib_srq_init_attr *attrs,
rqsize = attrs->attr.max_wr + 1;
rqsize = roundup_pow_of_two(max_t(u16, rqsize, 16));
- ucontext = pd->uobject ? to_c4iw_ucontext(pd->uobject->context) : NULL;
+ ucontext = udata ? to_c4iw_ucontext(rdma_udata_context(udata)) : NULL;
srq = kzalloc(sizeof(*srq), GFP_KERNEL);
if (!srq)
@@ -674,8 +674,9 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
goto error;
}
iwqp->ctx_info.qp_compl_ctx = req.user_compl_ctx;
+
iwqp->user_mode = 1;
- ucontext = to_ucontext(ibpd->uobject->context);
+ ucontext = to_ucontext(rdma_udata_context(udata));
if (req.user_wqe_buffers) {
struct i40iw_pbl *iwpbl;
@@ -1854,7 +1855,7 @@ static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd,
if (length > I40IW_MAX_MR_SIZE)
return ERR_PTR(-EINVAL);
- region = ib_umem_get(pd->uobject->context, start, length, acc, 0);
+ region = ib_umem_get(rdma_udata_context(udata), start, length, acc, 0);
if (IS_ERR(region))
return (struct ib_mr *)region;
@@ -1874,7 +1875,7 @@ static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd,
iwmr->region = region;
iwmr->ibmr.pd = pd;
iwmr->ibmr.device = pd->device;
- ucontext = to_ucontext(pd->uobject->context);
+ ucontext = to_ucontext(rdma_udata_context(udata));
iwmr->page_size = PAGE_SIZE;
iwmr->page_msk = PAGE_MASK;
@@ -2095,7 +2096,7 @@ static int i40iw_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
if (rdma_is_user_pd(ibpd)) {
struct i40iw_ucontext *ucontext;
- ucontext = to_ucontext(ibpd->uobject->context);
+ ucontext = to_ucontext(rdma_udata_context(udata));
i40iw_del_memlist(iwmr, ucontext);
}
if (iwpbl->pbl_allocated && iwmr->type != IW_MEMREG_TYPE_QP)
@@ -415,7 +415,7 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
if (!mr)
return ERR_PTR(-ENOMEM);
- mr->umem = mlx4_get_umem_mr(pd->uobject->context, start, length,
+ mr->umem = mlx4_get_umem_mr(rdma_udata_context(udata), start, length,
virt_addr, access_flags);
if (IS_ERR(mr->umem)) {
err = PTR_ERR(mr->umem);
@@ -1015,7 +1015,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
(qp->sq.wqe_cnt << qp->sq.wqe_shift);
}
- qp->umem = ib_umem_get(pd->uobject->context,
+ qp->umem = ib_umem_get(rdma_udata_context(udata),
(src == MLX4_IB_QP_SRC) ? ucmd.qp.buf_addr :
ucmd.wq.buf_addr, qp->buf_size, 0, 0);
if (IS_ERR(qp->umem)) {
@@ -1035,7 +1035,8 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
goto err_mtt;
if (qp_has_rq(init_attr)) {
- err = mlx4_ib_db_map_user(to_mucontext(pd->uobject->context),
+ err = mlx4_ib_db_map_user(
+ to_mucontext(rdma_udata_context(udata)),
(src == MLX4_IB_QP_SRC) ? ucmd.qp.db_addr :
ucmd.wq.db_addr, &qp->db);
if (err)
@@ -1108,8 +1109,8 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
}
}
} else if (src == MLX4_IB_RWQ_SRC) {
- err = mlx4_ib_alloc_wqn(to_mucontext(pd->uobject->context), qp,
- range_size, &qpn);
+ err = mlx4_ib_alloc_wqn(to_mucontext(rdma_udata_context(udata)),
+ qp, range_size, &qpn);
if (err)
goto err_wrid;
} else {
@@ -1180,8 +1181,9 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
if (qp->flags & MLX4_IB_QP_NETIF)
mlx4_ib_steer_qp_free(dev, qpn, 1);
else if (src == MLX4_IB_RWQ_SRC)
- mlx4_ib_release_wqn(to_mucontext(pd->uobject->context),
- qp, 0);
+ mlx4_ib_release_wqn(
+ to_mucontext(rdma_udata_context(udata)),
+ qp, 0);
else
mlx4_qp_release_range(dev->dev, qpn, 1);
}
@@ -1191,7 +1193,9 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
err_wrid:
if (qp->umem) {
if (qp_has_rq(init_attr))
- mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context), &qp->db);
+ mlx4_ib_db_unmap_user(
+ to_mucontext(rdma_udata_context(udata)),
+ &qp->db);
} else {
kvfree(qp->sq.wrid);
kvfree(qp->rq.wrid);
@@ -113,7 +113,8 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
goto err_srq;
}
- srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
+ srq->umem = ib_umem_get(rdma_udata_context(udata),
+ ucmd.buf_addr,
buf_size, 0, 0);
if (IS_ERR(srq->umem)) {
err = PTR_ERR(srq->umem);
@@ -129,8 +130,9 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
if (err)
goto err_mtt;
- err = mlx4_ib_db_map_user(to_mucontext(pd->uobject->context),
- ucmd.db_addr, &srq->db);
+ err = mlx4_ib_db_map_user(
+ to_mucontext(rdma_udata_context(udata)),
+ ucmd.db_addr, &srq->db);
if (err)
goto err_mtt;
} else {
@@ -203,7 +205,8 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
err_wrid:
if (rdma_is_user_pd(pd))
- mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context), &srq->db);
+ mlx4_ib_db_unmap_user(to_mucontext(rdma_udata_context(udata)),
+ &srq->db);
else
kvfree(srq->wrid);
@@ -1075,7 +1075,8 @@ int mlx5_ib_dealloc_mw(struct ib_mw *mw);
int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
int page_shift, int flags);
struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
- int access_flags);
+ int access_flags,
+ struct ib_udata *udata);
void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *mr);
int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
u64 length, u64 virt_addr, int access_flags,
@@ -847,7 +847,7 @@ static int mr_cache_max_order(struct mlx5_ib_dev *dev)
static int mr_umem_get(struct ib_pd *pd, u64 start, u64 length,
int access_flags, struct ib_umem **umem,
int *npages, int *page_shift, int *ncont,
- int *order)
+ int *order, struct ib_udata *udata)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
struct ib_umem *u;
@@ -855,7 +855,8 @@ static int mr_umem_get(struct ib_pd *pd, u64 start, u64 length,
*umem = NULL;
- u = ib_umem_get(pd->uobject->context, start, length, access_flags, 0);
+ u = ib_umem_get(rdma_udata_context(udata), start, length, access_flags,
+ 0);
err = PTR_ERR_OR_ZERO(u);
if (err) {
mlx5_ib_dbg(dev, "umem get failed (%d)\n", err);
@@ -1319,7 +1320,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
!(dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
return ERR_PTR(-EINVAL);
- mr = mlx5_ib_alloc_implicit_mr(to_mpd(pd), access_flags);
+ mr = mlx5_ib_alloc_implicit_mr(to_mpd(pd), access_flags, udata);
if (IS_ERR(mr))
return ERR_CAST(mr);
return &mr->ibmr;
@@ -1327,7 +1328,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
#endif
err = mr_umem_get(pd, start, length, access_flags, &umem, &npages,
- &page_shift, &ncont, &order);
+ &page_shift, &ncont, &order, udata);
if (err < 0)
return ERR_PTR(err);
@@ -1478,7 +1479,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
ib_umem_release(mr->umem);
mr->umem = NULL;
err = mr_umem_get(pd, addr, len, access_flags, &mr->umem,
- &npages, &page_shift, &ncont, &order);
+ &npages, &page_shift, &ncont, &order, udata);
if (err)
goto err;
}
@@ -446,9 +446,10 @@ static struct ib_umem_odp *implicit_mr_get_data(struct mlx5_ib_mr *mr,
}
struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
- int access_flags)
+ int access_flags,
+ struct ib_udata *udata)
{
- struct ib_ucontext *ctx = pd->ibpd.uobject->context;
+ struct ib_ucontext *ctx = rdma_udata_context(udata);
struct mlx5_ib_mr *imr;
struct ib_umem *umem;
@@ -665,11 +665,11 @@ static int mlx5_ib_umem_get(struct mlx5_ib_dev *dev,
unsigned long addr, size_t size,
struct ib_umem **umem,
int *npages, int *page_shift, int *ncont,
- u32 *offset)
+ u32 *offset, struct ib_udata *udata)
{
int err;
- *umem = ib_umem_get(pd->uobject->context, addr, size, 0, 0);
+ *umem = ib_umem_get(rdma_udata_context(udata), addr, size, 0, 0);
if (IS_ERR(*umem)) {
mlx5_ib_dbg(dev, "umem_get failed\n");
return PTR_ERR(*umem);
@@ -696,14 +696,14 @@ static int mlx5_ib_umem_get(struct mlx5_ib_dev *dev,
}
static void destroy_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd,
- struct mlx5_ib_rwq *rwq)
+ struct mlx5_ib_rwq *rwq, struct ib_udata *udata)
{
struct mlx5_ib_ucontext *context;
if (rwq->create_flags & MLX5_IB_WQ_FLAGS_DELAY_DROP)
atomic_dec(&dev->delay_drop.rqs_cnt);
- context = to_mucontext(pd->uobject->context);
+ context = to_mucontext(rdma_udata_context(udata));
mlx5_ib_db_unmap_user(context, &rwq->db);
if (rwq->umem)
ib_umem_release(rwq->umem);
@@ -711,7 +711,8 @@ static void destroy_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd,
static int create_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd,
struct mlx5_ib_rwq *rwq,
- struct mlx5_ib_create_wq *ucmd)
+ struct mlx5_ib_create_wq *ucmd,
+ struct ib_udata *udata)
{
struct mlx5_ib_ucontext *context;
int page_shift = 0;
@@ -723,8 +724,8 @@ static int create_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd,
if (!ucmd->buf_addr)
return -EINVAL;
- context = to_mucontext(pd->uobject->context);
- rwq->umem = ib_umem_get(pd->uobject->context, ucmd->buf_addr,
+ context = to_mucontext(rdma_udata_context(udata));
+ rwq->umem = ib_umem_get(rdma_udata_context(udata), ucmd->buf_addr,
rwq->buf_size, 0, 0);
if (IS_ERR(rwq->umem)) {
mlx5_ib_dbg(dev, "umem_get failed\n");
@@ -797,7 +798,7 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
return err;
}
- context = to_mucontext(pd->uobject->context);
+ context = to_mucontext(rdma_udata_context(udata));
if (ucmd.flags & MLX5_QP_FLAG_BFREG_INDEX) {
uar_index = bfregn_to_uar_index(dev, &context->bfregi,
ucmd.bfreg_index, true);
@@ -836,7 +837,7 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
err = mlx5_ib_umem_get(dev, pd, ubuffer->buf_addr,
ubuffer->buf_size,
&ubuffer->umem, &npages, &page_shift,
- &ncont, &offset);
+ &ncont, &offset, udata);
if (err)
goto err_bfreg;
} else {
@@ -900,11 +901,12 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
}
static void destroy_qp_user(struct mlx5_ib_dev *dev, struct ib_pd *pd,
- struct mlx5_ib_qp *qp, struct mlx5_ib_qp_base *base)
+ struct mlx5_ib_qp *qp, struct mlx5_ib_qp_base *base,
+ struct ib_udata *udata)
{
struct mlx5_ib_ucontext *context;
- context = to_mucontext(pd->uobject->context);
+ context = to_mucontext(rdma_udata_context(udata));
mlx5_ib_db_unmap_user(context, &qp->db);
if (base->ubuffer.umem)
ib_umem_release(base->ubuffer.umem);
@@ -1090,7 +1092,8 @@ static void destroy_flow_rule_vport_sq(struct mlx5_ib_dev *dev,
static int create_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
struct mlx5_ib_sq *sq, void *qpin,
- struct ib_pd *pd)
+ struct ib_pd *pd,
+ struct ib_udata *udata)
{
struct mlx5_ib_ubuffer *ubuffer = &sq->ubuffer;
__be64 *pas;
@@ -1107,7 +1110,7 @@ static int create_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
err = mlx5_ib_umem_get(dev, pd, ubuffer->buf_addr, ubuffer->buf_size,
&sq->ubuffer.umem, &npages, &page_shift,
- &ncont, &offset);
+ &ncont, &offset, udata);
if (err)
return err;
@@ -1332,8 +1335,7 @@ static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
- struct ib_uobject *uobj = pd->uobject;
- struct ib_ucontext *ucontext = uobj->context;
+ struct ib_ucontext *ucontext = rdma_udata_context(udata);
struct mlx5_ib_ucontext *mucontext = to_mucontext(ucontext);
int err;
u32 tdn = mucontext->tdn;
@@ -1344,7 +1346,7 @@ static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
if (err)
return err;
- err = create_raw_packet_qp_sq(dev, sq, in, pd);
+ err = create_raw_packet_qp_sq(dev, sq, in, pd, udata);
if (err)
goto err_destroy_tis;
@@ -1448,8 +1450,7 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata)
{
- struct ib_uobject *uobj = pd->uobject;
- struct ib_ucontext *ucontext = uobj->context;
+ struct ib_ucontext *ucontext = rdma_udata_context(udata);
struct mlx5_ib_ucontext *mucontext = to_mucontext(ucontext);
struct mlx5_ib_create_qp_resp resp = {};
int inlen;
@@ -1781,7 +1782,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
return -EFAULT;
}
- err = get_qp_user_index(to_mucontext(pd->uobject->context),
+ err = get_qp_user_index(to_mucontext(rdma_udata_context(udata)),
&ucmd, udata->inlen, &uidx);
if (err)
return err;
@@ -2048,7 +2049,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
err_create:
if (qp->create_type == MLX5_QP_USER)
- destroy_qp_user(dev, pd, qp, base);
+ destroy_qp_user(dev, pd, qp, base, udata);
else if (qp->create_type == MLX5_QP_KERNEL)
destroy_qp_kernel(dev, qp);
@@ -2159,7 +2160,8 @@ static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
const struct mlx5_modify_raw_qp_param *raw_qp_param,
u8 lag_tx_affinity);
-static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
+static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
+ struct ib_udata *udata)
{
struct mlx5_ib_cq *send_cq, *recv_cq;
struct mlx5_ib_qp_base *base;
@@ -2230,7 +2232,7 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
if (qp->create_type == MLX5_QP_KERNEL)
destroy_qp_kernel(dev, qp);
else if (qp->create_type == MLX5_QP_USER)
- destroy_qp_user(dev, &get_pd(qp)->ibpd, qp, base);
+ destroy_qp_user(dev, &get_pd(qp)->ibpd, qp, base, udata);
}
static const char *ib_qp_type_str(enum ib_qp_type type)
@@ -2268,7 +2270,8 @@ static const char *ib_qp_type_str(enum ib_qp_type type)
static struct ib_qp *mlx5_ib_create_dct(struct ib_pd *pd,
struct ib_qp_init_attr *attr,
- struct mlx5_ib_create_qp *ucmd)
+ struct mlx5_ib_create_qp *ucmd,
+ struct ib_udata *udata)
{
struct mlx5_ib_qp *qp;
int err = 0;
@@ -2278,7 +2281,7 @@ static struct ib_qp *mlx5_ib_create_dct(struct ib_pd *pd,
if (!attr->srq || !attr->recv_cq)
return ERR_PTR(-EINVAL);
- err = get_qp_user_index(to_mucontext(pd->uobject->context),
+ err = get_qp_user_index(to_mucontext(rdma_udata_context(udata)),
ucmd, sizeof(*ucmd), &uidx);
if (err)
return ERR_PTR(err);
@@ -2366,7 +2369,9 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
if (!rdma_is_user_pd(pd)) {
mlx5_ib_dbg(dev, "Raw Packet QP is not supported for kernel consumers\n");
return ERR_PTR(-EINVAL);
- } else if (!to_mucontext(pd->uobject->context)->cqe_version) {
+ } else if (!to_mucontext(
+ rdma_udata_context(
+ udata))->cqe_version) {
mlx5_ib_dbg(dev, "Raw Packet QP is only supported for CQE version > 0\n");
return ERR_PTR(-EINVAL);
}
@@ -2398,7 +2403,7 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
return ERR_PTR(-EINVAL);
}
} else {
- return mlx5_ib_create_dct(pd, init_attr, &ucmd);
+ return mlx5_ib_create_dct(pd, init_attr, &ucmd, udata);
}
}
@@ -2500,7 +2505,7 @@ int mlx5_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
if (mqp->qp_sub_type == MLX5_IB_QPT_DCT)
return mlx5_ib_destroy_dct(mqp);
- destroy_qp_common(dev, mqp);
+ destroy_qp_common(dev, mqp, udata);
kfree(mqp);
@@ -3023,13 +3028,14 @@ static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
static unsigned int get_tx_affinity(struct mlx5_ib_dev *dev,
struct mlx5_ib_pd *pd,
struct mlx5_ib_qp_base *qp_base,
- u8 port_num)
+ u8 port_num,
+ struct ib_udata *udata)
{
struct mlx5_ib_ucontext *ucontext = NULL;
unsigned int tx_port_affinity;
- if (pd && pd->ibpd.uobject && pd->ibpd.uobject->context)
- ucontext = to_mucontext(pd->ibpd.uobject->context);
+ if (udata && _rdma_udata_context(udata, false))
+ ucontext = to_mucontext(rdma_udata_context(udata));
if (ucontext) {
tx_port_affinity = (unsigned int)atomic_add_return(
@@ -3054,7 +3060,8 @@ static unsigned int get_tx_affinity(struct mlx5_ib_dev *dev,
static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
const struct ib_qp_attr *attr, int attr_mask,
enum ib_qp_state cur_state, enum ib_qp_state new_state,
- const struct mlx5_ib_modify_qp *ucmd)
+ const struct mlx5_ib_modify_qp *ucmd,
+ struct ib_udata *udata)
{
static const u16 optab[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE] = {
[MLX5_QP_STATE_RST] = {
@@ -3145,7 +3152,8 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
(ibqp->qp_type == IB_QPT_XRC_TGT)) {
if (mlx5_lag_is_active(dev->mdev)) {
u8 p = mlx5_core_native_port_num(dev->mdev);
- tx_affinity = get_tx_affinity(dev, pd, base, p);
+ tx_affinity = get_tx_affinity(dev, pd, base, p,
+ udata);
context->flags |= cpu_to_be32(tx_affinity << 24);
}
}
@@ -3616,7 +3624,7 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
}
err = __mlx5_ib_modify_qp(ibqp, attr, attr_mask, cur_state,
- new_state, &ucmd);
+ new_state, &ucmd, udata);
out:
mutex_unlock(&qp->mutex);
@@ -5584,7 +5592,7 @@ static int prepare_user_rq(struct ib_pd *pd,
return err;
}
- err = create_user_rq(dev, pd, rwq, &ucmd);
+ err = create_user_rq(dev, pd, rwq, &ucmd, udata);
if (err) {
mlx5_ib_dbg(dev, "err %d\n", err);
if (err)
@@ -5648,7 +5656,7 @@ struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
err_copy:
mlx5_core_destroy_rq_tracked(dev->mdev, &rwq->core_qp);
err_user_rq:
- destroy_user_rq(dev, pd, rwq);
+ destroy_user_rq(dev, pd, rwq, udata);
err:
kfree(rwq);
return ERR_PTR(err);
@@ -5660,7 +5668,7 @@ int mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata)
struct mlx5_ib_rwq *rwq = to_mrwq(wq);
mlx5_core_destroy_rq_tracked(dev->mdev, &rwq->core_qp);
- destroy_user_rq(dev, wq->pd, rwq);
+ destroy_user_rq(dev, wq->pd, rwq, udata);
kfree(rwq);
return 0;
@@ -102,16 +102,17 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
return -EINVAL;
if (in->type != IB_SRQT_BASIC) {
- err = get_srq_user_index(to_mucontext(pd->uobject->context),
- &ucmd, udata->inlen, &uidx);
+ err = get_srq_user_index(
+ to_mucontext(rdma_udata_context(udata)),
+ &ucmd, udata->inlen, &uidx);
if (err)
return err;
}
srq->wq_sig = !!(ucmd.flags & MLX5_SRQ_FLAG_SIGNATURE);
- srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, buf_size,
- 0, 0);
+ srq->umem = ib_umem_get(rdma_udata_context(udata), ucmd.buf_addr,
+ buf_size, 0, 0);
if (IS_ERR(srq->umem)) {
mlx5_ib_dbg(dev, "failed umem get, size %d\n", buf_size);
err = PTR_ERR(srq->umem);
@@ -135,7 +136,7 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
mlx5_ib_populate_pas(dev, srq->umem, page_shift, in->pas, 0);
- err = mlx5_ib_db_map_user(to_mucontext(pd->uobject->context),
+ err = mlx5_ib_db_map_user(to_mucontext(rdma_udata_context(udata)),
ucmd.db_addr, &srq->db);
if (err) {
mlx5_ib_dbg(dev, "map doorbell failed\n");
@@ -222,9 +223,11 @@ static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
return err;
}
-static void destroy_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq)
+static void destroy_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
+ struct ib_udata *udata)
{
- mlx5_ib_db_unmap_user(to_mucontext(pd->uobject->context), &srq->db);
+ mlx5_ib_db_unmap_user(to_mucontext(rdma_udata_context(udata)),
+ &srq->db);
ib_umem_release(srq->umem);
}
@@ -355,7 +358,7 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
err_usr_kern_srq:
if (rdma_is_user_pd(pd))
- destroy_srq_user(pd, srq);
+ destroy_srq_user(pd, srq, udata);
else
destroy_srq_kernel(dev, srq);
@@ -510,7 +510,8 @@ int mthca_alloc_cq_buf(struct mthca_dev *dev, struct mthca_cq_buf *buf, int nent
void mthca_free_cq_buf(struct mthca_dev *dev, struct mthca_cq_buf *buf, int cqe);
int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
- struct ib_srq_attr *attr, struct mthca_srq *srq);
+ struct ib_srq_attr *attr, struct mthca_srq *srq,
+ struct ib_udata *udata);
void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq);
int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
@@ -456,7 +456,7 @@ static struct ib_srq *mthca_create_srq(struct ib_pd *pd,
return ERR_PTR(-ENOMEM);
if (rdma_is_user_pd(pd)) {
- context = to_mucontext(pd->uobject->context);
+ context = to_mucontext(rdma_udata_context(udata));
if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
err = -EFAULT;
@@ -475,7 +475,7 @@ static struct ib_srq *mthca_create_srq(struct ib_pd *pd,
}
err = mthca_alloc_srq(to_mdev(pd->device), to_mpd(pd),
- &init_attr->attr, srq);
+ &init_attr->attr, srq, udata);
if (err && rdma_is_user_pd(pd))
mthca_unmap_user_db(to_mdev(pd->device), &context->uar,
@@ -538,7 +538,7 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
return ERR_PTR(-ENOMEM);
if (rdma_is_user_pd(pd)) {
- context = to_mucontext(pd->uobject->context);
+ context = to_mucontext(rdma_udata_context(udata));
if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
kfree(qp);
@@ -577,7 +577,7 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
&init_attr->cap, qp);
if (err && rdma_is_user_pd(pd)) {
- context = to_mucontext(pd->uobject->context);
+ context = to_mucontext(rdma_udata_context(udata));
mthca_unmap_user_db(to_mdev(pd->device),
&context->uar,
@@ -916,12 +916,12 @@ static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
int write_mtt_size;
if (udata->inlen < sizeof ucmd) {
- if (!to_mucontext(pd->uobject->context)->reg_mr_warned) {
+ if (!to_mucontext(rdma_udata_context(udata))->reg_mr_warned) {
mthca_warn(dev, "Process '%s' did not pass in MR attrs.\n",
current->comm);
mthca_warn(dev, " Update libmthca to fix this.\n");
}
- ++to_mucontext(pd->uobject->context)->reg_mr_warned;
+ ++to_mucontext(rdma_udata_context(udata))->reg_mr_warned;
ucmd.mr_attrs = 0;
} else if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
return ERR_PTR(-EFAULT);
@@ -930,7 +930,7 @@ static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
if (!mr)
return ERR_PTR(-ENOMEM);
- mr->umem = ib_umem_get(pd->uobject->context, start, length, acc,
+ mr->umem = ib_umem_get(rdma_udata_context(udata), start, length, acc,
ucmd.mr_attrs & MTHCA_MR_DMASYNC);
if (IS_ERR(mr->umem)) {
@@ -92,10 +92,12 @@ static inline int *wqe_to_link(void *wqe)
return (int *) (wqe + offsetof(struct mthca_next_seg, imm));
}
-static void mthca_tavor_init_srq_context(struct mthca_dev *dev,
- struct mthca_pd *pd,
- struct mthca_srq *srq,
- struct mthca_tavor_srq_context *context)
+static void
+mthca_tavor_init_srq_context(struct mthca_dev *dev,
+ struct mthca_pd *pd,
+ struct mthca_srq *srq,
+ struct mthca_tavor_srq_context *context,
+ struct ib_udata *udata)
{
memset(context, 0, sizeof *context);
@@ -103,17 +105,21 @@ static void mthca_tavor_init_srq_context(struct mthca_dev *dev,
context->state_pd = cpu_to_be32(pd->pd_num);
context->lkey = cpu_to_be32(srq->mr.ibmr.lkey);
- if (pd->ibpd.uobject)
+ if (udata)
context->uar =
- cpu_to_be32(to_mucontext(pd->ibpd.uobject->context)->uar.index);
+ cpu_to_be32(
+ to_mucontext(
+ rdma_udata_context(udata))->uar.index);
else
context->uar = cpu_to_be32(dev->driver_uar.index);
}
-static void mthca_arbel_init_srq_context(struct mthca_dev *dev,
- struct mthca_pd *pd,
- struct mthca_srq *srq,
- struct mthca_arbel_srq_context *context)
+static void
+mthca_arbel_init_srq_context(struct mthca_dev *dev,
+ struct mthca_pd *pd,
+ struct mthca_srq *srq,
+ struct mthca_arbel_srq_context *context,
+ struct ib_udata *udata)
{
int logsize, max;
@@ -129,9 +135,11 @@ static void mthca_arbel_init_srq_context(struct mthca_dev *dev,
context->lkey = cpu_to_be32(srq->mr.ibmr.lkey);
context->db_index = cpu_to_be32(srq->db_index);
context->logstride_usrpage = cpu_to_be32((srq->wqe_shift - 4) << 29);
- if (pd->ibpd.uobject)
+ if (udata)
context->logstride_usrpage |=
- cpu_to_be32(to_mucontext(pd->ibpd.uobject->context)->uar.index);
+ cpu_to_be32(
+ to_mucontext(
+ rdma_udata_context(udata))->uar.index);
else
context->logstride_usrpage |= cpu_to_be32(dev->driver_uar.index);
context->eq_pd = cpu_to_be32(MTHCA_EQ_ASYNC << 24 | pd->pd_num);
@@ -197,7 +205,8 @@ static int mthca_alloc_srq_buf(struct mthca_dev *dev, struct mthca_pd *pd,
}
int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
- struct ib_srq_attr *attr, struct mthca_srq *srq)
+ struct ib_srq_attr *attr, struct mthca_srq *srq,
+ struct ib_udata *udata)
{
struct mthca_mailbox *mailbox;
int ds;
@@ -261,9 +270,9 @@ int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
mutex_init(&srq->mutex);
if (mthca_is_memfree(dev))
- mthca_arbel_init_srq_context(dev, pd, srq, mailbox->buf);
+ mthca_arbel_init_srq_context(dev, pd, srq, mailbox->buf, udata);
else
- mthca_tavor_init_srq_context(dev, pd, srq, mailbox->buf);
+ mthca_tavor_init_srq_context(dev, pd, srq, mailbox->buf, udata);
err = mthca_SW2HW_SRQ(dev, mailbox, srq->srqn);
@@ -734,8 +734,8 @@ static int nes_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
struct nes_device *nesdev = nesvnic->nesdev;
struct nes_adapter *nesadapter = nesdev->nesadapter;
- if (rdma_is_user_pd(ibpd) && (ibpd->uobject->context)) {
- nesucontext = to_nesucontext(ibpd->uobject->context);
+ if (rdma_is_user_pd(ibpd) && _rdma_udata_context(udata, false)) {
+ nesucontext = to_nesucontext(rdma_udata_context(udata));
nes_debug(NES_DBG_PD, "Clearing bit %u from allocated doorbells\n",
nespd->mmap_db_index);
clear_bit(nespd->mmap_db_index, nesucontext->allocated_doorbells);
@@ -1068,9 +1068,11 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
if (req.user_qp_buffer)
nesqp->nesuqp_addr = req.user_qp_buffer;
if (rdma_is_user_pd(ibpd) &&
- (ibpd->uobject->context)) {
+ _rdma_udata_context(udata, false)) {
nesqp->user_mode = 1;
- nes_ucontext = to_nesucontext(ibpd->uobject->context);
+ nes_ucontext =
+ to_nesucontext(
+ rdma_udata_context(udata));
if (virt_wqs) {
err = 1;
list_for_each_entry(nespbl, &nes_ucontext->qp_reg_mem_list, list) {
@@ -1091,7 +1093,9 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
}
}
- nes_ucontext = to_nesucontext(ibpd->uobject->context);
+ nes_ucontext =
+ to_nesucontext(
+ rdma_udata_context(udata));
nesqp->mmap_sq_db_index =
find_next_zero_bit(nes_ucontext->allocated_wqs,
NES_MAX_USER_WQ_REGIONS, nes_ucontext->first_free_wq);
@@ -2136,7 +2140,7 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u8 stag_key;
int first_page = 1;
- region = ib_umem_get(pd->uobject->context, start, length, acc, 0);
+ region = ib_umem_get(rdma_udata_context(udata), start, length, acc, 0);
if (IS_ERR(region)) {
return (struct ib_mr *)region;
}
@@ -2385,7 +2389,8 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
return ERR_PTR(-ENOMEM);
}
nesmr->region = region;
- nes_ucontext = to_nesucontext(pd->uobject->context);
+ nes_ucontext = to_nesucontext(
+ rdma_udata_context(udata));
pbl_depth = region->length >> 12;
pbl_depth += (region->length & (4096-1)) ? 1 : 0;
nespbl->pbl_size = pbl_depth*sizeof(u64);
@@ -928,7 +928,7 @@ struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
mr = kzalloc(sizeof(*mr), GFP_KERNEL);
if (!mr)
return ERR_PTR(status);
- mr->umem = ib_umem_get(ibpd->uobject->context, start, len, acc, 0);
+ mr->umem = ib_umem_get(rdma_udata_context(udata), start, len, acc, 0);
if (IS_ERR(mr->umem)) {
status = -EFAULT;
goto umem_err;
@@ -1470,8 +1470,8 @@ struct ib_srq *qedr_create_srq(struct ib_pd *ibpd,
hw_srq->max_wr = init_attr->attr.max_wr;
hw_srq->max_sges = init_attr->attr.max_sge;
- if (udata && ibpd->uobject && ibpd->uobject->context) {
- ib_ctx = ibpd->uobject->context;
+ if (udata && _rdma_udata_context(udata, false)) {
+ ib_ctx = rdma_udata_context(udata);
if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
DP_ERR(dev,
@@ -1715,7 +1715,7 @@ static int qedr_create_user_qp(struct qedr_dev *dev,
int alloc_and_init = rdma_protocol_roce(&dev->ibdev, 1);
int rc = -EINVAL;
- ib_ctx = ibpd->uobject->context;
+ ib_ctx = rdma_udata_context(udata);
memset(&ureq, 0, sizeof(ureq));
rc = ib_copy_from_udata(&ureq, udata, sizeof(ureq));
@@ -2730,7 +2730,7 @@ struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
mr->type = QEDR_MR_USER;
- mr->umem = ib_umem_get(ibpd->uobject->context, start, len, acc, 0);
+ mr->umem = ib_umem_get(rdma_udata_context(udata), start, len, acc, 0);
if (IS_ERR(mr->umem)) {
rc = -EFAULT;
goto err0;
@@ -501,7 +501,7 @@ struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd,
usnic_dbg("\n");
- ucontext = to_uucontext(pd->uobject->context);
+ ucontext = to_uucontext(rdma_udata_context(udata));
us_ibdev = to_usdev(pd->device);
if (init_attr->create_flags)
@@ -126,7 +126,7 @@ struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
return ERR_PTR(-EINVAL);
}
- umem = ib_umem_get(pd->uobject->context, start,
+ umem = ib_umem_get(rdma_udata_context(udata), start,
length, access_flags, 0);
if (IS_ERR(umem)) {
dev_warn(&dev->pdev->dev,
@@ -262,9 +262,10 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
if (!is_srq) {
/* set qp->sq.wqe_cnt, shift, buf_size.. */
- qp->rumem = ib_umem_get(pd->uobject->context,
- ucmd.rbuf_addr,
- ucmd.rbuf_size, 0, 0);
+ qp->rumem = ib_umem_get(
+ rdma_udata_context(udata),
+ ucmd.rbuf_addr,
+ ucmd.rbuf_size, 0, 0);
if (IS_ERR(qp->rumem)) {
ret = PTR_ERR(qp->rumem);
goto err_qp;
@@ -275,7 +276,7 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
qp->srq = to_vsrq(init_attr->srq);
}
- qp->sumem = ib_umem_get(pd->uobject->context,
+ qp->sumem = ib_umem_get(rdma_udata_context(udata),
ucmd.sbuf_addr,
ucmd.sbuf_size, 0, 0);
if (IS_ERR(qp->sumem)) {
@@ -153,7 +153,7 @@ struct ib_srq *pvrdma_create_srq(struct ib_pd *pd,
goto err_srq;
}
- srq->umem = ib_umem_get(pd->uobject->context,
+ srq->umem = ib_umem_get(rdma_udata_context(udata),
ucmd.buf_addr,
ucmd.buf_size, 0, 0);
if (IS_ERR(srq->umem)) {
@@ -388,7 +388,7 @@ struct ib_mr *rvt_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
if (length == 0)
return ERR_PTR(-EINVAL);
- umem = ib_umem_get(pd->uobject->context, start, length,
+ umem = ib_umem_get(rdma_udata_context(udata), start, length,
mr_access_flags, 0);
if (IS_ERR(umem))
return (void *)umem;
@@ -1120,9 +1120,10 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
} else {
u32 s = sizeof(struct rvt_rwq) + qp->r_rq.size * sz;
- qp->ip = rvt_create_mmap_info(rdi, s,
- ibpd->uobject->context,
- qp->r_rq.wq);
+ qp->ip = rvt_create_mmap_info(
+ rdi, s,
+ rdma_udata_context(udata),
+ qp->r_rq.wq);
if (!qp->ip) {
ret = ERR_PTR(-ENOMEM);
goto bail_qpn;
@@ -119,7 +119,7 @@ struct ib_srq *rvt_create_srq(struct ib_pd *ibpd,
u32 s = sizeof(struct rvt_rwq) + srq->rq.size * sz;
srq->ip =
- rvt_create_mmap_info(dev, s, ibpd->uobject->context,
+ rvt_create_mmap_info(dev, s, rdma_udata_context(udata),
srq->rq.wq);
if (!srq->ip) {
ret = ERR_PTR(-ENOMEM);
@@ -157,7 +157,8 @@ int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init);
int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
struct ib_qp_init_attr *init,
struct rxe_create_qp_resp __user *uresp,
- struct ib_pd *ibpd);
+ struct ib_pd *ibpd,
+ struct ib_udata *udata);
int rxe_qp_to_init(struct rxe_qp *qp, struct ib_qp_init_attr *init);
@@ -171,7 +171,8 @@ int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
void *vaddr;
int err;
- umem = ib_umem_get(pd->ibpd.uobject->context, start, length, access, 0);
+ umem = ib_umem_get(rdma_udata_context(udata), start, length, access,
+ 0);
if (IS_ERR(umem)) {
pr_warn("err %d from rxe_umem_get\n",
(int)PTR_ERR(umem));
@@ -336,14 +336,15 @@ static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
struct ib_qp_init_attr *init,
struct rxe_create_qp_resp __user *uresp,
- struct ib_pd *ibpd)
+ struct ib_pd *ibpd,
+ struct ib_udata *udata)
{
int err;
struct rxe_cq *rcq = to_rcq(init->recv_cq);
struct rxe_cq *scq = to_rcq(init->send_cq);
struct rxe_srq *srq = init->srq ? to_rsrq(init->srq) : NULL;
struct ib_ucontext *context = rdma_is_user_pd(ibpd) ?
- ibpd->uobject->context : NULL;
+ rdma_udata_context(udata) : NULL;
rxe_add_ref(pd);
rxe_add_ref(rcq);
@@ -342,7 +342,7 @@ static struct ib_srq *rxe_create_srq(struct ib_pd *ibpd,
struct rxe_dev *rxe = to_rdev(ibpd->device);
struct rxe_pd *pd = to_rpd(ibpd);
struct rxe_srq *srq;
- struct ib_ucontext *context = udata ? ibpd->uobject->context : NULL;
+ struct ib_ucontext *context = udata ? rdma_udata_context(udata) : NULL;
struct rxe_create_srq_resp __user *uresp = NULL;
if (udata) {
@@ -498,7 +498,7 @@ static struct ib_qp *rxe_create_qp(struct ib_pd *ibpd,
rxe_add_index(qp);
- err = rxe_qp_from_init(rxe, qp, pd, init, uresp, ibpd);
+ err = rxe_qp_from_init(rxe, qp, pd, init, uresp, ibpd, udata);
if (err)
goto err3;

Prepare the code for the shared ib_x model. A future patch will remove
the ucontext information from ib_pd. Prior patches added the ucontext to
ib_udata and used udata to convey the ucontext to the core, sw and
driver layers. Stop using the ucontext from ib_pd.

Signed-off-by: Shamir Rabinovitch <shamir.rabinovitch@oracle.com>
---
 drivers/infiniband/hw/bnxt_re/ib_verbs.c      |  8 +-
 drivers/infiniband/hw/cxgb3/iwch_provider.c   |  5 +-
 drivers/infiniband/hw/cxgb4/mem.c             |  3 +-
 drivers/infiniband/hw/cxgb4/qp.c              |  4 +-
 drivers/infiniband/hw/i40iw/i40iw_verbs.c     |  9 ++-
 drivers/infiniband/hw/mlx4/mr.c               |  2 +-
 drivers/infiniband/hw/mlx4/qp.c               | 18 +++--
 drivers/infiniband/hw/mlx4/srq.c              | 11 ++-
 drivers/infiniband/hw/mlx5/mlx5_ib.h          |  3 +-
 drivers/infiniband/hw/mlx5/mr.c               | 11 +--
 drivers/infiniband/hw/mlx5/odp.c              |  5 +-
 drivers/infiniband/hw/mlx5/qp.c               | 80 ++++++++++---------
 drivers/infiniband/hw/mlx5/srq.c              | 19 +++--
 drivers/infiniband/hw/mthca/mthca_dev.h       |  3 +-
 drivers/infiniband/hw/mthca/mthca_provider.c  | 14 ++--
 drivers/infiniband/hw/mthca/mthca_srq.c       | 39 +++++----
 drivers/infiniband/hw/nes/nes_verbs.c         | 19 +++--
 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c   |  2 +-
 drivers/infiniband/hw/qedr/verbs.c            |  8 +-
 drivers/infiniband/hw/usnic/usnic_ib_verbs.c  |  2 +-
 drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c  |  2 +-
 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c  |  9 ++-
 drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c |  2 +-
 drivers/infiniband/sw/rdmavt/mr.c             |  2 +-
 drivers/infiniband/sw/rdmavt/qp.c             |  7 +-
 drivers/infiniband/sw/rdmavt/srq.c            |  2 +-
 drivers/infiniband/sw/rxe/rxe_loc.h           |  3 +-
 drivers/infiniband/sw/rxe/rxe_mr.c            |  3 +-
 drivers/infiniband/sw/rxe/rxe_qp.c            |  5 +-
 drivers/infiniband/sw/rxe/rxe_verbs.c         |  4 +-
 30 files changed, 174 insertions(+), 130 deletions(-)
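
Note for reviewers who have not followed the earlier patches in this
series: every conversion above leans on a helper that recovers the
attached ib_ucontext from the ib_udata which the uverbs layer now
carries. The sketch below is only an illustration of what such a helper
could look like, assuming the prior patches added a 'context' pointer to
struct ib_udata; the field name, the strict/non-strict split and the
WARN_ON policy shown here are assumptions for illustration, not the
definitive implementation:

/*
 * Illustrative sketch only -- not part of this patch.  Assumes an
 * earlier patch in the series added a 'context' member to
 * struct ib_udata.
 */
static inline struct ib_ucontext *_rdma_udata_context(struct ib_udata *udata,
						      bool strict)
{
	/* Non-strict callers merely probe for a ucontext and accept NULL. */
	if (!udata || !udata->context) {
		WARN_ON(strict);
		return NULL;
	}

	return udata->context;
}

#define rdma_udata_context(udata) _rdma_udata_context(udata, true)

With a helper along these lines, the repeated pd->uobject->context
dereferences collapse to rdma_udata_context(udata), while call sites
that must tolerate a missing ucontext (for example nes_dealloc_pd() and
get_tx_affinity() above) can use the non-strict
_rdma_udata_context(udata, false) form.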
Prepare the code for shared ib_x model. Future patch will remove the ucontext information from ib_pd. Prior patches added ucontext to ib_udata and used udata to convey ucontext to core, sw and driver layers. Stop using ucontext from ib_pd. Signed-off-by: Shamir Rabinovitch <shamir.rabinovitch@oracle.com> --- drivers/infiniband/hw/bnxt_re/ib_verbs.c | 8 +- drivers/infiniband/hw/cxgb3/iwch_provider.c | 5 +- drivers/infiniband/hw/cxgb4/mem.c | 3 +- drivers/infiniband/hw/cxgb4/qp.c | 4 +- drivers/infiniband/hw/i40iw/i40iw_verbs.c | 9 ++- drivers/infiniband/hw/mlx4/mr.c | 2 +- drivers/infiniband/hw/mlx4/qp.c | 18 +++-- drivers/infiniband/hw/mlx4/srq.c | 11 ++- drivers/infiniband/hw/mlx5/mlx5_ib.h | 3 +- drivers/infiniband/hw/mlx5/mr.c | 11 +-- drivers/infiniband/hw/mlx5/odp.c | 5 +- drivers/infiniband/hw/mlx5/qp.c | 80 ++++++++++--------- drivers/infiniband/hw/mlx5/srq.c | 19 +++-- drivers/infiniband/hw/mthca/mthca_dev.h | 3 +- drivers/infiniband/hw/mthca/mthca_provider.c | 14 ++-- drivers/infiniband/hw/mthca/mthca_srq.c | 39 +++++---- drivers/infiniband/hw/nes/nes_verbs.c | 19 +++-- drivers/infiniband/hw/ocrdma/ocrdma_verbs.c | 2 +- drivers/infiniband/hw/qedr/verbs.c | 8 +- drivers/infiniband/hw/usnic/usnic_ib_verbs.c | 2 +- drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c | 2 +- drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c | 9 ++- drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c | 2 +- drivers/infiniband/sw/rdmavt/mr.c | 2 +- drivers/infiniband/sw/rdmavt/qp.c | 7 +- drivers/infiniband/sw/rdmavt/srq.c | 2 +- drivers/infiniband/sw/rxe/rxe_loc.h | 3 +- drivers/infiniband/sw/rxe/rxe_mr.c | 3 +- drivers/infiniband/sw/rxe/rxe_qp.c | 5 +- drivers/infiniband/sw/rxe/rxe_verbs.c | 4 +- 30 files changed, 174 insertions(+), 130 deletions(-)