@@ -698,7 +698,7 @@ struct ib_ah *bnxt_re_create_ah(struct ib_pd *ib_pd,
ah->qplib_ah.flow_label = grh->flow_label;
ah->qplib_ah.hop_limit = grh->hop_limit;
ah->qplib_ah.sl = rdma_ah_get_sl(ah_attr);
- if (ib_pd->uobject &&
+ if (rdma_is_user_pd(ib_pd) &&
!rdma_is_multicast_addr((struct in6_addr *)
grh->dgid.raw) &&
!rdma_link_local_addr((struct in6_addr *)
@@ -729,7 +729,7 @@ struct ib_ah *bnxt_re_create_ah(struct ib_pd *ib_pd,
}
/* Write AVID to shared page. */
- if (ib_pd->uobject) {
+ if (rdma_is_user_pd(ib_pd)) {
struct ib_ucontext *ib_uctx = ib_pd->uobject->context;
struct bnxt_re_ucontext *uctx;
unsigned long flag;
@@ -4003,7 +4003,8 @@ static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp)
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
int ret;
- ret = hns_roce_v2_destroy_qp_common(hr_dev, hr_qp, !!ibqp->pd->uobject);
+ ret = hns_roce_v2_destroy_qp_common(hr_dev, hr_qp,
+ rdma_is_user_pd(ibqp->pd));
if (ret) {
dev_err(hr_dev->dev, "Destroy qp failed(%d)\n", ret);
return ret;
@@ -562,7 +562,8 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
else
hr_qp->sq_signal_bits = cpu_to_le32(IB_SIGNAL_REQ_WR);
- ret = hns_roce_set_rq_size(hr_dev, &init_attr->cap, !!ib_pd->uobject,
+ ret = hns_roce_set_rq_size(hr_dev, &init_attr->cap,
+ rdma_is_user_pd(ib_pd),
!!init_attr->srq, hr_qp);
if (ret) {
dev_err(dev, "hns_roce_set_rq_size failed\n");
@@ -599,7 +600,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
init_attr->cap.max_recv_sge];
}
- if (ib_pd->uobject) {
+ if (rdma_is_user_pd(ib_pd)) {
if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
dev_err(dev, "ib_copy_from_udata error for create qp\n");
ret = -EFAULT;
@@ -784,7 +785,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
else
hr_qp->doorbell_qpn = cpu_to_le64(hr_qp->qpn);
- if (ib_pd->uobject && (udata->outlen >= sizeof(resp)) &&
+ if (rdma_is_user_pd(ib_pd) && (udata->outlen >= sizeof(resp)) &&
(hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)) {
/* indicate kernel supports rq record db */
@@ -811,7 +812,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
hns_roce_release_range_qp(hr_dev, qpn, 1);
err_wrid:
- if (ib_pd->uobject) {
+ if (rdma_is_user_pd(ib_pd)) {
if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
(udata->outlen >= sizeof(resp)) &&
hns_roce_qp_has_rq(init_attr))
@@ -824,7 +825,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
}
err_sq_dbmap:
- if (ib_pd->uobject)
+ if (rdma_is_user_pd(ib_pd))
if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SQ_RECORD_DB) &&
(udata->inlen >= sizeof(ucmd)) &&
(udata->outlen >= sizeof(resp)) &&
@@ -837,13 +838,13 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
hns_roce_mtt_cleanup(hr_dev, &hr_qp->mtt);
err_buf:
- if (ib_pd->uobject)
+ if (rdma_is_user_pd(ib_pd))
ib_umem_release(hr_qp->umem);
else
hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
err_db:
- if (!ib_pd->uobject && hns_roce_qp_has_rq(init_attr) &&
+ if (!rdma_is_user_pd(ib_pd) && hns_roce_qp_has_rq(init_attr) &&
(hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB))
hns_roce_free_db(hr_dev, &hr_qp->rdb);
@@ -889,7 +890,7 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
}
case IB_QPT_GSI: {
/* Userspace is not allowed to create special QPs: */
- if (pd->uobject) {
+ if (rdma_is_user_pd(pd)) {
dev_err(dev, "not support usr space GSI\n");
return ERR_PTR(-EINVAL);
}
@@ -2090,7 +2090,7 @@ static int i40iw_dereg_mr(struct ib_mr *ib_mr)
ib_umem_release(iwmr->region);
if (iwmr->type != IW_MEMREG_TYPE_MEM) {
- if (ibpd->uobject) {
+ if (rdma_is_user_pd(ibpd)) {
struct i40iw_ucontext *ucontext;
ucontext = to_ucontext(ibpd->uobject->context);
@@ -942,7 +942,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
qp->sq_signal_bits = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);
- if (pd->uobject) {
+ if (rdma_is_user_pd(pd)) {
union {
struct mlx4_ib_create_qp qp;
struct mlx4_ib_create_wq wq;
@@ -991,7 +991,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
qp->flags |= MLX4_IB_QP_SCATTER_FCS;
}
- err = set_rq_size(dev, &init_attr->cap, !!pd->uobject,
+ err = set_rq_size(dev, &init_attr->cap, rdma_is_user_pd(pd),
qp_has_rq(init_attr), qp, qp->inl_recv_sz);
if (err)
goto err;
@@ -1043,7 +1043,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
}
qp->mqp.usage = MLX4_RES_USAGE_USER_VERBS;
} else {
- err = set_rq_size(dev, &init_attr->cap, !!pd->uobject,
+ err = set_rq_size(dev, &init_attr->cap, rdma_is_user_pd(pd),
qp_has_rq(init_attr), qp, 0);
if (err)
goto err;
@@ -1201,13 +1201,13 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
mlx4_mtt_cleanup(dev->dev, &qp->mtt);
err_buf:
- if (pd->uobject)
+ if (rdma_is_user_pd(pd))
ib_umem_release(qp->umem);
else
mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf);
err_db:
- if (!pd->uobject && qp_has_rq(init_attr))
+ if (!rdma_is_user_pd(pd) && qp_has_rq(init_attr))
mlx4_db_free(dev->dev, &qp->db);
err:
@@ -1612,7 +1612,8 @@ static int _mlx4_ib_destroy_qp(struct ib_qp *qp)
struct mlx4_ib_pd *pd;
pd = get_pd(mqp);
- destroy_qp_common(dev, mqp, MLX4_IB_QP_SRC, !!pd->ibpd.uobject);
+ destroy_qp_common(dev, mqp, MLX4_IB_QP_SRC,
+ rdma_is_user_pd(&pd->ibpd));
}
if (is_sqp(dev, mqp))
@@ -105,7 +105,7 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
buf_size = srq->msrq.max * desc_size;
- if (pd->uobject) {
+ if (rdma_is_user_pd(pd)) {
struct mlx4_ib_create_srq ucmd;
if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
@@ -191,7 +191,7 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
srq->msrq.event = mlx4_ib_srq_event;
srq->ibsrq.ext.xrc.srq_num = srq->msrq.srqn;
- if (pd->uobject)
+ if (rdma_is_user_pd(pd))
if (ib_copy_to_udata(udata, &srq->msrq.srqn, sizeof (__u32))) {
err = -EFAULT;
goto err_wrid;
@@ -202,7 +202,7 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
return &srq->ibsrq;
err_wrid:
- if (pd->uobject)
+ if (rdma_is_user_pd(pd))
mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context), &srq->db);
else
kvfree(srq->wrid);
@@ -211,13 +211,13 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
mlx4_mtt_cleanup(dev->dev, &srq->mtt);
err_buf:
- if (pd->uobject)
+ if (rdma_is_user_pd(pd))
ib_umem_release(srq->umem);
else
mlx4_buf_free(dev->dev, buf_size, &srq->buf);
err_db:
- if (!pd->uobject)
+ if (!rdma_is_user_pd(pd))
mlx4_db_free(dev->dev, &srq->db);
err_srq:
@@ -1843,7 +1843,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
}
if (pd) {
- if (pd->uobject) {
+ if (rdma_is_user_pd(pd)) {
__u32 max_wqes =
1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
mlx5_ib_dbg(dev, "requested sq_wqe_count (%d)\n", ucmd.sq_wqe_count);
@@ -2363,7 +2363,7 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
dev = to_mdev(pd->device);
if (init_attr->qp_type == IB_QPT_RAW_PACKET) {
- if (!pd->uobject) {
+ if (!rdma_is_user_pd(pd)) {
mlx5_ib_dbg(dev, "Raw Packet QP is not supported for kernel consumers\n");
return ERR_PTR(-EINVAL);
} else if (!to_mucontext(pd->uobject->context)->cqe_version) {
@@ -287,14 +287,14 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
}
in.type = init_attr->srq_type;
- if (pd->uobject)
+ if (rdma_is_user_pd(pd))
err = create_srq_user(pd, srq, &in, udata, buf_size);
else
err = create_srq_kernel(dev, srq, &in, buf_size);
if (err) {
mlx5_ib_warn(dev, "create srq %s failed, err %d\n",
- pd->uobject ? "user" : "kernel", err);
+ rdma_is_user_pd(pd) ? "user" : "kernel", err);
goto err_srq;
}
@@ -339,7 +339,7 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
srq->msrq.event = mlx5_ib_srq_event;
srq->ibsrq.ext.xrc.srq_num = srq->msrq.srqn;
- if (pd->uobject)
+ if (rdma_is_user_pd(pd))
if (ib_copy_to_udata(udata, &srq->msrq.srqn, sizeof(__u32))) {
mlx5_ib_dbg(dev, "copy to user failed\n");
err = -EFAULT;
@@ -354,7 +354,7 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
mlx5_core_destroy_srq(dev->mdev, &srq->msrq);
err_usr_kern_srq:
- if (pd->uobject)
+ if (rdma_is_user_pd(pd))
destroy_srq_user(pd, srq);
else
destroy_srq_kernel(dev, srq);
@@ -455,7 +455,7 @@ static struct ib_srq *mthca_create_srq(struct ib_pd *pd,
if (!srq)
return ERR_PTR(-ENOMEM);
- if (pd->uobject) {
+ if (rdma_is_user_pd(pd)) {
context = to_mucontext(pd->uobject->context);
if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
@@ -477,7 +477,7 @@ static struct ib_srq *mthca_create_srq(struct ib_pd *pd,
err = mthca_alloc_srq(to_mdev(pd->device), to_mpd(pd),
&init_attr->attr, srq);
- if (err && pd->uobject)
+ if (err && rdma_is_user_pd(pd))
mthca_unmap_user_db(to_mdev(pd->device), &context->uar,
context->db_tab, ucmd.db_index);
@@ -537,7 +537,7 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
if (!qp)
return ERR_PTR(-ENOMEM);
- if (pd->uobject) {
+ if (rdma_is_user_pd(pd)) {
context = to_mucontext(pd->uobject->context);
if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
@@ -576,7 +576,7 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
init_attr->qp_type, init_attr->sq_sig_type,
&init_attr->cap, qp);
- if (err && pd->uobject) {
+ if (err && rdma_is_user_pd(pd)) {
context = to_mucontext(pd->uobject->context);
mthca_unmap_user_db(to_mdev(pd->device),
@@ -596,7 +596,7 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
case IB_QPT_GSI:
{
/* Don't allow userspace to create special QPs */
- if (pd->uobject)
+ if (rdma_is_user_pd(pd))
return ERR_PTR(-EINVAL);
qp = kmalloc(sizeof (struct mthca_sqp), GFP_KERNEL);
@@ -951,7 +951,8 @@ static int mthca_max_data_size(struct mthca_dev *dev, struct mthca_qp *qp, int d
static inline int mthca_max_inline_data(struct mthca_pd *pd, int max_data_size)
{
/* We don't support inline data for kernel QPs (yet). */
- return pd->ibpd.uobject ? max_data_size - MTHCA_INLINE_HEADER_SIZE : 0;
+ return rdma_is_user_pd(&pd->ibpd) ?
+ max_data_size - MTHCA_INLINE_HEADER_SIZE : 0;
}
static void mthca_adjust_qp_caps(struct mthca_dev *dev,
@@ -1048,7 +1049,7 @@ static int mthca_alloc_wqe_buf(struct mthca_dev *dev,
* allocate anything. All we need is to calculate the WQE
* sizes and the send_wqe_offset, so we're done now.
*/
- if (pd->ibpd.uobject)
+ if (rdma_is_user_pd(&pd->ibpd))
return 0;
size = PAGE_ALIGN(qp->send_wqe_offset +
@@ -1191,7 +1192,7 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev,
* will be allocated and buffers will be initialized in
* userspace.
*/
- if (pd->ibpd.uobject)
+ if (rdma_is_user_pd(&pd->ibpd))
return 0;
ret = mthca_alloc_memfree(dev, qp);
@@ -152,7 +152,7 @@ static int mthca_alloc_srq_buf(struct mthca_dev *dev, struct mthca_pd *pd,
int err;
int i;
- if (pd->ibpd.uobject)
+ if (rdma_is_user_pd(&pd->ibpd))
return 0;
srq->wrid = kmalloc_array(srq->max, sizeof(u64), GFP_KERNEL);
@@ -235,7 +235,7 @@ int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
if (err)
goto err_out;
- if (!pd->ibpd.uobject) {
+ if (!rdma_is_user_pd(&pd->ibpd)) {
srq->db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SRQ,
srq->srqn, &srq->db);
if (srq->db_index < 0) {
@@ -297,14 +297,14 @@ int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
mthca_warn(dev, "HW2SW_SRQ failed (%d)\n", err);
err_out_free_buf:
- if (!pd->ibpd.uobject)
+ if (!rdma_is_user_pd(&pd->ibpd))
mthca_free_srq_buf(dev, srq);
err_out_mailbox:
mthca_free_mailbox(dev, mailbox);
err_out_db:
- if (!pd->ibpd.uobject && mthca_is_memfree(dev))
+ if (!rdma_is_user_pd(&pd->ibpd) && mthca_is_memfree(dev))
mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index);
err_out_icm:
@@ -733,7 +733,7 @@ static int nes_dealloc_pd(struct ib_pd *ibpd)
struct nes_device *nesdev = nesvnic->nesdev;
struct nes_adapter *nesadapter = nesdev->nesadapter;
- if ((ibpd->uobject) && (ibpd->uobject->context)) {
+ if (rdma_is_user_pd(ibpd) && (ibpd->uobject->context)) {
nesucontext = to_nesucontext(ibpd->uobject->context);
nes_debug(NES_DBG_PD, "Clearing bit %u from allocated doorbells\n",
nespd->mmap_db_index);
@@ -1066,7 +1066,8 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
}
if (req.user_qp_buffer)
nesqp->nesuqp_addr = req.user_qp_buffer;
- if ((ibpd->uobject) && (ibpd->uobject->context)) {
+ if (rdma_is_user_pd(ibpd) &&
+ (ibpd->uobject->context)) {
nesqp->user_mode = 1;
nes_ucontext = to_nesucontext(ibpd->uobject->context);
if (virt_wqs) {
@@ -1257,7 +1258,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
nes_put_cqp_request(nesdev, cqp_request);
- if (ibpd->uobject) {
+ if (rdma_is_user_pd(ibpd)) {
uresp.mmap_sq_db_index = nesqp->mmap_sq_db_index;
uresp.mmap_rq_db_index = 0;
uresp.actual_sq_size = sq_size;
@@ -1217,7 +1217,7 @@ static int ocrdma_check_qp_params(struct ib_pd *ibpd, struct ocrdma_dev *dev,
return -EINVAL;
}
/* unprivileged user space cannot create special QP */
- if (ibpd->uobject && attrs->qp_type == IB_QPT_GSI) {
+ if (rdma_is_user_pd(ibpd) && attrs->qp_type == IB_QPT_GSI) {
pr_err
("%s(%d) Userspace can't create special QPs of type=0x%x\n",
__func__, dev->id, attrs->qp_type);
@@ -1189,7 +1189,7 @@ static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev,
}
/* Unprivileged user space cannot create special QP */
- if (ibpd->uobject && attrs->qp_type == IB_QPT_GSI) {
+ if (rdma_is_user_pd(ibpd) && attrs->qp_type == IB_QPT_GSI) {
DP_ERR(dev,
"create qp: userspace can't create special QPs of type=0x%x\n",
attrs->qp_type);
@@ -1552,7 +1552,7 @@ int qedr_destroy_srq(struct ib_srq *ibsrq)
in_params.srq_id = srq->srq_id;
dev->ops->rdma_destroy_srq(dev->rdma_ctx, &in_params);
- if (ibsrq->pd->uobject)
+ if (rdma_is_user_pd(ibsrq->pd))
qedr_free_srq_user_params(srq);
else
qedr_free_srq_kernel_params(srq);
@@ -249,7 +249,7 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
init_completion(&qp->free);
qp->state = IB_QPS_RESET;
- qp->is_kernel = !(pd->uobject && udata);
+ qp->is_kernel = !(rdma_is_user_pd(pd) && udata);
if (!qp->is_kernel) {
dev_dbg(&dev->pdev->dev,
@@ -111,7 +111,7 @@ struct ib_srq *pvrdma_create_srq(struct ib_pd *pd,
unsigned long flags;
int ret;
- if (!(pd->uobject && udata)) {
+ if (!(rdma_is_user_pd(pd) && udata)) {
/* No support for kernel clients. */
dev_warn(&dev->pdev->dev,
"no shared receive queue support for kernel client\n");
@@ -342,7 +342,8 @@ int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
struct rxe_cq *rcq = to_rcq(init->recv_cq);
struct rxe_cq *scq = to_rcq(init->send_cq);
struct rxe_srq *srq = init->srq ? to_rsrq(init->srq) : NULL;
- struct ib_ucontext *context = ibpd->uobject ? ibpd->uobject->context : NULL;
+ struct ib_ucontext *context = rdma_is_user_pd(ibpd) ?
+ ibpd->uobject->context : NULL;
rxe_add_ref(pd);
rxe_add_ref(rcq);
Now we have the ability to tell from the ib_pd whether it was created by
user or kernel verbs. Stop using the ib_pd->uobject pointer for this.
This patch prepares for the removal of the ib_pd uobject pointer, which
will happen in a later patch.

Signed-off-by: Shamir Rabinovitch <shamir.rabinovitch@oracle.com>
---
 drivers/infiniband/hw/bnxt_re/ib_verbs.c      |  4 ++--
 drivers/infiniband/hw/hns/hns_roce_hw_v2.c    |  3 ++-
 drivers/infiniband/hw/hns/hns_roce_qp.c       | 17 +++++++++--------
 drivers/infiniband/hw/i40iw/i40iw_verbs.c     |  2 +-
 drivers/infiniband/hw/mlx4/qp.c               | 13 +++++++------
 drivers/infiniband/hw/mlx4/srq.c              | 10 +++++-----
 drivers/infiniband/hw/mlx5/qp.c               |  4 ++--
 drivers/infiniband/hw/mlx5/srq.c              |  8 ++++----
 drivers/infiniband/hw/mthca/mthca_provider.c  | 10 +++++-----
 drivers/infiniband/hw/mthca/mthca_qp.c        |  7 ++++---
 drivers/infiniband/hw/mthca/mthca_srq.c       |  8 ++++----
 drivers/infiniband/hw/nes/nes_verbs.c         |  7 ++++---
 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c   |  2 +-
 drivers/infiniband/hw/qedr/verbs.c            |  4 ++--
 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c  |  2 +-
 drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c |  2 +-
 drivers/infiniband/sw/rxe/rxe_qp.c            |  3 ++-
 17 files changed, 56 insertions(+), 50 deletions(-)
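
Note for reviewers: rdma_is_user_pd() is the helper introduced earlier in
this series; the authoritative definition lives in include/rdma/ib_verbs.h
in the preceding patch. A minimal sketch of what it is assumed to look
like, namely a wrapper around the existing uobject test:

static inline bool rdma_is_user_pd(struct ib_pd *pd)
{
	/* Assumption: a PD created through user verbs carries an
	 * associated uverbs object; kernel-created PDs do not.
	 */
	return !!pd->uobject;
}

With the helper, call sites express intent ("is this a user PD?") instead
of poking at an implementation detail, which is what makes the later
removal of the uobject pointer possible without touching every driver
again.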