@@ -71,15 +71,21 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d
*
* If access flags indicate ODP memory, avoid pinning. Instead, stores
* the mm for future page fault handling in conjunction with MMU notifiers.
+ * If the process doing the pinning also owns the memory being pinned,
+ * 'owner' should be NULL. Otherwise, 'owner' must be the struct pid of
+ * the owning process, looked up in the same PID namespace as the
+ * calling userspace context.
*
- * @context: userspace context to pin memory for
+ * @context: userspace context that is pinning the memory
* @addr: userspace virtual address to start at
* @size: length of region to pin
* @access: IB_ACCESS_xxx flags for memory being pinned
* @dmasync: flush in-flight DMA when the memory region is written
+ * @owner: struct pid of the owning process, or NULL if the caller owns it
*/
struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
- size_t size, int access, int dmasync)
+ size_t size, int access, int dmasync,
+ struct pid *owner)
{
struct ib_umem *umem;
struct page **page_list;
@@ -94,6 +100,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
unsigned long dma_attrs = 0;
struct scatterlist *sg, *sg_list_start;
unsigned int gup_flags = FOLL_WRITE;
+ struct task_struct *owner_task = current;
if (dmasync)
dma_attrs |= DMA_ATTR_WRITE_BARRIER;
@@ -120,12 +127,22 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
return ERR_PTR(-ENOMEM);
}
+	if (owner) {
+		rcu_read_lock();
+		owner_task = pid_task(owner, PIDTYPE_PID);
+		rcu_read_unlock();
+		if (!owner_task) {
+			kfree(umem);
+			return ERR_PTR(-ESRCH);
+		}
+	}
+
umem->context = context;
umem->length = size;
umem->address = addr;
umem->page_shift = PAGE_SHIFT;
umem->writable = ib_access_writable(access);
- umem->owning_mm = mm = current->mm;
+ umem->owning_mm = mm = owner_task->mm;
mmgrab(mm);
if (access & IB_ACCESS_ON_DEMAND) {
@@ -183,10 +200,11 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
while (npages) {
down_read(&mm->mmap_sem);
- ret = get_user_pages_longterm(cur_base,
+ ret = get_user_pages_remote_longterm(owner_task,
+ mm, cur_base,
min_t(unsigned long, npages,
- PAGE_SIZE / sizeof (struct page *)),
- gup_flags, page_list, vma_list);
+ PAGE_SIZE / sizeof(struct page *)),
+ gup_flags, page_list, vma_list, NULL);
if (ret < 0) {
up_read(&mm->mmap_sem);
goto umem_release;
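
pid_task() returns a pointer that is only guaranteed to stay valid while the
RCU read lock is held, yet owner_task->mm is dereferenced after
rcu_read_unlock(). A minimal sketch of the same lookup built on the
reference-taking helpers; the helper name umem_owner_mm is hypothetical and
not part of this patch:

#include <linux/pid.h>		/* get_pid_task(), PIDTYPE_PID */
#include <linux/sched/mm.h>	/* get_task_mm(), mmgrab(), mmput() */
#include <linux/sched/task.h>	/* put_task_struct() */

static struct mm_struct *umem_owner_mm(struct pid *owner)
{
	struct task_struct *task;
	struct mm_struct *mm;

	if (!owner) {
		/* Caller owns the memory; take the reference that
		 * ib_umem_release() drops with mmdrop(). */
		mmgrab(current->mm);
		return current->mm;
	}

	task = get_pid_task(owner, PIDTYPE_PID);	/* task ref, RCU inside */
	if (!task)
		return NULL;				/* process is gone */

	mm = get_task_mm(task);		/* mm_users ref; NULL if task exited */
	put_task_struct(task);
	if (!mm)
		return NULL;

	mmgrab(mm);	/* convert to the mm_count ref the release path expects */
	mmput(mm);	/* drop the mm_users ref from get_task_mm() */
	return mm;
}

get_task_mm() fails cleanly when the target has already exited, which covers
the window where a task_struct outlives its mm. A caller that goes on to pin
pages from this mm still needs an mm_users reference (mmget_not_zero()) held
around the get_user_pages call itself.
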
@@ -896,7 +896,7 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
bytes += (qplib_qp->sq.max_wqe * sizeof(struct sq_psn_search));
bytes = PAGE_ALIGN(bytes);
umem = ib_umem_get(context, ureq.qpsva, bytes,
- IB_ACCESS_LOCAL_WRITE, 1);
+ IB_ACCESS_LOCAL_WRITE, 1, NULL);
if (IS_ERR(umem))
return PTR_ERR(umem);
@@ -909,7 +909,7 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
bytes = (qplib_qp->rq.max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
bytes = PAGE_ALIGN(bytes);
umem = ib_umem_get(context, ureq.qprva, bytes,
- IB_ACCESS_LOCAL_WRITE, 1);
+ IB_ACCESS_LOCAL_WRITE, 1, NULL);
if (IS_ERR(umem))
goto rqfail;
qp->rumem = umem;
@@ -1371,7 +1371,7 @@ static int bnxt_re_init_user_srq(struct bnxt_re_dev *rdev,
bytes = (qplib_srq->max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
bytes = PAGE_ALIGN(bytes);
umem = ib_umem_get(context, ureq.srqva, bytes,
- IB_ACCESS_LOCAL_WRITE, 1);
+ IB_ACCESS_LOCAL_WRITE, 1, NULL);
if (IS_ERR(umem))
return PTR_ERR(umem);
@@ -2624,7 +2624,7 @@ struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev,
cq->umem = ib_umem_get(context, req.cq_va,
entries * sizeof(struct cq_base),
- IB_ACCESS_LOCAL_WRITE, 1);
+ IB_ACCESS_LOCAL_WRITE, 1, NULL);
if (IS_ERR(cq->umem)) {
rc = PTR_ERR(cq->umem);
goto fail;
@@ -3591,7 +3591,7 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
mr->ib_mr.rkey = mr->qplib_mr.rkey;
umem = ib_umem_get(ib_pd->uobject->context, start, length,
- mr_access_flags, 0);
+ mr_access_flags, 0, NULL);
if (IS_ERR(umem)) {
dev_err(rdev_to_dev(rdev), "Failed to get umem");
rc = -EFAULT;
@@ -541,7 +541,8 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
mhp->rhp = rhp;
- mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
+ mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0,
+ owner);
if (IS_ERR(mhp->umem)) {
err = PTR_ERR(mhp->umem);
kfree(mhp);
@@ -538,7 +538,8 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
mhp->rhp = rhp;
- mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
+ mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0,
+ owner);
if (IS_ERR(mhp->umem))
goto err_free_skb;
@@ -224,7 +224,7 @@ static int hns_roce_ib_get_cq_umem(struct hns_roce_dev *hr_dev,
u32 npages;
*umem = ib_umem_get(context, buf_addr, cqe * hr_dev->caps.cq_entry_sz,
- IB_ACCESS_LOCAL_WRITE, 1);
+ IB_ACCESS_LOCAL_WRITE, 1, NULL);
if (IS_ERR(*umem))
return PTR_ERR(*umem);
@@ -29,7 +29,7 @@ int hns_roce_db_map_user(struct hns_roce_ucontext *context, unsigned long virt,
refcount_set(&page->refcount, 1);
page->user_virt = (virt & PAGE_MASK);
page->umem = ib_umem_get(&context->ibucontext, virt & PAGE_MASK,
- PAGE_SIZE, 0, 0);
+ PAGE_SIZE, 0, 0, NULL);
if (IS_ERR(page->umem)) {
ret = PTR_ERR(page->umem);
kfree(page);
@@ -1111,7 +1111,7 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
return ERR_PTR(-ENOMEM);
mr->umem = ib_umem_get(pd->uobject->context, start, length,
- access_flags, 0);
+ access_flags, 0, owner);
if (IS_ERR(mr->umem)) {
ret = PTR_ERR(mr->umem);
goto err_free;
@@ -1221,7 +1221,7 @@ int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length,
ib_umem_release(mr->umem);
mr->umem = ib_umem_get(ibmr->uobject->context, start, length,
- mr_access_flags, 0);
+ mr_access_flags, 0, NULL);
if (IS_ERR(mr->umem)) {
ret = PTR_ERR(mr->umem);
mr->umem = NULL;
@@ -614,7 +614,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
hr_qp->umem = ib_umem_get(ib_pd->uobject->context,
ucmd.buf_addr, hr_qp->buff_size, 0,
- 0);
+ 0, NULL);
if (IS_ERR(hr_qp->umem)) {
dev_err(dev, "ib_umem_get error for create qp\n");
ret = PTR_ERR(hr_qp->umem);
@@ -253,7 +253,7 @@ struct ib_srq *hns_roce_create_srq(struct ib_pd *pd,
}
srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
- srq_buf_size, 0, 0);
+ srq_buf_size, 0, 0, NULL);
if (IS_ERR(srq->umem)) {
ret = PTR_ERR(srq->umem);
goto err_srq;
@@ -1853,7 +1853,7 @@ static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd,
if (length > I40IW_MAX_MR_SIZE)
return ERR_PTR(-EINVAL);
- region = ib_umem_get(pd->uobject->context, start, length, acc, 0);
+ region = ib_umem_get(pd->uobject->context, start, length, acc, 0, NULL);
if (IS_ERR(region))
return (struct ib_mr *)region;
@@ -144,7 +144,7 @@ static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_ucontext *cont
int n;
*umem = ib_umem_get(context, buf_addr, cqe * cqe_size,
- IB_ACCESS_LOCAL_WRITE, 1);
+ IB_ACCESS_LOCAL_WRITE, 1, NULL);
if (IS_ERR(*umem))
return PTR_ERR(*umem);
@@ -62,7 +62,7 @@ int mlx4_ib_db_map_user(struct mlx4_ib_ucontext *context, unsigned long virt,
page->user_virt = (virt & PAGE_MASK);
page->refcnt = 0;
page->umem = ib_umem_get(&context->ibucontext, virt & PAGE_MASK,
- PAGE_SIZE, 0, 0);
+ PAGE_SIZE, 0, 0, NULL);
if (IS_ERR(page->umem)) {
err = PTR_ERR(page->umem);
kfree(page);
@@ -398,7 +398,7 @@ static struct ib_umem *mlx4_get_umem_mr(struct ib_ucontext *context, u64 start,
up_read(&current->mm->mmap_sem);
}
- return ib_umem_get(context, start, length, access_flags, 0);
+ return ib_umem_get(context, start, length, access_flags, 0, NULL);
}
struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
@@ -1017,7 +1017,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
qp->umem = ib_umem_get(pd->uobject->context,
(src == MLX4_IB_QP_SRC) ? ucmd.qp.buf_addr :
- ucmd.wq.buf_addr, qp->buf_size, 0, 0);
+ ucmd.wq.buf_addr, qp->buf_size, 0, 0, NULL);
if (IS_ERR(qp->umem)) {
err = PTR_ERR(qp->umem);
goto err;
@@ -114,7 +114,7 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
}
srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
- buf_size, 0, 0);
+ buf_size, 0, 0, NULL);
if (IS_ERR(srq->umem)) {
err = PTR_ERR(srq->umem);
goto err_srq;
@@ -709,7 +709,7 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
cq->buf.umem = ib_umem_get(context, ucmd.buf_addr,
entries * ucmd.cqe_size,
- IB_ACCESS_LOCAL_WRITE, 1);
+ IB_ACCESS_LOCAL_WRITE, 1, NULL);
if (IS_ERR(cq->buf.umem)) {
err = PTR_ERR(cq->buf.umem);
return err;
@@ -1126,7 +1126,7 @@ static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
umem = ib_umem_get(context, ucmd.buf_addr,
(size_t)ucmd.cqe_size * entries,
- IB_ACCESS_LOCAL_WRITE, 1);
+ IB_ACCESS_LOCAL_WRITE, 1, NULL);
if (IS_ERR(umem)) {
err = PTR_ERR(umem);
return err;
@@ -1195,7 +1195,7 @@ static int devx_umem_get(struct mlx5_ib_dev *dev, struct ib_ucontext *ucontext,
if (err)
return err;
- obj->umem = ib_umem_get(ucontext, addr, size, access, 0);
+ obj->umem = ib_umem_get(ucontext, addr, size, access, 0, NULL);
if (IS_ERR(obj->umem))
return PTR_ERR(obj->umem);
@@ -64,7 +64,7 @@ int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt,
page->user_virt = (virt & PAGE_MASK);
page->refcnt = 0;
page->umem = ib_umem_get(&context->ibucontext, virt & PAGE_MASK,
- PAGE_SIZE, 0, 0);
+ PAGE_SIZE, 0, 0, NULL);
if (IS_ERR(page->umem)) {
err = PTR_ERR(page->umem);
kfree(page);
@@ -849,7 +849,7 @@ static int mr_cache_max_order(struct mlx5_ib_dev *dev)
static int mr_umem_get(struct ib_pd *pd, u64 start, u64 length,
int access_flags, struct ib_umem **umem,
int *npages, int *page_shift, int *ncont,
- int *order)
+ int *order, struct pid *owner)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
struct ib_umem *u;
@@ -857,7 +857,8 @@ static int mr_umem_get(struct ib_pd *pd, u64 start, u64 length,
*umem = NULL;
- u = ib_umem_get(pd->uobject->context, start, length, access_flags, 0);
+ u = ib_umem_get(pd->uobject->context, start, length, access_flags, 0,
+ owner);
err = PTR_ERR_OR_ZERO(u);
if (err) {
mlx5_ib_dbg(dev, "umem get failed (%d)\n", err);
@@ -1328,8 +1329,8 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM))
return ERR_PTR(-EOPNOTSUPP);
- mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
- start, virt_addr, length, access_flags);
+ mlx5_ib_dbg(dev, "start=0x%llx, virt_addr=0x%llx, length=0x%llx, access_flags=0x%x owner=%i\n",
+ start, virt_addr, length, access_flags, pid_vnr(owner));
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
if (!start && length == U64_MAX) {
@@ -1337,7 +1338,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
!(dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
return ERR_PTR(-EINVAL);
- mr = mlx5_ib_alloc_implicit_mr(to_mpd(pd), access_flags);
+ mr = mlx5_ib_alloc_implicit_mr(to_mpd(pd), access_flags, owner);
if (IS_ERR(mr))
return ERR_CAST(mr);
return &mr->ibmr;
@@ -1345,7 +1346,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
#endif
err = mr_umem_get(pd, start, length, access_flags, &umem, &npages,
- &page_shift, &ncont, &order);
+ &page_shift, &ncont, &order, owner);
if (err < 0)
return ERR_PTR(err);
@@ -1496,7 +1497,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
ib_umem_release(mr->umem);
mr->umem = NULL;
err = mr_umem_get(pd, addr, len, access_flags, &mr->umem,
- &npages, &page_shift, &ncont, &order);
+ &npages, &page_shift, &ncont, &order, NULL);
if (err)
goto err;
}
@@ -492,13 +492,14 @@ static struct ib_umem_odp *implicit_mr_get_data(struct mlx5_ib_mr *mr,
}
struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
- int access_flags)
+ int access_flags,
+ struct pid *owner)
{
struct ib_ucontext *ctx = pd->ibpd.uobject->context;
struct mlx5_ib_mr *imr;
struct ib_umem *umem;
- umem = ib_umem_get(ctx, 0, 0, IB_ACCESS_ON_DEMAND, 0);
+ umem = ib_umem_get(ctx, 0, 0, IB_ACCESS_ON_DEMAND, 0, owner);
if (IS_ERR(umem))
return ERR_CAST(umem);
@@ -654,7 +654,7 @@ static int mlx5_ib_umem_get(struct mlx5_ib_dev *dev,
{
int err;
- *umem = ib_umem_get(pd->uobject->context, addr, size, 0, 0);
+ *umem = ib_umem_get(pd->uobject->context, addr, size, 0, 0, NULL);
if (IS_ERR(*umem)) {
mlx5_ib_dbg(dev, "umem_get failed\n");
return PTR_ERR(*umem);
@@ -710,7 +710,7 @@ static int create_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd,
context = to_mucontext(pd->uobject->context);
rwq->umem = ib_umem_get(pd->uobject->context, ucmd->buf_addr,
- rwq->buf_size, 0, 0);
+ rwq->buf_size, 0, 0, NULL);
if (IS_ERR(rwq->umem)) {
mlx5_ib_dbg(dev, "umem_get failed\n");
err = PTR_ERR(rwq->umem);
@@ -80,7 +80,7 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
srq->wq_sig = !!(ucmd.flags & MLX5_SRQ_FLAG_SIGNATURE);
srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, buf_size,
- 0, 0);
+ 0, 0, NULL);
if (IS_ERR(srq->umem)) {
mlx5_ib_dbg(dev, "failed umem get, size %d\n", buf_size);
err = PTR_ERR(srq->umem);
@@ -933,7 +933,7 @@ static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
return ERR_PTR(-ENOMEM);
mr->umem = ib_umem_get(pd->uobject->context, start, length, acc,
- ucmd.mr_attrs & MTHCA_MR_DMASYNC);
+ ucmd.mr_attrs & MTHCA_MR_DMASYNC, owner);
if (IS_ERR(mr->umem)) {
err = PTR_ERR(mr->umem);
@@ -2134,7 +2134,8 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u8 stag_key;
int first_page = 1;
- region = ib_umem_get(pd->uobject->context, start, length, acc, 0);
+ region = ib_umem_get(pd->uobject->context, start, length, acc, 0,
+ owner);
if (IS_ERR(region)) {
return (struct ib_mr *)region;
}
@@ -917,7 +917,8 @@ struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
mr = kzalloc(sizeof(*mr), GFP_KERNEL);
if (!mr)
return ERR_PTR(status);
- mr->umem = ib_umem_get(ibpd->uobject->context, start, len, acc, 0);
+ mr->umem = ib_umem_get(ibpd->uobject->context, start, len, acc, 0,
+ owner);
if (IS_ERR(mr->umem)) {
status = -EFAULT;
goto umem_err;
@@ -748,7 +748,8 @@ static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx,
q->buf_addr = buf_addr;
q->buf_len = buf_len;
- q->umem = ib_umem_get(ib_ctx, q->buf_addr, q->buf_len, access, dmasync);
+ q->umem = ib_umem_get(ib_ctx, q->buf_addr, q->buf_len, access, dmasync,
+ NULL);
if (IS_ERR(q->umem)) {
DP_ERR(dev, "create user queue: failed ib_umem_get, got %ld\n",
PTR_ERR(q->umem));
@@ -1359,7 +1360,7 @@ static int qedr_init_srq_user_params(struct ib_ucontext *ib_ctx,
srq->prod_umem = ib_umem_get(ib_ctx, ureq->prod_pair_addr,
sizeof(struct rdma_srq_producers),
- access, dmasync);
+ access, dmasync, NULL);
if (IS_ERR(srq->prod_umem)) {
qedr_free_pbl(srq->dev, &srq->usrq.pbl_info, srq->usrq.pbl_tbl);
ib_umem_release(srq->usrq.umem);
@@ -2719,7 +2720,8 @@ struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
mr->type = QEDR_MR_USER;
- mr->umem = ib_umem_get(ibpd->uobject->context, start, len, acc, 0);
+ mr->umem = ib_umem_get(ibpd->uobject->context, start, len, acc, 0,
+ NULL);
if (IS_ERR(mr->umem)) {
rc = -EFAULT;
goto err0;
@@ -142,7 +142,7 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
}
cq->umem = ib_umem_get(context, ucmd.buf_addr, ucmd.buf_size,
- IB_ACCESS_LOCAL_WRITE, 1);
+ IB_ACCESS_LOCAL_WRITE, 1, NULL);
if (IS_ERR(cq->umem)) {
ret = PTR_ERR(cq->umem);
goto err_cq;
@@ -127,7 +127,7 @@ struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
}
umem = ib_umem_get(pd->uobject->context, start,
- length, access_flags, 0);
+ length, access_flags, 0, NULL);
if (IS_ERR(umem)) {
dev_warn(&dev->pdev->dev,
"could not get umem for mem region\n");
@@ -264,7 +264,8 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
/* set qp->sq.wqe_cnt, shift, buf_size.. */
qp->rumem = ib_umem_get(pd->uobject->context,
ucmd.rbuf_addr,
- ucmd.rbuf_size, 0, 0);
+ ucmd.rbuf_size, 0, 0,
+ NULL);
if (IS_ERR(qp->rumem)) {
ret = PTR_ERR(qp->rumem);
goto err_qp;
@@ -277,7 +278,7 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
qp->sumem = ib_umem_get(pd->uobject->context,
ucmd.sbuf_addr,
- ucmd.sbuf_size, 0, 0);
+ ucmd.sbuf_size, 0, 0, NULL);
if (IS_ERR(qp->sumem)) {
if (!is_srq)
ib_umem_release(qp->rumem);
@@ -155,7 +155,7 @@ struct ib_srq *pvrdma_create_srq(struct ib_pd *pd,
srq->umem = ib_umem_get(pd->uobject->context,
ucmd.buf_addr,
- ucmd.buf_size, 0, 0);
+ ucmd.buf_size, 0, 0, NULL);
if (IS_ERR(srq->umem)) {
ret = PTR_ERR(srq->umem);
goto err_srq;
@@ -390,7 +390,7 @@ struct ib_mr *rvt_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
return ERR_PTR(-EINVAL);
umem = ib_umem_get(pd->uobject->context, start, length,
- mr_access_flags, 0);
+ mr_access_flags, 0, NULL);
if (IS_ERR(umem))
return (void *)umem;
@@ -171,7 +171,8 @@ int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
void *vaddr;
int err;
- umem = ib_umem_get(pd->ibpd.uobject->context, start, length, access, 0);
+ umem = ib_umem_get(pd->ibpd.uobject->context, start, length,
+ access, 0, NULL);
if (IS_ERR(umem)) {
pr_warn("err %d from rxe_umem_get\n",
(int)PTR_ERR(umem));
@@ -81,7 +81,8 @@ static inline size_t ib_umem_num_pages(struct ib_umem *umem)
#ifdef CONFIG_INFINIBAND_USER_MEM
struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
- size_t size, int access, int dmasync);
+ size_t size, int access, int dmasync,
+ struct pid *owner);
void ib_umem_release(struct ib_umem *umem);
int ib_umem_page_count(struct ib_umem *umem);
int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
ib_umem_get is a core function used by drivers that support RDMA. The
'owner' parameter signifies the process that owns the memory. Until now,
it was assumed that the owning process was the current process. This adds
the flexibility to specify a process other than the current process. All
drivers that call this function are updated as well; backwards
compatibility is preserved by treating a NULL 'owner' as the current
process.

Signed-off-by: Joel Nider <joeln@il.ibm.com>
---
 drivers/infiniband/core/umem.c                | 26 ++++++++++++++++++++------
 drivers/infiniband/hw/bnxt_re/ib_verbs.c      | 10 +++++-----
 drivers/infiniband/hw/cxgb3/iwch_provider.c   |  3 ++-
 drivers/infiniband/hw/cxgb4/mem.c             |  3 ++-
 drivers/infiniband/hw/hns/hns_roce_cq.c       |  2 +-
 drivers/infiniband/hw/hns/hns_roce_db.c       |  2 +-
 drivers/infiniband/hw/hns/hns_roce_mr.c       |  4 ++--
 drivers/infiniband/hw/hns/hns_roce_qp.c       |  2 +-
 drivers/infiniband/hw/hns/hns_roce_srq.c      |  2 +-
 drivers/infiniband/hw/i40iw/i40iw_verbs.c     |  2 +-
 drivers/infiniband/hw/mlx4/cq.c               |  2 +-
 drivers/infiniband/hw/mlx4/doorbell.c         |  2 +-
 drivers/infiniband/hw/mlx4/mr.c               |  2 +-
 drivers/infiniband/hw/mlx4/qp.c               |  2 +-
 drivers/infiniband/hw/mlx4/srq.c              |  2 +-
 drivers/infiniband/hw/mlx5/cq.c               |  4 ++--
 drivers/infiniband/hw/mlx5/devx.c             |  2 +-
 drivers/infiniband/hw/mlx5/doorbell.c         |  2 +-
 drivers/infiniband/hw/mlx5/mr.c               | 15 ++++++++-------
 drivers/infiniband/hw/mlx5/odp.c              |  5 +++--
 drivers/infiniband/hw/mlx5/qp.c               |  4 ++--
 drivers/infiniband/hw/mlx5/srq.c              |  2 +-
 drivers/infiniband/hw/mthca/mthca_provider.c  |  2 +-
 drivers/infiniband/hw/nes/nes_verbs.c         |  3 ++-
 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c   |  3 ++-
 drivers/infiniband/hw/qedr/verbs.c            |  8 +++++---
 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c  |  2 +-
 drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c  |  2 +-
 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c  |  5 +++--
 drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c |  2 +-
 drivers/infiniband/sw/rdmavt/mr.c             |  2 +-
 drivers/infiniband/sw/rxe/rxe_mr.c            |  3 ++-
 include/rdma/ib_umem.h                        |  3 ++-
 33 files changed, 80 insertions(+), 55 deletions(-)
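
For reference, a sketch of the two calling conventions from a driver's point
of view. The wrapper and its remote_pid_nr parameter are hypothetical; the
assumption that ib_umem_get() does not retain the pid reference follows from
this patch not storing 'owner' anywhere:

#include <linux/err.h>
#include <linux/pid.h>		/* find_get_pid(), put_pid() */
#include <rdma/ib_umem.h>

static struct ib_umem *pin_for_process(struct ib_ucontext *ctx,
				       unsigned long addr, size_t size,
				       int access, pid_t remote_pid_nr)
{
	struct pid *owner;
	struct ib_umem *umem;

	if (!remote_pid_nr) {
		/* Common case: the calling process owns the memory. */
		return ib_umem_get(ctx, addr, size, access, 0, NULL);
	}

	/* Resolve the PID number in the caller's namespace, per the
	 * ib_umem_get() documentation; find_get_pid() takes a reference. */
	owner = find_get_pid(remote_pid_nr);
	if (!owner)
		return ERR_PTR(-ESRCH);

	umem = ib_umem_get(ctx, addr, size, access, 0, owner);
	put_pid(owner);
	return umem;
}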