@@ -77,15 +77,11 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d
* @addr: userspace virtual address to start at
* @size: length of region to pin
* @access: IB_ACCESS_xxx flags for memory being pinned
- * @dmasync: flush in-flight DMA when the memory region is written
*/
struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
- size_t size, int access, int dmasync)
+ size_t size, int access)
{
- unsigned long dma_attrs = 0;
- if (dmasync)
- dma_attrs |= DMA_ATTR_WRITE_BARRIER;
- return ib_umem_get_attrs(context, addr, size, access, DMA_BIDIRECTIONAL, dma_attrs);
+ return ib_umem_get_attrs(context, addr, size, access, DMA_BIDIRECTIONAL, 0);
}
EXPORT_SYMBOL(ib_umem_get);
@@ -576,7 +576,7 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
mhp->rhp = rhp;
- mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
+ mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc);
if (IS_ERR(mhp->umem)) {
err = PTR_ERR(mhp->umem);
kfree(mhp);
@@ -509,7 +509,7 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
mhp->rhp = rhp;
- mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
+ mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc);
if (IS_ERR(mhp->umem)) {
err = PTR_ERR(mhp->umem);
kfree_skb(mhp->dereg_skb);
@@ -1703,7 +1703,7 @@ static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd,
if (length > I40IW_MAX_MR_SIZE)
return ERR_PTR(-EINVAL);
- region = ib_umem_get(pd->uobject->context, start, length, acc, 0);
+ region = ib_umem_get(pd->uobject->context, start, length, acc);
if (IS_ERR(region))
return (struct ib_mr *)region;
@@ -141,8 +141,8 @@ static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_ucontext *cont
int err;
int cqe_size = dev->dev->caps.cqe_size;
- *umem = ib_umem_get(context, buf_addr, cqe * cqe_size,
- IB_ACCESS_LOCAL_WRITE, 1);
+ *umem = ib_umem_get_attrs(context, buf_addr, cqe * cqe_size,
+ IB_ACCESS_LOCAL_WRITE, DMA_BIDIRECTIONAL, DMA_ATTR_WRITE_BARRIER);
if (IS_ERR(*umem))
return PTR_ERR(*umem);
@@ -62,7 +62,7 @@ int mlx4_ib_db_map_user(struct mlx4_ib_ucontext *context, unsigned long virt,
page->user_virt = (virt & PAGE_MASK);
page->refcnt = 0;
page->umem = ib_umem_get(&context->ibucontext, virt & PAGE_MASK,
- PAGE_SIZE, 0, 0);
+ PAGE_SIZE, 0);
if (IS_ERR(page->umem)) {
err = PTR_ERR(page->umem);
kfree(page);
@@ -148,7 +148,7 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
/* Force registering the memory as writable. */
/* Used for memory re-registeration. HCA protects the access */
mr->umem = ib_umem_get(pd->uobject->context, start, length,
- access_flags | IB_ACCESS_LOCAL_WRITE, 0);
+ access_flags | IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(mr->umem)) {
err = PTR_ERR(mr->umem);
goto err_free;
@@ -230,8 +230,7 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
ib_umem_release(mmr->umem);
mmr->umem = ib_umem_get(mr->uobject->context, start, length,
mr_access_flags |
- IB_ACCESS_LOCAL_WRITE,
- 0);
+ IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(mmr->umem)) {
err = PTR_ERR(mmr->umem);
/* Prevent mlx4_ib_dereg_mr from free'ing invalid pointer */
@@ -742,7 +742,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
goto err;
qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
- qp->buf_size, 0, 0);
+ qp->buf_size, 0);
if (IS_ERR(qp->umem)) {
err = PTR_ERR(qp->umem);
goto err;
@@ -115,7 +115,7 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
}
srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
- buf_size, 0, 0);
+ buf_size, 0);
if (IS_ERR(srq->umem)) {
err = PTR_ERR(srq->umem);
goto err_srq;
@@ -776,9 +776,11 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
*cqe_size = ucmd.cqe_size;
- cq->buf.umem = ib_umem_get(context, ucmd.buf_addr,
- entries * ucmd.cqe_size,
- IB_ACCESS_LOCAL_WRITE, 1);
+ cq->buf.umem = ib_umem_get_attrs(context, ucmd.buf_addr,
+ entries * ucmd.cqe_size,
+ IB_ACCESS_LOCAL_WRITE,
+ DMA_BIDIRECTIONAL,
+ DMA_ATTR_WRITE_BARRIER);
if (IS_ERR(cq->buf.umem)) {
err = PTR_ERR(cq->buf.umem);
return err;
@@ -1137,8 +1139,8 @@ static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
if (ucmd.reserved0 || ucmd.reserved1)
return -EINVAL;
- umem = ib_umem_get(context, ucmd.buf_addr, entries * ucmd.cqe_size,
- IB_ACCESS_LOCAL_WRITE, 1);
+ umem = ib_umem_get_attrs(context, ucmd.buf_addr, entries * ucmd.cqe_size,
+ IB_ACCESS_LOCAL_WRITE, DMA_BIDIRECTIONAL, DMA_ATTR_WRITE_BARRIER);
if (IS_ERR(umem)) {
err = PTR_ERR(umem);
return err;
@@ -64,7 +64,7 @@ int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt,
page->user_virt = (virt & PAGE_MASK);
page->refcnt = 0;
page->umem = ib_umem_get(&context->ibucontext, virt & PAGE_MASK,
- PAGE_SIZE, 0, 0);
+ PAGE_SIZE, 0);
if (IS_ERR(page->umem)) {
err = PTR_ERR(page->umem);
kfree(page);
@@ -822,7 +822,7 @@ static struct ib_umem *mr_umem_get(struct ib_pd *pd, u64 start, u64 length,
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
struct ib_umem *umem = ib_umem_get(pd->uobject->context, start, length,
- access_flags, 0);
+ access_flags);
if (IS_ERR(umem)) {
mlx5_ib_err(dev, "umem get failed (%ld)\n", PTR_ERR(umem));
return (void *)umem;
@@ -632,7 +632,7 @@ static int mlx5_ib_umem_get(struct mlx5_ib_dev *dev,
{
int err;
- *umem = ib_umem_get(pd->uobject->context, addr, size, 0, 0);
+ *umem = ib_umem_get(pd->uobject->context, addr, size, 0);
if (IS_ERR(*umem)) {
mlx5_ib_dbg(dev, "umem_get failed\n");
return PTR_ERR(*umem);
@@ -684,7 +684,7 @@ static int create_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd,
context = to_mucontext(pd->uobject->context);
rwq->umem = ib_umem_get(pd->uobject->context, ucmd->buf_addr,
- rwq->buf_size, 0, 0);
+ rwq->buf_size, 0);
if (IS_ERR(rwq->umem)) {
mlx5_ib_dbg(dev, "umem_get failed\n");
err = PTR_ERR(rwq->umem);
@@ -112,7 +112,7 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
srq->wq_sig = !!(ucmd.flags & MLX5_SRQ_FLAG_SIGNATURE);
srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, buf_size,
- 0, 0);
+ 0);
if (IS_ERR(srq->umem)) {
mlx5_ib_dbg(dev, "failed umem get, size %d\n", buf_size);
err = PTR_ERR(srq->umem);
@@ -910,6 +910,7 @@ static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
int i, k, entry;
int err = 0;
int write_mtt_size;
+ unsigned long dma_attrs = 0;
if (udata->inlen - sizeof (struct ib_uverbs_cmd_hdr) < sizeof ucmd) {
if (!to_mucontext(pd->uobject->context)->reg_mr_warned) {
@@ -926,8 +927,11 @@ static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
if (!mr)
return ERR_PTR(-ENOMEM);
- mr->umem = ib_umem_get(pd->uobject->context, start, length, acc,
- ucmd.mr_attrs & MTHCA_MR_DMASYNC);
+ if (ucmd.mr_attrs & MTHCA_MR_DMASYNC)
+ dma_attrs |= DMA_ATTR_WRITE_BARRIER;
+
+ mr->umem = ib_umem_get_attrs(pd->uobject->context, start, length, acc,
+ DMA_BIDIRECTIONAL, dma_attrs);
if (IS_ERR(mr->umem)) {
err = PTR_ERR(mr->umem);
@@ -2170,7 +2170,7 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u8 stag_key;
int first_page = 1;
- region = ib_umem_get(pd->uobject->context, start, length, acc, 0);
+ region = ib_umem_get(pd->uobject->context, start, length, acc);
if (IS_ERR(region)) {
return (struct ib_mr *)region;
}
@@ -967,7 +967,7 @@ struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
mr = kzalloc(sizeof(*mr), GFP_KERNEL);
if (!mr)
return ERR_PTR(status);
- mr->umem = ib_umem_get(ibpd->uobject->context, start, len, acc, 0);
+ mr->umem = ib_umem_get(ibpd->uobject->context, start, len, acc);
if (IS_ERR(mr->umem)) {
status = -EFAULT;
goto umem_err;
@@ -369,7 +369,7 @@ struct ib_mr *rvt_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
return ERR_PTR(-EINVAL);
umem = ib_umem_get(pd->uobject->context, start, length,
- mr_access_flags, 0);
+ mr_access_flags);
if (IS_ERR(umem))
return (void *)umem;
@@ -169,7 +169,7 @@ int rxe_mem_init_user(struct rxe_dev *rxe, struct rxe_pd *pd, u64 start,
void *vaddr;
int err;
- umem = ib_umem_get(pd->ibpd.uobject->context, start, length, access, 0);
+ umem = ib_umem_get(pd->ibpd.uobject->context, start, length, access);
if (IS_ERR(umem)) {
pr_warn("err %d from rxe_umem_get\n",
(int)PTR_ERR(umem));
@@ -84,7 +84,7 @@ static inline size_t ib_umem_num_pages(struct ib_umem *umem)
#ifdef CONFIG_INFINIBAND_USER_MEM
struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
- size_t size, int access, int dmasync);
+ size_t size, int access);
struct ib_umem *ib_umem_get_attrs(struct ib_ucontext *context, unsigned long addr,
size_t size, int access,
enum dma_data_direction dir,
@@ -100,7 +100,7 @@ int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
static inline struct ib_umem *ib_umem_get(struct ib_ucontext *context,
unsigned long addr, size_t size,
- int access, int dmasync) {
+ int access) {
return ERR_PTR(-EINVAL);
}
static inline struct ib_umem *ib_umem_get_attrs(struct ib_ucontext *context,
Most drivers pass a specialized and weakly typed "boolean" dmasync argument of 0 to ib_umem_get(); only three call sites ever set it to a nonzero value. Let those special cases use the more generic ib_umem_get_attrs(), which gives access to the full range of DMA attributes as well as the DMA direction, and eliminate the dmasync parameter from ib_umem_get().

Signed-off-by: Knut Omang <knut.omang@oracle.com>
---
 drivers/infiniband/core/umem.c               |  8 ++------
 drivers/infiniband/hw/cxgb3/iwch_provider.c  |  2 +-
 drivers/infiniband/hw/cxgb4/mem.c            |  2 +-
 drivers/infiniband/hw/i40iw/i40iw_verbs.c    |  2 +-
 drivers/infiniband/hw/mlx4/cq.c              |  4 ++--
 drivers/infiniband/hw/mlx4/doorbell.c        |  2 +-
 drivers/infiniband/hw/mlx4/mr.c              |  5 ++---
 drivers/infiniband/hw/mlx4/qp.c              |  2 +-
 drivers/infiniband/hw/mlx4/srq.c             |  2 +-
 drivers/infiniband/hw/mlx5/cq.c              | 12 +++++++-----
 drivers/infiniband/hw/mlx5/doorbell.c        |  2 +-
 drivers/infiniband/hw/mlx5/mr.c              |  2 +-
 drivers/infiniband/hw/mlx5/qp.c              |  4 ++--
 drivers/infiniband/hw/mlx5/srq.c             |  2 +-
 drivers/infiniband/hw/mthca/mthca_provider.c |  8 ++++++--
 drivers/infiniband/hw/nes/nes_verbs.c        |  2 +-
 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c  |  2 +-
 drivers/infiniband/sw/rdmavt/mr.c            |  2 +-
 drivers/infiniband/sw/rxe/rxe_mr.c           |  2 +-
 include/rdma/ib_umem.h                       |  4 ++--
 20 files changed, 36 insertions(+), 35 deletions(-)
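
For reference, a rough sketch of the calling convention change at a hypothetical driver call site (ctx, addr, size and access stand in for whatever the driver already has at hand): the common case simply drops the trailing 0, while a call site that previously passed dmasync = 1 now requests the write barrier explicitly through the DMA attributes, as the mlx4/mlx5 CQ and mthca hunks above do.

	/* common case: no special DMA attributes needed */
	umem = ib_umem_get(ctx, addr, size, access);

	/* old dmasync = 1 behaviour, now spelled out via DMA attributes */
	umem = ib_umem_get_attrs(ctx, addr, size, access,
				 DMA_BIDIRECTIONAL, DMA_ATTR_WRITE_BARRIER);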