[for-next,1/1] RDMA/bnxt_re: Add support for dmabuf pinned memory regions

Message ID 1690468194-6185-2-git-send-email-selvin.xavier@broadcom.com (mailing list archive)
State Superseded
Series RDMA/bnxt_re: Add dmabuf support

Commit Message

Selvin Xavier July 27, 2023, 2:29 p.m. UTC
From: Saravanan Vajravel <saravanan.vajravel@broadcom.com>

Support the new verb that indicates dmabuf support.
bnxt doesn't support ODP, so use the pinned version of the
dmabuf APIs to enable bnxt_re devices to work as a dmabuf importer.

Signed-off-by: Saravanan Vajravel <saravanan.vajravel@broadcom.com>
Signed-off-by: Selvin Xavier <selvin.xavier@broadcom.com>
---
 drivers/infiniband/hw/bnxt_re/ib_verbs.c | 48 ++++++++++++++++++++++++++------
 drivers/infiniband/hw/bnxt_re/ib_verbs.h |  4 +++
 drivers/infiniband/hw/bnxt_re/main.c     |  1 +
 3 files changed, 44 insertions(+), 9 deletions(-)
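
For context (not part of this patch): once the driver wires up .reg_user_mr_dmabuf, userspace reaches the verb through libibverbs' ibv_reg_dmabuf_mr(). Below is a minimal sketch, assuming the caller already holds a dmabuf file descriptor from some exporter (e.g. a GPU driver); the helper name and chosen access flags are illustrative only.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>
#include <infiniband/verbs.h>

/* Register a pinned dmabuf-backed MR on an existing PD. */
static struct ibv_mr *register_dmabuf_mr(struct ibv_pd *pd, int dmabuf_fd,
					 size_t length, uint64_t iova)
{
	struct ibv_mr *mr;

	/* Offset 0 into the dmabuf; the driver pins the pages since ODP is not supported. */
	mr = ibv_reg_dmabuf_mr(pd, 0, length, iova, dmabuf_fd,
			       IBV_ACCESS_LOCAL_WRITE |
			       IBV_ACCESS_REMOTE_READ |
			       IBV_ACCESS_REMOTE_WRITE);
	if (!mr)
		perror("ibv_reg_dmabuf_mr");
	return mr;
}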

Comments

Jason Gunthorpe July 27, 2023, 2:44 p.m. UTC | #1
On Thu, Jul 27, 2023 at 07:29:54AM -0700, Selvin Xavier wrote:
> From: Saravanan Vajravel <saravanan.vajravel@broadcom.com>
> 
> Support the new verb which indicates dmabuf support.
> bnxt doesn't support ODP. So use the pinned version of the
> dmabuf APIs to enable bnxt_re devices to work as dmabuf importer.
> 
> Signed-off-by: Saravanan Vajravel <saravanan.vajravel@broadcom.com>
> Signed-off-by: Selvin Xavier <selvin.xavier@broadcom.com>
> ---
>  drivers/infiniband/hw/bnxt_re/ib_verbs.c | 48 ++++++++++++++++++++++++++------
>  drivers/infiniband/hw/bnxt_re/ib_verbs.h |  4 +++
>  drivers/infiniband/hw/bnxt_re/main.c     |  1 +
>  3 files changed, 44 insertions(+), 9 deletions(-)
> 
> diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
> index 2b2505a..3c3459d 100644
> --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
> +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
> @@ -3981,17 +3981,19 @@ int bnxt_re_dealloc_mw(struct ib_mw *ib_mw)
>  	return rc;
>  }
>  
> -/* uverbs */
> -struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
> -				  u64 virt_addr, int mr_access_flags,
> -				  struct ib_udata *udata)
> +static struct ib_mr *__bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start,
> +					   u64 length, u64 virt_addr, int fd,
> +					   int mr_access_flags,
> +					   struct ib_udata *udata,
> +					   bool dmabuf)
>  {
>  	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
>  	struct bnxt_re_dev *rdev = pd->rdev;
> +	struct ib_umem_dmabuf *umem_dmabuf;
> +	unsigned long page_size;
>  	struct bnxt_re_mr *mr;
>  	struct ib_umem *umem;
> -	unsigned long page_size;
> -	int umem_pgs, rc;
> +	int umem_pgs, rc = 0;
>  	u32 active_mrs;
>  
>  	if (length > BNXT_RE_MAX_MR_SIZE) {
> @@ -4017,9 +4019,21 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
>  	/* The fixed portion of the rkey is the same as the lkey */
>  	mr->ib_mr.rkey = mr->qplib_mr.rkey;
>  
> -	umem = ib_umem_get(&rdev->ibdev, start, length, mr_access_flags);
> -	if (IS_ERR(umem)) {
> -		ibdev_err(&rdev->ibdev, "Failed to get umem");
> +	if (!dmabuf) {
> +		umem = ib_umem_get(&rdev->ibdev, start, length, mr_access_flags);
> +		if (IS_ERR(umem))
> +			rc = PTR_ERR(umem);
> +	} else {
> +		umem_dmabuf = ib_umem_dmabuf_get_pinned(&rdev->ibdev, start, length,
> +							fd, mr_access_flags);
> +		if (IS_ERR(umem_dmabuf))
> +			rc = PTR_ERR(umem_dmabuf);
> +		else
> +			umem = &umem_dmabuf->umem;
> +	}

This is pretty ugly. Why can't you pass in the umem from the two stubs:

> +struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
> +				  u64 virt_addr, int mr_access_flags,
> +				  struct ib_udata *udata)
> +{
> +	return __bnxt_re_reg_user_mr(ib_pd, start, length, virt_addr, 0,
> +				     mr_access_flags, udata, false);
> +}
> +
> +struct ib_mr *bnxt_re_reg_user_mr_dmabuf(struct ib_pd *ib_pd, u64 start,
> +					 u64 length, u64 virt_addr, int fd,
> +					 int mr_access_flags, struct ib_udata *udata)
> +{
> +	return __bnxt_re_reg_user_mr(ib_pd, start, length, virt_addr, fd,
> +				     mr_access_flags, udata, true);
> +}

?

Jason
Selvin Xavier July 27, 2023, 2:55 p.m. UTC | #2
On Thu, Jul 27, 2023 at 8:14 PM Jason Gunthorpe <jgg@ziepe.ca> wrote:
>
> On Thu, Jul 27, 2023 at 07:29:54AM -0700, Selvin Xavier wrote:
> > From: Saravanan Vajravel <saravanan.vajravel@broadcom.com>
> >
> > Support the new verb which indicates dmabuf support.
> > bnxt doesn't support ODP. So use the pinned version of the
> > dmabuf APIs to enable bnxt_re devices to work as dmabuf importer.
> >
> > Signed-off-by: Saravanan Vajravel <saravanan.vajravel@broadcom.com>
> > Signed-off-by: Selvin Xavier <selvin.xavier@broadcom.com>
> > ---
> >  drivers/infiniband/hw/bnxt_re/ib_verbs.c | 48 ++++++++++++++++++++++++++------
> >  drivers/infiniband/hw/bnxt_re/ib_verbs.h |  4 +++
> >  drivers/infiniband/hw/bnxt_re/main.c     |  1 +
> >  3 files changed, 44 insertions(+), 9 deletions(-)
> >
> > diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
> > index 2b2505a..3c3459d 100644
> > --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
> > +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
> > @@ -3981,17 +3981,19 @@ int bnxt_re_dealloc_mw(struct ib_mw *ib_mw)
> >       return rc;
> >  }
> >
> > -/* uverbs */
> > -struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
> > -                               u64 virt_addr, int mr_access_flags,
> > -                               struct ib_udata *udata)
> > +static struct ib_mr *__bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start,
> > +                                        u64 length, u64 virt_addr, int fd,
> > +                                        int mr_access_flags,
> > +                                        struct ib_udata *udata,
> > +                                        bool dmabuf)
> >  {
> >       struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
> >       struct bnxt_re_dev *rdev = pd->rdev;
> > +     struct ib_umem_dmabuf *umem_dmabuf;
> > +     unsigned long page_size;
> >       struct bnxt_re_mr *mr;
> >       struct ib_umem *umem;
> > -     unsigned long page_size;
> > -     int umem_pgs, rc;
> > +     int umem_pgs, rc = 0;
> >       u32 active_mrs;
> >
> >       if (length > BNXT_RE_MAX_MR_SIZE) {
> > @@ -4017,9 +4019,21 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
> >       /* The fixed portion of the rkey is the same as the lkey */
> >       mr->ib_mr.rkey = mr->qplib_mr.rkey;
> >
> > -     umem = ib_umem_get(&rdev->ibdev, start, length, mr_access_flags);
> > -     if (IS_ERR(umem)) {
> > -             ibdev_err(&rdev->ibdev, "Failed to get umem");
> > +     if (!dmabuf) {
> > +             umem = ib_umem_get(&rdev->ibdev, start, length, mr_access_flags);
> > +             if (IS_ERR(umem))
> > +                     rc = PTR_ERR(umem);
> > +     } else {
> > +             umem_dmabuf = ib_umem_dmabuf_get_pinned(&rdev->ibdev, start, length,
> > +                                                     fd, mr_access_flags);
> > +             if (IS_ERR(umem_dmabuf))
> > +                     rc = PTR_ERR(umem_dmabuf);
> > +             else
> > +                     umem = &umem_dmabuf->umem;
> > +     }
>
> This is pretty ugly, why can't you pass in the umem from the two stubs:
My intention was to reuse the bnxt_re_reg_user_mr function without
much change. I got your point; let me split the support function and
post a v2 (a rough sketch of that split follows this reply).
>
> > +struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
> > +                               u64 virt_addr, int mr_access_flags,
> > +                               struct ib_udata *udata)
> > +{
> > +     return __bnxt_re_reg_user_mr(ib_pd, start, length, virt_addr, 0,
> > +                                  mr_access_flags, udata, false);
> > +}
> > +
> > +struct ib_mr *bnxt_re_reg_user_mr_dmabuf(struct ib_pd *ib_pd, u64 start,
> > +                                      u64 length, u64 virt_addr, int fd,
> > +                                      int mr_access_flags, struct ib_udata *udata)
> > +{
> > +     return __bnxt_re_reg_user_mr(ib_pd, start, length, virt_addr, fd,
> > +                                  mr_access_flags, udata, true);
> > +}
>
> ?
>
> Jason
>
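
As a rough illustration of the split Jason asks for (and Selvin agrees to post as v2), the two uverbs entry points could each acquire their own umem and hand it to one common helper, so the helper needs neither an fd nor a dmabuf flag. This is only a sketch of that shape, not the actual v2; the helper's parameter list and the placeholder body are assumptions.

static struct ib_mr *__bnxt_re_reg_user_mr(struct ib_pd *ib_pd,
					   struct ib_umem *umem,
					   u64 virt_addr, int mr_access_flags)
{
	/*
	 * Common registration path, essentially today's bnxt_re_reg_user_mr()
	 * after the umem has been obtained: allocate the bnxt_re_mr, derive
	 * the page size from the umem, program the HW and set lkey/rkey,
	 * releasing the umem on any failure.
	 */
	return ERR_PTR(-EOPNOTSUPP);	/* placeholder body for the sketch */
}

struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
				  u64 virt_addr, int mr_access_flags,
				  struct ib_udata *udata)
{
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct ib_umem *umem;

	umem = ib_umem_get(&pd->rdev->ibdev, start, length, mr_access_flags);
	if (IS_ERR(umem)) {
		ibdev_err(&pd->rdev->ibdev, "Failed to get umem");
		return ERR_CAST(umem);
	}
	return __bnxt_re_reg_user_mr(ib_pd, umem, virt_addr, mr_access_flags);
}

struct ib_mr *bnxt_re_reg_user_mr_dmabuf(struct ib_pd *ib_pd, u64 start,
					 u64 length, u64 virt_addr, int fd,
					 int mr_access_flags,
					 struct ib_udata *udata)
{
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct ib_umem_dmabuf *umem_dmabuf;

	umem_dmabuf = ib_umem_dmabuf_get_pinned(&pd->rdev->ibdev, start,
						length, fd, mr_access_flags);
	if (IS_ERR(umem_dmabuf)) {
		ibdev_err(&pd->rdev->ibdev, "Failed to get dmabuf umem");
		return ERR_CAST(umem_dmabuf);
	}
	return __bnxt_re_reg_user_mr(ib_pd, &umem_dmabuf->umem, virt_addr,
				     mr_access_flags);
}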

Patch

diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 2b2505a..3c3459d 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -3981,17 +3981,19 @@  int bnxt_re_dealloc_mw(struct ib_mw *ib_mw)
 	return rc;
 }
 
-/* uverbs */
-struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
-				  u64 virt_addr, int mr_access_flags,
-				  struct ib_udata *udata)
+static struct ib_mr *__bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start,
+					   u64 length, u64 virt_addr, int fd,
+					   int mr_access_flags,
+					   struct ib_udata *udata,
+					   bool dmabuf)
 {
 	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
 	struct bnxt_re_dev *rdev = pd->rdev;
+	struct ib_umem_dmabuf *umem_dmabuf;
+	unsigned long page_size;
 	struct bnxt_re_mr *mr;
 	struct ib_umem *umem;
-	unsigned long page_size;
-	int umem_pgs, rc;
+	int umem_pgs, rc = 0;
 	u32 active_mrs;
 
 	if (length > BNXT_RE_MAX_MR_SIZE) {
@@ -4017,9 +4019,21 @@  struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
 	/* The fixed portion of the rkey is the same as the lkey */
 	mr->ib_mr.rkey = mr->qplib_mr.rkey;
 
-	umem = ib_umem_get(&rdev->ibdev, start, length, mr_access_flags);
-	if (IS_ERR(umem)) {
-		ibdev_err(&rdev->ibdev, "Failed to get umem");
+	if (!dmabuf) {
+		umem = ib_umem_get(&rdev->ibdev, start, length, mr_access_flags);
+		if (IS_ERR(umem))
+			rc = PTR_ERR(umem);
+	} else {
+		umem_dmabuf = ib_umem_dmabuf_get_pinned(&rdev->ibdev, start, length,
+							fd, mr_access_flags);
+		if (IS_ERR(umem_dmabuf))
+			rc = PTR_ERR(umem_dmabuf);
+		else
+			umem = &umem_dmabuf->umem;
+	}
+	if (rc) {
+		ibdev_err(&rdev->ibdev, "Failed to get umem dmabuf = %s",
+			  dmabuf ? "true" : "false");
 		rc = -EFAULT;
 		goto free_mrw;
 	}
@@ -4059,6 +4073,22 @@  struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
 	return ERR_PTR(rc);
 }
 
+struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
+				  u64 virt_addr, int mr_access_flags,
+				  struct ib_udata *udata)
+{
+	return __bnxt_re_reg_user_mr(ib_pd, start, length, virt_addr, 0,
+				     mr_access_flags, udata, false);
+}
+
+struct ib_mr *bnxt_re_reg_user_mr_dmabuf(struct ib_pd *ib_pd, u64 start,
+					 u64 length, u64 virt_addr, int fd,
+					 int mr_access_flags, struct ib_udata *udata)
+{
+	return __bnxt_re_reg_user_mr(ib_pd, start, length, virt_addr, fd,
+				     mr_access_flags, udata, true);
+}
+
 int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata)
 {
 	struct ib_device *ibdev = ctx->device;
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
index f392a09..84715b7 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
@@ -229,6 +229,10 @@  int bnxt_re_dealloc_mw(struct ib_mw *mw);
 struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 				  u64 virt_addr, int mr_access_flags,
 				  struct ib_udata *udata);
+struct ib_mr *bnxt_re_reg_user_mr_dmabuf(struct ib_pd *ib_pd, u64 start,
+					 u64 length, u64 virt_addr,
+					 int fd, int mr_access_flags,
+					 struct ib_udata *udata);
 int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata);
 void bnxt_re_dealloc_ucontext(struct ib_ucontext *context);
 int bnxt_re_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index 87960ac..c467415 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -862,6 +862,7 @@  static const struct ib_device_ops bnxt_re_dev_ops = {
 	.query_qp = bnxt_re_query_qp,
 	.query_srq = bnxt_re_query_srq,
 	.reg_user_mr = bnxt_re_reg_user_mr,
+	.reg_user_mr_dmabuf = bnxt_re_reg_user_mr_dmabuf,
 	.req_notify_cq = bnxt_re_req_notify_cq,
 	.resize_cq = bnxt_re_resize_cq,
 	INIT_RDMA_OBJ_SIZE(ib_ah, bnxt_re_ah, ib_ah),