
[for-next,1/6] RDMA/bnxt_re: Use the common mmap helper functions

Message ID: 1681125115-7127-2-git-send-email-selvin.xavier@broadcom.com
State: Superseded
Series: RDMA/bnxt_re: driver update for supporting low latency push

Commit Message

Selvin Xavier April 10, 2023, 11:11 a.m. UTC
Replace the driver's mmap handling with the common code in
the IB core. Create an rdma_user_mmap_entry for each mmap
resource and add it to the ib_core mmap list. Add mmap_free
verb support. Also, use rdma_user_mmap_io() while mapping
doorbell pages.

Signed-off-by: Selvin Xavier <selvin.xavier@broadcom.com>
---
 drivers/infiniband/hw/bnxt_re/ib_verbs.c  | 110 ++++++++++++++++++++++++++----
 drivers/infiniband/hw/bnxt_re/ib_verbs.h  |  15 ++++
 drivers/infiniband/hw/bnxt_re/main.c      |   1 +
 drivers/infiniband/hw/bnxt_re/qplib_fp.c  |  10 +--
 drivers/infiniband/hw/bnxt_re/qplib_res.c |   2 +-
 5 files changed, 115 insertions(+), 23 deletions(-)
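
With this change, resp.dbr no longer carries a raw doorbell address: it
carries the opaque token produced by rdma_user_mmap_get_offset(). A
userspace provider would consume it roughly as in the sketch below
(illustrative only; the helper name and parameters are assumptions, not
code from this series):

	#include <stdint.h>
	#include <sys/mman.h>

	/*
	 * Hypothetical provider-side helper: map the uncached doorbell
	 * page using the offset token the kernel returned in resp.dbr.
	 * "cmd_fd" is the command fd of the open uverbs context.
	 */
	static void *bnxt_map_uc_db(int cmd_fd, uint64_t dbr_token, size_t pg_sz)
	{
		void *db = mmap(NULL, pg_sz, PROT_READ | PROT_WRITE,
				MAP_SHARED, cmd_fd, (off_t)dbr_token);

		return db == MAP_FAILED ? NULL : db;
	}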

Comments

Leon Romanovsky April 10, 2023, 12:20 p.m. UTC | #1
On Mon, Apr 10, 2023 at 04:11:50AM -0700, Selvin Xavier wrote:
> Replace the driver's mmap handling with the common code in
> the IB core. Create an rdma_user_mmap_entry for each mmap
> resource and add it to the ib_core mmap list. Add mmap_free
> verb support. Also, use rdma_user_mmap_io() while mapping
> doorbell pages.
> 
> Signed-off-by: Selvin Xavier <selvin.xavier@broadcom.com>
> ---
>  drivers/infiniband/hw/bnxt_re/ib_verbs.c  | 110 ++++++++++++++++++++++++++----
>  drivers/infiniband/hw/bnxt_re/ib_verbs.h  |  15 ++++
>  drivers/infiniband/hw/bnxt_re/main.c      |   1 +
>  drivers/infiniband/hw/bnxt_re/qplib_fp.c  |  10 +--
>  drivers/infiniband/hw/bnxt_re/qplib_res.c |   2 +-
>  5 files changed, 115 insertions(+), 23 deletions(-)
> 
> diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
> index e86afec..c9d134c 100644
> --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
> +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
> @@ -533,12 +533,42 @@ static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
>  	return rc;
>  }
>  
> +static struct rdma_user_mmap_entry*
> +bnxt_re_mmap_entry_insert(struct bnxt_re_ucontext *uctx, u64 mem_offset,
> +			  enum bnxt_re_mmap_flag mmap_flag, u64 *offset)
> +{
> +	struct bnxt_re_user_mmap_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);

Please separate calls to kzalloc() and variable initialization.
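
That is, roughly:

	struct bnxt_re_user_mmap_entry *entry;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return NULL;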

Thanks
Jason Gunthorpe April 11, 2023, 4:53 p.m. UTC | #2
On Mon, Apr 10, 2023 at 04:11:50AM -0700, Selvin Xavier wrote:
> -		rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
> +		pd->pd_db_mmap = bnxt_re_mmap_entry_insert(ucntx, (u64)ucntx->dpi.umdbr,
> +							   BNXT_RE_MMAP_UC_DB, &resp.dbr);
> +
> +		if (!pd->pd_db_mmap) {
> +			ibdev_err(&rdev->ibdev,
> +				  "Failed to insert mmap entry\n");

No prints from drivers on failure paths. dbg at worst.
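
E.g., at most something like:

	ibdev_dbg(&rdev->ibdev, "Failed to insert mmap entry\n");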

> +	switch (bnxt_entry->mmap_flag) {
> +	case BNXT_RE_MMAP_UC_DB:
> +		pfn = bnxt_entry->mem_offset >> PAGE_SHIFT;
> +		ret = rdma_user_mmap_io(ib_uctx, vma, pfn, PAGE_SIZE,
> +					pgprot_noncached(vma->vm_page_prot),
> +					rdma_entry);
> +		if (ret)
> +			ibdev_err(&rdev->ibdev, "Failed to map DB page");
> +		break;
> +	case BNXT_RE_MMAP_SH_PAGE:
>  		pfn = virt_to_phys(uctx->shpg) >> PAGE_SHIFT;
>  		if (remap_pfn_range(vma, vma->vm_start,
>  				    pfn, PAGE_SIZE, vma->vm_page_prot)) {
>  			ibdev_err(&rdev->ibdev, "Failed to map shared page");
> -			return -EAGAIN;
> +			ret = -EAGAIN;
>  		}
> +		break;

What is this? You can't enable disassociate support until all the mmaps
in the driver have been fixed.

Jason
Selvin Xavier April 12, 2023, 6:49 a.m. UTC | #3
On Tue, Apr 11, 2023 at 10:23 PM Jason Gunthorpe <jgg@ziepe.ca> wrote:
>
> On Mon, Apr 10, 2023 at 04:11:50AM -0700, Selvin Xavier wrote:
> > -             rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
> > +             pd->pd_db_mmap = bnxt_re_mmap_entry_insert(ucntx, (u64)ucntx->dpi.umdbr,
> > +                                                        BNXT_RE_MMAP_UC_DB, &resp.dbr);
> > +
> > +             if (!pd->pd_db_mmap) {
> > +                     ibdev_err(&rdev->ibdev,
> > +                               "Failed to insert mmap entry\n");
>
> No prints from drivers on failure paths. dbg at worst.
>
> > +     switch (bnxt_entry->mmap_flag) {
> > +     case BNXT_RE_MMAP_UC_DB:
> > +             pfn = bnxt_entry->mem_offset >> PAGE_SHIFT;
> > +             ret = rdma_user_mmap_io(ib_uctx, vma, pfn, PAGE_SIZE,
> > +                                     pgprot_noncached(vma->vm_page_prot),
> > +                                     rdma_entry);
> > +             if (ret)
> > +                     ibdev_err(&rdev->ibdev, "Failed to map DB page");
> > +             break;
> > +     case BNXT_RE_MMAP_SH_PAGE:
> >               pfn = virt_to_phys(uctx->shpg) >> PAGE_SHIFT;
> >               if (remap_pfn_range(vma, vma->vm_start,
> >                                   pfn, PAGE_SIZE, vma->vm_page_prot)) {
> >                       ibdev_err(&rdev->ibdev, "Failed to map shared page");
> > -                     return -EAGAIN;
> > +                     ret = -EAGAIN;
> >               }
> > +             break;
>
> What is this? You can't enable disassociate support until all the mmaps
> in the driver have been fixed.
Noted. Will modify this also to use the rdma mmap APIs.
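One possible shape for the shared-page case (untested sketch, not the
final patch): since uctx->shpg is an ordinary refcounted kernel page,
vm_insert_page() can replace the raw remap_pfn_range() once the lookup
goes through the mmap entry:

	case BNXT_RE_MMAP_SH_PAGE:
		/* shpg is normal kernel memory; insert it as a
		 * refcounted page instead of remapping a raw PFN.
		 */
		ret = vm_insert_page(vma, vma->vm_start,
				     virt_to_page(uctx->shpg));
		break;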
>
> Jason
>

Patch

diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index e86afec..c9d134c 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -533,12 +533,42 @@ static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
 	return rc;
 }
 
+static struct rdma_user_mmap_entry*
+bnxt_re_mmap_entry_insert(struct bnxt_re_ucontext *uctx, u64 mem_offset,
+			  enum bnxt_re_mmap_flag mmap_flag, u64 *offset)
+{
+	struct bnxt_re_user_mmap_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+	int ret;
+
+	if (!entry)
+		return NULL;
+
+	entry->mem_offset = mem_offset;
+	entry->mmap_flag = mmap_flag;
+
+	ret = rdma_user_mmap_entry_insert(&uctx->ib_uctx,
+					  &entry->rdma_entry, PAGE_SIZE);
+	if (ret) {
+		kfree(entry);
+		return NULL;
+	}
+	if (offset)
+		*offset = rdma_user_mmap_get_offset(&entry->rdma_entry);
+
+	return &entry->rdma_entry;
+}
+
 /* Protection Domains */
 int bnxt_re_dealloc_pd(struct ib_pd *ib_pd, struct ib_udata *udata)
 {
 	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
 	struct bnxt_re_dev *rdev = pd->rdev;
 
+	if (udata) {
+		rdma_user_mmap_entry_remove(pd->pd_db_mmap);
+		pd->pd_db_mmap = NULL;
+	}
+
 	bnxt_re_destroy_fence_mr(pd);
 
 	if (pd->qplib_pd.id) {
@@ -557,7 +587,7 @@ int bnxt_re_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
 	struct bnxt_re_ucontext *ucntx = rdma_udata_to_drv_context(
 		udata, struct bnxt_re_ucontext, ib_uctx);
 	struct bnxt_re_pd *pd = container_of(ibpd, struct bnxt_re_pd, ib_pd);
-	int rc;
+	int rc = 0;
 
 	pd->rdev = rdev;
 	if (bnxt_qplib_alloc_pd(&rdev->qplib_res.pd_tbl, &pd->qplib_pd)) {
@@ -567,7 +597,7 @@ int bnxt_re_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
 	}
 
 	if (udata) {
-		struct bnxt_re_pd_resp resp;
+		struct bnxt_re_pd_resp resp = {};
 
 		if (!ucntx->dpi.dbr) {
 			/* Allocate DPI in alloc_pd to avoid failing of
@@ -584,12 +614,23 @@ int bnxt_re_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
 		resp.pdid = pd->qplib_pd.id;
 		/* Still allow mapping this DBR to the new user PD. */
 		resp.dpi = ucntx->dpi.dpi;
-		resp.dbr = (u64)ucntx->dpi.umdbr;
 
-		rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
+		pd->pd_db_mmap = bnxt_re_mmap_entry_insert(ucntx, (u64)ucntx->dpi.umdbr,
+							   BNXT_RE_MMAP_UC_DB, &resp.dbr);
+
+		if (!pd->pd_db_mmap) {
+			ibdev_err(&rdev->ibdev,
+				  "Failed to insert mmap entry\n");
+			rc = -ENOMEM;
+			goto dbfail;
+		}
+
+		rc = ib_copy_to_udata(udata, &resp, min(sizeof(resp), udata->outlen));
 		if (rc) {
 			ibdev_err(&rdev->ibdev,
 				  "Failed to copy user response\n");
+			rdma_user_mmap_entry_remove(pd->pd_db_mmap);
+			rc = -EFAULT;
 			goto dbfail;
 		}
 	}
@@ -3994,6 +4035,13 @@ int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata)
 	resp.comp_mask |= BNXT_RE_UCNTX_CMASK_HAVE_MODE;
 	resp.mode = rdev->chip_ctx->modes.wqe_mode;
 
+	uctx->shpage_mmap = bnxt_re_mmap_entry_insert(uctx, 0, BNXT_RE_MMAP_SH_PAGE, NULL);
+	if (!uctx->shpage_mmap) {
+		ibdev_err(ibdev, "Failed to create mmap entry for shared page\n");
+		rc = -ENOMEM;
+		goto cfail;
+	}
+
 	rc = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp)));
 	if (rc) {
 		ibdev_err(ibdev, "Failed to copy user context");
@@ -4017,6 +4065,8 @@ void bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
 
 	struct bnxt_re_dev *rdev = uctx->rdev;
 
+	rdma_user_mmap_entry_remove(uctx->shpage_mmap);
+	uctx->shpage_mmap = NULL;
 	if (uctx->shpg)
 		free_page((unsigned long)uctx->shpg);
 
@@ -4036,27 +4086,57 @@ int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma)
 	struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
 						   struct bnxt_re_ucontext,
 						   ib_uctx);
+	struct bnxt_re_user_mmap_entry *bnxt_entry;
+	struct rdma_user_mmap_entry *rdma_entry;
 	struct bnxt_re_dev *rdev = uctx->rdev;
+	int ret = 0;
 	u64 pfn;
 
-	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
+	rdma_entry = rdma_user_mmap_entry_get(&uctx->ib_uctx, vma);
+	if (!rdma_entry) {
+		ibdev_err(&rdev->ibdev,
+			  "%s: No valid rdma_entry pgoffset = 0x%lx\n",
+			  __func__, vma->vm_pgoff);
 		return -EINVAL;
+	}
 
-	if (vma->vm_pgoff) {
-		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-		if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
-				       PAGE_SIZE, vma->vm_page_prot)) {
-			ibdev_err(&rdev->ibdev, "Failed to map DPI");
-			return -EAGAIN;
-		}
-	} else {
+	bnxt_entry = container_of(rdma_entry, struct bnxt_re_user_mmap_entry,
+				  rdma_entry);
+
+	switch (bnxt_entry->mmap_flag) {
+	case BNXT_RE_MMAP_UC_DB:
+		pfn = bnxt_entry->mem_offset >> PAGE_SHIFT;
+		ret = rdma_user_mmap_io(ib_uctx, vma, pfn, PAGE_SIZE,
+					pgprot_noncached(vma->vm_page_prot),
+					rdma_entry);
+		if (ret)
+			ibdev_err(&rdev->ibdev, "Failed to map DB page");
+		break;
+	case BNXT_RE_MMAP_SH_PAGE:
 		pfn = virt_to_phys(uctx->shpg) >> PAGE_SHIFT;
 		if (remap_pfn_range(vma, vma->vm_start,
 				    pfn, PAGE_SIZE, vma->vm_page_prot)) {
 			ibdev_err(&rdev->ibdev, "Failed to map shared page");
-			return -EAGAIN;
+			ret = -EAGAIN;
 		}
+		break;
+	default:
+		ibdev_err(&rdev->ibdev,
+			  "%s: invalid mmap flag = 0x%x\n", __func__, bnxt_entry->mmap_flag);
+		ret = -EINVAL;
+		break;
 	}
 
-	return 0;
+	rdma_user_mmap_entry_put(rdma_entry);
+	return ret;
+}
+
+void bnxt_re_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
+{
+	struct bnxt_re_user_mmap_entry *bnxt_entry;
+
+	bnxt_entry = container_of(rdma_entry, struct bnxt_re_user_mmap_entry,
+				  rdma_entry);
+
+	kfree(bnxt_entry);
 }
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
index 31f7e34..dcd31ae 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
@@ -60,6 +60,7 @@ struct bnxt_re_pd {
 	struct bnxt_re_dev	*rdev;
 	struct bnxt_qplib_pd	qplib_pd;
 	struct bnxt_re_fence_data fence;
+	struct rdma_user_mmap_entry *pd_db_mmap;
 };
 
 struct bnxt_re_ah {
@@ -136,6 +137,18 @@ struct bnxt_re_ucontext {
 	struct bnxt_qplib_dpi	dpi;
 	void			*shpg;
 	spinlock_t		sh_lock;	/* protect shpg */
+	struct rdma_user_mmap_entry *shpage_mmap;
+};
+
+enum bnxt_re_mmap_flag {
+	BNXT_RE_MMAP_SH_PAGE,
+	BNXT_RE_MMAP_UC_DB,
+};
+
+struct bnxt_re_user_mmap_entry {
+	struct rdma_user_mmap_entry rdma_entry;
+	u64 mem_offset;
+	u8 mmap_flag;
 };
 
 static inline u16 bnxt_re_get_swqe_size(int nsge)
@@ -213,6 +226,8 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata);
 void bnxt_re_dealloc_ucontext(struct ib_ucontext *context);
 int bnxt_re_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
+void bnxt_re_mmap_free(struct rdma_user_mmap_entry *rdma_entry);
+
 
 unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp);
 void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp, unsigned long flags);
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index b9e2f89..1d361eb 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -539,6 +539,7 @@ static const struct ib_device_ops bnxt_re_dev_ops = {
 	.get_port_immutable = bnxt_re_get_port_immutable,
 	.map_mr_sg = bnxt_re_map_mr_sg,
 	.mmap = bnxt_re_mmap,
+	.mmap_free = bnxt_re_mmap_free,
 	.modify_qp = bnxt_re_modify_qp,
 	.modify_srq = bnxt_re_modify_srq,
 	.poll_cq = bnxt_re_poll_cq,
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index f139d4c..ade858f 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -471,7 +471,6 @@ static int bnxt_qplib_map_nq_db(struct bnxt_qplib_nq *nq,  u32 reg_offt)
 	resource_size_t reg_base;
 	struct bnxt_qplib_nq_db *nq_db;
 	struct pci_dev *pdev;
-	int rc = 0;
 
 	pdev = nq->pdev;
 	nq_db = &nq->nq_db;
@@ -481,8 +480,7 @@ static int bnxt_qplib_map_nq_db(struct bnxt_qplib_nq *nq,  u32 reg_offt)
 	if (!nq_db->reg.bar_base) {
 		dev_err(&pdev->dev, "QPLIB: NQ BAR region %d resc start is 0!",
 			nq_db->reg.bar_id);
-		rc = -ENOMEM;
-		goto fail;
+		return -ENOMEM;
 	}
 
 	reg_base = nq_db->reg.bar_base + reg_offt;
@@ -492,15 +490,13 @@ static int bnxt_qplib_map_nq_db(struct bnxt_qplib_nq *nq,  u32 reg_offt)
 	if (!nq_db->reg.bar_reg) {
 		dev_err(&pdev->dev, "QPLIB: NQ BAR region %d mapping failed",
 			nq_db->reg.bar_id);
-		rc = -ENOMEM;
-		goto fail;
+		return -ENOMEM;
 	}
 
 	nq_db->dbinfo.db = nq_db->reg.bar_reg;
 	nq_db->dbinfo.hwq = &nq->hwq;
 	nq_db->dbinfo.xid = nq->ring_id;
-fail:
-	return rc;
+	return 0;
 }
 
 int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
index 126d4f2..920ab87 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
@@ -813,7 +813,7 @@ static int bnxt_qplib_alloc_dpi_tbl(struct bnxt_qplib_res     *res,
 	return 0;
 
 unmap_io:
-	pci_iounmap(res->pdev, dpit->dbr_bar_reg_iomem);
+	iounmap(dpit->dbr_bar_reg_iomem);
 	dpit->dbr_bar_reg_iomem = NULL;
 	return -ENOMEM;
 }