
[rdma-next,v1,5/6] RDMA/bnxt_re: Use umem APIs to retrieve aligned DMA address

Message ID: 20190219145745.13476-6-shiraz.saleem@intel.com
State: Changes Requested
Delegated to: Jason Gunthorpe
Series: Add APIs to get contiguous memory blocks aligned to a HW supported page size

Commit Message

Saleem, Shiraz Feb. 19, 2019, 2:57 p.m. UTC
Call the core helpers to retrieve the HW aligned
address to use for the MR, within a supported bnxt_re
page size.

Remove checking the umem->hugetlb flag as it is no
longer required. The core helpers will return the 2M
aligned address if the MR is backed by 2M huge pages.

Cc: Selvin Xavier <selvin.xavier@broadcom.com>
Cc: Devesh Sharma <devesh.sharma@broadcom.com>
Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
---
 drivers/infiniband/hw/bnxt_re/ib_verbs.c | 28 +++++++++++-----------------
 1 file changed, 11 insertions(+), 17 deletions(-)
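
For readers comparing the two approaches, here is the loop this patch
removes, restated with comments added (no new functionality, just an
annotated copy of the deleted code):

	/* Old approach: walk DMA pages at PAGE_SIZE granularity and
	 * hand-filter down to page_shift-sized blocks:
	 *  - the first entry is force-aligned by masking off the low
	 *    bits of the address;
	 *  - every later address is emitted only if it already sits on
	 *    a page_shift boundary, which skips the PAGE_SIZE pages in
	 *    the interior of each block.
	 */
	for_each_sg_dma_page(umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
		paddr = sg_page_iter_dma_address(&sg_iter);
		if (pbl_tbl == pbl_tbl_orig)
			*pbl_tbl++ = paddr & ~page_mask;  /* first block */
		else if ((paddr & page_mask) == 0)
			*pbl_tbl++ = paddr;               /* block start */
	}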

Comments

Ira Weiny Feb. 19, 2019, 9:18 p.m. UTC | #1
On Tue, Feb 19, 2019 at 08:57:44AM -0600, Shiraz Saleem wrote:
> Call the core helpers to retrieve the HW aligned
> address to use for the MR, within a supported bnxt_re
> page size.
> 
> Remove checking the umem->hugetlb flag as it is no
> longer required. The core helpers will return the 2M
> aligned address if the MR is backed by 2M huge pages.
> 
> Cc: Selvin Xavier <selvin.xavier@broadcom.com>
> Cc: Devesh Sharma <devesh.sharma@broadcom.com>
> Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
> ---
>  drivers/infiniband/hw/bnxt_re/ib_verbs.c | 28 +++++++++++-----------------
>  1 file changed, 11 insertions(+), 17 deletions(-)
> 
> diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
> index 2ed7786..6f56857 100644
> --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
> +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
> @@ -3551,17 +3551,13 @@ static int fill_umem_pbl_tbl(struct ib_umem *umem, u64 *pbl_tbl_orig,
>  			     int page_shift)
>  {
>  	u64 *pbl_tbl = pbl_tbl_orig;
> -	u64 paddr;
> -	u64 page_mask = (1ULL << page_shift) - 1;
> -	struct sg_dma_page_iter sg_iter;
> +	u64 page_size =  BIT_ULL(page_shift);
> +	struct sg_phys_iter sg_phys_iter;
> +
> +	for (ib_umem_start_phys_iter(umem, &sg_phys_iter, page_size);
> +	     ib_umem_next_phys_iter(umem, &sg_phys_iter);)
> +		*pbl_tbl++ = sg_phys_iter.phyaddr;
>  
> -	for_each_sg_dma_page (umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
> -		paddr = sg_page_iter_dma_address(&sg_iter);
> -		if (pbl_tbl == pbl_tbl_orig)
> -			*pbl_tbl++ = paddr & ~page_mask;
> -		else if ((paddr & page_mask) == 0)
> -			*pbl_tbl++ = paddr;
> -	}
>  	return pbl_tbl - pbl_tbl_orig;
>  }
>  
> @@ -3623,7 +3619,9 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
>  		goto free_umem;
>  	}
>  
> -	page_shift = PAGE_SHIFT;
> +	page_shift = __ffs(ib_umem_find_single_pg_size(umem,
> +				BNXT_RE_PAGE_SIZE_4K | BNXT_RE_PAGE_SIZE_2M,
> +				virt_addr));

Maybe this is impossible, but __ffs requires the value to be non-zero
and ib_umem_find_single_pg_size may return 0?

Is this not possible with this driver?

Ira
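
A guarded variant of the assignment in question might look like the
sketch below (illustration only, not part of the posted patch): __ffs()
is undefined when its argument is 0, so a zero return from the helper
would need an explicit bail-out before taking the shift.

	unsigned long pg_sz;

	/* Sketch: bail out if no supported page size fits, since
	 * __ffs(0) is undefined. Error handling here is hypothetical.
	 */
	pg_sz = ib_umem_find_single_pg_size(umem,
				BNXT_RE_PAGE_SIZE_4K | BNXT_RE_PAGE_SIZE_2M,
				virt_addr);
	if (!pg_sz) {
		rc = -EINVAL;
		goto fail;
	}
	page_shift = __ffs(pg_sz);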

>  
>  	if (!bnxt_re_page_size_ok(page_shift)) {
>  		dev_err(rdev_to_dev(rdev), "umem page size unsupported!");
> @@ -3631,17 +3629,13 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
>  		goto fail;
>  	}
>  
> -	if (!umem->hugetlb && length > BNXT_RE_MAX_MR_SIZE_LOW) {
> +	if (page_shift == BNXT_RE_PAGE_SHIFT_4K &&
> +	    length > BNXT_RE_MAX_MR_SIZE_LOW) {
>  		dev_err(rdev_to_dev(rdev), "Requested MR Sz:%llu Max sup:%llu",
>  			length,	(u64)BNXT_RE_MAX_MR_SIZE_LOW);
>  		rc = -EINVAL;
>  		goto fail;
>  	}
> -	if (umem->hugetlb && length > BNXT_RE_PAGE_SIZE_2M) {
> -		page_shift = BNXT_RE_PAGE_SHIFT_2M;
> -		dev_warn(rdev_to_dev(rdev), "umem hugetlb set page_size %x",
> -			 1 << page_shift);
> -	}
>  
>  	/* Map umem buf ptrs to the PBL */
>  	umem_pgs = fill_umem_pbl_tbl(umem, pbl_tbl, page_shift);
> -- 
> 1.8.3.1
>
Saleem, Shiraz Feb. 23, 2019, 7:44 p.m. UTC | #2
On Tue, Feb 19, 2019 at 01:18:49PM -0800, Ira Weiny wrote:
> On Tue, Feb 19, 2019 at 08:57:44AM -0600, Shiraz Saleem wrote:
> > Call the core helpers to retrieve the HW aligned
> > address to use for the MR, within a supported bnxt_re
> > page size.
> > 
> > Remove checking the umem->hugetlb flag as it is no
> > longer required. The core helpers will return the 2M
> > aligned address if the MR is backed by 2M huge pages.
> > 
> > Cc: Selvin Xavier <selvin.xavier@broadcom.com>
> > Cc: Devesh Sharma <devesh.sharma@broadcom.com>
> > Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
[...]
 
> > @@ -3623,7 +3619,9 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
> >  		goto free_umem;
> >  	}
> >  
> > -	page_shift = PAGE_SHIFT;
> > +	page_shift = __ffs(ib_umem_find_single_pg_size(umem,
> > +				BNXT_RE_PAGE_SIZE_4K | BNXT_RE_PAGE_SIZE_2M,
> > +				virt_addr));
> 
> Maybe this is impossible, but __ffs requires the value to be non-zero
> and ib_umem_find_single_pg_size may return 0?
It is expected that the driver supports the system page size or smaller.
Otherwise, it's possible for the helper to return 0. The API will also
throw a WARN_ON in that case.
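
A simplified sketch of that guard (hypothetical code; the real check
lives in the core helper added earlier in this series, and the variable
name pgsz_bitmap is assumed):

	/* If the caller's bitmap cannot express the system page size or
	 * anything smaller, warn and return 0. PAGE_SIZE | (PAGE_SIZE - 1)
	 * sets the PAGE_SIZE bit plus every smaller power-of-two bit.
	 */
	if (WARN_ON(!(pgsz_bitmap & (PAGE_SIZE | (PAGE_SIZE - 1)))))
		return 0;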

> 
> Is this not possible with this driver?
>

Patch

diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 2ed7786..6f56857 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -3551,17 +3551,13 @@  static int fill_umem_pbl_tbl(struct ib_umem *umem, u64 *pbl_tbl_orig,
 			     int page_shift)
 {
 	u64 *pbl_tbl = pbl_tbl_orig;
-	u64 paddr;
-	u64 page_mask = (1ULL << page_shift) - 1;
-	struct sg_dma_page_iter sg_iter;
+	u64 page_size =  BIT_ULL(page_shift);
+	struct sg_phys_iter sg_phys_iter;
+
+	for (ib_umem_start_phys_iter(umem, &sg_phys_iter, page_size);
+	     ib_umem_next_phys_iter(umem, &sg_phys_iter);)
+		*pbl_tbl++ = sg_phys_iter.phyaddr;
 
-	for_each_sg_dma_page (umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
-		paddr = sg_page_iter_dma_address(&sg_iter);
-		if (pbl_tbl == pbl_tbl_orig)
-			*pbl_tbl++ = paddr & ~page_mask;
-		else if ((paddr & page_mask) == 0)
-			*pbl_tbl++ = paddr;
-	}
 	return pbl_tbl - pbl_tbl_orig;
 }
 
@@ -3623,7 +3619,9 @@  struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
 		goto free_umem;
 	}
 
-	page_shift = PAGE_SHIFT;
+	page_shift = __ffs(ib_umem_find_single_pg_size(umem,
+				BNXT_RE_PAGE_SIZE_4K | BNXT_RE_PAGE_SIZE_2M,
+				virt_addr));
 
 	if (!bnxt_re_page_size_ok(page_shift)) {
 		dev_err(rdev_to_dev(rdev), "umem page size unsupported!");
@@ -3631,17 +3629,13 @@  struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
 		goto fail;
 	}
 
-	if (!umem->hugetlb && length > BNXT_RE_MAX_MR_SIZE_LOW) {
+	if (page_shift == BNXT_RE_PAGE_SHIFT_4K &&
+	    length > BNXT_RE_MAX_MR_SIZE_LOW) {
 		dev_err(rdev_to_dev(rdev), "Requested MR Sz:%llu Max sup:%llu",
 			length,	(u64)BNXT_RE_MAX_MR_SIZE_LOW);
 		rc = -EINVAL;
 		goto fail;
 	}
-	if (umem->hugetlb && length > BNXT_RE_PAGE_SIZE_2M) {
-		page_shift = BNXT_RE_PAGE_SHIFT_2M;
-		dev_warn(rdev_to_dev(rdev), "umem hugetlb set page_size %x",
-			 1 << page_shift);
-	}
 
 	/* Map umem buf ptrs to the PBL */
 	umem_pgs = fill_umem_pbl_tbl(umem, pbl_tbl, page_shift);