[rdma-next,5/6] RDMA/bnxt_re: Use umem APIs to retrieve optimal HW address

Message ID: 20181224223227.18016-6-shiraz.saleem@intel.com
State: Changes Requested
Series: Introduce APIs to get DMA addresses aligned to a HW supported page size

Commit Message

Saleem, Shiraz Dec. 24, 2018, 10:32 p.m. UTC
Call the core helpers to retrieve the optimal HW-aligned
addresses to use for the MR, at a page size supported by
the bnxt_re hardware.

Remove the check of the umem->hugetlb flag as it is no
longer required: the core helpers return 2M-aligned
addresses when the MR is backed by 2M huge pages.
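For reference, the calling pattern of the new core helpers,
as used in this patch, looks roughly as follows. This is a
minimal sketch: supported_pgsz stands in for the driver's
page-size bitmap, and error handling is elided.

	/* Pick a single HW-supported page size covering the umem,
	 * then walk the umem in page_size-aligned steps, collecting
	 * the address of each aligned block into the PBL.
	 */
	page_size = ib_umem_find_single_pg_size(umem, supported_pgsz,
						virt_addr);
	page_shift = __ffs(page_size);

	for (ib_umem_start_phys_iter(umem, &sg_phys_iter, page_size);
	     ib_umem_next_phys_iter(umem, &sg_phys_iter);)
		*pbl_tbl++ = sg_phys_iter.phyaddr;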

Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
---
 drivers/infiniband/hw/bnxt_re/ib_verbs.c | 28 +++++++++++-----------------
 1 file changed, 11 insertions(+), 17 deletions(-)

Patch

diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index fa0cb7c..6692435 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -3535,17 +3535,13 @@ static int fill_umem_pbl_tbl(struct ib_umem *umem, u64 *pbl_tbl_orig,
 			     int page_shift)
 {
 	u64 *pbl_tbl = pbl_tbl_orig;
-	u64 paddr;
-	u64 page_mask = (1ULL << page_shift) - 1;
-	struct sg_page_iter sg_iter;
+	u64 page_size = BIT_ULL(page_shift);
+	struct sg_phys_iter sg_phys_iter;
+
+	for (ib_umem_start_phys_iter(umem, &sg_phys_iter, page_size);
+	     ib_umem_next_phys_iter(umem, &sg_phys_iter);)
+		*pbl_tbl++ = sg_phys_iter.phyaddr;
 
-	for_each_sg_page(umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
-		paddr = sg_page_iter_dma_address(&sg_iter);
-		if (pbl_tbl == pbl_tbl_orig)
-			*pbl_tbl++ = paddr & ~page_mask;
-		else if ((paddr & page_mask) == 0)
-			*pbl_tbl++ = paddr;
-	}
 	return pbl_tbl - pbl_tbl_orig;
 }
 
@@ -3608,7 +3604,9 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
 		goto free_umem;
 	}
 
-	page_shift = umem->page_shift;
+	page_shift = __ffs(ib_umem_find_single_pg_size(umem,
+				BNXT_RE_PAGE_SIZE_4K | BNXT_RE_PAGE_SIZE_2M,
+				virt_addr));
 
 	if (!bnxt_re_page_size_ok(page_shift)) {
 		dev_err(rdev_to_dev(rdev), "umem page size unsupported!");
@@ -3616,17 +3614,13 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
 		goto fail;
 	}
 
-	if (!umem->hugetlb && length > BNXT_RE_MAX_MR_SIZE_LOW) {
+	if (page_shift == BNXT_RE_PAGE_SHIFT_4K &&
+	    length > BNXT_RE_MAX_MR_SIZE_LOW) {
 		dev_err(rdev_to_dev(rdev), "Requested MR Sz:%llu Max sup:%llu",
 			length,	(u64)BNXT_RE_MAX_MR_SIZE_LOW);
 		rc = -EINVAL;
 		goto fail;
 	}
-	if (umem->hugetlb && length > BNXT_RE_PAGE_SIZE_2M) {
-		page_shift = BNXT_RE_PAGE_SHIFT_2M;
-		dev_warn(rdev_to_dev(rdev), "umem hugetlb set page_size %x",
-			 1 << page_shift);
-	}
 
 	/* Map umem buf ptrs to the PBL */
 	umem_pgs = fill_umem_pbl_tbl(umem, pbl_tbl, page_shift);
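
The page_shift derivation in the second hunk works because
ib_umem_find_single_pg_size() returns a single power-of-two page
size chosen from the supported bitmap, so __ffs() of the result is
the corresponding page shift. A minimal worked example, assuming
the driver's usual definitions BNXT_RE_PAGE_SHIFT_2M == 21 and
BNXT_RE_PAGE_SIZE_2M == BIT(21):

	/* MR fully backed by (and aligned to) 2M huge pages: */
	page_size = ib_umem_find_single_pg_size(umem,
			BNXT_RE_PAGE_SIZE_4K | BNXT_RE_PAGE_SIZE_2M,
			virt_addr);		/* returns 0x200000 */
	page_shift = __ffs(page_size);		/* 21, i.e. 2M */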