[RFC,03/12] RDMA/i40iw: Use for_each_sg_dma_page iterator on umem SGL

Message ID: 20190126165913.18272-4-shiraz.saleem@intel.com (mailing list archive)
State: Superseded
Series: Adapt drivers to handle page combining on umem SGEs

Commit Message

Saleem, Shiraz Jan. 26, 2019, 4:59 p.m. UTC
From: "Shiraz, Saleem" <shiraz.saleem@intel.com>

Use the for_each_sg_dma_page iterator variant to walk the umem
DMA-mapped SGL and get the page DMA address. This avoids the extra
inner loop otherwise needed to iterate the pages within each SGE when
the for_each_sg iterator is used.
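
As an illustration of the shape change (a minimal sketch, not driver
code; the walk_umem_*() helper names are invented):

/* Old pattern: outer loop over SGEs, inner loop over pages of each SGE. */
static void walk_umem_old(struct ib_umem *umem)
{
	struct scatterlist *sg;
	u64 pg_addr;
	int entry, i;

	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
		int npages = sg_dma_len(sg) >> PAGE_SHIFT;

		for (i = 0; i < npages; i++) {
			pg_addr = sg_dma_address(sg) + (i << PAGE_SHIFT);
			/* consume pg_addr */
		}
	}
}

/* New pattern: a single loop; the iterator walks multi-page SGEs itself. */
static void walk_umem_new(struct ib_umem *umem)
{
	struct sg_dma_page_iter sg_iter;
	u64 pg_addr;

	for_each_sg_dma_page(umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
		pg_addr = sg_page_iter_dma_address(&sg_iter);
		/* consume pg_addr */
	}
}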

Additionally, purge umem->page_shift usage in the driver, as it is
only relevant for ODP MRs. Use the system page size and shift
instead.
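
Concretely, page counts and offsets can then use the fixed kernel
constants (a hedged before/after sketch, not the exact hunks of this
patch):

	npages = sg_dma_len(sg) >> PAGE_SHIFT;	/* was: >> region->page_shift */
	pg_addr = sg_dma_address(sg) + (i << PAGE_SHIFT);	/* was: i << region->page_shift */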

Cc: Faisal Latif <faisal.latif@intel.com>
Signed-off-by: Shiraz, Saleem <shiraz.saleem@intel.com>
---
 drivers/infiniband/hw/i40iw/i40iw_verbs.c | 35 ++++++++++++++-----------------
 1 file changed, 16 insertions(+), 19 deletions(-)

Patch

diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index 12b31a8..e490a60 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -1369,32 +1369,29 @@ static void i40iw_copy_user_pgaddrs(struct i40iw_mr *iwmr,
 {
 	struct ib_umem *region = iwmr->region;
 	struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
-	int chunk_pages, entry, i;
 	struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
 	struct i40iw_pble_info *pinfo;
-	struct scatterlist *sg;
+	struct sg_dma_page_iter sg_iter;
 	u64 pg_addr = 0;
 	u32 idx = 0;
+	bool first_pg = true;
 
 	pinfo = (level == I40IW_LEVEL_1) ? NULL : palloc->level2.leaf;
 
-	for_each_sg(region->sg_head.sgl, sg, region->nmap, entry) {
-		chunk_pages = sg_dma_len(sg) >> region->page_shift;
-		if ((iwmr->type == IW_MEMREG_TYPE_QP) &&
-		    !iwpbl->qp_mr.sq_page)
-			iwpbl->qp_mr.sq_page = sg_page(sg);
-		for (i = 0; i < chunk_pages; i++) {
-			pg_addr = sg_dma_address(sg) +
-				(i << region->page_shift);
-
-			if ((entry + i) == 0)
-				*pbl = cpu_to_le64(pg_addr & iwmr->page_msk);
-			else if (!(pg_addr & ~iwmr->page_msk))
-				*pbl = cpu_to_le64(pg_addr);
-			else
-				continue;
-			pbl = i40iw_next_pbl_addr(pbl, &pinfo, &idx);
-		}
+	if (iwmr->type == IW_MEMREG_TYPE_QP)
+		iwpbl->qp_mr.sq_page = sg_page(region->sg_head.sgl);
+
+	for_each_sg_dma_page(region->sg_head.sgl, &sg_iter, region->nmap, 0) {
+		pg_addr = sg_page_iter_dma_address(&sg_iter);
+		if (first_pg)
+			*pbl = cpu_to_le64(pg_addr & iwmr->page_msk);
+		else if (!(pg_addr & ~iwmr->page_msk))
+			*pbl = cpu_to_le64(pg_addr);
+		else
+			continue;
+
+		first_pg = false;
+		pbl = i40iw_next_pbl_addr(pbl, &pinfo, &idx);
 	}
 }
 
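
One note on the filtering logic retained from the old loop: a PBL entry
is written only when the page DMA address is aligned to the MR page
mask, except for the first page, which is recorded masked down to its
boundary. A sketch of that predicate (the helper name is hypothetical;
assuming a 2MB MR page size, page_msk == ~(2MB - 1), so for example
0x100200000 would be recorded and 0x100201000 skipped):

static inline bool i40iw_pg_recorded(u64 pg_addr, u64 page_msk, bool first_pg)
{
	/* first page is always recorded; later pages only on a mask boundary */
	return first_pg || !(pg_addr & ~page_msk);
}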