[rdma-next,08/12] RDMA/qedr: Use for_each_sg_dma_page iterator on umem SGL

Message ID 20190211152508.25040-9-shiraz.saleem@intel.com (mailing list archive)
State Accepted
Series Adapt drivers to handle page combining on umem SGEs

Commit Message

Saleem, Shiraz Feb. 11, 2019, 3:25 p.m. UTC
From: "Shiraz, Saleem" <shiraz.saleem@intel.com>

Use the for_each_sg_dma_page iterator variant to walk the umem
DMA-mapped SGL and get the page DMA address. This avoids the extra
inner loop needed to iterate over the pages of each SGE when the
for_each_sg iterator is used.

Additionally, purge umem->page_shift usage in the driver as it's
only relevant for ODP MRs. Use the system page size and shift
instead.

Cc: Michal Kalderon <Michal.Kalderon@cavium.com>
Cc: Ariel Elior <Ariel.Elior@cavium.com>
Signed-off-by: Shiraz, Saleem <shiraz.saleem@intel.com>
---
 drivers/infiniband/hw/qedr/verbs.c | 68 +++++++++++++++++---------------------
 1 file changed, 31 insertions(+), 37 deletions(-)
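
For orientation, here is a minimal before/after sketch of the two walk
styles the commit message contrasts. This is an editorial illustration,
not driver code: the walk_old()/walk_new() wrappers are hypothetical,
while the iterator macros and the sg_head/nmap fields match the
5.0-era ib_umem this series targets.

#include <linux/scatterlist.h>
#include <rdma/ib_umem.h>

/* Old style: for_each_sg() yields whole SGEs. Once umem SGEs are
 * page-combined, one SGE can cover many PAGE_SIZE pages, so the
 * caller must split each SGE by hand.
 */
static void walk_old(struct ib_umem *umem)
{
	struct scatterlist *sg;
	u64 pg_addr;
	int entry, pg_cnt, pages;

	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
		pages = sg_dma_len(sg) >> PAGE_SHIFT;
		pg_addr = sg_dma_address(sg);
		for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
			/* consume one PAGE_SIZE chunk at pg_addr */
			pg_addr += PAGE_SIZE;
		}
	}
}

/* New style: the DMA page iterator splits internally and hands back
 * one PAGE_SIZE-aligned DMA address per iteration.
 */
static void walk_new(struct ib_umem *umem)
{
	struct sg_dma_page_iter sg_iter;

	for_each_sg_dma_page(umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
		u64 pg_addr = sg_page_iter_dma_address(&sg_iter);
		/* consume one PAGE_SIZE chunk at pg_addr */
		(void)pg_addr;
	}
}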

Comments

Michal Kalderon Feb. 12, 2019, 1:13 p.m. UTC | #1
> From: Shiraz Saleem <shiraz.saleem@intel.com>
> Sent: Monday, February 11, 2019 5:25 PM
> 
> From: "Shiraz, Saleem" <shiraz.saleem@intel.com>
> 
> Use the for_each_sg_dma_page iterator variant to walk the umem
> DMA-mapped SGL and get the page DMA address. This avoids the extra
> inner loop needed to iterate over the pages of each SGE when the
> for_each_sg iterator is used.
> 
> Additionally, purge umem->page_shift usage in the driver as it's only
> relevant for ODP MRs. Use the system page size and shift instead.
> 
> Cc: Michal Kalderon <Michal.Kalderon@cavium.com>
> Cc: Ariel Elior <Ariel.Elior@cavium.com>
> Signed-off-by: Shiraz, Saleem <shiraz.saleem@intel.com>
> ---
>  drivers/infiniband/hw/qedr/verbs.c | 68 +++++++++++++++++---------------
>  1 file changed, 31 insertions(+), 37 deletions(-)

Thanks, 

Acked-by: Michal Kalderon <michal.kalderon@marvell.com>
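
A note on the arithmetic in the qedr_populate_pbls() hunk below: the
surviving inner loop fans each PAGE_SIZE chunk from the iterator out
into BIT(PAGE_SHIFT - pg_shift) firmware pages of BIT(pg_shift) bytes
each. Here is a standalone toy of that fan-out (hypothetical demo; the
address and the 4K page size are assumptions, not driver values):

#include <stdint.h>
#include <stdio.h>

#define DEMO_PAGE_SHIFT 12u	/* assume 4K system pages */

int main(void)
{
	uint32_t pg_shift = 12;	/* fw page shift passed by the caller */
	uint32_t fw_pg_per_umem_pg = 1u << (DEMO_PAGE_SHIFT - pg_shift);
	uint64_t pg_addr = 0x100000;	/* made-up chunk DMA address */
	uint32_t fw_pg;

	/* one PBE is written per fw page; addresses step by BIT(pg_shift) */
	for (fw_pg = 0; fw_pg < fw_pg_per_umem_pg; fw_pg++) {
		printf("pbe[%u] = 0x%llx\n", fw_pg,
		       (unsigned long long)pg_addr);
		pg_addr += 1ull << pg_shift;
	}
	return 0;
}

With pg_shift equal to PAGE_SHIFT, as qedr_reg_user_mr() now passes,
the fan-out is 1 and exactly one PBE is written per umem page.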

Patch

diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 989f086..936e5e2 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -636,13 +636,12 @@  static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem,
 			       struct qedr_pbl *pbl,
 			       struct qedr_pbl_info *pbl_info, u32 pg_shift)
 {
-	int shift, pg_cnt, pages, pbe_cnt, total_num_pbes = 0;
+	int pbe_cnt, total_num_pbes = 0;
 	u32 fw_pg_cnt, fw_pg_per_umem_pg;
 	struct qedr_pbl *pbl_tbl;
-	struct scatterlist *sg;
+	struct sg_dma_page_iter sg_iter;
 	struct regpair *pbe;
 	u64 pg_addr;
-	int entry;
 
 	if (!pbl_info->num_pbes)
 		return;
@@ -663,38 +662,33 @@  static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem,
 
 	pbe_cnt = 0;
 
-	shift = umem->page_shift;
-
-	fw_pg_per_umem_pg = BIT(umem->page_shift - pg_shift);
-
-	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
-		pages = sg_dma_len(sg) >> shift;
-		pg_addr = sg_dma_address(sg);
-		for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
-			for (fw_pg_cnt = 0; fw_pg_cnt < fw_pg_per_umem_pg;) {
-				pbe->lo = cpu_to_le32(pg_addr);
-				pbe->hi = cpu_to_le32(upper_32_bits(pg_addr));
-
-				pg_addr += BIT(pg_shift);
-				pbe_cnt++;
-				total_num_pbes++;
-				pbe++;
-
-				if (total_num_pbes == pbl_info->num_pbes)
-					return;
-
-				/* If the given pbl is full storing the pbes,
-				 * move to next pbl.
-				 */
-				if (pbe_cnt ==
-				    (pbl_info->pbl_size / sizeof(u64))) {
-					pbl_tbl++;
-					pbe = (struct regpair *)pbl_tbl->va;
-					pbe_cnt = 0;
-				}
+	fw_pg_per_umem_pg = BIT(PAGE_SHIFT - pg_shift);
+
+	for_each_sg_dma_page(umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
+		pg_addr = sg_page_iter_dma_address(&sg_iter);
+		for (fw_pg_cnt = 0; fw_pg_cnt < fw_pg_per_umem_pg;) {
+			pbe->lo = cpu_to_le32(pg_addr);
+			pbe->hi = cpu_to_le32(upper_32_bits(pg_addr));
+
+			pg_addr += BIT(pg_shift);
+			pbe_cnt++;
+			total_num_pbes++;
+			pbe++;
 
-				fw_pg_cnt++;
+			if (total_num_pbes == pbl_info->num_pbes)
+				return;
+
+			/* If the given pbl is full storing the pbes,
+			 * move to next pbl.
+			 */
+			if (pbe_cnt ==
+			    (pbl_info->pbl_size / sizeof(u64))) {
+				pbl_tbl++;
+				pbe = (struct regpair *)pbl_tbl->va;
+				pbe_cnt = 0;
 			}
+
+			fw_pg_cnt++;
 		}
 	}
 }
@@ -755,7 +749,7 @@  static inline int qedr_init_user_queue(struct ib_udata *udata,
 	}
 
 	fw_pages = ib_umem_page_count(q->umem) <<
-	    (q->umem->page_shift - FW_PAGE_SHIFT);
+	    (PAGE_SHIFT - FW_PAGE_SHIFT);
 
 	rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, fw_pages, 0);
 	if (rc)
@@ -1471,7 +1465,7 @@  struct ib_srq *qedr_create_srq(struct ib_pd *ibpd,
 		page_cnt = srq->usrq.pbl_info.num_pbes;
 		pbl_base_addr = srq->usrq.pbl_tbl->pa;
 		phy_prod_pair_addr = hw_srq->phy_prod_pair_addr;
-		page_size = BIT(srq->usrq.umem->page_shift);
+		page_size = PAGE_SIZE;
 	} else {
 		struct qed_chain *pbl;
 
@@ -2723,7 +2717,7 @@  struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
 		goto err1;
 
 	qedr_populate_pbls(dev, mr->umem, mr->info.pbl_table,
-			   &mr->info.pbl_info, mr->umem->page_shift);
+			   &mr->info.pbl_info, PAGE_SHIFT);
 
 	rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
 	if (rc) {
@@ -2744,7 +2738,7 @@  struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
 	mr->hw_mr.pbl_ptr = mr->info.pbl_table[0].pa;
 	mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
 	mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
-	mr->hw_mr.page_size_log = mr->umem->page_shift;
+	mr->hw_mr.page_size_log = PAGE_SHIFT;
 	mr->hw_mr.fbo = ib_umem_offset(mr->umem);
 	mr->hw_mr.length = len;
 	mr->hw_mr.vaddr = usr_addr;