
[RFC,4/4] RDMA/i40iw: Use umem APIs to retrieve optimal HW address

Message ID 20181019233409.1104-5-shiraz.saleem@intel.com
State Superseded
Series Introduce APIs to get DMA addresses aligned to a HW supported page size

Commit Message

Saleem, Shiraz Oct. 19, 2018, 11:34 p.m. UTC
Call the core helpers to retrieve the optimal HW-aligned address to use
for the MR, within a supported i40iw page size.

Remove the code in i40iw that determines when an MR is backed by 2M huge
pages, which involves checking the umem->hugetlb flag and inspecting the
VMA. The core helpers will return the 2M-aligned address if the MR is
backed by 2M pages.

Fixes: f26c7c83395b ("i40iw: Add 2MB page support")
Reviewed-by: Michael J. Ruhl <michael.j.ruhl@intel.com>
Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
---
 drivers/infiniband/hw/i40iw/i40iw_user.h  |  5 +++
 drivers/infiniband/hw/i40iw/i40iw_verbs.c | 58 +++++++------------------------
 2 files changed, 17 insertions(+), 46 deletions(-)
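
For orientation, the driver-side pattern these helpers enable looks roughly
like the sketch below. This is a minimal, hypothetical excerpt, not part of
the patch: drv_fill_pbl() and its arguments are made-up names, and the helper
signatures are inferred from how they are used in this series, so they may
change in later revisions.

static void drv_fill_pbl(struct ib_umem *umem, u64 *pbl,
			 unsigned long supported_pgsz)
{
	struct sg_phys_iter iter;
	unsigned long pg_size;

	/* Pick the best page size usable by both the HW (supported_pgsz
	 * bitmap) and the layout of this umem.
	 */
	pg_size = ib_umem_find_single_pg_size(umem, supported_pgsz);

	/* Each iteration yields one pg_size-aligned DMA address. */
	for (ib_umem_start_phys_iter(umem, &iter);
	     ib_umem_next_phys_iter(umem, &iter, pg_size);)
		*pbl++ = iter.phyaddr;
}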

Comments

Jason Gunthorpe Oct. 22, 2018, 9:43 p.m. UTC | #1
On Fri, Oct 19, 2018 at 06:34:09PM -0500, Shiraz Saleem wrote:
> Call the core helpers to retrieve the optimal HW-aligned address to use
> for the MR, within a supported i40iw page size.
> 
> Remove the code in i40iw that determines when an MR is backed by 2M huge
> pages, which involves checking the umem->hugetlb flag and inspecting the
> VMA. The core helpers will return the 2M-aligned address if the MR is
> backed by 2M pages.
> 
> Fixes: f26c7c83395b ("i40iw: Add 2MB page support")
> Reviewed-by: Michael J. Ruhl <michael.j.ruhl@intel.com>
> Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
>  drivers/infiniband/hw/i40iw/i40iw_user.h  |  5 +++
>  drivers/infiniband/hw/i40iw/i40iw_verbs.c | 58 +++++++------------------------
>  2 files changed, 17 insertions(+), 46 deletions(-)

This patch looks really good though

Ideally this series would also convert bnxt_re, which is the only other
driver to use hugetlb; then we can drop hugetlb completely.

Jason
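
In concrete terms, the conversion suggested above would amount to replacing
the hugetlb-flag heuristic with the new helper. A rough, hypothetical sketch
(not actual bnxt_re code; SZ_4K/SZ_2M stand in for whatever page sizes that
HW supports):

	/* Before: infer 2M backing from the umem hugetlb flag */
	page_size = umem->hugetlb ? SZ_2M : PAGE_SIZE;

	/* After: let the core pick from the page sizes the HW supports */
	page_size = ib_umem_find_single_pg_size(umem, SZ_4K | SZ_2M);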

Patch

diff --git a/drivers/infiniband/hw/i40iw/i40iw_user.h b/drivers/infiniband/hw/i40iw/i40iw_user.h
index b125925..09fdcee 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_user.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_user.h
@@ -80,6 +80,11 @@  enum i40iw_device_capabilities_const {
 	I40IW_MAX_PDS = 			32768
 };
 
+enum i40iw_supported_page_size {
+	I40IW_PAGE_SZ_4K = 0x00001000,
+	I40IW_PAGE_SZ_2M = 0x00200000
+};
+
 #define i40iw_handle void *
 #define i40iw_adapter_handle i40iw_handle
 #define i40iw_qp_handle i40iw_handle
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index cb2aef8..a2ecf9e 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -1371,55 +1371,22 @@  static void i40iw_copy_user_pgaddrs(struct i40iw_mr *iwmr,
 {
 	struct ib_umem *region = iwmr->region;
 	struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
-	int chunk_pages, entry, i;
 	struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
 	struct i40iw_pble_info *pinfo;
-	struct scatterlist *sg;
-	u64 pg_addr = 0;
+	struct sg_phys_iter sg_phys_iter;
 	u32 idx = 0;
 
 	pinfo = (level == I40IW_LEVEL_1) ? NULL : palloc->level2.leaf;
 
-	for_each_sg(region->sg_head.sgl, sg, region->nmap, entry) {
-		chunk_pages = sg_dma_len(sg) >> region->page_shift;
-		if ((iwmr->type == IW_MEMREG_TYPE_QP) &&
-		    !iwpbl->qp_mr.sq_page)
-			iwpbl->qp_mr.sq_page = sg_page(sg);
-		for (i = 0; i < chunk_pages; i++) {
-			pg_addr = sg_dma_address(sg) +
-				(i << region->page_shift);
-
-			if ((entry + i) == 0)
-				*pbl = cpu_to_le64(pg_addr & iwmr->page_msk);
-			else if (!(pg_addr & ~iwmr->page_msk))
-				*pbl = cpu_to_le64(pg_addr);
-			else
-				continue;
-			pbl = i40iw_next_pbl_addr(pbl, &pinfo, &idx);
-		}
-	}
-}
+	if (iwmr->type == IW_MEMREG_TYPE_QP)
+		iwpbl->qp_mr.sq_page = sg_page(region->sg_head.sgl);
 
-/**
- * i40iw_set_hugetlb_params - set MR pg size and mask to huge pg values.
- * @addr: virtual address
- * @iwmr: mr pointer for this memory registration
- */
-static void i40iw_set_hugetlb_values(u64 addr, struct i40iw_mr *iwmr)
-{
-	struct vm_area_struct *vma;
-	struct hstate *h;
-
-	down_read(&current->mm->mmap_sem);
-	vma = find_vma(current->mm, addr);
-	if (vma && is_vm_hugetlb_page(vma)) {
-		h = hstate_vma(vma);
-		if (huge_page_size(h) == 0x200000) {
-			iwmr->page_size = huge_page_size(h);
-			iwmr->page_msk = huge_page_mask(h);
-		}
+	for (ib_umem_start_phys_iter(region, &sg_phys_iter);
+	     ib_umem_next_phys_iter(region, &sg_phys_iter, iwmr->page_size);) {
+		*pbl = cpu_to_le64(sg_phys_iter.phyaddr);
+		pbl = i40iw_next_pbl_addr(pbl, &pinfo, &idx);
 	}
-	up_read(&current->mm->mmap_sem);
+
 }
 
 /**
@@ -1876,11 +1843,10 @@  static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd,
 	iwmr->ibmr.device = pd->device;
 	ucontext = to_ucontext(pd->uobject->context);
 
-	iwmr->page_size = PAGE_SIZE;
-	iwmr->page_msk = PAGE_MASK;
-
-	if (region->hugetlb && (req.reg_type == IW_MEMREG_TYPE_MEM))
-		i40iw_set_hugetlb_values(start, iwmr);
+	iwmr->page_size = I40IW_PAGE_SZ_4K;
+	if (req.reg_type == IW_MEMREG_TYPE_MEM)
+		iwmr->page_size = ib_umem_find_single_pg_size(region,
+					I40IW_PAGE_SZ_4K | I40IW_PAGE_SZ_2M);
 
 	region_length = region->length + (start & (iwmr->page_size - 1));
 	pg_shift = ffs(iwmr->page_size) - 1;