
RDMA/vmw_pvrdma: Do not re-calculate npages

Message ID 20171126115135.5824-1-yuval.shaia@oracle.com (mailing list archive)
State Accepted
Delegated to: Jason Gunthorpe

Commit Message

Yuval Shaia Nov. 26, 2017, 11:51 a.m. UTC
There is no need to re-calculate the number of pages since it is already
done in ib_umem_get.

Signed-off-by: Yuval Shaia <yuval.shaia@oracle.com>
---
 drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c | 15 ++++-----------
 1 file changed, 4 insertions(+), 11 deletions(-)
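
For context, ib_umem_get() pins the user memory and records the page total in umem->npages, which is what the patch reads instead of re-walking the DMA scatterlist. A stand-alone sketch of the idea (all names below are illustrative, not the kernel's):

	/*
	 * Minimal user-space sketch, not kernel code: once the pin-time helper
	 * has counted pages and cached the total, later callers can read the
	 * cached value instead of walking the scatterlist again, which is what
	 * this patch does with umem->npages.
	 */
	#include <stdio.h>

	#define PAGE_SHIFT 12

	struct sg_entry { unsigned int dma_len; };

	struct fake_umem {
		struct sg_entry sgl[4];
		int nmap;	/* number of mapped scatterlist entries */
		int npages;	/* cached once, at "ib_umem_get" time */
	};

	/* The pattern the patch removes: re-derive the count on every call. */
	static int recount_pages(const struct fake_umem *umem)
	{
		int entry, nchunks = 0;

		for (entry = 0; entry < umem->nmap; entry++)
			nchunks += umem->sgl[entry].dma_len >> PAGE_SHIFT;
		return nchunks;
	}

	int main(void)
	{
		struct fake_umem umem = {
			.sgl  = { { 2u << PAGE_SHIFT }, { 1u << PAGE_SHIFT },
				  { 4u << PAGE_SHIFT }, { 1u << PAGE_SHIFT } },
			.nmap = 4,
		};

		umem.npages = recount_pages(&umem);	/* done once at pin time */

		/* Later users read the cached total; no second walk needed. */
		printf("recounted=%d cached=%d\n",
		       recount_pages(&umem), umem.npages);
		return 0;
	}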

Comments

Adit Ranadive Dec. 1, 2017, 12:16 a.m. UTC | #1
On 11/26/17 3:51 AM, Yuval Shaia wrote:
> There is no need to re-calculate the number of pages since it is already
> done in ib_umem_get.
> 
> Signed-off-by: Yuval Shaia <yuval.shaia@oracle.com>
> ---

Thanks!

Acked-by: Adit Ranadive <aditr@vmware.com>
Tested-by: Adit Ranadive <aditr@vmware.com>
Jason Gunthorpe Dec. 11, 2017, 11:38 p.m. UTC | #2
On Sun, Nov 26, 2017 at 01:51:35PM +0200, Yuval Shaia wrote:
> There is no need to re-calculate the number of pages since it is already
> done in ib_umem_get.
> 
> Signed-off-by: Yuval Shaia <yuval.shaia@oracle.com>
> Acked-by: Adit Ranadive <aditr@vmware.com>
> Tested-by: Adit Ranadive <aditr@vmware.com>
>  drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c | 15 ++++-----------
>  1 file changed, 4 insertions(+), 11 deletions(-)

Applied to for-next, thanks

Jason

Patch

diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c
index 8519f3212e52..fa96fa4fb829 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c
@@ -119,10 +119,7 @@ struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	union pvrdma_cmd_resp rsp;
 	struct pvrdma_cmd_create_mr *cmd = &req.create_mr;
 	struct pvrdma_cmd_create_mr_resp *resp = &rsp.create_mr_resp;
-	int nchunks;
 	int ret;
-	int entry;
-	struct scatterlist *sg;
 
 	if (length == 0 || length > dev->dsr->caps.max_mr_size) {
 		dev_warn(&dev->pdev->dev, "invalid mem region length\n");
@@ -137,13 +134,9 @@ struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 		return ERR_CAST(umem);
 	}
 
-	nchunks = 0;
-	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry)
-		nchunks += sg_dma_len(sg) >> PAGE_SHIFT;
-
-	if (nchunks < 0 || nchunks > PVRDMA_PAGE_DIR_MAX_PAGES) {
+	if (umem->npages < 0 || umem->npages > PVRDMA_PAGE_DIR_MAX_PAGES) {
 		dev_warn(&dev->pdev->dev, "overflow %d pages in mem region\n",
-			 nchunks);
+			 umem->npages);
 		ret = -EINVAL;
 		goto err_umem;
 	}
@@ -158,7 +151,7 @@ struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	mr->mmr.size = length;
 	mr->umem = umem;
 
-	ret = pvrdma_page_dir_init(dev, &mr->pdir, nchunks, false);
+	ret = pvrdma_page_dir_init(dev, &mr->pdir, umem->npages, false);
 	if (ret) {
 		dev_warn(&dev->pdev->dev,
 			 "could not allocate page directory\n");
@@ -175,7 +168,7 @@ struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	cmd->length = length;
 	cmd->pd_handle = to_vpd(pd)->pd_handle;
 	cmd->access_flags = access_flags;
-	cmd->nchunks = nchunks;
+	cmd->nchunks = umem->npages;
 	cmd->pdir_dma = mr->pdir.dir_dma;
 
 	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_MR_RESP);
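
Note that the sanity check itself is unchanged, only its input: umem->npages is a plain int here, so the check still rejects both a negative (wrapped) count and one beyond the device's page-directory capacity. A stand-alone sketch of that check (the PVRDMA_PAGE_DIR_MAX_PAGES value below is assumed for illustration; the real one is defined in the driver headers):

	/* Stand-alone sketch of the retained bounds check, not driver code. */
	#include <errno.h>
	#include <stdio.h>

	#define PVRDMA_PAGE_DIR_MAX_PAGES (512 * 512)	/* assumed value */

	static int check_npages(int npages)
	{
		if (npages < 0 || npages > PVRDMA_PAGE_DIR_MAX_PAGES) {
			fprintf(stderr, "overflow %d pages in mem region\n",
				npages);
			return -EINVAL;
		}
		return 0;
	}

	int main(void)
	{
		printf("%d ", check_npages(8));		/* fits: 0 */
		printf("%d ", check_npages(-1));	/* negative: -EINVAL */
		printf("%d\n", check_npages(1 << 30));	/* too large: -EINVAL */
		return 0;
	}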