
[11/11] IB/mlx4: Prevent cross page boundary allocation

Message ID 1466605652-24798-12-git-send-email-leonro@mellanox.com (mailing list archive)
State Accepted

Commit Message

Leon Romanovsky June 22, 2016, 2:27 p.m. UTC
From: Chuck Lever <chuck.lever@oracle.com>

Prevent the allocation from crossing a page boundary by allocating
a new page; this is required to meet ConnectX-3 HW alignment
requirements.

Not doing so might cause an "RDMA read local protection" error.

Fixes: 1b2cd0fc673c ('IB/mlx4: Support the new memory registration API')
Suggested-by: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Yishai Hadas <yishaih@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
---
 drivers/infiniband/hw/mlx4/mlx4_ib.h |  2 +-
 drivers/infiniband/hw/mlx4/mr.c      | 34 +++++++++++++++++-----------------
 2 files changed, 18 insertions(+), 18 deletions(-)
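
For illustration only (not part of the patch): a minimal userspace C sketch of
the property the fix relies on. A page-aligned buffer of at most PAGE_SIZE
bytes can never straddle a page boundary, which is what get_zeroed_page()
guarantees for the page list, while a kmalloc()-style buffer aligned by hand
with PTR_ALIGN() may land anywhere in memory. The 511-entry worst case below
assumes the driver's MLX4_MAX_FAST_REG_PAGES limit and 4 KiB pages.

/*
 * Standalone sketch: does a buffer of 'size' bytes starting at 'addr'
 * stay within a single 4 KiB page?
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096UL

static int crosses_page(uintptr_t addr, size_t size)
{
	/* The first and last byte must fall in the same page. */
	return (addr / PAGE_SIZE) != ((addr + size - 1) / PAGE_SIZE);
}

int main(void)
{
	/* Assumed worst case: 511 entries of 8 bytes each, i.e. 4088 bytes. */
	size_t size = 511 * sizeof(uint64_t);

	/* Page-aligned allocation: can never cross a page boundary. */
	void *aligned = aligned_alloc(PAGE_SIZE, PAGE_SIZE);
	/* Arbitrary allocation: may straddle two pages. */
	void *plain = malloc(size);

	printf("page-aligned buffer crosses a page: %d\n",
	       crosses_page((uintptr_t)aligned, size));
	printf("arbitrary buffer crosses a page:    %d\n",
	       crosses_page((uintptr_t)plain, size));

	free(aligned);
	free(plain);
	return 0;
}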

Comments

Dennis Dalessandro June 22, 2016, 3:01 p.m. UTC | #1
On Wed, Jun 22, 2016 at 05:27:32PM +0300, Leon Romanovsky wrote:
>From: Chuck Lever <chuck.lever@oracle.com>
>
>Prevent the allocation from crossing a page boundary by allocating
>a new page; this is required to meet ConnectX-3 HW alignment
>requirements.
>
>Not doing so might cause an "RDMA read local protection" error.
>
>Fixes: 1b2cd0fc673c ('IB/mlx4: Support the new memory registration API')
>Suggested-by: Christoph Hellwig <hch@infradead.org>
>Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
>Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
>Signed-off-by: Yishai Hadas <yishaih@mellanox.com>
>Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
>Signed-off-by: Leon Romanovsky <leon@kernel.org>

Hi Leon, why sign-off twice with different addresses?

-Denny
Leon Romanovsky June 22, 2016, 4 p.m. UTC | #2
On Wed, Jun 22, 2016 at 11:01:42AM -0400, Dennis Dalessandro wrote:
> On Wed, Jun 22, 2016 at 05:27:32PM +0300, Leon Romanovsky wrote:
> >From: Chuck Lever <chuck.lever@oracle.com>
> >
> >Prevent the allocation from crossing a page boundary by allocating
> >a new page; this is required to meet ConnectX-3 HW alignment
> >requirements.
> >
> >Not doing so might cause an "RDMA read local protection" error.
> >
> >Fixes: 1b2cd0fc673c ('IB/mlx4: Support the new memory registration API')
> >Suggested-by: Christoph Hellwig <hch@infradead.org>
> >Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
> >Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
> >Signed-off-by: Yishai Hadas <yishaih@mellanox.com>
> >Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
> >Signed-off-by: Leon Romanovsky <leon@kernel.org>
> 
> Hi Leon, why sign-off twice with different addresses?

The first SOB is there because I changed this patch, and did so as a
Mellanox employee. The second SOB marks that the patch went through my
external git tree, and takes the place of the ROB tag I use on other
patches.

This makes it easy to distinguish my external and internal work.

> 
> -Denny

Patch

diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 6c5ac5d..29acda2 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -139,7 +139,7 @@  struct mlx4_ib_mr {
 	u32			max_pages;
 	struct mlx4_mr		mmr;
 	struct ib_umem	       *umem;
-	void			*pages_alloc;
+	size_t			page_map_size;
 };
 
 struct mlx4_ib_mw {
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index 6312721..5d73989 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -277,20 +277,23 @@  mlx4_alloc_priv_pages(struct ib_device *device,
 		      struct mlx4_ib_mr *mr,
 		      int max_pages)
 {
-	int size = max_pages * sizeof(u64);
-	int add_size;
 	int ret;
 
-	add_size = max_t(int, MLX4_MR_PAGES_ALIGN - ARCH_KMALLOC_MINALIGN, 0);
+	/* Ensure that size is aligned to DMA cacheline
+	 * requirements.
+	 * max_pages is limited to MLX4_MAX_FAST_REG_PAGES
+	 * so page_map_size will never cross PAGE_SIZE.
+	 */
+	mr->page_map_size = roundup(max_pages * sizeof(u64),
+				    MLX4_MR_PAGES_ALIGN);
 
-	mr->pages_alloc = kzalloc(size + add_size, GFP_KERNEL);
-	if (!mr->pages_alloc)
+	/* Prevent cross page boundary allocation. */
+	mr->pages = (__be64 *)get_zeroed_page(GFP_KERNEL);
+	if (!mr->pages)
 		return -ENOMEM;
 
-	mr->pages = PTR_ALIGN(mr->pages_alloc, MLX4_MR_PAGES_ALIGN);
-
 	mr->page_map = dma_map_single(device->dma_device, mr->pages,
-				      size, DMA_TO_DEVICE);
+				      mr->page_map_size, DMA_TO_DEVICE);
 
 	if (dma_mapping_error(device->dma_device, mr->page_map)) {
 		ret = -ENOMEM;
@@ -298,9 +301,9 @@  mlx4_alloc_priv_pages(struct ib_device *device,
 	}
 
 	return 0;
-err:
-	kfree(mr->pages_alloc);
 
+err:
+	free_page((unsigned long)mr->pages);
 	return ret;
 }
 
@@ -309,11 +312,10 @@  mlx4_free_priv_pages(struct mlx4_ib_mr *mr)
 {
 	if (mr->pages) {
 		struct ib_device *device = mr->ibmr.device;
-		int size = mr->max_pages * sizeof(u64);
 
 		dma_unmap_single(device->dma_device, mr->page_map,
-				 size, DMA_TO_DEVICE);
-		kfree(mr->pages_alloc);
+				 mr->page_map_size, DMA_TO_DEVICE);
+		free_page((unsigned long)mr->pages);
 		mr->pages = NULL;
 	}
 }
@@ -537,14 +539,12 @@  int mlx4_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
 	mr->npages = 0;
 
 	ib_dma_sync_single_for_cpu(ibmr->device, mr->page_map,
-				   sizeof(u64) * mr->max_pages,
-				   DMA_TO_DEVICE);
+				   mr->page_map_size, DMA_TO_DEVICE);
 
 	rc = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, mlx4_set_page);
 
 	ib_dma_sync_single_for_device(ibmr->device, mr->page_map,
-				      sizeof(u64) * mr->max_pages,
-				      DMA_TO_DEVICE);
+				      mr->page_map_size, DMA_TO_DEVICE);
 
 	return rc;
 }
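
A quick sanity check of the new comment's claim that page_map_size will never
cross PAGE_SIZE: the standalone sketch below recomputes the worst case, with
MLX4_MAX_FAST_REG_PAGES and MLX4_MR_PAGES_ALIGN hard-coded to the values
assumed from the driver headers (511 and 64) and 4 KiB pages.

/*
 * Standalone sketch: verify that roundup(max_pages * sizeof(u64),
 * MLX4_MR_PAGES_ALIGN) never exceeds PAGE_SIZE for any legal max_pages.
 * The two MLX4_* constants are assumed values, not taken from a header.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE		4096UL
#define MLX4_MAX_FAST_REG_PAGES	511	/* assumed driver limit */
#define MLX4_MR_PAGES_ALIGN	64	/* assumed alignment requirement */

/* Same semantics as the kernel's roundup() macro. */
static unsigned long roundup_ul(unsigned long x, unsigned long align)
{
	return ((x + align - 1) / align) * align;
}

int main(void)
{
	unsigned long worst = 0;

	for (int max_pages = 1; max_pages <= MLX4_MAX_FAST_REG_PAGES; max_pages++) {
		unsigned long page_map_size =
			roundup_ul(max_pages * sizeof(uint64_t), MLX4_MR_PAGES_ALIGN);

		assert(page_map_size <= PAGE_SIZE);
		if (page_map_size > worst)
			worst = page_map_size;
	}

	printf("worst-case page_map_size: %lu bytes (PAGE_SIZE is %lu)\n",
	       worst, PAGE_SIZE);
	return 0;
}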