
[v1,rdma-next,4/8] RDMA/mlx5: Allow rereg all the mkeys that can load pas with UMR

Message ID 20221107161449.5611-5-michaelgur@nvidia.com (mailing list archive)
State Superseded
Delegated to: Jason Gunthorpe
Series RDMA/mlx5: Switch MR cache to use RB-tree

Commit Message

Michael Guralnik Nov. 7, 2022, 4:14 p.m. UTC
From: Aharon Landau <aharonl@nvidia.com>

Keep track of the number of descriptors (ndescs) of all cacheable mkeys, and
use it to allow rereg of any mkey whose PAS can be loaded with UMR, not only
mkeys that were allocated from the cache.

Signed-off-by: Aharon Landau <aharonl@nvidia.com>
---
 drivers/infiniband/hw/mlx5/mr.c | 7 ++-----
 1 file changed, 2 insertions(+), 5 deletions(-)
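
For illustration, below is a minimal userspace sketch of the new eligibility
rule: an mkey qualifies for rereg when the ndescs recorded at allocation time
covers the DMA blocks of the new umem at the chosen page size. The struct and
helpers are simplified stand-ins for the mlx5 driver types (they ignore iova
alignment and the mlx5r_umr_can_load_pas() check), not the kernel code itself.

/*
 * Userspace model of the check in can_use_umr_rereg_pas() after this patch:
 * compare the descriptors recorded at mkey creation against the number of
 * DMA blocks the new umem needs at the selected page size.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct mkey_model {
	unsigned int ndescs;	/* descriptors recorded when the mkey was created */
};

/* Rough stand-in for ib_umem_num_dma_blocks(): blocks needed to map 'length'. */
static uint64_t num_dma_blocks(uint64_t length, uint64_t page_size)
{
	return (length + page_size - 1) / page_size;
}

/* New rule: any UMR-capable mkey qualifies if it has enough descriptors. */
static bool can_rereg_pas(const struct mkey_model *mkey,
			  uint64_t new_length, uint64_t page_size)
{
	return mkey->ndescs >= num_dma_blocks(new_length, page_size);
}

int main(void)
{
	struct mkey_model mkey = { .ndescs = 256 };

	/* 256 descriptors at 4K pages cover up to 1MB of new umem. */
	printf("1MB: %d\n", can_rereg_pas(&mkey, 1 << 20, 4096)); /* prints 1 */
	printf("2MB: %d\n", can_rereg_pas(&mkey, 2 << 20, 4096)); /* prints 0 */
	return 0;
}

With the size tracked in mmkey.ndescs itself, the cache_ent lookup is no
longer needed to answer this question, which is why the "only MRs from the
cache" restriction in the hunk below can be dropped.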

Patch

diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 8ff10944ff16..fe5567c57897 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -709,6 +709,7 @@  struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, u8 access_mode,
 			kfree(mr);
 			return ERR_PTR(err);
 		}
+		mr->mmkey.ndescs = ndescs;
 	}
 	mr->mmkey.type = MLX5_MKEY_MR;
 	init_waitqueue_head(&mr->mmkey.wait);
@@ -1374,9 +1375,6 @@  static bool can_use_umr_rereg_pas(struct mlx5_ib_mr *mr,
 {
 	struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
 
-	/* We only track the allocated sizes of MRs from the cache */
-	if (!mr->mmkey.cache_ent)
-		return false;
 	if (!mlx5r_umr_can_load_pas(dev, new_umem->length))
 		return false;
 
@@ -1384,8 +1382,7 @@  static bool can_use_umr_rereg_pas(struct mlx5_ib_mr *mr,
 		mlx5_umem_find_best_pgsz(new_umem, mkc, log_page_size, 0, iova);
 	if (WARN_ON(!*page_size))
 		return false;
-	return (1ULL << mr->mmkey.cache_ent->order) >=
-	       ib_umem_num_dma_blocks(new_umem, *page_size);
+	return mr->mmkey.ndescs >= ib_umem_num_dma_blocks(new_umem, *page_size);
 }
 
 static int umr_rereg_pas(struct mlx5_ib_mr *mr, struct ib_pd *pd,