@@ -709,6 +709,7 @@ struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, u8 access_mode,
 			kfree(mr);
 			return ERR_PTR(err);
 		}
+		mr->mmkey.ndescs = ndescs;
 	}
 	mr->mmkey.type = MLX5_MKEY_MR;
 	init_waitqueue_head(&mr->mmkey.wait);
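The hunk above makes the cache-alloc path record the descriptor count on the mkey itself, rather than leaving it implicit in the cache entry that produced it. A minimal sketch of the pattern, using hypothetical demo_* names rather than the driver's types:

/* Hypothetical demo_* names; a sketch of the idea, not driver code. */
struct demo_mmkey {
	unsigned int ndescs;	/* translation entries, fixed at creation */
};

/* Capture the capacity once, where the mkey is built, so later
 * consumers need not know which allocation path produced it
 * (mirrors "mr->mmkey.ndescs = ndescs" in the hunk above). */
static void demo_mkey_init(struct demo_mmkey *mkey, unsigned int ndescs)
{
	mkey->ndescs = ndescs;
}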
@@ -1374,9 +1375,6 @@ static bool can_use_umr_rereg_pas(struct mlx5_ib_mr *mr,
 {
 	struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
 
-	/* We only track the allocated sizes of MRs from the cache */
-	if (!mr->mmkey.cache_ent)
-		return false;
 	if (!mlx5r_umr_can_load_pas(dev, new_umem->length))
 		return false;
 
@@ -1384,8 +1382,7 @@ static bool can_use_umr_rereg_pas(struct mlx5_ib_mr *mr,
 		mlx5_umem_find_best_pgsz(new_umem, mkc, log_page_size, 0, iova);
 	if (WARN_ON(!*page_size))
 		return false;
-	return (1ULL << mr->mmkey.cache_ent->order) >=
-	       ib_umem_num_dma_blocks(new_umem, *page_size);
+	return mr->mmkey.ndescs >= ib_umem_num_dma_blocks(new_umem, *page_size);
 }
 
 static int umr_rereg_pas(struct mlx5_ib_mr *mr, struct ib_pd *pd,
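Taken together, the two hunks in can_use_umr_rereg_pas() replace a cache-only capacity check with one that works for any mkey: previously only cache MRs carried a size (1 << cache_ent->order descriptors), so everything else was rejected up front; now the check compares the recorded mmkey.ndescs directly. Only the cache path is shown in this excerpt; presumably the rest of the patch records ndescs on the non-cache allocation paths as well, or the removed guard could not be dropped safely. A self-contained before/after sketch, again with hypothetical demo_* stand-ins for the mlx5_ib structures:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct demo_cache_ent {
	unsigned int order;		/* log2 of the entry's descriptor count */
};

struct demo_mmkey {
	unsigned int ndescs;		/* descriptor count, set at creation */
	struct demo_cache_ent *cache_ent;	/* NULL outside the cache */
};

/* Old rule: only cache mkeys knew their capacity, so any mkey
 * without a cache entry was rejected before the size test. */
static bool old_can_rereg(const struct demo_mmkey *mkey, uint64_t blocks)
{
	if (!mkey->cache_ent)
		return false;
	return (1ULL << mkey->cache_ent->order) >= blocks;
}

/* New rule: every mkey records ndescs, so compare it directly. */
static bool new_can_rereg(const struct demo_mmkey *mkey, uint64_t blocks)
{
	return mkey->ndescs >= blocks;
}

int main(void)
{
	struct demo_mmkey noncache = { .ndescs = 256, .cache_ent = NULL };

	printf("old: %d new: %d\n",
	       old_can_rereg(&noncache, 100),	/* 0: no cache entry */
	       new_can_rereg(&noncache, 100));	/* 1: 100 blocks <= 256 descs */
	return 0;
}

In this sketch, a non-cache mkey sized for 256 descriptors now qualifies for UMR rereg of a 100-block umem, where the old gate would have rejected it outright for lacking a cache entry.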