[v1,rdma-next,5/8] RDMA/mlx5: Introduce mlx5r_cache_rb_key

Message ID: 20221107161449.5611-6-michaelgur@nvidia.com (mailing list archive)
State: Superseded
Delegated to: Jason Gunthorpe
Series: RDMA/mlx5: Switch MR cache to use RB-tree

Commit Message

Michael Guralnik Nov. 7, 2022, 4:14 p.m. UTC
From: Aharon Landau <aharonl@nvidia.com>

The next patch in this series switches the cache structure to an RB-tree.
Introduce the key for that tree: the set of mkey properties that UMR
operations cannot modify. This key is used to define the cache entries
and to search for and create cache mkeys.

Signed-off-by: Aharon Landau <aharonl@nvidia.com>
---
 drivers/infiniband/hw/mlx5/mlx5_ib.h | 14 +++++-
 drivers/infiniband/hw/mlx5/mr.c      | 73 +++++++++++++++++++---------
 drivers/infiniband/hw/mlx5/odp.c     |  8 ++-
 3 files changed, 69 insertions(+), 26 deletions(-)
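
To see why the field order in mlx5r_cache_rb_key matters, consider how an
RB-tree comparator over the key would work: fields are compared
most-significant-first, so ndescs, the one property a request is allowed to
round up, breaks ties only between entries that already match on access_mode
and access_flags. A minimal sketch of such a comparator follows (the name
cache_rb_key_cmp and its exact form are assumptions here; the real comparator
arrives together with the RB-tree in the next patch):

static int cache_rb_key_cmp(struct mlx5r_cache_rb_key key1,
			    struct mlx5r_cache_rb_key key2)
{
	int res;

	/* The properties UMR cannot modify are compared first */
	res = key1.access_mode - key2.access_mode;
	if (res)
		return res;

	res = key1.access_flags - key2.access_flags;
	if (res)
		return res;

	/*
	 * ndescs is compared last, mirroring its position in the struct,
	 * so entries that differ only in size are neighbors in the tree.
	 */
	return key1.ndescs - key2.ndescs;
}

With this ordering, a lookup that misses on the exact size can settle for the
in-order successor: the smallest cached entry of the same type that is still
large enough.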

Patch

diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 84d3b917c33e..939ec3759eba 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -613,6 +613,16 @@  enum mlx5_mkey_type {
 	MLX5_MKEY_INDIRECT_DEVX,
 };
 
+struct mlx5r_cache_rb_key {
+	unsigned int access_mode;
+	unsigned int access_flags;
+	/*
+	 * keep ndescs as the last member so entries with similar ndescs
+	 * will be close in the tree
+	 */
+	unsigned int ndescs;
+};
+
 struct mlx5_ib_mkey {
 	u32 key;
 	enum mlx5_mkey_type type;
@@ -732,10 +742,10 @@  struct mlx5_cache_ent {
 	unsigned long		stored;
 	unsigned long		reserved;
 
+	struct mlx5r_cache_rb_key rb_key;
+
 	char                    name[4];
 	u32                     order;
-	u32			access_mode;
-	unsigned int		ndescs;
 
 	u8 disabled:1;
 	u8 fill_to_high_water:1;
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index fe5567c57897..e7a3d4fa52d0 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -287,18 +287,18 @@  static int get_mkc_octo_size(unsigned int access_mode, unsigned int ndescs)
 	return ret;
 }
 
-static void set_cache_mkc(struct mlx5_ib_dev *dev, u8 access_mode,
-			  unsigned int access_flags, unsigned int ndescs,
-			  void *mkc)
+static void set_cache_mkc(struct mlx5_ib_dev *dev,
+			  struct mlx5r_cache_rb_key rb_key, void *mkc)
 {
-	set_mkc_access_pd_addr_fields(mkc, access_flags, 0, dev->umrc.pd);
+	set_mkc_access_pd_addr_fields(mkc, rb_key.access_flags, 0,
+				      dev->umrc.pd);
 	MLX5_SET(mkc, mkc, free, 1);
 	MLX5_SET(mkc, mkc, umr_en, 1);
-	MLX5_SET(mkc, mkc, access_mode_1_0, access_mode & 0x3);
-	MLX5_SET(mkc, mkc, access_mode_4_2, (access_mode >> 2) & 0x7);
+	MLX5_SET(mkc, mkc, access_mode_1_0, rb_key.access_mode & 0x3);
+	MLX5_SET(mkc, mkc, access_mode_4_2, (rb_key.access_mode >> 2) & 0x7);
 
 	MLX5_SET(mkc, mkc, translations_octword_size,
-		 get_mkc_octo_size(access_mode, ndescs));
+		 get_mkc_octo_size(rb_key.access_mode, rb_key.ndescs));
 	MLX5_SET(mkc, mkc, log_page_size, PAGE_SHIFT);
 }
 
@@ -317,7 +317,7 @@  static int add_keys(struct mlx5_cache_ent *ent, unsigned int num)
 			return -ENOMEM;
 		mkc = MLX5_ADDR_OF(create_mkey_in, async_create->in,
 				   memory_key_mkey_entry);
-		set_cache_mkc(ent->dev, ent->access_mode, 0, ent->ndescs, mkc);
+		set_cache_mkc(ent->dev, ent->rb_key, mkc);
 		async_create->ent = ent;
 
 		err = push_mkey(ent, true, NULL);
@@ -343,8 +343,8 @@  static int add_keys(struct mlx5_cache_ent *ent, unsigned int num)
 }
 
 /* Synchronously create a cacheable mkey */
-static int create_cache_mkey(struct mlx5_ib_dev *dev, u8 access_mode,
-			     unsigned int access_flags, unsigned int ndescs,
+static int create_cache_mkey(struct mlx5_ib_dev *dev,
+			     struct mlx5r_cache_rb_key rb_key,
 			     struct mlx5_ib_mkey *mkey)
 {
 	size_t inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
@@ -356,7 +356,7 @@  static int create_cache_mkey(struct mlx5_ib_dev *dev, u8 access_mode,
 	if (!in)
 		return -ENOMEM;
 	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
-	set_cache_mkc(dev, access_mode, access_flags, ndescs, mkc);
+	set_cache_mkc(dev, rb_key, mkc);
 
 	err = mlx5_ib_create_mkey(dev, mkey, in, inlen);
 	kfree(in);
@@ -647,7 +647,7 @@  static bool mlx5_ent_get_mkey(struct mlx5_cache_ent *ent, struct mlx5_ib_mr *mr)
 	}
 
 	mr->mmkey.key = pop_stored_mkey(ent);
-	mr->mmkey.ndescs = ent->ndescs;
+	mr->mmkey.ndescs = ent->rb_key.ndescs;
 	mr->mmkey.cache_ent = ent;
 	queue_adjust_cache_locked(ent);
 	ent->in_use++;
@@ -668,29 +668,57 @@  static struct mlx5_cache_ent *mkey_cache_ent_from_order(struct mlx5_ib_dev *dev,
 	return &cache->ent[order];
 }
 
-static bool mlx5_cache_get_mkey(struct mlx5_ib_dev *dev, u8 access_mode,
-				u8 access_flags, unsigned int ndescs,
+static bool mlx5_cache_get_mkey(struct mlx5_ib_dev *dev,
+				struct mlx5r_cache_rb_key rb_key,
 				struct mlx5_ib_mr *mr)
 {
 	struct mlx5_cache_ent *ent;
 
-	if (!mlx5r_umr_can_reconfig(dev, 0, access_flags))
+	if (!mlx5r_umr_can_reconfig(dev, 0, rb_key.access_flags))
 		return false;
 
-	if (access_mode == MLX5_MKC_ACCESS_MODE_KSM)
-		ent = &dev->cache.ent[MLX5_IMR_KSM_CACHE_ENTRY];
+	if (rb_key.access_mode == MLX5_MKC_ACCESS_MODE_KSM)
+		ent = &dev->cache.ent[MLX5_IMR_KSM_CACHE_ENTRY];
+	else
+		ent = mkey_cache_ent_from_order(dev,
+						order_base_2(rb_key.ndescs));
 
-	ent = mkey_cache_ent_from_order(dev, order_base_2(ndescs));
 	if (!ent)
 		return false;
 
 	return mlx5_ent_get_mkey(ent, mr);
 }
 
+static int get_unchangeable_access_flags(struct mlx5_ib_dev *dev,
+					int access_flags)
+{
+	int ret = 0;
+
+	if ((access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
+	    MLX5_CAP_GEN(dev->mdev, atomic) &&
+	    MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled))
+		ret |= IB_ACCESS_REMOTE_ATOMIC;
+
+	if ((access_flags & IB_ACCESS_RELAXED_ORDERING) &&
+	    MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write) &&
+	    !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write_umr))
+		ret |= IB_ACCESS_RELAXED_ORDERING;
+
+	if ((access_flags & IB_ACCESS_RELAXED_ORDERING) &&
+	    MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read) &&
+	    !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_umr))
+		ret |= IB_ACCESS_RELAXED_ORDERING;
+
+	return ret;
+}
+
 struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, u8 access_mode,
 				       unsigned int access_flags,
 				       unsigned int ndescs)
 {
+	struct mlx5r_cache_rb_key rb_key = {
+		.access_mode = access_mode,
+		.access_flags = get_unchangeable_access_flags(dev, access_flags),
+		.ndescs = ndescs
+	};
 	struct mlx5_ib_mr *mr;
 	int err;
 
@@ -698,13 +726,12 @@  struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, u8 access_mode,
 	if (!mr)
 		return ERR_PTR(-ENOMEM);
 
-	if (!mlx5_cache_get_mkey(dev, access_mode, access_flags, ndescs, mr)) {
+	if (!mlx5_cache_get_mkey(dev, rb_key, mr)) {
 		/*
 		 * Didn't find an mkey in cache.
 		 * Create an mkey with the exact needed size.
 		 */
-		err = create_cache_mkey(dev, access_mode, access_flags, ndescs,
-					&mr->mmkey);
+		err = create_cache_mkey(dev, rb_key, &mr->mmkey);
 		if (err) {
 			kfree(mr);
 			return ERR_PTR(err);
@@ -774,6 +801,8 @@  static void delay_time_func(struct timer_list *t)
 
 int mlx5_mkey_cache_init(struct mlx5_ib_dev *dev)
 {
+	struct mlx5r_cache_rb_key rb_key = { .access_mode =
+						    MLX5_MKC_ACCESS_MODE_MTT };
 	struct mlx5_mkey_cache *cache = &dev->cache;
 	struct mlx5_cache_ent *ent;
 	int i;
@@ -804,8 +833,8 @@  int mlx5_mkey_cache_init(struct mlx5_ib_dev *dev)
 		if (ent->order > mkey_cache_max_order(dev))
 			continue;
 
-		ent->ndescs = 1 << ent->order;
-		ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
+		rb_key.ndescs = 1 << ent->order;
+		ent->rb_key = rb_key;
 		if ((dev->mdev->profile.mask & MLX5_PROF_MASK_MR_CACHE) &&
 		    !dev->is_rep && mlx5_core_is_pf(dev->mdev) &&
 		    mlx5r_umr_can_load_pas(dev, 0))
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 137143da5959..90339edddfed 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -1589,10 +1589,14 @@  mlx5_ib_odp_destroy_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq)
 
 void mlx5_odp_init_mkey_cache_entry(struct mlx5_cache_ent *ent)
 {
+	struct mlx5r_cache_rb_key rb_key = {
+		.access_mode = MLX5_MKC_ACCESS_MODE_KSM,
+		.ndescs = mlx5_imr_ksm_entries
+	};
+
 	if (!(ent->dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
 		return;
-	ent->ndescs = mlx5_imr_ksm_entries;
-	ent->access_mode = MLX5_MKC_ACCESS_MODE_KSM;
+	ent->rb_key = rb_key;
 }
 
 static const struct ib_device_ops mlx5_ib_dev_odp_ops = {
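
For illustration, the sketch below shows how the RB-tree added by the next
patch could use this key ordering to find the smallest usable entry. The
function name, the rb_root, and the node member of struct mlx5_cache_ent are
assumptions for the sake of the example (they are not part of this patch),
and cache_rb_key_cmp() refers to the comparator sketched above:

static struct mlx5_cache_ent *
mlx5_cache_find_smallest_ent(struct rb_root *root,
			     struct mlx5r_cache_rb_key rb_key)
{
	struct rb_node *node = root->rb_node;
	struct mlx5_cache_ent *cur, *smallest = NULL;
	int cmp;

	while (node) {
		cur = rb_entry(node, struct mlx5_cache_ent, node);
		cmp = cache_rb_key_cmp(rb_key, cur->rb_key);
		if (cmp == 0)
			return cur; /* exact match */
		if (cmp < 0) {
			/*
			 * cur sorts after the requested key; as ndescs is
			 * the least significant field, cur may match on
			 * access_mode/access_flags and merely be larger,
			 * which is still usable.
			 */
			smallest = cur;
			node = node->rb_left;
		} else {
			node = node->rb_right;
		}
	}

	/*
	 * smallest is the in-order successor of rb_key; the caller still
	 * has to verify that access_mode and access_flags match exactly.
	 */
	return smallest;
}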