diff mbox series

[mlx5-next,2/5] net/mlx5: Manage ICM type of SW encap

Message ID 37dc4fd78dfa3374ff53aa602f038a2ec76eb069.1701172481.git.leon@kernel.org (mailing list archive)
State Superseded
Headers show
Series Expose c0 and SW encap ICM for RDMA | expand

Commit Message

Leon Romanovsky Nov. 28, 2023, 12:29 p.m. UTC
From: Shun Hao <shunh@nvidia.com>

Support allocating/deallocating the new SW encap ICM type of memory.
The new ICM type is used for encap context allocation managed by SW
instead of FW. It can increase the maximum number of encap contexts and
the allocation speed.

Signed-off-by: Shun Hao <shunh@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
---
 .../net/ethernet/mellanox/mlx5/core/lib/dm.c  | 38 ++++++++++++++++++-
 1 file changed, 37 insertions(+), 1 deletion(-)

Comments

Simon Horman Nov. 30, 2023, 6:16 p.m. UTC | #1
On Tue, Nov 28, 2023 at 02:29:46PM +0200, Leon Romanovsky wrote:
> From: Shun Hao <shunh@nvidia.com>
> 
> Support allocate/deallocate the new SW encap ICM type memory.
> The new ICM type is used for encap context allocation managed by SW,
> instead FW. It can increase encap context maximum number and allocation
> speed
> 
> Signed-off-by: Shun Hao <shunh@nvidia.com>
> Signed-off-by: Leon Romanovsky <leonro@nvidia.com>

...

> @@ -164,6 +188,13 @@ int mlx5_dm_sw_icm_alloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type,
>  						log_header_modify_pattern_sw_icm_size);
>  		block_map = dm->header_modify_pattern_sw_icm_alloc_blocks;
>  		break;
> +	case MLX5_SW_ICM_TYPE_SW_ENCAP:
> +		icm_start_addr = MLX5_CAP64_DEV_MEM(dev,
> +						    indirect_encap_sw_icm_start_address);
> +		log_icm_size = MLX5_CAP_DEV_MEM(dev,
> +						log_indirect_encap_sw_icm_size);
> +		block_map = dm->header_encap_sw_icm_alloc_blocks;
> +		break;
>  	default:
>  		return -EINVAL;
>  	}
> @@ -242,6 +273,11 @@ int mlx5_dm_sw_icm_dealloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type
>  						    header_modify_pattern_sw_icm_start_address);
>  		block_map = dm->header_modify_pattern_sw_icm_alloc_blocks;
>  		break;
> +	case MLX5_SW_ICM_TYPE_SW_ENCAP:
> +		icm_start_addr = MLX5_CAP64_DEV_MEM(dev,
> +						    indirect_encap_sw_icm_start_address);
> +		block_map = dm->header_encap_sw_icm_alloc_blocks;
> +		break;
>  	default:
>  		return -EINVAL;
>  	}

Hi Leon and Shun,

a minor nit from my side: this patch uses MLX5_SW_ICM_TYPE_SW_ENCAP,
but that enum value isn't present until the following patch.
Leon Romanovsky Dec. 1, 2023, 5:15 p.m. UTC | #2
On Thu, Nov 30, 2023 at 06:16:11PM +0000, Simon Horman wrote:
> On Tue, Nov 28, 2023 at 02:29:46PM +0200, Leon Romanovsky wrote:
> > From: Shun Hao <shunh@nvidia.com>
> > 
> > Support allocate/deallocate the new SW encap ICM type memory.
> > The new ICM type is used for encap context allocation managed by SW,
> > instead FW. It can increase encap context maximum number and allocation
> > speed
> > 
> > Signed-off-by: Shun Hao <shunh@nvidia.com>
> > Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
> 
> ...
> 
> > @@ -164,6 +188,13 @@ int mlx5_dm_sw_icm_alloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type,
> >  						log_header_modify_pattern_sw_icm_size);
> >  		block_map = dm->header_modify_pattern_sw_icm_alloc_blocks;
> >  		break;
> > +	case MLX5_SW_ICM_TYPE_SW_ENCAP:
> > +		icm_start_addr = MLX5_CAP64_DEV_MEM(dev,
> > +						    indirect_encap_sw_icm_start_address);
> > +		log_icm_size = MLX5_CAP_DEV_MEM(dev,
> > +						log_indirect_encap_sw_icm_size);
> > +		block_map = dm->header_encap_sw_icm_alloc_blocks;
> > +		break;
> >  	default:
> >  		return -EINVAL;
> >  	}
> > @@ -242,6 +273,11 @@ int mlx5_dm_sw_icm_dealloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type
> >  						    header_modify_pattern_sw_icm_start_address);
> >  		block_map = dm->header_modify_pattern_sw_icm_alloc_blocks;
> >  		break;
> > +	case MLX5_SW_ICM_TYPE_SW_ENCAP:
> > +		icm_start_addr = MLX5_CAP64_DEV_MEM(dev,
> > +						    indirect_encap_sw_icm_start_address);
> > +		block_map = dm->header_encap_sw_icm_alloc_blocks;
> > +		break;
> >  	default:
> >  		return -EINVAL;
> >  	}
> 
> Hi Leon and Shun,
> 
> a minor nit from my side: this patch uses MLX5_SW_ICM_TYPE_SW_ENCAP,
> but that enum value isn't present until the following patch.

Thanks, it was my mistake to reorder the patches; I will change them to the
right order and resubmit.
diff mbox series

Patch

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c
index 9482e51ac82a..7c5516b0a844 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c
@@ -13,11 +13,13 @@  struct mlx5_dm {
 	unsigned long *steering_sw_icm_alloc_blocks;
 	unsigned long *header_modify_sw_icm_alloc_blocks;
 	unsigned long *header_modify_pattern_sw_icm_alloc_blocks;
+	unsigned long *header_encap_sw_icm_alloc_blocks;
 };
 
 struct mlx5_dm *mlx5_dm_create(struct mlx5_core_dev *dev)
 {
 	u64 header_modify_pattern_icm_blocks = 0;
+	u64 header_sw_encap_icm_blocks = 0;
 	u64 header_modify_icm_blocks = 0;
 	u64 steering_icm_blocks = 0;
 	struct mlx5_dm *dm;
@@ -54,6 +56,17 @@  struct mlx5_dm *mlx5_dm_create(struct mlx5_core_dev *dev)
 			goto err_modify_hdr;
 	}
 
+	if (MLX5_CAP_DEV_MEM(dev, log_indirect_encap_sw_icm_size)) {
+		header_sw_encap_icm_blocks =
+			BIT(MLX5_CAP_DEV_MEM(dev, log_indirect_encap_sw_icm_size) -
+			    MLX5_LOG_SW_ICM_BLOCK_SIZE(dev));
+
+		dm->header_encap_sw_icm_alloc_blocks =
+			bitmap_zalloc(header_sw_encap_icm_blocks, GFP_KERNEL);
+		if (!dm->header_encap_sw_icm_alloc_blocks)
+			goto err_pattern;
+	}
+
 	support_v2 = MLX5_CAP_FLOWTABLE_NIC_RX(dev, sw_owner_v2) &&
 		     MLX5_CAP_FLOWTABLE_NIC_TX(dev, sw_owner_v2) &&
 		     MLX5_CAP64_DEV_MEM(dev, header_modify_pattern_sw_icm_start_address);
@@ -66,11 +79,14 @@  struct mlx5_dm *mlx5_dm_create(struct mlx5_core_dev *dev)
 		dm->header_modify_pattern_sw_icm_alloc_blocks =
 			bitmap_zalloc(header_modify_pattern_icm_blocks, GFP_KERNEL);
 		if (!dm->header_modify_pattern_sw_icm_alloc_blocks)
-			goto err_pattern;
+			goto err_sw_encap;
 	}
 
 	return dm;
 
+err_sw_encap:
+	bitmap_free(dm->header_encap_sw_icm_alloc_blocks);
+
 err_pattern:
 	bitmap_free(dm->header_modify_sw_icm_alloc_blocks);
 
@@ -105,6 +121,14 @@  void mlx5_dm_cleanup(struct mlx5_core_dev *dev)
 		bitmap_free(dm->header_modify_sw_icm_alloc_blocks);
 	}
 
+	if (dm->header_encap_sw_icm_alloc_blocks) {
+		WARN_ON(!bitmap_empty(dm->header_encap_sw_icm_alloc_blocks,
+				      BIT(MLX5_CAP_DEV_MEM(dev,
+							   log_indirect_encap_sw_icm_size) -
+				      MLX5_LOG_SW_ICM_BLOCK_SIZE(dev))));
+		bitmap_free(dm->header_encap_sw_icm_alloc_blocks);
+	}
+
 	if (dm->header_modify_pattern_sw_icm_alloc_blocks) {
 		WARN_ON(!bitmap_empty(dm->header_modify_pattern_sw_icm_alloc_blocks,
 				      BIT(MLX5_CAP_DEV_MEM(dev,
@@ -164,6 +188,13 @@  int mlx5_dm_sw_icm_alloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type,
 						log_header_modify_pattern_sw_icm_size);
 		block_map = dm->header_modify_pattern_sw_icm_alloc_blocks;
 		break;
+	case MLX5_SW_ICM_TYPE_SW_ENCAP:
+		icm_start_addr = MLX5_CAP64_DEV_MEM(dev,
+						    indirect_encap_sw_icm_start_address);
+		log_icm_size = MLX5_CAP_DEV_MEM(dev,
+						log_indirect_encap_sw_icm_size);
+		block_map = dm->header_encap_sw_icm_alloc_blocks;
+		break;
 	default:
 		return -EINVAL;
 	}
@@ -242,6 +273,11 @@  int mlx5_dm_sw_icm_dealloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type
 						    header_modify_pattern_sw_icm_start_address);
 		block_map = dm->header_modify_pattern_sw_icm_alloc_blocks;
 		break;
+	case MLX5_SW_ICM_TYPE_SW_ENCAP:
+		icm_start_addr = MLX5_CAP64_DEV_MEM(dev,
+						    indirect_encap_sw_icm_start_address);
+		block_map = dm->header_encap_sw_icm_alloc_blocks;
+		break;
 	default:
 		return -EINVAL;
 	}