[mlx5-next,14/16] net/mlx5: Rename peer_pf to host_pf

Message ID 20201120230339.651609-15-saeedm@nvidia.com (mailing list archive)
State Not Applicable
Series mlx5 next updates 2020-11-20

Commit Message

Saeed Mahameed Nov. 20, 2020, 11:03 p.m. UTC
From: Parav Pandit <parav@nvidia.com>

To match the hardware spec, rename peer_pf to host_pf.

Signed-off-by: Parav Pandit <parav@nvidia.com>
Reviewed-by: Bodong Wang <bodong@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
---
 .../net/ethernet/mellanox/mlx5/core/ecpf.c    | 51 ++++++++++++-------
 .../ethernet/mellanox/mlx5/core/pagealloc.c   | 12 ++---
 include/linux/mlx5/driver.h                   |  2 +-
 3 files changed, 40 insertions(+), 25 deletions(-)
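Besides the rename, the ecpf.c hunks below split command construction into dedicated helpers that explicitly address function_id 0 with embedded_cpu_function 0, i.e. the PF on the external host as seen from the embedded CPU. The helpers use the standard mlx5 command idiom: a zeroed dword buffer sized by MLX5_ST_SZ_DW(), fields written with MLX5_SET(), execution through mlx5_cmd_exec(). A minimal, runnable userspace mock of that idiom follows; the field offsets are invented for illustration and do not reflect the real enable_hca layout in mlx5_ifc.h (only the opcode value 0x104, MLX5_CMD_OP_ENABLE_HCA, is real):

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>	/* htonl/ntohl: the device sees big-endian dwords */

/* Mock of the MLX5_SET() technique: write a (width)-bit value at a bit
 * offset inside a big-endian dword buffer. Offsets below are made up.
 */
#define MOCK_SET(buf, dw, shift, width, val)                            \
	do {                                                            \
		uint32_t _host = ntohl((buf)[dw]);                      \
		uint32_t _mask = (~0U >> (32 - (width))) << (shift);    \
		_host = (_host & ~_mask) |                              \
			(((uint32_t)(val) << (shift)) & _mask);         \
		(buf)[dw] = htonl(_host);                               \
	} while (0)

int main(void)
{
	uint32_t in[4] = {0};	/* zeroed, like an MLX5_ST_SZ_DW() buffer */

	MOCK_SET(in, 0, 16, 16, 0x104);	/* opcode: MLX5_CMD_OP_ENABLE_HCA */
	MOCK_SET(in, 1, 0, 16, 0);	/* function_id 0: the host PF */
	printf("dw0=%08x dw1=%08x\n", ntohl(in[0]), ntohl(in[1]));
	return 0;	/* a real driver would hand "in" to mlx5_cmd_exec() */
}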

Patch

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c b/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c
index 3dc9dd3f24dc..68ca0e2b26cd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c
@@ -8,37 +8,52 @@ bool mlx5_read_embedded_cpu(struct mlx5_core_dev *dev)
 	return (ioread32be(&dev->iseg->initializing) >> MLX5_ECPU_BIT_NUM) & 1;
 }
 
-static int mlx5_peer_pf_init(struct mlx5_core_dev *dev)
+static int mlx5_cmd_host_pf_enable_hca(struct mlx5_core_dev *dev)
 {
-	u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {};
-	int err;
+	u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {};
+	u32 in[MLX5_ST_SZ_DW(enable_hca_in)]   = {};
 
 	MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
-	err = mlx5_cmd_exec_in(dev, enable_hca, in);
+	MLX5_SET(enable_hca_in, in, function_id, 0);
+	MLX5_SET(enable_hca_in, in, embedded_cpu_function, 0);
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
+static int mlx5_cmd_host_pf_disable_hca(struct mlx5_core_dev *dev)
+{
+	u32 out[MLX5_ST_SZ_DW(disable_hca_out)] = {};
+	u32 in[MLX5_ST_SZ_DW(disable_hca_in)]   = {};
+
+	MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA);
+	MLX5_SET(disable_hca_in, in, function_id, 0);
+	MLX5_SET(disable_hca_in, in, embedded_cpu_function, 0);
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
+static int mlx5_host_pf_init(struct mlx5_core_dev *dev)
+{
+	int err;
+
+	err = mlx5_cmd_host_pf_enable_hca(dev);
 	if (err)
-		mlx5_core_err(dev, "Failed to enable peer PF HCA err(%d)\n",
-			      err);
+		mlx5_core_err(dev, "Failed to enable external host PF HCA err(%d)\n", err);
 
 	return err;
 }
 
-static void mlx5_peer_pf_cleanup(struct mlx5_core_dev *dev)
+static void mlx5_host_pf_cleanup(struct mlx5_core_dev *dev)
 {
-	u32 in[MLX5_ST_SZ_DW(disable_hca_in)] = {};
 	int err;
 
-	MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA);
-	err = mlx5_cmd_exec_in(dev, disable_hca, in);
+	err = mlx5_cmd_host_pf_disable_hca(dev);
 	if (err) {
-		mlx5_core_err(dev, "Failed to disable peer PF HCA err(%d)\n",
-			      err);
+		mlx5_core_err(dev, "Failed to disable external host PF HCA err(%d)\n", err);
 		return;
 	}
 
-	err = mlx5_wait_for_pages(dev, &dev->priv.peer_pf_pages);
+	err = mlx5_wait_for_pages(dev, &dev->priv.host_pf_pages);
 	if (err)
-		mlx5_core_warn(dev, "Timeout reclaiming peer PF pages err(%d)\n",
-			       err);
+		mlx5_core_warn(dev, "Timeout reclaiming external host PF pages err(%d)\n", err);
 }
 
 int mlx5_ec_init(struct mlx5_core_dev *dev)
@@ -46,10 +61,10 @@ int mlx5_ec_init(struct mlx5_core_dev *dev)
 	if (!mlx5_core_is_ecpf(dev))
 		return 0;
 
-	/* ECPF shall enable HCA for peer PF in the same way a PF
+	/* ECPF shall enable HCA for host PF in the same way a PF
 	 * does this for its VFs.
 	 */
-	return mlx5_peer_pf_init(dev);
+	return mlx5_host_pf_init(dev);
 }
 
 void mlx5_ec_cleanup(struct mlx5_core_dev *dev)
@@ -57,5 +72,5 @@ void mlx5_ec_cleanup(struct mlx5_core_dev *dev)
 	if (!mlx5_core_is_ecpf(dev))
 		return;
 
-	mlx5_peer_pf_cleanup(dev);
+	mlx5_host_pf_cleanup(dev);
 }
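The comment kept in mlx5_ec_init() above draws a parallel: the ECPF brings up the external host PF the same way a PF brings up its VFs, and the only difference in the ENABLE_HCA command is how the target function is addressed. A sketch of that parallel, using only the fields visible in the hunks above (sketch_enable_hca is a hypothetical name, not a helper from this patch; the in-tree VF path goes through a separate mlx5_core helper):

/* Hypothetical helper illustrating the parallel; not part of this patch. */
static int sketch_enable_hca(struct mlx5_core_dev *dev, u16 func_id,
			     bool ec_function)
{
	u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {};
	u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {};

	MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
	MLX5_SET(enable_hca_in, in, function_id, func_id);
	MLX5_SET(enable_hca_in, in, embedded_cpu_function, ec_function);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

/* PF enabling VF n (VF function ids start at 1 from the parent's view):
 *	sketch_enable_hca(dev, n + 1, ...);
 * ECPF enabling the external host PF (this patch):
 *	sketch_enable_hca(dev, 0, 0);
 */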
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index 150638814517..539baea358bf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -374,7 +374,7 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
 	if (func_id)
 		dev->priv.vfs_pages += npages;
 	else if (mlx5_core_is_ecpf(dev) && !ec_function)
-		dev->priv.peer_pf_pages += npages;
+		dev->priv.host_pf_pages += npages;
 
 	mlx5_core_dbg(dev, "npages %d, ec_function %d, func_id 0x%x, err %d\n",
 		      npages, ec_function, func_id, err);
@@ -416,7 +416,7 @@ static void release_all_pages(struct mlx5_core_dev *dev, u32 func_id,
 	if (func_id)
 		dev->priv.vfs_pages -= npages;
 	else if (mlx5_core_is_ecpf(dev) && !ec_function)
-		dev->priv.peer_pf_pages -= npages;
+		dev->priv.host_pf_pages -= npages;
 
 	mlx5_core_dbg(dev, "npages %d, ec_function %d, func_id 0x%x\n",
 		      npages, ec_function, func_id);
@@ -506,7 +506,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
 	if (func_id)
 		dev->priv.vfs_pages -= num_claimed;
 	else if (mlx5_core_is_ecpf(dev) && !ec_function)
-		dev->priv.peer_pf_pages -= num_claimed;
+		dev->priv.host_pf_pages -= num_claimed;
 
 out_free:
 	kvfree(out);
@@ -661,9 +661,9 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
 	WARN(dev->priv.vfs_pages,
 	     "VFs FW pages counter is %d after reclaiming all pages\n",
 	     dev->priv.vfs_pages);
-	WARN(dev->priv.peer_pf_pages,
-	     "Peer PF FW pages counter is %d after reclaiming all pages\n",
-	     dev->priv.peer_pf_pages);
+	WARN(dev->priv.host_pf_pages,
+	     "External host PF FW pages counter is %d after reclaiming all pages\n",
+	     dev->priv.host_pf_pages);
 
 	return 0;
 }
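give_pages(), release_all_pages() and reclaim_pages() above all repeat the same dispatch when updating the per-function FW page counters: a nonzero func_id means a VF, while func_id 0 on an ECPF with ec_function cleared means the external host PF. A hypothetical helper (not in this patch) that states the rule in one place:

/* Hypothetical: which sub-counter a page operation maps to. The overall
 * total is tracked separately (dev->priv.fw_pages); the device's own
 * pages fall through to NULL here.
 */
static int *fw_pages_counter(struct mlx5_core_dev *dev, u16 func_id,
			     bool ec_function)
{
	if (func_id)
		return &dev->priv.vfs_pages;	/* pages given to a VF */
	if (mlx5_core_is_ecpf(dev) && !ec_function)
		return &dev->priv.host_pf_pages;	/* external host PF */
	return NULL;
}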
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index d6ef3068d7d3..8e9bcb3bfd77 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -547,7 +547,7 @@ struct mlx5_priv {
 	atomic_t		reg_pages;
 	struct list_head	free_list;
 	int			vfs_pages;
-	int			peer_pf_pages;
+	int			host_pf_pages;
 
 	struct mlx5_core_health health;
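Pieced together from the hunks above, host_pf_pages has the same lifecycle vfs_pages already had: give_pages() raises it, the reclaim paths lower it, mlx5_host_pf_cleanup() blocks in mlx5_wait_for_pages() until it drains, and mlx5_reclaim_startup_pages() warns if anything leaks. A minimal sketch of such a drain wait, assuming a jiffies-based timeout (illustrative only; the in-tree mlx5_wait_for_pages() in pagealloc.c may differ in detail):

/* Sketch in the spirit of mlx5_wait_for_pages(): poll a FW pages counter
 * until the device has returned everything, or give up. The timeout
 * budget below is an assumption for illustration.
 */
static int sketch_wait_for_pages(int *pages)
{
	unsigned long end = jiffies + msecs_to_jiffies(60000);

	while (*pages) {
		if (time_after(jiffies, end))
			return -ETIMEDOUT;
		msleep(50);	/* counter drops as the FW returns pages */
	}
	return 0;
}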