@@ -317,6 +317,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_DEALLOC_MEMIC:
case MLX5_CMD_OP_PAGE_FAULT_RESUME:
case MLX5_CMD_OP_QUERY_ESW_FUNCTIONS:
+ case MLX5_CMD_OP_DEALLOC_SF:
return MLX5_CMD_STAT_OK;
case MLX5_CMD_OP_QUERY_HCA_CAP:
@@ -449,6 +450,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_MODIFY_XRQ:
case MLX5_CMD_OP_RELEASE_XRQ_ERROR:
case MLX5_CMD_OP_QUERY_SF_PARTITION:
+ case MLX5_CMD_OP_ALLOC_SF:
*status = MLX5_DRIVER_STATUS_ABORTED;
*synd = MLX5_DRIVER_SYND;
return -EIO;
@@ -476,6 +478,8 @@ const char *mlx5_command_str(int command)
MLX5_COMMAND_STR_CASE(SET_ISSI);
MLX5_COMMAND_STR_CASE(SET_DRIVER_VERSION);
MLX5_COMMAND_STR_CASE(QUERY_SF_PARTITION);
+ MLX5_COMMAND_STR_CASE(ALLOC_SF);
+ MLX5_COMMAND_STR_CASE(DEALLOC_SF);
MLX5_COMMAND_STR_CASE(CREATE_MKEY);
MLX5_COMMAND_STR_CASE(QUERY_MKEY);
MLX5_COMMAND_STR_CASE(DESTROY_MKEY);
@@ -644,30 +644,53 @@ static int mlx5_core_set_hca_defaults(struct mlx5_core_dev *dev)
return ret;
}
-int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id)
+/* Issue the ENABLE_HCA command for function @func_id.
+ * @ecpu is written to the embedded_cpu_function field; SF callers pass 0
+ * (see mlx5_core_enable_sf_hca()), PF/VF callers pass the device setting.
+ */
+static int enable_hca(struct mlx5_core_dev *dev, u16 func_id, bool ecpu)
{
- u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {};
MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
MLX5_SET(enable_hca_in, in, function_id, func_id);
- MLX5_SET(enable_hca_in, in, embedded_cpu_function,
- dev->caps.embedded_cpu);
+ MLX5_SET(enable_hca_in, in, embedded_cpu_function, ecpu);
return mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
}
-int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id)
+/* Enable a PF/VF HCA, preserving this device's embedded_cpu setting. */
+int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id)
{
- u32 out[MLX5_ST_SZ_DW(disable_hca_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(disable_hca_in)] = {0};
+ return enable_hca(dev, func_id, dev->caps.embedded_cpu);
+}
+
+/* Enable an SF HCA via its parent device. */
+int mlx5_core_enable_sf_hca(struct mlx5_core_dev *dev, u16 sf_func_id)
+{
+ /* When enabling an SF, it does not matter whether this is done from the
+ * ECPF or the PF: the embedded_cpu bit must be cleared, as expected by
+ * device firmware. SF function ids are split between the ECPF and the
+ * PF; whether a given SF belongs to the ECPF or to the PF is decided by
+ * the firmware from the SF's function id.
+ */
+ return enable_hca(dev, sf_func_id, 0);
+}
+
+/* Issue the DISABLE_HCA command for function @func_id.
+ * @ecpu is written to the embedded_cpu_function field; SF callers pass 0.
+ * Note: use the disable_hca_in layout throughout -- the removed code (and
+ * the first version of this hunk) set the field via enable_hca_in.
+ */
+static int disable_hca(struct mlx5_core_dev *dev, u16 func_id, bool ecpu)
+{
+ u32 out[MLX5_ST_SZ_DW(disable_hca_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(disable_hca_in)] = {};
MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA);
MLX5_SET(disable_hca_in, in, function_id, func_id);
- MLX5_SET(enable_hca_in, in, embedded_cpu_function,
- dev->caps.embedded_cpu);
+ MLX5_SET(disable_hca_in, in, embedded_cpu_function, ecpu);
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
+/* Disable a PF/VF HCA, preserving this device's embedded_cpu setting. */
+int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id)
+{
+ return disable_hca(dev, func_id, dev->caps.embedded_cpu);
+}
+
+/* Disable an SF HCA; the embedded_cpu bit must be 0 for SFs, mirroring
+ * mlx5_core_enable_sf_hca().
+ */
+int mlx5_core_disable_sf_hca(struct mlx5_core_dev *dev, u16 sf_func_id)
+{
+ return disable_hca(dev, sf_func_id, 0);
+}
+
u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev,
struct ptp_system_timestamp *sts)
{
@@ -3,6 +3,8 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/io-mapping.h>
+#include <linux/bitmap.h>
#include "sf.h"
#include "mlx5_core.h"
@@ -31,7 +33,6 @@ int mlx5_sf_table_init(struct mlx5_core_dev *dev,
if (!out)
return -ENOMEM;
- mutex_init(&sf_table->lock);
/* SFs BAR is implemented in PCI BAR2 */
sf_table->base_address = pci_resource_start(dev->pdev, 2);
@@ -46,6 +47,13 @@ int mlx5_sf_table_init(struct mlx5_core_dev *dev,
sf_table->log_sf_bar_size =
MLX5_GET(sf_partition, sf_parts, log_sf_bar_size);
+ sf_table->sf_id_bitmap = bitmap_zalloc(sf_table->max_sfs, GFP_KERNEL);
+ if (!sf_table->sf_id_bitmap) {
+ err = -ENOMEM;
+ goto free_outmem;
+ }
+ mutex_init(&sf_table->lock);
+
mlx5_core_dbg(dev, "supported partitions(%d)\n", n_support);
mlx5_core_dbg(dev, "SF_part(0) log_num_sf(%d) log_sf_bar_size(%d)\n",
sf_table->max_sfs, sf_table->log_sf_bar_size);
@@ -59,4 +67,110 @@ void mlx5_sf_table_cleanup(struct mlx5_core_dev *dev,
struct mlx5_sf_table *sf_table)
{
mutex_destroy(&sf_table->lock);
+ bitmap_free(sf_table->sf_id_bitmap);
+}
+
+/* Issue the ALLOC_SF command for hardware function id @function_id. */
+static int mlx5_cmd_alloc_sf(struct mlx5_core_dev *mdev, u16 function_id)
+{
+ u32 out[MLX5_ST_SZ_DW(alloc_sf_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(alloc_sf_in)] = {};
+
+ MLX5_SET(alloc_sf_in, in, opcode, MLX5_CMD_OP_ALLOC_SF);
+ MLX5_SET(alloc_sf_in, in, function_id, function_id);
+
+ return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+}
+
+/* Issue the DEALLOC_SF command for hardware function id @function_id. */
+static int mlx5_cmd_dealloc_sf(struct mlx5_core_dev *mdev, u16 function_id)
+{
+ u32 out[MLX5_ST_SZ_DW(dealloc_sf_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(dealloc_sf_in)] = {};
+
+ MLX5_SET(dealloc_sf_in, in, opcode, MLX5_CMD_OP_DEALLOC_SF);
+ MLX5_SET(dealloc_sf_in, in, function_id, function_id);
+
+ return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+}
+
+/* Reserve the first free SF index from the table bitmap.
+ * Returns 0 and stores the index in *@sf_id, or -ENOSPC when all
+ * max_sfs ids are in use (find_first_zero_bit() returns the bitmap
+ * size in that case). Serialized by sf_table->lock.
+ */
+static int alloc_sf_id(struct mlx5_sf_table *sf_table, u16 *sf_id)
+{
+ int ret = 0;
+ u16 idx;
+
+ mutex_lock(&sf_table->lock);
+ idx = find_first_zero_bit(sf_table->sf_id_bitmap, sf_table->max_sfs);
+ if (idx == sf_table->max_sfs) {
+ ret = -ENOSPC;
+ goto done;
+ }
+ bitmap_set(sf_table->sf_id_bitmap, idx, 1);
+ *sf_id = idx;
+done:
+ mutex_unlock(&sf_table->lock);
+ return ret;
+}
+
+/* Return SF index @sf_id to the bitmap, under sf_table->lock. */
+static void free_sf_id(struct mlx5_sf_table *sf_table, u16 sf_id)
+{
+ mutex_lock(&sf_table->lock);
+ bitmap_clear(sf_table->sf_id_bitmap, sf_id, 1);
+ mutex_unlock(&sf_table->lock);
+}
+
+/* Translate a table-local SF index into the device-wide hardware
+ * function id (SF ids start at the device's sf_base_id capability).
+ */
+static u16 mlx5_sf_hw_id(const struct mlx5_core_dev *coredev, u16 sf_id)
+{
+ return mlx5_sf_base_id(coredev) + sf_id;
+}
+
+/* Perform SF allocation using parent device BAR.
+ * Reserves a free SF id, allocates the SF in firmware and enables its HCA,
+ * then computes the SF's slice of the parent BAR. Returns the new SF on
+ * success or an ERR_PTR on failure; on failure all steps are unwound in
+ * reverse order.
+ * NOTE(review): @dev is unused in this body -- presumably needed by a
+ * follow-up patch in the series; confirm before relying on it.
+ */
+struct mlx5_sf *
+mlx5_sf_alloc(struct mlx5_core_dev *coredev, struct mlx5_sf_table *sf_table,
+ struct device *dev)
+{
+ struct mlx5_sf *sf;
+ u16 hw_function_id;
+ u16 sf_id;
+ int ret;
+
+ sf = kzalloc(sizeof(*sf), GFP_KERNEL);
+ if (!sf)
+ return ERR_PTR(-ENOMEM);
+
+ ret = alloc_sf_id(sf_table, &sf_id);
+ if (ret)
+ goto id_err;
+
+ hw_function_id = mlx5_sf_hw_id(coredev, sf_id);
+ ret = mlx5_cmd_alloc_sf(coredev, hw_function_id);
+ if (ret)
+ goto alloc_sf_err;
+
+ ret = mlx5_core_enable_sf_hca(coredev, hw_function_id);
+ if (ret)
+ goto enable_err;
+
+ sf->idx = sf_id;
+ /* Each SF owns a 2^log_sf_bar_size chunk of the parent BAR;
+ * "+ 12" presumably converts 4KB-page units to bytes -- TODO confirm
+ * against the sf_partition layout.
+ */
+ sf->base_addr = sf_table->base_address +
+ (sf->idx << (sf_table->log_sf_bar_size + 12));
+ return sf;
+
+enable_err:
+ mlx5_cmd_dealloc_sf(coredev, hw_function_id);
+alloc_sf_err:
+ free_sf_id(sf_table, sf_id);
+id_err:
+ kfree(sf);
+ return ERR_PTR(ret);
+}
+
+/* Tear down an SF in the reverse order of mlx5_sf_alloc():
+ * disable the HCA, deallocate the SF in firmware, release its id.
+ */
+void mlx5_sf_free(struct mlx5_core_dev *coredev, struct mlx5_sf_table *sf_table,
+ struct mlx5_sf *sf)
+{
+ u16 hw_function_id;
+
+ hw_function_id = mlx5_sf_hw_id(coredev, sf->idx);
+ mlx5_core_disable_sf_hca(coredev, hw_function_id);
+ mlx5_cmd_dealloc_sf(coredev, hw_function_id);
+ free_sf_id(sf_table, sf->idx);
+ kfree(sf);
}
@@ -6,11 +6,18 @@
#include <linux/mlx5/driver.h>
#include <linux/mlx5/eswitch.h>
+#include <linux/idr.h>
+
+/* One subfunction (SF) carved out of the parent device's SF BAR. */
+struct mlx5_sf {
+ phys_addr_t base_addr; /* Start of this SF's slice of the parent BAR */
+ u16 idx; /* Index allocated by the SF table bitmap */
+};
struct mlx5_sf_table {
phys_addr_t base_address;
/* Protects sfs life cycle and sf enable/disable flows */
struct mutex lock;
+ unsigned long *sf_id_bitmap; /* One bit per SF id; protected by lock */
u16 max_sfs;
u16 log_sf_bar_size;
};
@@ -22,11 +29,22 @@ static inline bool mlx5_core_is_sf_supported(const struct mlx5_core_dev *dev)
MLX5_CAP_GEN(dev, sf);
}
+/* First hardware function id reserved for SFs, from device caps. */
+static inline u16 mlx5_sf_base_id(const struct mlx5_core_dev *dev)
+{
+ return MLX5_CAP_GEN(dev, sf_base_id);
+}
+
#ifdef CONFIG_MLX5_MDEV
int mlx5_sf_table_init(struct mlx5_core_dev *dev,
struct mlx5_sf_table *sf_table);
void mlx5_sf_table_cleanup(struct mlx5_core_dev *dev,
struct mlx5_sf_table *sf_table);
+
+struct mlx5_sf *
+mlx5_sf_alloc(struct mlx5_core_dev *coredev, struct mlx5_sf_table *sf_table,
+ struct device *dev);
+void mlx5_sf_free(struct mlx5_core_dev *coredev, struct mlx5_sf_table *sf_table,
+ struct mlx5_sf *sf);
#endif
#endif
@@ -133,6 +133,8 @@ void mlx5_sriov_detach(struct mlx5_core_dev *dev);
int mlx5_core_sriov_configure(struct pci_dev *dev, int num_vfs);
int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id);
int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id);
+int mlx5_core_enable_sf_hca(struct mlx5_core_dev *dev, u16 sf_func_id);
+int mlx5_core_disable_sf_hca(struct mlx5_core_dev *dev, u16 sf_func_id);
int mlx5_create_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
void *context, u32 *element_id);
int mlx5_modify_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,