@@ -366,6 +366,14 @@ static int tx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_tx *tx)
return err;
}
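+/* Mirror of tx_create(): remove the TX policy rule and group, then the policy and SA flow tables. */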
+static void tx_destroy(struct mlx5e_ipsec_tx *tx)
+{
+ mlx5_del_flow_rules(tx->pol.rule);
+ mlx5_destroy_flow_group(tx->pol.group);
+ mlx5_destroy_flow_table(tx->ft.pol);
+ mlx5_destroy_flow_table(tx->ft.sa);
+}
+
static struct mlx5e_ipsec_tx *tx_ft_get(struct mlx5_core_dev *mdev,
struct mlx5e_ipsec *ipsec)
{
@@ -379,6 +387,13 @@ static struct mlx5e_ipsec_tx *tx_ft_get(struct mlx5_core_dev *mdev,
err = tx_create(mdev, tx);
if (err)
goto out;
+
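+ /* Steer RDMA TX traffic into the NIC TX policy table that tx_create() just set up. */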
+ err = mlx5_ipsec_fs_roce_tx_create(ipsec->roce_ipsec, tx->ft.pol, ipsec->mdev);
+ if (err) {
+ tx_destroy(tx);
+ goto out;
+ }
+
skip:
tx->ft.refcnt++;
out:
@@ -397,10 +412,9 @@ static void tx_ft_put(struct mlx5e_ipsec *ipsec)
if (tx->ft.refcnt)
goto out;
- mlx5_del_flow_rules(tx->pol.rule);
- mlx5_destroy_flow_group(tx->pol.group);
- mlx5_destroy_flow_table(tx->ft.pol);
- mlx5_destroy_flow_table(tx->ft.sa);
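+ /* Remove the RoCE TX rule first; it forwards into tx->ft.pol, which tx_destroy() is about to delete. */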
+ mlx5_ipsec_fs_roce_tx_destroy(ipsec->roce_ipsec);
+
+ tx_destroy(tx);
out:
mutex_unlock(&tx->ft.mutex);
}
@@ -20,9 +20,17 @@ struct mlx5_ipsec_rx_roce {
struct mlx5_flow_namespace *ns_rdma;
};
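+/* TX counterpart of mlx5_ipsec_rx_roce: table, group and match-all rule steering RDMA TX traffic to NIC TX. */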
+struct mlx5_ipsec_tx_roce {
+ struct mlx5_flow_group *g;
+ struct mlx5_flow_table *ft;
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_namespace *ns;
+};
+
struct mlx5_ipsec_fs {
struct mlx5_ipsec_rx_roce ipv4_rx;
struct mlx5_ipsec_rx_roce ipv6_rx;
+ struct mlx5_ipsec_tx_roce tx;
};
static void ipsec_fs_roce_setup_udp_dport(struct mlx5_flow_spec *spec, u16 dport)
@@ -83,6 +91,103 @@ static int ipsec_fs_roce_rx_rule_setup(struct mlx5_flow_destination *default_dst
return err;
}
+static int ipsec_fs_roce_tx_rule_setup(struct mlx5_core_dev *mdev, struct mlx5_ipsec_tx_roce *roce,
+ struct mlx5_flow_table *pol_ft)
+{
+ struct mlx5_flow_destination dst = {};
+ MLX5_DECLARE_FLOW_ACT(flow_act);
+ struct mlx5_flow_handle *rule;
+ int err = 0;
+
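+ /* Match-all rule (NULL spec): forward all RDMA TX traffic to the NIC TX policy table, a cross-table-type destination. */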
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ dst.type = MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE;
+ dst.ft = pol_ft;
+ rule = mlx5_add_flow_rules(roce->ft, NULL, &flow_act, &dst, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev, "Fail to add TX roce ipsec rule err=%d\n",
+ err);
+ goto out;
+ }
+ roce->rule = rule;
+
+out:
+ return err;
+}
+
+void mlx5_ipsec_fs_roce_tx_destroy(struct mlx5_ipsec_fs *ipsec_roce)
+{
+ struct mlx5_ipsec_tx_roce *tx_roce;
+
+ if (!ipsec_roce)
+ return;
+
+ tx_roce = &ipsec_roce->tx;
+
+ mlx5_del_flow_rules(tx_roce->rule);
+ mlx5_destroy_flow_group(tx_roce->g);
+ mlx5_destroy_flow_table(tx_roce->ft);
+}
+
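+/* The table holds a single match-all entry, so one group of size 1 suffices. */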
+#define MLX5_TX_ROCE_GROUP_SIZE BIT(0)
+
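+/* Build the RDMA TX steering: a one-entry flow table whose only rule forwards into the NIC TX IPsec policy table (pol_ft). */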
+int mlx5_ipsec_fs_roce_tx_create(struct mlx5_ipsec_fs *ipsec_roce, struct mlx5_flow_table *pol_ft,
+ struct mlx5_core_dev *mdev)
+{
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_ipsec_tx_roce *roce;
+ struct mlx5_flow_table *ft;
+ struct mlx5_flow_group *g;
+ int ix = 0;
+ int err;
+ u32 *in;
+
+ if (!ipsec_roce)
+ return 0;
+
+ roce = &ipsec_roce->tx;
+
+ in = kvzalloc(MLX5_ST_SZ_BYTES(create_flow_group_in), GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ ft_attr.max_fte = 1;
+ ft = mlx5_create_flow_table(roce->ns, &ft_attr);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ mlx5_core_err(mdev, "Failed to create ipsec tx roce ft err=%d\n", err);
+ goto free_in;
+ }
+
+ roce->ft = ft;
+
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += MLX5_TX_ROCE_GROUP_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ g = mlx5_create_flow_group(ft, in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ mlx5_core_err(mdev, "Fail to create ipsec tx roce group err=%d\n", err);
+ goto fail;
+ }
+ roce->g = g;
+
+ err = ipsec_fs_roce_tx_rule_setup(mdev, roce, pol_ft);
+ if (err) {
+ mlx5_core_err(mdev, "Failed to create ipsec tx roce rules err=%d\n", err);
+ goto rule_fail;
+ }
+
+ kvfree(in);
+ return 0;
+
+rule_fail:
+ mlx5_destroy_flow_group(roce->g);
+fail:
+ mlx5_destroy_flow_table(ft);
+free_in:
+ kvfree(in);
+ return err;
+}
+
struct mlx5_flow_table *mlx5_ipsec_fs_roce_ft_get(struct mlx5_ipsec_fs *ipsec_roce, u32 family)
{
struct mlx5_ipsec_rx_roce *rx_roce;
@@ -222,6 +327,8 @@ void mlx5_ipsec_fs_roce_cleanup(struct mlx5_ipsec_fs *ipsec_roce)
kfree(ipsec_roce);
}
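+/* RX needs NIC RX -> RDMA RX forwarding, TX needs RDMA TX -> NIC TX; require both. */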
+#define NIC_RDMA_BOTH_DIRS_CAPS (MLX5_FT_NIC_RX_2_NIC_RX_RDMA | MLX5_FT_NIC_TX_RDMA_2_NIC_TX)
+
struct mlx5_ipsec_fs *mlx5_ipsec_fs_roce_init(struct mlx5_core_dev *mdev)
{
struct mlx5_ipsec_fs *roce_ipsec;
@@ -230,8 +337,8 @@ struct mlx5_ipsec_fs *mlx5_ipsec_fs_roce_init(struct mlx5_core_dev *mdev)
if (!mlx5_get_roce_state(mdev))
return NULL;
- if (!(MLX5_CAP_GEN_2(mdev, flow_table_type_2_type) &
- MLX5_FT_NIC_RX_2_NIC_RX_RDMA)) {
+ if ((MLX5_CAP_GEN_2(mdev, flow_table_type_2_type) &
+ NIC_RDMA_BOTH_DIRS_CAPS) != NIC_RDMA_BOTH_DIRS_CAPS) {
mlx5_core_dbg(mdev, "Failed to init roce ipsec flow steering, capabilities not supported\n");
return NULL;
}
@@ -249,5 +356,17 @@ struct mlx5_ipsec_fs *mlx5_ipsec_fs_roce_init(struct mlx5_core_dev *mdev)
roce_ipsec->ipv4_rx.ns_rdma = ns;
roce_ipsec->ipv6_rx.ns_rdma = ns;
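+ /* The TX table lives in the dedicated RDMA TX IPsec namespace. */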
+ ns = mlx5_get_flow_namespace(mdev, MLX5_FLOW_NAMESPACE_RDMA_TX_IPSEC);
+ if (!ns) {
+ mlx5_core_err(mdev, "Failed to get roce tx ns\n");
+ goto err_tx;
+ }
+
+ roce_ipsec->tx.ns = ns;
+
return roce_ipsec;
+
+err_tx:
+ kfree(roce_ipsec);
+ return NULL;
}
@@ -11,6 +11,9 @@ void mlx5_ipsec_fs_roce_rx_destroy(struct mlx5_ipsec_fs *ipsec_roce, u32 family)
int mlx5_ipsec_fs_roce_rx_create(struct mlx5_ipsec_fs *ipsec_roce, struct mlx5_flow_namespace *ns,
struct mlx5_flow_destination *default_dst, u32 family, u32 level,
u32 prio, struct mlx5_core_dev *mdev);
+void mlx5_ipsec_fs_roce_tx_destroy(struct mlx5_ipsec_fs *ipsec_roce);
+int mlx5_ipsec_fs_roce_tx_create(struct mlx5_ipsec_fs *ipsec_roce, struct mlx5_flow_table *pol_ft,
+ struct mlx5_core_dev *mdev);
void mlx5_ipsec_fs_roce_cleanup(struct mlx5_ipsec_fs *ipsec_roce);
struct mlx5_ipsec_fs *mlx5_ipsec_fs_roce_init(struct mlx5_core_dev *mdev);