[xfrm-next,10/13] net/mlx5e: Update IPsec soft and hard limits

Message ID 89fcfe0432d857d006c8b5f2f5082e2193dfbd1b.1670011885.git.leonro@nvidia.com (mailing list archive)
State RFC
Delegated to: Netdev Maintainers
Series mlx5 IPsec packet offload support (Part I)

Checks

Context | Check | Description
netdev/tree_selection | success | Guessing tree name failed - patch did not apply

Commit Message

Leon Romanovsky Dec. 2, 2022, 8:14 p.m. UTC
From: Leon Romanovsky <leonro@nvidia.com>

Implement mlx5 IPsec callback to update current lifetime counters.

Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
---
 .../mellanox/mlx5/core/en_accel/ipsec.c       | 63 +++++++++++++++++++
 .../mellanox/mlx5/core/en_accel/ipsec.h       |  6 ++
 .../mlx5/core/en_accel/ipsec_offload.c        | 57 +++++++++++++++++
 .../net/ethernet/mellanox/mlx5/core/lib/aso.h |  1 +
 4 files changed, 127 insertions(+)
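
For reference, the limits translation this patch performs can be illustrated with a small
standalone C program. It is only a sketch of the arithmetic: EXAMPLE_INF and the local
variable names are made up for illustration and stand in for XFRM_INF and the driver's
attrs fields; nothing below is driver or XFRM API code.

#include <stdio.h>
#include <stdint.h>

#define EXAMPLE_INF UINT64_MAX	/* stand-in for XFRM_INF, illustration only */

int main(void)
{
	uint64_t soft = 2, hard = 9;	/* limits as the XFRM user configures them */
	uint64_t hw_hard, hw_soft_cmp;

	/* Hardware counts down from the hard limit and fires the soft
	 * event when the counter crosses the programmed comparator,
	 * so the comparator is "hard - soft".
	 */
	hw_hard = hard;
	hw_soft_cmp = (soft == EXAMPLE_INF) ? EXAMPLE_INF : hard - soft;
	printf("HW hard limit %llu, soft comparator %llu\n",
	       (unsigned long long)hw_hard, (unsigned long long)hw_soft_cmp);

	/* Reverse direction, as in the curlft update: if a later query reads
	 * back 7 packets remaining, the total already passed is 9 - 7 = 2.
	 */
	uint64_t remaining = 7;
	printf("curlft.packets = %llu\n",
	       (unsigned long long)(hard - remaining));
	return 0;
}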

Patch

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index fe10f1a2a04a..8d0c605d4cdb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -83,6 +83,31 @@  static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry)
 	return false;
 }
 
+static void mlx5e_ipsec_init_limits(struct mlx5e_ipsec_sa_entry *sa_entry,
+				    struct mlx5_accel_esp_xfrm_attrs *attrs)
+{
+	struct xfrm_state *x = sa_entry->x;
+
+	attrs->hard_packet_limit = x->lft.hard_packet_limit;
+	if (x->lft.soft_packet_limit == XFRM_INF)
+		return;
+
+	/* Hardware decrements the hard_packet_limit counter throughout
+	 * the operation and fires an event when soft_packet_limit
+	 * is reached. This means that we need to subtract the numbers
+	 * in order to properly count the soft limit.
+	 *
+	 * As an example:
+	 * XFRM user sets the soft limit to 2 and the hard limit to 9 and
+	 * expects to see a soft event after 2 packets and a hard event
+	 * after 9 packets. In our case, the hard limit will be set
+	 * to 9 and the soft limit comparator to 7, so the user gets the
+	 * soft event after 2 packets.
+	 */
+	attrs->soft_packet_limit =
+		x->lft.hard_packet_limit - x->lft.soft_packet_limit;
+}
+
 static void
 mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
 				   struct mlx5_accel_esp_xfrm_attrs *attrs)
@@ -134,6 +159,8 @@  mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
 	attrs->family = x->props.family;
 	attrs->type = x->xso.type;
 	attrs->reqid = x->props.reqid;
+
+	mlx5e_ipsec_init_limits(sa_entry, attrs);
 }
 
 static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x)
@@ -222,6 +249,21 @@  static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x)
 			netdev_info(netdev, "Cannot offload without reqid\n");
 			return -EINVAL;
 		}
+
+		if (x->lft.hard_byte_limit != XFRM_INF ||
+		    x->lft.soft_byte_limit != XFRM_INF) {
+			netdev_info(netdev,
+				    "Device doesn't support limits in bytes\n");
+			return -EINVAL;
+		}
+
+		if (x->lft.soft_packet_limit >= x->lft.hard_packet_limit &&
+		    x->lft.hard_packet_limit != XFRM_INF) {
+			/* XFRM stack doesn't prevent such configuration :(. */
+			netdev_info(netdev,
+				    "Hard packet limit must be greater than soft one\n");
+			return -EINVAL;
+		}
 	}
 	return 0;
 }
@@ -415,6 +457,26 @@  static void mlx5e_xfrm_advance_esn_state(struct xfrm_state *x)
 	queue_work(sa_entry->ipsec->wq, &modify_work->work);
 }
 
+static void mlx5e_xfrm_update_curlft(struct xfrm_state *x)
+{
+	struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
+	int err;
+
+	lockdep_assert_held(&x->lock);
+
+	if (sa_entry->attrs.soft_packet_limit == XFRM_INF)
+		/* Limits are not configured, as the soft limit
+		 * must be lower than the hard limit.
+		 */
+		return;
+
+	err = mlx5e_ipsec_aso_query(sa_entry);
+	if (err)
+		return;
+
+	mlx5e_ipsec_aso_update_curlft(sa_entry, &x->curlft.packets);
+}
+
 static int mlx5e_xfrm_validate_policy(struct xfrm_policy *x)
 {
 	struct net_device *netdev = x->xdo.real_dev;
@@ -526,6 +588,7 @@  static const struct xfrmdev_ops mlx5e_ipsec_packet_xfrmdev_ops = {
 	.xdo_dev_offload_ok	= mlx5e_ipsec_offload_ok,
 	.xdo_dev_state_advance_esn = mlx5e_xfrm_advance_esn_state,
 
+	.xdo_dev_state_update_curlft = mlx5e_xfrm_update_curlft,
 	.xdo_dev_policy_add = mlx5e_xfrm_add_policy,
 	.xdo_dev_policy_free = mlx5e_xfrm_free_policy,
 };
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
index 724f2df14a97..aac1e6a83631 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
@@ -78,6 +78,8 @@  struct mlx5_accel_esp_xfrm_attrs {
 	u32 replay_window;
 	u32 authsize;
 	u32 reqid;
+	u64 hard_packet_limit;
+	u64 soft_packet_limit;
 };
 
 enum mlx5_ipsec_cap {
@@ -208,6 +210,10 @@  void mlx5_accel_esp_modify_xfrm(struct mlx5e_ipsec_sa_entry *sa_entry,
 int mlx5e_ipsec_aso_init(struct mlx5e_ipsec *ipsec);
 void mlx5e_ipsec_aso_cleanup(struct mlx5e_ipsec *ipsec);
 
+int mlx5e_ipsec_aso_query(struct mlx5e_ipsec_sa_entry *sa_entry);
+void mlx5e_ipsec_aso_update_curlft(struct mlx5e_ipsec_sa_entry *sa_entry,
+				   u64 *packets);
+
 void mlx5e_accel_ipsec_fs_read_stats(struct mlx5e_priv *priv,
 				     void *ipsec_stats);
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
index fc88454aaf8d..8790558ea859 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
@@ -83,6 +83,20 @@  static void mlx5e_ipsec_packet_setup(void *obj, u32 pdn,
 	MLX5_SET(ipsec_obj, obj, aso_return_reg, MLX5_IPSEC_ASO_REG_C_4_5);
 	if (attrs->dir == XFRM_DEV_OFFLOAD_OUT)
 		MLX5_SET(ipsec_aso, aso_ctx, mode, MLX5_IPSEC_ASO_INC_SN);
+
+	if (attrs->hard_packet_limit != XFRM_INF) {
+		MLX5_SET(ipsec_aso, aso_ctx, remove_flow_pkt_cnt,
+			 lower_32_bits(attrs->hard_packet_limit));
+		MLX5_SET(ipsec_aso, aso_ctx, hard_lft_arm, 1);
+		MLX5_SET(ipsec_aso, aso_ctx, remove_flow_enable, 1);
+	}
+
+	if (attrs->soft_packet_limit != XFRM_INF) {
+		MLX5_SET(ipsec_aso, aso_ctx, remove_flow_soft_lft,
+			 lower_32_bits(attrs->soft_packet_limit));
+
+		MLX5_SET(ipsec_aso, aso_ctx, soft_lft_arm, 1);
+	}
 }
 
 static int mlx5_create_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry)
@@ -298,3 +312,46 @@  void mlx5e_ipsec_aso_cleanup(struct mlx5e_ipsec *ipsec)
 			 DMA_BIDIRECTIONAL);
 	kfree(aso);
 }
+
+int mlx5e_ipsec_aso_query(struct mlx5e_ipsec_sa_entry *sa_entry)
+{
+	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
+	struct mlx5e_ipsec_aso *aso = ipsec->aso;
+	struct mlx5_core_dev *mdev = ipsec->mdev;
+	struct mlx5_wqe_aso_ctrl_seg *ctrl;
+	struct mlx5e_hw_objs *res;
+	struct mlx5_aso_wqe *wqe;
+	u8 ds_cnt;
+
+	res = &mdev->mlx5e_res.hw_objs;
+
+	memset(aso->ctx, 0, sizeof(aso->ctx));
+	wqe = mlx5_aso_get_wqe(aso->aso);
+	ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS);
+	mlx5_aso_build_wqe(aso->aso, ds_cnt, wqe, sa_entry->ipsec_obj_id,
+			   MLX5_ACCESS_ASO_OPC_MOD_IPSEC);
+
+	ctrl = &wqe->aso_ctrl;
+	ctrl->va_l =
+		cpu_to_be32(lower_32_bits(aso->dma_addr) | ASO_CTRL_READ_EN);
+	ctrl->va_h = cpu_to_be32(upper_32_bits(aso->dma_addr));
+	ctrl->l_key = cpu_to_be32(res->mkey);
+
+	mlx5_aso_post_wqe(aso->aso, false, &wqe->ctrl);
+	return mlx5_aso_poll_cq(aso->aso, false);
+}
+
+void mlx5e_ipsec_aso_update_curlft(struct mlx5e_ipsec_sa_entry *sa_entry,
+				   u64 *packets)
+{
+	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
+	struct mlx5e_ipsec_aso *aso = ipsec->aso;
+	u64 hard_cnt;
+
+	hard_cnt = MLX5_GET(ipsec_aso, aso->ctx, remove_flow_pkt_cnt);
+	/* HW decreases the limit till it reaches zero to fire an event.
+	 * We need to fix the calculation, so the returned count is the total
+	 * number of passed packets and not how many are left.
+	 */
+	*packets = sa_entry->attrs.hard_packet_limit - hard_cnt;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.h
index c8fc3c838642..afb078bbb8ef 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.h
@@ -15,6 +15,7 @@ 
 #define MLX5_WQE_CTRL_WQE_OPC_MOD_SHIFT 24
 #define MLX5_MACSEC_ASO_DS_CNT (DIV_ROUND_UP(sizeof(struct mlx5_aso_wqe), MLX5_SEND_WQE_DS))
 
+#define ASO_CTRL_READ_EN BIT(0)
 struct mlx5_wqe_aso_ctrl_seg {
 	__be32  va_h;
 	__be32  va_l; /* include read_enable */