@@ -109,7 +109,8 @@ mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/ktls_stats.o \
en_accel/fs_tcp.o en_accel/ktls.o en_accel/ktls_txrx.o \
en_accel/ktls_tx.o en_accel/ktls_rx.o
-mlx5_core-$(CONFIG_MLX5_EN_PSP) += en_accel/nisp.o en_accel/nisp_offload.o en_accel/nisp_fs.o
+mlx5_core-$(CONFIG_MLX5_EN_PSP) += en_accel/nisp.o en_accel/nisp_offload.o en_accel/nisp_fs.o \
+ en_accel/nisp_rxtx.o
mlx5_core-$(CONFIG_MLX5_SW_STEERING) += steering/dr_domain.o steering/dr_table.o \
steering/dr_matcher.o steering/dr_rule.o \
@@ -47,6 +47,7 @@
#include <linux/rhashtable.h>
#include <net/udp_tunnel.h>
#include <net/switchdev.h>
+#include <net/psp/types.h>
#include <net/xdp.h>
#include <linux/dim.h>
#include <linux/bits.h>
@@ -61,6 +62,7 @@
#include "en/rx_res.h"
#include "en/selq.h"
#include "lib/sd.h"
+#include "lib/psp_defs.h"
extern const struct net_device_ops mlx5e_netdev_ops;
struct page_pool;
@@ -68,7 +70,7 @@ struct page_pool;
#define MLX5E_METADATA_ETHER_TYPE (0x8CE4)
#define MLX5E_METADATA_ETHER_LEN 8
-#define MLX5E_ETH_HARD_MTU (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)
+#define MLX5E_ETH_HARD_MTU (ETH_HLEN + PSP_ENCAP_HLEN + PSP_TRL_SIZE + VLAN_HLEN + ETH_FCS_LEN)
#define MLX5E_HW2SW_MTU(params, hwmtu) ((hwmtu) - ((params)->hard_mtu))
#define MLX5E_SW2HW_MTU(params, swmtu) ((swmtu) + ((params)->hard_mtu))
@@ -43,6 +43,7 @@
#include "en.h"
#include "en/txrx.h"
#include "en_accel/nisp_fs.h"
+#include "en_accel/nisp_rxtx.h"
#if IS_ENABLED(CONFIG_GENEVE)
#include <net/geneve.h>
@@ -114,6 +115,9 @@ struct mlx5e_accel_tx_state {
#ifdef CONFIG_MLX5_EN_IPSEC
struct mlx5e_accel_tx_ipsec_state ipsec;
#endif
+#ifdef CONFIG_MLX5_EN_PSP
+ struct mlx5e_accel_tx_nisp_state nisp_st;
+#endif
};
static inline bool mlx5e_accel_tx_begin(struct net_device *dev,
@@ -132,6 +136,13 @@ static inline bool mlx5e_accel_tx_begin(struct net_device *dev,
return false;
#endif
+#ifdef CONFIG_MLX5_EN_PSP
+ if (mlx5e_psp_is_offload(skb, dev)) {
+ if (unlikely(!mlx5e_nisp_handle_tx_skb(dev, skb, &state->nisp_st)))
+ return false;
+ }
+#endif
+
#ifdef CONFIG_MLX5_EN_IPSEC
if (test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state) && xfrm_offload(skb)) {
if (unlikely(!mlx5e_ipsec_handle_tx_skb(dev, skb, &state->ipsec)))
@@ -152,8 +163,14 @@ static inline bool mlx5e_accel_tx_begin(struct net_device *dev,
}
static inline unsigned int mlx5e_accel_tx_ids_len(struct mlx5e_txqsq *sq,
+ struct sk_buff *skb,
struct mlx5e_accel_tx_state *state)
{
+#ifdef CONFIG_MLX5_EN_PSP
+ if (mlx5e_psp_is_offload_state(&state->nisp_st))
+ return mlx5e_nisp_tx_ids_len(&state->nisp_st);
+#endif
+
#ifdef CONFIG_MLX5_EN_IPSEC
if (test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state))
return mlx5e_ipsec_tx_ids_len(&state->ipsec);
@@ -167,8 +184,14 @@ static inline unsigned int mlx5e_accel_tx_ids_len(struct mlx5e_txqsq *sq,
static inline void mlx5e_accel_tx_eseg(struct mlx5e_priv *priv,
struct sk_buff *skb,
+ struct mlx5e_accel_tx_state *accel,
struct mlx5_wqe_eth_seg *eseg, u16 ihs)
{
+#ifdef CONFIG_MLX5_EN_PSP
+ if (mlx5e_psp_is_offload_state(&accel->nisp_st))
+ mlx5e_nisp_tx_build_eseg(priv, skb, &accel->nisp_st, eseg);
+#endif
+
#ifdef CONFIG_MLX5_EN_IPSEC
if (xfrm_offload(skb))
mlx5e_ipsec_tx_build_eseg(priv, skb, eseg);
@@ -194,6 +217,11 @@ static inline void mlx5e_accel_tx_finish(struct mlx5e_txqsq *sq,
mlx5e_ktls_handle_tx_wqe(&wqe->ctrl, &state->tls);
#endif
+#ifdef CONFIG_MLX5_EN_PSP
+ if (mlx5e_psp_is_offload_state(&state->nisp_st))
+ mlx5e_nisp_handle_tx_wqe(wqe, &state->nisp_st, inlseg);
+#endif
+
#ifdef CONFIG_MLX5_EN_IPSEC
if (test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state) &&
state->ipsec.xo && state->ipsec.tailen)
new file mode 100644
@@ -0,0 +1,225 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#include <linux/skbuff.h>
+#include <linux/ip.h>
+#include <linux/udp.h>
+#include <net/protocol.h>
+#include <net/udp.h>
+#include <net/ip6_checksum.h>
+#include <net/psp/types.h>
+
+#include "en.h"
+#include "../nisp.h"
+#include "en_accel/nisp_rxtx.h"
+#include "en_accel/nisp.h"
+#include "lib/psp_defs.h"
+
+static void mlx5e_nisp_set_swp(struct sk_buff *skb,
+ struct mlx5e_accel_tx_nisp_state *nisp_st,
+ struct mlx5_wqe_eth_seg *eseg)
+{
+ /* Tunnel Mode:
+ * SWP: OutL3 InL3 InL4
+ * Pkt: MAC IP UDP PSP IP L4
+ *
+ * Transport Mode:
+ * SWP: OutL3 OutL4
+ * Pkt: MAC IP UDP PSP L4
+ *
+ * Tunnel (VXLAN TCP/UDP) over Transport Mode:
+ * SWP: OutL3 InL3 InL4
+ * Pkt: MAC IP UDP PSP UDP VXLAN IP L4
+ */
+ u8 inner_ipproto = 0;
+ struct ethhdr *eth;
+
+ /* Shared settings; SWP offsets are given to the device in 16-bit words, hence the division by two */
+ eseg->swp_outer_l3_offset = skb_network_offset(skb) / 2;
+ if (skb->protocol == htons(ETH_P_IPV6))
+ eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L3_IPV6;
+
+ if (skb->inner_protocol_type == ENCAP_TYPE_IPPROTO) {
+ inner_ipproto = skb->inner_ipproto;
+ /* Set additional SWP flags for packets of type IP|UDP|PSP|[ TCP | UDP ] */
+ switch (inner_ipproto) {
+ case IPPROTO_UDP:
+ eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
+ fallthrough;
+ case IPPROTO_TCP:
+ eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
+ break;
+ default:
+ break;
+ }
+ } else {
+ /* Tunnels with an inner Ethernet header, e.g. VXLAN */
+ if (skb->inner_protocol_type != ENCAP_TYPE_ETHER)
+ return;
+
+ eth = (struct ethhdr *)skb_inner_mac_header(skb);
+ switch (ntohs(eth->h_proto)) {
+ case ETH_P_IP:
+ inner_ipproto = ((struct iphdr *)((char *)skb->data +
+ skb_inner_network_offset(skb)))->protocol;
+ break;
+ case ETH_P_IPV6:
+ inner_ipproto = ((struct ipv6hdr *)((char *)skb->data +
+ skb_inner_network_offset(skb)))->nexthdr;
+ break;
+ default:
+ break;
+ }
+
+ /* Tunnel (VXLAN TCP/UDP) over transport-mode PSP, i.e. the PSP payload is a VXLAN tunnel */
+ switch (inner_ipproto) {
+ case IPPROTO_UDP:
+ eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
+ fallthrough;
+ case IPPROTO_TCP:
+ eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
+ eseg->swp_inner_l4_offset =
+ (skb->csum_start + skb->head - skb->data) / 2;
+ if (skb->protocol == htons(ETH_P_IPV6))
+ eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
+ break;
+ default:
+ break;
+ }
+
+ nisp_st->inner_ipproto = inner_ipproto;
+ }
+}
+
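+/* Look up the PSP association for this skb under the RCU read lock and,
+ * if one exists, capture the TX parameters (trailer length, SPI, PSP
+ * version, device key ID) needed later when building the WQE.
+ */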
+static bool mlx5e_nisp_set_state(struct mlx5e_priv *priv,
+ struct sk_buff *skb,
+ struct mlx5e_accel_tx_nisp_state *nisp_st)
+{
+ struct psp_assoc *pas;
+ bool ret = false;
+
+ rcu_read_lock();
+ pas = psp_skb_get_assoc_rcu(skb);
+ if (!pas)
+ goto out;
+
+ ret = true;
+ nisp_st->tailen = PSP_TRL_SIZE;
+ nisp_st->spi = pas->tx.spi;
+ nisp_st->ver = pas->version;
+ memcpy(&nisp_st->keyid, pas->drv_data, sizeof(nisp_st->keyid));
+
+out:
+ rcu_read_unlock();
+ return ret;
+}
+
+void mlx5e_nisp_tx_build_eseg(struct mlx5e_priv *priv, struct sk_buff *skb,
+ struct mlx5e_accel_tx_nisp_state *nisp_st,
+ struct mlx5_wqe_eth_seg *eseg)
+{
+ if (!mlx5_is_nisp_device(priv->mdev))
+ return;
+
+ if (unlikely(skb->protocol != htons(ETH_P_IP) &&
+ skb->protocol != htons(ETH_P_IPV6)))
+ return;
+
+ mlx5e_nisp_set_swp(skb, nisp_st, eseg);
+ /* Workaround for PSP LSO on ConnectX-7: clear the SWP L3 offsets */
+ eseg->swp_outer_l3_offset = 0;
+ eseg->swp_inner_l3_offset = 0;
+
+ eseg->flow_table_metadata |= cpu_to_be32(nisp_st->keyid);
+ eseg->trailer |= cpu_to_be32(MLX5_ETH_WQE_INSERT_TRAILER) |
+ cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_OUTER_L4_ASSOC);
+}
+
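+/* Describe the PSP trailer as an inline data segment so the device
+ * accounts for the extra tailen bytes.
+ */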
+void mlx5e_nisp_handle_tx_wqe(struct mlx5e_tx_wqe *wqe,
+ struct mlx5e_accel_tx_nisp_state *nisp_st,
+ struct mlx5_wqe_inline_seg *inlseg)
+{
+ inlseg->byte_count = cpu_to_be32(nisp_st->tailen | MLX5_INLINE_SEG);
+}
+
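+/* Fill in the UDP and PSP headers of a freshly encapsulated packet. The IV
+ * is left zeroed here; the working assumption is that the encrypting device
+ * fills it in on transmit.
+ */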
+static void psp_write_headers(struct net *net, struct sk_buff *skb,
+ __be32 spi, u8 ver, unsigned int udp_len,
+ __be16 sport)
+{
+ struct udphdr *uh = udp_hdr(skb);
+ struct psphdr *psph = (struct psphdr *)(uh + 1);
+
+ uh->dest = htons(PSP_DEFAULT_UDP_PORT);
+ uh->source = udp_flow_src_port(net, skb, 0, 0, false);
+ uh->check = 0;
+ uh->len = htons(udp_len);
+
+ psph->nexthdr = IPPROTO_TCP;
+ psph->hdrlen = PSP_HDRLEN_NOOPT;
+ psph->crypt_offset = 0;
+ psph->verfl = FIELD_PREP(PSPHDR_VERFL_VERSION, ver) |
+ FIELD_PREP(PSPHDR_VERFL_ONE, 1);
+ psph->spi = spi;
+ memset(&psph->iv, 0, sizeof(psph->iv));
+}
+
+/* Encapsulate a TCP packet with PSP by inserting the UDP+PSP headers between
+ * the IP and TCP headers and filling them in. Assumes an IPv6 outer header.
+ */
+static bool psp_encapsulate(struct net *net, struct sk_buff *skb,
+ __be32 spi, u8 ver, __be16 sport)
+{
+ u32 network_len = skb_network_header_len(skb);
+ u32 eth_len = skb_mac_header_len(skb);
+ u32 bufflen = eth_len + network_len;
+ struct ipv6hdr *ip6;
+
+ if (skb_cow_head(skb, PSP_ENCAP_HLEN))
+ return false;
+
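+ /* Grow the packet at the front, then shift the MAC+IP headers up so a
+ * PSP_ENCAP_HLEN gap opens between the IP and TCP headers for the new
+ * UDP+PSP encapsulation.
+ */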
+ skb_push(skb, PSP_ENCAP_HLEN);
+ skb->mac_header -= PSP_ENCAP_HLEN;
+ skb->network_header -= PSP_ENCAP_HLEN;
+ skb->transport_header -= PSP_ENCAP_HLEN;
+ memmove(skb->data, skb->data + PSP_ENCAP_HLEN, bufflen);
+
+ ip6 = ipv6_hdr(skb);
+ skb_set_inner_ipproto(skb, IPPROTO_TCP);
+ ip6->nexthdr = IPPROTO_UDP;
+ be16_add_cpu(&ip6->payload_len, PSP_ENCAP_HLEN);
+
+ skb_set_inner_transport_header(skb, skb_transport_offset(skb) + PSP_ENCAP_HLEN);
+ skb->encapsulation = 1;
+ psp_write_headers(net, skb, spi, ver,
+ skb->len - skb_transport_offset(skb), sport);
+
+ return true;
+}
+
+bool mlx5e_nisp_handle_tx_skb(struct net_device *netdev,
+ struct sk_buff *skb,
+ struct mlx5e_accel_tx_nisp_state *nisp_st)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct net *net = sock_net(skb->sk);
+ const struct ipv6hdr *ip6;
+ struct tcphdr *th;
+
+ if (!mlx5e_nisp_set_state(priv, skb, nisp_st))
+ return true;
+
+ /* Add the UDP+PSP encapsulation to the packet */
+ if (!psp_encapsulate(net, skb, nisp_st->spi, nisp_st->ver, 0)) {
+ kfree_skb_reason(skb, SKB_DROP_REASON_PSP_OUTPUT);
+ return false;
+ }
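+ /* For GSO, seed the inner TCP pseudo-header checksum with the length of
+ * an individual segment so the device can finalize it per segment.
+ */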
+ if (skb_is_gso(skb)) {
+ ip6 = ipv6_hdr(skb);
+ th = inner_tcp_hdr(skb);
+
+ th->check = ~tcp_v6_check(skb_shinfo(skb)->gso_size + inner_tcp_hdrlen(skb),
+ &ip6->saddr, &ip6->daddr, 0);
+ }
+
+ return true;
+}
new file mode 100644
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef __MLX5E_NISP_RXTX_H__
+#define __MLX5E_NISP_RXTX_H__
+
+#include <linux/skbuff.h>
+#include <net/xfrm.h>
+#include <net/psp.h>
+#include "en.h"
+#include "en/txrx.h"
+
+struct mlx5e_accel_tx_nisp_state {
+ u32 tailen; /* bytes of PSP trailer to reserve; non-zero marks offload */
+ u32 keyid; /* device key ID taken from the association's drv_data */
+ __be32 spi;
+ u8 inner_ipproto;
+ u8 ver; /* PSP version */
+};
+
+#ifdef CONFIG_MLX5_EN_PSP
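+/* tailen is set only once a PSP association has been found for the skb
+ * (see mlx5e_nisp_set_state()), so a non-zero value marks the packet as
+ * PSP-offloaded.
+ */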
+static inline bool mlx5e_psp_is_offload_state(struct mlx5e_accel_tx_nisp_state *nisp_state)
+{
+ return (nisp_state->tailen != 0);
+}
+
+static inline bool mlx5e_psp_is_offload(struct sk_buff *skb, struct net_device *netdev)
+{
+ bool ret;
+
+ rcu_read_lock();
+ ret = !!psp_skb_get_assoc_rcu(skb);
+ rcu_read_unlock();
+ return ret;
+}
+
+bool mlx5e_nisp_handle_tx_skb(struct net_device *netdev,
+ struct sk_buff *skb,
+ struct mlx5e_accel_tx_nisp_state *nisp_st);
+
+void mlx5e_nisp_tx_build_eseg(struct mlx5e_priv *priv, struct sk_buff *skb,
+ struct mlx5e_accel_tx_nisp_state *nisp_st,
+ struct mlx5_wqe_eth_seg *eseg);
+
+void mlx5e_nisp_handle_tx_wqe(struct mlx5e_tx_wqe *wqe,
+ struct mlx5e_accel_tx_nisp_state *nisp_st,
+ struct mlx5_wqe_inline_seg *inlseg);
+
+static inline bool mlx5e_nisp_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ struct mlx5e_accel_tx_nisp_state *nisp_st,
+ struct mlx5_wqe_eth_seg *eseg)
+{
+ u8 inner_ipproto;
+
+ if (!mlx5e_psp_is_offload_state(nisp_st))
+ return false;
+
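+ /* With the software parser (SWP) in use the device treats the PSP
+ * payload's transport header as "inner", so the L4 checksum is always
+ * requested through the INNER flags, tunnel or not.
+ */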
+ inner_ipproto = nisp_st->inner_ipproto;
+ eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
+ if (inner_ipproto) {
+ eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM;
+ if (inner_ipproto == IPPROTO_TCP || inner_ipproto == IPPROTO_UDP)
+ eseg->cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM;
+ if (likely(skb->ip_summed == CHECKSUM_PARTIAL))
+ sq->stats->csum_partial_inner++;
+ } else if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
+ eseg->cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM;
+ sq->stats->csum_partial_inner++;
+ }
+
+ return true;
+}
+
+static inline unsigned int mlx5e_nisp_tx_ids_len(struct mlx5e_accel_tx_nisp_state *nisp_st)
+{
+ return nisp_st->tailen;
+}
+#else
+static inline bool mlx5e_psp_is_offload_state(struct mlx5e_accel_tx_nisp_state *nisp_state)
+{
+ return false;
+}
+
+static inline bool mlx5e_psp_is_offload(struct sk_buff *skb, struct net_device *netdev)
+{
+ return false;
+}
+
+static inline bool mlx5e_nisp_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ struct mlx5e_accel_tx_nisp_state *nisp_st,
+ struct mlx5_wqe_eth_seg *eseg)
+{
+ return false;
+}
+#endif
+#endif
@@ -39,6 +39,7 @@
#include "ipoib/ipoib.h"
#include "en_accel/en_accel.h"
#include "en_accel/ipsec_rxtx.h"
+#include "en_accel/nisp_rxtx.h"
#include "en_accel/macsec.h"
#include "en/ptp.h"
#include <net/ipv6.h>
@@ -120,6 +121,11 @@ mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5e_accel_tx_state *accel,
struct mlx5_wqe_eth_seg *eseg)
{
+#ifdef CONFIG_MLX5_EN_PSP
+ if (unlikely(mlx5e_nisp_txwqe_build_eseg_csum(sq, skb, &accel->nisp_st, eseg)))
+ return;
+#endif
+
if (unlikely(mlx5e_ipsec_txwqe_build_eseg_csum(sq, skb, eseg)))
return;
@@ -294,7 +300,7 @@ static void mlx5e_sq_xmit_prepare(struct mlx5e_txqsq *sq, struct sk_buff *skb,
stats->packets++;
}
- attr->insz = mlx5e_accel_tx_ids_len(sq, accel);
+ attr->insz = mlx5e_accel_tx_ids_len(sq, skb, accel);
stats->bytes += attr->num_bytes;
}
@@ -663,7 +669,7 @@ static void mlx5e_txwqe_build_eseg(struct mlx5e_priv *priv, struct mlx5e_txqsq *
struct sk_buff *skb, struct mlx5e_accel_tx_state *accel,
struct mlx5_wqe_eth_seg *eseg, u16 ihs)
{
- mlx5e_accel_tx_eseg(priv, skb, eseg, ihs);
+ mlx5e_accel_tx_eseg(priv, skb, accel, eseg, ihs);
mlx5e_txwqe_build_eseg_csum(sq, skb, accel, eseg);
if (unlikely(sq->ptpsq))
mlx5e_cqe_ts_id_eseg(sq->ptpsq, skb, eseg);
new file mode 100644
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef _LIB_PSP_DEFS_H
+#define _LIB_PSP_DEFS_H
+
+/* PSP Security Protocol (PSP) header layout:
+ *
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Next Header | Hdr Ext Len | Crypt Offset | R |Version|V|1|
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Security Parameters Index (SPI) |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | |
+ * + Initialization Vector (IV) +
+ * | |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Virtualization Cookie (VC) [Optional] |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Pad to 8*N bytes [if needed] |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+
+/* Total length of the headers added by PSP encapsulation (UDP + PSP) */
+#define PSP_ENCAP_HLEN (sizeof(struct udphdr) + sizeof(struct psphdr))
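+/* That is 8 bytes (UDP) + 16 bytes (PSP base header: 4 fixed + 4 SPI + 8 IV)
+ * = 24 bytes, assuming no virtualization cookie is present.
+ */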
+
+#endif /* _LIB_PSP_DEFS_H */