--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
@@ -506,4 +506,14 @@ static inline struct mlx5e_mpw_info *mlx5e_get_mpw_info(struct mlx5e_rq *rq, int
 	return (struct mlx5e_mpw_info *)((char *)rq->mpwqe.info + array_size(i, isz));
 }
+
+static inline u8 get_ip_proto(void *data, int network_depth, __be16 proto)
+{
+	void *ip_p = data + network_depth;
+
+	return (proto == htons(ETH_P_IP)) ? ((struct iphdr *)ip_p)->protocol :
+					    ((struct ipv6hdr *)ip_p)->nexthdr;
+}
+
+#define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN)
 
 #endif
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -270,10 +270,110 @@ static int mlx5e_xdp_rx_vlan_tag(const struct xdp_md *ctx, u16 *vlan_tci,
 	return 0;
 }
 
+static __be16 xdp_buff_last_ethertype(const struct xdp_buff *xdp,
+				      int *network_offset)
+{
+	__be16 proto = ((struct ethhdr *)xdp->data)->h_proto;
+	struct vlan_hdr *remaining_data = xdp->data + ETH_HLEN;
+	u8 allowed_depth = VLAN_MAX_DEPTH;
+
+	while (eth_type_vlan(proto)) {
+		struct vlan_hdr *next_data = remaining_data + 1;
+
+		if ((void *)next_data > xdp->data_end || !--allowed_depth)
+			return 0;
+		proto = remaining_data->h_vlan_encapsulated_proto;
+		remaining_data = next_data;
+	}
+
+	*network_offset = (void *)remaining_data - xdp->data;
+	return proto;
+}
+
+static bool xdp_csum_needs_fixup(const struct xdp_buff *xdp, int network_depth,
+				 __be16 proto)
+{
+	struct ipv6hdr *ip6;
+	struct iphdr *ip4;
+	int pkt_len;
+
+	if (network_depth > ETH_HLEN)
+		return true;
+
+	switch (proto) {
+	case htons(ETH_P_IP):
+		ip4 = (struct iphdr *)(xdp->data + network_depth);
+		pkt_len = network_depth + ntohs(ip4->tot_len);
+		break;
+	case htons(ETH_P_IPV6):
+		ip6 = (struct ipv6hdr *)(xdp->data + network_depth);
+		pkt_len = network_depth + sizeof(*ip6) + ntohs(ip6->payload_len);
+		break;
+	default:
+		return true;
+	}
+
+	if (likely(pkt_len >= xdp->data_end - xdp->data))
+		return false;
+
+	return true;
+}
+
+static int mlx5e_xdp_rx_csum(const struct xdp_md *ctx,
+			     enum xdp_csum_status *csum_status,
+			     __wsum *csum)
+{
+	const struct mlx5e_xdp_buff *_ctx = (void *)ctx;
+	const struct mlx5_cqe64 *cqe = _ctx->cqe;
+	const struct mlx5e_rq *rq = _ctx->rq;
+	__be16 last_ethertype;
+	int network_offset;
+	u8 lro_num_seg;
+
+	lro_num_seg = be32_to_cpu(cqe->srqn) >> 24;
+	if (lro_num_seg) {
+		*csum_status = XDP_CHECKSUM_VERIFIED;
+		return 0;
+	}
+
+	if (test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state) ||
+	    get_cqe_tls_offload(cqe))
+		goto csum_unnecessary;
+
+	if (short_frame(_ctx->xdp.data_end - _ctx->xdp.data))
+		goto csum_unnecessary;
+
+	last_ethertype = xdp_buff_last_ethertype(&_ctx->xdp, &network_offset);
+	if (last_ethertype != htons(ETH_P_IP) && last_ethertype != htons(ETH_P_IPV6))
+		goto csum_unnecessary;
+	if (unlikely(get_ip_proto(_ctx->xdp.data, network_offset,
+				  last_ethertype) == IPPROTO_SCTP))
+		goto csum_unnecessary;
+
+	*csum_status = XDP_CHECKSUM_COMPLETE;
+	*csum = csum_unfold((__force __sum16)cqe->check_sum);
+
+	if (test_bit(MLX5E_RQ_STATE_CSUM_FULL, &rq->state))
+		goto csum_unnecessary;
+
+	if (unlikely(xdp_csum_needs_fixup(&_ctx->xdp, network_offset,
+					  last_ethertype)))
+		*csum_status = 0;
+
+csum_unnecessary:
+	if (likely((cqe->hds_ip_ext & CQE_L3_OK) &&
+		   (cqe->hds_ip_ext & CQE_L4_OK))) {
+		*csum_status |= XDP_CHECKSUM_VERIFIED;
+	}
+
+	return *csum_status ? 0 : -ENODATA;
+}
+
 const struct xdp_metadata_ops mlx5e_xdp_metadata_ops = {
 	.xmo_rx_timestamp = mlx5e_xdp_rx_timestamp,
 	.xmo_rx_hash = mlx5e_xdp_rx_hash,
 	.xmo_rx_vlan_tag = mlx5e_xdp_rx_vlan_tag,
+	.xmo_rx_csum = mlx5e_xdp_rx_csum,
 };
 
 /* returns true if packet was consumed by xdp */
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -1374,16 +1374,6 @@ static inline void mlx5e_enable_ecn(struct mlx5e_rq *rq, struct sk_buff *skb)
 	rq->stats->ecn_mark += !!rc;
 }
 
-static u8 get_ip_proto(struct sk_buff *skb, int network_depth, __be16 proto)
-{
-	void *ip_p = skb->data + network_depth;
-
-	return (proto == htons(ETH_P_IP)) ? ((struct iphdr *)ip_p)->protocol :
-					    ((struct ipv6hdr *)ip_p)->nexthdr;
-}
-
-#define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN)
-
 #define MAX_PADDING 8
 
 static void
@@ -1493,7 +1483,7 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
 		goto csum_unnecessary;
 
 	if (likely(is_last_ethertype_ip(skb, &network_depth, &proto))) {
-		if (unlikely(get_ip_proto(skb, network_depth, proto) == IPPROTO_SCTP))
+		if (unlikely(get_ip_proto(skb->data, network_depth, proto) == IPPROTO_SCTP))
 			goto csum_unnecessary;
 
 		stats->csum_complete++;
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -908,7 +908,7 @@ static inline bool cqe_is_tunneled(struct mlx5_cqe64 *cqe)
 	return cqe->tls_outer_l3_tunneled & 0x1;
 }
 
-static inline u8 get_cqe_tls_offload(struct mlx5_cqe64 *cqe)
+static inline u8 get_cqe_tls_offload(const struct mlx5_cqe64 *cqe)
 {
 	return (cqe->tls_outer_l3_tunneled >> 3) & 0x3;
 }
Implement the .xmo_rx_csum() callback to expose checksum information to
XDP code. This version duplicates a lot of logic from the skb path,
because refactoring would be much more complex than the implementation
itself: the checksum code is too tightly coupled to the skb concept.

Intended logic differences from the skb path:

- When the checksum does not cover the whole packet, no fixups are
  performed; such a packet is treated as one without a complete
  checksum. This is only to keep hints-unrelated code from ballooning
  the patch.

- With the hints API we can now report both the complete and the
  validated checksum statuses, which is why XDP_CHECKSUM_VERIFIED is
  ORed into the status. I hope this represents the HW logic well.

Signed-off-by: Larysa Zaremba <larysa.zaremba@intel.com>
---
 .../net/ethernet/mellanox/mlx5/core/en/txrx.h |  10 ++
 .../net/ethernet/mellanox/mlx5/core/en/xdp.c  | 100 ++++++++++++++++++
 .../net/ethernet/mellanox/mlx5/core/en_rx.c   |  12 +--
 include/linux/mlx5/device.h                   |   2 +-
 4 files changed, 112 insertions(+), 12 deletions(-)
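
A note on the type conversion in mlx5e_xdp_rx_csum(): the CQE carries a
folded 16-bit one's-complement sum (__sum16), while the hints API, like
skb->csum, works with the unfolded 32-bit __wsum. The csum_unfold()
helper from include/net/checksum.h is essentially a widening cast:

static inline __wsum csum_unfold(__sum16 n)
{
	/* value-preserving for one's-complement arithmetic */
	return (__force __wsum)n;
}

Widening keeps the value usable with csum_add()/csum_partial(), so a
consumer can extend or verify the reported checksum incrementally.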
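
To make the consumer side concrete, below is a minimal BPF-side sketch.
It is illustrative only: the kfunc name bpf_xdp_metadata_rx_csum mirrors
the existing bpf_xdp_metadata_rx_timestamp/rx_hash naming, and the enum
bit values are assumptions inferred from how this patch ORs the two
statuses together; the authoritative definitions live in the series'
uapi patch.

/* SPDX-License-Identifier: GPL-2.0 */
/* Hypothetical consumer sketch, not part of this patch. */
#include <linux/types.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* Assumed bit layout: the driver ORs XDP_CHECKSUM_VERIFIED into a
 * status that may already hold XDP_CHECKSUM_COMPLETE, so the two must
 * be distinct bits.
 */
enum xdp_csum_status {
	XDP_CHECKSUM_VERIFIED = (1 << 0),
	XDP_CHECKSUM_COMPLETE = (1 << 1),
};

extern int bpf_xdp_metadata_rx_csum(const struct xdp_md *ctx,
				    enum xdp_csum_status *csum_status,
				    __wsum *csum) __ksym;

SEC("xdp")
int rx_csum_sketch(struct xdp_md *ctx)
{
	enum xdp_csum_status status = 0;
	__wsum csum = 0;

	/* -ENODATA: the driver found no checksum information it
	 * trusts for this frame.
	 */
	if (bpf_xdp_metadata_rx_csum(ctx, &status, &csum))
		return XDP_PASS;

	if (status & XDP_CHECKSUM_VERIFIED)
		/* HW validated L3/L4 (CQE_L3_OK && CQE_L4_OK) */
		bpf_printk("csum verified by HW");

	if (status & XDP_CHECKSUM_COMPLETE)
		/* unfolded sum over the packet, the XDP analogue of
		 * skb->csum under CHECKSUM_COMPLETE
		 */
		bpf_printk("complete csum: 0x%x", (__u32)csum);

	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";

Note that a frame can carry both flags at once: a clean TCP/IPv4 frame
gets XDP_CHECKSUM_COMPLETE from the CQE checksum and, independently,
XDP_CHECKSUM_VERIFIED from CQE_L3_OK/CQE_L4_OK.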