@@ -205,6 +205,7 @@ enum ice_feature {
ICE_F_SMA_CTRL,
ICE_F_CGU,
ICE_F_GNSS,
+ ICE_F_GCS,
ICE_F_ROCE_LAG,
ICE_F_SRIOV_LAG,
ICE_F_MBX_LIMIT,
@@ -229,7 +229,7 @@ struct ice_32b_rx_flex_desc_nic {
__le16 status_error1;
u8 flexi_flags2;
u8 ts_low;
- __le16 l2tag2_1st;
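+ /* E830: raw packet checksum reported for generic checksum offload (GCS) */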
+ __le16 raw_csum;
__le16 l2tag2_2nd;
/* Qword 3 */
@@ -500,10 +500,15 @@ enum ice_tx_desc_len_fields {
struct ice_tx_ctx_desc {
__le32 tunneling_params;
__le16 l2tag2;
- __le16 rsvd;
+ __le16 gcs;
__le64 qw1;
};
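+/* GCS word layout in the Tx context descriptor, as suggested by the masks
+ * below and their use in ice_tx_csum(): bits 7:0 hold the checksum start
+ * and bits 11:8 the checksum insertion offset, both in 16-bit word units;
+ * bits 14:12 select the checksum opcode, where CSUM_PSH requests that the
+ * computed checksum be written back into the packet.
+ */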
+#define ICE_TX_GCS_DESC_START_M GENMASK(7, 0)
+#define ICE_TX_GCS_DESC_OFFSET_M GENMASK(11, 8)
+#define ICE_TX_GCS_DESC_TYPE_M GENMASK(14, 12)
+#define ICE_TX_GCS_DESC_CSUM_PSH BIT(12)
+
#define ICE_TXD_CTX_QW1_CMD_S 4
#define ICE_TXD_CTX_QW1_CMD_M (0x7FUL << ICE_TXD_CTX_QW1_CMD_S)
@@ -1401,6 +1401,10 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
ring->flags |= ICE_TX_FLAGS_RING_VLAN_L2TAG2;
else
ring->flags |= ICE_TX_FLAGS_RING_VLAN_L2TAG1;
+
+ if (ice_is_feature_supported(pf, ICE_F_GCS))
+ ring->flags |= ICE_TX_FLAGS_RING_GCS;
+
WRITE_ONCE(vsi->tx_rings[i], ring);
}
@@ -1420,6 +1424,10 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
ring->dev = dev;
ring->count = vsi->num_rx_desc;
ring->cached_phctime = pf->ptp.cached_phc_time;
+
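+ /* Mark the ring GCS-capable; ice_rx_csum() checks this ring flag in
+ * the hot path rather than the PF feature bitmap.
+ */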
+ if (ice_is_feature_supported(pf, ICE_F_GCS))
+ ring->flags |= ICE_RX_FLAGS_RING_GCS;
+
WRITE_ONCE(vsi->rx_rings[i], ring);
}
@@ -3881,8 +3889,10 @@ void ice_init_feature_support(struct ice_pf *pf)
break;
}
- if (pf->hw.mac_type == ICE_MAC_E830)
+ if (pf->hw.mac_type == ICE_MAC_E830) {
ice_set_feature_support(pf, ICE_F_MBX_LIMIT);
+ ice_set_feature_support(pf, ICE_F_GCS);
+ }
}
/**
@@ -3666,6 +3666,12 @@ void ice_set_netdev_features(struct net_device *netdev)
*/
netdev->hw_features |= NETIF_F_RXFCS;
+ /* Mutual exclusivity of TSO and GCS is enforced by ice_fix_features()
+ * via the ndo_fix_features callback.
+ */
+ if (ice_is_feature_supported(pf, ICE_F_GCS))
+ netdev->hw_features |= NETIF_F_HW_CSUM;
+
netif_set_tso_max_size(netdev, ICE_MAX_TSO_SIZE);
}
@@ -6188,6 +6194,38 @@ ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
return err;
}
+/**
+ * ice_fix_features_gcs - enforce Generic Checksum (GCS) feature restrictions
+ * @netdev: ptr to the netdev that flags are being fixed on
+ * @features: features that need to be checked and possibly fixed
+ *
+ * Combining TSO (NETIF_F_ALL_TSO) with GCS (NETIF_F_HW_CSUM) is severely
+ * limited on E830 hardware: inner packet header modification is not
+ * supported and the maximum segment size is limited to 1023 bytes, so the
+ * driver makes TSO and GCS mutually exclusive. If both are requested, the
+ * feature that is already enabled is kept and the other is dropped; if
+ * neither is enabled, TSO is preferred and GCS is dropped.
+ *
+ * Note: IP checksum enforcement is handled by netdev_fix_features().
+ *
+ * Return: updated features based on device GCS limitations.
+ */
+static netdev_features_t
+ice_fix_features_gcs(struct net_device *netdev, netdev_features_t features)
+{
+ if (!((features & NETIF_F_HW_CSUM) && (features & NETIF_F_ALL_TSO)))
+ return features;
+
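+ /* Keep whichever feature is already enabled; if neither is enabled,
+ * prefer TSO over GCS.
+ */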
+ if (netdev->features & NETIF_F_HW_CSUM) {
+ netdev_warn(netdev, "Dropping TSO. TSO and HW checksum are mutually exclusive.\n");
+ features &= ~NETIF_F_ALL_TSO;
+ } else {
+ netdev_warn(netdev, "Dropping HW checksum. TSO and HW checksum are mutually exclusive.\n");
+ features &= ~NETIF_F_HW_CSUM;
+ }
+
+ return features;
+}
+
#define NETIF_VLAN_OFFLOAD_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \
NETIF_F_HW_VLAN_CTAG_TX | \
NETIF_F_HW_VLAN_STAG_RX | \
@@ -6235,6 +6273,8 @@ ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
* These are mutually exclusive as there is currently no way to
* enable/disable VLAN filtering based on VLAN ethertype when using VLAN
* prune rules.
+ *
+ * Return: updated features list.
*/
static netdev_features_t
ice_fix_features(struct net_device *netdev, netdev_features_t features)
@@ -6290,6 +6330,9 @@ ice_fix_features(struct net_device *netdev, netdev_features_t features)
features &= ~NETIF_VLAN_STRIPPING_FEATURES;
}
+ if (ice_is_feature_supported(np->vsi->back, ICE_F_GCS))
+ features = ice_fix_features_gcs(netdev, features);
+
return features;
}
@@ -1753,6 +1753,7 @@ ice_tx_map(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first,
static
int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
{
+ const struct ice_tx_ring *tx_ring = off->tx_ring;
u32 l4_len = 0, l3_len = 0, l2_len = 0;
struct sk_buff *skb = first->skb;
union {
@@ -1902,6 +1903,29 @@ int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
l3_len = l4.hdr - ip.hdr;
offset |= (l3_len / 4) << ICE_TX_DESC_LEN_IPLEN_S;
+ if ((tx_ring->netdev->features & NETIF_F_HW_CSUM) &&
+ !(first->tx_flags & ICE_TX_FLAGS_TSO) &&
+ !skb_csum_is_sctp(skb)) {
+ /* Set GCS */
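+ /* csum_start and csum_offset are programmed in 16-bit word units,
+ * hence the division by two. SCTP is excluded above since its CRC32c
+ * cannot be produced by the generic checksum engine.
+ */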
+ u16 csum_start = (skb->csum_start - skb->mac_header) / 2;
+ u16 csum_offset = skb->csum_offset / 2;
+ u16 gcs_params;
+
+ gcs_params = FIELD_PREP(ICE_TX_GCS_DESC_START_M, csum_start) |
+ FIELD_PREP(ICE_TX_GCS_DESC_OFFSET_M, csum_offset) |
+ FIELD_PREP(ICE_TX_GCS_DESC_CSUM_PSH, 1);
+
+ /* Unlike legacy HW checksums, GCS requires a context
+ * descriptor.
+ */
+ off->cd_qw1 |= ICE_TX_DESC_DTYPE_CTX;
+ off->cd_gcs_params = gcs_params;
+ /* Fill out CSO info in data descriptors */
+ off->td_offset |= offset;
+ off->td_cmd |= cmd;
+ return 1;
+ }
+
/* Enable L4 checksum offloads */
switch (l4_proto) {
case IPPROTO_TCP:
@@ -2383,7 +2407,7 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)
/* setup context descriptor */
cdesc->tunneling_params = cpu_to_le32(offload.cd_tunnel_params);
cdesc->l2tag2 = cpu_to_le16(offload.cd_l2tag2);
- cdesc->rsvd = cpu_to_le16(0);
+ cdesc->gcs = cpu_to_le16(offload.cd_gcs_params);
cdesc->qw1 = cpu_to_le64(offload.cd_qw1);
}
@@ -193,6 +193,7 @@ struct ice_tx_offload_params {
u32 td_l2tag1;
u32 cd_tunnel_params;
u16 cd_l2tag2;
+ u16 cd_gcs_params;
u8 header_len;
};
@@ -367,6 +368,7 @@ struct ice_rx_ring {
#define ICE_RX_FLAGS_RING_BUILD_SKB BIT(1)
#define ICE_RX_FLAGS_CRC_STRIP_DIS BIT(2)
#define ICE_RX_FLAGS_MULTIDEV BIT(3)
+#define ICE_RX_FLAGS_RING_GCS BIT(4)
u8 flags;
/* CL5 - 5th cacheline starts here */
struct xdp_rxq_info xdp_rxq;
@@ -405,6 +407,7 @@ struct ice_tx_ring {
#define ICE_TX_FLAGS_RING_XDP BIT(0)
#define ICE_TX_FLAGS_RING_VLAN_L2TAG1 BIT(1)
#define ICE_TX_FLAGS_RING_VLAN_L2TAG2 BIT(2)
+#define ICE_TX_FLAGS_RING_GCS BIT(3)
u8 flags;
u8 dcb_tc; /* Traffic class of ring */
u16 quanta_prof_id;
@@ -80,6 +80,23 @@ ice_rx_hash_to_skb(const struct ice_rx_ring *rx_ring,
libeth_rx_pt_set_hash(skb, hash, decoded);
}
+/**
+ * ice_rx_gcs - Set generic checksum in skb
+ * @skb: skb currently being received and modified
+ * @rx_desc: receive descriptor
+ */
+static void ice_rx_gcs(struct sk_buff *skb,
+ const union ice_32b_rx_flex_desc *rx_desc)
+{
+ const struct ice_32b_rx_flex_desc_nic *desc;
+ u16 csum;
+
+ desc = (struct ice_32b_rx_flex_desc_nic *)rx_desc;
+ skb->ip_summed = CHECKSUM_COMPLETE;
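+ /* The descriptor reports the raw 16-bit checksum computed over the
+ * packet; byte-swap it and convert it to the unfolded __wsum form
+ * expected with CHECKSUM_COMPLETE.
+ */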
+ csum = (__force u16)desc->raw_csum;
+ skb->csum = csum_unfold((__force __sum16)swab16(csum));
+}
+
/**
* ice_rx_csum - Indicate in skb if checksum is good
* @ring: the ring we care about
@@ -107,6 +124,15 @@ ice_rx_csum(struct ice_rx_ring *ring, struct sk_buff *skb,
rx_status0 = le16_to_cpu(rx_desc->wb.status_error0);
rx_status1 = le16_to_cpu(rx_desc->wb.status_error1);
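+ /* E830 GCS: the raw checksum is carried only in the NIC flex
+ * descriptor layout and is only useful when the stack can complete
+ * the inner protocol checksum from it (TCP, UDP, ICMP).
+ */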
+ if ((ring->flags & ICE_RX_FLAGS_RING_GCS) &&
+ rx_desc->wb.rxdid == ICE_RXDID_FLEX_NIC &&
+ (decoded.inner_prot == LIBETH_RX_PT_INNER_TCP ||
+ decoded.inner_prot == LIBETH_RX_PT_INNER_UDP ||
+ decoded.inner_prot == LIBETH_RX_PT_INNER_ICMP)) {
+ ice_rx_gcs(skb, rx_desc);
+ return;
+ }
+
/* check if HW has decoded the packet and checksum */
if (!(rx_status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S)))
return;