@@ -101,18 +101,33 @@ ice_rx_hash_to_skb(const struct ice_rx_ring *rx_ring,
skb_set_hash(skb, hash, ice_ptype_to_htype(rx_ptype));
}
+enum ice_rx_csum_status {
+ ICE_RX_CSUM_LVL_0 = 0,
+ ICE_RX_CSUM_LVL_1 = BIT(0),
+ ICE_RX_CSUM_NONE = BIT(1),
+ ICE_RX_CSUM_ERROR = BIT(2),
+ ICE_RX_CSUM_FAIL = ICE_RX_CSUM_NONE | ICE_RX_CSUM_ERROR,
+};
+
+static u8 ice_rx_csum_lvl(enum ice_rx_csum_status status)
+{
+ return status & ICE_RX_CSUM_LVL_1;
+}
+
+static u8 ice_rx_csum_ip_summed(enum ice_rx_csum_status status)
+{
+ return status & ICE_RX_CSUM_NONE ? CHECKSUM_NONE : CHECKSUM_UNNECESSARY;
+}
+
/**
- * ice_rx_csum - Indicate in skb if checksum is good
- * @ring: the ring we care about
- * @skb: skb currently being received and modified
+ * ice_get_rx_csum_status - Deduce checksum status from descriptor
* @rx_desc: the receive descriptor
* @ptype: the packet type decoded by hardware
*
- * skb->protocol must be set before this function is called
+ * Return: driver-specific checksum status
*/
-static void
-ice_rx_csum(struct ice_rx_ring *ring, struct sk_buff *skb,
- union ice_32b_rx_flex_desc *rx_desc, u16 ptype)
+static enum ice_rx_csum_status
+ice_get_rx_csum_status(const union ice_32b_rx_flex_desc *rx_desc, u16 ptype)
{
struct ice_rx_ptype_decoded decoded;
u16 rx_status0, rx_status1;
@@ -123,20 +138,12 @@ ice_rx_csum(struct ice_rx_ring *ring, struct sk_buff *skb,
decoded = ice_decode_rx_desc_ptype(ptype);
- /* Start with CHECKSUM_NONE and by default csum_level = 0 */
- skb->ip_summed = CHECKSUM_NONE;
- skb_checksum_none_assert(skb);
-
- /* check if Rx checksum is enabled */
- if (!(ring->netdev->features & NETIF_F_RXCSUM))
- return;
-
/* check if HW has decoded the packet and checksum */
if (!(rx_status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S)))
- return;
+ return ICE_RX_CSUM_NONE;
if (!(decoded.known && decoded.outer_ip))
- return;
+ return ICE_RX_CSUM_NONE;
ipv4 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
(decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV4);
@@ -145,43 +152,61 @@ ice_rx_csum(struct ice_rx_ring *ring, struct sk_buff *skb,
if (ipv4 && (rx_status0 & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |
BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S))))
- goto checksum_fail;
+ return ICE_RX_CSUM_FAIL;
if (ipv6 && (rx_status0 & (BIT(ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S))))
- goto checksum_fail;
+ return ICE_RX_CSUM_FAIL;
/* check for L4 errors and handle packets that were not able to be
* checksummed due to arrival speed
*/
if (rx_status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S))
- goto checksum_fail;
+ return ICE_RX_CSUM_FAIL;
/* check for outer UDP checksum error in tunneled packets */
if ((rx_status1 & BIT(ICE_RX_FLEX_DESC_STATUS1_NAT_S)) &&
(rx_status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S)))
- goto checksum_fail;
-
- /* If there is an outer header present that might contain a checksum
- * we need to bump the checksum level by 1 to reflect the fact that
- * we are indicating we validated the inner checksum.
- */
- if (decoded.tunnel_type >= ICE_RX_PTYPE_TUNNEL_IP_GRENAT)
- skb->csum_level = 1;
+ return ICE_RX_CSUM_FAIL;
/* Only report checksum unnecessary for TCP, UDP, or SCTP */
switch (decoded.inner_prot) {
case ICE_RX_PTYPE_INNER_PROT_TCP:
case ICE_RX_PTYPE_INNER_PROT_UDP:
case ICE_RX_PTYPE_INNER_PROT_SCTP:
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- break;
- default:
- break;
+ /* If there is an outer header present that might contain
+ * a checksum we need to bump the checksum level by 1 to reflect
+ * the fact that we have validated the inner checksum.
+ */
+ return decoded.tunnel_type >= ICE_RX_PTYPE_TUNNEL_IP_GRENAT ?
+ ICE_RX_CSUM_LVL_1 : ICE_RX_CSUM_LVL_0;
}
- return;
-checksum_fail:
- ring->vsi->back->hw_csum_rx_error++;
+ return ICE_RX_CSUM_NONE;
+}
+
+/**
+ * ice_rx_csum_into_skb - Indicate in skb if checksum is good
+ * @ring: the ring we care about
+ * @skb: skb currently being received and modified
+ * @rx_desc: the receive descriptor
+ * @ptype: the packet type decoded by hardware
+ */
+static void
+ice_rx_csum_into_skb(struct ice_rx_ring *ring, struct sk_buff *skb,
+ const union ice_32b_rx_flex_desc *rx_desc, u16 ptype)
+{
+ enum ice_rx_csum_status csum_status;
+
+ /* check if Rx checksum is enabled */
+ if (!(ring->netdev->features & NETIF_F_RXCSUM))
+ return;
+
+ csum_status = ice_get_rx_csum_status(rx_desc, ptype);
+ if (csum_status & ICE_RX_CSUM_ERROR)
+ ring->vsi->back->hw_csum_rx_error++;
+
+ skb->ip_summed = ice_rx_csum_ip_summed(csum_status);
+ skb->csum_level = ice_rx_csum_lvl(csum_status);
}
/**
@@ -225,7 +250,7 @@ ice_process_skb_fields(struct ice_rx_ring *rx_ring,
/* modifies the skb - consumes the enet header */
skb->protocol = eth_type_trans(skb, rx_ring->netdev);
- ice_rx_csum(rx_ring, skb, rx_desc, ptype);
+ ice_rx_csum_into_skb(rx_ring, skb, rx_desc, ptype);
if (rx_ring->ptp_rx)
ice_ptp_rx_hwts_to_skb(rx_ring, rx_desc, skb);
Previously, we only needed RX checksum flags in the skb path, hence all
related code was written with the skb in mind. But with the addition of
XDP hints via kfuncs to the ice driver, the same logic will be needed in
.xmo_() callbacks.

Put the generic process of determining the checksum status into a
separate function.

Now we cannot operate directly on the skb when deducing the checksum
status, so introduce an intermediate enum for the checksum status.
Fortunately, in ice, we have only 4 possibilities: checksum validated at
level 0, validated at level 1, no checksum, checksum error. Use 3 bits
for more convenient conversion.

Signed-off-by: Larysa Zaremba <larysa.zaremba@intel.com>
---
 drivers/net/ethernet/intel/ice/ice_txrx_lib.c | 97 ++++++++++++-------
 1 file changed, 61 insertions(+), 36 deletions(-)
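
A quick note on how the split is meant to be consumed (kept out of the
commit message on purpose): ice_get_rx_csum_status() stays skb-free, and
ice_rx_csum_into_skb() converts the status into skb fields (LVL_0/LVL_1
become CHECKSUM_UNNECESSARY with csum_level 0/1, NONE and FAIL become
CHECKSUM_NONE, and the ERROR bit additionally bumps hw_csum_rx_error).
The sketch below shows how a future .xmo_()-style callback could reuse
the same helper; the callback name, the ice_xdp_buff layout and the
-ENODATA convention are illustrative assumptions only and not part of
this patch.

/* Hypothetical consumer, assuming an ice_xdp_buff-style wrapper that
 * keeps a pointer to the EOP descriptor next to the xdp_buff.
 */
static int ice_xdp_rx_csum_lvl(const struct xdp_md *ctx, u8 *csum_lvl)
{
	const struct ice_xdp_buff *xdp_ext = (const struct ice_xdp_buff *)ctx;
	enum ice_rx_csum_status status;
	u16 ptype;

	ptype = le16_to_cpu(xdp_ext->eop_desc->wb.ptype_flex_flags0) &
		ICE_RX_FLEX_DESC_PTYPE_M;

	status = ice_get_rx_csum_status(xdp_ext->eop_desc, ptype);
	/* both ICE_RX_CSUM_NONE and ICE_RX_CSUM_FAIL carry the NONE bit */
	if (status & ICE_RX_CSUM_NONE)
		return -ENODATA;

	*csum_lvl = ice_rx_csum_lvl(status);
	return 0;
}

Because the descriptor-parsing half takes only a const descriptor and a
ptype, such a callback needs no skb and no duplication of the L3/L4
error checks.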