@@ -143,6 +143,26 @@ static int enetc_ptp_parse(struct sk_buff *skb, u8 *udp,
return 0;
}
+static bool enetc_tx_csum_offload_check(struct sk_buff *skb)
+{
+	if (skb->protocol == htons(ETH_P_IP))
+		return ip_hdr(skb)->protocol == IPPROTO_TCP ||
+		       ip_hdr(skb)->protocol == IPPROTO_UDP;
+	else if (skb->protocol == htons(ETH_P_IPV6))
+		return ipv6_hdr(skb)->nexthdr == NEXTHDR_TCP ||
+		       ipv6_hdr(skb)->nexthdr == NEXTHDR_UDP;
+	else
+		return false;
+}
+
+static bool enetc_skb_is_tcp(struct sk_buff *skb)
+{
+	if (skb->protocol == htons(ETH_P_IP))
+		return ip_hdr(skb)->protocol == IPPROTO_TCP;
+	else
+		return ipv6_hdr(skb)->nexthdr == NEXTHDR_TCP;
+}
+
static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
{
bool do_vlan, do_onestep_tstamp = false, do_twostep_tstamp = false;
@@ -160,6 +180,29 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
dma_addr_t dma;
u8 flags = 0;
+ enetc_clear_tx_bd(&temp_bd);
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ /* Can not support TSD and checksum offload at the same time */
+ if (priv->active_offloads & ENETC_F_TXCSUM &&
+ enetc_tx_csum_offload_check(skb) && !tx_ring->tsd_enable) {
+ bool is_ipv6 = ip_hdr(skb)->version == 6;
+ bool is_tcp = enetc_skb_is_tcp(skb);
+
+ temp_bd.l3_start = skb_network_offset(skb);
+ temp_bd.ipcs = is_ipv6 ? 0 : 1;
+ temp_bd.l3_hdr_size = skb_network_header_len(skb) / 4;
+ temp_bd.l3t = is_ipv6 ? 1 : 0;
+ temp_bd.l4t = is_tcp ? ENETC_TXBD_L4T_TCP : ENETC_TXBD_L4T_UDP;
+ flags |= ENETC_TXBD_FLAGS_CSUM_LSO | ENETC_TXBD_FLAGS_L4CS;
+ } else {
+ if (skb_checksum_help(skb)) {
+ dev_err(tx_ring->dev, "skb_checksum_help() error\n");
+
+ return 0;
+ }
+ }
+ }
+
i = tx_ring->next_to_use;
txbd = ENETC_TXBD(*tx_ring, i);
prefetchw(txbd);
@@ -170,7 +213,6 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
temp_bd.addr = cpu_to_le64(dma);
temp_bd.buf_len = cpu_to_le16(len);
- temp_bd.lstatus = 0;
tx_swbd = &tx_ring->tx_swbd[i];
tx_swbd->dma = dma;
@@ -591,7 +633,7 @@ static netdev_tx_t enetc_start_xmit(struct sk_buff *skb,
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct enetc_bdr *tx_ring;
- int count, err;
+ int count;
/* Queue one-step Sync packet if already locked */
if (skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) {
@@ -624,11 +666,6 @@ static netdev_tx_t enetc_start_xmit(struct sk_buff *skb,
return NETDEV_TX_BUSY;
}
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
- err = skb_checksum_help(skb);
- if (err)
- goto drop_packet_err;
- }
enetc_lock_mdio();
count = enetc_map_tx_buffs(tx_ring, skb);
enetc_unlock_mdio();
@@ -3287,6 +3324,7 @@ static const struct enetc_drvdata enetc4_pf_data = {
.sysclk_freq = ENETC_CLK_333M,
.pmac_offset = ENETC4_PMAC_OFFSET,
.rx_csum = 1,
+ .tx_csum = 1,
.eth_ops = &enetc4_pf_ethtool_ops,
};
@@ -235,6 +235,7 @@ enum enetc_errata {
struct enetc_drvdata {
u32 pmac_offset; /* Only valid for PSI which supports 802.1Qbu */
u8 rx_csum:1;
+ u8 tx_csum:1;
u64 sysclk_freq;
const struct ethtool_ops *eth_ops;
};
@@ -343,6 +344,7 @@ enum enetc_active_offloads {
ENETC_F_QCI = BIT(10),
ENETC_F_QBU = BIT(11),
ENETC_F_RXCSUM = BIT(12),
+ ENETC_F_TXCSUM = BIT(13),
};
enum enetc_flags_bit {
@@ -558,7 +558,12 @@ union enetc_tx_bd {
__le16 frm_len;
union {
struct {
- u8 reserved[3];
+ u8 l3_start:7;
+ u8 ipcs:1;
+ u8 l3_hdr_size:7;
+ u8 l3t:1;
+ u8 resv:5;
+ u8 l4t:3;
u8 flags;
}; /* default layout */
__le32 txstart;
@@ -582,10 +587,10 @@ union enetc_tx_bd {
};
enum enetc_txbd_flags {
- ENETC_TXBD_FLAGS_RES0 = BIT(0), /* reserved */
+ ENETC_TXBD_FLAGS_L4CS = BIT(0), /* For ENETC 4.1 and later */
ENETC_TXBD_FLAGS_TSE = BIT(1),
ENETC_TXBD_FLAGS_W = BIT(2),
- ENETC_TXBD_FLAGS_RES3 = BIT(3), /* reserved */
+ ENETC_TXBD_FLAGS_CSUM_LSO = BIT(3), /* For ENETC 4.1 and later */
ENETC_TXBD_FLAGS_TXSTART = BIT(4),
ENETC_TXBD_FLAGS_EX = BIT(6),
ENETC_TXBD_FLAGS_F = BIT(7)
@@ -594,6 +599,9 @@ enum enetc_txbd_flags {
#define ENETC_TXBD_TXSTART_MASK GENMASK(24, 0)
#define ENETC_TXBD_FLAGS_OFFSET 24
+#define ENETC_TXBD_L4T_UDP BIT(0)
+#define ENETC_TXBD_L4T_TCP BIT(1)
+
static inline __le32 enetc_txbd_set_tx_start(u64 tx_start, u8 flags)
{
u32 temp;
@@ -122,6 +122,9 @@ void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
if (si->drvdata->rx_csum)
priv->active_offloads |= ENETC_F_RXCSUM;
+ if (si->drvdata->tx_csum)
+ priv->active_offloads |= ENETC_F_TXCSUM;
+
/* TODO: currently, i.MX95 ENETC driver does not support advanced features */
if (!is_enetc_rev1(si)) {
ndev->hw_features &= ~(NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_LOOPBACK);
In addition to supporting Rx checksum offload, i.MX95 ENETC also supports
Tx checksum offload. The Tx checksum offload is implemented through the
Tx BD: to use it, software fills in auxiliary information in the Tx BD,
such as the IP version, the IP header offset and size, and whether the
L4 protocol is UDP or TCP. As with Rx checksum offload, the Tx checksum
offload capability is not advertised in any register, so a tx_csum bit
is added to struct enetc_drvdata to indicate whether the device supports
Tx checksum offload.

Signed-off-by: Wei Fang <wei.fang@nxp.com>
---
 drivers/net/ethernet/freescale/enetc/enetc.c        | 52 ++++++++++++++++---
 drivers/net/ethernet/freescale/enetc/enetc.h        |  2 +
 drivers/net/ethernet/freescale/enetc/enetc_hw.h     | 14 +++--
 drivers/net/ethernet/freescale/enetc/enetc_pf_common.c | 3 ++
 4 files changed, 61 insertions(+), 10 deletions(-)