From patchwork Wed May 10 09:38:39 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Mengyuan Lou X-Patchwork-Id: 13236688 X-Patchwork-Delegate: kuba@kernel.org Received: from lindbergh.monkeyblade.net (lindbergh.monkeyblade.net [23.128.96.19]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 5F79F811 for ; Wed, 10 May 2023 09:39:17 +0000 (UTC) Received: from smtpbg153.qq.com (smtpbg153.qq.com [13.245.218.24]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id D824912C for ; Wed, 10 May 2023 02:39:12 -0700 (PDT) X-QQ-mid: bizesmtp73t1683711543ts27ghvc Received: from localhost.localdomain ( [125.119.253.217]) by bizesmtp.qq.com (ESMTP) with id ; Wed, 10 May 2023 17:39:00 +0800 (CST) X-QQ-SSF: 01400000000000N0Z000000A0000000 X-QQ-FEAT: qcKkmz/zJhz9Kxp1kRhESPIwQqN4KQ6jIz/3ezXOJpGbne1GAf04OvUVO11zW wPxOkLz2J7zuyTk+8iA7HYVSJISrRy/uQxwzhez1ES14adSP5e58KthyB+ovQsJlgcN6BKl o9yE6nzg92LTRy3CZRROi0IqooDnZo2VpPJHDRUSs5x0U2kqsGP3d8sJnidNdD5uw2sW0y6 np5Ud83OHr5VqimV0+mKCSQWE9ti4ZsLDtoStJL75gIIq8Fuqj7B8qKEX+H3wywdXx2Wz/t /DduOD0mDKYMCGihI5MKxnkaxBiOiqDxwYrUotQv7Cwtvs2EGVB9J2WqaXxCwGhFgkdoxVb Z1FHGDnCb0hVgys+i7ZTm4jHJhLUZA6OyMPPfK7zixTdRwE0bjDP6+dU4yZ8gwe6MkXQ64U 3Bim/3N6V7xCCJ/9Uqv/Sg== X-QQ-GoodBg: 2 X-BIZMAIL-ID: 12267457053048260314 From: Mengyuan Lou To: netdev@vger.kernel.org Cc: jiawenwu@trustnetic.com, Mengyuan Lou Subject: [PATCH net-next v4 1/7] net: wangxun: libwx add tx offload functions Date: Wed, 10 May 2023 17:38:39 +0800 Message-Id: <20230510093845.47446-2-mengyuanlou@net-swift.com> X-Mailer: git-send-email 2.40.0 In-Reply-To: <20230510093845.47446-1-mengyuanlou@net-swift.com> References: <20230510093845.47446-1-mengyuanlou@net-swift.com> Precedence: bulk X-Mailing-List: netdev@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 X-QQ-SENDSIZE: 520 Feedback-ID: bizesmtp:net-swift.com:qybglogicsvrgz:qybglogicsvrgz5a-3 X-Spam-Status: No, score=-1.9 required=5.0 tests=BAYES_00,RCVD_IN_DNSWL_NONE, RCVD_IN_MSPIKE_H2,SPF_HELO_PASS,SPF_PASS,T_SCC_BODY_TEXT_LINE autolearn=ham autolearn_force=no version=3.4.6 X-Spam-Checker-Version: SpamAssassin 3.4.6 (2021-04-09) on lindbergh.monkeyblade.net X-Patchwork-Delegate: kuba@kernel.org Add tx offload functions for wx_xmit_frame_ring which includes wx_encode_tx_desc_ptype, wx_tso and wx_tx_csum. which supports ngbe and txgbe to implement tx offload function. Signed-off-by: Mengyuan Lou --- drivers/net/ethernet/wangxun/libwx/wx_lib.c | 483 ++++++++++++++++++- drivers/net/ethernet/wangxun/libwx/wx_type.h | 91 +++- 2 files changed, 566 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c index eb89a274083e..c8e8c1ca0a69 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c @@ -2,9 +2,14 @@ /* Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd. 
*/ #include +#include #include +#include #include +#include #include +#include +#include #include "wx_type.h" #include "wx_lib.h" @@ -707,11 +712,50 @@ static int wx_maybe_stop_tx(struct wx_ring *tx_ring, u16 size) return 0; } +static u32 wx_tx_cmd_type(u32 tx_flags) +{ + /* set type for advanced descriptor with frame checksum insertion */ + u32 cmd_type = WX_TXD_DTYP_DATA | WX_TXD_IFCS; + + /* set HW vlan bit if vlan is present */ + cmd_type |= WX_SET_FLAG(tx_flags, WX_TX_FLAGS_HW_VLAN, WX_TXD_VLE); + /* set segmentation enable bits for TSO/FSO */ + cmd_type |= WX_SET_FLAG(tx_flags, WX_TX_FLAGS_TSO, WX_TXD_TSE); + /* set timestamp bit if present */ + cmd_type |= WX_SET_FLAG(tx_flags, WX_TX_FLAGS_TSTAMP, WX_TXD_MAC_TSTAMP); + cmd_type |= WX_SET_FLAG(tx_flags, WX_TX_FLAGS_LINKSEC, WX_TXD_LINKSEC); + + return cmd_type; +} + +static void wx_tx_olinfo_status(union wx_tx_desc *tx_desc, + u32 tx_flags, unsigned int paylen) +{ + u32 olinfo_status = paylen << WX_TXD_PAYLEN_SHIFT; + + /* enable L4 checksum for TSO and TX checksum offload */ + olinfo_status |= WX_SET_FLAG(tx_flags, WX_TX_FLAGS_CSUM, WX_TXD_L4CS); + /* enable IPv4 checksum for TSO */ + olinfo_status |= WX_SET_FLAG(tx_flags, WX_TX_FLAGS_IPV4, WX_TXD_IIPCS); + /* enable outer IPv4 checksum for TSO */ + olinfo_status |= WX_SET_FLAG(tx_flags, WX_TX_FLAGS_OUTER_IPV4, + WX_TXD_EIPCS); + /* Check Context must be set if Tx switch is enabled, which it + * always is for case where virtual functions are running + */ + olinfo_status |= WX_SET_FLAG(tx_flags, WX_TX_FLAGS_CC, WX_TXD_CC); + olinfo_status |= WX_SET_FLAG(tx_flags, WX_TX_FLAGS_IPSEC, + WX_TXD_IPSEC); + tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); +} + static void wx_tx_map(struct wx_ring *tx_ring, - struct wx_tx_buffer *first) + struct wx_tx_buffer *first, + const u8 hdr_len) { struct sk_buff *skb = first->skb; struct wx_tx_buffer *tx_buffer; + u32 tx_flags = first->tx_flags; u16 i = tx_ring->next_to_use; unsigned int data_len, size; union wx_tx_desc *tx_desc; @@ -719,10 +763,9 @@ static void wx_tx_map(struct wx_ring *tx_ring, dma_addr_t dma; u32 cmd_type; - cmd_type = WX_TXD_DTYP_DATA | WX_TXD_IFCS; + cmd_type = wx_tx_cmd_type(tx_flags); tx_desc = WX_TX_DESC(tx_ring, i); - - tx_desc->read.olinfo_status = cpu_to_le32(skb->len << WX_TXD_PAYLEN_SHIFT); + wx_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len); size = skb_headlen(skb); data_len = skb->data_len; @@ -838,12 +881,420 @@ static void wx_tx_map(struct wx_ring *tx_ring, tx_ring->next_to_use = i; } +static void wx_tx_ctxtdesc(struct wx_ring *tx_ring, u32 vlan_macip_lens, + u32 fcoe_sof_eof, u32 type_tucmd, u32 mss_l4len_idx) +{ + struct wx_tx_context_desc *context_desc; + u16 i = tx_ring->next_to_use; + + context_desc = WX_TX_CTXTDESC(tx_ring, i); + i++; + tx_ring->next_to_use = (i < tx_ring->count) ? 
i : 0; + + /* set bits to identify this as an advanced context descriptor */ + type_tucmd |= WX_TXD_DTYP_CTXT; + context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); + context_desc->seqnum_seed = cpu_to_le32(fcoe_sof_eof); + context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); + context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); +} + +static void wx_get_ipv6_proto(struct sk_buff *skb, int offset, u8 *nexthdr) +{ + struct ipv6hdr *hdr = (struct ipv6hdr *)(skb->data + offset); + + *nexthdr = hdr->nexthdr; + offset += sizeof(struct ipv6hdr); + while (ipv6_ext_hdr(*nexthdr)) { + struct ipv6_opt_hdr _hdr, *hp; + + if (*nexthdr == NEXTHDR_NONE) + return; + hp = skb_header_pointer(skb, offset, sizeof(_hdr), &_hdr); + if (!hp) + return; + if (*nexthdr == NEXTHDR_FRAGMENT) + break; + *nexthdr = hp->nexthdr; + } +} + +union network_header { + struct iphdr *ipv4; + struct ipv6hdr *ipv6; + void *raw; +}; + +static u8 wx_encode_tx_desc_ptype(const struct wx_tx_buffer *first) +{ + u8 tun_prot = 0, l4_prot = 0, ptype = 0; + struct sk_buff *skb = first->skb; + + if (skb->encapsulation) { + union network_header hdr; + + switch (first->protocol) { + case htons(ETH_P_IP): + tun_prot = ip_hdr(skb)->protocol; + if (ip_is_fragment(ip_hdr(skb))) + return WX_PTYPE_PKT_IP | WX_PTYPE_TYP_IPFRAG; + ptype = WX_PTYPE_TUN_IPV4; + break; + case htons(ETH_P_IPV6): + wx_get_ipv6_proto(skb, skb_network_offset(skb), &tun_prot); + if (tun_prot == NEXTHDR_FRAGMENT) + return WX_PTYPE_PKT_IP | WX_PTYPE_PKT_IPV6 | + WX_PTYPE_TYP_IPFRAG; + ptype = WX_PTYPE_TUN_IPV6; + break; + default: + return ptype; + } + + if (tun_prot == IPPROTO_IPIP) { + hdr.raw = (void *)inner_ip_hdr(skb); + ptype |= WX_PTYPE_PKT_IPIP; + } else if (tun_prot == IPPROTO_UDP) { + hdr.raw = (void *)inner_ip_hdr(skb); + if (skb->inner_protocol_type != ENCAP_TYPE_ETHER || + skb->inner_protocol != htons(ETH_P_TEB)) { + ptype |= WX_PTYPE_PKT_IG; + } else { + if (((struct ethhdr *)skb_inner_mac_header(skb))->h_proto + == htons(ETH_P_8021Q)) + ptype |= WX_PTYPE_PKT_IGMV; + else + ptype |= WX_PTYPE_PKT_IGM; + } + + } else if (tun_prot == IPPROTO_GRE) { + hdr.raw = (void *)inner_ip_hdr(skb); + if (skb->inner_protocol == htons(ETH_P_IP) || + skb->inner_protocol == htons(ETH_P_IPV6)) { + ptype |= WX_PTYPE_PKT_IG; + } else { + if (((struct ethhdr *)skb_inner_mac_header(skb))->h_proto + == htons(ETH_P_8021Q)) + ptype |= WX_PTYPE_PKT_IGMV; + else + ptype |= WX_PTYPE_PKT_IGM; + } + } else { + return ptype; + } + + switch (hdr.ipv4->version) { + case IPVERSION: + l4_prot = hdr.ipv4->protocol; + if (ip_is_fragment(ip_hdr(skb))) { + ptype |= WX_PTYPE_TYP_IPFRAG; + return ptype; + } + break; + case 6: + wx_get_ipv6_proto(skb, skb_inner_network_offset(skb), &l4_prot); + ptype |= WX_PTYPE_PKT_IPV6; + if (l4_prot == NEXTHDR_FRAGMENT) { + ptype |= WX_PTYPE_TYP_IPFRAG; + return ptype; + } + break; + default: + return ptype; + } + } else { + switch (first->protocol) { + case htons(ETH_P_IP): + l4_prot = ip_hdr(skb)->protocol; + ptype = WX_PTYPE_PKT_IP; + if (ip_is_fragment(ip_hdr(skb))) { + ptype |= WX_PTYPE_TYP_IPFRAG; + return ptype; + } + break; + case htons(ETH_P_IPV6): + wx_get_ipv6_proto(skb, skb_network_offset(skb), &l4_prot); + ptype = WX_PTYPE_PKT_IP | WX_PTYPE_PKT_IPV6; + if (l4_prot == NEXTHDR_FRAGMENT) { + ptype |= WX_PTYPE_TYP_IPFRAG; + return ptype; + } + break; + default: + return WX_PTYPE_PKT_MAC | WX_PTYPE_TYP_MAC; + } + } + switch (l4_prot) { + case IPPROTO_TCP: + ptype |= WX_PTYPE_TYP_TCP; + break; + case IPPROTO_UDP: + ptype |= WX_PTYPE_TYP_UDP; 
+ break; + case IPPROTO_SCTP: + ptype |= WX_PTYPE_TYP_SCTP; + break; + default: + ptype |= WX_PTYPE_TYP_IP; + break; + } + + return ptype; +} + +static int wx_tso(struct wx_ring *tx_ring, struct wx_tx_buffer *first, + u8 *hdr_len, u8 ptype) +{ + u32 vlan_macip_lens, type_tucmd, mss_l4len_idx; + struct net_device *netdev = tx_ring->netdev; + u32 l4len, tunhdr_eiplen_tunlen = 0; + struct sk_buff *skb = first->skb; + bool enc = skb->encapsulation; + struct ipv6hdr *ipv6h; + struct tcphdr *tcph; + struct iphdr *iph; + u8 tun_prot = 0; + int err; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + + if (!skb_is_gso(skb)) + return 0; + + err = skb_cow_head(skb, 0); + if (err < 0) + return err; + + /* indicates the inner headers in the skbuff are valid. */ + iph = enc ? inner_ip_hdr(skb) : ip_hdr(skb); + if (iph->version == 4) { + tcph = enc ? inner_tcp_hdr(skb) : tcp_hdr(skb); + iph->tot_len = 0; + iph->check = 0; + tcph->check = ~csum_tcpudp_magic(iph->saddr, + iph->daddr, 0, + IPPROTO_TCP, 0); + first->tx_flags |= WX_TX_FLAGS_TSO | + WX_TX_FLAGS_CSUM | + WX_TX_FLAGS_IPV4 | + WX_TX_FLAGS_CC; + } else if (iph->version == 6 && skb_is_gso_v6(skb)) { + ipv6h = enc ? inner_ipv6_hdr(skb) : ipv6_hdr(skb); + tcph = enc ? inner_tcp_hdr(skb) : tcp_hdr(skb); + ipv6h->payload_len = 0; + tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, + &ipv6h->daddr, 0, + IPPROTO_TCP, 0); + first->tx_flags |= WX_TX_FLAGS_TSO | + WX_TX_FLAGS_CSUM | + WX_TX_FLAGS_CC; + } + + /* compute header lengths */ + l4len = enc ? inner_tcp_hdrlen(skb) : tcp_hdrlen(skb); + *hdr_len = enc ? (skb_inner_transport_header(skb) - skb->data) : + skb_transport_offset(skb); + *hdr_len += l4len; + + /* update gso size and bytecount with header size */ + first->gso_segs = skb_shinfo(skb)->gso_segs; + first->bytecount += (first->gso_segs - 1) * *hdr_len; + + /* mss_l4len_id: use 0 as index for TSO */ + mss_l4len_idx = l4len << WX_TXD_L4LEN_SHIFT; + mss_l4len_idx |= skb_shinfo(skb)->gso_size << WX_TXD_MSS_SHIFT; + + /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */ + if (enc) { + switch (first->protocol) { + case htons(ETH_P_IP): + tun_prot = ip_hdr(skb)->protocol; + first->tx_flags |= WX_TX_FLAGS_OUTER_IPV4; + break; + case htons(ETH_P_IPV6): + tun_prot = ipv6_hdr(skb)->nexthdr; + break; + default: + break; + } + switch (tun_prot) { + case IPPROTO_UDP: + tunhdr_eiplen_tunlen = WX_TXD_TUNNEL_UDP; + tunhdr_eiplen_tunlen |= ((skb_network_header_len(skb) >> 2) << + WX_TXD_OUTER_IPLEN_SHIFT) | + (((skb_inner_mac_header(skb) - + skb_transport_header(skb)) >> 1) << + WX_TXD_TUNNEL_LEN_SHIFT); + break; + case IPPROTO_GRE: + tunhdr_eiplen_tunlen = WX_TXD_TUNNEL_GRE; + tunhdr_eiplen_tunlen |= ((skb_network_header_len(skb) >> 2) << + WX_TXD_OUTER_IPLEN_SHIFT) | + (((skb_inner_mac_header(skb) - + skb_transport_header(skb)) >> 1) << + WX_TXD_TUNNEL_LEN_SHIFT); + break; + case IPPROTO_IPIP: + tunhdr_eiplen_tunlen = (((char *)inner_ip_hdr(skb) - + (char *)ip_hdr(skb)) >> 2) << + WX_TXD_OUTER_IPLEN_SHIFT; + break; + default: + break; + } + vlan_macip_lens = skb_inner_network_header_len(skb) >> 1; + } else { + vlan_macip_lens = skb_network_header_len(skb) >> 1; + } + + vlan_macip_lens |= skb_network_offset(skb) << WX_TXD_MACLEN_SHIFT; + vlan_macip_lens |= first->tx_flags & WX_TX_FLAGS_VLAN_MASK; + + type_tucmd = ptype << 24; + if (skb->vlan_proto == htons(ETH_P_8021AD) && + netdev->features & NETIF_F_HW_VLAN_STAG_TX) + type_tucmd |= WX_SET_FLAG(first->tx_flags, + WX_TX_FLAGS_HW_VLAN, + 0x1 << WX_TXD_TAG_TPID_SEL_SHIFT); + wx_tx_ctxtdesc(tx_ring, vlan_macip_lens, 
tunhdr_eiplen_tunlen, + type_tucmd, mss_l4len_idx); + + return 1; +} + +static void wx_tx_csum(struct wx_ring *tx_ring, struct wx_tx_buffer *first, + u8 ptype) +{ + u32 tunhdr_eiplen_tunlen = 0, vlan_macip_lens = 0; + struct net_device *netdev = tx_ring->netdev; + u32 mss_l4len_idx = 0, type_tucmd; + struct sk_buff *skb = first->skb; + u8 tun_prot = 0; + + if (skb->ip_summed != CHECKSUM_PARTIAL) { + if (!(first->tx_flags & WX_TX_FLAGS_HW_VLAN) && + !(first->tx_flags & WX_TX_FLAGS_CC)) + return; + vlan_macip_lens = skb_network_offset(skb) << + WX_TXD_MACLEN_SHIFT; + } else { + u8 l4_prot = 0; + union { + struct iphdr *ipv4; + struct ipv6hdr *ipv6; + u8 *raw; + } network_hdr; + union { + struct tcphdr *tcphdr; + u8 *raw; + } transport_hdr; + + if (skb->encapsulation) { + network_hdr.raw = skb_inner_network_header(skb); + transport_hdr.raw = skb_inner_transport_header(skb); + vlan_macip_lens = skb_network_offset(skb) << + WX_TXD_MACLEN_SHIFT; + switch (first->protocol) { + case htons(ETH_P_IP): + tun_prot = ip_hdr(skb)->protocol; + break; + case htons(ETH_P_IPV6): + tun_prot = ipv6_hdr(skb)->nexthdr; + break; + default: + return; + } + switch (tun_prot) { + case IPPROTO_UDP: + tunhdr_eiplen_tunlen = WX_TXD_TUNNEL_UDP; + tunhdr_eiplen_tunlen |= + ((skb_network_header_len(skb) >> 2) << + WX_TXD_OUTER_IPLEN_SHIFT) | + (((skb_inner_mac_header(skb) - + skb_transport_header(skb)) >> 1) << + WX_TXD_TUNNEL_LEN_SHIFT); + break; + case IPPROTO_GRE: + tunhdr_eiplen_tunlen = WX_TXD_TUNNEL_GRE; + tunhdr_eiplen_tunlen |= ((skb_network_header_len(skb) >> 2) << + WX_TXD_OUTER_IPLEN_SHIFT) | + (((skb_inner_mac_header(skb) - + skb_transport_header(skb)) >> 1) << + WX_TXD_TUNNEL_LEN_SHIFT); + break; + case IPPROTO_IPIP: + tunhdr_eiplen_tunlen = (((char *)inner_ip_hdr(skb) - + (char *)ip_hdr(skb)) >> 2) << + WX_TXD_OUTER_IPLEN_SHIFT; + break; + default: + break; + } + + } else { + network_hdr.raw = skb_network_header(skb); + transport_hdr.raw = skb_transport_header(skb); + vlan_macip_lens = skb_network_offset(skb) << + WX_TXD_MACLEN_SHIFT; + } + + switch (network_hdr.ipv4->version) { + case IPVERSION: + vlan_macip_lens |= (transport_hdr.raw - network_hdr.raw) >> 1; + l4_prot = network_hdr.ipv4->protocol; + break; + case 6: + vlan_macip_lens |= (transport_hdr.raw - network_hdr.raw) >> 1; + l4_prot = network_hdr.ipv6->nexthdr; + break; + default: + break; + } + + switch (l4_prot) { + case IPPROTO_TCP: + mss_l4len_idx = (transport_hdr.tcphdr->doff * 4) << + WX_TXD_L4LEN_SHIFT; + break; + case IPPROTO_SCTP: + mss_l4len_idx = sizeof(struct sctphdr) << + WX_TXD_L4LEN_SHIFT; + break; + case IPPROTO_UDP: + mss_l4len_idx = sizeof(struct udphdr) << + WX_TXD_L4LEN_SHIFT; + break; + default: + break; + } + + /* update TX checksum flag */ + first->tx_flags |= WX_TX_FLAGS_CSUM; + } + first->tx_flags |= WX_TX_FLAGS_CC; + /* vlan_macip_lens: MACLEN, VLAN tag */ + vlan_macip_lens |= first->tx_flags & WX_TX_FLAGS_VLAN_MASK; + + type_tucmd = ptype << 24; + if (skb->vlan_proto == htons(ETH_P_8021AD) && + netdev->features & NETIF_F_HW_VLAN_STAG_TX) + type_tucmd |= WX_SET_FLAG(first->tx_flags, + WX_TX_FLAGS_HW_VLAN, + 0x1 << WX_TXD_TAG_TPID_SEL_SHIFT); + wx_tx_ctxtdesc(tx_ring, vlan_macip_lens, tunhdr_eiplen_tunlen, + type_tucmd, mss_l4len_idx); +} + static netdev_tx_t wx_xmit_frame_ring(struct sk_buff *skb, struct wx_ring *tx_ring) { u16 count = TXD_USE_COUNT(skb_headlen(skb)); struct wx_tx_buffer *first; + u8 hdr_len = 0, ptype; unsigned short f; + u32 tx_flags = 0; + int tso; /* need: 1 descriptor per page * 
PAGE_SIZE/WX_MAX_DATA_PER_TXD, * + 1 desc for skb_headlen/WX_MAX_DATA_PER_TXD, @@ -864,7 +1315,29 @@ static netdev_tx_t wx_xmit_frame_ring(struct sk_buff *skb, first->bytecount = skb->len; first->gso_segs = 1; - wx_tx_map(tx_ring, first); + /* if we have a HW VLAN tag being added default to the HW one */ + if (skb_vlan_tag_present(skb)) { + tx_flags |= skb_vlan_tag_get(skb) << WX_TX_FLAGS_VLAN_SHIFT; + tx_flags |= WX_TX_FLAGS_HW_VLAN; + } + + /* record initial flags and protocol */ + first->tx_flags = tx_flags; + first->protocol = vlan_get_protocol(skb); + + ptype = wx_encode_tx_desc_ptype(first); + + tso = wx_tso(tx_ring, first, &hdr_len, ptype); + if (tso < 0) + goto out_drop; + else if (!tso) + wx_tx_csum(tx_ring, first, ptype); + wx_tx_map(tx_ring, first, hdr_len); + + return NETDEV_TX_OK; +out_drop: + dev_kfree_skb_any(first->skb); + first->skb = NULL; return NETDEV_TX_OK; } diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h index 32f952d93009..70f5fd168e40 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_type.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h @@ -6,6 +6,7 @@ #include #include +#include #define WX_NCSI_SUP 0x8000 #define WX_NCSI_MASK 0x8000 @@ -315,9 +316,6 @@ #define TXD_USE_COUNT(S) DIV_ROUND_UP((S), WX_MAX_DATA_PER_TXD) #define DESC_NEEDED (MAX_SKB_FRAGS + 4) -/* Ether Types */ -#define WX_ETH_P_CNM 0x22E7 - #define WX_CFG_PORT_ST 0x14404 /******************* Receive Descriptor bit definitions **********************/ @@ -326,6 +324,33 @@ #define WX_RXD_ERR_RXE BIT(29) /* Any MAC Error */ +/** + * receive packet type + * PTYPE:8 = TUN:2 + PKT:2 + TYP:4 + **/ +/* TUN */ +#define WX_PTYPE_TUN_IPV4 0x80 +#define WX_PTYPE_TUN_IPV6 0xC0 + +/* PKT for TUN */ +#define WX_PTYPE_PKT_IPIP 0x00 /* IP+IP */ +#define WX_PTYPE_PKT_IG 0x10 /* IP+GRE */ +#define WX_PTYPE_PKT_IGM 0x20 /* IP+GRE+MAC */ +#define WX_PTYPE_PKT_IGMV 0x30 /* IP+GRE+MAC+VLAN */ +/* PKT for !TUN */ +#define WX_PTYPE_PKT_MAC 0x10 +#define WX_PTYPE_PKT_IP 0x20 + +/* TYP for PKT=mac */ +#define WX_PTYPE_TYP_MAC 0x01 +/* TYP for PKT=ip */ +#define WX_PTYPE_PKT_IPV6 0x08 +#define WX_PTYPE_TYP_IPFRAG 0x01 +#define WX_PTYPE_TYP_IP 0x02 +#define WX_PTYPE_TYP_UDP 0x03 +#define WX_PTYPE_TYP_TCP 0x04 +#define WX_PTYPE_TYP_SCTP 0x05 + /*********************** Transmit Descriptor Config Masks ****************/ #define WX_TXD_STAT_DD BIT(0) /* Descriptor Done */ #define WX_TXD_DTYP_DATA 0 /* Adv Data Descriptor */ @@ -334,6 +359,49 @@ #define WX_TXD_IFCS BIT(25) /* Insert FCS */ #define WX_TXD_RS BIT(27) /* Report Status */ +/*********************** Adv Transmit Descriptor Config Masks ****************/ +#define WX_TXD_MAC_TSTAMP BIT(19) /* IEEE1588 time stamp */ +#define WX_TXD_DTYP_CTXT BIT(20) /* Adv Context Desc */ +#define WX_TXD_LINKSEC BIT(26) /* enable linksec */ +#define WX_TXD_VLE BIT(30) /* VLAN pkt enable */ +#define WX_TXD_TSE BIT(31) /* TCP Seg enable */ +#define WX_TXD_CC BIT(7) /* Check Context */ +#define WX_TXD_IPSEC BIT(8) /* enable ipsec esp */ +#define WX_TXD_L4CS BIT(9) +#define WX_TXD_IIPCS BIT(10) +#define WX_TXD_EIPCS BIT(11) +#define WX_TXD_PAYLEN_SHIFT 13 /* Adv desc PAYLEN shift */ +#define WX_TXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ +#define WX_TXD_TAG_TPID_SEL_SHIFT 11 + +#define WX_TXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ +#define WX_TXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ + +#define WX_TXD_OUTER_IPLEN_SHIFT 12 /* Adv ctxt OUTERIPLEN shift */ +#define WX_TXD_TUNNEL_LEN_SHIFT 21 /* Adv ctxt TUNNELLEN shift */ 
+#define WX_TXD_TUNNEL_TYPE_SHIFT 11 /* Adv Tx Desc Tunnel Type shift */ +#define WX_TXD_TUNNEL_UDP FIELD_PREP(BIT(WX_TXD_TUNNEL_TYPE_SHIFT), 0) +#define WX_TXD_TUNNEL_GRE FIELD_PREP(BIT(WX_TXD_TUNNEL_TYPE_SHIFT), 1) + +enum wx_tx_flags { + /* cmd_type flags */ + WX_TX_FLAGS_HW_VLAN = 0x01, + WX_TX_FLAGS_TSO = 0x02, + WX_TX_FLAGS_TSTAMP = 0x04, + + /* olinfo flags */ + WX_TX_FLAGS_CC = 0x08, + WX_TX_FLAGS_IPV4 = 0x10, + WX_TX_FLAGS_CSUM = 0x20, + WX_TX_FLAGS_OUTER_IPV4 = 0x100, + WX_TX_FLAGS_LINKSEC = 0x200, + WX_TX_FLAGS_IPSEC = 0x400, +}; + +/* VLAN info */ +#define WX_TX_FLAGS_VLAN_MASK GENMASK(31, 16) +#define WX_TX_FLAGS_VLAN_SHIFT 16 + /* Host Interface Command Structures */ struct wx_hic_hdr { u8 cmd; @@ -508,10 +576,25 @@ union wx_rx_desc { } wb; /* writeback */ }; +struct wx_tx_context_desc { + __le32 vlan_macip_lens; + __le32 seqnum_seed; + __le32 type_tucmd_mlhl; + __le32 mss_l4len_idx; +}; + +/* if _flag is in _input, return _result */ +#define WX_SET_FLAG(_input, _flag, _result) \ + (((_flag) <= (_result)) ? \ + ((u32)((_input) & (_flag)) * ((_result) / (_flag))) : \ + ((u32)((_input) & (_flag)) / ((_flag) / (_result)))) + #define WX_RX_DESC(R, i) \ (&(((union wx_rx_desc *)((R)->desc))[i])) #define WX_TX_DESC(R, i) \ (&(((union wx_tx_desc *)((R)->desc))[i])) +#define WX_TX_CTXTDESC(R, i) \ + (&(((struct wx_tx_context_desc *)((R)->desc))[i])) /* wrapper around a pointer to a socket buffer, * so a DMA handle can be stored along with the buffer @@ -523,6 +606,8 @@ struct wx_tx_buffer { unsigned short gso_segs; DEFINE_DMA_UNMAP_ADDR(dma); DEFINE_DMA_UNMAP_LEN(len); + __be16 protocol; + u32 tx_flags; }; struct wx_rx_buffer { From patchwork Wed May 10 09:38:40 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Mengyuan Lou X-Patchwork-Id: 13236689 X-Patchwork-Delegate: kuba@kernel.org Received: from lindbergh.monkeyblade.net (lindbergh.monkeyblade.net [23.128.96.19]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 7B2F3613A for ; Wed, 10 May 2023 09:39:20 +0000 (UTC) Received: from smtpbgbr2.qq.com (smtpbgbr2.qq.com [54.207.22.56]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 177792120 for ; Wed, 10 May 2023 02:39:15 -0700 (PDT) X-QQ-mid: bizesmtp73t1683711547t9da2wld Received: from localhost.localdomain ( [125.119.253.217]) by bizesmtp.qq.com (ESMTP) with id ; Wed, 10 May 2023 17:39:05 +0800 (CST) X-QQ-SSF: 01400000000000N0Z000000A0000000 X-QQ-FEAT: zT6n3Y95oi2u+CQiksRVmS7tAo34N+/7C/TvufYs3ljq2+DknN7nycMsLxsDB cKe5XKrTpAO1oP2kYAztC3bMVs339KGzRp3BzECAo9AR5wPxG5uc4O4y0B1vheqk+DOfju2 lhfv/+Qvnw0YRnFKhimF13cR/ZOSijyMGSTB2laMgZsyw/Sn9Uv0o854g/rAcmwyR3J56Qc 6mu61hwEmb8Vsof8zcEtMouOK5q1+exV0EQD2UwOjZEolV7dtFAhyolLLI+Kwmy9mBB5mod IRKzw6Hgxn3g9ZLvXFbWEYxcMe21BDic6SDFYdyUsOiITWttdCI43iFuXKVRusmj0od/Q21 BjXxuurNMltwGDvbDZB9i1HFcHxDzrW3t8GqVJ8e5J7X750gwdds8EvLrXMGv/cGBV9MNaj YoRabaV+Nf+4fMEdI4pqpA== X-QQ-GoodBg: 2 X-BIZMAIL-ID: 14669398910802567981 From: Mengyuan Lou To: netdev@vger.kernel.org Cc: jiawenwu@trustnetic.com, Mengyuan Lou Subject: [PATCH net-next v4 2/7] net: wangxun: libwx add rx offload functions Date: Wed, 10 May 2023 17:38:40 +0800 Message-Id: <20230510093845.47446-3-mengyuanlou@net-swift.com> X-Mailer: git-send-email 2.40.0 In-Reply-To: <20230510093845.47446-1-mengyuanlou@net-swift.com> References: <20230510093845.47446-1-mengyuanlou@net-swift.com> Precedence: bulk X-Mailing-List: 
netdev@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 X-QQ-SENDSIZE: 520 Feedback-ID: bizesmtp:net-swift.com:qybglogicsvrgz:qybglogicsvrgz5a-3 X-Spam-Status: No, score=-1.9 required=5.0 tests=BAYES_00,RCVD_IN_DNSWL_NONE, RCVD_IN_MSPIKE_H2,SPF_HELO_PASS,SPF_PASS,T_SCC_BODY_TEXT_LINE autolearn=ham autolearn_force=no version=3.4.6 X-Spam-Checker-Version: SpamAssassin 3.4.6 (2021-04-09) on lindbergh.monkeyblade.net X-Patchwork-Delegate: kuba@kernel.org Add rx offload functions for wx_clean_rx_irq which supports ngbe and txgbe to implement rx offload function. Signed-off-by: Mengyuan Lou --- drivers/net/ethernet/wangxun/libwx/wx_lib.c | 99 +++++++- drivers/net/ethernet/wangxun/libwx/wx_type.h | 238 +++++++++++++++++++ 2 files changed, 335 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c index c8e8c1ca0a69..b462a6149463 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c @@ -424,6 +424,101 @@ static bool wx_cleanup_headers(struct wx_ring *rx_ring, return false; } +static void wx_rx_hash(struct wx_ring *ring, + union wx_rx_desc *rx_desc, + struct sk_buff *skb) +{ + u16 rss_type; + + if (!(ring->netdev->features & NETIF_F_RXHASH)) + return; + + rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) & + WX_RXD_RSSTYPE_MASK; + + if (!rss_type) + return; + + skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss), + (WX_RSS_L4_TYPES_MASK & (1ul << rss_type)) ? + PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3); +} + +/** + * wx_rx_checksum - indicate in skb if hw indicated a good cksum + * @ring: structure containing ring specific data + * @rx_desc: current Rx descriptor being processed + * @skb: skb currently being received and modified + **/ +static void wx_rx_checksum(struct wx_ring *ring, + union wx_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct wx_dec_ptype dptype = wx_decode_ptype(WX_RXD_PKTTYPE(rx_desc)); + + skb->ip_summed = CHECKSUM_NONE; + skb_checksum_none_assert(skb); + /* Rx csum disabled */ + if (!(ring->netdev->features & NETIF_F_RXCSUM)) + return; + + /* if IPv4 header checksum error */ + if ((wx_test_staterr(rx_desc, WX_RXD_STAT_IPCS) && + wx_test_staterr(rx_desc, WX_RXD_ERR_IPE)) || + (wx_test_staterr(rx_desc, WX_RXD_STAT_OUTERIPCS) && + wx_test_staterr(rx_desc, WX_RXD_ERR_OUTERIPER))) { + ring->rx_stats.csum_err++; + return; + } + + /* L4 checksum offload flag must set for the below code to work */ + if (!wx_test_staterr(rx_desc, WX_RXD_STAT_L4CS)) + return; + + /*likely incorrect csum if IPv6 Dest Header found */ + if (dptype.prot != WX_DEC_PTYPE_PROT_SCTP && WX_RXD_IPV6EX(rx_desc)) + return; + + /* if L4 checksum error */ + if (wx_test_staterr(rx_desc, WX_RXD_ERR_TCPE)) { + ring->rx_stats.csum_err++; + return; + } + + /* If there is an outer header present that might contain a checksum + * we need to bump the checksum level by 1 to reflect the fact that + * we are indicating we validated the inner checksum. 
+ */ + if (dptype.etype >= WX_DEC_PTYPE_ETYPE_IG) { + skb->csum_level = 1; + skb->encapsulation = 1; + } + + /* It must be a TCP or UDP or SCTP packet with a valid checksum */ + skb->ip_summed = CHECKSUM_UNNECESSARY; + ring->rx_stats.csum_good_cnt++; +} + +/** + * wx_process_skb_fields - Populate skb header fields from Rx descriptor + * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being populated + * + * This function checks the ring, descriptor, and packet information in + * order to populate the hash, checksum, VLAN, timestamp, protocol, and + * other fields within the skb. + **/ +static void wx_process_skb_fields(struct wx_ring *rx_ring, + union wx_rx_desc *rx_desc, + struct sk_buff *skb) +{ + wx_rx_hash(rx_ring, rx_desc, skb); + wx_rx_checksum(rx_ring, rx_desc, skb); + skb_record_rx_queue(skb, rx_ring->queue_index); + skb->protocol = eth_type_trans(skb, rx_ring->netdev); +} + /** * wx_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf * @q_vector: structure containing interrupt and ring information @@ -491,8 +586,8 @@ static int wx_clean_rx_irq(struct wx_q_vector *q_vector, /* probably a little skewed due to removing CRC */ total_rx_bytes += skb->len; - skb_record_rx_queue(skb, rx_ring->queue_index); - skb->protocol = eth_type_trans(skb, rx_ring->netdev); + /* populate checksum, timestamp, VLAN, and protocol */ + wx_process_skb_fields(rx_ring, rx_desc, skb); napi_gro_receive(&q_vector->napi, skb); /* update budget accounting */ diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h index 70f5fd168e40..69a9ed7bc2df 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_type.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h @@ -321,8 +321,31 @@ /******************* Receive Descriptor bit definitions **********************/ #define WX_RXD_STAT_DD BIT(0) /* Done */ #define WX_RXD_STAT_EOP BIT(1) /* End of Packet */ +#define WX_RXD_STAT_L4CS BIT(7) /* L4 xsum calculated */ +#define WX_RXD_STAT_IPCS BIT(8) /* IP xsum calculated */ +#define WX_RXD_STAT_OUTERIPCS BIT(10) /* Cloud IP xsum calculated*/ +#define WX_RXD_ERR_OUTERIPER BIT(26) /* CRC IP Header error */ #define WX_RXD_ERR_RXE BIT(29) /* Any MAC Error */ +#define WX_RXD_ERR_TCPE BIT(30) /* TCP/UDP Checksum Error */ +#define WX_RXD_ERR_IPE BIT(31) /* IP Checksum Error */ + +/* RSS Hash results */ +#define WX_RXD_RSSTYPE_MASK GENMASK(3, 0) +#define WX_RXD_RSSTYPE_IPV4_TCP 0x00000001U +#define WX_RXD_RSSTYPE_IPV6_TCP 0x00000003U +#define WX_RXD_RSSTYPE_IPV4_SCTP 0x00000004U +#define WX_RXD_RSSTYPE_IPV6_SCTP 0x00000006U +#define WX_RXD_RSSTYPE_IPV4_UDP 0x00000007U +#define WX_RXD_RSSTYPE_IPV6_UDP 0x00000008U + +#define WX_RSS_L4_TYPES_MASK \ + ((1ul << WX_RXD_RSSTYPE_IPV4_TCP) | \ + (1ul << WX_RXD_RSSTYPE_IPV4_UDP) | \ + (1ul << WX_RXD_RSSTYPE_IPV4_SCTP) | \ + (1ul << WX_RXD_RSSTYPE_IPV6_TCP) | \ + (1ul << WX_RXD_RSSTYPE_IPV6_UDP) | \ + (1ul << WX_RXD_RSSTYPE_IPV6_SCTP)) /** * receive packet type @@ -351,6 +374,10 @@ #define WX_PTYPE_TYP_TCP 0x04 #define WX_PTYPE_TYP_SCTP 0x05 +#define WX_RXD_PKTTYPE(_rxd) \ + ((le32_to_cpu((_rxd)->wb.lower.lo_dword.data) >> 9) & 0xFF) +#define WX_RXD_IPV6EX(_rxd) \ + ((le32_to_cpu((_rxd)->wb.lower.lo_dword.data) >> 6) & 0x1) /*********************** Transmit Descriptor Config Masks ****************/ #define WX_TXD_STAT_DD BIT(0) /* Descriptor Done */ #define WX_TXD_DTYP_DATA 0 /* Adv Data Descriptor */ @@ -402,6 +429,208 @@ enum wx_tx_flags { 
#define WX_TX_FLAGS_VLAN_MASK GENMASK(31, 16) #define WX_TX_FLAGS_VLAN_SHIFT 16 +/* wx_dec_ptype.mac: outer mac */ +enum wx_dec_ptype_mac { + WX_DEC_PTYPE_MAC_IP = 0, + WX_DEC_PTYPE_MAC_L2 = 2, + WX_DEC_PTYPE_MAC_FCOE = 3, +}; + +/* wx_dec_ptype.[e]ip: outer&encaped ip */ +#define WX_DEC_PTYPE_IP_FRAG 0x4 +enum wx_dec_ptype_ip { + WX_DEC_PTYPE_IP_NONE = 0, + WX_DEC_PTYPE_IP_IPV4 = 1, + WX_DEC_PTYPE_IP_IPV6 = 2, + WX_DEC_PTYPE_IP_FGV4 = WX_DEC_PTYPE_IP_FRAG | WX_DEC_PTYPE_IP_IPV4, + WX_DEC_PTYPE_IP_FGV6 = WX_DEC_PTYPE_IP_FRAG | WX_DEC_PTYPE_IP_IPV6, +}; + +/* wx_dec_ptype.etype: encaped type */ +enum wx_dec_ptype_etype { + WX_DEC_PTYPE_ETYPE_NONE = 0, + WX_DEC_PTYPE_ETYPE_IPIP = 1, /* IP+IP */ + WX_DEC_PTYPE_ETYPE_IG = 2, /* IP+GRE */ + WX_DEC_PTYPE_ETYPE_IGM = 3, /* IP+GRE+MAC */ + WX_DEC_PTYPE_ETYPE_IGMV = 4, /* IP+GRE+MAC+VLAN */ +}; + +/* wx_dec_ptype.proto: payload proto */ +enum wx_dec_ptype_prot { + WX_DEC_PTYPE_PROT_NONE = 0, + WX_DEC_PTYPE_PROT_UDP = 1, + WX_DEC_PTYPE_PROT_TCP = 2, + WX_DEC_PTYPE_PROT_SCTP = 3, + WX_DEC_PTYPE_PROT_ICMP = 4, + WX_DEC_PTYPE_PROT_TS = 5, /* time sync */ +}; + +/* wx_dec_ptype.layer: payload layer */ +enum wx_dec_ptype_layer { + WX_DEC_PTYPE_LAYER_NONE = 0, + WX_DEC_PTYPE_LAYER_PAY2 = 1, + WX_DEC_PTYPE_LAYER_PAY3 = 2, + WX_DEC_PTYPE_LAYER_PAY4 = 3, +}; + +struct wx_dec_ptype { + u32 known:1; + u32 mac:2; /* outer mac */ + u32 ip:3; /* outer ip*/ + u32 etype:3; /* encaped type */ + u32 eip:3; /* encaped ip */ + u32 prot:4; /* payload proto */ + u32 layer:3; /* payload layer */ +}; + +/* macro to make the table lines short */ +#define WX_PTT(mac, ip, etype, eip, proto, layer)\ + {1, \ + WX_DEC_PTYPE_MAC_##mac, /* mac */\ + WX_DEC_PTYPE_IP_##ip, /* ip */ \ + WX_DEC_PTYPE_ETYPE_##etype, /* etype */\ + WX_DEC_PTYPE_IP_##eip, /* eip */\ + WX_DEC_PTYPE_PROT_##proto, /* proto */\ + WX_DEC_PTYPE_LAYER_##layer /* layer */} + +/* Lookup table mapping the HW PTYPE to the bit field for decoding */ +static struct wx_dec_ptype wx_ptype_lookup[256] = { + /* L2: mac */ + [0x11] = WX_PTT(L2, NONE, NONE, NONE, NONE, PAY2), + [0x12] = WX_PTT(L2, NONE, NONE, NONE, TS, PAY2), + [0x13] = WX_PTT(L2, NONE, NONE, NONE, NONE, PAY2), + [0x14] = WX_PTT(L2, NONE, NONE, NONE, NONE, PAY2), + [0x15] = WX_PTT(L2, NONE, NONE, NONE, NONE, NONE), + [0x16] = WX_PTT(L2, NONE, NONE, NONE, NONE, PAY2), + [0x17] = WX_PTT(L2, NONE, NONE, NONE, NONE, NONE), + + /* L2: ethertype filter */ + [0x18 ... 0x1F] = WX_PTT(L2, NONE, NONE, NONE, NONE, NONE), + + /* L3: ip non-tunnel */ + [0x21] = WX_PTT(IP, FGV4, NONE, NONE, NONE, PAY3), + [0x22] = WX_PTT(IP, IPV4, NONE, NONE, NONE, PAY3), + [0x23] = WX_PTT(IP, IPV4, NONE, NONE, UDP, PAY4), + [0x24] = WX_PTT(IP, IPV4, NONE, NONE, TCP, PAY4), + [0x25] = WX_PTT(IP, IPV4, NONE, NONE, SCTP, PAY4), + [0x29] = WX_PTT(IP, FGV6, NONE, NONE, NONE, PAY3), + [0x2A] = WX_PTT(IP, IPV6, NONE, NONE, NONE, PAY3), + [0x2B] = WX_PTT(IP, IPV6, NONE, NONE, UDP, PAY3), + [0x2C] = WX_PTT(IP, IPV6, NONE, NONE, TCP, PAY4), + [0x2D] = WX_PTT(IP, IPV6, NONE, NONE, SCTP, PAY4), + + /* L2: fcoe */ + [0x30 ... 0x34] = WX_PTT(FCOE, NONE, NONE, NONE, NONE, PAY3), + [0x38 ... 
0x3C] = WX_PTT(FCOE, NONE, NONE, NONE, NONE, PAY3), + + /* IPv4 --> IPv4/IPv6 */ + [0x81] = WX_PTT(IP, IPV4, IPIP, FGV4, NONE, PAY3), + [0x82] = WX_PTT(IP, IPV4, IPIP, IPV4, NONE, PAY3), + [0x83] = WX_PTT(IP, IPV4, IPIP, IPV4, UDP, PAY4), + [0x84] = WX_PTT(IP, IPV4, IPIP, IPV4, TCP, PAY4), + [0x85] = WX_PTT(IP, IPV4, IPIP, IPV4, SCTP, PAY4), + [0x89] = WX_PTT(IP, IPV4, IPIP, FGV6, NONE, PAY3), + [0x8A] = WX_PTT(IP, IPV4, IPIP, IPV6, NONE, PAY3), + [0x8B] = WX_PTT(IP, IPV4, IPIP, IPV6, UDP, PAY4), + [0x8C] = WX_PTT(IP, IPV4, IPIP, IPV6, TCP, PAY4), + [0x8D] = WX_PTT(IP, IPV4, IPIP, IPV6, SCTP, PAY4), + + /* IPv4 --> GRE/NAT --> NONE/IPv4/IPv6 */ + [0x90] = WX_PTT(IP, IPV4, IG, NONE, NONE, PAY3), + [0x91] = WX_PTT(IP, IPV4, IG, FGV4, NONE, PAY3), + [0x92] = WX_PTT(IP, IPV4, IG, IPV4, NONE, PAY3), + [0x93] = WX_PTT(IP, IPV4, IG, IPV4, UDP, PAY4), + [0x94] = WX_PTT(IP, IPV4, IG, IPV4, TCP, PAY4), + [0x95] = WX_PTT(IP, IPV4, IG, IPV4, SCTP, PAY4), + [0x99] = WX_PTT(IP, IPV4, IG, FGV6, NONE, PAY3), + [0x9A] = WX_PTT(IP, IPV4, IG, IPV6, NONE, PAY3), + [0x9B] = WX_PTT(IP, IPV4, IG, IPV6, UDP, PAY4), + [0x9C] = WX_PTT(IP, IPV4, IG, IPV6, TCP, PAY4), + [0x9D] = WX_PTT(IP, IPV4, IG, IPV6, SCTP, PAY4), + + /* IPv4 --> GRE/NAT --> MAC --> NONE/IPv4/IPv6 */ + [0xA0] = WX_PTT(IP, IPV4, IGM, NONE, NONE, PAY3), + [0xA1] = WX_PTT(IP, IPV4, IGM, FGV4, NONE, PAY3), + [0xA2] = WX_PTT(IP, IPV4, IGM, IPV4, NONE, PAY3), + [0xA3] = WX_PTT(IP, IPV4, IGM, IPV4, UDP, PAY4), + [0xA4] = WX_PTT(IP, IPV4, IGM, IPV4, TCP, PAY4), + [0xA5] = WX_PTT(IP, IPV4, IGM, IPV4, SCTP, PAY4), + [0xA9] = WX_PTT(IP, IPV4, IGM, FGV6, NONE, PAY3), + [0xAA] = WX_PTT(IP, IPV4, IGM, IPV6, NONE, PAY3), + [0xAB] = WX_PTT(IP, IPV4, IGM, IPV6, UDP, PAY4), + [0xAC] = WX_PTT(IP, IPV4, IGM, IPV6, TCP, PAY4), + [0xAD] = WX_PTT(IP, IPV4, IGM, IPV6, SCTP, PAY4), + + /* IPv4 --> GRE/NAT --> MAC+VLAN --> NONE/IPv4/IPv6 */ + [0xB0] = WX_PTT(IP, IPV4, IGMV, NONE, NONE, PAY3), + [0xB1] = WX_PTT(IP, IPV4, IGMV, FGV4, NONE, PAY3), + [0xB2] = WX_PTT(IP, IPV4, IGMV, IPV4, NONE, PAY3), + [0xB3] = WX_PTT(IP, IPV4, IGMV, IPV4, UDP, PAY4), + [0xB4] = WX_PTT(IP, IPV4, IGMV, IPV4, TCP, PAY4), + [0xB5] = WX_PTT(IP, IPV4, IGMV, IPV4, SCTP, PAY4), + [0xB9] = WX_PTT(IP, IPV4, IGMV, FGV6, NONE, PAY3), + [0xBA] = WX_PTT(IP, IPV4, IGMV, IPV6, NONE, PAY3), + [0xBB] = WX_PTT(IP, IPV4, IGMV, IPV6, UDP, PAY4), + [0xBC] = WX_PTT(IP, IPV4, IGMV, IPV6, TCP, PAY4), + [0xBD] = WX_PTT(IP, IPV4, IGMV, IPV6, SCTP, PAY4), + + /* IPv6 --> IPv4/IPv6 */ + [0xC1] = WX_PTT(IP, IPV6, IPIP, FGV4, NONE, PAY3), + [0xC2] = WX_PTT(IP, IPV6, IPIP, IPV4, NONE, PAY3), + [0xC3] = WX_PTT(IP, IPV6, IPIP, IPV4, UDP, PAY4), + [0xC4] = WX_PTT(IP, IPV6, IPIP, IPV4, TCP, PAY4), + [0xC5] = WX_PTT(IP, IPV6, IPIP, IPV4, SCTP, PAY4), + [0xC9] = WX_PTT(IP, IPV6, IPIP, FGV6, NONE, PAY3), + [0xCA] = WX_PTT(IP, IPV6, IPIP, IPV6, NONE, PAY3), + [0xCB] = WX_PTT(IP, IPV6, IPIP, IPV6, UDP, PAY4), + [0xCC] = WX_PTT(IP, IPV6, IPIP, IPV6, TCP, PAY4), + [0xCD] = WX_PTT(IP, IPV6, IPIP, IPV6, SCTP, PAY4), + + /* IPv6 --> GRE/NAT -> NONE/IPv4/IPv6 */ + [0xD0] = WX_PTT(IP, IPV6, IG, NONE, NONE, PAY3), + [0xD1] = WX_PTT(IP, IPV6, IG, FGV4, NONE, PAY3), + [0xD2] = WX_PTT(IP, IPV6, IG, IPV4, NONE, PAY3), + [0xD3] = WX_PTT(IP, IPV6, IG, IPV4, UDP, PAY4), + [0xD4] = WX_PTT(IP, IPV6, IG, IPV4, TCP, PAY4), + [0xD5] = WX_PTT(IP, IPV6, IG, IPV4, SCTP, PAY4), + [0xD9] = WX_PTT(IP, IPV6, IG, FGV6, NONE, PAY3), + [0xDA] = WX_PTT(IP, IPV6, IG, IPV6, NONE, PAY3), + [0xDB] = WX_PTT(IP, IPV6, IG, IPV6, UDP, PAY4), + [0xDC] = WX_PTT(IP, IPV6, 
IG, IPV6, TCP, PAY4), + [0xDD] = WX_PTT(IP, IPV6, IG, IPV6, SCTP, PAY4), + + /* IPv6 --> GRE/NAT -> MAC -> NONE/IPv4/IPv6 */ + [0xE0] = WX_PTT(IP, IPV6, IGM, NONE, NONE, PAY3), + [0xE1] = WX_PTT(IP, IPV6, IGM, FGV4, NONE, PAY3), + [0xE2] = WX_PTT(IP, IPV6, IGM, IPV4, NONE, PAY3), + [0xE3] = WX_PTT(IP, IPV6, IGM, IPV4, UDP, PAY4), + [0xE4] = WX_PTT(IP, IPV6, IGM, IPV4, TCP, PAY4), + [0xE5] = WX_PTT(IP, IPV6, IGM, IPV4, SCTP, PAY4), + [0xE9] = WX_PTT(IP, IPV6, IGM, FGV6, NONE, PAY3), + [0xEA] = WX_PTT(IP, IPV6, IGM, IPV6, NONE, PAY3), + [0xEB] = WX_PTT(IP, IPV6, IGM, IPV6, UDP, PAY4), + [0xEC] = WX_PTT(IP, IPV6, IGM, IPV6, TCP, PAY4), + [0xED] = WX_PTT(IP, IPV6, IGM, IPV6, SCTP, PAY4), + + /* IPv6 --> GRE/NAT -> MAC--> NONE/IPv */ + [0xF0] = WX_PTT(IP, IPV6, IGMV, NONE, NONE, PAY3), + [0xF1] = WX_PTT(IP, IPV6, IGMV, FGV4, NONE, PAY3), + [0xF2] = WX_PTT(IP, IPV6, IGMV, IPV4, NONE, PAY3), + [0xF3] = WX_PTT(IP, IPV6, IGMV, IPV4, UDP, PAY4), + [0xF4] = WX_PTT(IP, IPV6, IGMV, IPV4, TCP, PAY4), + [0xF5] = WX_PTT(IP, IPV6, IGMV, IPV4, SCTP, PAY4), + [0xF9] = WX_PTT(IP, IPV6, IGMV, FGV6, NONE, PAY3), + [0xFA] = WX_PTT(IP, IPV6, IGMV, IPV6, NONE, PAY3), + [0xFB] = WX_PTT(IP, IPV6, IGMV, IPV6, UDP, PAY4), + [0xFC] = WX_PTT(IP, IPV6, IGMV, IPV6, TCP, PAY4), + [0xFD] = WX_PTT(IP, IPV6, IGMV, IPV6, SCTP, PAY4), +}; + +static inline struct wx_dec_ptype wx_decode_ptype(const u8 ptype) +{ + return wx_ptype_lookup[ptype]; +} + /* Host Interface Command Structures */ struct wx_hic_hdr { u8 cmd; @@ -624,6 +853,11 @@ struct wx_queue_stats { u64 bytes; }; +struct wx_rx_queue_stats { + u64 csum_good_cnt; + u64 csum_err; +}; + /* iterator for handling rings in ring container */ #define wx_for_each_ring(posm, headm) \ for (posm = (headm).ring; posm; posm = posm->next) @@ -665,6 +899,9 @@ struct wx_ring { struct wx_queue_stats stats; struct u64_stats_sync syncp; + union { + struct wx_rx_queue_stats rx_stats; + }; } ____cacheline_internodealigned_in_smp; struct wx_q_vector { @@ -680,6 +917,7 @@ struct wx_q_vector { struct napi_struct napi; struct rcu_head rcu; /* to avoid race with update stats on free */ + bool netpoll_rx; char name[IFNAMSIZ + 17]; /* for dynamic allocation of rings associated with this q_vector */ From patchwork Wed May 10 09:38:41 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Mengyuan Lou X-Patchwork-Id: 13236691 X-Patchwork-Delegate: kuba@kernel.org Received: from lindbergh.monkeyblade.net (lindbergh.monkeyblade.net [23.128.96.19]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 6B6606D38 for ; Wed, 10 May 2023 09:39:29 +0000 (UTC) Received: from smtpbguseast1.qq.com (smtpbguseast1.qq.com [54.204.34.129]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 3AD7512C for ; Wed, 10 May 2023 02:39:25 -0700 (PDT) X-QQ-mid: bizesmtp73t1683711551t4pelavi Received: from localhost.localdomain ( [125.119.253.217]) by bizesmtp.qq.com (ESMTP) with id ; Wed, 10 May 2023 17:39:09 +0800 (CST) X-QQ-SSF: 01400000000000N0Z000000A0000000 X-QQ-FEAT: DViCT0MMEKx1znKdP1ByG+KLUfOl/datTVYPnulC1UAtvPooS4IerGSYIAojS xoE4VEE8IUZdjGod1DmRxjPFj4SBVw/iIJuudGsqCy5BZh4vFBwm5yUBBvi+zfjA4CVCCIN eERPr7nSGCwhAljZLwFehfuDVw1869B4MIFsI0bydFrJ015gNsdqm39UWiNlSksMnswQHtj tbRBqR1o/Fz0iKZNlsDluMiOzPEmS4Km6gPAj7AM/OUokdH2bKQcQ5InU5QJFXMjT5ovJ+Z fjr6GJh2j13RrbvqgtqlxVyG5F6iBIhfMvYEUgB80KPFK2lYH2h/rNGn+UQZsnYekpLuHQY 
fizKPigtSfv0g4rDQhqZl5+HWKtNeJQ1S82mYRnOl/2jPq8uYZ/HRBVhh354teeVuPzRqbJ yNe23DJet1r/OJlh+P5aww== X-QQ-GoodBg: 2 X-BIZMAIL-ID: 10409871982555373245 From: Mengyuan Lou To: netdev@vger.kernel.org Cc: jiawenwu@trustnetic.com, Mengyuan Lou Subject: [PATCH net-next v4 3/7] net: wangxun: Implement vlan add and kill functions Date: Wed, 10 May 2023 17:38:41 +0800 Message-Id: <20230510093845.47446-4-mengyuanlou@net-swift.com> X-Mailer: git-send-email 2.40.0 In-Reply-To: <20230510093845.47446-1-mengyuanlou@net-swift.com> References: <20230510093845.47446-1-mengyuanlou@net-swift.com> Precedence: bulk X-Mailing-List: netdev@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 X-QQ-SENDSIZE: 520 Feedback-ID: bizesmtp:net-swift.com:qybglogicsvrgz:qybglogicsvrgz5a-3 X-Spam-Status: No, score=-1.9 required=5.0 tests=BAYES_00,RCVD_IN_DNSWL_NONE, RCVD_IN_MSPIKE_H2,SPF_HELO_PASS,SPF_PASS,T_SCC_BODY_TEXT_LINE autolearn=ham autolearn_force=no version=3.4.6 X-Spam-Checker-Version: SpamAssassin 3.4.6 (2021-04-09) on lindbergh.monkeyblade.net X-Patchwork-Delegate: kuba@kernel.org Implement vlan add/kill functions which add and remove vlan id in hardware. Signed-off-by: Mengyuan Lou --- drivers/net/ethernet/wangxun/libwx/wx_hw.c | 275 ++++++++++++++++++- drivers/net/ethernet/wangxun/libwx/wx_hw.h | 3 + drivers/net/ethernet/wangxun/libwx/wx_lib.c | 18 ++ drivers/net/ethernet/wangxun/libwx/wx_type.h | 31 +++ 4 files changed, 326 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c index ca409b4054d0..4b7baeb6c568 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c @@ -1182,12 +1182,30 @@ static void wx_enable_sec_rx_path(struct wx *wx) WX_WRITE_FLUSH(wx); } +static void wx_vlan_strip_control(struct wx *wx, bool enable) +{ + int i, j; + + for (i = 0; i < wx->num_rx_queues; i++) { + struct wx_ring *ring = wx->rx_ring[i]; + + if (ring->accel) + continue; + j = ring->reg_idx; + wr32m(wx, WX_PX_RR_CFG(j), WX_PX_RR_CFG_VLAN, + enable ? 
WX_PX_RR_CFG_VLAN : 0); + } +} + void wx_set_rx_mode(struct net_device *netdev) { struct wx *wx = netdev_priv(netdev); + netdev_features_t features; u32 fctrl, vmolr, vlnctrl; int count; + features = netdev->features; + /* Check for Promiscuous and All Multicast modes */ fctrl = rd32(wx, WX_PSR_CTL); fctrl &= ~(WX_PSR_CTL_UPE | WX_PSR_CTL_MPE); @@ -1254,6 +1272,13 @@ void wx_set_rx_mode(struct net_device *netdev) wr32(wx, WX_PSR_VLAN_CTL, vlnctrl); wr32(wx, WX_PSR_CTL, fctrl); wr32(wx, WX_PSR_VM_L2CTL(0), vmolr); + + if ((features & NETIF_F_HW_VLAN_CTAG_RX) && + (features & NETIF_F_HW_VLAN_STAG_RX)) + wx_vlan_strip_control(wx, true); + else + wx_vlan_strip_control(wx, false); + } EXPORT_SYMBOL(wx_set_rx_mode); @@ -1462,6 +1487,16 @@ static void wx_configure_tx(struct wx *wx) WX_MAC_TX_CFG_TE, WX_MAC_TX_CFG_TE); } +static void wx_restore_vlan(struct wx *wx) +{ + u16 vid = 1; + + wx_vlan_rx_add_vid(wx->netdev, htons(ETH_P_8021Q), 0); + + for_each_set_bit_from(vid, wx->active_vlans, VLAN_N_VID) + wx_vlan_rx_add_vid(wx->netdev, htons(ETH_P_8021Q), vid); +} + /** * wx_configure_rx - Configure Receive Unit after Reset * @wx: pointer to private structure @@ -1527,7 +1562,7 @@ void wx_configure(struct wx *wx) wx_configure_port(wx); wx_set_rx_mode(wx->netdev); - + wx_restore_vlan(wx); wx_enable_sec_rx_path(wx); wx_configure_tx(wx); @@ -1727,4 +1762,242 @@ int wx_sw_init(struct wx *wx) } EXPORT_SYMBOL(wx_sw_init); +/** + * wx_find_vlvf_slot - find the vlanid or the first empty slot + * @wx: pointer to hardware structure + * @vlan: VLAN id to write to VLAN filter + * + * return the VLVF index where this VLAN id should be placed + * + **/ +static int wx_find_vlvf_slot(struct wx *wx, u32 vlan) +{ + u32 bits = 0, first_empty_slot = 0; + int regindex; + + /* short cut the special case */ + if (vlan == 0) + return 0; + + /* Search for the vlan id in the VLVF entries. Save off the first empty + * slot found along the way + */ + for (regindex = 1; regindex < WX_PSR_VLAN_SWC_ENTRIES; regindex++) { + wr32(wx, WX_PSR_VLAN_SWC_IDX, regindex); + bits = rd32(wx, WX_PSR_VLAN_SWC); + if (!bits && !(first_empty_slot)) + first_empty_slot = regindex; + else if ((bits & 0x0FFF) == vlan) + break; + } + + if (regindex >= WX_PSR_VLAN_SWC_ENTRIES) { + if (first_empty_slot) + regindex = first_empty_slot; + else + regindex = -ENOMEM; + } + + return regindex; +} + +/** + * wx_set_vlvf - Set VLAN Pool Filter + * @wx: pointer to hardware structure + * @vlan: VLAN id to write to VLAN filter + * @vind: VMDq output index that maps queue to VLAN id in VFVFB + * @vlan_on: boolean flag to turn on/off VLAN in VFVF + * @vfta_changed: pointer to boolean flag which indicates whether VFTA + * should be changed + * + * Turn on/off specified bit in VLVF table. 
+ **/ +static int wx_set_vlvf(struct wx *wx, u32 vlan, u32 vind, bool vlan_on, + bool *vfta_changed) +{ + u32 vt; + + /* If VT Mode is set + * Either vlan_on + * make sure the vlan is in VLVF + * set the vind bit in the matching VLVFB + * Or !vlan_on + * clear the pool bit and possibly the vind + */ + vt = rd32(wx, WX_CFG_PORT_CTL); + if (vt & WX_CFG_PORT_CTL_NUM_VT_MASK) { + s32 vlvf_index; + u32 bits; + + vlvf_index = wx_find_vlvf_slot(wx, vlan); + if (vlvf_index < 0) + return vlvf_index; + + wr32(wx, WX_PSR_VLAN_SWC_IDX, vlvf_index); + if (vlan_on) { + /* set the pool bit */ + if (vind < 32) { + bits = rd32(wx, WX_PSR_VLAN_SWC_VM_L); + bits |= (1 << vind); + wr32(wx, WX_PSR_VLAN_SWC_VM_L, bits); + } else { + bits = rd32(wx, WX_PSR_VLAN_SWC_VM_H); + bits |= (1 << (vind - 32)); + wr32(wx, WX_PSR_VLAN_SWC_VM_H, bits); + } + } else { + /* clear the pool bit */ + if (vind < 32) { + bits = rd32(wx, WX_PSR_VLAN_SWC_VM_L); + bits &= ~(1 << vind); + wr32(wx, WX_PSR_VLAN_SWC_VM_L, bits); + bits |= rd32(wx, WX_PSR_VLAN_SWC_VM_H); + } else { + bits = rd32(wx, WX_PSR_VLAN_SWC_VM_H); + bits &= ~(1 << (vind - 32)); + wr32(wx, WX_PSR_VLAN_SWC_VM_H, bits); + bits |= rd32(wx, WX_PSR_VLAN_SWC_VM_L); + } + } + + if (bits) { + wr32(wx, WX_PSR_VLAN_SWC, (WX_PSR_VLAN_SWC_VIEN | vlan)); + if (!vlan_on && vfta_changed) + *vfta_changed = false; + } else { + wr32(wx, WX_PSR_VLAN_SWC, 0); + } + } + + return 0; +} + +/** + * wx_set_vfta - Set VLAN filter table + * @wx: pointer to hardware structure + * @vlan: VLAN id to write to VLAN filter + * @vind: VMDq output index that maps queue to VLAN id in VFVFB + * @vlan_on: boolean flag to turn on/off VLAN in VFVF + * + * Turn on/off specified VLAN in the VLAN filter table. + **/ +static int wx_set_vfta(struct wx *wx, u32 vlan, u32 vind, bool vlan_on) +{ + u32 bitindex, vfta, targetbit; + bool vfta_changed = false; + int regindex, ret; + + /* this is a 2 part operation - first the VFTA, then the + * VLVF and VLVFB if VT Mode is set + * We don't write the VFTA until we know the VLVF part succeeded. 
+ */ + + /* Part 1 + * The VFTA is a bitstring made up of 128 32-bit registers + * that enable the particular VLAN id, much like the MTA: + * bits[11-5]: which register + * bits[4-0]: which bit in the register + */ + regindex = (vlan >> 5) & 0x7F; + bitindex = vlan & 0x1F; + targetbit = (1 << bitindex); + /* errata 5 */ + vfta = wx->mac.vft_shadow[regindex]; + if (vlan_on) { + if (!(vfta & targetbit)) { + vfta |= targetbit; + vfta_changed = true; + } + } else { + if ((vfta & targetbit)) { + vfta &= ~targetbit; + vfta_changed = true; + } + } + /* Part 2 + * Call wx_set_vlvf to set VLVFB and VLVF + */ + ret = wx_set_vlvf(wx, vlan, vind, vlan_on, &vfta_changed); + if (ret != 0) + return ret; + + if (vfta_changed) + wr32(wx, WX_PSR_VLAN_TBL(regindex), vfta); + wx->mac.vft_shadow[regindex] = vfta; + + return 0; +} + +/** + * wx_clear_vfta - Clear VLAN filter table + * @wx: pointer to hardware structure + * + * Clears the VLAN filer table, and the VMDq index associated with the filter + **/ +static void wx_clear_vfta(struct wx *wx) +{ + u32 offset; + + for (offset = 0; offset < wx->mac.vft_size; offset++) { + wr32(wx, WX_PSR_VLAN_TBL(offset), 0); + wx->mac.vft_shadow[offset] = 0; + } + + for (offset = 0; offset < WX_PSR_VLAN_SWC_ENTRIES; offset++) { + wr32(wx, WX_PSR_VLAN_SWC_IDX, offset); + wr32(wx, WX_PSR_VLAN_SWC, 0); + wr32(wx, WX_PSR_VLAN_SWC_VM_L, 0); + wr32(wx, WX_PSR_VLAN_SWC_VM_H, 0); + } +} + +int wx_vlan_rx_add_vid(struct net_device *netdev, + __be16 proto, u16 vid) +{ + struct wx *wx = netdev_priv(netdev); + + /* add VID to filter table */ + wx_set_vfta(wx, vid, VMDQ_P(0), true); + set_bit(vid, wx->active_vlans); + + return 0; +} +EXPORT_SYMBOL(wx_vlan_rx_add_vid); + +int wx_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid) +{ + struct wx *wx = netdev_priv(netdev); + + /* remove VID from filter table */ + if (vid) + wx_set_vfta(wx, vid, VMDQ_P(0), false); + clear_bit(vid, wx->active_vlans); + + return 0; +} +EXPORT_SYMBOL(wx_vlan_rx_kill_vid); + +/** + * wx_start_hw - Prepare hardware for Tx/Rx + * @wx: pointer to hardware structure + * + * Starts the hardware using the generic start_hw function + * and the generation start_hw function. + * Then performs revision-specific operations, if any. 
+ **/ +void wx_start_hw(struct wx *wx) +{ + int i; + + /* Clear the VLAN filter table */ + wx_clear_vfta(wx); + WX_WRITE_FLUSH(wx); + /* Clear the rate limiters */ + for (i = 0; i < wx->mac.max_tx_queues; i++) { + wr32(wx, WX_TDM_RP_IDX, i); + wr32(wx, WX_TDM_RP_RATE, 0); + } +} +EXPORT_SYMBOL(wx_start_hw); + MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.h b/drivers/net/ethernet/wangxun/libwx/wx_hw.h index c173c56f0ab5..1f93ca32c921 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_hw.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.h @@ -26,10 +26,13 @@ void wx_set_rx_mode(struct net_device *netdev); int wx_change_mtu(struct net_device *netdev, int new_mtu); void wx_disable_rx_queue(struct wx *wx, struct wx_ring *ring); void wx_configure(struct wx *wx); +void wx_start_hw(struct wx *wx); int wx_disable_pcie_master(struct wx *wx); int wx_stop_adapter(struct wx *wx); void wx_reset_misc(struct wx *wx); int wx_get_pcie_msix_counts(struct wx *wx, u16 *msix_count, u16 max_msix_count); int wx_sw_init(struct wx *wx); +int wx_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid); +int wx_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid); #endif /* _WX_HW_H_ */ diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c index b462a6149463..024e6604a385 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c @@ -499,6 +499,23 @@ static void wx_rx_checksum(struct wx_ring *ring, ring->rx_stats.csum_good_cnt++; } +static void wx_rx_vlan(struct wx_ring *ring, union wx_rx_desc *rx_desc, + struct sk_buff *skb) +{ + u16 ethertype; + u8 idx = 0; + + if ((ring->netdev->features & + (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) && + wx_test_staterr(rx_desc, WX_RXD_STAT_VP)) { + idx = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) & + 0x1c0) >> 6; + ethertype = ring->q_vector->wx->tpid[idx]; + __vlan_hwaccel_put_tag(skb, htons(ethertype), + le16_to_cpu(rx_desc->wb.upper.vlan)); + } +} + /** * wx_process_skb_fields - Populate skb header fields from Rx descriptor * @rx_ring: rx descriptor ring packet is being transacted on @@ -515,6 +532,7 @@ static void wx_process_skb_fields(struct wx_ring *rx_ring, { wx_rx_hash(rx_ring, rx_desc, skb); wx_rx_checksum(rx_ring, rx_desc, skb); + wx_rx_vlan(rx_ring, rx_desc, skb); skb_record_rx_queue(skb, rx_ring->queue_index); skb->protocol = eth_type_trans(skb, rx_ring->netdev); } diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h index 69a9ed7bc2df..e9d8917239c9 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_type.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h @@ -6,6 +6,7 @@ #include #include +#include #include #define WX_NCSI_SUP 0x8000 @@ -65,6 +66,8 @@ #define WX_CFG_PORT_CTL_QINQ BIT(2) #define WX_CFG_PORT_CTL_D_VLAN BIT(0) /* double vlan*/ #define WX_CFG_TAG_TPID(_i) (0x14430 + ((_i) * 4)) +#define WX_CFG_PORT_CTL_NUM_VT_MASK GENMASK(13, 12) /* number of TVs */ + /* GPIO Registers */ #define WX_GPIO_DR 0x14800 @@ -88,6 +91,8 @@ /* TDM CTL BIT */ #define WX_TDM_CTL_TE BIT(0) /* Transmit Enable */ #define WX_TDM_PB_THRE(_i) (0x18020 + ((_i) * 4)) +#define WX_TDM_RP_IDX 0x1820C +#define WX_TDM_RP_RATE 0x18404 /***************************** RDB registers *********************************/ /* receive packet buffer */ @@ -151,6 +156,9 @@ #define WX_PSR_LAN_FLEX_DW_H(_i) (0x15C04 + ((_i) * 16)) #define WX_PSR_LAN_FLEX_MSK(_i) (0x15C08 + ((_i) * 16)) 
+/* vlan tbl */ +#define WX_PSR_VLAN_TBL(_i) (0x16000 + ((_i) * 4)) + /* mac switcher */ #define WX_PSR_MAC_SWC_AD_L 0x16200 #define WX_PSR_MAC_SWC_AD_H 0x16204 @@ -162,6 +170,15 @@ #define WX_PSR_MAC_SWC_IDX 0x16210 #define WX_CLEAR_VMDQ_ALL 0xFFFFFFFFU +/* vlan switch */ +#define WX_PSR_VLAN_SWC 0x16220 +#define WX_PSR_VLAN_SWC_VM_L 0x16224 +#define WX_PSR_VLAN_SWC_VM_H 0x16228 +#define WX_PSR_VLAN_SWC_IDX 0x16230 /* 64 vlan entries */ +/* VLAN pool filtering masks */ +#define WX_PSR_VLAN_SWC_VIEN BIT(31) /* filter is valid */ +#define WX_PSR_VLAN_SWC_ENTRIES 64 + /********************************* RSEC **************************************/ /* general rsec */ #define WX_RSC_CTL 0x17000 @@ -256,6 +273,7 @@ #define WX_PX_RR_RP(_i) (0x0100C + ((_i) * 0x40)) #define WX_PX_RR_CFG(_i) (0x01010 + ((_i) * 0x40)) /* PX_RR_CFG bit definitions */ +#define WX_PX_RR_CFG_VLAN BIT(31) #define WX_PX_RR_CFG_SPLIT_MODE BIT(26) #define WX_PX_RR_CFG_RR_THER_SHIFT 16 #define WX_PX_RR_CFG_RR_HDR_SZ GENMASK(15, 12) @@ -297,6 +315,7 @@ #define WX_MAX_TXD 8192 #define WX_MAX_JUMBO_FRAME_SIZE 9432 /* max payload 9414 */ +#define VMDQ_P(p) p /* Supported Rx Buffer Sizes */ #define WX_RXBUFFER_256 256 /* Used for skb receive header */ @@ -321,6 +340,7 @@ /******************* Receive Descriptor bit definitions **********************/ #define WX_RXD_STAT_DD BIT(0) /* Done */ #define WX_RXD_STAT_EOP BIT(1) /* End of Packet */ +#define WX_RXD_STAT_VP BIT(5) /* IEEE VLAN Pkt */ #define WX_RXD_STAT_L4CS BIT(7) /* L4 xsum calculated */ #define WX_RXD_STAT_IPCS BIT(8) /* IP xsum calculated */ #define WX_RXD_STAT_OUTERIPCS BIT(10) /* Cloud IP xsum calculated*/ @@ -709,6 +729,8 @@ struct wx_mac_info { u32 mta_shadow[128]; s32 mc_filter_type; u32 mcft_size; + u32 vft_shadow[128]; + u32 vft_size; u32 num_rar_entries; u32 rx_pb_size; u32 tx_pb_size; @@ -870,11 +892,18 @@ struct wx_ring_container { u8 itr; /* current ITR setting for ring */ }; +struct wx_fwd_adapter { + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + struct wx *wx; + int index; /* pool index on PF */ +}; + struct wx_ring { struct wx_ring *next; /* pointer to next ring in q_vector */ struct wx_q_vector *q_vector; /* backpointer to host q_vector */ struct net_device *netdev; /* netdev ring belongs to */ struct device *dev; /* device for DMA mapping */ + struct wx_fwd_adapter *accel; struct page_pool *page_pool; void *desc; /* descriptor ring memory */ union { @@ -933,6 +962,8 @@ enum wx_isb_idx { }; struct wx { + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + u8 __iomem *hw_addr; struct pci_dev *pdev; struct net_device *netdev; From patchwork Wed May 10 09:38:42 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Mengyuan Lou X-Patchwork-Id: 13236695 X-Patchwork-Delegate: kuba@kernel.org Received: from lindbergh.monkeyblade.net (lindbergh.monkeyblade.net [23.128.96.19]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 4B0B063E for ; Wed, 10 May 2023 09:40:52 +0000 (UTC) Received: from smtpbguseast1.qq.com (smtpbguseast1.qq.com [54.204.34.129]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 4B39D30C7 for ; Wed, 10 May 2023 02:40:37 -0700 (PDT) X-QQ-mid: bizesmtp73t1683711555tawfvkb0 Received: from localhost.localdomain ( [125.119.253.217]) by bizesmtp.qq.com (ESMTP) with id ; Wed, 10 May 2023 17:39:13 +0800 (CST) X-QQ-SSF: 01400000000000N0Z000000A0000000 
X-QQ-FEAT: YNhQxxQKZUKeClNdfH309gqII8lRspgeVItJkHyNveRO/zKrgSmZ4lKMGOhM0 epmiTFYAjJkhMmXkEkVcVk9s73kWOiHSRj3FQUh2/q5NspSAdd+F8q2dRDOwbXvRPlRHdbD uviSKQ6KDoSW3aebVMGaB0WyoG9YN8sQUI0kaCphuUioE0Gl1II/KDHJn8litEmLXCC5EIj 6gCSRed4HAGypWimVXS7lie5I2T3yNgBhPOl6tYcUM18/cvZRruI2zGD8Txg/eH6oHKmm52 7QUGq3lnCbKb6wFyIQvvyh2jFX0QLtF9UHjAjjtzb87epiDrZyfyI1FHOq3sILGqGtfsdHp nqjkFyrXanOt7ktaDS1WJNniUgdH6hxt60wF610/SzKnT5EWG16v/VDqiIBH20AcC0BNrPO toeMIQXEOVFS4MzdhUGGcA== X-QQ-GoodBg: 2 X-BIZMAIL-ID: 282522838949692191 From: Mengyuan Lou To: netdev@vger.kernel.org Cc: jiawenwu@trustnetic.com, Mengyuan Lou Subject: [PATCH net-next v4 4/7] net: ngbe add netdev features support Date: Wed, 10 May 2023 17:38:42 +0800 Message-Id: <20230510093845.47446-5-mengyuanlou@net-swift.com> X-Mailer: git-send-email 2.40.0 In-Reply-To: <20230510093845.47446-1-mengyuanlou@net-swift.com> References: <20230510093845.47446-1-mengyuanlou@net-swift.com> Precedence: bulk X-Mailing-List: netdev@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 X-QQ-SENDSIZE: 520 Feedback-ID: bizesmtp:net-swift.com:qybglogicsvrgz:qybglogicsvrgz5a-3 X-Spam-Status: No, score=-1.9 required=5.0 tests=BAYES_00,RCVD_IN_DNSWL_NONE, RCVD_IN_MSPIKE_H2,SPF_HELO_PASS,SPF_PASS,T_SCC_BODY_TEXT_LINE autolearn=ham autolearn_force=no version=3.4.6 X-Spam-Checker-Version: SpamAssassin 3.4.6 (2021-04-09) on lindbergh.monkeyblade.net X-Patchwork-Delegate: kuba@kernel.org Add features and hw_features that ngbe can support. Signed-off-by: Mengyuan Lou --- drivers/net/ethernet/wangxun/ngbe/ngbe_main.c | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c index df6b870aa871..793ebed925ef 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c @@ -551,12 +551,18 @@ static int ngbe_probe(struct pci_dev *pdev, ngbe_set_ethtool_ops(netdev); netdev->netdev_ops = &ngbe_netdev_ops; - netdev->features |= NETIF_F_HIGHDMA; - netdev->features = NETIF_F_SG; - + netdev->features = NETIF_F_SG | NETIF_F_IP_CSUM | + NETIF_F_TSO | NETIF_F_TSO6 | + NETIF_F_RXHASH | NETIF_F_RXCSUM; + netdev->features |= NETIF_F_SCTP_CRC | NETIF_F_TSO_MANGLEID; + netdev->vlan_features |= netdev->features; + netdev->features |= NETIF_F_IPV6_CSUM | NETIF_F_VLAN_FEATURES; /* copy netdev features into list of user selectable features */ - netdev->hw_features |= netdev->features | - NETIF_F_RXALL; + netdev->hw_features |= netdev->features | NETIF_F_RXALL; + netdev->hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC; + netdev->features |= NETIF_F_HIGHDMA; + netdev->hw_features |= NETIF_F_GRO; + netdev->features |= NETIF_F_GRO; netdev->priv_flags |= IFF_UNICAST_FLT; netdev->priv_flags |= IFF_SUPP_NOFCS; From patchwork Wed May 10 09:38:43 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Mengyuan Lou X-Patchwork-Id: 13236690 X-Patchwork-Delegate: kuba@kernel.org Received: from lindbergh.monkeyblade.net (lindbergh.monkeyblade.net [23.128.96.19]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 78719814 for ; Wed, 10 May 2023 09:39:28 +0000 (UTC) Received: from smtpbguseast3.qq.com (smtpbguseast3.qq.com [54.243.244.52]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 80AFC30C7 for ; Wed, 10 May 2023 02:39:26 -0700 (PDT) X-QQ-mid: 
From patchwork Wed May 10 09:38:43 2023
X-Patchwork-Submitter: Mengyuan Lou
X-Patchwork-Id: 13236690
X-Patchwork-Delegate: kuba@kernel.org
From: Mengyuan Lou
To: netdev@vger.kernel.org
Cc: jiawenwu@trustnetic.com, Mengyuan Lou
Subject: [PATCH net-next v4 5/7] net: ngbe: Implement vlan add and remove ops
Date: Wed, 10 May 2023 17:38:43 +0800
Message-Id: <20230510093845.47446-6-mengyuanlou@net-swift.com>
In-Reply-To: <20230510093845.47446-1-mengyuanlou@net-swift.com>
References: <20230510093845.47446-1-mengyuanlou@net-swift.com>

Add the ndo_vlan_rx_add_vid and ndo_vlan_rx_kill_vid ops for ngbe.

Signed-off-by: Mengyuan Lou
---
 drivers/net/ethernet/wangxun/ngbe/ngbe_main.c | 3 +++
 drivers/net/ethernet/wangxun/ngbe/ngbe_type.h | 1 +
 2 files changed, 4 insertions(+)

diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
index 793ebed925ef..582c90dce4d0 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
@@ -115,6 +115,7 @@ static int ngbe_sw_init(struct wx *wx)
 	wx->mac.max_rx_queues = NGBE_MAX_RX_QUEUES;
 	wx->mac.max_tx_queues = NGBE_MAX_TX_QUEUES;
 	wx->mac.mcft_size = NGBE_MC_TBL_SIZE;
+	wx->mac.vft_size = NGBE_SP_VFT_TBL_SIZE;
 	wx->mac.rx_pb_size = NGBE_RX_PB_SIZE;
 	wx->mac.tx_pb_size = NGBE_TDB_PB_SZ;
 
@@ -476,6 +477,8 @@ static const struct net_device_ops ngbe_netdev_ops = {
 	.ndo_validate_addr      = eth_validate_addr,
 	.ndo_set_mac_address    = wx_set_mac,
 	.ndo_get_stats64        = wx_get_stats64,
+	.ndo_vlan_rx_add_vid    = wx_vlan_rx_add_vid,
+	.ndo_vlan_rx_kill_vid   = wx_vlan_rx_kill_vid,
 };
 
 /**
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h
index 373d5af628cd..b70eca397b67 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h
@@ -136,6 +136,7 @@ enum NGBE_MSCA_CMD_value {
 #define NGBE_RAR_ENTRIES                         32
 #define NGBE_RX_PB_SIZE                          42
 #define NGBE_MC_TBL_SIZE                         128
+#define NGBE_SP_VFT_TBL_SIZE                     128
 #define NGBE_TDB_PB_SZ                           (20 * 1024) /* 160KB Packet Buffer */
 
 /* TX/RX descriptor defines */
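The wx_vlan_rx_add_vid() and wx_vlan_rx_kill_vid() helpers referenced here live in libwx and are added earlier in this series (not reproduced in this excerpt). As a rough sketch of the shape such callbacks take, reusing the hypothetical wx_sketch_set_vfta_bit() helper shown earlier and the active_vlans bitmap added to struct wx:

/* Sketch only; the real libwx callbacks may differ in detail. */
static int wx_sketch_vlan_rx_add_vid(struct net_device *netdev,
				     __be16 proto, u16 vid)
{
	struct wx *wx = netdev_priv(netdev);

	wx_sketch_set_vfta_bit(wx, vid, true);	/* program WX_PSR_VLAN_TBL */
	set_bit(vid, wx->active_vlans);		/* remember for restore paths */

	return 0;
}

static int wx_sketch_vlan_rx_kill_vid(struct net_device *netdev,
				      __be16 proto, u16 vid)
{
	struct wx *wx = netdev_priv(netdev);

	wx_sketch_set_vfta_bit(wx, vid, false);
	clear_bit(vid, wx->active_vlans);

	return 0;
}

The prototypes match the ndo_vlan_rx_add_vid/ndo_vlan_rx_kill_vid members of struct net_device_ops, which receive the VLAN protocol and the VLAN ID from the 8021q core.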
From patchwork Wed May 10 09:38:44 2023
X-Patchwork-Submitter: Mengyuan Lou
X-Patchwork-Id: 13236693
X-Patchwork-Delegate: kuba@kernel.org
From: Mengyuan Lou
To: netdev@vger.kernel.org
Cc: jiawenwu@trustnetic.com, Mengyuan Lou
Subject: [PATCH net-next v4 6/7] net: txgbe add netdev features support
Date: Wed, 10 May 2023 17:38:44 +0800
Message-Id: <20230510093845.47446-7-mengyuanlou@net-swift.com>
In-Reply-To: <20230510093845.47446-1-mengyuanlou@net-swift.com>
References: <20230510093845.47446-1-mengyuanlou@net-swift.com>

Add features and hw_features that txgbe can support.
Signed-off-by: Mengyuan Lou
---
 .../net/ethernet/wangxun/txgbe/txgbe_main.c | 20 ++++++++++++++++---
 1 file changed, 17 insertions(+), 3 deletions(-)

diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
index 5b8a121fb496..be795d175aed 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
@@ -596,11 +596,25 @@ static int txgbe_probe(struct pci_dev *pdev,
 		goto err_free_mac_table;
 	}
 
-	netdev->features |= NETIF_F_HIGHDMA;
-	netdev->features = NETIF_F_SG;
-
+	netdev->features = NETIF_F_SG |
+			   NETIF_F_TSO |
+			   NETIF_F_TSO6 |
+			   NETIF_F_RXHASH |
+			   NETIF_F_RXCSUM |
+			   NETIF_F_HW_CSUM;
+
+	netdev->gso_partial_features = NETIF_F_GSO_ENCAP_ALL;
+	netdev->features |= netdev->gso_partial_features;
+	netdev->features |= NETIF_F_SCTP_CRC;
+	netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
+	netdev->hw_enc_features |= netdev->vlan_features;
+	netdev->features |= NETIF_F_VLAN_FEATURES;
 	/* copy netdev features into list of user selectable features */
 	netdev->hw_features |= netdev->features | NETIF_F_RXALL;
+	netdev->hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC;
+	netdev->features |= NETIF_F_HIGHDMA;
+	netdev->hw_features |= NETIF_F_GRO;
+	netdev->features |= NETIF_F_GRO;
 
 	netdev->priv_flags |= IFF_UNICAST_FLT;
 	netdev->priv_flags |= IFF_SUPP_NOFCS;
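NETIF_F_GSO_ENCAP_ALL, assigned to gso_partial_features above, is likewise a grouping macro from include/linux/netdev_features.h covering the tunnel segmentation offload types. At the time of writing it is roughly the set below (quoted for reference; verify against the tree this series targets):

/* From include/linux/netdev_features.h, reproduced for reference only. */
#define NETIF_F_GSO_ENCAP_ALL	(NETIF_F_GSO_GRE | \
				 NETIF_F_GSO_GRE_CSUM | \
				 NETIF_F_GSO_IPXIP4 | \
				 NETIF_F_GSO_IPXIP6 | \
				 NETIF_F_GSO_UDP_TUNNEL | \
				 NETIF_F_GSO_UDP_TUNNEL_CSUM)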
From patchwork Wed May 10 09:38:45 2023
X-Patchwork-Submitter: Mengyuan Lou
X-Patchwork-Id: 13236692
X-Patchwork-Delegate: kuba@kernel.org
From: Mengyuan Lou
To: netdev@vger.kernel.org
Cc: jiawenwu@trustnetic.com, Mengyuan Lou
Subject: [PATCH net-next v4 7/7] net: txgbe: Implement vlan add and remove ops
Date: Wed, 10 May 2023 17:38:45 +0800
Message-Id: <20230510093845.47446-8-mengyuanlou@net-swift.com>
In-Reply-To: <20230510093845.47446-1-mengyuanlou@net-swift.com>
References: <20230510093845.47446-1-mengyuanlou@net-swift.com>

Add the ndo_vlan_rx_add_vid and ndo_vlan_rx_kill_vid ops for txgbe.

Signed-off-by: Mengyuan Lou
---
 drivers/net/ethernet/wangxun/txgbe/txgbe_main.c | 4 ++++
 drivers/net/ethernet/wangxun/txgbe/txgbe_type.h | 1 +
 2 files changed, 5 insertions(+)

diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
index be795d175aed..00b8a43a87e1 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
@@ -258,6 +258,7 @@ static void txgbe_reset(struct wx *wx)
 	if (err != 0)
 		wx_err(wx, "Hardware Error: %d\n", err);
 
+	wx_start_hw(wx);
 	/* do not flush user set addresses */
 	memcpy(old_addr, &wx->mac_table[0].addr, netdev->addr_len);
 	wx_flush_sw_mac_table(wx);
@@ -330,6 +331,7 @@ static int txgbe_sw_init(struct wx *wx)
 	wx->mac.max_tx_queues = TXGBE_SP_MAX_TX_QUEUES;
 	wx->mac.max_rx_queues = TXGBE_SP_MAX_RX_QUEUES;
 	wx->mac.mcft_size = TXGBE_SP_MC_TBL_SIZE;
+	wx->mac.vft_size = TXGBE_SP_VFT_TBL_SIZE;
 	wx->mac.rx_pb_size = TXGBE_SP_RX_PB_SIZE;
 	wx->mac.tx_pb_size = TXGBE_SP_TDB_PB_SZ;
 
@@ -494,6 +496,8 @@ static const struct net_device_ops txgbe_netdev_ops = {
 	.ndo_validate_addr      = eth_validate_addr,
 	.ndo_set_mac_address    = wx_set_mac,
 	.ndo_get_stats64        = wx_get_stats64,
+	.ndo_vlan_rx_add_vid    = wx_vlan_rx_add_vid,
+	.ndo_vlan_rx_kill_vid   = wx_vlan_rx_kill_vid,
 };
 
 /**
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
index 63a1c733718d..032972369965 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
@@ -77,6 +77,7 @@
 #define TXGBE_SP_MAX_RX_QUEUES  128
 #define TXGBE_SP_RAR_ENTRIES    128
 #define TXGBE_SP_MC_TBL_SIZE    128
+#define TXGBE_SP_VFT_TBL_SIZE   128
 #define TXGBE_SP_RX_PB_SIZE     512
 #define TXGBE_SP_TDB_PB_SZ      (160 * 1024) /* 160KB Packet Buffer */