From patchwork Mon Jan 9 19:15:14 2023
X-Patchwork-Submitter: Gerhard Engleder
X-Patchwork-Id: 13094137
From: Gerhard Engleder
To: netdev@vger.kernel.org
Cc: davem@davemloft.net, kuba@kernel.org, edumazet@google.com, pabeni@redhat.com, Gerhard Engleder
Subject: [PATCH net-next v4 01/10] tsnep: Use spin_lock_bh for TX
Date: Mon, 9 Jan 2023 20:15:14 +0100
Message-Id: <20230109191523.12070-2-gerhard@engleder-embedded.com>

TX processing is done only within BH context. Therefore, _irqsafe
variant is not necessary.
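[Illustrative sketch, not part of the patch: the point is that tx->lock is only taken from process context in the xmit path and from softirq context in the NAPI poll path, never from a hard interrupt handler, so disabling bottom halves is enough and the IRQ-flag save/restore can be dropped.]

	/* Lock shared between process context and softirq (BH) context:
	 * spin_lock_bh() disables BH processing on this CPU, which is
	 * sufficient and cheaper than the irqsave variant.
	 */
	spin_lock_bh(&tx->lock);
	/* ... update TX ring state shared with NAPI poll ... */
	spin_unlock_bh(&tx->lock);

	/* The heavier pattern removed by this patch is only needed when
	 * the lock can also be taken from hard-IRQ context:
	 */
	unsigned long flags;

	spin_lock_irqsave(&tx->lock, flags);	/* also disables hard IRQs */
	/* ... */
	spin_unlock_irqrestore(&tx->lock, flags);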
Signed-off-by: Gerhard Engleder --- drivers/net/ethernet/engleder/tsnep_main.c | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c index bf0190e1d2ea..7cc5e2407809 100644 --- a/drivers/net/ethernet/engleder/tsnep_main.c +++ b/drivers/net/ethernet/engleder/tsnep_main.c @@ -434,7 +434,6 @@ static int tsnep_tx_unmap(struct tsnep_tx *tx, int index, int count) static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb, struct tsnep_tx *tx) { - unsigned long flags; int count = 1; struct tsnep_tx_entry *entry; int length; @@ -444,7 +443,7 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb, if (skb_shinfo(skb)->nr_frags > 0) count += skb_shinfo(skb)->nr_frags; - spin_lock_irqsave(&tx->lock, flags); + spin_lock_bh(&tx->lock); if (tsnep_tx_desc_available(tx) < count) { /* ring full, shall not happen because queue is stopped if full @@ -452,7 +451,7 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb, */ netif_stop_queue(tx->adapter->netdev); - spin_unlock_irqrestore(&tx->lock, flags); + spin_unlock_bh(&tx->lock); return NETDEV_TX_BUSY; } @@ -468,7 +467,7 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb, tx->dropped++; - spin_unlock_irqrestore(&tx->lock, flags); + spin_unlock_bh(&tx->lock); netdev_err(tx->adapter->netdev, "TX DMA map failed\n"); @@ -496,20 +495,19 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb, netif_stop_queue(tx->adapter->netdev); } - spin_unlock_irqrestore(&tx->lock, flags); + spin_unlock_bh(&tx->lock); return NETDEV_TX_OK; } static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget) { - unsigned long flags; int budget = 128; struct tsnep_tx_entry *entry; int count; int length; - spin_lock_irqsave(&tx->lock, flags); + spin_lock_bh(&tx->lock); do { if (tx->read == tx->write) @@ -568,18 +566,17 @@ static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget) netif_wake_queue(tx->adapter->netdev); } - spin_unlock_irqrestore(&tx->lock, flags); + spin_unlock_bh(&tx->lock); return (budget != 0); } static bool tsnep_tx_pending(struct tsnep_tx *tx) { - unsigned long flags; struct tsnep_tx_entry *entry; bool pending = false; - spin_lock_irqsave(&tx->lock, flags); + spin_lock_bh(&tx->lock); if (tx->read != tx->write) { entry = &tx->entry[tx->read]; @@ -589,7 +586,7 @@ static bool tsnep_tx_pending(struct tsnep_tx *tx) pending = true; } - spin_unlock_irqrestore(&tx->lock, flags); + spin_unlock_bh(&tx->lock); return pending; } From patchwork Mon Jan 9 19:15:15 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Gerhard Engleder X-Patchwork-Id: 13094135 X-Patchwork-Delegate: kuba@kernel.org Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 3467AC54EBD for ; Mon, 9 Jan 2023 19:15:36 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S237323AbjAITPe (ORCPT ); Mon, 9 Jan 2023 14:15:34 -0500 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:54488 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S235538AbjAITPc (ORCPT ); Mon, 9 Jan 2023 14:15:32 -0500 Received: from mx14lb.world4you.com (mx14lb.world4you.com [81.19.149.124]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 733656551 
From: Gerhard Engleder
To: netdev@vger.kernel.org
Cc: davem@davemloft.net, kuba@kernel.org, edumazet@google.com, pabeni@redhat.com, Gerhard Engleder
Subject: [PATCH net-next v4 02/10] tsnep: Forward NAPI budget to napi_consume_skb()
Date: Mon, 9 Jan 2023 20:15:15 +0100
Message-Id: <20230109191523.12070-3-gerhard@engleder-embedded.com>

NAPI budget must be forwarded to napi_consume_skb(). It is used to
detect non-NAPI context.

Signed-off-by: Gerhard Engleder
Reviewed-by: Alexander Duyck
---
 drivers/net/ethernet/engleder/tsnep_main.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
index 7cc5e2407809..d148ba422b8c 100644
--- a/drivers/net/ethernet/engleder/tsnep_main.c
+++ b/drivers/net/ethernet/engleder/tsnep_main.c
@@ -550,7 +550,7 @@ static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
 			skb_tstamp_tx(entry->skb, &hwtstamps);
 		}
 
-		napi_consume_skb(entry->skb, budget);
+		napi_consume_skb(entry->skb, napi_budget);
 		entry->skb = NULL;
 
 		tx->read = (tx->read + count) % TSNEP_RING_SIZE;

From patchwork Mon Jan 9 19:15:16 2023
X-Patchwork-Submitter: Gerhard Engleder
X-Patchwork-Id: 13094139
From: Gerhard Engleder
To: netdev@vger.kernel.org
Cc: davem@davemloft.net, kuba@kernel.org, edumazet@google.com, pabeni@redhat.com, Gerhard Engleder
Subject: [PATCH net-next v4 03/10] tsnep: Do not print DMA mapping error
Date: Mon, 9 Jan 2023 20:15:16 +0100
Message-Id: <20230109191523.12070-4-gerhard@engleder-embedded.com>

Printing in data path shall be avoided. DMA mapping error is already
counted in stats so printing is not necessary.

Signed-off-by: Gerhard Engleder
Reviewed-by: Alexander Duyck
---
 drivers/net/ethernet/engleder/tsnep_main.c | 2 --
 1 file changed, 2 deletions(-)

diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
index d148ba422b8c..8c6d6e210494 100644
--- a/drivers/net/ethernet/engleder/tsnep_main.c
+++ b/drivers/net/ethernet/engleder/tsnep_main.c
@@ -469,8 +469,6 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
 
 		spin_unlock_bh(&tx->lock);
 
-		netdev_err(tx->adapter->netdev, "TX DMA map failed\n");
-
 		return NETDEV_TX_OK;
 	}
 	length = retval;

From patchwork Mon Jan 9 19:15:17 2023
X-Patchwork-Submitter: Gerhard Engleder
X-Patchwork-Id: 13094138
From: Gerhard Engleder
To: netdev@vger.kernel.org
Cc: davem@davemloft.net, kuba@kernel.org, edumazet@google.com, pabeni@redhat.com, Gerhard Engleder
Subject: [PATCH net-next v4 04/10] tsnep: Add adapter down state
Date: Mon, 9 Jan 2023 20:15:17 +0100
Message-Id: <20230109191523.12070-5-gerhard@engleder-embedded.com>

Add adapter state with flag for down state. This flag will be used by
the XDP TX path to deny TX if adapter is down.

Signed-off-by: Gerhard Engleder
---
 drivers/net/ethernet/engleder/tsnep.h      |  1 +
 drivers/net/ethernet/engleder/tsnep_main.c | 11 +++++++++++
 2 files changed, 12 insertions(+)

diff --git a/drivers/net/ethernet/engleder/tsnep.h b/drivers/net/ethernet/engleder/tsnep.h
index f93ba48bac3f..d658413ceb14 100644
--- a/drivers/net/ethernet/engleder/tsnep.h
+++ b/drivers/net/ethernet/engleder/tsnep.h
@@ -147,6 +147,7 @@ struct tsnep_adapter {
 	bool suppress_preamble;
 	phy_interface_t phy_mode;
 	struct phy_device *phydev;
+	unsigned long state;
 	int msg_enable;
 
 	struct platform_device *pdev;
diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
index 8c6d6e210494..943de5a09693 100644
--- a/drivers/net/ethernet/engleder/tsnep_main.c
+++ b/drivers/net/ethernet/engleder/tsnep_main.c
@@ -43,6 +43,10 @@
 #define TSNEP_COALESCE_USECS_MAX     ((ECM_INT_DELAY_MASK >> ECM_INT_DELAY_SHIFT) * \
 				      ECM_INT_DELAY_BASE_US + ECM_INT_DELAY_BASE_US - 1)
 
+enum {
+	__TSNEP_DOWN,
+};
+
 static void tsnep_enable_irq(struct tsnep_adapter *adapter, u32 mask)
 {
 	iowrite32(mask, adapter->addr + ECM_INT_ENABLE);
@@ -1138,6 +1142,8 @@ static int tsnep_netdev_open(struct net_device *netdev)
 		tsnep_enable_irq(adapter, adapter->queue[i].irq_mask);
 	}
 
+	clear_bit(__TSNEP_DOWN, &adapter->state);
+
 	return 0;
 
 phy_failed:
@@ -1160,6 +1166,8 @@ static int tsnep_netdev_close(struct net_device *netdev)
 	struct tsnep_adapter *adapter = netdev_priv(netdev);
 	int i;
 
+	set_bit(__TSNEP_DOWN, &adapter->state);
+
 	tsnep_disable_irq(adapter, ECM_INT_LINK);
 	tsnep_phy_close(adapter);
 
@@ -1513,6 +1521,7 @@ static int tsnep_probe(struct platform_device *pdev)
 	adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE |
 			      NETIF_MSG_LINK | NETIF_MSG_IFUP |
 			      NETIF_MSG_IFDOWN | NETIF_MSG_TX_QUEUED;
+	set_bit(__TSNEP_DOWN, &adapter->state);
 
 	netdev->min_mtu = ETH_MIN_MTU;
 	netdev->max_mtu = TSNEP_MAX_FRAME_SIZE;
@@ -1609,6 +1618,8 @@ static int tsnep_remove(struct platform_device *pdev)
 {
 	struct tsnep_adapter *adapter = platform_get_drvdata(pdev);
 
+	set_bit(__TSNEP_DOWN, &adapter->state);
+
 	unregister_netdev(adapter->netdev);
 
 	tsnep_rxnfc_cleanup(adapter);

From patchwork Mon Jan 9 19:15:18 2023
X-Patchwork-Submitter: Gerhard Engleder
X-Patchwork-Id: 13094141
From: Gerhard Engleder
To: netdev@vger.kernel.org
Cc: davem@davemloft.net, kuba@kernel.org, edumazet@google.com, pabeni@redhat.com, Gerhard Engleder
Subject: [PATCH net-next v4 05/10] tsnep: Add XDP TX support
Date: Mon, 9 Jan 2023 20:15:18 +0100
Message-Id: <20230109191523.12070-6-gerhard@engleder-embedded.com>

Implement ndo_xdp_xmit() for XDP TX support. Support for fragmented XDP
frames is included. Also some const, braces and logic clean ups are
done in normal TX path to keep both TX paths in sync.
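[Background sketch, not part of the patch: the contract of the ndo_xdp_xmit() callback added below, shown from the point of view of a hypothetical caller. The driver may accept fewer than n frames; it returns how many it consumed and the caller frees the remainder. XDP_XMIT_FLUSH asks the driver to kick the hardware, which tsnep does in tsnep_xdp_xmit_flush().]

	/* Hypothetical caller, for illustration only. */
	static void example_xdp_xmit(struct net_device *dev,
				     struct xdp_frame **frames, int n)
	{
		int sent, i;

		sent = dev->netdev_ops->ndo_xdp_xmit(dev, n, frames,
						     XDP_XMIT_FLUSH);
		if (sent < 0)
			sent = 0;	/* error: no frame was accepted */

		for (i = sent; i < n; i++)
			xdp_return_frame(frames[i]);	/* frames the driver did not take */
	}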
Signed-off-by: Gerhard Engleder --- drivers/net/ethernet/engleder/tsnep.h | 12 +- drivers/net/ethernet/engleder/tsnep_main.c | 211 +++++++++++++++++++-- 2 files changed, 210 insertions(+), 13 deletions(-) diff --git a/drivers/net/ethernet/engleder/tsnep.h b/drivers/net/ethernet/engleder/tsnep.h index d658413ceb14..9cb267938794 100644 --- a/drivers/net/ethernet/engleder/tsnep.h +++ b/drivers/net/ethernet/engleder/tsnep.h @@ -57,6 +57,12 @@ struct tsnep_rxnfc_rule { int location; }; +enum tsnep_tx_type { + TSNEP_TX_TYPE_SKB, + TSNEP_TX_TYPE_XDP_TX, + TSNEP_TX_TYPE_XDP_NDO, +}; + struct tsnep_tx_entry { struct tsnep_tx_desc *desc; struct tsnep_tx_desc_wb *desc_wb; @@ -65,7 +71,11 @@ struct tsnep_tx_entry { u32 properties; - struct sk_buff *skb; + enum tsnep_tx_type type; + union { + struct sk_buff *skb; + struct xdp_frame *xdpf; + }; size_t len; DEFINE_DMA_UNMAP_ADDR(dma); }; diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c index 943de5a09693..1ae73c706c9e 100644 --- a/drivers/net/ethernet/engleder/tsnep_main.c +++ b/drivers/net/ethernet/engleder/tsnep_main.c @@ -310,10 +310,12 @@ static void tsnep_tx_activate(struct tsnep_tx *tx, int index, int length, struct tsnep_tx_entry *entry = &tx->entry[index]; entry->properties = 0; + /* xdpf is union with skb */ if (entry->skb) { entry->properties = length & TSNEP_DESC_LENGTH_MASK; entry->properties |= TSNEP_DESC_INTERRUPT_FLAG; - if (skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS) + if (entry->type == TSNEP_TX_TYPE_SKB && + (skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS)) entry->properties |= TSNEP_DESC_EXTENDED_WRITEBACK_FLAG; /* toggle user flag to prevent false acknowledge @@ -370,7 +372,8 @@ static int tsnep_tx_desc_available(struct tsnep_tx *tx) return tx->read - tx->write - 1; } -static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count) +static int tsnep_tx_map(const struct sk_buff *skb, struct tsnep_tx *tx, + int count) { struct device *dmadev = tx->adapter->dmadev; struct tsnep_tx_entry *entry; @@ -382,7 +385,7 @@ static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count) for (i = 0; i < count; i++) { entry = &tx->entry[(tx->write + i) % TSNEP_RING_SIZE]; - if (i == 0) { + if (!i) { len = skb_headlen(skb); dma = dma_map_single(dmadev, skb->data, len, DMA_TO_DEVICE); @@ -400,6 +403,8 @@ static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count) entry->desc->tx = __cpu_to_le64(dma); + entry->type = TSNEP_TX_TYPE_SKB; + map_len += len; } @@ -417,12 +422,13 @@ static int tsnep_tx_unmap(struct tsnep_tx *tx, int index, int count) entry = &tx->entry[(index + i) % TSNEP_RING_SIZE]; if (entry->len) { - if (i == 0) + if (!i && entry->type == TSNEP_TX_TYPE_SKB) dma_unmap_single(dmadev, dma_unmap_addr(entry, dma), dma_unmap_len(entry, len), DMA_TO_DEVICE); - else + else if (entry->type == TSNEP_TX_TYPE_SKB || + entry->type == TSNEP_TX_TYPE_XDP_NDO) dma_unmap_page(dmadev, dma_unmap_addr(entry, dma), dma_unmap_len(entry, len), @@ -482,7 +488,7 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb, for (i = 0; i < count; i++) tsnep_tx_activate(tx, (tx->write + i) % TSNEP_RING_SIZE, length, - i == (count - 1)); + i == count - 1); tx->write = (tx->write + count) % TSNEP_RING_SIZE; skb_tx_timestamp(skb); @@ -502,12 +508,133 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb, return NETDEV_TX_OK; } +static int tsnep_xdp_tx_map(const struct xdp_frame *xdpf, struct tsnep_tx *tx, + const struct skb_shared_info *shinfo, 
int count, + enum tsnep_tx_type type) +{ + struct device *dmadev = tx->adapter->dmadev; + struct tsnep_tx_entry *entry; + const skb_frag_t *frag; + struct page *page; + unsigned int len; + int map_len = 0; + dma_addr_t dma; + void *data; + int i; + + frag = NULL; + len = xdpf->len; + for (i = 0; i < count; i++) { + entry = &tx->entry[(tx->write + i) % TSNEP_RING_SIZE]; + if (type == TSNEP_TX_TYPE_XDP_NDO) { + data = unlikely(frag) ? skb_frag_address(frag) : + xdpf->data; + dma = dma_map_single(dmadev, data, len, DMA_TO_DEVICE); + if (dma_mapping_error(dmadev, dma)) + return -ENOMEM; + + entry->type = TSNEP_TX_TYPE_XDP_NDO; + } else { + page = unlikely(frag) ? skb_frag_page(frag) : + virt_to_page(xdpf->data); + dma = page_pool_get_dma_addr(page); + if (unlikely(frag)) + dma += skb_frag_off(frag); + else + dma += sizeof(*xdpf) + xdpf->headroom; + dma_sync_single_for_device(dmadev, dma, len, + DMA_BIDIRECTIONAL); + + entry->type = TSNEP_TX_TYPE_XDP_TX; + } + + entry->len = len; + dma_unmap_addr_set(entry, dma, dma); + + entry->desc->tx = __cpu_to_le64(dma); + + map_len += len; + + if (i + 1 < count) { + frag = &shinfo->frags[i]; + len = skb_frag_size(frag); + } + } + + return map_len; +} + +/* This function requires __netif_tx_lock is held by the caller. */ +static bool tsnep_xdp_xmit_frame_ring(struct xdp_frame *xdpf, + struct tsnep_tx *tx, + enum tsnep_tx_type type) +{ + const struct skb_shared_info *shinfo = + xdp_get_shared_info_from_frame(xdpf); + struct tsnep_tx_entry *entry; + int count, length, retval, i; + + count = 1; + if (unlikely(xdp_frame_has_frags(xdpf))) + count += shinfo->nr_frags; + + spin_lock_bh(&tx->lock); + + /* ensure that TX ring is not filled up by XDP, always MAX_SKB_FRAGS + * will be available for normal TX path and queue is stopped there if + * necessary + */ + if (tsnep_tx_desc_available(tx) < (MAX_SKB_FRAGS + 1 + count)) { + spin_unlock_bh(&tx->lock); + + return false; + } + + entry = &tx->entry[tx->write]; + entry->xdpf = xdpf; + + retval = tsnep_xdp_tx_map(xdpf, tx, shinfo, count, type); + if (retval < 0) { + tsnep_tx_unmap(tx, tx->write, count); + entry->xdpf = NULL; + + tx->dropped++; + + spin_unlock_bh(&tx->lock); + + return false; + } + length = retval; + + for (i = 0; i < count; i++) + tsnep_tx_activate(tx, (tx->write + i) % TSNEP_RING_SIZE, length, + i == count - 1); + tx->write = (tx->write + count) % TSNEP_RING_SIZE; + + /* descriptor properties shall be valid before hardware is notified */ + dma_wmb(); + + spin_unlock_bh(&tx->lock); + + return true; +} + +static void tsnep_xdp_xmit_flush(struct tsnep_tx *tx) +{ + iowrite32(TSNEP_CONTROL_TX_ENABLE, tx->addr + TSNEP_CONTROL); +} + static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget) { - int budget = 128; struct tsnep_tx_entry *entry; - int count; + struct xdp_frame_bulk bq; + int budget = 128; int length; + int count; + + xdp_frame_bulk_init(&bq); + + rcu_read_lock(); /* need for xdp_return_frame_bulk */ spin_lock_bh(&tx->lock); @@ -527,12 +654,17 @@ static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget) dma_rmb(); count = 1; - if (skb_shinfo(entry->skb)->nr_frags > 0) + if (entry->type == TSNEP_TX_TYPE_SKB && + skb_shinfo(entry->skb)->nr_frags > 0) count += skb_shinfo(entry->skb)->nr_frags; + else if (entry->type != TSNEP_TX_TYPE_SKB && + xdp_frame_has_frags(entry->xdpf)) + count += xdp_get_shared_info_from_frame(entry->xdpf)->nr_frags; length = tsnep_tx_unmap(tx, tx->read, count); - if ((skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS) && + if (entry->type == TSNEP_TX_TYPE_SKB 
&& + (skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS) && (__le32_to_cpu(entry->desc_wb->properties) & TSNEP_DESC_EXTENDED_WRITEBACK_FLAG)) { struct skb_shared_hwtstamps hwtstamps; @@ -552,7 +684,18 @@ static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget) skb_tstamp_tx(entry->skb, &hwtstamps); } - napi_consume_skb(entry->skb, napi_budget); + switch (entry->type) { + case TSNEP_TX_TYPE_SKB: + napi_consume_skb(entry->skb, napi_budget); + break; + case TSNEP_TX_TYPE_XDP_TX: + xdp_return_frame_rx_napi(entry->xdpf); + break; + case TSNEP_TX_TYPE_XDP_NDO: + xdp_return_frame_bulk(entry->xdpf, &bq); + break; + } + /* xdpf is union with skb */ entry->skb = NULL; tx->read = (tx->read + count) % TSNEP_RING_SIZE; @@ -570,7 +713,11 @@ static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget) spin_unlock_bh(&tx->lock); - return (budget != 0); + xdp_flush_frame_bulk(&bq); + + rcu_read_unlock(); + + return budget != 0; } static bool tsnep_tx_pending(struct tsnep_tx *tx) @@ -1330,6 +1477,45 @@ static ktime_t tsnep_netdev_get_tstamp(struct net_device *netdev, return ns_to_ktime(timestamp); } +static int tsnep_netdev_xdp_xmit(struct net_device *dev, int n, + struct xdp_frame **xdp, u32 flags) +{ + struct tsnep_adapter *adapter = netdev_priv(dev); + u32 cpu = smp_processor_id(); + struct netdev_queue *nq; + int nxmit, queue; + bool xmit; + + if (unlikely(test_bit(__TSNEP_DOWN, &adapter->state))) + return -ENETDOWN; + + if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) + return -EINVAL; + + queue = cpu % adapter->num_tx_queues; + nq = netdev_get_tx_queue(adapter->netdev, queue); + + __netif_tx_lock(nq, cpu); + + /* Avoid transmit queue timeout since we share it with the slow path */ + txq_trans_cond_update(nq); + + for (nxmit = 0; nxmit < n; nxmit++) { + xmit = tsnep_xdp_xmit_frame_ring(xdp[nxmit], + &adapter->tx[queue], + TSNEP_TX_TYPE_XDP_NDO); + if (!xmit) + break; + } + + if (flags & XDP_XMIT_FLUSH) + tsnep_xdp_xmit_flush(&adapter->tx[queue]); + + __netif_tx_unlock(nq); + + return nxmit; +} + static const struct net_device_ops tsnep_netdev_ops = { .ndo_open = tsnep_netdev_open, .ndo_stop = tsnep_netdev_close, @@ -1341,6 +1527,7 @@ static const struct net_device_ops tsnep_netdev_ops = { .ndo_set_features = tsnep_netdev_set_features, .ndo_get_tstamp = tsnep_netdev_get_tstamp, .ndo_setup_tc = tsnep_tc_setup, + .ndo_xdp_xmit = tsnep_netdev_xdp_xmit, }; static int tsnep_mac_init(struct tsnep_adapter *adapter) From patchwork Mon Jan 9 19:15:19 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Gerhard Engleder X-Patchwork-Id: 13094140 X-Patchwork-Delegate: kuba@kernel.org Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 01818C5479D for ; Mon, 9 Jan 2023 19:15:45 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S237457AbjAITPm (ORCPT ); Mon, 9 Jan 2023 14:15:42 -0500 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:54520 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S237132AbjAITPd (ORCPT ); Mon, 9 Jan 2023 14:15:33 -0500 Received: from mx14lb.world4you.com (mx14lb.world4you.com [81.19.149.124]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 74C22A190 for ; Mon, 9 Jan 2023 11:15:32 -0800 (PST) DKIM-Signature: v=1; a=rsa-sha256; q=dns/txt; c=relaxed/relaxed; 
From: Gerhard Engleder
To: netdev@vger.kernel.org
Cc: davem@davemloft.net, kuba@kernel.org, edumazet@google.com, pabeni@redhat.com, Gerhard Engleder
Subject: [PATCH net-next v4 06/10] tsnep: Substract TSNEP_RX_INLINE_METADATA_SIZE once
Date: Mon, 9 Jan 2023 20:15:19 +0100
Message-Id: <20230109191523.12070-7-gerhard@engleder-embedded.com>

Subtract size of metadata in front of received data only once. This
simplifies the RX code.

Signed-off-by: Gerhard Engleder
Reviewed-by: Alexander Duyck
---
 drivers/net/ethernet/engleder/tsnep_main.c | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)

diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
index 1ae73c706c9e..1110530ec639 100644
--- a/drivers/net/ethernet/engleder/tsnep_main.c
+++ b/drivers/net/ethernet/engleder/tsnep_main.c
@@ -980,7 +980,7 @@ static struct sk_buff *tsnep_build_skb(struct tsnep_rx *rx, struct page *page,
 
 	/* update pointers within the skb to store the data */
 	skb_reserve(skb, TSNEP_SKB_PAD + TSNEP_RX_INLINE_METADATA_SIZE);
-	__skb_put(skb, length - TSNEP_RX_INLINE_METADATA_SIZE - ETH_FCS_LEN);
+	__skb_put(skb, length - ETH_FCS_LEN);
 
 	if (rx->adapter->hwtstamp_config.rx_filter == HWTSTAMP_FILTER_ALL) {
 		struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);
@@ -1052,6 +1052,13 @@ static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi,
 		dma_sync_single_range_for_cpu(dmadev, entry->dma, TSNEP_SKB_PAD,
 					      length, dma_dir);
 
+		/* RX metadata with timestamps is in front of actual data,
+		 * subtract metadata size to get length of actual data and
+		 * consider metadata size as offset of actual data during RX
+		 * processing
+		 */
+		length -= TSNEP_RX_INLINE_METADATA_SIZE;
+
 		rx->read = (rx->read + 1) % TSNEP_RING_SIZE;
 		desc_available++;
 
@@ -1060,7 +1067,7 @@ static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi,
 			page_pool_release_page(rx->page_pool, entry->page);
 
 			rx->packets++;
-			rx->bytes += length - TSNEP_RX_INLINE_METADATA_SIZE;
+			rx->bytes += length;
 			if (skb->pkt_type == PACKET_MULTICAST)
 				rx->multicast++;
 

From patchwork Mon Jan 9 19:15:20 2023
X-Patchwork-Submitter: Gerhard Engleder
X-Patchwork-Id: 13094143
From: Gerhard Engleder
To: netdev@vger.kernel.org
Cc: davem@davemloft.net, kuba@kernel.org, edumazet@google.com, pabeni@redhat.com, Gerhard Engleder
Subject: [PATCH net-next v4 07/10] tsnep: Prepare RX buffer for XDP support
Date: Mon, 9 Jan 2023 20:15:20 +0100
Message-Id: <20230109191523.12070-8-gerhard@engleder-embedded.com>

Introduce tsnep_adapter::xdp_prog, which will later signal that XDP is
enabled. Reserve XDP_PACKET_HEADROOM in front of RX buffer if XDP is
enabled. Also set DMA direction properly in this case.
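[Summary sketch of the resulting RX page layout, not taken from the driver sources; proportions are schematic.]

	/*
	 *  +-------------------+----------------------------+------------------+
	 *  | rx->offset        | TSNEP_RX_INLINE_METADATA_  | packet data ...  |
	 *  | (headroom)        | SIZE (timestamps)          |                  |
	 *  +-------------------+----------------------------+------------------+
	 *
	 *  rx->offset = XDP_PACKET_HEADROOM          if an XDP program is attached
	 *             = NET_SKB_PAD + NET_IP_ALIGN   otherwise (TSNEP_SKB_PAD)
	 *
	 * With XDP enabled the page pool maps pages DMA_BIDIRECTIONAL, so a
	 * later XDP_TX action can transmit directly out of the RX page.
	 */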
Signed-off-by: Gerhard Engleder --- drivers/net/ethernet/engleder/tsnep.h | 3 +++ drivers/net/ethernet/engleder/tsnep_main.c | 22 ++++++++++++++-------- 2 files changed, 17 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/engleder/tsnep.h b/drivers/net/ethernet/engleder/tsnep.h index 9cb267938794..855738d31d73 100644 --- a/drivers/net/ethernet/engleder/tsnep.h +++ b/drivers/net/ethernet/engleder/tsnep.h @@ -117,6 +117,7 @@ struct tsnep_rx { struct tsnep_adapter *adapter; void __iomem *addr; int queue_index; + unsigned int offset; void *page[TSNEP_RING_PAGE_COUNT]; dma_addr_t page_dma[TSNEP_RING_PAGE_COUNT]; @@ -183,6 +184,8 @@ struct tsnep_adapter { int rxnfc_count; int rxnfc_max; + struct bpf_prog *xdp_prog; + int num_tx_queues; struct tsnep_tx tx[TSNEP_MAX_QUEUES]; int num_rx_queues; diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c index 1110530ec639..0c9669edb2dd 100644 --- a/drivers/net/ethernet/engleder/tsnep_main.c +++ b/drivers/net/ethernet/engleder/tsnep_main.c @@ -26,9 +26,10 @@ #include #include #include +#include #define TSNEP_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN) -#define TSNEP_HEADROOM ALIGN(TSNEP_SKB_PAD, 4) +#define TSNEP_HEADROOM ALIGN(max(TSNEP_SKB_PAD, XDP_PACKET_HEADROOM), 4) #define TSNEP_MAX_RX_BUF_SIZE (PAGE_SIZE - TSNEP_HEADROOM - \ SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) @@ -838,9 +839,10 @@ static int tsnep_rx_ring_init(struct tsnep_rx *rx) pp_params.pool_size = TSNEP_RING_SIZE; pp_params.nid = dev_to_node(dmadev); pp_params.dev = dmadev; - pp_params.dma_dir = DMA_FROM_DEVICE; + pp_params.dma_dir = !!rx->adapter->xdp_prog ? + DMA_BIDIRECTIONAL : DMA_FROM_DEVICE; pp_params.max_len = TSNEP_MAX_RX_BUF_SIZE; - pp_params.offset = TSNEP_SKB_PAD; + pp_params.offset = rx->offset; rx->page_pool = page_pool_create(&pp_params); if (IS_ERR(rx->page_pool)) { retval = PTR_ERR(rx->page_pool); @@ -875,7 +877,7 @@ static void tsnep_rx_set_page(struct tsnep_rx *rx, struct tsnep_rx_entry *entry, entry->page = page; entry->len = TSNEP_MAX_RX_BUF_SIZE; entry->dma = page_pool_get_dma_addr(entry->page); - entry->desc->rx = __cpu_to_le64(entry->dma + TSNEP_SKB_PAD); + entry->desc->rx = __cpu_to_le64(entry->dma + rx->offset); } static int tsnep_rx_alloc_buffer(struct tsnep_rx *rx, int index) @@ -979,14 +981,14 @@ static struct sk_buff *tsnep_build_skb(struct tsnep_rx *rx, struct page *page, return NULL; /* update pointers within the skb to store the data */ - skb_reserve(skb, TSNEP_SKB_PAD + TSNEP_RX_INLINE_METADATA_SIZE); + skb_reserve(skb, rx->offset + TSNEP_RX_INLINE_METADATA_SIZE); __skb_put(skb, length - ETH_FCS_LEN); if (rx->adapter->hwtstamp_config.rx_filter == HWTSTAMP_FILTER_ALL) { struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb); struct tsnep_rx_inline *rx_inline = (struct tsnep_rx_inline *)(page_address(page) + - TSNEP_SKB_PAD); + rx->offset); skb_shinfo(skb)->tx_flags |= SKBTX_HW_TSTAMP_NETDEV; @@ -1046,10 +1048,10 @@ static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi, */ dma_rmb(); - prefetch(page_address(entry->page) + TSNEP_SKB_PAD); + prefetch(page_address(entry->page) + rx->offset); length = __le32_to_cpu(entry->desc_wb->properties) & TSNEP_DESC_LENGTH_MASK; - dma_sync_single_range_for_cpu(dmadev, entry->dma, TSNEP_SKB_PAD, + dma_sync_single_range_for_cpu(dmadev, entry->dma, rx->offset, length, dma_dir); /* RX metadata with timestamps is in front of actual data, @@ -1111,6 +1113,10 @@ static int tsnep_rx_open(struct tsnep_adapter *adapter, void __iomem *addr, 
 	rx->adapter = adapter;
 	rx->addr = addr;
 	rx->queue_index = queue_index;
+	if (!!adapter->xdp_prog)
+		rx->offset = XDP_PACKET_HEADROOM;
+	else
+		rx->offset = TSNEP_SKB_PAD;
 
 	retval = tsnep_rx_ring_init(rx);
 	if (retval)

From patchwork Mon Jan 9 19:15:21 2023
X-Patchwork-Submitter: Gerhard Engleder
X-Patchwork-Id: 13094142
From: Gerhard Engleder
To: netdev@vger.kernel.org
Cc: davem@davemloft.net, kuba@kernel.org, edumazet@google.com, pabeni@redhat.com, Gerhard Engleder, Saeed Mahameed
Subject: [PATCH net-next v4 08/10] tsnep: Add RX queue info for XDP support
Date: Mon, 9 Jan 2023 20:15:21 +0100
Message-Id: <20230109191523.12070-9-gerhard@engleder-embedded.com>

Register xdp_rxq_info with page_pool memory model. This is needed for
XDP buffer handling.
Signed-off-by: Gerhard Engleder Reviewed-by: Saeed Mahameed --- drivers/net/ethernet/engleder/tsnep.h | 2 ++ drivers/net/ethernet/engleder/tsnep_main.c | 39 ++++++++++++++++------ 2 files changed, 31 insertions(+), 10 deletions(-) diff --git a/drivers/net/ethernet/engleder/tsnep.h b/drivers/net/ethernet/engleder/tsnep.h index 855738d31d73..2268ff793edf 100644 --- a/drivers/net/ethernet/engleder/tsnep.h +++ b/drivers/net/ethernet/engleder/tsnep.h @@ -134,6 +134,8 @@ struct tsnep_rx { u32 dropped; u32 multicast; u32 alloc_failed; + + struct xdp_rxq_info xdp_rxq; }; struct tsnep_queue { diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c index 0c9669edb2dd..451ad1849b9d 100644 --- a/drivers/net/ethernet/engleder/tsnep_main.c +++ b/drivers/net/ethernet/engleder/tsnep_main.c @@ -792,6 +792,9 @@ static void tsnep_rx_ring_cleanup(struct tsnep_rx *rx) entry->page = NULL; } + if (xdp_rxq_info_is_reg(&rx->xdp_rxq)) + xdp_rxq_info_unreg(&rx->xdp_rxq); + if (rx->page_pool) page_pool_destroy(rx->page_pool); @@ -807,7 +810,7 @@ static void tsnep_rx_ring_cleanup(struct tsnep_rx *rx) } } -static int tsnep_rx_ring_init(struct tsnep_rx *rx) +static int tsnep_rx_ring_init(struct tsnep_rx *rx, unsigned int napi_id) { struct device *dmadev = rx->adapter->dmadev; struct tsnep_rx_entry *entry; @@ -850,6 +853,15 @@ static int tsnep_rx_ring_init(struct tsnep_rx *rx) goto failed; } + retval = xdp_rxq_info_reg(&rx->xdp_rxq, rx->adapter->netdev, + rx->queue_index, napi_id); + if (retval) + goto failed; + retval = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq, MEM_TYPE_PAGE_POOL, + rx->page_pool); + if (retval) + goto failed; + for (i = 0; i < TSNEP_RING_SIZE; i++) { entry = &rx->entry[i]; next_entry = &rx->entry[(i + 1) % TSNEP_RING_SIZE]; @@ -1104,7 +1116,8 @@ static bool tsnep_rx_pending(struct tsnep_rx *rx) } static int tsnep_rx_open(struct tsnep_adapter *adapter, void __iomem *addr, - int queue_index, struct tsnep_rx *rx) + unsigned int napi_id, int queue_index, + struct tsnep_rx *rx) { dma_addr_t dma; int retval; @@ -1118,7 +1131,7 @@ static int tsnep_rx_open(struct tsnep_adapter *adapter, void __iomem *addr, else rx->offset = TSNEP_SKB_PAD; - retval = tsnep_rx_ring_init(rx); + retval = tsnep_rx_ring_init(rx, napi_id); if (retval) return retval; @@ -1245,14 +1258,19 @@ static void tsnep_free_irq(struct tsnep_queue *queue, bool first) static int tsnep_netdev_open(struct net_device *netdev) { struct tsnep_adapter *adapter = netdev_priv(netdev); - int i; - void __iomem *addr; int tx_queue_index = 0; int rx_queue_index = 0; - int retval; + unsigned int napi_id; + void __iomem *addr; + int i, retval; for (i = 0; i < adapter->num_queues; i++) { adapter->queue[i].adapter = adapter; + + netif_napi_add(adapter->netdev, &adapter->queue[i].napi, + tsnep_poll); + napi_id = adapter->queue[i].napi.napi_id; + if (adapter->queue[i].tx) { addr = adapter->addr + TSNEP_QUEUE(tx_queue_index); retval = tsnep_tx_open(adapter, addr, tx_queue_index, @@ -1263,7 +1281,7 @@ static int tsnep_netdev_open(struct net_device *netdev) } if (adapter->queue[i].rx) { addr = adapter->addr + TSNEP_QUEUE(rx_queue_index); - retval = tsnep_rx_open(adapter, addr, + retval = tsnep_rx_open(adapter, addr, napi_id, rx_queue_index, adapter->queue[i].rx); if (retval) @@ -1295,8 +1313,6 @@ static int tsnep_netdev_open(struct net_device *netdev) goto phy_failed; for (i = 0; i < adapter->num_queues; i++) { - netif_napi_add(adapter->netdev, &adapter->queue[i].napi, - tsnep_poll); napi_enable(&adapter->queue[i].napi); 
 		tsnep_enable_irq(adapter, adapter->queue[i].irq_mask);
@@ -1317,6 +1333,8 @@ static int tsnep_netdev_open(struct net_device *netdev)
 			tsnep_rx_close(adapter->queue[i].rx);
 		if (adapter->queue[i].tx)
 			tsnep_tx_close(adapter->queue[i].tx);
+
+		netif_napi_del(&adapter->queue[i].napi);
 	}
 	return retval;
 }
@@ -1335,7 +1353,6 @@ static int tsnep_netdev_close(struct net_device *netdev)
 		tsnep_disable_irq(adapter, adapter->queue[i].irq_mask);
 
 		napi_disable(&adapter->queue[i].napi);
-		netif_napi_del(&adapter->queue[i].napi);
 
 		tsnep_free_irq(&adapter->queue[i], i == 0);
 
@@ -1343,6 +1360,8 @@ static int tsnep_netdev_close(struct net_device *netdev)
 			tsnep_rx_close(adapter->queue[i].rx);
 		if (adapter->queue[i].tx)
 			tsnep_tx_close(adapter->queue[i].tx);
+
+		netif_napi_del(&adapter->queue[i].napi);
 	}
 
 	return 0;

From patchwork Mon Jan 9 19:15:22 2023
X-Patchwork-Submitter: Gerhard Engleder
X-Patchwork-Id: 13094144
From: Gerhard Engleder
To: netdev@vger.kernel.org
Cc: davem@davemloft.net, kuba@kernel.org, edumazet@google.com, pabeni@redhat.com, Gerhard Engleder
Subject: [PATCH net-next v4 09/10] tsnep: Add XDP RX support
Date: Mon, 9 Jan 2023 20:15:22 +0100
Message-Id: <20230109191523.12070-10-gerhard@engleder-embedded.com>

If BPF program is set up, then run BPF program for every received frame
and execute the selected action.
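[For context, the program run per frame by this RX path is an ordinary XDP BPF program. A minimal example of such a program follows; it is hypothetical, not part of this series, and would be built separately with clang -O2 -target bpf.]

	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	SEC("xdp")
	int xdp_pass_all(struct xdp_md *ctx)
	{
		/* XDP_PASS hands the frame back to the driver, which then
		 * builds an skb as before; the other return codes
		 * (XDP_DROP, XDP_TX, XDP_REDIRECT, XDP_ABORTED) are handled
		 * in tsnep_xdp_run_prog().
		 */
		return XDP_PASS;
	}

	char _license[] SEC("license") = "GPL";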
Signed-off-by: Gerhard Engleder --- drivers/net/ethernet/engleder/tsnep_main.c | 122 ++++++++++++++++++++- 1 file changed, 120 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c index 451ad1849b9d..002c879639db 100644 --- a/drivers/net/ethernet/engleder/tsnep_main.c +++ b/drivers/net/ethernet/engleder/tsnep_main.c @@ -27,6 +27,7 @@ #include #include #include +#include #define TSNEP_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN) #define TSNEP_HEADROOM ALIGN(max(TSNEP_SKB_PAD, XDP_PACKET_HEADROOM), 4) @@ -44,6 +45,9 @@ #define TSNEP_COALESCE_USECS_MAX ((ECM_INT_DELAY_MASK >> ECM_INT_DELAY_SHIFT) * \ ECM_INT_DELAY_BASE_US + ECM_INT_DELAY_BASE_US - 1) +#define TSNEP_XDP_TX BIT(0) +#define TSNEP_XDP_REDIRECT BIT(1) + enum { __TSNEP_DOWN, }; @@ -625,6 +629,28 @@ static void tsnep_xdp_xmit_flush(struct tsnep_tx *tx) iowrite32(TSNEP_CONTROL_TX_ENABLE, tx->addr + TSNEP_CONTROL); } +static bool tsnep_xdp_xmit_back(struct tsnep_adapter *adapter, + struct xdp_buff *xdp, + struct netdev_queue *tx_nq, struct tsnep_tx *tx) +{ + struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp); + bool xmit; + + if (unlikely(!xdpf)) + return false; + + __netif_tx_lock(tx_nq, smp_processor_id()); + + /* Avoid transmit queue timeout since we share it with the slow path */ + txq_trans_cond_update(tx_nq); + + xmit = tsnep_xdp_xmit_frame_ring(xdpf, tx, TSNEP_TX_TYPE_XDP_TX); + + __netif_tx_unlock(tx_nq); + + return xmit; +} + static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget) { struct tsnep_tx_entry *entry; @@ -983,6 +1009,62 @@ static int tsnep_rx_refill(struct tsnep_rx *rx, int count, bool reuse) return i; } +static bool tsnep_xdp_run_prog(struct tsnep_rx *rx, struct bpf_prog *prog, + struct xdp_buff *xdp, int *status, + struct netdev_queue *tx_nq, struct tsnep_tx *tx) +{ + unsigned int length; + unsigned int sync; + u32 act; + + length = xdp->data_end - xdp->data_hard_start - XDP_PACKET_HEADROOM; + + act = bpf_prog_run_xdp(prog, xdp); + + /* Due xdp_adjust_tail: DMA sync for_device cover max len CPU touch */ + sync = xdp->data_end - xdp->data_hard_start - XDP_PACKET_HEADROOM; + sync = max(sync, length); + + switch (act) { + case XDP_PASS: + return false; + case XDP_TX: + if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx)) + goto out_failure; + *status |= TSNEP_XDP_TX; + return true; + case XDP_REDIRECT: + if (xdp_do_redirect(rx->adapter->netdev, xdp, prog) < 0) + goto out_failure; + *status |= TSNEP_XDP_REDIRECT; + return true; + default: + bpf_warn_invalid_xdp_action(rx->adapter->netdev, prog, act); + fallthrough; + case XDP_ABORTED: +out_failure: + trace_xdp_exception(rx->adapter->netdev, prog, act); + fallthrough; + case XDP_DROP: + page_pool_put_page(rx->page_pool, virt_to_head_page(xdp->data), + sync, true); + return true; + } +} + +static void tsnep_finalize_xdp(struct tsnep_adapter *adapter, int status, + struct netdev_queue *tx_nq, struct tsnep_tx *tx) +{ + if (status & TSNEP_XDP_TX) { + __netif_tx_lock(tx_nq, smp_processor_id()); + tsnep_xdp_xmit_flush(tx); + __netif_tx_unlock(tx_nq); + } + + if (status & TSNEP_XDP_REDIRECT) + xdp_do_flush(); +} + static struct sk_buff *tsnep_build_skb(struct tsnep_rx *rx, struct page *page, int length) { @@ -1018,15 +1100,29 @@ static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi, int budget) { struct device *dmadev = rx->adapter->dmadev; - int desc_available; - int done = 0; enum dma_data_direction dma_dir; struct tsnep_rx_entry *entry; + struct netdev_queue *tx_nq; + 
struct bpf_prog *prog; + struct xdp_buff xdp; struct sk_buff *skb; + struct tsnep_tx *tx; + int desc_available; + int xdp_status = 0; + int done = 0; int length; desc_available = tsnep_rx_desc_available(rx); dma_dir = page_pool_get_dma_dir(rx->page_pool); + prog = READ_ONCE(rx->adapter->xdp_prog); + if (prog) { + int queue = smp_processor_id() % rx->adapter->num_tx_queues; + + tx_nq = netdev_get_tx_queue(rx->adapter->netdev, queue); + tx = &rx->adapter->tx[queue]; + + xdp_init_buff(&xdp, PAGE_SIZE, &rx->xdp_rxq); + } while (likely(done < budget) && (rx->read != rx->write)) { entry = &rx->entry[rx->read]; @@ -1076,6 +1172,25 @@ static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi, rx->read = (rx->read + 1) % TSNEP_RING_SIZE; desc_available++; + if (prog) { + bool consume; + + xdp_prepare_buff(&xdp, page_address(entry->page), + XDP_PACKET_HEADROOM + TSNEP_RX_INLINE_METADATA_SIZE, + length, false); + + consume = tsnep_xdp_run_prog(rx, prog, &xdp, + &xdp_status, tx_nq, tx); + if (consume) { + rx->packets++; + rx->bytes += length; + + entry->page = NULL; + + continue; + } + } + skb = tsnep_build_skb(rx, entry->page, length); if (skb) { page_pool_release_page(rx->page_pool, entry->page); @@ -1094,6 +1209,9 @@ static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi, entry->page = NULL; } + if (xdp_status) + tsnep_finalize_xdp(rx->adapter, xdp_status, tx_nq, tx); + if (desc_available) tsnep_rx_refill(rx, desc_available, false); From patchwork Mon Jan 9 19:15:23 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Gerhard Engleder X-Patchwork-Id: 13094145 X-Patchwork-Delegate: kuba@kernel.org Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id A62CAC54EBD for ; Mon, 9 Jan 2023 19:15:57 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S235392AbjAITPz (ORCPT ); Mon, 9 Jan 2023 14:15:55 -0500 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:54518 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S237313AbjAITPg (ORCPT ); Mon, 9 Jan 2023 14:15:36 -0500 Received: from mx14lb.world4you.com (mx14lb.world4you.com [81.19.149.124]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 36BE711C2C for ; Mon, 9 Jan 2023 11:15:35 -0800 (PST) DKIM-Signature: v=1; a=rsa-sha256; q=dns/txt; c=relaxed/relaxed; d=engleder-embedded.com; s=dkim11; h=Content-Transfer-Encoding:MIME-Version: References:In-Reply-To:Message-Id:Date:Subject:Cc:To:From:Sender:Reply-To: Content-Type:Content-ID:Content-Description:Resent-Date:Resent-From: Resent-Sender:Resent-To:Resent-Cc:Resent-Message-ID:List-Id:List-Help: List-Unsubscribe:List-Subscribe:List-Post:List-Owner:List-Archive; bh=swB9W0iJUZ5mVohITM5ska0qjuOouZwKiEflwaKLXMk=; b=JvVu6Omfi3JInXdOB33zO4GCLD VOELItKbDFJgH1x39Tuo1Un2Ab6dmXskilRLXcbm+x5HK4J33hEJVUBupXj2wb8ktDKx3uYtMR8cV mP3/7PQPmHhPMwf6w+P3mGhPldvAQ9M9ZK8b3HttGwrdDy9Ixsb+G7PC1lWqIYeNBpgU=; Received: from 88-117-53-243.adsl.highway.telekom.at ([88.117.53.243] helo=hornet.engleder.at) by mx14lb.world4you.com with esmtpsa (TLS1.2) tls TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 (Exim 4.94.2) (envelope-from ) id 1pExcL-0007WQ-JD; Mon, 09 Jan 2023 20:15:33 +0100 From: Gerhard Engleder To: netdev@vger.kernel.org Cc: davem@davemloft.net, kuba@kernel.org, edumazet@google.com, 
pabeni@redhat.com, Gerhard Engleder
Subject: [PATCH net-next v4 10/10] tsnep: Support XDP BPF program setup
Date: Mon, 9 Jan 2023 20:15:23 +0100
Message-Id: <20230109191523.12070-11-gerhard@engleder-embedded.com>

Implement setup of BPF programs for XDP RX path with command
XDP_SETUP_PROG of ndo_bpf(). This is the final step for XDP RX path
support.

tsnep_netdev_close() is called directly during BPF program setup. Add
netif_carrier_off() and netif_tx_stop_all_queues() calls to signal to
network stack that device is down. Otherwise network stack would
continue transmitting packets.

Return value of tsnep_netdev_open() is not checked during BPF program
setup like in other drivers. Forwarding the return value would result
in a bpf_prog_put() call in dev_xdp_install(), which would make removal
of BPF program necessary. If tsnep_netdev_open() fails during BPF
program setup, then the network stack would call tsnep_netdev_close()
anyway. Thus, tsnep_netdev_close() checks now if device is already
down.

Additionally remove $(tsnep-y) from $(tsnep-objs) because it is added
automatically.

Test results with A53 1.2GHz:

XDP_DROP (samples/bpf/xdp1)
proto 17:     883878 pkt/s

XDP_TX (samples/bpf/xdp2)
proto 17:     255693 pkt/s

XDP_REDIRECT (samples/bpf/xdpsock)
 sock0@eth2:0 rxdrop xdp-drv
                   pps            pkts           1.00
rx                 855,582        5,404,523
tx                 0              0

XDP_REDIRECT (samples/bpf/xdp_redirect)
eth2->eth1         613,267 rx/s   0 err,drop/s   613,272 xmit/s

Signed-off-by: Gerhard Engleder
---
 drivers/net/ethernet/engleder/Makefile     |  2 +-
 drivers/net/ethernet/engleder/tsnep.h      |  6 +++++
 drivers/net/ethernet/engleder/tsnep_main.c | 25 ++++++++++++++++---
 drivers/net/ethernet/engleder/tsnep_xdp.c  | 29 ++++++++++++++++++++++
 4 files changed, 58 insertions(+), 4 deletions(-)
 create mode 100644 drivers/net/ethernet/engleder/tsnep_xdp.c

diff --git a/drivers/net/ethernet/engleder/Makefile b/drivers/net/ethernet/engleder/Makefile
index b6e3b16623de..b98135f65eb7 100644
--- a/drivers/net/ethernet/engleder/Makefile
+++ b/drivers/net/ethernet/engleder/Makefile
@@ -6,5 +6,5 @@ obj-$(CONFIG_TSNEP) += tsnep.o
 
 tsnep-objs := tsnep_main.o tsnep_ethtool.o tsnep_ptp.o tsnep_tc.o \
-	      tsnep_rxnfc.o $(tsnep-y)
+	      tsnep_rxnfc.o tsnep_xdp.o
 
 tsnep-$(CONFIG_TSNEP_SELFTESTS) += tsnep_selftests.o
diff --git a/drivers/net/ethernet/engleder/tsnep.h b/drivers/net/ethernet/engleder/tsnep.h
index 2268ff793edf..550aae24c8b9 100644
--- a/drivers/net/ethernet/engleder/tsnep.h
+++ b/drivers/net/ethernet/engleder/tsnep.h
@@ -197,6 +197,9 @@ struct tsnep_adapter {
 	struct tsnep_queue queue[TSNEP_MAX_QUEUES];
 };
 
+int tsnep_netdev_open(struct net_device *netdev);
+int tsnep_netdev_close(struct net_device *netdev);
+
 extern const struct ethtool_ops tsnep_ethtool_ops;
 
 int tsnep_ptp_init(struct tsnep_adapter *adapter);
@@ -220,6 +223,9 @@ int tsnep_rxnfc_add_rule(struct tsnep_adapter *adapter,
 int tsnep_rxnfc_del_rule(struct tsnep_adapter *adapter,
 			 struct ethtool_rxnfc *cmd);
 
+int tsnep_xdp_setup_prog(struct tsnep_adapter *adapter, struct bpf_prog *prog,
+			 struct netlink_ext_ack *extack);
+
 #if IS_ENABLED(CONFIG_TSNEP_SELFTESTS)
 int tsnep_ethtool_get_test_count(void);
 void tsnep_ethtool_get_test_strings(u8 *data);
diff --git a/drivers/net/ethernet/engleder/tsnep_main.c
b/drivers/net/ethernet/engleder/tsnep_main.c index 002c879639db..57c35c74dc08 100644 --- a/drivers/net/ethernet/engleder/tsnep_main.c +++ b/drivers/net/ethernet/engleder/tsnep_main.c @@ -1373,7 +1373,7 @@ static void tsnep_free_irq(struct tsnep_queue *queue, bool first) memset(queue->name, 0, sizeof(queue->name)); } -static int tsnep_netdev_open(struct net_device *netdev) +int tsnep_netdev_open(struct net_device *netdev) { struct tsnep_adapter *adapter = netdev_priv(netdev); int tx_queue_index = 0; @@ -1436,6 +1436,8 @@ static int tsnep_netdev_open(struct net_device *netdev) tsnep_enable_irq(adapter, adapter->queue[i].irq_mask); } + netif_tx_start_all_queues(adapter->netdev); + clear_bit(__TSNEP_DOWN, &adapter->state); return 0; @@ -1457,12 +1459,16 @@ static int tsnep_netdev_open(struct net_device *netdev) return retval; } -static int tsnep_netdev_close(struct net_device *netdev) +int tsnep_netdev_close(struct net_device *netdev) { struct tsnep_adapter *adapter = netdev_priv(netdev); int i; - set_bit(__TSNEP_DOWN, &adapter->state); + if (test_and_set_bit(__TSNEP_DOWN, &adapter->state)) + return 0; + + netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); tsnep_disable_irq(adapter, ECM_INT_LINK); tsnep_phy_close(adapter); @@ -1627,6 +1633,18 @@ static ktime_t tsnep_netdev_get_tstamp(struct net_device *netdev, return ns_to_ktime(timestamp); } +static int tsnep_netdev_bpf(struct net_device *dev, struct netdev_bpf *bpf) +{ + struct tsnep_adapter *adapter = netdev_priv(dev); + + switch (bpf->command) { + case XDP_SETUP_PROG: + return tsnep_xdp_setup_prog(adapter, bpf->prog, bpf->extack); + default: + return -EOPNOTSUPP; + } +} + static int tsnep_netdev_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **xdp, u32 flags) { @@ -1677,6 +1695,7 @@ static const struct net_device_ops tsnep_netdev_ops = { .ndo_set_features = tsnep_netdev_set_features, .ndo_get_tstamp = tsnep_netdev_get_tstamp, .ndo_setup_tc = tsnep_tc_setup, + .ndo_bpf = tsnep_netdev_bpf, .ndo_xdp_xmit = tsnep_netdev_xdp_xmit, }; diff --git a/drivers/net/ethernet/engleder/tsnep_xdp.c b/drivers/net/ethernet/engleder/tsnep_xdp.c new file mode 100644 index 000000000000..5ced32cd9bb7 --- /dev/null +++ b/drivers/net/ethernet/engleder/tsnep_xdp.c @@ -0,0 +1,29 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2022 Gerhard Engleder */ + +#include +#include + +#include "tsnep.h" + +int tsnep_xdp_setup_prog(struct tsnep_adapter *adapter, struct bpf_prog *prog, + struct netlink_ext_ack *extack) +{ + struct net_device *dev = adapter->netdev; + struct bpf_prog *old_prog; + bool need_reset, running; + + running = netif_running(dev); + need_reset = !!adapter->xdp_prog != !!prog; + if (running && need_reset) + tsnep_netdev_close(dev); + + old_prog = xchg(&adapter->xdp_prog, prog); + if (old_prog) + bpf_prog_put(old_prog); + + if (running && need_reset) + tsnep_netdev_open(dev); + + return 0; +}
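[To see how tsnep_xdp_setup_prog() is reached in practice, a user-space sketch follows that attaches a program in native (driver) mode via libbpf. The interface name "eth2", the object file "xdp_prog.o" and the program name "xdp_pass_all" are assumptions for illustration; this code is not part of the series.]

	#include <bpf/libbpf.h>
	#include <linux/if_link.h>
	#include <net/if.h>
	#include <stdio.h>

	int main(void)
	{
		struct bpf_object *obj;
		struct bpf_program *prog;
		int ifindex, prog_fd;

		ifindex = if_nametoindex("eth2");		/* assumed interface */
		if (!ifindex)
			return 1;

		obj = bpf_object__open_file("xdp_prog.o", NULL);	/* assumed object */
		if (!obj || bpf_object__load(obj))
			return 1;

		prog = bpf_object__find_program_by_name(obj, "xdp_pass_all");
		if (!prog)
			return 1;
		prog_fd = bpf_program__fd(prog);

		/* Native (driver) mode: the core calls ndo_bpf() with
		 * XDP_SETUP_PROG, which lands in tsnep_netdev_bpf() and then
		 * tsnep_xdp_setup_prog().
		 */
		if (bpf_xdp_attach(ifindex, prog_fd, XDP_FLAGS_DRV_MODE, NULL))
			return 1;

		puts("XDP program attached");
		return 0;
	}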