From patchwork Thu Apr 27 03:05:20 2023
From: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
To: netdev@vger.kernel.org
Cc: "Michael S. Tsirkin", Jason Wang, "David S. Miller", Eric Dumazet,
 Jakub Kicinski, Paolo Abeni, Alexei Starovoitov, Daniel Borkmann,
 Jesper Dangaard Brouer, John Fastabend,
 virtualization@lists.linux-foundation.org, bpf@vger.kernel.org
Subject: [PATCH net-next v4 01/15] virtio_net: mergeable xdp: put old page immediately
Date: Thu, 27 Apr 2023 11:05:20 +0800
Message-Id: <20230427030534.115066-2-xuanzhuo@linux.alibaba.com>

In the mergeable XDP path of virtio-net, the code always has to check
whether two pages are in use and choose which one to release. This
complicates the handling of every XDP action and is easy to get wrong.

Throughout the whole process the rules are:

* If xdp_page is consumed (PASS, TX, REDIRECT), release the old page.
* In the drop case, release both: the old page obtained from buf is
  released inside err_xdp, and xdp_page must be released separately.

But in fact, when we allocate a new page, we can release the old page
immediately. Then only one page is ever in use, and in the drop case
only that page needs to be released. On the drop path, err_xdp releases
the variable "page", so we only need to make "page" point to the new
xdp_page in advance.
Signed-off-by: Xuan Zhuo
Acked-by: Jason Wang
---
 drivers/net/virtio_net.c | 19 +++++++------------
 1 file changed, 7 insertions(+), 12 deletions(-)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index e2560b6f7980..42435e762d72 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1245,6 +1245,9 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 			if (!xdp_page)
 				goto err_xdp;
 			offset = VIRTIO_XDP_HEADROOM;
+
+			put_page(page);
+			page = xdp_page;
 		} else if (unlikely(headroom < virtnet_get_headroom(vi))) {
 			xdp_room = SKB_DATA_ALIGN(VIRTIO_XDP_HEADROOM +
 						  sizeof(struct skb_shared_info));
@@ -1259,11 +1262,12 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 			       page_address(page) + offset, len);
 			frame_sz = PAGE_SIZE;
 			offset = VIRTIO_XDP_HEADROOM;
-		} else {
-			xdp_page = page;
+
+			put_page(page);
+			page = xdp_page;
 		}
-		data = page_address(xdp_page) + offset;
+		data = page_address(page) + offset;
 		err = virtnet_build_xdp_buff_mrg(dev, vi, rq, &xdp, data, len, frame_sz,
 						 &num_buf, &xdp_frags_truesz, stats);
 		if (unlikely(err))
@@ -1278,8 +1282,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 			if (unlikely(!head_skb))
 				goto err_xdp_frags;
 
-			if (unlikely(xdp_page != page))
-				put_page(page);
 			rcu_read_unlock();
 			return head_skb;
 		case XDP_TX:
@@ -1297,8 +1299,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 				goto err_xdp_frags;
 			}
 			*xdp_xmit |= VIRTIO_XDP_TX;
-			if (unlikely(xdp_page != page))
-				put_page(page);
 			rcu_read_unlock();
 			goto xdp_xmit;
 		case XDP_REDIRECT:
@@ -1307,8 +1307,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 			if (err)
 				goto err_xdp_frags;
 			*xdp_xmit |= VIRTIO_XDP_REDIR;
-			if (unlikely(xdp_page != page))
-				put_page(page);
 			rcu_read_unlock();
 			goto xdp_xmit;
 		default:
@@ -1321,9 +1319,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 			goto err_xdp_frags;
 		}
 err_xdp_frags:
-		if (unlikely(xdp_page != page))
-			__free_pages(xdp_page, 0);
-
 		if (xdp_buff_has_frags(&xdp)) {
 			shinfo = xdp_get_shared_info_from_buff(&xdp);
 			for (i = 0; i < shinfo->nr_frags; i++) {
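[For readers following the series, a minimal user-space C sketch of the
ownership rule this patch adopts. swap_buf() is hypothetical, not the
driver code: once the replacement buffer exists, the old one is
released on the spot, so at most one buffer is ever live and the error
path frees exactly one thing.]

    #include <stdlib.h>
    #include <string.h>

    /* swap_buf() mirrors the shape of "put_page(page); page = xdp_page;". */
    char *swap_buf(char **buf, size_t len)
    {
            char *new_buf = malloc(len);

            if (!new_buf)
                    return NULL;    /* caller still owns *buf, frees it once */

            memcpy(new_buf, *buf, len);
            free(*buf);             /* release the old buffer immediately */
            *buf = new_buf;         /* "page" now refers to the new buffer */
            return *buf;
    }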
From patchwork Thu Apr 27 03:05:21 2023
From: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
To: netdev@vger.kernel.org
Cc: "Michael S. Tsirkin", Jason Wang, "David S. Miller", Eric Dumazet,
 Jakub Kicinski, Paolo Abeni, Alexei Starovoitov, Daniel Borkmann,
 Jesper Dangaard Brouer, John Fastabend,
 virtualization@lists.linux-foundation.org, bpf@vger.kernel.org
Subject: [PATCH net-next v4 02/15] virtio_net: introduce mergeable_xdp_get_buf()
Date: Thu, 27 Apr 2023 11:05:21 +0800
Message-Id: <20230427030534.115066-3-xuanzhuo@linux.alibaba.com>

Separate the logic that prepares a buffer for XDP out of
receive_mergeable(). The purpose of this is to simplify the logic of
executing XDP. The main logic here is that when the headroom is
insufficient, we need to allocate a new page and recalculate the
offset. Note that when a new page is allocated, the variable "page" is
updated to refer to the new page.

Signed-off-by: Xuan Zhuo
Acked-by: Jason Wang
---
 drivers/net/virtio_net.c | 135 +++++++++++++++++++++++----------------
 1 file changed, 79 insertions(+), 56 deletions(-)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 42435e762d72..b8832b0cc215 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1162,6 +1162,81 @@ static int virtnet_build_xdp_buff_mrg(struct net_device *dev,
 	return 0;
 }
 
+static void *mergeable_xdp_get_buf(struct virtnet_info *vi,
+				   struct receive_queue *rq,
+				   struct bpf_prog *xdp_prog,
+				   void *ctx,
+				   unsigned int *frame_sz,
+				   int *num_buf,
+				   struct page **page,
+				   int offset,
+				   unsigned int *len,
+				   struct virtio_net_hdr_mrg_rxbuf *hdr)
+{
+	unsigned int truesize = mergeable_ctx_to_truesize(ctx);
+	unsigned int headroom = mergeable_ctx_to_headroom(ctx);
+	struct page *xdp_page;
+	unsigned int xdp_room;
+
+	/* Transient failure which in theory could occur if
+	 * in-flight packets from before XDP was enabled reach
+	 * the receive path after XDP is loaded.
+	 */
+	if (unlikely(hdr->hdr.gso_type))
+		return NULL;
+
+	/* Now XDP core assumes frag size is PAGE_SIZE, but buffers
+	 * with headroom may add hole in truesize, which
+	 * make their length exceed PAGE_SIZE. So we disabled the
+	 * hole mechanism for xdp. See add_recvbuf_mergeable().
+	 */
+	*frame_sz = truesize;
+
+	/* This happens when headroom is not enough because
+	 * of the buffer was prefilled before XDP is set.
+	 * This should only happen for the first several packets.
+	 * In fact, vq reset can be used here to help us clean up
+	 * the prefilled buffers, but many existing devices do not
+	 * support it, and we don't want to bother users who are
+	 * using xdp normally.
+	 */
+	if (!xdp_prog->aux->xdp_has_frags &&
+	    (*num_buf > 1 || headroom < virtnet_get_headroom(vi))) {
+		/* linearize data for XDP */
+		xdp_page = xdp_linearize_page(rq, num_buf,
+					      *page, offset,
+					      VIRTIO_XDP_HEADROOM,
+					      len);
+		*frame_sz = PAGE_SIZE;
+
+		if (!xdp_page)
+			return NULL;
+		offset = VIRTIO_XDP_HEADROOM;
+
+		put_page(*page);
+		*page = xdp_page;
+	} else if (unlikely(headroom < virtnet_get_headroom(vi))) {
+		xdp_room = SKB_DATA_ALIGN(VIRTIO_XDP_HEADROOM +
+					  sizeof(struct skb_shared_info));
+		if (*len + xdp_room > PAGE_SIZE)
+			return NULL;
+
+		xdp_page = alloc_page(GFP_ATOMIC);
+		if (!xdp_page)
+			return NULL;
+
+		memcpy(page_address(xdp_page) + VIRTIO_XDP_HEADROOM,
+		       page_address(*page) + offset, *len);
+		*frame_sz = PAGE_SIZE;
+		offset = VIRTIO_XDP_HEADROOM;
+
+		put_page(*page);
+		*page = xdp_page;
+	}
+
+	return page_address(*page) + offset;
+}
+
 static struct sk_buff *receive_mergeable(struct net_device *dev,
 					 struct virtnet_info *vi,
 					 struct receive_queue *rq,
@@ -1181,7 +1256,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 	unsigned int headroom = mergeable_ctx_to_headroom(ctx);
 	unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
 	unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);
-	unsigned int frame_sz, xdp_room;
+	unsigned int frame_sz;
 	int err;
 
 	head_skb = NULL;
@@ -1211,63 +1286,11 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 		u32 act;
 		int i;
 
-		/* Transient failure which in theory could occur if
-		 * in-flight packets from before XDP was enabled reach
-		 * the receive path after XDP is loaded.
-		 */
-		if (unlikely(hdr->hdr.gso_type))
+		data = mergeable_xdp_get_buf(vi, rq, xdp_prog, ctx, &frame_sz,
+					     &num_buf, &page, offset, &len, hdr);
+		if (unlikely(!data))
 			goto err_xdp;
 
-		/* Now XDP core assumes frag size is PAGE_SIZE, but buffers
-		 * with headroom may add hole in truesize, which
-		 * make their length exceed PAGE_SIZE. So we disabled the
-		 * hole mechanism for xdp. See add_recvbuf_mergeable().
-		 */
-		frame_sz = truesize;
-
-		/* This happens when headroom is not enough because
-		 * of the buffer was prefilled before XDP is set.
-		 * This should only happen for the first several packets.
-		 * In fact, vq reset can be used here to help us clean up
-		 * the prefilled buffers, but many existing devices do not
-		 * support it, and we don't want to bother users who are
-		 * using xdp normally.
-		 */
-		if (!xdp_prog->aux->xdp_has_frags &&
-		    (num_buf > 1 || headroom < virtnet_get_headroom(vi))) {
-			/* linearize data for XDP */
-			xdp_page = xdp_linearize_page(rq, &num_buf,
-						      page, offset,
-						      VIRTIO_XDP_HEADROOM,
-						      &len);
-			frame_sz = PAGE_SIZE;
-
-			if (!xdp_page)
-				goto err_xdp;
-			offset = VIRTIO_XDP_HEADROOM;
-
-			put_page(page);
-			page = xdp_page;
-		} else if (unlikely(headroom < virtnet_get_headroom(vi))) {
-			xdp_room = SKB_DATA_ALIGN(VIRTIO_XDP_HEADROOM +
-						  sizeof(struct skb_shared_info));
-			if (len + xdp_room > PAGE_SIZE)
-				goto err_xdp;
-
-			xdp_page = alloc_page(GFP_ATOMIC);
-			if (!xdp_page)
-				goto err_xdp;
-
-			memcpy(page_address(xdp_page) + VIRTIO_XDP_HEADROOM,
-			       page_address(page) + offset, len);
-			frame_sz = PAGE_SIZE;
-			offset = VIRTIO_XDP_HEADROOM;
-
-			put_page(page);
-			page = xdp_page;
-		}
-
-		data = page_address(page) + offset;
 		err = virtnet_build_xdp_buff_mrg(dev, vi, rq, &xdp, data, len, frame_sz,
 						 &num_buf, &xdp_frags_truesz, stats);
 		if (unlikely(err))
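[A user-space sketch of the calling contract introduced here, with
hypothetical names: like mergeable_xdp_get_buf(), the helper may swap
the caller's buffer behind a double pointer; NULL means failure, and
the caller frees whatever *buf currently points to, exactly once.]

    #include <stdlib.h>
    #include <string.h>

    void *get_buf(char **buf, size_t *len, size_t need)
    {
            char *bigger;

            if (*len >= need)
                    return *buf;    /* enough room already: no copy */

            bigger = malloc(need);
            if (!bigger)
                    return NULL;    /* caller's *buf is still intact */

            memcpy(bigger, *buf, *len);
            free(*buf);             /* old buffer dropped right away */
            *buf = bigger;
            *len = need;
            return *buf;
    }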
From patchwork Thu Apr 27 03:05:22 2023
From: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
To: netdev@vger.kernel.org
Cc: "Michael S. Tsirkin", Jason Wang, "David S. Miller", Eric Dumazet,
 Jakub Kicinski, Paolo Abeni, Alexei Starovoitov, Daniel Borkmann,
 Jesper Dangaard Brouer, John Fastabend,
 virtualization@lists.linux-foundation.org, bpf@vger.kernel.org
Subject: [PATCH net-next v4 03/15] virtio_net: optimize mergeable_xdp_get_buf()
Date: Thu, 27 Apr 2023 11:05:22 +0800
Message-Id: <20230427030534.115066-4-xuanzhuo@linux.alibaba.com>

The previous patch deliberately made no functional modification, to
ease review. This patch applies some optimizations on top of it:

* Remove repeated logic from this function.
* Add a fast check for the passing case that needs no allocation.

Signed-off-by: Xuan Zhuo
Acked-by: Jason Wang
---
 drivers/net/virtio_net.c | 29 ++++++++++++++---------------
 1 file changed, 14 insertions(+), 15 deletions(-)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index b8832b0cc215..6f66d73097b4 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1192,6 +1192,11 @@ static void *mergeable_xdp_get_buf(struct virtnet_info *vi,
 	 */
 	*frame_sz = truesize;
 
+	if (likely(headroom >= virtnet_get_headroom(vi) &&
+		   (*num_buf == 1 || xdp_prog->aux->xdp_has_frags))) {
+		return page_address(*page) + offset;
+	}
+
 	/* This happens when headroom is not enough because
 	 * of the buffer was prefilled before XDP is set.
 	 * This should only happen for the first several packets.
@@ -1200,22 +1205,15 @@ static void *mergeable_xdp_get_buf(struct virtnet_info *vi,
 	 * support it, and we don't want to bother users who are
 	 * using xdp normally.
 	 */
-	if (!xdp_prog->aux->xdp_has_frags &&
-	    (*num_buf > 1 || headroom < virtnet_get_headroom(vi))) {
+	if (!xdp_prog->aux->xdp_has_frags) {
 		/* linearize data for XDP */
 		xdp_page = xdp_linearize_page(rq, num_buf,
 					      *page, offset,
 					      VIRTIO_XDP_HEADROOM,
 					      len);
-		*frame_sz = PAGE_SIZE;
-
 		if (!xdp_page)
 			return NULL;
-		offset = VIRTIO_XDP_HEADROOM;
-
-		put_page(*page);
-		*page = xdp_page;
-	} else if (unlikely(headroom < virtnet_get_headroom(vi))) {
+	} else {
 		xdp_room = SKB_DATA_ALIGN(VIRTIO_XDP_HEADROOM +
 					  sizeof(struct skb_shared_info));
 		if (*len + xdp_room > PAGE_SIZE)
@@ -1227,14 +1225,15 @@ static void *mergeable_xdp_get_buf(struct virtnet_info *vi,
 
 		memcpy(page_address(xdp_page) + VIRTIO_XDP_HEADROOM,
 		       page_address(*page) + offset, *len);
-		*frame_sz = PAGE_SIZE;
-		offset = VIRTIO_XDP_HEADROOM;
-
-		put_page(*page);
-		*page = xdp_page;
 	}
 
-	return page_address(*page) + offset;
+	*frame_sz = PAGE_SIZE;
+
+	put_page(*page);
+
+	*page = xdp_page;
+
+	return page_address(*page) + VIRTIO_XDP_HEADROOM;
 }
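[The "fast check" is the usual branch-ordering idiom: test the common
case first and return before any slow path runs. A compilable
user-space shape, with hypothetical condition names:]

    #include <stdbool.h>
    #include <stddef.h>

    #define likely(x) __builtin_expect(!!(x), 1)

    void *get_data(void *buf, bool have_headroom, int nbufs, bool frags_ok)
    {
            /* Common case: take the buffer as-is, no copy, no allocation. */
            if (likely(have_headroom && (nbufs == 1 || frags_ok)))
                    return buf;

            return NULL;    /* slow copy/linearize paths elided */
    }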
Miller" , Eric Dumazet , Jakub Kicinski , Paolo Abeni , Alexei Starovoitov , Daniel Borkmann , Jesper Dangaard Brouer , John Fastabend , virtualization@lists.linux-foundation.org, bpf@vger.kernel.org Subject: [PATCH net-next v4 04/15] virtio_net: introduce virtnet_xdp_handler() to seprate the logic of run xdp Date: Thu, 27 Apr 2023 11:05:23 +0800 Message-Id: <20230427030534.115066-5-xuanzhuo@linux.alibaba.com> X-Mailer: git-send-email 2.32.0.3.g01195cf9f In-Reply-To: <20230427030534.115066-1-xuanzhuo@linux.alibaba.com> References: <20230427030534.115066-1-xuanzhuo@linux.alibaba.com> MIME-Version: 1.0 X-Git-Hash: 69b1082fef22 Precedence: bulk List-ID: X-Mailing-List: bpf@vger.kernel.org X-Patchwork-Delegate: kuba@kernel.org At present, we have two similar logic to perform the XDP prog. Therefore, this patch separates the code of executing XDP, which is conducive to later maintenance. Signed-off-by: Xuan Zhuo Acked-by: Jason Wang --- drivers/net/virtio_net.c | 118 +++++++++++++++++++-------------------- 1 file changed, 58 insertions(+), 60 deletions(-) diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 6f66d73097b4..264945dd7a35 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -789,6 +789,60 @@ static int virtnet_xdp_xmit(struct net_device *dev, return ret; } +static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp, + struct net_device *dev, + unsigned int *xdp_xmit, + struct virtnet_rq_stats *stats) +{ + struct xdp_frame *xdpf; + int err; + u32 act; + + act = bpf_prog_run_xdp(xdp_prog, xdp); + stats->xdp_packets++; + + switch (act) { + case XDP_PASS: + return act; + + case XDP_TX: + stats->xdp_tx++; + xdpf = xdp_convert_buff_to_frame(xdp); + if (unlikely(!xdpf)) { + netdev_dbg(dev, "convert buff to frame failed for xdp\n"); + return XDP_DROP; + } + + err = virtnet_xdp_xmit(dev, 1, &xdpf, 0); + if (unlikely(!err)) { + xdp_return_frame_rx_napi(xdpf); + } else if (unlikely(err < 0)) { + trace_xdp_exception(dev, xdp_prog, act); + return XDP_DROP; + } + *xdp_xmit |= VIRTIO_XDP_TX; + return act; + + case XDP_REDIRECT: + stats->xdp_redirects++; + err = xdp_do_redirect(dev, xdp, xdp_prog); + if (err) + return XDP_DROP; + + *xdp_xmit |= VIRTIO_XDP_REDIR; + return act; + + default: + bpf_warn_invalid_xdp_action(dev, xdp_prog, act); + fallthrough; + case XDP_ABORTED: + trace_xdp_exception(dev, xdp_prog, act); + fallthrough; + case XDP_DROP: + return XDP_DROP; + } +} + static unsigned int virtnet_get_headroom(struct virtnet_info *vi) { return vi->xdp_enabled ? 
@@ -876,7 +930,6 @@ static struct sk_buff *receive_small(struct net_device *dev,
 	struct page *page = virt_to_head_page(buf);
 	unsigned int delta = 0;
 	struct page *xdp_page;
-	int err;
 	unsigned int metasize = 0;
 
 	len -= vi->hdr_len;
@@ -898,7 +951,6 @@ static struct sk_buff *receive_small(struct net_device *dev,
 	xdp_prog = rcu_dereference(rq->xdp_prog);
 	if (xdp_prog) {
 		struct virtio_net_hdr_mrg_rxbuf *hdr = buf + header_offset;
-		struct xdp_frame *xdpf;
 		struct xdp_buff xdp;
 		void *orig_data;
 		u32 act;
@@ -931,8 +983,8 @@ static struct sk_buff *receive_small(struct net_device *dev,
 		xdp_prepare_buff(&xdp, buf + VIRTNET_RX_PAD + vi->hdr_len,
 				 xdp_headroom, len, true);
 		orig_data = xdp.data;
-		act = bpf_prog_run_xdp(xdp_prog, &xdp);
-		stats->xdp_packets++;
+
+		act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats);
 
 		switch (act) {
 		case XDP_PASS:
@@ -942,35 +994,10 @@ static struct sk_buff *receive_small(struct net_device *dev,
 			metasize = xdp.data - xdp.data_meta;
 			break;
 		case XDP_TX:
-			stats->xdp_tx++;
-			xdpf = xdp_convert_buff_to_frame(&xdp);
-			if (unlikely(!xdpf))
-				goto err_xdp;
-			err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
-			if (unlikely(!err)) {
-				xdp_return_frame_rx_napi(xdpf);
-			} else if (unlikely(err < 0)) {
-				trace_xdp_exception(vi->dev, xdp_prog, act);
-				goto err_xdp;
-			}
-			*xdp_xmit |= VIRTIO_XDP_TX;
-			rcu_read_unlock();
-			goto xdp_xmit;
 		case XDP_REDIRECT:
-			stats->xdp_redirects++;
-			err = xdp_do_redirect(dev, &xdp, xdp_prog);
-			if (err)
-				goto err_xdp;
-			*xdp_xmit |= VIRTIO_XDP_REDIR;
 			rcu_read_unlock();
 			goto xdp_xmit;
 		default:
-			bpf_warn_invalid_xdp_action(vi->dev, xdp_prog, act);
-			fallthrough;
-		case XDP_ABORTED:
-			trace_xdp_exception(vi->dev, xdp_prog, act);
-			goto err_xdp;
-		case XDP_DROP:
 			goto err_xdp;
 		}
 	}
@@ -1278,7 +1305,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 	if (xdp_prog) {
 		unsigned int xdp_frags_truesz = 0;
 		struct skb_shared_info *shinfo;
-		struct xdp_frame *xdpf;
 		struct page *xdp_page;
 		struct xdp_buff xdp;
 		void *data;
@@ -1295,8 +1321,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 		if (unlikely(err))
 			goto err_xdp_frags;
 
-		act = bpf_prog_run_xdp(xdp_prog, &xdp);
-		stats->xdp_packets++;
+		act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats);
 
 		switch (act) {
 		case XDP_PASS:
@@ -1307,38 +1332,11 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 			rcu_read_unlock();
 			return head_skb;
 		case XDP_TX:
-			stats->xdp_tx++;
-			xdpf = xdp_convert_buff_to_frame(&xdp);
-			if (unlikely(!xdpf)) {
-				netdev_dbg(dev, "convert buff to frame failed for xdp\n");
-				goto err_xdp_frags;
-			}
-			err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
-			if (unlikely(!err)) {
-				xdp_return_frame_rx_napi(xdpf);
-			} else if (unlikely(err < 0)) {
-				trace_xdp_exception(vi->dev, xdp_prog, act);
-				goto err_xdp_frags;
-			}
-			*xdp_xmit |= VIRTIO_XDP_TX;
-			rcu_read_unlock();
-			goto xdp_xmit;
 		case XDP_REDIRECT:
-			stats->xdp_redirects++;
-			err = xdp_do_redirect(dev, &xdp, xdp_prog);
-			if (err)
-				goto err_xdp_frags;
-			*xdp_xmit |= VIRTIO_XDP_REDIR;
 			rcu_read_unlock();
 			goto xdp_xmit;
 		default:
-			bpf_warn_invalid_xdp_action(vi->dev, xdp_prog, act);
-			fallthrough;
-		case XDP_ABORTED:
-			trace_xdp_exception(vi->dev, xdp_prog, act);
-			fallthrough;
-		case XDP_DROP:
-			goto err_xdp_frags;
+			break;
 		}
 err_xdp_frags:
 		if (xdp_buff_has_frags(&xdp)) {
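[The shape of the new handler, as a user-space sketch with a
hypothetical enum: every failure inside TX/REDIRECT is normalized to a
DROP verdict, so both receive paths branch on just four outcomes.]

    enum verdict { VERDICT_PASS, VERDICT_TX, VERDICT_REDIRECT, VERDICT_DROP };

    enum verdict handle_verdict(enum verdict act, int xmit_err)
    {
            switch (act) {
            case VERDICT_PASS:
                    return VERDICT_PASS;
            case VERDICT_TX:
            case VERDICT_REDIRECT:
                    /* any transmit/redirect failure collapses to DROP */
                    return xmit_err ? VERDICT_DROP : act;
            default:
                    return VERDICT_DROP;    /* unknown or aborted */
            }
    }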
From patchwork Thu Apr 27 03:05:24 2023
From: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
To: netdev@vger.kernel.org
Cc: "Michael S. Tsirkin", Jason Wang, "David S. Miller", Eric Dumazet,
 Jakub Kicinski, Paolo Abeni, Alexei Starovoitov, Daniel Borkmann,
 Jesper Dangaard Brouer, John Fastabend,
 virtualization@lists.linux-foundation.org, bpf@vger.kernel.org
Subject: [PATCH net-next v4 05/15] virtio_net: separate the logic of freeing xdp shinfo
Date: Thu, 27 Apr 2023 11:05:24 +0800
Message-Id: <20230427030534.115066-6-xuanzhuo@linux.alibaba.com>

This patch introduces a new function that releases the xdp shinfo
frags. A subsequent patch will reuse this function.
Signed-off-by: Xuan Zhuo
Acked-by: Jason Wang
---
 drivers/net/virtio_net.c | 27 ++++++++++++++++-----------
 1 file changed, 16 insertions(+), 11 deletions(-)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 264945dd7a35..b4eb083ebf55 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -789,6 +789,21 @@ static int virtnet_xdp_xmit(struct net_device *dev,
 	return ret;
 }
 
+static void put_xdp_frags(struct xdp_buff *xdp)
+{
+	struct skb_shared_info *shinfo;
+	struct page *xdp_page;
+	int i;
+
+	if (xdp_buff_has_frags(xdp)) {
+		shinfo = xdp_get_shared_info_from_buff(xdp);
+		for (i = 0; i < shinfo->nr_frags; i++) {
+			xdp_page = skb_frag_page(&shinfo->frags[i]);
+			put_page(xdp_page);
+		}
+	}
+}
+
 static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
 			       struct net_device *dev,
 			       unsigned int *xdp_xmit,
@@ -1304,12 +1319,9 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 	xdp_prog = rcu_dereference(rq->xdp_prog);
 	if (xdp_prog) {
 		unsigned int xdp_frags_truesz = 0;
-		struct skb_shared_info *shinfo;
-		struct page *xdp_page;
 		struct xdp_buff xdp;
 		void *data;
 		u32 act;
-		int i;
 
 		data = mergeable_xdp_get_buf(vi, rq, xdp_prog, ctx, &frame_sz,
 					     &num_buf, &page, offset, &len, hdr);
@@ -1339,14 +1351,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 			break;
 		}
 err_xdp_frags:
-		if (xdp_buff_has_frags(&xdp)) {
-			shinfo = xdp_get_shared_info_from_buff(&xdp);
-			for (i = 0; i < shinfo->nr_frags; i++) {
-				xdp_page = skb_frag_page(&shinfo->frags[i]);
-				put_page(xdp_page);
-			}
-		}
-
+		put_xdp_frags(&xdp);
 		goto err_xdp;
 	}
 	rcu_read_unlock();
Miller" , Eric Dumazet , Jakub Kicinski , Paolo Abeni , Alexei Starovoitov , Daniel Borkmann , Jesper Dangaard Brouer , John Fastabend , virtualization@lists.linux-foundation.org, bpf@vger.kernel.org Subject: [PATCH net-next v4 06/15] virtio_net: separate the logic of freeing the rest mergeable buf Date: Thu, 27 Apr 2023 11:05:25 +0800 Message-Id: <20230427030534.115066-7-xuanzhuo@linux.alibaba.com> X-Mailer: git-send-email 2.32.0.3.g01195cf9f In-Reply-To: <20230427030534.115066-1-xuanzhuo@linux.alibaba.com> References: <20230427030534.115066-1-xuanzhuo@linux.alibaba.com> MIME-Version: 1.0 X-Git-Hash: 69b1082fef22 Precedence: bulk List-ID: X-Mailing-List: bpf@vger.kernel.org X-Patchwork-Delegate: kuba@kernel.org This patch introduce a new function that frees the rest mergeable buf. The subsequent patch will reuse this function. Signed-off-by: Xuan Zhuo Acked-by: Jason Wang --- drivers/net/virtio_net.c | 36 ++++++++++++++++++++++++------------ 1 file changed, 24 insertions(+), 12 deletions(-) diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index b4eb083ebf55..5f37a1cef61e 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -1067,6 +1067,28 @@ static struct sk_buff *receive_big(struct net_device *dev, return NULL; } +static void mergeable_buf_free(struct receive_queue *rq, int num_buf, + struct net_device *dev, + struct virtnet_rq_stats *stats) +{ + struct page *page; + void *buf; + int len; + + while (num_buf-- > 1) { + buf = virtqueue_get_buf(rq->vq, &len); + if (unlikely(!buf)) { + pr_debug("%s: rx error: %d buffers missing\n", + dev->name, num_buf); + dev->stats.rx_length_errors++; + break; + } + stats->bytes += len; + page = virt_to_head_page(buf); + put_page(page); + } +} + /* Why not use xdp_build_skb_from_frame() ? 
 * XDP core assumes that xdp frags are PAGE_SIZE in length, while in
 * virtio-net there are 2 points that do not match its requirements:
@@ -1427,18 +1449,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 	stats->xdp_drops++;
 err_skb:
 	put_page(page);
-	while (num_buf-- > 1) {
-		buf = virtqueue_get_buf(rq->vq, &len);
-		if (unlikely(!buf)) {
-			pr_debug("%s: rx error: %d buffers missing\n",
-				 dev->name, num_buf);
-			dev->stats.rx_length_errors++;
-			break;
-		}
-		stats->bytes += len;
-		page = virt_to_head_page(buf);
-		put_page(page);
-	}
+	mergeable_buf_free(rq, num_buf, dev, stats);
+
 err_buf:
 	stats->drops++;
 	dev_kfree_skb(head_skb);
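[As a user-space sketch with a hypothetical queue API, the helper is a
plain drain-and-free loop that tolerates a queue shorter than
advertised:]

    #include <stdio.h>
    #include <stdlib.h>

    void drain_free(void *(*get)(void), int num_buf)
    {
            while (num_buf-- > 1) {
                    void *buf = get();

                    if (!buf) {
                            fprintf(stderr, "rx error: %d buffers missing\n",
                                    num_buf);
                            break;  /* record the error, don't spin */
                    }
                    free(buf);      /* release each remaining buffer */
            }
    }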
From patchwork Thu Apr 27 03:05:26 2023
From: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
To: netdev@vger.kernel.org
Cc: "Michael S. Tsirkin", Jason Wang, "David S. Miller", Eric Dumazet,
 Jakub Kicinski, Paolo Abeni, Alexei Starovoitov, Daniel Borkmann,
 Jesper Dangaard Brouer, John Fastabend,
 virtualization@lists.linux-foundation.org, bpf@vger.kernel.org
Subject: [PATCH net-next v4 07/15] virtio_net: virtnet_build_xdp_buff_mrg() auto release xdp shinfo
Date: Thu, 27 Apr 2023 11:05:26 +0800
Message-Id: <20230427030534.115066-8-xuanzhuo@linux.alibaba.com>

Make virtnet_build_xdp_buff_mrg() release the xdp shinfo frags itself
on error, so that the caller no longer needs to take care of the xdp
shinfo.

Signed-off-by: Xuan Zhuo
---
 drivers/net/virtio_net.c | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 5f37a1cef61e..971320c17c73 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1190,7 +1190,7 @@ static int virtnet_build_xdp_buff_mrg(struct net_device *dev,
 			 dev->name, *num_buf,
 			 virtio16_to_cpu(vi->vdev, hdr->num_buffers));
 		dev->stats.rx_length_errors++;
-		return -EINVAL;
+		goto err;
 	}
 
 	stats->bytes += len;
@@ -1209,7 +1209,7 @@ static int virtnet_build_xdp_buff_mrg(struct net_device *dev,
 			pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
 				 dev->name, len, (unsigned long)(truesize - room));
 			dev->stats.rx_length_errors++;
-			return -EINVAL;
+			goto err;
 		}
 
 		frag = &shinfo->frags[shinfo->nr_frags++];
@@ -1224,6 +1224,10 @@ static int virtnet_build_xdp_buff_mrg(struct net_device *dev,
 
 	*xdp_frags_truesize = xdp_frags_truesz;
 	return 0;
+
+err:
+	put_xdp_frags(xdp);
+	return -EINVAL;
 }
 
 static void *mergeable_xdp_get_buf(struct virtnet_info *vi,
@@ -1353,7 +1357,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 		err = virtnet_build_xdp_buff_mrg(dev, vi, rq, &xdp, data, len, frame_sz,
 						 &num_buf, &xdp_frags_truesz, stats);
 		if (unlikely(err))
-			goto err_xdp_frags;
+			goto err_xdp;
 
 		act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats);
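[The convention this patch adopts, sketched in user-space C with a
hypothetical function: on failure the callee unwinds everything it
acquired, so the caller keeps a single error label and no partial
state.]

    #include <stdlib.h>

    int build_frags(void **frags, int n)
    {
            int i;

            for (i = 0; i < n; i++) {
                    frags[i] = malloc(64);
                    if (!frags[i]) {
                            while (i--)
                                    free(frags[i]);  /* callee frees its own work */
                            return -1;               /* caller sees a clean failure */
                    }
            }
            return 0;
    }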
Miller" , Eric Dumazet , Jakub Kicinski , Paolo Abeni , Alexei Starovoitov , Daniel Borkmann , Jesper Dangaard Brouer , John Fastabend , virtualization@lists.linux-foundation.org, bpf@vger.kernel.org Subject: [PATCH net-next v4 08/15] virtio_net: introduce receive_mergeable_xdp() Date: Thu, 27 Apr 2023 11:05:27 +0800 Message-Id: <20230427030534.115066-9-xuanzhuo@linux.alibaba.com> X-Mailer: git-send-email 2.32.0.3.g01195cf9f In-Reply-To: <20230427030534.115066-1-xuanzhuo@linux.alibaba.com> References: <20230427030534.115066-1-xuanzhuo@linux.alibaba.com> MIME-Version: 1.0 X-Git-Hash: 69b1082fef22 Precedence: bulk List-ID: X-Mailing-List: bpf@vger.kernel.org X-Patchwork-Delegate: kuba@kernel.org The purpose of this patch is to simplify the receive_mergeable(). Separate all the logic of XDP into a function. Signed-off-by: Xuan Zhuo Acked-by: Jason Wang --- drivers/net/virtio_net.c | 105 ++++++++++++++++++++++++--------------- 1 file changed, 64 insertions(+), 41 deletions(-) diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 971320c17c73..520d48088455 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -1304,6 +1304,66 @@ static void *mergeable_xdp_get_buf(struct virtnet_info *vi, return page_address(*page) + VIRTIO_XDP_HEADROOM; } +static struct sk_buff *receive_mergeable_xdp(struct net_device *dev, + struct virtnet_info *vi, + struct receive_queue *rq, + struct bpf_prog *xdp_prog, + void *buf, + void *ctx, + unsigned int len, + unsigned int *xdp_xmit, + struct virtnet_rq_stats *stats) +{ + struct virtio_net_hdr_mrg_rxbuf *hdr = buf; + int num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers); + struct page *page = virt_to_head_page(buf); + int offset = buf - page_address(page); + unsigned int xdp_frags_truesz = 0; + struct sk_buff *head_skb; + unsigned int frame_sz; + struct xdp_buff xdp; + void *data; + u32 act; + int err; + + data = mergeable_xdp_get_buf(vi, rq, xdp_prog, ctx, &frame_sz, &num_buf, &page, + offset, &len, hdr); + if (unlikely(!data)) + goto err_xdp; + + err = virtnet_build_xdp_buff_mrg(dev, vi, rq, &xdp, data, len, frame_sz, + &num_buf, &xdp_frags_truesz, stats); + if (unlikely(err)) + goto err_xdp; + + act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats); + + switch (act) { + case XDP_PASS: + head_skb = build_skb_from_xdp_buff(dev, vi, &xdp, xdp_frags_truesz); + if (unlikely(!head_skb)) + break; + return head_skb; + + case XDP_TX: + case XDP_REDIRECT: + return NULL; + + default: + break; + } + + put_xdp_frags(&xdp); + +err_xdp: + put_page(page); + mergeable_buf_free(rq, num_buf, dev, stats); + + stats->xdp_drops++; + stats->drops++; + return NULL; +} + static struct sk_buff *receive_mergeable(struct net_device *dev, struct virtnet_info *vi, struct receive_queue *rq, @@ -1323,8 +1383,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, unsigned int headroom = mergeable_ctx_to_headroom(ctx); unsigned int tailroom = headroom ? 
@@ -1323,8 +1383,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 	unsigned int headroom = mergeable_ctx_to_headroom(ctx);
 	unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
 	unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);
-	unsigned int frame_sz;
-	int err;
 
 	head_skb = NULL;
 	stats->bytes += len - vi->hdr_len;
@@ -1344,41 +1402,10 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 	rcu_read_lock();
 	xdp_prog = rcu_dereference(rq->xdp_prog);
 	if (xdp_prog) {
-		unsigned int xdp_frags_truesz = 0;
-		struct xdp_buff xdp;
-		void *data;
-		u32 act;
-
-		data = mergeable_xdp_get_buf(vi, rq, xdp_prog, ctx, &frame_sz,
-					     &num_buf, &page, offset, &len, hdr);
-		if (unlikely(!data))
-			goto err_xdp;
-
-		err = virtnet_build_xdp_buff_mrg(dev, vi, rq, &xdp, data, len, frame_sz,
-						 &num_buf, &xdp_frags_truesz, stats);
-		if (unlikely(err))
-			goto err_xdp;
-
-		act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats);
-
-		switch (act) {
-		case XDP_PASS:
-			head_skb = build_skb_from_xdp_buff(dev, vi, &xdp, xdp_frags_truesz);
-			if (unlikely(!head_skb))
-				goto err_xdp_frags;
-
-			rcu_read_unlock();
-			return head_skb;
-		case XDP_TX:
-		case XDP_REDIRECT:
-			rcu_read_unlock();
-			goto xdp_xmit;
-		default:
-			break;
-		}
-err_xdp_frags:
-		put_xdp_frags(&xdp);
-		goto err_xdp;
+		head_skb = receive_mergeable_xdp(dev, vi, rq, xdp_prog, buf, ctx,
+						 len, xdp_xmit, stats);
+		rcu_read_unlock();
+		return head_skb;
 	}
 	rcu_read_unlock();
 
@@ -1448,9 +1475,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 	ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len);
 	return head_skb;
 
-err_xdp:
-	rcu_read_unlock();
-	stats->xdp_drops++;
 err_skb:
 	put_page(page);
 	mergeable_buf_free(rq, num_buf, dev, stats);
@@ -1458,7 +1482,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 err_buf:
 	stats->drops++;
 	dev_kfree_skb(head_skb);
-xdp_xmit:
 	return NULL;
 }
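[The helper's return contract, as a tiny hypothetical sketch: non-NULL
means "deliver this skb to the stack"; NULL means the frame was
transmitted, redirected, or dropped, with all cleanup already done
inside the helper.]

    #include <stddef.h>

    struct pkt { int len; };
    enum verdict { VERDICT_PASS, VERDICT_TX, VERDICT_REDIRECT, VERDICT_DROP };

    struct pkt *rx_xdp_sketch(struct pkt *p, enum verdict act)
    {
            switch (act) {
            case VERDICT_PASS:
                    return p;       /* caller delivers it to the stack */
            default:
                    return NULL;    /* consumed or dropped: cleanup done */
            }
    }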
Miller" , Eric Dumazet , Jakub Kicinski , Paolo Abeni , Alexei Starovoitov , Daniel Borkmann , Jesper Dangaard Brouer , John Fastabend , virtualization@lists.linux-foundation.org, bpf@vger.kernel.org Subject: [PATCH net-next v4 09/15] virtio_net: merge: remove skip_xdp Date: Thu, 27 Apr 2023 11:05:28 +0800 Message-Id: <20230427030534.115066-10-xuanzhuo@linux.alibaba.com> X-Mailer: git-send-email 2.32.0.3.g01195cf9f In-Reply-To: <20230427030534.115066-1-xuanzhuo@linux.alibaba.com> References: <20230427030534.115066-1-xuanzhuo@linux.alibaba.com> MIME-Version: 1.0 X-Git-Hash: 69b1082fef22 Precedence: bulk List-ID: X-Mailing-List: bpf@vger.kernel.org X-Patchwork-Delegate: kuba@kernel.org Now, the logic of merge xdp process is simple, we can remove the skip_xdp. Signed-off-by: Xuan Zhuo Acked-by: Jason Wang --- drivers/net/virtio_net.c | 23 ++++++++++------------- 1 file changed, 10 insertions(+), 13 deletions(-) diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 520d48088455..55ecd1ce8a1e 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -1378,7 +1378,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, struct page *page = virt_to_head_page(buf); int offset = buf - page_address(page); struct sk_buff *head_skb, *curr_skb; - struct bpf_prog *xdp_prog; unsigned int truesize = mergeable_ctx_to_truesize(ctx); unsigned int headroom = mergeable_ctx_to_headroom(ctx); unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0; @@ -1394,22 +1393,20 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, goto err_skb; } - if (likely(!vi->xdp_enabled)) { - xdp_prog = NULL; - goto skip_xdp; - } + if (unlikely(vi->xdp_enabled)) { + struct bpf_prog *xdp_prog; - rcu_read_lock(); - xdp_prog = rcu_dereference(rq->xdp_prog); - if (xdp_prog) { - head_skb = receive_mergeable_xdp(dev, vi, rq, xdp_prog, buf, ctx, - len, xdp_xmit, stats); + rcu_read_lock(); + xdp_prog = rcu_dereference(rq->xdp_prog); + if (xdp_prog) { + head_skb = receive_mergeable_xdp(dev, vi, rq, xdp_prog, buf, ctx, + len, xdp_xmit, stats); + rcu_read_unlock(); + return head_skb; + } rcu_read_unlock(); - return head_skb; } - rcu_read_unlock(); -skip_xdp: head_skb = page_to_skb(vi, rq, page, offset, len, truesize, headroom); curr_skb = head_skb; From patchwork Thu Apr 27 03:05:29 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Xuan Zhuo X-Patchwork-Id: 13225134 X-Patchwork-Delegate: kuba@kernel.org Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 52E9EC7618E for ; Thu, 27 Apr 2023 03:06:41 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S243038AbjD0DGj (ORCPT ); Wed, 26 Apr 2023 23:06:39 -0400 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:44394 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S242992AbjD0DGN (ORCPT ); Wed, 26 Apr 2023 23:06:13 -0400 Received: from out30-100.freemail.mail.aliyun.com (out30-100.freemail.mail.aliyun.com [115.124.30.100]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 8C4DD4685; Wed, 26 Apr 2023 20:05:52 -0700 (PDT) X-Alimail-AntiSpam: 
From patchwork Thu Apr 27 03:05:29 2023
From: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
To: netdev@vger.kernel.org
Cc: "Michael S. Tsirkin", Jason Wang, "David S. Miller", Eric Dumazet,
 Jakub Kicinski, Paolo Abeni, Alexei Starovoitov, Daniel Borkmann,
 Jesper Dangaard Brouer, John Fastabend,
 virtualization@lists.linux-foundation.org, bpf@vger.kernel.org
Subject: [PATCH net-next v4 10/15] virtio_net: introduce receive_small_xdp()
Date: Thu, 27 Apr 2023 11:05:29 +0800
Message-Id: <20230427030534.115066-11-xuanzhuo@linux.alibaba.com>

The purpose of this patch is to simplify receive_small(): separate all
of the small-buffer XDP logic into a dedicated function.

Signed-off-by: Xuan Zhuo
Acked-by: Jason Wang
---
 drivers/net/virtio_net.c | 165 ++++++++++++++++++++++++---------------
 1 file changed, 100 insertions(+), 65 deletions(-)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 55ecd1ce8a1e..3b0f13ab6ccb 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -927,6 +927,99 @@ static struct page *xdp_linearize_page(struct receive_queue *rq,
 	return NULL;
 }
 
+static struct sk_buff *receive_small_xdp(struct net_device *dev,
+					 struct virtnet_info *vi,
+					 struct receive_queue *rq,
+					 struct bpf_prog *xdp_prog,
+					 void *buf,
+					 unsigned int xdp_headroom,
+					 unsigned int len,
+					 unsigned int *xdp_xmit,
+					 struct virtnet_rq_stats *stats)
+{
+	unsigned int header_offset = VIRTNET_RX_PAD + xdp_headroom;
+	unsigned int headroom = vi->hdr_len + header_offset;
+	struct virtio_net_hdr_mrg_rxbuf *hdr = buf + header_offset;
+	struct page *page = virt_to_head_page(buf);
+	struct page *xdp_page;
+	unsigned int buflen;
+	struct xdp_buff xdp;
+	struct sk_buff *skb;
+	unsigned int delta = 0;
+	unsigned int metasize = 0;
+	void *orig_data;
+	u32 act;
+
+	if (unlikely(hdr->hdr.gso_type))
+		goto err_xdp;
+
+	buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
+		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+	if (unlikely(xdp_headroom < virtnet_get_headroom(vi))) {
+		int offset = buf - page_address(page) + header_offset;
+		unsigned int tlen = len + vi->hdr_len;
+		int num_buf = 1;
+
+		xdp_headroom = virtnet_get_headroom(vi);
+		header_offset = VIRTNET_RX_PAD + xdp_headroom;
+		headroom = vi->hdr_len + header_offset;
+		buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
+			SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+		xdp_page = xdp_linearize_page(rq, &num_buf, page,
+					      offset, header_offset,
+					      &tlen);
+		if (!xdp_page)
+			goto err_xdp;
+
+		buf = page_address(xdp_page);
+		put_page(page);
+		page = xdp_page;
+	}
+
+	xdp_init_buff(&xdp, buflen, &rq->xdp_rxq);
+	xdp_prepare_buff(&xdp, buf + VIRTNET_RX_PAD + vi->hdr_len,
+			 xdp_headroom, len, true);
+	orig_data = xdp.data;
+
+	act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats);
+
+	switch (act) {
+	case XDP_PASS:
+		/* Recalculate length in case bpf program changed it */
+		delta = orig_data - xdp.data;
+		len = xdp.data_end - xdp.data;
+		metasize = xdp.data - xdp.data_meta;
+		break;
+
+	case XDP_TX:
+	case XDP_REDIRECT:
+		goto xdp_xmit;
+
+	default:
+		goto err_xdp;
+	}
+
+	skb = build_skb(buf, buflen);
+	if (!skb)
+		goto err;
+
+	skb_reserve(skb, headroom - delta);
+	skb_put(skb, len);
+	if (metasize)
+		skb_metadata_set(skb, metasize);
+
+	return skb;
+
+err_xdp:
+	stats->xdp_drops++;
+err:
+	stats->drops++;
+	put_page(page);
+xdp_xmit:
+	return NULL;
+}
+
 static struct sk_buff *receive_small(struct net_device *dev,
 				     struct virtnet_info *vi,
 				     struct receive_queue *rq,
@@ -943,9 +1036,6 @@ static struct sk_buff *receive_small(struct net_device *dev,
 	unsigned int buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
 			      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 	struct page *page = virt_to_head_page(buf);
-	unsigned int delta = 0;
-	struct page *xdp_page;
-	unsigned int metasize = 0;
 
 	len -= vi->hdr_len;
 	stats->bytes += len;
@@ -965,56 +1055,10 @@ static struct sk_buff *receive_small(struct net_device *dev,
 	rcu_read_lock();
 	xdp_prog = rcu_dereference(rq->xdp_prog);
 	if (xdp_prog) {
-		struct virtio_net_hdr_mrg_rxbuf *hdr = buf + header_offset;
-		struct xdp_buff xdp;
-		void *orig_data;
-		u32 act;
-
-		if (unlikely(hdr->hdr.gso_type))
-			goto err_xdp;
-
-		if (unlikely(xdp_headroom < virtnet_get_headroom(vi))) {
-			int offset = buf - page_address(page) + header_offset;
-			unsigned int tlen = len + vi->hdr_len;
-			int num_buf = 1;
-
-			xdp_headroom = virtnet_get_headroom(vi);
-			header_offset = VIRTNET_RX_PAD + xdp_headroom;
-			headroom = vi->hdr_len + header_offset;
-			buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
-				SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
-			xdp_page = xdp_linearize_page(rq, &num_buf, page,
-						      offset, header_offset,
-						      &tlen);
-			if (!xdp_page)
-				goto err_xdp;
-
-			buf = page_address(xdp_page);
-			put_page(page);
-			page = xdp_page;
-		}
-
-		xdp_init_buff(&xdp, buflen, &rq->xdp_rxq);
-		xdp_prepare_buff(&xdp, buf + VIRTNET_RX_PAD + vi->hdr_len,
-				 xdp_headroom, len, true);
-		orig_data = xdp.data;
-
-		act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats);
-
-		switch (act) {
-		case XDP_PASS:
-			/* Recalculate length in case bpf program changed it */
-			delta = orig_data - xdp.data;
-			len = xdp.data_end - xdp.data;
-			metasize = xdp.data - xdp.data_meta;
-			break;
-		case XDP_TX:
-		case XDP_REDIRECT:
-			rcu_read_unlock();
-			goto xdp_xmit;
-		default:
-			goto err_xdp;
-		}
+		skb = receive_small_xdp(dev, vi, rq, xdp_prog, buf, xdp_headroom,
+					len, xdp_xmit, stats);
+		rcu_read_unlock();
+		return skb;
 	}
 	rcu_read_unlock();
 
@@ -1022,25 +1066,16 @@ static struct sk_buff *receive_small(struct net_device *dev,
 	skb = build_skb(buf, buflen);
 	if (!skb)
 		goto err;
-	skb_reserve(skb, headroom - delta);
+	skb_reserve(skb, headroom);
 	skb_put(skb, len);
-	if (!xdp_prog) {
-		buf += header_offset;
-		memcpy(skb_vnet_hdr(skb), buf, vi->hdr_len);
-	} /* keep zeroed vnet hdr since XDP is loaded */
-
-	if (metasize)
-		skb_metadata_set(skb, metasize);
+	buf += header_offset;
+	memcpy(skb_vnet_hdr(skb), buf, vi->hdr_len);
 
 	return skb;
 
-err_xdp:
-	rcu_read_unlock();
-	stats->xdp_drops++;
 err:
 	stats->drops++;
 	put_page(page);
-xdp_xmit:
 	return NULL;
 }
From patchwork Thu Apr 27 03:05:30 2023
From: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
To: netdev@vger.kernel.org
Cc: "Michael S. Tsirkin", Jason Wang, "David S. Miller", Eric Dumazet,
 Jakub Kicinski, Paolo Abeni, Alexei Starovoitov, Daniel Borkmann,
 Jesper Dangaard Brouer, John Fastabend,
 virtualization@lists.linux-foundation.org, bpf@vger.kernel.org
Subject: [PATCH net-next v4 11/15] virtio_net: small: remove the delta
Date: Thu, 27 Apr 2023 11:05:30 +0800
Message-Id: <20230427030534.115066-12-xuanzhuo@linux.alibaba.com>

In the XDP_PASS case, skb_reserve() used "delta" to stay compatible
with the non-XDP path. Now that this code is no longer shared between
the XDP and non-XDP paths, we can remove that logic.
Signed-off-by: Xuan Zhuo
Acked-by: Jason Wang
---
 drivers/net/virtio_net.c | 6 +-----
 1 file changed, 1 insertion(+), 5 deletions(-)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 3b0f13ab6ccb..b8ec642899c9 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -945,9 +945,7 @@ static struct sk_buff *receive_small_xdp(struct net_device *dev,
 	unsigned int buflen;
 	struct xdp_buff xdp;
 	struct sk_buff *skb;
-	unsigned int delta = 0;
 	unsigned int metasize = 0;
-	void *orig_data;
 	u32 act;
 
 	if (unlikely(hdr->hdr.gso_type))
@@ -980,14 +978,12 @@ static struct sk_buff *receive_small_xdp(struct net_device *dev,
 	xdp_init_buff(&xdp, buflen, &rq->xdp_rxq);
 	xdp_prepare_buff(&xdp, buf + VIRTNET_RX_PAD + vi->hdr_len,
 			 xdp_headroom, len, true);
-	orig_data = xdp.data;
 
 	act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats);
 
 	switch (act) {
 	case XDP_PASS:
 		/* Recalculate length in case bpf program changed it */
-		delta = orig_data - xdp.data;
 		len = xdp.data_end - xdp.data;
 		metasize = xdp.data - xdp.data_meta;
 		break;
@@ -1004,7 +1000,7 @@ static struct sk_buff *receive_small_xdp(struct net_device *dev,
 	if (!skb)
 		goto err;
 
-	skb_reserve(skb, headroom - delta);
+	skb_reserve(skb, xdp.data - buf);
 	skb_put(skb, len);
 	if (metasize)
 		skb_metadata_set(skb, metasize);
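[A worked check of the replacement, with assumed numbers in plain
user-space C: since delta = orig_data - xdp.data and orig_data =
buf + headroom, the new skb_reserve(skb, xdp.data - buf) equals the
old headroom - delta.]

    #include <assert.h>

    int main(void)
    {
            char buf[256];
            unsigned int headroom = 128;
            char *orig_data = buf + headroom;       /* packet start before XDP */
            char *data = orig_data - 16;            /* program pushed 16B of header */
            unsigned int delta = (unsigned int)(orig_data - data);  /* 16 */

            /* xdp.data - buf == headroom - delta: 112 == 112 */
            assert((unsigned int)(data - buf) == headroom - delta);
            return 0;
    }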
From patchwork Thu Apr 27 03:05:31 2023
X-Patchwork-Submitter: Xuan Zhuo
X-Patchwork-Id: 13225136
From: Xuan Zhuo
To: netdev@vger.kernel.org
Cc: "Michael S. Tsirkin", Jason Wang, "David S. Miller", Eric Dumazet,
    Jakub Kicinski, Paolo Abeni, Alexei Starovoitov, Daniel Borkmann,
    Jesper Dangaard Brouer, John Fastabend,
    virtualization@lists.linux-foundation.org, bpf@vger.kernel.org
Subject: [PATCH net-next v4 12/15] virtio_net: small: avoid double counting in xdp scenarios
Date: Thu, 27 Apr 2023 11:05:31 +0800
Message-Id: <20230427030534.115066-13-xuanzhuo@linux.alibaba.com>
In-Reply-To: <20230427030534.115066-1-xuanzhuo@linux.alibaba.com>
References: <20230427030534.115066-1-xuanzhuo@linux.alibaba.com>

Avoid recalculating some variables (headroom and so on) when processing
XDP: compute them only on the skb-build path, after the XDP path has
been ruled out, since receive_small_xdp() derives its own values.

Signed-off-by: Xuan Zhuo
Acked-by: Jason Wang
---
 drivers/net/virtio_net.c | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index b8ec642899c9..f832ab8a3e6e 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1027,11 +1027,10 @@ static struct sk_buff *receive_small(struct net_device *dev,
 	struct sk_buff *skb;
 	struct bpf_prog *xdp_prog;
 	unsigned int xdp_headroom = (unsigned long)ctx;
-	unsigned int header_offset = VIRTNET_RX_PAD + xdp_headroom;
-	unsigned int headroom = vi->hdr_len + header_offset;
-	unsigned int buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
-		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 	struct page *page = virt_to_head_page(buf);
+	unsigned int header_offset;
+	unsigned int headroom;
+	unsigned int buflen;
 
 	len -= vi->hdr_len;
 	stats->bytes += len;
@@ -1059,6 +1058,11 @@ static struct sk_buff *receive_small(struct net_device *dev,
 	rcu_read_unlock();
 
 skip_xdp:
+	header_offset = VIRTNET_RX_PAD + xdp_headroom;
+	headroom = vi->hdr_len + header_offset;
+	buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
+		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
 	skb = build_skb(buf, buflen);
 	if (!skb)
 		goto err;
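Abstracted away from the driver, the shape of the change is "compute
per-path values only on the path that needs them". A simplified
standalone sketch (the names, constants and stand-in macros are
assumptions, not the kernel API):

#include <stdio.h>

#define ALIGN64(x) (((x) + 63U) & ~63U) /* stand-in for SKB_DATA_ALIGN */

static int xdp_path_consumed(void)
{
	return 1; /* pretend receive_small_xdp() handled the packet */
}

static void receive_small_sketch(int xdp_enabled, unsigned int xdp_headroom,
				 unsigned int hdr_len)
{
	if (xdp_enabled && xdp_path_consumed())
		return; /* the XDP path never pays for the math below */

	/* only the skb-build path computes these now */
	unsigned int header_offset = 4 + xdp_headroom; /* 4 ~ VIRTNET_RX_PAD */
	unsigned int headroom = hdr_len + header_offset;
	unsigned int buflen = ALIGN64(1500 + headroom) + ALIGN64(320);

	printf("build_skb: buflen=%u headroom=%u\n", buflen, headroom);
}

int main(void)
{
	receive_small_sketch(0, 256, 12); /* non-XDP: computes offsets */
	receive_small_sketch(1, 256, 12); /* XDP: returns early */
	return 0;
}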
From patchwork Thu Apr 27 03:05:32 2023
X-Patchwork-Submitter: Xuan Zhuo
X-Patchwork-Id: 13225137
From: Xuan Zhuo
To: netdev@vger.kernel.org
Cc: "Michael S. Tsirkin", Jason Wang, "David S. Miller", Eric Dumazet,
    Jakub Kicinski, Paolo Abeni, Alexei Starovoitov, Daniel Borkmann,
    Jesper Dangaard Brouer, John Fastabend,
    virtualization@lists.linux-foundation.org, bpf@vger.kernel.org
Subject: [PATCH net-next v4 13/15] virtio_net: small: remove skip_xdp
Date: Thu, 27 Apr 2023 11:05:32 +0800
Message-Id: <20230427030534.115066-14-xuanzhuo@linux.alibaba.com>
In-Reply-To: <20230427030534.115066-1-xuanzhuo@linux.alibaba.com>
References: <20230427030534.115066-1-xuanzhuo@linux.alibaba.com>

The skb build code is no longer shared between the XDP and non-XDP
paths, and the XDP branch of receive_small() is simpler now, so the
"skip_xdp" label is not needed. Remove it.

Signed-off-by: Xuan Zhuo
Acked-by: Jason Wang
---
 drivers/net/virtio_net.c | 26 ++++++++++++--------------
 1 file changed, 12 insertions(+), 14 deletions(-)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index f832ab8a3e6e..a76db3d19eb7 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1024,13 +1024,12 @@ static struct sk_buff *receive_small(struct net_device *dev,
 				     unsigned int *xdp_xmit,
 				     struct virtnet_rq_stats *stats)
 {
-	struct sk_buff *skb;
-	struct bpf_prog *xdp_prog;
 	unsigned int xdp_headroom = (unsigned long)ctx;
 	struct page *page = virt_to_head_page(buf);
 	unsigned int header_offset;
 	unsigned int headroom;
 	unsigned int buflen;
+	struct sk_buff *skb;
 
 	len -= vi->hdr_len;
 	stats->bytes += len;
@@ -1042,22 +1041,21 @@ static struct sk_buff *receive_small(struct net_device *dev,
 		goto err;
 	}
 
-	if (likely(!vi->xdp_enabled)) {
-		xdp_prog = NULL;
-		goto skip_xdp;
-	}
+	if (unlikely(vi->xdp_enabled)) {
+		struct bpf_prog *xdp_prog;
 
-	rcu_read_lock();
-	xdp_prog = rcu_dereference(rq->xdp_prog);
-	if (xdp_prog) {
-		skb = receive_small_xdp(dev, vi, rq, xdp_prog, buf, xdp_headroom,
-					len, xdp_xmit, stats);
+		rcu_read_lock();
+		xdp_prog = rcu_dereference(rq->xdp_prog);
+		if (xdp_prog) {
+			skb = receive_small_xdp(dev, vi, rq, xdp_prog, buf,
+						xdp_headroom, len, xdp_xmit,
+						stats);
+			rcu_read_unlock();
+			return skb;
+		}
 		rcu_read_unlock();
-		return skb;
 	}
-	rcu_read_unlock();
 
-skip_xdp:
 	header_offset = VIRTNET_RX_PAD + xdp_headroom;
 	headroom = vi->hdr_len + header_offset;
 	buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
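The rewrite is a classic goto-elimination: the "skip over the XDP
block" label becomes a single guarded block with early returns inside
it. The skeleton, as a standalone sketch (stand-in functions, not the
driver's types):

#include <stdio.h>

static void rcu_lock(void)   { } /* stand-in for rcu_read_lock() */
static void rcu_unlock(void) { } /* stand-in for rcu_read_unlock() */

static const char *receive_sketch(int xdp_enabled, int have_prog)
{
	if (xdp_enabled) {
		rcu_lock();
		if (have_prog) {
			const char *skb = "skb from XDP path";
			rcu_unlock();
			return skb; /* early return replaces "goto skip_xdp" */
		}
		rcu_unlock();
	}

	/* formerly the skip_xdp: label; now plain fall-through */
	return "skb from non-XDP path";
}

int main(void)
{
	puts(receive_sketch(1, 1));
	puts(receive_sketch(0, 0));
	return 0;
}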
From patchwork Thu Apr 27 03:05:33 2023
X-Patchwork-Submitter: Xuan Zhuo
X-Patchwork-Id: 13225135
From: Xuan Zhuo
To: netdev@vger.kernel.org
Cc: "Michael S. Tsirkin", Jason Wang, "David S. Miller", Eric Dumazet,
    Jakub Kicinski, Paolo Abeni, Alexei Starovoitov, Daniel Borkmann,
    Jesper Dangaard Brouer, John Fastabend,
    virtualization@lists.linux-foundation.org, bpf@vger.kernel.org
Subject: [PATCH net-next v4 14/15] virtio_net: introduce receive_small_build_skb
Date: Thu, 27 Apr 2023 11:05:33 +0800
Message-Id: <20230427030534.115066-15-xuanzhuo@linux.alibaba.com>
In-Reply-To: <20230427030534.115066-1-xuanzhuo@linux.alibaba.com>
References: <20230427030534.115066-1-xuanzhuo@linux.alibaba.com>

Simplify the receive_small() function by bringing the build_skb()
related logic together into a new helper, receive_small_build_skb().

Signed-off-by: Xuan Zhuo
Acked-by: Jason Wang
---
 drivers/net/virtio_net.c | 48 ++++++++++++++++++++++++++--------------
 1 file changed, 31 insertions(+), 17 deletions(-)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index a76db3d19eb7..c526852a8547 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -927,6 +927,34 @@ static struct page *xdp_linearize_page(struct receive_queue *rq,
 	return NULL;
 }
 
+static struct sk_buff *receive_small_build_skb(struct virtnet_info *vi,
+					       unsigned int xdp_headroom,
+					       void *buf,
+					       unsigned int len)
+{
+	unsigned int header_offset;
+	unsigned int headroom;
+	unsigned int buflen;
+	struct sk_buff *skb;
+
+	header_offset = VIRTNET_RX_PAD + xdp_headroom;
+	headroom = vi->hdr_len + header_offset;
+	buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
+		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+	skb = build_skb(buf, buflen);
+	if (!skb)
+		return NULL;
+
+	skb_reserve(skb, headroom);
+	skb_put(skb, len);
+
+	buf += header_offset;
+	memcpy(skb_vnet_hdr(skb), buf, vi->hdr_len);
+
+	return skb;
+}
+
 static struct sk_buff *receive_small_xdp(struct net_device *dev,
 					 struct virtnet_info *vi,
 					 struct receive_queue *rq,
@@ -1026,9 +1054,6 @@ static struct sk_buff *receive_small(struct net_device *dev,
 {
 	unsigned int xdp_headroom = (unsigned long)ctx;
 	struct page *page = virt_to_head_page(buf);
-	unsigned int header_offset;
-	unsigned int headroom;
-	unsigned int buflen;
 	struct sk_buff *skb;
 
 	len -= vi->hdr_len;
@@ -1056,20 +1081,9 @@ static struct sk_buff *receive_small(struct net_device *dev,
 		rcu_read_unlock();
 	}
 
-	header_offset = VIRTNET_RX_PAD + xdp_headroom;
-	headroom = vi->hdr_len + header_offset;
-	buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
-		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
-
-	skb = build_skb(buf, buflen);
-	if (!skb)
-		goto err;
-	skb_reserve(skb, headroom);
-	skb_put(skb, len);
-
-	buf += header_offset;
-	memcpy(skb_vnet_hdr(skb), buf, vi->hdr_len);
-	return skb;
+	skb = receive_small_build_skb(vi, xdp_headroom, buf, len);
+	if (likely(skb))
+		return skb;
 
 err:
 	stats->drops++;
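What the new helper owns end to end (buffer offsets, skb build, vnet
header copy), modeled in userspace. The fake_skb type and the RX_PAD
and HDR_LEN constants are stand-ins for the kernel's sk_buff,
VIRTNET_RX_PAD and vi->hdr_len; this is a sketch, not the driver code:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define RX_PAD  4  /* stand-in for VIRTNET_RX_PAD */
#define HDR_LEN 12 /* stand-in for vi->hdr_len */

struct fake_skb {
	unsigned char *head;
	unsigned int reserve; /* models skb_reserve() */
	unsigned int len;     /* models skb_put() */
	unsigned char vnet_hdr[HDR_LEN];
};

/* models receive_small_build_skb(): offsets, skb build, vnet header copy */
static struct fake_skb *small_build_skb(unsigned int xdp_headroom,
					unsigned char *buf, unsigned int len)
{
	unsigned int header_offset = RX_PAD + xdp_headroom;
	unsigned int headroom = HDR_LEN + header_offset;
	struct fake_skb *skb = calloc(1, sizeof(*skb));

	if (!skb)
		return NULL;

	skb->head = buf;
	skb->reserve = headroom;
	skb->len = len;
	memcpy(skb->vnet_hdr, buf + header_offset, HDR_LEN);
	return skb;
}

int main(void)
{
	unsigned char buf[2048] = { 0 };
	struct fake_skb *skb = small_build_skb(256, buf, 64);

	if (skb)
		printf("data offset=%u len=%u\n", skb->reserve, skb->len);
	free(skb);
	return 0;
}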
From patchwork Thu Apr 27 03:05:34 2023
X-Patchwork-Submitter: Xuan Zhuo
X-Patchwork-Id: 13225138
From: Xuan Zhuo
To: netdev@vger.kernel.org
Cc: "Michael S. Tsirkin", Jason Wang, "David S. Miller", Eric Dumazet,
    Jakub Kicinski, Paolo Abeni, Alexei Starovoitov, Daniel Borkmann,
    Jesper Dangaard Brouer, John Fastabend,
    virtualization@lists.linux-foundation.org, bpf@vger.kernel.org
Subject: [PATCH net-next v4 15/15] virtio_net: introduce virtnet_build_skb()
Date: Thu, 27 Apr 2023 11:05:34 +0800
Message-Id: <20230427030534.115066-16-xuanzhuo@linux.alibaba.com>
In-Reply-To: <20230427030534.115066-1-xuanzhuo@linux.alibaba.com>
References: <20230427030534.115066-1-xuanzhuo@linux.alibaba.com>

This logic is used in multiple places, so separate it into a helper,
virtnet_build_skb().
Signed-off-by: Xuan Zhuo
Acked-by: Jason Wang
---
 drivers/net/virtio_net.c | 34 +++++++++++++++++++++-------------
 1 file changed, 21 insertions(+), 13 deletions(-)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index c526852a8547..256bcd0890a2 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -443,6 +443,22 @@ static unsigned int mergeable_ctx_to_truesize(void *mrg_ctx)
 	return (unsigned long)mrg_ctx & ((1 << MRG_CTX_HEADER_SHIFT) - 1);
 }
 
+static struct sk_buff *virtnet_build_skb(void *buf, unsigned int buflen,
+					 unsigned int headroom,
+					 unsigned int len)
+{
+	struct sk_buff *skb;
+
+	skb = build_skb(buf, buflen);
+	if (unlikely(!skb))
+		return NULL;
+
+	skb_reserve(skb, headroom);
+	skb_put(skb, len);
+
+	return skb;
+}
+
 /* Called from bottom half context */
 static struct sk_buff *page_to_skb(struct virtnet_info *vi,
 				   struct receive_queue *rq,
@@ -476,13 +492,10 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
 
 	/* copy small packet so we can reuse these pages */
 	if (!NET_IP_ALIGN && len > GOOD_COPY_LEN && tailroom >= shinfo_size) {
-		skb = build_skb(buf, truesize);
+		skb = virtnet_build_skb(buf, truesize, p - buf, len);
 		if (unlikely(!skb))
 			return NULL;
 
-		skb_reserve(skb, p - buf);
-		skb_put(skb, len);
-
 		page = (struct page *)page->private;
 		if (page)
 			give_pages(rq, page);
@@ -942,13 +955,10 @@ static struct sk_buff *receive_small_build_skb(struct virtnet_info *vi,
 	buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
 		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 
-	skb = build_skb(buf, buflen);
-	if (!skb)
+	skb = virtnet_build_skb(buf, buflen, headroom, len);
+	if (unlikely(!skb))
 		return NULL;
 
-	skb_reserve(skb, headroom);
-	skb_put(skb, len);
-
 	buf += header_offset;
 	memcpy(skb_vnet_hdr(skb), buf, vi->hdr_len);
 
@@ -1024,12 +1034,10 @@ static struct sk_buff *receive_small_xdp(struct net_device *dev,
 		goto err_xdp;
 	}
 
-	skb = build_skb(buf, buflen);
-	if (!skb)
+	skb = virtnet_build_skb(buf, buflen, xdp.data - buf, len);
+	if (unlikely(!skb))
 		goto err;
 
-	skb_reserve(skb, xdp.data - buf);
-	skb_put(skb, len);
 	if (metasize)
 		skb_metadata_set(skb, metasize);
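The helper folds the repeated build_skb()/skb_reserve()/skb_put()
triple into one function; the only per-call-site difference is the
headroom expression. A standalone model (fake types; the kernel calls
are stand-ins, not the real API):

#include <stdio.h>
#include <stdlib.h>

struct fake_skb {
	void *buf;
	unsigned int reserve; /* models skb_reserve() */
	unsigned int len;     /* models skb_put() */
};

/* models virtnet_build_skb(): the shared triple lives in one place */
static struct fake_skb *vnet_build_skb(void *buf, unsigned int buflen,
				       unsigned int headroom, unsigned int len)
{
	struct fake_skb *skb = malloc(sizeof(*skb)); /* ~ build_skb(buf, buflen) */

	if (!skb)
		return NULL;
	skb->buf = buf;
	skb->reserve = headroom;
	skb->len = len;
	(void)buflen; /* the real build_skb() sizes the data area with this */
	return skb;
}

int main(void)
{
	char buf[1024];
	char *p = buf + 128;       /* page_to_skb(): headroom is p - buf */
	char *xdp_data = buf + 96; /* receive_small_xdp(): headroom is xdp.data - buf */

	struct fake_skb *a = vnet_build_skb(buf, sizeof(buf), p - buf, 60);
	struct fake_skb *b = vnet_build_skb(buf, sizeof(buf), xdp_data - buf, 60);

	if (a && b)
		printf("reserve: a=%u b=%u\n", a->reserve, b->reserve);
	free(a);
	free(b);
	return 0;
}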