From patchwork Fri Jul 5 07:37:25 2024
X-Patchwork-Id: 13724471
From: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
To: netdev@vger.kernel.org
Cc: "Michael S. Tsirkin", Jason Wang, Xuan Zhuo, Eugenio Pérez,
 "David S. Miller", Eric Dumazet, Jakub Kicinski, Paolo Abeni,
 Alexei Starovoitov, Daniel Borkmann, Jesper Dangaard Brouer,
 John Fastabend, virtualization@lists.linux.dev, bpf@vger.kernel.org
Subject: [PATCH net-next v7 01/10] virtio_net: replace VIRTIO_XDP_HEADROOM by XDP_PACKET_HEADROOM
Date: Fri, 5 Jul 2024 15:37:25 +0800
Message-Id: <20240705073734.93905-2-xuanzhuo@linux.alibaba.com>
In-Reply-To: <20240705073734.93905-1-xuanzhuo@linux.alibaba.com>
References: <20240705073734.93905-1-xuanzhuo@linux.alibaba.com>

virtio_net defines its own VIRTIO_XDP_HEADROOM, whose value is equal to
XDP_PACKET_HEADROOM, to size the headroom reserved for XDP. We should
use the generic macro XDP_PACKET_HEADROOM from bpf.h for this instead.
So remove VIRTIO_XDP_HEADROOM and replace all its users with
XDP_PACKET_HEADROOM.

Signed-off-by: Xuan Zhuo
Acked-by: Jason Wang
---
 drivers/net/virtio_net.c | 21 +++++++++------------
 1 file changed, 9 insertions(+), 12 deletions(-)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 0b4747e81464..d99898e44456 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -40,9 +40,6 @@ module_param(napi_tx, bool, 0644);
 
 #define VIRTNET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)
 
-/* Amount of XDP headroom to prepend to packets for use by xdp_adjust_head */
-#define VIRTIO_XDP_HEADROOM 256
-
 /* Separating two types of XDP xmit */
 #define VIRTIO_XDP_TX		BIT(0)
 #define VIRTIO_XDP_REDIR	BIT(1)
@@ -1268,7 +1265,7 @@ static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
 
 static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
 {
-	return vi->xdp_enabled ? VIRTIO_XDP_HEADROOM : 0;
+	return vi->xdp_enabled ? XDP_PACKET_HEADROOM : 0;
 }
 
 /* We copy the packet for XDP in the following cases:
@@ -1332,7 +1329,7 @@ static struct page *xdp_linearize_page(struct receive_queue *rq,
 	}
 
 	/* Headroom does not contribute to packet length */
-	*len = page_off - VIRTIO_XDP_HEADROOM;
+	*len = page_off - XDP_PACKET_HEADROOM;
 	return page;
 err_buf:
 	__free_pages(page, 0);
@@ -1619,8 +1616,8 @@ static int virtnet_build_xdp_buff_mrg(struct net_device *dev,
 	void *ctx;
 
 	xdp_init_buff(xdp, frame_sz, &rq->xdp_rxq);
-	xdp_prepare_buff(xdp, buf - VIRTIO_XDP_HEADROOM,
-			 VIRTIO_XDP_HEADROOM + vi->hdr_len, len - vi->hdr_len, true);
+	xdp_prepare_buff(xdp, buf - XDP_PACKET_HEADROOM,
+			 XDP_PACKET_HEADROOM + vi->hdr_len, len - vi->hdr_len, true);
 
 	if (!*num_buf)
 		return 0;
@@ -1737,12 +1734,12 @@ static void *mergeable_xdp_get_buf(struct virtnet_info *vi,
 		/* linearize data for XDP */
 		xdp_page = xdp_linearize_page(rq, num_buf,
 					      *page, offset,
-					      VIRTIO_XDP_HEADROOM,
+					      XDP_PACKET_HEADROOM,
 					      len);
 		if (!xdp_page)
 			return NULL;
 	} else {
-		xdp_room = SKB_DATA_ALIGN(VIRTIO_XDP_HEADROOM +
+		xdp_room = SKB_DATA_ALIGN(XDP_PACKET_HEADROOM +
 					  sizeof(struct skb_shared_info));
 		if (*len + xdp_room > PAGE_SIZE)
 			return NULL;
@@ -1751,7 +1748,7 @@ static void *mergeable_xdp_get_buf(struct virtnet_info *vi,
 		if (!xdp_page)
 			return NULL;
 
-		memcpy(page_address(xdp_page) + VIRTIO_XDP_HEADROOM,
+		memcpy(page_address(xdp_page) + XDP_PACKET_HEADROOM,
 		       page_address(*page) + offset, *len);
 	}
 
@@ -1761,7 +1758,7 @@ static void *mergeable_xdp_get_buf(struct virtnet_info *vi,
 
 	*page = xdp_page;
 
-	return page_address(*page) + VIRTIO_XDP_HEADROOM;
+	return page_address(*page) + XDP_PACKET_HEADROOM;
 }
 
 static struct sk_buff *receive_mergeable_xdp(struct net_device *dev,
@@ -4971,7 +4968,7 @@ static int virtnet_restore_guest_offloads(struct virtnet_info *vi)
 static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
			   struct netlink_ext_ack *extack)
 {
-	unsigned int room = SKB_DATA_ALIGN(VIRTIO_XDP_HEADROOM +
+	unsigned int room = SKB_DATA_ALIGN(XDP_PACKET_HEADROOM +
					   sizeof(struct skb_shared_info));
 	unsigned int max_sz = PAGE_SIZE - room - ETH_HLEN;
 	struct virtnet_info *vi = netdev_priv(dev);
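[For context: the headroom that this macro sizes exists so that XDP
programs can grow the frame at the front. A minimal illustrative XDP
program, not part of this patch, showing how bpf_xdp_adjust_head()
consumes the XDP_PACKET_HEADROOM the driver reserves:]

/* Illustrative sketch only: reserve room in front of the frame, e.g.
 * to push an encapsulation header. Fails if the driver did not
 * provide enough headroom.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int push_room(struct xdp_md *ctx)
{
	/* move ctx->data 16 bytes down into the reserved headroom */
	if (bpf_xdp_adjust_head(ctx, -16))
		return XDP_DROP;

	/* ... re-validate data/data_end, then write the new header ... */
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";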
From patchwork Fri Jul 5 07:37:26 2024
X-Patchwork-Id: 13724472
From: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
To: netdev@vger.kernel.org
Cc: "Michael S. Tsirkin", Jason Wang, Xuan Zhuo, Eugenio Pérez,
 "David S. Miller", Eric Dumazet, Jakub Kicinski, Paolo Abeni,
 Alexei Starovoitov, Daniel Borkmann, Jesper Dangaard Brouer,
 John Fastabend, virtualization@lists.linux.dev, bpf@vger.kernel.org
Subject: [PATCH net-next v7 02/10] virtio_net: separate virtnet_rx_resize()
Date: Fri, 5 Jul 2024 15:37:26 +0800
Message-Id: <20240705073734.93905-3-xuanzhuo@linux.alibaba.com>
In-Reply-To: <20240705073734.93905-1-xuanzhuo@linux.alibaba.com>
References: <20240705073734.93905-1-xuanzhuo@linux.alibaba.com>

This patch factors two sub-functions out of virtnet_rx_resize():

* virtnet_rx_pause
* virtnet_rx_resume

The subsequent rx reset for xsk can then share these two functions.
Signed-off-by: Xuan Zhuo
Acked-by: Jason Wang
---
 drivers/net/virtio_net.c | 29 +++++++++++++++++++++--------
 1 file changed, 21 insertions(+), 8 deletions(-)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index d99898e44456..df5b23374c53 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -2665,28 +2665,41 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
 	return NETDEV_TX_OK;
 }
 
-static int virtnet_rx_resize(struct virtnet_info *vi,
-			     struct receive_queue *rq, u32 ring_num)
+static void virtnet_rx_pause(struct virtnet_info *vi, struct receive_queue *rq)
 {
 	bool running = netif_running(vi->dev);
-	int err, qindex;
-
-	qindex = rq - vi->rq;
 
 	if (running) {
 		napi_disable(&rq->napi);
 		virtnet_cancel_dim(vi, &rq->dim);
 	}
+}
 
-	err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_unmap_free_buf);
-	if (err)
-		netdev_err(vi->dev, "resize rx fail: rx queue index: %d err: %d\n", qindex, err);
+static void virtnet_rx_resume(struct virtnet_info *vi, struct receive_queue *rq)
+{
+	bool running = netif_running(vi->dev);
 
 	if (!try_fill_recv(vi, rq, GFP_KERNEL))
 		schedule_delayed_work(&vi->refill, 0);
 
 	if (running)
 		virtnet_napi_enable(rq->vq, &rq->napi);
+}
+
+static int virtnet_rx_resize(struct virtnet_info *vi,
+			     struct receive_queue *rq, u32 ring_num)
+{
+	int err, qindex;
+
+	qindex = rq - vi->rq;
+
+	virtnet_rx_pause(vi, rq);
+
+	err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_unmap_free_buf);
+	if (err)
+		netdev_err(vi->dev, "resize rx fail: rx queue index: %d err: %d\n", qindex, err);
+
+	virtnet_rx_resume(vi, rq);
 
 	return err;
 }
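[For context: the pause/resume pair is meant to bracket any operation
that needs a quiesced receive queue. A minimal sketch of the intended
call pattern, mirroring how the later xsk bind patch in this series
uses it; the wrapper name is illustrative, not a new API:]

/* Illustrative sketch only: quiesce the rq, mutate the underlying
 * virtqueue while NAPI is stopped, then refill and restart NAPI.
 */
static int virtnet_rx_reset_example(struct virtnet_info *vi,
				    struct receive_queue *rq)
{
	int err;

	virtnet_rx_pause(vi, rq);	/* stop NAPI + dim on this rq */

	/* any operation that requires an idle ring goes here */
	err = virtqueue_reset(rq->vq, virtnet_rq_unmap_free_buf);

	virtnet_rx_resume(vi, rq);	/* refill and re-enable NAPI */

	return err;
}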
header.b="eEdYoh/I" DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=linux.alibaba.com; s=default; t=1720165059; h=From:To:Subject:Date:Message-Id:MIME-Version; bh=ixQcmDs1008Adtr8nnLB1TsPVvNLEyo3g5zAUujjY4M=; b=eEdYoh/IiGcSuBw2mmpZn45sHSYiaQGXDDzx1Gd8cvCi+Dya1NGq53Wdn6fRDcv/LhexKdgCWhtJSgkF/5AygixDXh4YV/6mmb3pP19LFIkxsh24AjaXJFQOx+kseHJ6rnqxuSjtZDrkAGhG2XyJ7HRV3vFgmETa0zHnzu9OOoc= X-Alimail-AntiSpam: AC=PASS;BC=-1|-1;BR=01201311R131e4;CH=green;DM=||false|;DS=||;FP=0|-1|-1|-1|0|-1|-1|-1;HT=maildocker-contentspam033037067111;MF=xuanzhuo@linux.alibaba.com;NM=1;PH=DS;RN=15;SR=0;TI=SMTPD_---0W9uE.dr_1720165057; Received: from localhost(mailfrom:xuanzhuo@linux.alibaba.com fp:SMTPD_---0W9uE.dr_1720165057) by smtp.aliyun-inc.com; Fri, 05 Jul 2024 15:37:38 +0800 From: Xuan Zhuo To: netdev@vger.kernel.org Cc: "Michael S. Tsirkin" , Jason Wang , Xuan Zhuo , =?utf-8?q?Eugenio_P=C3=A9rez?= , "David S. Miller" , Eric Dumazet , Jakub Kicinski , Paolo Abeni , Alexei Starovoitov , Daniel Borkmann , Jesper Dangaard Brouer , John Fastabend , virtualization@lists.linux.dev, bpf@vger.kernel.org Subject: [PATCH net-next v7 03/10] virtio_net: separate virtnet_tx_resize() Date: Fri, 5 Jul 2024 15:37:27 +0800 Message-Id: <20240705073734.93905-4-xuanzhuo@linux.alibaba.com> X-Mailer: git-send-email 2.32.0.3.g01195cf9f In-Reply-To: <20240705073734.93905-1-xuanzhuo@linux.alibaba.com> References: <20240705073734.93905-1-xuanzhuo@linux.alibaba.com> Precedence: bulk X-Mailing-List: bpf@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 X-Git-Hash: bfc652b69f2c X-Patchwork-Delegate: kuba@kernel.org This patch separates two sub-functions from virtnet_tx_resize(): * virtnet_tx_pause * virtnet_tx_resume Then the subsequent virtnet_tx_reset() can share these two functions. 
Signed-off-by: Xuan Zhuo
Acked-by: Jason Wang
---
 drivers/net/virtio_net.c | 35 +++++++++++++++++++++++++++++------
 1 file changed, 29 insertions(+), 6 deletions(-)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index df5b23374c53..7d762614113b 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -2703,12 +2703,11 @@ static int virtnet_rx_resize(struct virtnet_info *vi,
 	return err;
 }
 
-static int virtnet_tx_resize(struct virtnet_info *vi,
-			     struct send_queue *sq, u32 ring_num)
+static void virtnet_tx_pause(struct virtnet_info *vi, struct send_queue *sq)
 {
 	bool running = netif_running(vi->dev);
 	struct netdev_queue *txq;
-	int err, qindex;
+	int qindex;
 
 	qindex = sq - vi->sq;
 
@@ -2729,10 +2728,17 @@ static int virtnet_tx_resize(struct virtnet_info *vi,
 	netif_stop_subqueue(vi->dev, qindex);
 
 	__netif_tx_unlock_bh(txq);
+}
 
-	err = virtqueue_resize(sq->vq, ring_num, virtnet_sq_free_unused_buf);
-	if (err)
-		netdev_err(vi->dev, "resize tx fail: tx queue index: %d err: %d\n", qindex, err);
+static void virtnet_tx_resume(struct virtnet_info *vi, struct send_queue *sq)
+{
+	bool running = netif_running(vi->dev);
+	struct netdev_queue *txq;
+	int qindex;
+
+	qindex = sq - vi->sq;
+
+	txq = netdev_get_tx_queue(vi->dev, qindex);
 
 	__netif_tx_lock_bh(txq);
 	sq->reset = false;
@@ -2741,6 +2747,23 @@ static int virtnet_tx_resize(struct virtnet_info *vi,
 
 	if (running)
 		virtnet_napi_tx_enable(vi, sq->vq, &sq->napi);
+}
+
+static int virtnet_tx_resize(struct virtnet_info *vi, struct send_queue *sq,
+			     u32 ring_num)
+{
+	int qindex, err;
+
+	qindex = sq - vi->sq;
+
+	virtnet_tx_pause(vi, sq);
+
+	err = virtqueue_resize(sq->vq, ring_num, virtnet_sq_free_unused_buf);
+	if (err)
+		netdev_err(vi->dev, "resize tx fail: tx queue index: %d err: %d\n", qindex, err);
+
+	virtnet_tx_resume(vi, sq);
+
 	return err;
 }
From patchwork Fri Jul 5 07:37:28 2024
X-Patchwork-Id: 13724476
From: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
To: netdev@vger.kernel.org
Cc: "Michael S. Tsirkin", Jason Wang, Xuan Zhuo, Eugenio Pérez,
 "David S. Miller", Eric Dumazet, Jakub Kicinski, Paolo Abeni,
 Alexei Starovoitov, Daniel Borkmann, Jesper Dangaard Brouer,
 John Fastabend, virtualization@lists.linux.dev, bpf@vger.kernel.org
Subject: [PATCH net-next v7 04/10] virtio_net: separate receive_buf
Date: Fri, 5 Jul 2024 15:37:28 +0800
Message-Id: <20240705073734.93905-5-xuanzhuo@linux.alibaba.com>
In-Reply-To: <20240705073734.93905-1-xuanzhuo@linux.alibaba.com>
References: <20240705073734.93905-1-xuanzhuo@linux.alibaba.com>

This commit splits receive_buf(): the logic that hands the completed
skb to the stack is wrapped into an independent function,
virtnet_receive_done(). A subsequent commit will reuse it.
Signed-off-by: Xuan Zhuo
Acked-by: Jason Wang
---
 drivers/net/virtio_net.c | 62 +++++++++++++++++++++++-----------------
 1 file changed, 35 insertions(+), 27 deletions(-)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 7d762614113b..abfc84af90ce 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1968,6 +1968,40 @@ static void virtio_skb_set_hash(const struct virtio_net_hdr_v1_hash *hdr_hash,
 	skb_set_hash(skb, __le32_to_cpu(hdr_hash->hash_value), rss_hash_type);
 }
 
+static void virtnet_receive_done(struct virtnet_info *vi, struct receive_queue *rq,
+				 struct sk_buff *skb, u8 flags)
+{
+	struct virtio_net_common_hdr *hdr;
+	struct net_device *dev = vi->dev;
+
+	hdr = skb_vnet_common_hdr(skb);
+	if (dev->features & NETIF_F_RXHASH && vi->has_rss_hash_report)
+		virtio_skb_set_hash(&hdr->hash_v1_hdr, skb);
+
+	if (flags & VIRTIO_NET_HDR_F_DATA_VALID)
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+	if (virtio_net_hdr_to_skb(skb, &hdr->hdr,
+				  virtio_is_little_endian(vi->vdev))) {
+		net_warn_ratelimited("%s: bad gso: type: %u, size: %u\n",
+				     dev->name, hdr->hdr.gso_type,
+				     hdr->hdr.gso_size);
+		goto frame_err;
+	}
+
+	skb_record_rx_queue(skb, vq2rxq(rq->vq));
+	skb->protocol = eth_type_trans(skb, dev);
+	pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
+		 ntohs(skb->protocol), skb->len, skb->pkt_type);
+
+	napi_gro_receive(&rq->napi, skb);
+	return;
+
+frame_err:
+	DEV_STATS_INC(dev, rx_frame_errors);
+	dev_kfree_skb(skb);
+}
+
 static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
 			void *buf, unsigned int len, void **ctx,
 			unsigned int *xdp_xmit,
@@ -1975,7 +2009,6 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
 {
 	struct net_device *dev = vi->dev;
 	struct sk_buff *skb;
-	struct virtio_net_common_hdr *hdr;
 	u8 flags;
 
 	if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
@@ -2005,32 +2038,7 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
 	if (unlikely(!skb))
 		return;
 
-	hdr = skb_vnet_common_hdr(skb);
-	if (dev->features & NETIF_F_RXHASH && vi->has_rss_hash_report)
-		virtio_skb_set_hash(&hdr->hash_v1_hdr, skb);
-
-	if (flags & VIRTIO_NET_HDR_F_DATA_VALID)
-		skb->ip_summed = CHECKSUM_UNNECESSARY;
-
-	if (virtio_net_hdr_to_skb(skb, &hdr->hdr,
-				  virtio_is_little_endian(vi->vdev))) {
-		net_warn_ratelimited("%s: bad gso: type: %u, size: %u\n",
-				     dev->name, hdr->hdr.gso_type,
-				     hdr->hdr.gso_size);
-		goto frame_err;
-	}
-
-	skb_record_rx_queue(skb, vq2rxq(rq->vq));
-	skb->protocol = eth_type_trans(skb, dev);
-	pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
-		 ntohs(skb->protocol), skb->len, skb->pkt_type);
-
-	napi_gro_receive(&rq->napi, skb);
-	return;
-
-frame_err:
-	DEV_STATS_INC(dev, rx_frame_errors);
-	dev_kfree_skb(skb);
+	virtnet_receive_done(vi, rq, skb, flags);
 }
 
 /* Unlike mergeable buffers, all buffers are allocated to the
From patchwork Fri Jul 5 07:37:29 2024
X-Patchwork-Id: 13724474
From: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
To: netdev@vger.kernel.org
Cc: "Michael S. Tsirkin", Jason Wang, Xuan Zhuo, Eugenio Pérez,
 "David S. Miller", Eric Dumazet, Jakub Kicinski, Paolo Abeni,
 Alexei Starovoitov, Daniel Borkmann, Jesper Dangaard Brouer,
 John Fastabend, virtualization@lists.linux.dev, bpf@vger.kernel.org
Subject: [PATCH net-next v7 05/10] virtio_net: separate receive_mergeable
Date: Fri, 5 Jul 2024 15:37:29 +0800
Message-Id: <20240705073734.93905-6-xuanzhuo@linux.alibaba.com>
In-Reply-To: <20240705073734.93905-1-xuanzhuo@linux.alibaba.com>
References: <20240705073734.93905-1-xuanzhuo@linux.alibaba.com>

This commit splits receive_mergeable(): the logic that appends a
fragment to the skb is moved into an independent function,
virtnet_skb_append_frag(). A subsequent commit will reuse it.
Signed-off-by: Xuan Zhuo
Acked-by: Jason Wang
---
 drivers/net/virtio_net.c | 77 ++++++++++++++++++++++++----------------
 1 file changed, 47 insertions(+), 30 deletions(-)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index abfc84af90ce..3c828cdd438b 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1821,6 +1821,49 @@ static struct sk_buff *receive_mergeable_xdp(struct net_device *dev,
 	return NULL;
 }
 
+static struct sk_buff *virtnet_skb_append_frag(struct sk_buff *head_skb,
+					       struct sk_buff *curr_skb,
+					       struct page *page, void *buf,
+					       int len, int truesize)
+{
+	int num_skb_frags;
+	int offset;
+
+	num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
+	if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
+		struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);
+
+		if (unlikely(!nskb))
+			return NULL;
+
+		if (curr_skb == head_skb)
+			skb_shinfo(curr_skb)->frag_list = nskb;
+		else
+			curr_skb->next = nskb;
+		curr_skb = nskb;
+		head_skb->truesize += nskb->truesize;
+		num_skb_frags = 0;
+	}
+
+	if (curr_skb != head_skb) {
+		head_skb->data_len += len;
+		head_skb->len += len;
+		head_skb->truesize += truesize;
+	}
+
+	offset = buf - page_address(page);
+	if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) {
+		put_page(page);
+		skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1,
+				     len, truesize);
+	} else {
+		skb_add_rx_frag(curr_skb, num_skb_frags, page,
+				offset, len, truesize);
+	}
+
+	return curr_skb;
+}
+
 static struct sk_buff *receive_mergeable(struct net_device *dev,
 					 struct virtnet_info *vi,
 					 struct receive_queue *rq,
@@ -1870,8 +1913,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 	if (unlikely(!curr_skb))
 		goto err_skb;
 	while (--num_buf) {
-		int num_skb_frags;
-
 		buf = virtnet_rq_get_buf(rq, &len, &ctx);
 		if (unlikely(!buf)) {
 			pr_debug("%s: rx error: %d buffers out of %d missing\n",
@@ -1896,34 +1937,10 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 			goto err_skb;
 		}
 
-		num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
-		if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
-			struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);
-
-			if (unlikely(!nskb))
-				goto err_skb;
-			if (curr_skb == head_skb)
-				skb_shinfo(curr_skb)->frag_list = nskb;
-			else
-				curr_skb->next = nskb;
-			curr_skb = nskb;
-			head_skb->truesize += nskb->truesize;
-			num_skb_frags = 0;
-		}
-		if (curr_skb != head_skb) {
-			head_skb->data_len += len;
-			head_skb->len += len;
-			head_skb->truesize += truesize;
-		}
-		offset = buf - page_address(page);
-		if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) {
-			put_page(page);
-			skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1,
-					     len, truesize);
-		} else {
-			skb_add_rx_frag(curr_skb, num_skb_frags, page,
-					offset, len, truesize);
-		}
+		curr_skb = virtnet_skb_append_frag(head_skb, curr_skb, page,
+						   buf, len, truesize);
+		if (!curr_skb)
+			goto err_skb;
 	}
 
 	ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len);
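[For readers new to this path, a schematic of the skb geometry that
virtnet_skb_append_frag() builds once a mergeable packet spans more
than MAX_SKB_FRAGS buffers; this is an annotation, not part of the
patch:]

/*
 * Pages land in the frag array of the current skb; once that array is
 * full, a zero-length skb is chained so its own frag array can take
 * the remaining buffers (schematic):
 *
 *   head_skb
 *     frags[0..MAX_SKB_FRAGS-1]       <- skb_add_rx_frag()/coalesce
 *     frag_list -> nskb
 *                    frags[...]
 *                    next -> nskb2 ...
 *
 * head_skb->len/data_len/truesize are kept in sync as each fragment
 * is attached, whichever skb in the chain actually holds it.
 */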
From patchwork Fri Jul 5 07:37:30 2024
X-Patchwork-Id: 13724475
From: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
To: netdev@vger.kernel.org
Cc: "Michael S. Tsirkin", Jason Wang, Xuan Zhuo, Eugenio Pérez,
 "David S. Miller", Eric Dumazet, Jakub Kicinski, Paolo Abeni,
 Alexei Starovoitov, Daniel Borkmann, Jesper Dangaard Brouer,
 John Fastabend, virtualization@lists.linux.dev, bpf@vger.kernel.org
Subject: [PATCH net-next v7 06/10] virtio_net: xsk: bind/unbind xsk for rx
Date: Fri, 5 Jul 2024 15:37:30 +0800
Message-Id: <20240705073734.93905-7-xuanzhuo@linux.alibaba.com>
In-Reply-To: <20240705073734.93905-1-xuanzhuo@linux.alibaba.com>
References: <20240705073734.93905-1-xuanzhuo@linux.alibaba.com>

This patch implements the logic of binding/unbinding the xsk pool to/
from a rq.

Signed-off-by: Xuan Zhuo
Acked-by: Jason Wang
---
v7:
    1. remove a container struct for xsk
    2. update comments
    3. add check between hdr_len and xsk headroom

 drivers/net/virtio_net.c | 134 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 134 insertions(+)

--
2.32.0.3.g01195cf9f

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 3c828cdd438b..cd87b39600d4 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -25,6 +25,7 @@
 #include <net/net_failover.h>
 #include <net/netdev_rx_queue.h>
 #include <net/netdev_queues.h>
+#include <net/xdp_sock_drv.h>
 
 static int napi_weight = NAPI_POLL_WEIGHT;
 module_param(napi_weight, int, 0444);
@@ -348,6 +349,11 @@ struct receive_queue {
 
 	/* Record the last dma info to free after new pages is allocated. */
 	struct virtnet_rq_dma *last_dma;
+
+	struct xsk_buff_pool *xsk_pool;
+
+	/* xdp rxq used by xsk */
+	struct xdp_rxq_info xsk_rxq_info;
 };
 
 /* This structure can contain rss message with maximum settings for indirection table and keysize
@@ -5026,6 +5032,132 @@ static int virtnet_restore_guest_offloads(struct virtnet_info *vi)
 	return virtnet_set_guest_offloads(vi, offloads);
 }
 
+static int virtnet_rq_bind_xsk_pool(struct virtnet_info *vi, struct receive_queue *rq,
+				    struct xsk_buff_pool *pool)
+{
+	int err, qindex;
+
+	qindex = rq - vi->rq;
+
+	if (pool) {
+		err = xdp_rxq_info_reg(&rq->xsk_rxq_info, vi->dev, qindex, rq->napi.napi_id);
+		if (err < 0)
+			return err;
+
+		err = xdp_rxq_info_reg_mem_model(&rq->xsk_rxq_info,
+						 MEM_TYPE_XSK_BUFF_POOL, NULL);
+		if (err < 0)
+			goto unreg;
+
+		xsk_pool_set_rxq_info(pool, &rq->xsk_rxq_info);
+	}
+
+	virtnet_rx_pause(vi, rq);
+
+	err = virtqueue_reset(rq->vq, virtnet_rq_unmap_free_buf);
+	if (err) {
+		netdev_err(vi->dev, "reset rx fail: rx queue index: %d err: %d\n", qindex, err);
+
+		pool = NULL;
+	}
+
+	rq->xsk_pool = pool;
+
+	virtnet_rx_resume(vi, rq);
+
+	if (pool)
+		return 0;
+
+unreg:
+	xdp_rxq_info_unreg(&rq->xsk_rxq_info);
+	return err;
+}
+
+static int virtnet_xsk_pool_enable(struct net_device *dev,
+				   struct xsk_buff_pool *pool,
+				   u16 qid)
+{
+	struct virtnet_info *vi = netdev_priv(dev);
+	struct receive_queue *rq;
+	struct device *dma_dev;
+	struct send_queue *sq;
+	int err;
+
+	if (vi->hdr_len > xsk_pool_get_headroom(pool))
+		return -EINVAL;
+
+	/* In big_packets mode, xdp cannot work, so there is no need to
+	 * initialize xsk of rq.
+	 */
+	if (vi->big_packets && !vi->mergeable_rx_bufs)
+		return -ENOENT;
+
+	if (qid >= vi->curr_queue_pairs)
+		return -EINVAL;
+
+	sq = &vi->sq[qid];
+	rq = &vi->rq[qid];
+
+	/* xsk assumes that tx and rx must have the same dma device. The af-xdp
+	 * may use one buffer to receive from the rx and reuse this buffer to
+	 * send by the tx. So the dma dev of sq and rq must be the same one.
+	 *
+	 * But vq->dma_dev allows every vq has the respective dma dev. So I
+	 * check the dma dev of vq and sq is the same dev.
+	 */
+	if (virtqueue_dma_dev(rq->vq) != virtqueue_dma_dev(sq->vq))
+		return -EPERM;
+
+	dma_dev = virtqueue_dma_dev(rq->vq);
+	if (!dma_dev)
+		return -EPERM;
+
+	err = xsk_pool_dma_map(pool, dma_dev, 0);
+	if (err)
+		goto err_xsk_map;
+
+	err = virtnet_rq_bind_xsk_pool(vi, rq, pool);
+	if (err)
+		goto err_rq;
+
+	return 0;
+
+err_rq:
+	xsk_pool_dma_unmap(pool, 0);
+err_xsk_map:
+	return err;
+}
+
+static int virtnet_xsk_pool_disable(struct net_device *dev, u16 qid)
+{
+	struct virtnet_info *vi = netdev_priv(dev);
+	struct xsk_buff_pool *pool;
+	struct receive_queue *rq;
+	int err;
+
+	if (qid >= vi->curr_queue_pairs)
+		return -EINVAL;
+
+	rq = &vi->rq[qid];
+
+	pool = rq->xsk_pool;
+
+	err = virtnet_rq_bind_xsk_pool(vi, rq, NULL);
+
+	xsk_pool_dma_unmap(pool, 0);
+
+	return err;
+}
+
+static int virtnet_xsk_pool_setup(struct net_device *dev, struct netdev_bpf *xdp)
+{
+	if (xdp->xsk.pool)
+		return virtnet_xsk_pool_enable(dev, xdp->xsk.pool,
+					       xdp->xsk.queue_id);
+	else
+		return virtnet_xsk_pool_disable(dev, xdp->xsk.queue_id);
+}
+
 static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
 			   struct netlink_ext_ack *extack)
 {
@@ -5151,6 +5283,8 @@ static int virtnet_xdp(struct net_device *dev, struct netdev_bpf *xdp)
 	switch (xdp->command) {
 	case XDP_SETUP_PROG:
 		return virtnet_xdp_set(dev, xdp->prog, xdp->extack);
+	case XDP_SETUP_XSK_POOL:
+		return virtnet_xsk_pool_setup(dev, xdp);
 	default:
 		return -EINVAL;
 	}
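[For context: the XDP_SETUP_XSK_POOL command handled above is driven
by userspace creating an AF_XDP socket and binding it to this queue.
A minimal, untested userspace sketch using the raw if_xdp UAPI, with
error handling omitted; sizes and names are illustrative:]

/* Illustrative sketch only: what ultimately reaches
 * virtnet_xsk_pool_enable() via ndo_bpf(XDP_SETUP_XSK_POOL).
 */
#include <linux/if_xdp.h>
#include <sys/socket.h>
#include <sys/mman.h>

int bind_xsk(int ifindex, int queue_id)
{
	int fd = socket(AF_XDP, SOCK_RAW, 0);
	struct xdp_umem_reg ureg = {0};
	struct sockaddr_xdp sxdp = {0};
	int ring_size = 2048;
	void *umem_area;

	/* register the UMEM that rx/tx descriptors will point into */
	umem_area = mmap(NULL, 4096 * 4096, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	ureg.addr = (unsigned long long)umem_area;
	ureg.len = 4096 * 4096;
	ureg.chunk_size = 4096;
	ureg.headroom = 0;	/* pool headroom still includes XDP_PACKET_HEADROOM */
	setsockopt(fd, SOL_XDP, XDP_UMEM_REG, &ureg, sizeof(ureg));

	/* fill/completion/rx rings must exist before bind() */
	setsockopt(fd, SOL_XDP, XDP_UMEM_FILL_RING, &ring_size, sizeof(ring_size));
	setsockopt(fd, SOL_XDP, XDP_UMEM_COMPLETION_RING, &ring_size, sizeof(ring_size));
	setsockopt(fd, SOL_XDP, XDP_RX_RING, &ring_size, sizeof(ring_size));

	/* bind() creates the xsk pool and invokes the driver's
	 * ndo_bpf(XDP_SETUP_XSK_POOL) for this queue */
	sxdp.sxdp_family = AF_XDP;
	sxdp.sxdp_ifindex = ifindex;
	sxdp.sxdp_queue_id = queue_id;
	bind(fd, (struct sockaddr *)&sxdp, sizeof(sxdp));

	return fd;
}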
From patchwork Fri Jul 5 07:37:31 2024
X-Patchwork-Id: 13724477
From: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
To: netdev@vger.kernel.org
Cc: "Michael S. Tsirkin", Jason Wang, Xuan Zhuo, Eugenio Pérez,
 "David S. Miller", Eric Dumazet, Jakub Kicinski, Paolo Abeni,
 Alexei Starovoitov, Daniel Borkmann, Jesper Dangaard Brouer,
 John Fastabend, virtualization@lists.linux.dev, bpf@vger.kernel.org
Subject: [PATCH net-next v7 07/10] virtio_net: xsk: support wakeup
Date: Fri, 5 Jul 2024 15:37:31 +0800
Message-Id: <20240705073734.93905-8-xuanzhuo@linux.alibaba.com>
In-Reply-To: <20240705073734.93905-1-xuanzhuo@linux.alibaba.com>
References: <20240705073734.93905-1-xuanzhuo@linux.alibaba.com>

xsk wakeup is used by the xsk framework or by userspace to trigger
xsk xmit. virtio-net cannot actively generate an interrupt, so it
triggers the tx NAPI on the local CPU instead.

Signed-off-by: Xuan Zhuo
Acked-by: Jason Wang
---
 drivers/net/virtio_net.c | 24 ++++++++++++++++++++++++
 1 file changed, 24 insertions(+)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index cd87b39600d4..29fa25ce1a7f 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1054,6 +1054,29 @@ static void check_sq_full_and_disable(struct virtnet_info *vi,
 	}
 }
 
+static int virtnet_xsk_wakeup(struct net_device *dev, u32 qid, u32 flag)
+{
+	struct virtnet_info *vi = netdev_priv(dev);
+	struct send_queue *sq;
+
+	if (!netif_running(dev))
+		return -ENETDOWN;
+
+	if (qid >= vi->curr_queue_pairs)
+		return -EINVAL;
+
+	sq = &vi->sq[qid];
+
+	if (napi_if_scheduled_mark_missed(&sq->napi))
+		return 0;
+
+	local_bh_disable();
+	virtqueue_napi_schedule(&sq->napi, sq->vq);
+	local_bh_enable();
+
+	return 0;
+}
+
 static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
 				  struct send_queue *sq,
 				  struct xdp_frame *xdpf)
@@ -5399,6 +5422,7 @@ static const struct net_device_ops virtnet_netdev = {
 	.ndo_vlan_rx_kill_vid	= virtnet_vlan_rx_kill_vid,
 	.ndo_bpf		= virtnet_xdp,
 	.ndo_xdp_xmit		= virtnet_xdp_xmit,
+	.ndo_xsk_wakeup		= virtnet_xsk_wakeup,
 	.ndo_features_check	= passthru_features_check,
 	.ndo_get_phys_port_name	= virtnet_get_phys_port_name,
 	.ndo_set_features	= virtnet_set_features,
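[For context: on the userspace side, the conventional kick for a socket
bound with XDP_USE_NEED_WAKEUP is a zero-length sendto(), which lands
in the driver's .ndo_xsk_wakeup (virtnet_xsk_wakeup above). A minimal
illustrative sketch, not part of the patch:]

#include <errno.h>
#include <sys/socket.h>

static int xsk_kick_tx(int xsk_fd)
{
	int ret = sendto(xsk_fd, NULL, 0, MSG_DONTWAIT, NULL, 0);

	/* EAGAIN, EBUSY and ENETDOWN are transient here and simply mean
	 * "try again later"; anything else is a real error. */
	if (ret < 0 && errno != EAGAIN && errno != EBUSY && errno != ENETDOWN)
		return -errno;

	return 0;
}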
From patchwork Fri Jul 5 07:37:32 2024
X-Patchwork-Id: 13724479
From: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
To: netdev@vger.kernel.org
Cc: "Michael S. Tsirkin", Jason Wang, Xuan Zhuo, Eugenio Pérez,
 "David S. Miller", Eric Dumazet, Jakub Kicinski, Paolo Abeni,
 Alexei Starovoitov, Daniel Borkmann, Jesper Dangaard Brouer,
 John Fastabend, virtualization@lists.linux.dev, bpf@vger.kernel.org
Subject: [PATCH net-next v7 08/10] virtio_net: xsk: rx: support fill with xsk buffer
Date: Fri, 5 Jul 2024 15:37:32 +0800
Message-Id: <20240705073734.93905-9-xuanzhuo@linux.alibaba.com>
In-Reply-To: <20240705073734.93905-1-xuanzhuo@linux.alibaba.com>
References: <20240705073734.93905-1-xuanzhuo@linux.alibaba.com>

Implement the logic of filling rq with XSK buffers.

Signed-off-by: Xuan Zhuo
---
v7:
    1. some small fixes

 drivers/net/virtio_net.c | 70 +++++++++++++++++++++++++++++++++++++---
 1 file changed, 66 insertions(+), 4 deletions(-)

--
2.32.0.3.g01195cf9f

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 29fa25ce1a7f..2b27f5ada64a 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -354,6 +354,8 @@ struct receive_queue {
 
 	/* xdp rxq used by xsk */
 	struct xdp_rxq_info xsk_rxq_info;
+
+	struct xdp_buff **xsk_buffs;
 };
 
 /* This structure can contain rss message with maximum settings for indirection table and keysize
@@ -1054,6 +1056,53 @@ static void check_sq_full_and_disable(struct virtnet_info *vi,
 	}
 }
 
+static void sg_fill_dma(struct scatterlist *sg, dma_addr_t addr, u32 len)
+{
+	sg->dma_address = addr;
+	sg->length = len;
+}
+
+static int virtnet_add_recvbuf_xsk(struct virtnet_info *vi, struct receive_queue *rq,
+				   struct xsk_buff_pool *pool, gfp_t gfp)
+{
+	struct xdp_buff **xsk_buffs;
+	dma_addr_t addr;
+	int err = 0;
+	u32 len, i;
+	int num;
+
+	xsk_buffs = rq->xsk_buffs;
+
+	num = xsk_buff_alloc_batch(pool, xsk_buffs, rq->vq->num_free);
+	if (!num)
+		return -ENOMEM;
+
+	len = xsk_pool_get_rx_frame_size(pool) + vi->hdr_len;
+
+	for (i = 0; i < num; ++i) {
+		/* use the part of XDP_PACKET_HEADROOM as the virtnet hdr space */
+		addr = xsk_buff_xdp_get_dma(xsk_buffs[i]) - vi->hdr_len;
+
+		sg_init_table(rq->sg, 1);
+		sg_fill_dma(rq->sg, addr, len);
+
+		err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, xsk_buffs[i], gfp);
+		if (err)
+			goto err;
+	}
+
+	return num;
+
+err:
+	if (i)
+		err = i;
+
+	for (; i < num; ++i)
+		xsk_buff_free(xsk_buffs[i]);
+
+	return err;
+}
+
 static int virtnet_xsk_wakeup(struct net_device *dev, u32 qid, u32 flag)
 {
 	struct virtnet_info *vi = netdev_priv(dev);
@@ -2245,7 +2294,11 @@ static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
 			  gfp_t gfp)
 {
 	int err;
-	bool oom;
+
+	if (rq->xsk_pool) {
+		err = virtnet_add_recvbuf_xsk(vi, rq, rq->xsk_pool, gfp);
+		goto kick;
+	}
 
 	do {
 		if (vi->mergeable_rx_bufs)
@@ -2255,10 +2308,11 @@ static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
 		else
 			err = add_recvbuf_small(vi, rq, gfp);
 
-		oom = err == -ENOMEM;
 		if (err)
 			break;
 	} while (rq->vq->num_free);
+
+kick:
 	if (virtqueue_kick_prepare(rq->vq) && virtqueue_notify(rq->vq)) {
 		unsigned long flags;
 
@@ -2267,7 +2321,7 @@ static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
 		u64_stats_update_end_irqrestore(&rq->stats.syncp, flags);
 	}
 
-	return !oom;
+	return err != -ENOMEM;
 }
 
@@ -5104,7 +5158,7 @@ static int virtnet_xsk_pool_enable(struct net_device *dev,
 	struct receive_queue *rq;
 	struct device *dma_dev;
 	struct send_queue *sq;
-	int err;
+	int err, size;
 
 	if (vi->hdr_len > xsk_pool_get_headroom(pool))
 		return -EINVAL;
@@ -5135,6 +5189,12 @@ static int virtnet_xsk_pool_enable(struct net_device *dev,
 	if (!dma_dev)
 		return -EPERM;
 
+	size = virtqueue_get_vring_size(rq->vq);
+
+	rq->xsk_buffs = kvcalloc(size, sizeof(*rq->xsk_buffs), GFP_KERNEL);
+	if (!rq->xsk_buffs)
+		return -ENOMEM;
+
 	err = xsk_pool_dma_map(pool, dma_dev, 0);
 	if (err)
 		goto err_xsk_map;
@@ -5169,6 +5229,8 @@ static int virtnet_xsk_pool_disable(struct net_device *dev, u16 qid)
 
 	xsk_pool_dma_unmap(pool, 0);
 
+	kvfree(rq->xsk_buffs);
+
 	return err;
 }
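[The fill math above is the subtle part: the virtio-net header is
carved out of the buffer's XSK headroom instead of getting its own
descriptor. A schematic of one posted buffer, as an annotation rather
than part of the patch, assuming the pool headroom already covers
XDP_PACKET_HEADROOM:]

/*
 * Layout of one xsk buffer as posted to the rx virtqueue (schematic):
 *
 *   addr = xsk_buff_xdp_get_dma(xdp) - vi->hdr_len
 *   |
 *   v
 *   +--------------------+----------------------------------------+
 *   | virtio_net header  | packet data (xsk_pool rx frame size)   |
 *   +--------------------+----------------------------------------+
 *                        ^
 *                        xsk_buff_xdp_get_dma(xdp)
 *
 *   |<---- len = vi->hdr_len + xsk_pool_get_rx_frame_size() ----->|
 *
 * The device writes the virtio-net header into the tail of the XSK
 * headroom, which is why virtnet_xsk_pool_enable() rejects pools
 * where vi->hdr_len > xsk_pool_get_headroom(pool).
 */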
from out30-131.freemail.mail.aliyun.com (out30-131.freemail.mail.aliyun.com [115.124.30.131]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 2E3071442F4; Fri, 5 Jul 2024 07:37:46 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=115.124.30.131 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1720165069; cv=none; b=ja1qaSbbnLoDlB4zIX7K8ScQATclo6RZXr+dYnxSOWm39Ze97MkWLyuNmPp8+qmqDa7YvVxpvUZv8DvbDGd579gTux/HX1Dp2UAIrSsltOsPJRr4f54CVO7E4ffGF+Y3nbM7LFExk3hGDlr8GRnR3zoF211Ul9WinIumBx2u0sM= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1720165069; c=relaxed/simple; bh=yNcRM6/XQ5dpnPW7D+sH8jXAcQeMEoK3wR1EGc5jdLo=; h=From:To:Cc:Subject:Date:Message-Id:In-Reply-To:References: MIME-Version; b=CbpyAIgxdLf13W6DojGL7IVZywnV+9KXtOjMs5rSybppaqv1Hqcu1kdtlSooGR+swmNQuWskkoI1iPZ9d2uwc+OZRcxWYcegMtxFI2f7n3XMoxE6NaDW6WH4BUWYCmBGA5d0lY19WWewOlzrsapveY+mQ09zE7CC7v5HP+qEh2o= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=linux.alibaba.com; spf=pass smtp.mailfrom=linux.alibaba.com; dkim=pass (1024-bit key) header.d=linux.alibaba.com header.i=@linux.alibaba.com header.b=IRmBZY2Q; arc=none smtp.client-ip=115.124.30.131 Authentication-Results: smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=linux.alibaba.com Authentication-Results: smtp.subspace.kernel.org; spf=pass smtp.mailfrom=linux.alibaba.com Authentication-Results: smtp.subspace.kernel.org; dkim=pass (1024-bit key) header.d=linux.alibaba.com header.i=@linux.alibaba.com header.b="IRmBZY2Q" DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=linux.alibaba.com; s=default; t=1720165065; h=From:To:Subject:Date:Message-Id:MIME-Version; bh=rQ0mHCE6Utdw6/6yyXkdojlqOfUn4EM+15JMY7dZD/o=; b=IRmBZY2QD+b72PQs7J/QHQrqfjgzoOC0tAAdQERiiZl03ypr5FaI2WFLFQEvS8tFheWNRJtTlRRM4dXGFNPkSyX+Xbb4NkdpjHHooBad3HRjHG/VXDnK88cgRuwXKS4AboeLCsyRzxIt1IpyL9x5SPgh7f7Fl1Smklz4P8ydeAw= X-Alimail-AntiSpam: AC=PASS;BC=-1|-1;BR=01201311R731e4;CH=green;DM=||false|;DS=||;FP=0|-1|-1|-1|0|-1|-1|-1;HT=maildocker-contentspam033068173054;MF=xuanzhuo@linux.alibaba.com;NM=1;PH=DS;RN=15;SR=0;TI=SMTPD_---0W9u53qf_1720165063; Received: from localhost(mailfrom:xuanzhuo@linux.alibaba.com fp:SMTPD_---0W9u53qf_1720165063) by smtp.aliyun-inc.com; Fri, 05 Jul 2024 15:37:44 +0800 From: Xuan Zhuo To: netdev@vger.kernel.org Cc: "Michael S. Tsirkin" , Jason Wang , Xuan Zhuo , =?utf-8?q?Eugenio_P=C3=A9rez?= , "David S. Miller" , Eric Dumazet , Jakub Kicinski , Paolo Abeni , Alexei Starovoitov , Daniel Borkmann , Jesper Dangaard Brouer , John Fastabend , virtualization@lists.linux.dev, bpf@vger.kernel.org Subject: [PATCH net-next v7 09/10] virtio_net: xsk: rx: support recv small mode Date: Fri, 5 Jul 2024 15:37:33 +0800 Message-Id: <20240705073734.93905-10-xuanzhuo@linux.alibaba.com> X-Mailer: git-send-email 2.32.0.3.g01195cf9f In-Reply-To: <20240705073734.93905-1-xuanzhuo@linux.alibaba.com> References: <20240705073734.93905-1-xuanzhuo@linux.alibaba.com> Precedence: bulk X-Mailing-List: bpf@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 X-Git-Hash: bfc652b69f2c X-Patchwork-Delegate: kuba@kernel.org In the process: 1. We may need to copy data to create skb for XDP_PASS. 2. We may need to call xsk_buff_free() to release the buffer. 3. The handle for xdp_buff is difference from the buffer. 
Signed-off-by: Xuan Zhuo
Acked-by: Jason Wang

---
v7:
    1. rename xdp_construct_skb to xsk_construct_skb
    2. refactor virtnet_receive()

 drivers/net/virtio_net.c | 176 +++++++++++++++++++++++++++++++++++++--
 1 file changed, 168 insertions(+), 8 deletions(-)

--
2.32.0.3.g01195cf9f

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 2b27f5ada64a..64d8cd481890 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -498,6 +498,12 @@ struct virtio_net_common_hdr {
 };

 static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
+static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
+                               struct net_device *dev,
+                               unsigned int *xdp_xmit,
+                               struct virtnet_rq_stats *stats);
+static void virtnet_receive_done(struct virtnet_info *vi, struct receive_queue *rq,
+                                 struct sk_buff *skb, u8 flags);

 static bool is_xdp_frame(void *ptr)
 {
@@ -1062,6 +1068,124 @@ static void sg_fill_dma(struct scatterlist *sg, dma_addr_t addr, u32 len)
         sg->length = len;
 }

+static struct xdp_buff *buf_to_xdp(struct virtnet_info *vi,
+                                   struct receive_queue *rq, void *buf, u32 len)
+{
+        struct xdp_buff *xdp;
+        u32 bufsize;
+
+        xdp = (struct xdp_buff *)buf;
+
+        bufsize = xsk_pool_get_rx_frame_size(rq->xsk_pool) + vi->hdr_len;
+
+        if (unlikely(len > bufsize)) {
+                pr_debug("%s: rx error: len %u exceeds truesize %u\n",
+                         vi->dev->name, len, bufsize);
+                DEV_STATS_INC(vi->dev, rx_length_errors);
+                xsk_buff_free(xdp);
+                return NULL;
+        }
+
+        xsk_buff_set_size(xdp, len);
+        xsk_buff_dma_sync_for_cpu(xdp);
+
+        return xdp;
+}
+
+static struct sk_buff *xsk_construct_skb(struct receive_queue *rq,
+                                         struct xdp_buff *xdp)
+{
+        unsigned int metasize = xdp->data - xdp->data_meta;
+        struct sk_buff *skb;
+        unsigned int size;
+
+        size = xdp->data_end - xdp->data_hard_start;
+        skb = napi_alloc_skb(&rq->napi, size);
+        if (unlikely(!skb)) {
+                xsk_buff_free(xdp);
+                return NULL;
+        }
+
+        skb_reserve(skb, xdp->data_meta - xdp->data_hard_start);
+
+        size = xdp->data_end - xdp->data_meta;
+        memcpy(__skb_put(skb, size), xdp->data_meta, size);
+
+        if (metasize) {
+                __skb_pull(skb, metasize);
+                skb_metadata_set(skb, metasize);
+        }
+
+        xsk_buff_free(xdp);
+
+        return skb;
+}
+
+static struct sk_buff *virtnet_receive_xsk_small(struct net_device *dev, struct virtnet_info *vi,
+                                                 struct receive_queue *rq, struct xdp_buff *xdp,
+                                                 unsigned int *xdp_xmit,
+                                                 struct virtnet_rq_stats *stats)
+{
+        struct bpf_prog *prog;
+        u32 ret;
+
+        ret = XDP_PASS;
+        rcu_read_lock();
+        prog = rcu_dereference(rq->xdp_prog);
+        if (prog)
+                ret = virtnet_xdp_handler(prog, xdp, dev, xdp_xmit, stats);
+        rcu_read_unlock();
+
+        switch (ret) {
+        case XDP_PASS:
+                return xsk_construct_skb(rq, xdp);
+
+        case XDP_TX:
+        case XDP_REDIRECT:
+                return NULL;
+
+        default:
+                /* drop packet */
+                xsk_buff_free(xdp);
+                u64_stats_inc(&stats->drops);
+                return NULL;
+        }
+}
+
+static void virtnet_receive_xsk_buf(struct virtnet_info *vi, struct receive_queue *rq,
+                                    void *buf, u32 len,
+                                    unsigned int *xdp_xmit,
+                                    struct virtnet_rq_stats *stats)
+{
+        struct net_device *dev = vi->dev;
+        struct sk_buff *skb = NULL;
+        struct xdp_buff *xdp;
+        u8 flags;
+
+        len -= vi->hdr_len;
+
+        u64_stats_add(&stats->bytes, len);
+
+        xdp = buf_to_xdp(vi, rq, buf, len);
+        if (!xdp)
+                return;
+
+        if (unlikely(len < ETH_HLEN)) {
+                pr_debug("%s: short packet %i\n", dev->name, len);
+                DEV_STATS_INC(dev, rx_length_errors);
+                xsk_buff_free(xdp);
+                return;
+        }
+
+        flags = ((struct virtio_net_common_hdr *)(xdp->data - vi->hdr_len))->hdr.flags;
+
+        if (!vi->mergeable_rx_bufs)
+                skb = virtnet_receive_xsk_small(dev, vi, rq, xdp, xdp_xmit, stats);
+
+        if (skb)
+                virtnet_receive_done(vi, rq, skb, flags);
+}
+
 static int virtnet_add_recvbuf_xsk(struct virtnet_info *vi, struct receive_queue *rq,
                                    struct xsk_buff_pool *pool, gfp_t gfp)
 {
@@ -2392,32 +2516,68 @@ static void refill_work(struct work_struct *work)
         }
 }

-static int virtnet_receive(struct receive_queue *rq, int budget,
-                           unsigned int *xdp_xmit)
+static int virtnet_receive_xsk_bufs(struct virtnet_info *vi,
+                                    struct receive_queue *rq,
+                                    int budget,
+                                    unsigned int *xdp_xmit,
+                                    struct virtnet_rq_stats *stats)
+{
+        unsigned int len;
+        int packets = 0;
+        void *buf;
+
+        while (packets < budget) {
+                buf = virtqueue_get_buf(rq->vq, &len);
+                if (!buf)
+                        break;
+
+                virtnet_receive_xsk_buf(vi, rq, buf, len, xdp_xmit, stats);
+                packets++;
+        }
+
+        return packets;
+}
+
+static int virtnet_receive_packets(struct virtnet_info *vi,
+                                   struct receive_queue *rq,
+                                   int budget,
+                                   unsigned int *xdp_xmit,
+                                   struct virtnet_rq_stats *stats)
 {
-        struct virtnet_info *vi = rq->vq->vdev->priv;
-        struct virtnet_rq_stats stats = {};
         unsigned int len;
         int packets = 0;
         void *buf;
-        int i;

         if (!vi->big_packets || vi->mergeable_rx_bufs) {
                 void *ctx;

                 while (packets < budget &&
                        (buf = virtnet_rq_get_buf(rq, &len, &ctx))) {
-                        receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &stats);
+                        receive_buf(vi, rq, buf, len, ctx, xdp_xmit, stats);
                         packets++;
                 }
         } else {
                 while (packets < budget &&
                        (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
-                        receive_buf(vi, rq, buf, len, NULL, xdp_xmit, &stats);
+                        receive_buf(vi, rq, buf, len, NULL, xdp_xmit, stats);
                         packets++;
                 }
         }

+        return packets;
+}
+
+static int virtnet_receive(struct receive_queue *rq, int budget,
+                           unsigned int *xdp_xmit)
+{
+        struct virtnet_info *vi = rq->vq->vdev->priv;
+        struct virtnet_rq_stats stats = {};
+        int i, packets;
+
+        if (rq->xsk_pool)
+                packets = virtnet_receive_xsk_bufs(vi, rq, budget, xdp_xmit, &stats);
+        else
+                packets = virtnet_receive_packets(vi, rq, budget, xdp_xmit, &stats);
+
         if (rq->vq->num_free > min((unsigned int)budget, virtqueue_get_vring_size(rq->vq)) / 2) {
                 if (!try_fill_recv(vi, rq, GFP_ATOMIC)) {
                         spin_lock(&vi->refill_lock);
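For orientation only (not part of this patch): the userspace consumer
that this RX path feeds would drain its AF_XDP RX ring roughly like the
libxdp sketch below. The names drain_rx, rx and umem_area are
assumptions of the sketch, and socket/umem setup, error handling and
fill-ring refilling are omitted.

#include <xdp/xsk.h>    /* libxdp; older trees ship this as <bpf/xsk.h> */

/* Drain up to 64 descriptors from an AF_XDP RX ring (sketch).
 * 'rx' and 'umem_area' are assumed to come from earlier
 * xsk_socket__create() / xsk_umem__create() calls. */
static void drain_rx(struct xsk_ring_cons *rx, void *umem_area)
{
        __u32 idx = 0;
        __u32 n = xsk_ring_cons__peek(rx, 64, &idx);

        for (__u32 i = 0; i < n; i++) {
                const struct xdp_desc *desc = xsk_ring_cons__rx_desc(rx, idx + i);
                void *pkt = xsk_umem__get_data(umem_area, desc->addr);

                /* process desc->len bytes at pkt here */
                (void)pkt;
        }

        if (n)
                xsk_ring_cons__release(rx, n);
}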
From patchwork Fri Jul 5 07:37:34 2024
X-Patchwork-Submitter: Xuan Zhuo
X-Patchwork-Id: 13724480
From: Xuan Zhuo
To: netdev@vger.kernel.org
Cc: "Michael S. Tsirkin", Jason Wang, Xuan Zhuo, Eugenio Pérez,
 "David S. Miller", Eric Dumazet, Jakub Kicinski, Paolo Abeni,
 Alexei Starovoitov, Daniel Borkmann, Jesper Dangaard Brouer,
 John Fastabend, virtualization@lists.linux.dev, bpf@vger.kernel.org
Subject: [PATCH net-next v7 10/10] virtio_net: xsk: rx: support recv merge mode
Date: Fri, 5 Jul 2024 15:37:34 +0800
Message-Id: <20240705073734.93905-11-xuanzhuo@linux.alibaba.com>
In-Reply-To: <20240705073734.93905-1-xuanzhuo@linux.alibaba.com>
References: <20240705073734.93905-1-xuanzhuo@linux.alibaba.com>
X-Git-Hash: bfc652b69f2c
X-Patchwork-Delegate: kuba@kernel.org

Support AF_XDP for merge mode. In this mode a packet may span several
buffers; XDP is run only on single-buffer packets for now, and
follow-up buffers are copied and appended to the skb as fragments.
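The key input in merge mode is the num_buffers field of the virtio
header in front of each packet. A small sketch of how it is read; the
struct is from include/uapi/linux/virtio_net.h, the helper name
xsk_merge_num_bufs is illustrative, and the body matches the new
virtnet_receive_xsk_merge() below:

/* Each mergeable RX packet starts with (uapi/linux/virtio_net.h):
 *
 *        struct virtio_net_hdr_mrg_rxbuf {
 *                struct virtio_net_hdr hdr;
 *                __virtio16 num_buffers;  <- descriptors this packet spans
 *        };
 *
 * The header sits in the headroom just below xdp->data. */
static u32 xsk_merge_num_bufs(struct virtnet_info *vi, struct xdp_buff *xdp)
{
        struct virtio_net_hdr_mrg_rxbuf *hdr = xdp->data - vi->hdr_len;

        return virtio16_to_cpu(vi->vdev, hdr->num_buffers);
}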
Signed-off-by: Xuan Zhuo
Acked-by: Jason Wang

---
v7:
    1. include the handling of unused buffers

 drivers/net/virtio_net.c | 144 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 144 insertions(+)

--
2.32.0.3.g01195cf9f

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 64d8cd481890..67724e7ab5e8 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -504,6 +504,10 @@ static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
                                struct virtnet_rq_stats *stats);
 static void virtnet_receive_done(struct virtnet_info *vi, struct receive_queue *rq,
                                  struct sk_buff *skb, u8 flags);
+static struct sk_buff *virtnet_skb_append_frag(struct sk_buff *head_skb,
+                                               struct sk_buff *curr_skb,
+                                               struct page *page, void *buf,
+                                               int len, int truesize);

 static bool is_xdp_frame(void *ptr)
 {
@@ -984,6 +988,11 @@ static void virtnet_rq_unmap_free_buf(struct virtqueue *vq, void *buf)

         rq = &vi->rq[i];

+        if (rq->xsk_pool) {
+                xsk_buff_free((struct xdp_buff *)buf);
+                return;
+        }
+
         if (!vi->big_packets || vi->mergeable_rx_bufs)
                 virtnet_rq_unmap(rq, buf, 0);
@@ -1152,6 +1161,139 @@ static struct sk_buff *virtnet_receive_xsk_small(struct net_device *dev, struct
         }
 }

+static void xsk_drop_follow_bufs(struct net_device *dev,
+                                 struct receive_queue *rq,
+                                 u32 num_buf,
+                                 struct virtnet_rq_stats *stats)
+{
+        struct xdp_buff *xdp;
+        u32 len;
+
+        while (num_buf-- > 1) {
+                xdp = virtqueue_get_buf(rq->vq, &len);
+                if (unlikely(!xdp)) {
+                        pr_debug("%s: rx error: %d buffers missing\n",
+                                 dev->name, num_buf);
+                        DEV_STATS_INC(dev, rx_length_errors);
+                        break;
+                }
+                u64_stats_add(&stats->bytes, len);
+                xsk_buff_free(xdp);
+        }
+}
+
+static int xsk_append_merge_buffer(struct virtnet_info *vi,
+                                   struct receive_queue *rq,
+                                   struct sk_buff *head_skb,
+                                   u32 num_buf,
+                                   struct virtio_net_hdr_mrg_rxbuf *hdr,
+                                   struct virtnet_rq_stats *stats)
+{
+        struct sk_buff *curr_skb;
+        struct xdp_buff *xdp;
+        u32 len, truesize;
+        struct page *page;
+        void *buf;
+
+        curr_skb = head_skb;
+
+        while (--num_buf) {
+                buf = virtqueue_get_buf(rq->vq, &len);
+                if (unlikely(!buf)) {
+                        pr_debug("%s: rx error: %d buffers out of %d missing\n",
+                                 vi->dev->name, num_buf,
+                                 virtio16_to_cpu(vi->vdev,
+                                                 hdr->num_buffers));
+                        DEV_STATS_INC(vi->dev, rx_length_errors);
+                        return -EINVAL;
+                }
+
+                u64_stats_add(&stats->bytes, len);
+
+                xdp = buf_to_xdp(vi, rq, buf, len);
+                if (!xdp)
+                        goto err;
+
+                buf = napi_alloc_frag(len);
+                if (!buf) {
+                        xsk_buff_free(xdp);
+                        goto err;
+                }
+
+                memcpy(buf, xdp->data - vi->hdr_len, len);
+
+                xsk_buff_free(xdp);
+
+                page = virt_to_page(buf);
+
+                truesize = len;
+
+                curr_skb = virtnet_skb_append_frag(head_skb, curr_skb, page,
+                                                   buf, len, truesize);
+                if (!curr_skb) {
+                        put_page(page);
+                        goto err;
+                }
+        }
+
+        return 0;
+
+err:
+        xsk_drop_follow_bufs(vi->dev, rq, num_buf, stats);
+        return -EINVAL;
+}
+
+static struct sk_buff *virtnet_receive_xsk_merge(struct net_device *dev, struct virtnet_info *vi,
+                                                 struct receive_queue *rq, struct xdp_buff *xdp,
+                                                 unsigned int *xdp_xmit,
+                                                 struct virtnet_rq_stats *stats)
+{
+        struct virtio_net_hdr_mrg_rxbuf *hdr;
+        struct bpf_prog *prog;
+        struct sk_buff *skb;
+        u32 ret, num_buf;
+
+        hdr = xdp->data - vi->hdr_len;
+        num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
+
+        ret = XDP_PASS;
+        rcu_read_lock();
+        prog = rcu_dereference(rq->xdp_prog);
+        /* TODO: support multi buffer. */
+        if (prog && num_buf == 1)
+                ret = virtnet_xdp_handler(prog, xdp, dev, xdp_xmit, stats);
+        rcu_read_unlock();
+
+        switch (ret) {
+        case XDP_PASS:
+                skb = xsk_construct_skb(rq, xdp);
+                if (!skb)
+                        goto drop_bufs;
+
+                if (xsk_append_merge_buffer(vi, rq, skb, num_buf, hdr, stats)) {
+                        dev_kfree_skb(skb);
+                        goto drop;
+                }
+
+                return skb;
+
+        case XDP_TX:
+        case XDP_REDIRECT:
+                return NULL;
+
+        default:
+                /* drop packet */
+                xsk_buff_free(xdp);
+        }
+
+drop_bufs:
+        xsk_drop_follow_bufs(dev, rq, num_buf, stats);
+
drop:
+        u64_stats_inc(&stats->drops);
+        return NULL;
+}
+
 static void virtnet_receive_xsk_buf(struct virtnet_info *vi, struct receive_queue *rq,
                                     void *buf, u32 len,
                                     unsigned int *xdp_xmit,
@@ -1181,6 +1323,8 @@ static void virtnet_receive_xsk_buf(struct virtnet_info *vi, struct receive_queu

         if (!vi->mergeable_rx_bufs)
                 skb = virtnet_receive_xsk_small(dev, vi, rq, xdp, xdp_xmit, stats);
+        else
+                skb = virtnet_receive_xsk_merge(dev, vi, rq, xdp, xdp_xmit, stats);

         if (skb)
                 virtnet_receive_done(vi, rq, skb, flags);
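One point worth spelling out from the diff above: follow-up XSK frames
can never be attached to the skb directly, because they belong to the
umem and must go back to the pool via xsk_buff_free(). Each one is
therefore copied into ordinary page-frag memory first. A condensed
sketch of that step; the helper name xsk_copy_follow_buf is
illustrative, and error unwinding is reduced to NULL returns:

/* Copy one follow-up XSK frame and append it to the head skb (sketch
 * of the loop body of xsk_append_merge_buffer() above). */
static struct sk_buff *xsk_copy_follow_buf(struct virtnet_info *vi,
                                           struct sk_buff *head_skb,
                                           struct sk_buff *curr_skb,
                                           struct xdp_buff *xdp, u32 len)
{
        void *frag = napi_alloc_frag(len);      /* skb-owned memory */

        if (!frag) {
                xsk_buff_free(xdp);
                return NULL;
        }

        memcpy(frag, xdp->data - vi->hdr_len, len); /* copy out of the umem */
        xsk_buff_free(xdp);     /* frame returns to the pool immediately */

        /* attach the copy as a fragment of the head skb; truesize is
         * just len here, matching the patch */
        return virtnet_skb_append_frag(head_skb, curr_skb, virt_to_page(frag),
                                       frag, len, len);
}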