
[net-next,v4,3/4] virtio_net: rx remove premapped failover code

Message ID 20240508063718.69806-4-xuanzhuo@linux.alibaba.com (mailing list archive)
State Superseded
Delegated to: Netdev Maintainers
Series virtio_net: rx enable premapped mode by default

Checks

Context Check Description
netdev/series_format success Posting correctly formatted
netdev/tree_selection success Clearly marked for net-next
netdev/ynl success Generated files up to date; no warnings/errors; no diff in generated;
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 926 this patch: 926
netdev/build_tools success No tools touched, skip
netdev/cc_maintainers success CCed 8 of 8 maintainers
netdev/build_clang success Errors and warnings before: 937 this patch: 937
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 937 this patch: 937
netdev/checkpatch warning WARNING: Do not crash the kernel unless it is absolutely unavoidable--use WARN_ON_ONCE() plus recovery code (if feasible) instead of BUG() or variants
	WARNING: line length of 84 exceeds 80 columns
netdev/build_clang_rust success No Rust files in patch. Skipping build
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0
netdev/contest success net-next-2024-05-11--03-00 (tests: 1016)

Commit Message

Xuan Zhuo May 8, 2024, 6:37 a.m. UTC
Now that premapped mode can be enabled unconditionally, we can remove
the failover code for the mergeable and small modes.

Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
Acked-by: Jason Wang <jasowang@redhat.com>
---
 drivers/net/virtio_net.c | 85 +++++++++++++++++-----------------------
 1 file changed, 35 insertions(+), 50 deletions(-)
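
For context: in premapped mode the driver performs the DMA mapping of RX
buffers itself and hands pre-mapped addresses to the virtqueue core. A
minimal sketch of how a driver opts in, using the
virtqueue_set_dma_premapped() helper seen in the diff below (simplified;
the error path is an assumption, not the driver's exact code):

	static int enable_rx_premapped(struct virtnet_info *vi)
	{
		int i, err;

		for (i = 0; i < vi->max_queue_pairs; i++) {
			/* Legal only while the queue has no buffers in
			 * flight; with this series it is expected to
			 * always succeed, hence the BUG_ON() below.
			 */
			err = virtqueue_set_dma_premapped(vi->rq[i].vq);
			if (err)
				return err;
		}

		return 0;
	}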

Comments

Larysa Zaremba May 10, 2024, 7:42 a.m. UTC | #1
On Wed, May 08, 2024 at 02:37:17PM +0800, Xuan Zhuo wrote:
> Now that premapped mode can be enabled unconditionally, we can remove
> the failover code for the mergeable and small modes.
> 
> Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> Acked-by: Jason Wang <jasowang@redhat.com>
> ---
>  drivers/net/virtio_net.c | 85 +++++++++++++++++-----------------------
>  1 file changed, 35 insertions(+), 50 deletions(-)
> 
> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> index a2452d35bb93..070a6ed0d812 100644
> --- a/drivers/net/virtio_net.c
> +++ b/drivers/net/virtio_net.c
> @@ -344,9 +344,6 @@ struct receive_queue {
>  
>  	/* Record the last dma info to free after new pages is allocated. */
>  	struct virtnet_rq_dma *last_dma;
> -
> -	/* Do dma by self */
> -	bool do_dma;
>  };
>  
>  /* This structure can contain rss message with maximum settings for indirection table and keysize
> @@ -846,7 +843,7 @@ static void *virtnet_rq_get_buf(struct receive_queue *rq, u32 *len, void **ctx)
>  	void *buf;
>  
>  	buf = virtqueue_get_buf_ctx(rq->vq, len, ctx);
> -	if (buf && rq->do_dma)
> +	if (buf)
>  		virtnet_rq_unmap(rq, buf, *len);
>  
>  	return buf;
> @@ -859,11 +856,6 @@ static void virtnet_rq_init_one_sg(struct receive_queue *rq, void *buf, u32 len)
>  	u32 offset;
>  	void *head;
>  
> -	if (!rq->do_dma) {
> -		sg_init_one(rq->sg, buf, len);
> -		return;
> -	}
> -
>  	head = page_address(rq->alloc_frag.page);
>  
>  	offset = buf - head;
> @@ -889,44 +881,42 @@ static void *virtnet_rq_alloc(struct receive_queue *rq, u32 size, gfp_t gfp)
>  
>  	head = page_address(alloc_frag->page);
>  
> -	if (rq->do_dma) {
> -		dma = head;
> -
> -		/* new pages */
> -		if (!alloc_frag->offset) {
> -			if (rq->last_dma) {
> -				/* Now, the new page is allocated, the last dma
> -				 * will not be used. So the dma can be unmapped
> -				 * if the ref is 0.
> -				 */
> -				virtnet_rq_unmap(rq, rq->last_dma, 0);
> -				rq->last_dma = NULL;
> -			}
> +	dma = head;
>  
> -			dma->len = alloc_frag->size - sizeof(*dma);
> +	/* new pages */
> +	if (!alloc_frag->offset) {
> +		if (rq->last_dma) {
> +			/* Now, the new page is allocated, the last dma
> +			 * will not be used. So the dma can be unmapped
> +			 * if the ref is 0.
> +			 */
> +			virtnet_rq_unmap(rq, rq->last_dma, 0);
> +			rq->last_dma = NULL;
> +		}
>  
> -			addr = virtqueue_dma_map_single_attrs(rq->vq, dma + 1,
> -							      dma->len, DMA_FROM_DEVICE, 0);
> -			if (virtqueue_dma_mapping_error(rq->vq, addr))
> -				return NULL;
> +		dma->len = alloc_frag->size - sizeof(*dma);
>  
> -			dma->addr = addr;
> -			dma->need_sync = virtqueue_dma_need_sync(rq->vq, addr);
> +		addr = virtqueue_dma_map_single_attrs(rq->vq, dma + 1,
> +						      dma->len, DMA_FROM_DEVICE, 0);
> +		if (virtqueue_dma_mapping_error(rq->vq, addr))
> +			return NULL;
>  
> -			/* Add a reference to dma to prevent the entire dma from
> -			 * being released during error handling. This reference
> -			 * will be freed after the pages are no longer used.
> -			 */
> -			get_page(alloc_frag->page);
> -			dma->ref = 1;
> -			alloc_frag->offset = sizeof(*dma);
> +		dma->addr = addr;
> +		dma->need_sync = virtqueue_dma_need_sync(rq->vq, addr);
>  
> -			rq->last_dma = dma;
> -		}
> +		/* Add a reference to dma to prevent the entire dma from
> +		 * being released during error handling. This reference
> +		 * will be freed after the pages are no longer used.
> +		 */
> +		get_page(alloc_frag->page);
> +		dma->ref = 1;
> +		alloc_frag->offset = sizeof(*dma);
>  
> -		++dma->ref;
> +		rq->last_dma = dma;
>  	}
>  
> +	++dma->ref;
> +
>  	buf = head + alloc_frag->offset;
>  
>  	get_page(alloc_frag->page);
> @@ -943,12 +933,9 @@ static void virtnet_rq_set_premapped(struct virtnet_info *vi)
>  	if (!vi->mergeable_rx_bufs && vi->big_packets)
>  		return;
>  
> -	for (i = 0; i < vi->max_queue_pairs; i++) {
> -		if (virtqueue_set_dma_premapped(vi->rq[i].vq))
> -			continue;
> -
> -		vi->rq[i].do_dma = true;
> -	}
> +	for (i = 0; i < vi->max_queue_pairs; i++)
> +		/* error never happen */

/* error should never happen */

Code seems fine
Reviewed-by: Larysa Zaremba <larysa.zaremba@intel.com>

> +		BUG_ON(virtqueue_set_dma_premapped(vi->rq[i].vq));
>  }
>  
>  static void virtnet_rq_unmap_free_buf(struct virtqueue *vq, void *buf)
> @@ -2020,8 +2007,7 @@ static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
>  
>  	err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
>  	if (err < 0) {
> -		if (rq->do_dma)
> -			virtnet_rq_unmap(rq, buf, 0);
> +		virtnet_rq_unmap(rq, buf, 0);
>  		put_page(virt_to_head_page(buf));
>  	}
>  
> @@ -2135,8 +2121,7 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi,
>  	ctx = mergeable_len_to_ctx(len + room, headroom);
>  	err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
>  	if (err < 0) {
> -		if (rq->do_dma)
> -			virtnet_rq_unmap(rq, buf, 0);
> +		virtnet_rq_unmap(rq, buf, 0);
>  		put_page(virt_to_head_page(buf));
>  	}
>  
> @@ -5206,7 +5191,7 @@ static void free_receive_page_frags(struct virtnet_info *vi)
>  	int i;
>  	for (i = 0; i < vi->max_queue_pairs; i++)
>  		if (vi->rq[i].alloc_frag.page) {
> -			if (vi->rq[i].do_dma && vi->rq[i].last_dma)
> +			if (vi->rq[i].last_dma)
>  				virtnet_rq_unmap(&vi->rq[i], vi->rq[i].last_dma, 0);
>  			put_page(vi->rq[i].alloc_frag.page);
>  		}
> -- 
> 2.32.0.3.g01195cf9f
> 
>
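
The checkpatch result above warns against the new BUG_ON(). For
comparison, a WARN_ON_ONCE()-plus-recovery variant would look roughly
like the pre-patch code, which kept the rq->do_dma fallback this patch
removes (hypothetical sketch, not what the series does):

	for (i = 0; i < vi->max_queue_pairs; i++) {
		/* Warn once and leave this queue in non-premapped mode
		 * instead of crashing; recovery requires keeping the
		 * rq->do_dma flag and all of its failover paths.
		 */
		if (WARN_ON_ONCE(virtqueue_set_dma_premapped(vi->rq[i].vq)))
			continue;

		vi->rq[i].do_dma = true;
	}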

Patch

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index a2452d35bb93..070a6ed0d812 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -344,9 +344,6 @@  struct receive_queue {
 
 	/* Record the last dma info to free after new pages is allocated. */
 	struct virtnet_rq_dma *last_dma;
-
-	/* Do dma by self */
-	bool do_dma;
 };
 
 /* This structure can contain rss message with maximum settings for indirection table and keysize
@@ -846,7 +843,7 @@  static void *virtnet_rq_get_buf(struct receive_queue *rq, u32 *len, void **ctx)
 	void *buf;
 
 	buf = virtqueue_get_buf_ctx(rq->vq, len, ctx);
-	if (buf && rq->do_dma)
+	if (buf)
 		virtnet_rq_unmap(rq, buf, *len);
 
 	return buf;
@@ -859,11 +856,6 @@  static void virtnet_rq_init_one_sg(struct receive_queue *rq, void *buf, u32 len)
 	u32 offset;
 	void *head;
 
-	if (!rq->do_dma) {
-		sg_init_one(rq->sg, buf, len);
-		return;
-	}
-
 	head = page_address(rq->alloc_frag.page);
 
 	offset = buf - head;
@@ -889,44 +881,42 @@  static void *virtnet_rq_alloc(struct receive_queue *rq, u32 size, gfp_t gfp)
 
 	head = page_address(alloc_frag->page);
 
-	if (rq->do_dma) {
-		dma = head;
-
-		/* new pages */
-		if (!alloc_frag->offset) {
-			if (rq->last_dma) {
-				/* Now, the new page is allocated, the last dma
-				 * will not be used. So the dma can be unmapped
-				 * if the ref is 0.
-				 */
-				virtnet_rq_unmap(rq, rq->last_dma, 0);
-				rq->last_dma = NULL;
-			}
+	dma = head;
 
-			dma->len = alloc_frag->size - sizeof(*dma);
+	/* new pages */
+	if (!alloc_frag->offset) {
+		if (rq->last_dma) {
+			/* Now, the new page is allocated, the last dma
+			 * will not be used. So the dma can be unmapped
+			 * if the ref is 0.
+			 */
+			virtnet_rq_unmap(rq, rq->last_dma, 0);
+			rq->last_dma = NULL;
+		}
 
-			addr = virtqueue_dma_map_single_attrs(rq->vq, dma + 1,
-							      dma->len, DMA_FROM_DEVICE, 0);
-			if (virtqueue_dma_mapping_error(rq->vq, addr))
-				return NULL;
+		dma->len = alloc_frag->size - sizeof(*dma);
 
-			dma->addr = addr;
-			dma->need_sync = virtqueue_dma_need_sync(rq->vq, addr);
+		addr = virtqueue_dma_map_single_attrs(rq->vq, dma + 1,
+						      dma->len, DMA_FROM_DEVICE, 0);
+		if (virtqueue_dma_mapping_error(rq->vq, addr))
+			return NULL;
 
-			/* Add a reference to dma to prevent the entire dma from
-			 * being released during error handling. This reference
-			 * will be freed after the pages are no longer used.
-			 */
-			get_page(alloc_frag->page);
-			dma->ref = 1;
-			alloc_frag->offset = sizeof(*dma);
+		dma->addr = addr;
+		dma->need_sync = virtqueue_dma_need_sync(rq->vq, addr);
 
-			rq->last_dma = dma;
-		}
+		/* Add a reference to dma to prevent the entire dma from
+		 * being released during error handling. This reference
+		 * will be freed after the pages are no longer used.
+		 */
+		get_page(alloc_frag->page);
+		dma->ref = 1;
+		alloc_frag->offset = sizeof(*dma);
 
-		++dma->ref;
+		rq->last_dma = dma;
 	}
 
+	++dma->ref;
+
 	buf = head + alloc_frag->offset;
 
 	get_page(alloc_frag->page);
@@ -943,12 +933,9 @@  static void virtnet_rq_set_premapped(struct virtnet_info *vi)
 	if (!vi->mergeable_rx_bufs && vi->big_packets)
 		return;
 
-	for (i = 0; i < vi->max_queue_pairs; i++) {
-		if (virtqueue_set_dma_premapped(vi->rq[i].vq))
-			continue;
-
-		vi->rq[i].do_dma = true;
-	}
+	for (i = 0; i < vi->max_queue_pairs; i++)
+		/* error never happen */
+		BUG_ON(virtqueue_set_dma_premapped(vi->rq[i].vq));
 }
 
 static void virtnet_rq_unmap_free_buf(struct virtqueue *vq, void *buf)
@@ -2020,8 +2007,7 @@  static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
 
 	err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
 	if (err < 0) {
-		if (rq->do_dma)
-			virtnet_rq_unmap(rq, buf, 0);
+		virtnet_rq_unmap(rq, buf, 0);
 		put_page(virt_to_head_page(buf));
 	}
 
@@ -2135,8 +2121,7 @@  static int add_recvbuf_mergeable(struct virtnet_info *vi,
 	ctx = mergeable_len_to_ctx(len + room, headroom);
 	err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
 	if (err < 0) {
-		if (rq->do_dma)
-			virtnet_rq_unmap(rq, buf, 0);
+		virtnet_rq_unmap(rq, buf, 0);
 		put_page(virt_to_head_page(buf));
 	}
 
@@ -5206,7 +5191,7 @@  static void free_receive_page_frags(struct virtnet_info *vi)
 	int i;
 	for (i = 0; i < vi->max_queue_pairs; i++)
 		if (vi->rq[i].alloc_frag.page) {
-			if (vi->rq[i].do_dma && vi->rq[i].last_dma)
+			if (vi->rq[i].last_dma)
 				virtnet_rq_unmap(&vi->rq[i], vi->rq[i].last_dma, 0);
 			put_page(vi->rq[i].alloc_frag.page);
 		}
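
For readers tracing the reference counting in virtnet_rq_alloc(): each
RX page fragment starts with a struct virtnet_rq_dma header, and
dma->ref counts the buffers carved out of the page plus one reference
taken at map time. A sketch of the matching release side, assuming the
field names used in this patch (the driver's real helper is
virtnet_rq_unmap()):

	static void rq_dma_put(struct receive_queue *rq,
			       struct virtnet_rq_dma *dma)
	{
		/* Drop one user of the page-wide mapping and tear the
		 * mapping down once the last buffer and the map-time
		 * reference are gone.
		 */
		--dma->ref;
		if (!dma->ref)
			virtqueue_dma_unmap_single_attrs(rq->vq, dma->addr,
							 dma->len,
							 DMA_FROM_DEVICE, 0);
	}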