diff mbox series

[net-next,2/3] virtio-net: batch dim request

Message ID 1705410693-118895-3-git-send-email-hengqi@linux.alibaba.com (mailing list archive)
State Changes Requested
Delegated to: Netdev Maintainers
Headers show
Series virtio-net: a fix and some updates for virtio dim | expand

Checks

Context Check Description
netdev/series_format success Posting correctly formatted
netdev/tree_selection success Clearly marked for net-next
netdev/ynl success Generated files up to date; no warnings/errors; no diff in generated;
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit fail Errors and warnings before: 1097 this patch: 1098
netdev/cc_maintainers success CCed 0 of 0 maintainers
netdev/build_clang success Errors and warnings before: 1108 this patch: 1108
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn fail Errors and warnings before: 1114 this patch: 1115
netdev/checkpatch warning WARNING: line length of 90 exceeds 80 columns WARNING: line length of 92 exceeds 80 columns
netdev/build_clang_rust success No Rust files in patch. Skipping build
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0

Commit Message

Heng Qi Jan. 16, 2024, 1:11 p.m. UTC
Currently, when each time the driver attempts to update the coalescing
parameters for a vq, it needs to kick the device.
The following path is observed:
  1. Driver kicks the device;
  2. After the device receives the kick, CPU scheduling occurs and the
     device must DMA multiple buffers multiple times;
  3. The device completes processing and replies with a response.

When large-queue devices issue multiple requests and kick the device
frequently, this often interrupts the work of the device-side CPU.
In addition, each vq request is processed separately, causing more
delays for the CPU to wait for the DMA request to complete.

These interruptions and overhead will strain the CPU responsible for
controlling the path of the DPU, especially in multi-device and
large-queue scenarios.

To solve the above problems, we internally tried batch requests,
which merge requests from multiple queues and send them at once.
We conservatively tested 8 queue commands and sent them together.
The DPU processing efficiency can be improved by 8 times, which
greatly eases the DPU's support for multi-device and multi-queue DIM.

Suggested-by: Xiaoming Zhao <zxm377917@alibaba-inc.com>
Signed-off-by: Heng Qi <hengqi@linux.alibaba.com>
---
 drivers/net/virtio_net.c        | 54 ++++++++++++++++++++++++++++++++---------
 include/uapi/linux/virtio_net.h |  1 +
 2 files changed, 44 insertions(+), 11 deletions(-)

Comments

Simon Horman Jan. 16, 2024, 7:49 p.m. UTC | #1
On Tue, Jan 16, 2024 at 09:11:32PM +0800, Heng Qi wrote:
> Currently, when each time the driver attempts to update the coalescing
> parameters for a vq, it needs to kick the device.
> The following path is observed:
>   1. Driver kicks the device;
>   2. After the device receives the kick, CPU scheduling occurs and DMA
>      multiple buffers multiple times;
>   3. The device completes processing and replies with a response.
> 
> When large-queue devices issue multiple requests and kick the device
> frequently, this often interrupt the work of the device-side CPU.
> In addition, each vq request is processed separately, causing more
> delays for the CPU to wait for the DMA request to complete.
> 
> These interruptions and overhead will strain the CPU responsible for
> controlling the path of the DPU, especially in multi-device and
> large-queue scenarios.
> 
> To solve the above problems, we internally tried batch request,
> which merges requests from multiple queues and sends them at once.
> We conservatively tested 8 queue commands and sent them together.
> The DPU processing efficiency can be improved by 8 times, which
> greatly eases the DPU's support for multi-device and multi-queue DIM.
> 
> Suggested-by: Xiaoming Zhao <zxm377917@alibaba-inc.com>
> Signed-off-by: Heng Qi <hengqi@linux.alibaba.com>

...

> @@ -3546,16 +3552,32 @@ static void virtnet_rx_dim_work(struct work_struct *work)
>  		update_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
>  		if (update_moder.usec != rq->intr_coal.max_usecs ||
>  		    update_moder.pkts != rq->intr_coal.max_packets) {
> -			err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, qnum,
> -							       update_moder.usec,
> -							       update_moder.pkts);
> -			if (err)
> -				pr_debug("%s: Failed to send dim parameters on rxq%d\n",
> -					 dev->name, qnum);
> -			dim->state = DIM_START_MEASURE;
> +			coal->coal_vqs[j].vqn = cpu_to_le16(rxq2vq(i));
> +			coal->coal_vqs[j].coal.max_usecs = cpu_to_le32(update_moder.usec);
> +			coal->coal_vqs[j].coal.max_packets = cpu_to_le32(update_moder.pkts);
> +			rq->intr_coal.max_usecs = update_moder.usec;
> +			rq->intr_coal.max_packets = update_moder.pkts;
> +			j++;
>  		}
>  	}
>  
> +	if (!j)
> +		goto ret;
> +
> +	coal->num_entries = cpu_to_le32(j);
> +	sg_init_one(&sgs, coal, sizeof(struct virtnet_batch_coal) +
> +		    j * sizeof(struct virtio_net_ctrl_coal_vq));
> +	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
> +				  VIRTIO_NET_CTRL_NOTF_COAL_VQS_SET,
> +				  &sgs))
> +		dev_warn(&vi->vdev->dev, "Failed to add dim command\n.");
> +
> +	for (i = 0; i < j; i++) {
> +		rq = &vi->rq[(coal->coal_vqs[i].vqn) / 2];

Hi Heng Qi,

The type of .vqn is __le16, but here it is used as an
integer in host byte order. Perhaps this should be (completely untested!):

		rq = &vi->rq[le16_to_cpu(coal->coal_vqs[i].vqn) / 2];

> +		rq->dim.state = DIM_START_MEASURE;
> +	}
> +
> +ret:
>  	rtnl_unlock();
>  }
>
Heng Qi Jan. 17, 2024, 3:38 a.m. UTC | #2
在 2024/1/17 上午3:49, Simon Horman 写道:
> On Tue, Jan 16, 2024 at 09:11:32PM +0800, Heng Qi wrote:
>> Currently, when each time the driver attempts to update the coalescing
>> parameters for a vq, it needs to kick the device.
>> The following path is observed:
>>    1. Driver kicks the device;
>>    2. After the device receives the kick, CPU scheduling occurs and DMA
>>       multiple buffers multiple times;
>>    3. The device completes processing and replies with a response.
>>
>> When large-queue devices issue multiple requests and kick the device
>> frequently, this often interrupt the work of the device-side CPU.
>> In addition, each vq request is processed separately, causing more
>> delays for the CPU to wait for the DMA request to complete.
>>
>> These interruptions and overhead will strain the CPU responsible for
>> controlling the path of the DPU, especially in multi-device and
>> large-queue scenarios.
>>
>> To solve the above problems, we internally tried batch request,
>> which merges requests from multiple queues and sends them at once.
>> We conservatively tested 8 queue commands and sent them together.
>> The DPU processing efficiency can be improved by 8 times, which
>> greatly eases the DPU's support for multi-device and multi-queue DIM.
>>
>> Suggested-by: Xiaoming Zhao <zxm377917@alibaba-inc.com>
>> Signed-off-by: Heng Qi <hengqi@linux.alibaba.com>
> ...
>
>> @@ -3546,16 +3552,32 @@ static void virtnet_rx_dim_work(struct work_struct *work)
>>   		update_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
>>   		if (update_moder.usec != rq->intr_coal.max_usecs ||
>>   		    update_moder.pkts != rq->intr_coal.max_packets) {
>> -			err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, qnum,
>> -							       update_moder.usec,
>> -							       update_moder.pkts);
>> -			if (err)
>> -				pr_debug("%s: Failed to send dim parameters on rxq%d\n",
>> -					 dev->name, qnum);
>> -			dim->state = DIM_START_MEASURE;
>> +			coal->coal_vqs[j].vqn = cpu_to_le16(rxq2vq(i));
>> +			coal->coal_vqs[j].coal.max_usecs = cpu_to_le32(update_moder.usec);
>> +			coal->coal_vqs[j].coal.max_packets = cpu_to_le32(update_moder.pkts);
>> +			rq->intr_coal.max_usecs = update_moder.usec;
>> +			rq->intr_coal.max_packets = update_moder.pkts;
>> +			j++;
>>   		}
>>   	}
>>   
>> +	if (!j)
>> +		goto ret;
>> +
>> +	coal->num_entries = cpu_to_le32(j);
>> +	sg_init_one(&sgs, coal, sizeof(struct virtnet_batch_coal) +
>> +		    j * sizeof(struct virtio_net_ctrl_coal_vq));
>> +	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
>> +				  VIRTIO_NET_CTRL_NOTF_COAL_VQS_SET,
>> +				  &sgs))
>> +		dev_warn(&vi->vdev->dev, "Failed to add dim command\n.");
>> +
>> +	for (i = 0; i < j; i++) {
>> +		rq = &vi->rq[(coal->coal_vqs[i].vqn) / 2];
> Hi Heng Qi,
>
> The type of .vqn is __le16, but here it is used as an
> integer in host byte order. Perhaps this should be (completely untested!):
>
> 		rq = &vi->rq[le16_to_cpu(coal->coal_vqs[i].vqn) / 2];

Hi Simon,

Thanks for the catch, I will check this out.

>
>> +		rq->dim.state = DIM_START_MEASURE;
>> +	}
>> +
>> +ret:
>>   	rtnl_unlock();
>>   }
>>
diff mbox series

Patch

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index f6ac3e7..e4305ad 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -133,6 +133,11 @@  struct virtnet_interrupt_coalesce {
 	u32 max_usecs;
 };
 
+struct virtnet_batch_coal {
+	__le32 num_entries;
+	struct virtio_net_ctrl_coal_vq coal_vqs[];
+};
+
 /* The dma information of pages allocated at a time. */
 struct virtnet_rq_dma {
 	dma_addr_t addr;
@@ -321,6 +326,7 @@  struct virtnet_info {
 	bool rx_dim_enabled;
 
 	/* Interrupt coalescing settings */
+	struct virtnet_batch_coal *batch_coal;
 	struct virtnet_interrupt_coalesce intr_coal_tx;
 	struct virtnet_interrupt_coalesce intr_coal_rx;
 
@@ -3520,9 +3526,10 @@  static void virtnet_rx_dim_work(struct work_struct *work)
 	struct receive_queue *rq = container_of(dim,
 			struct receive_queue, dim);
 	struct virtnet_info *vi = rq->vq->vdev->priv;
-	struct net_device *dev = vi->dev;
 	struct dim_cq_moder update_moder;
-	int i, qnum, err;
+	struct virtnet_batch_coal *coal = vi->batch_coal;
+	struct scatterlist sgs;
+	int i, j = 0;
 
 	if (!rtnl_trylock()) {
 		schedule_work(&dim->work);
@@ -3538,7 +3545,6 @@  static void virtnet_rx_dim_work(struct work_struct *work)
 	for (i = 0; i < vi->curr_queue_pairs; i++) {
 		rq = &vi->rq[i];
 		dim = &rq->dim;
-		qnum = rq - vi->rq;
 
 		if (!rq->dim_enabled)
 			continue;
@@ -3546,16 +3552,32 @@  static void virtnet_rx_dim_work(struct work_struct *work)
 		update_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
 		if (update_moder.usec != rq->intr_coal.max_usecs ||
 		    update_moder.pkts != rq->intr_coal.max_packets) {
-			err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, qnum,
-							       update_moder.usec,
-							       update_moder.pkts);
-			if (err)
-				pr_debug("%s: Failed to send dim parameters on rxq%d\n",
-					 dev->name, qnum);
-			dim->state = DIM_START_MEASURE;
+			coal->coal_vqs[j].vqn = cpu_to_le16(rxq2vq(i));
+			coal->coal_vqs[j].coal.max_usecs = cpu_to_le32(update_moder.usec);
+			coal->coal_vqs[j].coal.max_packets = cpu_to_le32(update_moder.pkts);
+			rq->intr_coal.max_usecs = update_moder.usec;
+			rq->intr_coal.max_packets = update_moder.pkts;
+			j++;
 		}
 	}
 
+	if (!j)
+		goto ret;
+
+	coal->num_entries = cpu_to_le32(j);
+	sg_init_one(&sgs, coal, sizeof(struct virtnet_batch_coal) +
+		    j * sizeof(struct virtio_net_ctrl_coal_vq));
+	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
+				  VIRTIO_NET_CTRL_NOTF_COAL_VQS_SET,
+				  &sgs))
+		dev_warn(&vi->vdev->dev, "Failed to add dim command\n.");
+
+	for (i = 0; i < j; i++) {
+		rq = &vi->rq[(coal->coal_vqs[i].vqn) / 2];
+		rq->dim.state = DIM_START_MEASURE;
+	}
+
+ret:
 	rtnl_unlock();
 }
 
@@ -4380,7 +4402,7 @@  static int virtnet_find_vqs(struct virtnet_info *vi)
 
 static int virtnet_alloc_queues(struct virtnet_info *vi)
 {
-	int i;
+	int i, len;
 
 	if (vi->has_cvq) {
 		vi->ctrl = kzalloc(sizeof(*vi->ctrl), GFP_KERNEL);
@@ -4396,6 +4418,12 @@  static int virtnet_alloc_queues(struct virtnet_info *vi)
 	if (!vi->rq)
 		goto err_rq;
 
+	len = sizeof(struct virtnet_batch_coal) +
+	      vi->max_queue_pairs * sizeof(struct virtio_net_ctrl_coal_vq);
+	vi->batch_coal = kzalloc(len, GFP_KERNEL);
+	if (!vi->batch_coal)
+		goto err_coal;
+
 	INIT_DELAYED_WORK(&vi->refill, refill_work);
 	for (i = 0; i < vi->max_queue_pairs; i++) {
 		vi->rq[i].pages = NULL;
@@ -4418,6 +4446,8 @@  static int virtnet_alloc_queues(struct virtnet_info *vi)
 
 	return 0;
 
+err_coal:
+	kfree(vi->rq);
 err_rq:
 	kfree(vi->sq);
 err_sq:
@@ -4902,6 +4932,8 @@  static void virtnet_remove(struct virtio_device *vdev)
 
 	net_failover_destroy(vi->failover);
 
+	kfree(vi->batch_coal);
+
 	remove_vq_common(vi);
 
 	free_netdev(vi->dev);
diff --git a/include/uapi/linux/virtio_net.h b/include/uapi/linux/virtio_net.h
index cc65ef0..df6d112 100644
--- a/include/uapi/linux/virtio_net.h
+++ b/include/uapi/linux/virtio_net.h
@@ -394,6 +394,7 @@  struct virtio_net_ctrl_coal_rx {
 #define VIRTIO_NET_CTRL_NOTF_COAL_RX_SET		1
 #define VIRTIO_NET_CTRL_NOTF_COAL_VQ_SET		2
 #define VIRTIO_NET_CTRL_NOTF_COAL_VQ_GET		3
+#define VIRTIO_NET_CTRL_NOTF_COAL_VQS_SET		4
 
 struct virtio_net_ctrl_coal {
 	__le32 max_packets;