
virtio-net: fill only rx queues which are being used

Message ID: 1366677336-2278-1-git-send-email-sasha.levin@oracle.com (mailing list archive)
State: New, archived

Commit Message

Sasha Levin April 23, 2013, 12:35 a.m. UTC
Due to MQ support we may allocate a whole bunch of rx queues but
never use them. With this patch we'll save the space used by
the receive buffers until they are actually in use:

sh-4.2# free -h
             total       used       free     shared    buffers     cached
Mem:          490M        35M       455M         0B         0B       4.1M
-/+ buffers/cache:        31M       459M
Swap:           0B         0B         0B
sh-4.2# ethtool -L eth0 combined 8
sh-4.2# free -h
             total       used       free     shared    buffers     cached
Mem:          490M       162M       327M         0B         0B       4.1M
-/+ buffers/cache:       158M       331M
Swap:           0B         0B         0B

Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
---
 drivers/net/virtio_net.c | 16 +++++++++++-----
 1 file changed, 11 insertions(+), 5 deletions(-)

Comments

Rusty Russell April 23, 2013, 4:13 a.m. UTC | #1
Sasha Levin <sasha.levin@oracle.com> writes:
> Due to MQ support we may allocate a whole bunch of rx queues but
> never use them. With this patch we'll save the space used by
> the receive buffers until they are actually in use:

Idea is good, implementation needs a tiny tweak:

> @@ -912,8 +913,13 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
>  		dev_warn(&dev->dev, "Fail to set num of queue pairs to %d\n",
>  			 queue_pairs);
>  		return -EINVAL;
> -	} else
> +	} else {
> +		if (queue_pairs > vi->curr_queue_pairs)
> +			for (i = 0; i < queue_pairs; i++)
> +				if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
> +					schedule_delayed_work(&vi->refill, 0);
>  		vi->curr_queue_pairs = queue_pairs;
> +	}
>  
>  	return 0;
>  }

You don't want to refill existing queues, so you don't need the "if".

        for (i = vi->curr_queue_pairs; i < queue_pairs; i++) {
		if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
			schedule_delayed_work(&vi->refill, 0);

We don't free up buffers when we're reducing queues, but I consider that
a corner case.
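
Folding that loop back into the hunk above, the else branch would end up
roughly as follows (a sketch of the suggested respin, not necessarily the
exact final form):

	} else {
		/* Fill only the rx queues that are newly brought into use. */
		for (i = vi->curr_queue_pairs; i < queue_pairs; i++)
			if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
				schedule_delayed_work(&vi->refill, 0);

		vi->curr_queue_pairs = queue_pairs;
	}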

Thanks,
Rusty.
Sasha Levin April 23, 2013, 4:49 a.m. UTC | #2
On 04/23/2013 12:13 AM, Rusty Russell wrote:
> Sasha Levin <sasha.levin@oracle.com> writes:
>> Due to MQ support we may allocate a whole bunch of rx queues but
>> never use them. With this patch we'll save the space used by
>> the receive buffers until they are actually in use:
> 
> Idea is good, implementation needs a tiny tweak:
> 
>> @@ -912,8 +913,13 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
>>  		dev_warn(&dev->dev, "Fail to set num of queue pairs to %d\n",
>>  			 queue_pairs);
>>  		return -EINVAL;
>> -	} else
>> +	} else {
>> +		if (queue_pairs > vi->curr_queue_pairs)
>> +			for (i = 0; i < queue_pairs; i++)
>> +				if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
>> +					schedule_delayed_work(&vi->refill, 0);
>>  		vi->curr_queue_pairs = queue_pairs;
>> +	}
>>  
>>  	return 0;
>>  }
> 
> You don't want to refill existing queues, so you don't need the "if".
> 
>         for (i = vi->curr_queue_pairs; i < queue_pairs; i++) {
> 		if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
> 			schedule_delayed_work(&vi->refill, 0);

That makes more sense, I'll resend.

> We don't free up buffers when we're reducing queues, but I consider that
> a corner case.

It didn't bother anyone up until now, and the spec doesn't state anything
about it - so I preferred to just leave that alone. Unless the ARM folks
would find it useful?
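
For completeness, freeing on shrink would mean detaching the buffers still
posted to the rx queues that drop out of use, roughly the way the driver's
existing teardown path (free_unused_bufs) reclaims them on remove. A
hypothetical sketch, not part of this patch, where queue_pairs is the new,
smaller count:

	for (i = queue_pairs; i < vi->curr_queue_pairs; i++) {
		void *buf;

		/* Reclaim buffers still posted to an rx queue going idle. */
		while ((buf = virtqueue_detach_unused_buf(vi->rq[i].vq)) != NULL) {
			if (vi->mergeable_rx_bufs || vi->big_packets)
				give_pages(&vi->rq[i], buf);
			else
				dev_kfree_skb(buf);
		}
	}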


Thanks,
Sasha
Michael S. Tsirkin April 23, 2013, 7:08 a.m. UTC | #3
On Mon, Apr 22, 2013 at 08:35:36PM -0400, Sasha Levin wrote:
> Due to MQ support we may allocate a whole bunch of rx queues but
> never use them. With this patch we'll save the space used by
> the receive buffers until they are actually in use:
> 
> sh-4.2# free -h
>              total       used       free     shared    buffers     cached
> Mem:          490M        35M       455M         0B         0B       4.1M
> -/+ buffers/cache:        31M       459M
> Swap:           0B         0B         0B
> sh-4.2# ethtool -L eth0 combined 8
> sh-4.2# free -h
>              total       used       free     shared    buffers     cached
> Mem:          490M       162M       327M         0B         0B       4.1M
> -/+ buffers/cache:       158M       331M
> Swap:           0B         0B         0B
> 
> Signed-off-by: Sasha Levin <sasha.levin@oracle.com>

Overall the idea looks fine to me.

I also wonder whether we should enable the multiqueue capability at all
when big buffers are in use; 130M of extra memory seems excessive.
Want to try this on the kvmtools version that has mergeable buffers?
Memory use should be much lower.
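
For context, big versus mergeable receive buffers are chosen at probe time
from the negotiated features; the relevant logic in drivers/net/virtio_net.c
looks roughly like this (paraphrased from memory, so treat it as approximate):

	/* Any guest GSO feature forces large (multi-page) receive buffers. */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) ||
	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN))
		vi->big_packets = true;

	/* With MRG_RXBUF the device merges page-sized buffers per packet,
	 * so each posted rx buffer is a single page. */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
		vi->mergeable_rx_bufs = true;

In the big-buffer case each ring entry gets a chain of roughly
MAX_SKB_FRAGS + 2 pages, which (assuming 256-entry rings) is about where the
extra ~130M above comes from; with mergeable buffers it is one page per entry.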

> ---
>  drivers/net/virtio_net.c | 16 +++++++++++-----
>  1 file changed, 11 insertions(+), 5 deletions(-)
> 
> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> index 6bfc511..4d82d17 100644
> --- a/drivers/net/virtio_net.c
> +++ b/drivers/net/virtio_net.c
> @@ -581,7 +581,7 @@ static void refill_work(struct work_struct *work)
>  	bool still_empty;
>  	int i;
>  
> -	for (i = 0; i < vi->max_queue_pairs; i++) {
> +	for (i = 0; i < vi->curr_queue_pairs; i++) {
>  		struct receive_queue *rq = &vi->rq[i];
>  
>  		napi_disable(&rq->napi);
> @@ -636,7 +636,7 @@ static int virtnet_open(struct net_device *dev)
>  	struct virtnet_info *vi = netdev_priv(dev);
>  	int i;
>  
> -	for (i = 0; i < vi->max_queue_pairs; i++) {
> +	for (i = 0; i < vi->curr_queue_pairs; i++) {
>  		/* Make sure we have some buffers: if oom use wq. */
>  		if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
>  			schedule_delayed_work(&vi->refill, 0);
> @@ -900,6 +900,7 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
>  	struct scatterlist sg;
>  	struct virtio_net_ctrl_mq s;
>  	struct net_device *dev = vi->dev;
> +	int i;
>  
>  	if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
>  		return 0;
> @@ -912,8 +913,13 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
>  		dev_warn(&dev->dev, "Fail to set num of queue pairs to %d\n",
>  			 queue_pairs);
>  		return -EINVAL;
> -	} else
> +	} else {
> +		if (queue_pairs > vi->curr_queue_pairs)
> +			for (i = 0; i < queue_pairs; i++)
> +				if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
> +					schedule_delayed_work(&vi->refill, 0);
>  		vi->curr_queue_pairs = queue_pairs;
> +	}
>  
>  	return 0;
>  }
> @@ -1568,7 +1574,7 @@ static int virtnet_probe(struct virtio_device *vdev)
>  	}
>  
>  	/* Last of all, set up some receive buffers. */
> -	for (i = 0; i < vi->max_queue_pairs; i++) {
> +	for (i = 0; i < vi->curr_queue_pairs; i++) {
>  		try_fill_recv(&vi->rq[i], GFP_KERNEL);
>  
>  		/* If we didn't even get one input buffer, we're useless. */
> @@ -1692,7 +1698,7 @@ static int virtnet_restore(struct virtio_device *vdev)
>  
>  	netif_device_attach(vi->dev);
>  
> -	for (i = 0; i < vi->max_queue_pairs; i++)
> +	for (i = 0; i < vi->curr_queue_pairs; i++)
>  		if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
>  			schedule_delayed_work(&vi->refill, 0);
>  
> -- 
> 1.8.2.1
Will Deacon April 23, 2013, 9:18 a.m. UTC | #4
Hi guys,

On Tue, Apr 23, 2013 at 05:49:23AM +0100, Sasha Levin wrote:
> On 04/23/2013 12:13 AM, Rusty Russell wrote:
> > We don't free up buffers when we're reducing queues, but I consider that
> > a corner case.
> 
> It didn't bother anyone up until now, and the spec doesn't state anything
> about it - so I preferred to just leave that alone. Unless the ARM folks
> would find it useful?

I don't think we're too worried about that. If we're reducing queues, then
presumably we survived up until that point with the larger memory usage, so
leaving the buffers around should be ok.

Will
Sasha Levin April 23, 2013, 2:52 p.m. UTC | #5
On 04/23/2013 03:08 AM, Michael S. Tsirkin wrote:
> On Mon, Apr 22, 2013 at 08:35:36PM -0400, Sasha Levin wrote:
>> Due to MQ support we may allocate a whole bunch of rx queues but
>> never use them. With this patch we'll save the space used by
>> the receive buffers until they are actually in use:
>>
>> sh-4.2# free -h
>>              total       used       free     shared    buffers     cached
>> Mem:          490M        35M       455M         0B         0B       4.1M
>> -/+ buffers/cache:        31M       459M
>> Swap:           0B         0B         0B
>> sh-4.2# ethtool -L eth0 combined 8
>> sh-4.2# free -h
>>              total       used       free     shared    buffers     cached
>> Mem:          490M       162M       327M         0B         0B       4.1M
>> -/+ buffers/cache:       158M       331M
>> Swap:           0B         0B         0B
>>
>> Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
> 
> Overall the idea looks fine to me.
> 
> I also wonder whether we should enable the multiqueue capability at all
> when big buffers are in use; 130M of extra memory seems excessive.
> Want to try this on the kvmtools version that has mergeable buffers?
> Memory use should be much lower.

It is indeed, with mergeable buffers:

sh-4.2# free -h
             total       used       free     shared    buffers     cached
Mem:          490M        18M       471M         0B         0B       4.1M
-/+ buffers/cache:        14M       476M
Swap:           0B         0B         0B
sh-4.2# ethtool -L eth0 combined 8
sh-4.2# free -h
             total       used       free     shared    buffers     cached
Mem:          490M        26M       464M         0B         0B       4.1M
-/+ buffers/cache:        22M       468M
Swap:           0B         0B         0B

(18MB? Nice! :) )


Thanks,
Sasha

Patch

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 6bfc511..4d82d17 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -581,7 +581,7 @@  static void refill_work(struct work_struct *work)
 	bool still_empty;
 	int i;
 
-	for (i = 0; i < vi->max_queue_pairs; i++) {
+	for (i = 0; i < vi->curr_queue_pairs; i++) {
 		struct receive_queue *rq = &vi->rq[i];
 
 		napi_disable(&rq->napi);
@@ -636,7 +636,7 @@  static int virtnet_open(struct net_device *dev)
 	struct virtnet_info *vi = netdev_priv(dev);
 	int i;
 
-	for (i = 0; i < vi->max_queue_pairs; i++) {
+	for (i = 0; i < vi->curr_queue_pairs; i++) {
 		/* Make sure we have some buffers: if oom use wq. */
 		if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
 			schedule_delayed_work(&vi->refill, 0);
@@ -900,6 +900,7 @@  static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
 	struct scatterlist sg;
 	struct virtio_net_ctrl_mq s;
 	struct net_device *dev = vi->dev;
+	int i;
 
 	if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
 		return 0;
@@ -912,8 +913,13 @@  static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
 		dev_warn(&dev->dev, "Fail to set num of queue pairs to %d\n",
 			 queue_pairs);
 		return -EINVAL;
-	} else
+	} else {
+		if (queue_pairs > vi->curr_queue_pairs)
+			for (i = 0; i < queue_pairs; i++)
+				if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
+					schedule_delayed_work(&vi->refill, 0);
 		vi->curr_queue_pairs = queue_pairs;
+	}
 
 	return 0;
 }
@@ -1568,7 +1574,7 @@  static int virtnet_probe(struct virtio_device *vdev)
 	}
 
 	/* Last of all, set up some receive buffers. */
-	for (i = 0; i < vi->max_queue_pairs; i++) {
+	for (i = 0; i < vi->curr_queue_pairs; i++) {
 		try_fill_recv(&vi->rq[i], GFP_KERNEL);
 
 		/* If we didn't even get one input buffer, we're useless. */
@@ -1692,7 +1698,7 @@  static int virtnet_restore(struct virtio_device *vdev)
 
 	netif_device_attach(vi->dev);
 
-	for (i = 0; i < vi->max_queue_pairs; i++)
+	for (i = 0; i < vi->curr_queue_pairs; i++)
 		if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
 			schedule_delayed_work(&vi->refill, 0);