[1/1] virtio-blk: add num_io_queues module parameter

Message ID 20210830120023.22202-1-mgurtovoy@nvidia.com (mailing list archive)
State New, archived
Series [1/1] virtio-blk: add num_io_queues module parameter

Commit Message

Max Gurtovoy Aug. 30, 2021, noon UTC
Sometimes a user would like to control the number of I/O queues created
for a block device, for example to limit the memory footprint of
virtio-blk devices.

Signed-off-by: Max Gurtovoy <mgurtovoy@nvidia.com>
---
 drivers/block/virtio_blk.c | 26 +++++++++++++++++++++++++-
 1 file changed, 25 insertions(+), 1 deletion(-)

Comments

Christoph Hellwig Aug. 30, 2021, 4:48 p.m. UTC | #1
On Mon, Aug 30, 2021 at 03:00:23PM +0300, Max Gurtovoy wrote:
> +static int virtblk_queue_count_set(const char *val,
> +		const struct kernel_param *kp)
> +{
> +	unsigned int n;
> +	int ret;
> +
> +	ret = kstrtouint(val, 10, &n);
> +	if (ret != 0 || n > nr_cpu_ids)
> +		return -EINVAL;
> +	return param_set_uint(val, kp);
> +}


This can use param_set_uint_minmax().
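
A minimal sketch of that suggestion (the lower bound of 1 is an
assumption, not something stated in the thread): param_set_uint_minmax()
combines the kstrtouint() parse and the range check, so the open-coded
version above collapses to a single call.

static int virtblk_queue_count_set(const char *val,
		const struct kernel_param *kp)
{
	/* sketch: accept only values in [1, nr_cpu_ids] */
	return param_set_uint_minmax(val, kp, 1, nr_cpu_ids);
}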
Michael S. Tsirkin Aug. 30, 2021, 9:48 p.m. UTC | #2
On Mon, Aug 30, 2021 at 03:00:23PM +0300, Max Gurtovoy wrote:
> Sometimes a user would like to control the number of I/O queues created
> for a block device, for example to limit the memory footprint of
> virtio-blk devices.
> 
> Signed-off-by: Max Gurtovoy <mgurtovoy@nvidia.com>


Hmm. It's already limited by # of CPUs... Why not just limit
from the hypervisor side? What's the actual use-case here?
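
(For context: on the hypervisor side QEMU already exposes a per-device
cap, e.g. the num-queues property of virtio-blk-pci, which bounds how
many virtqueues the guest sees in the first place.)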

> ---
>  drivers/block/virtio_blk.c | 26 +++++++++++++++++++++++++-
>  1 file changed, 25 insertions(+), 1 deletion(-)
> 
> diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
> index e574fbf5e6df..77e8468e8593 100644
> --- a/drivers/block/virtio_blk.c
> +++ b/drivers/block/virtio_blk.c
> @@ -24,6 +24,28 @@
>  /* The maximum number of sg elements that fit into a virtqueue */
>  #define VIRTIO_BLK_MAX_SG_ELEMS 32768
>  
> +static int virtblk_queue_count_set(const char *val,
> +		const struct kernel_param *kp)
> +{
> +	unsigned int n;
> +	int ret;
> +
> +	ret = kstrtouint(val, 10, &n);
> +	if (ret != 0 || n > nr_cpu_ids)
> +		return -EINVAL;
> +	return param_set_uint(val, kp);
> +}
> +
> +static const struct kernel_param_ops queue_count_ops = {
> +	.set = virtblk_queue_count_set,
> +	.get = param_get_uint,
> +};
> +
> +static unsigned int num_io_queues;
> +module_param_cb(num_io_queues, &queue_count_ops, &num_io_queues, 0644);
> +MODULE_PARM_DESC(num_io_queues,
> +		 "Number of IO virt queues to use for blk device.");
> +
>  static int major;
>  static DEFINE_IDA(vd_index_ida);
>  
> @@ -501,7 +523,9 @@ static int init_vq(struct virtio_blk *vblk)
>  	if (err)
>  		num_vqs = 1;
>  
> -	num_vqs = min_t(unsigned int, nr_cpu_ids, num_vqs);
> +	num_vqs = min_t(unsigned int,
> +			min_not_zero(num_io_queues, nr_cpu_ids),
> +			num_vqs);
>  
>  	vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL);
>  	if (!vblk->vqs)
> -- 
> 2.18.1
Max Gurtovoy Aug. 30, 2021, 11:12 p.m. UTC | #3
On 8/31/2021 12:48 AM, Michael S. Tsirkin wrote:
> On Mon, Aug 30, 2021 at 03:00:23PM +0300, Max Gurtovoy wrote:
>> Sometimes a user would like to control the number of I/O queues created
>> for a block device, for example to limit the memory footprint of
>> virtio-blk devices.
>>
>> Signed-off-by: Max Gurtovoy <mgurtovoy@nvidia.com>
>
> Hmm. It's already limited by # of CPUs... Why not just limit
> from the hypervisor side? What's the actual use-case here?

Limiting and minimizing resource allocation is a real use case.

The CPU count today might be 64 or 128, and a HW virtio-blk device might
expose that many queues (or at least 32).

But it's a waste to use all of them, since the device may reach its max
IOPS with fewer queues. Multiply this by 16 or 32 devices and we get a
lot of memory wasted without a real need.
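
(Rough numbers for scale, with assumed values: a split virtqueue of
depth 128 needs a 128 * 16 B descriptor table plus the avail ring
(6 + 2 * 128 B) and used ring (6 + 8 * 128 B), i.e. about 3.3 KiB of
ring memory per queue, before counting the per-request state blk-mq
allocates for every hardware queue. 128 queues times 32 devices is
4096 such allocations.)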

It's a common configuration in the NVMe-oF connect command, and similar
knobs exist in other drivers in some variation (null_blk.submit_queues,
ib_srp.ch_count and more).

Another use case is adding some flexibility for QoS.

Also, since we can already set the queue depth, it makes sense to be
able to control the queue count as well.

If there are no objections, I'll address Christoph's comment and send a v2.
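
For reference, with the 0644 permissions above the parameter can be set
at module load time (e.g. modprobe virtio_blk num_io_queues=4, an
illustrative invocation) or written later through
/sys/module/virtio_blk/parameters/num_io_queues; either way it is only
consulted when init_vq() runs, so it affects devices probed after the
change, not ones that are already set up.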

>
>> ---
>>   drivers/block/virtio_blk.c | 26 +++++++++++++++++++++++++-
>>   1 file changed, 25 insertions(+), 1 deletion(-)
>>
>> diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
>> index e574fbf5e6df..77e8468e8593 100644
>> --- a/drivers/block/virtio_blk.c
>> +++ b/drivers/block/virtio_blk.c
>> @@ -24,6 +24,28 @@
>>   /* The maximum number of sg elements that fit into a virtqueue */
>>   #define VIRTIO_BLK_MAX_SG_ELEMS 32768
>>   
>> +static int virtblk_queue_count_set(const char *val,
>> +		const struct kernel_param *kp)
>> +{
>> +	unsigned int n;
>> +	int ret;
>> +
>> +	ret = kstrtouint(val, 10, &n);
>> +	if (ret != 0 || n > nr_cpu_ids)
>> +		return -EINVAL;
>> +	return param_set_uint(val, kp);
>> +}
>> +
>> +static const struct kernel_param_ops queue_count_ops = {
>> +	.set = virtblk_queue_count_set,
>> +	.get = param_get_uint,
>> +};
>> +
>> +static unsigned int num_io_queues;
>> +module_param_cb(num_io_queues, &queue_count_ops, &num_io_queues, 0644);
>> +MODULE_PARM_DESC(num_io_queues,
>> +		 "Number of IO virt queues to use for blk device.");
>> +
>>   static int major;
>>   static DEFINE_IDA(vd_index_ida);
>>   
>> @@ -501,7 +523,9 @@ static int init_vq(struct virtio_blk *vblk)
>>   	if (err)
>>   		num_vqs = 1;
>>   
>> -	num_vqs = min_t(unsigned int, nr_cpu_ids, num_vqs);
>> +	num_vqs = min_t(unsigned int,
>> +			min_not_zero(num_io_queues, nr_cpu_ids),
>> +			num_vqs);
>>   
>>   	vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL);
>>   	if (!vblk->vqs)
>> -- 
>> 2.18.1

Patch

diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index e574fbf5e6df..77e8468e8593 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -24,6 +24,28 @@ 
 /* The maximum number of sg elements that fit into a virtqueue */
 #define VIRTIO_BLK_MAX_SG_ELEMS 32768
 
+static int virtblk_queue_count_set(const char *val,
+		const struct kernel_param *kp)
+{
+	unsigned int n;
+	int ret;
+
+	ret = kstrtouint(val, 10, &n);
+	if (ret != 0 || n > nr_cpu_ids)
+		return -EINVAL;
+	return param_set_uint(val, kp);
+}
+
+static const struct kernel_param_ops queue_count_ops = {
+	.set = virtblk_queue_count_set,
+	.get = param_get_uint,
+};
+
+static unsigned int num_io_queues;
+module_param_cb(num_io_queues, &queue_count_ops, &num_io_queues, 0644);
+MODULE_PARM_DESC(num_io_queues,
+		 "Number of IO virt queues to use for blk device.");
+
 static int major;
 static DEFINE_IDA(vd_index_ida);
 
@@ -501,7 +523,9 @@ static int init_vq(struct virtio_blk *vblk)
 	if (err)
 		num_vqs = 1;
 
-	num_vqs = min_t(unsigned int, nr_cpu_ids, num_vqs);
+	num_vqs = min_t(unsigned int,
+			min_not_zero(num_io_queues, nr_cpu_ids),
+			num_vqs);
 
 	vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL);
 	if (!vblk->vqs)
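
To make the clamp above concrete, here is a small userspace sketch (not
part of the patch) that re-implements the kernel's min_not_zero() and
prints what init_vq() would end up with; the CPU count and the number of
queues offered by the device are assumed values for illustration.

#include <stdio.h>

/* Userspace stand-in for the kernel's min_not_zero(): the smaller of
 * two values, with 0 treated as "unset".
 */
static unsigned int min_not_zero(unsigned int x, unsigned int y)
{
	if (x == 0)
		return y;
	if (y == 0)
		return x;
	return x < y ? x : y;
}

int main(void)
{
	unsigned int nr_cpu_ids = 64;		/* assumed guest CPU count */
	unsigned int dev_vqs = 128;		/* assumed device queue count */
	unsigned int params[] = { 0, 4 };	/* 0 is the module default */
	int i;

	for (i = 0; i < 2; i++) {
		unsigned int num_io_queues = params[i];
		unsigned int num_vqs = dev_vqs;
		unsigned int cap = min_not_zero(num_io_queues, nr_cpu_ids);

		/* mirrors the patched line in init_vq() */
		if (cap < num_vqs)
			num_vqs = cap;
		printf("num_io_queues=%u -> num_vqs=%u\n",
		       num_io_queues, num_vqs);
	}
	return 0;
}

With num_io_queues left at its default of 0, min_not_zero() falls back
to nr_cpu_ids and the result (64 here) matches the old behaviour; with
num_io_queues=4 the driver allocates only 4 virtqueues.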