[08/15] block: pass a queue_limits argument to blk_alloc_queue

Message ID 20240212064609.1327143-9-hch@lst.de (mailing list archive)
State New, archived
Series [01/15] block: move max_{open,active}_zones to struct queue_limits

Commit Message

Christoph Hellwig Feb. 12, 2024, 6:46 a.m. UTC
Pass a queue_limits to blk_alloc_queue and apply it after validating and
capping the values using blk_validate_limits.  This will allow allocating
queues with valid queue limits instead of setting the values one at a
time later.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 block/blk-core.c | 26 ++++++++++++++++++--------
 block/blk-mq.c   |  7 ++++---
 block/genhd.c    |  5 +++--
 3 files changed, 25 insertions(+), 13 deletions(-)
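
For illustration, this is roughly what a caller of the new interface looks like (a hypothetical driver sketch, not part of this series; the limit values are made up). An on-stack queue_limits describes the device up front, and blk_alloc_queue() validates it before the queue is ever visible:

	/*
	 * Hypothetical caller sketch: fill in the device's limits first
	 * and let blk_alloc_queue() validate and apply them, instead of
	 * setting q->limits fields one at a time after allocation.
	 */
	struct queue_limits lim = {
		.logical_block_size	= 4096,	/* example values only */
		.max_hw_sectors		= 2048,
	};
	struct request_queue *q;

	q = blk_alloc_queue(&lim, NUMA_NO_NODE);
	if (IS_ERR(q))
		return PTR_ERR(q);	/* invalid limits or allocation failure */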

Comments

Damien Le Moal Feb. 12, 2024, 7:26 a.m. UTC | #1
On 2/12/24 15:46, Christoph Hellwig wrote:
> Pass a queue_limits to blk_alloc_queue and apply it after validating and
> capping the values using blk_validate_limits.  This will allow allocating
> queues with valid queue limits instead of setting the values one at a
> time later.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>

Looks good to me.

Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
Hannes Reinecke Feb. 12, 2024, 7:32 a.m. UTC | #2
On 2/12/24 07:46, Christoph Hellwig wrote:
> Pass a queue_limits to blk_alloc_queue and apply it after validating and
> capping the values using blk_validate_limits.  This will allow allocating
> queues with valid queue limits instead of setting the values one at a
> time later.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
> [...]
> -struct request_queue *blk_alloc_queue(int node_id)
> +struct request_queue *blk_alloc_queue(struct queue_limits *lim, int node_id)
> [...]
Ah, here it is.

Please move the declaration from patch #4 to this one.

Cheers,

Hannes
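
For reference, the declaration Hannes is referring to matches the new definition in this patch and would read (presumably in block/blk.h):

	struct request_queue *blk_alloc_queue(struct queue_limits *lim,
			int node_id);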

Patch

diff --git a/block/blk-core.c b/block/blk-core.c
index cb56724a8dfb25..a16b5abdbbf56f 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -394,24 +394,34 @@ static void blk_timeout_work(struct work_struct *work)
 {
 }
 
-struct request_queue *blk_alloc_queue(int node_id)
+struct request_queue *blk_alloc_queue(struct queue_limits *lim, int node_id)
 {
 	struct request_queue *q;
+	int error;
 
 	q = kmem_cache_alloc_node(blk_requestq_cachep, GFP_KERNEL | __GFP_ZERO,
 				  node_id);
 	if (!q)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 
 	q->last_merge = NULL;
 
 	q->id = ida_alloc(&blk_queue_ida, GFP_KERNEL);
-	if (q->id < 0)
+	if (q->id < 0) {
+		error = q->id;
 		goto fail_q;
+	}
 
 	q->stats = blk_alloc_queue_stats();
-	if (!q->stats)
+	if (!q->stats) {
+		error = -ENOMEM;
 		goto fail_id;
+	}
+
+	error = blk_set_default_limits(lim);
+	if (error)
+		goto fail_stats;
+	q->limits = *lim;
 
 	q->node = node_id;
 
@@ -436,12 +446,12 @@ struct request_queue *blk_alloc_queue(int node_id)
 	 * Init percpu_ref in atomic mode so that it's faster to shutdown.
 	 * See blk_register_queue() for details.
 	 */
-	if (percpu_ref_init(&q->q_usage_counter,
+	error = percpu_ref_init(&q->q_usage_counter,
 				blk_queue_usage_counter_release,
-				PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
+				PERCPU_REF_INIT_ATOMIC, GFP_KERNEL);
+	if (error)
 		goto fail_stats;
 
-	blk_set_default_limits(&q->limits);
 	q->nr_requests = BLKDEV_DEFAULT_RQ;
 
 	return q;
@@ -452,7 +462,7 @@ struct request_queue *blk_alloc_queue(int node_id)
 	ida_free(&blk_queue_ida, q->id);
 fail_q:
 	kmem_cache_free(blk_requestq_cachep, q);
-	return NULL;
+	return ERR_PTR(error);
 }
 
 /**
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 6d2f7b5caa01d8..9dd8055cc5246d 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -4086,12 +4086,13 @@ void blk_mq_release(struct request_queue *q)
 static struct request_queue *blk_mq_init_queue_data(struct blk_mq_tag_set *set,
 		void *queuedata)
 {
+	struct queue_limits lim = { };
 	struct request_queue *q;
 	int ret;
 
-	q = blk_alloc_queue(set->numa_node);
-	if (!q)
-		return ERR_PTR(-ENOMEM);
+	q = blk_alloc_queue(&lim, set->numa_node);
+	if (IS_ERR(q))
+		return q;
 	q->queuedata = queuedata;
 	ret = blk_mq_init_allocated_queue(set, q);
 	if (ret) {
diff --git a/block/genhd.c b/block/genhd.c
index d74fb5b4ae6818..7a8fd57c51f73c 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -1393,11 +1393,12 @@ struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id,
 
 struct gendisk *__blk_alloc_disk(int node, struct lock_class_key *lkclass)
 {
+	struct queue_limits lim = { };
 	struct request_queue *q;
 	struct gendisk *disk;
 
-	q = blk_alloc_queue(node);
-	if (!q)
+	q = blk_alloc_queue(&lim, node);
+	if (IS_ERR(q))
 		return NULL;
 
 	disk = __alloc_disk_node(q, node, lkclass);
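
A note on the error-return convention adopted here: blk_alloc_queue() now reports failures via the kernel's ERR_PTR scheme, which encodes a negative errno in the pointer value itself. Simplified versions of the helpers involved (the real definitions, with annotations such as __must_check, live in include/linux/err.h):

	#define MAX_ERRNO	4095

	static inline void *ERR_PTR(long error)
	{
		return (void *)error;		/* negative errno as pointer */
	}

	static inline long PTR_ERR(const void *ptr)
	{
		return (long)ptr;		/* recover the errno */
	}

	static inline bool IS_ERR(const void *ptr)
	{
		/* error pointers occupy the top MAX_ERRNO addresses */
		return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
	}

This is why blk_mq_init_queue_data() above can simply return q on failure: the encoded errno travels with the pointer.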