diff mbox

[2/4] block: Introduce blk_wait_if_quiesced() and blk_finish_wait_if_quiesced()

Message ID 20180108185011.30018-3-bart.vanassche@wdc.com (mailing list archive)
State Not Applicable
Headers show

Commit Message

Bart Van Assche Jan. 8, 2018, 6:50 p.m. UTC
Introduce functions that allow block drivers to wait while a request
queue is in the quiesced state (blk-mq) or in the stopped state (legacy
block layer). The next patch will add calls to these functions in the
SCSI core.

Signed-off-by: Bart Van Assche <bart.vanassche@wdc.com>
Cc: Martin K. Petersen <martin.petersen@oracle.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Hannes Reinecke <hare@suse.de>
Cc: Johannes Thumshirn <jthumshirn@suse.de>
Cc: Ming Lei <ming.lei@redhat.com>
---
 block/blk-core.c       |  1 +
 block/blk-mq.c         | 61 ++++++++++++++++++++++++++++++++++++++++++++++++++
 include/linux/blk-mq.h |  2 ++
 3 files changed, 64 insertions(+)

Comments

Hannes Reinecke Jan. 9, 2018, 6:41 a.m. UTC | #1
On 01/08/2018 07:50 PM, Bart Van Assche wrote:
> Introduce functions that allow block drivers to wait while a request
> queue is in the quiesced state (blk-mq) or in the stopped state (legacy
> block layer). The next patch will add calls to these functions in the
> SCSI core.
> 
> Signed-off-by: Bart Van Assche <bart.vanassche@wdc.com>
> Cc: Martin K. Petersen <martin.petersen@oracle.com>
> Cc: Christoph Hellwig <hch@lst.de>
> Cc: Hannes Reinecke <hare@suse.de>
> Cc: Johannes Thumshirn <jthumshirn@suse.de>
> Cc: Ming Lei <ming.lei@redhat.com>
> ---
>  block/blk-core.c       |  1 +
>  block/blk-mq.c         | 61 ++++++++++++++++++++++++++++++++++++++++++++++++++
>  include/linux/blk-mq.h |  2 ++
>  3 files changed, 64 insertions(+)
> 
> diff --git a/block/blk-core.c b/block/blk-core.c
> index 605599a2ab3b..d70ff53e6505 100644
> --- a/block/blk-core.c
> +++ b/block/blk-core.c
> @@ -285,6 +285,7 @@ void blk_start_queue(struct request_queue *q)
>  	WARN_ON_ONCE(q->mq_ops);
>  
>  	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
> +	wake_up_all(&q->mq_wq);
>  	__blk_run_queue(q);
>  }
>  EXPORT_SYMBOL(blk_start_queue);
> diff --git a/block/blk-mq.c b/block/blk-mq.c
> index 8118890fb66f..c79b102680fe 100644
> --- a/block/blk-mq.c
> +++ b/block/blk-mq.c
> @@ -248,11 +248,72 @@ void blk_mq_unquiesce_queue(struct request_queue *q)
>  	queue_flag_clear(QUEUE_FLAG_QUIESCED, q);
>  	spin_unlock_irqrestore(q->queue_lock, flags);
>  
> +	wake_up_all(&q->mq_wq);
> +
>  	/* dispatch requests which are inserted during quiescing */
>  	blk_mq_run_hw_queues(q, true);
>  }
>  EXPORT_SYMBOL_GPL(blk_mq_unquiesce_queue);
>  
> +/**
> + * blk_wait_if_quiesced() - wait if a queue is quiesced (blk-mq) or stopped (legacy block layer)
> + * @q: Request queue pointer.
> + *
> + * Some block drivers, e.g. the SCSI core, can bypass the block layer core
> + * request submission mechanism. Surround such code with blk_wait_if_quiesced()
> + * / blk_finish_wait_if_quiesced() to avoid that request submission can happen
> + * while a queue is quiesced or stopped.
> + *
> + * Returns with the RCU read lock held (blk-mq) or with q->queue_lock held
> + * (legacy block layer).
> + *
> + * Note: this function does not support block drivers whose .queue_rq()
> + * implementation can sleep (BLK_MQ_F_BLOCKING).
> + */
> +int blk_wait_if_quiesced(struct request_queue *q)
> +{
> +	struct blk_mq_hw_ctx *hctx;
> +	unsigned int i;
> +
> +	might_sleep();
> +
> +	if (q->mq_ops) {
> +		queue_for_each_hw_ctx(q, hctx, i)
> +			WARN_ON(hctx->flags & BLK_MQ_F_BLOCKING);
> +
> +		rcu_read_lock();
> +		while (!blk_queue_dying(q) && blk_queue_quiesced(q)) {
> +			rcu_read_unlock();
> +			wait_event(q->mq_wq, blk_queue_dying(q) ||
> +				   !blk_queue_quiesced(q));
> +			rcu_read_lock();
> +		}
> +	} else {
> +		spin_lock_irq(q->queue_lock);
> +		wait_event_lock_irq(q->mq_wq,
> +				    blk_queue_dying(q) || !blk_queue_stopped(q),
> +				    *q->queue_lock);
> +		q->request_fn_active++;
> +	}
> +	return blk_queue_dying(q) ? -ENODEV : 0;
> +}
> +EXPORT_SYMBOL(blk_wait_if_quiesced);
> +
> +/**
> + * blk_finish_wait_if_quiesced() - counterpart of blk_wait_if_quiesced()
> + * @q: Request queue pointer.
> + */
> +void blk_finish_wait_if_quiesced(struct request_queue *q)
> +{
> +	if (q->mq_ops) {
> +		rcu_read_unlock();
> +	} else {
> +		q->request_fn_active--;
> +		spin_unlock_irq(q->queue_lock);
> +	}
> +}
> +EXPORT_SYMBOL(blk_finish_wait_if_quiesced);
> +
>  void blk_mq_wake_waiters(struct request_queue *q)
>  {
>  	struct blk_mq_hw_ctx *hctx;
> diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
> index 95c9a5c862e2..f6b787bd244e 100644
> --- a/include/linux/blk-mq.h
> +++ b/include/linux/blk-mq.h
> @@ -266,6 +266,8 @@ void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
>  void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
>  void blk_mq_quiesce_queue(struct request_queue *q);
>  void blk_mq_unquiesce_queue(struct request_queue *q);
> +int blk_wait_if_quiesced(struct request_queue *q);
> +void blk_finish_wait_if_quiesced(struct request_queue *q);
>  void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
>  bool blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
>  void blk_mq_run_hw_queues(struct request_queue *q, bool async);
> 
I'm always a bit cautious when having rcu_read_lock() and
rcu_read_unlock() in two separate functions.
Can we make this dependency more explicit by renaming the first function
to blk_start_wait_if_quiesced() and updating the comment to the effect
that both functions must be used in tandem?

Cheers,

Hannes
Bart Van Assche Jan. 10, 2018, 12:23 a.m. UTC | #2
On Tue, 2018-01-09 at 07:41 +0100, Hannes Reinecke wrote:
> I'm always a bit cautious when having rcu_read_lock() and
> rcu_read_unlock() in two separate functions.
> Can we make this dependency more explicit by renaming the first function
> to blk_start_wait_if_quiesced() and updating the comment to the effect
> that both functions must be used in tandem?


Hello Hannes,

That sounds like a good idea to me. I will make these changes and repost this
patch series.

Bart.
diff mbox

Patch

diff --git a/block/blk-core.c b/block/blk-core.c
index 605599a2ab3b..d70ff53e6505 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -285,6 +285,7 @@  void blk_start_queue(struct request_queue *q)
 	WARN_ON_ONCE(q->mq_ops);
 
 	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
+	wake_up_all(&q->mq_wq);
 	__blk_run_queue(q);
 }
 EXPORT_SYMBOL(blk_start_queue);
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 8118890fb66f..c79b102680fe 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -248,11 +248,72 @@  void blk_mq_unquiesce_queue(struct request_queue *q)
 	queue_flag_clear(QUEUE_FLAG_QUIESCED, q);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 
+	wake_up_all(&q->mq_wq);
+
 	/* dispatch requests which are inserted during quiescing */
 	blk_mq_run_hw_queues(q, true);
 }
 EXPORT_SYMBOL_GPL(blk_mq_unquiesce_queue);
 
+/**
+ * blk_wait_if_quiesced() - wait if a queue is quiesced (blk-mq) or stopped (legacy block layer)
+ * @q: Request queue pointer.
+ *
+ * Some block drivers, e.g. the SCSI core, can bypass the block layer core
+ * request submission mechanism. Surround such code with blk_wait_if_quiesced()
+ * / blk_finish_wait_if_quiesced() to avoid that request submission can happen
+ * while a queue is quiesced or stopped.
+ *
+ * Returns with the RCU read lock held (blk-mq) or with q->queue_lock held
+ * (legacy block layer).
+ *
+ * Note: this function does not support block drivers whose .queue_rq()
+ * implementation can sleep (BLK_MQ_F_BLOCKING).
+ */
+int blk_wait_if_quiesced(struct request_queue *q)
+{
+	struct blk_mq_hw_ctx *hctx;
+	unsigned int i;
+
+	might_sleep();
+
+	if (q->mq_ops) {
+		queue_for_each_hw_ctx(q, hctx, i)
+			WARN_ON(hctx->flags & BLK_MQ_F_BLOCKING);
+
+		rcu_read_lock();
+		while (!blk_queue_dying(q) && blk_queue_quiesced(q)) {
+			rcu_read_unlock();
+			wait_event(q->mq_wq, blk_queue_dying(q) ||
+				   !blk_queue_quiesced(q));
+			rcu_read_lock();
+		}
+	} else {
+		spin_lock_irq(q->queue_lock);
+		wait_event_lock_irq(q->mq_wq,
+				    blk_queue_dying(q) || !blk_queue_stopped(q),
+				    *q->queue_lock);
+		q->request_fn_active++;
+	}
+	return blk_queue_dying(q) ? -ENODEV : 0;
+}
+EXPORT_SYMBOL(blk_wait_if_quiesced);
+
+/**
+ * blk_finish_wait_if_quiesced() - counterpart of blk_wait_if_quiesced()
+ * @q: Request queue pointer.
+ */
+void blk_finish_wait_if_quiesced(struct request_queue *q)
+{
+	if (q->mq_ops) {
+		rcu_read_unlock();
+	} else {
+		q->request_fn_active--;
+		spin_unlock_irq(q->queue_lock);
+	}
+}
+EXPORT_SYMBOL(blk_finish_wait_if_quiesced);
+
 void blk_mq_wake_waiters(struct request_queue *q)
 {
 	struct blk_mq_hw_ctx *hctx;
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 95c9a5c862e2..f6b787bd244e 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -266,6 +266,8 @@  void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
 void blk_mq_quiesce_queue(struct request_queue *q);
 void blk_mq_unquiesce_queue(struct request_queue *q);
+int blk_wait_if_quiesced(struct request_queue *q);
+void blk_finish_wait_if_quiesced(struct request_queue *q);
 void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
 bool blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
 void blk_mq_run_hw_queues(struct request_queue *q, bool async);