[v4,05/10] block: Serialize queue freezing and blk_pre_runtime_suspend()

Message ID 20180804000325.3610-6-bart.vanassche@wdc.com
State New, archived
Series blk-mq: Enable runtime power management

Commit Message

Bart Van Assche Aug. 4, 2018, 12:03 a.m. UTC
Serialize these operations because a later patch will add code into
blk_pre_runtime_suspend() that must not run concurrently with queue
freezing or unfreezing.

Signed-off-by: Bart Van Assche <bart.vanassche@wdc.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Jianchao Wang <jianchao.w.wang@oracle.com>
Cc: Ming Lei <ming.lei@redhat.com>
Cc: Johannes Thumshirn <jthumshirn@suse.de>
Cc: Alan Stern <stern@rowland.harvard.edu>
---
 block/blk-core.c       |  5 +++++
 block/blk-mq.c         |  3 +++
 block/blk-pm.c         | 44 ++++++++++++++++++++++++++++++++++++++++++
 include/linux/blk-pm.h |  6 ++++++
 include/linux/blkdev.h |  5 +++++
 5 files changed, 63 insertions(+)

Comments

Ming Lei Aug. 4, 2018, 10:23 a.m. UTC | #1
On Sat, Aug 4, 2018 at 8:03 AM, Bart Van Assche <bart.vanassche@wdc.com> wrote:
> Serialize these operations because a later patch will add code into
> blk_pre_runtime_suspend() that must not run concurrently with queue
> freezing or unfreezing.
>
> [...]
>
> diff --git a/block/blk-mq.c b/block/blk-mq.c
> index 8b23ae34d949..b1882a3a5216 100644
> --- a/block/blk-mq.c
> +++ b/block/blk-mq.c
> @@ -9,6 +9,7 @@
>  #include <linux/backing-dev.h>
>  #include <linux/bio.h>
>  #include <linux/blkdev.h>
> +#include <linux/blk-pm.h>
>  #include <linux/kmemleak.h>
>  #include <linux/mm.h>
>  #include <linux/init.h>
> @@ -138,6 +139,7 @@ void blk_freeze_queue_start(struct request_queue *q)
>  {
>         int freeze_depth;
>
> +       blk_pm_runtime_lock(q);
>         freeze_depth = atomic_inc_return(&q->mq_freeze_depth);
>         if (freeze_depth == 1) {
>                 percpu_ref_kill(&q->q_usage_counter);
> @@ -201,6 +203,7 @@ void blk_mq_unfreeze_queue(struct request_queue *q)
>                 percpu_ref_reinit(&q->q_usage_counter);
>                 wake_up_all(&q->mq_freeze_wq);
>         }
> +       blk_pm_runtime_unlock(q);
>  }

From the user's point of view, it isn't reasonable to prevent runtime suspend
from happening while a queue is frozen. That period can be fairly long, and it
is a perfect opportunity to suspend the device, since no I/O is possible
during it.


Thanks,
Ming Lei
Bart Van Assche Aug. 6, 2018, 2:19 p.m. UTC | #2
On Sat, 2018-08-04 at 18:23 +0800, Ming Lei wrote:
> From the user's point of view, it isn't reasonable to prevent runtime suspend
> from happening while a queue is frozen. That period can be fairly long, and it
> is a perfect opportunity to suspend the device, since no I/O is possible
> during it.

Hello Ming,

I will look into reducing the scope of the code that is protected by
blk_pm_runtime_lock() / blk_pm_runtime_unlock() to the code that runs in
blk_freeze_queue_start() under "if (freeze_depth == 1)".

Bart.
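
A minimal sketch of what that narrower critical section could look like,
assuming the 4.18-era body of blk_freeze_queue_start() and assuming the lock
only needs to cover the code that actually transitions the queue into the
frozen state (hypothetical code, not part of this series; the unlock calls in
blk_mq_unfreeze_queue(), blk_set_queue_dying() and blk_cleanup_queue() would
then no longer be needed):

void blk_freeze_queue_start(struct request_queue *q)
{
	int freeze_depth;

	freeze_depth = atomic_inc_return(&q->mq_freeze_depth);
	if (freeze_depth == 1) {
		/*
		 * Hypothetical narrower scope: exclude runtime PM only
		 * while the queue transitions into the frozen state,
		 * instead of from freeze until unfreeze.
		 */
		blk_pm_runtime_lock(q);
		percpu_ref_kill(&q->q_usage_counter);
		if (q->mq_ops)
			blk_mq_run_hw_queues(q, false);
		blk_pm_runtime_unlock(q);
	}
}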

Patch

diff --git a/block/blk-core.c b/block/blk-core.c
index 03cff7445dee..59382c758155 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -17,6 +17,7 @@
 #include <linux/bio.h>
 #include <linux/blkdev.h>
 #include <linux/blk-mq.h>
+#include <linux/blk-pm.h>
 #include <linux/highmem.h>
 #include <linux/mm.h>
 #include <linux/kernel_stat.h>
@@ -696,6 +697,7 @@ void blk_set_queue_dying(struct request_queue *q)
 	 * prevent I/O from crossing blk_queue_enter().
 	 */
 	blk_freeze_queue_start(q);
+	blk_pm_runtime_unlock(q);
 
 	if (q->mq_ops)
 		blk_mq_wake_waiters(q);
@@ -756,6 +758,7 @@ void blk_cleanup_queue(struct request_queue *q)
 	 * prevent that q->request_fn() gets invoked after draining finished.
 	 */
 	blk_freeze_queue(q);
+	blk_pm_runtime_unlock(q);
 	spin_lock_irq(lock);
 	queue_flag_set(QUEUE_FLAG_DEAD, q);
 	spin_unlock_irq(lock);
@@ -1045,6 +1048,8 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id,
 #ifdef CONFIG_BLK_DEV_IO_TRACE
 	mutex_init(&q->blk_trace_mutex);
 #endif
+	blk_pm_init(q);
+
 	mutex_init(&q->sysfs_lock);
 	spin_lock_init(&q->__queue_lock);
 
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 8b23ae34d949..b1882a3a5216 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -9,6 +9,7 @@
 #include <linux/backing-dev.h>
 #include <linux/bio.h>
 #include <linux/blkdev.h>
+#include <linux/blk-pm.h>
 #include <linux/kmemleak.h>
 #include <linux/mm.h>
 #include <linux/init.h>
@@ -138,6 +139,7 @@ void blk_freeze_queue_start(struct request_queue *q)
 {
 	int freeze_depth;
 
+	blk_pm_runtime_lock(q);
 	freeze_depth = atomic_inc_return(&q->mq_freeze_depth);
 	if (freeze_depth == 1) {
 		percpu_ref_kill(&q->q_usage_counter);
@@ -201,6 +203,7 @@ void blk_mq_unfreeze_queue(struct request_queue *q)
 		percpu_ref_reinit(&q->q_usage_counter);
 		wake_up_all(&q->mq_freeze_wq);
 	}
+	blk_pm_runtime_unlock(q);
 }
 EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
 
diff --git a/block/blk-pm.c b/block/blk-pm.c
index 9b636960d285..2a4632d0be4b 100644
--- a/block/blk-pm.c
+++ b/block/blk-pm.c
@@ -3,6 +3,45 @@
 #include <linux/blk-pm.h>
 #include <linux/blkdev.h>
 #include <linux/pm_runtime.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+
+/*
+ * Initialize the request queue members used by blk_pm_runtime_lock() and
+ * blk_pm_runtime_unlock().
+ */
+void blk_pm_init(struct request_queue *q)
+{
+	spin_lock_init(&q->rpm_lock);
+	init_waitqueue_head(&q->rpm_wq);
+	q->rpm_owner = NULL;
+	q->rpm_nesting_level = 0;
+}
+
+void blk_pm_runtime_lock(struct request_queue *q)
+{
+	might_sleep();
+
+	spin_lock(&q->rpm_lock);
+	wait_event_exclusive_cmd(q->rpm_wq,
+			q->rpm_owner == NULL || q->rpm_owner == current,
+			spin_unlock(&q->rpm_lock), spin_lock(&q->rpm_lock));
+	if (q->rpm_owner == NULL)
+		q->rpm_owner = current;
+	q->rpm_nesting_level++;
+	spin_unlock(&q->rpm_lock);
+}
+
+void blk_pm_runtime_unlock(struct request_queue *q)
+{
+	spin_lock(&q->rpm_lock);
+	WARN_ON_ONCE(q->rpm_nesting_level <= 0);
+	if (--q->rpm_nesting_level == 0) {
+		q->rpm_owner = NULL;
+		wake_up(&q->rpm_wq);
+	}
+	spin_unlock(&q->rpm_lock);
+}
 
 /**
  * blk_pm_runtime_init - Block layer runtime PM initialization routine
@@ -68,6 +107,8 @@ int blk_pre_runtime_suspend(struct request_queue *q)
 	if (!q->dev)
 		return ret;
 
+	blk_pm_runtime_lock(q);
+
 	spin_lock_irq(q->queue_lock);
 	if (q->nr_pending) {
 		ret = -EBUSY;
@@ -76,6 +117,9 @@ int blk_pre_runtime_suspend(struct request_queue *q)
 		q->rpm_status = RPM_SUSPENDING;
 	}
 	spin_unlock_irq(q->queue_lock);
+
+	blk_pm_runtime_unlock(q);
+
 	return ret;
 }
 EXPORT_SYMBOL(blk_pre_runtime_suspend);
diff --git a/include/linux/blk-pm.h b/include/linux/blk-pm.h
index b80c65aba249..aafcc7877e53 100644
--- a/include/linux/blk-pm.h
+++ b/include/linux/blk-pm.h
@@ -10,6 +10,9 @@ struct request_queue;
  * block layer runtime pm functions
  */
 #ifdef CONFIG_PM
+extern void blk_pm_init(struct request_queue *q);
+extern void blk_pm_runtime_lock(struct request_queue *q);
+extern void blk_pm_runtime_unlock(struct request_queue *q);
 extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev);
 extern int blk_pre_runtime_suspend(struct request_queue *q);
 extern void blk_post_runtime_suspend(struct request_queue *q, int err);
@@ -17,6 +20,9 @@ extern void blk_pre_runtime_resume(struct request_queue *q);
 extern void blk_post_runtime_resume(struct request_queue *q, int err);
 extern void blk_set_runtime_active(struct request_queue *q);
 #else
+static inline void blk_pm_init(struct request_queue *q) {}
+static inline void blk_pm_runtime_lock(struct request_queue *q) {}
+static inline void blk_pm_runtime_unlock(struct request_queue *q) {}
 static inline void blk_pm_runtime_init(struct request_queue *q,
 				       struct device *dev) {}
 #endif
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 2ef38739d645..72d569218231 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -548,6 +548,11 @@ struct request_queue {
 	struct device		*dev;
 	int			rpm_status;
 	unsigned int		nr_pending;
+	wait_queue_head_t	rpm_wq;
+	/* rpm_lock protects rpm_owner and rpm_nesting_level */
+	spinlock_t		rpm_lock;
+	struct task_struct	*rpm_owner;
+	int			rpm_nesting_level;
 #endif
 
 	/*
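
The blk_pm_runtime_lock() / blk_pm_runtime_unlock() pair added above behaves
like a reentrant, task-owned lock: rpm_owner records the task that holds the
lock and rpm_nesting_level counts recursive acquisitions, so the owning task
can take the lock again while other tasks sleep on rpm_wq as exclusive
waiters. An editorial illustration of these semantics (not code from the
series):

	blk_pm_runtime_lock(q);   /* rpm_owner = current, nesting level 1 */
	blk_pm_runtime_lock(q);   /* same task re-enters, nesting level 2 */
	blk_pm_runtime_unlock(q); /* nesting level 1, lock still held */
	blk_pm_runtime_unlock(q); /* nesting level 0, rpm_owner cleared,
				     wake_up() wakes one exclusive waiter */

Note the asymmetric pairing in the callers: blk_freeze_queue_start() acquires
the lock and blk_mq_unfreeze_queue() releases it, while paths that freeze a
queue without unfreezing it (blk_set_queue_dying() and blk_cleanup_queue())
drop the lock explicitly right after freezing.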