
block: Ensure that a request queue is dissociated from the cgroup controller

Message ID: ee8defd68976a607c03ee6c0ad4e14217f328db3.camel@wdc.com

Commit Message

Bart Van Assche April 12, 2018, 10:40 p.m. UTC
On Thu, 2018-04-12 at 12:09 -0700, tj@kernel.org wrote:
> On Thu, Apr 12, 2018 at 06:56:26PM +0000, Bart Van Assche wrote:
> > On Thu, 2018-04-12 at 11:11 -0700, tj@kernel.org wrote:
> > > * Right now, blk_queue_enter/exit() doesn't have any annotations.
> > >   IOW, there's no way for paths which need the queue entered to
> > >   actually assert that.  If we're gonna protect more things with
> > >   queue enter/exit, it's gotta be annotated.
> > 
> > Hello Tejun,
> > 
> > One possibility is to check the local CPU's count of q->q_usage_counter at
> > runtime, e.g. using WARN_ON_ONCE(). However, that might add runtime
> > overhead. Another possibility is to follow the example of kernfs and use a
> > lockdep map together with lockdep_assert_held(). There might be other
> > alternatives I have not thought of.
> 
> Oh, I'd just do straight-forward lockdep annotation on
> queue_enter/exit functions and provide the necessary assert helpers.

Hello Tejun,

Is something like the patch below perhaps what you had in mind?
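
One way to provide the assert helpers you mentioned could be a thin wrapper
around the lockdep map added below (the helper name is just illustrative; the
debug_locks test mirrors how lockdep_assert_held() is defined, and a
!CONFIG_LOCKDEP stub would still be needed):

	/* Assert that the caller holds a blk_queue_enter() reference. */
	static inline void blk_assert_queue_entered(struct request_queue *q)
	{
		WARN_ON_ONCE(debug_locks && !lock_is_held(&q->q_enter_map));
	}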

Thanks,

Bart.

Subject: [PATCH] block: Use lockdep to verify whether blk_queue_enter() is
 used when necessary

Suggested-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Bart Van Assche <bart.vanassche@wdc.com>
Cc: Tejun Heo <tj@kernel.org>
---
 block/blk-cgroup.c         |  2 ++
 block/blk-core.c           | 13 ++++++++++++-
 block/blk.h                |  1 +
 include/linux/blk-cgroup.h |  2 ++
 include/linux/blkdev.h     |  1 +
 5 files changed, 18 insertions(+), 1 deletion(-)

Comments

Tejun Heo April 13, 2018, 3:18 p.m. UTC | #1
Hello, Bart.

On Thu, Apr 12, 2018 at 10:40:52PM +0000, Bart Van Assche wrote:
> Is something like the patch below perhaps what you had in mind?

Yeah, exactly.  It'd be really great to have the lockdep asserts added
in the right places.

Thanks.

Patch

diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 1c16694ae145..c34e13e76f90 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -145,6 +145,8 @@ struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
 {
 	struct blkcg_gq *blkg;
 
+	WARN_ON_ONCE(debug_locks && !lock_is_held(&q->q_enter_map));
+
 	/*
 	 * Hint didn't match.  Look up from the radix tree.  Note that the
 	 * hint can only be updated under queue_lock as otherwise @blkg
diff --git a/block/blk-core.c b/block/blk-core.c
index 39308e874ffa..a61dbe7f24a5 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -931,8 +931,13 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
 		}
 		rcu_read_unlock();
 
-		if (success)
+		if (success) {
+			mutex_acquire_nest(&q->q_enter_map, 0, 0,
+					   lock_is_held(&q->q_enter_map) ?
+					   &q->q_enter_map : NULL,
+					   _RET_IP_);
 			return 0;
+		}
 
 		if (flags & BLK_MQ_REQ_NOWAIT)
 			return -EBUSY;
@@ -959,6 +964,7 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
 
 void blk_queue_exit(struct request_queue *q)
 {
+	mutex_release(&q->q_enter_map, 0, _RET_IP_);
 	percpu_ref_put(&q->q_usage_counter);
 }
 
@@ -994,12 +1000,15 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id,
 					   spinlock_t *lock)
 {
 	struct request_queue *q;
+	static struct lock_class_key __key;
 
 	q = kmem_cache_alloc_node(blk_requestq_cachep,
 				gfp_mask | __GFP_ZERO, node_id);
 	if (!q)
 		return NULL;
 
+	lockdep_init_map(&q->q_enter_map, "q_enter_map", &__key, 0);
+
 	q->id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask);
 	if (q->id < 0)
 		goto fail_q;
@@ -2264,6 +2273,8 @@ generic_make_request_checks(struct bio *bio)
 		goto end_io;
 	}
 
+	WARN_ON_ONCE(debug_locks && !lock_is_held(&q->q_enter_map));
+
 	/*
 	 * For a REQ_NOWAIT based request, return -EOPNOTSUPP
 	 * if queue is not a request based queue.
diff --git a/block/blk.h b/block/blk.h
index 7cd64f533a46..26f313331b13 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -145,6 +145,7 @@ static inline void blk_queue_enter_live(struct request_queue *q)
 	 * been established, further references under that same context
 	 * need not check that the queue has been frozen (marked dead).
 	 */
+	mutex_acquire_nest(&q->q_enter_map, 0, 0, &q->q_enter_map, _RET_IP_);
 	percpu_ref_get(&q->q_usage_counter);
 }
 
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index 6c666fd7de3c..52e2e2d9971e 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -266,6 +266,8 @@ static inline struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
 {
 	struct blkcg_gq *blkg;
 
+	WARN_ON_ONCE(debug_locks && !lock_is_held(&q->q_enter_map));
+
 	if (blkcg == &blkcg_root)
 		return q->root_blkg;
 
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 0d22f351a74b..a0b1adbd4406 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -631,6 +631,7 @@ struct request_queue {
 	struct rcu_head		rcu_head;
 	wait_queue_head_t	mq_freeze_wq;
 	struct percpu_ref	q_usage_counter;
+	struct lockdep_map	q_enter_map;	/* lockdep map for queue enter/exit */
 	struct list_head	all_q_node;
 
 	struct blk_mq_tag_set	*tag_set;
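
For reference, the annotation models the usual pairing of blk_queue_enter()
and blk_queue_exit(). A minimal caller sketch, assuming the patch above is
applied (the function name is hypothetical):

	static int example_queue_user(struct request_queue *q)
	{
		int ret = blk_queue_enter(q, 0);

		if (ret)
			return ret;
		/*
		 * q_enter_map is considered held from here on, so the
		 * lock_is_held()-based checks in e.g. __blkg_lookup()
		 * will not warn.
		 */
		blk_queue_exit(q);
		return 0;
	}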