The only remaining user unconditionally drops and reacquires the lock,
which means we really don't need any additional (conditional)
annotation.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 block/blk-throttle.c |  1 -
 block/blk.h          | 13 -------------
 2 files changed, 14 deletions(-)

diff --git a/block/blk-throttle.c b/block/blk-throttle.c
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -2353,7 +2353,6 @@ void blk_throtl_drain(struct request_queue *q)
 	struct bio *bio;
 	int rw;
 
-	queue_lockdep_assert_held(q);
 	rcu_read_lock();
 
 	/*
diff --git a/block/blk.h b/block/blk.h
--- a/block/blk.h
+++ b/block/blk.h
@@ -35,19 +35,6 @@ extern struct kmem_cache *blk_requestq_cachep;
 extern struct kobj_type blk_queue_ktype;
 extern struct ida blk_queue_ida;
 
-/*
- * @q->queue_lock is set while a queue is being initialized. Since we know
- * that no other threads access the queue object before @q->queue_lock has
- * been set, it is safe to manipulate queue flags without holding the
- * queue_lock if @q->queue_lock == NULL. See also blk_alloc_queue_node() and
- * blk_init_allocated_queue().
- */
-static inline void queue_lockdep_assert_held(struct request_queue *q)
-{
-	if (q->queue_lock)
-		lockdep_assert_held(q->queue_lock);
-}
-
 static inline struct blk_flush_queue *
 blk_get_flush_queue(struct request_queue *q, struct blk_mq_ctx *ctx)
 {
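
For anyone who doesn't have the history paged in: the removed helper made
lockdep_assert_held() conditional because early queue initialization runs
before q->queue_lock is published, and flag manipulation there must not
trip the assertion. Below is a minimal user-space sketch of that
conditional-assert pattern; it uses a pthread mutex plus a bool as a
stand-in for lockdep's real ownership tracking, and every name in it
(fake_queue, fq_lockdep_assert_held, ...) is hypothetical rather than a
kernel API.

/*
 * User-space analogue of the removed queue_lockdep_assert_held():
 * only assert lock ownership once the lock pointer has been
 * published.  All names here are hypothetical.
 */
#include <assert.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_queue {
	pthread_mutex_t *queue_lock;	/* NULL while still initializing */
	bool locked;			/* stand-in for lockdep's tracking */
};

static void fq_lockdep_assert_held(struct fake_queue *q)
{
	if (q->queue_lock)		/* the conditional part */
		assert(q->locked);
}

int main(void)
{
	pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	struct fake_queue q = { .queue_lock = NULL, .locked = false };

	/* During initialization queue_lock is NULL, so this is a no-op
	 * even though nothing is held. */
	fq_lockdep_assert_held(&q);

	/* Once the lock is published the assertion has teeth. */
	q.queue_lock = &lock;
	pthread_mutex_lock(q.queue_lock);
	q.locked = true;
	fq_lockdep_assert_held(&q);
	pthread_mutex_unlock(q.queue_lock);
	q.locked = false;

	printf("conditional assert pattern exercised\n");
	return 0;
}

This should build with something like "cc -pthread sketch.c".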
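
And the commit message's argument, in the same hypothetical terms: a
drain-style function that unconditionally drops and reacquires
q->queue_lock dereferences the pointer no matter what, so it can never
legitimately run while queue_lock is still NULL, and the conditional
form degenerates into a plain assertion. A sketch of that
drop-and-reacquire shape (again user-space, again made-up names):

/*
 * Why the last caller needed no conditional annotation: it drops and
 * reacquires the lock unconditionally, so the NULL-lock window the
 * removed helper guarded against cannot apply.  Hypothetical names.
 */
#include <assert.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_queue {
	pthread_mutex_t *queue_lock;
	bool locked;
};

/* Rough shape of blk_throtl_drain(): entered with the lock held,
 * releases it around work that must run unlocked, then retakes it. */
static void fq_drain(struct fake_queue *q)
{
	assert(q->locked);	/* plain, unconditional assertion */

	q->locked = false;
	pthread_mutex_unlock(q->queue_lock);	/* would crash if NULL */

	/* ... work that must not hold the lock ... */

	pthread_mutex_lock(q->queue_lock);
	q->locked = true;
}

int main(void)
{
	pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	struct fake_queue q = { .queue_lock = &lock, .locked = false };

	pthread_mutex_lock(q.queue_lock);
	q.locked = true;
	fq_drain(&q);
	pthread_mutex_unlock(q.queue_lock);
	q.locked = false;

	printf("drop/reacquire pattern exercised\n");
	return 0;
}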