Rename a waitqueue in struct request_queue since the next patch will add
code that uses this waitqueue outside the request queue freezing
implementation.

Signed-off-by: Bart Van Assche <bart.vanassche@wdc.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Hannes Reinecke <hare@suse.de>
Cc: Johannes Thumshirn <jthumshirn@suse.de>
Cc: Ming Lei <ming.lei@redhat.com>
---
 block/blk-core.c       | 10 +++++-----
 block/blk-mq.c         | 10 +++++-----
 include/linux/blkdev.h |  2 +-
 3 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/block/blk-core.c b/block/blk-core.c
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -375,7 +375,7 @@ void blk_clear_preempt_only(struct request_queue *q)
spin_lock_irqsave(q->queue_lock, flags);
queue_flag_clear(QUEUE_FLAG_PREEMPT_ONLY, q);
- wake_up_all(&q->mq_freeze_wq);
+ wake_up_all(&q->mq_wq);
spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL_GPL(blk_clear_preempt_only);
@@ -651,7 +651,7 @@ void blk_set_queue_dying(struct request_queue *q)
}
/* Make blk_queue_enter() reexamine the DYING flag. */
- wake_up_all(&q->mq_freeze_wq);
+ wake_up_all(&q->mq_wq);
}
EXPORT_SYMBOL_GPL(blk_set_queue_dying);
@@ -854,7 +854,7 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
*/
smp_rmb();
- ret = wait_event_interruptible(q->mq_freeze_wq,
+ ret = wait_event_interruptible(q->mq_wq,
(atomic_read(&q->mq_freeze_depth) == 0 &&
(preempt || !blk_queue_preempt_only(q))) ||
blk_queue_dying(q));
@@ -875,7 +875,7 @@ static void blk_queue_usage_counter_release(struct percpu_ref *ref)
struct request_queue *q =
container_of(ref, struct request_queue, q_usage_counter);
- wake_up_all(&q->mq_freeze_wq);
+ wake_up_all(&q->mq_wq);
}
static void blk_rq_timed_out_timer(struct timer_list *t)
@@ -951,7 +951,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
q->bypass_depth = 1;
__set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags);
- init_waitqueue_head(&q->mq_freeze_wq);
+ init_waitqueue_head(&q->mq_wq);
/*
* Init percpu_ref in atomic mode so that it's faster to shutdown.
diff --git a/block/blk-mq.c b/block/blk-mq.c
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -138,16 +138,16 @@ EXPORT_SYMBOL_GPL(blk_freeze_queue_start);
void blk_mq_freeze_queue_wait(struct request_queue *q)
{
- wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
+ wait_event(q->mq_wq, percpu_ref_is_zero(&q->q_usage_counter));
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait);
int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
unsigned long timeout)
{
- return wait_event_timeout(q->mq_freeze_wq,
- percpu_ref_is_zero(&q->q_usage_counter),
- timeout);
+ return wait_event_timeout(q->mq_wq,
+ percpu_ref_is_zero(&q->q_usage_counter),
+ timeout);
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait_timeout);
@@ -186,7 +186,7 @@ void blk_mq_unfreeze_queue(struct request_queue *q)
WARN_ON_ONCE(freeze_depth < 0);
if (!freeze_depth) {
percpu_ref_reinit(&q->q_usage_counter);
- wake_up_all(&q->mq_freeze_wq);
+ wake_up_all(&q->mq_wq);
}
}
EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -613,7 +613,7 @@ struct request_queue {
struct throtl_data *td;
#endif
struct rcu_head rcu_head;
- wait_queue_head_t mq_freeze_wq;
+ wait_queue_head_t mq_wq;
struct percpu_ref q_usage_counter;
struct list_head all_q_node;
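
Note (not part of the patch): the rename leaves the wait_event()/wake_up_all()
pairing itself unchanged; only the name both sides agree on changes. For
reviewers who want that pairing spelled out, here is a rough userspace
analogue sketched with POSIX condition variables — not kernel code, and all
names below are hypothetical:

#include <pthread.h>
#include <stdbool.h>

/*
 * Userspace analogue of the kernel waitqueue pattern touched by this
 * patch: waiters sleep until a condition holds, and whoever changes the
 * state wakes all of them. Here 'wq' plays the role of q->mq_wq and
 * 'frozen' the role of atomic_read(&q->mq_freeze_depth) != 0.
 */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wq = PTHREAD_COND_INITIALIZER;
static bool frozen = true;

static void wait_until_unfrozen(void)		/* cf. blk_queue_enter() */
{
	pthread_mutex_lock(&lock);
	while (frozen)
		pthread_cond_wait(&wq, &lock);
	pthread_mutex_unlock(&lock);
}

static void unfreeze(void)			/* cf. blk_mq_unfreeze_queue() */
{
	pthread_mutex_lock(&lock);
	frozen = false;
	pthread_cond_broadcast(&wq);		/* cf. wake_up_all(&q->mq_wq) */
	pthread_mutex_unlock(&lock);
}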