diff --git a/block/blk-core.c b/block/blk-core.c
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -392,6 +392,22 @@ void blk_stop_queue(struct request_queue *q)
}
EXPORT_SYMBOL(blk_stop_queue);
+void blk_unquiesce_timeout(struct request_queue *q)
+{
+ blk_mark_timeout_quiesce(q, false);
+ mod_timer(&q->timeout, jiffies + q->rq_timeout);
+}
+EXPORT_SYMBOL(blk_unquiesce_timeout);
+
+void blk_quiesce_timeout(struct request_queue *q)
+{
+ blk_mark_timeout_quiesce(q, true);
+
+ del_timer_sync(&q->timeout);
+ cancel_work_sync(&q->timeout_work);
+}
+EXPORT_SYMBOL(blk_quiesce_timeout);
+
/**
* blk_sync_queue - cancel any pending callbacks on a queue
* @q: the queue
@@ -412,8 +428,7 @@ EXPORT_SYMBOL(blk_stop_queue);
*/
void blk_sync_queue(struct request_queue *q)
{
- del_timer_sync(&q->timeout);
- cancel_work_sync(&q->timeout_work);
+ blk_quiesce_timeout(q);
if (q->mq_ops) {
struct blk_mq_hw_ctx *hctx;
@@ -425,6 +440,8 @@ void blk_sync_queue(struct request_queue *q)
} else {
cancel_delayed_work_sync(&q->delay_work);
}
+
+ blk_mark_timeout_quiesce(q, false);
}
EXPORT_SYMBOL(blk_sync_queue);
diff --git a/block/blk-mq.c b/block/blk-mq.c
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -917,6 +917,15 @@ static void blk_mq_timeout_work(struct work_struct *work)
};
struct blk_mq_hw_ctx *hctx;
int i;
+ bool timeout_off;
+ unsigned long flags;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ timeout_off = q->timeout_off;
+ spin_unlock_irqrestore(q->queue_lock, flags);
+
+ if (timeout_off)
+ return;
/* A deadlock might occur if a request is stuck requiring a
* timeout at the same time a queue freeze is waiting
diff --git a/block/blk-timeout.c b/block/blk-timeout.c
--- a/block/blk-timeout.c
+++ b/block/blk-timeout.c
@@ -136,12 +136,15 @@ void blk_timeout_work(struct work_struct *work)
spin_lock_irqsave(q->queue_lock, flags);
+ if (q->timeout_off)
+ goto exit;
+
list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list)
blk_rq_check_expired(rq, &next, &next_set);
if (next_set)
mod_timer(&q->timeout, round_jiffies_up(next));
-
+exit:
spin_unlock_irqrestore(q->queue_lock, flags);
}
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -584,6 +584,7 @@ struct request_queue {
struct timer_list timeout;
struct work_struct timeout_work;
struct list_head timeout_list;
+ bool timeout_off;
struct list_head icq_list;
#ifdef CONFIG_BLK_CGROUP
@@ -1017,6 +1018,18 @@ extern void blk_execute_rq(struct request_queue *, struct gendisk *,
extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
struct request *, int, rq_end_io_fn *);
+static inline void blk_mark_timeout_quiesce(struct request_queue *q, bool quiesce)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ q->timeout_off = quiesce;
+ spin_unlock_irqrestore(q->queue_lock, flags);
+}
+
+extern void blk_quiesce_timeout(struct request_queue *q);
+extern void blk_unquiesce_timeout(struct request_queue *q);
+
int blk_status_to_errno(blk_status_t status);
blk_status_t errno_to_blk_status(int errno);
The current way can't drain the timeout completely, because mod_timer()
may be called from the timeout work function itself, i.e. from the very
work that is being synced:

	del_timer_sync(&q->timeout);
	cancel_work_sync(&q->timeout_work);

Fix this by introducing a 'timeout_off' flag: once it is set, both the
timer function and the timeout work bail out early, so neither can
re-arm the other. This simple approach turns out to work.

Also introduce blk_quiesce_timeout() and blk_unquiesce_timeout() for
draining timeouts, which is needed by NVMe.

Cc: James Smart <james.smart@broadcom.com>
Cc: Bart Van Assche <bart.vanassche@wdc.com>
Cc: Jianchao Wang <jianchao.w.wang@oracle.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Sagi Grimberg <sagi@grimberg.me>
Cc: linux-nvme@lists.infradead.org
Cc: Laurence Oberman <loberman@redhat.com>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
---
 block/blk-core.c       | 21 +++++++++++++++++++--
 block/blk-mq.c         |  9 +++++++++
 block/blk-timeout.c    |  5 ++++-
 include/linux/blkdev.h | 13 +++++++++++++
 4 files changed, 45 insertions(+), 3 deletions(-)
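
For reviewers, below is a minimal sketch of how a driver could use the
new pair around a controller reset. example_reset_ctrl() and the reset
steps in it are hypothetical; only blk_quiesce_timeout() and
blk_unquiesce_timeout() come from this patch:

	#include <linux/blkdev.h>

	/*
	 * Hypothetical usage sketch, not part of this patch: bracket a
	 * controller reset so that the timeout timer and the timeout
	 * work are fully drained and cannot re-arm each other while
	 * in-flight requests are being torn down.
	 */
	static void example_reset_ctrl(struct request_queue *q)
	{
		/* Sets q->timeout_off, then syncs both the timer and
		 * the timeout work; the flag keeps either one from
		 * re-arming the other in the meantime. */
		blk_quiesce_timeout(q);

		/* ... fail or requeue in-flight requests, reset HW ... */

		/* Clears q->timeout_off and re-arms the timer so that
		 * requests submitted after the reset are covered again. */
		blk_unquiesce_timeout(q);
	}

Note that blk_sync_queue() clears timeout_off itself but deliberately
does not re-arm the timer, whereas blk_unquiesce_timeout() does, since
a queue being unquiesced is expected to keep processing requests.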