@@ -304,10 +304,18 @@ EXPORT_SYMBOL(blk_sync_queue);
* This variant runs the queue whether or not the queue has been
* stopped. Must be called with the queue lock held and interrupts
* disabled. See also @blk_run_queue.
+ *
+ * Note:
+ * Request handling functions that unlock and relock the queue lock
+ * internally are allowed to invoke blk_run_queue(). Such a call will not
+ * result in a recursive invocation of the request handler. However, before
+ * returning, such request handling functions must either re-examine the
+ * request queue or invoke blk_delay_queue() to ensure that queue
+ * processing does not stop.
*/
inline void __blk_run_queue_uncond(struct request_queue *q)
{
- if (unlikely(blk_queue_dead(q)))
+ if (unlikely(blk_queue_dead(q) || q->request_fn_active))
return;
/*
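The request_fn_active check added above only prevents recursion if the rest of
__blk_run_queue_uncond() (not shown in this hunk) increments the counter around
the ->request_fn() call and decrements it afterwards. The following stand-alone
sketch is a simplified user-space model of that guard, not the kernel code; the
types and names are illustrative only. It shows why a nested run attempt from a
completion path now becomes a harmless no-op instead of a recursive call:

	#include <stdio.h>

	struct queue {
		int dead;
		int request_fn_active;		/* > 0 while ->request_fn() runs */
		void (*request_fn)(struct queue *q);
	};

	/* Models __blk_run_queue_uncond(): bail out if the queue is dead or if
	 * a request_fn() invocation is already on the stack. */
	static void run_queue_uncond(struct queue *q)
	{
		if (q->dead || q->request_fn_active)
			return;

		q->request_fn_active++;
		q->request_fn(q);
		q->request_fn_active--;
	}

	static void demo_request_fn(struct queue *q)
	{
		printf("request_fn: completing a request\n");
		/* The completion path re-runs the queue, as the dm hunk below
		 * now does with blk_run_queue(); with the counter held this is
		 * a no-op rather than a recursive call. */
		run_queue_uncond(q);
	}

	int main(void)
	{
		struct queue q = { .dead = 0, .request_fn_active = 0,
				   .request_fn = demo_request_fn };

		run_queue_uncond(&q);	/* message is printed exactly once */
		return 0;
	}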
@@ -728,14 +728,8 @@ static void rq_completed(struct mapped_device *md, int rw, int run_queue)
if (!md_in_flight(md))
wake_up(&md->wait);
- /*
- * Run this off this callpath, as drivers could invoke end_io while
- * inside their request_fn (and holding the queue lock). Calling
- * back into ->request_fn() could deadlock attempting to grab the
- * queue lock again.
- */
if (run_queue)
- blk_run_queue_async(md->queue);
+ blk_run_queue(md->queue);
/*
* dm_put() must be at the end of this function. See the comment above
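As the note added to __blk_run_queue_uncond() spells out, a request handler
whose unlocked sections can now swallow nested blk_run_queue() calls must
re-examine the queue (or invoke blk_delay_queue()) before returning; otherwise
work queued during the unlocked window, for example by rq_completed() above,
would sit on the queue until something else happens to run it. Below is a
minimal user-space sketch of that looping pattern; the names (pending,
handle_one_unlocked, arrivals) are hypothetical and not kernel API:

	#include <stdio.h>

	struct queue {
		int pending;	/* queued, not yet handled requests */
	};

	static int arrivals = 1;	/* one request shows up while the lock is dropped */

	/* Models the unlocked section of a request handler: while the queue
	 * lock is not held, a completion may queue new work and call
	 * run_queue(), which is now a no-op because the handler is active. */
	static void handle_one_unlocked(struct queue *q)
	{
		if (arrivals > 0) {
			arrivals--;
			q->pending++;
		}
	}

	/* Re-examine the queue after every unlocked section instead of
	 * returning after the first request; otherwise the request queued in
	 * handle_one_unlocked() would never be processed. A real driver could
	 * instead call blk_delay_queue() before returning. */
	static void request_fn(struct queue *q)
	{
		while (q->pending > 0) {
			q->pending--;
			handle_one_unlocked(q);
			printf("handled a request, %d still pending\n",
			       q->pending);
		}
	}

	int main(void)
	{
		struct queue q = { .pending = 1 };

		/* Handles the initial request and the one that arrived during
		 * the unlocked window. */
		request_fn(&q);
		return 0;
	}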