@@ -430,6 +430,11 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
 		if (success)
 			return 0;
 
+		if (flags & BLK_MQ_REQ_FORCE) {
+			percpu_ref_get(&q->q_usage_counter);
+			return 0;
+		}
+
 		if (flags & BLK_MQ_REQ_NOWAIT)
 			return -EBUSY;
 
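For context: percpu_ref_tryget_live() fails once blk_freeze_queue_start() has killed q_usage_counter, but percpu_ref_get() still works in atomic mode as long as the count has not reached zero. That is the caller-side contract behind the forced path above. A minimal sketch, assuming a hypothetical caller that pins the counter with an extra blk_queue_enter() beforehand (enter_forced is an illustrative name, not from this patch):

#include <linux/blkdev.h>
#include <linux/blk-mq.h>

/*
 * Hypothetical sketch, not part of this patch: the outer reference
 * keeps q_usage_counter above zero, which is exactly what makes the
 * forced enter legal after a freeze has started.
 */
static int enter_forced(struct request_queue *q)
{
	int ret;

	/* pin the counter before any freeze can complete */
	ret = blk_queue_enter(q, 0);
	if (ret)
		return ret;

	/* a freeze may start here, but the counter can't reach zero */

	blk_queue_enter(q, BLK_MQ_REQ_FORCE);	/* cannot fail */

	/* ... do work that needs the queue ... */

	blk_queue_exit(q);	/* drop the forced reference */
	blk_queue_exit(q);	/* drop the pinning reference */
	return 0;
}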
@@ -617,7 +622,8 @@ struct request *blk_get_request(struct request_queue *q, unsigned int op,
 	struct request *req;
 
 	WARN_ON_ONCE(op & REQ_NOWAIT);
-	WARN_ON_ONCE(flags & ~(BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_PREEMPT));
+	WARN_ON_ONCE(flags & ~(BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_PREEMPT |
+			       BLK_MQ_REQ_FORCE));
 
 	req = blk_mq_alloc_request(q, op, flags);
 	if (!IS_ERR(req) && q->mq_ops->initialize_rq_fn)
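With the WARN_ON_ONCE() mask extended, BLK_MQ_REQ_FORCE can be passed through blk_get_request() alongside BLK_MQ_REQ_PREEMPT. A minimal sketch (the helper name and the REQ_OP_DRV_IN choice are illustrative, not from this patch):

#include <linux/blkdev.h>
#include <linux/blk-mq.h>

/*
 * Hypothetical helper: allocate a passthrough request while a freeze
 * is in progress. The caller is responsible for keeping
 * q_usage_counter non-zero across this call.
 */
static struct request *get_preempt_req(struct request_queue *q)
{
	return blk_get_request(q, REQ_OP_DRV_IN,
			       BLK_MQ_REQ_PREEMPT | BLK_MQ_REQ_FORCE);
}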
@@ -448,6 +448,13 @@ enum {
 	BLK_MQ_REQ_INTERNAL	= (__force blk_mq_req_flags_t)(1 << 2),
 	/* set RQF_PREEMPT */
 	BLK_MQ_REQ_PREEMPT	= (__force blk_mq_req_flags_t)(1 << 3),
+
+	/*
+	 * Force request allocation: the caller must make sure that the
+	 * queue can't be frozen completely while the allocation runs.
+	 * This flag only takes effect after a queue freeze has been
+	 * started.
+	 */
+	BLK_MQ_REQ_FORCE	= (__force blk_mq_req_flags_t)(1 << 4),
 };
 
 struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
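Putting the pieces together, one plausible calling sequence (a hypothetical driver-side sketch, not code from this patch) pins q_usage_counter first, starts the freeze, and only then performs the forced allocation, inside the window where the counter is guaranteed non-zero, which is exactly the contract the comment above describes:

#include <linux/blkdev.h>
#include <linux/blk-mq.h>

/* Hypothetical driver-side sequence; names and ops are illustrative. */
static int freeze_and_issue(struct request_queue *q)
{
	struct request *rq;
	int ret;

	/* pin q_usage_counter so the freeze below can't complete */
	ret = blk_queue_enter(q, 0);
	if (ret)
		return ret;

	blk_freeze_queue_start(q);

	/* forced allocation is safe: our pin keeps the counter non-zero */
	rq = blk_get_request(q, REQ_OP_DRV_OUT,
			     BLK_MQ_REQ_PREEMPT | BLK_MQ_REQ_FORCE);
	if (IS_ERR(rq)) {
		ret = PTR_ERR(rq);
	} else {
		/* ... issue rq and wait for completion ... */
		blk_put_request(rq);
		ret = 0;
	}

	blk_queue_exit(q);		/* drop our pin */
	blk_mq_unfreeze_queue(q);
	return ret;
}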