@@ -300,13 +300,26 @@ EXPORT_SYMBOL(blk_sync_queue);
* Description:
* See @blk_run_queue. This variant must be called with the queue lock
* held and interrupts disabled.
+ * The device driver will be notified of an urgent request
+ * pending under the following conditions:
+ * 1. The driver and the current scheduler support urgent request handling
+ * 2. There is an urgent request pending in the scheduler
+ * 3. There is no urgent request already in flight, i.e. the previously
+ *    notified urgent request has completed (!q->notified_urgent)
*/
void __blk_run_queue(struct request_queue *q)
{
if (unlikely(blk_queue_stopped(q)))
return;
- q->request_fn(q);
+ if (!q->notified_urgent &&
+ q->elevator->type->ops.elevator_is_urgent_fn &&
+ q->urgent_request_fn &&
+ q->elevator->type->ops.elevator_is_urgent_fn(q)) {
+ q->notified_urgent = true;
+ q->urgent_request_fn(q);
+ } else {
+ q->request_fn(q);
+ }
}
EXPORT_SYMBOL(__blk_run_queue);
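For illustration, a driver that opts in provides an urgent_request_fn that runs in the same context as request_fn (queue lock held, interrupts disabled). The sketch below is only an assumption about what such a handler could look like; struct example_host, its thread field and the use of q->queuedata are placeholders, not part of this patch:

static void example_urgent_request_fn(struct request_queue *q)
{
	struct example_host *host = q->queuedata;

	/*
	 * Same calling context as request_fn: queue lock held and
	 * interrupts disabled.  Wake the issuing thread so it can
	 * preempt the ongoing, lower-priority transfer; the urgent
	 * request itself is still fetched via blk_fetch_request().
	 */
	wake_up_process(host->thread);
}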
@@ -2232,8 +2245,17 @@ struct request *blk_fetch_request(struct request_queue *q)
struct request *rq;
rq = blk_peek_request(q);
- if (rq)
+ if (rq) {
+ /*
+ * Assumption: the next request fetched from the scheduler after we
+ * notified "urgent request pending" is the urgent one
+ */
+ if (q->notified_urgent && !q->dispatched_urgent) {
+ q->dispatched_urgent = true;
+ (void)blk_mark_rq_urgent(rq);
+ }
blk_start_request(rq);
+ }
return rq;
}
EXPORT_SYMBOL(blk_fetch_request);
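For context on the marking above: a conventional request_fn drains the queue with blk_fetch_request(), so after an "urgent request pending" notification the first request fetched is the one that gets REQ_ATOM_URGENT set. A minimal, hypothetical fetch loop (example_issue() is a placeholder, not part of this patch):

static void example_request_fn(struct request_queue *q)
{
	struct request *rq;

	/* Called with the queue lock held */
	while ((rq = blk_fetch_request(q)) != NULL) {
		/*
		 * After an urgent notification, the first request
		 * fetched here has already been marked urgent by the
		 * block layer before blk_start_request() started it.
		 */
		example_issue(rq);
	}
}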
@@ -100,6 +100,18 @@ void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn)
EXPORT_SYMBOL_GPL(blk_queue_lld_busy);
/**
+ * blk_urgent_request() - Set an urgent_request handler function for the queue
+ * @q: queue
+ * @fn: handler to call when an urgent request is pending
+ */
+void blk_urgent_request(struct request_queue *q, request_fn_proc *fn)
+{
+ q->urgent_request_fn = fn;
+}
+EXPORT_SYMBOL(blk_urgent_request);
+
+/**
* blk_set_default_limits - reset limits to default values
* @lim: the queue_limits structure to reset
*
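A driver would typically register the handler right after creating its queue, next to the regular request_fn; the example_* names below are placeholders, not part of this patch:

	q = blk_init_queue(example_request_fn, &host->lock);
	if (!q)
		return -ENOMEM;
	blk_urgent_request(q, example_urgent_request_fn);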
@@ -42,6 +42,7 @@ void blk_add_timer(struct request *);
*/
enum rq_atomic_flags {
REQ_ATOM_COMPLETE = 0,
+ REQ_ATOM_URGENT = 1,
};
/*
@@ -58,6 +59,16 @@ static inline void blk_clear_rq_complete(struct request *rq)
clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
}
+static inline int blk_mark_rq_urgent(struct request *rq)
+{
+ return test_and_set_bit(REQ_ATOM_URGENT, &rq->atomic_flags);
+}
+
+static inline void blk_clear_rq_urgent(struct request *rq)
+{
+ clear_bit(REQ_ATOM_URGENT, &rq->atomic_flags);
+}
+
/*
* Internal elevator interface
*/
@@ -756,6 +756,11 @@ void elv_completed_request(struct request_queue *q, struct request *rq)
{
struct elevator_queue *e = q->elevator;
+ if (test_bit(REQ_ATOM_URGENT, &rq->atomic_flags)) {
+ q->notified_urgent = false;
+ q->dispatched_urgent = false;
+ blk_clear_rq_urgent(rq);
+ }
/*
* request is released from the driver, io must be done
*/
@@ -303,6 +303,7 @@ struct request_queue {
struct request_list root_rl;
request_fn_proc *request_fn;
+ request_fn_proc *urgent_request_fn;
make_request_fn *make_request_fn;
prep_rq_fn *prep_rq_fn;
unprep_rq_fn *unprep_rq_fn;
@@ -391,6 +392,8 @@ struct request_queue {
#endif
struct queue_limits limits;
+ bool notified_urgent;
+ bool dispatched_urgent;
/*
* sg stuff
@@ -894,6 +897,7 @@ extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
extern struct request_queue *blk_init_allocated_queue(struct request_queue *,
request_fn_proc *, spinlock_t *);
+extern void blk_urgent_request(struct request_queue *q, request_fn_proc *fn);
extern void blk_cleanup_queue(struct request_queue *);
extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
extern void blk_queue_bounce_limit(struct request_queue *, u64);
@@ -24,6 +24,7 @@ typedef int (elevator_dispatch_fn) (struct request_queue *, int);
typedef void (elevator_add_req_fn) (struct request_queue *, struct request *);
typedef int (elevator_reinsert_req_fn) (struct request_queue *,
struct request *);
+typedef bool (elevator_is_urgent_fn) (struct request_queue *);
typedef struct request *(elevator_request_list_fn) (struct request_queue *, struct request *);
typedef void (elevator_completed_req_fn) (struct request_queue *, struct request *);
typedef int (elevator_may_queue_fn) (struct request_queue *, int);
@@ -50,6 +51,7 @@ struct elevator_ops
elevator_dispatch_fn *elevator_dispatch_fn;
elevator_add_req_fn *elevator_add_req_fn;
elevator_reinsert_req_fn *elevator_reinsert_req_fn;
+ elevator_is_urgent_fn *elevator_is_urgent_fn;
elevator_activate_req_fn *elevator_activate_req_fn;
elevator_deactivate_req_fn *elevator_deactivate_req_fn;
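An I/O scheduler opts in by implementing elevator_is_urgent_fn and setting it as .elevator_is_urgent_fn in its struct elevator_ops. A minimal sketch, assuming the scheduler keeps a hypothetical nr_urgent count of queued urgent requests in its private data (example_elv_data and nr_urgent are placeholders):

static bool example_elv_is_urgent(struct request_queue *q)
{
	struct example_elv_data *ed = q->elevator->elevator_data;

	/* True while at least one urgent request waits in the scheduler */
	return ed->nr_urgent > 0;
}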