@@ -2130,6 +2130,7 @@ static struct elv_fs_entry cfq_attrs[] = {
ELV_ATTR(slice_async),
#ifdef CONFIG_GROUP_IOSCHED
ELV_ATTR(group_idle),
+ ELV_ATTR(fairness),
#endif
__ATTR_NULL
};
@@ -893,6 +893,8 @@ static void elv_ioq_served(struct io_queue *ioq, unsigned long served)
allocated_slice = elv_prio_to_slice(ioq->efqd, ioq);
+ queue_charge = group_charge = served;
+
/*
* We don't want to charge more than the allocated slice, otherwise this
* queue can miss one dispatch round, doubling max latencies. On the
@@ -900,16 +902,15 @@ static void elv_ioq_served(struct io_queue *ioq, unsigned long served)
* we stick to the CFQ theme of a queue losing its share if it does not
* use the slice and moving to the back of the service tree (almost).
*/
- queue_charge = allocated_slice;
+ if (!ioq->efqd->fairness)
+ queue_charge = allocated_slice;
/*
* Group is charged the real time consumed so that it does not lose
* its fair share.
*/
- if (served > allocated_slice)
+ if (!ioq->efqd->fairness && group_charge > allocated_slice)
group_charge = allocated_slice;
- else
- group_charge = served;
entity_served(&ioq->entity, served, queue_charge, group_charge,
ioq->nr_sectors);
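
The charging rule above is easiest to see side by side. Below is a minimal
userspace sketch of the two modes, mirroring the semantics of this hunk
(fairness on: charge real service time at both levels; fairness off: queue
charged the full allocated slice, group capped at it). All names are
illustrative, not the kernel's.

#include <stdio.h>

struct charge {
	unsigned long queue;
	unsigned long group;
};

/* Mirror of the hunk's logic: charge 'served' by default, then cap */
static struct charge compute_charge(unsigned long served,
				    unsigned long allocated_slice,
				    int fairness)
{
	struct charge c = { .queue = served, .group = served };

	if (!fairness) {
		/* queue always pays the full slice, used or not */
		c.queue = allocated_slice;
		/* group pays real time, but never more than the slice */
		if (c.group > allocated_slice)
			c.group = allocated_slice;
	}
	return c;
}

int main(void)
{
	struct charge c = compute_charge(120, 100, 0);
	printf("fairness=0: queue=%lu group=%lu\n", c.queue, c.group);
	c = compute_charge(120, 100, 1);
	printf("fairness=1: queue=%lu group=%lu\n", c.queue, c.group);
	return 0;
}

With fairness set, a queue that overshoots its slice pays for the overshoot
at both levels, which is what lets the wait-for-completion logic further
down preserve per-queue fairness.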
@@ -951,6 +952,8 @@ SHOW_FUNCTION(elv_slice_sync_show, efqd->elv_slice[1], 1);
EXPORT_SYMBOL(elv_slice_sync_show);
SHOW_FUNCTION(elv_slice_async_show, efqd->elv_slice[0], 1);
EXPORT_SYMBOL(elv_slice_async_show);
+SHOW_FUNCTION(elv_fairness_show, efqd->fairness, 0);
+EXPORT_SYMBOL(elv_fairness_show);
#undef SHOW_FUNCTION
#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
@@ -975,6 +978,8 @@ STORE_FUNCTION(elv_slice_sync_store, &efqd->elv_slice[1], 1, UINT_MAX, 1);
EXPORT_SYMBOL(elv_slice_sync_store);
STORE_FUNCTION(elv_slice_async_store, &efqd->elv_slice[0], 1, UINT_MAX, 1);
EXPORT_SYMBOL(elv_slice_async_store);
+STORE_FUNCTION(elv_fairness_store, &efqd->fairness, 0, 1, 0);
+EXPORT_SYMBOL(elv_fairness_store);
#undef STORE_FUNCTION
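
The macro bodies are not part of this diff, so the following is only a rough
userspace model of what a CFQ-style SHOW_FUNCTION/STORE_FUNCTION pair
presumably generates for fairness: parse the value, clamp it to the [0, 1]
bounds passed to STORE_FUNCTION, and store it without unit conversion
(__CONV == 0). All names are stand-ins.

#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>

static unsigned int fairness;	/* stands in for efqd->fairness */

static ssize_t fairness_store(const char *buf, size_t count)
{
	unsigned long v = strtoul(buf, NULL, 10);

	/* clamp to the MIN/MAX passed to STORE_FUNCTION: 0 and 1 */
	if (v > 1)
		v = 1;
	fairness = v;
	return count;
}

static ssize_t fairness_show(char *buf, size_t size)
{
	return snprintf(buf, size, "%u\n", fairness);
}

int main(void)
{
	char out[8];

	fairness_store("1", 1);
	fairness_show(out, sizeof(out));
	printf("fairness = %s", out);
	return 0;
}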
void elv_schedule_dispatch(struct request_queue *q)
@@ -2687,6 +2692,17 @@ void *elv_select_ioq(struct request_queue *q, int force)
}
expire:
+ if (efqd->fairness && !force && ioq && ioq->dispatched) {
+ /*
+ * If there are requests dispatched from this queue, don't
+ * dispatch requests from a new queue till all the requests from
+ * this queue have completed.
+ */
+ elv_log_ioq(efqd, ioq, "select: wait for requests to finish"
+ " disp=%lu", ioq->dispatched);
+ ioq = NULL;
+ goto keep_queue;
+ }
elv_slice_expired(q);
new_queue:
ioq = elv_set_active_ioq(q, new_ioq);
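
In effect the expire path now has a third outcome besides "keep the active
queue" and "switch": dispatch nothing and wait for completions. Here is a
toy model of that gate with illustrative names (the real code reaches the
same result by jumping to keep_queue with ioq set to NULL).

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

struct queue_model {
	int dispatched;		/* dispatched but not yet completed */
};

static struct queue_model *select_queue(struct queue_model *active,
					struct queue_model *next,
					bool fairness, bool force)
{
	/* fairness gate: drain in-flight requests before switching */
	if (fairness && !force && active && active->dispatched)
		return NULL;	/* dispatch nothing for now */
	return next;		/* expire 'active', activate 'next' */
}

int main(void)
{
	struct queue_model a = { .dispatched = 2 }, b = { .dispatched = 0 };

	assert(select_queue(&a, &b, true, false) == NULL);	/* wait */
	assert(select_queue(&a, &b, false, false) == &b);	/* switch */
	assert(select_queue(&a, &b, true, true) == &b);		/* forced */
	return 0;
}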
@@ -2839,6 +2855,10 @@ void elv_ioq_completed_request(struct request_queue *q, struct request *rq)
goto done;
}
+ /* Wait for requests from this queue to finish */
+ if (efqd->fairness && elv_ioq_nr_dispatched(ioq))
+ goto done;
+
/* Expire the queue */
elv_slice_expired(q);
goto done;
@@ -2849,7 +2869,7 @@ void elv_ioq_completed_request(struct request_queue *q, struct request *rq)
* If this is the last queue in the group and we did not
* decide to idle on queue, idle on group.
*/
- if (elv_iog_should_idle(ioq) && !ioq->dispatched
+ if (elv_iog_should_idle(ioq) && !elv_ioq_nr_dispatched(ioq)
&& !ioq_is_idling(ioq)) {
/*
* If queue has used up its slice, wait for the
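
elv_ioq_nr_dispatched() is not defined in this hunk; given how it replaces
the direct ->dispatched test on the group-idle check, it is presumably a
trivial accessor along these lines (an assumption, not taken from the
patch):

static inline int elv_ioq_nr_dispatched(struct io_queue *ioq)
{
	return ioq->dispatched;	/* assumed: in-flight request count */
}

Using the accessor in both the completion path and the select path keeps
the two fairness gates testing the same in-flight count.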
@@ -192,6 +192,12 @@ struct elv_fq_data {
/* Fallback dummy ioq for extreme OOM conditions */
struct io_queue oom_ioq;
+
+ /*
+ * If set to 1, wait for all requests from the current queue to
+ * complete before a new queue is scheduled in
+ */
+ unsigned int fairness;
};
/* Logging facilities. */
@@ -451,7 +457,9 @@ extern ssize_t elv_slice_sync_store(struct elevator_queue *q, const char *name,
extern ssize_t elv_slice_async_show(struct elevator_queue *q, char *name);
extern ssize_t elv_slice_async_store(struct elevator_queue *q, const char *name,
size_t count);
-
+extern ssize_t elv_fairness_show(struct elevator_queue *q, char *name);
+extern ssize_t elv_fairness_store(struct elevator_queue *q, const char *name,
+ size_t count);
/* Functions used by elevator.c */
extern struct elv_fq_data *elv_alloc_fq_data(struct request_queue *q,
struct elevator_queue *e);
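
Once wired up, the tunable appears next to the other elevator attributes in
sysfs. A minimal sketch of enabling it from userspace; the device name is
illustrative and the path assumes the usual /sys/block/<dev>/queue/iosched/
layout:

#include <stdio.h>

int main(void)
{
	/* illustrative device; adjust to a disk using this scheduler */
	const char *path = "/sys/block/sda/queue/iosched/fairness";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return 1;
	}
	/* 1 = wait for completions before scheduling in a new queue */
	fputs("1\n", f);
	fclose(f);
	return 0;
}

An equivalent one-liner from a shell is echo 1 into the same file.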