===================================================================
@@ -1212,6 +1212,30 @@ io_group_init_entity(struct io_cgroup *i
entity->my_sched_data = &iog->sched_data;
}
+/* Check if we plan to idle on the group associated with this queue or not */
+int elv_iog_should_idle(struct io_queue *ioq)
+{
+ struct io_group *iog = ioq_to_io_group(ioq);
+ struct elv_fq_data *efqd = ioq->efqd;
+
+ /*
+ * No idling on group if group idle is disabled or idling is disabled
+ * for this group. Currently for root group idling is disabled.
+ */
+ if (!efqd->elv_group_idle || !elv_iog_idle_window(iog))
+ return 0;
+
+ /*
+	 * If this is the last active queue in the group with no requests
+	 * queued, we need to idle on the group before expiring the queue to
+	 * make sure the group does not lose its share.
+ */
+ if ((elv_iog_nr_active(iog) <= 1) && !ioq->nr_queued)
+ return 1;
+
+ return 0;
+}
+
static void io_group_set_parent(struct io_group *iog, struct io_group *parent)
{
struct io_entity *entity;
@@ -2708,6 +2732,10 @@ static inline int is_only_root_group(voi
{
return 1;
}
+
+/* No group idling in flat mode */
+int elv_iog_should_idle(struct io_queue *ioq) { return 0; }
+
#endif /* GROUP_IOSCHED */
/* Elevator fair queuing function */
@@ -3308,12 +3336,18 @@ void __elv_ioq_slice_expired(struct requ
if (time_after(ioq->slice_end, jiffies)) {
slice_unused = ioq->slice_end - jiffies;
if (slice_unused == entity->budget) {
- /*
- * queue got expired immediately after
- * completing first request. Charge 1/2 of
- * time consumed in completing first request.
+ /* Queue got expired immediately after completing
+ * first request. It happens with idle class queues
+ * as well as can happen with closely cooperating
+ * queues or with queues for which idling is not
+ * enabled.
+ *
+ * Charge the full time since slice was started. This
+			 * will also include the seek cost on rotational media.
+			 * This is a bit unfair, but we don't know of a better
+			 * way to handle such cases.
*/
- slice_used = (slice_used + 1)/2;
+ slice_used = jiffies - ioq->slice_start;
} else
slice_used = entity->budget - slice_unused;
} else {
@@ -3686,7 +3720,8 @@ void *elv_fq_select_ioq(struct request_q
/*
* The active queue has run out of time, expire it and select new.
*/
- if (elv_ioq_slice_used(ioq) && !elv_ioq_must_dispatch(ioq)) {
+ if ((elv_ioq_slice_used(ioq) || elv_ioq_class_idle(ioq))
+ && !elv_ioq_must_dispatch(ioq)) {
/*
* Queue has used up its slice. Wait busy is not on otherwise
* we wouldn't have been here. If this group will be deleted
@@ -3711,9 +3746,7 @@ void *elv_fq_select_ioq(struct request_q
* from queue and is not proportional to group's weight, it
* harms the fairness of the group.
*/
- if ((elv_iog_nr_active(iog) <= 1) && !ioq->nr_queued
- && !elv_iog_wait_busy_done(iog) && efqd->elv_group_idle
- && elv_iog_idle_window(iog)) {
+ if (elv_iog_should_idle(ioq) && !elv_iog_wait_busy_done(iog)) {
ioq = NULL;
goto keep_queue;
} else
@@ -3893,12 +3926,6 @@ void elv_ioq_completed_request(struct re
elv_clear_ioq_slice_new(ioq);
}
- if (elv_ioq_class_idle(ioq)) {
- if (elv_iosched_expire_ioq(q, 1, 0))
- elv_ioq_slice_expired(q);
- goto done;
- }
-
/*
* If there is only root group present, don't expire the queue
* for single queue ioschedulers (noop, deadline, AS). It is
@@ -3919,14 +3946,14 @@ void elv_ioq_completed_request(struct re
* mean seek distance, give them a chance to run instead
* of idling.
*/
- if (elv_ioq_slice_used(ioq)) {
+ if (elv_ioq_slice_used(ioq) || elv_ioq_class_idle(ioq)) {
/* This is the last empty queue in the group and it
* has consumed its slice. If we expire it right away
* group might loose its share. Wait for an extra
* group_idle period for a request before queue
* expires.
*/
- if ((elv_iog_nr_active(iog) <= 1) && !ioq->nr_queued) {
+ if (elv_iog_should_idle(ioq)) {
elv_iog_arm_slice_timer(q, iog, 1);
goto done;
}
@@ -3943,8 +3970,10 @@ void elv_ioq_completed_request(struct re
goto done;
/* Expire the queue */
- if (elv_iosched_expire_ioq(q, 1, 0))
+ if (elv_iosched_expire_ioq(q, 1, 0)) {
elv_ioq_slice_expired(q);
+ goto done;
+ }
} else if (!ioq->nr_queued && !elv_close_cooperator(q, ioq)
&& sync && !rq_noidle(rq))
elv_ioq_arm_slice_timer(q);
@@ -3953,9 +3982,8 @@ void elv_ioq_completed_request(struct re
* If this is the last queue in the group and we did not
* decide to idle on queue, idle on group.
*/
- if (elv_active_ioq(q->elevator) && !ioq->nr_queued &&
- !ioq->dispatched && !timer_pending(&efqd->idle_slice_timer)
- && (elv_iog_nr_active(iog) <= 1)) {
+ if (elv_iog_should_idle(ioq) && !ioq->dispatched
+ && !timer_pending(&efqd->idle_slice_timer)) {
/*
* If queue has used up its slice, wait for the
* one extra group_idle period to let the group
===================================================================
@@ -695,6 +695,7 @@ extern int elv_nr_busy_ioq(struct elevat
extern int elv_rq_in_driver(struct elevator_queue *e);
extern struct io_queue *elv_alloc_ioq(struct request_queue *q, gfp_t gfp_mask);
extern void elv_free_ioq(struct io_queue *ioq);
+extern int elv_iog_should_idle(struct io_queue *ioq);
#else /* CONFIG_ELV_FAIR_QUEUING */
===================================================================
@@ -1007,10 +1007,13 @@ static int cfq_dispatch_requests(struct
/*
* expire an async queue immediately if it has used up its slice. idle
* queue always expire after 1 dispatch round.
+ *
+ * Also do not expire the queue if we plan to do group idling on it.
+ * In that case it will be expired later.
*/
if (elv_nr_busy_ioq(q->elevator) > 1 && ((!cfq_cfqq_sync(cfqq) &&
cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
- cfq_class_idle(cfqq))) {
+ (cfq_class_idle(cfqq) && !elv_iog_should_idle(cfqq->ioq)))) {
cfq_slice_expired(cfqd);
}