@@ -203,6 +203,18 @@ out:
return;
}
+/**
+ * cpufreq_sched_reset_cap - interface to scheduler for resetting capacity
+ * requests
+ * @cpu: cpu whose capacity request has to be reset
+ *
+ * This will _not_ trigger any capacity update.
+ */
+void cpufreq_sched_reset_cap(int cpu)
+{
+ per_cpu(pcpu_capacity, cpu) = 0;
+}
+
static inline void set_sched_energy_freq(void)
{
if (!sched_energy_freq())
@@ -4218,8 +4218,12 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
* during load balancing, but in these cases it seems wise to trigger
* as single request after load balancing is done.
*/
- if (task_sleep)
- update_capacity_of(cpu_of(rq));
+ if (task_sleep) {
+ if (rq->cfs.nr_running)
+ update_capacity_of(cpu_of(rq));
+ else if (sched_energy_freq())
+ cpufreq_sched_reset_cap(cpu_of(rq));
+ }
}
hrtick_update(rq);
}
@@ -1424,9 +1424,12 @@ static inline bool sched_energy_freq(void)
#ifdef CONFIG_CPU_FREQ_GOV_SCHED
void cpufreq_sched_set_cap(int cpu, unsigned long util);
+void cpufreq_sched_reset_cap(int cpu);
#else
static inline void cpufreq_sched_set_cap(int cpu, unsigned long util)
{ }
+static inline void cpufreq_sched_reset_cap(int cpu)
+{ }
#endif
static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta)