@@ -759,6 +759,7 @@ void sched_avg_update(struct rq *rq)
asm("" : "+rm" (rq->age_stamp));
rq->age_stamp += period;
rq->rt_avg /= 2;
+ rq->dl_avg /= 2;
}
}
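
For context: sched_avg_update() halves the accumulated averages once per sched_avg_period(), so with this change dl_avg decays exactly like rt_avg does. Runtime charged N periods ago is weighted by 2^-N, and a steady per-period load converges to roughly twice that load in the accumulator. A minimal userspace sketch of that decay (the period count and runtime value below are made up for illustration, not taken from the patch):

#include <stdio.h>
#include <stdint.h>

/* Toy model of the rt_avg/dl_avg decay: each "period" the accumulated
 * value is halved and then the runtime charged during that period is
 * added, loosely mirroring sched_avg_update() plus sched_*_avg_update(). */
int main(void)
{
	uint64_t avg = 0;
	const uint64_t per_period_runtime = 1000; /* arbitrary units */

	for (int period = 1; period <= 10; period++) {
		avg /= 2;                  /* decay, as in sched_avg_update() */
		avg += per_period_runtime; /* accumulate this period's runtime */
		printf("period %2d: avg = %llu\n", period,
		       (unsigned long long)avg);
	}
	/* avg converges toward 2 * per_period_runtime. */
	return 0;
}

After ten periods the toy accumulator settles just under 2000, i.e. about twice the per-period runtime, which is why the halving keeps the average bounded.
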
@@ -747,7 +747,7 @@ static void update_curr_dl(struct rq *rq)
 	curr->se.exec_start = rq_clock_task(rq);
 	cpuacct_charge(curr, delta_exec);
-	sched_rt_avg_update(rq, delta_exec);
+	sched_dl_avg_update(rq, delta_exec);
 	dl_se->runtime -= dl_se->dl_yielded ? 0 : delta_exec;
 	if (dl_runtime_exceeded(dl_se)) {
@@ -6278,6 +6278,7 @@ static unsigned long scale_rt_capacity(int cpu)
 	 */
 	age_stamp = READ_ONCE(rq->age_stamp);
 	avg = READ_ONCE(rq->rt_avg);
+	avg += READ_ONCE(rq->dl_avg);
 	delta = __rq_clock_broken(rq) - age_stamp;
 	if (unlikely(delta < 0))
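
The hunk only changes how avg is gathered; the rest of scale_rt_capacity() (not shown here) divides the accumulated, frequency-scaled runtime by the averaging window and subtracts that share from full capacity, so DL runtime now reduces the capacity reported to CFS just as RT runtime does. A rough standalone sketch of that arithmetic, with a simplified window and made-up numbers (remaining_capacity() is an illustrative helper, not a kernel function; SCHED_CAPACITY_SCALE is the scheduler's usual 1024 capacity unit):

#include <stdio.h>
#include <stdint.h>

#define SCHED_CAPACITY_SCALE 1024  /* scale the scheduler uses for capacity */

/* Toy version of the "remaining capacity" computation: the accumulated
 * rt_avg + dl_avg runtime is divided by the averaging window, and the
 * resulting "used" share is subtracted from full capacity. */
static unsigned long remaining_capacity(uint64_t rt_avg, uint64_t dl_avg,
					uint64_t window_ns)
{
	uint64_t used = (rt_avg + dl_avg) / window_ns;

	if (used >= SCHED_CAPACITY_SCALE)
		return 1;                   /* almost nothing left for CFS */
	return SCHED_CAPACITY_SCALE - used;
}

int main(void)
{
	/* Example: over a 1 ms window, RT consumed 0.2 ms and DL 0.3 ms of
	 * frequency-scaled runtime (runtime is pre-multiplied by the
	 * capacity scale, as sched_rt/dl_avg_update() do). */
	uint64_t window = 1000000;                       /* 1 ms in ns */
	uint64_t rt = 200000ULL * SCHED_CAPACITY_SCALE;  /* 0.2 ms, scaled */
	uint64_t dl = 300000ULL * SCHED_CAPACITY_SCALE;  /* 0.3 ms, scaled */

	printf("capacity left for CFS: %lu / %d\n",
	       remaining_capacity(rt, dl, window), SCHED_CAPACITY_SCALE);
	return 0;
}

With these numbers, 0.5 ms of combined RT and DL runtime in a 1 ms window leaves 512 of 1024 capacity units for CFS.
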
@@ -644,7 +644,7 @@ struct rq {
 	struct list_head cfs_tasks;
-	u64 rt_avg;
+	u64 rt_avg, dl_avg;
 	u64 age_stamp;
 	u64 idle_stamp;
 	u64 avg_idle;
@@ -1499,8 +1499,14 @@ static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
 {
 	rq->rt_avg += rt_delta * arch_scale_freq_capacity(NULL, cpu_of(rq));
 }
+
+static inline void sched_dl_avg_update(struct rq *rq, u64 dl_delta)
+{
+	rq->dl_avg += dl_delta * arch_scale_freq_capacity(NULL, cpu_of(rq));
+}
 #else
 static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta) { }
+static inline void sched_dl_avg_update(struct rq *rq, u64 dl_delta) { }
 static inline void sched_avg_update(struct rq *rq) { }
 #endif
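
One detail the new helper inherits from sched_rt_avg_update(): the runtime delta is multiplied by arch_scale_freq_capacity(), so time spent at a reduced CPU frequency is weighted down and the averages track consumed capacity rather than raw wall-clock time. A tiny worked example of that weighting (the frequency-capacity values are illustrative; 1024 stands for running at full capacity):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint64_t scale = 1024;   /* SCHED_CAPACITY_SCALE */
	uint64_t delta_exec = 500000;  /* 0.5 ms of DL runtime, in ns */

	/* At full frequency the whole delta is charged ... */
	uint64_t full = delta_exec * scale;
	/* ... while at roughly 50% of max frequency only about half the
	 * capacity was actually consumed for the same wall-clock time. */
	uint64_t half = delta_exec * (scale / 2);

	printf("charged at full speed: %llu, at half speed: %llu\n",
	       (unsigned long long)full, (unsigned long long)half);
	return 0;
}
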