@@ -10712,6 +10712,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
int r;
unsigned long long cycles, cycles_start = 0;
unsigned long long unhalted_cycles, unhalted_cycles_start = 0;
+ unsigned long long halted_cycles_ns = 0;
bool req_int_win =
dm_request_for_irq_injection(vcpu) &&
kvm_cpu_accept_dm_intr(vcpu);
@@ -11083,8 +11084,11 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
cycles = get_cycles() - cycles_start;
unhalted_cycles = get_unhalted_cycles() -
unhalted_cycles_start;
- if (likely(cycles > unhalted_cycles))
- current->gtime_halted += cycles2ns(cycles - unhalted_cycles);
+ if (likely(cycles > unhalted_cycles)) {
+ halted_cycles_ns = cycles2ns(cycles - unhalted_cycles);
+ current->gtime_halted += halted_cycles_ns;
+ sched_account_gtime_halted(current, halted_cycles_ns);
+ }
}
local_irq_enable();
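get_cycles(), get_unhalted_cycles() and cycles2ns() are provided elsewhere in the series; only get_cycles() (the TSC read on x86) is an existing kernel helper. The sketch below is a hypothetical illustration of how the other two could look on x86, where the TSC keeps counting while a core is halted but IA32_MPERF only counts in C0; the MPERF choice and the tsc_khz-based conversion are assumptions, not the series' actual definitions.

/*
 * Hypothetical sketch, not taken from the series: derive "unhalted cycles"
 * from IA32_MPERF, which ticks at (roughly) the TSC rate but only while the
 * core is in C0, so a TSC delta minus an MPERF delta approximates the time
 * the vCPU spent halted.
 */
#include <linux/math64.h>
#include <linux/time64.h>
#include <asm/msr.h>
#include <asm/tsc.h>

static inline unsigned long long get_unhalted_cycles(void)
{
	unsigned long long mperf;

	rdmsrl(MSR_IA32_MPERF, mperf);	/* stops while the core is halted */
	return mperf;
}

static inline unsigned long long cycles2ns(unsigned long long cycles)
{
	/* tsc_khz cycles per millisecond -> nanoseconds */
	return mul_u64_u32_div(cycles, NSEC_PER_MSEC, tsc_khz);
}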
@@ -367,6 +367,8 @@ struct vtime {
u64 gtime;
};
+extern void sched_account_gtime_halted(struct task_struct *p, u64 gtime_halted);
+
/*
* Utilization clamp constraints.
* @UCLAMP_MIN: Minimum utilization
@@ -588,6 +590,8 @@ struct sched_entity {
*/
struct sched_avg avg;
#endif
+
+ u64 gtime_halted;
};
struct sched_rt_entity {
@@ -4487,6 +4487,7 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
p->se.nr_migrations = 0;
p->se.vruntime = 0;
p->se.vlag = 0;
+ p->se.gtime_halted = 0;
INIT_LIST_HEAD(&p->se.group_node);
/* A delayed task cannot be in clone(). */
@@ -13705,4 +13705,29 @@ __init void init_sched_fair_class(void)
#endif
#endif /* SMP */
}
+
+#ifdef CONFIG_NO_HZ_FULL
+void sched_account_gtime_halted(struct task_struct *p, u64 gtime_halted)
+{
+}
+#else
+/*
+ * The implementation hooking into PELT requires regular updates of
+ * gtime_halted. That is not guaranteed under CONFIG_NO_HZ_FULL, hence
+ * the no-op stub above.
+ */
+void sched_account_gtime_halted(struct task_struct *p, u64 gtime_halted)
+{
+ struct sched_entity *se = &p->se;
+
+ if (unlikely(!gtime_halted))
+ return;
+
+ for_each_sched_entity(se) {
+ se->gtime_halted += gtime_halted;
+ se->cfs_rq->gtime_halted += gtime_halted;
+ }
+}
+#endif
+EXPORT_SYMBOL(sched_account_gtime_halted);
@@ -305,10 +305,23 @@ int __update_load_avg_blocked_se(u64 now, struct sched_entity *se)
int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se)
{
- if (___update_load_sum(now, &se->avg, !!se->on_rq, se_runnable(se),
- cfs_rq->curr == se)) {
-
- ___update_load_avg(&se->avg, se_weight(se));
+ int ret = 0;
+ u64 delta = now - se->avg.last_update_time;
+ u64 gtime_halted = min(delta, se->gtime_halted);
+ ret = ___update_load_sum(now - gtime_halted, &se->avg, !!se->on_rq, se_runnable(se),
+ cfs_rq->curr == se);
+
+ if (gtime_halted) {
+ ret |= ___update_load_sum(now, &se->avg, 0, 0, 0);
+ se->gtime_halted -= gtime_halted;
+
+ /* decay residual halted time */
+ if (ret && se->gtime_halted)
+ se->gtime_halted = decay_load(se->gtime_halted, delta / 1024);
+ }
+
+ if (ret) {
+ ___update_load_avg(&se->avg, se_weight(se));
cfs_se_util_change(&se->avg);
trace_pelt_se_tp(se);
return 1;
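To see what the split update above is aiming for, here is a toy userspace model, not the kernel's fixed-point PELT implementation; it only borrows PELT's ~32 ms half-life. A vCPU that does 1 ms of real work and then sits halted for 3 ms settles near 25% utilization when the halted span only decays the signal, whereas treating the whole 4 ms window as running drives the signal toward 100%.

/* toy_pelt.c: continuous-time approximation of PELT (32 ms half-life).
 * Illustration only; build with: cc -O2 toy_pelt.c -lm */
#include <math.h>
#include <stdio.h>

#define HALFLIFE_MS	32.0

/* decay a signal across 'ms' milliseconds with no contribution */
static double decay(double sig, double ms)
{
	return sig * pow(0.5, ms / HALFLIFE_MS);
}

/* accrue 'ms' milliseconds of 100% running time on top of 'sig' */
static double accrue(double sig, double ms)
{
	double d = pow(0.5, ms / HALFLIFE_MS);

	return sig * d + (1.0 - d);	/* saturates at 1.0 */
}

int main(void)
{
	double naive = 0.0, halt_aware = 0.0;
	int i;

	/* vCPU pattern: 1 ms of real work, then 3 ms halted inside the guest */
	for (i = 0; i < 1000; i++) {
		/* whole 4 ms window counted as running */
		naive = accrue(naive, 4.0);
		/* only 1 ms accrues; the halted 3 ms merely decays */
		halt_aware = decay(accrue(halt_aware, 1.0), 3.0);
	}
	printf("util, halted time counted as running: %.2f\n", naive);
	printf("util, halted time decay-only:         %.2f\n", halt_aware);
	return 0;
}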
@@ -319,10 +332,25 @@ int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se
int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq)
{
- if (___update_load_sum(now, &cfs_rq->avg,
- scale_load_down(cfs_rq->load.weight),
- cfs_rq->h_nr_runnable,
- cfs_rq->curr != NULL)) {
+ int ret = 0;
+ u64 delta = now - cfs_rq->avg.last_update_time;
+ u64 gtime_halted = min(delta, cfs_rq->gtime_halted);
+
+ ret = ___update_load_sum(now - gtime_halted, &cfs_rq->avg,
+ scale_load_down(cfs_rq->load.weight),
+ cfs_rq->h_nr_runnable,
+ cfs_rq->curr != NULL);
+
+ if (gtime_halted) {
+ ret |= ___update_load_sum(now, &cfs_rq->avg, 0, 0, 0);
+ cfs_rq->gtime_halted -= gtime_halted;
+
+ /* decay any residual halted time */
+ if (ret && cfs_rq->gtime_halted)
+ cfs_rq->gtime_halted = decay_load(cfs_rq->gtime_halted, delta / 1024);
+ }
+
+ if (ret) {
___update_load_avg(&cfs_rq->avg, 1);
trace_pelt_cfs_tp(cfs_rq);
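Both PELT hunks fall back on decay_load() for halted time that could not be consumed within the elapsed window (the min() clamp): decay_load(val, n) scales val by y^n with y^32 == 1/2, PELT's half-life of 32 periods of about 1 ms, so a leftover fades away rather than suppressing utilization indefinitely. A small userspace stand-in (double precision instead of the kernel's fixed-point table; the numbers are illustrative only):

/* toy_decay.c: model of decay_load() semantics, illustration only */
#include <math.h>
#include <stdio.h>

/* val * y^n, with y chosen so that y^32 == 0.5 (the PELT half-life) */
static unsigned long long toy_decay_load(unsigned long long val, unsigned int n)
{
	return (unsigned long long)(val * pow(0.5, n / 32.0));
}

int main(void)
{
	/* 3 ms of banked halted time left over after an update */
	unsigned long long residual_ns = 3000000ULL;
	unsigned int n;

	for (n = 0; n <= 128; n += 32)
		printf("after %3u periods: %llu ns\n",
		       n, toy_decay_load(residual_ns, n));
	return 0;
}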
@@ -744,6 +744,8 @@ struct cfs_rq {
struct list_head throttled_csd_list;
#endif /* CONFIG_CFS_BANDWIDTH */
#endif /* CONFIG_FAIR_GROUP_SCHED */
+
+ u64 gtime_halted;
};
#ifdef CONFIG_SCHED_CLASS_EXT