@@ -656,7 +656,8 @@ __update_runq_load(const struct scheduler *ops,
rqd->load += change;
rqd->load_last_update = now;
- ASSERT(rqd->avgload <= STIME_MAX && rqd->b_avgload <= STIME_MAX);
+ /* Overflow, capable of making the load look negative, must not occur. */
+ ASSERT(rqd->avgload > 0 && rqd->b_avgload > 0);
if ( unlikely(tb_init_done) )
{
@@ -714,6 +715,9 @@ __update_svc_load(const struct scheduler *ops,
}
svc->load_last_update = now;
+ /* Overflow, capable of making the load look negative, must not occur. */
+ ASSERT(svc->avgload > 0);
+
if ( unlikely(tb_init_done) )
{
struct {
@@ -1742,7 +1746,7 @@ retry:
* If we're under 100% capacaty, only shift if load difference
* is > 1. otherwise, shift if under 12.5%
*/
- if ( load_max < (cpus_max << prv->load_precision_shift) )
+ if ( load_max < ((s_time_t)cpus_max << prv->load_precision_shift) )
{
if ( st.load_delta < (1ULL << (prv->load_precision_shift +
opt_underload_balance_tolerance)) )
both introduced in d205f8a7f48e2ec ("xen: credit2: rework load tracking
logic").

First, in __update_runq_load(), the ASSERT() was actually useless. Let's
instead check that the computed value of the load has not overflowed (and
hence gone negative). While there, do the same in __update_svc_load().

Second, in balance_load(), cpus_max needs to be extended so that it is
shifted correctly, and so that the result can be compared with an s_time_t
value without risking losing information.

Signed-off-by: Dario Faggioli <dario.faggioli@citrix.com>
Reported-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
Cc: George Dunlap <george.dunlap@citrix.com>
Cc: Anshul Makkar <anshul.makkar@citrix.com>
---
 xen/common/sched_credit2.c | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)
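
For readers unfamiliar with the integer-width subtlety being fixed, below is
a small stand-alone sketch (not part of the patch, and not Xen code)
illustrating both points. The names and values in it are assumptions
modelled loosely on the credit2 scheduler: s_time_t is taken to be int64_t,
cpus_max to be a plain 32-bit int, and load_precision_shift to be 18.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef int64_t s_time_t;            /* stand-in for Xen's s_time_t */

int main(void)
{
    unsigned int load_precision_shift = 18;  /* hypothetical precision */
    int cpus_max = 8;                        /* hypothetical CPU count */

    /*
     * The balance_load() point: widen before shifting.  Without the
     * cast, the shift is done in the (32-bit) type of cpus_max, so a
     * large enough shift or CPU count could drop the high bits (or be
     * undefined) before the 64-bit comparison ever happens.
     */
    s_time_t cap = (s_time_t)cpus_max << load_precision_shift;
    printf("capacity in fixed point: %lld\n", (long long)cap);

    /*
     * The __update_runq_load()/__update_svc_load() point: the average
     * load lives in a signed type, so an overflow in its computation
     * would show up as a negative value; the added ASSERT()s catch
     * exactly that.
     */
    s_time_t avgload = cap / 2;              /* some computed load */
    assert(avgload > 0);                     /* mirrors the added check */

    return 0;
}

The key point of the cast in the patch is that it has to happen before the
shift: casting the already-shifted 32-bit result to s_time_t would merely
preserve any truncation rather than avoid it.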