@@ -936,7 +936,7 @@ burn_budget(const struct scheduler *ops, struct rt_vcpu *svc, s_time_t now)
if ( svc->cur_budget <= 0 )
{
svc->cur_budget = 0;
- set_bit(__RTDS_depleted, &svc->flags);
+ __set_bit(__RTDS_depleted, &svc->flags);
}
/* TRACE */
@@ -1050,7 +1050,7 @@ rt_schedule(const struct scheduler *ops, s_time_t now, bool_t tasklet_work_sched
if ( snext != scurr &&
!is_idle_vcpu(current) &&
vcpu_runnable(current) )
- set_bit(__RTDS_delayed_runq_add, &scurr->flags);
+ __set_bit(__RTDS_delayed_runq_add, &scurr->flags);
snext->last_start = now;
ret.time = -1; /* if an idle vcpu is picked */
@@ -1059,7 +1059,7 @@ rt_schedule(const struct scheduler *ops, s_time_t now, bool_t tasklet_work_sched
if ( snext != scurr )
{
q_remove(snext);
- set_bit(__RTDS_scheduled, &snext->flags);
+ __set_bit(__RTDS_scheduled, &snext->flags);
}
if ( snext->vcpu->processor != cpu )
{
@@ -1093,7 +1093,7 @@ rt_vcpu_sleep(const struct scheduler *ops, struct vcpu *vc)
replq_remove(ops, svc);
}
else if ( svc->flags & RTDS_delayed_runq_add )
- clear_bit(__RTDS_delayed_runq_add, &svc->flags);
+ __clear_bit(__RTDS_delayed_runq_add, &svc->flags);
}
/*
@@ -1235,7 +1235,7 @@ rt_vcpu_wake(const struct scheduler *ops, struct vcpu *vc)
*/
if ( unlikely(svc->flags & RTDS_scheduled) )
{
- set_bit(__RTDS_delayed_runq_add, &svc->flags);
+ __set_bit(__RTDS_delayed_runq_add, &svc->flags);
/*
* The vcpu is waking up already, and we didn't even have the time to
* remove its next replenishment event from the replenishment queue
@@ -1266,12 +1266,12 @@ rt_context_saved(const struct scheduler *ops, struct vcpu *vc)
struct rt_vcpu *svc = rt_vcpu(vc);
spinlock_t *lock = vcpu_schedule_lock_irq(vc);
- clear_bit(__RTDS_scheduled, &svc->flags);
+ __clear_bit(__RTDS_scheduled, &svc->flags);
/* not insert idle vcpu to runq */
if ( is_idle_vcpu(vc) )
goto out;
- if ( test_and_clear_bit(__RTDS_delayed_runq_add, &svc->flags) &&
+ if ( __test_and_clear_bit(__RTDS_delayed_runq_add, &svc->flags) &&
likely(vcpu_runnable(vc)) )
{
runq_insert(ops, svc);
@@ -1447,7 +1447,7 @@ static void repl_timer_handler(void *data){
runq_tickle(ops, next_on_runq);
}
else if ( vcpu_on_q(svc) &&
- test_and_clear_bit(__RTDS_depleted, &svc->flags) )
+ __test_and_clear_bit(__RTDS_depleted, &svc->flags) )
runq_tickle(ops, svc);
list_del(&svc->replq_elem);
Vcpu flags are checked and cleared atomically. Performance can be
improved by using the corresponding non-atomic versions, since
schedule.c already holds the relevant spinlocks wherever these flags
are accessed.

Signed-off-by: Tianyang Chen <tiche@seas.upenn.edu>
---
 xen/common/sched_rt.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)