
[v2,24/48] xen: switch from for_each_vcpu() to for_each_sched_unit()

Message ID 20190809145833.1020-25-jgross@suse.com
State Superseded
Series xen: add core scheduling support

Commit Message

Jürgen Groß Aug. 9, 2019, 2:58 p.m. UTC
Where appropriate, switch from for_each_vcpu() to for_each_sched_unit()
in order to prepare for core scheduling.

As this is beneficial here already, and will certainly be in future,
add a unit_scheduler() helper and let vcpu_scheduler() use it.

Signed-off-by: Juergen Gross <jgross@suse.com>
---
V2:
- handle affinity_broken correctly (Jan Beulich)
- add unit_scheduler() (Jan Beulich)
---
 xen/common/domain.c   |   9 ++-
 xen/common/schedule.c | 148 ++++++++++++++++++++++++++++++--------------------
 2 files changed, 93 insertions(+), 64 deletions(-)

Comments

Jan Beulich Sept. 9, 2019, 3:14 p.m. UTC | #1
On 09.08.2019 16:58, Juergen Gross wrote:
> @@ -504,22 +511,21 @@ int sched_move_domain(struct domain *d, struct cpupool *c)
>      if ( IS_ERR(domdata) )
>          return PTR_ERR(domdata);
>  
> -    vcpu_priv = xzalloc_array(void *, d->max_vcpus);
> -    if ( vcpu_priv == NULL )
> +    unit_priv = xzalloc_array(void *, d->max_vcpus);

I find it confusing that an array of units (as per the use below)
is dimensioned by the domain's vCPU count. Isn't there a correlation
between vCPU IDs and unit IDs, perhaps along the lines of CPU
APIC (thread), core, and socket IDs? If so, the array size could
be bounded here by a smaller (down the road) value.

> @@ -880,18 +889,36 @@ void vcpu_force_reschedule(struct vcpu *v)
>      vcpu_migrate_finish(v);
>  }
>  
> +static bool sched_check_affinity_broken(struct sched_unit *unit)

const

> +{
> +    struct vcpu *v;

const

> @@ -910,18 +937,20 @@ void restore_vcpu_affinity(struct domain *d)
>                      cpupool_domain_cpumask(d));
>          if ( cpumask_empty(cpumask_scratch_cpu(cpu)) )
>          {
> -            if ( v->affinity_broken )
> +            if ( sched_check_affinity_broken(unit) )
>              {
> -                sched_set_affinity(v, unit->cpu_hard_affinity_saved, NULL);
> -                v->affinity_broken = 0;
> +                sched_set_affinity(unit->vcpu_list,
> +                                   unit->cpu_hard_affinity_saved, NULL);
> +                sched_reset_affinity_broken(unit);
>                  cpumask_and(cpumask_scratch_cpu(cpu), unit->cpu_hard_affinity,
>                              cpupool_domain_cpumask(d));
>              }
>  
>              if ( cpumask_empty(cpumask_scratch_cpu(cpu)) )
>              {
> -                printk(XENLOG_DEBUG "Breaking affinity for %pv\n", v);
> -                sched_set_affinity(v, &cpumask_all, NULL);
> +                printk(XENLOG_DEBUG "Breaking affinity for %pv\n",
> +                       unit->vcpu_list);
> +                sched_set_affinity(unit->vcpu_list, &cpumask_all, NULL);
>                  cpumask_and(cpumask_scratch_cpu(cpu), unit->cpu_hard_affinity,
>                              cpupool_domain_cpumask(d));
>              }
> [...]
> @@ -964,17 +992,18 @@ int cpu_disable_scheduler(unsigned int cpu)
>  
>      for_each_domain_in_cpupool ( d, c )
>      {
> -        for_each_vcpu ( d, v )
> +        struct sched_unit *unit;
> +
> +        for_each_sched_unit ( d, unit )
>          {
>              unsigned long flags;
> -            struct sched_unit *unit = v->sched_unit;
>              spinlock_t *lock = unit_schedule_lock_irqsave(unit, &flags);
>  
>              cpumask_and(&online_affinity, unit->cpu_hard_affinity, c->cpu_valid);
>              if ( cpumask_empty(&online_affinity) &&
>                   cpumask_test_cpu(cpu, unit->cpu_hard_affinity) )
>              {
> -                if ( v->affinity_broken )
> +                if ( unit->vcpu_list->affinity_broken )
>                  {
>                      /* The vcpu is temporarily pinned, can't move it. */
>                      unit_schedule_unlock_irqrestore(lock, flags, unit);
> @@ -982,14 +1011,15 @@ int cpu_disable_scheduler(unsigned int cpu)
>                      break;
>                  }
>  
> -                printk(XENLOG_DEBUG "Breaking affinity for %pv\n", v);
> +                printk(XENLOG_DEBUG "Breaking affinity for %pv\n",
> +                       unit->vcpu_list);
>  
> -                sched_set_affinity(v, &cpumask_all, NULL);
> +                sched_set_affinity(unit->vcpu_list, &cpumask_all, NULL);
>              }
>  
> -            if ( v->processor != cpu )
> +            if ( sched_unit_cpu(unit) != sched_get_resource_cpu(cpu) )
>              {
> -                /* The vcpu is not on this cpu, so we can move on. */
> +                /* The unit is not on this cpu, so we can move on. */
>                  unit_schedule_unlock_irqrestore(lock, flags, unit);
>                  continue;
>              }
> @@ -1002,17 +1032,17 @@ int cpu_disable_scheduler(unsigned int cpu)
>               *  * the scheduler will always find a suitable solution, or
>               *    things would have failed before getting in here.
>               */
> -            vcpu_migrate_start(v);
> +            vcpu_migrate_start(unit->vcpu_list);
>              unit_schedule_unlock_irqrestore(lock, flags, unit);
>  
> -            vcpu_migrate_finish(v);
> +            vcpu_migrate_finish(unit->vcpu_list);

All the ->vcpu_list references look bogus considering where you're
moving, but I can only guess that all of this will need touching
again later in the series. I wonder though whether these wouldn't
better change into for-each-vCPU-in-unit loops right away.

>              /*
>               * The only caveat, in this case, is that if a vcpu active in
>               * the hypervisor isn't migratable. In this case, the caller
>               * should try again after releasing and reacquiring all locks.
>               */
> -            if ( v->processor == cpu )
> +            if ( sched_unit_cpu(unit) == sched_get_resource_cpu(cpu) )

Is comparing the (pseudo) CPU values here the most efficient approach
generated code wise? Can't there be some pointer comparison that's
cheaper?

> @@ -1023,8 +1053,8 @@ int cpu_disable_scheduler(unsigned int cpu)
>  static int cpu_disable_scheduler_check(unsigned int cpu)
>  {
>      struct domain *d;
> -    struct vcpu *v;
>      struct cpupool *c;
> +    struct vcpu *v;
>  
>      c = per_cpu(cpupool, cpu);
>      if ( c == NULL )

Stray change?

Jan
Jürgen Groß Sept. 12, 2019, 2:02 p.m. UTC | #2
On 09.09.19 17:14, Jan Beulich wrote:
> On 09.08.2019 16:58, Juergen Gross wrote:
>> @@ -504,22 +511,21 @@ int sched_move_domain(struct domain *d, struct cpupool *c)
>>       if ( IS_ERR(domdata) )
>>           return PTR_ERR(domdata);
>>   
>> -    vcpu_priv = xzalloc_array(void *, d->max_vcpus);
>> -    if ( vcpu_priv == NULL )
>> +    unit_priv = xzalloc_array(void *, d->max_vcpus);
> 
> I find it confusing that an array of units (as per the use below)
> is dimensioned by the domain's vCPU count. Isn't there a correlation
> between vCPU IDs and unit IDs, perhaps along the lines of CPU
> APIC (thread), core, and socket IDs? If so, the array size could
> be bounded here by a smaller (down the road) value.

I'll add a comment in this regard in this patch and when the number
of vcpus per unit gets added (patch 31) I'll modify the array size.
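
For illustration, the allocation could then shrink to something like
this (a sketch only; "sched_granularity" stands in for the per-unit
vCPU count to be introduced there, so the exact name may differ):

    /* One private data slot per unit rather than per vCPU. */
    unit_priv = xzalloc_array(void *,
                              DIV_ROUND_UP(d->max_vcpus, sched_granularity));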

> 
>> @@ -880,18 +889,36 @@ void vcpu_force_reschedule(struct vcpu *v)
>>       vcpu_migrate_finish(v);
>>   }
>>   
>> +static bool sched_check_affinity_broken(struct sched_unit *unit)
> 
> const

Okay.

> 
>> +{
>> +    struct vcpu *v;
> 
> const

Okay.
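
Resulting sketch with both const qualifiers applied (assuming
for_each_sched_unit_vcpu() copes with const operands):

static bool sched_check_affinity_broken(const struct sched_unit *unit)
{
    const struct vcpu *v;

    for_each_sched_unit_vcpu ( unit, v )
        if ( v->affinity_broken )
            return true;

    return false;
}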

> 
>> @@ -910,18 +937,20 @@ void restore_vcpu_affinity(struct domain *d)
>>                       cpupool_domain_cpumask(d));
>>           if ( cpumask_empty(cpumask_scratch_cpu(cpu)) )
>>           {
>> -            if ( v->affinity_broken )
>> +            if ( sched_check_affinity_broken(unit) )
>>               {
>> -                sched_set_affinity(v, unit->cpu_hard_affinity_saved, NULL);
>> -                v->affinity_broken = 0;
>> +                sched_set_affinity(unit->vcpu_list,
>> +                                   unit->cpu_hard_affinity_saved, NULL);
>> +                sched_reset_affinity_broken(unit);
>>                   cpumask_and(cpumask_scratch_cpu(cpu), unit->cpu_hard_affinity,
>>                               cpupool_domain_cpumask(d));
>>               }
>>   
>>               if ( cpumask_empty(cpumask_scratch_cpu(cpu)) )
>>               {
>> -                printk(XENLOG_DEBUG "Breaking affinity for %pv\n", v);
>> -                sched_set_affinity(v, &cpumask_all, NULL);
>> +                printk(XENLOG_DEBUG "Breaking affinity for %pv\n",
>> +                       unit->vcpu_list);
>> +                sched_set_affinity(unit->vcpu_list, &cpumask_all, NULL);
>>                   cpumask_and(cpumask_scratch_cpu(cpu), unit->cpu_hard_affinity,
>>                               cpupool_domain_cpumask(d));
>>               }
>> [...]
>> @@ -964,17 +992,18 @@ int cpu_disable_scheduler(unsigned int cpu)
>>   
>>       for_each_domain_in_cpupool ( d, c )
>>       {
>> -        for_each_vcpu ( d, v )
>> +        struct sched_unit *unit;
>> +
>> +        for_each_sched_unit ( d, unit )
>>           {
>>               unsigned long flags;
>> -            struct sched_unit *unit = v->sched_unit;
>>               spinlock_t *lock = unit_schedule_lock_irqsave(unit, &flags);
>>   
>>               cpumask_and(&online_affinity, unit->cpu_hard_affinity, c->cpu_valid);
>>               if ( cpumask_empty(&online_affinity) &&
>>                    cpumask_test_cpu(cpu, unit->cpu_hard_affinity) )
>>               {
>> -                if ( v->affinity_broken )
>> +                if ( unit->vcpu_list->affinity_broken )
>>                   {
>>                       /* The vcpu is temporarily pinned, can't move it. */
>>                       unit_schedule_unlock_irqrestore(lock, flags, unit);
>> @@ -982,14 +1011,15 @@ int cpu_disable_scheduler(unsigned int cpu)
>>                       break;
>>                   }
>>   
>> -                printk(XENLOG_DEBUG "Breaking affinity for %pv\n", v);
>> +                printk(XENLOG_DEBUG "Breaking affinity for %pv\n",
>> +                       unit->vcpu_list);
>>   
>> -                sched_set_affinity(v, &cpumask_all, NULL);
>> +                sched_set_affinity(unit->vcpu_list, &cpumask_all, NULL);
>>               }
>>   
>> -            if ( v->processor != cpu )
>> +            if ( sched_unit_cpu(unit) != sched_get_resource_cpu(cpu) )
>>               {
>> -                /* The vcpu is not on this cpu, so we can move on. */
>> +                /* The unit is not on this cpu, so we can move on. */
>>                   unit_schedule_unlock_irqrestore(lock, flags, unit);
>>                   continue;
>>               }
>> @@ -1002,17 +1032,17 @@ int cpu_disable_scheduler(unsigned int cpu)
>>                *  * the scheduler will always find a suitable solution, or
>>                *    things would have failed before getting in here.
>>                */
>> -            vcpu_migrate_start(v);
>> +            vcpu_migrate_start(unit->vcpu_list);
>>               unit_schedule_unlock_irqrestore(lock, flags, unit);
>>   
>> -            vcpu_migrate_finish(v);
>> +            vcpu_migrate_finish(unit->vcpu_list);
> 
> All the ->vcpu_list references look bogus considering where you're
> moving, but I can only guess that all of this will need touching
> again later in the series. I wonder though whether these wouldn't
> better change into for-each-vCPU-in-unit loops right away.

The vcpu_migrate part especially is more complicated. I think it is
much easier to review with the more mechanical changes split from the
logical changes.
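
For reference, the eventual shape would presumably be per-vCPU loops
along these lines (a sketch only, using the for_each_sched_unit_vcpu()
helper this series introduces, and assuming a suitably scoped
struct vcpu *v):

            for_each_sched_unit_vcpu ( unit, v )
                vcpu_migrate_start(v);
            unit_schedule_unlock_irqrestore(lock, flags, unit);

            for_each_sched_unit_vcpu ( unit, v )
                vcpu_migrate_finish(v);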

> 
>>               /*
>>                * The only caveat, in this case, is that if a vcpu active in
>>                * the hypervisor isn't migratable. In this case, the caller
>>               * should try again after releasing and reacquiring all locks.
>>                */
>> -            if ( v->processor == cpu )
>> +            if ( sched_unit_cpu(unit) == sched_get_resource_cpu(cpu) )
> 
> Is comparing the (pseudo) CPU values here the most efficient approach
> generated code wise? Can't there be some pointer comparison that's
> cheaper?

Yes, you are right. unit->res == get_sched_res(cpu) is equivalent.
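
I.e. the check would become (sketch):

            if ( unit->res == get_sched_res(cpu) )
                ret = -EAGAIN;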

> 
>> @@ -1023,8 +1053,8 @@ int cpu_disable_scheduler(unsigned int cpu)
>>   static int cpu_disable_scheduler_check(unsigned int cpu)
>>   {
>>       struct domain *d;
>> -    struct vcpu *v;
>>       struct cpupool *c;
>> +    struct vcpu *v;
>>   
>>       c = per_cpu(cpupool, cpu);
>>       if ( c == NULL )
> 
> Stray change?

Yes.


Juergen
Jan Beulich Sept. 12, 2019, 2:40 p.m. UTC | #3
On 12.09.2019 16:02, Juergen Gross wrote:
> On 09.09.19 17:14, Jan Beulich wrote:
>> On 09.08.2019 16:58, Juergen Gross wrote:
>>> @@ -1002,17 +1032,17 @@ int cpu_disable_scheduler(unsigned int cpu)
>>>                *  * the scheduler will always find a suitable solution, or
>>>                *    things would have failed before getting in here.
>>>                */
>>> -            vcpu_migrate_start(v);
>>> +            vcpu_migrate_start(unit->vcpu_list);
>>>               unit_schedule_unlock_irqrestore(lock, flags, unit);
>>>   
>>> -            vcpu_migrate_finish(v);
>>> +            vcpu_migrate_finish(unit->vcpu_list);
>>
>> All the ->vcpu_list references look bogus considering where you're
>> moving, but I can only guess that all of this will need touching
>> again later in the series. I wonder though whether these wouldn't
>> better change into for-each-vCPU-in-unit loops right away.
> 
> The vcpu_migrate part especially is more complicated. I think it is
> much easier to review with the more mechanical changes split from the
> logical changes.

Yes, and I appreciate you separating mechanical from logical changes.
However, as already pointed out in the context where I had convinced
you of using "vcpu_list" as a name, individual actions on vcpu_list
now look bogus throughout the series. They should really (almost?)
all be loops over the entire list; I have a hard time imagining
possible exceptions, but I'm not going to rule out that there may be
one or a few. Introducing such loops should, as long as there's only
ever one vCPU on such a list, also be a mostly mechanical step, which
imo should have happened before (or with) changes like this one.

Jan
Jürgen Groß Sept. 12, 2019, 2:47 p.m. UTC | #4
On 12.09.19 16:40, Jan Beulich wrote:
> On 12.09.2019 16:02, Juergen Gross wrote:
>> On 09.09.19 17:14, Jan Beulich wrote:
>>> On 09.08.2019 16:58, Juergen Gross wrote:
>>>> @@ -1002,17 +1032,17 @@ int cpu_disable_scheduler(unsigned int cpu)
>>>>                 *  * the scheduler will always find a suitable solution, or
>>>>                 *    things would have failed before getting in here.
>>>>                 */
>>>> -            vcpu_migrate_start(v);
>>>> +            vcpu_migrate_start(unit->vcpu_list);
>>>>                unit_schedule_unlock_irqrestore(lock, flags, unit);
>>>>    
>>>> -            vcpu_migrate_finish(v);
>>>> +            vcpu_migrate_finish(unit->vcpu_list);
>>>
>>> All the ->vcpu_list references look bogus considering where you're
>>> moving, but I can only guess that all of this will need touching
>>> again later in the series. I wonder though whether these wouldn't
>>> better change into for-each-vCPU-in-unit loops right away.
>>
>> The vcpu_migrate part especially is more complicated. I think it is
>> much easier to review with the more mechanical changes split from the
>> logical changes.
> 
> Yes, and I appreciate you separating mechanical from logical changes.
> However, as already pointed out in the context where I had convinced
> you of using "vcpu_list" as a name, individual actions on vcpu_list
> now look bogus throughout the series. They should really (almost?)
> all be loops over the entire list; I have a hard time imagining
> possible exceptions, but I'm not going to rule out that there may be
> one or a few. Introducing such loops should, as long as there's only
> ever one vCPU on such a list, also be a mostly mechanical step, which
> imo should have happened before (or with) changes like this one.

I think the easiest way to handle that is to add a comment like:

/* TODO: switch to for_each_sched_unit_vcpu() */

This will show the need for the loop without having to make logic
changes yet.
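
E.g. at the two call sites in cpu_disable_scheduler():

            /* TODO: switch to for_each_sched_unit_vcpu() */
            vcpu_migrate_start(unit->vcpu_list);
            unit_schedule_unlock_irqrestore(lock, flags, unit);

            /* TODO: switch to for_each_sched_unit_vcpu() */
            vcpu_migrate_finish(unit->vcpu_list);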


Juergen

Patch

diff --git a/xen/common/domain.c b/xen/common/domain.c
index 19d881b0c3..91b01c220e 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -550,7 +550,7 @@  void domain_update_node_affinity(struct domain *d)
     cpumask_var_t dom_cpumask, dom_cpumask_soft;
     cpumask_t *dom_affinity;
     const cpumask_t *online;
-    struct vcpu *v;
+    struct sched_unit *unit;
     unsigned int cpu;
 
     /* Do we have vcpus already? If not, no need to update node-affinity. */
@@ -583,12 +583,11 @@  void domain_update_node_affinity(struct domain *d)
          * and the full mask of where it would prefer to run (the union of
          * the soft affinity of all its various vcpus). Let's build them.
          */
-        for_each_vcpu ( d, v )
+        for_each_sched_unit ( d, unit )
         {
-            cpumask_or(dom_cpumask, dom_cpumask,
-                       v->sched_unit->cpu_hard_affinity);
+            cpumask_or(dom_cpumask, dom_cpumask, unit->cpu_hard_affinity);
             cpumask_or(dom_cpumask_soft, dom_cpumask_soft,
-                       v->sched_unit->cpu_soft_affinity);
+                       unit->cpu_soft_affinity);
         }
         /* Filter out non-online cpus */
         cpumask_and(dom_cpumask, dom_cpumask, online);
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index fcd083d31d..7b0ff8395e 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -147,26 +147,32 @@  static inline struct scheduler *dom_scheduler(const struct domain *d)
     return &ops;
 }
 
-static inline struct scheduler *vcpu_scheduler(const struct vcpu *v)
+static inline struct scheduler *unit_scheduler(const struct sched_unit *unit)
 {
-    struct domain *d = v->domain;
+    struct domain *d = unit->domain;
 
     if ( likely(d->cpupool != NULL) )
         return d->cpupool->sched;
 
     /*
-     * If d->cpupool is NULL, this is a vCPU of the idle domain. And this
+     * If d->cpupool is NULL, this is a unit of the idle domain. And this
      * case is special because the idle domain does not really belong to
      * a cpupool and, hence, doesn't really have a scheduler. In fact, its
-     * vCPUs (may) run on pCPUs which are in different pools, with different
+     * units (may) run on pCPUs which are in different pools, with different
      * schedulers.
      *
      * What we want, in this case, is the scheduler of the pCPU where this
-     * particular idle vCPU is running. And, since v->processor never changes
-     * for idle vCPUs, it is safe to use it, with no locks, to figure that out.
+     * particular idle unit is running. And, since unit->res never changes
+     * for idle units, it is safe to use it, with no locks, to figure that out.
      */
+
     ASSERT(is_idle_domain(d));
-    return per_cpu(scheduler, v->processor);
+    return per_cpu(scheduler, unit->res->processor);
+}
+
+static inline struct scheduler *vcpu_scheduler(const struct vcpu *v)
+{
+    return unit_scheduler(v->sched_unit);
 }
 #define VCPU2ONLINE(_v) cpupool_domain_cpumask((_v)->domain)
 
@@ -487,10 +493,11 @@  static void sched_move_irqs(struct sched_unit *unit)
 int sched_move_domain(struct domain *d, struct cpupool *c)
 {
     struct vcpu *v;
+    struct sched_unit *unit;
     unsigned int new_p;
-    void **vcpu_priv;
+    void **unit_priv;
     void *domdata;
-    void *vcpudata;
+    void *unitdata;
     struct scheduler *old_ops;
     void *old_domdata;
 
@@ -504,22 +511,21 @@  int sched_move_domain(struct domain *d, struct cpupool *c)
     if ( IS_ERR(domdata) )
         return PTR_ERR(domdata);
 
-    vcpu_priv = xzalloc_array(void *, d->max_vcpus);
-    if ( vcpu_priv == NULL )
+    unit_priv = xzalloc_array(void *, d->max_vcpus);
+    if ( unit_priv == NULL )
     {
         sched_free_domdata(c->sched, domdata);
         return -ENOMEM;
     }
 
-    for_each_vcpu ( d, v )
+    for_each_sched_unit ( d, unit )
     {
-        vcpu_priv[v->vcpu_id] = sched_alloc_vdata(c->sched, v->sched_unit,
-                                                  domdata);
-        if ( vcpu_priv[v->vcpu_id] == NULL )
+        unit_priv[unit->unit_id] = sched_alloc_vdata(c->sched, unit, domdata);
+        if ( unit_priv[unit->unit_id] == NULL )
         {
-            for_each_vcpu ( d, v )
-                xfree(vcpu_priv[v->vcpu_id]);
-            xfree(vcpu_priv);
+            for_each_sched_unit ( d, unit )
+                xfree(unit_priv[unit->unit_id]);
+            xfree(unit_priv);
             sched_free_domdata(c->sched, domdata);
             return -ENOMEM;
         }
@@ -530,30 +536,35 @@  int sched_move_domain(struct domain *d, struct cpupool *c)
     old_ops = dom_scheduler(d);
     old_domdata = d->sched_priv;
 
-    for_each_vcpu ( d, v )
+    for_each_sched_unit ( d, unit )
     {
-        sched_remove_unit(old_ops, v->sched_unit);
+        sched_remove_unit(old_ops, unit);
     }
 
     d->cpupool = c;
     d->sched_priv = domdata;
 
     new_p = cpumask_first(c->cpu_valid);
-    for_each_vcpu ( d, v )
+    for_each_sched_unit ( d, unit )
     {
         spinlock_t *lock;
+        unsigned int unit_p = new_p;
 
-        vcpudata = v->sched_unit->priv;
+        unitdata = unit->priv;
 
-        migrate_timer(&v->periodic_timer, new_p);
-        migrate_timer(&v->singleshot_timer, new_p);
-        migrate_timer(&v->poll_timer, new_p);
+        for_each_sched_unit_vcpu ( unit, v )
+        {
+            migrate_timer(&v->periodic_timer, new_p);
+            migrate_timer(&v->singleshot_timer, new_p);
+            migrate_timer(&v->poll_timer, new_p);
+            new_p = cpumask_cycle(new_p, c->cpu_valid);
+        }
 
-        lock = unit_schedule_lock_irq(v->sched_unit);
+        lock = unit_schedule_lock_irq(unit);
 
-        sched_set_affinity(v, &cpumask_all, &cpumask_all);
+        sched_set_affinity(unit->vcpu_list, &cpumask_all, &cpumask_all);
 
-        sched_set_res(v->sched_unit, get_sched_res(new_p));
+        sched_set_res(unit, get_sched_res(unit_p));
         /*
          * With v->processor modified we must not
          * - make any further changes assuming we hold the scheduler lock,
@@ -561,15 +572,13 @@  int sched_move_domain(struct domain *d, struct cpupool *c)
          */
         spin_unlock_irq(lock);
 
-        v->sched_unit->priv = vcpu_priv[v->vcpu_id];
+        unit->priv = unit_priv[unit->unit_id];
         if ( !d->is_dying )
-            sched_move_irqs(v->sched_unit);
-
-        new_p = cpumask_cycle(new_p, c->cpu_valid);
+            sched_move_irqs(unit);
 
-        sched_insert_unit(c->sched, v->sched_unit);
+        sched_insert_unit(c->sched, unit);
 
-        sched_free_vdata(old_ops, vcpudata);
+        sched_free_vdata(old_ops, unitdata);
     }
 
     domain_update_node_affinity(d);
@@ -578,7 +587,7 @@  int sched_move_domain(struct domain *d, struct cpupool *c)
 
     sched_free_domdata(old_ops, old_domdata);
 
-    xfree(vcpu_priv);
+    xfree(unit_priv);
 
     return 0;
 }
@@ -880,18 +889,36 @@  void vcpu_force_reschedule(struct vcpu *v)
     vcpu_migrate_finish(v);
 }
 
+static bool sched_check_affinity_broken(struct sched_unit *unit)
+{
+    struct vcpu *v;
+
+    for_each_sched_unit_vcpu ( unit, v )
+        if ( v->affinity_broken )
+            return true;
+
+    return false;
+}
+
+static void sched_reset_affinity_broken(struct sched_unit *unit)
+{
+    struct vcpu *v;
+
+    for_each_sched_unit_vcpu ( unit, v )
+        v->affinity_broken = false;
+}
+
 void restore_vcpu_affinity(struct domain *d)
 {
     unsigned int cpu = smp_processor_id();
-    struct vcpu *v;
+    struct sched_unit *unit;
 
     ASSERT(system_state == SYS_STATE_resume);
 
-    for_each_vcpu ( d, v )
+    for_each_sched_unit ( d, unit )
     {
         spinlock_t *lock;
-        unsigned int old_cpu = v->processor;
-        struct sched_unit *unit = v->sched_unit;
+        unsigned int old_cpu = sched_unit_cpu(unit);
         struct sched_resource *res;
 
         ASSERT(!unit_runnable(unit));
@@ -910,18 +937,20 @@  void restore_vcpu_affinity(struct domain *d)
                     cpupool_domain_cpumask(d));
         if ( cpumask_empty(cpumask_scratch_cpu(cpu)) )
         {
-            if ( v->affinity_broken )
+            if ( sched_check_affinity_broken(unit) )
             {
-                sched_set_affinity(v, unit->cpu_hard_affinity_saved, NULL);
-                v->affinity_broken = 0;
+                sched_set_affinity(unit->vcpu_list,
+                                   unit->cpu_hard_affinity_saved, NULL);
+                sched_reset_affinity_broken(unit);
                 cpumask_and(cpumask_scratch_cpu(cpu), unit->cpu_hard_affinity,
                             cpupool_domain_cpumask(d));
             }
 
             if ( cpumask_empty(cpumask_scratch_cpu(cpu)) )
             {
-                printk(XENLOG_DEBUG "Breaking affinity for %pv\n", v);
-                sched_set_affinity(v, &cpumask_all, NULL);
+                printk(XENLOG_DEBUG "Breaking affinity for %pv\n",
+                       unit->vcpu_list);
+                sched_set_affinity(unit->vcpu_list, &cpumask_all, NULL);
                 cpumask_and(cpumask_scratch_cpu(cpu), unit->cpu_hard_affinity,
                             cpupool_domain_cpumask(d));
             }
@@ -934,12 +963,12 @@  void restore_vcpu_affinity(struct domain *d)
 
         /* v->processor might have changed, so reacquire the lock. */
         lock = unit_schedule_lock_irq(unit);
-        res = sched_pick_resource(vcpu_scheduler(v), unit);
+        res = sched_pick_resource(unit_scheduler(unit), unit);
         sched_set_res(unit, res);
         spin_unlock_irq(lock);
 
-        if ( old_cpu != v->processor )
-            sched_move_irqs(v->sched_unit);
+        if ( old_cpu != sched_unit_cpu(unit) )
+            sched_move_irqs(unit);
     }
 
     domain_update_node_affinity(d);
@@ -953,7 +982,6 @@  void restore_vcpu_affinity(struct domain *d)
 int cpu_disable_scheduler(unsigned int cpu)
 {
     struct domain *d;
-    struct vcpu *v;
     struct cpupool *c;
     cpumask_t online_affinity;
     int ret = 0;
@@ -964,17 +992,18 @@  int cpu_disable_scheduler(unsigned int cpu)
 
     for_each_domain_in_cpupool ( d, c )
     {
-        for_each_vcpu ( d, v )
+        struct sched_unit *unit;
+
+        for_each_sched_unit ( d, unit )
         {
             unsigned long flags;
-            struct sched_unit *unit = v->sched_unit;
             spinlock_t *lock = unit_schedule_lock_irqsave(unit, &flags);
 
             cpumask_and(&online_affinity, unit->cpu_hard_affinity, c->cpu_valid);
             if ( cpumask_empty(&online_affinity) &&
                  cpumask_test_cpu(cpu, unit->cpu_hard_affinity) )
             {
-                if ( v->affinity_broken )
+                if ( unit->vcpu_list->affinity_broken )
                 {
                     /* The vcpu is temporarily pinned, can't move it. */
                     unit_schedule_unlock_irqrestore(lock, flags, unit);
@@ -982,14 +1011,15 @@  int cpu_disable_scheduler(unsigned int cpu)
                     break;
                 }
 
-                printk(XENLOG_DEBUG "Breaking affinity for %pv\n", v);
+                printk(XENLOG_DEBUG "Breaking affinity for %pv\n",
+                       unit->vcpu_list);
 
-                sched_set_affinity(v, &cpumask_all, NULL);
+                sched_set_affinity(unit->vcpu_list, &cpumask_all, NULL);
             }
 
-            if ( v->processor != cpu )
+            if ( sched_unit_cpu(unit) != sched_get_resource_cpu(cpu) )
             {
-                /* The vcpu is not on this cpu, so we can move on. */
+                /* The unit is not on this cpu, so we can move on. */
                 unit_schedule_unlock_irqrestore(lock, flags, unit);
                 continue;
             }
@@ -1002,17 +1032,17 @@  int cpu_disable_scheduler(unsigned int cpu)
              *  * the scheduler will always find a suitable solution, or
              *    things would have failed before getting in here.
              */
-            vcpu_migrate_start(v);
+            vcpu_migrate_start(unit->vcpu_list);
             unit_schedule_unlock_irqrestore(lock, flags, unit);
 
-            vcpu_migrate_finish(v);
+            vcpu_migrate_finish(unit->vcpu_list);
 
             /*
              * The only caveat, in this case, is that if a vcpu active in
              * the hypervisor isn't migratable. In this case, the caller
              * should try again after releasing and reacquiring all locks.
              */
-            if ( v->processor == cpu )
+            if ( sched_unit_cpu(unit) == sched_get_resource_cpu(cpu) )
                 ret = -EAGAIN;
         }
     }
@@ -1023,8 +1053,8 @@  int cpu_disable_scheduler(unsigned int cpu)
 static int cpu_disable_scheduler_check(unsigned int cpu)
 {
     struct domain *d;
-    struct vcpu *v;
     struct cpupool *c;
+    struct vcpu *v;
 
     c = per_cpu(cpupool, cpu);
     if ( c == NULL )