
[v3,09/47] xen/sched: move some per-vcpu items to struct sched_unit

Message ID 20190914085251.18816-10-jgross@suse.com (mailing list archive)
State Superseded
Series xen: add core scheduling support

Commit Message

Jürgen Groß Sept. 14, 2019, 8:52 a.m. UTC
Affinities are scheduler-specific attributes, so they should be per
scheduling unit. Move all affinity-related fields in struct vcpu to
struct sched_unit. While at it, switch the affinity-related functions
in sched-if.h to take a pointer to sched_unit instead of vcpu as a
parameter.

The affinity_broken flag must be kept per vcpu, as it is related to
guest actions on specific vcpus. When support for multiple vcpus per
sched_unit is added, a unit is regarded as being subject to "broken
affinity" when any of its vcpus has the affinity_broken flag set.
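
(Illustration only, not part of the patch: a minimal sketch of the
per-unit view described above. The helper name unit_affinity_broken is
hypothetical; for_each_sched_unit_vcpu is the unit/vcpu iterator used
elsewhere in this series.)

    static bool unit_affinity_broken(const struct sched_unit *unit)
    {
        const struct vcpu *v;

        /* A unit is subject to "broken affinity" if any of its vcpus is. */
        for_each_sched_unit_vcpu ( unit, v )
            if ( v->affinity_broken )
                return true;

        return false;
    }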

Signed-off-by: Juergen Gross <jgross@suse.com>
---
V2:
- move affinity_broken back to struct vcpu (Jan Beulich)
- print affinities only once per unit (Jan Beulich)
---
 xen/common/domain.c        | 15 ++------
 xen/common/domctl.c        | 13 ++++---
 xen/common/keyhandler.c    | 58 ++++++++++++++++--------------
 xen/common/sched_credit.c  | 18 +++++-----
 xen/common/sched_credit2.c | 42 +++++++++++-----------
 xen/common/sched_null.c    | 18 +++++-----
 xen/common/sched_rt.c      |  9 ++---
 xen/common/schedule.c      | 90 ++++++++++++++++++++++++++++------------------
 xen/include/xen/sched-if.h | 17 ++++-----
 xen/include/xen/sched.h    | 22 ++++++------
 10 files changed, 163 insertions(+), 139 deletions(-)

Comments

Jan Beulich Sept. 19, 2019, 3:38 p.m. UTC | #1
On 14.09.2019 10:52, Juergen Gross wrote:
> Affinities are scheduler-specific attributes, so they should be per
> scheduling unit. Move all affinity-related fields in struct vcpu to
> struct sched_unit. While at it, switch the affinity-related functions
> in sched-if.h to take a pointer to sched_unit instead of vcpu as a
> parameter.
> 
> The affinity_broken flag must be kept per vcpu, as it is related to
> guest actions on specific vcpus. When support for multiple vcpus per
> sched_unit is added, a unit is regarded as being subject to "broken
> affinity" when any of its vcpus has the affinity_broken flag set.
> 
> Signed-off-by: Juergen Gross <jgross@suse.com>
> ---
> V2:
> - move affinity_broken back to struct vcpu (Jan Beulich)
> - print affinities only once per unit (Jan Beulich)
> ---
>  xen/common/domain.c        | 15 ++------
>  xen/common/domctl.c        | 13 ++++---
>  xen/common/keyhandler.c    | 58 ++++++++++++++++--------------

Acked-by: Jan Beulich <jbeulich@suse.com>
with one minor remark:

> --- a/xen/common/keyhandler.c
> +++ b/xen/common/keyhandler.c
> @@ -251,6 +251,7 @@ static void reboot_machine(unsigned char key, struct cpu_user_regs *regs)
>  static void dump_domains(unsigned char key)
>  {
>      struct domain *d;
> +    struct sched_unit *unit;
>      struct vcpu   *v;
>      s_time_t       now = NOW();

I can see how it would look a little odd here because of the
neighboring variables, but generally speaking the new one should
be a pointer to const (and the old ones should have been, too,
afaict).

Jan
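
(For reference, a sketch of the declarations in dump_domains() with
Jan's suggestion applied to the new variable only, which is what the
follow-up below agrees to do; whether the pre-existing pointers can
also become const depends on the functions they are passed to.)

    struct domain *d;
    const struct sched_unit *unit;  /* pointer to const, per the remark */
    struct vcpu   *v;
    s_time_t       now = NOW();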
Jürgen Groß Sept. 24, 2019, 11:44 a.m. UTC | #2
On 19.09.19 17:38, Jan Beulich wrote:
> On 14.09.2019 10:52, Juergen Gross wrote:
>> Affinities are scheduler-specific attributes, so they should be per
>> scheduling unit. Move all affinity-related fields in struct vcpu to
>> struct sched_unit. While at it, switch the affinity-related functions
>> in sched-if.h to take a pointer to sched_unit instead of vcpu as a
>> parameter.
>>
>> The affinity_broken flag must be kept per vcpu, as it is related to
>> guest actions on specific vcpus. When support for multiple vcpus per
>> sched_unit is added, a unit is regarded as being subject to "broken
>> affinity" when any of its vcpus has the affinity_broken flag set.
>>
>> Signed-off-by: Juergen Gross <jgross@suse.com>
>> ---
>> V2:
>> - move affinity_broken back to struct vcpu (Jan Beulich)
>> - print affinities only once per unit (Jan Beulich)
>> ---
>>   xen/common/domain.c        | 15 ++------
>>   xen/common/domctl.c        | 13 ++++---
>>   xen/common/keyhandler.c    | 58 ++++++++++++++++--------------
> 
> Acked-by: Jan Beulich <jbeulich@suse.com>
> with one minor remark:
> 
>> --- a/xen/common/keyhandler.c
>> +++ b/xen/common/keyhandler.c
>> @@ -251,6 +251,7 @@ static void reboot_machine(unsigned char key, struct cpu_user_regs *regs)
>>   static void dump_domains(unsigned char key)
>>   {
>>       struct domain *d;
>> +    struct sched_unit *unit;
>>       struct vcpu   *v;
>>       s_time_t       now = NOW();
> 
> I can see how it would look a little odd here because of the
> neighboring variables, but generally speaking the new one should
> be a pointer to const (and the old ones should have been, too,
> afaict).

Yes. Will change unit for now, and the others in (you might have guessed
that already) the ever-growing scheduling cleanup series (yes, I'm
noting the topics for that series).


Juergen
Dario Faggioli Sept. 24, 2019, 5:23 p.m. UTC | #3
On Sat, 2019-09-14 at 10:52 +0200, Juergen Gross wrote:
> Affinities are scheduler-specific attributes, so they should be per
> scheduling unit. Move all affinity-related fields in struct vcpu to
> struct sched_unit. While at it, switch the affinity-related functions
> in sched-if.h to take a pointer to sched_unit instead of vcpu as a
> parameter.
> 
> The affinity_broken flag must be kept per vcpu, as it is related to
> guest actions on specific vcpus. When support for multiple vcpus per
> sched_unit is added, a unit is regarded as being subject to "broken
> affinity" when any of its vcpus has the affinity_broken flag set.
> 
> Signed-off-by: Juergen Gross <jgross@suse.com>
>
Reviewed-by: Dario Faggioli <dfaggioli@suse.com>

Regards

Patch

diff --git a/xen/common/domain.c b/xen/common/domain.c
index 0cff749bbe..8c782ee03f 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -132,10 +132,6 @@  static void vcpu_info_reset(struct vcpu *v)
 
 static void vcpu_destroy(struct vcpu *v)
 {
-    free_cpumask_var(v->cpu_hard_affinity);
-    free_cpumask_var(v->cpu_hard_affinity_saved);
-    free_cpumask_var(v->cpu_soft_affinity);
-
     free_vcpu_struct(v);
 }
 
@@ -159,11 +155,6 @@  struct vcpu *vcpu_create(
 
     grant_table_init_vcpu(v);
 
-    if ( !zalloc_cpumask_var(&v->cpu_hard_affinity) ||
-         !zalloc_cpumask_var(&v->cpu_hard_affinity_saved) ||
-         !zalloc_cpumask_var(&v->cpu_soft_affinity) )
-        goto fail;
-
     if ( is_idle_domain(d) )
     {
         v->runstate.state = RUNSTATE_running;
@@ -203,7 +194,6 @@  struct vcpu *vcpu_create(
     sched_destroy_vcpu(v);
  fail_wq:
     destroy_waitqueue_vcpu(v);
- fail:
     vcpu_destroy(v);
 
     return NULL;
@@ -600,9 +590,10 @@  void domain_update_node_affinity(struct domain *d)
          */
         for_each_vcpu ( d, v )
         {
-            cpumask_or(dom_cpumask, dom_cpumask, v->cpu_hard_affinity);
+            cpumask_or(dom_cpumask, dom_cpumask,
+                       v->sched_unit->cpu_hard_affinity);
             cpumask_or(dom_cpumask_soft, dom_cpumask_soft,
-                       v->cpu_soft_affinity);
+                       v->sched_unit->cpu_soft_affinity);
         }
         /* Filter out non-online cpus */
         cpumask_and(dom_cpumask, dom_cpumask, online);
diff --git a/xen/common/domctl.c b/xen/common/domctl.c
index 6e6e9b9866..2f9768f08f 100644
--- a/xen/common/domctl.c
+++ b/xen/common/domctl.c
@@ -606,6 +606,7 @@  long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
     case XEN_DOMCTL_getvcpuaffinity:
     {
         struct vcpu *v;
+        const struct sched_unit *unit;
         struct xen_domctl_vcpuaffinity *vcpuaff = &op->u.vcpuaffinity;
 
         ret = -EINVAL;
@@ -616,6 +617,7 @@  long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
         if ( (v = d->vcpu[vcpuaff->vcpu]) == NULL )
             break;
 
+        unit = v->sched_unit;
         ret = -EINVAL;
         if ( vcpuaffinity_params_invalid(vcpuaff) )
             break;
@@ -635,7 +637,7 @@  long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
                 ret = -ENOMEM;
                 break;
             }
-            cpumask_copy(old_affinity, v->cpu_hard_affinity);
+            cpumask_copy(old_affinity, unit->cpu_hard_affinity);
 
             if ( !alloc_cpumask_var(&new_affinity) )
             {
@@ -668,7 +670,7 @@  long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
                  * For hard affinity, what we return is the intersection of
                  * cpupool's online mask and the new hard affinity.
                  */
-                cpumask_and(new_affinity, online, v->cpu_hard_affinity);
+                cpumask_and(new_affinity, online, unit->cpu_hard_affinity);
                 ret = cpumask_to_xenctl_bitmap(&vcpuaff->cpumap_hard,
                                                new_affinity);
             }
@@ -697,7 +699,8 @@  long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
                  * hard affinity.
                  */
                 cpumask_and(new_affinity, new_affinity, online);
-                cpumask_and(new_affinity, new_affinity, v->cpu_hard_affinity);
+                cpumask_and(new_affinity, new_affinity,
+                            unit->cpu_hard_affinity);
                 ret = cpumask_to_xenctl_bitmap(&vcpuaff->cpumap_soft,
                                                new_affinity);
             }
@@ -710,10 +713,10 @@  long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
         {
             if ( vcpuaff->flags & XEN_VCPUAFFINITY_HARD )
                 ret = cpumask_to_xenctl_bitmap(&vcpuaff->cpumap_hard,
-                                               v->cpu_hard_affinity);
+                                               unit->cpu_hard_affinity);
             if ( vcpuaff->flags & XEN_VCPUAFFINITY_SOFT )
                 ret = cpumask_to_xenctl_bitmap(&vcpuaff->cpumap_soft,
-                                               v->cpu_soft_affinity);
+                                               unit->cpu_soft_affinity);
         }
         break;
     }
diff --git a/xen/common/keyhandler.c b/xen/common/keyhandler.c
index c36baa4dff..0317ae1561 100644
--- a/xen/common/keyhandler.c
+++ b/xen/common/keyhandler.c
@@ -251,6 +251,7 @@  static void reboot_machine(unsigned char key, struct cpu_user_regs *regs)
 static void dump_domains(unsigned char key)
 {
     struct domain *d;
+    struct sched_unit *unit;
     struct vcpu   *v;
     s_time_t       now = NOW();
 
@@ -297,33 +298,38 @@  static void dump_domains(unsigned char key)
 
         printk("VCPU information and callbacks for domain %u:\n",
                d->domain_id);
-        for_each_vcpu ( d, v )
-        {
-            if ( !(v->vcpu_id & 0x3f) )
-                process_pending_softirqs();
 
-            printk("    VCPU%d: CPU%d [has=%c] poll=%d "
-                   "upcall_pend=%02x upcall_mask=%02x ",
-                   v->vcpu_id, v->processor,
-                   v->is_running ? 'T':'F', v->poll_evtchn,
-                   vcpu_info(v, evtchn_upcall_pending),
-                   !vcpu_event_delivery_is_enabled(v));
-            if ( vcpu_cpu_dirty(v) )
-                printk("dirty_cpu=%u", v->dirty_cpu);
-            printk("\n");
-            printk("    cpu_hard_affinity={%*pbl} cpu_soft_affinity={%*pbl}\n",
-                   CPUMASK_PR(v->cpu_hard_affinity),
-                   CPUMASK_PR(v->cpu_soft_affinity));
-            printk("    pause_count=%d pause_flags=%lx\n",
-                   atomic_read(&v->pause_count), v->pause_flags);
-            arch_dump_vcpu_info(v);
-
-            if ( v->periodic_period == 0 )
-                printk("No periodic timer\n");
-            else
-                printk("%"PRI_stime" Hz periodic timer (period %"PRI_stime" ms)\n",
-                       1000000000 / v->periodic_period,
-                       v->periodic_period / 1000000);
+        for_each_sched_unit ( d, unit )
+        {
+            printk("  UNIT%d affinities: hard={%*pbl} soft={%*pbl}\n",
+                   unit->unit_id, CPUMASK_PR(unit->cpu_hard_affinity),
+                   CPUMASK_PR(unit->cpu_soft_affinity));
+
+            for_each_sched_unit_vcpu ( unit, v )
+            {
+                if ( !(v->vcpu_id & 0x3f) )
+                    process_pending_softirqs();
+
+                printk("    VCPU%d: CPU%d [has=%c] poll=%d "
+                       "upcall_pend=%02x upcall_mask=%02x ",
+                       v->vcpu_id, v->processor,
+                       v->is_running ? 'T':'F', v->poll_evtchn,
+                       vcpu_info(v, evtchn_upcall_pending),
+                       !vcpu_event_delivery_is_enabled(v));
+                if ( vcpu_cpu_dirty(v) )
+                    printk("dirty_cpu=%u", v->dirty_cpu);
+                printk("\n");
+                printk("    pause_count=%d pause_flags=%lx\n",
+                       atomic_read(&v->pause_count), v->pause_flags);
+                arch_dump_vcpu_info(v);
+
+                if ( v->periodic_period == 0 )
+                    printk("No periodic timer\n");
+                else
+                    printk("%"PRI_stime" Hz periodic timer (period %"PRI_stime" ms)\n",
+                           1000000000 / v->periodic_period,
+                           v->periodic_period / 1000000);
+            }
         }
     }
 
diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c
index 7621a5527e..0bc1f70f32 100644
--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -352,6 +352,7 @@  DEFINE_PER_CPU(unsigned int, last_tickle_cpu);
 static inline void __runq_tickle(struct csched_unit *new)
 {
     unsigned int cpu = new->vcpu->processor;
+    struct sched_unit *unit = new->vcpu->sched_unit;
     struct csched_unit * const cur = CSCHED_UNIT(curr_on_cpu(cpu));
     struct csched_private *prv = CSCHED_PRIV(per_cpu(scheduler, cpu));
     cpumask_t mask, idle_mask, *online;
@@ -377,7 +378,7 @@  static inline void __runq_tickle(struct csched_unit *new)
     if ( unlikely(test_bit(CSCHED_FLAG_VCPU_PINNED, &new->flags) &&
                   cpumask_test_cpu(cpu, &idle_mask)) )
     {
-        ASSERT(cpumask_cycle(cpu, new->vcpu->cpu_hard_affinity) == cpu);
+        ASSERT(cpumask_cycle(cpu, unit->cpu_hard_affinity) == cpu);
         SCHED_STAT_CRANK(tickled_idle_cpu_excl);
         __cpumask_set_cpu(cpu, &mask);
         goto tickle;
@@ -412,11 +413,11 @@  static inline void __runq_tickle(struct csched_unit *new)
             int new_idlers_empty;
 
             if ( balance_step == BALANCE_SOFT_AFFINITY
-                 && !has_soft_affinity(new->vcpu) )
+                 && !has_soft_affinity(unit) )
                 continue;
 
             /* Are there idlers suitable for new (for this balance step)? */
-            affinity_balance_cpumask(new->vcpu, balance_step,
+            affinity_balance_cpumask(unit, balance_step,
                                      cpumask_scratch_cpu(cpu));
             cpumask_and(cpumask_scratch_cpu(cpu),
                         cpumask_scratch_cpu(cpu), &idle_mask);
@@ -445,8 +446,7 @@  static inline void __runq_tickle(struct csched_unit *new)
              */
             if ( new_idlers_empty && new->pri > cur->pri )
             {
-                if ( cpumask_intersects(cur->vcpu->cpu_hard_affinity,
-                                        &idle_mask) )
+                if ( cpumask_intersects(unit->cpu_hard_affinity, &idle_mask) )
                 {
                     SCHED_VCPU_STAT_CRANK(cur, kicked_away);
                     SCHED_VCPU_STAT_CRANK(cur, migrate_r);
@@ -728,7 +728,7 @@  _csched_cpu_pick(const struct scheduler *ops, struct vcpu *vc, bool_t commit)
 
     for_each_affinity_balance_step( balance_step )
     {
-        affinity_balance_cpumask(vc, balance_step, cpus);
+        affinity_balance_cpumask(vc->sched_unit, balance_step, cpus);
         cpumask_and(cpus, online, cpus);
         /*
          * We want to pick up a pcpu among the ones that are online and
@@ -747,7 +747,7 @@  _csched_cpu_pick(const struct scheduler *ops, struct vcpu *vc, bool_t commit)
          * balancing step all together.
          */
         if ( balance_step == BALANCE_SOFT_AFFINITY &&
-             (!has_soft_affinity(vc) || cpumask_empty(cpus)) )
+             (!has_soft_affinity(vc->sched_unit) || cpumask_empty(cpus)) )
             continue;
 
         /* If present, prefer vc's current processor */
@@ -1647,10 +1647,10 @@  csched_runq_steal(int peer_cpu, int cpu, int pri, int balance_step)
          * or counter.
          */
         if ( vc->is_running || (balance_step == BALANCE_SOFT_AFFINITY &&
-                                !has_soft_affinity(vc)) )
+                                !has_soft_affinity(vc->sched_unit)) )
             continue;
 
-        affinity_balance_cpumask(vc, balance_step, cpumask_scratch);
+        affinity_balance_cpumask(vc->sched_unit, balance_step, cpumask_scratch);
         if ( __csched_vcpu_is_migrateable(prv, vc, cpu, cpumask_scratch) )
         {
             /* We got a candidate. Grab it! */
diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c
index 4c471bf482..fee1471f80 100644
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -700,10 +700,10 @@  static int get_fallback_cpu(struct csched2_unit *svc)
     {
         int cpu = v->processor;
 
-        if ( bs == BALANCE_SOFT_AFFINITY && !has_soft_affinity(v) )
+        if ( bs == BALANCE_SOFT_AFFINITY && !has_soft_affinity(v->sched_unit) )
             continue;
 
-        affinity_balance_cpumask(v, bs, cpumask_scratch_cpu(cpu));
+        affinity_balance_cpumask(v->sched_unit, bs, cpumask_scratch_cpu(cpu));
         cpumask_and(cpumask_scratch_cpu(cpu), cpumask_scratch_cpu(cpu),
                     cpupool_domain_cpumask(v->domain));
 
@@ -1391,10 +1391,10 @@  static s_time_t tickle_score(const struct scheduler *ops, s_time_t now,
      */
     if ( score > 0 )
     {
-        if ( cpumask_test_cpu(cpu, new->vcpu->cpu_soft_affinity) )
+        if ( cpumask_test_cpu(cpu, new->vcpu->sched_unit->cpu_soft_affinity) )
             score += CSCHED2_CREDIT_INIT;
 
-        if ( !cpumask_test_cpu(cpu, cur->vcpu->cpu_soft_affinity) )
+        if ( !cpumask_test_cpu(cpu, cur->vcpu->sched_unit->cpu_soft_affinity) )
             score += CSCHED2_CREDIT_INIT;
     }
 
@@ -1437,6 +1437,7 @@  runq_tickle(const struct scheduler *ops, struct csched2_unit *new, s_time_t now)
 {
     int i, ipid = -1;
     s_time_t max = 0;
+    struct sched_unit *unit = new->vcpu->sched_unit;
     unsigned int bs, cpu = new->vcpu->processor;
     struct csched2_runqueue_data *rqd = c2rqd(ops, cpu);
     cpumask_t *online = cpupool_domain_cpumask(new->vcpu->domain);
@@ -1474,7 +1475,7 @@  runq_tickle(const struct scheduler *ops, struct csched2_unit *new, s_time_t now)
                   cpumask_test_cpu(cpu, &rqd->idle) &&
                   !cpumask_test_cpu(cpu, &rqd->tickled)) )
     {
-        ASSERT(cpumask_cycle(cpu, new->vcpu->cpu_hard_affinity) == cpu);
+        ASSERT(cpumask_cycle(cpu, unit->cpu_hard_affinity) == cpu);
         SCHED_STAT_CRANK(tickled_idle_cpu_excl);
         ipid = cpu;
         goto tickle;
@@ -1483,10 +1484,10 @@  runq_tickle(const struct scheduler *ops, struct csched2_unit *new, s_time_t now)
     for_each_affinity_balance_step( bs )
     {
         /* Just skip first step, if we don't have a soft affinity */
-        if ( bs == BALANCE_SOFT_AFFINITY && !has_soft_affinity(new->vcpu) )
+        if ( bs == BALANCE_SOFT_AFFINITY && !has_soft_affinity(unit) )
             continue;
 
-        affinity_balance_cpumask(new->vcpu, bs, cpumask_scratch_cpu(cpu));
+        affinity_balance_cpumask(unit, bs, cpumask_scratch_cpu(cpu));
 
         /*
          * First of all, consider idle cpus, checking if we can just
@@ -1558,7 +1559,7 @@  runq_tickle(const struct scheduler *ops, struct csched2_unit *new, s_time_t now)
             ipid = cpu;
 
             /* If this is in new's soft affinity, just take it */
-            if ( cpumask_test_cpu(cpu, new->vcpu->cpu_soft_affinity) )
+            if ( cpumask_test_cpu(cpu, unit->cpu_soft_affinity) )
             {
                 SCHED_STAT_CRANK(tickled_busy_cpu);
                 goto tickle;
@@ -2244,7 +2245,7 @@  csched2_res_pick(const struct scheduler *ops, const struct sched_unit *unit)
         goto out;
     }
 
-    cpumask_and(cpumask_scratch_cpu(cpu), vc->cpu_hard_affinity,
+    cpumask_and(cpumask_scratch_cpu(cpu), unit->cpu_hard_affinity,
                 cpupool_domain_cpumask(vc->domain));
 
     /*
@@ -2289,7 +2290,7 @@  csched2_res_pick(const struct scheduler *ops, const struct sched_unit *unit)
      *
      * Find both runqueues in one pass.
      */
-    has_soft = has_soft_affinity(vc);
+    has_soft = has_soft_affinity(unit);
     for_each_cpu(i, &prv->active_queues)
     {
         struct csched2_runqueue_data *rqd;
@@ -2336,7 +2337,7 @@  csched2_res_pick(const struct scheduler *ops, const struct sched_unit *unit)
             cpumask_t mask;
 
             cpumask_and(&mask, cpumask_scratch_cpu(cpu), &rqd->active);
-            if ( cpumask_intersects(&mask, svc->vcpu->cpu_soft_affinity) )
+            if ( cpumask_intersects(&mask, unit->cpu_soft_affinity) )
             {
                 min_s_avgload = rqd_avgload;
                 min_s_rqi = i;
@@ -2358,9 +2359,9 @@  csched2_res_pick(const struct scheduler *ops, const struct sched_unit *unit)
          * Note that, to obtain the soft-affinity mask, we "just" put what we
          * have in cpumask_scratch in && with vc->cpu_soft_affinity. This is
          * ok because:
-         * - we know that vc->cpu_hard_affinity and vc->cpu_soft_affinity have
+         * - we know that unit->cpu_hard_affinity and ->cpu_soft_affinity have
          *   a non-empty intersection (because has_soft is true);
-         * - we have vc->cpu_hard_affinity & cpupool_domain_cpumask() already
+         * - we have unit->cpu_hard_affinity & cpupool_domain_cpumask() already
          *   in cpumask_scratch, we do save a lot doing like this.
          *
          * It's kind of like open coding affinity_balance_cpumask() but, in
@@ -2368,7 +2369,7 @@  csched2_res_pick(const struct scheduler *ops, const struct sched_unit *unit)
          * cpumask operations.
          */
         cpumask_and(cpumask_scratch_cpu(cpu), cpumask_scratch_cpu(cpu),
-                    vc->cpu_soft_affinity);
+                    unit->cpu_soft_affinity);
         cpumask_and(cpumask_scratch_cpu(cpu), cpumask_scratch_cpu(cpu),
                     &prv->rqd[min_s_rqi].active);
     }
@@ -2476,6 +2477,7 @@  static void migrate(const struct scheduler *ops,
                     s_time_t now)
 {
     int cpu = svc->vcpu->processor;
+    struct sched_unit *unit = svc->vcpu->sched_unit;
 
     if ( unlikely(tb_init_done) )
     {
@@ -2513,7 +2515,7 @@  static void migrate(const struct scheduler *ops,
         }
         _runq_deassign(svc);
 
-        cpumask_and(cpumask_scratch_cpu(cpu), svc->vcpu->cpu_hard_affinity,
+        cpumask_and(cpumask_scratch_cpu(cpu), unit->cpu_hard_affinity,
                     cpupool_domain_cpumask(svc->vcpu->domain));
         cpumask_and(cpumask_scratch_cpu(cpu), cpumask_scratch_cpu(cpu),
                     &trqd->active);
@@ -2547,7 +2549,7 @@  static bool vcpu_is_migrateable(struct csched2_unit *svc,
     struct vcpu *v = svc->vcpu;
     int cpu = svc->vcpu->processor;
 
-    cpumask_and(cpumask_scratch_cpu(cpu), v->cpu_hard_affinity,
+    cpumask_and(cpumask_scratch_cpu(cpu), v->sched_unit->cpu_hard_affinity,
                 cpupool_domain_cpumask(v->domain));
 
     return !(svc->flags & CSFLAG_runq_migrate_request) &&
@@ -2781,7 +2783,7 @@  csched2_unit_migrate(
 
     /* If here, new_cpu must be a valid Credit2 pCPU, and in our affinity. */
     ASSERT(cpumask_test_cpu(new_cpu, &csched2_priv(ops)->initialized));
-    ASSERT(cpumask_test_cpu(new_cpu, vc->cpu_hard_affinity));
+    ASSERT(cpumask_test_cpu(new_cpu, unit->cpu_hard_affinity));
 
     trqd = c2rqd(ops, new_cpu);
 
@@ -3321,9 +3323,9 @@  runq_candidate(struct csched2_runqueue_data *rqd,
     }
 
     /* If scurr has a soft-affinity, let's check whether cpu is part of it */
-    if ( has_soft_affinity(scurr->vcpu) )
+    if ( has_soft_affinity(scurr->vcpu->sched_unit) )
     {
-        affinity_balance_cpumask(scurr->vcpu, BALANCE_SOFT_AFFINITY,
+        affinity_balance_cpumask(scurr->vcpu->sched_unit, BALANCE_SOFT_AFFINITY,
                                  cpumask_scratch);
         if ( unlikely(!cpumask_test_cpu(cpu, cpumask_scratch)) )
         {
@@ -3378,7 +3380,7 @@  runq_candidate(struct csched2_runqueue_data *rqd,
         }
 
         /* Only consider vcpus that are allowed to run on this processor. */
-        if ( !cpumask_test_cpu(cpu, svc->vcpu->cpu_hard_affinity) )
+        if ( !cpumask_test_cpu(cpu, svc->vcpu->sched_unit->cpu_hard_affinity) )
         {
             (*skipped)++;
             continue;
diff --git a/xen/common/sched_null.c b/xen/common/sched_null.c
index b7700c91a2..1d96dc5fa5 100644
--- a/xen/common/sched_null.c
+++ b/xen/common/sched_null.c
@@ -122,7 +122,8 @@  static inline struct null_unit *null_unit(const struct sched_unit *unit)
 static inline bool vcpu_check_affinity(struct vcpu *v, unsigned int cpu,
                                        unsigned int balance_step)
 {
-    affinity_balance_cpumask(v, balance_step, cpumask_scratch_cpu(cpu));
+    affinity_balance_cpumask(v->sched_unit, balance_step,
+                             cpumask_scratch_cpu(cpu));
     cpumask_and(cpumask_scratch_cpu(cpu), cpumask_scratch_cpu(cpu),
                 cpupool_domain_cpumask(v->domain));
 
@@ -273,10 +274,10 @@  pick_res(struct null_private *prv, const struct sched_unit *unit)
 
     for_each_affinity_balance_step( bs )
     {
-        if ( bs == BALANCE_SOFT_AFFINITY && !has_soft_affinity(v) )
+        if ( bs == BALANCE_SOFT_AFFINITY && !has_soft_affinity(unit) )
             continue;
 
-        affinity_balance_cpumask(v, bs, cpumask_scratch_cpu(cpu));
+        affinity_balance_cpumask(unit, bs, cpumask_scratch_cpu(cpu));
         cpumask_and(cpumask_scratch_cpu(cpu), cpumask_scratch_cpu(cpu), cpus);
 
         /*
@@ -313,7 +314,7 @@  pick_res(struct null_private *prv, const struct sched_unit *unit)
      * as we will actually assign the vCPU to the pCPU we return from here,
      * only if the pCPU is free.
      */
-    cpumask_and(cpumask_scratch_cpu(cpu), cpus, v->cpu_hard_affinity);
+    cpumask_and(cpumask_scratch_cpu(cpu), cpus, unit->cpu_hard_affinity);
     new_cpu = cpumask_any(cpumask_scratch_cpu(cpu));
 
  out:
@@ -396,7 +397,8 @@  static bool vcpu_deassign(struct null_private *prv, struct vcpu *v)
     {
         list_for_each_entry( wvc, &prv->waitq, waitq_elem )
         {
-            if ( bs == BALANCE_SOFT_AFFINITY && !has_soft_affinity(wvc->vcpu) )
+            if ( bs == BALANCE_SOFT_AFFINITY &&
+                 !has_soft_affinity(wvc->vcpu->sched_unit) )
                 continue;
 
             if ( vcpu_check_affinity(wvc->vcpu, cpu, bs) )
@@ -466,7 +468,7 @@  static void null_unit_insert(const struct scheduler *ops,
 
     lock = unit_schedule_lock(unit);
 
-    cpumask_and(cpumask_scratch_cpu(cpu), v->cpu_hard_affinity,
+    cpumask_and(cpumask_scratch_cpu(cpu), unit->cpu_hard_affinity,
                 cpupool_domain_cpumask(v->domain));
 
     /* If the pCPU is free, we assign v to it */
@@ -579,7 +581,7 @@  static void null_unit_wake(const struct scheduler *ops,
         list_add_tail(&nvc->waitq_elem, &prv->waitq);
         spin_unlock(&prv->waitq_lock);
 
-        cpumask_and(cpumask_scratch_cpu(cpu), v->cpu_hard_affinity,
+        cpumask_and(cpumask_scratch_cpu(cpu), unit->cpu_hard_affinity,
                     cpupool_domain_cpumask(v->domain));
 
         if ( !cpumask_intersects(&prv->cpus_free, cpumask_scratch_cpu(cpu)) )
@@ -848,7 +850,7 @@  static struct task_slice null_schedule(const struct scheduler *ops,
             list_for_each_entry( wvc, &prv->waitq, waitq_elem )
             {
                 if ( bs == BALANCE_SOFT_AFFINITY &&
-                     !has_soft_affinity(wvc->vcpu) )
+                     !has_soft_affinity(wvc->vcpu->sched_unit) )
                     continue;
 
                 if ( vcpu_check_affinity(wvc->vcpu, cpu, bs) )
diff --git a/xen/common/sched_rt.c b/xen/common/sched_rt.c
index 0d419b960e..927f630531 100644
--- a/xen/common/sched_rt.c
+++ b/xen/common/sched_rt.c
@@ -327,7 +327,7 @@  rt_dump_vcpu(const struct scheduler *ops, const struct rt_unit *svc)
     mask = cpumask_scratch_cpu(svc->vcpu->processor);
 
     cpupool_mask = cpupool_domain_cpumask(svc->vcpu->domain);
-    cpumask_and(mask, cpupool_mask, svc->vcpu->cpu_hard_affinity);
+    cpumask_and(mask, cpupool_mask, svc->vcpu->sched_unit->cpu_hard_affinity);
     printk("[%5d.%-2u] cpu %u, (%"PRI_stime", %"PRI_stime"),"
            " cur_b=%"PRI_stime" cur_d=%"PRI_stime" last_start=%"PRI_stime"\n"
            " \t\t priority_level=%d has_extratime=%d\n"
@@ -644,7 +644,7 @@  rt_res_pick(const struct scheduler *ops, const struct sched_unit *unit)
     int cpu;
 
     online = cpupool_domain_cpumask(vc->domain);
-    cpumask_and(&cpus, online, vc->cpu_hard_affinity);
+    cpumask_and(&cpus, online, unit->cpu_hard_affinity);
 
     cpu = cpumask_test_cpu(vc->processor, &cpus)
             ? vc->processor
@@ -1022,7 +1022,8 @@  runq_pick(const struct scheduler *ops, const cpumask_t *mask)
 
         /* mask cpu_hard_affinity & cpupool & mask */
         online = cpupool_domain_cpumask(iter_svc->vcpu->domain);
-        cpumask_and(&cpu_common, online, iter_svc->vcpu->cpu_hard_affinity);
+        cpumask_and(&cpu_common, online,
+                    iter_svc->vcpu->sched_unit->cpu_hard_affinity);
         cpumask_and(&cpu_common, mask, &cpu_common);
         if ( cpumask_empty(&cpu_common) )
             continue;
@@ -1191,7 +1192,7 @@  runq_tickle(const struct scheduler *ops, struct rt_unit *new)
         return;
 
     online = cpupool_domain_cpumask(new->vcpu->domain);
-    cpumask_and(&not_tickled, online, new->vcpu->cpu_hard_affinity);
+    cpumask_and(&not_tickled, online, new->vcpu->sched_unit->cpu_hard_affinity);
     cpumask_andnot(&not_tickled, &not_tickled, &prv->tickled);
 
     /*
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index fed488bc1f..11e5c2895b 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -329,6 +329,11 @@  static void sched_free_unit(struct sched_unit *unit)
     }
 
     unit->vcpu_list->sched_unit = NULL;
+
+    free_cpumask_var(unit->cpu_hard_affinity);
+    free_cpumask_var(unit->cpu_hard_affinity_saved);
+    free_cpumask_var(unit->cpu_soft_affinity);
+
     xfree(unit);
 }
 
@@ -354,7 +359,16 @@  static struct sched_unit *sched_alloc_unit(struct vcpu *v)
     unit->next_in_list = *prev_unit;
     *prev_unit = unit;
 
+    if ( !zalloc_cpumask_var(&unit->cpu_hard_affinity) ||
+         !zalloc_cpumask_var(&unit->cpu_hard_affinity_saved) ||
+         !zalloc_cpumask_var(&unit->cpu_soft_affinity) )
+        goto fail;
+
     return unit;
+
+ fail:
+    sched_free_unit(unit);
+    return NULL;
 }
 
 int sched_init_vcpu(struct vcpu *v, unsigned int processor)
@@ -744,7 +758,7 @@  static void vcpu_migrate_finish(struct vcpu *v)
              */
             if ( pick_called &&
                  (new_lock == get_sched_res(new_cpu)->schedule_lock) &&
-                 cpumask_test_cpu(new_cpu, v->cpu_hard_affinity) &&
+                 cpumask_test_cpu(new_cpu, v->sched_unit->cpu_hard_affinity) &&
                  cpumask_test_cpu(new_cpu, v->domain->cpupool->cpu_valid) )
                 break;
 
@@ -802,6 +816,7 @@  void restore_vcpu_affinity(struct domain *d)
     {
         spinlock_t *lock;
         unsigned int old_cpu = v->processor;
+        struct sched_unit *unit = v->sched_unit;
 
         ASSERT(!vcpu_runnable(v));
 
@@ -813,17 +828,17 @@  void restore_vcpu_affinity(struct domain *d)
          * set v->processor of each of their vCPUs to something that will
          * make sense for the scheduler of the cpupool in which they are in.
          */
-        lock = unit_schedule_lock_irq(v->sched_unit);
+        lock = unit_schedule_lock_irq(unit);
 
-        cpumask_and(cpumask_scratch_cpu(cpu), v->cpu_hard_affinity,
+        cpumask_and(cpumask_scratch_cpu(cpu), unit->cpu_hard_affinity,
                     cpupool_domain_cpumask(d));
         if ( cpumask_empty(cpumask_scratch_cpu(cpu)) )
         {
             if ( v->affinity_broken )
             {
-                sched_set_affinity(v, v->cpu_hard_affinity_saved, NULL);
+                sched_set_affinity(v, unit->cpu_hard_affinity_saved, NULL);
                 v->affinity_broken = 0;
-                cpumask_and(cpumask_scratch_cpu(cpu), v->cpu_hard_affinity,
+                cpumask_and(cpumask_scratch_cpu(cpu), unit->cpu_hard_affinity,
                             cpupool_domain_cpumask(d));
             }
 
@@ -831,21 +846,20 @@  void restore_vcpu_affinity(struct domain *d)
             {
                 printk(XENLOG_DEBUG "Breaking affinity for %pv\n", v);
                 sched_set_affinity(v, &cpumask_all, NULL);
-                cpumask_and(cpumask_scratch_cpu(cpu), v->cpu_hard_affinity,
+                cpumask_and(cpumask_scratch_cpu(cpu), unit->cpu_hard_affinity,
                             cpupool_domain_cpumask(d));
             }
         }
 
         v->processor = cpumask_any(cpumask_scratch_cpu(cpu));
-        v->sched_unit->res = get_sched_res(v->processor);
+        unit->res = get_sched_res(v->processor);
 
         spin_unlock_irq(lock);
 
         /* v->processor might have changed, so reacquire the lock. */
-        lock = unit_schedule_lock_irq(v->sched_unit);
-        v->sched_unit->res = sched_pick_resource(vcpu_scheduler(v),
-                                                 v->sched_unit);
-        v->processor = v->sched_unit->res->master_cpu;
+        lock = unit_schedule_lock_irq(unit);
+        unit->res = sched_pick_resource(vcpu_scheduler(v), unit);
+        v->processor = unit->res->master_cpu;
         spin_unlock_irq(lock);
 
         if ( old_cpu != v->processor )
@@ -877,16 +891,17 @@  int cpu_disable_scheduler(unsigned int cpu)
         for_each_vcpu ( d, v )
         {
             unsigned long flags;
-            spinlock_t *lock = unit_schedule_lock_irqsave(v->sched_unit, &flags);
+            struct sched_unit *unit = v->sched_unit;
+            spinlock_t *lock = unit_schedule_lock_irqsave(unit, &flags);
 
-            cpumask_and(&online_affinity, v->cpu_hard_affinity, c->cpu_valid);
+            cpumask_and(&online_affinity, unit->cpu_hard_affinity, c->cpu_valid);
             if ( cpumask_empty(&online_affinity) &&
-                 cpumask_test_cpu(cpu, v->cpu_hard_affinity) )
+                 cpumask_test_cpu(cpu, unit->cpu_hard_affinity) )
             {
                 if ( v->affinity_broken )
                 {
                     /* The vcpu is temporarily pinned, can't move it. */
-                    unit_schedule_unlock_irqrestore(lock, flags, v->sched_unit);
+                    unit_schedule_unlock_irqrestore(lock, flags, unit);
                     ret = -EADDRINUSE;
                     break;
                 }
@@ -899,7 +914,7 @@  int cpu_disable_scheduler(unsigned int cpu)
             if ( v->processor != cpu )
             {
                 /* The vcpu is not on this cpu, so we can move on. */
-                unit_schedule_unlock_irqrestore(lock, flags, v->sched_unit);
+                unit_schedule_unlock_irqrestore(lock, flags, unit);
                 continue;
             }
 
@@ -912,7 +927,7 @@  int cpu_disable_scheduler(unsigned int cpu)
              *    things would have failed before getting in here.
              */
             vcpu_migrate_start(v);
-            unit_schedule_unlock_irqrestore(lock, flags, v->sched_unit);
+            unit_schedule_unlock_irqrestore(lock, flags, unit);
 
             vcpu_migrate_finish(v);
 
@@ -957,26 +972,29 @@  static int cpu_disable_scheduler_check(unsigned int cpu)
 void sched_set_affinity(
     struct vcpu *v, const cpumask_t *hard, const cpumask_t *soft)
 {
-    sched_adjust_affinity(dom_scheduler(v->domain), v->sched_unit, hard, soft);
+    struct sched_unit *unit = v->sched_unit;
+
+    sched_adjust_affinity(dom_scheduler(v->domain), unit, hard, soft);
 
     if ( hard )
-        cpumask_copy(v->cpu_hard_affinity, hard);
+        cpumask_copy(unit->cpu_hard_affinity, hard);
     if ( soft )
-        cpumask_copy(v->cpu_soft_affinity, soft);
+        cpumask_copy(unit->cpu_soft_affinity, soft);
 
-    v->soft_aff_effective = !cpumask_subset(v->cpu_hard_affinity,
-                                            v->cpu_soft_affinity) &&
-                            cpumask_intersects(v->cpu_soft_affinity,
-                                               v->cpu_hard_affinity);
+    unit->soft_aff_effective = !cpumask_subset(unit->cpu_hard_affinity,
+                                               unit->cpu_soft_affinity) &&
+                               cpumask_intersects(unit->cpu_soft_affinity,
+                                                  unit->cpu_hard_affinity);
 }
 
 static int vcpu_set_affinity(
     struct vcpu *v, const cpumask_t *affinity, const cpumask_t *which)
 {
+    struct sched_unit *unit = v->sched_unit;
     spinlock_t *lock;
     int ret = 0;
 
-    lock = unit_schedule_lock_irq(v->sched_unit);
+    lock = unit_schedule_lock_irq(unit);
 
     if ( v->affinity_broken )
         ret = -EBUSY;
@@ -986,19 +1004,19 @@  static int vcpu_set_affinity(
          * Tell the scheduler we changes something about affinity,
          * and ask to re-evaluate vcpu placement.
          */
-        if ( which == v->cpu_hard_affinity )
+        if ( which == unit->cpu_hard_affinity )
         {
             sched_set_affinity(v, affinity, NULL);
         }
         else
         {
-            ASSERT(which == v->cpu_soft_affinity);
+            ASSERT(which == unit->cpu_soft_affinity);
             sched_set_affinity(v, NULL, affinity);
         }
         vcpu_migrate_start(v);
     }
 
-    unit_schedule_unlock_irq(lock, v->sched_unit);
+    unit_schedule_unlock_irq(lock, unit);
 
     domain_update_node_affinity(v->domain);
 
@@ -1017,12 +1035,12 @@  int vcpu_set_hard_affinity(struct vcpu *v, const cpumask_t *affinity)
     if ( cpumask_empty(&online_affinity) )
         return -EINVAL;
 
-    return vcpu_set_affinity(v, affinity, v->cpu_hard_affinity);
+    return vcpu_set_affinity(v, affinity, v->sched_unit->cpu_hard_affinity);
 }
 
 int vcpu_set_soft_affinity(struct vcpu *v, const cpumask_t *affinity)
 {
-    return vcpu_set_affinity(v, affinity, v->cpu_soft_affinity);
+    return vcpu_set_affinity(v, affinity, v->sched_unit->cpu_soft_affinity);
 }
 
 /* Block the currently-executing domain until a pertinent event occurs. */
@@ -1226,11 +1244,12 @@  void watchdog_domain_destroy(struct domain *d)
  */
 int vcpu_temporary_affinity(struct vcpu *v, unsigned int cpu, uint8_t reason)
 {
+    struct sched_unit *unit = v->sched_unit;
     spinlock_t *lock;
     int ret = -EINVAL;
     bool migrate;
 
-    lock = unit_schedule_lock_irq(v->sched_unit);
+    lock = unit_schedule_lock_irq(unit);
 
     if ( cpu == NR_CPUS )
     {
@@ -1240,7 +1259,7 @@  int vcpu_temporary_affinity(struct vcpu *v, unsigned int cpu, uint8_t reason)
             v->affinity_broken &= ~reason;
         }
         if ( !ret && !v->affinity_broken )
-            sched_set_affinity(v, v->cpu_hard_affinity_saved, NULL);
+            sched_set_affinity(v, unit->cpu_hard_affinity_saved, NULL);
     }
     else if ( cpu < nr_cpu_ids )
     {
@@ -1251,7 +1270,8 @@  int vcpu_temporary_affinity(struct vcpu *v, unsigned int cpu, uint8_t reason)
         {
             if ( !v->affinity_broken )
             {
-                cpumask_copy(v->cpu_hard_affinity_saved, v->cpu_hard_affinity);
+                cpumask_copy(unit->cpu_hard_affinity_saved,
+                             unit->cpu_hard_affinity);
                 sched_set_affinity(v, cpumask_of(cpu), NULL);
             }
             v->affinity_broken |= reason;
@@ -1259,11 +1279,11 @@  int vcpu_temporary_affinity(struct vcpu *v, unsigned int cpu, uint8_t reason)
         }
     }
 
-    migrate = !ret && !cpumask_test_cpu(v->processor, v->cpu_hard_affinity);
+    migrate = !ret && !cpumask_test_cpu(v->processor, unit->cpu_hard_affinity);
     if ( migrate )
         vcpu_migrate_start(v);
 
-    unit_schedule_unlock_irq(lock, v->sched_unit);
+    unit_schedule_unlock_irq(lock, unit);
 
     if ( migrate )
         vcpu_migrate_finish(v);
diff --git a/xen/include/xen/sched-if.h b/xen/include/xen/sched-if.h
index b8cc379e3a..c320854527 100644
--- a/xen/include/xen/sched-if.h
+++ b/xen/include/xen/sched-if.h
@@ -439,11 +439,11 @@  static inline cpumask_t* cpupool_domain_cpumask(struct domain *d)
  * * The hard affinity is not a subset of soft affinity
  * * There is an overlap between the soft and hard affinity masks
  */
-static inline int has_soft_affinity(const struct vcpu *v)
+static inline int has_soft_affinity(const struct sched_unit *unit)
 {
-    return v->soft_aff_effective &&
-           !cpumask_subset(cpupool_domain_cpumask(v->domain),
-                           v->cpu_soft_affinity);
+    return unit->soft_aff_effective &&
+           !cpumask_subset(cpupool_domain_cpumask(unit->domain),
+                           unit->cpu_soft_affinity);
 }
 
 /*
@@ -453,17 +453,18 @@  static inline int has_soft_affinity(const struct vcpu *v)
  * to avoid running a vcpu where it would like, but is not allowed to!
  */
 static inline void
-affinity_balance_cpumask(const struct vcpu *v, int step, cpumask_t *mask)
+affinity_balance_cpumask(const struct sched_unit *unit, int step,
+                         cpumask_t *mask)
 {
     if ( step == BALANCE_SOFT_AFFINITY )
     {
-        cpumask_and(mask, v->cpu_soft_affinity, v->cpu_hard_affinity);
+        cpumask_and(mask, unit->cpu_soft_affinity, unit->cpu_hard_affinity);
 
         if ( unlikely(cpumask_empty(mask)) )
-            cpumask_copy(mask, v->cpu_hard_affinity);
+            cpumask_copy(mask, unit->cpu_hard_affinity);
     }
     else /* step == BALANCE_HARD_AFFINITY */
-        cpumask_copy(mask, v->cpu_hard_affinity);
+        cpumask_copy(mask, unit->cpu_hard_affinity);
 }
 
 void sched_rm_cpu(unsigned int cpu);
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index 8780888603..949bb6b696 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -212,9 +212,6 @@  struct vcpu
     bool             hcall_compat;
 #endif
 
-    /* Does soft affinity actually play a role (given hard affinity)? */
-    bool             soft_aff_effective;
-
     /* The CPU, if any, which is holding onto this VCPU's state. */
 #define VCPU_CPU_CLEAN (~0u)
     unsigned int     dirty_cpu;
@@ -246,14 +243,6 @@  struct vcpu
     evtchn_port_t    virq_to_evtchn[NR_VIRQS];
     spinlock_t       virq_lock;
 
-    /* Bitmask of CPUs on which this VCPU may run. */
-    cpumask_var_t    cpu_hard_affinity;
-    /* Used to save affinity during temporary pinning. */
-    cpumask_var_t    cpu_hard_affinity_saved;
-
-    /* Bitmask of CPUs on which this VCPU prefers to run. */
-    cpumask_var_t    cpu_soft_affinity;
-
     /* Tasklet for continue_hypercall_on_cpu(). */
     struct tasklet   continue_hypercall_tasklet;
 
@@ -280,6 +269,15 @@  struct sched_unit {
     struct sched_unit     *next_in_list;
     struct sched_resource *res;
     unsigned int           unit_id;
+
+    /* Does soft affinity actually play a role (given hard affinity)? */
+    bool                   soft_aff_effective;
+    /* Bitmask of CPUs on which this VCPU may run. */
+    cpumask_var_t          cpu_hard_affinity;
+    /* Used to save affinity during temporary pinning. */
+    cpumask_var_t          cpu_hard_affinity_saved;
+    /* Bitmask of CPUs on which this VCPU prefers to run. */
+    cpumask_var_t          cpu_soft_affinity;
 };
 
 #define for_each_sched_unit(d, e)                                         \
@@ -996,7 +994,7 @@  static inline bool hap_enabled(const struct domain *d)
 static inline bool is_hwdom_pinned_vcpu(const struct vcpu *v)
 {
     return (is_hardware_domain(v->domain) &&
-            cpumask_weight(v->cpu_hard_affinity) == 1);
+            cpumask_weight(v->sched_unit->cpu_hard_affinity) == 1);
 }
 
 #ifdef CONFIG_HAS_PASSTHROUGH
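
(A minimal before/after sketch of the net interface change for callers,
mirroring the call-site updates in the diff above; v is a struct vcpu *
and mask a cpumask_t *, both assumed to be in scope.)

    /* Before this patch: the affinity helpers took a vcpu. */
    if ( has_soft_affinity(v) )
        affinity_balance_cpumask(v, BALANCE_SOFT_AFFINITY, mask);

    /* After: they take the vcpu's sched_unit instead. */
    if ( has_soft_affinity(v->sched_unit) )
        affinity_balance_cpumask(v->sched_unit, BALANCE_SOFT_AFFINITY, mask);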