@@ -136,27 +136,6 @@
#define TRC_CSCHED_RATELIMIT TRC_SCHED_CLASS_EVT(CSCHED, 10)
#define TRC_CSCHED_STEAL_CHECK TRC_SCHED_CLASS_EVT(CSCHED, 11)
-
-/*
- * Hard and soft affinity load balancing.
- *
- * Idea is each vcpu has some pcpus that it prefers, some that it does not
- * prefer but is OK with, and some that it cannot run on at all. The first
- * set of pcpus are the ones that are both in the soft affinity *and* in the
- * hard affinity; the second set of pcpus are the ones that are in the hard
- * affinity but *not* in the soft affinity; the third set of pcpus are the
- * ones that are not in the hard affinity.
- *
- * We implement a two step balancing logic. Basically, every time there is
- * the need to decide where to run a vcpu, we first check the soft affinity
- * (well, actually, the && between soft and hard affinity), to see if we can
- * send it where it prefers to (and can) run on. However, if the first step
- * does not find any suitable and free pcpu, we fall back checking the hard
- * affinity.
- */
-#define CSCHED_BALANCE_SOFT_AFFINITY 0
-#define CSCHED_BALANCE_HARD_AFFINITY 1
-
/*
* Boot parameters
*/
@@ -331,52 +310,6 @@ runq_remove(struct csched_vcpu *svc)
__runq_remove(svc);
}
-#define for_each_csched_balance_step(step) \
- for ( (step) = 0; (step) <= CSCHED_BALANCE_HARD_AFFINITY; (step)++ )
-
-
-/*
- * Hard affinity balancing is always necessary and must never be skipped.
- * But soft affinity need only be considered when it has a functionally
- * different effect than other constraints (such as hard affinity, cpus
- * online, or cpupools).
- *
- * Soft affinity only needs to be considered if:
- * * The cpus in the cpupool are not a subset of soft affinity
- * * The hard affinity is not a subset of soft affinity
- * * There is an overlap between the soft affinity and the mask which is
- * currently being considered.
- */
-static inline int __vcpu_has_soft_affinity(const struct vcpu *vc,
- const cpumask_t *mask)
-{
- return !cpumask_subset(cpupool_domain_cpumask(vc->domain),
- vc->cpu_soft_affinity) &&
- !cpumask_subset(vc->cpu_hard_affinity, vc->cpu_soft_affinity) &&
- cpumask_intersects(vc->cpu_soft_affinity, mask);
-}
-
-/*
- * Each csched-balance step uses its own cpumask. This function determines
- * which one (given the step) and copies it in mask. For the soft affinity
- * balancing step, the pcpus that are not part of vc's hard affinity are
- * filtered out from the result, to avoid running a vcpu where it would
- * like, but is not allowed to!
- */
-static void
-csched_balance_cpumask(const struct vcpu *vc, int step, cpumask_t *mask)
-{
- if ( step == CSCHED_BALANCE_SOFT_AFFINITY )
- {
- cpumask_and(mask, vc->cpu_soft_affinity, vc->cpu_hard_affinity);
-
- if ( unlikely(cpumask_empty(mask)) )
- cpumask_copy(mask, vc->cpu_hard_affinity);
- }
- else /* step == CSCHED_BALANCE_HARD_AFFINITY */
- cpumask_copy(mask, vc->cpu_hard_affinity);
-}
-
static void burn_credits(struct csched_vcpu *svc, s_time_t now)
{
s_time_t delta;
@@ -441,18 +374,18 @@ static inline void __runq_tickle(struct csched_vcpu *new)
* Soft and hard affinity balancing loop. For vcpus without
* a useful soft affinity, consider hard affinity only.
*/
- for_each_csched_balance_step( balance_step )
+ for_each_affinity_balance_step( balance_step )
{
int new_idlers_empty;
- if ( balance_step == CSCHED_BALANCE_SOFT_AFFINITY
- && !__vcpu_has_soft_affinity(new->vcpu,
- new->vcpu->cpu_hard_affinity) )
+ if ( balance_step == BALANCE_SOFT_AFFINITY
+ && !has_soft_affinity(new->vcpu,
+ new->vcpu->cpu_hard_affinity) )
continue;
/* Are there idlers suitable for new (for this balance step)? */
- csched_balance_cpumask(new->vcpu, balance_step,
- cpumask_scratch_cpu(cpu));
+ affinity_balance_cpumask(new->vcpu, balance_step,
+ cpumask_scratch_cpu(cpu));
cpumask_and(cpumask_scratch_cpu(cpu),
cpumask_scratch_cpu(cpu), &idle_mask);
new_idlers_empty = cpumask_empty(cpumask_scratch_cpu(cpu));
@@ -463,7 +396,7 @@ static inline void __runq_tickle(struct csched_vcpu *new)
* hard affinity as well, before taking final decisions.
*/
if ( new_idlers_empty
- && balance_step == CSCHED_BALANCE_SOFT_AFFINITY )
+ && balance_step == BALANCE_SOFT_AFFINITY )
continue;
/*
@@ -789,7 +722,7 @@ _csched_cpu_pick(const struct scheduler *ops, struct vcpu *vc, bool_t commit)
online = cpupool_domain_cpumask(vc->domain);
cpumask_and(&cpus, vc->cpu_hard_affinity, online);
- for_each_csched_balance_step( balance_step )
+ for_each_affinity_balance_step( balance_step )
{
/*
* We want to pick up a pcpu among the ones that are online and
@@ -809,12 +742,12 @@ _csched_cpu_pick(const struct scheduler *ops, struct vcpu *vc, bool_t commit)
* cpus and, if the result is empty, we just skip the soft affinity
* balancing step all together.
*/
- if ( balance_step == CSCHED_BALANCE_SOFT_AFFINITY
- && !__vcpu_has_soft_affinity(vc, &cpus) )
+ if ( balance_step == BALANCE_SOFT_AFFINITY
+ && !has_soft_affinity(vc, &cpus) )
continue;
/* Pick an online CPU from the proper affinity mask */
- csched_balance_cpumask(vc, balance_step, &cpus);
+ affinity_balance_cpumask(vc, balance_step, &cpus);
cpumask_and(&cpus, &cpus, online);
/* If present, prefer vc's current processor */
@@ -1710,11 +1643,11 @@ csched_runq_steal(int peer_cpu, int cpu, int pri, int balance_step)
* or counter.
*/
if ( vc->is_running ||
- (balance_step == CSCHED_BALANCE_SOFT_AFFINITY
- && !__vcpu_has_soft_affinity(vc, vc->cpu_hard_affinity)) )
+ (balance_step == BALANCE_SOFT_AFFINITY
+ && !has_soft_affinity(vc, vc->cpu_hard_affinity)) )
continue;
- csched_balance_cpumask(vc, balance_step, cpumask_scratch);
+ affinity_balance_cpumask(vc, balance_step, cpumask_scratch);
if ( __csched_vcpu_is_migrateable(vc, cpu, cpumask_scratch) )
{
/* We got a candidate. Grab it! */
@@ -1774,7 +1707,7 @@ csched_load_balance(struct csched_private *prv, int cpu,
* 1. any "soft-affine work" to steal first,
* 2. if not finding anything, any "hard-affine work" to steal.
*/
- for_each_csched_balance_step( bstep )
+ for_each_affinity_balance_step( bstep )
{
/*
* We peek at the non-idling CPUs in a node-wise fashion. In fact,
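
Every call site touched above reduces to the same skeleton, which may help
when reviewing the rename. This is a schematic fragment rather than a
compilable excerpt: vc, mask and scratch stand in for the per-site variables
(e.g. cpumask_scratch_cpu(cpu) in __runq_tickle, &cpus in _csched_cpu_pick),
and the trailing comment stands in for the per-site work:

    int step;

    for_each_affinity_balance_step( step )
    {
        /* Skip the soft step when it cannot differ from the hard one. */
        if ( step == BALANCE_SOFT_AFFINITY &&
             !has_soft_affinity(vc, mask) )
            continue;

        /* Build the cpumask for this step and act on it. */
        affinity_balance_cpumask(vc, step, scratch);
        /* ... tickle, pick or steal using scratch ... */
    }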
@@ -208,4 +208,68 @@ static inline cpumask_t* cpupool_domain_cpumask(struct domain *d)
return d->cpupool->cpu_valid;
}
+/*
+ * Hard and soft affinity load balancing.
+ *
+ * The idea is that each vcpu has some pcpus that it prefers, some that it
+ * does not prefer but is OK with, and some that it cannot run on at all.
+ * The first set of pcpus are the ones that are both in the soft affinity
+ * *and* in the hard affinity; the second set of pcpus are the ones that
+ * are in the hard affinity but *not* in the soft affinity; the third set
+ * of pcpus are the ones that are not in the hard affinity at all.
+ *
+ * We implement a two-step balancing logic. Basically, every time we need
+ * to decide where to run a vcpu, we first check the soft affinity (well,
+ * actually, the && between soft and hard affinity), to see if we can send
+ * it where it prefers (and is allowed) to run. If that first step does
+ * not find any suitable and free pcpu, we fall back to checking the hard
+ * affinity.
+ */
+#define BALANCE_SOFT_AFFINITY 0
+#define BALANCE_HARD_AFFINITY 1
+
+#define for_each_affinity_balance_step(step) \
+ for ( (step) = 0; (step) <= BALANCE_HARD_AFFINITY; (step)++ )
+
+/*
+ * Hard affinity balancing is always necessary and must never be skipped.
+ * But soft affinity need only be considered when it has a functionally
+ * different effect than other constraints (such as hard affinity, the
+ * set of online cpus, or the cpupool).
+ *
+ * Soft affinity only needs to be considered if all of the following hold:
+ *  * the cpus in the cpupool are not a subset of the soft affinity;
+ *  * the hard affinity is not a subset of the soft affinity;
+ *  * there is an overlap between the soft affinity and the mask which
+ *    is currently being considered.
+ */
+static inline int has_soft_affinity(const struct vcpu *v,
+ const cpumask_t *mask)
+{
+ return !cpumask_subset(cpupool_domain_cpumask(v->domain),
+ v->cpu_soft_affinity) &&
+ !cpumask_subset(v->cpu_hard_affinity, v->cpu_soft_affinity) &&
+ cpumask_intersects(v->cpu_soft_affinity, mask);
+}
+
+/*
+ * This function copies into mask the cpumask that should be used for a
+ * particular affinity balancing step. For the soft affinity step, the
+ * pcpus that are not part of v's hard affinity are filtered out of the
+ * result, to avoid running a vcpu where it would like to run, but is
+ * not allowed to.
+ */
+static inline void
+affinity_balance_cpumask(const struct vcpu *v, int step, cpumask_t *mask)
+{
+ if ( step == BALANCE_SOFT_AFFINITY )
+ {
+ cpumask_and(mask, v->cpu_soft_affinity, v->cpu_hard_affinity);
+
+ if ( unlikely(cpumask_empty(mask)) )
+ cpumask_copy(mask, v->cpu_hard_affinity);
+ }
+ else /* step == BALANCE_HARD_AFFINITY */
+ cpumask_copy(mask, v->cpu_hard_affinity);
+}
+
#endif /* __XEN_SCHED_IF_H__ */
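
To see the two-step logic end to end, here is a standalone sketch that
compiles and runs outside the Xen tree. cpumask_t is modeled as a uint64_t
with one bit per pcpu, and every name below is a stand-in for this
illustration only, not the hypervisor API; the two helpers deliberately
mirror has_soft_affinity() and affinity_balance_cpumask() above.

    #include <stdint.h>
    #include <stdio.h>

    #define BALANCE_SOFT_AFFINITY 0
    #define BALANCE_HARD_AFFINITY 1

    #define for_each_affinity_balance_step(step) \
        for ( (step) = 0; (step) <= BALANCE_HARD_AFFINITY; (step)++ )

    /* Mirrors has_soft_affinity(): is the soft step worth running at all? */
    static int soft_step_useful(uint64_t pool, uint64_t hard, uint64_t soft,
                                uint64_t mask)
    {
        return (pool & ~soft) != 0 &&   /* pool is not a subset of soft */
               (hard & ~soft) != 0 &&   /* hard is not a subset of soft */
               (soft & mask) != 0;      /* soft overlaps the given mask */
    }

    /* Mirrors affinity_balance_cpumask(): the mask to use for a step. */
    static uint64_t step_cpumask(uint64_t hard, uint64_t soft, int step)
    {
        if ( step == BALANCE_SOFT_AFFINITY )
        {
            uint64_t m = soft & hard;
            return m ? m : hard;        /* fall back if soft && hard is empty */
        }
        return hard;                    /* step == BALANCE_HARD_AFFINITY */
    }

    int main(void)
    {
        uint64_t pool = 0xff;  /* pcpus 0-7 are in the cpupool    */
        uint64_t hard = 0x3f;  /* the vcpu may run on pcpus 0-5   */
        uint64_t soft = 0x0f;  /* ...and prefers pcpus 0-3        */
        uint64_t idle = 0x30;  /* but only pcpus 4 and 5 are idle */
        int step;

        for_each_affinity_balance_step( step )
        {
            uint64_t candidates;

            if ( step == BALANCE_SOFT_AFFINITY &&
                 !soft_step_useful(pool, hard, soft, hard) )
                continue;

            candidates = step_cpumask(hard, soft, step) & idle;
            printf("step %d: candidates %#lx\n", step,
                   (unsigned long)candidates);
            if ( candidates )
                break;   /* step 0 finds nothing; step 1 yields 0x30 */
        }
        return 0;
    }

With these example masks, the soft step finds no idle pcpu (the vcpu's
preferred pcpus 0-3 are all busy), so the loop falls back to the hard step
and settles for pcpus 4-5, exactly the fallback behaviour the comment block
above describes.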