@@ -1057,6 +1057,8 @@ static inline int cpu_numa_flags(void)
}
#endif
+extern int arch_asym_cpu_priority(int cpu);
+
struct sched_domain_attr {
int relax_domain_level;
};
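The hook declared above lets an architecture publish a per-CPU scheduling
priority for ASYM_PACKING. A minimal sketch of an arch-side override,
assuming a hypothetical per-CPU value (asym_cpu_priority below) that an
arch driver keeps up to date; neither the variable nor its maintenance is
part of this patch:

	/* Hypothetical arch-maintained priority; higher value == preferred. */
	static DEFINE_PER_CPU_READ_MOSTLY(int, asym_cpu_priority);

	/* Strong definition; replaces the __weak default in kernel/sched/fair.c. */
	int arch_asym_cpu_priority(int cpu)
	{
		return per_cpu(asym_cpu_priority, cpu);
	}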
@@ -6301,7 +6301,22 @@ static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
WARN_ON(!sg);
do {
+ int cpu, max_cpu = -1;
+
sg->group_weight = cpumask_weight(sched_group_cpus(sg));
+
+ if (!(sd->flags & SD_ASYM_PACKING))
+ goto next;
+
+ for_each_cpu(cpu, sched_group_cpus(sg)) {
+ if (max_cpu < 0)
+ max_cpu = cpu;
+ else if (sched_asym_prefer(cpu, max_cpu))
+ max_cpu = cpu;
+ }
+ sg->asym_prefer_cpu = max_cpu;
+
+next:
sg = sg->next;
} while (sg != sd->groups);
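The new loop caches, in each sched_group, the member CPU that wins
sched_asym_prefer() against every other member, so the load-balancing
paths below can compare whole groups without rescanning the cpumask. A
standalone sketch of the same selection, with the group's cpumask replaced
by a plain array purely for illustration:

	/* Sketch: return the entry of cpus[] with the highest
	 * arch_asym_cpu_priority(), or -1 if the array is empty. */
	static int pick_asym_prefer_cpu(const int *cpus, int nr)
	{
		int i, max_cpu = -1;

		for (i = 0; i < nr; i++) {
			if (max_cpu < 0 || sched_asym_prefer(cpus[i], max_cpu))
				max_cpu = cpus[i];
		}
		return max_cpu;
	}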
@@ -100,6 +100,17 @@ const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
*/
unsigned int __read_mostly sysctl_sched_shares_window = 10000000UL;
+#ifdef CONFIG_SMP
+/*
+ * For asym packing, by default the lower numbered cpu has higher priority.
+ */
+int __weak arch_asym_cpu_priority(int cpu)
+{
+ return -cpu;
+}
+
+#endif
+
#ifdef CONFIG_CFS_BANDWIDTH
/*
* Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool
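With this default, sched_asym_prefer(a, b) evaluates -a > -b, which is
simply a < b: the lower-numbered CPU wins (CPU 0 is preferred over CPU 2,
for example), so architectures that do not override the hook keep the
pre-existing ASYM_PACKING behaviour.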
@@ -7113,16 +7124,18 @@ static bool update_sd_pick_busiest(struct lb_env *env,
if (env->idle == CPU_NOT_IDLE)
return true;
/*
- * ASYM_PACKING needs to move all the work to the lowest
- * numbered CPUs in the group, therefore mark all groups
- * higher than ourself as busy.
+ * ASYM_PACKING needs to move all the work to the highest
+ * priority CPUs in the group, therefore mark all groups
+ * of lower priority than ourselves as busy.
*/
- if (sgs->sum_nr_running && env->dst_cpu < group_first_cpu(sg)) {
+ if (sgs->sum_nr_running &&
+ sched_asym_prefer(env->dst_cpu, sg->asym_prefer_cpu)) {
if (!sds->busiest)
return true;
- /* Prefer to move from highest possible cpu's work */
- if (group_first_cpu(sds->busiest) < group_first_cpu(sg))
+ /* Prefer to move from lowest priority cpu's work */
+ if (sched_asym_prefer(sds->busiest->asym_prefer_cpu,
+ sg->asym_prefer_cpu))
return true;
}
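Two comparisons are at work above: a group is only a candidate if the
destination CPU outranks that group's best CPU (otherwise pulling would be
a downgrade), and among candidates the one whose best CPU ranks lowest is
preferred. A condensed sketch of just that priority logic, with the
sum_nr_running check omitted:

	/* Sketch: should @sg replace @busiest as the pull source for @dst_cpu? */
	static bool asym_pick_busiest(int dst_cpu, struct sched_group *sg,
				      struct sched_group *busiest)
	{
		if (!sched_asym_prefer(dst_cpu, sg->asym_prefer_cpu))
			return false;	/* dst_cpu would not be an upgrade */
		if (!busiest)
			return true;
		/* prefer the group whose best CPU has the lowest priority */
		return sched_asym_prefer(busiest->asym_prefer_cpu,
					 sg->asym_prefer_cpu);
	}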
@@ -7274,8 +7287,8 @@ static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds)
if (!sds->busiest)
return 0;
- busiest_cpu = group_first_cpu(sds->busiest);
- if (env->dst_cpu > busiest_cpu)
+ busiest_cpu = sds->busiest->asym_prefer_cpu;
+ if (sched_asym_prefer(busiest_cpu, env->dst_cpu))
return 0;
env->imbalance = DIV_ROUND_CLOSEST(
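Here busiest_cpu now names the highest-priority CPU of the busiest group,
and the pull is vetoed whenever that CPU outranks env->dst_cpu. Under the
default -cpu priority, sched_asym_prefer(busiest_cpu, env->dst_cpu) is
busiest_cpu < env->dst_cpu, which reproduces the old
"env->dst_cpu > busiest_cpu" bail-out exactly.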
@@ -7613,10 +7626,11 @@ static int need_active_balance(struct lb_env *env)
/*
* ASYM_PACKING needs to force migrate tasks from busy but
- * higher numbered CPUs in order to pack all tasks in the
- * lowest numbered CPUs.
+ * lower priority CPUs in order to pack all tasks on the
+ * highest priority CPUs.
*/
- if ((sd->flags & SD_ASYM_PACKING) && env->src_cpu > env->dst_cpu)
+ if ((sd->flags & SD_ASYM_PACKING) &&
+ sched_asym_prefer(env->dst_cpu, env->src_cpu))
return 1;
}
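The direction of the test matters: active balance is forced only when the
destination outranks the source. With the default priorities this is
sched_asym_prefer(dst, src) == (dst < src), again matching the old numeric
comparison, while an arch override makes the same line push running tasks
toward whatever CPUs the hardware reports as preferred.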
@@ -8465,7 +8479,7 @@ static inline bool nohz_kick_needed(struct rq *rq)
unsigned long now = jiffies;
struct sched_domain_shared *sds;
struct sched_domain *sd;
- int nr_busy, cpu = rq->cpu;
+ int nr_busy, i, cpu = rq->cpu;
bool kick = false;
if (unlikely(rq->idle_balance))
@@ -8516,12 +8530,18 @@ static inline bool nohz_kick_needed(struct rq *rq)
}
sd = rcu_dereference(per_cpu(sd_asym, cpu));
- if (sd && (cpumask_first_and(nohz.idle_cpus_mask,
- sched_domain_span(sd)) < cpu)) {
- kick = true;
- goto unlock;
- }
+ if (sd) {
+ for_each_cpu(i, sched_domain_span(sd)) {
+ if (i == cpu ||
+ !cpumask_test_cpu(i, nohz.idle_cpus_mask))
+ continue;
+ if (sched_asym_prefer(i, cpu)) {
+ kick = true;
+ goto unlock;
+ }
+ }
+ }
unlock:
rcu_read_unlock();
return kick;
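The old test kicked nohz balancing whenever some idle CPU in the asym
domain had a lower number than this one; the loop generalises that to "any
idle CPU that outranks us". The same predicate, sketched as a standalone
helper that uses for_each_cpu_and() to walk only the idle CPUs of the span
(idle_mask stands in for nohz.idle_cpus_mask):

	/* Sketch: does @span contain an idle CPU preferred over @cpu? */
	static bool asym_kick_needed(int cpu, const struct cpumask *span,
				     const struct cpumask *idle_mask)
	{
		int i;

		for_each_cpu_and(i, span, idle_mask) {
			if (i != cpu && sched_asym_prefer(i, cpu))
				return true;
		}
		return false;
	}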
@@ -539,6 +539,11 @@ struct dl_rq {
#ifdef CONFIG_SMP
+static inline bool sched_asym_prefer(int a, int b)
+{
+ return arch_asym_cpu_priority(a) > arch_asym_cpu_priority(b);
+}
+
/*
* We add the notion of a root-domain which will be used to define per-domain
* variables. Each exclusive cpuset essentially defines an island domain by
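Note the strict inequality in sched_asym_prefer(): on a priority tie
neither CPU is preferred, so sched_asym_prefer(a, b) and
sched_asym_prefer(b, a) are both false and the ASYM_PACKING paths leave
equal-priority CPUs alone rather than bouncing tasks between them.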
@@ -905,6 +910,7 @@ struct sched_group {
unsigned int group_weight;
struct sched_group_capacity *sgc;
+ int asym_prefer_cpu; /* cpu of highest priority in group */
/*
* The CPUs this group covers.