@@ -76,22 +76,6 @@ int arch_asym_cpu_priority(int cpu)
return per_cpu(sched_core_priority, cpu);
}
-int arch_asym_max_cpu_and(const struct cpumask *mask1,
- const struct cpumask *mask2)
-{
- int cpu;
- int max_priority, max_cpu = nr_cpu_ids;
-
- for_each_cpu_and(cpu, mask1, mask2) {
- if (max_cpu == nr_cpu_ids ||
- arch_asym_cpu_priority(cpu) > max_priority) {
- max_cpu = cpu;
- max_priority = arch_asym_cpu_priority(cpu);
- }
- }
- return max_cpu;
-}
-
/**
* sched_set_itmt_core_prio() - Set CPU priority based on ITMT
* @prio: Priority of cpu core
@@ -102,16 +102,6 @@ int __weak arch_asym_cpu_priority(int cp
return -cpu;
}
-/*
- * Return the lowest numbered (or highest priority) cpu
- * in the intersection of two cpu masks. If no cpu is
- * is in both masks, nr_cpu_ids will be returned.
- */
-int __weak arch_asym_max_cpu_and(const struct cpumask *mask1,
- const struct cpumask *mask2)
-{
- return cpumask_first_and(mask1, mask2);
-}
#endif
#ifdef CONFIG_CFS_BANDWIDTH
@@ -8758,9 +8748,8 @@ static inline bool nohz_kick_needed(stru
unsigned long now = jiffies;
struct sched_domain_shared *sds;
struct sched_domain *sd;
- int nr_busy, cpu = rq->cpu;
+ int nr_busy, i, cpu = rq->cpu;
bool kick = false;
- int asym_idle_cpu;
if (unlikely(rq->idle_balance))
return false;
@@ -8811,15 +8800,21 @@ static inline bool nohz_kick_needed(stru
sd = rcu_dereference(per_cpu(sd_asym, cpu));
if (sd) {
- asym_idle_cpu = arch_asym_max_cpu_and(nohz.idle_cpus_mask,
- sched_domain_span(sd));
- if (asym_idle_cpu < nr_cpu_ids &&
- sched_asym_prefer(asym_idle_cpu, cpu)) {
- kick = true;
- goto unlock;
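+		/*
+		 * Kick if another idle CPU in this asym packing domain
+		 * has a higher priority than this CPU.
+		 */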
+ for_each_cpu(i, sched_domain_span(sd)) {
+ if (i == cpu ||
+ !cpumask_test_cpu(i, nohz.idle_cpus_mask))
+ continue;
+
+ if (sched_asym_prefer(i, cpu)) {
+ kick = true;
+ goto unlock;
+ }
}
}
-
unlock:
rcu_read_unlock();
return kick;