@@ -663,6 +663,7 @@ static inline bool supports_clearbhb(int scope)
}
const struct cpumask *system_32bit_el0_cpumask(void);
+const struct cpumask *fallback_32bit_el0_cpumask(void);
DECLARE_STATIC_KEY_FALSE(arm64_mismatched_32bit_el0);
static inline bool system_supports_32bit_el0(void)
@@ -282,6 +282,8 @@ task_cpu_possible_mask(struct task_struct *p)
}
#define task_cpu_possible_mask task_cpu_possible_mask
+const struct cpumask *task_cpu_fallback_mask(struct task_struct *p);
+
void verify_cpu_asid_bits(void);
void post_ttbr_update_workaround(void);
@@ -75,6 +75,7 @@
#include <linux/cpu.h>
#include <linux/kasan.h>
#include <linux/percpu.h>
+#include <linux/sched/isolation.h>
#include <asm/cpu.h>
#include <asm/cpufeature.h>
@@ -133,6 +134,7 @@ DEFINE_STATIC_KEY_FALSE(arm64_mismatched_32bit_el0);
* Only valid if arm64_mismatched_32bit_el0 is enabled.
*/
static cpumask_var_t cpu_32bit_el0_mask __cpumask_var_read_mostly;
+static cpumask_var_t fallback_32bit_el0_mask __cpumask_var_read_mostly;
void dump_cpu_features(void)
{
@@ -1618,6 +1620,23 @@ const struct cpumask *system_32bit_el0_cpumask(void)
return cpu_possible_mask;
}
+const struct cpumask *task_cpu_fallback_mask(struct task_struct *p)
+{
+ if (!static_branch_unlikely(&arm64_mismatched_32bit_el0))
+ return housekeeping_cpumask(HK_TYPE_TICK);
+
+ if (!is_compat_thread(task_thread_info(p)))
+ return housekeeping_cpumask(HK_TYPE_TICK);
+
+ if (!system_supports_32bit_el0())
+ return cpu_none_mask;
+
+ if (!cpumask_empty(fallback_32bit_el0_mask))
+ return fallback_32bit_el0_mask;
+
+ return cpu_32bit_el0_mask;
+}
+
static int __init parse_32bit_el0_param(char *str)
{
allow_mismatched_32bit_el0 = true;
@@ -3605,22 +3624,33 @@ static int mismatched_32bit_el0_online(unsigned int cpu)
if (cpu_32bit) {
cpumask_set_cpu(cpu, cpu_32bit_el0_mask);
+ if (housekeeping_cpu(cpu, HK_TYPE_TICK))
+ cpumask_set_cpu(cpu, fallback_32bit_el0_mask);
static_branch_enable_cpuslocked(&arm64_mismatched_32bit_el0);
}
+ if (cpu_32bit_unofflineable >= 0) {
+ if (!housekeeping_cpu(cpu_32bit_unofflineable, HK_TYPE_TICK) &&
+ cpu_32bit && housekeeping_cpu(cpu, HK_TYPE_TICK)) {
+ cpu_32bit_unofflineable = cpu;
+ pr_info("Asymmetric 32-bit EL0 support detected on housekeeping CPU %u;"
+ "CPU hot-unplug disabled on CPU %u\n", cpu, cpu);
+ }
+ return 0;
+ }
+
if (cpumask_test_cpu(0, cpu_32bit_el0_mask) == cpu_32bit)
return 0;
- if (cpu_32bit_unofflineable >= 0)
- return 0;
-
/*
- * We've detected a mismatch. We need to keep one of our CPUs with
- * 32-bit EL0 online so that is_cpu_allowed() doesn't end up rejecting
- * every CPU in the system for a 32-bit task.
+ * We've detected a mismatch. We need to keep one of our CPUs, preferably
+ * housekeeping, with 32-bit EL0 online so that is_cpu_allowed() doesn't end up
+ * rejecting every CPU in the system for a 32-bit task.
*/
- cpu_32bit_unofflineable = cpu_32bit ? cpu : cpumask_any_and(cpu_32bit_el0_mask,
- cpu_active_mask);
+ cpu_32bit_unofflineable = cpumask_any_and(fallback_32bit_el0_mask, cpu_active_mask);
+ if (cpu_32bit_unofflineable >= nr_cpu_ids)
+ cpu_32bit_unofflineable = cpumask_any_and(cpu_32bit_el0_mask, cpu_active_mask);
+
setup_elf_hwcaps(compat_elf_hwcaps);
elf_hwcap_fixup();
pr_info("Asymmetric 32-bit EL0 support detected on CPU %u; CPU hot-unplug disabled on CPU %u\n",
@@ -3641,6 +3671,9 @@ static int __init init_32bit_el0_mask(void)
if (!zalloc_cpumask_var(&cpu_32bit_el0_mask, GFP_KERNEL))
return -ENOMEM;
+ if (!zalloc_cpumask_var(&fallback_32bit_el0_mask, GFP_KERNEL))
+ return -ENOMEM;
+
return cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
"arm64/mismatched_32bit_el0:online",
mismatched_32bit_el0_online, mismatched_32bit_el0_offline);
@@ -24,6 +24,7 @@ static inline void leave_mm(void) { }
#ifndef task_cpu_possible_mask
# define task_cpu_possible_mask(p) cpu_possible_mask
# define task_cpu_possible(cpu, p) true
+# define task_cpu_fallback_mask(p) housekeeping_cpumask(HK_TYPE_TICK)
#else
# define task_cpu_possible(cpu, p) cpumask_test_cpu((cpu), task_cpu_possible_mask(p))
#endif
@@ -3494,7 +3494,7 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
*
* More yuck to audit.
*/
- do_set_cpus_allowed(p, task_cpu_possible_mask(p));
+ do_set_cpus_allowed(p, task_cpu_fallback_mask(p));
state = fail;
break;
case fail:
When a kthread or any other task has an affinity mask that is fully
offline or unallowed, the scheduler reaffines the task to all possible
CPUs as a last resort.

This default decision doesn't mix well with nohz_full CPUs that are
part of the possible cpumask but don't want to be disturbed by unbound
kthreads or even detached pinned user tasks.

Make the fallback affinity setting aware of nohz_full.

ARM64 is a special case: its last-resort 32-bit EL0 capable CPU can be
updated as housekeeping CPUs appear during boot.

Suggested-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
---
 arch/arm64/include/asm/cpufeature.h  |  1 +
 arch/arm64/include/asm/mmu_context.h |  2 ++
 arch/arm64/kernel/cpufeature.c       | 49 +++++++++++++++++++++++-----
 include/linux/mmu_context.h          |  1 +
 kernel/sched/core.c                  |  2 +-
 5 files changed, 46 insertions(+), 9 deletions(-)
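
A usage note on the generic side: the default task_cpu_fallback_mask()
sits under the #ifndef task_cpu_possible_mask branch of
include/linux/mmu_context.h, so an architecture that overrides
task_cpu_possible_mask() is also expected to provide its own
task_cpu_fallback_mask(), as arm64 does above. The sketch below
illustrates that contract for a hypothetical architecture; it is not
part of the patch, and the myarch_* identifiers are placeholders
rather than existing kernel symbols.

/*
 * Hypothetical <asm/mmu_context.h> of an arch that confines some tasks
 * to a subset of CPUs. Assumes <linux/sched/isolation.h> is reachable
 * for housekeeping_cpumask().
 */
static inline const struct cpumask *
task_cpu_possible_mask(struct task_struct *p)
{
	/* Tasks flagged by the made-up myarch_task_restricted() helper
	 * may only run on the (equally made-up) myarch_restricted_mask. */
	if (!myarch_task_restricted(p))
		return cpu_possible_mask;
	return myarch_restricted_mask;
}
#define task_cpu_possible_mask task_cpu_possible_mask

/*
 * Last-resort affinity: prefer housekeeping CPUs so nohz_full CPUs are
 * left alone, matching what select_fallback_rq() now asks for.
 */
static inline const struct cpumask *
task_cpu_fallback_mask(struct task_struct *p)
{
	if (!myarch_task_restricted(p))
		return housekeeping_cpumask(HK_TYPE_TICK);
	return myarch_restricted_mask;
}

On arm64 the same idea is refined further: the fallback for a 32-bit
task prefers the 32-bit capable housekeeping CPUs collected in
fallback_32bit_el0_mask and only falls back to the full
cpu_32bit_el0_mask when no 32-bit capable CPU is a housekeeping CPU.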