@@ -251,6 +251,18 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
#define deactivate_mm(tsk,mm) do { } while (0)
#define activate_mm(prev,next) switch_mm(prev, next, current)
+/*
+ * arch_cpu_allowed_mask() - CPUs on which task @p may be scheduled.
+ *
+ * On systems where only a subset of CPUs implements 32-bit EL0, a
+ * compat (32-bit) task must not be allowed onto 64-bit-only cores;
+ * 64-bit tasks may run on any possible CPU.
+ */
+static inline const struct cpumask *arch_cpu_allowed_mask(struct task_struct *p)
+{
+ /* Fast path: this system has no 32-bit EL0 mismatch. */
+ if (!static_branch_unlikely(&arm64_mismatched_32bit_el0))
+ return cpu_possible_mask;
+
+ /* 64-bit tasks are unaffected by the mismatch. */
+ if (!is_compat_thread(task_thread_info(p)))
+ return cpu_possible_mask;
+
+ /* Compat task: restrict to the CPUs that support 32-bit EL0. */
+ return system_32bit_el0_cpumask();
+}
+/* Advertise the override so generic code picks up this definition. */
+#define arch_cpu_allowed_mask arch_cpu_allowed_mask
+
void verify_cpu_asid_bits(void);
void post_ttbr_update_workaround(void);
Provide an implementation of arch_cpu_allowed_mask() so that we can
prevent 64-bit-only cores being added to the 'cpus_mask' for compat
tasks on systems with mismatched 32-bit support at EL0.

Signed-off-by: Will Deacon <will@kernel.org>
---
 arch/arm64/include/asm/mmu_context.h | 12 ++++++++++++
 1 file changed, 12 insertions(+)