| Message ID | 20210518094725.7701-7-will@kernel.org (mailing list archive) |
|---|---|
| State | New, archived |
| Series | Add support for 32-bit tasks on asymmetric AArch32 systems |
On Tue, May 18, 2021 at 10:47:10AM +0100, Will Deacon wrote:
> diff --git a/include/linux/mmu_context.h b/include/linux/mmu_context.h
> index 03dee12d2b61..bc4ac3c525e6 100644
> --- a/include/linux/mmu_context.h
> +++ b/include/linux/mmu_context.h
> @@ -14,4 +14,12 @@
>  static inline void leave_mm(int cpu) { }
>  #endif
>
> +/*
> + * CPUs that are capable of running task @p. By default, we assume a sane,
> + * homogeneous system. Must contain at least one active CPU.
> + */
> +#ifndef task_cpu_possible_mask
> +# define task_cpu_possible_mask(p) cpu_possible_mask
> +#endif

#ifndef task_cpu_possible_mask
# define task_cpu_possible_mask(p) cpu_possible_mask
# define task_cpu_possible(cpu, p) true
#else
# define task_cpu_possible(cpu, p) cpumask_test_cpu((cpu), task_cpu_possible_mask(p))
#endif

> +
>  #endif
> diff --git a/kernel/sched/core.c b/kernel/sched/core.c
> index 5226cc26a095..482f7fdca0e8 100644
> --- a/kernel/sched/core.c
> +++ b/kernel/sched/core.c
> @@ -1813,8 +1813,11 @@ static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
>  		return cpu_online(cpu);
>
>  	/* Non kernel threads are not allowed during either online or offline. */
>  	if (!(p->flags & PF_KTHREAD))
> -		return cpu_active(cpu);

+		return cpu_active(cpu) && task_cpu_possible(cpu, p);

>  	/* KTHREAD_IS_PER_CPU is always allowed. */
>  	if (kthread_is_per_cpu(p))

Would something like that make sense?
On Fri, May 21, 2021 at 06:03:44PM +0200, Peter Zijlstra wrote:
> On Tue, May 18, 2021 at 10:47:10AM +0100, Will Deacon wrote:
> > diff --git a/include/linux/mmu_context.h b/include/linux/mmu_context.h
> > index 03dee12d2b61..bc4ac3c525e6 100644
> > --- a/include/linux/mmu_context.h
> > +++ b/include/linux/mmu_context.h
> > @@ -14,4 +14,12 @@
> >  static inline void leave_mm(int cpu) { }
> >  #endif
> >
> > +/*
> > + * CPUs that are capable of running task @p. By default, we assume a sane,
> > + * homogeneous system. Must contain at least one active CPU.
> > + */
> > +#ifndef task_cpu_possible_mask
> > +# define task_cpu_possible_mask(p) cpu_possible_mask
> > +#endif
>
> #ifndef task_cpu_possible_mask
> # define task_cpu_possible_mask(p) cpu_possible_mask
> # define task_cpu_possible(cpu, p) true
> #else
> # define task_cpu_possible(cpu, p) cpumask_test_cpu((cpu), task_cpu_possible_mask(p))
> #endif
>
> > +
> >  #endif
> > diff --git a/kernel/sched/core.c b/kernel/sched/core.c
> > index 5226cc26a095..482f7fdca0e8 100644
> > --- a/kernel/sched/core.c
> > +++ b/kernel/sched/core.c
> > @@ -1813,8 +1813,11 @@ static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
> >  		return cpu_online(cpu);
> >
> >  	/* Non kernel threads are not allowed during either online or offline. */
> >  	if (!(p->flags & PF_KTHREAD))
> > -		return cpu_active(cpu);
>
> +		return cpu_active(cpu) && task_cpu_possible(cpu, p);
>
> >  	/* KTHREAD_IS_PER_CPU is always allowed. */
> >  	if (kthread_is_per_cpu(p))
>
> Would something like that make sense?

I think this is probably the only place that we could use the helper, but
it's also one of the places where architectures that don't have to worry
about asymmetry end up with the check so, yes, I'll do that for v7.

Will
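To make the discussion above concrete, the following is a minimal sketch of the kind of override an asymmetric architecture could provide in its asm/mmu_context.h. It is illustrative only: the static key arch_mismatched_32bit_el0 and the helper arch_32bit_capable_cpumask() are hypothetical names, not taken from this series, and the fragment assumes the usual kernel headers (linux/cpumask.h, linux/compat.h, linux/jump_label.h) are already included.

/*
 * Illustrative arch override of task_cpu_possible_mask() -- the static key
 * and the 32-bit-capable cpumask accessor are assumed to be declared by the
 * architecture; the names here are placeholders.
 */
static inline const struct cpumask *
task_cpu_possible_mask(struct task_struct *p)
{
	/* Homogeneous system: every CPU can run every task. */
	if (!static_branch_unlikely(&arch_mismatched_32bit_el0))
		return cpu_possible_mask;

	/* Native 64-bit tasks are unaffected by the asymmetry. */
	if (!is_compat_thread(task_thread_info(p)))
		return cpu_possible_mask;

	/* Restrict 32-bit tasks to the CPUs that can actually run them. */
	return arch_32bit_capable_cpumask();
}
#define task_cpu_possible_mask	task_cpu_possible_mask

With such a definition in place, the generic default in include/linux/mmu_context.h is compiled out and the scheduler paths touched by this patch pick up the restricted mask automatically.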
diff --git a/include/linux/mmu_context.h b/include/linux/mmu_context.h
index 03dee12d2b61..bc4ac3c525e6 100644
--- a/include/linux/mmu_context.h
+++ b/include/linux/mmu_context.h
@@ -14,4 +14,12 @@
 static inline void leave_mm(int cpu) { }
 #endif

+/*
+ * CPUs that are capable of running task @p. By default, we assume a sane,
+ * homogeneous system. Must contain at least one active CPU.
+ */
+#ifndef task_cpu_possible_mask
+# define task_cpu_possible_mask(p) cpu_possible_mask
+#endif
+
 #endif
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 5226cc26a095..482f7fdca0e8 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1813,8 +1813,11 @@ static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
 		return cpu_online(cpu);

 	/* Non kernel threads are not allowed during either online or offline. */
-	if (!(p->flags & PF_KTHREAD))
-		return cpu_active(cpu);
+	if (!(p->flags & PF_KTHREAD)) {
+		if (cpu_active(cpu))
+			return cpumask_test_cpu(cpu, task_cpu_possible_mask(p));
+		return false;
+	}

 	/* KTHREAD_IS_PER_CPU is always allowed. */
 	if (kthread_is_per_cpu(p))
@@ -2792,10 +2795,9 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
			 *
			 * More yuck to audit.
			 */
-			do_set_cpus_allowed(p, cpu_possible_mask);
+			do_set_cpus_allowed(p, task_cpu_possible_mask(p));
			state = fail;
			break;
-
		case fail:
			BUG();
			break;
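To illustrate the check the is_cpu_allowed() hunk adds for user tasks, here is a small stand-alone toy model in plain C. It is not kernel code: the bitmask type, function names and example masks are made up for illustration, and it simply mirrors the rule that a user task may only run on a CPU that is both active and present in the task's possible mask.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Toy model: one bit per CPU. */
typedef uint32_t toy_cpumask_t;

/* Mirrors the user-task branch of is_cpu_allowed() after the patch. */
static bool toy_is_cpu_allowed(toy_cpumask_t active, toy_cpumask_t possible, int cpu)
{
	toy_cpumask_t bit = 1u << cpu;

	return (active & bit) && (possible & bit);
}

int main(void)
{
	toy_cpumask_t active = 0xf;          /* CPUs 0-3 are active            */
	toy_cpumask_t aarch32_capable = 0x3; /* only CPUs 0-1 run 32-bit code  */

	/* CPU 2 is active but not 32-bit capable: not allowed (prints 0). */
	printf("%d\n", toy_is_cpu_allowed(active, aarch32_capable, 2));
	/* CPU 1 is active and 32-bit capable: allowed (prints 1). */
	printf("%d\n", toy_is_cpu_allowed(active, aarch32_capable, 1));
	return 0;
}

The select_fallback_rq() hunk applies the same idea to affinity recovery: when a task's allowed set becomes unusable, its affinity is reset to task_cpu_possible_mask(p) rather than to every possible CPU, so a task that can only execute on a subset of CPUs is never pushed onto one it cannot run on.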