Message ID | 20210518094725.7701-12-will@kernel.org (mailing list archive)
---|---
State | New, archived
Series | Add support for 32-bit tasks on asymmetric AArch32 systems
On 05/18/21 10:47, Will Deacon wrote:
> In preparation for replaying user affinity requests using a saved mask,
> split sched_setaffinity() up so that the initial task lookup and
> security checks are only performed when the request is coming directly
> from userspace.
>
> Signed-off-by: Will Deacon <will@kernel.org>
> ---
>  kernel/sched/core.c | 110 +++++++++++++++++++++++---------------------
>  1 file changed, 58 insertions(+), 52 deletions(-)

[...]

> +static int
> +__sched_setaffinity(struct task_struct *p, const struct cpumask *mask)
>  {
> +	int retval;
>  	cpumask_var_t cpus_allowed, new_mask;
> +
> +	if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL))
> +		return -ENOMEM;
> +
> +	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
> +		return -ENOMEM;

Shouldn't we free cpus_allowed first?

Cheers

--
Qais Yousef
On Fri, May 21, 2021 at 05:41:01PM +0100, Qais Yousef wrote:
> On 05/18/21 10:47, Will Deacon wrote:
> > In preparation for replaying user affinity requests using a saved mask,
> > split sched_setaffinity() up so that the initial task lookup and
> > security checks are only performed when the request is coming directly
> > from userspace.

[...]

> > +	if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL))
> > +		return -ENOMEM;
> > +
> > +	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
> > +		return -ENOMEM;
>
> Shouldn't we free cpus_allowed first?

Oops, yes. Now fixed.

Thanks,

Will
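For clarity, the leak Qais points out is that a failure of the second `alloc_cpumask_var()` returns immediately and leaves `cpus_allowed` allocated. Below is a minimal sketch of one way the error path can be fixed; it illustrates the fix agreed on in the thread, not the exact hunk from the reposted patch:

```c
static int
__sched_setaffinity(struct task_struct *p, const struct cpumask *mask)
{
	int retval;
	cpumask_var_t cpus_allowed, new_mask;

	if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL))
		return -ENOMEM;

	/*
	 * If the second allocation fails, release the first mask before
	 * returning instead of bailing out with it still allocated.
	 */
	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
		retval = -ENOMEM;
		goto out_free_cpus_allowed;
	}

	/* ... body as in the patch: cpuset/bandwidth checks and retry loop ... */

out_free_masks:
	free_cpumask_var(new_mask);
out_free_cpus_allowed:
	free_cpumask_var(cpus_allowed);
	return retval;
}
```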
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 9512623d5a60..808bbe669a6d 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6788,9 +6788,61 @@ SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
 	return retval;
 }
 
-long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
+static int
+__sched_setaffinity(struct task_struct *p, const struct cpumask *mask)
 {
+	int retval;
 	cpumask_var_t cpus_allowed, new_mask;
+
+	if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL))
+		return -ENOMEM;
+
+	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
+		return -ENOMEM;
+
+	cpuset_cpus_allowed(p, cpus_allowed);
+	cpumask_and(new_mask, mask, cpus_allowed);
+
+	/*
+	 * Since bandwidth control happens on root_domain basis,
+	 * if admission test is enabled, we only admit -deadline
+	 * tasks allowed to run on all the CPUs in the task's
+	 * root_domain.
+	 */
+#ifdef CONFIG_SMP
+	if (task_has_dl_policy(p) && dl_bandwidth_enabled()) {
+		rcu_read_lock();
+		if (!cpumask_subset(task_rq(p)->rd->span, new_mask)) {
+			retval = -EBUSY;
+			rcu_read_unlock();
+			goto out_free_masks;
+		}
+		rcu_read_unlock();
+	}
+#endif
+again:
+	retval = __set_cpus_allowed_ptr(p, new_mask, SCA_CHECK);
+	if (retval)
+		goto out_free_masks;
+
+	cpuset_cpus_allowed(p, cpus_allowed);
+	if (!cpumask_subset(new_mask, cpus_allowed)) {
+		/*
+		 * We must have raced with a concurrent cpuset update.
+		 * Just reset the cpumask to the cpuset's cpus_allowed.
+		 */
+		cpumask_copy(new_mask, cpus_allowed);
+		goto again;
+	}
+
+out_free_masks:
+	free_cpumask_var(new_mask);
+	free_cpumask_var(cpus_allowed);
+	return retval;
+}
+
+long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
+{
 	struct task_struct *p;
 	int retval;
 
@@ -6810,68 +6862,22 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
 		retval = -EINVAL;
 		goto out_put_task;
 	}
-	if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
-		retval = -ENOMEM;
-		goto out_put_task;
-	}
-	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
-		retval = -ENOMEM;
-		goto out_free_cpus_allowed;
-	}
-	retval = -EPERM;
+
 	if (!check_same_owner(p)) {
 		rcu_read_lock();
 		if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) {
 			rcu_read_unlock();
-			goto out_free_new_mask;
+			retval = -EPERM;
+			goto out_put_task;
 		}
 		rcu_read_unlock();
 	}
 
 	retval = security_task_setscheduler(p);
 	if (retval)
-		goto out_free_new_mask;
-
-
-	cpuset_cpus_allowed(p, cpus_allowed);
-	cpumask_and(new_mask, in_mask, cpus_allowed);
-
-	/*
-	 * Since bandwidth control happens on root_domain basis,
-	 * if admission test is enabled, we only admit -deadline
-	 * tasks allowed to run on all the CPUs in the task's
-	 * root_domain.
-	 */
-#ifdef CONFIG_SMP
-	if (task_has_dl_policy(p) && dl_bandwidth_enabled()) {
-		rcu_read_lock();
-		if (!cpumask_subset(task_rq(p)->rd->span, new_mask)) {
-			retval = -EBUSY;
-			rcu_read_unlock();
-			goto out_free_new_mask;
-		}
-		rcu_read_unlock();
-	}
-#endif
-again:
-	retval = __set_cpus_allowed_ptr(p, new_mask, SCA_CHECK);
+		goto out_put_task;
 
-	if (!retval) {
-		cpuset_cpus_allowed(p, cpus_allowed);
-		if (!cpumask_subset(new_mask, cpus_allowed)) {
-			/*
-			 * We must have raced with a concurrent cpuset
-			 * update. Just reset the cpus_allowed to the
-			 * cpuset's cpus_allowed
-			 */
-			cpumask_copy(new_mask, cpus_allowed);
-			goto again;
-		}
-	}
-out_free_new_mask:
-	free_cpumask_var(new_mask);
-out_free_cpus_allowed:
-	free_cpumask_var(cpus_allowed);
+	retval = __sched_setaffinity(p, in_mask);
 out_put_task:
 	put_task_struct(p);
 	return retval;
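The point of the split is that a later in-kernel caller can replay a previously saved affinity mask without repeating the pid lookup, ownership and LSM checks that only make sense for a request arriving from userspace. The sketch below guesses at what such a caller could look like; the function name and the `user_cpus_ptr` field are illustrative assumptions, not taken from this patch:

```c
/*
 * Hypothetical in-kernel consumer of the split-out helper: restore an
 * affinity mask that was saved when the task's affinity was forcibly
 * restricted. The name and the user_cpus_ptr field are illustrative
 * assumptions only.
 */
static void relax_saved_cpus_allowed(struct task_struct *p)
{
	const struct cpumask *saved_mask = p->user_cpus_ptr;

	if (!saved_mask)
		return;

	/*
	 * No find_process_by_pid(), check_same_owner() or
	 * security_task_setscheduler() here: the request originates in the
	 * kernel, so only the cpuset- and bandwidth-aware part is needed.
	 */
	if (__sched_setaffinity(p, saved_mask))
		pr_warn("%s: failed to restore affinity for pid %d\n",
			__func__, p->pid);
}
```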
In preparation for replaying user affinity requests using a saved mask,
split sched_setaffinity() up so that the initial task lookup and
security checks are only performed when the request is coming directly
from userspace.

Signed-off-by: Will Deacon <will@kernel.org>
---
 kernel/sched/core.c | 110 +++++++++++++++++++++++---------------------
 1 file changed, 58 insertions(+), 52 deletions(-)
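The userspace-facing path, with its pid lookup, ownership check and security_task_setscheduler() call, is unchanged by the split. As a reminder of what that path serves, here is a small self-contained userspace example (ordinary sched_setaffinity(2) usage, not part of the patch) that exercises it:

```c
/*
 * Build: gcc -o setaffinity setaffinity.c
 * Pins the calling process to CPU 0 via the sched_setaffinity() syscall,
 * i.e. the userspace entry point whose lookup and permission checks this
 * patch keeps in sched_setaffinity() proper.
 */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	cpu_set_t set;

	CPU_ZERO(&set);
	CPU_SET(0, &set);

	/*
	 * pid 0 means "the calling thread"; targeting another task may
	 * require CAP_SYS_NICE, which is exactly the check kept on this path.
	 */
	if (sched_setaffinity(0, sizeof(set), &set) == -1) {
		perror("sched_setaffinity");
		return EXIT_FAILURE;
	}

	printf("now restricted to CPU 0\n");
	return EXIT_SUCCESS;
}
```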