@@ -5249,13 +5249,15 @@ static int __sched_setscheduler(struct task_struct *p,
return -EINVAL;
/*
- * Allow unprivileged RT tasks to decrease priority:
+ * Allow unprivileged RT tasks to decrease priority.
+ * Do not issue an audit event yet; one is only generated
+ * later, on an actual permission denial.
*/
- if (user && !capable(CAP_SYS_NICE)) {
+ if (user && !ns_capable_noaudit(&init_user_ns, CAP_SYS_NICE)) {
if (fair_policy(policy)) {
if (attr->sched_nice < task_nice(p) &&
!can_nice(p, attr->sched_nice))
- return -EPERM;
+ goto incapable;
}
if (rt_policy(policy)) {
@@ -5264,12 +5266,12 @@ static int __sched_setscheduler(struct task_struct *p,
/* Can't set/change the rt policy: */
if (policy != p->policy && !rlim_rtprio)
- return -EPERM;
+ goto incapable;
/* Can't increase priority: */
if (attr->sched_priority > p->rt_priority &&
attr->sched_priority > rlim_rtprio)
- return -EPERM;
+ goto incapable;
}
/*
@@ -5279,7 +5281,7 @@ static int __sched_setscheduler(struct task_struct *p,
* or reduce their runtime (both ways reducing utilization)
*/
if (dl_policy(policy))
- return -EPERM;
+ goto incapable;
/*
* Treat SCHED_IDLE as nice 20. Only allow a switch to
@@ -5287,16 +5289,16 @@ static int __sched_setscheduler(struct task_struct *p,
*/
if (task_has_idle_policy(p) && !idle_policy(policy)) {
if (!can_nice(p, task_nice(p)))
- return -EPERM;
+ goto incapable;
}
/* Can't change other user's priorities: */
if (!check_same_owner(p))
- return -EPERM;
+ goto incapable;
/* Normal users shall not reset the sched_reset_on_fork flag: */
if (p->sched_reset_on_fork && !reset_on_fork)
- return -EPERM;
+ goto incapable;
}
if (user) {
@@ -5470,6 +5472,11 @@ static int __sched_setscheduler(struct task_struct *p,
if (pi)
cpuset_read_unlock();
return retval;
+
+incapable:
+ /* Generate an audit event */
+ (void) capable(CAP_SYS_NICE);
+ return -EPERM;
}
static int _sched_setscheduler(struct task_struct *p, int policy,
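
The change above follows a simple pattern: perform the capability check silently with ns_capable_noaudit(), and only call capable() on the path where a request is actually refused, at which point its only job is the audit side effect. The standalone userspace sketch below illustrates that control flow. It is an analogue only, not kernel code: check_quiet() and check_audited() are hypothetical stand-ins for ns_capable_noaudit() and capable(), and a printf() plays the role of the audit record.

#include <stdbool.h>
#include <stdio.h>
#include <errno.h>

/* Stand-in for ns_capable_noaudit(): test the capability, log nothing. */
static bool check_quiet(bool has_cap)
{
	return has_cap;
}

/* Stand-in for capable(): same test, but it also emits an "audit record"
 * as a side effect (here just a printf()). */
static bool check_audited(bool has_cap)
{
	printf("audit: capability %s\n", has_cap ? "granted" : "denied");
	return has_cap;
}

/* Mirrors the control flow of __sched_setscheduler() after the patch:
 * every unprivileged-case rejection jumps to a single label, and only
 * that label performs the audited check. */
static int set_policy(bool has_cap, bool request_is_allowed)
{
	if (!check_quiet(has_cap)) {
		if (!request_is_allowed)
			goto incapable;
	}
	return 0;	/* request accepted, no audit noise */

incapable:
	/* Generate the audit record only now, on the real denial. */
	(void) check_audited(has_cap);
	return -EPERM;
}

int main(void)
{
	/* Unprivileged caller, allowed request: silent success. */
	printf("allowed: %d\n", set_policy(false, true));
	/* Unprivileged caller, forbidden request: one audit line, -EPERM. */
	printf("denied:  %d\n", set_policy(false, false));
	return 0;
}

Run as is, the first call returns 0 without printing anything, while the second prints exactly one audit line before returning -EPERM, which mirrors the behaviour the patch aims for in __sched_setscheduler().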