@@ -1429,7 +1429,7 @@ static void check_unsafe_exec(struct linux_binprm *bprm)
}
rcu_read_unlock();
- if (p->fs->users > n_fs)
+ if (atomic_read(&p->fs->users) > n_fs)
bprm->unsafe |= LSM_UNSAFE_SHARE;
else
p->fs->in_exec = 1;
@@ -99,7 +99,7 @@ void exit_fs(struct task_struct *tsk)
task_lock(tsk);
spin_lock(&fs->lock);
tsk->fs = NULL;
- kill = !--fs->users;
+ kill = !atomic_dec_return(&fs->users);
spin_unlock(&fs->lock);
task_unlock(tsk);
if (kill)
@@ -112,7 +112,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
/* We don't need to lock fs - think why ;-) */
if (fs) {
- fs->users = 1;
+ atomic_set(&fs->users, 1);
fs->in_exec = 0;
spin_lock_init(&fs->lock);
seqcount_init(&fs->seq);
@@ -139,7 +139,7 @@ int unshare_fs_struct(void)
task_lock(current);
spin_lock(&fs->lock);
- kill = !--fs->users;
+ kill = !atomic_dec_return(&fs->users);
current->fs = new_fs;
spin_unlock(&fs->lock);
task_unlock(current);
@@ -159,7 +159,7 @@ EXPORT_SYMBOL(current_umask);
/* to be mentioned only in INIT_TASK */
struct fs_struct init_fs = {
- .users = 1,
+ .users = ATOMIC_INIT(1),
.lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
.seq = SEQCNT_ZERO(init_fs.seq),
.umask = 0022,
@@ -3394,7 +3394,7 @@ static int mntns_install(struct nsproxy *nsproxy, struct ns_common *ns)
!ns_capable(current_user_ns(), CAP_SYS_ADMIN))
return -EPERM;
- if (fs->users != 1)
+ if (atomic_read(&fs->users) != 1)
return -EINVAL;
get_mnt_ns(mnt_ns);
@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
else
bytes += kobjsize(mm);
- if (current->fs && current->fs->users > 1)
+ if (current->fs && atomic_read(&current->fs->users) > 1)
sbytes += kobjsize(current->fs);
else
bytes += kobjsize(current->fs);
@@ -6,7 +6,7 @@
#include <linux/seqlock.h>
struct fs_struct {
- int users;
+ atomic_t users;
spinlock_t lock;
seqcount_t seq;
int umask;
@@ -1203,7 +1203,7 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
spin_unlock(&fs->lock);
return -EAGAIN;
}
- fs->users++;
+ atomic_inc(&fs->users);
spin_unlock(&fs->lock);
return 0;
}
@@ -2129,7 +2129,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
return 0;
/* don't need lock here; in the worst case we'll do useless copy */
- if (fs->users == 1)
+ if (atomic_read(&fs->users) == 1)
return 0;
*new_fsp = copy_fs_struct(fs);
@@ -2242,7 +2242,7 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
fs = current->fs;
spin_lock(&fs->lock);
current->fs = new_fs;
- if (--fs->users)
+ if (atomic_dec_return(&fs->users))
new_fs = NULL;
else
new_fs = fs;
@@ -1034,7 +1034,7 @@ static int userns_install(struct nsproxy *nsproxy, struct ns_common *ns)
if (!thread_group_empty(current))
return -EINVAL;
- if (current->fs->users != 1)
+ if (atomic_read(&current->fs->users) != 1)
return -EINVAL;
if (!ns_capable(user_ns, CAP_SYS_ADMIN))