@@ -653,7 +653,7 @@ flush_tlb_mm(struct mm_struct *mm)
if (mm == current->active_mm) {
flush_tlb_current(mm);
- if (atomic_read(&mm->mm_users) <= 1) {
+ if (refcount_read(&mm->mm_users) <= 1) {
int cpu, this_cpu = smp_processor_id();
for (cpu = 0; cpu < NR_CPUS; cpu++) {
if (!cpu_online(cpu) || cpu == this_cpu)
@@ -702,7 +702,7 @@ flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
if (mm == current->active_mm) {
flush_tlb_current_page(mm, vma, addr);
- if (atomic_read(&mm->mm_users) <= 1) {
+ if (refcount_read(&mm->mm_users) <= 1) {
int cpu, this_cpu = smp_processor_id();
for (cpu = 0; cpu < NR_CPUS; cpu++) {
if (!cpu_online(cpu) || cpu == this_cpu)
@@ -758,7 +758,7 @@ flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
if (mm == current->active_mm) {
__load_new_mm_context(mm);
- if (atomic_read(&mm->mm_users) <= 1) {
+ if (refcount_read(&mm->mm_users) <= 1) {
int cpu, this_cpu = smp_processor_id();
for (cpu = 0; cpu < NR_CPUS; cpu++) {
if (!cpu_online(cpu) || cpu == this_cpu)
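
The hunks above follow the one mechanical pattern used throughout this series: a reference counter declared as atomic_t becomes refcount_t, and each atomic_*() accessor is replaced by its refcount_*() counterpart. The point of the exercise is that refcount_t saturates on overflow and warns on misuse instead of silently wrapping to zero, turning counter bugs into loud warnings rather than use-after-free. A minimal sketch of the core API semantics these hunks rely on (the struct and function names below are illustrative, not from the patch):

	#include <linux/refcount.h>
	#include <linux/slab.h>

	struct foo {
		refcount_t ref;
	};

	static void foo_get(struct foo *f)
	{
		/* Saturates and WARNs on overflow or on incrementing from zero. */
		refcount_inc(&f->ref);
	}

	static void foo_put(struct foo *f)
	{
		/* Returns true exactly once, when the count drops to zero. */
		if (refcount_dec_and_test(&f->ref))
			kfree(f);
	}
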
@@ -124,7 +124,7 @@ void start_kernel_secondary(void)
/* MMU, Caches, Vector Table, Interrupts etc */
setup_processor();
- atomic_inc(&mm->mm_users);
+ refcount_inc(&mm->mm_users);
- atomic_inc(&mm->mm_count);
+ refcount_inc(&mm->mm_count);
current->active_mm = mm;
cpumask_set_cpu(cpu, mm_cpumask(mm));
@@ -297,7 +297,7 @@ noinline void local_flush_tlb_mm(struct mm_struct *mm)
* Only for fork( ) do we need to move parent to a new MMU ctxt,
* all other cases are NOPs, hence this check.
*/
- if (atomic_read(&mm->mm_users) == 0)
+ if (refcount_read(&mm->mm_users) == 0)
return;
/*
@@ -371,7 +371,7 @@ asmlinkage void secondary_start_kernel(void)
* reference and switch to it.
*/
cpu = smp_processor_id();
- atomic_inc(&mm->mm_count);
+ refcount_inc(&mm->mm_count);
current->active_mm = mm;
cpumask_set_cpu(cpu, mm_cpumask(mm));
@@ -222,7 +222,7 @@ asmlinkage void secondary_start_kernel(void)
* All kernel threads share the same mm context; grab a
* reference and switch to it.
*/
- atomic_inc(&mm->mm_count);
+ refcount_inc(&mm->mm_count);
current->active_mm = mm;
/*
@@ -307,7 +307,7 @@ void secondary_start_kernel(void)
local_irq_disable();
/* Attach the new idle task to the global mm. */
- atomic_inc(&mm->mm_users);
+ refcount_inc(&mm->mm_users);
- atomic_inc(&mm->mm_count);
+ refcount_inc(&mm->mm_count);
current->active_mm = mm;
@@ -422,7 +422,7 @@ void cpu_die(void)
{
(void)cpu_report_death();
- atomic_dec(&init_mm.mm_users);
+ refcount_dec(&init_mm.mm_users);
- atomic_dec(&init_mm.mm_count);
+ refcount_dec(&init_mm.mm_count);
local_irq_disable();
@@ -188,7 +188,7 @@ int cxn_pin_by_pid(pid_t pid)
task_lock(tsk);
if (tsk->mm) {
mm = tsk->mm;
- atomic_inc(&mm->mm_users);
+ refcount_inc(&mm->mm_users);
ret = 0;
}
task_unlock(tsk);
@@ -56,7 +56,7 @@ flush_tlb_mm (struct mm_struct *mm)
set_bit(mm->context, ia64_ctx.flushmap);
mm->context = 0;
- if (atomic_read(&mm->mm_users) == 0)
+ if (refcount_read(&mm->mm_users) == 0)
return; /* happens as a result of exit_mmap() */
#ifdef CONFIG_SMP
@@ -295,7 +295,7 @@ smp_flush_tlb_mm (struct mm_struct *mm)
cpumask_var_t cpus;
preempt_disable();
/* this happens for the common case of a single-threaded fork(): */
- if (likely(mm == current->active_mm && atomic_read(&mm->mm_users) == 1))
+ if (likely(mm == current->active_mm && refcount_read(&mm->mm_users) == 1))
{
local_finish_flush_tlb_mm(mm);
preempt_enable();
@@ -122,7 +122,7 @@ void sn_migrate(struct task_struct *task)
void sn_tlb_migrate_finish(struct mm_struct *mm)
{
/* flush_tlb_mm is inefficient if more than 1 user of mm */
- if (mm == current->mm && mm && atomic_read(&mm->mm_users) == 1)
+ if (mm == current->mm && mm && refcount_read(&mm->mm_users) == 1)
flush_tlb_mm(mm);
}
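
Checks such as refcount_read(&mm->mm_users) == 1 above are heuristics, exactly as the atomic_read() calls they replace: the value can change the instant it is read. They are safe only because a stale answer merely steers the code onto the slower but still correct path. A condensed sketch of the idiom (the flush helpers are hypothetical):

	/* Racy single-user fast path; a stale read only costs an extra IPI. */
	if (mm == current->mm && refcount_read(&mm->mm_users) == 1)
		local_flush(mm);	/* hypothetical: no other CPU uses this mm */
	else
		broadcast_flush(mm);	/* hypothetical: IPI the remaining CPUs */
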
@@ -204,7 +204,7 @@ sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start,
return;
}
- if (atomic_read(&mm->mm_users) == 1 && mymm) {
+ if (refcount_read(&mm->mm_users) == 1 && mymm) {
flush_tlb_mm(mm);
__this_cpu_inc(ptcstats.change_rid);
preempt_enable();
@@ -344,7 +344,7 @@ asmlinkage void secondary_start_kernel(void)
* All kernel threads share the same mm context; grab a
* reference and switch to it.
*/
- atomic_inc(&mm->mm_users);
+ refcount_inc(&mm->mm_users);
- atomic_inc(&mm->mm_count);
+ refcount_inc(&mm->mm_count);
current->active_mm = mm;
cpumask_set_cpu(cpu, mm_cpumask(mm));
@@ -642,7 +642,7 @@ int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
/* No need to send an IPI for the local CPU */
max_users = (task->mm == current->mm) ? 1 : 0;
- if (atomic_read(&current->mm->mm_users) > max_users)
+ if (refcount_read(&current->mm->mm_users) > max_users)
smp_call_function(prepare_for_fp_mode_switch,
(void *)current->mm, 1);
}
@@ -510,7 +510,7 @@ void flush_tlb_mm(struct mm_struct *mm)
{
preempt_disable();
- if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
+ if ((refcount_read(&mm->mm_users) != 1) || (current->mm != mm)) {
smp_on_other_tlbs(flush_tlb_mm_ipi, mm);
} else {
unsigned int cpu;
@@ -543,7 +543,7 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned l
struct mm_struct *mm = vma->vm_mm;
preempt_disable();
- if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
+ if ((refcount_read(&mm->mm_users) != 1) || (current->mm != mm)) {
struct flush_tlb_data fd = {
.vma = vma,
.addr1 = start,
@@ -597,7 +597,7 @@ static void flush_tlb_page_ipi(void *info)
void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
preempt_disable();
- if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) {
+ if ((refcount_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) {
struct flush_tlb_data fd = {
.vma = vma,
.addr1 = page,
@@ -21,7 +21,7 @@ extern void free_sid(unsigned long);
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
- BUG_ON(atomic_read(&mm->mm_users) != 1);
+ BUG_ON(refcount_read(&mm->mm_users) != 1);
mm->context = alloc_sid();
return 0;
@@ -403,7 +403,7 @@ static void hugepd_free(struct mmu_gather *tlb, void *hugepte)
batchp = &get_cpu_var(hugepd_freelist_cur);
- if (atomic_read(&tlb->mm->mm_users) < 2 ||
+ if (refcount_read(&tlb->mm->mm_users) < 2 ||
cpumask_equal(mm_cpumask(tlb->mm),
cpumask_of(smp_processor_id()))) {
kmem_cache_free(hugepte_cache, hugepte);
@@ -110,7 +110,7 @@ int use_cop(unsigned long acop, struct mm_struct *mm)
* running. We need to send an IPI to force them to pick up any
* change in PID and ACOP.
*/
- if (atomic_read(&mm->mm_users) > 1)
+ if (refcount_read(&mm->mm_users) > 1)
smp_call_function(sync_cop, mm, 1);
out:
@@ -150,7 +150,7 @@ void drop_cop(unsigned long acop, struct mm_struct *mm)
* running. We need to send an IPI to force them to pick up any
* change in PID and ACOP.
*/
- if (atomic_read(&mm->mm_users) > 1)
+ if (refcount_read(&mm->mm_users) > 1)
smp_call_function(sync_cop, mm, 1);
if (free_pid != COP_PID_NONE)
@@ -10,6 +10,7 @@
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/time.h>
+#include <linux/refcount.h>
#include <uapi/asm/debug.h>
#define DEBUG_MAX_LEVEL 6 /* debug levels range from 0 to 6 */
@@ -31,7 +32,7 @@ struct debug_view;
typedef struct debug_info {
struct debug_info* next;
struct debug_info* prev;
- atomic_t ref_count;
+ refcount_t ref_count;
spinlock_t lock;
int level;
int nr_areas;
@@ -277,7 +277,7 @@ debug_info_alloc(const char *name, int pages_per_area, int nr_areas,
memset(rc->views, 0, DEBUG_MAX_VIEWS * sizeof(struct debug_view *));
memset(rc->debugfs_entries, 0 ,DEBUG_MAX_VIEWS *
sizeof(struct dentry*));
- atomic_set(&(rc->ref_count), 0);
+ refcount_set(&(rc->ref_count), 0);
return rc;
@@ -416,7 +416,7 @@ static void
debug_info_get(debug_info_t * db_info)
{
if (db_info)
- atomic_inc(&db_info->ref_count);
+ refcount_inc(&db_info->ref_count);
}
/*
@@ -431,7 +431,7 @@ debug_info_put(debug_info_t *db_info)
if (!db_info)
return;
- if (atomic_dec_and_test(&db_info->ref_count)) {
+ if (refcount_dec_and_test(&db_info->ref_count)) {
for (i = 0; i < DEBUG_MAX_VIEWS; i++) {
if (!db_info->views[i])
continue;
@@ -179,7 +179,7 @@ asmlinkage void start_secondary(void)
enable_mmu();
- atomic_inc(&mm->mm_count);
+ refcount_inc(&mm->mm_count);
- atomic_inc(&mm->mm_users);
+ refcount_inc(&mm->mm_users);
current->active_mm = mm;
#ifdef CONFIG_MMU
enter_lazy_tlb(mm, current);
@@ -363,7 +363,7 @@ void flush_tlb_mm(struct mm_struct *mm)
{
preempt_disable();
- if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
+ if ((refcount_read(&mm->mm_users) != 1) || (current->mm != mm)) {
smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1);
} else {
int i;
@@ -395,7 +395,7 @@ void flush_tlb_range(struct vm_area_struct *vma,
struct mm_struct *mm = vma->vm_mm;
preempt_disable();
- if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
+ if ((refcount_read(&mm->mm_users) != 1) || (current->mm != mm)) {
struct flush_tlb_data fd;
fd.vma = vma;
@@ -438,7 +438,7 @@ static void flush_tlb_page_ipi(void *info)
void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
preempt_disable();
- if ((atomic_read(&vma->vm_mm->mm_users) != 1) ||
+ if ((refcount_read(&vma->vm_mm->mm_users) != 1) ||
(current->mm != vma->vm_mm)) {
struct flush_tlb_data fd;
@@ -12,6 +12,7 @@
#include <linux/miscdevice.h>
#include <linux/bootmem.h>
#include <linux/export.h>
+#include <linux/refcount.h>
#include <asm/cpudata.h>
#include <asm/hypervisor.h>
@@ -70,7 +71,7 @@ struct mdesc_handle {
struct list_head list;
struct mdesc_mem_ops *mops;
void *self_base;
- atomic_t refcnt;
+ refcount_t refcnt;
unsigned int handle_size;
struct mdesc_hdr mdesc;
};
@@ -84,7 +85,7 @@ static void mdesc_handle_init(struct mdesc_handle *hp,
memset(hp, 0, handle_size);
INIT_LIST_HEAD(&hp->list);
hp->self_base = base;
- atomic_set(&hp->refcnt, 1);
+ refcount_set(&hp->refcnt, 1);
hp->handle_size = handle_size;
}
@@ -114,7 +115,7 @@ static void __init mdesc_memblock_free(struct mdesc_handle *hp)
unsigned int alloc_size;
unsigned long start;
- BUG_ON(atomic_read(&hp->refcnt) != 0);
+ BUG_ON(refcount_read(&hp->refcnt) != 0);
BUG_ON(!list_empty(&hp->list));
alloc_size = PAGE_ALIGN(hp->handle_size);
@@ -154,7 +155,7 @@ static struct mdesc_handle *mdesc_kmalloc(unsigned int mdesc_size)
static void mdesc_kfree(struct mdesc_handle *hp)
{
- BUG_ON(atomic_read(&hp->refcnt) != 0);
+ BUG_ON(refcount_read(&hp->refcnt) != 0);
BUG_ON(!list_empty(&hp->list));
kfree(hp->self_base);
@@ -193,7 +194,7 @@ struct mdesc_handle *mdesc_grab(void)
spin_lock_irqsave(&mdesc_lock, flags);
hp = cur_mdesc;
if (hp)
- atomic_inc(&hp->refcnt);
+ refcount_inc(&hp->refcnt);
spin_unlock_irqrestore(&mdesc_lock, flags);
return hp;
@@ -205,7 +206,7 @@ void mdesc_release(struct mdesc_handle *hp)
unsigned long flags;
spin_lock_irqsave(&mdesc_lock, flags);
- if (atomic_dec_and_test(&hp->refcnt)) {
+ if (refcount_dec_and_test(&hp->refcnt)) {
list_del_init(&hp->list);
hp->mops->free(hp);
}
@@ -344,7 +345,7 @@ void mdesc_update(void)
if (status != HV_EOK || real_len > len) {
printk(KERN_ERR "MD: mdesc reread fails with %lu\n",
status);
- atomic_dec(&hp->refcnt);
+ refcount_dec(&hp->refcnt);
mdesc_free(hp);
goto out;
}
@@ -357,7 +358,7 @@ void mdesc_update(void)
mdesc_notify_clients(orig_hp, hp);
spin_lock_irqsave(&mdesc_lock, flags);
- if (atomic_dec_and_test(&orig_hp->refcnt))
+ if (refcount_dec_and_test(&orig_hp->refcnt))
mdesc_free(orig_hp);
else
list_add(&orig_hp->list, &mdesc_zombie_list);
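
Note that mdesc_grab() above can keep a plain refcount_inc() rather than refcount_inc_not_zero(): cur_mdesc itself holds a reference, so while the pointer is visible under mdesc_lock the count is provably non-zero and the inc-from-zero warning cannot fire. Condensed:

	spin_lock_irqsave(&mdesc_lock, flags);
	hp = cur_mdesc;			/* cur_mdesc owns one reference... */
	if (hp)
		refcount_inc(&hp->refcnt);	/* ...so the count is >= 1 here */
	spin_unlock_irqrestore(&mdesc_lock, flags);
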
@@ -1063,7 +1063,7 @@ void smp_flush_tlb_mm(struct mm_struct *mm)
u32 ctx = CTX_HWBITS(mm->context);
int cpu = get_cpu();
- if (atomic_read(&mm->mm_users) == 1) {
+ if (refcount_read(&mm->mm_users) == 1) {
cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
goto local_flush_and_out;
}
@@ -1101,7 +1101,7 @@ void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long
info.nr = nr;
info.vaddrs = vaddrs;
- if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
+ if (mm == current->mm && refcount_read(&mm->mm_users) == 1)
cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
else
smp_call_function_many(mm_cpumask(mm), tlb_pending_func,
@@ -1117,7 +1117,7 @@ void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
unsigned long context = CTX_HWBITS(mm->context);
int cpu = get_cpu();
- if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
+ if (mm == current->mm && refcount_read(&mm->mm_users) == 1)
cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
else
smp_cross_call_masked(&xcall_flush_tlb_page,
@@ -1662,7 +1662,7 @@ static void smp_flush_tlb_mm(struct mm_struct *mm)
cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
if (!cpumask_empty(&cpu_mask)) {
xc1((smpfunc_t) local_ops->tlb_mm, (unsigned long) mm);
- if (atomic_read(&mm->mm_users) == 1 && current->active_mm == mm)
+ if (refcount_read(&mm->mm_users) == 1 && current->active_mm == mm)
cpumask_copy(mm_cpumask(mm),
cpumask_of(smp_processor_id()));
}
@@ -530,7 +530,7 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
* Don't bother flushing if this address space is about to be
* destroyed.
*/
- if (atomic_read(&mm->mm_users) == 0)
+ if (refcount_read(&mm->mm_users) == 0)
return;
fix_range(mm, start, end, 0);
@@ -3,6 +3,7 @@
#include <linux/ioport.h>
#include <linux/pci.h>
+#include <linux/refcount.h>
struct amd_nb_bus_dev_range {
u8 bus;
@@ -55,7 +56,7 @@ struct threshold_bank {
struct threshold_block *blocks;
/* initialized to the number of CPUs on the node sharing this bank */
- atomic_t cpus;
+ refcount_t cpus;
};
struct amd_northbridge {
@@ -1491,7 +1491,7 @@ void cpu_init(void)
for (i = 0; i <= IO_BITMAP_LONGS; i++)
t->io_bitmap[i] = ~0UL;
- atomic_inc(&init_mm.mm_count);
+ refcount_inc(&init_mm.mm_count);
me->active_mm = &init_mm;
BUG_ON(me->mm);
enter_lazy_tlb(&init_mm, me);
@@ -1542,7 +1542,7 @@ void cpu_init(void)
/*
* Set up and load the per-CPU TSS and LDT
*/
- atomic_inc(&init_mm.mm_count);
+ refcount_inc(&init_mm.mm_count);
curr->active_mm = &init_mm;
BUG_ON(curr->mm);
enter_lazy_tlb(&init_mm, curr);
@@ -1197,7 +1197,7 @@ static int threshold_create_bank(unsigned int cpu, unsigned int bank)
goto out;
per_cpu(threshold_banks, cpu)[bank] = b;
- atomic_inc(&b->cpus);
+ refcount_inc(&b->cpus);
err = __threshold_add_blocks(b);
@@ -1220,7 +1220,7 @@ static int threshold_create_bank(unsigned int cpu, unsigned int bank)
per_cpu(threshold_banks, cpu)[bank] = b;
if (is_shared_bank(bank)) {
- atomic_set(&b->cpus, 1);
+ refcount_set(&b->cpus, 1);
/* nb is already initialized, see above */
if (nb) {
@@ -1284,7 +1284,7 @@ static void threshold_remove_bank(unsigned int cpu, int bank)
goto free_out;
if (is_shared_bank(bank)) {
- if (!atomic_dec_and_test(&b->cpus)) {
+ if (!refcount_dec_and_test(&b->cpus)) {
__threshold_remove_blocks(b);
per_cpu(threshold_banks, cpu)[bank] = NULL;
return;
@@ -102,8 +102,8 @@ static pgd_t *tboot_pg_dir;
static struct mm_struct tboot_mm = {
.mm_rb = RB_ROOT,
.pgd = swapper_pg_dir,
- .mm_users = ATOMIC_INIT(2),
- .mm_count = ATOMIC_INIT(1),
+ .mm_users = REFCOUNT_INIT(2),
+ .mm_count = REFCOUNT_INIT(1),
.mmap_sem = __RWSEM_INITIALIZER(init_mm.mmap_sem),
.page_table_lock = __SPIN_LOCK_UNLOCKED(init_mm.page_table_lock),
.mmlist = LIST_HEAD_INIT(init_mm.mmlist),
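
Statically allocated objects switch their initializers from ATOMIC_INIT() to REFCOUNT_INIT(), preserving the initial values of the original code. A stripped-down sketch of the pattern (example_mm is illustrative; remaining fields elided):

	static struct mm_struct example_mm = {
		.mm_users = REFCOUNT_INIT(2),	/* same values the hunk preserves */
		.mm_count = REFCOUNT_INIT(1),
		/* ... remaining fields as in the original initializer ... */
	};
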
@@ -135,7 +135,7 @@ void secondary_start_kernel(void)
/* All kernel threads share the same mm context. */
- atomic_inc(&mm->mm_users);
+ refcount_inc(&mm->mm_users);
- atomic_inc(&mm->mm_count);
+ refcount_inc(&mm->mm_count);
current->active_mm = mm;
cpumask_set_cpu(cpu, mm_cpumask(mm));
@@ -32,8 +32,8 @@ extern u64 efi_system_table;
static struct mm_struct efi_mm = {
.mm_rb = RB_ROOT,
- .mm_users = ATOMIC_INIT(2),
- .mm_count = ATOMIC_INIT(1),
+ .mm_users = REFCOUNT_INIT(2),
+ .mm_count = REFCOUNT_INIT(1),
.mmap_sem = __RWSEM_INITIALIZER(efi_mm.mmap_sem),
.page_table_lock = __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock),
.mmlist = LIST_HEAD_INIT(efi_mm.mmlist),
@@ -334,7 +334,7 @@ i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
mm->i915 = to_i915(obj->base.dev);
mm->mm = current->mm;
- atomic_inc(&current->mm->mm_count);
+ refcount_inc(&current->mm->mm_count);
mm->mn = NULL;
@@ -507,7 +507,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
flags |= FOLL_WRITE;
ret = -EFAULT;
- if (atomic_inc_not_zero(&mm->mm_users)) {
+ if (refcount_inc_not_zero(&mm->mm_users)) {
down_read(&mm->mmap_sem);
while (pinned < npages) {
ret = get_user_pages_remote
@@ -579,7 +579,7 @@ static irqreturn_t prq_event_thread(int irq, void *d)
if (!svm->mm)
goto bad_req;
/* If the mm is already defunct, don't handle faults. */
- if (!atomic_inc_not_zero(&svm->mm->mm_users))
+ if (!refcount_inc_not_zero(&svm->mm->mm_users))
goto bad_req;
down_read(&svm->mm->mmap_sem);
vma = find_extend_vma(svm->mm, address);
@@ -347,7 +347,7 @@ static int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
return nr;
tsk->flags |= PF_DUMPCORE;
- if (atomic_read(&mm->mm_users) == nr + 1)
+ if (refcount_read(&mm->mm_users) == nr + 1)
goto done;
/*
* We should find and kill all tasks which use this mm, and we should
@@ -1174,7 +1174,7 @@ static int de_thread(struct task_struct *tsk)
flush_itimer_signals();
#endif
- if (atomic_read(&oldsighand->count) != 1) {
+ if (refcount_read(&oldsighand->count) != 1) {
struct sighand_struct *newsighand;
/*
* This ->sighand is shared with the CLONE_SIGHAND
@@ -1184,7 +1184,7 @@ static int de_thread(struct task_struct *tsk)
if (!newsighand)
return -ENOMEM;
- atomic_set(&newsighand->count, 1);
+ refcount_set(&newsighand->count, 1);
memcpy(newsighand->action, oldsighand->action,
sizeof(newsighand->action));
@@ -798,7 +798,7 @@ struct mm_struct *proc_mem_open(struct inode *inode, unsigned int mode)
if (!IS_ERR_OR_NULL(mm)) {
/* ensure this mm_struct can't be freed */
- atomic_inc(&mm->mm_count);
+ refcount_inc(&mm->mm_count);
/* but do not pin its memory */
mmput(mm);
}
@@ -845,7 +845,7 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
return -ENOMEM;
copied = 0;
- if (!atomic_inc_not_zero(&mm->mm_users))
+ if (!refcount_inc_not_zero(&mm->mm_users))
goto free;
/* Maybe we should limit FOLL_FORCE to actual ptrace users? */
@@ -953,7 +953,7 @@ static ssize_t environ_read(struct file *file, char __user *buf,
return -ENOMEM;
ret = 0;
- if (!atomic_inc_not_zero(&mm->mm_users))
+ if (!refcount_inc_not_zero(&mm->mm_users))
goto free;
down_read(&mm->mmap_sem);
@@ -1094,9 +1094,9 @@ static int __set_oom_adj(struct file *file, int oom_adj, bool legacy)
struct task_struct *p = find_lock_task_mm(task);
if (p) {
- if (atomic_read(&p->mm->mm_users) > 1) {
+ if (refcount_read(&p->mm->mm_users) > 1) {
mm = p->mm;
- atomic_inc(&mm->mm_count);
+ refcount_inc(&mm->mm_count);
}
task_unlock(p);
}
@@ -167,7 +167,7 @@ static void *m_start(struct seq_file *m, loff_t *ppos)
return ERR_PTR(-ESRCH);
mm = priv->mm;
- if (!mm || !atomic_inc_not_zero(&mm->mm_users))
+ if (!mm || !refcount_inc_not_zero(&mm->mm_users))
return NULL;
down_read(&mm->mmap_sem);
@@ -1352,7 +1352,7 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
unsigned long end_vaddr;
int ret = 0, copied = 0;
- if (!mm || !atomic_inc_not_zero(&mm->mm_users))
+ if (!mm || !refcount_inc_not_zero(&mm->mm_users))
goto out;
ret = -EINVAL;
@@ -219,7 +219,7 @@ static void *m_start(struct seq_file *m, loff_t *pos)
return ERR_PTR(-ESRCH);
mm = priv->mm;
- if (!mm || !atomic_inc_not_zero(&mm->mm_users))
+ if (!mm || !refcount_inc_not_zero(&mm->mm_users))
return NULL;
down_read(&mm->mmap_sem);
@@ -1306,7 +1306,7 @@ static struct file *userfaultfd_file_create(int flags)
ctx->released = false;
ctx->mm = current->mm;
/* prevent the mm struct from being freed */
- atomic_inc(&ctx->mm->mm_count);
+ refcount_inc(&ctx->mm->mm_count);
file = anon_inode_getfile("[userfaultfd]", &userfaultfd_fops, ctx,
O_RDWR | (flags & UFFD_SHARED_FCNTL_FLAGS));
@@ -4,6 +4,7 @@
#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/rbtree.h>
+#include <linux/refcount.h>
#include <linux/spinlock.h>
#include <linux/percpu_counter.h>
#include <linux/percpu-refcount.h>
@@ -50,7 +51,7 @@ enum wb_stat_item {
*/
struct bdi_writeback_congested {
unsigned long state; /* WB_[a]sync_congested flags */
- atomic_t refcnt; /* nr of attached wb's and blkg */
+ refcount_t refcnt; /* nr of attached wb's and blkg */
#ifdef CONFIG_CGROUP_WRITEBACK
struct backing_dev_info *bdi; /* the associated bdi */
@@ -422,13 +422,13 @@ static inline bool inode_cgwb_enabled(struct inode *inode)
static inline struct bdi_writeback_congested *
wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp)
{
- atomic_inc(&bdi->wb_congested->refcnt);
+ refcount_inc(&bdi->wb_congested->refcnt);
return bdi->wb_congested;
}
static inline void wb_congested_put(struct bdi_writeback_congested *congested)
{
- if (atomic_dec_and_test(&congested->refcnt))
+ if (refcount_dec_and_test(&congested->refcnt))
kfree(congested);
}
@@ -13,6 +13,7 @@
#include <linux/wait.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
+#include <linux/refcount.h>
#include <linux/percpu-refcount.h>
#include <linux/percpu-rwsem.h>
#include <linux/workqueue.h>
@@ -149,7 +150,7 @@ struct cgroup_subsys_state {
*/
struct css_set {
/* Reference count */
- atomic_t refcount;
+ refcount_t refcount;
/*
* List running through all cgroup groups in the same hash
@@ -22,6 +22,7 @@
#include <linux/ns_common.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
+#include <linux/refcount.h>
#include <linux/cgroup-defs.h>
@@ -640,7 +641,7 @@ static inline void cgroup_sk_free(struct sock_cgroup_data *skcd) {}
#endif /* CONFIG_CGROUP_DATA */
struct cgroup_namespace {
- atomic_t count;
+ refcount_t count;
struct ns_common ns;
struct user_namespace *user_ns;
struct ucounts *ucounts;
@@ -675,12 +676,12 @@ copy_cgroup_ns(unsigned long flags, struct user_namespace *user_ns,
static inline void get_cgroup_ns(struct cgroup_namespace *ns)
{
if (ns)
- atomic_inc(&ns->count);
+ refcount_inc(&ns->count);
}
static inline void put_cgroup_ns(struct cgroup_namespace *ns)
{
- if (ns && atomic_dec_and_test(&ns->count))
+ if (ns && refcount_dec_and_test(&ns->count))
free_cgroup_ns(ns);
}
@@ -17,6 +17,7 @@
#include <linux/key.h>
#include <linux/selinux.h>
#include <linux/atomic.h>
+#include <linux/refcount.h>
#include <linux/uidgid.h>
struct user_struct;
@@ -27,7 +28,7 @@ struct inode;
* COW Supplementary groups list
*/
struct group_info {
- atomic_t usage;
+ refcount_t usage;
int ngroups;
kgid_t gid[0];
};
@@ -43,7 +44,7 @@ struct group_info {
*/
static inline struct group_info *get_group_info(struct group_info *gi)
{
- atomic_inc(&gi->usage);
+ refcount_inc(&gi->usage);
return gi;
}
@@ -53,7 +54,7 @@ static inline struct group_info *get_group_info(struct group_info *gi)
*/
#define put_group_info(group_info) \
do { \
- if (atomic_dec_and_test(&(group_info)->usage)) \
+ if (refcount_dec_and_test(&(group_info)->usage)) \
groups_free(group_info); \
} while (0)
@@ -107,7 +108,7 @@ extern bool may_setgroups(void);
* same context as task->real_cred.
*/
struct cred {
- atomic_t usage;
+ refcount_t usage;
#ifdef CONFIG_DEBUG_CREDENTIALS
atomic_t subscribers; /* number of processes subscribed */
void *put_addr;
@@ -220,7 +221,7 @@ static inline bool cap_ambient_invariant_ok(const struct cred *cred)
*/
static inline struct cred *get_new_cred(struct cred *cred)
{
- atomic_inc(&cred->usage);
+ refcount_inc(&cred->usage);
return cred;
}
@@ -260,7 +261,7 @@ static inline void put_cred(const struct cred *_cred)
struct cred *cred = (struct cred *) _cred;
validate_creds(cred);
- if (atomic_dec_and_test(&(cred)->usage))
+ if (refcount_dec_and_test(&(cred)->usage))
__put_cred(cred);
}
@@ -12,6 +12,7 @@
#include <linux/securebits.h>
#include <linux/seqlock.h>
#include <linux/rbtree.h>
+#include <linux/refcount.h>
#include <net/net_namespace.h>
#include <linux/sched/rt.h>
@@ -65,7 +66,7 @@ extern struct fs_struct init_fs;
extern struct nsproxy init_nsproxy;
#define INIT_SIGHAND(sighand) { \
- .count = ATOMIC_INIT(1), \
+ .count = REFCOUNT_INIT(1), \
.action = { { { .sa_handler = SIG_DFL, } }, }, \
.siglock = __SPIN_LOCK_UNLOCKED(sighand.siglock), \
.signalfd_wqh = __WAIT_QUEUE_HEAD_INITIALIZER(sighand.signalfd_wqh), \
@@ -188,7 +189,7 @@ extern struct task_group root_task_group;
#ifdef CONFIG_THREAD_INFO_IN_TASK
# define INIT_TASK_TI(tsk) \
.thread_info = INIT_THREAD_INFO(tsk), \
- .stack_refcount = ATOMIC_INIT(1),
+ .stack_refcount = REFCOUNT_INIT(1),
#else
# define INIT_TASK_TI(tsk)
#endif
@@ -202,7 +203,7 @@ extern struct task_group root_task_group;
INIT_TASK_TI(tsk) \
.state = 0, \
.stack = init_stack, \
- .usage = ATOMIC_INIT(2), \
+ .usage = REFCOUNT_INIT(2), \
.flags = PF_KTHREAD, \
.prio = MAX_PRIO-20, \
.static_prio = MAX_PRIO-20, \
@@ -26,6 +26,7 @@
#include <linux/context_tracking.h>
#include <linux/irqbypass.h>
#include <linux/swait.h>
+#include <linux/refcount.h>
#include <asm/signal.h>
#include <linux/kvm.h>
@@ -403,7 +404,7 @@ struct kvm {
#endif
struct kvm_vm_stat stat;
struct kvm_arch arch;
- atomic_t users_count;
+ refcount_t users_count;
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
spinlock_t ring_lock;
@@ -7,6 +7,7 @@
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/rbtree.h>
+#include <linux/refcount.h>
#include <linux/rwsem.h>
#include <linux/completion.h>
#include <linux/cpumask.h>
@@ -407,8 +408,8 @@ struct mm_struct {
unsigned long task_size; /* size of task vm space */
unsigned long highest_vm_end; /* highest vma end address */
pgd_t * pgd;
- atomic_t mm_users; /* How many users with user space? */
- atomic_t mm_count; /* How many references to "struct mm_struct" (users count as 1) */
+ refcount_t mm_users; /* How many users with user space? */
+ refcount_t mm_count; /* How many references to "struct mm_struct" (users count as 1) */
atomic_long_t nr_ptes; /* PTE page table pages */
#if CONFIG_PGTABLE_LEVELS > 2
atomic_long_t nr_pmds; /* PMD page table pages */
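
mm_users and mm_count form a two-level scheme: mm_users counts tasks that can touch the address space, while mm_count counts bare references to the struct mm_struct itself, and all mm_users collectively pin a single mm_count reference. Condensed from the mmput()/mmdrop() hunks later in this series:

	/* Dropping the last user tears down the address space... */
	if (refcount_dec_and_test(&mm->mm_users))
		__mmput(mm);		/* ...which eventually drops the mm_count pin */

	/* Dropping the last bare reference frees the struct itself. */
	if (refcount_dec_and_test(&mm->mm_count))
		__mmdrop(mm);
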
@@ -28,7 +28,7 @@ struct fs_struct;
* nsproxy is copied.
*/
struct nsproxy {
- atomic_t count;
+ refcount_t count;
struct uts_namespace *uts_ns;
struct ipc_namespace *ipc_ns;
struct mnt_namespace *mnt_ns;
@@ -74,14 +74,14 @@ int __init nsproxy_cache_init(void);
static inline void put_nsproxy(struct nsproxy *ns)
{
- if (atomic_dec_and_test(&ns->count)) {
+ if (refcount_dec_and_test(&ns->count)) {
free_nsproxy(ns);
}
}
static inline void get_nsproxy(struct nsproxy *ns)
{
- atomic_inc(&ns->count);
+ refcount_inc(&ns->count);
}
#endif
@@ -54,6 +54,7 @@ struct perf_guest_info_callbacks {
#include <linux/perf_regs.h>
#include <linux/workqueue.h>
#include <linux/cgroup.h>
+#include <linux/refcount.h>
#include <asm/local.h>
struct perf_callchain_entry {
@@ -741,7 +742,7 @@ struct perf_event_context {
int nr_stat;
int nr_freq;
int rotate_disable;
- atomic_t refcount;
+ refcount_t refcount;
struct task_struct *task;
/*
@@ -9,6 +9,7 @@
#include <linux/mm.h>
#include <linux/rwsem.h>
#include <linux/memcontrol.h>
+#include <linux/refcount.h>
/*
* The anon_vma heads a list of private "related" vmas, to scan if
@@ -34,7 +35,7 @@ struct anon_vma {
* the reference is responsible for clearing up the
* anon_vma if they are the last user on release
*/
- atomic_t refcount;
+ refcount_t refcount;
/*
* Count of child anon_vmas and VMAs which points to this anon_vma.
@@ -101,14 +102,14 @@ enum ttu_flags {
#ifdef CONFIG_MMU
static inline void get_anon_vma(struct anon_vma *anon_vma)
{
- atomic_inc(&anon_vma->refcount);
+ refcount_inc(&anon_vma->refcount);
}
void __put_anon_vma(struct anon_vma *anon_vma);
static inline void put_anon_vma(struct anon_vma *anon_vma)
{
- if (atomic_dec_and_test(&anon_vma->refcount))
+ if (refcount_dec_and_test(&anon_vma->refcount))
__put_anon_vma(anon_vma);
}
@@ -43,6 +43,7 @@ struct sched_param {
#include <linux/seccomp.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
+#include <linux/refcount.h>
#include <linux/rtmutex.h>
#include <linux/time.h>
@@ -555,7 +556,7 @@ static inline int get_dumpable(struct mm_struct *mm)
#define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK)
struct sighand_struct {
- atomic_t count;
+ refcount_t count;
struct k_sigaction action[_NSIG];
spinlock_t siglock;
wait_queue_head_t signalfd_wqh;
@@ -695,7 +696,7 @@ struct autogroup;
* the locking of signal_struct.
*/
struct signal_struct {
- atomic_t sigcnt;
+ refcount_t sigcnt;
atomic_t live;
int nr_threads;
struct list_head thread_head;
@@ -865,7 +866,7 @@ static inline int signal_group_exit(const struct signal_struct *sig)
* Some day this will be a full-fledged user tracking system..
*/
struct user_struct {
- atomic_t __count; /* reference count */
+ refcount_t __count; /* reference count */
atomic_t processes; /* How many processes does this user have? */
atomic_t sigpending; /* How many pending signals does this user have? */
#ifdef CONFIG_INOTIFY_USER
@@ -1508,7 +1509,7 @@ struct task_struct {
#endif
volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
void *stack;
- atomic_t usage;
+ refcount_t usage;
unsigned int flags; /* per process flags, defined below */
unsigned int ptrace;
@@ -1986,7 +1987,7 @@ struct task_struct {
#endif
#ifdef CONFIG_THREAD_INFO_IN_TASK
/* A live task holds one reference. */
- atomic_t stack_refcount;
+ refcount_t stack_refcount;
#endif
/* CPU-specific state of this task */
struct thread_struct thread;
@@ -2237,13 +2238,13 @@ static inline int is_global_init(struct task_struct *tsk)
extern struct pid *cad_pid;
extern void free_task(struct task_struct *tsk);
-#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
+#define get_task_struct(tsk) do { refcount_inc(&(tsk)->usage); } while(0)
extern void __put_task_struct(struct task_struct *t);
static inline void put_task_struct(struct task_struct *t)
{
- if (atomic_dec_and_test(&t->usage))
+ if (refcount_dec_and_test(&t->usage))
__put_task_struct(t);
}
@@ -2703,7 +2704,7 @@ extern struct task_struct *find_task_by_pid_ns(pid_t nr,
extern struct user_struct * alloc_uid(kuid_t);
static inline struct user_struct *get_uid(struct user_struct *u)
{
- atomic_inc(&u->__count);
+ refcount_inc(&u->__count);
return u;
}
extern void free_uid(struct user_struct *);
@@ -2918,7 +2919,7 @@ extern struct mm_struct * mm_alloc(void);
extern void __mmdrop(struct mm_struct *);
static inline void mmdrop(struct mm_struct *mm)
{
- if (unlikely(atomic_dec_and_test(&mm->mm_count)))
+ if (unlikely(refcount_dec_and_test(&mm->mm_count)))
__mmdrop(mm);
}
@@ -2930,7 +2931,7 @@ static inline void mmdrop_async_fn(struct work_struct *work)
static inline void mmdrop_async(struct mm_struct *mm)
{
- if (unlikely(atomic_dec_and_test(&mm->mm_count))) {
+ if (unlikely(refcount_dec_and_test(&mm->mm_count))) {
INIT_WORK(&mm->async_put_work, mmdrop_async_fn);
schedule_work(&mm->async_put_work);
}
@@ -2938,7 +2939,7 @@ static inline void mmdrop_async(struct mm_struct *mm)
static inline bool mmget_not_zero(struct mm_struct *mm)
{
- return atomic_inc_not_zero(&mm->mm_users);
+ return refcount_inc_not_zero(&mm->mm_users);
}
/* mmput gets rid of the mappings and all user-space */
@@ -3223,7 +3224,7 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
#ifdef CONFIG_THREAD_INFO_IN_TASK
static inline void *try_get_task_stack(struct task_struct *tsk)
{
- return atomic_inc_not_zero(&tsk->stack_refcount) ?
+ return refcount_inc_not_zero(&tsk->stack_refcount) ?
task_stack_page(tsk) : NULL;
}
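
try_get_task_stack() is the canonical consumer of refcount_inc_not_zero(): once a refcount_t has reached zero it stays there, so a lookup racing with teardown can never resurrect a dying object. The usual RCU-side shape of this pattern (the lookup names are hypothetical):

	rcu_read_lock();
	obj = rcu_dereference(global_ptr);	/* hypothetical RCU lookup */
	if (obj && !refcount_inc_not_zero(&obj->ref))
		obj = NULL;	/* lost the race; the object is being freed */
	rcu_read_unlock();
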
@@ -9,7 +9,7 @@ struct audit_tree;
struct audit_chunk;
struct audit_tree {
- atomic_t count;
+ refcount_t count;
int goner;
struct audit_chunk *root;
struct list_head chunks;
@@ -77,7 +77,7 @@ static struct audit_tree *alloc_tree(const char *s)
tree = kmalloc(sizeof(struct audit_tree) + strlen(s) + 1, GFP_KERNEL);
if (tree) {
- atomic_set(&tree->count, 1);
+ refcount_set(&tree->count, 1);
tree->goner = 0;
INIT_LIST_HEAD(&tree->chunks);
INIT_LIST_HEAD(&tree->rules);
@@ -91,12 +91,12 @@ static struct audit_tree *alloc_tree(const char *s)
static inline void get_tree(struct audit_tree *tree)
{
- atomic_inc(&tree->count);
+ refcount_inc(&tree->count);
}
static inline void put_tree(struct audit_tree *tree)
{
- if (atomic_dec_and_test(&tree->count))
+ if (refcount_dec_and_test(&tree->count))
kfree_rcu(tree, head);
}
@@ -963,7 +963,7 @@ static void audit_tree_freeing_mark(struct fsnotify_mark *entry, struct fsnotify
* We are guaranteed to have at least one reference to the mark from
* either the inode or the caller of fsnotify_destroy_mark().
*/
- BUG_ON(atomic_read(&entry->refcnt) < 1);
+ BUG_ON(refcount_read(&entry->refcnt) < 1);
}
static const struct fsnotify_ops audit_tree_ops = {
@@ -46,7 +46,7 @@
*/
struct audit_watch {
- atomic_t count; /* reference count */
+ refcount_t count; /* reference count */
dev_t dev; /* associated superblock device */
char *path; /* insertion path */
unsigned long ino; /* associated inode number */
@@ -111,12 +111,12 @@ static inline struct audit_parent *audit_find_parent(struct inode *inode)
void audit_get_watch(struct audit_watch *watch)
{
- atomic_inc(&watch->count);
+ refcount_inc(&watch->count);
}
void audit_put_watch(struct audit_watch *watch)
{
- if (atomic_dec_and_test(&watch->count)) {
+ if (refcount_dec_and_test(&watch->count)) {
WARN_ON(watch->parent);
WARN_ON(!list_empty(&watch->rules));
kfree(watch->path);
@@ -178,7 +178,7 @@ static struct audit_watch *audit_init_watch(char *path)
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&watch->rules);
- atomic_set(&watch->count, 1);
+ refcount_set(&watch->count, 1);
watch->path = path;
watch->dev = AUDIT_DEV_UNSET;
watch->ino = AUDIT_INO_UNSET;
@@ -223,7 +223,7 @@ static u16 have_free_callback __read_mostly;
/* cgroup namespace for init task */
struct cgroup_namespace init_cgroup_ns = {
- .count = { .counter = 2, },
+ .count = REFCOUNT_INIT(2),
.user_ns = &init_user_ns,
.ns.ops = &cgroupns_operations,
.ns.inum = PROC_CGROUP_INIT_INO,
@@ -646,7 +646,7 @@ struct cgrp_cset_link {
* haven't been created.
*/
struct css_set init_css_set = {
- .refcount = ATOMIC_INIT(1),
+ .refcount = REFCOUNT_INIT(1),
.cgrp_links = LIST_HEAD_INIT(init_css_set.cgrp_links),
.tasks = LIST_HEAD_INIT(init_css_set.tasks),
.mg_tasks = LIST_HEAD_INIT(init_css_set.mg_tasks),
@@ -816,7 +816,7 @@ static void put_css_set_locked(struct css_set *cset)
lockdep_assert_held(&css_set_lock);
- if (!atomic_dec_and_test(&cset->refcount))
+ if (!refcount_dec_and_test(&cset->refcount))
return;
/* This css_set is dead. unlink it and release cgroup and css refs */
@@ -847,10 +847,13 @@ static void put_css_set(struct css_set *cset)
* can see it. Similar to atomic_dec_and_lock(), but for an
* rwlock
*/
- if (atomic_add_unless(&cset->refcount, -1, 1))
+ spin_lock_irqsave(&css_set_lock, flags);
+ if (refcount_read(&cset->refcount) != 1) {
+ WARN_ON(refcount_dec_and_test(&cset->refcount));
+ spin_unlock_irqrestore(&css_set_lock, flags);
return;
+ }
- spin_lock_irqsave(&css_set_lock, flags);
put_css_set_locked(cset);
spin_unlock_irqrestore(&css_set_lock, flags);
}
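
The put_css_set() hunk is one of the few non-mechanical conversions in the series: refcount_t deliberately provides no equivalent of atomic_add_unless(&cnt, -1, 1), so the lockless "decrement unless this is the last reference" fast path is rewritten to take css_set_lock first and decide under it. The WARN_ON() documents the invariant that the decrement on that branch cannot be the final one. The resulting shape, condensed:

	spin_lock_irqsave(&css_set_lock, flags);
	if (refcount_read(&cset->refcount) != 1) {
		/* Not the last reference, so dec_and_test must return false. */
		WARN_ON(refcount_dec_and_test(&cset->refcount));
		spin_unlock_irqrestore(&css_set_lock, flags);
		return;
	}
	put_css_set_locked(cset);	/* last reference: unlink and release */
	spin_unlock_irqrestore(&css_set_lock, flags);
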
@@ -860,7 +863,7 @@ static void put_css_set(struct css_set *cset)
*/
static inline void get_css_set(struct css_set *cset)
{
- atomic_inc(&cset->refcount);
+ refcount_inc(&cset->refcount);
}
/**
@@ -1094,7 +1097,7 @@ static struct css_set *find_css_set(struct css_set *old_cset,
return NULL;
}
- atomic_set(&cset->refcount, 1);
+ refcount_set(&cset->refcount, 1);
INIT_LIST_HEAD(&cset->cgrp_links);
INIT_LIST_HEAD(&cset->tasks);
INIT_LIST_HEAD(&cset->mg_tasks);
@@ -3940,7 +3943,7 @@ static int cgroup_task_count(const struct cgroup *cgrp)
spin_lock_irq(&css_set_lock);
list_for_each_entry(link, &cgrp->cset_links, cset_link)
- count += atomic_read(&link->cset->refcount);
+ count += refcount_read(&link->cset->refcount);
spin_unlock_irq(&css_set_lock);
return count;
}
@@ -6377,7 +6380,7 @@ static struct cgroup_namespace *alloc_cgroup_ns(void)
kfree(new_ns);
return ERR_PTR(ret);
}
- atomic_set(&new_ns->count, 1);
+ refcount_set(&new_ns->count, 1);
new_ns->ns.ops = &cgroupns_operations;
return new_ns;
}
@@ -6548,7 +6551,7 @@ static u64 current_css_set_refcount_read(struct cgroup_subsys_state *css,
u64 count;
rcu_read_lock();
- count = atomic_read(&task_css_set(current)->refcount);
+ count = refcount_read(&task_css_set(current)->refcount);
rcu_read_unlock();
return count;
}
@@ -35,13 +35,13 @@ do { \
static struct kmem_cache *cred_jar;
/* init to 2 - one for init_task, one to ensure it is never freed */
-struct group_info init_groups = { .usage = ATOMIC_INIT(2) };
+struct group_info init_groups = { .usage = REFCOUNT_INIT(2) };
/*
* The initial credentials for the initial task
*/
struct cred init_cred = {
- .usage = ATOMIC_INIT(4),
+ .usage = REFCOUNT_INIT(4),
#ifdef CONFIG_DEBUG_CREDENTIALS
.subscribers = ATOMIC_INIT(2),
.magic = CRED_MAGIC,
@@ -100,17 +100,17 @@ static void put_cred_rcu(struct rcu_head *rcu)
#ifdef CONFIG_DEBUG_CREDENTIALS
if (cred->magic != CRED_MAGIC_DEAD ||
- atomic_read(&cred->usage) != 0 ||
+ refcount_read(&cred->usage) != 0 ||
read_cred_subscribers(cred) != 0)
panic("CRED: put_cred_rcu() sees %p with"
" mag %x, put %p, usage %d, subscr %d\n",
cred, cred->magic, cred->put_addr,
- atomic_read(&cred->usage),
+ refcount_read(&cred->usage),
read_cred_subscribers(cred));
#else
- if (atomic_read(&cred->usage) != 0)
+ if (refcount_read(&cred->usage) != 0)
panic("CRED: put_cred_rcu() sees %p with usage %d\n",
- cred, atomic_read(&cred->usage));
+ cred, refcount_read(&cred->usage));
#endif
security_cred_free(cred);
@@ -134,10 +134,10 @@ static void put_cred_rcu(struct rcu_head *rcu)
void __put_cred(struct cred *cred)
{
kdebug("__put_cred(%p{%d,%d})", cred,
- atomic_read(&cred->usage),
+ refcount_read(&cred->usage),
read_cred_subscribers(cred));
- BUG_ON(atomic_read(&cred->usage) != 0);
+ BUG_ON(refcount_read(&cred->usage) != 0);
#ifdef CONFIG_DEBUG_CREDENTIALS
BUG_ON(read_cred_subscribers(cred) != 0);
cred->magic = CRED_MAGIC_DEAD;
@@ -158,7 +158,7 @@ void exit_creds(struct task_struct *tsk)
struct cred *cred;
kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
- atomic_read(&tsk->cred->usage),
+ refcount_read(&tsk->cred->usage),
read_cred_subscribers(tsk->cred));
cred = (struct cred *) tsk->real_cred;
@@ -193,7 +193,7 @@ const struct cred *get_task_cred(struct task_struct *task)
do {
cred = __task_cred((task));
BUG_ON(!cred);
- } while (!atomic_inc_not_zero(&((struct cred *)cred)->usage));
+ } while (!refcount_inc_not_zero(&((struct cred *)cred)->usage));
rcu_read_unlock();
return cred;
@@ -211,7 +211,7 @@ struct cred *cred_alloc_blank(void)
if (!new)
return NULL;
- atomic_set(&new->usage, 1);
+ refcount_set(&new->usage, 1);
#ifdef CONFIG_DEBUG_CREDENTIALS
new->magic = CRED_MAGIC;
#endif
@@ -257,7 +257,7 @@ struct cred *prepare_creds(void)
old = task->cred;
memcpy(new, old, sizeof(struct cred));
- atomic_set(&new->usage, 1);
+ refcount_set(&new->usage, 1);
set_cred_subscribers(new, 0);
get_group_info(new->group_info);
get_uid(new->user);
@@ -334,7 +334,7 @@ int copy_creds(struct task_struct *p, unsigned long clone_flags)
get_cred(p->cred);
alter_cred_subscribers(p->cred, 2);
kdebug("share_creds(%p{%d,%d})",
- p->cred, atomic_read(&p->cred->usage),
+ p->cred, refcount_read(&p->cred->usage),
read_cred_subscribers(p->cred));
atomic_inc(&p->cred->user->processes);
return 0;
@@ -425,7 +425,7 @@ int commit_creds(struct cred *new)
const struct cred *old = task->real_cred;
kdebug("commit_creds(%p{%d,%d})", new,
- atomic_read(&new->usage),
+ refcount_read(&new->usage),
read_cred_subscribers(new));
BUG_ON(task->cred != old);
@@ -434,7 +434,7 @@ int commit_creds(struct cred *new)
validate_creds(old);
validate_creds(new);
#endif
- BUG_ON(atomic_read(&new->usage) < 1);
+ BUG_ON(refcount_read(&new->usage) < 1);
get_cred(new); /* we will require a ref for the subj creds too */
@@ -499,13 +499,13 @@ EXPORT_SYMBOL(commit_creds);
void abort_creds(struct cred *new)
{
kdebug("abort_creds(%p{%d,%d})", new,
- atomic_read(&new->usage),
+ refcount_read(&new->usage),
read_cred_subscribers(new));
#ifdef CONFIG_DEBUG_CREDENTIALS
BUG_ON(read_cred_subscribers(new) != 0);
#endif
- BUG_ON(atomic_read(&new->usage) < 1);
+ BUG_ON(refcount_read(&new->usage) < 1);
put_cred(new);
}
EXPORT_SYMBOL(abort_creds);
@@ -522,7 +522,7 @@ const struct cred *override_creds(const struct cred *new)
const struct cred *old = current->cred;
kdebug("override_creds(%p{%d,%d})", new,
- atomic_read(&new->usage),
+ refcount_read(&new->usage),
read_cred_subscribers(new));
validate_creds(old);
@@ -533,7 +533,7 @@ const struct cred *override_creds(const struct cred *new)
alter_cred_subscribers(old, -1);
kdebug("override_creds() = %p{%d,%d}", old,
- atomic_read(&old->usage),
+ refcount_read(&old->usage),
read_cred_subscribers(old));
return old;
}
@@ -551,7 +551,7 @@ void revert_creds(const struct cred *old)
const struct cred *override = current->cred;
kdebug("revert_creds(%p{%d,%d})", old,
- atomic_read(&old->usage),
+ refcount_read(&old->usage),
read_cred_subscribers(old));
validate_creds(old);
@@ -610,7 +610,7 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
validate_creds(old);
*new = *old;
- atomic_set(&new->usage, 1);
+ refcount_set(&new->usage, 1);
set_cred_subscribers(new, 0);
get_uid(new->user);
get_user_ns(new->user_ns);
@@ -734,7 +734,7 @@ static void dump_invalid_creds(const struct cred *cred, const char *label,
printk(KERN_ERR "CRED: ->magic=%x, put_addr=%p\n",
cred->magic, cred->put_addr);
printk(KERN_ERR "CRED: ->usage=%d, subscr=%d\n",
- atomic_read(&cred->usage),
+ refcount_read(&cred->usage),
read_cred_subscribers(cred));
printk(KERN_ERR "CRED: ->*uid = { %d,%d,%d,%d }\n",
from_kuid_munged(&init_user_ns, cred->uid),
@@ -808,7 +808,7 @@ void validate_creds_for_do_exit(struct task_struct *tsk)
{
kdebug("validate_creds_for_do_exit(%p,%p{%d,%d})",
tsk->real_cred, tsk->cred,
- atomic_read(&tsk->cred->usage),
+ refcount_read(&tsk->cred->usage),
read_cred_subscribers(tsk->cred));
__validate_process_creds(tsk, __FILE__, __LINE__);
@@ -1117,7 +1117,7 @@ static void perf_event_ctx_deactivate(struct perf_event_context *ctx)
static void get_ctx(struct perf_event_context *ctx)
{
- WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
+ WARN_ON(!refcount_inc_not_zero(&ctx->refcount));
}
static void free_ctx(struct rcu_head *head)
@@ -1131,7 +1131,7 @@ static void free_ctx(struct rcu_head *head)
static void put_ctx(struct perf_event_context *ctx)
{
- if (atomic_dec_and_test(&ctx->refcount)) {
+ if (refcount_dec_and_test(&ctx->refcount)) {
if (ctx->parent_ctx)
put_ctx(ctx->parent_ctx);
if (ctx->task && ctx->task != TASK_TOMBSTONE)
@@ -1209,7 +1209,7 @@ perf_event_ctx_lock_nested(struct perf_event *event, int nesting)
again:
rcu_read_lock();
ctx = ACCESS_ONCE(event->ctx);
- if (!atomic_inc_not_zero(&ctx->refcount)) {
+ if (!refcount_inc_not_zero(&ctx->refcount)) {
rcu_read_unlock();
goto again;
}
@@ -1337,7 +1337,7 @@ perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags)
}
if (ctx->task == TASK_TOMBSTONE ||
- !atomic_inc_not_zero(&ctx->refcount)) {
+ !refcount_inc_not_zero(&ctx->refcount)) {
raw_spin_unlock(&ctx->lock);
ctx = NULL;
} else {
@@ -3639,7 +3639,7 @@ static void __perf_event_init_context(struct perf_event_context *ctx)
INIT_LIST_HEAD(&ctx->pinned_groups);
INIT_LIST_HEAD(&ctx->flexible_groups);
INIT_LIST_HEAD(&ctx->event_list);
- atomic_set(&ctx->refcount, 1);
+ refcount_set(&ctx->refcount, 1);
}
static struct perf_event_context *
@@ -4934,7 +4934,7 @@ struct ring_buffer *ring_buffer_get(struct perf_event *event)
rcu_read_lock();
rb = rcu_dereference(event->rb);
if (rb) {
- if (!atomic_inc_not_zero(&rb->refcount))
+ if (!refcount_inc_not_zero(&rb->refcount))
rb = NULL;
}
rcu_read_unlock();
@@ -4944,7 +4944,7 @@ struct ring_buffer *ring_buffer_get(struct perf_event *event)
void ring_buffer_put(struct ring_buffer *rb)
{
- if (!atomic_dec_and_test(&rb->refcount))
+ if (!refcount_dec_and_test(&rb->refcount))
return;
WARN_ON_ONCE(!list_empty(&rb->event_list));
@@ -5009,7 +5009,7 @@ static void perf_mmap_close(struct vm_area_struct *vma)
/* this has to be the last one */
rb_free_aux(rb);
- WARN_ON_ONCE(atomic_read(&rb->aux_refcount));
+ WARN_ON_ONCE(refcount_read(&rb->aux_refcount));
mutex_unlock(&event->mmap_mutex);
}
@@ -2,6 +2,7 @@
#define _KERNEL_EVENTS_INTERNAL_H
#include <linux/hardirq.h>
+#include <linux/refcount.h>
#include <linux/uaccess.h>
/* Buffer handling */
@@ -9,7 +10,7 @@
#define RING_BUFFER_WRITABLE 0x01
struct ring_buffer {
- atomic_t refcount;
+ refcount_t refcount;
struct rcu_head rcu_head;
#ifdef CONFIG_PERF_USE_VMALLOC
struct work_struct work;
@@ -47,7 +48,7 @@ struct ring_buffer {
atomic_t aux_mmap_count;
unsigned long aux_mmap_locked;
void (*free_aux)(void *);
- atomic_t aux_refcount;
+ refcount_t aux_refcount;
void **aux_pages;
void *aux_priv;
@@ -284,7 +284,7 @@ ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
else
rb->overwrite = 1;
- atomic_set(&rb->refcount, 1);
+ refcount_set(&rb->refcount, 1);
INIT_LIST_HEAD(&rb->event_list);
spin_lock_init(&rb->event_lock);
@@ -344,7 +344,7 @@ void *perf_aux_output_begin(struct perf_output_handle *handle,
if (!atomic_read(&rb->aux_mmap_count))
goto err;
- if (!atomic_inc_not_zero(&rb->aux_refcount))
+ if (!refcount_inc_not_zero(&rb->aux_refcount))
goto err;
/*
@@ -636,7 +636,7 @@ int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
* we keep a refcount here to make sure either of the two can
* reference them safely.
*/
- atomic_set(&rb->aux_refcount, 1);
+ refcount_set(&rb->aux_refcount, 1);
rb->aux_overwrite = overwrite;
rb->aux_watermark = watermark;
@@ -655,7 +655,7 @@ int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
void rb_free_aux(struct ring_buffer *rb)
{
- if (atomic_dec_and_test(&rb->aux_refcount))
+ if (refcount_dec_and_test(&rb->aux_refcount))
__rb_free_aux(rb);
}
@@ -37,6 +37,7 @@
#include <linux/percpu-rwsem.h>
#include <linux/task_work.h>
#include <linux/shmem_fs.h>
+#include <linux/refcount.h>
#include <linux/uprobes.h>
@@ -64,7 +65,7 @@ static struct percpu_rw_semaphore dup_mmap_sem;
struct uprobe {
struct rb_node rb_node; /* node in the rb tree */
- atomic_t ref;
+ refcount_t ref;
struct rw_semaphore register_rwsem;
struct rw_semaphore consumer_rwsem;
struct list_head pending_list;
@@ -363,13 +364,13 @@ set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long v
static struct uprobe *get_uprobe(struct uprobe *uprobe)
{
- atomic_inc(&uprobe->ref);
+ refcount_inc(&uprobe->ref);
return uprobe;
}
static void put_uprobe(struct uprobe *uprobe)
{
- if (atomic_dec_and_test(&uprobe->ref))
+ if (refcount_dec_and_test(&uprobe->ref))
kfree(uprobe);
}
@@ -451,7 +452,7 @@ static struct uprobe *__insert_uprobe(struct uprobe *uprobe)
rb_link_node(&uprobe->rb_node, parent, p);
rb_insert_color(&uprobe->rb_node, &uprobes_tree);
/* get access + creation ref */
- atomic_set(&uprobe->ref, 2);
+ refcount_set(&uprobe->ref, 2);
return u;
}
@@ -741,7 +742,7 @@ build_map_info(struct address_space *mapping, loff_t offset, bool is_register)
continue;
}
- if (!atomic_inc_not_zero(&vma->vm_mm->mm_users))
+ if (!refcount_inc_not_zero(&vma->vm_mm->mm_users))
continue;
info = prev;
@@ -1115,7 +1116,7 @@ void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned lon
if (no_uprobe_events() || !valid_vma(vma, false))
return;
- if (!atomic_read(&vma->vm_mm->mm_users)) /* called by mmput() ? */
+ if (!refcount_read(&vma->vm_mm->mm_users)) /* called by mmput() ? */
return;
if (!test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags) ||
@@ -396,7 +396,7 @@ void mm_update_next_owner(struct mm_struct *mm)
* candidates. Do not leave the mm pointing to a possibly
* freed task structure.
*/
- if (atomic_read(&mm->mm_users) <= 1) {
+ if (refcount_read(&mm->mm_users) <= 1) {
mm->owner = NULL;
return;
}
@@ -509,7 +509,7 @@ static void exit_mm(struct task_struct *tsk)
__set_task_state(tsk, TASK_RUNNING);
down_read(&mm->mmap_sem);
}
- atomic_inc(&mm->mm_count);
+ refcount_inc(&mm->mm_count);
BUG_ON(mm != tsk->active_mm);
/* more a memory barrier than a real lock */
task_lock(tsk);
@@ -330,7 +330,7 @@ static void release_task_stack(struct task_struct *tsk)
#ifdef CONFIG_THREAD_INFO_IN_TASK
void put_task_stack(struct task_struct *tsk)
{
- if (atomic_dec_and_test(&tsk->stack_refcount))
+ if (refcount_dec_and_test(&tsk->stack_refcount))
release_task_stack(tsk);
}
#endif
@@ -348,7 +348,7 @@ void free_task(struct task_struct *tsk)
* If the task had a separate stack allocation, it should be gone
* by now.
*/
- WARN_ON_ONCE(atomic_read(&tsk->stack_refcount) != 0);
+ WARN_ON_ONCE(refcount_read(&tsk->stack_refcount) != 0);
#endif
rt_mutex_debug_task_free(tsk);
ftrace_graph_exit_task(tsk);
@@ -375,14 +375,14 @@ static inline void free_signal_struct(struct signal_struct *sig)
static inline void put_signal_struct(struct signal_struct *sig)
{
- if (atomic_dec_and_test(&sig->sigcnt))
+ if (refcount_dec_and_test(&sig->sigcnt))
free_signal_struct(sig);
}
void __put_task_struct(struct task_struct *tsk)
{
WARN_ON(!tsk->exit_state);
- WARN_ON(atomic_read(&tsk->usage));
+ WARN_ON(refcount_read(&tsk->usage));
WARN_ON(tsk == current);
cgroup_free(tsk);
@@ -501,7 +501,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
tsk->stack_vm_area = stack_vm_area;
#endif
#ifdef CONFIG_THREAD_INFO_IN_TASK
- atomic_set(&tsk->stack_refcount, 1);
+ refcount_set(&tsk->stack_refcount, 1);
#endif
if (err)
@@ -530,7 +530,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
* One for us, one for whoever does the "release_task()" (usually
* parent)
*/
- atomic_set(&tsk->usage, 2);
+ refcount_set(&tsk->usage, 2);
#ifdef CONFIG_BLK_DEV_IO_TRACE
tsk->btrace_seq = 0;
#endif
@@ -753,8 +753,8 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
mm->mmap = NULL;
mm->mm_rb = RB_ROOT;
mm->vmacache_seqnum = 0;
- atomic_set(&mm->mm_users, 1);
- atomic_set(&mm->mm_count, 1);
+ refcount_set(&mm->mm_users, 1);
+ refcount_set(&mm->mm_count, 1);
init_rwsem(&mm->mmap_sem);
INIT_LIST_HEAD(&mm->mmlist);
mm->core_state = NULL;
@@ -856,7 +856,7 @@ EXPORT_SYMBOL_GPL(__mmdrop);
static inline void __mmput(struct mm_struct *mm)
{
- VM_BUG_ON(atomic_read(&mm->mm_users));
+ VM_BUG_ON(refcount_read(&mm->mm_users));
uprobe_clear_state(mm);
exit_aio(mm);
@@ -883,7 +883,7 @@ void mmput(struct mm_struct *mm)
{
might_sleep();
- if (atomic_dec_and_test(&mm->mm_users))
+ if (refcount_dec_and_test(&mm->mm_users))
__mmput(mm);
}
EXPORT_SYMBOL_GPL(mmput);
@@ -897,7 +897,7 @@ static void mmput_async_fn(struct work_struct *work)
void mmput_async(struct mm_struct *mm)
{
- if (atomic_dec_and_test(&mm->mm_users)) {
+ if (refcount_dec_and_test(&mm->mm_users)) {
INIT_WORK(&mm->async_put_work, mmput_async_fn);
schedule_work(&mm->async_put_work);
}
@@ -994,7 +994,7 @@ struct mm_struct *get_task_mm(struct task_struct *task)
if (task->flags & PF_KTHREAD)
mm = NULL;
else
- atomic_inc(&mm->mm_users);
+ refcount_inc(&mm->mm_users);
}
task_unlock(task);
return mm;
@@ -1096,7 +1096,7 @@ void mm_release(struct task_struct *tsk, struct mm_struct *mm)
*/
if (tsk->clear_child_tid) {
if (!(tsk->signal->flags & SIGNAL_GROUP_COREDUMP) &&
- atomic_read(&mm->mm_users) > 1) {
+ refcount_read(&mm->mm_users) > 1) {
/*
* We don't check the error code - if userspace has
* not set up a proper pointer then tough luck.
@@ -1182,7 +1182,7 @@ static int copy_mm(unsigned long clone_flags, struct task_struct *tsk)
vmacache_flush(tsk);
if (clone_flags & CLONE_VM) {
- atomic_inc(&oldmm->mm_users);
+ refcount_inc(&oldmm->mm_users);
mm = oldmm;
goto good_mm;
}
@@ -1279,7 +1279,7 @@ static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
struct sighand_struct *sig;
if (clone_flags & CLONE_SIGHAND) {
- atomic_inc(&current->sighand->count);
+ refcount_inc(&current->sighand->count);
return 0;
}
sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
@@ -1287,14 +1287,14 @@ static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
if (!sig)
return -ENOMEM;
- atomic_set(&sig->count, 1);
+ refcount_set(&sig->count, 1);
memcpy(sig->action, current->sighand->action, sizeof(sig->action));
return 0;
}
void __cleanup_sighand(struct sighand_struct *sighand)
{
- if (atomic_dec_and_test(&sighand->count)) {
+ if (refcount_dec_and_test(&sighand->count)) {
signalfd_cleanup(sighand);
/*
* sighand_cachep is SLAB_DESTROY_BY_RCU so we can free it
@@ -1337,7 +1337,7 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
sig->nr_threads = 1;
atomic_set(&sig->live, 1);
- atomic_set(&sig->sigcnt, 1);
+ refcount_set(&sig->sigcnt, 1);
/* list_add(thread_node, thread_head) without INIT_LIST_HEAD() */
sig->thread_head = (struct list_head)LIST_HEAD_INIT(tsk->thread_node);
@@ -1808,7 +1808,7 @@ static __latent_entropy struct task_struct *copy_process(
} else {
current->signal->nr_threads++;
atomic_inc(&current->signal->live);
- atomic_inc(&current->signal->sigcnt);
+ refcount_inc(&current->signal->sigcnt);
list_add_tail_rcu(&p->thread_group,
&p->group_leader->thread_group);
list_add_tail_rcu(&p->thread_node,
@@ -2120,7 +2120,7 @@ static int check_unshare_flags(unsigned long unshare_flags)
return -EINVAL;
}
if (unshare_flags & (CLONE_SIGHAND | CLONE_VM)) {
- if (atomic_read(&current->sighand->count) > 1)
+ if (refcount_read(&current->sighand->count) > 1)
return -EINVAL;
}
if (unshare_flags & CLONE_VM) {
@@ -65,6 +65,7 @@
#include <linux/freezer.h>
#include <linux/bootmem.h>
#include <linux/fault-inject.h>
+#include <linux/refcount.h>
#include <asm/futex.h>
@@ -207,7 +208,7 @@ struct futex_pi_state {
struct rt_mutex pi_mutex;
struct task_struct *owner;
- atomic_t refcount;
+ refcount_t refcount;
union futex_key key;
};
@@ -338,7 +339,7 @@ static inline bool should_fail_futex(bool fshared)
static inline void futex_get_mm(union futex_key *key)
{
- atomic_inc(&key->private.mm->mm_count);
+ refcount_inc(&key->private.mm->mm_count);
/*
* Ensure futex_get_mm() implies a full barrier such that
* get_futex_key() implies a full barrier. This is relied upon
@@ -792,7 +793,7 @@ static int refill_pi_state_cache(void)
INIT_LIST_HEAD(&pi_state->list);
/* pi_mutex gets initialized later */
pi_state->owner = NULL;
- atomic_set(&pi_state->refcount, 1);
+ refcount_set(&pi_state->refcount, 1);
pi_state->key = FUTEX_KEY_INIT;
current->pi_state_cache = pi_state;
@@ -821,7 +822,7 @@ static void put_pi_state(struct futex_pi_state *pi_state)
if (!pi_state)
return;
- if (!atomic_dec_and_test(&pi_state->refcount))
+ if (!refcount_dec_and_test(&pi_state->refcount))
return;
/*
@@ -845,7 +846,7 @@ static void put_pi_state(struct futex_pi_state *pi_state)
* refcount is at 0 - put it back to 1.
*/
pi_state->owner = NULL;
- atomic_set(&pi_state->refcount, 1);
+ refcount_set(&pi_state->refcount, 1);
current->pi_state_cache = pi_state;
}
}
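
put_pi_state() shows a rare legitimate use of refcount_set() outside object construction: reviving a cached object whose count has genuinely reached zero. refcount_inc() from zero would (rightly) warn, so the cached pi_state is re-armed with an explicit set back to 1:

	/* Count hit zero, but the object is recycled, not freed: re-arm it. */
	pi_state->owner = NULL;
	refcount_set(&pi_state->refcount, 1);
	current->pi_state_cache = pi_state;
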
@@ -989,7 +990,7 @@ static int attach_to_pi_state(u32 uval, struct futex_pi_state *pi_state,
if (unlikely(!pi_state))
return -EINVAL;
- WARN_ON(!atomic_read(&pi_state->refcount));
+ WARN_ON(!refcount_read(&pi_state->refcount));
/*
* Handle the owner died case:
@@ -1040,7 +1041,7 @@ static int attach_to_pi_state(u32 uval, struct futex_pi_state *pi_state,
if (pid != task_pid_vnr(pi_state->owner))
return -EINVAL;
out_state:
- atomic_inc(&pi_state->refcount);
+ refcount_inc(&pi_state->refcount);
*ps = pi_state;
return 0;
}
@@ -1907,7 +1908,7 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
* refcount on the pi_state and store the pointer in
* the futex_q object of the waiter.
*/
- atomic_inc(&pi_state->refcount);
+ refcount_inc(&pi_state->refcount);
this->pi_state = pi_state;
ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex,
this->rt_waiter,
@@ -22,7 +22,7 @@ struct group_info *groups_alloc(int gidsetsize)
if (!gi)
return NULL;
- atomic_set(&gi->usage, 1);
+ refcount_set(&gi->usage, 1);
gi->ngroups = gidsetsize;
return gi;
}
@@ -19,6 +19,7 @@
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/kcov.h>
+#include <linux/refcount.h>
#include <asm/setup.h>
/*
@@ -35,7 +36,7 @@ struct kcov {
* - opened file descriptor
* - task with enabled coverage (we can't unwire it from another task)
*/
- atomic_t refcount;
+ refcount_t refcount;
/* The lock protects mode, size, area and t. */
spinlock_t lock;
enum kcov_mode mode;
@@ -101,12 +102,12 @@ EXPORT_SYMBOL(__sanitizer_cov_trace_pc);
static void kcov_get(struct kcov *kcov)
{
- atomic_inc(&kcov->refcount);
+ refcount_inc(&kcov->refcount);
}
static void kcov_put(struct kcov *kcov)
{
- if (atomic_dec_and_test(&kcov->refcount)) {
+ if (refcount_dec_and_test(&kcov->refcount)) {
vfree(kcov->area);
kfree(kcov);
}
@@ -182,7 +183,7 @@ static int kcov_open(struct inode *inode, struct file *filep)
kcov = kzalloc(sizeof(*kcov), GFP_KERNEL);
if (!kcov)
return -ENOMEM;
- atomic_set(&kcov->refcount, 1);
+ refcount_set(&kcov->refcount, 1);
spin_lock_init(&kcov->lock);
filep->private_data = kcov;
return nonseekable_open(inode, filep);
@@ -30,7 +30,7 @@
static struct kmem_cache *nsproxy_cachep;
struct nsproxy init_nsproxy = {
- .count = ATOMIC_INIT(1),
+ .count = REFCOUNT_INIT(1),
.uts_ns = &init_uts_ns,
#if defined(CONFIG_POSIX_MQUEUE) || defined(CONFIG_SYSVIPC)
.ipc_ns = &init_ipc_ns,
@@ -51,7 +51,7 @@ static inline struct nsproxy *create_nsproxy(void)
nsproxy = kmem_cache_alloc(nsproxy_cachep, GFP_KERNEL);
if (nsproxy)
- atomic_set(&nsproxy->count, 1);
+ refcount_set(&nsproxy->count, 1);
return nsproxy;
}
@@ -224,7 +224,7 @@ void switch_task_namespaces(struct task_struct *p, struct nsproxy *new)
p->nsproxy = new;
task_unlock(p);
- if (ns && atomic_dec_and_test(&ns->count))
+ if (ns && refcount_dec_and_test(&ns->count))
free_nsproxy(ns);
}
@@ -2231,7 +2231,7 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
#endif
#ifdef CONFIG_NUMA_BALANCING
- if (p->mm && atomic_read(&p->mm->mm_users) == 1) {
+ if (p->mm && refcount_read(&p->mm->mm_users) == 1) {
p->mm->numa_next_scan = jiffies + msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
p->mm->numa_scan_seq = 0;
}
@@ -2878,7 +2878,7 @@ context_switch(struct rq *rq, struct task_struct *prev,
if (!mm) {
next->active_mm = oldmm;
- atomic_inc(&oldmm->mm_count);
+ refcount_inc(&oldmm->mm_count);
enter_lazy_tlb(oldmm, next);
} else
switch_mm_irqs_off(oldmm, mm, next);
@@ -7686,7 +7687,7 @@ void __init sched_init(void)
/*
* The boot idle thread does lazy MMU switching as well:
*/
- atomic_inc(&init_mm.mm_count);
+ refcount_inc(&init_mm.mm_count);
enter_lazy_tlb(&init_mm, current);
/*
@@ -1133,7 +1133,7 @@ static void account_numa_dequeue(struct rq *rq, struct task_struct *p)
}
struct numa_group {
- atomic_t refcount;
+ refcount_t refcount;
spinlock_t lock; /* nr_tasks, tasks */
int nr_tasks;
@@ -2181,12 +2181,12 @@ static void task_numa_placement(struct task_struct *p)
static inline int get_numa_group(struct numa_group *grp)
{
- return atomic_inc_not_zero(&grp->refcount);
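+ /* opportunistic get under RCU: fails once the last reference is gone */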
+ return refcount_inc_not_zero(&grp->refcount);
}
static inline void put_numa_group(struct numa_group *grp)
{
- if (atomic_dec_and_test(&grp->refcount))
+ if (refcount_dec_and_test(&grp->refcount))
kfree_rcu(grp, rcu);
}
@@ -2207,7 +2207,7 @@ static void task_numa_group(struct task_struct *p, int cpupid, int flags,
if (!grp)
return;
- atomic_set(&grp->refcount, 1);
+ refcount_set(&grp->refcount, 1);
grp->active_nodes = 1;
grp->max_faults_cpu = 0;
spin_lock_init(&grp->lock);
@@ -89,7 +89,7 @@ static DEFINE_SPINLOCK(uidhash_lock);
/* root_user.__count is 1, for init task cred */
struct user_struct root_user = {
- .__count = ATOMIC_INIT(1),
+ .__count = REFCOUNT_INIT(1),
.processes = ATOMIC_INIT(1),
.sigpending = ATOMIC_INIT(0),
.locked_shm = 0,
@@ -115,7 +115,7 @@ static struct user_struct *uid_hash_find(kuid_t uid, struct hlist_head *hashent)
hlist_for_each_entry(user, hashent, uidhash_node) {
if (uid_eq(user->uid, uid)) {
- atomic_inc(&user->__count);
+ refcount_inc(&user->__count);
return user;
}
}
@@ -162,7 +162,7 @@ void free_uid(struct user_struct *up)
return;
local_irq_save(flags);
- if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
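+ /* returns with uidhash_lock held only when the count has dropped to zero */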
+ if (refcount_dec_and_lock(&up->__count, &uidhash_lock))
free_user(up, flags);
else
local_irq_restore(flags);
@@ -183,7 +183,7 @@ struct user_struct *alloc_uid(kuid_t uid)
goto out_unlock;
new->uid = uid;
- atomic_set(&new->__count, 1);
+ refcount_set(&new->__count, 1);
/*
* Before adding this, check whether we raced
@@ -25,7 +25,7 @@ bool current_is_single_threaded(void)
if (atomic_read(&task->signal->live) != 1)
return false;
- if (atomic_read(&mm->mm_users) == 1)
+ if (refcount_read(&mm->mm_users) == 1)
return true;
ret = false;
@@ -416,8 +416,10 @@ wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp)
node = &parent->rb_left;
else if (congested->blkcg_id > blkcg_id)
node = &parent->rb_right;
- else
- goto found;
+ else {
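+ /* take the reference on the found node while cgwb_lock is still held */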
+ refcount_inc(&congested->refcnt);
+ goto found;
+ }
}
if (new_congested) {
@@ -436,13 +438,12 @@ wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp)
if (!new_congested)
return NULL;
- atomic_set(&new_congested->refcnt, 0);
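+ /* refcount_t cannot be incremented from zero, so new nodes start life at 1 */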
+ refcount_set(&new_congested->refcnt, 1);
new_congested->bdi = bdi;
new_congested->blkcg_id = blkcg_id;
goto retry;
found:
- atomic_inc(&congested->refcnt);
spin_unlock_irqrestore(&cgwb_lock, flags);
kfree(new_congested);
return congested;
@@ -459,7 +460,7 @@ void wb_congested_put(struct bdi_writeback_congested *congested)
unsigned long flags;
local_irq_save(flags);
- if (!atomic_dec_and_lock(&congested->refcnt, &cgwb_lock)) {
+ if (!refcount_dec_and_lock(&congested->refcnt, &cgwb_lock)) {
local_irq_restore(flags);
return;
}
@@ -134,8 +134,8 @@ void dump_mm(const struct mm_struct *mm)
mm->get_unmapped_area,
#endif
mm->mmap_base, mm->mmap_legacy_base, mm->highest_vm_end,
- mm->pgd, atomic_read(&mm->mm_users),
- atomic_read(&mm->mm_count),
+ mm->pgd, refcount_read(&mm->mm_users),
+ refcount_read(&mm->mm_count),
atomic_long_read((atomic_long_t *)&mm->nr_ptes),
mm_nr_pmds((struct mm_struct *)mm),
mm->map_count,
@@ -30,6 +30,7 @@
#include <linux/userfaultfd_k.h>
#include <linux/page_idle.h>
#include <linux/shmem_fs.h>
+#include <linux/refcount.h>
#include <asm/tlb.h>
#include <asm/pgalloc.h>
@@ -56,14 +57,14 @@ unsigned long transparent_hugepage_flags __read_mostly =
static struct shrinker deferred_split_shrinker;
-static atomic_t huge_zero_refcount;
+static refcount_t huge_zero_refcount;
struct page *huge_zero_page __read_mostly;
static struct page *get_huge_zero_page(void)
{
struct page *zero_page;
retry:
- if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
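+ /* a zero count means the shrinker already freed the page; fall through and allocate a fresh one */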
+ if (likely(refcount_inc_not_zero(&huge_zero_refcount)))
return READ_ONCE(huge_zero_page);
zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
@@ -81,7 +82,7 @@ static struct page *get_huge_zero_page(void)
}
/* We take additional reference here. It will be put back by shrinker */
- atomic_set(&huge_zero_refcount, 2);
+ refcount_set(&huge_zero_refcount, 2);
preempt_enable();
return READ_ONCE(huge_zero_page);
}
@@ -92,7 +93,7 @@ static void put_huge_zero_page(void)
* Counter should never go to zero here. Only shrinker can put
* last reference.
*/
- BUG_ON(atomic_dec_and_test(&huge_zero_refcount));
+ BUG_ON(refcount_dec_and_test(&huge_zero_refcount));
}
struct page *mm_get_huge_zero_page(struct mm_struct *mm)
@@ -119,13 +120,16 @@ static unsigned long shrink_huge_zero_page_count(struct shrinker *shrink,
struct shrink_control *sc)
{
/* we can free zero page only if last reference remains */
- return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;
+ return refcount_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;
}
static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
struct shrink_control *sc)
{
- if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
+ /* refcount_dec_if_one() keeps the 1 -> 0 transition atomic, like the old cmpxchg */
+ if (refcount_dec_if_one(&huge_zero_refcount)) {
struct page *zero_page = xchg(&huge_zero_page, NULL);
BUG_ON(zero_page == NULL);
__free_pages(zero_page, compound_order(zero_page));
@@ -17,8 +17,8 @@
struct mm_struct init_mm = {
.mm_rb = RB_ROOT,
.pgd = swapper_pg_dir,
- .mm_users = ATOMIC_INIT(2),
- .mm_count = ATOMIC_INIT(1),
+ .mm_users = REFCOUNT_INIT(2),
+ .mm_count = REFCOUNT_INIT(1),
.mmap_sem = __RWSEM_INITIALIZER(init_mm.mmap_sem),
.page_table_lock = __SPIN_LOCK_UNLOCKED(init_mm.page_table_lock),
.mmlist = LIST_HEAD_INIT(init_mm.mmlist),
@@ -391,7 +391,7 @@ static void insert_to_mm_slots_hash(struct mm_struct *mm,
static inline int khugepaged_test_exit(struct mm_struct *mm)
{
- return atomic_read(&mm->mm_users) == 0;
+ return refcount_read(&mm->mm_users) == 0;
}
int __khugepaged_enter(struct mm_struct *mm)
@@ -420,7 +420,7 @@ int __khugepaged_enter(struct mm_struct *mm)
list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
spin_unlock(&khugepaged_mm_lock);
- atomic_inc(&mm->mm_count);
+ refcount_inc(&mm->mm_count);
if (wakeup)
wake_up_interruptible(&khugepaged_wait);
@@ -105,7 +105,7 @@
#include <asm/sections.h>
#include <asm/processor.h>
-#include <linux/atomic.h>
+#include <linux/refcount.h>
#include <linux/kasan.h>
#include <linux/kmemcheck.h>
@@ -154,7 +154,7 @@ struct kmemleak_object {
struct rb_node rb_node;
struct rcu_head rcu; /* object_list lockless traversal */
/* object usage count; object freed when use_count == 0 */
- atomic_t use_count;
+ refcount_t use_count;
unsigned long pointer;
size_t size;
/* minimum number of a pointers found before it is considered leak */
@@ -434,7 +434,7 @@ static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
*/
static int get_object(struct kmemleak_object *object)
{
- return atomic_inc_not_zero(&object->use_count);
+ return refcount_inc_not_zero(&object->use_count);
}
/*
@@ -467,7 +467,7 @@ static void free_object_rcu(struct rcu_head *rcu)
*/
static void put_object(struct kmemleak_object *object)
{
- if (!atomic_dec_and_test(&object->use_count))
+ if (!refcount_dec_and_test(&object->use_count))
return;
/* should only get here after delete_object was called */
@@ -556,7 +556,7 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
INIT_LIST_HEAD(&object->gray_list);
INIT_HLIST_HEAD(&object->area_list);
spin_lock_init(&object->lock);
- atomic_set(&object->use_count, 1);
+ refcount_set(&object->use_count, 1);
object->flags = OBJECT_ALLOCATED;
object->pointer = ptr;
object->size = size;
@@ -629,7 +629,7 @@ static void __delete_object(struct kmemleak_object *object)
unsigned long flags;
WARN_ON(!(object->flags & OBJECT_ALLOCATED));
- WARN_ON(atomic_read(&object->use_count) < 1);
+ WARN_ON(refcount_read(&object->use_count) == 0);
/*
* Locking here also ensures that the corresponding memory block
@@ -1396,9 +1396,9 @@ static void kmemleak_scan(void)
* With a few exceptions there should be a maximum of
* 1 reference to any object at this point.
*/
- if (atomic_read(&object->use_count) > 1) {
- pr_debug("object->use_count = %d\n",
- atomic_read(&object->use_count));
+ if (refcount_read(&object->use_count) > 1) {
+ pr_debug("object->use_count = %u\n",
+ refcount_read(&object->use_count));
dump_object_info(object);
}
#endif
@@ -352,7 +352,7 @@ static void insert_to_mm_slots_hash(struct mm_struct *mm,
*/
static inline bool ksm_test_exit(struct mm_struct *mm)
{
- return atomic_read(&mm->mm_users) == 0;
+ return refcount_read(&mm->mm_users) == 0;
}
/*
@@ -1813,7 +1813,7 @@ int __ksm_enter(struct mm_struct *mm)
spin_unlock(&ksm_mmlist_lock);
set_bit(MMF_VM_MERGEABLE, &mm->flags);
- atomic_inc(&mm->mm_count);
+ refcount_inc(&mm->mm_count);
if (needs_wakeup)
wake_up_interruptible(&ksm_thread_wait);
@@ -375,7 +375,7 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table)
- * When there's less then two users of this mm there cannot be a
+ * When there's less than two users of this mm there cannot be a
* concurrent page-table walk.
*/
- if (atomic_read(&tlb->mm->mm_users) < 2) {
+ if (refcount_read(&tlb->mm->mm_users) < 2) {
__tlb_remove_table(table);
return;
}
@@ -25,7 +25,7 @@ void use_mm(struct mm_struct *mm)
task_lock(tsk);
active_mm = tsk->active_mm;
if (active_mm != mm) {
- atomic_inc(&mm->mm_count);
+ refcount_inc(&mm->mm_count);
tsk->active_mm = mm;
}
tsk->mm = mm;
@@ -249,7 +249,7 @@ static int do_mmu_notifier_register(struct mmu_notifier *mn,
struct mmu_notifier_mm *mmu_notifier_mm;
int ret;
- BUG_ON(atomic_read(&mm->mm_users) <= 0);
+ BUG_ON(refcount_read(&mm->mm_users) == 0);
/*
* Verify that mmu_notifier_init() already run and the global srcu is
@@ -275,7 +275,7 @@ static int do_mmu_notifier_register(struct mmu_notifier *mn,
mm->mmu_notifier_mm = mmu_notifier_mm;
mmu_notifier_mm = NULL;
}
- atomic_inc(&mm->mm_count);
+ refcount_inc(&mm->mm_count);
/*
* Serialize the update against mmu_notifier_unregister. A
@@ -295,7 +295,7 @@ static int do_mmu_notifier_register(struct mmu_notifier *mn,
up_write(&mm->mmap_sem);
kfree(mmu_notifier_mm);
out:
- BUG_ON(atomic_read(&mm->mm_users) <= 0);
+ BUG_ON(refcount_read(&mm->mm_users) == 0);
return ret;
}
@@ -348,7 +348,7 @@ void __mmu_notifier_mm_destroy(struct mm_struct *mm)
*/
void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm)
{
- BUG_ON(atomic_read(&mm->mm_count) <= 0);
+ BUG_ON(refcount_read(&mm->mm_count) == 0);
if (!hlist_unhashed(&mn->hlist)) {
/*
@@ -381,7 +381,7 @@ void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm)
*/
synchronize_srcu(&srcu);
- BUG_ON(atomic_read(&mm->mm_count) <= 0);
+ BUG_ON(refcount_read(&mm->mm_count) == 0);
mmdrop(mm);
}
@@ -401,7 +401,7 @@ void mmu_notifier_unregister_no_release(struct mmu_notifier *mn,
hlist_del_init_rcu(&mn->hlist);
spin_unlock(&mm->mmu_notifier_mm->lock);
- BUG_ON(atomic_read(&mm->mm_count) <= 0);
+ BUG_ON(refcount_read(&mm->mm_count) == 0);
mmdrop(mm);
}
EXPORT_SYMBOL_GPL(mmu_notifier_unregister_no_release);
@@ -77,7 +77,7 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
/* Get target node for single threaded private VMAs */
if (prot_numa && !(vma->vm_flags & VM_SHARED) &&
- atomic_read(&vma->vm_mm->mm_users) == 1)
+ refcount_read(&vma->vm_mm->mm_users) == 1)
target_node = numa_node_id();
arch_enter_lazy_mmu_mode();
@@ -660,7 +660,7 @@ static void mark_oom_victim(struct task_struct *tsk)
/* oom_mm is bound to the signal struct life time. */
if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm))
- atomic_inc(&tsk->signal->oom_mm->mm_count);
+ refcount_inc(&tsk->signal->oom_mm->mm_count);
/*
* Make sure that the task is woken up from uninterruptible sleep
@@ -781,7 +781,7 @@ static bool task_will_free_mem(struct task_struct *task)
if (test_bit(MMF_OOM_SKIP, &mm->flags))
return false;
- if (atomic_read(&mm->mm_users) <= 1)
+ if (refcount_read(&mm->mm_users) <= 1)
return true;
/*
@@ -877,7 +877,7 @@ static void oom_kill_process(struct oom_control *oc, const char *message)
/* Get a reference to safely compare mm after task_unlock(victim) */
mm = victim->mm;
- atomic_inc(&mm->mm_count);
+ refcount_inc(&mm->mm_count);
/*
* We should send SIGKILL before setting TIF_MEMDIE in order to prevent
* the OOM victim from depleting the memory reserves from the user
@@ -77,7 +77,7 @@ static inline struct anon_vma *anon_vma_alloc(void)
anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
if (anon_vma) {
- atomic_set(&anon_vma->refcount, 1);
+ refcount_set(&anon_vma->refcount, 1);
anon_vma->degree = 1; /* Reference for first vma */
anon_vma->parent = anon_vma;
/*
@@ -92,7 +92,7 @@ static inline struct anon_vma *anon_vma_alloc(void)
static inline void anon_vma_free(struct anon_vma *anon_vma)
{
- VM_BUG_ON(atomic_read(&anon_vma->refcount));
+ VM_BUG_ON(refcount_read(&anon_vma->refcount));
/*
* Synchronize against page_lock_anon_vma_read() such that
@@ -421,7 +421,7 @@ static void anon_vma_ctor(void *data)
struct anon_vma *anon_vma = data;
init_rwsem(&anon_vma->rwsem);
- atomic_set(&anon_vma->refcount, 0);
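+ /* the slab ctor parks the count at 0; anon_vma_alloc() raises it to 1 */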
+ refcount_set(&anon_vma->refcount, 0);
anon_vma->rb_root = RB_ROOT;
}
@@ -470,7 +470,7 @@ struct anon_vma *page_get_anon_vma(struct page *page)
goto out;
anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
- if (!atomic_inc_not_zero(&anon_vma->refcount)) {
+ if (!refcount_inc_not_zero(&anon_vma->refcount)) {
anon_vma = NULL;
goto out;
}
@@ -529,7 +529,7 @@ struct anon_vma *page_lock_anon_vma_read(struct page *page)
}
/* trylock failed, we got to sleep */
- if (!atomic_inc_not_zero(&anon_vma->refcount)) {
+ if (!refcount_inc_not_zero(&anon_vma->refcount)) {
anon_vma = NULL;
goto out;
}
@@ -544,7 +544,7 @@ struct anon_vma *page_lock_anon_vma_read(struct page *page)
rcu_read_unlock();
anon_vma_lock_read(anon_vma);
- if (atomic_dec_and_test(&anon_vma->refcount)) {
+ if (refcount_dec_and_test(&anon_vma->refcount)) {
/*
* Oops, we held the last refcount, release the lock
* and bail -- can't simply use put_anon_vma() because
@@ -1711,7 +1711,7 @@ void __put_anon_vma(struct anon_vma *anon_vma)
struct anon_vma *root = anon_vma->root;
anon_vma_free(anon_vma);
- if (root != anon_vma && atomic_dec_and_test(&root->refcount))
+ if (root != anon_vma && refcount_dec_and_test(&root->refcount))
anon_vma_free(root);
}
@@ -1401,7 +1401,7 @@ int try_to_unuse(unsigned int type, bool frontswap,
* that.
*/
start_mm = &init_mm;
- atomic_inc(&init_mm.mm_users);
+ refcount_inc(&init_mm.mm_users);
/*
* Keep on scanning until all entries have gone. Usually,
@@ -1447,10 +1447,10 @@ int try_to_unuse(unsigned int type, bool frontswap,
/*
* Don't hold on to start_mm if it looks like exiting.
*/
- if (atomic_read(&start_mm->mm_users) == 1) {
+ if (refcount_read(&start_mm->mm_users) == 1) {
mmput(start_mm);
start_mm = &init_mm;
- atomic_inc(&init_mm.mm_users);
+ refcount_inc(&init_mm.mm_users);
}
/*
@@ -1487,13 +1487,13 @@ int try_to_unuse(unsigned int type, bool frontswap,
struct mm_struct *prev_mm = start_mm;
struct mm_struct *mm;
- atomic_inc(&new_start_mm->mm_users);
- atomic_inc(&prev_mm->mm_users);
+ refcount_inc(&new_start_mm->mm_users);
+ refcount_inc(&prev_mm->mm_users);
spin_lock(&mmlist_lock);
while (swap_count(*swap_map) && !retval &&
(p = p->next) != &start_mm->mmlist) {
mm = list_entry(p, struct mm_struct, mmlist);
- if (!atomic_inc_not_zero(&mm->mm_users))
+ if (!refcount_inc_not_zero(&mm->mm_users))
continue;
spin_unlock(&mmlist_lock);
mmput(prev_mm);
@@ -1511,7 +1511,7 @@ int try_to_unuse(unsigned int type, bool frontswap,
if (set_start_mm && *swap_map < swcount) {
mmput(new_start_mm);
- atomic_inc(&mm->mm_users);
+ refcount_inc(&mm->mm_users);
new_start_mm = mm;
set_start_mm = 0;
}
@@ -26,7 +26,7 @@ void vmacache_flush_all(struct mm_struct *mm)
* to worry about other threads' seqnum. Current's
* flush will occur upon the next lookup.
*/
- if (atomic_read(&mm->mm_users) == 1)
+ if (refcount_read(&mm->mm_users) == 1)
return;
rcu_read_lock();
@@ -56,11 +56,11 @@ EXPORT_SYMBOL(zpool_register_driver);
*/
int zpool_unregister_driver(struct zpool_driver *driver)
{
- int ret = 0, refcount;
+ int ret = 0;
+ unsigned int refcount;
spin_lock(&drivers_lock);
refcount = atomic_read(&driver->refcount);
- WARN_ON(refcount < 0);
if (refcount > 0)
ret = -EBUSY;
else
@@ -137,7 +137,7 @@ struct rpc_cred null_cred = {
.cr_lru = LIST_HEAD_INIT(null_cred.cr_lru),
.cr_auth = &null_auth,
.cr_ops = &null_credops,
- .cr_count = ATOMIC_INIT(1),
+ .cr_count = REFCOUNT_INIT(1),
.cr_flags = 1UL << RPCAUTH_CRED_UPTODATE,
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
.cr_magic = RPCAUTH_CRED_MAGIC,
@@ -204,7 +204,7 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
work->addr = hva;
work->arch = *arch;
work->mm = current->mm;
- atomic_inc(&work->mm->mm_users);
+ refcount_inc(&work->mm->mm_users);
kvm_get_kvm(work->vcpu->kvm);
/* this can't really happen otherwise gfn_to_pfn_async
@@ -616,13 +616,13 @@ static struct kvm *kvm_create_vm(unsigned long type)
return ERR_PTR(-ENOMEM);
spin_lock_init(&kvm->mmu_lock);
- atomic_inc(&current->mm->mm_count);
+ refcount_inc(&current->mm->mm_count);
kvm->mm = current->mm;
kvm_eventfd_init(kvm);
mutex_init(&kvm->lock);
mutex_init(&kvm->irq_lock);
mutex_init(&kvm->slots_lock);
- atomic_set(&kvm->users_count, 1);
+ refcount_set(&kvm->users_count, 1);
INIT_LIST_HEAD(&kvm->devices);
r = kvm_arch_init_vm(kvm, type);
@@ -745,13 +745,13 @@ static void kvm_destroy_vm(struct kvm *kvm)
void kvm_get_kvm(struct kvm *kvm)
{
- atomic_inc(&kvm->users_count);
+ refcount_inc(&kvm->users_count);
}
EXPORT_SYMBOL_GPL(kvm_get_kvm);
void kvm_put_kvm(struct kvm *kvm)
{
- if (atomic_dec_and_test(&kvm->users_count))
+ if (refcount_dec_and_test(&kvm->users_count))
kvm_destroy_vm(kvm);
}
EXPORT_SYMBOL_GPL(kvm_put_kvm);
@@ -3640,7 +3640,7 @@ static int kvm_debugfs_open(struct inode *inode, struct file *file,
* To avoid the race between open and the removal of the debugfs
* directory we test against the users count.
*/
- if (!atomic_add_unless(&stat_data->kvm->users_count, 1, 0))
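+ /* refcount_inc_not_zero() is the idiomatic replacement for atomic_add_unless(.., 1, 0) */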
+ if (!refcount_inc_not_zero(&stat_data->kvm->users_count))
return -ENOENT;
if (simple_attr_open(inode, file, get, set, fmt)) {