@@ -40,7 +40,7 @@ struct kmem_cache {
/* 4) cache creation/removal */
const char *name;
struct list_head list;
- int refcount;
+ atomic_t refcount; /* use count; negative marks the cache unmergeable */
int object_size;
int align;
@@ -74,7 +74,7 @@ struct kmem_cache {
struct kmem_cache_order_objects max;
struct kmem_cache_order_objects min;
gfp_t allocflags; /* gfp flags to use on each alloc */
- int refcount; /* Refcount for slab cache destroy */
+ atomic_t refcount; /* Refcount for slab cache destroy; negative = unmergeable */
void (*ctor)(void *);
int inuse; /* Offset to metadata */
int align; /* Alignment */
@@ -1919,7 +1919,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
cachep = find_mergeable(size, align, flags, name, ctor);
if (cachep) {
- cachep->refcount++;
+ atomic_inc(&cachep->refcount); /* alias takes a reference on the merged cache */
/*
* Adjust the object sizes so that we clear
@@ -22,7 +22,7 @@ struct kmem_cache {
unsigned int align; /* Alignment as calculated */
unsigned long flags; /* Active flags on the slab */
const char *name; /* Slab name for sysfs */
- int refcount; /* Use counter */
+ atomic_t refcount; /* Use counter; negative = unmergeable */
void (*ctor)(void *); /* Called on object slot creation */
struct list_head list; /* List of all slab caches on the system */
};
@@ -244,7 +244,7 @@ int slab_unmergeable(struct kmem_cache *s)
/*
* We may have set a slab to be unmergeable during bootstrap.
*/
- if (s->refcount < 0)
+ if (atomic_read(&s->refcount) < 0) /* create_boot_cache() sets -1 */
return 1;
return 0;
@@ -348,7 +348,7 @@ static struct kmem_cache *create_cache(const char *name,
if (err)
goto out_free_cache;
- s->refcount = 1;
+ atomic_set(&s->refcount, 1); /* creator holds the initial reference */
list_add(&s->list, &slab_caches);
out:
if (err)
@@ -718,8 +718,8 @@ void kmem_cache_destroy(struct kmem_cache *s)
kasan_cache_destroy(s);
mutex_lock(&slab_mutex);
- s->refcount--;
- if (s->refcount)
+ /* Drop our reference; only the last reference tears the cache down. */
+ if (!atomic_dec_and_test(&s->refcount))
goto out_unlock;
err = shutdown_memcg_caches(s, &release, &need_rcu_barrier);
@@ -786,7 +786,7 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t siz
panic("Creation of kmalloc slab %s size=%zu failed. Reason %d\n",
name, size, err);
- s->refcount = -1; /* Exempt from merging for now */
+ atomic_set(&s->refcount, -1); /* Exempt from merging for now; slab_unmergeable() checks < 0 */
}
struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
@@ -799,7 +799,7 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
create_boot_cache(s, name, size, flags);
list_add(&s->list, &slab_caches);
- s->refcount = 1;
+ atomic_set(&s->refcount, 1); /* initial reference */
return s;
}
@@ -4180,7 +4180,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
s = find_mergeable(size, align, flags, name, ctor);
if (s) {
- s->refcount++;
+ atomic_inc(&s->refcount); /* alias takes a reference on the merged cache */
/*
* Adjust the object sizes so that we clear
@@ -4196,7 +4196,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
}
if (sysfs_slab_alias(s, name)) {
- s->refcount--;
+ atomic_dec(&s->refcount); /* sysfs alias failed: undo the refcount bump */
s = NULL;
}
}
@@ -4903,7 +4903,9 @@ SLAB_ATTR_RO(ctor);
static ssize_t aliases_show(struct kmem_cache *s, char *buf)
{
- return sprintf(buf, "%d\n", s->refcount < 0 ? 0 : s->refcount - 1);
+ /* Read the counter once so the test and the subtraction agree. */
+ int refcount = atomic_read(&s->refcount);
+
+ return sprintf(buf, "%d\n", refcount < 0 ? 0 : refcount - 1);
}
SLAB_ATTR_RO(aliases);
@@ -5046,7 +5046,7 @@ static ssize_t trace_store(struct kmem_cache *s, const char *buf,
* as well as cause other issues like converting a mergeable
* cache into an umergeable one.
*/
- if (s->refcount > 1)
+ if (atomic_read(&s->refcount) > 1) /* refcount > 1 means the cache is aliased */
return -EINVAL;
s->flags &= ~SLAB_TRACE;
@@ -5164,7 +5164,7 @@ static ssize_t failslab_show(struct kmem_cache *s, char *buf)
static ssize_t failslab_store(struct kmem_cache *s, const char *buf,
size_t length)
{
- if (s->refcount > 1)
+ if (atomic_read(&s->refcount) > 1) /* refuse on aliased caches, as in trace_store() */
return -EINVAL;
s->flags &= ~SLAB_FAILSLAB;