@@ -168,7 +168,7 @@ static struct dentry *zs_stat_root;
static size_t huge_class_size;
struct size_class {
- spinlock_t lock;
+ struct mutex lock;
struct list_head fullness_list[NR_FULLNESS_GROUPS];
/*
* Size of objects stored in this class. Must be multiple
@@ -252,6 +252,16 @@ static bool zspool_lock_is_contended(struct zs_pool *pool)
return rwsem_is_contended(&pool->migrate_lock);
}
+static void size_class_lock(struct size_class *class)
+{
+ mutex_lock(&class->lock);
+}
+
+static void size_class_unlock(struct size_class *class)
+{
+ mutex_unlock(&class->lock);
+}
+
static inline void zpdesc_set_first(struct zpdesc *zpdesc)
{
SetPagePrivate(zpdesc_page(zpdesc));
@@ -657,8 +667,7 @@ static int zs_stats_size_show(struct seq_file *s, void *v)
if (class->index != i)
continue;
- spin_lock(&class->lock);
-
+ size_class_lock(class);
seq_printf(s, " %5u %5u ", i, class->size);
for (fg = ZS_INUSE_RATIO_10; fg < NR_FULLNESS_GROUPS; fg++) {
inuse_totals[fg] += class_stat_read(class, fg);
@@ -668,7 +677,7 @@ static int zs_stats_size_show(struct seq_file *s, void *v)
obj_allocated = class_stat_read(class, ZS_OBJS_ALLOCATED);
obj_used = class_stat_read(class, ZS_OBJS_INUSE);
freeable = zs_can_compact(class);
- spin_unlock(&class->lock);
+ size_class_unlock(class);
objs_per_zspage = class->objs_per_zspage;
pages_used = obj_allocated / objs_per_zspage *
@@ -926,8 +935,6 @@ static void __free_zspage(struct zs_pool *pool, struct size_class *class,
{
struct zpdesc *zpdesc, *next;
- assert_spin_locked(&class->lock);
-
VM_BUG_ON(get_zspage_inuse(zspage));
VM_BUG_ON(zspage->fullness != ZS_INUSE_RATIO_0);
@@ -1443,7 +1450,7 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
class = pool->size_class[get_size_class_index(size)];
/* class->lock effectively protects the zpage migration */
- spin_lock(&class->lock);
+ size_class_lock(class);
zspage = find_get_zspage(class);
if (likely(zspage)) {
obj_malloc(pool, zspage, handle);
@@ -1453,8 +1460,7 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
goto out;
}
-
- spin_unlock(&class->lock);
+ size_class_unlock(class);
zspage = alloc_zspage(pool, class, gfp);
if (!zspage) {
@@ -1462,7 +1468,7 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
return (unsigned long)ERR_PTR(-ENOMEM);
}
- spin_lock(&class->lock);
+ size_class_lock(class);
obj_malloc(pool, zspage, handle);
newfg = get_fullness_group(class, zspage);
insert_zspage(class, zspage, newfg);
@@ -1473,7 +1479,7 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
/* We completely set up zspage so mark them as movable */
SetZsPageMovable(pool, zspage);
out:
- spin_unlock(&class->lock);
+ size_class_unlock(class);
return handle;
}
@@ -1527,7 +1533,7 @@ void zs_free(struct zs_pool *pool, unsigned long handle)
obj_to_zpdesc(obj, &f_zpdesc);
zspage = get_zspage(f_zpdesc);
class = zspage_class(pool, zspage);
- spin_lock(&class->lock);
+ size_class_lock(class);
pool_read_unlock(pool);
class_stat_sub(class, ZS_OBJS_INUSE, 1);
@@ -1537,7 +1543,7 @@ void zs_free(struct zs_pool *pool, unsigned long handle)
if (fullness == ZS_INUSE_RATIO_0)
free_zspage(pool, class, zspage);
- spin_unlock(&class->lock);
+ size_class_unlock(class);
cache_free_handle(pool, handle);
}
EXPORT_SYMBOL_GPL(zs_free);
@@ -1846,7 +1852,7 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
/*
* the class lock protects zpage alloc/free in the zspage.
*/
- spin_lock(&class->lock);
+ size_class_lock(class);
/* the zspage_write_lock protects zpage access via zs_map_object */
zspage_write_lock(zspage);
@@ -1878,7 +1884,7 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
* it's okay to release migration_lock.
*/
pool_write_unlock(pool);
- spin_unlock(&class->lock);
+ size_class_unlock(class);
zspage_write_unlock(zspage);
zpdesc_get(newzpdesc);
@@ -1922,10 +1928,10 @@ static void async_free_zspage(struct work_struct *work)
if (class->index != i)
continue;
- spin_lock(&class->lock);
+ size_class_lock(class);
list_splice_init(&class->fullness_list[ZS_INUSE_RATIO_0],
&free_pages);
- spin_unlock(&class->lock);
+ size_class_unlock(class);
}
list_for_each_entry_safe(zspage, tmp, &free_pages, list) {
@@ -1933,10 +1939,10 @@ static void async_free_zspage(struct work_struct *work)
lock_zspage(zspage);
class = zspage_class(pool, zspage);
- spin_lock(&class->lock);
+ size_class_lock(class);
class_stat_sub(class, ZS_INUSE_RATIO_0, 1);
__free_zspage(pool, class, zspage);
- spin_unlock(&class->lock);
+ size_class_unlock(class);
}
};
@@ -2001,7 +2007,7 @@ static unsigned long __zs_compact(struct zs_pool *pool,
* as well as zpage allocation/free
*/
pool_write_lock(pool);
- spin_lock(&class->lock);
+ size_class_lock(class);
while (zs_can_compact(class)) {
int fg;
@@ -2031,11 +2037,11 @@ static unsigned long __zs_compact(struct zs_pool *pool,
putback_zspage(class, dst_zspage);
dst_zspage = NULL;
- spin_unlock(&class->lock);
+ size_class_unlock(class);
pool_write_unlock(pool);
cond_resched();
pool_write_lock(pool);
- spin_lock(&class->lock);
+ size_class_lock(class);
}
}
@@ -2045,7 +2051,7 @@ static unsigned long __zs_compact(struct zs_pool *pool,
if (dst_zspage)
putback_zspage(class, dst_zspage);
- spin_unlock(&class->lock);
+ size_class_unlock(class);
pool_write_unlock(pool);
return pages_freed;
@@ -2255,7 +2261,7 @@ struct zs_pool *zs_create_pool(const char *name)
class->index = i;
class->pages_per_zspage = pages_per_zspage;
class->objs_per_zspage = objs_per_zspage;
- spin_lock_init(&class->lock);
+ mutex_init(&class->lock);
pool->size_class[i] = class;
fullness = ZS_INUSE_RATIO_0;
Switch over from spin-lock to mutex, also introduce simple helpers
to lock/unlock size class.  This is needed to make zsmalloc
preemptible in the future.

Signed-off-by: Sergey Senozhatsky <senozhatsky@chromium.org>
---
 mm/zsmalloc.c | 54 ++++++++++++++++++++++++++++------------------------
 1 file changed, 30 insertions(+), 24 deletions(-)
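
[Not part of the patch: a minimal, hypothetical sketch of the idea behind the
helpers. Call sites go through size_class_lock()/size_class_unlock() instead of
naming the lock primitive, so swapping spinlock_t for a sleepable struct mutex
(and, with it, permitting might-sleep operations under the class lock) stays
confined to the helpers and the struct definition. All identifiers below
('demo_class', 'demo_class_lock', 'demo_alloc_locked') are made up for
illustration and do not appear in mm/zsmalloc.c.]

	#include <linux/mutex.h>
	#include <linux/slab.h>

	struct demo_class {
		/* mutex_init(&class->lock) must run before first use */
		struct mutex lock;	/* was: spinlock_t lock */
	};

	static inline void demo_class_lock(struct demo_class *class)
	{
		/* may sleep: callers must not be in atomic context */
		mutex_lock(&class->lock);
	}

	static inline void demo_class_unlock(struct demo_class *class)
	{
		mutex_unlock(&class->lock);
	}

	static void *demo_alloc_locked(struct demo_class *class, size_t size)
	{
		void *p;

		demo_class_lock(class);
		/* Under a mutex, a sleepable GFP_KERNEL allocation is legal here. */
		p = kmalloc(size, GFP_KERNEL);
		demo_class_unlock(class);
		return p;
	}

The point of the indirection is that a later change of the lock primitive (or
the addition of lockdep annotations) touches only the two helpers, not every
call site converted in this patch.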