@@ -393,7 +393,7 @@ static struct kmem_cache *create_cache(const char *name,
goto out_free_cache;
s->refcount = 1;
- list_add(&s->list, &slab_caches);
+ list_add_tail(&s->list, &slab_caches);
memcg_link_cache(s);
out:
if (err)
@@ -4342,6 +4342,8 @@ void kmem_cache_setup_mobility(struct kmem_cache *s,
return;
}
+ mutex_lock(&slab_mutex);
+
s->isolate = isolate;
s->migrate = migrate;
@@ -4350,6 +4352,10 @@ void kmem_cache_setup_mobility(struct kmem_cache *s,
* to disable fast cmpxchg based processing.
*/
s->flags &= ~__CMPXCHG_DOUBLE;
+
+ list_move(&s->list, &slab_caches); /* Move to top */
+
+ mutex_unlock(&slab_mutex);
}
EXPORT_SYMBOL(kmem_cache_setup_mobility);
It is advantageous to have all defragmentable slabs together at the
beginning of the list of slabs so that there is no need to scan the
complete list. Put defragmentable caches first when adding a slab
cache and others last.

Co-developed-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Tobin C. Harding <tobin@kernel.org>
---
 mm/slab_common.c | 2 +-
 mm/slub.c        | 6 ++++++
 2 files changed, 7 insertions(+), 1 deletion(-)