@@ -492,14 +492,10 @@ EXPORT_SYMBOL(kmem_buckets_create);
* once or there will be a use-after-free problem. The actual deletion
* and release of the kobject does not need slab_mutex or cpu_hotplug_lock
* protection. So they are now done without holding those locks.
- *
- * Note that there will be a slight delay in the deletion of sysfs files
- * if kmem_cache_release() is called indirectly from a work function.
*/
static void kmem_cache_release(struct kmem_cache *s)
{
if (slab_state >= FULL) {
- sysfs_slab_unlink(s);
sysfs_slab_release(s);
} else {
slab_kmem_cache_release(s);
@@ -536,33 +532,11 @@ static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work)
rcu_barrier();

list_for_each_entry_safe(s, s2, &to_destroy, list) {
- debugfs_slab_release(s);
kfence_shutdown_cache(s);
kmem_cache_release(s);
}
}

-static int shutdown_cache(struct kmem_cache *s)
-{
- /* free asan quarantined objects */
- kasan_cache_shutdown(s);
-
- if (__kmem_cache_shutdown(s) != 0)
- return -EBUSY;
-
- list_del(&s->list);
-
- if (s->flags & SLAB_TYPESAFE_BY_RCU) {
- list_add_tail(&s->list, &slab_caches_to_rcu_destroy);
- schedule_work(&slab_caches_to_rcu_destroy_work);
- } else {
- kfence_shutdown_cache(s);
- debugfs_slab_release(s);
- }
-
- return 0;
-}
-
void slab_kmem_cache_release(struct kmem_cache *s)
{
__kmem_cache_release(s);
@@ -572,8 +546,8 @@ void slab_kmem_cache_release(struct kmem_cache *s)

void kmem_cache_destroy(struct kmem_cache *s)
{
- int err = -EBUSY;
bool rcu_set;
+ int err;

if (unlikely(!s) || !kasan_check_byte(s))
return;
@@ -581,20 +555,45 @@ void kmem_cache_destroy(struct kmem_cache *s)
cpus_read_lock();
mutex_lock(&slab_mutex);

+ s->refcount--;
+ if (s->refcount) {
+ mutex_unlock(&slab_mutex);
+ cpus_read_unlock();
+ return;
+ }
+
rcu_set = s->flags & SLAB_TYPESAFE_BY_RCU;

- s->refcount--;
- if (s->refcount)
- goto out_unlock;
+ /* free asan quarantined objects */
+ kasan_cache_shutdown(s);

- err = shutdown_cache(s);
+ err = __kmem_cache_shutdown(s);
WARN(err, "%s %s: Slab cache still has objects when called from %pS",
__func__, s->name, (void *)_RET_IP_);
-out_unlock:
+
+ if (!err)
+ list_del(&s->list);
+
mutex_unlock(&slab_mutex);
cpus_read_unlock();
- if (!err && !rcu_set)
+
+ if (slab_state >= FULL) {
+ sysfs_slab_unlink(s);
+ }
+ debugfs_slab_release(s);
+
+ if (err)
+ return;
+
+ if (rcu_set) {
+ mutex_lock(&slab_mutex);
+ list_add_tail(&s->list, &slab_caches_to_rcu_destroy);
+ schedule_work(&slab_caches_to_rcu_destroy_work);
+ mutex_unlock(&slab_mutex);
+ } else {
+ kfence_shutdown_cache(s);
kmem_cache_release(s);
+ }
}
EXPORT_SYMBOL(kmem_cache_destroy);
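
As a reference for the caller contract behind the WARN above, a minimal
teardown sketch (foo_cache and obj are hypothetical names, not part of
this patch):

	/* All objects must be freed before the cache is destroyed,
	 * otherwise __kmem_cache_shutdown() fails and the WARN fires. */
	kmem_cache_free(foo_cache, obj);
	/* If frees were deferred via call_rcu()/kfree_rcu(), wait for the
	 * callbacks to complete before destroying the cache. */
	rcu_barrier();
	kmem_cache_destroy(foo_cache);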
kmem_cache_destroy() includes removing the associated sysfs and debugfs
directories. Currently this might not happen immediately when:

- the cache is SLAB_TYPESAFE_BY_RCU and the cleanup is delayed,
  including the removal of the directories

- __kmem_cache_shutdown() fails due to outstanding objects, in which
  case the directories remain indefinitely

When a cache is recreated with the same name, such as due to a module
unload followed by a load, the directories will fail to be recreated for
the new instance of the cache due to the old directories being present.

We also want to add another possibility of delayed cleanup due to
kfree_rcu() in flight, so let's fix this first and have the directories
removed immediately in kmem_cache_destroy(), regardless of
__kmem_cache_shutdown() success.

This should not make debugging harder if __kmem_cache_shutdown() fails,
because a detailed report of the outstanding objects is already printed
to dmesg due to the failure.

Note that the record in /proc/slabinfo will remain until the cleanup is
finished (or indefinitely, if __kmem_cache_shutdown() fails), but that
does not prevent a new record from being added for a new cache instance.

Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
---
 mm/slab_common.c | 65 ++++++++++++++++++++++++++++----------------------------
 1 file changed, 32 insertions(+), 33 deletions(-)
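
To illustrate the reload scenario above, a minimal sketch of a module
that recreates a cache with the same name; struct foo, foo_cache and the
module hooks are hypothetical, not part of this patch:

#include <linux/module.h>
#include <linux/slab.h>

/* Hypothetical object type; any SLAB_TYPESAFE_BY_RCU user looks similar. */
struct foo {
	int val;
};

static struct kmem_cache *foo_cache;

static int __init foo_init(void)
{
	/*
	 * A module re-load re-creates a cache with the same name. Before
	 * this patch, if cleanup of the previous instance was still delayed
	 * (SLAB_TYPESAFE_BY_RCU), the stale /sys/kernel/slab/foo_cache and
	 * debugfs directories made re-creating the directories fail.
	 */
	foo_cache = kmem_cache_create("foo_cache", sizeof(struct foo), 0,
				      SLAB_TYPESAFE_BY_RCU, NULL);
	return foo_cache ? 0 : -ENOMEM;
}

static void __exit foo_exit(void)
{
	/*
	 * With this patch, the sysfs/debugfs directories are removed here
	 * immediately, even though freeing of the cache itself may still
	 * be deferred to the RCU work item.
	 */
	kmem_cache_destroy(foo_cache);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");

With the directories now removed synchronously in kmem_cache_destroy(),
the kmem_cache_create() on re-load no longer races with the delayed
cleanup of the previous instance's sysfs and debugfs entries.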