
[v3] mm/slab_common: Deleting kobject in kmem_cache_destroy() without holding slab_mutex/cpu_hotplug_lock

Message ID: 20220812183033.346425-1-longman@redhat.com
State: New

Commit Message

Waiman Long Aug. 12, 2022, 6:30 p.m. UTC
A circular locking problem is reported by lockdep due to the following
circular locking dependency.

  +--> cpu_hotplug_lock --> slab_mutex --> kn->active --+
  |                                                     |
  +-----------------------------------------------------+

The forward cpu_hotplug_lock ==> slab_mutex ==> kn->active dependency
happens in

  kmem_cache_destroy():	cpus_read_lock(); mutex_lock(&slab_mutex);
  ==> sysfs_slab_unlink()
      ==> kobject_del()
          ==> kernfs_remove()
	      ==> __kernfs_remove()
	          ==> kernfs_drain(): rwsem_acquire(&kn->dep_map, ...);

The backward kn->active ==> cpu_hotplug_lock dependency happens in

  kernfs_fop_write_iter(): kernfs_get_active();
  ==> slab_attr_store()
      ==> cpu_partial_store()
          ==> flush_all(): cpus_read_lock()
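
That is, the store path already holds an active reference on the sysfs
file (a read acquisition of the kn->active virtual lock) when flush_all()
takes cpu_hotplug_lock. A simplified sketch of the mm/slub.c code
involved (details may vary by kernel version):

  static void flush_all(struct kmem_cache *s)
  {
	cpus_read_lock();		/* nests inside kn->active */
	flush_all_cpus_locked(s);
	cpus_read_unlock();
  }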

One way to break this circular locking chain is to avoid holding
cpu_hotplug_lock and slab_mutex while deleting the kobject in
sysfs_slab_unlink(), which should be equivalent to doing a write_lock
and write_unlock pair on the kn->active virtual lock.

Since the kobject structures are not protected by slab_mutex or the
cpu_hotplug_lock, we can certainly release those locks before doing
the delete operation.

Move sysfs_slab_unlink() and sysfs_slab_release() to the newly
created kmem_cache_release() and call it outside the slab_mutex &
cpu_hotplug_lock critical sections. There will be a slight delay
in the deletion of sysfs files if kmem_cache_release() is called
indirectly from a work function.
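
Schematically, the destroy path then becomes (a simplified sketch; see
the diff below for the exact code):

  void kmem_cache_destroy(struct kmem_cache *s)
  {
	int refcnt;

	if (unlikely(!s) || !kasan_check_byte(s))
		return;

	cpus_read_lock();
	mutex_lock(&slab_mutex);

	refcnt = --s->refcount;
	if (!refcnt)
		WARN(shutdown_cache(s), ...);	/* RCU caches defer release to a workfn */

	mutex_unlock(&slab_mutex);
	cpus_read_unlock();

	/* kobject deletion now runs with neither lock held */
	if (!refcnt && !(s->flags & SLAB_TYPESAFE_BY_RCU))
		kmem_cache_release(s);
  }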

Signed-off-by: Waiman Long <longman@redhat.com>
---

 [v3] Move sysfs_slab_unlink() out to kmem_cache_release() and move
      schedule_work() back right after list_add_tail().

 mm/slab_common.c | 45 +++++++++++++++++++++++++++++----------------
 1 file changed, 29 insertions(+), 16 deletions(-)

Comments

Hyeonggon Yoo Aug. 13, 2022, 3 p.m. UTC | #1
On Fri, Aug 12, 2022 at 02:30:33PM -0400, Waiman Long wrote:
> A circular locking problem is reported by lockdep due to the following
> circular locking dependency.
> 
>   +--> cpu_hotplug_lock --> slab_mutex --> kn->active --+
>   |                                                     |
>   +-----------------------------------------------------+
> 
> The forward cpu_hotplug_lock ==> slab_mutex ==> kn->active dependency
> happens in
> 
>   kmem_cache_destroy():	cpus_read_lock(); mutex_lock(&slab_mutex);
>   ==> sysfs_slab_unlink()
>       ==> kobject_del()
>           ==> kernfs_remove()
> 	      ==> __kernfs_remove()
> 	          ==> kernfs_drain(): rwsem_acquire(&kn->dep_map, ...);

Maybe you mean this?

        /* but everyone should wait for draining */
        wait_event(root->deactivate_waitq,
                   atomic_read(&kn->active) == KN_DEACTIVATED_BIAS);


> [...]

A little bit complicated, but looks good to me.
Thank you for fixing this.

Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Roman Gushchin Aug. 13, 2022, 6:25 p.m. UTC | #2
On Fri, Aug 12, 2022 at 02:30:33PM -0400, Waiman Long wrote:
> A circular locking problem is reported by lockdep due to the following
> circular locking dependency.
>
> [...]
>
> Signed-off-by: Waiman Long <longman@redhat.com>

Reviewed-by: Roman Gushchin <roman.gushchin@linux.dev>

Thank you, Waiman!
David Rientjes Aug. 15, 2022, 1:11 a.m. UTC | #3
On Fri, 12 Aug 2022, Waiman Long wrote:

> A circular locking problem is reported by lockdep due to the following
> circular locking dependency.
>
> [...]
>
> Signed-off-by: Waiman Long <longman@redhat.com>

Acked-by: David Rientjes <rientjes@google.com>
Waiman Long Aug. 15, 2022, 2:01 p.m. UTC | #4
On 8/13/22 11:00, Hyeonggon Yoo wrote:
> On Fri, Aug 12, 2022 at 02:30:33PM -0400, Waiman Long wrote:
>> A circular locking problem is reported by lockdep due to the following
>> circular locking dependency.
>>
>>    +--> cpu_hotplug_lock --> slab_mutex --> kn->active --+
>>    |                                                     |
>>    +-----------------------------------------------------+
>>
>> The forward cpu_hotplug_lock ==> slab_mutex ==> kn->active dependency
>> happens in
>>
>>    kmem_cache_destroy():	cpus_read_lock(); mutex_lock(&slab_mutex);
>>    ==> sysfs_slab_unlink()
>>        ==> kobject_del()
>>            ==> kernfs_remove()
>> 	      ==> __kernfs_remove()
>> 	          ==> kernfs_drain(): rwsem_acquire(&kn->dep_map, ...);
> Maybe you mean this?
>
>          /* but everyone should wait for draining */
>          wait_event(root->deactivate_waitq,
>                     atomic_read(&kn->active) == KN_DEACTIVATED_BIAS);
>
This is part of the kernfs_drain() operation. However, I am focusing on 
the behavior of the pseudo lock that causes the lockdep splat in the 
first place. That is why I am showcasing the rwsem_acquire() call here.
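
For context, the kn->active annotation in kernfs_drain() looks roughly
like this (a simplified sketch of fs/kernfs/dir.c; the kernfs_lockdep()
checks are omitted):

	rwsem_acquire(&kn->dep_map, 0, 0, _RET_IP_);
	if (atomic_read(&kn->active) != KN_DEACTIVATED_BIAS)
		lock_contended(&kn->dep_map, _RET_IP_);

	/* but everyone should wait for draining */
	wait_event(root->deactivate_waitq,
		   atomic_read(&kn->active) == KN_DEACTIVATED_BIAS);

	lock_acquired(&kn->dep_map, _RET_IP_);
	rwsem_release(&kn->dep_map, _RET_IP_);

So lockdep treats the whole drain as a write acquisition of kn->active,
while kernfs_get_active() does rwsem_acquire_read() on the same dep_map,
which is why writers to the sysfs file count as readers of kn->active.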


>> [...]
> A little bit complicated, but looks good to me.
> Thank you for fixing this.
>
> Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
>
Thanks for the review.

Cheers,
Longman
Waiman Long Aug. 15, 2022, 2:01 p.m. UTC | #5
On 8/13/22 14:25, Roman Gushchin wrote:
> On Fri, Aug 12, 2022 at 02:30:33PM -0400, Waiman Long wrote:
>> A circular locking problem is reported by lockdep due to the following
>> circular locking dependency.
>>
>> [...]
>>
>> Signed-off-by: Waiman Long <longman@redhat.com>
> Reviewed-by: Roman Gushchin <roman.gushchin@linux.dev>
>
> Thank you, Waiman!
>
Thanks for your suggestions, which made this patch better.

Cheers,
Longman
Vlastimil Babka Aug. 23, 2022, 11:33 a.m. UTC | #6
On 8/12/22 20:30, Waiman Long wrote:
> A circular locking problem is reported by lockdep due to the following
> circular locking dependency.
>
> [...]
>
> Signed-off-by: Waiman Long <longman@redhat.com>

Thanks, added to slab.git for-6.0/fixes

Waiman Long Aug. 29, 2022, 6:47 p.m. UTC | #7
On 8/23/22 07:33, Vlastimil Babka wrote:
> On 8/12/22 20:30, Waiman Long wrote:
>> A circular locking problem is reported by lockdep due to the following
>> circular locking dependency.
>>
>> [...]
>>
>> Signed-off-by: Waiman Long <longman@redhat.com>
> Thanks, added to slab.git for-6.0/fixes

Thanks for taking it.

Cheers,
Longman
Patch

diff --git a/mm/slab_common.c b/mm/slab_common.c
index 17996649cfe3..07b948288f84 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -392,6 +392,28 @@  kmem_cache_create(const char *name, unsigned int size, unsigned int align,
 }
 EXPORT_SYMBOL(kmem_cache_create);
 
+#ifdef SLAB_SUPPORTS_SYSFS
+/*
+ * For a given kmem_cache, kmem_cache_destroy() should only be called
+ * once or there will be a use-after-free problem. The actual deletion
+ * and release of the kobject does not need slab_mutex or cpu_hotplug_lock
+ * protection. So they are now done without holding those locks.
+ *
+ * Note that there will be a slight delay in the deletion of sysfs files
+ * if kmem_cache_release() is called indirectly from a work function.
+ */
+static void kmem_cache_release(struct kmem_cache *s)
+{
+	sysfs_slab_unlink(s);
+	sysfs_slab_release(s);
+}
+#else
+static void kmem_cache_release(struct kmem_cache *s)
+{
+	slab_kmem_cache_release(s);
+}
+#endif
+
 static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work)
 {
 	LIST_HEAD(to_destroy);
@@ -418,11 +440,7 @@  static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work)
 	list_for_each_entry_safe(s, s2, &to_destroy, list) {
 		debugfs_slab_release(s);
 		kfence_shutdown_cache(s);
-#ifdef SLAB_SUPPORTS_SYSFS
-		sysfs_slab_release(s);
-#else
-		slab_kmem_cache_release(s);
-#endif
+		kmem_cache_release(s);
 	}
 }
 
@@ -437,20 +455,11 @@  static int shutdown_cache(struct kmem_cache *s)
 	list_del(&s->list);
 
 	if (s->flags & SLAB_TYPESAFE_BY_RCU) {
-#ifdef SLAB_SUPPORTS_SYSFS
-		sysfs_slab_unlink(s);
-#endif
 		list_add_tail(&s->list, &slab_caches_to_rcu_destroy);
 		schedule_work(&slab_caches_to_rcu_destroy_work);
 	} else {
 		kfence_shutdown_cache(s);
 		debugfs_slab_release(s);
-#ifdef SLAB_SUPPORTS_SYSFS
-		sysfs_slab_unlink(s);
-		sysfs_slab_release(s);
-#else
-		slab_kmem_cache_release(s);
-#endif
 	}
 
 	return 0;
@@ -465,14 +474,16 @@  void slab_kmem_cache_release(struct kmem_cache *s)
 
 void kmem_cache_destroy(struct kmem_cache *s)
 {
+	int refcnt;
+
 	if (unlikely(!s) || !kasan_check_byte(s))
 		return;
 
 	cpus_read_lock();
 	mutex_lock(&slab_mutex);
 
-	s->refcount--;
-	if (s->refcount)
+	refcnt = --s->refcount;
+	if (refcnt)
 		goto out_unlock;
 
 	WARN(shutdown_cache(s),
@@ -481,6 +492,8 @@  void kmem_cache_destroy(struct kmem_cache *s)
 out_unlock:
 	mutex_unlock(&slab_mutex);
 	cpus_read_unlock();
+	if (!refcnt && !(s->flags & SLAB_TYPESAFE_BY_RCU))
+		kmem_cache_release(s);
 }
 EXPORT_SYMBOL(kmem_cache_destroy);