
[2/3] mm/slab: delete cache_alloc_debugcheck_before()

Message ID: 20220605152539.3196045-2-daniel.vetter@ffwll.ch (mailing list archive)
State: New, archived
Series: [1/3] mm/page_alloc: use might_alloc()

Commit Message

Daniel Vetter June 5, 2022, 3:25 p.m. UTC
It only does a might_sleep_if(gfpflags_allow_blocking()) check, which is
already covered by the might_alloc() in slab_pre_alloc_hook(). And all
callers of cache_alloc_debugcheck_before() already call that beforehand.

Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: linux-mm@kvack.org
---
 mm/slab.c | 10 ----------
 1 file changed, 10 deletions(-)
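
For context: the check being deleted duplicates what slab_pre_alloc_hook()
already does via might_alloc(). An abridged sketch of the two helpers as they
looked around the time of this series (paraphrased from
include/linux/sched/mm.h and mm/slab.h; not verbatim kernel source):

/* include/linux/sched/mm.h (abridged): the hook's check */
static inline void might_alloc(gfp_t gfp_mask)
{
	fs_reclaim_acquire(gfp_mask);
	fs_reclaim_release(gfp_mask);

	/* the same check cache_alloc_debugcheck_before() performed:
	 * may this allocation sleep for direct reclaim? */
	might_sleep_if(gfpflags_allow_blocking(gfp_mask));
}

/* mm/slab.h (abridged): runs at the start of slab_alloc(),
 * slab_alloc_node() and kmem_cache_alloc_bulk(), i.e. before every
 * cache_alloc_debugcheck_before() call site removed by this patch */
static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
		struct list_lru *lru, struct obj_cgroup **objcgp,
		size_t size, gfp_t flags)
{
	flags &= gfp_allowed_mask;

	might_alloc(flags);

	/* ... failslab and memcg pre-alloc hooks elided ... */
	return s;
}

Note that might_alloc() is strictly stronger than the deleted check: besides
the might_sleep_if(), it also feeds the allocation into the fs_reclaim
lockdep annotations.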

Comments

David Hildenbrand June 7, 2022, 12:59 p.m. UTC | #1
On 05.06.22 17:25, Daniel Vetter wrote:
> It only does a might_sleep_if(gfpflags_allow_blocking()) check, which is
> already covered by the might_alloc() in slab_pre_alloc_hook(). And all
> callers of cache_alloc_debugcheck_before() already call that beforehand.
> 
> Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
> Cc: Christoph Lameter <cl@linux.com>
> Cc: Pekka Enberg <penberg@kernel.org>
> Cc: David Rientjes <rientjes@google.com>
> Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
> Cc: Andrew Morton <akpm@linux-foundation.org>
> Cc: Vlastimil Babka <vbabka@suse.cz>
> Cc: Roman Gushchin <roman.gushchin@linux.dev>
> Cc: linux-mm@kvack.org
> ---

LGTM

Reviewed-by: David Hildenbrand <david@redhat.com>
David Rientjes June 12, 2022, 11 p.m. UTC | #2
On Sun, 5 Jun 2022, Daniel Vetter wrote:

> It only does a might_sleep_if(gfpflags_allow_blocking()) check, which is
> already covered by the might_alloc() in slab_pre_alloc_hook(). And all
> callers of cache_alloc_debugcheck_before() already call that beforehand.
> 
> Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
> Cc: Christoph Lameter <cl@linux.com>
> Cc: Pekka Enberg <penberg@kernel.org>
> Cc: David Rientjes <rientjes@google.com>
> Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
> Cc: Andrew Morton <akpm@linux-foundation.org>
> Cc: Vlastimil Babka <vbabka@suse.cz>
> Cc: Roman Gushchin <roman.gushchin@linux.dev>
> Cc: linux-mm@kvack.org

Acked-by: David Rientjes <rientjes@google.com>
Muchun Song June 13, 2022, 3:21 a.m. UTC | #3
On Sun, Jun 05, 2022 at 05:25:38PM +0200, Daniel Vetter wrote:
> It only does a might_sleep_if(gfpflags_allow_blocking()) check, which is
> already covered by the might_alloc() in slab_pre_alloc_hook(). And all
> callers of cache_alloc_debugcheck_before() already call that beforehand.
> 
> Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>

Nice cleanup.

Reviewed-by: Muchun Song <songmuchun@bytedance.com>

Thanks.
Vlastimil Babka June 14, 2022, 1:05 p.m. UTC | #4
On 6/5/22 17:25, Daniel Vetter wrote:
> It only does a might_sleep_if(gfpflags_allow_blocking()) check, which is
> already covered by the might_alloc() in slab_pre_alloc_hook(). And all
> callers of cache_alloc_debugcheck_before() already call that beforehand.
> 
> Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
> Cc: Christoph Lameter <cl@linux.com>
> Cc: Pekka Enberg <penberg@kernel.org>
> Cc: David Rientjes <rientjes@google.com>
> Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
> Cc: Andrew Morton <akpm@linux-foundation.org>
> Cc: Vlastimil Babka <vbabka@suse.cz>
> Cc: Roman Gushchin <roman.gushchin@linux.dev>
> Cc: linux-mm@kvack.org

Thanks, added to slab/for-5.20/cleanup as it's slab-specific and independent
of 1/3 and 3/3.

> ---
>  mm/slab.c | 10 ----------
>  1 file changed, 10 deletions(-)
> 
> diff --git a/mm/slab.c b/mm/slab.c
> index b04e40078bdf..75779ac5f5ba 100644
> --- a/mm/slab.c
> +++ b/mm/slab.c
> @@ -2973,12 +2973,6 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
>  	return ac->entry[--ac->avail];
>  }
>  
> -static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
> -						gfp_t flags)
> -{
> -	might_sleep_if(gfpflags_allow_blocking(flags));
> -}
> -
>  #if DEBUG
>  static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
>  				gfp_t flags, void *objp, unsigned long caller)
> @@ -3219,7 +3213,6 @@ slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, size_t orig_
>  	if (unlikely(ptr))
>  		goto out_hooks;
>  
> -	cache_alloc_debugcheck_before(cachep, flags);
>  	local_irq_save(save_flags);
>  
>  	if (nodeid == NUMA_NO_NODE)
> @@ -3304,7 +3297,6 @@ slab_alloc(struct kmem_cache *cachep, struct list_lru *lru, gfp_t flags,
>  	if (unlikely(objp))
>  		goto out;
>  
> -	cache_alloc_debugcheck_before(cachep, flags);
>  	local_irq_save(save_flags);
>  	objp = __do_cache_alloc(cachep, flags);
>  	local_irq_restore(save_flags);
> @@ -3541,8 +3533,6 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
>  	if (!s)
>  		return 0;
>  
> -	cache_alloc_debugcheck_before(s, flags);
> -
>  	local_irq_disable();
>  	for (i = 0; i < size; i++) {
>  		void *objp = kfence_alloc(s, s->object_size, flags) ?: __do_cache_alloc(s, flags);

Patch

diff --git a/mm/slab.c b/mm/slab.c
index b04e40078bdf..75779ac5f5ba 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2973,12 +2973,6 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
 	return ac->entry[--ac->avail];
 }
 
-static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
-						gfp_t flags)
-{
-	might_sleep_if(gfpflags_allow_blocking(flags));
-}
-
 #if DEBUG
 static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
 				gfp_t flags, void *objp, unsigned long caller)
@@ -3219,7 +3213,6 @@ slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, size_t orig_
 	if (unlikely(ptr))
 		goto out_hooks;
 
-	cache_alloc_debugcheck_before(cachep, flags);
 	local_irq_save(save_flags);
 
 	if (nodeid == NUMA_NO_NODE)
@@ -3304,7 +3297,6 @@ slab_alloc(struct kmem_cache *cachep, struct list_lru *lru, gfp_t flags,
 	if (unlikely(objp))
 		goto out;
 
-	cache_alloc_debugcheck_before(cachep, flags);
 	local_irq_save(save_flags);
 	objp = __do_cache_alloc(cachep, flags);
 	local_irq_restore(save_flags);
@@ -3541,8 +3533,6 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 	if (!s)
 		return 0;
 
-	cache_alloc_debugcheck_before(s, flags);
-
 	local_irq_disable();
 	for (i = 0; i < size; i++) {
 		void *objp = kfence_alloc(s, s->object_size, flags) ?: __do_cache_alloc(s, flags);