[v4,11/32] mm/slub: Convert kfree() to use a struct slab

Message ID 20220104001046.12263-12-vbabka@suse.cz (mailing list archive)
State New
Series Separate struct slab from struct page

Commit Message

Vlastimil Babka Jan. 4, 2022, 12:10 a.m. UTC
From: "Matthew Wilcox (Oracle)" <willy@infradead.org>

Convert kfree(), kmem_cache_free() and ___cache_free() to resolve object
addresses to struct slab, using a folio as an intermediate step where
needed. Keep passing the result as struct page for now, in preparation
for the mass conversion of internal functions.

[ vbabka@suse.cz: Use a folio as an intermediate step when checking for
  large kmalloc pages and when freeing them - rename free_nonslab_page()
  to free_large_kmalloc(), which takes a struct folio ]

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
---
 mm/slub.c | 29 ++++++++++++++++-------------
 1 file changed, 16 insertions(+), 13 deletions(-)
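
For context, the conversion above leans on folio helpers introduced
earlier in this series. Below is a minimal sketch of the resolution
pattern, not the exact mm/slab.h code; it assumes virt_to_folio(),
folio_test_slab(), folio_slab() and slab_folio() behave as this series
defines them:

	/* Resolve an object address to its slab, going through the folio. */
	static inline struct slab *virt_to_slab(const void *addr)
	{
		struct folio *folio = virt_to_folio(addr);	/* object -> folio */

		if (!folio_test_slab(folio))
			return NULL;	/* e.g. a large kmalloc page, no slab behind it */

		return folio_slab(folio);	/* same memory, typed as struct slab */
	}

	/* Back to struct page, for internal functions not yet converted. */
	#define slab_page(s) folio_page(slab_folio(s), 0)

kmem_cache_free() and ___cache_free() can use slab_page(virt_to_slab(x))
unconditionally, because a cache-managed object is always backed by a
slab; kfree() must test folio_test_slab() first, since a large kmalloc
allocation is a bare compound page with no struct slab behind it.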

Comments

Roman Gushchin Jan. 5, 2022, 1 a.m. UTC | #1
On Tue, Jan 04, 2022 at 01:10:25AM +0100, Vlastimil Babka wrote:
> From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
> 
> Convert kfree(), kmem_cache_free() and ___cache_free() to resolve object
> addresses to struct slab, using folio as intermediate step where needed.
> Keep passing the result as struct page for now in preparation for mass
> conversion of internal functions.
> 
> [ vbabka@suse.cz: Use folio as intermediate step when checking for
>   large kmalloc pages, and when freeing them - rename
>   free_nonslab_page() to free_large_kmalloc() that takes struct folio ]
> 
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
> ---
>  mm/slub.c | 29 ++++++++++++++++-------------
>  1 file changed, 16 insertions(+), 13 deletions(-)
> 
> diff --git a/mm/slub.c b/mm/slub.c
> index a45b74d2712f..acf2608a57c5 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -3517,7 +3517,7 @@ static __always_inline void slab_free(struct kmem_cache *s, struct page *page,
>  #ifdef CONFIG_KASAN_GENERIC
>  void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr)
>  {
> -	do_slab_free(cache, virt_to_head_page(x), x, NULL, 1, addr);
> +	do_slab_free(cache, slab_page(virt_to_slab(x)), x, NULL, 1, addr);
>  }
>  #endif
>  
> @@ -3527,7 +3527,7 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
>  	if (!s)
>  		return;
>  	trace_kmem_cache_free(_RET_IP_, x, s->name);
> -	slab_free(s, virt_to_head_page(x), x, NULL, 1, _RET_IP_);
> +	slab_free(s, slab_page(virt_to_slab(x)), x, NULL, 1, _RET_IP_);
>  }
>  EXPORT_SYMBOL(kmem_cache_free);
>  
> @@ -3539,16 +3539,17 @@ struct detached_freelist {
>  	struct kmem_cache *s;
>  };
>  
> -static inline void free_nonslab_page(struct page *page, void *object)
> +static inline void free_large_kmalloc(struct folio *folio, void *object)

It's way more clear now what it's all about. Thanks!

Reviewed-by: Roman Gushchin <guro@fb.com>

Patch

diff --git a/mm/slub.c b/mm/slub.c
index a45b74d2712f..acf2608a57c5 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3517,7 +3517,7 @@ static __always_inline void slab_free(struct kmem_cache *s, struct page *page,
 #ifdef CONFIG_KASAN_GENERIC
 void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr)
 {
-	do_slab_free(cache, virt_to_head_page(x), x, NULL, 1, addr);
+	do_slab_free(cache, slab_page(virt_to_slab(x)), x, NULL, 1, addr);
 }
 #endif
 
@@ -3527,7 +3527,7 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
 	if (!s)
 		return;
 	trace_kmem_cache_free(_RET_IP_, x, s->name);
-	slab_free(s, virt_to_head_page(x), x, NULL, 1, _RET_IP_);
+	slab_free(s, slab_page(virt_to_slab(x)), x, NULL, 1, _RET_IP_);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
@@ -3539,16 +3539,17 @@ struct detached_freelist {
 	struct kmem_cache *s;
 };
 
-static inline void free_nonslab_page(struct page *page, void *object)
+static inline void free_large_kmalloc(struct folio *folio, void *object)
 {
-	unsigned int order = compound_order(page);
+	unsigned int order = folio_order(folio);
 
-	if (WARN_ON_ONCE(!PageCompound(page)))
+	if (WARN_ON_ONCE(order == 0))
 		pr_warn_once("object pointer: 0x%p\n", object);
 
 	kfree_hook(object);
-	mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B, -(PAGE_SIZE << order));
-	__free_pages(page, order);
+	mod_lruvec_page_state(folio_page(folio, 0), NR_SLAB_UNRECLAIMABLE_B,
+			      -(PAGE_SIZE << order));
+	__free_pages(folio_page(folio, 0), order);
 }
 
 /*
@@ -3588,7 +3589,7 @@ int build_detached_freelist(struct kmem_cache *s, size_t size,
 	if (!s) {
 		/* Handle kalloc'ed objects */
 		if (unlikely(!folio_test_slab(folio))) {
-			free_nonslab_page(folio_page(folio, 0), object);
+			free_large_kmalloc(folio, object);
 			p[size] = NULL; /* mark object processed */
 			return size;
 		}
@@ -4547,7 +4548,8 @@ EXPORT_SYMBOL(__ksize);
 
 void kfree(const void *x)
 {
-	struct page *page;
+	struct folio *folio;
+	struct slab *slab;
 	void *object = (void *)x;
 
 	trace_kfree(_RET_IP_, x);
@@ -4555,12 +4557,13 @@ void kfree(const void *x)
 	if (unlikely(ZERO_OR_NULL_PTR(x)))
 		return;
 
-	page = virt_to_head_page(x);
-	if (unlikely(!PageSlab(page))) {
-		free_nonslab_page(page, object);
+	folio = virt_to_folio(x);
+	if (unlikely(!folio_test_slab(folio))) {
+		free_large_kmalloc(folio, object);
 		return;
 	}
-	slab_free(page->slab_cache, page, object, NULL, 1, _RET_IP_);
+	slab = folio_slab(folio);
+	slab_free(slab->slab_cache, slab_page(slab), object, NULL, 1, _RET_IP_);
 }
 EXPORT_SYMBOL(kfree);