@@ -3517,7 +3517,7 @@ static __always_inline void slab_free(struct kmem_cache *s, struct page *page,
 #ifdef CONFIG_KASAN_GENERIC
 void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr)
 {
-	do_slab_free(cache, virt_to_head_page(x), x, NULL, 1, addr);
+	do_slab_free(cache, slab_page(virt_to_slab(x)), x, NULL, 1, addr);
 }
 #endif
 
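The conversion swaps virt_to_head_page(x) for slab_page(virt_to_slab(x)): the object's address is first resolved to the typed struct slab, then turned back into a page for the still page-based slab_free() internals. A paraphrased sketch of how the helpers are assumed to compose; the real definitions live in mm/slab.h and use _Generic to preserve const-ness, and this call site can skip any slab-flag check because the pointer is known to be slab-backed:

/* Paraphrased sketch, assuming struct slab is a typed overlay of the
 * allocation's head page; not the verbatim mm/slab.h definitions. */
#define slab_folio(s)	((struct folio *)(s))
#define folio_slab(f)	((struct slab *)(f))
#define slab_page(s)	folio_page(slab_folio(s), 0)

static inline struct slab *virt_to_slab(const void *addr)
{
	return folio_slab(virt_to_folio(addr));
}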
@@ -3527,7 +3527,7 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
 	if (!s)
 		return;
 	trace_kmem_cache_free(_RET_IP_, x, s->name);
-	slab_free(s, virt_to_head_page(x), x, NULL, 1, _RET_IP_);
+	slab_free(s, slab_page(virt_to_slab(x)), x, NULL, 1, _RET_IP_);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
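Note from the first hunk's header line that slab_free() itself still takes a struct page * at this stage, which is why both converted call sites immediately turn the struct slab back into its page with slab_page(). For a slab-backed object the old and new expressions should agree; stated as an assumed invariant (VM_BUG_ON is the stock kernel assertion helper, and the condition is this patch's premise rather than code from it):

	/* Assumed invariant behind the conversion: the typed lookup lands on
	 * the same head page that virt_to_head_page() used to return. */
	VM_BUG_ON(slab_page(virt_to_slab(x)) != virt_to_head_page(x));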
@@ -4546,7 +4546,8 @@ EXPORT_SYMBOL(__ksize);
 
 void kfree(const void *x)
 {
-	struct page *page;
+	struct folio *folio;
+	struct slab *slab;
 	void *object = (void *)x;
 
 	trace_kfree(_RET_IP_, x);
@@ -4554,12 +4555,13 @@ void kfree(const void *x)
 	if (unlikely(ZERO_OR_NULL_PTR(x)))
 		return;
 
-	page = virt_to_head_page(x);
-	if (unlikely(!PageSlab(page))) {
-		free_nonslab_page(page, object);
+	folio = virt_to_folio(x);
+	if (unlikely(!folio_test_slab(folio))) {
+		free_nonslab_page(folio_page(folio, 0), object);
 		return;
 	}
-	slab_free(page->slab_cache, page, object, NULL, 1, _RET_IP_);
+	slab = folio_slab(folio);
+	slab_free(slab->slab_cache, slab_page(slab), object, NULL, 1, _RET_IP_);
 }
 EXPORT_SYMBOL(kfree);
 
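Unlike kmem_cache_free(), kfree() cannot assume its argument is slab-backed: large kmalloc() requests bypass the slab caches and come straight from the page allocator, which is the case the folio_test_slab() branch filters out (free_nonslab_page() still takes a struct page, hence folio_page(folio, 0) for the folio's first page). A hedged illustration of the two paths, assuming a typical SLUB configuration where requests above KMALLOC_MAX_CACHE_SIZE are page-allocator-backed:

	void *small = kmalloc(64, GFP_KERNEL);	/* slab-backed */
	void *large = kmalloc(1 << 16, GFP_KERNEL);	/* above KMALLOC_MAX_CACHE_SIZE,
							 * so backed by raw pages */

	kfree(small);	/* folio_test_slab() true:  folio_slab() + slab_free() */
	kfree(large);	/* folio_test_slab() false: free_nonslab_page() */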