
[1/2] slub: Cure list_slab_objects() from double fix

Message ID 20200618201234.795692-1-bigeasy@linutronix.de (mailing list archive)
State New, archived
Series [1/2] slub: Cure list_slab_objects() from double fix

Commit Message

Sebastian Andrzej Siewior June 18, 2020, 8:12 p.m. UTC
According to Christopher Lameter, two fixes have been merged for the
same problem. As far as I can tell, the resulting code no longer
invokes kmalloc() while holding the list_lock, so the original issue
is addressed. However, list_slab_objects() now misses an unlock (the
put_map() counterpart to get_map()), and the memory allocated in
free_partial() is never used.
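
To illustrate, the current combination looks roughly like this (a
sketch reconstructed from the diff below rather than copied verbatim;
get_map() and put_map() lock and unlock the shared object map):

  static void list_slab_objects(struct kmem_cache *s, struct page *page,
                                const char *text, unsigned long *map)
  {
  #ifdef CONFIG_SLUB_DEBUG
          void *addr = page_address(page);
          void *p;

          /* 'map' is the bitmap allocated in free_partial() ... */
          if (!map)
                  return;

          slab_err(s, page, text, s->name);
          slab_lock(page);

          /* ... but it is overwritten here, so that allocation is never used */
          map = get_map(s, page);
          for_each_object(p, s, addr, page->objects) {
                  /* report objects still marked allocated in 'map' */
          }
          /* missing: put_map(map), the unlock counterpart to get_map() */
          slab_unlock(page);
  #endif
  }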

Revert the mentioned commit.

Fixes: aa456c7aebb14 ("slub: remove kmalloc under list_lock from list_slab_objects() V2")
Link: https://lkml.kernel.org/r/alpine.DEB.2.22.394.2006181501480.12014@www.lameter.com
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 mm/slub.c | 19 ++++---------------
 1 file changed, 4 insertions(+), 15 deletions(-)

Patch

diff --git a/mm/slub.c b/mm/slub.c
index b8f798b50d44d..72195cafbb503 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3766,15 +3766,13 @@  static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
 }
 
 static void list_slab_objects(struct kmem_cache *s, struct page *page,
-			      const char *text, unsigned long *map)
+			      const char *text)
 {
 #ifdef CONFIG_SLUB_DEBUG
 	void *addr = page_address(page);
+	unsigned long *map;
 	void *p;
 
-	if (!map)
-		return;
-
 	slab_err(s, page, text, s->name);
 	slab_lock(page);
 
@@ -3786,6 +3784,7 @@  static void list_slab_objects(struct kmem_cache *s, struct page *page,
 			print_tracking(s, p);
 		}
 	}
+	put_map(map);
 	slab_unlock(page);
 #endif
 }
@@ -3799,11 +3798,6 @@  static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
 {
 	LIST_HEAD(discard);
 	struct page *page, *h;
-	unsigned long *map = NULL;
-
-#ifdef CONFIG_SLUB_DEBUG
-	map = bitmap_alloc(oo_objects(s->max), GFP_KERNEL);
-#endif
 
 	BUG_ON(irqs_disabled());
 	spin_lock_irq(&n->list_lock);
@@ -3813,16 +3807,11 @@  static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
 			list_add(&page->slab_list, &discard);
 		} else {
 			list_slab_objects(s, page,
-			  "Objects remaining in %s on __kmem_cache_shutdown()",
-			  map);
+			  "Objects remaining in %s on __kmem_cache_shutdown()");
 		}
 	}
 	spin_unlock_irq(&n->list_lock);
 
-#ifdef CONFIG_SLUB_DEBUG
-	bitmap_free(map);
-#endif
-
 	list_for_each_entry_safe(page, h, &discard, slab_list)
 		discard_slab(s, page);
 }