
[v2,4/5] mm/slub: convert object_map_lock to non-raw spinlock

Message ID: 20220823170400.26546-5-vbabka@suse.cz
State: New
Series: mm/slub: fix validation races and cleanup locking

Commit Message

Vlastimil Babka Aug. 23, 2022, 5:03 p.m. UTC
The only remaining user of object_map_lock is list_slab_objects().
Obtaining the lock there used to happen under slab_lock() which implied
disabling irqs on PREEMPT_RT, thus it's a raw_spinlock. With the
slab_lock() removed, we can convert it to a normal spinlock.

Also remove the get_map()/put_map() wrappers as list_slab_objects()
became their only remaining user.

Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: David Rientjes <rientjes@google.com>
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
---
 mm/slub.c | 36 ++++++------------------------------
 1 file changed, 6 insertions(+), 30 deletions(-)
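
For context on why the lock type can change at all: on PREEMPT_RT, a
spinlock_t is a sleeping lock (backed by an rtmutex) and must not be
acquired while interrupts are disabled, whereas a raw_spinlock_t always
truly spins and is safe in irqs-off contexts. Because object_map_lock
used to be taken under slab_lock() with irqs disabled, it had to be
raw; with that constraint gone, a plain spinlock suffices. A minimal
sketch of the two declarations, assuming kernel context (the lock names
below are illustrative, not taken from the patch):

#include <linux/spinlock.h>

/*
 * Old situation: the lock was taken with irqs off (under slab_lock()),
 * so it had to be raw. On PREEMPT_RT, raw_spinlock_t really spins with
 * preemption/irqs disabled, while spinlock_t may sleep.
 */
static DEFINE_RAW_SPINLOCK(irqs_off_safe_lock);

/*
 * New situation: the lock is only taken from sleepable context, so a
 * plain spinlock is sufficient and RT-friendly (a contending task can
 * be preempted instead of busy-waiting).
 */
static DEFINE_SPINLOCK(sleepable_ctx_lock);
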

Comments

Sebastian Andrzej Siewior Aug. 24, 2022, 3:53 p.m. UTC | #1
On 2022-08-23 19:03:59 [+0200], Vlastimil Babka wrote:
> The only remaining user of object_map_lock is list_slab_objects().
> Obtaining the lock there used to happen under slab_lock() which implied
> disabling irqs on PREEMPT_RT, thus it's a raw_spinlock. With the
> slab_lock() removed, we can convert it to a normal spinlock.
> 
> Also remove the get_map()/put_map() wrappers as list_slab_objects()
> became their only remaining user.
> 
> Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
> Acked-by: David Rientjes <rientjes@google.com>
> Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>

Reviewed-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>

Sebastian

Patch

diff --git a/mm/slub.c b/mm/slub.c
index b4065e892f7c..0444a2ba4f12 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -565,7 +565,7 @@  static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct slab *slab,
 
 #ifdef CONFIG_SLUB_DEBUG
 static unsigned long object_map[BITS_TO_LONGS(MAX_OBJS_PER_PAGE)];
-static DEFINE_RAW_SPINLOCK(object_map_lock);
+static DEFINE_SPINLOCK(object_map_lock);
 
 static void __fill_map(unsigned long *obj_map, struct kmem_cache *s,
 		       struct slab *slab)
@@ -599,30 +599,6 @@  static bool slab_add_kunit_errors(void)
 static inline bool slab_add_kunit_errors(void) { return false; }
 #endif
 
-/*
- * Determine a map of objects in use in a slab.
- *
- * Node listlock must be held to guarantee that the slab does
- * not vanish from under us.
- */
-static unsigned long *get_map(struct kmem_cache *s, struct slab *slab)
-	__acquires(&object_map_lock)
-{
-	VM_BUG_ON(!irqs_disabled());
-
-	raw_spin_lock(&object_map_lock);
-
-	__fill_map(object_map, s, slab);
-
-	return object_map;
-}
-
-static void put_map(unsigned long *map) __releases(&object_map_lock)
-{
-	VM_BUG_ON(map != object_map);
-	raw_spin_unlock(&object_map_lock);
-}
-
 static inline unsigned int size_from_object(struct kmem_cache *s)
 {
 	if (s->flags & SLAB_RED_ZONE)
@@ -4367,21 +4343,21 @@  static void list_slab_objects(struct kmem_cache *s, struct slab *slab,
 {
 #ifdef CONFIG_SLUB_DEBUG
 	void *addr = slab_address(slab);
-	unsigned long flags;
-	unsigned long *map;
 	void *p;
 
 	slab_err(s, slab, text, s->name);
 
-	map = get_map(s, slab);
+	spin_lock(&object_map_lock);
+	__fill_map(object_map, s, slab);
+
 	for_each_object(p, s, addr, slab->objects) {
 
-		if (!test_bit(__obj_to_index(s, addr, p), map)) {
+		if (!test_bit(__obj_to_index(s, addr, p), object_map)) {
 			pr_err("Object 0x%p @offset=%tu\n", p, p - addr);
 			print_tracking(s, p);
 		}
 	}
-	put_map(map);
+	spin_unlock(&object_map_lock);
 #endif
 }
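
For readability, here is how list_slab_objects() reads after this hunk
is applied, reconstructed from the diff above (the comment on the lock
is added here for explanation and is not in the patch):

static void list_slab_objects(struct kmem_cache *s, struct slab *slab,
			      const char *text)
{
#ifdef CONFIG_SLUB_DEBUG
	void *addr = slab_address(slab);
	void *p;

	slab_err(s, slab, text, s->name);

	/* object_map is a shared scratch bitmap; the spinlock serializes its users */
	spin_lock(&object_map_lock);
	__fill_map(object_map, s, slab);

	for_each_object(p, s, addr, slab->objects) {

		if (!test_bit(__obj_to_index(s, addr, p), object_map)) {
			pr_err("Object 0x%p @offset=%tu\n", p, p - addr);
			print_tracking(s, p);
		}
	}
	spin_unlock(&object_map_lock);
#endif
}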