@@ -48,7 +48,7 @@
* 1. slab_mutex (Global Mutex)
* 2. node->list_lock (Spinlock)
* 3. kmem_cache->cpu_slab->lock (Local lock)
- * 4. slab_lock(page) (Only on some arches or for debugging)
+ * 4. slab_lock() (Only on some arches or for debugging)
* 5. object_map_lock (Only for debugging)
*
* slab_mutex
@@ -64,10 +64,10 @@
*
* The slab_lock is only used for debugging and on arches that do not
* have the ability to do a cmpxchg_double. It only protects:
- * A. page->freelist -> List of object free in a page
- * B. page->inuse -> Number of objects in use
- * C. page->objects -> Number of objects in page
- * D. page->frozen -> frozen state
+ * A. slab->freelist -> List of free objects in a slab
+ * B. slab->inuse -> Number of objects in use
+ * C. slab->objects -> Number of objects in a slab
+ * D. slab->frozen -> frozen state
*
* Frozen slabs
*
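
[Illustration only, not part of the patch: a minimal sketch of the nesting documented above, modeled on free_debug_processing() further down. example_debug_scan() is an invented name and the snippet assumes SLUB-internal types.]

static void example_debug_scan(struct kmem_cache *s, struct kmem_cache_node *n,
			       struct slab *slab)
{
	unsigned long flags, flags2;

	spin_lock_irqsave(&n->list_lock, flags);	/* level 2: node->list_lock */
	slab_lock(slab, &flags2);			/* level 4: per-slab bit lock */
	/* ... consistency checks on slab->freelist / slab->inuse with s ... */
	slab_unlock(slab, &flags2);
	spin_unlock_irqrestore(&n->list_lock, flags);
}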
@@ -417,28 +417,26 @@ static inline unsigned int oo_objects(struct kmem_cache_order_objects x)
/*
* Per slab locking using the pagelock
*/
-static __always_inline void __slab_lock(struct page *page)
+static __always_inline void __slab_lock(struct slab *slab)
{
- VM_BUG_ON_PAGE(PageTail(page), page);
- bit_spin_lock(PG_locked, &page->flags);
+ bit_spin_lock(PG_locked, &slab->flags);
}
-static __always_inline void __slab_unlock(struct page *page)
+static __always_inline void __slab_unlock(struct slab *slab)
{
- VM_BUG_ON_PAGE(PageTail(page), page);
- __bit_spin_unlock(PG_locked, &page->flags);
+ __bit_spin_unlock(PG_locked, &slab->flags);
}
-static __always_inline void slab_lock(struct page *page, unsigned long *flags)
+static __always_inline void slab_lock(struct slab *slab, unsigned long *flags)
{
if (IS_ENABLED(CONFIG_PREEMPT_RT))
local_irq_save(*flags);
- __slab_lock(page);
+ __slab_lock(slab);
}
-static __always_inline void slab_unlock(struct page *page, unsigned long *flags)
+static __always_inline void slab_unlock(struct slab *slab, unsigned long *flags)
{
- __slab_unlock(page);
+ __slab_unlock(slab);
if (IS_ENABLED(CONFIG_PREEMPT_RT))
local_irq_restore(*flags);
}
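
[Illustration only: example_caller() is made up. It shows the new calling convention at a call site, with the old slab_page() spelling kept as a comment; on configurations that actually take the per-slab lock, the fields listed in the header comment are serialized inside the critical section.]

static void example_caller(struct slab *slab)
{
	unsigned long flags;

	/* before: slab_lock(slab_page(slab), &flags); */
	slab_lock(slab, &flags);
	/* slab->freelist, slab->inuse, slab->objects, slab->frozen stable here */
	slab_unlock(slab, &flags);
}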
@@ -468,15 +466,15 @@ static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct slab *slab
/* init to 0 to prevent spurious warnings */
unsigned long flags = 0;
- slab_lock(slab_page(slab), &flags);
+ slab_lock(slab, &flags);
if (slab->freelist == freelist_old &&
slab->counters == counters_old) {
slab->freelist = freelist_new;
slab->counters = counters_new;
- slab_unlock(slab_page(slab), &flags);
+ slab_unlock(slab, &flags);
return true;
}
- slab_unlock(slab_page(slab), &flags);
+ slab_unlock(slab, &flags);
}
cpu_relax();
@@ -507,16 +505,16 @@ static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct slab *slab,
unsigned long flags;
local_irq_save(flags);
- __slab_lock(slab_page(slab));
+ __slab_lock(slab);
if (slab->freelist == freelist_old &&
slab->counters == counters_old) {
slab->freelist = freelist_new;
slab->counters = counters_new;
- __slab_unlock(slab_page(slab));
+ __slab_unlock(slab);
local_irq_restore(flags);
return true;
}
- __slab_unlock(slab_page(slab));
+ __slab_unlock(slab);
local_irq_restore(flags);
}
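
[Illustration only: the usual pattern around cmpxchg_double_slab() is a read-compute-retry loop on the freelist/counters pair. example_freeze() is invented, the locking context of real callers is omitted, and it assumes the usual form whose last parameter is a name string for debug reporting.]

static void example_freeze(struct kmem_cache *s, struct slab *slab)
{
	struct slab old, new;

	do {
		old.freelist = slab->freelist;	/* snapshot current state */
		old.counters = slab->counters;
		new.freelist = old.freelist;
		new.counters = old.counters;
		new.frozen = 1;			/* flip one bit in the counters word */
	} while (!cmpxchg_double_slab(s, slab,
				      old.freelist, old.counters,
				      new.freelist, new.counters,
				      "example_freeze"));
}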
@@ -1353,7 +1351,7 @@ static noinline int free_debug_processing(
int ret = 0;
spin_lock_irqsave(&n->list_lock, flags);
- slab_lock(slab_page(slab), &flags2);
+ slab_lock(slab, &flags2);
if (s->flags & SLAB_CONSISTENCY_CHECKS) {
if (!check_slab(s, slab))
@@ -1386,7 +1384,7 @@ static noinline int free_debug_processing(
slab_err(s, slab, "Bulk freelist count(%d) invalid(%d)\n",
bulk_cnt, cnt);
- slab_unlock(slab_page(slab), &flags2);
+ slab_unlock(slab, &flags2);
spin_unlock_irqrestore(&n->list_lock, flags);
if (!ret)
slab_fix(s, "Object at 0x%p not freed", object);
@@ -4214,7 +4212,7 @@ static void list_slab_objects(struct kmem_cache *s, struct slab *slab,
void *p;
slab_err(s, slab, text, s->name);
- slab_lock(slab_page(slab), &flags);
+ slab_lock(slab, &flags);
map = get_map(s, slab);
for_each_object(p, s, addr, slab->objects) {
@@ -4225,7 +4223,7 @@ static void list_slab_objects(struct kmem_cache *s, struct slab *slab,
}
}
put_map(map);
- slab_unlock(slab_page(slab), &flags);
+ slab_unlock(slab, &flags);
#endif
}
@@ -4958,7 +4956,7 @@ static void validate_slab(struct kmem_cache *s, struct slab *slab,
void *addr = slab_address(slab);
unsigned long flags;
- slab_lock(slab_page(slab), &flags);
+ slab_lock(slab, &flags);
if (!check_slab(s, slab) || !on_freelist(s, slab, NULL))
goto unlock;
@@ -4973,7 +4971,7 @@ static void validate_slab(struct kmem_cache *s, struct slab *slab,
break;
}
unlock:
- slab_unlock(slab_page(slab), &flags);
+ slab_unlock(slab, &flags);
}
static int validate_slab_node(struct kmem_cache *s,
Improve type safety to the point where we can get rid of the assertions that this is not a tail page. Remove a lot of calls to slab_page(). Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org> --- mm/slub.c | 52 +++++++++++++++++++++++++--------------------------- 1 file changed, 25 insertions(+), 27 deletions(-)