@@ -380,12 +380,12 @@ __slab_unlock(struct page *page, unsigne
static __always_inline void
slab_lock(struct page *page, unsigned long *flags)
{
- __slab_lock(page, flags, false);
+ __slab_lock(page, flags, IS_ENABLED(CONFIG_PREEMPT_RT));
}

static __always_inline void slab_unlock(struct page *page, unsigned long *flags)
{
- __slab_unlock(page, flags, false);
+ __slab_unlock(page, flags, IS_ENABLED(CONFIG_PREEMPT_RT));
}

static inline bool ___cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
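
For reference, a minimal sketch of the __slab_lock()/__slab_unlock() helpers that the hunk above now calls with IS_ENABLED(CONFIG_PREEMPT_RT). The signatures follow the call sites in this patch; the bodies are reconstructed for illustration under the assumption of the usual PG_locked bit-spinlock implementation, and are not quoted verbatim from this series:

/*
 * Sketch (assumed helper bodies, inferred from the call sites above):
 * the bool selects whether taking the slab lock also disables
 * interrupts, which PREEMPT_RT needs because its lock primitives
 * don't disable them implicitly.
 */
static __always_inline void
__slab_lock(struct page *page, unsigned long *flags, bool disable_irqs)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	if (disable_irqs)
		local_irq_save(*flags);	/* irqs off only when requested */
	bit_spin_lock(PG_locked, &page->flags);
}

static __always_inline void
__slab_unlock(struct page *page, unsigned long *flags, bool disable_irqs)
{
	__bit_spin_unlock(PG_locked, &page->flags);
	if (disable_irqs)
		local_irq_restore(*flags);
}
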
@@ -429,14 +429,19 @@ static inline bool ___cmpxchg_double_sla
return false;
}

-/* Interrupts must be disabled (for the fallback code to work right) */
+/*
+ * Interrupts must be disabled (for the fallback code to work right), typically
+ * by an _irqsave() lock variant. Except on PREEMPT_RT, where the locks don't
+ * disable interrupts themselves, so we have to disable them explicitly here.
+ */
static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
void *freelist_old, unsigned long counters_old,
void *freelist_new, unsigned long counters_new,
const char *n)
{
return ___cmpxchg_double_slab(s, page, freelist_old, counters_old,
- freelist_new, counters_new, n, false);
+ freelist_new, counters_new, n,
+ IS_ENABLED(CONFIG_PREEMPT_RT));
}

static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
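
To show how the disable_irqs argument threads through, here is a condensed sketch of the ___cmpxchg_double_slab() fallback path. It is an assumption about the surrounding code rather than part of the hunks above, and it omits the lockless cmpxchg_double() fast path the real function takes when the architecture supports it:

/*
 * Condensed sketch of the ___cmpxchg_double_slab() slow path (assumed).
 * disable_irqs is the argument the hunks above start passing as
 * IS_ENABLED(CONFIG_PREEMPT_RT): on RT the slab lock must disable
 * interrupts itself, since no _irqsave() lock variant has done so.
 */
static inline bool ___cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
		void *freelist_old, unsigned long counters_old,
		void *freelist_new, unsigned long counters_new,
		const char *n, bool disable_irqs)
{
	unsigned long flags;
	bool ret = false;

	__slab_lock(page, &flags, disable_irqs);
	/* Compare-and-swap of freelist and counters under the slab lock. */
	if (page->freelist == freelist_old &&
	    page->counters == counters_old) {
		page->freelist = freelist_new;
		page->counters = counters_new;
		ret = true;
	}
	__slab_unlock(page, &flags, disable_irqs);

	return ret;
}
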