@@ -2333,25 +2333,15 @@ redo:
}
}

-/*
- * Unfreeze all the cpu partial slabs.
- *
- * This function must be called with preemption or migration
- * disabled with c local to the cpu.
- */
-static void unfreeze_partials(struct kmem_cache *s,
- struct kmem_cache_cpu *c)
-{
#ifdef CONFIG_SLUB_CPU_PARTIAL
+static void __unfreeze_partials(struct kmem_cache *s, struct page *partial_page)
+{
struct kmem_cache_node *n = NULL, *n2 = NULL;
- struct page *page, *partial_page, *discard_page = NULL;
+ struct page *page, *discard_page = NULL;
unsigned long flags;

local_irq_save(flags);
- partial_page = slub_percpu_partial(c);
- c->partial = NULL;
-
while (partial_page) {
struct page new;
struct page old;
@@ -2406,10 +2396,45 @@ static void unfreeze_partials(struct kme
discard_slab(s, page);
stat(s, FREE_SLAB);
}
+}

-#endif /* CONFIG_SLUB_CPU_PARTIAL */
+/*
+ * Unfreeze all the cpu partial slabs.
+ */
+static void unfreeze_partials(struct kmem_cache *s)
+{
+ struct page *partial_page;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ partial_page = this_cpu_read(s->cpu_slab->partial);
+ this_cpu_write(s->cpu_slab->partial, NULL);
+ local_irq_restore(flags);
+
+ if (partial_page)
+ __unfreeze_partials(s, partial_page);
+}
+
+static void unfreeze_partials_cpu(struct kmem_cache *s,
+ struct kmem_cache_cpu *c)
+{
+ struct page *partial_page;
+
+ partial_page = slub_percpu_partial(c);
+ c->partial = NULL;
+
+ if (partial_page)
+ __unfreeze_partials(s, partial_page);
}
+
+#else /* CONFIG_SLUB_CPU_PARTIAL */
+
+static inline void unfreeze_partials(struct kmem_cache *s) { }
+static inline void unfreeze_partials_cpu(struct kmem_cache *s,
+ struct kmem_cache_cpu *c) { }
+
+#endif /* CONFIG_SLUB_CPU_PARTIAL */

/*
* Put a page that was just frozen (in __slab_free|get_partial_node) into a
* partial page slot if available.
@@ -2438,7 +2463,7 @@ static void put_cpu_partial(struct kmem_
* partial array is full. Move the existing
* set to the per node partial list.
*/
- unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));
+ unfreeze_partials(s);
oldpage = NULL;
pobjects = 0;
pages = 0;
@@ -2473,11 +2498,6 @@ static inline void flush_slab(struct kme
stat(s, CPUSLAB_FLUSH);
}

-/*
- * Flush cpu slab.
- *
- * Called from IPI handler with interrupts disabled.
- */
static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
{
struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
@@ -2485,14 +2505,23 @@ static inline void __flush_cpu_slab(stru
if (c->page)
flush_slab(s, c);

- unfreeze_partials(s, c);
+ unfreeze_partials_cpu(s, c);
}
+/*
+ * Flush cpu slab.
+ *
+ * Called from IPI handler with interrupts disabled.
+ */
static void flush_cpu_slab(void *d)
{
struct kmem_cache *s = d;
+ struct kmem_cache_cpu *c = this_cpu_ptr(s->cpu_slab);

- __flush_cpu_slab(s, smp_processor_id());
+ if (c->page)
+ flush_slab(s, c);
+
+ unfreeze_partials(s);
}
static bool has_cpu_slab(int cpu, void *info)