
[tip/core/rcu,28/41] slab: Replace synchronize_sched() with synchronize_rcu()

Message ID: 20181111194410.6368-28-paulmck@linux.ibm.com

Commit Message

Paul E. McKenney Nov. 11, 2018, 7:43 p.m. UTC
Now that synchronize_rcu() waits for preempt-disable regions of code
as well as RCU read-side critical sections, synchronize_sched() can be
replaced by synchronize_rcu().  This commit therefore makes this change.
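For illustration only (not part of this patch): a minimal sketch of the pattern
the consolidated grace period enables. The names struct foo, foo_gp, and
foo_lock are hypothetical; the point is that a preempt-disabled region now
counts as an RCU read-side critical section, so a single synchronize_rcu()
covers both kinds of reader.

#include <linux/preempt.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct foo {
	int val;
};

static DEFINE_SPINLOCK(foo_lock);
static struct foo __rcu *foo_gp;

/* Reader: preemption disabled, no rcu_read_lock() required. */
static int foo_read(void)
{
	struct foo *p;
	int val = 0;

	preempt_disable();
	p = rcu_dereference_sched(foo_gp);
	if (p)
		val = p->val;
	preempt_enable();
	return val;
}

/* Updater: one synchronize_rcu() now waits for rcu_read_lock()
 * readers and preempt-disabled readers alike, so no separate
 * synchronize_sched() is needed. */
static void foo_update(struct foo *newp)
{
	struct foo *oldp;

	spin_lock(&foo_lock);
	oldp = rcu_dereference_protected(foo_gp,
					 lockdep_is_held(&foo_lock));
	rcu_assign_pointer(foo_gp, newp);
	spin_unlock(&foo_lock);
	synchronize_rcu();	/* waits out preempt-disabled readers too */
	kfree(oldp);
}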

Signed-off-by: Paul E. McKenney <paulmck@linux.ibm.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: <linux-mm@kvack.org>
---
 mm/slab.c        | 4 ++--
 mm/slab_common.c | 6 +++---
 2 files changed, 5 insertions(+), 5 deletions(-)

Patch

diff --git a/mm/slab.c b/mm/slab.c
index 2a5654bb3b3f..3abb9feb3818 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -962,10 +962,10 @@  static int setup_kmem_cache_node(struct kmem_cache *cachep,
 	 * To protect lockless access to n->shared during irq disabled context.
 	 * If n->shared isn't NULL in irq disabled context, accessing to it is
 	 * guaranteed to be valid until irq is re-enabled, because it will be
-	 * freed after synchronize_sched().
+	 * freed after synchronize_rcu().
 	 */
 	if (old_shared && force_change)
-		synchronize_sched();
+		synchronize_rcu();
 
 fail:
 	kfree(old_shared);
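
For context, a hedged sketch (not from the kernel tree; node_like and the
helper names are hypothetical stand-ins for kmem_cache_node and
setup_kmem_cache_node()) of the protocol this hunk relies on: readers access
n->shared with interrupts disabled, which also disables preemption and
therefore now forms an RCU read-side critical section, so the updater can
free the old value after synchronize_rcu().

#include <linux/irqflags.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/types.h>

struct array_cache;			/* opaque stand-in */

struct node_like {
	struct array_cache *shared;
};

/* Reader: irqs disabled implies preemption disabled. */
static void node_reader(struct node_like *n)
{
	unsigned long flags;
	struct array_cache *ac;

	local_irq_save(flags);
	ac = n->shared;
	if (ac) {
		/* ac stays valid until irqs are re-enabled, because the
		 * updater frees it only after synchronize_rcu(). */
	}
	local_irq_restore(flags);
}

/* Updater mirrors the hunk above: swap, wait, then free. */
static void node_update(struct node_like *n, struct array_cache *new_shared,
			bool force_change)
{
	struct array_cache *old_shared = n->shared;

	n->shared = new_shared;
	if (old_shared && force_change)
		synchronize_rcu();	/* all irq-disabled readers are done */
	kfree(old_shared);
}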
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 7eb8dc136c1c..9c11e8a937d2 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -724,7 +724,7 @@  void slab_deactivate_memcg_cache_rcu_sched(struct kmem_cache *s,
 	css_get(&s->memcg_params.memcg->css);
 
 	s->memcg_params.deact_fn = deact_fn;
-	call_rcu_sched(&s->memcg_params.deact_rcu_head, kmemcg_deactivate_rcufn);
+	call_rcu(&s->memcg_params.deact_rcu_head, kmemcg_deactivate_rcufn);
 }
 
 void memcg_deactivate_kmem_caches(struct mem_cgroup *memcg)
@@ -839,11 +839,11 @@  static void flush_memcg_workqueue(struct kmem_cache *s)
 	mutex_unlock(&slab_mutex);
 
 	/*
-	 * SLUB deactivates the kmem_caches through call_rcu_sched. Make
+	 * SLUB deactivates the kmem_caches through call_rcu. Make
 	 * sure all registered rcu callbacks have been invoked.
 	 */
 	if (IS_ENABLED(CONFIG_SLUB))
-		rcu_barrier_sched();
+		rcu_barrier();
 
 	/*
 	 * SLAB and SLUB create memcg kmem_caches through workqueue and SLUB
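
A hedged sketch (illustrative only; deact_work, deact_rcufn, and the helper
names are hypothetical) of the call_rcu()/rcu_barrier() pairing the
slab_common.c hunks switch to: call_rcu() defers work until a grace period
has elapsed, and rcu_barrier() waits until every callback queued so far has
actually been invoked.

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct deact_work {
	struct rcu_head rcu_head;
	/* ... payload ... */
};

static void deact_rcufn(struct rcu_head *head)
{
	struct deact_work *w = container_of(head, struct deact_work,
					    rcu_head);

	/* ... deactivate, then free, as kmemcg_deactivate_rcufn does ... */
	kfree(w);
}

static void queue_deactivation(struct deact_work *w)
{
	/* Runs deact_rcufn() after a full grace period. */
	call_rcu(&w->rcu_head, deact_rcufn);
}

static void flush_deactivations(void)
{
	/* Blocks until every callback queued by call_rcu() above has run,
	 * which is what flush_memcg_workqueue() needs from rcu_barrier(). */
	rcu_barrier();
}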