From patchwork Thu May 30 13:45:42 2024
From: Frederic Weisbecker
To: LKML
Cc: Frederic Weisbecker, Boqun Feng, Joel Fernandes, Neeraj Upadhyay,
    "Paul E. McKenney", Uladzislau Rezki, Zqiang, rcu
Subject: [PATCH 01/11] rcu/nocb: Introduce RCU_NOCB_LOCKDEP_WARN()
Date: Thu, 30 May 2024 15:45:42 +0200
Message-ID: <20240530134552.5467-2-frederic@kernel.org>

Checking for races against concurrent (de-)offloading implies the
creation of !CONFIG_RCU_NOCB_CPU stubs to check if each relevant lock
is held. For now this only concerns the nocb_lock, but more locks are
to be expected.

Create instead a NOCB-specific version of RCU_LOCKDEP_WARN() to avoid
the proliferation of stubs.
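Side note: a minimal stand-alone sketch (not part of the patch, with
RCU_LOCKDEP_WARN() stubbed out for user space) of how such a
config-gated assertion macro behaves. When CONFIG_RCU_NOCB_CPU is
defined the check is forwarded; otherwise the macro expands to nothing,
so call sites need no per-lock stubs:

#include <stdio.h>

/* User-space stand-in for the kernel's RCU_LOCKDEP_WARN(): complain
 * when the condition 'c' is true. */
#define RCU_LOCKDEP_WARN(c, s) \
        do { if (c) fprintf(stderr, "lockdep warning: %s\n", s); } while (0)

#ifdef CONFIG_RCU_NOCB_CPU
#define RCU_NOCB_LOCKDEP_WARN(c, s) RCU_LOCKDEP_WARN(c, s)
#else
#define RCU_NOCB_LOCKDEP_WARN(c, s)    /* compiles away entirely */
#endif

int main(void)
{
        int nocb_lock_held = 0;    /* hypothetical lock state */

        /* Checked only when built with -DCONFIG_RCU_NOCB_CPU. */
        RCU_NOCB_LOCKDEP_WARN(!nocb_lock_held, "nocb_lock should be held");
        return 0;
}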
Signed-off-by: Frederic Weisbecker
---
 include/linux/rcupdate.h |  7 +++++++
 kernel/rcu/tree_nocb.h   | 14 --------------
 kernel/rcu/tree_plugin.h |  4 ++--
 3 files changed, 9 insertions(+), 16 deletions(-)

diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index be450a3477be..9161e00a0cf4 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -145,11 +145,18 @@ void rcu_init_nohz(void);
 int rcu_nocb_cpu_offload(int cpu);
 int rcu_nocb_cpu_deoffload(int cpu);
 void rcu_nocb_flush_deferred_wakeup(void);
+
+#define RCU_NOCB_LOCKDEP_WARN(c, s) RCU_LOCKDEP_WARN(c, s)
+
 #else /* #ifdef CONFIG_RCU_NOCB_CPU */
+
 static inline void rcu_init_nohz(void) { }
 static inline int rcu_nocb_cpu_offload(int cpu) { return -EINVAL; }
 static inline int rcu_nocb_cpu_deoffload(int cpu) { return 0; }
 static inline void rcu_nocb_flush_deferred_wakeup(void) { }
+
+#define RCU_NOCB_LOCKDEP_WARN(c, s)
+
 #endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */

 /*
diff --git a/kernel/rcu/tree_nocb.h b/kernel/rcu/tree_nocb.h
index 3ce30841119a..f4112fc663a7 100644
--- a/kernel/rcu/tree_nocb.h
+++ b/kernel/rcu/tree_nocb.h
@@ -16,10 +16,6 @@
 #ifdef CONFIG_RCU_NOCB_CPU
 static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
 static bool __read_mostly rcu_nocb_poll;    /* Offload kthread are to poll. */
-static inline int rcu_lockdep_is_held_nocb(struct rcu_data *rdp)
-{
-	return lockdep_is_held(&rdp->nocb_lock);
-}

 static inline bool rcu_current_is_nocb_kthread(struct rcu_data *rdp)
 {
@@ -1653,16 +1649,6 @@ static void show_rcu_nocb_state(struct rcu_data *rdp)

 #else /* #ifdef CONFIG_RCU_NOCB_CPU */

-static inline int rcu_lockdep_is_held_nocb(struct rcu_data *rdp)
-{
-	return 0;
-}
-
-static inline bool rcu_current_is_nocb_kthread(struct rcu_data *rdp)
-{
-	return false;
-}
-
 /* No ->nocb_lock to acquire. */
 static void rcu_nocb_lock(struct rcu_data *rdp)
 {
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 51125f4130fd..0d6b152a9a17 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -24,10 +24,10 @@ static bool rcu_rdp_is_offloaded(struct rcu_data *rdp)
	 * timers have their own means of synchronization against the
	 * offloaded state updaters.
	 */
-	RCU_LOCKDEP_WARN(
+	RCU_NOCB_LOCKDEP_WARN(
		!(lockdep_is_held(&rcu_state.barrier_mutex) ||
		  (IS_ENABLED(CONFIG_HOTPLUG_CPU) && lockdep_is_cpus_held()) ||
-		  rcu_lockdep_is_held_nocb(rdp) ||
+		  lockdep_is_held(&rdp->nocb_lock) ||
		  (!(IS_ENABLED(CONFIG_PREEMPT_COUNT) && preemptible()) &&
		   rdp == this_cpu_ptr(&rcu_data)) ||
		  rcu_current_is_nocb_kthread(rdp)),

From patchwork Thu May 30 13:45:43 2024
From: Frederic Weisbecker
To: LKML
Cc: Frederic Weisbecker, Boqun Feng, Joel Fernandes, Neeraj Upadhyay,
    "Paul E. McKenney", Uladzislau Rezki, Zqiang, rcu
Subject: [PATCH 02/11] rcu/nocb: Move nocb field at the end of state struct
Date: Thu, 30 May 2024 15:45:43 +0200
Message-ID: <20240530134552.5467-3-frederic@kernel.org>

nocb_is_setup is a rarely used field, touched mostly at boot and during
CPU hotplug. It shouldn't occupy the middle of the rcu_state hot fields
cacheline. Move it to the end and build it conditionally while at it.
More cold NOCB fields are to come.
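Side note: the cache-line reasoning can be shown with a stand-alone
sketch (a hypothetical struct, not the real rcu_state): hot fields stay
at the front where they share the leading cache lines, while a
boot-time-only field is parked at the end and compiled out when unused:

#include <stddef.h>
#include <stdio.h>

#define CONFIG_RCU_NOCB_CPU 1    /* pretend the Kconfig option is set */

struct state {
        /* Hot: read on every grace period, keep these together. */
        unsigned long gp_seq;
        unsigned long gp_flags;
#ifdef CONFIG_RCU_NOCB_CPU
        /* Cold: written at boot and CPU hotplug only, so it lives at
         * the end, away from the hot cache lines. */
        int nocb_is_setup;
#endif
};

int main(void)
{
        printf("cold field offset: %zu\n", offsetof(struct state, nocb_is_setup));
        return 0;
}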
Signed-off-by: Frederic Weisbecker
---
 kernel/rcu/tree.h | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index fcf2b4aa3441..a297dc89a09c 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -411,7 +411,6 @@ struct rcu_state {
	arch_spinlock_t ofl_lock ____cacheline_internodealigned_in_smp;
						/* Synchronize offline with */
						/*  GP pre-initialization. */
-	int nocb_is_setup;			/* nocb is setup from boot */

	/* synchronize_rcu() part. */
	struct llist_head srs_next;	/* request a GP users. */
@@ -420,6 +419,10 @@ struct rcu_state {
	struct sr_wait_node srs_wait_nodes[SR_NORMAL_GP_WAIT_HEAD_MAX];
	struct work_struct srs_cleanup_work;
	atomic_t srs_cleanups_pending; /* srs inflight worker cleanups. */
+
+#ifdef CONFIG_RCU_NOCB_CPU
+	int nocb_is_setup;		/* nocb is setup from boot */
+#endif
 };

 /* Values for rcu_state structure's gp_flags field. */

From patchwork Thu May 30 13:45:44 2024
From: Frederic Weisbecker
To: LKML
Cc: Frederic Weisbecker, Boqun Feng, Joel Fernandes, Neeraj Upadhyay,
McKenney" , Uladzislau Rezki , Zqiang , rcu Subject: [PATCH 03/11] rcu/nocb: Assert no callbacks while nocb kthread allocation fails Date: Thu, 30 May 2024 15:45:44 +0200 Message-ID: <20240530134552.5467-4-frederic@kernel.org> X-Mailer: git-send-email 2.45.1 In-Reply-To: <20240530134552.5467-1-frederic@kernel.org> References: <20240530134552.5467-1-frederic@kernel.org> Precedence: bulk X-Mailing-List: rcu@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 When a NOCB CPU fails to create a nocb kthread on bringup, the CPU is then deoffloaded. The barrier mutex is locked at this stage. It is typically used to protect against concurrent (de-)offloading and/or concurrent rcu_barrier() that would otherwise risk a nocb locking imbalance. However: * rcu_barrier() can't run concurrently if it's the boot CPU on early boot-up. * rcu_barrier() can run concurrently if it's a secondary CPU but it is expected to see 0 callbacks on this target because it's the first time it boots. * (de-)offloading can't happen concurrently with smp_init(), as rcutorture is initialized later, at least not before device_initcall(), and userspace isn't available yet. * (de-)offloading can't happen concurrently with cpu_up(), courtesy of cpu_hotplug_lock. But: * The lazy shrinker might run concurrently with cpu_up(). It shouldn't try to grab the nocb_lock and risk an imbalance due to lazy_len supposed to be 0 but be extra cautious. * Also be cautious against resume from hibernation potential subtleties. So keep the locking and add some assertions and comments. Signed-off-by: Frederic Weisbecker --- kernel/rcu/tree_nocb.h | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/kernel/rcu/tree_nocb.h b/kernel/rcu/tree_nocb.h index f4112fc663a7..fdd0616f2fd1 100644 --- a/kernel/rcu/tree_nocb.h +++ b/kernel/rcu/tree_nocb.h @@ -1442,7 +1442,7 @@ static void rcu_spawn_cpu_nocb_kthread(int cpu) "rcuog/%d", rdp_gp->cpu); if (WARN_ONCE(IS_ERR(t), "%s: Could not start rcuo GP kthread, OOM is now expected behavior\n", __func__)) { mutex_unlock(&rdp_gp->nocb_gp_kthread_mutex); - goto end; + goto err; } WRITE_ONCE(rdp_gp->nocb_gp_kthread, t); if (kthread_prio) @@ -1454,7 +1454,7 @@ static void rcu_spawn_cpu_nocb_kthread(int cpu) t = kthread_create(rcu_nocb_cb_kthread, rdp, "rcuo%c/%d", rcu_state.abbr, cpu); if (WARN_ONCE(IS_ERR(t), "%s: Could not start rcuo CB kthread, OOM is now expected behavior\n", __func__)) - goto end; + goto err; if (rcu_rdp_is_offloaded(rdp)) wake_up_process(t); @@ -1467,7 +1467,15 @@ static void rcu_spawn_cpu_nocb_kthread(int cpu) WRITE_ONCE(rdp->nocb_cb_kthread, t); WRITE_ONCE(rdp->nocb_gp_kthread, rdp_gp->nocb_gp_kthread); return; -end: + +err: + /* + * No need to protect against concurrent rcu_barrier() + * because the number of callbacks should be 0 for a non-boot CPU, + * therefore rcu_barrier() shouldn't even try to grab the nocb_lock. + * But hold barrier_mutex to avoid nocb_lock imbalance from shrinker. 
+	 */
+	WARN_ON_ONCE(system_state > SYSTEM_BOOTING && rcu_segcblist_n_cbs(&rdp->cblist));
	mutex_lock(&rcu_state.barrier_mutex);
	if (rcu_rdp_is_offloaded(rdp)) {
		rcu_nocb_rdp_deoffload(rdp);

From patchwork Thu May 30 13:45:45 2024
From: Frederic Weisbecker
To: LKML
Cc: Frederic Weisbecker, Boqun Feng, Joel Fernandes, Neeraj Upadhyay,
    "Paul E. McKenney", Uladzislau Rezki, Zqiang, rcu
Subject: [PATCH 04/11] rcu/nocb: Introduce nocb mutex
Date: Thu, 30 May 2024 15:45:45 +0200
Message-ID: <20240530134552.5467-5-frederic@kernel.org>

The barrier_mutex is currently used to protect (de-)offloading
operations and to prevent nocb_lock imbalance in rcu_barrier() and the
shrinker, and also to prevent misordered RCU barrier invocations.

Now that RCU (de-)offloading is going to happen on offline CPUs, an
RCU barrier will have to be executed while transitioning from the
offloaded to the de-offloaded state. And this can't happen while
holding the barrier_mutex.

Introduce a NOCB mutex to protect (de-)offloading transitions.
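Side note: in user-space terms the resulting locking pattern looks like
the pthread sketch below (a schematic model, not the kernel code): the
new mutex is taken first and the barrier_mutex nests inside it for as
long as it is still needed.

#include <pthread.h>

static pthread_mutex_t nocb_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t barrier_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Model of the (de-)offload path: nocb_mutex guards the transition,
 * barrier_mutex still nests inside it to keep rcu_barrier() ordered. */
static void toggle_offload(int cpu)
{
        (void)cpu;
        pthread_mutex_lock(&nocb_mutex);
        pthread_mutex_lock(&barrier_mutex);
        /* ... flip the offloaded state of 'cpu' here ... */
        pthread_mutex_unlock(&barrier_mutex);
        pthread_mutex_unlock(&nocb_mutex);
}

int main(void)
{
        toggle_offload(0);
        return 0;
}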
The barrier_mutex is still held for now when necessary, to avoid
reordering of barrier callbacks and nocb_lock imbalance.

Signed-off-by: Frederic Weisbecker
---
 kernel/rcu/tree.c        |  3 +++
 kernel/rcu/tree.h        |  1 +
 kernel/rcu/tree_nocb.h   | 20 ++++++++++++--------
 kernel/rcu/tree_plugin.h |  1 +
 4 files changed, 17 insertions(+), 8 deletions(-)

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 4cbc4e78a8c5..e904c187c281 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -97,6 +97,9 @@ static struct rcu_state rcu_state = {
	.srs_cleanup_work = __WORK_INITIALIZER(rcu_state.srs_cleanup_work,
		rcu_sr_normal_gp_cleanup_work),
	.srs_cleanups_pending = ATOMIC_INIT(0),
+#ifdef CONFIG_RCU_NOCB_CPU
+	.nocb_mutex = __MUTEX_INITIALIZER(rcu_state.nocb_mutex),
+#endif
 };

 /* Dump rcu_node combining tree at boot to verify correct setup. */
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index a297dc89a09c..16e6fe63d93c 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -421,6 +421,7 @@ struct rcu_state {
	atomic_t srs_cleanups_pending; /* srs inflight worker cleanups. */

 #ifdef CONFIG_RCU_NOCB_CPU
+	struct mutex nocb_mutex;	/* Guards (de-)offloading */
	int nocb_is_setup;		/* nocb is setup from boot */
 #endif
 };
diff --git a/kernel/rcu/tree_nocb.h b/kernel/rcu/tree_nocb.h
index fdd0616f2fd1..16bcb8b13a5e 100644
--- a/kernel/rcu/tree_nocb.h
+++ b/kernel/rcu/tree_nocb.h
@@ -1141,6 +1141,7 @@ int rcu_nocb_cpu_deoffload(int cpu)
	int ret = 0;

	cpus_read_lock();
+	mutex_lock(&rcu_state.nocb_mutex);
	mutex_lock(&rcu_state.barrier_mutex);
	if (rcu_rdp_is_offloaded(rdp)) {
		if (cpu_online(cpu)) {
@@ -1153,6 +1154,7 @@ int rcu_nocb_cpu_deoffload(int cpu)
		}
	}
	mutex_unlock(&rcu_state.barrier_mutex);
+	mutex_unlock(&rcu_state.nocb_mutex);
	cpus_read_unlock();

	return ret;
@@ -1228,6 +1230,7 @@ int rcu_nocb_cpu_offload(int cpu)
	int ret = 0;

	cpus_read_lock();
+	mutex_lock(&rcu_state.nocb_mutex);
	mutex_lock(&rcu_state.barrier_mutex);
	if (!rcu_rdp_is_offloaded(rdp)) {
		if (cpu_online(cpu)) {
@@ -1240,6 +1243,7 @@ int rcu_nocb_cpu_offload(int cpu)
		}
	}
	mutex_unlock(&rcu_state.barrier_mutex);
+	mutex_unlock(&rcu_state.nocb_mutex);
	cpus_read_unlock();

	return ret;
@@ -1257,7 +1261,7 @@ lazy_rcu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
		return 0;

	/* Protect rcu_nocb_mask against concurrent (de-)offloading. */
-	if (!mutex_trylock(&rcu_state.barrier_mutex))
+	if (!mutex_trylock(&rcu_state.nocb_mutex))
		return 0;

	/* Snapshot count of all CPUs */
@@ -1267,7 +1271,7 @@ lazy_rcu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
		count += READ_ONCE(rdp->lazy_len);
	}

-	mutex_unlock(&rcu_state.barrier_mutex);
+	mutex_unlock(&rcu_state.nocb_mutex);

	return count ? count : SHRINK_EMPTY;
 }
@@ -1285,9 +1289,9 @@ lazy_rcu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
	 * Protect against concurrent (de-)offloading. Otherwise nocb locking
	 * may be ignored or imbalanced.
	 */
-	if (!mutex_trylock(&rcu_state.barrier_mutex)) {
+	if (!mutex_trylock(&rcu_state.nocb_mutex)) {
		/*
-		 * But really don't insist if barrier_mutex is contended since we
+		 * But really don't insist if nocb_mutex is contended since we
		 * can't guarantee that it will never engage in a dependency
		 * chain involving memory allocation. The lock is seldom contended
		 * anyway.
@@ -1326,7 +1330,7 @@ lazy_rcu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
		break;
	}

-	mutex_unlock(&rcu_state.barrier_mutex);
+	mutex_unlock(&rcu_state.nocb_mutex);

	return count ? count : SHRINK_STOP;
 }

@@ -1473,15 +1477,15 @@ static void rcu_spawn_cpu_nocb_kthread(int cpu)
	 * No need to protect against concurrent rcu_barrier()
	 * because the number of callbacks should be 0 for a non-boot CPU,
	 * therefore rcu_barrier() shouldn't even try to grab the nocb_lock.
-	 * But hold barrier_mutex to avoid nocb_lock imbalance from shrinker.
+	 * But hold nocb_mutex to avoid nocb_lock imbalance from shrinker.
	 */
	WARN_ON_ONCE(system_state > SYSTEM_BOOTING && rcu_segcblist_n_cbs(&rdp->cblist));
-	mutex_lock(&rcu_state.barrier_mutex);
+	mutex_lock(&rcu_state.nocb_mutex);
	if (rcu_rdp_is_offloaded(rdp)) {
		rcu_nocb_rdp_deoffload(rdp);
		cpumask_clear_cpu(cpu, rcu_nocb_mask);
	}
-	mutex_unlock(&rcu_state.barrier_mutex);
+	mutex_unlock(&rcu_state.nocb_mutex);
 }

 /* How many CB CPU IDs per GP kthread?  Default of -1 for sqrt(nr_cpu_ids). */
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 0d6b152a9a17..05239042a08b 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -28,6 +28,7 @@ static bool rcu_rdp_is_offloaded(struct rcu_data *rdp)
		!(lockdep_is_held(&rcu_state.barrier_mutex) ||
		  (IS_ENABLED(CONFIG_HOTPLUG_CPU) && lockdep_is_cpus_held()) ||
		  lockdep_is_held(&rdp->nocb_lock) ||
+		  lockdep_is_held(&rcu_state.nocb_mutex) ||
		  (!(IS_ENABLED(CONFIG_PREEMPT_COUNT) && preemptible()) &&
		   rdp == this_cpu_ptr(&rcu_data)) ||
		  rcu_current_is_nocb_kthread(rdp)),
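Side note on the shrinker hunks above: a shrinker can be invoked from
reclaim, so blocking on a contended mutex could create a dependency
chain through memory allocation; the code therefore uses
mutex_trylock() and simply reports nothing when contended. A
stand-alone pthread model of that pattern (not the kernel code):

#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

static pthread_mutex_t nocb_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Count shrinkable objects, bailing out instead of blocking if the
 * mutex is contended (mirrors the SHRINK_EMPTY behavior, simplified). */
static size_t shrink_count(void)
{
        size_t count = 0;

        if (pthread_mutex_trylock(&nocb_mutex) != 0)
                return 0;    /* contended: report nothing this round */
        /* ... sum per-CPU lazy callback counts into 'count' ... */
        pthread_mutex_unlock(&nocb_mutex);
        return count;
}

int main(void)
{
        printf("count=%zu\n", shrink_count());
        return 0;
}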
From patchwork Thu May 30 13:45:46 2024
From: Frederic Weisbecker
To: LKML
Cc: Frederic Weisbecker, Boqun Feng, Joel Fernandes, Neeraj Upadhyay,
    "Paul E. McKenney", Uladzislau Rezki, Zqiang, rcu
Subject: [PATCH 05/11] rcu/nocb: (De-)offload callbacks on offline CPUs only
Date: Thu, 30 May 2024 15:45:46 +0200
Message-ID: <20240530134552.5467-6-frederic@kernel.org>

Currently callbacks can be (de-)offloaded only on online CPUs. This
involves an overly elaborate state machine in order to make sure that
callbacks are always handled during the process while ensuring
synchronization between rcu_core and the NOCB kthreads.

The only potential user of NOCB (de-)offloading appears to be a
nohz_full toggling interface through cpusets. And the general agreement
is now to work toward toggling the nohz_full state on offline CPUs to
simplify the whole picture.

Therefore, convert the (de-)offloading to only support offline CPUs.
This involves the following changes:

* Call rcu_barrier() before deoffloading. An offline offloaded CPU may
  still carry callbacks in its queue ignored by
  rcutree_migrate_callbacks(). Those callbacks must all be flushed
  before switching to a regular queue because no more kthreads will
  handle those before the CPU ever gets re-onlined.

  This means that further calls to rcu_barrier() will find an empty
  queue until the CPU goes through rcutree_report_cpu_starting(). As a
  result it is guaranteed that further rcu_barrier() won't try to lock
  the nocb_lock for that target and thus won't risk an imbalance.
  Therefore barrier_mutex doesn't need to be locked anymore upon
  deoffloading.

* Assume the queue is empty before offloading, as
  rcutree_migrate_callbacks() took care of everything.

  This means that further calls to rcu_barrier() will find an empty
  queue until the CPU goes through rcutree_report_cpu_starting(). As a
  result it is guaranteed that further rcu_barrier() won't risk a
  nocb_lock imbalance. Therefore barrier_mutex doesn't need to be
  locked anymore upon offloading.

* No need to flush the bypass anymore.

Further simplifications will follow in upcoming patches.

Signed-off-by: Frederic Weisbecker
---
 kernel/rcu/tree_nocb.h | 82 +++++++++++-------------------------------
 1 file changed, 21 insertions(+), 61 deletions(-)

diff --git a/kernel/rcu/tree_nocb.h b/kernel/rcu/tree_nocb.h
index 16bcb8b13a5e..8e766396df3a 100644
--- a/kernel/rcu/tree_nocb.h
+++ b/kernel/rcu/tree_nocb.h
@@ -1049,43 +1049,26 @@ static int rdp_offload_toggle(struct rcu_data *rdp,
	return wake_gp;
 }

-static long rcu_nocb_rdp_deoffload(void *arg)
+static int rcu_nocb_rdp_deoffload(struct rcu_data *rdp)
 {
-	struct rcu_data *rdp = arg;
	struct rcu_segcblist *cblist = &rdp->cblist;
	unsigned long flags;
	int wake_gp;
	struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;

-	/*
-	 * rcu_nocb_rdp_deoffload() may be called directly if
-	 * rcuog/o[p] spawn failed, because at this time the rdp->cpu
-	 * is not online yet.
-	 */
-	WARN_ON_ONCE((rdp->cpu != raw_smp_processor_id()) && cpu_online(rdp->cpu));
+	/* CPU must be offline, unless it's early boot */
+	WARN_ON_ONCE(cpu_online(rdp->cpu) && rdp->cpu != raw_smp_processor_id());

	pr_info("De-offloading %d\n", rdp->cpu);

+	/* Flush all callbacks from segcblist and bypass */
+	rcu_barrier();
+
	rcu_nocb_lock_irqsave(rdp, flags);
-	/*
-	 * Flush once and for all now. This suffices because we are
-	 * running on the target CPU holding ->nocb_lock (thus having
-	 * interrupts disabled), and because rdp_offload_toggle()
-	 * invokes rcu_segcblist_offload(), which clears SEGCBLIST_OFFLOADED.
-	 * Thus future calls to rcu_segcblist_completely_offloaded() will
-	 * return false, which means that future calls to rcu_nocb_try_bypass()
-	 * will refuse to put anything into the bypass.
-	 */
-	WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, jiffies, false));
-	/*
-	 * Start with invoking rcu_core() early. This way if the current thread
-	 * happens to preempt an ongoing call to rcu_core() in the middle,
-	 * leaving some work dismissed because rcu_core() still thinks the rdp is
-	 * completely offloaded, we are guaranteed a nearby future instance of
-	 * rcu_core() to catch up.
-	 */
+	WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
+	WARN_ON_ONCE(rcu_segcblist_n_cbs(&rdp->cblist));
+
	rcu_segcblist_set_flags(cblist, SEGCBLIST_RCU_CORE);
-	invoke_rcu_core();
	wake_gp = rdp_offload_toggle(rdp, false, flags);

	mutex_lock(&rdp_gp->nocb_gp_kthread_mutex);
@@ -1128,10 +1111,6 @@ static long rcu_nocb_rdp_deoffload(void *arg)
	 */
	raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);

-	/* Sanity check */
-	WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
-
	return 0;
 }

@@ -1142,18 +1121,16 @@ int rcu_nocb_cpu_deoffload(int cpu)

	cpus_read_lock();
	mutex_lock(&rcu_state.nocb_mutex);
-	mutex_lock(&rcu_state.barrier_mutex);
	if (rcu_rdp_is_offloaded(rdp)) {
-		if (cpu_online(cpu)) {
-			ret = work_on_cpu(cpu, rcu_nocb_rdp_deoffload, rdp);
+		if (!cpu_online(cpu)) {
+			ret = rcu_nocb_rdp_deoffload(rdp);
			if (!ret)
				cpumask_clear_cpu(cpu, rcu_nocb_mask);
		} else {
-			pr_info("NOCB: Cannot CB-deoffload offline CPU %d\n", rdp->cpu);
+			pr_info("NOCB: Cannot CB-deoffload online CPU %d\n", rdp->cpu);
			ret = -EINVAL;
		}
	}
-	mutex_unlock(&rcu_state.barrier_mutex);
	mutex_unlock(&rcu_state.nocb_mutex);
	cpus_read_unlock();

	return ret;
@@ -1161,15 +1138,14 @@ int rcu_nocb_cpu_deoffload(int cpu)
 }
 EXPORT_SYMBOL_GPL(rcu_nocb_cpu_deoffload);

-static long rcu_nocb_rdp_offload(void *arg)
+static int rcu_nocb_rdp_offload(struct rcu_data *rdp)
 {
-	struct rcu_data *rdp = arg;
	struct rcu_segcblist *cblist = &rdp->cblist;
	unsigned long flags;
	int wake_gp;
	struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;

-	WARN_ON_ONCE(rdp->cpu != raw_smp_processor_id());
+	WARN_ON_ONCE(cpu_online(rdp->cpu));
	/*
	 * For now we only support re-offload, ie: the rdp must have been
	 * offloaded on boot first.
@@ -1182,28 +1158,15 @@ static long rcu_nocb_rdp_offload(void *arg)

	pr_info("Offloading %d\n", rdp->cpu);

+	WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
+	WARN_ON_ONCE(rcu_segcblist_n_cbs(&rdp->cblist));
+
	/*
	 * Can't use rcu_nocb_lock_irqsave() before SEGCBLIST_LOCKING
	 * is set.
	 */
	raw_spin_lock_irqsave(&rdp->nocb_lock, flags);

-	/*
-	 * We didn't take the nocb lock while working on the
-	 * rdp->cblist with SEGCBLIST_LOCKING cleared (pure softirq/rcuc mode).
-	 * Every modifications that have been done previously on
-	 * rdp->cblist must be visible remotely by the nocb kthreads
-	 * upon wake up after reading the cblist flags.
-	 *
-	 * The layout against nocb_lock enforces that ordering:
-	 *
-	 *       __rcu_nocb_rdp_offload()        nocb_cb_wait()/nocb_gp_wait()
-	 *
-	 * -------------------------           ----------------------------
-	 *      WRITE callbacks                     rcu_nocb_lock()
-	 *      rcu_nocb_lock()                     READ flags
-	 *      WRITE flags                         READ callbacks
-	 *      rcu_nocb_unlock()                   rcu_nocb_unlock()
-	 */
	wake_gp = rdp_offload_toggle(rdp, true, flags);
	if (wake_gp)
		wake_up_process(rdp_gp->nocb_gp_kthread);
@@ -1214,8 +1177,7 @@ static long rcu_nocb_rdp_offload(void *arg)
		rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP));

	/*
-	 * All kthreads are ready to work, we can finally relieve rcu_core() and
-	 * enable nocb bypass.
+	 * All kthreads are ready to work, we can finally enable nocb bypass.
	 */
	rcu_nocb_lock_irqsave(rdp, flags);
	rcu_segcblist_clear_flags(cblist, SEGCBLIST_RCU_CORE);
@@ -1231,18 +1193,16 @@ int rcu_nocb_cpu_offload(int cpu)

	cpus_read_lock();
	mutex_lock(&rcu_state.nocb_mutex);
-	mutex_lock(&rcu_state.barrier_mutex);
	if (!rcu_rdp_is_offloaded(rdp)) {
-		if (cpu_online(cpu)) {
-			ret = work_on_cpu(cpu, rcu_nocb_rdp_offload, rdp);
+		if (!cpu_online(cpu)) {
+			ret = rcu_nocb_rdp_offload(rdp);
			if (!ret)
				cpumask_set_cpu(cpu, rcu_nocb_mask);
		} else {
-			pr_info("NOCB: Cannot CB-offload offline CPU %d\n", rdp->cpu);
+			pr_info("NOCB: Cannot CB-offload online CPU %d\n", rdp->cpu);
			ret = -EINVAL;
		}
	}
-	mutex_unlock(&rcu_state.barrier_mutex);
	mutex_unlock(&rcu_state.nocb_mutex);
	cpus_read_unlock();
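Side note: the new rcu_nocb_rdp_deoffload() ordering above reduces to
"flush everything, assert empty, then flip state", which only works
because the CPU is offline and nothing can be enqueued concurrently. A
toy model of that ordering (stub types, not kernel code):

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct queue { int n_cbs; };

/* Stand-in for rcu_barrier(): all pending callbacks are drained. */
static void flush_all(struct queue *q)
{
        q->n_cbs = 0;
}

/* Flush first, then assert the queue is empty, then flip the state.
 * Safe only because no enqueue can race with an offline CPU. */
static void deoffload(struct queue *q, bool *offloaded)
{
        flush_all(q);
        assert(q->n_cbs == 0);
        *offloaded = false;
}

int main(void)
{
        struct queue q = { .n_cbs = 3 };
        bool offloaded = true;

        deoffload(&q, &offloaded);
        printf("offloaded=%d n_cbs=%d\n", offloaded, q.n_cbs);
        return 0;
}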
From patchwork Thu May 30 13:45:47 2024
From: Frederic Weisbecker
To: LKML
Cc: Frederic Weisbecker, Boqun Feng, Joel Fernandes, Neeraj Upadhyay,
    "Paul E. McKenney", Uladzislau Rezki, Zqiang, rcu
Subject: [PATCH 06/11] rcu/nocb: Remove halfway (de-)offloading handling from bypass
Date: Thu, 30 May 2024 15:45:47 +0200
Message-ID: <20240530134552.5467-7-frederic@kernel.org>

Bypass enqueue can no longer happen in the middle of (de-)offloading
since this sort of transition now only applies to offline CPUs. The
related safety check can therefore be removed.

Signed-off-by: Frederic Weisbecker
---
 kernel/rcu/tree_nocb.h | 8 --------
 1 file changed, 8 deletions(-)

diff --git a/kernel/rcu/tree_nocb.h b/kernel/rcu/tree_nocb.h
index 8e766396df3a..af44e75eb0cd 100644
--- a/kernel/rcu/tree_nocb.h
+++ b/kernel/rcu/tree_nocb.h
@@ -409,14 +409,6 @@ static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
		return false;
	}

-	// In the process of (de-)offloading: no bypassing, but
-	// locking.
-	if (!rcu_segcblist_completely_offloaded(&rdp->cblist)) {
-		rcu_nocb_lock(rdp);
-		*was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
-		return false; /* Not offloaded, no bypassing. */
-	}
-
	// Don't use ->nocb_bypass during early boot.
	if (rcu_scheduler_active != RCU_SCHEDULER_RUNNING) {
		rcu_nocb_lock(rdp);
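Side note: with the transition case gone, the entry gating of
rcu_nocb_try_bypass() condenses to roughly the following decision (a
hypothetical simplification for illustration, not the actual remaining
code):

#include <stdbool.h>

/* Condensed shape of the remaining bypass gating: no bypass unless
 * the rdp is offloaded and the scheduler is fully up. */
static bool may_use_bypass(bool offloaded, bool scheduler_running)
{
        if (!offloaded)
                return false;    /* not offloaded: bypass never applies */
        if (!scheduler_running)
                return false;    /* early boot: don't use the bypass yet */
        return true;
}

int main(void)
{
        return may_use_bypass(true, true) ? 0 : 1;
}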
From patchwork Thu May 30 13:45:48 2024
From: Frederic Weisbecker
To: LKML
Cc: Frederic Weisbecker, Boqun Feng, Joel Fernandes, Neeraj Upadhyay,
    "Paul E. McKenney", Uladzislau Rezki, Zqiang, rcu
Subject: [PATCH 07/11] rcu/nocb: Remove halfway (de-)offloading handling from rcu_core()'s QS reporting
Date: Thu, 30 May 2024 15:45:48 +0200
Message-ID: <20240530134552.5467-8-frederic@kernel.org>

RCU core can't be running anymore while in the middle of (de-)offloading
since this sort of transition now only applies to offline CPUs. The
locked callback acceleration handling during the transition can
therefore be removed.

Signed-off-by: Frederic Weisbecker
---
 kernel/rcu/tree.c | 13 -------------
 1 file changed, 13 deletions(-)

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index e904c187c281..c502a38dc5f3 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -2381,7 +2381,6 @@ rcu_report_qs_rdp(struct rcu_data *rdp)
 {
	unsigned long flags;
	unsigned long mask;
-	bool needacc = false;
	struct rcu_node *rnp;

	WARN_ON_ONCE(rdp->cpu != smp_processor_id());
@@ -2418,23 +2417,11 @@ rcu_report_qs_rdp(struct rcu_data *rdp)
		 * to return true. So complain, but don't awaken.
		 */
		WARN_ON_ONCE(rcu_accelerate_cbs(rnp, rdp));
-	} else if (!rcu_segcblist_completely_offloaded(&rdp->cblist)) {
-		/*
-		 * ...but NOCB kthreads may miss or delay callbacks acceleration
-		 * if in the middle of a (de-)offloading process.
-		 */
-		needacc = true;
	}

	rcu_disable_urgency_upon_qs(rdp);
	rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
	/* ^^^ Released rnp->lock */
-
-	if (needacc) {
-		rcu_nocb_lock_irqsave(rdp, flags);
-		rcu_accelerate_cbs_unlocked(rnp, rdp);
-		rcu_nocb_unlock_irqrestore(rdp, flags);
-	}
 }
 }

From patchwork Thu May 30 13:45:49 2024
From: Frederic Weisbecker
To: LKML
Cc: Frederic Weisbecker, Boqun Feng, Joel Fernandes, Neeraj Upadhyay,
    "Paul E. McKenney", Uladzislau Rezki, Zqiang, rcu
Subject: [PATCH 08/11] rcu/nocb: Remove halfway (de-)offloading handling from rcu_core
Date: Thu, 30 May 2024 15:45:49 +0200
Message-ID: <20240530134552.5467-9-frederic@kernel.org>

RCU core can't be running anymore while in the middle of (de-)offloading
since this sort of transition now only applies to offline CPUs. The
locked callback acceleration handling during the transition can
therefore be removed, along with the support for concurrent batch
execution.
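Side note: the rcu_nocb_lock_irqsave() helper being replaced in the
hunks below is dual-mode: it takes the nocb spinlock for an offloaded
rdp and merely disables interrupts otherwise. Once rcu_core() can
assume a non-offloaded rdp, only the IRQ branch is live, hence the
switch to plain local_irq_save(). A user-space model with stubbed
primitives (illustrative only, not the kernel code):

#include <stdbool.h>
#include <stdio.h>

struct rdp { bool offloaded; bool locked; };

/* Stubs standing in for kernel primitives, for illustration only. */
static void spin_lock(struct rdp *r)   { r->locked = true; }
static void spin_unlock(struct rdp *r) { r->locked = false; }
static void irq_save(void)    { /* interrupts off in the kernel */ }
static void irq_restore(void) { /* interrupts on again */ }

/* Dual-mode shape: nocb spinlock when offloaded, IRQ-off otherwise.
 * With only non-offloaded rdp reaching rcu_core(), the spinlock
 * branch is dead and plain IRQ disabling suffices. */
static void nocb_lock_irqsave(struct rdp *r)
{
        irq_save();
        if (r->offloaded)
                spin_lock(r);
}

static void nocb_unlock_irqrestore(struct rdp *r)
{
        if (r->offloaded)
                spin_unlock(r);
        irq_restore();
}

int main(void)
{
        struct rdp r = { .offloaded = false };

        nocb_lock_irqsave(&r);
        printf("spinlock taken: %d\n", r.locked);    /* 0: IRQ path only */
        nocb_unlock_irqrestore(&r);
        return 0;
}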
Signed-off-by: Frederic Weisbecker
---
 kernel/rcu/tree.c | 26 ++++----------------------
 1 file changed, 4 insertions(+), 22 deletions(-)

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index c502a38dc5f3..4f1863a0536d 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -2776,24 +2776,6 @@ static __latent_entropy void rcu_core(void)
	unsigned long flags;
	struct rcu_data *rdp = raw_cpu_ptr(&rcu_data);
	struct rcu_node *rnp = rdp->mynode;
-	/*
-	 * On RT rcu_core() can be preempted when IRQs aren't disabled.
-	 * Therefore this function can race with concurrent NOCB (de-)offloading
-	 * on this CPU and the below condition must be considered volatile.
-	 * However if we race with:
-	 *
-	 * _ Offloading:   In the worst case we accelerate or process callbacks
-	 *                 concurrently with NOCB kthreads. We are guaranteed to
-	 *                 call rcu_nocb_lock() if that happens.
-	 *
-	 * _ Deoffloading: In the worst case we miss callbacks acceleration or
-	 *                 processing. This is fine because the early stage
-	 *                 of deoffloading invokes rcu_core() after setting
-	 *                 SEGCBLIST_RCU_CORE. So we guarantee that we'll process
-	 *                 what could have been dismissed without the need to wait
-	 *                 for the next rcu_pending() check in the next jiffy.
-	 */
-	const bool do_batch = !rcu_segcblist_completely_offloaded(&rdp->cblist);

	if (cpu_is_offline(smp_processor_id()))
		return;
@@ -2813,17 +2795,17 @@ static __latent_entropy void rcu_core(void)

	/* No grace period and unregistered callbacks? */
	if (!rcu_gp_in_progress() &&
-	    rcu_segcblist_is_enabled(&rdp->cblist) && do_batch) {
-		rcu_nocb_lock_irqsave(rdp, flags);
+	    rcu_segcblist_is_enabled(&rdp->cblist) && !rcu_rdp_is_offloaded(rdp)) {
+		local_irq_save(flags);
		if (!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
			rcu_accelerate_cbs_unlocked(rnp, rdp);
-		rcu_nocb_unlock_irqrestore(rdp, flags);
+		local_irq_restore(flags);
	}

	rcu_check_gp_start_stall(rnp, rdp, rcu_jiffies_till_stall_check());

	/* If there are callbacks ready, invoke them. */
-	if (do_batch && rcu_segcblist_ready_cbs(&rdp->cblist) &&
+	if (!rcu_rdp_is_offloaded(rdp) && rcu_segcblist_ready_cbs(&rdp->cblist) &&
	    likely(READ_ONCE(rcu_scheduler_fully_active))) {
		rcu_do_batch(rdp);
		/* Re-invoke RCU core processing if there are callbacks remaining.
		 */

From patchwork Thu May 30 13:45:50 2024
From: Frederic Weisbecker
To: LKML
Cc: Frederic Weisbecker, Boqun Feng, Joel Fernandes, Neeraj Upadhyay,
    "Paul E. McKenney", Uladzislau Rezki, Zqiang, rcu
Subject: [PATCH 09/11] rcu/nocb: Remove SEGCBLIST_RCU_CORE
Date: Thu, 30 May 2024 15:45:50 +0200
Message-ID: <20240530134552.5467-10-frederic@kernel.org>

RCU core can't be running anymore while in the middle of (de-)offloading
since this sort of transition now only applies to offline CPUs. The
SEGCBLIST_RCU_CORE state can therefore be removed.
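Side note: the segcblist state is a plain bit mask, so removing a state
just renumbers the remaining bits, as the hunks below do (and as the
later patches in this series repeat until only ENABLED and OFFLOADED
remain). A minimal model of the flag helpers (illustrative, not the
kernel code):

#include <stdbool.h>
#include <stdio.h>

#define BIT(n)              (1UL << (n))
#define SEGCBLIST_ENABLED   BIT(0)
#define SEGCBLIST_OFFLOADED BIT(1)    /* final numbering by end of series */

struct seglist { unsigned long flags; };

static bool test_flags(const struct seglist *s, unsigned long f)
{
        return (s->flags & f) != 0;
}

int main(void)
{
        struct seglist s = { .flags = SEGCBLIST_ENABLED | SEGCBLIST_OFFLOADED };

        printf("offloaded: %d\n", test_flags(&s, SEGCBLIST_OFFLOADED));
        return 0;
}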
Signed-off-by: Frederic Weisbecker
---
 include/linux/rcu_segcblist.h |  9 ++++-----
 kernel/rcu/rcu_segcblist.h    |  9 ---------
 kernel/rcu/tree.c             |  3 ---
 kernel/rcu/tree_nocb.h        |  9 ---------
 4 files changed, 4 insertions(+), 26 deletions(-)

diff --git a/include/linux/rcu_segcblist.h b/include/linux/rcu_segcblist.h
index ba95c06675e1..5469c54cd778 100644
--- a/include/linux/rcu_segcblist.h
+++ b/include/linux/rcu_segcblist.h
@@ -185,11 +185,10 @@ struct rcu_cblist {
 * ----------------------------------------------------------------------------
 */
 #define SEGCBLIST_ENABLED	BIT(0)
-#define SEGCBLIST_RCU_CORE	BIT(1)
-#define SEGCBLIST_LOCKING	BIT(2)
-#define SEGCBLIST_KTHREAD_CB	BIT(3)
-#define SEGCBLIST_KTHREAD_GP	BIT(4)
-#define SEGCBLIST_OFFLOADED	BIT(5)
+#define SEGCBLIST_LOCKING	BIT(1)
+#define SEGCBLIST_KTHREAD_CB	BIT(2)
+#define SEGCBLIST_KTHREAD_GP	BIT(3)
+#define SEGCBLIST_OFFLOADED	BIT(4)

 struct rcu_segcblist {
	struct rcu_head *head;
diff --git a/kernel/rcu/rcu_segcblist.h b/kernel/rcu/rcu_segcblist.h
index 4fe877f5f654..7a0962dfee86 100644
--- a/kernel/rcu/rcu_segcblist.h
+++ b/kernel/rcu/rcu_segcblist.h
@@ -95,15 +95,6 @@ static inline bool rcu_segcblist_is_offloaded(struct rcu_segcblist *rsclp)
	return false;
 }

-static inline bool rcu_segcblist_completely_offloaded(struct rcu_segcblist *rsclp)
-{
-	if (IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
-	    !rcu_segcblist_test_flags(rsclp, SEGCBLIST_RCU_CORE))
-		return true;
-
-	return false;
-}
-
 /*
  * Are all segments following the specified segment of the specified
  * rcu_segcblist structure empty of callbacks?  (The specified
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 4f1863a0536d..8bec3c0c9636 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -79,9 +79,6 @@ static void rcu_sr_normal_gp_cleanup_work(struct work_struct *);

 static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data) = {
	.gpwrap = true,
-#ifdef CONFIG_RCU_NOCB_CPU
-	.cblist.flags = SEGCBLIST_RCU_CORE,
-#endif
 };
 static struct rcu_state rcu_state = {
	.level = { &rcu_state.node[0] },
diff --git a/kernel/rcu/tree_nocb.h b/kernel/rcu/tree_nocb.h
index af44e75eb0cd..24daf606de0c 100644
--- a/kernel/rcu/tree_nocb.h
+++ b/kernel/rcu/tree_nocb.h
@@ -1060,7 +1060,6 @@ static int rcu_nocb_rdp_deoffload(struct rcu_data *rdp)
	WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
	WARN_ON_ONCE(rcu_segcblist_n_cbs(&rdp->cblist));

-	rcu_segcblist_set_flags(cblist, SEGCBLIST_RCU_CORE);
	wake_gp = rdp_offload_toggle(rdp, false, flags);

	mutex_lock(&rdp_gp->nocb_gp_kthread_mutex);
@@ -1168,13 +1167,6 @@ static int rcu_nocb_rdp_offload(struct rcu_data *rdp)
	swait_event_exclusive(rdp->nocb_state_wq,
			      rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP));

-	/*
-	 * All kthreads are ready to work, we can finally enable nocb bypass.
-	 */
-	rcu_nocb_lock_irqsave(rdp, flags);
-	rcu_segcblist_clear_flags(cblist, SEGCBLIST_RCU_CORE);
-	rcu_nocb_unlock_irqrestore(rdp, flags);
-
	return 0;
 }

@@ -1350,7 +1342,6 @@ void __init rcu_init_nohz(void)
		rcu_segcblist_init(&rdp->cblist);
		rcu_segcblist_offload(&rdp->cblist, true);
		rcu_segcblist_set_flags(&rdp->cblist, SEGCBLIST_KTHREAD_GP);
-		rcu_segcblist_clear_flags(&rdp->cblist, SEGCBLIST_RCU_CORE);
	}
	rcu_organize_nocb_kthreads();
 }

From patchwork Thu May 30 13:45:51 2024
From: Frederic Weisbecker
To: LKML
Cc: Frederic Weisbecker, Boqun Feng, Joel Fernandes, Neeraj Upadhyay,
    "Paul E. McKenney", Uladzislau Rezki, Zqiang, rcu
Subject: [PATCH 10/11] rcu/nocb: Remove SEGCBLIST_KTHREAD_CB
Date: Thu, 30 May 2024 15:45:51 +0200
Message-ID: <20240530134552.5467-11-frederic@kernel.org>

This state excerpt from the (de-)offloading state machine was used to
implement an ad-hoc kthread parking of rcuo kthreads. This code has
been removed and therefore the related state can be erased as well.
Signed-off-by: Frederic Weisbecker
---
 include/linux/rcu_segcblist.h | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/include/linux/rcu_segcblist.h b/include/linux/rcu_segcblist.h
index 5469c54cd778..1ef1bb54853d 100644
--- a/include/linux/rcu_segcblist.h
+++ b/include/linux/rcu_segcblist.h
@@ -186,9 +186,8 @@ struct rcu_cblist {
 */
 #define SEGCBLIST_ENABLED	BIT(0)
 #define SEGCBLIST_LOCKING	BIT(1)
-#define SEGCBLIST_KTHREAD_CB	BIT(2)
-#define SEGCBLIST_KTHREAD_GP	BIT(3)
-#define SEGCBLIST_OFFLOADED	BIT(4)
+#define SEGCBLIST_KTHREAD_GP	BIT(2)
+#define SEGCBLIST_OFFLOADED	BIT(3)

 struct rcu_segcblist {
	struct rcu_head *head;

From patchwork Thu May 30 13:45:52 2024
From: Frederic Weisbecker
To: LKML
Cc: Frederic Weisbecker, Boqun Feng, Joel Fernandes, Neeraj Upadhyay,
McKenney" , Uladzislau Rezki , Zqiang , rcu Subject: [PATCH 11/11] rcu/nocb: Simplify (de-)offloading state machine Date: Thu, 30 May 2024 15:45:52 +0200 Message-ID: <20240530134552.5467-12-frederic@kernel.org> X-Mailer: git-send-email 2.45.1 In-Reply-To: <20240530134552.5467-1-frederic@kernel.org> References: <20240530134552.5467-1-frederic@kernel.org> Precedence: bulk X-Mailing-List: rcu@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Now that the (de-)offloading process can only apply to offline CPUs, there is no more concurrency between rcu_core and nocb kthreads. Also the mutation now happens on empty queues. Therefore the state machine can be reduced to a single bit called SEGCBLIST_OFFLOADED. Simplify the transition as follows: * Upon offloading: queue the rdp to be added to the rcuog list and wait for the rcuog kthread to set the SEGCBLIST_OFFLOADED bit. Unpark rcuo kthread. * Upon de-offloading: Park rcuo kthread. Queue the rdp to be removed from the rcuog list and wait for the rcuog kthread to clear the SEGCBLIST_OFFLOADED bit. Signed-off-by: Frederic Weisbecker --- include/linux/rcu_segcblist.h | 4 +- kernel/rcu/rcu_segcblist.c | 11 --- kernel/rcu/rcu_segcblist.h | 2 +- kernel/rcu/tree_nocb.h | 129 ++++++++++++++++------------------ 4 files changed, 61 insertions(+), 85 deletions(-) diff --git a/include/linux/rcu_segcblist.h b/include/linux/rcu_segcblist.h index 1ef1bb54853d..2fdc2208f1ca 100644 --- a/include/linux/rcu_segcblist.h +++ b/include/linux/rcu_segcblist.h @@ -185,9 +185,7 @@ struct rcu_cblist { * ---------------------------------------------------------------------------- */ #define SEGCBLIST_ENABLED BIT(0) -#define SEGCBLIST_LOCKING BIT(1) -#define SEGCBLIST_KTHREAD_GP BIT(2) -#define SEGCBLIST_OFFLOADED BIT(3) +#define SEGCBLIST_OFFLOADED BIT(1) struct rcu_segcblist { struct rcu_head *head; diff --git a/kernel/rcu/rcu_segcblist.c b/kernel/rcu/rcu_segcblist.c index 1693ea22ef1b..298a2c573f02 100644 --- a/kernel/rcu/rcu_segcblist.c +++ b/kernel/rcu/rcu_segcblist.c @@ -260,17 +260,6 @@ void rcu_segcblist_disable(struct rcu_segcblist *rsclp) rcu_segcblist_clear_flags(rsclp, SEGCBLIST_ENABLED); } -/* - * Mark the specified rcu_segcblist structure as offloaded (or not) - */ -void rcu_segcblist_offload(struct rcu_segcblist *rsclp, bool offload) -{ - if (offload) - rcu_segcblist_set_flags(rsclp, SEGCBLIST_LOCKING | SEGCBLIST_OFFLOADED); - else - rcu_segcblist_clear_flags(rsclp, SEGCBLIST_OFFLOADED); -} - /* * Does the specified rcu_segcblist structure contain callbacks that * are ready to be invoked? 
diff --git a/kernel/rcu/rcu_segcblist.h b/kernel/rcu/rcu_segcblist.h
index 7a0962dfee86..259904075636 100644
--- a/kernel/rcu/rcu_segcblist.h
+++ b/kernel/rcu/rcu_segcblist.h
@@ -89,7 +89,7 @@ static inline bool rcu_segcblist_is_enabled(struct rcu_segcblist *rsclp)
 static inline bool rcu_segcblist_is_offloaded(struct rcu_segcblist *rsclp)
 {
 	if (IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
-	    rcu_segcblist_test_flags(rsclp, SEGCBLIST_LOCKING))
+	    rcu_segcblist_test_flags(rsclp, SEGCBLIST_OFFLOADED))
 		return true;
 
 	return false;

diff --git a/kernel/rcu/tree_nocb.h b/kernel/rcu/tree_nocb.h
index 24daf606de0c..72a2990d2087 100644
--- a/kernel/rcu/tree_nocb.h
+++ b/kernel/rcu/tree_nocb.h
@@ -604,37 +604,33 @@ static void call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *head,
 	}
 }
 
-static int nocb_gp_toggle_rdp(struct rcu_data *rdp)
+static void nocb_gp_toggle_rdp(struct rcu_data *rdp_gp, struct rcu_data *rdp)
 {
 	struct rcu_segcblist *cblist = &rdp->cblist;
 	unsigned long flags;
-	int ret;
 
-	rcu_nocb_lock_irqsave(rdp, flags);
-	if (rcu_segcblist_test_flags(cblist, SEGCBLIST_OFFLOADED) &&
-	    !rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP)) {
+	/*
+	 * Locking orders future de-offloaded callbacks enqueue against previous
+	 * handling of this rdp. Ie: Make sure rcuog is done with this rdp before
+	 * deoffloaded callbacks can be enqueued.
+	 */
+	raw_spin_lock_irqsave(&rdp->nocb_lock, flags);
+	if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_OFFLOADED)) {
 		/*
 		 * Offloading. Set our flag and notify the offload worker.
 		 * We will handle this rdp until it ever gets de-offloaded.
 		 */
-		rcu_segcblist_set_flags(cblist, SEGCBLIST_KTHREAD_GP);
-		ret = 1;
-	} else if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_OFFLOADED) &&
-		   rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP)) {
+		list_add_tail(&rdp->nocb_entry_rdp, &rdp_gp->nocb_head_rdp);
+		rcu_segcblist_set_flags(cblist, SEGCBLIST_OFFLOADED);
+	} else {
 		/*
 		 * De-offloading. Clear our flag and notify the de-offload worker.
 		 * We will ignore this rdp until it ever gets re-offloaded.
 		 */
-		rcu_segcblist_clear_flags(cblist, SEGCBLIST_KTHREAD_GP);
-		ret = 0;
-	} else {
-		WARN_ON_ONCE(1);
-		ret = -1;
+		list_del(&rdp->nocb_entry_rdp);
+		rcu_segcblist_clear_flags(cblist, SEGCBLIST_OFFLOADED);
 	}
-
-	rcu_nocb_unlock_irqrestore(rdp, flags);
-
-	return ret;
+	raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
 }
 
 static void nocb_gp_sleep(struct rcu_data *my_rdp, int cpu)
@@ -841,14 +837,7 @@ static void nocb_gp_wait(struct rcu_data *my_rdp)
 	}
 
 	if (rdp_toggling) {
-		int ret;
-
-		ret = nocb_gp_toggle_rdp(rdp_toggling);
-		if (ret == 1)
-			list_add_tail(&rdp_toggling->nocb_entry_rdp, &my_rdp->nocb_head_rdp);
-		else if (ret == 0)
-			list_del(&rdp_toggling->nocb_entry_rdp);
-
+		nocb_gp_toggle_rdp(my_rdp, rdp_toggling);
 		swake_up_one(&rdp_toggling->nocb_state_wq);
 	}
 
@@ -1018,16 +1007,11 @@ void rcu_nocb_flush_deferred_wakeup(void)
 }
 EXPORT_SYMBOL_GPL(rcu_nocb_flush_deferred_wakeup);
 
-static int rdp_offload_toggle(struct rcu_data *rdp,
-			      bool offload, unsigned long flags)
-	__releases(rdp->nocb_lock)
+static int rcu_nocb_queue_toggle_rdp(struct rcu_data *rdp)
 {
-	struct rcu_segcblist *cblist = &rdp->cblist;
 	struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;
 	bool wake_gp = false;
-
-	rcu_segcblist_offload(cblist, offload);
-	rcu_nocb_unlock_irqrestore(rdp, flags);
+	unsigned long flags;
 
 	raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
 	// Queue this rdp for add/del to/from the list to iterate on rcuog
@@ -1041,9 +1025,25 @@ static int rdp_offload_toggle(struct rcu_data *rdp,
 	return wake_gp;
 }
 
+static bool rcu_nocb_rdp_deoffload_wait_cond(struct rcu_data *rdp)
+{
+	unsigned long flags;
+	bool ret;
+
+	/*
+	 * Locking makes sure rcuog is done handling this rdp before deoffloaded
+	 * enqueue can happen. Also it keeps the SEGCBLIST_OFFLOADED flag stable
+	 * while the ->nocb_lock is held.
+	 */
+	raw_spin_lock_irqsave(&rdp->nocb_lock, flags);
+	ret = !rcu_segcblist_test_flags(&rdp->cblist, SEGCBLIST_OFFLOADED);
+	raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
+
+	return ret;
+}
+
 static int rcu_nocb_rdp_deoffload(struct rcu_data *rdp)
 {
-	struct rcu_segcblist *cblist = &rdp->cblist;
 	unsigned long flags;
 	int wake_gp;
 	struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;
@@ -1059,19 +1059,20 @@ static int rcu_nocb_rdp_deoffload(struct rcu_data *rdp)
 	rcu_nocb_lock_irqsave(rdp, flags);
 	WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
 	WARN_ON_ONCE(rcu_segcblist_n_cbs(&rdp->cblist));
+	rcu_nocb_unlock_irqrestore(rdp, flags);
 
-	wake_gp = rdp_offload_toggle(rdp, false, flags);
+	wake_gp = rcu_nocb_queue_toggle_rdp(rdp);
 
 	mutex_lock(&rdp_gp->nocb_gp_kthread_mutex);
 	if (rdp_gp->nocb_gp_kthread) {
 		if (wake_gp)
 			wake_up_process(rdp_gp->nocb_gp_kthread);
 
-		swait_event_exclusive(rdp->nocb_state_wq,
-				      !rcu_segcblist_test_flags(cblist,
-								SEGCBLIST_KTHREAD_GP));
 		if (rdp->nocb_cb_kthread)
 			kthread_park(rdp->nocb_cb_kthread);
+
+		swait_event_exclusive(rdp->nocb_state_wq,
+				      rcu_nocb_rdp_deoffload_wait_cond(rdp));
 	} else {
 		/*
 		 * No kthread to clear the flags for us or remove the rdp from the nocb list
@@ -1079,29 +1080,14 @@ static int rcu_nocb_rdp_deoffload(struct rcu_data *rdp)
 		 * but we stick to paranoia in this rare path.
 		 */
 		rcu_nocb_lock_irqsave(rdp, flags);
-		rcu_segcblist_clear_flags(&rdp->cblist, SEGCBLIST_KTHREAD_GP);
-		rcu_nocb_unlock_irqrestore(rdp, flags);
+		rcu_segcblist_clear_flags(&rdp->cblist, SEGCBLIST_OFFLOADED);
+		raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
 		list_del(&rdp->nocb_entry_rdp);
 	}
+
 	mutex_unlock(&rdp_gp->nocb_gp_kthread_mutex);
 
-	/*
-	 * Lock one last time to acquire latest callback updates from kthreads
-	 * so we can later handle callbacks locally without locking.
-	 */
-	rcu_nocb_lock_irqsave(rdp, flags);
-	/*
-	 * Theoretically we could clear SEGCBLIST_LOCKING after the nocb
-	 * lock is released but how about being paranoid for once?
-	 */
-	rcu_segcblist_clear_flags(cblist, SEGCBLIST_LOCKING);
-	/*
-	 * Without SEGCBLIST_LOCKING, we can't use
-	 * rcu_nocb_unlock_irqrestore() anymore.
-	 */
-	raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
-
 	return 0;
 }
 
@@ -1129,10 +1115,20 @@ int rcu_nocb_cpu_deoffload(int cpu)
 }
 EXPORT_SYMBOL_GPL(rcu_nocb_cpu_deoffload);
 
+static bool rcu_nocb_rdp_offload_wait_cond(struct rcu_data *rdp)
+{
+	unsigned long flags;
+	bool ret;
+
+	raw_spin_lock_irqsave(&rdp->nocb_lock, flags);
+	ret = rcu_segcblist_test_flags(&rdp->cblist, SEGCBLIST_OFFLOADED);
+	raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
+
+	return ret;
+}
+
 static int rcu_nocb_rdp_offload(struct rcu_data *rdp)
 {
-	struct rcu_segcblist *cblist = &rdp->cblist;
-	unsigned long flags;
 	int wake_gp;
 	struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;
 
@@ -1152,20 +1148,14 @@ static int rcu_nocb_rdp_offload(struct rcu_data *rdp)
 	WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
 	WARN_ON_ONCE(rcu_segcblist_n_cbs(&rdp->cblist));
 
-	/*
-	 * Can't use rcu_nocb_lock_irqsave() before SEGCBLIST_LOCKING
-	 * is set.
-	 */
-	raw_spin_lock_irqsave(&rdp->nocb_lock, flags);
-
-	wake_gp = rdp_offload_toggle(rdp, true, flags);
+	wake_gp = rcu_nocb_queue_toggle_rdp(rdp);
 	if (wake_gp)
 		wake_up_process(rdp_gp->nocb_gp_kthread);
 
-	kthread_unpark(rdp->nocb_cb_kthread);
-
 	swait_event_exclusive(rdp->nocb_state_wq,
-			      rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP));
+			      rcu_nocb_rdp_offload_wait_cond(rdp));
+
+	kthread_unpark(rdp->nocb_cb_kthread);
 
 	return 0;
 }
 
@@ -1340,8 +1330,7 @@ void __init rcu_init_nohz(void)
 		rdp = per_cpu_ptr(&rcu_data, cpu);
 		if (rcu_segcblist_empty(&rdp->cblist))
 			rcu_segcblist_init(&rdp->cblist);
-		rcu_segcblist_offload(&rdp->cblist, true);
-		rcu_segcblist_set_flags(&rdp->cblist, SEGCBLIST_KTHREAD_GP);
+		rcu_segcblist_set_flags(&rdp->cblist, SEGCBLIST_OFFLOADED);
 	}
 	rcu_organize_nocb_kthreads();
 }
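Taken together, the new handshake announced in the changelog can be read
as the following condensed sketch. It compresses rcu_nocb_rdp_offload()
and rcu_nocb_rdp_deoffload() from the diff above into one hypothetical
helper (toggle_offload() is an invented name) and elides the error paths
and the no-kthread fallback, so treat it as a reading aid rather than
the exact implementation:

/* Condensed, illustrative reading aid; error handling elided. */
static int toggle_offload(struct rcu_data *rdp, bool offload)
{
	struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;

	if (!offload)
		kthread_park(rdp->nocb_cb_kthread);	/* quiesce rcuo first */

	/* Ask rcuog to (de-)queue this rdp and flip SEGCBLIST_OFFLOADED. */
	if (rcu_nocb_queue_toggle_rdp(rdp))
		wake_up_process(rdp_gp->nocb_gp_kthread);

	/* Wait until rcuog has committed the new state under ->nocb_lock. */
	swait_event_exclusive(rdp->nocb_state_wq,
			      offload ? rcu_nocb_rdp_offload_wait_cond(rdp)
				      : rcu_nocb_rdp_deoffload_wait_cond(rdp));

	if (offload)
		kthread_unpark(rdp->nocb_cb_kthread);	/* resume rcuo */

	return 0;
}

Note the ordering on each side: de-offloading parks the rcuo kthread
before waiting for the bit to clear, while offloading unparks it only
after the bit is observed set, which is exactly the asymmetry described
in the changelog bullets.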