@@ -38,24 +38,24 @@ EXPORT_SYMBOL_GPL(context_tracking);
#ifdef CONFIG_CONTEXT_TRACKING_IDLE
#define TPS(x) tracepoint_string(x)

-/* Record the current task on dyntick-idle entry. */
-static __always_inline void rcu_dynticks_task_enter(void)
+/* Record the current task on exiting RCU-tasks (dyntick-idle entry). */
+static __always_inline void rcu_task_exit(void)
{
#if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
WRITE_ONCE(current->rcu_tasks_idle_cpu, smp_processor_id());
#endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
}

-/* Record no current task on dyntick-idle exit. */
-static __always_inline void rcu_dynticks_task_exit(void)
+/* Record no current task on entering RCU-tasks (dyntick-idle exit). */
+static __always_inline void rcu_task_enter(void)
{
#if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
WRITE_ONCE(current->rcu_tasks_idle_cpu, -1);
#endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
}

-/* Turn on heavyweight RCU tasks trace readers on idle/user entry. */
-static __always_inline void rcu_dynticks_task_trace_enter(void)
+/* Turn on heavyweight RCU tasks trace readers on kernel exit. */
+static __always_inline void rcu_task_trace_heavyweight_enter(void)
{
#ifdef CONFIG_TASKS_TRACE_RCU
if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB))
@@ -63,8 +63,8 @@ static __always_inline void rcu_dynticks_task_trace_enter(void)
#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
}

-/* Turn off heavyweight RCU tasks trace readers on idle/user exit. */
-static __always_inline void rcu_dynticks_task_trace_exit(void)
+/* Turn off heavyweight RCU tasks trace readers on kernel entry. */
+static __always_inline void rcu_task_trace_heavyweight_exit(void)
{
#ifdef CONFIG_TASKS_TRACE_RCU
if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB))
@@ -87,7 +87,7 @@ static noinstr void ct_kernel_exit_state(int offset)
* critical sections, and we also must force ordering with the
* next idle sojourn.
*/
- rcu_dynticks_task_trace_enter(); // Before ->dynticks update!
+ rcu_task_trace_heavyweight_enter(); // Before CT state update!
seq = ct_state_inc(offset);
// RCU is no longer watching. Better be in extended quiescent state!
WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && (seq & CT_RCU_WATCHING));
@@ -109,7 +109,7 @@ static noinstr void ct_kernel_enter_state(int offset)
*/
seq = ct_state_inc(offset);
// RCU is now watching. Better not be in an extended quiescent state!
- rcu_dynticks_task_trace_exit(); // After ->dynticks update!
+ rcu_task_trace_heavyweight_exit(); // After CT state update!
WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !(seq & CT_RCU_WATCHING));
}

@@ -149,7 +149,7 @@ static void noinstr ct_kernel_exit(bool user, int offset)
// RCU is watching here ...
ct_kernel_exit_state(offset);
// ... but is no longer watching here.
- rcu_dynticks_task_enter();
+ rcu_task_exit();
}

/*
@@ -173,7 +173,7 @@ static void noinstr ct_kernel_enter(bool user, int offset)
ct->nesting++;
return;
}
- rcu_dynticks_task_exit();
+ rcu_task_enter();
// RCU is not watching here ...
ct_kernel_enter_state(offset);
// ... but is watching here.
@@ -240,7 +240,7 @@ void noinstr ct_nmi_exit(void)
// ... but is no longer watching here.

if (!in_nmi())
- rcu_dynticks_task_enter();
+ rcu_task_exit();
}

/**
@@ -274,7 +274,7 @@ void noinstr ct_nmi_enter(void)
if (rcu_dynticks_curr_cpu_in_eqs()) {

if (!in_nmi())
- rcu_dynticks_task_exit();
+ rcu_task_enter();

// RCU is not watching here ...
ct_kernel_enter_state(CT_RCU_WATCHING);