@@ -87,7 +87,7 @@ static __always_inline long ct_nesting_cpu(int cpu)
return ct->nesting;
}
-static __always_inline long ct_dynticks_nmi_nesting(void)
+static __always_inline long ct_nmi_nesting(void)
{
return __this_cpu_read(context_tracking.nmi_nesting);
}
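
Aside: as the hunk above shows, ct_nmi_nesting() is a thin per-CPU read. A minimal userspace sketch of its shape, with a plain static standing in for the kernel's per-CPU variable and __this_cpu_read() (the model_* name and single-CPU global are illustrative, not kernel code):

#include <stdio.h>

struct context_tracking {
        long nesting;           /* process-context nesting depth */
        long nmi_nesting;       /* irq/NMI nesting depth */
};

/* One CPU's state; the kernel keeps this in a per-CPU variable. */
static struct context_tracking ct_model;

/* Models __this_cpu_read(context_tracking.nmi_nesting). */
static long model_ct_nmi_nesting(void)
{
        return ct_model.nmi_nesting;
}

int main(void)
{
        ct_model.nmi_nesting = 2;
        printf("nmi_nesting = %ld\n", model_ct_nmi_nesting());
        return 0;
}
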
@@ -125,7 +125,7 @@ static void noinstr ct_kernel_exit(bool user, int offset)
{
struct context_tracking *ct = this_cpu_ptr(&context_tracking);
- WARN_ON_ONCE(ct_dynticks_nmi_nesting() != DYNTICK_IRQ_NONIDLE);
+ WARN_ON_ONCE(ct_nmi_nesting() != DYNTICK_IRQ_NONIDLE);
WRITE_ONCE(ct->nmi_nesting, 0);
WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
ct_nesting() == 0);
@@ -185,7 +185,7 @@ static void noinstr ct_kernel_enter(bool user, int offset)
trace_rcu_dyntick(TPS("End"), ct_nesting(), 1, ct_rcu_watching());
WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
WRITE_ONCE(ct->nesting, 1);
- WARN_ON_ONCE(ct_dynticks_nmi_nesting());
+ WARN_ON_ONCE(ct_nmi_nesting());
WRITE_ONCE(ct->nmi_nesting, DYNTICK_IRQ_NONIDLE);
instrumentation_end();
}
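
The DYNTICK_IRQ_NONIDLE handoff in the two hunks above is easier to see in isolation: the outermost kernel entry from idle/user sets ->nesting to 1 and crowbars ->nmi_nesting to a large sentinel, and the outermost kernel exit zeroes both. A hedged model of just that outermost transition (the sentinel's definition matches kernel/rcu/rcu.h; the model_* helpers and single-CPU globals are simplifications):

#include <assert.h>
#include <limits.h>
#include <stdio.h>

#define DYNTICK_IRQ_NONIDLE     ((LONG_MAX / 2) + 1)

static long nesting;            /* models ct->nesting */
static long nmi_nesting;        /* models ct->nmi_nesting */

/* Models ct_kernel_enter()'s counter updates on the outermost entry. */
static void model_kernel_enter(void)
{
        assert(nmi_nesting == 0);       /* WARN_ON_ONCE(ct_nmi_nesting()) */
        nesting = 1;
        nmi_nesting = DYNTICK_IRQ_NONIDLE;
}

/* Models ct_kernel_exit()'s counter updates on the outermost exit. */
static void model_kernel_exit(void)
{
        assert(nmi_nesting == DYNTICK_IRQ_NONIDLE);
        nmi_nesting = 0;
        nesting = 0;
}

int main(void)
{
        model_kernel_enter();
        printf("in kernel: nmi_nesting = %ld\n", nmi_nesting);
        model_kernel_exit();
        printf("idle:      nmi_nesting = %ld\n", nmi_nesting);
        return 0;
}
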
@@ -207,28 +207,28 @@ void noinstr ct_nmi_exit(void)
instrumentation_begin();
/*
- * Check for ->nmi_nesting underflow and bad ->dynticks.
+ * Check for ->nmi_nesting underflow and bad CT state.
* (We are exiting an NMI handler, so RCU better be paying attention
* to us!)
*/
- WARN_ON_ONCE(ct_dynticks_nmi_nesting() <= 0);
+ WARN_ON_ONCE(ct_nmi_nesting() <= 0);
WARN_ON_ONCE(rcu_dynticks_curr_cpu_in_eqs());
/*
* If the nesting level is not 1, the CPU wasn't RCU-idle, so
* leave it in non-RCU-idle state.
*/
- if (ct_dynticks_nmi_nesting() != 1) {
- trace_rcu_dyntick(TPS("--="), ct_dynticks_nmi_nesting(), ct_dynticks_nmi_nesting() - 2,
+ if (ct_nmi_nesting() != 1) {
+ trace_rcu_dyntick(TPS("--="), ct_nmi_nesting(), ct_nmi_nesting() - 2,
ct_rcu_watching());
WRITE_ONCE(ct->nmi_nesting, /* No store tearing. */
- ct_dynticks_nmi_nesting() - 2);
+ ct_nmi_nesting() - 2);
instrumentation_end();
return;
}
/* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */
- trace_rcu_dyntick(TPS("Startirq"), ct_dynticks_nmi_nesting(), 0, ct_rcu_watching());
+ trace_rcu_dyntick(TPS("Startirq"), ct_nmi_nesting(), 0, ct_rcu_watching());
WRITE_ONCE(ct->nmi_nesting, 0); /* Avoid store tearing. */
// instrumentation for the noinstr ct_kernel_exit_state()
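
A quick model of the exit arithmetic above: nested exits step the counter down by two (the "--=" tracepoint), and the final exit snaps 1 to 0, restoring RCU-idleness (single-CPU global and the model_* name are illustrative only):

#include <assert.h>
#include <stdio.h>

static long nmi_nesting;        /* models ct->nmi_nesting */

/* Models ct_nmi_exit()'s counter math. */
static void model_nmi_exit(void)
{
        assert(nmi_nesting > 0);        /* the underflow check above */
        if (nmi_nesting != 1) {
                nmi_nesting -= 2;       /* still nested: "--=" */
                return;
        }
        nmi_nesting = 0;                /* last level: back to RCU-idle */
}

int main(void)
{
        nmi_nesting = 3;        /* an NMI nested in an irq taken from idle */
        model_nmi_exit();       /* 3 -> 1 */
        model_nmi_exit();       /* 1 -> 0 */
        printf("nmi_nesting = %ld\n", nmi_nesting);
        return 0;
}
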
@@ -261,7 +261,7 @@ void noinstr ct_nmi_enter(void)
struct context_tracking *ct = this_cpu_ptr(&context_tracking);
/* Complain about underflow. */
- WARN_ON_ONCE(ct_dynticks_nmi_nesting() < 0);
+ WARN_ON_ONCE(ct_nmi_nesting() < 0);
/*
* If idle from RCU viewpoint, atomically increment CT state
@@ -295,11 +295,11 @@ void noinstr ct_nmi_enter(void)
}
trace_rcu_dyntick(incby == 1 ? TPS("Endirq") : TPS("++="),
- ct_dynticks_nmi_nesting(),
- ct_dynticks_nmi_nesting() + incby, ct_rcu_watching());
+ ct_nmi_nesting(),
+ ct_nmi_nesting() + incby, ct_rcu_watching());
instrumentation_end();
WRITE_ONCE(ct->nmi_nesting, /* Prevent store tearing. */
- ct_dynticks_nmi_nesting() + incby);
+ ct_nmi_nesting() + incby);
barrier();
}
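
And the matching entry side: the first entry from RCU-idle adds one, every nested entry adds two, so the counter's parity records whether the irq/NMI chain began on an RCU-idle CPU. A sketch of the incby logic (the rcu_watching flag stands in for rcu_dynticks_curr_cpu_in_eqs(); again a single-CPU model, not kernel code):

#include <stdbool.h>
#include <stdio.h>

static long nmi_nesting;        /* models ct->nmi_nesting */
static bool rcu_watching;       /* false models "CPU is in an RCU EQS" */

/* Models ct_nmi_enter()'s incby logic. */
static void model_nmi_enter(void)
{
        long incby = 2;

        if (!rcu_watching) {
                rcu_watching = true;    /* mark RCU non-idle */
                incby = 1;
        }
        nmi_nesting += incby;
}

int main(void)
{
        model_nmi_enter();      /* from idle: 0 -> 1 */
        model_nmi_enter();      /* nested:    1 -> 3 */
        printf("nmi_nesting = %ld\n", nmi_nesting);
        return 0;
}
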
@@ -382,11 +382,11 @@ static int rcu_is_cpu_rrupt_from_idle(void)
/* Check for counter underflows */
RCU_LOCKDEP_WARN(ct_nesting() < 0,
"RCU nesting counter underflow!");
- RCU_LOCKDEP_WARN(ct_dynticks_nmi_nesting() <= 0,
+ RCU_LOCKDEP_WARN(ct_nmi_nesting() <= 0,
"RCU nmi_nesting counter underflow/zero!");
/* Are we at first interrupt nesting level? */
- nesting = ct_dynticks_nmi_nesting();
+ nesting = ct_nmi_nesting();
if (nesting > 1)
return false;
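
The checks and the nesting test above reduce to a small predicate: report "interrupt from idle" only in the first irq/NMI level taken while the process level was idle. A pure-function sketch (the function form and name are mine; the thresholds come from the hunk above):

#include <stdio.h>

/*
 * Models rcu_is_cpu_rrupt_from_idle(): true only when nmi_nesting == 1
 * (first interrupt level) and the process-level nesting is zero (idle).
 */
static int model_rrupt_from_idle(long nesting, long nmi_nesting)
{
        if (nmi_nesting > 1)            /* nested irq/NMI */
                return 0;
        return nesting == 0;
}

int main(void)
{
        printf("%d\n", model_rrupt_from_idle(0, 1));    /* 1: irq from idle */
        printf("%d\n", model_rrupt_from_idle(1, 1));    /* 0: irq from task */
        printf("%d\n", model_rrupt_from_idle(0, 3));    /* 0: nested NMI   */
        return 0;
}
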
@@ -590,7 +590,7 @@ void rcu_irq_exit_check_preempt(void)
RCU_LOCKDEP_WARN(ct_nesting() <= 0,
"RCU nesting counter underflow/zero!");
- RCU_LOCKDEP_WARN(ct_dynticks_nmi_nesting() !=
- DYNTICK_IRQ_NONIDLE,
+ RCU_LOCKDEP_WARN(ct_nmi_nesting() != DYNTICK_IRQ_NONIDLE,
"Bad RCU nmi_nesting counter\n");
RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(),
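
Taken together, the three lockdep checks in rcu_irq_exit_check_preempt() assert one state: a non-idle, non-interrupt kernel context with RCU watching. A condensed model with plain asserts (the helper name is illustrative; the sentinel matches kernel/rcu/rcu.h):

#include <assert.h>
#include <limits.h>

#define DYNTICK_IRQ_NONIDLE     ((LONG_MAX / 2) + 1)

/* Models the three RCU_LOCKDEP_WARN()s above as hard assertions. */
static void model_irq_exit_check(long nesting, long nmi_nesting, int in_eqs)
{
        assert(nesting > 0);                        /* task level non-idle */
        assert(nmi_nesting == DYNTICK_IRQ_NONIDLE); /* no irq/NMI on stack */
        assert(!in_eqs);                            /* RCU is watching */
}

int main(void)
{
        /* The state rcu_irq_exit_check_preempt() expects to observe. */
        model_irq_exit_check(1, DYNTICK_IRQ_NONIDLE, 0);
        return 0;
}
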