@@ -116,25 +116,22 @@ static __always_inline void __sti_mwait(unsigned long eax, unsigned long ecx)
 */
static __always_inline void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
{
-        if (static_cpu_has_bug(X86_BUG_MONITOR) || !current_set_polling_and_test()) {
-                if (static_cpu_has_bug(X86_BUG_CLFLUSH_MONITOR)) {
-                        mb();
-                        clflush((void *)&current_thread_info()->flags);
-                        mb();
-                }
+        if (static_cpu_has_bug(X86_BUG_CLFLUSH_MONITOR)) {
+                mb();
+                clflush((void *)&current_thread_info()->flags);
+                mb();
+        }
-                __monitor((void *)&current_thread_info()->flags, 0, 0);
+        __monitor((void *)&current_thread_info()->flags, 0, 0);
-                if (!need_resched()) {
-                        if (ecx & 1) {
-                                __mwait(eax, ecx);
-                        } else {
-                                __sti_mwait(eax, ecx);
-                                raw_local_irq_disable();
-                        }
-                }
-        }
-        current_clr_polling();
+        if (!need_resched()) {
+                if (ecx & 1) {
+                        __mwait(eax, ecx);
+                } else {
+                        __sti_mwait(eax, ecx);
+                        raw_local_irq_disable();
+                }
+        }
 }
/*
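The hunk above drops the current_set_polling_and_test()/current_clr_polling() bracketing from mwait_idle_with_hints(): the TIF_POLLING_NRFLAG handshake moves up into the cpuidle core (see the cpuidle_enter_state() hunks below). For reference, a minimal sketch of why the bare MONITOR/MWAIT pair stays race-free on its own; the function name is made up for illustration, the helpers are the real ones used above:

static __always_inline void mwait_wait_sketch(unsigned long eax, unsigned long ecx)
{
        /* Arm the monitor on the cache line holding thread_info->flags,
         * the word that TIF_NEED_RESCHED is set in. */
        __monitor((void *)&current_thread_info()->flags, 0, 0);

        /* A remote set_tsk_need_resched() stores to that line: either the
         * store is already visible here and MWAIT is skipped, or it lands
         * after MONITOR armed and aborts the MWAIT. Either way the wakeup
         * cannot be lost between the two instructions. */
        if (!need_resched())
                __mwait(eax, ecx);
}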
@@ -217,10 +217,10 @@ noinstr int cpuidle_enter_state(struct cpuidle_device *dev,
                                 int index)
 {
         int entered_state;
-
         struct cpuidle_state *target_state = &drv->states[index];
         bool broadcast = !!(target_state->flags & CPUIDLE_FLAG_TIMER_STOP);
         ktime_t time_start, time_end;
+        bool polling;

         instrumentation_begin();
@@ -237,6 +237,23 @@ noinstr int cpuidle_enter_state(struct cpuidle_device *dev,
                 broadcast = false;
         }

+        polling = target_state->flags & CPUIDLE_FLAG_MWAIT;
+
+        /*
+         * If the target state doesn't poll on need_resched(), this is the
+         * last check after which any further remote setting of
+         * TIF_NEED_RESCHED will involve an IPI.
+         */
+        if (!polling && current_clr_polling_and_test()) {
+                if (broadcast)
+                        tick_broadcast_exit();
+                dev->last_residency_ns = 0;
+                local_irq_enable();
+                instrumentation_end();
+                return -EBUSY;
+        }
+
         if (target_state->flags & CPUIDLE_FLAG_TLB_FLUSHED)
                 leave_mm();
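The IPI the new comment refers to comes from the waker side: a remote wakeup can only skip the IPI while the target CPU still advertises TIF_POLLING_NRFLAG. Roughly the waker-side shape, modeled on the scheduler's set_nr_if_polling() path (a simplified sketch, not the exact kernel code):

static void resched_idle_cpu_sketch(struct rq *rq, int cpu)
{
        /* Try to set TIF_NEED_RESCHED while the remote idle task still has
         * TIF_POLLING_NRFLAG set; its MWAIT/poll loop notices the flags
         * write on its own and no interrupt is needed. */
        if (!set_nr_if_polling(rq->idle))
                smp_send_reschedule(cpu);        /* polling already cleared: IPI */
}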
@@ -336,6 +353,9 @@ noinstr int cpuidle_enter_state(struct cpuidle_device *dev,
                 dev->states_usage[index].rejected++;
         }

+        if (!polling)
+                __current_set_polling();
+
         instrumentation_end();
         return entered_state;
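cpuidle_enter_state() now keys the handshake off CPUIDLE_FLAG_MWAIT, so only states that keep watching the flags word skip the bail-out above. A driver would advertise such a state roughly as below; the state table, values and enter callback are hypothetical, only the flag itself comes from this patch:

static int example_c1_enter(struct cpuidle_device *dev,
                            struct cpuidle_driver *drv, int index)
{
        /* hint 0x00 selects C1; ecx bit 0 makes interrupts break the
         * MWAIT even while they are disabled */
        mwait_idle_with_hints(0x00, 1);
        return index;
}

static struct cpuidle_state example_states[] = {
        {
                .name  = "C1",
                .desc  = "MWAIT 0x00",
                .flags = CPUIDLE_FLAG_MWAIT,  /* wakes on the flags write, no IPI */
                .enter = example_c1_enter,
        },
};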
@@ -68,6 +68,8 @@ static __always_inline bool __must_check current_set_polling_and_test(void)
 static __always_inline bool __must_check current_clr_polling_and_test(void)
 {
+        bool ret;
+
         __current_clr_polling();

         /*
@@ -76,7 +78,10 @@ static __always_inline bool __must_check current_clr_polling_and_test(void)
          */
         smp_mb__after_atomic();

-        return unlikely(tif_need_resched());
+        ret = unlikely(tif_need_resched());
+        if (ret)
+                __current_set_polling();
+        return ret;
 }
#else
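With this change a caller that gets true back still has TIF_POLLING_NRFLAG set, so remote wakers keep using the cheap path while the resched is being handled. The ordering the function relies on is the usual polling-idle pairing, roughly (a sketch of the handshake; the waker side lives in resched_curr()/set_nr_if_polling(), not shown exactly):

/*
 *   idle CPU                          waking CPU
 *   --------                          ----------
 *   clear TIF_POLLING_NRFLAG          set TIF_NEED_RESCHED
 *   smp_mb__after_atomic()            barrier implied by the atomic RmW
 *   test TIF_NEED_RESCHED             test TIF_POLLING_NRFLAG
 *
 * At least one side must observe the other's store: either the resched
 * request is seen here (return true, polling restored by the hunk above),
 * or the waker sees polling cleared and sends the IPI.
 */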
@@ -114,12 +114,13 @@ void __cpuidle default_idle_call(void)
                 stop_critical_timings();

                 ct_cpuidle_enter();
-                arch_cpu_idle();
+                arch_cpu_idle(); // XXX assumes !polling
                 ct_cpuidle_exit();

                 start_critical_timings();
                 trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
                 cond_tick_broadcast_exit();
+                __current_set_polling();
         }
         local_irq_enable();
         instrumentation_end();
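default_idle_call() has to restore the flag by hand because arch_cpu_idle() is assumed not to watch the flags word (the XXX above). For context, the helper being re-set here is essentially a one-line wrapper, as in include/linux/sched/idle.h (shown for reference, not part of the patch):

static inline void __current_set_polling(void)
{
        set_thread_flag(TIF_POLLING_NRFLAG);  /* wakers may skip the IPI again */
}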
@@ -128,31 +129,14 @@ void __cpuidle default_idle_call(void)
 static int call_cpuidle_s2idle(struct cpuidle_driver *drv,
                                struct cpuidle_device *dev)
 {
+        int ret;
+
         if (current_clr_polling_and_test())
                 return -EBUSY;

-        return cpuidle_enter_s2idle(drv, dev);
-}
-
-static int call_cpuidle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
-                        int next_state)
-{
-        /*
-         * The idle task must be scheduled, it is pointless to go to idle, just
-         * update no idle residency and return.
-         */
-        if (current_clr_polling_and_test()) {
-                dev->last_residency_ns = 0;
-                local_irq_enable();
-                return -EBUSY;
-        }
-
-        /*
-         * Enter the idle state previously returned by the governor decision.
-         * This function will block until an interrupt occurs and will take
-         * care of re-enabling the local interrupts
-         */
-        return cpuidle_enter(drv, dev, next_state);
+        ret = cpuidle_enter_s2idle(drv, dev);
+        __current_set_polling();
+        return ret;
 }
/**
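With call_cpuidle() folded away, the need_resched() bail-out is no longer duplicated between the idle loop and the cpuidle core; it lives in cpuidle_enter_state() alone, while the s2idle path keeps its own check in call_cpuidle_s2idle() above. The resulting call chain, roughly:

/*
 * cpuidle_idle_call()
 *   -> cpuidle_enter(drv, dev, next_state)
 *        -> cpuidle_enter_state()  // now does the current_clr_polling_and_test()
 *                                  // check and the -EBUSY bail-out itself
 */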
@@ -213,7 +197,7 @@ static void cpuidle_idle_call(void)
                 tick_nohz_idle_stop_tick();

                 next_state = cpuidle_find_deepest_state(drv, dev, max_latency_ns);
-                call_cpuidle(drv, dev, next_state);
+                cpuidle_enter(drv, dev, next_state);
         } else {
                 bool stop_tick = true;
@@ -227,7 +211,12 @@ static void cpuidle_idle_call(void)
                 else
                         tick_nohz_idle_retain_tick();

-                entered_state = call_cpuidle(drv, dev, next_state);
+                /*
+                 * Enter the idle state previously returned by the governor decision.
+                 * This function will block until an interrupt occurs and will take
+                 * care of re-enabling the local interrupts.
+                 */
+                entered_state = cpuidle_enter(drv, dev, next_state);

                 /*
                  * Give the governor an opportunity to reflect on the outcome
@@ -235,7 +224,6 @@ static void cpuidle_idle_call(void)
         }

 exit_idle:
-        __current_set_polling();

         /*
          * It is up to the idle functions to re-enable local interrupts