@@ -789,8 +789,8 @@ static int acpi_idle_enter(struct cpuidle_device *dev,
return index;
}
-static void acpi_idle_enter_freeze(struct cpuidle_device *dev,
- struct cpuidle_driver *drv, int index)
+static int acpi_idle_enter_freeze(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index)
{
struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
@@ -798,16 +798,18 @@ static void acpi_idle_enter_freeze(struct cpuidle_device *dev,
struct acpi_processor *pr = __this_cpu_read(processors);
if (unlikely(!pr))
- return;
+ return 0;
if (pr->flags.bm_check) {
acpi_idle_enter_bm(pr, cx, false);
- return;
+ return 0;
} else {
ACPI_FLUSH_CPU_CACHE();
}
}
acpi_idle_do_entry(cx);
+
+ return 0;
}
static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
@@ -128,9 +128,13 @@ int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
}
#ifdef CONFIG_SUSPEND
-static void enter_freeze_proper(struct cpuidle_driver *drv,
- struct cpuidle_device *dev, int index)
+static int cpuidle_freeze_error;
+
+static int enter_freeze_proper(struct cpuidle_driver *drv,
+ struct cpuidle_device *dev, int index)
{
+ int ret;
+
/*
* trace_suspend_resume() called by tick_freeze() for the last CPU
* executing it contains RCU usage regarded as invalid in the idle
@@ -143,7 +147,7 @@ static void enter_freeze_proper(struct cpuidle_driver *drv,
* suspended is generally unsafe.
*/
stop_critical_timings();
- drv->states[index].enter_freeze(dev, drv, index);
+ ret = drv->states[index].enter_freeze(dev, drv, index);
WARN_ON(!irqs_disabled());
/*
* timekeeping_resume() that will be called by tick_unfreeze() for the
@@ -152,6 +156,7 @@ static void enter_freeze_proper(struct cpuidle_driver *drv,
*/
RCU_NONIDLE(tick_unfreeze());
start_critical_timings();
+ return ret;
}
/**
@@ -164,7 +169,7 @@ static void enter_freeze_proper(struct cpuidle_driver *drv,
*/
int cpuidle_enter_freeze(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
- int index;
+ int index, ret = 0;
/*
* Find the deepest state with ->enter_freeze present, which guarantees
@@ -173,10 +178,27 @@ int cpuidle_enter_freeze(struct cpuidle_driver *drv, struct cpuidle_device *dev)
*/
index = find_deepest_state(drv, dev, UINT_MAX, 0, true);
if (index > 0)
- enter_freeze_proper(drv, dev, index);
+ ret = enter_freeze_proper(drv, dev, index);
+
+ if (ret < 0) {
+ cpuidle_freeze_error = ret;
+ freeze_wake();
+ }
return index;
}
+
+void cpuidle_prepare_freeze(void)
+{
+ cpuidle_freeze_error = 0;
+ cpuidle_resume();
+}
+
+int cpuidle_complete_freeze(void)
+{
+ cpuidle_pause();
+ return cpuidle_freeze_error;
+}
#endif /* CONFIG_SUSPEND */
/**
@@ -97,8 +97,8 @@ static const struct idle_cpu *icpu;
static struct cpuidle_device __percpu *intel_idle_cpuidle_devices;
static int intel_idle(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index);
-static void intel_idle_freeze(struct cpuidle_device *dev,
- struct cpuidle_driver *drv, int index);
+static int intel_idle_freeze(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index);
static struct cpuidle_state *cpuidle_state_table;
/*
@@ -941,13 +941,15 @@ static __cpuidle int intel_idle(struct cpuidle_device *dev,
* @drv: cpuidle driver
* @index: state index
*/
-static void intel_idle_freeze(struct cpuidle_device *dev,
+static int intel_idle_freeze(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index)
{
unsigned long ecx = 1; /* break on interrupt flag */
unsigned long eax = flg2MWAIT(drv->states[index].flags);
mwait_idle_with_hints(eax, ecx);
+
+ return 0;
}
static void __setup_broadcast_timer(bool on)
@@ -54,11 +54,11 @@ struct cpuidle_state {
/*
* CPUs execute ->enter_freeze with the local tick or entire timekeeping
* suspended, so it must not re-enable interrupts at any point (even
- * temporarily) or attempt to change states of clock event devices.
+ * temporarily). Returns 0 on success and non-zero if an error occurred.
*/
- void (*enter_freeze) (struct cpuidle_device *dev,
- struct cpuidle_driver *drv,
- int index);
+ int (*enter_freeze) (struct cpuidle_device *dev,
+ struct cpuidle_driver *drv,
+ int index);
};
/* Idle State Flags */
@@ -200,6 +200,8 @@ extern int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
extern int cpuidle_enter_freeze(struct cpuidle_driver *drv,
struct cpuidle_device *dev);
extern void cpuidle_use_deepest_state(bool enable);
+extern void cpuidle_prepare_freeze(void);
+extern int cpuidle_complete_freeze(void);
#else
static inline int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
struct cpuidle_device *dev)
@@ -210,6 +212,8 @@ static inline int cpuidle_enter_freeze(struct cpuidle_driver *drv,
static inline void cpuidle_use_deepest_state(bool enable)
{
}
+static inline void cpuidle_prepare_freeze(void) { }
+static inline int cpuidle_complete_freeze(void) { return -ENODEV; }
#endif
/* kernel/sched/idle.c */
@@ -70,8 +70,10 @@ static void freeze_begin(void)
suspend_freeze_state = FREEZE_STATE_NONE;
}
-static void freeze_enter(void)
+static int freeze_enter(void)
{
+ int error = 0;
+
trace_suspend_resume(TPS("machine_suspend"), PM_SUSPEND_FREEZE, true);
spin_lock_irq(&suspend_freeze_lock);
@@ -82,7 +84,7 @@ static void freeze_enter(void)
spin_unlock_irq(&suspend_freeze_lock);
get_online_cpus();
- cpuidle_resume();
+ cpuidle_prepare_freeze();
/* Push all the CPUs into the idle loop. */
wake_up_all_idle_cpus();
@@ -90,7 +92,7 @@ static void freeze_enter(void)
wait_event(suspend_freeze_wait_head,
suspend_freeze_state == FREEZE_STATE_WAKE);
- cpuidle_pause();
+ error = cpuidle_complete_freeze();
put_online_cpus();
spin_lock_irq(&suspend_freeze_lock);
@@ -100,14 +102,17 @@ static void freeze_enter(void)
spin_unlock_irq(&suspend_freeze_lock);
trace_suspend_resume(TPS("machine_suspend"), PM_SUSPEND_FREEZE, false);
+ return error;
}
-static void s2idle_loop(void)
+static int s2idle_loop(void)
{
+ int ret;
+
pr_debug("PM: suspend-to-idle\n");
do {
- freeze_enter();
+ ret = freeze_enter();
if (freeze_ops && freeze_ops->wake)
freeze_ops->wake();
@@ -116,13 +121,14 @@ static void s2idle_loop(void)
if (freeze_ops && freeze_ops->sync)
freeze_ops->sync();
- if (pm_wakeup_pending())
+ if (ret < 0 || pm_wakeup_pending())
break;
pm_wakeup_clear(false);
} while (!dpm_suspend_noirq(PMSG_SUSPEND));
pr_debug("PM: resume from suspend-to-idle\n");
+ return ret;
}
void freeze_wake(void)
@@ -396,7 +402,7 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
* all the devices are suspended.
*/
if (state == PM_SUSPEND_FREEZE) {
- s2idle_loop();
+ error = s2idle_loop();
goto Platform_early_resume;
}
This adds error reporting for cpuidle to freeze so suspend-to-idle can
report errors when the CPU/SoC is unable to idle properly. Freeze will
abort when an error is encountered.

Signed-off-by: Derek Basehore <dbasehore@chromium.org>
---
 drivers/acpi/processor_idle.c | 10 ++++++----
 drivers/cpuidle/cpuidle.c     | 32 +++++++++++++++++++++++++++-----
 drivers/idle/intel_idle.c     |  8 +++++---
 include/linux/cpuidle.h       | 12 ++++++++----
 kernel/power/suspend.c        | 20 +++++++++++++-------
 5 files changed, 59 insertions(+), 23 deletions(-)
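
For illustration only (not part of this patch): a minimal sketch of how a
driver could report a failure through the new int-returning ->enter_freeze
callback. my_soc_enter_freeze() and my_soc_suspend_cpu() are hypothetical
names; the drivers converted in this series (acpi_idle_enter_freeze,
intel_idle_freeze) simply return 0.

#include <linux/cpuidle.h>
#include <linux/errno.h>

/* Hypothetical SoC hook that may fail, e.g. if firmware rejects the state. */
static int my_soc_suspend_cpu(int index);

static int my_soc_enter_freeze(struct cpuidle_device *dev,
			       struct cpuidle_driver *drv, int index)
{
	int ret;

	/* Interrupts stay disabled here; timekeeping may already be suspended. */
	ret = my_soc_suspend_cpu(index);
	if (ret < 0)
		return ret;

	return 0;
}

A negative return value propagates from enter_freeze_proper() to
cpuidle_enter_freeze(), which records it in cpuidle_freeze_error and calls
freeze_wake(); freeze_enter() then picks it up via cpuidle_complete_freeze(),
and s2idle_loop() aborts the suspend with that error.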