===================================================================
@@ -53,6 +53,8 @@ static pm_message_t pm_transition;
*/
static bool transition_started;
+static unsigned int nr_async;
+
/**
* device_pm_lock - Lock the list of active devices used by the PM core.
*/
@@ -166,6 +168,8 @@ static void dpm_reset_all(void)
list_for_each_entry(dev, &dpm_list, power.entry)
dpm_reset(dev);
+ printk(KERN_INFO "PM: Scheduled %u async operations\n", nr_async);
+ nr_async = 0;
}
/**
@@ -563,6 +567,7 @@ static int device_resume_noirq(struct de
if (pm_async_enabled && !pm_trace_enabled && dev->power.async_suspend) {
async_schedule(async_resume_noirq, dev);
+ nr_async++;
return 0;
}
@@ -723,6 +728,7 @@ static int device_resume(struct device *
if (pm_async_enabled && !pm_trace_enabled && dev->power.async_suspend) {
get_device(dev);
async_schedule(async_resume, dev);
+ nr_async++;
return 0;
}
@@ -968,6 +974,7 @@ static int device_suspend_noirq(struct d
if (pm_async_enabled && dev->power.async_suspend) {
async_schedule(async_suspend_noirq, dev);
+ nr_async++;
return 0;
}
@@ -1143,6 +1150,7 @@ static int device_suspend(struct device
if (pm_async_enabled && dev->power.async_suspend) {
get_device(dev);
async_schedule(async_suspend, dev);
+ nr_async++;
return 0;
}
@@ -1247,6 +1255,7 @@ static int dpm_prepare(pm_message_t stat
mutex_lock(&dpm_list_mtx);
transition_started = true;
atomic_set(&async_error, 0);
+ nr_async = 0;
while (!list_empty(&dpm_list)) {
struct device *dev = to_device(dpm_list.next);