===================================================================
@@ -203,6 +203,7 @@ extern struct klist *bus_get_device_klis
* automatically.
* @pm: Power management operations of the device which matched
* this driver.
+ * @pm_async_work: Called after asynchronous runtime resume of the device.
* @p: Driver core's private data, no one other than the driver
* core can touch this.
*
@@ -232,6 +233,7 @@ struct device_driver {
const struct attribute_group **groups;
const struct dev_pm_ops *pm;
+ void (*pm_async_work) (struct device *dev);
struct driver_private *p;
};
===================================================================
@@ -538,6 +538,8 @@ struct dev_pm_info {
unsigned int irq_safe:1;
unsigned int use_autosuspend:1;
unsigned int timer_autosuspends:1;
+ bool run_driver_work:1;
+ bool work_in_progress:1;
enum rpm_request request;
enum rpm_status runtime_status;
int runtime_error;
===================================================================
@@ -22,6 +22,7 @@
#define RPM_GET_PUT 0x04 /* Increment/decrement the
usage_count */
#define RPM_AUTO 0x08 /* Use autosuspend_delay */
+#define RPM_RUN_WORK 0x10 /* Run asynchronous work routine */
#ifdef CONFIG_PM_RUNTIME
@@ -189,6 +190,11 @@ static inline int pm_request_autosuspend
static inline int pm_runtime_get(struct device *dev)
{
+ return __pm_runtime_resume(dev, RPM_GET_PUT | RPM_ASYNC | RPM_RUN_WORK);
+}
+
+static inline int pm_runtime_get_nowork(struct device *dev)
+{
return __pm_runtime_resume(dev, RPM_GET_PUT | RPM_ASYNC);
}
===================================================================
@@ -484,6 +484,15 @@ static int rpm_suspend(struct device *de
goto out;
}
+void rpm_queue_up_resume(struct device *dev)
+{
+ dev->power.request = RPM_REQ_RESUME;
+ if (!dev->power.request_pending) {
+ dev->power.request_pending = true;
+ queue_work(pm_wq, &dev->power.work);
+ }
+}
+
/**
* rpm_resume - Carry out runtime resume of given device.
* @dev: Device to resume.
@@ -495,8 +504,10 @@ static int rpm_suspend(struct device *de
* RPM_NOWAIT and RPM_ASYNC flags. Similarly, if there's a suspend running in
* parallel with this function, either tell the other process to resume after
* suspending (deferred_resume) or wait for it to finish. If the RPM_ASYNC
- * flag is set then queue a resume request; otherwise run the
- * ->runtime_resume() callback directly. Queue an idle notification for the
+ * flag is set, then queue a resume request and if the RPM_RUN_WORK flag is set
+ * too, schedule the execution of the device driver's .pm_async_work()
+ * callback after the resume request has been completed. Otherwise run the
+ * .runtime_resume() callback directly and queue an idle notification for the
* device if the resume succeeded.
*
* This function must be called under dev->power.lock with interrupts disabled.
@@ -519,12 +530,18 @@ static int rpm_resume(struct device *dev
goto out;
/*
- * Other scheduled or pending requests need to be canceled. Small
- * optimization: If an autosuspend timer is running, leave it running
- * rather than cancelling it now only to restart it again in the near
- * future.
+ * Other scheduled or pending requests need to be canceled. If the
+ * execution of driver work is queued up along with a resume request,
+ * do not cancel it.
+ */
+ if (dev->power.request != RPM_REQ_RESUME || !dev->power.run_driver_work)
+ dev->power.request = RPM_REQ_NONE;
+
+ /*
+ * Small optimization: If an autosuspend timer is running, leave it
+ * running rather than cancelling it now only to restart it again in the
+ * near future.
*/
- dev->power.request = RPM_REQ_NONE;
if (!dev->power.timer_autosuspends)
pm_runtime_deactivate_timer(dev);
@@ -533,6 +550,36 @@ static int rpm_resume(struct device *dev
goto out;
}
+ /*
+ * See if we can skip waking up the parent. This is safe only if
+ * power.no_callbacks is set, because otherwise we don't know whether
+ * the resume will actually succeed.
+ */
+ if (dev->power.no_callbacks && !parent && dev->parent) {
+ spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
+ if (dev->parent->power.disable_depth > 0
+ || dev->parent->power.ignore_children
+ || dev->parent->power.runtime_status == RPM_ACTIVE) {
+ atomic_inc(&dev->parent->power.child_count);
+ spin_unlock(&dev->parent->power.lock);
+ retval = 1;
+ goto no_callback; /* Assume success. */
+ }
+ spin_unlock(&dev->parent->power.lock);
+ }
+
+ /*
+ * If the driver's asynchronous work routine is to be executed, schedule
+ * it now.
+ */
+ if (rpmflags & RPM_RUN_WORK) {
+ WARN_ON_ONCE(!(rpmflags & RPM_ASYNC));
+ dev->power.run_driver_work = true;
+ rpm_queue_up_resume(dev);
+ retval = 0;
+ goto out;
+ }
+
if (dev->power.runtime_status == RPM_RESUMING
|| dev->power.runtime_status == RPM_SUSPENDING) {
DEFINE_WAIT(wait);
@@ -572,31 +619,9 @@ static int rpm_resume(struct device *dev
goto repeat;
}
- /*
- * See if we can skip waking up the parent. This is safe only if
- * power.no_callbacks is set, because otherwise we don't know whether
- * the resume will actually succeed.
- */
- if (dev->power.no_callbacks && !parent && dev->parent) {
- spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
- if (dev->parent->power.disable_depth > 0
- || dev->parent->power.ignore_children
- || dev->parent->power.runtime_status == RPM_ACTIVE) {
- atomic_inc(&dev->parent->power.child_count);
- spin_unlock(&dev->parent->power.lock);
- retval = 1;
- goto no_callback; /* Assume success. */
- }
- spin_unlock(&dev->parent->power.lock);
- }
-
/* Carry out an asynchronous or a synchronous resume. */
if (rpmflags & RPM_ASYNC) {
- dev->power.request = RPM_REQ_RESUME;
- if (!dev->power.request_pending) {
- dev->power.request_pending = true;
- queue_work(pm_wq, &dev->power.work);
- }
+ rpm_queue_up_resume(dev);
retval = 0;
goto out;
}
@@ -716,7 +741,25 @@ static void pm_runtime_work(struct work_
rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
break;
case RPM_REQ_RESUME:
- rpm_resume(dev, RPM_NOWAIT);
+ if (!dev->power.run_driver_work
+ || !dev->driver || !dev->driver->pm_async_work) {
+ rpm_resume(dev, RPM_NOWAIT);
+ break;
+ }
+
+ dev->power.run_driver_work = false;
+ dev->power.work_in_progress = true;
+ pm_runtime_get_noresume(dev);
+ rpm_resume(dev, 0);
+ spin_unlock_irq(&dev->power.lock);
+
+ dev->driver->pm_async_work(dev);
+
+ spin_lock_irq(&dev->power.lock);
+ dev->power.work_in_progress = false;
+ wake_up_all(&dev->power.wait_queue);
+ pm_runtime_put_noidle(dev);
+ rpm_idle(dev, RPM_NOWAIT);
break;
}
@@ -983,7 +1026,8 @@ static void __pm_runtime_barrier(struct
if (dev->power.runtime_status == RPM_SUSPENDING
|| dev->power.runtime_status == RPM_RESUMING
- || dev->power.idle_notification) {
+ || dev->power.idle_notification
+ || dev->power.work_in_progress) {
DEFINE_WAIT(wait);
/* Suspend, wake-up or idle notification in progress. */
@@ -992,7 +1036,8 @@ static void __pm_runtime_barrier(struct
TASK_UNINTERRUPTIBLE);
if (dev->power.runtime_status != RPM_SUSPENDING
&& dev->power.runtime_status != RPM_RESUMING
- && !dev->power.idle_notification)
+ && !dev->power.idle_notification
+ && !dev->power.work_in_progress)
break;
spin_unlock_irq(&dev->power.lock);