===================================================================
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -484,6 +484,15 @@ static int rpm_suspend(struct device *de
goto out;
}
+/* Queue up a work item carrying out a resume request for @dev. */
+static void rpm_queue_up_resume(struct device *dev)
+{
+ dev->power.request = RPM_REQ_RESUME;
+ if (!dev->power.request_pending) {
+ dev->power.request_pending = true;
+ queue_work(pm_wq, &dev->power.work);
+ }
+}
+
/**
* rpm_resume - Carry out runtime resume of given device.
* @dev: Device to resume.
@@ -519,12 +528,18 @@ static int rpm_resume(struct device *dev
goto out;
/*
- * Other scheduled or pending requests need to be canceled. Small
- * optimization: If an autosuspend timer is running, leave it running
- * rather than cancelling it now only to restart it again in the near
- * future.
+ * Other scheduled or pending requests need to be canceled. However, if
+ * a function has been queued up for execution together with a resume
+ * request, that request must not be canceled.
+ */
+ if (dev->power.request != RPM_REQ_RESUME || !dev->power.func)
+ dev->power.request = RPM_REQ_NONE;
+
+ /*
+ * Small optimization: If an autosuspend timer is running, leave it
+ * running rather than cancelling it now only to restart it again in the
+ * near future.
*/
- dev->power.request = RPM_REQ_NONE;
if (!dev->power.timer_autosuspends)
pm_runtime_deactivate_timer(dev);
@@ -591,11 +606,7 @@ static int rpm_resume(struct device *dev
/* Carry out an asynchronous or a synchronous resume. */
if (rpmflags & RPM_ASYNC) {
- dev->power.request = RPM_REQ_RESUME;
- if (!dev->power.request_pending) {
- dev->power.request_pending = true;
- queue_work(pm_wq, &dev->power.work);
- }
+ rpm_queue_up_resume(dev);
retval = 0;
goto out;
}
@@ -691,6 +702,7 @@ static int rpm_resume(struct device *dev
static void pm_runtime_work(struct work_struct *work)
{
struct device *dev = container_of(work, struct device, power.work);
+ void (*func)(struct device *) = NULL;
enum rpm_request req;
spin_lock_irq(&dev->power.lock);
@@ -715,11 +727,37 @@ static void pm_runtime_work(struct work_
rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
break;
case RPM_REQ_RESUME:
- rpm_resume(dev, RPM_NOWAIT);
+ func = dev->power.func;
+ if (func) {
+ dev->power.func = NULL;
+ rpm_resume(dev, 0);
+ /*
+ * The function might have been replaced when
+ * rpm_resume() was running the resume callback.
+ */
+ if (dev->power.func)
+ func = dev->power.func;
+ } else {
+ rpm_resume(dev, RPM_NOWAIT);
+ }
break;
}
out:
+ if (func) {
+ pm_runtime_get_noresume(dev);
+ dev->power.function_execution = true;
+ spin_unlock_irq(&dev->power.lock);
+
+ func(dev);
+
+ spin_lock_irq(&dev->power.lock);
+ dev->power.function_execution = false;
+ wake_up_all(&dev->power.wait_queue);
+ pm_runtime_put_noidle(dev);
+ rpm_idle(dev, RPM_NOWAIT);
+ }
+
spin_unlock_irq(&dev->power.lock);
}
@@ -878,6 +916,83 @@ int __pm_runtime_resume(struct device *d
EXPORT_SYMBOL_GPL(__pm_runtime_resume);
/**
+ * __pm_runtime_get_and_call - Increment device usage count and run a function.
+ * @dev: Device to handle.
+ * @func: Function to run.
+ * @force: Whether to run @func even if runtime PM is disabled or in an error state.
+ *
+ * Increment the device's runtime PM usage counter and execute the given
+ * function if the device's status is "active". Otherwise, the function is
+ * scheduled for future execution along with a resume request.
+ *
+ * If this routine is called twice in a row, the function passed in the second
+ * call replaces the one passed previously, unless the execution of the
+ * previous one has already started (in which case both functions will be run,
+ * unless the later one is canceled along with its resume request).
+ *
+ * Returns 1 if the execution of @func has been scheduled along with a resume
+ * request, 0 if @func has been run synchronously, or an error code if runtime
+ * PM of the device is disabled or in an error state (in which case @func is
+ * only run if @force is set).
+ */
+int __pm_runtime_get_and_call(struct device *dev, void (*func)(struct device *),
+ bool force)
+{
+ unsigned long flags;
+ int ret;
+
+ pm_runtime_get_noresume(dev);
+
+ spin_lock_irqsave(&dev->power.lock, flags);
+
+ ret = dev->power.runtime_error;
+ if (!ret && dev->power.disable_depth > 0)
+ ret = -EINVAL;
+
+ if (ret) {
+ if (func && force) {
+ dev->power.disable_depth++;
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+
+ func(dev);
+
+ spin_lock_irqsave(&dev->power.lock, flags);
+ dev->power.disable_depth--;
+ }
+ goto out;
+ }
+
+ /*
+ * The approach here is the same as in rpm_resume(): an autosuspend timer
+ * left running will only be restarted in the near future anyway, so it is
+ * pointless to cancel it now.
+ */
+ if (!dev->power.timer_autosuspends)
+ pm_runtime_deactivate_timer(dev);
+
+ if (dev->power.runtime_status == RPM_ACTIVE) {
+ dev->power.func = NULL;
+ dev->power.request = RPM_REQ_NONE;
+ ret = 0;
+ } else {
+ /* @func will be run by pm_runtime_work() after the resume completes. */
+ dev->power.func = func;
+ rpm_queue_up_resume(dev);
+ ret = 1;
+ goto out;
+ }
+
+ if (func) {
+ dev->power.function_execution = true;
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+
+ func(dev);
+
+ spin_lock_irqsave(&dev->power.lock, flags);
+ dev->power.function_execution = false;
+ wake_up_all(&dev->power.wait_queue);
+ }
+
+ out:
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+
+ return ret;
+}
+
+/**
* __pm_runtime_set_status - Set runtime PM status of a device.
* @dev: Device to handle.
* @status: New runtime PM status of the device.
@@ -982,7 +1097,8 @@ static void __pm_runtime_barrier(struct
if (dev->power.runtime_status == RPM_SUSPENDING
|| dev->power.runtime_status == RPM_RESUMING
- || dev->power.idle_notification) {
+ || dev->power.idle_notification
+ || dev->power.function_execution) {
DEFINE_WAIT(wait);
/* Suspend, wake-up or idle notification in progress. */
@@ -991,7 +1107,8 @@ static void __pm_runtime_barrier(struct
TASK_UNINTERRUPTIBLE);
if (dev->power.runtime_status != RPM_SUSPENDING
&& dev->power.runtime_status != RPM_RESUMING
- && !dev->power.idle_notification)
+ && !dev->power.idle_notification
+ && !dev->power.function_execution)
break;
spin_unlock_irq(&dev->power.lock);
@@ -1278,6 +1395,7 @@ void pm_runtime_init(struct device *dev)
{
dev->power.runtime_status = RPM_SUSPENDED;
dev->power.idle_notification = false;
+ dev->power.function_execution = false;
dev->power.disable_depth = 1;
atomic_set(&dev->power.usage_count, 0);
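Usage illustration (not part of the patch): a driver could call the new helper
from atomic context, e.g. from a wakeup interrupt handler. The foo_* names
below are hypothetical. If the device is already active, the callback runs
synchronously in the caller's context; otherwise it is run from pm_wq once the
queued-up resume request has been carried out.

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/pm_runtime.h>

static void foo_handle_wakeup(struct device *dev)
{
	/*
	 * Runs either synchronously (the device was RPM_ACTIVE) or from
	 * pm_wq after the queued-up resume has completed.
	 */
	dev_dbg(dev, "processing wakeup event\n");

	/* Balance the usage count taken by pm_runtime_get_and_call(). */
	pm_runtime_put(dev);
}

static irqreturn_t foo_wakeup_irq(int irq, void *data)
{
	struct device *dev = data;
	int ret;

	/*
	 * Non-blocking, so usable in interrupt context as long as the
	 * callback itself does not sleep when it is run synchronously.
	 */
	ret = pm_runtime_get_and_call(dev, foo_handle_wakeup);
	if (ret < 0)
		pm_runtime_put_noidle(dev);	/* callback was not run */

	return IRQ_HANDLED;
}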
===================================================================
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -538,6 +538,7 @@ struct dev_pm_info {
unsigned int irq_safe:1;
unsigned int use_autosuspend:1;
unsigned int timer_autosuspends:1;
+ bool function_execution:1;
enum rpm_request request;
enum rpm_status runtime_status;
int runtime_error;
@@ -547,6 +548,7 @@ struct dev_pm_info {
unsigned long suspended_jiffies;
unsigned long accounting_timestamp;
struct dev_pm_qos_request *pq_req;
+ void (*func)(struct device *);
#endif
struct pm_subsys_data *subsys_data; /* Owned by the subsystem. */
struct pm_qos_constraints *constraints;
===================================================================
--- a/include/linux/pm_runtime.h
+++ b/include/linux/pm_runtime.h
@@ -47,6 +47,9 @@ extern void pm_runtime_set_autosuspend_d
extern unsigned long pm_runtime_autosuspend_expiration(struct device *dev);
extern void pm_runtime_update_max_time_suspended(struct device *dev,
s64 delta_ns);
+extern int __pm_runtime_get_and_call(struct device *dev,
+ void (*func)(struct device *),
+ bool force);
static inline bool pm_children_suspended(struct device *dev)
{
@@ -150,6 +153,16 @@ static inline void pm_runtime_set_autosu
static inline unsigned long pm_runtime_autosuspend_expiration(
struct device *dev) { return 0; }
+static inline int __pm_runtime_get_and_call(struct device *dev,
+ void (*func)(struct device *),
+ bool force)
+{
+ if (func && force)
+ func(dev);
+
+ return 0;
+}
+
#endif /* !CONFIG_PM_RUNTIME */
static inline int pm_runtime_idle(struct device *dev)
@@ -248,4 +261,10 @@ static inline void pm_runtime_dont_use_a
__pm_runtime_use_autosuspend(dev, false);
}
+static inline int pm_runtime_get_and_call(struct device *dev,
+ void (*func)(struct device *))
+{
+ return __pm_runtime_get_and_call(dev, func, false);
+}
+
#endif
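
Teardown sketch (again not part of the patch; foo_* names are hypothetical):
since __pm_runtime_barrier() now also waits for power.function_execution, the
expectation is that disabling runtime PM in a driver's remove path is enough to
guarantee that a callback passed to pm_runtime_get_and_call() has either been
canceled together with its resume request or has finished running before the
driver's data is freed.

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>

struct foo {
	int wakeup_count;	/* driver-private data, for illustration */
};

static int foo_remove(struct platform_device *pdev)
{
	struct foo *chip = platform_get_drvdata(pdev);

	/*
	 * pm_runtime_disable() runs __pm_runtime_barrier(), which with this
	 * patch also waits for a callback that is currently executing and
	 * cancels one that is still queued up along with a resume request.
	 */
	pm_runtime_disable(&pdev->dev);

	/* No callback passed to pm_runtime_get_and_call() can run now. */
	kfree(chip);

	return 0;
}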