@@ -178,13 +178,14 @@ void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy,
{
int i;
+ if (dbs_data->queue_stop)
+ return;
+
if (!all_cpus) {
__gov_queue_work(smp_processor_id(), dbs_data, delay);
} else {
- get_online_cpus();
for_each_cpu(i, policy->cpus)
__gov_queue_work(i, dbs_data, delay);
- put_online_cpus();
}
}
EXPORT_SYMBOL_GPL(gov_queue_work);
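
The re-queue that the new queue_stop check interrupts comes from the
governor's own work function, which re-arms the per-CPU works each time a
sample completes. Below is a minimal, self-contained sketch of the same
guard outside the cpufreq code; the names (sampler, sampler_queue_all,
sampler_fn) are illustrative, not the kernel's:

#include <linux/cpumask.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/workqueue.h>

struct sampler_data {
	int queue_stop;			/* plays the role of dbs_data->queue_stop */
};

struct sampler {
	struct delayed_work work;	/* one per CPU, like cdbs->work */
	struct sampler_data *sd;
};

static DEFINE_PER_CPU(struct sampler, samplers);

static void sampler_queue_all(struct sampler_data *sd, unsigned long delay)
{
	int i;

	/* Same shape as the patched gov_queue_work(): bail out early
	 * once teardown has raised the flag. */
	if (sd->queue_stop)
		return;

	for_each_online_cpu(i)
		schedule_delayed_work_on(i, &per_cpu(samplers, i).work, delay);
}

static void sampler_fn(struct work_struct *work)
{
	struct sampler *s = container_of(to_delayed_work(work),
					 struct sampler, work);

	/* ... sample this CPU's load ... */

	/* Re-arm every CPU's work; this is the path that can resurrect
	 * a peer that the cancel loop has already cancelled. */
	sampler_queue_all(s->sd, HZ / 10);
}
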
@@ -193,12 +194,27 @@ static inline void gov_cancel_work(struct dbs_data *dbs_data,
struct cpufreq_policy *policy)
{
struct cpu_dbs_common_info *cdbs;
- int i;
+ int i, round = 2;
+ dbs_data->queue_stop = 1;
+redo:
+ round--;
for_each_cpu(i, policy->cpus) {
cdbs = dbs_data->cdata->get_cpu_cdbs(i);
cancel_delayed_work_sync(&cdbs->work);
}
+
+ /*
+ * Since there is no lock to prevent the cancelled
+ * work from being re-queued, work cancelled early in
+ * the loop may be queued again by work cancelled later.
+ *
+ * Cancel the work again with dbs_data->queue_stop set;
+ * this time there will be no survivors.
+ */
+ if (round)
+ goto redo;
+ dbs_data->queue_stop = 0;
}
/* Will return if we need to evaluate cpu load again or not */
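
The matching teardown for the sketch above shows why two rounds suffice:
during the first pass a still-running handler may re-arm an
already-cancelled peer before it observes the flag, but because
cancel_delayed_work_sync() waits for each handler to return, anything
queued during the first pass can no longer get past the queue_stop check
and is reaped by the second pass:

static void sampler_cancel_all(struct sampler_data *sd)
{
	int i, round = 2;

	sd->queue_stop = 1;

	/* The first pass may race with a running handler re-arming a
	 * peer; the second pass runs after every handler has returned,
	 * when nothing can get past the queue_stop check any more. */
	while (round--)
		for_each_online_cpu(i)
			cancel_delayed_work_sync(&per_cpu(samplers, i).work);

	/* Clear the flag again, as gov_cancel_work() does, so the
	 * sampler can later be restarted. */
	sd->queue_stop = 0;
}
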
@@ -213,6 +213,7 @@ struct dbs_data {
unsigned int min_sampling_rate;
int usage_count;
void *tuners;
+ int queue_stop;
/* dbs_mutex protects dbs_enable in governor start/stop */
struct mutex mutex;