[2/2] cpufreq: governors: Remove code redundancy between governors

Message ID 118c59263751862b1750ed9f96639a4e70069e66.1359653181.git.viresh.kumar@linaro.org (mailing list archive)
State Accepted, archived

Commit Message

Viresh Kumar Jan. 31, 2013, 5:28 p.m. UTC
With the inclusion of the following patches:

9f4eb10 cpufreq: conservative: call dbs_check_cpu only when necessary
772b4b1 cpufreq: ondemand: call dbs_check_cpu only when necessary

code redundancy is introduced again. Get rid of it.

Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
---
 drivers/cpufreq/cpufreq_conservative.c | 52 ++++-------------------
 drivers/cpufreq/cpufreq_governor.c     | 18 ++++++++
 drivers/cpufreq/cpufreq_governor.h     |  2 +
 drivers/cpufreq/cpufreq_ondemand.c     | 77 ++++++++++------------------------
 4 files changed, 52 insertions(+), 97 deletions(-)
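
The decision the new shared helper makes can be sketched outside the kernel. The standalone C model below is illustrative only and not part of the patch: it mirrors need_load_eval() from the diff further down, but replaces ktime_t with a plain microsecond counter, and struct policy_state, need_load_eval_model() and main() are hypothetical scaffolding. It shows the rate limiting applied to policies shared by several CPUs: a timer triggers a fresh load evaluation only once at least half the sampling rate has elapsed since the last one.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Minimal stand-in for the per-policy state need_load_eval() consults. */
struct policy_state {
	bool shared;		/* policy_is_shared() */
	int64_t time_stamp_us;	/* cdbs->time_stamp, in microseconds */
};

/* Same decision as need_load_eval(): for shared policies, sample again
 * only once at least sampling_rate / 2 microseconds have passed. */
static bool need_load_eval_model(struct policy_state *ps,
				 unsigned int sampling_rate, int64_t now_us)
{
	if (ps->shared) {
		int64_t delta_us = now_us - ps->time_stamp_us;

		if (delta_us < (int64_t)(sampling_rate / 2))
			return false;
		ps->time_stamp_us = now_us;
	}
	return true;
}

int main(void)
{
	struct policy_state ps = { .shared = true, .time_stamp_us = 0 };
	unsigned int rate = 10000;	/* sampling_rate of 10 ms, in us */

	/* Two timers of the same shared policy fire 1 ms apart: only the
	 * first one results in a load evaluation (prints 1, then 0). */
	printf("t=10000us: %d\n", need_load_eval_model(&ps, rate, 10000));
	printf("t=11000us: %d\n", need_load_eval_model(&ps, rate, 11000));
	return 0;
}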

Comments

Fabio Baltieri Jan. 31, 2013, 6:50 p.m. UTC | #1
On Thu, Jan 31, 2013 at 10:58:02PM +0530, Viresh Kumar wrote:
> With the inclusion of the following patches:
> 
> 9f4eb10 cpufreq: conservative: call dbs_check_cpu only when necessary
> 772b4b1 cpufreq: ondemand: call dbs_check_cpu only when necessary
> 
> code redundancy is introduced again. Get rid of it.
> 
> Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
> ---

Hi,

Tested-by: Fabio Baltieri <fabio.baltieri@linaro.org>

Thanks,
Fabio
Rafael Wysocki Jan. 31, 2013, 10:23 p.m. UTC | #2
On Thursday, January 31, 2013 07:50:04 PM Fabio Baltieri wrote:
> On Thu, Jan 31, 2013 at 10:58:02PM +0530, Viresh Kumar wrote:
> > With the inclusion of the following patches:
> > 
> > 9f4eb10 cpufreq: conservative: call dbs_check_cpu only when necessary
> > 772b4b1 cpufreq: ondemand: call dbs_check_cpu only when necessary
> > 
> > code redundancy is introduced again. Get rid of it.
> > 
> > Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
> > ---
> 
> Hi,
> 
> Tested-by: Fabio Baltieri <fabio.baltieri@linaro.org>

OK

Fabio, Viresh, Shawn,

This time I was *really* confused as to what patches I was supposed to take,
from whom and in what order, so I applied a number of them in the order given
by patchwork.  That worked well enough, because (almost) all of them applied
for me without conflicts.  That said I would appreciate it if you could look
into the bleeding-edge branch of my tree and see if there's anything missing
or something that shouldn't be there (cpufreq-wise).

Thanks,
Rafael
Fabio Baltieri Jan. 31, 2013, 10:51 p.m. UTC | #3
Hello Rafael,

On Thu, Jan 31, 2013 at 11:23:54PM +0100, Rafael J. Wysocki wrote:
> On Thursday, January 31, 2013 07:50:04 PM Fabio Baltieri wrote:
> > On Thu, Jan 31, 2013 at 10:58:02PM +0530, Viresh Kumar wrote:
> > > With the inclusion of the following patches:
> > > 
> > > 9f4eb10 cpufreq: conservative: call dbs_check_cpu only when necessary
> > > 772b4b1 cpufreq: ondemand: call dbs_check_cpu only when necessary
> > > 
> > > code redundancy is introduced again. Get rid of it.
> > > 
> > > Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
> > > ---
> > 
> > Hi,
> > 
> > Tested-by: Fabio Baltieri <fabio.baltieri@linaro.org>
> 
> OK
> 
> Fabio, Viresh, Shawn,
> 
> This time I was *really* confused as to what patches I was supposed to take,
> from whom and in what order, so I applied a number of them in the order given
> by patchwork.  That worked well enough, because (almost) all of them applied
> for me without conflicts.  That said I would appreciate it if you could look
> into the bleeding-edge branch of my tree and see if there's anything missing
> or something that shouldn't be there (cpufreq-wise).

Sorry for the confusion; your current bleeding-edge branch (eed52da)
looks good to me.  I also did a quick build and run and it works fine on
my setup.

Many thanks,
Fabio
Viresh Kumar Feb. 1, 2013, 2:31 a.m. UTC | #4
On 1 February 2013 04:21, Fabio Baltieri <fabio.baltieri@linaro.org> wrote:
> On Thu, Jan 31, 2013 at 11:23:54PM +0100, Rafael J. Wysocki wrote:
>> This time I was *really* confused as to what patches I was supposed to take,
>> from whom and in what order, so I applied a number of them in the order given
>> by patchwork.  That worked well enough, because (almost) all of them applied
>> for me without conflicts.  That said I would appreciate it if you could look
>> into the bleeding-edge branch of my tree and see if there's anything missing
>> or something that shouldn't be there (cpufreq-wise).
>
> Sorry for the confusion; your current bleeding-edge branch (eed52da)
> looks good to me.  I also did a quick build and run and it works fine on
> my setup.

Really!! I see bleeding-edge at df0e3f4 and I don't see the $(subject) patch
in it :)

Patch

diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index c18a304..e8bb915 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -111,58 +111,24 @@  static void cs_check_cpu(int cpu, unsigned int load)
 	}
 }
 
-static void cs_timer_update(struct cs_cpu_dbs_info_s *dbs_info, bool sample,
-			    struct delayed_work *dw)
+static void cs_dbs_timer(struct work_struct *work)
 {
+	struct delayed_work *dw = to_delayed_work(work);
+	struct cs_cpu_dbs_info_s *dbs_info = container_of(work,
+			struct cs_cpu_dbs_info_s, cdbs.work.work);
 	unsigned int cpu = dbs_info->cdbs.cur_policy->cpu;
+	struct cs_cpu_dbs_info_s *core_dbs_info = &per_cpu(cs_cpu_dbs_info,
+			cpu);
 	int delay = delay_for_sampling_rate(cs_tuners.sampling_rate);
 
-	if (sample)
+	mutex_lock(&core_dbs_info->cdbs.timer_mutex);
+	if (need_load_eval(&core_dbs_info->cdbs, cs_tuners.sampling_rate))
 		dbs_check_cpu(&cs_dbs_data, cpu);
 
 	schedule_delayed_work_on(smp_processor_id(), dw, delay);
+	mutex_unlock(&core_dbs_info->cdbs.timer_mutex);
 }
 
-static void cs_timer_coordinated(struct cs_cpu_dbs_info_s *dbs_info_local,
-				 struct delayed_work *dw)
-{
-	struct cs_cpu_dbs_info_s *dbs_info;
-	ktime_t time_now;
-	s64 delta_us;
-	bool sample = true;
-
-	/* use leader CPU's dbs_info */
-	dbs_info = &per_cpu(cs_cpu_dbs_info,
-			    dbs_info_local->cdbs.cur_policy->cpu);
-	mutex_lock(&dbs_info->cdbs.timer_mutex);
-
-	time_now = ktime_get();
-	delta_us = ktime_us_delta(time_now, dbs_info->cdbs.time_stamp);
-
-	/* Do nothing if we recently have sampled */
-	if (delta_us < (s64)(cs_tuners.sampling_rate / 2))
-		sample = false;
-	else
-		dbs_info->cdbs.time_stamp = time_now;
-
-	cs_timer_update(dbs_info, sample, dw);
-	mutex_unlock(&dbs_info->cdbs.timer_mutex);
-}
-
-static void cs_dbs_timer(struct work_struct *work)
-{
-	struct delayed_work *dw = to_delayed_work(work);
-	struct cs_cpu_dbs_info_s *dbs_info = container_of(work,
-			struct cs_cpu_dbs_info_s, cdbs.work.work);
-
-	if (policy_is_shared(dbs_info->cdbs.cur_policy)) {
-		cs_timer_coordinated(dbs_info, dw);
-	} else {
-		mutex_lock(&dbs_info->cdbs.timer_mutex);
-		cs_timer_update(dbs_info, true, dw);
-		mutex_unlock(&dbs_info->cdbs.timer_mutex);
-	}
-}
 static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
 		void *data)
 {
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 29d6a59..dc99472 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -177,6 +177,24 @@  static inline void dbs_timer_exit(struct dbs_data *dbs_data, int cpu)
 	cancel_delayed_work_sync(&cdbs->work);
 }
 
+/* Will return if we need to evaluate cpu load again or not */
+bool need_load_eval(struct cpu_dbs_common_info *cdbs,
+		unsigned int sampling_rate)
+{
+	if (policy_is_shared(cdbs->cur_policy)) {
+		ktime_t time_now = ktime_get();
+		s64 delta_us = ktime_us_delta(time_now, cdbs->time_stamp);
+
+		/* Do nothing if we recently have sampled */
+		if (delta_us < (s64)(sampling_rate / 2))
+			return false;
+		else
+			cdbs->time_stamp = time_now;
+	}
+
+	return true;
+}
+
 int cpufreq_governor_dbs(struct dbs_data *dbs_data,
 		struct cpufreq_policy *policy, unsigned int event)
 {
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
index c19a16c..16314b6 100644
--- a/drivers/cpufreq/cpufreq_governor.h
+++ b/drivers/cpufreq/cpufreq_governor.h
@@ -171,6 +171,8 @@  static inline int delay_for_sampling_rate(unsigned int sampling_rate)
 
 u64 get_cpu_idle_time(unsigned int cpu, u64 *wall);
 void dbs_check_cpu(struct dbs_data *dbs_data, int cpu);
+bool need_load_eval(struct cpu_dbs_common_info *cdbs,
+		unsigned int sampling_rate);
 int cpufreq_governor_dbs(struct dbs_data *dbs_data,
 		struct cpufreq_policy *policy, unsigned int event);
 #endif /* _CPUFREQ_GOVERNER_H */
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 75efd5e..f38b8da 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -216,75 +216,44 @@  static void od_check_cpu(int cpu, unsigned int load_freq)
 	}
 }
 
-static void od_timer_update(struct od_cpu_dbs_info_s *dbs_info, bool sample,
-			    struct delayed_work *dw)
+static void od_dbs_timer(struct work_struct *work)
 {
+	struct delayed_work *dw = to_delayed_work(work);
+	struct od_cpu_dbs_info_s *dbs_info =
+		container_of(work, struct od_cpu_dbs_info_s, cdbs.work.work);
 	unsigned int cpu = dbs_info->cdbs.cur_policy->cpu;
-	int delay, sample_type = dbs_info->sample_type;
+	struct od_cpu_dbs_info_s *core_dbs_info = &per_cpu(od_cpu_dbs_info,
+			cpu);
+	int delay, sample_type = core_dbs_info->sample_type;
+	bool eval_load;
+
+	mutex_lock(&core_dbs_info->cdbs.timer_mutex);
+	eval_load = need_load_eval(&core_dbs_info->cdbs,
+			od_tuners.sampling_rate);
 
 	/* Common NORMAL_SAMPLE setup */
-	dbs_info->sample_type = OD_NORMAL_SAMPLE;
+	core_dbs_info->sample_type = OD_NORMAL_SAMPLE;
 	if (sample_type == OD_SUB_SAMPLE) {
-		delay = dbs_info->freq_lo_jiffies;
-		if (sample)
-			__cpufreq_driver_target(dbs_info->cdbs.cur_policy,
-						dbs_info->freq_lo,
+		delay = core_dbs_info->freq_lo_jiffies;
+		if (eval_load)
+			__cpufreq_driver_target(core_dbs_info->cdbs.cur_policy,
+						core_dbs_info->freq_lo,
 						CPUFREQ_RELATION_H);
 	} else {
-		if (sample)
+		if (eval_load)
 			dbs_check_cpu(&od_dbs_data, cpu);
-		if (dbs_info->freq_lo) {
+		if (core_dbs_info->freq_lo) {
 			/* Setup timer for SUB_SAMPLE */
-			dbs_info->sample_type = OD_SUB_SAMPLE;
-			delay = dbs_info->freq_hi_jiffies;
+			core_dbs_info->sample_type = OD_SUB_SAMPLE;
+			delay = core_dbs_info->freq_hi_jiffies;
 		} else {
 			delay = delay_for_sampling_rate(od_tuners.sampling_rate
-						* dbs_info->rate_mult);
+						* core_dbs_info->rate_mult);
 		}
 	}
 
 	schedule_delayed_work_on(smp_processor_id(), dw, delay);
-}
-
-static void od_timer_coordinated(struct od_cpu_dbs_info_s *dbs_info_local,
-				 struct delayed_work *dw)
-{
-	struct od_cpu_dbs_info_s *dbs_info;
-	ktime_t time_now;
-	s64 delta_us;
-	bool sample = true;
-
-	/* use leader CPU's dbs_info */
-	dbs_info = &per_cpu(od_cpu_dbs_info,
-			    dbs_info_local->cdbs.cur_policy->cpu);
-	mutex_lock(&dbs_info->cdbs.timer_mutex);
-
-	time_now = ktime_get();
-	delta_us = ktime_us_delta(time_now, dbs_info->cdbs.time_stamp);
-
-	/* Do nothing if we recently have sampled */
-	if (delta_us < (s64)(od_tuners.sampling_rate / 2))
-		sample = false;
-	else
-		dbs_info->cdbs.time_stamp = time_now;
-
-	od_timer_update(dbs_info, sample, dw);
-	mutex_unlock(&dbs_info->cdbs.timer_mutex);
-}
-
-static void od_dbs_timer(struct work_struct *work)
-{
-	struct delayed_work *dw = to_delayed_work(work);
-	struct od_cpu_dbs_info_s *dbs_info =
-		container_of(work, struct od_cpu_dbs_info_s, cdbs.work.work);
-
-	if (policy_is_shared(dbs_info->cdbs.cur_policy)) {
-		od_timer_coordinated(dbs_info, dw);
-	} else {
-		mutex_lock(&dbs_info->cdbs.timer_mutex);
-		od_timer_update(dbs_info, true, dw);
-		mutex_unlock(&dbs_info->cdbs.timer_mutex);
-	}
+	mutex_unlock(&core_dbs_info->cdbs.timer_mutex);
 }
 
 /************************** sysfs interface ************************/
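
After this patch, cs_dbs_timer() and od_dbs_timer() share the same skeleton and differ only in how the next frequency and delay are chosen. The standalone C sketch below is illustrative only: a pthread mutex and printf stubs stand in for the kernel's timer_mutex, dbs_check_cpu() and schedule_delayed_work_on(), and governor_timer() is a hypothetical name for that common shape (build with -pthread).

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Stubs standing in for the kernel pieces the real callbacks use. */
static pthread_mutex_t timer_mutex = PTHREAD_MUTEX_INITIALIZER;
static bool need_load_eval_stub(void) { return true; }	/* rate limit, per patch */
static void dbs_check_cpu_stub(int cpu) { printf("evaluate load on cpu %d\n", cpu); }
static void reschedule_stub(int delay) { printf("rearm timer, delay %d\n", delay); }

/*
 * Common shape of the timer callbacks after the patch: take the leader
 * CPU's timer_mutex, ask the shared helper whether a sample is due,
 * evaluate load only if it is, then rearm the work item before unlocking.
 */
static void governor_timer(int cpu, int delay)
{
	pthread_mutex_lock(&timer_mutex);
	if (need_load_eval_stub())
		dbs_check_cpu_stub(cpu);
	reschedule_stub(delay);
	pthread_mutex_unlock(&timer_mutex);
}

int main(void)
{
	governor_timer(0, 10);
	return 0;
}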