@@ -1487,6 +1487,7 @@ static void amd_cpuidle_init(struct acpi_processor_power *power)
vendor_override = -1;
}
+#ifdef CONFIG_PM_STATISTIC
uint32_t pmstat_get_cx_nr(unsigned int cpu)
{
return processor_powers[cpu] ? processor_powers[cpu]->count : 0;
@@ -1606,6 +1607,7 @@ int pmstat_reset_cx_stat(unsigned int cpu)
{
return 0;
}
+#endif /* CONFIG_PM_STATISTIC */
void cpuidle_disable_deep_cstate(void)
{
@@ -466,6 +466,7 @@ static int cf_check hwp_cpufreq_cpu_exit(struct cpufreq_policy *policy)
return 0;
}
+#ifdef CONFIG_PM_STATISTIC
/*
* The SDM reads like turbo should be disabled with MSR_IA32_PERF_CTL and
* PERF_CTL_TURBO_DISENGAGE, but that does not seem to actually work, at least
@@ -508,6 +509,7 @@ static int cf_check hwp_cpufreq_update(unsigned int cpu, struct cpufreq_policy *
return per_cpu(hwp_drv_data, cpu)->ret;
}
+#endif /* CONFIG_PM_STATISTIC */
static const struct cpufreq_driver __initconst_cf_clobber
hwp_cpufreq_driver = {
@@ -516,9 +518,12 @@ hwp_cpufreq_driver = {
.target = hwp_cpufreq_target,
.init = hwp_cpufreq_cpu_init,
.exit = hwp_cpufreq_cpu_exit,
+#ifdef CONFIG_PM_STATISTIC
.update = hwp_cpufreq_update,
+#endif
};
+#ifdef CONFIG_PM_STATISTIC
int get_hwp_para(unsigned int cpu,
struct xen_cppc_para *cppc_para)
{
@@ -639,6 +644,7 @@ int set_hwp_para(struct cpufreq_policy *policy,
return hwp_cpufreq_target(policy, 0, 0);
}
+#endif /* CONFIG_PM_STATISTIC */
int __init hwp_register_driver(void)
{
@@ -49,6 +49,7 @@ static void cf_check transition_pstate(void *pstate)
wrmsrl(MSR_PSTATE_CTRL, *(unsigned int *)pstate);
}
+#ifdef CONFIG_PM_STATISTIC
static void cf_check update_cpb(void *data)
{
struct cpufreq_policy *policy = data;
@@ -77,6 +78,7 @@ static int cf_check powernow_cpufreq_update(
return 0;
}
+#endif /* CONFIG_PM_STATISTIC */
static int cf_check powernow_cpufreq_target(
struct cpufreq_policy *policy,
@@ -324,7 +326,9 @@ powernow_cpufreq_driver = {
.target = powernow_cpufreq_target,
.init = powernow_cpufreq_cpu_init,
.exit = powernow_cpufreq_cpu_exit,
+#ifdef CONFIG_PM_STATISTIC
.update = powernow_cpufreq_update
+#endif
};
unsigned int __init powernow_register_driver(void)
@@ -107,6 +107,14 @@ config NEEDS_LIBELF
config NUMA
bool
+config PM_STATISTIC
+	bool "Enable performance management statistics operations"
+	depends on ACPI && HAS_CPUFREQ
+	default y
+	help
+	  Collect CPU idle-state (Cx) and frequency (Px) statistics and
+	  expose them through the XEN_SYSCTL_get_pmstat and
+	  XEN_SYSCTL_pm_op sysctl operations.
+
config STATIC_MEMORY
bool "Static Allocation Support (UNSUPPORTED)" if UNSUPPORTED
depends on DOM0LESS_BOOT
@@ -170,7 +170,7 @@ long do_sysctl(XEN_GUEST_HANDLE_PARAM(xen_sysctl_t) u_sysctl)
op->u.availheap.avail_bytes <<= PAGE_SHIFT;
break;
-#if defined (CONFIG_ACPI) && defined (CONFIG_HAS_CPUFREQ)
+#ifdef CONFIG_PM_STATISTIC
case XEN_SYSCTL_get_pmstat:
ret = do_get_pm_info(&op->u.get_pmstat);
break;
@@ -180,7 +180,7 @@ long do_sysctl(XEN_GUEST_HANDLE_PARAM(xen_sysctl_t) u_sysctl)
if ( ret == -EAGAIN )
copyback = 1;
break;
-#endif
+#endif /* CONFIG_PM_STATISTIC */
case XEN_SYSCTL_page_offline_op:
{
@@ -5,7 +5,7 @@ obj-$(CONFIG_X86) += apei/
obj-bin-y += tables.init.o
obj-$(CONFIG_ACPI_NUMA) += numa.o
obj-y += osl.o
-obj-$(CONFIG_HAS_CPUFREQ) += pmstat.o
+obj-$(CONFIG_PM_STATISTIC) += pmstat.o
obj-$(CONFIG_X86) += hwregs.o
obj-$(CONFIG_X86) += reboot.o
@@ -43,6 +43,167 @@
DEFINE_PER_CPU_READ_MOSTLY(struct pm_px *, cpufreq_statistic_data);
+DEFINE_PER_CPU(spinlock_t, cpufreq_statistic_lock);
+
+/*********************************************************************
+ * Px STATISTIC INFO *
+ *********************************************************************/
+
+static void cpufreq_residency_update(unsigned int cpu, uint8_t state)
+{
+ uint64_t now, total_idle_ns;
+ int64_t delta;
+ struct pm_px *pxpt = per_cpu(cpufreq_statistic_data, cpu);
+
+ total_idle_ns = get_cpu_idle_time(cpu);
+ now = NOW();
+
+ delta = (now - pxpt->prev_state_wall) -
+ (total_idle_ns - pxpt->prev_idle_wall);
+
+ if ( likely(delta >= 0) )
+ pxpt->u.pt[state].residency += delta;
+
+ pxpt->prev_state_wall = now;
+ pxpt->prev_idle_wall = total_idle_ns;
+}
+
+void cpufreq_statistic_update(unsigned int cpu, uint8_t from, uint8_t to)
+{
+ struct pm_px *pxpt;
+ struct processor_pminfo *pmpt = processor_pminfo[cpu];
+ spinlock_t *cpufreq_statistic_lock =
+ &per_cpu(cpufreq_statistic_lock, cpu);
+
+ spin_lock(cpufreq_statistic_lock);
+
+ pxpt = per_cpu(cpufreq_statistic_data, cpu);
+ if ( !pxpt || !pmpt ) {
+ spin_unlock(cpufreq_statistic_lock);
+ return;
+ }
+
+ pxpt->u.last = from;
+ pxpt->u.cur = to;
+ pxpt->u.pt[to].count++;
+
+ cpufreq_residency_update(cpu, from);
+
+ (*(pxpt->u.trans_pt + from * pmpt->perf.state_count + to))++;
+
+ spin_unlock(cpufreq_statistic_lock);
+}
+
+int cpufreq_statistic_init(unsigned int cpu)
+{
+ uint32_t i, count;
+ struct pm_px *pxpt;
+ const struct processor_pminfo *pmpt = processor_pminfo[cpu];
+ spinlock_t *cpufreq_statistic_lock = &per_cpu(cpufreq_statistic_lock, cpu);
+
+ spin_lock_init(cpufreq_statistic_lock);
+
+ if ( !pmpt )
+ return -EINVAL;
+
+ spin_lock(cpufreq_statistic_lock);
+
+ pxpt = per_cpu(cpufreq_statistic_data, cpu);
+ if ( pxpt ) {
+ spin_unlock(cpufreq_statistic_lock);
+ return 0;
+ }
+
+ count = pmpt->perf.state_count;
+
+ pxpt = xzalloc(struct pm_px);
+ if ( !pxpt ) {
+ spin_unlock(cpufreq_statistic_lock);
+ return -ENOMEM;
+ }
+ per_cpu(cpufreq_statistic_data, cpu) = pxpt;
+
+ pxpt->u.trans_pt = xzalloc_array(uint64_t, count * count);
+ if (!pxpt->u.trans_pt) {
+ xfree(pxpt);
+ spin_unlock(cpufreq_statistic_lock);
+ return -ENOMEM;
+ }
+
+ pxpt->u.pt = xzalloc_array(struct pm_px_val, count);
+ if (!pxpt->u.pt) {
+ xfree(pxpt->u.trans_pt);
+ xfree(pxpt);
+ spin_unlock(cpufreq_statistic_lock);
+ return -ENOMEM;
+ }
+
+ pxpt->u.total = pmpt->perf.state_count;
+ pxpt->u.usable = pmpt->perf.state_count - pmpt->perf.platform_limit;
+
+ for (i=0; i < pmpt->perf.state_count; i++)
+ pxpt->u.pt[i].freq = pmpt->perf.states[i].core_frequency;
+
+ pxpt->prev_state_wall = NOW();
+ pxpt->prev_idle_wall = get_cpu_idle_time(cpu);
+
+ spin_unlock(cpufreq_statistic_lock);
+
+ return 0;
+}
+
+void cpufreq_statistic_exit(unsigned int cpu)
+{
+ struct pm_px *pxpt;
+ spinlock_t *cpufreq_statistic_lock = &per_cpu(cpufreq_statistic_lock, cpu);
+
+ spin_lock(cpufreq_statistic_lock);
+
+ pxpt = per_cpu(cpufreq_statistic_data, cpu);
+ if (!pxpt) {
+ spin_unlock(cpufreq_statistic_lock);
+ return;
+ }
+
+ xfree(pxpt->u.trans_pt);
+ xfree(pxpt->u.pt);
+ xfree(pxpt);
+ per_cpu(cpufreq_statistic_data, cpu) = NULL;
+
+ spin_unlock(cpufreq_statistic_lock);
+}
+
+static void cpufreq_statistic_reset(unsigned int cpu)
+{
+ uint32_t i, j, count;
+ struct pm_px *pxpt;
+ const struct processor_pminfo *pmpt = processor_pminfo[cpu];
+ spinlock_t *cpufreq_statistic_lock = &per_cpu(cpufreq_statistic_lock, cpu);
+
+ spin_lock(cpufreq_statistic_lock);
+
+ pxpt = per_cpu(cpufreq_statistic_data, cpu);
+ if ( !pmpt || !pxpt || !pxpt->u.pt || !pxpt->u.trans_pt ) {
+ spin_unlock(cpufreq_statistic_lock);
+ return;
+ }
+
+ count = pmpt->perf.state_count;
+
+ for (i=0; i < count; i++) {
+ pxpt->u.pt[i].residency = 0;
+ pxpt->u.pt[i].count = 0;
+
+ for (j=0; j < count; j++)
+ *(pxpt->u.trans_pt + i*count + j) = 0;
+ }
+
+ pxpt->prev_state_wall = NOW();
+ pxpt->prev_idle_wall = get_cpu_idle_time(cpu);
+
+ spin_unlock(cpufreq_statistic_lock);
+}
+
/*
* Get PM statistic info
*/
@@ -522,34 +683,3 @@ int do_pm_op(struct xen_sysctl_pm_op *op)
return ret;
}
-
-int acpi_set_pdc_bits(uint32_t acpi_id, XEN_GUEST_HANDLE(uint32) pdc)
-{
- u32 bits[3];
- int ret;
-
- if ( copy_from_guest(bits, pdc, 2) )
- ret = -EFAULT;
- else if ( bits[0] != ACPI_PDC_REVISION_ID || !bits[1] )
- ret = -EINVAL;
- else if ( copy_from_guest_offset(bits + 2, pdc, 2, 1) )
- ret = -EFAULT;
- else
- {
- u32 mask = 0;
-
- if ( xen_processor_pmbits & XEN_PROCESSOR_PM_CX )
- mask |= ACPI_PDC_C_MASK | ACPI_PDC_SMP_C1PT;
- if ( xen_processor_pmbits & XEN_PROCESSOR_PM_PX )
- mask |= ACPI_PDC_P_MASK | ACPI_PDC_SMP_C1PT;
- if ( xen_processor_pmbits & XEN_PROCESSOR_PM_TX )
- mask |= ACPI_PDC_T_MASK | ACPI_PDC_SMP_C1PT;
- bits[2] &= (ACPI_PDC_C_MASK | ACPI_PDC_P_MASK | ACPI_PDC_T_MASK |
- ACPI_PDC_SMP_C1PT) & ~mask;
- ret = arch_acpi_set_pdc_bits(acpi_id, bits, mask);
- }
- if ( !ret && __copy_to_guest_offset(pdc, 2, bits + 2, 1) )
- ret = -EFAULT;
-
- return ret;
-}
@@ -582,6 +582,37 @@ out:
return ret;
}
+int acpi_set_pdc_bits(uint32_t acpi_id, XEN_GUEST_HANDLE(uint32) pdc)
+{
+ u32 bits[3];
+ int ret;
+
+ if ( copy_from_guest(bits, pdc, 2) )
+ ret = -EFAULT;
+ else if ( bits[0] != ACPI_PDC_REVISION_ID || !bits[1] )
+ ret = -EINVAL;
+ else if ( copy_from_guest_offset(bits + 2, pdc, 2, 1) )
+ ret = -EFAULT;
+ else
+ {
+ u32 mask = 0;
+
+ if ( xen_processor_pmbits & XEN_PROCESSOR_PM_CX )
+ mask |= ACPI_PDC_C_MASK | ACPI_PDC_SMP_C1PT;
+ if ( xen_processor_pmbits & XEN_PROCESSOR_PM_PX )
+ mask |= ACPI_PDC_P_MASK | ACPI_PDC_SMP_C1PT;
+ if ( xen_processor_pmbits & XEN_PROCESSOR_PM_TX )
+ mask |= ACPI_PDC_T_MASK | ACPI_PDC_SMP_C1PT;
+ bits[2] &= (ACPI_PDC_C_MASK | ACPI_PDC_P_MASK | ACPI_PDC_T_MASK |
+ ACPI_PDC_SMP_C1PT) & ~mask;
+ ret = arch_acpi_set_pdc_bits(acpi_id, bits, mask);
+ }
+ if ( !ret && __copy_to_guest_offset(pdc, 2, bits + 2, 1) )
+ ret = -EFAULT;
+
+ return ret;
+}
+
static void cpufreq_cmdline_common_para(struct cpufreq_policy *new_policy)
{
if (usr_max_freq)
@@ -64,6 +64,7 @@ static int cf_check cpufreq_governor_userspace(
return ret;
}
+#ifdef CONFIG_PM_STATISTIC
int write_userspace_scaling_setspeed(unsigned int cpu, unsigned int freq)
{
struct cpufreq_policy *policy;
@@ -80,6 +81,7 @@ int write_userspace_scaling_setspeed(unsigned int cpu, unsigned int freq)
return __cpufreq_driver_target(policy, freq, CPUFREQ_RELATION_L);
}
+#endif /* CONFIG_PM_STATISTIC */
static bool __init cf_check
cpufreq_userspace_handle_option(const char *name, const char *val)
@@ -57,6 +57,7 @@ static struct dbs_tuners {
static DEFINE_PER_CPU(struct timer, dbs_timer);
+#ifdef CONFIG_PM_STATISTIC
int write_ondemand_sampling_rate(unsigned int sampling_rate)
{
if ( (sampling_rate > MAX_SAMPLING_RATE / MICROSECS(1)) ||
@@ -93,6 +94,7 @@ int get_cpufreq_ondemand_para(uint32_t *sampling_rate_max,
return 0;
}
+#endif /* CONFIG_PM_STATISTIC */
static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
{
@@ -35,168 +35,6 @@ struct cpufreq_driver __read_mostly cpufreq_driver;
struct processor_pminfo *__read_mostly processor_pminfo[NR_CPUS];
DEFINE_PER_CPU_READ_MOSTLY(struct cpufreq_policy *, cpufreq_cpu_policy);
-DEFINE_PER_CPU(spinlock_t, cpufreq_statistic_lock);
-
-/*********************************************************************
- * Px STATISTIC INFO *
- *********************************************************************/
-
-void cpufreq_residency_update(unsigned int cpu, uint8_t state)
-{
- uint64_t now, total_idle_ns;
- int64_t delta;
- struct pm_px *pxpt = per_cpu(cpufreq_statistic_data, cpu);
-
- total_idle_ns = get_cpu_idle_time(cpu);
- now = NOW();
-
- delta = (now - pxpt->prev_state_wall) -
- (total_idle_ns - pxpt->prev_idle_wall);
-
- if ( likely(delta >= 0) )
- pxpt->u.pt[state].residency += delta;
-
- pxpt->prev_state_wall = now;
- pxpt->prev_idle_wall = total_idle_ns;
-}
-
-void cpufreq_statistic_update(unsigned int cpu, uint8_t from, uint8_t to)
-{
- struct pm_px *pxpt;
- struct processor_pminfo *pmpt = processor_pminfo[cpu];
- spinlock_t *cpufreq_statistic_lock =
- &per_cpu(cpufreq_statistic_lock, cpu);
-
- spin_lock(cpufreq_statistic_lock);
-
- pxpt = per_cpu(cpufreq_statistic_data, cpu);
- if ( !pxpt || !pmpt ) {
- spin_unlock(cpufreq_statistic_lock);
- return;
- }
-
- pxpt->u.last = from;
- pxpt->u.cur = to;
- pxpt->u.pt[to].count++;
-
- cpufreq_residency_update(cpu, from);
-
- (*(pxpt->u.trans_pt + from * pmpt->perf.state_count + to))++;
-
- spin_unlock(cpufreq_statistic_lock);
-}
-
-int cpufreq_statistic_init(unsigned int cpu)
-{
- uint32_t i, count;
- struct pm_px *pxpt;
- const struct processor_pminfo *pmpt = processor_pminfo[cpu];
- spinlock_t *cpufreq_statistic_lock = &per_cpu(cpufreq_statistic_lock, cpu);
-
- spin_lock_init(cpufreq_statistic_lock);
-
- if ( !pmpt )
- return -EINVAL;
-
- spin_lock(cpufreq_statistic_lock);
-
- pxpt = per_cpu(cpufreq_statistic_data, cpu);
- if ( pxpt ) {
- spin_unlock(cpufreq_statistic_lock);
- return 0;
- }
-
- count = pmpt->perf.state_count;
-
- pxpt = xzalloc(struct pm_px);
- if ( !pxpt ) {
- spin_unlock(cpufreq_statistic_lock);
- return -ENOMEM;
- }
- per_cpu(cpufreq_statistic_data, cpu) = pxpt;
-
- pxpt->u.trans_pt = xzalloc_array(uint64_t, count * count);
- if (!pxpt->u.trans_pt) {
- xfree(pxpt);
- spin_unlock(cpufreq_statistic_lock);
- return -ENOMEM;
- }
-
- pxpt->u.pt = xzalloc_array(struct pm_px_val, count);
- if (!pxpt->u.pt) {
- xfree(pxpt->u.trans_pt);
- xfree(pxpt);
- spin_unlock(cpufreq_statistic_lock);
- return -ENOMEM;
- }
-
- pxpt->u.total = pmpt->perf.state_count;
- pxpt->u.usable = pmpt->perf.state_count - pmpt->perf.platform_limit;
-
- for (i=0; i < pmpt->perf.state_count; i++)
- pxpt->u.pt[i].freq = pmpt->perf.states[i].core_frequency;
-
- pxpt->prev_state_wall = NOW();
- pxpt->prev_idle_wall = get_cpu_idle_time(cpu);
-
- spin_unlock(cpufreq_statistic_lock);
-
- return 0;
-}
-
-void cpufreq_statistic_exit(unsigned int cpu)
-{
- struct pm_px *pxpt;
- spinlock_t *cpufreq_statistic_lock = &per_cpu(cpufreq_statistic_lock, cpu);
-
- spin_lock(cpufreq_statistic_lock);
-
- pxpt = per_cpu(cpufreq_statistic_data, cpu);
- if (!pxpt) {
- spin_unlock(cpufreq_statistic_lock);
- return;
- }
-
- xfree(pxpt->u.trans_pt);
- xfree(pxpt->u.pt);
- xfree(pxpt);
- per_cpu(cpufreq_statistic_data, cpu) = NULL;
-
- spin_unlock(cpufreq_statistic_lock);
-}
-
-void cpufreq_statistic_reset(unsigned int cpu)
-{
- uint32_t i, j, count;
- struct pm_px *pxpt;
- const struct processor_pminfo *pmpt = processor_pminfo[cpu];
- spinlock_t *cpufreq_statistic_lock = &per_cpu(cpufreq_statistic_lock, cpu);
-
- spin_lock(cpufreq_statistic_lock);
-
- pxpt = per_cpu(cpufreq_statistic_data, cpu);
- if ( !pmpt || !pxpt || !pxpt->u.pt || !pxpt->u.trans_pt ) {
- spin_unlock(cpufreq_statistic_lock);
- return;
- }
-
- count = pmpt->perf.state_count;
-
- for (i=0; i < count; i++) {
- pxpt->u.pt[i].residency = 0;
- pxpt->u.pt[i].count = 0;
-
- for (j=0; j < count; j++)
- *(pxpt->u.trans_pt + i*count + j) = 0;
- }
-
- pxpt->prev_state_wall = NOW();
- pxpt->prev_idle_wall = get_cpu_idle_time(cpu);
-
- spin_unlock(cpufreq_statistic_lock);
-}
-
-
/*********************************************************************
* FREQUENCY TABLE HELPERS *
*********************************************************************/
@@ -386,6 +224,7 @@ int cpufreq_driver_getavg(unsigned int cpu, unsigned int flag)
return policy->cur;
}
+#ifdef CONFIG_PM_STATISTIC
int cpufreq_update_turbo(unsigned int cpu, int new_state)
{
struct cpufreq_policy *policy;
@@ -417,6 +256,7 @@ int cpufreq_update_turbo(unsigned int cpu, int new_state)
return ret;
}
+#endif /* CONFIG_PM_STATISTIC */
int cpufreq_get_turbo_status(unsigned int cpu)
@@ -167,7 +167,9 @@ struct cpufreq_driver {
int (*init)(struct cpufreq_policy *policy);
int (*verify)(struct cpufreq_policy *policy);
int (*setpolicy)(struct cpufreq_policy *policy);
+#ifdef CONFIG_PM_STATISTIC
int (*update)(unsigned int cpu, struct cpufreq_policy *policy);
+#endif
int (*target)(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation);
@@ -238,6 +240,7 @@ struct cpu_dbs_info_s {
int8_t stoppable;
};
+#ifdef CONFIG_PM_STATISTIC
int get_cpufreq_ondemand_para(uint32_t *sampling_rate_max,
uint32_t *sampling_rate_min,
uint32_t *sampling_rate,
@@ -246,6 +249,7 @@ int write_ondemand_sampling_rate(unsigned int sampling_rate);
int write_ondemand_up_threshold(unsigned int up_threshold);
int write_userspace_scaling_setspeed(unsigned int cpu, unsigned int freq);
+#endif /* CONFIG_PM_STATISTIC */
void cpufreq_dbs_timer_suspend(void);
void cpufreq_dbs_timer_resume(void);
@@ -270,10 +274,12 @@ bool hwp_active(void);
static inline bool hwp_active(void) { return false; }
#endif
+#ifdef CONFIG_PM_STATISTIC
int get_hwp_para(unsigned int cpu,
struct xen_cppc_para *cppc_para);
int set_hwp_para(struct cpufreq_policy *policy,
struct xen_set_cppc_para *set_cppc);
+#endif /* CONFIG_PM_STATISTIC */
int acpi_cpufreq_register(void);
@@ -9,11 +9,19 @@
unsigned int powernow_register_driver(void);
unsigned int get_measured_perf(unsigned int cpu, unsigned int flag);
-void cpufreq_residency_update(unsigned int cpu, uint8_t state);
+#ifdef CONFIG_PM_STATISTIC
void cpufreq_statistic_update(unsigned int cpu, uint8_t from, uint8_t to);
int cpufreq_statistic_init(unsigned int cpu);
void cpufreq_statistic_exit(unsigned int cpu);
-void cpufreq_statistic_reset(unsigned int cpu);
+#else
+static inline void cpufreq_statistic_update(unsigned int cpu, uint8_t from,
+                                            uint8_t to) {}
+static inline int cpufreq_statistic_init(unsigned int cpu)
+{
+ return 0;
+}
+static inline void cpufreq_statistic_exit(unsigned int cpu) {}
+#endif /* CONFIG_PM_STATISTIC */
int cpufreq_limit_change(unsigned int cpu);
@@ -158,6 +158,7 @@ int acpi_gsi_to_irq (u32 gsi, unsigned int *irq);
extern unsigned int max_cstate;
extern unsigned int max_csubstate;
+#ifdef CONFIG_PM_STATISTIC
static inline unsigned int acpi_get_cstate_limit(void)
{
return max_cstate;
@@ -177,6 +178,7 @@ static inline void acpi_set_csubstate_limit(unsigned int new_limit)
{
max_csubstate = new_limit;
}
+#endif /* CONFIG_PM_STATISTIC */
#else
static inline unsigned int acpi_get_cstate_limit(void) { return 0; }
@@ -15,11 +15,13 @@ struct compat_processor_power;
long compat_set_cx_pminfo(uint32_t acpi_id, struct compat_processor_power *power);
#endif
+#ifdef CONFIG_PM_STATISTIC
uint32_t pmstat_get_cx_nr(unsigned int cpu);
int pmstat_get_cx_stat(unsigned int cpu, struct pm_cx_stat *stat);
int pmstat_reset_cx_stat(unsigned int cpu);
int do_get_pm_info(struct xen_sysctl_get_pmstat *op);
int do_pm_op(struct xen_sysctl_pm_op *op);
+#endif /* CONFIG_PM_STATISTIC */
#endif /* __XEN_PMSTAT_H_ */
Introduce CONFIG_PM_STATISTIC to gate all performance management
statistics operations. The bulk of the code resides in
xen/drivers/acpi/pmstat.c, which implements the two main PM-related
sysctl ops: do_get_pm_info() and do_pm_op().

The cpufreq Px statistics helpers move from
xen/drivers/cpufreq/utility.c into pmstat.c, with static inline stubs
provided for builds that disable the option, and acpi_set_pdc_bits()
moves out of pmstat.c into xen/drivers/cpufreq/cpufreq.c so that it is
not gated by the new option.

Signed-off-by: Penny Zheng <Penny.Zheng@amd.com>
---
 xen/arch/x86/acpi/cpu_idle.c                 |   2 +
 xen/arch/x86/acpi/cpufreq/hwp.c              |   6 +
 xen/arch/x86/acpi/cpufreq/powernow.c         |   4 +
 xen/common/Kconfig                           |   5 +
 xen/common/sysctl.c                          |   4 +-
 xen/drivers/acpi/Makefile                    |   2 +-
 xen/drivers/acpi/pmstat.c                    | 192 ++++++++++++++++---
 xen/drivers/cpufreq/cpufreq.c                |  31 +++
 xen/drivers/cpufreq/cpufreq_misc_governors.c |   2 +
 xen/drivers/cpufreq/cpufreq_ondemand.c       |   2 +
 xen/drivers/cpufreq/utility.c                | 164 +---------------
 xen/include/acpi/cpufreq/cpufreq.h           |   6 +
 xen/include/acpi/cpufreq/processor_perf.h    |  12 +-
 xen/include/xen/acpi.h                       |   2 +
 xen/include/xen/pmstat.h                     |   2 +
 15 files changed, 238 insertions(+), 198 deletions(-)
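
For context, the header changes rely on the standard compile-out idiom,
so call sites need no #ifdef of their own. A minimal standalone sketch
of the pattern follows, using illustrative names (CONFIG_FOO_STAT,
foo_stat_*) that are not part of this patch:

    /* foo_stat.h: feature gated by a Kconfig option. */
    #ifdef CONFIG_FOO_STAT
    int foo_stat_init(unsigned int cpu);      /* real code in foo_stat.c */
    void foo_stat_update(unsigned int cpu);
    #else
    /* Empty stubs keep callers building when the feature is compiled out. */
    static inline int foo_stat_init(unsigned int cpu) { return 0; }
    static inline void foo_stat_update(unsigned int cpu) {}
    #endif /* CONFIG_FOO_STAT */

Together with the Makefile change that builds pmstat.o only for
CONFIG_PM_STATISTIC=y, this keeps the statistics paths out of the
binary entirely while the rest of the cpufreq machinery builds
unchanged.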