@@ -43,6 +43,174 @@
DEFINE_PER_CPU_READ_MOSTLY(struct pm_px *, cpufreq_statistic_data);
+DEFINE_PER_CPU(spinlock_t, cpufreq_statistic_lock);
+
+/*********************************************************************
+ * Px STATISTIC INFO *
+ *********************************************************************/
+
+/*
+ * Account the time spent in @state since the last update: the elapsed
+ * wall-clock time minus the idle time accumulated over the same interval.
+ * Called with the per-CPU cpufreq_statistic_lock held (its only caller,
+ * cpufreq_statistic_update(), takes it).
+ */
+static void cpufreq_residency_update(unsigned int cpu, uint8_t state)
+{
+    uint64_t now, total_idle_ns;
+    int64_t delta;
+    struct pm_px *pxpt = per_cpu(cpufreq_statistic_data, cpu);
+
+    total_idle_ns = get_cpu_idle_time(cpu);
+    now = NOW();
+
+    delta = (now - pxpt->prev_state_wall) -
+            (total_idle_ns - pxpt->prev_idle_wall);
+
+    /* delta can go negative if idle time grew faster than wall time. */
+    if ( likely(delta >= 0) )
+        pxpt->u.pt[state].residency += delta;
+
+    pxpt->prev_state_wall = now;
+    pxpt->prev_idle_wall = total_idle_ns;
+}
+
+/* Record a Px transition from state @from to state @to on @cpu. */
+void cpufreq_statistic_update(unsigned int cpu, uint8_t from, uint8_t to)
+{
+    struct pm_px *pxpt;
+    const struct processor_pminfo *pmpt = processor_pminfo[cpu];
+    spinlock_t *cpufreq_statistic_lock =
+        &per_cpu(cpufreq_statistic_lock, cpu);
+
+    spin_lock(cpufreq_statistic_lock);
+
+    pxpt = per_cpu(cpufreq_statistic_data, cpu);
+    if ( !pxpt || !pmpt )
+    {
+        spin_unlock(cpufreq_statistic_lock);
+        return;
+    }
+
+    pxpt->u.last = from;
+    pxpt->u.cur = to;
+    pxpt->u.pt[to].count++;
+
+    cpufreq_residency_update(cpu, from);
+
+    /* trans_pt is a flattened state_count x state_count transition matrix. */
+    pxpt->u.trans_pt[from * pmpt->perf.state_count + to]++;
+
+    spin_unlock(cpufreq_statistic_lock);
+}
+
+/*
+ * Allocate and initialise the Px statistic data for @cpu.  Idempotent:
+ * returns 0 without re-allocating if the data already exists.  Returns
+ * -EINVAL when no pminfo is available and -ENOMEM on allocation failure.
+ */
+int cpufreq_statistic_init(unsigned int cpu)
+{
+    uint32_t i, count;
+    struct pm_px *pxpt;
+    const struct processor_pminfo *pmpt = processor_pminfo[cpu];
+    spinlock_t *cpufreq_statistic_lock = &per_cpu(cpufreq_statistic_lock, cpu);
+
+    spin_lock_init(cpufreq_statistic_lock);
+
+    if ( !pmpt )
+        return -EINVAL;
+
+    spin_lock(cpufreq_statistic_lock);
+
+    pxpt = per_cpu(cpufreq_statistic_data, cpu);
+    if ( pxpt )
+    {
+        spin_unlock(cpufreq_statistic_lock);
+        return 0;
+    }
+
+    count = pmpt->perf.state_count;
+
+    pxpt = xzalloc(struct pm_px);
+    if ( !pxpt )
+    {
+        spin_unlock(cpufreq_statistic_lock);
+        return -ENOMEM;
+    }
+
+    pxpt->u.trans_pt = xzalloc_array(uint64_t, count * count);
+    if ( !pxpt->u.trans_pt )
+    {
+        xfree(pxpt);
+        spin_unlock(cpufreq_statistic_lock);
+        return -ENOMEM;
+    }
+
+    pxpt->u.pt = xzalloc_array(struct pm_px_val, count);
+    if ( !pxpt->u.pt )
+    {
+        xfree(pxpt->u.trans_pt);
+        xfree(pxpt);
+        spin_unlock(cpufreq_statistic_lock);
+        return -ENOMEM;
+    }
+
+    pxpt->u.total = count;
+    pxpt->u.usable = count - pmpt->perf.platform_limit;
+
+    for ( i = 0; i < count; i++ )
+        pxpt->u.pt[i].freq = pmpt->perf.states[i].core_frequency;
+
+    pxpt->prev_state_wall = NOW();
+    pxpt->prev_idle_wall = get_cpu_idle_time(cpu);
+
+    /*
+     * Publish the pointer only once the structure is fully initialised:
+     * the previous code set it right after xzalloc(), leaving a dangling
+     * per-CPU pointer (use-after-free for cpufreq_statistic_update())
+     * whenever one of the later array allocations failed.
+     */
+    per_cpu(cpufreq_statistic_data, cpu) = pxpt;
+
+    spin_unlock(cpufreq_statistic_lock);
+
+    return 0;
+}
+
+/* Free the Px statistic data of @cpu and clear the per-CPU pointer. */
+void cpufreq_statistic_exit(unsigned int cpu)
+{
+    struct pm_px *pxpt;
+    spinlock_t *cpufreq_statistic_lock = &per_cpu(cpufreq_statistic_lock, cpu);
+
+    spin_lock(cpufreq_statistic_lock);
+
+    pxpt = per_cpu(cpufreq_statistic_data, cpu);
+    if ( !pxpt )
+    {
+        spin_unlock(cpufreq_statistic_lock);
+        return;
+    }
+
+    xfree(pxpt->u.trans_pt);
+    xfree(pxpt->u.pt);
+    xfree(pxpt);
+    /* Clear before unlocking so readers never see a freed pointer. */
+    per_cpu(cpufreq_statistic_data, cpu) = NULL;
+
+    spin_unlock(cpufreq_statistic_lock);
+}
+
+/*
+ * Zero all Px counters, residencies and the transition matrix of @cpu,
+ * and restart the residency accounting window from now.
+ */
+static void cpufreq_statistic_reset(unsigned int cpu)
+{
+    uint32_t i, j, count;
+    struct pm_px *pxpt;
+    const struct processor_pminfo *pmpt = processor_pminfo[cpu];
+    spinlock_t *cpufreq_statistic_lock = &per_cpu(cpufreq_statistic_lock, cpu);
+
+    spin_lock(cpufreq_statistic_lock);
+
+    pxpt = per_cpu(cpufreq_statistic_data, cpu);
+    if ( !pmpt || !pxpt || !pxpt->u.pt || !pxpt->u.trans_pt )
+    {
+        spin_unlock(cpufreq_statistic_lock);
+        return;
+    }
+
+    count = pmpt->perf.state_count;
+
+    for ( i = 0; i < count; i++ )
+    {
+        pxpt->u.pt[i].residency = 0;
+        pxpt->u.pt[i].count = 0;
+
+        for ( j = 0; j < count; j++ )
+            *(pxpt->u.trans_pt + i * count + j) = 0;
+    }
+
+    pxpt->prev_state_wall = NOW();
+    pxpt->prev_idle_wall = get_cpu_idle_time(cpu);
+
+    spin_unlock(cpufreq_statistic_lock);
+}
+
/*
* Get PM statistic info
*/
@@ -522,34 +690,3 @@ int do_pm_op(struct xen_sysctl_pm_op *op)
return ret;
}
-
-int acpi_set_pdc_bits(uint32_t acpi_id, XEN_GUEST_HANDLE(uint32) pdc)
-{
- u32 bits[3];
- int ret;
-
- if ( copy_from_guest(bits, pdc, 2) )
- ret = -EFAULT;
- else if ( bits[0] != ACPI_PDC_REVISION_ID || !bits[1] )
- ret = -EINVAL;
- else if ( copy_from_guest_offset(bits + 2, pdc, 2, 1) )
- ret = -EFAULT;
- else
- {
- u32 mask = 0;
-
- if ( xen_processor_pmbits & XEN_PROCESSOR_PM_CX )
- mask |= ACPI_PDC_C_MASK | ACPI_PDC_SMP_C1PT;
- if ( xen_processor_pmbits & XEN_PROCESSOR_PM_PX )
- mask |= ACPI_PDC_P_MASK | ACPI_PDC_SMP_C1PT;
- if ( xen_processor_pmbits & XEN_PROCESSOR_PM_TX )
- mask |= ACPI_PDC_T_MASK | ACPI_PDC_SMP_C1PT;
- bits[2] &= (ACPI_PDC_C_MASK | ACPI_PDC_P_MASK | ACPI_PDC_T_MASK |
- ACPI_PDC_SMP_C1PT) & ~mask;
- ret = arch_acpi_set_pdc_bits(acpi_id, bits, mask);
- }
- if ( !ret && __copy_to_guest_offset(pdc, 2, bits + 2, 1) )
- ret = -EFAULT;
-
- return ret;
-}
@@ -582,6 +582,37 @@ out:
return ret;
}
+/*
+ * Handler for the XEN_PM_PDC sub-hypercall: validate the guest-supplied
+ * _PDC buffer (revision id, capability count) and mask the capability
+ * bits against the PM features Xen itself manages before passing them
+ * to the architecture code.  The sanitised bits are copied back out.
+ */
+int acpi_set_pdc_bits(uint32_t acpi_id, XEN_GUEST_HANDLE(uint32) pdc)
+{
+    uint32_t bits[3];
+    int ret;
+
+    if ( copy_from_guest(bits, pdc, 2) )
+        ret = -EFAULT;
+    else if ( bits[0] != ACPI_PDC_REVISION_ID || !bits[1] )
+        ret = -EINVAL;
+    else if ( copy_from_guest_offset(bits + 2, pdc, 2, 1) )
+        ret = -EFAULT;
+    else
+    {
+        uint32_t mask = 0;
+
+        if ( xen_processor_pmbits & XEN_PROCESSOR_PM_CX )
+            mask |= ACPI_PDC_C_MASK | ACPI_PDC_SMP_C1PT;
+        if ( xen_processor_pmbits & XEN_PROCESSOR_PM_PX )
+            mask |= ACPI_PDC_P_MASK | ACPI_PDC_SMP_C1PT;
+        if ( xen_processor_pmbits & XEN_PROCESSOR_PM_TX )
+            mask |= ACPI_PDC_T_MASK | ACPI_PDC_SMP_C1PT;
+        bits[2] &= (ACPI_PDC_C_MASK | ACPI_PDC_P_MASK | ACPI_PDC_T_MASK |
+                    ACPI_PDC_SMP_C1PT) & ~mask;
+        ret = arch_acpi_set_pdc_bits(acpi_id, bits, mask);
+    }
+    if ( !ret && __copy_to_guest_offset(pdc, 2, bits + 2, 1) )
+        ret = -EFAULT;
+
+    return ret;
+}
+
static void cpufreq_cmdline_common_para(struct cpufreq_policy *new_policy)
{
if (usr_max_freq)
@@ -35,168 +35,6 @@ struct cpufreq_driver __read_mostly cpufreq_driver;
struct processor_pminfo *__read_mostly processor_pminfo[NR_CPUS];
DEFINE_PER_CPU_READ_MOSTLY(struct cpufreq_policy *, cpufreq_cpu_policy);
-DEFINE_PER_CPU(spinlock_t, cpufreq_statistic_lock);
-
-/*********************************************************************
- * Px STATISTIC INFO *
- *********************************************************************/
-
-void cpufreq_residency_update(unsigned int cpu, uint8_t state)
-{
- uint64_t now, total_idle_ns;
- int64_t delta;
- struct pm_px *pxpt = per_cpu(cpufreq_statistic_data, cpu);
-
- total_idle_ns = get_cpu_idle_time(cpu);
- now = NOW();
-
- delta = (now - pxpt->prev_state_wall) -
- (total_idle_ns - pxpt->prev_idle_wall);
-
- if ( likely(delta >= 0) )
- pxpt->u.pt[state].residency += delta;
-
- pxpt->prev_state_wall = now;
- pxpt->prev_idle_wall = total_idle_ns;
-}
-
-void cpufreq_statistic_update(unsigned int cpu, uint8_t from, uint8_t to)
-{
- struct pm_px *pxpt;
- struct processor_pminfo *pmpt = processor_pminfo[cpu];
- spinlock_t *cpufreq_statistic_lock =
- &per_cpu(cpufreq_statistic_lock, cpu);
-
- spin_lock(cpufreq_statistic_lock);
-
- pxpt = per_cpu(cpufreq_statistic_data, cpu);
- if ( !pxpt || !pmpt ) {
- spin_unlock(cpufreq_statistic_lock);
- return;
- }
-
- pxpt->u.last = from;
- pxpt->u.cur = to;
- pxpt->u.pt[to].count++;
-
- cpufreq_residency_update(cpu, from);
-
- (*(pxpt->u.trans_pt + from * pmpt->perf.state_count + to))++;
-
- spin_unlock(cpufreq_statistic_lock);
-}
-
-int cpufreq_statistic_init(unsigned int cpu)
-{
- uint32_t i, count;
- struct pm_px *pxpt;
- const struct processor_pminfo *pmpt = processor_pminfo[cpu];
- spinlock_t *cpufreq_statistic_lock = &per_cpu(cpufreq_statistic_lock, cpu);
-
- spin_lock_init(cpufreq_statistic_lock);
-
- if ( !pmpt )
- return -EINVAL;
-
- spin_lock(cpufreq_statistic_lock);
-
- pxpt = per_cpu(cpufreq_statistic_data, cpu);
- if ( pxpt ) {
- spin_unlock(cpufreq_statistic_lock);
- return 0;
- }
-
- count = pmpt->perf.state_count;
-
- pxpt = xzalloc(struct pm_px);
- if ( !pxpt ) {
- spin_unlock(cpufreq_statistic_lock);
- return -ENOMEM;
- }
- per_cpu(cpufreq_statistic_data, cpu) = pxpt;
-
- pxpt->u.trans_pt = xzalloc_array(uint64_t, count * count);
- if (!pxpt->u.trans_pt) {
- xfree(pxpt);
- spin_unlock(cpufreq_statistic_lock);
- return -ENOMEM;
- }
-
- pxpt->u.pt = xzalloc_array(struct pm_px_val, count);
- if (!pxpt->u.pt) {
- xfree(pxpt->u.trans_pt);
- xfree(pxpt);
- spin_unlock(cpufreq_statistic_lock);
- return -ENOMEM;
- }
-
- pxpt->u.total = pmpt->perf.state_count;
- pxpt->u.usable = pmpt->perf.state_count - pmpt->perf.platform_limit;
-
- for (i=0; i < pmpt->perf.state_count; i++)
- pxpt->u.pt[i].freq = pmpt->perf.states[i].core_frequency;
-
- pxpt->prev_state_wall = NOW();
- pxpt->prev_idle_wall = get_cpu_idle_time(cpu);
-
- spin_unlock(cpufreq_statistic_lock);
-
- return 0;
-}
-
-void cpufreq_statistic_exit(unsigned int cpu)
-{
- struct pm_px *pxpt;
- spinlock_t *cpufreq_statistic_lock = &per_cpu(cpufreq_statistic_lock, cpu);
-
- spin_lock(cpufreq_statistic_lock);
-
- pxpt = per_cpu(cpufreq_statistic_data, cpu);
- if (!pxpt) {
- spin_unlock(cpufreq_statistic_lock);
- return;
- }
-
- xfree(pxpt->u.trans_pt);
- xfree(pxpt->u.pt);
- xfree(pxpt);
- per_cpu(cpufreq_statistic_data, cpu) = NULL;
-
- spin_unlock(cpufreq_statistic_lock);
-}
-
-void cpufreq_statistic_reset(unsigned int cpu)
-{
- uint32_t i, j, count;
- struct pm_px *pxpt;
- const struct processor_pminfo *pmpt = processor_pminfo[cpu];
- spinlock_t *cpufreq_statistic_lock = &per_cpu(cpufreq_statistic_lock, cpu);
-
- spin_lock(cpufreq_statistic_lock);
-
- pxpt = per_cpu(cpufreq_statistic_data, cpu);
- if ( !pmpt || !pxpt || !pxpt->u.pt || !pxpt->u.trans_pt ) {
- spin_unlock(cpufreq_statistic_lock);
- return;
- }
-
- count = pmpt->perf.state_count;
-
- for (i=0; i < count; i++) {
- pxpt->u.pt[i].residency = 0;
- pxpt->u.pt[i].count = 0;
-
- for (j=0; j < count; j++)
- *(pxpt->u.trans_pt + i*count + j) = 0;
- }
-
- pxpt->prev_state_wall = NOW();
- pxpt->prev_idle_wall = get_cpu_idle_time(cpu);
-
- spin_unlock(cpufreq_statistic_lock);
-}
-
-
/*********************************************************************
* FREQUENCY TABLE HELPERS *
*********************************************************************/
@@ -9,11 +9,9 @@
unsigned int powernow_register_driver(void);
unsigned int get_measured_perf(unsigned int cpu, unsigned int flag);
-void cpufreq_residency_update(unsigned int cpu, uint8_t state);
void cpufreq_statistic_update(unsigned int cpu, uint8_t from, uint8_t to);
int cpufreq_statistic_init(unsigned int cpu);
void cpufreq_statistic_exit(unsigned int cpu);
-void cpufreq_statistic_reset(unsigned int cpu);
int cpufreq_limit_change(unsigned int cpu);
We intend to move the following functions into drivers/acpi/pmstat.c, as
they are all designed for performance statistics:
- cpufreq_residency_update
- cpufreq_statistic_reset
- cpufreq_statistic_update
- cpufreq_statistic_init
- cpufreq_statistic_exit
and to move acpi_set_pdc_bits() out, as it is the handler for the
sub-hypercall XEN_PM_PDC and shall stay together with the other handlers
in drivers/cpufreq/cpufreq.c.

This commit also applies various style corrections while moving these
functions.

Signed-off-by: Penny Zheng <Penny.Zheng@amd.com>
---
v1 -> v2:
- new commit
---
 xen/drivers/acpi/pmstat.c                 | 199 ++++++++++++++++++----
 xen/drivers/cpufreq/cpufreq.c             |  31 ++++
 xen/drivers/cpufreq/utility.c             | 162 ------------------
 xen/include/acpi/cpufreq/processor_perf.h |   2 -
 4 files changed, 199 insertions(+), 195 deletions(-)