@@ -57,6 +57,16 @@ config X86_ACPI_CPUFREQ_CPB
By enabling this option the acpi_cpufreq driver provides the old
entry in addition to the new boost ones, for compatibility reasons.
+config X86_SFI_CPUFREQ
+ tristate "SFI Processor P-States driver"
+ depends on X86_INTEL_MID && SFI
+ help
+	  This adds a CPUFreq driver for some Silvermont based Intel
+	  Atom platforms (like INTEL_MID) which utilize the SFI based
+	  processor performance states.
+
+ If in doubt, say N.
+
config ELAN_CPUFREQ
tristate "AMD Elan SC400 and SC410"
depends on MELAN
@@ -41,6 +41,7 @@ obj-$(CONFIG_X86_P4_CLOCKMOD) += p4-clockmod.o
obj-$(CONFIG_X86_CPUFREQ_NFORCE2) += cpufreq-nforce2.o
obj-$(CONFIG_X86_INTEL_PSTATE) += intel_pstate.o
obj-$(CONFIG_X86_AMD_FREQ_SENSITIVITY) += amd_freq_sensitivity.o
+obj-$(CONFIG_X86_SFI_CPUFREQ) += sfi-cpufreq.o
##################################################################################
# ARM SoC drivers
new file mode 100644
@@ -0,0 +1,332 @@
+/*
+ * SFI Processor P-States Driver
+ * Based on ACPI Processor P-States Driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Author: Vishwesh M Rudramuni <vishwesh.m.rudramuni@intel.com>
+ * Author: Srinidhi Kasagar <srinidhi.kasagar@intel.com>
+ */
+
+#include <linux/cpufreq.h>
+#include <linux/cpuidle.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sfi.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+
+#include <asm/msr.h>
+#include <asm/processor.h>
+
+#define SFI_FREQ_MAX 32		/* capacity of sfi_cpufreq_array */
+#define INTEL_MSR_RANGE 0xffff	/* writable P-state bits of MSR_IA32_PERF_CTL */
+#define INTEL_MSR_BUSRATIO_MASK 0xff00	/* bus-ratio field used to match P-states */
+#define SFI_CPU_MAX 8		/* NOTE(review): not referenced in this file */
+
+/* Per-CPU driver context, installed by sfi_cpufreq_cpu_init(). */
+static DEFINE_PER_CPU(struct sfi_cpufreq_data *, drv_data);
+/* Serializes SFI table parsing and P-state (de)registration. */
+static DEFINE_MUTEX(performance_mutex);
+/* Raw FREQ table entries copied out of the SFI table by parse_freq(). */
+static struct sfi_freq_table_entry sfi_cpufreq_array[SFI_FREQ_MAX];
+
+/* sfi_perf_data is a pointer to percpu data. */
+static struct sfi_processor_performance *sfi_perf_data;
+static struct cpufreq_driver sfi_cpufreq_driver;
+/* Number of valid entries in sfi_cpufreq_array. */
+static int sfi_cpufreq_num;
+
+/* Performance management */
+/* One P-state as described by the SFI FREQ table. */
+struct sfi_processor_px {
+	u32 core_frequency;	/* megahertz */
+	u32 transition_latency;	/* microseconds */
+	u32 control;	/* control value */
+};
+
+/* The full set of P-states for one processor. */
+struct sfi_processor_performance {
+	unsigned int state;	/* index of the current P-state */
+	unsigned int state_count;	/* number of entries in 'states' */
+	struct sfi_processor_px *states;	/* kmalloc'd; freed by unregister */
+};
+
+/* Per-CPU data hung off the drv_data per-cpu pointer. */
+struct sfi_cpufreq_data {
+	struct sfi_processor_performance *sfi_data;
+	struct cpufreq_frequency_table *freq_table;
+};
+
+/*
+ * parse_freq - sfi_table_parse() callback that copies the SFI FREQ table
+ * into the static sfi_cpufreq_array and records the entry count.
+ *
+ * Returns 0 on success, -ENODEV when no table was mapped.
+ */
+static int parse_freq(struct sfi_table_header *table)
+{
+	struct sfi_table_simple *sb;
+	struct sfi_freq_table_entry *pentry;
+	int total_len;
+
+	sb = (struct sfi_table_simple *)table;
+	if (!sb) {
+		pr_warn("SFI: Unable to map frequency table\n");
+		return -ENODEV;
+	}
+
+	if (!sfi_cpufreq_num) {
+		sfi_cpufreq_num = SFI_GET_NUM_ENTRIES(sb,
+					struct sfi_freq_table_entry);
+		/*
+		 * Clamp to the static array size: a firmware table with
+		 * more entries must not overflow sfi_cpufreq_array.
+		 */
+		if (sfi_cpufreq_num > SFI_FREQ_MAX)
+			sfi_cpufreq_num = SFI_FREQ_MAX;
+		pentry = (struct sfi_freq_table_entry *)sb->pentry;
+		total_len = sfi_cpufreq_num * sizeof(*pentry);
+		memcpy(sfi_cpufreq_array, pentry, total_len);
+	}
+
+	return 0;
+}
+
+/*
+ * sfi_processor_get_performance_states - build the P-state array from the
+ * previously parsed sfi_cpufreq_array.
+ *
+ * Allocates @performance->states (freed by
+ * sfi_processor_unregister_performance()).  Returns 0 on success or
+ * -ENOMEM on allocation failure.
+ */
+static int sfi_processor_get_performance_states(
+	struct sfi_processor_performance *performance)
+{
+	int i;
+
+	performance->state_count = sfi_cpufreq_num;
+	performance->states =
+	    kmalloc(sizeof(struct sfi_processor_px) * sfi_cpufreq_num,
+		    GFP_KERNEL);
+	/*
+	 * Bail out before the loop below: the original code kept going and
+	 * dereferenced a NULL 'states' array on allocation failure.
+	 */
+	if (!performance->states)
+		return -ENOMEM;
+
+	pr_info("Num p-states %d\n", sfi_cpufreq_num);
+
+	/* Populate the P-states info from the SFI table */
+	for (i = 0; i < sfi_cpufreq_num; i++) {
+		performance->states[i].core_frequency =
+			sfi_cpufreq_array[i].freq_mhz;
+		performance->states[i].transition_latency =
+			sfi_cpufreq_array[i].latency;
+		performance->states[i].control =
+			sfi_cpufreq_array[i].ctrl_val;
+		pr_info("State [%d]: core_frequency[%d] transition_latency[%d] control[0x%x]\n",
+			i,
+			(u32) performance->states[i].core_frequency,
+			(u32) performance->states[i].transition_latency,
+			(u32) performance->states[i].control);
+	}
+
+	return 0;
+}
+
+/*
+ * sfi_processor_register_performance - (re)parse the SFI FREQ table and
+ * fill @performance with the discovered P-states.
+ *
+ * Returns 0 on success or a negative errno from the state-array build.
+ * The original version always returned 0, hiding -ENOMEM from callers.
+ */
+static int sfi_processor_register_performance(
+	struct sfi_processor_performance *performance)
+{
+	int result;
+
+	mutex_lock(&performance_mutex);
+
+	/* parse the freq table from sfi */
+	sfi_cpufreq_num = 0;
+	sfi_table_parse(SFI_SIG_FREQ, NULL, NULL, parse_freq);
+
+	result = sfi_processor_get_performance_states(performance);
+
+	mutex_unlock(&performance_mutex);
+
+	return result;
+}
+
+/*
+ * sfi_processor_unregister_performance - release the P-state array
+ * allocated by sfi_processor_get_performance_states().
+ */
+void sfi_processor_unregister_performance(
+	struct sfi_processor_performance *performance)
+{
+	mutex_lock(&performance_mutex);
+	kfree(performance->states);
+	/* Clear the stale pointer so a repeated call cannot double-free. */
+	performance->states = NULL;
+	mutex_unlock(&performance_mutex);
+}
+
+/*
+ * extract_freq - map a raw MSR_IA32_PERF_STATUS value to a table frequency.
+ *
+ * Compares only the bus-ratio field of @msr against the control value of
+ * each P-state; falls back to the first table entry when nothing matches.
+ */
+static unsigned extract_freq(u32 msr, struct sfi_cpufreq_data *data)
+{
+	struct sfi_processor_performance *perf = data->sfi_data;
+	struct cpufreq_frequency_table *entry;
+	u32 ratio = msr & INTEL_MSR_BUSRATIO_MASK;
+
+	cpufreq_for_each_entry(entry, data->freq_table) {
+		u32 ctrl = perf->states[entry->driver_data].control;
+
+		if ((ctrl & INTEL_MSR_BUSRATIO_MASK) == ratio)
+			return entry->frequency;
+	}
+
+	/* No match: report the first entry's frequency. */
+	return data->freq_table[0].frequency;
+}
+
+/*
+ * get_cur_val - read the low half of MSR_IA32_PERF_STATUS on any CPU in
+ * @mask; returns 0 for an empty mask.
+ */
+static u32 get_cur_val(const struct cpumask *mask)
+{
+	u32 lo, hi;
+
+	if (unlikely(cpumask_empty(mask)))
+		return 0;
+
+	rdmsr_on_cpu(cpumask_any(mask), MSR_IA32_PERF_STATUS, &lo, &hi);
+	return lo;
+}
+
+/*
+ * get_cur_freq_on_cpu - cpufreq ->get hook: report the current frequency
+ * of @cpu by decoding MSR_IA32_PERF_STATUS.
+ *
+ * The original computed an unused 'cached_freq' (dead code, removed) and
+ * trusted the per-cpu data unconditionally; return 0 if the driver data
+ * is not (or no longer) set up for this CPU.
+ */
+static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
+{
+	struct sfi_cpufreq_data *data = per_cpu(drv_data, cpu);
+
+	pr_debug("get_cur_freq_on_cpu (%d)\n", cpu);
+
+	if (unlikely(!data || !data->sfi_data || !data->freq_table))
+		return 0;
+
+	return extract_freq(get_cur_val(cpumask_of(cpu)), data);
+}
+
+/*
+ * sfi_cpufreq_target - cpufreq ->target_index hook: switch policy->cpu to
+ * the P-state backing freq_table[index].
+ *
+ * Read-modify-write of MSR_IA32_PERF_CTL on the target CPU: only the low
+ * bits covered by INTEL_MSR_RANGE are replaced with the P-state's control
+ * value.  Always returns 0 (the MSR write is not checked).
+ */
+static int sfi_cpufreq_target(struct cpufreq_policy *policy,
+	unsigned int index)
+{
+	struct sfi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
+	struct sfi_processor_performance *perf;
+	unsigned int next_perf_state = 0; /* Index into perf table */
+	u32 lo, hi;
+
+	perf = data->sfi_data;
+	/* driver_data holds the index into perf->states for this row. */
+	next_perf_state = data->freq_table[index].driver_data;
+
+	rdmsr_on_cpu(policy->cpu, MSR_IA32_PERF_CTL, &lo, &hi);
+	lo = (lo & ~INTEL_MSR_RANGE) |
+		((u32) perf->states[next_perf_state].control & INTEL_MSR_RANGE);
+	wrmsr_on_cpu(policy->cpu, MSR_IA32_PERF_CTL, lo, hi);
+
+	/* Remember what we asked for; nothing re-reads the MSR to confirm. */
+	perf->state = next_perf_state;
+
+	return 0;
+}
+
+/*
+ * sfi_cpufreq_cpu_init - cpufreq ->init hook: read the SFI P-state table
+ * and build the frequency table for @policy->cpu.
+ *
+ * Returns 0 on success, -ENOMEM on allocation failure, -ENODEV when the
+ * firmware table provides fewer than two P-states.  On error the unwind
+ * labels release exactly what was acquired before the failure point.
+ */
+static int sfi_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+	unsigned int i, result = 0, valid_states = 0;
+	unsigned int cpu = policy->cpu;
+	struct sfi_cpufreq_data *data;
+	struct sfi_processor_performance *perf;
+
+	pr_debug("%s: CPU:%d\n", __func__, policy->cpu);
+
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	/* Publish the per-cpu context before registering performance data. */
+	data->sfi_data = per_cpu_ptr(sfi_perf_data, cpu);
+	per_cpu(drv_data, cpu) = data;
+
+	result = sfi_processor_register_performance(data->sfi_data);
+	if (result)
+		goto err_free;
+
+	perf = data->sfi_data;
+	policy->shared_type = CPUFREQ_SHARED_TYPE_HW;
+
+	/* capability check */
+	if (perf->state_count <= 1) {
+		pr_debug("No P-States\n");
+		result = -ENODEV;
+		goto err_unreg;
+	}
+
+	/* One extra zeroed slot for the CPUFREQ_TABLE_END sentinel. */
+	data->freq_table = kzalloc(sizeof(*data->freq_table) *
+		    (perf->state_count+1), GFP_KERNEL);
+	if (!data->freq_table) {
+		result = -ENOMEM;
+		goto err_unreg;
+	}
+
+	/* detect transition latency */
+	/*
+	 * Keep the worst (largest) latency across all states; the * 1000
+	 * presumably converts the table's microseconds (see struct
+	 * sfi_processor_px) to the nanoseconds cpufreq expects.
+	 */
+	policy->cpuinfo.transition_latency = 0;
+	for (i = 0; i < perf->state_count; i++) {
+		if ((perf->states[i].transition_latency * 1000) >
+		    policy->cpuinfo.transition_latency)
+			policy->cpuinfo.transition_latency =
+			    perf->states[i].transition_latency * 1000;
+
+		pr_debug(" %cP%d: %d MHz, %d uS\n",
+			(i == perf->state ? '*' : ' '), i,
+			(u32) perf->states[i].core_frequency,
+			(u32) perf->states[i].transition_latency);
+	}
+
+	/* initialize the freq table */
+	for (i = 0; i < perf->state_count; i++) {
+		data->freq_table[valid_states].driver_data = i;
+		data->freq_table[valid_states].frequency =
+		    perf->states[i].core_frequency * 1000;
+		valid_states++;
+	}
+	data->freq_table[valid_states].frequency = CPUFREQ_TABLE_END;
+	perf->state = 0;
+
+	result = cpufreq_table_validate_and_show(policy, data->freq_table);
+	if (result)
+		goto err_freqfree;
+
+	pr_debug("CPU%u - SFI performance management activated.\n", cpu);
+
+	return result;
+
+err_freqfree:
+	kfree(data->freq_table);
+err_unreg:
+	sfi_processor_unregister_performance(perf);
+err_free:
+	kfree(data);
+	per_cpu(drv_data, cpu) = NULL;
+
+	return result;
+}
+
+/*
+ * sfi_cpufreq_cpu_exit - cpufreq ->exit hook: tear down everything
+ * sfi_cpufreq_cpu_init() set up for this CPU.
+ *
+ * Clears the per-cpu pointer first so concurrent ->get callers stop
+ * seeing the data being freed, then releases states, table and context.
+ */
+static int sfi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+{
+	struct sfi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
+
+	pr_debug("sfi_cpufreq_cpu_exit\n");
+
+	per_cpu(drv_data, policy->cpu) = NULL;
+	sfi_processor_unregister_performance(data->sfi_data);
+	kfree(data->freq_table);
+	kfree(data);
+
+	return 0;
+}
+
+/* cpufreq driver operations; frequencies are table-based and constant. */
+static struct cpufreq_driver sfi_cpufreq_driver = {
+	.flags = CPUFREQ_CONST_LOOPS,
+	.get = get_cur_freq_on_cpu,
+	.verify = cpufreq_generic_frequency_table_verify,
+	.target_index = sfi_cpufreq_target,
+	.init = sfi_cpufreq_cpu_init,
+	.exit = sfi_cpufreq_cpu_exit,
+	.name = "sfi-cpufreq",
+	.attr = cpufreq_generic_attr,
+};
+
+/*
+ * sfi_cpufreq_init - module init: allocate the percpu performance data
+ * and register the cpufreq driver.
+ *
+ * Returns 0 on success or a negative errno.  The original leaked the
+ * percpu allocation when cpufreq_register_driver() failed.
+ */
+static int __init sfi_cpufreq_init(void)
+{
+	int ret;
+
+	sfi_perf_data = alloc_percpu(struct sfi_processor_performance);
+	if (!sfi_perf_data) {
+		pr_err("Memory allocation error for sfi_perf_data.\n");
+		return -ENOMEM;
+	}
+
+	ret = cpufreq_register_driver(&sfi_cpufreq_driver);
+	if (ret)
+		free_percpu(sfi_perf_data);
+
+	return ret;
+}
+late_initcall(sfi_cpufreq_init);
+
+/*
+ * sfi_cpufreq_exit - module exit: unregister the driver before freeing
+ * the percpu data it references (order matters).
+ */
+static void __exit sfi_cpufreq_exit(void)
+{
+	cpufreq_unregister_driver(&sfi_cpufreq_driver);
+	free_percpu(sfi_perf_data);
+}
+module_exit(sfi_cpufreq_exit);
+
+MODULE_AUTHOR("Vishwesh M Rudramuni <vishwesh.m.rudramuni@intel.com>");
+MODULE_DESCRIPTION("SFI P-States Driver");
+MODULE_LICENSE("GPL");