@@ -145,7 +145,7 @@ struct hw_residencies
uint64_t cc7;
};
-static void do_get_hw_residencies(void *arg)
+static void cf_check do_get_hw_residencies(void *arg)
{
struct cpuinfo_x86 *c = &current_cpu_data;
struct hw_residencies *hw_res = arg;
@@ -129,7 +129,7 @@ struct drv_cmd {
u32 val;
};
-static void do_drv_read(void *drvcmd)
+static void cf_check do_drv_read(void *drvcmd)
{
struct drv_cmd *cmd;
@@ -148,7 +148,7 @@ static void do_drv_read(void *drvcmd)
}
}
-static void do_drv_write(void *drvcmd)
+static void cf_check do_drv_write(void *drvcmd)
{
struct drv_cmd *cmd;
uint64_t msr_content;
@@ -244,7 +244,7 @@ struct perf_pair {
static DEFINE_PER_CPU(struct perf_pair, gov_perf_pair);
static DEFINE_PER_CPU(struct perf_pair, usr_perf_pair);
-static void read_measured_perf_ctrs(void *_readin)
+static void cf_check read_measured_perf_ctrs(void *_readin)
{
struct perf_pair *readin = _readin;
@@ -340,7 +340,7 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
return extract_freq(get_cur_val(cpumask_of(cpu)), data);
}
-static void feature_detect(void *info)
+static void cf_check feature_detect(void *info)
{
struct cpufreq_policy *policy = info;
unsigned int eax;
@@ -44,12 +44,12 @@
#define ARCH_CPU_FLAG_RESUME 1
-static void transition_pstate(void *pstate)
+static void cf_check transition_pstate(void *pstate)
{
wrmsrl(MSR_PSTATE_CTRL, *(unsigned int *)pstate);
}
-static void update_cpb(void *data)
+static void cf_check update_cpb(void *data)
{
struct cpufreq_policy *policy = data;
@@ -165,7 +165,7 @@ struct amd_cpu_data {
u32 max_hw_pstate;
};
-static void get_cpu_data(void *arg)
+static void cf_check get_cpu_data(void *arg)
{
struct amd_cpu_data *data = arg;
struct processor_performance *perf = data->perf;
@@ -99,7 +99,7 @@ unsigned int acpi_get_processor_id(unsigned int cpu)
return INVALID_ACPIID;
}
-static void get_mwait_ecx(void *info)
+static void cf_check get_mwait_ecx(void *info)
{
*(u32 *)info = cpuid_ecx(CPUID_MWAIT_LEAF);
}
@@ -430,7 +430,7 @@ static void disable_c1_ramping(void)
}
}
-static void disable_c1e(void *unused)
+static void cf_check disable_c1e(void *unused)
{
uint64_t msr_content;
@@ -79,7 +79,7 @@ static int variable_period = 1;
* Collects information of correctable errors and notifies
* Dom0 via an event.
*/
-static void mce_amd_checkregs(void *info)
+static void cf_check mce_amd_checkregs(void *info)
{
mctelem_cookie_t mctc;
struct mca_summary bs;
@@ -961,7 +961,7 @@ void x86_mcinfo_dump(struct mc_info *mi)
} while ( 1 );
}
-static void do_mc_get_cpu_info(void *v)
+static void cf_check do_mc_get_cpu_info(void *v)
{
int cpu = smp_processor_id();
int cindex, cpn;
@@ -1242,7 +1242,7 @@ static void x86_mc_hwcr_wren_restore(uint64_t hwcr)
wrmsrl(MSR_K8_HWCR, hwcr);
}
-static void x86_mc_msrinject(void *data)
+static void cf_check x86_mc_msrinject(void *data)
{
struct xen_mc_msrinject *mci = data;
struct mcinfo_msr *msr;
@@ -1274,7 +1274,7 @@ static void x86_mc_msrinject(void *data)
}
/*ARGSUSED*/
-static void x86_mc_mceinject(void *data)
+static void cf_check x86_mc_mceinject(void *data)
{
printk("Simulating #MC on cpu %d\n", smp_processor_id());
__asm__ __volatile__("int $0x12");
@@ -599,7 +599,7 @@ static void mce_set_owner(void)
cmci_discover();
}
-static void __cpu_mcheck_distribute_cmci(void *unused)
+static void cf_check __cpu_mcheck_distribute_cmci(void *unused)
{
cmci_discover();
}
@@ -32,7 +32,7 @@ static uint64_t period = MCE_PERIOD;
static int adjust = 0;
static int variable_period = 1;
-static void mce_checkregs (void *info)
+static void cf_check mce_checkregs(void *info)
{
mctelem_cookie_t mctc;
struct mca_summary bs;
@@ -533,7 +533,7 @@ static int control_thread_fn(const struct microcode_patch *patch)
return ret;
}
-static int do_microcode_update(void *patch)
+static int cf_check do_microcode_update(void *patch)
{
unsigned int cpu = smp_processor_id();
int ret;
@@ -84,7 +84,7 @@ bool is_var_mtrr_overlapped(const struct mtrr_state *m)
return false;
}
-void mtrr_save_fixed_ranges(void *info)
+void cf_check mtrr_save_fixed_ranges(void *info)
{
get_fixed_ranges(mtrr_state.fixed_ranges);
}
@@ -131,7 +131,7 @@ struct set_mtrr_data {
*/
int hold_mtrr_updates_on_aps;
-static void ipi_handler(void *info)
+static void cf_check ipi_handler(void *info)
/* [SUMMARY] Synchronisation handler. Executed by "other" CPUs.
[RETURNS] Nothing.
*/
@@ -877,7 +877,7 @@ static void mwait_idle(void)
cpuidle_current_governor->reflect(power);
}
-static void auto_demotion_disable(void *dummy)
+static void cf_check auto_demotion_disable(void *dummy)
{
u64 msr_bits;
@@ -886,13 +886,13 @@ static void auto_demotion_disable(void *dummy)
wrmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_bits);
}
-static void byt_auto_demotion_disable(void *dummy)
+static void cf_check byt_auto_demotion_disable(void *dummy)
{
wrmsrl(MSR_CC6_DEMOTION_POLICY_CONFIG, 0);
wrmsrl(MSR_MC6_DEMOTION_POLICY_CONFIG, 0);
}
-static void c1e_promotion_disable(void *dummy)
+static void cf_check c1e_promotion_disable(void *dummy)
{
u64 msr_bits;
@@ -335,7 +335,7 @@ void vpmu_do_interrupt(struct cpu_user_regs *regs)
#endif
}
-static void vpmu_save_force(void *arg)
+static void cf_check vpmu_save_force(void *arg)
{
struct vcpu *v = arg;
struct vpmu_struct *vpmu = vcpu_vpmu(v);
@@ -528,7 +528,7 @@ void vpmu_initialise(struct vcpu *v)
put_vpmu(v);
}
-static void vpmu_clear_last(void *arg)
+static void cf_check vpmu_clear_last(void *arg)
{
if ( this_cpu(last_vcpu) == arg )
this_cpu(last_vcpu) = NULL;
@@ -289,7 +289,7 @@ int xg_free_unused_page(mfn_t mfn)
return rangeset_remove_range(mem, mfn_x(mfn), mfn_x(mfn));
}
-static void ap_resume(void *unused)
+static void cf_check ap_resume(void *unused)
{
BUG_ON(map_vcpuinfo());
BUG_ON(init_evtchn());
@@ -82,8 +82,7 @@ nestedhvm_vcpu_destroy(struct vcpu *v)
alternative_vcall(hvm_funcs.nhvm_vcpu_destroy, v);
}
-static void
-nestedhvm_flushtlb_ipi(void *info)
+static void cf_check nestedhvm_flushtlb_ipi(void *info)
{
struct vcpu *v = current;
struct domain *d = info;
@@ -589,7 +589,7 @@ static void vmx_free_vmcs(paddr_t pa)
free_domheap_page(maddr_to_page(pa));
}
-static void __vmx_clear_vmcs(void *info)
+static void cf_check __vmx_clear_vmcs(void *info)
{
struct vcpu *v = info;
struct vmx_vcpu *vmx = &v->arch.hvm.vmx;
@@ -64,7 +64,7 @@ struct mtrr_state {
};
extern struct mtrr_state mtrr_state;
-extern void mtrr_save_fixed_ranges(void *);
+extern void cf_check mtrr_save_fixed_ranges(void *);
extern void mtrr_save_state(void);
extern int mtrr_add(unsigned long base, unsigned long size,
unsigned int type, char increment);
@@ -1127,7 +1127,7 @@ static inline void clear_pirq_eoi(struct domain *d, unsigned int irq)
}
}
-static void set_eoi_ready(void *data);
+static void cf_check set_eoi_ready(void *data);
static void cf_check irq_guest_eoi_timer_fn(void *data)
{
@@ -1398,7 +1398,7 @@ static void __set_eoi_ready(const struct irq_desc *desc)
}
/* Mark specified IRQ as ready-for-EOI (if it really is) and attempt to EOI. */
-static void set_eoi_ready(void *data)
+static void cf_check set_eoi_ready(void *data)
{
struct irq_desc *desc = data;
@@ -149,7 +149,7 @@ int nmi_active;
(P4_CCCR_OVF_PMI0|P4_CCCR_THRESHOLD(15)|P4_CCCR_COMPLEMENT| \
P4_CCCR_COMPARE|P4_CCCR_REQUIRED|P4_CCCR_ESCR_SELECT(4)|P4_CCCR_ENABLE)
-static void __init wait_for_nmis(void *p)
+static void __init cf_check wait_for_nmis(void *p)
{
unsigned int start_count = this_cpu(nmi_count);
unsigned long ticks = 10 * 1000 * cpu_khz / nmi_hz;
@@ -131,7 +131,7 @@ static void nmi_cpu_save_registers(struct op_msrs *msrs)
}
-static void nmi_save_registers(void * dummy)
+static void cf_check nmi_save_registers(void *dummy)
{
int cpu = smp_processor_id();
struct op_msrs * msrs = &cpu_msrs[cpu];
@@ -179,7 +179,7 @@ static int allocate_msrs(void)
}
-static void nmi_cpu_setup(void * dummy)
+static void cf_check nmi_cpu_setup(void *dummy)
{
int cpu = smp_processor_id();
struct op_msrs * msrs = &cpu_msrs[cpu];
@@ -245,7 +245,7 @@ static void nmi_restore_registers(struct op_msrs * msrs)
}
-static void nmi_cpu_shutdown(void * dummy)
+static void cf_check nmi_cpu_shutdown(void *dummy)
{
int cpu = smp_processor_id();
struct op_msrs * msrs = &cpu_msrs[cpu];
@@ -261,7 +261,7 @@ void nmi_release_counters(void)
}
-static void nmi_cpu_start(void * dummy)
+static void cf_check nmi_cpu_start(void *dummy)
{
int cpu = smp_processor_id();
struct op_msrs const * msrs = &cpu_msrs[cpu];
@@ -278,7 +278,7 @@ int nmi_start(void)
}
-static void nmi_cpu_stop(void * dummy)
+static void cf_check nmi_cpu_stop(void *dummy)
{
unsigned int v;
int cpu = smp_processor_id();
@@ -436,7 +436,7 @@ static void athlon_stop(struct op_msrs const * const msrs)
#define APIC_EILVT_MSG_NMI 0x4
#define APIC_EILVT_LVTOFF_IBS 1
#define APIC_EILVTn(n) (0x500 + 0x10 * n)
-static inline void __init init_ibs_nmi_per_cpu(void *arg)
+static inline void __init cf_check init_ibs_nmi_per_cpu(void *arg)
{
unsigned long reg;
@@ -47,7 +47,7 @@ struct resource_access {
long cf_check cpu_frequency_change_helper(void *);
void check_resource_access(struct resource_access *);
-void resource_access(void *);
+void cf_check resource_access(void *);
#ifndef COMPAT
typedef long ret_t;
@@ -149,7 +149,7 @@ void check_resource_access(struct resource_access *ra)
ra->nr_done = i;
}
-void resource_access(void *info)
+void cf_check resource_access(void *info)
{
struct resource_access *ra = info;
unsigned int i;
@@ -1247,7 +1247,7 @@ struct cos_write_info
const uint32_t *val;
};
-static void do_write_psr_msrs(void *data)
+static void cf_check do_write_psr_msrs(void *data)
{
const struct cos_write_info *info = data;
unsigned int i, index, cos = info->cos;
@@ -118,7 +118,7 @@ static inline void kb_wait(void)
break;
}
-static void noreturn __machine_halt(void *unused)
+static void noreturn cf_check __machine_halt(void *unused)
{
local_irq_disable();
@@ -548,7 +548,7 @@ static int __init cf_check reboot_init(void)
}
__initcall(reboot_init);
-static void noreturn __machine_restart(void *pdelay)
+static void noreturn cf_check __machine_restart(void *pdelay)
{
machine_restart(*(unsigned int *)pdelay);
}
@@ -339,7 +339,7 @@ void __stop_this_cpu(void)
cpumask_clear_cpu(smp_processor_id(), &cpu_online_map);
}
-static void stop_this_cpu(void *dummy)
+static void cf_check stop_this_cpu(void *dummy)
{
__stop_this_cpu();
for ( ; ; )
@@ -69,7 +69,7 @@ struct l3_cache_info {
unsigned long size;
};
-static void l3_cache_get(void *arg)
+static void cf_check l3_cache_get(void *arg)
{
struct cpuid4_info info;
struct l3_cache_info *l3_info = arg;
@@ -1661,7 +1661,7 @@ static void check_tsc_warp(unsigned long tsc_khz, unsigned long *max_warp)
static unsigned long tsc_max_warp, tsc_check_count;
static cpumask_t tsc_check_cpumask;
-static void tsc_check_slave(void *unused)
+static void cf_check tsc_check_slave(void *unused)
{
unsigned int cpu = smp_processor_id();
local_irq_disable();
@@ -1809,7 +1809,7 @@ static void time_calibration_tsc_rendezvous(void *_r)
}
/* Ordinary rendezvous function which does not modify TSC values. */
-static void time_calibration_std_rendezvous(void *_r)
+static void cf_check time_calibration_std_rendezvous(void *_r)
{
struct calibration_rendezvous *r = _r;
unsigned int total_cpus = cpumask_weight(&r->cpu_calibration_map);
@@ -1840,7 +1840,7 @@ static void time_calibration_std_rendezvous(void *_r)
* Rendezvous function used when clocksource is TSC and
* no CPU hotplug will be performed.
*/
-static void time_calibration_nop_rendezvous(void *rv)
+static void cf_check time_calibration_nop_rendezvous(void *rv)
{
const struct calibration_rendezvous *r = rv;
struct cpu_time_stamp *c = &this_cpu(cpu_calibration);
@@ -2032,7 +2032,7 @@ static void __init tsc_check_writability(void)
disable_tsc_sync = true;
}
-static void __init reset_percpu_time(void *unused)
+static void __init cf_check reset_percpu_time(void *unused)
{
struct cpu_time *t = &this_cpu(cpu_time);
@@ -84,13 +84,13 @@ static int cpu_notifier_call_chain(unsigned int cpu, unsigned long action,
return ret;
}
-static void _take_cpu_down(void *unused)
+static void cf_check _take_cpu_down(void *unused)
{
cpu_notifier_call_chain(smp_processor_id(), CPU_DYING, NULL, true);
__cpu_disable();
}
-static int take_cpu_down(void *arg)
+static int cf_check take_cpu_down(void *arg)
{
_take_cpu_down(arg);
return 0;
@@ -660,7 +660,7 @@ static int __init cf_check initialise_gdb(void)
}
presmp_initcall(initialise_gdb);
-static void gdb_pause_this_cpu(void *unused)
+static void cf_check gdb_pause_this_cpu(void *unused)
{
unsigned long flags;
@@ -360,7 +360,7 @@ static cpumask_t read_clocks_cpumask;
static DEFINE_PER_CPU(s_time_t, read_clocks_time);
static DEFINE_PER_CPU(u64, read_cycles_time);
-static void read_clocks_slave(void *unused)
+static void cf_check read_clocks_slave(void *unused)
{
unsigned int cpu = smp_processor_id();
local_irq_disable();
@@ -1898,7 +1898,7 @@ void __init end_boot_allocator(void)
printk("\n");
}
-static void __init smp_scrub_heap_pages(void *data)
+static void __init cf_check smp_scrub_heap_pages(void *data)
{
unsigned long mfn, start, end;
struct page_info *pg;