@@ -13,34 +13,35 @@
#include <mach_apic.h>
#include <asm/hvm/support.h>
#include <asm/setup.h>
#include "cpu.h"
#define select_idle_routine(x) ((void)0)
-static bool_t __init probe_intel_cpuid_faulting(void)
+static bool __init probe_intel_cpuid_faulting(void)
{
uint64_t x;
if (rdmsr_safe(MSR_INTEL_PLATFORM_INFO, x) ||
!(x & MSR_PLATFORM_INFO_CPUID_FAULTING))
return 0;
expected_levelling_cap |= LCAP_faulting;
levelling_caps |= LCAP_faulting;
__set_bit(X86_FEATURE_CPUID_FAULTING, boot_cpu_data.x86_capability);
return 1;
}
-static void set_cpuid_faulting(bool_t enable)
+DEFINE_PER_CPU(bool, cpuid_faulting_enabled);
+
+static void set_cpuid_faulting(bool enable)
{
- static DEFINE_PER_CPU(bool_t, cpuid_faulting_enabled);
- bool_t *this_enabled = &this_cpu(cpuid_faulting_enabled);
+ bool *this_enabled = &this_cpu(cpuid_faulting_enabled);
uint32_t hi, lo;
ASSERT(cpu_has_cpuid_faulting);
if (*this_enabled == enable)
return;
rdmsr(MSR_INTEL_MISC_FEATURES_ENABLES, lo, hi);
@@ -59,16 +59,19 @@ struct cpuidmasks
};
/* Per CPU shadows of masking MSR values, for lazy context switching. */
DECLARE_PER_CPU(struct cpuidmasks, cpuidmasks);
/* Default masking MSR values, calculated at boot. */
extern struct cpuidmasks cpuidmask_defaults;
+/* Whether or not cpuid faulting is available for the current domain. */
+DECLARE_PER_CPU(bool, cpuid_faulting_enabled);
+
#endif /* __ASSEMBLY__ */
#endif /* !__X86_CPUID_H__ */
/*
* Local variables:
* mode: C
* c-file-style: "BSD"
* c-basic-offset: 4
While we're here, use bool instead of bool_t.

Signed-off-by: Kyle Huey <khuey@kylehuey.com>
---
 xen/arch/x86/cpu/intel.c    | 9 +++++----
 xen/include/asm-x86/cpuid.h | 3 +++
 2 files changed, 8 insertions(+), 4 deletions(-)

base-commit: 71b8b46111219a2f83f4f9ae06ac5409744ea86e