
[v2,19/30] x86/cpu: Rework Intel masking/faulting setup

Message ID 1454679743-18133-20-git-send-email-andrew.cooper3@citrix.com (mailing list archive)
State New, archived

Commit Message

Andrew Cooper Feb. 5, 2016, 1:42 p.m. UTC
This patch is best reviewed as its end result rather than as a diff, as it
rewrites almost all of the setup.

On the BSP, cpuid information is used to evaluate the potentially available set
of masking MSRs, and they are unconditionally probed, filling in the
availability information and hardware defaults.  A side effect of this is that
probe_intel_cpuid_faulting() can move to being __init.

The command line parameters are then combined with the hardware defaults to
further restrict the Xen default masking level.  Each cpu is then context
switched into the default levelling state.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
CC: Jan Beulich <JBeulich@suse.com>

v2:
 * Style fixes
 * Provide extra information if opt_cpu_info
 * Extra comment indicating the expected use of intel_ctxt_switch_levelling()
---
 xen/arch/x86/cpu/intel.c | 242 +++++++++++++++++++++++++++++------------------
 1 file changed, 150 insertions(+), 92 deletions(-)

Comments

Jan Beulich Feb. 17, 2016, 7:57 a.m. UTC | #1
>>> On 05.02.16 at 14:42, <andrew.cooper3@citrix.com> wrote:
> --- a/xen/arch/x86/cpu/intel.c
> +++ b/xen/arch/x86/cpu/intel.c
> @@ -18,11 +18,18 @@
>  
>  #define select_idle_routine(x) ((void)0)
>  
> -static unsigned int probe_intel_cpuid_faulting(void)
> +static bool_t __init probe_intel_cpuid_faulting(void)
>  {
>  	uint64_t x;
> -	return !rdmsr_safe(MSR_INTEL_PLATFORM_INFO, x) &&
> -		(x & MSR_PLATFORM_INFO_CPUID_FAULTING);
> +
> +	if ( rdmsr_safe(MSR_INTEL_PLATFORM_INFO, x) ||
> +	     !(x & MSR_PLATFORM_INFO_CPUID_FAULTING) )
> +		return 0;

Partial Xen coding style again.
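
In other words (untested illustration, and presumably the point of the remark,
since intel.c otherwise follows Linux coding style): the condition would lose
the Xen-style spaces inside the parentheses rather than gain them throughout:

	/* Linux-style formatting, matching the rest of intel.c. */
	if (rdmsr_safe(MSR_INTEL_PLATFORM_INFO, x) ||
	    !(x & MSR_PLATFORM_INFO_CPUID_FAULTING))
		return 0;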

> @@ -44,41 +51,46 @@ void set_cpuid_faulting(bool_t enable)
>  }
>  
>  /*
> - * opt_cpuid_mask_ecx/edx: cpuid.1[ecx, edx] feature mask.
> - * For example, E8400[Intel Core 2 Duo Processor series] ecx = 0x0008E3FD,
> - * edx = 0xBFEBFBFF when executing CPUID.EAX = 1 normally. If you want to
> - * 'rev down' to E8400, you can set these values in these Xen boot parameters.
> + * Set caps in expected_levelling_cap, probe a specific masking MSR, and set
> + * caps in levelling_caps if it is found, or clobber the MSR index if missing.
> + * If preset, reads the default value into msr_val.
>   */
> -static void set_cpuidmask(const struct cpuinfo_x86 *c)
> +static void __init __probe_mask_msr(unsigned int *msr, uint64_t caps,
> +				    uint64_t *msr_val)
>  {
> -	static unsigned int msr_basic, msr_ext, msr_xsave;
> -	static enum { not_parsed, no_mask, set_mask } status;
> -	u64 msr_val;
> +	uint64_t val;
>  
> -	if (status == no_mask)
> -		return;
> +	expected_levelling_cap |= caps;
>  
> -	if (status == set_mask)
> -		goto setmask;
> +	if (rdmsr_safe(*msr, val) || wrmsr_safe(*msr, val))
> +		*msr = 0;
> +	else
> +	{
> +		levelling_caps |= caps;
> +		*msr_val = val;
> +	}
> +}

Same as for the AMD side: Perhaps neater if the function returned
the MSR value? (Also again partial Xen coding style here.)
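
As an untested sketch of that suggestion (not what this version of the patch
does), the helper could hand the probed value back to the caller instead of
taking an output pointer:

	/*
	 * Sketch only: probe one masking MSR.  On success the hardware
	 * default is returned; on failure the MSR index is clobbered to 0,
	 * so the (zero) return value never reaches the hardware.
	 */
	static uint64_t __init __probe_mask_msr(unsigned int *msr, uint64_t caps)
	{
		uint64_t val = 0;

		expected_levelling_cap |= caps;

		if (rdmsr_safe(*msr, val) || wrmsr_safe(*msr, val))
			*msr = 0;
		else
			levelling_caps |= caps;

		return val;
	}

	/* Caller side: */
	if (msr_basic)
		cpuidmask_defaults._1cd = __probe_mask_msr(&msr_basic, LCAP_1cd);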

> +/* Indicies of the masking MSRs, or 0 if unavailable. */
> +static unsigned int __read_mostly msr_basic, msr_ext, msr_xsave;

I think this way __read_mostly applies only to msr_basic, which I
don't think is what you want. Also I think you mean "indices" or
"indexes".

> +static void __init probe_masking_msrs(void)
> +{
> +	const struct cpuinfo_x86 *c = &boot_cpu_data;
> +	unsigned int exp_msr_basic = 0, exp_msr_ext = 0, exp_msr_xsave = 0;
>  
>  	/* Only family 6 supports this feature. */
> -	if (c->x86 != 6) {
> -		printk("No CPUID feature masking support available\n");
> +	if (c->x86 != 6)
>  		return;
> -	}
>  
>  	switch (c->x86_model) {
>  	case 0x17: /* Yorkfield, Wolfdale, Penryn, Harpertown(DP) */
>  	case 0x1d: /* Dunnington(MP) */
> -		msr_basic = MSR_INTEL_MASK_V1_CPUID1;
> +		exp_msr_basic = msr_basic = MSR_INTEL_MASK_V1_CPUID1;
>  		break;
>  
>  	case 0x1a: /* Bloomfield, Nehalem-EP(Gainestown) */
> @@ -88,71 +100,126 @@ static void set_cpuidmask(const struct cpuinfo_x86 *c)
>  	case 0x2c: /* Gulftown, Westmere-EP */
>  	case 0x2e: /* Nehalem-EX(Beckton) */
>  	case 0x2f: /* Westmere-EX */
> -		msr_basic = MSR_INTEL_MASK_V2_CPUID1;
> -		msr_ext   = MSR_INTEL_MASK_V2_CPUID80000001;
> +		exp_msr_basic = msr_basic = MSR_INTEL_MASK_V2_CPUID1;
> +		exp_msr_ext   = msr_ext   = MSR_INTEL_MASK_V2_CPUID80000001;
>  		break;
>  
>  	case 0x2a: /* SandyBridge */
>  	case 0x2d: /* SandyBridge-E, SandyBridge-EN, SandyBridge-EP */
> -		msr_basic = MSR_INTEL_MASK_V3_CPUID1;
> -		msr_ext   = MSR_INTEL_MASK_V3_CPUID80000001;
> -		msr_xsave = MSR_INTEL_MASK_V3_CPUIDD_01;
> +		exp_msr_basic = msr_basic = MSR_INTEL_MASK_V3_CPUID1;
> +		exp_msr_ext   = msr_ext   = MSR_INTEL_MASK_V3_CPUID80000001;
> +		exp_msr_xsave = msr_xsave = MSR_INTEL_MASK_V3_CPUIDD_01;
>  		break;
>  	}

Instead of all these changes, and instead of the variables needing
initializers, you could simply initialize all three exp_msr_* right after
the switch().
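
That alternative would look roughly like this (sketch; the exp_msr_* locals
then no longer need initializers):

	unsigned int exp_msr_basic, exp_msr_ext, exp_msr_xsave;

	switch (c->x86_model) {
		/* cases as above, setting only msr_basic/msr_ext/msr_xsave */
	}

	/* Snapshot what the model table led us to expect. */
	exp_msr_basic = msr_basic;
	exp_msr_ext   = msr_ext;
	exp_msr_xsave = msr_xsave;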

> +static void intel_ctxt_switch_levelling(const struct domain *nextd)
> +{
> +	struct cpuidmasks *these_masks = &this_cpu(cpuidmasks);
> +	const struct cpuidmasks *masks = &cpuidmask_defaults;
> +
> +#define LAZY(msr, field)						\
> +	({								\
> +		if (msr && (these_masks->field != masks->field))	\
> +		{							\
> +			wrmsrl(msr, masks->field);			\
> +			these_masks->field = masks->field;		\
> +		}							\
> +	})
> +
> +	LAZY(msr_basic, _1cd);
> +	LAZY(msr_ext,   e1cd);
> +	LAZY(msr_xsave, Da1);

Please either use token concatenation inside the macro body to
eliminate the redundant msr_ prefixes here, or properly
parenthesize the uses of "msr" inside the macro body.
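
A sketch of the token-concatenation variant (one of the two options; untested):

	#define LAZY(part, field)						\
		({								\
			if (msr_##part &&					\
			    (these_masks->field != masks->field)) {		\
				wrmsrl(msr_##part, masks->field);		\
				these_masks->field = masks->field;		\
			}							\
		})

		LAZY(basic, _1cd);
		LAZY(ext,   e1cd);
		LAZY(xsave, Da1);

	#undef LAZY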

> +	if (opt_cpu_info) {
> +		printk(XENLOG_INFO "Levelling caps: %#x\n", levelling_caps);
> +		printk(XENLOG_INFO
> +		       "MSR defaults: 1d 0x%08x, 1c 0x%08x, e1d 0x%08x, "
> +		       "e1c 0x%08x, Da1 0x%08x\n",
> +		       (uint32_t)(cpuidmask_defaults._1cd >> 32),
> +		       (uint32_t)cpuidmask_defaults._1cd,
> +		       (uint32_t)(cpuidmask_defaults.e1cd >> 32),
> +		       (uint32_t)cpuidmask_defaults.e1cd,
> +		       (uint32_t)cpuidmask_defaults.Da1);

Could I convince you to make this second printk() dependent
upon there not being CPUID faulting support?
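
I.e. something along these lines (sketch), keeping the caps line but only
dumping the MSR defaults when CPUID faulting isn't available:

	if (opt_cpu_info) {
		printk(XENLOG_INFO "Levelling caps: %#x\n", levelling_caps);

		if (!cpu_has_cpuid_faulting)
			printk(XENLOG_INFO
			       "MSR defaults: 1d 0x%08x, 1c 0x%08x, e1d 0x%08x, "
			       "e1c 0x%08x, Da1 0x%08x\n",
			       (uint32_t)(cpuidmask_defaults._1cd >> 32),
			       (uint32_t)cpuidmask_defaults._1cd,
			       (uint32_t)(cpuidmask_defaults.e1cd >> 32),
			       (uint32_t)cpuidmask_defaults.e1cd,
			       (uint32_t)cpuidmask_defaults.Da1);
	}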

> @@ -190,22 +257,13 @@ static void early_init_intel(struct cpuinfo_x86 *c)
>  	    (boot_cpu_data.x86_mask == 3 || boot_cpu_data.x86_mask == 4))
>  		paddr_bits = 36;
>  
> -	if (c == &boot_cpu_data && c->x86 == 6) {
> -		if (probe_intel_cpuid_faulting())
> -			__set_bit(X86_FEATURE_CPUID_FAULTING,
> -				  c->x86_capability);
> -	} else if (boot_cpu_has(X86_FEATURE_CPUID_FAULTING)) {
> -		BUG_ON(!probe_intel_cpuid_faulting());
> -		__set_bit(X86_FEATURE_CPUID_FAULTING, c->x86_capability);
> -	}
> +	if (c == &boot_cpu_data)
> +		intel_init_levelling();
> +
> +	if (test_bit(X86_FEATURE_CPUID_FAULTING, boot_cpu_data.x86_capability))
> +            __set_bit(X86_FEATURE_CPUID_FAULTING, c->x86_capability);

Mixing tabs and spaces for indentation.

Jan
Andrew Cooper Feb. 17, 2016, 10:59 a.m. UTC | #2
On 17/02/16 07:57, Jan Beulich wrote:
>
>> +/* Indicies of the masking MSRs, or 0 if unavailable. */
>> +static unsigned int __read_mostly msr_basic, msr_ext, msr_xsave;
> I think this way __read_mostly applies only to msr_basic, which I
> don't think is what you want. Also I think you mean "indices" or
> "indexes".

"Indices" is what I meant.

>
>> +static void __init probe_masking_msrs(void)
>> +{
>> +	const struct cpuinfo_x86 *c = &boot_cpu_data;
>> +	unsigned int exp_msr_basic = 0, exp_msr_ext = 0, exp_msr_xsave = 0;
>>  
>>  	/* Only family 6 supports this feature. */
>> -	if (c->x86 != 6) {
>> -		printk("No CPUID feature masking support available\n");
>> +	if (c->x86 != 6)
>>  		return;
>> -	}
>>  
>>  	switch (c->x86_model) {
>>  	case 0x17: /* Yorkfield, Wolfdale, Penryn, Harpertown(DP) */
>>  	case 0x1d: /* Dunnington(MP) */
>> -		msr_basic = MSR_INTEL_MASK_V1_CPUID1;
>> +		exp_msr_basic = msr_basic = MSR_INTEL_MASK_V1_CPUID1;
>>  		break;
>>  
>>  	case 0x1a: /* Bloomfield, Nehalem-EP(Gainestown) */
>> @@ -88,71 +100,126 @@ static void set_cpuidmask(const struct cpuinfo_x86 *c)
>>  	case 0x2c: /* Gulftown, Westmere-EP */
>>  	case 0x2e: /* Nehalem-EX(Beckton) */
>>  	case 0x2f: /* Westmere-EX */
>> -		msr_basic = MSR_INTEL_MASK_V2_CPUID1;
>> -		msr_ext   = MSR_INTEL_MASK_V2_CPUID80000001;
>> +		exp_msr_basic = msr_basic = MSR_INTEL_MASK_V2_CPUID1;
>> +		exp_msr_ext   = msr_ext   = MSR_INTEL_MASK_V2_CPUID80000001;
>>  		break;
>>  
>>  	case 0x2a: /* SandyBridge */
>>  	case 0x2d: /* SandyBridge-E, SandyBridge-EN, SandyBridge-EP */
>> -		msr_basic = MSR_INTEL_MASK_V3_CPUID1;
>> -		msr_ext   = MSR_INTEL_MASK_V3_CPUID80000001;
>> -		msr_xsave = MSR_INTEL_MASK_V3_CPUIDD_01;
>> +		exp_msr_basic = msr_basic = MSR_INTEL_MASK_V3_CPUID1;
>> +		exp_msr_ext   = msr_ext   = MSR_INTEL_MASK_V3_CPUID80000001;
>> +		exp_msr_xsave = msr_xsave = MSR_INTEL_MASK_V3_CPUIDD_01;
>>  		break;
>>  	}
> Instead of all these changes, and instead of the variables needing
> initializers, you could simply initialize all three exp_msr_* right after
> the switch().

That would certainly be neater.

~Andrew

Patch

diff --git a/xen/arch/x86/cpu/intel.c b/xen/arch/x86/cpu/intel.c
index ad22375..143f497 100644
--- a/xen/arch/x86/cpu/intel.c
+++ b/xen/arch/x86/cpu/intel.c
@@ -18,11 +18,18 @@ 
 
 #define select_idle_routine(x) ((void)0)
 
-static unsigned int probe_intel_cpuid_faulting(void)
+static bool_t __init probe_intel_cpuid_faulting(void)
 {
 	uint64_t x;
-	return !rdmsr_safe(MSR_INTEL_PLATFORM_INFO, x) &&
-		(x & MSR_PLATFORM_INFO_CPUID_FAULTING);
+
+	if ( rdmsr_safe(MSR_INTEL_PLATFORM_INFO, x) ||
+	     !(x & MSR_PLATFORM_INFO_CPUID_FAULTING) )
+		return 0;
+
+	expected_levelling_cap |= LCAP_faulting;
+	levelling_caps |=  LCAP_faulting;
+	__set_bit(X86_FEATURE_CPUID_FAULTING, boot_cpu_data.x86_capability);
+	return 1;
 }
 
 static DEFINE_PER_CPU(bool_t, cpuid_faulting_enabled);
@@ -44,41 +51,46 @@  void set_cpuid_faulting(bool_t enable)
 }
 
 /*
- * opt_cpuid_mask_ecx/edx: cpuid.1[ecx, edx] feature mask.
- * For example, E8400[Intel Core 2 Duo Processor series] ecx = 0x0008E3FD,
- * edx = 0xBFEBFBFF when executing CPUID.EAX = 1 normally. If you want to
- * 'rev down' to E8400, you can set these values in these Xen boot parameters.
+ * Set caps in expected_levelling_cap, probe a specific masking MSR, and set
+ * caps in levelling_caps if it is found, or clobber the MSR index if missing.
+ * If preset, reads the default value into msr_val.
  */
-static void set_cpuidmask(const struct cpuinfo_x86 *c)
+static void __init __probe_mask_msr(unsigned int *msr, uint64_t caps,
+				    uint64_t *msr_val)
 {
-	static unsigned int msr_basic, msr_ext, msr_xsave;
-	static enum { not_parsed, no_mask, set_mask } status;
-	u64 msr_val;
+	uint64_t val;
 
-	if (status == no_mask)
-		return;
+	expected_levelling_cap |= caps;
 
-	if (status == set_mask)
-		goto setmask;
+	if (rdmsr_safe(*msr, val) || wrmsr_safe(*msr, val))
+		*msr = 0;
+	else
+	{
+		levelling_caps |= caps;
+		*msr_val = val;
+	}
+}
 
-	ASSERT((status == not_parsed) && (c == &boot_cpu_data));
-	status = no_mask;
+/* Indicies of the masking MSRs, or 0 if unavailable. */
+static unsigned int __read_mostly msr_basic, msr_ext, msr_xsave;
 
-	if (!~(opt_cpuid_mask_ecx & opt_cpuid_mask_edx &
-	       opt_cpuid_mask_ext_ecx & opt_cpuid_mask_ext_edx &
-	       opt_cpuid_mask_xsave_eax))
-		return;
+/*
+ * Probe for the existance of the expected masking MSRs.  They might easily
+ * not be available if Xen is running virtualised.
+ */
+static void __init probe_masking_msrs(void)
+{
+	const struct cpuinfo_x86 *c = &boot_cpu_data;
+	unsigned int exp_msr_basic = 0, exp_msr_ext = 0, exp_msr_xsave = 0;
 
 	/* Only family 6 supports this feature. */
-	if (c->x86 != 6) {
-		printk("No CPUID feature masking support available\n");
+	if (c->x86 != 6)
 		return;
-	}
 
 	switch (c->x86_model) {
 	case 0x17: /* Yorkfield, Wolfdale, Penryn, Harpertown(DP) */
 	case 0x1d: /* Dunnington(MP) */
-		msr_basic = MSR_INTEL_MASK_V1_CPUID1;
+		exp_msr_basic = msr_basic = MSR_INTEL_MASK_V1_CPUID1;
 		break;
 
 	case 0x1a: /* Bloomfield, Nehalem-EP(Gainestown) */
@@ -88,71 +100,126 @@  static void set_cpuidmask(const struct cpuinfo_x86 *c)
 	case 0x2c: /* Gulftown, Westmere-EP */
 	case 0x2e: /* Nehalem-EX(Beckton) */
 	case 0x2f: /* Westmere-EX */
-		msr_basic = MSR_INTEL_MASK_V2_CPUID1;
-		msr_ext   = MSR_INTEL_MASK_V2_CPUID80000001;
+		exp_msr_basic = msr_basic = MSR_INTEL_MASK_V2_CPUID1;
+		exp_msr_ext   = msr_ext   = MSR_INTEL_MASK_V2_CPUID80000001;
 		break;
 
 	case 0x2a: /* SandyBridge */
 	case 0x2d: /* SandyBridge-E, SandyBridge-EN, SandyBridge-EP */
-		msr_basic = MSR_INTEL_MASK_V3_CPUID1;
-		msr_ext   = MSR_INTEL_MASK_V3_CPUID80000001;
-		msr_xsave = MSR_INTEL_MASK_V3_CPUIDD_01;
+		exp_msr_basic = msr_basic = MSR_INTEL_MASK_V3_CPUID1;
+		exp_msr_ext   = msr_ext   = MSR_INTEL_MASK_V3_CPUID80000001;
+		exp_msr_xsave = msr_xsave = MSR_INTEL_MASK_V3_CPUIDD_01;
 		break;
 	}
 
-	status = set_mask;
+	if (msr_basic)
+		__probe_mask_msr(&msr_basic, LCAP_1cd, &cpuidmask_defaults._1cd);
 
-	if (~(opt_cpuid_mask_ecx & opt_cpuid_mask_edx)) {
-		if (msr_basic)
-			printk("Writing CPUID feature mask ecx:edx -> %08x:%08x\n",
-			       opt_cpuid_mask_ecx, opt_cpuid_mask_edx);
-		else
-			printk("No CPUID feature mask available\n");
-	}
-	else
-		msr_basic = 0;
-
-	if (~(opt_cpuid_mask_ext_ecx & opt_cpuid_mask_ext_edx)) {
-		if (msr_ext)
-			printk("Writing CPUID extended feature mask ecx:edx -> %08x:%08x\n",
-			       opt_cpuid_mask_ext_ecx, opt_cpuid_mask_ext_edx);
-		else
-			printk("No CPUID extended feature mask available\n");
-	}
-	else
-		msr_ext = 0;
-
-	if (~opt_cpuid_mask_xsave_eax) {
-		if (msr_xsave)
-			printk("Writing CPUID xsave feature mask eax -> %08x\n",
-			       opt_cpuid_mask_xsave_eax);
-		else
-			printk("No CPUID xsave feature mask available\n");
+	if (msr_ext)
+		__probe_mask_msr(&msr_ext, LCAP_e1cd, &cpuidmask_defaults.e1cd);
+
+	if (msr_xsave)
+		__probe_mask_msr(&msr_xsave, LCAP_Da1, &cpuidmask_defaults.Da1);
+
+	/*
+	 * Don't bother warning about a mismatch if virtualised.  These MSRs
+	 * are not architectural and almost never virtualised.
+	 */
+	if ((expected_levelling_cap == levelling_caps) ||
+	    cpu_has_hypervisor)
+		return;
+
+	printk(XENLOG_WARNING "Mismatch between expected (%#x) "
+	       "and real (%#x) levelling caps: missing %#x\n",
+	       expected_levelling_cap, levelling_caps,
+	       (expected_levelling_cap ^ levelling_caps) & levelling_caps);
+	printk(XENLOG_WARNING "Fam %#x, model %#x expected (%#x/%#x/%#x), "
+	       "got (%#x/%#x/%#x)\n", c->x86, c->x86_model,
+	       exp_msr_basic, exp_msr_ext, exp_msr_xsave,
+	       msr_basic, msr_ext, msr_xsave);
+	printk(XENLOG_WARNING
+	       "If not running virtualised, please report a bug\n");
+}
+
+/*
+ * Context switch levelling state to the next domain.  A parameter of NULL is
+ * used to context switch to the default host state, and is used by the BSP/AP
+ * startup code.
+ */
+static void intel_ctxt_switch_levelling(const struct domain *nextd)
+{
+	struct cpuidmasks *these_masks = &this_cpu(cpuidmasks);
+	const struct cpuidmasks *masks = &cpuidmask_defaults;
+
+#define LAZY(msr, field)						\
+	({								\
+		if (msr && (these_masks->field != masks->field))	\
+		{							\
+			wrmsrl(msr, masks->field);			\
+			these_masks->field = masks->field;		\
+		}							\
+	})
+
+	LAZY(msr_basic, _1cd);
+	LAZY(msr_ext,   e1cd);
+	LAZY(msr_xsave, Da1);
+
+#undef LAZY
+}
+
+/*
+ * opt_cpuid_mask_ecx/edx: cpuid.1[ecx, edx] feature mask.
+ * For example, E8400[Intel Core 2 Duo Processor series] ecx = 0x0008E3FD,
+ * edx = 0xBFEBFBFF when executing CPUID.EAX = 1 normally. If you want to
+ * 'rev down' to E8400, you can set these values in these Xen boot parameters.
+ */
+static void __init noinline intel_init_levelling(void)
+{
+	if (!probe_intel_cpuid_faulting())
+		probe_masking_msrs();
+
+	if (msr_basic) {
+		uint32_t ecx, edx, tmp;
+
+		cpuid(0x00000001, &tmp, &tmp, &ecx, &edx);
+
+		ecx &= opt_cpuid_mask_ecx;
+		edx &= opt_cpuid_mask_edx;
+
+		cpuidmask_defaults._1cd &= ((u64)edx << 32) | ecx;
 	}
-	else
-		msr_xsave = 0;
-
- setmask:
-	if (msr_basic &&
-	    wrmsr_safe(msr_basic,
-		       ((u64)opt_cpuid_mask_edx << 32) | opt_cpuid_mask_ecx)){
-		msr_basic = 0;
-		printk("Failed to set CPUID feature mask\n");
+
+	if (msr_ext) {
+		uint32_t ecx, edx, tmp;
+
+		cpuid(0x80000001, &tmp, &tmp, &ecx, &edx);
+
+		ecx &= opt_cpuid_mask_ext_ecx;
+		edx &= opt_cpuid_mask_ext_edx;
+
+		cpuidmask_defaults.e1cd &= ((u64)edx << 32) | ecx;
 	}
 
-	if (msr_ext &&
-	    wrmsr_safe(msr_ext,
-		       ((u64)opt_cpuid_mask_ext_edx << 32) | opt_cpuid_mask_ext_ecx)){
-		msr_ext = 0;
-		printk("Failed to set CPUID extended feature mask\n");
+	if (msr_xsave) {
+		uint32_t eax, tmp;
+
+		cpuid_count(0x0000000d, 1, &eax, &tmp, &tmp, &tmp);
+
+		eax &= opt_cpuid_mask_xsave_eax;
+
+		cpuidmask_defaults.Da1 &= (~0ULL << 32) | eax;
 	}
 
-	if (msr_xsave &&
-	    (rdmsr_safe(msr_xsave, msr_val) ||
-	     wrmsr_safe(msr_xsave,
-			(msr_val & (~0ULL << 32)) | opt_cpuid_mask_xsave_eax))){
-		msr_xsave = 0;
-		printk("Failed to set CPUID xsave feature mask\n");
+	if (opt_cpu_info) {
+		printk(XENLOG_INFO "Levelling caps: %#x\n", levelling_caps);
+		printk(XENLOG_INFO
+		       "MSR defaults: 1d 0x%08x, 1c 0x%08x, e1d 0x%08x, "
+		       "e1c 0x%08x, Da1 0x%08x\n",
+		       (uint32_t)(cpuidmask_defaults._1cd >> 32),
+		       (uint32_t)cpuidmask_defaults._1cd,
+		       (uint32_t)(cpuidmask_defaults.e1cd >> 32),
+		       (uint32_t)cpuidmask_defaults.e1cd,
+		       (uint32_t)cpuidmask_defaults.Da1);
 	}
 }
 
@@ -190,22 +257,13 @@  static void early_init_intel(struct cpuinfo_x86 *c)
 	    (boot_cpu_data.x86_mask == 3 || boot_cpu_data.x86_mask == 4))
 		paddr_bits = 36;
 
-	if (c == &boot_cpu_data && c->x86 == 6) {
-		if (probe_intel_cpuid_faulting())
-			__set_bit(X86_FEATURE_CPUID_FAULTING,
-				  c->x86_capability);
-	} else if (boot_cpu_has(X86_FEATURE_CPUID_FAULTING)) {
-		BUG_ON(!probe_intel_cpuid_faulting());
-		__set_bit(X86_FEATURE_CPUID_FAULTING, c->x86_capability);
-	}
+	if (c == &boot_cpu_data)
+		intel_init_levelling();
+
+	if (test_bit(X86_FEATURE_CPUID_FAULTING, boot_cpu_data.x86_capability))
+            __set_bit(X86_FEATURE_CPUID_FAULTING, c->x86_capability);
 
-	if (!cpu_has_cpuid_faulting)
-		set_cpuidmask(c);
-	else if ((c == &boot_cpu_data) &&
-		 (~(opt_cpuid_mask_ecx & opt_cpuid_mask_edx &
-		    opt_cpuid_mask_ext_ecx & opt_cpuid_mask_ext_edx &
-		    opt_cpuid_mask_xsave_eax)))
-		printk("No CPUID feature masking support available\n");
+	intel_ctxt_switch_levelling(NULL);
 }
 
 /*