[v3,14/28] x86/cpu: Move set_cpumask() calls into c_early_init()

Message ID 1458056124-8024-15-git-send-email-andrew.cooper3@citrix.com (mailing list archive)
State New, archived

Commit Message

Andrew Cooper March 15, 2016, 3:35 p.m. UTC
Before c/s 44e24f8567 "x86: don't call generic_identify() redundantly", the
commandline-provided masks would take effect in Xen's view of the features.

As the masks got applied after the query for features, the redundant call to
generic_identify() would clobber the pre-masking feature information with the
post-masking information.

Move the set_cpumask() calls into c_early_init() so their effects take place
before the main query for features in generic_identify().
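For context, the ordering being relied upon looks roughly like this. This is a
hedged sketch of generic_identify() in xen/arch/x86/cpu/common.c, simplified
for illustration rather than quoted verbatim:

    /* Sketch only: simplified shape of generic_identify(). */
    static void generic_identify(struct cpuinfo_x86 *c)
    {
        uint32_t eax, ebx, ecx, edx;

        /* ... vendor, family and model detection ... */

        if (this_cpu->c_early_init)
            this_cpu->c_early_init(c); /* masks now take effect here */

        /* Main query for features, reading the post-masking values. */
        cpuid(0x00000001, &eax, &ebx, &ecx, &edx);
        /* ... ecx/edx recorded into c->x86_capability[] ... */
    }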

The cpuid_mask_* command line parameters now limit the entire system, a
feature XenServer was relying on for testing purposes.  Subsequent changes
will cause the mask MSRs to be context switched per-domain, removing the need
to use the command line parameters for heterogeneous levelling purposes.
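As an illustration of the system-wide effect, a host could hide a feature from
Xen and all domains via the boot command line. The mask value below is a
made-up example; cpuid_mask_ecx applies to CPUID leaf 1 ECX, where clearing
bit 0 hides SSE3:

    xen.gz ... cpuid_mask_ecx=0xfffffffe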

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
CC: Jan Beulich <JBeulich@suse.com>
---
 xen/arch/x86/cpu/amd.c   |  8 ++++++--
 xen/arch/x86/cpu/intel.c | 34 +++++++++++++++++-----------------
 2 files changed, 23 insertions(+), 19 deletions(-)

Comments

Jan Beulich March 21, 2016, 4:16 p.m. UTC | #1
>>> On 15.03.16 at 16:35, <andrew.cooper3@citrix.com> wrote:
> Before c/s 44e24f8567 "x86: don't call generic_identify() redundantly", the
> commandline-provided masks would take effect in Xen's view of the features.
> 
> As the masks got applied after the query for features, the redundant call to
> generic_identify() would clobber the pre-masking feature information with the
> post-masking information.
> 
> Move the set_cpumask() calls into c_early_init() so their effects take place
> before the main query for features in generic_identify().
> 
> The cpuid_mask_* command line parameters now limit the entire system, a
> feature XenServer was relying on for testing purposes.  Subsequent changes
> will cause the mask MSRs to be context switched per-domain, removing the need
> to use the command line parameters for heterogeneous levelling purposes.
> 
> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>

As said before, I'm not in agreement with this change at this point in
the series. If you keep it here, I will make it a requirement that it not
go in independently, ahead of enough of the other patches in the series.

Jan
Patch

diff --git a/xen/arch/x86/cpu/amd.c b/xen/arch/x86/cpu/amd.c
index 47a38c6..5516777 100644
--- a/xen/arch/x86/cpu/amd.c
+++ b/xen/arch/x86/cpu/amd.c
@@ -407,6 +407,11 @@ static void amd_get_topology(struct cpuinfo_x86 *c)
                                                          c->cpu_core_id);
 }
 
+static void early_init_amd(struct cpuinfo_x86 *c)
+{
+	set_cpuidmask(c);
+}
+
 static void init_amd(struct cpuinfo_x86 *c)
 {
 	u32 l, h;
@@ -595,14 +600,13 @@ static void init_amd(struct cpuinfo_x86 *c)
 	if ((smp_processor_id() == 1) && !cpu_has(c, X86_FEATURE_ITSC))
 		disable_c1_ramping();
 
-	set_cpuidmask(c);
-
 	check_syscfg_dram_mod_en();
 }
 
 static const struct cpu_dev amd_cpu_dev = {
 	.c_vendor	= "AMD",
 	.c_ident 	= { "AuthenticAMD" },
+	.c_early_init	= early_init_amd,
 	.c_init		= init_amd,
 };
 
diff --git a/xen/arch/x86/cpu/intel.c b/xen/arch/x86/cpu/intel.c
index bdf89f6..ad22375 100644
--- a/xen/arch/x86/cpu/intel.c
+++ b/xen/arch/x86/cpu/intel.c
@@ -189,6 +189,23 @@ static void early_init_intel(struct cpuinfo_x86 *c)
 	if (boot_cpu_data.x86 == 0xF && boot_cpu_data.x86_model == 3 &&
 	    (boot_cpu_data.x86_mask == 3 || boot_cpu_data.x86_mask == 4))
 		paddr_bits = 36;
+
+	if (c == &boot_cpu_data && c->x86 == 6) {
+		if (probe_intel_cpuid_faulting())
+			__set_bit(X86_FEATURE_CPUID_FAULTING,
+				  c->x86_capability);
+	} else if (boot_cpu_has(X86_FEATURE_CPUID_FAULTING)) {
+		BUG_ON(!probe_intel_cpuid_faulting());
+		__set_bit(X86_FEATURE_CPUID_FAULTING, c->x86_capability);
+	}
+
+	if (!cpu_has_cpuid_faulting)
+		set_cpuidmask(c);
+	else if ((c == &boot_cpu_data) &&
+		 (~(opt_cpuid_mask_ecx & opt_cpuid_mask_edx &
+		    opt_cpuid_mask_ext_ecx & opt_cpuid_mask_ext_edx &
+		    opt_cpuid_mask_xsave_eax)))
+		printk("No CPUID feature masking support available\n");
 }
 
 /*
@@ -258,23 +275,6 @@ static void init_intel(struct cpuinfo_x86 *c)
 		detect_ht(c);
 	}
 
-	if (c == &boot_cpu_data && c->x86 == 6) {
-		if (probe_intel_cpuid_faulting())
-			__set_bit(X86_FEATURE_CPUID_FAULTING,
-				  c->x86_capability);
-	} else if (boot_cpu_has(X86_FEATURE_CPUID_FAULTING)) {
-		BUG_ON(!probe_intel_cpuid_faulting());
-		__set_bit(X86_FEATURE_CPUID_FAULTING, c->x86_capability);
-	}
-
-	if (!cpu_has_cpuid_faulting)
-		set_cpuidmask(c);
-	else if ((c == &boot_cpu_data) &&
-		 (~(opt_cpuid_mask_ecx & opt_cpuid_mask_edx &
-		    opt_cpuid_mask_ext_ecx & opt_cpuid_mask_ext_edx &
-		    opt_cpuid_mask_xsave_eax)))
-		printk("No CPUID feature masking support available\n");
-
 	/* Work around errata */
 	Intel_errata_workarounds(c);
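
For readers skimming the hunks above: the Intel block is moved unchanged from
init_intel() into early_init_intel(). As an annotated sketch (the comments
here are editorial, not part of the patch), its decision flow is:

    if (c == &boot_cpu_data && c->x86 == 6) {
        /* Boot CPU, Family 6: probe whether CPUID faulting works. */
        if (probe_intel_cpuid_faulting())
            __set_bit(X86_FEATURE_CPUID_FAULTING, c->x86_capability);
    } else if (boot_cpu_has(X86_FEATURE_CPUID_FAULTING)) {
        /* APs must match the boot CPU; a mismatch is fatal. */
        BUG_ON(!probe_intel_cpuid_faulting());
        __set_bit(X86_FEATURE_CPUID_FAULTING, c->x86_capability);
    }

    if (!cpu_has_cpuid_faulting)
        /* No faulting available: fall back to the feature-mask MSRs. */
        set_cpuidmask(c);
    else if ((c == &boot_cpu_data) &&
             (~(opt_cpuid_mask_ecx & opt_cpuid_mask_edx &
                opt_cpuid_mask_ext_ecx & opt_cpuid_mask_ext_edx &
                opt_cpuid_mask_xsave_eax)))
        /*
         * Faulting is in use and at least one cpuid_mask_* option was
         * given a non-default value: warn once, on the boot CPU, that
         * MSR-based masking is not available to honour it.
         */
        printk("No CPUID feature masking support available\n");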