@@ -331,6 +331,9 @@ static void __init noinline amd_init_levelling(void)
(uint32_t)cpuidmask_defaults._7ab0,
(uint32_t)cpuidmask_defaults._6c);
}
+
+ if (levelling_caps)
+ ctxt_switch_levelling = amd_ctxt_switch_levelling;
}
/*
@@ -88,6 +88,13 @@ static const struct cpu_dev default_cpu = {
};
static const struct cpu_dev *this_cpu = &default_cpu;
+static void default_ctxt_switch_levelling(const struct domain *nextd)
+{
+ /* Nop */
+}
+void (* __read_mostly ctxt_switch_levelling)(const struct domain *nextd) =
+ default_ctxt_switch_levelling;
+
bool_t opt_cpu_info;
boolean_param("cpuinfo", opt_cpu_info);
@@ -32,13 +32,15 @@ static bool_t __init probe_intel_cpuid_faulting(void)
return 1;
}
-static DEFINE_PER_CPU(bool_t, cpuid_faulting_enabled);
-void set_cpuid_faulting(bool_t enable)
+static void set_cpuid_faulting(bool_t enable)
{
+ static DEFINE_PER_CPU(bool_t, cpuid_faulting_enabled);
+ bool_t *this_enabled = &this_cpu(cpuid_faulting_enabled);
uint32_t hi, lo;
- if (!cpu_has_cpuid_faulting ||
- this_cpu(cpuid_faulting_enabled) == enable )
+ ASSERT(cpu_has_cpuid_faulting);
+
+ if (*this_enabled == enable)
return;
rdmsr(MSR_INTEL_MISC_FEATURES_ENABLES, lo, hi);
@@ -47,7 +49,7 @@ void set_cpuid_faulting(bool_t enable)
lo |= MSR_MISC_FEATURES_CPUID_FAULTING;
wrmsr(MSR_INTEL_MISC_FEATURES_ENABLES, lo, hi);
- this_cpu(cpuid_faulting_enabled) = enable;
+ *this_enabled = enable;
}
/*
@@ -154,6 +156,28 @@ static void intel_ctxt_switch_levelling(const struct domain *nextd)
struct cpuidmasks *these_masks = &this_cpu(cpuidmasks);
const struct cpuidmasks *masks = &cpuidmask_defaults;
+ if (cpu_has_cpuid_faulting) {
+ /*
+ * We *should* be enabling faulting for the control domain.
+ *
+ * Unfortunately, the domain builder (having only ever been a
+ * PV guest) expects to be able to see host cpuid state in a
+ * native CPUID instruction, to correctly build a CPUID policy
+ * for HVM guests (notably the xstate leaves).
+ *
+ * This logic is fundamentally broken for HVM toolstack
+ * domains, and faulting causes PV guests to behave like HVM
+ * guests from their point of view.
+ *
+ * Future development plans will move responsibility for
+ * generating the maximum full cpuid policy into Xen, at which
+ * point this problem will disappear.
+ */
+ set_cpuid_faulting(nextd && is_pv_domain(nextd) &&
+ !is_control_domain(nextd));
+ return;
+ }
+
#define LAZY(msr, field) \
({ \
if (unlikely(these_masks->field != masks->field) && \
@@ -227,6 +251,9 @@ static void __init noinline intel_init_levelling(void)
(uint32_t)cpuidmask_defaults.e1cd,
(uint32_t)cpuidmask_defaults.Da1);
}
+
+ if (levelling_caps)
+ ctxt_switch_levelling = intel_ctxt_switch_levelling;
}
static void early_init_intel(struct cpuinfo_x86 *c)
@@ -189,6 +189,9 @@ void machine_crash_shutdown(void)
nmi_shootdown_cpus();
+ /* Reset CPUID masking and faulting to the host's default. */
+ ctxt_switch_levelling(NULL);
+
info = kexec_crash_save_info();
info->xen_phys_start = xen_phys_start;
info->dom0_pfn_to_mfn_frame_list_list =
@@ -2082,9 +2082,7 @@ void context_switch(struct vcpu *prev, struct vcpu *next)
load_segments(next);
}
- set_cpuid_faulting(is_pv_domain(nextd) &&
- !is_control_domain(nextd) &&
- !is_hardware_domain(nextd));
+ ctxt_switch_levelling(nextd);
}
context_saved(prev);
@@ -209,7 +209,7 @@ extern struct cpuinfo_x86 boot_cpu_data;
extern struct cpuinfo_x86 cpu_data[];
#define current_cpu_data cpu_data[smp_processor_id()]
-extern void set_cpuid_faulting(bool_t enable);
+extern void (*ctxt_switch_levelling)(const struct domain *nextd);
extern u64 host_pat;
extern bool_t opt_cpu_info;