@@ -830,6 +830,9 @@ int xc_cpu_policy_apply_featureset(xc_interface *xch, xc_cpu_policy_t *policy,
const uint32_t *featureset,
unsigned int nr_features);
+/* Sanitize a policy: note that this may modify the contents of the passed policy. */
+void xc_cpu_policy_sanitize(xc_interface *xch, xc_cpu_policy_t *policy);
+
int xc_get_cpu_levelling_caps(xc_interface *xch, uint32_t *caps);
int xc_get_cpu_featureset(xc_interface *xch, uint32_t index,
uint32_t *nr_features, uint32_t *featureset);
@@ -690,3 +690,8 @@ int xc_cpu_policy_apply_featureset(xc_interface *xch, xc_cpu_policy_t *policy,
return 0;
}
+
+void xc_cpu_policy_sanitize(xc_interface *xch, xc_cpu_policy_t *policy)
+{
+ x86_cpuid_policy_shrink_max_leaves(&policy->cpuid);
+}
@@ -689,6 +689,13 @@ int libxl__cpuid_legacy(libxl_ctx *ctx, uint32_t domid, bool restore,
goto out;
}
+ /*
+ * Do not attempt any modifications if creating a policy that aims to be
+ * compatible with pre-4.13 Xen versions.
+ */
+ if (!restore)
+ xc_cpu_policy_sanitize(ctx->xch, policy);
+
r = xc_cpu_policy_set_domain(ctx->xch, domid, policy);
if (r) {
LOGED(ERROR, domid, "Failed to set domain CPUID policy");
@@ -8,10 +8,13 @@
#include <err.h>
#include <xen-tools/libs.h>
+#include <xen/asm/x86-defns.h>
#include <xen/asm/x86-vendors.h>
#include <xen/lib/x86/cpu-policy.h>
#include <xen/domctl.h>
+#define XSTATE_FP_SSE (X86_XCR0_FP | X86_XCR0_SSE)
+
static unsigned int nr_failures;
#define fail(fmt, ...) \
({ \
@@ -671,6 +674,103 @@ static void test_msr_get_entry(void)
}
}
+static void test_cpuid_maximum_leaf_shrinking(void)
+{
+ static const struct test {
+ const char *name;
+ struct cpuid_policy p;
+ } tests[] = {
+ {
+ .name = "basic",
+ .p = {
+ /* Very basic information only. */
+ .basic.max_leaf = 1,
+ .basic.raw_fms = 0xc2,
+ },
+ },
+ {
+ .name = "cache",
+ .p = {
+ /* Cache subleaves present. */
+ .basic.max_leaf = 4,
+ .cache.subleaf[0].type = 1,
+ },
+ },
+ {
+ .name = "feat#0",
+ .p = {
+ /* Subleaf 0 only with some valid bit. */
+ .basic.max_leaf = 7,
+ .feat.max_subleaf = 0,
+ .feat.fsgsbase = 1,
+ },
+ },
+ {
+ .name = "feat#1",
+ .p = {
+ /* Subleaf 1 only with some valid bit. */
+ .basic.max_leaf = 7,
+ .feat.max_subleaf = 1,
+ .feat.avx_vnni = 1,
+ },
+ },
+ {
+ .name = "topo",
+ .p = {
+ /* Topology subleaves present. */
+ .basic.max_leaf = 0xb,
+ .topo.subleaf[0].type = 1,
+ },
+ },
+ {
+ .name = "xstate",
+ .p = {
+ /* First subleaf always valid (and then non-zero). */
+ .basic.max_leaf = 0xd,
+ .xstate.xcr0_low = XSTATE_FP_SSE,
+ },
+ },
+ {
+ .name = "extd",
+ .p = {
+ /* Commonly available information only. */
+ .extd.max_leaf = 0x80000008,
+ .extd.maxphysaddr = 0x28,
+ .extd.maxlinaddr = 0x30,
+ },
+ },
+ };
+
+ printf("Testing CPUID maximum leaf shrinking:\n");
+
+ for ( size_t i = 0; i < ARRAY_SIZE(tests); ++i )
+ {
+ const struct test *t = &tests[i];
+ struct cpuid_policy *p = memdup(&t->p);
+
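+ /*
+ * Blow the max (sub)leaf values up to their array bounds; shrinking
+ * should bring them back down to the values expected by the test.
+ */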
+ p->basic.max_leaf = ARRAY_SIZE(p->basic.raw) - 1;
+ p->feat.max_subleaf = ARRAY_SIZE(p->feat.raw) - 1;
+ p->extd.max_leaf = 0x80000000 | (ARRAY_SIZE(p->extd.raw) - 1);
+
+ x86_cpuid_policy_shrink_max_leaves(p);
+
+ /* Check the resulting max (sub)leaf values against expectations. */
+ if ( p->basic.max_leaf != t->p.basic.max_leaf )
+ fail(" Test %s basic fail - expected %#x, got %#x\n",
+ t->name, t->p.basic.max_leaf, p->basic.max_leaf);
+
+ if ( p->extd.max_leaf != t->p.extd.max_leaf )
+ fail(" Test %s extd fail - expected %#x, got %#x\n",
+ t->name, t->p.extd.max_leaf, p->extd.max_leaf);
+
+ if ( p->feat.max_subleaf != t->p.feat.max_subleaf )
+ fail(" Test %s feat fail - expected %#x, got %#x\n",
+ t->name, t->p.feat.max_subleaf, p->feat.max_subleaf);
+
+ free(p);
+ }
+}
+
static void test_is_compatible_success(void)
{
static struct test {
@@ -787,6 +887,7 @@ int main(int argc, char **argv)
test_cpuid_deserialise_failure();
test_cpuid_out_of_range_clearing();
test_cpuid_get_leaf_failure();
+ test_cpuid_maximum_leaf_shrinking();
test_msr_serialise_success();
test_msr_deserialise_failure();
@@ -386,6 +386,13 @@ void x86_cpuid_policy_fill_native(struct cpuid_policy *p);
*/
void x86_cpuid_policy_clear_out_of_range_leaves(struct cpuid_policy *p);
+/**
+ * Shrink max leaf/subleaf values such that the last respective valid entry
+ * isn't all blank. While permitted by the spec, such extraneous leaves may
+ * provide undue "hints" to guests.
+ */
+void x86_cpuid_policy_shrink_max_leaves(struct cpuid_policy *p);
+
#ifdef __XEN__
#include <public/arch-x86/xen.h>
typedef XEN_GUEST_HANDLE_64(xen_cpuid_leaf_t) cpuid_leaf_buffer_t;
@@ -236,6 +236,45 @@ void x86_cpuid_policy_clear_out_of_range_leaves(struct cpuid_policy *p)
ARRAY_SIZE(p->extd.raw) - 1);
}
+void x86_cpuid_policy_shrink_max_leaves(struct cpuid_policy *p)
+{
+ unsigned int i;
+
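+ /*
+ * Leaves with dedicated subleaf storage (4, 7, 0xb and 0xd) get a
+ * representative subleaf copied into basic.raw[], so that the max_leaf
+ * scan further down sees whether they actually carry any content.
+ */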
+ p->basic.raw[0x4] = p->cache.raw[0];
+
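+ /* Scan leaf 7 from the top down for the last subleaf with any bit set. */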
+ for ( i = p->feat.max_subleaf; i; --i )
+ if ( p->feat.raw[i].a | p->feat.raw[i].b |
+ p->feat.raw[i].c | p->feat.raw[i].d )
+ break;
+ p->feat.max_subleaf = i;
+ p->basic.raw[0x7] = p->feat.raw[i];
+
+ p->basic.raw[0xb] = p->topo.raw[0];
+
+ /*
+ * Due to the way xstate gets handled in the hypervisor (see
+ * recalculate_xstate()) there is, for now at least, no need to fiddle
+ * with the xstate subleaves: we assume they're already in consistent
+ * shape, as they come from either hardware or recalculate_xstate().
+ */
+ p->basic.raw[0xd] = p->xstate.raw[0];
+
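+ /* Shrink max_leaf to the highest basic leaf with any non-zero output. */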
+ for ( i = p->basic.max_leaf; i; --i )
+ if ( p->basic.raw[i].a | p->basic.raw[i].b |
+ p->basic.raw[i].c | p->basic.raw[i].d )
+ break;
+ p->basic.max_leaf = i;
+
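+ /*
+ * Likewise for the extended range; keep it only if some leaf beyond
+ * 0x80000000, or EBX/ECX/EDX of leaf 0x80000000 itself, is non-zero.
+ */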
+ for ( i = p->extd.max_leaf & 0xffff; i; --i )
+ if ( p->extd.raw[i].a | p->extd.raw[i].b |
+ p->extd.raw[i].c | p->extd.raw[i].d )
+ break;
+ if ( i | p->extd.raw[0].b | p->extd.raw[0].c | p->extd.raw[0].d )
+ p->extd.max_leaf = 0x80000000 | i;
+ else
+ p->extd.max_leaf = 0;
+}
+
const uint32_t *x86_cpuid_lookup_deep_deps(uint32_t feature)
{
static const uint32_t deep_features[] = INIT_DEEP_FEATURES;