@@ -190,6 +190,16 @@ static void sanitise_featureset(uint32_t *fs)
}
}
+static void recalculate_arch_lbr(struct cpu_policy *p)
+{
+ if ( p->basic.max_leaf < 0x1c ||
+ !(cpu_policy_xstates(&host_cpu_policy) & X86_XSS_LBR) ||
+ p->basic.lbr_1Ca.supported_depths == 0)
+ p->feat.arch_lbr = 0;
+ if ( !p->feat.arch_lbr )
+ p->basic.raw[0x1c] = EMPTY_LEAF;
+}
+
static void recalculate_xstate(struct cpu_policy *p)
{
uint64_t xstates = XSTATE_FP_SSE;
@@ -219,6 +229,9 @@ static void recalculate_xstate(struct cpu_policy *p)
if ( p->feat.amx_tile )
xstates |= X86_XCR0_TILE_CFG | X86_XCR0_TILE_DATA;
+ if ( p->feat.arch_lbr )
+ xstates |= X86_XSS_LBR;
+
/* Subleaf 0 */
p->xstate.max_size =
xstate_uncompressed_size(xstates & ~XSTATE_XSAVES_ONLY);
@@ -271,6 +284,8 @@ static void recalculate_misc(struct cpu_policy *p)
p->basic.raw[0xc] = EMPTY_LEAF;
+ zero_leaves(p->basic.raw, 0xe, 0x1b);
+
p->extd.e1d &= ~CPUID_COMMON_1D_FEATURES;
/* Most of Power/RAS hidden from guests. */
@@ -630,6 +645,7 @@ static void __init calculate_pv_max_policy(void)
sanitise_featureset(fs);
x86_cpu_featureset_to_policy(fs, p);
+ recalculate_arch_lbr(p);
recalculate_xstate(p);
p->extd.raw[0xa] = EMPTY_LEAF; /* No SVM for PV guests. */
@@ -670,6 +686,7 @@ static void __init calculate_pv_def_policy(void)
}
x86_cpu_featureset_to_policy(fs, p);
+ recalculate_arch_lbr(p);
recalculate_xstate(p);
}
@@ -755,6 +772,14 @@ static void __init calculate_hvm_max_policy(void)
if ( !cpu_has_vmx_xsaves )
__clear_bit(X86_FEATURE_XSAVES, fs);
+
+ /*
+ * VMX bitmap is needed for passing through LBR info MSRs.
+ * Require it for virtual arch LBR.
+ */
+ if ( !cpu_has_vmx_guest_lbr_ctl || !cpu_has_vmx_msr_bitmap ||
+ !cpu_has_vmx_xsaves )
+ __clear_bit(X86_FEATURE_ARCH_LBR, fs);
}
/*
@@ -787,6 +812,7 @@ static void __init calculate_hvm_max_policy(void)
sanitise_featureset(fs);
x86_cpu_featureset_to_policy(fs, p);
+ recalculate_arch_lbr(p);
recalculate_xstate(p);
/* It's always possible to emulate CPUID faulting for HVM guests */
@@ -839,6 +865,7 @@ static void __init calculate_hvm_def_policy(void)
}
x86_cpu_featureset_to_policy(fs, p);
+ recalculate_arch_lbr(p);
recalculate_xstate(p);
}
@@ -971,6 +998,7 @@ void recalculate_cpuid_policy(struct domain *d)
p->extd.maxlinaddr = p->extd.lm ? 48 : 32;
+ recalculate_arch_lbr(p);
recalculate_xstate(p);
recalculate_misc(p);
@@ -505,6 +505,13 @@ static void generic_identify(struct cpuinfo_x86 *c)
&c->x86_capability[FEATURESET_Da1],
&tmp, &tmp, &tmp);
+ if (c->cpuid_level >= 0x1c)
+ cpuid(0x1c,
+ &c->x86_capability[FEATURESET_1Ca],
+ &c->x86_capability[FEATURESET_1Cb],
+ &c->x86_capability[FEATURESET_1Cc],
+ &tmp);
+
if (test_bit(X86_FEATURE_ARCH_CAPS, c->x86_capability))
rdmsr(MSR_ARCH_CAPABILITIES,
c->x86_capability[FEATURESET_m10Al],
Ensure that the arch LBR feature and its dependent features are disabled
if any of its prerequisites are not available.

Signed-off-by: Tu Dinh <ngoc-tu.dinh@vates.tech>
---
 xen/arch/x86/cpu-policy.c | 28 ++++++++++++++++++++++++++++
 xen/arch/x86/cpu/common.c |  7 +++++++
 2 files changed, 35 insertions(+)