@@ -2036,6 +2036,9 @@ void __init noreturn __start_xen(unsigned long mbi_p)
barrier();
wrmsrl(MSR_SPEC_CTRL, default_xen_spec_ctrl);
info->last_spec_ctrl = default_xen_spec_ctrl;
+
+ if ( cpu_has_auto_ibrs && (default_xen_spec_ctrl & SPEC_CTRL_IBRS) )
+ write_efer(read_efer() | EFER_AIBRSE);
}
/* Copy the cpu info block, and move onto the BSP stack. */
@@ -376,6 +376,9 @@ void start_secondary(void *unused)
{
wrmsrl(MSR_SPEC_CTRL, default_xen_spec_ctrl);
info->last_spec_ctrl = default_xen_spec_ctrl;
+
+ if ( cpu_has_auto_ibrs && (default_xen_spec_ctrl & SPEC_CTRL_IBRS) )
+ write_efer(read_efer() | EFER_AIBRSE);
}
update_mcu_opt_ctrl();
@@ -390,7 +390,7 @@ custom_param("pv-l1tf", parse_pv_l1tf);
static void __init print_details(enum ind_thunk thunk)
{
- unsigned int _7d0 = 0, _7d2 = 0, e8b = 0, max = 0, tmp;
+ unsigned int _7d0 = 0, _7d2 = 0, e8b = 0, e21a = 0, max = 0, tmp;
uint64_t caps = 0;
/* Collect diagnostics about available mitigations. */
@@ -399,7 +399,10 @@ static void __init print_details(enum ind_thunk thunk)
if ( max >= 2 )
cpuid_count(7, 2, &tmp, &tmp, &tmp, &_7d2);
if ( boot_cpu_data.extended_cpuid_level >= 0x80000008 )
cpuid(0x80000008, &tmp, &e8b, &tmp, &tmp);
+    /* Leaf 0x80000021 needs its own max extended leaf check. */
+    if ( boot_cpu_data.extended_cpuid_level >= 0x80000021 )
+        cpuid(0x80000021, &e21a, &tmp, &tmp, &tmp);
if ( cpu_has_arch_caps )
rdmsrl(MSR_ARCH_CAPABILITIES, caps);
@@ -430,11 +433,12 @@ static void __init print_details(enum ind_thunk thunk)
(e8b & cpufeat_mask(X86_FEATURE_IBPB_RET)) ? " IBPB_RET" : "");
/* Hardware features which need driving to mitigate issues. */
- printk(" Hardware features:%s%s%s%s%s%s%s%s%s%s%s%s\n",
+ printk(" Hardware features:%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
(e8b & cpufeat_mask(X86_FEATURE_IBPB)) ||
(_7d0 & cpufeat_mask(X86_FEATURE_IBRSB)) ? " IBPB" : "",
(e8b & cpufeat_mask(X86_FEATURE_IBRS)) ||
(_7d0 & cpufeat_mask(X86_FEATURE_IBRSB)) ? " IBRS" : "",
+ (e21a & cpufeat_mask(X86_FEATURE_AUTOMATIC_IBRS)) ? " AUTO_IBRS" : "",
(e8b & cpufeat_mask(X86_FEATURE_AMD_STIBP)) ||
(_7d0 & cpufeat_mask(X86_FEATURE_STIBP)) ? " STIBP" : "",
(e8b & cpufeat_mask(X86_FEATURE_AMD_SSBD)) ||
@@ -468,7 +472,9 @@ static void __init print_details(enum ind_thunk thunk)
thunk == THUNK_JMP ? "JMP" : "?",
(!boot_cpu_has(X86_FEATURE_IBRSB) &&
!boot_cpu_has(X86_FEATURE_IBRS)) ? "No" :
- (default_xen_spec_ctrl & SPEC_CTRL_IBRS) ? "IBRS+" : "IBRS-",
+ (cpu_has_auto_ibrs &&
+ (default_xen_spec_ctrl & SPEC_CTRL_IBRS)) ? "AUTO_IBRS+" :
+ (default_xen_spec_ctrl & SPEC_CTRL_IBRS) ? "IBRS+" : "IBRS-",
(!boot_cpu_has(X86_FEATURE_STIBP) &&
!boot_cpu_has(X86_FEATURE_AMD_STIBP)) ? "" :
(default_xen_spec_ctrl & SPEC_CTRL_STIBP) ? " STIBP+" : " STIBP-",
@@ -1150,15 +1156,20 @@ void __init init_speculation_mitigations(void)
}
else
{
- /*
- * Evaluate the safest Branch Target Injection mitigations to use.
- * First, begin with compiler-aided mitigations.
- */
- if ( IS_ENABLED(CONFIG_INDIRECT_THUNK) )
+ /* Evaluate the safest BTI mitigations with lowest overhead */
+ if ( cpu_has_auto_ibrs )
+ {
+ /*
+ * Prefer Automatic IBRS when available: among other benefits, it
+ * removes the need to stuff the RSB manually on every VMEXIT.
+ */
+ ibrs = true;
+ }
+ else if ( IS_ENABLED(CONFIG_INDIRECT_THUNK) )
{
/*
- * On all hardware, we'd like to use retpoline in preference to
- * IBRS, but only if it is safe on this hardware.
+ * Otherwise, we'd like to use retpoline in preference to
+ * plain IBRS, but only if it is safe on this hardware.
*/
if ( retpoline_safe() )
thunk = THUNK_RETPOLINE;
@@ -1357,7 +1368,9 @@ void __init init_speculation_mitigations(void)
*/
if ( opt_rsb_hvm )
{
- setup_force_cpu_cap(X86_FEATURE_SC_RSB_HVM);
+ /* Automatic IBRS wipes the RSB for us on VMEXIT */
+ if ( !(ibrs && cpu_has_auto_ibrs) )
+ setup_force_cpu_cap(X86_FEATURE_SC_RSB_HVM);
/*
* For SVM, Xen's RSB safety actions are performed before STGI, so
@@ -1582,17 +1595,26 @@ void __init init_speculation_mitigations(void)
bsp_delay_spec_ctrl = !cpu_has_hypervisor && default_xen_spec_ctrl;
- /*
- * If delaying MSR_SPEC_CTRL setup, use the same mechanism as
- * spec_ctrl_enter_idle(), by using a shadow value of zero.
- */
if ( bsp_delay_spec_ctrl )
{
+ /*
+ * If delaying MSR_SPEC_CTRL setup, use the same mechanism as
+ * spec_ctrl_enter_idle(), by using a shadow value of zero.
+ */
info->shadow_spec_ctrl = 0;
barrier();
info->spec_ctrl_flags |= SCF_use_shadow;
barrier();
}
+ else if ( ibrs && cpu_has_auto_ibrs )
+ {
+ /*
+ * If we're not delaying setting SPEC_CTRL there's no need to
+ * delay setting Automatic IBRS either. Flip the toggle if
+ * supported and IBRS is expected.
+ */
+ write_efer(read_efer() | EFER_AIBRSE);
+ }
val = bsp_delay_spec_ctrl ? 0 : default_xen_spec_ctrl;
In cases where AutoIBRS is supported by the host:

 * Prefer AutoIBRS to retpolines as BTI mitigation in heuristics
   calculations.
 * Always enable AutoIBRS if IBRS is chosen as a BTI mitigation.
 * Avoid stuffing the RAS/RSB on VMEXIT if AutoIBRS is enabled.
 * Delay setting AutoIBRS until after dom0 is set up, just like setting
   SPEC_CTRL.

Signed-off-by: Alejandro Vallejo <alejandro.vallejo@cloud.com>
---
 xen/arch/x86/setup.c     |  3 +++
 xen/arch/x86/smpboot.c   |  3 +++
 xen/arch/x86/spec_ctrl.c | 52 ++++++++++++++++++++++++++++------------
 3 files changed, 43 insertions(+), 15 deletions(-)