@@ -140,9 +140,9 @@ gdt_48:
GLOBAL(trampoline_misc_enable_off)
.quad 0
-/* EFER OR-mask for boot paths. This gets adjusted with NX when available. */
+/* EFER OR-mask for boot paths. SCE conditional on PV support, NX added when available. */
GLOBAL(trampoline_efer)
- .long EFER_LME | EFER_SCE
+ .long EFER_LME | (EFER_SCE * IS_ENABLED(CONFIG_PV))
GLOBAL(trampoline_xen_phys_start)
.long 0
@@ -238,7 +238,8 @@ static void __init noreturn efi_arch_post_exit_boot(void)
/* Set system registers and transfer control. */
asm volatile("pushq $0\n\tpopfq");
rdmsrl(MSR_EFER, efer);
- efer |= EFER_SCE;
+ if ( IS_ENABLED(CONFIG_PV) )
+ efer |= EFER_SCE;
if ( cpu_has_nx )
efer |= EFER_NX;
wrmsrl(MSR_EFER, efer);
@@ -496,6 +496,10 @@ static void vmx_vcpu_destroy(struct vcpu *v)
*/
static void vmx_restore_host_msrs(void)
{
+ /* No PV guests? No need to restore host SYSCALL infrastructure. */
+ if ( !IS_ENABLED(CONFIG_PV) )
+ return;
+
/* Relies on the SYSCALL trampoline being at the start of the stubs. */
wrmsrl(MSR_STAR, XEN_MSR_STAR);
wrmsrl(MSR_LSTAR, this_cpu(stubs.addr));
@@ -299,17 +299,8 @@ static unsigned int write_stub_trampoline(
DEFINE_PER_CPU(struct stubs, stubs);
-#ifdef CONFIG_PV
void lstar_enter(void);
void cstar_enter(void);
-#else
-static void __cold star_enter(void)
-{
- panic("lstar/cstar\n");
-}
-#define lstar_enter star_enter
-#define cstar_enter star_enter
-#endif /* CONFIG_PV */
void subarch_percpu_traps_init(void)
{
@@ -321,6 +312,10 @@ void subarch_percpu_traps_init(void)
/* IST_MAX IST pages + at least 1 guard page + primary stack. */
BUILD_BUG_ON((IST_MAX + 1) * PAGE_SIZE + PRIMARY_STACK_SIZE > STACK_SIZE);
+ /* No PV guests? No need to set up SYSCALL/SYSENTER infrastructure. */
+ if ( !IS_ENABLED(CONFIG_PV) )
+ return;
+
stub_page = map_domain_page(_mfn(this_cpu(stubs.mfn)));
/*
@@ -338,10 +333,8 @@ void subarch_percpu_traps_init(void)
{
/* SYSENTER entry. */
wrmsrl(MSR_IA32_SYSENTER_ESP, stack_bottom);
- wrmsrl(MSR_IA32_SYSENTER_EIP,
- IS_ENABLED(CONFIG_PV) ? (unsigned long)sysenter_entry : 0);
- wrmsr(MSR_IA32_SYSENTER_CS,
- IS_ENABLED(CONFIG_PV) ? __HYPERVISOR_CS : 0, 0);
+ wrmsrl(MSR_IA32_SYSENTER_EIP, (unsigned long)sysenter_entry);
+ wrmsr(MSR_IA32_SYSENTER_CS, __HYPERVISOR_CS, 0);
}
/* Trampoline for SYSCALL entry from compatibility mode. */
This will cause all SYSCALL/SYSRET instructions to suffer #UD rather than following the MSR_{L,C}STAR pointers, allowing us to drop the star_enter() panic helper, allowing us to clean up the IST stacks in a subsequent patch. Drop the now-dead conditional SYSENTER logic in the middle of subarch_percpu_traps_init(). In addition, vmx_restore_host_msrs() need not restore any host state. (Regarding the asymmetric changes, VT-x automatically restores SYSENTER state on vmexit, and SVM restores both SYSCALL/SYSENTER state with the VMSAVE/VMLOAD instructions.) Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com> --- CC: Jan Beulich <JBeulich@suse.com> CC: Wei Liu <wl@xen.org> CC: Roger Pau Monné <roger.pau@citrix.com> CC: Kevin Tian <kevin.tian@intel.com> --- xen/arch/x86/boot/trampoline.S | 4 ++-- xen/arch/x86/efi/efi-boot.h | 3 ++- xen/arch/x86/hvm/vmx/vmx.c | 4 ++++ xen/arch/x86/x86_64/traps.c | 19 ++++++------------- 4 files changed, 14 insertions(+), 16 deletions(-)