[2/3] x86/boot: Don't enable EFER.SCE for !CONFIG_PV builds

Message ID 20200420145911.5708-3-andrew.cooper3@citrix.com (mailing list archive)
State New, archived
Series x86: IST cleanup

Commit Message

Andrew Cooper April 20, 2020, 2:59 p.m. UTC
This will cause all SYSCALL/SYSRET instructions to suffer #UD rather than
following the MSR_{L,C}STAR pointers, allowing us to drop the star_enter()
panic helper and, in a subsequent patch, to clean up the IST stacks.

Drop the now-dead conditional SYSENTER logic in the middle of
subarch_percpu_traps_init().

In addition, vmx_restore_host_msrs() need not restore any host state in
!CONFIG_PV builds.  (Regarding the asymmetric changes, VT-x automatically
restores SYSENTER state on vmexit, while SVM restores both SYSCALL and
SYSENTER state with the VMSAVE/VMLOAD instructions.)
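
As an illustration of the mask construction used below: IS_ENABLED(CONFIG_PV)
evaluates to a compile-time 0 or 1, so multiplying it into EFER_SCE includes
or drops the bit without any runtime check.  A minimal standalone sketch
(plain C with the relevant EFER bit positions written out; not Xen code):

    /*
     * Illustration only: how a 0/1 Kconfig-derived constant folds EFER_SCE
     * in or out of a boot-time OR-mask, mirroring the trampoline_efer and
     * efi_arch_post_exit_boot() hunks in the patch.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define EFER_SCE (1u << 0)   /* SYSCALL Enable */
    #define EFER_LME (1u << 8)   /* Long Mode Enable */
    #define EFER_NX  (1u << 11)  /* No-Execute Enable */

    /* Stand-in for IS_ENABLED(CONFIG_PV); set to 0 to model a !CONFIG_PV build. */
    #define PV_ENABLED 1

    int main(void)
    {
        uint64_t efer = 0;

        /* SCE only contributes when PV support is built in. */
        efer |= EFER_LME | (EFER_SCE * PV_ENABLED);

        /* NX is still ORed in whenever the CPU supports it (assumed here). */
        efer |= EFER_NX;

        printf("boot EFER mask: %#llx\n", (unsigned long long)efer);
        return 0;
    }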

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
CC: Jan Beulich <JBeulich@suse.com>
CC: Wei Liu <wl@xen.org>
CC: Roger Pau Monné <roger.pau@citrix.com>
CC: Kevin Tian <kevin.tian@intel.com>
---
 xen/arch/x86/boot/trampoline.S |  4 ++--
 xen/arch/x86/efi/efi-boot.h    |  3 ++-
 xen/arch/x86/hvm/vmx/vmx.c     |  4 ++++
 xen/arch/x86/x86_64/traps.c    | 19 ++++++-------------
 4 files changed, 14 insertions(+), 16 deletions(-)

Comments

Jan Beulich April 21, 2020, 7:35 a.m. UTC | #1
On 20.04.2020 16:59, Andrew Cooper wrote:
> --- a/xen/arch/x86/efi/efi-boot.h
> +++ b/xen/arch/x86/efi/efi-boot.h
> @@ -238,7 +238,8 @@ static void __init noreturn efi_arch_post_exit_boot(void)
>      /* Set system registers and transfer control. */
>      asm volatile("pushq $0\n\tpopfq");
>      rdmsrl(MSR_EFER, efer);
> -    efer |= EFER_SCE;
> +    if ( IS_ENABLED(CONFIG_PV) )
> +        efer |= EFER_SCE;
>      if ( cpu_has_nx )
>          efer |= EFER_NX;
>      wrmsrl(MSR_EFER, efer);

Switch to simply ORing in trampoline_efer here?

With or without the adjustment
Reviewed-by: Jan Beulich <jbeulich@suse.com>

Jan
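
For reference, the adjustment Jan suggests would presumably reduce the
efi_arch_post_exit_boot() hunk to something like the sketch below (illustration
only; it assumes trampoline_efer is visible to this translation unit and
already has NX folded in by this point on the EFI boot path):

    rdmsrl(MSR_EFER, efer);
    efer |= trampoline_efer;  /* EFER_LME | (SCE iff PV) | (NX if available) */
    wrmsrl(MSR_EFER, efer);

This would keep the boot-time EFER policy in one place rather than recomputing
the SCE/NX conditions here.
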
Tian, Kevin April 22, 2020, 8:02 a.m. UTC | #2
> From: Andrew Cooper <andrew.cooper3@citrix.com>
> Sent: Monday, April 20, 2020 10:59 PM
> 
> This will cause all SYSCALL/SYSRET instructions to suffer #UD rather than
> following the MSR_{L,C}STAR pointers, allowing us to drop the star_enter()
> panic helper, allowing us to clean up the IST stacks in a subsequent patch.
> 
> Drop the now-dead conditional SYSENTER logic in the middle of
> subarch_percpu_traps_init().
> 
> In addition, vmx_restore_host_msrs() need not restore any host
> state.  (Regarding the asymmetric changes, VT-x automatically restores
> SYSENTER state on vmexit, and SVM restores both SYSCALL/SYSENTER state
> with the VMSAVE/VMLOAD instructions.)
> 
> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>

Reviewed-by: Kevin Tian <kevin.tian@intel.com>

Patch

diff --git a/xen/arch/x86/boot/trampoline.S b/xen/arch/x86/boot/trampoline.S
index 18c6638924..928b4ad4ce 100644
--- a/xen/arch/x86/boot/trampoline.S
+++ b/xen/arch/x86/boot/trampoline.S
@@ -140,9 +140,9 @@  gdt_48:
 GLOBAL(trampoline_misc_enable_off)
         .quad   0
 
-/* EFER OR-mask for boot paths.  This gets adjusted with NX when available. */
+/* EFER OR-mask for boot paths.  SCE conditional on PV support, NX added when available. */
 GLOBAL(trampoline_efer)
-        .long   EFER_LME | EFER_SCE
+        .long   EFER_LME | (EFER_SCE * IS_ENABLED(CONFIG_PV))
 
 GLOBAL(trampoline_xen_phys_start)
         .long   0
diff --git a/xen/arch/x86/efi/efi-boot.h b/xen/arch/x86/efi/efi-boot.h
index a13304201f..bf0fae4f89 100644
--- a/xen/arch/x86/efi/efi-boot.h
+++ b/xen/arch/x86/efi/efi-boot.h
@@ -238,7 +238,8 @@  static void __init noreturn efi_arch_post_exit_boot(void)
     /* Set system registers and transfer control. */
     asm volatile("pushq $0\n\tpopfq");
     rdmsrl(MSR_EFER, efer);
-    efer |= EFER_SCE;
+    if ( IS_ENABLED(CONFIG_PV) )
+        efer |= EFER_SCE;
     if ( cpu_has_nx )
         efer |= EFER_NX;
     wrmsrl(MSR_EFER, efer);
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 869339062b..3e4b2e9a58 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -496,6 +496,10 @@  static void vmx_vcpu_destroy(struct vcpu *v)
  */
 static void vmx_restore_host_msrs(void)
 {
+    /* No PV guests?  No need to restore host SYSCALL infrastructure. */
+    if ( !IS_ENABLED(CONFIG_PV) )
+        return;
+
     /* Relies on the SYSCALL trampoline being at the start of the stubs. */
     wrmsrl(MSR_STAR,         XEN_MSR_STAR);
     wrmsrl(MSR_LSTAR,        this_cpu(stubs.addr));
diff --git a/xen/arch/x86/x86_64/traps.c b/xen/arch/x86/x86_64/traps.c
index c3d4faea6b..93af0c5e87 100644
--- a/xen/arch/x86/x86_64/traps.c
+++ b/xen/arch/x86/x86_64/traps.c
@@ -299,17 +299,8 @@  static unsigned int write_stub_trampoline(
 
 DEFINE_PER_CPU(struct stubs, stubs);
 
-#ifdef CONFIG_PV
 void lstar_enter(void);
 void cstar_enter(void);
-#else
-static void __cold star_enter(void)
-{
-    panic("lstar/cstar\n");
-}
-#define lstar_enter star_enter
-#define cstar_enter star_enter
-#endif /* CONFIG_PV */
 
 void subarch_percpu_traps_init(void)
 {
@@ -321,6 +312,10 @@  void subarch_percpu_traps_init(void)
     /* IST_MAX IST pages + at least 1 guard page + primary stack. */
     BUILD_BUG_ON((IST_MAX + 1) * PAGE_SIZE + PRIMARY_STACK_SIZE > STACK_SIZE);
 
+    /* No PV guests?  No need to set up SYSCALL/SYSENTER infrastructure. */
+    if ( !IS_ENABLED(CONFIG_PV) )
+        return;
+
     stub_page = map_domain_page(_mfn(this_cpu(stubs.mfn)));
 
     /*
@@ -338,10 +333,8 @@  void subarch_percpu_traps_init(void)
     {
         /* SYSENTER entry. */
         wrmsrl(MSR_IA32_SYSENTER_ESP, stack_bottom);
-        wrmsrl(MSR_IA32_SYSENTER_EIP,
-               IS_ENABLED(CONFIG_PV) ? (unsigned long)sysenter_entry : 0);
-        wrmsr(MSR_IA32_SYSENTER_CS,
-              IS_ENABLED(CONFIG_PV) ? __HYPERVISOR_CS : 0, 0);
+        wrmsrl(MSR_IA32_SYSENTER_EIP, (unsigned long)sysenter_entry);
+        wrmsr(MSR_IA32_SYSENTER_CS, __HYPERVISOR_CS, 0);
     }
 
     /* Trampoline for SYSCALL entry from compatibility mode. */