diff mbox series

[v3,2/3] x86/boot: Clear XD_DISABLE from the early boot path

Message ID 20230629121713.1211-3-alejandro.vallejo@cloud.com (mailing list archive)
State New, archived
Headers show
Series Introduce a REQUIRE_NX Kconfig option | expand

Commit Message

Alejandro Vallejo June 29, 2023, 12:17 p.m. UTC
Intel CPUs have a bit in MSR_IA32_MISC_ENABLE that may prevent the NX bit
from being advertised. Clear it unconditionally if we can't find the NX
feature right away on boot.

The conditions for the MSR being read on early boot are (in this order):

* Long Mode is supported
* NX isn't advertised
* The vendor is Intel

The order of checks has been chosen carefully so a virtualized Xen on a
hypervisor that doesn't emulate that MSR (but supports NX) doesn't triple
fault trying to access the non-existing MSR.

With that done, we can remove the XD_DISABLE checks in the intel-specific
init path (as they are already done in early assembly). Keep a printk to
highlight the fact that NX was forcefully enabled.

Signed-off-by: Alejandro Vallejo <alejandro.vallejo@cloud.com>
---
v3:
  * In head.S: s/has_nx/got_nx and s/nx_bit/nx
  * Style changes in assembly instructions (spaces + width modifiers)
  * Big comment in head.S replaced
  * Jump directly to .Lno_nx if NX not found and XD_DISABLE not present
  * Restored rdmsrl (previously refactored into rdmsr_safe() in v2) and
    removed XD_DISABLE clearing in C (as it's now done in head.S).
  * Moved printk in intel.c to highlight the XD_DISABLE override even when
    done in head.S
---
 xen/arch/x86/boot/head.S               | 49 ++++++++++++++++++++++----
 xen/arch/x86/cpu/intel.c               | 16 ++++-----
 xen/arch/x86/include/asm/msr-index.h   |  2 +-
 xen/arch/x86/include/asm/x86-vendors.h |  6 ++--
 4 files changed, 53 insertions(+), 20 deletions(-)

Comments

Alejandro Vallejo June 29, 2023, 3:46 p.m. UTC | #1
On Thu, Jun 29, 2023 at 1:17 PM Alejandro Vallejo <
alejandro.vallejo@cloud.com> wrote:

> Intel CPUs have a bit in MSR_IA32_MISC_ENABLE that may prevent the NX bit
> from being advertised. Clear it unconditionally if we can't find the NX
> feature right away on boot.
>
> The conditions for the MSR being read on early boot are (in this order):
>
> * Long Mode is supported
> * NX isn't advertised
> * The vendor is Intel
>
> The order of checks has been chosen carefully so a virtualized Xen on a
> hypervisor that doesn't emulate that MSR (but supports NX) doesn't triple
> fault trying to access the non-existing MSR.
>
> With that done, we can remove the XD_DISABLE checks in the intel-specific
> init path (as they are already done in early assembly). Keep a printk to
> highlight the fact that NX was forcefully enabled.
>
> Signed-off-by: Alejandro Vallejo <alejandro.vallejo@cloud.com>
> ---
> v3:
>   * In head.S: s/has_nx/got_nx and s/nx_bit/nx
>   * Style changes in assembly instructions (spaces + width modifiers)
>   * Big comment in head.S replaced
>   * Jump directly to .Lno_nx if NX not found and XD_DISABLE not present
>   * Restored rdmsrl (previously refactored into rdmsr_safe() in v2) and
>     removed XD_DISABLE clearing in C (as it's now done in head.S).
>   * Moved printk in intel.c to highlight the XD_DISABLE override even when
>     done in head.S
> ---
>  xen/arch/x86/boot/head.S               | 49 ++++++++++++++++++++++----
>  xen/arch/x86/cpu/intel.c               | 16 ++++-----
>  xen/arch/x86/include/asm/msr-index.h   |  2 +-
>  xen/arch/x86/include/asm/x86-vendors.h |  6 ++--
>  4 files changed, 53 insertions(+), 20 deletions(-)
>

@maintainers
Andrew Cooper June 30, 2023, 11:19 a.m. UTC | #2
On 29/06/2023 1:17 pm, Alejandro Vallejo wrote:
> Intel CPUs have a bit in MSR_IA32_MISC_ENABLE that may prevent the NX bit
> from being advertised. Clear it unconditionally if we can't find the NX
> feature right away on boot.
>
> The conditions for the MSR being read on early boot are (in this order):
>
> * Long Mode is supported
> * NX isn't advertised
> * The vendor is Intel
>
> The order of checks has been chosen carefully so a virtualized Xen on a
> hypervisor that doesn't emulate that MSR (but supports NX) doesn't triple
> fault trying to access the non-existing MSR.
>
> With that done, we can remove the XD_DISABLE checks in the intel-specific
> init path (as they are already done in early assembly). Keep a printk to
> highlight the fact that NX was forcefully enabled.
>
> Signed-off-by: Alejandro Vallejo <alejandro.vallejo@cloud.com>

Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>, with two minor
fixes.

> diff --git a/xen/arch/x86/boot/head.S b/xen/arch/x86/boot/head.S
> index 9fbd602ea5..0e02c28f37 100644
> --- a/xen/arch/x86/boot/head.S
> +++ b/xen/arch/x86/boot/head.S
> @@ -652,16 +652,53 @@ trampoline_setup:
>          cpuid
>  1:      mov     %edx, CPUINFO_FEATURE_OFFSET(X86_FEATURE_LM) + sym_esi(boot_cpu_data)
>  
> -        /* Check for NX. Adjust EFER setting if available. */
> -        bt      $cpufeat_bit(X86_FEATURE_NX), %edx
> -        jnc     1f
> -        orb     $EFER_NXE >> 8, 1 + sym_esi(trampoline_efer)
> -1:
> -
>          /* Check for availability of long mode. */
>          bt      $cpufeat_bit(X86_FEATURE_LM),%edx
>          jnc     .Lbad_cpu
>  
> +        /* Check for NX */
> +        bt      $cpufeat_bit(X86_FEATURE_NX), %edx
> +        jc     .Lgot_nx
> +
> +        /*
> +         * NX appears to be unsupported, but it might be hidden.
> +         *
> +         * The feature is part of the AMD64 spec, but the very first Intel
> +         * 64bit CPUs lacked the feature, and thereafter there was a
> +         * firmware knob to disable the feature. Undo the disable if
> +         * possible.
> +         *
> +         * All 64bit Intel CPUs support this MSR. If virtualised, expect
> +         * the hypervisor to either emulate the MSR or give us NX.
> +         */
> +        xor     %eax, %eax
> +        cpuid
> +        cmp     $X86_VENDOR_INTEL_EBX, %ebx
> +        jnz     .Lno_nx
> +        cmp     $X86_VENDOR_INTEL_EDX, %edx
> +        jnz     .Lno_nx
> +        cmp     $X86_VENDOR_INTEL_ECX, %ecx
> +        jnz     .Lno_nx
> +
> +        /* Clear the XD_DISABLE bit */
> +        mov    $MSR_IA32_MISC_ENABLE, %ecx

Parameter indention here.

> diff --git a/xen/arch/x86/include/asm/x86-vendors.h b/xen/arch/x86/include/asm/x86-vendors.h
> index 0a37024cbd..9191da26d7 100644
> --- a/xen/arch/x86/include/asm/x86-vendors.h
> +++ b/xen/arch/x86/include/asm/x86-vendors.h
> @@ -12,9 +12,9 @@
>  #define X86_VENDOR_UNKNOWN 0
>  
>  #define X86_VENDOR_INTEL (1 << 0)
> -#define X86_VENDOR_INTEL_EBX 0x756e6547U /* "GenuineIntel" */
> -#define X86_VENDOR_INTEL_ECX 0x6c65746eU
> -#define X86_VENDOR_INTEL_EDX 0x49656e69U
> +#define X86_VENDOR_INTEL_EBX _AC(0x756e6547, U) /* "GenuineIntel" */
> +#define X86_VENDOR_INTEL_ECX _AC(0x6c65746e, U)
> +#define X86_VENDOR_INTEL_EDX _AC(0x49656e69, U)
>  
>  #define X86_VENDOR_AMD (1 << 1)
>  #define X86_VENDOR_AMD_EBX 0x68747541U /* "AuthenticAMD" */

And all vendors should get equivalent _AC() conversions.

Can fix both on commit.

~Andrew
Andrew Cooper June 30, 2023, 12:28 p.m. UTC | #3
On 29/06/2023 1:17 pm, Alejandro Vallejo wrote:
> diff --git a/xen/arch/x86/cpu/intel.c b/xen/arch/x86/cpu/intel.c
> index 168cd58f36..b2443b6831 100644
> --- a/xen/arch/x86/cpu/intel.c
> +++ b/xen/arch/x86/cpu/intel.c
> @@ -304,24 +304,20 @@ static void cf_check early_init_intel(struct cpuinfo_x86 *c)
>  	if (c->x86 == 15 && c->x86_cache_alignment == 64)
>  		c->x86_cache_alignment = 128;
>  
> +	if (bootsym(trampoline_misc_enable_off) &
> +	    MSR_IA32_MISC_ENABLE_XD_DISABLE)
> +		printk(KERN_INFO
> +		       "re-enabled NX (Execute Disable) protection\n");

One other thing, which I'll also fix on commit.

This now prints per CPU on any system where we had to set XD_DISABLE.
It wants a c == &boot_cpu_data check to limit it to once-only.

~Andrew
diff mbox series

Patch

diff --git a/xen/arch/x86/boot/head.S b/xen/arch/x86/boot/head.S
index 9fbd602ea5..0e02c28f37 100644
--- a/xen/arch/x86/boot/head.S
+++ b/xen/arch/x86/boot/head.S
@@ -652,16 +652,53 @@  trampoline_setup:
         cpuid
 1:      mov     %edx, CPUINFO_FEATURE_OFFSET(X86_FEATURE_LM) + sym_esi(boot_cpu_data)
 
-        /* Check for NX. Adjust EFER setting if available. */
-        bt      $cpufeat_bit(X86_FEATURE_NX), %edx
-        jnc     1f
-        orb     $EFER_NXE >> 8, 1 + sym_esi(trampoline_efer)
-1:
-
         /* Check for availability of long mode. */
         bt      $cpufeat_bit(X86_FEATURE_LM),%edx
         jnc     .Lbad_cpu
 
+        /* Check for NX */
+        bt      $cpufeat_bit(X86_FEATURE_NX), %edx
+        jc     .Lgot_nx
+
+        /*
+         * NX appears to be unsupported, but it might be hidden.
+         *
+         * The feature is part of the AMD64 spec, but the very first Intel
+         * 64bit CPUs lacked the feature, and thereafter there was a
+         * firmware knob to disable the feature. Undo the disable if
+         * possible.
+         *
+         * All 64bit Intel CPUs support this MSR. If virtualised, expect
+         * the hypervisor to either emulate the MSR or give us NX.
+         */
+        xor     %eax, %eax
+        cpuid
+        cmp     $X86_VENDOR_INTEL_EBX, %ebx
+        jnz     .Lno_nx
+        cmp     $X86_VENDOR_INTEL_EDX, %edx
+        jnz     .Lno_nx
+        cmp     $X86_VENDOR_INTEL_ECX, %ecx
+        jnz     .Lno_nx
+
+        /* Clear the XD_DISABLE bit */
+        mov    $MSR_IA32_MISC_ENABLE, %ecx
+        rdmsr
+        btr     $2, %edx
+        jnc     .Lno_nx
+        wrmsr
+        orb     $MSR_IA32_MISC_ENABLE_XD_DISABLE >> 32, 4 + sym_esi(trampoline_misc_enable_off)
+
+        /* Check again for NX */
+        mov     $0x80000001, %eax
+        cpuid
+        bt      $cpufeat_bit(X86_FEATURE_NX), %edx
+        jnc     .Lno_nx
+
+.Lgot_nx:
+        /* Adjust EFER given that NX is present */
+        orb     $EFER_NXE >> 8, 1 + sym_esi(trampoline_efer)
+.Lno_nx:
+
         /* Stash TSC to calculate a good approximation of time-since-boot */
         rdtsc
         mov     %eax,     sym_esi(boot_tsc_stamp)
diff --git a/xen/arch/x86/cpu/intel.c b/xen/arch/x86/cpu/intel.c
index 168cd58f36..b2443b6831 100644
--- a/xen/arch/x86/cpu/intel.c
+++ b/xen/arch/x86/cpu/intel.c
@@ -304,24 +304,20 @@  static void cf_check early_init_intel(struct cpuinfo_x86 *c)
 	if (c->x86 == 15 && c->x86_cache_alignment == 64)
 		c->x86_cache_alignment = 128;
 
+	if (bootsym(trampoline_misc_enable_off) &
+	    MSR_IA32_MISC_ENABLE_XD_DISABLE)
+		printk(KERN_INFO
+		       "re-enabled NX (Execute Disable) protection\n");
+
 	/* Unmask CPUID levels and NX if masked: */
 	rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
 
-	disable = misc_enable & (MSR_IA32_MISC_ENABLE_LIMIT_CPUID |
-				 MSR_IA32_MISC_ENABLE_XD_DISABLE);
+	disable = misc_enable & MSR_IA32_MISC_ENABLE_LIMIT_CPUID;
 	if (disable) {
 		wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable & ~disable);
 		bootsym(trampoline_misc_enable_off) |= disable;
-		bootsym(trampoline_efer) |= EFER_NXE;
-	}
-
-	if (disable & MSR_IA32_MISC_ENABLE_LIMIT_CPUID)
 		printk(KERN_INFO "revised cpuid level: %d\n",
 		       cpuid_eax(0));
-	if (disable & MSR_IA32_MISC_ENABLE_XD_DISABLE) {
-		write_efer(read_efer() | EFER_NXE);
-		printk(KERN_INFO
-		       "re-enabled NX (Execute Disable) protection\n");
 	}
 
 	/* CPUID workaround for Intel 0F33/0F34 CPU */
diff --git a/xen/arch/x86/include/asm/msr-index.h b/xen/arch/x86/include/asm/msr-index.h
index 2749e433d2..4f861c0bb4 100644
--- a/xen/arch/x86/include/asm/msr-index.h
+++ b/xen/arch/x86/include/asm/msr-index.h
@@ -502,7 +502,7 @@ 
 #define MSR_IA32_MISC_ENABLE_MONITOR_ENABLE (1<<18)
 #define MSR_IA32_MISC_ENABLE_LIMIT_CPUID  (1<<22)
 #define MSR_IA32_MISC_ENABLE_XTPR_DISABLE (1<<23)
-#define MSR_IA32_MISC_ENABLE_XD_DISABLE	(1ULL << 34)
+#define MSR_IA32_MISC_ENABLE_XD_DISABLE   (_AC(1, ULL) << 34)
 
 #define MSR_IA32_TSC_DEADLINE		0x000006E0
 #define MSR_IA32_ENERGY_PERF_BIAS	0x000001b0
diff --git a/xen/arch/x86/include/asm/x86-vendors.h b/xen/arch/x86/include/asm/x86-vendors.h
index 0a37024cbd..9191da26d7 100644
--- a/xen/arch/x86/include/asm/x86-vendors.h
+++ b/xen/arch/x86/include/asm/x86-vendors.h
@@ -12,9 +12,9 @@ 
 #define X86_VENDOR_UNKNOWN 0
 
 #define X86_VENDOR_INTEL (1 << 0)
-#define X86_VENDOR_INTEL_EBX 0x756e6547U /* "GenuineIntel" */
-#define X86_VENDOR_INTEL_ECX 0x6c65746eU
-#define X86_VENDOR_INTEL_EDX 0x49656e69U
+#define X86_VENDOR_INTEL_EBX _AC(0x756e6547, U) /* "GenuineIntel" */
+#define X86_VENDOR_INTEL_ECX _AC(0x6c65746e, U)
+#define X86_VENDOR_INTEL_EDX _AC(0x49656e69, U)
 
 #define X86_VENDOR_AMD (1 << 1)
 #define X86_VENDOR_AMD_EBX 0x68747541U /* "AuthenticAMD" */