diff mbox series

[for-4.20] x86/traps: Rework LER initialisation and support Zen5/Diamond Rapids

Message ID 20241231192002.1753737-1-andrew.cooper3@citrix.com (mailing list archive)
State New
Headers show
Series [for-4.20] x86/traps: Rework LER initialisation and support Zen5/Diamond Rapids | expand

Commit Message

Andrew Cooper Dec. 31, 2024, 7:20 p.m. UTC
AMD have always used the architectural MSRs for LER.  As the first processor
to support LER was the K7 (which was 32bit), we can assume its presence
unconditionally in 64bit mode.

Intel are about to run out of space in Family 6 and start using 19.  It is
only the Pentium 4 which uses non-architectural LER MSRs.

percpu_traps_init(), which runs on every CPU, contains a lot of code which
should be init-only, and is the only reason why opt_ler can't be in initdata.

Write a brand new init_ler() which expects all future Intel and AMD CPUs to
continue using the architectural MSRs, and does all setup together.  Call it
from trap_init(), and remove the setup logic percpu_traps_init() except for
the single path configuring MSR_IA32_DEBUGCTLMSR.

Leave behind a warning if the user asked for LER and Xen couldn't enable it.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
CC: Jan Beulich <JBeulich@suse.com>
CC: Roger Pau Monné <roger.pau@citrix.com>
CC: Oleksii Kurochko <oleksii.kurochko@gmail.com>

For 4.20.  This is needed for Zen5 CPUs (already available) as well as Intel
CPUs coming shortly.  It also moves some non-init logic to being init-only.
---
 xen/arch/x86/traps.c | 86 ++++++++++++++++++++------------------------
 1 file changed, 39 insertions(+), 47 deletions(-)

Comments

Marek Marczykowski-Górecki Dec. 31, 2024, 10:25 p.m. UTC | #1
On Tue, Dec 31, 2024 at 07:20:02PM +0000, Andrew Cooper wrote:
> AMD have always used the architectural MSRs for LER.  As the first processor
> to support LER was the K7 (which was 32bit), we can assume it's presence
> unconditionally in 64bit mode.
> 
> Intel are about to run out of space in Family 6 and start using 19.  It is
> only the Pentium 4 which uses non-architectural LER MSRs.
> 
> percpu_traps_init(), which runs on every CPU, contains a lot of code which
> should be init-only, and is the only reason why opt_ler can't be in initdata.
> 
> Write a brand new init_ler() which expects all future Intel and AMD CPUs to
> continue using the architectural MSRs, and does all setup together.  Call it
> from trap_init(), and remove the setup logic percpu_traps_init() except for
> the single path configuring MSR_IA32_DEBUGCTLMSR.
> 
> Leave behind a warning if the user asked for LER and Xen couldn't enable it.
> 
> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
> ---
> CC: Jan Beulich <JBeulich@suse.com>
> CC: Roger Pau Monné <roger.pau@citrix.com>
> CC: Oleksii Kurochko <oleksii.kurochko@gmail.com>
> 
> For 4.20.  This is needed for Zen5 CPUs (already available) as well as Intel
> CPUs coming shortly.  It also moves some non-init logic to being init-only.
> ---

...

> @@ -2201,6 +2155,42 @@ void __init init_idt_traps(void)
>          this_cpu(compat_gdt) = boot_compat_gdt;
>  }
>  
> +static void __init init_ler(void)
> +{
> +    unsigned int msr = 0;
> +
> +    if ( !opt_ler )
> +        return;
> +
> +    /*
> +     * Intel Pentium 4 is the only known CPU to not use the architectural MSR
> +     * indicies.
> +     */
> +    switch ( boot_cpu_data.x86_vendor )
> +    {
> +    case X86_VENDOR_INTEL:
> +        if ( boot_cpu_data.x86 == 0xf )
> +        {
> +            ler_msr = MSR_P4_LER_FROM_LIP;

msr = 

and ...

> +            break;
> +        }
> +        fallthrough;
> +    case X86_VENDOR_AMD:
> +    case X86_VENDOR_HYGON:
> +        ler_msr = MSR_IA32_LASTINTFROMIP;

... here?

But also, why temporary variable? Is there something else that would set
non-zero value that should be preserved?

> +        break;
> +    }
> +
> +    if ( msr == 0 )
> +    {
> +        printk(XENLOG_WARNING "LER disabled: failed to identy MSRs\n");
> +        return;
> +    }
> +
> +    ler_msr = msr;
> +    setup_force_cpu_cap(X86_FEATURE_XEN_LBR);
> +}
> +
>  extern void (*const autogen_entrypoints[X86_NR_VECTORS])(void);
>  void __init trap_init(void)
>  {
> @@ -2226,6 +2216,8 @@ void __init trap_init(void)
>          }
>      }
>  
> +    init_ler();
> +
>      /* Cache {,compat_}gdt_l1e now that physically relocation is done. */
>      this_cpu(gdt_l1e) =
>          l1e_from_pfn(virt_to_mfn(boot_gdt), __PAGE_HYPERVISOR_RW);
> -- 
> 2.39.5
> 
>
Andrew Cooper Jan. 1, 2025, 2:35 p.m. UTC | #2
On 31/12/2024 10:25 pm, Marek Marczykowski-Górecki wrote:
> On Tue, Dec 31, 2024 at 07:20:02PM +0000, Andrew Cooper wrote:
>> AMD have always used the architectural MSRs for LER.  As the first processor
>> to support LER was the K7 (which was 32bit), we can assume it's presence
>> unconditionally in 64bit mode.
>>
>> Intel are about to run out of space in Family 6 and start using 19.  It is
>> only the Pentium 4 which uses non-architectural LER MSRs.
>>
>> percpu_traps_init(), which runs on every CPU, contains a lot of code which
>> should be init-only, and is the only reason why opt_ler can't be in initdata.
>>
>> Write a brand new init_ler() which expects all future Intel and AMD CPUs to
>> continue using the architectural MSRs, and does all setup together.  Call it
>> from trap_init(), and remove the setup logic percpu_traps_init() except for
>> the single path configuring MSR_IA32_DEBUGCTLMSR.
>>
>> Leave behind a warning if the user asked for LER and Xen couldn't enable it.
>>
>> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
>> ---
>> CC: Jan Beulich <JBeulich@suse.com>
>> CC: Roger Pau Monné <roger.pau@citrix.com>
>> CC: Oleksii Kurochko <oleksii.kurochko@gmail.com>
>>
>> For 4.20.  This is needed for Zen5 CPUs (already available) as well as Intel
>> CPUs coming shortly.  It also moves some non-init logic to being init-only.
>> ---
> ...
>
>> @@ -2201,6 +2155,42 @@ void __init init_idt_traps(void)
>>          this_cpu(compat_gdt) = boot_compat_gdt;
>>  }
>>  
>> +static void __init init_ler(void)
>> +{
>> +    unsigned int msr = 0;
>> +
>> +    if ( !opt_ler )
>> +        return;
>> +
>> +    /*
>> +     * Intel Pentium 4 is the only known CPU to not use the architectural MSR
>> +     * indicies.
>> +     */
>> +    switch ( boot_cpu_data.x86_vendor )
>> +    {
>> +    case X86_VENDOR_INTEL:
>> +        if ( boot_cpu_data.x86 == 0xf )
>> +        {
>> +            ler_msr = MSR_P4_LER_FROM_LIP;
> msr = 
>
> and ...
>
>> +            break;
>> +        }
>> +        fallthrough;
>> +    case X86_VENDOR_AMD:
>> +    case X86_VENDOR_HYGON:
>> +        ler_msr = MSR_IA32_LASTINTFROMIP;
> ... here?

Yes.  Serves me right for a last minute refactor.

> But also, why temporary variable?

Code gen (ler_msr is a global so checking it at the end needs to come
from memory), and defence in depth (it introduces a path through the
function where we failed to find the MSR but still turned LER on anyway
if there was an unexpected value there already).

~Andrew
diff mbox series

Patch

diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index fd8a4448e3f7..377194d7b620 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -114,7 +114,7 @@  DEFINE_PER_CPU_PAGE_ALIGNED(struct tss_page, tss_page);
 static int debug_stack_lines = 20;
 integer_param("debug_stack_lines", debug_stack_lines);
 
-static bool __ro_after_init opt_ler;
+static bool __initdata opt_ler;
 boolean_param("ler", opt_ler);
 
 /* LastExceptionFromIP on this hardware.  Zero if LER is not in use. */
@@ -2092,56 +2092,10 @@  static void __init set_intr_gate(unsigned int n, void *addr)
     __set_intr_gate(n, 0, addr);
 }
 
-static unsigned int noinline __init calc_ler_msr(void)
-{
-    switch ( boot_cpu_data.x86_vendor )
-    {
-    case X86_VENDOR_INTEL:
-        switch ( boot_cpu_data.x86 )
-        {
-        case 6:
-            return MSR_IA32_LASTINTFROMIP;
-
-        case 15:
-            return MSR_P4_LER_FROM_LIP;
-        }
-        break;
-
-    case X86_VENDOR_AMD:
-        switch ( boot_cpu_data.x86 )
-        {
-        case 6:
-        case 0xf ... 0x19:
-            return MSR_IA32_LASTINTFROMIP;
-        }
-        break;
-
-    case X86_VENDOR_HYGON:
-        return MSR_IA32_LASTINTFROMIP;
-    }
-
-    return 0;
-}
-
 void percpu_traps_init(void)
 {
     subarch_percpu_traps_init();
 
-    if ( !opt_ler )
-        return;
-
-    if ( !ler_msr )
-    {
-        ler_msr = calc_ler_msr();
-        if ( !ler_msr )
-        {
-            opt_ler = false;
-            return;
-        }
-
-        setup_force_cpu_cap(X86_FEATURE_XEN_LBR);
-    }
-
     if ( cpu_has_xen_lbr )
         wrmsrl(MSR_IA32_DEBUGCTLMSR, IA32_DEBUGCTLMSR_LBR);
 }
@@ -2201,6 +2155,42 @@  void __init init_idt_traps(void)
         this_cpu(compat_gdt) = boot_compat_gdt;
 }
 
+static void __init init_ler(void)
+{
+    unsigned int msr = 0;
+
+    if ( !opt_ler )
+        return;
+
+    /*
+     * Intel Pentium 4 is the only known CPU to not use the architectural MSR
+     * indices.
+     */
+    switch ( boot_cpu_data.x86_vendor )
+    {
+    case X86_VENDOR_INTEL:
+        if ( boot_cpu_data.x86 == 0xf )
+        {
+            msr = MSR_P4_LER_FROM_LIP;
+            break;
+        }
+        fallthrough;
+    case X86_VENDOR_AMD:
+    case X86_VENDOR_HYGON:
+        msr = MSR_IA32_LASTINTFROMIP;
+        break;
+    }
+
+    if ( msr == 0 )
+    {
+        printk(XENLOG_WARNING "LER disabled: failed to identify MSRs\n");
+        return;
+    }
+
+    ler_msr = msr;
+    setup_force_cpu_cap(X86_FEATURE_XEN_LBR);
+}
+
 extern void (*const autogen_entrypoints[X86_NR_VECTORS])(void);
 void __init trap_init(void)
 {
@@ -2226,6 +2216,8 @@  void __init trap_init(void)
         }
     }
 
+    init_ler();
+
     /* Cache {,compat_}gdt_l1e now that physically relocation is done. */
     this_cpu(gdt_l1e) =
         l1e_from_pfn(virt_to_mfn(boot_gdt), __PAGE_HYPERVISOR_RW);