
[1/2] x86/vmx: Calculate model-specific LBRs once at start of day

Message ID 20230109120828.344-2-andrew.cooper3@citrix.com (mailing list archive)
State New, archived
Series x86/vmx: Don't crash guests when there is no model-specific LBRs available

Commit Message

Andrew Cooper Jan. 9, 2023, 12:08 p.m. UTC
There is no point repeating this calculation at runtime, especially as it is
in the fallback path of the WRMSR/RDMSR handlers.

Move the infrastructure higher in vmx.c to avoid forward declarations,
renaming last_branch_msr_get() to get_model_specific_lbr() to highlight that
these are model-specific only.

No practical change.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
CC: Jan Beulich <JBeulich@suse.com>
CC: Roger Pau Monné <roger.pau@citrix.com>
CC: Wei Liu <wl@xen.org>
CC: Jun Nakajima <jun.nakajima@intel.com>
CC: Kevin Tian <kevin.tian@intel.com>
---
 xen/arch/x86/hvm/vmx/vmx.c | 276 +++++++++++++++++++++++----------------------
 1 file changed, 139 insertions(+), 137 deletions(-)
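
As background for the discussion below, here is a minimal standalone sketch of the
pattern the patch adopts (compute the model-specific table once at start of day,
cache it, and only read the cached pointer on MSR intercepts). The names cached_lbr,
lookup_lbr_for_cpu() and start_of_day_init() are hypothetical, not the actual Xen code:

    #include <stddef.h>
    #include <stdint.h>

    /*
     * Hypothetical, simplified illustration: the model-specific LBR table is
     * looked up once at boot and cached in a pointer that is never written
     * again (__ro_after_init in Xen), so the MSR intercept fallback paths
     * only read the cached pointer instead of redoing the CPU model switch.
     */
    struct lbr_info {
        uint32_t base, count;
    };

    static const struct lbr_info *cached_lbr;   /* set once, then read-only */

    static const struct lbr_info *lookup_lbr_for_cpu(void)
    {
        /* Model-specific switch, as in the patch; NULL if nothing matches. */
        return NULL;
    }

    static void start_of_day_init(void)         /* e.g. called once during setup */
    {
        cached_lbr = lookup_lbr_for_cpu();      /* computed exactly once */
    }

    static int is_last_branch_msr(uint32_t msr)
    {
        const struct lbr_info *lbr = cached_lbr; /* no per-access recalculation */
        unsigned int i;

        if ( !lbr )
            return 0;

        for ( i = 0; lbr[i].count; i++ )
            if ( msr >= lbr[i].base && msr < lbr[i].base + lbr[i].count )
                return 1;

        return 0;
    }

In the real patch, the lookup is get_model_specific_lbr(), an __init function called
from start_vmx(), and the cache is the __ro_after_init pointer model_specific_lbr
read by is_last_branch_msr() and vmx_msr_write_intercept().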

Comments

Jan Beulich Jan. 10, 2023, 4:26 p.m. UTC | #1
On 09.01.2023 13:08, Andrew Cooper wrote:
> There is no point repeating this calculation at runtime, especially as it is
> in the fallback path of the WRMSR/RDMSR handlers.
> 
> Move the infrastructure higher in vmx.c to avoid forward declarations,
> renaming last_branch_msr_get() to get_model_specific_lbr() to highlight that
> these are model-specific only.
> 
> No practical change.
> 
> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>

Reviewed-by: Jan Beulich <jbeulich@suse.com>
with one nit:

> --- a/xen/arch/x86/hvm/vmx/vmx.c
> +++ b/xen/arch/x86/hvm/vmx/vmx.c
> @@ -396,6 +396,142 @@ void vmx_pi_hooks_deassign(struct domain *d)
>      domain_unpause(d);
>  }
>  
> +static const struct lbr_info {
> +    u32 base, count;
> +} p4_lbr[] = {
> +    { MSR_P4_LER_FROM_LIP,          1 },
> +    { MSR_P4_LER_TO_LIP,            1 },
> +    { MSR_P4_LASTBRANCH_TOS,        1 },
> +    { MSR_P4_LASTBRANCH_0_FROM_LIP, NUM_MSR_P4_LASTBRANCH_FROM_TO },
> +    { MSR_P4_LASTBRANCH_0_TO_LIP,   NUM_MSR_P4_LASTBRANCH_FROM_TO },
> +    { 0, 0 }
> +}, c2_lbr[] = {
> +    { MSR_IA32_LASTINTFROMIP,       1 },
> +    { MSR_IA32_LASTINTTOIP,         1 },
> +    { MSR_C2_LASTBRANCH_TOS,        1 },
> +    { MSR_C2_LASTBRANCH_0_FROM_IP,  NUM_MSR_C2_LASTBRANCH_FROM_TO },
> +    { MSR_C2_LASTBRANCH_0_TO_IP,    NUM_MSR_C2_LASTBRANCH_FROM_TO },
> +    { 0, 0 }
> +}, nh_lbr[] = {
> +    { MSR_IA32_LASTINTFROMIP,       1 },
> +    { MSR_IA32_LASTINTTOIP,         1 },
> +    { MSR_NHL_LBR_SELECT,           1 },
> +    { MSR_NHL_LASTBRANCH_TOS,       1 },
> +    { MSR_P4_LASTBRANCH_0_FROM_LIP, NUM_MSR_P4_LASTBRANCH_FROM_TO },
> +    { MSR_P4_LASTBRANCH_0_TO_LIP,   NUM_MSR_P4_LASTBRANCH_FROM_TO },
> +    { 0, 0 }
> +}, sk_lbr[] = {
> +    { MSR_IA32_LASTINTFROMIP,       1 },
> +    { MSR_IA32_LASTINTTOIP,         1 },
> +    { MSR_NHL_LBR_SELECT,           1 },
> +    { MSR_NHL_LASTBRANCH_TOS,       1 },
> +    { MSR_SKL_LASTBRANCH_0_FROM_IP, NUM_MSR_SKL_LASTBRANCH },
> +    { MSR_SKL_LASTBRANCH_0_TO_IP,   NUM_MSR_SKL_LASTBRANCH },
> +    { MSR_SKL_LASTBRANCH_0_INFO,    NUM_MSR_SKL_LASTBRANCH },
> +    { 0, 0 }
> +}, at_lbr[] = {
> +    { MSR_IA32_LASTINTFROMIP,       1 },
> +    { MSR_IA32_LASTINTTOIP,         1 },
> +    { MSR_C2_LASTBRANCH_TOS,        1 },
> +    { MSR_C2_LASTBRANCH_0_FROM_IP,  NUM_MSR_ATOM_LASTBRANCH_FROM_TO },
> +    { MSR_C2_LASTBRANCH_0_TO_IP,    NUM_MSR_ATOM_LASTBRANCH_FROM_TO },
> +    { 0, 0 }
> +}, sm_lbr[] = {
> +    { MSR_IA32_LASTINTFROMIP,       1 },
> +    { MSR_IA32_LASTINTTOIP,         1 },
> +    { MSR_SM_LBR_SELECT,            1 },
> +    { MSR_SM_LASTBRANCH_TOS,        1 },
> +    { MSR_C2_LASTBRANCH_0_FROM_IP,  NUM_MSR_ATOM_LASTBRANCH_FROM_TO },
> +    { MSR_C2_LASTBRANCH_0_TO_IP,    NUM_MSR_ATOM_LASTBRANCH_FROM_TO },
> +    { 0, 0 }
> +}, gm_lbr[] = {
> +    { MSR_IA32_LASTINTFROMIP,       1 },
> +    { MSR_IA32_LASTINTTOIP,         1 },
> +    { MSR_SM_LBR_SELECT,            1 },
> +    { MSR_SM_LASTBRANCH_TOS,        1 },
> +    { MSR_GM_LASTBRANCH_0_FROM_IP,  NUM_MSR_GM_LASTBRANCH_FROM_TO },
> +    { MSR_GM_LASTBRANCH_0_TO_IP,    NUM_MSR_GM_LASTBRANCH_FROM_TO },
> +    { 0, 0 }
> +};
> +static const struct lbr_info * __ro_after_init model_specific_lbr;
> +
> +static const struct __init lbr_info *get_model_specific_lbr(void)

Please move __init:

static const struct lbr_info *__init get_model_specific_lbr(void)

Jan
Andrew Cooper Jan. 10, 2023, 4:28 p.m. UTC | #2
On 10/01/2023 4:26 pm, Jan Beulich wrote:
> On 09.01.2023 13:08, Andrew Cooper wrote:
>> There is no point repeating this calculation at runtime, especially as it is
>> in the fallback path of the WRMSR/RDMSR handlers.
>>
>> Move the infrastructure higher in vmx.c to avoid forward declarations,
>> renaming last_branch_msr_get() to get_model_specific_lbr() to highlight that
>> these are model-specific only.
>>
>> No practical change.
>>
>> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
> Reviewed-by: Jan Beulich <jbeulich@suse.com>
> with one nit:
>
>> --- a/xen/arch/x86/hvm/vmx/vmx.c
>> +++ b/xen/arch/x86/hvm/vmx/vmx.c
>> @@ -396,6 +396,142 @@ void vmx_pi_hooks_deassign(struct domain *d)
>>      domain_unpause(d);
>>  }
>>  
>> +static const struct lbr_info {
>> +    u32 base, count;
>> +} p4_lbr[] = {
>> +    { MSR_P4_LER_FROM_LIP,          1 },
>> +    { MSR_P4_LER_TO_LIP,            1 },
>> +    { MSR_P4_LASTBRANCH_TOS,        1 },
>> +    { MSR_P4_LASTBRANCH_0_FROM_LIP, NUM_MSR_P4_LASTBRANCH_FROM_TO },
>> +    { MSR_P4_LASTBRANCH_0_TO_LIP,   NUM_MSR_P4_LASTBRANCH_FROM_TO },
>> +    { 0, 0 }
>> +}, c2_lbr[] = {
>> +    { MSR_IA32_LASTINTFROMIP,       1 },
>> +    { MSR_IA32_LASTINTTOIP,         1 },
>> +    { MSR_C2_LASTBRANCH_TOS,        1 },
>> +    { MSR_C2_LASTBRANCH_0_FROM_IP,  NUM_MSR_C2_LASTBRANCH_FROM_TO },
>> +    { MSR_C2_LASTBRANCH_0_TO_IP,    NUM_MSR_C2_LASTBRANCH_FROM_TO },
>> +    { 0, 0 }
>> +}, nh_lbr[] = {
>> +    { MSR_IA32_LASTINTFROMIP,       1 },
>> +    { MSR_IA32_LASTINTTOIP,         1 },
>> +    { MSR_NHL_LBR_SELECT,           1 },
>> +    { MSR_NHL_LASTBRANCH_TOS,       1 },
>> +    { MSR_P4_LASTBRANCH_0_FROM_LIP, NUM_MSR_P4_LASTBRANCH_FROM_TO },
>> +    { MSR_P4_LASTBRANCH_0_TO_LIP,   NUM_MSR_P4_LASTBRANCH_FROM_TO },
>> +    { 0, 0 }
>> +}, sk_lbr[] = {
>> +    { MSR_IA32_LASTINTFROMIP,       1 },
>> +    { MSR_IA32_LASTINTTOIP,         1 },
>> +    { MSR_NHL_LBR_SELECT,           1 },
>> +    { MSR_NHL_LASTBRANCH_TOS,       1 },
>> +    { MSR_SKL_LASTBRANCH_0_FROM_IP, NUM_MSR_SKL_LASTBRANCH },
>> +    { MSR_SKL_LASTBRANCH_0_TO_IP,   NUM_MSR_SKL_LASTBRANCH },
>> +    { MSR_SKL_LASTBRANCH_0_INFO,    NUM_MSR_SKL_LASTBRANCH },
>> +    { 0, 0 }
>> +}, at_lbr[] = {
>> +    { MSR_IA32_LASTINTFROMIP,       1 },
>> +    { MSR_IA32_LASTINTTOIP,         1 },
>> +    { MSR_C2_LASTBRANCH_TOS,        1 },
>> +    { MSR_C2_LASTBRANCH_0_FROM_IP,  NUM_MSR_ATOM_LASTBRANCH_FROM_TO },
>> +    { MSR_C2_LASTBRANCH_0_TO_IP,    NUM_MSR_ATOM_LASTBRANCH_FROM_TO },
>> +    { 0, 0 }
>> +}, sm_lbr[] = {
>> +    { MSR_IA32_LASTINTFROMIP,       1 },
>> +    { MSR_IA32_LASTINTTOIP,         1 },
>> +    { MSR_SM_LBR_SELECT,            1 },
>> +    { MSR_SM_LASTBRANCH_TOS,        1 },
>> +    { MSR_C2_LASTBRANCH_0_FROM_IP,  NUM_MSR_ATOM_LASTBRANCH_FROM_TO },
>> +    { MSR_C2_LASTBRANCH_0_TO_IP,    NUM_MSR_ATOM_LASTBRANCH_FROM_TO },
>> +    { 0, 0 }
>> +}, gm_lbr[] = {
>> +    { MSR_IA32_LASTINTFROMIP,       1 },
>> +    { MSR_IA32_LASTINTTOIP,         1 },
>> +    { MSR_SM_LBR_SELECT,            1 },
>> +    { MSR_SM_LASTBRANCH_TOS,        1 },
>> +    { MSR_GM_LASTBRANCH_0_FROM_IP,  NUM_MSR_GM_LASTBRANCH_FROM_TO },
>> +    { MSR_GM_LASTBRANCH_0_TO_IP,    NUM_MSR_GM_LASTBRANCH_FROM_TO },
>> +    { 0, 0 }
>> +};
>> +static const struct lbr_info * __ro_after_init model_specific_lbr;
>> +
>> +static const struct __init lbr_info *get_model_specific_lbr(void)
> Please move __init:
>
> static const struct lbr_info *__init get_model_specific_lbr(void)

Yeah, I noticed and fixed both style errors here.  Also an extraneous
space before __ro_after_init.

~Andrew
Tian, Kevin Jan. 12, 2023, 1:35 a.m. UTC | #3
> From: Andrew Cooper <andrew.cooper3@citrix.com>
> Sent: Monday, January 9, 2023 8:08 PM
> 
> There is no point repeating this calculation at runtime, especially as it is
> in the fallback path of the WRMSR/RDMSR handlers.
> 
> Move the infrastructure higher in vmx.c to avoid forward declarations,
> renaming last_branch_msr_get() to get_model_specific_lbr() to highlight that
> these are model-specific only.
> 
> No practical change.
> 
> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>

Reviewed-by: Kevin Tian <kevin.tian@intel.com>

Patch

diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 43a4865d1c76..17320f9fb267 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -396,6 +396,142 @@  void vmx_pi_hooks_deassign(struct domain *d)
     domain_unpause(d);
 }
 
+static const struct lbr_info {
+    u32 base, count;
+} p4_lbr[] = {
+    { MSR_P4_LER_FROM_LIP,          1 },
+    { MSR_P4_LER_TO_LIP,            1 },
+    { MSR_P4_LASTBRANCH_TOS,        1 },
+    { MSR_P4_LASTBRANCH_0_FROM_LIP, NUM_MSR_P4_LASTBRANCH_FROM_TO },
+    { MSR_P4_LASTBRANCH_0_TO_LIP,   NUM_MSR_P4_LASTBRANCH_FROM_TO },
+    { 0, 0 }
+}, c2_lbr[] = {
+    { MSR_IA32_LASTINTFROMIP,       1 },
+    { MSR_IA32_LASTINTTOIP,         1 },
+    { MSR_C2_LASTBRANCH_TOS,        1 },
+    { MSR_C2_LASTBRANCH_0_FROM_IP,  NUM_MSR_C2_LASTBRANCH_FROM_TO },
+    { MSR_C2_LASTBRANCH_0_TO_IP,    NUM_MSR_C2_LASTBRANCH_FROM_TO },
+    { 0, 0 }
+}, nh_lbr[] = {
+    { MSR_IA32_LASTINTFROMIP,       1 },
+    { MSR_IA32_LASTINTTOIP,         1 },
+    { MSR_NHL_LBR_SELECT,           1 },
+    { MSR_NHL_LASTBRANCH_TOS,       1 },
+    { MSR_P4_LASTBRANCH_0_FROM_LIP, NUM_MSR_P4_LASTBRANCH_FROM_TO },
+    { MSR_P4_LASTBRANCH_0_TO_LIP,   NUM_MSR_P4_LASTBRANCH_FROM_TO },
+    { 0, 0 }
+}, sk_lbr[] = {
+    { MSR_IA32_LASTINTFROMIP,       1 },
+    { MSR_IA32_LASTINTTOIP,         1 },
+    { MSR_NHL_LBR_SELECT,           1 },
+    { MSR_NHL_LASTBRANCH_TOS,       1 },
+    { MSR_SKL_LASTBRANCH_0_FROM_IP, NUM_MSR_SKL_LASTBRANCH },
+    { MSR_SKL_LASTBRANCH_0_TO_IP,   NUM_MSR_SKL_LASTBRANCH },
+    { MSR_SKL_LASTBRANCH_0_INFO,    NUM_MSR_SKL_LASTBRANCH },
+    { 0, 0 }
+}, at_lbr[] = {
+    { MSR_IA32_LASTINTFROMIP,       1 },
+    { MSR_IA32_LASTINTTOIP,         1 },
+    { MSR_C2_LASTBRANCH_TOS,        1 },
+    { MSR_C2_LASTBRANCH_0_FROM_IP,  NUM_MSR_ATOM_LASTBRANCH_FROM_TO },
+    { MSR_C2_LASTBRANCH_0_TO_IP,    NUM_MSR_ATOM_LASTBRANCH_FROM_TO },
+    { 0, 0 }
+}, sm_lbr[] = {
+    { MSR_IA32_LASTINTFROMIP,       1 },
+    { MSR_IA32_LASTINTTOIP,         1 },
+    { MSR_SM_LBR_SELECT,            1 },
+    { MSR_SM_LASTBRANCH_TOS,        1 },
+    { MSR_C2_LASTBRANCH_0_FROM_IP,  NUM_MSR_ATOM_LASTBRANCH_FROM_TO },
+    { MSR_C2_LASTBRANCH_0_TO_IP,    NUM_MSR_ATOM_LASTBRANCH_FROM_TO },
+    { 0, 0 }
+}, gm_lbr[] = {
+    { MSR_IA32_LASTINTFROMIP,       1 },
+    { MSR_IA32_LASTINTTOIP,         1 },
+    { MSR_SM_LBR_SELECT,            1 },
+    { MSR_SM_LASTBRANCH_TOS,        1 },
+    { MSR_GM_LASTBRANCH_0_FROM_IP,  NUM_MSR_GM_LASTBRANCH_FROM_TO },
+    { MSR_GM_LASTBRANCH_0_TO_IP,    NUM_MSR_GM_LASTBRANCH_FROM_TO },
+    { 0, 0 }
+};
+static const struct lbr_info * __ro_after_init model_specific_lbr;
+
+static const struct __init lbr_info *get_model_specific_lbr(void)
+{
+    switch ( boot_cpu_data.x86 )
+    {
+    case 6:
+        switch ( boot_cpu_data.x86_model )
+        {
+        /* Core2 Duo */
+        case 0x0f:
+        /* Enhanced Core */
+        case 0x17:
+        /* Xeon 7400 */
+        case 0x1d:
+            return c2_lbr;
+        /* Nehalem */
+        case 0x1a: case 0x1e: case 0x1f: case 0x2e:
+        /* Westmere */
+        case 0x25: case 0x2c: case 0x2f:
+        /* Sandy Bridge */
+        case 0x2a: case 0x2d:
+        /* Ivy Bridge */
+        case 0x3a: case 0x3e:
+        /* Haswell */
+        case 0x3c: case 0x3f: case 0x45: case 0x46:
+        /* Broadwell */
+        case 0x3d: case 0x47: case 0x4f: case 0x56:
+            return nh_lbr;
+        /* Skylake */
+        case 0x4e: case 0x5e:
+        /* Xeon Scalable */
+        case 0x55:
+        /* Cannon Lake */
+        case 0x66:
+        /* Goldmont Plus */
+        case 0x7a:
+        /* Ice Lake */
+        case 0x6a: case 0x6c: case 0x7d: case 0x7e:
+        /* Tiger Lake */
+        case 0x8c: case 0x8d:
+        /* Tremont */
+        case 0x86:
+        /* Kaby Lake */
+        case 0x8e: case 0x9e:
+        /* Comet Lake */
+        case 0xa5: case 0xa6:
+            return sk_lbr;
+        /* Atom */
+        case 0x1c: case 0x26: case 0x27: case 0x35: case 0x36:
+            return at_lbr;
+        /* Silvermont */
+        case 0x37: case 0x4a: case 0x4d: case 0x5a: case 0x5d:
+        /* Xeon Phi Knights Landing */
+        case 0x57:
+        /* Xeon Phi Knights Mill */
+        case 0x85:
+        /* Airmont */
+        case 0x4c:
+            return sm_lbr;
+        /* Goldmont */
+        case 0x5c: case 0x5f:
+            return gm_lbr;
+        }
+        break;
+
+    case 15:
+        switch ( boot_cpu_data.x86_model )
+        {
+        /* Pentium4/Xeon with em64t */
+        case 3: case 4: case 6:
+            return p4_lbr;
+        }
+        break;
+    }
+
+    return NULL;
+}
+
 static int cf_check vmx_domain_initialise(struct domain *d)
 {
     static const struct arch_csw csw = {
@@ -2846,6 +2982,7 @@  const struct hvm_function_table * __init start_vmx(void)
         vmx_function_table.tsc_scaling.setup = vmx_setup_tsc_scaling;
     }
 
+    model_specific_lbr = get_model_specific_lbr();
     lbr_tsx_fixup_check();
     ler_to_fixup_check();
 
@@ -2992,141 +3129,6 @@  static int vmx_cr_access(cr_access_qual_t qual)
     return X86EMUL_OKAY;
 }
 
-static const struct lbr_info {
-    u32 base, count;
-} p4_lbr[] = {
-    { MSR_P4_LER_FROM_LIP,          1 },
-    { MSR_P4_LER_TO_LIP,            1 },
-    { MSR_P4_LASTBRANCH_TOS,        1 },
-    { MSR_P4_LASTBRANCH_0_FROM_LIP, NUM_MSR_P4_LASTBRANCH_FROM_TO },
-    { MSR_P4_LASTBRANCH_0_TO_LIP,   NUM_MSR_P4_LASTBRANCH_FROM_TO },
-    { 0, 0 }
-}, c2_lbr[] = {
-    { MSR_IA32_LASTINTFROMIP,       1 },
-    { MSR_IA32_LASTINTTOIP,         1 },
-    { MSR_C2_LASTBRANCH_TOS,        1 },
-    { MSR_C2_LASTBRANCH_0_FROM_IP,  NUM_MSR_C2_LASTBRANCH_FROM_TO },
-    { MSR_C2_LASTBRANCH_0_TO_IP,    NUM_MSR_C2_LASTBRANCH_FROM_TO },
-    { 0, 0 }
-}, nh_lbr[] = {
-    { MSR_IA32_LASTINTFROMIP,       1 },
-    { MSR_IA32_LASTINTTOIP,         1 },
-    { MSR_NHL_LBR_SELECT,           1 },
-    { MSR_NHL_LASTBRANCH_TOS,       1 },
-    { MSR_P4_LASTBRANCH_0_FROM_LIP, NUM_MSR_P4_LASTBRANCH_FROM_TO },
-    { MSR_P4_LASTBRANCH_0_TO_LIP,   NUM_MSR_P4_LASTBRANCH_FROM_TO },
-    { 0, 0 }
-}, sk_lbr[] = {
-    { MSR_IA32_LASTINTFROMIP,       1 },
-    { MSR_IA32_LASTINTTOIP,         1 },
-    { MSR_NHL_LBR_SELECT,           1 },
-    { MSR_NHL_LASTBRANCH_TOS,       1 },
-    { MSR_SKL_LASTBRANCH_0_FROM_IP, NUM_MSR_SKL_LASTBRANCH },
-    { MSR_SKL_LASTBRANCH_0_TO_IP,   NUM_MSR_SKL_LASTBRANCH },
-    { MSR_SKL_LASTBRANCH_0_INFO,    NUM_MSR_SKL_LASTBRANCH },
-    { 0, 0 }
-}, at_lbr[] = {
-    { MSR_IA32_LASTINTFROMIP,       1 },
-    { MSR_IA32_LASTINTTOIP,         1 },
-    { MSR_C2_LASTBRANCH_TOS,        1 },
-    { MSR_C2_LASTBRANCH_0_FROM_IP,  NUM_MSR_ATOM_LASTBRANCH_FROM_TO },
-    { MSR_C2_LASTBRANCH_0_TO_IP,    NUM_MSR_ATOM_LASTBRANCH_FROM_TO },
-    { 0, 0 }
-}, sm_lbr[] = {
-    { MSR_IA32_LASTINTFROMIP,       1 },
-    { MSR_IA32_LASTINTTOIP,         1 },
-    { MSR_SM_LBR_SELECT,            1 },
-    { MSR_SM_LASTBRANCH_TOS,        1 },
-    { MSR_C2_LASTBRANCH_0_FROM_IP,  NUM_MSR_ATOM_LASTBRANCH_FROM_TO },
-    { MSR_C2_LASTBRANCH_0_TO_IP,    NUM_MSR_ATOM_LASTBRANCH_FROM_TO },
-    { 0, 0 }
-}, gm_lbr[] = {
-    { MSR_IA32_LASTINTFROMIP,       1 },
-    { MSR_IA32_LASTINTTOIP,         1 },
-    { MSR_SM_LBR_SELECT,            1 },
-    { MSR_SM_LASTBRANCH_TOS,        1 },
-    { MSR_GM_LASTBRANCH_0_FROM_IP,  NUM_MSR_GM_LASTBRANCH_FROM_TO },
-    { MSR_GM_LASTBRANCH_0_TO_IP,    NUM_MSR_GM_LASTBRANCH_FROM_TO },
-    { 0, 0 }
-};
-
-static const struct lbr_info *last_branch_msr_get(void)
-{
-    switch ( boot_cpu_data.x86 )
-    {
-    case 6:
-        switch ( boot_cpu_data.x86_model )
-        {
-        /* Core2 Duo */
-        case 0x0f:
-        /* Enhanced Core */
-        case 0x17:
-        /* Xeon 7400 */
-        case 0x1d:
-            return c2_lbr;
-        /* Nehalem */
-        case 0x1a: case 0x1e: case 0x1f: case 0x2e:
-        /* Westmere */
-        case 0x25: case 0x2c: case 0x2f:
-        /* Sandy Bridge */
-        case 0x2a: case 0x2d:
-        /* Ivy Bridge */
-        case 0x3a: case 0x3e:
-        /* Haswell */
-        case 0x3c: case 0x3f: case 0x45: case 0x46:
-        /* Broadwell */
-        case 0x3d: case 0x47: case 0x4f: case 0x56:
-            return nh_lbr;
-        /* Skylake */
-        case 0x4e: case 0x5e:
-        /* Xeon Scalable */
-        case 0x55:
-        /* Cannon Lake */
-        case 0x66:
-        /* Goldmont Plus */
-        case 0x7a:
-        /* Ice Lake */
-        case 0x6a: case 0x6c: case 0x7d: case 0x7e:
-        /* Tiger Lake */
-        case 0x8c: case 0x8d:
-        /* Tremont */
-        case 0x86:
-        /* Kaby Lake */
-        case 0x8e: case 0x9e:
-        /* Comet Lake */
-        case 0xa5: case 0xa6:
-            return sk_lbr;
-        /* Atom */
-        case 0x1c: case 0x26: case 0x27: case 0x35: case 0x36:
-            return at_lbr;
-        /* Silvermont */
-        case 0x37: case 0x4a: case 0x4d: case 0x5a: case 0x5d:
-        /* Xeon Phi Knights Landing */
-        case 0x57:
-        /* Xeon Phi Knights Mill */
-        case 0x85:
-        /* Airmont */
-        case 0x4c:
-            return sm_lbr;
-        /* Goldmont */
-        case 0x5c: case 0x5f:
-            return gm_lbr;
-        }
-        break;
-
-    case 15:
-        switch ( boot_cpu_data.x86_model )
-        {
-        /* Pentium4/Xeon with em64t */
-        case 3: case 4: case 6:
-            return p4_lbr;
-        }
-        break;
-    }
-
-    return NULL;
-}
-
 enum
 {
     LBR_FORMAT_32                 = 0x0, /* 32-bit record format */
@@ -3233,7 +3235,7 @@  static void __init ler_to_fixup_check(void)
 
 static int is_last_branch_msr(u32 ecx)
 {
-    const struct lbr_info *lbr = last_branch_msr_get();
+    const struct lbr_info *lbr = model_specific_lbr;
 
     if ( lbr == NULL )
         return 0;
@@ -3572,7 +3574,7 @@  static int cf_check vmx_msr_write_intercept(
         if ( !(v->arch.hvm.vmx.lbr_flags & LBR_MSRS_INSERTED) &&
              (msr_content & IA32_DEBUGCTLMSR_LBR) )
         {
-            const struct lbr_info *lbr = last_branch_msr_get();
+            const struct lbr_info *lbr = model_specific_lbr;
 
             if ( unlikely(!lbr) )
             {