[2/2] x86/vtx: Fixes to Haswell/Broadwell LBR TSX errata

Message ID 20191028150152.21179-3-andrew.cooper3@citrix.com
State New
Series
  • x86/vtx: Corrections to errata workarounds

Commit Message

Andrew Cooper Oct. 28, 2019, 3:01 p.m. UTC
Cross reference and list each errata, now that they are published.

These errata are specific to Haswell/Broadwell.  They should have model and
vendor checks, as Intel isn't the only vendor to implement VT-x.

All affected models use the same MSR indices, so these can be hard-coded
rather than looked up and stored as constant values.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
CC: Jan Beulich <JBeulich@suse.com>
CC: Wei Liu <wl@xen.org>
CC: Roger Pau Monné <roger.pau@citrix.com>
CC: Jun Nakajima <jun.nakajima@intel.com>
CC: Kevin Tian <kevin.tian@intel.com>
CC: Juergen Gross <jgross@suse.com>
---
 xen/arch/x86/hvm/vmx/vmx.c | 63 +++++++++++++++++++++++++---------------------
 1 file changed, 35 insertions(+), 28 deletions(-)

Comments

Jan Beulich Oct. 29, 2019, 3:32 p.m. UTC | #1
On 28.10.2019 16:01, Andrew Cooper wrote:
> Cross reference and list each errata, now that they are published.

Shouldn't this be "all errata" or "each erratum"?

> @@ -2727,40 +2719,50 @@ enum
>  
>  static bool __read_mostly lbr_tsx_fixup_needed;
>  static bool __read_mostly bdf93_fixup_needed;
> -static uint32_t __read_mostly lbr_from_start;
> -static uint32_t __read_mostly lbr_from_end;
> -static uint32_t __read_mostly lbr_lastint_from;
>  
>  static void __init lbr_tsx_fixup_check(void)
>  {
> -    bool tsx_support = cpu_has_hle || cpu_has_rtm;
>      uint64_t caps;
>      uint32_t lbr_format;
>  
> -    /* Fixup is needed only when TSX support is disabled ... */
> -    if ( tsx_support )
> +    /*
> +     * HSM182, HSD172, HSE117, BDM127, BDD117, BDF85, BDE105:
> +     *
> +     * On processors that do not support Intel Transactional Synchronization
> +     * Extensions (Intel TSX) (CPUID.07H.EBX bits 4 and 11 are both zero),
> +     * writes to MSR_LASTBRANCH_x_FROM_IP (MSR 680H to 68FH) may #GP unless
> +     * bits[62:61] are equal to bit[47].
> +     *
> +     * Software should sign the MSRs.

Missing "extend"?

> +     * Experimentally, MSR_LER_FROM_LIP (1DDH) is similarly impacted, so is
> +     * fixed up as well.
> +     */
> +    if ( cpu_has_hle || cpu_has_rtm ||
> +         boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
> +         boot_cpu_data.x86 != 6 ||
> +         (boot_cpu_data.x86_model != 0x3c && /* HSM182, HSD172 - 4th gen Core */
> +          boot_cpu_data.x86_model != 0x3f && /* HSE117 - Xeon E5 v3 */
> +          boot_cpu_data.x86_model != 0x45 && /* HSM182 - 4th gen Core */
> +          boot_cpu_data.x86_model != 0x46 && /* HSM182, HSD172 - 4th gen Core (GT3) */
> +          boot_cpu_data.x86_model != 0x3d && /* BDM127 - 5th gen Core */
> +          boot_cpu_data.x86_model != 0x47 && /* BDD117 - 5th gen Core (GT3) */
> +          boot_cpu_data.x86_model != 0x4f && /* BDF85  - Xeon E5-2600 v4 */
> +          boot_cpu_data.x86_model != 0x56) ) /* BDE105 - Xeon D-1500 */

Perhaps easier as switch(), as we do elsewhere?

> @@ -4133,8 +4135,12 @@ static void lbr_tsx_fixup(void)
>      struct vmx_msr_entry *msr_area = curr->arch.hvm.vmx.msr_area;
>      struct vmx_msr_entry *msr;
>  
> -    if ( (msr = vmx_find_msr(curr, lbr_from_start, VMX_MSR_GUEST)) != NULL )
> +    if ( (msr = vmx_find_msr(curr, MSR_P4_LASTBRANCH_0_FROM_LIP,
> +                             VMX_MSR_GUEST)) != NULL )
>      {
> +        unsigned int lbr_from_end =
> +            MSR_P4_LASTBRANCH_0_FROM_LIP + NUM_MSR_P4_LASTBRANCH_FROM_TO;

const?

With these largely cosmetic remarks taken care of as you see fit,
Reviewed-by: Jan Beulich <jbeulich@suse.com>

Jan
Andrew Cooper Oct. 29, 2019, 3:45 p.m. UTC | #2
On 29/10/2019 15:32, Jan Beulich wrote:
> On 28.10.2019 16:01, Andrew Cooper wrote:
>> Cross reference and list each errata, now that they are published.
> Shouldn't this be "all errata" or "each erratum"?

Probably.

>
>> @@ -2727,40 +2719,50 @@ enum
>>  
>>  static bool __read_mostly lbr_tsx_fixup_needed;
>>  static bool __read_mostly bdf93_fixup_needed;
>> -static uint32_t __read_mostly lbr_from_start;
>> -static uint32_t __read_mostly lbr_from_end;
>> -static uint32_t __read_mostly lbr_lastint_from;
>>  
>>  static void __init lbr_tsx_fixup_check(void)
>>  {
>> -    bool tsx_support = cpu_has_hle || cpu_has_rtm;
>>      uint64_t caps;
>>      uint32_t lbr_format;
>>  
>> -    /* Fixup is needed only when TSX support is disabled ... */
>> -    if ( tsx_support )
>> +    /*
>> +     * HSM182, HSD172, HSE117, BDM127, BDD117, BDF85, BDE105:
>> +     *
>> +     * On processors that do not support Intel Transactional Synchronization
>> +     * Extensions (Intel TSX) (CPUID.07H.EBX bits 4 and 11 are both zero),
>> +     * writes to MSR_LASTBRANCH_x_FROM_IP (MSR 680H to 68FH) may #GP unless
>> +     * bits[62:61] are equal to bit[47].
>> +     *
>> +     * Software should sign the MSRs.
> Missing "extend"?

Yes.

>
>> +     * Experimentally, MSR_LER_FROM_LIP (1DDH) is similarly impacted, so is
>> +     * fixed up as well.
>> +     */
>> +    if ( cpu_has_hle || cpu_has_rtm ||
>> +         boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
>> +         boot_cpu_data.x86 != 6 ||
>> +         (boot_cpu_data.x86_model != 0x3c && /* HSM182, HSD172 - 4th gen Core */
>> +          boot_cpu_data.x86_model != 0x3f && /* HSE117 - Xeon E5 v3 */
>> +          boot_cpu_data.x86_model != 0x45 && /* HSM182 - 4th gen Core */
>> +          boot_cpu_data.x86_model != 0x46 && /* HSM182, HSD172 - 4th gen Core (GT3) */
>> +          boot_cpu_data.x86_model != 0x3d && /* BDM127 - 5th gen Core */
>> +          boot_cpu_data.x86_model != 0x47 && /* BDD117 - 5th gen Core (GT3) */
>> +          boot_cpu_data.x86_model != 0x4f && /* BDF85  - Xeon E5-2600 v4 */
>> +          boot_cpu_data.x86_model != 0x56) ) /* BDE105 - Xeon D-1500 */
> Perhaps easier as switch(), as we do elsewhere?

So, I can, but it makes the logic a little awkward.  (Although less so
in this version where I moved the HLE/RTM check to the beginning).

I was pleased, however, to find that GCC compiled this to "subtract 3c,
upper bounds check against 1a, bit test against an immediate".

I'll see what I can do about rearranging this into a switch statement,
because we will need the horizontal space when switching to use
intel-family.h

>
>> @@ -4133,8 +4135,12 @@ static void lbr_tsx_fixup(void)
>>      struct vmx_msr_entry *msr_area = curr->arch.hvm.vmx.msr_area;
>>      struct vmx_msr_entry *msr;
>>  
>> -    if ( (msr = vmx_find_msr(curr, lbr_from_start, VMX_MSR_GUEST)) != NULL )
>> +    if ( (msr = vmx_find_msr(curr, MSR_P4_LASTBRANCH_0_FROM_LIP,
>> +                             VMX_MSR_GUEST)) != NULL )
>>      {
>> +        unsigned int lbr_from_end =
>> +            MSR_P4_LASTBRANCH_0_FROM_LIP + NUM_MSR_P4_LASTBRANCH_FROM_TO;
> const?
>
> With these largely cosmetic remarks taken care of as you see fit,
> Reviewed-by: Jan Beulich <jbeulich@suse.com>

Thanks,

~Andrew
Tian, Kevin Oct. 29, 2019, 6:39 p.m. UTC | #3
> From: Andrew Cooper [mailto:andrew.cooper3@citrix.com]
> Sent: Monday, October 28, 2019 11:02 PM
> 
> Cross reference and list each errata, now that they are published.
> 
> These errata are specific to Haswell/Broadwell.  They should have model
> and vendor checks, as Intel isn't the only vendor to implement VT-x.
> 
> All affected models use the same MSR indices, so these can be hard-coded
> rather than looked up and stored as constant values.
> 
> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>

Reviewed-by: Kevin Tian <kevin.tian@intel.com>

Patch

diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 32d289ce06..f20ee94f9e 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -2583,14 +2583,6 @@  static int vmx_cr_access(cr_access_qual_t qual)
     return X86EMUL_OKAY;
 }
 
-/* This defines the layout of struct lbr_info[] */
-#define LBR_LASTINT_FROM_IDX    0
-#define LBR_LASTINT_TO_IDX      1
-#define LBR_LASTBRANCH_TOS_IDX  2
-#define LBR_LASTBRANCH_FROM_IDX 3
-#define LBR_LASTBRANCH_TO_IDX   4
-#define LBR_LASTBRANCH_INFO     5
-
 static const struct lbr_info {
     u32 base, count;
 } p4_lbr[] = {
@@ -2727,40 +2719,50 @@  enum
 
 static bool __read_mostly lbr_tsx_fixup_needed;
 static bool __read_mostly bdf93_fixup_needed;
-static uint32_t __read_mostly lbr_from_start;
-static uint32_t __read_mostly lbr_from_end;
-static uint32_t __read_mostly lbr_lastint_from;
 
 static void __init lbr_tsx_fixup_check(void)
 {
-    bool tsx_support = cpu_has_hle || cpu_has_rtm;
     uint64_t caps;
     uint32_t lbr_format;
 
-    /* Fixup is needed only when TSX support is disabled ... */
-    if ( tsx_support )
+    /*
+     * HSM182, HSD172, HSE117, BDM127, BDD117, BDF85, BDE105:
+     *
+     * On processors that do not support Intel Transactional Synchronization
+     * Extensions (Intel TSX) (CPUID.07H.EBX bits 4 and 11 are both zero),
+     * writes to MSR_LASTBRANCH_x_FROM_IP (MSR 680H to 68FH) may #GP unless
+     * bits[62:61] are equal to bit[47].
+     *
+     * Software should sign the MSRs.
+     *
+     * Experimentally, MSR_LER_FROM_LIP (1DDH) is similarly impacted, so is
+     * fixed up as well.
+     */
+    if ( cpu_has_hle || cpu_has_rtm ||
+         boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
+         boot_cpu_data.x86 != 6 ||
+         (boot_cpu_data.x86_model != 0x3c && /* HSM182, HSD172 - 4th gen Core */
+          boot_cpu_data.x86_model != 0x3f && /* HSE117 - Xeon E5 v3 */
+          boot_cpu_data.x86_model != 0x45 && /* HSM182 - 4th gen Core */
+          boot_cpu_data.x86_model != 0x46 && /* HSM182, HSD172 - 4th gen Core (GT3) */
+          boot_cpu_data.x86_model != 0x3d && /* BDM127 - 5th gen Core */
+          boot_cpu_data.x86_model != 0x47 && /* BDD117 - 5th gen Core (GT3) */
+          boot_cpu_data.x86_model != 0x4f && /* BDF85  - Xeon E5-2600 v4 */
+          boot_cpu_data.x86_model != 0x56) ) /* BDE105 - Xeon D-1500 */
         return;
 
+    /*
+     * Fixup is needed only when TSX support is disabled and the address
+     * format of LBR includes TSX bits 61:62
+     */
     if ( !cpu_has_pdcm )
         return;
 
     rdmsrl(MSR_IA32_PERF_CAPABILITIES, caps);
     lbr_format = caps & MSR_IA32_PERF_CAP_LBR_FORMAT;
 
-    /* ... and the address format of LBR includes TSX bits 61:62 */
     if ( lbr_format == LBR_FORMAT_EIP_FLAGS_TSX )
-    {
-        const struct lbr_info *lbr = last_branch_msr_get();
-
-        if ( lbr == NULL )
-            return;
-
-        lbr_lastint_from = lbr[LBR_LASTINT_FROM_IDX].base;
-        lbr_from_start = lbr[LBR_LASTBRANCH_FROM_IDX].base;
-        lbr_from_end = lbr_from_start + lbr[LBR_LASTBRANCH_FROM_IDX].count;
-
         lbr_tsx_fixup_needed = true;
-    }
 }
 
 static void __init bdf93_fixup_check(void)
@@ -4133,8 +4135,12 @@  static void lbr_tsx_fixup(void)
     struct vmx_msr_entry *msr_area = curr->arch.hvm.vmx.msr_area;
     struct vmx_msr_entry *msr;
 
-    if ( (msr = vmx_find_msr(curr, lbr_from_start, VMX_MSR_GUEST)) != NULL )
+    if ( (msr = vmx_find_msr(curr, MSR_P4_LASTBRANCH_0_FROM_LIP,
+                             VMX_MSR_GUEST)) != NULL )
     {
+        unsigned int lbr_from_end =
+            MSR_P4_LASTBRANCH_0_FROM_LIP + NUM_MSR_P4_LASTBRANCH_FROM_TO;
+
         /*
          * Sign extend into bits 61:62 while preserving bit 63
          * The loop relies on the fact that MSR array is sorted.
@@ -4143,7 +4149,8 @@  static void lbr_tsx_fixup(void)
             msr->data |= ((LBR_FROM_SIGNEXT_2MSB & msr->data) << 2);
     }
 
-    if ( (msr = vmx_find_msr(curr, lbr_lastint_from, VMX_MSR_GUEST)) != NULL )
+    if ( (msr = vmx_find_msr(curr, MSR_IA32_LASTINTFROMIP,
+                             VMX_MSR_GUEST)) != NULL )
         msr->data |= ((LBR_FROM_SIGNEXT_2MSB & msr->data) << 2);
 }