[v3] x86/HVM: don't #GP/#SS on wrapping virt->linear translations

Message ID 59637569020000780016A34F@prv-mh.provo.novell.com (mailing list archive)
State New, archived

Commit Message

Jan Beulich July 10, 2017, 10:39 a.m. UTC
Real hardware wraps silently in most cases, so we should behave the
same. Also split real and VM86 mode handling, as the latter really
ought to have limit checks applied.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
v3: Restore 32-bit wrap check for AMD.
v2: Extend to non-64-bit modes. Reduce 64-bit check to a single
    is_canonical_address() invocation.

--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -2416,16 +2416,21 @@ bool_t hvm_virtual_to_linear_addr(
      */
     ASSERT(seg < x86_seg_none);
 
-    if ( !(curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE) ||
-         (guest_cpu_user_regs()->eflags & X86_EFLAGS_VM) )
+    if ( !(curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE) )
     {
         /*
-         * REAL/VM86 MODE: Don't bother with segment access checks.
+         * REAL MODE: Don't bother with segment access checks.
          * Certain of them are not done in native real mode anyway.
          */
         addr = (uint32_t)(addr + reg->base);
-        last_byte = (uint32_t)addr + bytes - !!bytes;
-        if ( last_byte < addr )
+    }
+    else if ( (guest_cpu_user_regs()->eflags & X86_EFLAGS_VM) &&
+              is_x86_user_segment(seg) )
+    {
+        /* VM86 MODE: Fixed 64k limits on all user segments. */
+        addr = (uint32_t)(addr + reg->base);
+        last_byte = (uint32_t)offset + bytes - !!bytes;
+        if ( max(offset, last_byte) >> 16 )
             goto out;
     }
     else if ( hvm_long_mode_active(curr) &&
@@ -2447,8 +2452,7 @@ bool_t hvm_virtual_to_linear_addr(
             addr += reg->base;
 
         last_byte = addr + bytes - !!bytes;
-        if ( !is_canonical_address(addr) || last_byte < addr ||
-             !is_canonical_address(last_byte) )
+        if ( !is_canonical_address((long)addr < 0 ? addr : last_byte) )
             goto out;
     }
     else
@@ -2498,8 +2502,11 @@ bool_t hvm_virtual_to_linear_addr(
             if ( (offset <= reg->limit) || (last_byte < offset) )
                 goto out;
         }
-        else if ( (last_byte > reg->limit) || (last_byte < offset) )
-            goto out; /* last byte is beyond limit or wraps 0xFFFFFFFF */
+        else if ( last_byte > reg->limit )
+            goto out; /* last byte is beyond limit */
+        else if ( last_byte < offset &&
+                  curr->domain->arch.cpuid->x86_vendor == X86_VENDOR_AMD )
+            goto out; /* access wraps */
     }
 
     /* All checks ok. */
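
For readers following the VM86 hunk above: the new check folds "beyond the
64k limit" and "last_byte wrapped" into a single test. Below is a minimal,
self-contained sketch of that predicate (plain C; vm86_limit_ok() is a
hypothetical name, and the patch itself operates on wider types):

    #include <stdbool.h>
    #include <stdint.h>

    /*
     * VM86 user segments have a fixed 64k limit: both the first and the
     * last byte of an access must lie below 0x10000.  Taking the larger
     * of the two catches an access extending past 0xffff as well as one
     * whose 32-bit last_byte computation wrapped (offset itself is then
     * far above 0xffff).  The "- !!bytes" keeps a zero-length access's
     * last byte equal to its first.
     */
    static bool vm86_limit_ok(uint32_t offset, uint32_t bytes)
    {
        uint32_t last_byte = offset + bytes - !!bytes;

        return ((offset > last_byte ? offset : last_byte) >> 16) == 0;
    }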

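The long-mode hunk likewise collapses the previous three-part test into a
single is_canonical_address() invocation, and in doing so permits silent
wraps through the top of the address space. A self-contained sketch of the
reasoning (hypothetical names; is_canonical() stands in for Xen's
is_canonical_address(), assuming 48 implemented virtual address bits and
access sizes far smaller than the non-canonical hole):

    #include <stdbool.h>
    #include <stdint.h>

    /* Canonical for 48-bit VAs: bits 63:48 must all equal bit 47. */
    static bool is_canonical(uint64_t va)
    {
        return (uint64_t)((int64_t)(va << 16) >> 16) == va;
    }

    static bool range_ok(uint64_t addr, uint64_t bytes)
    {
        uint64_t last_byte = addr + bytes - !!bytes;

        /*
         * First byte in the upper (negative) half: checking it alone
         * suffices, since last_byte either stays in the canonical upper
         * half or wraps through ~0 into the canonical lower half -- the
         * silent wrap the patch is about.  First byte in the lower half:
         * checking last_byte suffices, since addr <= last_byte there and
         * a canonical last_byte implies a canonical addr.
         */
        return is_canonical((int64_t)addr < 0 ? addr : last_byte);
    }

The asymmetry is deliberate: wraps at the 2^64 boundary are now tolerated,
while ranges straddling the non-canonical hole are still rejected.
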
Comments

Jan Beulich Aug. 10, 2017, 7:19 a.m. UTC | #1
>>> On 10.07.17 at 12:39, <JBeulich@suse.com> wrote:
> Real hardware wraps silently in most cases, so we should behave the
> same. Also split real and VM86 mode handling, as the latter really
> ought to have limit checks applied.
> 
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> ---
> v3: Restore 32-bit wrap check for AMD.
> v2: Extend to non-64-bit modes. Reduce 64-bit check to a single
>     is_canonical_address() invocation.
> [...]
Jan Beulich Aug. 25, 2017, 2:59 p.m. UTC | #2
>>> On 10.08.17 at 09:19, <JBeulich@suse.com> wrote:
>>>> On 10.07.17 at 12:39, <JBeulich@suse.com> wrote:
>> Real hardware wraps silently in most cases, so we should behave the
>> same. Also split real and VM86 mode handling, as the latter really
>> ought to have limit checks applied.
>> 
>> Signed-off-by: Jan Beulich <jbeulich@suse.com>
>> ---
>> v3: Restore 32-bit wrap check for AMD.
>> v2: Extend to non-64-bit modes. Reduce 64-bit check to a single
>>     is_canonical_address() invocation.
>> [...]
Andrew Cooper Sept. 5, 2017, 12:26 p.m. UTC | #3
On 10/07/17 11:39, Jan Beulich wrote:
> Real hardware wraps silently in most cases, so we should behave the
> same. Also split real and VM86 mode handling, as the latter really
> ought to have limit checks applied.
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

The change looks ok, but this is a subtle adjustment with a lot of
changes in boundary cases.

ISTR you had an XTF test for some of these?  I'd feel rather more
confident if we could get that into automation.

~Andrew
Jan Beulich Sept. 5, 2017, 1:30 p.m. UTC | #4
>>> On 05.09.17 at 14:26, <andrew.cooper3@citrix.com> wrote:
> On 10/07/17 11:39, Jan Beulich wrote:
>> Real hardware wraps silently in most cases, so we should behave the
>> same. Also split real and VM86 mode handling, as the latter really
>> ought to have limit checks applied.
>>
>> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> 
> The change looks ok, but this is a subtle adjustment with a lot of
> changes in boundary cases.
> 
> ISTR you had an XTF test for some of these?  I'd feel rather more
> confident if we could get that into automation.

Yes, that was a test you had handed to me, which I then extended
and handed back to you ("Compatibility mode LLDT/LTR testing"). I
still have it, but it is still in the raw shape it was in back then (i.e.
unlikely to be ready to go in).

Jan
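
To give an idea of the boundary cases such a test would need to exercise for
the 64-bit hunk, here is a hypothetical table-driven check -- not the XTF
test referred to above -- reusing the range_ok() sketch shown after the diff:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* range_ok() and is_canonical() as sketched after the diff above. */

    static const struct {
        uint64_t addr, bytes;
        bool ok;
    } cases[] = {
        { 0x00007fffffffffffULL, 1, true  }, /* last canonical lower byte  */
        { 0x00007fffffffffffULL, 2, false }, /* crosses into the hole      */
        { 0x0000800000000000ULL, 1, false }, /* starts inside the hole     */
        { 0xffff800000000000ULL, 2, true  }, /* first canonical upper byte */
        { 0xffffffffffffffffULL, 2, true  }, /* wraps through ~0: allowed  */
    };

    int main(void)
    {
        for ( unsigned int i = 0; i < sizeof(cases) / sizeof(cases[0]); ++i )
            assert(range_ok(cases[i].addr, cases[i].bytes) == cases[i].ok);
        return 0;
    }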