[v2,1/4] x86/vmx: introduce vmwrite_safe()

Message ID 20170206145747.13885-2-sergey.dyasli@citrix.com (mailing list archive)
State New, archived

Commit Message

Sergey Dyasli Feb. 6, 2017, 2:57 p.m. UTC
Any failure during the original __vmwrite() leads to a BUG(), which
can be easily exploited from a guest in nested VMX mode.

The new function returns an error code depending on the outcome:

          VMsucceed: 0
        VMfailValid: VM Instruction Error Number
      VMfailInvalid: a new VMX_INSN_FAIL_INVALID

A new macro, GAS_VMX_OP, is introduced to improve the readability of
the asm statements.  The existing ASM_FLAG_OUT macro is reused and a
copy is added to asm_defns.h.

Signed-off-by: Sergey Dyasli <sergey.dyasli@citrix.com>
---
 xen/arch/x86/hvm/vmx/vvmx.c        |  3 ++-
 xen/include/asm-x86/asm_defns.h    |  6 ++++++
 xen/include/asm-x86/hvm/vmx/vmcs.h |  1 +
 xen/include/asm-x86/hvm/vmx/vmx.h  | 29 +++++++++++++++++++++++++++++
 4 files changed, 38 insertions(+), 1 deletion(-)
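
For context, a minimal sketch of how a caller might consume the new
return value (the helper name and the errno mapping below are
hypothetical, for illustration only, and not part of this patch):

    /* Hypothetical caller of vmwrite_safe(), covering the three
     * outcomes listed in the commit message. */
    static int try_vmcs_write(unsigned long field, unsigned long value)
    {
        unsigned long err = vmwrite_safe(field, value);

        if ( err == 0 )                      /* VMsucceed */
            return 0;

        if ( err == VMX_INSN_FAIL_INVALID )  /* VMfailInvalid */
            return -EINVAL;

        /* VMfailValid: err holds the VM Instruction Error Number. */
        gprintk(XENLOG_WARNING, "vmwrite(%#lx) failed: error %lu\n",
                field, err);
        return -EIO;
    }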

Comments

Tian, Kevin Feb. 7, 2017, 6:31 a.m. UTC | #1
> From: Sergey Dyasli [mailto:sergey.dyasli@citrix.com]
> Sent: Monday, February 06, 2017 10:58 PM
> 
> Any failure during the original __vmwrite() leads to a BUG(), which
> can be easily exploited from a guest in nested VMX mode.
> 
> The new function returns an error code depending on the outcome:
> 
>           VMsucceed: 0
>         VMfailValid: VM Instruction Error Number
>       VMfailInvalid: a new VMX_INSN_FAIL_INVALID
> 
> A new macro, GAS_VMX_OP, is introduced to improve the readability of
> the asm statements.  The existing ASM_FLAG_OUT macro is reused and a
> copy is added to asm_defns.h.
> 
> Signed-off-by: Sergey Dyasli <sergey.dyasli@citrix.com>

Acked-by: Kevin Tian <kevin.tian@intel.com>
Jan Beulich Feb. 7, 2017, 11:09 a.m. UTC | #2
>>> On 06.02.17 at 15:57, <sergey.dyasli@citrix.com> wrote:
> Any failure during the original __vmwrite() leads to a BUG(), which
> can be easily exploited from a guest in nested VMX mode.
> 
> The new function returns an error code depending on the outcome:
> 
>           VMsucceed: 0
>         VMfailValid: VM Instruction Error Number
>       VMfailInvalid: a new VMX_INSN_FAIL_INVALID
> 
> A new macro, GAS_VMX_OP, is introduced to improve the readability of
> the asm statements.  The existing ASM_FLAG_OUT macro is reused and a
> copy is added to asm_defns.h.
> 
> Signed-off-by: Sergey Dyasli <sergey.dyasli@citrix.com>
> ---

Please can you include the revision info for the individual patches
here. I know you've put it in the overview mail, but for reviewers
it's far more useful for it to (also) be here.

> --- a/xen/include/asm-x86/hvm/vmx/vmcs.h
> +++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
> @@ -526,6 +526,7 @@ enum vmx_insn_errno
>      VMX_INSN_VMPTRLD_INVALID_PHYADDR       = 9,
>      VMX_INSN_UNSUPPORTED_VMCS_COMPONENT    = 12,
>      VMX_INSN_VMXON_IN_VMX_ROOT             = 15,
> +    VMX_INSN_FAIL_INVALID                  = ~0,
>  };

The main reason for me to ask for the type change here was to ...

> @@ -423,6 +429,29 @@ static inline bool_t __vmread_safe(unsigned long field, unsigned long *value)
>      return okay;
>  }
>  
> +static always_inline unsigned long vmwrite_safe(unsigned long field,
> +                                                unsigned long value)
> +{
> +    unsigned long ret = 0;
> +    bool fail_invalid, fail_valid;
> +
> +    asm volatile ( GAS_VMX_OP("vmwrite %[value], %[field]\n\t",
> +                              VMWRITE_OPCODE MODRM_EAX_ECX)
> +                   ASM_FLAG_OUT(, "setc %[invalid]\n\t")
> +                   ASM_FLAG_OUT(, "setz %[valid]\n\t")
> +                   : ASM_FLAG_OUT("=@ccc", [invalid] "=rm") (fail_invalid),
> +                     ASM_FLAG_OUT("=@ccz", [valid] "=rm") (fail_valid)
> +                   : [field] GAS_VMX_OP("r", "a") (field),
> +                     [value] GAS_VMX_OP("rm", "c") (value));
> +
> +    if ( unlikely(fail_invalid) )
> +        ret = VMX_INSN_FAIL_INVALID;
> +    else if ( unlikely(fail_valid) )
> +        __vmread(VM_INSTRUCTION_ERROR, &ret);
> +
> +    return ret;
> +}

... allow the function to return enum vmx_insn_errno, and that
to not be a 64-bit quantity. As you're presumably aware, dealing
with 32-bit quantities is on the average slightly more efficient than
dealing with 64-bit ones. The code above should imo still BUG() if
the value read from VM_INSTRUCTION_ERROR doesn't fit in 32
bits (as it's a 32-bit field only anyway).

Also, looking at the entire asm() above another time, wouldn't it
end up a bit less convoluted if you simply used CMOVC for the
"invalid" code path? Similarly I wonder whether the second
VMREAD wouldn't better be moved into the asm(), utilizing the
UNLIKELY_START() et al constructs to get that code path
entirely out of the main path. These aren't requirements though,
just aspects to think about.
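
For concreteness, the suggested type change might look like the sketch
below (hypothetical and untested: the asm body is kept exactly as in
the patch, only the C side changes, and the BUG_ON placement is one
possible reading of the above):

    static always_inline enum vmx_insn_errno vmwrite_safe(unsigned long field,
                                                          unsigned long value)
    {
        unsigned long ret = 0;
        bool fail_invalid, fail_valid;

        asm volatile ( GAS_VMX_OP("vmwrite %[value], %[field]\n\t",
                                  VMWRITE_OPCODE MODRM_EAX_ECX)
                       ASM_FLAG_OUT(, "setc %[invalid]\n\t")
                       ASM_FLAG_OUT(, "setz %[valid]\n\t")
                       : ASM_FLAG_OUT("=@ccc", [invalid] "=rm") (fail_invalid),
                         ASM_FLAG_OUT("=@ccz", [valid] "=rm") (fail_valid)
                       : [field] GAS_VMX_OP("r", "a") (field),
                         [value] GAS_VMX_OP("rm", "c") (value) );

        if ( unlikely(fail_invalid) )
            return VMX_INSN_FAIL_INVALID;

        if ( unlikely(fail_valid) )
        {
            __vmread(VM_INSTRUCTION_ERROR, &ret);
            /* VM_INSTRUCTION_ERROR is a 32-bit field. */
            BUG_ON(ret != (uint32_t)ret);
        }

        return (enum vmx_insn_errno)ret;
    }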

Jan
Andrew Cooper Feb. 7, 2017, 11:59 a.m. UTC | #3
On 07/02/17 11:09, Jan Beulich wrote:
>
>> @@ -423,6 +429,29 @@ static inline bool_t __vmread_safe(unsigned long field, unsigned long *value)
>>      return okay;
>>  }
>>  
>> +static always_inline unsigned long vmwrite_safe(unsigned long field,
>> +                                                unsigned long value)
>> +{
>> +    unsigned long ret = 0;
>> +    bool fail_invalid, fail_valid;
>> +
>> +    asm volatile ( GAS_VMX_OP("vmwrite %[value], %[field]\n\t",
>> +                              VMWRITE_OPCODE MODRM_EAX_ECX)
>> +                   ASM_FLAG_OUT(, "setc %[invalid]\n\t")
>> +                   ASM_FLAG_OUT(, "setz %[valid]\n\t")
>> +                   : ASM_FLAG_OUT("=@ccc", [invalid] "=rm") (fail_invalid),
>> +                     ASM_FLAG_OUT("=@ccz", [valid] "=rm") (fail_valid)
>> +                   : [field] GAS_VMX_OP("r", "a") (field),
>> +                     [value] GAS_VMX_OP("rm", "c") (value));
>> +
>> +    if ( unlikely(fail_invalid) )
>> +        ret = VMX_INSN_FAIL_INVALID;
>> +    else if ( unlikely(fail_valid) )
>> +        __vmread(VM_INSTRUCTION_ERROR, &ret);
>> +
>> +    return ret;
>> +}
> ... allow the function to return enum vmx_insn_errno, and that
> to not be a 64-bit quantity. As you're presumably aware, dealing
> with 32-bit quantities is on the average slightly more efficient than
> dealing with 64-bit ones. The code above should imo still BUG() if
> the value read from VM_INSTRUCTION_ERROR doesn't fit in 32
> bits (as it's a 32-bit field only anyway).
>
> Also, looking at the entire asm() above another time, wouldn't it
> end up a bit less convoluted if you simply used CMOVC for the
> "invalid" code path? Similarly I wonder whether the second
> VMREAD wouldn't better be moved into the asm(), utilizing the
> UNLIKELY_START() et al constructs to get that code path
> entirely out of the main path. These aren't requirements though,
> just aspects to think about.

Embedding two vm*** instructions is substantially harder in the
non-GAS_VMX_OP() case.  It either involves manual register scheduling,
or a separate ModRM and different explicit register fields.

As for extra logic, I have some further plans which would allow the
compiler to elide the __vmread() on some paths, which it can only do
for logic exposed in C.  From this point of view, the less code in the
asm block, the better.

~Andrew
Jan Beulich Feb. 7, 2017, 1:18 p.m. UTC | #4
>>> On 07.02.17 at 12:59, <andrew.cooper3@citrix.com> wrote:
> On 07/02/17 11:09, Jan Beulich wrote:
>>
>>> @@ -423,6 +429,29 @@ static inline bool_t __vmread_safe(unsigned long field, unsigned long *value)
>>>      return okay;
>>>  }
>>>  
>>> +static always_inline unsigned long vmwrite_safe(unsigned long field,
>>> +                                                unsigned long value)
>>> +{
>>> +    unsigned long ret = 0;
>>> +    bool fail_invalid, fail_valid;
>>> +
>>> +    asm volatile ( GAS_VMX_OP("vmwrite %[value], %[field]\n\t",
>>> +                              VMWRITE_OPCODE MODRM_EAX_ECX)
>>> +                   ASM_FLAG_OUT(, "setc %[invalid]\n\t")
>>> +                   ASM_FLAG_OUT(, "setz %[valid]\n\t")
>>> +                   : ASM_FLAG_OUT("=@ccc", [invalid] "=rm") (fail_invalid),
>>> +                     ASM_FLAG_OUT("=@ccz", [valid] "=rm") (fail_valid)
>>> +                   : [field] GAS_VMX_OP("r", "a") (field),
>>> +                     [value] GAS_VMX_OP("rm", "c") (value));
>>> +
>>> +    if ( unlikely(fail_invalid) )
>>> +        ret = VMX_INSN_FAIL_INVALID;
>>> +    else if ( unlikely(fail_valid) )
>>> +        __vmread(VM_INSTRUCTION_ERROR, &ret);
>>> +
>>> +    return ret;
>>> +}
>> ... allow the function to return enum vmx_insn_errno, and that
>> to not be a 64-bit quantity. As you're presumably aware, dealing
>> with 32-bit quantities is on the average slightly more efficient than
>> dealing with 64-bit ones. The code above should imo still BUG() if
>> the value read from VM_INSTRUCTION_ERROR doesn't fit in 32
>> bits (as it's a 32-bit field only anyway).
>>
>> Also, looking at the entire asm() above another time, wouldn't it
>> end up a bit less convoluted if you simply used CMOVC for the
>> "invalid" code path? Similarly I wonder whether the second
>> VMREAD wouldn't better be moved into the asm(), utilizing the
>> UNLIKELY_START() et al constructs to get that code path
>> entirely out of the main path. These aren't requirements though,
>> just aspects to think about.
> 
> Embedding two vm*** instructions is substantially harder in the
> non-GAS_VMX_OP() case.  It either involves manual register scheduling,
> or a separate ModRM and different explicit register fields.

Ah, right, that wouldn't be very nice indeed.

> As for extra logic, I have some further plans which would allow the
> compiler to elide the __vmread() on some paths, which it can only do
> for logic exposed in C.  From this point of view, the less code in the
> asm block, the better.

Well, okay then.

Jan

Patch

diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index 9caebe5..105a3c0 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -483,7 +483,8 @@  static void vmfail_invalid(struct cpu_user_regs *regs)
 
 static void vmfail(struct cpu_user_regs *regs, enum vmx_insn_errno errno)
 {
-    if ( vcpu_nestedhvm(current).nv_vvmcxaddr != INVALID_PADDR )
+    if ( vcpu_nestedhvm(current).nv_vvmcxaddr != INVALID_PADDR &&
+         errno != VMX_INSN_FAIL_INVALID )
         vmfail_valid(regs, errno);
     else
         vmfail_invalid(regs);
diff --git a/xen/include/asm-x86/asm_defns.h b/xen/include/asm-x86/asm_defns.h
index f1c6fa1..220ae2e 100644
--- a/xen/include/asm-x86/asm_defns.h
+++ b/xen/include/asm-x86/asm_defns.h
@@ -413,4 +413,10 @@  static always_inline void stac(void)
 #define REX64_PREFIX "rex64/"
 #endif
 
+#ifdef __GCC_ASM_FLAG_OUTPUTS__
+# define ASM_FLAG_OUT(yes, no) yes
+#else
+# define ASM_FLAG_OUT(yes, no) no
+#endif
+
 #endif /* __X86_ASM_DEFNS_H__ */
diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h b/xen/include/asm-x86/hvm/vmx/vmcs.h
index d71de04..8d43efd 100644
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
@@ -526,6 +526,7 @@  enum vmx_insn_errno
     VMX_INSN_VMPTRLD_INVALID_PHYADDR       = 9,
     VMX_INSN_UNSUPPORTED_VMCS_COMPONENT    = 12,
     VMX_INSN_VMXON_IN_VMX_ROOT             = 15,
+    VMX_INSN_FAIL_INVALID                  = ~0,
 };
 
 void vmx_disable_intercept_for_msr(struct vcpu *v, u32 msr, int type);
diff --git a/xen/include/asm-x86/hvm/vmx/vmx.h b/xen/include/asm-x86/hvm/vmx/vmx.h
index 4bf4d50..d40d6a5 100644
--- a/xen/include/asm-x86/hvm/vmx/vmx.h
+++ b/xen/include/asm-x86/hvm/vmx/vmx.h
@@ -306,6 +306,12 @@  extern uint8_t posted_intr_vector;
 #define INVVPID_ALL_CONTEXT                     2
 #define INVVPID_SINGLE_CONTEXT_RETAINING_GLOBAL 3
 
+#ifdef HAVE_GAS_VMX
+# define GAS_VMX_OP(yes, no) yes
+#else
+# define GAS_VMX_OP(yes, no) no
+#endif
+
 static always_inline void __vmptrld(u64 addr)
 {
     asm volatile (
@@ -423,6 +429,29 @@  static inline bool_t __vmread_safe(unsigned long field, unsigned long *value)
     return okay;
 }
 
+static always_inline unsigned long vmwrite_safe(unsigned long field,
+                                                unsigned long value)
+{
+    unsigned long ret = 0;
+    bool fail_invalid, fail_valid;
+
+    asm volatile ( GAS_VMX_OP("vmwrite %[value], %[field]\n\t",
+                              VMWRITE_OPCODE MODRM_EAX_ECX)
+                   ASM_FLAG_OUT(, "setc %[invalid]\n\t")
+                   ASM_FLAG_OUT(, "setz %[valid]\n\t")
+                   : ASM_FLAG_OUT("=@ccc", [invalid] "=rm") (fail_invalid),
+                     ASM_FLAG_OUT("=@ccz", [valid] "=rm") (fail_valid)
+                   : [field] GAS_VMX_OP("r", "a") (field),
+                     [value] GAS_VMX_OP("rm", "c") (value));
+
+    if ( unlikely(fail_invalid) )
+        ret = VMX_INSN_FAIL_INVALID;
+    else if ( unlikely(fail_valid) )
+        __vmread(VM_INSTRUCTION_ERROR, &ret);
+
+    return ret;
+}
+
 static always_inline void __invept(unsigned long type, u64 eptp, u64 gpa)
 {
     struct {