[1/3] x86/HVM: restrict emulation in hvm_descriptor_access_intercept()

Message ID 58EFACA10200007800150CF9@prv-mh.provo.novell.com (mailing list archive)
State New, archived

Commit Message

Jan Beulich April 13, 2017, 2:51 p.m. UTC
While I did review d0a699a389 ("x86/monitor: add support for descriptor
access events") it didn't really occur to me that somone could be this
blunt and add unguarded emulation again just a few weeks after we
guarded all special purpose emulator invocations. Fix this.

Signed-off-by: Jan Beulich <jbeulich@suse.com>

--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3598,6 +3598,28 @@ gp_fault:
     return X86EMUL_EXCEPTION;
 }
 
+static bool is_sysdesc_access(const struct x86_emulate_state *state,
+                              const struct x86_emulate_ctxt *ctxt)
+{
+    unsigned int ext;
+    int mode = x86_insn_modrm(state, NULL, &ext);
+
+    switch ( ctxt->opcode )
+    {
+    case X86EMUL_OPC(0x0f, 0x00):
+        if ( !(ext & 4) ) /* SLDT / STR / LLDT / LTR */
+            return true;
+        break;
+
+    case X86EMUL_OPC(0x0f, 0x01):
+        if ( mode != 3 && !(ext & 4) ) /* SGDT / SIDT / LGDT / LIDT */
+            return true;
+        break;
+    }
+
+    return false;
+}
+
 int hvm_descriptor_access_intercept(uint64_t exit_info,
                                     uint64_t vmx_exit_qualification,
                                     unsigned int descriptor, bool is_write)
@@ -3611,24 +3633,8 @@ int hvm_descriptor_access_intercept(uint
         hvm_monitor_descriptor_access(exit_info, vmx_exit_qualification,
                                       descriptor, is_write);
     }
-    else
-    {
-        struct hvm_emulate_ctxt ctxt;
-
-        hvm_emulate_init_once(&ctxt, NULL, guest_cpu_user_regs());
-        switch ( hvm_emulate_one(&ctxt) )
-        {
-        case X86EMUL_UNHANDLEABLE:
-            domain_crash(currd);
-            return X86EMUL_UNHANDLEABLE;
-        case X86EMUL_EXCEPTION:
-            hvm_inject_event(&ctxt.ctxt.event);
-            /* fall through */
-        default:
-            hvm_emulate_writeback(&ctxt);
-            break;
-        }
-    }
+    else if ( !hvm_emulate_one_insn(is_sysdesc_access) )
+        domain_crash(currd);
 
     return X86EMUL_OKAY;
 }

Comments

Andrew Cooper April 13, 2017, 2:59 p.m. UTC | #1
On 13/04/17 15:51, Jan Beulich wrote:
> While I did review d0a699a389 ("x86/monitor: add support for descriptor
> access events") it didn't really occur to me that somone could be this
> blunt and add unguarded emulation again just a few weeks after we
> guarded all special purpose emulator invocations. Fix this.
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Oops - I also omitted that point from my review checklist.  I will
endeavour not to make the same mistake again.

>
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -3598,6 +3598,28 @@ gp_fault:
>      return X86EMUL_EXCEPTION;
>  }
>  
> +static bool is_sysdesc_access(const struct x86_emulate_state *state,
> +                              const struct x86_emulate_ctxt *ctxt)
> +{
> +    unsigned int ext;
> +    int mode = x86_insn_modrm(state, NULL, &ext);

Unfortunately, this is another example which Coverity will pick up on,
along with the use of x86_insn_modrm() in is_invlpg().

In the case that we return -EINVAL, ext is uninitialised when it gets
used below.

Other than that, the change looks good.

~Andrew

> +
> +    switch ( ctxt->opcode )
> +    {
> +    case X86EMUL_OPC(0x0f, 0x00):
> +        if ( !(ext & 4) ) /* SLDT / STR / LLDT / LTR */
> +            return true;
> +        break;
> +
> +    case X86EMUL_OPC(0x0f, 0x01):
> +        if ( mode != 3 && !(ext & 4) ) /* SGDT / SIDT / LGDT / LIDT */
> +            return true;
> +        break;
> +    }
> +
> +    return false;
> +}
> +
>  int hvm_descriptor_access_intercept(uint64_t exit_info,
>                                      uint64_t vmx_exit_qualification,
>                                      unsigned int descriptor, bool is_write)
Jan Beulich April 13, 2017, 3:28 p.m. UTC | #2
>>> On 13.04.17 at 16:59, <andrew.cooper3@citrix.com> wrote:
> On 13/04/17 15:51, Jan Beulich wrote:
>> --- a/xen/arch/x86/hvm/hvm.c
>> +++ b/xen/arch/x86/hvm/hvm.c
>> @@ -3598,6 +3598,28 @@ gp_fault:
>>      return X86EMUL_EXCEPTION;
>>  }
>>  
>> +static bool is_sysdesc_access(const struct x86_emulate_state *state,
>> +                              const struct x86_emulate_ctxt *ctxt)
>> +{
>> +    unsigned int ext;
>> +    int mode = x86_insn_modrm(state, NULL, &ext);
> 
> Unfortunately, this is another example which Coverity will pick up on,
> along with the use of x86_insn_modrm() in is_invlpg().
> 
> In the case that we return -EINVAL, ext is uninitialised when it gets
> used below.

Sigh. Yes, we can work around this tool limitation, but honestly I
don't really agree with doing so in cases like this (where the code
is clearly correct, as -EINVAL can't come back). Plus we have
precedent for the above beyond just is_invlpg():
priv_op_validate() does the same, as does emulate_gate_op().
If there were a way to work around the warnings without growing
the generated code, I'd be less opposed (but still not entirely in
agreement), but all I can see is either making the return value
check more involved or adding initializers. In neither case will
the compiler be able to eliminate the resulting insns.

Jan
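
For reference, a minimal sketch of the "adding initializers" workaround Jan
describes (illustrative only, not code posted in the thread; it keeps the
existing x86_insn_modrm() interface and merely gives ext a dummy value so the
tool sees it as always defined):

static bool is_sysdesc_access(const struct x86_emulate_state *state,
                              const struct x86_emulate_ctxt *ctxt)
{
    /*
     * Dummy initializer purely to satisfy Coverity; x86_insn_modrm()
     * cannot fail for the opcodes handled below.
     */
    unsigned int ext = 0;
    int mode = x86_insn_modrm(state, NULL, &ext);

    /*
     * The "more involved return value check" alternative would instead
     * bail early, e.g. "if ( mode < 0 ) return false;".
     */

    switch ( ctxt->opcode )
    {
    case X86EMUL_OPC(0x0f, 0x00):
        if ( !(ext & 4) )              /* SLDT / STR / LLDT / LTR */
            return true;
        break;

    case X86EMUL_OPC(0x0f, 0x01):
        if ( mode != 3 && !(ext & 4) ) /* SGDT / SIDT / LGDT / LIDT */
            return true;
        break;
    }

    return false;
}
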
Andrew Cooper April 13, 2017, 4 p.m. UTC | #3
On 13/04/17 16:28, Jan Beulich wrote:
>>>> On 13.04.17 at 16:59, <andrew.cooper3@citrix.com> wrote:
>> On 13/04/17 15:51, Jan Beulich wrote:
>>> --- a/xen/arch/x86/hvm/hvm.c
>>> +++ b/xen/arch/x86/hvm/hvm.c
>>> @@ -3598,6 +3598,28 @@ gp_fault:
>>>      return X86EMUL_EXCEPTION;
>>>  }
>>>  
>>> +static bool is_sysdesc_access(const struct x86_emulate_state *state,
>>> +                              const struct x86_emulate_ctxt *ctxt)
>>> +{
>>> +    unsigned int ext;
>>> +    int mode = x86_insn_modrm(state, NULL, &ext);
>> Unfortunately, this is another example which Coverity will pick up on,
>> along with the use of x86_insn_modrm() in is_invlpg().
>>
>> In the case that we return -EINVAL, ext is uninitialised when it gets
>> used below.
> Sigh. Yes, we can work around this tool limitation

It is not a tool limitation.  It is an entirely legitimate complaint
about the state of the current code.

ext being defined depends on none of the 8k lines of emulator code
being buggy and accidentally clearing d & ModRM (or Vsib, for that
matter), or accidentally clobbering ->modrm_mod.

> , but honestly I
> don't really agree with doing so in cases like this (where the code
> is clearly correct, as -EINVAL can't come back). Plus we have
> precedent for the above beyond just is_invlpg():
> priv_op_validate() does the same, as does emulate_gate_op().
> If there were a way to work around the warnings without growing
> the generated code, I'd be less opposed (but still not entirely in
> agreement), but all I can see is either making the return value
> check more involved or adding initializers. In neither case will
> the compiler be able to eliminate the resulting insns.

How about returning

enum {
    modrm_reg,
    modrm_mem,
    modrm_none
};

The users of x86_insn_modrm() don't care about the individual values
between 0 and 2.  They are only looking for "does it have a memory
reference" or "does it have an extra opcode extension encoded in the
reg field".

~Andrew
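
A sketch of how is_sysdesc_access() might read if x86_insn_modrm() returned
such an enum (the enumerator names and the final shape of the interface are
assumptions for illustration; nothing in the thread settles them):

static bool is_sysdesc_access(const struct x86_emulate_state *state,
                              const struct x86_emulate_ctxt *ctxt)
{
    unsigned int ext;
    /*
     * Hypothetical: x86_insn_modrm() now returns one of the three
     * enumerators above instead of a raw mode value or -EINVAL, so the
     * mode comparison below becomes a named check rather than "!= 3".
     */
    int mode = x86_insn_modrm(state, NULL, &ext);

    switch ( ctxt->opcode )
    {
    case X86EMUL_OPC(0x0f, 0x00):
        if ( !(ext & 4) )                      /* SLDT / STR / LLDT / LTR */
            return true;
        break;

    case X86EMUL_OPC(0x0f, 0x01):
        if ( mode != modrm_reg && !(ext & 4) ) /* SGDT / SIDT / LGDT / LIDT */
            return true;
        break;
    }

    return false;
}
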