
[2/3] VMX: don't blindly enable descriptor table exiting control

Message ID 58EFACF80200007800150CFD@prv-mh.provo.novell.com (mailing list archive)
State New, archived

Commit Message

Jan Beulich April 13, 2017, 2:53 p.m. UTC
This is an optional feature and hence we should check for it before
use.

Signed-off-by: Jan Beulich <jbeulich@suse.com>

--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -866,7 +866,7 @@ static void svm_set_rdtsc_exiting(struct
     vmcb_set_general2_intercepts(vmcb, general2_intercepts);
 }
 
-static void svm_set_descriptor_access_exiting(struct vcpu *v, bool enable)
+static bool svm_set_descriptor_access_exiting(struct vcpu *v, bool enable)
 {
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
     u32 general1_intercepts = vmcb_get_general1_intercepts(vmcb);
@@ -881,6 +881,8 @@ static void svm_set_descriptor_access_ex
         general1_intercepts &= ~mask;
 
     vmcb_set_general1_intercepts(vmcb, general1_intercepts);
+
+    return true;
 }
 
 static unsigned int svm_get_insn_bytes(struct vcpu *v, uint8_t *buf)
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -226,6 +226,7 @@ static int vmx_init_vmcs_config(void)
         opt = (SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
                SECONDARY_EXEC_WBINVD_EXITING |
                SECONDARY_EXEC_ENABLE_EPT |
+               SECONDARY_EXEC_DESCRIPTOR_TABLE_EXITING |
                SECONDARY_EXEC_ENABLE_RDTSCP |
                SECONDARY_EXEC_PAUSE_LOOP_EXITING |
                SECONDARY_EXEC_ENABLE_INVPCID |
@@ -1020,6 +1021,13 @@ static int construct_vmcs(struct vcpu *v
 
     v->arch.hvm_vmx.secondary_exec_control = vmx_secondary_exec_control;
 
+    /*
+     * Disable descriptor table exiting: It's controlled by the VM event
+     * monitor requesting it.
+     */
+    v->arch.hvm_vmx.secondary_exec_control &=
+        ~SECONDARY_EXEC_DESCRIPTOR_TABLE_EXITING;
+
     /* Disable VPID for now: we decide when to enable it on VMENTER. */
     v->arch.hvm_vmx.secondary_exec_control &= ~SECONDARY_EXEC_ENABLE_VPID;
 
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -1423,11 +1423,15 @@ static void vmx_set_rdtsc_exiting(struct
     vmx_vmcs_exit(v);
 }
 
-static void vmx_set_descriptor_access_exiting(struct vcpu *v, bool enable)
+static bool vmx_set_descriptor_access_exiting(struct vcpu *v, bool enable)
 {
     if ( enable )
+    {
+        if ( !cpu_has_vmx_dt_exiting )
+            return false;
         v->arch.hvm_vmx.secondary_exec_control |=
             SECONDARY_EXEC_DESCRIPTOR_TABLE_EXITING;
+    }
     else
         v->arch.hvm_vmx.secondary_exec_control &=
             ~SECONDARY_EXEC_DESCRIPTOR_TABLE_EXITING;
@@ -1435,6 +1439,8 @@ static void vmx_set_descriptor_access_ex
     vmx_vmcs_enter(v);
     vmx_update_secondary_exec_control(v);
     vmx_vmcs_exit(v);
+
+    return true;
 }
 
 static void vmx_init_hypercall_page(struct domain *d, void *hypercall_page)
--- a/xen/arch/x86/monitor.c
+++ b/xen/arch/x86/monitor.c
@@ -213,7 +213,7 @@ int arch_monitor_domctl_event(struct dom
 
     case XEN_DOMCTL_MONITOR_EVENT_DESC_ACCESS:
     {
-        bool old_status = ad->monitor.descriptor_access_enabled;
+        bool old_status = ad->monitor.descriptor_access_enabled, ok = true;
         struct vcpu *v;
 
         if ( unlikely(old_status == requested_status) )
@@ -223,9 +223,15 @@ int arch_monitor_domctl_event(struct dom
         ad->monitor.descriptor_access_enabled = requested_status;
 
         for_each_vcpu ( d, v )
-            hvm_funcs.set_descriptor_access_exiting(v, requested_status);
+        {
+            ok = hvm_funcs.set_descriptor_access_exiting(v, requested_status);
+            if ( !ok )
+                break;
+        }
 
         domain_unpause(d);
+        if ( !ok )
+            return -EOPNOTSUPP;
         break;
     }
 
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -173,7 +173,7 @@ struct hvm_function_table {
     void (*handle_cd)(struct vcpu *v, unsigned long value);
     void (*set_info_guest)(struct vcpu *v);
     void (*set_rdtsc_exiting)(struct vcpu *v, bool_t);
-    void (*set_descriptor_access_exiting)(struct vcpu *v, bool);
+    bool (*set_descriptor_access_exiting)(struct vcpu *v, bool);
 
     /* Nested HVM */
     int (*nhvm_vcpu_initialise)(struct vcpu *v);
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
@@ -274,6 +274,8 @@ extern u64 vmx_ept_vpid_cap;
     (vmx_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)
 #define cpu_has_vmx_ept \
     (vmx_secondary_exec_control & SECONDARY_EXEC_ENABLE_EPT)
+#define cpu_has_vmx_dt_exiting \
+    (vmx_secondary_exec_control & SECONDARY_EXEC_DESCRIPTOR_TABLE_EXITING)
 #define cpu_has_vmx_vpid \
     (vmx_secondary_exec_control & SECONDARY_EXEC_ENABLE_VPID)
 #define cpu_has_monitor_trap_flag \
--- a/xen/include/asm-x86/monitor.h
+++ b/xen/include/asm-x86/monitor.h
@@ -77,13 +77,15 @@ static inline uint32_t arch_monitor_get_
                    (1U << XEN_DOMCTL_MONITOR_EVENT_GUEST_REQUEST) |
                    (1U << XEN_DOMCTL_MONITOR_EVENT_DEBUG_EXCEPTION) |
                    (1U << XEN_DOMCTL_MONITOR_EVENT_CPUID) |
-                   (1U << XEN_DOMCTL_MONITOR_EVENT_INTERRUPT) |
-                   (1U << XEN_DOMCTL_MONITOR_EVENT_DESC_ACCESS);
+                   (1U << XEN_DOMCTL_MONITOR_EVENT_INTERRUPT);
 
     /* Since we know this is on VMX, we can just call the hvm func */
     if ( hvm_is_singlestep_supported() )
         capabilities |= (1U << XEN_DOMCTL_MONITOR_EVENT_SINGLESTEP);
 
+    if ( cpu_has_vmx_dt_exiting || cpu_has_svm )
+        capabilities |= (1U << XEN_DOMCTL_MONITOR_EVENT_DESC_ACCESS);
+
     return capabilities;
 }
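
With this change DESC_ACCESS is only advertised when the hardware can actually deliver the exits, so monitor applications should consult the capability mask before enabling the event. A minimal consumer-side sketch (assuming the libxc helpers xc_monitor_get_capabilities() and xc_monitor_descriptor_access() with their usual signatures; illustration only, not part of this patch):

    #include <stdio.h>
    #include <xenctrl.h>

    /* Enable descriptor access monitoring only if the hypervisor advertises it. */
    static int enable_desc_access(xc_interface *xch, uint32_t domain_id)
    {
        uint32_t caps = 0;

        if ( xc_monitor_get_capabilities(xch, domain_id, &caps) )
            return -1;

        if ( !(caps & (1U << XEN_DOMCTL_MONITOR_EVENT_DESC_ACCESS)) )
        {
            fprintf(stderr, "descriptor access exiting not supported\n");
            return -1;
        }

        /*
         * On VMX hardware lacking the control the hypervisor would now
         * reject the request anyway; checking the capability first avoids
         * relying on that error path.
         */
        return xc_monitor_descriptor_access(xch, domain_id, true);
    }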

Comments

Razvan Cojocaru April 13, 2017, 2:59 p.m. UTC | #1
On 04/13/2017 05:53 PM, Jan Beulich wrote:
> This is an optional feature and hence we should check for it before
> use.
> 
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Acked-by: Razvan Cojocaru <rcojocaru@bitdefender.com>


Thanks,
Razvan
Andrew Cooper April 13, 2017, 3:16 p.m. UTC | #2
On 13/04/17 15:53, Jan Beulich wrote:
> @@ -223,9 +223,15 @@ int arch_monitor_domctl_event(struct dom
>          ad->monitor.descriptor_access_enabled = requested_status;
>  
>          for_each_vcpu ( d, v )
> -            hvm_funcs.set_descriptor_access_exiting(v, requested_status);
> +        {
> +            ok = hvm_funcs.set_descriptor_access_exiting(v, requested_status);
> +            if ( !ok )
> +                break;
> +        }
>  
>          domain_unpause(d);
> +        if ( !ok )
> +            return -EOPNOTSUPP;

While the overall purpose of the patch is clearly a good thing, this
implementation isn't ideal.

In principle, this structure could hit a failure mid way through the
vcpus, leaving them in an inconsistent state.

Instead, why not clobber the set_descriptor_access_exiting() function
pointer if support isn't available, and use that (before the
domain_pause()) to fail with EOPNOTSUPP?

~Andrew
Jan Beulich April 13, 2017, 3:30 p.m. UTC | #3
>>> On 13.04.17 at 17:16, <andrew.cooper3@citrix.com> wrote:
> On 13/04/17 15:53, Jan Beulich wrote:
>> @@ -223,9 +223,15 @@ int arch_monitor_domctl_event(struct dom
>>          ad->monitor.descriptor_access_enabled = requested_status;
>>  
>>          for_each_vcpu ( d, v )
>> -            hvm_funcs.set_descriptor_access_exiting(v, requested_status);
>> +        {
>> +            ok = hvm_funcs.set_descriptor_access_exiting(v, requested_status);
>> +            if ( !ok )
>> +                break;
>> +        }
>>  
>>          domain_unpause(d);
>> +        if ( !ok )
>> +            return -EOPNOTSUPP;
> 
> While the overall purpose of the patch is clearly a good thing, this
> implementation isn't ideal.
> 
> In principle, this structure could hit a failure mid way through the
> vcpus, leaving them in an inconsistent state.
> 
> Instead, why not clobber the set_descriptor_access_exiting() function
> pointer if support isn't available, and use that (before the
> domain_pause()) to fail with EOPNOTSUPP?

Ah, yes, I'll do that. Don't know why it didn't occur to me right away
(as I didn't like this property of the code above either).

Jan
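
For reference, a rough sketch of the direction suggested above (hypothetical; the actual follow-up may differ, and the hook could then stay void since it can no longer fail):

    /* At VMX setup time, drop the hook when the control is unavailable: */
    if ( !cpu_has_vmx_dt_exiting )
        vmx_function_table.set_descriptor_access_exiting = NULL;

    /* ... and in arch_monitor_domctl_event(), before domain_pause(d): */
    if ( !hvm_funcs.set_descriptor_access_exiting )
        return -EOPNOTSUPP;

The per-vCPU loop can then remain as it was, with no partial-update window to worry about.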
Tian, Kevin April 14, 2017, 6:22 a.m. UTC | #4
> From: Jan Beulich [mailto:JBeulich@suse.com]
> Sent: Thursday, April 13, 2017 10:53 PM
> 
> This is an optional feature and hence we should check for it before
> use.
> 
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> 

Reviewed-by: Kevin Tian <kevin.tian@intel.com>