diff mbox

[v2,1/3] xen/x86: ensure copying to L1 guest in update_runstate_area()

Message ID 20170223094117.7212-1-haozhong.zhang@intel.com (mailing list archive)
State New, archived
Headers show

Commit Message

Haozhong Zhang Feb. 23, 2017, 9:41 a.m. UTC
For an HVM domain, if a vcpu is in the nested guest mode,
__raw_copy_to_guest() and __copy_to_guest() used by
update_runstate_area() will copy data to L2 guest rather than L1
guest.

Besides copying to the wrong address, this bug also causes a crash in
the following code path:
    context_switch(prev, next)
      _update_runstate_area(next)
        update_runstate_area(next)
	  __raw_copy_to_guest(...)
	    ...
	      __hvm_copy(...)
	        paging_gva_to_gfn(...)
		  nestedhap_walk_L1_p2m(...)
		    nvmx_hap_walk_L1_p2m(..)
		      vmx_vmcs_enter(v) [ v = next ]
		        vmx_vmcs_try_enter(v) [ v = next ]
			  if ( likely(v == current) )
                              return v->arch.hvm_vmx.vmcs_pa == this_cpu(current_vmcs);
vmx_vmcs_try_enter() will fail and trigger the assert in
vmx_vmcs_enter(), if vcpu 'next' is in the nested guest mode and is
being scheduled to another CPU.

This commit temporarily clears the nested guest flag before all
__raw_copy_to_guest() and __copy_to_guest() in update_runstate_area(),
and restores the flag after those guest copy operations.

Signed-off-by: Haozhong Zhang <haozhong.zhang@intel.com>
---
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
---
 xen/arch/x86/domain.c | 23 +++++++++++++++++++++++
 1 file changed, 23 insertions(+)
diff mbox

Patch

diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 7d3071e..6053dcf 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -50,6 +50,7 @@ 
 #include <asm/mpspec.h>
 #include <asm/ldt.h>
 #include <asm/hvm/hvm.h>
+#include <asm/hvm/nestedhvm.h>
 #include <asm/hvm/support.h>
 #include <asm/hvm/viridian.h>
 #include <asm/debugreg.h>
@@ -1931,10 +1932,29 @@  bool_t update_runstate_area(struct vcpu *v)
     bool_t rc;
     smap_check_policy_t smap_policy;
     void __user *guest_handle = NULL;
+    bool nested_guest_mode = false;
 
     if ( guest_handle_is_null(runstate_guest(v)) )
         return 1;
 
+    /*
+     * Must be before all following __raw_copy_to_guest() and __copy_to_guest().
+     *
+     * Otherwise, if 'v' is in the nested guest mode, paging_gva_to_gfn() called
+     * from __raw_copy_to_guest() and __copy_to_guest() will treat the target
+     * address as L2 gva, and __raw_copy_to_guest() and __copy_to_guest() will
+     * consequently copy runstate to L2 guest rather than L1 guest.
+     *
+     * Therefore, we clear the nested guest flag before __raw_copy_to_guest()
+     * and __copy_to_guest(), and restore the flag after all guest copy.
+     */
+    if ( nestedhvm_enabled(v->domain) )
+    {
+        nested_guest_mode = nestedhvm_is_n2(v);
+        if ( nested_guest_mode )
+            nestedhvm_vcpu_exit_guestmode(v);
+    }
+
     smap_policy = smap_policy_change(v, SMAP_CHECK_ENABLED);
 
     if ( VM_ASSIST(v->domain, runstate_update_flag) )
@@ -1971,6 +1991,9 @@  bool_t update_runstate_area(struct vcpu *v)
 
     smap_policy_change(v, smap_policy);
 
+    if ( unlikely(nested_guest_mode) )
+        nestedhvm_vcpu_enter_guestmode(v);
+
     return rc;
 }