
[RFC,10/12] x86/np2m: implement sharing of np2m between vCPUs

Message ID 20170718103429.25020-11-sergey.dyasli@citrix.com (mailing list archive)
State New, archived

Commit Message

Sergey Dyasli July 18, 2017, 10:34 a.m. UTC
Modify p2m_get_nestedp2m() to allow sharing a np2m between multiple
vCPUs with the same np2m_base (L1 EPTP value in VMCS12).
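
As an illustration, the sharing rule boils down to matching masked
np2m_base values. A minimal sketch follows; find_shared_np2m() is a
hypothetical helper, not part of this patch -- the real lookup is the
locked loop added to p2m_get_nestedp2m_locked() below, which also takes
p2m_lock() around the comparison:

    /* Hypothetical helper, for illustration only: vCPUs whose L1 EPTP
     * values match after masking the low 12 bits can share one np2m. */
    static struct p2m_domain *find_shared_np2m(struct domain *d,
                                               uint64_t np2m_base)
    {
        unsigned int i;

        /* Mask out low bits, as p2m_get_nestedp2m_locked() does */
        np2m_base &= ~(0xfffull);

        for ( i = 0; i < MAX_NESTEDP2M; i++ )
            if ( d->arch.nested_p2m[i]->np2m_base == np2m_base )
                return d->arch.nested_p2m[i];

        return NULL;
    }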

np2m_schedule_in/out() callbacks are added to context_switch(), and a
pseudo schedule-out is performed during virtual_vmexit().
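
A rough sketch of the schedule-out side, pieced together from the call
sites in this patch (the nv_p2m field and vcpu_nestedhvm() accessor are
assumed from earlier patches in the series; the real helper is not
shown here):

    /* Sketch only -- not the exact helper added by this series.  On
     * schedule-out, drop this pCPU from the np2m's dirty set so that a
     * later flush of the shared np2m need not consider this pCPU. */
    static void np2m_schedule_out(void)
    {
        struct vcpu *v = current;
        struct p2m_domain *p2m = vcpu_nestedhvm(v).nv_p2m; /* assumed field */

        if ( p2m == NULL )
            return;

        p2m_lock(p2m);
        cpumask_clear_cpu(v->processor, p2m->dirty_cpumask);
        p2m_unlock(p2m);
    }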

Signed-off-by: Sergey Dyasli <sergey.dyasli@citrix.com>
---
 xen/arch/x86/domain.c       |  2 ++
 xen/arch/x86/hvm/vmx/vvmx.c |  4 ++++
 xen/arch/x86/mm/p2m.c       | 29 +++++++++++++++++++++++++++--
 3 files changed, 33 insertions(+), 2 deletions(-)

Patch

diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index dd8bf1302f..38c86a5ded 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -1642,6 +1642,7 @@  void context_switch(struct vcpu *prev, struct vcpu *next)
     {
         _update_runstate_area(prev);
         vpmu_switch_from(prev);
+        np2m_schedule_out();
     }
 
     if ( is_hvm_domain(prevd) && !list_empty(&prev->arch.hvm_vcpu.tm_list) )
@@ -1690,6 +1691,7 @@  void context_switch(struct vcpu *prev, struct vcpu *next)
 
         /* Must be done with interrupts enabled */
         vpmu_switch_to(next);
+        np2m_schedule_in();
     }
 
     /* Ensure that the vcpu has an up-to-date time base. */
diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index 7b193767cd..2203d541ea 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -1187,6 +1187,7 @@  static void virtual_vmentry(struct cpu_user_regs *regs)
 
     /* Set up virtual EPT for L2 guest */
     if ( nestedhvm_paging_mode_hap(v) )
+        /* This will set up the initial np2m for the nested vCPU */
         __vmwrite(EPT_POINTER, get_shadow_eptp(v));
     else
         __vmwrite(EPT_POINTER, get_host_eptp(v));
@@ -1353,6 +1354,9 @@  static void virtual_vmexit(struct cpu_user_regs *regs)
          !(v->arch.hvm_vcpu.guest_efer & EFER_LMA) )
         shadow_to_vvmcs_bulk(v, ARRAY_SIZE(gpdpte_fields), gpdpte_fields);
 
+    /* This will clear the current pCPU's bit in p2m->dirty_cpumask */
+    np2m_schedule_out();
+
     vmx_vmcs_switch(v->arch.hvm_vmx.vmcs_pa, nvcpu->nv_n1vmcx_pa);
 
     nestedhvm_vcpu_exit_guestmode(v);
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 364fdd8c13..480459ae51 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -1830,6 +1830,7 @@  p2m_get_nestedp2m_locked(struct vcpu *v)
     struct domain *d = v->domain;
     struct p2m_domain *p2m;
     uint64_t np2m_base = nhvm_vcpu_p2m_base(v);
+    unsigned int i;
 
     /* Mask out low bits; this avoids collisions with P2M_BASE_EADDR */
     np2m_base &= ~(0xfffull);
@@ -1843,10 +1844,34 @@  p2m_get_nestedp2m_locked(struct vcpu *v)
     if ( p2m ) 
     {
         p2m_lock(p2m);
-        if ( p2m->np2m_base == np2m_base || p2m->np2m_base == P2M_BASE_EADDR )
+        if ( p2m->np2m_base == np2m_base )
         {
-            if ( p2m->np2m_base == P2M_BASE_EADDR )
+            /* Check if np2m was flushed just before the lock */
+            if ( nv->np2m_generation != p2m->np2m_generation )
                 nvcpu_flush(v);
+            /* np2m is up-to-date */
+            p2m->np2m_base = np2m_base;
+            assign_np2m(v, p2m);
+            nestedp2m_unlock(d);
+
+            return p2m;
+        }
+        else if ( p2m->np2m_base != P2M_BASE_EADDR )
+        {
+            /* vCPU is switching from some other valid np2m */
+            cpumask_clear_cpu(v->processor, p2m->dirty_cpumask);
+        }
+        p2m_unlock(p2m);
+    }
+
+    /* Share a np2m if possible */
+    for ( i = 0; i < MAX_NESTEDP2M; i++ )
+    {
+        p2m = d->arch.nested_p2m[i];
+        p2m_lock(p2m);
+        if ( p2m->np2m_base == np2m_base )
+        {
+            nvcpu_flush(v);
             p2m->np2m_base = np2m_base;
             assign_np2m(v, p2m);
             nestedp2m_unlock(d);