
[v1,Altp2m,cleanup,3/3] Making altp2m struct dynamically allocated.

Message ID 1466525090-1692-4-git-send-email-paul.c.lai@intel.com (mailing list archive)
State New, archived

Commit Message

Paul Lai June 21, 2016, 4:04 p.m. UTC
Ravi Sahita's dynamically allocated altp2m structs

Signed-off-by: Paul Lai <paul.c.lai@intel.com>
---
 xen/arch/x86/hvm/hvm.c       |  8 +++---
 xen/arch/x86/hvm/vmx/vmx.c   |  2 +-
 xen/arch/x86/mm/altp2m.c     | 18 +++++++-------
 xen/arch/x86/mm/mm-locks.h   |  4 +--
 xen/arch/x86/mm/p2m-ept.c    |  8 +++---
 xen/arch/x86/mm/p2m.c        | 59 ++++++++++++++++++++++++--------------------
 xen/include/asm-x86/altp2m.h |  7 +++++-
 xen/include/asm-x86/domain.h | 12 ++++++++-
 xen/include/asm-x86/p2m.h    |  2 +-
 9 files changed, 70 insertions(+), 50 deletions(-)

Comments

Jan Beulich June 28, 2016, 10:13 a.m. UTC | #1
>>> On 21.06.16 at 18:04, <paul.c.lai@intel.com> wrote:
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -5228,7 +5228,7 @@ static int do_altp2m_op(
>  
>      if ( (a.cmd != HVMOP_altp2m_get_domain_state) &&
>           (a.cmd != HVMOP_altp2m_set_domain_state) &&
> -         !d->arch.altp2m_active )
> +         ! altp2m_active(d) )

Stray blank.

> @@ -5262,11 +5262,11 @@ static int do_altp2m_op(
>              break;
>          }
>  
> -        ostate = d->arch.altp2m_active;
> -        d->arch.altp2m_active = !!a.u.domain_state.state;
> +        ostate = altp2m_active(d);
> +	set_altp2m_active(d, !!a.u.domain_state.state);

Bogus tab indentation.
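
I.e. indented with spaces like its neighbours:

        set_altp2m_active(d, !!a.u.domain_state.state);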

> --- a/xen/arch/x86/mm/p2m.c
> +++ b/xen/arch/x86/mm/p2m.c
> @@ -193,12 +193,15 @@ static void p2m_teardown_altp2m(struct domain *d)
>  
>      for ( i = 0; i < MAX_ALTP2M; i++ )
>      {
> -        if ( !d->arch.altp2m_p2m[i] )
> +        if ( !d->arch.altp2m->altp2m_p2m[i] )
>              continue;
> -        p2m = d->arch.altp2m_p2m[i];
> +        p2m = d->arch.altp2m->altp2m_p2m[i];
>          p2m_free_one(p2m);
> -        d->arch.altp2m_p2m[i] = NULL;
> +        d->arch.altp2m->altp2m_p2m[i] = NULL;
>      }
> +
> +    if (d->arch.altp2m) 

Missing blanks.

> +        xfree(d->arch.altp2m);

But the conditional is pointless anyway.
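
xfree() copes fine with NULL, so the tail of p2m_teardown_altp2m() can
simply be

    xfree(d->arch.altp2m);
    d->arch.altp2m = NULL;

(clearing the pointer is optional, but guards against a stale pointer
should teardown ever be reachable twice).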

> @@ -206,10 +209,12 @@ static int p2m_init_altp2m(struct domain *d)
>      unsigned int i;
>      struct p2m_domain *p2m;
>  
> -    mm_lock_init(&d->arch.altp2m_list_lock);
> +    d->arch.altp2m = xzalloc(struct altp2m_domain);
> +
> +    mm_lock_init(&d->arch.altp2m->altp2m_list_lock);

Missing error check.
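
xzalloc() can return NULL, so e.g. (assuming the caller checks the
return value of p2m_init_altp2m())

    d->arch.altp2m = xzalloc(struct altp2m_domain);
    if ( !d->arch.altp2m )
        return -ENOMEM;

    mm_lock_init(&d->arch.altp2m->altp2m_list_lock);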

> --- a/xen/include/asm-x86/domain.h
> +++ b/xen/include/asm-x86/domain.h
> @@ -274,6 +274,13 @@ struct monitor_write_data {
>      uint64_t cr4;
>  };
>  
> +struct altp2m_domain {
> +    bool_t altp2m_active;
> +    struct p2m_domain *altp2m_p2m[MAX_ALTP2M];
> +    mm_lock_t altp2m_list_lock;
> +    uint64_t *altp2m_eptp;
> +};

No point prefixing all the fields with altp2m_. Also, the structure no
longer belongs here - it should move to e.g. p2m.h.
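
I.e. perhaps something like (field names merely illustrative)

    struct altp2m_domain {
        bool_t active;
        struct p2m_domain *p2m[MAX_ALTP2M];
        mm_lock_t list_lock;
        uint64_t *eptp;
    };

in p2m.h, with uses then reading d->arch.altp2m->eptp etc.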

> @@ -320,10 +327,13 @@ struct arch_domain
>      mm_lock_t nested_p2m_lock;
>  
>      /* altp2m: allow multiple copies of host p2m */
> +    /*
>      bool_t altp2m_active;
>      struct p2m_domain *altp2m_p2m[MAX_ALTP2M];
>      mm_lock_t altp2m_list_lock;
> -    uint64_t *altp2m_eptp;
> +    uint64_t *altp2m_eptp; 
> +    */

What's the purpose of this comment?
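
Presumably the old fields should simply be deleted rather than left
commented out, i.e. the section would then read just

    /* altp2m: allow multiple copies of host p2m */
    struct altp2m_domain *altp2m;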

Jan

Patch

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 1595b3e..40270d0 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -5228,7 +5228,7 @@  static int do_altp2m_op(
 
     if ( (a.cmd != HVMOP_altp2m_get_domain_state) &&
          (a.cmd != HVMOP_altp2m_set_domain_state) &&
-         !d->arch.altp2m_active )
+         ! altp2m_active(d) )
     {
         rc = -EOPNOTSUPP;
         goto out;
@@ -5262,11 +5262,11 @@  static int do_altp2m_op(
             break;
         }
 
-        ostate = d->arch.altp2m_active;
-        d->arch.altp2m_active = !!a.u.domain_state.state;
+        ostate = altp2m_active(d);
+	set_altp2m_active(d, !!a.u.domain_state.state);
 
         /* If the alternate p2m state has changed, handle appropriately */
-        if ( d->arch.altp2m_active != ostate &&
+        if ( altp2m_active(d) != ostate &&
              (ostate || !(rc = p2m_init_altp2m_by_id(d, 0))) )
         {
             for_each_vcpu( d, v )
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 670d7dc..b522578 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -2017,7 +2017,7 @@  static void vmx_vcpu_update_vmfunc_ve(struct vcpu *v)
     {
         v->arch.hvm_vmx.secondary_exec_control |= mask;
         __vmwrite(VM_FUNCTION_CONTROL, VMX_VMFUNC_EPTP_SWITCHING);
-        __vmwrite(EPTP_LIST_ADDR, virt_to_maddr(d->arch.altp2m_eptp));
+        __vmwrite(EPTP_LIST_ADDR, virt_to_maddr(d->arch.altp2m->altp2m_eptp));
 
         if ( cpu_has_vmx_virt_exceptions )
         {
diff --git a/xen/arch/x86/mm/altp2m.c b/xen/arch/x86/mm/altp2m.c
index 1caf6b4..77187c9 100644
--- a/xen/arch/x86/mm/altp2m.c
+++ b/xen/arch/x86/mm/altp2m.c
@@ -72,23 +72,23 @@  hvm_altp2m_init( struct domain *d) {
     unsigned int i = 0;
 
     /* Init alternate p2m data */
-    if ( (d->arch.altp2m_eptp = alloc_xenheap_page()) == NULL )
+    if ( (d->arch.altp2m->altp2m_eptp = alloc_xenheap_page()) == NULL )
     {
         rv = -ENOMEM;
         goto out;
     }
 
     for ( i = 0; i < MAX_EPTP; i++ )
-        d->arch.altp2m_eptp[i] = INVALID_MFN;
+        d->arch.altp2m->altp2m_eptp[i] = INVALID_MFN;
 
     for ( i = 0; i < MAX_ALTP2M; i++ )
     {
-        rv = p2m_alloc_table(d->arch.altp2m_p2m[i]);
+        rv = p2m_alloc_table(d->arch.altp2m->altp2m_p2m[i]);
         if ( rv != 0 )
            goto out;
     }
 
-    d->arch.altp2m_active = 0;
+    set_altp2m_active(d, 0);
  out:
     return rv;
 }
@@ -96,16 +96,16 @@  hvm_altp2m_init( struct domain *d) {
 void
 hvm_altp2m_teardown( struct domain *d) {
     unsigned int i = 0;
-    d->arch.altp2m_active = 0;
+    set_altp2m_active(d, 0);
 
-    if ( d->arch.altp2m_eptp )
+    if ( d->arch.altp2m->altp2m_eptp )
     {
-        free_xenheap_page(d->arch.altp2m_eptp);
-        d->arch.altp2m_eptp = NULL;
+        free_xenheap_page(d->arch.altp2m->altp2m_eptp);
+        d->arch.altp2m->altp2m_eptp = NULL;
     }
 
     for ( i = 0; i < MAX_ALTP2M; i++ )
-        p2m_teardown(d->arch.altp2m_p2m[i]);
+        p2m_teardown(d->arch.altp2m->altp2m_p2m[i]);
 }
 
 /*
diff --git a/xen/arch/x86/mm/mm-locks.h b/xen/arch/x86/mm/mm-locks.h
index 086c8bb..4d17b0a 100644
--- a/xen/arch/x86/mm/mm-locks.h
+++ b/xen/arch/x86/mm/mm-locks.h
@@ -251,8 +251,8 @@  declare_mm_rwlock(p2m);
  */
 
 declare_mm_lock(altp2mlist)
-#define altp2m_list_lock(d)   mm_lock(altp2mlist, &(d)->arch.altp2m_list_lock)
-#define altp2m_list_unlock(d) mm_unlock(&(d)->arch.altp2m_list_lock)
+#define altp2m_list_lock(d)   mm_lock(altp2mlist, &(d)->arch.altp2m->altp2m_list_lock)
+#define altp2m_list_unlock(d) mm_unlock(&(d)->arch.altp2m->altp2m_list_lock)
 
 /* P2M lock (per-altp2m-table)
  *
diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
index dff34b1..754b660 100644
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -1330,14 +1330,14 @@  void setup_ept_dump(void)
 }
 
 void p2m_init_altp2m_helper( struct domain *d, unsigned int i) {
-    struct p2m_domain *p2m = d->arch.altp2m_p2m[i];
+    struct p2m_domain *p2m = d->arch.altp2m->altp2m_p2m[i];
     struct ept_data *ept;
 
     p2m->min_remapped_gfn = INVALID_GFN;
     p2m->max_remapped_gfn = 0;
     ept = &p2m->ept;
     ept->asr = pagetable_get_pfn(p2m_get_pagetable(p2m));
-    d->arch.altp2m_eptp[i] = ept_get_eptp(ept);
+    d->arch.altp2m->altp2m_eptp[i] = ept_get_eptp(ept);
 }
 
 unsigned int p2m_find_altp2m_by_eptp(struct domain *d, uint64_t eptp)
@@ -1350,10 +1350,10 @@  unsigned int p2m_find_altp2m_by_eptp(struct domain *d, uint64_t eptp)
 
     for ( i = 0; i < MAX_ALTP2M; i++ )
     {
-        if ( d->arch.altp2m_eptp[i] == INVALID_MFN )
+        if ( d->arch.altp2m->altp2m_eptp[i] == INVALID_MFN )
             continue;
 
-        p2m = d->arch.altp2m_p2m[i];
+        p2m = d->arch.altp2m->altp2m_p2m[i];
         ept = &p2m->ept;
 
         if ( eptp == ept_get_eptp(ept) )
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 90f2d95..70a8b15 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -193,12 +193,15 @@  static void p2m_teardown_altp2m(struct domain *d)
 
     for ( i = 0; i < MAX_ALTP2M; i++ )
     {
-        if ( !d->arch.altp2m_p2m[i] )
+        if ( !d->arch.altp2m->altp2m_p2m[i] )
             continue;
-        p2m = d->arch.altp2m_p2m[i];
+        p2m = d->arch.altp2m->altp2m_p2m[i];
         p2m_free_one(p2m);
-        d->arch.altp2m_p2m[i] = NULL;
+        d->arch.altp2m->altp2m_p2m[i] = NULL;
     }
+
+    if (d->arch.altp2m) 
+        xfree(d->arch.altp2m);
 }
 
 static int p2m_init_altp2m(struct domain *d)
@@ -206,10 +209,12 @@  static int p2m_init_altp2m(struct domain *d)
     unsigned int i;
     struct p2m_domain *p2m;
 
-    mm_lock_init(&d->arch.altp2m_list_lock);
+    d->arch.altp2m = xzalloc(struct altp2m_domain);
+
+    mm_lock_init(&d->arch.altp2m->altp2m_list_lock);
     for ( i = 0; i < MAX_ALTP2M; i++ )
     {
-        d->arch.altp2m_p2m[i] = p2m = p2m_init_one(d);
+        d->arch.altp2m->altp2m_p2m[i] = p2m = p2m_init_one(d);
         if ( p2m == NULL )
         {
             p2m_teardown_altp2m(d);
@@ -1838,10 +1843,10 @@  long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr,
     if ( altp2m_idx )
     {
         if ( altp2m_idx >= MAX_ALTP2M ||
-             d->arch.altp2m_eptp[altp2m_idx] == INVALID_MFN )
+             d->arch.altp2m->altp2m_eptp[altp2m_idx] == INVALID_MFN )
             return -EINVAL;
 
-        ap2m = d->arch.altp2m_p2m[altp2m_idx];
+        ap2m = d->arch.altp2m->altp2m_p2m[altp2m_idx];
     }
 
     switch ( access )
@@ -2280,7 +2285,7 @@  bool_t p2m_switch_vcpu_altp2m_by_id(struct vcpu *v, unsigned int idx)
 
     altp2m_list_lock(d);
 
-    if ( d->arch.altp2m_eptp[idx] != INVALID_MFN )
+    if ( d->arch.altp2m->altp2m_eptp[idx] != INVALID_MFN )
     {
         if ( idx != vcpu_altp2m(v).p2midx )
         {
@@ -2365,11 +2370,11 @@  void p2m_flush_altp2m(struct domain *d)
 
     for ( i = 0; i < MAX_ALTP2M; i++ )
     {
-        p2m_flush_table(d->arch.altp2m_p2m[i]);
+        p2m_flush_table(d->arch.altp2m->altp2m_p2m[i]);
         /* Uninit and reinit ept to force TLB shootdown */
-        ept_p2m_uninit(d->arch.altp2m_p2m[i]);
-        ept_p2m_init(d->arch.altp2m_p2m[i]);
-        d->arch.altp2m_eptp[i] = INVALID_MFN;
+        ept_p2m_uninit(d->arch.altp2m->altp2m_p2m[i]);
+        ept_p2m_init(d->arch.altp2m->altp2m_p2m[i]);
+        d->arch.altp2m->altp2m_eptp[i] = INVALID_MFN;
     }
 
     altp2m_list_unlock(d);
@@ -2384,7 +2389,7 @@  int p2m_init_altp2m_by_id(struct domain *d, unsigned int idx)
 
     altp2m_list_lock(d);
 
-    if ( d->arch.altp2m_eptp[idx] == INVALID_MFN )
+    if ( d->arch.altp2m->altp2m_eptp[idx] == INVALID_MFN )
     {
         p2m_init_altp2m_helper(d, idx);
         rc = 0;
@@ -2403,7 +2408,7 @@  int p2m_init_next_altp2m(struct domain *d, uint16_t *idx)
 
     for ( i = 0; i < MAX_ALTP2M; i++ )
     {
-        if ( d->arch.altp2m_eptp[i] != INVALID_MFN )
+        if ( d->arch.altp2m->altp2m_eptp[i] != INVALID_MFN )
             continue;
 
         p2m_init_altp2m_helper(d, i);
@@ -2429,17 +2434,17 @@  int p2m_destroy_altp2m_by_id(struct domain *d, unsigned int idx)
 
     altp2m_list_lock(d);
 
-    if ( d->arch.altp2m_eptp[idx] != INVALID_MFN )
+    if ( d->arch.altp2m->altp2m_eptp[idx] != INVALID_MFN )
     {
-        p2m = d->arch.altp2m_p2m[idx];
+        p2m = d->arch.altp2m->altp2m_p2m[idx];
 
         if ( !_atomic_read(p2m->active_vcpus) )
         {
-            p2m_flush_table(d->arch.altp2m_p2m[idx]);
+            p2m_flush_table(d->arch.altp2m->altp2m_p2m[idx]);
             /* Uninit and reinit ept to force TLB shootdown */
-            ept_p2m_uninit(d->arch.altp2m_p2m[idx]);
-            ept_p2m_init(d->arch.altp2m_p2m[idx]);
-            d->arch.altp2m_eptp[idx] = INVALID_MFN;
+            ept_p2m_uninit(d->arch.altp2m->altp2m_p2m[idx]);
+            ept_p2m_init(d->arch.altp2m->altp2m_p2m[idx]);
+            d->arch.altp2m->altp2m_eptp[idx] = INVALID_MFN;
             rc = 0;
         }
     }
@@ -2463,7 +2468,7 @@  int p2m_switch_domain_altp2m_by_id(struct domain *d, unsigned int idx)
 
     altp2m_list_lock(d);
 
-    if ( d->arch.altp2m_eptp[idx] != INVALID_MFN )
+    if ( d->arch.altp2m->altp2m_eptp[idx] != INVALID_MFN )
     {
         for_each_vcpu( d, v )
             if ( idx != vcpu_altp2m(v).p2midx )
@@ -2494,11 +2499,11 @@  int p2m_change_altp2m_gfn(struct domain *d, unsigned int idx,
     unsigned int page_order;
     int rc = -EINVAL;
 
-    if ( idx >= MAX_ALTP2M || d->arch.altp2m_eptp[idx] == INVALID_MFN )
+    if ( idx >= MAX_ALTP2M || d->arch.altp2m->altp2m_eptp[idx] == INVALID_MFN )
         return rc;
 
     hp2m = p2m_get_hostp2m(d);
-    ap2m = d->arch.altp2m_p2m[idx];
+    ap2m = d->arch.altp2m->altp2m_p2m[idx];
 
     p2m_lock(ap2m);
 
@@ -2589,10 +2594,10 @@  void p2m_altp2m_propagate_change(struct domain *d, gfn_t gfn,
 
     for ( i = 0; i < MAX_ALTP2M; i++ )
     {
-        if ( d->arch.altp2m_eptp[i] == INVALID_MFN )
+        if ( d->arch.altp2m->altp2m_eptp[i] == INVALID_MFN )
             continue;
 
-        p2m = d->arch.altp2m_p2m[i];
+        p2m = d->arch.altp2m->altp2m_p2m[i];
         m = get_gfn_type_access(p2m, gfn_x(gfn), &t, &a, 0, NULL);
 
         /* Check for a dropped page that may impact this altp2m */
@@ -2613,10 +2618,10 @@  void p2m_altp2m_propagate_change(struct domain *d, gfn_t gfn,
                 for ( i = 0; i < MAX_ALTP2M; i++ )
                 {
                     if ( i == last_reset_idx ||
-                         d->arch.altp2m_eptp[i] == INVALID_MFN )
+                         d->arch.altp2m->altp2m_eptp[i] == INVALID_MFN )
                         continue;
 
-                    p2m = d->arch.altp2m_p2m[i];
+                    p2m = d->arch.altp2m->altp2m_p2m[i];
                     p2m_lock(p2m);
                     p2m_reset_altp2m(p2m);
                     p2m_unlock(p2m);
diff --git a/xen/include/asm-x86/altp2m.h b/xen/include/asm-x86/altp2m.h
index 7ce047d..eca0ec7 100644
--- a/xen/include/asm-x86/altp2m.h
+++ b/xen/include/asm-x86/altp2m.h
@@ -24,7 +24,12 @@ 
 /* Alternate p2m HVM on/off per domain */
 static inline bool_t altp2m_active(const struct domain *d)
 {
-    return d->arch.altp2m_active;
+    return d->arch.altp2m->altp2m_active;
+}
+
+static inline void set_altp2m_active(const struct domain *d, bool_t v)
+{
+    d->arch.altp2m->altp2m_active = v;
 }
 
 /* Alternate p2m VCPU */
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index 783fa4f..614dd40 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -274,6 +274,13 @@  struct monitor_write_data {
     uint64_t cr4;
 };
 
+struct altp2m_domain {
+    bool_t altp2m_active;
+    struct p2m_domain *altp2m_p2m[MAX_ALTP2M];
+    mm_lock_t altp2m_list_lock;
+    uint64_t *altp2m_eptp;
+};
+
 struct arch_domain
 {
     struct page_info *perdomain_l3_pg;
@@ -320,10 +327,13 @@  struct arch_domain
     mm_lock_t nested_p2m_lock;
 
     /* altp2m: allow multiple copies of host p2m */
+    /*
     bool_t altp2m_active;
     struct p2m_domain *altp2m_p2m[MAX_ALTP2M];
     mm_lock_t altp2m_list_lock;
-    uint64_t *altp2m_eptp;
+    uint64_t *altp2m_eptp; 
+    */
+    struct altp2m_domain *altp2m;
 
     /* NB. protected by d->event_lock and by irq_desc[irq].lock */
     struct radix_tree_root irq_pirq;
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index d7c8c12..3185ec7 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -776,7 +776,7 @@  static inline struct p2m_domain *p2m_get_altp2m(struct vcpu *v)
 
     BUG_ON(index >= MAX_ALTP2M);
 
-    return v->domain->arch.altp2m_p2m[index];
+    return v->domain->arch.altp2m->altp2m_p2m[index];
 }
 
 /* Switch alternate p2m for a single vcpu */