
[for-next,2/8] x86/domain: factor out pv_vcpu_initialise

Message ID 20170410132716.31610-3-wei.liu2@citrix.com (mailing list archive)
State New, archived

Commit Message

Wei Liu April 10, 2017, 1:27 p.m. UTC
No functional change.

Signed-off-by: Wei Liu <wei.liu2@citrix.com>
---
 xen/arch/x86/domain.c | 65 ++++++++++++++++++++++++++++-----------------------
 1 file changed, 36 insertions(+), 29 deletions(-)

Comments

Jan Beulich April 24, 2017, 9:57 a.m. UTC | #1
>>> On 10.04.17 at 15:27, <wei.liu2@citrix.com> wrote:
> +int vcpu_initialise(struct vcpu *v)
> +{
> +    struct domain *d = v->domain;
> +    int rc;
> +
> +    v->arch.flags = TF_kernel_mode;
> +
> +    rc = mapcache_vcpu_init(v);
> +    if ( rc )
> +        return rc;
> +
> +    if ( !is_idle_domain(d) )
> +    {
> +        paging_vcpu_init(v);
> +
> +        if ( (rc = vcpu_init_fpu(v)) != 0 )
> +            return rc;
> +
> +        vmce_init_vcpu(v);
> +    }
> +    else if ( (rc = xstate_alloc_save_area(v)) != 0 )
> +        return rc;
> +
> +    spin_lock_init(&v->arch.vpmu.vpmu_lock);
> +
> +    if ( is_hvm_domain(d) )
> +        rc = hvm_vcpu_initialise(v);
> +    else
> +        rc = pv_vcpu_initialise(v);
> +
>      if ( rc )
>      {
>          vcpu_destroy_fpu(v);

Below here v->arch.pv_vcpu.trap_ctxt is being freed, which now also
belongs in the PV function.
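
Something along these lines, perhaps (just a sketch of the intended
shape, with the middle of the function elided; not the posted patch):

static int pv_vcpu_initialise(struct vcpu *v)
{
    struct domain *d = v->domain;
    int rc = 0;

    spin_lock_init(&v->arch.pv_vcpu.shadow_ldt_lock);

    /* ... trap_ctxt allocation and the rest of the PV-only setup ... */

 done:
    if ( rc )
    {
        /*
         * Undo this function's own allocations rather than relying on
         * the caller's generic error path.
         */
        xfree(v->arch.pv_vcpu.trap_ctxt);
        v->arch.pv_vcpu.trap_ctxt = NULL;
    }

    return rc;
}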

Jan
Wei Liu April 24, 2017, 11:16 a.m. UTC | #2
On Mon, Apr 24, 2017 at 03:57:42AM -0600, Jan Beulich wrote:
> >>> On 10.04.17 at 15:27, <wei.liu2@citrix.com> wrote:
> > +int vcpu_initialise(struct vcpu *v)
> > +{
> > +    struct domain *d = v->domain;
> > +    int rc;
> > +
> > +    v->arch.flags = TF_kernel_mode;
> > +
> > +    rc = mapcache_vcpu_init(v);
> > +    if ( rc )
> > +        return rc;
> > +
> > +    if ( !is_idle_domain(d) )
> > +    {
> > +        paging_vcpu_init(v);
> > +
> > +        if ( (rc = vcpu_init_fpu(v)) != 0 )
> > +            return rc;
> > +
> > +        vmce_init_vcpu(v);
> > +    }
> > +    else if ( (rc = xstate_alloc_save_area(v)) != 0 )
> > +        return rc;
> > +
> > +    spin_lock_init(&v->arch.vpmu.vpmu_lock);
> > +
> > +    if ( is_hvm_domain(d) )
> > +        rc = hvm_vcpu_initialise(v);
> > +    else
> > +        rc = pv_vcpu_initialise(v);
> > +
> >      if ( rc )
> >      {
> >          vcpu_destroy_fpu(v);
> 
> Below here v->arch.pv_vcpu.trap_ctxt is being freed, which now also
> belongs in the PV function.
> 

After looking more closely at the original code, I think there is at
least one issue: the perdomain mapping is not destroyed in the error
path.

Note there is no memory leak, because all perdomain mappings are
eventually freed in arch_domain_destroy. But I think we should make the
code look less bad while we are at it. There is probably more code
churn to come.
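
Roughly something like this at the end of pv_vcpu_initialise (just a
sketch; the destroy_perdomain_mapping arguments are meant to mirror the
create_perdomain_mapping call for the GDT/LDT area and would need
double-checking):

 done:
    if ( rc )
        /*
         * Tear down the GDT/LDT per-domain mapping set up earlier in
         * this function instead of leaving it to arch_domain_destroy.
         */
        destroy_perdomain_mapping(d, GDT_VIRT_START(v),
                                  1 << GDT_LDT_VCPU_SHIFT);

    return rc;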

Wei.

> Jan
>

Patch

diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 90e2b1f82a..96c777c771 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -387,37 +387,10 @@  int switch_compat(struct domain *d)
     return rc;
 }
 
-int vcpu_initialise(struct vcpu *v)
+static int pv_vcpu_initialise(struct vcpu *v)
 {
     struct domain *d = v->domain;
-    int rc;
-
-    v->arch.flags = TF_kernel_mode;
-
-    rc = mapcache_vcpu_init(v);
-    if ( rc )
-        return rc;
-
-    if ( !is_idle_domain(d) )
-    {
-        paging_vcpu_init(v);
-
-        if ( (rc = vcpu_init_fpu(v)) != 0 )
-            return rc;
-
-        vmce_init_vcpu(v);
-    }
-    else if ( (rc = xstate_alloc_save_area(v)) != 0 )
-        return rc;
-
-    spin_lock_init(&v->arch.vpmu.vpmu_lock);
-
-    if ( is_hvm_domain(d) )
-    {
-        rc = hvm_vcpu_initialise(v);
-        goto done;
-    }
-
+    int rc = 0;
 
     spin_lock_init(&v->arch.pv_vcpu.shadow_ldt_lock);
 
@@ -458,7 +431,41 @@  int vcpu_initialise(struct vcpu *v)
             goto done;
         }
     }
+
  done:
+    return rc;
+}
+
+int vcpu_initialise(struct vcpu *v)
+{
+    struct domain *d = v->domain;
+    int rc;
+
+    v->arch.flags = TF_kernel_mode;
+
+    rc = mapcache_vcpu_init(v);
+    if ( rc )
+        return rc;
+
+    if ( !is_idle_domain(d) )
+    {
+        paging_vcpu_init(v);
+
+        if ( (rc = vcpu_init_fpu(v)) != 0 )
+            return rc;
+
+        vmce_init_vcpu(v);
+    }
+    else if ( (rc = xstate_alloc_save_area(v)) != 0 )
+        return rc;
+
+    spin_lock_init(&v->arch.vpmu.vpmu_lock);
+
+    if ( is_hvm_domain(d) )
+        rc = hvm_vcpu_initialise(v);
+    else
+        rc = pv_vcpu_initialise(v);
+
     if ( rc )
     {
         vcpu_destroy_fpu(v);