diff mbox

[RFC,07/12] x86/np2m: add np2m_schedule_in/out()

Message ID 20170718103429.25020-8-sergey.dyasli@citrix.com (mailing list archive)
State New, archived
Headers show

Commit Message

Sergey Dyasli July 18, 2017, 10:34 a.m. UTC
np2m maintenance is required for a nested vcpu during scheduling:

    1. On schedule-out: clear pCPU's bit in p2m->dirty_cpumask
                        to prevent useless IPIs.

    2. On schedule-in: check if np2m is up to date and wasn't flushed.

Signed-off-by: Sergey Dyasli <sergey.dyasli@citrix.com>
---
 xen/arch/x86/mm/p2m.c     | 52 +++++++++++++++++++++++++++++++++++++++++++++++
 xen/include/asm-x86/p2m.h |  3 +++
 2 files changed, 55 insertions(+)

Comments

George Dunlap Aug. 28, 2017, 4:42 p.m. UTC | #1
On 07/18/2017 11:34 AM, Sergey Dyasli wrote:
> np2m maintenance is required for a nested vcpu during scheduling:
> 
>     1. On schedule-out: clear pCPU's bit in p2m->dirty_cpumask
>                         to prevent useless IPIs.
> 
>     2. On schedule-in: check if np2m is up to date and wasn't flushed.
> 
> Signed-off-by: Sergey Dyasli <sergey.dyasli@citrix.com>
> ---
>  xen/arch/x86/mm/p2m.c     | 52 +++++++++++++++++++++++++++++++++++++++++++++++
>  xen/include/asm-x86/p2m.h |  3 +++
>  2 files changed, 55 insertions(+)
> 
> diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
> index 3d65899b05..4b83d4a4f1 100644
> --- a/xen/arch/x86/mm/p2m.c
> +++ b/xen/arch/x86/mm/p2m.c
> @@ -1880,6 +1880,58 @@ p2m_get_p2m(struct vcpu *v)
>      return p2m_get_nestedp2m(v);
>  }
>  
> +static void np2m_schedule(bool sched_out)
> +{
> +    struct nestedvcpu *nv = &vcpu_nestedhvm(current);
> +    struct p2m_domain *p2m;
> +    bool sched_in = !sched_out;
> +
> +    if ( !nestedhvm_enabled(current->domain) ||
> +         !nestedhvm_vcpu_in_guestmode(current) ||
> +         !nestedhvm_paging_mode_hap(current) )
> +        return;
> +
> +    p2m = nv->nv_p2m;
> +    if ( p2m )
> +    {
> +        bool np2m_valid;
> +
> +        p2m_lock(p2m);
> +        np2m_valid = p2m->np2m_base == nhvm_vcpu_p2m_base(current) &&
> +                     nv->np2m_generation == p2m->np2m_generation;
> +        if ( sched_out && np2m_valid )
> +        {
> +            /*
> +             * The np2m is up to date but this vCPU will no longer use it,
> +             * which means there are no reasons to send a flush IPI.
> +             */
> +            cpumask_clear_cpu(current->processor, p2m->dirty_cpumask);
> +        }
> +        else if ( sched_in )
> +        {
> +            if ( !np2m_valid )
> +            {
> +                /* This vCPU's np2m was flushed while it was not runnable */
> +                hvm_asid_flush_core();
> +                vcpu_nestedhvm(current).nv_p2m = NULL;
> +            }
> +            else
> +                cpumask_set_cpu(current->processor, p2m->dirty_cpumask);
> +        }
> +        p2m_unlock(p2m);
> +    }
> +}

This level of sharing seems a tad excessive to me; but if we're going to
do it, I think it would be clearer if the callers invoked a single
np2m_schedule() function with a `dir` argument, then define things
something like this:

#define NP2M_SCHEDULE_IN  0
#define NP2M_SCHEDULE_OUT 1

 -George
diff mbox

Patch

diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 3d65899b05..4b83d4a4f1 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -1880,6 +1880,58 @@  p2m_get_p2m(struct vcpu *v)
     return p2m_get_nestedp2m(v);
 }
 
+static void np2m_schedule(bool sched_out)
+{
+    struct nestedvcpu *nv = &vcpu_nestedhvm(current);
+    struct p2m_domain *p2m;
+    bool sched_in = !sched_out;
+
+    if ( !nestedhvm_enabled(current->domain) ||
+         !nestedhvm_vcpu_in_guestmode(current) ||
+         !nestedhvm_paging_mode_hap(current) )
+        return;
+
+    p2m = nv->nv_p2m;
+    if ( p2m )
+    {
+        bool np2m_valid;
+
+        p2m_lock(p2m);
+        np2m_valid = p2m->np2m_base == nhvm_vcpu_p2m_base(current) &&
+                     nv->np2m_generation == p2m->np2m_generation;
+        if ( sched_out && np2m_valid )
+        {
+            /*
+             * The np2m is up to date but this vCPU will no longer use it,
+             * which means there are no reasons to send a flush IPI.
+             */
+            cpumask_clear_cpu(current->processor, p2m->dirty_cpumask);
+        }
+        else if ( sched_in )
+        {
+            if ( !np2m_valid )
+            {
+                /* This vCPU's np2m was flushed while it was not runnable */
+                hvm_asid_flush_core();
+                nv->nv_p2m = NULL; /* nv == &vcpu_nestedhvm(current) */
+            }
+            else
+                cpumask_set_cpu(current->processor, p2m->dirty_cpumask);
+        }
+        p2m_unlock(p2m);
+    }
+}
+
+void np2m_schedule_out(void)
+{
+    np2m_schedule(true); /* sched-out: stop flush IPIs to this pCPU */
+}
+
+void np2m_schedule_in(void)
+{
+    np2m_schedule(false); /* sched-in: revalidate np2m or drop a flushed one */
+}
+
 unsigned long paging_gva_to_gfn(struct vcpu *v,
                                 unsigned long va,
                                 uint32_t *pfec)
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index eedc7fd412..801a11a960 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -370,6 +370,9 @@  struct p2m_domain *p2m_get_nestedp2m(struct vcpu *v);
  */
 struct p2m_domain *p2m_get_p2m(struct vcpu *v);
 
+void np2m_schedule_out(void);
+void np2m_schedule_in(void);
+
 static inline bool_t p2m_is_hostp2m(const struct p2m_domain *p2m)
 {
     return p2m->p2m_class == p2m_host;