--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -1875,6 +1875,49 @@ p2m_get_p2m(struct vcpu *v)
     return p2m_get_nestedp2m(v);
 }
 
+void np2m_schedule(int dir)
+{
+    struct nestedvcpu *nv = &vcpu_nestedhvm(current);
+    struct p2m_domain *p2m;
+
+    ASSERT(dir == NP2M_SCHEDULE_IN || dir == NP2M_SCHEDULE_OUT);
+
+    if ( !nestedhvm_enabled(current->domain) ||
+         !nestedhvm_vcpu_in_guestmode(current) ||
+         !nestedhvm_paging_mode_hap(current) )
+        return;
+
+    p2m = nv->nv_p2m;
+    if ( p2m )
+    {
+        bool np2m_valid;
+
+        p2m_lock(p2m);
+        np2m_valid = p2m->np2m_base == nhvm_vcpu_p2m_base(current) &&
+                     nv->np2m_generation == p2m->np2m_generation;
+        if ( dir == NP2M_SCHEDULE_OUT && np2m_valid )
+        {
+            /*
+             * The np2m is up to date but this vCPU will no longer use it,
+             * which means there is no reason to send a flush IPI.
+             */
+            cpumask_clear_cpu(current->processor, p2m->dirty_cpumask);
+        }
+        else if ( dir == NP2M_SCHEDULE_IN )
+        {
+            if ( !np2m_valid )
+            {
+                /* This vCPU's np2m was flushed while it was not runnable */
+                hvm_asid_flush_core();
+                nv->nv_p2m = NULL;
+            }
+            else
+                cpumask_set_cpu(current->processor, p2m->dirty_cpumask);
+        }
+        p2m_unlock(p2m);
+    }
+}
+
 unsigned long paging_gva_to_gfn(struct vcpu *v,
                                 unsigned long va,
                                 uint32_t *pfec)
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -370,6 +370,11 @@ struct p2m_domain *p2m_get_nestedp2m(struct vcpu *v);
  */
 struct p2m_domain *p2m_get_p2m(struct vcpu *v);
 
+#define NP2M_SCHEDULE_IN  0
+#define NP2M_SCHEDULE_OUT 1
+
+void np2m_schedule(int dir);
+
 static inline bool_t p2m_is_hostp2m(const struct p2m_domain *p2m)
 {
     return p2m->p2m_class == p2m_host;
np2m maintenance is required for a nested vCPU during scheduling:

1. On schedule-out: clear the pCPU's bit in p2m->dirty_cpumask to
   prevent useless flush IPIs.

2. On schedule-in: check that the np2m is still up to date, i.e. that
   it wasn't flushed while the vCPU was not runnable.

Signed-off-by: Sergey Dyasli <sergey.dyasli@citrix.com>
---
RFC --> v1:
- np2m_schedule() now accepts NP2M_SCHEDULE_IN/OUT

 xen/arch/x86/mm/p2m.c     | 43 +++++++++++++++++++++++++++++++++++++++++++
 xen/include/asm-x86/p2m.h |  5 +++++
 2 files changed, 48 insertions(+)
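
For reviewers, a minimal sketch of how the two directions are meant to
be driven from the scheduler. This patch deliberately adds no callers
(they arrive later in the series), so the wrapper below and the name
example_context_switch() are illustrative assumptions, not a real Xen
entry point:

/* Hypothetical caller: np2m_schedule() always acts on 'current'. */
void example_context_switch(void)
{
    /*
     * 'current' is still the vCPU being descheduled here: stop
     * tracking this pCPU in its np2m's dirty_cpumask, since a valid
     * but unused np2m needs no flush IPIs.
     */
    np2m_schedule(NP2M_SCHEDULE_OUT);

    /* ... the scheduler switches vCPUs; 'current' becomes the
       incoming vCPU ... */

    /*
     * Re-check the incoming vCPU's np2m: if it was flushed while the
     * vCPU was not runnable, flush ASIDs and drop nv_p2m so that it
     * is re-established on the next nested VM entry; otherwise resume
     * tracking this pCPU in dirty_cpumask.
     */
    np2m_schedule(NP2M_SCHEDULE_IN);
}

The key design point is that both calls rely on np2m_valid (the
np2m_base and generation checks under p2m_lock()) to decide whether
the cached nv_p2m can still be trusted.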