@@ -108,6 +108,7 @@ nestedhvm_flushtlb_ipi(void *info)
*/
hvm_asid_flush_core();
vcpu_nestedhvm(v).nv_p2m = NULL;
+ vcpu_2_nvmx(v).stale_eptp = true;
}
void
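
The hunk above extends nestedhvm_flushtlb_ipi() (in xen/arch/x86/hvm/nestedhvm.c according to the diffstat): besides flushing the core's ASIDs and dropping the cached np2m pointer, it now also records that the EPTP programmed for the vcpu can no longer be trusted. A minimal stand-alone model of that idea, using hypothetical stand-in types rather than the real Xen ones:

/* Illustrative model only: struct model_vcpu and model_flushtlb_ipi() are
 * hypothetical stand-ins for Xen's vcpu state and the IPI handler above. */
#include <stdbool.h>
#include <stddef.h>

struct model_vcpu {
    void *nv_p2m;      /* cached nested p2m (vcpu_nestedhvm(v).nv_p2m)  */
    bool  stale_eptp;  /* new flag: EPTP in the VMCS must be refreshed  */
};

static void model_flushtlb_ipi(struct model_vcpu *v)
{
    /* The real handler flushes the core's ASIDs first. */
    v->nv_p2m = NULL;      /* forget the cached np2m                      */
    v->stale_eptp = true;  /* force an EPTP rewrite before the next entry */
}
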
@@ -77,6 +77,8 @@ UNLIKELY_END(realmode)
mov %rsp,%rdi
call vmx_vmenter_helper
+ cmp $0,%eax
+ jne .Lvmx_vmentry_restart
mov VCPU_hvm_guest_cr2(%rbx),%rax
pop %r15
@@ -115,6 +117,10 @@ ENTRY(vmx_asm_do_vmentry)
GET_CURRENT(bx)
jmp .Lvmx_do_vmentry
+.Lvmx_vmentry_restart:
+ sti
+ jmp .Lvmx_do_vmentry
+
.Lvmx_goto_emulator:
sti
mov %rsp,%rdi
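
The two entry.S hunks implement a retry protocol: vmx_vmenter_helper() now returns a status, and a non-zero value diverts the entry path to the new .Lvmx_vmentry_restart label, which re-enables interrupts and retries from .Lvmx_do_vmentry. A rough, self-contained C rendering of that loop follows, assuming (as in the existing entry path) that .Lvmx_do_vmentry re-runs nvmx_switch_guest() before interrupts are disabled again; the model_* names are stand-ins, not real Xen symbols:

/* Hypothetical model of the retry loop wired up in entry.S above. */
#include <stdbool.h>
#include <stdio.h>

static bool stale_eptp = true;            /* pretend an IPI marked us stale */

static void local_irq_enable(void)  { }   /* sti */
static void local_irq_disable(void) { }   /* cli */

/* Model of nvmx_switch_guest()/nvmx_eptp_update(): runs with IRQs enabled,
 * so it may take the locks needed to refresh the EPTP and clear the flag. */
static void model_switch_guest(void)
{
    if ( stale_eptp )
        stale_eptp = false;               /* __vmwrite(EPT_POINTER, ...) */
}

/* Model of vmx_vmenter_helper(): runs with IRQs disabled, so it can only
 * refuse the entry while the EPTP is still stale. */
static int model_vmenter_helper(void)
{
    return stale_eptp ? 1 : 0;
}

int main(void)
{
    for ( ;; )
    {
        model_switch_guest();             /* .Lvmx_do_vmentry path        */
        local_irq_disable();
        if ( model_vmenter_helper() == 0 )
            break;                        /* safe to VMLAUNCH/VMRESUME    */
        local_irq_enable();               /* .Lvmx_vmentry_restart: sti   */
    }
    puts("entering guest");
    return 0;
}
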
@@ -4236,13 +4236,17 @@ static void lbr_fixup(void)
bdw_erratum_bdf14_fixup();
}
-void vmx_vmenter_helper(const struct cpu_user_regs *regs)
+int vmx_vmenter_helper(const struct cpu_user_regs *regs)
{
struct vcpu *curr = current;
u32 new_asid, old_asid;
struct hvm_vcpu_asid *p_asid;
bool_t need_flush;
+ /* Shadow EPTP can't be updated here because irqs are disabled */
+ if ( nestedhvm_vcpu_in_guestmode(curr) && vcpu_2_nvmx(curr).stale_eptp )
+ return 1;
+
if ( curr->domain->arch.hvm_domain.pi_ops.do_resume )
curr->domain->arch.hvm_domain.pi_ops.do_resume(curr);
@@ -4303,6 +4307,8 @@ void vmx_vmenter_helper(const struct cpu_user_regs *regs)
__vmwrite(GUEST_RIP, regs->rip);
__vmwrite(GUEST_RSP, regs->rsp);
__vmwrite(GUEST_RFLAGS, regs->rflags | X86_EFLAGS_MBS);
+
+ return 0;
}
/*
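
The vmx.c hunks change vmx_vmenter_helper() from void to int so it can refuse an entry that would use a stale EPTP: at this point interrupts are already disabled, so (as the in-code comment notes) the shadow EPTP cannot be rewritten safely, and the only option is to report failure and let the assembly restart the path. A skeleton of that control flow, with a hypothetical stand-in type; only the early return mirrors the real hunk:

/* Illustrative skeleton; struct model_vcpu and the stubbed-out body are
 * hypothetical, only the early-return shape follows the hunk above. */
#include <stdbool.h>

struct model_vcpu {
    bool in_l2;        /* nestedhvm_vcpu_in_guestmode(curr) */
    bool stale_eptp;   /* vcpu_2_nvmx(curr).stale_eptp      */
};

static int model_vmenter_helper(struct model_vcpu *curr)
{
    /* IRQs are off here, so the EPTP cannot be refreshed; ask the
     * caller (the assembly stub) to restart the entry path instead. */
    if ( curr->in_l2 && curr->stale_eptp )
        return 1;

    /* ... ASID selection, GUEST_RIP/RSP/RFLAGS writes, etc. ... */

    return 0;
}
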
@@ -120,6 +120,7 @@ int nvmx_vcpu_initialise(struct vcpu *v)
nvmx->iobitmap[1] = NULL;
nvmx->msrbitmap = NULL;
INIT_LIST_HEAD(&nvmx->launched_list);
+ nvmx->stale_eptp = false;
return 0;
}
@@ -1390,12 +1391,26 @@ static void virtual_vmexit(struct cpu_user_regs *regs)
vmsucceed(regs);
}
+static void nvmx_eptp_update(void)
+{
+ if ( !nestedhvm_vcpu_in_guestmode(current) ||
+ vcpu_nestedhvm(current).nv_vmexit_pending ||
+ !vcpu_2_nvmx(current).stale_eptp ||
+ !nestedhvm_paging_mode_hap(current) )
+ return;
+
+ __vmwrite(EPT_POINTER, get_shadow_eptp(current));
+ vcpu_2_nvmx(current).stale_eptp = false;
+}
+
void nvmx_switch_guest(void)
{
struct vcpu *v = current;
struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
struct cpu_user_regs *regs = guest_cpu_user_regs();
+ nvmx_eptp_update();
+
/*
* A pending IO emulation may still be not finished. In this case, no
* virtual vmswitch is allowed. Or else, the following IO emulation will
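
nvmx_eptp_update() only acts when all of its guards pass: the vcpu must actually be running L2, no virtual vmexit may be pending (the vcpu is about to return to L1 anyway), the flag must be set, and nested paging must use HAP. Calling it at the top of nvmx_switch_guest() means the refresh happens on the way back into the guest, before the interrupt-disabled stretch that ends in vmx_vmenter_helper(). A stand-alone model of the predicate, with field names that are hypothetical stand-ins for the accessors used above:

/* Illustrative predicate only; the struct and its fields are hypothetical
 * stand-ins for the Xen accessors called in nvmx_eptp_update() above. */
#include <stdbool.h>

struct model_vcpu {
    bool in_l2;            /* nestedhvm_vcpu_in_guestmode() */
    bool vmexit_pending;   /* nv_vmexit_pending             */
    bool stale_eptp;       /* vcpu_2_nvmx(v).stale_eptp     */
    bool nested_hap;       /* nestedhvm_paging_mode_hap()   */
};

static bool model_needs_eptp_update(const struct model_vcpu *v)
{
    /* Rewrite EPT_POINTER only when the new value would actually be used
     * for the next L2 entry; otherwise the stale flag is left set. */
    return v->in_l2 && !v->vmexit_pending && v->stale_eptp && v->nested_hap;
}
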
@@ -1817,6 +1817,12 @@ static void assign_np2m(struct vcpu *v, struct p2m_domain *p2m)
cpumask_set_cpu(v->processor, p2m->dirty_cpumask);
}
+static void nvcpu_flush(struct vcpu *v)
+{
+ hvm_asid_flush_vcpu(v);
+ vcpu_2_nvmx(v).stale_eptp = true;
+}
+
struct p2m_domain *
p2m_get_nestedp2m(struct vcpu *v)
{
@@ -1840,7 +1846,7 @@ p2m_get_nestedp2m(struct vcpu *v)
if ( p2m->np2m_base == np2m_base || p2m->np2m_base == P2M_BASE_EADDR )
{
if ( p2m->np2m_base == P2M_BASE_EADDR )
- hvm_asid_flush_vcpu(v);
+ nvcpu_flush(v);
p2m->np2m_base = np2m_base;
assign_np2m(v, p2m);
p2m_unlock(p2m);
@@ -1857,7 +1863,7 @@ p2m_get_nestedp2m(struct vcpu *v)
p2m_flush_table(p2m);
p2m_lock(p2m);
p2m->np2m_base = np2m_base;
- hvm_asid_flush_vcpu(v);
+ nvcpu_flush(v);
assign_np2m(v, p2m);
p2m_unlock(p2m);
nestedp2m_unlock(d);
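
In the p2m.c hunks, both call sites that previously did a bare hvm_asid_flush_vcpu() when a vcpu is given a (re)initialised np2m now use the new nvcpu_flush() helper, so the EPTP refresh is never forgotten when the np2m backing it changes. A stand-alone sketch of the helper's effect, with hypothetical stand-in names:

/* Illustrative model of nvcpu_flush(); the types and the ASID helper are
 * hypothetical stand-ins for the Xen originals. */
#include <stdbool.h>
#include <stdint.h>

struct model_vcpu {
    uint64_t asid_generation;  /* forces a fresh ASID when zeroed */
    bool     stale_eptp;
};

static void model_asid_flush_vcpu(struct model_vcpu *v)
{
    v->asid_generation = 0;
}

/* When the vcpu is switched onto a new or rebuilt np2m, both the ASID and
 * the EPTP cached in its VMCS become invalid, so the two actions live in
 * one helper to keep the call sites in p2m_get_nestedp2m() in sync. */
static void model_nvcpu_flush(struct model_vcpu *v)
{
    model_asid_flush_vcpu(v);
    v->stale_eptp = true;
}
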
@@ -51,6 +51,8 @@ struct nestedvmx {
} ept;
uint32_t guest_vpid;
struct list_head launched_list;
+
+ bool stale_eptp; /* True when the EPTP in the shadow VMCS is no longer valid */
};
#define vcpu_2_nvmx(v) (vcpu_nestedhvm(v).u.nvmx)
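
Taken together, the hunks give the new field a simple lifecycle: it is set whenever the vcpu's np2m is invalidated (the flush IPI) or replaced (nvcpu_flush() in p2m.c), it blocks vmentry in vmx_vmenter_helper() while interrupts are off, and it is cleared once nvmx_eptp_update() has written the fresh EPT_POINTER. A compact model of that lifecycle, where every identifier is illustrative rather than a real Xen symbol:

/* Illustrative lifecycle summary; all identifiers below are hypothetical. */
#include <stdbool.h>

static bool stale_eptp;

static void on_np2m_flush_ipi(void)     { stale_eptp = true;  } /* nestedhvm.c                 */
static void on_np2m_reassigned(void)    { stale_eptp = true;  } /* p2m.c: nvcpu_flush()        */
static void on_switch_guest(void)       { stale_eptp = false; } /* vvmx.c: nvmx_eptp_update()  */
static bool can_enter_l2_irqs_off(void) { return !stale_eptp; } /* vmx.c: vmx_vmenter_helper() */
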
The new field indicates whether the shadow EPTP needs to be updated
prior to vmentry. An update is required if a nested vcpu gets a new
np2m or if its np2m was flushed by an IPI. A helper function,
nvcpu_flush(), is added.

Signed-off-by: Sergey Dyasli <sergey.dyasli@citrix.com>
---
 xen/arch/x86/hvm/nestedhvm.c       |  1 +
 xen/arch/x86/hvm/vmx/entry.S       |  6 ++++++
 xen/arch/x86/hvm/vmx/vmx.c         |  8 +++++++-
 xen/arch/x86/hvm/vmx/vvmx.c        | 15 +++++++++++++++
 xen/arch/x86/mm/p2m.c              | 10 ++++++++--
 xen/include/asm-x86/hvm/vmx/vvmx.h |  2 ++
 6 files changed, 39 insertions(+), 3 deletions(-)