@@ -101,30 +101,23 @@ nestedhap_fix_p2m(struct vcpu *v, struct p2m_domain *p2m,
unsigned int page_order, p2m_type_t p2mt, p2m_access_t p2ma)
{
int rc = 0;
+ unsigned long gfn, mask;
+ mfn_t mfn;
+
ASSERT(p2m);
ASSERT(p2m->set_entry);
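+ /* The caller is responsible for passing in a write-locked p2m. */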
+ ASSERT(p2m_locked_by_me(p2m));
- p2m_lock(p2m);
-
- /* If this p2m table has been flushed or recycled under our feet,
- * leave it alone. We'll pick up the right one as we try to
- * vmenter the guest. */
- if ( p2m->np2m_base == nhvm_vcpu_p2m_base(v) )
- {
- unsigned long gfn, mask;
- mfn_t mfn;
+ /*
+ * If this is a superpage mapping, round down both addresses to
+ * the start of the superpage.
+ */
+ mask = ~((1UL << page_order) - 1);
- /* If this is a superpage mapping, round down both addresses
- * to the start of the superpage. */
- mask = ~((1UL << page_order) - 1);
-
- gfn = (L2_gpa >> PAGE_SHIFT) & mask;
- mfn = _mfn((L0_gpa >> PAGE_SHIFT) & mask);
-
- rc = p2m_set_entry(p2m, gfn, mfn, page_order, p2mt, p2ma);
- }
+ gfn = (L2_gpa >> PAGE_SHIFT) & mask;
+ mfn = _mfn((L0_gpa >> PAGE_SHIFT) & mask);
- p2m_unlock(p2m);
+ rc = p2m_set_entry(p2m, gfn, mfn, page_order, p2mt, p2ma);
if ( rc )
{
@@ -212,7 +205,6 @@ nestedhvm_hap_nested_page_fault(struct vcpu *v, paddr_t *L2_gpa,
uint8_t p2ma_21 = p2m_access_rwx;
p2m = p2m_get_hostp2m(d); /* L0 p2m */
- nested_p2m = p2m_get_nestedp2m(v);
/* walk the L1 P2M table */
rv = nestedhap_walk_L1_p2m(v, *L2_gpa, &L1_gpa, &page_order_21, &p2ma_21,
@@ -278,8 +270,10 @@ nestedhvm_hap_nested_page_fault(struct vcpu *v, paddr_t *L2_gpa,
p2ma_10 &= (p2m_access_t)p2ma_21;
/* fix p2m_get_pagetable(nested_p2m) */
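+ /*
+ * Fetch the np2m write-locked and keep it locked across
+ * nestedhap_fix_p2m() so the table cannot be flushed or recycled
+ * under our feet before the entry is written.
+ */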
+ nested_p2m = p2m_get_nestedp2m_locked(v);
nestedhap_fix_p2m(v, nested_p2m, *L2_gpa, L0_gpa, page_order_20,
p2mt_10, p2ma_10);
+ p2m_unlock(nested_p2m);
return NESTEDHVM_PAGEFAULT_DONE;
}
@@ -1813,7 +1813,7 @@ static void assign_np2m(struct vcpu *v, struct p2m_domain *p2m)
}
struct p2m_domain *
-p2m_get_nestedp2m(struct vcpu *v)
+p2m_get_nestedp2m_locked(struct vcpu *v)
{
struct nestedvcpu *nv = &vcpu_nestedhvm(v);
struct domain *d = v->domain;
@@ -1838,7 +1838,6 @@ p2m_get_nestedp2m(struct vcpu *v)
hvm_asid_flush_vcpu(v);
p2m->np2m_base = np2m_base;
assign_np2m(v, p2m);
- p2m_unlock(p2m);
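+ /* Keep the p2m write-locked; unlocking is now the caller's job. */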
nestedp2m_unlock(d);
return p2m;
@@ -1854,12 +1853,19 @@ p2m_get_nestedp2m(struct vcpu *v)
p2m->np2m_base = np2m_base;
hvm_asid_flush_vcpu(v);
assign_np2m(v, p2m);
- p2m_unlock(p2m);
nestedp2m_unlock(d);
return p2m;
}
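+/*
+ * Unlocked wrapper: callers of p2m_get_nestedp2m() still receive the np2m
+ * with its write lock already dropped.
+ */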
+struct p2m_domain *p2m_get_nestedp2m(struct vcpu *v)
+{
+ struct p2m_domain *p2m = p2m_get_nestedp2m_locked(v);
+ p2m_unlock(p2m);
+
+ return p2m;
+}
+
struct p2m_domain *
p2m_get_p2m(struct vcpu *v)
{
@@ -363,6 +363,8 @@ struct p2m_domain {
* Updates vCPU's n2pm to match its np2m_base in VMCx12 and returns that np2m.
*/
struct p2m_domain *p2m_get_nestedp2m(struct vcpu *v);
+/* Similar to the above, except that the returned p2m is still write-locked */
+struct p2m_domain *p2m_get_nestedp2m_locked(struct vcpu *v);
/* If vcpu is in host mode then behaviour matches p2m_get_hostp2m().
* If vcpu is in guest mode then behaviour matches p2m_get_nestedp2m().