@@ -173,8 +173,7 @@ static int __init pvh_populate_memory_ra
continue;
}
 
- rc = guest_physmap_add_page(d, _gfn(start), page_to_mfn(page),
- order);
+ rc = p2m_add_page(d, _gfn(start), page_to_mfn(page), order, p2m_ram_rw);
if ( rc != 0 )
{
printk("Failed to populate memory: [%#lx,%#lx): %d\n",
@@ -39,9 +39,8 @@ int create_grant_p2m_mapping(uint64_t ad
p2mt = p2m_grant_map_ro;
else
p2mt = p2m_grant_map_rw;
- rc = guest_physmap_add_entry(current->domain,
- _gfn(addr >> PAGE_SHIFT),
- frame, PAGE_ORDER_4K, p2mt);
+ rc = p2m_add_page(current->domain, _gfn(addr >> PAGE_SHIFT),
+ frame, PAGE_ORDER_4K, p2mt);
if ( rc )
return GNTST_general_error;
else
@@ -68,7 +67,7 @@ int replace_grant_p2m_mapping(uint64_t a
type, mfn_x(old_mfn), mfn_x(frame));
return GNTST_general_error;
}
- if ( guest_physmap_remove_page(d, _gfn(gfn), frame, PAGE_ORDER_4K) )
+ if ( p2m_remove_page(d, _gfn(gfn), frame, PAGE_ORDER_4K) )
{
put_gfn(d, gfn);
return GNTST_general_error;
@@ -188,8 +188,7 @@ static void hvm_remove_ioreq_gfn(struct
if ( gfn_eq(iorp->gfn, INVALID_GFN) )
return;
 
- if ( guest_physmap_remove_page(d, iorp->gfn,
- page_to_mfn(iorp->page), 0) )
+ if ( p2m_remove_page(d, iorp->gfn, page_to_mfn(iorp->page), 0) )
domain_crash(d);
clear_page(iorp->va);
}
@@ -205,8 +204,7 @@ static int hvm_add_ioreq_gfn(struct iore
 
clear_page(iorp->va);
 
- rc = guest_physmap_add_page(d, iorp->gfn,
- page_to_mfn(iorp->page), 0);
+ rc = p2m_add_page(d, iorp->gfn, page_to_mfn(iorp->page), 0, p2m_ram_rw);
if ( rc == 0 )
paging_mark_pfn_dirty(d, _pfn(gfn_x(iorp->gfn)));
 
@@ -850,15 +850,17 @@ p2m_remove_entry(struct p2m_domain *p2m,
}
 
int
-guest_physmap_remove_page(struct domain *d, gfn_t gfn,
- mfn_t mfn, unsigned int page_order)
+p2m_remove_page(struct domain *d, gfn_t gfn, mfn_t mfn,
+ unsigned int page_order)
{
struct p2m_domain *p2m = p2m_get_hostp2m(d);
int rc;
 
- /* IOMMU for PV guests is handled in get_page_type() and put_page(). */
if ( !paging_mode_translate(d) )
- return 0;
+ {
+ ASSERT_UNREACHABLE();
+ return -EPERM;
+ }
 
gfn_lock(p2m, gfn, page_order);
rc = p2m_remove_entry(p2m, gfn, mfn, page_order);
@@ -867,6 +869,17 @@ guest_physmap_remove_page(struct domain
return rc;
}
 
+int
+guest_physmap_remove_page(struct domain *d, gfn_t gfn,
+ mfn_t mfn, unsigned int page_order)
+{
+ /* IOMMU for PV guests is handled in get_page_type() and put_page(). */
+ if ( !paging_mode_translate(d) )
+ return 0;
+
+ return p2m_remove_page(d, gfn, mfn, page_order);
+}
+
#endif /* CONFIG_HVM */
 
int
@@ -905,14 +918,14 @@ guest_physmap_add_page(struct domain *d,
return 0;
}
 
- return guest_physmap_add_entry(d, gfn, mfn, page_order, p2m_ram_rw);
+ return p2m_add_page(d, gfn, mfn, page_order, p2m_ram_rw);
}
 
#ifdef CONFIG_HVM
 
int
-guest_physmap_add_entry(struct domain *d, gfn_t gfn, mfn_t mfn,
- unsigned int page_order, p2m_type_t t)
+p2m_add_page(struct domain *d, gfn_t gfn, mfn_t mfn,
+ unsigned int page_order, p2m_type_t t)
{
struct p2m_domain *p2m = p2m_get_hostp2m(d);
unsigned long i;
@@ -2724,7 +2737,7 @@ static int p2m_add_foreign(struct domain
{
if ( is_special_page(mfn_to_page(prev_mfn)) )
/* Special pages are simply unhooked from this phys slot */
- rc = guest_physmap_remove_page(tdom, _gfn(gpfn), prev_mfn, 0);
+ rc = p2m_remove_page(tdom, _gfn(gpfn), prev_mfn, 0);
else
/* Normal domain memory is freed, to avoid leaking memory. */
rc = guest_remove_page(tdom, gpfn);
@@ -2732,7 +2745,7 @@ static int p2m_add_foreign(struct domain
goto put_both;
}
/*
- * Create the new mapping. Can't use guest_physmap_add_page() because it
+ * Create the new mapping. Can't use p2m_add_page() because it
* will update the m2p table which will result in mfn -> gpfn of dom0
* and not fgfn of domU.
*/
@@ -2846,7 +2859,7 @@ int xenmem_add_to_physmap_one(
{
if ( is_special_page(mfn_to_page(prev_mfn)) )
/* Special pages are simply unhooked from this phys slot. */
- rc = guest_physmap_remove_page(d, gpfn, prev_mfn, PAGE_ORDER_4K);
+ rc = p2m_remove_page(d, gpfn, prev_mfn, PAGE_ORDER_4K);
else if ( !mfn_eq(mfn, prev_mfn) )
/* Normal domain memory is freed, to avoid leaking memory. */
rc = guest_remove_page(d, gfn_x(gpfn));
@@ -2854,11 +2867,11 @@ int xenmem_add_to_physmap_one(
 
/* Unmap from old location, if any. */
if ( !rc && old_gpfn != INVALID_M2P_ENTRY && !gfn_eq(_gfn(old_gpfn), gpfn) )
- rc = guest_physmap_remove_page(d, _gfn(old_gpfn), mfn, PAGE_ORDER_4K);
+ rc = p2m_remove_page(d, _gfn(old_gpfn), mfn, PAGE_ORDER_4K);
 
/* Map at new location. */
if ( !rc )
- rc = guest_physmap_add_page(d, gpfn, mfn, PAGE_ORDER_4K);
+ rc = p2m_add_page(d, gpfn, mfn, PAGE_ORDER_4K, p2m_ram_rw);
 
put_all:
put_gfn(d, gfn_x(gpfn));
@@ -577,10 +577,11 @@ int p2m_alloc_table(struct p2m_domain *p
void p2m_teardown(struct p2m_domain *p2m);
void p2m_final_teardown(struct domain *d);
 
-/* Add a page to a domain's p2m table */
-int guest_physmap_add_entry(struct domain *d, gfn_t gfn,
- mfn_t mfn, unsigned int page_order,
- p2m_type_t t);
+/* Add/remove a page to/from a domain's p2m table. */
+int p2m_add_page(struct domain *d, gfn_t gfn, mfn_t mfn,
+ unsigned int page_order, p2m_type_t t);
+int p2m_remove_page(struct domain *d, gfn_t gfn, mfn_t mfn,
+ unsigned int page_order);
 
/* Untyped version for RAM only, for compatibility and PV. */
int __must_check guest_physmap_add_page(struct domain *d, gfn_t gfn, mfn_t mfn,