--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -93,7 +93,9 @@ static int p2m_initialise(struct domain
     int ret = 0;
 
     mm_rwlock_init(&p2m->lock);
+#ifdef CONFIG_HVM
     INIT_PAGE_LIST_HEAD(&p2m->pages);
+#endif
 
     p2m->domain = d;
     p2m->default_access = p2m_access_rwx;
@@ -627,6 +629,7 @@ struct page_info *p2m_get_page_from_gfn(
 }
 
 #ifdef CONFIG_HVM
+
 /* Returns: 0 for success, -errno for failure */
 int p2m_set_entry(struct p2m_domain *p2m, gfn_t gfn, mfn_t mfn,
                   unsigned int page_order, p2m_type_t p2mt, p2m_access_t p2ma)
@@ -666,7 +669,6 @@ int p2m_set_entry(struct p2m_domain *p2m
 
     return rc;
 }
-#endif
 
 mfn_t p2m_alloc_ptp(struct p2m_domain *p2m, unsigned int level)
 {
@@ -745,6 +747,8 @@ int p2m_alloc_table(struct p2m_domain *p
     return 0;
 }
 
+#endif /* CONFIG_HVM */
+
 /*
  * hvm fixme: when adding support for pvh non-hardware domains, this path must
  * cleanup any foreign p2m types (release refcnts on them).
@@ -753,7 +757,9 @@ void p2m_teardown(struct p2m_domain *p2m
 /* Return all the p2m pages to Xen.
  * We know we don't have any extra mappings to these pages */
 {
+#ifdef CONFIG_HVM
     struct page_info *pg;
+#endif
     struct domain *d;
 
     if (p2m == NULL)
@@ -762,11 +768,16 @@ void p2m_teardown(struct p2m_domain *p2m
     d = p2m->domain;
 
     p2m_lock(p2m);
+
     ASSERT(atomic_read(&d->shr_pages) == 0);
+
+#ifdef CONFIG_HVM
     p2m->phys_table = pagetable_null();
 
     while ( (pg = page_list_remove_head(&p2m->pages)) )
         d->arch.paging.free_page(d, pg);
+#endif
+
 
     p2m_unlock(p2m);
 }
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -2700,8 +2700,10 @@ int shadow_enable(struct domain *d, u32
  out_locked:
     paging_unlock(d);
  out_unlocked:
+#ifdef CONFIG_HVM
     if ( rv != 0 && !pagetable_is_null(p2m_get_pagetable(p2m)) )
         p2m_teardown(p2m);
+#endif
     if ( rv != 0 && pg != NULL )
     {
         pg->count_info &= ~PGC_count_mask;
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -370,6 +370,7 @@ static uint64_t domain_pgd_maddr(struct
 
     ASSERT(spin_is_locked(&hd->arch.mapping_lock));
 
+#ifdef CONFIG_HVM
     if ( iommu_use_hap_pt(d) )
     {
         pagetable_t pgt = p2m_get_pagetable(p2m_get_hostp2m(d));
@@ -377,6 +378,7 @@ static uint64_t domain_pgd_maddr(struct
         pgd_maddr = pagetable_get_paddr(pgt);
     }
     else
+#endif
     {
         if ( !hd->arch.vtd.pgd_maddr )
         {
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -208,9 +208,6 @@ struct p2m_domain {
     /* Lock that protects updates to the p2m */
     mm_rwlock_t lock;
 
-    /* Shadow translated domain: p2m mapping */
-    pagetable_t phys_table;
-
     /*
      * Same as a domain's dirty_cpumask but limited to
      * this p2m and those physical cpus whose vcpu's are in
@@ -229,9 +226,6 @@ struct p2m_domain {
      */
     p2m_access_t default_access;
 
-    /* Pages used to construct the p2m */
-    struct page_list_head pages;
-
     /* Host p2m: Log-dirty ranges registered for the domain. */
     struct rangeset *logdirty_ranges;
 
@@ -239,6 +233,12 @@ struct p2m_domain {
     bool global_logdirty;
 
 #ifdef CONFIG_HVM
+    /* Translated domain: p2m mapping */
+    pagetable_t phys_table;
+
+    /* Pages used to construct the p2m */
+    struct page_list_head pages;
+
     /* Alternate p2m: count of vcpu's currently using this p2m. */
     atomic_t active_vcpus;
 
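
Not part of the patch: a minimal, standalone sketch of the pattern it applies, where struct fields exist only under a config option and every user of those fields carries the same guard, so a build without the option fails to compile rather than silently touching dead fields. All identifiers below are illustrative, not Xen's.

/* Editor's sketch of conditionally compiled struct fields. */
#include <stdio.h>

#define CONFIG_HVM_LIKE         /* remove this line to mimic a PV-only build */

struct p2m_like {
    int lock;                   /* present in every configuration */
#ifdef CONFIG_HVM_LIKE
    int pages;                  /* exists only when the option is built in */
#endif
};

static void teardown(struct p2m_like *p)
{
    p->lock = 0;                /* common work stays unguarded */
#ifdef CONFIG_HVM_LIKE
    p->pages = 0;               /* any unguarded use is a compile error */
#endif
}

int main(void)
{
    struct p2m_like p = { .lock = 1 };

    teardown(&p);
    printf("sizeof: %zu\n", sizeof(p)); /* shrinks when the option is off */
    return 0;
}

Guarding the fields themselves, not just the code that frees them, is what makes every stray user show up as a build failure; that is why the shadow_enable() and domain_pgd_maddr() hunks above need matching #ifdef CONFIG_HVM guards.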