--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -559,6 +559,10 @@ int arch_vcpu_create(struct vcpu *v)
v->arch.flags = TF_kernel_mode;
+ rc = create_perdomain_mapping(v, PERDOMAIN_VIRT_START, 0, false);
+ if ( rc )
+ return rc;
+
rc = mapcache_vcpu_init(v);
if ( rc )
return rc;
@@ -607,6 +611,7 @@ int arch_vcpu_create(struct vcpu *v)
return rc;
fail:
+ free_perdomain_mappings(v);
paging_vcpu_teardown(v);
vcpu_destroy_fpu(v);
xfree(v->arch.msrs);
@@ -629,6 +634,8 @@ void arch_vcpu_destroy(struct vcpu *v)
hvm_vcpu_destroy(v);
else
pv_vcpu_destroy(v);
+
+ free_perdomain_mappings(v);
}
int arch_sanitise_domain_config(struct xen_domctl_createdomain *config)
@@ -870,8 +877,7 @@ int arch_domain_create(struct domain *d,
}
else if ( is_pv_domain(d) )
{
- if ( (rc = mapcache_domain_init(d)) != 0 )
- goto fail;
+ mapcache_domain_init(d);
if ( (rc = pv_domain_initialise(d)) != 0 )
goto fail;
@@ -909,7 +915,6 @@ int arch_domain_create(struct domain *d,
XFREE(d->arch.cpu_policy);
if ( paging_initialised )
paging_final_teardown(d);
- free_perdomain_mappings(d);
return rc;
}
@@ -935,7 +940,6 @@ void arch_domain_destroy(struct domain *d)
if ( is_pv_domain(d) )
pv_domain_destroy(d);
- free_perdomain_mappings(d);
free_xenheap_page(d->shared_info);
cleanup_domain_irq_mapping(d);
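Stepping back from the individual hunks above: the net effect on
xen/arch/x86/domain.c is that the per-domain area now follows the vCPU
life cycle instead of the domain's. A condensed sketch, using the
patch's function names but hypothetical wrapper names
(arch_vcpu_create_sketch() is illustration only, not the literal code):

/*
 * Condensed sketch: creation moves into the vCPU creation path,
 * teardown into the vCPU destruction path.
 */
int arch_vcpu_create_sketch(struct vcpu *v)
{
    int rc = create_perdomain_mapping(v, PERDOMAIN_VIRT_START, 0, false);

    if ( rc )
        return rc;               /* nothing set up yet */

    /* ... rest of vCPU setup; on failure the new fail: path runs
     * free_perdomain_mappings(v) ... */
    return 0;
}

void arch_vcpu_destroy_sketch(struct vcpu *v)
{
    /* ... HVM/PV specific teardown ... */
    free_perdomain_mappings(v);  /* replaces the per-domain call */
}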
--- a/xen/arch/x86/domain_page.c
+++ b/xen/arch/x86/domain_page.c
@@ -231,7 +231,7 @@ void unmap_domain_page(const void *ptr)
local_irq_restore(flags);
}
-int mapcache_domain_init(struct domain *d)
+void mapcache_domain_init(struct domain *d)
{
struct mapcache_domain *dcache = &d->arch.pv.mapcache;
unsigned int bitmap_pages;
@@ -240,7 +240,7 @@ int mapcache_domain_init(struct domain *d)
#ifdef NDEBUG
if ( !mem_hotplug && max_page <= PFN_DOWN(__pa(HYPERVISOR_VIRT_END - 1)) )
- return 0;
+ return;
#endif
BUILD_BUG_ON(MAPCACHE_VIRT_END + PAGE_SIZE * (3 +
@@ -252,9 +252,6 @@ int mapcache_domain_init(struct domain *d)
(bitmap_pages + 1) * PAGE_SIZE / sizeof(long);
spin_lock_init(&dcache->lock);
-
- return create_perdomain_mapping(d, (unsigned long)dcache->inuse,
- 2 * bitmap_pages + 1, false);
}
int mapcache_vcpu_init(struct vcpu *v)
@@ -271,14 +268,14 @@ int mapcache_vcpu_init(struct vcpu *v)
if ( ents > dcache->entries )
{
/* Populate page tables. */
- int rc = create_perdomain_mapping(d, MAPCACHE_VIRT_START, ents, false);
+ int rc = create_perdomain_mapping(v, MAPCACHE_VIRT_START, ents, false);
/* Populate bit maps. */
if ( !rc )
- rc = create_perdomain_mapping(d, (unsigned long)dcache->inuse,
+ rc = create_perdomain_mapping(v, (unsigned long)dcache->inuse,
nr, true);
if ( !rc )
- rc = create_perdomain_mapping(d, (unsigned long)dcache->garbage,
+ rc = create_perdomain_mapping(v, (unsigned long)dcache->garbage,
nr, true);
if ( rc )
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -601,10 +601,6 @@ int hvm_domain_initialise(struct domain *d,
INIT_LIST_HEAD(&d->arch.hvm.mmcfg_regions);
INIT_LIST_HEAD(&d->arch.hvm.msix_tables);
- rc = create_perdomain_mapping(d, PERDOMAIN_VIRT_START, 0, false);
- if ( rc )
- goto fail;
-
hvm_init_cacheattr_region_list(d);
rc = paging_enable(d, PG_refcounts|PG_translate|PG_external);
@@ -708,7 +704,6 @@ int hvm_domain_initialise(struct domain *d,
XFREE(d->arch.hvm.irq);
fail0:
hvm_destroy_cacheattr_region_list(d);
- fail:
hvm_domain_relinquish_resources(d);
XFREE(d->arch.hvm.io_handler);
XFREE(d->arch.hvm.pl_time);
--- a/xen/arch/x86/include/asm/domain.h
+++ b/xen/arch/x86/include/asm/domain.h
@@ -73,7 +73,7 @@ struct mapcache_domain {
unsigned long *garbage;
};
-int mapcache_domain_init(struct domain *d);
+void mapcache_domain_init(struct domain *d);
int mapcache_vcpu_init(struct vcpu *v);
void mapcache_override_current(struct vcpu *v);
--- a/xen/arch/x86/include/asm/mm.h
+++ b/xen/arch/x86/include/asm/mm.h
@@ -600,13 +600,13 @@ int compat_arch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg);
#define NIL(type) ((type *)-sizeof(type))
#define IS_NIL(ptr) (!((uintptr_t)(ptr) + sizeof(*(ptr))))
-int create_perdomain_mapping(struct domain *d, unsigned long va,
+int create_perdomain_mapping(struct vcpu *v, unsigned long va,
unsigned int nr, bool populate);
void populate_perdomain_mapping(const struct vcpu *v, unsigned long va,
mfn_t *mfn, unsigned long nr);
void destroy_perdomain_mapping(const struct vcpu *v, unsigned long va,
unsigned int nr);
-void free_perdomain_mappings(struct domain *d);
+void free_perdomain_mappings(struct vcpu *v);
void __iomem *ioremap_wc(paddr_t pa, size_t len);
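Caller-side, the prototype change above is mechanical: pass the vCPU
whose per-domain area is being set up instead of its domain. A
before/after distillation of the pv_create_gdt_ldt_l1tab() hunk further
down (surrounding declarations omitted):

/* Before: the area was keyed on, and shared by, the whole domain. */
rc = create_perdomain_mapping(v->domain, GDT_VIRT_START(v),
                              1U << GDT_LDT_VCPU_SHIFT, false);

/* After: keyed on the vCPU itself; the domain is recovered inside
 * create_perdomain_mapping() via v->domain (see the mm.c hunk below). */
rc = create_perdomain_mapping(v, GDT_VIRT_START(v),
                              1U << GDT_LDT_VCPU_SHIFT, false);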
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -6300,9 +6300,10 @@ static bool perdomain_l1e_needs_freeing(l1_pgentry_t l1e)
(_PAGE_PRESENT | _PAGE_AVAIL0);
}
-int create_perdomain_mapping(struct domain *d, unsigned long va,
+int create_perdomain_mapping(struct vcpu *v, unsigned long va,
unsigned int nr, bool populate)
{
+ struct domain *d = v->domain;
struct page_info *pg;
l3_pgentry_t *l3tab;
l2_pgentry_t *l2tab;
@@ -6560,8 +6561,9 @@ void destroy_perdomain_mapping(const struct vcpu *v, unsigned long va,
unmap_domain_page(l3tab);
}
-void free_perdomain_mappings(struct domain *d)
+void free_perdomain_mappings(struct vcpu *v)
{
+ struct domain *d = v->domain;
l3_pgentry_t *l3tab;
unsigned int i;
--- a/xen/arch/x86/pv/domain.c
+++ b/xen/arch/x86/pv/domain.c
@@ -277,7 +277,7 @@ int switch_compat(struct domain *d)
static int pv_create_gdt_ldt_l1tab(struct vcpu *v)
{
- return create_perdomain_mapping(v->domain, GDT_VIRT_START(v),
+ return create_perdomain_mapping(v, GDT_VIRT_START(v),
1U << GDT_LDT_VCPU_SHIFT, false);
}
--- a/xen/arch/x86/x86_64/mm.c
+++ b/xen/arch/x86/x86_64/mm.c
@@ -730,7 +730,7 @@ void __init zap_low_mappings(void)
int setup_compat_arg_xlat(struct vcpu *v)
{
- return create_perdomain_mapping(v->domain, ARG_XLAT_START(v),
+ return create_perdomain_mapping(v, ARG_XLAT_START(v),
PFN_UP(COMPAT_ARG_XLAT_SIZE), true);
}
In preparation for making the per-domain area per-vCPU. This requires
moving some of the {create,destroy}_perdomain_mapping() calls from the
domain initialization and teardown paths into the vCPU initialization
and teardown paths.

Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
---
 xen/arch/x86/domain.c             | 12 ++++++++----
 xen/arch/x86/domain_page.c        | 13 +++++--------
 xen/arch/x86/hvm/hvm.c            |  5 -----
 xen/arch/x86/include/asm/domain.h |  2 +-
 xen/arch/x86/include/asm/mm.h     |  4 ++--
 xen/arch/x86/mm.c                 |  6 ++++--
 xen/arch/x86/pv/domain.c          |  2 +-
 xen/arch/x86/x86_64/mm.c          |  2 +-
 8 files changed, 22 insertions(+), 24 deletions(-)
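For readers following along without the full tree, here is a minimal,
self-contained model of the interface change. Everything in it is a
stub: the structures, the printed IDs, and the example address are
placeholders rather than Xen's real definitions; only the shape of the
parameter change mirrors the patch.

#include <stdbool.h>
#include <stdio.h>

/* Stub types standing in for Xen's real structures. */
struct domain { int domain_id; };
struct vcpu   { int vcpu_id; struct domain *domain; };

/* Old shape: keyed on the domain, so the area was domain-wide. */
static int create_perdomain_mapping_old(struct domain *d, unsigned long va,
                                        unsigned int nr, bool populate)
{
    (void)populate;
    printf("d%d: %u entries at %#lx (domain-wide)\n", d->domain_id, nr, va);
    return 0;
}

/* New shape: keyed on the vCPU; the domain stays reachable. */
static int create_perdomain_mapping_new(struct vcpu *v, unsigned long va,
                                        unsigned int nr, bool populate)
{
    struct domain *d = v->domain;   /* same recovery as the mm.c hunk */

    (void)populate;
    printf("d%dv%d: %u entries at %#lx (per-vCPU)\n",
           d->domain_id, v->vcpu_id, nr, va);
    return 0;
}

int main(void)
{
    struct domain d = { .domain_id = 1 };
    struct vcpu v = { .vcpu_id = 0, .domain = &d };

    create_perdomain_mapping_old(&d, 0x1000UL, 0, false);
    create_perdomain_mapping_new(&v, 0x1000UL, 0, false);
    return 0;
}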