@@ -206,7 +206,9 @@ static void __init noinline probe_masking_msrs(void)
static void amd_ctxt_switch_levelling(const struct domain *nextd)
{
struct cpuidmasks *these_masks = &this_cpu(cpuidmasks);
- const struct cpuidmasks *masks = &cpuidmask_defaults;
+ const struct cpuidmasks *masks =
+ (nextd && is_pv_domain(nextd) && nextd->arch.pv_domain.cpuidmasks)
+ ? nextd->arch.pv_domain.cpuidmasks : &cpuidmask_defaults;
#define LAZY(cap, msr, field) \
({ \
@@ -154,13 +154,16 @@ static void __init probe_masking_msrs(void)
static void intel_ctxt_switch_levelling(const struct domain *nextd)
{
struct cpuidmasks *these_masks = &this_cpu(cpuidmasks);
- const struct cpuidmasks *masks = &cpuidmask_defaults;
+ const struct cpuidmasks *masks;
if (cpu_has_cpuid_faulting) {
set_cpuid_faulting(nextd && is_pv_domain(nextd));
return;
}
+ masks = (nextd && is_pv_domain(nextd) && nextd->arch.pv_domain.cpuidmasks)
+ ? nextd->arch.pv_domain.cpuidmasks : &cpuidmask_defaults;
+
#define LAZY(msr, field) \
({ \
if (unlikely(these_masks->field != masks->field) && \
@@ -578,6 +578,14 @@ int arch_domain_create(struct domain *d, unsigned int domcr_flags,
goto fail;
clear_page(d->arch.pv_domain.gdt_ldt_l1tab);
+ if ( levelling_caps & ~LCAP_faulting )
+ {
+ d->arch.pv_domain.cpuidmasks = xmalloc(struct cpuidmasks);
+ if ( !d->arch.pv_domain.cpuidmasks )
+ goto fail;
+ *d->arch.pv_domain.cpuidmasks = cpuidmask_defaults;
+ }
+
rc = create_perdomain_mapping(d, GDT_LDT_VIRT_START,
GDT_LDT_MBYTES << (20 - PAGE_SHIFT),
NULL, NULL);
@@ -673,7 +681,10 @@ int arch_domain_create(struct domain *d, unsigned int domcr_flags,
paging_final_teardown(d);
free_perdomain_mappings(d);
if ( is_pv_domain(d) )
+ {
+ xfree(d->arch.pv_domain.cpuidmasks);
free_xenheap_page(d->arch.pv_domain.gdt_ldt_l1tab);
+ }
psr_domain_free(d);
return rc;
}
@@ -693,7 +704,10 @@ void arch_domain_destroy(struct domain *d)
free_perdomain_mappings(d);
if ( is_pv_domain(d) )
+ {
free_xenheap_page(d->arch.pv_domain.gdt_ldt_l1tab);
+ xfree(d->arch.pv_domain.cpuidmasks);
+ }
free_xenheap_page(d->shared_info);
cleanup_domain_irq_mapping(d);
@@ -252,6 +252,8 @@ struct pv_domain
/* map_domain_page() mapping cache. */
struct mapcache_domain mapcache;
+
+ struct cpuidmasks *cpuidmasks;
};
struct monitor_write_data {
And use them in preference to cpuidmask_defaults on context switch. HVM domains must not be masked (to avoid interfering with cpuid calls within the guest), so always lazily context switch to the host default. Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com> --- CC: Jan Beulich <JBeulich@suse.com> v3: * Indentation fixes. * Only allocate PV cpuidmasks if the host has cpuidmasks to use. v2: * s/cpumasks/cpuidmasks/ * Use structure assignment * Fix error path in arch_domain_create() --- xen/arch/x86/cpu/amd.c | 4 +++- xen/arch/x86/cpu/intel.c | 5 ++++- xen/arch/x86/domain.c | 14 ++++++++++++++ xen/include/asm-x86/domain.h | 2 ++ 4 files changed, 23 insertions(+), 2 deletions(-)