
[v2,21/30] x86/pv: Provide custom cpumasks for PV domains

Message ID 1454679743-18133-22-git-send-email-andrew.cooper3@citrix.com (mailing list archive)
State New, archived

Commit Message

Andrew Cooper Feb. 5, 2016, 1:42 p.m. UTC
And use them in preference to cpuidmask_defaults on context switch.  HVM domains
must not be masked (to avoid interfering with cpuid calls within the guest),
so always lazily context switch to the host default.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
CC: Jan Beulich <JBeulich@suse.com>

v2:
 * s/cpumasks/cpuidmasks/
 * Use structure assignment
 * Fix error path in arch_domain_create()
---
 xen/arch/x86/cpu/amd.c       |  4 +++-
 xen/arch/x86/cpu/intel.c     |  5 ++++-
 xen/arch/x86/domain.c        | 11 +++++++++++
 xen/include/asm-x86/domain.h |  2 ++
 4 files changed, 20 insertions(+), 2 deletions(-)
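
In effect, the vendor context-switch hooks pick which mask set to load as
follows (a sketch distilled from the amd.c hunk below, with the commit
message's rationale as comments; the Intel path is analogous once CPUID
faulting has been ruled out):

    static void amd_ctxt_switch_levelling(const struct domain *nextd)
    {
        /* Per-domain masks exist only for PV domains.  HVM domains
         * (and the idle domain, where nextd is NULL) lazily revert to
         * the host defaults, so in-guest CPUID is not interfered with. */
        const struct cpuidmasks *masks =
            (nextd && is_pv_domain(nextd) &&
             nextd->arch.pv_domain.cpuidmasks)
            ? nextd->arch.pv_domain.cpuidmasks
            : &cpuidmask_defaults;

        /* ... LAZY() updates of the masking MSRs follow ... */
    }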

Comments

Jan Beulich Feb. 17, 2016, 8:13 a.m. UTC | #1
>>> On 05.02.16 at 14:42, <andrew.cooper3@citrix.com> wrote:
> --- a/xen/arch/x86/cpu/amd.c
> +++ b/xen/arch/x86/cpu/amd.c
> @@ -208,7 +208,9 @@ static void __init noinline probe_masking_msrs(void)
>  static void amd_ctxt_switch_levelling(const struct domain *nextd)
>  {
>  	struct cpuidmasks *these_masks = &this_cpu(cpuidmasks);
> -	const struct cpuidmasks *masks = &cpuidmask_defaults;
> +	const struct cpuidmasks *masks =
> +            (nextd && is_pv_domain(nextd) && nextd->arch.pv_domain.cpuidmasks)
> +            ? nextd->arch.pv_domain.cpuidmasks : &cpuidmask_defaults;

Mixing tabs and spaces for indentation.

> --- a/xen/arch/x86/domain.c
> +++ b/xen/arch/x86/domain.c
> @@ -574,6 +574,11 @@ int arch_domain_create(struct domain *d, unsigned int domcr_flags,
>              goto fail;
>          clear_page(d->arch.pv_domain.gdt_ldt_l1tab);
>  
> +        d->arch.pv_domain.cpuidmasks = xmalloc(struct cpuidmasks);
> +        if ( !d->arch.pv_domain.cpuidmasks )
> +            goto fail;
> +        *d->arch.pv_domain.cpuidmasks = cpuidmask_defaults;

Along the lines of not masking features for the hypervisor's own use
(see the respective comment on the earlier patch) I think this patch,
here or in domain_build.c, should except Dom0 from having the
default masking applied. This shouldn't, however, extend to CPUID
faulting. (Perhaps this rather belongs here so that the non-Dom0
hardware domain case can also be taken care of.)

Jan
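
For concreteness, Jan's suggestion would amount to something like the
following in arch_domain_create() (a hypothetical sketch only;
is_hardware_domain() is a real Xen predicate, but cpuidmask_hostvals is an
assumed name, and the series deliberately does not take this approach, as
the follow-up discussion shows):

    /* Hypothetical: seed the hardware domain with the host's unmasked
     * values rather than the levelled defaults.  cpuidmask_hostvals is
     * an invented placeholder, not a variable in this series. */
    *d->arch.pv_domain.cpuidmasks = is_hardware_domain(d)
        ? cpuidmask_hostvals : cpuidmask_defaults;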
Andrew Cooper Feb. 17, 2016, 11:03 a.m. UTC | #2
On 17/02/16 08:13, Jan Beulich wrote:
>>>> On 05.02.16 at 14:42, <andrew.cooper3@citrix.com> wrote:
>> --- a/xen/arch/x86/cpu/amd.c
>> +++ b/xen/arch/x86/cpu/amd.c
>> @@ -208,7 +208,9 @@ static void __init noinline probe_masking_msrs(void)
>>  static void amd_ctxt_switch_levelling(const struct domain *nextd)
>>  {
>>  	struct cpuidmasks *these_masks = &this_cpu(cpuidmasks);
>> -	const struct cpuidmasks *masks = &cpuidmask_defaults;
>> +	const struct cpuidmasks *masks =
>> +            (nextd && is_pv_domain(nextd) && nextd->arch.pv_domain.cpuidmasks)
>> +            ? nextd->arch.pv_domain.cpuidmasks : &cpuidmask_defaults;
> Mixing tabs and spaces for indentation.
>
>> --- a/xen/arch/x86/domain.c
>> +++ b/xen/arch/x86/domain.c
>> @@ -574,6 +574,11 @@ int arch_domain_create(struct domain *d, unsigned int domcr_flags,
>>              goto fail;
>>          clear_page(d->arch.pv_domain.gdt_ldt_l1tab);
>>  
>> +        d->arch.pv_domain.cpuidmasks = xmalloc(struct cpuidmasks);
>> +        if ( !d->arch.pv_domain.cpuidmasks )
>> +            goto fail;
>> +        *d->arch.pv_domain.cpuidmasks = cpuidmask_defaults;
> Along the lines of not masking features for the hypervisor's own use
> (see the respective comment on the earlier patch) I think this patch,
> here or in domain_build.c, should except Dom0 from having the
> default masking applied. This shouldn't, however, extend to CPUID
> faulting. (Perhaps this rather belongs here so that the non-Dom0
> hardware domain case can also be taken care of.)

Very specifically not.  It is wrong to special case Dom0 and the
hardware domain, as their cpuid values should be relevant to their VM, not
the host.

The default cpuid policy is level with real hardware, so it's no practical
change at this point.

~Andrew
Jan Beulich Feb. 17, 2016, 11:14 a.m. UTC | #3
>>> On 17.02.16 at 12:03, <andrew.cooper3@citrix.com> wrote:
> On 17/02/16 08:13, Jan Beulich wrote:
>>>>> On 05.02.16 at 14:42, <andrew.cooper3@citrix.com> wrote:
>>> --- a/xen/arch/x86/cpu/amd.c
>>> +++ b/xen/arch/x86/cpu/amd.c
>>> @@ -208,7 +208,9 @@ static void __init noinline probe_masking_msrs(void)
>>>  static void amd_ctxt_switch_levelling(const struct domain *nextd)
>>>  {
>>>  	struct cpuidmasks *these_masks = &this_cpu(cpuidmasks);
>>> -	const struct cpuidmasks *masks = &cpuidmask_defaults;
>>> +	const struct cpuidmasks *masks =
>>> +            (nextd && is_pv_domain(nextd) && nextd->arch.pv_domain.cpuidmasks)
>>> +            ? nextd->arch.pv_domain.cpuidmasks : &cpuidmask_defaults;
>> Mixing tabs and spaces for indentation.
>>
>>> --- a/xen/arch/x86/domain.c
>>> +++ b/xen/arch/x86/domain.c
>>> @@ -574,6 +574,11 @@ int arch_domain_create(struct domain *d, unsigned int domcr_flags,
>>>              goto fail;
>>>          clear_page(d->arch.pv_domain.gdt_ldt_l1tab);
>>>  
>>> +        d->arch.pv_domain.cpuidmasks = xmalloc(struct cpuidmasks);
>>> +        if ( !d->arch.pv_domain.cpuidmasks )
>>> +            goto fail;
>>> +        *d->arch.pv_domain.cpuidmasks = cpuidmask_defaults;
>> Along the lines of not masking features for the hypervisor's own use
>> (see the respective comment on the earlier patch) I think this patch,
>> here or in domain_build.c, should except Dom0 from having the
>> default masking applied. This shouldn't, however, extend to CPUID
>> faulting. (Perhaps this rather belongs here so that the non-Dom0
>> hardware domain case can also be taken care of.)
> 
> Very specifically not.  It is wrong to special case Dom0 and the
> hardware domain, as their cpuid values should be relevant to their VM, not
> the host.

I can't see how this second half of the sentence is a reason for
not special casing Dom0.

> The default cpuid policy is level with real hardware, so it's no practical
> change at this point.

As long as no-one uses the then-deprecated command line options.

Jan
Andrew Cooper Feb. 18, 2016, 12:48 p.m. UTC | #4
On 17/02/16 11:14, Jan Beulich wrote:
>>>> On 17.02.16 at 12:03, <andrew.cooper3@citrix.com> wrote:
>> On 17/02/16 08:13, Jan Beulich wrote:
>>>>>> On 05.02.16 at 14:42, <andrew.cooper3@citrix.com> wrote:
>>>> --- a/xen/arch/x86/cpu/amd.c
>>>> +++ b/xen/arch/x86/cpu/amd.c
>>>> @@ -208,7 +208,9 @@ static void __init noinline probe_masking_msrs(void)
>>>>  static void amd_ctxt_switch_levelling(const struct domain *nextd)
>>>>  {
>>>>  	struct cpuidmasks *these_masks = &this_cpu(cpuidmasks);
>>>> -	const struct cpuidmasks *masks = &cpuidmask_defaults;
>>>> +	const struct cpuidmasks *masks =
>>>> +            (nextd && is_pv_domain(nextd) && nextd->arch.pv_domain.cpuidmasks)
>>>> +            ? nextd->arch.pv_domain.cpuidmasks : &cpuidmask_defaults;
>>> Mixing tabs and spaces for indentation.
>>>
>>>> --- a/xen/arch/x86/domain.c
>>>> +++ b/xen/arch/x86/domain.c
>>>> @@ -574,6 +574,11 @@ int arch_domain_create(struct domain *d, unsigned int domcr_flags,
>>>>              goto fail;
>>>>          clear_page(d->arch.pv_domain.gdt_ldt_l1tab);
>>>>  
>>>> +        d->arch.pv_domain.cpuidmasks = xmalloc(struct cpuidmasks);
>>>> +        if ( !d->arch.pv_domain.cpuidmasks )
>>>> +            goto fail;
>>>> +        *d->arch.pv_domain.cpuidmasks = cpuidmask_defaults;
>>> Along the lines of not masking features for the hypervisor's own use
>>> (see the respective comment on the earlier patch) I think this patch,
>>> here or in domain_build.c, should except Dom0 from having the
>>> default masking applied. This shouldn't, however, extend to CPUID
>>> faulting. (Perhaps this rather belongs here so that the non-Dom0
>>> hardware domain case can also be taken care of.)
>> Very specifically not.  It is wrong to special case Dom0 and the
>> hardware domain, as their cpuid values should be relevant to their VM, not
>> the host.
> I can't see how this second half of the sentence is a reason for
> not special casing Dom0.

Dom0 is just a VM which happens to have all the hardware by default.

It has the same requirements as all other VMs when it comes to cpuid;
most notably that it shouldn't see features which it can't use.  The
problem becomes far more obvious with an HVMLite dom0, running an
almost-native kernel.

~Andrew

Patch

diff --git a/xen/arch/x86/cpu/amd.c b/xen/arch/x86/cpu/amd.c
index 9d162bc..deb98ea 100644
--- a/xen/arch/x86/cpu/amd.c
+++ b/xen/arch/x86/cpu/amd.c
@@ -208,7 +208,9 @@  static void __init noinline probe_masking_msrs(void)
 static void amd_ctxt_switch_levelling(const struct domain *nextd)
 {
 	struct cpuidmasks *these_masks = &this_cpu(cpuidmasks);
-	const struct cpuidmasks *masks = &cpuidmask_defaults;
+	const struct cpuidmasks *masks =
+            (nextd && is_pv_domain(nextd) && nextd->arch.pv_domain.cpuidmasks)
+            ? nextd->arch.pv_domain.cpuidmasks : &cpuidmask_defaults;
 
 #define LAZY(cap, msr, field)						\
 	({								\
diff --git a/xen/arch/x86/cpu/intel.c b/xen/arch/x86/cpu/intel.c
index 95d44dd..b403af4 100644
--- a/xen/arch/x86/cpu/intel.c
+++ b/xen/arch/x86/cpu/intel.c
@@ -151,13 +151,16 @@  static void __init probe_masking_msrs(void)
 static void intel_ctxt_switch_levelling(const struct domain *nextd)
 {
 	struct cpuidmasks *these_masks = &this_cpu(cpuidmasks);
-	const struct cpuidmasks *masks = &cpuidmask_defaults;
+	const struct cpuidmasks *masks;
 
 	if (cpu_has_cpuid_faulting) {
 		set_cpuid_faulting(nextd && is_pv_domain(nextd));
 		return;
 	}
 
+	masks = (nextd && is_pv_domain(nextd) && nextd->arch.pv_domain.cpuidmasks)
+		? nextd->arch.pv_domain.cpuidmasks : &cpuidmask_defaults;
+
 #define LAZY(msr, field)						\
 	({								\
 		if (msr && (these_masks->field != masks->field))	\
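
Both LAZY() hunks are cut short by the three-line diff context; the visible
condition shows the pattern, namely that a masking MSR is written only when
the per-CPU cached value differs from the target.  The Intel variant
plausibly continues along these lines (a sketch reconstructed from the
visible context, not the verbatim macro body):

    #define LAZY(msr, field)                                         \
        ({                                                           \
            if (msr && (these_masks->field != masks->field))         \
            {                                                        \
                /* Update the MSR and the per-CPU cache together. */ \
                wrmsrl((msr), masks->field);                         \
                these_masks->field = masks->field;                   \
            }                                                        \
        })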
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index dbce90f..d7cd4d2 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -574,6 +574,11 @@  int arch_domain_create(struct domain *d, unsigned int domcr_flags,
             goto fail;
         clear_page(d->arch.pv_domain.gdt_ldt_l1tab);
 
+        d->arch.pv_domain.cpuidmasks = xmalloc(struct cpuidmasks);
+        if ( !d->arch.pv_domain.cpuidmasks )
+            goto fail;
+        *d->arch.pv_domain.cpuidmasks = cpuidmask_defaults;
+
         rc = create_perdomain_mapping(d, GDT_LDT_VIRT_START,
                                       GDT_LDT_MBYTES << (20 - PAGE_SHIFT),
                                       NULL, NULL);
@@ -663,7 +668,10 @@  int arch_domain_create(struct domain *d, unsigned int domcr_flags,
         paging_final_teardown(d);
     free_perdomain_mappings(d);
     if ( is_pv_domain(d) )
+    {
+        xfree(d->arch.pv_domain.cpuidmasks);
         free_xenheap_page(d->arch.pv_domain.gdt_ldt_l1tab);
+    }
     psr_domain_free(d);
     return rc;
 }
@@ -683,7 +691,10 @@  void arch_domain_destroy(struct domain *d)
 
     free_perdomain_mappings(d);
     if ( is_pv_domain(d) )
+    {
         free_xenheap_page(d->arch.pv_domain.gdt_ldt_l1tab);
+        xfree(d->arch.pv_domain.cpuidmasks);
+    }
 
     free_xenheap_page(d->shared_info);
     cleanup_domain_irq_mapping(d);
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index 4072e27..c464932 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -252,6 +252,8 @@  struct pv_domain
 
     /* map_domain_page() mapping cache. */
     struct mapcache_domain mapcache;
+
+    struct cpuidmasks *cpuidmasks;
 };
 
 struct monitor_write_data {