diff mbox

[v3,2/6] x86/msr: add VMX MSRs into struct msr_domain_policy

Message ID 1508139738.3378.1.camel@citrix.com (mailing list archive)
State New, archived
Headers show

Commit Message

Sergey Dyasli Oct. 16, 2017, 7:42 a.m. UTC
On Fri, 2017-10-13 at 16:16 +0100, Andrew Cooper wrote:
> On 13/10/17 13:35, Sergey Dyasli wrote:
> > @@ -210,6 +375,255 @@ struct msr_domain_policy
> >          bool available; /* This MSR is non-architectural */
> >          bool cpuid_faulting;
> >      } plaform_info;
> > +
> > +    /* 0x00000480  MSR_IA32_VMX_BASIC */
> > +    struct {
> > +        bool available;
> 
> We don't need available bits for any of these MSRs.  Their availability
> is cpuid->basic.vmx, and we don't want (let alone need) to duplicate
> information like this.

Andrew,

What do you think about the following way of checking the availability?


-- 
Thanks,
Sergey

Comments

Andrew Cooper Oct. 16, 2017, 2:01 p.m. UTC | #1
On 16/10/17 08:42, Sergey Dyasli wrote:
> On Fri, 2017-10-13 at 16:16 +0100, Andrew Cooper wrote:
>> On 13/10/17 13:35, Sergey Dyasli wrote:
>>> @@ -210,6 +375,255 @@ struct msr_domain_policy
>>>          bool available; /* This MSR is non-architectural */
>>>          bool cpuid_faulting;
>>>      } plaform_info;
>>> +
>>> +    /* 0x00000480  MSR_IA32_VMX_BASIC */
>>> +    struct {
>>> +        bool available;
>> We don't need available bits for any of these MSRs.  Their availability
>> is cpuid->basic.vmx, and we don't want (let alone need) to duplicate
>> information like this.
> Andrew,
>
> What do you think about the following way of checking the availability?

Preferably not.  You are duplicating a lot of information already
available in the guest_{rd,wr}msr(), and visually separating the
availability check from the data returned.  Worse, however, is that you
risk having a mismatch between the MSR ranges which fall into this
check, and those which are calculated by it.

>
> diff --git a/xen/arch/x86/msr.c b/xen/arch/x86/msr.c
> index 2527fdd1d1..828f1bb503 100644
> --- a/xen/arch/x86/msr.c
> +++ b/xen/arch/x86/msr.c
> @@ -33,6 +33,43 @@ struct msr_domain_policy __read_mostly     raw_msr_domain_policy,
>  struct msr_vcpu_policy __read_mostly hvm_max_msr_vcpu_policy,
>                         __read_mostly  pv_max_msr_vcpu_policy;
>  
> +bool msr_vmx_available(const struct domain *d, uint32_t msr)
> +{
> +    const struct msr_domain_policy *dp = d->arch.msr;
> +    bool secondary_available;
> +
> +    if ( !nestedhvm_enabled(d) || !d->arch.cpuid->basic.vmx )
> +        return false;

For now, we do need to double up the d->arch.cpuid->basic.vmx with
nestedhvm_enabled(d), but rest assured that nestedhvm_enabled(d) will be
disappearing in due course.  (It exists only because we don't have fine
grain toolstack control of CPUID/MSR values yet).

> +
> +    secondary_available =
> +        dp->vmx_procbased_ctls.u.allowed_1.activate_secondary_controls;
> +
> +    switch (msr)
> +    {
> +    case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMCS_ENUM:
> +        return true;
> +
> +    case MSR_IA32_VMX_PROCBASED_CTLS2:
> +        return secondary_available;
> +
> +    case MSR_IA32_VMX_EPT_VPID_CAP:
> +        return ( secondary_available &&
> +                 (dp->vmx_procbased_ctls2.u.allowed_1.enable_ept ||
> +                  dp->vmx_procbased_ctls2.u.allowed_1.enable_vpid) );

This check can be made more efficient in two ways.  First, use a bitwise
rather than logical or, which allows both _ept and _vpid to be tested
with a single instruction, rather than a conditional branch.

Secondly, the CPUID infrastructure has logic to flatten dependency
trees, so we don't need to encode logic paths like this.  In practice
however, you only read into the policy for details which match the
dependency tree, so you can drop the secondary_available check here, as
you know that if secondary_available is clear,
dp->vmx_procbased_ctls2.raw will be 0.

~Andrew

> +
> +    case MSR_IA32_VMX_TRUE_PINBASED_CTLS ... MSR_IA32_VMX_TRUE_ENTRY_CTLS:
> +        return dp->vmx_basic.u.default1_zero;
> +
> +    case MSR_IA32_VMX_VMFUNC:
> +        return ( secondary_available &&
> +                 dp->vmx_procbased_ctls2.u.allowed_1.enable_vm_functions );
> +
> +    default: break;
> +    }
> +
> +    return false;
> +}
> +
>  static void __init calculate_raw_vmx_policy(struct msr_domain_policy *dp)
>  {
>      if ( !cpu_has_vmx )
>
Sergey Dyasli Oct. 18, 2017, 7:30 a.m. UTC | #2
On Mon, 2017-10-16 at 15:01 +0100, Andrew Cooper wrote:
> On 16/10/17 08:42, Sergey Dyasli wrote:

> > +

> > +    secondary_available =

> > +        dp->vmx_procbased_ctls.u.allowed_1.activate_secondary_controls;

> > +

> > +    switch (msr)

> > +    {

> > +    case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMCS_ENUM:

> > +        return true;

> > +

> > +    case MSR_IA32_VMX_PROCBASED_CTLS2:

> > +        return secondary_available;

> > +

> > +    case MSR_IA32_VMX_EPT_VPID_CAP:

> > +        return ( secondary_available &&

> > +                 (dp->vmx_procbased_ctls2.u.allowed_1.enable_ept ||

> > +                  dp->vmx_procbased_ctls2.u.allowed_1.enable_vpid) );

> 

> This check can be made more efficient in two ways.  First, use a bitwise

> rather than logical or, which allows both _ept and _vpid to be tested

> with a single instruction, rather than a conditional branch.


But it's the compiler's job to optimize conditions like that.
I'm getting the following asm:

            if ( dp->vmx_procbased_ctls2.allowed_1.enable_ept ||
ffff82d08027bc3d:       48 c1 e8 20             shr    $0x20,%rax
ffff82d08027bc41:       a8 22                   test   $0x22,%al
ffff82d08027bc43:       74 0d                   je     ffff82d08027bc52 <recalculate_domain_vmx_msr_policy+0x196>

And "test   $0x22" is exactly the test for "enable_ept || enable_vpid"
with a single instruction.

-- 
Thanks,
Sergey
diff mbox

Patch

diff --git a/xen/arch/x86/msr.c b/xen/arch/x86/msr.c
index 2527fdd1d1..828f1bb503 100644
--- a/xen/arch/x86/msr.c
+++ b/xen/arch/x86/msr.c
@@ -33,6 +33,43 @@  struct msr_domain_policy __read_mostly     raw_msr_domain_policy,
 struct msr_vcpu_policy __read_mostly hvm_max_msr_vcpu_policy,
                        __read_mostly  pv_max_msr_vcpu_policy;
 
+bool msr_vmx_available(const struct domain *d, uint32_t msr)
+{
+    const struct msr_domain_policy *dp = d->arch.msr;
+    bool secondary_available;
+
+    if ( !nestedhvm_enabled(d) || !d->arch.cpuid->basic.vmx )
+        return false;
+
+    secondary_available =
+        dp->vmx_procbased_ctls.u.allowed_1.activate_secondary_controls;
+
+    switch (msr)
+    {
+    case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMCS_ENUM:
+        return true;
+
+    case MSR_IA32_VMX_PROCBASED_CTLS2:
+        return secondary_available;
+
+    case MSR_IA32_VMX_EPT_VPID_CAP:
+        return ( secondary_available &&
+                 (dp->vmx_procbased_ctls2.u.allowed_1.enable_ept ||
+                  dp->vmx_procbased_ctls2.u.allowed_1.enable_vpid) );
+
+    case MSR_IA32_VMX_TRUE_PINBASED_CTLS ... MSR_IA32_VMX_TRUE_ENTRY_CTLS:
+        return dp->vmx_basic.u.default1_zero;
+
+    case MSR_IA32_VMX_VMFUNC:
+        return ( secondary_available &&
+                 dp->vmx_procbased_ctls2.u.allowed_1.enable_vm_functions );
+
+    default: break;
+    }
+
+    return false;
+}
+
 static void __init calculate_raw_vmx_policy(struct msr_domain_policy *dp)
 {
     if ( !cpu_has_vmx )