diff mbox

[v4,4/7] x86/msr: add VMX MSRs into HVM_max domain policy

Message ID 20171018082722.6279-5-sergey.dyasli@citrix.com (mailing list archive)
State New, archived
Headers show

Commit Message

Sergey Dyasli Oct. 18, 2017, 8:27 a.m. UTC
Currently, when nested virt is enabled, the set of L1 VMX features
is fixed and is calculated by nvmx_msr_read_intercept() as the
intersection of the full set of L1 VMX features that Xen supports, the
set of features available on the actual H/W and, for
MSR_IA32_VMX_EPT_VPID_CAP, the set of features that Xen itself uses.

Add calculate_hvm_max_vmx_policy() which will save the end result of
nvmx_msr_read_intercept() on the current H/W into the HVM_max domain
policy.
There will be no functional change to what L1 sees in VMX MSRs. But the
actual use of HVM_max domain policy will happen later, when VMX MSRs
are handled by guest_rd/wrmsr().

Signed-off-by: Sergey Dyasli <sergey.dyasli@citrix.com>
---
 xen/arch/x86/msr.c | 129 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 129 insertions(+)
diff mbox

Patch

diff --git a/xen/arch/x86/msr.c b/xen/arch/x86/msr.c
index 7ac0fceb49..ff270befbb 100644
--- a/xen/arch/x86/msr.c
+++ b/xen/arch/x86/msr.c
@@ -85,6 +85,133 @@  static void __init calculate_host_policy(void)
     *dp = raw_msr_domain_policy;
 }
 
+#define vmx_host_allowed_cpy(dp, msr, field)                            \
+    do {                                                                \
+        dp->msr.allowed_1.field =                                       \
+            host_msr_domain_policy.msr.allowed_1.field;                 \
+        dp->msr.allowed_0.field =                                       \
+            host_msr_domain_policy.msr.allowed_0.field;                 \
+    } while (0)
+
+#define vmx_host_allowed_cpyb(dp, block, msr, field)                    \
+    do {                                                                \
+        dp->block.msr.allowed_1.field =                                 \
+            host_msr_domain_policy.block.msr.allowed_1.field;           \
+        dp->block.msr.allowed_0.field =                                 \
+            host_msr_domain_policy.block.msr.allowed_0.field;           \
+    } while (0)
+
+static void __init calculate_hvm_max_vmx_policy(struct msr_domain_policy *dp)
+{
+    if ( !cpu_has_vmx )
+        return;
+
+    dp->vmx.basic.raw = host_msr_domain_policy.vmx.basic.raw;
+
+    dp->vmx.pinbased_ctls.raw = ((uint64_t) VMX_PINBASED_CTLS_DEFAULT1 << 32) |
+                                            VMX_PINBASED_CTLS_DEFAULT1;
+    vmx_host_allowed_cpyb(dp, vmx, pinbased_ctls, ext_intr_exiting);
+    vmx_host_allowed_cpyb(dp, vmx, pinbased_ctls, nmi_exiting);
+    vmx_host_allowed_cpyb(dp, vmx, pinbased_ctls, preempt_timer);
+
+    dp->vmx.procbased_ctls.raw =
+        ((uint64_t) VMX_PROCBASED_CTLS_DEFAULT1 << 32) |
+                    VMX_PROCBASED_CTLS_DEFAULT1;
+    vmx_host_allowed_cpyb(dp, vmx, procbased_ctls, virtual_intr_pending);
+    vmx_host_allowed_cpyb(dp, vmx, procbased_ctls, use_tsc_offseting);
+    vmx_host_allowed_cpyb(dp, vmx, procbased_ctls, hlt_exiting);
+    vmx_host_allowed_cpyb(dp, vmx, procbased_ctls, invlpg_exiting);
+    vmx_host_allowed_cpyb(dp, vmx, procbased_ctls, mwait_exiting);
+    vmx_host_allowed_cpyb(dp, vmx, procbased_ctls, rdpmc_exiting);
+    vmx_host_allowed_cpyb(dp, vmx, procbased_ctls, rdtsc_exiting);
+    vmx_host_allowed_cpyb(dp, vmx, procbased_ctls, cr8_load_exiting);
+    vmx_host_allowed_cpyb(dp, vmx, procbased_ctls, cr8_store_exiting);
+    vmx_host_allowed_cpyb(dp, vmx, procbased_ctls, tpr_shadow);
+    vmx_host_allowed_cpyb(dp, vmx, procbased_ctls, virtual_nmi_pending);
+    vmx_host_allowed_cpyb(dp, vmx, procbased_ctls, mov_dr_exiting);
+    vmx_host_allowed_cpyb(dp, vmx, procbased_ctls, uncond_io_exiting);
+    vmx_host_allowed_cpyb(dp, vmx, procbased_ctls, activate_io_bitmap);
+    vmx_host_allowed_cpyb(dp, vmx, procbased_ctls, monitor_trap_flag);
+    vmx_host_allowed_cpyb(dp, vmx, procbased_ctls, activate_msr_bitmap);
+    vmx_host_allowed_cpyb(dp, vmx, procbased_ctls, monitor_exiting);
+    vmx_host_allowed_cpyb(dp, vmx, procbased_ctls, pause_exiting);
+    vmx_host_allowed_cpyb(dp, vmx, procbased_ctls, activate_secondary_controls);
+
+    dp->vmx.exit_ctls.raw = ((uint64_t) VMX_EXIT_CTLS_DEFAULT1 << 32) |
+                                        VMX_EXIT_CTLS_DEFAULT1;
+    vmx_host_allowed_cpyb(dp, vmx, exit_ctls, ia32e_mode);
+    vmx_host_allowed_cpyb(dp, vmx, exit_ctls, load_perf_global_ctrl);
+    vmx_host_allowed_cpyb(dp, vmx, exit_ctls, ack_intr_on_exit);
+    vmx_host_allowed_cpyb(dp, vmx, exit_ctls, save_guest_pat);
+    vmx_host_allowed_cpyb(dp, vmx, exit_ctls, load_host_pat);
+    vmx_host_allowed_cpyb(dp, vmx, exit_ctls, save_guest_efer);
+    vmx_host_allowed_cpyb(dp, vmx, exit_ctls, load_host_efer);
+    vmx_host_allowed_cpyb(dp, vmx, exit_ctls, save_preempt_timer);
+
+    dp->vmx.entry_ctls.raw = ((uint64_t) VMX_ENTRY_CTLS_DEFAULT1 << 32) |
+                                         VMX_ENTRY_CTLS_DEFAULT1;
+    vmx_host_allowed_cpyb(dp, vmx, entry_ctls, ia32e_mode);
+    vmx_host_allowed_cpyb(dp, vmx, entry_ctls, load_perf_global_ctrl);
+    vmx_host_allowed_cpyb(dp, vmx, entry_ctls, load_guest_pat);
+    vmx_host_allowed_cpyb(dp, vmx, entry_ctls, load_guest_efer);
+
+    dp->vmx.misc.raw = host_msr_domain_policy.vmx.misc.raw;
+    /* Do not support CR3-target feature now */
+    dp->vmx.misc.cr3_target = false;
+
+    /* PG, PE bits must be 1 in VMX operation */
+    dp->vmx.cr0_fixed0.allowed_0.pe = true;
+    dp->vmx.cr0_fixed0.allowed_0.pg = true;
+
+    /* allow 0-settings for all bits */
+    dp->vmx.cr0_fixed1.raw = 0xffffffff;
+
+    /* VMXE bit must be 1 in VMX operation */
+    dp->vmx.cr4_fixed0.allowed_0.vmxe = true;
+
+    /*
+     * Allowed CR4 bits will be updated during domain creation by
+     * hvm_cr4_guest_valid_bits()
+     */
+    dp->vmx.cr4_fixed1.raw = host_msr_domain_policy.vmx.cr4_fixed1.raw;
+
+    /* The max index of VVMCS encoding is 0x1f. */
+    dp->vmx.vmcs_enum.vmcs_encoding_max_idx = 0x1f;
+
+    if ( dp->vmx.procbased_ctls.allowed_1.activate_secondary_controls )
+    {
+        vmx_host_allowed_cpy(dp, vmx_procbased_ctls2, virtualize_apic_accesses);
+        vmx_host_allowed_cpy(dp, vmx_procbased_ctls2, enable_ept);
+        vmx_host_allowed_cpy(dp, vmx_procbased_ctls2, descriptor_table_exiting);
+        vmx_host_allowed_cpy(dp, vmx_procbased_ctls2, enable_vpid);
+        vmx_host_allowed_cpy(dp, vmx_procbased_ctls2, unrestricted_guest);
+
+        if ( dp->vmx_procbased_ctls2.allowed_1.enable_ept ||
+             dp->vmx_procbased_ctls2.allowed_1.enable_vpid )
+        {
+            dp->vmx_ept_vpid_cap.raw = nept_get_ept_vpid_cap();
+        }
+    }
+
+    if ( dp->vmx.basic.default1_zero )
+    {
+        dp->vmx_true_ctls.pinbased.raw = dp->vmx.pinbased_ctls.raw;
+
+        dp->vmx_true_ctls.procbased.raw = dp->vmx.procbased_ctls.raw;
+        vmx_host_allowed_cpyb(dp, vmx_true_ctls, procbased, cr3_load_exiting);
+        vmx_host_allowed_cpyb(dp, vmx_true_ctls, procbased, cr3_store_exiting);
+
+        dp->vmx_true_ctls.exit.raw = dp->vmx.exit_ctls.raw;
+
+        dp->vmx_true_ctls.entry.raw = dp->vmx.entry_ctls.raw;
+    }
+
+    /* MSR_IA32_VMX_VMFUNC is N/A */
+}
+
+#undef vmx_host_allowed_cpy
+#undef vmx_host_allowed_cpyb
+
 static void __init calculate_hvm_max_policy(void)
 {
     struct msr_domain_policy *dp = &hvm_max_msr_domain_policy;
@@ -102,6 +229,8 @@  static void __init calculate_hvm_max_policy(void)
 
     /* 0x00000140  MSR_INTEL_MISC_FEATURES_ENABLES */
     vp->misc_features_enables.available = dp->plaform_info.available;
+
+    calculate_hvm_max_vmx_policy(dp);
 }
 
 static void __init calculate_pv_max_policy(void)