@@ -2906,10 +2906,6 @@ static int vmx_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
if ( nestedhvm_enabled(curr->domain) )
*msr_content |= IA32_FEATURE_CONTROL_ENABLE_VMXON_OUTSIDE_SMX;
break;
- case MSR_IA32_VMX_BASIC...MSR_IA32_VMX_VMFUNC:
- if ( !nvmx_msr_read_intercept(msr, msr_content) )
- goto gp_fault;
- break;
case MSR_IA32_MISC_ENABLE:
rdmsrl(MSR_IA32_MISC_ENABLE, *msr_content);
/* Debug Trace Store is not supported. */
@@ -3133,8 +3129,6 @@ static int vmx_msr_write_intercept(unsigned int msr, uint64_t msr_content)
break;
}
case MSR_IA32_FEATURE_CONTROL:
- case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
- /* None of these MSRs are writeable. */
goto gp_fault;
case MSR_P6_PERFCTR(0)...MSR_P6_PERFCTR(7):
@@ -1976,184 +1976,6 @@ int nvmx_handle_invvpid(struct cpu_user_regs *regs)
return X86EMUL_OKAY;
}
-#define __emul_value(enable1, default1) \
- ((enable1 | default1) << 32 | (default1))
-
-#define gen_vmx_msr(enable1, default1, host_value) \
- (((__emul_value(enable1, default1) & host_value) & (~0ul << 32)) | \
- ((uint32_t)(__emul_value(enable1, default1) | host_value)))
-
-/*
- * Capability reporting
- */
-int nvmx_msr_read_intercept(unsigned int msr, u64 *msr_content)
-{
- struct vcpu *v = current;
- struct domain *d = v->domain;
- u64 data = 0, host_data = 0;
- int r = 1;
-
- /* VMX capablity MSRs are available only when guest supports VMX. */
- if ( !nestedhvm_enabled(d) || !d->arch.cpuid->basic.vmx )
- return 0;
-
- /*
- * These MSRs are only available when flags in other MSRs are set.
- * These prerequisites are listed in the Intel 64 and IA-32
- * Architectures Software Developer’s Manual, Vol 3, Appendix A.
- */
- switch ( msr )
- {
- case MSR_IA32_VMX_PROCBASED_CTLS2:
- if ( !cpu_has_vmx_secondary_exec_control )
- return 0;
- break;
-
- case MSR_IA32_VMX_EPT_VPID_CAP:
- if ( !(cpu_has_vmx_ept || cpu_has_vmx_vpid) )
- return 0;
- break;
-
- case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
- case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
- case MSR_IA32_VMX_TRUE_EXIT_CTLS:
- case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
- if ( !(vmx_basic_msr & VMX_BASIC_DEFAULT1_ZERO) )
- return 0;
- break;
-
- case MSR_IA32_VMX_VMFUNC:
- if ( !cpu_has_vmx_vmfunc )
- return 0;
- break;
- }
-
- rdmsrl(msr, host_data);
-
- /*
- * Remove unsupport features from n1 guest capability MSR
- */
- switch (msr) {
- case MSR_IA32_VMX_BASIC:
- {
- const struct vmcs_struct *vmcs =
- map_domain_page(_mfn(PFN_DOWN(v->arch.hvm_vmx.vmcs_pa)));
-
- data = (host_data & (~0ul << 32)) |
- (vmcs->vmcs_revision_id & 0x7fffffff);
- unmap_domain_page(vmcs);
- break;
- }
- case MSR_IA32_VMX_PINBASED_CTLS:
- case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
- /* 1-settings */
- data = PIN_BASED_EXT_INTR_MASK |
- PIN_BASED_NMI_EXITING |
- PIN_BASED_PREEMPT_TIMER;
- data = gen_vmx_msr(data, VMX_PINBASED_CTLS_DEFAULT1, host_data);
- break;
- case MSR_IA32_VMX_PROCBASED_CTLS:
- case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
- {
- u32 default1_bits = VMX_PROCBASED_CTLS_DEFAULT1;
- /* 1-settings */
- data = CPU_BASED_HLT_EXITING |
- CPU_BASED_VIRTUAL_INTR_PENDING |
- CPU_BASED_CR8_LOAD_EXITING |
- CPU_BASED_CR8_STORE_EXITING |
- CPU_BASED_INVLPG_EXITING |
- CPU_BASED_CR3_LOAD_EXITING |
- CPU_BASED_CR3_STORE_EXITING |
- CPU_BASED_MONITOR_EXITING |
- CPU_BASED_MWAIT_EXITING |
- CPU_BASED_MOV_DR_EXITING |
- CPU_BASED_ACTIVATE_IO_BITMAP |
- CPU_BASED_USE_TSC_OFFSETING |
- CPU_BASED_UNCOND_IO_EXITING |
- CPU_BASED_RDTSC_EXITING |
- CPU_BASED_MONITOR_TRAP_FLAG |
- CPU_BASED_VIRTUAL_NMI_PENDING |
- CPU_BASED_ACTIVATE_MSR_BITMAP |
- CPU_BASED_PAUSE_EXITING |
- CPU_BASED_RDPMC_EXITING |
- CPU_BASED_TPR_SHADOW |
- CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
-
- if ( msr == MSR_IA32_VMX_TRUE_PROCBASED_CTLS )
- default1_bits &= ~(CPU_BASED_CR3_LOAD_EXITING |
- CPU_BASED_CR3_STORE_EXITING |
- CPU_BASED_INVLPG_EXITING);
-
- data = gen_vmx_msr(data, default1_bits, host_data);
- break;
- }
- case MSR_IA32_VMX_PROCBASED_CTLS2:
- /* 1-settings */
- data = SECONDARY_EXEC_DESCRIPTOR_TABLE_EXITING |
- SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
- SECONDARY_EXEC_ENABLE_VPID |
- SECONDARY_EXEC_UNRESTRICTED_GUEST |
- SECONDARY_EXEC_ENABLE_EPT;
- data = gen_vmx_msr(data, 0, host_data);
- break;
- case MSR_IA32_VMX_EXIT_CTLS:
- case MSR_IA32_VMX_TRUE_EXIT_CTLS:
- /* 1-settings */
- data = VM_EXIT_ACK_INTR_ON_EXIT |
- VM_EXIT_IA32E_MODE |
- VM_EXIT_SAVE_PREEMPT_TIMER |
- VM_EXIT_SAVE_GUEST_PAT |
- VM_EXIT_LOAD_HOST_PAT |
- VM_EXIT_SAVE_GUEST_EFER |
- VM_EXIT_LOAD_HOST_EFER |
- VM_EXIT_LOAD_PERF_GLOBAL_CTRL;
- data = gen_vmx_msr(data, VMX_EXIT_CTLS_DEFAULT1, host_data);
- break;
- case MSR_IA32_VMX_ENTRY_CTLS:
- case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
- /* 1-settings */
- data = VM_ENTRY_LOAD_GUEST_PAT |
- VM_ENTRY_LOAD_GUEST_EFER |
- VM_ENTRY_LOAD_PERF_GLOBAL_CTRL |
- VM_ENTRY_IA32E_MODE;
- data = gen_vmx_msr(data, VMX_ENTRY_CTLS_DEFAULT1, host_data);
- break;
-
- case MSR_IA32_VMX_VMCS_ENUM:
- /* The max index of VVMCS encoding is 0x1f. */
- data = 0x1f << 1;
- break;
- case MSR_IA32_VMX_CR0_FIXED0:
- /* PG, PE bits must be 1 in VMX operation */
- data = X86_CR0_PE | X86_CR0_PG;
- break;
- case MSR_IA32_VMX_CR0_FIXED1:
- /* allow 0-settings for all bits */
- data = 0xffffffff;
- break;
- case MSR_IA32_VMX_CR4_FIXED0:
- /* VMXE bit must be 1 in VMX operation */
- data = X86_CR4_VMXE;
- break;
- case MSR_IA32_VMX_CR4_FIXED1:
- data = hvm_cr4_guest_valid_bits(v, 0);
- break;
- case MSR_IA32_VMX_MISC:
- /* Do not support CR3-target feature now */
- data = host_data & ~VMX_MISC_CR3_TARGET;
- break;
- case MSR_IA32_VMX_EPT_VPID_CAP:
- data = nept_get_ept_vpid_cap();
- break;
- default:
- r = 0;
- break;
- }
-
- *msr_content = data;
- return r;
-}
-
/* This function uses L2_gpa to walk the P2M page table in L1. If the
* walk is successful, the translated value is returned in
* L1_gpa. The result value tells what to do next.
@@ -426,6 +426,13 @@ int init_vcpu_msr_policy(struct vcpu *v)
return 0;
}
+#define vmx_guest_rdmsr(dp, name, msr) \
+ case name: \
+ if ( !dp->msr.available ) \
+ goto gp_fault; \
+ *val = dp->msr.u.raw; \
+ break;
+
int guest_rdmsr(const struct vcpu *v, uint32_t msr, uint64_t *val)
{
const struct msr_domain_policy *dp = v->domain->arch.msr;
@@ -447,6 +454,27 @@ int guest_rdmsr(const struct vcpu *v, uint32_t msr, uint64_t *val)
_MSR_MISC_FEATURES_CPUID_FAULTING;
break;
+ vmx_guest_rdmsr(dp, MSR_IA32_VMX_BASIC, vmx_basic);
+ vmx_guest_rdmsr(dp, MSR_IA32_VMX_PINBASED_CTLS, vmx_pinbased_ctls);
+ vmx_guest_rdmsr(dp, MSR_IA32_VMX_PROCBASED_CTLS, vmx_procbased_ctls);
+ vmx_guest_rdmsr(dp, MSR_IA32_VMX_EXIT_CTLS, vmx_exit_ctls);
+ vmx_guest_rdmsr(dp, MSR_IA32_VMX_ENTRY_CTLS, vmx_entry_ctls);
+ vmx_guest_rdmsr(dp, MSR_IA32_VMX_MISC, vmx_misc);
+ vmx_guest_rdmsr(dp, MSR_IA32_VMX_CR0_FIXED0, vmx_cr0_fixed0);
+ vmx_guest_rdmsr(dp, MSR_IA32_VMX_CR0_FIXED1, vmx_cr0_fixed1);
+ vmx_guest_rdmsr(dp, MSR_IA32_VMX_CR4_FIXED0, vmx_cr4_fixed0);
+ vmx_guest_rdmsr(dp, MSR_IA32_VMX_CR4_FIXED1, vmx_cr4_fixed1);
+ vmx_guest_rdmsr(dp, MSR_IA32_VMX_VMCS_ENUM, vmx_vmcs_enum);
+ vmx_guest_rdmsr(dp, MSR_IA32_VMX_PROCBASED_CTLS2, vmx_procbased_ctls2);
+ vmx_guest_rdmsr(dp, MSR_IA32_VMX_EPT_VPID_CAP, vmx_ept_vpid_cap);
+ vmx_guest_rdmsr(dp, MSR_IA32_VMX_TRUE_PINBASED_CTLS,
+ vmx_true_pinbased_ctls);
+ vmx_guest_rdmsr(dp, MSR_IA32_VMX_TRUE_PROCBASED_CTLS,
+ vmx_true_procbased_ctls);
+ vmx_guest_rdmsr(dp, MSR_IA32_VMX_TRUE_EXIT_CTLS, vmx_true_exit_ctls);
+ vmx_guest_rdmsr(dp, MSR_IA32_VMX_TRUE_ENTRY_CTLS, vmx_true_entry_ctls);
+ vmx_guest_rdmsr(dp, MSR_IA32_VMX_VMFUNC, vmx_vmfunc);
+
default:
return X86EMUL_UNHANDLEABLE;
}
@@ -457,6 +485,8 @@ int guest_rdmsr(const struct vcpu *v, uint32_t msr, uint64_t *val)
return X86EMUL_EXCEPTION;
}
+#undef vmx_guest_rdmsr
+
int guest_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val)
{
struct domain *d = v->domain;
@@ -491,6 +521,10 @@ int guest_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val)
break;
}
+ case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
+ /* None of these MSRs are writeable. */
+ goto gp_fault;
+
default:
return X86EMUL_UNHANDLEABLE;
}
@@ -222,8 +222,6 @@ int nvmx_handle_vmresume(struct cpu_user_regs *regs);
int nvmx_handle_vmlaunch(struct cpu_user_regs *regs);
int nvmx_handle_invept(struct cpu_user_regs *regs);
int nvmx_handle_invvpid(struct cpu_user_regs *regs);
-int nvmx_msr_read_intercept(unsigned int msr,
- u64 *msr_content);
void nvmx_update_exec_control(struct vcpu *v, u32 value);
void nvmx_update_secondary_exec_control(struct vcpu *v,
Now that each domain has a correct view of VMX MSRs in its per-domain
MSR policy, it is possible to handle a guest's RDMSR/WRMSR accesses with
the new handlers. Do it and remove the old nvmx_msr_read_intercept() and
associated bits. There is no functional change to what a guest sees in
the VMX MSRs.

Signed-off-by: Sergey Dyasli <sergey.dyasli@citrix.com>
---
 xen/arch/x86/hvm/vmx/vmx.c         |   6 --
 xen/arch/x86/hvm/vmx/vvmx.c        | 178 -------------------------------------
 xen/arch/x86/msr.c                 |  34 +++++++
 xen/include/asm-x86/hvm/vmx/vvmx.h |   2 -
 4 files changed, 34 insertions(+), 186 deletions(-)
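
Note for reviewers (not part of the patch): each vmx_guest_rdmsr()
invocation in the guest_rdmsr() hunk above is a mechanical macro
expansion, so no behaviour is hidden behind the macro. For example, the
MSR_IA32_VMX_BASIC line expands to the equivalent of the following
switch case (all identifiers are as in the hunk above):

    case MSR_IA32_VMX_BASIC:
        /* #GP if the domain's policy marks this VMX MSR unavailable */
        if ( !dp->vmx_basic.available )
            goto gp_fault;
        /* Otherwise return the policy's raw 64-bit value */
        *val = dp->vmx_basic.u.raw;
        break;

The write path is simpler: the whole MSR_IA32_VMX_BASIC ...
MSR_IA32_VMX_VMFUNC range in guest_wrmsr() faults unconditionally,
since none of the VMX capability MSRs are writeable.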