--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3465,7 +3465,7 @@ int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content,
{
struct vcpu *v = current;
struct domain *d = v->domain;
- int ret = X86EMUL_OKAY;
+ int ret;
HVMTRACE_3D(MSR_WRITE, msr,
(uint32_t)msr_content, (uint32_t)(msr_content >> 32));
@@ -3483,6 +3483,11 @@ int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content,
return X86EMUL_OKAY;
}
+ if ( (ret = guest_wrmsr(v, msr, msr_content)) != X86EMUL_UNHANDLEABLE )
+ return ret;
+ else
+ ret = X86EMUL_OKAY;
+
switch ( msr )
{
unsigned int index;
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -3116,29 +3116,6 @@ static int vmx_msr_write_intercept(unsigned int msr, uint64_t msr_content)
goto gp_fault;
break;
- case MSR_INTEL_PLATFORM_INFO:
- if ( msr_content ||
- rdmsr_safe(MSR_INTEL_PLATFORM_INFO, msr_content) )
- goto gp_fault;
- break;
-
- case MSR_INTEL_MISC_FEATURES_ENABLES:
- {
- struct msr_vcpu_policy *vp = v->arch.msr;
- bool old_cpuid_faulting = vp->misc_features_enables.cpuid_faulting;
-
- if ( msr_content & ~MSR_MISC_FEATURES_CPUID_FAULTING )
- goto gp_fault;
-
- vp->misc_features_enables.cpuid_faulting =
- msr_content & MSR_MISC_FEATURES_CPUID_FAULTING;
-
- if ( cpu_has_cpuid_faulting &&
- (old_cpuid_faulting ^ vp->misc_features_enables.cpuid_faulting) )
- ctxt_switch_levelling(v);
- break;
- }
-
default:
if ( passive_domain_do_wrmsr(msr, msr_content) )
return X86EMUL_OKAY;
--- a/xen/arch/x86/msr.c
+++ b/xen/arch/x86/msr.c
@@ -148,6 +148,50 @@ int guest_rdmsr(const struct vcpu *v, uint32_t msr, uint64_t *val)
return X86EMUL_EXCEPTION;
}
+int guest_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val)
+{
+ struct domain *d = v->domain;
+ struct msr_domain_policy *dp = d->arch.msr;
+ struct msr_vcpu_policy *vp = v->arch.msr;
+
+ switch ( msr )
+ {
+ case MSR_INTEL_PLATFORM_INFO:
+ goto gp_fault;
+
+ case MSR_INTEL_MISC_FEATURES_ENABLES:
+ {
+ uint64_t rsvd = ~0ull;
+ bool old_cpuid_faulting = vp->misc_features_enables.cpuid_faulting;
+
+ if ( !vp->misc_features_enables.available )
+ goto gp_fault;
+
+ if ( dp->plaform_info.cpuid_faulting )
+ rsvd &= ~MSR_MISC_FEATURES_CPUID_FAULTING;
+
+ if ( val & rsvd )
+ goto gp_fault;
+
+ vp->misc_features_enables.cpuid_faulting =
+ val & MSR_MISC_FEATURES_CPUID_FAULTING;
+
+ if ( is_hvm_domain(d) && cpu_has_cpuid_faulting &&
+ (old_cpuid_faulting ^ vp->misc_features_enables.cpuid_faulting) )
+ ctxt_switch_levelling(v);
+ break;
+ }
+
+ default:
+ return X86EMUL_UNHANDLEABLE;
+ }
+
+ return X86EMUL_OKAY;
+
+ gp_fault:
+ return X86EMUL_EXCEPTION;
+}
+
/*
* Local variables:
* mode: C
--- a/xen/arch/x86/pv/emul-priv-op.c
+++ b/xen/arch/x86/pv/emul-priv-op.c
@@ -983,6 +983,10 @@ static int priv_op_write_msr(unsigned int reg, uint64_t val,
struct vcpu *curr = current;
const struct domain *currd = curr->domain;
bool vpmu_msr = false;
+ int ret;
+
+ if ( (ret = guest_wrmsr(curr, reg, val)) != X86EMUL_UNHANDLEABLE )
+ return ret;
switch ( reg )
{
@@ -1126,24 +1130,6 @@ static int priv_op_write_msr(unsigned int reg, uint64_t val,
wrmsrl(reg, val);
return X86EMUL_OKAY;
- case MSR_INTEL_PLATFORM_INFO:
- if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
- val || rdmsr_safe(MSR_INTEL_PLATFORM_INFO, val) )
- break;
- return X86EMUL_OKAY;
-
- case MSR_INTEL_MISC_FEATURES_ENABLES:
- if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
- (val & ~MSR_MISC_FEATURES_CPUID_FAULTING) ||
- rdmsr_safe(MSR_INTEL_MISC_FEATURES_ENABLES, temp) )
- break;
- if ( (val & MSR_MISC_FEATURES_CPUID_FAULTING) &&
- !this_cpu(cpuid_faulting_enabled) )
- break;
- curr->arch.msr->misc_features_enables.cpuid_faulting =
- !!(val & MSR_MISC_FEATURES_CPUID_FAULTING);
- return X86EMUL_OKAY;
-
case MSR_P6_PERFCTR(0) ... MSR_P6_PERFCTR(7):
case MSR_P6_EVNTSEL(0) ... MSR_P6_EVNTSEL(3):
case MSR_CORE_PERF_FIXED_CTR0 ... MSR_CORE_PERF_FIXED_CTR2:
--- a/xen/include/asm-x86/msr.h
+++ b/xen/include/asm-x86/msr.h
@@ -233,6 +233,7 @@ int init_vcpu_msr_policy(struct vcpu *v);
* by the new MSR infrastructure.
*/
int guest_rdmsr(const struct vcpu *v, uint32_t msr, uint64_t *val);
+int guest_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val);
#endif /* !__ASSEMBLY__ */

The new function is responsible for handling WRMSR from both HVM and PV
guests. Currently it handles only 2 MSRs:

    MSR_INTEL_PLATFORM_INFO
    MSR_INTEL_MISC_FEATURES_ENABLES

Its behaviour differs from the old MSR handlers: if an MSR is handled by
guest_wrmsr(), the WRMSR will either succeed (if the guest is allowed to
access it and provided a correct value based on its MSR policy) or
produce a GP fault. A guest will never see a successful WRMSR of an MSR
that is unknown to this function.

guest_wrmsr() unifies and replaces the handling code from
vmx_msr_write_intercept() and priv_op_write_msr().

Signed-off-by: Sergey Dyasli <sergey.dyasli@citrix.com>
---
 xen/arch/x86/hvm/hvm.c         |  7 ++++++-
 xen/arch/x86/hvm/vmx/vmx.c     | 23 -----------------------
 xen/arch/x86/msr.c             | 44 ++++++++++++++++++++++++++++++++++++++++++++++
 xen/arch/x86/pv/emul-priv-op.c | 22 ++++------------------
 xen/include/asm-x86/msr.h      |  1 +
 5 files changed, 55 insertions(+), 42 deletions(-)
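
For readers following the new dispatch flow, here is a minimal caller-side
sketch of the pattern the patch introduces: every WRMSR intercept tries
guest_wrmsr() first and falls through to its legacy per-MSR switch only on
X86EMUL_UNHANDLEABLE. The names example_msr_write_path() and
legacy_wrmsr_handler() are hypothetical, used purely for illustration; the
real callers are hvm_msr_write_intercept() and priv_op_write_msr() as shown
in the hunks above.

    /* Illustrative only -- not part of the patch. */
    int example_msr_write_path(struct vcpu *v, uint32_t msr, uint64_t val)
    {
        int ret = guest_wrmsr(v, msr, val);

        /*
         * guest_wrmsr() returns X86EMUL_OKAY when the write is accepted and
         * X86EMUL_EXCEPTION when the guest must receive #GP; both results
         * are final and must not fall through to the legacy handlers.
         */
        if ( ret != X86EMUL_UNHANDLEABLE )
            return ret;

        /*
         * Only MSRs not (yet) covered by the new infrastructure reach the
         * legacy per-MSR switch (hypothetical helper standing in for the
         * existing vendor/guest-type specific code).
         */
        return legacy_wrmsr_handler(v, msr, val);
    }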