@@ -111,6 +111,7 @@ static int check_segment(struct segment_register *reg, enum x86_segment seg)
 /* Called by VCPUOP_initialise for HVM guests. */
 int arch_set_info_hvm_guest(struct vcpu *v, const vcpu_hvm_context_t *ctx)
 {
+    const struct domain *d = v->domain;
     struct cpu_user_regs *uregs = &v->arch.user_regs;
     struct segment_register cs, ds, ss, es, tr;
    const char *errstr;
@@ -272,7 +273,7 @@ int arch_set_info_hvm_guest(struct vcpu *v, const vcpu_hvm_context_t *ctx)
     if ( v->arch.hvm_vcpu.guest_efer & EFER_LME )
         v->arch.hvm_vcpu.guest_efer |= EFER_LMA;
 
-    if ( v->arch.hvm_vcpu.guest_cr[4] & ~hvm_cr4_guest_valid_bits(v, 0) )
+    if ( v->arch.hvm_vcpu.guest_cr[4] & ~hvm_cr4_guest_valid_bits(d, false) )
     {
         gprintk(XENLOG_ERR, "Bad CR4 value: %#016lx\n",
                 v->arch.hvm_vcpu.guest_cr[4]);
@@ -928,9 +928,8 @@ const char *hvm_efer_valid(const struct vcpu *v, uint64_t value,
         X86_CR0_CD | X86_CR0_PG)))
 
 /* These bits in CR4 can be set by the guest. */
-unsigned long hvm_cr4_guest_valid_bits(const struct vcpu *v, bool restore)
+unsigned long hvm_cr4_guest_valid_bits(const struct domain *d, bool restore)
 {
-    const struct domain *d = v->domain;
     const struct cpuid_policy *p;
     bool mce, vmxe;
 
@@ -997,7 +996,7 @@ static int hvm_load_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
         return -EINVAL;
     }
 
-    if ( ctxt.cr4 & ~hvm_cr4_guest_valid_bits(v, 1) )
+    if ( ctxt.cr4 & ~hvm_cr4_guest_valid_bits(d, true) )
     {
         printk(XENLOG_G_ERR "HVM%d restore: bad CR4 %#" PRIx64 "\n",
                d->domain_id, ctxt.cr4);
@@ -2308,7 +2307,7 @@ int hvm_set_cr4(unsigned long value, bool_t may_defer)
     struct vcpu *v = current;
     unsigned long old_cr;
 
-    if ( value & ~hvm_cr4_guest_valid_bits(v, 0) )
+    if ( value & ~hvm_cr4_guest_valid_bits(v->domain, false) )
     {
         HVM_DBG_LOG(DBG_LEVEL_1,
                     "Guest attempts to set reserved bit in CR4: %lx",
@@ -119,9 +119,9 @@ bool svm_vmcb_isvalid(const char *from, const struct vmcb_struct *vmcb,
          (cr3 >> v->domain->arch.cpuid->extd.maxphysaddr))) )
         PRINTF("CR3: MBZ bits are set (%#"PRIx64")\n", cr3);
 
-    if ( cr4 & ~hvm_cr4_guest_valid_bits(v, false) )
+    if ( cr4 & ~hvm_cr4_guest_valid_bits(v->domain, false) )
         PRINTF("CR4: invalid bits are set (%#"PRIx64", valid: %#"PRIx64")\n",
-               cr4, hvm_cr4_guest_valid_bits(v, false));
+               cr4, hvm_cr4_guest_valid_bits(v->domain, false));
 
     if ( vmcb_get_dr6(vmcb) >> 32 )
         PRINTF("DR6: bits [63:32] are not zero (%#"PRIx64")\n",
@@ -2136,7 +2136,7 @@ int nvmx_msr_read_intercept(unsigned int msr, u64 *msr_content)
         data = X86_CR4_VMXE;
         break;
     case MSR_IA32_VMX_CR4_FIXED1:
-        data = hvm_cr4_guest_valid_bits(v, 0);
+        data = hvm_cr4_guest_valid_bits(d, false);
         break;
     case MSR_IA32_VMX_MISC:
         /* Do not support CR3-target feature now */
@@ -612,7 +612,7 @@ static inline bool altp2m_vcpu_emulate_ve(struct vcpu *v)
 /* Check CR4/EFER values */
 const char *hvm_efer_valid(const struct vcpu *v, uint64_t value,
                            signed int cr0_pg);
-unsigned long hvm_cr4_guest_valid_bits(const struct vcpu *v, bool restore);
+unsigned long hvm_cr4_guest_valid_bits(const struct domain *d, bool restore);
 
 /*
  * This must be defined as a macro instead of an inline function,
With the new cpuid infrastructure there is a domain-wide struct cpuid
policy and there is no need to pass a separate struct vcpu * into
hvm_cr4_guest_valid_bits() anymore. Make the function accept struct
domain * instead and update callers.

Signed-off-by: Sergey Dyasli <sergey.dyasli@citrix.com>
---
 xen/arch/x86/hvm/domain.c       | 3 ++-
 xen/arch/x86/hvm/hvm.c          | 7 +++----
 xen/arch/x86/hvm/svm/svmdebug.c | 4 ++--
 xen/arch/x86/hvm/vmx/vvmx.c     | 2 +-
 xen/include/asm-x86/hvm/hvm.h   | 2 +-
 5 files changed, 9 insertions(+), 9 deletions(-)
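
A minimal before/after sketch of the calling convention, for reviewers
skimming the interface change (cr4_ok_old/cr4_ok_new are hypothetical
callers, not code from this patch):

    /* Before: a vcpu was passed, but only ever to reach v->domain. */
    static bool cr4_ok_old(const struct vcpu *v, unsigned long cr4)
    {
        return !(cr4 & ~hvm_cr4_guest_valid_bits(v, false));
    }

    /*
     * After: the CPUID policy is domain-wide, so callers holding a
     * vcpu pass v->domain themselves.
     */
    static bool cr4_ok_new(const struct vcpu *v, unsigned long cr4)
    {
        return !(cr4 & ~hvm_cr4_guest_valid_bits(v->domain, false));
    }

Call sites that already operate on a domain, such as the restore path in
hvm_load_cpu_ctxt(), can now pass d directly.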