--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -65,8 +65,18 @@ static void update_domain_cpuid_info(struct domain *d,
.ecx = ctl->ecx
}
};
+ int old_vendor = d->arch.x86_vendor;
d->arch.x86_vendor = get_cpu_vendor(vendor_id.str, gcv_guest);
+
+ if ( is_hvm_domain(d) && (d->arch.x86_vendor != old_vendor) )
+ {
+ struct vcpu *v;
+
+ for_each_vcpu( d, v )
+ hvm_update_guest_vendor(v);
+ }
+
break;
}
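The union in this hunk reassembles the 12-byte CPUID vendor string exactly as leaf 0 returns it, in ebx/edx/ecx order, and the recomputed vendor is then pushed to every vCPU of an HVM guest through the new hook. A stand-alone illustration of that register ordering (my example, not part of the patch):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        union {
            char str[12];
            struct { uint32_t ebx, edx, ecx; } reg;
        } vendor_id = { .reg = {
            .ebx = 0x756e6547,  /* "Genu" */
            .edx = 0x49656e69,  /* "ineI" */
            .ecx = 0x6c65746e,  /* "ntel" */
        } };

        printf("%.12s\n", vendor_id.str);  /* prints GenuineIntel */
        return 0;
    }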
@@ -707,6 +717,12 @@ long arch_do_domctl(
xen_domctl_cpuid_t *ctl = &domctl->u.cpuid;
cpuid_input_t *cpuid, *unused = NULL;
+ if ( d == currd ) /* no domain_pause() */
+ {
+ ret = -EINVAL;
+ break;
+ }
+
for ( i = 0; i < MAX_CPUID_INPUT; i++ )
{
cpuid = &d->arch.cpuids[i];
@@ -724,6 +740,8 @@ long arch_do_domctl(
break;
}
+ domain_pause(d);
+
if ( i < MAX_CPUID_INPUT )
*cpuid = *ctl;
else if ( unused )
@@ -734,6 +752,7 @@ long arch_do_domctl(
if ( !ret )
update_domain_cpuid_info(d, ctl);
+ domain_unpause(d);
break;
}
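The d == currd check and the domain_pause()/domain_unpause() pair in the two hunks above belong together: the target is paused so none of its vCPUs can run while the CPUID policy and the derived vendor state are rewritten, and a domain must not pause itself, since domain_pause() waits for every vCPU of the domain, including the calling one, to be descheduled. A minimal sketch of that discipline, where apply_cpuid_update() is a hypothetical stand-in for the update logic above:

    /* Sketch only: apply_cpuid_update() is a made-up helper. */
    static long set_cpuid_paused(struct domain *d, const cpuid_input_t *ctl)
    {
        long ret;

        if ( d == current->domain )  /* domain_pause() would deadlock */
            return -EINVAL;

        domain_pause(d);             /* quiesce all of d's vCPUs */
        ret = apply_cpuid_update(d, ctl);
        domain_unpause(d);

        return ret;
    }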
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -93,12 +93,10 @@ unsigned long __section(".bss.page_aligned")
static bool_t __initdata opt_hap_enabled = 1;
boolean_param("hap", opt_hap_enabled);
-#ifndef NDEBUG
+#ifndef opt_hvm_fep
/* Permit use of the Forced Emulation Prefix in HVM guests */
-static bool_t opt_hvm_fep;
+bool_t opt_hvm_fep;
boolean_param("hvm_fep", opt_hvm_fep);
-#else
-#define opt_hvm_fep 0
#endif
/* Xen command-line option to enable altp2m */
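The #ifndef opt_hvm_fep guard looks odd but composes with the hvm.h hunk further down: release (NDEBUG) builds define opt_hvm_fep as the macro 0, so this block drops out and every test of the flag constant-folds to zero, while debug builds see only the extern declaration, so the variable and its command-line option are compiled in. The same trick in a self-contained form (my names, not Xen's):

    #include <stdbool.h>
    #include <stdio.h>

    /* the "header": */
    #ifdef NDEBUG
    # define debug_flag 0       /* release build: a constant, not a variable */
    #else
    extern bool debug_flag;     /* debug build: a real runtime flag */
    #endif

    /* the one ".c file" defining it: */
    #ifndef debug_flag          /* true only when no macro was defined */
    bool debug_flag;
    #endif

    int main(void)
    {
        if ( debug_flag )       /* dead code in release builds */
            puts("debug behaviour enabled");
        return 0;
    }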
@@ -2488,6 +2486,8 @@ int hvm_vcpu_initialise(struct vcpu *v)
hvm_set_guest_tsc(v, 0);
}
+ hvm_update_guest_vendor(v);
+
return 0;
fail7:
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -597,6 +597,21 @@ static void svm_update_guest_efer(struct vcpu *v)
vmcb_set_efer(vmcb, new_efer);
}
+static void svm_update_guest_vendor(struct vcpu *v)
+{
+ struct arch_svm_struct *arch_svm = &v->arch.hvm_svm;
+ struct vmcb_struct *vmcb = arch_svm->vmcb;
+ u32 bitmap = vmcb_get_exception_intercepts(vmcb);
+
+ if ( opt_hvm_fep ||
+ (v->domain->arch.x86_vendor != boot_cpu_data.x86_vendor) )
+ bitmap |= (1U << TRAP_invalid_op);
+ else
+ bitmap &= ~(1U << TRAP_invalid_op);
+
+ vmcb_set_exception_intercepts(vmcb, bitmap);
+}
+
static void svm_sync_vmcb(struct vcpu *v)
{
struct arch_svm_struct *arch_svm = &v->arch.hvm_svm;
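Both vendor hooks encode the same condition: intercept #UD only when the Forced Emulation Prefix is active (debug builds) or when the guest claims a vendor other than the host's, in which case Xen must emulate vendor-specific instruction behaviour rather than let the hardware apply the host's rules. Read as a predicate, the hooks open-code something like this hypothetical helper (not introduced by the patch):

    /* Hypothetical shared predicate; the SVM and VMX hooks each open-code it. */
    static inline bool_t need_ud_intercept(const struct vcpu *v)
    {
        return opt_hvm_fep ||
               (v->domain->arch.x86_vendor != boot_cpu_data.x86_vendor);
    }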
@@ -2245,6 +2260,7 @@ static struct hvm_function_table __initdata svm_function_table = {
.get_shadow_gs_base = svm_get_shadow_gs_base,
.update_guest_cr = svm_update_guest_cr,
.update_guest_efer = svm_update_guest_efer,
+ .update_guest_vendor = svm_update_guest_vendor,
.set_guest_pat = svm_set_guest_pat,
.get_guest_pat = svm_get_guest_pat,
.set_tsc_offset = svm_set_tsc_offset,
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -73,6 +73,7 @@ static void vmx_free_vlapic_mapping(struct domain *d);
static void vmx_install_vlapic_mapping(struct vcpu *v);
static void vmx_update_guest_cr(struct vcpu *v, unsigned int cr);
static void vmx_update_guest_efer(struct vcpu *v);
+static void vmx_update_guest_vendor(struct vcpu *v);
static void vmx_cpuid_intercept(
unsigned int *eax, unsigned int *ebx,
unsigned int *ecx, unsigned int *edx);
@@ -398,6 +399,19 @@ void vmx_update_exception_bitmap(struct vcpu *v)
__vmwrite(EXCEPTION_BITMAP, bitmap);
}
+static void vmx_update_guest_vendor(struct vcpu *v)
+{
+ if ( opt_hvm_fep ||
+ (v->domain->arch.x86_vendor != boot_cpu_data.x86_vendor) )
+ v->arch.hvm_vmx.exception_bitmap |= (1U << TRAP_invalid_op);
+ else
+ v->arch.hvm_vmx.exception_bitmap &= ~(1U << TRAP_invalid_op);
+
+ vmx_vmcs_enter(v);
+ vmx_update_exception_bitmap(v);
+ vmx_vmcs_exit(v);
+}
+
static int vmx_guest_x86_mode(struct vcpu *v)
{
unsigned long cs_ar_bytes;
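The enter/exit bracket is the notable difference from the SVM hook: a VMCB is an ordinary structure in memory, whereas a VMCS must be loaded on the current pCPU before __vmwrite() may touch it, so an update that can target a remote vCPU has to take the bracket explicitly. The general shape, sketched with a hypothetical helper:

    /* Sketch: any VMCS-backed update for a possibly-remote vCPU must be
     * bracketed; write_vmcs_field() is a made-up helper, not part of Xen. */
    static void write_vmcs_field(struct vcpu *v, unsigned long field,
                                 unsigned long value)
    {
        vmx_vmcs_enter(v);       /* load v's VMCS on this pCPU */
        __vmwrite(field, value);
        vmx_vmcs_exit(v);        /* release it again */
    }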
@@ -1963,6 +1977,7 @@ static struct hvm_function_table __initdata vmx_function_table = {
.update_host_cr3 = vmx_update_host_cr3,
.update_guest_cr = vmx_update_guest_cr,
.update_guest_efer = vmx_update_guest_efer,
+ .update_guest_vendor = vmx_update_guest_vendor,
.set_guest_pat = vmx_set_guest_pat,
.get_guest_pat = vmx_get_guest_pat,
.set_tsc_offset = vmx_set_tsc_offset,
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -28,6 +28,13 @@
#include <public/hvm/ioreq.h>
#include <xen/mm.h>
+#ifndef NDEBUG
+/* Permit use of the Forced Emulation Prefix in HVM guests */
+extern bool_t opt_hvm_fep;
+#else
+#define opt_hvm_fep 0
+#endif
+
/* Interrupt acknowledgement sources. */
enum hvm_intsrc {
hvm_intsrc_none,
@@ -136,6 +143,8 @@ struct hvm_function_table {
void (*update_guest_cr)(struct vcpu *v, unsigned int cr);
void (*update_guest_efer)(struct vcpu *v);
+ void (*update_guest_vendor)(struct vcpu *v);
+
int (*get_guest_pat)(struct vcpu *v, u64 *);
int (*set_guest_pat)(struct vcpu *v, u64);
@@ -316,6 +325,11 @@ static inline void hvm_update_guest_efer(struct vcpu *v)
hvm_funcs.update_guest_efer(v);
}
+static inline void hvm_update_guest_vendor(struct vcpu *v)
+{
+ hvm_funcs.update_guest_vendor(v);
+}
+
/*
* Called to ensure that all guest-specific mappings in a tagged TLB are
* flushed; does *not* flush Xen's TLB entries, and on processors without a
@@ -387,7 +401,6 @@ static inline int hvm_event_pending(struct vcpu *v)
/* These exceptions must always be intercepted. */
#define HVM_TRAP_MASK ((1U << TRAP_debug) | \
- (1U << TRAP_invalid_op) | \
(1U << TRAP_alignment_check) | \
(1U << TRAP_machine_check))
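With TRAP_invalid_op dropped from HVM_TRAP_MASK, #UD is no longer in the always-intercepted set; whether it traps to Xen is now a per-vCPU decision made by the update_guest_vendor hooks. The resulting behaviour, summarised as a comment (my table, derived from the predicate above):

    /*
     * #UD interception after this patch:
     *
     *   opt_hvm_fep   guest vendor == host vendor   intercepted?
     *   0             yes                           no  (delivered direct)
     *   0             no                            yes (Xen emulates)
     *   non-zero      either                        yes (FEP needs the trap)
     */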