@@ -5170,12 +5170,6 @@ static int x86_cpu_filter_features(X86CPU *cpu)
return rv;
}
-#define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
- (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
- (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
-#define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
- (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
- (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
{
CPUState *cs = CPU(dev);
@@ -728,6 +728,13 @@ typedef uint32_t FeatureWordArray[FEATURE_WORDS];
#define CPUID_VENDOR_HYGON "HygonGenuine"
+#define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
+ (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
+ (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
+#define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
+ (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
+ (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
+
#define CPUID_MWAIT_IBE (1U << 1) /* Interrupts can exit capability */
#define CPUID_MWAIT_EMX (1U << 0) /* enumeration supported */
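
For reference, and not part of the patch itself: IS_INTEL_CPU() and IS_AMD_CPU() compare three words because CPUID leaf 0 returns the vendor string packed four characters per register, in EBX, EDX, ECX order, which QEMU mirrors in cpuid_vendor1/2/3. The constants being compared are defined elsewhere in the same header as:

    #define CPUID_VENDOR_INTEL_1 0x756e6547 /* "Genu" */
    #define CPUID_VENDOR_INTEL_2 0x49656e69 /* "ineI" */
    #define CPUID_VENDOR_INTEL_3 0x6c65746e /* "ntel" */
    #define CPUID_VENDOR_AMD_1   0x68747541 /* "Auth" */
    #define CPUID_VENDOR_AMD_2   0x69746e65 /* "enti" */
    #define CPUID_VENDOR_AMD_3   0x444d4163 /* "cAMD" */
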
@@ -1866,6 +1873,21 @@ static inline int32_t x86_get_a20_mask(CPUX86State *env)
}
}
+static inline bool cpu_has_vmx(CPUX86State *env)
+{
+ return (env->features[FEAT_1_ECX] & CPUID_EXT_VMX);
+}
+
+static inline bool cpu_has_svm(CPUX86State *env)
+{
+ return (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM);
+}
+
+static inline bool cpu_has_nested_virt(CPUX86State *env)
+{
+ return (cpu_has_vmx(env) || cpu_has_svm(env));
+}
+
/* fpu_helper.c */
void update_fp_status(CPUX86State *env);
void update_mxcsr_status(CPUX86State *env);
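
The new predicates let callers reason about nested virtualization without repeating the raw CPUID bit tests. A minimal sketch of a hypothetical caller (the function below is invented for illustration and is not part of the patch):

    #include "qemu/osdep.h"
    #include "qemu/error-report.h"
    #include "cpu.h"

    /* Hypothetical helper: warn when the vCPU model exposes hardware
     * virtualization to the guest, in which case the blocker registered in
     * kvm_arch_init_vcpu() below will refuse live migration. */
    static void warn_if_nested_virt(CPUX86State *env)
    {
        if (cpu_has_nested_virt(env)) {
            warn_report("guest exposes %s; live migration will be blocked",
                        cpu_has_vmx(env) ? "VMX" : "SVM");
        }
    }
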
@@ -906,7 +906,7 @@ static int hyperv_init_vcpu(X86CPU *cpu)
}
static Error *invtsc_mig_blocker;
-static Error *vmx_mig_blocker;
+static Error *nested_virt_mig_blocker;
#define KVM_MAX_CPUID_ENTRIES 100
@@ -1270,13 +1270,13 @@ int kvm_arch_init_vcpu(CPUState *cs)
!!(c->ecx & CPUID_EXT_SMX);
}
- if ((env->features[FEAT_1_ECX] & CPUID_EXT_VMX) && !vmx_mig_blocker) {
- error_setg(&vmx_mig_blocker,
- "Nested VMX virtualization does not support live migration yet");
- r = migrate_add_blocker(vmx_mig_blocker, &local_err);
+ if (cpu_has_nested_virt(env) && !nested_virt_mig_blocker) {
+ error_setg(&nested_virt_mig_blocker,
+ "Nested virtualization does not support live migration yet");
+ r = migrate_add_blocker(nested_virt_mig_blocker, &local_err);
if (local_err) {
error_report_err(local_err);
- error_free(vmx_mig_blocker);
+ error_free(nested_virt_mig_blocker);
return r;
}
}
@@ -1347,7 +1347,7 @@ int kvm_arch_init_vcpu(CPUState *cs)
fail:
migrate_del_blocker(invtsc_mig_blocker);
fail2:
- migrate_del_blocker(vmx_mig_blocker);
+ migrate_del_blocker(nested_virt_mig_blocker);
return r;
}
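
For context, as an illustration rather than part of the patch: with the blocker now keyed on cpu_has_nested_virt(), either of the following vCPU configurations is expected to register the migration blocker, whereas previously only the VMX case was covered (both assume a KVM host with nested virtualization enabled; other options elided):

    qemu-system-x86_64 -enable-kvm -cpu host,+vmx ...   # Intel host, nested VMX exposed
    qemu-system-x86_64 -enable-kvm -cpu host,+svm ...   # AMD host, nested SVM exposed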