@@ -96,6 +96,11 @@ KVM_FEATURE_MSI_EXT_DEST_ID 15 guest checks this feature bit
before using extended destination
ID bits in MSI address bits 11-5.
+KVM_FEATURE_SEV_LIVE_MIGRATION 16 guest checks this feature bit before
+ using the page encryption state
+ hypercall to notify the page state
+ change
+
KVM_FEATURE_CLOCKSOURCE_STABLE_BIT 24 host will warn if no guest-side
per-cpu warps are expected in
kvmclock
@@ -376,3 +376,19 @@ data:
write '1' to bit 0 of the MSR, this causes the host to re-scan its queue
and check if there are more notifications pending. The MSR is available
if KVM_FEATURE_ASYNC_PF_INT is present in CPUID.
+
+MSR_KVM_SEV_LIVE_MIG_EN:
+ 0x4b564d08
+
+ Control SEV Live Migration features.
+
+data:
+        Bit 0 enables (1) or disables (0) the host-side SEV Live Migration
+        feature; setting it is the guest's way of telling the host that it
+        properly maintains the page encryption bitmap.
+
+        Bit 1 enables (1) or disables (0) support for SEV Live Migration
+        extensions, i.e. any future additions to this interface such as
+        support for accelerated migration.
+
+ All other bits are reserved.
@@ -33,6 +33,7 @@
#define KVM_FEATURE_PV_SCHED_YIELD 13
#define KVM_FEATURE_ASYNC_PF_INT 14
#define KVM_FEATURE_MSI_EXT_DEST_ID 15
+#define KVM_FEATURE_SEV_LIVE_MIGRATION 16
#define KVM_HINTS_REALTIME 0
@@ -54,6 +55,7 @@
#define MSR_KVM_POLL_CONTROL 0x4b564d05
#define MSR_KVM_ASYNC_PF_INT 0x4b564d06
#define MSR_KVM_ASYNC_PF_ACK 0x4b564d07
+#define MSR_KVM_SEV_LIVE_MIG_EN 0x4b564d08
struct kvm_steal_time {
__u64 steal;
@@ -136,4 +138,7 @@ struct kvm_vcpu_pv_apf_data {
#define KVM_PV_EOI_ENABLED KVM_PV_EOI_MASK
#define KVM_PV_EOI_DISABLED 0x0
+#define KVM_SEV_LIVE_MIGRATION_ENABLED (1 << 0)
+#define KVM_SEV_LIVE_MIGRATION_EXTENSIONS_SUPPORTED (1 << 1)
+
#endif /* _UAPI_ASM_X86_KVM_PARA_H */
@@ -1483,6 +1483,17 @@ int svm_page_enc_status_hc(struct kvm *kvm, unsigned long gpa,
return 0;
}
+void sev_update_migration_flags(struct kvm *kvm, u64 data)
+{
+	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+
+	if (!sev_guest(kvm))
+		return;
+
+	/* Honor disable as well as enable: bit 0 toggles the feature. */
+	sev->live_migration_enabled = !!(data & KVM_SEV_LIVE_MIGRATION_ENABLED);
+}
+
int svm_get_page_enc_bitmap(struct kvm *kvm,
struct kvm_page_enc_bitmap *bmap)
{
@@ -1495,6 +1506,9 @@ int svm_get_page_enc_bitmap(struct kvm *kvm,
if (!sev_guest(kvm))
return -ENOTTY;
+ if (!sev->live_migration_enabled)
+ return -EINVAL;
+
gfn_start = bmap->start_gfn;
gfn_end = gfn_start + bmap->num_pages;
@@ -2765,6 +2765,9 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
svm->msr_decfg = data;
break;
}
+ case MSR_KVM_SEV_LIVE_MIG_EN:
+ sev_update_migration_flags(vcpu->kvm, data);
+ break;
case MSR_IA32_APICBASE:
if (kvm_vcpu_apicv_active(vcpu))
avic_update_vapic_bar(to_svm(vcpu), data);
@@ -3769,6 +3772,19 @@ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
vcpu->arch.cr3_lm_rsvd_bits &= ~(1UL << (best->ebx & 0x3f));
}
+	/*
+	 * For SEV guests, advertise the live migration feature in the
+	 * KVM CPUID leaf so the guest knows it may use the MSR/hypercall.
+	 */
+ if (sev_guest(vcpu->kvm)) {
+ struct kvm_cpuid_entry2 *best;
+
+ best = kvm_find_cpuid_entry(vcpu, KVM_CPUID_FEATURES, 0);
+ if (!best)
+ return;
+
+ best->eax |= (1 << KVM_FEATURE_SEV_LIVE_MIGRATION);
+ }
+
if (!kvm_vcpu_apicv_active(vcpu))
return;
@@ -66,6 +66,7 @@ struct kvm_sev_info {
int fd; /* SEV device fd */
unsigned long pages_locked; /* Number of pages locked */
struct list_head regions_list; /* List of registered regions */
+ bool live_migration_enabled;
unsigned long *page_enc_bmap;
unsigned long page_enc_bmap_size;
};
@@ -505,5 +506,6 @@ int svm_unregister_enc_region(struct kvm *kvm,
void pre_sev_run(struct vcpu_svm *svm, int cpu);
int __init sev_hardware_setup(void);
void sev_hardware_teardown(void);
+void sev_update_migration_flags(struct kvm *kvm, u64 data);
#endif