@@ -86,6 +86,10 @@ KVM_FEATURE_PV_SCHED_YIELD 13 guest checks this feature bit
before using paravirtualized
sched yield.
+KVM_FEATURE_SEV_LIVE_MIGRATION 14 guest checks this feature bit
+ before enabling the SEV live
+ migration feature.
+
KVM_FEATURE_CLOCKSOURCE_STABLE_BIT 24 host will warn if no guest-side
per-cpu warps are expected in
kvmclock
@@ -319,3 +319,13 @@ data:
KVM guests can request the host not to poll on HLT, for example if
they are performing polling themselves.
+
+MSR_KVM_SEV_LIVE_MIG_EN:
+ 0x4b564d06
+
+ Control SEV Live Migration features.
+
+data:
+ Bit 0 enables (1) or disables (0) the host-side SEV Live Migration feature.
+ Bit 1 enables (1) or disables (0) support for SEV Live Migration extensions.
+ All other bits are reserved.
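(Aside, for illustration only and not part of the patch: a guest is expected to
gate the MSR write on the CPUID feature bit, mirroring the
sev_map_percpu_data() change below. The flag names come from the uapi header
hunk later in this patch; bit 1 would only be OR'd in by a guest that also
supports the extensions.)

	if (kvm_para_has_feature(KVM_FEATURE_SEV_LIVE_MIGRATION))
		wrmsrl(MSR_KVM_SEV_LIVE_MIG_EN,
		       KVM_SEV_LIVE_MIGRATION_ENABLED |
		       KVM_SEV_LIVE_MIGRATION_EXTENSIONS_SUPPORTED);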
@@ -780,6 +780,9 @@ struct kvm_vcpu_arch {
u64 msr_kvm_poll_control;
+ /* SEV Live Migration MSR (AMD only) */
+ u64 msr_kvm_sev_live_migration_flag;
+
/*
* Indicates the guest is trying to write a gfn that contains one or
* more of the PTEs used to translate the write itself, i.e. the access
@@ -31,6 +31,7 @@
#define KVM_FEATURE_PV_SEND_IPI 11
#define KVM_FEATURE_POLL_CONTROL 12
#define KVM_FEATURE_PV_SCHED_YIELD 13
+#define KVM_FEATURE_SEV_LIVE_MIGRATION 14
#define KVM_HINTS_REALTIME 0
@@ -50,6 +51,7 @@
#define MSR_KVM_STEAL_TIME 0x4b564d03
#define MSR_KVM_PV_EOI_EN 0x4b564d04
#define MSR_KVM_POLL_CONTROL 0x4b564d05
+#define MSR_KVM_SEV_LIVE_MIG_EN 0x4b564d06
struct kvm_steal_time {
__u64 steal;
@@ -122,4 +124,7 @@ struct kvm_vcpu_pv_apf_data {
#define KVM_PV_EOI_ENABLED KVM_PV_EOI_MASK
#define KVM_PV_EOI_DISABLED 0x0
+#define KVM_SEV_LIVE_MIGRATION_ENABLED (1 << 0)
+#define KVM_SEV_LIVE_MIGRATION_EXTENSIONS_SUPPORTED (1 << 1)
+
#endif /* _UAPI_ASM_X86_KVM_PARA_H */
@@ -418,6 +418,10 @@ static void __init sev_map_percpu_data(void)
if (!sev_active())
return;
+ if (kvm_para_has_feature(KVM_FEATURE_SEV_LIVE_MIGRATION))
+         wrmsrl(MSR_KVM_SEV_LIVE_MIG_EN, KVM_SEV_LIVE_MIGRATION_ENABLED);
+
for_each_possible_cpu(cpu) {
__set_percpu_decrypted(&per_cpu(apf_reason, cpu), sizeof(apf_reason));
__set_percpu_decrypted(&per_cpu(steal_time, cpu), sizeof(steal_time));
@@ -716,7 +716,8 @@ static inline int __do_cpuid_func(struct kvm_cpuid_entry2 *entry, u32 function,
(1 << KVM_FEATURE_ASYNC_PF_VMEXIT) |
(1 << KVM_FEATURE_PV_SEND_IPI) |
(1 << KVM_FEATURE_POLL_CONTROL) |
- (1 << KVM_FEATURE_PV_SCHED_YIELD);
+ (1 << KVM_FEATURE_PV_SCHED_YIELD) |
+ (1 << KVM_FEATURE_SEV_LIVE_MIGRATION);
if (sched_info_on())
entry->eax |= (1 << KVM_FEATURE_STEAL_TIME);
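(Aside, illustrative only: kvm_para_has_feature() ultimately checks EAX of the
KVM features CPUID leaf, 0x40000001. A self-contained userspace sketch of the
same check, handy for verifying that the bit above is actually advertised:)

	#include <stdint.h>
	#include <cpuid.h>	/* GCC/Clang __cpuid() macro */

	#define KVM_CPUID_FEATURES		0x40000001
	#define KVM_FEATURE_SEV_LIVE_MIGRATION	14

	static int sev_live_migration_supported(void)
	{
		uint32_t eax, ebx, ecx, edx;

		/*
		 * Hypervisor leaves (0x400000xx) fall outside the range
		 * validated by __get_cpuid(), so use __cpuid() directly.
		 */
		__cpuid(KVM_CPUID_FEATURES, eax, ebx, ecx, edx);
		return !!(eax & (1u << KVM_FEATURE_SEV_LIVE_MIGRATION));
	}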
@@ -7632,12 +7632,17 @@ static int svm_page_enc_status_hc(struct kvm *kvm, unsigned long gpa,
unsigned long npages, unsigned long enc)
{
struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+ struct kvm_vcpu *vcpu = kvm->vcpus[0];
gfn_t gfn_start, gfn_end;
int ret;
if (!sev_guest(kvm))
return -EINVAL;
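+	/*
+	 * The guest writes this MSR from its boot CPU (see the
+	 * sev_map_percpu_data() change above), so vCPU 0 holds the
+	 * authoritative value of the flag.
+	 */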
+ if (!(vcpu->arch.msr_kvm_sev_live_migration_flag &
+ KVM_SEV_LIVE_MIGRATION_ENABLED))
+ return -ENOTTY;
+
if (!npages)
return 0;
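(For context, a hedged sketch of the guest-side counterpart:
set_memory_enc_dec_hypercall(), hooked up in the mem_encrypt_init() change
below, is expected to reach this handler via a three-argument hypercall. The
hypercall number KVM_HC_PAGE_ENC_STATUS and the argument order are assumptions
inferred from this handler's signature, not confirmed by this hunk:)

	/* gpa/npages/enc mirror svm_page_enc_status_hc()'s parameters */
	kvm_hypercall3(KVM_HC_PAGE_ENC_STATUS, gpa, npages, enc);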
@@ -2880,6 +2880,10 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
vcpu->arch.msr_kvm_poll_control = data;
break;
+ case MSR_KVM_SEV_LIVE_MIG_EN:
+ vcpu->arch.msr_kvm_sev_live_migration_flag = data;
+ break;
+
case MSR_IA32_MCG_CTL:
case MSR_IA32_MCG_STATUS:
case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1:
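(The documentation above reserves all bits other than 0 and 1, yet the handler
stores any value unchecked. A stricter variant, offered here as a sketch rather
than as part of this patch, could reject reserved bits so the guest's write
faults:)

	case MSR_KVM_SEV_LIVE_MIG_EN:
		if (data & ~(KVM_SEV_LIVE_MIGRATION_ENABLED |
			     KVM_SEV_LIVE_MIGRATION_EXTENSIONS_SUPPORTED))
			return 1;	/* reserved bits set: inject #GP */
		vcpu->arch.msr_kvm_sev_live_migration_flag = data;
		break;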
@@ -3126,6 +3130,9 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
case MSR_KVM_POLL_CONTROL:
msr_info->data = vcpu->arch.msr_kvm_poll_control;
break;
+ case MSR_KVM_SEV_LIVE_MIG_EN:
+ msr_info->data = vcpu->arch.msr_kvm_sev_live_migration_flag;
+ break;
case MSR_IA32_P5_MC_ADDR:
case MSR_IA32_P5_MC_TYPE:
case MSR_IA32_MCG_CAP:
@@ -502,8 +502,20 @@ void __init mem_encrypt_init(void)
* With SEV, we need to make a hypercall when page encryption state is
* changed.
*/
- if (sev_active())
+ if (sev_active()) {
+ unsigned long nr_pages;
+
pv_ops.mmu.page_encryption_changed = set_memory_enc_dec_hypercall;
+
+ /*
+ * Ensure that the _bss_decrypted section is marked as decrypted in
+ * the page encryption bitmap.
+ */
+ nr_pages = DIV_ROUND_UP(__end_bss_decrypted - __start_bss_decrypted,
+ PAGE_SIZE);
+ set_memory_enc_dec_hypercall((unsigned long)__start_bss_decrypted,
+ nr_pages, 0);
+ }
#endif
pr_info("AMD %s active\n",
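(Worked example: a 12 KiB _bss_decrypted section with 4 KiB pages gives
nr_pages = DIV_ROUND_UP(12288, 4096) = 3, and the final argument 0 marks those
pages as decrypted in the page encryption bitmap.)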