Message ID: 20211005141357.2393627-3-pgonda@google.com (mailing list archive)
State: New, archived
Series: Add AMD SEV and SEV-ES intra host migration support
On 10/5/21 9:13 AM, Peter Gonda wrote:
> For SEV-ES to work with intra host migration the VMSAs, GHCB metadata,
> and other SEV-ES info needs to be preserved along with the guest's
> memory.
>
> Signed-off-by: Peter Gonda <pgonda@google.com>
> Reviewed-by: Marc Orr <marcorr@google.com>
> Cc: Marc Orr <marcorr@google.com>
> Cc: Paolo Bonzini <pbonzini@redhat.com>
> Cc: Sean Christopherson <seanjc@google.com>
> Cc: David Rientjes <rientjes@google.com>
> Cc: Dr. David Alan Gilbert <dgilbert@redhat.com>
> Cc: Brijesh Singh <brijesh.singh@amd.com>
> Cc: Vitaly Kuznetsov <vkuznets@redhat.com>
> Cc: Wanpeng Li <wanpengli@tencent.com>
> Cc: Jim Mattson <jmattson@google.com>
> Cc: Joerg Roedel <joro@8bytes.org>
> Cc: Thomas Gleixner <tglx@linutronix.de>
> Cc: Ingo Molnar <mingo@redhat.com>
> Cc: Borislav Petkov <bp@alien8.de>
> Cc: "H. Peter Anvin" <hpa@zytor.com>
> Cc: kvm@vger.kernel.org
> Cc: linux-kernel@vger.kernel.org
> ---
>  arch/x86/kvm/svm/sev.c | 53 +++++++++++++++++++++++++++++++++++++++++-
>  1 file changed, 52 insertions(+), 1 deletion(-)
>
> diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
> index 6fc1935b52ea..321b55654f36 100644
> --- a/arch/x86/kvm/svm/sev.c
> +++ b/arch/x86/kvm/svm/sev.c
> @@ -1576,6 +1576,51 @@ static void sev_migrate_from(struct kvm_sev_info *dst,
>  	list_replace_init(&src->regions_list, &dst->regions_list);
>  }
>
> +static int sev_es_migrate_from(struct kvm *dst, struct kvm *src)
> +{
> +	int i;
> +	struct kvm_vcpu *dst_vcpu, *src_vcpu;
> +	struct vcpu_svm *dst_svm, *src_svm;
> +
> +	if (atomic_read(&src->online_vcpus) != atomic_read(&dst->online_vcpus))
> +		return -EINVAL;
> +
> +	kvm_for_each_vcpu(i, src_vcpu, src) {
> +		if (!src_vcpu->arch.guest_state_protected)
> +			return -EINVAL;
> +	}
> +
> +	kvm_for_each_vcpu(i, src_vcpu, src) {
> +		src_svm = to_svm(src_vcpu);
> +		dst_vcpu = dst->vcpus[i];
> +		dst_vcpu = kvm_get_vcpu(dst, i);

One of these assignments of dst_vcpu can be deleted.

> +		dst_svm = to_svm(dst_vcpu);
> +
> +		/*
> +		 * Transfer VMSA and GHCB state to the destination. Nullify and
> +		 * clear source fields as appropriate, the state now belongs to
> +		 * the destination.
> +		 */
> +		dst_vcpu->vcpu_id = src_vcpu->vcpu_id;
> +		dst_svm->vmsa = src_svm->vmsa;
> +		src_svm->vmsa = NULL;
> +		dst_svm->ghcb = src_svm->ghcb;
> +		src_svm->ghcb = NULL;
> +		dst_svm->vmcb->control.ghcb_gpa = src_svm->vmcb->control.ghcb_gpa;
> +		dst_svm->ghcb_sa = src_svm->ghcb_sa;
> +		src_svm->ghcb_sa = NULL;
> +		dst_svm->ghcb_sa_len = src_svm->ghcb_sa_len;
> +		src_svm->ghcb_sa_len = 0;
> +		dst_svm->ghcb_sa_sync = src_svm->ghcb_sa_sync;
> +		src_svm->ghcb_sa_sync = false;
> +		dst_svm->ghcb_sa_free = src_svm->ghcb_sa_free;
> +		src_svm->ghcb_sa_free = false;

Would it make sense to have a pre-patch that puts these fields into a
struct? Then you can just copy the struct and zero it after. If anything
is ever added for any reason, then it could/should be added to the struct
and this code wouldn't have to change. It might be more churn than it's
worth, just a thought.

Thanks,
Tom

> +	}
> +	to_kvm_svm(src)->sev_info.es_active = false;
> +
> +	return 0;
> +}
> +
>  int svm_vm_migrate_from(struct kvm *kvm, unsigned int source_fd)
>  {
>  	struct kvm_sev_info *dst_sev = &to_kvm_svm(kvm)->sev_info;
> @@ -1604,7 +1649,7 @@ int svm_vm_migrate_from(struct kvm *kvm, unsigned int source_fd)
>  	if (ret)
>  		goto out_fput;
>
> -	if (!sev_guest(source_kvm) || sev_es_guest(source_kvm)) {
> +	if (!sev_guest(source_kvm)) {
>  		ret = -EINVAL;
>  		goto out_source;
>  	}
> @@ -1615,6 +1660,12 @@ int svm_vm_migrate_from(struct kvm *kvm, unsigned int source_fd)
>  	if (ret)
>  		goto out_source_vcpu;
>
> +	if (sev_es_guest(source_kvm)) {
> +		ret = sev_es_migrate_from(kvm, source_kvm);
> +		if (ret)
> +			goto out_source_vcpu;
> +	}
> +
>  	sev_migrate_from(dst_sev, &to_kvm_svm(source_kvm)->sev_info);
>  	kvm_for_each_vcpu (i, vcpu, source_kvm) {
>  		kvm_vcpu_reset(vcpu, /* init_event= */ false);
On Fri, Oct 8, 2021 at 9:38 AM Tom Lendacky <thomas.lendacky@amd.com> wrote:
>
> On 10/5/21 9:13 AM, Peter Gonda wrote:
> > For SEV-ES to work with intra host migration the VMSAs, GHCB metadata,
> > and other SEV-ES info needs to be preserved along with the guest's
> > memory.
> >
> > Signed-off-by: Peter Gonda <pgonda@google.com>
> > Reviewed-by: Marc Orr <marcorr@google.com>
> > Cc: Marc Orr <marcorr@google.com>
> > Cc: Paolo Bonzini <pbonzini@redhat.com>
> > Cc: Sean Christopherson <seanjc@google.com>
> > Cc: David Rientjes <rientjes@google.com>
> > Cc: Dr. David Alan Gilbert <dgilbert@redhat.com>
> > Cc: Brijesh Singh <brijesh.singh@amd.com>
> > Cc: Vitaly Kuznetsov <vkuznets@redhat.com>
> > Cc: Wanpeng Li <wanpengli@tencent.com>
> > Cc: Jim Mattson <jmattson@google.com>
> > Cc: Joerg Roedel <joro@8bytes.org>
> > Cc: Thomas Gleixner <tglx@linutronix.de>
> > Cc: Ingo Molnar <mingo@redhat.com>
> > Cc: Borislav Petkov <bp@alien8.de>
> > Cc: "H. Peter Anvin" <hpa@zytor.com>
> > Cc: kvm@vger.kernel.org
> > Cc: linux-kernel@vger.kernel.org
> > ---
> >  arch/x86/kvm/svm/sev.c | 53 +++++++++++++++++++++++++++++++++++++++++-
> >  1 file changed, 52 insertions(+), 1 deletion(-)
> >
> > diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
> > index 6fc1935b52ea..321b55654f36 100644
> > --- a/arch/x86/kvm/svm/sev.c
> > +++ b/arch/x86/kvm/svm/sev.c
> > @@ -1576,6 +1576,51 @@ static void sev_migrate_from(struct kvm_sev_info *dst,
> >  	list_replace_init(&src->regions_list, &dst->regions_list);
> >  }
> >
> > +static int sev_es_migrate_from(struct kvm *dst, struct kvm *src)
> > +{
> > +	int i;
> > +	struct kvm_vcpu *dst_vcpu, *src_vcpu;
> > +	struct vcpu_svm *dst_svm, *src_svm;
> > +
> > +	if (atomic_read(&src->online_vcpus) != atomic_read(&dst->online_vcpus))
> > +		return -EINVAL;
> > +
> > +	kvm_for_each_vcpu(i, src_vcpu, src) {
> > +		if (!src_vcpu->arch.guest_state_protected)
> > +			return -EINVAL;
> > +	}
> > +
> > +	kvm_for_each_vcpu(i, src_vcpu, src) {
> > +		src_svm = to_svm(src_vcpu);
> > +		dst_vcpu = dst->vcpus[i];
> > +		dst_vcpu = kvm_get_vcpu(dst, i);
>
> One of these assignments of dst_vcpu can be deleted.

Good catch. I'll remove the `dst_vcpu = dst->vcpus[i];` line.

> > +		dst_svm = to_svm(dst_vcpu);
> > +
> > +		/*
> > +		 * Transfer VMSA and GHCB state to the destination. Nullify and
> > +		 * clear source fields as appropriate, the state now belongs to
> > +		 * the destination.
> > +		 */
> > +		dst_vcpu->vcpu_id = src_vcpu->vcpu_id;
> > +		dst_svm->vmsa = src_svm->vmsa;
> > +		src_svm->vmsa = NULL;
> > +		dst_svm->ghcb = src_svm->ghcb;
> > +		src_svm->ghcb = NULL;
> > +		dst_svm->vmcb->control.ghcb_gpa = src_svm->vmcb->control.ghcb_gpa;
> > +		dst_svm->ghcb_sa = src_svm->ghcb_sa;
> > +		src_svm->ghcb_sa = NULL;
> > +		dst_svm->ghcb_sa_len = src_svm->ghcb_sa_len;
> > +		src_svm->ghcb_sa_len = 0;
> > +		dst_svm->ghcb_sa_sync = src_svm->ghcb_sa_sync;
> > +		src_svm->ghcb_sa_sync = false;
> > +		dst_svm->ghcb_sa_free = src_svm->ghcb_sa_free;
> > +		src_svm->ghcb_sa_free = false;
>
> Would it make sense to have a pre-patch that puts these fields into a
> struct? Then you can just copy the struct and zero it after. If anything
> is ever added for any reason, then it could/should be added to the struct
> and this code wouldn't have to change. It might be more churn than it's
> worth, just a thought.
>

That sounds like a good idea to me. I'll add a new patch to the start
of the series which adds in something like:

struct vcpu_sev_es_state {
	/* SEV-ES support */
	struct vmcb_save_area *vmsa;
	struct ghcb *ghcb;
	struct kvm_host_map ghcb_map;
	bool received_first_sipi;

	/* SEV-ES scratch area support */
	void *ghcb_sa;
	u64 ghcb_sa_len;
	bool ghcb_sa_sync;
	bool ghcb_sa_free;
};

struct vcpu_svm {
	...
	struct vcpu_sev_es_state sev_es_state;
	...
};

I think that will make this code less tedious / error prone. Names
sound OK, or better suggestion?
On 10/8/21 12:26 PM, Peter Gonda wrote:
> On Fri, Oct 8, 2021 at 9:38 AM Tom Lendacky <thomas.lendacky@amd.com> wrote:
>>
>> Would it make sense to have a pre-patch that puts these fields into a
>> struct? Then you can just copy the struct and zero it after. If anything
>> is ever added for any reason, then it could/should be added to the struct
>> and this code wouldn't have to change. It might be more churn than it's
>> worth, just a thought.
>>
>
> That sounds like a good idea to me. I'll add a new patch to the start
> of the series which adds in something like:
>
> struct vcpu_sev_es_state {
> 	/* SEV-ES support */
> 	struct vmcb_save_area *vmsa;
> 	struct ghcb *ghcb;
> 	struct kvm_host_map ghcb_map;
> 	bool received_first_sipi;
>
> 	/* SEV-ES scratch area support */
> 	void *ghcb_sa;
> 	u64 ghcb_sa_len;
> 	bool ghcb_sa_sync;
> 	bool ghcb_sa_free;
> };
>
> struct vcpu_svm {
> 	...
> 	struct vcpu_sev_es_state sev_es_state;
> 	...
> };
>
> I think that will make this code less tedious / error prone. Names
> sound OK, or better suggestion?

Those names seem fine to me. If you want to shorten them, you could
always drop the "_state" portion.

Thanks,
Tom
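With the fields gathered into vcpu_sev_es_state, the per-vCPU transfer in
sev_es_migrate_from reduces to a single struct assignment plus zeroing of
the source. A minimal sketch of the simplified loop, assuming the
sev_es_state member name proposed above rather than the exact code that
eventually landed:

	kvm_for_each_vcpu(i, src_vcpu, src) {
		src_svm = to_svm(src_vcpu);
		dst_vcpu = kvm_get_vcpu(dst, i);
		dst_svm = to_svm(dst_vcpu);

		/*
		 * Move the whole SEV-ES state block in one assignment, then
		 * zero the source so it no longer references the transferred
		 * VMSA, GHCB, and scratch-area allocations.
		 */
		dst_vcpu->vcpu_id = src_vcpu->vcpu_id;
		dst_svm->sev_es_state = src_svm->sev_es_state;
		dst_svm->vmcb->control.ghcb_gpa = src_svm->vmcb->control.ghcb_gpa;
		memset(&src_svm->sev_es_state, 0, sizeof(src_svm->sev_es_state));
	}

Note that ghcb_gpa still needs an explicit copy because it lives in
vmcb->control rather than in the new struct, while ghcb_map and
received_first_sipi now ride along automatically; that is the
maintainability win Tom describes.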
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 6fc1935b52ea..321b55654f36 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -1576,6 +1576,51 @@ static void sev_migrate_from(struct kvm_sev_info *dst,
 	list_replace_init(&src->regions_list, &dst->regions_list);
 }
 
+static int sev_es_migrate_from(struct kvm *dst, struct kvm *src)
+{
+	int i;
+	struct kvm_vcpu *dst_vcpu, *src_vcpu;
+	struct vcpu_svm *dst_svm, *src_svm;
+
+	if (atomic_read(&src->online_vcpus) != atomic_read(&dst->online_vcpus))
+		return -EINVAL;
+
+	kvm_for_each_vcpu(i, src_vcpu, src) {
+		if (!src_vcpu->arch.guest_state_protected)
+			return -EINVAL;
+	}
+
+	kvm_for_each_vcpu(i, src_vcpu, src) {
+		src_svm = to_svm(src_vcpu);
+		dst_vcpu = dst->vcpus[i];
+		dst_vcpu = kvm_get_vcpu(dst, i);
+		dst_svm = to_svm(dst_vcpu);
+
+		/*
+		 * Transfer VMSA and GHCB state to the destination. Nullify and
+		 * clear source fields as appropriate, the state now belongs to
+		 * the destination.
+		 */
+		dst_vcpu->vcpu_id = src_vcpu->vcpu_id;
+		dst_svm->vmsa = src_svm->vmsa;
+		src_svm->vmsa = NULL;
+		dst_svm->ghcb = src_svm->ghcb;
+		src_svm->ghcb = NULL;
+		dst_svm->vmcb->control.ghcb_gpa = src_svm->vmcb->control.ghcb_gpa;
+		dst_svm->ghcb_sa = src_svm->ghcb_sa;
+		src_svm->ghcb_sa = NULL;
+		dst_svm->ghcb_sa_len = src_svm->ghcb_sa_len;
+		src_svm->ghcb_sa_len = 0;
+		dst_svm->ghcb_sa_sync = src_svm->ghcb_sa_sync;
+		src_svm->ghcb_sa_sync = false;
+		dst_svm->ghcb_sa_free = src_svm->ghcb_sa_free;
+		src_svm->ghcb_sa_free = false;
+	}
+	to_kvm_svm(src)->sev_info.es_active = false;
+
+	return 0;
+}
+
 int svm_vm_migrate_from(struct kvm *kvm, unsigned int source_fd)
 {
 	struct kvm_sev_info *dst_sev = &to_kvm_svm(kvm)->sev_info;
@@ -1604,7 +1649,7 @@ int svm_vm_migrate_from(struct kvm *kvm, unsigned int source_fd)
 	if (ret)
 		goto out_fput;
 
-	if (!sev_guest(source_kvm) || sev_es_guest(source_kvm)) {
+	if (!sev_guest(source_kvm)) {
 		ret = -EINVAL;
 		goto out_source;
 	}
@@ -1615,6 +1660,12 @@ int svm_vm_migrate_from(struct kvm *kvm, unsigned int source_fd)
 	if (ret)
 		goto out_source_vcpu;
 
+	if (sev_es_guest(source_kvm)) {
+		ret = sev_es_migrate_from(kvm, source_kvm);
+		if (ret)
+			goto out_source_vcpu;
+	}
+
 	sev_migrate_from(dst_sev, &to_kvm_svm(source_kvm)->sev_info);
 	kvm_for_each_vcpu (i, vcpu, source_kvm) {
 		kvm_vcpu_reset(vcpu, /* init_event= */ false);
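For reference, the source_fd consumed by svm_vm_migrate_from is supplied by
userspace through a VM-level KVM_ENABLE_CAP ioctl on the destination VM. A
minimal sketch of that userspace call, assuming the capability name
KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM under which this feature was eventually
merged upstream:

	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/*
	 * Hand the source VM's SEV/SEV-ES context over to the destination
	 * VM. Both arguments are VM fds obtained from KVM_CREATE_VM.
	 */
	static int sev_intra_host_migrate(int dst_vm_fd, int src_vm_fd)
	{
		struct kvm_enable_cap cap = {
			.cap = KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM,
			.args[0] = (__u64)src_vm_fd,
		};

		return ioctl(dst_vm_fd, KVM_ENABLE_CAP, &cap);
	}

On success the destination VM owns the encrypted context and the source
VM's vCPUs are reset, matching the kvm_vcpu_reset() calls at the end of
svm_vm_migrate_from() above.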