| Message ID | 20211012204858.3614961-4-pgonda@google.com (mailing list archive) |
|---|---|
| State | New, archived |
| Headers | show |
| Series | Add AMD SEV and SEV-ES intra host migration support | expand |
On 10/12/21 3:48 PM, Peter Gonda wrote: > For SEV-ES to work with intra host migration the VMSAs, GHCB metadata, > and other SEV-ES info needs to be preserved along with the guest's > memory. > > Signed-off-by: Peter Gonda <pgonda@google.com> > Reviewed-by: Marc Orr <marcorr@google.com> > Cc: Marc Orr <marcorr@google.com> > Cc: Paolo Bonzini <pbonzini@redhat.com> > Cc: Sean Christopherson <seanjc@google.com> > Cc: David Rientjes <rientjes@google.com> > Cc: Dr. David Alan Gilbert <dgilbert@redhat.com> > Cc: Brijesh Singh <brijesh.singh@amd.com> > Cc: Tom Lendacky <thomas.lendacky@amd.com> > Cc: Vitaly Kuznetsov <vkuznets@redhat.com> > Cc: Wanpeng Li <wanpengli@tencent.com> > Cc: Jim Mattson <jmattson@google.com> > Cc: Joerg Roedel <joro@8bytes.org> > Cc: Thomas Gleixner <tglx@linutronix.de> > Cc: Ingo Molnar <mingo@redhat.com> > Cc: Borislav Petkov <bp@alien8.de> > Cc: "H. Peter Anvin" <hpa@zytor.com> > Cc: kvm@vger.kernel.org > Cc: linux-kernel@vger.kernel.org > --- > arch/x86/kvm/svm/sev.c | 48 +++++++++++++++++++++++++++++++++++++++++- > 1 file changed, 47 insertions(+), 1 deletion(-) > > diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c > index 42ff1ccfe1dc..a486ab08a766 100644 > --- a/arch/x86/kvm/svm/sev.c > +++ b/arch/x86/kvm/svm/sev.c > @@ -1600,6 +1600,46 @@ static void sev_migrate_from(struct kvm_sev_info *dst, > list_replace_init(&src->regions_list, &dst->regions_list); > } > > +static int sev_es_migrate_from(struct kvm *dst, struct kvm *src) > +{ > + int i; > + struct kvm_vcpu *dst_vcpu, *src_vcpu; > + struct vcpu_svm *dst_svm, *src_svm; > + > + if (atomic_read(&src->online_vcpus) != atomic_read(&dst->online_vcpus)) > + return -EINVAL; > + > + kvm_for_each_vcpu(i, src_vcpu, src) { > + if (!src_vcpu->arch.guest_state_protected) > + return -EINVAL; > + } > + > + kvm_for_each_vcpu(i, src_vcpu, src) { > + src_svm = to_svm(src_vcpu); > + dst_vcpu = kvm_get_vcpu(dst, i); > + dst_svm = to_svm(dst_vcpu); > + > + /* > + * Transfer VMSA and GHCB state 
to the destination. Nullify and > + * clear source fields as appropriate, the state now belongs to > + * the destination. > + */ > + dst_vcpu->vcpu_id = src_vcpu->vcpu_id; > + memcpy(&dst_svm->sev_es, &src_svm->sev_es, > + sizeof(dst_svm->sev_es)); > + dst_svm->vmcb->control.ghcb_gpa = > + src_svm->vmcb->control.ghcb_gpa; > + dst_svm->vmcb->control.vmsa_pa = __pa(dst_svm->sev_es.vmsa); > + dst_vcpu->arch.guest_state_protected = true; Maybe just add a blank line here to separate the setting and clearing (only if you have to do another version). > + src_svm->vmcb->control.ghcb_gpa = 0; > + src_svm->vmcb->control.vmsa_pa = 0; > + src_vcpu->arch.guest_state_protected = false; In the previous patch you were clearing some of the fields that are now in the vcpu_sev_es_state. Did you want to memset that to zero now? Thanks, Tom > + } > + to_kvm_svm(src)->sev_info.es_active = false; > + > + return 0; > +} > +
On Fri, Oct 15, 2021 at 3:36 PM Tom Lendacky <thomas.lendacky@amd.com> wrote: > > On 10/12/21 3:48 PM, Peter Gonda wrote: > > For SEV-ES to work with intra host migration the VMSAs, GHCB metadata, > > and other SEV-ES info needs to be preserved along with the guest's > > memory. > > > > Signed-off-by: Peter Gonda <pgonda@google.com> > > Reviewed-by: Marc Orr <marcorr@google.com> > > Cc: Marc Orr <marcorr@google.com> > > Cc: Paolo Bonzini <pbonzini@redhat.com> > > Cc: Sean Christopherson <seanjc@google.com> > > Cc: David Rientjes <rientjes@google.com> > > Cc: Dr. David Alan Gilbert <dgilbert@redhat.com> > > Cc: Brijesh Singh <brijesh.singh@amd.com> > > Cc: Tom Lendacky <thomas.lendacky@amd.com> > > Cc: Vitaly Kuznetsov <vkuznets@redhat.com> > > Cc: Wanpeng Li <wanpengli@tencent.com> > > Cc: Jim Mattson <jmattson@google.com> > > Cc: Joerg Roedel <joro@8bytes.org> > > Cc: Thomas Gleixner <tglx@linutronix.de> > > Cc: Ingo Molnar <mingo@redhat.com> > > Cc: Borislav Petkov <bp@alien8.de> > > Cc: "H. 
Peter Anvin" <hpa@zytor.com> > > Cc: kvm@vger.kernel.org > > Cc: linux-kernel@vger.kernel.org > > --- > > arch/x86/kvm/svm/sev.c | 48 +++++++++++++++++++++++++++++++++++++++++- > > 1 file changed, 47 insertions(+), 1 deletion(-) > > > > diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c > > index 42ff1ccfe1dc..a486ab08a766 100644 > > --- a/arch/x86/kvm/svm/sev.c > > +++ b/arch/x86/kvm/svm/sev.c > > @@ -1600,6 +1600,46 @@ static void sev_migrate_from(struct kvm_sev_info *dst, > > list_replace_init(&src->regions_list, &dst->regions_list); > > } > > > > +static int sev_es_migrate_from(struct kvm *dst, struct kvm *src) > > +{ > > + int i; > > + struct kvm_vcpu *dst_vcpu, *src_vcpu; > > + struct vcpu_svm *dst_svm, *src_svm; > > + > > + if (atomic_read(&src->online_vcpus) != atomic_read(&dst->online_vcpus)) > > + return -EINVAL; > > + > > + kvm_for_each_vcpu(i, src_vcpu, src) { > > + if (!src_vcpu->arch.guest_state_protected) > > + return -EINVAL; > > + } > > + > > + kvm_for_each_vcpu(i, src_vcpu, src) { > > + src_svm = to_svm(src_vcpu); > > + dst_vcpu = kvm_get_vcpu(dst, i); > > + dst_svm = to_svm(dst_vcpu); > > + > > + /* > > + * Transfer VMSA and GHCB state to the destination. Nullify and > > + * clear source fields as appropriate, the state now belongs to > > + * the destination. > > + */ > > + dst_vcpu->vcpu_id = src_vcpu->vcpu_id; > > + memcpy(&dst_svm->sev_es, &src_svm->sev_es, > > + sizeof(dst_svm->sev_es)); > > + dst_svm->vmcb->control.ghcb_gpa = > > + src_svm->vmcb->control.ghcb_gpa; > > + dst_svm->vmcb->control.vmsa_pa = __pa(dst_svm->sev_es.vmsa); > > + dst_vcpu->arch.guest_state_protected = true; > > Maybe just add a blank line here to separate the setting and clearing > (only if you have to do another version). 
> > > + src_svm->vmcb->control.ghcb_gpa = 0; > > + src_svm->vmcb->control.vmsa_pa = 0; > > + src_vcpu->arch.guest_state_protected = false; > > In the previous patch you were clearing some of the fields that are now in > the vcpu_sev_es_state. Did you want to memset that to zero now? Oops, making that an easy memset was one of the pros of the |sev_es| refactor. Will fix and add newline in V11. > > Thanks, > Tom > > > + } > > + to_kvm_svm(src)->sev_info.es_active = false; > > + > > + return 0; > > +} > > +
On 10/20/21 3:43 PM, Peter Gonda wrote: > On Fri, Oct 15, 2021 at 3:36 PM Tom Lendacky <thomas.lendacky@amd.com> wrote: >> >> On 10/12/21 3:48 PM, Peter Gonda wrote: >>> For SEV-ES to work with intra host migration the VMSAs, GHCB metadata, >>> and other SEV-ES info needs to be preserved along with the guest's >>> memory. >>> >>> Signed-off-by: Peter Gonda <pgonda@google.com> >>> Reviewed-by: Marc Orr <marcorr@google.com> >>> Cc: Marc Orr <marcorr@google.com> >>> Cc: Paolo Bonzini <pbonzini@redhat.com> >>> Cc: Sean Christopherson <seanjc@google.com> >>> Cc: David Rientjes <rientjes@google.com> >>> Cc: Dr. David Alan Gilbert <dgilbert@redhat.com> >>> Cc: Brijesh Singh <brijesh.singh@amd.com> >>> Cc: Tom Lendacky <thomas.lendacky@amd.com> >>> Cc: Vitaly Kuznetsov <vkuznets@redhat.com> >>> Cc: Wanpeng Li <wanpengli@tencent.com> >>> Cc: Jim Mattson <jmattson@google.com> >>> Cc: Joerg Roedel <joro@8bytes.org> >>> Cc: Thomas Gleixner <tglx@linutronix.de> >>> Cc: Ingo Molnar <mingo@redhat.com> >>> Cc: Borislav Petkov <bp@alien8.de> >>> Cc: "H. 
Peter Anvin" <hpa@zytor.com> >>> Cc: kvm@vger.kernel.org >>> Cc: linux-kernel@vger.kernel.org >>> --- >>> arch/x86/kvm/svm/sev.c | 48 +++++++++++++++++++++++++++++++++++++++++- >>> 1 file changed, 47 insertions(+), 1 deletion(-) >>> >>> diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c >>> index 42ff1ccfe1dc..a486ab08a766 100644 >>> --- a/arch/x86/kvm/svm/sev.c >>> +++ b/arch/x86/kvm/svm/sev.c >>> @@ -1600,6 +1600,46 @@ static void sev_migrate_from(struct kvm_sev_info *dst, >>> list_replace_init(&src->regions_list, &dst->regions_list); >>> } >>> >>> +static int sev_es_migrate_from(struct kvm *dst, struct kvm *src) >>> +{ >>> + int i; >>> + struct kvm_vcpu *dst_vcpu, *src_vcpu; >>> + struct vcpu_svm *dst_svm, *src_svm; >>> + >>> + if (atomic_read(&src->online_vcpus) != atomic_read(&dst->online_vcpus)) >>> + return -EINVAL; >>> + >>> + kvm_for_each_vcpu(i, src_vcpu, src) { >>> + if (!src_vcpu->arch.guest_state_protected) >>> + return -EINVAL; >>> + } >>> + >>> + kvm_for_each_vcpu(i, src_vcpu, src) { >>> + src_svm = to_svm(src_vcpu); >>> + dst_vcpu = kvm_get_vcpu(dst, i); >>> + dst_svm = to_svm(dst_vcpu); >>> + >>> + /* >>> + * Transfer VMSA and GHCB state to the destination. Nullify and >>> + * clear source fields as appropriate, the state now belongs to >>> + * the destination. >>> + */ >>> + dst_vcpu->vcpu_id = src_vcpu->vcpu_id; >>> + memcpy(&dst_svm->sev_es, &src_svm->sev_es, >>> + sizeof(dst_svm->sev_es)); >>> + dst_svm->vmcb->control.ghcb_gpa = >>> + src_svm->vmcb->control.ghcb_gpa; >>> + dst_svm->vmcb->control.vmsa_pa = __pa(dst_svm->sev_es.vmsa); >>> + dst_vcpu->arch.guest_state_protected = true; >> >> Maybe just add a blank line here to separate the setting and clearing >> (only if you have to do another version). 
>> >>> + src_svm->vmcb->control.ghcb_gpa = 0; >>> + src_svm->vmcb->control.vmsa_pa = 0; >>> + src_vcpu->arch.guest_state_protected = false; >> >> In the previous patch you were clearing some of the fields that are now in >> the vcpu_sev_es_state. Did you want to memset that to zero now? > > Oops, making that an easy memset was one of the pros of the |sev_es| > refactor. Will fix and add newline in V11. And totally up to you, but I think you can replace the memcpy() above with a direct assignment, if you want: dst_svm->sev_es = src_svm->sev_es; Thanks, Tom > >> >> Thanks, >> Tom >> >>> + } >>> + to_kvm_svm(src)->sev_info.es_active = false; >>> + >>> + return 0; >>> +} >>> +
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 42ff1ccfe1dc..a486ab08a766 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -1600,6 +1600,46 @@ static void sev_migrate_from(struct kvm_sev_info *dst, list_replace_init(&src->regions_list, &dst->regions_list); } +static int sev_es_migrate_from(struct kvm *dst, struct kvm *src) +{ + int i; + struct kvm_vcpu *dst_vcpu, *src_vcpu; + struct vcpu_svm *dst_svm, *src_svm; + + if (atomic_read(&src->online_vcpus) != atomic_read(&dst->online_vcpus)) + return -EINVAL; + + kvm_for_each_vcpu(i, src_vcpu, src) { + if (!src_vcpu->arch.guest_state_protected) + return -EINVAL; + } + + kvm_for_each_vcpu(i, src_vcpu, src) { + src_svm = to_svm(src_vcpu); + dst_vcpu = kvm_get_vcpu(dst, i); + dst_svm = to_svm(dst_vcpu); + + /* + * Transfer VMSA and GHCB state to the destination. Nullify and + * clear source fields as appropriate, the state now belongs to + * the destination. + */ + dst_vcpu->vcpu_id = src_vcpu->vcpu_id; + memcpy(&dst_svm->sev_es, &src_svm->sev_es, + sizeof(dst_svm->sev_es)); + dst_svm->vmcb->control.ghcb_gpa = + src_svm->vmcb->control.ghcb_gpa; + dst_svm->vmcb->control.vmsa_pa = __pa(dst_svm->sev_es.vmsa); + dst_vcpu->arch.guest_state_protected = true; + src_svm->vmcb->control.ghcb_gpa = 0; + src_svm->vmcb->control.vmsa_pa = 0; + src_vcpu->arch.guest_state_protected = false; + } + to_kvm_svm(src)->sev_info.es_active = false; + + return 0; +} + int svm_vm_migrate_from(struct kvm *kvm, unsigned int source_fd) { struct kvm_sev_info *dst_sev = &to_kvm_svm(kvm)->sev_info; @@ -1628,7 +1668,7 @@ int svm_vm_migrate_from(struct kvm *kvm, unsigned int source_fd) if (ret) goto out_fput; - if (!sev_guest(source_kvm) || sev_es_guest(source_kvm)) { + if (!sev_guest(source_kvm)) { ret = -EINVAL; goto out_source; } @@ -1639,6 +1679,12 @@ int svm_vm_migrate_from(struct kvm *kvm, unsigned int source_fd) if (ret) goto out_source_vcpu; + if (sev_es_guest(source_kvm)) { + ret = sev_es_migrate_from(kvm, 
source_kvm); + if (ret) + goto out_source_vcpu; + } + sev_migrate_from(dst_sev, &to_kvm_svm(source_kvm)->sev_info); kvm_for_each_vcpu (i, vcpu, source_kvm) { kvm_vcpu_reset(vcpu, /* init_event= */ false);