
KVM: SEV: Allow SEV intra-host migration of VM with mirrors

Message ID 20220111154048.2108264-1-pgonda@google.com (mailing list archive)
State New, archived
Series KVM: SEV: Allow SEV intra-host migration of VM with mirrors

Commit Message

Peter Gonda Jan. 11, 2022, 3:40 p.m. UTC
For SEV-ES VMs with mirrors to be intra-host migrated, they need to be
able to migrate with the mirror. This is because all VMSAs need to be
added into the VM with LAUNCH_UPDATE_VMSA before LAUNCH_FINISH. Allowing
migration with mirrors allows users of SEV-ES to keep the mirror VMs'
VMSAs during migration.

Adds a list of mirror VMs for the original VM to iterate through during
its migration. During the iteration the owner pointers can be updated
from the source to the destination. This fixes the ASID leaking issue
that caused migration of VMs with mirrors to be blocked.

Signed-off-by: Peter Gonda <pgonda@google.com>
Cc: Sean Christopherson <seanjc@google.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Marc Orr <marcorr@google.com>
Cc: kvm@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
---
 arch/x86/kvm/svm/sev.c                        | 45 ++++++++++++-----
 arch/x86/kvm/svm/svm.h                        |  4 ++
 .../selftests/kvm/x86_64/sev_migrate_tests.c  | 48 +++++++++++++------
 3 files changed, 70 insertions(+), 27 deletions(-)
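
To make the mechanism above concrete, here is a minimal, standalone C sketch
(illustrative only, not the KVM code: struct vm, struct mirror, the plain
integer refcount, and migrate_mirrors() are hypothetical stand-ins for struct
kvm, struct kvm_sev_info, the struct kvm reference count, and
sev_migrate_from()). It models how the mirror list is spliced from the source
to the destination and how each mirror's owner pointer and reference move with
it.

	/*
	 * Conceptual model only: the names and the int refcount are
	 * illustrative, not the real KVM types or APIs.
	 */
	#include <assert.h>
	#include <stdio.h>

	struct vm;

	struct mirror {
		struct vm *owner;	/* plays the role of enc_context_owner */
		struct mirror *next;
	};

	struct vm {
		int refcount;		/* plays the role of the struct kvm refcount */
		struct mirror *mirrors;	/* plays the role of the new mirror_vms list */
	};

	/* Models what sev_migrate_from() does for mirrors. */
	static void migrate_mirrors(struct vm *dst, struct vm *src)
	{
		struct mirror *m;

		/* Splice the whole mirror list over to the destination. */
		dst->mirrors = src->mirrors;
		src->mirrors = NULL;

		/* Transfer each mirror's reference and re-point its owner. */
		for (m = dst->mirrors; m; m = m->next) {
			dst->refcount++;	/* like kvm_get_kvm(dst_kvm) */
			src->refcount--;	/* like kvm_put_kvm(src_kvm) */
			m->owner = dst;
		}
	}

	int main(void)
	{
		struct vm src = { .refcount = 2 };	/* userspace ref + mirror's ref */
		struct vm dst = { .refcount = 1 };	/* userspace ref */
		struct mirror mirror = { .owner = &src };

		src.mirrors = &mirror;
		migrate_mirrors(&dst, &src);

		assert(mirror.owner == &dst);
		assert(dst.refcount == 2 && src.refcount == 1);
		printf("mirror's owner and reference moved to the destination\n");
		return 0;
	}

In the actual patch the splice is done with list_cut_before() on the new
mirror_vms list, and the reference transfer is the kvm_get_kvm()/kvm_put_kvm()
pair in sev_migrate_from().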

Comments

Sean Christopherson Feb. 7, 2022, 11:19 p.m. UTC | #1
On Tue, Jan 11, 2022, Peter Gonda wrote:
> @@ -1623,22 +1624,41 @@ static void sev_unlock_vcpus_for_migration(struct kvm *kvm)
>  	}
>  }
>  
> -static void sev_migrate_from(struct kvm_sev_info *dst,
> -			      struct kvm_sev_info *src)
> +static void sev_migrate_from(struct kvm *dst_kvm, struct kvm *src_kvm)
>  {
> +	struct kvm_sev_info *dst = &to_kvm_svm(dst_kvm)->sev_info;
> +	struct kvm_sev_info *src = &to_kvm_svm(src_kvm)->sev_info;
> +	struct kvm_sev_info *mirror, *tmp;
> +
>  	dst->active = true;
>  	dst->asid = src->asid;
>  	dst->handle = src->handle;
>  	dst->pages_locked = src->pages_locked;
>  	dst->enc_context_owner = src->enc_context_owner;
> +	dst->num_mirrored_vms = src->num_mirrored_vms;
>  
>  	src->asid = 0;
>  	src->active = false;
>  	src->handle = 0;
>  	src->pages_locked = 0;
>  	src->enc_context_owner = NULL;
> +	src->num_mirrored_vms = 0;
>  
>  	list_cut_before(&dst->regions_list, &src->regions_list, &src->regions_list);
> +	list_cut_before(&dst->mirror_vms, &src->mirror_vms, &src->mirror_vms);
> +
> +	/*
> +	 * If this VM has mirrors we need to update the KVM refcounts from the
> +	 * source to the destination.
> +	 */

It's worth calling out that a reference is being taken on behalf of the mirror;
that detail is easy to miss.  And maybe call out that the caller holds a reference
to @src_kvm?

	/*
	 * If this VM has mirrors, "transfer" each mirror's refcount of the
	 * source to the destination (this KVM).  The caller holds a reference
	 * to the source, so there's no danger of use-after-free.
	 */

> +	if (dst->num_mirrored_vms > 0) {
> +		list_for_each_entry_safe(mirror, tmp, &dst->mirror_vms,
> +					  mirror_entry) {
> +			kvm_get_kvm(dst_kvm);
> +			kvm_put_kvm(src_kvm);
> +			mirror->enc_context_owner = dst_kvm;
> +		}
> +	}
>  }
>  
>  static int sev_es_migrate_from(struct kvm *dst, struct kvm *src)

...

> @@ -2050,10 +2062,17 @@ void sev_vm_destroy(struct kvm *kvm)
>  	if (is_mirroring_enc_context(kvm)) {
>  		struct kvm *owner_kvm = sev->enc_context_owner;
>  		struct kvm_sev_info *owner_sev = &to_kvm_svm(owner_kvm)->sev_info;
> +		struct kvm_sev_info *mirror, *tmp;
>  
>  		mutex_lock(&owner_kvm->lock);
>  		if (!WARN_ON(!owner_sev->num_mirrored_vms))
>  			owner_sev->num_mirrored_vms--;
> +
> +		list_for_each_entry_safe(mirror, tmp, &owner_sev->mirror_vms,
> +					  mirror_entry)
> +			if (mirror == sev)
> +				list_del(&mirror->mirror_entry);
> +

There's no need to walk the list just to find the entry you already have.  Maaaaybe
if you were sanity checking, but it's not like we can do anything helpful if the
sanity check fails, so eating a #GP due to consuming e.g. LIST_POISON1 is just as
good as anything else.

	if (is_mirroring_enc_context(kvm)) {
		struct kvm *owner_kvm = sev->enc_context_owner;

		mutex_lock(&owner_kvm->lock);
		list_del(&sev->mirror_entry);
		mutex_unlock(&owner_kvm->lock);
		kvm_put_kvm(owner_kvm);
		return;
	}

>  		mutex_unlock(&owner_kvm->lock);
>  		kvm_put_kvm(owner_kvm);
>  		return;
> diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
> index daa8ca84afcc..b9f5e33d5232 100644
> --- a/arch/x86/kvm/svm/svm.h
> +++ b/arch/x86/kvm/svm/svm.h
> @@ -81,6 +81,10 @@ struct kvm_sev_info {
>  	u64 ap_jump_table;	/* SEV-ES AP Jump Table address */
>  	struct kvm *enc_context_owner; /* Owner of copied encryption context */
>  	unsigned long num_mirrored_vms; /* Number of VMs sharing this ASID */
> +	union {
> +		struct list_head mirror_vms; /* List of VMs mirroring */
> +		struct list_head mirror_entry; /* Use as a list entry of mirrors */
> +	};


Whoops.  IIRC, I suggested a union for tracking mirrors vs mirrored.  After seeing
the code, that was a bad suggestion.  Memory isn't at a premium for a per-VM
object, so storing an extra list_head is a non-issue.

If we split the two, then num_mirrored_vms goes away, and more importantly we won't
have to deal with bugs where we inevitably forget to guard access to the union with
a check against num_mirrored_vms.

E.g. (completely untested and probably incomplete)

---
 arch/x86/kvm/svm/sev.c | 32 +++++++++-----------------------
 arch/x86/kvm/svm/svm.h |  7 ++-----
 2 files changed, 11 insertions(+), 28 deletions(-)

diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 369cf8c4da61..41f7e733c33e 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -1635,29 +1635,25 @@ static void sev_migrate_from(struct kvm *dst_kvm, struct kvm *src_kvm)
 	dst->handle = src->handle;
 	dst->pages_locked = src->pages_locked;
 	dst->enc_context_owner = src->enc_context_owner;
-	dst->num_mirrored_vms = src->num_mirrored_vms;

 	src->asid = 0;
 	src->active = false;
 	src->handle = 0;
 	src->pages_locked = 0;
 	src->enc_context_owner = NULL;
-	src->num_mirrored_vms = 0;

 	list_cut_before(&dst->regions_list, &src->regions_list, &src->regions_list);
 	list_cut_before(&dst->mirror_vms, &src->mirror_vms, &src->mirror_vms);

 	/*
-	 * If this VM has mirrors we need to update the KVM refcounts from the
-	 * source to the destination.
+	 * If this VM has mirrors, "transfer" each mirror's refcount from the
+	 * source to the destination (this KVM).  The caller holds a reference
+	 * to the source, so there's no danger of use-after-free.
 	 */
-	if (dst->num_mirrored_vms > 0) {
-		list_for_each_entry_safe(mirror, tmp, &dst->mirror_vms,
-					  mirror_entry) {
-			kvm_get_kvm(dst_kvm);
-			kvm_put_kvm(src_kvm);
-			mirror->enc_context_owner = dst_kvm;
-		}
+	list_for_each_entry_safe(mirror, tmp, &dst->mirror_vms, mirror_entry) {
+		kvm_get_kvm(dst_kvm);
+		kvm_put_kvm(src_kvm);
+		mirror->enc_context_owner = dst_kvm;
 	}
 }

@@ -2019,7 +2015,6 @@ int sev_vm_copy_enc_context_from(struct kvm *kvm, unsigned int source_fd)
 	 */
 	source_sev = &to_kvm_svm(source_kvm)->sev_info;
 	kvm_get_kvm(source_kvm);
-	source_sev->num_mirrored_vms++;
 	mirror_sev = &to_kvm_svm(kvm)->sev_info;
 	list_add_tail(&mirror_sev->mirror_entry, &source_sev->mirror_vms);

@@ -2053,7 +2048,7 @@ void sev_vm_destroy(struct kvm *kvm)
 	struct list_head *head = &sev->regions_list;
 	struct list_head *pos, *q;

-	WARN_ON(sev->num_mirrored_vms);
+	WARN_ON(!list_empty(&sev->mirror_vms));

 	if (!sev_guest(kvm))
 		return;
@@ -2061,18 +2056,9 @@ void sev_vm_destroy(struct kvm *kvm)
 	/* If this is a mirror_kvm release the enc_context_owner and skip sev cleanup */
 	if (is_mirroring_enc_context(kvm)) {
 		struct kvm *owner_kvm = sev->enc_context_owner;
-		struct kvm_sev_info *owner_sev = &to_kvm_svm(owner_kvm)->sev_info;
-		struct kvm_sev_info *mirror, *tmp;

 		mutex_lock(&owner_kvm->lock);
-		if (!WARN_ON(!owner_sev->num_mirrored_vms))
-			owner_sev->num_mirrored_vms--;
-
-		list_for_each_entry_safe(mirror, tmp, &owner_sev->mirror_vms,
-					  mirror_entry)
-			if (mirror == sev)
-				list_del(&mirror->mirror_entry);
-
+		list_del(&sev->mirror_entry);
 		mutex_unlock(&owner_kvm->lock);
 		kvm_put_kvm(owner_kvm);
 		return;
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 0876329f273d..79bf568c2558 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -79,11 +79,8 @@ struct kvm_sev_info {
 	struct list_head regions_list;  /* List of registered regions */
 	u64 ap_jump_table;	/* SEV-ES AP Jump Table address */
 	struct kvm *enc_context_owner; /* Owner of copied encryption context */
-	unsigned long num_mirrored_vms; /* Number of VMs sharing this ASID */
-	union {
-		struct list_head mirror_vms; /* List of VMs mirroring */
-		struct list_head mirror_entry; /* Use as a list entry of mirrors */
-	};
+	struct list_head mirror_vms; /* List of VMs mirroring */
+	struct list_head mirror_entry; /* Use as a list entry of mirrors */
 	struct misc_cg *misc_cg; /* For misc cgroup accounting */
 	atomic_t migration_in_progress;
 };

base-commit: 618a9a6fda17f48d86a1ce9851bd8ceffdc57d75
--
Peter Gonda Feb. 11, 2022, 7:38 p.m. UTC | #2
On Mon, Feb 7, 2022 at 4:19 PM Sean Christopherson <seanjc@google.com> wrote:
>
> On Tue, Jan 11, 2022, Peter Gonda wrote:
> > @@ -1623,22 +1624,41 @@ static void sev_unlock_vcpus_for_migration(struct kvm *kvm)
> >       }
> >  }
> >
> > -static void sev_migrate_from(struct kvm_sev_info *dst,
> > -                           struct kvm_sev_info *src)
> > +static void sev_migrate_from(struct kvm *dst_kvm, struct kvm *src_kvm)
> >  {
> > +     struct kvm_sev_info *dst = &to_kvm_svm(dst_kvm)->sev_info;
> > +     struct kvm_sev_info *src = &to_kvm_svm(src_kvm)->sev_info;
> > +     struct kvm_sev_info *mirror, *tmp;
> > +
> >       dst->active = true;
> >       dst->asid = src->asid;
> >       dst->handle = src->handle;
> >       dst->pages_locked = src->pages_locked;
> >       dst->enc_context_owner = src->enc_context_owner;
> > +     dst->num_mirrored_vms = src->num_mirrored_vms;
> >
> >       src->asid = 0;
> >       src->active = false;
> >       src->handle = 0;
> >       src->pages_locked = 0;
> >       src->enc_context_owner = NULL;
> > +     src->num_mirrored_vms = 0;
> >
> >       list_cut_before(&dst->regions_list, &src->regions_list, &src->regions_list);
> > +     list_cut_before(&dst->mirror_vms, &src->mirror_vms, &src->mirror_vms);
> > +
> > +     /*
> > +      * If this VM has mirrors we need to update the KVM refcounts from the
> > +      * source to the destination.
> > +      */
>
> It's worth calling out that a reference is being taken on behalf of the mirror;
> that detail is easy to miss.  And maybe call out that the caller holds a reference
> to @src_kvm?
>
>         /*
>          * If this VM has mirrors, "transfer" each mirror's refcount of the
>          * source to the destination (this KVM).  The caller holds a reference
>          * to the source, so there's no danger of use-after-free.
>          */

Thanks, I took your comment.

>
> > +     if (dst->num_mirrored_vms > 0) {
> > +             list_for_each_entry_safe(mirror, tmp, &dst->mirror_vms,
> > +                                       mirror_entry) {
> > +                     kvm_get_kvm(dst_kvm);
> > +                     kvm_put_kvm(src_kvm);
> > +                     mirror->enc_context_owner = dst_kvm;
> > +             }
> > +     }
> >  }
> >
> >  static int sev_es_migrate_from(struct kvm *dst, struct kvm *src)
>
> ...
>
> > @@ -2050,10 +2062,17 @@ void sev_vm_destroy(struct kvm *kvm)
> >       if (is_mirroring_enc_context(kvm)) {
> >               struct kvm *owner_kvm = sev->enc_context_owner;
> >               struct kvm_sev_info *owner_sev = &to_kvm_svm(owner_kvm)->sev_info;
> > +             struct kvm_sev_info *mirror, *tmp;
> >
> >               mutex_lock(&owner_kvm->lock);
> >               if (!WARN_ON(!owner_sev->num_mirrored_vms))
> >                       owner_sev->num_mirrored_vms--;
> > +
> > +             list_for_each_entry_safe(mirror, tmp, &owner_sev->mirror_vms,
> > +                                       mirror_entry)
> > +                     if (mirror == sev)
> > +                             list_del(&mirror->mirror_entry);
> > +
>
> There's no need to walk the list just to find the entry you already have.  Maaaaybe
> if you were sanity checking, but it's not like we can do anything helpful if the
> sanity check fails, so eating a #GP due to consuming e.g. LIST_POISON1 is just as
> good as anything else.
>
>         if (is_mirroring_enc_context(kvm)) {
>                 struct kvm *owner_kvm = sev->enc_context_owner;
>
>                 mutex_lock(&owner_kvm->lock);
>                 list_del(&sev->mirror_entry);
>                 mutex_unlock(&owner_kvm->lock);
>                 kvm_put_kvm(owner_kvm);
>                 return;
>         }

That's better, thanks.

>
> >               mutex_unlock(&owner_kvm->lock);
> >               kvm_put_kvm(owner_kvm);
> >               return;
> > diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
> > index daa8ca84afcc..b9f5e33d5232 100644
> > --- a/arch/x86/kvm/svm/svm.h
> > +++ b/arch/x86/kvm/svm/svm.h
> > @@ -81,6 +81,10 @@ struct kvm_sev_info {
> >       u64 ap_jump_table;      /* SEV-ES AP Jump Table address */
> >       struct kvm *enc_context_owner; /* Owner of copied encryption context */
> >       unsigned long num_mirrored_vms; /* Number of VMs sharing this ASID */
> > +     union {
> > +             struct list_head mirror_vms; /* List of VMs mirroring */
> > +             struct list_head mirror_entry; /* Use as a list entry of mirrors */
> > +     };
>
>
> Whoops.  IIRC, I suggested a union for tracking mirrors vs mirrored.  After seeing
> the code, that was a bad suggestion.  Memory isn't at a premium for a per-VM
> object, so storing an extra list_head is a non-issue.
>
> If we split the two, then num_mirrored_vms goes away, and more importantly we won't
> have to deal with bugs where we inevitably forget to guard access to the union with
> a check against num_mirrored_vms.
>
> E.g. (completely untested and probably incomplete)

Done. That's more readable now, I think.

>
> ---
>  arch/x86/kvm/svm/sev.c | 32 +++++++++-----------------------
>  arch/x86/kvm/svm/svm.h |  7 ++-----
>  2 files changed, 11 insertions(+), 28 deletions(-)
>
> diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
> index 369cf8c4da61..41f7e733c33e 100644
> --- a/arch/x86/kvm/svm/sev.c
> +++ b/arch/x86/kvm/svm/sev.c
> @@ -1635,29 +1635,25 @@ static void sev_migrate_from(struct kvm *dst_kvm, struct kvm *src_kvm)
>         dst->handle = src->handle;
>         dst->pages_locked = src->pages_locked;
>         dst->enc_context_owner = src->enc_context_owner;
> -       dst->num_mirrored_vms = src->num_mirrored_vms;
>
>         src->asid = 0;
>         src->active = false;
>         src->handle = 0;
>         src->pages_locked = 0;
>         src->enc_context_owner = NULL;
> -       src->num_mirrored_vms = 0;
>
>         list_cut_before(&dst->regions_list, &src->regions_list, &src->regions_list);
>         list_cut_before(&dst->mirror_vms, &src->mirror_vms, &src->mirror_vms);
>
>         /*
> -        * If this VM has mirrors we need to update the KVM refcounts from the
> -        * source to the destination.
> +        * If this VM has mirrors, "transfer" each mirror's refcount from the
> +        * source to the destination (this KVM).  The caller holds a reference
> +        * to the source, so there's no danger of use-after-free.
>          */
> -       if (dst->num_mirrored_vms > 0) {
> -               list_for_each_entry_safe(mirror, tmp, &dst->mirror_vms,
> -                                         mirror_entry) {
> -                       kvm_get_kvm(dst_kvm);
> -                       kvm_put_kvm(src_kvm);
> -                       mirror->enc_context_owner = dst_kvm;
> -               }
> +       list_for_each_entry_safe(mirror, tmp, &dst->mirror_vms, mirror_entry) {
> +               kvm_get_kvm(dst_kvm);
> +               kvm_put_kvm(src_kvm);
> +               mirror->enc_context_owner = dst_kvm;
>         }
>  }
>
> @@ -2019,7 +2015,6 @@ int sev_vm_copy_enc_context_from(struct kvm *kvm, unsigned int source_fd)
>          */
>         source_sev = &to_kvm_svm(source_kvm)->sev_info;
>         kvm_get_kvm(source_kvm);
> -       source_sev->num_mirrored_vms++;
>         mirror_sev = &to_kvm_svm(kvm)->sev_info;
>         list_add_tail(&mirror_sev->mirror_entry, &source_sev->mirror_vms);
>
> @@ -2053,7 +2048,7 @@ void sev_vm_destroy(struct kvm *kvm)
>         struct list_head *head = &sev->regions_list;
>         struct list_head *pos, *q;
>
> -       WARN_ON(sev->num_mirrored_vms);
> +       WARN_ON(!list_empty(&sev->mirror_vms));
>
>         if (!sev_guest(kvm))
>                 return;
> @@ -2061,18 +2056,9 @@ void sev_vm_destroy(struct kvm *kvm)
>         /* If this is a mirror_kvm release the enc_context_owner and skip sev cleanup */
>         if (is_mirroring_enc_context(kvm)) {
>                 struct kvm *owner_kvm = sev->enc_context_owner;
> -               struct kvm_sev_info *owner_sev = &to_kvm_svm(owner_kvm)->sev_info;
> -               struct kvm_sev_info *mirror, *tmp;
>
>                 mutex_lock(&owner_kvm->lock);
> -               if (!WARN_ON(!owner_sev->num_mirrored_vms))
> -                       owner_sev->num_mirrored_vms--;
> -
> -               list_for_each_entry_safe(mirror, tmp, &owner_sev->mirror_vms,
> -                                         mirror_entry)
> -                       if (mirror == sev)
> -                               list_del(&mirror->mirror_entry);
> -
> +               list_del(&sev->mirror_entry);
>                 mutex_unlock(&owner_kvm->lock);
>                 kvm_put_kvm(owner_kvm);
>                 return;
> diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
> index 0876329f273d..79bf568c2558 100644
> --- a/arch/x86/kvm/svm/svm.h
> +++ b/arch/x86/kvm/svm/svm.h
> @@ -79,11 +79,8 @@ struct kvm_sev_info {
>         struct list_head regions_list;  /* List of registered regions */
>         u64 ap_jump_table;      /* SEV-ES AP Jump Table address */
>         struct kvm *enc_context_owner; /* Owner of copied encryption context */
> -       unsigned long num_mirrored_vms; /* Number of VMs sharing this ASID */
> -       union {
> -               struct list_head mirror_vms; /* List of VMs mirroring */
> -               struct list_head mirror_entry; /* Use as a list entry of mirrors */
> -       };
> +       struct list_head mirror_vms; /* List of VMs mirroring */
> +       struct list_head mirror_entry; /* Use as a list entry of mirrors */
>         struct misc_cg *misc_cg; /* For misc cgroup accounting */
>         atomic_t migration_in_progress;
>  };
>
> base-commit: 618a9a6fda17f48d86a1ce9851bd8ceffdc57d75

I added a base commit to my V2 patch.

> --
>

Patch

diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 322553322202..e396ae04f891 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -258,6 +258,7 @@  static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
 		goto e_free;
 
 	INIT_LIST_HEAD(&sev->regions_list);
+	INIT_LIST_HEAD(&sev->mirror_vms);
 
 	return 0;
 
@@ -1623,22 +1624,41 @@  static void sev_unlock_vcpus_for_migration(struct kvm *kvm)
 	}
 }
 
-static void sev_migrate_from(struct kvm_sev_info *dst,
-			      struct kvm_sev_info *src)
+static void sev_migrate_from(struct kvm *dst_kvm, struct kvm *src_kvm)
 {
+	struct kvm_sev_info *dst = &to_kvm_svm(dst_kvm)->sev_info;
+	struct kvm_sev_info *src = &to_kvm_svm(src_kvm)->sev_info;
+	struct kvm_sev_info *mirror, *tmp;
+
 	dst->active = true;
 	dst->asid = src->asid;
 	dst->handle = src->handle;
 	dst->pages_locked = src->pages_locked;
 	dst->enc_context_owner = src->enc_context_owner;
+	dst->num_mirrored_vms = src->num_mirrored_vms;
 
 	src->asid = 0;
 	src->active = false;
 	src->handle = 0;
 	src->pages_locked = 0;
 	src->enc_context_owner = NULL;
+	src->num_mirrored_vms = 0;
 
 	list_cut_before(&dst->regions_list, &src->regions_list, &src->regions_list);
+	list_cut_before(&dst->mirror_vms, &src->mirror_vms, &src->mirror_vms);
+
+	/*
+	 * If this VM has mirrors we need to update the KVM refcounts from the
+	 * source to the destination.
+	 */
+	if (dst->num_mirrored_vms > 0) {
+		list_for_each_entry_safe(mirror, tmp, &dst->mirror_vms,
+					  mirror_entry) {
+			kvm_get_kvm(dst_kvm);
+			kvm_put_kvm(src_kvm);
+			mirror->enc_context_owner = dst_kvm;
+		}
+	}
 }
 
 static int sev_es_migrate_from(struct kvm *dst, struct kvm *src)
@@ -1708,15 +1728,6 @@  int svm_vm_migrate_from(struct kvm *kvm, unsigned int source_fd)
 
 	src_sev = &to_kvm_svm(source_kvm)->sev_info;
 
-	/*
-	 * VMs mirroring src's encryption context rely on it to keep the
-	 * ASID allocated, but below we are clearing src_sev->asid.
-	 */
-	if (src_sev->num_mirrored_vms) {
-		ret = -EBUSY;
-		goto out_unlock;
-	}
-
 	dst_sev->misc_cg = get_current_misc_cg();
 	cg_cleanup_sev = dst_sev;
 	if (dst_sev->misc_cg != src_sev->misc_cg) {
@@ -1738,7 +1749,7 @@  int svm_vm_migrate_from(struct kvm *kvm, unsigned int source_fd)
 		if (ret)
 			goto out_source_vcpu;
 	}
-	sev_migrate_from(dst_sev, src_sev);
+	sev_migrate_from(kvm, source_kvm);
 	kvm_vm_dead(source_kvm);
 	cg_cleanup_sev = src_sev;
 	ret = 0;
@@ -2009,9 +2020,10 @@  int svm_vm_copy_asid_from(struct kvm *kvm, unsigned int source_fd)
 	source_sev = &to_kvm_svm(source_kvm)->sev_info;
 	kvm_get_kvm(source_kvm);
 	source_sev->num_mirrored_vms++;
+	mirror_sev = &to_kvm_svm(kvm)->sev_info;
+	list_add_tail(&mirror_sev->mirror_entry, &source_sev->mirror_vms);
 
 	/* Set enc_context_owner and copy its encryption context over */
-	mirror_sev = &to_kvm_svm(kvm)->sev_info;
 	mirror_sev->enc_context_owner = source_kvm;
 	mirror_sev->active = true;
 	mirror_sev->asid = source_sev->asid;
@@ -2050,10 +2062,17 @@  void sev_vm_destroy(struct kvm *kvm)
 	if (is_mirroring_enc_context(kvm)) {
 		struct kvm *owner_kvm = sev->enc_context_owner;
 		struct kvm_sev_info *owner_sev = &to_kvm_svm(owner_kvm)->sev_info;
+		struct kvm_sev_info *mirror, *tmp;
 
 		mutex_lock(&owner_kvm->lock);
 		if (!WARN_ON(!owner_sev->num_mirrored_vms))
 			owner_sev->num_mirrored_vms--;
+
+		list_for_each_entry_safe(mirror, tmp, &owner_sev->mirror_vms,
+					  mirror_entry)
+			if (mirror == sev)
+				list_del(&mirror->mirror_entry);
+
 		mutex_unlock(&owner_kvm->lock);
 		kvm_put_kvm(owner_kvm);
 		return;
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index daa8ca84afcc..b9f5e33d5232 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -81,6 +81,10 @@  struct kvm_sev_info {
 	u64 ap_jump_table;	/* SEV-ES AP Jump Table address */
 	struct kvm *enc_context_owner; /* Owner of copied encryption context */
 	unsigned long num_mirrored_vms; /* Number of VMs sharing this ASID */
+	union {
+		struct list_head mirror_vms; /* List of VMs mirroring */
+		struct list_head mirror_entry; /* Use as a list entry of mirrors */
+	};
 	struct misc_cg *misc_cg; /* For misc cgroup accounting */
 	atomic_t migration_in_progress;
 };
diff --git a/tools/testing/selftests/kvm/x86_64/sev_migrate_tests.c b/tools/testing/selftests/kvm/x86_64/sev_migrate_tests.c
index 80056bbbb003..cb1962c89945 100644
--- a/tools/testing/selftests/kvm/x86_64/sev_migrate_tests.c
+++ b/tools/testing/selftests/kvm/x86_64/sev_migrate_tests.c
@@ -341,37 +341,57 @@  static void test_sev_mirror_parameters(void)
 
 static void test_sev_move_copy(void)
 {
-	struct kvm_vm *dst_vm, *sev_vm, *mirror_vm, *dst_mirror_vm;
-	int ret;
+	struct kvm_vm *dst_vm, *dst2_vm, *dst3_vm, *sev_vm, *mirror_vm,
+		      *dst_mirror_vm, *dst2_mirror_vm, *dst3_mirror_vm;
 
 	sev_vm = sev_vm_create(/* es= */ false);
 	dst_vm = aux_vm_create(true);
+	dst2_vm = aux_vm_create(true);
+	dst3_vm = aux_vm_create(true);
 	mirror_vm = aux_vm_create(false);
 	dst_mirror_vm = aux_vm_create(false);
+	dst2_mirror_vm = aux_vm_create(false);
+	dst3_mirror_vm = aux_vm_create(false);
 
 	sev_mirror_create(mirror_vm->fd, sev_vm->fd);
-	ret = __sev_migrate_from(dst_vm->fd, sev_vm->fd);
-	TEST_ASSERT(ret == -1 && errno == EBUSY,
-		    "Cannot migrate VM that has mirrors. ret %d, errno: %d\n", ret,
-		    errno);
 
-	/* The mirror itself can be migrated.  */
 	sev_migrate_from(dst_mirror_vm->fd, mirror_vm->fd);
-	ret = __sev_migrate_from(dst_vm->fd, sev_vm->fd);
-	TEST_ASSERT(ret == -1 && errno == EBUSY,
-		    "Cannot migrate VM that has mirrors. ret %d, errno: %d\n", ret,
-		    errno);
+	sev_migrate_from(dst_vm->fd, sev_vm->fd);
+
+	sev_migrate_from(dst2_vm->fd, dst_vm->fd);
+	sev_migrate_from(dst2_mirror_vm->fd, dst_mirror_vm->fd);
+
+	sev_migrate_from(dst3_mirror_vm->fd, dst2_mirror_vm->fd);
+	sev_migrate_from(dst3_vm->fd, dst2_vm->fd);
+
+	kvm_vm_free(dst_vm);
+	kvm_vm_free(sev_vm);
+	kvm_vm_free(dst2_vm);
+	kvm_vm_free(dst3_vm);
+	kvm_vm_free(mirror_vm);
+	kvm_vm_free(dst_mirror_vm);
+	kvm_vm_free(dst2_mirror_vm);
+	kvm_vm_free(dst3_mirror_vm);
 
 	/*
-	 * mirror_vm is not a mirror anymore, dst_mirror_vm is.  Thus,
-	 * the owner can be copied as soon as dst_mirror_vm is gone.
+	 * Run a similar test but destroy mirrors before mirrored VMs to ensure
+	 * destruction is done safely.
 	 */
-	kvm_vm_free(dst_mirror_vm);
+	sev_vm = sev_vm_create(/* es= */ false);
+	dst_vm = aux_vm_create(true);
+	mirror_vm = aux_vm_create(false);
+	dst_mirror_vm = aux_vm_create(false);
+
+	sev_mirror_create(mirror_vm->fd, sev_vm->fd);
+
+	sev_migrate_from(dst_mirror_vm->fd, mirror_vm->fd);
 	sev_migrate_from(dst_vm->fd, sev_vm->fd);
 
 	kvm_vm_free(mirror_vm);
+	kvm_vm_free(dst_mirror_vm);
 	kvm_vm_free(dst_vm);
 	kvm_vm_free(sev_vm);
+
 }
 
 int main(int argc, char *argv[])