
[v8,13/18] KVM: x86: Introduce new KVM_FEATURE_SEV_LIVE_MIGRATION feature & Custom MSR.

Message ID 4ff020b446baa06037136ceeb1e66d4eba8ad492.1588711355.git.ashish.kalra@amd.com (mailing list archive)
State New, archived
Series Add AMD SEV guest live migration support

Commit Message

Kalra, Ashish May 5, 2020, 9:19 p.m. UTC
From: Ashish Kalra <ashish.kalra@amd.com>

Add a new KVM_FEATURE_SEV_LIVE_MIGRATION feature bit for the guest to
check for host-side support for SEV live migration. Also add a new
custom MSR, MSR_KVM_SEV_LIVE_MIG_EN, for the guest to enable the SEV
live migration feature.

Signed-off-by: Ashish Kalra <ashish.kalra@amd.com>
---
 Documentation/virt/kvm/cpuid.rst     |  5 +++++
 Documentation/virt/kvm/msr.rst       | 10 ++++++++++
 arch/x86/include/uapi/asm/kvm_para.h |  5 +++++
 arch/x86/kvm/svm/sev.c               | 14 ++++++++++++++
 arch/x86/kvm/svm/svm.c               | 16 ++++++++++++++++
 arch/x86/kvm/svm/svm.h               |  2 ++
 6 files changed, 52 insertions(+)
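
For reference, the intended guest-side flow is to detect the feature bit in KVM's
paravirtual CPUID leaf and then write the enable bit to the new MSR. Below is a
minimal sketch, assuming the definitions added by this patch plus the standard
guest helpers sev_active(), kvm_para_has_feature() and wrmsrl(); the real guest
wiring is added by other patches in this series, so treat this as illustrative only.

/* Hypothetical guest-side sketch; not part of this patch. */
#include <asm/kvm_para.h>
#include <asm/mem_encrypt.h>
#include <asm/msr.h>

static void __init sev_live_migration_init(void)
{
	if (!sev_active())
		return;

	/* KVM_FEATURE_SEV_LIVE_MIGRATION is bit 14 of KVM_CPUID_FEATURES. */
	if (!kvm_para_has_feature(KVM_FEATURE_SEV_LIVE_MIGRATION))
		return;

	/* Tell the host that this guest will report page encryption state changes. */
	wrmsrl(MSR_KVM_SEV_LIVE_MIG_EN, KVM_SEV_LIVE_MIGRATION_ENABLED);
}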

Comments

Steve Rutherford May 30, 2020, 2:07 a.m. UTC | #1
On Tue, May 5, 2020 at 2:19 PM Ashish Kalra <Ashish.Kalra@amd.com> wrote:
>
> From: Ashish Kalra <ashish.kalra@amd.com>
>
> Add new KVM_FEATURE_SEV_LIVE_MIGRATION feature for guest to check
> for host-side support for SEV live migration. Also add a new custom
> MSR_KVM_SEV_LIVE_MIG_EN for guest to enable the SEV live migration
> feature.
>
> Signed-off-by: Ashish Kalra <ashish.kalra@amd.com>
> ---
>  Documentation/virt/kvm/cpuid.rst     |  5 +++++
>  Documentation/virt/kvm/msr.rst       | 10 ++++++++++
>  arch/x86/include/uapi/asm/kvm_para.h |  5 +++++
>  arch/x86/kvm/svm/sev.c               | 14 ++++++++++++++
>  arch/x86/kvm/svm/svm.c               | 16 ++++++++++++++++
>  arch/x86/kvm/svm/svm.h               |  2 ++
>  6 files changed, 52 insertions(+)
>
> diff --git a/Documentation/virt/kvm/cpuid.rst b/Documentation/virt/kvm/cpuid.rst
> index 01b081f6e7ea..0514523e00cd 100644
> --- a/Documentation/virt/kvm/cpuid.rst
> +++ b/Documentation/virt/kvm/cpuid.rst
> @@ -86,6 +86,11 @@ KVM_FEATURE_PV_SCHED_YIELD        13          guest checks this feature bit
>                                                before using paravirtualized
>                                                sched yield.
>
> +KVM_FEATURE_SEV_LIVE_MIGRATION    14          guest checks this feature bit before
> +                                              using the page encryption state
> +                                              hypercall to notify the page state
> +                                              change
> +
>  KVM_FEATURE_CLOCSOURCE_STABLE_BIT 24          host will warn if no guest-side
>                                                per-cpu warps are expeced in
>                                                kvmclock
> diff --git a/Documentation/virt/kvm/msr.rst b/Documentation/virt/kvm/msr.rst
> index 33892036672d..7cd7786bbb03 100644
> --- a/Documentation/virt/kvm/msr.rst
> +++ b/Documentation/virt/kvm/msr.rst
> @@ -319,3 +319,13 @@ data:
>
>         KVM guests can request the host not to poll on HLT, for example if
>         they are performing polling themselves.
> +
> +MSR_KVM_SEV_LIVE_MIG_EN:
> +        0x4b564d06
> +
> +       Control SEV Live Migration features.
> +
> +data:
> +        Bit 0 enables (1) or disables (0) host-side SEV Live Migration feature.
> +        Bit 1 enables (1) or disables (0) support for SEV Live Migration extensions.
> +        All other bits are reserved.
> diff --git a/arch/x86/include/uapi/asm/kvm_para.h b/arch/x86/include/uapi/asm/kvm_para.h
> index 2a8e0b6b9805..d9d4953b42ad 100644
> --- a/arch/x86/include/uapi/asm/kvm_para.h
> +++ b/arch/x86/include/uapi/asm/kvm_para.h
> @@ -31,6 +31,7 @@
>  #define KVM_FEATURE_PV_SEND_IPI        11
>  #define KVM_FEATURE_POLL_CONTROL       12
>  #define KVM_FEATURE_PV_SCHED_YIELD     13
> +#define KVM_FEATURE_SEV_LIVE_MIGRATION 14
>
>  #define KVM_HINTS_REALTIME      0
>
> @@ -50,6 +51,7 @@
>  #define MSR_KVM_STEAL_TIME  0x4b564d03
>  #define MSR_KVM_PV_EOI_EN      0x4b564d04
>  #define MSR_KVM_POLL_CONTROL   0x4b564d05
> +#define MSR_KVM_SEV_LIVE_MIG_EN        0x4b564d06
>
>  struct kvm_steal_time {
>         __u64 steal;
> @@ -122,4 +124,7 @@ struct kvm_vcpu_pv_apf_data {
>  #define KVM_PV_EOI_ENABLED KVM_PV_EOI_MASK
>  #define KVM_PV_EOI_DISABLED 0x0
>
> +#define KVM_SEV_LIVE_MIGRATION_ENABLED                 (1 << 0)
> +#define KVM_SEV_LIVE_MIGRATION_EXTENSIONS_SUPPORTED    (1 << 1)
> +
>  #endif /* _UAPI_ASM_X86_KVM_PARA_H */
> diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
> index c0d7043a0627..6f69c3a47583 100644
> --- a/arch/x86/kvm/svm/sev.c
> +++ b/arch/x86/kvm/svm/sev.c
> @@ -1469,6 +1469,17 @@ int svm_page_enc_status_hc(struct kvm *kvm, unsigned long gpa,
>         return 0;
>  }
>
> +void sev_update_migration_flags(struct kvm *kvm, u64 data)
> +{
> +       struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
> +
> +       if (!sev_guest(kvm))
> +               return;
> +
> +       if (data & KVM_SEV_LIVE_MIGRATION_ENABLED)
> +               sev->live_migration_enabled = true;
> +}
> +
>  int svm_get_page_enc_bitmap(struct kvm *kvm,
>                                    struct kvm_page_enc_bitmap *bmap)
>  {
> @@ -1481,6 +1492,9 @@ int svm_get_page_enc_bitmap(struct kvm *kvm,
>         if (!sev_guest(kvm))
>                 return -ENOTTY;
>
> +       if (!sev->live_migration_enabled)
> +               return -EINVAL;
> +
>         gfn_start = bmap->start_gfn;
>         gfn_end = gfn_start + bmap->num_pages;
>
> diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
> index 442adbbb0641..a99f5457f244 100644
> --- a/arch/x86/kvm/svm/svm.c
> +++ b/arch/x86/kvm/svm/svm.c
> @@ -2633,6 +2633,9 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
>                 svm->msr_decfg = data;
>                 break;
>         }
> +       case MSR_KVM_SEV_LIVE_MIG_EN:
> +               sev_update_migration_flags(vcpu->kvm, data);
> +               break;
>         case MSR_IA32_APICBASE:
>                 if (kvm_vcpu_apicv_active(vcpu))
>                         avic_update_vapic_bar(to_svm(vcpu), data);
> @@ -3493,6 +3496,19 @@ static void svm_cpuid_update(struct kvm_vcpu *vcpu)
>         svm->nrips_enabled = kvm_cpu_cap_has(X86_FEATURE_NRIPS) &&
>                              guest_cpuid_has(&svm->vcpu, X86_FEATURE_NRIPS);
>
> +        /*
> +         * If SEV guest then enable the Live migration feature.
> +         */
> +        if (sev_guest(vcpu->kvm)) {
> +              struct kvm_cpuid_entry2 *best;
> +
> +              best = kvm_find_cpuid_entry(vcpu, KVM_CPUID_FEATURES, 0);
> +              if (!best)
> +                      return;
> +
> +              best->eax |= (1 << KVM_FEATURE_SEV_LIVE_MIGRATION);
> +        }
> +
>         if (!kvm_vcpu_apicv_active(vcpu))
>                 return;
>
> diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
> index fd99e0a5417a..77f132a6fead 100644
> --- a/arch/x86/kvm/svm/svm.h
> +++ b/arch/x86/kvm/svm/svm.h
> @@ -65,6 +65,7 @@ struct kvm_sev_info {
>         int fd;                 /* SEV device fd */
>         unsigned long pages_locked; /* Number of pages locked */
>         struct list_head regions_list;  /* List of registered regions */
> +       bool live_migration_enabled;
>         unsigned long *page_enc_bmap;
>         unsigned long page_enc_bmap_size;
>  };
> @@ -494,5 +495,6 @@ int svm_unregister_enc_region(struct kvm *kvm,
>  void pre_sev_run(struct vcpu_svm *svm, int cpu);
>  int __init sev_hardware_setup(void);
>  void sev_hardware_teardown(void);
> +void sev_update_migration_flags(struct kvm *kvm, u64 data);
>
>  #endif
> --
> 2.17.1
>

Reviewed-by: Steve Rutherford <srutherford@google.com>
Paolo Bonzini Dec. 4, 2020, 11:20 a.m. UTC | #2
On 05/05/20 23:19, Ashish Kalra wrote:
> From: Ashish Kalra <ashish.kalra@amd.com>
> 
> Add new KVM_FEATURE_SEV_LIVE_MIGRATION feature for guest to check
> for host-side support for SEV live migration. Also add a new custom
> MSR_KVM_SEV_LIVE_MIG_EN for guest to enable the SEV live migration
> feature.
> 
> Signed-off-by: Ashish Kalra <ashish.kalra@amd.com>
> ---
>   Documentation/virt/kvm/cpuid.rst     |  5 +++++
>   Documentation/virt/kvm/msr.rst       | 10 ++++++++++
>   arch/x86/include/uapi/asm/kvm_para.h |  5 +++++
>   arch/x86/kvm/svm/sev.c               | 14 ++++++++++++++
>   arch/x86/kvm/svm/svm.c               | 16 ++++++++++++++++
>   arch/x86/kvm/svm/svm.h               |  2 ++
>   6 files changed, 52 insertions(+)
> 
> diff --git a/Documentation/virt/kvm/cpuid.rst b/Documentation/virt/kvm/cpuid.rst
> index 01b081f6e7ea..0514523e00cd 100644
> --- a/Documentation/virt/kvm/cpuid.rst
> +++ b/Documentation/virt/kvm/cpuid.rst
> @@ -86,6 +86,11 @@ KVM_FEATURE_PV_SCHED_YIELD        13          guest checks this feature bit
>                                                 before using paravirtualized
>                                                 sched yield.
>   
> +KVM_FEATURE_SEV_LIVE_MIGRATION    14          guest checks this feature bit before
> +                                              using the page encryption state
> +                                              hypercall to notify the page state
> +                                              change
> +
>   KVM_FEATURE_CLOCSOURCE_STABLE_BIT 24          host will warn if no guest-side
>                                                 per-cpu warps are expeced in
>                                                 kvmclock
> diff --git a/Documentation/virt/kvm/msr.rst b/Documentation/virt/kvm/msr.rst
> index 33892036672d..7cd7786bbb03 100644
> --- a/Documentation/virt/kvm/msr.rst
> +++ b/Documentation/virt/kvm/msr.rst
> @@ -319,3 +319,13 @@ data:
>   
>   	KVM guests can request the host not to poll on HLT, for example if
>   	they are performing polling themselves.
> +
> +MSR_KVM_SEV_LIVE_MIG_EN:
> +        0x4b564d06
> +
> +	Control SEV Live Migration features.
> +
> +data:
> +        Bit 0 enables (1) or disables (0) host-side SEV Live Migration feature.
> +        Bit 1 enables (1) or disables (0) support for SEV Live Migration extensions.
> +        All other bits are reserved.

This doesn't say what the feature is or does, and what the extensions 
are.  As far as I understand bit 0 is a guest->host communication that 
it's properly handling the encryption bitmap.

I applied patches -13, with this one changed a bit as follows.

diff --git a/Documentation/virt/kvm/cpuid.rst b/Documentation/virt/kvm/cpuid.rst
index cf62162d4be2..7d82d7da3835 100644
--- a/Documentation/virt/kvm/cpuid.rst
+++ b/Documentation/virt/kvm/cpuid.rst
@@ -96,6 +96,11 @@ KVM_FEATURE_MSI_EXT_DEST_ID        15          guest checks this feature bit
                                                before using extended destination
                                                ID bits in MSI address bits 11-5.

+KVM_FEATURE_ENCRYPTED_VM_BIT       16          guest checks this feature bit before
+                                               using the page encryption state
+                                               hypercall and encrypted VM
+                                               features MSR
+
 KVM_FEATURE_CLOCKSOURCE_STABLE_BIT 24          host will warn if no guest-side
                                                per-cpu warps are expected in
                                                kvmclock
diff --git a/Documentation/virt/kvm/msr.rst b/Documentation/virt/kvm/msr.rst
index e37a14c323d2..02528bc760b8 100644
--- a/Documentation/virt/kvm/msr.rst
+++ b/Documentation/virt/kvm/msr.rst
@@ -376,3 +376,13 @@ data:
  	write '1' to bit 0 of the MSR, this causes the host to re-scan its queue
  	and check if there are more notifications pending. The MSR is available
  	if KVM_FEATURE_ASYNC_PF_INT is present in CPUID.
+
+MSR_KVM_ENC_VM_FEATURE:
+        0x4b564d08
+
+	Control encrypted VM features.
+
+data:
+        Bit 0 tells the host that the guest is (1) or is not (0) issuing the
+        ``KVM_HC_PAGE_ENC_STATUS`` hypercall to keep the encrypted bitmap
+       up to date.
diff --git a/arch/x86/include/uapi/asm/kvm_para.h b/arch/x86/include/uapi/asm/kvm_para.h
index 950afebfba88..3dda6e416a70 100644
--- a/arch/x86/include/uapi/asm/kvm_para.h
+++ b/arch/x86/include/uapi/asm/kvm_para.h
@@ -33,6 +33,7 @@
  #define KVM_FEATURE_PV_SCHED_YIELD	13
  #define KVM_FEATURE_ASYNC_PF_INT	14
  #define KVM_FEATURE_MSI_EXT_DEST_ID	15
+#define KVM_FEATURE_ENCRYPTED_VM	16

  #define KVM_HINTS_REALTIME      0

@@ -54,6 +55,7 @@
  #define MSR_KVM_POLL_CONTROL	0x4b564d05
  #define MSR_KVM_ASYNC_PF_INT	0x4b564d06
  #define MSR_KVM_ASYNC_PF_ACK	0x4b564d07
+#define MSR_KVM_ENC_VM_FEATURE	0x4b564d08

  struct kvm_steal_time {
  	__u64 steal;
@@ -136,4 +138,6 @@ struct kvm_vcpu_pv_apf_data {
  #define KVM_PV_EOI_ENABLED KVM_PV_EOI_MASK
  #define KVM_PV_EOI_DISABLED 0x0

+#define KVM_ENC_VM_BITMAP_VALID			(1 << 0)
+
  #endif /* _UAPI_ASM_X86_KVM_PARA_H */
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index fa67f498e838..0673531233da 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -1478,6 +1478,17 @@ int svm_page_enc_status_hc(struct kvm *kvm, unsigned long gpa,
  	return 0;
  }

+void sev_update_enc_vm_flags(struct kvm *kvm, u64 data)
+{
+	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+
+	if (!sev_guest(kvm))
+		return;
+
+	if (data & KVM_ENC_VM_BITMAP_VALID)
+		sev->live_migration_enabled = true;
+}
+
  int svm_get_page_enc_bitmap(struct kvm *kvm,
  				   struct kvm_page_enc_bitmap *bmap)
  {
@@ -1490,6 +1501,9 @@ int svm_get_page_enc_bitmap(struct kvm *kvm,
  	if (!sev_guest(kvm))
  		return -ENOTTY;

+	if (!sev->live_migration_enabled)
+		return -EINVAL;
+
  	gfn_start = bmap->start_gfn;
  	gfn_end = gfn_start + bmap->num_pages;

diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 66f7014eaae2..8ac2c5b9c675 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -2766,6 +2766,9 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
  		svm->msr_decfg = data;
  		break;
  	}
+	case MSR_KVM_ENC_VM_FEATURE:
+		sev_update_enc_vm_flags(vcpu->kvm, data);
+		break;
  	case MSR_IA32_APICBASE:
  		if (kvm_vcpu_apicv_active(vcpu))
  			avic_update_vapic_bar(to_svm(vcpu), data);
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 287559b8c5b2..363c3f8d00b7 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -66,6 +66,7 @@ struct kvm_sev_info {
  	int fd;			/* SEV device fd */
  	unsigned long pages_locked; /* Number of pages locked */
  	struct list_head regions_list;  /* List of registered regions */
+	bool live_migration_enabled;
  	unsigned long *page_enc_bmap;
  	unsigned long page_enc_bmap_size;
  };
@@ -504,5 +505,6 @@ int svm_page_enc_status_hc(struct kvm *kvm, unsigned long gpa,
  				  unsigned long npages, unsigned long enc);
  int svm_get_page_enc_bitmap(struct kvm *kvm, struct kvm_page_enc_bitmap *bmap);
  int svm_set_page_enc_bitmap(struct kvm *kvm, struct kvm_page_enc_bitmap *bmap);
+void sev_update_enc_vm_flags(struct kvm *kvm, u64 data);

  #endif

Paolo
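
Taken together with the page encryption state hypercall from earlier in the series,
the guest-side contract under the renamed interface above would look roughly as
below. This is only a sketch: the KVM_HC_PAGE_ENC_STATUS argument order (gpa,
number of pages, encryption state) is inferred from svm_page_enc_status_hc() in the
diff, and kvm_para_has_feature(), kvm_hypercall3() and wrmsrl() are the usual guest
helpers; nothing here is part of the applied patch itself.

/* Hypothetical guest-side sketch of the renamed interface; not part of the patch. */
#include <linux/kvm_para.h>
#include <asm/kvm_para.h>
#include <asm/msr.h>

static void notify_page_enc_status(unsigned long gpa, unsigned long npages, bool enc)
{
	if (!kvm_para_has_feature(KVM_FEATURE_ENCRYPTED_VM))
		return;

	/* Argument order assumed from svm_page_enc_status_hc(kvm, gpa, npages, enc). */
	kvm_hypercall3(KVM_HC_PAGE_ENC_STATUS, gpa, npages, enc);
}

static void __init enc_vm_init(void)
{
	if (kvm_para_has_feature(KVM_FEATURE_ENCRYPTED_VM))
		/* Guest promises to keep the encryption bitmap up to date. */
		wrmsrl(MSR_KVM_ENC_VM_FEATURE, KVM_ENC_VM_BITMAP_VALID);
}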
Sean Christopherson Dec. 4, 2020, 4:48 p.m. UTC | #3
On Fri, Dec 04, 2020, Paolo Bonzini wrote:
> I applied patches -13, this one a bit changed as follows.

Can we hold up on applying this series?  Unless I'm misunderstanding things,
much of what you're applying is superseded by a much more recent series to add
only the page encryption bitmap[*].  I have several concerns/comments for that
series that I would like to hash out before we add a new ioctl().  I'll try to
respond next week, my time is unfortunately limited due to onboarding activities.

[*] https://lkml.kernel.org/r/cover.1606782580.git.ashish.kalra@amd.com
Kalra, Ashish Dec. 4, 2020, 5:08 p.m. UTC | #4
An immediate response: the SEV live migration patches are actually preferred over the page encryption bitmap
patches. In other words, if the SEV live migration patches are applied then we don't need the page encryption
bitmap patches, and we would prefer the live migration series to be applied.

It is not that the page encryption bitmap series supersedes the live migration patches; it is just a cut of the
live migration patches.

Thanks,
Ashish
Sean Christopherson Dec. 4, 2020, 5:23 p.m. UTC | #5
On Fri, Dec 04, 2020, Ashish Kalra wrote:
> An immediate response, actually the SEV live migration patches are preferred
> over the Page encryption bitmap patches, in other words, if SEV live
> migration patches are applied then we don't need the Page encryption bitmap
> patches and we prefer the live migration series to be applied.
> 
> It is not that page encryption bitmap series supersede the live migration
> patches, they are just cut of the live migration patches. 

In that case, can you post a fresh version of the live migration series?  Paolo
is obviously willing to take a big chunk of that series, and it will likely be
easier to review with the full context, e.g. one of my comments on the standalone
encryption bitmap series was going to be that it's hard to review without seeing
the live migration aspect.

Thanks!
Kalra, Ashish Dec. 4, 2020, 6:06 p.m. UTC | #6
Yes, I will post a fresh version of the live migration patches.

Also, can you please check your email settings? We are only able to see your responses on the
mailing list, but we are not getting your direct responses.

Thanks,
Ashish
Sean Christopherson Dec. 4, 2020, 6:41 p.m. UTC | #7
On Fri, Dec 4, 2020 at 10:07 AM Ashish Kalra <Ashish.Kalra@amd.com> wrote:
>
> Yes i will post a fresh version of the live migration patches.
>
> Also, can you please check your email settings, we are only able to see your response on the
> mailing list but we are not getting your direct responses.

Hrm, as in you don't get the email?

Is this email any different?  Sending via gmail instead of mutt...
Kalra, Ashish Dec. 4, 2020, 6:48 p.m. UTC | #8
This time I received your email directly.

Thanks,
Ashish

> On Dec 4, 2020, at 12:41 PM, Sean Christopherson <seanjc@google.com> wrote:
> 
> On Fri, Dec 4, 2020 at 10:07 AM Ashish Kalra <Ashish.Kalra@amd.com> wrote:
>> 
>> Yes i will post a fresh version of the live migration patches.
>> 
>> Also, can you please check your email settings, we are only able to see your response on the
>> mailing list but we are not getting your direct responses.
> 
> Hrm, as in you don't get the email?
> 
> Is this email any different?  Sending via gmail instead of mutt...
Tom Lendacky Dec. 4, 2020, 7:02 p.m. UTC | #9
On 12/4/20 12:41 PM, Sean Christopherson wrote:
> On Fri, Dec 4, 2020 at 10:07 AM Ashish Kalra <Ashish.Kalra@amd.com> wrote:
>>
>> Yes i will post a fresh version of the live migration patches.
>>
>> Also, can you please check your email settings, we are only able to see your response on the
>> mailing list but we are not getting your direct responses.
> 
> Hrm, as in you don't get the email?
> 
> Is this email any different?  Sending via gmail instead of mutt...

FWIW, I received the previous email(s). It's probably something on our end.

Thanks,
Tom

>
Kalra, Ashish Dec. 4, 2020, 9:42 p.m. UTC | #10
Hello Paolo,

On Fri, Dec 04, 2020 at 12:20:46PM +0100, Paolo Bonzini wrote:
> On 05/05/20 23:19, Ashish Kalra wrote:
> > From: Ashish Kalra <ashish.kalra@amd.com>
> > 
> > Add new KVM_FEATURE_SEV_LIVE_MIGRATION feature for guest to check
> > for host-side support for SEV live migration. Also add a new custom
> > MSR_KVM_SEV_LIVE_MIG_EN for guest to enable the SEV live migration
> > feature.
> > 
> > Signed-off-by: Ashish Kalra <ashish.kalra@amd.com>
> > ---
> >   Documentation/virt/kvm/cpuid.rst     |  5 +++++
> >   Documentation/virt/kvm/msr.rst       | 10 ++++++++++
> >   arch/x86/include/uapi/asm/kvm_para.h |  5 +++++
> >   arch/x86/kvm/svm/sev.c               | 14 ++++++++++++++
> >   arch/x86/kvm/svm/svm.c               | 16 ++++++++++++++++
> >   arch/x86/kvm/svm/svm.h               |  2 ++
> >   6 files changed, 52 insertions(+)
> > 
> > diff --git a/Documentation/virt/kvm/cpuid.rst b/Documentation/virt/kvm/cpuid.rst
> > index 01b081f6e7ea..0514523e00cd 100644
> > --- a/Documentation/virt/kvm/cpuid.rst
> > +++ b/Documentation/virt/kvm/cpuid.rst
> > @@ -86,6 +86,11 @@ KVM_FEATURE_PV_SCHED_YIELD        13          guest checks this feature bit
> >                                                 before using paravirtualized
> >                                                 sched yield.
> > +KVM_FEATURE_SEV_LIVE_MIGRATION    14          guest checks this feature bit before
> > +                                              using the page encryption state
> > +                                              hypercall to notify the page state
> > +                                              change
> > +
> >   KVM_FEATURE_CLOCSOURCE_STABLE_BIT 24          host will warn if no guest-side
> >                                                 per-cpu warps are expeced in
> >                                                 kvmclock
> > diff --git a/Documentation/virt/kvm/msr.rst b/Documentation/virt/kvm/msr.rst
> > index 33892036672d..7cd7786bbb03 100644
> > --- a/Documentation/virt/kvm/msr.rst
> > +++ b/Documentation/virt/kvm/msr.rst
> > @@ -319,3 +319,13 @@ data:
> >   	KVM guests can request the host not to poll on HLT, for example if
> >   	they are performing polling themselves.
> > +
> > +MSR_KVM_SEV_LIVE_MIG_EN:
> > +        0x4b564d06
> > +
> > +	Control SEV Live Migration features.
> > +
> > +data:
> > +        Bit 0 enables (1) or disables (0) host-side SEV Live Migration feature.
> > +        Bit 1 enables (1) or disables (0) support for SEV Live Migration extensions.
> > +        All other bits are reserved.
> 
> This doesn't say what the feature is or does, and what the extensions are.
> As far as I understand bit 0 is a guest->host communication that it's
> properly handling the encryption bitmap.
> 
Yes, your understanding of bit 0 is correct. The extensions bit is reserved for
future additions to this live migration support, such as support for accelerated
migration.

> I applied patches -13, this one a bit changed as follows.

Yes, I will post a fresh series of this patch set.

Thanks,
Ashish

> 
> diff --git a/Documentation/virt/kvm/cpuid.rst b/Documentation/virt/kvm/cpuid.rst
> index cf62162d4be2..7d82d7da3835 100644
> --- a/Documentation/virt/kvm/cpuid.rst
> +++ b/Documentation/virt/kvm/cpuid.rst
> @@ -96,6 +96,11 @@ KVM_FEATURE_MSI_EXT_DEST_ID        15          guest checks this feature bit
>                                                 before using extended destination
>                                                 ID bits in MSI address bits 11-5.
> 
> +KVM_FEATURE_ENCRYPTED_VM_BIT       16          guest checks this feature bit before
> +                                               using the page encryption state
> +                                               hypercall and encrypted VM
> +                                               features MSR
> +
>  KVM_FEATURE_CLOCKSOURCE_STABLE_BIT 24          host will warn if no guest-side
>                                                 per-cpu warps are expected in
>                                                 kvmclock
> diff --git a/Documentation/virt/kvm/msr.rst b/Documentation/virt/kvm/msr.rst
> index e37a14c323d2..02528bc760b8 100644
> --- a/Documentation/virt/kvm/msr.rst
> +++ b/Documentation/virt/kvm/msr.rst
> @@ -376,3 +376,13 @@ data:
>  	write '1' to bit 0 of the MSR, this causes the host to re-scan its queue
>  	and check if there are more notifications pending. The MSR is available
>  	if KVM_FEATURE_ASYNC_PF_INT is present in CPUID.
> +
> +MSR_KVM_ENC_VM_FEATURE:
> +        0x4b564d08
> +
> +	Control encrypted VM features.
> +
> +data:
> +        Bit 0 tells the host that the guest is (1) or is not (0) issuing the
> +        ``KVM_HC_PAGE_ENC_STATUS`` hypercall to keep the encrypted bitmap
> +       up to date.
> diff --git a/arch/x86/include/uapi/asm/kvm_para.h b/arch/x86/include/uapi/asm/kvm_para.h
> index 950afebfba88..3dda6e416a70 100644
> --- a/arch/x86/include/uapi/asm/kvm_para.h
> +++ b/arch/x86/include/uapi/asm/kvm_para.h
> @@ -33,6 +33,7 @@
>  #define KVM_FEATURE_PV_SCHED_YIELD	13
>  #define KVM_FEATURE_ASYNC_PF_INT	14
>  #define KVM_FEATURE_MSI_EXT_DEST_ID	15
> +#define KVM_FEATURE_ENCRYPTED_VM	16
> 
>  #define KVM_HINTS_REALTIME      0
> 
> @@ -54,6 +55,7 @@
>  #define MSR_KVM_POLL_CONTROL	0x4b564d05
>  #define MSR_KVM_ASYNC_PF_INT	0x4b564d06
>  #define MSR_KVM_ASYNC_PF_ACK	0x4b564d07
> +#define MSR_KVM_ENC_VM_FEATURE	0x4b564d08
> 
>  struct kvm_steal_time {
>  	__u64 steal;
> @@ -136,4 +138,6 @@ struct kvm_vcpu_pv_apf_data {
>  #define KVM_PV_EOI_ENABLED KVM_PV_EOI_MASK
>  #define KVM_PV_EOI_DISABLED 0x0
> 
> +#define KVM_ENC_VM_BITMAP_VALID			(1 << 0)
> +
>  #endif /* _UAPI_ASM_X86_KVM_PARA_H */
> diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
> index fa67f498e838..0673531233da 100644
> --- a/arch/x86/kvm/svm/sev.c
> +++ b/arch/x86/kvm/svm/sev.c
> @@ -1478,6 +1478,17 @@ int svm_page_enc_status_hc(struct kvm *kvm, unsigned long gpa,
>  	return 0;
>  }
> 
> +void sev_update_enc_vm_flags(struct kvm *kvm, u64 data)
> +{
> +	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
> +
> +	if (!sev_guest(kvm))
> +		return;
> +
> +	if (data & KVM_ENC_VM_BITMAP_VALID)
> +		sev->live_migration_enabled = true;
> +}
> +
>  int svm_get_page_enc_bitmap(struct kvm *kvm,
>  				   struct kvm_page_enc_bitmap *bmap)
>  {
> @@ -1490,6 +1501,9 @@ int svm_get_page_enc_bitmap(struct kvm *kvm,
>  	if (!sev_guest(kvm))
>  		return -ENOTTY;
> 
> +	if (!sev->live_migration_enabled)
> +		return -EINVAL;
> +
>  	gfn_start = bmap->start_gfn;
>  	gfn_end = gfn_start + bmap->num_pages;
> 
> diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
> index 66f7014eaae2..8ac2c5b9c675 100644
> --- a/arch/x86/kvm/svm/svm.c
> +++ b/arch/x86/kvm/svm/svm.c
> @@ -2766,6 +2766,9 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
>  		svm->msr_decfg = data;
>  		break;
>  	}
> +	case MSR_KVM_ENC_VM_FEATURE:
> +		sev_update_enc_vm_flags(vcpu->kvm, data);
> +		break;
>  	case MSR_IA32_APICBASE:
>  		if (kvm_vcpu_apicv_active(vcpu))
>  			avic_update_vapic_bar(to_svm(vcpu), data);
> diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
> index 287559b8c5b2..363c3f8d00b7 100644
> --- a/arch/x86/kvm/svm/svm.h
> +++ b/arch/x86/kvm/svm/svm.h
> @@ -66,6 +66,7 @@ struct kvm_sev_info {
>  	int fd;			/* SEV device fd */
>  	unsigned long pages_locked; /* Number of pages locked */
>  	struct list_head regions_list;  /* List of registered regions */
> +	bool live_migration_enabled;
>  	unsigned long *page_enc_bmap;
>  	unsigned long page_enc_bmap_size;
>  };
> @@ -504,5 +505,6 @@ int svm_page_enc_status_hc(struct kvm *kvm, unsigned long gpa,
>  				  unsigned long npages, unsigned long enc);
>  int svm_get_page_enc_bitmap(struct kvm *kvm, struct kvm_page_enc_bitmap *bmap);
>  int svm_set_page_enc_bitmap(struct kvm *kvm, struct kvm_page_enc_bitmap *bmap);
> +void sev_update_enc_vm_flags(struct kvm *kvm, u64 data);
> 
>  #endif
> 
> Paolo
>
Paolo Bonzini Dec. 6, 2020, 10:57 a.m. UTC | #11
On 04/12/20 18:23, Sean Christopherson wrote:
> On Fri, Dec 04, 2020, Ashish Kalra wrote:
>> An immediate response, actually the SEV live migration patches are preferred
>> over the Page encryption bitmap patches, in other words, if SEV live
>> migration patches are applied then we don't need the Page encryption bitmap
>> patches and we prefer the live migration series to be applied.
>>
>> It is not that page encryption bitmap series supersede the live migration
>> patches, they are just cut of the live migration patches.
> In that case, can you post a fresh version of the live migration series?  Paolo
> is obviously willing to take a big chunk of that series, and it will likely be
> easier to review with the full context, e.g. one of my comments on the standalone
> encryption bitmap series was going to be that it's hard to review without seeing
> the live migration aspect.

It still applies without change.  For now I'll only keep the series 
queued in my (n)SVM branch, but will hold on applying it to kvm.git's 
queue and next branches.

Thanks,

Paolo
Kalra, Ashish Dec. 6, 2020, 2:09 p.m. UTC | #12
> On Dec 6, 2020, at 4:58 AM, Paolo Bonzini <pbonzini@redhat.com> wrote:
> 
> On 04/12/20 18:23, Sean Christopherson wrote:
>>> On Fri, Dec 04, 2020, Ashish Kalra wrote:
>>> An immediate response, actually the SEV live migration patches are preferred
>>> over the Page encryption bitmap patches, in other words, if SEV live
>>> migration patches are applied then we don't need the Page encryption bitmap
>>> patches and we prefer the live migration series to be applied.
>>> 
>>> It is not that page encryption bitmap series supersede the live migration
>>> patches, they are just cut of the live migration patches.
>> In that case, can you post a fresh version of the live migration series?  Paolo
>> is obviously willing to take a big chunk of that series, and it will likely be
>> easier to review with the full context, e.g. one of my comments on the standalone
>> encryption bitmap series was going to be that it's hard to review without seeing
>> the live migration aspect.
> 
> It still applies without change.  For now I'll only keep the series queued in my (n)SVM branch, but will hold on applying it to kvm.git's queue and next branches.
> 

Ok thanks Paolo.

Patch

diff --git a/Documentation/virt/kvm/cpuid.rst b/Documentation/virt/kvm/cpuid.rst
index 01b081f6e7ea..0514523e00cd 100644
--- a/Documentation/virt/kvm/cpuid.rst
+++ b/Documentation/virt/kvm/cpuid.rst
@@ -86,6 +86,11 @@  KVM_FEATURE_PV_SCHED_YIELD        13          guest checks this feature bit
                                               before using paravirtualized
                                               sched yield.
 
+KVM_FEATURE_SEV_LIVE_MIGRATION    14          guest checks this feature bit before
+                                              using the page encryption state
+                                              hypercall to notify the page state
+                                              change
+
 KVM_FEATURE_CLOCSOURCE_STABLE_BIT 24          host will warn if no guest-side
                                               per-cpu warps are expeced in
                                               kvmclock
diff --git a/Documentation/virt/kvm/msr.rst b/Documentation/virt/kvm/msr.rst
index 33892036672d..7cd7786bbb03 100644
--- a/Documentation/virt/kvm/msr.rst
+++ b/Documentation/virt/kvm/msr.rst
@@ -319,3 +319,13 @@  data:
 
 	KVM guests can request the host not to poll on HLT, for example if
 	they are performing polling themselves.
+
+MSR_KVM_SEV_LIVE_MIG_EN:
+        0x4b564d06
+
+	Control SEV Live Migration features.
+
+data:
+        Bit 0 enables (1) or disables (0) host-side SEV Live Migration feature.
+        Bit 1 enables (1) or disables (0) support for SEV Live Migration extensions.
+        All other bits are reserved.
diff --git a/arch/x86/include/uapi/asm/kvm_para.h b/arch/x86/include/uapi/asm/kvm_para.h
index 2a8e0b6b9805..d9d4953b42ad 100644
--- a/arch/x86/include/uapi/asm/kvm_para.h
+++ b/arch/x86/include/uapi/asm/kvm_para.h
@@ -31,6 +31,7 @@ 
 #define KVM_FEATURE_PV_SEND_IPI	11
 #define KVM_FEATURE_POLL_CONTROL	12
 #define KVM_FEATURE_PV_SCHED_YIELD	13
+#define KVM_FEATURE_SEV_LIVE_MIGRATION	14
 
 #define KVM_HINTS_REALTIME      0
 
@@ -50,6 +51,7 @@ 
 #define MSR_KVM_STEAL_TIME  0x4b564d03
 #define MSR_KVM_PV_EOI_EN      0x4b564d04
 #define MSR_KVM_POLL_CONTROL	0x4b564d05
+#define MSR_KVM_SEV_LIVE_MIG_EN	0x4b564d06
 
 struct kvm_steal_time {
 	__u64 steal;
@@ -122,4 +124,7 @@  struct kvm_vcpu_pv_apf_data {
 #define KVM_PV_EOI_ENABLED KVM_PV_EOI_MASK
 #define KVM_PV_EOI_DISABLED 0x0
 
+#define KVM_SEV_LIVE_MIGRATION_ENABLED			(1 << 0)
+#define KVM_SEV_LIVE_MIGRATION_EXTENSIONS_SUPPORTED	(1 << 1)
+
 #endif /* _UAPI_ASM_X86_KVM_PARA_H */
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index c0d7043a0627..6f69c3a47583 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -1469,6 +1469,17 @@  int svm_page_enc_status_hc(struct kvm *kvm, unsigned long gpa,
 	return 0;
 }
 
+void sev_update_migration_flags(struct kvm *kvm, u64 data)
+{
+	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+
+	if (!sev_guest(kvm))
+		return;
+
+	if (data & KVM_SEV_LIVE_MIGRATION_ENABLED)
+		sev->live_migration_enabled = true;
+}
+
 int svm_get_page_enc_bitmap(struct kvm *kvm,
 				   struct kvm_page_enc_bitmap *bmap)
 {
@@ -1481,6 +1492,9 @@  int svm_get_page_enc_bitmap(struct kvm *kvm,
 	if (!sev_guest(kvm))
 		return -ENOTTY;
 
+	if (!sev->live_migration_enabled)
+		return -EINVAL;
+
 	gfn_start = bmap->start_gfn;
 	gfn_end = gfn_start + bmap->num_pages;
 
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 442adbbb0641..a99f5457f244 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -2633,6 +2633,9 @@  static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
 		svm->msr_decfg = data;
 		break;
 	}
+	case MSR_KVM_SEV_LIVE_MIG_EN:
+		sev_update_migration_flags(vcpu->kvm, data);
+		break;
 	case MSR_IA32_APICBASE:
 		if (kvm_vcpu_apicv_active(vcpu))
 			avic_update_vapic_bar(to_svm(vcpu), data);
@@ -3493,6 +3496,19 @@  static void svm_cpuid_update(struct kvm_vcpu *vcpu)
 	svm->nrips_enabled = kvm_cpu_cap_has(X86_FEATURE_NRIPS) &&
 			     guest_cpuid_has(&svm->vcpu, X86_FEATURE_NRIPS);
 
+        /*
+         * If SEV guest then enable the Live migration feature.
+         */
+        if (sev_guest(vcpu->kvm)) {
+              struct kvm_cpuid_entry2 *best;
+
+              best = kvm_find_cpuid_entry(vcpu, KVM_CPUID_FEATURES, 0);
+              if (!best)
+                      return;
+
+              best->eax |= (1 << KVM_FEATURE_SEV_LIVE_MIGRATION);
+        }
+
 	if (!kvm_vcpu_apicv_active(vcpu))
 		return;
 
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index fd99e0a5417a..77f132a6fead 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -65,6 +65,7 @@  struct kvm_sev_info {
 	int fd;			/* SEV device fd */
 	unsigned long pages_locked; /* Number of pages locked */
 	struct list_head regions_list;  /* List of registered regions */
+	bool live_migration_enabled;
 	unsigned long *page_enc_bmap;
 	unsigned long page_enc_bmap_size;
 };
@@ -494,5 +495,6 @@  int svm_unregister_enc_region(struct kvm *kvm,
 void pre_sev_run(struct vcpu_svm *svm, int cpu);
 int __init sev_hardware_setup(void);
 void sev_hardware_teardown(void);
+void sev_update_migration_flags(struct kvm *kvm, u64 data);
 
 #endif
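
On the host side, the net effect of this patch is that fetching the page encryption
bitmap only succeeds once the guest has opted in through the new MSR. A hedged
userspace sketch follows; the KVM_GET_PAGE_ENC_BITMAP ioctl and struct
kvm_page_enc_bitmap come from earlier patches in this series (only start_gfn and
num_pages are visible in the diff above, so the enc_bitmap field name is assumed).

/* Hypothetical VMM-side sketch; requires the headers added by this series. */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int fetch_enc_bitmap(int vm_fd, uint64_t start_gfn, uint64_t num_pages,
			    uint64_t *bitmap)
{
	struct kvm_page_enc_bitmap bmap = {
		.start_gfn  = start_gfn,
		.num_pages  = num_pages,
		.enc_bitmap = bitmap,		/* assumed field name */
	};

	/*
	 * Fails (errno == EINVAL) until the guest has written
	 * KVM_SEV_LIVE_MIGRATION_ENABLED to MSR_KVM_SEV_LIVE_MIG_EN,
	 * per the check added to svm_get_page_enc_bitmap() above.
	 */
	return ioctl(vm_fd, KVM_GET_PAGE_ENC_BITMAP, &bmap);
}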