Message ID | 69dd6d5c4f467e6c8a0f4f1065f7f2a3d25f37f8.1617302792.git.ashish.kalra@amd.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Series | Add AMD SEV guest live migration support | expand |
On Mon, Apr 5, 2021 at 7:30 AM Ashish Kalra <Ashish.Kalra@amd.com> wrote: > > From: Ashish Kalra <ashish.kalra@amd.com> > > Add new KVM_FEATURE_SEV_LIVE_MIGRATION feature for guest to check > for host-side support for SEV live migration. Also add a new custom > MSR_KVM_SEV_LIVE_MIGRATION for guest to enable the SEV live migration > feature. > > MSR is handled by userspace using MSR filters. > > Signed-off-by: Ashish Kalra <ashish.kalra@amd.com> > --- > Documentation/virt/kvm/cpuid.rst | 5 +++++ > Documentation/virt/kvm/msr.rst | 12 ++++++++++++ > arch/x86/include/uapi/asm/kvm_para.h | 4 ++++ > arch/x86/kvm/cpuid.c | 3 ++- > arch/x86/kvm/svm/svm.c | 22 ++++++++++++++++++++++ > 5 files changed, 45 insertions(+), 1 deletion(-) > > diff --git a/Documentation/virt/kvm/cpuid.rst b/Documentation/virt/kvm/cpuid.rst > index cf62162d4be2..0bdb6cdb12d3 100644 > --- a/Documentation/virt/kvm/cpuid.rst > +++ b/Documentation/virt/kvm/cpuid.rst > @@ -96,6 +96,11 @@ KVM_FEATURE_MSI_EXT_DEST_ID 15 guest checks this feature bit > before using extended destination > ID bits in MSI address bits 11-5. > > +KVM_FEATURE_SEV_LIVE_MIGRATION 16 guest checks this feature bit before > + using the page encryption state > + hypercall to notify the page state > + change > + > KVM_FEATURE_CLOCKSOURCE_STABLE_BIT 24 host will warn if no guest-side > per-cpu warps are expected in > kvmclock > diff --git a/Documentation/virt/kvm/msr.rst b/Documentation/virt/kvm/msr.rst > index e37a14c323d2..020245d16087 100644 > --- a/Documentation/virt/kvm/msr.rst > +++ b/Documentation/virt/kvm/msr.rst > @@ -376,3 +376,15 @@ data: > write '1' to bit 0 of the MSR, this causes the host to re-scan its queue > and check if there are more notifications pending. The MSR is available > if KVM_FEATURE_ASYNC_PF_INT is present in CPUID. > + > +MSR_KVM_SEV_LIVE_MIGRATION: > + 0x4b564d08 > + > + Control SEV Live Migration features. 
> + > +data: > + Bit 0 enables (1) or disables (0) host-side SEV Live Migration feature, > + in other words, this is guest->host communication that it's properly > + handling the shared pages list. > + > + All other bits are reserved. > diff --git a/arch/x86/include/uapi/asm/kvm_para.h b/arch/x86/include/uapi/asm/kvm_para.h > index 950afebfba88..f6bfa138874f 100644 > --- a/arch/x86/include/uapi/asm/kvm_para.h > +++ b/arch/x86/include/uapi/asm/kvm_para.h > @@ -33,6 +33,7 @@ > #define KVM_FEATURE_PV_SCHED_YIELD 13 > #define KVM_FEATURE_ASYNC_PF_INT 14 > #define KVM_FEATURE_MSI_EXT_DEST_ID 15 > +#define KVM_FEATURE_SEV_LIVE_MIGRATION 16 > > #define KVM_HINTS_REALTIME 0 > > @@ -54,6 +55,7 @@ > #define MSR_KVM_POLL_CONTROL 0x4b564d05 > #define MSR_KVM_ASYNC_PF_INT 0x4b564d06 > #define MSR_KVM_ASYNC_PF_ACK 0x4b564d07 > +#define MSR_KVM_SEV_LIVE_MIGRATION 0x4b564d08 > > struct kvm_steal_time { > __u64 steal; > @@ -136,4 +138,6 @@ struct kvm_vcpu_pv_apf_data { > #define KVM_PV_EOI_ENABLED KVM_PV_EOI_MASK > #define KVM_PV_EOI_DISABLED 0x0 > > +#define KVM_SEV_LIVE_MIGRATION_ENABLED BIT_ULL(0) > + > #endif /* _UAPI_ASM_X86_KVM_PARA_H */ > diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c > index 6bd2f8b830e4..4e2e69a692aa 100644 > --- a/arch/x86/kvm/cpuid.c > +++ b/arch/x86/kvm/cpuid.c > @@ -812,7 +812,8 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function) > (1 << KVM_FEATURE_PV_SEND_IPI) | > (1 << KVM_FEATURE_POLL_CONTROL) | > (1 << KVM_FEATURE_PV_SCHED_YIELD) | > - (1 << KVM_FEATURE_ASYNC_PF_INT); > + (1 << KVM_FEATURE_ASYNC_PF_INT) | > + (1 << KVM_FEATURE_SEV_LIVE_MIGRATION); > > if (sched_info_on()) > entry->eax |= (1 << KVM_FEATURE_STEAL_TIME); > diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c > index 3cbf000beff1..1ac79e2f2a6c 100644 > --- a/arch/x86/kvm/svm/svm.c > +++ b/arch/x86/kvm/svm/svm.c > @@ -2800,6 +2800,17 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) > case MSR_F10H_DECFG: > 
msr_info->data = svm->msr_decfg; > break; > + case MSR_KVM_SEV_LIVE_MIGRATION: > + if (!sev_guest(vcpu->kvm)) > + return 1; > + > + if (!guest_cpuid_has(vcpu, KVM_FEATURE_SEV_LIVE_MIGRATION)) > + return 1; > + > + /* > + * Let userspace handle the MSR using MSR filters. > + */ > + return KVM_MSR_RET_FILTERED; > default: > return kvm_get_msr_common(vcpu, msr_info); > } > @@ -2996,6 +3007,17 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) > svm->msr_decfg = data; > break; > } > + case MSR_KVM_SEV_LIVE_MIGRATION: > + if (!sev_guest(vcpu->kvm)) > + return 1; > + > + if (!guest_cpuid_has(vcpu, KVM_FEATURE_SEV_LIVE_MIGRATION)) > + return 1; > + > + /* > + * Let userspace handle the MSR using MSR filters. > + */ > + return KVM_MSR_RET_FILTERED; It's a little unintuitive to see KVM_MSR_RET_FILTERED here, since userspace can make this happen on its own without having an entry in this switch statement (by setting it in the msr filter bitmaps). When using MSR filters, I would only expect to get MSR filter exits for MSRs I specifically asked for. Not a huge deal, just a little unintuitive. I'm not sure other options are much better (you could put KVM_MSR_RET_INVALID, or you could just not have these entries in svm_{get,set}_msr). --Steve
Hello Steve, On Mon, Apr 05, 2021 at 06:39:03PM -0700, Steve Rutherford wrote: > On Mon, Apr 5, 2021 at 7:30 AM Ashish Kalra <Ashish.Kalra@amd.com> wrote: > > > > From: Ashish Kalra <ashish.kalra@amd.com> > > > > Add new KVM_FEATURE_SEV_LIVE_MIGRATION feature for guest to check > > for host-side support for SEV live migration. Also add a new custom > > MSR_KVM_SEV_LIVE_MIGRATION for guest to enable the SEV live migration > > feature. > > > > MSR is handled by userspace using MSR filters. > > > > Signed-off-by: Ashish Kalra <ashish.kalra@amd.com> > > --- > > Documentation/virt/kvm/cpuid.rst | 5 +++++ > > Documentation/virt/kvm/msr.rst | 12 ++++++++++++ > > arch/x86/include/uapi/asm/kvm_para.h | 4 ++++ > > arch/x86/kvm/cpuid.c | 3 ++- > > arch/x86/kvm/svm/svm.c | 22 ++++++++++++++++++++++ > > 5 files changed, 45 insertions(+), 1 deletion(-) > > > > diff --git a/Documentation/virt/kvm/cpuid.rst b/Documentation/virt/kvm/cpuid.rst > > index cf62162d4be2..0bdb6cdb12d3 100644 > > --- a/Documentation/virt/kvm/cpuid.rst > > +++ b/Documentation/virt/kvm/cpuid.rst > > @@ -96,6 +96,11 @@ KVM_FEATURE_MSI_EXT_DEST_ID 15 guest checks this feature bit > > before using extended destination > > ID bits in MSI address bits 11-5. > > > > +KVM_FEATURE_SEV_LIVE_MIGRATION 16 guest checks this feature bit before > > + using the page encryption state > > + hypercall to notify the page state > > + change > > + > > KVM_FEATURE_CLOCKSOURCE_STABLE_BIT 24 host will warn if no guest-side > > per-cpu warps are expected in > > kvmclock > > diff --git a/Documentation/virt/kvm/msr.rst b/Documentation/virt/kvm/msr.rst > > index e37a14c323d2..020245d16087 100644 > > --- a/Documentation/virt/kvm/msr.rst > > +++ b/Documentation/virt/kvm/msr.rst > > @@ -376,3 +376,15 @@ data: > > write '1' to bit 0 of the MSR, this causes the host to re-scan its queue > > and check if there are more notifications pending. The MSR is available > > if KVM_FEATURE_ASYNC_PF_INT is present in CPUID. 
> > + > > +MSR_KVM_SEV_LIVE_MIGRATION: > > + 0x4b564d08 > > + > > + Control SEV Live Migration features. > > + > > +data: > > + Bit 0 enables (1) or disables (0) host-side SEV Live Migration feature, > > + in other words, this is guest->host communication that it's properly > > + handling the shared pages list. > > + > > + All other bits are reserved. > > diff --git a/arch/x86/include/uapi/asm/kvm_para.h b/arch/x86/include/uapi/asm/kvm_para.h > > index 950afebfba88..f6bfa138874f 100644 > > --- a/arch/x86/include/uapi/asm/kvm_para.h > > +++ b/arch/x86/include/uapi/asm/kvm_para.h > > @@ -33,6 +33,7 @@ > > #define KVM_FEATURE_PV_SCHED_YIELD 13 > > #define KVM_FEATURE_ASYNC_PF_INT 14 > > #define KVM_FEATURE_MSI_EXT_DEST_ID 15 > > +#define KVM_FEATURE_SEV_LIVE_MIGRATION 16 > > > > #define KVM_HINTS_REALTIME 0 > > > > @@ -54,6 +55,7 @@ > > #define MSR_KVM_POLL_CONTROL 0x4b564d05 > > #define MSR_KVM_ASYNC_PF_INT 0x4b564d06 > > #define MSR_KVM_ASYNC_PF_ACK 0x4b564d07 > > +#define MSR_KVM_SEV_LIVE_MIGRATION 0x4b564d08 > > > > struct kvm_steal_time { > > __u64 steal; > > @@ -136,4 +138,6 @@ struct kvm_vcpu_pv_apf_data { > > #define KVM_PV_EOI_ENABLED KVM_PV_EOI_MASK > > #define KVM_PV_EOI_DISABLED 0x0 > > > > +#define KVM_SEV_LIVE_MIGRATION_ENABLED BIT_ULL(0) > > + > > #endif /* _UAPI_ASM_X86_KVM_PARA_H */ > > diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c > > index 6bd2f8b830e4..4e2e69a692aa 100644 > > --- a/arch/x86/kvm/cpuid.c > > +++ b/arch/x86/kvm/cpuid.c > > @@ -812,7 +812,8 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function) > > (1 << KVM_FEATURE_PV_SEND_IPI) | > > (1 << KVM_FEATURE_POLL_CONTROL) | > > (1 << KVM_FEATURE_PV_SCHED_YIELD) | > > - (1 << KVM_FEATURE_ASYNC_PF_INT); > > + (1 << KVM_FEATURE_ASYNC_PF_INT) | > > + (1 << KVM_FEATURE_SEV_LIVE_MIGRATION); > > > > if (sched_info_on()) > > entry->eax |= (1 << KVM_FEATURE_STEAL_TIME); > > diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c > > index 
3cbf000beff1..1ac79e2f2a6c 100644 > > --- a/arch/x86/kvm/svm/svm.c > > +++ b/arch/x86/kvm/svm/svm.c > > @@ -2800,6 +2800,17 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) > > case MSR_F10H_DECFG: > > msr_info->data = svm->msr_decfg; > > break; > > + case MSR_KVM_SEV_LIVE_MIGRATION: > > + if (!sev_guest(vcpu->kvm)) > > + return 1; > > + > > + if (!guest_cpuid_has(vcpu, KVM_FEATURE_SEV_LIVE_MIGRATION)) > > + return 1; > > + > > + /* > > + * Let userspace handle the MSR using MSR filters. > > + */ > > + return KVM_MSR_RET_FILTERED; > > default: > > return kvm_get_msr_common(vcpu, msr_info); > > } > > @@ -2996,6 +3007,17 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) > > svm->msr_decfg = data; > > break; > > } > > + case MSR_KVM_SEV_LIVE_MIGRATION: > > + if (!sev_guest(vcpu->kvm)) > > + return 1; > > + > > + if (!guest_cpuid_has(vcpu, KVM_FEATURE_SEV_LIVE_MIGRATION)) > > + return 1; > > + > > + /* > > + * Let userspace handle the MSR using MSR filters. > > + */ > > + return KVM_MSR_RET_FILTERED; > > It's a little unintuitive to see > KVM_MSR_RET_FILTERED here, since > userspace can make this happen on its own without having an entry in > this switch statement (by setting it in the msr filter bitmaps). When > using MSR filters, I would only expect to get MSR filter exits for > MSRs I specifically asked for. > > Not a huge deal, just a little unintuitive. I'm not sure other options > are much better (you could put KVM_MSR_RET_INVALID, or you could just > not have these entries in svm_{get,set}_msr). > Actually KVM_MSR_RET_FILTERED seems more logical to use, especially in comparison with KVM_MSR_RET_INVALID. Also, hooking this msr in svm_{get,set}_msr allows some in-kernel error pre-processing before doing the pass-through to userspace. Thanks, Ashish
On 06/04/21 15:26, Ashish Kalra wrote: >> It's a little unintuitive to see KVM_MSR_RET_FILTERED here, since >> userspace can make this happen on its own without having an entry in >> this switch statement (by setting it in the msr filter bitmaps). When >> using MSR filters, I would only expect to get MSR filter exits for >> MSRs I specifically asked for. >> >> Not a huge deal, just a little unintuitive. I'm not sure other options >> are much better (you could put KVM_MSR_RET_INVALID, or you could just >> not have these entries in svm_{get,set}_msr). >> > Actually KVM_MSR_RET_FILTERED seems more logical to use, especially in > comparison with KVM_MSR_RET_INVALID. > > Also, hooking this msr in svm_{get,set}_msr allows some in-kernel error > pre-processsing before doing the pass-through to userspace. I agree that it should be up to userspace to set up the filter since we now have that functionality. Let me read the whole threads for the past versions to see what the objections were... Paolo
Hello Paolo, On Tue, Apr 06, 2021 at 03:47:59PM +0200, Paolo Bonzini wrote: > On 06/04/21 15:26, Ashish Kalra wrote: > > > It's a little unintuitive to see KVM_MSR_RET_FILTERED here, since > > > userspace can make this happen on its own without having an entry in > > > this switch statement (by setting it in the msr filter bitmaps). When > > > using MSR filters, I would only expect to get MSR filter exits for > > > MSRs I specifically asked for. > > > > > > Not a huge deal, just a little unintuitive. I'm not sure other options > > > are much better (you could put KVM_MSR_RET_INVALID, or you could just > > > not have these entries in svm_{get,set}_msr). > > > > > Actually KVM_MSR_RET_FILTERED seems more logical to use, especially in > > comparison with KVM_MSR_RET_INVALID. > > > > Also, hooking this msr in svm_{get,set}_msr allows some in-kernel error > > pre-processsing before doing the pass-through to userspace. > > I agree that it should be up to userspace to set up the filter since we now > have that functionality. > The userspace is still setting up the filter and handling this MSR, it is only some basic error pre-processing being done in-kernel here. Thanks, Ashish > Let me read the whole threads for the past versions to see what the > objections were... > > Paolo >
On Tue, Apr 6, 2021 at 7:00 AM Ashish Kalra <ashish.kalra@amd.com> wrote: > > Hello Paolo, > > On Tue, Apr 06, 2021 at 03:47:59PM +0200, Paolo Bonzini wrote: > > On 06/04/21 15:26, Ashish Kalra wrote: > > > > It's a little unintuitive to see KVM_MSR_RET_FILTERED here, since > > > > userspace can make this happen on its own without having an entry in > > > > this switch statement (by setting it in the msr filter bitmaps). When > > > > using MSR filters, I would only expect to get MSR filter exits for > > > > MSRs I specifically asked for. > > > > > > > > Not a huge deal, just a little unintuitive. I'm not sure other options > > > > are much better (you could put KVM_MSR_RET_INVALID, or you could just > > > > not have these entries in svm_{get,set}_msr). > > > > > > > Actually KVM_MSR_RET_FILTERED seems more logical to use, especially in > > > comparison with KVM_MSR_RET_INVALID. > > > > > > Also, hooking this msr in svm_{get,set}_msr allows some in-kernel error > > > pre-processsing before doing the pass-through to userspace. > > > > I agree that it should be up to userspace to set up the filter since we now > > have that functionality. > > > > The userspace is still setting up the filter and handling this MSR, it > is only some basic error pre-processing being done in-kernel here. The bit that is unintuitive is that userspace will still get the kvm_exit from an msr filter for KVM_MSR_RET_FILTERED even if they did not add it to the filters. I don't think this is a huge deal: userspace asked for it indirectly (through cpuid+sev enablement). > > Thanks, > Ashish > > > Let me read the whole threads for the past versions to see what the > > objections were... > > > > Paolo > >
diff --git a/Documentation/virt/kvm/cpuid.rst b/Documentation/virt/kvm/cpuid.rst index cf62162d4be2..0bdb6cdb12d3 100644 --- a/Documentation/virt/kvm/cpuid.rst +++ b/Documentation/virt/kvm/cpuid.rst @@ -96,6 +96,11 @@ KVM_FEATURE_MSI_EXT_DEST_ID 15 guest checks this feature bit before using extended destination ID bits in MSI address bits 11-5. +KVM_FEATURE_SEV_LIVE_MIGRATION 16 guest checks this feature bit before + using the page encryption state + hypercall to notify the page state + change + KVM_FEATURE_CLOCKSOURCE_STABLE_BIT 24 host will warn if no guest-side per-cpu warps are expected in kvmclock diff --git a/Documentation/virt/kvm/msr.rst b/Documentation/virt/kvm/msr.rst index e37a14c323d2..020245d16087 100644 --- a/Documentation/virt/kvm/msr.rst +++ b/Documentation/virt/kvm/msr.rst @@ -376,3 +376,15 @@ data: write '1' to bit 0 of the MSR, this causes the host to re-scan its queue and check if there are more notifications pending. The MSR is available if KVM_FEATURE_ASYNC_PF_INT is present in CPUID. + +MSR_KVM_SEV_LIVE_MIGRATION: + 0x4b564d08 + + Control SEV Live Migration features. + +data: + Bit 0 enables (1) or disables (0) host-side SEV Live Migration feature, + in other words, this is guest->host communication that it's properly + handling the shared pages list. + + All other bits are reserved. 
diff --git a/arch/x86/include/uapi/asm/kvm_para.h b/arch/x86/include/uapi/asm/kvm_para.h index 950afebfba88..f6bfa138874f 100644 --- a/arch/x86/include/uapi/asm/kvm_para.h +++ b/arch/x86/include/uapi/asm/kvm_para.h @@ -33,6 +33,7 @@ #define KVM_FEATURE_PV_SCHED_YIELD 13 #define KVM_FEATURE_ASYNC_PF_INT 14 #define KVM_FEATURE_MSI_EXT_DEST_ID 15 +#define KVM_FEATURE_SEV_LIVE_MIGRATION 16 #define KVM_HINTS_REALTIME 0 @@ -54,6 +55,7 @@ #define MSR_KVM_POLL_CONTROL 0x4b564d05 #define MSR_KVM_ASYNC_PF_INT 0x4b564d06 #define MSR_KVM_ASYNC_PF_ACK 0x4b564d07 +#define MSR_KVM_SEV_LIVE_MIGRATION 0x4b564d08 struct kvm_steal_time { __u64 steal; @@ -136,4 +138,6 @@ struct kvm_vcpu_pv_apf_data { #define KVM_PV_EOI_ENABLED KVM_PV_EOI_MASK #define KVM_PV_EOI_DISABLED 0x0 +#define KVM_SEV_LIVE_MIGRATION_ENABLED BIT_ULL(0) + #endif /* _UAPI_ASM_X86_KVM_PARA_H */ diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index 6bd2f8b830e4..4e2e69a692aa 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -812,7 +812,8 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function) (1 << KVM_FEATURE_PV_SEND_IPI) | (1 << KVM_FEATURE_POLL_CONTROL) | (1 << KVM_FEATURE_PV_SCHED_YIELD) | - (1 << KVM_FEATURE_ASYNC_PF_INT); + (1 << KVM_FEATURE_ASYNC_PF_INT) | + (1 << KVM_FEATURE_SEV_LIVE_MIGRATION); if (sched_info_on()) entry->eax |= (1 << KVM_FEATURE_STEAL_TIME); diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 3cbf000beff1..1ac79e2f2a6c 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -2800,6 +2800,17 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) case MSR_F10H_DECFG: msr_info->data = svm->msr_decfg; break; + case MSR_KVM_SEV_LIVE_MIGRATION: + if (!sev_guest(vcpu->kvm)) + return 1; + + if (!guest_cpuid_has(vcpu, KVM_FEATURE_SEV_LIVE_MIGRATION)) + return 1; + + /* + * Let userspace handle the MSR using MSR filters. 
+ */ + return KVM_MSR_RET_FILTERED; default: return kvm_get_msr_common(vcpu, msr_info); } @@ -2996,6 +3007,17 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) svm->msr_decfg = data; break; } + case MSR_KVM_SEV_LIVE_MIGRATION: + if (!sev_guest(vcpu->kvm)) + return 1; + + if (!guest_cpuid_has(vcpu, KVM_FEATURE_SEV_LIVE_MIGRATION)) + return 1; + + /* + * Let userspace handle the MSR using MSR filters. + */ + return KVM_MSR_RET_FILTERED; case MSR_IA32_APICBASE: if (kvm_vcpu_apicv_active(vcpu)) avic_update_vapic_bar(to_svm(vcpu), data);