diff mbox series

[RFC,v7,03/64] KVM: SVM: Advertise private memory support to KVM

Message ID 20221214194056.161492-4-michael.roth@amd.com (mailing list archive)
State New, archived
Headers show
Series Add AMD Secure Nested Paging (SEV-SNP) Hypervisor Support | expand

Commit Message

Michael Roth Dec. 14, 2022, 7:39 p.m. UTC
From: Nikunj A Dadhania <nikunj@amd.com>

KVM should use private memory for guests that have the upm_mode flag set.

Add a kvm_x86_ops hook for determining UPM support that accounts for
this situation by only enabling UPM test mode in the case of non-SEV
guests.

Signed-off-by: Nikunj A Dadhania <nikunj@amd.com>
[mdr: add x86 hook for determining restricted/private memory support]
Signed-off-by: Michael Roth <michael.roth@amd.com>
---
 arch/x86/include/asm/kvm-x86-ops.h |  1 +
 arch/x86/include/asm/kvm_host.h    |  1 +
 arch/x86/kvm/svm/svm.c             | 10 ++++++++++
 arch/x86/kvm/x86.c                 |  8 ++++++++
 4 files changed, 20 insertions(+)

Comments

Borislav Petkov Dec. 23, 2022, 4:56 p.m. UTC | #1
On Wed, Dec 14, 2022 at 01:39:55PM -0600, Michael Roth wrote:
> +       bool (*private_mem_enabled)(struct kvm *kvm);

This looks like a function returning boolean to me. IOW, you can
simplify this to:

diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
index 82ba4a564e58..4449aeff0dff 100644
--- a/arch/x86/include/asm/kvm-x86-ops.h
+++ b/arch/x86/include/asm/kvm-x86-ops.h
@@ -129,6 +129,7 @@ KVM_X86_OP(msr_filter_changed)
 KVM_X86_OP(complete_emulated_msr)
 KVM_X86_OP(vcpu_deliver_sipi_vector)
 KVM_X86_OP_OPTIONAL_RET0(vcpu_get_apicv_inhibit_reasons);
+KVM_X86_OP_OPTIONAL_RET0(private_mem_enabled);
 
 #undef KVM_X86_OP
 #undef KVM_X86_OP_OPTIONAL
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 1da0474edb2d..1b4b89ddeb55 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1574,6 +1574,7 @@ struct kvm_x86_ops {
 
 	void (*load_mmu_pgd)(struct kvm_vcpu *vcpu, hpa_t root_hpa,
 			     int root_level);
+	bool (*private_mem_enabled)(struct kvm *kvm);
 
 	bool (*has_wbinvd_exit)(void);
 
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index ce362e88a567..73b780fa4653 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -4680,6 +4680,14 @@ static int svm_vm_init(struct kvm *kvm)
 	return 0;
 }
 
+static bool svm_private_mem_enabled(struct kvm *kvm)
+{
+	if (sev_guest(kvm))
+		return kvm->arch.upm_mode;
+
+	return IS_ENABLED(CONFIG_HAVE_KVM_PRIVATE_MEM_TESTING);
+}
+
 static struct kvm_x86_ops svm_x86_ops __initdata = {
 	.name = "kvm_amd",
 
@@ -4760,6 +4768,8 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
 
 	.vcpu_after_set_cpuid = svm_vcpu_after_set_cpuid,
 
+	.private_mem_enabled = svm_private_mem_enabled,
+
 	.has_wbinvd_exit = svm_has_wbinvd_exit,
 
 	.get_l2_tsc_offset = svm_get_l2_tsc_offset,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 823646d601db..9a1ca59d36a4 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -12556,6 +12556,11 @@ void __user * __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa,
 }
 EXPORT_SYMBOL_GPL(__x86_set_memory_region);
 
+bool kvm_arch_has_private_mem(struct kvm *kvm)
+{
+	return static_call(kvm_x86_private_mem_enabled)(kvm);
+}
+
 void kvm_arch_pre_destroy_vm(struct kvm *kvm)
 {
 	kvm_mmu_pre_destroy_vm(kvm);
Michael Roth Jan. 5, 2023, 2:14 a.m. UTC | #2
On Fri, Dec 23, 2022 at 05:56:50PM +0100, Borislav Petkov wrote:
> On Wed, Dec 14, 2022 at 01:39:55PM -0600, Michael Roth wrote:
> > +       bool (*private_mem_enabled)(struct kvm *kvm);
> 
> This looks like a function returning boolean to me. IOW, you can
> simplify this to:

The semantics and existing uses of KVM_X86_OP_OPTIONAL_RET0() gave me the
impression it needed to return an integer value, since by default if a
platform doesn't implement the op it would "return 0", and so could
still be called unconditionally.

Maybe that's not actually enforced, but it seems awkward to try to use a
bool return instead. At least for KVM_X86_OP_OPTIONAL_RET0().

However, we could just use KVM_X86_OP() to declare it so we can cleanly
use a function that returns bool, and then we just need to do:

  bool kvm_arch_has_private_mem(struct kvm *kvm)
  {
          if (kvm_x86_ops.private_mem_enabled)
                  return static_call(kvm_x86_private_mem_enabled)(kvm);
  }
    
instead of relying on default return value. So I'll take that approach
and adopt your other suggested changes.

...

On a separate topic though, at a high level, this hook is basically a way
for platform-specific code to tell generic KVM code that private memslots
are supported by overriding the kvm_arch_has_private_mem() weak
reference. In this case the AMD platform is using kvm->arch.upm_mode
flag to convey that, which is in turn set by the
KVM_CAP_UNMAPPED_PRIVATE_MEMORY introduced in this series.

But if, as I suggested in response to your PATCH 2 comments, we drop
KVM_CAP_UNMAPPED_PRIVATE_MEMORY in favor of
KVM_SET_SUPPORTED_MEMORY_ATTRIBUTES ioctl to enable "UPM mode" in SEV/SNP
code, then we need to rethink things a bit, since KVM_SET_MEMORY_ATTRIBUTES
in-part relies on kvm_arch_has_private_mem() to determine what flags are
supported, whereas SEV/SNP code would be using what was set by
KVM_SET_MEMORY_ATTRIBUTES to determine the return value in
kvm_arch_has_private_mem().

So, for AMD, the return value of kvm_arch_has_private_mem() needs to rely
on something else. Maybe the logic can just be:

  bool svm_private_mem_enabled(struct kvm *kvm)
  {
    return sev_enabled(kvm) || sev_snp_enabled(kvm)
  }

(at least in the context of this patchset where UPM support is added for
both SEV and SNP).

So I'll plan to make that change as well.

-Mike

> 
> diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
> index 82ba4a564e58..4449aeff0dff 100644
> --- a/arch/x86/include/asm/kvm-x86-ops.h
> +++ b/arch/x86/include/asm/kvm-x86-ops.h
> @@ -129,6 +129,7 @@ KVM_X86_OP(msr_filter_changed)
>  KVM_X86_OP(complete_emulated_msr)
>  KVM_X86_OP(vcpu_deliver_sipi_vector)
>  KVM_X86_OP_OPTIONAL_RET0(vcpu_get_apicv_inhibit_reasons);
> +KVM_X86_OP_OPTIONAL_RET0(private_mem_enabled);
>  
>  #undef KVM_X86_OP
>  #undef KVM_X86_OP_OPTIONAL
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index 1da0474edb2d..1b4b89ddeb55 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -1574,6 +1574,7 @@ struct kvm_x86_ops {
>  
>  	void (*load_mmu_pgd)(struct kvm_vcpu *vcpu, hpa_t root_hpa,
>  			     int root_level);
> +	bool (*private_mem_enabled)(struct kvm *kvm);
>  
>  	bool (*has_wbinvd_exit)(void);
>  
> diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
> index ce362e88a567..73b780fa4653 100644
> --- a/arch/x86/kvm/svm/svm.c
> +++ b/arch/x86/kvm/svm/svm.c
> @@ -4680,6 +4680,14 @@ static int svm_vm_init(struct kvm *kvm)
>  	return 0;
>  }
>  
> +static bool svm_private_mem_enabled(struct kvm *kvm)
> +{
> +	if (sev_guest(kvm))
> +		return kvm->arch.upm_mode;
> +
> +	return IS_ENABLED(CONFIG_HAVE_KVM_PRIVATE_MEM_TESTING);
> +}
> +
>  static struct kvm_x86_ops svm_x86_ops __initdata = {
>  	.name = "kvm_amd",
>  
> @@ -4760,6 +4768,8 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
>  
>  	.vcpu_after_set_cpuid = svm_vcpu_after_set_cpuid,
>  
> +	.private_mem_enabled = svm_private_mem_enabled,
> +
>  	.has_wbinvd_exit = svm_has_wbinvd_exit,
>  
>  	.get_l2_tsc_offset = svm_get_l2_tsc_offset,
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 823646d601db..9a1ca59d36a4 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -12556,6 +12556,11 @@ void __user * __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa,
>  }
>  EXPORT_SYMBOL_GPL(__x86_set_memory_region);
>  
> +bool kvm_arch_has_private_mem(struct kvm *kvm)
> +{
> +	return static_call(kvm_x86_private_mem_enabled)(kvm);
> +}
> +
>  void kvm_arch_pre_destroy_vm(struct kvm *kvm)
>  {
>  	kvm_mmu_pre_destroy_vm(kvm);
> 
> -- 
> Regards/Gruss,
>     Boris.
> 
> https://nam11.safelinks.protection.outlook.com/?url=https%3A%2F%2Fpeople.kernel.org%2Ftglx%2Fnotes-about-netiquette&data=05%7C01%7Cmichael.roth%40amd.com%7C319e89ce555a46eace4d08dae506b51a%7C3dd8961fe4884e608e11a82d994e183d%7C0%7C0%7C638074114318137471%7CUnknown%7CTWFpbGZsb3d8eyJWIjoiMC4wLjAwMDAiLCJQIjoiV2luMzIiLCJBTiI6Ik1haWwiLCJXVCI6Mn0%3D%7C3000%7C%7C%7C&sdata=aG11K7va1BhemwlKCKKdcIXEwXGUzImYL%2BZ9%2FQ7XToI%3D&reserved=0
Borislav Petkov Jan. 5, 2023, 3:04 p.m. UTC | #3
On Wed, Jan 04, 2023 at 08:14:19PM -0600, Michael Roth wrote:
> Maybe that's not actually enforced, by it seems awkward to try to use a
> bool return instead. At least for KVM_X86_OP_OPTIONAL_RET0().

I don't see there being a problem/restriction for bool functions, see

5be2226f417d ("KVM: x86: allow defining return-0 static calls")

and __static_call_return0() returns a long which, if you wanna interpret as
bool, works too as "false".

I still need to disassemble and single-step through a static_call to see what
all that magic does in detail, to be sure.

> However, we could just use KVM_X86_OP() to declare it so we can cleanly
> use a function that returns bool, and then we just need to do:
> 
>   bool kvm_arch_has_private_mem(struct kvm *kvm)
>   {
>           if (kvm_x86_ops.private_mem_enabled)
>                   return static_call(kvm_x86_private_mem_enabled)(kvm);

That would be defeating the whole purpose of static calls, AFAICT, as you're
testing the pointer. Might as well leave it be a normal function pointer then.

> On a separate topic though, at a high level, this hook is basically a way
> for platform-specific code to tell generic KVM code that private memslots
> are supported by overriding the kvm_arch_has_private_mem() weak
> reference. In this case the AMD platform is using using kvm->arch.upm_mode
> flag to convey that, which is in turn set by the
> KVM_CAP_UNMAPPED_PRIVATE_MEMORY introduced in this series.
> 
> But if, as I suggested in response to your PATCH 2 comments, we drop
> KVM_CAP_UNAMMPED_PRIVATE_MEMORY in favor of
> KVM_SET_SUPPORTED_MEMORY_ATTRIBUTES ioctl to enable "UPM mode" in SEV/SNP
> code, then we need to rethink things a bit, since KVM_SET_MEMORY_ATTRIBUTES
> in-part relies on kvm_arch_has_private_mem() to determine what flags are
> supported, whereas SEV/SNP code would be using what was set by
> KVM_SET_MEMORY_ATTRIBUTES to determine the return value in
> kvm_arch_has_private_mem().
> 
> So, for AMD, the return value of kvm_arch_has_private_mem() needs to rely
> on something else. Maybe the logic can just be:
> 
>   bool svm_private_mem_enabled(struct kvm *kvm)
>   {
>     return sev_enabled(kvm) || sev_snp_enabled(kvm)

I haven't followed the whole discussion in detail but this means that SEV/SNP
*means* UPM. I.e., no SEV/SNP without UPM, correct? I guess that's the final
thing you guys decided to do ...

Thx.
Michael Roth Jan. 5, 2023, 6:17 p.m. UTC | #4
On Thu, Jan 05, 2023 at 04:04:51PM +0100, Borislav Petkov wrote:
> On Wed, Jan 04, 2023 at 08:14:19PM -0600, Michael Roth wrote:
> > Maybe that's not actually enforced, by it seems awkward to try to use a
> > bool return instead. At least for KVM_X86_OP_OPTIONAL_RET0().
> 
> I don't see there being a problem/restriction for bool functions, see
> 
> 5be2226f417d ("KVM: x86: allow defining return-0 static calls")
> 
> and __static_call_return0() returns a long which, if you wanna interpret as
> bool, works too as "false".
> 
> I still need to disassemble and single-step through a static_call to see what
> all that magic does in detail, to be sure.

Hmm, yah, looking at that patch it seems pretty clear (at least at the
time) that bool returns are expected to work fine and there are still
existing cases that do things that way.

> 
> > However, we could just use KVM_X86_OP() to declare it so we can cleanly
> > use a function that returns bool, and then we just need to do:
> > 
> >   bool kvm_arch_has_private_mem(struct kvm *kvm)
> >   {
> >           if (kvm_x86_ops.private_mem_enabled)
> >                   return static_call(kvm_x86_private_mem_enabled)(kvm);
> 
> That would be defeating the whole purpose of static calls, AFAICT, as you're
> testing the pointer. Might as well leave it be a normal function pointer then.

That's true, we should just use the function pointer for those cases.
But given the above maybe KVM_X86_OP_OPTIONAL_RET0() is fine after all
so we can continue using static_call().

> 
> > On a separate topic though, at a high level, this hook is basically a way
> > for platform-specific code to tell generic KVM code that private memslots
> > are supported by overriding the kvm_arch_has_private_mem() weak
> > reference. In this case the AMD platform is using using kvm->arch.upm_mode
> > flag to convey that, which is in turn set by the
> > KVM_CAP_UNMAPPED_PRIVATE_MEMORY introduced in this series.
> > 
> > But if, as I suggested in response to your PATCH 2 comments, we drop
> > KVM_CAP_UNAMMPED_PRIVATE_MEMORY in favor of
> > KVM_SET_SUPPORTED_MEMORY_ATTRIBUTES ioctl to enable "UPM mode" in SEV/SNP
> > code, then we need to rethink things a bit, since KVM_SET_MEMORY_ATTRIBUTES
> > in-part relies on kvm_arch_has_private_mem() to determine what flags are
> > supported, whereas SEV/SNP code would be using what was set by
> > KVM_SET_MEMORY_ATTRIBUTES to determine the return value in
> > kvm_arch_has_private_mem().
> > 
> > So, for AMD, the return value of kvm_arch_has_private_mem() needs to rely
> > on something else. Maybe the logic can just be:
> > 
> >   bool svm_private_mem_enabled(struct kvm *kvm)
> >   {
> >     return sev_enabled(kvm) || sev_snp_enabled(kvm)
> 
> I haven't followed the whole discussion in detail but this means that SEV/SNP
> *means* UPM. I.e., no SEV/SNP without UPM, correct? I guess that's the final
> thing you guys decided to do ...

It's more about reporting whether UPM is *possible*. In the case of
this patchset there is support for using UPM in conjunction with SEV
or SEV-SNP, and that's all the above check is conveying. This is
basically what KVM_SET_SUPPORTED_MEMORY_ATTRIBUTES ioctl will rely on to
determine what attributes it reports to the guest as being supported,
which is basically what tells userspace whether or not it can make use
of UPM features.

In the case of SEV, it would still be up to userspace whether or not it
actually wants to make use of UPM functionality like KVM_SET_MEMORY_ATTRIBUTES
and private memslots. Otherwise, to maintain backward-compatibility, 
userspace can do things as it has always done and continue running SEV without
relying on private memslots/KVM_SET_MEMORY_ATTRIBUTES or any of the new ioctls.

For SNP however it is required that userspace uses/implements UPM
functionality.

-Mike

> 
> Thx.
> 
> -- 
> Regards/Gruss,
>     Boris.
> 
> https://nam11.safelinks.protection.outlook.com/?url=https%3A%2F%2Fpeople.kernel.org%2Ftglx%2Fnotes-about-netiquette&data=05%7C01%7Cmichael.roth%40amd.com%7C4bfcf0c3d1e54046703008daef2e35d4%7C3dd8961fe4884e608e11a82d994e183d%7C0%7C0%7C638085279100077750%7CUnknown%7CTWFpbGZsb3d8eyJWIjoiMC4wLjAwMDAiLCJQIjoiV2luMzIiLCJBTiI6Ik1haWwiLCJXVCI6Mn0%3D%7C3000%7C%7C%7C&sdata=oCjQx7LV1paTYdMA4JpFjIexf5UkiZEf2pYOWTvpkZ0%3D&reserved=0
Borislav Petkov Jan. 13, 2023, 2:16 p.m. UTC | #5
On Thu, Jan 05, 2023 at 12:17:41PM -0600, Michael Roth wrote:
> In the case of SEV, it would still be up to userspace whether or not it
> actually wants to make use of UPM functionality like KVM_SET_MEMORY_ATTRIBUTES
> and private memslots. Otherwise, to maintain backward-compatibility, 
> userspace can do things as it has always done and continue running SEV without
> relying on private memslots/KVM_SET_MEMORY_ATTRIBUTES or any of the new ioctls.
> 
> For SNP however it is required that userspace uses/implements UPM
> functionality.

Makes sense to me.
Huang, Kai Jan. 18, 2023, 12:20 a.m. UTC | #6
On Wed, 2022-12-14 at 13:39 -0600, Michael Roth wrote:
> From: Nikunj A Dadhania <nikunj@amd.com>
> 
> KVM should use private memory for guests that have upm_mode flag set.
> 
> Add a kvm_x86_ops hook for determining UPM support that accounts for
> this situation by only enabling UPM test mode in the case of non-SEV
> guests.
> 
> Signed-off-by: Nikunj A Dadhania <nikunj@amd.com>
> [mdr: add x86 hook for determining restricted/private memory support]
> Signed-off-by: Michael Roth <michael.roth@amd.com>
> ---
>  arch/x86/include/asm/kvm-x86-ops.h |  1 +
>  arch/x86/include/asm/kvm_host.h    |  1 +
>  arch/x86/kvm/svm/svm.c             | 10 ++++++++++
>  arch/x86/kvm/x86.c                 |  8 ++++++++
>  4 files changed, 20 insertions(+)
> 
> diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
> index abccd51dcfca..f530a550c092 100644
> --- a/arch/x86/include/asm/kvm-x86-ops.h
> +++ b/arch/x86/include/asm/kvm-x86-ops.h
> @@ -131,6 +131,7 @@ KVM_X86_OP(msr_filter_changed)
>  KVM_X86_OP(complete_emulated_msr)
>  KVM_X86_OP(vcpu_deliver_sipi_vector)
>  KVM_X86_OP_OPTIONAL_RET0(vcpu_get_apicv_inhibit_reasons);
> +KVM_X86_OP_OPTIONAL_RET0(private_mem_enabled);
>  
>  #undef KVM_X86_OP
>  #undef KVM_X86_OP_OPTIONAL
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index 2b6244525107..9317abffbf68 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -1635,6 +1635,7 @@ struct kvm_x86_ops {
>  
>  	void (*load_mmu_pgd)(struct kvm_vcpu *vcpu, hpa_t root_hpa,
>  			     int root_level);
> +	int (*private_mem_enabled)(struct kvm *kvm);
>  
>  	bool (*has_wbinvd_exit)(void);
>  
> diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
> index 91352d692845..7f3e4d91c0c6 100644
> --- a/arch/x86/kvm/svm/svm.c
> +++ b/arch/x86/kvm/svm/svm.c
> @@ -4694,6 +4694,14 @@ static int svm_vm_init(struct kvm *kvm)
>  	return 0;
>  }
>  
> +static int svm_private_mem_enabled(struct kvm *kvm)
> +{
> +	if (sev_guest(kvm))
> +		return kvm->arch.upm_mode ? 1 : 0;
> +
> +	return IS_ENABLED(CONFIG_HAVE_KVM_PRIVATE_MEM_TESTING) ? 1 : 0;
> +}
> +

Is this new callback really needed?  Shouldn't kvm->arch.upm_mode be sufficient
enough to indicate whether the private memory will be used or not?

Probably the CONFIG_HAVE_KVM_PRIVATE_MEM_TESTING is the concern here.  But this
Kconfig option is not even x86-specific, so shouldn't the handling of it be done
in common code too?

For instance, can we explicitly set 'kvm->arch.upm_mode' to 'true' at some point
of creating the VM if we see CONFIG_HAVE_KVM_PRIVATE_MEM_TESTING is true?

[snip]
Sean Christopherson Jan. 18, 2023, 9:33 p.m. UTC | #7
On Wed, Jan 18, 2023, Huang, Kai wrote:
> On Wed, 2022-12-14 at 13:39 -0600, Michael Roth wrote:
> > diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
> > index 91352d692845..7f3e4d91c0c6 100644
> > --- a/arch/x86/kvm/svm/svm.c
> > +++ b/arch/x86/kvm/svm/svm.c
> > @@ -4694,6 +4694,14 @@ static int svm_vm_init(struct kvm *kvm)
> >  	return 0;
> >  }
> >  
> > +static int svm_private_mem_enabled(struct kvm *kvm)
> > +{
> > +	if (sev_guest(kvm))
> > +		return kvm->arch.upm_mode ? 1 : 0;
> > +
> > +	return IS_ENABLED(CONFIG_HAVE_KVM_PRIVATE_MEM_TESTING) ? 1 : 0;
> > +}
> > +
> 
> Is this new callback really needed?

Probably not.  For anything in this series that gets within spitting distance of
CONFIG_HAVE_KVM_PRIVATE_MEM_TESTING, my recommendation is to make a mental note
but otherwise ignore things like this for now.  I suspect it will be much, much
more efficient to sort all of this out when I smush UPM+SNP+TDX together in a few
weeks.
Jarkko Sakkinen Jan. 20, 2023, 9:20 p.m. UTC | #8
On Wed, Jan 04, 2023 at 08:14:19PM -0600, Michael Roth wrote:
> On Fri, Dec 23, 2022 at 05:56:50PM +0100, Borislav Petkov wrote:
> > On Wed, Dec 14, 2022 at 01:39:55PM -0600, Michael Roth wrote:
> > > +       bool (*private_mem_enabled)(struct kvm *kvm);
> > 
> > This looks like a function returning boolean to me. IOW, you can
> > simplify this to:
> 
> The semantics and existing uses of KVM_X86_OP_OPTIONAL_RET0() gave me the
> impression it needed to return an integer value, since by default if a
> platform doesn't implement the op it would "return 0", and so could
> still be called unconditionally.
> 
> Maybe that's not actually enforced, by it seems awkward to try to use a
> bool return instead. At least for KVM_X86_OP_OPTIONAL_RET0().
> 
> However, we could just use KVM_X86_OP() to declare it so we can cleanly
> use a function that returns bool, and then we just need to do:
> 
>   bool kvm_arch_has_private_mem(struct kvm *kvm)
>   {
>           if (kvm_x86_ops.private_mem_enabled)
>                   return static_call(kvm_x86_private_mem_enabled)(kvm);

I guess this is missing:

        return false;

>   }
>     
> instead of relying on default return value. So I'll take that approach
> and adopt your other suggested changes.
> 
> ...
> 
> On a separate topic though, at a high level, this hook is basically a way
> for platform-specific code to tell generic KVM code that private memslots
> are supported by overriding the kvm_arch_has_private_mem() weak
> reference. In this case the AMD platform is using using kvm->arch.upm_mode
> flag to convey that, which is in turn set by the
> KVM_CAP_UNMAPPED_PRIVATE_MEMORY introduced in this series.
> 
> But if, as I suggested in response to your PATCH 2 comments, we drop
> KVM_CAP_UNAMMPED_PRIVATE_MEMORY in favor of
> KVM_SET_SUPPORTED_MEMORY_ATTRIBUTES ioctl to enable "UPM mode" in SEV/SNP
> code, then we need to rethink things a bit, since KVM_SET_MEMORY_ATTRIBUTES
> in-part relies on kvm_arch_has_private_mem() to determine what flags are
> supported, whereas SEV/SNP code would be using what was set by
> KVM_SET_MEMORY_ATTRIBUTES to determine the return value in
> kvm_arch_has_private_mem().

Does this mean that internal calls to  kvm_vm_set_region_attr() will
cease to exist, and it will rely for user space to use the ioctl
properly instead?

> So, for AMD, the return value of kvm_arch_has_private_mem() needs to rely
> on something else. Maybe the logic can just be:
> 
>   bool svm_private_mem_enabled(struct kvm *kvm)
>   {
>     return sev_enabled(kvm) || sev_snp_enabled(kvm)
>   }
> 
> (at least in the context of this patchset where UPM support is added for
> both SEV and SNP).
> 
> So I'll plan to make that change as well.
> 
> -Mike
> 
> > 
> > diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
> > index 82ba4a564e58..4449aeff0dff 100644
> > --- a/arch/x86/include/asm/kvm-x86-ops.h
> > +++ b/arch/x86/include/asm/kvm-x86-ops.h
> > @@ -129,6 +129,7 @@ KVM_X86_OP(msr_filter_changed)
> >  KVM_X86_OP(complete_emulated_msr)
> >  KVM_X86_OP(vcpu_deliver_sipi_vector)
> >  KVM_X86_OP_OPTIONAL_RET0(vcpu_get_apicv_inhibit_reasons);
> > +KVM_X86_OP_OPTIONAL_RET0(private_mem_enabled);
> >  
> >  #undef KVM_X86_OP
> >  #undef KVM_X86_OP_OPTIONAL
> > diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> > index 1da0474edb2d..1b4b89ddeb55 100644
> > --- a/arch/x86/include/asm/kvm_host.h
> > +++ b/arch/x86/include/asm/kvm_host.h
> > @@ -1574,6 +1574,7 @@ struct kvm_x86_ops {
> >  
> >  	void (*load_mmu_pgd)(struct kvm_vcpu *vcpu, hpa_t root_hpa,
> >  			     int root_level);
> > +	bool (*private_mem_enabled)(struct kvm *kvm);
> >  
> >  	bool (*has_wbinvd_exit)(void);
> >  
> > diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
> > index ce362e88a567..73b780fa4653 100644
> > --- a/arch/x86/kvm/svm/svm.c
> > +++ b/arch/x86/kvm/svm/svm.c
> > @@ -4680,6 +4680,14 @@ static int svm_vm_init(struct kvm *kvm)
> >  	return 0;
> >  }
> >  
> > +static bool svm_private_mem_enabled(struct kvm *kvm)
> > +{
> > +	if (sev_guest(kvm))
> > +		return kvm->arch.upm_mode;
> > +
> > +	return IS_ENABLED(CONFIG_HAVE_KVM_PRIVATE_MEM_TESTING);
> > +}
> > +
> >  static struct kvm_x86_ops svm_x86_ops __initdata = {
> >  	.name = "kvm_amd",
> >  
> > @@ -4760,6 +4768,8 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
> >  
> >  	.vcpu_after_set_cpuid = svm_vcpu_after_set_cpuid,
> >  
> > +	.private_mem_enabled = svm_private_mem_enabled,
> > +
> >  	.has_wbinvd_exit = svm_has_wbinvd_exit,
> >  
> >  	.get_l2_tsc_offset = svm_get_l2_tsc_offset,
> > diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> > index 823646d601db..9a1ca59d36a4 100644
> > --- a/arch/x86/kvm/x86.c
> > +++ b/arch/x86/kvm/x86.c
> > @@ -12556,6 +12556,11 @@ void __user * __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa,
> >  }
> >  EXPORT_SYMBOL_GPL(__x86_set_memory_region);
> >  
> > +bool kvm_arch_has_private_mem(struct kvm *kvm)
> > +{
> > +	return static_call(kvm_x86_private_mem_enabled)(kvm);
> > +}
> > +
> >  void kvm_arch_pre_destroy_vm(struct kvm *kvm)
> >  {
> >  	kvm_mmu_pre_destroy_vm(kvm);
> > 
> > -- 
> > Regards/Gruss,
> >     Boris.
> > 
> > https://nam11.safelinks.protection.outlook.com/?url=https%3A%2F%2Fpeople.kernel.org%2Ftglx%2Fnotes-about-netiquette&data=05%7C01%7Cmichael.roth%40amd.com%7C319e89ce555a46eace4d08dae506b51a%7C3dd8961fe4884e608e11a82d994e183d%7C0%7C0%7C638074114318137471%7CUnknown%7CTWFpbGZsb3d8eyJWIjoiMC4wLjAwMDAiLCJQIjoiV2luMzIiLCJBTiI6Ik1haWwiLCJXVCI6Mn0%3D%7C3000%7C%7C%7C&sdata=aG11K7va1BhemwlKCKKdcIXEwXGUzImYL%2BZ9%2FQ7XToI%3D&reserved=0

BR, Jarkko
Michael Roth Feb. 20, 2023, 4:18 p.m. UTC | #9
On Fri, Jan 20, 2023 at 09:20:30PM +0000, Jarkko Sakkinen wrote:
> On Wed, Jan 04, 2023 at 08:14:19PM -0600, Michael Roth wrote:
> > On Fri, Dec 23, 2022 at 05:56:50PM +0100, Borislav Petkov wrote:
> > > On Wed, Dec 14, 2022 at 01:39:55PM -0600, Michael Roth wrote:
> > > > +       bool (*private_mem_enabled)(struct kvm *kvm);
> > > 
> > > This looks like a function returning boolean to me. IOW, you can
> > > simplify this to:
> > 
> > The semantics and existing uses of KVM_X86_OP_OPTIONAL_RET0() gave me the
> > impression it needed to return an integer value, since by default if a
> > platform doesn't implement the op it would "return 0", and so could
> > still be called unconditionally.
> > 
> > Maybe that's not actually enforced, by it seems awkward to try to use a
> > bool return instead. At least for KVM_X86_OP_OPTIONAL_RET0().
> > 
> > However, we could just use KVM_X86_OP() to declare it so we can cleanly
> > use a function that returns bool, and then we just need to do:
> > 
> >   bool kvm_arch_has_private_mem(struct kvm *kvm)
> >   {
> >           if (kvm_x86_ops.private_mem_enabled)
> >                   return static_call(kvm_x86_private_mem_enabled)(kvm);
> 
> I guess this is missing:
> 
>         return false;
> 
> >   }
> >     
> > instead of relying on default return value. So I'll take that approach
> > and adopt your other suggested changes.
> > 
> > ...
> > 
> > On a separate topic though, at a high level, this hook is basically a way
> > for platform-specific code to tell generic KVM code that private memslots
> > are supported by overriding the kvm_arch_has_private_mem() weak
> > reference. In this case the AMD platform is using using kvm->arch.upm_mode
> > flag to convey that, which is in turn set by the
> > KVM_CAP_UNMAPPED_PRIVATE_MEMORY introduced in this series.
> > 
> > But if, as I suggested in response to your PATCH 2 comments, we drop
> > KVM_CAP_UNAMMPED_PRIVATE_MEMORY in favor of
> > KVM_SET_SUPPORTED_MEMORY_ATTRIBUTES ioctl to enable "UPM mode" in SEV/SNP
> > code, then we need to rethink things a bit, since KVM_SET_MEMORY_ATTRIBUTES
> > in-part relies on kvm_arch_has_private_mem() to determine what flags are
> > supported, whereas SEV/SNP code would be using what was set by
> > KVM_SET_MEMORY_ATTRIBUTES to determine the return value in
> > kvm_arch_has_private_mem().
> 
> Does this mean that internal calls to  kvm_vm_set_region_attr() will
> cease to exist, and it will rely for user space to use the ioctl
> properly instead?

Patches 1-3 are no longer needed and have been dropped for v8, instead
"UPM mode" is set via KVM_VM_CREATE vm_type arg, and SEV/SNP can simply
call kvm_arch_has_private_mem() to query whether userspace has enabled
UPM mode or not.

But even still, we call kvm_vm_set_region_attr() in
sev_launch_update_data() and snp_launch_update() after copying initial
payload into private memory.

I don't think there's much worth in having userspace have to do it via
KVM_SET_MEMORY_ATTRIBUTES afterward. It could be done that way I suppose,
but generally RMP update from shared->private happens as part of
KVM_SET_MEMORY_ATTRIBUTES, whereas in this case it would necessarily
happen *after* the RMP updates, since SNP_LAUNCH_UPDATE expects the pages
to be marked private beforehand.

Just seems like more corner cases to deal with and more boilerplate code
for userspace, which already needed to operate under the assumption that
pages will be private after SNP_LAUNCH_UPDATE, so seems to make sense to
just have the memory attributes also updated accordingly.

-Mike
> 
> > So, for AMD, the return value of kvm_arch_has_private_mem() needs to rely
> > on something else. Maybe the logic can just be:
> > 
> >   bool svm_private_mem_enabled(struct kvm *kvm)
> >   {
> >     return sev_enabled(kvm) || sev_snp_enabled(kvm)
> >   }
> > 
> > (at least in the context of this patchset where UPM support is added for
> > both SEV and SNP).
> > 
> > So I'll plan to make that change as well.
> > 
> > -Mike
> > 
> > > 
> > > diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
> > > index 82ba4a564e58..4449aeff0dff 100644
> > > --- a/arch/x86/include/asm/kvm-x86-ops.h
> > > +++ b/arch/x86/include/asm/kvm-x86-ops.h
> > > @@ -129,6 +129,7 @@ KVM_X86_OP(msr_filter_changed)
> > >  KVM_X86_OP(complete_emulated_msr)
> > >  KVM_X86_OP(vcpu_deliver_sipi_vector)
> > >  KVM_X86_OP_OPTIONAL_RET0(vcpu_get_apicv_inhibit_reasons);
> > > +KVM_X86_OP_OPTIONAL_RET0(private_mem_enabled);
> > >  
> > >  #undef KVM_X86_OP
> > >  #undef KVM_X86_OP_OPTIONAL
> > > diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> > > index 1da0474edb2d..1b4b89ddeb55 100644
> > > --- a/arch/x86/include/asm/kvm_host.h
> > > +++ b/arch/x86/include/asm/kvm_host.h
> > > @@ -1574,6 +1574,7 @@ struct kvm_x86_ops {
> > >  
> > >  	void (*load_mmu_pgd)(struct kvm_vcpu *vcpu, hpa_t root_hpa,
> > >  			     int root_level);
> > > +	bool (*private_mem_enabled)(struct kvm *kvm);
> > >  
> > >  	bool (*has_wbinvd_exit)(void);
> > >  
> > > diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
> > > index ce362e88a567..73b780fa4653 100644
> > > --- a/arch/x86/kvm/svm/svm.c
> > > +++ b/arch/x86/kvm/svm/svm.c
> > > @@ -4680,6 +4680,14 @@ static int svm_vm_init(struct kvm *kvm)
> > >  	return 0;
> > >  }
> > >  
> > > +static bool svm_private_mem_enabled(struct kvm *kvm)
> > > +{
> > > +	if (sev_guest(kvm))
> > > +		return kvm->arch.upm_mode;
> > > +
> > > +	return IS_ENABLED(CONFIG_HAVE_KVM_PRIVATE_MEM_TESTING);
> > > +}
> > > +
> > >  static struct kvm_x86_ops svm_x86_ops __initdata = {
> > >  	.name = "kvm_amd",
> > >  
> > > @@ -4760,6 +4768,8 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
> > >  
> > >  	.vcpu_after_set_cpuid = svm_vcpu_after_set_cpuid,
> > >  
> > > +	.private_mem_enabled = svm_private_mem_enabled,
> > > +
> > >  	.has_wbinvd_exit = svm_has_wbinvd_exit,
> > >  
> > >  	.get_l2_tsc_offset = svm_get_l2_tsc_offset,
> > > diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> > > index 823646d601db..9a1ca59d36a4 100644
> > > --- a/arch/x86/kvm/x86.c
> > > +++ b/arch/x86/kvm/x86.c
> > > @@ -12556,6 +12556,11 @@ void __user * __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa,
> > >  }
> > >  EXPORT_SYMBOL_GPL(__x86_set_memory_region);
> > >  
> > > +bool kvm_arch_has_private_mem(struct kvm *kvm)
> > > +{
> > > +	return static_call(kvm_x86_private_mem_enabled)(kvm);
> > > +}
> > > +
> > >  void kvm_arch_pre_destroy_vm(struct kvm *kvm)
> > >  {
> > >  	kvm_mmu_pre_destroy_vm(kvm);
> > > 
> > > -- 
> > > Regards/Gruss,
> > >     Boris.
> > > 
> > > https://nam11.safelinks.protection.outlook.com/?url=https%3A%2F%2Fpeople.kernel.org%2Ftglx%2Fnotes-about-netiquette&data=05%7C01%7Cmichael.roth%40amd.com%7C319e89ce555a46eace4d08dae506b51a%7C3dd8961fe4884e608e11a82d994e183d%7C0%7C0%7C638074114318137471%7CUnknown%7CTWFpbGZsb3d8eyJWIjoiMC4wLjAwMDAiLCJQIjoiV2luMzIiLCJBTiI6Ik1haWwiLCJXVCI6Mn0%3D%7C3000%7C%7C%7C&sdata=aG11K7va1BhemwlKCKKdcIXEwXGUzImYL%2BZ9%2FQ7XToI%3D&reserved=0
> 
> BR, Jarkko
diff mbox series

Patch

diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
index abccd51dcfca..f530a550c092 100644
--- a/arch/x86/include/asm/kvm-x86-ops.h
+++ b/arch/x86/include/asm/kvm-x86-ops.h
@@ -131,6 +131,7 @@  KVM_X86_OP(msr_filter_changed)
 KVM_X86_OP(complete_emulated_msr)
 KVM_X86_OP(vcpu_deliver_sipi_vector)
 KVM_X86_OP_OPTIONAL_RET0(vcpu_get_apicv_inhibit_reasons);
+KVM_X86_OP_OPTIONAL_RET0(private_mem_enabled);
 
 #undef KVM_X86_OP
 #undef KVM_X86_OP_OPTIONAL
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 2b6244525107..9317abffbf68 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1635,6 +1635,7 @@  struct kvm_x86_ops {
 
 	void (*load_mmu_pgd)(struct kvm_vcpu *vcpu, hpa_t root_hpa,
 			     int root_level);
+	int (*private_mem_enabled)(struct kvm *kvm);
 
 	bool (*has_wbinvd_exit)(void);
 
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 91352d692845..7f3e4d91c0c6 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -4694,6 +4694,14 @@  static int svm_vm_init(struct kvm *kvm)
 	return 0;
 }
 
+static int svm_private_mem_enabled(struct kvm *kvm)
+{
+	if (sev_guest(kvm))
+		return kvm->arch.upm_mode ? 1 : 0;
+
+	return IS_ENABLED(CONFIG_HAVE_KVM_PRIVATE_MEM_TESTING) ? 1 : 0;
+}
+
 static struct kvm_x86_ops svm_x86_ops __initdata = {
 	.name = "kvm_amd",
 
@@ -4774,6 +4782,8 @@  static struct kvm_x86_ops svm_x86_ops __initdata = {
 
 	.vcpu_after_set_cpuid = svm_vcpu_after_set_cpuid,
 
+	.private_mem_enabled = svm_private_mem_enabled,
+
 	.has_wbinvd_exit = svm_has_wbinvd_exit,
 
 	.get_l2_tsc_offset = svm_get_l2_tsc_offset,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 99ecf99bc4d2..bb6adb216054 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -12266,6 +12266,14 @@  void __user * __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa,
 }
 EXPORT_SYMBOL_GPL(__x86_set_memory_region);
 
+bool kvm_arch_has_private_mem(struct kvm *kvm)
+{
+	if (static_call(kvm_x86_private_mem_enabled)(kvm))
+		return true;
+
+	return false;
+}
+
 void kvm_arch_pre_destroy_vm(struct kvm *kvm)
 {
 	kvm_mmu_pre_destroy_vm(kvm);