
[Part2,v6,06/49] x86/sev: Add helper functions for RMPUPDATE and PSMASH instruction

Message ID e4643e9d37fcb025d0aec9080feefaae5e9245d5.1655761627.git.ashish.kalra@amd.com (mailing list archive)
State New, archived
Series Add AMD Secure Nested Paging (SEV-SNP)

Commit Message

Kalra, Ashish June 20, 2022, 11:02 p.m. UTC
From: Brijesh Singh <brijesh.singh@amd.com>

The RMPUPDATE instruction writes a new RMP entry in the RMP Table. The
hypervisor will use the instruction to add pages to the RMP table. See
APM3 for details on the instruction operations.

The PSMASH instruction expands a 2MB RMP entry into a corresponding set of
contiguous 4KB-Page RMP entries. The hypervisor will use this instruction
to adjust the RMP entry without invalidating the previous RMP entry.

Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
---
 arch/x86/include/asm/sev.h | 11 ++++++
 arch/x86/kernel/sev.c      | 72 ++++++++++++++++++++++++++++++++++++++
 2 files changed, 83 insertions(+)
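
To make the intended use concrete, here is a minimal, hypothetical hypervisor-side sketch built only on the helpers this patch adds (rmp_make_private(), rmp_make_shared(), psmash()); the wrapper function name and its arguments are illustrative, not part of the patch:

static int snp_assign_then_reclaim_page(u64 pfn, u64 gpa, int asid)
{
	int ret;

	/* Mark the page as guest-owned (private) in the RMP table. */
	ret = rmp_make_private(pfn, gpa, PG_LEVEL_4K, asid, false);
	if (ret)
		return ret;

	/* ... the guest uses the page as private memory ... */

	/* Return the page to the shared, hypervisor-owned state. */
	return rmp_make_shared(pfn, PG_LEVEL_4K);
}

If a page had instead been assigned at the 2MB level, psmash(pfn) could first split the 2MB RMP entry into 4K entries before individual pages are reclaimed.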

Comments

Dr. David Alan Gilbert June 21, 2022, 4:40 p.m. UTC | #1
* Ashish Kalra (Ashish.Kalra@amd.com) wrote:
> From: Brijesh Singh <brijesh.singh@amd.com>
> 
> The RMPUPDATE instruction writes a new RMP entry in the RMP Table. The
> hypervisor will use the instruction to add pages to the RMP table. See
> APM3 for details on the instruction operations.
> 
> The PSMASH instruction expands a 2MB RMP entry into a corresponding set of
> contiguous 4KB-Page RMP entries. The hypervisor will use this instruction
> to adjust the RMP entry without invalidating the previous RMP entry.
> 
> Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
> ---
>  arch/x86/include/asm/sev.h | 11 ++++++
>  arch/x86/kernel/sev.c      | 72 ++++++++++++++++++++++++++++++++++++++
>  2 files changed, 83 insertions(+)
> 
> diff --git a/arch/x86/include/asm/sev.h b/arch/x86/include/asm/sev.h
> index cb16f0e5b585..6ab872311544 100644
> --- a/arch/x86/include/asm/sev.h
> +++ b/arch/x86/include/asm/sev.h
> @@ -85,7 +85,9 @@ extern bool handle_vc_boot_ghcb(struct pt_regs *regs);
>  
>  /* RMP page size */
>  #define RMP_PG_SIZE_4K			0
> +#define RMP_PG_SIZE_2M			1
>  #define RMP_TO_X86_PG_LEVEL(level)	(((level) == RMP_PG_SIZE_4K) ? PG_LEVEL_4K : PG_LEVEL_2M)
> +#define X86_TO_RMP_PG_LEVEL(level)	(((level) == PG_LEVEL_4K) ? RMP_PG_SIZE_4K : RMP_PG_SIZE_2M)
>  
>  /*
>   * The RMP entry format is not architectural. The format is defined in PPR
> @@ -126,6 +128,15 @@ struct snp_guest_platform_data {
>  	u64 secrets_gpa;
>  };
>  
> +struct rmpupdate {
> +	u64 gpa;
> +	u8 assigned;
> +	u8 pagesize;
> +	u8 immutable;
> +	u8 rsvd;
> +	u32 asid;
> +} __packed;

I see above it says the RMP entry format isn't architectural; does that apply to
this 'rmpupdate' structure too? If not, how is this going to get handled when we
have a couple of SNP-capable CPUs with different layouts?

Dave

>  #ifdef CONFIG_AMD_MEM_ENCRYPT
>  extern struct static_key_false sev_es_enable_key;
>  extern void __sev_es_ist_enter(struct pt_regs *regs);
> diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
> index 59e7ec6b0326..f6c64a722e94 100644
> --- a/arch/x86/kernel/sev.c
> +++ b/arch/x86/kernel/sev.c
> @@ -2429,3 +2429,75 @@ int snp_lookup_rmpentry(u64 pfn, int *level)
>  	return !!rmpentry_assigned(e);
>  }
>  EXPORT_SYMBOL_GPL(snp_lookup_rmpentry);
> +
> +int psmash(u64 pfn)
> +{
> +	unsigned long paddr = pfn << PAGE_SHIFT;
> +	int ret;
> +
> +	if (!pfn_valid(pfn))
> +		return -EINVAL;
> +
> +	if (!cpu_feature_enabled(X86_FEATURE_SEV_SNP))
> +		return -ENXIO;
> +
> +	/* Binutils version 2.36 supports the PSMASH mnemonic. */
> +	asm volatile(".byte 0xF3, 0x0F, 0x01, 0xFF"
> +		      : "=a"(ret)
> +		      : "a"(paddr)
> +		      : "memory", "cc");
> +
> +	return ret;
> +}
> +EXPORT_SYMBOL_GPL(psmash);
> +
> +static int rmpupdate(u64 pfn, struct rmpupdate *val)
> +{
> +	unsigned long paddr = pfn << PAGE_SHIFT;
> +	int ret;
> +
> +	if (!pfn_valid(pfn))
> +		return -EINVAL;
> +
> +	if (!cpu_feature_enabled(X86_FEATURE_SEV_SNP))
> +		return -ENXIO;
> +
> +	/* Binutils version 2.36 supports the RMPUPDATE mnemonic. */
> +	asm volatile(".byte 0xF2, 0x0F, 0x01, 0xFE"
> +		     : "=a"(ret)
> +		     : "a"(paddr), "c"((unsigned long)val)
> +		     : "memory", "cc");
> +	return ret;
> +}
> +
> +int rmp_make_private(u64 pfn, u64 gpa, enum pg_level level, int asid, bool immutable)
> +{
> +	struct rmpupdate val;
> +
> +	if (!pfn_valid(pfn))
> +		return -EINVAL;
> +
> +	memset(&val, 0, sizeof(val));
> +	val.assigned = 1;
> +	val.asid = asid;
> +	val.immutable = immutable;
> +	val.gpa = gpa;
> +	val.pagesize = X86_TO_RMP_PG_LEVEL(level);
> +
> +	return rmpupdate(pfn, &val);
> +}
> +EXPORT_SYMBOL_GPL(rmp_make_private);
> +
> +int rmp_make_shared(u64 pfn, enum pg_level level)
> +{
> +	struct rmpupdate val;
> +
> +	if (!pfn_valid(pfn))
> +		return -EINVAL;
> +
> +	memset(&val, 0, sizeof(val));
> +	val.pagesize = X86_TO_RMP_PG_LEVEL(level);
> +
> +	return rmpupdate(pfn, &val);
> +}
> +EXPORT_SYMBOL_GPL(rmp_make_shared);
> -- 
> 2.25.1
>
Kalra, Ashish June 21, 2022, 5:38 p.m. UTC | #2

Hello Dave,

>>  /*
>>   * The RMP entry format is not architectural. The format is defined 
>> in PPR @@ -126,6 +128,15 @@ struct snp_guest_platform_data {
>>  	u64 secrets_gpa;
>>  };
>>  
>> +struct rmpupdate {
>> +	u64 gpa;
>> +	u8 assigned;
>> +	u8 pagesize;
>> +	u8 immutable;
>> +	u8 rsvd;
>> +	u32 asid;
>> +} __packed;

>I see above it says the RMP entry format isn't architectural; is this 'rmpupdate' structure? If not how is this going to get handled when we have a couple of SNP capable CPUs with different layouts?

Architectural implies that it is defined in the APM and shouldn't change in a way that breaks backward compatibility.
I think the wording here should probably be architecture independent, or more precisely platform independent.

Thanks,
Ashish
Dave Hansen June 22, 2022, 2:26 p.m. UTC | #3
On 6/20/22 16:02, Ashish Kalra wrote:
> +int psmash(u64 pfn)
> +{
> +	unsigned long paddr = pfn << PAGE_SHIFT;
> +	int ret;
> +
> +	if (!pfn_valid(pfn))
> +		return -EINVAL;
> +
> +	if (!cpu_feature_enabled(X86_FEATURE_SEV_SNP))
> +		return -ENXIO;
> +
> +	/* Binutils version 2.36 supports the PSMASH mnemonic. */
> +	asm volatile(".byte 0xF3, 0x0F, 0x01, 0xFF"
> +		      : "=a"(ret)
> +		      : "a"(paddr)
> +		      : "memory", "cc");
> +
> +	return ret;
> +}
> +EXPORT_SYMBOL_GPL(psmash);

If a function gets an EXPORT_SYMBOL_GPL(), the least we can do is
reasonably document it.  We don't need full kerneldoc nonsense, but a
one-line about what this does would be quite helpful.  That goes for all
the functions here.

It would also be extremely helpful to have the changelog explain why
these functions are exported and how the exports will be used.

As a general rule, please push cpu_feature_enabled() checks as early as
you reasonably can.  They are *VERY* cheap and can even enable the
compiler to completely zap code like an #ifdef.

There also seem to be a lot of pfn_valid() checks in here that aren't
very well thought out.  For instance, there's a pfn_valid() check here:


+int rmp_make_shared(u64 pfn, enum pg_level level)
+{
+	struct rmpupdate val;
+
+	if (!pfn_valid(pfn))
+		return -EINVAL;
...
+	return rmpupdate(pfn, &val);
+}

and in rmpupdate():

+static int rmpupdate(u64 pfn, struct rmpupdate *val)
+{
+	unsigned long paddr = pfn << PAGE_SHIFT;
+	int ret;
+
+	if (!pfn_valid(pfn))
+		return -EINVAL;
...


This is (at best) wasteful.  Could it be refactored?
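
One possible shape for such a refactor, sketched here only to illustrate the point: do the cheap cpu_feature_enabled() check first, keep the pfn_valid() check in rmpupdate() alone, and let the thin wrappers rely on it:

static int rmpupdate(u64 pfn, struct rmpupdate *val)
{
	unsigned long paddr = pfn << PAGE_SHIFT;
	int ret;

	/* Cheapest check first; it also lets the compiler discard dead code. */
	if (!cpu_feature_enabled(X86_FEATURE_SEV_SNP))
		return -ENXIO;

	/* Validate the pfn once, on behalf of all callers. */
	if (!pfn_valid(pfn))
		return -EINVAL;

	/* Binutils version 2.36 supports the RMPUPDATE mnemonic. */
	asm volatile(".byte 0xF2, 0x0F, 0x01, 0xFE"
		     : "=a"(ret)
		     : "a"(paddr), "c"((unsigned long)val)
		     : "memory", "cc");
	return ret;
}

int rmp_make_shared(u64 pfn, enum pg_level level)
{
	struct rmpupdate val = {};

	/* No duplicate pfn/feature checks here; rmpupdate() handles both. */
	val.pagesize = X86_TO_RMP_PG_LEVEL(level);

	return rmpupdate(pfn, &val);
}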
Kalra, Ashish June 22, 2022, 6:04 p.m. UTC | #4

>> +int psmash(u64 pfn)
>> +{
>> +	unsigned long paddr = pfn << PAGE_SHIFT;
>> +	int ret;
>> +
>> +	if (!pfn_valid(pfn))
>> +		return -EINVAL;
>> +
>> +	if (!cpu_feature_enabled(X86_FEATURE_SEV_SNP))
>> +		return -ENXIO;
>> +
>> +	/* Binutils version 2.36 supports the PSMASH mnemonic. */
>> +	asm volatile(".byte 0xF3, 0x0F, 0x01, 0xFF"
>> +		      : "=a"(ret)
>> +		      : "a"(paddr)
>> +		      : "memory", "cc");
>> +
>> +	return ret;
>> +}
>> +EXPORT_SYMBOL_GPL(psmash);

>If a function gets an EXPORT_SYMBOL_GPL(), the least we can do is reasonably document it.  We don't need full kerneldoc nonsense, but a one-line about what this does would be quite helpful.  That goes for all the functions here.

>It would also be extremely helpful to have the changelog explain why these functions are exported and how the exports will be used.

I will add basic descriptions for all these exported functions.
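
For example, a one-line description of the sort being asked for might read (wording illustrative):

/* psmash() - split the 2MB RMP entry of @pfn into 512 contiguous 4K RMP entries. */
int psmash(u64 pfn);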

Thanks,
Ashish

>As a general rule, please push cpu_feature_enabled() checks as early as you reasonably can.  They are *VERY* cheap and can even enable the compiler to completely zap code like an #ifdef.

There also seem to be a lot of pfn_valid() checks in here that aren't very well thought out.  For instance, there's a pfn_valid() check here:


+int rmp_make_shared(u64 pfn, enum pg_level level) {
+	struct rmpupdate val;
+
+	if (!pfn_valid(pfn))
+		return -EINVAL;
...
+	return rmpupdate(pfn, &val);
+}

and in rmpupdate():

+static int rmpupdate(u64 pfn, struct rmpupdate *val) {
+	unsigned long paddr = pfn << PAGE_SHIFT;
+	int ret;
+
+	if (!pfn_valid(pfn))
+		return -EINVAL;
...


This is (at best) wasteful.  Could it be refactored?
Kalra, Ashish June 22, 2022, 6:17 p.m. UTC | #5

>>>  /*
>>>   * The RMP entry format is not architectural. The format is defined 
>>> in PPR @@ -126,6 +128,15 @@ struct snp_guest_platform_data {
>>>  	u64 secrets_gpa;
>>>  };
>>>  
>>> +struct rmpupdate {
>>> +	u64 gpa;
>>> +	u8 assigned;
>>> +	u8 pagesize;
>>> +	u8 immutable;
>>> +	u8 rsvd;
>>> +	u32 asid;
>>> +} __packed;

>>I see above it says the RMP entry format isn't architectural; is this 'rmpupdate' structure? If not how is this going to get handled when we have a couple >of SNP capable CPUs with different layouts?

>Architectural implies that it is defined in the APM and shouldn't change in such a way as to not be backward compatible. 
>I probably think the wording here should be architecture independent or more precisely platform independent.

Some more clarity on this: 

Actually, the PPR for family 19h Model 01h, Rev B1 defines the RMP entry format as below:

2.1.4.2 RMP Entry Format
Architecturally the format of RMP entries are not specified in APM. In order to assist software, the following table specifies select portions of the RMP entry format for this specific product. Each RMP entry is 16B in size and is formatted as follows. Software should not rely on any field definitions not specified in this table and the format of an RMP entry may change in future processors. 

Architectural implies that it is defined in the APM and shouldn't change in a way that breaks backward compatibility. So non-architectural in this context means that it is only defined in our PPR.

So actually this RMP entry definition is platform dependent and will need to be changed for different AMD processors, and that change has to be handled correspondingly in the dump_rmpentry() code.

Thanks,
Ashish
Dr. David Alan Gilbert June 28, 2022, 10:50 a.m. UTC | #6
* Kalra, Ashish (Ashish.Kalra@amd.com) wrote:
> 
> >>>  /*
> >>>   * The RMP entry format is not architectural. The format is defined 
> >>> in PPR @@ -126,6 +128,15 @@ struct snp_guest_platform_data {
> >>>  	u64 secrets_gpa;
> >>>  };
> >>>  
> >>> +struct rmpupdate {
> >>> +	u64 gpa;
> >>> +	u8 assigned;
> >>> +	u8 pagesize;
> >>> +	u8 immutable;
> >>> +	u8 rsvd;
> >>> +	u32 asid;
> >>> +} __packed;
> 
> >>I see above it says the RMP entry format isn't architectural; is this 'rmpupdate' structure? If not how is this going to get handled when we have a couple >of SNP capable CPUs with different layouts?
> 
> >Architectural implies that it is defined in the APM and shouldn't change in such a way as to not be backward compatible. 
> >I probably think the wording here should be architecture independent or more precisely platform independent.
> 
> Some more clarity on this: 
> 
> Actually, the PPR for family 19h Model 01h, Rev B1 defines the RMP entry format as below:
> 
> 2.1.4.2 RMP Entry Format
> Architecturally the format of RMP entries are not specified in APM. In order to assist software, the following table specifies select portions of the RMP entry format for this specific product. Each RMP entry is 16B in size and is formatted as follows. Software should not rely on any field definitions not specified in this table and the format of an RMP entry may change in future processors. 
> 
> Architectural implies that it is defined in the APM and shouldn't change in such a way as to not be backward compatible. So non-architectural in this context means that it is only defined in our PPR.
> 
> So actually this RMP entry definition is platform dependent and will need to be changed for different AMD processors, and that change has to be handled correspondingly in the dump_rmpentry() code.

You'll need a way to make that fail cleanly when run on a newer CPU
with a different layout, and a way to build kernels that can handle
more than one layout.

Dave

> Thanks,
> Ashish
>
Kalra, Ashish June 28, 2022, 5:57 p.m. UTC | #7

Hello Dave,


* Kalra, Ashish (Ashish.Kalra@amd.com) wrote:
> 
> >>>  /*
> >>>   * The RMP entry format is not architectural. The format is 
> >>> defined in PPR @@ -126,6 +128,15 @@ struct snp_guest_platform_data {
> >>>  	u64 secrets_gpa;
> >>>  };
> >>>  
> >>> +struct rmpupdate {
> >>> +	u64 gpa;
> >>> +	u8 assigned;
> >>> +	u8 pagesize;
> >>> +	u8 immutable;
> >>> +	u8 rsvd;
> >>> +	u32 asid;
> >>> +} __packed;
> 
> >>I see above it says the RMP entry format isn't architectural; is this 'rmpupdate' structure? If not how is this going to get handled when we have a couple >of SNP capable CPUs with different layouts?
> 
> >Architectural implies that it is defined in the APM and shouldn't change in such a way as to not be backward compatible. 
> >I probably think the wording here should be architecture independent or more precisely platform independent.
> 
> Some more clarity on this: 
> 
> Actually, the PPR for family 19h Model 01h, Rev B1 defines the RMP entry format as below:
> 
> 2.1.4.2 RMP Entry Format
> Architecturally the format of RMP entries are not specified in APM. In order to assist software, the following table specifies select portions of the RMP entry format for this specific product. Each RMP entry is 16B in size and is formatted as follows. Software should not rely on any field definitions not specified in this table and the format of an RMP entry may change in future processors. 
> 
> Architectural implies that it is defined in the APM and shouldn't change in such a way as to not be backward compatible. So non-architectural in this context means that it is only defined in our PPR.
> 
> So actually this RMP entry definition is platform dependent and will need to be changed for different AMD processors, and that change has to be handled correspondingly in the dump_rmpentry() code.

> You'll need a way to make that fail cleanly when run on a newer CPU with different layout, and a way to build kernels that can handle more than one layout.

Yes, I will be adding a check for CPU family/model as follows:

static int __init snp_rmptable_init(void)
{
+       int family, model;

      if (!boot_cpu_has(X86_FEATURE_SEV_SNP))
               return 0;

+       family = boot_cpu_data.x86;
+       model  = boot_cpu_data.x86_model;

+       /*
+        * The RMP table entry format is not architectural; it can vary by processor
+        * and is defined by the per-processor PPR. Restrict SNP support to the known
+        * CPU models and families for which the RMP table entry format is currently
+        * defined.
+        */
+       if (family != 0x19 || model > 0xaf)
+               goto nosnp;
+

This way SNP will only be enabled on the platforms for which this RMP entry
format is defined in those processors' PPRs. This will work for Milan and Genoa as of now.

Additionally, as per Sean's suggestion, I will be moving the RMP structure definition to sev.c,
which will make it a private structure that is not exposed to other parts of the kernel.

Also, in the future we will have an architectural interface to read the RMP table entry;
we will first check for its availability and, if it is not available, fall back to the
RMP table entry structure definition.

 Thanks,
 Ashish
Dr. David Alan Gilbert June 28, 2022, 6:58 p.m. UTC | #8
* Kalra, Ashish (Ashish.Kalra@amd.com) wrote:
> 
> Hello Dave,
> 
> 
> * Kalra, Ashish (Ashish.Kalra@amd.com) wrote:
> > 
> > >>>  /*
> > >>>   * The RMP entry format is not architectural. The format is 
> > >>> defined in PPR @@ -126,6 +128,15 @@ struct snp_guest_platform_data {
> > >>>  	u64 secrets_gpa;
> > >>>  };
> > >>>  
> > >>> +struct rmpupdate {
> > >>> +	u64 gpa;
> > >>> +	u8 assigned;
> > >>> +	u8 pagesize;
> > >>> +	u8 immutable;
> > >>> +	u8 rsvd;
> > >>> +	u32 asid;
> > >>> +} __packed;
> > 
> > >>I see above it says the RMP entry format isn't architectural; is this 'rmpupdate' structure? If not how is this going to get handled when we have a couple >of SNP capable CPUs with different layouts?
> > 
> > >Architectural implies that it is defined in the APM and shouldn't change in such a way as to not be backward compatible. 
> > >I probably think the wording here should be architecture independent or more precisely platform independent.
> > 
> > Some more clarity on this: 
> > 
> > Actually, the PPR for family 19h Model 01h, Rev B1 defines the RMP entry format as below:
> > 
> > 2.1.4.2 RMP Entry Format
> > Architecturally the format of RMP entries are not specified in APM. In order to assist software, the following table specifies select portions of the RMP entry format for this specific product. Each RMP entry is 16B in size and is formatted as follows. Software should not rely on any field definitions not specified in this table and the format of an RMP entry may change in future processors. 
> > 
> > Architectural implies that it is defined in the APM and shouldn't change in such a way as to not be backward compatible. So non-architectural in this context means that it is only defined in our PPR.
> > 
> > So actually this RMP entry definition is platform dependent and will need to be changed for different AMD processors, and that change has to be handled correspondingly in the dump_rmpentry() code.
> 
> > You'll need a way to make that fail cleanly when run on a newer CPU with different layout, and a way to build kernels that can handle more than one layout.
> 
> Yes, I will be adding a check for CPU family/model as following :
> 
> static int __init snp_rmptable_init(void)
> {
> +       int family, model;
> 
>       if (!boot_cpu_has(X86_FEATURE_SEV_SNP))
>                return 0;
> 
> +       family = boot_cpu_data.x86;
> +       model  = boot_cpu_data.x86_model;
> 
> +       /*
> +        * RMP table entry format is not architectural and it can vary by processor and
> +        * is defined by the per-processor PPR. Restrict SNP support on the known CPU
> +        * model and family for which the RMP table entry format is currently defined for.
> +        */
> +       if (family != 0x19 || model > 0xaf)
> +               goto nosnp;

please add a print there to say why you're not enabling SNP.

It would be great if your firmware could give you an 'rmpentry version'; then
if a new model came out that happened to have the same layout,
everything would just carry on working by checking that rather than
the actual family/model.
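
Concretely, the earlier snippet plus such a message might look like this sketch (the nosnp teardown and the rest of the init path are elided):

static int __init snp_rmptable_init(void)
{
	int family, model;

	if (!boot_cpu_has(X86_FEATURE_SEV_SNP))
		return 0;

	family = boot_cpu_data.x86;
	model  = boot_cpu_data.x86_model;

	/*
	 * The RMP entry format is only defined in the per-model PPR, so
	 * refuse to enable SNP on models that have not been audited, and
	 * say why.
	 */
	if (family != 0x19 || model > 0xaf) {
		pr_info("SEV-SNP: not enabled, RMP entry format unknown for family 0x%x model 0x%x\n",
			family, model);
		goto nosnp;
	}

	/* ... remainder of the RMP table setup ... */
	return 0;

nosnp:
	/* ... clear/disable the SNP feature ... */
	return -ENOSYS;
}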

> +
> 
> This way SNP will only be enabled specifically on the platforms for which this RMP entry
> format is defined in those processor's PPR. This will work for Milan and Genoa as of now.
> 
> Additionally as per Sean's suggestion, I will be moving the RMP structure definition to sev.c,
> which will make it a private structure and not exposed to other parts of the kernel.
> 
> Also in the future we will have an architectural interface to read the RMP table entry,
> we will first check for it's availability and if not available fall back to the RMP table
> entry structure definition.

Dave

>  Thanks,
>  Ashish
>
Dave Hansen June 28, 2022, 7:03 p.m. UTC | #9
On 6/28/22 10:57, Kalra, Ashish wrote:
> +       /*
> +        * RMP table entry format is not architectural and it can vary by processor and
> +        * is defined by the per-processor PPR. Restrict SNP support on the known CPU
> +        * model and family for which the RMP table entry format is currently defined for.
> +        */
> +       if (family != 0x19 || model > 0xaf)
> +               goto nosnp;
> +
> 
> This way SNP will only be enabled specifically on the platforms for which this RMP entry
> format is defined in those processor's PPR. This will work for Milan and Genoa as of now.

At some point, it would be really nice if the AMD side of things could
work to kick the magic number habit on these things.  This:

	arch/x86/include/asm/intel-family.h

has been really handy.  It lets you do things like

	grep INTEL_FAM6_SKYLAKE arch/x86

That's a *LOT* more precise than:

	egrep -i '0x5E|94' arch/x86
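
For illustration only, an AMD analogue might look like the sketch below; such a header does not exist today, and the Milan model value is taken from the PPR reference earlier in this thread:

/* Hypothetical arch/x86/include/asm/amd-family.h, in the spirit of intel-family.h. */
#define AMD_FAM19H		0x19
#define AMD_FAM19H_MILAN	0x01	/* family 19h model 01h, per the PPR cited above */

	/* The gate in snp_rmptable_init() could then read: */
	if (boot_cpu_data.x86 != AMD_FAM19H || boot_cpu_data.x86_model > 0xaf)
		goto nosnp;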
Dov Murik July 24, 2022, 5:31 p.m. UTC | #10
Hi Ashish,

On 21/06/2022 2:02, Ashish Kalra wrote:
> From: Brijesh Singh <brijesh.singh@amd.com>
> 
> The RMPUPDATE instruction writes a new RMP entry in the RMP Table. The
> hypervisor will use the instruction to add pages to the RMP table. See
> APM3 for details on the instruction operations.
> 
> The PSMASH instruction expands a 2MB RMP entry into a corresponding set of
> contiguous 4KB-Page RMP entries. The hypervisor will use this instruction
> to adjust the RMP entry without invalidating the previous RMP entry.
> 
> Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
> ---
>  arch/x86/include/asm/sev.h | 11 ++++++
>  arch/x86/kernel/sev.c      | 72 ++++++++++++++++++++++++++++++++++++++
>  2 files changed, 83 insertions(+)
> 
> diff --git a/arch/x86/include/asm/sev.h b/arch/x86/include/asm/sev.h
> index cb16f0e5b585..6ab872311544 100644
> --- a/arch/x86/include/asm/sev.h
> +++ b/arch/x86/include/asm/sev.h
> @@ -85,7 +85,9 @@ extern bool handle_vc_boot_ghcb(struct pt_regs *regs);
>  
>  /* RMP page size */
>  #define RMP_PG_SIZE_4K			0
> +#define RMP_PG_SIZE_2M			1
>  #define RMP_TO_X86_PG_LEVEL(level)	(((level) == RMP_PG_SIZE_4K) ? PG_LEVEL_4K : PG_LEVEL_2M)
> +#define X86_TO_RMP_PG_LEVEL(level)	(((level) == PG_LEVEL_4K) ? RMP_PG_SIZE_4K : RMP_PG_SIZE_2M)
>  
>  /*
>   * The RMP entry format is not architectural. The format is defined in PPR
> @@ -126,6 +128,15 @@ struct snp_guest_platform_data {
>  	u64 secrets_gpa;
>  };
>  
> +struct rmpupdate {
> +	u64 gpa;
> +	u8 assigned;
> +	u8 pagesize;
> +	u8 immutable;
> +	u8 rsvd;
> +	u32 asid;
> +} __packed;
> +
>  #ifdef CONFIG_AMD_MEM_ENCRYPT
>  extern struct static_key_false sev_es_enable_key;
>  extern void __sev_es_ist_enter(struct pt_regs *regs);
> diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
> index 59e7ec6b0326..f6c64a722e94 100644
> --- a/arch/x86/kernel/sev.c
> +++ b/arch/x86/kernel/sev.c
> @@ -2429,3 +2429,75 @@ int snp_lookup_rmpentry(u64 pfn, int *level)
>  	return !!rmpentry_assigned(e);
>  }
>  EXPORT_SYMBOL_GPL(snp_lookup_rmpentry);
> +
> +int psmash(u64 pfn)
> +{
> +	unsigned long paddr = pfn << PAGE_SHIFT;
> +	int ret;
> +
> +	if (!pfn_valid(pfn))
> +		return -EINVAL;
> +
> +	if (!cpu_feature_enabled(X86_FEATURE_SEV_SNP))
> +		return -ENXIO;
> +
> +	/* Binutils version 2.36 supports the PSMASH mnemonic. */
> +	asm volatile(".byte 0xF3, 0x0F, 0x01, 0xFF"
> +		      : "=a"(ret)
> +		      : "a"(paddr)
> +		      : "memory", "cc");
> +
> +	return ret;
> +}
> +EXPORT_SYMBOL_GPL(psmash);
> +
> +static int rmpupdate(u64 pfn, struct rmpupdate *val)
> +{
> +	unsigned long paddr = pfn << PAGE_SHIFT;
> +	int ret;
> +
> +	if (!pfn_valid(pfn))
> +		return -EINVAL;
> +
> +	if (!cpu_feature_enabled(X86_FEATURE_SEV_SNP))
> +		return -ENXIO;
> +
> +	/* Binutils version 2.36 supports the RMPUPDATE mnemonic. */
> +	asm volatile(".byte 0xF2, 0x0F, 0x01, 0xFE"
> +		     : "=a"(ret)
> +		     : "a"(paddr), "c"((unsigned long)val)
> +		     : "memory", "cc");
> +	return ret;
> +}
> +
> +int rmp_make_private(u64 pfn, u64 gpa, enum pg_level level, int asid, bool immutable)
> +{
> +	struct rmpupdate val;
> +
> +	if (!pfn_valid(pfn))
> +		return -EINVAL;
> +

Should we add more checks on the arguments?

1. asid must be > 0
2. gpa must be aligned according to 'level'
3. gpa must be below the maximal address for the guest

"Note that the guest physical address space is limited according to
CPUID Fn80000008_EAX and thus the GPAs used by the firmware in
measurement calculation are equally limited. Hypervisors should not
attempt to map pages outside of this limit."
(-SNP ABI spec page 86, section 8.17 SNP_LAUNCH_UPDATE)


But note that in patch 28 of this series we have:

+		/* Transition the VMSA page to a firmware state. */
+		ret = rmp_make_private(pfn, -1, PG_LEVEL_4K, sev->asid, true);

That (u64)(-1) value for the gpa argument violates conditions 2 and 3
from my list above.

And indeed when calculating measurements we see that the GPA value
for the VMSA pages is 0x0000FFFF_FFFFF000, and not (u64)(-1). [1] [2]

Instead of checks, we can mask the gpa argument so that rmpupdate will
get the correct value.  Not sure which approach is preferable.


[1] https://github.com/IBM/sev-snp-measure/blob/90f6e59831d20e44d03d5ee19388f624fca87291/sevsnpmeasure/gctx.py#L40
[2] https://github.com/slp/snp-digest-rs/blob/0e5a787e99069944467151101ae4db474793d657/src/main.rs#L86
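
A sketch of how checks 1 and 2 could look inside rmp_make_private() is below; IS_ALIGNED() and page_level_size() are existing kernel helpers, and check 3 is left as a comment because masking versus rejecting is exactly the open question:

int rmp_make_private(u64 pfn, u64 gpa, enum pg_level level, int asid, bool immutable)
{
	struct rmpupdate val;

	if (!pfn_valid(pfn))
		return -EINVAL;

	/* 1: a private page must belong to a real ASID. */
	if (asid <= 0)
		return -EINVAL;

	/* 2: the gpa must be aligned to the RMP entry's page size. */
	if (!IS_ALIGNED(gpa, page_level_size(level)))
		return -EINVAL;

	/*
	 * 3: the gpa should also be below the guest-physical limit reported
	 * by CPUID Fn8000_0008_EAX; alternatively, mask gpa down to that
	 * width, which matches what the firmware uses for the measurement.
	 */

	memset(&val, 0, sizeof(val));
	val.assigned = 1;
	val.asid = asid;
	val.immutable = immutable;
	val.gpa = gpa;
	val.pagesize = X86_TO_RMP_PG_LEVEL(level);

	return rmpupdate(pfn, &val);
}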


-Dov


> +	memset(&val, 0, sizeof(val));
> +	val.assigned = 1;
> +	val.asid = asid;
> +	val.immutable = immutable;
> +	val.gpa = gpa;
> +	val.pagesize = X86_TO_RMP_PG_LEVEL(level);
> +
> +	return rmpupdate(pfn, &val);
> +}
> +EXPORT_SYMBOL_GPL(rmp_make_private);
> +
> +int rmp_make_shared(u64 pfn, enum pg_level level)
> +{
> +	struct rmpupdate val;
> +
> +	if (!pfn_valid(pfn))
> +		return -EINVAL;
> +
> +	memset(&val, 0, sizeof(val));
> +	val.pagesize = X86_TO_RMP_PG_LEVEL(level);
> +
> +	return rmpupdate(pfn, &val);
> +}
> +EXPORT_SYMBOL_GPL(rmp_make_shared);
Borislav Petkov July 25, 2022, 1:24 p.m. UTC | #11
On Tue, Jun 28, 2022 at 05:57:41PM +0000, Kalra, Ashish wrote:
> Yes, I will be adding a check for CPU family/model as following :

Why if the PPR is already kinda spelling the already architectural
pieces of the RMP entry?

"In order to assist software" it says.

So you call the specified ones by their name and the rest is __rsvd.

No need for model checks at all.

Right?
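
Illustratively, that approach would shape the structure roughly like this; the bit positions and widths below are placeholders rather than the real per-model layout:

/* Name only the fields the PPR explicitly documents; everything else stays reserved. */
struct rmpentry {
	u64 assigned	: 1;	/* placeholder position */
	u64 pagesize	: 1;	/* placeholder position */
	u64 immutable	: 1;	/* placeholder position */
	u64 __rsvd	: 61;	/* placeholder width */
	u64 __rsvd2;		/* remainder of the 16-byte entry */
} __packed;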
Borislav Petkov July 25, 2022, 2:36 p.m. UTC | #12
On Mon, Jun 20, 2022 at 11:02:52PM +0000, Ashish Kalra wrote:
> diff --git a/arch/x86/include/asm/sev.h b/arch/x86/include/asm/sev.h
> index cb16f0e5b585..6ab872311544 100644
> --- a/arch/x86/include/asm/sev.h
> +++ b/arch/x86/include/asm/sev.h
> @@ -85,7 +85,9 @@ extern bool handle_vc_boot_ghcb(struct pt_regs *regs);
>  
>  /* RMP page size */
>  #define RMP_PG_SIZE_4K			0
> +#define RMP_PG_SIZE_2M			1
>  #define RMP_TO_X86_PG_LEVEL(level)	(((level) == RMP_PG_SIZE_4K) ? PG_LEVEL_4K : PG_LEVEL_2M)
> +#define X86_TO_RMP_PG_LEVEL(level)	(((level) == PG_LEVEL_4K) ? RMP_PG_SIZE_4K : RMP_PG_SIZE_2M)
>  
>  /*
>   * The RMP entry format is not architectural. The format is defined in PPR
> @@ -126,6 +128,15 @@ struct snp_guest_platform_data {
>  	u64 secrets_gpa;
>  };
>  
> +struct rmpupdate {

Why is there a struct rmpupdate *and* a struct rmpentry?!

One should be enough.

> +	u64 gpa;
> +	u8 assigned;
> +	u8 pagesize;
> +	u8 immutable;
> +	u8 rsvd;
> +	u32 asid;
> +} __packed;
> +
Kalra, Ashish Aug. 1, 2022, 10:31 p.m. UTC | #13

Hello Boris,

>> +struct rmpupdate {

Why is there a struct rmpupdate *and* a struct rmpentry?!

The struct rmpentry is the raw layout of the RMP table entry while struct rmpupdate is the structure
expected by the rmpupdate instruction for programming the RMP table entries.

Arguably, we can program a struct rmpupdate internally from a struct rmpentry.

But we will still need struct rmpupdate for issuing the rmpupdate instruction, so it is probably cleaner
to keep it this way, as it only has two main callers - rmp_make_private() and rmp_make_shared(). 	

Also, due to the non-architectural aspect of struct rmpentry, the above functions may need to be modified
if there are changes in struct rmpentry, while struct rmpupdate remains consistent and persistent.
 
>One should be enough.

>> +	u64 gpa;
>> +	u8 assigned;
>> +	u8 pagesize;
>> +	u8 immutable;
>> +	u8 rsvd;
>> +	u32 asid;
>> +} __packed;
>> +

Thanks,
Ashish
Kalra, Ashish Aug. 1, 2022, 11:32 p.m. UTC | #14

Hello Boris,

>> Yes, I will be adding a check for CPU family/model as following :

>Why if the PPR is already kinda spelling the already architectural pieces of the RMP entry?

>"In order to assist software" it says.

The PPR specifies select portions of the RMP entry format for a specific core/platform. 

Therefore, the complete struct rmpentry definition is non-architectural. 

As per the PPR, software should not rely on any field definitions not specified
in that table, and the format of an RMP entry may change in future processors.

>So you call the specified ones by their name and the rest is __rsvd.

>No need for model checks at all.

>Right?

But we can't use this struct on a core/platform which has a different layout, so aren't
the model checks required?

Thanks,
Ashish
Kalra, Ashish Aug. 2, 2022, 4:49 a.m. UTC | #15

Hello Dov,

>> +int rmp_make_private(u64 pfn, u64 gpa, enum pg_level level, int asid, 
>> +bool immutable) {
>> +	struct rmpupdate val;
>> +
>> +	if (!pfn_valid(pfn))
>> +		return -EINVAL;
>> +

>Should we add more checks on the arguments?

>1. asid must be > 0
>2. gpa must be aligned according to 'level'
>3. gpa must be below the maximal address for the guest

Ok, yes it surely makes sense to add more checks on the arguments.

>"Note that the guest physical address space is limited according to CPUID Fn80000008_EAX and thus the GPAs used by the firmware in measurement calculation are equally limited. Hypervisors should not attempt to map pages outside of this limit."
>(-SNP ABI spec page 86, section 8.17 SNP_LAUNCH_UPDATE)


>But note that in patch 28 of this series we have:

>+		/* Transition the VMSA page to a firmware state. */
>+		ret = rmp_make_private(pfn, -1, PG_LEVEL_4K, sev->asid, true);

>That (u64)(-1) value for the gpa argument violates conditions 2 and 3 from my list above.

>And indeed when calculating measurements we see that the GPA value for the VMSA pages is 0x0000FFFF_FFFFF000, and not (u64)(-1). [1] [2]

>Instead of checks, we can mask the gpa argument so that rmpupdate will get the correct value.  Not sure which approach is preferable.

Well, the firmware masks the gpa argument anyway, as you observe in the launch digest, so we should probably do the same here too.

Thanks,
Ashish
Borislav Petkov Aug. 2, 2022, 2:14 p.m. UTC | #16
On Mon, Aug 01, 2022 at 11:32:21PM +0000, Kalra, Ashish wrote:
> But we can't use this struct on a core/platform which has a different
> layout, so aren't the model checks required ?

That would be a problem only if the already specified fields move or get
resized.

If their offset and size don't change, you're good.
Borislav Petkov Aug. 3, 2022, 8:26 p.m. UTC | #17
On Mon, Aug 01, 2022 at 10:31:26PM +0000, Kalra, Ashish wrote:
> The struct rmpentry is the raw layout of the RMP table entry
> while struct rmpupdate is the structure expected by the rmpupdate
> instruction for programming the RMP table entries.
>
> Arguably, we can program a struct rmpupdate internally from a struct
> rmpentry.
>
> But we will still need struct rmpupdate for issuing the rmpupdate
> instruction, so it is probably cleaner to keep it this way, as it only
> has two main callers - rmp_make_private() and rmp_make_shared().

Ok, but then call it struct rmp_state. The APM says in the RMPUPDATE
blurb:

"The RCX register provides the effective address of a 16-byte data
structure which contains the new RMP state."

so the function signature should be:

static int rmpupdate(u64 pfn, struct rmp_state *new)

and this is basically the description of that. It can't get any more
user-friendly than this.

Thx.
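
In other words, something along these lines; the fields are exactly the ones from this patch, only the name changes:

/* RMP state as consumed by the RMPUPDATE instruction (pointed to by RCX). */
struct rmp_state {
	u64 gpa;
	u8 assigned;
	u8 pagesize;
	u8 immutable;
	u8 rsvd;
	u32 asid;
} __packed;

static int rmpupdate(u64 pfn, struct rmp_state *state);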

Patch

diff --git a/arch/x86/include/asm/sev.h b/arch/x86/include/asm/sev.h
index cb16f0e5b585..6ab872311544 100644
--- a/arch/x86/include/asm/sev.h
+++ b/arch/x86/include/asm/sev.h
@@ -85,7 +85,9 @@  extern bool handle_vc_boot_ghcb(struct pt_regs *regs);
 
 /* RMP page size */
 #define RMP_PG_SIZE_4K			0
+#define RMP_PG_SIZE_2M			1
 #define RMP_TO_X86_PG_LEVEL(level)	(((level) == RMP_PG_SIZE_4K) ? PG_LEVEL_4K : PG_LEVEL_2M)
+#define X86_TO_RMP_PG_LEVEL(level)	(((level) == PG_LEVEL_4K) ? RMP_PG_SIZE_4K : RMP_PG_SIZE_2M)
 
 /*
  * The RMP entry format is not architectural. The format is defined in PPR
@@ -126,6 +128,15 @@  struct snp_guest_platform_data {
 	u64 secrets_gpa;
 };
 
+struct rmpupdate {
+	u64 gpa;
+	u8 assigned;
+	u8 pagesize;
+	u8 immutable;
+	u8 rsvd;
+	u32 asid;
+} __packed;
+
 #ifdef CONFIG_AMD_MEM_ENCRYPT
 extern struct static_key_false sev_es_enable_key;
 extern void __sev_es_ist_enter(struct pt_regs *regs);
diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
index 59e7ec6b0326..f6c64a722e94 100644
--- a/arch/x86/kernel/sev.c
+++ b/arch/x86/kernel/sev.c
@@ -2429,3 +2429,75 @@  int snp_lookup_rmpentry(u64 pfn, int *level)
 	return !!rmpentry_assigned(e);
 }
 EXPORT_SYMBOL_GPL(snp_lookup_rmpentry);
+
+int psmash(u64 pfn)
+{
+	unsigned long paddr = pfn << PAGE_SHIFT;
+	int ret;
+
+	if (!pfn_valid(pfn))
+		return -EINVAL;
+
+	if (!cpu_feature_enabled(X86_FEATURE_SEV_SNP))
+		return -ENXIO;
+
+	/* Binutils version 2.36 supports the PSMASH mnemonic. */
+	asm volatile(".byte 0xF3, 0x0F, 0x01, 0xFF"
+		      : "=a"(ret)
+		      : "a"(paddr)
+		      : "memory", "cc");
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(psmash);
+
+static int rmpupdate(u64 pfn, struct rmpupdate *val)
+{
+	unsigned long paddr = pfn << PAGE_SHIFT;
+	int ret;
+
+	if (!pfn_valid(pfn))
+		return -EINVAL;
+
+	if (!cpu_feature_enabled(X86_FEATURE_SEV_SNP))
+		return -ENXIO;
+
+	/* Binutils version 2.36 supports the RMPUPDATE mnemonic. */
+	asm volatile(".byte 0xF2, 0x0F, 0x01, 0xFE"
+		     : "=a"(ret)
+		     : "a"(paddr), "c"((unsigned long)val)
+		     : "memory", "cc");
+	return ret;
+}
+
+int rmp_make_private(u64 pfn, u64 gpa, enum pg_level level, int asid, bool immutable)
+{
+	struct rmpupdate val;
+
+	if (!pfn_valid(pfn))
+		return -EINVAL;
+
+	memset(&val, 0, sizeof(val));
+	val.assigned = 1;
+	val.asid = asid;
+	val.immutable = immutable;
+	val.gpa = gpa;
+	val.pagesize = X86_TO_RMP_PG_LEVEL(level);
+
+	return rmpupdate(pfn, &val);
+}
+EXPORT_SYMBOL_GPL(rmp_make_private);
+
+int rmp_make_shared(u64 pfn, enum pg_level level)
+{
+	struct rmpupdate val;
+
+	if (!pfn_valid(pfn))
+		return -EINVAL;
+
+	memset(&val, 0, sizeof(val));
+	val.pagesize = X86_TO_RMP_PG_LEVEL(level);
+
+	return rmpupdate(pfn, &val);
+}
+EXPORT_SYMBOL_GPL(rmp_make_shared);