[RFC,Part2,02/30] x86/sev-snp: add RMP entry lookup helpers

Message ID: 20210324170436.31843-3-brijesh.singh@amd.com
State: Not Applicable
Delegated to: Herbert Xu
Series: Add AMD Secure Nested Paging (SEV-SNP) Hypervisor Support

Commit Message

Brijesh Singh March 24, 2021, 5:04 p.m. UTC
lookup_page_in_rmptable() can be used by the host to read the RMP
entry for a given page. The RMP entry format is documented in PPR
section 2.1.5.2.

Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Joerg Roedel <jroedel@suse.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: "Peter Zijlstra (Intel)" <peterz@infradead.org>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Tom Lendacky <thomas.lendacky@amd.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Sean Christopherson <seanjc@google.com>
Cc: x86@kernel.org
Cc: kvm@vger.kernel.org
Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
---
 arch/x86/include/asm/sev-snp.h | 31 +++++++++++++++++++++++++++++++
 arch/x86/mm/mem_encrypt.c      | 32 ++++++++++++++++++++++++++++++++
 2 files changed, 63 insertions(+)

Comments

Borislav Petkov April 15, 2021, 4:57 p.m. UTC | #1
On Wed, Mar 24, 2021 at 12:04:08PM -0500, Brijesh Singh wrote:
> lookup_page_in_rmptable() can be used by the host to read the RMP
> entry for a given page. The RMP entry format is documented in PPR
> section 2.1.5.2.

I see

Table 15-36. Fields of an RMP Entry

in the APM.

Which PPR do you mean? Also, you know where to put those documents,
right?

> +/* RMP table entry format (PPR section 2.1.5.2) */
> +struct __packed rmpentry {
> +	union {
> +		struct {
> +			uint64_t assigned:1;
> +			uint64_t pagesize:1;
> +			uint64_t immutable:1;
> +			uint64_t rsvd1:9;
> +			uint64_t gpa:39;
> +			uint64_t asid:10;
> +			uint64_t vmsa:1;
> +			uint64_t validated:1;
> +			uint64_t rsvd2:1;
> +		} info;
> +		uint64_t low;
> +	};
> +	uint64_t high;
> +};
> +
> +typedef struct rmpentry rmpentry_t;

Eww, a typedef. Why?

struct rmpentry is just fine.

> diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
> index 39461b9cb34e..06394b6d56b2 100644
> --- a/arch/x86/mm/mem_encrypt.c
> +++ b/arch/x86/mm/mem_encrypt.c
> @@ -34,6 +34,8 @@
>  
>  #include "mm_internal.h"
>  

<--- Needs a comment here to explain the magic 0x4000 and the magic
shift by 8.

> +#define rmptable_page_offset(x)	(0x4000 + (((unsigned long) x) >> 8))
> +
>  /*
>   * Since SME related variables are set early in the boot process they must
>   * reside in the .data section so as not to be zeroed out when the .bss
> @@ -612,3 +614,33 @@ static int __init mem_encrypt_snp_init(void)
>   * SEV-SNP must be enabled across all CPUs, so make the initialization as a late initcall.
>   */
>  late_initcall(mem_encrypt_snp_init);
> +
> +rmpentry_t *lookup_page_in_rmptable(struct page *page, int *level)

snp_lookup_page_in_rmptable()

> +{
> +	unsigned long phys = page_to_pfn(page) << PAGE_SHIFT;
> +	rmpentry_t *entry, *large_entry;
> +	unsigned long vaddr;
> +
> +	if (!static_branch_unlikely(&snp_enable_key))
> +		return NULL;
> +
> +	vaddr = rmptable_start + rmptable_page_offset(phys);
> +	if (WARN_ON(vaddr > rmptable_end))

Do you really want to spew a WARN_ON splat for each wrong vaddr? What
for?

> +		return NULL;
> +
> +	entry = (rmpentry_t *)vaddr;
> +
> +	/*
> +	 * Check if this page is covered by the large RMP entry. This is needed to get
> +	 * the page level used in the RMP entry.
> +	 *

No need for a new line in the comment and no need for the "e.g." thing
either.

Also, s/the large RMP entry/a large RMP entry/g.

> +	 * e.g. if the page is covered by the large RMP entry then page size is set in the
> +	 *       base RMP entry.
> +	 */
> +	vaddr = rmptable_start + rmptable_page_offset(phys & PMD_MASK);
> +	large_entry = (rmpentry_t *)vaddr;
> +	*level = rmpentry_pagesize(large_entry);
> +
> +	return entry;
> +}
> +EXPORT_SYMBOL_GPL(lookup_page_in_rmptable);

Exported for kvm?
Borislav Petkov April 15, 2021, 5:03 p.m. UTC | #2
On Wed, Mar 24, 2021 at 12:04:08PM -0500, Brijesh Singh wrote:
> diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c

Also, why is all this SNP stuff landing in this file instead of in sev.c
or so which is AMD-specific?
Brijesh Singh April 15, 2021, 6:08 p.m. UTC | #3
Hi Boris,


On 4/15/21 11:57 AM, Borislav Petkov wrote:
> On Wed, Mar 24, 2021 at 12:04:08PM -0500, Brijesh Singh wrote:
>> lookup_page_in_rmptable() can be used by the host to read the RMP
>> entry for a given page. The RMP entry format is documented in PPR
>> section 2.1.5.2.
> I see
>
> Table 15-36. Fields of an RMP Entry
>
> in the APM.
>
> Which PPR do you mean? Also, you know where to put those documents,
> right?

This is from the PPR for Family 19h Model 01h Rev B01, the processor
which introduces the SNP feature. Yes, I have already uploaded the PPR
to the BZ.

The PPR is also available at AMD: https://www.amd.com/en/support/tech-docs


>> +/* RMP table entry format (PPR section 2.1.5.2) */
>> +struct __packed rmpentry {
>> +	union {
>> +		struct {
>> +			uint64_t assigned:1;
>> +			uint64_t pagesize:1;
>> +			uint64_t immutable:1;
>> +			uint64_t rsvd1:9;
>> +			uint64_t gpa:39;
>> +			uint64_t asid:10;
>> +			uint64_t vmsa:1;
>> +			uint64_t validated:1;
>> +			uint64_t rsvd2:1;
>> +		} info;
>> +		uint64_t low;
>> +	};
>> +	uint64_t high;
>> +};
>> +
>> +typedef struct rmpentry rmpentry_t;
> Eww, a typedef. Why?
>
> struct rmpentry is just fine.


I guess I was trying to shorten the name. I am good with struct rmpentry;


>> diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
>> index 39461b9cb34e..06394b6d56b2 100644
>> --- a/arch/x86/mm/mem_encrypt.c
>> +++ b/arch/x86/mm/mem_encrypt.c
>> @@ -34,6 +34,8 @@
>>  
>>  #include "mm_internal.h"
>>  
> <--- Needs a comment here to explain the magic 0x4000 and the magic
> shift by 8.


All those magic numbers are documented in the PPR. The APM does not
provide the offset of the entry inside the RMP table; this is where we
need to refer to the PPR.

>> +#define rmptable_page_offset(x)	(0x4000 + (((unsigned long) x) >> 8))
>> +
>>  /*
>>   * Since SME related variables are set early in the boot process they must
>>   * reside in the .data section so as not to be zeroed out when the .bss
>> @@ -612,3 +614,33 @@ static int __init mem_encrypt_snp_init(void)
>>   * SEV-SNP must be enabled across all CPUs, so make the initialization as a late initcall.
>>   */
>>  late_initcall(mem_encrypt_snp_init);
>> +
>> +rmpentry_t *lookup_page_in_rmptable(struct page *page, int *level)
> snp_lookup_page_in_rmptable()

Noted.


>> +{
>> +	unsigned long phys = page_to_pfn(page) << PAGE_SHIFT;
>> +	rmpentry_t *entry, *large_entry;
>> +	unsigned long vaddr;
>> +
>> +	if (!static_branch_unlikely(&snp_enable_key))
>> +		return NULL;
>> +
>> +	vaddr = rmptable_start + rmptable_page_offset(phys);
>> +	if (WARN_ON(vaddr > rmptable_end))
> Do you really want to spew a WARN_ON splat for each wrong vaddr? What
> for?
I guess I was using it during my development and there is no need for
it. I will remove it.
>
>> +		return NULL;
>> +
>> +	entry = (rmpentry_t *)vaddr;
>> +
>> +	/*
>> +	 * Check if this page is covered by the large RMP entry. This is needed to get
>> +	 * the page level used in the RMP entry.
>> +	 *
> No need for a new line in the comment and no need for the "e.g." thing
> either.
>
> Also, s/the large RMP entry/a large RMP entry/g.
Noted.
>
>> +	 * e.g. if the page is covered by the large RMP entry then page size is set in the
>> +	 *       base RMP entry.
>> +	 */
>> +	vaddr = rmptable_start + rmptable_page_offset(phys & PMD_MASK);
>> +	large_entry = (rmpentry_t *)vaddr;
>> +	*level = rmpentry_pagesize(large_entry);
>> +
>> +	return entry;
>> +}
>> +EXPORT_SYMBOL_GPL(lookup_page_in_rmptable);
> Exported for kvm?

The current users of this are KVM, CCP, and the page fault handler.

-Brijesh
Brijesh Singh April 15, 2021, 6:09 p.m. UTC | #4
On 4/15/21 12:03 PM, Borislav Petkov wrote:
> On Wed, Mar 24, 2021 at 12:04:08PM -0500, Brijesh Singh wrote:
>> diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
> Also, why is all this SNP stuff landing in this file instead of in sev.c
> or so which is AMD-specific?
>
I don't have any strong reason to keep it in mem_encrypt.c. All of this
can go in sev.c; I will move it in the next version.
Borislav Petkov April 15, 2021, 7:50 p.m. UTC | #5
On Thu, Apr 15, 2021 at 01:08:09PM -0500, Brijesh Singh wrote:
> This is from the PPR for Family 19h Model 01h Rev B01, the processor
> which introduces the SNP feature. Yes, I have already uploaded the PPR
> to the BZ.
> 
> The PPR is also available at AMD: https://www.amd.com/en/support/tech-docs

Please add the link in the bugzilla to the comments here - this is the
reason why stuff is being uploaded in the first place, because those
vendor sites tend to change and those links become stale with time.

> I guess I was trying to shorten the name. I am good with struct rmpentry;

Yes please - typedefs are used only in very specific cases.

> All those magic numbers are documented in the PPR.

We use defines - not magic numbers. For example

#define RMPTABLE_ENTRIES_OFFSET 0x4000

The 8 is probably

PAGE_SHIFT - RMPENTRY_SHIFT

because you have GPA bits [50:12] and an RMP entry is 16 bytes, i.e., 1 << 4.

With defines it is actually clear what the computation is doing - with
naked numbers not really.

> The APM does not provide the offset of the entry inside the RMP table.

It does, kinda, but in the pseudocode of those new insns in APM v3. From
PVALIDATE pseudo:

	RMP_ENTRY_PA = RMP_BASE + 0x4000 + (SYSTEM_PA / 0x1000) * 16

and that last

	/ 0x1000 * 16

is actually

	>> 12 - 4

i.e., the >> 8 shift.
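
For a concrete check, plugging in a made-up page-aligned address (the
low 12 bits being zero is what makes the two forms agree):

	unsigned long pa    = 0x201000UL;          /* illustrative SYSTEM_PA */
	unsigned long off_a = (pa / 0x1000) * 16;  /* 0x201 * 16 = 0x2010 */
	unsigned long off_b = pa >> 8;             /* 0x2010 as well */
	/* either way, RMP_ENTRY_PA = RMP_BASE + 0x4000 + 0x2010 */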

Thx.
Brijesh Singh April 15, 2021, 10:18 p.m. UTC | #6
On 4/15/21 2:50 PM, Borislav Petkov wrote:
> On Thu, Apr 15, 2021 at 01:08:09PM -0500, Brijesh Singh wrote:
>> This is from the PPR for Family 19h Model 01h Rev B01, the processor
>> which introduces the SNP feature. Yes, I have already uploaded the PPR
>> to the BZ.
>>
>> The PPR is also available at AMD: https://www.amd.com/en/support/tech-docs
> Please add the link in the bugzilla to the comments here - this is the
> reason why stuff is being uploaded in the first place, because those
> vendor sites tend to change and those links become stale with time.

Will do.


>
>> I guess I was trying to shorten the name. I am good with struct rmpentry;
> Yes please - typedefs are used only in very specific cases.
>
>> All those magic numbers are documented in the PPR.
> We use defines - not magic numbers. For example
>
> #define RMPTABLE_ENTRIES_OFFSET 0x4000
>
> The 8 is probably
>
> PAGE_SHIFT - RMPENTRY_SHIFT
>
> because you have GPA bits [50:12] and an RMP entry is 16 bytes, i.e., 1 << 4.
>
> With defines it is actually clear what the computation is doing - with
> naked numbers not really.

Sure, I will add macros to make it more readable.
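
Something along these lines, perhaps (untested sketch; the names follow
your suggestion and are not final):

	/* RMP entries start 0x4000 bytes past RMP_BASE (PPR section 2.1.5.2) */
	#define RMPTABLE_ENTRIES_OFFSET	0x4000

	/* An RMP entry is 16 bytes, i.e. 1 << 4 */
	#define RMPENTRY_SHIFT		4

	#define rmptable_page_offset(x)	\
		(RMPTABLE_ENTRIES_OFFSET + (((unsigned long)(x)) >> (PAGE_SHIFT - RMPENTRY_SHIFT)))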

Patch

diff --git a/arch/x86/include/asm/sev-snp.h b/arch/x86/include/asm/sev-snp.h
index f7280d5c6158..2aa14b38c5ed 100644
--- a/arch/x86/include/asm/sev-snp.h
+++ b/arch/x86/include/asm/sev-snp.h
@@ -67,6 +67,35 @@  struct __packed snp_page_state_change {
 #define X86_RMP_PG_LEVEL(level)	(((level) == PG_LEVEL_4K) ? RMP_PG_SIZE_4K : RMP_PG_SIZE_2M)
 #define RMP_X86_PG_LEVEL(level)	(((level) == RMP_PG_SIZE_4K) ? PG_LEVEL_4K : PG_LEVEL_2M)
 
+/* RMP table entry format (PPR section 2.1.5.2) */
+struct __packed rmpentry {
+	union {
+		struct {
+			uint64_t assigned:1;
+			uint64_t pagesize:1;
+			uint64_t immutable:1;
+			uint64_t rsvd1:9;
+			uint64_t gpa:39;
+			uint64_t asid:10;
+			uint64_t vmsa:1;
+			uint64_t validated:1;
+			uint64_t rsvd2:1;
+		} info;
+		uint64_t low;
+	};
+	uint64_t high;
+};
+
+typedef struct rmpentry rmpentry_t;
+
+#define rmpentry_assigned(x)	((x)->info.assigned)
+#define rmpentry_pagesize(x)	(RMP_X86_PG_LEVEL((x)->info.pagesize))
+#define rmpentry_vmsa(x)	((x)->info.vmsa)
+#define rmpentry_asid(x)	((x)->info.asid)
+#define rmpentry_validated(x)	((x)->info.validated)
+#define rmpentry_gpa(x)		((unsigned long)(x)->info.gpa)
+#define rmpentry_immutable(x)	((x)->info.immutable)
+
 #ifdef CONFIG_AMD_MEM_ENCRYPT
 #include <linux/jump_label.h>
 
@@ -94,6 +123,7 @@  void __init early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr
 		unsigned int npages);
 int snp_set_memory_shared(unsigned long vaddr, unsigned int npages);
 int snp_set_memory_private(unsigned long vaddr, unsigned int npages);
+rmpentry_t *lookup_page_in_rmptable(struct page *page, int *level);
 
 extern struct static_key_false snp_enable_key;
 static inline bool snp_key_active(void)
@@ -124,6 +154,7 @@  early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr, unsigned i
 static inline int snp_set_memory_shared(unsigned long vaddr, unsigned int npages) { return 0; }
 static inline int snp_set_memory_private(unsigned long vaddr, unsigned int npages) { return 0; }
 static inline bool snp_key_active(void) { return false; }
+static inline rmpentry_t *lookup_page_in_rmptable(struct page *page, int *level) { return NULL; }
 
 #endif /* CONFIG_AMD_MEM_ENCRYPT */
 
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index 39461b9cb34e..06394b6d56b2 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -34,6 +34,8 @@ 
 
 #include "mm_internal.h"
 
+#define rmptable_page_offset(x)	(0x4000 + (((unsigned long) x) >> 8))
+
 /*
  * Since SME related variables are set early in the boot process they must
  * reside in the .data section so as not to be zeroed out when the .bss
@@ -612,3 +614,33 @@  static int __init mem_encrypt_snp_init(void)
  * SEV-SNP must be enabled across all CPUs, so make the initialization as a late initcall.
  */
 late_initcall(mem_encrypt_snp_init);
+
+rmpentry_t *lookup_page_in_rmptable(struct page *page, int *level)
+{
+	unsigned long phys = page_to_pfn(page) << PAGE_SHIFT;
+	rmpentry_t *entry, *large_entry;
+	unsigned long vaddr;
+
+	if (!static_branch_unlikely(&snp_enable_key))
+		return NULL;
+
+	vaddr = rmptable_start + rmptable_page_offset(phys);
+	if (WARN_ON(vaddr > rmptable_end))
+		return NULL;
+
+	entry = (rmpentry_t *)vaddr;
+
+	/*
+	 * Check if this page is covered by the large RMP entry. This is needed to get
+	 * the page level used in the RMP entry.
+	 *
+	 * e.g. if the page is covered by the large RMP entry then page size is set in the
+	 *       base RMP entry.
+	 */
+	vaddr = rmptable_start + rmptable_page_offset(phys & PMD_MASK);
+	large_entry = (rmpentry_t *)vaddr;
+	*level = rmpentry_pagesize(large_entry);
+
+	return entry;
+}
+EXPORT_SYMBOL_GPL(lookup_page_in_rmptable);
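
For context, a caller of the new helper might look something like this
(hypothetical usage sketch, not part of the patch; dump_rmpentry() is a
made-up name):

	static void dump_rmpentry(struct page *page)
	{
		rmpentry_t *e;
		int level;

		/* Returns NULL when SNP is not enabled */
		e = lookup_page_in_rmptable(page, &level);
		if (!e)
			return;

		pr_info("assigned=%llu pagesize=%d asid=%llu gpa=0x%lx\n",
			(unsigned long long)rmpentry_assigned(e),
			rmpentry_pagesize(e),
			(unsigned long long)rmpentry_asid(e),
			rmpentry_gpa(e));
	}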