[RFC,Part2,06/30] x86/fault: dump the RMP entry on #PF

Message ID: 20210324170436.31843-7-brijesh.singh@amd.com (mailing list archive)
State: Not Applicable
Delegated to: Herbert Xu
Series: Add AMD Secure Nested Paging (SEV-SNP) Hypervisor Support

Commit Message

Brijesh Singh March 24, 2021, 5:04 p.m. UTC
If hardware detects an RMP violation, it will raise a page-fault exception
with the RMP bit set. To help with debugging, dump the RMP entry for the
faulting address.

Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Joerg Roedel <jroedel@suse.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: "Peter Zijlstra (Intel)" <peterz@infradead.org>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Tom Lendacky <thomas.lendacky@amd.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Sean Christopherson <seanjc@google.com>
Cc: x86@kernel.org
Cc: kvm@vger.kernel.org
Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
---
 arch/x86/mm/fault.c | 75 +++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 75 insertions(+)
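
For reference, an RMP violation is reported in the page-fault error code; the AMD APM places it in bit 31, which an earlier patch in this series exposes as X86_PF_RMP. The hook this patch adds to show_fault_oops() then reduces to a sketch like the following (the BIT(31) definition here is an assumption based on the APM, not copied from this patch):

/* Assumed definition: X86_PF_RMP is introduced by an earlier patch in the series */
#define X86_PF_RMP	BIT(31)

	/* In show_fault_oops(): an RMP-violation fault warrants dumping the RMP entry */
	if (error_code & X86_PF_RMP)
		show_rmpentry(address);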

Comments

Andy Lutomirski March 24, 2021, 5:47 p.m. UTC | #1
On Wed, Mar 24, 2021 at 10:04 AM Brijesh Singh <brijesh.singh@amd.com> wrote:
>
> If hardware detects an RMP violation, it will raise a page-fault exception
> with the RMP bit set. To help with debugging, dump the RMP entry for the
> faulting address.
>
> Cc: Thomas Gleixner <tglx@linutronix.de>
> Cc: Ingo Molnar <mingo@redhat.com>
> Cc: Borislav Petkov <bp@alien8.de>
> Cc: Joerg Roedel <jroedel@suse.de>
> Cc: "H. Peter Anvin" <hpa@zytor.com>
> Cc: Tony Luck <tony.luck@intel.com>
> Cc: Dave Hansen <dave.hansen@intel.com>
> Cc: "Peter Zijlstra (Intel)" <peterz@infradead.org>
> Cc: Paolo Bonzini <pbonzini@redhat.com>
> Cc: Tom Lendacky <thomas.lendacky@amd.com>
> Cc: David Rientjes <rientjes@google.com>
> Cc: Sean Christopherson <seanjc@google.com>
> Cc: x86@kernel.org
> Cc: kvm@vger.kernel.org
> Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
> ---
>  arch/x86/mm/fault.c | 75 +++++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 75 insertions(+)
>
> diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
> index f39b551f89a6..7605e06a6dd9 100644
> --- a/arch/x86/mm/fault.c
> +++ b/arch/x86/mm/fault.c
> @@ -31,6 +31,7 @@
>  #include <asm/pgtable_areas.h>         /* VMALLOC_START, ...           */
>  #include <asm/kvm_para.h>              /* kvm_handle_async_pf          */
>  #include <asm/vdso.h>                  /* fixup_vdso_exception()       */
> +#include <asm/sev-snp.h>               /* lookup_rmpentry ...          */
>
>  #define CREATE_TRACE_POINTS
>  #include <asm/trace/exceptions.h>
> @@ -147,6 +148,76 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
>  DEFINE_SPINLOCK(pgd_lock);
>  LIST_HEAD(pgd_list);
>
> +static void dump_rmpentry(struct page *page, rmpentry_t *e)
> +{
> +       unsigned long paddr = page_to_pfn(page) << PAGE_SHIFT;
> +
> +       pr_alert("RMPEntry paddr 0x%lx [assigned=%d immutable=%d pagesize=%d gpa=0x%lx asid=%d "
> +               "vmsa=%d validated=%d]\n", paddr, rmpentry_assigned(e), rmpentry_immutable(e),
> +               rmpentry_pagesize(e), rmpentry_gpa(e), rmpentry_asid(e), rmpentry_vmsa(e),
> +               rmpentry_validated(e));
> +       pr_alert("RMPEntry paddr 0x%lx %016llx %016llx\n", paddr, e->high, e->low);
> +}
> +
> +static void show_rmpentry(unsigned long address)
> +{
> +       struct page *page = virt_to_page(address);

This is an error path, and I don't think you have any particular
guarantee that virt_to_page(address) is valid.  Please add appropriate
validation or use one of the slow lookup helpers.
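
For illustration, the usual guard before trusting virt_to_page() on an arbitrary address is virt_addr_valid(); a minimal sketch of such a check (illustrative only, not the eventual fix):

static void show_rmpentry(unsigned long address)
{
	struct page *page;
	rmpentry_t *entry;
	int rmp_level;

	/* Bail out unless the address is a valid direct-map address */
	if (!virt_addr_valid(address))
		return;

	page = virt_to_page(address);
	entry = lookup_page_in_rmptable(page, &rmp_level);
	/* ... rest as in the patch ... */
}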
Brijesh Singh March 24, 2021, 8:35 p.m. UTC | #2
On 3/24/21 12:47 PM, Andy Lutomirski wrote:
> On Wed, Mar 24, 2021 at 10:04 AM Brijesh Singh <brijesh.singh@amd.com> wrote:
>> If hardware detects an RMP violation, it will raise a page-fault exception
>> with the RMP bit set. To help with debugging, dump the RMP entry for the
>> faulting address.
>>
>> Cc: Thomas Gleixner <tglx@linutronix.de>
>> Cc: Ingo Molnar <mingo@redhat.com>
>> Cc: Borislav Petkov <bp@alien8.de>
>> Cc: Joerg Roedel <jroedel@suse.de>
>> Cc: "H. Peter Anvin" <hpa@zytor.com>
>> Cc: Tony Luck <tony.luck@intel.com>
>> Cc: Dave Hansen <dave.hansen@intel.com>
>> Cc: "Peter Zijlstra (Intel)" <peterz@infradead.org>
>> Cc: Paolo Bonzini <pbonzini@redhat.com>
>> Cc: Tom Lendacky <thomas.lendacky@amd.com>
>> Cc: David Rientjes <rientjes@google.com>
>> Cc: Sean Christopherson <seanjc@google.com>
>> Cc: x86@kernel.org
>> Cc: kvm@vger.kernel.org
>> Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
>> ---
>>  arch/x86/mm/fault.c | 75 +++++++++++++++++++++++++++++++++++++++++++++
>>  1 file changed, 75 insertions(+)
>>
>> diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
>> index f39b551f89a6..7605e06a6dd9 100644
>> --- a/arch/x86/mm/fault.c
>> +++ b/arch/x86/mm/fault.c
>> @@ -31,6 +31,7 @@
>>  #include <asm/pgtable_areas.h>         /* VMALLOC_START, ...           */
>>  #include <asm/kvm_para.h>              /* kvm_handle_async_pf          */
>>  #include <asm/vdso.h>                  /* fixup_vdso_exception()       */
>> +#include <asm/sev-snp.h>               /* lookup_rmpentry ...          */
>>
>>  #define CREATE_TRACE_POINTS
>>  #include <asm/trace/exceptions.h>
>> @@ -147,6 +148,76 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
>>  DEFINE_SPINLOCK(pgd_lock);
>>  LIST_HEAD(pgd_list);
>>
>> +static void dump_rmpentry(struct page *page, rmpentry_t *e)
>> +{
>> +       unsigned long paddr = page_to_pfn(page) << PAGE_SHIFT;
>> +
>> +       pr_alert("RMPEntry paddr 0x%lx [assigned=%d immutable=%d pagesize=%d gpa=0x%lx asid=%d "
>> +               "vmsa=%d validated=%d]\n", paddr, rmpentry_assigned(e), rmpentry_immutable(e),
>> +               rmpentry_pagesize(e), rmpentry_gpa(e), rmpentry_asid(e), rmpentry_vmsa(e),
>> +               rmpentry_validated(e));
>> +       pr_alert("RMPEntry paddr 0x%lx %016llx %016llx\n", paddr, e->high, e->low);
>> +}
>> +
>> +static void show_rmpentry(unsigned long address)
>> +{
>> +       struct page *page = virt_to_page(address);
> This is an error path, and I don't think you have any particular
> guarantee that virt_to_page(address) is valid.  Please add appropriate
> validation or use one of the slow lookup helpers.


Noted, thanks for the quick feedback.
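
The other option mentioned above, a slow lookup helper, would walk the page tables to resolve the backing frame instead of assuming the address lies in the direct map. A rough sketch using the existing lookup_address() helper (the wiring around it is illustrative, not a tested fix):

	unsigned int level;
	unsigned long pfn;
	pte_t *pte;

	/* Resolve the faulting address through the current page tables */
	pte = lookup_address(address, &level);
	if (!pte || !pte_present(*pte))
		return;

	pfn = pte_pfn(*pte);
	if (!pfn_valid(pfn))
		return;

	entry = lookup_page_in_rmptable(pfn_to_page(pfn), &rmp_level);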

Patch

diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index f39b551f89a6..7605e06a6dd9 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -31,6 +31,7 @@ 
 #include <asm/pgtable_areas.h>		/* VMALLOC_START, ...		*/
 #include <asm/kvm_para.h>		/* kvm_handle_async_pf		*/
 #include <asm/vdso.h>			/* fixup_vdso_exception()	*/
+#include <asm/sev-snp.h>		/* lookup_rmpentry ...		*/
 
 #define CREATE_TRACE_POINTS
 #include <asm/trace/exceptions.h>
@@ -147,6 +148,76 @@  is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
 DEFINE_SPINLOCK(pgd_lock);
 LIST_HEAD(pgd_list);
 
+static void dump_rmpentry(struct page *page, rmpentry_t *e)
+{
+	unsigned long paddr = page_to_pfn(page) << PAGE_SHIFT;
+
+	pr_alert("RMPEntry paddr 0x%lx [assigned=%d immutable=%d pagesize=%d gpa=0x%lx asid=%d "
+		"vmsa=%d validated=%d]\n", paddr, rmpentry_assigned(e), rmpentry_immutable(e),
+		rmpentry_pagesize(e), rmpentry_gpa(e), rmpentry_asid(e), rmpentry_vmsa(e),
+		rmpentry_validated(e));
+	pr_alert("RMPEntry paddr 0x%lx %016llx %016llx\n", paddr, e->high, e->low);
+}
+
+static void show_rmpentry(unsigned long address)
+{
+	struct page *page = virt_to_page(address);
+	rmpentry_t *entry, *large_entry;
+	int level, rmp_level;
+	pgd_t *pgd;
+	pte_t *pte;
+
+	/* Get the RMP entry for the fault address */
+	entry = lookup_page_in_rmptable(page, &rmp_level);
+	if (!entry) {
+		pr_alert("SEV-SNP: failed to read RMP entry for address 0x%lx\n", address);
+		return;
+	}
+
+	dump_rmpentry(page, entry);
+
+	/*
+	 * If the fault occurred during a large-page walk, dump the RMP entry at the base of the 2MB page.
+	 */
+	pgd = __va(read_cr3_pa());
+	pgd += pgd_index(address);
+	pte = lookup_address_in_pgd(pgd, address, &level);
+	if ((level > PG_LEVEL_4K) && (!IS_ALIGNED(address, PMD_SIZE))) {
+		address = address & PMD_MASK;
+		large_entry = lookup_page_in_rmptable(virt_to_page(address), &rmp_level);
+		if (!large_entry) {
+			pr_alert("SEV-SNP: failed to read large RMP entry 0x%lx\n",
+				address & PMD_MASK);
+			return;
+		}
+
+		dump_rmpentry(virt_to_page(address), large_entry);
+	}
+
+	/*
+	 * If the RMP entry for the faulting address was not assigned, the dump may not
+	 * provide any useful debug information. Iterate through the entire 2MB region
+	 * and dump any RMP entry that has at least one bit set.
+	 */
+	if (!rmpentry_assigned(entry)) {
+		unsigned long start, end;
+
+		start = address & PMD_MASK;
+		end = start + PMD_SIZE;
+
+		for (; start < end; start += PAGE_SIZE) {
+			entry = lookup_page_in_rmptable(virt_to_page(start), &rmp_level);
+			if (!entry)
+				return;
+
+			/* If any bit in the RMP entry is set, dump it */
+			if (entry->high || entry->low)
+				pr_alert("RMPEntry paddr %lx: %016llx %016llx\n",
+					page_to_pfn(virt_to_page(start)) << PAGE_SHIFT, entry->high, entry->low);
+		}
+	}
+}
+
 #ifdef CONFIG_X86_32
 static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
 {
@@ -580,6 +651,10 @@  show_fault_oops(struct pt_regs *regs, unsigned long error_code, unsigned long ad
 	}
 
 	dump_pagetable(address);
+
+	if (error_code & X86_PF_RMP)
+		show_rmpentry(address);
+
 }
 
 static noinline void