[kvm-unit-tests,RESEND,v3,5/8] x86: nSVM: Build up the nested page table dynamically

Message ID 20220425114417.151540-6-manali.shukla@amd.com (mailing list archive)
State New, archived
Series Move npt test cases and NPT code improvements

Commit Message

Manali Shukla April 25, 2022, 11:44 a.m. UTC
The current implementation builds the nested page table statically,
with 2048 PTEs and a single PML4 entry, which makes it impossible to
extend.

The new implementation builds the page table dynamically, based on the
RAM size of the VM, which makes it possible to reserve separate memory
ranges for the various NPT test cases.
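
For illustration only (not part of this patch): a minimal sketch of how
a test is expected to use the accessors once the NPT is built
dynamically, modelled on the existing npt_np test case. The
scratch_page variable is assumed to be a test-local page:

static void *scratch_page;

static void npt_np_prepare(struct svm_test *test)
{
	u64 *pte;

	scratch_page = alloc_page();
	pte = npt_get_pte((u64) scratch_page);

	/*
	 * Clear the present bit in the nested PTE so that the guest's
	 * access to scratch_page triggers a #NPF VMEXIT.
	 */
	*pte &= ~1ULL;
}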

Signed-off-by: Manali Shukla <manali.shukla@amd.com>
---
 x86/svm.c     | 75 ++++++++++++++++-----------------------------------
 x86/svm.h     |  4 ++-
 x86/svm_npt.c |  5 ++--
 3 files changed, 29 insertions(+), 55 deletions(-)

Patch

diff --git a/x86/svm.c b/x86/svm.c
index ec825c7..e66c801 100644
--- a/x86/svm.c
+++ b/x86/svm.c
@@ -8,6 +8,7 @@ 
 #include "desc.h"
 #include "msr.h"
 #include "vm.h"
+#include "fwcfg.h"
 #include "smp.h"
 #include "types.h"
 #include "alloc_page.h"
@@ -16,43 +17,32 @@ 
 #include "vmalloc.h"
 
 /* for the nested page table*/
-u64 *pte[2048];
-u64 *pde[4];
-u64 *pdpe;
 u64 *pml4e;
 
 struct vmcb *vmcb;
 
 u64 *npt_get_pte(u64 address)
 {
-	int i1, i2;
-
-	address >>= 12;
-	i1 = (address >> 9) & 0x7ff;
-	i2 = address & 0x1ff;
-
-	return &pte[i1][i2];
+	return get_pte(npt_get_pml4e(), (void *)address);
 }
 
 u64 *npt_get_pde(u64 address)
 {
-	int i1, i2;
-
-	address >>= 21;
-	i1 = (address >> 9) & 0x3;
-	i2 = address & 0x1ff;
-
-	return &pde[i1][i2];
+	struct pte_search search;
+	search = find_pte_level(npt_get_pml4e(), (void *)address, 2);
+	return search.pte;
 }
 
-u64 *npt_get_pdpe(void)
+u64 *npt_get_pdpe(u64 address)
 {
-	return pdpe;
+	struct pte_search search;
+	search = find_pte_level(npt_get_pml4e(), (void *)address, 3);
+	return search.pte;
 }
 
 u64 *npt_get_pml4e(void)
 {
 	return pml4e;
 }
 
 bool smp_supported(void)
@@ -300,11 +290,21 @@  static void set_additional_vcpu_msr(void *msr_efer)
 	wrmsr(MSR_EFER, (ulong)msr_efer | EFER_SVME);
 }
 
+void setup_npt(void)
+{
+	u64 end_of_memory;
+
+	pml4e = alloc_page();
+	end_of_memory = fwcfg_get_u64(FW_CFG_RAM_SIZE);
+	if (end_of_memory < (1ul << 32))
+		end_of_memory = (1ul << 32);
+	setup_mmu_range(pml4e, 0, end_of_memory, true);
+}
+
 static void setup_svm(void)
 {
 	void *hsave = alloc_page();
-	u64 *page, address;
-	int i,j;
+	int i;
 
 	wrmsr(MSR_VM_HSAVE_PA, virt_to_phys(hsave));
 	wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_SVME);
@@ -327,36 +327,7 @@  static void setup_svm(void)
 	* pages to get enough granularity for the NPT unit-tests.
 	*/
 
-	address = 0;
-
-	/* PTE level */
-	for (i = 0; i < 2048; ++i) {
-		page = alloc_page();
-
-		for (j = 0; j < 512; ++j, address += 4096)
-	    		page[j] = address | 0x067ULL;
-
-		pte[i] = page;
-	}
-
-	/* PDE level */
-	for (i = 0; i < 4; ++i) {
-		page = alloc_page();
-
-	for (j = 0; j < 512; ++j)
-	    page[j] = (u64)pte[(i * 512) + j] | 0x027ULL;
-
-		pde[i] = page;
-	}
-
-	/* PDPe level */
-	pdpe   = alloc_page();
-	for (i = 0; i < 4; ++i)
-		pdpe[i] = ((u64)(pde[i])) | 0x27;
-
-	/* PML4e level */
-	pml4e    = alloc_page();
-	pml4e[0] = ((u64)pdpe) | 0x27;
+	setup_npt();
 }
 
 int matched;
diff --git a/x86/svm.h b/x86/svm.h
index 123e64f..85eff3f 100644
--- a/x86/svm.h
+++ b/x86/svm.h
@@ -406,7 +406,7 @@  typedef void (*test_guest_func)(struct svm_test *);
 int run_svm_tests(int ac, char **av);
 u64 *npt_get_pte(u64 address);
 u64 *npt_get_pde(u64 address);
-u64 *npt_get_pdpe(void);
+u64 *npt_get_pdpe(u64 address);
 u64 *npt_get_pml4e(void);
 bool smp_supported(void);
 bool default_supported(void);
@@ -429,6 +429,8 @@  int __svm_vmrun(u64 rip);
 void __svm_bare_vmrun(void);
 int svm_vmrun(void);
 void test_set_guest(test_guest_func func);
+void setup_npt(void);
+u64 *get_npt_pte(u64 *pml4, u64 guest_addr, int level);
 
 extern struct vmcb *vmcb;
 extern struct svm_test svm_tests[];
diff --git a/x86/svm_npt.c b/x86/svm_npt.c
index 53e8a90..ab4dcf4 100644
--- a/x86/svm_npt.c
+++ b/x86/svm_npt.c
@@ -209,7 +209,8 @@  static void __svm_npt_rsvd_bits_test(u64 * pxe, u64 rsvd_bits, u64 efer,
 	       "Wanted #NPF on rsvd bits = 0x%lx, got exit = 0x%x", rsvd_bits,
 	       exit_reason);
 
-	if (pxe == npt_get_pdpe() || pxe == npt_get_pml4e()) {
+	if (pxe == npt_get_pdpe((u64) basic_guest_main) ||
+	    pxe == npt_get_pml4e()) {
 		/*
 		 * The guest's page tables will blow up on a bad PDPE/PML4E,
 		 * before starting the final walk of the guest page.
@@ -338,7 +339,7 @@  skip_pte_test:
 				get_random_bits(20, 13) | PT_PAGE_SIZE_MASK,
 				host_efer, host_cr4, guest_efer, guest_cr4);
 
-	_svm_npt_rsvd_bits_test(npt_get_pdpe(),
+	_svm_npt_rsvd_bits_test(npt_get_pdpe((u64) basic_guest_main),
 				PT_PAGE_SIZE_MASK |
 				(this_cpu_has(X86_FEATURE_GBPAGES) ?
 				 get_random_bits(29, 13) : 0), host_efer,