diff --git a/x86/svm.c b/x86/svm.c
--- a/x86/svm.c
+++ b/x86/svm.c
@@ -8,6 +8,7 @@
#include "desc.h"
#include "msr.h"
#include "vm.h"
+#include "fwcfg.h"
#include "smp.h"
#include "types.h"
#include "alloc_page.h"
@@ -16,38 +17,27 @@
#include "vmalloc.h"
/* for the nested page table*/
-u64 *pte[2048];
-u64 *pde[4];
-u64 *pdpe;
u64 *pml4e;
struct vmcb *vmcb;
u64 *npt_get_pte(u64 address)
{
- int i1, i2;
-
- address >>= 12;
- i1 = (address >> 9) & 0x7ff;
- i2 = address & 0x1ff;
-
- return &pte[i1][i2];
+ return get_pte(npt_get_pml4e(), (void*)address);
}
u64 *npt_get_pde(u64 address)
{
- int i1, i2;
-
- address >>= 21;
- i1 = (address >> 9) & 0x3;
- i2 = address & 0x1ff;
-
- return &pde[i1][i2];
+ struct pte_search search;
+ search = find_pte_level(npt_get_pml4e(), (void*)address, 2);
+ return search.pte;
}
-u64 *npt_get_pdpe(void)
+u64 *npt_get_pdpe(u64 address)
{
- return pdpe;
+ struct pte_search search;
+ search = find_pte_level(npt_get_pml4e(), (void*)address, 3);
+ return search.pte;
}
u64 *npt_get_pml4e(void)
@@ -300,11 +290,21 @@ static void set_additional_vcpu_msr(void *msr_efer)
wrmsr(MSR_EFER, (ulong)msr_efer | EFER_SVME);
}
+void setup_npt(void)
+{
+ u64 end_of_memory;
+
+ pml4e = alloc_page();
+
+ end_of_memory = fwcfg_get_u64(FW_CFG_RAM_SIZE);
+ if (end_of_memory < (1ul << 32))
+ end_of_memory = (1ul << 32);
+
+ __setup_mmu_range(pml4e, 0, end_of_memory, IS_NESTED_MMU);
+}
+
static void setup_svm(void)
{
void *hsave = alloc_page();
- u64 *page, address;
- int i,j;
+ int i;
wrmsr(MSR_VM_HSAVE_PA, virt_to_phys(hsave));
wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_SVME);
@@ -327,36 +327,7 @@ static void setup_svm(void)
* pages to get enough granularity for the NPT unit-tests.
*/
- address = 0;
-
- /* PTE level */
- for (i = 0; i < 2048; ++i) {
- page = alloc_page();
-
- for (j = 0; j < 512; ++j, address += 4096)
- page[j] = address | 0x067ULL;
-
- pte[i] = page;
- }
-
- /* PDE level */
- for (i = 0; i < 4; ++i) {
- page = alloc_page();
-
- for (j = 0; j < 512; ++j)
- page[j] = (u64)pte[(i * 512) + j] | 0x027ULL;
-
- pde[i] = page;
- }
-
- /* PDPe level */
- pdpe = alloc_page();
- for (i = 0; i < 4; ++i)
- pdpe[i] = ((u64)(pde[i])) | 0x27;
-
- /* PML4e level */
- pml4e = alloc_page();
- pml4e[0] = ((u64)pdpe) | 0x27;
+ setup_npt();
}
int matched;

diff --git a/x86/svm.h b/x86/svm.h
--- a/x86/svm.h
+++ b/x86/svm.h
@@ -406,7 +406,7 @@ typedef void (*test_guest_func)(struct svm_test *);
int run_svm_tests(int ac, char **av);
u64 *npt_get_pte(u64 address);
u64 *npt_get_pde(u64 address);
-u64 *npt_get_pdpe(void);
+u64 *npt_get_pdpe(u64 address);
u64 *npt_get_pml4e(void);
bool smp_supported(void);
bool default_supported(void);
@@ -429,6 +429,8 @@ int __svm_vmrun(u64 rip);
void __svm_bare_vmrun(void);
int svm_vmrun(void);
void test_set_guest(test_guest_func func);
+void setup_npt(void);
+u64 *get_npt_pte(u64 *pml4, u64 guest_addr, int level);
extern struct vmcb *vmcb;
extern struct svm_test svm_tests[];

diff --git a/x86/svm_npt.c b/x86/svm_npt.c
--- a/x86/svm_npt.c
+++ b/x86/svm_npt.c
@@ -209,7 +209,8 @@ static void __svm_npt_rsvd_bits_test(u64 * pxe, u64 rsvd_bits, u64 efer,
"Wanted #NPF on rsvd bits = 0x%lx, got exit = 0x%x", rsvd_bits,
exit_reason);
- if (pxe == npt_get_pdpe() || pxe == npt_get_pml4e()) {
+ if (pxe == npt_get_pdpe((u64)basic_guest_main) ||
+     pxe == npt_get_pml4e()) {
/*
* The guest's page tables will blow up on a bad PDPE/PML4E,
* before starting the final walk of the guest page.
@@ -338,7 +339,7 @@ skip_pte_test:
get_random_bits(20, 13) | PT_PAGE_SIZE_MASK,
host_efer, host_cr4, guest_efer, guest_cr4);
- _svm_npt_rsvd_bits_test(npt_get_pdpe(),
+ _svm_npt_rsvd_bits_test(npt_get_pdpe((u64)basic_guest_main),
PT_PAGE_SIZE_MASK |
(this_cpu_has(X86_FEATURE_GBPAGES) ?
get_random_bits(29, 13) : 0), host_efer,
Build up the nested page table dynamically, based on the RAM size of
the VM, instead of building it statically with 2048 PTEs and one PML4
entry. This way the nested page table can easily be extended to cover
separate address ranges for additional test cases, if needed.

Signed-off-by: Manali Shukla <manali.shukla@amd.com>
---
 x86/svm.c     | 73 ++++++++++++++++-----------------
 x86/svm.h     |  4 ++-
 x86/svm_npt.c |  5 ++--
 3 files changed, 28 insertions(+), 54 deletions(-)
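
For readers unfamiliar with find_pte_level(), here is a minimal,
hypothetical sketch of the level-indexed walk that the reworked
npt_get_pde()/npt_get_pdpe() helpers rely on. It illustrates the
assumed behavior only, not the actual lib/x86/vm.c implementation;
walk_to_level(), PTE_PRESENT, and PT_ADDR_MASK are made-up names, and
page-table pages are assumed to be identity mapped, as elsewhere in
kvm-unit-tests:

  #include <stddef.h>
  #include <stdint.h>

  typedef uint64_t u64;

  #define PTE_PRESENT  (1ull << 0)
  #define PT_ADDR_MASK 0x000ffffffffff000ull

  /*
   * Descend from the PML4 (level 4) toward the requested level and
   * return a pointer to the entry that maps guest_addr, or NULL when
   * a higher-level entry is not present.
   */
  static u64 *walk_to_level(u64 *pml4, u64 guest_addr, int level)
  {
          u64 *table = pml4;
          int cur;

          for (cur = 4; cur > level; cur--) {
                  /* Nine index bits per level, starting at bit 12. */
                  int shift = 12 + 9 * (cur - 1);
                  u64 entry = table[(guest_addr >> shift) & 0x1ff];

                  if (!(entry & PTE_PRESENT))
                          return NULL;
                  /* Identity mapping: the entry's address bits are a
                   * usable pointer to the next-level table. */
                  table = (u64 *)(entry & PT_ADDR_MASK);
          }
          return &table[(guest_addr >> (12 + 9 * (level - 1))) & 0x1ff];
  }

Under that model, npt_get_pte(addr) corresponds to
walk_to_level(pml4e, addr, 1), npt_get_pde(addr) to level 2, and
npt_get_pdpe(addr) to level 3, matching the find_pte_level(..., 2) and
find_pte_level(..., 3) calls added above. Note also the 4 GiB floor in
setup_npt(): even a VM with less RAM gets the whole low 4 GiB mapped,
so test addresses below 4 GiB stay covered by the NPT.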