
[kvm-unit-tests,v3,4/6] x86: Improve setup_mmu_range() to implement npt

Message ID 20220425015806.105063-5-manali.shukla@amd.com (mailing list archive)
State New, archived
Series: Move npt test cases and NPT code improvements

Commit Message

Manali Shukla April 25, 2022, 1:58 a.m. UTC
If the U/S bit is "0" in all page table entries, the pages they map are
treated as supervisor pages. By default, pte_opt_mask is "0" for all NPT
test cases, so the U/S bit is clear in every PTE.

Nested page table accesses performed by the MMU are treated as user
accesses, so when a nested page table is built dynamically, PT_USER_MASK
must be set in all NPT entries.

Improve setup_mmu_range() based on the above analysis.
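
As a minimal sketch (not part of this patch; npt_cr3 and GUEST_MEM_SIZE
are placeholder names), a nested-paging test could build a 1:1 nested
page table with the U/S bit set in every entry like so:

	/* Illustrative only: npt_cr3 and GUEST_MEM_SIZE are placeholders. */
	pgd_t *npt_cr3 = alloc_page();

	memset(npt_cr3, 0, PAGE_SIZE);

	/*
	 * Identity-map guest memory.  nested_mmu == true makes
	 * setup_mmu_range() set PT_USER_MASK in every installed PTE via
	 * set_pte_opt_mask(), and reset_pte_opt_mask() restores the
	 * previous pte_opt_mask afterwards.
	 */
	setup_mmu_range(npt_cr3, 0, GUEST_MEM_SIZE, true);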

Suggested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Manali Shukla <manali.shukla@amd.com>
---
 lib/x86/vm.c | 37 +++++++++++++++++++++++++++----------
 lib/x86/vm.h |  3 +++
 2 files changed, 30 insertions(+), 10 deletions(-)

Patch

diff --git a/lib/x86/vm.c b/lib/x86/vm.c
index 25a4f5f..b555d5b 100644
--- a/lib/x86/vm.c
+++ b/lib/x86/vm.c
@@ -4,7 +4,7 @@ 
 #include "alloc_page.h"
 #include "smp.h"
 
-static pteval_t pte_opt_mask;
+static pteval_t pte_opt_mask, prev_pte_opt_mask;
 
 pteval_t *install_pte(pgd_t *cr3,
 		      int pte_level,
@@ -140,16 +140,33 @@  bool any_present_pages(pgd_t *cr3, void *virt, size_t len)
 	return false;
 }
 
-static void setup_mmu_range(pgd_t *cr3, phys_addr_t start, size_t len)
+void set_pte_opt_mask(void)
+{
+	prev_pte_opt_mask = pte_opt_mask;
+	pte_opt_mask = PT_USER_MASK;
+}
+
+void reset_pte_opt_mask(void)
+{
+	pte_opt_mask = prev_pte_opt_mask;
+}
+
+void setup_mmu_range(pgd_t *cr3, phys_addr_t start, size_t len, bool nested_mmu)
 {
 	u64 max = (u64)len + (u64)start;
 	u64 phys = start;
 
-	while (phys + LARGE_PAGE_SIZE <= max) {
-		install_large_page(cr3, phys, (void *)(ulong)phys);
-		phys += LARGE_PAGE_SIZE;
-	}
-	install_pages(cr3, phys, max - phys, (void *)(ulong)phys);
+	if (!nested_mmu) {
+		while (phys + LARGE_PAGE_SIZE <= max) {
+			install_large_page(cr3, phys, (void *)(ulong)phys);
+			phys += LARGE_PAGE_SIZE;
+		}
+		install_pages(cr3, phys, max - phys, (void *)(ulong)phys);
+	} else {
+		set_pte_opt_mask();
+		install_pages(cr3, phys, len, (void *)(ulong)phys);
+		reset_pte_opt_mask();
+	}
 }
 
 static void set_additional_vcpu_vmregs(struct vm_vcpu_info *info)
@@ -176,10 +193,10 @@  void *setup_mmu(phys_addr_t end_of_memory, void *opt_mask)
     if (end_of_memory < (1ul << 32))
         end_of_memory = (1ul << 32);  /* map mmio 1:1 */
 
-    setup_mmu_range(cr3, 0, end_of_memory);
+    setup_mmu_range(cr3, 0, end_of_memory, false);
 #else
-    setup_mmu_range(cr3, 0, (2ul << 30));
-    setup_mmu_range(cr3, 3ul << 30, (1ul << 30));
+    setup_mmu_range(cr3, 0, (2ul << 30), false);
+    setup_mmu_range(cr3, 3ul << 30, (1ul << 30), false);
     init_alloc_vpage((void*)(3ul << 30));
 #endif
 
diff --git a/lib/x86/vm.h b/lib/x86/vm.h
index 4c6dff9..fbb657f 100644
--- a/lib/x86/vm.h
+++ b/lib/x86/vm.h
@@ -37,6 +37,9 @@  pteval_t *install_pte(pgd_t *cr3,
 pteval_t *install_large_page(pgd_t *cr3, phys_addr_t phys, void *virt);
 void install_pages(pgd_t *cr3, phys_addr_t phys, size_t len, void *virt);
 bool any_present_pages(pgd_t *cr3, void *virt, size_t len);
+void set_pte_opt_mask(void);
+void reset_pte_opt_mask(void);
+void setup_mmu_range(pgd_t *cr3, phys_addr_t start, size_t len, bool nested_mmu);
 
 static inline void *current_page_table(void)
 {