diff mbox series

[kvm-unit-tests,v2,13/16] x86 AMD SEV-SNP: Change guest pages from Intermix->Private using GHCB NAE

Message ID 20240718124932.114121-14-papaluri@amd.com (mailing list archive)
State New, archived
Headers show
Series Introduce SEV-SNP support | expand

Commit Message

Paluri, PavanKumar July 18, 2024, 12:49 p.m. UTC
The tests perform the following actions:
1. Allocates a 2M private page (512 4K entries) and converts the entire
range to shared.
2. Performs a write operation on these un-encrypted pages.
3. Performs partial page state change conversions on the first 256 4K
entries and conducts a re-validation test on one of these now-private
entries to determine whether the current page state is private or not.
4. Converts the whole 2M range from an intermixed state to private and
performs a re-validation check on the now-private 2M page.

The goal of this test is to ensure 2M page state changes are handled
properly even if 2M range contains a mix of private/shared pages.

Signed-off-by: Pavan Kumar Paluri <papaluri@amd.com>
---
 x86/amd_sev.c | 54 +++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 54 insertions(+)
diff mbox series

Patch

diff --git a/x86/amd_sev.c b/x86/amd_sev.c
index 12fe25dcdd0a..fc385613b993 100644
--- a/x86/amd_sev.c
+++ b/x86/amd_sev.c
@@ -290,6 +290,59 @@  static void test_sev_psc_ghcb_nae(void)
 	snp_free_pages(SEV_ALLOC_ORDER, SEV_ALLOC_PAGE_COUNT, vaddr, ghcb, true);
 }
 
+/*
+ * __test_sev_psc_private() - Convert @npages 4K pages at @vaddr back to the
+ * private state using the GHCB NAE page-state-change protocol, then verify
+ * the region validates as a private 2M page.
+ *
+ * @vaddr:          guest virtual address of the (2M-aligned) region
+ * @ghcb:           GHCB used to issue the page-state-change NAE event
+ * @allow_noupdate: tolerate PVALIDATE "no update" results for entries that
+ *                  were already private (expected when the range is intermixed)
+ *
+ * NOTE(review): the PTE C-bit is set before the RMP state is flipped to
+ * private; presumably the region is not touched in between — confirm
+ * set_pte_encrypted() does not itself access the pages.
+ */
+static void __test_sev_psc_private(unsigned long vaddr, struct ghcb *ghcb,
+				   int npages, bool allow_noupdate)
+{
+	/* Re-set the C-bit in the PTEs covering the range */
+	set_pte_encrypted(vaddr, npages);
+
+	/* Convert the whole 2M range back to private */
+	sev_set_pages_state(vaddr, npages, SNP_PAGE_STATE_PRIVATE, ghcb,
+			    allow_noupdate);
+
+	/* Re-validate as a single 2M entry to confirm the conversion stuck */
+	report(is_validated_private_page(vaddr, RMP_PG_SIZE_2M),
+	       "Expected page state: Private");
+}
+
+/*
+ * test_sev_psc_intermix() - Exercise page-state changes on a 2M range that
+ * ends up holding a mix of private and shared 4K entries.
+ *
+ * Sequence: allocate a private 2M region, convert all of it to shared,
+ * convert the first 256 4K entries back to private (leaving the 2M range
+ * intermixed), and — if @to_private — convert the entire range back to
+ * private and re-validate it as a 2M page.
+ *
+ * @to_private: when true, finish with the intermixed->private conversion.
+ *
+ * NOTE(review): the commit message mentions a write to the un-encrypted
+ * pages between steps; no such access is visible here — confirm whether it
+ * was intentionally dropped.
+ */
+static void test_sev_psc_intermix(bool to_private)
+{
+	unsigned long vaddr;
+	/* GHCB GPA is published via the GHCB MSR; reuse it for NAE events */
+	struct ghcb *ghcb = (struct ghcb *)(rdmsr(SEV_ES_GHCB_MSR_INDEX));
+
+	/* Allocate a 2M private page */
+	vaddr = (unsigned long)vmalloc_pages((SEV_ALLOC_PAGE_COUNT) / 2,
+					     SEV_ALLOC_ORDER - 1, RMP_PG_SIZE_2M);
+
+	/* Ensure pages are in private state by checking the page is private */
+	report(is_validated_private_page(vaddr, RMP_PG_SIZE_2M),
+	       "Expected page state: Private");
+
+	/* Convert the whole range (512 4K entries) to shared */
+	sev_set_pages_state(vaddr, (SEV_ALLOC_PAGE_COUNT) / 2,
+			    SNP_PAGE_STATE_SHARED, ghcb, false);
+
+	/* Clear the C-bit so the now-shared range is mapped un-encrypted */
+	set_pte_decrypted(vaddr, (SEV_ALLOC_PAGE_COUNT) / 2);
+
+	/* Re-set the C-bit ahead of converting sub-pages back to private */
+	set_pte_encrypted(vaddr, (SEV_ALLOC_PAGE_COUNT) / 2);
+	/* Convert a bunch of sub-pages (256) to private and leave the rest shared */
+	sev_set_pages_state(vaddr, 256, SNP_PAGE_STATE_PRIVATE, ghcb, false);
+
+	/* Re-validate one of the now-private 4K entries */
+	report(is_validated_private_page(vaddr, RMP_PG_SIZE_4K),
+	       "Expected page state: Private");
+
+	/* Now convert all the pages back to private */
+	if (to_private)
+		__test_sev_psc_private(vaddr, ghcb, (SEV_ALLOC_PAGE_COUNT) / 2, true);
+
+	/* Free up all the used pages */
+	snp_free_pages(SEV_ALLOC_ORDER - 1, (SEV_ALLOC_PAGE_COUNT) / 2,
+		       vaddr, ghcb, true);
+}
+
+/*
+ * test_sev_psc_intermix_to_private() - Wrapper running the intermixed-range
+ * PSC test with the final conversion back to private enabled.
+ */
+static void test_sev_psc_intermix_to_private(void)
+{
+	report_info("TEST: 2M Intermixed to Private PSC test");
+	test_sev_psc_intermix(true);
+}
+
 int main(void)
 {
 	int rtn;
@@ -309,6 +362,7 @@  int main(void)
 		init_vpages();
 		test_sev_psc_ghcb_msr();
 		test_sev_psc_ghcb_nae();
+		test_sev_psc_intermix_to_private();
 	}
 
 	return report_summary();