diff mbox series

[kvm-unit-tests,v2,12/16] x86 AMD SEV-SNP: Change guest pages from Shared->Private using GHCB NAE

Message ID 20240718124932.114121-13-papaluri@amd.com (mailing list archive)
State New, archived
Headers show
Series Introduce SEV-SNP support | expand

Commit Message

Paluri, PavanKumar July 18, 2024, 12:49 p.m. UTC
Convert the same pages that were previously converted to shared back to private.

The test handles both 4K and 2M large pages depending on the order and
the page size specified.

While at it, update pvalidate_pages() to not treat
PVALIDATE_FAIL_NOUPDATE as an error when re-validating already-private
pages as part of the cleanup process.

Signed-off-by: Pavan Kumar Paluri <papaluri@amd.com>
---
 lib/x86/amd_sev.c | 22 ++++++++++++++++------
 lib/x86/amd_sev.h |  3 ++-
 x86/amd_sev.c     | 23 +++++++++++++++++------
 3 files changed, 35 insertions(+), 13 deletions(-)
diff mbox series

Patch

diff --git a/lib/x86/amd_sev.c b/lib/x86/amd_sev.c
index e2f99bc8eded..c2f2a3f43193 100644
--- a/lib/x86/amd_sev.c
+++ b/lib/x86/amd_sev.c
@@ -347,7 +347,16 @@  enum es_result __sev_set_pages_state_msr_proto(unsigned long vaddr, int npages,
 	return ES_OK;
 }
 
-static void pvalidate_pages(struct snp_psc_desc *desc, unsigned long *vaddr_arr)
+static bool pvalidate_failed(int result, bool allow_noupdate)
+{
+	if (result && (!allow_noupdate || result != PVALIDATE_FAIL_NOUPDATE))
+		return true;
+
+	return false;
+}
+
+static void pvalidate_pages(struct snp_psc_desc *desc, unsigned long *vaddr_arr,
+			    bool allow_noupdate)
 {
 	struct psc_entry *entry;
 	int ret, i;
@@ -366,11 +375,11 @@  static void pvalidate_pages(struct snp_psc_desc *desc, unsigned long *vaddr_arr)
 
 			for (; vaddr < vaddr_end; vaddr += PAGE_SIZE) {
 				ret = pvalidate(vaddr, RMP_PG_SIZE_4K, validate);
-				if (ret)
+				if (pvalidate_failed(ret, allow_noupdate))
 					break;
 			}
 		}
-		assert(!ret);
+		assert(!pvalidate_failed(ret, allow_noupdate));
 	}
 }
 
@@ -466,7 +475,8 @@  static void add_psc_entry(struct snp_psc_desc *desc, u8 idx, u8 op, unsigned lon
 
 unsigned long __sev_set_pages_state(struct snp_psc_desc *desc, unsigned long vaddr,
 				    unsigned long vaddr_end, int op,
-				    struct ghcb *ghcb, bool large_entry)
+				    struct ghcb *ghcb, bool large_entry,
+				    bool allow_noupdate)
 {
 	unsigned long vaddr_arr[VMGEXIT_PSC_MAX_ENTRY];
 	int ret, iter = 0, iter2 = 0;
@@ -493,13 +503,13 @@  unsigned long __sev_set_pages_state(struct snp_psc_desc *desc, unsigned long vad
 	}
 
 	if (op == SNP_PAGE_STATE_SHARED)
-		pvalidate_pages(desc, vaddr_arr);
+		pvalidate_pages(desc, vaddr_arr, allow_noupdate);
 
 	ret = vmgexit_psc(desc, ghcb);
 	assert_msg(!ret, "VMGEXIT failed with ret value: %d", ret);
 
 	if (op == SNP_PAGE_STATE_PRIVATE)
-		pvalidate_pages(desc, vaddr_arr);
+		pvalidate_pages(desc, vaddr_arr, allow_noupdate);
 
 	for (iter2 = 0; iter2 < iter; iter2++) {
 		page_size = desc->entries[iter2].pagesize;
diff --git a/lib/x86/amd_sev.h b/lib/x86/amd_sev.h
index bf065ef613b7..e180a269fb63 100644
--- a/lib/x86/amd_sev.h
+++ b/lib/x86/amd_sev.h
@@ -244,7 +244,8 @@  enum es_result  __sev_set_pages_state_msr_proto(unsigned long vaddr,
 					        int npages, int operation);
 unsigned long __sev_set_pages_state(struct snp_psc_desc *desc, unsigned long vaddr,
 				    unsigned long vaddr_end, int op,
-				    struct ghcb *ghcb, bool large_entry);
+				    struct ghcb *ghcb, bool large_entry,
+				    bool allow_noupdate);
 void vc_ghcb_invalidate(struct ghcb *ghcb);
 
 unsigned long long get_amd_sev_c_bit_mask(void);
diff --git a/x86/amd_sev.c b/x86/amd_sev.c
index 15281835d0ef..12fe25dcdd0a 100644
--- a/x86/amd_sev.c
+++ b/x86/amd_sev.c
@@ -174,7 +174,7 @@  static int test_write(unsigned long vaddr, int npages)
 }
 
 static void sev_set_pages_state(unsigned long vaddr, int npages, int op,
-				struct ghcb *ghcb)
+				struct ghcb *ghcb, bool allow_noupdate)
 {
 	struct snp_psc_desc desc;
 	unsigned long vaddr_end;
@@ -188,17 +188,19 @@  static void sev_set_pages_state(unsigned long vaddr, int npages, int op,
 
 	while (vaddr < vaddr_end) {
 		vaddr = __sev_set_pages_state(&desc, vaddr, vaddr_end,
-					      op, ghcb, large_entry);
+					      op, ghcb, large_entry,
+					      allow_noupdate);
 	}
 }
 
 static void snp_free_pages(int order, int npages, unsigned long vaddr,
-			   struct ghcb *ghcb)
+			   struct ghcb *ghcb, bool allow_noupdate)
 {
 	set_pte_encrypted(vaddr, SEV_ALLOC_PAGE_COUNT);
 
 	/* Convert pages back to default guest-owned state */
-	sev_set_pages_state(vaddr, npages, SNP_PAGE_STATE_PRIVATE, ghcb);
+	sev_set_pages_state(vaddr, npages, SNP_PAGE_STATE_PRIVATE, ghcb,
+			    allow_noupdate);
 
 	/* Free all the associated physical pages */
 	free_pages_by_order((void *)pgtable_va_to_pa(vaddr), order);
@@ -268,7 +270,7 @@  static void test_sev_psc_ghcb_nae(void)
 	       "Expected page state: Private");
 
 	sev_set_pages_state(vaddr, SEV_ALLOC_PAGE_COUNT, SNP_PAGE_STATE_SHARED,
-			    ghcb);
+			    ghcb, false);
 
 	set_pte_decrypted(vaddr, SEV_ALLOC_PAGE_COUNT);
 
@@ -276,7 +278,16 @@  static void test_sev_psc_ghcb_nae(void)
 	       "Write to %d unencrypted 2M pages after private->shared conversion",
 	       (SEV_ALLOC_PAGE_COUNT) / (1 << ORDER_2M));
 
-	snp_free_pages(SEV_ALLOC_ORDER, SEV_ALLOC_PAGE_COUNT, vaddr, ghcb);
+	/* Convert pages from shared->private */
+	set_pte_encrypted(vaddr, SEV_ALLOC_PAGE_COUNT);
+
+	sev_set_pages_state(vaddr, SEV_ALLOC_PAGE_COUNT, SNP_PAGE_STATE_PRIVATE,
+			    ghcb, false);
+
+	report(is_validated_private_page(vaddr, RMP_PG_SIZE_2M),
+	       "Expected page state: Private");
+
+	snp_free_pages(SEV_ALLOC_ORDER, SEV_ALLOC_PAGE_COUNT, vaddr, ghcb, true);
 }
 
 int main(void)