diff mbox series

[RFC,V6,10/14] hv: vmbus: Mask VMBus pages unencrypted for sev-snp enlightened guest

Message ID 20230515165917.1306922-11-ltykernel@gmail.com (mailing list archive)
State New, archived
Headers show
Series x86/hyperv/sev: Add AMD sev-snp enlightened guest support on hyperv | expand

Commit Message

Tianyu Lan May 15, 2023, 4:59 p.m. UTC
From: Tianyu Lan <tiala@microsoft.com>

VMBus post msg, synic event and message pages need to be
shared with the hypervisor, so mask these pages as unencrypted
in the sev-snp guest.

Signed-off-by: Tianyu Lan <tiala@microsoft.com>
---
Change since RFC V4:
       * Fix encrypt and free page order.

Change since RFC V3:
       * Set encrypt page back in the hv_synic_free()

Change since RFC V2:
       * Fix errors in the error code path and encrypt
       	 pages correctly when decryption failure happens.
---
 drivers/hv/hv.c | 37 ++++++++++++++++++++++++++++++++++---
 1 file changed, 34 insertions(+), 3 deletions(-)
diff mbox series

Patch

diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index de6708dbe0df..d29bbf0c7108 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -20,6 +20,7 @@ 
 #include <linux/interrupt.h>
 #include <clocksource/hyperv_timer.h>
 #include <asm/mshyperv.h>
+#include <linux/set_memory.h>
 #include "hyperv_vmbus.h"
 
 /* The one and only */
@@ -78,7 +79,7 @@  int hv_post_message(union hv_connection_id connection_id,
 
 int hv_synic_alloc(void)
 {
-	int cpu;
+	int cpu, ret;
 	struct hv_per_cpu_context *hv_cpu;
 
 	/*
@@ -123,9 +124,29 @@  int hv_synic_alloc(void)
 				goto err;
 			}
 		}
+
+		if (hv_isolation_type_en_snp()) {
+			ret = set_memory_decrypted((unsigned long)
+				hv_cpu->synic_message_page, 1);
+			if (ret)
+				goto err;
+
+			ret = set_memory_decrypted((unsigned long)
+				hv_cpu->synic_event_page, 1);
+			if (ret)
+				goto err_decrypt_event_page;
+
+			memset(hv_cpu->synic_message_page, 0, PAGE_SIZE);
+			memset(hv_cpu->synic_event_page, 0, PAGE_SIZE);
+		}
 	}
 
 	return 0;
+
+err_decrypt_event_page:
+	set_memory_encrypted((unsigned long)
+		hv_cpu->synic_message_page, 1);
+
 err:
 	/*
 	 * Any memory allocations that succeeded will be freed when
@@ -143,8 +164,18 @@  void hv_synic_free(void)
 		struct hv_per_cpu_context *hv_cpu
 			= per_cpu_ptr(hv_context.cpu_context, cpu);
 
-		free_page((unsigned long)hv_cpu->synic_event_page);
-		free_page((unsigned long)hv_cpu->synic_message_page);
+		if (hv_isolation_type_en_snp()) {
+			if (!set_memory_encrypted((unsigned long)
+			    hv_cpu->synic_message_page, 1))
+				free_page((unsigned long)hv_cpu->synic_message_page);
+
+			if (!set_memory_encrypted((unsigned long)
+			    hv_cpu->synic_event_page, 1))
+				free_page((unsigned long)hv_cpu->synic_event_page);
+		} else {
+			free_page((unsigned long)hv_cpu->synic_event_page);
+			free_page((unsigned long)hv_cpu->synic_message_page);
+		}
 	}
 
 	kfree(hv_context.hv_numa_map);