[RFC,V2,08/18] x86/hyperv: decrypt vmbus pages for sev-snp enlightened guest

Message ID 20221119034633.1728632-9-ltykernel@gmail.com
State New, archived
Series x86/hyperv/sev: Add AMD sev-snp enlightened guest support on hyperv

Commit Message

Tianyu Lan Nov. 19, 2022, 3:46 a.m. UTC
From: Tianyu Lan <tiala@microsoft.com>

Vmbus int, synic and post message pages are shared with the hypervisor,
so decrypt these pages in the SEV-SNP guest.

Signed-off-by: Tianyu Lan <tiala@microsoft.com>
---
 drivers/hv/connection.c | 13 +++++++++++++
 drivers/hv/hv.c         | 32 +++++++++++++++++++++++++++++++-
 2 files changed, 44 insertions(+), 1 deletion(-)

Comments

Michael Kelley (LINUX) Dec. 13, 2022, 6:08 p.m. UTC | #1
From: Tianyu Lan <ltykernel@gmail.com> Sent: Friday, November 18, 2022 7:46 PM
> 

The Subject prefix for this patch should be "Drivers: hv: vmbus:"

> Vmbus int, synic and post message pages are shared with hypervisor
> and so decrypt these pages in the sev-snp guest.
> 
> Signed-off-by: Tianyu Lan <tiala@microsoft.com>
> ---
>  drivers/hv/connection.c | 13 +++++++++++++
>  drivers/hv/hv.c         | 32 +++++++++++++++++++++++++++++++-
>  2 files changed, 44 insertions(+), 1 deletion(-)
> 
> diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
> index 9dc27e5d367a..43141225ea15 100644
> --- a/drivers/hv/connection.c
> +++ b/drivers/hv/connection.c
> @@ -215,6 +215,15 @@ int vmbus_connect(void)
>  		(void *)((unsigned long)vmbus_connection.int_page +
>  			(HV_HYP_PAGE_SIZE >> 1));
> 
> +	if (hv_isolation_type_snp() || hv_isolation_type_en_snp()) {

This decryption should be done only for a fully enlightened SEV-SNP
guest, not for a vTOM guest.
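
i.e. something like this (untested, assuming hv_isolation_type_en_snp()
is the fully-enlightened check introduced earlier in this series):

	if (hv_isolation_type_en_snp()) {	/* not for vTOM guests */
		ret = set_memory_decrypted((unsigned long)
				vmbus_connection.int_page, 1);
		/* rest of the block unchanged */
	}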

> +		ret = set_memory_decrypted((unsigned long)
> +				vmbus_connection.int_page, 1);
> +		if (ret)
> +			goto cleanup;

This cleanup path doesn't work correctly.  It calls
vmbus_disconnect(), which will try to re-encrypt the memory.
But if the original decryption failed, re-encrypting is the wrong
thing to do.

It looks like this same bug exists in current code if the decryption
of the monitor pages fails or if just one of the original memory
allocations fails.  vmbus_disconnect() doesn't know whether it
should re-encrypt the pages.
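
One option (untested sketch, not necessarily how the next version has to
do it) is for vmbus_connect() to drop the pointer itself when decryption
fails, deliberately leaking the page, so vmbus_disconnect() never tries
to re-encrypt or free it:

		ret = set_memory_decrypted((unsigned long)
				vmbus_connection.int_page, 1);
		if (ret) {
			/*
			 * The page's encryption state is now uncertain, so
			 * leak it: clearing the pointer keeps
			 * vmbus_disconnect() from re-encrypting/freeing it.
			 */
			vmbus_connection.int_page = NULL;
			goto cleanup;
		}

		memset(vmbus_connection.int_page, 0, PAGE_SIZE);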

> +
> +		memset(vmbus_connection.int_page, 0, PAGE_SIZE);
> +	}
> +
>  	/*
>  	 * Setup the monitor notification facility. The 1st page for
>  	 * parent->child and the 2nd page for child->parent
> @@ -372,6 +381,10 @@ void vmbus_disconnect(void)
>  		destroy_workqueue(vmbus_connection.work_queue);
> 
>  	if (vmbus_connection.int_page) {
> +		if (hv_isolation_type_en_snp())
> +			set_memory_encrypted((unsigned long)
> +				vmbus_connection.int_page, 1);
> +
>  		hv_free_hyperv_page((unsigned long)vmbus_connection.int_page);
>  		vmbus_connection.int_page = NULL;
>  	}
> diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
> index 4d6480d57546..f9111eb32739 100644
> --- a/drivers/hv/hv.c
> +++ b/drivers/hv/hv.c
> @@ -20,6 +20,7 @@
>  #include <linux/interrupt.h>
>  #include <clocksource/hyperv_timer.h>
>  #include <asm/mshyperv.h>
> +#include <linux/set_memory.h>
>  #include "hyperv_vmbus.h"
> 
>  /* The one and only */
> @@ -117,7 +118,7 @@ int hv_post_message(union hv_connection_id connection_id,
> 
>  int hv_synic_alloc(void)
>  {
> -	int cpu;
> +	int cpu, ret;
>  	struct hv_per_cpu_context *hv_cpu;
> 
>  	/*
> @@ -168,6 +169,29 @@ int hv_synic_alloc(void)
>  			pr_err("Unable to allocate post msg page\n");
>  			goto err;
>  		}
> +
> +		if (hv_isolation_type_en_snp()) {
> +			ret = set_memory_decrypted((unsigned long)
> +				hv_cpu->synic_message_page, 1);
> +			ret |= set_memory_decrypted((unsigned long)
> +				hv_cpu->synic_event_page, 1);
> +			ret |= set_memory_decrypted((unsigned long)
> +				hv_cpu->post_msg_page, 1);
> +
> +			if (ret) {
> +				set_memory_encrypted((unsigned long)
> +					hv_cpu->synic_message_page, 1);
> +				set_memory_encrypted((unsigned long)
> +					hv_cpu->synic_event_page, 1);
> +				set_memory_encrypted((unsigned long)
> +					hv_cpu->post_msg_page, 1);
> +				goto err;

Same kind of cleanup problem here.  Some of the memory may have
been decrypted, but some may not have.  Re-encrypting all three pages
risks re-encrypting a page that failed to be decrypted, and that might
cause problems.
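
Decrypting and checking each page individually, and clearing the pointer
of any page whose state is uncertain, is one way to avoid that (untested
sketch; any page that was never decrypted would also need to be freed and
cleared here so hv_synic_free() doesn't try to re-encrypt it):

		if (hv_isolation_type_en_snp()) {
			ret = set_memory_decrypted((unsigned long)
				hv_cpu->synic_message_page, 1);
			if (ret) {
				pr_err("Failed to decrypt SYNIC msg page: %d\n", ret);
				/*
				 * State uncertain: leak this page by clearing
				 * the pointer so hv_synic_free() skips it.
				 */
				hv_cpu->synic_message_page = NULL;
				goto err;
			}
			memset(hv_cpu->synic_message_page, 0, PAGE_SIZE);

			/* Same pattern for synic_event_page and post_msg_page. */
		}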

> +			}
> +
> +			memset(hv_cpu->synic_message_page, 0, PAGE_SIZE);
> +			memset(hv_cpu->synic_event_page, 0, PAGE_SIZE);
> +			memset(hv_cpu->post_msg_page, 0, PAGE_SIZE);
> +		}
>  	}
> 
>  	return 0;
> @@ -188,6 +212,12 @@ void hv_synic_free(void)
>  		struct hv_per_cpu_context *hv_cpu
>  			= per_cpu_ptr(hv_context.cpu_context, cpu);
> 
> +		if (hv_isolation_type_en_snp()) {
> +			set_memory_encrypted((unsigned long)hv_cpu->synic_message_page, 1);
> +			set_memory_encrypted((unsigned long)hv_cpu->synic_event_page, 1);
> +			set_memory_encrypted((unsigned long)hv_cpu->post_msg_page, 1);

This cleanup doesn't always work correctly.  There are multiple memory
allocations in hv_synic_alloc().  If some succeeded but some failed, then
we might get here with some memory that was allocated but not decrypted.
Trying to re-encrypt that memory before freeing it could cause problems.
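
If hv_synic_alloc() clears the pointer for anything it did not
successfully decrypt, then hv_synic_free() only needs to re-encrypt pages
that are still non-NULL, and can leak (rather than free) any page it
fails to re-encrypt.  Untested sketch for one of the pages:

		if (hv_isolation_type_en_snp() && hv_cpu->synic_message_page &&
		    set_memory_encrypted((unsigned long)
					 hv_cpu->synic_message_page, 1)) {
			pr_err("Failed to re-encrypt SYNIC msg page\n");
			/* Still shared with the host: leak it instead of freeing. */
			hv_cpu->synic_message_page = NULL;
		}
		/* Likewise for synic_event_page and post_msg_page, then: */

		free_page((unsigned long)hv_cpu->synic_event_page);
		free_page((unsigned long)hv_cpu->synic_message_page);
		free_page((unsigned long)hv_cpu->post_msg_page);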

> +		}
> +
>  		free_page((unsigned long)hv_cpu->synic_event_page);
>  		free_page((unsigned long)hv_cpu->synic_message_page);
>  		free_page((unsigned long)hv_cpu->post_msg_page);
> --
> 2.25.1
Tianyu Lan Dec. 26, 2022, 4:19 a.m. UTC | #2
On 12/14/2022 2:08 AM, Michael Kelley (LINUX) wrote:
> From: Tianyu Lan <ltykernel@gmail.com> Sent: Friday, November 18, 2022 7:46 PM
>>
> 
> The Subject prefix for this patch should be "Drivers: hv: vmbus:"

Sure. Will update in the next version.

> 
>> Vmbus int, synic and post message pages are shared with hypervisor
>> and so decrypt these pages in the sev-snp guest.
>>
>> Signed-off-by: Tianyu Lan <tiala@microsoft.com>
>> ---
>>   drivers/hv/connection.c | 13 +++++++++++++
>>   drivers/hv/hv.c         | 32 +++++++++++++++++++++++++++++++-
>>   2 files changed, 44 insertions(+), 1 deletion(-)
>>
>> diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
>> index 9dc27e5d367a..43141225ea15 100644
>> --- a/drivers/hv/connection.c
>> +++ b/drivers/hv/connection.c
>> @@ -215,6 +215,15 @@ int vmbus_connect(void)
>>   		(void *)((unsigned long)vmbus_connection.int_page +
>>   			(HV_HYP_PAGE_SIZE >> 1));
>>
>> +	if (hv_isolation_type_snp() || hv_isolation_type_en_snp()) {
> 
> This decryption should be done only for a fully enlightened SEV-SNP
> guest, not for a vTOM guest.
> 
>> +		ret = set_memory_decrypted((unsigned long)
>> +				vmbus_connection.int_page, 1);
>> +		if (ret)
>> +			goto cleanup;
> 
> This cleanup path doesn't work correctly.  It calls
> vmbus_disconnect(), which will try to re-encrypt the memory.
> But if the original decryption failed, re-encrypting is the wrong
> thing to do.
> 
> It looks like this same bug exists in current code if the decryption
> of the monitor pages fails or if just one of the original memory
> allocations fails.  vmbus_disconnect() doesn't know whether it
> should re-encrypt the pages.

Agree. It's necessary to handle decryption failure case by case instead
of re-encrypting all pages. Will fix this in the next version. Thanks for
pointing it out.
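
Maybe something along these lines for the vmbus_disconnect() side as
well, so a page that cannot be re-encrypted is leaked instead of being
handed back to the allocator (untested sketch):

	if (vmbus_connection.int_page) {
		if (hv_isolation_type_en_snp() &&
		    set_memory_encrypted((unsigned long)
					 vmbus_connection.int_page, 1)) {
			/* Still shared with the host: leak it rather than free it. */
			pr_warn("Failed to re-encrypt VMBus int page\n");
		} else {
			hv_free_hyperv_page((unsigned long)vmbus_connection.int_page);
		}
		vmbus_connection.int_page = NULL;
	}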

Patch

diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index 9dc27e5d367a..43141225ea15 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -215,6 +215,15 @@  int vmbus_connect(void)
 		(void *)((unsigned long)vmbus_connection.int_page +
 			(HV_HYP_PAGE_SIZE >> 1));
 
+	if (hv_isolation_type_snp() || hv_isolation_type_en_snp()) {
+		ret = set_memory_decrypted((unsigned long)
+				vmbus_connection.int_page, 1);
+		if (ret)
+			goto cleanup;
+
+		memset(vmbus_connection.int_page, 0, PAGE_SIZE);
+	}
+
 	/*
 	 * Setup the monitor notification facility. The 1st page for
 	 * parent->child and the 2nd page for child->parent
@@ -372,6 +381,10 @@  void vmbus_disconnect(void)
 		destroy_workqueue(vmbus_connection.work_queue);
 
 	if (vmbus_connection.int_page) {
+		if (hv_isolation_type_en_snp())
+			set_memory_encrypted((unsigned long)
+				vmbus_connection.int_page, 1);
+
 		hv_free_hyperv_page((unsigned long)vmbus_connection.int_page);
 		vmbus_connection.int_page = NULL;
 	}
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index 4d6480d57546..f9111eb32739 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -20,6 +20,7 @@ 
 #include <linux/interrupt.h>
 #include <clocksource/hyperv_timer.h>
 #include <asm/mshyperv.h>
+#include <linux/set_memory.h>
 #include "hyperv_vmbus.h"
 
 /* The one and only */
@@ -117,7 +118,7 @@  int hv_post_message(union hv_connection_id connection_id,
 
 int hv_synic_alloc(void)
 {
-	int cpu;
+	int cpu, ret;
 	struct hv_per_cpu_context *hv_cpu;
 
 	/*
@@ -168,6 +169,29 @@  int hv_synic_alloc(void)
 			pr_err("Unable to allocate post msg page\n");
 			goto err;
 		}
+
+		if (hv_isolation_type_en_snp()) {
+			ret = set_memory_decrypted((unsigned long)
+				hv_cpu->synic_message_page, 1);
+			ret |= set_memory_decrypted((unsigned long)
+				hv_cpu->synic_event_page, 1);
+			ret |= set_memory_decrypted((unsigned long)
+				hv_cpu->post_msg_page, 1);
+
+			if (ret) {
+				set_memory_encrypted((unsigned long)
+					hv_cpu->synic_message_page, 1);
+				set_memory_encrypted((unsigned long)
+					hv_cpu->synic_event_page, 1);
+				set_memory_encrypted((unsigned long)
+					hv_cpu->post_msg_page, 1);
+				goto err;
+			}
+
+			memset(hv_cpu->synic_message_page, 0, PAGE_SIZE);
+			memset(hv_cpu->synic_event_page, 0, PAGE_SIZE);
+			memset(hv_cpu->post_msg_page, 0, PAGE_SIZE);
+		}
 	}
 
 	return 0;
@@ -188,6 +212,12 @@  void hv_synic_free(void)
 		struct hv_per_cpu_context *hv_cpu
 			= per_cpu_ptr(hv_context.cpu_context, cpu);
 
+		if (hv_isolation_type_en_snp()) {
+			set_memory_encrypted((unsigned long)hv_cpu->synic_message_page, 1);
+			set_memory_encrypted((unsigned long)hv_cpu->synic_event_page, 1);
+			set_memory_encrypted((unsigned long)hv_cpu->post_msg_page, 1);
+		}
+
 		free_page((unsigned long)hv_cpu->synic_event_page);
 		free_page((unsigned long)hv_cpu->synic_message_page);
 		free_page((unsigned long)hv_cpu->post_msg_page);