[RFC,V4,06/17] x86/hyperv: decrypt VMBus pages for sev-snp enlightened guest

Message ID 20230403174406.4180472-7-ltykernel@gmail.com (mailing list archive)
State New, archived
Series x86/hyperv/sev: Add AMD sev-snp enlightened guest support on hyperv

Commit Message

Tianyu Lan April 3, 2023, 5:43 p.m. UTC
From: Tianyu Lan <tiala@microsoft.com>

The VMBus post msg, synic event, and synic message pages are shared
with the hypervisor, so decrypt these pages in the sev-snp guest.

Signed-off-by: Tianyu Lan <tiala@microsoft.com>
---
Change since RFC V3:
       * Set pages back to encrypted in hv_synic_free()

Change since RFC V2:
       * Fix the error code path and re-encrypt pages correctly
         when decryption fails.
---
 drivers/hv/hv.c | 42 +++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 41 insertions(+), 1 deletion(-)

Comments

Michael Kelley (LINUX) April 12, 2023, 2:32 p.m. UTC | #1
From: Tianyu Lan <ltykernel@gmail.com> Sent: Monday, April 3, 2023 10:44 AM
> 

The Subject prefix for this patch is still wrong.  I previously commented on
this. :-(   It should be Drivers: hv: vmbus: 


> The VMBus post msg, synic event, and synic message pages are shared
> with the hypervisor, so decrypt these pages in the sev-snp guest.
> 
> Signed-off-by: Tianyu Lan <tiala@microsoft.com>
> ---
> Change since RFC V3:
>        * Set pages back to encrypted in hv_synic_free()
> 
> Change since RFC V2:
>        * Fix the error code path and re-encrypt pages correctly
>          when decryption fails.
> ---
>  drivers/hv/hv.c | 42 +++++++++++++++++++++++++++++++++++++++++-
>  1 file changed, 41 insertions(+), 1 deletion(-)
> 
> diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
> index 008234894d28..e09cea8f2f04 100644
> --- a/drivers/hv/hv.c
> +++ b/drivers/hv/hv.c
> @@ -20,6 +20,7 @@
>  #include <linux/interrupt.h>
>  #include <clocksource/hyperv_timer.h>
>  #include <asm/mshyperv.h>
> +#include <linux/set_memory.h>
>  #include "hyperv_vmbus.h"
> 
>  /* The one and only */
> @@ -117,7 +118,7 @@ int hv_post_message(union hv_connection_id connection_id,
> 
>  int hv_synic_alloc(void)
>  {
> -	int cpu;
> +	int cpu, ret;
>  	struct hv_per_cpu_context *hv_cpu;
> 
>  	/*
> @@ -168,9 +169,39 @@ int hv_synic_alloc(void)
>  			pr_err("Unable to allocate post msg page\n");
>  			goto err;
>  		}
> +
> +		if (hv_isolation_type_en_snp()) {
> +			ret = set_memory_decrypted((unsigned long)
> +				hv_cpu->synic_message_page, 1);
> +			if (ret)
> +				goto err;
> +
> +			ret = set_memory_decrypted((unsigned long)
> +				hv_cpu->synic_event_page, 1);
> +			if (ret)
> +				goto err_decrypt_event_page;
> +
> +			ret = set_memory_decrypted((unsigned long)
> +				hv_cpu->post_msg_page, 1);
> +			if (ret)
> +				goto err_decrypt_msg_page;
> +
> +			memset(hv_cpu->synic_message_page, 0, PAGE_SIZE);
> +			memset(hv_cpu->synic_event_page, 0, PAGE_SIZE);
> +			memset(hv_cpu->post_msg_page, 0, PAGE_SIZE);
> +		}
>  	}
> 
>  	return 0;
> +
> +err_decrypt_msg_page:
> +	set_memory_encrypted((unsigned long)
> +		hv_cpu->synic_event_page, 1);
> +
> +err_decrypt_event_page:
> +	set_memory_encrypted((unsigned long)
> +		hv_cpu->synic_message_page, 1);
> +
>  err:
>  	/*
>  	 * Any memory allocations that succeeded will be freed when
> @@ -191,6 +222,15 @@ void hv_synic_free(void)
>  		free_page((unsigned long)hv_cpu->synic_event_page);
>  		free_page((unsigned long)hv_cpu->synic_message_page);
>  		free_page((unsigned long)hv_cpu->post_msg_page);
> +
> +		if (hv_isolation_type_en_snp()) {
> +			set_memory_encrypted((unsigned long)
> +				hv_cpu->synic_message_page, 1);
> +			set_memory_encrypted((unsigned long)
> +				hv_cpu->synic_event_page, 1);
> +			set_memory_encrypted((unsigned long)
> +				hv_cpu->post_msg_page, 1);
> +		}

The re-encryption must be done *before* pages are freed!

Furthermore, if the re-encryption fails, we should not free
the page as it would pollute the free memory pool.  The best
we can do is leak the memory.  See Patch 5 in Dexuan's
TDX series, which does the same thing (but still doesn't
get it quite right, per my comments).

Michael

>  	}
> 
>  	kfree(hv_context.hv_numa_map);
> --
> 2.25.1
Tianyu Lan April 14, 2023, 4:40 a.m. UTC | #2
On 4/12/2023 10:32 PM, Michael Kelley (LINUX) wrote:
>> @@ -191,6 +222,15 @@ void hv_synic_free(void)
>>   		free_page((unsigned long)hv_cpu->synic_event_page);
>>   		free_page((unsigned long)hv_cpu->synic_message_page);
>>   		free_page((unsigned long)hv_cpu->post_msg_page);
>> +
>> +		if (hv_isolation_type_en_snp()) {
>> +			set_memory_encrypted((unsigned long)
>> +				hv_cpu->synic_message_page, 1);
>> +			set_memory_encrypted((unsigned long)
>> +				hv_cpu->synic_event_page, 1);
>> +			set_memory_encrypted((unsigned long)
>> +				hv_cpu->post_msg_page, 1);
>> +		}
> The re-encryption must be done *before* pages are freed!
> 
> Furthermore, if the re-encryption fails, we should not free
> the page as it would pollute the free memory pool.  The best
> we can do is leak the memory.  See Patch 5 in Dexuan's
> TDX series, which does the same thing (but still doesn't
> get it quite right, per my comments).
> 

You are right. The order is wrong. We should figure out the right
solution to handle this case.
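
One possible shape of such a solution, shown here only as a sketch and
not as the fix that was ultimately applied: re-encrypt each shared
SynIC page before freeing it, and leak the page if re-encryption fails
so that a decrypted page never reaches the free memory pool. Note that
hv_free_shared_page() below is a hypothetical helper, not part of the
posted patch, and hv_isolation_type_en_snp() comes from earlier patches
in this series.

/*
 * Sketch only: re-encrypt a shared SynIC page before freeing it.
 * If re-encryption fails, leak the page rather than returning a
 * decrypted page to the allocator.
 */
static void hv_free_shared_page(void **page)	/* hypothetical helper */
{
	if (*page && hv_isolation_type_en_snp() &&
	    set_memory_encrypted((unsigned long)*page, 1)) {
		pr_err("Failed to re-encrypt page, leaking it\n");
		*page = NULL;
	}

	/* free_page(0) is a no-op, so a leaked page is simply skipped */
	free_page((unsigned long)*page);
	*page = NULL;
}

void hv_synic_free(void)
{
	int cpu;

	for_each_present_cpu(cpu) {
		struct hv_per_cpu_context *hv_cpu =
			per_cpu_ptr(hv_context.cpu_context, cpu);

		hv_free_shared_page(&hv_cpu->synic_event_page);
		hv_free_shared_page(&hv_cpu->synic_message_page);
		hv_free_shared_page(&hv_cpu->post_msg_page);
	}

	kfree(hv_context.hv_numa_map);
}

Whatever form the final fix takes, the key point from the review is the
ordering: set_memory_encrypted() must run before free_page(), and a
page that could not be re-encrypted must not be freed at all.
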

Patch

diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index 008234894d28..e09cea8f2f04 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -20,6 +20,7 @@
 #include <linux/interrupt.h>
 #include <clocksource/hyperv_timer.h>
 #include <asm/mshyperv.h>
+#include <linux/set_memory.h>
 #include "hyperv_vmbus.h"
 
 /* The one and only */
@@ -117,7 +118,7 @@ int hv_post_message(union hv_connection_id connection_id,
 
 int hv_synic_alloc(void)
 {
-	int cpu;
+	int cpu, ret;
 	struct hv_per_cpu_context *hv_cpu;
 
 	/*
@@ -168,9 +169,39 @@ int hv_synic_alloc(void)
 			pr_err("Unable to allocate post msg page\n");
 			goto err;
 		}
+
+		if (hv_isolation_type_en_snp()) {
+			ret = set_memory_decrypted((unsigned long)
+				hv_cpu->synic_message_page, 1);
+			if (ret)
+				goto err;
+
+			ret = set_memory_decrypted((unsigned long)
+				hv_cpu->synic_event_page, 1);
+			if (ret)
+				goto err_decrypt_event_page;
+
+			ret = set_memory_decrypted((unsigned long)
+				hv_cpu->post_msg_page, 1);
+			if (ret)
+				goto err_decrypt_msg_page;
+
+			memset(hv_cpu->synic_message_page, 0, PAGE_SIZE);
+			memset(hv_cpu->synic_event_page, 0, PAGE_SIZE);
+			memset(hv_cpu->post_msg_page, 0, PAGE_SIZE);
+		}
 	}
 
 	return 0;
+
+err_decrypt_msg_page:
+	set_memory_encrypted((unsigned long)
+		hv_cpu->synic_event_page, 1);
+
+err_decrypt_event_page:
+	set_memory_encrypted((unsigned long)
+		hv_cpu->synic_message_page, 1);
+
 err:
 	/*
 	 * Any memory allocations that succeeded will be freed when
@@ -191,6 +222,15 @@ void hv_synic_free(void)
 		free_page((unsigned long)hv_cpu->synic_event_page);
 		free_page((unsigned long)hv_cpu->synic_message_page);
 		free_page((unsigned long)hv_cpu->post_msg_page);
+
+		if (hv_isolation_type_en_snp()) {
+			set_memory_encrypted((unsigned long)
+				hv_cpu->synic_message_page, 1);
+			set_memory_encrypted((unsigned long)
+				hv_cpu->synic_event_page, 1);
+			set_memory_encrypted((unsigned long)
+				hv_cpu->post_msg_page, 1);
+		}
 	}
 
 	kfree(hv_context.hv_numa_map);