
KVM: SVM: Invert user pointer casting in SEV {en,de}crypt helpers

Message ID 20210506231542.2331138-1-seanjc@google.com (mailing list archive)
State New, archived
Series KVM: SVM: Invert user pointer casting in SEV {en,de}crypt helpers

Commit Message

Sean Christopherson May 6, 2021, 11:15 p.m. UTC
Invert the user pointer params for SEV's helpers for encrypting and
decrypting guest memory so that they take a pointer and cast to an
unsigned long as necessary, as opposed to doing the opposite.  Tagging a
non-pointer as __user is confusing and weird since a cast of some form
needs to occur to actually access the user data.  This also fixes Sparse
warnings triggered by directly consuming the unsigned longs, which are
"noderef" due to the __user tag.

Cc: Brijesh Singh <brijesh.singh@amd.com>
Cc: Tom Lendacky <thomas.lendacky@amd.com>
Cc: Ashish Kalra <ashish.kalra@amd.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
---
 arch/x86/kvm/svm/sev.c | 24 +++++++++++-------------
 1 file changed, 11 insertions(+), 13 deletions(-)
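
[Editor's note: the change is purely about which side carries the cast. The helpers now receive a void __user * and cast to unsigned long only where an integer is genuinely needed (the alignment checks), instead of receiving a __user-tagged integer and casting it back to a pointer at every copy_to_user()/copy_from_user() call. Below is a minimal, self-contained sketch of the two styles; the helper names and the -EINVAL path are made up for illustration only (the real code bounces unaligned buffers through an intermediate page rather than failing).]

#include <linux/kernel.h>
#include <linux/uaccess.h>

/*
 * Old style: a non-pointer tagged __user.  Every access needs a cast back
 * to a pointer, and Sparse warns when the integer is consumed directly
 * because the __user tag makes it a "noderef" expression.
 */
static int copy_out_old(unsigned long __user dst_uaddr, const void *src, int size)
{
	if (copy_to_user((void __user *)(uintptr_t)dst_uaddr, src, size))
		return -EFAULT;
	return 0;
}

/*
 * New style: carry the __user pointer as a pointer and cast to an integer
 * only where one is needed, e.g. for an alignment check.
 */
static int copy_out_new(void __user *dst_uaddr, const void *src, int size)
{
	if (!IS_ALIGNED((unsigned long)dst_uaddr, 16))
		return -EINVAL;	/* illustration only; the patch uses a bounce page */

	if (copy_to_user(dst_uaddr, src, size))
		return -EFAULT;
	return 0;
}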

Comments

Paolo Bonzini May 7, 2021, 7:40 a.m. UTC | #1
On 07/05/21 01:15, Sean Christopherson wrote:
> Invert the user pointer params for SEV's helpers for encrypting and
> decrypting guest memory so that they take a pointer and cast to an
> unsigned long as necessary, as opposed to doing the opposite.  Tagging a
> non-pointer as __user is confusing and weird since a cast of some form
> needs to occur to actually access the user data.  This also fixes Sparse
> warnings triggered by directly consuming the unsigned longs, which are
> "noderef" due to the __user tag.
> 
> Cc: Brijesh Singh <brijesh.singh@amd.com>
> Cc: Tom Lendacky <thomas.lendacky@amd.com>
> Cc: Ashish Kalra <ashish.kalra@amd.com>
> Signed-off-by: Sean Christopherson <seanjc@google.com>
> ---
>   arch/x86/kvm/svm/sev.c | 24 +++++++++++-------------
>   1 file changed, 11 insertions(+), 13 deletions(-)
> 
> diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
> index a9d8d6aafdb8..bba4544fbaba 100644
> --- a/arch/x86/kvm/svm/sev.c
> +++ b/arch/x86/kvm/svm/sev.c
> @@ -763,7 +763,7 @@ static int __sev_dbg_decrypt(struct kvm *kvm, unsigned long src_paddr,
>   }
>   
>   static int __sev_dbg_decrypt_user(struct kvm *kvm, unsigned long paddr,
> -				  unsigned long __user dst_uaddr,
> +				  void __user *dst_uaddr,
>   				  unsigned long dst_paddr,
>   				  int size, int *err)
>   {
> @@ -787,8 +787,7 @@ static int __sev_dbg_decrypt_user(struct kvm *kvm, unsigned long paddr,
>   
>   	if (tpage) {
>   		offset = paddr & 15;
> -		if (copy_to_user((void __user *)(uintptr_t)dst_uaddr,
> -				 page_address(tpage) + offset, size))
> +		if (copy_to_user(dst_uaddr, page_address(tpage) + offset, size))
>   			ret = -EFAULT;
>   	}
>   
> @@ -800,9 +799,9 @@ static int __sev_dbg_decrypt_user(struct kvm *kvm, unsigned long paddr,
>   }
>   
>   static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr,
> -				  unsigned long __user vaddr,
> +				  void __user *vaddr,
>   				  unsigned long dst_paddr,
> -				  unsigned long __user dst_vaddr,
> +				  void __user *dst_vaddr,
>   				  int size, int *error)
>   {
>   	struct page *src_tpage = NULL;
> @@ -810,13 +809,12 @@ static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr,
>   	int ret, len = size;
>   
>   	/* If source buffer is not aligned then use an intermediate buffer */
> -	if (!IS_ALIGNED(vaddr, 16)) {
> +	if (!IS_ALIGNED((unsigned long)vaddr, 16)) {
>   		src_tpage = alloc_page(GFP_KERNEL);
>   		if (!src_tpage)
>   			return -ENOMEM;
>   
> -		if (copy_from_user(page_address(src_tpage),
> -				(void __user *)(uintptr_t)vaddr, size)) {
> +		if (copy_from_user(page_address(src_tpage), vaddr, size)) {
>   			__free_page(src_tpage);
>   			return -EFAULT;
>   		}
> @@ -830,7 +828,7 @@ static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr,
>   	 *   - copy the source buffer in an intermediate buffer
>   	 *   - use the intermediate buffer as source buffer
>   	 */
> -	if (!IS_ALIGNED(dst_vaddr, 16) || !IS_ALIGNED(size, 16)) {
> +	if (!IS_ALIGNED((unsigned long)dst_vaddr, 16) || !IS_ALIGNED(size, 16)) {
>   		int dst_offset;
>   
>   		dst_tpage = alloc_page(GFP_KERNEL);
> @@ -855,7 +853,7 @@ static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr,
>   			       page_address(src_tpage), size);
>   		else {
>   			if (copy_from_user(page_address(dst_tpage) + dst_offset,
> -					   (void __user *)(uintptr_t)vaddr, size)) {
> +					   vaddr, size)) {
>   				ret = -EFAULT;
>   				goto e_free;
>   			}
> @@ -935,15 +933,15 @@ static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
>   		if (dec)
>   			ret = __sev_dbg_decrypt_user(kvm,
>   						     __sme_page_pa(src_p[0]) + s_off,
> -						     dst_vaddr,
> +						     (void __user *)dst_vaddr,
>   						     __sme_page_pa(dst_p[0]) + d_off,
>   						     len, &argp->error);
>   		else
>   			ret = __sev_dbg_encrypt_user(kvm,
>   						     __sme_page_pa(src_p[0]) + s_off,
> -						     vaddr,
> +						     (void __user *)vaddr,
>   						     __sme_page_pa(dst_p[0]) + d_off,
> -						     dst_vaddr,
> +						     (void __user *)dst_vaddr,
>   						     len, &argp->error);
>   
>   		sev_unpin_memory(kvm, src_p, n);
> 

Queued, thanks.

Paolo

Patch

diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index a9d8d6aafdb8..bba4544fbaba 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -763,7 +763,7 @@ static int __sev_dbg_decrypt(struct kvm *kvm, unsigned long src_paddr,
 }
 
 static int __sev_dbg_decrypt_user(struct kvm *kvm, unsigned long paddr,
-				  unsigned long __user dst_uaddr,
+				  void __user *dst_uaddr,
 				  unsigned long dst_paddr,
 				  int size, int *err)
 {
@@ -787,8 +787,7 @@ static int __sev_dbg_decrypt_user(struct kvm *kvm, unsigned long paddr,
 
 	if (tpage) {
 		offset = paddr & 15;
-		if (copy_to_user((void __user *)(uintptr_t)dst_uaddr,
-				 page_address(tpage) + offset, size))
+		if (copy_to_user(dst_uaddr, page_address(tpage) + offset, size))
 			ret = -EFAULT;
 	}
 
@@ -800,9 +799,9 @@ static int __sev_dbg_decrypt_user(struct kvm *kvm, unsigned long paddr,
 }
 
 static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr,
-				  unsigned long __user vaddr,
+				  void __user *vaddr,
 				  unsigned long dst_paddr,
-				  unsigned long __user dst_vaddr,
+				  void __user *dst_vaddr,
 				  int size, int *error)
 {
 	struct page *src_tpage = NULL;
@@ -810,13 +809,12 @@ static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr,
 	int ret, len = size;
 
 	/* If source buffer is not aligned then use an intermediate buffer */
-	if (!IS_ALIGNED(vaddr, 16)) {
+	if (!IS_ALIGNED((unsigned long)vaddr, 16)) {
 		src_tpage = alloc_page(GFP_KERNEL);
 		if (!src_tpage)
 			return -ENOMEM;
 
-		if (copy_from_user(page_address(src_tpage),
-				(void __user *)(uintptr_t)vaddr, size)) {
+		if (copy_from_user(page_address(src_tpage), vaddr, size)) {
 			__free_page(src_tpage);
 			return -EFAULT;
 		}
@@ -830,7 +828,7 @@ static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr,
 	 *   - copy the source buffer in an intermediate buffer
 	 *   - use the intermediate buffer as source buffer
 	 */
-	if (!IS_ALIGNED(dst_vaddr, 16) || !IS_ALIGNED(size, 16)) {
+	if (!IS_ALIGNED((unsigned long)dst_vaddr, 16) || !IS_ALIGNED(size, 16)) {
 		int dst_offset;
 
 		dst_tpage = alloc_page(GFP_KERNEL);
@@ -855,7 +853,7 @@ static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr,
 			       page_address(src_tpage), size);
 		else {
 			if (copy_from_user(page_address(dst_tpage) + dst_offset,
-					   (void __user *)(uintptr_t)vaddr, size)) {
+					   vaddr, size)) {
 				ret = -EFAULT;
 				goto e_free;
 			}
@@ -935,15 +933,15 @@ static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
 		if (dec)
 			ret = __sev_dbg_decrypt_user(kvm,
 						     __sme_page_pa(src_p[0]) + s_off,
-						     dst_vaddr,
+						     (void __user *)dst_vaddr,
 						     __sme_page_pa(dst_p[0]) + d_off,
 						     len, &argp->error);
 		else
 			ret = __sev_dbg_encrypt_user(kvm,
 						     __sme_page_pa(src_p[0]) + s_off,
-						     vaddr,
+						     (void __user *)vaddr,
 						     __sme_page_pa(dst_p[0]) + d_off,
-						     dst_vaddr,
+						     (void __user *)dst_vaddr,
 						     len, &argp->error);
 
 		sev_unpin_memory(kvm, src_p, n);