Message ID: a6a3826234bebe908290bb3f6de35356f44acbb9.1612398155.git.ashish.kalra@amd.com
State:      New, archived
Series:     Add AMD SEV guest live migration support
Hi Ashish,

Thank you for the patch! Perhaps something to improve:

[auto build test WARNING on v5.11-rc6]
[also build test WARNING on next-20210125]
[cannot apply to kvm/linux-next tip/x86/mm tip/x86/core]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch]

url:    https://github.com/0day-ci/linux/commits/Ashish-Kalra/KVM-SVM-Add-KVM_SEV-SEND_START-command/20210204-093647
base:   1048ba83fb1c00cd24172e23e8263972f6b5d9ac
config: i386-randconfig-r015-20210209 (attached as .config)
compiler: gcc-9 (Debian 9.3.0-15) 9.3.0
reproduce (this is a W=1 build):
        # https://github.com/0day-ci/linux/commit/c086cd5491c6b84adcffedc1bf798df758b18080
        git remote add linux-review https://github.com/0day-ci/linux
        git fetch --no-tags linux-review Ashish-Kalra/KVM-SVM-Add-KVM_SEV-SEND_START-command/20210204-093647
        git checkout c086cd5491c6b84adcffedc1bf798df758b18080
        # save the attached .config to linux build tree
        make W=1 ARCH=i386

If you fix the issue, kindly add following tag as appropriate
Reported-by: kernel test robot <lkp@intel.com>

All warnings (new ones prefixed by >>):

   arch/x86/kvm/svm/sev.c: In function 'handle_unencrypted_region':
>> arch/x86/kvm/svm/sev.c:945:18: warning: variable 'd_off' set but not used [-Wunused-but-set-variable]
     945 |  int len, s_off, d_off;
         |                  ^~~~~

vim +/d_off +945 arch/x86/kvm/svm/sev.c

   934	
   935	static int handle_unencrypted_region(struct kvm *kvm,
   936					     unsigned long vaddr,
   937					     unsigned long vaddr_end,
   938					     unsigned long dst_vaddr,
   939					     unsigned int size,
   940					     bool *is_decrypted)
   941	{
   942		struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
   943		struct page *page = NULL;
   944		gfn_t gfn_start, gfn_end;
 > 945		int len, s_off, d_off;
   946		int srcu_idx;
   947		int ret = 0;
   948	
   949		/* ensure hva_to_gfn translations remain valid */
   950		srcu_idx = srcu_read_lock(&kvm->srcu);
   951	
   952		if (!hva_to_gfn(kvm, vaddr, &gfn_start)) {
   953			srcu_read_unlock(&kvm->srcu, srcu_idx);
   954			return -EINVAL;
   955		}
   956	
   957		if (!hva_to_gfn(kvm, vaddr_end, &gfn_end)) {
   958			srcu_read_unlock(&kvm->srcu, srcu_idx);
   959			return -EINVAL;
   960		}
   961	
   962		if (sev->shared_pages_list_count) {
   963			if (is_unencrypted_region(gfn_start, gfn_end,
   964						  &sev->shared_pages_list)) {
   965				page = alloc_page(GFP_KERNEL);
   966				if (!page) {
   967					srcu_read_unlock(&kvm->srcu, srcu_idx);
   968					return -ENOMEM;
   969				}
   970	
   971				/*
   972				 * Since user buffer may not be page aligned, calculate the
   973				 * offset within the page.
   974				 */
   975				s_off = vaddr & ~PAGE_MASK;
   976				d_off = dst_vaddr & ~PAGE_MASK;
   977				len = min_t(size_t, (PAGE_SIZE - s_off), size);
   978	
   979				if (copy_from_user(page_address(page),
   980						   (void __user *)(uintptr_t)vaddr, len)) {
   981					__free_page(page);
   982					srcu_read_unlock(&kvm->srcu, srcu_idx);
   983					return -EFAULT;
   984				}
   985	
   986				if (copy_to_user((void __user *)(uintptr_t)dst_vaddr,
   987						 page_address(page), len)) {
   988					ret = -EFAULT;
   989				}
   990	
   991				__free_page(page);
   992				srcu_read_unlock(&kvm->srcu, srcu_idx);
   993				*is_decrypted = true;
   994				return ret;
   995			}
   996		}
   997		srcu_read_unlock(&kvm->srcu, srcu_idx);
   998		*is_decrypted = false;
   999		return ret;
  1000	}
  1001	

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org
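The warning itself is mechanical to resolve: d_off is assigned but never read, because copy_to_user() is passed the raw dst_vaddr and copes with destination misalignment on its own. A minimal sketch of the local cleanup (hunk offsets are illustrative, and this is not necessarily the follow-up the author actually posted; the d_off in sev_dbg_crypt() is a separate variable and is not flagged):

--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -942,7 +942,7 @@ static int handle_unencrypted_region(struct kvm *kvm,
 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
 	struct page *page = NULL;
 	gfn_t gfn_start, gfn_end;
-	int len, s_off, d_off;
+	int len, s_off;
 	int srcu_idx;
 	int ret = 0;
 
@@ -973,7 +973,6 @@ static int handle_unencrypted_region(struct kvm *kvm,
 			 * offset within the page.
 			 */
 			s_off = vaddr & ~PAGE_MASK;
-			d_off = dst_vaddr & ~PAGE_MASK;
 			len = min_t(size_t, (PAGE_SIZE - s_off), size);
 
 			if (copy_from_user(page_address(page),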
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 93f42b3d3e33..fa3fbbb73b33 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -888,6 +888,117 @@ static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr,
 	return ret;
 }
 
+static struct kvm_memory_slot *hva_to_memslot(struct kvm *kvm,
+					      unsigned long hva)
+{
+	struct kvm_memslots *slots = kvm_memslots(kvm);
+	struct kvm_memory_slot *memslot;
+
+	kvm_for_each_memslot(memslot, slots) {
+		if (hva >= memslot->userspace_addr &&
+		    hva < memslot->userspace_addr +
+		    (memslot->npages << PAGE_SHIFT))
+			return memslot;
+	}
+
+	return NULL;
+}
+
+static bool hva_to_gfn(struct kvm *kvm, unsigned long hva, gfn_t *gfn)
+{
+	struct kvm_memory_slot *memslot;
+	gpa_t gpa_offset;
+
+	memslot = hva_to_memslot(kvm, hva);
+	if (!memslot)
+		return false;
+
+	gpa_offset = hva - memslot->userspace_addr;
+	*gfn = ((memslot->base_gfn << PAGE_SHIFT) + gpa_offset) >> PAGE_SHIFT;
+
+	return true;
+}
+
+static bool is_unencrypted_region(gfn_t gfn_start, gfn_t gfn_end,
+				  struct list_head *head)
+{
+	struct shared_region *pos;
+
+	list_for_each_entry(pos, head, list)
+		if (gfn_start >= pos->gfn_start &&
+		    gfn_end <= pos->gfn_end)
+			return true;
+
+	return false;
+}
+
+static int handle_unencrypted_region(struct kvm *kvm,
+				     unsigned long vaddr,
+				     unsigned long vaddr_end,
+				     unsigned long dst_vaddr,
+				     unsigned int size,
+				     bool *is_decrypted)
+{
+	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+	struct page *page = NULL;
+	gfn_t gfn_start, gfn_end;
+	int len, s_off, d_off;
+	int srcu_idx;
+	int ret = 0;
+
+	/* ensure hva_to_gfn translations remain valid */
+	srcu_idx = srcu_read_lock(&kvm->srcu);
+
+	if (!hva_to_gfn(kvm, vaddr, &gfn_start)) {
+		srcu_read_unlock(&kvm->srcu, srcu_idx);
+		return -EINVAL;
+	}
+
+	if (!hva_to_gfn(kvm, vaddr_end, &gfn_end)) {
+		srcu_read_unlock(&kvm->srcu, srcu_idx);
+		return -EINVAL;
+	}
+
+	if (sev->shared_pages_list_count) {
+		if (is_unencrypted_region(gfn_start, gfn_end,
+					  &sev->shared_pages_list)) {
+			page = alloc_page(GFP_KERNEL);
+			if (!page) {
+				srcu_read_unlock(&kvm->srcu, srcu_idx);
+				return -ENOMEM;
+			}
+
+			/*
+			 * Since user buffer may not be page aligned, calculate the
+			 * offset within the page.
+			 */
+			s_off = vaddr & ~PAGE_MASK;
+			d_off = dst_vaddr & ~PAGE_MASK;
+			len = min_t(size_t, (PAGE_SIZE - s_off), size);
+
+			if (copy_from_user(page_address(page),
+					   (void __user *)(uintptr_t)vaddr, len)) {
+				__free_page(page);
+				srcu_read_unlock(&kvm->srcu, srcu_idx);
+				return -EFAULT;
+			}
+
+			if (copy_to_user((void __user *)(uintptr_t)dst_vaddr,
+					 page_address(page), len)) {
+				ret = -EFAULT;
+			}
+
+			__free_page(page);
+			srcu_read_unlock(&kvm->srcu, srcu_idx);
+			*is_decrypted = true;
+			return ret;
+		}
+	}
+	srcu_read_unlock(&kvm->srcu, srcu_idx);
+	*is_decrypted = false;
+	return ret;
+}
+
 static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
 {
 	unsigned long vaddr, vaddr_end, next_vaddr;
@@ -917,6 +1028,20 @@ static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
 	for (; vaddr < vaddr_end; vaddr = next_vaddr) {
 		int len, s_off, d_off;
 
+		if (dec) {
+			bool is_already_decrypted;
+
+			ret = handle_unencrypted_region(kvm,
+							vaddr,
+							vaddr_end,
+							dst_vaddr,
+							size,
+							&is_already_decrypted);
+
+			if (ret || is_already_decrypted)
+				goto already_decrypted;
+		}
+
 		/* lock userspace source and destination page */
 		src_p = sev_pin_memory(kvm, vaddr & PAGE_MASK, PAGE_SIZE, &n, 0);
 		if (IS_ERR(src_p))
@@ -961,6 +1086,7 @@ static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
 	sev_unpin_memory(kvm, src_p, n);
 	sev_unpin_memory(kvm, dst_p, n);
 
+already_decrypted:
 	if (ret)
 		goto err;
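For readers tracing the offset arithmetic above: because the user buffer need not be page aligned, each pass of the copy loop handles only the bytes up to the next page boundary, and len = min_t(size_t, (PAGE_SIZE - s_off), size) caps the first chunk. A standalone userspace sketch of the same computation (the 4 KiB page size, starting address, and request size are hypothetical, chosen only for illustration):

	#include <stdio.h>
	#include <stddef.h>

	#define PAGE_SIZE 4096UL
	#define PAGE_MASK (~(PAGE_SIZE - 1))

	/*
	 * Per-chunk length, mirroring the kernel's
	 * len = min_t(size_t, (PAGE_SIZE - s_off), size):
	 * copy from the current in-page offset up to the page
	 * boundary, or less if the request ends sooner.
	 */
	static size_t chunk_len(unsigned long vaddr, size_t size)
	{
		size_t s_off = vaddr & ~PAGE_MASK;  /* offset inside the page */
		size_t room = PAGE_SIZE - s_off;    /* bytes left in this page */

		return size < room ? size : room;
	}

	int main(void)
	{
		/* hypothetical 10000-byte request starting 100 bytes into a page */
		unsigned long vaddr = 0x7f0000001000UL + 100;
		size_t size = 10000;

		while (size) {
			size_t len = chunk_len(vaddr, size);

			printf("copy %zu bytes at 0x%lx\n", len, vaddr);
			vaddr += len;
			size -= len;
		}
		return 0;
	}

This prints chunks of 3996, 4096, and 1908 bytes: only the first and last chunks are partial, which is the pattern the surrounding copy loop in sev_dbg_crypt() relies on as it advances next_vaddr page by page.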