@@ -5684,6 +5684,103 @@ err_1:
return ret;
}
+static int sev_dbg_encrypt(struct kvm *kvm,
+ struct kvm_sev_dbg_encrypt __user *argp,
+ int *psp_ret)
+{
+ void *data;
+ int len, ret, d_off;
+ struct page **inpages;
+ struct psp_data_dbg *encrypt;
+ struct kvm_sev_dbg_encrypt debug;
+ unsigned long src_addr, dst_addr;
+
+ if (!kvm_sev_guest())
+ return -ENOTTY;
+
+ if (copy_from_user(&debug, argp, sizeof(*argp)))
+ return -EFAULT;
+
+ if (debug.length > PAGE_SIZE)
+ return -EINVAL;
+
+ len = debug.length;
+ src_addr = debug.src_addr;
+ dst_addr = debug.dst_addr;
+
+ inpages = kzalloc(1 * sizeof(struct page *), GFP_KERNEL);
+ if (!inpages)
+ return -ENOMEM;
+
+ /* pin the guest destination virtual address */
+ down_read(&current->mm->mmap_sem);
+ ret = get_user_pages(dst_addr, 1, 1, 0, inpages, NULL);
+ up_read(&current->mm->mmap_sem);
+ if (ret < 0)
+ goto err_1;
+
+ ret = -ENOMEM;
+ encrypt = kzalloc(sizeof(*encrypt), GFP_KERNEL);
+ if (!encrypt)
+ goto err_2;
+
+ data = (void *) get_zeroed_page(GFP_KERNEL);
+ if (!data)
+ goto err_3;
+
+ encrypt->hdr.buffer_len = sizeof(*encrypt);
+ encrypt->handle = kvm_sev_handle();
+
+ if ((len & 15) || (dst_addr & 15)) {
+ /* if destination address and length are not 16-byte
+ * aligned then:
+ * a) decrypt destination page into temporary buffer
+ * b) copy source data into temporary buffer at correct offset
+ * c) encrypt temporary buffer
+ */
+ ret = __sev_dbg_decrypt_page(kvm, dst_addr, data, psp_ret);
+ if (ret)
+ goto err_4;
+
+ d_off = dst_addr & (PAGE_SIZE - 1);
+ ret = -EFAULT;
+ if (copy_from_user(data + d_off,
+ (uint8_t *)debug.src_addr, len))
+ goto err_4;
+
+ encrypt->length = PAGE_SIZE;
+ encrypt->src_addr = __pa(data) | sme_me_mask;
+ encrypt->dst_addr = __sev_page_pa(inpages[0]);
+ } else {
+ ret = -EFAULT;
+ if (copy_from_user(data, (uint8_t *)debug.src_addr, len))
+ goto err_4;
+
+ d_off = dst_addr & (PAGE_SIZE - 1);
+ encrypt->length = len;
+ encrypt->src_addr = __pa(data) | sme_me_mask;
+ encrypt->dst_addr = __sev_page_pa(inpages[0]);
+ encrypt->dst_addr += d_off;
+ }
+
+ ret = psp_dbg_encrypt(encrypt, psp_ret);
+ if (ret)
+ printk(KERN_ERR "SEV: DEBUG_ENCRYPT: [%#lx=>%#lx+%#x] "
+ "%d (%#010x)\n", src_addr, dst_addr, len,
+ ret, *psp_ret);
+
+err_4:
+ free_page((unsigned long)data);
+err_3:
+ kfree(encrypt);
+err_2:
+ release_pages(inpages, 1, 0);
+err_1:
+ kfree(inpages);
+
+ return ret;
+}
+
static int amd_sev_issue_cmd(struct kvm *kvm,
struct kvm_sev_issue_cmd __user *user_data)
{
@@ -5719,6 +5814,11 @@ static int amd_sev_issue_cmd(struct kvm *kvm,
&arg.ret_code);
break;
}
+ case KVM_SEV_DBG_ENCRYPT: {
+ r = sev_dbg_encrypt(kvm, (void *)arg.opaque,
+ &arg.ret_code);
+ break;
+ }
default:
break;
}
The command encrypts a region of guest memory for debugging purposes.
For more information see [1], section 7.2.

[1] http://support.amd.com/TechDocs/55766_SEV-KM%20API_Spec.pdf

Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
---
 arch/x86/kvm/svm.c | 102 +++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 102 insertions(+)
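
For context, a rough userspace sketch of how this command might be driven is
shown below. It is not part of the patch: it assumes the KVM_SEV_ISSUE_CMD vm
ioctl, the KVM_SEV_DBG_ENCRYPT command id and the struct kvm_sev_issue_cmd /
struct kvm_sev_dbg_encrypt uapi definitions introduced earlier in this series
are available through <linux/kvm.h>, with the field names used by the kernel
code above (src_addr, dst_addr, length, cmd, opaque, ret_code). The helper
name dbg_encrypt_region() is made up for illustration.

/*
 * Hypothetical userspace sketch, not part of this patch.  Assumes the
 * KVM_SEV_ISSUE_CMD ioctl and the SEV uapi structures from earlier
 * patches in this series are exported via <linux/kvm.h>.
 */
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int dbg_encrypt_region(int vm_fd, const void *plaintext,
		       uint64_t dst_gva, uint32_t len)
{
	struct kvm_sev_dbg_encrypt dbg = {
		.src_addr = (uint64_t)(uintptr_t)plaintext, /* source in our address space */
		.dst_addr = dst_gva,                        /* guest address to encrypt into */
		.length   = len,                            /* kernel rejects length > PAGE_SIZE */
	};
	struct kvm_sev_issue_cmd arg = {
		.cmd    = KVM_SEV_DBG_ENCRYPT,
		.opaque = (uint64_t)(uintptr_t)&dbg,        /* command-specific argument */
	};
	int r = ioctl(vm_fd, KVM_SEV_ISSUE_CMD, &arg);      /* issued on the VM fd */

	if (r)
		fprintf(stderr, "DBG_ENCRYPT failed: %d (psp %#x)\n",
			r, arg.ret_code);
	return r;
}

As the kernel side above shows, when dst_addr or length is not 16-byte
aligned the destination page is first decrypted into a scratch buffer,
patched at the page offset and then re-encrypted as a whole page, so a
caller only has to keep the region within a single page.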