
[v10,11/16] KVM: x86: Introduce KVM_SET_SHARED_PAGES_LIST ioctl

Message ID 89a3e3218f3b08be562f68a9c0d736030fff9b1b.1612398155.git.ashish.kalra@amd.com (mailing list archive)
State New, archived
Series Add AMD SEV guest live migration support

Commit Message

Kalra, Ashish Feb. 4, 2021, 12:39 a.m. UTC
From: Brijesh Singh <brijesh.singh@amd.com>

The ioctl is used to set up the shared pages list for an
incoming guest.
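
For illustration, a destination VMM could invoke the ioctl roughly as in
the sketch below. This is only a sketch: it assumes the uapi struct
kvm_shared_pages_list exported earlier in this series has the
pnents/buffer/size layout shown in the documentation hunk, and it uses a
hypothetical { gfn_start, gfn_end } entry layout mirroring what
svm_set_shared_pages_list() copies; the real entry type is defined
elsewhere in the series.

  #include <stdint.h>
  #include <sys/ioctl.h>
  #include <linux/kvm.h>

  /* Hypothetical mirror of the region entries consumed by the kernel. */
  struct shared_region_entry {
          uint64_t gfn_start;
          uint64_t gfn_end;
  };

  /* Import the shared/unencrypted regions list received from the source. */
  static int import_shared_pages_list(int vm_fd,
                                      struct shared_region_entry *ents,
                                      int nents)
  {
          struct kvm_shared_pages_list list = {
                  .pnents = &nents,                /* number of entries */
                  .buffer = ents,                  /* entry array */
                  .size   = nents * sizeof(*ents), /* bytes; not read by the SET path below */
          };

          return ioctl(vm_fd, KVM_SET_SHARED_PAGES_LIST, &list);
  }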

Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: "Radim Krčmář" <rkrcmar@redhat.com>
Cc: Joerg Roedel <joro@8bytes.org>
Cc: Borislav Petkov <bp@suse.de>
Cc: Tom Lendacky <thomas.lendacky@amd.com>
Cc: x86@kernel.org
Cc: kvm@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
Co-developed-by: Ashish Kalra <ashish.kalra@amd.com>
Signed-off-by: Ashish Kalra <ashish.kalra@amd.com>
---
 Documentation/virt/kvm/api.rst  | 20 +++++++++-
 arch/x86/include/asm/kvm_host.h |  2 +
 arch/x86/kvm/svm/sev.c          | 70 +++++++++++++++++++++++++++++++++
 arch/x86/kvm/svm/svm.c          |  1 +
 arch/x86/kvm/svm/svm.h          |  1 +
 arch/x86/kvm/x86.c              | 12 ++++++
 include/uapi/linux/kvm.h        |  1 +
 7 files changed, 106 insertions(+), 1 deletion(-)

Patch

diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst
index 59ef537c0cdd..efb4720733b4 100644
--- a/Documentation/virt/kvm/api.rst
+++ b/Documentation/virt/kvm/api.rst
@@ -4701,6 +4701,25 @@  This list can be used during the guest migration. If the page
 is private then the userspace need to use SEV migration commands to transmit
 the page.
 
+4.126 KVM_SET_SHARED_PAGES_LIST (vm ioctl)
+------------------------------------------
+
+:Capability: basic
+:Architectures: x86
+:Type: vm ioctl
+:Parameters: struct kvm_shared_pages_list (in)
+:Returns: 0 on success, -1 on error
+
+/* for KVM_SET_SHARED_PAGES_LIST */
+struct kvm_shared_pages_list {
+	int __user *pnents;
+	void __user *buffer;
+	__u32 size;
+};
+
+During guest live migration, the outgoing guest exports its list of
+unencrypted memory regions; KVM_SET_SHARED_PAGES_LIST can then be used to
+build the shared/unencrypted regions list for the incoming guest.
 
 4.125 KVM_S390_PV_COMMAND
 -------------------------
@@ -4855,7 +4874,6 @@  into user space.
 If a vCPU is in running state while this ioctl is invoked, the vCPU may
 experience inconsistent filtering behavior on MSR accesses.
 
-
 5. The kvm_run structure
 ========================
 
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index cd354d830e13..f05b812b69bd 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1305,6 +1305,8 @@  struct kvm_x86_ops {
 				  unsigned long sz, unsigned long mode);
 	int (*get_shared_pages_list)(struct kvm *kvm,
 				     struct kvm_shared_pages_list *list);
+	int (*set_shared_pages_list)(struct kvm *kvm,
+				     struct kvm_shared_pages_list *list);
 };
 
 struct kvm_x86_nested_ops {
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 701d74c8b15b..b0d324aed515 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -1671,6 +1671,76 @@  int svm_get_shared_pages_list(struct kvm *kvm,
 	return ret;
 }
 
+int svm_set_shared_pages_list(struct kvm *kvm,
+			      struct kvm_shared_pages_list *list)
+{
+	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+	struct shared_region_array_entry *array;
+	struct shared_region *shrd_region;
+	int ret, nents, i;
+	unsigned long sz;
+
+	if (!sev_guest(kvm))
+		return -ENOTTY;
+
+	if (get_user(nents, list->pnents))
+		return -EFAULT;
+
+	/* special case of resetting the shared pages list */
+	if (!list->buffer || !nents) {
+		struct shared_region *pos, *q;
+
+		mutex_lock(&kvm->lock);
+		list_for_each_entry_safe(pos, q, &sev->shared_pages_list, list)
+			kfree(pos);
+		INIT_LIST_HEAD(&sev->shared_pages_list);
+		sev->shared_pages_list_count = 0;
+		mutex_unlock(&kvm->lock);
+		return 0;
+	}
+
+	sz = nents * sizeof(struct shared_region_array_entry);
+	array = kmalloc(sz, GFP_KERNEL);
+	if (!array)
+		return -ENOMEM;
+
+	ret = -EFAULT;
+	if (copy_from_user(array, list->buffer, sz))
+		goto out;
+
+	ret = 0;
+	mutex_lock(&kvm->lock);
+	for (i = 0; i < nents; i++) {
+		shrd_region = kzalloc(sizeof(*shrd_region), GFP_KERNEL_ACCOUNT);
+		if (!shrd_region) {
+			struct shared_region *pos, *q;
+
+			/* Free previously allocated entries */
+			list_for_each_entry_safe(pos, q,
+					&sev->shared_pages_list, list) {
+				list_del(&pos->list);
+				kfree(pos);
+			}
+
+			mutex_unlock(&kvm->lock);
+			ret = -ENOMEM;
+			goto out;
+		}
+
+		shrd_region->gfn_start = array[i].gfn_start;
+		shrd_region->gfn_end = array[i].gfn_end;
+		list_add_tail(&shrd_region->list,
+			      &sev->shared_pages_list);
+	}
+	sev->shared_pages_list_count = nents;
+	mutex_unlock(&kvm->lock);
+
+out:
+	kfree(array);
+
+	return ret;
+}
+
 int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
 {
 	struct kvm_sev_cmd sev_cmd;
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 533ce47ff158..58f89f83caab 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -4539,6 +4539,7 @@  static struct kvm_x86_ops svm_x86_ops __initdata = {
 
 	.page_enc_status_hc = svm_page_enc_status_hc,
 	.get_shared_pages_list = svm_get_shared_pages_list,
+	.set_shared_pages_list = svm_set_shared_pages_list,
 };
 
 static struct kvm_x86_init_ops svm_init_ops __initdata = {
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 6a777c61373c..066ca2a9f1e6 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -478,6 +478,7 @@  void sync_nested_vmcb_control(struct vcpu_svm *svm);
 int svm_page_enc_status_hc(struct kvm *kvm, unsigned long gpa,
 			   unsigned long npages, unsigned long enc);
 int svm_get_shared_pages_list(struct kvm *kvm, struct kvm_shared_pages_list *list);
+int svm_set_shared_pages_list(struct kvm *kvm, struct kvm_shared_pages_list *list);
 
 extern struct kvm_x86_nested_ops svm_nested_ops;
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index acfec2ae1402..c119715c1034 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5731,6 +5731,18 @@  long kvm_arch_vm_ioctl(struct file *filp,
 			r = kvm_x86_ops.get_shared_pages_list(kvm, &list);
 		break;
 	}
+	case KVM_SET_SHARED_PAGES_LIST: {
+		struct kvm_shared_pages_list list;
+
+		r = -EFAULT;
+		if (copy_from_user(&list, argp, sizeof(list)))
+			goto out;
+
+		r = -ENOTTY;
+		if (kvm_x86_ops.set_shared_pages_list)
+			r = kvm_x86_ops.set_shared_pages_list(kvm, &list);
+		break;
+	}
 	default:
 		r = -ENOTTY;
 	}
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 0529ba80498a..f704b08c97f2 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -1573,6 +1573,7 @@  struct kvm_pv_cmd {
 #define KVM_RESET_DIRTY_RINGS		_IO(KVMIO, 0xc7)
 
 #define KVM_GET_SHARED_PAGES_LIST	_IOW(KVMIO, 0xc8, struct kvm_shared_pages_list)
+#define KVM_SET_SHARED_PAGES_LIST	_IOW(KVMIO, 0xc9, struct kvm_shared_pages_list)
 
 /* Secure Encrypted Virtualization command */
 enum sev_cmd_id {
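
The special case at the top of svm_set_shared_pages_list() (a NULL buffer
or a zero entry count) lets userspace drop a previously imported list. A
minimal sketch of that reset call, under the same uapi assumptions as the
example in the commit message above:

  #include <stddef.h>
  #include <sys/ioctl.h>
  #include <linux/kvm.h>

  /* Clear the destination's shared pages list: zero entries, no buffer. */
  static int reset_shared_pages_list(int vm_fd)
  {
          int nents = 0;
          struct kvm_shared_pages_list list = {
                  .pnents = &nents,
                  .buffer = NULL,
                  .size   = 0,
          };

          return ioctl(vm_fd, KVM_SET_SHARED_PAGES_LIST, &list);
  }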