@@ -4701,6 +4701,25 @@ This list can be used during the guest migration. If the page
is private then the userspace need to use SEV migration commands to transmit
the page.
+4.126 KVM_SET_SHARED_PAGES_LIST (vm ioctl)
+------------------------------------------
+
+:Capability: basic
+:Architectures: x86
+:Type: vm ioctl
+:Parameters: struct kvm_shared_pages_list (in)
+:Returns: 0 on success, -1 on error
+
+/* for KVM_SET_SHARED_PAGES_LIST */
+struct kvm_shared_pages_list {
+ int __user *pnents;
+ void __user *buffer;
+ __u32 size;
+};
+
+During guest live migration the outgoing guest exports its unencrypted
+memory regions list; KVM_SET_SHARED_PAGES_LIST can then be used to build the
+shared/unencrypted regions list for an incoming guest.
4.125 KVM_S390_PV_COMMAND
-------------------------
@@ -4855,7 +4874,6 @@ into user space.
If a vCPU is in running state while this ioctl is invoked, the vCPU may
experience inconsistent filtering behavior on MSR accesses.
-
5. The kvm_run structure
========================
@@ -1305,6 +1305,8 @@ struct kvm_x86_ops {
unsigned long sz, unsigned long mode);
int (*get_shared_pages_list)(struct kvm *kvm,
struct kvm_shared_pages_list *list);
+ int (*set_shared_pages_list)(struct kvm *kvm,
+ struct kvm_shared_pages_list *list);
};
struct kvm_x86_nested_ops {
@@ -1671,6 +1671,76 @@ int svm_get_shared_pages_list(struct kvm *kvm,
return ret;
}
+/* Free every entry on the shared pages list; caller must hold kvm->lock. */
+static void sev_clear_shared_pages_list(struct kvm_sev_info *sev)
+{
+	struct shared_region *pos, *next;
+
+	/*
+	 * The _safe variant is required: each node is freed while the
+	 * iteration is still walking the list.
+	 */
+	list_for_each_entry_safe(pos, next, &sev->shared_pages_list, list) {
+		list_del(&pos->list);
+		kfree(pos);
+	}
+	sev->shared_pages_list_count = 0;
+}
+
+/*
+ * Replace the SEV guest's shared/unencrypted regions list with the array
+ * supplied by userspace (used on the migration target to rebuild the list
+ * exported by the source).
+ *
+ * A NULL buffer or a zero entry count clears the list.  Returns 0 on
+ * success or a negative errno (-ENOTTY for non-SEV guests, -EFAULT on
+ * bad user pointers, -EINVAL on a bogus entry count, -ENOMEM on
+ * allocation failure).
+ */
+int svm_set_shared_pages_list(struct kvm *kvm,
+			      struct kvm_shared_pages_list *list)
+{
+	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+	struct shared_region_array_entry *array;
+	struct shared_region *shrd_region;
+	int ret, nents, i;
+	unsigned long sz;
+
+	if (!sev_guest(kvm))
+		return -ENOTTY;
+
+	if (get_user(nents, list->pnents))
+		return -EFAULT;
+
+	/* special case of resetting the shared pages list */
+	if (!list->buffer || !nents) {
+		mutex_lock(&kvm->lock);
+		sev_clear_shared_pages_list(sev);
+		mutex_unlock(&kvm->lock);
+
+		return 0;
+	}
+
+	/*
+	 * nents is user-controlled: reject negative counts and counts whose
+	 * array size would overflow the multiplication or exceed the size
+	 * of the buffer userspace claims to have supplied.
+	 */
+	if (nents < 0 ||
+	    (size_t)nents > list->size / sizeof(struct shared_region_array_entry))
+		return -EINVAL;
+
+	sz = nents * sizeof(struct shared_region_array_entry);
+	array = kmalloc(sz, GFP_KERNEL);
+	if (!array)
+		return -ENOMEM;
+
+	ret = -EFAULT;
+	if (copy_from_user(array, list->buffer, sz))
+		goto out;
+
+	ret = 0;
+	mutex_lock(&kvm->lock);
+	/*
+	 * Replace, not append: drop any stale entries first so the final
+	 * shared_pages_list_count matches what is actually on the list.
+	 */
+	sev_clear_shared_pages_list(sev);
+	for (i = 0; i < nents; i++) {
+		shrd_region = kzalloc(sizeof(*shrd_region), GFP_KERNEL_ACCOUNT);
+		if (!shrd_region) {
+			/* Unwind the partially built list. */
+			sev_clear_shared_pages_list(sev);
+			mutex_unlock(&kvm->lock);
+			ret = -ENOMEM;
+			goto out;
+		}
+
+		shrd_region->gfn_start = array[i].gfn_start;
+		shrd_region->gfn_end = array[i].gfn_end;
+		list_add_tail(&shrd_region->list, &sev->shared_pages_list);
+	}
+	sev->shared_pages_list_count = nents;
+	mutex_unlock(&kvm->lock);
+
+out:
+	kfree(array);
+
+	return ret;
+}
+
int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
{
struct kvm_sev_cmd sev_cmd;
@@ -4539,6 +4539,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
.page_enc_status_hc = svm_page_enc_status_hc,
.get_shared_pages_list = svm_get_shared_pages_list,
+ .set_shared_pages_list = svm_set_shared_pages_list,
};
static struct kvm_x86_init_ops svm_init_ops __initdata = {
@@ -478,6 +478,7 @@ void sync_nested_vmcb_control(struct vcpu_svm *svm);
int svm_page_enc_status_hc(struct kvm *kvm, unsigned long gpa,
unsigned long npages, unsigned long enc);
int svm_get_shared_pages_list(struct kvm *kvm, struct kvm_shared_pages_list *list);
+int svm_set_shared_pages_list(struct kvm *kvm, struct kvm_shared_pages_list *list);
extern struct kvm_x86_nested_ops svm_nested_ops;
@@ -5731,6 +5731,18 @@ long kvm_arch_vm_ioctl(struct file *filp,
r = kvm_x86_ops.get_shared_pages_list(kvm, &list);
break;
}
+ case KVM_SET_SHARED_PAGES_LIST: {
+ struct kvm_shared_pages_list list;
+
+ r = -EFAULT;
+ if (copy_from_user(&list, argp, sizeof(list)))
+ goto out;
+
+ r = -ENOTTY;
+ if (kvm_x86_ops.set_shared_pages_list)
+ r = kvm_x86_ops.set_shared_pages_list(kvm, &list);
+ break;
+ }
default:
r = -ENOTTY;
}
@@ -1573,6 +1573,7 @@ struct kvm_pv_cmd {
#define KVM_RESET_DIRTY_RINGS _IO(KVMIO, 0xc7)
#define KVM_GET_SHARED_PAGES_LIST _IOW(KVMIO, 0xc8, struct kvm_shared_pages_list)
+#define KVM_SET_SHARED_PAGES_LIST _IOW(KVMIO, 0xc9, struct kvm_shared_pages_list)
/* Secure Encrypted Virtualization command */
enum sev_cmd_id {