@@ -4698,6 +4698,28 @@ or shared. The bitmap can be used during the guest migration. If the page
is private then the userspace need to use SEV migration commands to transmit
the page.
+4.126 KVM_SET_PAGE_ENC_BITMAP (vm ioctl)
+----------------------------------------
+
+:Capability: basic
+:Architectures: x86
+:Type: vm ioctl
+:Parameters: struct kvm_page_enc_bitmap (in)
+:Returns: 0 on success, -1 on error
+
+::
+
+  /* for KVM_SET_PAGE_ENC_BITMAP */
+  struct kvm_page_enc_bitmap {
+	__u64 start_gfn;
+	__u64 num_pages;
+	union {
+		void __user *enc_bitmap; /* one bit per page */
+		__u64 padding2;
+	};
+  };
+
+During guest live migration the outgoing guest exports its page encryption
+bitmap, and KVM_SET_PAGE_ENC_BITMAP can then be used to rebuild the page
+encryption bitmap for the incoming guest. If enc_bitmap is NULL, the bitmap
+is reset to its default state with all guest pages marked as encrypted.
+
4.125 KVM_S390_PV_COMMAND
-------------------------
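For context, here is a minimal userspace sketch of how a migration target might
install a received page encryption bitmap through this ioctl. It is not part of
the patch: vm_fd, recv_bitmap and num_pages are hypothetical placeholders, and
it assumes a linux/kvm.h that already carries the uapi additions below.

	#include <stdint.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/*
	 * Install the bitmap received from the migration source for
	 * GFNs [0, num_pages); one bit per guest page.
	 */
	static int install_enc_bitmap(int vm_fd, void *recv_bitmap,
				      uint64_t num_pages)
	{
		struct kvm_page_enc_bitmap bmap;

		memset(&bmap, 0, sizeof(bmap));
		bmap.start_gfn = 0;
		bmap.num_pages = num_pages;
		bmap.enc_bitmap = recv_bitmap;

		/* 0 on success, -1 with errno set on error */
		return ioctl(vm_fd, KVM_SET_PAGE_ENC_BITMAP, &bmap);
	}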
@@ -1286,6 +1286,8 @@ struct kvm_x86_ops {
unsigned long sz, unsigned long mode);
int (*get_page_enc_bitmap)(struct kvm *kvm,
struct kvm_page_enc_bitmap *bmap);
+	int (*set_page_enc_bitmap)(struct kvm *kvm,
+				   struct kvm_page_enc_bitmap *bmap);
};
struct kvm_x86_nested_ops {
@@ -1518,6 +1518,56 @@ int svm_get_page_enc_bitmap(struct kvm *kvm,
return ret;
}
+int svm_set_page_enc_bitmap(struct kvm *kvm,
+			    struct kvm_page_enc_bitmap *bmap)
+{
+	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+	unsigned long gfn_start, gfn_end;
+	unsigned long *bitmap;
+	unsigned long sz;
+	int ret;
+
+	if (!sev_guest(kvm))
+		return -ENOTTY;
+	/* special case of resetting the complete bitmap */
+	if (!bmap->enc_bitmap) {
+		mutex_lock(&kvm->lock);
+		/* by default all pages are marked encrypted */
+		if (sev->page_enc_bmap_size)
+			bitmap_fill(sev->page_enc_bmap,
+				    sev->page_enc_bmap_size);
+		mutex_unlock(&kvm->lock);
+		return 0;
+	}
+
+	gfn_start = bmap->start_gfn;
+	gfn_end = gfn_start + bmap->num_pages;
+
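+	/* user buffer size in bytes: num_pages bits rounded up to whole longs */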
+	sz = ALIGN(bmap->num_pages, BITS_PER_LONG) / 8;
+	bitmap = kmalloc(sz, GFP_KERNEL);
+	if (!bitmap)
+		return -ENOMEM;
+
+	ret = -EFAULT;
+	if (copy_from_user(bitmap, bmap->enc_bitmap, sz))
+		goto out;
+
+	mutex_lock(&kvm->lock);
+	ret = sev_resize_page_enc_bitmap(kvm, gfn_end);
+	if (ret)
+		goto unlock;
+
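+	/*
+	 * The destination offset is in whole words (BIT_WORD) while the
+	 * length is in bits, so start_gfn is effectively assumed to be
+	 * aligned to BITS_PER_LONG.
+	 */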
+	bitmap_copy(sev->page_enc_bmap + BIT_WORD(gfn_start), bitmap,
+		    (gfn_end - gfn_start));
+
+	ret = 0;
+unlock:
+	mutex_unlock(&kvm->lock);
+out:
+	kfree(bitmap);
+	return ret;
+}
+
int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
{
struct kvm_sev_cmd sev_cmd;
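Note that, as implemented above, a NULL enc_bitmap pointer is treated as a
request to reset the bitmap to its default all-encrypted state. A minimal
userspace sketch of that path, assuming the same headers and hypothetical
vm_fd as in the earlier sketch:

	/*
	 * Reset to the default state: a zeroed struct leaves enc_bitmap
	 * NULL, which the handler above treats as "mark every guest page
	 * as encrypted again".
	 */
	static int reset_enc_bitmap(int vm_fd)
	{
		struct kvm_page_enc_bitmap reset = { 0 };

		return ioctl(vm_fd, KVM_SET_PAGE_ENC_BITMAP, &reset);
	}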
@@ -4315,6 +4315,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
.page_enc_status_hc = svm_page_enc_status_hc,
.get_page_enc_bitmap = svm_get_page_enc_bitmap,
+	.set_page_enc_bitmap = svm_set_page_enc_bitmap,
};
static struct kvm_x86_init_ops svm_init_ops __initdata = {
@@ -414,6 +414,7 @@ void sync_nested_vmcb_control(struct vcpu_svm *svm);
int svm_page_enc_status_hc(struct kvm *kvm, unsigned long gpa,
unsigned long npages, unsigned long enc);
int svm_get_page_enc_bitmap(struct kvm *kvm, struct kvm_page_enc_bitmap *bmap);
+int svm_set_page_enc_bitmap(struct kvm *kvm, struct kvm_page_enc_bitmap *bmap);
extern struct kvm_x86_nested_ops svm_nested_ops;
@@ -5707,6 +5707,18 @@ long kvm_arch_vm_ioctl(struct file *filp,
r = kvm_x86_ops.get_page_enc_bitmap(kvm, &bitmap);
break;
}
+	case KVM_SET_PAGE_ENC_BITMAP: {
+		struct kvm_page_enc_bitmap bitmap;
+
+		r = -EFAULT;
+		if (copy_from_user(&bitmap, argp, sizeof(bitmap)))
+			goto out;
+
+		r = -ENOTTY;
+		if (kvm_x86_ops.set_page_enc_bitmap)
+			r = kvm_x86_ops.set_page_enc_bitmap(kvm, &bitmap);
+		break;
+	}
default:
r = -ENOTTY;
}
@@ -1574,6 +1574,7 @@ struct kvm_pv_cmd {
#define KVM_RESET_DIRTY_RINGS _IO(KVMIO, 0xc7)
#define KVM_GET_PAGE_ENC_BITMAP _IOW(KVMIO, 0xc8, struct kvm_page_enc_bitmap)
+#define KVM_SET_PAGE_ENC_BITMAP _IOW(KVMIO, 0xc9, struct kvm_page_enc_bitmap)
/* Secure Encrypted Virtualization command */
enum sev_cmd_id {