--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1463,10 +1463,10 @@ int kvm_mmu_init(u32 *hyp_va_bits)
}
void kvm_arch_commit_memory_region(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem,
- struct kvm_memory_slot *old,
- const struct kvm_memory_slot *new,
- enum kvm_mr_change change)
+ const struct kvm_userspace_memory_region_ext *mem,
+ struct kvm_memory_slot *old,
+ const struct kvm_memory_slot *new,
+ enum kvm_mr_change change)
{
/*
* At this point memslot has been committed and there is an
@@ -1486,9 +1486,9 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
}
int kvm_arch_prepare_memory_region(struct kvm *kvm,
- struct kvm_memory_slot *memslot,
- const struct kvm_userspace_memory_region *mem,
- enum kvm_mr_change change)
+ struct kvm_memory_slot *memslot,
+ const struct kvm_userspace_memory_region_ext *mem,
+ enum kvm_mr_change change)
{
hva_t hva = mem->userspace_addr;
hva_t reg_end = hva + mem->memory_size;
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -233,18 +233,18 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
}
int kvm_arch_prepare_memory_region(struct kvm *kvm,
- struct kvm_memory_slot *memslot,
- const struct kvm_userspace_memory_region *mem,
- enum kvm_mr_change change)
+ struct kvm_memory_slot *memslot,
+ const struct kvm_userspace_memory_region_ext *mem,
+ enum kvm_mr_change change)
{
return 0;
}
void kvm_arch_commit_memory_region(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem,
- struct kvm_memory_slot *old,
- const struct kvm_memory_slot *new,
- enum kvm_mr_change change)
+ const struct kvm_userspace_memory_region_ext *mem,
+ struct kvm_memory_slot *old,
+ const struct kvm_memory_slot *new,
+ enum kvm_mr_change change)
{
int needs_flush;
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -200,14 +200,14 @@ extern void kvmppc_core_destroy_vm(struct kvm *kvm);
extern void kvmppc_core_free_memslot(struct kvm *kvm,
struct kvm_memory_slot *slot);
extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
- struct kvm_memory_slot *memslot,
- const struct kvm_userspace_memory_region *mem,
- enum kvm_mr_change change);
+ struct kvm_memory_slot *memslot,
+ const struct kvm_userspace_memory_region_ext *mem,
+ enum kvm_mr_change change);
extern void kvmppc_core_commit_memory_region(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem,
- const struct kvm_memory_slot *old,
- const struct kvm_memory_slot *new,
- enum kvm_mr_change change);
+ const struct kvm_userspace_memory_region_ext *mem,
+ const struct kvm_memory_slot *old,
+ const struct kvm_memory_slot *new,
+ enum kvm_mr_change change);
extern int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm,
struct kvm_ppc_smmu_info *info);
extern void kvmppc_core_flush_memslot(struct kvm *kvm,
@@ -274,14 +274,14 @@ struct kvmppc_ops {
int (*get_dirty_log)(struct kvm *kvm, struct kvm_dirty_log *log);
void (*flush_memslot)(struct kvm *kvm, struct kvm_memory_slot *memslot);
int (*prepare_memory_region)(struct kvm *kvm,
- struct kvm_memory_slot *memslot,
- const struct kvm_userspace_memory_region *mem,
- enum kvm_mr_change change);
+ struct kvm_memory_slot *memslot,
+ const struct kvm_userspace_memory_region_ext *mem,
+ enum kvm_mr_change change);
void (*commit_memory_region)(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem,
- const struct kvm_memory_slot *old,
- const struct kvm_memory_slot *new,
- enum kvm_mr_change change);
+ const struct kvm_userspace_memory_region_ext *mem,
+ const struct kvm_memory_slot *old,
+ const struct kvm_memory_slot *new,
+ enum kvm_mr_change change);
bool (*unmap_gfn_range)(struct kvm *kvm, struct kvm_gfn_range *range);
bool (*age_gfn)(struct kvm *kvm, struct kvm_gfn_range *range);
bool (*test_age_gfn)(struct kvm *kvm, struct kvm_gfn_range *range);
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -847,19 +847,19 @@ void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
}
int kvmppc_core_prepare_memory_region(struct kvm *kvm,
- struct kvm_memory_slot *memslot,
- const struct kvm_userspace_memory_region *mem,
- enum kvm_mr_change change)
+ struct kvm_memory_slot *memslot,
+ const struct kvm_userspace_memory_region_ext *mem,
+ enum kvm_mr_change change)
{
return kvm->arch.kvm_ops->prepare_memory_region(kvm, memslot, mem,
change);
}
void kvmppc_core_commit_memory_region(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem,
- const struct kvm_memory_slot *old,
- const struct kvm_memory_slot *new,
- enum kvm_mr_change change)
+ const struct kvm_userspace_memory_region_ext *mem,
+ const struct kvm_memory_slot *old,
+ const struct kvm_memory_slot *new,
+ enum kvm_mr_change change)
{
kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old, new, change);
}
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -4854,9 +4854,9 @@ static void kvmppc_core_free_memslot_hv(struct kvm_memory_slot *slot)
}
static int kvmppc_core_prepare_memory_region_hv(struct kvm *kvm,
- struct kvm_memory_slot *slot,
- const struct kvm_userspace_memory_region *mem,
- enum kvm_mr_change change)
+ struct kvm_memory_slot *slot,
+ const struct kvm_userspace_memory_region_ext *mem,
+ enum kvm_mr_change change)
{
unsigned long npages = mem->memory_size >> PAGE_SHIFT;
@@ -4871,10 +4871,10 @@ static int kvmppc_core_prepare_memory_region_hv(struct kvm *kvm,
}
static void kvmppc_core_commit_memory_region_hv(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem,
- const struct kvm_memory_slot *old,
- const struct kvm_memory_slot *new,
- enum kvm_mr_change change)
+ const struct kvm_userspace_memory_region_ext *mem,
+ const struct kvm_memory_slot *old,
+ const struct kvm_memory_slot *new,
+ enum kvm_mr_change change)
{
unsigned long npages = mem->memory_size >> PAGE_SHIFT;
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -1899,18 +1899,18 @@ static void kvmppc_core_flush_memslot_pr(struct kvm *kvm,
}
static int kvmppc_core_prepare_memory_region_pr(struct kvm *kvm,
- struct kvm_memory_slot *memslot,
- const struct kvm_userspace_memory_region *mem,
- enum kvm_mr_change change)
+ struct kvm_memory_slot *memslot,
+ const struct kvm_userspace_memory_region_ext *mem,
+ enum kvm_mr_change change)
{
return 0;
}
static void kvmppc_core_commit_memory_region_pr(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem,
- const struct kvm_memory_slot *old,
- const struct kvm_memory_slot *new,
- enum kvm_mr_change change)
+ const struct kvm_userspace_memory_region_ext *mem,
+ const struct kvm_memory_slot *old,
+ const struct kvm_memory_slot *new,
+ enum kvm_mr_change change)
{
return;
}
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -1821,18 +1821,18 @@ void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
}
int kvmppc_core_prepare_memory_region(struct kvm *kvm,
- struct kvm_memory_slot *memslot,
- const struct kvm_userspace_memory_region *mem,
- enum kvm_mr_change change)
+ struct kvm_memory_slot *memslot,
+ const struct kvm_userspace_memory_region_ext *mem,
+ enum kvm_mr_change change)
{
return 0;
}
void kvmppc_core_commit_memory_region(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem,
- const struct kvm_memory_slot *old,
- const struct kvm_memory_slot *new,
- enum kvm_mr_change change)
+ const struct kvm_userspace_memory_region_ext *mem,
+ const struct kvm_memory_slot *old,
+ const struct kvm_memory_slot *new,
+ enum kvm_mr_change change)
{
}
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -706,18 +706,18 @@ void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
}
int kvm_arch_prepare_memory_region(struct kvm *kvm,
- struct kvm_memory_slot *memslot,
- const struct kvm_userspace_memory_region *mem,
- enum kvm_mr_change change)
+ struct kvm_memory_slot *memslot,
+ const struct kvm_userspace_memory_region_ext *mem,
+ enum kvm_mr_change change)
{
return kvmppc_core_prepare_memory_region(kvm, memslot, mem, change);
}
void kvm_arch_commit_memory_region(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem,
- struct kvm_memory_slot *old,
- const struct kvm_memory_slot *new,
- enum kvm_mr_change change)
+ const struct kvm_userspace_memory_region_ext *mem,
+ struct kvm_memory_slot *old,
+ const struct kvm_memory_slot *new,
+ enum kvm_mr_change change)
{
kvmppc_core_commit_memory_region(kvm, mem, old, new, change);
}
--- a/arch/riscv/kvm/mmu.c
+++ b/arch/riscv/kvm/mmu.c
@@ -456,10 +456,10 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
}
void kvm_arch_commit_memory_region(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem,
- struct kvm_memory_slot *old,
- const struct kvm_memory_slot *new,
- enum kvm_mr_change change)
+ const struct kvm_userspace_memory_region_ext *mem,
+ struct kvm_memory_slot *old,
+ const struct kvm_memory_slot *new,
+ enum kvm_mr_change change)
{
/*
* At this point memslot has been committed and there is an
@@ -471,9 +471,9 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
}
int kvm_arch_prepare_memory_region(struct kvm *kvm,
- struct kvm_memory_slot *memslot,
- const struct kvm_userspace_memory_region *mem,
- enum kvm_mr_change change)
+ struct kvm_memory_slot *memslot,
+ const struct kvm_userspace_memory_region_ext *mem,
+ enum kvm_mr_change change)
{
hva_t hva = mem->userspace_addr;
hva_t reg_end = hva + mem->memory_size;
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -5018,9 +5018,9 @@ vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
- struct kvm_memory_slot *memslot,
- const struct kvm_userspace_memory_region *mem,
- enum kvm_mr_change change)
+ struct kvm_memory_slot *memslot,
+ const struct kvm_userspace_memory_region_ext *mem,
+ enum kvm_mr_change change)
{
/* A few sanity checks. We can have memory slots which have to be
located/ended at a segment boundary (1MB). The memory in userland is
@@ -5043,10 +5043,10 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
}
void kvm_arch_commit_memory_region(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem,
- struct kvm_memory_slot *old,
- const struct kvm_memory_slot *new,
- enum kvm_mr_change change)
+ const struct kvm_userspace_memory_region_ext *mem,
+ struct kvm_memory_slot *old,
+ const struct kvm_memory_slot *new,
+ enum kvm_mr_change change)
{
int rc = 0;
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1556,9 +1556,9 @@ struct kvm_x86_ops {
void (*vcpu_deliver_sipi_vector)(struct kvm_vcpu *vcpu, u8 vector);
int (*prepare_memory_region)(struct kvm *kvm,
- struct kvm_memory_slot *memslot,
- const struct kvm_userspace_memory_region *mem,
- enum kvm_mr_change change);
+ struct kvm_memory_slot *memslot,
+ const struct kvm_userspace_memory_region_ext *mem,
+ enum kvm_mr_change change);
#ifdef CONFIG_KVM_TDX_SEAM_BACKDOOR
void (*do_seamcall)(struct kvm_seamcall *call);
--- a/arch/x86/kvm/vmx/main.c
+++ b/arch/x86/kvm/vmx/main.c
@@ -992,9 +992,9 @@ static void vt_setup_mce(struct kvm_vcpu *vcpu)
}
static int vt_prepare_memory_region(struct kvm *kvm,
- struct kvm_memory_slot *memslot,
- const struct kvm_userspace_memory_region *mem,
- enum kvm_mr_change change)
+ struct kvm_memory_slot *memslot,
+ const struct kvm_userspace_memory_region_ext *mem,
+ enum kvm_mr_change change)
{
if (is_td(kvm))
tdx_prepare_memory_region(kvm, memslot, mem, change);
--- a/arch/x86/kvm/vmx/tdx.c
+++ b/arch/x86/kvm/vmx/tdx.c
@@ -2679,9 +2679,9 @@ static void tdx_flush_gprs(struct kvm_vcpu *vcpu)
}
static int tdx_prepare_memory_region(struct kvm *kvm,
- struct kvm_memory_slot *memslot,
- const struct kvm_userspace_memory_region *mem,
- enum kvm_mr_change change)
+ struct kvm_memory_slot *memslot,
+ const struct kvm_userspace_memory_region_ext *mem,
+ enum kvm_mr_change change)
{
/* TDX Secure-EPT allows only RWX. */
if (mem->flags & KVM_MEM_READONLY)
--- a/arch/x86/kvm/vmx/tdx_stubs.c
+++ b/arch/x86/kvm/vmx/tdx_stubs.c
@@ -28,9 +28,9 @@ static int tdx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector) { ret
static void tdx_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2,
u32 *intr_info, u32 *error_code) {}
static int tdx_prepare_memory_region(struct kvm *kvm,
- struct kvm_memory_slot *memslot,
- const struct kvm_userspace_memory_region *mem,
- enum kvm_mr_change change) { return 0; }
+ struct kvm_memory_slot *memslot,
+ const struct kvm_userspace_memory_region_ext *mem,
+ enum kvm_mr_change change) { return 0; }
static void tdx_prepare_switch_to_guest(struct kvm_vcpu *vcpu) {}
static int __init tdx_check_processor_compatibility(void) { return 0; }
static void __init tdx_pre_kvm_init(unsigned int *vcpu_size,
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -11635,7 +11635,7 @@ void __user * __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa,
}
for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
- struct kvm_userspace_memory_region m;
+ struct kvm_userspace_memory_region_ext m;
m.slot = id | (i << 16);
m.flags = 0;
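
The hunk above only retypes the stack local; the assignments that follow
(elided here) still fill in just the legacy fields. Since m.flags stays 0,
KVM never looks at the new fd fields, but an initializer would keep stack
garbage out of the enlarged struct. A hedged sketch of that alternative,
not part of this patch:

    /* Hypothetical: zero the whole _ext struct so fd, private_fd and the
     * padding never carry stack garbage, even though KVM only consults
     * them when KVM_MEM_FD is set. */
    struct kvm_userspace_memory_region_ext m = {
        .slot  = id | (i << 16),
        .flags = 0,
    };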
@@ -11841,9 +11841,9 @@ void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
}
int kvm_arch_prepare_memory_region(struct kvm *kvm,
- struct kvm_memory_slot *memslot,
- const struct kvm_userspace_memory_region *mem,
- enum kvm_mr_change change)
+ struct kvm_memory_slot *memslot,
+ const struct kvm_userspace_memory_region_ext *mem,
+ enum kvm_mr_change change)
{
int err;
@@ -11948,10 +11948,10 @@ static void kvm_mmu_slot_apply_flags(struct kvm *kvm,
}
void kvm_arch_commit_memory_region(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem,
- struct kvm_memory_slot *old,
- const struct kvm_memory_slot *new,
- enum kvm_mr_change change)
+ const struct kvm_userspace_memory_region_ext *mem,
+ struct kvm_memory_slot *old,
+ const struct kvm_memory_slot *new,
+ enum kvm_mr_change change)
{
if (!kvm->arch.n_requested_mmu_pages)
kvm_mmu_change_mmu_pages(kvm,
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -824,20 +824,20 @@ enum kvm_mr_change {
};
int kvm_set_memory_region(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem);
+ const struct kvm_userspace_memory_region_ext *mem);
int __kvm_set_memory_region(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem);
+ const struct kvm_userspace_memory_region_ext *mem);
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot);
void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen);
int kvm_arch_prepare_memory_region(struct kvm *kvm,
- struct kvm_memory_slot *memslot,
- const struct kvm_userspace_memory_region *mem,
- enum kvm_mr_change change);
+ struct kvm_memory_slot *memslot,
+ const struct kvm_userspace_memory_region_ext *mem,
+ enum kvm_mr_change change);
void kvm_arch_commit_memory_region(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem,
- struct kvm_memory_slot *old,
- const struct kvm_memory_slot *new,
- enum kvm_mr_change change);
+ const struct kvm_userspace_memory_region_ext *mem,
+ struct kvm_memory_slot *old,
+ const struct kvm_memory_slot *new,
+ enum kvm_mr_change change);
/* flush all memory translations */
void kvm_arch_flush_shadow_all(struct kvm *kvm);
/* flush memory translations pointing to 'slot' */
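
Taken together, these declarations pin down the path a memslot update takes
through common code; every signature on it now carries the _ext type. As a
reading aid (a summary, not part of the patch), the chain is:

    /*
     * ioctl(KVM_SET_USER_MEMORY_REGION)
     *   kvm_vm_ioctl()                            virt/kvm/kvm_main.c
     *     kvm_vm_ioctl_set_memory_region()
     *       kvm_set_memory_region()
     *         __kvm_set_memory_region()
     *           kvm_set_memslot()
     *             kvm_arch_prepare_memory_region()   per-arch, pre-install
     *             kvm_arch_commit_memory_region()    per-arch, post-install
     */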
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -103,6 +103,17 @@ struct kvm_userspace_memory_region {
__u64 userspace_addr; /* start of the userspace allocated memory */
};
+struct kvm_userspace_memory_region_ext {
+ __u32 slot;
+ __u32 flags;
+ __u64 guest_phys_addr;
+ __u64 memory_size; /* bytes */
+ __u64 userspace_addr; /* offset into fd/private_fd */
+ __s32 fd;
+ __s32 private_fd; /* valid if guest private memory is supported */
+ __u32 padding[6];
+};
+
/*
* The bit 0 ~ bit 15 of kvm_memory_region::flags are visible for userspace,
* other bits are reserved for kvm internal use which are defined in
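
The new struct deliberately repeats the legacy fields rather than embedding
the old struct, so every mem->userspace_addr-style access in the arch hunks
above compiles unchanged. A hedged sketch of compile-time checks that would
pin the layout down (illustrative only; offsetof() comes from <stddef.h> in
userspace or linux/stddef.h in the kernel):

    _Static_assert(offsetof(struct kvm_userspace_memory_region_ext, userspace_addr) ==
                   offsetof(struct kvm_userspace_memory_region, userspace_addr),
                   "common fields must keep their legacy offsets");
    _Static_assert(sizeof(struct kvm_userspace_memory_region_ext) ==
                   sizeof(struct kvm_userspace_memory_region) + 32,
                   "fd, private_fd and padding[6] add exactly 32 bytes");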
@@ -110,6 +121,7 @@ struct kvm_userspace_memory_region {
*/
#define KVM_MEM_LOG_DIRTY_PAGES (1UL << 0)
#define KVM_MEM_READONLY (1UL << 1)
+#define KVM_MEM_FD (1UL << 2)
/* for KVM_IRQ_LINE */
struct kvm_irq_level {
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1424,7 +1424,7 @@ static void update_memslots(struct kvm_memslots *slots,
}
static int check_memory_region_flags(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem)
+ const struct kvm_userspace_memory_region_ext *mem)
{
u32 valid_flags = 0;
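
This hunk only retypes the parameter; valid_flags still starts at 0 and is
filled in by the elided body, so nothing here accepts KVM_MEM_FD yet.
Presumably a later patch in the series flips that switch once fd-based
slots actually work; a hedged sketch (kvm_fd_memslot_supported() is a
hypothetical predicate, not a real helper):

    if (kvm_fd_memslot_supported(kvm))	/* hypothetical predicate */
        valid_flags |= KVM_MEM_FD;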
@@ -1537,7 +1537,7 @@ static struct kvm_memslots *kvm_dup_memslots(struct kvm_memslots *old,
}
static int kvm_set_memslot(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem,
+ const struct kvm_userspace_memory_region_ext *mem,
struct kvm_memory_slot *old,
struct kvm_memory_slot *new, int as_id,
enum kvm_mr_change change)
@@ -1629,7 +1629,7 @@ static int kvm_set_memslot(struct kvm *kvm,
}
static int kvm_delete_memslot(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem,
+ const struct kvm_userspace_memory_region_ext *mem,
struct kvm_memory_slot *old, int as_id)
{
struct kvm_memory_slot new;
@@ -1663,7 +1663,7 @@ static int kvm_delete_memslot(struct kvm *kvm,
* Must be called holding kvm->slots_lock for write.
*/
int __kvm_set_memory_region(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem)
+ const struct kvm_userspace_memory_region_ext *mem)
{
struct kvm_memory_slot old, new;
struct kvm_memory_slot *tmp;
@@ -1783,7 +1783,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
EXPORT_SYMBOL_GPL(__kvm_set_memory_region);
int kvm_set_memory_region(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem)
+ const struct kvm_userspace_memory_region_ext *mem)
{
int r;
@@ -1795,7 +1795,7 @@ int kvm_set_memory_region(struct kvm *kvm,
EXPORT_SYMBOL_GPL(kvm_set_memory_region);
static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
- struct kvm_userspace_memory_region *mem)
+ struct kvm_userspace_memory_region_ext *mem)
{
if ((u16)mem->slot >= KVM_USER_MEM_SLOTS)
return -EINVAL;
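
The (u16) cast is deliberate: as the __x86_set_memory_region() hunk above
shows with id | (i << 16), the upper 16 bits of slot carry an address-space
id and only the low 16 bits index a slot. Roughly what the core code does
with the field (paraphrasing __kvm_set_memory_region(), not a new API):

    int as_id = (u32)mem->slot >> 16;	/* address space, e.g. SMM on x86 */
    u16 id = (u16)mem->slot;		/* slot index within that space */

    if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_MEM_SLOTS_NUM)
        return -EINVAL;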
@@ -4368,12 +4368,19 @@ static long kvm_vm_ioctl(struct file *filp,
break;
}
case KVM_SET_USER_MEMORY_REGION: {
- struct kvm_userspace_memory_region kvm_userspace_mem;
+ struct kvm_userspace_memory_region_ext kvm_userspace_mem;
r = -EFAULT;
if (copy_from_user(&kvm_userspace_mem, argp,
- sizeof(kvm_userspace_mem)))
+ sizeof(struct kvm_userspace_memory_region)))
goto out;
+ if (kvm_userspace_mem.flags & KVM_MEM_FD) {
+ int offset = offsetof(
+ struct kvm_userspace_memory_region_ext, fd);
+ if (copy_from_user(&kvm_userspace_mem.fd, argp + offset,
+ sizeof(kvm_userspace_mem) - offset))
+ goto out;
+ }
r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem);
break;
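
End to end, the ioctl now reads the legacy-sized prefix first and fetches
the tail only when userspace opts in via KVM_MEM_FD, so existing VMMs see
no behavioral change. A minimal userspace sketch of the new path, assuming
a kernel built with this series and uapi headers exporting the _ext struct
(the slot number, the gpa, and -1 for an unused private_fd are assumptions;
error handling is omitted):

    #define _GNU_SOURCE
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <linux/kvm.h>

    static int set_fd_based_slot(int vm_fd, __u64 gpa, __u64 size)
    {
        /* Back the guest range with a memfd instead of an hva mapping. */
        int memfd = memfd_create("guest-mem", MFD_CLOEXEC);
        struct kvm_userspace_memory_region_ext region;

        ftruncate(memfd, size);

        memset(&region, 0, sizeof(region));    /* also clears padding[6] */
        region.slot = 0;                       /* assumed free user slot */
        region.flags = KVM_MEM_FD;             /* triggers the second copy_from_user() */
        region.guest_phys_addr = gpa;
        region.memory_size = size;
        region.userspace_addr = 0;             /* offset into fd under KVM_MEM_FD */
        region.fd = memfd;
        region.private_fd = -1;                /* assumed: no private memory */

        return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
    }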