@@ -8070,6 +8070,29 @@ error/annotated fault.
See KVM_EXIT_MEMORY_FAULT for more information.
+7.35 KVM_CAP_USERFAULT
+------------------------------
+
+:Architectures: none
+:Parameters: args[0] - whether or not to enable KVM Userfault. To enable,
+ pass KVM_USERFAULT_ENABLE, and to disable pass
+ KVM_USERFAULT_DISABLE.
+ args[1] - the eventfd to be notified when asynchronous userfaults
+ occur.
+
+:Returns: 0 on success; -EINVAL if args[0] is not exactly one of
+          KVM_USERFAULT_ENABLE or KVM_USERFAULT_DISABLE, or if KVM Userfault
+          is not supported; -EEXIST if KVM Userfault is already enabled;
+          other errors if args[1] does not refer to a valid eventfd.
+
+This capability, if enabled with KVM_ENABLE_CAP, allows userspace to mark
+regions of memory as KVM_MEMORY_ATTRIBUTE_USERFAULT, in which case, attempted
+accesses to these regions of memory by KVM_RUN will fail with
+KVM_EXIT_MEMORY_FAULT. Attempted accesses by other ioctls will fail with
+EFAULT.
+
+Enabling this capability will cause all future faults to create
+small-page-sized sptes. Collapsing these sptes back into their optimal size
+is done with KVM_COLLAPSE_PAGE_TABLES.
+
8. Other capabilities.
======================
@@ -730,6 +730,10 @@ struct kvm_memslots {
int node_idx;
};
+/*
+ * Per-VM KVM Userfault state. The presence of this context on a VM
+ * (kvm->userfault_ctx non-NULL) is what marks KVM Userfault as enabled.
+ */
+struct kvm_userfault_ctx {
+	/* eventfd notified on asynchronous userfaults (not yet signalled here). */
+ struct eventfd_ctx *ev_fd;
+};
+
struct kvm {
#ifdef KVM_HAVE_MMU_RWLOCK
rwlock_t mmu_lock;
@@ -831,6 +835,7 @@ struct kvm {
bool dirty_ring_with_bitmap;
bool vm_bugged;
bool vm_dead;
+ struct kvm_userfault_ctx __rcu *userfault_ctx;
#ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
struct notifier_block pm_notifier;
@@ -2477,4 +2482,13 @@ long kvm_gmem_populate(struct kvm *kvm, gfn_t gfn, void __user *src, long npages
void kvm_arch_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end);
#endif
+/*
+ * Returns true iff KVM Userfault is enabled for @kvm, i.e. a userfault
+ * context has been published. rcu_access_pointer() is sufficient here:
+ * only the pointer's NULL-ness is tested, the pointee is never
+ * dereferenced, so no RCU read-side critical section is required.
+ */
+static inline bool kvm_userfault_enabled(struct kvm *kvm)
+{
+#ifdef CONFIG_KVM_USERFAULT
+ return !!rcu_access_pointer(kvm->userfault_ctx);
+#else
+ return false;
+#endif
+}
+
#endif
@@ -917,6 +917,7 @@ struct kvm_enable_cap {
#define KVM_CAP_MEMORY_ATTRIBUTES 233
#define KVM_CAP_GUEST_MEMFD 234
#define KVM_CAP_VM_TYPES 235
+#define KVM_CAP_USERFAULT 236
struct kvm_irq_routing_irqchip {
__u32 irqchip;
@@ -1539,6 +1540,7 @@ struct kvm_memory_attributes {
};
#define KVM_MEMORY_ATTRIBUTE_PRIVATE (1ULL << 3)
+#define KVM_MEMORY_ATTRIBUTE_USERFAULT (1ULL << 4)
#define KVM_CREATE_GUEST_MEMFD _IOWR(KVMIO, 0xd4, struct kvm_create_guest_memfd)
@@ -1548,4 +1550,7 @@ struct kvm_create_guest_memfd {
__u64 reserved[6];
};
+#define KVM_USERFAULT_ENABLE (1ULL << 0)
+#define KVM_USERFAULT_DISABLE (1ULL << 1)
+
#endif /* __LINUX_KVM_H */
@@ -2430,10 +2430,16 @@ bool kvm_range_has_memory_attributes(struct kvm *kvm, gfn_t start, gfn_t end,
 static u64 kvm_supported_mem_attributes(struct kvm *kvm)
 {
+ u64 attributes = 0;
+
+	/* NOTE(review): !kvm presumably means a system-wide (pre-VM) capability
+	 * query, so report everything any VM could support — confirm against
+	 * the KVM_CAP_MEMORY_ATTRIBUTES caller. */
 if (!kvm || kvm_arch_has_private_mem(kvm))
- return KVM_MEMORY_ATTRIBUTE_PRIVATE;
+ attributes |= KVM_MEMORY_ATTRIBUTE_PRIVATE;
- return 0;
+#ifdef CONFIG_KVM_USERFAULT
+	/* USERFAULT is only accepted on VMs that have enabled the capability. */
+ if (!kvm || kvm_userfault_enabled(kvm))
+ attributes |= KVM_MEMORY_ATTRIBUTE_USERFAULT;
+#endif
+
+ return attributes;
 }
static __always_inline void kvm_handle_gfn_range(struct kvm *kvm,
@@ -4946,6 +4952,84 @@ bool kvm_are_all_memslots_empty(struct kvm *kvm)
}
EXPORT_SYMBOL_GPL(kvm_are_all_memslots_empty);
+#ifdef CONFIG_KVM_USERFAULT
+/*
+ * Disable KVM Userfault for @kvm: unpublish the userfault context, wait for
+ * all in-flight readers to drain, then free it. Idempotent — disabling when
+ * already disabled returns 0.
+ */
+static int kvm_disable_userfault(struct kvm *kvm)
+{
+	struct kvm_userfault_ctx *ctx;
+
+	mutex_lock(&kvm->slots_lock);
+
+	/*
+	 * slots_lock protects writers of userfault_ctx. Use
+	 * lockdep_is_held() as the update-side condition:
+	 * mutex_is_locked() would only check that *some* task holds the
+	 * lock, not that the current task does.
+	 */
+	ctx = rcu_replace_pointer(kvm->userfault_ctx, NULL,
+				  lockdep_is_held(&kvm->slots_lock));
+
+	mutex_unlock(&kvm->slots_lock);
+
+	if (!ctx)
+		return 0;
+
+	/* Wait for everyone to stop using userfault. */
+	synchronize_srcu(&kvm->srcu);
+
+	eventfd_ctx_put(ctx->ev_fd);
+	kfree(ctx);
+	return 0;
+}
+
+/*
+ * Enable KVM Userfault for @kvm by allocating a userfault context around the
+ * eventfd given by @event_fd and publishing it in kvm->userfault_ctx.
+ *
+ * Returns 0 on success, -EEXIST if already enabled, -ENOMEM on allocation
+ * failure, or the error from eventfd_ctx_fdget() if @event_fd is invalid.
+ */
+static int kvm_enable_userfault(struct kvm *kvm, int event_fd)
+{
+ struct kvm_userfault_ctx *userfault_ctx;
+ struct eventfd_ctx *ev_fd;
+ int ret;
+
+	/* slots_lock serializes enable/disable against each other. */
+ mutex_lock(&kvm->slots_lock);
+
+ ret = -EEXIST;
+ if (kvm_userfault_enabled(kvm))
+ goto out;
+
+ ret = -ENOMEM;
+ userfault_ctx = kmalloc(sizeof(*userfault_ctx), GFP_KERNEL);
+ if (!userfault_ctx)
+ goto out;
+
+ ev_fd = eventfd_ctx_fdget(event_fd);
+ if (IS_ERR(ev_fd)) {
+ ret = PTR_ERR(ev_fd);
+ kfree(userfault_ctx);
+ goto out;
+ }
+
+ ret = 0;
+ userfault_ctx->ev_fd = ev_fd;
+
+	/*
+	 * Publish only after the context is fully initialized;
+	 * rcu_assign_pointer() orders the stores above before the pointer
+	 * becomes visible to readers.
+	 */
+ rcu_assign_pointer(kvm->userfault_ctx, userfault_ctx);
+out:
+ mutex_unlock(&kvm->slots_lock);
+ return ret;
+}
+
+/*
+ * Dispatch KVM_CAP_USERFAULT: @options (cap args[0]) selects enable or
+ * disable; @event_fd (cap args[1]) is the eventfd used for asynchronous
+ * userfault notification and is only consumed on enable.
+ *
+ * Exactly one of KVM_USERFAULT_ENABLE or KVM_USERFAULT_DISABLE must be
+ * set, with no other bits; anything else is -EINVAL.
+ */
+static int kvm_vm_ioctl_enable_userfault(struct kvm *kvm, int options,
+					 int event_fd)
+{
+	if (options == KVM_USERFAULT_ENABLE)
+		return kvm_enable_userfault(kvm, event_fd);
+
+	if (options == KVM_USERFAULT_DISABLE)
+		return kvm_disable_userfault(kvm);
+
+	return -EINVAL;
+}
+#endif
+
static int kvm_vm_ioctl_enable_cap_generic(struct kvm *kvm,
struct kvm_enable_cap *cap)
{
@@ -5009,6 +5093,14 @@ static int kvm_vm_ioctl_enable_cap_generic(struct kvm *kvm,
return r;
}
+#ifdef CONFIG_KVM_USERFAULT
+ case KVM_CAP_USERFAULT:
+ if (cap->flags)
+ return -EINVAL;
+
+ return kvm_vm_ioctl_enable_userfault(kvm, cap->args[0],
+ cap->args[1]);
+#endif
default:
return kvm_vm_ioctl_enable_cap(kvm, cap);
}
Add the ability to enable and disable KVM Userfault, and add KVM_MEMORY_ATTRIBUTE_USERFAULT to control whether or not pages should trigger userfaults. The presence of a kvm_userfault_ctx in the struct kvm is what signifies whether KVM Userfault is enabled or not. To make sure that this struct is non-empty, include a struct eventfd_ctx pointer, although it is not used in this patch. Signed-off-by: James Houghton <jthoughton@google.com> --- Documentation/virt/kvm/api.rst | 23 ++++++++ include/linux/kvm_host.h | 14 +++++ include/uapi/linux/kvm.h | 5 ++ virt/kvm/kvm_main.c | 96 +++++++++++++++++++++++++++++++++- 4 files changed, 136 insertions(+), 2 deletions(-)