diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -2206,6 +2206,8 @@ void kvm_configure_mmu(bool enable_tdp, int tdp_forced_root_level,
#define kvm_arch_has_private_mem(kvm) false
#endif

+#define kvm_arch_private_mem_inplace(kvm) false
+
#define kvm_arch_has_readonly_mem(kvm) (!(kvm)->arch.has_protected_state)

static inline u16 kvm_read_ldt(void)
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -717,6 +717,17 @@ static inline bool kvm_arch_has_private_mem(struct kvm *kvm)
}
#endif

+/*
+ * Arch code must define kvm_arch_private_mem_inplace if it supports private
+ * memory and in-place conversion between shared and private.
+ */
+#if !defined(kvm_arch_private_mem_inplace) && !IS_ENABLED(CONFIG_KVM_PRIVATE_MEM)
+static inline bool kvm_arch_private_mem_inplace(struct kvm *kvm)
+{
+ return false;
+}
+#endif
+
#ifndef kvm_arch_has_readonly_mem
static inline bool kvm_arch_has_readonly_mem(struct kvm *kvm)
{
diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig
--- a/virt/kvm/Kconfig
+++ b/virt/kvm/Kconfig
@@ -124,3 +124,7 @@ config HAVE_KVM_ARCH_GMEM_PREPARE
config HAVE_KVM_ARCH_GMEM_INVALIDATE
bool
depends on KVM_PRIVATE_MEM
+
+config KVM_GMEM_MAPPABLE
+ select KVM_PRIVATE_MEM
+ bool
diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c
--- a/virt/kvm/guest_memfd.c
+++ b/virt/kvm/guest_memfd.c
@@ -307,7 +307,78 @@ static pgoff_t kvm_gmem_get_index(struct kvm_memory_slot *slot, gfn_t gfn)
return gfn - slot->base_gfn + slot->gmem.pgoff;
}

+#ifdef CONFIG_KVM_GMEM_MAPPABLE
+static vm_fault_t kvm_gmem_fault(struct vm_fault *vmf)
+{
+ struct inode *inode = file_inode(vmf->vma->vm_file);
+ struct folio *folio;
+ vm_fault_t ret = VM_FAULT_LOCKED;
+
+ filemap_invalidate_lock_shared(inode->i_mapping);
+
+ folio = kvm_gmem_get_folio(inode, vmf->pgoff);
+ if (IS_ERR(folio)) {
+ ret = VM_FAULT_SIGBUS;
+ goto out_filemap;
+ }
+
+ if (folio_test_hwpoison(folio)) {
+ ret = VM_FAULT_HWPOISON;
+ goto out_folio;
+ }
+
+ if (!folio_test_uptodate(folio)) {
+ unsigned long nr_pages = folio_nr_pages(folio);
+ unsigned long i;
+
+ for (i = 0; i < nr_pages; i++)
+ clear_highpage(folio_page(folio, i));
+
+ folio_mark_uptodate(folio);
+ }
+
+ vmf->page = folio_file_page(folio, vmf->pgoff);
+
+out_folio:
+ if (ret != VM_FAULT_LOCKED) {
+ folio_unlock(folio);
+ folio_put(folio);
+ }
+
+out_filemap:
+ filemap_invalidate_unlock_shared(inode->i_mapping);
+
+ return ret;
+}
+
+static const struct vm_operations_struct kvm_gmem_vm_ops = {
+ .fault = kvm_gmem_fault,
+};
+
+static int kvm_gmem_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct kvm_gmem *gmem = file->private_data;
+
+ if (!kvm_arch_private_mem_inplace(gmem->kvm))
+ return -ENODEV;
+
+ if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) !=
+ (VM_SHARED | VM_MAYSHARE)) {
+ return -EINVAL;
+ }
+
+ file_accessed(file);
+ vm_flags_set(vma, VM_DONTDUMP);
+ vma->vm_ops = &kvm_gmem_vm_ops;
+
+ return 0;
+}
+#else
+#define kvm_gmem_mmap NULL
+#endif /* CONFIG_KVM_GMEM_MAPPABLE */
+
static struct file_operations kvm_gmem_fops = {
+ .mmap = kvm_gmem_mmap,
.open = generic_file_open,
.release = kvm_gmem_release,
.fallocate = kvm_gmem_fallocate,
Add support for mmap() and fault() for guest_memfd in the host for VMs
that support in-place conversion between shared and private. To that
end, this patch adds the ability to check whether the architecture has
that support, and only allows mmap() if that's the case.

Additionally, this is gated with a new configuration option,
CONFIG_KVM_GMEM_MAPPABLE.

Signed-off-by: Fuad Tabba <tabba@google.com>
---
 arch/x86/include/asm/kvm_host.h |  2 +
 include/linux/kvm_host.h        | 11 +++++
 virt/kvm/Kconfig                |  4 ++
 virt/kvm/guest_memfd.c          | 71 +++++++++++++++++++++++++++++++++
 4 files changed, 88 insertions(+)
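For illustration, a minimal userspace sketch of how a VMM might exercise
the new path, assuming a kernel with this series applied and a VM for
which kvm_arch_private_mem_inplace() returns true. The map_guest_memfd()
helper name is made up for this example; vm_fd is an existing KVM VM
file descriptor, and error handling is elided:

#include <stddef.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/kvm.h>

/* Create a guest_memfd for the VM and map it into the host. */
static void *map_guest_memfd(int vm_fd, size_t size)
{
	struct kvm_create_guest_memfd gmem = { .size = size };
	int gmem_fd;

	gmem_fd = ioctl(vm_fd, KVM_CREATE_GUEST_MEMFD, &gmem);
	if (gmem_fd < 0)
		return MAP_FAILED;

	/*
	 * The mapping must be shared: kvm_gmem_mmap() rejects anything
	 * else with -EINVAL, and fails with -ENODEV if the architecture
	 * does not support in-place conversion between shared and
	 * private.
	 */
	return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    gmem_fd, 0);
}

On first access, kvm_gmem_fault() looks up the folio, zeroes it if it is
not yet up-to-date, and maps it into the host page tables, so userspace
sees zero-filled pages rather than stale contents.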