@@ -323,6 +323,11 @@ void kvm_gmem_init(struct module *module)
kvm_gmem_fops.owner = module;
}
+void kvm_gmem_exit(void)
+{
+
+}
+
static int kvm_gmem_migrate_folio(struct address_space *mapping,
struct folio *dst, struct folio *src,
enum migrate_mode mode)
@@ -6441,6 +6441,7 @@ int kvm_init(unsigned vcpu_size, unsigned vcpu_align, struct module *module)
err_register:
kvm_uninit_virtualization();
err_virt:
+ kvm_gmem_exit();
kvm_vfio_ops_exit();
err_vfio:
kvm_async_pf_deinit();
@@ -6471,6 +6472,7 @@ void kvm_exit(void)
debugfs_remove_recursive(kvm_debugfs_dir);
for_each_possible_cpu(cpu)
free_cpumask_var(per_cpu(cpu_kick_mask, cpu));
+ kvm_gmem_exit();
kmem_cache_destroy(kvm_vcpu_cache);
kvm_vfio_ops_exit();
kvm_async_pf_deinit();
@@ -69,6 +69,7 @@ static inline void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm,
#ifdef CONFIG_KVM_PRIVATE_MEM
void kvm_gmem_init(struct module *module);
+void kvm_gmem_exit(void);
int kvm_gmem_create(struct kvm *kvm, struct kvm_create_guest_memfd *args);
int kvm_gmem_bind(struct kvm *kvm, struct kvm_memory_slot *slot,
unsigned int fd, loff_t offset);
@@ -79,6 +80,11 @@ static inline void kvm_gmem_init(struct module *module)
}
+static inline void kvm_gmem_exit(void)
+{
+
+}
+
static inline int kvm_gmem_bind(struct kvm *kvm,
struct kvm_memory_slot *slot,
unsigned int fd, loff_t offset)
Add empty kvm_gmem_exit() function for proper cleanup of guest memory
resources. Call it from both kvm_init() error path and kvm_exit(). This
is a preparatory change for upcoming work that involves KVM guest_memfd
using inodes to store metadata for backing memory.

Signed-off-by: Shivank Garg <shivankg@amd.com>
---
 virt/kvm/guest_memfd.c | 5 +++++
 virt/kvm/kvm_main.c    | 2 ++
 virt/kvm/kvm_mm.h      | 6 ++++++
 3 files changed, 13 insertions(+)