Message ID: 20211119134739.20218-10-chao.p.peng@linux.intel.com
State: New
Series: KVM: mm: fd-based approach for supporting KVM guest private memory
On 11/19/21 14:47, Chao Peng wrote:
> +
> +	/* Prevent memslot modification */
> +	spin_lock(&kvm->mn_invalidate_lock);
> +	kvm->mn_active_invalidate_count++;
> +	spin_unlock(&kvm->mn_invalidate_lock);
> +
> +	ret = __kvm_handle_useraddr_range(kvm, &useraddr_range);
> +
> +	spin_lock(&kvm->mn_invalidate_lock);
> +	kvm->mn_active_invalidate_count--;
> +	spin_unlock(&kvm->mn_invalidate_lock);
> +

You need to follow this with a rcuwait_wake_up as in
kvm_mmu_notifier_invalidate_range_end.

It's probably best if you move the manipulations of
mn_active_invalidate_count from kvm_mmu_notifier_invalidate_range_* to
two separate functions.

Paolo
On Tue, Nov 23, 2021 at 09:46:34AM +0100, Paolo Bonzini wrote:
> On 11/19/21 14:47, Chao Peng wrote:
> > +
> > +	/* Prevent memslot modification */
> > +	spin_lock(&kvm->mn_invalidate_lock);
> > +	kvm->mn_active_invalidate_count++;
> > +	spin_unlock(&kvm->mn_invalidate_lock);
> > +
> > +	ret = __kvm_handle_useraddr_range(kvm, &useraddr_range);
> > +
> > +	spin_lock(&kvm->mn_invalidate_lock);
> > +	kvm->mn_active_invalidate_count--;
> > +	spin_unlock(&kvm->mn_invalidate_lock);
> > +
>
> You need to follow this with a rcuwait_wake_up as in
> kvm_mmu_notifier_invalidate_range_end.

Oh right.

>
> It's probably best if you move the manipulations of
> mn_active_invalidate_count from kvm_mmu_notifier_invalidate_range_* to
> two separate functions.

Will do.

>
> Paolo
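For reference, a minimal sketch of the refactoring agreed on above. The
helper names kvm_mn_invalidate_begin()/kvm_mn_invalidate_end() are
illustrative assumptions (the follow-up patch is not part of this
thread); the wake-up mirrors what kvm_mmu_notifier_invalidate_range_end()
already does:

static void kvm_mn_invalidate_begin(struct kvm *kvm)
{
	spin_lock(&kvm->mn_invalidate_lock);
	kvm->mn_active_invalidate_count++;
	spin_unlock(&kvm->mn_invalidate_lock);
}

static void kvm_mn_invalidate_end(struct kvm *kvm)
{
	bool wake;

	spin_lock(&kvm->mn_invalidate_lock);
	wake = (--kvm->mn_active_invalidate_count == 0);
	spin_unlock(&kvm->mn_invalidate_lock);

	/*
	 * Wake the memslot-update path waiting on
	 * kvm->mn_memslots_update_rcuwait, as
	 * kvm_mmu_notifier_invalidate_range_end() does.
	 */
	if (wake)
		rcuwait_wake_up(&kvm->mn_memslots_update_rcuwait);
}

Both kvm_mmu_notifier_invalidate_range_{start,end}() and the new
kvm_memfd_invalidate_range() below could then call these helpers instead
of open-coding the count manipulation.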
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 925c4d9f0a31..f0fd32f6eab3 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1883,4 +1883,7 @@ static inline void kvm_handle_signal_exit(struct kvm_vcpu *vcpu)
 /* Max number of entries allowed for each kvm dirty ring */
 #define KVM_DIRTY_RING_MAX_ENTRIES  65536
 
+int kvm_memfd_invalidate_range(struct kvm *kvm, struct inode *inode,
+			       unsigned long start, unsigned long end);
+
 #endif
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index d9a6890dd18a..090afbadb03f 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -811,6 +811,35 @@ static int kvm_init_mmu_notifier(struct kvm *kvm)
 	return mmu_notifier_register(&kvm->mmu_notifier, current->mm);
 }
 
+int kvm_memfd_invalidate_range(struct kvm *kvm, struct inode *inode,
+			       unsigned long start, unsigned long end)
+{
+	int ret;
+	const struct kvm_useraddr_range useraddr_range = {
+		.start = start,
+		.end = end,
+		.pte = __pte(0),
+		.handler = kvm_unmap_gfn_range,
+		.on_lock = (void *)kvm_null_fn,
+		.flush_on_ret = true,
+		.may_block = false,
+	};
+
+
+	/* Prevent memslot modification */
+	spin_lock(&kvm->mn_invalidate_lock);
+	kvm->mn_active_invalidate_count++;
+	spin_unlock(&kvm->mn_invalidate_lock);
+
+	ret = __kvm_handle_useraddr_range(kvm, &useraddr_range);
+
+	spin_lock(&kvm->mn_invalidate_lock);
+	kvm->mn_active_invalidate_count--;
+	spin_unlock(&kvm->mn_invalidate_lock);
+
+	return ret;
+}
+
 #else /* !(CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER) */
 
 static int kvm_init_mmu_notifier(struct kvm *kvm)
@@ -818,6 +847,12 @@ static int kvm_init_mmu_notifier(struct kvm *kvm)
 	return 0;
 }
 
+int kvm_memfd_invalidate_range(struct kvm *kvm, struct inode *inode,
+			       unsigned long start, unsigned long end)
+{
+	return 0;
+}
+
 #endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */
 
 #ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
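With the sketched helpers above, the locking in
kvm_memfd_invalidate_range() would reduce to something like this
(illustrative only, not the posted follow-up):

	kvm_mn_invalidate_begin(kvm);
	ret = __kvm_handle_useraddr_range(kvm, &useraddr_range);
	kvm_mn_invalidate_end(kvm);

	return ret;

This also picks up the missing rcuwait_wake_up() that Paolo pointed out,
since it is folded into kvm_mn_invalidate_end().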