Message ID | 20210112181041.356734-17-bgardon@google.com (mailing list archive) |
---|---|
State | New, archived |
Series | Allow parallel page faults with TDP MMU |
On 12/01/21 19:10, Ben Gardon wrote:
> Wrap assertions and warnings checking the MMU lock state in a function
> which uses lockdep_assert_held. While the existing checks use a few
> different functions to check the lock state, they are all better off
> using lockdep_assert_held. This will support a refactoring to move the
> mmu_lock to struct kvm_arch so that it can be replaced with an rwlock for
> x86.
> 
> Reviewed-by: Peter Feiner <pfeiner@google.com>
> 
> Signed-off-by: Ben Gardon <bgardon@google.com>
> ---
>  arch/arm64/kvm/mmu.c                     | 2 +-
>  arch/powerpc/include/asm/kvm_book3s_64.h | 7 +++----
>  arch/powerpc/kvm/book3s_hv_nested.c      | 3 +--
>  arch/x86/kvm/mmu/mmu_internal.h          | 4 ++--
>  arch/x86/kvm/mmu/tdp_mmu.c               | 8 ++++----
>  include/linux/kvm_host.h                 | 1 +
>  virt/kvm/kvm_main.c                      | 5 +++++
>  7 files changed, 17 insertions(+), 13 deletions(-)
> 
> diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
> index 57ef1ec23b56..8b54eb58bf47 100644
> --- a/arch/arm64/kvm/mmu.c
> +++ b/arch/arm64/kvm/mmu.c
> @@ -130,7 +130,7 @@ static void __unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64
>  	struct kvm *kvm = mmu->kvm;
>  	phys_addr_t end = start + size;
>  
> -	assert_spin_locked(&kvm->mmu_lock);
> +	kvm_mmu_lock_assert_held(kvm);
>  	WARN_ON(size & ~PAGE_MASK);
>  	WARN_ON(stage2_apply_range(kvm, start, end, kvm_pgtable_stage2_unmap,
>  				   may_block));
> diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
> index 9bb9bb370b53..db2e437cd97c 100644
> --- a/arch/powerpc/include/asm/kvm_book3s_64.h
> +++ b/arch/powerpc/include/asm/kvm_book3s_64.h
> @@ -650,8 +650,8 @@ static inline pte_t *find_kvm_secondary_pte(struct kvm *kvm, unsigned long ea,
>  {
>  	pte_t *pte;
>  
> -	VM_WARN(!spin_is_locked(&kvm->mmu_lock),
> -		"%s called with kvm mmu_lock not held \n", __func__);
> +	kvm_mmu_lock_assert_held(kvm);
> +
>  	pte = __find_linux_pte(kvm->arch.pgtable, ea, NULL, hshift);
>  
>  	return pte;
> @@ -662,8 +662,7 @@ static inline pte_t *find_kvm_host_pte(struct kvm *kvm, unsigned long mmu_seq,
>  {
>  	pte_t *pte;
>  
> -	VM_WARN(!spin_is_locked(&kvm->mmu_lock),
> -		"%s called with kvm mmu_lock not held \n", __func__);
> +	kvm_mmu_lock_assert_held(kvm);
>  
>  	if (mmu_notifier_retry(kvm, mmu_seq))
>  		return NULL;
> diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c
> index 18890dca9476..6d5987d1eee7 100644
> --- a/arch/powerpc/kvm/book3s_hv_nested.c
> +++ b/arch/powerpc/kvm/book3s_hv_nested.c
> @@ -767,8 +767,7 @@ pte_t *find_kvm_nested_guest_pte(struct kvm *kvm, unsigned long lpid,
>  	if (!gp)
>  		return NULL;
>  
> -	VM_WARN(!spin_is_locked(&kvm->mmu_lock),
> -		"%s called with kvm mmu_lock not held \n", __func__);
> +	kvm_mmu_lock_assert_held(kvm);
>  	pte = __find_linux_pte(gp->shadow_pgtable, ea, NULL, hshift);
>  
>  	return pte;
> diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
> index 7f599cc64178..cc8268cf28d2 100644
> --- a/arch/x86/kvm/mmu/mmu_internal.h
> +++ b/arch/x86/kvm/mmu/mmu_internal.h
> @@ -101,14 +101,14 @@ void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
>  static inline void kvm_mmu_get_root(struct kvm *kvm, struct kvm_mmu_page *sp)
>  {
>  	BUG_ON(!sp->root_count);
> -	lockdep_assert_held(&kvm->mmu_lock);
> +	kvm_mmu_lock_assert_held(kvm);
>  
>  	++sp->root_count;
>  }
>  
>  static inline bool kvm_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *sp)
>  {
> -	lockdep_assert_held(&kvm->mmu_lock);
> +	kvm_mmu_lock_assert_held(kvm);
>  	--sp->root_count;
>  
>  	return !sp->root_count;
> diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
> index fb911ca428b2..1d7c01300495 100644
> --- a/arch/x86/kvm/mmu/tdp_mmu.c
> +++ b/arch/x86/kvm/mmu/tdp_mmu.c
> @@ -117,7 +117,7 @@ void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root)
>  {
>  	gfn_t max_gfn = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
>  
> -	lockdep_assert_held(&kvm->mmu_lock);
> +	kvm_mmu_lock_assert_held(kvm);
>  
>  	WARN_ON(root->root_count);
>  	WARN_ON(!root->tdp_mmu_page);
> @@ -425,7 +425,7 @@ static inline void __tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
>  	struct kvm_mmu_page *root = sptep_to_sp(root_pt);
>  	int as_id = kvm_mmu_page_as_id(root);
>  
> -	lockdep_assert_held(&kvm->mmu_lock);
> +	kvm_mmu_lock_assert_held(kvm);
>  
>  	WRITE_ONCE(*iter->sptep, new_spte);
>  
> @@ -1139,7 +1139,7 @@ void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
>  	struct kvm_mmu_page *root;
>  	int root_as_id;
>  
> -	lockdep_assert_held(&kvm->mmu_lock);
> +	kvm_mmu_lock_assert_held(kvm);
>  	for_each_tdp_mmu_root(kvm, root) {
>  		root_as_id = kvm_mmu_page_as_id(root);
>  		if (root_as_id != slot->as_id)
> @@ -1324,7 +1324,7 @@ bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
>  	int root_as_id;
>  	bool spte_set = false;
>  
> -	lockdep_assert_held(&kvm->mmu_lock);
> +	kvm_mmu_lock_assert_held(kvm);
>  	for_each_tdp_mmu_root(kvm, root) {
>  		root_as_id = kvm_mmu_page_as_id(root);
>  		if (root_as_id != slot->as_id)
> diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
> index 6e2773fc406c..022e3522788f 100644
> --- a/include/linux/kvm_host.h
> +++ b/include/linux/kvm_host.h
> @@ -1499,5 +1499,6 @@ void kvm_mmu_lock(struct kvm *kvm);
>  void kvm_mmu_unlock(struct kvm *kvm);
>  int kvm_mmu_lock_needbreak(struct kvm *kvm);
>  int kvm_mmu_lock_cond_resched(struct kvm *kvm);
> +void kvm_mmu_lock_assert_held(struct kvm *kvm);

Probably better to make this an empty inline if !defined(CONFIG_LOCKDEP).

Paolo

> 
>  #endif
> diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
> index b4c49a7e0556..c504f876176b 100644
> --- a/virt/kvm/kvm_main.c
> +++ b/virt/kvm/kvm_main.c
> @@ -452,6 +452,11 @@ int kvm_mmu_lock_cond_resched(struct kvm *kvm)
>  	return cond_resched_lock(&kvm->mmu_lock);
>  }
>  
> +void kvm_mmu_lock_assert_held(struct kvm *kvm)
> +{
> +	lockdep_assert_held(&kvm->mmu_lock);
> +}
> +
>  #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
>  static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
>  {
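For illustration, a minimal sketch of what Paolo's suggestion above could look like; the placement in include/linux/kvm_host.h is an assumption, and this is not the applied change:

/* Sketch of the suggested !CONFIG_LOCKDEP fallback, not part of this patch. */
#ifdef CONFIG_LOCKDEP
void kvm_mmu_lock_assert_held(struct kvm *kvm);
#else
static inline void kvm_mmu_lock_assert_held(struct kvm *kvm)
{
	/* lockdep is compiled out, so there is nothing to assert. */
}
#endif

Since lockdep_assert_held() already expands to a no-op when CONFIG_LOCKDEP is disabled, the inline variant would only avoid the empty out-of-line call, not change behavior.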
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 57ef1ec23b56..8b54eb58bf47 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -130,7 +130,7 @@ static void __unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64
 	struct kvm *kvm = mmu->kvm;
 	phys_addr_t end = start + size;
 
-	assert_spin_locked(&kvm->mmu_lock);
+	kvm_mmu_lock_assert_held(kvm);
 	WARN_ON(size & ~PAGE_MASK);
 	WARN_ON(stage2_apply_range(kvm, start, end, kvm_pgtable_stage2_unmap,
 				   may_block));
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index 9bb9bb370b53..db2e437cd97c 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -650,8 +650,8 @@ static inline pte_t *find_kvm_secondary_pte(struct kvm *kvm, unsigned long ea,
 {
 	pte_t *pte;
 
-	VM_WARN(!spin_is_locked(&kvm->mmu_lock),
-		"%s called with kvm mmu_lock not held \n", __func__);
+	kvm_mmu_lock_assert_held(kvm);
+
 	pte = __find_linux_pte(kvm->arch.pgtable, ea, NULL, hshift);
 
 	return pte;
@@ -662,8 +662,7 @@ static inline pte_t *find_kvm_host_pte(struct kvm *kvm, unsigned long mmu_seq,
 {
 	pte_t *pte;
 
-	VM_WARN(!spin_is_locked(&kvm->mmu_lock),
-		"%s called with kvm mmu_lock not held \n", __func__);
+	kvm_mmu_lock_assert_held(kvm);
 
 	if (mmu_notifier_retry(kvm, mmu_seq))
 		return NULL;
diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c
index 18890dca9476..6d5987d1eee7 100644
--- a/arch/powerpc/kvm/book3s_hv_nested.c
+++ b/arch/powerpc/kvm/book3s_hv_nested.c
@@ -767,8 +767,7 @@ pte_t *find_kvm_nested_guest_pte(struct kvm *kvm, unsigned long lpid,
 	if (!gp)
 		return NULL;
 
-	VM_WARN(!spin_is_locked(&kvm->mmu_lock),
-		"%s called with kvm mmu_lock not held \n", __func__);
+	kvm_mmu_lock_assert_held(kvm);
 	pte = __find_linux_pte(gp->shadow_pgtable, ea, NULL, hshift);
 
 	return pte;
diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
index 7f599cc64178..cc8268cf28d2 100644
--- a/arch/x86/kvm/mmu/mmu_internal.h
+++ b/arch/x86/kvm/mmu/mmu_internal.h
@@ -101,14 +101,14 @@ void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
 static inline void kvm_mmu_get_root(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
 	BUG_ON(!sp->root_count);
-	lockdep_assert_held(&kvm->mmu_lock);
+	kvm_mmu_lock_assert_held(kvm);
 
 	++sp->root_count;
 }
 
 static inline bool kvm_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
-	lockdep_assert_held(&kvm->mmu_lock);
+	kvm_mmu_lock_assert_held(kvm);
 	--sp->root_count;
 
 	return !sp->root_count;
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index fb911ca428b2..1d7c01300495 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -117,7 +117,7 @@ void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root)
 {
 	gfn_t max_gfn = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
 
-	lockdep_assert_held(&kvm->mmu_lock);
+	kvm_mmu_lock_assert_held(kvm);
 
 	WARN_ON(root->root_count);
 	WARN_ON(!root->tdp_mmu_page);
@@ -425,7 +425,7 @@ static inline void __tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
 	struct kvm_mmu_page *root = sptep_to_sp(root_pt);
 	int as_id = kvm_mmu_page_as_id(root);
 
-	lockdep_assert_held(&kvm->mmu_lock);
+	kvm_mmu_lock_assert_held(kvm);
 
 	WRITE_ONCE(*iter->sptep, new_spte);
 
@@ -1139,7 +1139,7 @@ void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
 	struct kvm_mmu_page *root;
 	int root_as_id;
 
-	lockdep_assert_held(&kvm->mmu_lock);
+	kvm_mmu_lock_assert_held(kvm);
 	for_each_tdp_mmu_root(kvm, root) {
 		root_as_id = kvm_mmu_page_as_id(root);
 		if (root_as_id != slot->as_id)
@@ -1324,7 +1324,7 @@ bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
 	int root_as_id;
 	bool spte_set = false;
 
-	lockdep_assert_held(&kvm->mmu_lock);
+	kvm_mmu_lock_assert_held(kvm);
 	for_each_tdp_mmu_root(kvm, root) {
 		root_as_id = kvm_mmu_page_as_id(root);
 		if (root_as_id != slot->as_id)
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 6e2773fc406c..022e3522788f 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1499,5 +1499,6 @@ void kvm_mmu_lock(struct kvm *kvm);
 void kvm_mmu_unlock(struct kvm *kvm);
 int kvm_mmu_lock_needbreak(struct kvm *kvm);
 int kvm_mmu_lock_cond_resched(struct kvm *kvm);
+void kvm_mmu_lock_assert_held(struct kvm *kvm);
 
 #endif
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index b4c49a7e0556..c504f876176b 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -452,6 +452,11 @@ int kvm_mmu_lock_cond_resched(struct kvm *kvm)
 	return cond_resched_lock(&kvm->mmu_lock);
 }
 
+void kvm_mmu_lock_assert_held(struct kvm *kvm)
+{
+	lockdep_assert_held(&kvm->mmu_lock);
+}
+
 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
 static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
 {
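A note on why lockdep_assert_held() is the more general primitive: assert_spin_locked() and spin_is_locked() are tied to spinlock_t, while lockdep_assert_held() only consults the lock's lockdep map, so the wrapper keeps working across the rwlock conversion the commit message points to. A hypothetical sketch (the struct name and the rwlock_t field are assumptions about the later refactoring, not code from this patch):

#include <linux/spinlock.h>
#include <linux/lockdep.h>

/* Hypothetical stand-in for struct kvm_arch after the rwlock conversion. */
struct example_arch {
	rwlock_t mmu_lock;	/* spinlock_t today; rwlock_t once x86 converts */
};

static void example_mmu_lock_assert_held(struct example_arch *arch)
{
	/* Passes for any lock lockdep tracks, whether held for read or write. */
	lockdep_assert_held(&arch->mmu_lock);
}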