@@ -902,7 +902,13 @@ enum kvm_irqchip_mode {
#define APICV_INHIBIT_REASON_X2APIC 5
struct kvm_arch {
- spinlock_t mmu_lock;
+ union {
+ /* Used if the TDP MMU is enabled. */
+ rwlock_t mmu_rwlock;
+
+ /* Used if the TDP MMU is not enabled. */
+ spinlock_t mmu_lock;
+ };
unsigned long n_used_mmu_pages;
unsigned long n_requested_mmu_pages;
@@ -5471,6 +5471,11 @@ void kvm_mmu_init_vm(struct kvm *kvm)
kvm_mmu_init_tdp_mmu(kvm);
+ if (kvm->arch.tdp_mmu_enabled)
+ rwlock_init(&kvm->arch.mmu_rwlock);
+ else
+ spin_lock_init(&kvm->arch.mmu_lock);
+
node->track_write = kvm_mmu_pte_write;
node->track_flush_slot = kvm_mmu_invalidate_zap_pages_in_memslot;
kvm_page_track_register_notifier(kvm, node);
@@ -6074,3 +6079,87 @@ void kvm_mmu_pre_destroy_vm(struct kvm *kvm)
if (kvm->arch.nx_lpage_recovery_thread)
kthread_stop(kvm->arch.nx_lpage_recovery_thread);
}
+
+void kvm_mmu_lock_shared(struct kvm *kvm)
+{
+ WARN_ON(!kvm->arch.tdp_mmu_enabled);
+ read_lock(&kvm->arch.mmu_rwlock);
+}
+
+void kvm_mmu_unlock_shared(struct kvm *kvm)
+{
+ WARN_ON(!kvm->arch.tdp_mmu_enabled);
+ read_unlock(&kvm->arch.mmu_rwlock);
+}
+
+void kvm_mmu_lock_exclusive(struct kvm *kvm)
+{
+ WARN_ON(!kvm->arch.tdp_mmu_enabled);
+ write_lock(&kvm->arch.mmu_rwlock);
+}
+
+void kvm_mmu_unlock_exclusive(struct kvm *kvm)
+{
+ WARN_ON(!kvm->arch.tdp_mmu_enabled);
+ write_unlock(&kvm->arch.mmu_rwlock);
+}
+
+void kvm_mmu_lock(struct kvm *kvm)
+{
+ if (kvm->arch.tdp_mmu_enabled)
+ kvm_mmu_lock_exclusive(kvm);
+ else
+ spin_lock(&kvm->arch.mmu_lock);
+}
+EXPORT_SYMBOL_GPL(kvm_mmu_lock);
+
+void kvm_mmu_unlock(struct kvm *kvm)
+{
+ if (kvm->arch.tdp_mmu_enabled)
+ kvm_mmu_unlock_exclusive(kvm);
+ else
+ spin_unlock(&kvm->arch.mmu_lock);
+}
+EXPORT_SYMBOL_GPL(kvm_mmu_unlock);
+
+int kvm_mmu_lock_needbreak(struct kvm *kvm)
+{
+ if (kvm->arch.tdp_mmu_enabled)
+ return rwlock_needbreak(&kvm->arch.mmu_rwlock);
+ else
+ return spin_needbreak(&kvm->arch.mmu_lock);
+}
+
+int kvm_mmu_lock_cond_resched_exclusive(struct kvm *kvm)
+{
+ WARN_ON(!kvm->arch.tdp_mmu_enabled);
+ return cond_resched_rwlock_write(&kvm->arch.mmu_rwlock);
+}
+
+int kvm_mmu_lock_cond_resched(struct kvm *kvm)
+{
+ if (kvm->arch.tdp_mmu_enabled)
+ return kvm_mmu_lock_cond_resched_exclusive(kvm);
+ else
+ return cond_resched_lock(&kvm->arch.mmu_lock);
+}
+
+void kvm_mmu_lock_assert_held_shared(struct kvm *kvm)
+{
+ WARN_ON(!kvm->arch.tdp_mmu_enabled);
+ lockdep_assert_held_read(&kvm->arch.mmu_rwlock);
+}
+
+void kvm_mmu_lock_assert_held_exclusive(struct kvm *kvm)
+{
+ WARN_ON(!kvm->arch.tdp_mmu_enabled);
+ lockdep_assert_held_write(&kvm->arch.mmu_rwlock);
+}
+
+void kvm_mmu_lock_assert_held(struct kvm *kvm)
+{
+ if (kvm->arch.tdp_mmu_enabled)
+ lockdep_assert_held(&kvm->arch.mmu_rwlock);
+ else
+ lockdep_assert_held(&kvm->arch.mmu_lock);
+}
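
For clarity, a minimal sketch of how a caller is expected to choose between the shared and exclusive primitives added above. This is illustrative only and not part of the patch; example_handle_fault and the two fault helpers are hypothetical names. The intent of the series is that TDP MMU page faults can eventually run under the read lock, while all other paths keep serializing on the exclusive lock.

        /* Illustrative only, not part of this patch. */
        static int example_handle_fault(struct kvm *kvm, gpa_t gpa)
        {
                int r;

                if (kvm->arch.tdp_mmu_enabled) {
                        /* Read-side faults could run concurrently under the rwlock. */
                        kvm_mmu_lock_shared(kvm);
                        r = example_tdp_mmu_fault(kvm, gpa);    /* hypothetical */
                        kvm_mmu_unlock_shared(kvm);
                } else {
                        /* The shadow MMU still requires exclusive access. */
                        kvm_mmu_lock(kvm);
                        r = example_shadow_mmu_fault(kvm, gpa); /* hypothetical */
                        kvm_mmu_unlock(kvm);
                }

                return r;
        }
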
@@ -149,4 +149,13 @@ void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);
void account_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp);
void unaccount_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp);
+void kvm_mmu_lock_shared(struct kvm *kvm);
+void kvm_mmu_unlock_shared(struct kvm *kvm);
+void kvm_mmu_lock_exclusive(struct kvm *kvm);
+void kvm_mmu_unlock_exclusive(struct kvm *kvm);
+int kvm_mmu_lock_cond_resched_exclusive(struct kvm *kvm);
+void kvm_mmu_lock_assert_held_shared(struct kvm *kvm);
+void kvm_mmu_lock_assert_held_exclusive(struct kvm *kvm);
+void kvm_mmu_lock_assert_held(struct kvm *kvm);
+
#endif /* __KVM_X86_MMU_INTERNAL_H */
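
A hedged sketch of the usual KVM yield pattern that the needbreak/cond_resched wrappers defined in the mmu.c hunk above are meant to serve. The loop and example_zap_gfn are hypothetical; real callers such as the long-running zap loops follow the same shape.

        /* Illustrative only, not part of this patch. */
        static void example_zap_range(struct kvm *kvm, gfn_t start, gfn_t end)
        {
                gfn_t gfn;

                kvm_mmu_lock(kvm);

                for (gfn = start; gfn < end; gfn++) {
                        /*
                         * Drop and reacquire the MMU lock if it is contended
                         * or if this CPU needs to reschedule.
                         */
                        if (need_resched() || kvm_mmu_lock_needbreak(kvm))
                                kvm_mmu_lock_cond_resched(kvm);

                        example_zap_gfn(kvm, gfn);      /* hypothetical */
                }

                kvm_mmu_unlock(kvm);
        }
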
@@ -59,7 +59,7 @@ static void tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root)
static inline bool tdp_mmu_next_root_valid(struct kvm *kvm,
struct kvm_mmu_page *root)
{
- kvm_mmu_lock_assert_held(kvm);
+ kvm_mmu_lock_assert_held_exclusive(kvm);
if (list_entry_is_head(root, &kvm->arch.tdp_mmu_roots, link))
return false;
@@ -117,7 +117,7 @@ void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root)
{
gfn_t max_gfn = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
- kvm_mmu_lock_assert_held(kvm);
+ kvm_mmu_lock_assert_held_exclusive(kvm);
WARN_ON(root->root_count);
WARN_ON(!root->tdp_mmu_page);
@@ -425,7 +425,7 @@ static inline void __tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
struct kvm_mmu_page *root = sptep_to_sp(root_pt);
int as_id = kvm_mmu_page_as_id(root);
- kvm_mmu_lock_assert_held(kvm);
+ kvm_mmu_lock_assert_held_exclusive(kvm);
WRITE_ONCE(*iter->sptep, new_spte);
@@ -1139,7 +1139,7 @@ void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
struct kvm_mmu_page *root;
int root_as_id;
- kvm_mmu_lock_assert_held(kvm);
+ kvm_mmu_lock_assert_held_exclusive(kvm);
for_each_tdp_mmu_root(kvm, root) {
root_as_id = kvm_mmu_page_as_id(root);
if (root_as_id != slot->as_id)
@@ -1324,7 +1324,7 @@ bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
int root_as_id;
bool spte_set = false;
- kvm_mmu_lock_assert_held(kvm);
+ kvm_mmu_lock_assert_held_exclusive(kvm);
for_each_tdp_mmu_root(kvm, root) {
root_as_id = kvm_mmu_page_as_id(root);
if (root_as_id != slot->as_id)
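
The hunks above switch the existing TDP MMU assertions to the exclusive variant because all of these paths still take the MMU lock for write. A hedged sketch of the kind of read-only walker that the shared variants appear to be reserved for; this is entirely hypothetical, nothing in this patch adds such a caller.

        /* Illustrative only, not part of this patch. */
        static u64 example_read_spte(struct kvm *kvm, u64 *sptep)
        {
                /* A read-only walk would only need the lock in shared mode. */
                kvm_mmu_lock_assert_held_shared(kvm);

                return READ_ONCE(*sptep);
        }
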
@@ -10366,8 +10366,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
if (type)
return -EINVAL;
- spin_lock_init(&kvm->arch.mmu_lock);
-
INIT_HLIST_HEAD(&kvm->arch.mask_notifier_list);
INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages);
@@ -432,27 +432,27 @@ void kvm_vcpu_destroy(struct kvm_vcpu *vcpu)
}
EXPORT_SYMBOL_GPL(kvm_vcpu_destroy);
-void kvm_mmu_lock(struct kvm *kvm)
+__weak void kvm_mmu_lock(struct kvm *kvm)
{
spin_lock(&kvm->arch.mmu_lock);
}
-void kvm_mmu_unlock(struct kvm *kvm)
+__weak void kvm_mmu_unlock(struct kvm *kvm)
{
spin_unlock(&kvm->arch.mmu_lock);
}
-int kvm_mmu_lock_needbreak(struct kvm *kvm)
+__weak int kvm_mmu_lock_needbreak(struct kvm *kvm)
{
return spin_needbreak(&kvm->arch.mmu_lock);
}
-int kvm_mmu_lock_cond_resched(struct kvm *kvm)
+__weak int kvm_mmu_lock_cond_resched(struct kvm *kvm)
{
return cond_resched_lock(&kvm->arch.mmu_lock);
}
-void kvm_mmu_lock_assert_held(struct kvm *kvm)
+__weak void kvm_mmu_lock_assert_held(struct kvm *kvm)
{
lockdep_assert_held(&kvm->arch.mmu_lock);
}