@@ -212,7 +212,7 @@ which time it will be set using the Dirty tracking mechanism described above.
- tsc offset in vmcb
:Comment: 'raw' because updating the tsc offsets must not be preempted.
-:Name: kvm->mmu_lock
+:Name: kvm_arch::mmu_lock
:Type: spinlock_t
:Arch: any
:Protects: -shadow page/shadow tlb entry
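For context, the pattern this lock entry protects looks roughly as follows once the lock lives in struct kvm_arch. This is only a sketch: the function name, the pfn lookup and the installation step are placeholders, not code from this patch.

  static int example_map_gfn(struct kvm *kvm, gfn_t gfn)
  {
          unsigned long mmu_seq;

          /* Snapshot the notifier sequence before the sleepable lookup. */
          mmu_seq = kvm->mmu_notifier_seq;
          smp_rmb();

          /* ...translate gfn to pfn here, without holding mmu_lock... */

          spin_lock(&kvm->arch.mmu_lock);
          if (mmu_notifier_retry(kvm, mmu_seq)) {
                  /* Raced with an MMU notifier invalidation; caller retries. */
                  spin_unlock(&kvm->arch.mmu_lock);
                  return -EAGAIN;
          }
          /* ...install the shadow page / shadow TLB entry... */
          spin_unlock(&kvm->arch.mmu_lock);
          return 0;
  }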
@@ -102,6 +102,8 @@ struct kvm_arch_memory_slot {
};
struct kvm_arch {
+ spinlock_t mmu_lock;
+
struct kvm_s2_mmu mmu;
/* VTCR_EL2 value for this VM */
@@ -130,6 +130,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
int ret;
+ spin_lock_init(&kvm->arch.mmu_lock);
+
ret = kvm_arm_setup_stage2(kvm, type);
if (ret)
return ret;
@@ -216,6 +216,8 @@ struct loongson_kvm_ipi {
#endif
struct kvm_arch {
+ spinlock_t mmu_lock;
+
/* Guest physical mm */
struct mm_struct gpa_mm;
/* Mask of CPUs needing GPA ASID flush */
@@ -150,6 +150,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
return -EINVAL;
};
+ spin_lock_init(&kvm->arch.mmu_lock);
+
/* Allocate page table to map GPA -> RPA */
kvm->arch.gpa_mm.pgd = kvm_pgd_alloc();
if (!kvm->arch.gpa_mm.pgd)
@@ -263,7 +263,7 @@ static bool kvm_mips_flush_gpa_pgd(pgd_t *pgd, unsigned long start_gpa,
*
* Flushes a range of GPA mappings from the GPA page tables.
*
- * The caller must hold the @kvm->mmu_lock spinlock.
+ * The caller must hold the @kvm->arch.mmu_lock spinlock.
*
* Returns: Whether it's safe to remove the top level page directory because
* all lower levels have been removed.
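As a usage sketch (not from this patch), a typical MIPS caller now takes the relocated lock around the GPA flush helpers. The wrapper below assumes the existing kvm_mips_flush_gpa_pt() helper and is purely illustrative.

  static void example_flush_memslot_gpa(struct kvm *kvm,
                                        const struct kvm_memory_slot *slot)
  {
          /* The caller, not the flush helper, holds mmu_lock, as documented above. */
          spin_lock(&kvm->arch.mmu_lock);
          kvm_mips_flush_gpa_pt(kvm, slot->base_gfn,
                                slot->base_gfn + slot->npages - 1);
          spin_unlock(&kvm->arch.mmu_lock);
  }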
@@ -388,7 +388,7 @@ BUILD_PTE_RANGE_OP(mkclean, pte_mkclean)
* Make a range of GPA mappings clean so that guest writes will fault and
* trigger dirty page logging.
*
- * The caller must hold the @kvm->mmu_lock spinlock.
+ * The caller must hold the @kvm->arch.mmu_lock spinlock.
*
* Returns: Whether any GPA mappings were modified, which would require
* derived mappings (GVA page tables & TLB entries) to be
@@ -410,7 +410,7 @@ int kvm_mips_mkclean_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn)
* slot to be write protected
*
* Walks bits set in mask write protects the associated pte's. Caller must
- * acquire @kvm->mmu_lock.
+ * acquire @kvm->arch.mmu_lock.
*/
void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
struct kvm_memory_slot *slot,
@@ -282,6 +282,8 @@ struct kvm_resize_hpt;
#define KVMPPC_SECURE_INIT_ABORT 0x4 /* H_SVM_INIT_ABORT issued */
struct kvm_arch {
+ spinlock_t mmu_lock;
+
unsigned int lpid;
unsigned int smt_mode; /* # vcpus per virtual core */
unsigned int emul_smt_mode; /* emulated SMT mode, on P9 */
@@ -388,7 +388,7 @@ static void kvmppc_pmd_free(pmd_t *pmdp)
kmem_cache_free(kvm_pmd_cache, pmdp);
}
-/* Called with kvm->mmu_lock held */
+/* Called with kvm->arch.mmu_lock held */
void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa,
unsigned int shift,
const struct kvm_memory_slot *memslot,
@@ -992,7 +992,7 @@ int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu,
return ret;
}
-/* Called with kvm->mmu_lock held */
+/* Called with kvm->arch.mmu_lock held */
int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
unsigned long gfn)
{
@@ -1012,7 +1012,7 @@ int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
return 0;
}
-/* Called with kvm->mmu_lock held */
+/* Called with kvm->arch.mmu_lock held */
int kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
unsigned long gfn)
{
@@ -1040,7 +1040,7 @@ int kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
return ref;
}
-/* Called with kvm->mmu_lock held */
+/* Called with kvm->arch.mmu_lock held */
int kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
unsigned long gfn)
{
@@ -1073,7 +1073,7 @@ static int kvm_radix_test_clear_dirty(struct kvm *kvm,
return ret;
/*
- * For performance reasons we don't hold kvm->mmu_lock while walking the
+ * For performance reasons we don't hold kvm->arch.mmu_lock while walking the
* partition scoped table.
*/
ptep = find_kvm_secondary_pte_unlocked(kvm, gpa, &shift);
@@ -545,7 +545,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce_list, &ua))
return H_TOO_HARD;
- arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);
+ arch_spin_lock(&kvm->arch.mmu_lock.rlock.raw_lock);
if (kvmppc_rm_ua_to_hpa(vcpu, mmu_seq, ua, &tces)) {
ret = H_TOO_HARD;
goto unlock_exit;
@@ -590,7 +590,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
unlock_exit:
if (!prereg)
- arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
+ arch_spin_unlock(&kvm->arch.mmu_lock.rlock.raw_lock);
return ret;
}
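These real-mode hcall paths keep open-coding arch_spin_lock() on the underlying raw lock; only the member's location changes. If one wanted to hide the rlock.raw_lock dereference, hypothetical helpers (not part of this patch) could look like:

  /* Hypothetical wrappers for the real-mode call sites above. */
  static inline void kvm_mmu_lock_rm(struct kvm *kvm)
  {
          arch_spin_lock(&kvm->arch.mmu_lock.rlock.raw_lock);
  }

  static inline void kvm_mmu_unlock_rm(struct kvm *kvm)
  {
          arch_spin_unlock(&kvm->arch.mmu_lock.rlock.raw_lock);
  }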
@@ -611,7 +611,7 @@ static void kvmhv_release_nested(struct kvm_nested_guest *gp)
/*
* No vcpu is using this struct and no call to
* kvmhv_get_nested can find this struct,
- * so we don't need to hold kvm->mmu_lock.
+ * so we don't need to hold kvm->arch.mmu_lock.
*/
kvmppc_free_pgtable_radix(kvm, gp->shadow_pgtable,
gp->shadow_lpid);
@@ -892,7 +892,7 @@ static void kvmhv_remove_nest_rmap_list(struct kvm *kvm, unsigned long *rmapp,
}
}
-/* called with kvm->mmu_lock held */
+/* called with kvm->arch.mmu_lock held */
void kvmhv_remove_nest_rmap_range(struct kvm *kvm,
const struct kvm_memory_slot *memslot,
unsigned long gpa, unsigned long hpa,
@@ -249,7 +249,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
/* Translate to host virtual address */
hva = __gfn_to_hva_memslot(memslot, gfn);
- arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);
+ arch_spin_lock(&kvm->arch.mmu_lock.rlock.raw_lock);
ptep = find_kvm_host_pte(kvm, mmu_seq, hva, &hpage_shift);
if (ptep) {
pte_t pte;
@@ -264,7 +264,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
* to <= host page size, if host is using hugepage
*/
if (host_pte_size < psize) {
- arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
+ arch_spin_unlock(&kvm->arch.mmu_lock.rlock.raw_lock);
return H_PARAMETER;
}
pte = kvmppc_read_update_linux_pte(ptep, writing);
@@ -278,7 +278,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
pa |= gpa & ~PAGE_MASK;
}
}
- arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
+ arch_spin_unlock(&kvm->arch.mmu_lock.rlock.raw_lock);
ptel &= HPTE_R_KEY | HPTE_R_PP0 | (psize-1);
ptel |= pa;
@@ -933,7 +933,7 @@ static long kvmppc_do_h_page_init_zero(struct kvm_vcpu *vcpu,
mmu_seq = kvm->mmu_notifier_seq;
smp_rmb();
- arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);
+ arch_spin_lock(&kvm->arch.mmu_lock.rlock.raw_lock);
ret = kvmppc_get_hpa(vcpu, mmu_seq, dest, 1, &pa, &memslot);
if (ret != H_SUCCESS)
@@ -945,7 +945,7 @@ static long kvmppc_do_h_page_init_zero(struct kvm_vcpu *vcpu,
kvmppc_update_dirty_map(memslot, dest >> PAGE_SHIFT, PAGE_SIZE);
out_unlock:
- arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
+ arch_spin_unlock(&kvm->arch.mmu_lock.rlock.raw_lock);
return ret;
}
@@ -961,7 +961,7 @@ static long kvmppc_do_h_page_init_copy(struct kvm_vcpu *vcpu,
mmu_seq = kvm->mmu_notifier_seq;
smp_rmb();
- arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);
+ arch_spin_lock(&kvm->arch.mmu_lock.rlock.raw_lock);
ret = kvmppc_get_hpa(vcpu, mmu_seq, dest, 1, &dest_pa, &dest_memslot);
if (ret != H_SUCCESS)
goto out_unlock;
@@ -976,7 +976,7 @@ static long kvmppc_do_h_page_init_copy(struct kvm_vcpu *vcpu,
kvmppc_update_dirty_map(dest_memslot, dest >> PAGE_SHIFT, PAGE_SIZE);
out_unlock:
- arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
+ arch_spin_unlock(&kvm->arch.mmu_lock.rlock.raw_lock);
return ret;
}
@@ -470,7 +470,7 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
/*
* We are just looking at the wimg bits, so we don't
* care much about the trans splitting bit.
- * We are holding kvm->mmu_lock so a notifier invalidate
+ * We are holding kvm->arch.mmu_lock so a notifier invalidate
* can't run hence pfn won't change.
*/
local_irq_save(flags);
@@ -452,6 +452,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
} else
goto err_out;
+ spin_lock_init(&kvm->arch.mmu_lock);
+
if (kvm_ops->owner && !try_module_get(kvm_ops->owner))
return -ENOENT;
@@ -926,6 +926,8 @@ struct kvm_s390_pv {
};
struct kvm_arch{
+ spinlock_t mmu_lock;
+
void *sca;
int use_esca;
rwlock_t sca_lock;
@@ -2642,6 +2642,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
goto out_err;
#endif
+ spin_lock_init(&kvm->arch.mmu_lock);
+
rc = s390_enable_sie();
if (rc)
goto out_err;
@@ -902,6 +902,8 @@ enum kvm_irqchip_mode {
#define APICV_INHIBIT_REASON_X2APIC 5
struct kvm_arch {
+ spinlock_t mmu_lock;
+
unsigned long n_used_mmu_pages;
unsigned long n_requested_mmu_pages;
unsigned long n_max_mmu_pages;
@@ -5747,7 +5747,7 @@ mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
if (!nr_to_scan--)
break;
/*
- * n_used_mmu_pages is accessed without holding kvm->mmu_lock
+ * n_used_mmu_pages is accessed without holding kvm->arch.mmu_lock
* here. We may skip a VM instance erroneously, but we do not
* want to shrink a VM that only started to populate its MMU
* anyway.
@@ -10366,6 +10366,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
if (type)
return -EINVAL;
+ spin_lock_init(&kvm->arch.mmu_lock);
+
INIT_HLIST_HEAD(&kvm->arch.mask_notifier_list);
INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages);
@@ -451,7 +451,6 @@ struct kvm_memslots {
};
struct kvm {
- spinlock_t mmu_lock;
struct mutex slots_lock;
struct mm_struct *mm; /* userspace tied to this vm */
struct kvm_memslots __rcu *memslots[KVM_ADDRESS_SPACE_NUM];
@@ -434,27 +434,27 @@ EXPORT_SYMBOL_GPL(kvm_vcpu_destroy);
void kvm_mmu_lock(struct kvm *kvm)
{
- spin_lock(&kvm->mmu_lock);
+ spin_lock(&kvm->arch.mmu_lock);
}
void kvm_mmu_unlock(struct kvm *kvm)
{
- spin_unlock(&kvm->mmu_lock);
+ spin_unlock(&kvm->arch.mmu_lock);
}
int kvm_mmu_lock_needbreak(struct kvm *kvm)
{
- return spin_needbreak(&kvm->mmu_lock);
+ return spin_needbreak(&kvm->arch.mmu_lock);
}
int kvm_mmu_lock_cond_resched(struct kvm *kvm)
{
- return cond_resched_lock(&kvm->mmu_lock);
+ return cond_resched_lock(&kvm->arch.mmu_lock);
}
void kvm_mmu_lock_assert_held(struct kvm *kvm)
{
- lockdep_assert_held(&kvm->mmu_lock);
+ lockdep_assert_held(&kvm->arch.mmu_lock);
}
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
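Generic (non-arch) code is expected to keep going through these accessors rather than reaching into kvm->arch directly. A sketch of a generic caller, assuming only the helpers updated above (the walk itself is a placeholder):

  static void example_generic_walk(struct kvm *kvm)
  {
          kvm_mmu_lock(kvm);
          kvm_mmu_lock_assert_held(kvm);          /* lockdep sanity check */

          /* ...walk structures protected by mmu_lock... */

          if (kvm_mmu_lock_needbreak(kvm))
                  kvm_mmu_lock_cond_resched(kvm); /* may drop and retake the lock */

          kvm_mmu_unlock(kvm);
  }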
@@ -770,7 +770,6 @@ static struct kvm *kvm_create_vm(unsigned long type)
if (!kvm)
return ERR_PTR(-ENOMEM);
- spin_lock_init(&kvm->mmu_lock);
mmgrab(current->mm);
kvm->mm = current->mm;
kvm_eventfd_init(kvm);
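Since kvm_create_vm() no longer initializes the lock, every architecture's kvm_arch_init_vm() has to do so before anything can contend on it, as the arm64, MIPS, powerpc, s390 and x86 hunks above do. A minimal sketch of what a new architecture would need:

  int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
  {
          /* Must run before any path that may call kvm_mmu_lock(). */
          spin_lock_init(&kvm->arch.mmu_lock);

          /* ...architecture-specific VM setup... */
          return 0;
  }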