diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
@@ -1125,6 +1125,7 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
phys_addr_t start = (base_gfn + __ffs(mask)) << PAGE_SHIFT;
phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT;
+ write_lock(&kvm->mmu_lock);
lockdep_assert_held_write(&kvm->mmu_lock);
stage2_wp_range(&kvm->arch.mmu, start, end);
@@ -1139,6 +1140,7 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
*/
if (kvm_dirty_log_manual_protect_and_init_set(kvm))
kvm_mmu_split_huge_pages(kvm, start, end);
+ write_unlock(&kvm->mmu_lock);
}
static void kvm_send_hwpoison_signal(unsigned long address, short lsb)
diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
@@ -419,7 +419,9 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
gfn_t start = base_gfn + __ffs(mask);
gfn_t end = base_gfn + __fls(mask);
+ spin_lock(&kvm->mmu_lock);
kvm_mips_mkclean_gpa_pt(kvm, start, end);
+ spin_unlock(&kvm->mmu_lock);
}
/*
diff --git a/arch/riscv/kvm/mmu.c b/arch/riscv/kvm/mmu.c
@@ -399,7 +399,9 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
phys_addr_t start = (base_gfn + __ffs(mask)) << PAGE_SHIFT;
phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT;
+ spin_lock(&kvm->mmu_lock);
gstage_wp_range(kvm, start, end);
+ spin_unlock(&kvm->mmu_lock);
}
void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
@@ -1382,6 +1382,7 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
struct kvm_memory_slot *slot,
gfn_t gfn_offset, unsigned long mask)
{
+ write_lock(&kvm->mmu_lock);
/*
* Huge pages are NOT write protected when we start dirty logging in
* initially-all-set mode; must write protect them here so that they
@@ -1412,6 +1413,8 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
kvm_mmu_clear_dirty_pt_masked(kvm, slot, gfn_offset, mask);
else
kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
+
+ write_unlock(&kvm->mmu_lock);
}
int kvm_cpu_dirty_log_size(void)
diff --git a/virt/kvm/dirty_ring.c b/virt/kvm/dirty_ring.c
@@ -66,9 +66,7 @@ static void kvm_reset_dirty_gfn(struct kvm *kvm, u32 slot, u64 offset, u64 mask)
if (!memslot || (offset + __fls(mask)) >= memslot->npages)
return;
- KVM_MMU_LOCK(kvm);
kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot, offset, mask);
- KVM_MMU_UNLOCK(kvm);
}
int kvm_dirty_ring_alloc(struct kvm_dirty_ring *ring, int index, u32 size)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
@@ -2160,7 +2160,6 @@ static int kvm_get_dirty_log_protect(struct kvm *kvm, struct kvm_dirty_log *log)
dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot);
memset(dirty_bitmap_buffer, 0, n);
- KVM_MMU_LOCK(kvm);
for (i = 0; i < n / sizeof(long); i++) {
unsigned long mask;
gfn_t offset;
@@ -2176,7 +2175,6 @@ static int kvm_get_dirty_log_protect(struct kvm *kvm, struct kvm_dirty_log *log)
kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
offset, mask);
}
- KVM_MMU_UNLOCK(kvm);
}
if (flush)
@@ -2271,7 +2269,6 @@ static int kvm_clear_dirty_log_protect(struct kvm *kvm,
if (copy_from_user(dirty_bitmap_buffer, log->dirty_bitmap, n))
return -EFAULT;
- KVM_MMU_LOCK(kvm);
for (offset = log->first_page, i = offset / BITS_PER_LONG,
n = DIV_ROUND_UP(log->num_pages, BITS_PER_LONG); n--;
i++, offset += BITS_PER_LONG) {
@@ -2294,7 +2291,6 @@ static int kvm_clear_dirty_log_protect(struct kvm *kvm,
offset, mask);
}
}
- KVM_MMU_UNLOCK(kvm);
if (flush)
kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
Move mmu_lock lock and unlock calls from common code in
kvm_clear_dirty_log_protect() to arch specific code in
kvm_arch_mmu_enable_log_dirty_pt_masked(). None of the other code
inside the for loop of kvm_clear_dirty_log_protect() needs mmu_lock
exclusivity apart from the arch specific API call. The lock is likewise
dropped from the other common callers of the arch API,
kvm_get_dirty_log_protect() and kvm_reset_dirty_gfn(), since the arch
code now takes it.

Future commits will change clear dirty log operations under mmu read
lock instead of write lock for ARM and, potentially, x86 architectures.

No functional changes intended.

Signed-off-by: Vipin Sharma <vipinsh@google.com>
---
 arch/arm64/kvm/mmu.c   | 2 ++
 arch/mips/kvm/mmu.c    | 2 ++
 arch/riscv/kvm/mmu.c   | 2 ++
 arch/x86/kvm/mmu/mmu.c | 3 +++
 virt/kvm/dirty_ring.c  | 2 --
 virt/kvm/kvm_main.c    | 4 ----
 6 files changed, 9 insertions(+), 6 deletions(-)
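
As background for the "future commits" mentioned above, the sketch below is
illustrative only and not part of this patch: it assumes a possible follow-up
in which arm64 switches this path to the MMU read lock. The point of moving
the locking into kvm_arch_mmu_enable_log_dirty_pt_masked() is that such a
change stays entirely inside the arch hook; the common callers are untouched.
Whether stage2_wp_range() and kvm_mmu_split_huge_pages() are safe under the
read lock is an assumption that the follow-up series would have to establish.

/*
 * Illustrative sketch, not part of this patch: a hypothetical later
 * arm64 version of the hook taking the MMU lock for read.
 */
void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
					     struct kvm_memory_slot *slot,
					     gfn_t gfn_offset, unsigned long mask)
{
	phys_addr_t base_gfn = slot->base_gfn + gfn_offset;
	phys_addr_t start = (base_gfn + __ffs(mask)) << PAGE_SHIFT;
	phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT;

	/* Read lock instead of the write lock taken in this patch. */
	read_lock(&kvm->mmu_lock);

	/* Write protect only the range covered by the dirty bitmap mask. */
	stage2_wp_range(&kvm->arch.mmu, start, end);

	if (kvm_dirty_log_manual_protect_and_init_set(kvm))
		kvm_mmu_split_huge_pages(kvm, start, end);

	read_unlock(&kvm->mmu_lock);
}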