@@ -591,21 +591,14 @@ static int kvm_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa, bool writ
 	if (changed) {
 		kvm_set_pte(ptep, new);
 		pfn = kvm_pte_pfn(new);
-	}
-	spin_unlock(&kvm->mmu_lock);
-
-	/*
-	 * Fixme: pfn may be freed after mmu_lock
-	 * kvm_try_get_pfn(pfn)/kvm_release_pfn pair to prevent this?
-	 */
-	if (kvm_pte_young(changed))
-		kvm_set_pfn_accessed(pfn);
+		if (kvm_pte_young(changed))
+			kvm_set_pfn_accessed(pfn);
 
-	if (kvm_pte_dirty(changed)) {
-		mark_page_dirty(kvm, gfn);
-		kvm_set_pfn_dirty(pfn);
+		if (kvm_pte_dirty(changed)) {
+			mark_page_dirty(kvm, gfn);
+			kvm_set_pfn_dirty(pfn);
+		}
 	}
-	return ret;
 out:
 	spin_unlock(&kvm->mmu_lock);
 	return ret;
Function kvm_set_pfn_accessed() is somewhat complicated. Originally,
kvm_set_pfn_accessed() and kvm_set_pfn_dirty() were moved out of mmu_lock
in order to reduce the coverage of the lock. However, in a migration test
where the VM runs some workload, the VM stops responding once migration
finishes. After mmu_lock is released, the pfn page may be replaced with
another page, and it is wrong to mark the old, replaced pfn page as
accessed or dirty. Here kvm_set_pfn_accessed() and kvm_set_pfn_dirty()
are called with mmu_lock held, and the VM keeps working after many rounds
of migration.

Signed-off-by: Bibo Mao <maobibo@loongson.cn>
---
 arch/loongarch/kvm/mmu.c | 19 ++++++-------------
 1 file changed, 6 insertions(+), 13 deletions(-)


base-commit: 2df0193e62cf887f373995fb8a91068562784adc
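
To make the result easier to read than the raw hunk, here is a sketch of
how the tail of kvm_map_page_fast() looks with the patch applied
(assembled from the hunk above; the comment is illustrative, not verbatim
from the file):

	if (changed) {
		kvm_set_pte(ptep, new);
		pfn = kvm_pte_pfn(new);

		/*
		 * mmu_lock is still held here, so the page backing this
		 * pfn cannot be replaced before it is marked accessed
		 * or dirty.
		 */
		if (kvm_pte_young(changed))
			kvm_set_pfn_accessed(pfn);
		if (kvm_pte_dirty(changed)) {
			mark_page_dirty(kvm, gfn);
			kvm_set_pfn_dirty(pfn);
		}
	}
out:
	spin_unlock(&kvm->mmu_lock);
	return ret;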