Message ID | 20240726235234.228822-66-seanjc@google.com (mailing list archive)
---|---
State | Superseded
Series | KVM: Stop grabbing references to PFNMAP'd pages
On 2024/7/27 7:52 AM, Sean Christopherson wrote:
> Mark pages accessed only in the slow path, before dropping mmu_lock when
> faulting in guest memory so that LoongArch can convert to
> kvm_release_faultin_page() without tripping its lockdep assertion on
> mmu_lock being held.
>
> Signed-off-by: Sean Christopherson <seanjc@google.com>
> ---
>  arch/loongarch/kvm/mmu.c | 20 ++------------------
>  1 file changed, 2 insertions(+), 18 deletions(-)
>
> diff --git a/arch/loongarch/kvm/mmu.c b/arch/loongarch/kvm/mmu.c
> index 364dd35e0557..52b5c16cf250 100644
> --- a/arch/loongarch/kvm/mmu.c
> +++ b/arch/loongarch/kvm/mmu.c
> @@ -552,12 +552,10 @@ bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
>  static int kvm_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa, bool write)
>  {
>  	int ret = 0;
> -	kvm_pfn_t pfn = 0;
>  	kvm_pte_t *ptep, changed, new;
>  	gfn_t gfn = gpa >> PAGE_SHIFT;
>  	struct kvm *kvm = vcpu->kvm;
>  	struct kvm_memory_slot *slot;
> -	struct page *page;
>  
>  	spin_lock(&kvm->mmu_lock);
>  
> @@ -570,8 +568,6 @@ static int kvm_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa, bool writ
>  
>  	/* Track access to pages marked old */
>  	new = kvm_pte_mkyoung(*ptep);
> -	/* call kvm_set_pfn_accessed() after unlock */
> -
>  	if (write && !kvm_pte_dirty(new)) {
>  		if (!kvm_pte_write(new)) {
>  			ret = -EFAULT;
> @@ -595,23 +591,11 @@ static int kvm_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa, bool writ
>  	}
>  
>  	changed = new ^ (*ptep);
> -	if (changed) {
> +	if (changed)
>  		kvm_set_pte(ptep, new);
> -		pfn = kvm_pte_pfn(new);
> -		page = kvm_pfn_to_refcounted_page(pfn);
> -		if (page)
> -			get_page(page);
> -	}
> +
>  	spin_unlock(&kvm->mmu_lock);
>  
> -	if (changed) {
> -		if (kvm_pte_young(changed))
> -			kvm_set_pfn_accessed(pfn);
> -
> -		if (page)
> -			put_page(page);
> -	}
> -
>  	if (kvm_pte_dirty(changed))
>  		mark_page_dirty(kvm, gfn);
>
Reviewed-by: Bibo Mao <maobibo@loongson.cn>
diff --git a/arch/loongarch/kvm/mmu.c b/arch/loongarch/kvm/mmu.c
index 364dd35e0557..52b5c16cf250 100644
--- a/arch/loongarch/kvm/mmu.c
+++ b/arch/loongarch/kvm/mmu.c
@@ -552,12 +552,10 @@ bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
 static int kvm_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa, bool write)
 {
 	int ret = 0;
-	kvm_pfn_t pfn = 0;
 	kvm_pte_t *ptep, changed, new;
 	gfn_t gfn = gpa >> PAGE_SHIFT;
 	struct kvm *kvm = vcpu->kvm;
 	struct kvm_memory_slot *slot;
-	struct page *page;
 
 	spin_lock(&kvm->mmu_lock);
 
@@ -570,8 +568,6 @@ static int kvm_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa, bool writ
 
 	/* Track access to pages marked old */
 	new = kvm_pte_mkyoung(*ptep);
-	/* call kvm_set_pfn_accessed() after unlock */
-
 	if (write && !kvm_pte_dirty(new)) {
 		if (!kvm_pte_write(new)) {
 			ret = -EFAULT;
@@ -595,23 +591,11 @@ static int kvm_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa, bool writ
 	}
 
 	changed = new ^ (*ptep);
-	if (changed) {
+	if (changed)
 		kvm_set_pte(ptep, new);
-		pfn = kvm_pte_pfn(new);
-		page = kvm_pfn_to_refcounted_page(pfn);
-		if (page)
-			get_page(page);
-	}
+
 	spin_unlock(&kvm->mmu_lock);
 
-	if (changed) {
-		if (kvm_pte_young(changed))
-			kvm_set_pfn_accessed(pfn);
-
-		if (page)
-			put_page(page);
-	}
-
 	if (kvm_pte_dirty(changed))
 		mark_page_dirty(kvm, gfn);
Mark pages accessed only in the slow path, before dropping mmu_lock when
faulting in guest memory so that LoongArch can convert to
kvm_release_faultin_page() without tripping its lockdep assertion on
mmu_lock being held.

Signed-off-by: Sean Christopherson <seanjc@google.com>
---
 arch/loongarch/kvm/mmu.c | 20 ++------------------
 1 file changed, 2 insertions(+), 18 deletions(-)
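For readability, here is roughly how the tail of kvm_map_page_fast() reads once
this patch is applied, reassembled from the context and '+' lines of the diff
above. This is only a sketch of the affected region (the rest of the function
and the slow-path fault handler are elided), not the full upstream file:

	/* Update the PTE under mmu_lock; no page references are taken anymore. */
	changed = new ^ (*ptep);
	if (changed)
		kvm_set_pte(ptep, new);

	spin_unlock(&kvm->mmu_lock);

	/*
	 * The old post-unlock kvm_set_pfn_accessed()/put_page() block is gone:
	 * per the changelog, accessed state is now reported only in the slow
	 * path, before mmu_lock is dropped.
	 */
	if (kvm_pte_dirty(changed))
		mark_page_dirty(kvm, gfn);

With the fast path touching only the PTE itself, all page bookkeeping stays
inside mmu_lock in the slow path, which is what allows the later conversion to
kvm_release_faultin_page() without tripping its lockdep assertion.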