@@ -535,8 +535,8 @@ FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 {
 	struct kvm_memory_slot *slot;
 	unsigned pte_access;
+	struct page *page;
 	gfn_t gfn;
-	kvm_pfn_t pfn;
 
 	if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte))
 		return false;
@@ -549,12 +549,11 @@ FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 	if (!slot)
 		return false;
 
-	pfn = gfn_to_pfn_memslot_atomic(slot, gfn);
-	if (is_error_pfn(pfn))
+	if (gfn_to_page_many_atomic(slot, gfn, &page, 1) != 1)
 		return false;
 
-	mmu_set_spte(vcpu, slot, spte, pte_access, gfn, pfn, NULL);
-	kvm_release_pfn_clean(pfn);
+	mmu_set_spte(vcpu, slot, spte, pte_access, gfn, page_to_pfn(page), NULL);
+	kvm_release_page_clean(page);
 
 	return true;
 }
Use gfn_to_page_many_atomic() instead of gfn_to_pfn_memslot_atomic() when
prefetching indirect PTEs (direct_pte_prefetch_many() already uses the
"to page" APIs).  Functionally, the two are subtly equivalent, as the
"to pfn" API short-circuits hva_to_pfn() if hva_to_pfn_fast() fails,
i.e. it is just a wrapper for get_user_page_fast_only()/
get_user_pages_fast_only().  Switching to the "to page" API will allow
dropping the @atomic parameter from the entire hva_to_pfn() callchain.

Signed-off-by: Sean Christopherson <seanjc@google.com>
---
 arch/x86/kvm/mmu/paging_tmpl.h | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)
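
As an illustration of the equivalence argument above, here is a minimal
userspace C model (all types and helpers below are simplified stand-ins
for the real KVM/GUP machinery, not actual kernel code): with
@atomic == true, hva_to_pfn() can only take the hva_to_pfn_fast() path,
so both the "to pfn" and "to page" atomic lookups reduce to the same
fast-only GUP and therefore succeed or fail together.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel types. */
typedef unsigned long kvm_pfn_t;
struct page { kvm_pfn_t pfn; };

#define KVM_PFN_ERR_FAULT	((kvm_pfn_t)-1)

/*
 * Stand-in for get_user_page_fast_only(): succeeds only if the page is
 * already faulted in (no sleeping allowed).  Faked here with a single
 * statically "present" page at one hva.
 */
static struct page present_page = { .pfn = 42 };

static bool fake_gup_fast_only(unsigned long hva, struct page **page)
{
	if (hva != 0x1000)	/* pretend only this hva is mapped */
		return false;
	*page = &present_page;
	return true;
}

/*
 * Model of the "to pfn" path with @atomic == true: the sleepable slow
 * path is never attempted, so the lookup reduces to the fast-only GUP.
 */
static kvm_pfn_t model_hva_to_pfn_atomic(unsigned long hva)
{
	struct page *page;

	if (fake_gup_fast_only(hva, &page))	/* hva_to_pfn_fast() */
		return page->pfn;
	return KVM_PFN_ERR_FAULT;	/* atomic: no slow-path fallback */
}

/*
 * Model of the "to page" path for a single page: also just the
 * fast-only GUP, returning the number of pages grabbed.
 */
static int model_get_page_atomic(unsigned long hva, struct page **page)
{
	return fake_gup_fast_only(hva, page) ? 1 : 0;
}

int main(void)
{
	struct page *page;
	unsigned long hva;

	/* The two paths agree for every hva, mapped or not. */
	for (hva = 0x0; hva <= 0x2000; hva += 0x1000) {
		bool pfn_ok = model_hva_to_pfn_atomic(hva) != KVM_PFN_ERR_FAULT;
		bool page_ok = model_get_page_atomic(hva, &page) == 1;

		printf("hva %#lx: pfn path %d, page path %d\n",
		       hva, pfn_ok, page_ok);
	}
	return 0;
}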