diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -246,7 +246,7 @@ static inline bool kvm_available_flush_tlb_with_range(void)
return kvm_x86_ops.tlb_remote_flush_with_range;
}

-void kvm_flush_remote_tlbs_range(struct kvm *kvm, u64 start_gfn, u64 pages)
+void kvm_flush_remote_tlbs_range(struct kvm *kvm, gfn_t start_gfn, u64 pages)
{
struct kvm_tlb_range range;
int ret = -EOPNOTSUPP;
diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
--- a/arch/x86/kvm/mmu/mmu_internal.h
+++ b/arch/x86/kvm/mmu/mmu_internal.h
@@ -164,7 +164,7 @@ void kvm_mmu_gfn_allow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn);
bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
struct kvm_memory_slot *slot, u64 gfn,
int min_level);
-void kvm_flush_remote_tlbs_range(struct kvm *kvm, u64 start_gfn, u64 pages);
+void kvm_flush_remote_tlbs_range(struct kvm *kvm, gfn_t start_gfn, u64 pages);
unsigned int pte_list_count(struct kvm_rmap_head *rmap_head);

extern int nx_huge_pages;
Use gfn_t instead of u64 for the start_gfn parameter to
kvm_flush_remote_tlbs_range(), since that is the standard type for GFNs
throughout KVM. No functional change intended.

Signed-off-by: David Matlack <dmatlack@google.com>
---
 arch/x86/kvm/mmu/mmu.c          | 2 +-
 arch/x86/kvm/mmu/mmu_internal.h | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
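For context on why there is no functional change: gfn_t is a plain
typedef of u64 in include/linux/kvm_types.h, so the patch only makes the
parameter self-documenting at the type level. A minimal sketch of the
relevant definitions (comments are illustrative, not verbatim from the
kernel source):

	/* include/linux/kvm_types.h */
	typedef u64 gpa_t;	/* guest physical address */
	typedef u64 gfn_t;	/* guest frame number, i.e. gpa >> PAGE_SHIFT */

	/* include/linux/kvm_host.h: conversions between the two */
	static inline gfn_t gpa_to_gfn(gpa_t gpa)
	{
		return (gfn_t)(gpa >> PAGE_SHIFT);
	}

	static inline gpa_t gfn_to_gpa(gfn_t gfn)
	{
		return (gpa_t)gfn << PAGE_SHIFT;
	}

Since gfn_t and u64 are identical to the compiler, existing callers that
pass a raw u64 still build cleanly; the typedef documents intent rather
than enforcing it.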