@@ -970,7 +970,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 	int i, nr_present = 0;
 	bool host_writable;
 	gpa_t first_pte_gpa;
-	int set_spte_ret = 0;
+	HLIST_HEAD(flush_list);
 
 	/* direct kvm_mmu_page can not be unsync. */
 	BUG_ON(sp->role.direct);
@@ -978,6 +978,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 	first_pte_gpa = FNAME(get_level1_sp_gpa)(sp);
 
 	for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
+		int set_spte_ret = 0;
 		unsigned pte_access;
 		pt_element_t gpte;
 		gpa_t pte_gpa;
@@ -1027,14 +1028,20 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 
 		host_writable = sp->spt[i] & SPTE_HOST_WRITEABLE;
 
-		set_spte_ret |= set_spte(vcpu, &sp->spt[i],
+		set_spte_ret = set_spte(vcpu, &sp->spt[i],
 					 pte_access, PT_PAGE_TABLE_LEVEL,
 					 gfn, spte_to_pfn(sp->spt[i]),
 					 true, false, host_writable);
+
+		if (set_spte_ret & SET_SPTE_NEED_REMOTE_TLB_FLUSH) {
+			struct kvm_mmu_page *leaf_sp = page_header(sp->spt[i]
+							& PT64_BASE_ADDR_MASK);
+			hlist_add_head(&leaf_sp->flush_link, &flush_list);
+		}
+
 	}
 
-	if (set_spte_ret & SET_SPTE_NEED_REMOTE_TLB_FLUSH)
-		kvm_flush_remote_tlbs(vcpu->kvm);
+	kvm_flush_remote_tlbs_with_list(vcpu->kvm, &flush_list);
 
 	return nr_present;
 }
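
kvm_flush_remote_tlbs_with_list() and the flush_link member of struct
kvm_mmu_page are introduced by other patches in this series and are not
visible in this hunk. As a minimal sketch only, assuming the helper simply
walks the hlist and issues one ranged flush per queued shadow page, and
assuming kvm_available_flush_tlb_with_range() and
kvm_flush_remote_tlbs_with_address() also come from this series, it could
look roughly like:

/*
 * Sketch, not the series' actual implementation: flush_link,
 * kvm_available_flush_tlb_with_range() and
 * kvm_flush_remote_tlbs_with_address() are assumed to exist already.
 */
void kvm_flush_remote_tlbs_with_list(struct kvm *kvm,
				     struct hlist_head *flush_list)
{
	struct kvm_mmu_page *sp;

	/* Nothing was queued, so no remote TLB entry can be stale. */
	if (hlist_empty(flush_list))
		return;

	/* Without ranged-flush support, fall back to a full remote flush. */
	if (!kvm_available_flush_tlb_with_range()) {
		kvm_flush_remote_tlbs(kvm);
		return;
	}

	/* One ranged flush per shadow page whose spte was re-synced. */
	hlist_for_each_entry(sp, flush_list, flush_link)
		kvm_flush_remote_tlbs_with_address(kvm, sp->gfn,
				KVM_PAGES_PER_HPAGE(sp->role.level));
}

Since sync_page() now calls the helper unconditionally, the real
implementation presumably also returns early on an empty list, which is what
keeps the "nothing to flush" case as cheap as the old
SET_SPTE_NEED_REMOTE_TLB_FLUSH check it replaces.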