@@ -316,6 +316,12 @@ struct kvm_rmap_head {
struct kvm_mmu_page {
struct list_head link;
+
+	/*
+	 * TLB flush with range list uses struct kvm_mmu_page as the list
+	 * entry; all list operations must be performed under mmu_lock.
+	 */
+ struct list_head flush_link;
struct hlist_node hash_link;
bool unsync;
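
For context (not part of the patch): the same kvm_mmu_page must be able to sit
on invalid_list through its existing 'link' member and, at the same time, on a
temporary flush list through the new 'flush_link' member, which is why a second
embedded list_head is added rather than reusing 'link'. Below is a minimal,
self-contained sketch of that two-membership intrusive-list pattern, using
userspace stand-ins for the kernel list helpers (all names here are
illustrative only):

	#include <stdio.h>
	#include <stddef.h>

	struct list_head { struct list_head *next, *prev; };

	#define LIST_HEAD_INIT(name) { &(name), &(name) }
	#define LIST_HEAD(name) struct list_head name = LIST_HEAD_INIT(name)

	/* Insert @entry right after @head, kernel list_add() style. */
	static void list_add(struct list_head *entry, struct list_head *head)
	{
		entry->next = head->next;
		entry->prev = head;
		head->next->prev = entry;
		head->next = entry;
	}

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct demo_page {			/* stand-in for struct kvm_mmu_page */
		struct list_head link;		/* membership on invalid_list */
		struct list_head flush_link;	/* membership on a local flush list */
		int id;
	};

	int main(void)
	{
		LIST_HEAD(invalid_list);
		LIST_HEAD(flush_list);
		struct demo_page p = { .id = 1 };
		struct list_head *pos;

		list_add(&p.link, &invalid_list);	/* first list membership */
		list_add(&p.flush_link, &flush_list);	/* second, independent one */

		for (pos = flush_list.next; pos != &flush_list; pos = pos->next)
			printf("page %d queued for range flush\n",
			       container_of(pos, struct demo_page, flush_link)->id);
		return 0;
	}
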
@@ -443,6 +449,7 @@ struct kvm_mmu {
struct kvm_tlb_range {
u64 start_gfn;
u64 pages;
+ struct list_head *flush_list;
};
enum pmc_type {
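
The intended semantics are that a non-NULL flush_list takes precedence over the
start_gfn/pages pair: the backend walks the queued kvm_mmu_page entries and
flushes the guest-physical span each one maps. A hedged sketch of how a
consumer might interpret the extended struct follows; flush_one_span() is a
made-up stand-in for the real arch/hypervisor flush primitive, not an existing
function:

	/* Sketch only: how a backend could consume the extended kvm_tlb_range. */
	static int demo_flush_with_range(struct kvm *kvm, struct kvm_tlb_range *range)
	{
		struct kvm_mmu_page *sp;

		if (range->flush_list) {
			/* Per-page spans queued via sp->flush_link. */
			list_for_each_entry(sp, range->flush_list, flush_link)
				flush_one_span(kvm, sp->gfn,
					       KVM_PAGES_PER_HPAGE(sp->role.level));
			return 0;
		}

		/* Plain [start_gfn, start_gfn + pages) span. */
		return flush_one_span(kvm, range->start_gfn, range->pages);
	}
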
@@ -289,6 +289,17 @@ static void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
range.start_gfn = start_gfn;
range.pages = pages;
+ range.flush_list = NULL;
+
+ kvm_flush_remote_tlbs_with_range(kvm, &range);
+}
+
+static void kvm_flush_remote_tlbs_with_list(struct kvm *kvm,
+ struct list_head *flush_list)
+{
+ struct kvm_tlb_range range;
+
+ range.flush_list = flush_list;
kvm_flush_remote_tlbs_with_range(kvm, &range);
}
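
kvm_flush_remote_tlbs_with_list() deliberately leaves start_gfn/pages unset,
since a backend is expected to consult flush_list first. The callee is not
shown in this diff; based on the existing kvm_x86_ops->tlb_remote_flush_with_range
hook it is expected to try the range-capable flush and fall back to a full
remote flush on failure, roughly along these lines (a sketch, not the verbatim
upstream body):

	static void kvm_flush_remote_tlbs_with_range(struct kvm *kvm,
			struct kvm_tlb_range *range)
	{
		int ret = -ENOTSUPP;

		/* Try the range/list-capable hook first, if the arch provides one. */
		if (range && kvm_x86_ops->tlb_remote_flush_with_range)
			ret = kvm_x86_ops->tlb_remote_flush_with_range(kvm, range);

		/* Fall back to flushing everything if range flushing is unavailable. */
		if (ret)
			kvm_flush_remote_tlbs(kvm);
	}
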
@@ -2708,6 +2719,7 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
struct list_head *invalid_list)
{
struct kvm_mmu_page *sp, *nsp;
+ LIST_HEAD(flush_list);
if (list_empty(invalid_list))
return;
@@ -2721,7 +2733,17 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
* In addition, kvm_flush_remote_tlbs waits for all vcpus to exit
* guest mode and/or lockless shadow page table walks.
*/
- kvm_flush_remote_tlbs(kvm);
+ if (kvm_available_flush_tlb_with_range()) {
+ list_for_each_entry(sp, invalid_list, link)
+ if (sp->sptep && is_last_spte(*sp->sptep,
+ sp->role.level))
+ list_add(&sp->flush_link, &flush_list);
+
+ if (!list_empty(&flush_list))
+ kvm_flush_remote_tlbs_with_list(kvm, &flush_list);
+ } else {
+ kvm_flush_remote_tlbs(kvm);
+ }
list_for_each_entry_safe(sp, nsp, invalid_list, link) {
WARN_ON(!sp->role.invalid || sp->root_count);
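
kvm_available_flush_tlb_with_range() is likewise outside this hunk; it is
expected to be a simple capability check on the same hook, something like the
sketch below. If the check fails, the commit path above still falls back to a
full kvm_flush_remote_tlbs(), so correctness does not depend on the hook being
present.

	static inline bool kvm_available_flush_tlb_with_range(void)
	{
		/* Range/list flushing is usable only when the arch hook exists. */
		return kvm_x86_ops->tlb_remote_flush_with_range;
	}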