@@ -1195,12 +1195,15 @@ static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
--kvm->stat.mmu_unsync;
}

-static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp);
+static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
+ int *self_deleted);

static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{
+ int self_deleted;
+
if (sp->role.cr4_pae != !!is_pae(vcpu)) {
- kvm_mmu_zap_page(vcpu->kvm, sp);
+ kvm_mmu_zap_page(vcpu->kvm, sp, &self_deleted);
return 1;
}

@@ -1209,7 +1212,7 @@ static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
kvm_flush_remote_tlbs(vcpu->kvm);
kvm_unlink_unsync_page(vcpu->kvm, sp);
if (vcpu->arch.mmu.sync_page(vcpu, sp)) {
- kvm_mmu_zap_page(vcpu->kvm, sp);
+ kvm_mmu_zap_page(vcpu->kvm, sp, &self_deleted);
return 1;
}

@@ -1471,6 +1474,7 @@ static int mmu_zap_unsync_children(struct kvm *kvm,
int i, zapped = 0;
struct mmu_page_path parents;
struct kvm_mmu_pages pages;
+ int self_deleted;

if (parent->role.level == PT_PAGE_TABLE_LEVEL)
return 0;
@@ -1480,7 +1484,7 @@ static int mmu_zap_unsync_children(struct kvm *kvm,
struct kvm_mmu_page *sp;

for_each_sp(pages, sp, parents, i) {
- kvm_mmu_zap_page(kvm, sp);
+ kvm_mmu_zap_page(kvm, sp, &self_deleted);
mmu_pages_clear_parents(&parents);
zapped++;
}
@@ -1490,10 +1494,12 @@ static int mmu_zap_unsync_children(struct kvm *kvm,
return zapped;
}

-static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
+static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
+ int *self_deleted)
{
int ret;

+ *self_deleted = 0;
trace_kvm_mmu_zap_page(sp);
++kvm->stat.mmu_shadow_zapped;
ret = mmu_zap_unsync_children(kvm, sp);
@@ -1507,11 +1513,15 @@ static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
if (!sp->root_count) {
hlist_del(&sp->hash_link);
kvm_mmu_free_page(kvm, sp);
+ /* Count self */
+ ret++;
+ *self_deleted = 1;
} else {
sp->role.invalid = 1;
list_move(&sp->link, &kvm->arch.active_mmu_pages);
kvm_reload_remote_mmus(kvm);
}
+
kvm_mmu_reset_last_pte_updated(kvm);
return ret;
}
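
[Illustration only, not part of the patch.] The heart of the change is the function above: kvm_mmu_zap_page() now folds the zapped page itself into its return value whenever the page is really freed, and reports that fact through the new out parameter. A minimal, self-contained sketch of the contract, with hypothetical toy types and names (toy_page, toy_zap_page) standing in for the kernel's:

#include <stdio.h>
#include <stdlib.h>

struct toy_page {
	int root_count;		/* nonzero: still in use as a root */
	int invalid;
	int nr_unsync_children;	/* zapped along with this page */
};

static int toy_zap_page(struct toy_page *sp, int *self_deleted)
{
	int ret;

	*self_deleted = 0;
	ret = sp->nr_unsync_children;	/* stands in for mmu_zap_unsync_children() */
	sp->nr_unsync_children = 0;

	if (!sp->root_count) {
		free(sp);
		ret++;			/* count self, as the patch does */
		*self_deleted = 1;
	} else {
		sp->invalid = 1;	/* kept alive until the last root goes away */
	}
	return ret;
}

int main(void)
{
	struct toy_page *sp = calloc(1, sizeof(*sp));
	int self_deleted, ret;

	sp->nr_unsync_children = 2;
	ret = toy_zap_page(sp, &self_deleted);	/* root_count == 0: freed */
	printf("zapped %d page(s), self_deleted = %d\n", ret, self_deleted);
	return 0;
}

Callers that only need the side effect, such as kvm_sync_page() above, simply pass a throwaway &self_deleted and ignore the result.
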
@@ -1523,6 +1533,7 @@ static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
{
int used_pages;
+ int self_deleted;

used_pages = kvm->arch.n_alloc_mmu_pages - kvm->arch.n_free_mmu_pages;
used_pages = max(0, used_pages);
@@ -1540,8 +1551,8 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)

page = container_of(kvm->arch.active_mmu_pages.prev,
struct kvm_mmu_page, link);
- used_pages -= kvm_mmu_zap_page(kvm, page);
- used_pages--;
+ used_pages -= kvm_mmu_zap_page(kvm, page,
+ &self_deleted);
}
kvm_nr_mmu_pages = used_pages;
kvm->arch.n_free_mmu_pages = 0;
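
[Illustration only, not part of the patch.] With the page itself now included in the return value, the hand-rolled "used_pages--" removed above becomes redundant. A hypothetical walk-through of the accounting, with invented numbers:

#include <stdio.h>

int main(void)
{
	int used_pages = 10;
	int ret = 3;	/* say: two unsync children plus the page itself */

	/* old scheme: the return value (2) counted children only, so the
	 * caller compensated by hand: used_pages -= 2; used_pages--;  -> 7 */

	/* new scheme: the page itself is already in ret */
	used_pages -= ret;	/* -> 7, same result in one statement */
	printf("used_pages = %d\n", used_pages);
	return 0;
}

Note that this relies on the page actually being freed; when root_count is still nonzero, kvm_mmu_zap_page() only marks the page invalid and ret does not include it.
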
@@ -1560,6 +1571,8 @@ static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
struct kvm_mmu_page *sp;
struct hlist_node *node, *n;
int r;
+ int self_deleted;
+ int ret;

pgprintk("%s: looking for gfn %lx\n", __func__, gfn);
r = 0;
@@ -1571,7 +1584,8 @@ restart:
pgprintk("%s: gfn %lx role %x\n", __func__, gfn,
sp->role.word);
r = 1;
- if (kvm_mmu_zap_page(kvm, sp))
+ ret = kvm_mmu_zap_page(kvm, sp, &self_deleted);
+ if (ret > 1 || (ret == 1 && self_deleted == 0))
goto restart;
}
return r;
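
[Illustration only, not part of the patch.] The restart test introduced here, and repeated in mmu_unshadow(), kvm_mmu_pte_write() and kvm_mmu_zap_all() below, encodes "some page other than sp was unlinked, so the iterator's saved next pointer may be stale". A hypothetical helper (zapped_other_pages is not in the patch) spelling out the four cases:

/* The open-coded test "ret > 1 || (ret == 1 && self_deleted == 0)"
 * is true exactly when ret - self_deleted > 0, i.e. when pages other
 * than sp were freed:
 *
 *   ret == 0                    -> nothing freed; keep walking
 *   ret == 1, self_deleted == 1 -> only sp freed; the _safe iterator
 *                                  already handles that; keep walking
 *   ret == 1, self_deleted == 0 -> one other page freed; restart
 *   ret  > 1                    -> several pages freed; restart
 */
static inline int zapped_other_pages(int ret, int self_deleted)
{
	return ret - self_deleted > 0;
}
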
@@ -1583,6 +1597,8 @@ static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
struct hlist_head *bucket;
struct kvm_mmu_page *sp;
struct hlist_node *node, *nn;
+ int ret;
+ int self_deleted;

index = kvm_page_table_hashfn(gfn);
bucket = &kvm->arch.mmu_page_hash[index];
@@ -1592,7 +1608,8 @@ restart:
&& !sp->role.invalid) {
pgprintk("%s: zap %lx %x\n",
__func__, gfn, sp->role.word);
- if (kvm_mmu_zap_page(kvm, sp))
+ ret = kvm_mmu_zap_page(kvm, sp, &self_deleted);
+ if (ret > 1 || (ret == 1 && self_deleted == 0))
goto restart;
}
}
@@ -2018,6 +2035,7 @@ static void mmu_free_roots(struct kvm_vcpu *vcpu)
{
int i;
struct kvm_mmu_page *sp;
+ int self_deleted;

if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
return;
@@ -2028,7 +2046,7 @@ static void mmu_free_roots(struct kvm_vcpu *vcpu)
sp = page_header(root);
--sp->root_count;
if (!sp->root_count && sp->role.invalid)
- kvm_mmu_zap_page(vcpu->kvm, sp);
+ kvm_mmu_zap_page(vcpu->kvm, sp, &self_deleted);
vcpu->arch.mmu.root_hpa = INVALID_PAGE;
spin_unlock(&vcpu->kvm->mmu_lock);
return;
@@ -2041,7 +2059,7 @@ static void mmu_free_roots(struct kvm_vcpu *vcpu)
sp = page_header(root);
--sp->root_count;
if (!sp->root_count && sp->role.invalid)
- kvm_mmu_zap_page(vcpu->kvm, sp);
+ kvm_mmu_zap_page(vcpu->kvm, sp, &self_deleted);
}
vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
}
@@ -2616,6 +2634,8 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
int npte;
int r;
int invlpg_counter;
+ int ret;
+ int self_deleted;

pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);

@@ -2694,7 +2714,9 @@ restart:
*/
pgprintk("misaligned: gpa %llx bytes %d role %x\n",
gpa, bytes, sp->role.word);
- if (kvm_mmu_zap_page(vcpu->kvm, sp))
+
+ ret = kvm_mmu_zap_page(vcpu->kvm, sp, &self_deleted);
+ if (ret > 1 || (ret == 1 && self_deleted == 0))
goto restart;
++vcpu->kvm->stat.mmu_flooded;
continue;
@@ -2756,13 +2778,15 @@ EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt);

void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
{
+ int self_deleted;
+
while (vcpu->kvm->arch.n_free_mmu_pages < KVM_REFILL_PAGES &&
!list_empty(&vcpu->kvm->arch.active_mmu_pages)) {
struct kvm_mmu_page *sp;

sp = container_of(vcpu->kvm->arch.active_mmu_pages.prev,
struct kvm_mmu_page, link);
- kvm_mmu_zap_page(vcpu->kvm, sp);
+ kvm_mmu_zap_page(vcpu->kvm, sp, &self_deleted);
++vcpu->kvm->stat.mmu_recycled;
}
}
@@ -2902,13 +2926,16 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
void kvm_mmu_zap_all(struct kvm *kvm)
{
struct kvm_mmu_page *sp, *node;
+ int ret;
+ int self_deleted;

spin_lock(&kvm->mmu_lock);
restart:
- list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link)
- if (kvm_mmu_zap_page(kvm, sp))
+ list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) {
+ ret = kvm_mmu_zap_page(kvm, sp, &self_deleted);
+ if (ret > 1 || (ret == 1 && self_deleted == 0))
goto restart;
-
+ }
spin_unlock(&kvm->mmu_lock);

kvm_flush_remote_tlbs(kvm);
@@ -2917,10 +2944,11 @@ restart:
static void kvm_mmu_remove_one_alloc_mmu_page(struct kvm *kvm)
{
struct kvm_mmu_page *page;
+ int self_deleted;

page = container_of(kvm->arch.active_mmu_pages.prev,
struct kvm_mmu_page, link);
- kvm_mmu_zap_page(kvm, page);
+ kvm_mmu_zap_page(kvm, page, &self_deleted);
}

static int mmu_shrink(int nr_to_scan, gfp_t gfp_mask)