@@ -387,7 +387,7 @@ struct kvm_arch {
 	unsigned int n_free_mmu_pages;
 	unsigned int n_requested_mmu_pages;
 	unsigned int n_alloc_mmu_pages;
-	atomic_t invlpg_counter;
+	unsigned int invlpg_counter;
 	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
 	/*
 	 * Hash table of struct kvm_mmu_page.
@@ -466,7 +466,8 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 	struct kvm_shadow_walk_iterator iterator;
 	gfn_t gfn = -1;
 	u64 *sptep = NULL, gentry;
-	int invlpg_counter, level, offset = 0, need_flush = 0;
+	unsigned int invlpg_counter;
+	int level, offset = 0, need_flush = 0;
 
 	spin_lock(&vcpu->kvm->mmu_lock);
@@ -502,7 +503,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 	if (need_flush)
 		kvm_flush_remote_tlbs(vcpu->kvm);
-	invlpg_counter = atomic_add_return(1, &vcpu->kvm->arch.invlpg_counter);
+	invlpg_counter = ++vcpu->kvm->arch.invlpg_counter;
 	spin_unlock(&vcpu->kvm->mmu_lock);
 	if (gfn == -1)
@@ -522,7 +523,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 		kvm_mmu_free_page(vcpu->kvm, sp);
 		goto unlock_exit;
 	}
-	if (atomic_read(&vcpu->kvm->arch.invlpg_counter) == invlpg_counter &&
+	if (vcpu->kvm->arch.invlpg_counter == invlpg_counter &&
 	    sp->role.level == PT_PAGE_TABLE_LEVEL) {
 		++vcpu->kvm->stat.mmu_pte_updated;
 		FNAME(update_pte)(vcpu, sp, sptep, &gentry);
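The change relies on every access to invlpg_counter, both the increment in FNAME(invlpg) and the later comparison before the pte update, happening while mmu_lock is held, so the atomic can be replaced by a plain unsigned int. Below is a minimal userspace sketch of that lock-protected counter pattern, using a pthread mutex in place of mmu_lock; the names (guard, bump_counter, still_current) are illustrative and not part of the patch.

```c
/* Userspace analogue of the invlpg_counter pattern: a plain unsigned int
 * is safe without atomics as long as every increment and every read
 * happens under the same lock. Names are illustrative only. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t guard = PTHREAD_MUTEX_INITIALIZER; /* stands in for mmu_lock */
static unsigned int invlpg_counter;                        /* plain counter, no atomic */

/* Bump the counter under the lock and return the observed value,
 * mirroring "invlpg_counter = ++vcpu->kvm->arch.invlpg_counter;". */
static unsigned int bump_counter(void)
{
	unsigned int snap;

	pthread_mutex_lock(&guard);
	snap = ++invlpg_counter;
	pthread_mutex_unlock(&guard);
	return snap;
}

/* After re-taking the lock, check that no other invalidation ran in
 * between, mirroring "if (vcpu->kvm->arch.invlpg_counter == invlpg_counter ...)". */
static bool still_current(unsigned int snap)
{
	bool ok;

	pthread_mutex_lock(&guard);
	ok = (invlpg_counter == snap);
	pthread_mutex_unlock(&guard);
	return ok;
}

int main(void)
{
	unsigned int snap = bump_counter();

	/* ... lock dropped here; other threads may bump the counter ... */

	if (still_current(snap))
		printf("no intervening invalidation, cached value still usable\n");
	else
		printf("counter moved, discard the cached value\n");
	return 0;
}
```

Once both the writer and the reader hold the lock, the atomic read-modify-write buys nothing; the plain increment and comparison give the same ordering guarantees at lower cost.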