
[RFC,04/28] kvm: mmu: Update the lpages stat atomically

Message ID 20190926231824.149014-5-bgardon@google.com (mailing list archive)
State New, archived
Series kvm: mmu: Rework the x86 TDP direct mapped case

Commit Message

Ben Gardon Sept. 26, 2019, 11:18 p.m. UTC
In order to pave the way for more concurrent MMU operations, updates to
VM-global stats need to be done atomically. Change updates to the lpages
stat to be atomic in preparation for the introduction of parallel page
fault handling.

Signed-off-by: Ben Gardon <bgardon@google.com>
---
 arch/x86/kvm/mmu.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
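
A stand-alone sketch of the problem the commit message describes, in ordinary
user-space C rather than KVM code (the file name, thread setup, and counter
names are illustrative only): a bare ++/-- on a shared counter is a non-atomic
read-modify-write, so two contexts updating it concurrently can lose counts,
which is what parallel page faults would do to a plain lpages.

/*
 * lpages_race.c - illustrative only, not KVM code.
 * Two threads bump a shared counter N times each.  The plain "counter++"
 * total typically comes up short of 2*N because the read-modify-write is
 * not atomic; the __atomic_fetch_add() total is always exactly 2*N, which
 * is the guarantee xadd() provides in the patch.
 * Build: cc -pthread lpages_race.c
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define N 1000000

static int64_t plain_count;   /* updated like "++kvm->stat.lpages" today */
static int64_t atomic_count;  /* updated atomically, like xadd() */

static void *worker(void *arg)
{
        (void)arg;
        for (int i = 0; i < N; i++) {
                plain_count++;          /* racy read-modify-write */
                __atomic_fetch_add(&atomic_count, 1, __ATOMIC_RELAXED);
        }
        return NULL;
}

int main(void)
{
        pthread_t t1, t2;

        pthread_create(&t1, NULL, worker, NULL);
        pthread_create(&t2, NULL, worker, NULL);
        pthread_join(t1, NULL);
        pthread_join(t2, NULL);

        printf("plain  = %lld (expected %d)\n", (long long)plain_count, 2 * N);
        printf("atomic = %lld (expected %d)\n", (long long)atomic_count, 2 * N);
        return 0;
}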

Comments

Sean Christopherson Nov. 27, 2019, 6:39 p.m. UTC | #1
On Thu, Sep 26, 2019 at 04:18:00PM -0700, Ben Gardon wrote:
> In order to pave the way for more concurrent MMU operations, updates to
> VM-global stats need to be done atomically. Change updates to the lpages
> stat to be atomic in preparation for the introduction of parallel page
> fault handling.
> 
> Signed-off-by: Ben Gardon <bgardon@google.com>
> ---
>  arch/x86/kvm/mmu.c | 6 +++---
>  1 file changed, 3 insertions(+), 3 deletions(-)
> 
> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
> index 1ecd6d51c0ee0..56587655aecb9 100644
> --- a/arch/x86/kvm/mmu.c
> +++ b/arch/x86/kvm/mmu.c
> @@ -1532,7 +1532,7 @@ static bool __drop_large_spte(struct kvm *kvm, u64 *sptep)
>  		WARN_ON(page_header(__pa(sptep))->role.level ==
>  			PT_PAGE_TABLE_LEVEL);
>  		drop_spte(kvm, sptep);
> -		--kvm->stat.lpages;
> +		xadd(&kvm->stat.lpages, -1);

Manually doing atomic operations without converting the variable itself to
an atomic type feels like a hack, e.g. it lacks the compile-time checks
provided by the atomics framework.

Tangentially related, should the members of struct kvm_vm_stat be forced
to 64-bit variables to avoid theoretical wrapping on 32-bit KVM?

>  		return true;
>  	}
>  
> @@ -2676,7 +2676,7 @@ static bool mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
>  		if (is_last_spte(pte, sp->role.level)) {
>  			drop_spte(kvm, spte);
>  			if (is_large_pte(pte))
> -				--kvm->stat.lpages;
> +				xadd(&kvm->stat.lpages, -1);
>  		} else {
>  			child = page_header(pte & PT64_BASE_ADDR_MASK);
>  			drop_parent_pte(child, spte);
> @@ -3134,7 +3134,7 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned pte_access,
>  	pgprintk("%s: setting spte %llx\n", __func__, *sptep);
>  	trace_kvm_mmu_set_spte(level, gfn, sptep);
>  	if (!was_rmapped && is_large_pte(*sptep))
> -		++vcpu->kvm->stat.lpages;
> +		xadd(&vcpu->kvm->stat.lpages, 1);
>  
>  	if (is_shadow_present_pte(*sptep)) {
>  		if (!was_rmapped) {
> -- 
> 2.23.0.444.g18eeb5a265-goog
>
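Sean's first point above, about compile-time checks, is easy to see in a small
sketch (the struct and function names here are made up for illustration; only
xadd() and atomic64_t come from the discussion): with a plain u64 field, the
open-coded xadd() and an accidental non-atomic ++ both compile side by side,
whereas an atomic64_t field rejects plain arithmetic and forces every update
through the atomic64_*() helpers.

/* Illustrative kernel-style sketch, not part of the series. */
#include <linux/atomic.h>
#include <linux/types.h>

struct stats_plain  { u64        lpages; };
struct stats_atomic { atomic64_t lpages; };

static void update_lpages(struct stats_plain *p, struct stats_atomic *a)
{
        xadd(&p->lpages, 1);        /* atomic, but nothing enforces it...  */
        p->lpages++;                /* ...so this racy update compiles too */

        atomic64_inc(&a->lpages);   /* only route in: the atomic64_*() API */
        /* a->lpages++; */          /* would not compile: atomic64_t is a
                                       wrapper struct, not a bare integer  */
}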
Ben Gardon Dec. 6, 2019, 8:10 p.m. UTC | #2
I would definitely support changing all the entries in the KVM stat structs
to be 64-bit and making some of them atomic64_t. I agree that doing manual
atomic operations on plain 64-bit integers is fragile.
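
For concreteness, the conversion both messages point at might look roughly
like the sketch below. The field name and the three call sites come from this
patch's hunks; the surrounding struct layout and the assumption that the field
is currently a plain ulong are mine, and none of this is code from the series.

/* arch/x86/include/asm/kvm_host.h (sketch; other counters omitted): */
struct kvm_vm_stat {
        /* ... other ulong counters, also candidates for widening ... */
        atomic64_t lpages;                      /* instead of a plain ulong */
};

/* The three updates this patch touches would then become: */
atomic64_dec(&kvm->stat.lpages);                /* __drop_large_spte() */
atomic64_dec(&kvm->stat.lpages);                /* mmu_page_zap_pte()  */
atomic64_inc(&vcpu->kvm->stat.lpages);          /* mmu_set_spte()      */

/* ...and readers would switch to atomic64_read(&kvm->stat.lpages). */

A full conversion would also have to touch the generic code that exposes these
counters (e.g. through debugfs), which is what makes it a larger change than
the open-coded xadd() in this patch.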


Patch

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 1ecd6d51c0ee0..56587655aecb9 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1532,7 +1532,7 @@  static bool __drop_large_spte(struct kvm *kvm, u64 *sptep)
 		WARN_ON(page_header(__pa(sptep))->role.level ==
 			PT_PAGE_TABLE_LEVEL);
 		drop_spte(kvm, sptep);
-		--kvm->stat.lpages;
+		xadd(&kvm->stat.lpages, -1);
 		return true;
 	}
 
@@ -2676,7 +2676,7 @@  static bool mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
 		if (is_last_spte(pte, sp->role.level)) {
 			drop_spte(kvm, spte);
 			if (is_large_pte(pte))
-				--kvm->stat.lpages;
+				xadd(&kvm->stat.lpages, -1);
 		} else {
 			child = page_header(pte & PT64_BASE_ADDR_MASK);
 			drop_parent_pte(child, spte);
@@ -3134,7 +3134,7 @@  static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned pte_access,
 	pgprintk("%s: setting spte %llx\n", __func__, *sptep);
 	trace_kvm_mmu_set_spte(level, gfn, sptep);
 	if (!was_rmapped && is_large_pte(*sptep))
-		++vcpu->kvm->stat.lpages;
+		xadd(&vcpu->kvm->stat.lpages, 1);
 
 	if (is_shadow_present_pte(*sptep)) {
 		if (!was_rmapped) {