Message ID | 20190926231824.149014-2-bgardon@google.com (mailing list archive) |
---|---|
State | New, archived |
Series | kvm: mmu: Rework the x86 TDP direct mapped case |
On Thu, Sep 26, 2019 at 04:17:57PM -0700, Ben Gardon wrote:
> Separate the functions for generating MMIO page table entries from the
> function that inserts them into the paging structure. This refactoring
> will allow changes to the MMU synchronization model to use atomic
> compare / exchanges (which are not guaranteed to succeed) instead of a
> monolithic MMU lock.
>
> Signed-off-by: Ben Gardon <bgardon@google.com>
> ---
>  arch/x86/kvm/mmu.c | 14 ++++++++++++--
>  1 file changed, 12 insertions(+), 2 deletions(-)
>
> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
> index 5269aa057dfa6..781c2ca7455e3 100644
> --- a/arch/x86/kvm/mmu.c
> +++ b/arch/x86/kvm/mmu.c
> @@ -390,8 +390,7 @@ static u64 get_mmio_spte_generation(u64 spte)
>  	return gen;
>  }
>
> -static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
> -			   unsigned access)
> +static u64 generate_mmio_pte(struct kvm_vcpu *vcpu, u64 gfn, unsigned access)

Maybe get_mmio_spte_value()? I see "generate" and all I can think of is the
generation number and nothing else.

>  {
>  	u64 gen = kvm_vcpu_memslots(vcpu)->generation & MMIO_SPTE_GEN_MASK;
>  	u64 mask = generation_mmio_spte_mask(gen);
> @@ -403,6 +402,17 @@ static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
>  	mask |= (gpa & shadow_nonpresent_or_rsvd_mask)
>  		<< shadow_nonpresent_or_rsvd_mask_len;
>
> +	return mask;
> +}
> +
> +static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
> +			   unsigned access)
> +{
> +	u64 mask = generate_mmio_pte(vcpu, gfn, access);
> +	unsigned int gen = get_mmio_spte_generation(mask);
> +
> +	access = mask & ACC_ALL;
> +
> 	trace_mark_mmio_spte(sptep, gfn, access, gen);
> 	mmu_spte_set(sptep, mask);
> }
> --
> 2.23.0.444.g18eeb5a265-goog
>
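
[Editorial note] The commit message's rationale is that splitting SPTE value generation from insertion lets the insertion step later become a compare / exchange that is allowed to fail. The following is a minimal, standalone userspace sketch of that pattern only, not KVM code and not the eventual kernel implementation; gen_spte() and try_install_spte() are invented names used purely for illustration.

/*
 * Standalone sketch (not KVM code): the value-generation step is pure and
 * side-effect free, so the install step can be a single compare/exchange
 * that is allowed to fail when another thread updated the entry first.
 * gen_spte() and try_install_spte() are hypothetical names for illustration.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SPTE_NONE 0ULL	/* a cleared, non-present entry */

/* Analogous to a "generate" helper: compute the new value, touch nothing. */
static uint64_t gen_spte(uint64_t gfn, unsigned int access)
{
	return (gfn << 12) | (access & 0x7);
}

/*
 * Analogous to the insertion step: install only if the entry still holds the
 * value this thread observed when it started handling the fault; otherwise
 * report failure so the caller can retry the fault.
 */
static bool try_install_spte(_Atomic uint64_t *sptep, uint64_t expected,
			     uint64_t new_val)
{
	return atomic_compare_exchange_strong(sptep, &expected, new_val);
}

int main(void)
{
	_Atomic uint64_t spte = SPTE_NONE;
	uint64_t val = gen_spte(0x1234, 0x3);

	/* First "vCPU" wins the race and installs its entry. */
	if (try_install_spte(&spte, SPTE_NONE, val))
		printf("installed spte = 0x%llx\n",
		       (unsigned long long)atomic_load(&spte));

	/* A racing "vCPU" that still expects an empty entry loses and retries. */
	if (!try_install_spte(&spte, SPTE_NONE, gen_spte(0x5678, 0x3)))
		printf("cmpxchg failed, retry the fault\n");

	return 0;
}

The point of the split is that the generation step has no side effects, so a lost compare / exchange can simply be retried (or the fault re-executed) without any cleanup, which is what removes the need for a monolithic MMU lock around the whole operation.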