Message ID | 20220921173546.2674386-4-dmatlack@google.com
---|---
State | New, archived
Series | KVM: x86/mmu: Make tdp_mmu read-only and clean up TDP MMU fault handler
On Wed, Sep 21, 2022 at 10:35:39AM -0700, David Matlack <dmatlack@google.com> wrote:
> Grab mmu_invalidate_seq in kvm_faultin_pfn() and stash it in struct
> kvm_page_fault. This eliminates duplicate code and reduces the number
> of parameters needed for is_page_fault_stale().
>
> Preemptively split out __kvm_faultin_pfn() to a separate function for
> use in subsequent commits.
>
> No functional change intended.
>
> Signed-off-by: David Matlack <dmatlack@google.com>

Reviewed-by: Isaku Yamahata <isaku.yamahata@intel.com>
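The signature change described above is the classic "stash derived state in the context struct" cleanup. As a hypothetical, self-contained illustration of why it shrinks both call sites, here is a before/after in plain C; every type and name in it (kvm_like, page_fault, is_stale_v1/v2) is invented for this sketch and is not KVM's API:

#include <stdbool.h>
#include <stdio.h>

/* Invented stand-ins; not kernel types. */
struct kvm_like {
	unsigned long invalidate_seq;
};

struct page_fault {
	unsigned long mmu_seq;	/* the snapshot now lives with the fault */
};

/* Before: every caller must snapshot the seq and thread it through. */
static bool is_stale_v1(struct kvm_like *kvm, struct page_fault *fault,
			unsigned long mmu_seq)
{
	(void)fault;
	return mmu_seq != kvm->invalidate_seq;
}

/* After: the snapshot travels inside the fault context. */
static bool is_stale_v2(struct kvm_like *kvm, struct page_fault *fault)
{
	return fault->mmu_seq != kvm->invalidate_seq;
}

int main(void)
{
	struct kvm_like kvm = { .invalidate_seq = 7 };
	struct page_fault fault = { .mmu_seq = 7 };

	printf("v1 stale: %d\n", is_stale_v1(&kvm, &fault, fault.mmu_seq));
	kvm.invalidate_seq++;	/* an invalidation races in */
	printf("v2 stale: %d\n", is_stale_v2(&kvm, &fault));
	return 0;
}

Beyond saving a parameter, the v2 shape arguably removes a footgun: the only sequence snapshot that exists is the one taken where the pfn is resolved, so a caller cannot accidentally pass one taken at the wrong time.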
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index dd261cd2ad4e..31b835d20762 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4129,7 +4129,7 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
 	kvm_mmu_do_page_fault(vcpu, work->cr2_or_gpa, 0, true);
 }
 
-static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
+static int __kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 {
 	struct kvm_memory_slot *slot = fault->slot;
 	bool async;
@@ -4185,12 +4185,20 @@ static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 	return RET_PF_CONTINUE;
 }
 
+static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
+{
+	fault->mmu_seq = vcpu->kvm->mmu_invalidate_seq;
+	smp_rmb();
+
+	return __kvm_faultin_pfn(vcpu, fault);
+}
+
 /*
  * Returns true if the page fault is stale and needs to be retried, i.e. if the
  * root was invalidated by a memslot update or a relevant mmu_notifier fired.
  */
 static bool is_page_fault_stale(struct kvm_vcpu *vcpu,
-				struct kvm_page_fault *fault, int mmu_seq)
+				struct kvm_page_fault *fault)
 {
 	struct kvm_mmu_page *sp = to_shadow_page(vcpu->arch.mmu->root.hpa);
 
@@ -4210,14 +4218,12 @@ static bool is_page_fault_stale(struct kvm_vcpu *vcpu,
 		return true;
 
 	return fault->slot &&
-	       mmu_invalidate_retry_hva(vcpu->kvm, mmu_seq, fault->hva);
+	       mmu_invalidate_retry_hva(vcpu->kvm, fault->mmu_seq, fault->hva);
 }
 
 static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 {
 	bool is_tdp_mmu_fault = is_tdp_mmu(vcpu->arch.mmu);
-
-	unsigned long mmu_seq;
 	int r;
 
 	fault->gfn = fault->addr >> PAGE_SHIFT;
@@ -4234,9 +4240,6 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
 	if (r)
 		return r;
 
-	mmu_seq = vcpu->kvm->mmu_invalidate_seq;
-	smp_rmb();
-
 	r = kvm_faultin_pfn(vcpu, fault);
 	if (r != RET_PF_CONTINUE)
 		return r;
@@ -4252,7 +4255,7 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
 	else
 		write_lock(&vcpu->kvm->mmu_lock);
 
-	if (is_page_fault_stale(vcpu, fault, mmu_seq))
+	if (is_page_fault_stale(vcpu, fault))
 		goto out_unlock;
 
 	r = make_mmu_pages_available(vcpu);
diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
index 582def531d4d..1c0a1e7c796d 100644
--- a/arch/x86/kvm/mmu/mmu_internal.h
+++ b/arch/x86/kvm/mmu/mmu_internal.h
@@ -221,6 +221,7 @@ struct kvm_page_fault {
 	struct kvm_memory_slot *slot;
 
 	/* Outputs of kvm_faultin_pfn.  */
+	unsigned long mmu_seq;
 	kvm_pfn_t pfn;
 	hva_t hva;
 	bool map_writable;
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index 39e0205e7300..98f4abce4eaf 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -791,7 +791,6 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
 {
 	struct guest_walker walker;
 	int r;
-	unsigned long mmu_seq;
 	bool is_self_change_mapping;
 
 	pgprintk("%s: addr %lx err %x\n", __func__, fault->addr, fault->error_code);
@@ -838,9 +837,6 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
 	else
 		fault->max_level = walker.level;
 
-	mmu_seq = vcpu->kvm->mmu_invalidate_seq;
-	smp_rmb();
-
 	r = kvm_faultin_pfn(vcpu, fault);
 	if (r != RET_PF_CONTINUE)
 		return r;
@@ -871,7 +867,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
 	r = RET_PF_RETRY;
 	write_lock(&vcpu->kvm->mmu_lock);
 
-	if (is_page_fault_stale(vcpu, fault, mmu_seq))
+	if (is_page_fault_stale(vcpu, fault))
 		goto out_unlock;
 
 	r = make_mmu_pages_available(vcpu);
Grab mmu_invalidate_seq in kvm_faultin_pfn() and stash it in struct
kvm_page_fault. This eliminates duplicate code and reduces the number
of parameters needed for is_page_fault_stale().

Preemptively split out __kvm_faultin_pfn() to a separate function for
use in subsequent commits.

No functional change intended.

Signed-off-by: David Matlack <dmatlack@google.com>
---
 arch/x86/kvm/mmu/mmu.c          | 21 ++++++++++++---------
 arch/x86/kvm/mmu/mmu_internal.h |  1 +
 arch/x86/kvm/mmu/paging_tmpl.h  |  6 +-----
 3 files changed, 14 insertions(+), 14 deletions(-)
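For readers following along outside the kernel tree, here is a minimal user-space sketch of the retry protocol the consolidated kvm_faultin_pfn() now encapsulates: snapshot the invalidation sequence, order that read before the slow lookup, then re-check the sequence before committing and retry if it moved. C11 acquire loads stand in for smp_rmb(), and all names (invalidate_seq, fault_ctx, slow_lookup, ...) are invented for the sketch; this is an analogy, not the kernel code. In KVM the re-check runs under mmu_lock and, via mmu_invalidate_retry_hva(), also filters on the faulting hva and root validity, which the sketch omits.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for kvm->mmu_invalidate_seq; invalidators bump it. */
static atomic_ulong invalidate_seq;

struct fault_ctx {
	unsigned long mmu_seq;	/* snapshot, like fault->mmu_seq */
	unsigned long pfn;	/* result of the slow lookup */
};

/* Analogue of __kvm_faultin_pfn(): the slow, lock-free part. */
static int slow_lookup(struct fault_ctx *fault)
{
	fault->pfn = 42;	/* pretend translation work happens here */
	return 0;
}

/*
 * Analogue of the new kvm_faultin_pfn() wrapper: the acquire load
 * (playing smp_rmb()'s role) orders the snapshot before the lookup,
 * so an invalidation racing with the lookup is guaranteed to trip
 * the staleness check below.
 */
static int faultin_pfn(struct fault_ctx *fault)
{
	fault->mmu_seq = atomic_load_explicit(&invalidate_seq,
					      memory_order_acquire);
	return slow_lookup(fault);
}

/* Analogue of is_page_fault_stale(): needs only the fault context. */
static bool fault_is_stale(const struct fault_ctx *fault)
{
	return fault->mmu_seq != atomic_load(&invalidate_seq);
}

int main(void)
{
	struct fault_ctx fault = { 0 };

	do {
		faultin_pfn(&fault);
		/* KVM takes mmu_lock here, before the re-check */
	} while (fault_is_stale(&fault));

	printf("installed pfn %lu (seq %lu)\n", fault.pfn, fault.mmu_seq);
	return 0;
}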