Message ID | 20230203192822.106773-6-vipinsh@google.com (mailing list archive) |
---|---|
State | New, archived |
Series | Optimize clear dirty log |
On Fri, Feb 3, 2023 at 11:28 AM Vipin Sharma <vipinsh@google.com> wrote:
>
> __handle_changed_spte() and handle_changed_spte_acc_track() are always
> used together. Merge these two functions and name the new function
> handle_changed_spte(). Remove the existing handle_changed_spte() function
> which just calls __handle_changed_spte() and
> handle_changed_spte_acc_track().
>
> This converges SPTE change handling code to a single place.
>
> Signed-off-by: Vipin Sharma <vipinsh@google.com>

Reviewed-by: Ben Gardon <bgardon@google.com>

> ---
>  arch/x86/kvm/mmu/tdp_mmu.c | 42 +++++++++++---------------------------
>  1 file changed, 12 insertions(+), 30 deletions(-)
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index afe0dcb1859e..9b0c81a28f97 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -334,17 +334,6 @@ static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
 				u64 old_spte, u64 new_spte, int level,
 				bool shared);
 
-static void handle_changed_spte_acc_track(u64 old_spte, u64 new_spte, int level)
-{
-	if (!is_shadow_present_pte(old_spte) || !is_last_spte(old_spte, level))
-		return;
-
-	if (is_accessed_spte(old_spte) &&
-	    (!is_shadow_present_pte(new_spte) || !is_accessed_spte(new_spte) ||
-	     spte_to_pfn(old_spte) != spte_to_pfn(new_spte)))
-		kvm_set_pfn_accessed(spte_to_pfn(old_spte));
-}
-
 static void tdp_account_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
 	kvm_account_pgtable_pages((void *)sp->spt, +1);
@@ -487,7 +476,7 @@ static void handle_removed_pt(struct kvm *kvm, tdp_ptep_t pt, bool shared)
 }
 
 /**
- * __handle_changed_spte - handle bookkeeping associated with an SPTE change
+ * handle_changed_spte - handle bookkeeping associated with an SPTE change
  * @kvm: kvm instance
  * @as_id: the address space of the paging structure the SPTE was a part of
  * @gfn: the base GFN that was mapped by the SPTE
@@ -501,9 +490,9 @@ static void handle_removed_pt(struct kvm *kvm, tdp_ptep_t pt, bool shared)
  * Handle bookkeeping that might result from the modification of a SPTE.
  * This function must be called for all TDP SPTE modifications.
  */
-static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
-				  u64 old_spte, u64 new_spte, int level,
-				  bool shared)
+static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
+				u64 old_spte, u64 new_spte, int level,
+				bool shared)
 {
 	bool was_present = is_shadow_present_pte(old_spte);
 	bool is_present = is_shadow_present_pte(new_spte);
@@ -587,15 +576,10 @@ static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
 	if (was_present && !was_leaf &&
 	    (is_leaf || !is_present || WARN_ON_ONCE(pfn_changed)))
 		handle_removed_pt(kvm, spte_to_child_pt(old_spte, level), shared);
-}
 
-static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
-				u64 old_spte, u64 new_spte, int level,
-				bool shared)
-{
-	__handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level,
-			      shared);
-	handle_changed_spte_acc_track(old_spte, new_spte, level);
+	if (was_leaf && is_accessed_spte(old_spte) &&
+	    (!is_present || !is_accessed_spte(new_spte) || pfn_changed))
+		kvm_set_pfn_accessed(spte_to_pfn(old_spte));
 }
 
 /*
@@ -638,9 +622,8 @@ static inline int tdp_mmu_set_spte_atomic(struct kvm *kvm,
 	if (!try_cmpxchg64(sptep, &iter->old_spte, new_spte))
 		return -EBUSY;
 
-	__handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
-			      new_spte, iter->level, true);
-	handle_changed_spte_acc_track(iter->old_spte, new_spte, iter->level);
+	handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
+			    new_spte, iter->level, true);
 
 	return 0;
 }
@@ -705,8 +688,7 @@ static u64 _tdp_mmu_set_spte(struct kvm *kvm, int as_id, tdp_ptep_t sptep,
 
 	old_spte = kvm_tdp_mmu_write_spte(sptep, old_spte, new_spte, level);
 
-	__handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level, false);
-	handle_changed_spte_acc_track(old_spte, new_spte, level);
+	handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level, false);
 	return old_spte;
 }
 
@@ -1273,7 +1255,7 @@ static bool set_spte_gfn(struct kvm *kvm, struct tdp_iter *iter,
 	 * Note, when changing a read-only SPTE, it's not strictly necessary to
 	 * zero the SPTE before setting the new PFN, but doing so preserves the
 	 * invariant that the PFN of a present * leaf SPTE can never change.
-	 * See __handle_changed_spte().
+	 * See handle_changed_spte().
 	 */
 	tdp_mmu_set_spte(kvm, iter, 0);
 
@@ -1298,7 +1280,7 @@ bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
 	/*
 	 * No need to handle the remote TLB flush under RCU protection, the
 	 * target SPTE _must_ be a leaf SPTE, i.e. cannot result in freeing a
-	 * shadow page. See the WARN on pfn_changed in __handle_changed_spte().
+	 * shadow page. See the WARN on pfn_changed in handle_changed_spte().
 	 */
 	return kvm_tdp_mmu_handle_gfn(kvm, range, set_spte_gfn);
 }
__handle_changed_spte() and handle_changed_spte_acc_track() are always
used together. Merge these two functions and name the new function
handle_changed_spte(). Remove the existing handle_changed_spte() function
which just calls __handle_changed_spte() and
handle_changed_spte_acc_track().

This converges SPTE change handling code to a single place.

Signed-off-by: Vipin Sharma <vipinsh@google.com>
---
 arch/x86/kvm/mmu/tdp_mmu.c | 42 +++++++++++---------------------------
 1 file changed, 12 insertions(+), 30 deletions(-)
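To see the merged bookkeeping in isolation, the sketch below models the access-tracking tail that the patch folds into the end of handle_changed_spte(). It is a standalone toy, not kernel code: the TOY_* bit layout, the toy_* helpers, toy_set_pfn_accessed(), and passing was_leaf in directly are all simplifications invented for illustration; only the shape of the final condition mirrors the diff above.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Toy SPTE encoding, purely for illustration -- not KVM's real layout. */
#define TOY_PRESENT	(1ULL << 0)
#define TOY_ACCESSED	(1ULL << 1)
#define TOY_PFN_SHIFT	12

static bool toy_present(uint64_t spte)  { return spte & TOY_PRESENT; }
static bool toy_accessed(uint64_t spte) { return spte & TOY_ACCESSED; }
static uint64_t toy_pfn(uint64_t spte)  { return spte >> TOY_PFN_SHIFT; }

/* Stand-in for kvm_set_pfn_accessed(): just report the PFN. */
static void toy_set_pfn_accessed(uint64_t pfn)
{
	printf("pfn 0x%llx marked accessed\n", (unsigned long long)pfn);
}

/*
 * Mirrors only the access-tracking tail of the merged handle_changed_spte():
 * if a leaf SPTE that was accessed is dropped, loses its accessed bit, or now
 * points at a different PFN, propagate the access to the old PFN before that
 * information is lost.
 */
static void toy_handle_changed_spte(uint64_t old_spte, uint64_t new_spte,
				    bool was_leaf)
{
	bool is_present = toy_present(new_spte);
	bool pfn_changed = toy_pfn(old_spte) != toy_pfn(new_spte);

	/* ... dirty-log, refcount and page-table bookkeeping omitted ... */

	if (was_leaf && toy_accessed(old_spte) &&
	    (!is_present || !toy_accessed(new_spte) || pfn_changed))
		toy_set_pfn_accessed(toy_pfn(old_spte));
}

int main(void)
{
	uint64_t old = (0xabcULL << TOY_PFN_SHIFT) | TOY_PRESENT | TOY_ACCESSED;

	toy_handle_changed_spte(old, 0, true);                   /* zapped leaf: reported */
	toy_handle_changed_spte(old, old & ~TOY_ACCESSED, true); /* accessed bit cleared: reported */
	toy_handle_changed_spte(old, old, true);                 /* no relevant change: silent */
	return 0;
}
```

The point of the merge is visible in the callers: tdp_mmu_set_spte_atomic() and _tdp_mmu_set_spte() now make a single handle_changed_spte() call and still get this accessed-bit bookkeeping, instead of pairing __handle_changed_spte() with a separate handle_changed_spte_acc_track() call.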