[v1,06/13] KVM: x86/mmu: Refactor tdp_mmu iterators to take kvm_mmu_page root

Message ID: 20211213225918.672507-7-dmatlack@google.com
State: New, archived
Series: KVM: x86/mmu: Eager Page Splitting for the TDP MMU

Commit Message

David Matlack Dec. 13, 2021, 10:59 p.m. UTC
Instead of passing a pointer to the root page table and the root level
separately, pass in a pointer to the kvm_mmu_page that backs the root.
This reduces the number of arguments by 1, cutting down on line lengths.

No functional change intended.

Signed-off-by: David Matlack <dmatlack@google.com>
Reviewed-by: Ben Gardon <bgardon@google.com>
---
 arch/x86/kvm/mmu/tdp_iter.c |  5 ++++-
 arch/x86/kvm/mmu/tdp_iter.h | 10 +++++-----
 arch/x86/kvm/mmu/tdp_mmu.c  | 14 +++++---------
 3 files changed, 14 insertions(+), 15 deletions(-)
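
For context, a minimal sketch of the two kvm_mmu_page fields this refactor
leans on (abbreviated; the real struct in arch/x86/kvm/mmu/mmu_internal.h
has many more fields):

	struct kvm_mmu_page {
		union kvm_mmu_page_role role;	/* role.level is the root level */
		u64 *spt;			/* the page table this struct backs */
		/* ... */
	};

	/* Before: callers pass the table pointer and the level separately. */
	tdp_iter_start(&iter, root->spt, root->role.level, min_level, start);

	/* After: callers pass the backing kvm_mmu_page and the iterator
	 * derives both from it.
	 */
	tdp_iter_start(&iter, root, min_level, start);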

Comments

Peter Xu Jan. 4, 2022, 10:35 a.m. UTC | #1
On Mon, Dec 13, 2021 at 10:59:11PM +0000, David Matlack wrote:
> Instead of passing a pointer to the root page table and the root level
> separately, pass in a pointer to the kvm_mmu_page that backs the root.
> This reduces the number of arguments by 1, cutting down on line lengths.
> 
> No functional change intended.
> 
> Signed-off-by: David Matlack <dmatlack@google.com>
> Reviewed-by: Ben Gardon <bgardon@google.com>

Reviewed-by: Peter Xu <peterx@redhat.com>

Sean Christopherson Jan. 6, 2022, 8:34 p.m. UTC | #2
On Mon, Dec 13, 2021, David Matlack wrote:
> Instead of passing a pointer to the root page table and the root level
> separately, pass in a pointer to the kvm_mmu_page that backs the root.
> This reduces the number of arguments by 1, cutting down on line lengths.
> 
> No functional change intended.
> 
> Signed-off-by: David Matlack <dmatlack@google.com>
> Reviewed-by: Ben Gardon <bgardon@google.com>
> ---
>  arch/x86/kvm/mmu/tdp_iter.c |  5 ++++-
>  arch/x86/kvm/mmu/tdp_iter.h | 10 +++++-----
>  arch/x86/kvm/mmu/tdp_mmu.c  | 14 +++++---------
>  3 files changed, 14 insertions(+), 15 deletions(-)
> 
> diff --git a/arch/x86/kvm/mmu/tdp_iter.c b/arch/x86/kvm/mmu/tdp_iter.c
> index b3ed302c1a35..92b3a075525a 100644
> --- a/arch/x86/kvm/mmu/tdp_iter.c
> +++ b/arch/x86/kvm/mmu/tdp_iter.c
> @@ -39,9 +39,12 @@ void tdp_iter_restart(struct tdp_iter *iter)
>   * Sets a TDP iterator to walk a pre-order traversal of the paging structure
>   * rooted at root_pt, starting with the walk to translate next_last_level_gfn.
>   */
> -void tdp_iter_start(struct tdp_iter *iter, u64 *root_pt, int root_level,
> +void tdp_iter_start(struct tdp_iter *iter, struct kvm_mmu_page *root,
>  		    int min_level, gfn_t next_last_level_gfn)
>  {
> +	u64 *root_pt = root->spt;
> +	int root_level = root->role.level;

Uber nit, arch/x86/ prefers reverse fir tree, though I've yet to get Paolo fully
on board :-)
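
("Reverse fir tree" is the kernel style of ordering local variable
declarations from longest line to shortest, e.g. for the two declarations
above:

	int root_level = root->role.level;
	u64 *root_pt = root->spt;

i.e. the reverse of the order in the patch.)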

But looking at the usage of root_pt, even better would be

  void tdp_iter_start(struct tdp_iter *iter, struct kvm_mmu_page *root,
		      int min_level, gfn_t next_last_level_gfn)
  {
	int root_level = root->role.level;

	WARN_ON(root_level < 1);
	WARN_ON(root_level > PT64_ROOT_MAX_LEVEL);

	iter->next_last_level_gfn = next_last_level_gfn;
	iter->root_level = root_level;
	iter->min_level = min_level;
	iter->pt_path[iter->root_level - 1] = (tdp_ptep_t)root->spt;
	iter->as_id = kvm_mmu_page_as_id(root);

	tdp_iter_restart(iter);
  }

to avoid the pointless sptep_to_sp() and eliminate the motivation for root_pt.
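
For reference, the sptep_to_sp() round trip being avoided walks from the
SPTE pointer back to the struct page that backs it, roughly (simplified
from the helpers in arch/x86/kvm/mmu/ at the time; details vary by tree):

	static inline struct kvm_mmu_page *to_shadow_page(hpa_t shadow_page)
	{
		struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);

		return (struct kvm_mmu_page *)page_private(page);
	}

	static inline struct kvm_mmu_page *sptep_to_sp(u64 *sptep)
	{
		return to_shadow_page(__pa(sptep));
	}

With the kvm_mmu_page already in hand, kvm_mmu_page_as_id(root) can read
the address space ID straight off root->role, skipping the
__pa()/pfn_to_page()/page_private() walk entirely.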

David Matlack Jan. 6, 2022, 10:57 p.m. UTC | #3
On Thu, Jan 6, 2022 at 12:34 PM Sean Christopherson <seanjc@google.com> wrote:
>
> On Mon, Dec 13, 2021, David Matlack wrote:
> > Instead of passing a pointer to the root page table and the root level
> > separately, pass in a pointer to the kvm_mmu_page that backs the root.
> > This reduces the number of arguments by 1, cutting down on line lengths.
> >
> > No functional change intended.
> >
> > Signed-off-by: David Matlack <dmatlack@google.com>
> > Reviewed-by: Ben Gardon <bgardon@google.com>
> > ---
> >  arch/x86/kvm/mmu/tdp_iter.c |  5 ++++-
> >  arch/x86/kvm/mmu/tdp_iter.h | 10 +++++-----
> >  arch/x86/kvm/mmu/tdp_mmu.c  | 14 +++++---------
> >  3 files changed, 14 insertions(+), 15 deletions(-)
> >
> > diff --git a/arch/x86/kvm/mmu/tdp_iter.c b/arch/x86/kvm/mmu/tdp_iter.c
> > index b3ed302c1a35..92b3a075525a 100644
> > --- a/arch/x86/kvm/mmu/tdp_iter.c
> > +++ b/arch/x86/kvm/mmu/tdp_iter.c
> > @@ -39,9 +39,12 @@ void tdp_iter_restart(struct tdp_iter *iter)
> >   * Sets a TDP iterator to walk a pre-order traversal of the paging structure
> >   * rooted at root_pt, starting with the walk to translate next_last_level_gfn.
> >   */
> > -void tdp_iter_start(struct tdp_iter *iter, u64 *root_pt, int root_level,
> > +void tdp_iter_start(struct tdp_iter *iter, struct kvm_mmu_page *root,
> >                   int min_level, gfn_t next_last_level_gfn)
> >  {
> > +     u64 *root_pt = root->spt;
> > +     int root_level = root->role.level;
>
> Uber nit, arch/x86/ prefers reverse fir tree, though I've yet to get Paolo fully
> on board :-)
>
> But looking at the usage of root_pt, even better would be
>
>   void tdp_iter_start(struct tdp_iter *iter, struct kvm_mmu_page *root,
>                       int min_level, gfn_t next_last_level_gfn)
>   {
>         int root_level = root->role.level;
>
>         WARN_ON(root_level < 1);
>         WARN_ON(root_level > PT64_ROOT_MAX_LEVEL);
>
>         iter->next_last_level_gfn = next_last_level_gfn;
>         iter->root_level = root_level;
>         iter->min_level = min_level;
>         iter->pt_path[iter->root_level - 1] = (tdp_ptep_t)root->spt;
>         iter->as_id = kvm_mmu_page_as_id(root);
>
>         tdp_iter_restart(iter);
>   }
>
> to avoid the pointless sptep_to_sp() and eliminate the motivation for root_pt.

Will do.

Patch

diff --git a/arch/x86/kvm/mmu/tdp_iter.c b/arch/x86/kvm/mmu/tdp_iter.c
index b3ed302c1a35..92b3a075525a 100644
--- a/arch/x86/kvm/mmu/tdp_iter.c
+++ b/arch/x86/kvm/mmu/tdp_iter.c
@@ -39,9 +39,12 @@ void tdp_iter_restart(struct tdp_iter *iter)
  * Sets a TDP iterator to walk a pre-order traversal of the paging structure
  * rooted at root_pt, starting with the walk to translate next_last_level_gfn.
  */
-void tdp_iter_start(struct tdp_iter *iter, u64 *root_pt, int root_level,
+void tdp_iter_start(struct tdp_iter *iter, struct kvm_mmu_page *root,
 		    int min_level, gfn_t next_last_level_gfn)
 {
+	u64 *root_pt = root->spt;
+	int root_level = root->role.level;
+
 	WARN_ON(root_level < 1);
 	WARN_ON(root_level > PT64_ROOT_MAX_LEVEL);
 
diff --git a/arch/x86/kvm/mmu/tdp_iter.h b/arch/x86/kvm/mmu/tdp_iter.h
index b1748b988d3a..ec1f58013428 100644
--- a/arch/x86/kvm/mmu/tdp_iter.h
+++ b/arch/x86/kvm/mmu/tdp_iter.h
@@ -51,17 +51,17 @@ struct tdp_iter {
  * Iterates over every SPTE mapping the GFN range [start, end) in a
  * preorder traversal.
  */
-#define for_each_tdp_pte_min_level(iter, root, root_level, min_level, start, end) \
-	for (tdp_iter_start(&iter, root, root_level, min_level, start); \
+#define for_each_tdp_pte_min_level(iter, root, min_level, start, end) \
+	for (tdp_iter_start(&iter, root, min_level, start); \
 	     iter.valid && iter.gfn < end;		     \
 	     tdp_iter_next(&iter))
 
-#define for_each_tdp_pte(iter, root, root_level, start, end) \
-	for_each_tdp_pte_min_level(iter, root, root_level, PG_LEVEL_4K, start, end)
+#define for_each_tdp_pte(iter, root, start, end) \
+	for_each_tdp_pte_min_level(iter, root, PG_LEVEL_4K, start, end)
 
 tdp_ptep_t spte_to_child_pt(u64 pte, int level);
 
-void tdp_iter_start(struct tdp_iter *iter, u64 *root_pt, int root_level,
+void tdp_iter_start(struct tdp_iter *iter, struct kvm_mmu_page *root,
 		    int min_level, gfn_t next_last_level_gfn);
 void tdp_iter_next(struct tdp_iter *iter);
 void tdp_iter_restart(struct tdp_iter *iter);
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index dbd07c10d11a..2fb2d7677fbf 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -632,7 +632,7 @@ static inline void tdp_mmu_set_spte_no_dirty_log(struct kvm *kvm,
 }
 
 #define tdp_root_for_each_pte(_iter, _root, _start, _end) \
-	for_each_tdp_pte(_iter, _root->spt, _root->role.level, _start, _end)
+	for_each_tdp_pte(_iter, _root, _start, _end)
 
 #define tdp_root_for_each_leaf_pte(_iter, _root, _start, _end)	\
 	tdp_root_for_each_pte(_iter, _root, _start, _end)		\
@@ -642,8 +642,7 @@ static inline void tdp_mmu_set_spte_no_dirty_log(struct kvm *kvm,
 		else
 
 #define tdp_mmu_for_each_pte(_iter, _mmu, _start, _end)		\
-	for_each_tdp_pte(_iter, __va(_mmu->root_hpa),		\
-			 _mmu->shadow_root_level, _start, _end)
+	for_each_tdp_pte(_iter, to_shadow_page(_mmu->root_hpa), _start, _end)
 
 /*
  * Yield if the MMU lock is contended or this thread needs to return control
@@ -733,8 +732,7 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
 
 	rcu_read_lock();
 
-	for_each_tdp_pte_min_level(iter, root->spt, root->role.level,
-				   min_level, start, end) {
+	for_each_tdp_pte_min_level(iter, root, min_level, start, end) {
 retry:
 		if (can_yield &&
 		    tdp_mmu_iter_cond_resched(kvm, &iter, flush, shared)) {
@@ -1205,8 +1203,7 @@ static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
 
 	BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL);
 
-	for_each_tdp_pte_min_level(iter, root->spt, root->role.level,
-				   min_level, start, end) {
+	for_each_tdp_pte_min_level(iter, root, min_level, start, end) {
 retry:
 		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
 			continue;
@@ -1445,8 +1442,7 @@ static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
 
 	rcu_read_lock();
 
-	for_each_tdp_pte_min_level(iter, root->spt, root->role.level,
-				   min_level, gfn, gfn + 1) {
+	for_each_tdp_pte_min_level(iter, root, min_level, gfn, gfn + 1) {
 		if (!is_shadow_present_pte(iter.old_spte) ||
 		    !is_last_spte(iter.old_spte, iter.level))
 			continue;
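
Putting the refactored iterator together, a hypothetical caller after this
patch looks roughly like the existing walks above (locking, RCU and yield
handling elided; example_walk is an illustrative name, not from the
series):

	static void example_walk(struct kvm_mmu_page *root, gfn_t start, gfn_t end)
	{
		struct tdp_iter iter;

		for_each_tdp_pte_min_level(iter, root, PG_LEVEL_4K, start, end) {
			if (!is_shadow_present_pte(iter.old_spte) ||
			    !is_last_spte(iter.old_spte, iter.level))
				continue;
			/* Act on iter.gfn, iter.level, iter.old_spte here. */
		}
	}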