[v1] KVM: x86/mmu: refactor kvm_tdp_mmu_map

Message ID 20230802142737.5572-1-wei.w.wang@intel.com (mailing list archive)
State New, archived
Series [v1] KVM: x86/mmu: refactor kvm_tdp_mmu_map

Commit Message

Wang, Wei W Aug. 2, 2023, 2:27 p.m. UTC
The implementation of kvm_tdp_mmu_map is a bit long. It essentially does
three things:
1) adjust the leaf entry level (e.g. 4KB, 2MB or 1GB) to map according to
   the hugepage configurations;
2) map the nonleaf entries of the tdp page table; and
3) map the target leaf entry.

Improve the readability by moving the implementation of 2) above into a
subfunction, kvm_tdp_mmu_map_nonleafs, and removing the unnecessary
"goto"s. No functional changes intended.

Signed-off-by: Wei Wang <wei.w.wang@intel.com>
---
 arch/x86/kvm/mmu/tdp_mmu.c | 76 ++++++++++++++++++++------------------
 1 file changed, 41 insertions(+), 35 deletions(-)

Comments

Sean Christopherson Aug. 2, 2023, 3:14 p.m. UTC | #1
On Wed, Aug 02, 2023, Wei Wang wrote:
> The implementation of kvm_tdp_mmu_map is a bit long. It essentially does
> three things:
> 1) adjust the leaf entry level (e.g. 4KB, 2MB or 1GB) to map according to
>    the hugepage configurations;
> 2) map the nonleaf entries of the tdp page table; and
> 3) map the target leaf entry.
> 
> Improve the readability by moving the implementation of 2) above into a
> subfunction, kvm_tdp_mmu_map_nonleafs, and removing the unnecessary
> "goto"s. No functional changes intended.

Eh, I prefer the current code from a readability perspective.  I like being able
to see the entire flow, and I especially like that this

		if (iter.level == fault->goal_level)
			goto map_target_level;

very clearly and explicitly captures that reaching the goal level means that it's
time to map the target level, whereas IMO this does not, in no small part because
seeing "continue" in a loop makes me think "continue the loop", not "continue on
to the next part of the page fault":

		if (iter->level == fault->goal_level)
			return RET_PF_CONTINUE;

And the existing code follows the pattern of the other page fault paths, direct_map()
and FNAME(fetch).  That doesn't necessarily mean that the existing pattern is
"better", but I personally place a lot of value on consistency.

> +/*
> + * Handle a TDP page fault (NPT/EPT violation/misconfiguration) by installing
> + * page tables and SPTEs to translate the faulting guest physical address.
> + */
> +int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
> +{
> +	struct tdp_iter iter;
> +	int ret;
> +
> +	kvm_mmu_hugepage_adjust(vcpu, fault);
> +
> +	trace_kvm_mmu_spte_requested(fault);
> +
> +	rcu_read_lock();
> +
> +	ret = kvm_tdp_mmu_map_nonleafs(vcpu, fault, &iter);
> +	if (ret == RET_PF_CONTINUE)
> +		ret = tdp_mmu_map_handle_target_level(vcpu, fault, &iter);

And I also don't like passing in an uninitialized tdp_iter, and then consuming
it too.
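
That is, condensed from this patch, the caller hands the helper an iterator it
never initializes, the helper is expected to both initialize and advance it,
and then the caller reads it back afterwards:

	struct tdp_iter iter;	/* never initialized by the caller */

	ret = kvm_tdp_mmu_map_nonleafs(vcpu, fault, &iter);	/* written inside the helper */
	if (ret == RET_PF_CONTINUE)
		ret = tdp_mmu_map_handle_target_level(vcpu, fault, &iter);	/* consumed here */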

>  
> -retry:
>  	rcu_read_unlock();
>  	return ret;
>  }
> -- 
> 2.27.0
>

Patch

diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 512163d52194..0b29a7f853b5 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -1057,43 +1057,33 @@  static int tdp_mmu_link_sp(struct kvm *kvm, struct tdp_iter *iter,
 static int tdp_mmu_split_huge_page(struct kvm *kvm, struct tdp_iter *iter,
 				   struct kvm_mmu_page *sp, bool shared);
 
-/*
- * Handle a TDP page fault (NPT/EPT violation/misconfiguration) by installing
- * page tables and SPTEs to translate the faulting guest physical address.
- */
-int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
+static int kvm_tdp_mmu_map_nonleafs(struct kvm_vcpu *vcpu,
+				    struct kvm_page_fault *fault,
+				    struct tdp_iter *iter)
 {
 	struct kvm_mmu *mmu = vcpu->arch.mmu;
 	struct kvm *kvm = vcpu->kvm;
-	struct tdp_iter iter;
 	struct kvm_mmu_page *sp;
-	int ret = RET_PF_RETRY;
-
-	kvm_mmu_hugepage_adjust(vcpu, fault);
-
-	trace_kvm_mmu_spte_requested(fault);
-
-	rcu_read_lock();
-
-	tdp_mmu_for_each_pte(iter, mmu, fault->gfn, fault->gfn + 1) {
-		int r;
+	int ret;
 
+	tdp_mmu_for_each_pte((*iter), mmu, fault->gfn, fault->gfn + 1) {
 		if (fault->nx_huge_page_workaround_enabled)
-			disallowed_hugepage_adjust(fault, iter.old_spte, iter.level);
+			disallowed_hugepage_adjust(fault, iter->old_spte,
+						   iter->level);
 
 		/*
 		 * If SPTE has been frozen by another thread, just give up and
 		 * retry, avoiding unnecessary page table allocation and free.
 		 */
-		if (is_removed_spte(iter.old_spte))
-			goto retry;
+		if (is_removed_spte(iter->old_spte))
+			return RET_PF_RETRY;
 
-		if (iter.level == fault->goal_level)
-			goto map_target_level;
+		if (iter->level == fault->goal_level)
+			return RET_PF_CONTINUE;
 
 		/* Step down into the lower level page table if it exists. */
-		if (is_shadow_present_pte(iter.old_spte) &&
-		    !is_large_pte(iter.old_spte))
+		if (is_shadow_present_pte(iter->old_spte) &&
+		    !is_large_pte(iter->old_spte))
 			continue;
 
 		/*
@@ -1101,26 +1091,26 @@  int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 		 * needs to be split.
 		 */
 		sp = tdp_mmu_alloc_sp(vcpu);
-		tdp_mmu_init_child_sp(sp, &iter);
+		tdp_mmu_init_child_sp(sp, iter);
 
 		sp->nx_huge_page_disallowed = fault->huge_page_disallowed;
 
-		if (is_shadow_present_pte(iter.old_spte))
-			r = tdp_mmu_split_huge_page(kvm, &iter, sp, true);
+		if (is_shadow_present_pte(iter->old_spte))
+			ret = tdp_mmu_split_huge_page(kvm, iter, sp, true);
 		else
-			r = tdp_mmu_link_sp(kvm, &iter, sp, true);
+			ret = tdp_mmu_link_sp(kvm, iter, sp, true);
 
 		/*
 		 * Force the guest to retry if installing an upper level SPTE
 		 * failed, e.g. because a different task modified the SPTE.
 		 */
-		if (r) {
+		if (ret) {
 			tdp_mmu_free_sp(sp);
-			goto retry;
+			return RET_PF_RETRY;
 		}
 
 		if (fault->huge_page_disallowed &&
-		    fault->req_level >= iter.level) {
+		    fault->req_level >= iter->level) {
 			spin_lock(&kvm->arch.tdp_mmu_pages_lock);
 			if (sp->nx_huge_page_disallowed)
 				track_possible_nx_huge_page(kvm, sp);
@@ -1132,13 +1122,29 @@  int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 	 * The walk aborted before reaching the target level, e.g. because the
 	 * iterator detected an upper level SPTE was frozen during traversal.
 	 */
-	WARN_ON_ONCE(iter.level == fault->goal_level);
-	goto retry;
+	WARN_ON_ONCE(iter->level == fault->goal_level);
+	return RET_PF_RETRY;
+}
 
-map_target_level:
-	ret = tdp_mmu_map_handle_target_level(vcpu, fault, &iter);
+/*
+ * Handle a TDP page fault (NPT/EPT violation/misconfiguration) by installing
+ * page tables and SPTEs to translate the faulting guest physical address.
+ */
+int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
+{
+	struct tdp_iter iter;
+	int ret;
+
+	kvm_mmu_hugepage_adjust(vcpu, fault);
+
+	trace_kvm_mmu_spte_requested(fault);
+
+	rcu_read_lock();
+
+	ret = kvm_tdp_mmu_map_nonleafs(vcpu, fault, &iter);
+	if (ret == RET_PF_CONTINUE)
+		ret = tdp_mmu_map_handle_target_level(vcpu, fault, &iter);
 
-retry:
 	rcu_read_unlock();
 	return ret;
 }