[3/4] KVM: x86/mmu: Use raw level to index into MMIO walks' sptes array

Message ID 20201218003139.2167891-4-seanjc@google.com (mailing list archive)
State New, archived
Series KVM: x86/mmu: Bug fixes and cleanups in get_mmio_spte()

Commit Message

Sean Christopherson Dec. 18, 2020, 12:31 a.m. UTC
Bump the size of the sptes array by one and use the raw level of the
SPTE to index into the sptes array.  Using the SPTE level directly
improves readability by eliminating the need to reason out why the level
is being adjusted when indexing the array.  The array is on the stack
and is not explicitly initialized; bumping its size is nothing more than
a superficial adjustment to the stack frame.

Signed-off-by: Sean Christopherson <seanjc@google.com>
---
 arch/x86/kvm/mmu/mmu.c     | 15 +++++++--------
 arch/x86/kvm/mmu/tdp_mmu.c |  2 +-
 2 files changed, 8 insertions(+), 9 deletions(-)
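
As context for the diff, here is a minimal standalone sketch (not the
kernel code; PT64_ROOT_MAX_LEVEL's value and the SPTE values are
stand-ins for illustration) of the indexing scheme the patch adopts:
SPTE levels are 1-based, so indexing by the raw level needs one extra
array slot, and slot 0 is simply never written.

  #include <stdio.h>

  #define PT64_ROOT_MAX_LEVEL 5   /* stand-in for the kernel constant */

  int main(void)
  {
      /* One extra slot so the 1-based SPTE level indexes directly. */
      unsigned long long sptes[PT64_ROOT_MAX_LEVEL + 1];
      int root = 4, leaf = 1, level;

      /* Fill as get_walk() does: sptes[level], not sptes[level - 1]. */
      for (level = root; level >= leaf; level--)
          sptes[level] = 0x1000ULL * level;   /* fake SPTE values */

      /* Consumers read by raw level; no mental "level - 1" hop. */
      for (level = root; level >= leaf; level--)
          printf("spte 0x%llx level %d\n", sptes[level], level);

      return 0;
  }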

Comments

Vitaly Kuznetsov Dec. 18, 2020, 9:18 a.m. UTC | #1
Sean Christopherson <seanjc@google.com> writes:

> Bump the size of the sptes array by one and use the raw level of the
> SPTE to index into the sptes array.  Using the SPTE level directly
> improves readability by eliminating the need to reason out why the level
> is being adjusted when indexing the array.  The array is on the stack
> and is not explicitly initialized; bumping its size is nothing more than
> a superficial adjustment to the stack frame.
>
> Signed-off-by: Sean Christopherson <seanjc@google.com>
> ---
>  arch/x86/kvm/mmu/mmu.c     | 15 +++++++--------
>  arch/x86/kvm/mmu/tdp_mmu.c |  2 +-
>  2 files changed, 8 insertions(+), 9 deletions(-)
>
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index 52f36c879086..4798a4472066 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -3500,7 +3500,7 @@ static int get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes, int *root_level
>  		leaf = iterator.level;
>  		spte = mmu_spte_get_lockless(iterator.sptep);
>  
> -		sptes[leaf - 1] = spte;
> +		sptes[leaf] = spte;
>  
>  		if (!is_shadow_present_pte(spte))
>  			break;
> @@ -3514,7 +3514,7 @@ static int get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes, int *root_level
>  /* return true if reserved bit is detected on spte. */
>  static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
>  {
> -	u64 sptes[PT64_ROOT_MAX_LEVEL];
> +	u64 sptes[PT64_ROOT_MAX_LEVEL + 1];
>  	struct rsvd_bits_validate *rsvd_check;
>  	int root, leaf, level;
>  	bool reserved = false;
> @@ -3537,16 +3537,15 @@ static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
>  	rsvd_check = &vcpu->arch.mmu->shadow_zero_check;
>  
>  	for (level = root; level >= leaf; level--) {
> -		if (!is_shadow_present_pte(sptes[level - 1]))
> +		if (!is_shadow_present_pte(sptes[level]))
>  			break;
>  		/*
>  		 * Use a bitwise-OR instead of a logical-OR to aggregate the
>  		 * reserved bit and EPT's invalid memtype/XWR checks to avoid
>  		 * adding a Jcc in the loop.
>  		 */
> -		reserved |= __is_bad_mt_xwr(rsvd_check, sptes[level - 1]) |
> -			    __is_rsvd_bits_set(rsvd_check, sptes[level - 1],
> -					       level);
> +		reserved |= __is_bad_mt_xwr(rsvd_check, sptes[level]) |
> +			    __is_rsvd_bits_set(rsvd_check, sptes[level], level);
>  	}
>  
>  	if (reserved) {
> @@ -3554,10 +3553,10 @@ static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
>  		       __func__, addr);
>  		for (level = root; level >= leaf; level--)
>  			pr_err("------ spte 0x%llx level %d.\n",
> -			       sptes[level - 1], level);
> +			       sptes[level], level);
>  	}
>  
> -	*sptep = sptes[leaf - 1];
> +	*sptep = sptes[leaf];
>  
>  	return reserved;
>  }
> diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
> index a4f9447f8327..efef571806ad 100644
> --- a/arch/x86/kvm/mmu/tdp_mmu.c
> +++ b/arch/x86/kvm/mmu/tdp_mmu.c
> @@ -1160,7 +1160,7 @@ int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
>  
>  	tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
>  		leaf = iter.level;
> -		sptes[leaf - 1] = iter.old_spte;
> +		sptes[leaf] = iter.old_spte;
>  	}
>  
>  	return leaf;

An alternative solution would've been to reverse the array and fill it
like

 sptes[PT64_ROOT_MAX_LEVEL - leaf] = iter.old_spte;

but this may not reach the goal of 'improved readability' :-)
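
To make the readability cost concrete, a toy standalone sketch (stand-in
values, not kernel code): under the reversed scheme, every consumer has
to repeat the mirrored index arithmetic.

  #include <stdio.h>

  #define PT64_ROOT_MAX_LEVEL 5   /* stand-in value */

  int main(void)
  {
      /* No extra slot needed: higher levels map to lower slots. */
      unsigned long long sptes[PT64_ROOT_MAX_LEVEL];
      int root = 4, leaf = 1, level;

      for (level = root; level >= leaf; level--)
          sptes[PT64_ROOT_MAX_LEVEL - level] = 0x1000ULL * level;

      /* Readers must invert the index on every access. */
      for (level = root; level >= leaf; level--)
          printf("spte 0x%llx level %d\n",
                 sptes[PT64_ROOT_MAX_LEVEL - level], level);

      return 0;
  }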

Also, we may add an MMU_DEBUG-ifdef'ed check that sptes[0] remains
intact.
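
Something along these lines, perhaps (just a sketch of get_mmio_spte();
the sentinel value is arbitrary and hypothetical, and since sptes[] is
uninitialized stack memory, slot 0 has to be poisoned before the walk):

  #ifdef MMU_DEBUG
  	sptes[0] = 0xdeadbeefdeadbeefull;  /* poison the never-written slot */
  #endif
  	leaf = get_walk(vcpu, addr, sptes, &root);
  #ifdef MMU_DEBUG
  	/* The walk indexes by raw level (>= 1), so slot 0 must survive. */
  	WARN_ON_ONCE(sptes[0] != 0xdeadbeefdeadbeefull);
  #endif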

Reviewed-by: Vitaly Kuznetsov <vkuznets@redhat.com>

Patch

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 52f36c879086..4798a4472066 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3500,7 +3500,7 @@ static int get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes, int *root_level
 		leaf = iterator.level;
 		spte = mmu_spte_get_lockless(iterator.sptep);
 
-		sptes[leaf - 1] = spte;
+		sptes[leaf] = spte;
 
 		if (!is_shadow_present_pte(spte))
 			break;
@@ -3514,7 +3514,7 @@ static int get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes, int *root_level
 /* return true if reserved bit is detected on spte. */
 static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
 {
-	u64 sptes[PT64_ROOT_MAX_LEVEL];
+	u64 sptes[PT64_ROOT_MAX_LEVEL + 1];
 	struct rsvd_bits_validate *rsvd_check;
 	int root, leaf, level;
 	bool reserved = false;
@@ -3537,16 +3537,15 @@ static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
 	rsvd_check = &vcpu->arch.mmu->shadow_zero_check;
 
 	for (level = root; level >= leaf; level--) {
-		if (!is_shadow_present_pte(sptes[level - 1]))
+		if (!is_shadow_present_pte(sptes[level]))
 			break;
 		/*
 		 * Use a bitwise-OR instead of a logical-OR to aggregate the
 		 * reserved bit and EPT's invalid memtype/XWR checks to avoid
 		 * adding a Jcc in the loop.
 		 */
-		reserved |= __is_bad_mt_xwr(rsvd_check, sptes[level - 1]) |
-			    __is_rsvd_bits_set(rsvd_check, sptes[level - 1],
-					       level);
+		reserved |= __is_bad_mt_xwr(rsvd_check, sptes[level]) |
+			    __is_rsvd_bits_set(rsvd_check, sptes[level], level);
 	}
 
 	if (reserved) {
@@ -3554,10 +3553,10 @@ static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
 		       __func__, addr);
 		for (level = root; level >= leaf; level--)
 			pr_err("------ spte 0x%llx level %d.\n",
-			       sptes[level - 1], level);
+			       sptes[level], level);
 	}
 
-	*sptep = sptes[leaf - 1];
+	*sptep = sptes[leaf];
 
 	return reserved;
 }
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index a4f9447f8327..efef571806ad 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -1160,7 +1160,7 @@ int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
 
 	tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
 		leaf = iter.level;
-		sptes[leaf - 1] = iter.old_spte;
+		sptes[leaf] = iter.old_spte;
 	}
 
 	return leaf;
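
As an aside, the bitwise-OR comment in get_mmio_spte()'s loop above is
worth a standalone illustration (toy code with stand-in checks, not the
kernel's helpers): logical || has short-circuit semantics, which
generally costs a conditional branch (Jcc) to skip the second check,
while bitwise | on already-computed booleans evaluates both
unconditionally and folds straight into 'reserved'.

  #include <stdbool.h>
  #include <stdio.h>

  /* Stand-ins for __is_bad_mt_xwr() and __is_rsvd_bits_set(). */
  static bool check_mt_xwr(unsigned long long s) { return s & 1; }
  static bool check_rsvd(unsigned long long s)   { return s & 2; }

  int main(void)
  {
      unsigned long long sptes[] = { 0x4, 0x2, 0x1 };
      bool reserved = false;
      int i;

      for (i = 0; i < 3; i++) {
          /* Bitwise |: no short-circuit, hence no extra Jcc in the loop. */
          reserved |= check_mt_xwr(sptes[i]) | check_rsvd(sptes[i]);
      }

      printf("reserved = %d\n", reserved);
      return 0;
  }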