
KVM: x86/mmu: Add a helper to consolidate root sp allocation

Message ID 20200428023714.31923-1-sean.j.christopherson@intel.com (mailing list archive)
State New, archived
Series KVM: x86/mmu: Add a helper to consolidate root sp allocation

Commit Message

Sean Christopherson April 28, 2020, 2:37 a.m. UTC
Add a helper, mmu_alloc_root(), to consolidate the allocation of a root
shadow page, which has the same basic mechanics for all flavors of TDP
and shadow paging.
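
For reference, a minimal sketch of the resulting caller pattern (lifted from
the diff below; shown for the 64-bit direct-root case):

	root = mmu_alloc_root(vcpu, 0, 0, shadow_root_level, true);
	if (!VALID_PAGE(root))
		return -ENOSPC;
	vcpu->arch.mmu->root_hpa = root;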

Note, __pa(sp->spt) doesn't need to be protected by mmu_lock, as sp->spt
points at a kernel page.
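
For illustration, the tail of the new helper (excerpted from the diff below)
computes the root's physical address only after the lock is dropped:

	sp = kvm_mmu_get_page(vcpu, gfn, gva, level, direct, ACC_ALL);
	++sp->root_count;

	spin_unlock(&vcpu->kvm->mmu_lock);
	return __pa(sp->spt);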

No functional change intended.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
---
 arch/x86/kvm/mmu/mmu.c | 88 +++++++++++++++++++-----------------------
 1 file changed, 39 insertions(+), 49 deletions(-)

Comments

Paolo Bonzini May 4, 2020, 4:52 p.m. UTC | #1
On 28/04/20 04:37, Sean Christopherson wrote:
> Add a helper, mmu_alloc_root(), to consolidate the allocation of a root
> shadow page, which has the same basic mechanics for all flavors of TDP
> and shadow paging.
> 
> Note, __pa(sp->spt) doesn't need to be protected by mmu_lock, as sp->spt
> points at a kernel page.
> 
> No functional change intended.
> 
> Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
> ---
>  arch/x86/kvm/mmu/mmu.c | 88 +++++++++++++++++++-----------------------
>  1 file changed, 39 insertions(+), 49 deletions(-)
> 

Queued, thanks.

Paolo

Patch

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index e618472c572b..80205aea296e 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3685,37 +3685,43 @@  static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn)
 	return ret;
 }
 
+static hpa_t mmu_alloc_root(struct kvm_vcpu *vcpu, gfn_t gfn, gva_t gva,
+			    u8 level, bool direct)
+{
+	struct kvm_mmu_page *sp;
+
+	spin_lock(&vcpu->kvm->mmu_lock);
+
+	if (make_mmu_pages_available(vcpu)) {
+		spin_unlock(&vcpu->kvm->mmu_lock);
+		return INVALID_PAGE;
+	}
+	sp = kvm_mmu_get_page(vcpu, gfn, gva, level, direct, ACC_ALL);
+	++sp->root_count;
+
+	spin_unlock(&vcpu->kvm->mmu_lock);
+	return __pa(sp->spt);
+}
+
 static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
 {
-	struct kvm_mmu_page *sp;
+	u8 shadow_root_level = vcpu->arch.mmu->shadow_root_level;
+	hpa_t root;
 	unsigned i;
 
-	if (vcpu->arch.mmu->shadow_root_level >= PT64_ROOT_4LEVEL) {
-		spin_lock(&vcpu->kvm->mmu_lock);
-		if(make_mmu_pages_available(vcpu) < 0) {
-			spin_unlock(&vcpu->kvm->mmu_lock);
+	if (shadow_root_level >= PT64_ROOT_4LEVEL) {
+		root = mmu_alloc_root(vcpu, 0, 0, shadow_root_level, true);
+		if (!VALID_PAGE(root))
 			return -ENOSPC;
-		}
-		sp = kvm_mmu_get_page(vcpu, 0, 0,
-				vcpu->arch.mmu->shadow_root_level, 1, ACC_ALL);
-		++sp->root_count;
-		spin_unlock(&vcpu->kvm->mmu_lock);
-		vcpu->arch.mmu->root_hpa = __pa(sp->spt);
-	} else if (vcpu->arch.mmu->shadow_root_level == PT32E_ROOT_LEVEL) {
+		vcpu->arch.mmu->root_hpa = root;
+	} else if (shadow_root_level == PT32E_ROOT_LEVEL) {
 		for (i = 0; i < 4; ++i) {
-			hpa_t root = vcpu->arch.mmu->pae_root[i];
+			MMU_WARN_ON(VALID_PAGE(vcpu->arch.mmu->pae_root[i]));
 
-			MMU_WARN_ON(VALID_PAGE(root));
-			spin_lock(&vcpu->kvm->mmu_lock);
-			if (make_mmu_pages_available(vcpu) < 0) {
-				spin_unlock(&vcpu->kvm->mmu_lock);
+			root = mmu_alloc_root(vcpu, i << (30 - PAGE_SHIFT),
+					      i << 30, PT32_ROOT_LEVEL, true);
+			if (!VALID_PAGE(root))
 				return -ENOSPC;
-			}
-			sp = kvm_mmu_get_page(vcpu, i << (30 - PAGE_SHIFT),
-					i << 30, PT32_ROOT_LEVEL, 1, ACC_ALL);
-			root = __pa(sp->spt);
-			++sp->root_count;
-			spin_unlock(&vcpu->kvm->mmu_lock);
 			vcpu->arch.mmu->pae_root[i] = root | PT_PRESENT_MASK;
 		}
 		vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root);
@@ -3730,9 +3736,9 @@  static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
 
 static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 {
-	struct kvm_mmu_page *sp;
 	u64 pdptr, pm_mask;
 	gfn_t root_gfn, root_pgd;
+	hpa_t root;
 	int i;
 
 	root_pgd = vcpu->arch.mmu->get_guest_pgd(vcpu);
@@ -3746,20 +3752,12 @@  static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 	 * write-protect the guests page table root.
 	 */
 	if (vcpu->arch.mmu->root_level >= PT64_ROOT_4LEVEL) {
-		hpa_t root = vcpu->arch.mmu->root_hpa;
+		MMU_WARN_ON(VALID_PAGE(vcpu->arch.mmu->root_hpa));
 
-		MMU_WARN_ON(VALID_PAGE(root));
-
-		spin_lock(&vcpu->kvm->mmu_lock);
-		if (make_mmu_pages_available(vcpu) < 0) {
-			spin_unlock(&vcpu->kvm->mmu_lock);
+		root = mmu_alloc_root(vcpu, root_gfn, 0,
+				      vcpu->arch.mmu->shadow_root_level, false);
+		if (!VALID_PAGE(root))
 			return -ENOSPC;
-		}
-		sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
-				vcpu->arch.mmu->shadow_root_level, 0, ACC_ALL);
-		root = __pa(sp->spt);
-		++sp->root_count;
-		spin_unlock(&vcpu->kvm->mmu_lock);
 		vcpu->arch.mmu->root_hpa = root;
 		goto set_root_pgd;
 	}
@@ -3774,9 +3772,7 @@  static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 		pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK;
 
 	for (i = 0; i < 4; ++i) {
-		hpa_t root = vcpu->arch.mmu->pae_root[i];
-
-		MMU_WARN_ON(VALID_PAGE(root));
+		MMU_WARN_ON(VALID_PAGE(vcpu->arch.mmu->pae_root[i]));
 		if (vcpu->arch.mmu->root_level == PT32E_ROOT_LEVEL) {
 			pdptr = vcpu->arch.mmu->get_pdptr(vcpu, i);
 			if (!(pdptr & PT_PRESENT_MASK)) {
@@ -3787,17 +3783,11 @@  static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 			if (mmu_check_root(vcpu, root_gfn))
 				return 1;
 		}
-		spin_lock(&vcpu->kvm->mmu_lock);
-		if (make_mmu_pages_available(vcpu) < 0) {
-			spin_unlock(&vcpu->kvm->mmu_lock);
-			return -ENOSPC;
-		}
-		sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30, PT32_ROOT_LEVEL,
-				      0, ACC_ALL);
-		root = __pa(sp->spt);
-		++sp->root_count;
-		spin_unlock(&vcpu->kvm->mmu_lock);
 
+		root = mmu_alloc_root(vcpu, root_gfn, i << 30,
+				      PT32_ROOT_LEVEL, false);
+		if (!VALID_PAGE(root))
+			return -ENOSPC;
 		vcpu->arch.mmu->pae_root[i] = root | pm_mask;
 	}
 	vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root);