[2/9] KVM: x86/mmu: Add separate helper for shadow NPT root page role calc

Message ID 20200716034122.5998-3-sean.j.christopherson@intel.com
State New, archived
Series KVM: x86: TDP level cleanups and shadow NPT fix

Commit Message

Sean Christopherson July 16, 2020, 3:41 a.m. UTC
Refactor the shadow NPT role calculation into a separate helper to
better differentiate it from the non-nested shadow MMU, e.g. the NPT
variant is never direct and derives its root level from the TDP level.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
---
 arch/x86/kvm/mmu/mmu.c | 30 +++++++++++++++++++++++++-----
 1 file changed, 25 insertions(+), 5 deletions(-)
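
To make the behavioral difference concrete, the sketch below models the
role split in stand-alone C. The struct, helpers, and constants are
simplified stand-ins for KVM's kvm_mmu_role machinery, not the kernel's
actual definitions; only the direct/level logic mirrors the patch.

#include <stdbool.h>
#include <stdio.h>

#define PT32E_ROOT_LEVEL 3	/* PAE paging root level, as in the kernel */
#define PT64_ROOT_4LEVEL 4

struct role { bool direct; int level; };

/*
 * Old behavior: the shared shadow-MMU helper computed the role with
 * direct = !is_paging(vcpu) and a level derived from the guest's paging
 * mode; the NPT caller patched up only the level afterwards, leaving
 * direct potentially wrong.
 */
static struct role old_npt_role(bool guest_paging, int tdp_level)
{
	struct role r = {
		.direct = !guest_paging,
		.level  = guest_paging ? PT64_ROOT_4LEVEL : PT32E_ROOT_LEVEL,
	};
	r.level = tdp_level;	/* fix-up after the fact */
	return r;
}

/* New behavior: the dedicated NPT helper pins both fields up front. */
static struct role new_npt_role(int tdp_level)
{
	return (struct role){ .direct = false, .level = tdp_level };
}

int main(void)
{
	/*
	 * With guest paging disabled, only the new helper gets direct
	 * right: shadow NPT always shadows L1's NPT tables, so it is
	 * never direct.
	 */
	struct role o = old_npt_role(false, 4);
	struct role n = new_npt_role(4);
	printf("old: direct=%d level=%d\n", o.direct, o.level);
	printf("new: direct=%d level=%d\n", n.direct, n.level);
	return 0;
}

Built with any C compiler, the old model prints direct=1 for a
non-paging vCPU while the new one correctly prints direct=0.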

Comments

Vitaly Kuznetsov July 22, 2020, 5:05 p.m. UTC | #1
Sean Christopherson <sean.j.christopherson@intel.com> writes:

> Refactor the shadow NPT role calculation into a separate helper to
> better differentiate it from the non-nested shadow MMU, e.g. the NPT
> variant is never direct and derives its root level from the TDP level.
>
> Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>

Reviewed-by: Vitaly Kuznetsov <vkuznets@redhat.com>

Patch

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 678b6209dad50..0fb033ce6cc57 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4908,7 +4908,7 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
 }
 
 static union kvm_mmu_role
-kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only)
+kvm_calc_shadow_root_page_role_common(struct kvm_vcpu *vcpu, bool base_only)
 {
 	union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, base_only);
 
@@ -4916,9 +4916,19 @@ kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only)
 		!is_write_protection(vcpu);
 	role.base.smap_andnot_wp = role.ext.cr4_smap &&
 		!is_write_protection(vcpu);
-	role.base.direct = !is_paging(vcpu);
 	role.base.gpte_is_8_bytes = !!is_pae(vcpu);
 
+	return role;
+}
+
+static union kvm_mmu_role
+kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only)
+{
+	union kvm_mmu_role role =
+		kvm_calc_shadow_root_page_role_common(vcpu, base_only);
+
+	role.base.direct = !is_paging(vcpu);
+
 	if (!is_long_mode(vcpu))
 		role.base.level = PT32E_ROOT_LEVEL;
 	else if (is_la57_mode(vcpu))
@@ -4956,14 +4966,24 @@ static void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, u32 cr0, u32 cr4, u32 efe
 		shadow_mmu_init_context(vcpu, context, cr0, cr4, efer, new_role);
 }
 
+static union kvm_mmu_role
+kvm_calc_shadow_npt_root_page_role(struct kvm_vcpu *vcpu)
+{
+	union kvm_mmu_role role =
+		kvm_calc_shadow_root_page_role_common(vcpu, false);
+
+	role.base.direct = false;
+	role.base.level = vcpu->arch.tdp_level;
+
+	return role;
+}
+
 void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, u32 cr0, u32 cr4, u32 efer,
 			     gpa_t nested_cr3)
 {
 	struct kvm_mmu *context = &vcpu->arch.guest_mmu;
-	union kvm_mmu_role new_role =
-		kvm_calc_shadow_mmu_root_page_role(vcpu, false);
+	union kvm_mmu_role new_role = kvm_calc_shadow_npt_root_page_role(vcpu);
 
-	new_role.base.level = vcpu->arch.tdp_level;
 	context->shadow_root_level = new_role.base.level;
 
 	__kvm_mmu_new_pgd(vcpu, nested_cr3, new_role.base, false, false);
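
For context on where the new helper lands, a hypothetical call-site
sketch follows. The wrapper name and argument sourcing are illustrative
of how nested SVM hands the L2 guest's paging state and the L1 guest's
nCR3 to the shadow NPT MMU; only kvm_init_shadow_npt_mmu's signature is
taken from the patch above.

/*
 * Illustrative only, not code from this series.  The role computed by
 * kvm_calc_shadow_npt_root_page_role() also serves as the match key
 * when __kvm_mmu_new_pgd() looks for a previously cached root, so a
 * stale direct or level bit would not just mis-size the page tables,
 * it could alias unrelated roots.
 */
static void nested_npt_load_cr3(struct kvm_vcpu *vcpu, u32 cr0, u32 cr4,
				u32 efer, gpa_t nested_cr3)
{
	kvm_init_shadow_npt_mmu(vcpu, cr0, cr4, efer, nested_cr3);
}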