
[v2,19/25] KVM: x86/mmu: simplify and/or inline computation of shadow MMU roles

Message ID: 20220221162243.683208-20-pbonzini@redhat.com
State: New, archived
Series: KVM MMU refactoring part 2: role changes

Commit Message

Paolo Bonzini Feb. 21, 2022, 4:22 p.m. UTC
Shadow MMUs compute their role from cpu_mode.base, simply by adjusting
the root level.  It's one line of code, so do not place it in a separate
function.
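
Concretely, the computation that kvm_calc_shadow_mmu_root_page_role()
used to wrap now reads (excerpted from the patch below):

	root_role = cpu_mode.base;
	root_role.level = max_t(u32, root_role.level, PT32E_ROOT_LEVEL);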

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 arch/x86/kvm/mmu/mmu.c | 54 +++++++++++++++---------------------------
 1 file changed, 19 insertions(+), 35 deletions(-)

Comments

Sean Christopherson March 8, 2022, 7:35 p.m. UTC | #1
On Mon, Feb 21, 2022, Paolo Bonzini wrote:
> @@ -4822,18 +4798,23 @@ static void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu,
>  {
>  	struct kvm_mmu *context = &vcpu->arch.root_mmu;
>  	union kvm_mmu_paging_mode cpu_mode = kvm_calc_cpu_mode(vcpu, regs);
> -	union kvm_mmu_page_role root_role =
> -		kvm_calc_shadow_mmu_root_page_role(vcpu, cpu_mode);
> +	union kvm_mmu_page_role root_role;
>  
> -	shadow_mmu_init_context(vcpu, context, cpu_mode, root_role);
> -}
> +	root_role = cpu_mode.base;
> +	root_role.level = max_t(u32, root_role.level, PT32E_ROOT_LEVEL);

Heh, we have different definitions of "simpler".  Can we split the difference
and do?

	/* KVM uses PAE paging whenever the guest isn't using 64-bit paging. */
	if (!____is_efer_lma(regs))
		root_role.level = PT32E_ROOT_LEVEL;
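
For what it's worth, the two forms are behaviorally equivalent:
cpu_mode.base.level can exceed PT32E_ROOT_LEVEL only when EFER.LMA=1,
so clamping the level is the same as taking the explicit branch.  A
minimal standalone sketch of that equivalence (the constant values
mirror arch/x86/kvm/mmu.h; max_t is open-coded here since this isn't
kernel code):

	#include <assert.h>
	#include <stdbool.h>

	/* Root level constants, values as in arch/x86/kvm/mmu.h. */
	#define PT32_ROOT_LEVEL   2	/* 32-bit non-PAE guest paging */
	#define PT32E_ROOT_LEVEL  3	/* PAE, KVM's minimum shadow root */
	#define PT64_ROOT_4LEVEL  4
	#define PT64_ROOT_5LEVEL  5

	/* Stand-in for the kernel's max_t(u32, a, b). */
	static unsigned int max_u32(unsigned int a, unsigned int b)
	{
		return a > b ? a : b;
	}

	int main(void)
	{
		unsigned int level;

		for (level = 0; level <= PT64_ROOT_5LEVEL; level++) {
			/* Levels above PT32E_ROOT_LEVEL imply EFER.LMA=1. */
			bool lma = level > PT32E_ROOT_LEVEL;
			unsigned int clamp = max_u32(level, PT32E_ROOT_LEVEL);
			unsigned int branch = lma ? level : PT32E_ROOT_LEVEL;

			assert(clamp == branch);
		}
		return 0;
	}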

> -static union kvm_mmu_page_role
> -kvm_calc_shadow_npt_root_page_role(struct kvm_vcpu *vcpu,
> -				   union kvm_mmu_paging_mode role)
> -{
> -	role.base.level = kvm_mmu_get_tdp_level(vcpu);
> -	return role.base;
> +	/*
> +	 * KVM forces EFER.NX=1 when TDP is disabled, reflect it in the MMU role.
> +	 * KVM uses NX when TDP is disabled to handle a variety of scenarios,
> +	 * notably for huge SPTEs if iTLB multi-hit mitigation is enabled and
> +	 * to generate correct permissions for CR0.WP=0/CR4.SMEP=1/EFER.NX=0.
> +	 * The iTLB multi-hit workaround can be toggled at any time, so assume
> +	 * NX can be used by any non-nested shadow MMU to avoid having to reset
> +	 * MMU contexts.
> +	 */
> +	root_role.efer_nx = true;
> +
> +	shadow_mmu_init_context(vcpu, context, cpu_mode, root_role);
>  }
>  
>  void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0,
> @@ -4846,7 +4827,10 @@ void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0,
>  		.efer = efer,
>  	};
>  	union kvm_mmu_paging_mode cpu_mode = kvm_calc_cpu_mode(vcpu, &regs);
> -	union kvm_mmu_page_role root_role = kvm_calc_shadow_npt_root_page_role(vcpu, cpu_mode);
> +	union kvm_mmu_page_role root_role;
> +
> +	root_role = cpu_mode.base;
> +	root_role.level = kvm_mmu_get_tdp_level(vcpu);

Regarding the WARN_ON_ONCE(root_role.direct) discussed for a different patch, how
about this for a WARN + comment?

	/* NPT requires CR0.PG=1, thus the MMU is guaranteed to be indirect. */
	WARN_ON_ONCE(root_role.direct);

>  	shadow_mmu_init_context(vcpu, context, cpu_mode, root_role);
>  	kvm_mmu_new_pgd(vcpu, nested_cr3);
> -- 
> 2.31.1
> 
>
Sean Christopherson March 8, 2022, 7:41 p.m. UTC | #2
On Tue, Mar 08, 2022, Sean Christopherson wrote:
> On Mon, Feb 21, 2022, Paolo Bonzini wrote:
> > @@ -4822,18 +4798,23 @@ static void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu,
> >  {
> >  	struct kvm_mmu *context = &vcpu->arch.root_mmu;
> >  	union kvm_mmu_paging_mode cpu_mode = kvm_calc_cpu_mode(vcpu, regs);
> > -	union kvm_mmu_page_role root_role =
> > -		kvm_calc_shadow_mmu_root_page_role(vcpu, cpu_mode);
> > +	union kvm_mmu_page_role root_role;
> >  
> > -	shadow_mmu_init_context(vcpu, context, cpu_mode, root_role);
> > -}
> > +	root_role = cpu_mode.base;
> > +	root_role.level = max_t(u32, root_role.level, PT32E_ROOT_LEVEL);
> 
> Heh, we have different definitions of "simpler".  Can we split the difference
> and do?
> 
> 	/* KVM uses PAE paging whenever the guest isn't using 64-bit paging. */
> 	if (!____is_efer_lma(regs))
> 		root_role.level = PT32E_ROOT_LEVEL;

Ha, and then the very next patch stomps all over this.  I think this just needs
to add

	BUILD_MMU_ROLE_ACCESSOR(ext, efer, lma);

and do

	if (!is_efer_lma(context))
		root_role.level = PT32E_ROOT_LEVEL;
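
For readers following along, a sketch of what that accessor would
expand to, modeled on the existing BUILD_MMU_ROLE_ACCESSOR pattern in
arch/x86/kvm/mmu/mmu.c (the mmu->cpu_mode field name is an assumption
tied to this series' in-flight renames, and the struct below is a
minimal stand-in for illustration only):

	#include <stdbool.h>

	/* Minimal stand-in for the real structures. */
	struct kvm_mmu {
		struct {
			struct { unsigned int efer_lma : 1; } ext;
		} cpu_mode;
	};

	/* Generator for is_<reg>_<name>() role-bit accessors. */
	#define BUILD_MMU_ROLE_ACCESSOR(base_or_ext, reg, name)		\
	static inline bool is_##reg##_##name(struct kvm_mmu *mmu)	\
	{								\
		return !!(mmu->cpu_mode.base_or_ext.reg##_##name);	\
	}

	/* Expands to is_efer_lma(mmu), reading cpu_mode.ext.efer_lma. */
	BUILD_MMU_ROLE_ACCESSOR(ext, efer, lma);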
Paolo Bonzini March 9, 2022, 10:33 a.m. UTC | #3
On 3/8/22 20:35, Sean Christopherson wrote:
>> +	root_role = cpu_mode.base;
>> +	root_role.level = max_t(u32, root_role.level, PT32E_ROOT_LEVEL);
> Heh, we have different definitions of "simpler".  Can we split the difference
> and do?
> 
> 	/* KVM uses PAE paging whenever the guest isn't using 64-bit paging. */
> 	if (!____is_efer_lma(regs))
> 		root_role.level = PT32E_ROOT_LEVEL;
> 

It's not that easy until the very end (when cpu_mode is set in 
kvm_mmu_init_walker), but I'll make sure to switch to is_efer_lma once 
it is possible.

Paolo

Patch

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index d657e2e2ceec..47288643ab70 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4768,30 +4768,6 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu,
 	reset_tdp_shadow_zero_bits_mask(context);
 }
 
-static union kvm_mmu_page_role
-kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu,
-				   union kvm_mmu_paging_mode role)
-{
-	if (!role.ext.efer_lma)
-		role.base.level = PT32E_ROOT_LEVEL;
-	else if (role.ext.cr4_la57)
-		role.base.level = PT64_ROOT_5LEVEL;
-	else
-		role.base.level = PT64_ROOT_4LEVEL;
-
-	/*
-	 * KVM forces EFER.NX=1 when TDP is disabled, reflect it in the MMU role.
-	 * KVM uses NX when TDP is disabled to handle a variety of scenarios,
-	 * notably for huge SPTEs if iTLB multi-hit mitigation is enabled and
-	 * to generate correct permissions for CR0.WP=0/CR4.SMEP=1/EFER.NX=0.
-	 * The iTLB multi-hit workaround can be toggled at any time, so assume
-	 * NX can be used by any non-nested shadow MMU to avoid having to reset
-	 * MMU contexts.
-	 */
-	role.base.efer_nx = true;
-	return role.base;
-}
-
 static void shadow_mmu_init_context(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
 				    union kvm_mmu_paging_mode cpu_mode,
 				    union kvm_mmu_page_role root_role)
@@ -4822,18 +4798,23 @@ static void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu,
 {
 	struct kvm_mmu *context = &vcpu->arch.root_mmu;
 	union kvm_mmu_paging_mode cpu_mode = kvm_calc_cpu_mode(vcpu, regs);
-	union kvm_mmu_page_role root_role =
-		kvm_calc_shadow_mmu_root_page_role(vcpu, cpu_mode);
+	union kvm_mmu_page_role root_role;
 
-	shadow_mmu_init_context(vcpu, context, cpu_mode, root_role);
-}
+	root_role = cpu_mode.base;
+	root_role.level = max_t(u32, root_role.level, PT32E_ROOT_LEVEL);
 
-static union kvm_mmu_page_role
-kvm_calc_shadow_npt_root_page_role(struct kvm_vcpu *vcpu,
-				   union kvm_mmu_paging_mode role)
-{
-	role.base.level = kvm_mmu_get_tdp_level(vcpu);
-	return role.base;
+	/*
+	 * KVM forces EFER.NX=1 when TDP is disabled, reflect it in the MMU role.
+	 * KVM uses NX when TDP is disabled to handle a variety of scenarios,
+	 * notably for huge SPTEs if iTLB multi-hit mitigation is enabled and
+	 * to generate correct permissions for CR0.WP=0/CR4.SMEP=1/EFER.NX=0.
+	 * The iTLB multi-hit workaround can be toggled at any time, so assume
+	 * NX can be used by any non-nested shadow MMU to avoid having to reset
+	 * MMU contexts.
+	 */
+	root_role.efer_nx = true;
+
+	shadow_mmu_init_context(vcpu, context, cpu_mode, root_role);
 }
 
 void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0,
@@ -4846,7 +4827,10 @@ void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0,
 		.efer = efer,
 	};
 	union kvm_mmu_paging_mode cpu_mode = kvm_calc_cpu_mode(vcpu, &regs);
-	union kvm_mmu_page_role root_role = kvm_calc_shadow_npt_root_page_role(vcpu, cpu_mode);
+	union kvm_mmu_page_role root_role;
+
+	root_role = cpu_mode.base;
+	root_role.level = kvm_mmu_get_tdp_level(vcpu);
 
 	shadow_mmu_init_context(vcpu, context, cpu_mode, root_role);
 	kvm_mmu_new_pgd(vcpu, nested_cr3);