
[v2,09/25] KVM: x86/mmu: do not recompute root level from kvm_mmu_role_regs

Message ID 20220221162243.683208-10-pbonzini@redhat.com
State New, archived
Series KVM MMU refactoring part 2: role changes

Commit Message

Paolo Bonzini Feb. 21, 2022, 4:22 p.m. UTC
The root_level can be found in the cpu_mode (in fact the field
is superfluous and could be removed, but one thing at a time).
Since there is only one usage left of role_regs_to_root_level,
inline it into kvm_calc_cpu_mode.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 arch/x86/kvm/mmu/mmu.c | 23 ++++++++---------------
 1 file changed, 8 insertions(+), 15 deletions(-)
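
For context, the logic being inlined resolves the guest's paging mode to a
root page-table level: no paging yields level 0, long mode yields four or
five levels depending on CR4.LA57, PAE paging yields three, and legacy
32-bit paging yields two. Below is a minimal standalone C sketch of that
mapping; struct regs and the root_level() helper are illustrative
stand-ins, while the PT*_ROOT_* values match the kernel's definitions.

	/*
	 * Standalone illustration of the paging-mode-to-root-level mapping
	 * that the patch inlines into kvm_calc_cpu_mode().  The struct and
	 * helper names are hypothetical; the constants match the kernel's.
	 */
	#include <stdio.h>

	#define PT32_ROOT_LEVEL   2	/* legacy 32-bit paging: 2 levels */
	#define PT32E_ROOT_LEVEL  3	/* PAE paging: 3 levels           */
	#define PT64_ROOT_4LEVEL  4	/* long mode: 4 levels            */
	#define PT64_ROOT_5LEVEL  5	/* long mode with LA57: 5 levels  */

	struct regs {
		int cr0_pg, efer_lma, cr4_la57, cr4_pae;
	};

	static int root_level(const struct regs *r)
	{
		if (!r->cr0_pg)
			return 0;	/* paging disabled: no root table */
		if (r->efer_lma)
			return r->cr4_la57 ? PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL;
		if (r->cr4_pae)
			return PT32E_ROOT_LEVEL;
		return PT32_ROOT_LEVEL;
	}

	int main(void)
	{
		struct regs la57 = { .cr0_pg = 1, .efer_lma = 1, .cr4_la57 = 1 };
		struct regs pae  = { .cr0_pg = 1, .cr4_pae = 1 };

		printf("LA57 long mode -> %d levels\n", root_level(&la57)); /* 5 */
		printf("PAE paging     -> %d levels\n", root_level(&pae));  /* 3 */
		return 0;
	}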

Comments

Sean Christopherson March 8, 2022, 5:41 p.m. UTC | #1
On Mon, Feb 21, 2022, Paolo Bonzini wrote:
> The root_level can be found in the cpu_mode (in fact the field
> is superfluous and could be removed, but one thing at a time).
> Since there is only one usage left of role_regs_to_root_level,
> inline it into kvm_calc_cpu_mode.
> 
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> ---
>  arch/x86/kvm/mmu/mmu.c | 23 ++++++++---------------
>  1 file changed, 8 insertions(+), 15 deletions(-)
> 
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index 1af898f0cf87..6e539fc2c9c7 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -244,19 +244,6 @@ static struct kvm_mmu_role_regs vcpu_to_role_regs(struct kvm_vcpu *vcpu)
>  	return regs;
>  }
>  
> -static int role_regs_to_root_level(const struct kvm_mmu_role_regs *regs)
> -{
> -	if (!____is_cr0_pg(regs))
> -		return 0;
> -	else if (____is_efer_lma(regs))
> -		return ____is_cr4_la57(regs) ? PT64_ROOT_5LEVEL :
> -					       PT64_ROOT_4LEVEL;
> -	else if (____is_cr4_pae(regs))
> -		return PT32E_ROOT_LEVEL;
> -	else
> -		return PT32_ROOT_LEVEL;
> -}
> -
>  static inline bool kvm_available_flush_tlb_with_range(void)
>  {
>  	return kvm_x86_ops.tlb_remote_flush_with_range;
> @@ -4695,7 +4682,13 @@ kvm_calc_cpu_mode(struct kvm_vcpu *vcpu, const struct kvm_mmu_role_regs *regs)
>  		role.base.smep_andnot_wp = ____is_cr4_smep(regs) && !____is_cr0_wp(regs);
>  		role.base.smap_andnot_wp = ____is_cr4_smap(regs) && !____is_cr0_wp(regs);
>  		role.base.has_4_byte_gpte = !____is_cr4_pae(regs);
> -		role.base.level = role_regs_to_root_level(regs);
> +
> +		if (____is_efer_lma(regs))
> +			role.base.level = ____is_cr4_la57(regs) ? PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL;

Can we wrap this, even if indentation is reduced?  I find it much easier to quickly
understand the if-else paths if they're stacked and not run out to almost 100 chars.

	if (____is_efer_lma(regs))
		role.base.level = ____is_cr4_la57(regs) ? PT64_ROOT_5LEVEL :
							  PT64_ROOT_4LEVEL;
	else if (____is_cr4_pae(regs))
		role.base.level = PT32E_ROOT_LEVEL;
	else
		role.base.level = PT32_ROOT_LEVEL;

> +		else if (____is_cr4_pae(regs))
> +			role.base.level = PT32E_ROOT_LEVEL;
> +		else
> +			role.base.level = PT32_ROOT_LEVEL;
>  
>  		role.ext.cr0_pg = 1;
>  		role.ext.cr4_pae = ____is_cr4_pae(regs);
> @@ -4790,7 +4783,7 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu,
>  	context->get_guest_pgd = kvm_get_guest_cr3;
>  	context->get_pdptr = kvm_pdptr_read;
>  	context->inject_page_fault = kvm_inject_page_fault;
> -	context->root_level = role_regs_to_root_level(regs);
> +	context->root_level = cpu_mode.base.level;
>  
>  	if (!is_cr0_pg(context))
>  		context->gva_to_gpa = nonpaging_gva_to_gpa;
> -- 
> 2.31.1

Patch

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 1af898f0cf87..6e539fc2c9c7 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -244,19 +244,6 @@ static struct kvm_mmu_role_regs vcpu_to_role_regs(struct kvm_vcpu *vcpu)
 	return regs;
 }
 
-static int role_regs_to_root_level(const struct kvm_mmu_role_regs *regs)
-{
-	if (!____is_cr0_pg(regs))
-		return 0;
-	else if (____is_efer_lma(regs))
-		return ____is_cr4_la57(regs) ? PT64_ROOT_5LEVEL :
-					       PT64_ROOT_4LEVEL;
-	else if (____is_cr4_pae(regs))
-		return PT32E_ROOT_LEVEL;
-	else
-		return PT32_ROOT_LEVEL;
-}
-
 static inline bool kvm_available_flush_tlb_with_range(void)
 {
 	return kvm_x86_ops.tlb_remote_flush_with_range;
@@ -4695,7 +4682,13 @@ kvm_calc_cpu_mode(struct kvm_vcpu *vcpu, const struct kvm_mmu_role_regs *regs)
 		role.base.smep_andnot_wp = ____is_cr4_smep(regs) && !____is_cr0_wp(regs);
 		role.base.smap_andnot_wp = ____is_cr4_smap(regs) && !____is_cr0_wp(regs);
 		role.base.has_4_byte_gpte = !____is_cr4_pae(regs);
-		role.base.level = role_regs_to_root_level(regs);
+
+		if (____is_efer_lma(regs))
+			role.base.level = ____is_cr4_la57(regs) ? PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL;
+		else if (____is_cr4_pae(regs))
+			role.base.level = PT32E_ROOT_LEVEL;
+		else
+			role.base.level = PT32_ROOT_LEVEL;
 
 		role.ext.cr0_pg = 1;
 		role.ext.cr4_pae = ____is_cr4_pae(regs);
@@ -4790,7 +4783,7 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu,
 	context->get_guest_pgd = kvm_get_guest_cr3;
 	context->get_pdptr = kvm_pdptr_read;
 	context->inject_page_fault = kvm_inject_page_fault;
-	context->root_level = role_regs_to_root_level(regs);
+	context->root_level = cpu_mode.base.level;
 
 	if (!is_cr0_pg(context))
 		context->gva_to_gpa = nonpaging_gva_to_gpa;
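
The init_kvm_tdp_mmu() hunk works because the level computed once in
kvm_calc_cpu_mode() travels inside the packed role, so the caller can read
it back rather than recompute it from the registers. Here is a simplified
sketch of that cache-in-a-bitfield pattern; union page_role is a
hypothetical stand-in for the much larger union kvm_mmu_page_role.

	/*
	 * Simplified sketch: compute a value once, store it in a packed
	 * role bitfield, and read it back later.  This union is a
	 * hypothetical stand-in for the kernel's kvm_mmu_page_role.
	 */
	#include <stdio.h>
	#include <stdint.h>

	union page_role {
		uint32_t word;			/* whole role as one integer */
		struct {
			uint32_t level:4;	/* cached root level (0-5)   */
			uint32_t has_4_byte_gpte:1;
		} base;
	};

	int main(void)
	{
		union page_role role = { .word = 0 };

		role.base.level = 5;		/* computed once up front... */
		printf("root level = %u\n",	/* ...read back by callers   */
		       (unsigned)role.base.level);
		return 0;
	}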