[09/23] KVM: MMU: remove "bool base_only" arguments

Message ID 20220204115718.14934-10-pbonzini@redhat.com (mailing list archive)
State New, archived
Series KVM: MMU: MMU role refactoring

Commit Message

Paolo Bonzini Feb. 4, 2022, 11:57 a.m. UTC
The base_only argument is always false now that kvm_mmu_calc_root_page_role
has been removed, so drop it and fold the former kvm_calc_mmu_role_ext()
computation directly into kvm_calc_mmu_role_common().

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 arch/x86/kvm/mmu/mmu.c | 63 +++++++++++++++---------------------------
 1 file changed, 22 insertions(+), 41 deletions(-)
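
The shape of the cleanup is worth spelling out: once every caller passes
base_only=false, the early return in kvm_calc_mmu_role_common() is dead code,
and the separate kvm_calc_mmu_role_ext() helper can be folded into its only
remaining user. Below is a minimal standalone sketch of that pattern, using
simplified placeholder types and fields rather than the actual KVM code:

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for union kvm_mmu_role's base/ext halves. */
struct role {
	unsigned int base_bits;
	unsigned int ext_bits;
};

/* Before: the flag gates computation of the extended half. */
static struct role calc_role_old(bool paging, bool base_only)
{
	struct role r = { 0 };

	if (paging)
		r.base_bits = 1;
	if (base_only)
		return r;	/* unreachable once all callers pass false */
	if (paging)
		r.ext_bits = 2;	/* previously a separate *_ext() helper */
	return r;
}

/* After: the flag and early return are gone; the ext half is filled in
 * the same conditional block as the base half. */
static struct role calc_role_new(bool paging)
{
	struct role r = { 0 };

	if (paging) {
		r.base_bits = 1;
		r.ext_bits = 2;
	}
	return r;
}

int main(void)
{
	struct role old = calc_role_old(true, false);
	struct role new = calc_role_new(true);

	/* Both versions compute identical roles for the only surviving
	 * call pattern (base_only == false), which is what makes the
	 * flag removable. */
	printf("old: base=%u ext=%u\n", old.base_bits, old.ext_bits);
	printf("new: base=%u ext=%u\n", new.base_bits, new.ext_bits);
	return 0;
}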

Comments

David Matlack Feb. 4, 2022, 7:41 p.m. UTC | #1
On Fri, Feb 04, 2022 at 06:57:04AM -0500, Paolo Bonzini wrote:
> The base_only argument is always false now that kvm_mmu_calc_root_page_role
> has been removed, so drop it and fold the former kvm_calc_mmu_role_ext()
> computation directly into kvm_calc_mmu_role_common().
> 
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>

Reviewed-by: David Matlack <dmatlack@google.com>

Patch

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 42475e4c2a48..dd69cfc8c4f6 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4658,46 +4658,30 @@ static void paging32_init_context(struct kvm_mmu *context)
 	context->direct_map = false;
 }
 
-static union kvm_mmu_extended_role kvm_calc_mmu_role_ext(struct kvm_vcpu *vcpu,
-							 const struct kvm_mmu_role_regs *regs)
-{
-	union kvm_mmu_extended_role ext = {0};
-
-	if (____is_cr0_pg(regs)) {
-		ext.cr0_pg = 1;
-		ext.cr4_pae = ____is_cr4_pae(regs);
-		ext.cr4_smep = ____is_cr4_smep(regs);
-		ext.cr4_smap = ____is_cr4_smap(regs);
-		ext.cr4_pse = ____is_cr4_pse(regs);
-
-		/* PKEY and LA57 are active iff long mode is active. */
-		ext.cr4_pke = ____is_efer_lma(regs) && ____is_cr4_pke(regs);
-		ext.cr4_la57 = ____is_efer_lma(regs) && ____is_cr4_la57(regs);
-		ext.efer_lma = ____is_efer_lma(regs);
-	}
-
-	return ext;
-}
-
 static union kvm_mmu_role kvm_calc_mmu_role_common(struct kvm_vcpu *vcpu,
-						   const struct kvm_mmu_role_regs *regs,
-						   bool base_only)
+						   const struct kvm_mmu_role_regs *regs)
 {
 	union kvm_mmu_role role = {0};
 
 	role.base.access = ACC_ALL;
 	if (____is_cr0_pg(regs)) {
+		role.ext.cr0_pg = 1;
 		role.base.efer_nx = ____is_efer_nx(regs);
 		role.base.cr0_wp = ____is_cr0_wp(regs);
+
+		role.ext.cr4_pae = ____is_cr4_pae(regs);
+		role.ext.cr4_smep = ____is_cr4_smep(regs);
+		role.ext.cr4_smap = ____is_cr4_smap(regs);
+		role.ext.cr4_pse = ____is_cr4_pse(regs);
+
+		/* PKEY and LA57 are active iff long mode is active. */
+		role.ext.cr4_pke = ____is_efer_lma(regs) && ____is_cr4_pke(regs);
+		role.ext.cr4_la57 = ____is_efer_lma(regs) && ____is_cr4_la57(regs);
+		role.ext.efer_lma = ____is_efer_lma(regs);
 	}
 	role.base.smm = is_smm(vcpu);
 	role.base.guest_mode = is_guest_mode(vcpu);
 
-	if (base_only)
-		return role;
-
-	role.ext = kvm_calc_mmu_role_ext(vcpu, regs);
-
 	return role;
 }
 
@@ -4716,10 +4700,9 @@ static inline int kvm_mmu_get_tdp_level(struct kvm_vcpu *vcpu)
 
 static union kvm_mmu_role
 kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu,
-				const struct kvm_mmu_role_regs *regs,
-				bool base_only)
+				const struct kvm_mmu_role_regs *regs)
 {
-	union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, regs, base_only);
+	union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, regs);
 
 	role.base.ad_disabled = (shadow_accessed_mask == 0);
 	role.base.level = kvm_mmu_get_tdp_level(vcpu);
@@ -4734,7 +4717,7 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu,
 {
 	struct kvm_mmu *context = &vcpu->arch.root_mmu;
 	union kvm_mmu_role new_role =
-		kvm_calc_tdp_mmu_root_page_role(vcpu, regs, false);
+		kvm_calc_tdp_mmu_root_page_role(vcpu, regs);
 
 	if (new_role.as_u64 == context->mmu_role.as_u64)
 		return;
@@ -4763,10 +4746,9 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu,
 
 static union kvm_mmu_role
 kvm_calc_shadow_root_page_role_common(struct kvm_vcpu *vcpu,
-				      const struct kvm_mmu_role_regs *regs,
-				      bool base_only)
+				      const struct kvm_mmu_role_regs *regs)
 {
-	union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, regs, base_only);
+	union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, regs);
 
 	role.base.smep_andnot_wp = role.ext.cr4_smep && !____is_cr0_wp(regs);
 	role.base.smap_andnot_wp = role.ext.cr4_smap && !____is_cr0_wp(regs);
@@ -4777,11 +4759,10 @@ kvm_calc_shadow_root_page_role_common(struct kvm_vcpu *vcpu,
 
 static union kvm_mmu_role
 kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu,
-				   const struct kvm_mmu_role_regs *regs,
-				   bool base_only)
+				   const struct kvm_mmu_role_regs *regs)
 {
 	union kvm_mmu_role role =
-		kvm_calc_shadow_root_page_role_common(vcpu, regs, base_only);
+		kvm_calc_shadow_root_page_role_common(vcpu, regs);
 
 	role.base.direct = !____is_cr0_pg(regs);
 
@@ -4821,7 +4802,7 @@ static void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu,
 {
 	struct kvm_mmu *context = &vcpu->arch.root_mmu;
 	union kvm_mmu_role new_role =
-		kvm_calc_shadow_mmu_root_page_role(vcpu, regs, false);
+		kvm_calc_shadow_mmu_root_page_role(vcpu, regs);
 
 	shadow_mmu_init_context(vcpu, context, regs, new_role);
 
@@ -4841,7 +4822,7 @@ kvm_calc_shadow_npt_root_page_role(struct kvm_vcpu *vcpu,
 				   const struct kvm_mmu_role_regs *regs)
 {
 	union kvm_mmu_role role =
-		kvm_calc_shadow_root_page_role_common(vcpu, regs, false);
+		kvm_calc_shadow_root_page_role_common(vcpu, regs);
 
 	role.base.direct = false;
 	role.base.level = kvm_mmu_get_tdp_level(vcpu);
@@ -4937,7 +4918,7 @@ kvm_calc_nested_mmu_role(struct kvm_vcpu *vcpu, const struct kvm_mmu_role_regs *
 {
 	union kvm_mmu_role role;
 
-	role = kvm_calc_shadow_root_page_role_common(vcpu, regs, false);
+	role = kvm_calc_shadow_root_page_role_common(vcpu, regs);
 
 	/*
 	 * Nested MMUs are used only for walking L2's gva->gpa, they never have