[v4,08/12] KVM: x86: centralize code to get CD=1 memtype when guest MTRRs are honored

Message ID: 20230714065356.20620-1-yan.y.zhao@intel.com
State: New, archived
Series: KVM: x86/mmu: refine memtype related mmu zap

Commit Message

Yan Zhao July 14, 2023, 6:53 a.m. UTC
Centralize the code to get the cache-disabled memtype when guest MTRRs are
honored. A TDP that honors guest MTRRs is required to call the provided
API to get the memtype for CR0.CD=1.

This is a preparation patch for the later implementation of fine-grained
gfn zap for CR0.CD toggles when guest MTRRs are honored.

No functional change intended.

Signed-off-by: Yan Zhao <yan.y.zhao@intel.com>
---
 arch/x86/kvm/mtrr.c    | 16 ++++++++++++++++
 arch/x86/kvm/vmx/vmx.c | 10 +++++-----
 arch/x86/kvm/x86.h     |  2 ++
 3 files changed, 23 insertions(+), 5 deletions(-)
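
To illustrate the intended use, below is a minimal, hypothetical sketch of
how the later fine-grained zap work might consume the same helper instead of
open-coding the KVM_X86_QUIRK_CD_NW_CLEARED check a second time. The function
kvm_honors_guest_mtrrs_post_cd_toggle() and its placement are assumptions for
illustration only; kvm_honors_guest_mtrrs_get_cd_memtype() and
kvm_zap_gfn_range() are the real KVM symbols:

	/*
	 * Hypothetical sketch, not part of this patch: a CR0.CD toggle
	 * handler in the planned fine-grained zap patch could query the
	 * centralized CD=1 memtype instead of duplicating the quirk check.
	 */
	static void kvm_honors_guest_mtrrs_post_cd_toggle(struct kvm_vcpu *vcpu)
	{
		bool ipat;
		u8 type;

		kvm_honors_guest_mtrrs_get_cd_memtype(vcpu, &type, &ipat);

		if (!ipat) {
			/*
			 * CD=1 memtype is WB and guest PAT is honored
			 * (KVM_X86_QUIRK_CD_NW_CLEARED): only gfn ranges
			 * whose MTRR memtype differs from WB can change,
			 * so a fine-grained zap suffices (elided here).
			 */
			return;
		}

		/* Forced UC changes every mapping; zap everything. */
		kvm_zap_gfn_range(vcpu->kvm, 0, ~0ULL);
	}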

Comments

Sean Christopherson Aug. 25, 2023, 9:46 p.m. UTC | #1
On Fri, Jul 14, 2023, Yan Zhao wrote:
> Centralize the code to get the cache-disabled memtype when guest MTRRs are
> honored. A TDP that honors guest MTRRs is required to call the provided
> API to get the memtype for CR0.CD=1.
> 
> This is a preparation patch for the later implementation of fine-grained
> gfn zap for CR0.CD toggles when guest MTRRs are honored.
> 
> No functional change intended.
> 
> Signed-off-by: Yan Zhao <yan.y.zhao@intel.com>
> ---
>  arch/x86/kvm/mtrr.c    | 16 ++++++++++++++++
>  arch/x86/kvm/vmx/vmx.c | 10 +++++-----
>  arch/x86/kvm/x86.h     |  2 ++
>  3 files changed, 23 insertions(+), 5 deletions(-)
> 
> diff --git a/arch/x86/kvm/mtrr.c b/arch/x86/kvm/mtrr.c
> index 3ce58734ad22..64c6daa659c8 100644
> --- a/arch/x86/kvm/mtrr.c
> +++ b/arch/x86/kvm/mtrr.c
> @@ -721,3 +721,19 @@ bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
>  
>  	return type == mtrr_default_type(mtrr_state);
>  }
> +
> +/*
> + * This routine must only be called when guest MTRRs are honored.
> + */
> +void kvm_honors_guest_mtrrs_get_cd_memtype(struct kvm_vcpu *vcpu,
> +					   u8 *type, bool *ipat)

I really don't like this helper.  IMO it's a big net negative for the readability
of vmx_get_mt_mask().  As I said in the previous version, I agree that splitting
logic is generally undesirable, but in this case I strongly believe it's the
lesser evil.

> +{
> +	if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED)) {
> +		*type = MTRR_TYPE_WRBACK;
> +		*ipat = false;
> +	} else {
> +		*type = MTRR_TYPE_UNCACHABLE;
> +		*ipat = true;
> +	}
> +}
> +EXPORT_SYMBOL_GPL(kvm_honors_guest_mtrrs_get_cd_memtype);
> diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
> index c1e93678cea4..7fec1ee23b54 100644
> --- a/arch/x86/kvm/vmx/vmx.c
> +++ b/arch/x86/kvm/vmx/vmx.c
> @@ -7573,11 +7573,11 @@ static u8 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
>  		return (MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT) | VMX_EPT_IPAT_BIT;
>  
>  	if (kvm_read_cr0_bits(vcpu, X86_CR0_CD)) {
> -		if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
> -			return MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT;
> -		else
> -			return (MTRR_TYPE_UNCACHABLE << VMX_EPT_MT_EPTE_SHIFT) |
> -				VMX_EPT_IPAT_BIT;
> +		bool ipat;
> +		u8 cache;
> +
> +		kvm_honors_guest_mtrrs_get_cd_memtype(vcpu, &cache, &ipat);
> +		return cache << VMX_EPT_MT_EPTE_SHIFT | (ipat ? VMX_EPT_IPAT_BIT : 0);
>  	}
>  
>  	return kvm_mtrr_get_guest_memory_type(vcpu, gfn) << VMX_EPT_MT_EPTE_SHIFT;
> diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
> index 82e3dafc5453..e7733dc4dccc 100644
> --- a/arch/x86/kvm/x86.h
> +++ b/arch/x86/kvm/x86.h
> @@ -313,6 +313,8 @@ int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data);
>  int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
>  bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
>  					  int page_num);
> +void kvm_honors_guest_mtrrs_get_cd_memtype(struct kvm_vcpu *vcpu,
> +					   u8 *type, bool *ipat);
>  bool kvm_vector_hashing_enabled(void);
>  void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_code);
>  int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type,
> -- 
> 2.17.1
>
Yan Zhao Sept. 4, 2023, 7:46 a.m. UTC | #2
On Fri, Aug 25, 2023 at 02:46:07PM -0700, Sean Christopherson wrote:
> On Fri, Jul 14, 2023, Yan Zhao wrote:
> > Centralize the code to get the cache-disabled memtype when guest MTRRs are
> > honored. A TDP that honors guest MTRRs is required to call the provided
> > API to get the memtype for CR0.CD=1.
> > 
> > This is a preparation patch for the later implementation of fine-grained
> > gfn zap for CR0.CD toggles when guest MTRRs are honored.
> > 
> > No functional change intended.
> > 
> > Signed-off-by: Yan Zhao <yan.y.zhao@intel.com>
> > ---
> >  arch/x86/kvm/mtrr.c    | 16 ++++++++++++++++
> >  arch/x86/kvm/vmx/vmx.c | 10 +++++-----
> >  arch/x86/kvm/x86.h     |  2 ++
> >  3 files changed, 23 insertions(+), 5 deletions(-)
> > 
> > diff --git a/arch/x86/kvm/mtrr.c b/arch/x86/kvm/mtrr.c
> > index 3ce58734ad22..64c6daa659c8 100644
> > --- a/arch/x86/kvm/mtrr.c
> > +++ b/arch/x86/kvm/mtrr.c
> > @@ -721,3 +721,19 @@ bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
> >  
> >  	return type == mtrr_default_type(mtrr_state);
> >  }
> > +
> > +/*
> > + * This routine must only be called when guest MTRRs are honored.
> > + */
> > +void kvm_honors_guest_mtrrs_get_cd_memtype(struct kvm_vcpu *vcpu,
> > +					   u8 *type, bool *ipat)
> 
> I really don't like this helper.  IMO it's a big net negative for the readability
> of vmx_get_mt_mask().  As I said in the previous version, I agree that splitting
> logic is generally undesirable, but in this case I strongly believe it's the
> lesser evil.
> 
Ok, will drop this helper in the next version :)
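
With the helper dropped, one possible shape for the next version (an
assumption here, not the posted follow-up) is simply keeping the CD=1 logic
open-coded in vmx_get_mt_mask(), i.e. retaining the pre-patch branch that
this hunk removes:

	if (kvm_read_cr0_bits(vcpu, X86_CR0_CD)) {
		if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
			return MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT;
		else
			return (MTRR_TYPE_UNCACHABLE << VMX_EPT_MT_EPTE_SHIFT) |
				VMX_EPT_IPAT_BIT;
	}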

Patch

diff --git a/arch/x86/kvm/mtrr.c b/arch/x86/kvm/mtrr.c
index 3ce58734ad22..64c6daa659c8 100644
--- a/arch/x86/kvm/mtrr.c
+++ b/arch/x86/kvm/mtrr.c
@@ -721,3 +721,19 @@ bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
 
 	return type == mtrr_default_type(mtrr_state);
 }
+
+/*
+ * This routine must only be called when guest MTRRs are honored.
+ */
+void kvm_honors_guest_mtrrs_get_cd_memtype(struct kvm_vcpu *vcpu,
+					   u8 *type, bool *ipat)
+{
+	if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED)) {
+		*type = MTRR_TYPE_WRBACK;
+		*ipat = false;
+	} else {
+		*type = MTRR_TYPE_UNCACHABLE;
+		*ipat = true;
+	}
+}
+EXPORT_SYMBOL_GPL(kvm_honors_guest_mtrrs_get_cd_memtype);
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index c1e93678cea4..7fec1ee23b54 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -7573,11 +7573,11 @@ static u8 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
 		return (MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT) | VMX_EPT_IPAT_BIT;
 
 	if (kvm_read_cr0_bits(vcpu, X86_CR0_CD)) {
-		if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
-			return MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT;
-		else
-			return (MTRR_TYPE_UNCACHABLE << VMX_EPT_MT_EPTE_SHIFT) |
-				VMX_EPT_IPAT_BIT;
+		bool ipat;
+		u8 cache;
+
+		kvm_honors_guest_mtrrs_get_cd_memtype(vcpu, &cache, &ipat);
+		return cache << VMX_EPT_MT_EPTE_SHIFT | (ipat ? VMX_EPT_IPAT_BIT : 0);
 	}
 
 	return kvm_mtrr_get_guest_memory_type(vcpu, gfn) << VMX_EPT_MT_EPTE_SHIFT;
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 82e3dafc5453..e7733dc4dccc 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -313,6 +313,8 @@ int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data);
 int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
 bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
 					  int page_num);
+void kvm_honors_guest_mtrrs_get_cd_memtype(struct kvm_vcpu *vcpu,
+					   u8 *type, bool *ipat);
 bool kvm_vector_hashing_enabled(void);
 void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_code);
 int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type,
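
As a standalone sanity check of the "no functional change" claim, the sketch
below recomputes the two CR0.CD=1 EPT memtype values produced by the new
combined expression and compares them against the old branches. The constants
are copied from the arch/x86 headers; the harness itself is illustrative
user-space code, not kernel code:

	#include <assert.h>
	#include <stdint.h>

	/* Constants duplicated from arch/x86 headers for a standalone check. */
	#define VMX_EPT_MT_EPTE_SHIFT	3
	#define VMX_EPT_IPAT_BIT	(1ull << 6)
	#define MTRR_TYPE_UNCACHABLE	0
	#define MTRR_TYPE_WRBACK	6

	/* Mirrors kvm_honors_guest_mtrrs_get_cd_memtype() plus the VMX caller. */
	static uint64_t cd_memtype(int quirk_enabled)
	{
		uint8_t type = quirk_enabled ? MTRR_TYPE_WRBACK : MTRR_TYPE_UNCACHABLE;
		int ipat = !quirk_enabled;

		return (uint64_t)type << VMX_EPT_MT_EPTE_SHIFT |
		       (ipat ? VMX_EPT_IPAT_BIT : 0);
	}

	int main(void)
	{
		/* Quirk on: WB, IPAT clear -- matches the old "if" branch. */
		assert(cd_memtype(1) == (MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT));

		/* Quirk off: UC with IPAT set -- matches the old "else" branch. */
		assert(cd_memtype(0) ==
		       ((MTRR_TYPE_UNCACHABLE << VMX_EPT_MT_EPTE_SHIFT) |
			VMX_EPT_IPAT_BIT));
		return 0;
	}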