
[3/8,VMX]: Add a function to check reserved bits in MSR_CORE_PERF_GLOBAL_CTRL

Message ID 20190424231724.2014-4-krish.sadhukhan@oracle.com (mailing list archive)
State New, archived
Series [1/8,KVM nVMX]: Enable "load IA32_PERF_GLOBAL_CTRL" VM-exit control for nested guests

Commit Message

Krish Sadhukhan April 24, 2019, 11:17 p.m. UTC
Signed-off-by: Krish Sadhukhan <krish.sadhukhan@oracle.com>
Reviewed-by: Karl Heubaum <karl.heubaum@oracle.com>
---
 arch/x86/include/asm/kvm_host.h  |  1 +
 arch/x86/include/asm/msr-index.h |  7 +++++++
 arch/x86/kvm/x86.c               | 20 ++++++++++++++++++++
 3 files changed, 28 insertions(+)

Comments

Sean Christopherson May 13, 2019, 6:57 p.m. UTC | #1
On Wed, Apr 24, 2019 at 07:17:19PM -0400, Krish Sadhukhan wrote:
> Signed-off-by: Krish Sadhukhan <krish.sadhukhan@oracle.com>
> Reviewed-by: Karl Heubaum <karl.heubaum@oracle.com>
> ---
>  arch/x86/include/asm/kvm_host.h  |  1 +
>  arch/x86/include/asm/msr-index.h |  7 +++++++
>  arch/x86/kvm/x86.c               | 20 ++++++++++++++++++++
>  3 files changed, 28 insertions(+)
> 
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index 4660ce90de7f..c5b3c63129a6 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -1311,6 +1311,7 @@ int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu,
>  
>  void kvm_enable_efer_bits(u64);
>  bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer);
> +bool kvm_valid_perf_global_ctrl(u64 perf_global);
>  int kvm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr);
>  int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr);
>  
> diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
> index 8e40c2446fd1..d10e8d4b2842 100644
> --- a/arch/x86/include/asm/msr-index.h
> +++ b/arch/x86/include/asm/msr-index.h
> @@ -775,6 +775,13 @@
>  #define MSR_CORE_PERF_GLOBAL_CTRL	0x0000038f
>  #define MSR_CORE_PERF_GLOBAL_OVF_CTRL	0x00000390
>  
> +/* MSR_CORE_PERF_GLOBAL_CTRL bits */
> +#define	PERF_GLOBAL_CTRL_PMC0_ENABLE	(1ull << 0)

BIT and BIT_ULL
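
E.g., something along these lines (assuming msr-index.h can pull in
<linux/bits.h> for BIT_ULL):

	#define PERF_GLOBAL_CTRL_PMC0_ENABLE	BIT_ULL(0)
	#define PERF_GLOBAL_CTRL_PMC1_ENABLE	BIT_ULL(1)
	#define PERF_GLOBAL_CTRL_FIXED0_ENABLE	BIT_ULL(32)
	#define PERF_GLOBAL_CTRL_FIXED1_ENABLE	BIT_ULL(33)
	#define PERF_GLOBAL_CTRL_FIXED2_ENABLE	BIT_ULL(34)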

> +#define	PERF_GLOBAL_CTRL_PMC1_ENABLE	(1ull << 1)
> +#define	PERF_GLOBAL_CTRL_FIXED0_ENABLE	(1ull << 32)
> +#define	PERF_GLOBAL_CTRL_FIXED1_ENABLE	(1ull << 33)
> +#define	PERF_GLOBAL_CTRL_FIXED2_ENABLE	(1ull << 34)
> +
>  /* Geode defined MSRs */
>  #define MSR_GEODE_BUSCONT_CONF0		0x00001900
>  
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 02c8e095a239..ecddb8baaa7f 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -89,8 +89,19 @@ EXPORT_SYMBOL_GPL(kvm_mce_cap_supported);
>  #ifdef CONFIG_X86_64
>  static
>  u64 __read_mostly efer_reserved_bits = ~((u64)(EFER_SCE | EFER_LME | EFER_LMA));
> +static
> +u64 __read_mostly perf_global_ctrl_reserved_bits =
> +				~((u64)(PERF_GLOBAL_CTRL_PMC0_ENABLE	|
> +					PERF_GLOBAL_CTRL_PMC1_ENABLE	|
> +					PERF_GLOBAL_CTRL_FIXED0_ENABLE	|
> +					PERF_GLOBAL_CTRL_FIXED1_ENABLE	|
> +					PERF_GLOBAL_CTRL_FIXED2_ENABLE));
>  #else
>  static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE);
> +static

Why is static on a different line?

> +u64 __read_mostly perf_global_ctrl_reserved_bits =
> +				~((u64)(PERF_GLOBAL_CTRL_PMC0_ENABLE	|
> +					PERF_GLOBAL_CTRL_PMC1_ENABLE));

Why are the fixed bits reserved on a 32-bit build?

>  #endif
>  
>  #define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
> @@ -1255,6 +1266,15 @@ static int do_get_msr_feature(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
>  	return 0;
>  }
>  
> +bool kvm_valid_perf_global_ctrl(u64 perf_global)
> +{
> +	if (perf_global & perf_global_ctrl_reserved_bits)

If the check were correct, this could be:

	return !(perf_global & perf_global_ctrl_reserved_bits);

But the check isn't correct: the number of counters is variable, i.e. the
helper should query the guest's CPUID 0xA (ignoring for the moment the
fact that this bypasses the PMU handling of guest vs. host ownership).
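
A rough sketch of that shape (hypothetical code, still ignoring the
guest vs. host ownership point):

	bool kvm_valid_perf_global_ctrl(struct kvm_vcpu *vcpu, u64 perf_global)
	{
		struct kvm_cpuid_entry2 *entry;
		union cpuid10_eax eax;
		union cpuid10_edx edx;
		u64 valid_bits;

		entry = kvm_find_cpuid_entry(vcpu, 0xa, 0);
		if (!entry)
			return false;

		eax.full = entry->eax;
		edx.full = entry->edx;

		/* GP counter enables occupy bits [num_counters-1:0]. */
		valid_bits = (1ull << eax.split.num_counters) - 1;
		/* Fixed counter enables start at bit 32. */
		valid_bits |= ((1ull << edx.split.num_counters_fixed) - 1) << 32;

		return !(perf_global & ~valid_bits);
	}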

> +		return false;
> +
> +	return true;
> +}
> +EXPORT_SYMBOL_GPL(kvm_valid_perf_global_ctrl);
> +
>  bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
>  {
>  	if (efer & efer_reserved_bits)
> -- 
> 2.17.2
>
Jim Mattson Aug. 15, 2019, 10:29 p.m. UTC | #2
On Mon, May 13, 2019 at 11:57 AM Sean Christopherson
<sean.j.christopherson@intel.com> wrote:
>
> On Wed, Apr 24, 2019 at 07:17:19PM -0400, Krish Sadhukhan wrote:
> [...]
> > +bool kvm_valid_perf_global_ctrl(u64 perf_global)
> > +{
> > +     if (perf_global & perf_global_ctrl_reserved_bits)
>
> If the check were correct, this could be:
>
>         return !(perf_global & perf_global_ctrl_reserved_bits);
>
> But the check isn't correct: the number of counters is variable, i.e. the
> helper should query the guest's CPUID 0xA (ignoring for the moment the
> fact that this bypasses the PMU handling of guest vs. host ownership).

Aren't the reserved bits already easily calculated as
~pmu->global_ctrl_mask? Well, there's also bit 48, potentially, I
guess, but I don't think kvm virtualizes it.

Once we have this concept, shouldn't intel_pmu_set_msr() return
non-zero if an attempt is made to set a reserved bit of this MSR?
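
Something like this in intel_pmu_set_msr(), say (a sketch, assuming
pmu->global_ctrl_mask holds the reserved bits, i.e. ~global_ctrl as
set up by intel_pmu_refresh() from guest CPUID 0xA):

	case MSR_CORE_PERF_GLOBAL_CTRL:
		if (data & pmu->global_ctrl_mask)
			return 1;	/* reserved bit set */
		global_ctrl_changed(pmu, data);
		return 0;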


Patch

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 4660ce90de7f..c5b3c63129a6 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1311,6 +1311,7 @@  int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu,
 
 void kvm_enable_efer_bits(u64);
 bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer);
+bool kvm_valid_perf_global_ctrl(u64 perf_global);
 int kvm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr);
 int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr);
 
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 8e40c2446fd1..d10e8d4b2842 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -775,6 +775,13 @@ 
 #define MSR_CORE_PERF_GLOBAL_CTRL	0x0000038f
 #define MSR_CORE_PERF_GLOBAL_OVF_CTRL	0x00000390
 
+/* MSR_CORE_PERF_GLOBAL_CTRL bits */
+#define	PERF_GLOBAL_CTRL_PMC0_ENABLE	(1ull << 0)
+#define	PERF_GLOBAL_CTRL_PMC1_ENABLE	(1ull << 1)
+#define	PERF_GLOBAL_CTRL_FIXED0_ENABLE	(1ull << 32)
+#define	PERF_GLOBAL_CTRL_FIXED1_ENABLE	(1ull << 33)
+#define	PERF_GLOBAL_CTRL_FIXED2_ENABLE	(1ull << 34)
+
 /* Geode defined MSRs */
 #define MSR_GEODE_BUSCONT_CONF0		0x00001900
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 02c8e095a239..ecddb8baaa7f 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -89,8 +89,19 @@  EXPORT_SYMBOL_GPL(kvm_mce_cap_supported);
 #ifdef CONFIG_X86_64
 static
 u64 __read_mostly efer_reserved_bits = ~((u64)(EFER_SCE | EFER_LME | EFER_LMA));
+static
+u64 __read_mostly perf_global_ctrl_reserved_bits =
+				~((u64)(PERF_GLOBAL_CTRL_PMC0_ENABLE	|
+					PERF_GLOBAL_CTRL_PMC1_ENABLE	|
+					PERF_GLOBAL_CTRL_FIXED0_ENABLE	|
+					PERF_GLOBAL_CTRL_FIXED1_ENABLE	|
+					PERF_GLOBAL_CTRL_FIXED2_ENABLE));
 #else
 static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE);
+static
+u64 __read_mostly perf_global_ctrl_reserved_bits =
+				~((u64)(PERF_GLOBAL_CTRL_PMC0_ENABLE	|
+					PERF_GLOBAL_CTRL_PMC1_ENABLE));
 #endif
 
 #define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
@@ -1255,6 +1266,15 @@  static int do_get_msr_feature(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
 	return 0;
 }
 
+bool kvm_valid_perf_global_ctrl(u64 perf_global)
+{
+	if (perf_global & perf_global_ctrl_reserved_bits)
+		return false;
+
+	return true;
+}
+EXPORT_SYMBOL_GPL(kvm_valid_perf_global_ctrl);
+
 bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
 {
 	if (efer & efer_reserved_bits)