
[v6,11/20] kvm/x86: Emulate MSR IA32_CORE_CAPABILITY

Message ID 1554326526-172295-12-git-send-email-fenghua.yu@intel.com (mailing list archive)
State Not Applicable
Delegated to: Johannes Berg
Series x86/split_lock: Enable split locked accesses detection

Commit Message

Fenghua Yu April 3, 2019, 9:21 p.m. UTC
From: Xiaoyao Li <xiaoyao.li@linux.intel.com>

MSR IA32_CORE_CAPABILITY is a feature-enumerating MSR; bit 5 reports the
capability to enable detection of split locks (which will be supported on
future processors based on the Tremont microarchitecture and later).

CPUID.(EAX=7H,ECX=0):EDX[30] will enumerate the presence of the
IA32_CORE_CAPABILITY MSR.

Please check the latest Intel 64 and IA-32 Architectures Software
Developer's Manual for more detailed information on the MSR and
the split lock bit.
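For orientation only (not part of this patch), a minimal kernel-style sketch
of how bare-metal code might probe this capability; the constant names and
the 0xcf MSR address are assumptions taken from later Linux msr-index.h
definitions and should be verified against the SDM:

/*
 * Illustrative sketch, not part of this patch: probe split lock detection
 * on bare metal.  MSR address and constant names are assumptions taken
 * from later Linux releases (arch/x86/include/asm/msr-index.h).
 */
#include <linux/bits.h>
#include <asm/processor.h>	/* cpuid_count() */
#include <asm/msr.h>		/* rdmsrl() */

#define MSR_IA32_CORE_CAPS			0x000000cf
#define MSR_IA32_CORE_CAPS_SPLIT_LOCK_DETECT	BIT(5)

static bool has_split_lock_detect(void)
{
	unsigned int eax, ebx, ecx, edx;
	u64 core_caps;

	/* CPUID.(EAX=7H,ECX=0):EDX[30] enumerates IA32_CORE_CAPABILITY */
	cpuid_count(0x7, 0, &eax, &ebx, &ecx, &edx);
	if (!(edx & BIT(30)))
		return false;

	/* Bit 5 reports the split lock detection capability */
	rdmsrl(MSR_IA32_CORE_CAPS, core_caps);
	return !!(core_caps & MSR_IA32_CORE_CAPS_SPLIT_LOCK_DETECT);
}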

Since MSR_IA32_CORE_CAPABILITY is a feature-enumerating MSR, emulate it
in software regardless of the host's capability. All that is needed is to
set the right value in it to report the guest's capabilities.

In this patch, just set the guest's core_capability to 0, because support
for the features it enumerates has not been added to the guest yet. This
keeps the series bisectable.
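
As context for how the emulated value can be observed (purely illustrative,
not part of this patch), host user space could read the feature MSR through
KVM's feature-MSR interface on the /dev/kvm fd. The sketch below assumes
KVM_CAP_GET_MSR_FEATURES is available and reuses the 0xcf MSR address from
later msr-index.h:

/* Hypothetical user-space sketch: read the emulated IA32_CORE_CAPABILITY
 * value via KVM's feature-MSR interface (KVM_GET_MSRS on the system fd). */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

#define MSR_IA32_CORE_CAPS 0xcf		/* verify against msr-index.h */

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);
	struct {
		struct kvm_msrs hdr;
		struct kvm_msr_entry entry;
	} req;

	if (kvm < 0 || !ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_GET_MSR_FEATURES))
		return 1;

	memset(&req, 0, sizeof(req));
	req.hdr.nmsrs = 1;
	req.entry.index = MSR_IA32_CORE_CAPS;

	/* With this patch the reported value is 0; later patches in the
	 * series are expected to set bit 5 once split lock detection can
	 * be exposed to the guest. */
	if (ioctl(kvm, KVM_GET_MSRS, &req) != 1)
		return 1;

	printf("IA32_CORE_CAPABILITY = 0x%llx\n",
	       (unsigned long long)req.entry.data);
	return 0;
}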

Signed-off-by: Xiaoyao Li <xiaoyao.li@linux.intel.com>
Signed-off-by: Fenghua Yu <fenghua.yu@intel.com>
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
---
 arch/x86/include/asm/kvm_host.h |  2 ++
 arch/x86/kvm/cpuid.c            |  6 ++++++
 arch/x86/kvm/x86.c              | 24 ++++++++++++++++++++++++
 3 files changed, 32 insertions(+)

Comments

Thomas Gleixner April 5, 2019, noon UTC | #1
On Wed, 3 Apr 2019, Fenghua Yu wrote:
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
>  
> +u64 kvm_get_core_capability(void)
> +{
> +	return 0;
> +}
> +EXPORT_SYMBOL_GPL(kvm_get_core_capability);

Why is this global and exported? The only users are in this very file.

Thanks,

	tglx
Xiaoyao Li April 9, 2019, 6:03 a.m. UTC | #2
On Fri, 2019-04-05 at 14:00 +0200, Thomas Gleixner wrote:
> On Wed, 3 Apr 2019, Fenghua Yu wrote:
> > diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> >  
> > +u64 kvm_get_core_capability(void)
> > +{
> > +	return 0;
> > +}
> > +EXPORT_SYMBOL_GPL(kvm_get_core_capability);
> 
> Why is this global and exported? The only users are in this very file.

You're right.
Will remove it in the next version.

> Thanks,
> 
> 	tglx
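
For illustration, the cleanup suggested above would presumably look roughly
like this in the next revision (a sketch, not the actual follow-up patch):
the helper becomes local to arch/x86/kvm/x86.c, the EXPORT_SYMBOL_GPL() goes
away, and the declaration is dropped from kvm_host.h.

/* Sketch of the suggested cleanup, local to arch/x86/kvm/x86.c. */
static u64 kvm_get_core_capability(void)
{
	/* Still 0 at this point in the series; a later patch presumably
	 * sets the split lock detection bit once the feature is supported. */
	return 0;
}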

Patch

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 159b5988292f..e28626f6a2e0 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -570,6 +570,7 @@  struct kvm_vcpu_arch {
 	u64 ia32_xss;
 	u64 microcode_version;
 	u64 arch_capabilities;
+	u64 core_capability;
 
 	/*
 	 * Paging state of the vcpu
@@ -1527,6 +1528,7 @@  int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
 		    unsigned long icr, int op_64_bit);
 
 u64 kvm_get_arch_capabilities(void);
+u64 kvm_get_core_capability(void);
 void kvm_define_shared_msr(unsigned index, u32 msr);
 int kvm_set_shared_msr(unsigned index, u64 val, u64 mask);
 
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index fd3951638ae4..4a2f7892ea31 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -505,6 +505,12 @@  static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
 			 * if the host doesn't support it.
 			 */
 			entry->edx |= F(ARCH_CAPABILITIES);
+			/*
+			 * Since we emulate MSR IA32_CORE_CAPABILITY in
+			 * software, we can always enable it for guest
+			 * regardless of host's capability.
+			 */
+			entry->edx |= F(CORE_CAPABILITY);
 		} else {
 			entry->ebx = 0;
 			entry->ecx = 0;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 099b851dabaf..4459115eb0ec 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1159,6 +1159,7 @@  static u32 emulated_msrs[] = {
 	MSR_IA32_TSC_ADJUST,
 	MSR_IA32_TSCDEADLINE,
 	MSR_IA32_ARCH_CAPABILITIES,
+	MSR_IA32_CORE_CAPABILITY,
 	MSR_IA32_MISC_ENABLE,
 	MSR_IA32_MCG_STATUS,
 	MSR_IA32_MCG_CTL,
@@ -1198,6 +1199,7 @@  static u32 msr_based_features[] = {
 
 	MSR_F10H_DECFG,
 	MSR_IA32_UCODE_REV,
+	MSR_IA32_CORE_CAPABILITY,
 	MSR_IA32_ARCH_CAPABILITIES,
 };
 
@@ -1225,9 +1227,18 @@  u64 kvm_get_arch_capabilities(void)
 }
 EXPORT_SYMBOL_GPL(kvm_get_arch_capabilities);
 
+u64 kvm_get_core_capability(void)
+{
+	return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_get_core_capability);
+
 static int kvm_get_msr_feature(struct kvm_msr_entry *msr)
 {
 	switch (msr->index) {
+	case MSR_IA32_CORE_CAPABILITY:
+		msr->data = kvm_get_core_capability();
+		break;
 	case MSR_IA32_ARCH_CAPABILITIES:
 		msr->data = kvm_get_arch_capabilities();
 		break;
@@ -2451,6 +2462,12 @@  int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		break;
 	case MSR_EFER:
 		return set_efer(vcpu, data);
+	case MSR_IA32_CORE_CAPABILITY:
+		if (!msr_info->host_initiated)
+			return 1;
+
+		vcpu->arch.core_capability = data;
+		break;
 	case MSR_K7_HWCR:
 		data &= ~(u64)0x40;	/* ignore flush filter disable */
 		data &= ~(u64)0x100;	/* ignore ignne emulation enable */
@@ -2762,6 +2779,12 @@  int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	case MSR_IA32_TSC:
 		msr_info->data = kvm_scale_tsc(vcpu, rdtsc()) + vcpu->arch.tsc_offset;
 		break;
+	case MSR_IA32_CORE_CAPABILITY:
+		if (!msr_info->host_initiated &&
+		    !guest_cpuid_has(vcpu, X86_FEATURE_CORE_CAPABILITY))
+			return 1;
+		msr_info->data = vcpu->arch.core_capability;
+		break;
 	case MSR_MTRRcap:
 	case 0x200 ... 0x2ff:
 		return kvm_mtrr_get_msr(vcpu, msr_info->index, &msr_info->data);
@@ -8762,6 +8785,7 @@  struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 {
 	vcpu->arch.arch_capabilities = kvm_get_arch_capabilities();
+	vcpu->arch.core_capability = kvm_get_core_capability();
 	vcpu->arch.msr_platform_info = MSR_PLATFORM_INFO_CPUID_FAULT;
 	kvm_vcpu_mtrr_init(vcpu);
 	vcpu_load(vcpu);