[v6,19/20] x86/clearcpuid: Clear CPUID bit in CPUID faulting

Message ID: 1554326526-172295-20-git-send-email-fenghua.yu@intel.com (mailing list archive)
State: Not Applicable
Delegated to: Johannes Berg
Series: x86/split_lock: Enable split locked accesses detection

Commit Message

Fenghua Yu April 3, 2019, 9:22 p.m. UTC
From: Peter Zijlstra <peterz@infradead.org>

After the kernel clears a CPUID bit through clearcpuid or another kernel
option, a CPUID instruction executed from user space should see the same
value for that bit, i.e. the bit should appear cleared there as well. To
achieve this, the CPUID faulting handler emulates the faulting CPUID
instruction and masks the cleared bits out of the data returned to user
space.
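
For illustration only (not part of the patch), a minimal user-space sketch
like the one below can be used to observe this. It assumes a CPU with CPUID
faulting support and that the feature checked here (SMAP, CPUID.(7,0):EBX
bit 20, picked purely as an example) was cleared with the clearcpuid option:

  /* Sketch: read CPUID.(EAX=7,ECX=0):EBX from user space. */
  #include <stdio.h>
  #include <cpuid.h>

  int main(void)
  {
  	unsigned int eax, ebx, ecx, edx;

  	/* __get_cpuid_count() returns 0 if the leaf is not supported. */
  	if (!__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx))
  		return 1;

  	/* With SMAP cleared via clearcpuid, this should print 0. */
  	printf("CPUID.(7,0):EBX bit 20 (SMAP) = %u\n", (ebx >> 20) & 1);
  	return 0;
  }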

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Fenghua Yu <fenghua.yu@intel.com>
---
 arch/x86/include/asm/cpufeature.h |  4 +++
 arch/x86/kernel/cpu/common.c      |  2 ++
 arch/x86/kernel/cpu/cpuid-deps.c  | 52 ++++++++++++++++++++++++++++
 arch/x86/kernel/cpu/intel.c       | 56 +++++++++++++++++++++++++++++--
 arch/x86/kernel/cpu/scattered.c   | 17 ++++++++++
 arch/x86/kernel/process.c         |  3 ++
 arch/x86/kernel/traps.c           | 11 ++++++
 7 files changed, 142 insertions(+), 3 deletions(-)

Patch

diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 823c4ab82e12..53875fd13f5a 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -228,5 +228,9 @@  static __always_inline __pure bool _static_cpu_has(u16 bit)
 #define CPU_FEATURE_TYPEVAL		boot_cpu_data.x86_vendor, boot_cpu_data.x86, \
 					boot_cpu_data.x86_model
 
+extern int cpuid_fault;
+u32 scattered_cpuid_mask(u32 leaf, u32 count, enum cpuid_regs_idx reg);
+u32 cpuid_cap_mask(u32 leaf, u32 count, enum cpuid_regs_idx reg);
+
 #endif /* defined(__KERNEL__) && !defined(__ASSEMBLY__) */
 #endif /* _ASM_X86_CPUFEATURE_H */
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index e1d41405c27b..020597bca252 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1503,6 +1503,8 @@  void print_cpu_info(struct cpuinfo_x86 *c)
 		pr_cont(")\n");
 }
 
+int cpuid_fault;
+
 /*
  * clearcpuid= was already parsed in fpu__init_parse_early_param.
  * But we need to keep a dummy __setup around otherwise it would
diff --git a/arch/x86/kernel/cpu/cpuid-deps.c b/arch/x86/kernel/cpu/cpuid-deps.c
index 1a71434f7b46..d42aa4fa3021 100644
--- a/arch/x86/kernel/cpu/cpuid-deps.c
+++ b/arch/x86/kernel/cpu/cpuid-deps.c
@@ -113,9 +113,61 @@  static void do_clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int feature)
 
 void clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int feature)
 {
+	if (boot_cpu_has(feature))
+		cpuid_fault = 1;
 	do_clear_cpu_cap(c, feature);
 }
 
+u32 cpuid_cap_mask(u32 leaf, u32 count, enum cpuid_regs_idx reg)
+{
+	switch (leaf) {
+	case 0x1:
+		if (reg == CPUID_EDX)
+			return ~cpu_caps_cleared[CPUID_1_EDX];
+		if (reg == CPUID_ECX)
+			return ~cpu_caps_cleared[CPUID_1_ECX];
+		break;
+
+	case 0x6:
+		if (reg == CPUID_EAX)
+			return ~cpu_caps_cleared[CPUID_6_EAX];
+		break;
+
+	case 0x7:
+		if (reg == CPUID_EDX)
+			return ~cpu_caps_cleared[CPUID_7_EDX];
+		if (reg == CPUID_ECX)
+			return ~cpu_caps_cleared[CPUID_7_ECX];
+		if (reg == CPUID_EBX && count == 0)
+			return ~cpu_caps_cleared[CPUID_7_0_EBX];
+		break;
+
+	case 0xD:
+		if (reg == CPUID_EAX)
+			return ~cpu_caps_cleared[CPUID_D_1_EAX];
+		break;
+
+	case 0xF:
+		if (reg == CPUID_EDX) {
+			if (count == 0)
+				return ~cpu_caps_cleared[CPUID_F_0_EDX];
+			if (count == 1)
+				return ~cpu_caps_cleared[CPUID_F_1_EDX];
+		}
+		break;
+
+	case 0x80000007:
+		if (reg == CPUID_EDX) {
+			if (test_bit(X86_FEATURE_CONSTANT_TSC,
+				     (unsigned long *)cpu_caps_cleared))
+				return ~(1 << 8);
+		}
+		break;
+	}
+
+	return scattered_cpuid_mask(leaf, count, reg);
+}
+
 void setup_clear_cpu_cap(unsigned int feature)
 {
 	do_clear_cpu_cap(NULL, feature);
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 166033fa8423..aa1cfcc81323 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -19,6 +19,9 @@ 
 #include <asm/microcode_intel.h>
 #include <asm/hwcap2.h>
 #include <asm/elf.h>
+#include <asm/insn.h>
+#include <asm/insn-eval.h>
+#include <asm/inat.h>
 
 #ifdef CONFIG_X86_64
 #include <linux/topology.h>
@@ -667,13 +670,60 @@  static void intel_bsp_resume(struct cpuinfo_x86 *c)
 	init_intel_energy_perf(c);
 }
 
+bool fixup_cpuid_exception(struct pt_regs *regs)
+{
+	unsigned int leaf, count, eax, ebx, ecx, edx;
+	unsigned long seg_base = 0;
+	unsigned char buf[2];
+	int not_copied;
+
+	if (!cpuid_fault)
+		return false;
+
+	if (test_thread_flag(TIF_NOCPUID))
+		return false;
+
+	if (!user_64bit_mode(regs))
+		seg_base = insn_get_seg_base(regs, INAT_SEG_REG_CS);
+
+	if (seg_base == -1L)
+		return false;
+
+	not_copied = copy_from_user(buf, (void __user *)(seg_base + regs->ip),
+				    sizeof(buf));
+	if (not_copied)
+		return false;
+
+	if (buf[0] != 0x0F || buf[1] != 0xA2) /* CPUID - 0F A2 */
+		return false;
+
+	leaf = regs->ax;
+	count = regs->cx;
+
+	cpuid_count(leaf, count, &eax, &ebx, &ecx, &edx);
+
+	regs->ip += 2;
+	regs->ax = eax & cpuid_cap_mask(leaf, count, CPUID_EAX);
+	regs->bx = ebx & cpuid_cap_mask(leaf, count, CPUID_EBX);
+	regs->cx = ecx & cpuid_cap_mask(leaf, count, CPUID_ECX);
+	regs->dx = edx & cpuid_cap_mask(leaf, count, CPUID_EDX);
+
+	return true;
+}
+
 static void init_cpuid_fault(struct cpuinfo_x86 *c)
 {
 	u64 msr;
 
-	if (!rdmsrl_safe(MSR_PLATFORM_INFO, &msr)) {
-		if (msr & MSR_PLATFORM_INFO_CPUID_FAULT)
-			set_cpu_cap(c, X86_FEATURE_CPUID_FAULT);
+	if (rdmsrl_safe(MSR_PLATFORM_INFO, &msr))
+		return;
+
+	if (msr & MSR_PLATFORM_INFO_CPUID_FAULT) {
+		set_cpu_cap(c, X86_FEATURE_CPUID_FAULT);
+		if (cpuid_fault) {
+			this_cpu_or(msr_misc_features_shadow,
+				    MSR_MISC_FEATURES_ENABLES_CPUID_FAULT);
+		}
 	}
 }
 
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
index 94aa1c72ca98..353756c27096 100644
--- a/arch/x86/kernel/cpu/scattered.c
+++ b/arch/x86/kernel/cpu/scattered.c
@@ -62,3 +62,20 @@  void init_scattered_cpuid_features(struct cpuinfo_x86 *c)
 			set_cpu_cap(c, cb->feature);
 	}
 }
+
+u32 scattered_cpuid_mask(u32 leaf, u32 count, enum cpuid_regs_idx reg)
+{
+	const struct cpuid_bit *cb;
+	u32 mask = ~0U;
+
+	for (cb = cpuid_bits; cb->feature; cb++) {
+		if (cb->level == leaf && cb->sub_leaf == count &&
+		    cb->reg == reg) {
+			if (test_bit(cb->feature,
+				     (unsigned long *)cpu_caps_cleared))
+				mask &= ~BIT(cb->bit);
+		}
+	}
+
+	return mask;
+}
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 58ac7be52c7a..2b1dfd7ae65d 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -196,6 +196,9 @@  static void set_cpuid_faulting(bool on)
 {
 	u64 msrval;
 
+	if (cpuid_fault)
+		return;
+
 	msrval = this_cpu_read(msr_misc_features_shadow);
 	msrval &= ~MSR_MISC_FEATURES_ENABLES_CPUID_FAULT;
 	msrval |= (on << MSR_MISC_FEATURES_ENABLES_CPUID_FAULT_BIT);
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 0ac992bfa287..d887da1e8050 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -559,6 +559,12 @@  dotraplinkage void do_bounds(struct pt_regs *regs, long error_code)
 	do_trap(X86_TRAP_BR, SIGSEGV, "bounds", regs, error_code, 0, NULL);
 }
 
+#ifdef CONFIG_CPU_SUP_INTEL
+extern bool fixup_cpuid_exception(struct pt_regs *regs);
+#else
+static inline bool fixup_cpuid_exception(struct pt_regs *regs) { return false; }
+#endif
+
 dotraplinkage void
 do_general_protection(struct pt_regs *regs, long error_code)
 {
@@ -573,6 +579,11 @@  do_general_protection(struct pt_regs *regs, long error_code)
 			return;
 	}
 
+	if (static_cpu_has(X86_FEATURE_CPUID_FAULT)) {
+		if (user_mode(regs) && fixup_cpuid_exception(regs))
+			return;
+	}
+
 	if (v8086_mode(regs)) {
 		local_irq_enable();
 		handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);