[v2,2/2] x86/copy_safe: Introduce copy_safe_fast()

Message ID 158823510897.2094061.8955960257425244970.stgit@dwillia2-desk3.amr.corp.intel.com
State New, archived
Series
  • Replace and improve "mcsafe" with copy_safe()

Commit Message

Dan Williams April 30, 2020, 8:25 a.m. UTC
The existing copy_safe() implementation addresses two primary concerns.
It provides a copy routine that avoids known unrecoverable scenarios
(poison consumption via fast-string instructions / accesses across
cacheline boundaries), and it provides a fallback to a fast plain
memcpy() if the platform does not indicate recovery capability. The
fallback is broken for two reasons: it violates the expected
copy_safe() semantics that allow copy_safe() to be used where reads or
writes may trigger memory protection faults, and it fails to enable
machine check recovery on systems that do not need the careful
semantics of copy_safe_slow().

With copy_safe_fast() in place, copy_safe() never falls back to plain
memcpy(), and it remains fast in every scenario outside of the
copy_safe_slow() blacklist.
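
For reference, the caller-visible contract being fixed is sketched
below (a minimal, hypothetical consumer; only the return convention,
zero on success and 'bytes not copied' on an aborted copy, comes from
this series):

	/*
	 * Hypothetical copy_safe() consumer, e.g. a pmem-style read
	 * path. The names here are illustrative, not from this patch.
	 */
	static int example_read(void *dst, const void *src, size_t len)
	{
		unsigned long rem = copy_safe(dst, src, len);

		if (rem)
			return -EIO; /* len - rem bytes copied before the abort */
		return 0;
	}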

Cc: x86@kernel.org
Cc: <stable@vger.kernel.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Reported-by: Erwin Tsaur <erwin.tsaur@intel.com>
Tested-by: Erwin Tsaur <erwin.tsaur@intel.com>
Fixes: 92b0729c34ca ("x86/mm, x86/mce: Add memcpy_mcsafe()")
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
---
 arch/x86/include/asm/copy_safe.h |    2 ++
 arch/x86/lib/copy_safe.c         |    5 ++---
 arch/x86/lib/copy_safe_64.S      |   40 ++++++++++++++++++++++++++++++++++++++
 tools/objtool/check.c            |    1 +
 4 files changed, 45 insertions(+), 3 deletions(-)
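
A note on the mechanism before the patch body: the fast path is gated
with an ALTERNATIVE site rather than a runtime branch. A rough sketch
of how that one line behaves (standard x86 alternatives patching,
summarized here for review convenience, not something this patch adds):

	ALTERNATIVE "jmp copy_safe_slow", "", X86_FEATURE_ERMS
	/*
	 * At boot apply_alternatives() consults X86_FEATURE_ERMS:
	 * - CPU lacks ERMS: the "jmp copy_safe_slow" stays in place,
	 *   and copy_safe_fast() degrades to the careful copy.
	 * - CPU has ERMS: the jmp is patched out (NOP padded) and
	 *   execution falls through to the "rep movsb" fast-string
	 *   copy.
	 */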

Patch

diff --git a/arch/x86/include/asm/copy_safe.h b/arch/x86/include/asm/copy_safe.h
index 1c130364dd61..306248ca819f 100644
--- a/arch/x86/include/asm/copy_safe.h
+++ b/arch/x86/include/asm/copy_safe.h
@@ -12,5 +12,7 @@ static inline void enable_copy_safe_slow(void)
 
 __must_check unsigned long
 copy_safe_slow(void *dst, const void *src, size_t cnt);
+__must_check unsigned long
+copy_safe_fast(void *dst, const void *src, size_t cnt);
 
 #endif /* _COPY_SAFE_H_ */
diff --git a/arch/x86/lib/copy_safe.c b/arch/x86/lib/copy_safe.c
index 9dd3a831ff94..b8472e6a44d3 100644
--- a/arch/x86/lib/copy_safe.c
+++ b/arch/x86/lib/copy_safe.c
@@ -25,7 +25,7 @@ void enable_copy_safe_slow(void)
  *
  * We only call into the slow version on systems that have trouble
  * actually do machine check recovery. Everyone else can just
- * use memcpy().
+ * use copy_safe_fast().
  *
  * Return 0 for success, or number of bytes not copied if there was an
  * exception.
@@ -34,8 +34,7 @@ __must_check unsigned long copy_safe(void *dst, const void *src, size_t cnt)
 {
 	if (static_branch_unlikely(&copy_safe_slow_key))
 		return copy_safe_slow(dst, src, cnt);
-	memcpy(dst, src, cnt);
-	return 0;
+	return copy_safe_fast(dst, src, cnt);
 }
 EXPORT_SYMBOL_GPL(copy_safe);
 
diff --git a/arch/x86/lib/copy_safe_64.S b/arch/x86/lib/copy_safe_64.S
index 46dfdd832bde..551c781ae9fd 100644
--- a/arch/x86/lib/copy_safe_64.S
+++ b/arch/x86/lib/copy_safe_64.S
@@ -2,7 +2,9 @@ 
 /* Copyright(c) 2016-2020 Intel Corporation. All rights reserved. */
 
 #include <linux/linkage.h>
+#include <asm/alternative-asm.h>
 #include <asm/copy_safe_test.h>
+#include <asm/cpufeatures.h>
 #include <asm/export.h>
 #include <asm/asm.h>
 
@@ -120,4 +122,42 @@ EXPORT_SYMBOL_GPL(copy_safe_slow)
 	_ASM_EXTABLE(.L_write_leading_bytes, .E_leading_bytes)
 	_ASM_EXTABLE(.L_write_words, .E_write_words)
 	_ASM_EXTABLE(.L_write_trailing_bytes, .E_trailing_bytes)
+
+/*
+ * copy_safe_fast - memory copy with exception handling
+ *
+ * Fast string copy + fault / exception handling. If the CPU does
+ * support machine check exception recovery but does not support
+ * recovering from fast-string exceptions, then this CPU needs to be
+ * added to the copy_safe_slow_key set of quirks. Otherwise, absent
+ * any machine check recovery support, this version should be no
+ * slower than standard memcpy().
+ */
+SYM_FUNC_START(copy_safe_fast)
+	ALTERNATIVE "jmp copy_safe_slow", "", X86_FEATURE_ERMS
+	movq %rdi, %rax
+	movq %rdx, %rcx
+.L_copy:
+	rep movsb
+	/* Copy successful. Return zero */
+	xorl %eax, %eax
+	ret
+SYM_FUNC_END(copy_safe_fast)
+EXPORT_SYMBOL_GPL(copy_safe_fast)
+
+	.section .fixup, "ax"
+.E_copy:
+	/*
+	 * On fault %rcx is updated such that the copy instruction could
+	 * optionally be restarted at the fault position, i.e. it
+	 * contains 'bytes remaining'. A non-zero return indicates an
+	 * error to copy_safe() users, or indicates a short transfer to
+	 * user-copy routines.
+	 */
+	movq %rcx, %rax
+	ret
+
+	.previous
+
+	_ASM_EXTABLE_FAULT(.L_copy, .E_copy)
 #endif
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index be20eff8f358..e8b6e2438d31 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -533,6 +533,7 @@ static const char *uaccess_safe_builtin[] = {
 	"__ubsan_handle_shift_out_of_bounds",
 	/* misc */
 	"csum_partial_copy_generic",
+	"copy_safe_fast",
 	"copy_safe_slow",
 	"copy_safe_slow_handle_tail",
 	"ftrace_likely_update", /* CONFIG_TRACE_BRANCH_PROFILING */