
[RFC,-next,V3,6/6] arm64: add cow to machine check safe

Message ID 20220412072552.2526871-7-tongtiangen@huawei.com (mailing list archive)
State New, archived
Series arm64: add machine check safe support

Commit Message

Tong Tiangen April 12, 2022, 7:25 a.m. UTC
During copy-on-write (COW) processing, data belonging to the user process is
copied. If a hardware memory error is encountered during the copy, only the
affected process is at risk, so killing the user process and isolating the
user page with the hardware memory error is a more reasonable choice than a
kernel panic.

Add a new helper, copy_page_mc(), which provides a machine-check-safe page
copy implementation. At present it is only used for COW, but it can be
extended to more scenarios in the future: as long as the consequences of a
failed page copy are not fatal (e.g. only a user process is affected), this
helper can be used.

copy_page_mc() in copy_page_mc.S borrows heavily from copy_page() in
copy_page.S; the main difference is that copy_page_mc() adds extable entries
to support machine check safety. It is kept deliberately close to the original
to keep the patch simple; if needed, further optimizations can be folded in
later.

Add a new extable type, EX_TYPE_COPY_PAGE_MC, which is used in copy_page_mc().

This type is only processed in fixup_exception_mc(). The reason is that
copy_page_mc() is identical to copy_page() except that machine check safety is
taken into account, and copy_page() itself does not need any exception fixup.
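
Concretely, only the machine check fixup path learns about the new type; the
dispatch in fixup_exception_mc() (see the arch/arm64/mm/extable.c hunk below)
becomes roughly:

  switch (ex->type) {
  case EX_TYPE_UACCESS_MC:
  case EX_TYPE_COPY_PAGE_MC:
          /* both types simply branch to the recorded fixup address */
          return ex_handler_fixup(ex, regs);
  case EX_TYPE_UACCESS_MC_ERR_ZERO:
          return ex_handler_uaccess_err_zero(ex, regs);
  }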

Signed-off-by: Tong Tiangen <tongtiangen@huawei.com>
---
 arch/arm64/include/asm/asm-extable.h |  5 ++
 arch/arm64/include/asm/page.h        | 10 +++
 arch/arm64/lib/Makefile              |  2 +
 arch/arm64/lib/copy_page_mc.S        | 99 ++++++++++++++++++++++++++++
 arch/arm64/mm/copypage.c             | 36 ++++++++--
 arch/arm64/mm/extable.c              |  1 +
 include/linux/highmem.h              |  8 +++
 mm/memory.c                          |  2 +-
 8 files changed, 156 insertions(+), 7 deletions(-)
 create mode 100644 arch/arm64/lib/copy_page_mc.S

Comments

Robin Murphy April 12, 2022, 4:39 p.m. UTC | #1
On 12/04/2022 8:25 am, Tong Tiangen wrote:
[...]
> +100:	ldp	x2, x3, [x1]
> +101:	ldp	x4, x5, [x1, #16]
> +102:	ldp	x6, x7, [x1, #32]
> +103:	ldp	x8, x9, [x1, #48]
> +104:	ldp	x10, x11, [x1, #64]
> +105:	ldp	x12, x13, [x1, #80]
> +106:	ldp	x14, x15, [x1, #96]
> +107:	ldp	x16, x17, [x1, #112]
> +
> +	add	x0, x0, #256
> +	add	x1, x1, #128
> +1:
> +	tst	x0, #(PAGE_SIZE - 1)
> +
> +alternative_if ARM64_HAS_NO_HW_PREFETCH
> +	prfm	pldl1strm, [x1, #384]
> +alternative_else_nop_endif
> +
> +	stnp	x2, x3, [x0, #-256]
> +200:	ldp	x2, x3, [x1]
> +	stnp	x4, x5, [x0, #16 - 256]
> +201:	ldp	x4, x5, [x1, #16]
> +	stnp	x6, x7, [x0, #32 - 256]
> +202:	ldp	x6, x7, [x1, #32]
> +	stnp	x8, x9, [x0, #48 - 256]
> +203:	ldp	x8, x9, [x1, #48]
> +	stnp	x10, x11, [x0, #64 - 256]
> +204:	ldp	x10, x11, [x1, #64]
> +	stnp	x12, x13, [x0, #80 - 256]
> +205:	ldp	x12, x13, [x1, #80]
> +	stnp	x14, x15, [x0, #96 - 256]
> +206:	ldp	x14, x15, [x1, #96]
> +	stnp	x16, x17, [x0, #112 - 256]
> +207:	ldp	x16, x17, [x1, #112]
> +
> +	add	x0, x0, #128
> +	add	x1, x1, #128
> +
> +	b.ne	1b
> +
> +	stnp	x2, x3, [x0, #-256]
> +	stnp	x4, x5, [x0, #16 - 256]
> +	stnp	x6, x7, [x0, #32 - 256]
> +	stnp	x8, x9, [x0, #48 - 256]
> +	stnp	x10, x11, [x0, #64 - 256]
> +	stnp	x12, x13, [x0, #80 - 256]
> +	stnp	x14, x15, [x0, #96 - 256]
> +	stnp	x16, x17, [x0, #112 - 256]
> +
> +300:	ret
> +
> +_asm_extable_copy_page_mc 100b, 300b
> +_asm_extable_copy_page_mc 101b, 300b
> +_asm_extable_copy_page_mc 102b, 300b
> +_asm_extable_copy_page_mc 103b, 300b
> +_asm_extable_copy_page_mc 104b, 300b
> +_asm_extable_copy_page_mc 105b, 300b
> +_asm_extable_copy_page_mc 106b, 300b
> +_asm_extable_copy_page_mc 107b, 300b
> +_asm_extable_copy_page_mc 200b, 300b
> +_asm_extable_copy_page_mc 201b, 300b
> +_asm_extable_copy_page_mc 202b, 300b
> +_asm_extable_copy_page_mc 203b, 300b
> +_asm_extable_copy_page_mc 204b, 300b
> +_asm_extable_copy_page_mc 205b, 300b
> +_asm_extable_copy_page_mc 206b, 300b
> +_asm_extable_copy_page_mc 207b, 300b


Please add a USER_MC() macro to parallel the existing USER() one (we can 
worry about names and eventually consolidating things later), then use 
that to save all the label mess here.

Thanks,
Robin.
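
A macro in that spirit, built on the _asm_extable_copy_page_mc helper this
patch already adds, might look roughly like the sketch below; the CPY_MC name
is purely illustrative, since naming is explicitly left open above:

  /*
   * Illustrative sketch only: record an MC-safe extable entry for one
   * instruction, in the same style as the existing USER() macro.
   */
  #define CPY_MC(l, x...)                         \
  9999:   x;                                      \
          _asm_extable_copy_page_mc       9999b, l

Each faulting load in copy_page_mc.S could then be written as, for example,
CPY_MC(300f, ldp x2, x3, [x1]), replacing the numbered labels and the block of
_asm_extable_copy_page_mc directives at the end of the function.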

Patch

diff --git a/arch/arm64/include/asm/asm-extable.h b/arch/arm64/include/asm/asm-extable.h
index 62eafb651773..274bd7edcff6 100644
--- a/arch/arm64/include/asm/asm-extable.h
+++ b/arch/arm64/include/asm/asm-extable.h
@@ -11,6 +11,7 @@ 
 /* _MC indicates that can fixup from machine check errors */
 #define EX_TYPE_UACCESS_MC		5
 #define EX_TYPE_UACCESS_MC_ERR_ZERO	6
+#define EX_TYPE_COPY_PAGE_MC		7
 
 #ifdef __ASSEMBLY__
 
@@ -39,6 +40,10 @@ 
 	__ASM_EXTABLE_RAW(\insn, \fixup, EX_TYPE_UACCESS_MC, 0)
 	.endm
 
+	.macro          _asm_extable_copy_page_mc, insn, fixup
+	__ASM_EXTABLE_RAW(\insn, \fixup, EX_TYPE_COPY_PAGE_MC, 0)
+	.endm
+
 /*
  * Create an exception table entry for `insn` if `fixup` is provided. Otherwise
  * do nothing.
diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h
index 993a27ea6f54..832571a7dddb 100644
--- a/arch/arm64/include/asm/page.h
+++ b/arch/arm64/include/asm/page.h
@@ -29,6 +29,16 @@  void copy_user_highpage(struct page *to, struct page *from,
 void copy_highpage(struct page *to, struct page *from);
 #define __HAVE_ARCH_COPY_HIGHPAGE
 
+#ifdef CONFIG_ARCH_HAS_COPY_MC
+extern void copy_page_mc(void *to, const void *from);
+void copy_highpage_mc(struct page *to, struct page *from);
+#define __HAVE_ARCH_COPY_HIGHPAGE_MC
+
+void copy_user_highpage_mc(struct page *to, struct page *from,
+		unsigned long vaddr, struct vm_area_struct *vma);
+#define __HAVE_ARCH_COPY_USER_HIGHPAGE_MC
+#endif
+
 struct page *alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
 						unsigned long vaddr);
 #define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
diff --git a/arch/arm64/lib/Makefile b/arch/arm64/lib/Makefile
index 29490be2546b..0d9f292ef68a 100644
--- a/arch/arm64/lib/Makefile
+++ b/arch/arm64/lib/Makefile
@@ -15,6 +15,8 @@  endif
 
 lib-$(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) += uaccess_flushcache.o
 
+lib-$(CONFIG_ARCH_HAS_COPY_MC) += copy_page_mc.o
+
 obj-$(CONFIG_CRC32) += crc32.o
 
 obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
diff --git a/arch/arm64/lib/copy_page_mc.S b/arch/arm64/lib/copy_page_mc.S
new file mode 100644
index 000000000000..93b4203bdf45
--- /dev/null
+++ b/arch/arm64/lib/copy_page_mc.S
@@ -0,0 +1,99 @@ 
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ */
+
+#include <linux/linkage.h>
+#include <linux/const.h>
+#include <asm/assembler.h>
+#include <asm/page.h>
+#include <asm/cpufeature.h>
+#include <asm/alternative.h>
+#include <asm/asm-extable.h>
+
+/*
+ * Copy a page from src to dest (both are page aligned) with machine check
+ *
+ * Parameters:
+ *	x0 - dest
+ *	x1 - src
+ */
+SYM_FUNC_START(__pi_copy_page_mc)
+alternative_if ARM64_HAS_NO_HW_PREFETCH
+	// Prefetch three cache lines ahead.
+	prfm	pldl1strm, [x1, #128]
+	prfm	pldl1strm, [x1, #256]
+	prfm	pldl1strm, [x1, #384]
+alternative_else_nop_endif
+
+100:	ldp	x2, x3, [x1]
+101:	ldp	x4, x5, [x1, #16]
+102:	ldp	x6, x7, [x1, #32]
+103:	ldp	x8, x9, [x1, #48]
+104:	ldp	x10, x11, [x1, #64]
+105:	ldp	x12, x13, [x1, #80]
+106:	ldp	x14, x15, [x1, #96]
+107:	ldp	x16, x17, [x1, #112]
+
+	add	x0, x0, #256
+	add	x1, x1, #128
+1:
+	tst	x0, #(PAGE_SIZE - 1)
+
+alternative_if ARM64_HAS_NO_HW_PREFETCH
+	prfm	pldl1strm, [x1, #384]
+alternative_else_nop_endif
+
+	stnp	x2, x3, [x0, #-256]
+200:	ldp	x2, x3, [x1]
+	stnp	x4, x5, [x0, #16 - 256]
+201:	ldp	x4, x5, [x1, #16]
+	stnp	x6, x7, [x0, #32 - 256]
+202:	ldp	x6, x7, [x1, #32]
+	stnp	x8, x9, [x0, #48 - 256]
+203:	ldp	x8, x9, [x1, #48]
+	stnp	x10, x11, [x0, #64 - 256]
+204:	ldp	x10, x11, [x1, #64]
+	stnp	x12, x13, [x0, #80 - 256]
+205:	ldp	x12, x13, [x1, #80]
+	stnp	x14, x15, [x0, #96 - 256]
+206:	ldp	x14, x15, [x1, #96]
+	stnp	x16, x17, [x0, #112 - 256]
+207:	ldp	x16, x17, [x1, #112]
+
+	add	x0, x0, #128
+	add	x1, x1, #128
+
+	b.ne	1b
+
+	stnp	x2, x3, [x0, #-256]
+	stnp	x4, x5, [x0, #16 - 256]
+	stnp	x6, x7, [x0, #32 - 256]
+	stnp	x8, x9, [x0, #48 - 256]
+	stnp	x10, x11, [x0, #64 - 256]
+	stnp	x12, x13, [x0, #80 - 256]
+	stnp	x14, x15, [x0, #96 - 256]
+	stnp	x16, x17, [x0, #112 - 256]
+
+300:	ret
+
+_asm_extable_copy_page_mc 100b, 300b
+_asm_extable_copy_page_mc 101b, 300b
+_asm_extable_copy_page_mc 102b, 300b
+_asm_extable_copy_page_mc 103b, 300b
+_asm_extable_copy_page_mc 104b, 300b
+_asm_extable_copy_page_mc 105b, 300b
+_asm_extable_copy_page_mc 106b, 300b
+_asm_extable_copy_page_mc 107b, 300b
+_asm_extable_copy_page_mc 200b, 300b
+_asm_extable_copy_page_mc 201b, 300b
+_asm_extable_copy_page_mc 202b, 300b
+_asm_extable_copy_page_mc 203b, 300b
+_asm_extable_copy_page_mc 204b, 300b
+_asm_extable_copy_page_mc 205b, 300b
+_asm_extable_copy_page_mc 206b, 300b
+_asm_extable_copy_page_mc 207b, 300b
+
+SYM_FUNC_END(__pi_copy_page_mc)
+SYM_FUNC_ALIAS(copy_page_mc, __pi_copy_page_mc)
+EXPORT_SYMBOL(copy_page_mc)
diff --git a/arch/arm64/mm/copypage.c b/arch/arm64/mm/copypage.c
index 0dea80bf6de4..0f28edfcb234 100644
--- a/arch/arm64/mm/copypage.c
+++ b/arch/arm64/mm/copypage.c
@@ -14,13 +14,8 @@ 
 #include <asm/cpufeature.h>
 #include <asm/mte.h>
 
-void copy_highpage(struct page *to, struct page *from)
+static void do_mte(struct page *to, struct page *from, void *kto, void *kfrom)
 {
-	void *kto = page_address(to);
-	void *kfrom = page_address(from);
-
-	copy_page(kto, kfrom);
-
 	if (system_supports_mte() && test_bit(PG_mte_tagged, &from->flags)) {
 		set_bit(PG_mte_tagged, &to->flags);
 		page_kasan_tag_reset(to);
@@ -35,6 +30,15 @@  void copy_highpage(struct page *to, struct page *from)
 		mte_copy_page_tags(kto, kfrom);
 	}
 }
+
+void copy_highpage(struct page *to, struct page *from)
+{
+	void *kto = page_address(to);
+	void *kfrom = page_address(from);
+
+	copy_page(kto, kfrom);
+	do_mte(to, from, kto, kfrom);
+}
 EXPORT_SYMBOL(copy_highpage);
 
 void copy_user_highpage(struct page *to, struct page *from,
@@ -44,3 +48,23 @@  void copy_user_highpage(struct page *to, struct page *from,
 	flush_dcache_page(to);
 }
 EXPORT_SYMBOL_GPL(copy_user_highpage);
+
+#ifdef CONFIG_ARCH_HAS_COPY_MC
+void copy_highpage_mc(struct page *to, struct page *from)
+{
+	void *kto = page_address(to);
+	void *kfrom = page_address(from);
+
+	copy_page_mc(kto, kfrom);
+	do_mte(to, from, kto, kfrom);
+}
+EXPORT_SYMBOL(copy_highpage_mc);
+
+void copy_user_highpage_mc(struct page *to, struct page *from,
+			unsigned long vaddr, struct vm_area_struct *vma)
+{
+	copy_highpage_mc(to, from);
+	flush_dcache_page(to);
+}
+EXPORT_SYMBOL_GPL(copy_user_highpage_mc);
+#endif
diff --git a/arch/arm64/mm/extable.c b/arch/arm64/mm/extable.c
index ca7388f3923b..7ee67fcf9e81 100644
--- a/arch/arm64/mm/extable.c
+++ b/arch/arm64/mm/extable.c
@@ -98,6 +98,7 @@  bool fixup_exception_mc(struct pt_regs *regs)
 
 	switch (ex->type) {
 	case EX_TYPE_UACCESS_MC:
+	case EX_TYPE_COPY_PAGE_MC:
 		return ex_handler_fixup(ex, regs);
 	case EX_TYPE_UACCESS_MC_ERR_ZERO:
 		return ex_handler_uaccess_err_zero(ex, regs);
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 39bb9b47fa9c..a9dbf331b038 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -283,6 +283,10 @@  static inline void copy_user_highpage(struct page *to, struct page *from,
 
 #endif
 
+#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE_MC
+#define copy_user_highpage_mc copy_user_highpage
+#endif
+
 #ifndef __HAVE_ARCH_COPY_HIGHPAGE
 
 static inline void copy_highpage(struct page *to, struct page *from)
@@ -298,6 +302,10 @@  static inline void copy_highpage(struct page *to, struct page *from)
 
 #endif
 
+#ifndef __HAVE_ARCH_COPY_HIGHPAGE_MC
+#define copy_highpage_mc copy_highpage
+#endif
+
 static inline void memcpy_page(struct page *dst_page, size_t dst_off,
 			       struct page *src_page, size_t src_off,
 			       size_t len)
diff --git a/mm/memory.c b/mm/memory.c
index 76e3af9639d9..d5f62234152d 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2767,7 +2767,7 @@  static inline bool cow_user_page(struct page *dst, struct page *src,
 	unsigned long addr = vmf->address;
 
 	if (likely(src)) {
-		copy_user_highpage(dst, src, addr, vma);
+		copy_user_highpage_mc(dst, src, addr, vma);
 		return true;
 	}