diff mbox series

[v10,2/3] mm/hwpoison: introduce copy_mc_highpage

Message ID 20230305065112.1932255-3-jiaqiyan@google.com (mailing list archive)
State New
Headers show
Series Memory poison recovery in khugepaged collapsing | expand

Commit Message

Jiaqi Yan March 5, 2023, 6:51 a.m. UTC
Similar to how copy_mc_user_highpage is implemented for
copy_user_highpage on #MC-supported architectures, introduce
the #MC-handled version of copy_highpage.

This helper has an immediate use when khugepaged wants to copy
file-backed memory pages and tolerate #MC.

Signed-off-by: Jiaqi Yan <jiaqiyan@google.com>
---
 include/linux/highmem.h | 54 +++++++++++++++++++++++++++++++----------
 1 file changed, 41 insertions(+), 13 deletions(-)

Comments

Jiaqi Yan March 5, 2023, 6:56 a.m. UTC | #1
On Sat, Mar 4, 2023 at 10:51 PM Jiaqi Yan <jiaqiyan@google.com> wrote:
>
> Similar to how copy_mc_user_highpage is implemented for
> copy_user_highpage on #MC supported architecture, introduce
> the #MC handled version of copy_highpage.
>
> This helper has immediate usage when khugepaged wants to copy
> file-backed memory pages and tolerate #MC.
>
> Signed-off-by: Jiaqi Yan <jiaqiyan@google.com>
> ---
>  include/linux/highmem.h | 54 +++++++++++++++++++++++++++++++----------
>  1 file changed, 41 insertions(+), 13 deletions(-)
>
> diff --git a/include/linux/highmem.h b/include/linux/highmem.h
> index 1128b7114931f..7cbecae39b3eb 100644
> --- a/include/linux/highmem.h
> +++ b/include/linux/highmem.h
> @@ -315,7 +315,29 @@ static inline void copy_user_highpage(struct page *to, struct page *from,
>
>  #endif
>
> +#ifndef __HAVE_ARCH_COPY_HIGHPAGE
> +
> +static inline void copy_highpage(struct page *to, struct page *from)
> +{
> +       char *vfrom, *vto;
> +
> +       vfrom = kmap_local_page(from);
> +       vto = kmap_local_page(to);
> +       copy_page(vto, vfrom);
> +       kmsan_copy_page_meta(to, from);
> +       kunmap_local(vto);
> +       kunmap_local(vfrom);
> +}
> +
> +#endif
> +
>  #ifdef copy_mc_to_kernel
> +/*
> + * If architecture supports machine check exception handling, define the
> + * #MC versions of copy_user_highpage and copy_highpage. They copy a memory
> + * page with #MC in source page (@from) handled, and return the number
> + * of bytes not copied if there was a #MC, otherwise 0 for success.
> + */

I know that back in v8, Andrew said copy_mc_* should not be inline,
but I am still putting them here as inline functions because, to me,
they blend in well with this file. However, if you have a strong
opinion, let me know and I will move copy_mc_user_highpage +
copy_mc_highpage to a .c file (maybe highmem.c?).

>  static inline int copy_mc_user_highpage(struct page *to, struct page *from,
>                                         unsigned long vaddr, struct vm_area_struct *vma)
>  {
> @@ -332,29 +354,35 @@ static inline int copy_mc_user_highpage(struct page *to, struct page *from,
>
>         return ret;
>  }
> -#else
> -static inline int copy_mc_user_highpage(struct page *to, struct page *from,
> -                                       unsigned long vaddr, struct vm_area_struct *vma)
> -{
> -       copy_user_highpage(to, from, vaddr, vma);
> -       return 0;
> -}
> -#endif
>
> -#ifndef __HAVE_ARCH_COPY_HIGHPAGE
> -
> -static inline void copy_highpage(struct page *to, struct page *from)
> +static inline int copy_mc_highpage(struct page *to, struct page *from)
>  {
> +       unsigned long ret;
>         char *vfrom, *vto;
>
>         vfrom = kmap_local_page(from);
>         vto = kmap_local_page(to);
> -       copy_page(vto, vfrom);
> -       kmsan_copy_page_meta(to, from);
> +       ret = copy_mc_to_kernel(vto, vfrom, PAGE_SIZE);
> +       if (!ret)
> +               kmsan_copy_page_meta(to, from);
>         kunmap_local(vto);
>         kunmap_local(vfrom);
> +
> +       return ret;
> +}
> +#else
> +static inline int copy_mc_user_highpage(struct page *to, struct page *from,
> +                                       unsigned long vaddr, struct vm_area_struct *vma)
> +{
> +       copy_user_highpage(to, from, vaddr, vma);
> +       return 0;
>  }
>
> +static inline int copy_mc_highpage(struct page *to, struct page *from)
> +{
> +       copy_highpage(to, from);
> +       return 0;
> +}
>  #endif
>
>  static inline void memcpy_page(struct page *dst_page, size_t dst_off,
> --
> 2.40.0.rc0.216.gc4246ad0f0-goog
>
Yang Shi March 24, 2023, 8:24 p.m. UTC | #2
On Sat, Mar 4, 2023 at 10:51 PM Jiaqi Yan <jiaqiyan@google.com> wrote:
>
> Similar to how copy_mc_user_highpage is implemented for
> copy_user_highpage on #MC supported architecture, introduce
> the #MC handled version of copy_highpage.
>
> This helper has immediate usage when khugepaged wants to copy
> file-backed memory pages and tolerate #MC.

I don't have a strong opinion on non-inline or inline. Putting
copy_mc_highpage() together with copy_mc_user_highpage() makes sense
to me.

Reviewed-by: Yang Shi <shy828301@gmail.com>

>
> Signed-off-by: Jiaqi Yan <jiaqiyan@google.com>
> ---
>  include/linux/highmem.h | 54 +++++++++++++++++++++++++++++++----------
>  1 file changed, 41 insertions(+), 13 deletions(-)
>
> diff --git a/include/linux/highmem.h b/include/linux/highmem.h
> index 1128b7114931f..7cbecae39b3eb 100644
> --- a/include/linux/highmem.h
> +++ b/include/linux/highmem.h
> @@ -315,7 +315,29 @@ static inline void copy_user_highpage(struct page *to, struct page *from,
>
>  #endif
>
> +#ifndef __HAVE_ARCH_COPY_HIGHPAGE
> +
> +static inline void copy_highpage(struct page *to, struct page *from)
> +{
> +       char *vfrom, *vto;
> +
> +       vfrom = kmap_local_page(from);
> +       vto = kmap_local_page(to);
> +       copy_page(vto, vfrom);
> +       kmsan_copy_page_meta(to, from);
> +       kunmap_local(vto);
> +       kunmap_local(vfrom);
> +}
> +
> +#endif
> +
>  #ifdef copy_mc_to_kernel
> +/*
> + * If architecture supports machine check exception handling, define the
> + * #MC versions of copy_user_highpage and copy_highpage. They copy a memory
> + * page with #MC in source page (@from) handled, and return the number
> + * of bytes not copied if there was a #MC, otherwise 0 for success.
> + */
>  static inline int copy_mc_user_highpage(struct page *to, struct page *from,
>                                         unsigned long vaddr, struct vm_area_struct *vma)
>  {
> @@ -332,29 +354,35 @@ static inline int copy_mc_user_highpage(struct page *to, struct page *from,
>
>         return ret;
>  }
> -#else
> -static inline int copy_mc_user_highpage(struct page *to, struct page *from,
> -                                       unsigned long vaddr, struct vm_area_struct *vma)
> -{
> -       copy_user_highpage(to, from, vaddr, vma);
> -       return 0;
> -}
> -#endif
>
> -#ifndef __HAVE_ARCH_COPY_HIGHPAGE
> -
> -static inline void copy_highpage(struct page *to, struct page *from)
> +static inline int copy_mc_highpage(struct page *to, struct page *from)
>  {
> +       unsigned long ret;
>         char *vfrom, *vto;
>
>         vfrom = kmap_local_page(from);
>         vto = kmap_local_page(to);
> -       copy_page(vto, vfrom);
> -       kmsan_copy_page_meta(to, from);
> +       ret = copy_mc_to_kernel(vto, vfrom, PAGE_SIZE);
> +       if (!ret)
> +               kmsan_copy_page_meta(to, from);
>         kunmap_local(vto);
>         kunmap_local(vfrom);
> +
> +       return ret;
> +}
> +#else
> +static inline int copy_mc_user_highpage(struct page *to, struct page *from,
> +                                       unsigned long vaddr, struct vm_area_struct *vma)
> +{
> +       copy_user_highpage(to, from, vaddr, vma);
> +       return 0;
>  }
>
> +static inline int copy_mc_highpage(struct page *to, struct page *from)
> +{
> +       copy_highpage(to, from);
> +       return 0;
> +}
>  #endif
>
>  static inline void memcpy_page(struct page *dst_page, size_t dst_off,
> --
> 2.40.0.rc0.216.gc4246ad0f0-goog
>
diff mbox series

Patch

diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 1128b7114931f..7cbecae39b3eb 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -315,7 +315,29 @@  static inline void copy_user_highpage(struct page *to, struct page *from,
 
 #endif
 
+#ifndef __HAVE_ARCH_COPY_HIGHPAGE
+
+static inline void copy_highpage(struct page *to, struct page *from)
+{
+	char *vfrom, *vto;
+
+	vfrom = kmap_local_page(from);
+	vto = kmap_local_page(to);
+	copy_page(vto, vfrom);
+	kmsan_copy_page_meta(to, from);
+	kunmap_local(vto);
+	kunmap_local(vfrom);
+}
+
+#endif
+
 #ifdef copy_mc_to_kernel
+/*
+ * If architecture supports machine check exception handling, define the
+ * #MC versions of copy_user_highpage and copy_highpage. They copy a memory
+ * page with #MC in source page (@from) handled, and return the number
+ * of bytes not copied if there was a #MC, otherwise 0 for success.
+ */
 static inline int copy_mc_user_highpage(struct page *to, struct page *from,
 					unsigned long vaddr, struct vm_area_struct *vma)
 {
@@ -332,29 +354,35 @@  static inline int copy_mc_user_highpage(struct page *to, struct page *from,
 
 	return ret;
 }
-#else
-static inline int copy_mc_user_highpage(struct page *to, struct page *from,
-					unsigned long vaddr, struct vm_area_struct *vma)
-{
-	copy_user_highpage(to, from, vaddr, vma);
-	return 0;
-}
-#endif
 
-#ifndef __HAVE_ARCH_COPY_HIGHPAGE
-
-static inline void copy_highpage(struct page *to, struct page *from)
+static inline int copy_mc_highpage(struct page *to, struct page *from)
 {
+	unsigned long ret;
 	char *vfrom, *vto;
 
 	vfrom = kmap_local_page(from);
 	vto = kmap_local_page(to);
-	copy_page(vto, vfrom);
-	kmsan_copy_page_meta(to, from);
+	ret = copy_mc_to_kernel(vto, vfrom, PAGE_SIZE);
+	if (!ret)
+		kmsan_copy_page_meta(to, from);
 	kunmap_local(vto);
 	kunmap_local(vfrom);
+
+	return ret;
+}
+#else
+static inline int copy_mc_user_highpage(struct page *to, struct page *from,
+					unsigned long vaddr, struct vm_area_struct *vma)
+{
+	copy_user_highpage(to, from, vaddr, vma);
+	return 0;
 }
 
+static inline int copy_mc_highpage(struct page *to, struct page *from)
+{
+	copy_highpage(to, from);
+	return 0;
+}
 #endif
 
 static inline void memcpy_page(struct page *dst_page, size_t dst_off,