diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -598,6 +598,4 @@ static __always_inline void perf_lopwr_cb(bool lopwr_in)
static inline void amd_pmu_disable_virt(void) { }
#endif
-#define arch_perf_out_copy_user copy_from_user_nmi
-
#endif /* _ASM_X86_PERF_EVENT_H */
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile
@@ -42,7 +42,7 @@ clean-files := inat-tables.c
obj-$(CONFIG_SMP) += msr-smp.o cache-smp.o
lib-y := delay.o misc.o cmdline.o cpu.o
-lib-y += usercopy_$(BITS).o usercopy.o getuser.o putuser.o
+lib-y += usercopy_$(BITS).o getuser.o putuser.o
lib-y += memcpy_$(BITS).o
lib-y += pc-conf-reg.o
lib-$(CONFIG_ARCH_HAS_COPY_MC) += copy_mc.o copy_mc_64.o
diff --git a/arch/x86/lib/usercopy.c b/arch/x86/lib/usercopy.c
deleted file mode 100644
--- a/arch/x86/lib/usercopy.c
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * User address space access functions.
- *
- * For licencing details see kernel-base/COPYING
- */
-
-#include <linux/uaccess.h>
-#include <linux/export.h>
-#include <linux/instrumented.h>
-
-#include <asm/tlbflush.h>
-
-/**
- * copy_from_user_nmi - NMI safe copy from user
- * @to: Pointer to the destination buffer
- * @from: Pointer to a user space address of the current task
- * @n: Number of bytes to copy
- *
- * Returns: The number of not copied bytes. 0 is success, i.e. all bytes copied
- *
- * Contrary to other copy_from_user() variants this function can be called
- * from NMI context. Despite the name it is not restricted to be called
- * from NMI context. It is safe to be called from any other context as
- * well. It disables pagefaults across the copy which means a fault will
- * abort the copy.
- *
- * For NMI context invocations this relies on the nested NMI work to allow
- * atomic faults from the NMI path; the nested NMI paths are careful to
- * preserve CR2.
- */
-unsigned long
-copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
-{
- unsigned long ret;
-
- if (!__access_ok(from, n))
- return n;
-
- if (!nmi_uaccess_okay())
- return n;
-
- /*
- * Even though this function is typically called from NMI/IRQ context
- * disable pagefaults so that its behaviour is consistent even when
- * called from other contexts.
- */
- pagefault_disable();
- instrument_copy_from_user_before(to, from, n);
- ret = raw_copy_from_user(to, from, n);
- instrument_copy_from_user_after(to, from, n, ret);
- pagefault_enable();
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(copy_from_user_nmi);
diff --git a/kernel/events/internal.h b/kernel/events/internal.h
--- a/kernel/events/internal.h
+++ b/kernel/events/internal.h
@@ -190,21 +190,7 @@ memcpy_skip(void *dst, const void *src, unsigned long n)
DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip)
-#ifndef arch_perf_out_copy_user
-#define arch_perf_out_copy_user arch_perf_out_copy_user
-
-static inline unsigned long
-arch_perf_out_copy_user(void *dst, const void *src, unsigned long n)
-{
- unsigned long ret;
-
- pagefault_disable();
- ret = __copy_from_user_inatomic(dst, src, n);
- pagefault_enable();
-
- return ret;
-}
-#endif
+#define arch_perf_out_copy_user copy_from_user_nmi
DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
diff --git a/mm/maccess.c b/mm/maccess.c
--- a/mm/maccess.c
+++ b/mm/maccess.c
@@ -103,17 +103,27 @@ long strncpy_from_kernel_nofault(char *dst, const void *unsafe_addr, long count)
}
/**
- * copy_from_user_nofault(): safely attempt to read from a user-space location
- * @dst: pointer to the buffer that shall take the data
- * @src: address to read from. This must be a user address.
- * @size: size of the data chunk
+ * copy_from_user_nmi - NMI safe copy from user
+ * @dst: Pointer to the destination buffer
+ * @src: Pointer to a user space address of the current task
+ * @size: Number of bytes to copy
*
- * Safely read from user address @src to the buffer at @dst. If a kernel fault
- * happens, handle that and return -EFAULT.
+ * Returns: The number of not copied bytes. 0 is success, i.e. all bytes copied
+ *
+ * Contrary to other copy_from_user() variants this function can be called
+ * from NMI context. Despite the name it is not restricted to be called
+ * from NMI context. It is safe to be called from any other context as
+ * well. It disables pagefaults across the copy which means a fault will
+ * abort the copy.
+ *
+ * For NMI context invocations this relies on the nested NMI work to allow
+ * atomic faults from the NMI path; the nested NMI paths are careful to
+ * preserve CR2 on the x86 architecture.
*/
-long copy_from_user_nofault(void *dst, const void __user *src, size_t size)
+unsigned long
+copy_from_user_nmi(void *dst, const void __user *src, unsigned long size)
{
- long ret = -EFAULT;
+ unsigned long ret = size;
if (!__access_ok(src, size))
return ret;
@@ -121,11 +131,31 @@ long copy_from_user_nofault(void *dst, const void __user *src, size_t size)
if (!nmi_uaccess_okay())
return ret;
+ /*
+ * Even though this function is typically called from NMI/IRQ context
+ * disable pagefaults so that its behaviour is consistent even when
+ * called from other contexts.
+ */
pagefault_disable();
ret = __copy_from_user_inatomic(dst, src, size);
pagefault_enable();
- if (ret)
+ return ret;
+}
+EXPORT_SYMBOL_GPL(copy_from_user_nmi);
+
+/**
+ * copy_from_user_nofault(): safely attempt to read from a user-space location
+ * @dst: pointer to the buffer that shall take the data
+ * @src: address to read from. This must be a user address.
+ * @size: size of the data chunk
+ *
+ * Safely read from user address @src to the buffer at @dst. If a kernel fault
+ * happens, handle that and return -EFAULT.
+ */
+long copy_from_user_nofault(void *dst, const void __user *src, size_t size)
+{
+ if (copy_from_user_nmi(dst, src, size))
return -EFAULT;
return 0;
}
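
For reference, after the patch proper: a minimal, hypothetical sketch of how a caller would rely on the contract documented above for copy_from_user_nmi() once it lives in generic code. It is not part of this series; the demo_* names and the frame layout are illustrative assumptions, and the declaration is assumed to be reachable via <linux/uaccess.h>. The return value is the number of bytes that could not be copied, so 0 means the whole buffer was read; copy_from_user_nofault() simply folds any non-zero remainder into -EFAULT.

/*
 * Hypothetical example (not part of this patch): a perf-style sampler
 * reading one user stack frame from NMI context.  A fault aborts the
 * copy, copy_from_user_nmi() reports the leftover byte count, and the
 * caller just stops walking instead of faulting.
 */
#include <linux/types.h>
#include <linux/uaccess.h>	/* assumed to provide copy_from_user_nmi() */

struct demo_user_frame {		/* illustrative frame layout */
	u64 next_fp;
	u64 return_address;
};

static u64 demo_read_return_address(u64 user_fp)
{
	struct demo_user_frame frame;

	/* Non-zero return: not all sizeof(frame) bytes were copied. */
	if (copy_from_user_nmi(&frame, (void __user *)(unsigned long)user_fp,
			       sizeof(frame)))
		return 0;

	return frame.return_address;
}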