@@ -213,4 +213,10 @@ copy_user_handle_tail(char *to, char *from, unsigned len);
unsigned long
mcsafe_handle_tail(char *to, char *from, unsigned len);

+unsigned long __must_check
+memset_user(void __user *mem, int c, unsigned long len);
+
+unsigned long __must_check
+__memset_user(void __user *mem, int c, unsigned long len);
+
#endif /* _ASM_X86_UACCESS_64_H */
@@ -9,6 +9,60 @@
#include <linux/uaccess.h>
#include <linux/highmem.h>

+/*
+ * Memset Userspace
+ */
+
+unsigned long __memset_user(void __user *addr, int c, unsigned long size)
+{
+ long __d0; /* dummy output tied to the advancing dst pointer */
+ unsigned long pattern = 0; /* will hold c's low byte replicated 8 times */
+ int i;
+
+ for (i = 0; i < 8; i++)
+ pattern = (pattern << 8) | (0xFF & c);
+ might_fault();
+ /* No "memory" clobber: the asm writes only user memory, which gcc does not track. */
+ stac();
+ asm volatile(
+ " movq %[val], %%rdx\n"
+ " testq %[size8],%[size8]\n"
+ " jz 4f\n"
+ "0: mov %%rdx,(%[dst])\n" /* qword fill loop */
+ " addq $8,%[dst]\n"
+ " decl %%ecx ; jnz 0b\n"
+ "4: movq %[size1],%%rcx\n"
+ " testl %%ecx,%%ecx\n"
+ " jz 2f\n"
+ "1: movb %%dl,(%[dst])\n" /* 1-7 byte tail loop */
+ " incq %[dst]\n"
+ " decl %%ecx ; jnz 1b\n"
+ "2:\n"
+ ".section .fixup,\"ax\"\n"
+ "3: lea 0(%[size1],%[size8],8),%[size8]\n" /* unset bytes = tail + 8 * qwords left */
+ " jmp 2b\n"
+ ".previous\n"
+ _ASM_EXTABLE_UA(0b, 3b)
+ _ASM_EXTABLE_UA(1b, 2b)
+ : [size8] "=&c"(size), [dst] "=&D" (__d0)
+ : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
+ [val] "ri"(pattern)
+ : "rdx");
+
+ clac();
+ return size; /* 0 on success, else number of bytes not set */
+}
+EXPORT_SYMBOL(__memset_user);
+
+unsigned long memset_user(void __user *to, int c, unsigned long n)
+{
+ if (access_ok(VERIFY_WRITE, to, n))
+ return __memset_user(to, c, n);
+ return n; /* range not accessible: nothing was set */
+}
+EXPORT_SYMBOL(memset_user);
+
+
/*
* Zero Userspace
*/
Create an x86_64-specific version of memset() for user space, based on
clear_user(). This will be used to implement wr_memset() in the
__wr_after_init scenario, where write-rare variables have an alternate
mapping for writing.

Signed-off-by: Igor Stoppa <igor.stoppa@huawei.com>
CC: Andy Lutomirski <luto@amacapital.net>
CC: Nadav Amit <nadav.amit@gmail.com>
CC: Matthew Wilcox <willy@infradead.org>
CC: Peter Zijlstra <peterz@infradead.org>
CC: Kees Cook <keescook@chromium.org>
CC: Dave Hansen <dave.hansen@linux.intel.com>
CC: Mimi Zohar <zohar@linux.vnet.ibm.com>
CC: linux-integrity@vger.kernel.org
CC: kernel-hardening@lists.openwall.com
CC: linux-mm@kvack.org
CC: linux-kernel@vger.kernel.org
---
 arch/x86/include/asm/uaccess_64.h |  6 ++++
 arch/x86/lib/usercopy_64.c        | 54 ++++++++++++++++++++++++++++++
 2 files changed, 60 insertions(+)
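
For readers less fluent in inline asm: the loop above is equivalent to
the plain C below. This is an illustration only; fill_loop() is a
made-up name, and it has no fault handling, which is the whole reason
the real code uses asm with exception-table entries:

	/* Illustration only: what the asm does, minus fault handling. */
	static unsigned long fill_loop(char *dst, unsigned long pattern,
				       unsigned long size)
	{
		unsigned long qwords = size / 8;
		unsigned long tail = size & 7;

		while (qwords--) {		/* the "0:" qword loop */
			memcpy(dst, &pattern, 8);
			dst += 8;
		}
		while (tail--)			/* the "1:" byte-tail loop */
			*dst++ = (char)pattern;
		return 0;	/* the asm instead returns bytes left unset */
	}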
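
A minimal, hypothetical call site, to show the calling convention:
memset_user() performs the access_ok() check itself and, like
clear_user(), returns the number of bytes that could not be set:

	/* Hypothetical example, not part of this patch. */
	static int fill_user_buffer(void __user *ubuf, unsigned long len)
	{
		unsigned long left;

		left = memset_user(ubuf, 0xAA, len);
		if (left)
			return -EFAULT;	/* 'left' bytes were not set */
		return 0;
	}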
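
And a sketch of the intended consumer. wr_memset(), map_wr_alias() and
unmap_wr_alias() are placeholders standing in for the write-rare
machinery introduced elsewhere in this series, not code from this
patch:

	/*
	 * Sketch only: fill a __wr_after_init variable through a
	 * temporary writable alias; the alias helpers are placeholders.
	 */
	static int wr_memset(void *dst, int c, unsigned long len)
	{
		void __user *alias;
		unsigned long left;

		alias = map_wr_alias(dst, len);	/* writable alternate mapping */
		if (!alias)
			return -ENOMEM;
		left = __memset_user(alias, c, len);
		unmap_wr_alias(alias, len);
		return left ? -EFAULT : 0;
	}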