--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -6,6 +6,7 @@
*/
#include <linux/compiler.h>
#include <linux/kasan-checks.h>
+#include <linux/kmsan-checks.h>
#include <linux/string.h>
#include <asm/asm.h>
#include <asm/page.h>
@@ -174,6 +175,7 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
ASM_CALL_CONSTRAINT \
: "0" (ptr), "i" (sizeof(*(ptr)))); \
(x) = (__force __typeof__(*(ptr))) __val_gu; \
+ kmsan_unpoison_shadow(&(x), sizeof(*(ptr))); \
__builtin_expect(__ret_gu, 0); \
})
@@ -248,6 +250,7 @@ extern void __put_user_8(void);
__chk_user_ptr(ptr); \
might_fault(); \
__pu_val = x; \
+ kmsan_check_memory(&(__pu_val), sizeof(*(ptr))); \
switch (sizeof(*(ptr))) { \
case 1: \
__put_user_x(1, __pu_val, ptr, __ret_pu); \
@@ -270,7 +273,9 @@ extern void __put_user_8(void);
#define __put_user_size(x, ptr, size, label) \
do { \
+ __typeof__(*(ptr)) __pus_val = x; \
__chk_user_ptr(ptr); \
+ kmsan_check_memory(&(__pus_val), size); \
switch (size) { \
case 1: \
__put_user_goto(x, ptr, "b", "b", "iq", label); \
@@ -295,7 +300,10 @@ do { \
*/
#define __put_user_size_ex(x, ptr, size) \
do { \
+ __typeof__(*(ptr)) __puse_val; \
__chk_user_ptr(ptr); \
+ __puse_val = x; \
+ kmsan_check_memory(&(__puse_val), size); \
switch (size) { \
case 1: \
__put_user_asm_ex(x, ptr, "b", "b", "iq"); \
@@ -363,6 +371,7 @@ do { \
default: \
(x) = __get_user_bad(); \
} \
+ kmsan_unpoison_shadow(&(x), size); \
} while (0)
#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
@@ -413,6 +422,7 @@ do { \
default: \
(x) = __get_user_bad(); \
} \
+ kmsan_unpoison_shadow(&(x), size); \
} while (0)
#define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
@@ -428,11 +438,13 @@ do { \
#define __put_user_nocheck(x, ptr, size) \
({ \
__label__ __pu_label; \
+ __typeof__(*(ptr)) __pun_val = x; \
int __pu_err = -EFAULT; \
__typeof__(*(ptr)) __pu_val = (x); \
__typeof__(ptr) __pu_ptr = (ptr); \
__typeof__(size) __pu_size = (size); \
__uaccess_begin(); \
+	kmsan_check_memory(&(__pun_val), __pu_size);			\
__put_user_size(__pu_val, __pu_ptr, __pu_size, __pu_label); \
__pu_err = 0; \
__pu_label: \
--- a/include/asm-generic/cacheflush.h
+++ b/include/asm-generic/cacheflush.h
@@ -4,6 +4,7 @@
/* Keep includes the same across arches. */
#include <linux/mm.h>
+#include <linux/kmsan-checks.h>
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
@@ -72,10 +73,14 @@ static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
+ kmsan_check_memory(src, len); \
memcpy(dst, src, len); \
flush_icache_user_range(vma, page, vaddr, len); \
} while (0)
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
- memcpy(dst, src, len)
+ do { \
+ memcpy(dst, src, len); \
+ kmsan_unpoison_shadow(dst, len); \
+ } while (0)
#endif /* __ASM_CACHEFLUSH_H */
--- a/include/asm-generic/uaccess.h
+++ b/include/asm-generic/uaccess.h
@@ -142,7 +142,11 @@ static inline int __access_ok(unsigned long addr, unsigned long size)
static inline int __put_user_fn(size_t size, void __user *ptr, void *x)
{
- return unlikely(raw_copy_to_user(ptr, x, size)) ? -EFAULT : 0;
+ int n;
+
+ n = raw_copy_to_user(ptr, x, size);
+ kmsan_copy_to_user(ptr, x, size, n);
+ return unlikely(n) ? -EFAULT : 0;
}
#define __put_user_fn(sz, u, k) __put_user_fn(sz, u, k)
@@ -203,7 +207,11 @@ extern int __put_user_bad(void) __attribute__((noreturn));
#ifndef __get_user_fn
static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
{
- return unlikely(raw_copy_from_user(x, ptr, size)) ? -EFAULT : 0;
+ int copied, to_copy = size;
+
+ copied = raw_copy_from_user(x, ptr, size);
+	kmsan_unpoison_shadow(x, to_copy - copied);
+ return unlikely(copied) ? -EFAULT : 0;
}
#define __get_user_fn(sz, u, k) __get_user_fn(sz, u, k)
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -5,6 +5,7 @@
#include <linux/sched.h>
#include <linux/thread_info.h>
#include <linux/kasan-checks.h>
+#include <linux/kmsan-checks.h>
#define uaccess_kernel() segment_eq(get_fs(), KERNEL_DS)
@@ -58,18 +59,26 @@
static __always_inline __must_check unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
+ unsigned long to_copy = n;
+
kasan_check_write(to, n);
check_object_size(to, n, false);
- return raw_copy_from_user(to, from, n);
+ n = raw_copy_from_user(to, from, n);
+ kmsan_unpoison_shadow(to, to_copy - n);
+ return n;
}
static __always_inline __must_check unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
+ unsigned long to_copy = n;
+
might_fault();
kasan_check_write(to, n);
check_object_size(to, n, false);
- return raw_copy_from_user(to, from, n);
+ n = raw_copy_from_user(to, from, n);
+ kmsan_unpoison_shadow(to, to_copy - n);
+ return n;
}
/**
@@ -88,29 +97,39 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
static __always_inline __must_check unsigned long
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
+ unsigned long to_copy = n;
+
kasan_check_read(from, n);
check_object_size(from, n, true);
- return raw_copy_to_user(to, from, n);
+ n = raw_copy_to_user(to, from, n);
+ kmsan_copy_to_user(to, from, to_copy, n);
+ return n;
}
static __always_inline __must_check unsigned long
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
+ unsigned long to_copy = n;
+
might_fault();
kasan_check_read(from, n);
check_object_size(from, n, true);
- return raw_copy_to_user(to, from, n);
+ n = raw_copy_to_user(to, from, n);
+ kmsan_copy_to_user(to, from, to_copy, n);
+ return n;
}
#ifdef INLINE_COPY_FROM_USER
static inline __must_check unsigned long
_copy_from_user(void *to, const void __user *from, unsigned long n)
{
- unsigned long res = n;
+ unsigned long res = n, to_copy = n;
+
might_fault();
if (likely(access_ok(from, n))) {
kasan_check_write(to, n);
res = raw_copy_from_user(to, from, n);
+ kmsan_unpoison_shadow(to, to_copy - res);
}
if (unlikely(res))
memset(to + (n - res), 0, res);
@@ -125,10 +144,13 @@ _copy_from_user(void *, const void __user *, unsigned long);
static inline __must_check unsigned long
_copy_to_user(void __user *to, const void *from, unsigned long n)
{
+ unsigned long to_copy = n;
+
might_fault();
if (access_ok(to, n)) {
kasan_check_read(from, n);
n = raw_copy_to_user(to, from, n);
+ kmsan_copy_to_user(to, from, to_copy, n);
}
return n;
}
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -137,18 +137,24 @@
static int copyout(void __user *to, const void *from, size_t n)
{
+ size_t to_copy = n;
+
if (access_ok(to, n)) {
kasan_check_read(from, n);
n = raw_copy_to_user(to, from, n);
+ kmsan_copy_to_user(to, from, to_copy, n);
}
return n;
}
static int copyin(void *to, const void __user *from, size_t n)
{
+ size_t to_copy = n;
+
if (access_ok(from, n)) {
kasan_check_write(to, n);
n = raw_copy_from_user(to, from, n);
+ kmsan_unpoison_shadow(to, to_copy - n);
}
return n;
}
--- a/lib/usercopy.c
+++ b/lib/usercopy.c
@@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
+#include <linux/kmsan-checks.h>
#include <linux/uaccess.h>
#include <linux/bitops.h>
@@ -7,11 +8,12 @@
#ifndef INLINE_COPY_FROM_USER
unsigned long _copy_from_user(void *to, const void __user *from, unsigned long n)
{
- unsigned long res = n;
+ unsigned long res = n, to_copy = n;
might_fault();
if (likely(access_ok(from, n))) {
kasan_check_write(to, n);
res = raw_copy_from_user(to, from, n);
+ kmsan_unpoison_shadow(to, to_copy - res);
}
if (unlikely(res))
memset(to + (n - res), 0, res);
@@ -23,10 +25,12 @@ EXPORT_SYMBOL(_copy_from_user);
#ifndef INLINE_COPY_TO_USER
unsigned long _copy_to_user(void __user *to, const void *from, unsigned long n)
{
+ unsigned long to_copy = n;
might_fault();
if (likely(access_ok(to, n))) {
kasan_check_read(from, n);
n = raw_copy_to_user(to, from, n);
+ kmsan_copy_to_user(to, from, to_copy, n);
}
return n;
}
Memory that is copied from userspace must be unpoisoned. Before copying
memory to userspace, check it and report an error if it contains
uninitialized bits.

Signed-off-by: Alexander Potapenko <glider@google.com>
To: Alexander Potapenko <glider@google.com>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Vegard Nossum <vegard.nossum@oracle.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: linux-mm@kvack.org

---
Change-Id: I38428b9c7d1909b8441dcec1749b080494a7af99
---
 arch/x86/include/asm/uaccess.h   | 12 ++++++++++++
 include/asm-generic/cacheflush.h |  7 ++++++-
 include/asm-generic/uaccess.h    | 12 ++++++++++--
 include/linux/uaccess.h          | 32 +++++++++++++++++++++++++++-----
 lib/iov_iter.c                   |  6 ++++++
 lib/usercopy.c                   |  6 +++++-
 6 files changed, 66 insertions(+), 9 deletions(-)