@@ -2559,7 +2559,7 @@ int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long
unsigned long vm_len, pfn, pages;
/* Check that the physical memory area passed in looks valid */
- if (start + len < start)
+ if (add_would_overflow(start, len))
return -EINVAL;
/*
* You *really* shouldn't map things that aren't page-aligned,
@@ -2569,7 +2569,7 @@ int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long
len += start & ~PAGE_MASK;
pfn = start >> PAGE_SHIFT;
pages = (len + ~PAGE_MASK) >> PAGE_SHIFT;
- if (pfn + pages < pfn)
+ if (add_would_overflow(pfn, pages))
return -EINVAL;
/* We start the mapping 'vm_pgoff' pages into the area */
@@ -3023,7 +3023,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
return ret;
/* Does pgoff wrap? */
- if (pgoff + (size >> PAGE_SHIFT) < pgoff)
+ if (add_would_overflow(pgoff, (size >> PAGE_SHIFT)))
return ret;
if (mmap_write_lock_killable(mm))
@@ -848,7 +848,7 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
/* Need to be careful about a growing mapping */
pgoff = (addr - vma->vm_start) >> PAGE_SHIFT;
pgoff += vma->vm_pgoff;
- if (pgoff + (new_len >> PAGE_SHIFT) < pgoff)
+ if (add_would_overflow(pgoff, (new_len >> PAGE_SHIFT)))
return ERR_PTR(-EINVAL);
if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))
@@ -202,7 +202,7 @@ EXPORT_SYMBOL(vmalloc_to_pfn);
long vread_iter(struct iov_iter *iter, const char *addr, size_t count)
{
/* Don't allow overflow */
- if ((unsigned long) addr + count < count)
+ if (add_would_overflow(count, (unsigned long)addr))
count = -(unsigned long) addr;
return copy_to_iter(addr, count, iter);
@@ -1705,7 +1705,7 @@ int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, in
{
struct mm_struct *mm;
- if (addr + len < addr)
+ if (add_would_overflow(addr, len))
return 0;
mm = get_task_mm(tsk);
@@ -567,7 +567,7 @@ unsigned long vm_mmap(struct file *file, unsigned long addr,
unsigned long len, unsigned long prot,
unsigned long flag, unsigned long offset)
{
- if (unlikely(offset + PAGE_ALIGN(len) < offset))
+ if (unlikely(add_would_overflow(offset, PAGE_ALIGN(len))))
return -EINVAL;
if (unlikely(offset_in_page(offset)))
return -EINVAL;
In an effort to separate intentional arithmetic wrap-around from
unexpected wrap-around, we need to refactor places that depend on this
kind of math. One of the most common code patterns of this is:

	VAR + value < VAR

Notably, this is considered "undefined behavior" for signed and pointer
types, which the kernel works around by using the -fno-strict-overflow
option in the build[1] (which used to just be -fwrapv). Regardless, we
want to get the kernel source to the position where we can meaningfully
instrument arithmetic wrap-around conditions and catch them when they
are unexpected, regardless of whether they are signed[2], unsigned[3],
or pointer[4] types.

Refactor open-coded wrap-around addition test to use add_would_overflow().
This paves the way to enabling the wrap-around sanitizers in the future.

Link: https://git.kernel.org/linus/68df3755e383e6fecf2354a67b08f92f18536594 [1]
Link: https://github.com/KSPP/linux/issues/26 [2]
Link: https://github.com/KSPP/linux/issues/27 [3]
Link: https://github.com/KSPP/linux/issues/344 [4]
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Shuah Khan <shuah@kernel.org>
Cc: linux-mm@kvack.org
Cc: linux-kselftest@vger.kernel.org
Signed-off-by: Kees Cook <keescook@chromium.org>
---
 mm/memory.c | 4 ++--
 mm/mmap.c   | 2 +-
 mm/mremap.c | 2 +-
 mm/nommu.c  | 4 ++--
 mm/util.c   | 2 +-
 5 files changed, 7 insertions(+), 7 deletions(-)
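
The definition of add_would_overflow() is not part of this diff; as a
minimal sketch, assuming the helper is layered on the same
__builtin_add_overflow() machinery that backs check_add_overflow() in
<linux/overflow.h>, it could look roughly like this (the exact in-tree
definition may differ):

	/*
	 * Sketch only: evaluates to true if "a + b" would wrap around
	 * for the type of "a", without performing the wrapping addition
	 * in a way the sanitizers would flag. Relies on the compiler's
	 * __builtin_add_overflow(), which returns true on overflow and
	 * stores the (wrapped) sum in the third argument.
	 */
	#define add_would_overflow(a, b)			\
		({						\
			typeof(a) __sum;			\
			__builtin_add_overflow(a, b, &__sum);	\
		})

With a helper along these lines, a call like add_would_overflow(pfn, pages)
reads as a direct statement of intent, instead of requiring the reader to
recognize the "pfn + pages < pfn" idiom and its undefined-behavior caveats
for signed and pointer types.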