@@ -343,7 +343,6 @@ unsigned long long ZSTD_findDecompressed
return ret;
/* check for overflow */
- if (totalDstSize + ret < totalDstSize)
return ZSTD_CONTENTSIZE_ERROR;
totalDstSize += ret;
}
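The idiom removed by this hunk, and by every hunk that follows, relies on unsigned wraparound: C defines unsigned addition modulo 2^N, so a sum that overflows compares strictly smaller than either operand. A minimal standalone sketch of the test (the helper name is illustrative, not from the tree):

#include <stdbool.h>
#include <stdint.h>

/* True iff a + b wraps: after an overflowing unsigned addition the
 * result is a + b - 2^64, which is strictly less than a. */
static bool add_wraps_u64(uint64_t a, uint64_t b)
{
	return a + b < a;
}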
@@ -503,7 +503,6 @@ struct scatterlist *sgl_alloc_order(unsi
nalloc = nent;
if (chainable) {
/* Check for integer overflow */
- if (nalloc + 1 < nalloc)
return NULL;
nalloc++;
}
@@ -954,7 +954,6 @@ int dma_buf_mmap(struct dma_buf *dmabuf,
return -EINVAL;
/* check for offset overflow */
- if (pgoff + vma_pages(vma) < pgoff)
return -EOVERFLOW;
/* check for overflowing the buffer's size */
@@ -1043,7 +1043,6 @@ static int verity_ctr(struct dm_target *
v->hash_level_block[i] = hash_position;
s = (v->data_blocks + ((sector_t)1 << ((i + 1) * v->hash_per_block_bits)) - 1)
>> ((i + 1) * v->hash_per_block_bits);
- if (hash_position + s < hash_position) {
ti->error = "Hash device offset overflow";
r = -E2BIG;
goto bad;
@@ -233,7 +233,6 @@ static int flakey_ctr(struct dm_target *
goto bad;
}
- if (fc->up_interval + fc->down_interval < fc->up_interval) {
ti->error = "Interval overflow";
r = -EINVAL;
goto bad;
@@ -306,7 +306,6 @@ validate_gl_array_primitive(VALIDATE_ARG
}
shader_state = &exec->shader_state[exec->shader_state_count - 1];
- if (length + base_index < length) {
DRM_DEBUG("primitive vertex count overflow\n");
return -EINVAL;
}
@@ -208,7 +208,6 @@ int vme_check_window(u32 aspace, unsigne
{
int retval = 0;
- if (vme_base + size < size)
return -EINVAL;
switch (aspace) {
@@ -99,7 +99,6 @@ static void __iomem *map_capability(stru
length -= start;
- if (start + offset < offset) {
dev_err(&dev->dev,
"virtio_pci: map wrap-around %u+%u\n",
start, offset);
@@ -2015,7 +2015,6 @@ static int mwifiex_extract_wifi_fw(struc
switch (dnld_cmd) {
case MWIFIEX_FW_DNLD_CMD_1:
- if (offset + data_len < data_len) {
mwifiex_dbg(adapter, ERROR, "bad FW parse\n");
ret = -1;
goto done;
@@ -2039,7 +2038,6 @@ static int mwifiex_extract_wifi_fw(struc
case MWIFIEX_FW_DNLD_CMD_5:
first_cmd = true;
/* Check for integer overflow */
- if (offset + data_len < data_len) {
mwifiex_dbg(adapter, ERROR, "bad FW parse\n");
ret = -1;
goto done;
@@ -2049,7 +2047,6 @@ static int mwifiex_extract_wifi_fw(struc
case MWIFIEX_FW_DNLD_CMD_6:
first_cmd = true;
/* Check for integer overflow */
- if (offset + data_len < data_len) {
mwifiex_dbg(adapter, ERROR, "bad FW parse\n");
ret = -1;
goto done;
@@ -6884,7 +6884,6 @@ static int niu_get_eeprom(struct net_dev
offset = eeprom->offset;
len = eeprom->len;
- if (offset + len < offset)
return -EINVAL;
if (offset >= np->eeprom_len)
return -EINVAL;
@@ -389,7 +389,6 @@ ixgb_get_eeprom(struct net_device *netde
max_len = ixgb_get_eeprom_len(netdev);
- if (eeprom->offset > eeprom->offset + eeprom->len) {
ret_val = -EINVAL;
goto geeprom_error;
}
@@ -435,7 +434,6 @@ ixgb_set_eeprom(struct net_device *netde
max_len = ixgb_get_eeprom_len(netdev);
- if (eeprom->offset > eeprom->offset + eeprom->len)
return -EINVAL;
if ((eeprom->offset + eeprom->len) > max_len)
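The two ixgb hunks above spell the same test with the operands mirrored, "offset > offset + len" rather than "offset + len < offset". A sketch of the equivalence, with illustrative names:

#include <stdbool.h>
#include <stdint.h>

/* Both spellings compare the (possibly wrapped) sum against one
 * operand; they accept and reject exactly the same inputs. */
static bool eeprom_range_wraps(uint32_t off, uint32_t len)
{
	return off > off + len;	/* same as: off + len < off */
}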
@@ -1193,7 +1193,6 @@ artpec6_crypto_ctr_crypt(struct skcipher
* the whole IV is a counter. So fall back if the counter is going to
* overflow.
*/
- if (counter + nblks < counter) {
int ret;
pr_debug("counter %x will overflow (nblks %u), falling back\n",
@@ -123,7 +123,6 @@ static inline bool range_check(struct vr
}
/* Otherwise, don't wrap. */
- if (addr + *len < addr) {
vringh_bad("Wrapping descriptor %zu@0x%llx",
*len, (unsigned long long)addr);
return false;
@@ -224,8 +224,6 @@ static int iwch_sgl2pbl_map(struct iwch_
pr_debug("%s %d\n", __func__, __LINE__);
return -EINVAL;
}
- if (sg_list[i].addr + ((u64) sg_list[i].length) <
- sg_list[i].addr) {
pr_debug("%s %d\n", __func__, __LINE__);
return -EINVAL;
}
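The iwch hunk above casts the 32-bit length to u64 before adding it to the address. A sketch of the caveat this reflects (helper name illustrative): the idiom is only sound for unsigned arithmetic, since signed overflow is undefined behaviour in C and a compiler may optimize such a check away.

#include <stdbool.h>
#include <stdint.h>

/* Keep both operands unsigned and perform the addition at the wider
 * width; with signed operands this test would be undefined behaviour. */
static bool sge_wraps(uint64_t addr, uint32_t length)
{
	return addr + (uint64_t)length < addr;
}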
@@ -362,7 +362,6 @@ static int read_segment_platform_config(
}
/* check for bogus offset and size that wrap when added together */
- if (entry->offset + entry->size < entry->offset) {
dd_dev_err(dd,
"Bad configuration file start + size 0x%x+0x%x\n",
entry->offset, entry->size);
@@ -317,7 +317,6 @@ EXPORT_SYMBOL_GPL(fsi_slave_write);
extern int fsi_slave_claim_range(struct fsi_slave *slave,
uint32_t addr, uint32_t size)
{
- if (addr + size < addr)
return -EINVAL;
if (addr + size > slave->size)
@@ -274,9 +274,7 @@ void vgic_v2_enable(struct kvm_vcpu *vcp
/* check for overlapping regions and for regions crossing the end of memory */
static bool vgic_v2_check_base(gpa_t dist_base, gpa_t cpu_base)
{
- if (dist_base + KVM_VGIC_V2_DIST_SIZE < dist_base)
return false;
- if (cpu_base + KVM_VGIC_V2_CPU_SIZE < cpu_base)
return false;
if (dist_base + KVM_VGIC_V2_DIST_SIZE <= cpu_base)
@@ -921,7 +921,6 @@ int __kvm_set_memory_region(struct kvm *
goto out;
if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_MEM_SLOTS_NUM)
goto out;
- if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
goto out;
slot = id_to_memslot(__kvm_memslots(kvm, as_id), id);
@@ -917,7 +917,6 @@ kvm_assign_ioeventfd(struct kvm *kvm, st
}
/* check for range overflow */
- if (args->addr + args->len < args->addr)
return -EINVAL;
/* check for extra flags that we don't understand */
@@ -31,7 +31,6 @@ static int coalesced_mmio_in_range(struc
*/
if (len < 0)
return 0;
- if (addr + len < addr)
return 0;
if (addr < dev->zone.addr)
return 0;
@@ -306,7 +306,6 @@ int __init e820__update_table(struct e82
/* Bail out if we find any unreasonable addresses in the map: */
for (i = 0; i < table->nr_entries; i++) {
- if (entries[i].addr + entries[i].size < entries[i].addr)
return -1;
}
@@ -52,7 +52,6 @@ static bool opal_prd_range_is_valid(uint
struct device_node *parent, *node;
bool found;
- if (addr + size < addr)
return false;
parent = of_find_node_by_path("/reserved-memory");
@@ -206,7 +206,6 @@ static bool elf_is_phdr_sane(const struc
} else if (phdr->p_offset + phdr->p_filesz > buf_len) {
pr_debug("ELF segment not in file.\n");
return false;
- } else if (phdr->p_paddr + phdr->p_memsz < phdr->p_paddr) {
pr_debug("ELF segment address wraps around.\n");
return false;
}
@@ -322,7 +321,6 @@ static bool elf_is_shdr_sane(const struc
if (!size_ok) {
pr_debug("ELF section with wrong entry size.\n");
return false;
- } else if (shdr->sh_addr + shdr->sh_size < shdr->sh_addr) {
pr_debug("ELF section address wraps around.\n");
return false;
}
@@ -97,7 +97,6 @@ void __init add_memory_region(phys_addr_
--size;
/* Sanity check */
- if (start + size < start) {
pr_warn("Trying to add an invalid memory region, skipped\n");
return;
}
@@ -351,7 +351,6 @@ static int __init sanitize_memmap(struct
/* bail out if we find any unreasonable addresses in memmap */
for (i = 0; i < old_nr; i++)
- if (map[i].addr + map[i].size < map[i].addr)
return -1;
/* create pointers for initial change-point information (for sorting) */
@@ -123,7 +123,6 @@ is_user_addr_valid(struct task_struct *c
struct sram_list_struct *sraml;
/* overflow */
- if (start + len < start)
return -EIO;
down_read(&child->mm->mmap_sem);
@@ -31,7 +31,6 @@ asmlinkage int sys_cacheflush(unsigned l
return -EINVAL;
/* Check for overflow */
- if (addr + len < addr)
return -EFAULT;
/*
@@ -392,7 +392,6 @@ sys_cacheflush (unsigned long addr, int
struct vm_area_struct *vma;
/* Check for overflow. */
- if (addr + len < addr)
goto out;
/*
@@ -66,7 +66,6 @@ asmlinkage int sys_cacheflush(unsigned l
* Verify that the specified address region actually belongs
* to this process.
*/
- if (addr + len < addr)
return -EFAULT;
down_read(¤t->mm->mmap_sem);
@@ -456,12 +456,10 @@ nocache:
addr = ALIGN(first->va_end, align);
if (addr < vstart)
goto nocache;
- if (addr + size < addr)
goto overflow;
} else {
addr = ALIGN(vstart, align);
- if (addr + size < addr)
goto overflow;
n = vmap_area_root.rb_node;
@@ -488,7 +486,6 @@ nocache:
if (addr + cached_hole_size < first->va_start)
cached_hole_size = first->va_start - addr;
addr = ALIGN(first->va_end, align);
- if (addr + size < addr)
goto overflow;
if (list_is_last(&first->list, &vmap_area_list))
@@ -2136,7 +2136,6 @@ int vm_iomap_memory(struct vm_area_struc
unsigned long vm_len, pfn, pages;
/* Check that the physical memory area passed in looks valid */
- if (start + len < start)
return -EINVAL;
/*
* You *really* shouldn't map things that aren't page-aligned,
@@ -2146,7 +2145,6 @@ int vm_iomap_memory(struct vm_area_struc
len += start & ~PAGE_MASK;
pfn = start >> PAGE_SHIFT;
pages = (len + ~PAGE_MASK) >> PAGE_SHIFT;
- if (pfn + pages < pfn)
return -EINVAL;
/* We start the mapping 'vm_pgoff' pages into the area */
@@ -2792,7 +2792,6 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
return ret;
/* Does pgoff wrap? */
- if (pgoff + (size >> PAGE_SHIFT) < pgoff)
return ret;
if (down_write_killable(&mm->mmap_sem))
@@ -1860,7 +1860,6 @@ int access_process_vm(struct task_struct
{
struct mm_struct *mm;
- if (addr + len < addr)
return 0;
mm = get_task_mm(tsk);
@@ -411,7 +411,6 @@ static struct vm_area_struct *vma_to_res
/* Need to be careful about a growing mapping */
pgoff = (addr - vma->vm_start) >> PAGE_SHIFT;
pgoff += vma->vm_pgoff;
- if (pgoff + (new_len >> PAGE_SHIFT) < pgoff)
return ERR_PTR(-EINVAL);
if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))
@@ -517,7 +517,6 @@ static int access_mem(unw_addr_space_t _
end = start + stack->size;
/* Check overflow. */
- if (addr + sizeof(unw_word_t) < addr)
return -EINVAL;
if (addr < start || addr + sizeof(unw_word_t) >= end) {
@@ -964,7 +964,6 @@ static ssize_t data_read_offset(struct d
if (offset > dso->data.file_size)
return -1;
- if (offset + size < offset)
return -1;
return cached_read(dso, machine, offset, data, size);
@@ -145,7 +145,6 @@ static bool memory_read(Dwfl *dwfl __may
end = start + stack->size;
/* Check overflow. */
- if (addr + sizeof(Dwarf_Word) < addr)
return false;
if (addr < start || addr + sizeof(Dwarf_Word) > end) {
@@ -57,7 +57,6 @@ static bool u32_match_it(const struct xt
val >>= number;
break;
case XT_U32_AT:
- if (at + val < at)
return false;
at += val;
pos = number;
@@ -71,7 +71,6 @@ static int nft_limit_init(struct nft_lim
else
limit->burst = 0;
- if (limit->rate + limit->burst < limit->rate)
return -EOVERFLOW;
/* The token bucket size limits the number of tokens that can be
@@ -84,7 +84,6 @@ void free_extent_map(struct extent_map *
/* simple helper to do math around the end of an extent, handling wrap */
static u64 range_end(u64 start, u64 len)
{
- if (start + len < start)
return (u64)-1;
return start + len;
}
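The btrfs helpers use the check in a saturating form: instead of failing, a wrapped end offset is clamped to the maximum u64. The pre-patch shape of range_end() as a standalone sketch:

#include <stdint.h>

/* Saturate rather than error: callers treat UINT64_MAX ((u64)-1 in
 * kernel style) as "end of address space". */
static uint64_t range_end(uint64_t start, uint64_t len)
{
	if (start + len < start)
		return UINT64_MAX;
	return start + len;
}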
@@ -31,7 +31,6 @@ static struct kmem_cache *btrfs_ordered_
static u64 entry_end(struct btrfs_ordered_extent *entry)
{
- if (entry->file_offset + entry->len < entry->file_offset)
return (u64)-1;
return entry->file_offset + entry->len;
}
@@ -1615,14 +1615,10 @@ int ext4_group_add(struct super_block *s
return -EPERM;
}
- if (ext4_blocks_count(es) + input->blocks_count <
- ext4_blocks_count(es)) {
ext4_warning(sb, "blocks_count overflow");
return -EINVAL;
}
- if (le32_to_cpu(es->s_inodes_count) + EXT4_INODES_PER_GROUP(sb) <
- le32_to_cpu(es->s_inodes_count)) {
ext4_warning(sb, "inodes_count overflow");
return -EINVAL;
}
@@ -1770,7 +1766,6 @@ int ext4_group_extend(struct super_block
add = EXT4_BLOCKS_PER_GROUP(sb) - last;
- if (o_blocks_count + add < o_blocks_count) {
ext4_warning(sb, "blocks_count overflow");
return -EINVAL;
}
@@ -109,7 +109,6 @@ static bool valid_pos(loff_t pos, size_t
{
if (pos < 0 || (long) pos != pos || (ssize_t) count < 0)
return false;
- if ((unsigned long) pos + (unsigned long) count < (unsigned long) pos)
return false;
return true;
}
@@ -291,7 +291,6 @@ static struct inode *mqueue_get_inode(st
min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
sizeof(struct posix_msg_tree_node);
mq_bytes = info->attr.mq_maxmsg * info->attr.mq_msgsize;
- if (mq_bytes + mq_treesize < mq_bytes)
goto out_inode;
mq_bytes += mq_treesize;
spin_lock(&mq_lock);
@@ -1417,7 +1417,6 @@ long do_shmat(int shmid, char __user *sh
if (addr && !(shmflg & SHM_REMAP)) {
err = -EINVAL;
- if (addr + size < addr)
goto invalid;
if (find_vma_intersection(current->mm, addr, addr + size))
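For comparison, overflow checks like the ones removed above are also commonly written with the compiler builtin __builtin_add_overflow() (GCC 5+ and Clang), which the kernel wraps as check_add_overflow() in <linux/overflow.h>. A standalone sketch, not part of this patch:

#include <stdbool.h>
#include <stdint.h>

/* Returns true on overflow and stores the wrapped sum through *sum,
 * avoiding the open-coded "a + b < a" comparison. */
static bool checked_add_u64(uint64_t a, uint64_t b, uint64_t *sum)
{
	return __builtin_add_overflow(a, b, sum);
}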