@@ -4587,23 +4587,6 @@ static long ext4_zero_range(struct file *file, loff_t offset,
return ret;
}
- /* Wait all existing dio workers, newcomers will block on i_rwsem */
- inode_dio_wait(inode);
-
- ret = file_modified(file);
- if (ret)
- return ret;
-
- /*
- * Prevent page faults from reinstantiating pages we have released
- * from page cache.
- */
- filemap_invalidate_lock(mapping);
-
- ret = ext4_break_layouts(inode);
- if (ret)
- goto out_invalidate_lock;
-
/*
* For journalled data we need to write (and checkpoint) pages before
* discarding page cache to avoid inconsitent data on disk in case of
@@ -4616,7 +4599,7 @@ static long ext4_zero_range(struct file *file, loff_t offset,
ext4_truncate_folios_range(inode, offset, end);
}
if (ret)
- goto out_invalidate_lock;
+ return ret;
/* Now release the pages and zero block aligned part of pages */
truncate_pagecache_range(inode, offset, end - 1);
@@ -4630,7 +4613,7 @@ static long ext4_zero_range(struct file *file, loff_t offset,
ret = ext4_alloc_file_blocks(file, alloc_lblk, len_lblk,
new_size, flags);
if (ret)
- goto out_invalidate_lock;
+ return ret;
}
/* Zero range excluding the unaligned edges */
@@ -4643,11 +4626,11 @@ static long ext4_zero_range(struct file *file, loff_t offset,
ret = ext4_alloc_file_blocks(file, start_lblk, zero_blks,
new_size, flags);
if (ret)
- goto out_invalidate_lock;
+ return ret;
}
/* Finish zeroing out if it doesn't contain partial block */
if (!(offset & (blocksize - 1)) && !(end & (blocksize - 1)))
- goto out_invalidate_lock;
+ return ret;
/*
* In worst case we have to writeout two nonadjacent unwritten
@@ -4660,7 +4643,7 @@ static long ext4_zero_range(struct file *file, loff_t offset,
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
ext4_std_error(inode->i_sb, ret);
- goto out_invalidate_lock;
+ return ret;
}
/* Zero out partial block at the edges of the range */
@@ -4680,8 +4663,6 @@ static long ext4_zero_range(struct file *file, loff_t offset,
out_handle:
ext4_journal_stop(handle);
-out_invalidate_lock:
- filemap_invalidate_unlock(mapping);
return ret;
}
@@ -4714,13 +4695,6 @@ static long ext4_do_fallocate(struct file *file, loff_t offset,
goto out;
}
- /* Wait all existing dio workers, newcomers will block on i_rwsem */
- inode_dio_wait(inode);
-
- ret = file_modified(file);
- if (ret)
- goto out;
-
ret = ext4_alloc_file_blocks(file, start_lblk, len_lblk, new_size,
EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT);
if (ret)
@@ -4745,6 +4719,7 @@ static long ext4_do_fallocate(struct file *file, loff_t offset,
long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
{
struct inode *inode = file_inode(file);
+ struct address_space *mapping = file->f_mapping;
int ret;
/*
@@ -4768,6 +4743,29 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
if (ret)
goto out;
+ /* Wait all existing dio workers, newcomers will block on i_rwsem */
+ inode_dio_wait(inode);
+
+ ret = file_modified(file);
+ if (ret)
+		goto out;
+
+ if ((mode & FALLOC_FL_MODE_MASK) == FALLOC_FL_ALLOCATE_RANGE) {
+ ret = ext4_do_fallocate(file, offset, len, mode);
+ goto out;
+ }
+
+ /*
+ * Follow-up operations will drop page cache, hold invalidate lock
+ * to prevent page faults from reinstantiating pages we have
+ * released from page cache.
+ */
+ filemap_invalidate_lock(mapping);
+
+ ret = ext4_break_layouts(inode);
+ if (ret)
+ goto out_invalidate_lock;
+
if (mode & FALLOC_FL_PUNCH_HOLE)
ret = ext4_punch_hole(file, offset, len);
else if (mode & FALLOC_FL_COLLAPSE_RANGE)
@@ -4777,7 +4775,10 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
else if (mode & FALLOC_FL_ZERO_RANGE)
ret = ext4_zero_range(file, offset, len, mode);
else
- ret = ext4_do_fallocate(file, offset, len, mode);
+ ret = -EOPNOTSUPP;
+
+out_invalidate_lock:
+ filemap_invalidate_unlock(mapping);
out:
inode_unlock(inode);
return ret;
@@ -5304,23 +5305,6 @@ static int ext4_collapse_range(struct file *file, loff_t offset, loff_t len)
if (end >= inode->i_size)
return -EINVAL;
- /* Wait for existing dio to complete */
- inode_dio_wait(inode);
-
- ret = file_modified(file);
- if (ret)
- return ret;
-
- /*
- * Prevent page faults from reinstantiating pages we have released from
- * page cache.
- */
- filemap_invalidate_lock(mapping);
-
- ret = ext4_break_layouts(inode);
- if (ret)
- goto out_invalidate_lock;
-
/*
* Write tail of the last page before removed range and data that
* will be shifted since they will get removed from the page cache
@@ -5334,16 +5318,15 @@ static int ext4_collapse_range(struct file *file, loff_t offset, loff_t len)
if (!ret)
ret = filemap_write_and_wait_range(mapping, end, LLONG_MAX);
if (ret)
- goto out_invalidate_lock;
+ return ret;
truncate_pagecache(inode, start);
credits = ext4_writepage_trans_blocks(inode);
handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
- if (IS_ERR(handle)) {
- ret = PTR_ERR(handle);
- goto out_invalidate_lock;
- }
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+
ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_FALLOC_RANGE, handle);
start_lblk = offset >> inode->i_blkbits;
@@ -5382,8 +5365,6 @@ static int ext4_collapse_range(struct file *file, loff_t offset, loff_t len)
out_handle:
ext4_journal_stop(handle);
-out_invalidate_lock:
- filemap_invalidate_unlock(mapping);
return ret;
}
@@ -5424,23 +5405,6 @@ static int ext4_insert_range(struct file *file, loff_t offset, loff_t len)
if (len > inode->i_sb->s_maxbytes - inode->i_size)
return -EFBIG;
- /* Wait for existing dio to complete */
- inode_dio_wait(inode);
-
- ret = file_modified(file);
- if (ret)
- return ret;
-
- /*
- * Prevent page faults from reinstantiating pages we have released from
- * page cache.
- */
- filemap_invalidate_lock(mapping);
-
- ret = ext4_break_layouts(inode);
- if (ret)
- goto out_invalidate_lock;
-
/*
* Write out all dirty pages. Need to round down to align start offset
* to page size boundary for page size > block size.
@@ -5448,16 +5412,15 @@ static int ext4_insert_range(struct file *file, loff_t offset, loff_t len)
start = round_down(offset, PAGE_SIZE);
ret = filemap_write_and_wait_range(mapping, start, LLONG_MAX);
if (ret)
- goto out_invalidate_lock;
+ return ret;
truncate_pagecache(inode, start);
credits = ext4_writepage_trans_blocks(inode);
handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
- if (IS_ERR(handle)) {
- ret = PTR_ERR(handle);
- goto out_invalidate_lock;
- }
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+
ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_FALLOC_RANGE, handle);
/* Expand file to avoid data loss if there is error while shifting */
@@ -5528,8 +5491,6 @@ static int ext4_insert_range(struct file *file, loff_t offset, loff_t len)
out_handle:
ext4_journal_stop(handle);
-out_invalidate_lock:
- filemap_invalidate_unlock(mapping);
return ret;
}
@@ -3992,23 +3992,6 @@ int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
return ret;
}
- /* Wait all existing dio workers, newcomers will block on i_rwsem */
- inode_dio_wait(inode);
-
- ret = file_modified(file);
- if (ret)
- return ret;
-
- /*
- * Prevent page faults from reinstantiating pages we have released from
- * page cache.
- */
- filemap_invalidate_lock(mapping);
-
- ret = ext4_break_layouts(inode);
- if (ret)
- goto out_invalidate_lock;
-
/*
* For journalled data we need to write (and checkpoint) pages
* before discarding page cache to avoid inconsitent data on
@@ -4021,7 +4004,7 @@ int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
ext4_truncate_folios_range(inode, offset, end);
}
if (ret)
- goto out_invalidate_lock;
+ return ret;
/* Now release the pages and zero block aligned part of pages*/
truncate_pagecache_range(inode, offset, end - 1);
@@ -4034,7 +4017,7 @@ int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
ext4_std_error(sb, ret);
- goto out_invalidate_lock;
+ return ret;
}
ret = ext4_zero_partial_blocks(handle, inode, offset, length);
@@ -4079,8 +4062,6 @@ int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
ext4_handle_sync(handle);
out_handle:
ext4_journal_stop(handle);
-out_invalidate_lock:
- filemap_invalidate_unlock(mapping);
return ret;
}