@@ -1637,6 +1637,7 @@ iomap_ioend_try_merge(struct iomap_ioend *ioend, struct list_head *more_ioends)
break;
list_move_tail(&next->io_list, &ioend->io_list);
ioend->io_size += next->io_size;
+ ioend->io_end = next->io_end;
}
}
EXPORT_SYMBOL_GPL(iomap_ioend_try_merge);
@@ -1723,6 +1724,7 @@ static struct iomap_ioend *iomap_alloc_ioend(struct iomap_writepage_ctx *wpc,
ioend->io_inode = inode;
ioend->io_size = 0;
ioend->io_offset = pos;
+ ioend->io_end = pos;
ioend->io_sector = bio->bi_iter.bi_sector;
wpc->nr_folios = 0;
@@ -1768,6 +1770,7 @@ static int iomap_add_to_ioend(struct iomap_writepage_ctx *wpc,
{
struct iomap_folio_state *ifs = folio->private;
size_t poff = offset_in_folio(folio, pos);
+ loff_t isize = i_size_read(inode);
int error;
if (!wpc->ioend || !iomap_can_add_to_ioend(wpc, pos)) {
@@ -1784,6 +1787,7 @@ static int iomap_add_to_ioend(struct iomap_writepage_ctx *wpc,
if (ifs)
atomic_add(len, &ifs->write_bytes_pending);
wpc->ioend->io_size += len;
+ wpc->ioend->io_end = min_t(loff_t, pos + len, isize);
wbc_account_cgroup_owner(wbc, folio, len);
return 0;
}
@@ -37,8 +37,8 @@ XFS_WPC(struct iomap_writepage_ctx *ctx)
*/
static inline bool xfs_ioend_is_append(struct iomap_ioend *ioend)
{
- return ioend->io_offset + ioend->io_size >
- XFS_I(ioend->io_inode)->i_disk_size;
+ WARN_ON_ONCE(ioend->io_end > ioend->io_offset + ioend->io_size);
+ return ioend->io_end > XFS_I(ioend->io_inode)->i_disk_size;
}
/*
@@ -86,6 +86,7 @@ xfs_end_ioend(
struct xfs_inode *ip = XFS_I(ioend->io_inode);
struct xfs_mount *mp = ip->i_mount;
xfs_off_t offset = ioend->io_offset;
+ xfs_off_t end = ioend->io_end;
size_t size = ioend->io_size;
unsigned int nofs_flag;
int error;
@@ -131,7 +132,7 @@ xfs_end_ioend(
error = xfs_iomap_write_unwritten(ip, offset, size, false);
if (!error && xfs_ioend_is_append(ioend))
- error = xfs_setfilesize(ip, ioend->io_offset, ioend->io_size);
+ error = xfs_setfilesize(ip, offset, end - offset);
done:
iomap_finish_ioends(ioend, error);
memalloc_nofs_restore(nofs_flag);
@@ -332,6 +332,7 @@ struct iomap_ioend {
struct inode *io_inode; /* file being written to */
size_t io_size; /* size of the extent */
loff_t io_offset; /* offset in the file */
+ loff_t io_end; /* end of valid data */
sector_t io_sector; /* start sector of ioend */
struct bio io_bio; /* MUST BE LAST! */
};
During concurrent append writes to an XFS filesystem, zero padding data
may appear in the file after power failure. This happens due to imprecise
disk size updates when handling write completion.

Consider this scenario with concurrent append writes to the same file:

  Thread 1:                      Thread 2:
  ------------                   -----------
  write [A, A+B]
  update inode size to A+B
  submit I/O [A, A+BS]
                                 write [A+B, A+B+C]
                                 update inode size to A+B+C
  <I/O completes, updates disk size to A+B+C>
  <power failure>

After reboot, file has zero padding in range [A+B, A+B+C]:

  |<       Block Size (BS)       >|
  |DDDDDDDDDDDDDDDD0000000000000000|
  ^               ^               ^
  A              A+B           A+B+C
                                (EOF)

  D = Valid Data
  0 = Zero Padding

The issue stems from disk size being set to min(io_offset + io_size,
inode->i_size) at I/O completion. Since io_offset + io_size is at block
size granularity, it may exceed the actual valid file data size. In the
case of concurrent append writes, inode->i_size may be larger than the
actual range of valid file data written to disk, leading to inaccurate
disk size updates.

This patch introduces ioend->io_end to track the end position of the
valid data in the ioend, rather than solely relying on ioend->io_size.
It ensures more precise disk size updates and avoids the zero padding
issue. Another benefit is that it makes the xfs_ioend_is_append() check
more accurate, which can reduce unnecessary end bio callbacks of
xfs_end_bio() in certain scenarios, such as repeated writes at the file
tail without extending the file size.

Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
Signed-off-by: Long Li <leo.lilong@huawei.com>
---
 fs/iomap/buffered-io.c | 4 ++++
 fs/xfs/xfs_aops.c      | 7 ++++---
 include/linux/iomap.h  | 1 +
 3 files changed, 9 insertions(+), 3 deletions(-)