Message ID | 20190624055253.31183-5-hch@lst.de (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Series | [01/12] list.h: add a list_pop helper | expand |
On Mon, Jun 24, 2019 at 07:52:45AM +0200, Christoph Hellwig wrote: > Currently we don't overwrite the flags field in the iomap in > xfs_bmbt_to_iomap. This works fine with 0-initialized iomaps on stack, > but is harmful once we want to be able to reuse an iomap in the > writeback code. Is that going to affect all the other iomap users, or is xfs the only one that assumes zero-initialized iomaps being passed into ->iomap_begin? --D > Replace the shared paramter with a set of initial > flags an thus ensures the flags field is always reinitialized. > > Signed-off-by: Christoph Hellwig <hch@lst.de> > --- > fs/xfs/xfs_iomap.c | 28 +++++++++++++++++----------- > fs/xfs/xfs_iomap.h | 2 +- > fs/xfs/xfs_pnfs.c | 2 +- > 3 files changed, 19 insertions(+), 13 deletions(-) > > diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c > index 63d323916bba..6b29452bfba0 100644 > --- a/fs/xfs/xfs_iomap.c > +++ b/fs/xfs/xfs_iomap.c > @@ -57,7 +57,7 @@ xfs_bmbt_to_iomap( > struct xfs_inode *ip, > struct iomap *iomap, > struct xfs_bmbt_irec *imap, > - bool shared) > + u16 flags) > { > struct xfs_mount *mp = ip->i_mount; > > @@ -82,12 +82,11 @@ xfs_bmbt_to_iomap( > iomap->length = XFS_FSB_TO_B(mp, imap->br_blockcount); > iomap->bdev = xfs_find_bdev_for_inode(VFS_I(ip)); > iomap->dax_dev = xfs_find_daxdev_for_inode(VFS_I(ip)); > + iomap->flags = flags; > > if (xfs_ipincount(ip) && > (ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP)) > iomap->flags |= IOMAP_F_DIRTY; > - if (shared) > - iomap->flags |= IOMAP_F_SHARED; > return 0; > } > > @@ -543,6 +542,7 @@ xfs_file_iomap_begin_delay( > struct xfs_iext_cursor icur, ccur; > xfs_fsblock_t prealloc_blocks = 0; > bool eof = false, cow_eof = false, shared = false; > + u16 iomap_flags = 0; > int whichfork = XFS_DATA_FORK; > int error = 0; > > @@ -710,7 +710,7 @@ xfs_file_iomap_begin_delay( > * Flag newly allocated delalloc blocks with IOMAP_F_NEW so we punch > * them out if the write happens to fail. 
> */ > - iomap->flags |= IOMAP_F_NEW; > + iomap_flags |= IOMAP_F_NEW; > trace_xfs_iomap_alloc(ip, offset, count, whichfork, > whichfork == XFS_DATA_FORK ? &imap : &cmap); > done: > @@ -718,14 +718,17 @@ xfs_file_iomap_begin_delay( > if (imap.br_startoff > offset_fsb) { > xfs_trim_extent(&cmap, offset_fsb, > imap.br_startoff - offset_fsb); > - error = xfs_bmbt_to_iomap(ip, iomap, &cmap, true); > + error = xfs_bmbt_to_iomap(ip, iomap, &cmap, > + IOMAP_F_SHARED); > goto out_unlock; > } > /* ensure we only report blocks we have a reservation for */ > xfs_trim_extent(&imap, cmap.br_startoff, cmap.br_blockcount); > shared = true; > } > - error = xfs_bmbt_to_iomap(ip, iomap, &imap, shared); > + if (shared) > + iomap_flags |= IOMAP_F_SHARED; > + error = xfs_bmbt_to_iomap(ip, iomap, &imap, iomap_flags); > out_unlock: > xfs_iunlock(ip, XFS_ILOCK_EXCL); > return error; > @@ -933,6 +936,7 @@ xfs_file_iomap_begin( > xfs_fileoff_t offset_fsb, end_fsb; > int nimaps = 1, error = 0; > bool shared = false; > + u16 iomap_flags = 0; > unsigned lockmode; > > if (XFS_FORCED_SHUTDOWN(mp)) > @@ -1048,11 +1052,13 @@ xfs_file_iomap_begin( > if (error) > return error; > > - iomap->flags |= IOMAP_F_NEW; > + iomap_flags |= IOMAP_F_NEW; > trace_xfs_iomap_alloc(ip, offset, length, XFS_DATA_FORK, &imap); > > out_finish: > - return xfs_bmbt_to_iomap(ip, iomap, &imap, shared); > + if (shared) > + iomap_flags |= IOMAP_F_SHARED; > + return xfs_bmbt_to_iomap(ip, iomap, &imap, iomap_flags); > > out_found: > ASSERT(nimaps); > @@ -1196,7 +1202,7 @@ xfs_seek_iomap_begin( > if (data_fsb < cow_fsb + cmap.br_blockcount) > end_fsb = min(end_fsb, data_fsb); > xfs_trim_extent(&cmap, offset_fsb, end_fsb); > - error = xfs_bmbt_to_iomap(ip, iomap, &cmap, true); > + error = xfs_bmbt_to_iomap(ip, iomap, &cmap, IOMAP_F_SHARED); > /* > * This is a COW extent, so we must probe the page cache > * because there could be dirty page cache being backed > @@ -1218,7 +1224,7 @@ xfs_seek_iomap_begin( > imap.br_state = 
XFS_EXT_NORM; > done: > xfs_trim_extent(&imap, offset_fsb, end_fsb); > - error = xfs_bmbt_to_iomap(ip, iomap, &imap, false); > + error = xfs_bmbt_to_iomap(ip, iomap, &imap, 0); > out_unlock: > xfs_iunlock(ip, lockmode); > return error; > @@ -1264,7 +1270,7 @@ xfs_xattr_iomap_begin( > if (error) > return error; > ASSERT(nimaps); > - return xfs_bmbt_to_iomap(ip, iomap, &imap, false); > + return xfs_bmbt_to_iomap(ip, iomap, &imap, 0); > } > > const struct iomap_ops xfs_xattr_iomap_ops = { > diff --git a/fs/xfs/xfs_iomap.h b/fs/xfs/xfs_iomap.h > index 5c2f6aa6d78f..71d0ae460c44 100644 > --- a/fs/xfs/xfs_iomap.h > +++ b/fs/xfs/xfs_iomap.h > @@ -16,7 +16,7 @@ int xfs_iomap_write_direct(struct xfs_inode *, xfs_off_t, size_t, > int xfs_iomap_write_unwritten(struct xfs_inode *, xfs_off_t, xfs_off_t, bool); > > int xfs_bmbt_to_iomap(struct xfs_inode *, struct iomap *, > - struct xfs_bmbt_irec *, bool shared); > + struct xfs_bmbt_irec *, u16); > xfs_extlen_t xfs_eof_alignment(struct xfs_inode *ip, xfs_extlen_t extsize); > > static inline xfs_filblks_t > diff --git a/fs/xfs/xfs_pnfs.c b/fs/xfs/xfs_pnfs.c > index bde2c9f56a46..12f664785248 100644 > --- a/fs/xfs/xfs_pnfs.c > +++ b/fs/xfs/xfs_pnfs.c > @@ -185,7 +185,7 @@ xfs_fs_map_blocks( > } > xfs_iunlock(ip, XFS_IOLOCK_EXCL); > > - error = xfs_bmbt_to_iomap(ip, iomap, &imap, false); > + error = xfs_bmbt_to_iomap(ip, iomap, &imap, 0); > *device_generation = mp->m_generation; > return error; > out_unlock: > -- > 2.20.1 >
On Mon, Jun 24, 2019 at 07:57:07AM -0700, Darrick J. Wong wrote: > On Mon, Jun 24, 2019 at 07:52:45AM +0200, Christoph Hellwig wrote: > > Currently we don't overwrite the flags field in the iomap in > > xfs_bmbt_to_iomap. This works fine with 0-initialized iomaps on stack, > > but is harmful once we want to be able to reuse an iomap in the > > writeback code. > > Is that going to affect all the other iomap users, or is xfs the only > one that assumes zero-initialized iomaps being passed into > ->iomap_begin? This doesn't affect any existing user as they all get a zeroed iomap passed from the caller in iomap.c. It affects the writeback code once it uses struct iomap as it overwrites a previously used iomap.
On Tue, Jun 25, 2019 at 12:07:01PM +0200, Christoph Hellwig wrote: > On Mon, Jun 24, 2019 at 07:57:07AM -0700, Darrick J. Wong wrote: > > On Mon, Jun 24, 2019 at 07:52:45AM +0200, Christoph Hellwig wrote: > > > Currently we don't overwrite the flags field in the iomap in > > > xfs_bmbt_to_iomap. This works fine with 0-initialized iomaps on stack, > > > but is harmful once we want to be able to reuse an iomap in the > > > writeback code. > > > > Is that going to affect all the other iomap users, or is xfs the only > > one that assumes zero-initialized iomaps being passed into > > ->iomap_begin? > > This doesn't affect any existing user as they all get a zeroed iomap > passed from the caller in iomap.c. It affects the writeback code > once it uses struct iomap as it overwrites a previously used iomap. Then shouldn't this new writeback code zero the iomap before calling back into the filesystem, just to maintain consistent behavior? --D
On Tue, Jun 25, 2019 at 08:13:57AM -0700, Darrick J. Wong wrote: > > This doesn't affect any existing user as they all get a zeroed iomap > > passed from the caller in iomap.c. It affects the writeback code > > once it uses struct iomap as it overwrites a previously used iomap. > > Then shouldn't this new writeback code zero the iomap before calling > back into the filesystem, just to maintain consistent behavior? The core code doesn't decide when to overwrite it, the ->map_blocks method does that based on a few factors (including the data_seq/cow_seq counters that are entirely inside xfs).
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c index 63d323916bba..6b29452bfba0 100644 --- a/fs/xfs/xfs_iomap.c +++ b/fs/xfs/xfs_iomap.c @@ -57,7 +57,7 @@ xfs_bmbt_to_iomap( struct xfs_inode *ip, struct iomap *iomap, struct xfs_bmbt_irec *imap, - bool shared) + u16 flags) { struct xfs_mount *mp = ip->i_mount; @@ -82,12 +82,11 @@ xfs_bmbt_to_iomap( iomap->length = XFS_FSB_TO_B(mp, imap->br_blockcount); iomap->bdev = xfs_find_bdev_for_inode(VFS_I(ip)); iomap->dax_dev = xfs_find_daxdev_for_inode(VFS_I(ip)); + iomap->flags = flags; if (xfs_ipincount(ip) && (ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP)) iomap->flags |= IOMAP_F_DIRTY; - if (shared) - iomap->flags |= IOMAP_F_SHARED; return 0; } @@ -543,6 +542,7 @@ xfs_file_iomap_begin_delay( struct xfs_iext_cursor icur, ccur; xfs_fsblock_t prealloc_blocks = 0; bool eof = false, cow_eof = false, shared = false; + u16 iomap_flags = 0; int whichfork = XFS_DATA_FORK; int error = 0; @@ -710,7 +710,7 @@ xfs_file_iomap_begin_delay( * Flag newly allocated delalloc blocks with IOMAP_F_NEW so we punch * them out if the write happens to fail. */ - iomap->flags |= IOMAP_F_NEW; + iomap_flags |= IOMAP_F_NEW; trace_xfs_iomap_alloc(ip, offset, count, whichfork, whichfork == XFS_DATA_FORK ? 
&imap : &cmap); done: @@ -718,14 +718,17 @@ xfs_file_iomap_begin_delay( if (imap.br_startoff > offset_fsb) { xfs_trim_extent(&cmap, offset_fsb, imap.br_startoff - offset_fsb); - error = xfs_bmbt_to_iomap(ip, iomap, &cmap, true); + error = xfs_bmbt_to_iomap(ip, iomap, &cmap, + IOMAP_F_SHARED); goto out_unlock; } /* ensure we only report blocks we have a reservation for */ xfs_trim_extent(&imap, cmap.br_startoff, cmap.br_blockcount); shared = true; } - error = xfs_bmbt_to_iomap(ip, iomap, &imap, shared); + if (shared) + iomap_flags |= IOMAP_F_SHARED; + error = xfs_bmbt_to_iomap(ip, iomap, &imap, iomap_flags); out_unlock: xfs_iunlock(ip, XFS_ILOCK_EXCL); return error; @@ -933,6 +936,7 @@ xfs_file_iomap_begin( xfs_fileoff_t offset_fsb, end_fsb; int nimaps = 1, error = 0; bool shared = false; + u16 iomap_flags = 0; unsigned lockmode; if (XFS_FORCED_SHUTDOWN(mp)) @@ -1048,11 +1052,13 @@ xfs_file_iomap_begin( if (error) return error; - iomap->flags |= IOMAP_F_NEW; + iomap_flags |= IOMAP_F_NEW; trace_xfs_iomap_alloc(ip, offset, length, XFS_DATA_FORK, &imap); out_finish: - return xfs_bmbt_to_iomap(ip, iomap, &imap, shared); + if (shared) + iomap_flags |= IOMAP_F_SHARED; + return xfs_bmbt_to_iomap(ip, iomap, &imap, iomap_flags); out_found: ASSERT(nimaps); @@ -1196,7 +1202,7 @@ xfs_seek_iomap_begin( if (data_fsb < cow_fsb + cmap.br_blockcount) end_fsb = min(end_fsb, data_fsb); xfs_trim_extent(&cmap, offset_fsb, end_fsb); - error = xfs_bmbt_to_iomap(ip, iomap, &cmap, true); + error = xfs_bmbt_to_iomap(ip, iomap, &cmap, IOMAP_F_SHARED); /* * This is a COW extent, so we must probe the page cache * because there could be dirty page cache being backed @@ -1218,7 +1224,7 @@ xfs_seek_iomap_begin( imap.br_state = XFS_EXT_NORM; done: xfs_trim_extent(&imap, offset_fsb, end_fsb); - error = xfs_bmbt_to_iomap(ip, iomap, &imap, false); + error = xfs_bmbt_to_iomap(ip, iomap, &imap, 0); out_unlock: xfs_iunlock(ip, lockmode); return error; @@ -1264,7 +1270,7 @@ xfs_xattr_iomap_begin( if 
(error) return error; ASSERT(nimaps); - return xfs_bmbt_to_iomap(ip, iomap, &imap, false); + return xfs_bmbt_to_iomap(ip, iomap, &imap, 0); } const struct iomap_ops xfs_xattr_iomap_ops = { diff --git a/fs/xfs/xfs_iomap.h b/fs/xfs/xfs_iomap.h index 5c2f6aa6d78f..71d0ae460c44 100644 --- a/fs/xfs/xfs_iomap.h +++ b/fs/xfs/xfs_iomap.h @@ -16,7 +16,7 @@ int xfs_iomap_write_direct(struct xfs_inode *, xfs_off_t, size_t, int xfs_iomap_write_unwritten(struct xfs_inode *, xfs_off_t, xfs_off_t, bool); int xfs_bmbt_to_iomap(struct xfs_inode *, struct iomap *, - struct xfs_bmbt_irec *, bool shared); + struct xfs_bmbt_irec *, u16); xfs_extlen_t xfs_eof_alignment(struct xfs_inode *ip, xfs_extlen_t extsize); static inline xfs_filblks_t diff --git a/fs/xfs/xfs_pnfs.c b/fs/xfs/xfs_pnfs.c index bde2c9f56a46..12f664785248 100644 --- a/fs/xfs/xfs_pnfs.c +++ b/fs/xfs/xfs_pnfs.c @@ -185,7 +185,7 @@ xfs_fs_map_blocks( } xfs_iunlock(ip, XFS_IOLOCK_EXCL); - error = xfs_bmbt_to_iomap(ip, iomap, &imap, false); + error = xfs_bmbt_to_iomap(ip, iomap, &imap, 0); *device_generation = mp->m_generation; return error; out_unlock:
Currently we don't overwrite the flags field in the iomap in xfs_bmbt_to_iomap. This works fine with 0-initialized iomaps on stack, but is harmful once we want to be able to reuse an iomap in the writeback code. Replace the shared parameter with a set of initial flags and thus ensure the flags field is always reinitialized. Signed-off-by: Christoph Hellwig <hch@lst.de> --- fs/xfs/xfs_iomap.c | 28 +++++++++++++++++----------- fs/xfs/xfs_iomap.h | 2 +- fs/xfs/xfs_pnfs.c | 2 +- 3 files changed, 19 insertions(+), 13 deletions(-)