Message ID: 20241219173954.22546-7-hch@lst.de (mailing list archive)
State:      New
Series:     [01/10] iomap: allow the file system to submit the writeback bios
On Thu, Dec 19, 2024 at 05:39:11PM +0000, Christoph Hellwig wrote:
> Split out the struct iomap-dio level final completion from
> iomap_dio_bio_end_io into a helper to clean up the code and make it
> reusable.
>
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>  fs/iomap/direct-io.c | 76 ++++++++++++++++++++++----------------
>  1 file changed, 38 insertions(+), 38 deletions(-)
>
> diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c
> index 641649a04614..ed658eb09a1a 100644
> --- a/fs/iomap/direct-io.c
> +++ b/fs/iomap/direct-io.c
> @@ -165,43 +165,31 @@ static inline void iomap_dio_set_error(struct iomap_dio *dio, int ret)
>  	cmpxchg(&dio->error, 0, ret);
>  }
>
> -void iomap_dio_bio_end_io(struct bio *bio)
> +/*
> + * Called when dio->ref reaches zero from and I/O completion.

an I/O completion

This hoist looks fine to me, so with that fixed:
Reviewed-by: "Darrick J. Wong" <djwong@kernel.org>

--D

> + */
> +static void iomap_dio_done(struct iomap_dio *dio)
>  {
> -	struct iomap_dio *dio = bio->bi_private;
> -	bool should_dirty = (dio->flags & IOMAP_DIO_DIRTY);
>  	struct kiocb *iocb = dio->iocb;
>
> -	if (bio->bi_status)
> -		iomap_dio_set_error(dio, blk_status_to_errno(bio->bi_status));
> -	if (!atomic_dec_and_test(&dio->ref))
> -		goto release_bio;
> -
> -	/*
> -	 * Synchronous dio, task itself will handle any completion work
> -	 * that needs after IO. All we need to do is wake the task.
> -	 */
>  	if (dio->wait_for_completion) {
> +		/*
> +		 * Synchronous I/O, task itself will handle any completion work
> +		 * that needs after IO. All we need to do is wake the task.
> +		 */
>  		struct task_struct *waiter = dio->submit.waiter;
>
>  		WRITE_ONCE(dio->submit.waiter, NULL);
>  		blk_wake_io_task(waiter);
> -		goto release_bio;
> -	}
> -
> -	/*
> -	 * Flagged with IOMAP_DIO_INLINE_COMP, we can complete it inline
> -	 */
> -	if (dio->flags & IOMAP_DIO_INLINE_COMP) {
> +	} else if (dio->flags & IOMAP_DIO_INLINE_COMP) {
>  		WRITE_ONCE(iocb->private, NULL);
>  		iomap_dio_complete_work(&dio->aio.work);
> -		goto release_bio;
> -	}
> -
> -	/*
> -	 * If this dio is flagged with IOMAP_DIO_CALLER_COMP, then schedule
> -	 * our completion that way to avoid an async punt to a workqueue.
> -	 */
> -	if (dio->flags & IOMAP_DIO_CALLER_COMP) {
> +	} else if (dio->flags & IOMAP_DIO_CALLER_COMP) {
> +		/*
> +		 * If this dio is flagged with IOMAP_DIO_CALLER_COMP, then
> +		 * schedule our completion that way to avoid an async punt to a
> +		 * workqueue.
> +		 */
>  		/* only polled IO cares about private cleared */
>  		iocb->private = dio;
>  		iocb->dio_complete = iomap_dio_deferred_complete;
> @@ -219,19 +207,31 @@ void iomap_dio_bio_end_io(struct bio *bio)
>  		 * issuer.
>  		 */
>  		iocb->ki_complete(iocb, 0);
> -		goto release_bio;
> +	} else {
> +		struct inode *inode = file_inode(iocb->ki_filp);
> +
> +		/*
> +		 * Async DIO completion that requires filesystem level
> +		 * completion work gets punted to a work queue to complete as
> +		 * the operation may require more IO to be issued to finalise
> +		 * filesystem metadata changes or guarantee data integrity.
> +		 */
> +		INIT_WORK(&dio->aio.work, iomap_dio_complete_work);
> +		queue_work(inode->i_sb->s_dio_done_wq, &dio->aio.work);
>  	}
> +}
> +
> +void iomap_dio_bio_end_io(struct bio *bio)
> +{
> +	struct iomap_dio *dio = bio->bi_private;
> +	bool should_dirty = (dio->flags & IOMAP_DIO_DIRTY);
> +
> +	if (bio->bi_status)
> +		iomap_dio_set_error(dio, blk_status_to_errno(bio->bi_status));
> +
> +	if (atomic_dec_and_test(&dio->ref))
> +		iomap_dio_done(dio);
>
> -	/*
> -	 * Async DIO completion that requires filesystem level completion work
> -	 * gets punted to a work queue to complete as the operation may require
> -	 * more IO to be issued to finalise filesystem metadata changes or
> -	 * guarantee data integrity.
> -	 */
> -	INIT_WORK(&dio->aio.work, iomap_dio_complete_work);
> -	queue_work(file_inode(iocb->ki_filp)->i_sb->s_dio_done_wq,
> -			&dio->aio.work);
> -release_bio:
>  	if (should_dirty) {
>  		bio_check_pages_dirty(bio);
>  	} else {
> --
> 2.45.2
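
The "make it reusable" point from the commit message is easy to picture: once the final-completion logic lives in iomap_dio_done(), any completion path that drops the last reference on the dio can call the helper instead of open-coding the wake/inline/workqueue decision. Below is a minimal sketch of such a caller; the function name and its int error argument are hypothetical, and only iomap_dio_set_error(), dio->ref and iomap_dio_done() come from the patch above.

/*
 * Hypothetical non-bio completion path reusing the new helper.  The
 * function and its calling convention are illustrative only; how the
 * rest of the series actually reuses iomap_dio_done() may differ.
 */
static void example_dio_end_io(struct iomap_dio *dio, int error)
{
	if (error)
		iomap_dio_set_error(dio, error);

	/* The context that drops the final reference runs the completion. */
	if (atomic_dec_and_test(&dio->ref))
		iomap_dio_done(dio);
}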