@@ -1399,7 +1399,7 @@ void bio_free_pages(struct bio *bio)
EXPORT_SYMBOL(bio_free_pages);
/*
- * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
+ * bio_set_pages_dirty() and bio_queue_for_redirty() are support functions
* for performing direct-IO in BIOs.
*
* The problem is that we cannot run set_page_dirty() from interrupt context
@@ -1438,17 +1438,6 @@ void bio_set_pages_dirty(struct bio *bio)
}
}
-/*
- * bio_check_pages_dirty() will check that all the BIO's pages are still dirty.
- * If they are, then fine. If, however, some pages are clean then they must
- * have been written out during the direct-IO read. So we take another ref on
- * the BIO and re-dirty the pages in process context.
- *
- * It is expected that bio_check_pages_dirty() will wholly own the BIO from
- * here on. It will run one put_page() against each page and will run one
- * bio_put() against the BIO.
- */
-
static void bio_dirty_fn(struct work_struct *work);
static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
@@ -1475,25 +1464,46 @@ static void bio_dirty_fn(struct work_struct *work)
}
}
-void bio_check_pages_dirty(struct bio *bio)
+/**
+ * bio_should_redirty_pages - check that all the BIO's pages are still dirty
+ * @bio: BIO to check
+ *
+ * Check if all pages in a direct read are still dirty. If they aren't, the
+ * caller must call bio_queue_for_redirty() to redirty all pages that have been
+ * marked clean.
+ */
+bool bio_should_redirty_pages(struct bio *bio)
{
- struct bio_vec *bvec;
- unsigned long flags;
struct bvec_iter_all iter_all;
+ struct bio_vec *bvec;
- bio_for_each_segment_all(bvec, bio, iter_all) {
+ bio_for_each_segment_all(bvec, bio, iter_all)
if (!PageDirty(bvec->bv_page) && !PageCompound(bvec->bv_page))
- goto defer;
- }
+ return true;
+ return false;
+}
+
+/**
+ * bio_queue_for_redirty - queue up a bio to ensure all pages are marked dirty
+ * @bio: BIO whose pages should be redirtied
+ *
+ * If some pages are clean after a direct read has completed, then they must
+ * have been written out during the direct read. Take another ref on the BIO
+ * and re-dirty the pages in process context.
+ *
+ * It is expected that bio_queue_for_redirty() owns the pages in the BIO from
+ * here on. It will run one put_page() against each page once finished.
+ */
+void bio_queue_for_redirty(struct bio *bio)
+{
+ unsigned long flags;
+
+ bio_get(bio);
- bio_release_pages(bio, false);
- bio_put(bio);
- return;
-defer:
spin_lock_irqsave(&bio_dirty_lock, flags);
bio->bi_private = bio_dirty_list;
bio_dirty_list = bio;
spin_unlock_irqrestore(&bio_dirty_lock, flags);
+
schedule_work(&bio_dirty_work);
}
@@ -159,12 +159,11 @@ static void blkdev_bio_end_io(struct bio *bio)
}
}
- if (should_dirty) {
- bio_check_pages_dirty(bio);
- } else {
+ if (should_dirty && bio_should_redirty_pages(bio))
+ bio_queue_for_redirty(bio);
+ else
bio_release_pages(bio, false);
- bio_put(bio);
- }
+ bio_put(bio);
}
static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
@@ -283,12 +282,11 @@ static void blkdev_bio_end_io_async(struct bio *bio)
iocb->ki_complete(iocb, ret);
- if (dio->flags & DIO_SHOULD_DIRTY) {
- bio_check_pages_dirty(bio);
- } else {
+ if ((dio->flags & DIO_SHOULD_DIRTY) && bio_should_redirty_pages(bio))
+ bio_queue_for_redirty(bio);
+ else
bio_release_pages(bio, false);
- bio_put(bio);
- }
+ bio_put(bio);
}
static ssize_t __blkdev_direct_IO_async(struct kiocb *iocb,
@@ -410,7 +410,7 @@ dio_bio_alloc(struct dio *dio, struct dio_submit *sdio,
/*
* In the AIO read case we speculatively dirty the pages before starting IO.
* During IO completion, any of these pages which happen to have been written
- * back will be redirtied by bio_check_pages_dirty().
+ * back will be redirtied by bio_queue_for_redirty().
*
* bios hold a dio reference between submit_bio and ->end_io.
*/
@@ -504,12 +504,11 @@ static blk_status_t dio_bio_complete(struct dio *dio, struct bio *bio)
dio->io_error = -EIO;
}
- if (dio->is_async && should_dirty) {
- bio_check_pages_dirty(bio); /* transfers ownership */
- } else {
+ if (dio->is_async && should_dirty && bio_should_redirty_pages(bio))
+ bio_queue_for_redirty(bio); /* transfers ownership */
+ else
bio_release_pages(bio, should_dirty);
- bio_put(bio);
- }
+ bio_put(bio);
return err;
}
@@ -179,12 +179,11 @@ void iomap_dio_bio_end_io(struct bio *bio)
}
}
- if (should_dirty) {
- bio_check_pages_dirty(bio);
- } else {
+ if (should_dirty && bio_should_redirty_pages(bio))
+ bio_queue_for_redirty(bio);
+ else
bio_release_pages(bio, false);
- bio_put(bio);
- }
+ bio_put(bio);
}
EXPORT_SYMBOL_GPL(iomap_dio_bio_end_io);
@@ -473,7 +473,8 @@ int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter);
void bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter);
void __bio_release_pages(struct bio *bio, bool mark_dirty);
extern void bio_set_pages_dirty(struct bio *bio);
-extern void bio_check_pages_dirty(struct bio *bio);
+bool bio_should_redirty_pages(struct bio *bio);
+void bio_queue_for_redirty(struct bio *bio);
extern void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
struct bio *src, struct bvec_iter *src_iter);
Split out bio_should_redirty_pages and bio_queue_for_redirty helpers and
let the callers use their normal bio release path for the non-redirty
case.

Note that this changes the refcounting for the redirty case as
bio_queue_for_redirty takes a reference now, which allows the caller to
unconditionally drop its reference instead of special casing this path.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 block/bio.c          | 54 ++++++++++++++++++++++++++------------------
 block/fops.c         | 18 +++++++--------
 fs/direct-io.c       | 11 ++++-----
 fs/iomap/direct-io.c |  9 ++++----
 include/linux/bio.h  |  3 ++-
 5 files changed, 51 insertions(+), 44 deletions(-)