--- a/fs/btrfs/bio.c
+++ b/fs/btrfs/bio.c
@@ -337,7 +337,7 @@ static void btrfs_end_bio_work(struct work_struct *work)
 	struct btrfs_bio *bbio = container_of(work, struct btrfs_bio, end_io_work);
 
 	/* Metadata reads are checked and repaired by the submitter. */
-	if (is_data_bbio(bbio))
+	if (bio_op(&bbio->bio) == REQ_OP_READ && is_data_bbio(bbio))
 		btrfs_check_read_bio(bbio, bbio->bio.bi_private);
 	else
 		btrfs_bio_end_io(bbio, bbio->bio.bi_status);
@@ -354,7 +354,7 @@ static void btrfs_simple_end_io(struct bio *bio)
 	if (bio->bi_status)
 		btrfs_log_dev_io_error(bio, dev);
 
-	if (bio_op(bio) == REQ_OP_READ) {
+	if (bio_op(bio) == REQ_OP_READ || bbio->dropbehind_io) {
 		INIT_WORK(&bbio->end_io_work, btrfs_end_bio_work);
 		queue_work(btrfs_end_io_wq(fs_info, bio), &bbio->end_io_work);
 	} else {
--- a/fs/btrfs/bio.h
+++ b/fs/btrfs/bio.h
@@ -82,6 +82,8 @@ struct btrfs_bio {
 	/* Save the first error status of split bio. */
 	blk_status_t status;
 
+	bool dropbehind_io;
+
 	/*
 	 * This member must come last, bio_alloc_bioset will allocate enough
 	 * bytes for entire btrfs_bio but relies on bio being last.
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -758,8 +758,11 @@ static void submit_extent_folio(struct btrfs_bio_ctrl *bio_ctrl,
 	ASSERT(bio_ctrl->end_io_func);
 
 	if (bio_ctrl->bbio &&
-	    !btrfs_bio_is_contig(bio_ctrl, folio, disk_bytenr, pg_offset))
+	    !btrfs_bio_is_contig(bio_ctrl, folio, disk_bytenr, pg_offset)) {
+		if (folio_test_dropbehind(folio))
+			bio_ctrl->bbio->dropbehind_io = true;
 		submit_one_bio(bio_ctrl);
+	}
 
 	do {
 		u32 len = size;
@@ -777,6 +780,9 @@ static void submit_extent_folio(struct btrfs_bio_ctrl *bio_ctrl,
 			len = bio_ctrl->len_to_oe_boundary;
 		}
 
+		if (folio_test_dropbehind(folio))
+			bio_ctrl->bbio->dropbehind_io = true;
+
 		if (!bio_add_folio(&bio_ctrl->bbio->bio, folio, len, pg_offset)) {
 			/* bio full: move on to a new one */
 			submit_one_bio(bio_ctrl);
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -857,18 +857,17 @@ static gfp_t get_prepare_gfp_flags(struct inode *inode, bool nowait)
  */
 static noinline int prepare_one_folio(struct inode *inode, struct folio **folio_ret,
 				      loff_t pos, size_t write_bytes,
-				      bool force_uptodate, bool nowait)
+				      bool force_uptodate, fgf_t fgp_flags)
 {
 	unsigned long index = pos >> PAGE_SHIFT;
-	gfp_t mask = get_prepare_gfp_flags(inode, nowait);
-	fgf_t fgp_flags = (nowait ? FGP_WRITEBEGIN | FGP_NOWAIT : FGP_WRITEBEGIN);
+	gfp_t mask = get_prepare_gfp_flags(inode, fgp_flags & FGP_NOWAIT);
 	struct folio *folio;
 	int ret = 0;
 
 again:
 	folio = __filemap_get_folio(inode->i_mapping, index, fgp_flags, mask);
 	if (IS_ERR(folio)) {
-		if (nowait)
+		if (fgp_flags & FGP_NOWAIT)
 			ret = -EAGAIN;
 		else
 			ret = PTR_ERR(folio);
@@ -887,7 +886,7 @@ static noinline int prepare_one_folio(struct inode *inode, struct folio **folio_
 	if (ret) {
 		/* The folio is already unlocked. */
 		folio_put(folio);
-		if (!nowait && ret == -EAGAIN) {
+		if (!(fgp_flags & FGP_NOWAIT) && ret == -EAGAIN) {
 			ret = 0;
 			goto again;
 		}
@@ -1097,9 +1096,15 @@ ssize_t btrfs_buffered_write(struct kiocb *iocb, struct iov_iter *i)
 	const bool nowait = (iocb->ki_flags & IOCB_NOWAIT);
 	unsigned int bdp_flags = (nowait ? BDP_ASYNC : 0);
 	bool only_release_metadata = false;
+	fgf_t fgp_flags = FGP_WRITEBEGIN;
 
-	if (nowait)
+	if (nowait) {
 		ilock_flags |= BTRFS_ILOCK_TRY;
+		fgp_flags |= FGP_NOWAIT;
+	}
+
+	if (iocb->ki_flags & IOCB_DONTCACHE)
+		fgp_flags |= FGP_DONTCACHE;
 
 	ret = btrfs_inode_lock(BTRFS_I(inode), ilock_flags);
 	if (ret < 0)
@@ -1195,7 +1200,7 @@ ssize_t btrfs_buffered_write(struct kiocb *iocb, struct iov_iter *i)
 		}
 
 		ret = prepare_one_folio(inode, &folio, pos, write_bytes,
-					force_page_uptodate, false);
+					force_page_uptodate, fgp_flags);
 		if (ret) {
 			btrfs_delalloc_release_extents(BTRFS_I(inode),
 						       reserve_bytes);
@@ -3671,7 +3676,7 @@ const struct file_operations btrfs_file_operations = {
 #endif
 	.remap_file_range = btrfs_remap_file_range,
 	.uring_cmd	= btrfs_uring_cmd,
-	.fop_flags	= FOP_BUFFER_RASYNC | FOP_BUFFER_WASYNC,
+	.fop_flags	= FOP_BUFFER_RASYNC | FOP_BUFFER_WASYNC | FOP_DONTCACHE,
 };
 
 int btrfs_fdatawrite_range(struct btrfs_inode *inode, loff_t start, loff_t end)
The read side is already covered, as btrfs uses the generic filemap
helpers. For writes, pass FGP_DONTCACHE down to prepare_one_folio() when
uncached IO is being done, so that the folios created for the write are
marked appropriately.

For IO completion, ensure that completions of writeback on uncached
folios get punted to one of the btrfs workers, as task context is needed
for that. Add a 'dropbehind_io' member to struct btrfs_bio to manage
that.

With that in place, add FOP_DONTCACHE to the btrfs file_operations
fop_flags structure, enabling use of RWF_DONTCACHE.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
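As a usage note (illustration only, not part of the patch), the sketch
below exercises the new path from userspace via pwritev2(2)/preadv2(2)
with RWF_DONTCACHE on a btrfs file. The fallback #define uses the uapi
value from linux/fs.h in case the installed headers predate the flag;
on a kernel where the file's fops lack FOP_DONTCACHE, or where the flag
is unknown, the calls fail with -EOPNOTSUPP.

/*
 * Sketch: uncached buffered write + read with RWF_DONTCACHE.
 * Assumes a kernel with the uncached buffered IO series applied.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/uio.h>
#include <unistd.h>

#ifndef RWF_DONTCACHE
#define RWF_DONTCACHE	0x00000080	/* uapi value from linux/fs.h */
#endif

int main(int argc, char *argv[])
{
	static char buf[1 << 20];	/* 1MB of buffered, uncached IO */
	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
	ssize_t ret;
	int fd;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <file>\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDWR | O_CREAT | O_TRUNC, 0644);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	memset(buf, 0x5a, sizeof(buf));

	/* Buffered write; folios are dropped once writeback completes. */
	ret = pwritev2(fd, &iov, 1, 0, RWF_DONTCACHE);
	if (ret < 0)
		perror("pwritev2(RWF_DONTCACHE)");

	/* Buffered read; served through the page cache, then dropped. */
	ret = preadv2(fd, &iov, 1, 0, RWF_DONTCACHE);
	if (ret < 0)
		perror("preadv2(RWF_DONTCACHE)");

	close(fd);
	return ret < 0;
}

The IO still goes through the page cache for coherency with other
buffered IO, but the folios are marked dropbehind and reclaimed once
writeback (or the read) completes, rather than lingering and pushing
out other cache contents.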