@@ -1448,12 +1448,8 @@ static void __bio_unmap_user(struct bio *bio)
 	/*
 	 * make sure we dirty pages we wrote to
 	 */
-	bio_for_each_segment_all(bvec, bio, i, iter_all) {
-		if (bio_data_dir(bio) == READ)
-			set_page_dirty_lock(bvec_page(bvec));
-
-		bvec_put_page(bvec);
-	}
+	bio_for_each_segment_all(bvec, bio, i, iter_all)
+		bvec_put_page_dirty_lock(bvec, bio_data_dir(bio) == READ);
 
 	bio_put(bio);
 }
@@ -261,11 +261,9 @@ __blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter,
 	}
 	__set_current_state(TASK_RUNNING);
 
-	bio_for_each_segment_all(bvec, &bio, i, iter_all) {
-		if (should_dirty && !PageCompound(bvec_page(bvec)))
-			set_page_dirty_lock(bvec_page(bvec));
-		bvec_put_page(bvec);
-	}
+	bio_for_each_segment_all(bvec, &bio, i, iter_all)
+		bvec_put_page_dirty_lock(bvec, should_dirty &&
+				!PageCompound(bvec_page(bvec)));
 
 	if (unlikely(bio.bi_status))
 		ret = blk_status_to_errno(bio.bi_status);
@@ -160,11 +160,7 @@ static void put_bvecs(struct bio_vec *bvecs, int num_bvecs, bool should_dirty)
 	int i;
 
 	for (i = 0; i < num_bvecs; i++) {
-		if (bvec_page(&bvecs[i])) {
-			if (should_dirty)
-				set_page_dirty_lock(bvec_page(&bvecs[i]));
-			bvec_put_page(&bvecs[i]);
-		}
+		bvec_put_page_dirty_lock(&bvecs[i], should_dirty);
 	}
 	kvfree(bvecs);
 }
@@ -800,11 +800,9 @@ cifs_aio_ctx_release(struct kref *refcount)
 	if (ctx->bv) {
 		unsigned i;
 
-		for (i = 0; i < ctx->npages; i++) {
-			if (ctx->should_dirty)
-				set_page_dirty(bvec_page(&ctx->bv[i]));
-			bvec_put_page(&ctx->bv[i]);
-		}
+		for (i = 0; i < ctx->npages; i++)
+			bvec_put_page_dirty_lock(&ctx->bv[i],
+						 ctx->should_dirty);
 		kvfree(ctx->bv);
 	}
 
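
For reference: all four call sites above now funnel the dirty-and-release logic
into bvec_put_page_dirty_lock(), whose definition is not part of this excerpt.
A minimal sketch of what such a helper presumably looks like, inferred only
from the call sites (the NULL-page check is suggested by the ceph caller, which
previously tested bvec_page() before releasing the page; the exact signature
and body below are assumptions, not the series' actual definition):

/* Sketch only, inferred from the converted call sites above. */
static inline void bvec_put_page_dirty_lock(struct bio_vec *bvec, bool dirty)
{
	struct page *page = bvec_page(bvec);

	/* tolerate sparse bvec arrays, as the ceph caller relied on */
	if (!page)
		return;

	if (dirty)
		set_page_dirty_lock(page);
	put_page(page);		/* drop the reference taken when the page was mapped */
}

Note that with one shared helper the cifs_aio_ctx_release() path above moves
from set_page_dirty() to the lock-taking variant.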