--- a/block/bio.c
+++ b/block/bio.c
@@ -732,7 +732,7 @@ static bool bio_try_merge_pc_page(struct request_queue *q, struct bio *bio,
*/
static int __bio_add_pc_page(struct request_queue *q, struct bio *bio,
struct page *page, unsigned int len, unsigned int offset,
- bool *same_page)
+ bool *same_page, unsigned int max_sectors)
{
struct bio_vec *bvec;
@@ -742,7 +742,7 @@ static int __bio_add_pc_page(struct request_queue *q, struct bio *bio,
if (unlikely(bio_flagged(bio, BIO_CLONED)))
return 0;
- if (((bio->bi_iter.bi_size + len) >> 9) > queue_max_hw_sectors(q))
+ if (((bio->bi_iter.bi_size + len) >> 9) > max_sectors)
return 0;
if (bio->bi_vcnt > 0) {
@@ -777,10 +777,20 @@ int bio_add_pc_page(struct request_queue *q, struct bio *bio,
struct page *page, unsigned int len, unsigned int offset)
{
bool same_page = false;
- return __bio_add_pc_page(q, bio, page, len, offset, &same_page);
+ return __bio_add_pc_page(q, bio, page, len, offset, &same_page,
+ queue_max_hw_sectors(q));
}
EXPORT_SYMBOL(bio_add_pc_page);
+int bio_add_append_page(struct request_queue *q, struct bio *bio,
+ struct page *page, unsigned int len, unsigned int offset)
+{
+ bool same_page = false;
+ return __bio_add_pc_page(q, bio, page, len, offset, &same_page,
+ queue_max_zone_append_sectors(q));
+}
+EXPORT_SYMBOL(bio_add_append_page);
+
/**
* __bio_try_merge_page - try appending data to an existing bvec.
* @bio: destination bio
@@ -945,8 +955,15 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
len = min_t(size_t, PAGE_SIZE - offset, left);
- if (__bio_try_merge_page(bio, page, len, offset, &same_page)) {
- if (same_page)
+ if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
+ size = bio_add_append_page(bio->bi_disk->queue, bio,
+ page, len, offset);
+
+ if (size != len)
+ return -E2BIG;
+ } else if (__bio_try_merge_page(bio, page, len, offset,
+ &same_page)) {
+ if (same_page)
put_page(page);
} else {
if (WARN_ON_ONCE(bio_full(bio, len)))
@@ -1389,11 +1406,12 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
*/
struct bio *bio_map_user_iov(struct request_queue *q,
struct iov_iter *iter,
- gfp_t gfp_mask)
+ gfp_t gfp_mask, unsigned int op)
{
int j;
struct bio *bio;
int ret;
+ unsigned int max_sectors;
if (!iov_iter_count(iter))
return ERR_PTR(-EINVAL);
@@ -1402,6 +1420,11 @@ struct bio *bio_map_user_iov(struct request_queue *q,
if (!bio)
return ERR_PTR(-ENOMEM);
+ if (op == REQ_OP_ZONE_APPEND)
+ max_sectors = queue_max_zone_append_sectors(q);
+ else
+ max_sectors = queue_max_hw_sectors(q);
+
while (iov_iter_count(iter)) {
struct page **pages;
ssize_t bytes;
@@ -1429,7 +1452,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
n = bytes;
if (!__bio_add_pc_page(q, bio, page, n, offs,
- &same_page)) {
+ &same_page, max_sectors)) {
if (same_page)
put_page(page);
break;
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -72,7 +72,7 @@ static int __blk_rq_map_user_iov(struct request *rq,
if (copy)
bio = bio_copy_user_iov(q, map_data, iter, gfp_mask);
else
- bio = bio_map_user_iov(q, iter, gfp_mask);
+ bio = bio_map_user_iov(q, iter, gfp_mask, req_op(rq));
if (IS_ERR(bio))
return PTR_ERR(bio);
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -444,7 +444,7 @@ int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter);
void bio_release_pages(struct bio *bio, bool mark_dirty);
struct rq_map_data;
extern struct bio *bio_map_user_iov(struct request_queue *,
- struct iov_iter *, gfp_t);
+ struct iov_iter *, gfp_t, unsigned int);
extern void bio_unmap_user(struct bio *);
extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int,
gfp_t);
For REQ_OP_ZONE_APPEND we cannot add an unlimited number of pages to a
bio, as the bio cannot be split later on. This is similar to what we
have to do for passthrough pages as well, just with a different limit.

Introduce bio_add_append_page(), which can be used by file-systems to
add pages to a REQ_OP_ZONE_APPEND bio.

Signed-off-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
---
 block/bio.c         | 37 ++++++++++++++++++++++++++++++-------
 block/blk-map.c     |  2 +-
 include/linux/bio.h |  2 +-
 3 files changed, 32 insertions(+), 9 deletions(-)
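As an illustration of the intended use, here is a minimal sketch of how
a file-system could drive the new helper; everything except
bio_add_append_page() itself (the function name, its parameters, and the
GFP choice) is hypothetical and not part of this patch:

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/err.h>

/*
 * Hypothetical file-system helper: build a REQ_OP_ZONE_APPEND bio one
 * page at a time.  bio_add_append_page() caps the bio at
 * queue_max_zone_append_sectors() instead of queue_max_hw_sectors(),
 * because a zone append bio must never be split.
 */
static struct bio *example_zone_append_bio(struct block_device *bdev,
					   sector_t zone_start,
					   struct page **pages,
					   unsigned int nr_pages)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio;
	unsigned int i;

	bio = bio_alloc(GFP_NOFS, min_t(unsigned int, nr_pages,
					BIO_MAX_PAGES));
	if (!bio)
		return ERR_PTR(-ENOMEM);

	bio_set_dev(bio, bdev);
	/*
	 * The sector is the start of the target zone; the device reports
	 * the sector actually written on completion.
	 */
	bio->bi_iter.bi_sector = zone_start;
	bio->bi_opf = REQ_OP_ZONE_APPEND;

	for (i = 0; i < nr_pages; i++) {
		/*
		 * A short return means the zone append limit was reached;
		 * the caller must submit this bio and continue with a
		 * fresh one.
		 */
		if (bio_add_append_page(q, bio, pages[i], PAGE_SIZE, 0)
		    != PAGE_SIZE)
			break;
	}

	return bio;
}

The short-return case is the same condition that makes
__bio_iov_iter_get_pages() above fail with -E2BIG: once the zone append
limit is hit, no further pages may be added to the bio.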