@@ -229,7 +229,8 @@ static void bio_free(struct bio *bio)
WARN_ON_ONCE(!bs);
bio_uninit(bio);
- bvec_free(&bs->bvec_pool, bio->bi_io_vec, bio->bi_max_vecs);
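+ /*
+  * Dma-tagged bios carry no bvec array: bi_io_vec is unioned with the
+  * driver-owned bi_dma_tag, so there is nothing for bvec_free() here.
+  */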
+ if (!bio_flagged(bio, BIO_DMA_TAGGED))
+ bvec_free(&bs->bvec_pool, bio->bi_io_vec, bio->bi_max_vecs);
mempool_free(p - bs->front_pad, &bs->bio_pool);
}
@@ -762,6 +763,8 @@ static int __bio_clone(struct bio *bio, struct bio *bio_src, gfp_t gfp)
bio_set_flag(bio, BIO_CLONED);
if (bio_flagged(bio_src, BIO_THROTTLED))
bio_set_flag(bio, BIO_THROTTLED);
+ if (bio_flagged(bio_src, BIO_DMA_TAGGED))
+ bio_set_flag(bio, BIO_DMA_TAGGED);
bio->bi_ioprio = bio_src->bi_ioprio;
bio->bi_iter = bio_src->bi_iter;
@@ -1151,6 +1154,21 @@ void bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter)
bio_set_flag(bio, BIO_CLONED);
}
+static void bio_iov_dma_tag_set(struct bio *bio, struct iov_iter *iter)
+{
+ size_t size = iov_iter_count(iter);
+
+ bio->bi_vcnt = iter->nr_segs;
+ bio->bi_dma_tag = iter->dma_tag;
+ bio->bi_iter.bi_bvec_done = iter->iov_offset;
+ bio->bi_iter.bi_size = size;
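+ /*
+  * No page references are taken for the premapped buffer: flag the bio
+  * so completion skips the page release, and disallow merging.
+  */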
+ bio->bi_opf |= REQ_NOMERGE;
+ bio_set_flag(bio, BIO_NO_PAGE_REF);
+ bio_set_flag(bio, BIO_DMA_TAGGED);
+
+ iov_iter_advance(iter, bio->bi_iter.bi_size);
+}
+
static int bio_iov_add_page(struct bio *bio, struct page *page,
unsigned int len, unsigned int offset)
{
@@ -1287,6 +1305,11 @@ int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
return 0;
}
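+ /* Dma-tagged iterators need no page lookup; just record the tag. */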
+ if (iov_iter_is_dma_tag(iter)) {
+ bio_iov_dma_tag_set(bio, iter);
+ return 0;
+ }
+
do {
ret = __bio_iov_iter_get_pages(bio, iter);
} while (!ret && iov_iter_count(iter) && !bio_full(bio, 0));
@@ -276,6 +276,24 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
const unsigned max_bytes = get_max_io_size(q, bio) << 9;
const unsigned max_segs = queue_max_segments(q);
+ if (bio_flagged(bio, BIO_DMA_TAGGED)) {
+ int offset = offset_in_page(bio->bi_iter.bi_bvec_done);
+
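+ /*
+  * Treat the premapped buffer as PAGE_SIZE segments: count the pages
+  * spanned by the data (including the offset into the first page) and
+  * clamp to the queue's byte and segment limits before splitting.
+  */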
+ nsegs = ALIGN(bio->bi_iter.bi_size + offset, PAGE_SIZE) >> PAGE_SHIFT;
+ if (bio->bi_iter.bi_size > max_bytes) {
+ bytes = max_bytes;
+ nsegs = (bytes + offset) >> PAGE_SHIFT;
+ } else if (nsegs > max_segs) {
+ nsegs = max_segs;
+ bytes = PAGE_SIZE * nsegs - offset;
+ } else {
+ *segs = nsegs;
+ return NULL;
+ }
+
+ goto split;
+ }
+
bio_for_each_bvec(bv, bio, iter) {
/*
* If the queue doesn't support SG gaps and adding this
@@ -61,11 +61,17 @@ static inline bool bio_has_data(struct bio *bio)
return false;
}
+static inline bool bio_flagged(const struct bio *bio, unsigned int bit)
+{
+ return (bio->bi_flags & (1U << bit)) != 0;
+}
+
static inline bool bio_no_advance_iter(const struct bio *bio)
{
return bio_op(bio) == REQ_OP_DISCARD ||
bio_op(bio) == REQ_OP_SECURE_ERASE ||
- bio_op(bio) == REQ_OP_WRITE_ZEROES;
+ bio_op(bio) == REQ_OP_WRITE_ZEROES ||
+ bio_flagged(bio, BIO_DMA_TAGGED);
}
static inline void *bio_data(struct bio *bio)
@@ -98,9 +104,11 @@ static inline void bio_advance_iter(const struct bio *bio,
{
iter->bi_sector += bytes >> 9;
- if (bio_no_advance_iter(bio))
+ if (bio_no_advance_iter(bio)) {
iter->bi_size -= bytes;
- else
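+ /*
+  * For dma-tagged bios, bi_bvec_done tracks how far into the premapped
+  * buffer the bio has advanced (reported by blk_rq_dma_offset()).
+  */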
+ if (bio_flagged(bio, BIO_DMA_TAGGED))
+ iter->bi_bvec_done += bytes;
+ } else {
bvec_iter_advance(bio->bi_io_vec, iter, bytes);
/* TODO: It is reasonable to complete bio with error here. */
+ }
}
@@ -225,11 +233,6 @@ static inline void bio_cnt_set(struct bio *bio, unsigned int count)
atomic_set(&bio->__bi_cnt, count);
}
-static inline bool bio_flagged(struct bio *bio, unsigned int bit)
-{
- return (bio->bi_flags & (1U << bit)) != 0;
-}
-
static inline void bio_set_flag(struct bio *bio, unsigned int bit)
{
bio->bi_flags |= (1U << bit);
@@ -447,7 +450,7 @@ static inline void bio_wouldblock_error(struct bio *bio)
*/
static inline int bio_iov_vecs_to_alloc(struct iov_iter *iter, int max_segs)
{
- if (iov_iter_is_bvec(iter))
+ if (iov_iter_is_bvec(iter) || iov_iter_is_dma_tag(iter))
return 0;
return iov_iter_npages(iter, max_segs);
}
@@ -1141,6 +1141,18 @@ static inline int blk_rq_map_sg(struct request_queue *q, struct request *rq,
}
void blk_dump_rq_flags(struct request *, char *);
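+/*
+ * The dma tag and its offset are only meaningful if the request's bio
+ * was built from a dma-tagged iov_iter (BIO_DMA_TAGGED).
+ */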
+static inline void *blk_rq_dma_tag(struct request *rq)
+{
+ return rq->bio && bio_flagged(rq->bio, BIO_DMA_TAGGED) ?
+ rq->bio->bi_dma_tag : NULL;
+}
+
+static inline size_t blk_rq_dma_offset(struct request *rq)
+{
+ return rq->bio && bio_flagged(rq->bio, BIO_DMA_TAGGED) ?
+ rq->bio->bi_iter.bi_bvec_done : 0;
+}
+
#ifdef CONFIG_BLK_DEV_ZONED
static inline unsigned int blk_rq_zone_no(struct request *rq)
{
@@ -299,7 +299,10 @@ struct bio {
atomic_t __bi_cnt; /* pin count */
- struct bio_vec *bi_io_vec; /* the actual vec list */
+ union {
+ struct bio_vec *bi_io_vec; /* the actual vec list */
+ void *bi_dma_tag; /* driver-specific tag */
+ };
struct bio_set *bi_pool;
@@ -334,6 +337,7 @@ enum {
BIO_QOS_MERGED, /* but went through rq_qos merge path */
BIO_REMAPPED,
BIO_ZONE_WRITE_LOCKED, /* Owns a zoned device zone write lock */
+ BIO_DMA_TAGGED, /* Using premapped dma buffers */
BIO_FLAG_LAST
};