@@ -1292,7 +1292,8 @@ static void dmio_complete(unsigned long error, void *context)
}
static void use_dmio(struct dm_buffer *b, enum req_op op, sector_t sector,
- unsigned int n_sectors, unsigned int offset)
+ unsigned int n_sectors, unsigned int offset,
+ unsigned short ioprio)
{
int r;
struct dm_io_request io_req = {
@@ -1315,7 +1316,7 @@ static void use_dmio(struct dm_buffer *b, enum req_op op, sector_t sector,
io_req.mem.ptr.vma = (char *)b->data + offset;
}
- r = dm_io(&io_req, 1, &region, NULL, IOPRIO_DEFAULT);
+ r = dm_io(&io_req, 1, &region, NULL, ioprio);
if (unlikely(r))
b->end_io(b, errno_to_blk_status(r));
}
@@ -1331,7 +1332,8 @@ static void bio_complete(struct bio *bio)
}
static void use_bio(struct dm_buffer *b, enum req_op op, sector_t sector,
- unsigned int n_sectors, unsigned int offset)
+ unsigned int n_sectors, unsigned int offset,
+ unsigned short ioprio)
{
struct bio *bio;
char *ptr;
@@ -1339,13 +1341,14 @@ static void use_bio(struct dm_buffer *b, enum req_op op, sector_t sector,
bio = bio_kmalloc(1, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOWARN);
if (!bio) {
- use_dmio(b, op, sector, n_sectors, offset);
+ use_dmio(b, op, sector, n_sectors, offset, ioprio);
return;
}
bio_init(bio, b->c->bdev, bio->bi_inline_vecs, 1, op);
bio->bi_iter.bi_sector = sector;
bio->bi_end_io = bio_complete;
bio->bi_private = b;
+ bio->bi_ioprio = ioprio;
ptr = (char *)b->data + offset;
len = n_sectors << SECTOR_SHIFT;
@@ -1368,7 +1371,7 @@ static inline sector_t block_to_sector(struct dm_bufio_client *c, sector_t block
return sector;
}
-static void submit_io(struct dm_buffer *b, enum req_op op,
+static void submit_io(struct dm_buffer *b, enum req_op op, unsigned short ioprio,
void (*end_io)(struct dm_buffer *, blk_status_t))
{
unsigned int n_sectors;
@@ -1398,9 +1401,9 @@ static void submit_io(struct dm_buffer *b, enum req_op op,
}
if (b->data_mode != DATA_MODE_VMALLOC)
- use_bio(b, op, sector, n_sectors, offset);
+ use_bio(b, op, sector, n_sectors, offset, ioprio);
else
- use_dmio(b, op, sector, n_sectors, offset);
+ use_dmio(b, op, sector, n_sectors, offset, ioprio);
}
/*
@@ -1456,7 +1459,7 @@ static void __write_dirty_buffer(struct dm_buffer *b,
b->write_end = b->dirty_end;
if (!write_list)
- submit_io(b, REQ_OP_WRITE, write_endio);
+ submit_io(b, REQ_OP_WRITE, IOPRIO_DEFAULT, write_endio);
else
list_add_tail(&b->write_list, write_list);
}
@@ -1470,7 +1473,7 @@ static void __flush_write_list(struct list_head *write_list)
struct dm_buffer *b =
list_entry(write_list->next, struct dm_buffer, write_list);
list_del(&b->write_list);
- submit_io(b, REQ_OP_WRITE, write_endio);
+ submit_io(b, REQ_OP_WRITE, IOPRIO_DEFAULT, write_endio);
cond_resched();
}
blk_finish_plug(&plug);
@@ -1852,7 +1855,8 @@ static void read_endio(struct dm_buffer *b, blk_status_t status)
* and uses dm_bufio_mark_buffer_dirty to write new data back).
*/
static void *new_read(struct dm_bufio_client *c, sector_t block,
- enum new_flag nf, struct dm_buffer **bp)
+ enum new_flag nf, struct dm_buffer **bp,
+ unsigned short ioprio)
{
int need_submit = 0;
struct dm_buffer *b;
@@ -1905,7 +1909,7 @@ static void *new_read(struct dm_bufio_client *c, sector_t block,
return NULL;
if (need_submit)
- submit_io(b, REQ_OP_READ, read_endio);
+ submit_io(b, REQ_OP_READ, ioprio, read_endio);
if (nf != NF_GET) /* we already tested this condition above */
wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
@@ -1926,17 +1930,17 @@ static void *new_read(struct dm_bufio_client *c, sector_t block,
void *dm_bufio_get(struct dm_bufio_client *c, sector_t block,
struct dm_buffer **bp)
{
- return new_read(c, block, NF_GET, bp);
+ return new_read(c, block, NF_GET, bp, IOPRIO_DEFAULT);
}
EXPORT_SYMBOL_GPL(dm_bufio_get);
void *dm_bufio_read(struct dm_bufio_client *c, sector_t block,
- struct dm_buffer **bp)
+ struct dm_buffer **bp, unsigned short ioprio)
{
if (WARN_ON_ONCE(dm_bufio_in_request()))
return ERR_PTR(-EINVAL);
- return new_read(c, block, NF_READ, bp);
+ return new_read(c, block, NF_READ, bp, ioprio);
}
EXPORT_SYMBOL_GPL(dm_bufio_read);
@@ -1946,12 +1950,13 @@ void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
if (WARN_ON_ONCE(dm_bufio_in_request()))
return ERR_PTR(-EINVAL);
- return new_read(c, block, NF_FRESH, bp);
+ return new_read(c, block, NF_FRESH, bp, IOPRIO_DEFAULT);
}
EXPORT_SYMBOL_GPL(dm_bufio_new);
void dm_bufio_prefetch(struct dm_bufio_client *c,
- sector_t block, unsigned int n_blocks)
+ sector_t block, unsigned int n_blocks,
+ unsigned short ioprio)
{
struct blk_plug plug;
@@ -1987,7 +1992,7 @@ void dm_bufio_prefetch(struct dm_bufio_client *c,
dm_bufio_unlock(c);
if (need_submit)
- submit_io(b, REQ_OP_READ, read_endio);
+ submit_io(b, REQ_OP_READ, ioprio, read_endio);
dm_bufio_release(b);
cond_resched();
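
With the bufio paths above threaded through, a caller can forward the submitting bio's priority instead of IOPRIO_DEFAULT. A minimal sketch of such a caller — my_ctx and my_read_block are hypothetical names, and a bufio client is assumed on the target context:

	static int my_read_block(struct my_ctx *ctx, struct bio *bio,
				 sector_t block)
	{
		struct dm_buffer *bp;
		void *data;

		/* bio_prio(bio) is bio->bi_ioprio; passing it lets the
		 * buffered read inherit the request's I/O priority.
		 */
		data = dm_bufio_read(ctx->bufio, block, &bp, bio_prio(bio));
		if (IS_ERR(data))
			return PTR_ERR(data);

		/* ... consume data ... */
		dm_bufio_release(bp);
		return 0;
	}
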
@@ -84,7 +84,7 @@ static int __ebs_rw_bvec(struct ebs_c *ec, enum req_op op, struct bio_vec *bv,
/* Avoid reading for writes in case bio vector's page overwrites block completely. */
if (op == REQ_OP_READ || buf_off || bv_len < dm_bufio_get_block_size(ec->bufio))
- ba = dm_bufio_read(ec->bufio, block, &b);
+ ba = dm_bufio_read(ec->bufio, block, &b, IOPRIO_DEFAULT);
else
ba = dm_bufio_new(ec->bufio, block, &b);
@@ -194,13 +194,13 @@ static void __ebs_process_bios(struct work_struct *ws)
bio_list_for_each(bio, &bios) {
block1 = __sector_to_block(ec, bio->bi_iter.bi_sector);
if (bio_op(bio) == REQ_OP_READ)
- dm_bufio_prefetch(ec->bufio, block1, __nr_blocks(ec, bio));
+ dm_bufio_prefetch(ec->bufio, block1, __nr_blocks(ec, bio), IOPRIO_DEFAULT);
else if (bio_op(bio) == REQ_OP_WRITE && !(bio->bi_opf & REQ_PREFLUSH)) {
block2 = __sector_to_block(ec, bio_end_sector(bio));
if (__block_mod(bio->bi_iter.bi_sector, ec->u_bs))
- dm_bufio_prefetch(ec->bufio, block1, 1);
+ dm_bufio_prefetch(ec->bufio, block1, 1, IOPRIO_DEFAULT);
if (__block_mod(bio_end_sector(bio), ec->u_bs) && block2 != block1)
- dm_bufio_prefetch(ec->bufio, block2, 1);
+ dm_bufio_prefetch(ec->bufio, block2, 1, IOPRIO_DEFAULT);
}
}
@@ -1418,7 +1418,7 @@ static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, se
if (unlikely(r))
return r;
- data = dm_bufio_read(ic->bufio, *metadata_block, &b);
+ data = dm_bufio_read(ic->bufio, *metadata_block, &b, IOPRIO_DEFAULT);
if (IS_ERR(data))
return PTR_ERR(data);
@@ -524,7 +524,7 @@ static int read_exceptions(struct pstore *ps,
if (unlikely(pf_chunk >= dm_bufio_get_device_size(client)))
break;
- dm_bufio_prefetch(client, pf_chunk, 1);
+ dm_bufio_prefetch(client, pf_chunk, 1, IOPRIO_DEFAULT);
prefetch_area++;
if (unlikely(!prefetch_area))
break;
@@ -533,7 +533,7 @@ static int read_exceptions(struct pstore *ps,
chunk = area_location(ps, ps->current_area);
- area = dm_bufio_read(client, chunk, &bp);
+ area = dm_bufio_read(client, chunk, &bp, IOPRIO_DEFAULT);
if (IS_ERR(area)) {
r = PTR_ERR(area);
goto ret_destroy_bufio;
@@ -69,7 +69,7 @@ static u8 *fec_read_parity(struct dm_verity *v, u64 rsb, int index,
block = div64_u64_rem(position, v->fec->io_size, &rem);
*offset = (unsigned int)rem;
- res = dm_bufio_read(v->fec->bufio, block, buf);
+ res = dm_bufio_read(v->fec->bufio, block, buf, IOPRIO_DEFAULT);
if (IS_ERR(res)) {
DMERR("%s: FEC %llu: parity read failed (block %llu): %ld",
v->data_dev->name, (unsigned long long)rsb,
@@ -248,7 +248,7 @@ static int fec_read_bufs(struct dm_verity *v, struct dm_verity_io *io,
bufio = v->bufio;
}
- bbuf = dm_bufio_read(bufio, block, &buf);
+ bbuf = dm_bufio_read(bufio, block, &buf, IOPRIO_DEFAULT);
if (IS_ERR(bbuf)) {
DMWARN_LIMIT("%s: FEC %llu: read failed (%llu): %ld",
v->data_dev->name,
@@ -308,7 +308,7 @@ static int verity_verify_level(struct dm_verity *v, struct dm_verity_io *io,
return -EAGAIN;
}
} else
- data = dm_bufio_read(v->bufio, hash_block, &buf);
+ data = dm_bufio_read(v->bufio, hash_block, &buf, IOPRIO_DEFAULT);
if (IS_ERR(data))
return PTR_ERR(data);
@@ -719,7 +719,8 @@ static void verity_prefetch_io(struct work_struct *work)
}
no_prefetch_cluster:
dm_bufio_prefetch(v->bufio, hash_block_start,
- hash_block_end - hash_block_start + 1);
+ hash_block_end - hash_block_start + 1,
+ IOPRIO_DEFAULT);
}
kfree(pw);
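
dm-verity's hash prefetch is a natural consumer of this plumbing: a follow-up could carry the originating bio's priority in the prefetch work item rather than pinning IOPRIO_DEFAULT. Illustrative sketch only, assuming a hypothetical ioprio field added to struct dm_verity_prefetch_work:

	dm_bufio_prefetch(v->bufio, hash_block_start,
			  hash_block_end - hash_block_start + 1,
			  pw->ioprio);
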
@@ -474,7 +474,7 @@ int dm_bm_read_lock(struct dm_block_manager *bm, dm_block_t b,
void *p;
int r;
- p = dm_bufio_read(bm->bufio, b, (struct dm_buffer **) result);
+ p = dm_bufio_read(bm->bufio, b, (struct dm_buffer **) result, IOPRIO_DEFAULT);
if (IS_ERR(p))
return PTR_ERR(p);
@@ -510,7 +510,7 @@ int dm_bm_write_lock(struct dm_block_manager *bm,
if (dm_bm_is_read_only(bm))
return -EPERM;
- p = dm_bufio_read(bm->bufio, b, (struct dm_buffer **) result);
+ p = dm_bufio_read(bm->bufio, b, (struct dm_buffer **) result, IOPRIO_DEFAULT);
if (IS_ERR(p))
return PTR_ERR(p);
@@ -624,7 +624,7 @@ EXPORT_SYMBOL_GPL(dm_bm_flush);
void dm_bm_prefetch(struct dm_block_manager *bm, dm_block_t b)
{
- dm_bufio_prefetch(bm->bufio, b, 1);
+ dm_bufio_prefetch(bm->bufio, b, 1, IOPRIO_DEFAULT);
}
bool dm_bm_is_read_only(struct dm_block_manager *bm)
@@ -62,7 +62,7 @@ void dm_bufio_set_sector_offset(struct dm_bufio_client *c, sector_t start);
* it dirty.
*/
void *dm_bufio_read(struct dm_bufio_client *c, sector_t block,
- struct dm_buffer **bp);
+ struct dm_buffer **bp, unsigned short ioprio);
/*
* Like dm_bufio_read, but return buffer from cache, don't read
@@ -84,7 +84,8 @@ void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
* I/O to finish.
*/
void dm_bufio_prefetch(struct dm_bufio_client *c,
- sector_t block, unsigned int n_blocks);
+ sector_t block, unsigned int n_blocks,
+ unsigned short ioprio);
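
As a usage sketch (illustrative, not from this patch), background readahead could now be issued at idle priority so it never competes with foreground I/O; IOPRIO_PRIO_VALUE() and IOPRIO_CLASS_IDLE come from linux/ioprio.h:

	dm_bufio_prefetch(c, block, n_blocks,
			  IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
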
/*
* Release a reference obtained with dm_bufio_{read,get,new}. The data