@@ -42,6 +42,7 @@ struct convert_context {
struct bvec_iter iter_out;
sector_t cc_sector;
atomic_t cc_pending;
+ unsigned int block_size;
struct ablkcipher_request *req;
};
@@ -142,6 +143,8 @@ struct crypt_config {
sector_t iv_offset;
unsigned int iv_size;
+ unsigned int block_size;
+
/* ESSIV: struct crypto_cipher *essiv_tfm */
void *iv_private;
struct crypto_ablkcipher **tfms;
@@ -801,10 +804,17 @@ static void crypt_convert_init(struct crypt_config *cc,
{
ctx->bio_in = bio_in;
ctx->bio_out = bio_out;
- if (bio_in)
+ ctx->block_size = 0;
+ if (bio_in) {
ctx->iter_in = bio_in->bi_iter;
- if (bio_out)
+ ctx->block_size = max(ctx->block_size, bio_cur_bytes(bio_in));
+ }
+ if (bio_out) {
ctx->iter_out = bio_out->bi_iter;
+ ctx->block_size = max(ctx->block_size, bio_cur_bytes(bio_out));
+ }
+ if (ctx->block_size > cc->block_size)
+ ctx->block_size = cc->block_size;
ctx->cc_sector = sector + cc->iv_offset;
init_completion(&ctx->restart);
}
@@ -844,15 +854,15 @@ static int crypt_convert_block(struct crypt_config *cc,
dmreq->iv_sector = ctx->cc_sector;
dmreq->ctx = ctx;
sg_init_table(&dmreq->sg_in, 1);
- sg_set_page(&dmreq->sg_in, bv_in.bv_page, 1 << SECTOR_SHIFT,
+ sg_set_page(&dmreq->sg_in, bv_in.bv_page, ctx->block_size,
bv_in.bv_offset);
sg_init_table(&dmreq->sg_out, 1);
- sg_set_page(&dmreq->sg_out, bv_out.bv_page, 1 << SECTOR_SHIFT,
+ sg_set_page(&dmreq->sg_out, bv_out.bv_page, ctx->block_size,
bv_out.bv_offset);
- bio_advance_iter(ctx->bio_in, &ctx->iter_in, 1 << SECTOR_SHIFT);
- bio_advance_iter(ctx->bio_out, &ctx->iter_out, 1 << SECTOR_SHIFT);
+ bio_advance_iter(ctx->bio_in, &ctx->iter_in, ctx->block_size);
+ bio_advance_iter(ctx->bio_out, &ctx->iter_out, ctx->block_size);
if (cc->iv_gen_ops) {
r = cc->iv_gen_ops->generator(cc, iv, dmreq);
@@ -861,7 +871,7 @@ static int crypt_convert_block(struct crypt_config *cc,
}
ablkcipher_request_set_crypt(req, &dmreq->sg_in, &dmreq->sg_out,
- 1 << SECTOR_SHIFT, iv);
+ ctx->block_size, iv);
if (bio_data_dir(ctx->bio_in) == WRITE)
r = crypto_ablkcipher_encrypt(req);
@@ -926,13 +936,13 @@ static int crypt_convert(struct crypt_config *cc,
/* fall through*/
case -EINPROGRESS:
ctx->req = NULL;
- ctx->cc_sector++;
+ ctx->cc_sector += ctx->block_size / (1<<SECTOR_SHIFT);
continue;
/* sync */
case 0:
atomic_dec(&ctx->cc_pending);
- ctx->cc_sector++;
+ ctx->cc_sector += ctx->block_size / (1<<SECTOR_SHIFT);
cond_resched();
continue;
@@ -1814,6 +1824,9 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad;
}
+	/* Default encryption block size: 4096 (typical PAGE_SIZE); TODO: make this configurable via a table argument. */
+ cc->block_size = 4096;
+
ti->num_flush_bios = 1;
ti->discard_zeroes_data_unsupported = true;
@@ -1974,9 +1987,16 @@ static int crypt_iterate_devices(struct dm_target *ti,
return fn(ti, cc->dev, cc->start, ti->len, data);
}
+static void crypt_io_hints(struct dm_target *ti,
+			   struct queue_limits *limits)
+{
+	struct crypt_config *cc = ti->private;  /* use configured size, not a magic 4096 */
+	blk_limits_io_min(limits, cc->block_size);
+}
+
static struct target_type crypt_target = {
.name = "crypt",
- .version = {1, 13, 0},
+ .version = {1, 14, 0},
.module = THIS_MODULE,
.ctr = crypt_ctr,
.dtr = crypt_dtr,
@@ -1988,6 +2008,7 @@ static struct target_type crypt_target = {
.message = crypt_message,
.merge = crypt_merge,
.iterate_devices = crypt_iterate_devices,
+ .io_hints = crypt_io_hints,
};
static int __init dm_crypt_init(void)