===================================================================
@@ -453,6 +453,11 @@ static void free_io(struct mapped_device
static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
{
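+	/*
+	 * Drop the cgroup context that alloc_tio borrowed from the original
+	 * bio, so that bio_put does not release references we never took.
+	 */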
+ bio_clear_context(&tio->clone);
bio_put(&tio->clone);
}
@@ -521,6 +526,12 @@ static void queue_io(struct mapped_devic
{
unsigned long flags;
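+	/*
+	 * This bio will be dispatched later from the md worker thread, so
+	 * remember the submitter's cgroup context while still in that task.
+	 */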
+ bio_associate_current(bio);
+
spin_lock_irqsave(&md->deferred_lock, flags);
bio_list_add(&md->deferred, bio);
spin_unlock_irqrestore(&md->deferred_lock, flags);
@@ -1124,6 +1135,12 @@ static struct dm_target_io *alloc_tio(st
clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, ci->md->bs);
tio = container_of(clone, struct dm_target_io, clone);
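+	/*
+	 * Share the original bio's cgroup context with the clone. No extra
+	 * references are taken; free_tio clears the context before bio_put.
+	 */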
+ bio_clone_context(ci->bio, &tio->clone);
+
tio->io = ci->io;
tio->ti = ti;
memset(&tio->info, 0, sizeof(tio->info));
===================================================================
@@ -291,6 +291,29 @@ extern void bvec_free_bs(struct bio_set
extern unsigned int bvec_nr_vecs(unsigned short idx);
#ifdef CONFIG_BLK_CGROUP
+/*
+ * bio_associate_current associates the bio with the current process. Any
+ * block device driver that hands bios over to a different process for
+ * processing must call it, and must call it while still running in the
+ * submitting process. It does nothing if the bio is already associated.
+ *
+ * bio_disassociate_task disassociates the bio from its task. It is called
+ * automatically when the bio is destroyed.
+ */
int bio_associate_current(struct bio *bio);
void bio_disassociate_task(struct bio *bio);
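+
+/*
+ * A minimal usage sketch (hypothetical my_dev driver code, mirroring what
+ * dm's queue_io does with this call): a driver that defers bios to a worker
+ * thread associates them first, while still running in the submitting task:
+ *
+ *	static void my_defer_bio(struct my_dev *dev, struct bio *bio)
+ *	{
+ *		bio_associate_current(bio);
+ *		spin_lock(&dev->deferred_lock);
+ *		bio_list_add(&dev->deferred, bio);
+ *		spin_unlock(&dev->deferred_lock);
+ *	}
+ */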
#else /* CONFIG_BLK_CGROUP */
@@ -299,6 +322,47 @@ static inline void bio_disassociate_task
#endif /* CONFIG_BLK_CGROUP */
/*
+ * bio_clone_context copies the cgroup context from the original bio to the
+ * new bio. It is used by midlayer block drivers that create a new bio based
+ * on an original bio and forward it to the lower layer.
+ *
+ * No reference counts are incremented - it is assumed that the lifetime of
+ * the new bio is shorter than the lifetime of the original bio. If the new
+ * bio can outlive the original, the caller must take the references itself.
+ *
+ * Before freeing the new bio, the caller must clear the context with the
+ * bio_clear_context function. If bio_clear_context were not called, the
+ * reference counts would be dropped once for the new bio and once for the
+ * original, resulting in a crash due to reference count underflow.
+ */
+static inline void bio_clone_context(struct bio *orig, struct bio *new)
+{
+#ifdef CONFIG_BLK_CGROUP
+ new->bi_ioc = orig->bi_ioc;
+ new->bi_css = orig->bi_css;
+#endif
+}
+
+static inline void bio_clear_context(struct bio *bio)
+{
+#ifdef CONFIG_BLK_CGROUP
+ bio->bi_ioc = NULL;
+ bio->bi_css = NULL;
+#endif
+}
+
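+/*
+ * A minimal pairing sketch (hypothetical stacking-driver code): the clone
+ * borrows the context and must give it up before the final bio_put:
+ *
+ *	clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, bs);
+ *	bio_clone_context(orig, clone);
+ *	... submit the clone and wait for its completion ...
+ *	bio_clear_context(clone);
+ *	bio_put(clone);
+ */
+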
+/*
* bio_set is used to allow other portions of the IO system to
* allocate their own private memory pools for bio and iovec structures.
* These memory pools in turn all allocate from the bio_slab
===================================================================
@@ -181,6 +181,7 @@ struct crypt_config {
static struct kmem_cache *_crypt_io_pool;
static void clone_init(struct dm_crypt_io *, struct bio *);
+static void clone_free(struct bio *);
static void kcryptd_queue_crypt(struct dm_crypt_io *io);
static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq);
@@ -846,7 +847,7 @@ static struct bio *crypt_alloc_buffer(st
}
if (!clone->bi_size) {
- bio_put(clone);
+ clone_free(clone);
return NULL;
}
@@ -945,7 +946,7 @@ static void crypt_endio(struct bio *clon
if (rw == WRITE)
crypt_free_buffer_pages(cc, clone);
- bio_put(clone);
+ clone_free(clone);
if (rw == READ && !error) {
kcryptd_queue_crypt(io);
@@ -966,6 +967,18 @@ static void clone_init(struct dm_crypt_i
clone->bi_end_io = crypt_endio;
clone->bi_bdev = cc->dev->bdev;
clone->bi_rw = io->base_bio->bi_rw;
+
+ bio_clone_context(io->base_bio, clone);
+}
+
+static void clone_free(struct bio *clone)
+{
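+	/*
+	 * The context was borrowed from the base bio in clone_init; clear it
+	 * so that bio_put does not drop references clone_init never took.
+	 */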
+ bio_clear_context(clone);
+ bio_put(clone);
}
static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
@@ -1026,7 +1039,7 @@ static void kcryptd_crypt_write_io_submi
if (unlikely(io->error < 0)) {
crypt_free_buffer_pages(cc, clone);
- bio_put(clone);
+ clone_free(clone);
crypt_dec_pending(io);
return;
}
@@ -1692,6 +1705,12 @@ static int crypt_map(struct dm_target *t
return DM_MAPIO_REMAPPED;
}
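+	/*
+	 * Reads and writes are completed from the kcryptd workqueue, so
+	 * associate the bio with the issuing task while we are still in it.
+	 */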
+ bio_associate_current(bio);
+
io = crypt_io_alloc(cc, bio, dm_target_offset(ti, bio->bi_sector));
if (bio_data_dir(io->base_bio) == READ) {
===================================================================
@@ -1703,6 +1703,7 @@ int bio_associate_current(struct bio *bi
return 0;
}
+EXPORT_SYMBOL(bio_associate_current);
/**
* bio_disassociate_task - undo bio_associate_current()
@@ -1719,6 +1720,7 @@ void bio_disassociate_task(struct bio *b
bio->bi_css = NULL;
}
}
+EXPORT_SYMBOL(bio_disassociate_task);
#endif /* CONFIG_BLK_CGROUP */