@@ -545,7 +545,8 @@ static void dmio_complete(unsigned long error, void *context)
{
struct dm_buffer *b = context;
- b->bio.bi_end_io(&b->bio, error ? -EIO : 0);
+ b->bio.bi_error = error ? -EIO : 0;
+ b->bio.bi_end_io(&b->bio);
}
static void use_dmio(struct dm_buffer *b, int rw, sector_t block,
@@ -954,25 +954,23 @@ static void disable_write_same(struct mapped_device *md)
 	limits->max_write_same_sectors = 0;
 }
 
-static void clone_endio(struct bio *bio, int error)
+static void clone_endio(struct bio *bio)
 {
+	int error = bio->bi_error;
 	int r = error;
 	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
 	struct dm_io *io = tio->io;
 	struct mapped_device *md = tio->io->md;
 	dm_endio_fn endio = tio->ti->type->end_io;
 
-	if (!bio_flagged(bio, BIO_UPTODATE) && !error)
-		error = -EIO;
-
 	if (endio) {
 		r = endio(tio->ti, bio, error);
 		if (r < 0 || r == DM_ENDIO_REQUEUE)
 			/*
 			 * error and requeue request are handled
 			 * in dec_pending().
			 */
 			error = r;
 		else if (r == DM_ENDIO_INCOMPLETE)
 			/* The target will handle the io */
 			return;