@@ -26,6 +26,20 @@ struct blkdev_copy_offload_io {
loff_t offset;
};
+
+/* Keeps track of a single outstanding copy emulation I/O */
+struct blkdev_copy_emulation_io {
+ struct blkdev_copy_io *cio;
+ struct work_struct emulation_work;
+ void *buf;
+ ssize_t buf_len;
+ loff_t pos_in;
+ loff_t pos_out;
+ ssize_t len;
+ struct block_device *bdev_in;
+ struct block_device *bdev_out;
+ gfp_t gfp;
+};
+
static sector_t bio_discard_limit(struct block_device *bdev, sector_t sector)
{
unsigned int discard_granularity = bdev_discard_granularity(bdev);
@@ -319,6 +333,215 @@ ssize_t blkdev_copy_offload(struct block_device *bdev, loff_t pos_in,
}
EXPORT_SYMBOL_GPL(blkdev_copy_offload);
+
+static void *blkdev_copy_alloc_buf(ssize_t req_size, ssize_t *alloc_size,
+ gfp_t gfp)
+{
+ int min_size = PAGE_SIZE;
+ char *buf;
+
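+	/*
+	 * Start with the requested size and halve it on allocation failure,
+	 * down to a single page, so low memory does not fail the copy
+	 * outright.
+	 */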
+ while (req_size >= min_size) {
+ buf = kvmalloc(req_size, gfp);
+ if (buf) {
+ *alloc_size = req_size;
+ return buf;
+ }
+ req_size >>= 1;
+ }
+
+ return NULL;
+}
+
+static struct bio *bio_map_buf(void *data, unsigned int len, gfp_t gfp)
+{
+ unsigned long kaddr = (unsigned long)data;
+ unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ unsigned long start = kaddr >> PAGE_SHIFT;
+ const int nr_pages = end - start;
+ bool is_vmalloc = is_vmalloc_addr(data);
+ struct page *page;
+ int offset, i;
+ struct bio *bio;
+
+ bio = bio_kmalloc(nr_pages, gfp);
+ if (!bio)
+ return ERR_PTR(-ENOMEM);
+ bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, 0);
+
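+	/*
+	 * For a vmalloc'ed buffer, flush pending CPU writes so the device
+	 * sees up-to-date data.
+	 */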
+ if (is_vmalloc) {
+ flush_kernel_vmap_range(data, len);
+ bio->bi_private = data;
+ }
+
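+	/* Attach the buffer to the bio one page at a time */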
+ offset = offset_in_page(kaddr);
+ for (i = 0; i < nr_pages; i++) {
+ unsigned int bytes = PAGE_SIZE - offset;
+
+		if (!len)
+ break;
+
+ if (bytes > len)
+ bytes = len;
+
+ if (!is_vmalloc)
+ page = virt_to_page(data);
+ else
+ page = vmalloc_to_page(data);
+ if (bio_add_page(bio, page, bytes, offset) < bytes) {
+ /* we don't support partial mappings */
+ bio_uninit(bio);
+ kfree(bio);
+ return ERR_PTR(-EINVAL);
+ }
+
+ data += bytes;
+ len -= bytes;
+ offset = 0;
+ }
+
+ return bio;
+}
+
+static void blkdev_copy_emulation_work(struct work_struct *work)
+{
+ struct blkdev_copy_emulation_io *emulation_io = container_of(work,
+ struct blkdev_copy_emulation_io, emulation_work);
+ struct blkdev_copy_io *cio = emulation_io->cio;
+ struct bio *read_bio, *write_bio;
+ loff_t pos_in = emulation_io->pos_in, pos_out = emulation_io->pos_out;
+ ssize_t rem, chunk;
+ int ret = 0;
+
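+	/* Copy in bounce-buffer-sized chunks: read in, then write out */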
+ for (rem = emulation_io->len; rem > 0; rem -= chunk) {
+		chunk = min_t(ssize_t, emulation_io->buf_len, rem);
+
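+		/*
+		 * Map the whole bounce buffer; bi_size below trims the I/O
+		 * to the current chunk.
+		 */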
+ read_bio = bio_map_buf(emulation_io->buf,
+ emulation_io->buf_len,
+ emulation_io->gfp);
+ if (IS_ERR(read_bio)) {
+ ret = PTR_ERR(read_bio);
+ break;
+ }
+ read_bio->bi_opf = REQ_OP_READ | REQ_SYNC;
+ bio_set_dev(read_bio, emulation_io->bdev_in);
+ read_bio->bi_iter.bi_sector = pos_in >> SECTOR_SHIFT;
+ read_bio->bi_iter.bi_size = chunk;
+ ret = submit_bio_wait(read_bio);
+		bio_uninit(read_bio);
+		kfree(read_bio);
+ if (ret)
+ break;
+
+ write_bio = bio_map_buf(emulation_io->buf,
+ emulation_io->buf_len,
+ emulation_io->gfp);
+ if (IS_ERR(write_bio)) {
+ ret = PTR_ERR(write_bio);
+ break;
+ }
+ write_bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;
+ bio_set_dev(write_bio, emulation_io->bdev_out);
+ write_bio->bi_iter.bi_sector = pos_out >> SECTOR_SHIFT;
+ write_bio->bi_iter.bi_size = chunk;
+ ret = submit_bio_wait(write_bio);
+		bio_uninit(write_bio);
+		kfree(write_bio);
+ if (ret)
+ break;
+
+ pos_in += chunk;
+ pos_out += chunk;
+ }
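+
+	/* Record the result and release per-copy state before completion */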
+ cio->status = ret;
+ kvfree(emulation_io->buf);
+ kfree(emulation_io);
+ blkdev_copy_endio(cio);
+}
+
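+/*
+ * Largest I/O we can express in a single bio on @q: bounded by both the
+ * hardware sector limit and the segment limit (the bounce buffer adds one
+ * page per segment).
+ */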
+static inline ssize_t queue_max_hw_bytes(struct request_queue *q)
+{
+ return min_t(ssize_t, queue_max_hw_sectors(q) << SECTOR_SHIFT,
+ queue_max_segments(q) << PAGE_SHIFT);
+}
+
+/*
+ * @bdev_in: source block device
+ * @pos_in: source offset
+ * @bdev_out: destination block device
+ * @pos_out: destination offset
+ * @len: length in bytes to be copied
+ * @endio: endio function to be called on completion of the copy operation,
+ * for synchronous operation this should be NULL
+ * @private: endio function will be called with this private data,
+ * for synchronous operation this should be NULL
+ * @gfp: memory allocation flags (used for the internal allocations and
+ * for bio_kmalloc)
+ *
+ * For synchronous operation returns the number of bytes copied or a
+ * negative error; for asynchronous operation returns -EIOCBQUEUED or a
+ * negative error.
+ *
+ * Description:
+ * If the native copy offload feature is absent, callers can use this
+ * function to perform a copy. We store the information required for the
+ * copy, allocate a temporary bounce buffer, and punt the emulation to a
+ * worker, which performs the copy in two steps:
+ * 1. Read data from the source into the temporary buffer
+ * 2. Write data from the temporary buffer to the destination
+ */
+ssize_t blkdev_copy_emulation(struct block_device *bdev_in, loff_t pos_in,
+ struct block_device *bdev_out, loff_t pos_out,
+ size_t len, void (*endio)(void *, int, ssize_t),
+ void *private, gfp_t gfp)
+{
+ struct request_queue *in = bdev_get_queue(bdev_in);
+ struct request_queue *out = bdev_get_queue(bdev_out);
+ struct blkdev_copy_emulation_io *emulation_io;
+ struct blkdev_copy_io *cio;
+ ssize_t ret;
+ size_t max_hw_bytes = min(queue_max_hw_bytes(in),
+ queue_max_hw_bytes(out));
+
+ ret = blkdev_copy_sanity_check(bdev_in, pos_in, bdev_out, pos_out, len);
+ if (ret)
+ return ret;
+
+	cio = kzalloc(sizeof(*cio), gfp);
+ if (!cio)
+ return -ENOMEM;
+
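+	/* Completion is reported via endio (async) or by waking the waiter */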
+ cio->waiter = current;
+ cio->copied = len;
+ cio->endio = endio;
+ cio->private = private;
+
+ emulation_io = kzalloc(sizeof(*emulation_io), gfp);
+ if (!emulation_io)
+ goto err_free_cio;
+ emulation_io->cio = cio;
+ INIT_WORK(&emulation_io->emulation_work, blkdev_copy_emulation_work);
+ emulation_io->pos_in = pos_in;
+ emulation_io->pos_out = pos_out;
+ emulation_io->len = len;
+ emulation_io->bdev_in = bdev_in;
+ emulation_io->bdev_out = bdev_out;
+ emulation_io->gfp = gfp;
+
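+	/*
+	 * Bounce buffer for at most one maximal hardware I/O; the helper may
+	 * return a smaller buffer under memory pressure.
+	 */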
+ emulation_io->buf = blkdev_copy_alloc_buf(min(max_hw_bytes, len),
+ &emulation_io->buf_len, gfp);
+ if (!emulation_io->buf)
+ goto err_free_emulation_io;
+
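+	/* Punt the copy to process context; see the return convention above */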
+ schedule_work(&emulation_io->emulation_work);
+
+ if (cio->endio)
+ return -EIOCBQUEUED;
+
+ return blkdev_copy_wait_for_completion_io(cio);
+
+err_free_emulation_io:
+ kfree(emulation_io);
+err_free_cio:
+ kfree(cio);
+ return -ENOMEM;
+}
+EXPORT_SYMBOL_GPL(blkdev_copy_emulation);
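+
+/*
+ * Illustrative synchronous usage (a minimal sketch; assumes bdev_in and
+ * bdev_out are already-opened block devices and the 1 MiB range is
+ * logical-block aligned):
+ *
+ *	ssize_t copied;
+ *
+ *	copied = blkdev_copy_emulation(bdev_in, 0, bdev_out, 0, SZ_1M,
+ *				       NULL, NULL, GFP_KERNEL);
+ *	if (copied < 0)
+ *		return copied;
+ */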
+
static int __blkdev_issue_write_zeroes(struct block_device *bdev,
sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
struct bio **biop, unsigned flags)
@@ -1046,6 +1046,10 @@ ssize_t blkdev_copy_offload(struct block_device *bdev, loff_t pos_in,
loff_t pos_out, size_t len,
void (*endio)(void *, int, ssize_t),
void *private, gfp_t gfp_mask);
+ssize_t blkdev_copy_emulation(struct block_device *bdev_in, loff_t pos_in,
+ struct block_device *bdev_out, loff_t pos_out,
+ size_t len, void (*endio)(void *, int, ssize_t),
+ void *private, gfp_t gfp);
#define BLKDEV_ZERO_NOUNMAP (1 << 0) /* do not free blocks */
#define BLKDEV_ZERO_NOFALLBACK (1 << 1) /* don't write explicit zeroes */