
[v9,9/9] null_blk: add support for copy offload

Message ID 20230411081041.5328-10-anuj20.g@samsung.com (mailing list archive)
State New, archived
Series [v9,1/9] block: Introduce queue limits for copy-offload support

Commit Message

Anuj Gupta April 11, 2023, 8:10 a.m. UTC
From: Nitesh Shetty <nj.shetty@samsung.com>

Implementation is based on the existing read and write infrastructure.
copy_max_bytes: a new configfs and module parameter is introduced, which
can be used to set the hardware/driver supported maximum copy limit.

Suggested-by: Damien Le Moal <damien.lemoal@opensource.wdc.com>
Signed-off-by: Anuj Gupta <anuj20.g@samsung.com>
Signed-off-by: Nitesh Shetty <nj.shetty@samsung.com>
Signed-off-by: Vincent Fu <vincent.fu@samsung.com>
---
 drivers/block/null_blk/main.c     | 101 ++++++++++++++++++++++++++++++
 drivers/block/null_blk/null_blk.h |   8 +++
 2 files changed, 109 insertions(+)

Comments

Chaitanya Kulkarni April 13, 2023, 6:28 a.m. UTC | #1
On 4/11/23 01:10, Anuj Gupta wrote:
> From: Nitesh Shetty <nj.shetty@samsung.com>
>
> Implementation is based on the existing read and write infrastructure.
> copy_max_bytes: a new configfs and module parameter is introduced, which
> can be used to set the hardware/driver supported maximum copy limit.
>
> Suggested-by: Damien Le Moal <damien.lemoal@opensource.wdc.com>
> Signed-off-by: Anuj Gupta <anuj20.g@samsung.com>
> Signed-off-by: Nitesh Shetty <nj.shetty@samsung.com>
> Signed-off-by: Vincent Fu <vincent.fu@samsung.com>
> ---
>   drivers/block/null_blk/main.c     | 101 ++++++++++++++++++++++++++++++
>   drivers/block/null_blk/null_blk.h |   8 +++
>   2 files changed, 109 insertions(+)
>
> diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
> index bc2c58724df3..e273e18ace74 100644
> --- a/drivers/block/null_blk/main.c
> +++ b/drivers/block/null_blk/main.c
> @@ -157,6 +157,10 @@ static int g_max_sectors;
>   module_param_named(max_sectors, g_max_sectors, int, 0444);
>   MODULE_PARM_DESC(max_sectors, "Maximum size of a command (in 512B sectors)");
>   
> +static int g_copy_max_bytes = COPY_MAX_BYTES;

how about the following? It matches the nullb_device->copy_max_bytes type ..

-static int g_copy_max_bytes = COPY_MAX_BYTES;
-module_param_named(copy_max_bytes, g_copy_max_bytes, int, 0444);
+static unsigned long g_copy_max_bytes = COPY_MAX_BYTES;
+module_param_named(copy_max_bytes, g_copy_max_bytes, ulong, 0444);

[...]

> @@ -631,6 +637,7 @@ static ssize_t memb_group_features_show(struct config_item *item, char *page)
>   			"badblocks,blocking,blocksize,cache_size,"
>   			"completion_nsec,discard,home_node,hw_queue_depth,"
>   			"irqmode,max_sectors,mbps,memory_backed,no_sched,"
> +			"copy_max_bytes,"
>   			"poll_queues,power,queue_mode,shared_tag_bitmap,size,"
>   			"submit_queues,use_per_node_hctx,virt_boundary,zoned,"
>   			"zone_capacity,zone_max_active,zone_max_open,"

why not ?

@@ -637,11 +637,12 @@ static ssize_t memb_group_features_show(struct config_item *item, char *page)
                         "badblocks,blocking,blocksize,cache_size,"
                         "completion_nsec,discard,home_node,hw_queue_depth,"
                         "irqmode,max_sectors,mbps,memory_backed,no_sched,"
-                       "copy_max_bytes,"
                         "poll_queues,power,queue_mode,shared_tag_bitmap,size,"
                         "submit_queues,use_per_node_hctx,virt_boundary,zoned,"
                         "zone_capacity,zone_max_active,zone_max_open,"
-                       "zone_nr_conv,zone_offline,zone_readonly,zone_size\n");
+                       "zone_nr_conv,zone_offline,zone_readonly,zone_size,"
+                       "copy_max_bytes\n");
  }
  
[...]
  
+static inline int nullb_setup_copy_read(struct nullb *nullb,
+		struct bio *bio)
+{
+	struct nullb_copy_token *token = bvec_kmap_local(&bio->bi_io_vec[0]);
+
+	memcpy(token->subsys, "nullb", 5);

do you really need to use memcpy here? Can token->subsys be a pointer
and be set with an assignment, token->subsys = nullb?

+	token->sector_in = bio->bi_iter.bi_sector;
+	token->nullb = nullb;
+	token->sectors = bio->bi_iter.bi_size >> SECTOR_SHIFT;
+
+	return 0;
+}
+

no point in returning 0; use a local bool for fua instead of repeating the
expression, and there is no need to fold the line for nullb_setup_copy_read().
That makes it easier to read and removes extra lines and indentation, see below :-

-static inline int nullb_setup_copy_read(struct nullb *nullb,
-               struct bio *bio)
+static inline void nullb_setup_copy_read(struct nullb *nullb, struct bio *bio)
  {
         struct nullb_copy_token *token = bvec_kmap_local(&bio->bi_io_vec[0]);
  
-       memcpy(token->subsys, "nullb", 5);
+       token->subsys = "nullb;
         token->sector_in = bio->bi_iter.bi_sector;
         token->nullb = nullb;
         token->sectors = bio->bi_iter.bi_size >> SECTOR_SHIFT;
-
-       return 0;
  }
  
  static inline int nullb_setup_copy_write(struct nullb *nullb,
@@ -1334,20 +1331,21 @@ static int null_handle_rq(struct nullb_cmd *cmd)
         sector_t sector = blk_rq_pos(rq);
         struct req_iterator iter;
         struct bio_vec bvec;
+       bool fua = rq->cmd_flags & REQ_FUA;
  
         if (rq->cmd_flags & REQ_COPY) {
                 if (op_is_write(req_op(rq)))
-                       return nullb_setup_copy_write(nullb, rq->bio,
-                                               rq->cmd_flags & REQ_FUA);
-               return nullb_setup_copy_read(nullb, rq->bio);
+                       return nullb_setup_copy_write(nullb, rq->bio, fua);
+
+               nullb_setup_copy_read(nullb, rq->bio);
+               return 0;
         }
  
         spin_lock_irq(&nullb->lock);
         rq_for_each_segment(bvec, rq, iter) {
                 len = bvec.bv_len;
                 err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset,
-                                    op_is_write(req_op(rq)), sector,
-                                    rq->cmd_flags & REQ_FUA);
+                                    op_is_write(req_op(rq)), sector, fua);
                 if (err) {
                         spin_unlock_irq(&nullb->lock);
                         return err;
@@ -1368,12 +1366,13 @@ static int null_handle_bio(struct nullb_cmd *cmd)
         sector_t sector = bio->bi_iter.bi_sector;
         struct bio_vec bvec;
         struct bvec_iter iter;
+       bool fua = bio->bi_opf & REQ_FUA;
  
         if (bio->bi_opf & REQ_COPY) {
                 if (op_is_write(bio_op(bio)))
-                       return nullb_setup_copy_write(nullb, bio,
-                                                       bio->bi_opf & REQ_FUA);
-               return nullb_setup_copy_read(nullb, bio);
+                       return nullb_setup_copy_write(nullb, bio, fua);
+               nullb_setup_copy_read(nullb, bio);
+               return 0;
         }
  



[...]
  
+struct nullb_copy_token {
+	char subsys[5];
+	struct nullb *nullb;
+	u64 sector_in;
+	u64 sectors;
+};
+

why not use sector_t ?

diff --git a/drivers/block/null_blk/null_blk.h b/drivers/block/null_blk/null_blk.h
index c67c098d92fa..ffa4b6a6d19b 100644
--- a/drivers/block/null_blk/null_blk.h
+++ b/drivers/block/null_blk/null_blk.h
@@ -70,8 +70,8 @@ enum {
  struct nullb_copy_token {
         char subsys[5];
         struct nullb *nullb;
-       u64 sector_in;
-       u64 sectors;
+       sector_t sector_in;
+       sector_t sectors;
  };
  

-ck
Nitesh Shetty April 13, 2023, 10:36 a.m. UTC | #2
On 23/04/13 06:28AM, Chaitanya Kulkarni wrote:
>On 4/11/23 01:10, Anuj Gupta wrote:
>> From: Nitesh Shetty <nj.shetty@samsung.com>
>>
>> Implementation is based on the existing read and write infrastructure.
>> copy_max_bytes: a new configfs and module parameter is introduced, which
>> can be used to set the hardware/driver supported maximum copy limit.
>>
>> Suggested-by: Damien Le Moal <damien.lemoal@opensource.wdc.com>
>> Signed-off-by: Anuj Gupta <anuj20.g@samsung.com>
>> Signed-off-by: Nitesh Shetty <nj.shetty@samsung.com>
>> Signed-off-by: Vincent Fu <vincent.fu@samsung.com>
>> ---
>>   drivers/block/null_blk/main.c     | 101 ++++++++++++++++++++++++++++++
>>   drivers/block/null_blk/null_blk.h |   8 +++
>>   2 files changed, 109 insertions(+)
>>
>> diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
>> index bc2c58724df3..e273e18ace74 100644
>> --- a/drivers/block/null_blk/main.c
>> +++ b/drivers/block/null_blk/main.c
>> @@ -157,6 +157,10 @@ static int g_max_sectors;
>>   module_param_named(max_sectors, g_max_sectors, int, 0444);
>>   MODULE_PARM_DESC(max_sectors, "Maximum size of a command (in 512B sectors)");
>>
>> +static int g_copy_max_bytes = COPY_MAX_BYTES;
>
>how about the following? It matches the nullb_device->copy_max_bytes type ..
>
>-static int g_copy_max_bytes = COPY_MAX_BYTES;
>-module_param_named(copy_max_bytes, g_copy_max_bytes, int, 0444);
>+static unsigned long g_copy_max_bytes = COPY_MAX_BYTES;
>+module_param_named(copy_max_bytes, g_copy_max_bytes, ulong, 0444);
>
>[...]
>

acked

>> @@ -631,6 +637,7 @@ static ssize_t memb_group_features_show(struct config_item *item, char *page)
>>   			"badblocks,blocking,blocksize,cache_size,"
>>   			"completion_nsec,discard,home_node,hw_queue_depth,"
>>   			"irqmode,max_sectors,mbps,memory_backed,no_sched,"
>> +			"copy_max_bytes,"
>>   			"poll_queues,power,queue_mode,shared_tag_bitmap,size,"
>>   			"submit_queues,use_per_node_hctx,virt_boundary,zoned,"
>>   			"zone_capacity,zone_max_active,zone_max_open,"
>
>why not ?
>
>@@ -637,11 +637,12 @@ static ssize_t memb_group_features_show(struct config_item *item, char *page)
>                         "badblocks,blocking,blocksize,cache_size,"
>                         "completion_nsec,discard,home_node,hw_queue_depth,"
>                         "irqmode,max_sectors,mbps,memory_backed,no_sched,"
>-                       "copy_max_bytes,"
>                         "poll_queues,power,queue_mode,shared_tag_bitmap,size,"
>                         "submit_queues,use_per_node_hctx,virt_boundary,zoned,"
>                         "zone_capacity,zone_max_active,zone_max_open,"
>-                       "zone_nr_conv,zone_offline,zone_readonly,zone_size\n");
>+                       "zone_nr_conv,zone_offline,zone_readonly,zone_size,"
>+                       "copy_max_bytes\n");
>  }
>
>[...]
>

acked. One doubt: I see checkpatch complains
"WARNING: quoted string split across lines".
Is there a graceful way to avoid this warning?
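
(As an aside, an untested sketch of one way to sidestep the split-string
warning: build the features list from nullb_device_attrs[] at show time
instead of hand-maintaining the literal. This is only an illustration, not
part of this series, and assumes the attribute array stays NULL-terminated:

static ssize_t memb_group_features_show(struct config_item *item, char *page)
{
	struct configfs_attribute **attr;
	size_t len = 0;

	/* emit a comma-separated list of every configfs attribute name */
	for (attr = nullb_device_attrs; *attr; attr++)
		len += scnprintf(page + len, PAGE_SIZE - len, "%s%s",
				 attr == nullb_device_attrs ? "" : ",",
				 (*attr)->ca_name);

	return len + scnprintf(page + len, PAGE_SIZE - len, "\n");
}

With that, adding copy_max_bytes to the list would need no string edit at all.)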

>+static inline int nullb_setup_copy_read(struct nullb *nullb,
>+		struct bio *bio)
>+{
>+	struct nullb_copy_token *token = bvec_kmap_local(&bio->bi_io_vec[0]);
>+
>+	memcpy(token->subsys, "nullb", 5);
>
>do you really need to use memcpy here? Can token->subsys be a pointer
>and be set with an assignment, token->subsys = nullb?
>

We do have token->nullb, which stores this device.
The idea behind token->subsys is to differentiate between different types of
copies, like copy between namespaces, across namespaces, etc.
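
(For illustration only: if subsys did become a pointer as suggested above,
the token could still distinguish copy types by pointing at a per-subsystem
string constant and comparing pointers on the write side. A rough, untested
sketch; the null_blk_copy_subsys name is made up here and is not part of
this series:

/* one such tag per copy-capable subsystem */
static const char null_blk_copy_subsys[] = "nullb";

struct nullb_copy_token {
	const char *subsys;	/* which subsystem built the token */
	struct nullb *nullb;	/* which device the data was read from */
	sector_t sector_in;
	sector_t sectors;
};

/* read side would then do: token->subsys = null_blk_copy_subsys; */

/* write side, replacing the memcmp() check: */
static inline bool nullb_copy_token_valid(struct nullb *nullb,
					  struct nullb_copy_token *token)
{
	return token->subsys == null_blk_copy_subsys && token->nullb == nullb;
}

A token built by another subsystem would carry a different pointer, so the
check still rejects it.)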

>+	token->sector_in = bio->bi_iter.bi_sector;
>+	token->nullb = nullb;
>+	token->sectors = bio->bi_iter.bi_size >> SECTOR_SHIFT;
>+
>+	return 0;
>+}
>+
>
>no point in returning 0; use a local bool for fua instead of repeating the
>expression, and there is no need to fold the line for nullb_setup_copy_read().
>That makes it easier to read and removes extra lines and indentation, see below :-
>

acked.

>-static inline int nullb_setup_copy_read(struct nullb *nullb,
>-               struct bio *bio)
>+static inline void nullb_setup_copy_read(struct nullb *nullb, struct bio *bio)
>  {
>         struct nullb_copy_token *token = bvec_kmap_local(&bio->bi_io_vec[0]);
>
>-       memcpy(token->subsys, "nullb", 5);
>+       token->subsys = "nullb;

If you meant token->subsys = "nullb", yeah, we can add this in the next
version.

>         token->sector_in = bio->bi_iter.bi_sector;
>         token->nullb = nullb;
>         token->sectors = bio->bi_iter.bi_size >> SECTOR_SHIFT;
>-
>-       return 0;
>  }
>
>  static inline int nullb_setup_copy_write(struct nullb *nullb,
>@@ -1334,20 +1331,21 @@ static int null_handle_rq(struct nullb_cmd *cmd)
>         sector_t sector = blk_rq_pos(rq);
>         struct req_iterator iter;
>         struct bio_vec bvec;
>+       bool fua = rq->cmd_flags & REQ_FUA;
>
>         if (rq->cmd_flags & REQ_COPY) {
>                 if (op_is_write(req_op(rq)))
>-                       return nullb_setup_copy_write(nullb, rq->bio,
>-                                               rq->cmd_flags & REQ_FUA);
>-               return nullb_setup_copy_read(nullb, rq->bio);
>+                       return nullb_setup_copy_write(nullb, rq->bio, fua);
>+
>+               nullb_setup_copy_read(nullb, rq->bio);
>+               return 0;
>         }
>
>         spin_lock_irq(&nullb->lock);
>         rq_for_each_segment(bvec, rq, iter) {
>                 len = bvec.bv_len;
>                 err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset,
>-                                    op_is_write(req_op(rq)), sector,
>-                                    rq->cmd_flags & REQ_FUA);
>+                                    op_is_write(req_op(rq)), sector, fua);
>                 if (err) {
>                         spin_unlock_irq(&nullb->lock);
>                         return err;
>@@ -1368,12 +1366,13 @@ static int null_handle_bio(struct nullb_cmd *cmd)
>         sector_t sector = bio->bi_iter.bi_sector;
>         struct bio_vec bvec;
>         struct bvec_iter iter;
>+       bool fua = bio->bi_opf & REQ_FUA;
>
>         if (bio->bi_opf & REQ_COPY) {
>                 if (op_is_write(bio_op(bio)))
>-                       return nullb_setup_copy_write(nullb, bio,
>-                                                       bio->bi_opf & REQ_FUA);
>-               return nullb_setup_copy_read(nullb, bio);
>+                       return nullb_setup_copy_write(nullb, bio, fua);
>+               nullb_setup_copy_read(nullb, bio);
>+               return 0;
>         }
>
>
>
>
>[...]
>

acked

>+struct nullb_copy_token {
>+	char subsys[5];
>+	struct nullb *nullb;
>+	u64 sector_in;
>+	u64 sectors;
>+};
>+
>
>why not use sector_t ?
>
>diff --git a/drivers/block/null_blk/null_blk.h b/drivers/block/null_blk/null_blk.h
>index c67c098d92fa..ffa4b6a6d19b 100644
>--- a/drivers/block/null_blk/null_blk.h
>+++ b/drivers/block/null_blk/null_blk.h
>@@ -70,8 +70,8 @@ enum {
>  struct nullb_copy_token {
>         char subsys[5];
>         struct nullb *nullb;
>-       u64 sector_in;
>-       u64 sectors;
>+       sector_t sector_in;
>+       sector_t sectors;
>  };
>
>

acked

-- Thank you,
-- Nitesh Shetty

Patch

diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
index bc2c58724df3..e273e18ace74 100644
--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -157,6 +157,10 @@  static int g_max_sectors;
 module_param_named(max_sectors, g_max_sectors, int, 0444);
 MODULE_PARM_DESC(max_sectors, "Maximum size of a command (in 512B sectors)");
 
+static int g_copy_max_bytes = COPY_MAX_BYTES;
+module_param_named(copy_max_bytes, g_copy_max_bytes, int, 0444);
+MODULE_PARM_DESC(copy_max_bytes, "Maximum size of a copy command (in bytes)");
+
 static unsigned int nr_devices = 1;
 module_param(nr_devices, uint, 0444);
 MODULE_PARM_DESC(nr_devices, "Number of devices to register");
@@ -409,6 +413,7 @@  NULLB_DEVICE_ATTR(home_node, uint, NULL);
 NULLB_DEVICE_ATTR(queue_mode, uint, NULL);
 NULLB_DEVICE_ATTR(blocksize, uint, NULL);
 NULLB_DEVICE_ATTR(max_sectors, uint, NULL);
+NULLB_DEVICE_ATTR(copy_max_bytes, uint, NULL);
 NULLB_DEVICE_ATTR(irqmode, uint, NULL);
 NULLB_DEVICE_ATTR(hw_queue_depth, uint, NULL);
 NULLB_DEVICE_ATTR(index, uint, NULL);
@@ -550,6 +555,7 @@  static struct configfs_attribute *nullb_device_attrs[] = {
 	&nullb_device_attr_queue_mode,
 	&nullb_device_attr_blocksize,
 	&nullb_device_attr_max_sectors,
+	&nullb_device_attr_copy_max_bytes,
 	&nullb_device_attr_irqmode,
 	&nullb_device_attr_hw_queue_depth,
 	&nullb_device_attr_index,
@@ -631,6 +637,7 @@  static ssize_t memb_group_features_show(struct config_item *item, char *page)
 			"badblocks,blocking,blocksize,cache_size,"
 			"completion_nsec,discard,home_node,hw_queue_depth,"
 			"irqmode,max_sectors,mbps,memory_backed,no_sched,"
+			"copy_max_bytes,"
 			"poll_queues,power,queue_mode,shared_tag_bitmap,size,"
 			"submit_queues,use_per_node_hctx,virt_boundary,zoned,"
 			"zone_capacity,zone_max_active,zone_max_open,"
@@ -693,6 +700,7 @@  static struct nullb_device *null_alloc_dev(void)
 	dev->queue_mode = g_queue_mode;
 	dev->blocksize = g_bs;
 	dev->max_sectors = g_max_sectors;
+	dev->copy_max_bytes = g_copy_max_bytes;
 	dev->irqmode = g_irqmode;
 	dev->hw_queue_depth = g_hw_queue_depth;
 	dev->blocking = g_blocking;
@@ -1242,6 +1250,81 @@  static int null_transfer(struct nullb *nullb, struct page *page,
 	return err;
 }
 
+static inline int nullb_setup_copy_read(struct nullb *nullb,
+		struct bio *bio)
+{
+	struct nullb_copy_token *token = bvec_kmap_local(&bio->bi_io_vec[0]);
+
+	memcpy(token->subsys, "nullb", 5);
+	token->sector_in = bio->bi_iter.bi_sector;
+	token->nullb = nullb;
+	token->sectors = bio->bi_iter.bi_size >> SECTOR_SHIFT;
+
+	return 0;
+}
+
+static inline int nullb_setup_copy_write(struct nullb *nullb,
+		struct bio *bio, bool is_fua)
+{
+	struct nullb_copy_token *token = bvec_kmap_local(&bio->bi_io_vec[0]);
+	sector_t sector_in, sector_out;
+	void *in, *out;
+	size_t rem, temp;
+	unsigned long offset_in, offset_out;
+	struct nullb_page *t_page_in, *t_page_out;
+	int ret = -EIO;
+
+	if (unlikely(memcmp(token->subsys, "nullb", 5)))
+		return -EINVAL;
+	if (unlikely(token->nullb != nullb))
+		return -EINVAL;
+	if (WARN_ON(token->sectors != bio->bi_iter.bi_size >> SECTOR_SHIFT))
+		return -EINVAL;
+
+	sector_in = token->sector_in;
+	sector_out = bio->bi_iter.bi_sector;
+	rem = token->sectors << SECTOR_SHIFT;
+
+	spin_lock_irq(&nullb->lock);
+	while (rem > 0) {
+		temp = min_t(size_t, nullb->dev->blocksize, rem);
+		offset_in = (sector_in & SECTOR_MASK) << SECTOR_SHIFT;
+		offset_out = (sector_out & SECTOR_MASK) << SECTOR_SHIFT;
+
+		if (null_cache_active(nullb) && !is_fua)
+			null_make_cache_space(nullb, PAGE_SIZE);
+
+		t_page_in = null_lookup_page(nullb, sector_in, false,
+			!null_cache_active(nullb));
+		if (!t_page_in)
+			goto err;
+		t_page_out = null_insert_page(nullb, sector_out,
+			!null_cache_active(nullb) || is_fua);
+		if (!t_page_out)
+			goto err;
+
+		in = kmap_local_page(t_page_in->page);
+		out = kmap_local_page(t_page_out->page);
+
+		memcpy(out + offset_out, in + offset_in, temp);
+		kunmap_local(out);
+		kunmap_local(in);
+		__set_bit(sector_out & SECTOR_MASK, t_page_out->bitmap);
+
+		if (is_fua)
+			null_free_sector(nullb, sector_out, true);
+
+		rem -= temp;
+		sector_in += temp >> SECTOR_SHIFT;
+		sector_out += temp >> SECTOR_SHIFT;
+	}
+
+	ret = 0;
+err:
+	spin_unlock_irq(&nullb->lock);
+	return ret;
+}
+
 static int null_handle_rq(struct nullb_cmd *cmd)
 {
 	struct request *rq = cmd->rq;
@@ -1252,6 +1335,13 @@  static int null_handle_rq(struct nullb_cmd *cmd)
 	struct req_iterator iter;
 	struct bio_vec bvec;
 
+	if (rq->cmd_flags & REQ_COPY) {
+		if (op_is_write(req_op(rq)))
+			return nullb_setup_copy_write(nullb, rq->bio,
+						rq->cmd_flags & REQ_FUA);
+		return nullb_setup_copy_read(nullb, rq->bio);
+	}
+
 	spin_lock_irq(&nullb->lock);
 	rq_for_each_segment(bvec, rq, iter) {
 		len = bvec.bv_len;
@@ -1279,6 +1369,13 @@  static int null_handle_bio(struct nullb_cmd *cmd)
 	struct bio_vec bvec;
 	struct bvec_iter iter;
 
+	if (bio->bi_opf & REQ_COPY) {
+		if (op_is_write(bio_op(bio)))
+			return nullb_setup_copy_write(nullb, bio,
+							bio->bi_opf & REQ_FUA);
+		return nullb_setup_copy_read(nullb, bio);
+	}
+
 	spin_lock_irq(&nullb->lock);
 	bio_for_each_segment(bvec, bio, iter) {
 		len = bvec.bv_len;
@@ -2109,6 +2206,10 @@  static int null_add_dev(struct nullb_device *dev)
 		dev->max_sectors = queue_max_hw_sectors(nullb->q);
 	dev->max_sectors = min(dev->max_sectors, BLK_DEF_MAX_SECTORS);
 	blk_queue_max_hw_sectors(nullb->q, dev->max_sectors);
+	blk_queue_max_copy_sectors_hw(nullb->q,
+			       dev->copy_max_bytes >> SECTOR_SHIFT);
+	if (dev->copy_max_bytes)
+		blk_queue_flag_set(QUEUE_FLAG_COPY, nullb->disk->queue);
 
 	if (dev->virt_boundary)
 		blk_queue_virt_boundary(nullb->q, PAGE_SIZE - 1);
diff --git a/drivers/block/null_blk/null_blk.h b/drivers/block/null_blk/null_blk.h
index eb5972c50be8..c67c098d92fa 100644
--- a/drivers/block/null_blk/null_blk.h
+++ b/drivers/block/null_blk/null_blk.h
@@ -67,6 +67,13 @@  enum {
 	NULL_Q_MQ	= 2,
 };
 
+struct nullb_copy_token {
+	char subsys[5];
+	struct nullb *nullb;
+	u64 sector_in;
+	u64 sectors;
+};
+
 struct nullb_device {
 	struct nullb *nullb;
 	struct config_item item;
@@ -102,6 +109,7 @@  struct nullb_device {
 	unsigned int queue_mode; /* block interface */
 	unsigned int blocksize; /* block size */
 	unsigned int max_sectors; /* Max sectors per command */
+	unsigned long copy_max_bytes; /* Max copy offload length in bytes */
 	unsigned int irqmode; /* IRQ completion handler */
 	unsigned int hw_queue_depth; /* queue depth */
 	unsigned int index; /* index of the disk, only valid with a disk */