diff mbox series

[v20,09/12] dm: Add support for copy offload

Message ID 20240520102033.9361-10-nj.shetty@samsung.com (mailing list archive)
State New
Headers show
Series [v20,01/12] block: Introduce queue limits and sysfs for copy-offload support | expand

Commit Message

Nitesh Shetty May 20, 2024, 10:20 a.m. UTC
Before enabling copy for dm target, check if underlying devices and
dm target support copy. Avoid split happening inside dm target.
Fail early if the request needs split, currently splitting copy
request is not supported.

Signed-off-by: Nitesh Shetty <nj.shetty@samsung.com>
---
 drivers/md/dm-table.c         | 37 +++++++++++++++++++++++++++++++++++
 drivers/md/dm.c               |  7 +++++++
 include/linux/device-mapper.h |  3 +++
 3 files changed, 47 insertions(+)

Comments

Hannes Reinecke May 21, 2024, 7:11 a.m. UTC | #1
On 5/20/24 12:20, Nitesh Shetty wrote:
> Before enabling copy for dm target, check if underlying devices and
> dm target support copy. Avoid split happening inside dm target.
> Fail early if the request needs split, currently splitting copy
> request is not supported.
> 
> Signed-off-by: Nitesh Shetty <nj.shetty@samsung.com>
> ---
>   drivers/md/dm-table.c         | 37 +++++++++++++++++++++++++++++++++++
>   drivers/md/dm.c               |  7 +++++++
>   include/linux/device-mapper.h |  3 +++
>   3 files changed, 47 insertions(+)
> 
> diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
> index cc66a27c363a..d58c67ecd794 100644
> --- a/drivers/md/dm-table.c
> +++ b/drivers/md/dm-table.c
> @@ -1899,6 +1899,38 @@ static bool dm_table_supports_nowait(struct dm_table *t)
>   	return true;
>   }
>   
> +static int device_not_copy_capable(struct dm_target *ti, struct dm_dev *dev,
> +				   sector_t start, sector_t len, void *data)
> +{
> +	struct request_queue *q = bdev_get_queue(dev->bdev);
> +
> +	return !q->limits.max_copy_sectors;
> +}
> +
> +static bool dm_table_supports_copy(struct dm_table *t)
> +{
> +	struct dm_target *ti;
> +	unsigned int i;
> +
> +	for (i = 0; i < t->num_targets; i++) {
> +		ti = dm_table_get_target(t, i);
> +
> +		if (!ti->copy_offload_supported)
> +			return false;
> +
> +		/*
> +		 * target provides copy support (as implied by setting
> +		 * 'copy_offload_supported')
> +		 * and it relies on _all_ data devices having copy support.
> +		 */
> +		if (!ti->type->iterate_devices ||
> +		    ti->type->iterate_devices(ti, device_not_copy_capable, NULL))
> +			return false;
> +	}
> +
> +	return true;
> +}
> +
>   static int device_not_discard_capable(struct dm_target *ti, struct dm_dev *dev,
>   				      sector_t start, sector_t len, void *data)
>   {
> @@ -1975,6 +2007,11 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
>   		limits->discard_misaligned = 0;
>   	}
>   
> +	if (!dm_table_supports_copy(t)) {
> +		limits->max_copy_sectors = 0;
> +		limits->max_copy_hw_sectors = 0;
> +	}
> +
>   	if (!dm_table_supports_write_zeroes(t))
>   		limits->max_write_zeroes_sectors = 0;
>   
> diff --git a/drivers/md/dm.c b/drivers/md/dm.c
> index 597dd7a25823..070b41b83a97 100644
> --- a/drivers/md/dm.c
> +++ b/drivers/md/dm.c
> @@ -1717,6 +1717,13 @@ static blk_status_t __split_and_process_bio(struct clone_info *ci)
>   	if (unlikely(ci->is_abnormal_io))
>   		return __process_abnormal_io(ci, ti);
>   
> +	if ((unlikely(op_is_copy(ci->bio->bi_opf)) &&
> +	    max_io_len(ti, ci->sector) < ci->sector_count)) {
> +		DMERR("Error, IO size(%u) > max target size(%llu)\n",
> +		      ci->sector_count, max_io_len(ti, ci->sector));
> +		return BLK_STS_IOERR;
> +	}
> +
>   	/*
>   	 * Only support bio polling for normal IO, and the target io is
>   	 * exactly inside the dm_io instance (verified in dm_poll_dm_io)
> diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
> index 82b2195efaca..6868941bc7d9 100644
> --- a/include/linux/device-mapper.h
> +++ b/include/linux/device-mapper.h
> @@ -397,6 +397,9 @@ struct dm_target {
>   	 * bio_set_dev(). NOTE: ideally a target should _not_ need this.
>   	 */
>   	bool needs_bio_set_dev:1;
> +
> +	/* copy offload is supported */
> +	bool copy_offload_supported:1;
>   };
>   
>   void *dm_per_bio_data(struct bio *bio, size_t data_size);

Errm. Not sure this will work. DM tables might be arbitrarily complex,
requiring us to _split_ the copy offload request according to the
underlying component devices. But we explicitly disallowed a split in
one of the earlier patches.
Or am I wrong?

Cheers,

Hannes
Nitesh Shetty May 21, 2024, 2:08 p.m. UTC | #2
On 21/05/24 09:11AM, Hannes Reinecke wrote:
>On 5/20/24 12:20, Nitesh Shetty wrote:
>>Before enabling copy for dm target, check if underlying devices and
>>dm target support copy. Avoid split happening inside dm target.
>>Fail early if the request needs split, currently splitting copy
>>request is not supported.
>>
>>Signed-off-by: Nitesh Shetty <nj.shetty@samsung.com>
>>---
>>@@ -397,6 +397,9 @@ struct dm_target {
>>  	 * bio_set_dev(). NOTE: ideally a target should _not_ need this.
>>  	 */
>>  	bool needs_bio_set_dev:1;
>>+
>>+	/* copy offload is supported */
>>+	bool copy_offload_supported:1;
>>  };
>>  void *dm_per_bio_data(struct bio *bio, size_t data_size);
>
>Errm. Not sure this will work. DM tables might be arbitrarily, 
>requiring us to _split_ the copy offload request according to the 
>underlying component devices. But we explicitly disallowed a split in 
>one of the earlier patches.
>Or am I wrong?
>
Yes, you are right w.r.t. the split; we disallow splits.
But this flag indicates whether we support copy offload in the dm target
or not. At present we support copy offload only in dm-linear.
For other dm targets, even though the underlying device supports copy
offload, a dm target based on it won't support copy offload.
If the present series gets merged, we can test and integrate more
targets.

Regards,
Nitesh Shetty
Hannes Reinecke May 22, 2024, 6:22 a.m. UTC | #3
On 5/21/24 16:08, Nitesh Shetty wrote:
> On 21/05/24 09:11AM, Hannes Reinecke wrote:
>> On 5/20/24 12:20, Nitesh Shetty wrote:
>>> Before enabling copy for dm target, check if underlying devices and
>>> dm target support copy. Avoid split happening inside dm target.
>>> Fail early if the request needs split, currently splitting copy
>>> request is not supported.
>>>
>>> Signed-off-by: Nitesh Shetty <nj.shetty@samsung.com>
>>> ---
>>> @@ -397,6 +397,9 @@ struct dm_target {
>>>       * bio_set_dev(). NOTE: ideally a target should _not_ need this.
>>>       */
>>>      bool needs_bio_set_dev:1;
>>> +
>>> +    /* copy offload is supported */
>>> +    bool copy_offload_supported:1;
>>>  };
>>>  void *dm_per_bio_data(struct bio *bio, size_t data_size);
>>
>> Errm. Not sure this will work. DM tables might be arbitrarily, 
>> requiring us to _split_ the copy offload request according to the 
>> underlying component devices. But we explicitly disallowed a split in 
>> one of the earlier patches.
>> Or am I wrong?
>>
> Yes you are right w.r.to split, we disallow split.
> But this flag indicates whether we support copy offload in dm-target or
> not. At present we support copy offload only in dm-linear.
> For other dm-target, eventhough underlaying device supports copy
> offload, dm-target based on it wont support copy offload.
> If the present series get merged, we can test and integrate more
> targets.
> 
But dm-linear can be concatenated, too; you can easily use dm-linear
to tie several devices together.
Which again would require a copy-offload range to be split.
Hmm?

Cheers,

Hannes
Nitesh Shetty May 22, 2024, 7:10 a.m. UTC | #4
On 22/05/24 08:22AM, Hannes Reinecke wrote:
>On 5/21/24 16:08, Nitesh Shetty wrote:
>>On 21/05/24 09:11AM, Hannes Reinecke wrote:
>>>On 5/20/24 12:20, Nitesh Shetty wrote:
>>>>Before enabling copy for dm target, check if underlying devices and
>>>>dm target support copy. Avoid split happening inside dm target.
>>>>Fail early if the request needs split, currently splitting copy
>>>>request is not supported.
>>>>
>>>>Signed-off-by: Nitesh Shetty <nj.shetty@samsung.com>
>>>>---
>>>>@@ -397,6 +397,9 @@ struct dm_target {
>>>>      * bio_set_dev(). NOTE: ideally a target should _not_ need this.
>>>>      */
>>>>     bool needs_bio_set_dev:1;
>>>>+
>>>>+    /* copy offload is supported */
>>>>+    bool copy_offload_supported:1;
>>>> };
>>>> void *dm_per_bio_data(struct bio *bio, size_t data_size);
>>>
>>>Errm. Not sure this will work. DM tables might be arbitrarily, 
>>>requiring us to _split_ the copy offload request according to the 
>>>underlying component devices. But we explicitly disallowed a split 
>>>in one of the earlier patches.
>>>Or am I wrong?
>>>
>>Yes you are right w.r.to split, we disallow split.
>>But this flag indicates whether we support copy offload in dm-target or
>>not. At present we support copy offload only in dm-linear.
>>For other dm-target, eventhough underlaying device supports copy
>>offload, dm-target based on it wont support copy offload.
>>If the present series get merged, we can test and integrate more
>>targets.
>>
>But dm-linear can be concatenated, too; you can easily use dm-linear
>to tie several devices together.
>Which again would require a copy-offload range to be split.
>Hmm?
>
Sorry, I don't understand the concern here. I see 3 possibilities here.

1. Both src and dst IO lie in the same underlying device. This will succeed.
2. src and dst lie in different devices. This will fail.
	a. src or dst needs to be split, if one or both of them
	span across the underlying block device boundary. In this case we
	fail the IO in the dm layer (refer to patch 9).
	b. src and dst are not split in dm,
	but they won't be merged into one request later, as they belong to
	different block devices.
	Hence the request reaches the driver with a single bio and will fail
	in the driver (refer to patch 7).

Does this address your concern, or do you have something else in mind?

Thank you,
Nitesh Shetty
diff mbox series

Patch

diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index cc66a27c363a..d58c67ecd794 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1899,6 +1899,38 @@  static bool dm_table_supports_nowait(struct dm_table *t)
 	return true;
 }
 
+static int device_not_copy_capable(struct dm_target *ti, struct dm_dev *dev,
+				   sector_t start, sector_t len, void *data)
+{
+	struct request_queue *q = bdev_get_queue(dev->bdev);
+
+	return !q->limits.max_copy_sectors;
+}
+
+static bool dm_table_supports_copy(struct dm_table *t)
+{
+	struct dm_target *ti;
+	unsigned int i;
+
+	for (i = 0; i < t->num_targets; i++) {
+		ti = dm_table_get_target(t, i);
+
+		if (!ti->copy_offload_supported)
+			return false;
+
+		/*
+		 * target provides copy support (as implied by setting
+		 * 'copy_offload_supported')
+		 * and it relies on _all_ data devices having copy support.
+		 */
+		if (!ti->type->iterate_devices ||
+		    ti->type->iterate_devices(ti, device_not_copy_capable, NULL))
+			return false;
+	}
+
+	return true;
+}
+
 static int device_not_discard_capable(struct dm_target *ti, struct dm_dev *dev,
 				      sector_t start, sector_t len, void *data)
 {
@@ -1975,6 +2007,11 @@  int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 		limits->discard_misaligned = 0;
 	}
 
+	if (!dm_table_supports_copy(t)) {
+		limits->max_copy_sectors = 0;
+		limits->max_copy_hw_sectors = 0;
+	}
+
 	if (!dm_table_supports_write_zeroes(t))
 		limits->max_write_zeroes_sectors = 0;
 
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 597dd7a25823..070b41b83a97 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1717,6 +1717,13 @@  static blk_status_t __split_and_process_bio(struct clone_info *ci)
 	if (unlikely(ci->is_abnormal_io))
 		return __process_abnormal_io(ci, ti);
 
+	if ((unlikely(op_is_copy(ci->bio->bi_opf)) &&
+	    max_io_len(ti, ci->sector) < ci->sector_count)) {
+		DMERR("Error, IO size(%u) > max target size(%llu)\n",
+		      ci->sector_count, max_io_len(ti, ci->sector));
+		return BLK_STS_IOERR;
+	}
+
 	/*
 	 * Only support bio polling for normal IO, and the target io is
 	 * exactly inside the dm_io instance (verified in dm_poll_dm_io)
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 82b2195efaca..6868941bc7d9 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -397,6 +397,9 @@  struct dm_target {
 	 * bio_set_dev(). NOTE: ideally a target should _not_ need this.
 	 */
 	bool needs_bio_set_dev:1;
+
+	/* copy offload is supported */
+	bool copy_offload_supported:1;
 };
 
 void *dm_per_bio_data(struct bio *bio, size_t data_size);