
[v4,07/10] dm: Add support for copy offload.

Message ID 20220426101241.30100-8-nj.shetty@samsung.com (mailing list archive)
State New, archived
Series [v4,01/10] block: Introduce queue limits for copy-offload support

Commit Message

Nitesh Shetty April 26, 2022, 10:12 a.m. UTC
Before enabling copy offload for a dm target, check that the underlying
devices and the dm target both support copy. Avoid splits happening
inside the dm target: fail early if the request would need a split,
since splitting a copy request is currently not supported.

Signed-off-by: Nitesh Shetty <nj.shetty@samsung.com>
---
 drivers/md/dm-table.c         | 45 +++++++++++++++++++++++++++++++++++
 drivers/md/dm.c               |  6 +++++
 include/linux/device-mapper.h |  5 ++++
 3 files changed, 56 insertions(+)

Comments

kernel test robot April 28, 2022, 3:54 p.m. UTC | #1
Hi Nitesh,

Thank you for the patch! Perhaps something to improve:

[auto build test WARNING on next-20220422]
[cannot apply to axboe-block/for-next device-mapper-dm/for-next linus/master v5.18-rc4 v5.18-rc3 v5.18-rc2 v5.18-rc4]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch]

url:    https://github.com/intel-lab-lkp/linux/commits/Nitesh-Shetty/block-Introduce-queue-limits-for-copy-offload-support/20220426-201825
base:    e7d6987e09a328d4a949701db40ef63fbb970670
config: s390-randconfig-s032-20220427 (https://download.01.org/0day-ci/archive/20220428/202204282336.7AY0GVKz-lkp@intel.com/config)
compiler: s390-linux-gcc (GCC) 11.3.0
reproduce:
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # apt-get install sparse
        # sparse version: v0.6.4-dirty
        # https://github.com/intel-lab-lkp/linux/commit/913c8c5197fea28ee3c8424e16eadd8b159a91f0
        git remote add linux-review https://github.com/intel-lab-lkp/linux
        git fetch --no-tags linux-review Nitesh-Shetty/block-Introduce-queue-limits-for-copy-offload-support/20220426-201825
        git checkout 913c8c5197fea28ee3c8424e16eadd8b159a91f0
        # save the config file
        mkdir build_dir && cp config build_dir/.config
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-11.3.0 make.cross C=1 CF='-fdiagnostic-prefix -D__CHECK_ENDIAN__' O=build_dir ARCH=s390 SHELL=/bin/bash drivers/md/

If you fix the issue, kindly add following tag as appropriate
Reported-by: kernel test robot <lkp@intel.com>


sparse warnings: (new ones prefixed by >>)
>> drivers/md/dm.c:1602:24: sparse: sparse: incorrect type in return expression (different base types) @@     expected restricted blk_status_t @@     got int @@
   drivers/md/dm.c:1602:24: sparse:     expected restricted blk_status_t
   drivers/md/dm.c:1602:24: sparse:     got int

vim +1602 drivers/md/dm.c

  1582	
  1583	/*
  1584	 * Select the correct strategy for processing a non-flush bio.
  1585	 */
  1586	static blk_status_t __split_and_process_bio(struct clone_info *ci)
  1587	{
  1588		struct bio *clone;
  1589		struct dm_target *ti;
  1590		unsigned len;
  1591	
  1592		ti = dm_table_find_target(ci->map, ci->sector);
  1593		if (unlikely(!ti))
  1594			return BLK_STS_IOERR;
  1595		else if (unlikely(ci->is_abnormal_io))
  1596			return __process_abnormal_io(ci, ti);
  1597	
  1598		if ((unlikely(op_is_copy(ci->bio->bi_opf)) &&
  1599					max_io_len(ti, ci->sector) < ci->sector_count)) {
  1600			DMERR("%s: Error IO size(%u) is greater than maximum target size(%llu)\n",
  1601					__func__, ci->sector_count, max_io_len(ti, ci->sector));
> 1602			return -EIO;
  1603		}
  1604		/*
  1605		 * Only support bio polling for normal IO, and the target io is
  1606		 * exactly inside the dm_io instance (verified in dm_poll_dm_io)
  1607		 */
  1608		ci->submit_as_polled = ci->bio->bi_opf & REQ_POLLED;
  1609	
  1610		len = min_t(sector_t, max_io_len(ti, ci->sector), ci->sector_count);
  1611		setup_split_accounting(ci, len);
  1612		clone = alloc_tio(ci, ti, 0, &len, GFP_NOIO);
  1613		__map_bio(clone);
  1614	
  1615		ci->sector += len;
  1616		ci->sector_count -= len;
  1617	
  1618		return BLK_STS_OK;
  1619	}
  1620
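
A minimal sketch of one way to address the sparse warning (my own assumption, not taken from the robot report or the posted patch): __split_and_process_bio() returns blk_status_t, so the early bail-out should return a blk_status_t value such as BLK_STS_IOERR rather than the errno -EIO, e.g.:

	if (unlikely(op_is_copy(ci->bio->bi_opf)) &&
	    max_io_len(ti, ci->sector) < ci->sector_count) {
		DMERR("%s: Error IO size(%u) is greater than maximum target size(%llu)\n",
				__func__, ci->sector_count, max_io_len(ti, ci->sector));
		/* return a blk_status_t to match the function's return type */
		return BLK_STS_IOERR;
	}

errno_to_blk_status(-EIO) would be an equivalent alternative.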

Patch

diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index a37c7b763643..b7574f179ed6 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1893,6 +1893,38 @@  static bool dm_table_supports_nowait(struct dm_table *t)
 	return true;
 }
 
+static int device_not_copy_capable(struct dm_target *ti, struct dm_dev *dev,
+				      sector_t start, sector_t len, void *data)
+{
+	struct request_queue *q = bdev_get_queue(dev->bdev);
+
+	return !blk_queue_copy(q);
+}
+
+static bool dm_table_supports_copy(struct dm_table *t)
+{
+	struct dm_target *ti;
+	unsigned int i;
+
+	for (i = 0; i < dm_table_get_num_targets(t); i++) {
+		ti = dm_table_get_target(t, i);
+
+		if (!ti->copy_offload_supported)
+			return false;
+
+		/*
+		 * target provides copy support (as implied by setting 'copy_offload_supported')
+		 * and it relies on _all_ data devices having copy support.
+		 */
+		if (ti->copy_offload_supported &&
+		    (!ti->type->iterate_devices ||
+		     ti->type->iterate_devices(ti, device_not_copy_capable, NULL)))
+			return false;
+	}
+
+	return true;
+}
+
 static int device_not_discard_capable(struct dm_target *ti, struct dm_dev *dev,
 				      sector_t start, sector_t len, void *data)
 {
@@ -1981,6 +2013,19 @@  int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 		q->limits.discard_misaligned = 0;
 	}
 
+	if (!dm_table_supports_copy(t)) {
+		blk_queue_flag_clear(QUEUE_FLAG_COPY, q);
+		/* Must also clear copy limits... */
+		q->limits.max_copy_sectors = 0;
+		q->limits.max_hw_copy_sectors = 0;
+		q->limits.max_copy_range_sectors = 0;
+		q->limits.max_hw_copy_range_sectors = 0;
+		q->limits.max_copy_nr_ranges = 0;
+		q->limits.max_hw_copy_nr_ranges = 0;
+	} else {
+		blk_queue_flag_set(QUEUE_FLAG_COPY, q);
+	}
+
 	if (!dm_table_supports_secure_erase(t))
 		q->limits.max_secure_erase_sectors = 0;
 
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 7e3b5bdcf520..b995de127093 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1595,6 +1595,12 @@  static blk_status_t __split_and_process_bio(struct clone_info *ci)
 	else if (unlikely(ci->is_abnormal_io))
 		return __process_abnormal_io(ci, ti);
 
+	if ((unlikely(op_is_copy(ci->bio->bi_opf)) &&
+				max_io_len(ti, ci->sector) < ci->sector_count)) {
+		DMERR("%s: Error IO size(%u) is greater than maximum target size(%llu)\n",
+				__func__, ci->sector_count, max_io_len(ti, ci->sector));
+		return -EIO;
+	}
 	/*
 	 * Only support bio polling for normal IO, and the target io is
 	 * exactly inside the dm_io instance (verified in dm_poll_dm_io)
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index c2a3758c4aaa..9304e640c9b9 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -362,6 +362,11 @@  struct dm_target {
 	 * after returning DM_MAPIO_SUBMITTED from its map function.
 	 */
 	bool accounts_remapped_io:1;
+
+	/*
+	 * copy offload is supported
+	 */
+	bool copy_offload_supported:1;
 };
 
 void *dm_per_bio_data(struct bio *bio, size_t data_size);
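
For illustration only, a hypothetical constructor for a pass-through style target opting in to the new bit-field (the target and its ctr are made up, not part of this patch); a target sets copy_offload_supported in its ctr, and dm_table_supports_copy() additionally requires every underlying data device to advertise copy support before QUEUE_FLAG_COPY is kept on the dm queue:

	/*
	 * Hypothetical example, not part of this series: a single-device
	 * pass-through target that declares it can forward copy bios.
	 */
	static int copy_passthrough_ctr(struct dm_target *ti, unsigned int argc,
					char **argv)
	{
		struct dm_dev *dev;
		int ret;

		if (argc != 1) {
			ti->error = "Invalid argument count";
			return -EINVAL;
		}

		ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &dev);
		if (ret) {
			ti->error = "Device lookup failed";
			return ret;
		}

		ti->private = dev;
		ti->num_flush_bios = 1;
		/*
		 * Opt in to copy offload; dm_table_supports_copy() still
		 * verifies that every underlying device supports copy.
		 */
		ti->copy_offload_supported = 1;
		return 0;
	}

Since copy_offload_supported is a bit-field, assigning 1 (or true) in the ctr is all a target needs to do; which in-tree targets actually set it is outside the scope of this patch.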