[RFC,3/3] dm: add support for IO polling

Message ID 20201020065420.124885-4-jefflexu@linux.alibaba.com (mailing list archive)
State Changes Requested, archived
Delegated to: Mike Snitzer
Series: Add support of iopoll for dm device

Commit Message

Jingbo Xu Oct. 20, 2020, 6:54 a.m. UTC
The cookie was initially designed as a per-bio concept. That doesn't
work well once a bio needs to be split, and it becomes a real issue
when adding iopoll support for dm devices.

The current implementation is simple. The cookie returned by the dm
device is effectively unused, since it is merely the cookie of one of
the cloned bios. Polling a dm device actually polls all hardware
queues (in poll mode) of all underlying target devices.

Signed-off-by: Jeffle Xu <jefflexu@linux.alibaba.com>
---
 drivers/md/dm-core.h  |  1 +
 drivers/md/dm-table.c | 30 ++++++++++++++++++++++++++++++
 drivers/md/dm.c       | 39 +++++++++++++++++++++++++++++++++++++++
 3 files changed, 70 insertions(+)

Comments

Mike Snitzer Oct. 21, 2020, 8:49 p.m. UTC | #1
On Tue, Oct 20 2020 at  2:54am -0400,
Jeffle Xu <jefflexu@linux.alibaba.com> wrote:

> The cookie was initially designed as a per-bio concept. That doesn't
> work well once a bio needs to be split, and it becomes a real issue
> when adding iopoll support for dm devices.
> 
> The current implementation is simple. The cookie returned by the dm
> device is effectively unused, since it is merely the cookie of one of
> the cloned bios. Polling a dm device actually polls all hardware
> queues (in poll mode) of all underlying target devices.
> 
> Signed-off-by: Jeffle Xu <jefflexu@linux.alibaba.com>
> ---
>  drivers/md/dm-core.h  |  1 +
>  drivers/md/dm-table.c | 30 ++++++++++++++++++++++++++++++
>  drivers/md/dm.c       | 39 +++++++++++++++++++++++++++++++++++++++
>  3 files changed, 70 insertions(+)
> 
> diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
> index d522093cb39d..f18e066beffe 100644
> --- a/drivers/md/dm-core.h
> +++ b/drivers/md/dm-core.h
> @@ -187,4 +187,5 @@ extern atomic_t dm_global_event_nr;
>  extern wait_queue_head_t dm_global_eventq;
>  void dm_issue_global_event(void);
>  
> +int dm_io_poll(struct request_queue *q, blk_qc_t cookie);
>  #endif
> diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
> index ce543b761be7..634b79842519 100644
> --- a/drivers/md/dm-table.c
> +++ b/drivers/md/dm-table.c
> @@ -1809,6 +1809,31 @@ static bool dm_table_requires_stable_pages(struct dm_table *t)
>  	return false;
>  }
>  
> +static int device_not_support_poll(struct dm_target *ti, struct dm_dev *dev,
> +					   sector_t start, sector_t len, void *data)
> +{
> +	struct request_queue *q = bdev_get_queue(dev->bdev);
> +
> +	return q && !test_bit(QUEUE_FLAG_POLL, &q->queue_flags);
> +}
> +
> +bool dm_table_supports_poll(struct dm_table *t)
> +{
> +	struct dm_target *ti;
> +	unsigned int i;
> +
> +	/* Ensure that all targets support polling. */
> +	for (i = 0; i < dm_table_get_num_targets(t); i++) {
> +		ti = dm_table_get_target(t, i);
> +
> +		if (!ti->type->iterate_devices ||
> +		    ti->type->iterate_devices(ti, device_not_support_poll, NULL))
> +			return false;
> +	}
> +
> +	return true;
> +}
> +
>  void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
>  			       struct queue_limits *limits)
>  {
> @@ -1901,6 +1926,11 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
>  #endif
>  
>  	blk_queue_update_readahead(q);
> +
> +	if (dm_table_supports_poll(t)) {
> +		q->poll_fn = dm_io_poll;
> +		blk_queue_flag_set(QUEUE_FLAG_POLL, q);
> +	}
>  }

You really want to implement the !dm_table_supports_poll case too on the
off chance that a DM table reload swaps in a new mapping that doesn't
support polling (be it the error target or whatever).
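
A minimal sketch of that fallback, reusing only the interfaces this
patch already touches (q->poll_fn, QUEUE_FLAG_POLL) plus
blk_queue_flag_clear(); untested, for illustration only:

	if (dm_table_supports_poll(t)) {
		q->poll_fn = dm_io_poll;
		blk_queue_flag_set(QUEUE_FLAG_POLL, q);
	} else {
		/*
		 * A reload may swap in a mapping (e.g. the error target)
		 * that cannot be polled: drop the hook and clear the flag.
		 */
		q->poll_fn = NULL;
		blk_queue_flag_clear(QUEUE_FLAG_POLL, q);
	}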


>  unsigned int dm_table_get_num_targets(struct dm_table *t)
> diff --git a/drivers/md/dm.c b/drivers/md/dm.c
> index c18fc2548518..4eceaf87ffd4 100644
> --- a/drivers/md/dm.c
> +++ b/drivers/md/dm.c
> @@ -1666,6 +1666,45 @@ static blk_qc_t dm_submit_bio(struct bio *bio)
>  	return ret;
>  }
>  
> +static int dm_poll_one_dev(struct request_queue *q, blk_qc_t cookie)
> +{
> +	/* Iterate polling on all polling queues for mq device */
> +	if (queue_is_mq(q)) {
> +		struct blk_mq_hw_ctx *hctx;
> +		int i, ret = 0;
> +
> +		if (!percpu_ref_tryget(&q->q_usage_counter))
> +			return 0;
> +
> +		queue_for_each_poll_hw_ctx(q, hctx, i) {
> +			ret += q->mq_ops->poll(hctx);
> +		}
> +
> +		percpu_ref_put(&q->q_usage_counter);
> +		return ret;
> +	} else
> +		return q->poll_fn(q, cookie);
> +}
> +
> +int dm_io_poll(struct request_queue *q, blk_qc_t cookie)
> +{
> +	struct mapped_device *md = q->queuedata;
> +	struct dm_table *table;
> +	struct dm_dev_internal *dd;
> +	int srcu_idx;
> +	int ret = 0;
> +
> +	table = dm_get_live_table(md, &srcu_idx);
> +	if (!table)
> +		goto out;
> +
> +	list_for_each_entry(dd, dm_table_get_devices(table), list)
> +		ret += dm_poll_one_dev(bdev_get_queue(dd->dm_dev->bdev), cookie);

Definitely need to use ti->type->iterate_devices, because otherwise
you could be iterating over non-data devices that have no business
being polled.  Ideally you could use the cookie to find the unique bio
in question and quickly look up the associated DM target, then use
ti->type->iterate_devices from there.

But short of having useful cookies, you should iterate through all
targets in the table and call ti->type->iterate_devices for each.

Mike
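
A rough sketch of the target-based iteration suggested above; the
dm_poll_data struct and dm_poll_device() callback are illustrative
names, not part of this patch, and the cookie is still carried along
unused.  Untested:

	struct dm_poll_data {
		blk_qc_t cookie;
		int count;
	};

	static int dm_poll_device(struct dm_target *ti, struct dm_dev *dev,
				  sector_t start, sector_t len, void *data)
	{
		struct dm_poll_data *pd = data;

		/* iterate_devices only visits the target's data devices */
		pd->count += dm_poll_one_dev(bdev_get_queue(dev->bdev),
					     pd->cookie);
		return 0;
	}

	int dm_io_poll(struct request_queue *q, blk_qc_t cookie)
	{
		struct mapped_device *md = q->queuedata;
		struct dm_poll_data pd = { .cookie = cookie };
		struct dm_table *table;
		int i, srcu_idx;

		table = dm_get_live_table(md, &srcu_idx);
		if (!table)
			goto out;

		for (i = 0; i < dm_table_get_num_targets(table); i++) {
			struct dm_target *ti = dm_table_get_target(table, i);

			if (ti->type->iterate_devices)
				ti->type->iterate_devices(ti, dm_poll_device,
							  &pd);
		}
	out:
		dm_put_live_table(md, srcu_idx);
		return pd.count;
	}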

Ming Lei Oct. 22, 2020, 3:14 a.m. UTC | #2
On Tue, Oct 20, 2020 at 02:54:20PM +0800, Jeffle Xu wrote:
> The cookie was initially designed as a per-bio concept. That doesn't
> work well once a bio needs to be split, and it becomes a real issue
> when adding iopoll support for dm devices.
> 
> The current implementation is simple. The cookie returned by the dm
> device is effectively unused, since it is merely the cookie of one of
> the cloned bios. Polling a dm device actually polls all hardware
> queues (in poll mode) of all underlying target devices.
> 
> Signed-off-by: Jeffle Xu <jefflexu@linux.alibaba.com>
> ---
>  drivers/md/dm-core.h  |  1 +
>  drivers/md/dm-table.c | 30 ++++++++++++++++++++++++++++++
>  drivers/md/dm.c       | 39 +++++++++++++++++++++++++++++++++++++++
>  3 files changed, 70 insertions(+)
> 
> diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
> index d522093cb39d..f18e066beffe 100644
> --- a/drivers/md/dm-core.h
> +++ b/drivers/md/dm-core.h
> @@ -187,4 +187,5 @@ extern atomic_t dm_global_event_nr;
>  extern wait_queue_head_t dm_global_eventq;
>  void dm_issue_global_event(void);
>  
> +int dm_io_poll(struct request_queue *q, blk_qc_t cookie);
>  #endif
> diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
> index ce543b761be7..634b79842519 100644
> --- a/drivers/md/dm-table.c
> +++ b/drivers/md/dm-table.c
> @@ -1809,6 +1809,31 @@ static bool dm_table_requires_stable_pages(struct dm_table *t)
>  	return false;
>  }
>  
> +static int device_not_support_poll(struct dm_target *ti, struct dm_dev *dev,
> +					   sector_t start, sector_t len, void *data)
> +{
> +	struct request_queue *q = bdev_get_queue(dev->bdev);
> +
> +	return q && !test_bit(QUEUE_FLAG_POLL, &q->queue_flags);
> +}
> +
> +bool dm_table_supports_poll(struct dm_table *t)
> +{
> +	struct dm_target *ti;
> +	unsigned int i;
> +
> +	/* Ensure that all targets support polling. */
> +	for (i = 0; i < dm_table_get_num_targets(t); i++) {
> +		ti = dm_table_get_target(t, i);
> +
> +		if (!ti->type->iterate_devices ||
> +		    ti->type->iterate_devices(ti, device_not_support_poll, NULL))
> +			return false;
> +	}
> +
> +	return true;
> +}
> +
>  void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
>  			       struct queue_limits *limits)
>  {
> @@ -1901,6 +1926,11 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
>  #endif
>  
>  	blk_queue_update_readahead(q);
> +
> +	if (dm_table_supports_poll(t)) {
> +		q->poll_fn = dm_io_poll;
> +		blk_queue_flag_set(QUEUE_FLAG_POLL, q);
> +	}
>  }
>  
>  unsigned int dm_table_get_num_targets(struct dm_table *t)
> diff --git a/drivers/md/dm.c b/drivers/md/dm.c
> index c18fc2548518..4eceaf87ffd4 100644
> --- a/drivers/md/dm.c
> +++ b/drivers/md/dm.c
> @@ -1666,6 +1666,45 @@ static blk_qc_t dm_submit_bio(struct bio *bio)
>  	return ret;
>  }
>  
> +static int dm_poll_one_dev(struct request_queue *q, blk_qc_t cookie)
> +{
> +	/* Iterate polling on all polling queues for mq device */
> +	if (queue_is_mq(q)) {
> +		struct blk_mq_hw_ctx *hctx;
> +		int i, ret = 0;
> +
> +		if (!percpu_ref_tryget(&q->q_usage_counter))
> +			return 0;
> +
> +		queue_for_each_poll_hw_ctx(q, hctx, i) {
> +			ret += q->mq_ops->poll(hctx);
> +		}

IMO, this way may not be accepted from a performance viewpoint:
->poll() often takes a per-hw-queue lock, so with more than one IO
thread the contention/cache ping-pong on the hw queue resources can
be very serious.

I guess you will have to find a way to pass the correct cookie to
->poll().


Thanks,
Ming
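
A sketch of what a usable cookie would buy on the mq side, using the
blk_qc_t helpers of this era (blk_qc_t_valid(), blk_qc_t_to_queue_num());
illustrative only, and it leaves open how dm would route each cookie to
the right underlying device:

	static int dm_poll_one_dev(struct request_queue *q, blk_qc_t cookie)
	{
		struct blk_mq_hw_ctx *hctx;
		int ret;

		if (!blk_qc_t_valid(cookie))
			return 0;

		if (!percpu_ref_tryget(&q->q_usage_counter))
			return 0;

		/*
		 * Poll only the hw queue the cookie points at, instead of
		 * sweeping every poll queue and contending on their locks.
		 */
		hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)];
		ret = q->mq_ops->poll(hctx);

		percpu_ref_put(&q->q_usage_counter);
		return ret;
	}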


Patch

diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index d522093cb39d..f18e066beffe 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -187,4 +187,5 @@  extern atomic_t dm_global_event_nr;
 extern wait_queue_head_t dm_global_eventq;
 void dm_issue_global_event(void);
 
+int dm_io_poll(struct request_queue *q, blk_qc_t cookie);
 #endif
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index ce543b761be7..634b79842519 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1809,6 +1809,31 @@  static bool dm_table_requires_stable_pages(struct dm_table *t)
 	return false;
 }
 
+static int device_not_support_poll(struct dm_target *ti, struct dm_dev *dev,
+					   sector_t start, sector_t len, void *data)
+{
+	struct request_queue *q = bdev_get_queue(dev->bdev);
+
+	return q && !test_bit(QUEUE_FLAG_POLL, &q->queue_flags);
+}
+
+bool dm_table_supports_poll(struct dm_table *t)
+{
+	struct dm_target *ti;
+	unsigned int i;
+
+	/* Ensure that all targets support polling. */
+	for (i = 0; i < dm_table_get_num_targets(t); i++) {
+		ti = dm_table_get_target(t, i);
+
+		if (!ti->type->iterate_devices ||
+		    ti->type->iterate_devices(ti, device_not_support_poll, NULL))
+			return false;
+	}
+
+	return true;
+}
+
 void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 			       struct queue_limits *limits)
 {
@@ -1901,6 +1926,11 @@  void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 #endif
 
 	blk_queue_update_readahead(q);
+
+	if (dm_table_supports_poll(t)) {
+		q->poll_fn = dm_io_poll;
+		blk_queue_flag_set(QUEUE_FLAG_POLL, q);
+	}
 }
 
 unsigned int dm_table_get_num_targets(struct dm_table *t)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index c18fc2548518..4eceaf87ffd4 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1666,6 +1666,45 @@  static blk_qc_t dm_submit_bio(struct bio *bio)
 	return ret;
 }
 
+static int dm_poll_one_dev(struct request_queue *q, blk_qc_t cookie)
+{
+	/* Iterate polling on all polling queues for mq device */
+	if (queue_is_mq(q)) {
+		struct blk_mq_hw_ctx *hctx;
+		int i, ret = 0;
+
+		if (!percpu_ref_tryget(&q->q_usage_counter))
+			return 0;
+
+		queue_for_each_poll_hw_ctx(q, hctx, i) {
+			ret += q->mq_ops->poll(hctx);
+		}
+
+		percpu_ref_put(&q->q_usage_counter);
+		return ret;
+	} else
+		return q->poll_fn(q, cookie);
+}
+
+int dm_io_poll(struct request_queue *q, blk_qc_t cookie)
+{
+	struct mapped_device *md = q->queuedata;
+	struct dm_table *table;
+	struct dm_dev_internal *dd;
+	int srcu_idx;
+	int ret = 0;
+
+	table = dm_get_live_table(md, &srcu_idx);
+	if (!table)
+		goto out;
+
+	list_for_each_entry(dd, dm_table_get_devices(table), list)
+		ret += dm_poll_one_dev(bdev_get_queue(dd->dm_dev->bdev), cookie);
+out:
+	dm_put_live_table(md, srcu_idx);
+	return ret;
+}
+
 /*-----------------------------------------------------------------
  * An IDR is used to keep track of allocated minor numbers.
  *---------------------------------------------------------------*/