[2/2] io-controller: make rt preemption happen in the whole hierarchy

Message ID 4A44833F.8040308@cn.fujitsu.com (mailing list archive)
State New, archived

Commit Message

Gui Jianfeng June 26, 2009, 8:13 a.m. UTC
Let an RT queue preempt a non-RT queue if needed.
Make sure the comparison happens at the same level of the hierarchy.

Signed-off-by: Gui Jianfeng <guijianfeng@cn.fujitsu.com>
---
 block/elevator-fq.c |   28 +++++++++++++++++++++++++++-
 1 files changed, 27 insertions(+), 1 deletions(-)
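
The "same level" requirement is the core of the change: queues and groups
live at different depths of the cgroup hierarchy, and comparing ioprio
classes across depths is not meaningful. A minimal sketch of that lifting
step follows; the struct layout and helper names here are assumptions for
illustration only, not the patchset's actual bfq_find_matching_entity():

/*
 * Illustrative sketch (not part of the patch): lift two entities up
 * their hierarchies until they become siblings, so that fields such
 * as ioprio_class are compared at the same level.
 */
struct sketch_entity {
	struct sketch_entity *parent;	/* NULL at the root */
	int depth;			/* distance from the root */
	int ioprio_class;
};

static void sketch_find_matching(struct sketch_entity **e1,
				 struct sketch_entity **e2)
{
	/* First bring both entities to the same depth. */
	while ((*e1)->depth > (*e2)->depth)
		*e1 = (*e1)->parent;
	while ((*e2)->depth > (*e1)->depth)
		*e2 = (*e2)->parent;

	/* Then walk both up until they share a parent, i.e. are siblings. */
	while ((*e1)->parent != (*e2)->parent) {
		*e1 = (*e1)->parent;
		*e2 = (*e2)->parent;
	}
}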

Comments

Vivek Goyal June 26, 2009, 12:39 p.m. UTC | #1
On Fri, Jun 26, 2009 at 04:13:51PM +0800, Gui Jianfeng wrote:
> Let an RT queue preempt a non-RT queue if needed.
> Make sure the comparison happens at the same level of the hierarchy.
> 
> Signed-off-by: Gui Jianfeng <guijianfeng@cn.fujitsu.com>
> ---
>  block/elevator-fq.c |   28 +++++++++++++++++++++++++++-
>  1 files changed, 27 insertions(+), 1 deletions(-)
> 
> diff --git a/block/elevator-fq.c b/block/elevator-fq.c
> index 1d4ec1f..21d38f5 100644
> --- a/block/elevator-fq.c
> +++ b/block/elevator-fq.c
> @@ -3742,6 +3742,31 @@ int elv_iosched_expire_ioq(struct request_queue *q, int slice_expired,
>  	return ret;
>  }
>  
> +static int check_rt_preemption(struct io_queue *ioq)
> +{
> +	struct hlist_node *node;
> +	struct hlist_head *hhead = &ioq->efqd->rt_ioq_list;
> +	struct io_queue *rt_ioq;
> +	struct io_entity *entity = &ioq->entity;
> +	struct io_entity *new_entity;
> +
> +	rcu_read_lock();
> +	hlist_for_each_entry_rcu(rt_ioq, node, hhead, rt_node) {
> +		new_entity = &rt_ioq->entity;
> +
> +		bfq_find_matching_entity(&entity, &new_entity);
> +
> +		if (new_entity->ioprio_class == IOPRIO_CLASS_RT &&
> +		    entity->ioprio_class != IOPRIO_CLASS_RT) {
> +			rcu_read_unlock();
> +			return 1;
> +		}
> +	}
> +	rcu_read_unlock();
> +
> +	return 0;
> +}
> +
>  /* Common layer function to select the next queue to dispatch from */
>  void *elv_fq_select_ioq(struct request_queue *q, int force)
>  {
> @@ -3823,7 +3848,8 @@ void *elv_fq_select_ioq(struct request_queue *q, int force)
>  	 */
>  	iog = ioq_to_io_group(ioq);
>  
> -	if (!elv_ioq_class_rt(ioq) && iog->busy_rt_queues) {
> +	if (!elv_ioq_class_rt(ioq) &&
> +	    (iog->busy_rt_queues || check_rt_preemption(ioq))) {
>  		/*

Hi Gui,

I am not able to understand why we need the above changes.

The BFQ scheduler already takes care of selecting an RT queue for dispatch
(if that queue is entitled to run).

In case a new RT queue gets backlogged while a BE queue is being served, we
do a preemption check to make sure the RT queue gets to run as soon as
possible.
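
A minimal sketch of the preemption check described above, reusing struct
sketch_entity and sketch_find_matching() from the earlier sketch; the
function name is a hypothetical stand-in for the patchset's actual
preemption path:

/*
 * When a queue becomes backlogged while another queue is active, an RT
 * arrival should preempt a non-RT active queue.  IOPRIO_CLASS_RT is the
 * kernel's RT I/O scheduling class (linux/ioprio.h).
 */
static int sketch_preempt_on_add(struct sketch_entity *active,
				 struct sketch_entity *incoming)
{
	if (!active)
		return 0;	/* nothing to preempt */

	/* Compare the two entities as siblings at a common level. */
	sketch_find_matching(&active, &incoming);

	return incoming->ioprio_class == IOPRIO_CLASS_RT &&
	       active->ioprio_class != IOPRIO_CLASS_RT;
}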

In fact, I think the busy_rt_queues infrastructure is also redundant, and I
plan to get rid of it.

Can you please help me understand what use case you are addressing with the
above patch?

Thanks
Vivek

Patch

diff --git a/block/elevator-fq.c b/block/elevator-fq.c
index 1d4ec1f..21d38f5 100644
--- a/block/elevator-fq.c
+++ b/block/elevator-fq.c
@@ -3742,6 +3742,31 @@ int elv_iosched_expire_ioq(struct request_queue *q, int slice_expired,
 	return ret;
 }
 
+static int check_rt_preemption(struct io_queue *ioq)
+{
+	struct hlist_node *node;
+	struct hlist_head *hhead = &ioq->efqd->rt_ioq_list;
+	struct io_queue *rt_ioq;
+	struct io_entity *entity = &ioq->entity;
+	struct io_entity *new_entity;
+
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(rt_ioq, node, hhead, rt_node) {
+		new_entity = &rt_ioq->entity;
+
+		bfq_find_matching_entity(&entity, &new_entity);
+
+		if (new_entity->ioprio_class == IOPRIO_CLASS_RT &&
+		    entity->ioprio_class != IOPRIO_CLASS_RT) {
+			rcu_read_unlock();
+			return 1;
+		}
+	}
+	rcu_read_unlock();
+
+	return 0;
+}
+
 /* Common layer function to select the next queue to dispatch from */
 void *elv_fq_select_ioq(struct request_queue *q, int force)
 {
@@ -3823,7 +3848,8 @@ void *elv_fq_select_ioq(struct request_queue *q, int force)
 	 */
 	iog = ioq_to_io_group(ioq);
 
-	if (!elv_ioq_class_rt(ioq) && iog->busy_rt_queues) {
+	if (!elv_ioq_class_rt(ioq) &&
+	    (iog->busy_rt_queues || check_rt_preemption(ioq))) {
 		/*
 		 * We simulate this as cfqq timed out so that it gets to bank
 		 * the remaining of its time slice.