
[v1,1/3] xen:rtds: towards work conserving RTDS

Message ID 1502036563-4275-2-git-send-email-mengxu@cis.upenn.edu (mailing list archive)
State New, archived

Commit Message

Meng Xu Aug. 6, 2017, 4:22 p.m. UTC
Make the RTDS scheduler work-conserving without breaking the real-time guarantees.

VCPU model:
Each real-time VCPU is extended to have an extratime flag
and a priority_level field.
When a VCPU's budget is depleted in the current period,
if it has the extratime flag set,
its priority_level will increase by 1 and its budget will be refilled;
otherwise, the VCPU will be moved to the depletedq.

Scheduling policy is modified global EDF:
A VCPU v1 has higher priority than another VCPU v2 if
(i) v1 has a smaller priority_level; or
(ii) v1 has the same priority_level but a smaller deadline.

Queue management:
Run queue holds VCPUs with the extratime flag set and VCPUs with
remaining budget. Run queue is sorted in increasing order of VCPUs' priorities.
Depleted queue holds VCPUs which have the extratime flag cleared and depleted budget.
Replenished queue is not modified.

Signed-off-by: Meng Xu <mengxu@cis.upenn.edu>

---
Changes from RFC v1
Reword comments and commit message
Remove is_work_conserving field from rt_vcpu structure
Use one bit in the VCPU's flags to indicate whether a VCPU will have extra time
Correct comment style
---
 xen/common/sched_rt.c       | 90 ++++++++++++++++++++++++++++++++++++++-------
 xen/include/public/domctl.h |  3 ++
 2 files changed, 79 insertions(+), 14 deletions(-)

Comments

Dario Faggioli Aug. 8, 2017, 2:57 p.m. UTC | #1
On Sun, 2017-08-06 at 12:22 -0400, Meng Xu wrote:
> Make RTDS scheduler work conserving without breaking the real-time
> guarantees.
> 
> VCPU model:
> Each real-time VCPU is extended to have an extratime flag
> and a priority_level field.
> When a VCPU's budget is depleted in the current period,
> if it has extratime flag set,
> its priority_level will increase by 1 and its budget will be
> refilled;
> otherwise, the VCPU will be moved to the depletedq.
> 
> Scheduling policy is modified global EDF:
> A VCPU v1 has higher priority than another VCPU v2 if
> (i) v1 has a smaller priority_level; or
> (ii) v1 has the same priority_level but has a smaller deadline
> 
> Queue management:
> Run queue holds VCPUs with extratime flag set and VCPUs with
> remaining budget. Run queue is sorted in increasing order of VCPUs
> priorities.
> Depleted queue holds VCPUs which have extratime flag cleared and
> depleted budget.
> Replenished queue is not modified.
> 
> Signed-off-by: Meng Xu <mengxu@cis.upenn.edu>
> 
This looks mostly good to me.

There are only a couple of things left, in addition to the
changlog+comment mention to how the 'spare bandwidth' distribution
works, that we agreed upon in the other thread.

> --- a/xen/common/sched_rt.c
> +++ b/xen/common/sched_rt.c 
> @@ -245,6 +258,11 @@ static inline struct list_head *rt_replq(const
> struct scheduler *ops)
>      return &rt_priv(ops)->replq;
>  }
>  
> +static inline bool has_extratime(const struct rt_vcpu *svc)
> +{
> +    return (svc->flags & RTDS_extratime) ? 1 : 0;
> +}
> +
>
Cool... I like 'has_extratime()' soo much better as a name than what it
was before! Thanks. :-)

>  /*
>   * Helper functions for manipulating the runqueue, the depleted
> queue,
>   * and the replenishment events queue.
> @@ -274,6 +292,21 @@ vcpu_on_replq(const struct rt_vcpu *svc)
>  }
>  
>  /*
> + * If v1 priority >= v2 priority, return value > 0
> + * Otherwise, return value < 0
> + */
> +static s_time_t
> +compare_vcpu_priority(const struct rt_vcpu *v1, const struct rt_vcpu
> *v2)
> +{
> +    int prio = v2->priority_level - v1->priority_level;
> +
> +    if ( prio == 0 )
> +    return v2->cur_deadline - v1->cur_deadline;
> +
Indentation.
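For reference, a sketch of the helper with the indentation fixed as requested (semantics unchanged from the hunk quoted above; the actual v2 may differ):

static s_time_t
compare_vcpu_priority(const struct rt_vcpu *v1, const struct rt_vcpu *v2)
{
    int prio = v2->priority_level - v1->priority_level;

    if ( prio == 0 )
        /* Same priority level: the earlier deadline wins. */
        return v2->cur_deadline - v1->cur_deadline;

    /* Lower priority_level means higher priority. */
    return prio;
}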

> @@ -423,15 +459,18 @@ rt_update_deadline(s_time_t now, struct rt_vcpu
> *svc)
>       */
>      svc->last_start = now;
>      svc->cur_budget = svc->budget;
> +    svc->priority_level = 0;
>  
>      /* TRACE */
>      {
>          struct __packed {
>              unsigned vcpu:16, dom:16;
> +            unsigned priority_level;
>              uint64_t cur_deadline, cur_budget;
>          } d;
>
Can you please, and in this very comment, update
tools/xentrace/xenalyze.c and tools/xentrace/formats as well, to take
into account this new field?

> diff --git a/xen/include/public/domctl.h
> b/xen/include/public/domctl.h
> index 0669c31..ba5daa9 100644
> --- a/xen/include/public/domctl.h
> +++ b/xen/include/public/domctl.h
> @@ -360,6 +360,9 @@ typedef struct xen_domctl_sched_credit2 {
>  typedef struct xen_domctl_sched_rtds {
>      uint32_t period;
>      uint32_t budget;
> +#define _XEN_DOMCTL_SCHED_RTDS_extratime 0
> +#define XEN_DOMCTL_SCHED_RTDS_extratime  (1U<<_XEN_DOMCTL_SCHED_RTDS_extratime)
> +    uint32_t flags;
>
I'd add a one liner comment above the flag definition, as, for
instance, how things are done in createdomain:

struct xen_domctl_createdomain {
    /* IN parameters */
    uint32_t ssidref;
    xen_domain_handle_t handle;
 /* Is this an HVM guest (as opposed to a PVH or PV guest)? */
#define _XEN_DOMCTL_CDF_hvm_guest     0
#define XEN_DOMCTL_CDF_hvm_guest      (1U<<_XEN_DOMCTL_CDF_hvm_guest)
 /* Use hardware-assisted paging if available? */
#define _XEN_DOMCTL_CDF_hap           1
#define XEN_DOMCTL_CDF_hap            (1U<<_XEN_DOMCTL_CDF_hap)

Also, consider shortening the name (e.g., by contracting the SCHED_RTDS
part; it does not matter if it's not 100% equal to what's in
sched_rt.c, I think).

This, of course, is just my opinion, and final say belongs to
maintainers of this public interface, which I think means 'THE REST',
and most of them are not Cc-ed. Let me do that...

Regards,
Dario
Meng Xu Aug. 8, 2017, 7:06 p.m. UTC | #2
On Tue, Aug 8, 2017 at 10:57 AM, Dario Faggioli
<dario.faggioli@citrix.com> wrote:
> On Sun, 2017-08-06 at 12:22 -0400, Meng Xu wrote:
>> Make RTDS scheduler work conserving without breaking the real-time
>> guarantees.
>>
>> VCPU model:
>> Each real-time VCPU is extended to have an extratime flag
>> and a priority_level field.
>> When a VCPU's budget is depleted in the current period,
>> if it has extratime flag set,
>> its priority_level will increase by 1 and its budget will be
>> refilled;
>> otherwise, the VCPU will be moved to the depletedq.
>>
>> Scheduling policy is modified global EDF:
>> A VCPU v1 has higher priority than another VCPU v2 if
>> (i) v1 has a smaller priority_level; or
>> (ii) v1 has the same priority_level but has a smaller deadline
>>
>> Queue management:
>> Run queue holds VCPUs with extratime flag set and VCPUs with
>> remaining budget. Run queue is sorted in increasing order of VCPUs
>> priorities.
>> Depleted queue holds VCPUs which have extratime flag cleared and
>> depleted budget.
>> Replenished queue is not modified.
>>
>> Signed-off-by: Meng Xu <mengxu@cis.upenn.edu>
>>
> This looks mostly good to me.
>
> There are only a couple of things left, in addition to the
> changelog+comment mention of how the 'spare bandwidth' distribution
> works, that we agreed upon in the other thread.
>
>> --- a/xen/common/sched_rt.c
>> +++ b/xen/common/sched_rt.c
>> @@ -245,6 +258,11 @@ static inline struct list_head *rt_replq(const
>> struct scheduler *ops)
>>      return &rt_priv(ops)->replq;
>>  }
>>
>> +static inline bool has_extratime(const struct rt_vcpu *svc)
>> +{
>> +    return (svc->flags & RTDS_extratime) ? 1 : 0;
>> +}
>> +
>>
> Cool... I like 'has_extratime()' soo much better as a name than what it
> was before! Thanks. :-)
>

:-)

>>  /*
>>   * Helper functions for manipulating the runqueue, the depleted
>> queue,
>>   * and the replenishment events queue.
>> @@ -274,6 +292,21 @@ vcpu_on_replq(const struct rt_vcpu *svc)
>>  }
>>
>>  /*
>> + * If v1 priority >= v2 priority, return value > 0
>> + * Otherwise, return value < 0
>> + */
>> +static s_time_t
>> +compare_vcpu_priority(const struct rt_vcpu *v1, const struct rt_vcpu
>> *v2)
>> +{
>> +    int prio = v2->priority_level - v1->priority_level;
>> +
>> +    if ( prio == 0 )
>> +    return v2->cur_deadline - v1->cur_deadline;
>> +
> Indentation.

Oh, sorry. Will correct it.

>
>> @@ -423,15 +459,18 @@ rt_update_deadline(s_time_t now, struct rt_vcpu
>> *svc)
>>       */
>>      svc->last_start = now;
>>      svc->cur_budget = svc->budget;
>> +    svc->priority_level = 0;
>>
>>      /* TRACE */
>>      {
>>          struct __packed {
>>              unsigned vcpu:16, dom:16;
>> +            unsigned priority_level;
>>              uint64_t cur_deadline, cur_budget;
>>          } d;
>>
> Can you please, and in this very comment, update
> tools/xentrace/xenalyze.c and tools/xentrace/formats as well, to take
> into account this new field?

Sure. Will do in the next version.

>
>> diff --git a/xen/include/public/domctl.h
>> b/xen/include/public/domctl.h
>> index 0669c31..ba5daa9 100644
>> --- a/xen/include/public/domctl.h
>> +++ b/xen/include/public/domctl.h
>> @@ -360,6 +360,9 @@ typedef struct xen_domctl_sched_credit2 {
>>  typedef struct xen_domctl_sched_rtds {
>>      uint32_t period;
>>      uint32_t budget;
>> +#define _XEN_DOMCTL_SCHED_RTDS_extratime 0
>> +#define XEN_DOMCTL_SCHED_RTDS_extratime  (1U<<_XEN_DOMCTL_SCHED_RTDS_extratime)
>> +    uint32_t flags;
>>
> I'd add a one liner comment above the flag definition, as, for
> instance, how things are done in createdomain:

Sure.

How about comment:
/* Does this VCPU get extratime beyond reserved time? */

>
> struct xen_domctl_createdomain {
>     /* IN parameters */
>     uint32_t ssidref;
>     xen_domain_handle_t handle;
>  /* Is this an HVM guest (as opposed to a PVH or PV guest)? */
> #define _XEN_DOMCTL_CDF_hvm_guest     0
> #define XEN_DOMCTL_CDF_hvm_guest      (1U<<_XEN_DOMCTL_CDF_hvm_guest)
>  /* Use hardware-assisted paging if available? */
> #define _XEN_DOMCTL_CDF_hap           1
> #define XEN_DOMCTL_CDF_hap            (1U<<_XEN_DOMCTL_CDF_hap)
>
> Also, consider shortening the name (e.g., by contracting the SCHED_RTDS
> part; it does not matter if it's not 100% equal to what's in
> sched_rt.c, I think).


How about shortening it to XEN_DOMCTL_RTDS_extra or XEN_DOMCTL_RTDS_extratime?

>
> This, of course, is just my opinion, and final say belongs to
> maintainers of this public interface, which I think means 'THE REST',
> and most of them are not Cc-ed. Let me do that...

Thank you very much!

Best,

Meng
Dario Faggioli Aug. 8, 2017, 10:52 p.m. UTC | #3
On Tue, 2017-08-08 at 12:06 -0700, Meng Xu wrote:
> On Tue, Aug 8, 2017 at 10:57 AM, Dario Faggioli
> <dario.faggioli@citrix.com> wrote:
> > On Sun, 2017-08-06 at 12:22 -0400, Meng Xu wrote:
> > > 
> > > diff --git a/xen/include/public/domctl.h
> > > b/xen/include/public/domctl.h
> > > index 0669c31..ba5daa9 100644
> > > --- a/xen/include/public/domctl.h
> > > +++ b/xen/include/public/domctl.h
> > > @@ -360,6 +360,9 @@ typedef struct xen_domctl_sched_credit2 {
> > >  typedef struct xen_domctl_sched_rtds {
> > >      uint32_t period;
> > >      uint32_t budget;
> > > +#define _XEN_DOMCTL_SCHED_RTDS_extratime 0
> > > +#define XEN_DOMCTL_SCHED_RTDS_extratime  (1U<<_XEN_DOMCTL_SCHED_RTDS_extratime)
> > > +    uint32_t flags;
> > > 
> > 
> > I'd add a one liner comment above the flag definition, as, for
> > instance, how things are done in createdomain:
> 
> Sure.
> 
> How about comment:
> /* Does this VCPU get extratime beyond reserved time? */
> 
'Can this vCPU execute beyond its reserved amount of time?'

> > 
> > struct xen_domctl_createdomain {
> >     /* IN parameters */
> >     uint32_t ssidref;
> >     xen_domain_handle_t handle;
> >  /* Is this an HVM guest (as opposed to a PVH or PV guest)? */
> > #define _XEN_DOMCTL_CDF_hvm_guest     0
> > #define XEN_DOMCTL_CDF_hvm_guest      (1U<<_XEN_DOMCTL_CDF_hvm_guest)
> >  /* Use hardware-assisted paging if available? */
> > #define _XEN_DOMCTL_CDF_hap           1
> > #define XEN_DOMCTL_CDF_hap            (1U<<_XEN_DOMCTL_CDF_hap)
> > 
> > Also, consider shortening the name (e.g., by contracting the
> > SCHED_RTDS
> > part; it does not matter if it's not 100% equal to what's in
> > sched_rt.c, I think).
> 
> 
> How about shorten it to XEN_DOMCTL_RTDS_extra or
> XEN_DOMCTL_RTDS_extratime?
> 
Personally, I'd go for XEN_DOMCTL_SCHEDRT_extra (or _extratime, or
_extrat).

Dario
Meng Xu Aug. 8, 2017, 10:56 p.m. UTC | #4
On Tue, Aug 8, 2017 at 3:52 PM, Dario Faggioli
<dario.faggioli@citrix.com> wrote:
> On Tue, 2017-08-08 at 12:06 -0700, Meng Xu wrote:
>> On Tue, Aug 8, 2017 at 10:57 AM, Dario Faggioli
>> <dario.faggioli@citrix.com> wrote:
>> > On Sun, 2017-08-06 at 12:22 -0400, Meng Xu wrote:
>> > >
>> > > diff --git a/xen/include/public/domctl.h
>> > > b/xen/include/public/domctl.h
>> > > index 0669c31..ba5daa9 100644
>> > > --- a/xen/include/public/domctl.h
>> > > +++ b/xen/include/public/domctl.h
>> > > @@ -360,6 +360,9 @@ typedef struct xen_domctl_sched_credit2 {
>> > >  typedef struct xen_domctl_sched_rtds {
>> > >      uint32_t period;
>> > >      uint32_t budget;
>> > > +#define _XEN_DOMCTL_SCHED_RTDS_extratime 0
>> > > +#define XEN_DOMCTL_SCHED_RTDS_extratime  (1U<<_XEN_DOMCTL_SCHED_RTDS_extratime)
>> > > +    uint32_t flags;
>> > >
>> >
>> > I'd add a one liner comment above the flag definition, as, for
>> > instance, how things are done in createdomain:
>>
>> Sure.
>>
>> How about comment:
>> /* Does this VCPU get extratime beyond reserved time? */
>>
> 'Can this vCPU execute beyond its reserved amount of time?'
>
>> >
>> > struct xen_domctl_createdomain {
>> >     /* IN parameters */
>> >     uint32_t ssidref;
>> >     xen_domain_handle_t handle;
>> >  /* Is this an HVM guest (as opposed to a PVH or PV guest)? */
>> > #define _XEN_DOMCTL_CDF_hvm_guest     0
>> > #define XEN_DOMCTL_CDF_hvm_guest      (1U<<_XEN_DOMCTL_CDF_hvm_guest)
>> >  /* Use hardware-assisted paging if available? */
>> > #define _XEN_DOMCTL_CDF_hap           1
>> > #define XEN_DOMCTL_CDF_hap            (1U<<_XEN_DOMCTL_CDF_hap)
>> >
>> > Also, consider shortening the name (e.g., by contracting the
>> > SCHED_RTDS
>> > part; it does not matter if it's not 100% equal to what's in
>> > sched_rt.c, I think).
>>
>>
>> How about shortening it to XEN_DOMCTL_RTDS_extra or
>> XEN_DOMCTL_RTDS_extratime?
>>
> Personally, I'd go for XEN_DOMCTL_SCHEDRT_extra (or _extratime, or
> _extrat).

OK. I can go with XEN_DOMCTL_SCHEDRT_extra.

Thanks,

Meng

-----------
Meng Xu
PhD Candidate in Computer and Information Science
University of Pennsylvania
http://www.cis.upenn.edu/~mengxu/
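Putting this sub-thread's conclusions together (the one-line comment wording Dario proposed and the shortened XEN_DOMCTL_SCHEDRT_extra name Meng accepted), the public-interface hunk in a later revision might look roughly like the sketch below. This is only an illustration; the v1 patch as posted (see "Patch" below) still uses the longer XEN_DOMCTL_SCHED_RTDS_extratime name and has no comment.

typedef struct xen_domctl_sched_rtds {
    uint32_t period;
    uint32_t budget;
/* Can this vCPU execute beyond its reserved amount of time? */
#define _XEN_DOMCTL_SCHEDRT_extra 0
#define XEN_DOMCTL_SCHEDRT_extra  (1U << _XEN_DOMCTL_SCHEDRT_extra)
    uint32_t flags;
} xen_domctl_sched_rtds_t;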

Patch

diff --git a/xen/common/sched_rt.c b/xen/common/sched_rt.c
index 39f6bee..4e048b9 100644
--- a/xen/common/sched_rt.c
+++ b/xen/common/sched_rt.c
@@ -49,13 +49,15 @@ 
  * A PCPU is feasible if the VCPU can run on this PCPU and (the PCPU is idle or
  * has a lower-priority VCPU running on it.)
  *
- * Each VCPU has a dedicated period and budget.
+ * Each VCPU has a dedicated period, budget and an extratime flag.
  * The deadline of a VCPU is at the end of each period;
  * A VCPU has its budget replenished at the beginning of each period;
  * While scheduled, a VCPU burns its budget.
  * The VCPU needs to finish its budget before its deadline in each period;
  * The VCPU discards its unused budget at the end of each period.
- * If a VCPU runs out of budget in a period, it has to wait until next period.
+ * When a VCPU runs out of budget in a period, if its extratime flag is set,
+ * the VCPU increases its priority_level by 1 and refills its budget; otherwise,
+ * it has to wait until next period.
  *
  * Each VCPU is implemented as a deferable server.
  * When a VCPU has a task running on it, its budget is continuously burned;
@@ -63,7 +65,8 @@ 
  *
  * Queue scheme:
  * A global runqueue and a global depletedqueue for each CPU pool.
- * The runqueue holds all runnable VCPUs with budget, sorted by deadline;
+ * The runqueue holds all runnable VCPUs with budget,
+ * sorted by priority_level and deadline;
  * The depletedqueue holds all VCPUs without budget, unsorted;
  *
  * Note: cpumask and cpupool is supported.
@@ -151,6 +154,14 @@ 
 #define RTDS_depleted (1<<__RTDS_depleted)
 
 /*
+ * RTDS_extratime: Can the vcpu run in the time that is
+ * not part of any real-time reservation, and would therefore
+ * be otherwise left idle?
+ */
+#define __RTDS_extratime    4
+#define RTDS_extratime (1<<__RTDS_extratime)
+
+/*
  * rt tracing events ("only" 512 available!). Check
  * include/public/trace.h for more details.
  */
@@ -201,6 +212,8 @@  struct rt_vcpu {
     struct rt_dom *sdom;
     struct vcpu *vcpu;
 
+    unsigned priority_level;
+
     unsigned flags;              /* mark __RTDS_scheduled, etc.. */
 };
 
@@ -245,6 +258,11 @@  static inline struct list_head *rt_replq(const struct scheduler *ops)
     return &rt_priv(ops)->replq;
 }
 
+static inline bool has_extratime(const struct rt_vcpu *svc)
+{
+    return (svc->flags & RTDS_extratime) ? 1 : 0;
+}
+
 /*
  * Helper functions for manipulating the runqueue, the depleted queue,
  * and the replenishment events queue.
@@ -274,6 +292,21 @@  vcpu_on_replq(const struct rt_vcpu *svc)
 }
 
 /*
+ * If v1 priority >= v2 priority, return value > 0
+ * Otherwise, return value < 0
+ */
+static s_time_t
+compare_vcpu_priority(const struct rt_vcpu *v1, const struct rt_vcpu *v2)
+{
+    int prio = v2->priority_level - v1->priority_level;
+
+    if ( prio == 0 )
+    return v2->cur_deadline - v1->cur_deadline;
+
+    return prio;
+}
+
+/*
  * Debug related code, dump vcpu/cpu information
  */
 static void
@@ -303,6 +336,7 @@  rt_dump_vcpu(const struct scheduler *ops, const struct rt_vcpu *svc)
     cpulist_scnprintf(keyhandler_scratch, sizeof(keyhandler_scratch), mask);
     printk("[%5d.%-2u] cpu %u, (%"PRI_stime", %"PRI_stime"),"
            " cur_b=%"PRI_stime" cur_d=%"PRI_stime" last_start=%"PRI_stime"\n"
+           " \t\t priority_level=%d has_extratime=%d\n"
            " \t\t onQ=%d runnable=%d flags=%x effective hard_affinity=%s\n",
             svc->vcpu->domain->domain_id,
             svc->vcpu->vcpu_id,
@@ -312,6 +346,8 @@  rt_dump_vcpu(const struct scheduler *ops, const struct rt_vcpu *svc)
             svc->cur_budget,
             svc->cur_deadline,
             svc->last_start,
+            svc->priority_level,
+            has_extratime(svc),
             vcpu_on_q(svc),
             vcpu_runnable(svc->vcpu),
             svc->flags,
@@ -423,15 +459,18 @@  rt_update_deadline(s_time_t now, struct rt_vcpu *svc)
      */
     svc->last_start = now;
     svc->cur_budget = svc->budget;
+    svc->priority_level = 0;
 
     /* TRACE */
     {
         struct __packed {
             unsigned vcpu:16, dom:16;
+            unsigned priority_level;
             uint64_t cur_deadline, cur_budget;
         } d;
         d.dom = svc->vcpu->domain->domain_id;
         d.vcpu = svc->vcpu->vcpu_id;
+        d.priority_level = svc->priority_level;
         d.cur_deadline = (uint64_t) svc->cur_deadline;
         d.cur_budget = (uint64_t) svc->cur_budget;
         trace_var(TRC_RTDS_BUDGET_REPLENISH, 1,
@@ -454,7 +493,7 @@  rt_update_deadline(s_time_t now, struct rt_vcpu *svc)
  * cases, if the vcpu with the earliest deadline is what we
  * are dealing with).
  */
-static inline bool_t
+static inline bool
 deadline_queue_remove(struct list_head *queue, struct list_head *elem)
 {
     int pos = 0;
@@ -466,7 +505,7 @@  deadline_queue_remove(struct list_head *queue, struct list_head *elem)
     return !pos;
 }
 
-static inline bool_t
+static inline bool
 deadline_queue_insert(struct rt_vcpu * (*qelem)(struct list_head *),
                       struct rt_vcpu *svc, struct list_head *elem,
                       struct list_head *queue)
@@ -477,7 +516,7 @@  deadline_queue_insert(struct rt_vcpu * (*qelem)(struct list_head *),
     list_for_each ( iter, queue )
     {
         struct rt_vcpu * iter_svc = (*qelem)(iter);
-        if ( svc->cur_deadline <= iter_svc->cur_deadline )
+        if ( compare_vcpu_priority(svc, iter_svc) > 0 )
             break;
         pos++;
     }
@@ -537,8 +576,9 @@  runq_insert(const struct scheduler *ops, struct rt_vcpu *svc)
     ASSERT( !vcpu_on_q(svc) );
     ASSERT( vcpu_on_replq(svc) );
 
-    /* add svc to runq if svc still has budget */
-    if ( svc->cur_budget > 0 )
+    /* add svc to runq if svc still has budget or its extratime is set */
+    if ( svc->cur_budget > 0 ||
+         has_extratime(svc) )
         deadline_runq_insert(svc, &svc->q_elem, runq);
     else
         list_add(&svc->q_elem, &prv->depletedq);
@@ -857,6 +897,8 @@  rt_alloc_vdata(const struct scheduler *ops, struct vcpu *vc, void *dd)
     svc->vcpu = vc;
     svc->last_start = 0;
 
+    __set_bit(__RTDS_extratime, &svc->flags);
+    svc->priority_level = 0;
     svc->period = RTDS_DEFAULT_PERIOD;
     if ( !is_idle_vcpu(vc) )
         svc->budget = RTDS_DEFAULT_BUDGET;
@@ -966,8 +1008,16 @@  burn_budget(const struct scheduler *ops, struct rt_vcpu *svc, s_time_t now)
 
     if ( svc->cur_budget <= 0 )
     {
-        svc->cur_budget = 0;
-        __set_bit(__RTDS_depleted, &svc->flags);
+        if ( has_extratime(svc) )
+        {
+            svc->priority_level++;
+            svc->cur_budget = svc->budget;
+        }
+        else
+        {
+            svc->cur_budget = 0;
+            __set_bit(__RTDS_depleted, &svc->flags);
+        }
     }
 
     /* TRACE */
@@ -976,11 +1026,15 @@  burn_budget(const struct scheduler *ops, struct rt_vcpu *svc, s_time_t now)
             unsigned vcpu:16, dom:16;
             uint64_t cur_budget;
             int delta;
+            unsigned priority_level;
+            bool has_extratime;
         } d;
         d.dom = svc->vcpu->domain->domain_id;
         d.vcpu = svc->vcpu->vcpu_id;
         d.cur_budget = (uint64_t) svc->cur_budget;
         d.delta = delta;
+        d.priority_level = svc->priority_level;
+        d.has_extratime = svc->flags & RTDS_extratime;
         trace_var(TRC_RTDS_BUDGET_BURN, 1,
                   sizeof(d),
                   (unsigned char *) &d);
@@ -1088,7 +1142,7 @@  rt_schedule(const struct scheduler *ops, s_time_t now, bool_t tasklet_work_sched
              vcpu_runnable(current) &&
              scurr->cur_budget > 0 &&
              ( is_idle_vcpu(snext->vcpu) ||
-               scurr->cur_deadline <= snext->cur_deadline ) )
+               compare_vcpu_priority(scurr, snext) > 0 ) )
             snext = scurr;
     }
 
@@ -1198,13 +1252,13 @@  runq_tickle(const struct scheduler *ops, struct rt_vcpu *new)
         }
         iter_svc = rt_vcpu(iter_vc);
         if ( latest_deadline_vcpu == NULL ||
-             iter_svc->cur_deadline > latest_deadline_vcpu->cur_deadline )
+             compare_vcpu_priority(iter_svc, latest_deadline_vcpu) < 0 )
             latest_deadline_vcpu = iter_svc;
     }
 
     /* 3) candicate has higher priority, kick out lowest priority vcpu */
     if ( latest_deadline_vcpu != NULL &&
-         new->cur_deadline < latest_deadline_vcpu->cur_deadline )
+         compare_vcpu_priority(latest_deadline_vcpu, new) < 0 )
     {
         SCHED_STAT_CRANK(tickled_busy_cpu);
         cpu_to_tickle = latest_deadline_vcpu->vcpu->processor;
@@ -1395,6 +1449,10 @@  rt_dom_cntl(
                 svc = rt_vcpu(d->vcpu[local_sched.vcpuid]);
                 local_sched.u.rtds.budget = svc->budget / MICROSECS(1);
                 local_sched.u.rtds.period = svc->period / MICROSECS(1);
+                if ( has_extratime(svc) )
+                    local_sched.u.rtds.flags |= XEN_DOMCTL_SCHED_RTDS_extratime;
+                else
+                    local_sched.u.rtds.flags &= ~XEN_DOMCTL_SCHED_RTDS_extratime;
                 spin_unlock_irqrestore(&prv->lock, flags);
 
                 if ( copy_to_guest_offset(op->u.v.vcpus, index,
@@ -1419,6 +1477,10 @@  rt_dom_cntl(
                 svc = rt_vcpu(d->vcpu[local_sched.vcpuid]);
                 svc->period = period;
                 svc->budget = budget;
+                if ( local_sched.u.rtds.flags & XEN_DOMCTL_SCHED_RTDS_extratime )
+                    __set_bit(__RTDS_extratime, &svc->flags);
+                else
+                    __clear_bit(__RTDS_extratime, &svc->flags);
                 spin_unlock_irqrestore(&prv->lock, flags);
             }
             /* Process a most 64 vCPUs without checking for preemptions. */
@@ -1493,7 +1555,7 @@  static void repl_timer_handler(void *data){
         {
             struct rt_vcpu *next_on_runq = q_elem(runq->next);
 
-            if ( svc->cur_deadline > next_on_runq->cur_deadline )
+            if ( compare_vcpu_priority(svc, next_on_runq) < 0 )
                 runq_tickle(ops, next_on_runq);
         }
         else if ( __test_and_clear_bit(__RTDS_depleted, &svc->flags) &&
diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
index 0669c31..ba5daa9 100644
--- a/xen/include/public/domctl.h
+++ b/xen/include/public/domctl.h
@@ -360,6 +360,9 @@  typedef struct xen_domctl_sched_credit2 {
 typedef struct xen_domctl_sched_rtds {
     uint32_t period;
     uint32_t budget;
+#define _XEN_DOMCTL_SCHED_RTDS_extratime 0
+#define XEN_DOMCTL_SCHED_RTDS_extratime  (1U<<_XEN_DOMCTL_SCHED_RTDS_extratime)
+    uint32_t flags;
 } xen_domctl_sched_rtds_t;
 
 typedef struct xen_domctl_schedparam_vcpu {