[14/19] xen: credit2: add yet some more tracing

Message ID 146620517863.29766.7999578465463906558.stgit@Solace.fritz.box (mailing list archive)
State New, archived

Commit Message

Dario Faggioli June 17, 2016, 11:12 p.m. UTC
(and fix the style of two labels as well.)

Signed-off-by: Dario Faggioli <dario.faggioli@citrix.com>
---
Cc: George Dunlap <george.dunlap@citrix.com>
Cc: Anshul Makkar <anshul.makkar@citrix.com>
Cc: David Vrabel <david.vrabel@citrix.com>
---
 xen/common/sched_credit2.c |   58 +++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 54 insertions(+), 4 deletions(-)

Comments

Jan Beulich June 20, 2016, 8:15 a.m. UTC | #1
>>> On 18.06.16 at 01:12, <dario.faggioli@citrix.com> wrote:
> @@ -1484,6 +1489,23 @@ csched2_cpu_pick(const struct scheduler *ops, struct vcpu *vc)
>  out_up:
>      spin_unlock(&prv->lock);
>  
> +    /* TRACE */
> +    {
> +        struct {
> +            uint64_t b_avgload;
> +            unsigned vcpu:16, dom:16;
> +            unsigned rq_id:16, new_cpu:16;
> +        } d;
> +        d.b_avgload = prv->rqd[min_rqi].b_avgload;
> +        d.dom = vc->domain->domain_id;
> +        d.vcpu = vc->vcpu_id;
> +        d.rq_id = c2r(ops, new_cpu);
> +        d.new_cpu = new_cpu;

I guess this follows pre-existing style, but it would seem more natural
to me for the variable to have an initializer instead of this series of
assignments.

Jan
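For reference, the initializer form Jan describes would look roughly
like this (a sketch reusing the fields from the hunk above, not code
from the thread):

    struct {
        uint64_t b_avgload;
        unsigned vcpu:16, dom:16;
        unsigned rq_id:16, new_cpu:16;
    } d = {
        .b_avgload = prv->rqd[min_rqi].b_avgload,
        .dom       = vc->domain->domain_id,
        .vcpu      = vc->vcpu_id,
        .rq_id     = c2r(ops, new_cpu),
        .new_cpu   = new_cpu,
    };

Both forms compile to the same thing; the difference is purely
stylistic, which is what the reply below turns on.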
George Dunlap July 7, 2016, 3:34 p.m. UTC | #2
On Mon, Jun 20, 2016 at 9:15 AM, Jan Beulich <JBeulich@suse.com> wrote:
>>>> On 18.06.16 at 01:12, <dario.faggioli@citrix.com> wrote:
>> @@ -1484,6 +1489,23 @@ csched2_cpu_pick(const struct scheduler *ops, struct vcpu *vc)
>>  out_up:
>>      spin_unlock(&prv->lock);
>>
>> +    /* TRACE */
>> +    {
>> +        struct {
>> +            uint64_t b_avgload;
>> +            unsigned vcpu:16, dom:16;
>> +            unsigned rq_id:16, new_cpu:16;
>> +        } d;
>> +        d.b_avgload = prv->rqd[min_rqi].b_avgload;
>> +        d.dom = vc->domain->domain_id;
>> +        d.vcpu = vc->vcpu_id;
>> +        d.rq_id = c2r(ops, new_cpu);
>> +        d.new_cpu = new_cpu;
>
> I guess this follows pre-existing style, but it would seem more natural
> to me for the variable to have an initializer instead of this series of
> assignments.

Well that doesn't actually save you that much typing, and I think it's
probably (slightly) less easy to read.  But the biggest thing at this
point is that it's inconsistent with what's there. :-)

 -George
George Dunlap July 7, 2016, 3:34 p.m. UTC | #3
On Sat, Jun 18, 2016 at 12:12 AM, Dario Faggioli
<dario.faggioli@citrix.com> wrote:
> (and fix the style of two labels as well.)
>
> Signed-off-by: Dario Faggioli <dario.faggioli@citrix.com>

Acked-by: George Dunlap <george.dunlap@citrix.com>

> ---
> Cc: George Dunlap <george.dunlap@citrix.com>
> Cc: Anshul Makkar <anshul.makkar@citrix.com>
> Cc: David Vrabel <david.vrabel@citrix.com>
> ---
>  xen/common/sched_credit2.c |   58 +++++++++++++++++++++++++++++++++++++++++---
>  1 file changed, 54 insertions(+), 4 deletions(-)
>
> diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c
> index ba3a78a..e9f3f13 100644
> --- a/xen/common/sched_credit2.c
> +++ b/xen/common/sched_credit2.c
> @@ -46,6 +46,9 @@
>  #define TRC_CSCHED2_TICKLE_NEW       TRC_SCHED_CLASS_EVT(CSCHED2, 13)
>  #define TRC_CSCHED2_RUNQ_MAX_WEIGHT  TRC_SCHED_CLASS_EVT(CSCHED2, 14)
>  #define TRC_CSCHED2_MIGRATE          TRC_SCHED_CLASS_EVT(CSCHED2, 15)
> +#define TRC_CSCHED2_LOAD_CHECK       TRC_SCHED_CLASS_EVT(CSCHED2, 16)
> +#define TRC_CSCHED2_LOAD_BALANCE     TRC_SCHED_CLASS_EVT(CSCHED2, 17)
> +#define TRC_CSCHED2_PICKED_CPU       TRC_SCHED_CLASS_EVT(CSCHED2, 19)
>
>  /*
>   * WARNING: This is still in an experimental phase.  Status and work can be found at the
> @@ -709,6 +712,8 @@ update_load(const struct scheduler *ops,
>              struct csched2_runqueue_data *rqd,
>              struct csched2_vcpu *svc, int change, s_time_t now)
>  {
> +    trace_var(TRC_CSCHED2_UPDATE_LOAD, 1, 0, NULL);
> +
>      __update_runq_load(ops, rqd, change, now);
>      if ( svc )
>          __update_svc_load(ops, svc, change, now);
> @@ -1484,6 +1489,23 @@ csched2_cpu_pick(const struct scheduler *ops, struct vcpu *vc)
>  out_up:
>      spin_unlock(&prv->lock);
>
> +    /* TRACE */
> +    {
> +        struct {
> +            uint64_t b_avgload;
> +            unsigned vcpu:16, dom:16;
> +            unsigned rq_id:16, new_cpu:16;
> +        } d;
> +        d.b_avgload = prv->rqd[min_rqi].b_avgload;
> +        d.dom = vc->domain->domain_id;
> +        d.vcpu = vc->vcpu_id;
> +        d.rq_id = c2r(ops, new_cpu);
> +        d.new_cpu = new_cpu;
> +        trace_var(TRC_CSCHED2_PICKED_CPU, 1,
> +                  sizeof(d),
> +                  (unsigned char *)&d);
> +    }
> +
>      return new_cpu;
>  }
>
> @@ -1611,7 +1633,7 @@ static void balance_load(const struct scheduler *ops, int cpu, s_time_t now)
>      bool_t inner_load_updated = 0;
>
>      balance_state_t st = { .best_push_svc = NULL, .best_pull_svc = NULL };
> -
> +
>      /*
>       * Basic algorithm: Push, pull, or swap.
>       * - Find the runqueue with the furthest load distance
> @@ -1677,6 +1699,20 @@ static void balance_load(const struct scheduler *ops, int cpu, s_time_t now)
>          if ( i > cpus_max )
>              cpus_max = i;
>
> +        /* TRACE */
> +        {
> +            struct {
> +                unsigned lrq_id:16, orq_id:16;
> +                unsigned load_delta;
> +            } d;
> +            d.lrq_id = st.lrqd->id;
> +            d.orq_id = st.orqd->id;
> +            d.load_delta = st.load_delta;
> +            trace_var(TRC_CSCHED2_LOAD_CHECK, 1,
> +                      sizeof(d),
> +                      (unsigned char *)&d);
> +        }
> +
>          /*
>          * If we're under 100% capacity, only shift if load difference
>          * is > 1.  Otherwise, shift if under 12.5%
> @@ -1705,6 +1741,21 @@ static void balance_load(const struct scheduler *ops, int cpu, s_time_t now)
>      if ( unlikely(st.orqd->id < 0) )
>          goto out_up;
>
> +    /* TRACE */
> +    {
> +        struct {
> +            uint64_t lb_avgload, ob_avgload;
> +            unsigned lrq_id:16, orq_id:16;
> +        } d;
> +        d.lrq_id = st.lrqd->id;
> +        d.lb_avgload = st.lrqd->b_avgload;
> +        d.orq_id = st.orqd->id;
> +        d.ob_avgload = st.orqd->b_avgload;
> +        trace_var(TRC_CSCHED2_LOAD_BALANCE, 1,
> +                  sizeof(d),
> +                  (unsigned char *)&d);
> +    }
> +
>      now = NOW();
>
>      /* Look for "swap" which gives the best load average
> @@ -1756,10 +1807,9 @@ static void balance_load(const struct scheduler *ops, int cpu, s_time_t now)
>      if ( st.best_pull_svc )
>          migrate(ops, st.best_pull_svc, st.lrqd, now);
>
> -out_up:
> + out_up:
>      spin_unlock(&st.orqd->lock);
> -
> -out:
> + out:
>      return;
>  }
>

Patch

diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c
index ba3a78a..e9f3f13 100644
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -46,6 +46,9 @@ 
 #define TRC_CSCHED2_TICKLE_NEW       TRC_SCHED_CLASS_EVT(CSCHED2, 13)
 #define TRC_CSCHED2_RUNQ_MAX_WEIGHT  TRC_SCHED_CLASS_EVT(CSCHED2, 14)
 #define TRC_CSCHED2_MIGRATE          TRC_SCHED_CLASS_EVT(CSCHED2, 15)
+#define TRC_CSCHED2_LOAD_CHECK       TRC_SCHED_CLASS_EVT(CSCHED2, 16)
+#define TRC_CSCHED2_LOAD_BALANCE     TRC_SCHED_CLASS_EVT(CSCHED2, 17)
+#define TRC_CSCHED2_PICKED_CPU       TRC_SCHED_CLASS_EVT(CSCHED2, 19)
 
 /*
  * WARNING: This is still in an experimental phase.  Status and work can be found at the
@@ -709,6 +712,8 @@  update_load(const struct scheduler *ops,
             struct csched2_runqueue_data *rqd,
             struct csched2_vcpu *svc, int change, s_time_t now)
 {
+    trace_var(TRC_CSCHED2_UPDATE_LOAD, 1, 0, NULL);
+
     __update_runq_load(ops, rqd, change, now);
     if ( svc )
         __update_svc_load(ops, svc, change, now);
@@ -1484,6 +1489,23 @@  csched2_cpu_pick(const struct scheduler *ops, struct vcpu *vc)
 out_up:
     spin_unlock(&prv->lock);
 
+    /* TRACE */
+    {
+        struct {
+            uint64_t b_avgload;
+            unsigned vcpu:16, dom:16;
+            unsigned rq_id:16, new_cpu:16;
+        } d;
+        d.b_avgload = prv->rqd[min_rqi].b_avgload;
+        d.dom = vc->domain->domain_id;
+        d.vcpu = vc->vcpu_id;
+        d.rq_id = c2r(ops, new_cpu);
+        d.new_cpu = new_cpu;
+        trace_var(TRC_CSCHED2_PICKED_CPU, 1,
+                  sizeof(d),
+                  (unsigned char *)&d);
+    }
+
     return new_cpu;
 }
 
@@ -1611,7 +1633,7 @@  static void balance_load(const struct scheduler *ops, int cpu, s_time_t now)
     bool_t inner_load_updated = 0;
 
     balance_state_t st = { .best_push_svc = NULL, .best_pull_svc = NULL };
-    
+
     /*
      * Basic algorithm: Push, pull, or swap.
      * - Find the runqueue with the furthest load distance
@@ -1677,6 +1699,20 @@  static void balance_load(const struct scheduler *ops, int cpu, s_time_t now)
         if ( i > cpus_max )
             cpus_max = i;
 
+        /* TRACE */
+        {
+            struct {
+                unsigned lrq_id:16, orq_id:16;
+                unsigned load_delta;
+            } d;
+            d.lrq_id = st.lrqd->id;
+            d.orq_id = st.orqd->id;
+            d.load_delta = st.load_delta;
+            trace_var(TRC_CSCHED2_LOAD_CHECK, 1,
+                      sizeof(d),
+                      (unsigned char *)&d);
+        }
+
         /*
         * If we're under 100% capacity, only shift if load difference
         * is > 1.  Otherwise, shift if under 12.5%
@@ -1705,6 +1741,21 @@  static void balance_load(const struct scheduler *ops, int cpu, s_time_t now)
     if ( unlikely(st.orqd->id < 0) )
         goto out_up;
 
+    /* TRACE */
+    {
+        struct {
+            uint64_t lb_avgload, ob_avgload;
+            unsigned lrq_id:16, orq_id:16;
+        } d;
+        d.lrq_id = st.lrqd->id;
+        d.lb_avgload = st.lrqd->b_avgload;
+        d.orq_id = st.orqd->id;
+        d.ob_avgload = st.orqd->b_avgload;
+        trace_var(TRC_CSCHED2_LOAD_BALANCE, 1,
+                  sizeof(d),
+                  (unsigned char *)&d);
+    }
+
     now = NOW();
 
     /* Look for "swap" which gives the best load average
@@ -1756,10 +1807,9 @@  static void balance_load(const struct scheduler *ops, int cpu, s_time_t now)
     if ( st.best_pull_svc )
         migrate(ops, st.best_pull_svc, st.lrqd, now);
 
-out_up:
+ out_up:
     spin_unlock(&st.orqd->lock);
-
-out:
+ out:
     return;
 }
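
For anyone wiring the new events into their own analysis tooling, here
is a minimal sketch of a user-space decoder for the
TRC_CSCHED2_PICKED_CPU payload. It assumes the payload bytes have
already been extracted from a trace (e.g. one captured with an event
mask covering the scheduler trace class, TRC_SCHED); the struct merely
mirrors the one passed to trace_var() above, and both
csched2_picked_cpu and decode_picked_cpu are hypothetical names, not
part of the patch or of xenalyze:

    #include <inttypes.h>
    #include <stdio.h>

    /* Mirrors the anonymous struct handed to trace_var() in
     * csched2_cpu_pick(); field order and widths must match the
     * hypervisor side exactly, and the record must be decoded on an
     * architecture with the same bitfield layout it was produced on. */
    struct csched2_picked_cpu {
        uint64_t b_avgload;                /* load estimate of the chosen runqueue */
        unsigned int vcpu:16, dom:16;      /* vCPU and domain being placed */
        unsigned int rq_id:16, new_cpu:16; /* runqueue id and the pCPU picked */
    };

    /* 'payload' points at the extra-data words of one
     * TRC_CSCHED2_PICKED_CPU record. */
    static void decode_picked_cpu(const void *payload)
    {
        const struct csched2_picked_cpu *p = payload;

        printf("d%uv%u -> rq %u, cpu %u (b_avgload %" PRIu64 ")\n",
               (unsigned int)p->dom, (unsigned int)p->vcpu,
               (unsigned int)p->rq_id, (unsigned int)p->new_cpu,
               p->b_avgload);
    }

    int main(void)
    {
        /* Example record, filled in the way the hypervisor side would. */
        struct csched2_picked_cpu rec = {
            .b_avgload = 123456,
            .dom = 1, .vcpu = 2,
            .rq_id = 0, .new_cpu = 3,
        };

        decode_picked_cpu(&rec);
        return 0;
    }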