
[v2,1/7] xen: credit1: simplify csched_runq_steal() a little bit.

Message ID 149146656929.21348.389782940595763501.stgit@Solace.fritz.box (mailing list archive)
State New, archived

Commit Message

Dario Faggioli April 6, 2017, 8:16 a.m. UTC
Since we're holding the lock on the pCPU from which we
are trying to steal, it can't have disappeared, so we
can drop the check for that (and convert it into an
ASSERT()).
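
For reference, this is the locking invariant being relied upon. A
sketch of the call site in csched_load_balance() (paraphrased from
xen/common/sched_credit.c; the surrounding loop logic is simplified
here):

    /*
     * csched_runq_steal() is only called after peer_cpu's scheduler
     * lock has been successfully taken, so the pCPU and its runq
     * cannot go away under our feet -- the NULL check can therefore
     * become an ASSERT().
     */
    lock = pcpu_schedule_trylock(peer_cpu);
    if ( !lock )
    {
        SCHED_STAT_CRANK(steal_trylock_failed);
        continue;               /* try the next peer pCPU */
    }
    speer = csched_runq_steal(peer_cpu, cpu, snext->pri, bstep);
    pcpu_schedule_unlock(lock, peer_cpu);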

And since we try to steal only from busy pCPUs, it's
unlikely for such pCPU to be idle, so we mark it as
such (and bail early if it unfortunately is).
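
Here "mark it as such" means annotating the test with unlikely().
Assuming the conventional definition (in Xen, as in Linux, a thin
wrapper around GCC's __builtin_expect()), the hint only influences
code layout and branch prediction, never the result of the test:

    /* Conventional definitions, cf. xen/include/xen/compiler.h. */
    #define likely(x)   __builtin_expect(!!(x), 1)
    #define unlikely(x) __builtin_expect(!!(x), 0)

    /* The early bail-out introduced by the patch thus tells the
     * compiler: "this is almost never true; optimise the layout
     * for the non-idle path". */
    if ( unlikely(is_idle_vcpu(curr_on_cpu(peer_cpu))) )
        goto out;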

Signed-off-by: Dario Faggioli <dario.faggioli@citrix.com>
---
Cc: George Dunlap <george.dunlap@eu.citrix.com>
Cc: Anshul Makkar <anshul.makkar@citrix.com>
---
 xen/common/sched_credit.c |   87 +++++++++++++++++++++++----------------------
 1 file changed, 44 insertions(+), 43 deletions(-)

Comments

George Dunlap April 7, 2017, 10:45 a.m. UTC | #1
On 06/04/17 09:16, Dario Faggioli wrote:
> Since we're holding the lock on the pCPU from which we
> are trying to steal, it can't have disappeared, so we
> can drop the check for that (and convert it into an
> ASSERT()).
> 
> And since we try to steal only from busy pCPUs, it's
> unlikely for such pCPU to be idle, so we mark it as
> such (and bail early if it unfortunately is).

Oh, you mean "mark the test as unlikely".  I initially parsed this as,
"Mark the pcpu as idle", which confused me.

Can I change this to, "tell the compiler that it's unlikely"?

Either way:

Reviewed-by: George Dunlap <george.dunlap@citrix.com>

> 
> Signed-off-by: Dario Faggioli <dario.faggioli@citrix.com>
> 
> [snip: full patch quoted with no further inline comments; it is
> duplicated verbatim in the "Patch" section below]
Dario Faggioli April 7, 2017, 11 a.m. UTC | #2
On Fri, 2017-04-07 at 11:45 +0100, George Dunlap wrote:
> On 06/04/17 09:16, Dario Faggioli wrote:
> > Since we're holding the lock on the pCPU from which we
> > are trying to steal, it can't have disappeared, so we
> > can drop the check for that (and convert it into an
> > ASSERT()).
> > 
> > And since we try to steal only from busy pCPUs, it's
> > unlikely for such pCPU to be idle, so we mark it as
> > such (and bail early if it unfortunately is).
> 
> Oh, you mean "mark the test as unlikely".  I initially parsed this as,
> "Mark the pcpu as idle", which confused me.
> 
> Can I change this to, "tell the compiler that it's unlikely"?
> 
Indeed. AFAICS, I'll have to resend, so I will do this myself.

> Either way:
> 
> Reviewed-by: George Dunlap <george.dunlap@citrix.com>
> 
Thanks,
Dario

Patch

diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c
index 4649e64..63a8675 100644
--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -1593,64 +1593,65 @@ static struct csched_vcpu *
 csched_runq_steal(int peer_cpu, int cpu, int pri, int balance_step)
 {
     const struct csched_pcpu * const peer_pcpu = CSCHED_PCPU(peer_cpu);
-    const struct vcpu * const peer_vcpu = curr_on_cpu(peer_cpu);
     struct csched_vcpu *speer;
     struct list_head *iter;
     struct vcpu *vc;
 
+    ASSERT(peer_pcpu != NULL);
+
     /*
      * Don't steal from an idle CPU's runq because it's about to
      * pick up work from it itself.
      */
-    if ( peer_pcpu != NULL && !is_idle_vcpu(peer_vcpu) )
+    if ( unlikely(is_idle_vcpu(curr_on_cpu(peer_cpu))) )
+        goto out;
+
+    list_for_each( iter, &peer_pcpu->runq )
     {
-        list_for_each( iter, &peer_pcpu->runq )
-        {
-            speer = __runq_elem(iter);
+        speer = __runq_elem(iter);
 
-            /*
-             * If next available VCPU here is not of strictly higher
-             * priority than ours, this PCPU is useless to us.
-             */
-            if ( speer->pri <= pri )
-                break;
+        /*
+         * If next available VCPU here is not of strictly higher
+         * priority than ours, this PCPU is useless to us.
+         */
+        if ( speer->pri <= pri )
+            break;
 
-            /* Is this VCPU runnable on our PCPU? */
-            vc = speer->vcpu;
-            BUG_ON( is_idle_vcpu(vc) );
+        /* Is this VCPU runnable on our PCPU? */
+        vc = speer->vcpu;
+        BUG_ON( is_idle_vcpu(vc) );
 
-            /*
-             * If the vcpu has no useful soft affinity, skip this vcpu.
-             * In fact, what we want is to check if we have any "soft-affine
-             * work" to steal, before starting to look at "hard-affine work".
-             *
-             * Notice that, if not even one vCPU on this runq has a useful
-             * soft affinity, we could have avoid considering this runq for
-             * a soft balancing step in the first place. This, for instance,
-             * can be implemented by taking note of on what runq there are
-             * vCPUs with useful soft affinities in some sort of bitmap
-             * or counter.
-             */
-            if ( balance_step == CSCHED_BALANCE_SOFT_AFFINITY
-                 && !__vcpu_has_soft_affinity(vc, vc->cpu_hard_affinity) )
-                continue;
+        /*
+         * If the vcpu has no useful soft affinity, skip this vcpu.
+         * In fact, what we want is to check if we have any "soft-affine
+         * work" to steal, before starting to look at "hard-affine work".
+         *
+         * Notice that, if not even one vCPU on this runq has a useful
+         * soft affinity, we could have avoid considering this runq for
+         * a soft balancing step in the first place. This, for instance,
+         * can be implemented by taking note of on what runq there are
+         * vCPUs with useful soft affinities in some sort of bitmap
+         * or counter.
+         */
+        if ( balance_step == CSCHED_BALANCE_SOFT_AFFINITY
+             && !__vcpu_has_soft_affinity(vc, vc->cpu_hard_affinity) )
+            continue;
 
-            csched_balance_cpumask(vc, balance_step, cpumask_scratch);
-            if ( __csched_vcpu_is_migrateable(vc, cpu, cpumask_scratch) )
-            {
-                /* We got a candidate. Grab it! */
-                TRACE_3D(TRC_CSCHED_STOLEN_VCPU, peer_cpu,
-                         vc->domain->domain_id, vc->vcpu_id);
-                SCHED_VCPU_STAT_CRANK(speer, migrate_q);
-                SCHED_STAT_CRANK(migrate_queued);
-                WARN_ON(vc->is_urgent);
-                __runq_remove(speer);
-                vc->processor = cpu;
-                return speer;
-            }
+        csched_balance_cpumask(vc, balance_step, cpumask_scratch);
+        if ( __csched_vcpu_is_migrateable(vc, cpu, cpumask_scratch) )
+        {
+            /* We got a candidate. Grab it! */
+            TRACE_3D(TRC_CSCHED_STOLEN_VCPU, peer_cpu,
+                     vc->domain->domain_id, vc->vcpu_id);
+            SCHED_VCPU_STAT_CRANK(speer, migrate_q);
+            SCHED_STAT_CRANK(migrate_queued);
+            WARN_ON(vc->is_urgent);
+            __runq_remove(speer);
+            vc->processor = cpu;
+            return speer;
         }
     }
-
+ out:
     SCHED_STAT_CRANK(steal_peer_idle);
     return NULL;
 }
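
The big comment kept in the loop above also sketches a possible
future optimisation: tracking, per runq, whether any queued vCPU has
a useful soft affinity, so that soft-affinity balancing steps can
skip barren runqs entirely. A hypothetical sketch of that idea (the
field and helper below do not exist in sched_credit.c; the names are
invented for illustration):

    /* Hypothetical per-pCPU bookkeeping (invented names). */
    struct csched_pcpu {
        /* ... existing members ... */
        unsigned int nr_soft_affine; /* queued vCPUs with useful soft affinity */
    };

    /* Hypothetical: keep the counter in sync when enqueueing;
     * a matching decrement would go in the dequeue path. */
    static inline void runq_account_insert(struct csched_pcpu *spc,
                                           struct csched_vcpu *svc)
    {
        if ( __vcpu_has_soft_affinity(svc->vcpu,
                                      svc->vcpu->cpu_hard_affinity) )
            spc->nr_soft_affine++;
    }

    /* A soft-affinity balancing step could then bail out up front: */
    if ( balance_step == CSCHED_BALANCE_SOFT_AFFINITY
         && spc->nr_soft_affine == 0 )
        continue; /* no soft-affine work to steal from this pCPU */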