diff mbox series

[v3,37/47] xen/sched: move per-cpu variable scheduler to struct sched_resource

Message ID 20190914085251.18816-38-jgross@suse.com (mailing list archive)
State Superseded
Headers show
Series xen: add core scheduling support | expand

Commit Message

Jürgen Groß Sept. 14, 2019, 8:52 a.m. UTC
Having a pointer to struct scheduler in struct sched_resource is
sufficient; the per-cpu variable is no longer needed.

Signed-off-by: Juergen Gross <jgross@suse.com>
---
V1: new patch
---
 xen/common/sched_credit.c  | 18 +++++++++++-------
 xen/common/sched_credit2.c |  3 ++-
 xen/common/schedule.c      | 15 +++++++--------
 xen/include/xen/sched-if.h |  2 +-
 4 files changed, 21 insertions(+), 17 deletions(-)

Comments

Jan Beulich Sept. 24, 2019, 12:16 p.m. UTC | #1
On 14.09.2019 10:52, Juergen Gross wrote:
> --- a/xen/common/sched_credit.c
> +++ b/xen/common/sched_credit.c
> @@ -352,9 +352,10 @@ DEFINE_PER_CPU(unsigned int, last_tickle_cpu);
>  static inline void __runq_tickle(struct csched_unit *new)
>  {
>      unsigned int cpu = sched_unit_cpu(new->unit);
> +    struct sched_resource *sd = get_sched_res(cpu);
>      struct sched_unit *unit = new->unit;
>      struct csched_unit * const cur = CSCHED_UNIT(curr_on_cpu(cpu));
> -    struct csched_private *prv = CSCHED_PRIV(per_cpu(scheduler, cpu));
> +    struct csched_private *prv = CSCHED_PRIV(sd->scheduler);

Is the single use "sd" (and, as mentioned elsewhere, mis-named)
really worth it? (Applicable further down as well.)

> @@ -931,7 +932,8 @@ csched_unit_acct(struct csched_private *prv, unsigned int cpu)
>  {
>      struct sched_unit *currunit = current->sched_unit;
>      struct csched_unit * const svc = CSCHED_UNIT(currunit);
> -    const struct scheduler *ops = per_cpu(scheduler, cpu);
> +    struct sched_resource *sd = get_sched_res(cpu);
> +    const struct scheduler *ops = sd->scheduler;
>  
>      ASSERT( sched_unit_cpu(currunit) == cpu );
>      ASSERT( svc->sdom != NULL );
> @@ -987,8 +989,7 @@ csched_unit_acct(struct csched_private *prv, unsigned int cpu)
>               * idlers. But, if we are here, it means there is someone running
>               * on it, and hence the bit must be zero already.
>               */
> -            ASSERT(!cpumask_test_cpu(cpu,
> -                                     CSCHED_PRIV(per_cpu(scheduler, cpu))->idlers));
> +            ASSERT(!cpumask_test_cpu(cpu, CSCHED_PRIV(sd->scheduler)->idlers));

While at the first glance "sd" is used twice here, this 2nd use is
actually unnecessary - "cpu" hasn't changed from the beginning of
the function afaics, and hence "ops" could be used here.

Preferably with the suggested adjustments (but final judgement is
with the scheduler maintainers anyway)
Reviewed-by: Jan Beulich <jbeulich@suse.com>

Jan
Jürgen Groß Sept. 25, 2019, 1:13 p.m. UTC | #2
On 24.09.19 14:16, Jan Beulich wrote:
> On 14.09.2019 10:52, Juergen Gross wrote:
>> --- a/xen/common/sched_credit.c
>> +++ b/xen/common/sched_credit.c
>> @@ -352,9 +352,10 @@ DEFINE_PER_CPU(unsigned int, last_tickle_cpu);
>>   static inline void __runq_tickle(struct csched_unit *new)
>>   {
>>       unsigned int cpu = sched_unit_cpu(new->unit);
>> +    struct sched_resource *sd = get_sched_res(cpu);
>>       struct sched_unit *unit = new->unit;
>>       struct csched_unit * const cur = CSCHED_UNIT(curr_on_cpu(cpu));
>> -    struct csched_private *prv = CSCHED_PRIV(per_cpu(scheduler, cpu));
>> +    struct csched_private *prv = CSCHED_PRIV(sd->scheduler);
> 
> Is the single use "sd" (and, as mentioned elsewhere, mis-named)
> really worth it? (Applicable further down as well.)
> 
>> @@ -931,7 +932,8 @@ csched_unit_acct(struct csched_private *prv, unsigned int cpu)
>>   {
>>       struct sched_unit *currunit = current->sched_unit;
>>       struct csched_unit * const svc = CSCHED_UNIT(currunit);
>> -    const struct scheduler *ops = per_cpu(scheduler, cpu);
>> +    struct sched_resource *sd = get_sched_res(cpu);
>> +    const struct scheduler *ops = sd->scheduler;
>>   
>>       ASSERT( sched_unit_cpu(currunit) == cpu );
>>       ASSERT( svc->sdom != NULL );
>> @@ -987,8 +989,7 @@ csched_unit_acct(struct csched_private *prv, unsigned int cpu)
>>                * idlers. But, if we are here, it means there is someone running
>>                * on it, and hence the bit must be zero already.
>>                */
>> -            ASSERT(!cpumask_test_cpu(cpu,
>> -                                     CSCHED_PRIV(per_cpu(scheduler, cpu))->idlers));
>> +            ASSERT(!cpumask_test_cpu(cpu, CSCHED_PRIV(sd->scheduler)->idlers));
> 
> While at the first glance "sd" is used twice here, this 2nd use is
> actually unnecessary - "cpu" hasn't changed from the beginning of
> the function afaics, and hence "ops" could be used here.

"sd" is now "sr" everywhere.

And yes, using ops here seems okay.

> 
> Preferably with the suggested adjustments (but final judgement is
> with the scheduler maintainers anyway)
> Reviewed-by: Jan Beulich <jbeulich@suse.com>

Thanks,


Juergen
Dario Faggioli Sept. 26, 2019, 9:06 a.m. UTC | #3
On Wed, 2019-09-25 at 15:13 +0200, Jürgen Groß wrote:
> On 24.09.19 14:16, Jan Beulich wrote:
> > 
> > While at the first glance "sd" is used twice here, this 2nd use is
> > actually unnecessary - "cpu" hasn't changed from the beginning of
> > the function afaics, and hence "ops" could be used here.
> 
> "sd" is now "sr" everywhere.
> 
> And yes, using ops here seems okay.
> 
> > Preferably with the suggested adjustments (but final judgement is
> > with the scheduler maintainers anyway)
> > Reviewed-by: Jan Beulich <jbeulich@suse.com>
>
Reviewed-by: Dario Faggioli <dfaggioli@suse.com>

(With sd->sr and with using ops.)

Regards
diff mbox series

Patch

diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c
index b25a7d2270..e47e865d76 100644
--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -352,9 +352,10 @@  DEFINE_PER_CPU(unsigned int, last_tickle_cpu);
 static inline void __runq_tickle(struct csched_unit *new)
 {
     unsigned int cpu = sched_unit_cpu(new->unit);
+    struct sched_resource *sd = get_sched_res(cpu);
     struct sched_unit *unit = new->unit;
     struct csched_unit * const cur = CSCHED_UNIT(curr_on_cpu(cpu));
-    struct csched_private *prv = CSCHED_PRIV(per_cpu(scheduler, cpu));
+    struct csched_private *prv = CSCHED_PRIV(sd->scheduler);
     cpumask_t mask, idle_mask, *online;
     int balance_step, idlers_empty;
 
@@ -931,7 +932,8 @@  csched_unit_acct(struct csched_private *prv, unsigned int cpu)
 {
     struct sched_unit *currunit = current->sched_unit;
     struct csched_unit * const svc = CSCHED_UNIT(currunit);
-    const struct scheduler *ops = per_cpu(scheduler, cpu);
+    struct sched_resource *sd = get_sched_res(cpu);
+    const struct scheduler *ops = sd->scheduler;
 
     ASSERT( sched_unit_cpu(currunit) == cpu );
     ASSERT( svc->sdom != NULL );
@@ -987,8 +989,7 @@  csched_unit_acct(struct csched_private *prv, unsigned int cpu)
              * idlers. But, if we are here, it means there is someone running
              * on it, and hence the bit must be zero already.
              */
-            ASSERT(!cpumask_test_cpu(cpu,
-                                     CSCHED_PRIV(per_cpu(scheduler, cpu))->idlers));
+            ASSERT(!cpumask_test_cpu(cpu, CSCHED_PRIV(sd->scheduler)->idlers));
             cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);
         }
     }
@@ -1083,6 +1084,7 @@  csched_unit_sleep(const struct scheduler *ops, struct sched_unit *unit)
 {
     struct csched_unit * const svc = CSCHED_UNIT(unit);
     unsigned int cpu = sched_unit_cpu(unit);
+    struct sched_resource *sd = get_sched_res(cpu);
 
     SCHED_STAT_CRANK(unit_sleep);
 
@@ -1095,7 +1097,7 @@  csched_unit_sleep(const struct scheduler *ops, struct sched_unit *unit)
          * But, we are here because unit is going to sleep while running on cpu,
          * so the bit must be zero already.
          */
-        ASSERT(!cpumask_test_cpu(cpu, CSCHED_PRIV(per_cpu(scheduler, cpu))->idlers));
+        ASSERT(!cpumask_test_cpu(cpu, CSCHED_PRIV(sd->scheduler)->idlers));
         cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);
     }
     else if ( __unit_on_runq(svc) )
@@ -1575,8 +1577,9 @@  static void
 csched_tick(void *_cpu)
 {
     unsigned int cpu = (unsigned long)_cpu;
+    struct sched_resource *sd = get_sched_res(cpu);
     struct csched_pcpu *spc = CSCHED_PCPU(cpu);
-    struct csched_private *prv = CSCHED_PRIV(per_cpu(scheduler, cpu));
+    struct csched_private *prv = CSCHED_PRIV(sd->scheduler);
 
     spc->tick++;
 
@@ -1601,7 +1604,8 @@  csched_tick(void *_cpu)
 static struct csched_unit *
 csched_runq_steal(int peer_cpu, int cpu, int pri, int balance_step)
 {
-    const struct csched_private * const prv = CSCHED_PRIV(per_cpu(scheduler, cpu));
+    struct sched_resource *sd = get_sched_res(cpu);
+    const struct csched_private * const prv = CSCHED_PRIV(sd->scheduler);
     const struct csched_pcpu * const peer_pcpu = CSCHED_PCPU(peer_cpu);
     struct csched_unit *speer;
     struct list_head *iter;
diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c
index 708643be7e..49c5f80ddd 100644
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -3266,8 +3266,9 @@  runq_candidate(struct csched2_runqueue_data *rqd,
                unsigned int *skipped)
 {
     struct list_head *iter, *temp;
+    struct sched_resource *sd = get_sched_res(cpu);
     struct csched2_unit *snext = NULL;
-    struct csched2_private *prv = csched2_priv(per_cpu(scheduler, cpu));
+    struct csched2_private *prv = csched2_priv(sd->scheduler);
     bool yield = false, soft_aff_preempt = false;
 
     *skipped = 0;
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index 88ac1a1ab8..1bd84a49bc 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -69,7 +69,6 @@  static void vcpu_singleshot_timer_fn(void *data);
 static void poll_timer_fn(void *data);
 
 /* This is global for now so that private implementations can reach it */
-DEFINE_PER_CPU(struct scheduler *, scheduler);
 DEFINE_PER_CPU_READ_MOSTLY(struct sched_resource *, sched_res);
 static DEFINE_PER_CPU_READ_MOSTLY(unsigned int, sched_res_idx);
 
@@ -191,7 +190,7 @@  static inline struct scheduler *unit_scheduler(const struct sched_unit *unit)
      */
 
     ASSERT(is_idle_domain(d));
-    return per_cpu(scheduler, unit->res->master_cpu);
+    return unit->res->scheduler;
 }
 
 static inline struct scheduler *vcpu_scheduler(const struct vcpu *v)
@@ -1896,8 +1895,8 @@  static bool sched_tasklet_check(unsigned int cpu)
 static struct sched_unit *do_schedule(struct sched_unit *prev, s_time_t now,
                                       unsigned int cpu)
 {
-    struct scheduler *sched = per_cpu(scheduler, cpu);
     struct sched_resource *sd = get_sched_res(cpu);
+    struct scheduler *sched = sd->scheduler;
     struct sched_unit *next;
 
     /* get policy-specific decision on scheduling... */
@@ -2302,7 +2301,7 @@  static int cpu_schedule_up(unsigned int cpu)
     sd->cpus = cpumask_of(cpu);
     set_sched_res(cpu, sd);
 
-    per_cpu(scheduler, cpu) = &sched_idle_ops;
+    sd->scheduler = &sched_idle_ops;
     spin_lock_init(&sd->_lock);
     sd->schedule_lock = &sched_free_cpu_lock;
     init_timer(&sd->s_timer, s_timer_fn, NULL, cpu);
@@ -2513,7 +2512,7 @@  int schedule_cpu_switch(unsigned int cpu, struct cpupool *c)
 {
     struct vcpu *idle;
     void *ppriv, *ppriv_old, *vpriv, *vpriv_old;
-    struct scheduler *old_ops = per_cpu(scheduler, cpu);
+    struct scheduler *old_ops = get_sched_res(cpu)->scheduler;
     struct scheduler *new_ops = (c == NULL) ? &sched_idle_ops : c->sched;
     struct cpupool *old_pool = per_cpu(cpupool, cpu);
     struct sched_resource *sd = get_sched_res(cpu);
@@ -2577,7 +2576,7 @@  int schedule_cpu_switch(unsigned int cpu, struct cpupool *c)
     ppriv_old = sd->sched_priv;
     new_lock = sched_switch_sched(new_ops, cpu, ppriv, vpriv);
 
-    per_cpu(scheduler, cpu) = new_ops;
+    sd->scheduler = new_ops;
     sd->sched_priv = ppriv;
 
     /*
@@ -2677,7 +2676,7 @@  void sched_tick_suspend(void)
     struct scheduler *sched;
     unsigned int cpu = smp_processor_id();
 
-    sched = per_cpu(scheduler, cpu);
+    sched = get_sched_res(cpu)->scheduler;
     sched_do_tick_suspend(sched, cpu);
     rcu_idle_enter(cpu);
     rcu_idle_timer_start();
@@ -2690,7 +2689,7 @@  void sched_tick_resume(void)
 
     rcu_idle_timer_stop();
     rcu_idle_exit(cpu);
-    sched = per_cpu(scheduler, cpu);
+    sched = get_sched_res(cpu)->scheduler;
     sched_do_tick_resume(sched, cpu);
 }
 
diff --git a/xen/include/xen/sched-if.h b/xen/include/xen/sched-if.h
index 655eb3af32..528874ab11 100644
--- a/xen/include/xen/sched-if.h
+++ b/xen/include/xen/sched-if.h
@@ -36,6 +36,7 @@  extern const cpumask_t *sched_res_mask;
  * as the rest of the struct.  Just have the scheduler point to the
  * one it wants (This may be the one right in front of it).*/
 struct sched_resource {
+    struct scheduler   *scheduler;
     spinlock_t         *schedule_lock,
                        _lock;
     struct sched_unit  *curr;
@@ -51,7 +52,6 @@  struct sched_resource {
 
 #define curr_on_cpu(c)    (get_sched_res(c)->curr)
 
-DECLARE_PER_CPU(struct scheduler *, scheduler);
 DECLARE_PER_CPU(struct cpupool *, cpupool);
 DECLARE_PER_CPU(struct sched_resource *, sched_res);