
[v3,38/47] xen/sched: move per-cpu variable cpupool to struct sched_resource

Message ID 20190914085251.18816-39-jgross@suse.com
State Superseded
Series xen: add core scheduling support

Commit Message

Jürgen Groß Sept. 14, 2019, 8:52 a.m. UTC
Having a pointer to struct cpupool in struct sched_resource instead
of per cpu is enough.

Signed-off-by: Juergen Gross <jgross@suse.com>
---
V1: new patch
---
 xen/common/cpupool.c       | 4 +---
 xen/common/sched_credit.c  | 2 +-
 xen/common/sched_rt.c      | 2 +-
 xen/common/schedule.c      | 8 ++++----
 xen/include/xen/sched-if.h | 2 +-
 5 files changed, 8 insertions(+), 10 deletions(-)
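Every call site in the diff below changes the same way: the per_cpu(cpupool, cpu) lookup becomes a field access through the cpu's sched_resource. A minimal standalone sketch of that access pattern (simplified types, with a plain array standing in for the per-cpu machinery; not the actual Xen code):

    /* Standalone sketch of the access-pattern change; hypothetical
     * stand-ins, not the real Xen definitions. */
    #include <stdio.h>

    #define NR_CPUS 4

    struct cpupool {
        unsigned int cpupool_id;
    };

    struct sched_resource {
        struct cpupool *cpupool;   /* new home of the former per-cpu variable */
    };

    /* Stand-in for the per-cpu sched_res pointer that remains after the patch. */
    static struct sched_resource sched_res_store[NR_CPUS];

    static struct sched_resource *get_sched_res(unsigned int cpu)
    {
        return &sched_res_store[cpu];
    }

    int main(void)
    {
        struct cpupool pool0 = { .cpupool_id = 0 };

        /* Roughly what schedule_cpu_switch() does: attach cpu 1 to pool0. */
        get_sched_res(1)->cpupool = &pool0;

        /* The former per_cpu(cpupool, 1) lookup is now a field access: */
        struct cpupool *c = get_sched_res(1)->cpupool;
        printf("cpu 1 is in pool %u\n", c->cpupool_id);

        return 0;
    }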

Comments

Jan Beulich Sept. 24, 2019, 1:10 p.m. UTC | #1
On 14.09.2019 10:52, Juergen Gross wrote:
> Having a pointer to struct cpupool in struct sched_resource instead
> of per cpu is enough.
> 
> Signed-off-by: Juergen Gross <jgross@suse.com>

Technically / mechanically
Reviewed-by: Jan Beulich <jbeulich@suse.com>

However, I'm confused by what appears to be a resulting chicken-and-
egg problem when considering the plan of allowing different
granularities in different cpupools. Perhaps I'm either mis-
remembering that plan, or it's not clear to me how data would get
structured in that case.

Jan
Jürgen Groß Sept. 25, 2019, 1:17 p.m. UTC | #2
On 24.09.19 15:10, Jan Beulich wrote:
> On 14.09.2019 10:52, Juergen Gross wrote:
>> Having a pointer to struct cpupool in struct sched_resource instead
>> of per cpu is enough.
>>
>> Signed-off-by: Juergen Gross <jgross@suse.com>
> 
> Technically / mechanically
> Reviewed-by: Jan Beulich <jbeulich@suse.com>
> 
> However, I'm confused by what appears to be a resulting chicken-and-
> egg problem when considering the plan of allowing different
> granularities in different cpupools. Perhaps I'm either mis-
> remembering that plan, or it's not clear to me how data would get
> structured in that case.

There still is:

DEFINE_PER_CPU_READ_MOSTLY(struct sched_resource *, sched_res);


Juergen
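In other words, the per-cpu step has not gone away: sched_res itself is still per-cpu, and the cpupool (and, eventually, anything pool-specific such as a per-pool scheduling granularity) is reached through it. A short sketch of that lookup chain (the granularity field is hypothetical, standing in for the planned per-cpupool granularity; simplified types, not the actual Xen code):

    /* Sketch of the lookup chain after this patch. Only sched_res stays
     * per-cpu; the "granularity" field is a hypothetical placeholder. */
    #include <stddef.h>

    struct cpupool {
        unsigned int granularity;
    };

    struct sched_resource {
        struct cpupool *cpupool;
    };

    /* Stand-in for DEFINE_PER_CPU_READ_MOSTLY(struct sched_resource *, sched_res). */
    static struct sched_resource *sched_res_ptr[4];

    static unsigned int sched_granularity(unsigned int cpu)
    {
        struct sched_resource *sr = sched_res_ptr[cpu];  /* per-cpu step */
        struct cpupool *c = sr ? sr->cpupool : NULL;     /* pool step    */

        return c ? c->granularity : 1;  /* no pool yet: single-cpu granularity */
    }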
Dario Faggioli Sept. 26, 2019, 9:09 a.m. UTC | #3
On Sat, 2019-09-14 at 10:52 +0200, Juergen Gross wrote:
> Having a pointer to struct cpupool in struct sched_resource instead
> of per cpu is enough.
> 
> Signed-off-by: Juergen Gross <jgross@suse.com>
>
Reviewed-by: Dario Faggioli <dfaggioli@suse.com>

Regards

Patch

diff --git a/xen/common/cpupool.c b/xen/common/cpupool.c
index a5f4113915..e0333a8417 100644
--- a/xen/common/cpupool.c
+++ b/xen/common/cpupool.c
@@ -34,8 +34,6 @@  static cpumask_t cpupool_locked_cpus;
 
 static DEFINE_SPINLOCK(cpupool_lock);
 
-DEFINE_PER_CPU(struct cpupool *, cpupool);
-
 static void free_cpupool_struct(struct cpupool *c)
 {
     if ( c )
@@ -506,7 +504,7 @@  static int cpupool_cpu_add(unsigned int cpu)
      * (or unplugging would have failed) and that is the default behavior
      * anyway.
      */
-    per_cpu(cpupool, cpu) = NULL;
+    get_sched_res(cpu)->cpupool = NULL;
     ret = cpupool_assign_cpu_locked(cpupool0, cpu);
 
     spin_unlock(&cpupool_lock);
diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c
index e47e865d76..41699f6b32 100644
--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -1681,7 +1681,7 @@  static struct csched_unit *
 csched_load_balance(struct csched_private *prv, int cpu,
     struct csched_unit *snext, bool *stolen)
 {
-    struct cpupool *c = per_cpu(cpupool, cpu);
+    struct cpupool *c = get_sched_res(cpu)->cpupool;
     struct csched_unit *speer;
     cpumask_t workers;
     cpumask_t *online;
diff --git a/xen/common/sched_rt.c b/xen/common/sched_rt.c
index 9f4e397334..d7eb6931ef 100644
--- a/xen/common/sched_rt.c
+++ b/xen/common/sched_rt.c
@@ -775,7 +775,7 @@  rt_deinit_pdata(const struct scheduler *ops, void *pcpu, int cpu)
 
     if ( prv->repl_timer.cpu == cpu )
     {
-        struct cpupool *c = per_cpu(cpupool, cpu);
+        struct cpupool *c = get_sched_res(cpu)->cpupool;
         unsigned int new_cpu = cpumask_cycle(cpu, cpupool_online_cpumask(c));
 
         /*
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index 1bd84a49bc..e360c9ec9f 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -1093,7 +1093,7 @@  int cpu_disable_scheduler(unsigned int cpu)
     cpumask_t online_affinity;
     int ret = 0;
 
-    c = per_cpu(cpupool, cpu);
+    c = get_sched_res(cpu)->cpupool;
     if ( c == NULL )
         return ret;
 
@@ -1162,7 +1162,7 @@  static int cpu_disable_scheduler_check(unsigned int cpu)
     struct cpupool *c;
     struct vcpu *v;
 
-    c = per_cpu(cpupool, cpu);
+    c = get_sched_res(cpu)->cpupool;
     if ( c == NULL )
         return 0;
 
@@ -2514,8 +2514,8 @@  int schedule_cpu_switch(unsigned int cpu, struct cpupool *c)
     void *ppriv, *ppriv_old, *vpriv, *vpriv_old;
     struct scheduler *old_ops = get_sched_res(cpu)->scheduler;
     struct scheduler *new_ops = (c == NULL) ? &sched_idle_ops : c->sched;
-    struct cpupool *old_pool = per_cpu(cpupool, cpu);
     struct sched_resource *sd = get_sched_res(cpu);
+    struct cpupool *old_pool = sd->cpupool;
     spinlock_t *old_lock, *new_lock;
     unsigned long flags;
 
@@ -2597,7 +2597,7 @@  int schedule_cpu_switch(unsigned int cpu, struct cpupool *c)
     sched_free_vdata(old_ops, vpriv_old);
     sched_free_pdata(old_ops, ppriv_old, cpu);
 
-    per_cpu(cpupool, cpu) = c;
+    get_sched_res(cpu)->cpupool = c;
     /* When a cpu is added to a pool, trigger it to go pick up some work */
     if ( c != NULL )
         cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);
diff --git a/xen/include/xen/sched-if.h b/xen/include/xen/sched-if.h
index 528874ab11..5625cafb6e 100644
--- a/xen/include/xen/sched-if.h
+++ b/xen/include/xen/sched-if.h
@@ -37,6 +37,7 @@  extern const cpumask_t *sched_res_mask;
  * one it wants (This may be the one right in front of it).*/
 struct sched_resource {
     struct scheduler   *scheduler;
+    struct cpupool     *cpupool;
     spinlock_t         *schedule_lock,
                        _lock;
     struct sched_unit  *curr;
@@ -52,7 +53,6 @@  struct sched_resource {
 
 #define curr_on_cpu(c)    (get_sched_res(c)->curr)
 
-DECLARE_PER_CPU(struct cpupool *, cpupool);
 DECLARE_PER_CPU(struct sched_resource *, sched_res);
 
 static inline struct sched_resource *get_sched_res(unsigned int cpu)