
[51/60] xen/sched: use one schedule lock for all free cpus

Message ID: 20190528103313.1343-52-jgross@suse.com
State: New, archived
Series: xen: add core scheduling support

Commit Message

Jürgen Groß May 28, 2019, 10:33 a.m. UTC
In order to prepare for always using cpu granularity when scheduling free
cpus, regardless of the scheduling granularity of other cpupools, use a
single fixed lock for all free cpus, shared by all schedulers. This will
allow moving any number of free cpus to a cpupool while guarded by only
one lock.

This requires dropping the related ASSERTs in some schedulers: a free
cpu's schedule lock now points at the shared lock rather than at its
per-cpu spinlock, so the asserted condition no longer holds when
init_pdata is called.

Signed-off-by: Juergen Gross <jgross@suse.com>
---
 xen/common/sched_credit.c | 9 ---------
 xen/common/sched_null.c   | 7 -------
 xen/common/schedule.c     | 7 +++++--
 3 files changed, 5 insertions(+), 18 deletions(-)
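
For illustration, the scheme after this patch boils down to the following
(Xen-style C sketch, not part of the patch; the struct is trimmed to the
relevant members, names as used in the series):

    /* Common lock shared by all free (not yet pool-assigned) cpus. */
    static DEFINE_SPINLOCK(sched_free_cpu_lock);

    /*
     * A scheduling resource reaches its lock via a pointer, so the lock
     * in effect can be switched at runtime:
     * - a free cpu points at sched_free_cpu_lock, shared by all free
     *   cpus and all schedulers;
     * - a cpu assigned to a cpupool is remapped to whatever lock that
     *   pool's scheduler uses (e.g. the per-cpu _lock for Credit1).
     */
    struct sched_resource {
        spinlock_t *schedule_lock; /* lock currently guarding this cpu */
        spinlock_t  _lock;         /* per-cpu lock, used while in a pool */
        /* ... further members omitted ... */
    };

With all free cpus behind one lock, moving a batch of them into a cpupool
only ever contends on sched_free_cpu_lock, which is what makes dropping
the per-scheduler ASSERTs below both necessary and safe.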

Patch

diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c
index 969ac4cc20..0b14fa9e11 100644
--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -616,15 +616,6 @@  csched_init_pdata(const struct scheduler *ops, void *pdata, int cpu)
 {
     unsigned long flags;
     struct csched_private *prv = CSCHED_PRIV(ops);
-    struct sched_resource *sd = get_sched_res(cpu);
-
-    /*
-     * This is called either during boot, resume or hotplug, in
-     * case Credit1 is the scheduler chosen at boot. In such cases, the
-     * scheduler lock for cpu is already pointing to the default per-cpu
-     * spinlock, as Credit1 needs it, so there is no remapping to be done.
-     */
-    ASSERT(sd->schedule_lock == &sd->_lock && !spin_is_locked(&sd->_lock));
 
     spin_lock_irqsave(&prv->lock, flags);
     init_pdata(prv, pdata, cpu);
diff --git a/xen/common/sched_null.c b/xen/common/sched_null.c
index e9336a2948..1499c82422 100644
--- a/xen/common/sched_null.c
+++ b/xen/common/sched_null.c
@@ -169,17 +169,10 @@  static void init_pdata(struct null_private *prv, unsigned int cpu)
 static void null_init_pdata(const struct scheduler *ops, void *pdata, int cpu)
 {
     struct null_private *prv = null_priv(ops);
-    struct sched_resource *sd = get_sched_res(cpu);
 
     /* alloc_pdata is not implemented, so we want this to be NULL. */
     ASSERT(!pdata);
 
-    /*
-     * The scheduler lock points already to the default per-cpu spinlock,
-     * so there is no remapping to be done.
-     */
-    ASSERT(sd->schedule_lock == &sd->_lock && !spin_is_locked(&sd->_lock));
-
     init_pdata(prv, cpu);
 }
 
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index 7fd83ffd4e..44364ff4d2 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -61,6 +61,9 @@  unsigned int sched_granularity = 1;
 bool sched_disable_smt_switching;
 const cpumask_t *sched_res_mask = &cpumask_all;
 
+/* Common lock for free cpus. */
+static DEFINE_SPINLOCK(sched_free_cpu_lock);
+
 /* Various timer handlers. */
 static void s_timer_fn(void *unused);
 static void vcpu_periodic_timer_fn(void *data);
@@ -2149,7 +2152,7 @@  static int cpu_schedule_up(unsigned int cpu)
 
     sd->scheduler = &ops;
     spin_lock_init(&sd->_lock);
-    sd->schedule_lock = &sd->_lock;
+    sd->schedule_lock = &sched_free_cpu_lock;
     init_timer(&sd->s_timer, s_timer_fn, NULL, cpu);
     atomic_set(&sd->urgent_count, 0);
 
@@ -2488,7 +2491,7 @@  int schedule_cpu_switch(unsigned int cpu, struct cpupool *c)
      * taking it, finds all the initializations we've done above in place.
      */
     smp_mb();
-    sd->schedule_lock = new_lock;
+    sd->schedule_lock = c ? new_lock : &sched_free_cpu_lock;
 
     /* _Not_ pcpu_schedule_unlock(): schedule_lock may have changed! */
     spin_unlock_irq(old_lock);
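
For context, it is worth recalling how callers cope with schedule_lock
being remapped under their feet: the lock helpers in
xen/include/xen/sched-if.h re-read the pointer after acquiring and retry
on a mismatch. A simplified rendering (the real helpers are generated by
the sched_lock() macro family):

    static inline spinlock_t *pcpu_schedule_lock_irq(unsigned int cpu)
    {
        for ( ; ; )
        {
            spinlock_t *lock = get_sched_res(cpu)->schedule_lock;

            spin_lock_irq(lock);
            /* Retry if schedule_cpu_switch() remapped the lock meanwhile. */
            if ( likely(lock == get_sched_res(cpu)->schedule_lock) )
                return lock;
            spin_unlock_irq(lock);
        }
    }

This is also why the hunk above unlocks via spin_unlock_irq(old_lock)
rather than pcpu_schedule_unlock(): after the store to sd->schedule_lock
the two may name different locks.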