[RFC,43/49] xen/sched: modify cpupool_domain_cpumask() to be an item mask

Message ID 20190329150934.17694-44-jgross@suse.com (mailing list archive)
State Superseded
Series xen: add core scheduling support

Commit Message

Jürgen Groß March 29, 2019, 3:09 p.m. UTC
cpupool_domain_cpumask() is used by the scheduling code to select cpus or
to iterate over cpus. In order to support scheduling items spanning
multiple cpus, let cpupool_domain_cpumask() return a cpumask with only
one bit set per scheduling resource.
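
As a rough illustration (not part of the patch, and using a plain
unsigned long bitmap instead of Xen's cpumask_t), assume an 8-cpu pool
with core scheduling and 2 threads per core: sched_res_mask keeps only
the first sibling of each core, and res_valid is recomputed from
cpu_valid whenever the pool membership changes, so a caller of
cpupool_domain_cpumask() sees one bit per scheduling resource:

    #include <stdio.h>

    int main(void)
    {
        unsigned long cpu_valid      = 0xffUL; /* cpus 0-7 assigned to the pool */
        unsigned long sched_res_mask = 0x55UL; /* first sibling of each core: 0,2,4,6 */
        unsigned long res_valid      = cpu_valid & sched_res_mask;

        /* Iterating over res_valid visits one cpu per scheduling resource. */
        for (unsigned int cpu = 0; cpu < 8; cpu++)
            if (res_valid & (1UL << cpu))
                printf("scheduling resource at cpu %u\n", cpu);

        return 0;
    }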

Signed-off-by: Juergen Gross <jgross@suse.com>
---
 xen/common/cpupool.c       | 30 +++++++++++++++++++++---------
 xen/common/schedule.c      |  5 +++--
 xen/include/xen/sched-if.h |  5 ++++-
 3 files changed, 28 insertions(+), 12 deletions(-)

Patch

diff --git a/xen/common/cpupool.c b/xen/common/cpupool.c
index 31ac323e40..ba76045937 100644
--- a/xen/common/cpupool.c
+++ b/xen/common/cpupool.c
@@ -38,26 +38,35 @@  DEFINE_PER_CPU(struct cpupool *, cpupool);
 
 #define cpupool_dprintk(x...) ((void)0)
 
+static void free_cpupool_struct(struct cpupool *c)
+{
+    if ( c )
+    {
+        free_cpumask_var(c->res_valid);
+        free_cpumask_var(c->cpu_valid);
+    }
+    xfree(c);
+}
+
 static struct cpupool *alloc_cpupool_struct(void)
 {
     struct cpupool *c = xzalloc(struct cpupool);
 
-    if ( !c || !zalloc_cpumask_var(&c->cpu_valid) )
+    if ( !c )
+        return NULL;
+
+    zalloc_cpumask_var(&c->cpu_valid);
+    zalloc_cpumask_var(&c->res_valid);
+
+    if ( !c->cpu_valid || !c->res_valid )
     {
-        xfree(c);
+        free_cpupool_struct(c);
         c = NULL;
     }
 
     return c;
 }
 
-static void free_cpupool_struct(struct cpupool *c)
-{
-    if ( c )
-        free_cpumask_var(c->cpu_valid);
-    xfree(c);
-}
-
 /*
  * find a cpupool by it's id. to be called with cpupool lock held
  * if exact is not specified, the first cpupool with an id larger or equal to
@@ -271,6 +280,7 @@  static int cpupool_assign_cpu_locked(struct cpupool *c, unsigned int cpu)
         cpupool_cpu_moving = NULL;
     }
     cpumask_set_cpu(cpu, c->cpu_valid);
+    cpumask_and(c->res_valid, c->cpu_valid, sched_res_mask);
 
     rcu_read_lock(&domlist_read_lock);
     for_each_domain_in_cpupool(d, c)
@@ -393,6 +403,7 @@  static int cpupool_unassign_cpu(struct cpupool *c, unsigned int cpu)
     atomic_inc(&c->refcnt);
     cpupool_cpu_moving = c;
     cpumask_clear_cpu(cpu, c->cpu_valid);
+    cpumask_and(c->res_valid, c->cpu_valid, sched_res_mask);
     spin_unlock(&cpupool_lock);
 
     work_cpu = smp_processor_id();
@@ -509,6 +520,7 @@  static int cpupool_cpu_remove(unsigned int cpu)
          * allowed only for CPUs in pool0.
          */
         cpumask_clear_cpu(cpu, cpupool0->cpu_valid);
+        cpumask_and(cpupool0->res_valid, cpupool0->cpu_valid, sched_res_mask);
         ret = 0;
     }
 
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index d2a02aea34..7fb0b1ed4e 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -57,6 +57,7 @@  integer_param("sched_ratelimit_us", sched_ratelimit_us);
 
 /* Number of vcpus per struct sched_item. */
 unsigned int sched_granularity = 1;
+const cpumask_t *sched_res_mask = &cpumask_all;
 
 /* Various timer handlers. */
 static void s_timer_fn(void *unused);
@@ -372,9 +373,9 @@  static unsigned int sched_select_initial_cpu(struct vcpu *v)
     cpumask_clear(&cpus);
     for_each_node_mask ( node, d->node_affinity )
         cpumask_or(&cpus, &cpus, &node_to_cpumask(node));
-    cpumask_and(&cpus, &cpus, cpupool_domain_cpumask(d));
+    cpumask_and(&cpus, &cpus, d->cpupool->cpu_valid);
     if ( cpumask_empty(&cpus) )
-        cpumask_copy(&cpus, cpupool_domain_cpumask(d));
+        cpumask_copy(&cpus, d->cpupool->cpu_valid);
 
     if ( v->vcpu_id == 0 )
         return cpumask_first(&cpus);
diff --git a/xen/include/xen/sched-if.h b/xen/include/xen/sched-if.h
index 4a3fb092c2..2b2612302d 100644
--- a/xen/include/xen/sched-if.h
+++ b/xen/include/xen/sched-if.h
@@ -22,6 +22,8 @@  extern cpumask_t cpupool_free_cpus;
 #define SCHED_DEFAULT_RATELIMIT_US 1000
 extern int sched_ratelimit_us;
 
+/* Scheduling resource mask. */
+extern const cpumask_t *sched_res_mask;
 
 /*
  * In order to allow a scheduler to remap the lock->cpu mapping,
@@ -389,6 +391,7 @@  struct cpupool
 {
     int              cpupool_id;
     cpumask_var_t    cpu_valid;      /* all cpus assigned to pool */
+    cpumask_var_t    res_valid;      /* all scheduling resources of pool */
     struct cpupool   *next;
     unsigned int     n_dom;
     struct scheduler *sched;
@@ -405,7 +408,7 @@  static inline cpumask_t* cpupool_domain_cpumask(struct domain *d)
      * be interested in calling this for the idle domain.
      */
     ASSERT(d->cpupool != NULL);
-    return d->cpupool->cpu_valid;
+    return d->cpupool->res_valid;
 }
 
 /*