diff mbox series

[v2,03/48] xen/sched: build a linked list of struct sched_unit

Message ID 20190809145833.1020-4-jgross@suse.com (mailing list archive)
State Superseded
Headers show
Series xen: add core scheduling support | expand

Commit Message

Jürgen Groß Aug. 9, 2019, 2:57 p.m. UTC
In order to make it easy to iterate over the sched_unit elements of a
domain, build a singly-linked list of them and add an iterator for it.
The new list is guarded by the same mechanisms as the vcpu linked list,
as it is modified only via vcpu_create() or vcpu_destroy().

For completeness add another iterator for_each_sched_unit_vcpu() which
will iterate over all vcpus of a sched_unit (right now only one). This
will be needed later for larger scheduling granularity (e.g. cores).

Signed-off-by: Juergen Gross <jgross@suse.com>
---
V2:
- add comment (Dario Faggioli)
---
 xen/common/schedule.c   | 60 +++++++++++++++++++++++++++++++++++++++++--------
 xen/include/xen/sched.h | 14 ++++++++++++
 2 files changed, 65 insertions(+), 9 deletions(-)

Comments

Dario Faggioli Aug. 23, 2019, 10:52 a.m. UTC | #1
On Fri, 2019-08-09 at 16:57 +0200, Juergen Gross wrote:
> In order to make it easy to iterate over sched_unit elements of a
> domain, build a single linked list and add an iterator for it. The
> new
> list is guarded by the same mechanisms as the vcpu linked list as it
> is modified only via vcpu_create() or vcpu_destroy().
> 
> For completeness add another iterator for_each_sched_unit_vcpu()
> which
> will iterate over all vcpus of a sched_unit (right now only one).
> This
> will be needed later for larger scheduling granularity (e.g. cores).
> 
> Signed-off-by: Juergen Gross <jgross@suse.com>
> ---
> V2:
> - add comment (Dario Faggioli)
>
Reviewed-by: Dario Faggioli <dfaggioli@suse.com>

Regards
diff mbox series

Patch

diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index 038ebf5ae9..6da237110e 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -305,20 +305,64 @@  static void sched_spin_unlock_double(spinlock_t *lock1, spinlock_t *lock2,
     spin_unlock_irqrestore(lock1, flags);
 }
 
-int sched_init_vcpu(struct vcpu *v, unsigned int processor)
+static void sched_free_unit(struct sched_unit *unit)
 {
-    struct domain *d = v->domain;
-    struct sched_unit *unit;
+    struct sched_unit *prev_unit;
+    struct domain *d = unit->domain;
 
-    v->processor = processor;
+    if ( d->sched_unit_list == unit )
+        d->sched_unit_list = unit->next_in_list;
+    else
+    {
+        for_each_sched_unit ( d, prev_unit )
+        {
+            if ( prev_unit->next_in_list == unit )
+            {
+                prev_unit->next_in_list = unit->next_in_list;
+                break;
+            }
+        }
+    }
+
+    unit->vcpu_list->sched_unit = NULL;
+    xfree(unit);
+}
+
+static struct sched_unit *sched_alloc_unit(struct vcpu *v)
+{
+    struct sched_unit *unit, **prev_unit;
+    struct domain *d = v->domain;
 
     if ( (unit = xzalloc(struct sched_unit)) == NULL )
-        return 1;
+        return NULL;
+
     v->sched_unit = unit;
     unit->vcpu_list = v;
     unit->unit_id = v->vcpu_id;
     unit->domain = d;
 
+    for ( prev_unit = &d->sched_unit_list; *prev_unit;
+          prev_unit = &(*prev_unit)->next_in_list )
+        if ( (*prev_unit)->next_in_list &&
+             (*prev_unit)->next_in_list->unit_id > unit->unit_id )
+            break;
+
+    unit->next_in_list = *prev_unit;
+    *prev_unit = unit;
+
+    return unit;
+}
+
+int sched_init_vcpu(struct vcpu *v, unsigned int processor)
+{
+    struct domain *d = v->domain;
+    struct sched_unit *unit;
+
+    v->processor = processor;
+
+    if ( (unit = sched_alloc_unit(v)) == NULL )
+        return 1;
+
     /* Initialise the per-vcpu timers. */
     init_timer(&v->periodic_timer, vcpu_periodic_timer_fn,
                v, v->processor);
@@ -330,8 +374,7 @@  int sched_init_vcpu(struct vcpu *v, unsigned int processor)
     unit->priv = sched_alloc_vdata(dom_scheduler(d), unit, d->sched_priv);
     if ( unit->priv == NULL )
     {
-        v->sched_unit = NULL;
-        xfree(unit);
+        sched_free_unit(unit);
         return 1;
     }
 
@@ -474,8 +517,7 @@  void sched_destroy_vcpu(struct vcpu *v)
         atomic_dec(&per_cpu(schedule_data, v->processor).urgent_count);
     sched_remove_unit(vcpu_scheduler(v), unit);
     sched_free_vdata(vcpu_scheduler(v), unit->priv);
-    xfree(unit);
-    v->sched_unit = NULL;
+    sched_free_unit(unit);
 }
 
 int sched_init_domain(struct domain *d, int poolid)
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index a389ba5e1a..d7d9e153f9 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -277,9 +277,22 @@  struct sched_unit {
     struct domain         *domain;
     struct vcpu           *vcpu_list;
     void                  *priv;      /* scheduler private data */
+    struct sched_unit     *next_in_list;
     int                    unit_id;
 };
 
+#define for_each_sched_unit(d, e)                                         \
+    for ( (e) = (d)->sched_unit_list; (e) != NULL; (e) = (e)->next_in_list )
+
+/*
+ * All vcpus of a domain are in a single linked list with unit->vcpu_list
+ * pointing to the first vcpu of the unit. The loop must terminate when it
+ * reaches a vcpu which is not part of the unit being iterated over.
+ */
+#define for_each_sched_unit_vcpu(i, v)                                    \
+    for ( (v) = (i)->vcpu_list; (v) != NULL && (v)->sched_unit == (i);    \
+          (v) = (v)->next_in_list )
+
 /* Per-domain lock can be recursively acquired in fault handlers. */
 #define domain_lock(d) spin_lock_recursive(&(d)->domain_lock)
 #define domain_unlock(d) spin_unlock_recursive(&(d)->domain_lock)
@@ -333,6 +346,7 @@  struct domain
 
     /* Scheduling. */
     void            *sched_priv;    /* scheduler-specific data */
+    struct sched_unit *sched_unit_list;
     struct cpupool  *cpupool;
 
     struct domain   *next_in_list;