diff mbox series

[v3,43/47] xen/sched: support multiple cpus per scheduling resource

Message ID 20190914085251.18816-44-jgross@suse.com (mailing list archive)
State Superseded
Headers show
Series xen: add core scheduling support | expand

Commit Message

Jürgen Groß Sept. 14, 2019, 8:52 a.m. UTC
Prepare for supporting multiple cpus per scheduling resource by
allocating the cpumask per resource dynamically.

Modify sched_res_mask to have only one bit per scheduling resource set.

Signed-off-by: Juergen Gross <jgross@suse.com>
---
V1: new patch (carved out from other patch)
---
 xen/common/schedule.c      | 16 ++++++++++++++--
 xen/include/xen/sched-if.h |  4 ++--
 2 files changed, 16 insertions(+), 4 deletions(-)

Comments

Jan Beulich Sept. 24, 2019, 1:49 p.m. UTC | #1
On 14.09.2019 10:52, Juergen Gross wrote:
> --- a/xen/common/schedule.c
> +++ b/xen/common/schedule.c
> @@ -59,7 +59,7 @@ integer_param("sched_ratelimit_us", sched_ratelimit_us);
>  enum sched_gran __read_mostly opt_sched_granularity = SCHED_GRAN_cpu;
>  unsigned int __read_mostly sched_granularity = 1;
>  bool __read_mostly sched_disable_smt_switching;
> -const cpumask_t *sched_res_mask = &cpumask_all;
> +cpumask_var_t sched_res_mask;

For a non-automatic variable it might be even easier to have it
be cpumask_t?

> @@ -2401,6 +2407,8 @@ static int cpu_schedule_up(unsigned int cpu)
>      /* We start with cpu granularity. */
>      sd->granularity = 1;
>  
> +    cpumask_set_cpu(cpu, sched_res_mask);

I'm missing the clearing counterpart.

Jan
Jürgen Groß Sept. 25, 2019, 1:39 p.m. UTC | #2
On 24.09.19 15:49, Jan Beulich wrote:
> On 14.09.2019 10:52, Juergen Gross wrote:
>> --- a/xen/common/schedule.c
>> +++ b/xen/common/schedule.c
>> @@ -59,7 +59,7 @@ integer_param("sched_ratelimit_us", sched_ratelimit_us);
>>   enum sched_gran __read_mostly opt_sched_granularity = SCHED_GRAN_cpu;
>>   unsigned int __read_mostly sched_granularity = 1;
>>   bool __read_mostly sched_disable_smt_switching;
>> -const cpumask_t *sched_res_mask = &cpumask_all;
>> +cpumask_var_t sched_res_mask;
> 
> For a non-automatic variable it might be even easier to have it
> be cpumask_t?

I can change that.

> 
>> @@ -2401,6 +2407,8 @@ static int cpu_schedule_up(unsigned int cpu)
>>       /* We start with cpu granularity. */
>>       sd->granularity = 1;
>>   
>> +    cpumask_set_cpu(cpu, sched_res_mask);
> 
> I'm missing the clearing counterpart.

Oh, indeed. Good catch! Not that it would result in wrong behavior,
though.


Juergen
diff mbox series

Patch

diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index c2e5a9220d..882b3baf42 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -59,7 +59,7 @@  integer_param("sched_ratelimit_us", sched_ratelimit_us);
 enum sched_gran __read_mostly opt_sched_granularity = SCHED_GRAN_cpu;
 unsigned int __read_mostly sched_granularity = 1;
 bool __read_mostly sched_disable_smt_switching;
-const cpumask_t *sched_res_mask = &cpumask_all;
+cpumask_var_t sched_res_mask;
 
 /* Common lock for free cpus. */
 static DEFINE_SPINLOCK(sched_free_cpu_lock);
@@ -2388,8 +2388,14 @@  static int cpu_schedule_up(unsigned int cpu)
     sd = xzalloc(struct sched_resource);
     if ( sd == NULL )
         return -ENOMEM;
+    if ( !zalloc_cpumask_var(&sd->cpus) )
+    {
+        xfree(sd);
+        return -ENOMEM;
+    }
+
     sd->master_cpu = cpu;
-    sd->cpus = cpumask_of(cpu);
+    cpumask_copy(sd->cpus, cpumask_of(cpu));
     set_sched_res(cpu, sd);
 
     sd->scheduler = &sched_idle_ops;
@@ -2401,6 +2407,8 @@  static int cpu_schedule_up(unsigned int cpu)
     /* We start with cpu granularity. */
     sd->granularity = 1;
 
+    cpumask_set_cpu(cpu, sched_res_mask);
+
     /* Boot CPU is dealt with later in scheduler_init(). */
     if ( cpu == 0 )
         return 0;
@@ -2433,6 +2441,7 @@  static void sched_res_free(struct rcu_head *head)
 {
     struct sched_resource *sd = container_of(head, struct sched_resource, rcu);
 
+    free_cpumask_var(sd->cpus);
     xfree(sd);
 }
 
@@ -2581,6 +2590,9 @@  void __init scheduler_init(void)
         printk("Using '%s' (%s)\n", ops.name, ops.opt_name);
     }
 
+    if ( !zalloc_cpumask_var(&sched_res_mask) )
+        BUG();
+
     if ( cpu_schedule_up(0) )
         BUG();
     register_cpu_notifier(&cpu_schedule_nfb);
diff --git a/xen/include/xen/sched-if.h b/xen/include/xen/sched-if.h
index f10ed768b0..1770b9bca1 100644
--- a/xen/include/xen/sched-if.h
+++ b/xen/include/xen/sched-if.h
@@ -24,7 +24,7 @@  extern cpumask_t cpupool_free_cpus;
 extern int sched_ratelimit_us;
 
 /* Scheduling resource mask. */
-extern const cpumask_t *sched_res_mask;
+extern cpumask_var_t sched_res_mask;
 
 /* Number of vcpus per struct sched_unit. */
 enum sched_gran {
@@ -59,7 +59,7 @@  struct sched_resource {
     /* Cpu with lowest id in scheduling resource. */
     unsigned int        master_cpu;
     unsigned int        granularity;
-    const cpumask_t    *cpus;           /* cpus covered by this struct     */
+    cpumask_var_t       cpus;           /* cpus covered by this struct     */
     struct rcu_head     rcu;
 };