diff mbox series

[v3,36/47] xen/sched: carve out freeing sched_unit memory into dedicated function

Message ID 20190914085251.18816-37-jgross@suse.com (mailing list archive)
State Superseded
Headers show
Series xen: add core scheduling support | expand

Commit Message

Jürgen Groß Sept. 14, 2019, 8:52 a.m. UTC
In a later patch we'll need a way to free a sched_unit structure
without any side effects.

Signed-off-by: Juergen Gross <jgross@suse.com>
---
RFC V2: new patch, carved out from RFC V1 patch 49
---
 xen/common/schedule.c | 38 +++++++++++++++++++++-----------------
 1 file changed, 21 insertions(+), 17 deletions(-)

Comments

Jan Beulich Sept. 24, 2019, 12:04 p.m. UTC | #1
On 14.09.2019 10:52, Juergen Gross wrote:
> --- a/xen/common/schedule.c
> +++ b/xen/common/schedule.c
> @@ -351,26 +351,10 @@ static void sched_spin_unlock_double(spinlock_t *lock1, spinlock_t *lock2,
>      spin_unlock_irqrestore(lock1, flags);
>  }
>  
> -static void sched_free_unit(struct sched_unit *unit, struct vcpu *v)
> +static void sched_free_unit_mem(struct sched_unit *unit)
>  {
>      struct sched_unit *prev_unit;
>      struct domain *d = unit->domain;
> -    struct vcpu *vunit;
> -    unsigned int cnt = 0;
> -
> -    /* Don't count to be released vcpu, might be not in vcpu list yet. */
> -    for_each_sched_unit_vcpu ( unit, vunit )
> -        if ( vunit != v )
> -            cnt++;
> -
> -    v->sched_unit = NULL;
> -    unit->runstate_cnt[v->runstate.state]--;
> -
> -    if ( cnt )
> -        return;
> -
> -    if ( unit->vcpu_list == v )
> -        unit->vcpu_list = v->next_in_list;
>  
>      if ( d->sched_unit_list == unit )
>          d->sched_unit_list = unit->next_in_list;
> @@ -393,6 +377,26 @@ static void sched_free_unit(struct sched_unit *unit, struct vcpu *v)
>      xfree(unit);
>  }
>  
> +static void sched_free_unit(struct sched_unit *unit, struct vcpu *v)
> +{
> +    struct vcpu *vunit;
> +    unsigned int cnt = 0;
> +
> +    /* Don't count to be released vcpu, might be not in vcpu list yet. */
> +    for_each_sched_unit_vcpu ( unit, vunit )
> +        if ( vunit != v )
> +            cnt++;
> +
> +    v->sched_unit = NULL;
> +    unit->runstate_cnt[v->runstate.state]--;
> +
> +    if ( unit->vcpu_list == v )
> +        unit->vcpu_list = v->next_in_list;
> +
> +    if ( !cnt )
> +        sched_free_unit_mem(unit);
> +}

The entire sched_free_unit() is new code (starting from patch 3) - why
don't you arrange for the split right away, instead of moving code
around here?

Jan
Jürgen Groß Sept. 25, 2019, 1:09 p.m. UTC | #2
On 24.09.19 14:04, Jan Beulich wrote:
> On 14.09.2019 10:52, Juergen Gross wrote:
>> --- a/xen/common/schedule.c
>> +++ b/xen/common/schedule.c
>> @@ -351,26 +351,10 @@ static void sched_spin_unlock_double(spinlock_t *lock1, spinlock_t *lock2,
>>       spin_unlock_irqrestore(lock1, flags);
>>   }
>>   
>> -static void sched_free_unit(struct sched_unit *unit, struct vcpu *v)
>> +static void sched_free_unit_mem(struct sched_unit *unit)
>>   {
>>       struct sched_unit *prev_unit;
>>       struct domain *d = unit->domain;
>> -    struct vcpu *vunit;
>> -    unsigned int cnt = 0;
>> -
>> -    /* Don't count to be released vcpu, might be not in vcpu list yet. */
>> -    for_each_sched_unit_vcpu ( unit, vunit )
>> -        if ( vunit != v )
>> -            cnt++;
>> -
>> -    v->sched_unit = NULL;
>> -    unit->runstate_cnt[v->runstate.state]--;
>> -
>> -    if ( cnt )
>> -        return;
>> -
>> -    if ( unit->vcpu_list == v )
>> -        unit->vcpu_list = v->next_in_list;
>>   
>>       if ( d->sched_unit_list == unit )
>>           d->sched_unit_list = unit->next_in_list;
>> @@ -393,6 +377,26 @@ static void sched_free_unit(struct sched_unit *unit, struct vcpu *v)
>>       xfree(unit);
>>   }
>>   
>> +static void sched_free_unit(struct sched_unit *unit, struct vcpu *v)
>> +{
>> +    struct vcpu *vunit;
>> +    unsigned int cnt = 0;
>> +
>> +    /* Don't count to be released vcpu, might be not in vcpu list yet. */
>> +    for_each_sched_unit_vcpu ( unit, vunit )
>> +        if ( vunit != v )
>> +            cnt++;
>> +
>> +    v->sched_unit = NULL;
>> +    unit->runstate_cnt[v->runstate.state]--;
>> +
>> +    if ( unit->vcpu_list == v )
>> +        unit->vcpu_list = v->next_in_list;
>> +
>> +    if ( !cnt )
>> +        sched_free_unit_mem(unit);
>> +}
> 
> The entire sched_free_unit() is new code (starting from patch 3) - why
> don't you arrange for the split right away, instead of moving code
> around here?

I wanted to introduce new subfunctions only when they are really needed.
I can merge this patch into patch 3 if you like that better.


Juergen
Jan Beulich Sept. 25, 2019, 1:16 p.m. UTC | #3
On 25.09.2019 15:09, Jürgen Groß wrote:
> On 24.09.19 14:04, Jan Beulich wrote:
>> On 14.09.2019 10:52, Juergen Gross wrote:
>>> --- a/xen/common/schedule.c
>>> +++ b/xen/common/schedule.c
>>> @@ -351,26 +351,10 @@ static void sched_spin_unlock_double(spinlock_t *lock1, spinlock_t *lock2,
>>>       spin_unlock_irqrestore(lock1, flags);
>>>   }
>>>   
>>> -static void sched_free_unit(struct sched_unit *unit, struct vcpu *v)
>>> +static void sched_free_unit_mem(struct sched_unit *unit)
>>>   {
>>>       struct sched_unit *prev_unit;
>>>       struct domain *d = unit->domain;
>>> -    struct vcpu *vunit;
>>> -    unsigned int cnt = 0;
>>> -
>>> -    /* Don't count to be released vcpu, might be not in vcpu list yet. */
>>> -    for_each_sched_unit_vcpu ( unit, vunit )
>>> -        if ( vunit != v )
>>> -            cnt++;
>>> -
>>> -    v->sched_unit = NULL;
>>> -    unit->runstate_cnt[v->runstate.state]--;
>>> -
>>> -    if ( cnt )
>>> -        return;
>>> -
>>> -    if ( unit->vcpu_list == v )
>>> -        unit->vcpu_list = v->next_in_list;
>>>   
>>>       if ( d->sched_unit_list == unit )
>>>           d->sched_unit_list = unit->next_in_list;
>>> @@ -393,6 +377,26 @@ static void sched_free_unit(struct sched_unit *unit, struct vcpu *v)
>>>       xfree(unit);
>>>   }
>>>   
>>> +static void sched_free_unit(struct sched_unit *unit, struct vcpu *v)
>>> +{
>>> +    struct vcpu *vunit;
>>> +    unsigned int cnt = 0;
>>> +
>>> +    /* Don't count to be released vcpu, might be not in vcpu list yet. */
>>> +    for_each_sched_unit_vcpu ( unit, vunit )
>>> +        if ( vunit != v )
>>> +            cnt++;
>>> +
>>> +    v->sched_unit = NULL;
>>> +    unit->runstate_cnt[v->runstate.state]--;
>>> +
>>> +    if ( unit->vcpu_list == v )
>>> +        unit->vcpu_list = v->next_in_list;
>>> +
>>> +    if ( !cnt )
>>> +        sched_free_unit_mem(unit);
>>> +}
>>
>> The entire sched_free_unit() is new code (starting from patch 3) - why
>> don't you arrange for the split right away, instead of moving code
>> around here?
> 
> I wanted to introduce new subfunctions only when they are really needed.

There are cases where this is indeed the better approach; perhaps
that even the typical case. But here you spend an entire patch on
re-doing what you've done before. So ...

> I can merge this patch into patch 3 if you like that better.

... yes, personally I'd prefer this, but in the end it's the call
of the scheduler maintainers.

Jan
Dario Faggioli Sept. 25, 2019, 5:31 p.m. UTC | #4
On Wed, 2019-09-25 at 15:16 +0200, Jan Beulich wrote:
> On 25.09.2019 15:09, Jürgen Groß wrote:
> > 
> There are cases where this is indeed the better approach; perhaps
> that even the typical case. But here you spend an entire patch on
> re-doing what you've done before. So ...
> 
> > I can merge this patch into patch 3 if you like that better.
> 
> ... yes, personally I'd prefer this, but in the end it's the call
> of the scheduler maintainers.
>
Yes, I think I like it better too for the patches to be merged.

Regards
diff mbox series

Patch

diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index 1e793617ec..88ac1a1ab8 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -351,26 +351,10 @@  static void sched_spin_unlock_double(spinlock_t *lock1, spinlock_t *lock2,
     spin_unlock_irqrestore(lock1, flags);
 }
 
-static void sched_free_unit(struct sched_unit *unit, struct vcpu *v)
+static void sched_free_unit_mem(struct sched_unit *unit)
 {
     struct sched_unit *prev_unit;
     struct domain *d = unit->domain;
-    struct vcpu *vunit;
-    unsigned int cnt = 0;
-
-    /* Don't count to be released vcpu, might be not in vcpu list yet. */
-    for_each_sched_unit_vcpu ( unit, vunit )
-        if ( vunit != v )
-            cnt++;
-
-    v->sched_unit = NULL;
-    unit->runstate_cnt[v->runstate.state]--;
-
-    if ( cnt )
-        return;
-
-    if ( unit->vcpu_list == v )
-        unit->vcpu_list = v->next_in_list;
 
     if ( d->sched_unit_list == unit )
         d->sched_unit_list = unit->next_in_list;
@@ -393,6 +377,26 @@  static void sched_free_unit(struct sched_unit *unit, struct vcpu *v)
     xfree(unit);
 }
 
+static void sched_free_unit(struct sched_unit *unit, struct vcpu *v)
+{
+    struct vcpu *vunit;
+    unsigned int cnt = 0;
+
+    /* Don't count to be released vcpu, might be not in vcpu list yet. */
+    for_each_sched_unit_vcpu ( unit, vunit )
+        if ( vunit != v )
+            cnt++;
+
+    v->sched_unit = NULL;
+    unit->runstate_cnt[v->runstate.state]--;
+
+    if ( unit->vcpu_list == v )
+        unit->vcpu_list = v->next_in_list;
+
+    if ( !cnt )
+        sched_free_unit_mem(unit);
+}
+
 static void sched_unit_add_vcpu(struct sched_unit *unit, struct vcpu *v)
 {
     v->sched_unit = unit;