
[RFC,4/8] linux-user: Rework exclusive operation mechanism

Message ID 1466375313-7562-5-git-send-email-sergey.fedorov@linaro.org (mailing list archive)
State New, archived

Commit Message

sergey.fedorov@linaro.org June 19, 2016, 10:28 p.m. UTC
From: Sergey Fedorov <serge.fdrv@gmail.com>

A single variable 'pending_cpus' was used for both counting currently
running CPUs and for signalling the pending exclusive operation request.

To prepare for supporting operations which require a quiescent state,
such as a translation buffer flush, it is useful to keep a counter of
currently running CPUs up to date at all times.

Use a separate variable 'tcg_pending_cpus' to count the currently
running CPUs and a separate variable 'exclusive_pending' to indicate
that an exclusive operation is pending.

Signed-off-by: Sergey Fedorov <serge.fdrv@gmail.com>
Signed-off-by: Sergey Fedorov <sergey.fedorov@linaro.org>
---
 linux-user/main.c | 24 ++++++++++++------------
 1 file changed, 12 insertions(+), 12 deletions(-)
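
To illustrate the quiescent-state use case mentioned in the commit message, here is a minimal sketch (not part of this patch) of how a caller inside linux-user/main.c could wrap a translation buffer flush in the exclusive section once this mechanism is in place. The helper name do_quiescent_tb_flush() is hypothetical, and tb_flush() taking a CPUState * is an assumption based on the contemporary QEMU tree.

    /* Hypothetical caller, not part of this patch.  An operation that needs
     * a quiescent state runs between start_exclusive() and end_exclusive().
     */
    static void do_quiescent_tb_flush(CPUState *cpu)
    {
        start_exclusive();   /* returns once tcg_pending_cpus drops to zero */
        tb_flush(cpu);       /* assumed signature; safe because no other
                                thread is executing translated code here */
        end_exclusive();     /* clears exclusive_pending and broadcasts
                                exclusive_resume so other threads resume */
    }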

Comments

Alex Bennée June 27, 2016, 9:02 a.m. UTC | #1
Sergey Fedorov <sergey.fedorov@linaro.org> writes:

> From: Sergey Fedorov <serge.fdrv@gmail.com>
>
> A single variable 'pending_cpus' was used for both counting currently
> running CPUs and for signalling the pending exclusive operation request.
>
> To prepare for supporting operations which requires a quiescent state,
> like translation buffer flush, it is useful to keep a counter of
> currently running CPUs always up to date.
>
> Use a separate variable 'tcg_pending_cpus' to count for currently
> running CPUs and a separate variable 'exclusive_pending' to indicate
> that there's an exclusive operation pending.
>
> Signed-off-by: Sergey Fedorov <serge.fdrv@gmail.com>
> Signed-off-by: Sergey Fedorov <sergey.fedorov@linaro.org>
> ---
>  linux-user/main.c | 24 ++++++++++++------------
>  1 file changed, 12 insertions(+), 12 deletions(-)
>
> diff --git a/linux-user/main.c b/linux-user/main.c
> index b9a4e0ea45ac..485336f78b8f 100644
> --- a/linux-user/main.c
> +++ b/linux-user/main.c
> @@ -111,7 +111,8 @@ static pthread_mutex_t cpu_list_mutex = PTHREAD_MUTEX_INITIALIZER;
>  static pthread_mutex_t exclusive_lock = PTHREAD_MUTEX_INITIALIZER;
>  static pthread_cond_t exclusive_cond = PTHREAD_COND_INITIALIZER;
>  static pthread_cond_t exclusive_resume = PTHREAD_COND_INITIALIZER;
> -static int pending_cpus;
> +static bool exclusive_pending;
> +static int tcg_pending_cpus;

I'm not sure you need to re-name to tcg_pending_cpus as TCG is implied
for linux-user. Also they are not really CPUs (although we are using the
CPU structure for each running thread). I'm not sure if there is a
neater way to make the distinction clear.

>
>  /* Make sure everything is in a consistent state for calling fork().  */
>  void fork_start(void)
> @@ -133,7 +134,8 @@ void fork_end(int child)
>                  QTAILQ_REMOVE(&cpus, cpu, node);
>              }
>          }
> -        pending_cpus = 0;
> +        tcg_pending_cpus = 0;
> +        exclusive_pending = false;
>          pthread_mutex_init(&exclusive_lock, NULL);
>          pthread_mutex_init(&cpu_list_mutex, NULL);
>          pthread_cond_init(&exclusive_cond, NULL);
> @@ -150,7 +152,7 @@ void fork_end(int child)
>     must be held.  */
>  static inline void exclusive_idle(void)
>  {
> -    while (pending_cpus) {
> +    while (exclusive_pending) {
>          pthread_cond_wait(&exclusive_resume, &exclusive_lock);
>      }
>  }
> @@ -164,15 +166,14 @@ static inline void start_exclusive(void)
>      pthread_mutex_lock(&exclusive_lock);
>      exclusive_idle();
>
> -    pending_cpus = 1;
> +    exclusive_pending = true;
>      /* Make all other cpus stop executing.  */
>      CPU_FOREACH(other_cpu) {
>          if (other_cpu->running) {
> -            pending_cpus++;
>              cpu_exit(other_cpu);
>          }
>      }
> -    if (pending_cpus > 1) {
> +    while (tcg_pending_cpus) {
>          pthread_cond_wait(&exclusive_cond, &exclusive_lock);
>      }
>  }
> @@ -180,7 +181,7 @@ static inline void start_exclusive(void)
>  /* Finish an exclusive operation.  */
>  static inline void __attribute__((unused)) end_exclusive(void)
>  {
> -    pending_cpus = 0;
> +    exclusive_pending = false;
>      pthread_cond_broadcast(&exclusive_resume);
>      pthread_mutex_unlock(&exclusive_lock);
>  }
> @@ -191,6 +192,7 @@ static inline void cpu_exec_start(CPUState *cpu)
>      pthread_mutex_lock(&exclusive_lock);
>      exclusive_idle();
>      cpu->running = true;
> +    tcg_pending_cpus++;

These aren't TLS variables so shouldn't we be ensuring all access is atomic?

>      pthread_mutex_unlock(&exclusive_lock);
>  }
>
> @@ -199,11 +201,9 @@ static inline void cpu_exec_end(CPUState *cpu)
>  {
>      pthread_mutex_lock(&exclusive_lock);
>      cpu->running = false;
> -    if (pending_cpus > 1) {
> -        pending_cpus--;
> -        if (pending_cpus == 1) {
> -            pthread_cond_signal(&exclusive_cond);
> -        }
> +    tcg_pending_cpus--;
> +    if (!tcg_pending_cpus) {
> +        pthread_cond_broadcast(&exclusive_cond);
>      }

Couldn't two threads race to -1 here?

>      exclusive_idle();
>      pthread_mutex_unlock(&exclusive_lock);


--
Alex Bennée
Sergey Fedorov June 29, 2016, 2:57 p.m. UTC | #2
On 27/06/16 12:02, Alex Bennée wrote:
> Sergey Fedorov <sergey.fedorov@linaro.org> writes:
>
>> From: Sergey Fedorov <serge.fdrv@gmail.com>
>>
(snip)
>> diff --git a/linux-user/main.c b/linux-user/main.c
>> index b9a4e0ea45ac..485336f78b8f 100644
>> --- a/linux-user/main.c
>> +++ b/linux-user/main.c
>> @@ -111,7 +111,8 @@ static pthread_mutex_t cpu_list_mutex = PTHREAD_MUTEX_INITIALIZER;
>>  static pthread_mutex_t exclusive_lock = PTHREAD_MUTEX_INITIALIZER;
>>  static pthread_cond_t exclusive_cond = PTHREAD_COND_INITIALIZER;
>>  static pthread_cond_t exclusive_resume = PTHREAD_COND_INITIALIZER;
>> -static int pending_cpus;
>> +static bool exclusive_pending;
>> +static int tcg_pending_cpus;
> I'm not sure you need to re-name to tcg_pending_cpus as TCG is implied
> for linux-user. Also they are not really CPUs (although we are using the
> CPU structure for each running thread). I'm not sure if there is a
> neater way to make the distinction clear.

How about 'tcg_pending_threads'? It is going to be used in system-mode
soon, so I'd like to keep the "tcg_" prefix.

>
>>  /* Make sure everything is in a consistent state for calling fork().  */
>>  void fork_start(void)
>> @@ -133,7 +134,8 @@ void fork_end(int child)
>>                  QTAILQ_REMOVE(&cpus, cpu, node);
>>              }
>>          }
>> -        pending_cpus = 0;
>> +        tcg_pending_cpus = 0;
>> +        exclusive_pending = false;
>>          pthread_mutex_init(&exclusive_lock, NULL);
>>          pthread_mutex_init(&cpu_list_mutex, NULL);
>>          pthread_cond_init(&exclusive_cond, NULL);
>> @@ -150,7 +152,7 @@ void fork_end(int child)
>>     must be held.  */
>>  static inline void exclusive_idle(void)
>>  {
>> -    while (pending_cpus) {
>> +    while (exclusive_pending) {
>>          pthread_cond_wait(&exclusive_resume, &exclusive_lock);
>>      }
>>  }
>> @@ -164,15 +166,14 @@ static inline void start_exclusive(void)
>>      pthread_mutex_lock(&exclusive_lock);
>>      exclusive_idle();
>>
>> -    pending_cpus = 1;
>> +    exclusive_pending = true;
>>      /* Make all other cpus stop executing.  */
>>      CPU_FOREACH(other_cpu) {
>>          if (other_cpu->running) {
>> -            pending_cpus++;
>>              cpu_exit(other_cpu);
>>          }
>>      }
>> -    if (pending_cpus > 1) {
>> +    while (tcg_pending_cpus) {
>>          pthread_cond_wait(&exclusive_cond, &exclusive_lock);
>>      }
>>  }
>> @@ -180,7 +181,7 @@ static inline void start_exclusive(void)
>>  /* Finish an exclusive operation.  */
>>  static inline void __attribute__((unused)) end_exclusive(void)
>>  {
>> -    pending_cpus = 0;
>> +    exclusive_pending = false;
>>      pthread_cond_broadcast(&exclusive_resume);
>>      pthread_mutex_unlock(&exclusive_lock);
>>  }
>> @@ -191,6 +192,7 @@ static inline void cpu_exec_start(CPUState *cpu)
>>      pthread_mutex_lock(&exclusive_lock);
>>      exclusive_idle();
>>      cpu->running = true;
>> +    tcg_pending_cpus++;
> These aren't TLS variables so shouldn't we be ensuring all access is atomic?

It is protected by 'exclusive_lock'.

>
>>      pthread_mutex_unlock(&exclusive_lock);
>>  }
>>
>> @@ -199,11 +201,9 @@ static inline void cpu_exec_end(CPUState *cpu)
>>  {
>>      pthread_mutex_lock(&exclusive_lock);
>>      cpu->running = false;
>> -    if (pending_cpus > 1) {
>> -        pending_cpus--;
>> -        if (pending_cpus == 1) {
>> -            pthread_cond_signal(&exclusive_cond);
>> -        }
>> +    tcg_pending_cpus--;
>> +    if (!tcg_pending_cpus) {
>> +        pthread_cond_broadcast(&exclusive_cond);
>>      }
> Couldn't two threads race to -1 here?

See comment above.
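
To make this concrete, a minimal self-contained sketch of the same discipline (illustrative names, not the ones from the patch): every increment and decrement happens with the same mutex held, so the plain int cannot be torn and cannot race below zero as long as each decrement is paired with an earlier increment.

    #include <pthread.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t all_stopped = PTHREAD_COND_INITIALIZER;
    static int running;            /* only ever accessed with 'lock' held */

    static void exec_start(void)
    {
        pthread_mutex_lock(&lock);
        running++;                 /* serialized by 'lock', no atomics needed */
        pthread_mutex_unlock(&lock);
    }

    static void exec_end(void)
    {
        pthread_mutex_lock(&lock);
        running--;                 /* paired with exec_start(), so stays >= 0 */
        if (running == 0) {
            pthread_cond_broadcast(&all_stopped);
        }
        pthread_mutex_unlock(&lock);
    }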

Kind regards,
Sergey

>
>>      exclusive_idle();
>>      pthread_mutex_unlock(&exclusive_lock);
>
> --
> Alex Bennée

Patch

diff --git a/linux-user/main.c b/linux-user/main.c
index b9a4e0ea45ac..485336f78b8f 100644
--- a/linux-user/main.c
+++ b/linux-user/main.c
@@ -111,7 +111,8 @@  static pthread_mutex_t cpu_list_mutex = PTHREAD_MUTEX_INITIALIZER;
 static pthread_mutex_t exclusive_lock = PTHREAD_MUTEX_INITIALIZER;
 static pthread_cond_t exclusive_cond = PTHREAD_COND_INITIALIZER;
 static pthread_cond_t exclusive_resume = PTHREAD_COND_INITIALIZER;
-static int pending_cpus;
+static bool exclusive_pending;
+static int tcg_pending_cpus;
 
 /* Make sure everything is in a consistent state for calling fork().  */
 void fork_start(void)
@@ -133,7 +134,8 @@  void fork_end(int child)
                 QTAILQ_REMOVE(&cpus, cpu, node);
             }
         }
-        pending_cpus = 0;
+        tcg_pending_cpus = 0;
+        exclusive_pending = false;
         pthread_mutex_init(&exclusive_lock, NULL);
         pthread_mutex_init(&cpu_list_mutex, NULL);
         pthread_cond_init(&exclusive_cond, NULL);
@@ -150,7 +152,7 @@  void fork_end(int child)
    must be held.  */
 static inline void exclusive_idle(void)
 {
-    while (pending_cpus) {
+    while (exclusive_pending) {
         pthread_cond_wait(&exclusive_resume, &exclusive_lock);
     }
 }
@@ -164,15 +166,14 @@  static inline void start_exclusive(void)
     pthread_mutex_lock(&exclusive_lock);
     exclusive_idle();
 
-    pending_cpus = 1;
+    exclusive_pending = true;
     /* Make all other cpus stop executing.  */
     CPU_FOREACH(other_cpu) {
         if (other_cpu->running) {
-            pending_cpus++;
             cpu_exit(other_cpu);
         }
     }
-    if (pending_cpus > 1) {
+    while (tcg_pending_cpus) {
         pthread_cond_wait(&exclusive_cond, &exclusive_lock);
     }
 }
@@ -180,7 +181,7 @@  static inline void start_exclusive(void)
 /* Finish an exclusive operation.  */
 static inline void __attribute__((unused)) end_exclusive(void)
 {
-    pending_cpus = 0;
+    exclusive_pending = false;
     pthread_cond_broadcast(&exclusive_resume);
     pthread_mutex_unlock(&exclusive_lock);
 }
@@ -191,6 +192,7 @@  static inline void cpu_exec_start(CPUState *cpu)
     pthread_mutex_lock(&exclusive_lock);
     exclusive_idle();
     cpu->running = true;
+    tcg_pending_cpus++;
     pthread_mutex_unlock(&exclusive_lock);
 }
 
@@ -199,11 +201,9 @@  static inline void cpu_exec_end(CPUState *cpu)
 {
     pthread_mutex_lock(&exclusive_lock);
     cpu->running = false;
-    if (pending_cpus > 1) {
-        pending_cpus--;
-        if (pending_cpus == 1) {
-            pthread_cond_signal(&exclusive_cond);
-        }
+    tcg_pending_cpus--;
+    if (!tcg_pending_cpus) {
+        pthread_cond_broadcast(&exclusive_cond);
     }
     exclusive_idle();
     pthread_mutex_unlock(&exclusive_lock);
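
For convenience, here is how the reworked synchronization code reads with this patch applied, assembled from the hunks above. The CPUState *other_cpu declaration and the comments are lightly adjusted, and CPUState, CPU_FOREACH() and cpu_exit() are QEMU internals declared elsewhere in the tree, so this is a reading aid rather than a standalone program.

    static pthread_mutex_t exclusive_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t exclusive_cond = PTHREAD_COND_INITIALIZER;
    static pthread_cond_t exclusive_resume = PTHREAD_COND_INITIALIZER;
    static bool exclusive_pending;    /* an exclusive section was requested */
    static int tcg_pending_cpus;      /* threads currently executing TCG code */

    /* Wait for a pending exclusive operation to finish.  The exclusive_lock
       must be held.  */
    static inline void exclusive_idle(void)
    {
        while (exclusive_pending) {
            pthread_cond_wait(&exclusive_resume, &exclusive_lock);
        }
    }

    /* Start an exclusive operation: request the quiescent state and wait
       until every running thread has left the execution loop.  */
    static inline void start_exclusive(void)
    {
        CPUState *other_cpu;

        pthread_mutex_lock(&exclusive_lock);
        exclusive_idle();

        exclusive_pending = true;
        /* Make all other cpus stop executing.  */
        CPU_FOREACH(other_cpu) {
            if (other_cpu->running) {
                cpu_exit(other_cpu);
            }
        }
        while (tcg_pending_cpus) {
            pthread_cond_wait(&exclusive_cond, &exclusive_lock);
        }
    }

    /* Finish an exclusive operation and let the waiting threads resume.  */
    static inline void __attribute__((unused)) end_exclusive(void)
    {
        exclusive_pending = false;
        pthread_cond_broadcast(&exclusive_resume);
        pthread_mutex_unlock(&exclusive_lock);
    }

    /* Enter the execution loop: wait out any exclusive section, then count
       this thread as running.  */
    static inline void cpu_exec_start(CPUState *cpu)
    {
        pthread_mutex_lock(&exclusive_lock);
        exclusive_idle();
        cpu->running = true;
        tcg_pending_cpus++;
        pthread_mutex_unlock(&exclusive_lock);
    }

    /* Leave the execution loop: drop the count and, when it reaches zero,
       wake a start_exclusive() waiting for the quiescent state.  */
    static inline void cpu_exec_end(CPUState *cpu)
    {
        pthread_mutex_lock(&exclusive_lock);
        cpu->running = false;
        tcg_pending_cpus--;
        if (!tcg_pending_cpus) {
            pthread_cond_broadcast(&exclusive_cond);
        }
        exclusive_idle();
        pthread_mutex_unlock(&exclusive_lock);
    }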