
[RFC,v2,07/11] linux-user: Rework exclusive operation mechanism

Message ID: 1467839703-11733-8-git-send-email-sergey.fedorov@linaro.org (mailing list archive)
State: New, archived

Commit Message

sergey.fedorov@linaro.org July 6, 2016, 9:14 p.m. UTC
From: Sergey Fedorov <serge.fdrv@gmail.com>

A single variable 'pending_cpus' was used for both counting currently
running CPUs and for signalling the pending exclusive operation request.

To prepare for supporting operations which require a quiescent state,
such as a translation buffer flush, it is useful to always keep the
counter of currently running CPUs up to date.

Use a separate variable 'tcg_pending_threads' to count currently
running CPUs and a separate variable 'exclusive_pending' to indicate
that an exclusive operation is pending.

Signed-off-by: Sergey Fedorov <serge.fdrv@gmail.com>
Signed-off-by: Sergey Fedorov <sergey.fedorov@linaro.org>
---

Changes in v2:
 - Rename 'tcg_pending_cpus' to 'tcg_pending_threads'

---
 linux-user/main.c | 24 ++++++++++++------------
 1 file changed, 12 insertions(+), 12 deletions(-)
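
As a sketch of the intended use (not part of this patch; the
run_with_all_threads_stopped() wrapper and its callback are hypothetical
names used only for illustration), an operation that needs a quiescent
state could be bracketed by the existing helpers in linux-user/main.c
once 'tcg_pending_threads' is kept up to date:

static void run_with_all_threads_stopped(void (*func)(void))
{
    /* Sets exclusive_pending and waits until tcg_pending_threads drops
       to zero, i.e. until no other thread is executing TCG code.  */
    start_exclusive();

    /* Quiescent state reached: e.g. flush the translation buffer here.  */
    func();

    /* Clears exclusive_pending, wakes threads blocked in
       exclusive_idle() and releases exclusive_lock.  */
    end_exclusive();
}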

Comments

Alex Bennée July 14, 2016, 3:04 p.m. UTC | #1
Sergey Fedorov <sergey.fedorov@linaro.org> writes:

> From: Sergey Fedorov <serge.fdrv@gmail.com>
>
> A single variable 'pending_cpus' was used for both counting currently
> running CPUs and for signalling the pending exclusive operation request.
>
> To prepare for supporting operations which require a quiescent state,
> such as a translation buffer flush, it is useful to always keep the
> counter of currently running CPUs up to date.
>
> Use a separate variable 'tcg_pending_threads' to count currently
> running CPUs and a separate variable 'exclusive_pending' to indicate
> that an exclusive operation is pending.
>
> Signed-off-by: Sergey Fedorov <serge.fdrv@gmail.com>
> Signed-off-by: Sergey Fedorov <sergey.fedorov@linaro.org>
> ---
>
> Changes in v2:
>  - Rename 'tcg_pending_cpus' to 'tcg_pending_threads'


Reviewed-by: Alex Bennée <alex.bennee@linaro.org>

>
> ---
>  linux-user/main.c | 24 ++++++++++++------------
>  1 file changed, 12 insertions(+), 12 deletions(-)
>
> diff --git a/linux-user/main.c b/linux-user/main.c
> index bdbda693cc5f..5ff0b20bad89 100644
> --- a/linux-user/main.c
> +++ b/linux-user/main.c
> @@ -112,7 +112,8 @@ static QemuMutex cpu_list_mutex;
>  static QemuMutex exclusive_lock;
>  static QemuCond exclusive_cond;
>  static QemuCond exclusive_resume;
> -static int pending_cpus;
> +static bool exclusive_pending;
> +static int tcg_pending_threads;
>
>  void qemu_init_cpu_loop(void)
>  {
> @@ -142,7 +143,8 @@ void fork_end(int child)
>                  QTAILQ_REMOVE(&cpus, cpu, node);
>              }
>          }
> -        pending_cpus = 0;
> +        tcg_pending_threads = 0;
> +        exclusive_pending = false;
>          qemu_mutex_init(&exclusive_lock);
>          qemu_mutex_init(&cpu_list_mutex);
>          qemu_cond_init(&exclusive_cond);
> @@ -159,7 +161,7 @@ void fork_end(int child)
>     must be held.  */
>  static inline void exclusive_idle(void)
>  {
> -    while (pending_cpus) {
> +    while (exclusive_pending) {
>          qemu_cond_wait(&exclusive_resume, &exclusive_lock);
>      }
>  }
> @@ -173,15 +175,14 @@ static inline void start_exclusive(void)
>      qemu_mutex_lock(&exclusive_lock);
>      exclusive_idle();
>
> -    pending_cpus = 1;
> +    exclusive_pending = true;
>      /* Make all other cpus stop executing.  */
>      CPU_FOREACH(other_cpu) {
>          if (other_cpu->running) {
> -            pending_cpus++;
>              cpu_exit(other_cpu);
>          }
>      }
> -    if (pending_cpus > 1) {
> +    while (tcg_pending_threads) {
>          qemu_cond_wait(&exclusive_cond, &exclusive_lock);
>      }
>  }
> @@ -189,7 +190,7 @@ static inline void start_exclusive(void)
>  /* Finish an exclusive operation.  */
>  static inline void __attribute__((unused)) end_exclusive(void)
>  {
> -    pending_cpus = 0;
> +    exclusive_pending = false;
>      qemu_cond_broadcast(&exclusive_resume);
>      qemu_mutex_unlock(&exclusive_lock);
>  }
> @@ -200,6 +201,7 @@ static inline void cpu_exec_start(CPUState *cpu)
>      qemu_mutex_lock(&exclusive_lock);
>      exclusive_idle();
>      cpu->running = true;
> +    tcg_pending_threads++;
>      qemu_mutex_unlock(&exclusive_lock);
>  }
>
> @@ -208,11 +210,9 @@ static inline void cpu_exec_end(CPUState *cpu)
>  {
>      qemu_mutex_lock(&exclusive_lock);
>      cpu->running = false;
> -    if (pending_cpus > 1) {
> -        pending_cpus--;
> -        if (pending_cpus == 1) {
> -            qemu_cond_signal(&exclusive_cond);
> -        }
> +    tcg_pending_threads--;
> +    if (!tcg_pending_threads) {
> +        qemu_cond_signal(&exclusive_cond);
>      }
>      exclusive_idle();
>      qemu_mutex_unlock(&exclusive_lock);


--
Alex Bennée

Patch

diff --git a/linux-user/main.c b/linux-user/main.c
index bdbda693cc5f..5ff0b20bad89 100644
--- a/linux-user/main.c
+++ b/linux-user/main.c
@@ -112,7 +112,8 @@  static QemuMutex cpu_list_mutex;
 static QemuMutex exclusive_lock;
 static QemuCond exclusive_cond;
 static QemuCond exclusive_resume;
-static int pending_cpus;
+static bool exclusive_pending;
+static int tcg_pending_threads;
 
 void qemu_init_cpu_loop(void)
 {
@@ -142,7 +143,8 @@  void fork_end(int child)
                 QTAILQ_REMOVE(&cpus, cpu, node);
             }
         }
-        pending_cpus = 0;
+        tcg_pending_threads = 0;
+        exclusive_pending = false;
         qemu_mutex_init(&exclusive_lock);
         qemu_mutex_init(&cpu_list_mutex);
         qemu_cond_init(&exclusive_cond);
@@ -159,7 +161,7 @@  void fork_end(int child)
    must be held.  */
 static inline void exclusive_idle(void)
 {
-    while (pending_cpus) {
+    while (exclusive_pending) {
         qemu_cond_wait(&exclusive_resume, &exclusive_lock);
     }
 }
@@ -173,15 +175,14 @@  static inline void start_exclusive(void)
     qemu_mutex_lock(&exclusive_lock);
     exclusive_idle();
 
-    pending_cpus = 1;
+    exclusive_pending = true;
     /* Make all other cpus stop executing.  */
     CPU_FOREACH(other_cpu) {
         if (other_cpu->running) {
-            pending_cpus++;
             cpu_exit(other_cpu);
         }
     }
-    if (pending_cpus > 1) {
+    while (tcg_pending_threads) {
         qemu_cond_wait(&exclusive_cond, &exclusive_lock);
     }
 }
@@ -189,7 +190,7 @@  static inline void start_exclusive(void)
 /* Finish an exclusive operation.  */
 static inline void __attribute__((unused)) end_exclusive(void)
 {
-    pending_cpus = 0;
+    exclusive_pending = false;
     qemu_cond_broadcast(&exclusive_resume);
     qemu_mutex_unlock(&exclusive_lock);
 }
@@ -200,6 +201,7 @@  static inline void cpu_exec_start(CPUState *cpu)
     qemu_mutex_lock(&exclusive_lock);
     exclusive_idle();
     cpu->running = true;
+    tcg_pending_threads++;
     qemu_mutex_unlock(&exclusive_lock);
 }
 
@@ -208,11 +210,9 @@  static inline void cpu_exec_end(CPUState *cpu)
 {
     qemu_mutex_lock(&exclusive_lock);
     cpu->running = false;
-    if (pending_cpus > 1) {
-        pending_cpus--;
-        if (pending_cpus == 1) {
-            qemu_cond_signal(&exclusive_cond);
-        }
+    tcg_pending_threads--;
+    if (!tcg_pending_threads) {
+        qemu_cond_signal(&exclusive_cond);
     }
     exclusive_idle();
     qemu_mutex_unlock(&exclusive_lock);