
[RFC,2/8] cpus: Move common code out of {async_,}run_on_cpu()

Message ID 1466375313-7562-3-git-send-email-sergey.fedorov@linaro.org (mailing list archive)
State New, archived

Commit Message

sergey.fedorov@linaro.org June 19, 2016, 10:28 p.m. UTC
From: Sergey Fedorov <serge.fdrv@gmail.com>

Move the code common between run_on_cpu() and async_run_on_cpu() into a
new function queue_work_on_cpu().

Signed-off-by: Sergey Fedorov <serge.fdrv@gmail.com>
Signed-off-by: Sergey Fedorov <sergey.fedorov@linaro.org>
---
 cpus.c | 42 ++++++++++++++++++------------------------
 1 file changed, 18 insertions(+), 24 deletions(-)
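
As a caller-side illustration of the two entry points this patch refactors, a minimal sketch; the payload callback below is hypothetical and assumes the run_on_cpu_func signature used by this series (the target CPU plus an opaque pointer):

/* Hypothetical payload; any callback matching run_on_cpu_func can be queued. */
static void example_work(CPUState *cpu, void *data)
{
    /* ... perform some operation on behalf of @cpu ... */
}

static void example_caller(CPUState *cpu)
{
    /* Synchronous: returns only after example_work() has run on @cpu's thread. */
    run_on_cpu(cpu, example_work, NULL);

    /* Asynchronous: queues a heap-allocated item and returns immediately;
     * the vCPU thread frees the item after running it (wi->free == true). */
    async_run_on_cpu(cpu, example_work, NULL);
}

run_on_cpu() blocks until the item has executed on the target vCPU thread, whereas async_run_on_cpu() heap-allocates the item and returns immediately; the patch only changes how both of them enqueue the item.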

Comments

Alex Bennée June 27, 2016, 8:54 a.m. UTC | #1
Sergey Fedorov <sergey.fedorov@linaro.org> writes:

> From: Sergey Fedorov <serge.fdrv@gmail.com>
>
> Move the code common between run_on_cpu() and async_run_on_cpu() into a
> new function queue_work_on_cpu().
>
> Signed-off-by: Sergey Fedorov <serge.fdrv@gmail.com>
> Signed-off-by: Sergey Fedorov <sergey.fedorov@linaro.org>

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>

> ---
>  cpus.c | 42 ++++++++++++++++++------------------------
>  1 file changed, 18 insertions(+), 24 deletions(-)
>
> diff --git a/cpus.c b/cpus.c
> index 049c2d04e150..04687c85bcd4 100644
> --- a/cpus.c
> +++ b/cpus.c
> @@ -910,6 +910,22 @@ void qemu_init_cpu_loop(void)
>      qemu_thread_get_self(&io_thread);
>  }
>
> +static void queue_work_on_cpu(CPUState *cpu, struct qemu_work_item *wi)
> +{
> +    qemu_mutex_lock(&cpu->work_mutex);
> +    if (cpu->queued_work_first == NULL) {
> +        cpu->queued_work_first = wi;
> +    } else {
> +        cpu->queued_work_last->next = wi;
> +    }
> +    cpu->queued_work_last = wi;
> +    wi->next = NULL;
> +    wi->done = false;
> +    qemu_mutex_unlock(&cpu->work_mutex);
> +
> +    qemu_cpu_kick(cpu);
> +}
> +
>  void run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data)
>  {
>      struct qemu_work_item wi;
> @@ -923,18 +939,7 @@ void run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data)
>      wi.data = data;
>      wi.free = false;
>
> -    qemu_mutex_lock(&cpu->work_mutex);
> -    if (cpu->queued_work_first == NULL) {
> -        cpu->queued_work_first = &wi;
> -    } else {
> -        cpu->queued_work_last->next = &wi;
> -    }
> -    cpu->queued_work_last = &wi;
> -    wi.next = NULL;
> -    wi.done = false;
> -    qemu_mutex_unlock(&cpu->work_mutex);
> -
> -    qemu_cpu_kick(cpu);
> +    queue_work_on_cpu(cpu, &wi);
>      while (!atomic_mb_read(&wi.done)) {
>          CPUState *self_cpu = current_cpu;
>
> @@ -957,18 +962,7 @@ void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data)
>      wi->data = data;
>      wi->free = true;
>
> -    qemu_mutex_lock(&cpu->work_mutex);
> -    if (cpu->queued_work_first == NULL) {
> -        cpu->queued_work_first = wi;
> -    } else {
> -        cpu->queued_work_last->next = wi;
> -    }
> -    cpu->queued_work_last = wi;
> -    wi->next = NULL;
> -    wi->done = false;
> -    qemu_mutex_unlock(&cpu->work_mutex);
> -
> -    qemu_cpu_kick(cpu);
> +    queue_work_on_cpu(cpu, wi);
>  }
>
>  static void qemu_kvm_destroy_vcpu(CPUState *cpu)


--
Alex Bennée

Patch

diff --git a/cpus.c b/cpus.c
index 049c2d04e150..04687c85bcd4 100644
--- a/cpus.c
+++ b/cpus.c
@@ -910,6 +910,22 @@ void qemu_init_cpu_loop(void)
     qemu_thread_get_self(&io_thread);
 }
 
+static void queue_work_on_cpu(CPUState *cpu, struct qemu_work_item *wi)
+{
+    qemu_mutex_lock(&cpu->work_mutex);
+    if (cpu->queued_work_first == NULL) {
+        cpu->queued_work_first = wi;
+    } else {
+        cpu->queued_work_last->next = wi;
+    }
+    cpu->queued_work_last = wi;
+    wi->next = NULL;
+    wi->done = false;
+    qemu_mutex_unlock(&cpu->work_mutex);
+
+    qemu_cpu_kick(cpu);
+}
+
 void run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data)
 {
     struct qemu_work_item wi;
@@ -923,18 +939,7 @@ void run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data)
     wi.data = data;
     wi.free = false;
 
-    qemu_mutex_lock(&cpu->work_mutex);
-    if (cpu->queued_work_first == NULL) {
-        cpu->queued_work_first = &wi;
-    } else {
-        cpu->queued_work_last->next = &wi;
-    }
-    cpu->queued_work_last = &wi;
-    wi.next = NULL;
-    wi.done = false;
-    qemu_mutex_unlock(&cpu->work_mutex);
-
-    qemu_cpu_kick(cpu);
+    queue_work_on_cpu(cpu, &wi);
     while (!atomic_mb_read(&wi.done)) {
         CPUState *self_cpu = current_cpu;
 
@@ -957,18 +962,7 @@ void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data)
     wi->data = data;
     wi->free = true;
 
-    qemu_mutex_lock(&cpu->work_mutex);
-    if (cpu->queued_work_first == NULL) {
-        cpu->queued_work_first = wi;
-    } else {
-        cpu->queued_work_last->next = wi;
-    }
-    cpu->queued_work_last = wi;
-    wi->next = NULL;
-    wi->done = false;
-    qemu_mutex_unlock(&cpu->work_mutex);
-
-    qemu_cpu_kick(cpu);
+    queue_work_on_cpu(cpu, wi);
 }
 
 static void qemu_kvm_destroy_vcpu(CPUState *cpu)
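
The dequeue side that consumes cpu->queued_work_first is untouched by this patch; in cpus.c at this point in the tree it is flush_queued_work(), run from the vCPU threads. A rough, reconstructed sketch of that loop (not quoted from the diff) under the same work_mutex scheme:

static void flush_queued_work_sketch(CPUState *cpu)
{
    struct qemu_work_item *wi;

    qemu_mutex_lock(&cpu->work_mutex);
    while (cpu->queued_work_first != NULL) {
        /* Pop the head of the list that queue_work_on_cpu() appends to. */
        wi = cpu->queued_work_first;
        cpu->queued_work_first = wi->next;
        if (!cpu->queued_work_first) {
            cpu->queued_work_last = NULL;
        }
        qemu_mutex_unlock(&cpu->work_mutex);

        /* Run the item without holding work_mutex. */
        wi->func(cpu, wi->data);

        qemu_mutex_lock(&cpu->work_mutex);
        if (wi->free) {
            g_free(wi);                      /* async_run_on_cpu() item */
        } else {
            atomic_mb_set(&wi->done, true);  /* run_on_cpu() polls wi.done */
        }
    }
    qemu_mutex_unlock(&cpu->work_mutex);

    /* Wake any run_on_cpu() caller sleeping on the work condition variable. */
    qemu_cond_broadcast(&qemu_work_cond);
}

This is why run_on_cpu() can simply wait on atomic_mb_read(&wi.done) after calling queue_work_on_cpu(): the consumer sets wi->done with atomic_mb_set() and broadcasts the condition variable once the item has run.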