diff mbox

[RFC,v3,12/19] tcg: add kick timer for single-threaded vCPU emulation

Message ID 1464986428-6739-13-git-send-email-alex.bennee@linaro.org (mailing list archive)
State New, archived
Headers show

Commit Message

Alex Bennée June 3, 2016, 8:40 p.m. UTC
Currently we rely on the side effect of the main loop grabbing the
iothread_mutex to give any long running basic block chains a kick to
ensure the next vCPU is scheduled. As this code is being re-factored and
rationalised we now do it explicitly here.

Signed-off-by: Alex Bennée <alex.bennee@linaro.org>

---
v2
  - re-base fixes
  - get_ticks_per_sec() -> NANOSECONDS_PER_SEC
v3
  - add define for TCG_KICK_FREQ
  - fix checkpatch warning
---
 cpus.c | 27 +++++++++++++++++++++++++++
 1 file changed, 27 insertions(+)

Comments

Sergey Fedorov June 27, 2016, 9:20 p.m. UTC | #1
On 03/06/16 23:40, Alex Bennée wrote:
> diff --git a/cpus.c b/cpus.c
> index 1694ce9..12e04c9 100644
> --- a/cpus.c
> +++ b/cpus.c
> @@ -1208,9 +1208,29 @@ static int tcg_cpu_exec(CPUState *cpu)
>      return ret;
>  }
>  
> +/* Single-threaded TCG
> + *
> + * In the single-threaded case each vCPU is simulated in turn. If
> + * there is more than a single vCPU we create a simple timer to kick
> + * the vCPU and ensure we don't get stuck in a tight loop in one vCPU.
> + * This is done explicitly rather than relying on side-effects
> + * elsewhere.
> + */
> +static void qemu_cpu_kick_no_halt(void);
> +#define TCG_KICK_FREQ (qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + \
> +                       NANOSECONDS_PER_SECOND / 10)

Hmm, it doesn't look nice to wrap calculation of the next timeout in a
macro and name it '*_FREQ'. I think we'd better do like this:

#define TCG_KICK_PERIOD (NANOSECONDS_PER_SECOND / 10)

static inline int64_t qemu_tcg_next_kick(void)
{
    return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + TCG_KICK_PERIOD;
}

and use it like this:

timer_mod(kick_timer, qemu_tcg_next_kick());

Kind regards,
Sergey

> +
> +static void kick_tcg_thread(void *opaque)
> +{
> +    QEMUTimer *self = *(QEMUTimer **) opaque;
> +    timer_mod(self, TCG_KICK_FREQ);
> +    qemu_cpu_kick_no_halt();
> +}
> +
>  static void *qemu_tcg_cpu_thread_fn(void *arg)
>  {
>      CPUState *cpu = arg;
> +    QEMUTimer *kick_timer;
>  
>      rcu_register_thread();
>  
> @@ -1234,6 +1254,13 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
>          }
>      }
>  
> +    /* Set to kick if we have to do more than one vCPU */
> +    if (CPU_NEXT(first_cpu)) {
> +        kick_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,  kick_tcg_thread,
> +                                  &kick_timer);
> +        timer_mod(kick_timer, TCG_KICK_FREQ);
> +    }
> +
>      /* process any pending work */
>      atomic_mb_set(&exit_request, 1);
>
Richard Henderson July 2, 2016, 12:17 a.m. UTC | #2
On 06/27/2016 02:20 PM, Sergey Fedorov wrote:
> On 03/06/16 23:40, Alex Bennée wrote:
>> diff --git a/cpus.c b/cpus.c
>> index 1694ce9..12e04c9 100644
>> --- a/cpus.c
>> +++ b/cpus.c
>> @@ -1208,9 +1208,29 @@ static int tcg_cpu_exec(CPUState *cpu)
>>      return ret;
>>  }
>>
>> +/* Single-threaded TCG
>> + *
>> + * In the single-threaded case each vCPU is simulated in turn. If
>> + * there is more than a single vCPU we create a simple timer to kick
>> + * the vCPU and ensure we don't get stuck in a tight loop in one vCPU.
>> + * This is done explicitly rather than relying on side-effects
>> + * elsewhere.
>> + */
>> +static void qemu_cpu_kick_no_halt(void);
>> +#define TCG_KICK_FREQ (qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + \
>> +                       NANOSECONDS_PER_SECOND / 10)
>
> Hmm, it doesn't look nice to wrap calculation of the next timeout in a
> macro and name it '*_FREQ'. I think we'd better do like this:
>
> #define TCG_KICK_PERIOD (NANOSECONDS_PER_SECOND / 10)
>
> static inline int64_t qemu_tcg_next_kick(void)
> {
>     return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + TCG_KICK_PERIOD;
> }
>
> and use it like this:
>
> timer_mod(kick_timer, qemu_tcg_next_kick());

Agreed.

As an aside, surely a period of 10ns is too small.
That's on the order of 20-50 host instructions.


r~
Sergey Fedorov July 2, 2016, 7:36 a.m. UTC | #3
On 02/07/16 03:17, Richard Henderson wrote:
> On 06/27/2016 02:20 PM, Sergey Fedorov wrote:
>> On 03/06/16 23:40, Alex Bennée wrote:
>>> diff --git a/cpus.c b/cpus.c
>>> index 1694ce9..12e04c9 100644
>>> --- a/cpus.c
>>> +++ b/cpus.c
>>> @@ -1208,9 +1208,29 @@ static int tcg_cpu_exec(CPUState *cpu)
>>>      return ret;
>>>  }
>>>
>>> +/* Single-threaded TCG
>>> + *
>>> + * In the single-threaded case each vCPU is simulated in turn. If
>>> + * there is more than a single vCPU we create a simple timer to kick
>>> + * the vCPU and ensure we don't get stuck in a tight loop in one vCPU.
>>> + * This is done explicitly rather than relying on side-effects
>>> + * elsewhere.
>>> + */
>>> +static void qemu_cpu_kick_no_halt(void);
>>> +#define TCG_KICK_FREQ (qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + \
>>> +                       NANOSECONDS_PER_SECOND / 10)
>>
>> Hmm, it doesn't look nice to wrap calculation of the next timeout in a
>> macro and name it '*_FREQ'. I think we'd better do like this:
>>
>> #define TCG_KICK_PERIOD (NANOSECONDS_PER_SECOND / 10)
>>
>> static inline int64_t qemu_tcg_next_kick(void)
>> {
>>     return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + TCG_KICK_PERIOD;
>> }
>>
>> and use it like this:
>>
>> timer_mod(kick_timer, qemu_tcg_next_kick());
>
> Agreed.
>
> As an aside, surely a period of 10ns is too small.
> That's on the order of 20-50 host instructions.

I think the kick fires 10 times per second, i.e. the period is 100 ms, not 10 ns.

Regards,
Sergey
diff mbox

Patch

diff --git a/cpus.c b/cpus.c
index 1694ce9..12e04c9 100644
--- a/cpus.c
+++ b/cpus.c
@@ -1208,9 +1208,29 @@  static int tcg_cpu_exec(CPUState *cpu)
     return ret;
 }
 
+/* Single-threaded TCG
+ *
+ * In the single-threaded case each vCPU is simulated in turn. If
+ * there is more than a single vCPU we create a simple timer to kick
+ * the vCPU and ensure we don't get stuck in a tight loop in one vCPU.
+ * This is done explicitly rather than relying on side-effects
+ * elsewhere.
+ */
+static void qemu_cpu_kick_no_halt(void);
+#define TCG_KICK_FREQ (qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + \
+                       NANOSECONDS_PER_SECOND / 10)
+
+static void kick_tcg_thread(void *opaque)
+{
+    QEMUTimer *self = *(QEMUTimer **) opaque;
+    timer_mod(self, TCG_KICK_FREQ);
+    qemu_cpu_kick_no_halt();
+}
+
 static void *qemu_tcg_cpu_thread_fn(void *arg)
 {
     CPUState *cpu = arg;
+    QEMUTimer *kick_timer;
 
     rcu_register_thread();
 
@@ -1234,6 +1254,13 @@  static void *qemu_tcg_cpu_thread_fn(void *arg)
         }
     }
 
+    /* Set to kick if we have to do more than one vCPU */
+    if (CPU_NEXT(first_cpu)) {
+        kick_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,  kick_tcg_thread,
+                                  &kick_timer);
+        timer_mod(kick_timer, TCG_KICK_FREQ);
+    }
+
     /* process any pending work */
     atomic_mb_set(&exit_request, 1);