
[v2,08/11] target/openrisc: Enable MTTCG

Message ID 20220703212823.10067-9-shorne@gmail.com
State New, archived
Series OpenRISC Virtual Machine

Commit Message

Stafford Horne July 3, 2022, 9:28 p.m. UTC
This patch enables multi-threaded TCG (MTTCG) for OpenRISC.  Since the or1k
shared synchronized timer can be updated from each vCPU via helpers, we use a
mutex to synchronize updates.

Signed-off-by: Stafford Horne <shorne@gmail.com>
---
 configs/targets/or1k-softmmu.mak |  1 +
 hw/openrisc/cputimer.c           | 17 +++++++++++++++++
 target/openrisc/cpu.h            |  3 +++
 target/openrisc/sys_helper.c     | 11 +++++++++--
 4 files changed, 30 insertions(+), 2 deletions(-)
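
The approach boils down to two patterns: every write to the shared or1k timer
is bracketed by the iothread lock, and the read path first does a cheap check so
the lock is only taken when the count actually needs refreshing.  A condensed
sketch of the two paths, lifted from the sys_helper.c hunks in the diff below
(illustrative only, not a drop-in replacement for them):

    /* Write side (mtspr of TTCR): serialize updates of the shared timer. */
    qemu_mutex_lock_iothread();
    cpu_openrisc_count_set(cpu, rb);
    cpu_openrisc_timer_update(cpu);
    qemu_mutex_unlock_iothread();

    /* Read side (mfspr of TTCR): only take the lock if the count is stale. */
    if (cpu_openrisc_timer_has_advanced(cpu)) {
        qemu_mutex_lock_iothread();
        cpu_openrisc_count_update(cpu);
        qemu_mutex_unlock_iothread();
    }
    return cpu_openrisc_count_get(cpu);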

Comments

Richard Henderson July 4, 2022, 10:07 a.m. UTC | #1
On 7/4/22 02:58, Stafford Horne wrote:
>       case TO_SPR(10, 1): /* TTCR */
> -        cpu_openrisc_count_update(cpu);
> +        if (cpu_openrisc_timer_has_advanced(cpu)) {
> +            qemu_mutex_lock_iothread();
> +            cpu_openrisc_count_update(cpu);
> +            qemu_mutex_unlock_iothread();
> +        }

Lock around the whole if, I think.  Otherwise looks good.

Reviewed-by: Richard Henderson <richard.henderson@linaro.org>


r~
Stafford Horne July 4, 2022, 8:31 p.m. UTC | #2
On Mon, Jul 04, 2022 at 03:37:04PM +0530, Richard Henderson wrote:
> On 7/4/22 02:58, Stafford Horne wrote:
> >       case TO_SPR(10, 1): /* TTCR */
> > -        cpu_openrisc_count_update(cpu);
> > +        if (cpu_openrisc_timer_has_advanced(cpu)) {
> > +            qemu_mutex_lock_iothread();
> > +            cpu_openrisc_count_update(cpu);
> > +            qemu_mutex_unlock_iothread();
> > +        }
> 
> Lock around the whole if, I think.  Otherwise looks good.

Well, actually the cpu_openrisc_timer_has_advanced read is done once outside the
lock as an optimization, to avoid taking the lock when it is not needed.  I.e. if
we have 4 cores that all try to update the clock at the same time, in theory only
one will have to take the lock and update the shared timer.

But I do see that this could be flawed, as the timer could already have been
updated by another vCPU by the time we take the lock.  I'll move the check inside
the lock and see if there is any performance hit / increase in the sync-profile.

> Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
> 
> 
> r~
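
For reference, the two lock orderings discussed above look like this (a sketch
only, using the helpers introduced by this patch; whichever variant the
follow-up version settles on may differ):

    /* As posted: check outside the lock, so an uncontended read skips the
     * BQL entirely, but another vCPU may already have updated the timer by
     * the time the lock is taken. */
    if (cpu_openrisc_timer_has_advanced(cpu)) {
        qemu_mutex_lock_iothread();
        cpu_openrisc_count_update(cpu);
        qemu_mutex_unlock_iothread();
    }

    /* As suggested in review: lock around the whole check, so the decision
     * and the update happen atomically, at the cost of always taking the
     * lock on this path. */
    qemu_mutex_lock_iothread();
    if (cpu_openrisc_timer_has_advanced(cpu)) {
        cpu_openrisc_count_update(cpu);
    }
    qemu_mutex_unlock_iothread();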

Patch

diff --git a/configs/targets/or1k-softmmu.mak b/configs/targets/or1k-softmmu.mak
index 263e970870..432f855a30 100644
--- a/configs/targets/or1k-softmmu.mak
+++ b/configs/targets/or1k-softmmu.mak
@@ -1,3 +1,4 @@ 
 TARGET_ARCH=openrisc
+TARGET_SUPPORTS_MTTCG=y
 TARGET_BIG_ENDIAN=y
 TARGET_NEED_FDT=y
diff --git a/hw/openrisc/cputimer.c b/hw/openrisc/cputimer.c
index 4dbba3a3d4..2298eff8b9 100644
--- a/hw/openrisc/cputimer.c
+++ b/hw/openrisc/cputimer.c
@@ -43,6 +43,23 @@  uint32_t cpu_openrisc_count_get(OpenRISCCPU *cpu)
     return or1k_timer->ttcr;
 }
 
+/*
+ * Check to see if calling cpu_openrisc_count_update will
+ * actually advance the time.
+ *
+ * Used in hot spots to avoid taking expensive locks.
+ */
+bool cpu_openrisc_timer_has_advanced(OpenRISCCPU *cpu)
+{
+    uint64_t now;
+
+    if (!cpu->env.is_counting) {
+        return false;
+    }
+    now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+    return (now - or1k_timer->last_clk) >= TIMER_PERIOD;
+}
+
 /* Add elapsed ticks to ttcr */
 void cpu_openrisc_count_update(OpenRISCCPU *cpu)
 {
diff --git a/target/openrisc/cpu.h b/target/openrisc/cpu.h
index b9584f10d4..5354d681f5 100644
--- a/target/openrisc/cpu.h
+++ b/target/openrisc/cpu.h
@@ -25,6 +25,8 @@ 
 #include "hw/core/cpu.h"
 #include "qom/object.h"
 
+#define TCG_GUEST_DEFAULT_MO (0)
+
 #define TYPE_OPENRISC_CPU "or1k-cpu"
 
 OBJECT_DECLARE_CPU_TYPE(OpenRISCCPU, OpenRISCCPUClass, OPENRISC_CPU)
@@ -333,6 +335,7 @@  void cpu_openrisc_pic_init(OpenRISCCPU *cpu);
 
 /* hw/openrisc_timer.c */
 void cpu_openrisc_clock_init(OpenRISCCPU *cpu);
+bool cpu_openrisc_timer_has_advanced(OpenRISCCPU *cpu);
 uint32_t cpu_openrisc_count_get(OpenRISCCPU *cpu);
 void cpu_openrisc_count_set(OpenRISCCPU *cpu, uint32_t val);
 void cpu_openrisc_count_update(OpenRISCCPU *cpu);
diff --git a/target/openrisc/sys_helper.c b/target/openrisc/sys_helper.c
index 48674231e7..7c0d3d6187 100644
--- a/target/openrisc/sys_helper.c
+++ b/target/openrisc/sys_helper.c
@@ -145,6 +145,7 @@  void HELPER(mtspr)(CPUOpenRISCState *env, target_ulong spr, target_ulong rb)
         break;
     case TO_SPR(10, 0): /* TTMR */
         {
+            qemu_mutex_lock_iothread();
             if ((env->ttmr & TTMR_M) ^ (rb & TTMR_M)) {
                 switch (rb & TTMR_M) {
                 case TIMER_NONE:
@@ -168,14 +169,16 @@  void HELPER(mtspr)(CPUOpenRISCState *env, target_ulong spr, target_ulong rb)
                 env->ttmr = rb & ~TTMR_IP;
                 cs->interrupt_request &= ~CPU_INTERRUPT_TIMER;
             }
-
             cpu_openrisc_timer_update(cpu);
+            qemu_mutex_unlock_iothread();
         }
         break;
 
     case TO_SPR(10, 1): /* TTCR */
+        qemu_mutex_lock_iothread();
         cpu_openrisc_count_set(cpu, rb);
         cpu_openrisc_timer_update(cpu);
+        qemu_mutex_unlock_iothread();
         break;
 #endif
 
@@ -303,7 +306,11 @@  target_ulong HELPER(mfspr)(CPUOpenRISCState *env, target_ulong rd,
         return env->ttmr;
 
     case TO_SPR(10, 1): /* TTCR */
-        cpu_openrisc_count_update(cpu);
+        if (cpu_openrisc_timer_has_advanced(cpu)) {
+            qemu_mutex_lock_iothread();
+            cpu_openrisc_count_update(cpu);
+            qemu_mutex_unlock_iothread();
+        }
         return cpu_openrisc_count_get(cpu);
 #endif