
[PULL,2/4] target/riscv: Remove atomic accesses to MIP CSR

Message ID 20191115044104.4197-3-palmer@dabbelt.com (mailing list archive)
State New, archived
Series [PULL,1/4] remove unnecessary ifdef TARGET_RISCV64

Commit Message

Palmer Dabbelt Nov. 15, 2019, 4:41 a.m. UTC
From: Alistair Francis <alistair.francis@wdc.com>

Instead of relying on atomics to access the MIP register, let's update
our helper function to just lock the iothread mutex before writing. This
follows the same approach as PPC uses for handling interrupts.
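
The resulting pattern is the usual one for a helper that may be called
both with and without the BQL held: take the iothread mutex only if the
caller does not already hold it, and drop it again under the same
condition. A minimal sketch of that pattern (reduced from the new
riscv_cpu_update_mip() below, with the actual MIP update elided):

    bool locked = false;

    if (!qemu_mutex_iothread_locked()) {
        locked = true;
        qemu_mutex_lock_iothread();
    }

    /* ... update env->mip and raise/clear CPU_INTERRUPT_HARD ... */

    if (locked) {
        qemu_mutex_unlock_iothread();
    }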

Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Palmer Dabbelt <palmer@dabbelt.com>
Signed-off-by: Palmer Dabbelt <palmer@sifive.com>
Signed-off-by: Palmer Dabbelt <palmer@dabbelt.com>
---
 target/riscv/cpu.c        |  5 ++--
 target/riscv/cpu.h        |  9 --------
 target/riscv/cpu_helper.c | 48 +++++++++++++++------------------------
 target/riscv/csr.c        |  2 +-
 4 files changed, 21 insertions(+), 43 deletions(-)

Patch

diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
index 3939963b71ab..d37861a4305b 100644
--- a/target/riscv/cpu.c
+++ b/target/riscv/cpu.c
@@ -224,8 +224,7 @@  static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags)
 #ifndef CONFIG_USER_ONLY
     qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "mhartid ", env->mhartid);
     qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "mstatus ", env->mstatus);
-    qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "mip     ",
-                 (target_ulong)atomic_read(&env->mip));
+    qemu_fprintf(f, " %s 0x%x\n", "mip     ", env->mip);
     qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "mie     ", env->mie);
     qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "mideleg ", env->mideleg);
     qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "medeleg ", env->medeleg);
@@ -275,7 +274,7 @@  static bool riscv_cpu_has_work(CPUState *cs)
      * Definition of the WFI instruction requires it to ignore the privilege
      * mode and delegation registers, but respect individual enables
      */
-    return (atomic_read(&env->mip) & env->mie) != 0;
+    return (env->mip & env->mie) != 0;
 #else
     return true;
 #endif
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
index 8c64c68538d8..e59343e13c02 100644
--- a/target/riscv/cpu.h
+++ b/target/riscv/cpu.h
@@ -121,15 +121,6 @@  struct CPURISCVState {
     target_ulong mhartid;
     target_ulong mstatus;
 
-    /*
-     * CAUTION! Unlike the rest of this struct, mip is accessed asynchonously
-     * by I/O threads. It should be read with atomic_read. It should be updated
-     * using riscv_cpu_update_mip with the iothread mutex held. The iothread
-     * mutex must be held because mip must be consistent with the CPU inturrept
-     * state. riscv_cpu_update_mip calls cpu_interrupt or cpu_reset_interrupt
-     * wuth the invariant that CPU_INTERRUPT_HARD is set iff mip is non-zero.
-     * mip is 32-bits to allow atomic_read on 32-bit hosts.
-     */
     uint32_t mip;
     uint32_t miclaim;
 
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
index f13131a51bde..767c8762ac42 100644
--- a/target/riscv/cpu_helper.c
+++ b/target/riscv/cpu_helper.c
@@ -19,6 +19,7 @@ 
 
 #include "qemu/osdep.h"
 #include "qemu/log.h"
+#include "qemu/main-loop.h"
 #include "cpu.h"
 #include "exec/exec-all.h"
 #include "tcg-op.h"
@@ -38,7 +39,7 @@  static int riscv_cpu_local_irq_pending(CPURISCVState *env)
 {
     target_ulong mstatus_mie = get_field(env->mstatus, MSTATUS_MIE);
     target_ulong mstatus_sie = get_field(env->mstatus, MSTATUS_SIE);
-    target_ulong pending = atomic_read(&env->mip) & env->mie;
+    target_ulong pending = env->mip & env->mie;
     target_ulong mie = env->priv < PRV_M || (env->priv == PRV_M && mstatus_mie);
     target_ulong sie = env->priv < PRV_S || (env->priv == PRV_S && mstatus_sie);
     target_ulong irqs = (pending & ~env->mideleg & -mie) |
@@ -92,42 +93,29 @@  int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint32_t interrupts)
     }
 }
 
-struct CpuAsyncInfo {
-    uint32_t new_mip;
-};
-
-static void riscv_cpu_update_mip_irqs_async(CPUState *target_cpu_state,
-                                            run_on_cpu_data data)
-{
-    struct CpuAsyncInfo *info = (struct CpuAsyncInfo *) data.host_ptr;
-
-    if (info->new_mip) {
-        cpu_interrupt(target_cpu_state, CPU_INTERRUPT_HARD);
-    } else {
-        cpu_reset_interrupt(target_cpu_state, CPU_INTERRUPT_HARD);
-    }
-
-    g_free(info);
-}
-
 uint32_t riscv_cpu_update_mip(RISCVCPU *cpu, uint32_t mask, uint32_t value)
 {
     CPURISCVState *env = &cpu->env;
     CPUState *cs = CPU(cpu);
-    struct CpuAsyncInfo *info;
-    uint32_t old, new, cmp = atomic_read(&env->mip);
+    uint32_t old = env->mip;
+    bool locked = false;
+
+    if (!qemu_mutex_iothread_locked()) {
+        locked = true;
+        qemu_mutex_lock_iothread();
+    }
 
-    do {
-        old = cmp;
-        new = (old & ~mask) | (value & mask);
-        cmp = atomic_cmpxchg(&env->mip, old, new);
-    } while (old != cmp);
+    env->mip = (env->mip & ~mask) | (value & mask);
 
-    info = g_new(struct CpuAsyncInfo, 1);
-    info->new_mip = new;
+    if (env->mip) {
+        cpu_interrupt(cs, CPU_INTERRUPT_HARD);
+    } else {
+        cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
+    }
 
-    async_run_on_cpu(cs, riscv_cpu_update_mip_irqs_async,
-                     RUN_ON_CPU_HOST_PTR(info));
+    if (locked) {
+        qemu_mutex_unlock_iothread();
+    }
 
     return old;
 }
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
index 974c9c20b5f4..da02f9f0b177 100644
--- a/target/riscv/csr.c
+++ b/target/riscv/csr.c
@@ -579,7 +579,7 @@  static int rmw_mip(CPURISCVState *env, int csrno, target_ulong *ret_value,
     if (mask) {
         old_mip = riscv_cpu_update_mip(cpu, mask, (new_value & mask));
     } else {
-        old_mip = atomic_read(&env->mip);
+        old_mip = env->mip;
     }
 
     if (ret_value) {