[PULL,05/39] tcg/ppc: Make direct jump patching thread-safe

Message ID 1463098420-29113-6-git-send-email-rth@twiddle.net (mailing list archive)

Commit Message

Richard Henderson May 13, 2016, 12:13 a.m. UTC
From: Sergey Fedorov <serge.fdrv@gmail.com>

Ensure direct jump patching on PPC is atomic by:
 * limiting the translation buffer size in 32-bit mode so that any target
   is reachable by a Branch I-form instruction;
 * using atomic_read()/atomic_set() for code patching.
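
For context: the Branch I-form instruction ("b") encodes a signed 24-bit word
displacement, so a direct branch can only reach targets within roughly
+/-32 MiB of itself. Capping the 32-bit code buffer keeps every goto_tb
target inside that range, and the branch can then be retargeted with one
aligned 32-bit store, which the hardware performs atomically. The following
is a minimal standalone sketch of that idea, not the QEMU code itself: the
helper names (patch_branch, branch_in_range) are invented for illustration,
and the GCC/Clang __atomic_store_n builtin stands in for QEMU's atomic_set().

#include <stdint.h>
#include <stdbool.h>
#include <stdlib.h>

#define B_OPCODE   (18u << 26)      /* "b": Branch I-form, primary opcode 18 */
#define B_LI_MASK  0x03fffffcu      /* 24-bit word displacement, shifted left 2 */

/* The displacement must be word-aligned and fit in 26 signed bits (+/-32 MiB). */
static bool branch_in_range(intptr_t diff)
{
    return (diff & 3) == 0 && diff >= -0x2000000 && diff < 0x2000000;
}

/* Retarget the direct branch at jmp_addr so that it jumps to target. */
static void patch_branch(uint32_t *jmp_addr, uintptr_t target)
{
    intptr_t diff = target - (uintptr_t)jmp_addr;

    if (!branch_in_range(diff)) {
        /* The caller must size the code buffer so this cannot happen. */
        abort();
    }

    /* A single aligned 32-bit store: a CPU concurrently executing this
       code sees either the old branch or the new one, never a torn mix. */
    __atomic_store_n(jmp_addr, B_OPCODE | ((uint32_t)diff & B_LI_MASK),
                     __ATOMIC_RELAXED);

    /* A real patcher must also flush the instruction cache at jmp_addr. */
}

The same reachability constraint is what motivates the new 32 MiB
MAX_CODE_GEN_BUFFER_SIZE for 32-bit __powerpc__ in translate-all.c below.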

Signed-off-by: Sergey Fedorov <serge.fdrv@gmail.com>
Signed-off-by: Sergey Fedorov <sergey.fedorov@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Message-Id: <1461341333-19646-5-git-send-email-sergey.fedorov@linaro.org>
Signed-off-by: Richard Henderson <rth@twiddle.net>
---
 tcg/ppc/tcg-target.inc.c | 22 ++++++++++++++++++----
 translate-all.c          |  2 ++
 2 files changed, 20 insertions(+), 4 deletions(-)

Patch

diff --git a/tcg/ppc/tcg-target.inc.c b/tcg/ppc/tcg-target.inc.c
index 00bb90f..039fa77 100644
--- a/tcg/ppc/tcg-target.inc.c
+++ b/tcg/ppc/tcg-target.inc.c
@@ -1237,6 +1237,7 @@  static void tcg_out_brcond2 (TCGContext *s, const TCGArg *args,
     tcg_out_bc(s, BC | BI(7, CR_EQ) | BO_COND_TRUE, arg_label(args[5]));
 }
 
+#ifdef __powerpc64__
 void ppc_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr)
 {
     tcg_insn_unit i1, i2;
@@ -1265,11 +1266,18 @@  void ppc_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr)
     pair = (uint64_t)i2 << 32 | i1;
 #endif
 
-    /* ??? __atomic_store_8, presuming there's some way to do that
-       for 32-bit, otherwise this is good enough for 64-bit.  */
-    *(uint64_t *)jmp_addr = pair;
+    atomic_set((uint64_t *)jmp_addr, pair);
     flush_icache_range(jmp_addr, jmp_addr + 8);
 }
+#else
+void ppc_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr)
+{
+    intptr_t diff = addr - jmp_addr;
+    tcg_debug_assert(in_range_b(diff));
+    atomic_set((uint32_t *)jmp_addr, B | (diff & 0x3fffffc));
+    flush_icache_range(jmp_addr, jmp_addr + 4);
+}
+#endif
 
 static void tcg_out_call(TCGContext *s, tcg_insn_unit *target)
 {
@@ -1895,7 +1903,9 @@  static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
         break;
     case INDEX_op_goto_tb:
         tcg_debug_assert(s->tb_jmp_offset);
-        /* Direct jump.  Ensure the next insns are 8-byte aligned. */
+        /* Direct jump. */
+#ifdef __powerpc64__
+        /* Ensure the next insns are 8-byte aligned. */
         if ((uintptr_t)s->code_ptr & 7) {
             tcg_out32(s, NOP);
         }
@@ -1904,6 +1914,10 @@  static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
         s->code_ptr += 2;
         tcg_out32(s, MTSPR | RS(TCG_REG_TMP1) | CTR);
         tcg_out32(s, BCCTR | BO_ALWAYS);
+#else
+        /* To be replaced by a branch.  */
+        s->code_ptr++;
+#endif
         s->tb_next_offset[args[0]] = tcg_current_code_size(s);
         break;
     case INDEX_op_br:
diff --git a/translate-all.c b/translate-all.c
index 1a8f68b..781819e 100644
--- a/translate-all.c
+++ b/translate-all.c
@@ -464,6 +464,8 @@  static inline PageDesc *page_find(tb_page_addr_t index)
 # define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
 #elif defined(__powerpc64__)
 # define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
+#elif defined(__powerpc__)
+# define MAX_CODE_GEN_BUFFER_SIZE  (32u * 1024 * 1024)
 #elif defined(__aarch64__)
 # define MAX_CODE_GEN_BUFFER_SIZE  (128ul * 1024 * 1024)
 #elif defined(__arm__)