[v6,09/21] accel/tcg: Unlock mmap_lock after longjmp

Message ID 20220819032615.884847-10-richard.henderson@linaro.org (mailing list archive)
State New, archived
Series linux-user: Fix siginfo_t contents when jumping to non-readable pages

Commit Message

Richard Henderson Aug. 19, 2022, 3:26 a.m. UTC
The mmap_lock is held around tb_gen_code.  While the comment
is correct that the lock is dropped when tb_gen_code runs out
of memory, the lock is *not* dropped when an exception is
raised reading code for translation.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/cpu-exec.c  | 12 ++++++------
 accel/tcg/user-exec.c |  3 ---
 2 files changed, 6 insertions(+), 9 deletions(-)
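For readers less familiar with the longjmp-based exception path, the following is a minimal, self-contained C sketch of the recovery pattern the patch switches to. This is not QEMU code: fake_mmap_lock, lock_is_held, take_lock, drop_lock, translate_block and exec_loop are hypothetical stand-ins for mmap_lock, have_mmap_lock(), mmap_lock()/mmap_unlock(), tb_gen_code() and cpu->jmp_env respectively.

/*
 * Sketch only (not QEMU code): a "translation" takes a lock, a fault while
 * reading code unwinds via longjmp back to the exec loop before the lock
 * can be released, and the recovery path conditionally drops the lock
 * instead of asserting that it is already free.
 */
#include <pthread.h>
#include <setjmp.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t fake_mmap_lock = PTHREAD_MUTEX_INITIALIZER;
static bool lock_is_held;     /* plays the role of have_mmap_lock() */
static jmp_buf exec_loop;     /* plays the role of cpu->jmp_env */

static void take_lock(void)
{
    pthread_mutex_lock(&fake_mmap_lock);
    lock_is_held = true;
}

static void drop_lock(void)
{
    lock_is_held = false;
    pthread_mutex_unlock(&fake_mmap_lock);
}

/* Plays the role of tb_gen_code(): translation runs with the lock held. */
static void translate_block(bool fault_while_reading_code)
{
    take_lock();
    if (fault_while_reading_code) {
        /*
         * A fault while reading the guest code unwinds straight back to
         * the exec loop via longjmp, so drop_lock() below never runs.
         */
        longjmp(exec_loop, 1);
    }
    drop_lock();
}

int main(void)
{
    /* volatile so 'i' keeps a defined value across the longjmp */
    for (volatile int i = 0; i < 2; i++) {
        if (setjmp(exec_loop)) {
            /*
             * Recovery point after the longjmp.  The old code asserted
             * !lock_is_held here; the fix is to release the lock if the
             * aborted translation left it held.
             */
            if (lock_is_held) {
                drop_lock();
            }
            printf("iteration %d: recovered from fault, lock released\n", i);
            continue;
        }
        translate_block(i == 0);    /* only the first iteration faults */
        printf("iteration %d: translation completed normally\n", i);
    }
    return 0;
}

The same conditional unlock appears at both recovery points in the patch (cpu_exec_step_atomic and cpu_exec), which is why the unconditional mmap_unlock() in adjust_signal_pc can be dropped.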

Comments

Alistair Francis Aug. 21, 2022, 11:30 p.m. UTC | #1
On Fri, Aug 19, 2022 at 1:29 PM Richard Henderson
<richard.henderson@linaro.org> wrote:
>
> The mmap_lock is held around tb_gen_code.  While the comment
> is correct that the lock is dropped when tb_gen_code runs out
> of memory, the lock is *not* dropped when an exception is
> raised reading code for translation.
>
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>

Acked-by: Alistair Francis <alistair.francis@wdc.com>

Alistair

Patch

diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index a565a3f8ec..d18081ca6f 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -462,13 +462,11 @@  void cpu_exec_step_atomic(CPUState *cpu)
         cpu_tb_exec(cpu, tb, &tb_exit);
         cpu_exec_exit(cpu);
     } else {
-        /*
-         * The mmap_lock is dropped by tb_gen_code if it runs out of
-         * memory.
-         */
 #ifndef CONFIG_SOFTMMU
         clear_helper_retaddr();
-        tcg_debug_assert(!have_mmap_lock());
+        if (have_mmap_lock()) {
+            mmap_unlock();
+        }
 #endif
         if (qemu_mutex_iothread_locked()) {
             qemu_mutex_unlock_iothread();
@@ -936,7 +934,9 @@  int cpu_exec(CPUState *cpu)
 
 #ifndef CONFIG_SOFTMMU
         clear_helper_retaddr();
-        tcg_debug_assert(!have_mmap_lock());
+        if (have_mmap_lock()) {
+            mmap_unlock();
+        }
 #endif
         if (qemu_mutex_iothread_locked()) {
             qemu_mutex_unlock_iothread();
diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c
index a20234fb02..58edd33896 100644
--- a/accel/tcg/user-exec.c
+++ b/accel/tcg/user-exec.c
@@ -80,10 +80,7 @@  MMUAccessType adjust_signal_pc(uintptr_t *pc, bool is_write)
          * (and if the translator doesn't handle page boundaries correctly
          * there's little we can do about that here).  Therefore, do not
          * trigger the unwinder.
-         *
-         * Like tb_gen_code, release the memory lock before cpu_loop_exit.
          */
-        mmap_unlock();
         *pc = 0;
         return MMU_INST_FETCH;
     }