[PULL,1/3] tcg: Fix mmap lock assert on translation failure

Message ID 20190709075042.13941-2-richard.henderson@linaro.org (mailing list archive)
State New, archived

Commit Message

Richard Henderson July 9, 2019, 7:50 a.m. UTC
Check page flags before letting an invalid pc cause a SIGSEGV.

Prepare for eventually validating PROT_EXEC.  The current wrinkle is
that our implementation of signals is flawed: we should be using a vdso
like the kernel, but we instead put the trampoline on the stack.  In
the meantime, let PROT_READ satisfy PROT_EXEC.
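
As a standalone illustration of that interim policy (not QEMU code; the
exec_allowed() helper is invented for the sketch, and the PAGE_* values
are only stand-ins), a page counts as executable if it is either
executable or merely readable, so the trampoline on the stack keeps
working:

#include <stdio.h>

/* Flag names mirror QEMU's; the values are just for this sketch. */
#define PAGE_READ  0x0001
#define PAGE_WRITE 0x0002
#define PAGE_EXEC  0x0004

/* Hypothetical helper: may we fetch code from a page with these flags? */
static int exec_allowed(int flags)
{
    /* Until the trampoline moves into a vdso, PROT_READ satisfies PROT_EXEC. */
    return (flags & (PAGE_EXEC | PAGE_READ)) != 0;
}

int main(void)
{
    printf("exec page:                 %d\n", exec_allowed(PAGE_EXEC)); /* 1 */
    printf("readable stack/trampoline: %d\n", exec_allowed(PAGE_READ)); /* 1 */
    printf("no-access page:            %d\n", exec_allowed(0));         /* 0 */
    return 0;
}

This is the same test the patch adds to page_check_range(): a PAGE_EXEC
request only fails when the page has neither PAGE_EXEC nor PAGE_READ.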

Fixes: https://bugs.launchpad.net/qemu/+bug/1832353
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/exec/cpu-all.h                    |  1 +
 include/exec/cpu_ldst_useronly_template.h |  8 +++++--
 accel/tcg/translate-all.c                 | 29 +++++++++++++++++++++++
 3 files changed, 36 insertions(+), 2 deletions(-)
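
For readers unfamiliar with the useronly load template, the following
self-contained sketch (stubbed types and helpers, not QEMU's real
definitions) shows roughly what the template generates for a 4-byte
code fetch once CODE_ACCESS is defined: glue() pastes cpu_ld + l +
_code into cpu_ldl_code(), and the generated helper now calls
validate_exec_access() before loading instead of tracing a data access:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Two-level paste, in the style of QEMU's glue()/xglue(). */
#define xglue(a, b) a ## b
#define glue(a, b)  xglue(a, b)

typedef uintptr_t abi_ptr;                                /* stand-in for QEMU's abi_ptr */
typedef struct CPUArchState { int dummy; } CPUArchState;  /* stub */

/* Stub: the real function longjmps out to raise SIGSEGV on failure. */
static void validate_exec_access(CPUArchState *env, abi_ptr ptr, int len)
{
    (void)env; (void)ptr; (void)len;
}

#define USUFFIX   l
#define MEMSUFFIX _code
#define DATA_SIZE 4

/* glue(glue(cpu_ld, l), _code) pastes to cpu_ldl_code */
static inline uint32_t
glue(glue(cpu_ld, USUFFIX), MEMSUFFIX)(CPUArchState *env, abi_ptr ptr)
{
    uint32_t v;
    validate_exec_access(env, ptr, DATA_SIZE);    /* the new CODE_ACCESS branch */
    memcpy(&v, (const void *)ptr, sizeof(v));     /* stands in for the real host load */
    return v;
}

int main(void)
{
    CPUArchState env = { 0 };
    uint8_t insn[4] = { 0x01, 0x00, 0x00, 0xd4 };
    printf("fetched 0x%08x\n", (unsigned)cpu_ldl_code(&env, (abi_ptr)insn));
    return 0;
}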

Patch

diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
index 536ea58f81..58b8915617 100644
--- a/include/exec/cpu-all.h
+++ b/include/exec/cpu-all.h
@@ -259,6 +259,7 @@  int walk_memory_regions(void *, walk_memory_regions_fn);
 int page_get_flags(target_ulong address);
 void page_set_flags(target_ulong start, target_ulong end, int flags);
 int page_check_range(target_ulong start, target_ulong len, int flags);
+void validate_exec_access(CPUArchState *env, target_ulong s, target_ulong l);
 #endif
 
 CPUArchState *cpu_copy(CPUArchState *env);
diff --git a/include/exec/cpu_ldst_useronly_template.h b/include/exec/cpu_ldst_useronly_template.h
index bc45e2b8d4..f095415149 100644
--- a/include/exec/cpu_ldst_useronly_template.h
+++ b/include/exec/cpu_ldst_useronly_template.h
@@ -64,7 +64,9 @@ 
 static inline RES_TYPE
 glue(glue(cpu_ld, USUFFIX), MEMSUFFIX)(CPUArchState *env, abi_ptr ptr)
 {
-#if !defined(CODE_ACCESS)
+#ifdef CODE_ACCESS
+    validate_exec_access(env, ptr, DATA_SIZE);
+#else
     trace_guest_mem_before_exec(
         env_cpu(env), ptr,
         trace_mem_build_info(SHIFT, false, MO_TE, false));
@@ -88,7 +90,9 @@  glue(glue(glue(cpu_ld, USUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
 static inline int
 glue(glue(cpu_lds, SUFFIX), MEMSUFFIX)(CPUArchState *env, abi_ptr ptr)
 {
-#if !defined(CODE_ACCESS)
+#ifdef CODE_ACCESS
+    validate_exec_access(env, ptr, DATA_SIZE);
+#else
     trace_guest_mem_before_exec(
         env_cpu(env), ptr,
         trace_mem_build_info(SHIFT, true, MO_TE, false));
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index 5d1e08b169..1d4a8a260f 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -2600,10 +2600,39 @@  int page_check_range(target_ulong start, target_ulong len, int flags)
                 }
             }
         }
+        /*
+         * FIXME: We place the signal trampoline on the stack,
+         * even when the guest expects that to be in the vdso.
+         * Until we fix that, allow execute on any readable page.
+         */
+        if ((flags & PAGE_EXEC) && !(p->flags & (PAGE_EXEC | PAGE_READ))) {
+            return -1;
+        }
     }
     return 0;
 }
 
+/*
+ * Called for each code read, longjmp out to issue SIGSEGV if the page(s)
+ * do not have execute access.
+ */
+void validate_exec_access(CPUArchState *env,
+                          target_ulong ptr, target_ulong len)
+{
+    if (page_check_range(ptr, len, PAGE_EXEC) < 0) {
+        CPUState *cs = env_cpu(env);
+        CPUClass *cc = CPU_GET_CLASS(cs);
+
+        /* Like tb_gen_code, release the memory lock before cpu_loop_exit.  */
+        assert_memory_lock();
+        mmap_unlock();
+
+        /* This is user-only.  The target must raise an exception.  */
+        cc->tlb_fill(cs, ptr, 0, MMU_INST_FETCH, MMU_USER_IDX, false, 0);
+        g_assert_not_reached();
+    }
+}
+
 /* called from signal handler: invalidate the code and unprotect the
  * page. Return 0 if the fault was not handled, 1 if it was handled,
  * and 2 if it was handled but the caller must cause the TB to be