
target/arm: Implement MTE3

Message ID 20210611190653.754648-1-pcc@google.com (mailing list archive)
State New, archived
Series: target/arm: Implement MTE3

Commit Message

Peter Collingbourne June 11, 2021, 7:06 p.m. UTC
MTE3 introduces an asymmetric tag checking mode, in which loads are
checked synchronously and stores are checked asynchronously. Add
support for it.

Signed-off-by: Peter Collingbourne <pcc@google.com>
---
 target/arm/cpu64.c      |  2 +-
 target/arm/mte_helper.c | 83 ++++++++++++++++++++++++++---------------
 2 files changed, 53 insertions(+), 32 deletions(-)
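
For orientation: SCTLR_ELx.TCF selects how a tag check failure is reported.
Encoding 0 ignores it, 1 raises a synchronous data abort, 2 sets a flag in
TFSR_ELx asynchronously, and 3 (the MTE3 addition here) is asymmetric by
direction of access. A minimal standalone C sketch of that dispatch, with
hypothetical report_* helpers standing in for the patch's two paths:

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for the two reporting paths in the patch. */
    static void report_sync(void)  { puts("synchronous data abort"); }
    static void report_async(void) { puts("set TFSR_ELx.TFx, report later"); }

    /* Dispatch on the SCTLR_ELx.TCF encoding, as mte_check_fail() does:
     * 0 = none, 1 = sync, 2 = async, 3 = asymmetric (MTE3). */
    static void on_tag_check_fail(int tcf, bool is_write)
    {
        switch (tcf) {
        case 0:                       /* tag check failures are ignored */
            break;
        case 1:                       /* always synchronous */
            report_sync();
            break;
        case 2:                       /* always asynchronous */
            report_async();
            break;
        case 3:                       /* MTE3: stores async, loads sync */
            if (is_write) {
                report_async();
            } else {
                report_sync();
            }
            break;
        }
    }

    int main(void)
    {
        on_tag_check_fail(3, false);  /* load  -> synchronous abort */
        on_tag_check_fail(3, true);   /* store -> asynchronous flag */
        return 0;
    }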

Comments

Richard Henderson June 12, 2021, 9:19 p.m. UTC | #1
On 6/11/21 12:06 PM, Peter Collingbourne wrote:
> MTE3 introduces an asymmetric tag checking mode, in which loads are
> checked synchronously and stores are checked asynchronously. Add
> support for it.
> 
> Signed-off-by: Peter Collingbourne <pcc@google.com>
> ---
>   target/arm/cpu64.c      |  2 +-
>   target/arm/mte_helper.c | 83 ++++++++++++++++++++++++++---------------
>   2 files changed, 53 insertions(+), 32 deletions(-)
> 
> diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
> index 1c23187d1a..c7a1626bec 100644
> --- a/target/arm/cpu64.c
> +++ b/target/arm/cpu64.c
> @@ -683,7 +683,7 @@ static void aarch64_max_initfn(Object *obj)
>            * during realize if the board provides no tag memory, much like
>            * we do for EL2 with the virtualization=on property.
>            */
> -        t = FIELD_DP64(t, ID_AA64PFR1, MTE, 2);
> +        t = FIELD_DP64(t, ID_AA64PFR1, MTE, 3);
>           cpu->isar.id_aa64pfr1 = t;
>   
>           t = cpu->isar.id_aa64mmfr0;
> diff --git a/target/arm/mte_helper.c b/target/arm/mte_helper.c
> index 166b9d260f..7b76d871ff 100644
> --- a/target/arm/mte_helper.c
> +++ b/target/arm/mte_helper.c
> @@ -538,13 +538,51 @@ void HELPER(stzgm_tags)(CPUARMState *env, uint64_t ptr, uint64_t val)
>       }
>   }
>   
> +static void mte_sync_check_fail(CPUARMState *env, uint32_t desc,
> +                                uint64_t dirty_ptr, uintptr_t ra)
> +{
> +    int is_write, syn;
> +
> +    env->exception.vaddress = dirty_ptr;
> +
> +    is_write = FIELD_EX32(desc, MTEDESC, WRITE);
> +    syn = syn_data_abort_no_iss(arm_current_el(env) != 0, 0, 0, 0, 0, is_write,
> +                                0x11);
> +    raise_exception_ra(env, EXCP_DATA_ABORT, syn, exception_target_el(env), ra);
> +    g_assert_not_reached();
> +}
> +
> +static void mte_async_check_fail(CPUARMState *env, uint32_t desc,
> +                                 uint64_t dirty_ptr, uintptr_t ra,
> +                                 ARMMMUIdx arm_mmu_idx, int el)
> +{
> +    int select;
> +
> +    if (regime_has_2_ranges(arm_mmu_idx)) {
> +        select = extract64(dirty_ptr, 55, 1);
> +    } else {
> +        select = 0;
> +    }
> +    env->cp15.tfsr_el[el] |= 1 << select;
> +#ifdef CONFIG_USER_ONLY
> +    /*
> +     * Stand in for a timer irq, setting _TIF_MTE_ASYNC_FAULT,
> +     * which then sends a SIGSEGV when the thread is next scheduled.
> +     * This cpu will return to the main loop at the end of the TB,
> +     * which is rather sooner than "normal".  But the alternative
> +     * is waiting until the next syscall.
> +     */
> +    qemu_cpu_kick(env_cpu(env));
> +#endif
> +}

This is ok, though the desc parameter is unused for async.
I'm not averse to using a goto, like so.

But either way,
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>


r~

---%<
diff --git a/target/arm/mte_helper.c b/target/arm/mte_helper.c
index 9e615cc513..e93603bc02 100644
--- a/target/arm/mte_helper.c
+++ b/target/arm/mte_helper.c
@@ -561,12 +561,23 @@ static void mte_check_fail(CPUARMState *env, uint32_t desc,
          tcf = extract64(sctlr, 40, 2);
      }

+    is_write = FIELD_EX32(desc, MTEDESC, WRITE);
+
      switch (tcf) {
+    default: /* case 3 */
+        /*
+         * Tag check fail causes asynchronous flag set for stores,
+         * or a synchronous exception for loads.
+         */
+        if (is_write) {
+            goto fail_async;
+        }
+        /* fall through */
+
      case 1:
          /* Tag check fail causes a synchronous exception. */
          env->exception.vaddress = dirty_ptr;

-        is_write = FIELD_EX32(desc, MTEDESC, WRITE);
          syn = syn_data_abort_no_iss(arm_current_el(env) != 0, 0, 0, 0, 0,
                                      is_write, 0x11);
          raise_exception_ra(env, EXCP_DATA_ABORT, syn,
@@ -582,6 +593,7 @@ static void mte_check_fail(CPUARMState *env, uint32_t desc,
          g_assert_not_reached();

      case 2:
+    fail_async:
          /* Tag check fail causes asynchronous flag set.  */
          if (regime_has_2_ranges(arm_mmu_idx)) {
              select = extract64(dirty_ptr, 55, 1);
@@ -600,14 +612,6 @@ static void mte_check_fail(CPUARMState *env, uint32_t desc,
          qemu_cpu_kick(env_cpu(env));
  #endif
          break;
-
-    default:
-        /* Case 3: Reserved. */
-        qemu_log_mask(LOG_GUEST_ERROR,
-                      "Tag check failure with SCTLR_EL%d.TCF%s "
-                      "set to reserved value %d\n",
-                      reg_el, el ? "" : "0", tcf);
-        break;
      }
  }
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
index 1c23187d1a..c4afee77d7 100644
--- a/target/arm/cpu64.c
+++ b/target/arm/cpu64.c
@@ -679,9 +679,10 @@ static void aarch64_max_initfn(Object *obj)
         t = FIELD_DP64(t, ID_AA64PFR1, BT, 1);
         t = FIELD_DP64(t, ID_AA64PFR1, SSBS, 2);
         /*
-         * Begin with full support for MTE. This will be downgraded to MTE=0
-         * during realize if the board provides no tag memory, much like
-         * we do for EL2 with the virtualization=on property.
+         * Begin with full support for MTE (FEAT_MTE3). This will be
+         * downgraded to MTE=0 (no MTE) during realize if the board
+         * provides no tag memory, much like we do for EL2 with the
+         * virtualization=on property.
          */
         t = FIELD_DP64(t, ID_AA64PFR1, MTE, 2);
         cpu->isar.id_aa64pfr1 = t;
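
As a sketch of the "desc is unused" point above: in the v1 helpers, the
async path never reads desc (nor, as the quoted code shows, ra), so its
signature could shrink to something like the following. The names follow
the patch; the trimmed parameter list is illustrative, not what was merged:

    static void mte_async_check_fail(CPUARMState *env, uint64_t dirty_ptr,
                                     ARMMMUIdx arm_mmu_idx, int el)
    {
        int select;

        if (regime_has_2_ranges(arm_mmu_idx)) {
            select = extract64(dirty_ptr, 55, 1);
        } else {
            select = 0;
        }
        env->cp15.tfsr_el[el] |= 1 << select;
    #ifdef CONFIG_USER_ONLY
        /* As in v1: stand in for the timer irq that would deliver the
         * asynchronous fault when the thread is next scheduled. */
        qemu_cpu_kick(env_cpu(env));
    #endif
    }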
Richard Henderson July 8, 2021, 3:08 p.m. UTC | #2
Cc: Peter.

r~

On 6/11/21 12:06 PM, Peter Collingbourne wrote:
> MTE3 introduces an asymmetric tag checking mode, in which loads are
> checked synchronously and stores are checked asynchronously. Add
> support for it.
> 
> Signed-off-by: Peter Collingbourne <pcc@google.com>
> [...]
Richard Henderson July 8, 2021, 3:23 p.m. UTC | #3
On 7/8/21 8:08 AM, Richard Henderson wrote:
> Cc: Peter.

Bah, never mind.  There was a v2 that already went in.


r~
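
For completeness: a guest at EL1 opts into the new mode by programming
TCF = 3 into SCTLR_EL1, bits [41:40], which matches the
extract64(sctlr, 40, 2) read in mte_check_fail() below. An illustrative
bare-metal snippet; the helper name and standalone framing are mine, only
the field encoding comes from the patch:

    #include <stdint.h>

    /* Illustrative EL1 snippet: select the asymmetric tag-check mode.
     * SCTLR_EL1.TCF is bits [41:40].  TCF = 3 means loads trap
     * synchronously and stores set TFSR_EL1 asynchronously. */
    static void enable_mte_asymmetric(void)
    {
        uint64_t sctlr;

        __asm__ volatile("mrs %0, sctlr_el1" : "=r"(sctlr));
        sctlr &= ~(3ULL << 40);   /* clear TCF */
        sctlr |=  (3ULL << 40);   /* TCF = 3: asymmetric */
        __asm__ volatile("msr sctlr_el1, %0" :: "r"(sctlr));
        __asm__ volatile("isb");
    }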

Patch

diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
index 1c23187d1a..c7a1626bec 100644
--- a/target/arm/cpu64.c
+++ b/target/arm/cpu64.c
@@ -683,7 +683,7 @@ static void aarch64_max_initfn(Object *obj)
          * during realize if the board provides no tag memory, much like
          * we do for EL2 with the virtualization=on property.
          */
-        t = FIELD_DP64(t, ID_AA64PFR1, MTE, 2);
+        t = FIELD_DP64(t, ID_AA64PFR1, MTE, 3);
         cpu->isar.id_aa64pfr1 = t;
 
         t = cpu->isar.id_aa64mmfr0;
diff --git a/target/arm/mte_helper.c b/target/arm/mte_helper.c
index 166b9d260f..7b76d871ff 100644
--- a/target/arm/mte_helper.c
+++ b/target/arm/mte_helper.c
@@ -538,13 +538,51 @@ void HELPER(stzgm_tags)(CPUARMState *env, uint64_t ptr, uint64_t val)
     }
 }
 
+static void mte_sync_check_fail(CPUARMState *env, uint32_t desc,
+                                uint64_t dirty_ptr, uintptr_t ra)
+{
+    int is_write, syn;
+
+    env->exception.vaddress = dirty_ptr;
+
+    is_write = FIELD_EX32(desc, MTEDESC, WRITE);
+    syn = syn_data_abort_no_iss(arm_current_el(env) != 0, 0, 0, 0, 0, is_write,
+                                0x11);
+    raise_exception_ra(env, EXCP_DATA_ABORT, syn, exception_target_el(env), ra);
+    g_assert_not_reached();
+}
+
+static void mte_async_check_fail(CPUARMState *env, uint32_t desc,
+                                 uint64_t dirty_ptr, uintptr_t ra,
+                                 ARMMMUIdx arm_mmu_idx, int el)
+{
+    int select;
+
+    if (regime_has_2_ranges(arm_mmu_idx)) {
+        select = extract64(dirty_ptr, 55, 1);
+    } else {
+        select = 0;
+    }
+    env->cp15.tfsr_el[el] |= 1 << select;
+#ifdef CONFIG_USER_ONLY
+    /*
+     * Stand in for a timer irq, setting _TIF_MTE_ASYNC_FAULT,
+     * which then sends a SIGSEGV when the thread is next scheduled.
+     * This cpu will return to the main loop at the end of the TB,
+     * which is rather sooner than "normal".  But the alternative
+     * is waiting until the next syscall.
+     */
+    qemu_cpu_kick(env_cpu(env));
+#endif
+}
+
 /* Record a tag check failure.  */
 static void mte_check_fail(CPUARMState *env, uint32_t desc,
                            uint64_t dirty_ptr, uintptr_t ra)
 {
     int mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
     ARMMMUIdx arm_mmu_idx = core_to_aa64_mmu_idx(mmu_idx);
-    int el, reg_el, tcf, select, is_write, syn;
+    int el, reg_el, tcf;
     uint64_t sctlr;
 
     reg_el = regime_el(env, arm_mmu_idx);
@@ -564,14 +602,8 @@ static void mte_check_fail(CPUARMState *env, uint32_t desc,
     switch (tcf) {
     case 1:
         /* Tag check fail causes a synchronous exception. */
-        env->exception.vaddress = dirty_ptr;
-
-        is_write = FIELD_EX32(desc, MTEDESC, WRITE);
-        syn = syn_data_abort_no_iss(arm_current_el(env) != 0, 0, 0, 0, 0,
-                                    is_write, 0x11);
-        raise_exception_ra(env, EXCP_DATA_ABORT, syn,
-                           exception_target_el(env), ra);
-        /* noreturn, but fall through to the assert anyway */
+        mte_sync_check_fail(env, desc, dirty_ptr, ra);
+        break;
 
     case 0:
         /*
@@ -583,30 +615,19 @@ static void mte_check_fail(CPUARMState *env, uint32_t desc,
 
     case 2:
         /* Tag check fail causes asynchronous flag set.  */
-        if (regime_has_2_ranges(arm_mmu_idx)) {
-            select = extract64(dirty_ptr, 55, 1);
-        } else {
-            select = 0;
-        }
-        env->cp15.tfsr_el[el] |= 1 << select;
-#ifdef CONFIG_USER_ONLY
-        /*
-         * Stand in for a timer irq, setting _TIF_MTE_ASYNC_FAULT,
-         * which then sends a SIGSEGV when the thread is next scheduled.
-         * This cpu will return to the main loop at the end of the TB,
-         * which is rather sooner than "normal".  But the alternative
-         * is waiting until the next syscall.
-         */
-        qemu_cpu_kick(env_cpu(env));
-#endif
+        mte_async_check_fail(env, desc, dirty_ptr, ra, arm_mmu_idx, el);
         break;
 
-    default:
-        /* Case 3: Reserved. */
-        qemu_log_mask(LOG_GUEST_ERROR,
-                      "Tag check failure with SCTLR_EL%d.TCF%s "
-                      "set to reserved value %d\n",
-                      reg_el, el ? "" : "0", tcf);
+    case 3:
+        /*
+         * Tag check fail causes asynchronous flag set for stores, or
+         * a synchronous exception for loads.
+         */
+        if (FIELD_EX32(desc, MTEDESC, WRITE)) {
+            mte_async_check_fail(env, desc, dirty_ptr, ra, arm_mmu_idx, el);
+        } else {
+            mte_sync_check_fail(env, desc, dirty_ptr, ra);
+        }
         break;
     }
 }