
[17/19] target/arm: add ARMv8.4-SEL2 extension

Message ID 20210112104511.36576-17-remi.denis.courmont@huawei.com (mailing list archive)
State New, archived
Series ARMv8.4-A Secure EL2

Commit Message

Rémi Denis-Courmont Jan. 12, 2021, 10:45 a.m. UTC
From: Rémi Denis-Courmont <remi.denis.courmont@huawei.com>

This adds handling for the SCR_EL3.EEL2 bit.

Signed-off-by: Rémi Denis-Courmont <remi.denis.courmont@huawei.com>
---
 target/arm/cpu.c       |  2 +-
 target/arm/cpu.h       |  8 ++++++--
 target/arm/helper.c    | 19 ++++++++++++++++---
 target/arm/translate.c | 14 ++++++++++++--
 4 files changed, 35 insertions(+), 8 deletions(-)
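
At its core, the change makes EL2 reachable from Secure state only when SCR_EL3.EEL2 is set. Condensed into a single expression, purely for orientation (the helper name here is invented; the real change extends arm_is_el2_enabled() in cpu.h, shown in the diff below):

    /* Sketch only: equivalent of the arm_is_el2_enabled() hunk below.
     * EL2 is enabled unless we are in Secure state without SCR_EL3.EEL2.
     */
    static inline bool sketch_el2_enabled(CPUARMState *env)
    {
        return arm_feature(env, ARM_FEATURE_EL2) &&
               (!arm_is_secure_below_el3(env) ||
                (env->cp15.scr_el3 & SCR_EEL2));
    }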

Comments

Richard Henderson Jan. 12, 2021, 9:30 p.m. UTC | #1
On 1/12/21 12:45 AM, remi.denis.courmont@huawei.com wrote:
> From: Rémi Denis-Courmont <remi.denis.courmont@huawei.com>
> 
> This adds handling for the SCR_EL3.EEL2 bit.
> 
> Signed-off-by: Rémi Denis-Courmont <remi.denis.courmont@huawei.com>

The patch title seems to have gone awry.

> @@ -2832,9 +2832,19 @@ static bool msr_banked_access_decode
>          }
>          if (s->current_el == 1) {
>              /* If we're in Secure EL1 (which implies that EL3 is AArch64)
> -             * then accesses to Mon registers trap to EL3
> +             * then accesses to Mon registers trap to Secure EL2, if it exists,
> +             * otherwise EL3.
>               */
> -            TCGv_i32 tcg_el = tcg_const_i32(3);
> +            TCGv_i32 tcg_el;
> +
> +            if (dc_isar_feature(aa64_sel2, s)) {
> +                /* Target EL is EL<3 minus SCR_EL3.EEL2> */
> +                tcg_el = load_cpu_field(cp15.scr_el3);
> +                tcg_gen_sextract_i32(tcg_el, tcg_el, ctz32(SCR_EEL2), 1);
> +                tcg_gen_addi_i32(tcg_el, tcg_el, 3);
> +            } else {
> +                tcg_el = tcg_const_i32(3);
> +            }

You can't test an aa64 feature without verifying that the cpu has aa64 support
(if the cpu is strictly aa32, the aa64 registers are undefined/uninitialized).  So:

    if (arm_dc_feature(s, ARM_FEATURE_AARCH64) &&
        dc_isar_feature(aa64_sel2, s)) {
        ...

With those things changed,
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>


r~
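
Folding that suggested guard into the hunk would look roughly like this (a sketch combining the posted patch with the reviewer's condition; the respun patch may differ):

    TCGv_i32 tcg_el;

    if (arm_dc_feature(s, ARM_FEATURE_AARCH64) &&
        dc_isar_feature(aa64_sel2, s)) {
        /* Target EL is EL<3 minus SCR_EL3.EEL2> */
        tcg_el = load_cpu_field(cp15.scr_el3);
        tcg_gen_sextract_i32(tcg_el, tcg_el, ctz32(SCR_EEL2), 1);
        tcg_gen_addi_i32(tcg_el, tcg_el, 3);
    } else {
        tcg_el = tcg_const_i32(3);
    }
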
Rémi Denis-Courmont Jan. 13, 2021, 1:12 p.m. UTC | #2
On Tuesday, 12 January 2021 at 23:30:39 EET, Richard Henderson wrote:
> On 1/12/21 12:45 AM, remi.denis.courmont@huawei.com wrote:
> > From: Rémi Denis-Courmont <remi.denis.courmont@huawei.com>
> > 
> > This adds handling for the SCR_EL3.EEL2 bit.
> > 
> > Signed-off-by: Rémi Denis-Courmont <remi.denis.courmont@huawei.com>
> 
> The patch title seems to have gone awry.
> 
> > @@ -2832,9 +2832,19 @@ static bool msr_banked_access_decode
> >          }
> >          if (s->current_el == 1) {
> >              /* If we're in Secure EL1 (which implies that EL3 is AArch64)
> > -             * then accesses to Mon registers trap to EL3
> > +             * then accesses to Mon registers trap to Secure EL2, if it exists,
> > +             * otherwise EL3.
> >               */
> > -            TCGv_i32 tcg_el = tcg_const_i32(3);
> > +            TCGv_i32 tcg_el;
> > +
> > +            if (dc_isar_feature(aa64_sel2, s)) {
> > +                /* Target EL is EL<3 minus SCR_EL3.EEL2> */
> > +                tcg_el = load_cpu_field(cp15.scr_el3);
> > +                tcg_gen_sextract_i32(tcg_el, tcg_el, ctz32(SCR_EEL2), 1);
> > +                tcg_gen_addi_i32(tcg_el, tcg_el, 3);
> > +            } else {
> > +                tcg_el = tcg_const_i32(3);
> > +            }
> 
> You can't test an aa64 feature without verifying that the cpu has aa64
> support (if the cpu is strictly aa32, the aa64 registers are
> undefined/uninitialized).  So:
> 
>     if (arm_dc_feature(s, ARM_FEATURE_AARCH64) &&
>         dc_isar_feature(aa64_sel2, s)) {
>         ...
> 

Hmm, yeah. Should this be an ifdef on TARGET_AARCH64 instead?

Also do we need to revector the exception code, or leave it mostly duplicated 
as is?


> With those things changed,
> Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
> 
> 
> r~
Richard Henderson Jan. 13, 2021, 5:35 p.m. UTC | #3
On 1/13/21 3:12 AM, Rémi Denis-Courmont wrote:
>>     if (arm_dc_feature(s, ARM_FEATURE_AARCH64) &&
>>         dc_isar_feature(aa64_sel2, s)) {
>>         ...
>>
> 
> Hmm, yeah. Should this be an ifdef on TARGET_AARCH64 instead?

No, I don't think so.  It's not performance critical that we avoid a single bit
test.

> Also do we need to revector the exception code, or leave it mostly duplicated 
> as is?

If you see a way to tidy that, great, please.  But I don't think it's required
for this patch set.


r~
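
For comparison, the two forms discussed above, as a sketch (the "..." bodies stand in for the hunk shown earlier). The runtime test also covers the case where an aarch64 build models an AArch32-only CPU, whose aa64 ID registers are left uninitialized, which is the concern raised in the first reply:

    /* Compile-time guard (the alternative raised): */
    #ifdef TARGET_AARCH64
        if (dc_isar_feature(aa64_sel2, s)) {
            ...
        }
    #endif

    /* Runtime guard (as suggested): */
        if (arm_dc_feature(s, ARM_FEATURE_AARCH64) &&
            dc_isar_feature(aa64_sel2, s)) {
            ...
        }
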

Patch

diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index 5530874686..b85b644941 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -480,7 +480,7 @@  static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
              * masked from Secure state. The HCR and SCR settings
              * don't affect the masking logic, only the interrupt routing.
              */
-            if (target_el == 3 || !secure) {
+            if (target_el == 3 || !secure || (env->cp15.scr_el3 & SCR_EEL2)) {
                 unmasked = true;
             }
         } else {
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index e605791e47..df510114f6 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -2094,7 +2094,10 @@  static inline bool arm_is_secure(CPUARMState *env)
 static inline bool arm_is_el2_enabled(CPUARMState *env)
 {
     if (arm_feature(env, ARM_FEATURE_EL2)) {
-        return !arm_is_secure_below_el3(env);
+        if (arm_is_secure_below_el3(env)) {
+            return (env->cp15.scr_el3 & SCR_EEL2) != 0;
+        }
+        return true;
     }
     return false;
 }
@@ -2141,7 +2144,8 @@  static inline bool arm_el_is_aa64(CPUARMState *env, int el)
         return aa64;
     }
 
-    if (arm_feature(env, ARM_FEATURE_EL3)) {
+    if (arm_feature(env, ARM_FEATURE_EL3) &&
+        ((env->cp15.scr_el3 & SCR_NS) || !(env->cp15.scr_el3 & SCR_EEL2))) {
         aa64 = aa64 && (env->cp15.scr_el3 & SCR_RW);
     }
 
diff --git a/target/arm/helper.c b/target/arm/helper.c
index 7648f6fb97..32fc72d9ed 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -532,6 +532,9 @@  static CPAccessResult access_trap_aa32s_el1(CPUARMState *env,
         return CP_ACCESS_OK;
     }
     if (arm_is_secure_below_el3(env)) {
+        if (env->cp15.scr_el3 & SCR_EEL2) {
+            return CP_ACCESS_TRAP_EL2;
+        }
         return CP_ACCESS_TRAP_EL3;
     }
     /* This will be EL1 NS and EL2 NS, which just UNDEF */
@@ -2029,6 +2032,9 @@  static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
         if (cpu_isar_feature(aa64_pauth, cpu)) {
             valid_mask |= SCR_API | SCR_APK;
         }
+        if (cpu_isar_feature(aa64_sel2, cpu)) {
+            valid_mask |= SCR_EEL2;
+        }
         if (cpu_isar_feature(aa64_mte, cpu)) {
             valid_mask |= SCR_ATA;
         }
@@ -3387,13 +3393,16 @@  static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
 {
     if (ri->opc2 & 4) {
-        /* The ATS12NSO* operations must trap to EL3 if executed in
+        /* The ATS12NSO* operations must trap to EL3 or EL2 if executed in
          * Secure EL1 (which can only happen if EL3 is AArch64).
          * They are simply UNDEF if executed from NS EL1.
          * They function normally from EL2 or EL3.
          */
         if (arm_current_el(env) == 1) {
             if (arm_is_secure_below_el3(env)) {
+                if (env->cp15.scr_el3 & SCR_EEL2) {
+                    return CP_ACCESS_TRAP_UNCATEGORIZED_EL2;
+                }
                 return CP_ACCESS_TRAP_UNCATEGORIZED_EL3;
             }
             return CP_ACCESS_TRAP_UNCATEGORIZED;
@@ -3656,7 +3665,8 @@  static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri,
 static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                      bool isread)
 {
-    if (arm_current_el(env) == 3 && !(env->cp15.scr_el3 & SCR_NS)) {
+    if (arm_current_el(env) == 3 &&
+        !(env->cp15.scr_el3 & (SCR_NS | SCR_EEL2))) {
         return CP_ACCESS_TRAP;
     }
     return CP_ACCESS_OK;
@@ -5755,12 +5765,15 @@  static CPAccessResult nsacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                    bool isread)
 {
     /* The NSACR is RW at EL3, and RO for NS EL1 and NS EL2.
-     * At Secure EL1 it traps to EL3.
+     * At Secure EL1 it traps to EL3 or EL2.
      */
     if (arm_current_el(env) == 3) {
         return CP_ACCESS_OK;
     }
     if (arm_is_secure_below_el3(env)) {
+        if (env->cp15.scr_el3 & SCR_EEL2) {
+            return CP_ACCESS_TRAP_EL2;
+        }
         return CP_ACCESS_TRAP_EL3;
     }
     /* Accesses from EL1 NS and EL2 NS are UNDEF for write but allow reads. */
diff --git a/target/arm/translate.c b/target/arm/translate.c
index 8b6b7355c9..688cd41684 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -2832,9 +2832,19 @@  static bool msr_banked_access_decode(DisasContext *s, int r, int sysm, int rn,
         }
         if (s->current_el == 1) {
             /* If we're in Secure EL1 (which implies that EL3 is AArch64)
-             * then accesses to Mon registers trap to EL3
+             * then accesses to Mon registers trap to Secure EL2, if it exists,
+             * otherwise EL3.
              */
-            TCGv_i32 tcg_el = tcg_const_i32(3);
+            TCGv_i32 tcg_el;
+
+            if (dc_isar_feature(aa64_sel2, s)) {
+                /* Target EL is EL<3 minus SCR_EL3.EEL2> */
+                tcg_el = load_cpu_field(cp15.scr_el3);
+                tcg_gen_sextract_i32(tcg_el, tcg_el, ctz32(SCR_EEL2), 1);
+                tcg_gen_addi_i32(tcg_el, tcg_el, 3);
+            } else {
+                tcg_el = tcg_const_i32(3);
+            }
 
             gen_exception_el(s, EXCP_UDEF, syn_uncategorized(), tcg_el);
             tcg_temp_free_i32(tcg_el);
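
The sextract/addi pair in this last hunk computes "EL3 minus SCR_EL3.EEL2" without a branch: sign-extracting the single EEL2 bit yields -1 when it is set and 0 when it is clear, so adding 3 gives the target EL (2 or 3). The same arithmetic written as plain C, for illustration only (the helper name is invented; SCR_EL3.EEL2 is bit 18):

    #include <stdint.h>

    /* Plain-C sketch of what the generated TCG ops compute. */
    static int sketch_banked_msr_trap_el(uint64_t scr_el3)
    {
        int eel2 = (scr_el3 >> 18) & 1;   /* SCR_EL3.EEL2 */
        return 3 - eel2;                  /* 2 if Secure EL2 is enabled, else 3 */
    }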