
[v3,06/20] target/arm: Replace CPSR_ERET_MASK with aarch32_cpsr_valid_mask

Message ID 20200203144716.32204-7-richard.henderson@linaro.org (mailing list archive)
State New, archived
Series target/arm: Implement PAN, ATS1E1, UAO

Commit Message

Richard Henderson Feb. 3, 2020, 2:47 p.m. UTC
CPSR_ERET_MASK was a useless renaming of CPSR_RESERVED.
Its replacement, aarch32_cpsr_valid_mask, also takes into
account bits that the cpu does not support.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/arm/cpu.h       | 2 --
 target/arm/op_helper.c | 5 ++++-
 2 files changed, 4 insertions(+), 3 deletions(-)
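
For context, here is a rough sketch of the idea behind the replacement: the set of
writable CPSR bits is computed from what the CPU implements rather than taken from a
fixed #define. This is illustrative only -- the real aarch32_cpsr_valid_mask() is
introduced earlier in this series and takes (features, isar). The predicates
cpu_has_thumb2/cpu_has_jazelle below are hypothetical placeholders, and the CPSR_*
names are the existing bit definitions in target/arm/cpu.h.

/*
 * Illustrative sketch only, not the helper added earlier in this series.
 * Assumes target/arm/cpu.h's CPSR_* definitions and <stdint.h>/<stdbool.h>
 * are in scope.  cpu_has_thumb2/cpu_has_jazelle are placeholder predicates
 * standing in for the real feature/ID-register checks.
 */
static uint32_t cpsr_valid_mask_sketch(bool cpu_has_thumb2, bool cpu_has_jazelle)
{
    /* Baseline bits assumed writable for the purposes of this sketch.  */
    uint32_t valid = CPSR_M | CPSR_A | CPSR_I | CPSR_F | CPSR_NZCV;

    if (cpu_has_thumb2) {
        /* The IT[7:0] execution state bits only exist with Thumb-2.  */
        valid |= CPSR_IT;
    }
    if (cpu_has_jazelle) {
        /* The J bit only exists when Jazelle is implemented.  */
        valid |= CPSR_J;
    }
    return valid;
}

Bits not present in the computed mask are simply ignored by cpsr_write(), which is
what makes a feature-dependent mask preferable to the fixed CPSR_ERET_MASK constant.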

Comments

Peter Maydell Feb. 7, 2020, 5:32 p.m. UTC | #1
On Mon, 3 Feb 2020 at 14:47, Richard Henderson
<richard.henderson@linaro.org> wrote:
>
> CPSR_ERET_MASK was a useless renaming of CPSR_RESERVED.
> Its replacement, aarch32_cpsr_valid_mask, also takes into
> account bits that the cpu does not support.
>
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
> ---
>  target/arm/cpu.h       | 2 --
>  target/arm/op_helper.c | 5 ++++-
>  2 files changed, 4 insertions(+), 3 deletions(-)
>
> diff --git a/target/arm/cpu.h b/target/arm/cpu.h
> index 08b2f5d73e..694b074298 100644
> --- a/target/arm/cpu.h
> +++ b/target/arm/cpu.h
> @@ -1209,8 +1209,6 @@ void pmu_init(ARMCPU *cpu);
>  #define CPSR_USER (CPSR_NZCV | CPSR_Q | CPSR_GE)
>  /* Execution state bits.  MRS read as zero, MSR writes ignored.  */
>  #define CPSR_EXEC (CPSR_T | CPSR_IT | CPSR_J | CPSR_IL)
> -/* Mask of bits which may be set by exception return copying them from SPSR */
> -#define CPSR_ERET_MASK (~CPSR_RESERVED)
>
>  /* Bit definitions for M profile XPSR. Most are the same as CPSR. */
>  #define XPSR_EXCP 0x1ffU
> diff --git a/target/arm/op_helper.c b/target/arm/op_helper.c
> index 27d16ad9ad..acf1815ea3 100644
> --- a/target/arm/op_helper.c
> +++ b/target/arm/op_helper.c
> @@ -400,11 +400,14 @@ void HELPER(cpsr_write)(CPUARMState *env, uint32_t val, uint32_t mask)
>  /* Write the CPSR for a 32-bit exception return */
>  void HELPER(cpsr_write_eret)(CPUARMState *env, uint32_t val)
>  {
> +    uint32_t mask;
> +
>      qemu_mutex_lock_iothread();
>      arm_call_pre_el_change_hook(env_archcpu(env));
>      qemu_mutex_unlock_iothread();
>
> -    cpsr_write(env, val, CPSR_ERET_MASK, CPSRWriteExceptionReturn);
> +    mask = aarch32_cpsr_valid_mask(env->features, &env_archcpu(env)->isar);
> +    cpsr_write(env, val, mask, CPSRWriteExceptionReturn);
>
>      /* Generated code has already stored the new PC value, but
>       * without masking out its low bits, because which bits need
> --

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>

thanks
-- PMM

Patch

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index 08b2f5d73e..694b074298 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -1209,8 +1209,6 @@  void pmu_init(ARMCPU *cpu);
 #define CPSR_USER (CPSR_NZCV | CPSR_Q | CPSR_GE)
 /* Execution state bits.  MRS read as zero, MSR writes ignored.  */
 #define CPSR_EXEC (CPSR_T | CPSR_IT | CPSR_J | CPSR_IL)
-/* Mask of bits which may be set by exception return copying them from SPSR */
-#define CPSR_ERET_MASK (~CPSR_RESERVED)
 
 /* Bit definitions for M profile XPSR. Most are the same as CPSR. */
 #define XPSR_EXCP 0x1ffU
diff --git a/target/arm/op_helper.c b/target/arm/op_helper.c
index 27d16ad9ad..acf1815ea3 100644
--- a/target/arm/op_helper.c
+++ b/target/arm/op_helper.c
@@ -400,11 +400,14 @@  void HELPER(cpsr_write)(CPUARMState *env, uint32_t val, uint32_t mask)
 /* Write the CPSR for a 32-bit exception return */
 void HELPER(cpsr_write_eret)(CPUARMState *env, uint32_t val)
 {
+    uint32_t mask;
+
     qemu_mutex_lock_iothread();
     arm_call_pre_el_change_hook(env_archcpu(env));
     qemu_mutex_unlock_iothread();
 
-    cpsr_write(env, val, CPSR_ERET_MASK, CPSRWriteExceptionReturn);
+    mask = aarch32_cpsr_valid_mask(env->features, &env_archcpu(env)->isar);
+    cpsr_write(env, val, mask, CPSRWriteExceptionReturn);
 
     /* Generated code has already stored the new PC value, but
      * without masking out its low bits, because which bits need
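
As an aside on the trailing comment in the hunk above: a minimal sketch of the kind of
PC low-bit masking it refers to, assuming the usual AArch32 rule that an exception
return to Thumb state forces PC bit [0] to zero while a return to ARM state forces
bits [1:0] to zero. This is not the QEMU code, just an illustration; mask_return_pc
is a hypothetical name.

/*
 * Minimal sketch, not QEMU's implementation: mask the low PC bits on
 * exception return, where the required masking depends on whether we
 * are returning to Thumb or ARM state.
 */
static uint32_t mask_return_pc(uint32_t pc, bool to_thumb)
{
    /* Thumb state ignores bit [0]; ARM state ignores bits [1:0].  */
    return to_thumb ? (pc & ~1u) : (pc & ~3u);
}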