Message ID | 20250318045125.759259-12-pierrick.bouvier@linaro.org (mailing list archive) |
---|---|
State | New |
Headers | show |
Series | single-binary: start make hw/arm/ common (boot.c) | expand |
On 18/3/25 05:51, Pierrick Bouvier wrote: > Directly condition associated calls in target/arm/helper.c for now. > > Signed-off-by: Pierrick Bouvier <pierrick.bouvier@linaro.org> > --- > target/arm/cpu.h | 8 -------- > target/arm/helper.c | 6 ++++++ > 2 files changed, 6 insertions(+), 8 deletions(-) > > diff --git a/target/arm/cpu.h b/target/arm/cpu.h > index 51b6428cfec..9205cbdec43 100644 > --- a/target/arm/cpu.h > +++ b/target/arm/cpu.h > @@ -1222,7 +1222,6 @@ int arm_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs, > */ > void arm_emulate_firmware_reset(CPUState *cpustate, int target_el); > > -#ifdef TARGET_AARCH64 > int aarch64_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); > int aarch64_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); > void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq); > @@ -1254,13 +1253,6 @@ static inline uint64_t *sve_bswap64(uint64_t *dst, uint64_t *src, int nr) > #endif > } > > -#else > -static inline void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq) { } > -static inline void aarch64_sve_change_el(CPUARMState *env, int o, > - int n, bool a) > -{ } > -#endif > - > void aarch64_sync_32_to_64(CPUARMState *env); > void aarch64_sync_64_to_32(CPUARMState *env); > > diff --git a/target/arm/helper.c b/target/arm/helper.c > index b46b2bffcf3..774e1ee0245 100644 > --- a/target/arm/helper.c > +++ b/target/arm/helper.c > @@ -6562,7 +6562,9 @@ static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri, > */ > new_len = sve_vqm1_for_el(env, cur_el); > if (new_len < old_len) { > +#ifdef TARGET_AARCH64 What about using runtime check instead? if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64) && new_len < old_len) { > aarch64_sve_narrow_vq(env, new_len + 1); > +#endif > } > }
On Tue, 18 Mar 2025 at 17:42, Philippe Mathieu-Daudé <philmd@linaro.org> wrote: > > On 18/3/25 05:51, Pierrick Bouvier wrote: > > Directly condition associated calls in target/arm/helper.c for now. > > > > Signed-off-by: Pierrick Bouvier <pierrick.bouvier@linaro.org> > > --- > > target/arm/cpu.h | 8 -------- > > target/arm/helper.c | 6 ++++++ > > 2 files changed, 6 insertions(+), 8 deletions(-) > > > > diff --git a/target/arm/cpu.h b/target/arm/cpu.h > > index 51b6428cfec..9205cbdec43 100644 > > --- a/target/arm/cpu.h > > +++ b/target/arm/cpu.h > > @@ -1222,7 +1222,6 @@ int arm_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs, > > */ > > void arm_emulate_firmware_reset(CPUState *cpustate, int target_el); > > > > -#ifdef TARGET_AARCH64 > > int aarch64_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); > > int aarch64_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); > > void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq); > > @@ -1254,13 +1253,6 @@ static inline uint64_t *sve_bswap64(uint64_t *dst, uint64_t *src, int nr) > > #endif > > } > > > > -#else > > -static inline void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq) { } > > -static inline void aarch64_sve_change_el(CPUARMState *env, int o, > > - int n, bool a) > > -{ } > > -#endif > > - > > void aarch64_sync_32_to_64(CPUARMState *env); > > void aarch64_sync_64_to_32(CPUARMState *env); > > > > diff --git a/target/arm/helper.c b/target/arm/helper.c > > index b46b2bffcf3..774e1ee0245 100644 > > --- a/target/arm/helper.c > > +++ b/target/arm/helper.c > > @@ -6562,7 +6562,9 @@ static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri, > > */ > > new_len = sve_vqm1_for_el(env, cur_el); > > if (new_len < old_len) { > > +#ifdef TARGET_AARCH64 > > What about using runtime check instead? > > if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64) && new_len < old_len) { > That would be a dead check: it is not possible to get here unless ARM_FEATURE_AARCH64 is set. thanks -- PMM
On 3/18/25 10:42, Philippe Mathieu-Daudé wrote: > On 18/3/25 05:51, Pierrick Bouvier wrote: >> Directly condition associated calls in target/arm/helper.c for now. >> >> Signed-off-by: Pierrick Bouvier <pierrick.bouvier@linaro.org> >> --- >> target/arm/cpu.h | 8 -------- >> target/arm/helper.c | 6 ++++++ >> 2 files changed, 6 insertions(+), 8 deletions(-) >> >> diff --git a/target/arm/cpu.h b/target/arm/cpu.h >> index 51b6428cfec..9205cbdec43 100644 >> --- a/target/arm/cpu.h >> +++ b/target/arm/cpu.h >> @@ -1222,7 +1222,6 @@ int arm_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs, >> */ >> void arm_emulate_firmware_reset(CPUState *cpustate, int target_el); >> >> -#ifdef TARGET_AARCH64 >> int aarch64_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); >> int aarch64_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); >> void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq); >> @@ -1254,13 +1253,6 @@ static inline uint64_t *sve_bswap64(uint64_t *dst, uint64_t *src, int nr) >> #endif >> } >> >> -#else >> -static inline void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq) { } >> -static inline void aarch64_sve_change_el(CPUARMState *env, int o, >> - int n, bool a) >> -{ } >> -#endif >> - >> void aarch64_sync_32_to_64(CPUARMState *env); >> void aarch64_sync_64_to_32(CPUARMState *env); >> >> diff --git a/target/arm/helper.c b/target/arm/helper.c >> index b46b2bffcf3..774e1ee0245 100644 >> --- a/target/arm/helper.c >> +++ b/target/arm/helper.c >> @@ -6562,7 +6562,9 @@ static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri, >> */ >> new_len = sve_vqm1_for_el(env, cur_el); >> if (new_len < old_len) { >> +#ifdef TARGET_AARCH64 > > What about using runtime check instead? > > if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64) && new_len < old_len) { > This is the right way to deal with it, but I would prefer to do it when the concerned file (target/arm/helper.c) will be modified, in a future series. 
The current one focuses on hw/arm and "pushes back" dependencies in other places, that we can deal with later. Nothing wrong with the change proposed, just trying to focus on the minimal set needed to reach the goal, without any detour. >> aarch64_sve_narrow_vq(env, new_len + 1); >> +#endif >> } >> } >
On 3/18/25 10:50, Peter Maydell wrote: > On Tue, 18 Mar 2025 at 17:42, Philippe Mathieu-Daudé <philmd@linaro.org> wrote: >> >> On 18/3/25 05:51, Pierrick Bouvier wrote: >>> Directly condition associated calls in target/arm/helper.c for now. >>> >>> Signed-off-by: Pierrick Bouvier <pierrick.bouvier@linaro.org> >>> --- >>> target/arm/cpu.h | 8 -------- >>> target/arm/helper.c | 6 ++++++ >>> 2 files changed, 6 insertions(+), 8 deletions(-) >>> >>> diff --git a/target/arm/cpu.h b/target/arm/cpu.h >>> index 51b6428cfec..9205cbdec43 100644 >>> --- a/target/arm/cpu.h >>> +++ b/target/arm/cpu.h >>> @@ -1222,7 +1222,6 @@ int arm_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs, >>> */ >>> void arm_emulate_firmware_reset(CPUState *cpustate, int target_el); >>> >>> -#ifdef TARGET_AARCH64 >>> int aarch64_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); >>> int aarch64_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); >>> void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq); >>> @@ -1254,13 +1253,6 @@ static inline uint64_t *sve_bswap64(uint64_t *dst, uint64_t *src, int nr) >>> #endif >>> } >>> >>> -#else >>> -static inline void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq) { } >>> -static inline void aarch64_sve_change_el(CPUARMState *env, int o, >>> - int n, bool a) >>> -{ } >>> -#endif >>> - >>> void aarch64_sync_32_to_64(CPUARMState *env); >>> void aarch64_sync_64_to_32(CPUARMState *env); >>> >>> diff --git a/target/arm/helper.c b/target/arm/helper.c >>> index b46b2bffcf3..774e1ee0245 100644 >>> --- a/target/arm/helper.c >>> +++ b/target/arm/helper.c >>> @@ -6562,7 +6562,9 @@ static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri, >>> */ >>> new_len = sve_vqm1_for_el(env, cur_el); >>> if (new_len < old_len) { >>> +#ifdef TARGET_AARCH64 >> >> What about using runtime check instead? 
>> >> if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64) && new_len < old_len) { >> > > That would be a dead check: it is not possible to get here > unless ARM_FEATURE_AARCH64 is set. > We can then assert it, to make sure there is no regression around that. We now have another conversation and something to decide in another file, and that's why I chose to do the minimal change ("ifdef the issue") instead of trying to do any further change. > thanks > -- PMM
On Tue, 18 Mar 2025 at 17:52, Pierrick Bouvier <pierrick.bouvier@linaro.org> wrote: > > On 3/18/25 10:50, Peter Maydell wrote: > > On Tue, 18 Mar 2025 at 17:42, Philippe Mathieu-Daudé <philmd@linaro.org> wrote: > >> > >> On 18/3/25 05:51, Pierrick Bouvier wrote: > >>> Directly condition associated calls in target/arm/helper.c for now. > >>> > >>> Signed-off-by: Pierrick Bouvier <pierrick.bouvier@linaro.org> > >>> --- > >>> target/arm/cpu.h | 8 -------- > >>> target/arm/helper.c | 6 ++++++ > >>> 2 files changed, 6 insertions(+), 8 deletions(-) > >>> > >>> diff --git a/target/arm/cpu.h b/target/arm/cpu.h > >>> index 51b6428cfec..9205cbdec43 100644 > >>> --- a/target/arm/cpu.h > >>> +++ b/target/arm/cpu.h > >>> @@ -1222,7 +1222,6 @@ int arm_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs, > >>> */ > >>> void arm_emulate_firmware_reset(CPUState *cpustate, int target_el); > >>> > >>> -#ifdef TARGET_AARCH64 > >>> int aarch64_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); > >>> int aarch64_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); > >>> void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq); > >>> @@ -1254,13 +1253,6 @@ static inline uint64_t *sve_bswap64(uint64_t *dst, uint64_t *src, int nr) > >>> #endif > >>> } > >>> > >>> -#else > >>> -static inline void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq) { } > >>> -static inline void aarch64_sve_change_el(CPUARMState *env, int o, > >>> - int n, bool a) > >>> -{ } > >>> -#endif > >>> - > >>> void aarch64_sync_32_to_64(CPUARMState *env); > >>> void aarch64_sync_64_to_32(CPUARMState *env); > >>> > >>> diff --git a/target/arm/helper.c b/target/arm/helper.c > >>> index b46b2bffcf3..774e1ee0245 100644 > >>> --- a/target/arm/helper.c > >>> +++ b/target/arm/helper.c > >>> @@ -6562,7 +6562,9 @@ static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri, > >>> */ > >>> new_len = sve_vqm1_for_el(env, cur_el); > >>> if (new_len < old_len) { > >>> +#ifdef TARGET_AARCH64 > >> 
> >> What about using runtime check instead? > >> > >> if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64) && new_len < old_len) { > >> > > > > That would be a dead check: it is not possible to get here > > unless ARM_FEATURE_AARCH64 is set. > > > > We can then assert it, to make sure there is no regression around that. We have a lot of write/read/access fns for AArch64-only sysregs, and we don't need to assert in all of them that they're called only when the CPU has AArch64 enabled. > We now have another conversation and something to decide in another > file, and that's why I chose to do the minimal change ("ifdef the > issue") instead of trying to do any change. I think we can fairly easily avoid ifdeffing the callsite of aarch64_sve_narrow_vq(). Currently we have: * a real version of the function, whose definition is inside an ifdef TARGET_AARCH64 in target/arm/helper.c * a stub version, inline in the cpu.h header If we don't want to have the stub version with ifdefs, then we can move the real implementation of the function to not be inside the ifdef (matching the fact that the prototype is no longer inside an ifdef). The function doesn't call any other functions that are TARGET_AARCH64 only, so it shouldn't be a "now we have to move 50 other things" problem, I hope. thanks -- PMM
On 3/18/25 11:06, Peter Maydell wrote: > On Tue, 18 Mar 2025 at 17:52, Pierrick Bouvier > <pierrick.bouvier@linaro.org> wrote: >> >> On 3/18/25 10:50, Peter Maydell wrote: >>> On Tue, 18 Mar 2025 at 17:42, Philippe Mathieu-Daudé <philmd@linaro.org> wrote: >>>> >>>> On 18/3/25 05:51, Pierrick Bouvier wrote: >>>>> Directly condition associated calls in target/arm/helper.c for now. >>>>> >>>>> Signed-off-by: Pierrick Bouvier <pierrick.bouvier@linaro.org> >>>>> --- >>>>> target/arm/cpu.h | 8 -------- >>>>> target/arm/helper.c | 6 ++++++ >>>>> 2 files changed, 6 insertions(+), 8 deletions(-) >>>>> >>>>> diff --git a/target/arm/cpu.h b/target/arm/cpu.h >>>>> index 51b6428cfec..9205cbdec43 100644 >>>>> --- a/target/arm/cpu.h >>>>> +++ b/target/arm/cpu.h >>>>> @@ -1222,7 +1222,6 @@ int arm_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs, >>>>> */ >>>>> void arm_emulate_firmware_reset(CPUState *cpustate, int target_el); >>>>> >>>>> -#ifdef TARGET_AARCH64 >>>>> int aarch64_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); >>>>> int aarch64_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); >>>>> void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq); >>>>> @@ -1254,13 +1253,6 @@ static inline uint64_t *sve_bswap64(uint64_t *dst, uint64_t *src, int nr) >>>>> #endif >>>>> } >>>>> >>>>> -#else >>>>> -static inline void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq) { } >>>>> -static inline void aarch64_sve_change_el(CPUARMState *env, int o, >>>>> - int n, bool a) >>>>> -{ } >>>>> -#endif >>>>> - >>>>> void aarch64_sync_32_to_64(CPUARMState *env); >>>>> void aarch64_sync_64_to_32(CPUARMState *env); >>>>> >>>>> diff --git a/target/arm/helper.c b/target/arm/helper.c >>>>> index b46b2bffcf3..774e1ee0245 100644 >>>>> --- a/target/arm/helper.c >>>>> +++ b/target/arm/helper.c >>>>> @@ -6562,7 +6562,9 @@ static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri, >>>>> */ >>>>> new_len = sve_vqm1_for_el(env, cur_el); >>>>> if (new_len < 
old_len) { >>>>> +#ifdef TARGET_AARCH64 >>>> >>>> What about using runtime check instead? >>>> >>>> if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64) && new_len < old_len) { >>>> >>> >>> That would be a dead check: it is not possible to get here >>> unless ARM_FEATURE_AARCH64 is set. >>> >> >> We can then assert it, to make sure there is no regression around that. > > We have a lot of write/read/access fns for AArch64-only sysregs, and > we don't need to assert in all of them that they're called only when > the CPU has AArch64 enabled. > >> We now have another conversation and something to decide in another >> file, and that's why I chose to do the minimal change ("ifdef the >> issue") instead of trying to do any change. > > I think we can fairly easily avoid ifdeffing the callsite of > aarch64_sve_narrow_vq(). Currently we have: > * a real version of the function, whose definition is inside > an ifdef TARGET_AARCH64 in target/arm/helper.c > * a stub version, inline in the cpu.h header > > If we don't want to have the stub version with ifdefs, then we can > move the real implementation of the function to not be inside the > ifdef (matching the fact that the prototype is no longer inside > an ifdef). The function doesn't call any other functions that are > TARGET_AARCH64 only, so it shouldn't be a "now we have to move > 50 other things" problem, I hope. > I'll try to just let the call be done, and see what happens. But if I meet a regression somewhere in target/arm/*, I'll simply let the current ifdef for now. I understand it "should be ok", but I just want to focus on hw/arm, and not start further changes in target/arm/helper.c as a side effect of simply moving an ifdef in a header. > thanks > -- PMM
On 18/3/25 18:50, Peter Maydell wrote: > On Tue, 18 Mar 2025 at 17:42, Philippe Mathieu-Daudé <philmd@linaro.org> wrote: >> >> On 18/3/25 05:51, Pierrick Bouvier wrote: >>> Directly condition associated calls in target/arm/helper.c for now. >>> >>> Signed-off-by: Pierrick Bouvier <pierrick.bouvier@linaro.org> >>> --- >>> target/arm/cpu.h | 8 -------- >>> target/arm/helper.c | 6 ++++++ >>> 2 files changed, 6 insertions(+), 8 deletions(-) >>> >>> diff --git a/target/arm/cpu.h b/target/arm/cpu.h >>> index 51b6428cfec..9205cbdec43 100644 >>> --- a/target/arm/cpu.h >>> +++ b/target/arm/cpu.h >>> @@ -1222,7 +1222,6 @@ int arm_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs, >>> */ >>> void arm_emulate_firmware_reset(CPUState *cpustate, int target_el); >>> >>> -#ifdef TARGET_AARCH64 >>> int aarch64_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); >>> int aarch64_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); >>> void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq); >>> @@ -1254,13 +1253,6 @@ static inline uint64_t *sve_bswap64(uint64_t *dst, uint64_t *src, int nr) >>> #endif >>> } >>> >>> -#else >>> -static inline void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq) { } >>> -static inline void aarch64_sve_change_el(CPUARMState *env, int o, >>> - int n, bool a) >>> -{ } >>> -#endif >>> - >>> void aarch64_sync_32_to_64(CPUARMState *env); >>> void aarch64_sync_64_to_32(CPUARMState *env); >>> >>> diff --git a/target/arm/helper.c b/target/arm/helper.c >>> index b46b2bffcf3..774e1ee0245 100644 >>> --- a/target/arm/helper.c >>> +++ b/target/arm/helper.c >>> @@ -6562,7 +6562,9 @@ static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri, >>> */ >>> new_len = sve_vqm1_for_el(env, cur_el); >>> if (new_len < old_len) { >>> +#ifdef TARGET_AARCH64 >> >> What about using runtime check instead? 
>> >> if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64) && new_len < old_len) { >> > > That would be a dead check: it is not possible to get here > unless ARM_FEATURE_AARCH64 is set. So checks in callees such as: -- >8 -- diff --git a/target/arm/helper.c b/target/arm/helper.c index bb445e30cd1..8377eb0e710 100644 --- a/target/arm/helper.c +++ b/target/arm/helper.c @@ -11547,5 +11547,7 @@ void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq) uint64_t pmask; + ARMCPU *cpu = env_archcpu(env); + assert(cpu_isar_feature(aa64_sve, cpu)); assert(vq >= 1 && vq <= ARM_MAX_VQ); - assert(vq <= env_archcpu(env)->sve_max_vq); + assert(vq <= cpu->sve_max_vq); ---
diff --git a/target/arm/cpu.h b/target/arm/cpu.h index 51b6428cfec..9205cbdec43 100644 --- a/target/arm/cpu.h +++ b/target/arm/cpu.h @@ -1222,7 +1222,6 @@ int arm_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs, */ void arm_emulate_firmware_reset(CPUState *cpustate, int target_el); -#ifdef TARGET_AARCH64 int aarch64_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); int aarch64_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq); @@ -1254,13 +1253,6 @@ static inline uint64_t *sve_bswap64(uint64_t *dst, uint64_t *src, int nr) #endif } -#else -static inline void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq) { } -static inline void aarch64_sve_change_el(CPUARMState *env, int o, - int n, bool a) -{ } -#endif - void aarch64_sync_32_to_64(CPUARMState *env); void aarch64_sync_64_to_32(CPUARMState *env); diff --git a/target/arm/helper.c b/target/arm/helper.c index b46b2bffcf3..774e1ee0245 100644 --- a/target/arm/helper.c +++ b/target/arm/helper.c @@ -6562,7 +6562,9 @@ static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri, */ new_len = sve_vqm1_for_el(env, cur_el); if (new_len < old_len) { +#ifdef TARGET_AARCH64 aarch64_sve_narrow_vq(env, new_len + 1); +#endif } } @@ -10646,7 +10648,9 @@ static void arm_cpu_do_interrupt_aarch64(CPUState *cs) * Note that new_el can never be 0. If cur_el is 0, then * el0_a64 is is_a64(), else el0_a64 is ignored. */ +#ifdef TARGET_AARCH64 aarch64_sve_change_el(env, cur_el, new_el, is_a64(env)); +#endif } if (cur_el < new_el) { @@ -11663,7 +11667,9 @@ void aarch64_sve_change_el(CPUARMState *env, int old_el, /* When changing vector length, clear inaccessible state. */ if (new_len < old_len) { +#ifdef TARGET_AARCH64 aarch64_sve_narrow_vq(env, new_len + 1); +#endif } } #endif
Directly condition associated calls in target/arm/helper.c for now. Signed-off-by: Pierrick Bouvier <pierrick.bouvier@linaro.org> --- target/arm/cpu.h | 8 -------- target/arm/helper.c | 6 ++++++ 2 files changed, 6 insertions(+), 8 deletions(-)