@@ -35,6 +35,6 @@ void cpu_sve_add_props(Object *obj);
void cpu_sve_add_props_max(Object *obj);
/* return the vector length for EL */
-uint32_t sve_zcr_len_for_el(CPUARMState *env, int el);
+uint32_t cpu_sve_get_zcr_len_for_el(CPUARMState *env, int el);
#endif /* CPU_SVE_H */
@@ -168,7 +168,7 @@ static off_t sve_fpcr_offset(uint32_t vq)
static uint32_t sve_current_vq(CPUARMState *env)
{
- return sve_zcr_len_for_el(env, arm_current_el(env)) + 1;
+ return cpu_sve_get_zcr_len_for_el(env, arm_current_el(env)) + 1;
}
static size_t sve_size_vq(uint32_t vq)
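The call site above turns the ZCR-style LEN encoding back into a vector-quad count. As a quick standalone illustration of that encoding (not QEMU code, values made up): LEN value N selects (N + 1) quadwords, i.e. (N + 1) * 16 bytes.

    /* Standalone illustration: the ZCR_ELx.LEN encoding N selects a vector
     * of (N + 1) quadwords, i.e. (N + 1) * 16 bytes. Not QEMU code. */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        for (uint32_t len = 0; len <= 3; len++) {
            uint32_t vq = len + 1;                  /* 128-bit quadwords */
            printf("LEN=%u -> VQ=%u -> %u bytes\n", len, vq, vq * 16);
        }
        return 0;
    }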
@@ -289,7 +289,7 @@ void cpu_sve_add_props_max(Object *obj)
object_property_add(obj, "sve-max-vq", "uint32", get_prop_max_vq, set_prop_max_vq, NULL, NULL);
}
-static uint32_t sve_zcr_get_valid_len(ARMCPU *cpu, uint32_t start_len)
+static uint32_t get_valid_len(ARMCPU *cpu, uint32_t start_len)
{
uint32_t end_len;
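For context, the helper renamed here rounds a requested length down to one the CPU model actually implements. A minimal standalone sketch of that round-down idea, assuming a plain bitmask of implemented LEN values rather than QEMU's own data structures:

    /* Sketch only: bit N of 'supported' set means LEN encoding N is
     * implemented. Round the requested LEN down to a supported one. */
    #include <stdint.h>

    static uint32_t round_down_to_supported(uint32_t supported, uint32_t start_len)
    {
        for (int len = (int)start_len; len > 0; len--) {
            if (supported & (1u << len)) {
                return (uint32_t)len;
            }
        }
        return 0;   /* assume the minimum length (LEN 0, 128 bits) exists */
    }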
@@ -304,7 +304,7 @@ static uint32_t sve_zcr_get_valid_len(ARMCPU *cpu, uint32_t start_len)
/*
* Given that SVE is enabled, return the vector length for EL.
*/
-uint32_t sve_zcr_len_for_el(CPUARMState *env, int el)
+uint32_t cpu_sve_get_zcr_len_for_el(CPUARMState *env, int el)
{
ARMCPU *cpu = env_archcpu(env);
uint32_t zcr_len = cpu->sve_max_vq - 1;
@@ -319,5 +319,5 @@ uint32_t sve_zcr_len_for_el(CPUARMState *env, int el)
zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[3]);
}
- return sve_zcr_get_valid_len(cpu, zcr_len);
+ return get_valid_len(cpu, zcr_len);
}
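The flow above starts from the CPU maximum (sve_max_vq - 1), clamps it by the ZCR_ELx[3:0] fields of the controlling exception levels, then rounds to an implemented length. A standalone worked example of the clamping, with made-up register values:

    /* Worked example of the MIN() clamping above; values are illustrative. */
    #include <stdint.h>
    #include <stdio.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    int main(void)
    {
        uint32_t sve_max_vq = 16;                  /* CPU max: 2048-bit vectors */
        uint64_t zcr_el1 = 0x3, zcr_el3 = 0x7;     /* example ZCR_ELx values */

        uint32_t len = sve_max_vq - 1;
        len = MIN(len, 0xf & (uint32_t)zcr_el1);
        len = MIN(len, 0xf & (uint32_t)zcr_el3);
        printf("effective LEN=%u -> %u-bit vectors\n", len, (len + 1) * 128);
        return 0;
    }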
@@ -549,7 +549,7 @@ static void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags)
vfp_get_fpcr(env), vfp_get_fpsr(env));
if (cpu_isar_feature(aa64_sve, cpu) && sve_exception_el(env, el) == 0) {
- int j, zcr_len = sve_zcr_len_for_el(env, el);
+ int j, zcr_len = cpu_sve_get_zcr_len_for_el(env, el);
for (i = 0; i <= FFR_PRED_NUM; i++) {
bool eol;
@@ -5802,7 +5802,7 @@ static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
int cur_el = arm_current_el(env);
- int old_len = sve_zcr_len_for_el(env, cur_el);
+ int old_len = cpu_sve_get_zcr_len_for_el(env, cur_el);
int new_len;
/* Bits other than [3:0] are RAZ/WI. */
@@ -5813,7 +5813,7 @@ static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
* Because we arrived here, we know both FP and SVE are enabled;
* otherwise we would have trapped access to the ZCR_ELn register.
*/
- new_len = sve_zcr_len_for_el(env, cur_el);
+ new_len = cpu_sve_get_zcr_len_for_el(env, cur_el);
if (new_len < old_len) {
tcg_sve_narrow_vq(env, new_len + 1);
}
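Taken together, the two hunks above implement a write that keeps only bits [3:0] and narrows live vector state only when the effective length shrinks. A self-contained sketch of that control flow, with the narrowing helper stubbed out (in the patch it is tcg_sve_narrow_vq()):

    /* Sketch of the zcr_write flow: mask the written value to [3:0], then
     * narrow only if the effective length got smaller. */
    #include <stdint.h>
    #include <stdio.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    static void narrow_vq_stub(uint32_t vq)
    {
        printf("zero Z/P state above VQ=%u\n", vq);
    }

    int main(void)
    {
        uint32_t max_len = 15;                      /* from the CPU model */
        uint32_t zcr = 0xf, written = 0x102;        /* example values */

        uint32_t old_len = MIN(zcr, max_len);
        zcr = written & 0xf;                        /* other bits are RAZ/WI */
        uint32_t new_len = MIN(zcr, max_len);
        if (new_len < old_len) {
            narrow_vq_stub(new_len + 1);            /* VQ = LEN + 1 */
        }
        return 0;
    }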
@@ -186,7 +186,7 @@ static int arm_gdb_get_svereg(CPUARMState *env, GByteArray *buf, int reg)
* We report in Vector Granules (VG) which is 64bit in a Z reg
* while the ZCR works in Vector Quads (VQ) which is 128bit chunks.
*/
- int vq = sve_zcr_len_for_el(env, arm_current_el(env)) + 1;
+ int vq = cpu_sve_get_zcr_len_for_el(env, arm_current_el(env)) + 1;
return gdb_get_reg64(buf, vq * 2);
}
default:
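The comment in this hunk explains the unit mismatch behind the `vq * 2`: gdb wants the length in 64-bit Vector Granules, while ZCR encodes 128-bit Vector Quads. A standalone arithmetic check:

    /* VG/VQ conversion: one quad (128 bits) is two granules (64 bits). */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t len = 3;                /* example LEN encoding */
        uint32_t vq = len + 1;           /* 4 quads -> 512-bit vectors */
        uint32_t vg = vq * 2;            /* 8 granules reported to gdb */
        printf("LEN=%u: VQ=%u, VG=%u\n", len, vq, vg);
        return 0;
    }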
@@ -1034,7 +1034,7 @@ static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
if (sve_el != 0 && fp_el == 0) {
zcr_len = 0;
} else {
- zcr_len = sve_zcr_len_for_el(env, el);
+ zcr_len = cpu_sve_get_zcr_len_for_el(env, el);
}
DP_TBFLAG_A64(flags, SVEEXC_EL, sve_el);
DP_TBFLAG_A64(flags, ZCR_LEN, zcr_len);
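For readers following the hflags rebuild: `sve_el != 0 && fp_el == 0` is the case where SVE accesses trap but FP does not, and the cached length is then forced to zero. A tiny standalone restatement of that decision (names are illustrative, not QEMU APIs):

    /* Sketch: when SVE accesses trap (sve_trap_el != 0) but FP does not,
     * the length cached in the TB flags is forced to 0. */
    #include <stdint.h>

    static uint32_t cached_zcr_len(int sve_trap_el, int fp_trap_el,
                                   uint32_t effective_len)
    {
        if (sve_trap_el != 0 && fp_trap_el == 0) {
            return 0;   /* SVE disabled, FP enabled: effective length is 0 */
        }
        return effective_len;
    }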
@@ -155,10 +155,10 @@ void tcg_sve_change_el(CPUARMState *env, int old_el,
*/
old_a64 = old_el ? arm_el_is_aa64(env, old_el) : el0_a64;
old_len = (old_a64 && !sve_exception_el(env, old_el)
- ? sve_zcr_len_for_el(env, old_el) : 0);
+ ? cpu_sve_get_zcr_len_for_el(env, old_el) : 0);
new_a64 = new_el ? arm_el_is_aa64(env, new_el) : el0_a64;
new_len = (new_a64 && !sve_exception_el(env, new_el)
- ? sve_zcr_len_for_el(env, new_el) : 0);
+ ? cpu_sve_get_zcr_len_for_el(env, new_el) : 0);
/* When changing vector length, clear inaccessible state. */
if (new_len < old_len) {
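Finally, the EL-change path treats the length as zero whenever the EL in question runs AArch32 or SVE would trap there, and clears the now-inaccessible high part of the registers only when the length shrinks. A minimal standalone sketch of that comparison (illustrative names only, not QEMU APIs):

    /* Sketch of the old/new length comparison above. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t len_for_el(bool is_aa64, bool sve_traps, uint32_t zcr_len)
    {
        return (is_aa64 && !sve_traps) ? zcr_len : 0;
    }

    int main(void)
    {
        uint32_t old_len = len_for_el(true, false, 7);  /* leaving AArch64 EL */
        uint32_t new_len = len_for_el(true, true, 7);   /* SVE traps at new EL */

        if (new_len < old_len) {
            printf("clear Z/P state above VQ=%u\n", new_len + 1);
        }
        return 0;
    }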