Message ID | eb05e70faaae3c328bfd3cc6c1aa4c7c90a351fd.1692971966.git.nicola.vetrini@bugseng.com (mailing list archive)
---|---
State | New, archived
Headers | show
Series | [XEN,v3] arm64/vfp: address MISRA C:2012 Dir 4.3
On Fri, 25 Aug 2023, Nicola Vetrini wrote:
> Directive 4.3 prescribes the following:
> "Assembly language shall be encapsulated and isolated",
> on the grounds of improved readability and ease of maintenance.
>
> A static inline function is the chosen encapsulation mechanism.
>
> No functional change.
>
> Signed-off-by: Nicola Vetrini <nicola.vetrini@bugseng.com>
> Reviewed-by: Michal Orzel <michal.orzel@amd.com>

Acked-by: Stefano Stabellini <sstabellini@kernel.org>

> ---
> Changes in v2:
> - Switched to a static inline function
> Changes in v3:
> - Applied changes suggested by Michal
> ---
>  xen/arch/arm/arm64/vfp.c | 82 +++++++++++++++++++++-------------------
>  1 file changed, 44 insertions(+), 38 deletions(-)
>
> diff --git a/xen/arch/arm/arm64/vfp.c b/xen/arch/arm/arm64/vfp.c
> index 2d0d7c2e6ddb..c4f89c7b0e33 100644
> --- a/xen/arch/arm/arm64/vfp.c
> +++ b/xen/arch/arm/arm64/vfp.c
> @@ -4,6 +4,48 @@
>  #include <asm/vfp.h>
>  #include <asm/arm64/sve.h>
>
> +static inline void save_state(uint64_t *fpregs)
> +{
> +    asm volatile("stp q0, q1, [%1, #16 * 0]\n\t"
> +                 "stp q2, q3, [%1, #16 * 2]\n\t"
> +                 "stp q4, q5, [%1, #16 * 4]\n\t"
> +                 "stp q6, q7, [%1, #16 * 6]\n\t"
> +                 "stp q8, q9, [%1, #16 * 8]\n\t"
> +                 "stp q10, q11, [%1, #16 * 10]\n\t"
> +                 "stp q12, q13, [%1, #16 * 12]\n\t"
> +                 "stp q14, q15, [%1, #16 * 14]\n\t"
> +                 "stp q16, q17, [%1, #16 * 16]\n\t"
> +                 "stp q18, q19, [%1, #16 * 18]\n\t"
> +                 "stp q20, q21, [%1, #16 * 20]\n\t"
> +                 "stp q22, q23, [%1, #16 * 22]\n\t"
> +                 "stp q24, q25, [%1, #16 * 24]\n\t"
> +                 "stp q26, q27, [%1, #16 * 26]\n\t"
> +                 "stp q28, q29, [%1, #16 * 28]\n\t"
> +                 "stp q30, q31, [%1, #16 * 30]\n\t"
> +                 : "=Q" (*fpregs) : "r" (fpregs));
> +}
> +
> +static inline void restore_state(const uint64_t *fpregs)
> +{
> +    asm volatile("ldp q0, q1, [%1, #16 * 0]\n\t"
> +                 "ldp q2, q3, [%1, #16 * 2]\n\t"
> +                 "ldp q4, q5, [%1, #16 * 4]\n\t"
> +                 "ldp q6, q7, [%1, #16 * 6]\n\t"
> +                 "ldp q8, q9, [%1, #16 * 8]\n\t"
> +                 "ldp q10, q11, [%1, #16 * 10]\n\t"
> +                 "ldp q12, q13, [%1, #16 * 12]\n\t"
> +                 "ldp q14, q15, [%1, #16 * 14]\n\t"
> +                 "ldp q16, q17, [%1, #16 * 16]\n\t"
> +                 "ldp q18, q19, [%1, #16 * 18]\n\t"
> +                 "ldp q20, q21, [%1, #16 * 20]\n\t"
> +                 "ldp q22, q23, [%1, #16 * 22]\n\t"
> +                 "ldp q24, q25, [%1, #16 * 24]\n\t"
> +                 "ldp q26, q27, [%1, #16 * 26]\n\t"
> +                 "ldp q28, q29, [%1, #16 * 28]\n\t"
> +                 "ldp q30, q31, [%1, #16 * 30]\n\t"
> +                 : : "Q" (*fpregs), "r" (fpregs));
> +}
> +
>  void vfp_save_state(struct vcpu *v)
>  {
>      if ( !cpu_has_fp )
> @@ -12,25 +54,7 @@ void vfp_save_state(struct vcpu *v)
>      if ( is_sve_domain(v->domain) )
>          sve_save_state(v);
>      else
> -    {
> -        asm volatile("stp q0, q1, [%1, #16 * 0]\n\t"
> -                     "stp q2, q3, [%1, #16 * 2]\n\t"
> -                     "stp q4, q5, [%1, #16 * 4]\n\t"
> -                     "stp q6, q7, [%1, #16 * 6]\n\t"
> -                     "stp q8, q9, [%1, #16 * 8]\n\t"
> -                     "stp q10, q11, [%1, #16 * 10]\n\t"
> -                     "stp q12, q13, [%1, #16 * 12]\n\t"
> -                     "stp q14, q15, [%1, #16 * 14]\n\t"
> -                     "stp q16, q17, [%1, #16 * 16]\n\t"
> -                     "stp q18, q19, [%1, #16 * 18]\n\t"
> -                     "stp q20, q21, [%1, #16 * 20]\n\t"
> -                     "stp q22, q23, [%1, #16 * 22]\n\t"
> -                     "stp q24, q25, [%1, #16 * 24]\n\t"
> -                     "stp q26, q27, [%1, #16 * 26]\n\t"
> -                     "stp q28, q29, [%1, #16 * 28]\n\t"
> -                     "stp q30, q31, [%1, #16 * 30]\n\t"
> -                     : "=Q" (*v->arch.vfp.fpregs) : "r" (v->arch.vfp.fpregs));
> -    }
> +        save_state(v->arch.vfp.fpregs);
>
>      v->arch.vfp.fpsr = READ_SYSREG(FPSR);
>      v->arch.vfp.fpcr = READ_SYSREG(FPCR);
> @@ -46,25 +70,7 @@ void vfp_restore_state(struct vcpu *v)
>      if ( is_sve_domain(v->domain) )
>          sve_restore_state(v);
>      else
> -    {
> -        asm volatile("ldp q0, q1, [%1, #16 * 0]\n\t"
> -                     "ldp q2, q3, [%1, #16 * 2]\n\t"
> -                     "ldp q4, q5, [%1, #16 * 4]\n\t"
> -                     "ldp q6, q7, [%1, #16 * 6]\n\t"
> -                     "ldp q8, q9, [%1, #16 * 8]\n\t"
> -                     "ldp q10, q11, [%1, #16 * 10]\n\t"
> -                     "ldp q12, q13, [%1, #16 * 12]\n\t"
> -                     "ldp q14, q15, [%1, #16 * 14]\n\t"
> -                     "ldp q16, q17, [%1, #16 * 16]\n\t"
> -                     "ldp q18, q19, [%1, #16 * 18]\n\t"
> -                     "ldp q20, q21, [%1, #16 * 20]\n\t"
> -                     "ldp q22, q23, [%1, #16 * 22]\n\t"
> -                     "ldp q24, q25, [%1, #16 * 24]\n\t"
> -                     "ldp q26, q27, [%1, #16 * 26]\n\t"
> -                     "ldp q28, q29, [%1, #16 * 28]\n\t"
> -                     "ldp q30, q31, [%1, #16 * 30]\n\t"
> -                     : : "Q" (*v->arch.vfp.fpregs), "r" (v->arch.vfp.fpregs));
> -    }
> +        restore_state(v->arch.vfp.fpregs);
>
>      WRITE_SYSREG(v->arch.vfp.fpsr, FPSR);
>      WRITE_SYSREG(v->arch.vfp.fpcr, FPCR);
> --
> 2.34.1
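For readers not familiar with MISRA C:2012 Dir 4.3, the pattern the patch applies is simply: confine each asm() statement to one small static inline helper, and let every caller use the C-level wrapper instead of open-coding assembly. Below is a minimal, hypothetical sketch of that idea; the helper name and the CNTVCT_EL0 counter read are illustrative only and are not part of this patch.

    #include <stdint.h>

    /*
     * Dir 4.3-style encapsulation: the inline assembly is isolated in a
     * single static inline helper, so it is documented and maintained in
     * exactly one place.
     */
    static inline uint64_t read_virtual_counter(void)
    {
        uint64_t val;

        /* Read CNTVCT_EL0, the AArch64 virtual counter register. */
        asm volatile("mrs %0, cntvct_el0" : "=r" (val));

        return val;
    }

    /* Callers remain pure C and never contain asm() themselves. */
    static uint64_t elapsed_ticks(uint64_t start)
    {
        return read_virtual_counter() - start;
    }

In the patch above, save_state() and restore_state() play the same role for the q0-q31 register file, leaving vfp_save_state() and vfp_restore_state() free of inline assembly.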