[XEN] arm64/vfp: address MISRA C:2012 Dir 4.3

Message ID 998ecebdda92f1704fa35e8692b1f7e37b674d16.1692800477.git.nicola.vetrini@bugseng.com (mailing list archive)
State Superseded
Series [XEN] arm64/vfp: address MISRA C:2012 Dir 4.3

Commit Message

Nicola Vetrini Aug. 23, 2023, 2:27 p.m. UTC
Directive 4.3 prescribes the following:
"Assembly language shall be encapsulated and isolated",
on the grounds of improved readability and ease of maintenance.
The Directive is violated in this case by asm code in between C code.

A macro is the chosen encapsulation mechanism.

No functional change.

Signed-off-by: Nicola Vetrini <nicola.vetrini@bugseng.com>
---
A couple of remarks:
- An inline function is another possible encapsulation technique
- A #define wrapper() do { asm volatile (...) } while(0); is also allowed,
  but I don't think this is needed in the specific case.
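
For illustration, a minimal sketch of the inline-function alternative
mentioned in the first remark (the function name and the uint64_t *
parameter type are hypothetical; the asm body is the one from this patch):

/* Sketch: hypothetical name and signature; asm body taken from this patch. */
static inline void vfp_save_fpregs(uint64_t *fpregs)
{
    /* Store all 32 Q registers, two per stp, into the fpregs buffer. */
    asm volatile("stp q0, q1, [%1, #16 * 0]\n\t"
                 "stp q2, q3, [%1, #16 * 2]\n\t"
                 "stp q4, q5, [%1, #16 * 4]\n\t"
                 "stp q6, q7, [%1, #16 * 6]\n\t"
                 "stp q8, q9, [%1, #16 * 8]\n\t"
                 "stp q10, q11, [%1, #16 * 10]\n\t"
                 "stp q12, q13, [%1, #16 * 12]\n\t"
                 "stp q14, q15, [%1, #16 * 14]\n\t"
                 "stp q16, q17, [%1, #16 * 16]\n\t"
                 "stp q18, q19, [%1, #16 * 18]\n\t"
                 "stp q20, q21, [%1, #16 * 20]\n\t"
                 "stp q22, q23, [%1, #16 * 22]\n\t"
                 "stp q24, q25, [%1, #16 * 24]\n\t"
                 "stp q26, q27, [%1, #16 * 26]\n\t"
                 "stp q28, q29, [%1, #16 * 28]\n\t"
                 "stp q30, q31, [%1, #16 * 30]\n\t"
                 : "=Q" (*fpregs) : "r" (fpregs));
}

A caller would then write vfp_save_fpregs(v->arch.vfp.fpregs), keeping the
call site identical in shape to the macro version while letting the
compiler type-check the argument.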
---
 xen/arch/arm/arm64/vfp.c | 74 ++++++++++++++++++++++------------------
 1 file changed, 40 insertions(+), 34 deletions(-)

--
2.34.1

Comments

Julien Grall Aug. 23, 2023, 2:59 p.m. UTC | #1
Hi,

On 23/08/2023 15:27, Nicola Vetrini wrote:
> Directive 4.3 prescribes the following:
> "Assembly language shall be encapsulated and isolated",
> on the grounds of improved readability and ease of maintenance.
> The Directive is violated in this case by asm code in between C code.
> 
> A macro is the chosen encapsulation mechanism.

I would rather we used a static inline.

> 
> No functional change.
> 
> Signed-off-by: Nicola Vetrini <nicola.vetrini@bugseng.com>
> ---
> A couple of remarks:
> - An inline function is another possible encapsulation technique
> - A #define wrapper() do { asm volatile (...) } while(0); is also allowed,
>    but I don't think this is needed in the specific case.
> ---
>   xen/arch/arm/arm64/vfp.c | 74 ++++++++++++++++++++++------------------
>   1 file changed, 40 insertions(+), 34 deletions(-)
> 
> diff --git a/xen/arch/arm/arm64/vfp.c b/xen/arch/arm/arm64/vfp.c
> index 2d0d7c2e6ddb..0248601453ec 100644
> --- a/xen/arch/arm/arm64/vfp.c
> +++ b/xen/arch/arm/arm64/vfp.c
> @@ -4,6 +4,44 @@
>   #include <asm/vfp.h>
>   #include <asm/arm64/sve.h>
> 
> +#define save_state(fpregs)                           \
> +    asm volatile("stp q0, q1, [%1, #16 * 0]\n\t"     \
> +                 "stp q2, q3, [%1, #16 * 2]\n\t"     \
> +                 "stp q4, q5, [%1, #16 * 4]\n\t"     \
> +                 "stp q6, q7, [%1, #16 * 6]\n\t"     \
> +                 "stp q8, q9, [%1, #16 * 8]\n\t"     \
> +                 "stp q10, q11, [%1, #16 * 10]\n\t"  \
> +                 "stp q12, q13, [%1, #16 * 12]\n\t"  \
> +                 "stp q14, q15, [%1, #16 * 14]\n\t"  \
> +                 "stp q16, q17, [%1, #16 * 16]\n\t"  \
> +                 "stp q18, q19, [%1, #16 * 18]\n\t"  \
> +                 "stp q20, q21, [%1, #16 * 20]\n\t"  \
> +                 "stp q22, q23, [%1, #16 * 22]\n\t"  \
> +                 "stp q24, q25, [%1, #16 * 24]\n\t"  \
> +                 "stp q26, q27, [%1, #16 * 26]\n\t"  \
> +                 "stp q28, q29, [%1, #16 * 28]\n\t"  \
> +                 "stp q30, q31, [%1, #16 * 30]\n\t"  \
> +                 : "=Q" (*fpregs) : "r" (fpregs));
> +
> +#define restore_state(fpregs)                        \
> +    asm volatile("ldp q0, q1, [%1, #16 * 0]\n\t"     \
> +                 "ldp q2, q3, [%1, #16 * 2]\n\t"     \
> +                 "ldp q4, q5, [%1, #16 * 4]\n\t"     \
> +                 "ldp q6, q7, [%1, #16 * 6]\n\t"     \
> +                 "ldp q8, q9, [%1, #16 * 8]\n\t"     \
> +                 "ldp q10, q11, [%1, #16 * 10]\n\t"  \
> +                 "ldp q12, q13, [%1, #16 * 12]\n\t"  \
> +                 "ldp q14, q15, [%1, #16 * 14]\n\t"  \
> +                 "ldp q16, q17, [%1, #16 * 16]\n\t"  \
> +                 "ldp q18, q19, [%1, #16 * 18]\n\t"  \
> +                 "ldp q20, q21, [%1, #16 * 20]\n\t"  \
> +                 "ldp q22, q23, [%1, #16 * 22]\n\t"  \
> +                 "ldp q24, q25, [%1, #16 * 24]\n\t"  \
> +                 "ldp q26, q27, [%1, #16 * 26]\n\t"  \
> +                 "ldp q28, q29, [%1, #16 * 28]\n\t"  \
> +                 "ldp q30, q31, [%1, #16 * 30]\n\t"  \
> +                 : : "Q" (*fpregs), "r" (fpregs))
> +
>   void vfp_save_state(struct vcpu *v)
>   {
>       if ( !cpu_has_fp )
> @@ -13,23 +51,7 @@ void vfp_save_state(struct vcpu *v)
>           sve_save_state(v);
>       else
>       {
> -        asm volatile("stp q0, q1, [%1, #16 * 0]\n\t"
> -                     "stp q2, q3, [%1, #16 * 2]\n\t"
> -                     "stp q4, q5, [%1, #16 * 4]\n\t"
> -                     "stp q6, q7, [%1, #16 * 6]\n\t"
> -                     "stp q8, q9, [%1, #16 * 8]\n\t"
> -                     "stp q10, q11, [%1, #16 * 10]\n\t"
> -                     "stp q12, q13, [%1, #16 * 12]\n\t"
> -                     "stp q14, q15, [%1, #16 * 14]\n\t"
> -                     "stp q16, q17, [%1, #16 * 16]\n\t"
> -                     "stp q18, q19, [%1, #16 * 18]\n\t"
> -                     "stp q20, q21, [%1, #16 * 20]\n\t"
> -                     "stp q22, q23, [%1, #16 * 22]\n\t"
> -                     "stp q24, q25, [%1, #16 * 24]\n\t"
> -                     "stp q26, q27, [%1, #16 * 26]\n\t"
> -                     "stp q28, q29, [%1, #16 * 28]\n\t"
> -                     "stp q30, q31, [%1, #16 * 30]\n\t"
> -                     : "=Q" (*v->arch.vfp.fpregs) : "r" (v->arch.vfp.fpregs));
> +        save_state(v->arch.vfp.fpregs);
>       }
> 
>       v->arch.vfp.fpsr = READ_SYSREG(FPSR);
> @@ -47,23 +69,7 @@ void vfp_restore_state(struct vcpu *v)
>           sve_restore_state(v);
>       else
>       {
> -        asm volatile("ldp q0, q1, [%1, #16 * 0]\n\t"
> -                     "ldp q2, q3, [%1, #16 * 2]\n\t"
> -                     "ldp q4, q5, [%1, #16 * 4]\n\t"
> -                     "ldp q6, q7, [%1, #16 * 6]\n\t"
> -                     "ldp q8, q9, [%1, #16 * 8]\n\t"
> -                     "ldp q10, q11, [%1, #16 * 10]\n\t"
> -                     "ldp q12, q13, [%1, #16 * 12]\n\t"
> -                     "ldp q14, q15, [%1, #16 * 14]\n\t"
> -                     "ldp q16, q17, [%1, #16 * 16]\n\t"
> -                     "ldp q18, q19, [%1, #16 * 18]\n\t"
> -                     "ldp q20, q21, [%1, #16 * 20]\n\t"
> -                     "ldp q22, q23, [%1, #16 * 22]\n\t"
> -                     "ldp q24, q25, [%1, #16 * 24]\n\t"
> -                     "ldp q26, q27, [%1, #16 * 26]\n\t"
> -                     "ldp q28, q29, [%1, #16 * 28]\n\t"
> -                     "ldp q30, q31, [%1, #16 * 30]\n\t"
> -                     : : "Q" (*v->arch.vfp.fpregs), "r" (v->arch.vfp.fpregs));
> +        restore_state(v->arch.vfp.fpregs);
>       }
> 
>       WRITE_SYSREG(v->arch.vfp.fpsr, FPSR);
> --
> 2.34.1
Nicola Vetrini Aug. 23, 2023, 3:17 p.m. UTC | #2
On 23/08/2023 16:59, Julien Grall wrote:
> Hi,
> 
> On 23/08/2023 15:27, Nicola Vetrini wrote:
>> Directive 4.3 prescribes the following:
>> "Assembly language shall be encapsulated and isolated",
>> on the grounds of improved readability and ease of maintenance.
>> The Directive is violated in this case by asm code in between C code.
>> 
>> A macro is the chosen encapsulation mechanism.
> 
> I would rather we used a static inline.
> 

Ok
Nicola Vetrini Aug. 23, 2023, 4:09 p.m. UTC | #3
On 23/08/2023 16:59, Julien Grall wrote:
> Hi,
> 
> On 23/08/2023 15:27, Nicola Vetrini wrote:
>> Directive 4.3 prescribes the following:
>> "Assembly language shall be encapsulated and isolated",
>> on the grounds of improved readability and ease of maintenance.
>> The Directive is violated in this case by asm code in between C code.
>> 
>> A macro is the chosen encapsulation mechanism.
> 
> I would rather we used a static inline.

Just to prevent a possible back and forth on a similar patch:
is it ok to adopt the same approach with the inline asm in
xen/arch/arm/arm64/lib/bitops.c in the definition of the macros
'bitop' and 'testop'?
Julien Grall Aug. 23, 2023, 4:48 p.m. UTC | #4
Hi Nicola,

On 23/08/2023 17:09, Nicola Vetrini wrote:
> On 23/08/2023 16:59, Julien Grall wrote:
>> Hi,
>>
>> On 23/08/2023 15:27, Nicola Vetrini wrote:
>>> Directive 4.3 prescribes the following:
>>> "Assembly language shall be encapsulated and isolated",
>>> on the grounds of improved readability and ease of maintenance.
>>> The Directive is violated in this case by asm code in between C code.
>>>
>>> A macro is the chosen encapsulation mechanism.
>>
>> I would rather we used a static inline.
> 
> Just to prevent a possible back and forth on a similar patch:
> is it ok to adopt the same approach with the inline asm in
> xen/arch/arm/arm64/lib/bitops.c in the definition of the macros
> 'bitop' and 'testop'?

So, in the VFP case I agree that moving the assembly part outside of 
vfp_*_state() makes sense even without MISRA. But for the bitops I don't 
agree with moving the assembly code out, as the C function is tightly 
coupled with the assembly code.

So this would please MISRA but IMHO would make the code more difficult 
to understand. So I think we should deviate for the bitops.

Bertrand, Stefano, what do you think?

Cheers,
Stefano Stabellini Aug. 23, 2023, 8:30 p.m. UTC | #5
On Wed, 23 Aug 2023, Julien Grall wrote:
> Hi Nicola,
> 
> On 23/08/2023 17:09, Nicola Vetrini wrote:
> > On 23/08/2023 16:59, Julien Grall wrote:
> > > Hi,
> > > 
> > > On 23/08/2023 15:27, Nicola Vetrini wrote:
> > > > Directive 4.3 prescribes the following:
> > > > "Assembly language shall be encapsulated and isolated",
> > > > on the grounds of improved readability and ease of maintenance.
> > > > The Directive is violated in this case by asm code in between C code.
> > > > 
> > > > A macro is the chosen encapsulation mechanism.
> > > 
> > > I would rather we used a static inline.
> > 
> > Just to prevent a possible back and forth on a similar patch:
> > is it ok to adopt the same approach with the inline asm in
> > xen/arch/arm/arm64/lib/bitops.c in the definition of the macros
> > 'bitop' and 'testop'?
> 
> So, in the VFP case I agree that moving the assembly part outside of
> vfp_*_state() makes sense even without MISRA. But for the bitops I don't
> agree with moving the assembly code out, as the C function is tightly
> coupled with the assembly code.
> 
> So this would please MISRA but IMHO would make the code more difficult to
> understand. So I think we should deviate for the bitops.
> 
> Bertrand, Stefano, what do you think?

I agree. I think bitops.c is already encapsulated and introducing
additional macros or functions is likely to make the code worse. testop
and bitop are the encapsulating functions; as you can see, there is very
little else other than the asm volatile and a do/while loop.
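
For reference, the pattern being described has roughly the following
shape (a simplified sketch, not the actual bitops.c source; the function
name and operand handling are illustrative):

/* Sketch: hypothetical set-bit helper built from an exclusive
 * load/modify/store sequence retried by a C do/while loop. */
static void set_bit_sketch(int nr, volatile unsigned int *p)
{
    volatile unsigned int *ptr = p + (nr / 32);  /* word holding the bit */
    unsigned int mask = 1U << (nr % 32);         /* bit within that word */
    unsigned int tmp, fail;

    do
    {
        asm volatile("ldxr %w1, %2\n\t"          /* load-exclusive */
                     "orr  %w1, %w1, %w3\n\t"    /* set the bit */
                     "stxr %w0, %w1, %2\n\t"     /* store-exclusive */
                     : "=&r" (fail), "=&r" (tmp), "+Q" (*ptr)
                     : "r" (mask));
    } while ( fail );                            /* retry if stxr failed */
}

The do/while is what retries the store-exclusive, so the C wrapper and
the asm form one inseparable unit; splitting them further would not make
the code clearer, hence the deviation.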
Nicola Vetrini Aug. 24, 2023, 7:35 a.m. UTC | #6
On 23/08/2023 22:30, Stefano Stabellini wrote:
> On Wed, 23 Aug 2023, Julien Grall wrote:
>> Hi Nicola,
>> 
>> On 23/08/2023 17:09, Nicola Vetrini wrote:
>> > On 23/08/2023 16:59, Julien Grall wrote:
>> > > Hi,
>> > >
>> > > On 23/08/2023 15:27, Nicola Vetrini wrote:
>> > > > Directive 4.3 prescribes the following:
>> > > > "Assembly language shall be encapsulated and isolated",
>> > > > on the grounds of improved readability and ease of maintenance.
>> > > > The Directive is violated in this case by asm code in between C code.
>> > > >
>> > > > A macro is the chosen encapsulation mechanism.
>> > >
>> > > I would rather we used a static inline.
>> >
>> > Just to prevent a possible back and forth on a similar patch:
>> > is it ok to adopt the same approach with the inline asm in
>> > xen/arch/arm/arm64/lib/bitops.c in the definition of the macros
>> > 'bitop' and 'testop'?
>> 
>> So, in the VFP case I agree that moving the assembly part outside of
>> vfp_*_state() makes sense even without MISRA. But for the bitops I don't
>> agree with moving the assembly code out, as the C function is tightly
>> coupled with the assembly code.
>> 
>> So this would please MISRA but IMHO would make the code more difficult
>> to understand. So I think we should deviate for the bitops.
>> 
>> Bertrand, Stefano, what do you think?
> 
> I agree. I think bitops.c is already encapsulated and introducing
> additional macros or functions is likely to make the code worse. testop
> and bitop are the encapsulating functions; as you can see, there is very
> little else other than the asm volatile and a do/while loop.

Ok, thanks.

Patch

diff --git a/xen/arch/arm/arm64/vfp.c b/xen/arch/arm/arm64/vfp.c
index 2d0d7c2e6ddb..0248601453ec 100644
--- a/xen/arch/arm/arm64/vfp.c
+++ b/xen/arch/arm/arm64/vfp.c
@@ -4,6 +4,44 @@ 
 #include <asm/vfp.h>
 #include <asm/arm64/sve.h>

+#define save_state(fpregs)                           \
+    asm volatile("stp q0, q1, [%1, #16 * 0]\n\t"     \
+                 "stp q2, q3, [%1, #16 * 2]\n\t"     \
+                 "stp q4, q5, [%1, #16 * 4]\n\t"     \
+                 "stp q6, q7, [%1, #16 * 6]\n\t"     \
+                 "stp q8, q9, [%1, #16 * 8]\n\t"     \
+                 "stp q10, q11, [%1, #16 * 10]\n\t"  \
+                 "stp q12, q13, [%1, #16 * 12]\n\t"  \
+                 "stp q14, q15, [%1, #16 * 14]\n\t"  \
+                 "stp q16, q17, [%1, #16 * 16]\n\t"  \
+                 "stp q18, q19, [%1, #16 * 18]\n\t"  \
+                 "stp q20, q21, [%1, #16 * 20]\n\t"  \
+                 "stp q22, q23, [%1, #16 * 22]\n\t"  \
+                 "stp q24, q25, [%1, #16 * 24]\n\t"  \
+                 "stp q26, q27, [%1, #16 * 26]\n\t"  \
+                 "stp q28, q29, [%1, #16 * 28]\n\t"  \
+                 "stp q30, q31, [%1, #16 * 30]\n\t"  \
+                 : "=Q" (*fpregs) : "r" (fpregs));
+
+#define restore_state(fpregs)                        \
+    asm volatile("ldp q0, q1, [%1, #16 * 0]\n\t"     \
+                 "ldp q2, q3, [%1, #16 * 2]\n\t"     \
+                 "ldp q4, q5, [%1, #16 * 4]\n\t"     \
+                 "ldp q6, q7, [%1, #16 * 6]\n\t"     \
+                 "ldp q8, q9, [%1, #16 * 8]\n\t"     \
+                 "ldp q10, q11, [%1, #16 * 10]\n\t"  \
+                 "ldp q12, q13, [%1, #16 * 12]\n\t"  \
+                 "ldp q14, q15, [%1, #16 * 14]\n\t"  \
+                 "ldp q16, q17, [%1, #16 * 16]\n\t"  \
+                 "ldp q18, q19, [%1, #16 * 18]\n\t"  \
+                 "ldp q20, q21, [%1, #16 * 20]\n\t"  \
+                 "ldp q22, q23, [%1, #16 * 22]\n\t"  \
+                 "ldp q24, q25, [%1, #16 * 24]\n\t"  \
+                 "ldp q26, q27, [%1, #16 * 26]\n\t"  \
+                 "ldp q28, q29, [%1, #16 * 28]\n\t"  \
+                 "ldp q30, q31, [%1, #16 * 30]\n\t"  \
+                 : : "Q" (*fpregs), "r" (fpregs))
+
 void vfp_save_state(struct vcpu *v)
 {
     if ( !cpu_has_fp )
@@ -13,23 +51,7 @@  void vfp_save_state(struct vcpu *v)
         sve_save_state(v);
     else
     {
-        asm volatile("stp q0, q1, [%1, #16 * 0]\n\t"
-                     "stp q2, q3, [%1, #16 * 2]\n\t"
-                     "stp q4, q5, [%1, #16 * 4]\n\t"
-                     "stp q6, q7, [%1, #16 * 6]\n\t"
-                     "stp q8, q9, [%1, #16 * 8]\n\t"
-                     "stp q10, q11, [%1, #16 * 10]\n\t"
-                     "stp q12, q13, [%1, #16 * 12]\n\t"
-                     "stp q14, q15, [%1, #16 * 14]\n\t"
-                     "stp q16, q17, [%1, #16 * 16]\n\t"
-                     "stp q18, q19, [%1, #16 * 18]\n\t"
-                     "stp q20, q21, [%1, #16 * 20]\n\t"
-                     "stp q22, q23, [%1, #16 * 22]\n\t"
-                     "stp q24, q25, [%1, #16 * 24]\n\t"
-                     "stp q26, q27, [%1, #16 * 26]\n\t"
-                     "stp q28, q29, [%1, #16 * 28]\n\t"
-                     "stp q30, q31, [%1, #16 * 30]\n\t"
-                     : "=Q" (*v->arch.vfp.fpregs) : "r" (v->arch.vfp.fpregs));
+        save_state(v->arch.vfp.fpregs);
     }

     v->arch.vfp.fpsr = READ_SYSREG(FPSR);
@@ -47,23 +69,7 @@  void vfp_restore_state(struct vcpu *v)
         sve_restore_state(v);
     else
     {
-        asm volatile("ldp q0, q1, [%1, #16 * 0]\n\t"
-                     "ldp q2, q3, [%1, #16 * 2]\n\t"
-                     "ldp q4, q5, [%1, #16 * 4]\n\t"
-                     "ldp q6, q7, [%1, #16 * 6]\n\t"
-                     "ldp q8, q9, [%1, #16 * 8]\n\t"
-                     "ldp q10, q11, [%1, #16 * 10]\n\t"
-                     "ldp q12, q13, [%1, #16 * 12]\n\t"
-                     "ldp q14, q15, [%1, #16 * 14]\n\t"
-                     "ldp q16, q17, [%1, #16 * 16]\n\t"
-                     "ldp q18, q19, [%1, #16 * 18]\n\t"
-                     "ldp q20, q21, [%1, #16 * 20]\n\t"
-                     "ldp q22, q23, [%1, #16 * 22]\n\t"
-                     "ldp q24, q25, [%1, #16 * 24]\n\t"
-                     "ldp q26, q27, [%1, #16 * 26]\n\t"
-                     "ldp q28, q29, [%1, #16 * 28]\n\t"
-                     "ldp q30, q31, [%1, #16 * 30]\n\t"
-                     : : "Q" (*v->arch.vfp.fpregs), "r" (v->arch.vfp.fpregs));
+        restore_state(v->arch.vfp.fpregs);
     }

     WRITE_SYSREG(v->arch.vfp.fpsr, FPSR);