[XEN,v2] arm64/vfp: address MISRA C:2012 Dir 4.3

Message ID 140f450d4f4f88096158e54522fc2734367a90cb.1692807017.git.nicola.vetrini@bugseng.com (mailing list archive)
State Superseded
Series [XEN,v2] arm64/vfp: address MISRA C:2012 Dir 4.3

Commit Message

Nicola Vetrini Aug. 24, 2023, 7:37 a.m. UTC
Directive 4.3 prescribes the following:
"Assembly language shall be encapsulated and isolated",
on the grounds of improved readability and ease of maintenance.

A static inline function is the chosen encapsulation mechanism.

No functional change.

Signed-off-by: Nicola Vetrini <nicola.vetrini@bugseng.com>
---
Changes in v2:
- Switched to a static inline function
---
 xen/arch/arm/arm64/vfp.c | 78 ++++++++++++++++++++++------------------
 1 file changed, 44 insertions(+), 34 deletions(-)

Comments

Michal Orzel Aug. 24, 2023, 8:01 a.m. UTC | #1
Hi Nicola,

On 24/08/2023 09:37, Nicola Vetrini wrote:
> 
> 
> Directive 4.3 prescribes the following:
> "Assembly language shall be encapsulated and isolated",
> on the grounds of improved readability and ease of maintenance.
> 
> A static inline function is the chosen encapsulation mechanism.
> 
> No functional change.
> 
> Signed-off-by: Nicola Vetrini <nicola.vetrini@bugseng.com>
> ---
> Changes in v2:
> - Switched to a static inline function
> ---
>  xen/arch/arm/arm64/vfp.c | 78 ++++++++++++++++++++++------------------
>  1 file changed, 44 insertions(+), 34 deletions(-)
> 
> diff --git a/xen/arch/arm/arm64/vfp.c b/xen/arch/arm/arm64/vfp.c
> index 2d0d7c2e6ddb..5c884380ee42 100644
> --- a/xen/arch/arm/arm64/vfp.c
> +++ b/xen/arch/arm/arm64/vfp.c
> @@ -4,6 +4,48 @@
>  #include <asm/vfp.h>
>  #include <asm/arm64/sve.h>
> 
> +static inline void save_state(uint64_t *fpregs)
> +{
> +    asm volatile("stp q0, q1, [%1, #16 * 0]\n\t"
> +                 "stp q2, q3, [%1, #16 * 2]\n\t"
> +                 "stp q4, q5, [%1, #16 * 4]\n\t"
> +                 "stp q6, q7, [%1, #16 * 6]\n\t"
> +                 "stp q8, q9, [%1, #16 * 8]\n\t"
> +                 "stp q10, q11, [%1, #16 * 10]\n\t"
> +                 "stp q12, q13, [%1, #16 * 12]\n\t"
> +                 "stp q14, q15, [%1, #16 * 14]\n\t"
> +                 "stp q16, q17, [%1, #16 * 16]\n\t"
> +                 "stp q18, q19, [%1, #16 * 18]\n\t"
> +                 "stp q20, q21, [%1, #16 * 20]\n\t"
> +                 "stp q22, q23, [%1, #16 * 22]\n\t"
> +                 "stp q24, q25, [%1, #16 * 24]\n\t"
> +                 "stp q26, q27, [%1, #16 * 26]\n\t"
> +                 "stp q28, q29, [%1, #16 * 28]\n\t"
> +                 "stp q30, q31, [%1, #16 * 30]\n\t"
> +                 : "=Q" (*fpregs) : "r" (fpregs));
> +}
> +
> +static inline void restore_state(uint64_t *fpregs)
This can be const as you are loading data from fpregs into registers

> +{
> +    asm volatile("ldp q0, q1, [%1, #16 * 0]\n\t"
> +                 "ldp q2, q3, [%1, #16 * 2]\n\t"
> +                 "ldp q4, q5, [%1, #16 * 4]\n\t"
> +                 "ldp q6, q7, [%1, #16 * 6]\n\t"
> +                 "ldp q8, q9, [%1, #16 * 8]\n\t"
> +                 "ldp q10, q11, [%1, #16 * 10]\n\t"
> +                 "ldp q12, q13, [%1, #16 * 12]\n\t"
> +                 "ldp q14, q15, [%1, #16 * 14]\n\t"
> +                 "ldp q16, q17, [%1, #16 * 16]\n\t"
> +                 "ldp q18, q19, [%1, #16 * 18]\n\t"
> +                 "ldp q20, q21, [%1, #16 * 20]\n\t"
> +                 "ldp q22, q23, [%1, #16 * 22]\n\t"
> +                 "ldp q24, q25, [%1, #16 * 24]\n\t"
> +                 "ldp q26, q27, [%1, #16 * 26]\n\t"
> +                 "ldp q28, q29, [%1, #16 * 28]\n\t"
> +                 "ldp q30, q31, [%1, #16 * 30]\n\t"
> +                 : : "Q" (*fpregs), "r" (fpregs));
> +}
> +
>  void vfp_save_state(struct vcpu *v)
>  {
>      if ( !cpu_has_fp )
> @@ -13,23 +55,7 @@ void vfp_save_state(struct vcpu *v)
>          sve_save_state(v);
>      else
>      {
No need for brackets (see the sketch after the quoted hunks)
> -        asm volatile("stp q0, q1, [%1, #16 * 0]\n\t"
> -                     "stp q2, q3, [%1, #16 * 2]\n\t"
> -                     "stp q4, q5, [%1, #16 * 4]\n\t"
> -                     "stp q6, q7, [%1, #16 * 6]\n\t"
> -                     "stp q8, q9, [%1, #16 * 8]\n\t"
> -                     "stp q10, q11, [%1, #16 * 10]\n\t"
> -                     "stp q12, q13, [%1, #16 * 12]\n\t"
> -                     "stp q14, q15, [%1, #16 * 14]\n\t"
> -                     "stp q16, q17, [%1, #16 * 16]\n\t"
> -                     "stp q18, q19, [%1, #16 * 18]\n\t"
> -                     "stp q20, q21, [%1, #16 * 20]\n\t"
> -                     "stp q22, q23, [%1, #16 * 22]\n\t"
> -                     "stp q24, q25, [%1, #16 * 24]\n\t"
> -                     "stp q26, q27, [%1, #16 * 26]\n\t"
> -                     "stp q28, q29, [%1, #16 * 28]\n\t"
> -                     "stp q30, q31, [%1, #16 * 30]\n\t"
> -                     : "=Q" (*v->arch.vfp.fpregs) : "r" (v->arch.vfp.fpregs));
> +        save_state(v->arch.vfp.fpregs);
>      }
> 
>      v->arch.vfp.fpsr = READ_SYSREG(FPSR);
> @@ -47,23 +73,7 @@ void vfp_restore_state(struct vcpu *v)
>          sve_restore_state(v);
>      else
>      {
No need for brackets
> -        asm volatile("ldp q0, q1, [%1, #16 * 0]\n\t"
> -                     "ldp q2, q3, [%1, #16 * 2]\n\t"
> -                     "ldp q4, q5, [%1, #16 * 4]\n\t"
> -                     "ldp q6, q7, [%1, #16 * 6]\n\t"
> -                     "ldp q8, q9, [%1, #16 * 8]\n\t"
> -                     "ldp q10, q11, [%1, #16 * 10]\n\t"
> -                     "ldp q12, q13, [%1, #16 * 12]\n\t"
> -                     "ldp q14, q15, [%1, #16 * 14]\n\t"
> -                     "ldp q16, q17, [%1, #16 * 16]\n\t"
> -                     "ldp q18, q19, [%1, #16 * 18]\n\t"
> -                     "ldp q20, q21, [%1, #16 * 20]\n\t"
> -                     "ldp q22, q23, [%1, #16 * 22]\n\t"
> -                     "ldp q24, q25, [%1, #16 * 24]\n\t"
> -                     "ldp q26, q27, [%1, #16 * 26]\n\t"
> -                     "ldp q28, q29, [%1, #16 * 28]\n\t"
> -                     "ldp q30, q31, [%1, #16 * 30]\n\t"
> -                     : : "Q" (*v->arch.vfp.fpregs), "r" (v->arch.vfp.fpregs));
> +        restore_state(v->arch.vfp.fpregs);
>      }
> 
>      WRITE_SYSREG(v->arch.vfp.fpsr, FPSR);
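
For illustration, dropping the braces as suggested would leave each call
site as a single-statement else. The sketch below is not the committed
change: the SVE condition is not visible in the quoted hunks and is assumed
here, and the trailing system register saves are abbreviated.

    void vfp_save_state(struct vcpu *v)
    {
        if ( !cpu_has_fp )
            return;

        if ( is_sve_domain(v->domain) )     /* condition assumed; elided from the hunk */
            sve_save_state(v);
        else
            save_state(v->arch.vfp.fpregs); /* single statement, so no braces */

        v->arch.vfp.fpsr = READ_SYSREG(FPSR);
        /* remaining system register saves unchanged */
    }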

Apart from that (up to maintainers if it can be done on commit):
Reviewed-by: Michal Orzel <michal.orzel@amd.com>

~Michal
Nicola Vetrini Aug. 24, 2023, 11:11 a.m. UTC | #2
On 24/08/2023 10:01, Michal Orzel wrote:
> Hi Nicola,
> 
> On 24/08/2023 09:37, Nicola Vetrini wrote:
>> 
>> 
>> Directive 4.3 prescribes the following:
>> "Assembly language shall be encapsulated and isolated",
>> on the grounds of improved readability and ease of maintenance.
>> 
>> A static inline function is the chosen encapsulation mechanism.
>> 
>> No functional change.
>> 
>> Signed-off-by: Nicola Vetrini <nicola.vetrini@bugseng.com>
>> ---
>> Changes in v2:
>> - Switched to a static inline function
>> ---
>>  xen/arch/arm/arm64/vfp.c | 78 ++++++++++++++++++++++------------------
>>  1 file changed, 44 insertions(+), 34 deletions(-)
>> 
>> diff --git a/xen/arch/arm/arm64/vfp.c b/xen/arch/arm/arm64/vfp.c
>> index 2d0d7c2e6ddb..5c884380ee42 100644
>> --- a/xen/arch/arm/arm64/vfp.c
>> +++ b/xen/arch/arm/arm64/vfp.c
>> @@ -4,6 +4,48 @@
>>  #include <asm/vfp.h>
>>  #include <asm/arm64/sve.h>
>> 
>> +static inline void save_state(uint64_t *fpregs)
>> +{
>> +    asm volatile("stp q0, q1, [%1, #16 * 0]\n\t"
>> +                 "stp q2, q3, [%1, #16 * 2]\n\t"
>> +                 "stp q4, q5, [%1, #16 * 4]\n\t"
>> +                 "stp q6, q7, [%1, #16 * 6]\n\t"
>> +                 "stp q8, q9, [%1, #16 * 8]\n\t"
>> +                 "stp q10, q11, [%1, #16 * 10]\n\t"
>> +                 "stp q12, q13, [%1, #16 * 12]\n\t"
>> +                 "stp q14, q15, [%1, #16 * 14]\n\t"
>> +                 "stp q16, q17, [%1, #16 * 16]\n\t"
>> +                 "stp q18, q19, [%1, #16 * 18]\n\t"
>> +                 "stp q20, q21, [%1, #16 * 20]\n\t"
>> +                 "stp q22, q23, [%1, #16 * 22]\n\t"
>> +                 "stp q24, q25, [%1, #16 * 24]\n\t"
>> +                 "stp q26, q27, [%1, #16 * 26]\n\t"
>> +                 "stp q28, q29, [%1, #16 * 28]\n\t"
>> +                 "stp q30, q31, [%1, #16 * 30]\n\t"
>> +                 : "=Q" (*fpregs) : "r" (fpregs));
>> +}
>> +
>> +static inline void restore_state(uint64_t *fpregs)
> This can be const as you are loading data from fpregs into registers
> 

I wonder whether this would make a difference, given that the return 
type is void.
It's fine either way, though.
Julien Grall Aug. 24, 2023, 11:20 a.m. UTC | #3
Hi Nicola,

On 24/08/2023 12:11, Nicola Vetrini wrote:
> On 24/08/2023 10:01, Michal Orzel wrote:
>> Hi Nicola,
>>
>> On 24/08/2023 09:37, Nicola Vetrini wrote:
>>>
>>>
>>> Directive 4.3 prescribes the following:
>>> "Assembly language shall be encapsulated and isolated",
>>> on the grounds of improved readability and ease of maintenance.
>>>
>>> A static inline function is the chosen encapsulation mechanism.
>>>
>>> No functional change.
>>>
>>> Signed-off-by: Nicola Vetrini <nicola.vetrini@bugseng.com>
>>> ---
>>> Changes in v2:
>>> - Switched to a static inline function
>>> ---
>>>  xen/arch/arm/arm64/vfp.c | 78 ++++++++++++++++++++++------------------
>>>  1 file changed, 44 insertions(+), 34 deletions(-)
>>>
>>> diff --git a/xen/arch/arm/arm64/vfp.c b/xen/arch/arm/arm64/vfp.c
>>> index 2d0d7c2e6ddb..5c884380ee42 100644
>>> --- a/xen/arch/arm/arm64/vfp.c
>>> +++ b/xen/arch/arm/arm64/vfp.c
>>> @@ -4,6 +4,48 @@
>>>  #include <asm/vfp.h>
>>>  #include <asm/arm64/sve.h>
>>>
>>> +static inline void save_state(uint64_t *fpregs)
>>> +{
>>> +    asm volatile("stp q0, q1, [%1, #16 * 0]\n\t"
>>> +                 "stp q2, q3, [%1, #16 * 2]\n\t"
>>> +                 "stp q4, q5, [%1, #16 * 4]\n\t"
>>> +                 "stp q6, q7, [%1, #16 * 6]\n\t"
>>> +                 "stp q8, q9, [%1, #16 * 8]\n\t"
>>> +                 "stp q10, q11, [%1, #16 * 10]\n\t"
>>> +                 "stp q12, q13, [%1, #16 * 12]\n\t"
>>> +                 "stp q14, q15, [%1, #16 * 14]\n\t"
>>> +                 "stp q16, q17, [%1, #16 * 16]\n\t"
>>> +                 "stp q18, q19, [%1, #16 * 18]\n\t"
>>> +                 "stp q20, q21, [%1, #16 * 20]\n\t"
>>> +                 "stp q22, q23, [%1, #16 * 22]\n\t"
>>> +                 "stp q24, q25, [%1, #16 * 24]\n\t"
>>> +                 "stp q26, q27, [%1, #16 * 26]\n\t"
>>> +                 "stp q28, q29, [%1, #16 * 28]\n\t"
>>> +                 "stp q30, q31, [%1, #16 * 30]\n\t"
>>> +                 : "=Q" (*fpregs) : "r" (fpregs));
>>> +}
>>> +
>>> +static inline void restore_state(uint64_t *fpregs)
>> This can be const as you are loading data from fpregs into registers
>>
> 
> I wonder whether this would make a difference, given that the return 
> type is void.

It is telling the reader that the function is not supposed to modify the 
'fpregs'. A compiler will also be able to throw an error if a developer 
breaks this assumption.

I have been pushing quite a lot recently to add 'const' when a pointer 
is not supposed to be modified. And before someone mentions it, I know 
that 'const' is not perfect in C: if a field points to another area, 
that area would not be const (unless the definition of the field 
contains const). But that's better than nothing :).
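
Concretely, the const-qualified variant would look roughly as below; this
is only a sketch, and nothing but the parameter qualification differs from
the patch. The input-only "Q"/"r" constraints are unaffected by the
qualifier, so no other change should be needed.

    static inline void restore_state(const uint64_t *fpregs)
    {
        asm volatile("ldp q0, q1, [%1, #16 * 0]\n\t"
                     /* ... q2-q29 pairs exactly as in the patch ... */
                     "ldp q30, q31, [%1, #16 * 30]\n\t"
                     : : "Q" (*fpregs), "r" (fpregs));
    }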

Cheers,
Nicola Vetrini Aug. 24, 2023, 1:03 p.m. UTC | #4
On 24/08/2023 13:20, Julien Grall wrote:
> Hi Nicola,
> 
> On 24/08/2023 12:11, Nicola Vetrini wrote:
>> On 24/08/2023 10:01, Michal Orzel wrote:
>>> Hi Nicola,
>>> 
>>> On 24/08/2023 09:37, Nicola Vetrini wrote:
>>>> 
>>>> 
>>>> Directive 4.3 prescribes the following:
>>>> "Assembly language shall be encapsulated and isolated",
>>>> on the grounds of improved readability and ease of maintenance.
>>>> 
>>>> A static inline function is the chosen encapsulation mechanism.
>>>> 
>>>> No functional change.
>>>> 
>>>> Signed-off-by: Nicola Vetrini <nicola.vetrini@bugseng.com>
>>>> ---
>>>> Changes in v2:
>>>> - Switched to a static inline function
>>>> ---
>>>>  xen/arch/arm/arm64/vfp.c | 78 ++++++++++++++++++++++------------------
>>>>  1 file changed, 44 insertions(+), 34 deletions(-)
>>>> 
>>>> diff --git a/xen/arch/arm/arm64/vfp.c b/xen/arch/arm/arm64/vfp.c
>>>> index 2d0d7c2e6ddb..5c884380ee42 100644
>>>> --- a/xen/arch/arm/arm64/vfp.c
>>>> +++ b/xen/arch/arm/arm64/vfp.c
>>>> @@ -4,6 +4,48 @@
>>>>  #include <asm/vfp.h>
>>>>  #include <asm/arm64/sve.h>
>>>> 
>>>> +static inline void save_state(uint64_t *fpregs)
>>>> +{
>>>> +    asm volatile("stp q0, q1, [%1, #16 * 0]\n\t"
>>>> +                 "stp q2, q3, [%1, #16 * 2]\n\t"
>>>> +                 "stp q4, q5, [%1, #16 * 4]\n\t"
>>>> +                 "stp q6, q7, [%1, #16 * 6]\n\t"
>>>> +                 "stp q8, q9, [%1, #16 * 8]\n\t"
>>>> +                 "stp q10, q11, [%1, #16 * 10]\n\t"
>>>> +                 "stp q12, q13, [%1, #16 * 12]\n\t"
>>>> +                 "stp q14, q15, [%1, #16 * 14]\n\t"
>>>> +                 "stp q16, q17, [%1, #16 * 16]\n\t"
>>>> +                 "stp q18, q19, [%1, #16 * 18]\n\t"
>>>> +                 "stp q20, q21, [%1, #16 * 20]\n\t"
>>>> +                 "stp q22, q23, [%1, #16 * 22]\n\t"
>>>> +                 "stp q24, q25, [%1, #16 * 24]\n\t"
>>>> +                 "stp q26, q27, [%1, #16 * 26]\n\t"
>>>> +                 "stp q28, q29, [%1, #16 * 28]\n\t"
>>>> +                 "stp q30, q31, [%1, #16 * 30]\n\t"
>>>> +                 : "=Q" (*fpregs) : "r" (fpregs));
>>>> +}
>>>> +
>>>> +static inline void restore_state(uint64_t *fpregs)
>>> This can be const as you are loading data from fpregs into registers
>>> 
>> 
>> I wonder whether this would make a difference, given that the return 
>> type is void.
> 
> It is telling the reader that the function is not supposed to modify
> the 'fpregs'. A compiler will also be able to throw an error if a
> developer breaks this assumption.
> 
> I have been pushing quite a lot recently to add 'const' when a pointer
> is not supposed to be modified. And before someone mentions it, I know
> that 'const' is not perfect in C: if a field points to another area,
> that area would not be const (unless the definition of the field
> contains const). But that's better than nothing :).
> 
> Cheers,

Ah, yes indeed. I wasn't paying enough attention before.
Thanks,

Patch

diff --git a/xen/arch/arm/arm64/vfp.c b/xen/arch/arm/arm64/vfp.c
index 2d0d7c2e6ddb..5c884380ee42 100644
--- a/xen/arch/arm/arm64/vfp.c
+++ b/xen/arch/arm/arm64/vfp.c
@@ -4,6 +4,48 @@ 
 #include <asm/vfp.h>
 #include <asm/arm64/sve.h>
 
+static inline void save_state(uint64_t *fpregs)
+{
+    asm volatile("stp q0, q1, [%1, #16 * 0]\n\t"
+                 "stp q2, q3, [%1, #16 * 2]\n\t"
+                 "stp q4, q5, [%1, #16 * 4]\n\t"
+                 "stp q6, q7, [%1, #16 * 6]\n\t"
+                 "stp q8, q9, [%1, #16 * 8]\n\t"
+                 "stp q10, q11, [%1, #16 * 10]\n\t"
+                 "stp q12, q13, [%1, #16 * 12]\n\t"
+                 "stp q14, q15, [%1, #16 * 14]\n\t"
+                 "stp q16, q17, [%1, #16 * 16]\n\t"
+                 "stp q18, q19, [%1, #16 * 18]\n\t"
+                 "stp q20, q21, [%1, #16 * 20]\n\t"
+                 "stp q22, q23, [%1, #16 * 22]\n\t"
+                 "stp q24, q25, [%1, #16 * 24]\n\t"
+                 "stp q26, q27, [%1, #16 * 26]\n\t"
+                 "stp q28, q29, [%1, #16 * 28]\n\t"
+                 "stp q30, q31, [%1, #16 * 30]\n\t"
+                 : "=Q" (*fpregs) : "r" (fpregs));
+}
+
+static inline void restore_state(uint64_t *fpregs)
+{
+    asm volatile("ldp q0, q1, [%1, #16 * 0]\n\t"
+                 "ldp q2, q3, [%1, #16 * 2]\n\t"
+                 "ldp q4, q5, [%1, #16 * 4]\n\t"
+                 "ldp q6, q7, [%1, #16 * 6]\n\t"
+                 "ldp q8, q9, [%1, #16 * 8]\n\t"
+                 "ldp q10, q11, [%1, #16 * 10]\n\t"
+                 "ldp q12, q13, [%1, #16 * 12]\n\t"
+                 "ldp q14, q15, [%1, #16 * 14]\n\t"
+                 "ldp q16, q17, [%1, #16 * 16]\n\t"
+                 "ldp q18, q19, [%1, #16 * 18]\n\t"
+                 "ldp q20, q21, [%1, #16 * 20]\n\t"
+                 "ldp q22, q23, [%1, #16 * 22]\n\t"
+                 "ldp q24, q25, [%1, #16 * 24]\n\t"
+                 "ldp q26, q27, [%1, #16 * 26]\n\t"
+                 "ldp q28, q29, [%1, #16 * 28]\n\t"
+                 "ldp q30, q31, [%1, #16 * 30]\n\t"
+                 : : "Q" (*fpregs), "r" (fpregs));
+}
+
 void vfp_save_state(struct vcpu *v)
 {
     if ( !cpu_has_fp )
@@ -13,23 +55,7 @@  void vfp_save_state(struct vcpu *v)
         sve_save_state(v);
     else
     {
-        asm volatile("stp q0, q1, [%1, #16 * 0]\n\t"
-                     "stp q2, q3, [%1, #16 * 2]\n\t"
-                     "stp q4, q5, [%1, #16 * 4]\n\t"
-                     "stp q6, q7, [%1, #16 * 6]\n\t"
-                     "stp q8, q9, [%1, #16 * 8]\n\t"
-                     "stp q10, q11, [%1, #16 * 10]\n\t"
-                     "stp q12, q13, [%1, #16 * 12]\n\t"
-                     "stp q14, q15, [%1, #16 * 14]\n\t"
-                     "stp q16, q17, [%1, #16 * 16]\n\t"
-                     "stp q18, q19, [%1, #16 * 18]\n\t"
-                     "stp q20, q21, [%1, #16 * 20]\n\t"
-                     "stp q22, q23, [%1, #16 * 22]\n\t"
-                     "stp q24, q25, [%1, #16 * 24]\n\t"
-                     "stp q26, q27, [%1, #16 * 26]\n\t"
-                     "stp q28, q29, [%1, #16 * 28]\n\t"
-                     "stp q30, q31, [%1, #16 * 30]\n\t"
-                     : "=Q" (*v->arch.vfp.fpregs) : "r" (v->arch.vfp.fpregs));
+        save_state(v->arch.vfp.fpregs);
     }
 
     v->arch.vfp.fpsr = READ_SYSREG(FPSR);
@@ -47,23 +73,7 @@  void vfp_restore_state(struct vcpu *v)
         sve_restore_state(v);
     else
     {
-        asm volatile("ldp q0, q1, [%1, #16 * 0]\n\t"
-                     "ldp q2, q3, [%1, #16 * 2]\n\t"
-                     "ldp q4, q5, [%1, #16 * 4]\n\t"
-                     "ldp q6, q7, [%1, #16 * 6]\n\t"
-                     "ldp q8, q9, [%1, #16 * 8]\n\t"
-                     "ldp q10, q11, [%1, #16 * 10]\n\t"
-                     "ldp q12, q13, [%1, #16 * 12]\n\t"
-                     "ldp q14, q15, [%1, #16 * 14]\n\t"
-                     "ldp q16, q17, [%1, #16 * 16]\n\t"
-                     "ldp q18, q19, [%1, #16 * 18]\n\t"
-                     "ldp q20, q21, [%1, #16 * 20]\n\t"
-                     "ldp q22, q23, [%1, #16 * 22]\n\t"
-                     "ldp q24, q25, [%1, #16 * 24]\n\t"
-                     "ldp q26, q27, [%1, #16 * 26]\n\t"
-                     "ldp q28, q29, [%1, #16 * 28]\n\t"
-                     "ldp q30, q31, [%1, #16 * 30]\n\t"
-                     : : "Q" (*v->arch.vfp.fpregs), "r" (v->arch.vfp.fpregs));
+        restore_state(v->arch.vfp.fpregs);
     }
 
     WRITE_SYSREG(v->arch.vfp.fpsr, FPSR);