[v3,3/5] xen/bitops: Implement hweight32() in terms of hweightl()

Message ID 20240904225530.3888315-4-andrew.cooper3@citrix.com (mailing list archive)
State New
Series xen/bitops: hweight() cleanup/improvements

Commit Message

Andrew Cooper Sept. 4, 2024, 10:55 p.m. UTC
... and drop generic_hweight32().

As noted previously, the only two users of hweight32() are in __init paths.

The int-optimised form of generic_hweight() is only two instructions shorter
than the long-optimised form, and even then only on architectures which lack
fast multiplication, so there's no point providing an int-optimised form.
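
For context, a minimal sketch of the SWAR (bit-trick) population count that a
generic hweightl() fallback typically uses when no popcount builtin is
available. This is the textbook algorithm, shown for illustration only; the
helper name and exact spelling are not claimed to match Xen's
generic_hweightl():

    /* Illustrative SWAR popcount for an unsigned long (32- or 64-bit). */
    static inline unsigned int sketch_hweightl(unsigned long x)
    {
        x -= (x >> 1) & (~0UL / 3);                     /* per-2-bit counts */
        x = (x & (~0UL / 5)) + ((x >> 2) & (~0UL / 5)); /* per-4-bit counts */
        x = (x + (x >> 4)) & (~0UL / 17);               /* per-byte counts  */

        /* The multiply sums all byte counts into the top byte; architectures
         * without fast multiplication use a shift/add chain instead, which is
         * where the small size difference mentioned above comes from. */
        return (unsigned int)((x * (~0UL / 255)) >>
                              (sizeof(unsigned long) * 8 - 8));
    }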

No functional change.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Acked-by: Jan Beulich <jbeulich@suse.com>
---
CC: Jan Beulich <JBeulich@suse.com>
CC: Roger Pau Monné <roger.pau@citrix.com>
CC: Stefano Stabellini <sstabellini@kernel.org>
CC: Julien Grall <julien@xen.org>
CC: Volodymyr Babchuk <Volodymyr_Babchuk@epam.com>
CC: Bertrand Marquis <bertrand.marquis@arm.com>
CC: Michal Orzel <michal.orzel@amd.com>
CC: Oleksii Kurochko <oleksii.kurochko@gmail.com>
CC: Shawn Anastasio <sanastasio@raptorengineering.com>

v2:
 * Reorder with respect to the hweight64() patch
 * Rewrite the commit message
 * s/__pure/attr_const/
---
 xen/arch/arm/include/asm/bitops.h | 1 -
 xen/arch/ppc/include/asm/bitops.h | 1 -
 xen/arch/x86/include/asm/bitops.h | 1 -
 xen/include/xen/bitops.h          | 5 +++++
 4 files changed, 5 insertions(+), 3 deletions(-)

Comments

Stefano Stabellini Sept. 5, 2024, 11:08 p.m. UTC | #1
On Wed, 4 Sep 2024, Andrew Cooper wrote:
> ... and drop generic_hweight32().
> 
> As noted previously, the only two users of hweight32() are in __init paths.
> 
> The int-optimised form of generic_hweight() is only two instructions shorter
> than the long-optimised form, and even then only on architectures which lack
> fast multiplication, so there's no point providing an int-optimised form.
> 
> No functional change.
> 
> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
> Acked-by: Jan Beulich <jbeulich@suse.com>

The patch is OK:

Acked-by: Stefano Stabellini <sstabellini@kernel.org>


I was looking at docs/misra/C-language-toolchain.rst to make sure
everything is listed there. We have attr_const as "__const__" noted
among "Non-standard tokens".

Looks like we need to add __always_inline__ ?


> ---
> CC: Jan Beulich <JBeulich@suse.com>
> CC: Roger Pau Monné <roger.pau@citrix.com>
> CC: Stefano Stabellini <sstabellini@kernel.org>
> CC: Julien Grall <julien@xen.org>
> CC: Volodymyr Babchuk <Volodymyr_Babchuk@epam.com>
> CC: Bertrand Marquis <bertrand.marquis@arm.com>
> CC: Michal Orzel <michal.orzel@amd.com>
> CC: Oleksii Kurochko <oleksii.kurochko@gmail.com>
> CC: Shawn Anastasio <sanastasio@raptorengineering.com>
> 
> v2:
>  * Reorder with respect to the hweight64() patch
>  * Rewrite the commit message
>  * s/__pure/attr_const/
> ---
>  xen/arch/arm/include/asm/bitops.h | 1 -
>  xen/arch/ppc/include/asm/bitops.h | 1 -
>  xen/arch/x86/include/asm/bitops.h | 1 -
>  xen/include/xen/bitops.h          | 5 +++++
>  4 files changed, 5 insertions(+), 3 deletions(-)
> 
> diff --git a/xen/arch/arm/include/asm/bitops.h b/xen/arch/arm/include/asm/bitops.h
> index 91cd167b6bbb..b28c25b3d52d 100644
> --- a/xen/arch/arm/include/asm/bitops.h
> +++ b/xen/arch/arm/include/asm/bitops.h
> @@ -85,7 +85,6 @@ bool clear_mask16_timeout(uint16_t mask, volatile void *p,
>   * The Hamming Weight of a number is the total number of bits set in it.
>   */
>  #define hweight64(x) generic_hweight64(x)
> -#define hweight32(x) generic_hweight32(x)
>  
>  #endif /* _ARM_BITOPS_H */
>  /*
> diff --git a/xen/arch/ppc/include/asm/bitops.h b/xen/arch/ppc/include/asm/bitops.h
> index 64512e949530..f488a7c03425 100644
> --- a/xen/arch/ppc/include/asm/bitops.h
> +++ b/xen/arch/ppc/include/asm/bitops.h
> @@ -133,6 +133,5 @@ static inline int test_and_set_bit(unsigned int nr, volatile void *addr)
>   * The Hamming Weight of a number is the total number of bits set in it.
>   */
>  #define hweight64(x) __builtin_popcountll(x)
> -#define hweight32(x) __builtin_popcount(x)
>  
>  #endif /* _ASM_PPC_BITOPS_H */
> diff --git a/xen/arch/x86/include/asm/bitops.h b/xen/arch/x86/include/asm/bitops.h
> index 4c5b21907a64..507b043b8a86 100644
> --- a/xen/arch/x86/include/asm/bitops.h
> +++ b/xen/arch/x86/include/asm/bitops.h
> @@ -482,6 +482,5 @@ static always_inline unsigned int arch_flsl(unsigned long x)
>   * The Hamming Weight of a number is the total number of bits set in it.
>   */
>  #define hweight64(x) generic_hweight64(x)
> -#define hweight32(x) generic_hweight32(x)
>  
>  #endif /* _X86_BITOPS_H */
> diff --git a/xen/include/xen/bitops.h b/xen/include/xen/bitops.h
> index 58c600155f7e..a462c3065158 100644
> --- a/xen/include/xen/bitops.h
> +++ b/xen/include/xen/bitops.h
> @@ -326,6 +326,11 @@ static always_inline attr_const unsigned int hweightl(unsigned long x)
>  #endif
>  }
>  
> +static always_inline attr_const unsigned int hweight32(uint32_t x)
> +{
> +    return hweightl(x);
> +}
> +
>  /* --------------------- Please tidy below here --------------------- */
>  
>  #ifndef find_next_bit
> -- 
> 2.39.2
>
Andrew Cooper Sept. 6, 2024, 12:27 a.m. UTC | #2
On 06/09/2024 12:08 am, Stefano Stabellini wrote:
> On Wed, 4 Sep 2024, Andrew Cooper wrote:
>> ... and drop generic_hweight32().
>>
>> As noted previously, the only two users of hweight32() are in __init paths.
>>
>> The int-optimised form of generic_hweight() is only two instructions shorter
>> than the long-optimised form, and even then only on architectures which lack
>> fast multiplication, so there's no point providing an int-optimised form.
>>
>> No functional change.
>>
>> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
>> Acked-by: Jan Beulich <jbeulich@suse.com>
> The patch is OK:
>
> Acked-by: Stefano Stabellini <sstabellini@kernel.org>

Thanks.

> I was looking at docs/misra/C-language-toolchain.rst to make sure
> everything is listed there. We have attr_const as "__const__" noted
> among "Non-standard tokens".
>
> Looks like we need to add __always_inline__ ?

Luckily, no.

First, the __const__ referred to there is GCC's alternative spelling of
the regular C 'const' keyword, and not the function attribute by the
same name.

But, non-standard tokens are about things which survive full
preprocessing and are still not standard C.  In this case, it's the
__attribute__ that matters, not what's in it, and this is why we don't
have hundreds of rows in that table for __attribute__((foo, bar, baz)).
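
As a concrete illustration (not code from the tree), the two distinct uses of
the __const__ spelling look like this: the first is GCC's alternate spelling
of the standard 'const' qualifier, the second is the 'const' function
attribute, which only ever appears inside __attribute__(()):

    /* GCC's alternate spelling of the standard 'const' qualifier. */
    static __const__ int table[4] = { 1, 2, 3, 4 };

    /* The 'const' function attribute: the result depends only on the
     * arguments and no global state is read. */
    static unsigned int square(unsigned int x) __attribute__((__const__));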

That said, I think f96e2f64576cdbb147391c7cb399d393385719a9 probably
should have removed the entire row including __const__ seeing as AFAICT
the last user in Xen was dropped in 1aa3c54a31a5 in 2008.

~Andrew
Andrew Cooper Sept. 6, 2024, 6:50 p.m. UTC | #3
On 06/09/2024 1:27 am, Andrew Cooper wrote:
> On 06/09/2024 12:08 am, Stefano Stabellini wrote:
>> On Wed, 4 Sep 2024, Andrew Cooper wrote:
>>> ... and drop generic_hweight32().
>>>
>>> As noted previously, the only two users of hweight32() are in __init paths.
>>>
>>> The int-optimised form of generic_hweight() is only two instructions shorter
>>> than the long-optimised form, and even then only on architectures which lack
>>> fast multiplication, so there's no point providing an int-optimised form.
>>>
>>> No functional change.
>>>
>>> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
>>> Acked-by: Jan Beulich <jbeulich@suse.com>
>> The patch is OK:
>>
>> Acked-by: Stefano Stabellini <sstabellini@kernel.org>
> Thanks.
>
>> I was looking at docs/misra/C-language-toolchain.rst to make sure
>> everything is listed there. We have attr_const as "__const__" noted
>> among "Non-standard tokens".
>>
>> Looks like we need to add __always_inline__ ?
> Luckily, no.
>
> First, the __const__ referred to there is GCC's alternative spelling of
> the regular C 'const' keyword, and not the function attribute by the
> same name.
>
> But, non-standard tokens are about things which survive full
> preprocessing and are still not standard C.  In this case, it's the
> __attribute__ that matters, not what's in it, and this is why we don't
> have hundreds of rows in that table for __attribute__((foo, bar, baz)).
>
> That said, I think f96e2f64576cdbb147391c7cb399d393385719a9 probably
> should have removed the entire row including __const__ seeing as AFAICT
> the last user in Xen was dropped in 1aa3c54a31a5 in 2008.

Hmm.  Eclair disagrees.

https://gitlab.com/xen-project/people/andyhhp/xen/-/jobs/7765315981

Now I'm completely confused as to why __const__ matters and everything
else inside an __attribute__(()) apparently doesn't.

CC-ing some folk.  Any ideas?

~Andrew
Nicola Vetrini Sept. 7, 2024, 1:20 p.m. UTC | #4
On 2024-09-06 20:50, Andrew Cooper wrote:
> On 06/09/2024 1:27 am, Andrew Cooper wrote:
>> On 06/09/2024 12:08 am, Stefano Stabellini wrote:
>>> On Wed, 4 Sep 2024, Andrew Cooper wrote:
>>>> ... and drop generic_hweight32().
>>>> 
>>>> As noted previously, the only two users of hweight32() are in __init paths.
>>>> 
>>>> The int-optimised form of generic_hweight() is only two instructions shorter
>>>> than the long-optimised form, and even then only on architectures which lack
>>>> fast multiplication, so there's no point providing an int-optimised form.
>>>> 
>>>> No functional change.
>>>> 
>>>> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
>>>> Acked-by: Jan Beulich <jbeulich@suse.com>
>>> The patch is OK:
>>> 
>>> Acked-by: Stefano Stabellini <sstabellini@kernel.org>
>> Thanks.
>> 
>>> I was looking at docs/misra/C-language-toolchain.rst to make sure
>>> everything is listed there. We have attr_const as "__const__" noted
>>> among "Non-standard tokens".
>>> 
>>> Looks like we need to add __always_inline__ ?
>> Luckily, no.
>> 
>> First, the __const__ referred to there is GCC's alternative spelling of
>> the regular C 'const' keyword, and not the function attribute by the
>> same name.
>> 
>> But, non-standard tokens are about things which survive full
>> preprocessing and are still not standard C.  In this case, it's the
>> __attribute__ that matters, not what's in it, and this is why we don't
>> have hundreds of rows in that table for __attribute__((foo, bar, baz)).
>> 
>> That said, I think f96e2f64576cdbb147391c7cb399d393385719a9 probably
>> should have removed the entire row including __const__ seeing as AFAICT
>> the last user in Xen was dropped in 1aa3c54a31a5 in 2008.
> 
> Hmm.  Eclair disagrees.
> 
> https://gitlab.com/xen-project/people/andyhhp/xen/-/jobs/7765315981
> 
> Now I'm completely confused as to why __const__ matters and everything
> else inside an __attribute__(()) apparently doesn't.
> 
> CC-ing some folk.  Any ideas?
> 
> ~Andrew

The reason why __attribute__((__const__)) is reported after removing the
__const__ token from the STD.tokenext configuration is that:
- __const__ is just an alternate keyword for const, and as such needs to
be special-cased in the configuration, as it was originally;
- the __const__ inside the attribute is still a non-standard token, no
matter the surrounding context, and the kind of token is determined
right after preprocessing as being a keyword (albeit non-standard), as
far as I'm aware;
- this only happens due to the overlap with the alternate keyword
__const__; in other words, other attributes such as
__transparent_union__ in __attribute__((__transparent_union__)) do not
need to be listed as permitted non-standard tokens because they are
neither standard nor non-standard keywords, but just ordinary identifiers.

Therefore, I suggest keeping the __const__ inside the configuration.
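
A small illustration of that distinction (the declarations below are made up
for the example): the __const__ token is classified as a non-standard keyword
wherever it appears, so it needs a STD.tokenext entry, whereas
__transparent_union__ is an ordinary identifier and needs none:

    /* Non-standard keyword inside the attribute list. */
    unsigned int popcount32(unsigned int x) __attribute__((__const__));

    /* Ordinary identifier inside the attribute list. */
    typedef union {
        int *iptr;
        unsigned int *uptr;
    } int_ptr_t __attribute__((__transparent_union__));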
Shawn Anastasio Sept. 13, 2024, 3 p.m. UTC | #5
Hi Andrew,

On 9/4/24 5:55 PM, Andrew Cooper wrote:
> ... and drop generic_hweight32().
> 
> As noted previously, the only two users of hweight32() are in __init paths.
> 
> The int-optimised form of generic_hweight() is only two instructions shorter
> than the long-optimised form, and even then only on architectures which lack
> fast multiplication, so there's no point providing an int-optimised form.
> 
> No functional change.
> 
> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
> Acked-by: Jan Beulich <jbeulich@suse.com>

Acked-by: Shawn Anastasio <sanastasio@raptorengineering.com>

Thanks,
Shawn

Patch

diff --git a/xen/arch/arm/include/asm/bitops.h b/xen/arch/arm/include/asm/bitops.h
index 91cd167b6bbb..b28c25b3d52d 100644
--- a/xen/arch/arm/include/asm/bitops.h
+++ b/xen/arch/arm/include/asm/bitops.h
@@ -85,7 +85,6 @@  bool clear_mask16_timeout(uint16_t mask, volatile void *p,
  * The Hamming Weight of a number is the total number of bits set in it.
  */
 #define hweight64(x) generic_hweight64(x)
-#define hweight32(x) generic_hweight32(x)
 
 #endif /* _ARM_BITOPS_H */
 /*
diff --git a/xen/arch/ppc/include/asm/bitops.h b/xen/arch/ppc/include/asm/bitops.h
index 64512e949530..f488a7c03425 100644
--- a/xen/arch/ppc/include/asm/bitops.h
+++ b/xen/arch/ppc/include/asm/bitops.h
@@ -133,6 +133,5 @@  static inline int test_and_set_bit(unsigned int nr, volatile void *addr)
  * The Hamming Weight of a number is the total number of bits set in it.
  */
 #define hweight64(x) __builtin_popcountll(x)
-#define hweight32(x) __builtin_popcount(x)
 
 #endif /* _ASM_PPC_BITOPS_H */
diff --git a/xen/arch/x86/include/asm/bitops.h b/xen/arch/x86/include/asm/bitops.h
index 4c5b21907a64..507b043b8a86 100644
--- a/xen/arch/x86/include/asm/bitops.h
+++ b/xen/arch/x86/include/asm/bitops.h
@@ -482,6 +482,5 @@  static always_inline unsigned int arch_flsl(unsigned long x)
  * The Hamming Weight of a number is the total number of bits set in it.
  */
 #define hweight64(x) generic_hweight64(x)
-#define hweight32(x) generic_hweight32(x)
 
 #endif /* _X86_BITOPS_H */
diff --git a/xen/include/xen/bitops.h b/xen/include/xen/bitops.h
index 58c600155f7e..a462c3065158 100644
--- a/xen/include/xen/bitops.h
+++ b/xen/include/xen/bitops.h
@@ -326,6 +326,11 @@  static always_inline attr_const unsigned int hweightl(unsigned long x)
 #endif
 }
 
+static always_inline attr_const unsigned int hweight32(uint32_t x)
+{
+    return hweightl(x);
+}
+
 /* --------------------- Please tidy below here --------------------- */
 
 #ifndef find_next_bit
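
For illustration, a hedged sketch of how the reworked helpers compose after
this patch. Only the shape of hweight32() forwarding to hweightl() is taken
from the diff above; the hweightl() body and the call site are stand-ins so
the example is self-contained:

    #include <stdint.h>

    /* Stand-in for Xen's hweightl(); the real one may use a compiler
     * builtin or a generic SWAR fallback depending on the architecture. */
    static inline unsigned int hweightl(unsigned long x)
    {
        return (unsigned int)__builtin_popcountl(x);
    }

    /* Shape of the new helper from the patch: a uint32_t zero-extends to
     * unsigned long, so the wider helper returns the same count. */
    static inline unsigned int hweight32(uint32_t x)
    {
        return hweightl(x);
    }

    int main(void)
    {
        /* 0xf0f0f0f0 has 16 set bits. */
        return hweight32(0xf0f0f0f0u) == 16 ? 0 : 1;
    }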