diff mbox series

[v13,02/10] xen/riscv: introduce bitops.h

Message ID 0e4441eee82b0545e59099e2f62e3a01fa198d08.1719319093.git.oleksii.kurochko@gmail.com (mailing list archive)
State Superseded
Series Enable build of full Xen for RISC-V

Commit Message

Oleksii Kurochko June 25, 2024, 1:51 p.m. UTC
Taken from Linux-6.4.0-rc1

Xen's bitops.h consists of several Linux's headers:
* linux/arch/include/asm/bitops.h:
  * The following functions were removed as they aren't used in Xen:
        * test_and_set_bit_lock
        * clear_bit_unlock
        * __clear_bit_unlock
  * The following functions were renamed to match how they are
    used by common code:
        * __test_and_set_bit
        * __test_and_clear_bit
  * The declarations and implementations of the following functions
    were updated to make the Xen build happy:
        * clear_bit
        * set_bit
        * __test_and_clear_bit
        * __test_and_set_bit

Signed-off-by: Oleksii Kurochko <oleksii.kurochko@gmail.com>
Acked-by: Jan Beulich <jbeulich@suse.com>
---
Changes in V11-V13:
 - Nothing changed. Only rebase was done.
---
Changes in V10:
 - update the error message BITS_PER_LONG -> BITOP_BITS_PER_WORD
---
Changes in V9:
 - add Acked-by: Jan Beulich <jbeulich@suse.com>
 - drop the redefinition of bitop_uint_t in asm/types.h, as some operations in Xen common code expect
   to work with 32-bit quantities.
 - s/BITS_PER_LONG/BITOP_BITS_PER_WORD in asm/bitops.h around __AMO() macros.
---
Changes in V8:
 - define bitop_uint_t in <asm/types.h> after the changes in the patch
   "introduce generic non-atomic test_*bit()".
 - drop duplicated __set_bit() and __clear_bit().
 - drop duplicated comment: /* Based on linux/arch/include/asm/bitops.h */.
 - update type of res and mask in test_and_op_bit_ord(): unsigned long -> bitop_uint_t.
 - drop 1 padding blank in test_and_op_bit_ord().
 - update definitions of test_and_set_bit(), test_and_clear_bit(), test_and_change_bit():
   change return type to bool.
 - change addr argument type of test_and_change_bit(): unsigned long * -> void *.
 - move test_and_change_bit() closer to the other test_and_*() functions.
 - Code style fixes: tabs -> space.
 - s/#undef __op_bit/#undef op_bit.
 - update the commit message: delete information about generic-non-atomic.h changes as now
   it is a separate patch.
---
Changes in V7:
 - Update the commit message.
 - Drop "__" for __op_bit and __op_bit_ord as they are atomic.
 - add comment above __set_bit and __clear_bit about why they are defined as atomic.
 - align bitops_uint_t with __AMO().
 - make changes after the generic non-atomic test_*bit() functions were changed.
 - s/__asm__ __volatile__/asm volatile
---
Changes in V6:
 - rebase clean ups were done: drop unused asm-generic includes
---
 Changes in V5:
   - new patch
---
 xen/arch/riscv/include/asm/bitops.h | 137 ++++++++++++++++++++++++++++
 1 file changed, 137 insertions(+)
 create mode 100644 xen/arch/riscv/include/asm/bitops.h

Comments

Jan Beulich June 26, 2024, 8:31 a.m. UTC | #1
On 25.06.2024 15:51, Oleksii Kurochko wrote:
> --- /dev/null
> +++ b/xen/arch/riscv/include/asm/bitops.h
> @@ -0,0 +1,137 @@
> +/* SPDX-License-Identifier: GPL-2.0 */
> +/* Copyright (C) 2012 Regents of the University of California */
> +
> +#ifndef _ASM_RISCV_BITOPS_H
> +#define _ASM_RISCV_BITOPS_H
> +
> +#include <asm/system.h>
> +
> +#if BITOP_BITS_PER_WORD == 64
> +#define __AMO(op)   "amo" #op ".d"
> +#elif BITOP_BITS_PER_WORD == 32
> +#define __AMO(op)   "amo" #op ".w"
> +#else
> +#error "Unexpected BITOP_BITS_PER_WORD"
> +#endif
> +
> +/* Based on linux/arch/include/asm/bitops.h */
> +
> +/*
> + * Non-atomic bit manipulation.
> + *
> + * Implemented using atomics to be interrupt safe. Could alternatively
> + * implement with local interrupt masking.
> + */
> +#define __set_bit(n, p)      set_bit(n, p)
> +#define __clear_bit(n, p)    clear_bit(n, p)
> +
> +#define test_and_op_bit_ord(op, mod, nr, addr, ord)     \
> +({                                                      \
> +    bitop_uint_t res, mask;                             \
> +    mask = BITOP_MASK(nr);                              \
> +    asm volatile (                                      \
> +        __AMO(op) #ord " %0, %2, %1"                    \
> +        : "=r" (res), "+A" (addr[BITOP_WORD(nr)])       \
> +        : "r" (mod(mask))                               \
> +        : "memory");                                    \
> +    ((res & mask) != 0);                                \
> +})
> +
> +#define op_bit_ord(op, mod, nr, addr, ord)      \
> +    asm volatile (                              \
> +        __AMO(op) #ord " zero, %1, %0"          \
> +        : "+A" (addr[BITOP_WORD(nr)])           \
> +        : "r" (mod(BITOP_MASK(nr)))             \
> +        : "memory");
> +
> +#define test_and_op_bit(op, mod, nr, addr)    \
> +    test_and_op_bit_ord(op, mod, nr, addr, .aqrl)
> +#define op_bit(op, mod, nr, addr) \
> +    op_bit_ord(op, mod, nr, addr, )
> +
> +/* Bitmask modifiers */
> +#define NOP(x)    (x)
> +#define NOT(x)    (~(x))

Since elsewhere you said we would use Zbb in bitops, I wanted to come back
on that: Up to here all we use is AMO.

And further down there's no asm() anymore. What were you referring to?

Jan
Oleksii Kurochko June 26, 2024, 5:27 p.m. UTC | #2
On Wed, 2024-06-26 at 10:31 +0200, Jan Beulich wrote:
> On 25.06.2024 15:51, Oleksii Kurochko wrote:
> > --- /dev/null
> > +++ b/xen/arch/riscv/include/asm/bitops.h
> > @@ -0,0 +1,137 @@
> > +/* SPDX-License-Identifier: GPL-2.0 */
> > +/* Copyright (C) 2012 Regents of the University of California */
> > +
> > +#ifndef _ASM_RISCV_BITOPS_H
> > +#define _ASM_RISCV_BITOPS_H
> > +
> > +#include <asm/system.h>
> > +
> > +#if BITOP_BITS_PER_WORD == 64
> > +#define __AMO(op)   "amo" #op ".d"
> > +#elif BITOP_BITS_PER_WORD == 32
> > +#define __AMO(op)   "amo" #op ".w"
> > +#else
> > +#error "Unexpected BITOP_BITS_PER_WORD"
> > +#endif
> > +
> > +/* Based on linux/arch/include/asm/bitops.h */
> > +
> > +/*
> > + * Non-atomic bit manipulation.
> > + *
> > + * Implemented using atomics to be interrupt safe. Could
> > alternatively
> > + * implement with local interrupt masking.
> > + */
> > +#define __set_bit(n, p)      set_bit(n, p)
> > +#define __clear_bit(n, p)    clear_bit(n, p)
> > +
> > +#define test_and_op_bit_ord(op, mod, nr, addr, ord)     \
> > +({                                                      \
> > +    bitop_uint_t res, mask;                             \
> > +    mask = BITOP_MASK(nr);                              \
> > +    asm volatile (                                      \
> > +        __AMO(op) #ord " %0, %2, %1"                    \
> > +        : "=r" (res), "+A" (addr[BITOP_WORD(nr)])       \
> > +        : "r" (mod(mask))                               \
> > +        : "memory");                                    \
> > +    ((res & mask) != 0);                                \
> > +})
> > +
> > +#define op_bit_ord(op, mod, nr, addr, ord)      \
> > +    asm volatile (                              \
> > +        __AMO(op) #ord " zero, %1, %0"          \
> > +        : "+A" (addr[BITOP_WORD(nr)])           \
> > +        : "r" (mod(BITOP_MASK(nr)))             \
> > +        : "memory");
> > +
> > +#define test_and_op_bit(op, mod, nr, addr)    \
> > +    test_and_op_bit_ord(op, mod, nr, addr, .aqrl)
> > +#define op_bit(op, mod, nr, addr) \
> > +    op_bit_ord(op, mod, nr, addr, )
> > +
> > +/* Bitmask modifiers */
> > +#define NOP(x)    (x)
> > +#define NOT(x)    (~(x))
> 
> Since elsewhere you said we would use Zbb in bitops, I wanted to come
> back
> on that: Up to here all we use is AMO.
> 
> And further down there's no asm() anymore. What were you referring
> to?
RISC-V doesn't have a CLZ instruction in the base
ISA.  As a consequence, __builtin_ffs() emits a library call to ffs()
on GCC, or a de Bruijn sequence on Clang.

The optional Zbb extension adds a CLZ instruction, after which
__builtin_ffs() emits a very simple sequence.
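
For illustration only (not part of this patch, and assuming RV64 with a
Zbb-aware toolchain), that "very simple sequence" amounts to something
like:

    /* Hypothetical sketch: ffs() on top of Zbb's count-trailing-zeros. */
    static inline unsigned int zbb_ffs(unsigned int x)
    {
        unsigned int res;

        if ( x == 0 )
            return 0;

        asm ( "ctzw %0, %1" : "=r" (res) : "r" (x) );

        return res + 1;
    }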

~ Oleksii
Jan Beulich June 27, 2024, 7:59 a.m. UTC | #3
On 26.06.2024 19:27, oleksii.kurochko@gmail.com wrote:
> On Wed, 2024-06-26 at 10:31 +0200, Jan Beulich wrote:
>> On 25.06.2024 15:51, Oleksii Kurochko wrote:
>>> --- /dev/null
>>> +++ b/xen/arch/riscv/include/asm/bitops.h
>>> @@ -0,0 +1,137 @@
>>> +/* SPDX-License-Identifier: GPL-2.0 */
>>> +/* Copyright (C) 2012 Regents of the University of California */
>>> +
>>> +#ifndef _ASM_RISCV_BITOPS_H
>>> +#define _ASM_RISCV_BITOPS_H
>>> +
>>> +#include <asm/system.h>
>>> +
>>> +#if BITOP_BITS_PER_WORD == 64
>>> +#define __AMO(op)   "amo" #op ".d"
>>> +#elif BITOP_BITS_PER_WORD == 32
>>> +#define __AMO(op)   "amo" #op ".w"
>>> +#else
>>> +#error "Unexpected BITOP_BITS_PER_WORD"
>>> +#endif
>>> +
>>> +/* Based on linux/arch/include/asm/bitops.h */
>>> +
>>> +/*
>>> + * Non-atomic bit manipulation.
>>> + *
>>> + * Implemented using atomics to be interrupt safe. Could
>>> alternatively
>>> + * implement with local interrupt masking.
>>> + */
>>> +#define __set_bit(n, p)      set_bit(n, p)
>>> +#define __clear_bit(n, p)    clear_bit(n, p)
>>> +
>>> +#define test_and_op_bit_ord(op, mod, nr, addr, ord)     \
>>> +({                                                      \
>>> +    bitop_uint_t res, mask;                             \
>>> +    mask = BITOP_MASK(nr);                              \
>>> +    asm volatile (                                      \
>>> +        __AMO(op) #ord " %0, %2, %1"                    \
>>> +        : "=r" (res), "+A" (addr[BITOP_WORD(nr)])       \
>>> +        : "r" (mod(mask))                               \
>>> +        : "memory");                                    \
>>> +    ((res & mask) != 0);                                \
>>> +})
>>> +
>>> +#define op_bit_ord(op, mod, nr, addr, ord)      \
>>> +    asm volatile (                              \
>>> +        __AMO(op) #ord " zero, %1, %0"          \
>>> +        : "+A" (addr[BITOP_WORD(nr)])           \
>>> +        : "r" (mod(BITOP_MASK(nr)))             \
>>> +        : "memory");
>>> +
>>> +#define test_and_op_bit(op, mod, nr, addr)    \
>>> +    test_and_op_bit_ord(op, mod, nr, addr, .aqrl)
>>> +#define op_bit(op, mod, nr, addr) \
>>> +    op_bit_ord(op, mod, nr, addr, )
>>> +
>>> +/* Bitmask modifiers */
>>> +#define NOP(x)    (x)
>>> +#define NOT(x)    (~(x))
>>
>> Since elsewhere you said we would use Zbb in bitops, I wanted to come
>> back
>> on that: Up to here all we use is AMO.
>>
>> And further down there's no asm() anymore. What were you referring
>> to?
> RISC-V doesn't have a CLZ instruction in the base
> ISA.  As a consequence, __builtin_ffs() emits a library call to ffs()
> on GCC,

Oh, so we'd need to implement that libgcc function, along the lines of
Arm32 implementing quite a few of them to support shifts on 64-bit
quantities as well as division and modulo.

Jan

> or a de Bruijn sequence on Clang.
> 
> The optional Zbb extension adds a CLZ instruction, after which
> __builtin_ffs() emits a very simple sequence.
> 
> ~ Oleksii
Oleksii Kurochko June 27, 2024, 9:58 a.m. UTC | #4
On Thu, 2024-06-27 at 09:59 +0200, Jan Beulich wrote:
> On 26.06.2024 19:27, oleksii.kurochko@gmail.com wrote:
> > On Wed, 2024-06-26 at 10:31 +0200, Jan Beulich wrote:
> > > On 25.06.2024 15:51, Oleksii Kurochko wrote:
> > > > --- /dev/null
> > > > +++ b/xen/arch/riscv/include/asm/bitops.h
> > > > @@ -0,0 +1,137 @@
> > > > +/* SPDX-License-Identifier: GPL-2.0 */
> > > > +/* Copyright (C) 2012 Regents of the University of California
> > > > */
> > > > +
> > > > +#ifndef _ASM_RISCV_BITOPS_H
> > > > +#define _ASM_RISCV_BITOPS_H
> > > > +
> > > > +#include <asm/system.h>
> > > > +
> > > > +#if BITOP_BITS_PER_WORD == 64
> > > > +#define __AMO(op)   "amo" #op ".d"
> > > > +#elif BITOP_BITS_PER_WORD == 32
> > > > +#define __AMO(op)   "amo" #op ".w"
> > > > +#else
> > > > +#error "Unexpected BITOP_BITS_PER_WORD"
> > > > +#endif
> > > > +
> > > > +/* Based on linux/arch/include/asm/bitops.h */
> > > > +
> > > > +/*
> > > > + * Non-atomic bit manipulation.
> > > > + *
> > > > + * Implemented using atomics to be interrupt safe. Could
> > > > alternatively
> > > > + * implement with local interrupt masking.
> > > > + */
> > > > +#define __set_bit(n, p)      set_bit(n, p)
> > > > +#define __clear_bit(n, p)    clear_bit(n, p)
> > > > +
> > > > +#define test_and_op_bit_ord(op, mod, nr, addr, ord)     \
> > > > +({                                                      \
> > > > +    bitop_uint_t res, mask;                             \
> > > > +    mask = BITOP_MASK(nr);                              \
> > > > +    asm volatile (                                      \
> > > > +        __AMO(op) #ord " %0, %2, %1"                    \
> > > > +        : "=r" (res), "+A" (addr[BITOP_WORD(nr)])       \
> > > > +        : "r" (mod(mask))                               \
> > > > +        : "memory");                                    \
> > > > +    ((res & mask) != 0);                                \
> > > > +})
> > > > +
> > > > +#define op_bit_ord(op, mod, nr, addr, ord)      \
> > > > +    asm volatile (                              \
> > > > +        __AMO(op) #ord " zero, %1, %0"          \
> > > > +        : "+A" (addr[BITOP_WORD(nr)])           \
> > > > +        : "r" (mod(BITOP_MASK(nr)))             \
> > > > +        : "memory");
> > > > +
> > > > +#define test_and_op_bit(op, mod, nr, addr)    \
> > > > +    test_and_op_bit_ord(op, mod, nr, addr, .aqrl)
> > > > +#define op_bit(op, mod, nr, addr) \
> > > > +    op_bit_ord(op, mod, nr, addr, )
> > > > +
> > > > +/* Bitmask modifiers */
> > > > +#define NOP(x)    (x)
> > > > +#define NOT(x)    (~(x))
> > > 
> > > Since elsewhere you said we would use Zbb in bitops, I wanted to
> > > come
> > > back
> > > on that: Up to here all we use is AMO.
> > > 
> > > And further down there's no asm() anymore. What were you
> > > referring
> > > to?
> > RISC-V doesn't have a CLZ instruction in the base
> > ISA.  As a consequence, __builtin_ffs() emits a library call to
> > ffs()
> > on GCC,
> 
> Oh, so we'd need to implement that libgcc function, along the lines
> of
> Arm32 implementing quite a few of them to support shifts on 64-bit
> quantities as well as division and modulo.
Why can't we just live with the Zbb extension? The Zbb extension is present
on every platform with hypervisor extension support that I have access to.

~ Oleksii

> 
> Jan
> 
> > or a de Bruijn sequence on Clang.
> > 
> > The optional Zbb extension adds a CLZ instruction, after which
> > __builtin_ffs() emits a very simple sequence.
> > 
> > ~ Oleksii
>
Jan Beulich June 27, 2024, 10:10 a.m. UTC | #5
On 27.06.2024 11:58, oleksii.kurochko@gmail.com wrote:
> On Thu, 2024-06-27 at 09:59 +0200, Jan Beulich wrote:
>> On 26.06.2024 19:27, oleksii.kurochko@gmail.com wrote:
>>> On Wed, 2024-06-26 at 10:31 +0200, Jan Beulich wrote:
>>>> On 25.06.2024 15:51, Oleksii Kurochko wrote:
>>>>> --- /dev/null
>>>>> +++ b/xen/arch/riscv/include/asm/bitops.h
>>>>> @@ -0,0 +1,137 @@
>>>>> +/* SPDX-License-Identifier: GPL-2.0 */
>>>>> +/* Copyright (C) 2012 Regents of the University of California
>>>>> */
>>>>> +
>>>>> +#ifndef _ASM_RISCV_BITOPS_H
>>>>> +#define _ASM_RISCV_BITOPS_H
>>>>> +
>>>>> +#include <asm/system.h>
>>>>> +
>>>>> +#if BITOP_BITS_PER_WORD == 64
>>>>> +#define __AMO(op)   "amo" #op ".d"
>>>>> +#elif BITOP_BITS_PER_WORD == 32
>>>>> +#define __AMO(op)   "amo" #op ".w"
>>>>> +#else
>>>>> +#error "Unexpected BITOP_BITS_PER_WORD"
>>>>> +#endif
>>>>> +
>>>>> +/* Based on linux/arch/include/asm/bitops.h */
>>>>> +
>>>>> +/*
>>>>> + * Non-atomic bit manipulation.
>>>>> + *
>>>>> + * Implemented using atomics to be interrupt safe. Could
>>>>> alternatively
>>>>> + * implement with local interrupt masking.
>>>>> + */
>>>>> +#define __set_bit(n, p)      set_bit(n, p)
>>>>> +#define __clear_bit(n, p)    clear_bit(n, p)
>>>>> +
>>>>> +#define test_and_op_bit_ord(op, mod, nr, addr, ord)     \
>>>>> +({                                                      \
>>>>> +    bitop_uint_t res, mask;                             \
>>>>> +    mask = BITOP_MASK(nr);                              \
>>>>> +    asm volatile (                                      \
>>>>> +        __AMO(op) #ord " %0, %2, %1"                    \
>>>>> +        : "=r" (res), "+A" (addr[BITOP_WORD(nr)])       \
>>>>> +        : "r" (mod(mask))                               \
>>>>> +        : "memory");                                    \
>>>>> +    ((res & mask) != 0);                                \
>>>>> +})
>>>>> +
>>>>> +#define op_bit_ord(op, mod, nr, addr, ord)      \
>>>>> +    asm volatile (                              \
>>>>> +        __AMO(op) #ord " zero, %1, %0"          \
>>>>> +        : "+A" (addr[BITOP_WORD(nr)])           \
>>>>> +        : "r" (mod(BITOP_MASK(nr)))             \
>>>>> +        : "memory");
>>>>> +
>>>>> +#define test_and_op_bit(op, mod, nr, addr)    \
>>>>> +    test_and_op_bit_ord(op, mod, nr, addr, .aqrl)
>>>>> +#define op_bit(op, mod, nr, addr) \
>>>>> +    op_bit_ord(op, mod, nr, addr, )
>>>>> +
>>>>> +/* Bitmask modifiers */
>>>>> +#define NOP(x)    (x)
>>>>> +#define NOT(x)    (~(x))
>>>>
>>>> Since elsewhere you said we would use Zbb in bitops, I wanted to
>>>> come
>>>> back
>>>> on that: Up to here all we use is AMO.
>>>>
>>>> And further down there's no asm() anymore. What were you
>>>> referring
>>>> to?
>>> RISC-V doesn't have a CLZ instruction in the base
>>> ISA.  As a consequence, __builtin_ffs() emits a library call to
>>> ffs()
>>> on GCC,
>>
>> Oh, so we'd need to implement that libgcc function, along the lines
>> of
>> Arm32 implementing quite a few of them to support shifts on 64-bit
>> quantities as well as division and modulo.
> Why we can't just live with Zbb extension? Zbb extension is presented
> on every platform I have in access with hypervisor extension support.

I'd be fine that way, but then you don't need to break up ANDN into NOT
and AND. It is my understanding that Andrew has concerns here, even if
- iirc - it was him who originally suggested building upon that extension
being available. If these concerns are solely about being able to build
with Zbb-unaware tool chains, then what to do about the build issues
there has already been said.

Jan
Oleksii Kurochko June 27, 2024, 12:01 p.m. UTC | #6
On Thu, 2024-06-27 at 12:10 +0200, Jan Beulich wrote:
> On 27.06.2024 11:58, oleksii.kurochko@gmail.com wrote:
> > On Thu, 2024-06-27 at 09:59 +0200, Jan Beulich wrote:
> > > On 26.06.2024 19:27, oleksii.kurochko@gmail.com wrote:
> > > > On Wed, 2024-06-26 at 10:31 +0200, Jan Beulich wrote:
> > > > > On 25.06.2024 15:51, Oleksii Kurochko wrote:
> > > > > > --- /dev/null
> > > > > > +++ b/xen/arch/riscv/include/asm/bitops.h
> > > > > > @@ -0,0 +1,137 @@
> > > > > > +/* SPDX-License-Identifier: GPL-2.0 */
> > > > > > +/* Copyright (C) 2012 Regents of the University of
> > > > > > California
> > > > > > */
> > > > > > +
> > > > > > +#ifndef _ASM_RISCV_BITOPS_H
> > > > > > +#define _ASM_RISCV_BITOPS_H
> > > > > > +
> > > > > > +#include <asm/system.h>
> > > > > > +
> > > > > > +#if BITOP_BITS_PER_WORD == 64
> > > > > > +#define __AMO(op)   "amo" #op ".d"
> > > > > > +#elif BITOP_BITS_PER_WORD == 32
> > > > > > +#define __AMO(op)   "amo" #op ".w"
> > > > > > +#else
> > > > > > +#error "Unexpected BITOP_BITS_PER_WORD"
> > > > > > +#endif
> > > > > > +
> > > > > > +/* Based on linux/arch/include/asm/bitops.h */
> > > > > > +
> > > > > > +/*
> > > > > > + * Non-atomic bit manipulation.
> > > > > > + *
> > > > > > + * Implemented using atomics to be interrupt safe. Could
> > > > > > alternatively
> > > > > > + * implement with local interrupt masking.
> > > > > > + */
> > > > > > +#define __set_bit(n, p)      set_bit(n, p)
> > > > > > +#define __clear_bit(n, p)    clear_bit(n, p)
> > > > > > +
> > > > > > +#define test_and_op_bit_ord(op, mod, nr, addr, ord)     \
> > > > > > +({                                                      \
> > > > > > +    bitop_uint_t res, mask;                             \
> > > > > > +    mask = BITOP_MASK(nr);                              \
> > > > > > +    asm volatile (                                      \
> > > > > > +        __AMO(op) #ord " %0, %2, %1"                    \
> > > > > > +        : "=r" (res), "+A" (addr[BITOP_WORD(nr)])       \
> > > > > > +        : "r" (mod(mask))                               \
> > > > > > +        : "memory");                                    \
> > > > > > +    ((res & mask) != 0);                                \
> > > > > > +})
> > > > > > +
> > > > > > +#define op_bit_ord(op, mod, nr, addr, ord)      \
> > > > > > +    asm volatile (                              \
> > > > > > +        __AMO(op) #ord " zero, %1, %0"          \
> > > > > > +        : "+A" (addr[BITOP_WORD(nr)])           \
> > > > > > +        : "r" (mod(BITOP_MASK(nr)))             \
> > > > > > +        : "memory");
> > > > > > +
> > > > > > +#define test_and_op_bit(op, mod, nr, addr)    \
> > > > > > +    test_and_op_bit_ord(op, mod, nr, addr, .aqrl)
> > > > > > +#define op_bit(op, mod, nr, addr) \
> > > > > > +    op_bit_ord(op, mod, nr, addr, )
> > > > > > +
> > > > > > +/* Bitmask modifiers */
> > > > > > +#define NOP(x)    (x)
> > > > > > +#define NOT(x)    (~(x))
> > > > > 
> > > > > Since elsewhere you said we would use Zbb in bitops, I wanted
> > > > > to
> > > > > come
> > > > > back
> > > > > on that: Up to here all we use is AMO.
> > > > > 
> > > > > And further down there's no asm() anymore. What were you
> > > > > referring
> > > > > to?
> > > > RISC-V doesn't have a CLZ instruction in the base
> > > > ISA.  As a consequence, __builtin_ffs() emits a library call to
> > > > ffs()
> > > > on GCC,
> > > 
> > > Oh, so we'd need to implement that libgcc function, along the
> > > lines
> > > of
> > > Arm32 implementing quite a few of them to support shifts on 64-
> > > bit
> > > quantities as well as division and modulo.
> > Why we can't just live with Zbb extension? Zbb extension is
> > presented
> > on every platform I have in access with hypervisor extension
> > support.
> 
> I'd be fine that way, but then you don't need to break up ANDN into
> NOT
> and AND. It is my understanding that Andrew has concerns here, even
> if
> - iirc - it was him to originally suggest to build upon that
> extension
> being available. If these concerns are solely about being able to
> build
> with Zbb-unaware tool chains, then what to do about the build issues
> there has already been said.
Not much we can do, except probably using .insn, as you suggested for
the "pause" instruction in cpu_relax(), for every instruction (at the
moment it is only ANDN, but who knows which instructions will be used in
the future) from the Zbb extension.

But then we will need to do the same for each possible extension we are
going to use, as there is still a small chance that we might encounter
an extension-unaware toolchain.

I am a little bit confused about what we should do.

In my opinion, the best approach at the moment is to use .insn for the
ANDN and PAUSE instructions and add an explanation to
docs/misc/riscv/booting.txt or create a separate document where such
issues are documented (I am not sure that README is the correct place
for this).
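
For what it's worth, a rough sketch of what that could look like
(encodings per the Zbb and Zihintpause specs; the helper names and exact
.insn syntax are my assumptions and would need checking against the
assembler):

    /* Hypothetical: ANDN (rd = rs1 & ~rs2) emitted via .insn so that a
     * Zbb-unaware assembler still accepts it. */
    static inline unsigned long andn(unsigned long x, unsigned long y)
    {
        unsigned long res;

        asm ( ".insn r 0x33, 0x7, 0x20, %0, %1, %2"
              : "=r" (res) : "r" (x), "r" (y) );

        return res;
    }

    /* Hypothetical: PAUSE is a FENCE with pred=W, succ=0 (0x0100000f). */
    static inline void cpu_relax(void)
    {
        asm volatile ( ".insn i 0x0F, 0, x0, x0, 0x010" ::: "memory" );
    }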

I am also okay with breaking ANDN up into NOT and AND, but we need to
come up with a concept of which instructions/extensions should be used
and how to deal with such situations consistently.

Furthermore, I don't think these changes should block the merging
(whether in the 4.19 or the 4.20 Xen release) of patches v13 01-07 of
this patch series.

~ Oleksii
Jan Beulich June 27, 2024, 2:56 p.m. UTC | #7
On 27.06.2024 14:01, oleksii.kurochko@gmail.com wrote:
> On Thu, 2024-06-27 at 12:10 +0200, Jan Beulich wrote:
>> On 27.06.2024 11:58, oleksii.kurochko@gmail.com wrote:
>>> On Thu, 2024-06-27 at 09:59 +0200, Jan Beulich wrote:
>>>> On 26.06.2024 19:27, oleksii.kurochko@gmail.com wrote:
>>>>> On Wed, 2024-06-26 at 10:31 +0200, Jan Beulich wrote:
>>>>>> On 25.06.2024 15:51, Oleksii Kurochko wrote:
>>>>>>> --- /dev/null
>>>>>>> +++ b/xen/arch/riscv/include/asm/bitops.h
>>>>>>> @@ -0,0 +1,137 @@
>>>>>>> +/* SPDX-License-Identifier: GPL-2.0 */
>>>>>>> +/* Copyright (C) 2012 Regents of the University of
>>>>>>> California
>>>>>>> */
>>>>>>> +
>>>>>>> +#ifndef _ASM_RISCV_BITOPS_H
>>>>>>> +#define _ASM_RISCV_BITOPS_H
>>>>>>> +
>>>>>>> +#include <asm/system.h>
>>>>>>> +
>>>>>>> +#if BITOP_BITS_PER_WORD == 64
>>>>>>> +#define __AMO(op)   "amo" #op ".d"
>>>>>>> +#elif BITOP_BITS_PER_WORD == 32
>>>>>>> +#define __AMO(op)   "amo" #op ".w"
>>>>>>> +#else
>>>>>>> +#error "Unexpected BITOP_BITS_PER_WORD"
>>>>>>> +#endif
>>>>>>> +
>>>>>>> +/* Based on linux/arch/include/asm/bitops.h */
>>>>>>> +
>>>>>>> +/*
>>>>>>> + * Non-atomic bit manipulation.
>>>>>>> + *
>>>>>>> + * Implemented using atomics to be interrupt safe. Could
>>>>>>> alternatively
>>>>>>> + * implement with local interrupt masking.
>>>>>>> + */
>>>>>>> +#define __set_bit(n, p)      set_bit(n, p)
>>>>>>> +#define __clear_bit(n, p)    clear_bit(n, p)
>>>>>>> +
>>>>>>> +#define test_and_op_bit_ord(op, mod, nr, addr, ord)     \
>>>>>>> +({                                                      \
>>>>>>> +    bitop_uint_t res, mask;                             \
>>>>>>> +    mask = BITOP_MASK(nr);                              \
>>>>>>> +    asm volatile (                                      \
>>>>>>> +        __AMO(op) #ord " %0, %2, %1"                    \
>>>>>>> +        : "=r" (res), "+A" (addr[BITOP_WORD(nr)])       \
>>>>>>> +        : "r" (mod(mask))                               \
>>>>>>> +        : "memory");                                    \
>>>>>>> +    ((res & mask) != 0);                                \
>>>>>>> +})
>>>>>>> +
>>>>>>> +#define op_bit_ord(op, mod, nr, addr, ord)      \
>>>>>>> +    asm volatile (                              \
>>>>>>> +        __AMO(op) #ord " zero, %1, %0"          \
>>>>>>> +        : "+A" (addr[BITOP_WORD(nr)])           \
>>>>>>> +        : "r" (mod(BITOP_MASK(nr)))             \
>>>>>>> +        : "memory");
>>>>>>> +
>>>>>>> +#define test_and_op_bit(op, mod, nr, addr)    \
>>>>>>> +    test_and_op_bit_ord(op, mod, nr, addr, .aqrl)
>>>>>>> +#define op_bit(op, mod, nr, addr) \
>>>>>>> +    op_bit_ord(op, mod, nr, addr, )
>>>>>>> +
>>>>>>> +/* Bitmask modifiers */
>>>>>>> +#define NOP(x)    (x)
>>>>>>> +#define NOT(x)    (~(x))
>>>>>>
>>>>>> Since elsewhere you said we would use Zbb in bitops, I wanted
>>>>>> to
>>>>>> come
>>>>>> back
>>>>>> on that: Up to here all we use is AMO.
>>>>>>
>>>>>> And further down there's no asm() anymore. What were you
>>>>>> referring
>>>>>> to?
>>>>> RISC-V doesn't have a CLZ instruction in the base
>>>>> ISA.  As a consequence, __builtin_ffs() emits a library call to
>>>>> ffs()
>>>>> on GCC,
>>>>
>>>> Oh, so we'd need to implement that libgcc function, along the
>>>> lines
>>>> of
>>>> Arm32 implementing quite a few of them to support shifts on 64-
>>>> bit
>>>> quantities as well as division and modulo.
>>> Why we can't just live with Zbb extension? Zbb extension is
>>> presented
>>> on every platform I have in access with hypervisor extension
>>> support.
>>
>> I'd be fine that way, but then you don't need to break up ANDN into
>> NOT
>> and AND. It is my understanding that Andrew has concerns here, even
>> if
>> - iirc - it was him to originally suggest to build upon that
>> extension
>> being available. If these concerns are solely about being able to
>> build
>> with Zbb-unaware tool chains, then what to do about the build issues
>> there has already been said.
> Not much we can do except probably using .insn, as you suggested for
> the "pause" instruction in cpu_relax(), for every instruction ( at the
> moment it is only ANDB but who knows which instruction will be used in
> the future ) from the Zbb extension.
> 
> But then we will need to do the same for each possible extension we are
> going to use, as there is still a small chance that we might encounter
> an extension-unaware toolchain.
> 
> I am a little bit confused about what we should do.
> 
> In my opinion, the best approach at the moment is to use .insn for the
> ANDN and PAUSE instructions

This would be my preference, but please also consult with Andrew.

> and add an explanation to
> docs/misc/riscv/booting.txt or create a separate document where such
> issues are documented (I am not sure that README is the correct place
> for this).
> 
> I am also okay to go with ANDN break up int NOT and AND, but it is
> needed to come up  with concept which instruction/extenstion should be
> used and how consistently to deal with such situations.
> 
> Furthermore, I don't think these changes should block the merging (
> doesn't matter in 4.19 or in 4.20 Xen release ) of PATCHes v13 01-07 of
> this patch series.

This can be looked at in different ways. The splitting into NOT+AND was a
rather late change.

Jan
diff mbox series

Patch

diff --git a/xen/arch/riscv/include/asm/bitops.h b/xen/arch/riscv/include/asm/bitops.h
new file mode 100644
index 0000000000..7f7af3fda1
--- /dev/null
+++ b/xen/arch/riscv/include/asm/bitops.h
@@ -0,0 +1,137 @@ 
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012 Regents of the University of California */
+
+#ifndef _ASM_RISCV_BITOPS_H
+#define _ASM_RISCV_BITOPS_H
+
+#include <asm/system.h>
+
+#if BITOP_BITS_PER_WORD == 64
+#define __AMO(op)   "amo" #op ".d"
+#elif BITOP_BITS_PER_WORD == 32
+#define __AMO(op)   "amo" #op ".w"
+#else
+#error "Unexpected BITOP_BITS_PER_WORD"
+#endif
+
+/* Based on linux/arch/include/asm/bitops.h */
+
+/*
+ * Non-atomic bit manipulation.
+ *
+ * Implemented using atomics to be interrupt safe. Could alternatively
+ * implement with local interrupt masking.
+ */
+#define __set_bit(n, p)      set_bit(n, p)
+#define __clear_bit(n, p)    clear_bit(n, p)
+
+#define test_and_op_bit_ord(op, mod, nr, addr, ord)     \
+({                                                      \
+    bitop_uint_t res, mask;                             \
+    mask = BITOP_MASK(nr);                              \
+    asm volatile (                                      \
+        __AMO(op) #ord " %0, %2, %1"                    \
+        : "=r" (res), "+A" (addr[BITOP_WORD(nr)])       \
+        : "r" (mod(mask))                               \
+        : "memory");                                    \
+    ((res & mask) != 0);                                \
+})
+
+#define op_bit_ord(op, mod, nr, addr, ord)      \
+    asm volatile (                              \
+        __AMO(op) #ord " zero, %1, %0"          \
+        : "+A" (addr[BITOP_WORD(nr)])           \
+        : "r" (mod(BITOP_MASK(nr)))             \
+        : "memory");
+
+#define test_and_op_bit(op, mod, nr, addr)    \
+    test_and_op_bit_ord(op, mod, nr, addr, .aqrl)
+#define op_bit(op, mod, nr, addr) \
+    op_bit_ord(op, mod, nr, addr, )
+
+/* Bitmask modifiers */
+#define NOP(x)    (x)
+#define NOT(x)    (~(x))
+
+/**
+ * test_and_set_bit - Set a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ */
+static inline bool test_and_set_bit(int nr, volatile void *p)
+{
+    volatile bitop_uint_t *addr = p;
+
+    return test_and_op_bit(or, NOP, nr, addr);
+}
+
+/**
+ * test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ */
+static inline bool test_and_clear_bit(int nr, volatile void *p)
+{
+    volatile bitop_uint_t *addr = p;
+
+    return test_and_op_bit(and, NOT, nr, addr);
+}
+
+/**
+ * test_and_change_bit - Toggle (change) a bit and return its old value
+ * @nr: Bit to change
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It also implies a memory barrier.
+ */
+static inline bool test_and_change_bit(int nr, volatile void *p)
+{
+    volatile bitop_uint_t *addr = p;
+
+    return test_and_op_bit(xor, NOP, nr, addr);
+}
+
+/**
+ * set_bit - Atomically set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * Note that @nr may be almost arbitrarily large; this function is not
+ * restricted to acting on a single-word quantity.
+ */
+static inline void set_bit(int nr, volatile void *p)
+{
+    volatile bitop_uint_t *addr = p;
+
+    op_bit(or, NOP, nr, addr);
+}
+
+/**
+ * clear_bit - Clears a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ */
+static inline void clear_bit(int nr, volatile void *p)
+{
+    volatile bitop_uint_t *addr = p;
+
+    op_bit(and, NOT, nr, addr);
+}
+
+#undef test_and_op_bit
+#undef op_bit
+#undef NOP
+#undef NOT
+#undef __AMO
+
+#endif /* _ASM_RISCV_BITOPS_H */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
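
For context beyond the diff itself, a minimal usage sketch (hypothetical
names, not part of the patch) of how common code ends up exercising these
helpers:

    static unsigned long pending_mask;

    /* Marks bit @nr pending and returns whether it already was. */
    static bool mark_pending(unsigned int nr)
    {
        return test_and_set_bit(nr, &pending_mask);
    }

    static void clear_pending(unsigned int nr)
    {
        clear_bit(nr, &pending_mask);
    }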