
[v2,14/39] xen/riscv: introduce bitops.h

Message ID a9c69e17fe073551e7007242d00b74b6333ce98d.1700761381.git.oleksii.kurochko@gmail.com (mailing list archive)
State Superseded
Series: Enable build of full Xen for RISC-V

Commit Message

Oleksii K. Nov. 24, 2023, 10:30 a.m. UTC
Signed-off-by: Oleksii Kurochko <oleksii.kurochko@gmail.com>
---
Changes in V2:
 - Nothing changed. Only rebase.
---
 xen/arch/riscv/include/asm/bitops.h | 288 ++++++++++++++++++++++++++++
 1 file changed, 288 insertions(+)
 create mode 100644 xen/arch/riscv/include/asm/bitops.h

Comments

Jan Beulich Dec. 7, 2023, 3:37 p.m. UTC | #1
On 24.11.2023 11:30, Oleksii Kurochko wrote:
> Signed-off-by: Oleksii Kurochko <oleksii.kurochko@gmail.com>

So this looks to have been taken from Linux, which could do with saying
(including which version or most recent commit). It may e.g. justify you
using tab indentation here, albeit ...

> --- /dev/null
> +++ b/xen/arch/riscv/include/asm/bitops.h
> @@ -0,0 +1,288 @@
> +/* SPDX-License-Identifier: GPL-2.0 */
> +/* Copyright (C) 2012 Regents of the University of California */
> +
> +#ifndef _ASM_RISCV_BITOPS_H
> +#define _ASM_RISCV_BITOPS_H
> +
> +#include <asm/system.h>
> +
> +#define BITOP_BITS_PER_WORD     32
> +#define BITOP_MASK(nr)	        (1UL << ((nr) % BITOP_BITS_PER_WORD))
> +#define BITOP_WORD(nr)	        ((nr) / BITOP_BITS_PER_WORD)
> +#define BITS_PER_BYTE	        8
> +
> +#define __set_bit(n,p)          set_bit(n,p)
> +#define __clear_bit(n,p)        clear_bit(n,p)

... then please consistently. Other style related remarks made on the
system.h patch apply here as well (unless again there's a goal of
keeping the diff to the Linux original small; yet then I guess the
delta to the Linux file is already pretty large).

> +/* Based on linux/include/asm-generic/bitops/find.h */
> +
> +#ifndef find_next_bit
> +/**
> + * find_next_bit - find the next set bit in a memory region
> + * @addr: The address to base the search on
> + * @offset: The bitnumber to start searching at
> + * @size: The bitmap size in bits
> + */
> +extern unsigned long find_next_bit(const unsigned long *addr, unsigned long
> +		size, unsigned long offset);
> +#endif
> +
> +#ifndef find_next_zero_bit
> +/**
> + * find_next_zero_bit - find the next cleared bit in a memory region
> + * @addr: The address to base the search on
> + * @offset: The bitnumber to start searching at
> + * @size: The bitmap size in bits
> + */
> +extern unsigned long find_next_zero_bit(const unsigned long *addr, unsigned
> +		long size, unsigned long offset);
> +#endif
> +
> +/**
> + * find_first_bit - find the first set bit in a memory region
> + * @addr: The address to start the search at
> + * @size: The maximum size to search
> + *
> + * Returns the bit number of the first set bit.
> + */
> +extern unsigned long find_first_bit(const unsigned long *addr,
> +				    unsigned long size);
> +
> +/**
> + * find_first_zero_bit - find the first cleared bit in a memory region
> + * @addr: The address to start the search at
> + * @size: The maximum size to search
> + *
> + * Returns the bit number of the first cleared bit.
> + */
> +extern unsigned long find_first_zero_bit(const unsigned long *addr,
> +					 unsigned long size);

Looking over the titles of the rest of the series, I can't spot where
these are going to be implemented. Then again maybe you indeed can get
away without those, at least initially.

> +#define ffs(x) ({ unsigned int __t = (x); fls(__t & -__t); })

This wants to use ISOLATE_LSB() now.
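
A minimal sketch of what the reworked macro might look like, assuming
ISOLATE_LSB(x) expands to ((x) & -(x)) as in xen/macros.h (the temporary
name t_ is purely illustrative):

    #define ffs(x) ({ unsigned int t_ = (x); fls(ISOLATE_LSB(t_)); })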

Jan
Oleksii K. Dec. 8, 2023, 9:50 a.m. UTC | #2
On Thu, 2023-12-07 at 16:37 +0100, Jan Beulich wrote:
> On 24.11.2023 11:30, Oleksii Kurochko wrote:
> > Signed-off-by: Oleksii Kurochko <oleksii.kurochko@gmail.com>
> 
> So this looks to have been taken from Linux, which could do with
> saying
> (including which version or most recent commit). It may e.g. justify
> you
> using tab indentation here, albeit ...
Thanks. I'll update the commit message.

> 
> > --- /dev/null
> > +++ b/xen/arch/riscv/include/asm/bitops.h
> > @@ -0,0 +1,288 @@
> > +/* SPDX-License-Identifier: GPL-2.0 */
> > +/* Copyright (C) 2012 Regents of the University of California */
> > +
> > +#ifndef _ASM_RISCV_BITOPS_H
> > +#define _ASM_RISCV_BITOPS_H
> > +
> > +#include <asm/system.h>
> > +
> > +#define BITOP_BITS_PER_WORD     32
> > +#define BITOP_MASK(nr)	        (1UL << ((nr) %
> > BITOP_BITS_PER_WORD))
> > +#define BITOP_WORD(nr)	        ((nr) / BITOP_BITS_PER_WORD)
> > +#define BITS_PER_BYTE	        8
> > +
> > +#define __set_bit(n,p)          set_bit(n,p)
> > +#define __clear_bit(n,p)        clear_bit(n,p)
> 
> ... then please consistently. Other style related remarks made on the
> system.h patch apply here as well (unless again there's a goal of
> keeping the diff to the Linux original small; yet then I guess the
> delta to the Linux file is already pretty large).
> 
> > +/* Based on linux/include/asm-generic/bitops/find.h */
> > +
> > +#ifndef find_next_bit
> > +/**
> > + * find_next_bit - find the next set bit in a memory region
> > + * @addr: The address to base the search on
> > + * @offset: The bitnumber to start searching at
> > + * @size: The bitmap size in bits
> > + */
> > +extern unsigned long find_next_bit(const unsigned long *addr,
> > unsigned long
> > +		size, unsigned long offset);
> > +#endif
> > +
> > +#ifndef find_next_zero_bit
> > +/**
> > + * find_next_zero_bit - find the next cleared bit in a memory
> > region
> > + * @addr: The address to base the search on
> > + * @offset: The bitnumber to start searching at
> > + * @size: The bitmap size in bits
> > + */
> > +extern unsigned long find_next_zero_bit(const unsigned long *addr,
> > unsigned
> > +		long size, unsigned long offset);
> > +#endif
> > +
> > +/**
> > + * find_first_bit - find the first set bit in a memory region
> > + * @addr: The address to start the search at
> > + * @size: The maximum size to search
> > + *
> > + * Returns the bit number of the first set bit.
> > + */
> > +extern unsigned long find_first_bit(const unsigned long *addr,
> > +				    unsigned long size);
> > +
> > +/**
> > + * find_first_zero_bit - find the first cleared bit in a memory
> > region
> > + * @addr: The address to start the search at
> > + * @size: The maximum size to search
> > + *
> > + * Returns the bit number of the first cleared bit.
> > + */
> > +extern unsigned long find_first_zero_bit(const unsigned long
> > *addr,
> > +					 unsigned long size);
> 
> Looking over the titles of the rest of the series, I can't spot where
> these are going to be implemented. Then again maybe you indeed can get
> away without those, at least initially.
They are introduced in:
	[PATCH v2 21/39] xen/riscv: introduce bit operations
I think we have to merge this patch with patch 21.
> 
> > +#define ffs(x) ({ unsigned int __t = (x); fls(__t & -__t); })
> 
> This wants to use ISOLATE_LSB() now.
> 
~ Oleksii
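
For reference on the find_first_bit()/find_next_bit() discussion above: such
declarations are typically backed by a generic word-at-a-time scan in the
style of the classic asm-generic / Linux lib/find_bit.c code. The rough
sketch below is only an illustration of that algorithm, not the
implementation the series adds in patch 21; it assumes BITS_PER_LONG and
__ffs() are available.

    static unsigned long generic_find_next_bit(const unsigned long *addr,
                                               unsigned long size,
                                               unsigned long offset)
    {
        const unsigned long *p = addr + offset / BITS_PER_LONG;
        unsigned long result = offset & ~(BITS_PER_LONG - 1);
        unsigned long tmp;

        if (offset >= size)
            return size;

        size -= result;
        offset %= BITS_PER_LONG;
        if (offset) {
            tmp = *p++;
            tmp &= ~0UL << offset;      /* ignore bits below the start offset */
            if (size < BITS_PER_LONG)
                goto found_first;
            if (tmp)
                goto found_middle;
            size -= BITS_PER_LONG;
            result += BITS_PER_LONG;
        }
        while (size >= BITS_PER_LONG) {
            tmp = *p++;
            if (tmp)
                goto found_middle;
            result += BITS_PER_LONG;
            size -= BITS_PER_LONG;
        }
        if (!size)
            return result;
        tmp = *p;

    found_first:
        tmp &= ~0UL >> (BITS_PER_LONG - size);  /* ignore bits beyond 'size' */
        if (!tmp)
            return result + size;               /* no set bit in the range */
    found_middle:
        return result + __ffs(tmp);
    }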

Patch

diff --git a/xen/arch/riscv/include/asm/bitops.h b/xen/arch/riscv/include/asm/bitops.h
new file mode 100644
index 0000000000..24a49c499b
--- /dev/null
+++ b/xen/arch/riscv/include/asm/bitops.h
@@ -0,0 +1,288 @@ 
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012 Regents of the University of California */
+
+#ifndef _ASM_RISCV_BITOPS_H
+#define _ASM_RISCV_BITOPS_H
+
+#include <asm/system.h>
+
+#define BITOP_BITS_PER_WORD     32
+#define BITOP_MASK(nr)	        (1UL << ((nr) % BITOP_BITS_PER_WORD))
+#define BITOP_WORD(nr)	        ((nr) / BITOP_BITS_PER_WORD)
+#define BITS_PER_BYTE	        8
+
+#define __set_bit(n,p)          set_bit(n,p)
+#define __clear_bit(n,p)        clear_bit(n,p)
+
+#define __AMO(op)	"amo" #op ".w"
+
+#define __test_and_op_bit_ord(op, mod, nr, addr, ord)		\
+({								\
+	unsigned long __res, __mask;				\
+	__mask = BITOP_MASK(nr);					\
+	__asm__ __volatile__ (					\
+		__AMO(op) #ord " %0, %2, %1"			\
+		: "=r" (__res), "+A" (addr[BITOP_WORD(nr)])	\
+		: "r" (mod(__mask))				\
+		: "memory");					\
+	((__res & __mask) != 0);				\
+})
+
+#define __op_bit_ord(op, mod, nr, addr, ord)			\
+	__asm__ __volatile__ (					\
+		__AMO(op) #ord " zero, %1, %0"			\
+		: "+A" (addr[BITOP_WORD(nr)])			\
+		: "r" (mod(BITOP_MASK(nr)))			\
+		: "memory");
+
+#define __test_and_op_bit(op, mod, nr, addr) 			\
+	__test_and_op_bit_ord(op, mod, nr, addr, .aqrl)
+
+#define __op_bit(op, mod, nr, addr)				\
+	__op_bit_ord(op, mod, nr, addr, )
+
+/* Bitmask modifiers */
+#define __NOP(x)	(x)
+#define __NOT(x)	(~(x))
+
+/**
+ * __test_and_set_bit - Set a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation may be reordered on architectures other than x86.
+ */
+static inline int __test_and_set_bit(int nr, volatile void *p)
+{
+	volatile uint32_t *addr = p;
+
+	return __test_and_op_bit(or, __NOP, nr, addr);
+}
+
+/**
+ * __test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ *
+ * This operation can be reordered on architectures other than x86.
+ */
+static inline int __test_and_clear_bit(int nr, volatile void *p)
+{
+	volatile uint32_t *addr = p;
+
+	return __test_and_op_bit(and, __NOT, nr, addr);
+}
+
+/**
+ * set_bit - Atomically set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * Note: there are no guarantees that this function will not be reordered
+ * on non x86 architectures, so if you are writing portable code,
+ * make sure not to rely on its reordering guarantees.
+ *
+ * Note that @nr may be almost arbitrarily large; this function is not
+ * restricted to acting on a single-word quantity.
+ */
+static inline void set_bit(int nr, volatile void *p)
+{
+	volatile uint32_t *addr = p;
+
+	__op_bit(or, __NOP, nr, addr);
+}
+
+/**
+ * clear_bit - Clears a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * Note: there are no guarantees that this function will not be reordered
+ * on non x86 architectures, so if you are writing portable code,
+ * make sure not to rely on its reordering guarantees.
+ */
+static inline void clear_bit(int nr, volatile void *p)
+{
+	volatile uint32_t *addr = p;
+
+	__op_bit(and, __NOT, nr, addr);
+}
+
+static inline int test_bit(int nr, const volatile void *p)
+{
+	const volatile uint32_t *addr = (const volatile uint32_t *)p;
+
+	return 1UL & (addr[BITOP_WORD(nr)] >> (nr & (BITOP_BITS_PER_WORD-1)));
+}
+
+#undef __test_and_op_bit
+#undef __op_bit
+#undef __NOP
+#undef __NOT
+#undef __AMO
+
+static inline int fls(unsigned int x)
+{
+    return generic_fls(x);
+}
+
+static inline int flsl(unsigned long x)
+{
+    return generic_flsl(x);
+}
+
+#define test_and_set_bit   __test_and_set_bit
+#define test_and_clear_bit __test_and_clear_bit
+
+/* Based on linux/include/asm-generic/bitops/find.h */
+
+#ifndef find_next_bit
+/**
+ * find_next_bit - find the next set bit in a memory region
+ * @addr: The address to base the search on
+ * @offset: The bitnumber to start searching at
+ * @size: The bitmap size in bits
+ */
+extern unsigned long find_next_bit(const unsigned long *addr, unsigned long
+		size, unsigned long offset);
+#endif
+
+#ifndef find_next_zero_bit
+/**
+ * find_next_zero_bit - find the next cleared bit in a memory region
+ * @addr: The address to base the search on
+ * @offset: The bitnumber to start searching at
+ * @size: The bitmap size in bits
+ */
+extern unsigned long find_next_zero_bit(const unsigned long *addr, unsigned
+		long size, unsigned long offset);
+#endif
+
+/**
+ * find_first_bit - find the first set bit in a memory region
+ * @addr: The address to start the search at
+ * @size: The maximum size to search
+ *
+ * Returns the bit number of the first set bit.
+ */
+extern unsigned long find_first_bit(const unsigned long *addr,
+				    unsigned long size);
+
+/**
+ * find_first_zero_bit - find the first cleared bit in a memory region
+ * @addr: The address to start the search at
+ * @size: The maximum size to search
+ *
+ * Returns the bit number of the first cleared bit.
+ */
+extern unsigned long find_first_zero_bit(const unsigned long *addr,
+					 unsigned long size);
+
+#define ffs(x) ({ unsigned int __t = (x); fls(__t & -__t); })
+
+/**
+ * __ffs - find first set bit in word.
+ * @word: The word to search
+ *
+ * Returns the 0-indexed position of the first set bit; undefined if @word is 0.
+ */
+static inline unsigned long __ffs(unsigned long word)
+{
+	int num = 0;
+
+#if BITS_PER_LONG == 64
+	if ((word & 0xffffffff) == 0) {
+		num += 32;
+		word >>= 32;
+	}
+#endif
+	if ((word & 0xffff) == 0) {
+		num += 16;
+		word >>= 16;
+	}
+	if ((word & 0xff) == 0) {
+		num += 8;
+		word >>= 8;
+	}
+	if ((word & 0xf) == 0) {
+		num += 4;
+		word >>= 4;
+	}
+	if ((word & 0x3) == 0) {
+		num += 2;
+		word >>= 2;
+	}
+	if ((word & 0x1) == 0)
+		num += 1;
+	return num;
+}
+
+/**
+ * ffsl - find first set bit in a long.
+ * @word: The word to search
+ *
+ * Returns 0 if no bit is set, otherwise the 1-indexed position of the first set bit.
+ */
+static inline unsigned int ffsl(unsigned long word)
+{
+    int num = 1;
+
+    if (!word)
+        return 0;
+
+#if BITS_PER_LONG == 64
+    if ((word & 0xffffffff) == 0) {
+        num += 32;
+        word >>= 32;
+    }
+#endif
+    if ((word & 0xffff) == 0) {
+        num += 16;
+        word >>= 16;
+    }
+    if ((word & 0xff) == 0) {
+        num += 8;
+        word >>= 8;
+    }
+    if ((word & 0xf) == 0) {
+        num += 4;
+        word >>= 4;
+    }
+    if ((word & 0x3) == 0) {
+        num += 2;
+        word >>= 2;
+    }
+    if ((word & 0x1) == 0)
+        num += 1;
+    return num;
+}
+
+/*
+ * ffz - find first zero in word.
+ * @word: The word to search
+ *
+ * Undefined if no zero exists, so code should check against ~0UL first.
+ */
+#define ffz(x)  __ffs(~(x))
+
+/**
+ * find_first_set_bit - find the first set bit in @word
+ * @word: the word to search
+ *
+ * Returns the bit-number of the first set bit (first bit being 0).
+ * The input must *not* be zero.
+ */
+static inline unsigned int find_first_set_bit(unsigned long word)
+{
+        return ffsl(word) - 1;
+}
+
+/**
+ * hweightN - returns the Hamming weight of an N-bit word
+ * @x: the word to weigh
+ *
+ * The Hamming Weight of a number is the total number of bits set in it.
+ */
+#define hweight64(x) generic_hweight64(x)
+
+#endif /* _ASM_RISCV_BITOPS_H */
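
Purely as an illustration of how the interface above would be used (this is
not part of the patch; the bitmap layout and bit numbers are made up):

    static void bitops_example(void)
    {
        uint32_t map[2] = { 0, 0 };   /* 64-bit bitmap built from two 32-bit words */

        set_bit(5, map);              /* atomically set bit 5 in the first word */
        set_bit(40, map);             /* bit 40 lands in the second word */

        if (test_bit(5, map))
            clear_bit(5, map);        /* atomically clear it again */

        /* test_and_set_bit() returns the previous value of the bit. */
        if (!test_and_set_bit(7, map)) {
            /* Bit 7 was clear before this call and is set now. */
        }

        /* ffs() gives the 1-based index of the lowest set bit: ffs(0x18) == 4. */
    }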