diff mbox series

[04/10] btrfs-progs: Implement find_*_bit_le operations

Message ID 1538405181-25231-5-git-send-email-nborisov@suse.com (mailing list archive)
State New, archived
Headers show
Series Freespace tree repair support v2 | expand

Commit Message

Nikolay Borisov Oct. 1, 2018, 2:46 p.m. UTC
This commit introduces explicit little endian bit operations. The only
difference with the existing bitops implementation is that bswap(32|64)
is called when the _le versions are invoked on a big-endian machine.
This is in preparation for adding free space tree conversion support.

Signed-off-by: Nikolay Borisov <nborisov@suse.com>
---
 kernel-lib/bitops.h | 82 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 82 insertions(+)

Comments

Omar Sandoval Oct. 4, 2018, 6:08 p.m. UTC | #1
On Mon, Oct 01, 2018 at 05:46:15PM +0300, Nikolay Borisov wrote:
> This commit introduces explicit little endian bit operations. The only
> difference with the existing bitops implementation is that bswap(32|64)
> is called when the _le versions are invoked on a big-endian machine.
> This is in preparation for adding free space tree conversion support.

I had to check, but it looks like these are also pulled from the kernel
source, so

Reviewed-by: Omar Sandoval <osandov@fb.com>

> Signed-off-by: Nikolay Borisov <nborisov@suse.com>
> ---
>  kernel-lib/bitops.h | 82 +++++++++++++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 82 insertions(+)
Nikolay Borisov Oct. 4, 2018, 6:09 p.m. UTC | #2
On  4.10.2018 21:08, Omar Sandoval wrote:
> On Mon, Oct 01, 2018 at 05:46:15PM +0300, Nikolay Borisov wrote:
>> This commit introduces explicit little endian bit operations. The only
>> difference with the existing bitops implementation is that bswap(32|64)
>> is called when the _le versions are invoked on a big-endian machine.
>> This is in preparation for adding free space tree conversion support.
> 
> I had to check, but it looks like these are also pulled from the kernel
> source, so


Indeed and I did spend time to convince myself how the code works and
that it works :)

> 
> Reviewed-by: Omar Sandoval <osandov@fb.com>
> 
>> Signed-off-by: Nikolay Borisov <nborisov@suse.com>
>> ---
>>  kernel-lib/bitops.h | 82 +++++++++++++++++++++++++++++++++++++++++++++++++++++
>>  1 file changed, 82 insertions(+)
>
diff mbox series

Patch

diff --git a/kernel-lib/bitops.h b/kernel-lib/bitops.h
index 78256adf55be..5030bfa2815e 100644
--- a/kernel-lib/bitops.h
+++ b/kernel-lib/bitops.h
@@ -2,6 +2,7 @@ 
 #define _PERF_LINUX_BITOPS_H_
 
 #include <linux/kernel.h>
+#include <endian.h>
 #include "internal.h"
 
 #ifndef DIV_ROUND_UP
@@ -170,5 +171,86 @@  static inline unsigned long find_next_zero_bit(const unsigned long *addr,
 }
 
 #define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
+#define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0)
+
+#if __BYTE_ORDER == __BIG_ENDIAN
+
+static inline unsigned long ext2_swab(const unsigned long y)
+{
+#if BITS_PER_LONG == 64
+	return (unsigned long) bswap64((u64) y);
+#elif BITS_PER_LONG == 32
+	return (unsigned long) bswap32((u32) y);
+#else
+#error BITS_PER_LONG not defined
+#endif
+}
+
+static inline unsigned long _find_next_bit_le(const unsigned long *addr1,
+		const unsigned long *addr2, unsigned long nbits,
+		unsigned long start, unsigned long invert)
+{
+	unsigned long tmp;
+
+	if (start >= nbits)
+		return nbits;
+
+	tmp = addr1[start / BITS_PER_LONG];
+	if (addr2)
+		tmp &= addr2[start / BITS_PER_LONG];
+	tmp ^= invert;
+
+	/* Handle 1st word. */
+	tmp &= ext2_swab(BITMAP_FIRST_WORD_MASK(start));
+	start = round_down(start, BITS_PER_LONG);
+
+	while (!tmp) {
+		start += BITS_PER_LONG;
+		if (start >= nbits)
+			return nbits;
+
+		tmp = addr1[start / BITS_PER_LONG];
+		if (addr2)
+			tmp &= addr2[start / BITS_PER_LONG];
+		tmp ^= invert;
+	}
+
+	return min(start + __ffs(ext2_swab(tmp)), nbits);
+}
+
+static inline unsigned long find_next_zero_bit_le(const void *addr,
+		unsigned long size, unsigned long offset)
+{
+	return _find_next_bit_le(addr, NULL, size, offset, ~0UL);
+}
+
+
+static inline unsigned long find_next_bit_le(const void *addr,
+		unsigned long size, unsigned long offset)
+{
+	return _find_next_bit_le(addr, NULL, size, offset, 0UL);
+}
+
+#else
+
+static inline unsigned long find_next_zero_bit_le(const void *addr,
+                unsigned long size, unsigned long offset)
+{
+        return find_next_zero_bit(addr, size, offset);
+}
+
+static inline unsigned long find_next_bit_le(const void *addr,
+                unsigned long size, unsigned long offset)
+{
+        return find_next_bit(addr, size, offset);
+}
+
+static inline unsigned long find_first_zero_bit_le(const void *addr,
+                unsigned long size)
+{
+        return find_first_zero_bit(addr, size);
+}
+
+#endif
 
 #endif