
[03/10] btrfs-progs: Replace homegrown bitops-related functions with kernel counterparts

Message ID 1538405181-25231-4-git-send-email-nborisov@suse.com (mailing list archive)
State New, archived
Series Freespace tree repair support v2

Commit Message

Nikolay Borisov Oct. 1, 2018, 2:46 p.m. UTC
Replace the existing find_*_bit functions with their kernel equivalents. This
reduces duplication, simplifies the code (there is now a single worker
function, _find_next_bit), and is quite likely faster. No functional
changes.

Signed-off-by: Nikolay Borisov <nborisov@suse.com>
---
 kernel-lib/bitops.h | 142 +++++++++++++++++-----------------------------------
 1 file changed, 46 insertions(+), 96 deletions(-)
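
[Editor's note: a minimal usage sketch of the find_*_bit API this patch
reimplements, not part of the patch itself. The bitmap and the printing
below are purely illustrative, and it assumes kernel-lib/bitops.h can be
included on its own.]

	#include <stdio.h>
	#include "kernel-lib/bitops.h"

	/*
	 * Print every set bit in the bitmap. find_first_bit()/find_next_bit()
	 * return "nbits" when no further set bit exists, which ends the loop.
	 */
	static void dump_set_bits(const unsigned long *bitmap, unsigned long nbits)
	{
		unsigned long bit;

		for (bit = find_first_bit(bitmap, nbits);
		     bit < nbits;
		     bit = find_next_bit(bitmap, nbits, bit + 1))
			printf("bit %lu is set\n", bit);
	}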

Comments

Omar Sandoval Oct. 2, 2018, 11:32 p.m. UTC | #1
On Mon, Oct 01, 2018 at 05:46:14PM +0300, Nikolay Borisov wrote:
> Replace the existing find_*_bit functions with their kernel equivalents. This
> reduces duplication, simplifies the code (there is now a single worker
> function, _find_next_bit), and is quite likely faster. No functional
> changes.

Reviewed-by: Omar Sandoval <osandov@fb.com>


Patch

diff --git a/kernel-lib/bitops.h b/kernel-lib/bitops.h
index 5b35f9fc5213..78256adf55be 100644
--- a/kernel-lib/bitops.h
+++ b/kernel-lib/bitops.h
@@ -2,6 +2,7 @@ 
 #define _PERF_LINUX_BITOPS_H_
 
 #include <linux/kernel.h>
+#include "internal.h"
 
 #ifndef DIV_ROUND_UP
 #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
@@ -109,116 +110,65 @@  static __always_inline unsigned long __ffs(unsigned long word)
 
 #define ffz(x) __ffs(~(x))
 
+#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1))) 
+#define BITMAP_LAST_WORD_MASK(nbits) (~0UL >> (-(nbits) & (BITS_PER_LONG - 1)))
+
 /*
- * Find the first set bit in a memory region.
+ * This is a common helper function for find_next_bit, find_next_zero_bit, and
+ * find_next_and_bit. The differences are:
+ *  - The "invert" argument, which is XORed with each fetched word before
+ *    searching it for one bits.
+ *  - The optional "addr2", which is anded with "addr1" if present.
  */
-static inline unsigned long
-find_first_bit(const unsigned long *addr, unsigned long size)
+static inline unsigned long _find_next_bit(const unsigned long *addr1,
+		const unsigned long *addr2, unsigned long nbits,
+		unsigned long start, unsigned long invert)
 {
-	const unsigned long *p = addr;
-	unsigned long result = 0;
 	unsigned long tmp;
 
-	while (size & ~(BITS_PER_LONG-1)) {
-		if ((tmp = *(p++)))
-			goto found;
-		result += BITS_PER_LONG;
-		size -= BITS_PER_LONG;
+	if (start >= nbits)
+		return nbits;
+
+	tmp = addr1[start / BITS_PER_LONG];
+	if (addr2)
+		tmp &= addr2[start / BITS_PER_LONG];
+	tmp ^= invert;
+
+	/* Handle 1st word. */
+	tmp &= BITMAP_FIRST_WORD_MASK(start);
+	start = round_down(start, BITS_PER_LONG);
+
+	while (!tmp) {
+		start += BITS_PER_LONG;
+		if (start >= nbits)
+			return nbits;
+
+		tmp = addr1[start / BITS_PER_LONG];
+		if (addr2)
+			tmp &= addr2[start / BITS_PER_LONG];
+		tmp ^= invert;
 	}
-	if (!size)
-		return result;
-
-	tmp = (*p) & (~0UL >> (BITS_PER_LONG - size));
-	if (tmp == 0UL)		/* Are any bits set? */
-		return result + size;	/* Nope. */
-found:
-	return result + __ffs(tmp);
+
+	return min(start + __ffs(tmp), nbits);
 }
 
 /*
  * Find the next set bit in a memory region.
  */
-static inline unsigned long
-find_next_bit(const unsigned long *addr, unsigned long size,
-	      unsigned long offset)
+static inline unsigned long find_next_bit(const unsigned long *addr,
+					  unsigned long size,
+					  unsigned long offset)
 {
-	const unsigned long *p = addr + BITOP_WORD(offset);
-	unsigned long result = offset & ~(BITS_PER_LONG-1);
-	unsigned long tmp;
-
-	if (offset >= size)
-		return size;
-	size -= result;
-	offset %= BITS_PER_LONG;
-	if (offset) {
-		tmp = *(p++);
-		tmp &= (~0UL << offset);
-		if (size < BITS_PER_LONG)
-			goto found_first;
-		if (tmp)
-			goto found_middle;
-		size -= BITS_PER_LONG;
-		result += BITS_PER_LONG;
-	}
-	while (size & ~(BITS_PER_LONG-1)) {
-		if ((tmp = *(p++)))
-			goto found_middle;
-		result += BITS_PER_LONG;
-		size -= BITS_PER_LONG;
-	}
-	if (!size)
-		return result;
-	tmp = *p;
-
-found_first:
-	tmp &= (~0UL >> (BITS_PER_LONG - size));
-	if (tmp == 0UL)		/* Are any bits set? */
-		return result + size;	/* Nope. */
-found_middle:
-	return result + __ffs(tmp);
+	return _find_next_bit(addr, NULL, size, offset, 0UL);
 }
 
-/*
- * This implementation of find_{first,next}_zero_bit was stolen from
- * Linus' asm-alpha/bitops.h.
- */
-static inline unsigned long
-find_next_zero_bit(const unsigned long *addr, unsigned long size,
-		   unsigned long offset)
+static inline unsigned long find_next_zero_bit(const unsigned long *addr,
+					       unsigned long size,
+					       unsigned long offset)
 {
-	const unsigned long *p = addr + BITOP_WORD(offset);
-	unsigned long result = offset & ~(BITS_PER_LONG-1);
-	unsigned long tmp;
-
-	if (offset >= size)
-		return size;
-	size -= result;
-	offset %= BITS_PER_LONG;
-	if (offset) {
-		tmp = *(p++);
-		tmp |= ~0UL >> (BITS_PER_LONG - offset);
-		if (size < BITS_PER_LONG)
-			goto found_first;
-		if (~tmp)
-			goto found_middle;
-		size -= BITS_PER_LONG;
-		result += BITS_PER_LONG;
-	}
-	while (size & ~(BITS_PER_LONG-1)) {
-		if (~(tmp = *(p++)))
-			goto found_middle;
-		result += BITS_PER_LONG;
-		size -= BITS_PER_LONG;
-	}
-	if (!size)
-		return result;
-	tmp = *p;
-
-found_first:
-	tmp |= ~0UL << size;
-	if (tmp == ~0UL)	/* Are any bits zero? */
-		return result + size;	/* Nope. */
-found_middle:
-	return result + ffz(tmp);
+	return _find_next_bit(addr, NULL, size, offset, ~0UL);
 }
+
+#define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
+
 #endif
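
[Editor's note: an explanatory sketch of the "invert" parameter used by
_find_next_bit() above, not part of the patch. XORing a fetched word with
~0UL complements it, so looking for the first zero bit reduces to looking
for the first one bit of the complemented word; find_next_zero_bit() simply
passes invert = ~0UL. The standalone demo below uses the GCC builtin
__builtin_ctzl() in place of __ffs().]

	#include <stdio.h>

	int main(void)
	{
		unsigned long word = 0xfffffff7UL;	/* bit 3 is the lowest clear bit */

		/* find_next_zero_bit() passes invert = ~0UL: clear bits become set bits */
		unsigned long inverted = word ^ ~0UL;

		/* __builtin_ctzl() counts trailing zero bits, i.e. behaves like __ffs() */
		printf("lowest zero bit of word: %d\n", __builtin_ctzl(inverted));
		return 0;
	}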