
[3/4] sbitmap: add batch tag retrieval

Message ID 20191230181442.4460-4-axboe@kernel.dk (mailing list archive)
State New, archived
Series blk-mq: per-ctx tag caching

Commit Message

Jens Axboe Dec. 30, 2019, 6:14 p.m. UTC
This allows the caller to retrieve a batch of tags in a single operation,
instead of getting them one at a time.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 include/linux/sbitmap.h | 23 ++++++++++
 lib/sbitmap.c           | 97 +++++++++++++++++++++++++++++++++++++++++
 2 files changed, 120 insertions(+)
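
Before the patch itself, a minimal sketch of how a caller might consume the
new pair of helpers. This is illustrative only: example_use_batch() and
handle_tag() are hypothetical names, not part of this series; the
preempt_disable() requirement follows the kernel-doc below.

static void example_use_batch(struct sbitmap_queue *sbq)
{
	unsigned int offset, bit;
	unsigned long mask;

	preempt_disable();
	if (__sbitmap_queue_get_batch(sbq, &offset, &mask)) {
		/* No batch available; fall back to a single-tag path. */
		preempt_enable();
		return;
	}
	preempt_enable();

	/* Bit i of @mask corresponds to tag (@offset + i). */
	for_each_set_bit(bit, &mask, BITS_PER_LONG)
		handle_tag(offset + bit);	/* hypothetical consumer */

	/* Return the whole batch and wake any waiters. */
	__sbitmap_queue_clear_batch(sbq, offset, mask);
}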

Patch

diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h
index 7cdd82e0e0dd..d7560f57cd6f 100644
--- a/include/linux/sbitmap.h
+++ b/include/linux/sbitmap.h
@@ -366,6 +366,29 @@  static inline void sbitmap_queue_free(struct sbitmap_queue *sbq)
  */
 void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth);
 
+/**
+ * __sbitmap_queue_get_batch() - Try to allocate a batch of free tags from a
+ * &struct sbitmap_queue with preemption already disabled.
+ * @sbq: Bitmap queue to allocate from.
+ * @offset: output: tag offset of the first tag in the allocated batch
+ * @mask: output: mask of allocated tags, bit i corresponds to tag @offset + i
+ *
+ * Return: Zero if successful, -1 otherwise.
+ */
+int __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, unsigned int *offset,
+			      unsigned long *mask);
+
+/**
+ * __sbitmap_queue_clear_batch() - Free a batch of tags back to a
+ * &struct sbitmap_queue.
+ * @sbq: Bitmap queue the tags were allocated from.
+ * @offset: tag offset of the first tag in the batch
+ * @mask: mask of tags to free, relative to @offset
+ *
+ * Wakes up waiters after the tags have been cleared.
+ */
+void __sbitmap_queue_clear_batch(struct sbitmap_queue *sbq, unsigned int offset,
+				 unsigned long mask);
+
 /**
  * __sbitmap_queue_get() - Try to allocate a free bit from a &struct
  * sbitmap_queue with preemption already disabled.
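
Given the series goal (per-ctx tag caching), the expected consumer of a batch
is presumably a cache that stores explicit tag numbers. A hypothetical helper
to expand a batch, assuming the contract above (bit i of @mask maps to tag
@offset + i); batch_to_tags() is not part of this patch:

static unsigned int batch_to_tags(unsigned int offset, unsigned long mask,
				  unsigned int *tags)
{
	unsigned int nr = 0, bit;

	for_each_set_bit(bit, &mask, BITS_PER_LONG)
		tags[nr++] = offset + bit;

	return nr;
}

Because the free side also takes a mask, a caller that consumed only part of
a batch can return just the unused bits in a single call.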
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index a6c6c104b063..62cfc7761e4b 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -143,6 +143,42 @@  int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin)
 }
 EXPORT_SYMBOL_GPL(sbitmap_get);
 
+static int __sbitmap_get_batch(struct sbitmap *sb, unsigned int index,
+			       unsigned long *ret)
+{
+	do {
+		unsigned long val = sb->map[index].word;
+		unsigned long new_val;
+
+		/* Claim every free bit, capped to this word's depth. */
+		*ret = ~val & (~0UL >> (BITS_PER_LONG - sb->map[index].depth));
+		if (!(*ret))
+			return -1;
+
+		new_val = val | *ret;
+		if (cmpxchg(&sb->map[index].word, val, new_val) == val)
+			break;
+	} while (1);
+
+	return 0;
+}
+
+static int sbitmap_get_batch(struct sbitmap *sb, unsigned int *index,
+			     unsigned long *ret)
+{
+	int i;
+
+	for (i = 0; i < sb->map_nr; i++) {
+		if (!__sbitmap_get_batch(sb, *index, ret))
+			return 0;
+
+		/* Jump to next index. */
+		if (++(*index) >= sb->map_nr)
+			*index = 0;
+	}
+
+	return -1;
+}
+
 int sbitmap_get_shallow(struct sbitmap *sb, unsigned int alloc_hint,
 			unsigned long shallow_depth)
 {
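
The core trick in __sbitmap_get_batch() above is claiming every free bit of a
word with a single compare-and-exchange. A stand-alone sketch of the same
idea in portable C11 (user space, illustrative only, not kernel code; C11
atomics stand in for the kernel's cmpxchg()):

#include <stdatomic.h>
#include <stdio.h>

static int claim_all_free(_Atomic unsigned long *word, unsigned long *mask)
{
	unsigned long val = atomic_load(word);

	do {
		*mask = ~val;		/* every clear bit is free */
		if (!*mask)
			return -1;	/* word fully allocated */
		/* on CAS failure, val is reloaded and the mask recomputed */
	} while (!atomic_compare_exchange_weak(word, &val, val | *mask));

	return 0;
}

int main(void)
{
	_Atomic unsigned long word = 0xf0f0f0f0UL;
	unsigned long mask;

	if (!claim_all_free(&word, &mask))
		printf("claimed %#lx\n", mask);	/* ~0xf0f0f0f0UL on 64-bit */
	return 0;
}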
@@ -354,6 +390,67 @@  void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth)
 }
 EXPORT_SYMBOL_GPL(sbitmap_queue_resize);
 
+void __sbitmap_queue_clear_batch(struct sbitmap_queue *sbq, unsigned int offset,
+				 unsigned long mask)
+{
+	/* @offset is the first tag in the batch; convert to a word index */
+	unsigned int index = offset >> sbq->sb.shift;
+
+	do {
+		unsigned long val = sbq->sb.map[index].word;
+		unsigned long new_val = val & ~mask;
+
+		if (cmpxchg(&sbq->sb.map[index].word, val, new_val) == val)
+			break;
+	} while (1);
+
+	/*
+	 * Pairs with the memory barrier in set_current_state() to ensure the
+	 * proper ordering of the tag clearing above and waitqueue_active() in
+	 * the waker, and prepare_to_wait()/finish_wait() in the waiter. See
+	 * the comment on waitqueue_active().
+	 */
+	smp_mb__after_atomic();
+	sbitmap_queue_wake_up(sbq);
+}
+
+int __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, unsigned int *offset,
+			      unsigned long *mask)
+{
+	unsigned int hint, depth;
+	int nr;
+
+	/* don't do batches for round-robin or sparse maps (< 32 tags/word) */
+	if (sbq->round_robin || sbq->sb.shift < 5)
+		return -1;
+
+	hint = this_cpu_read(*sbq->alloc_hint);
+	depth = READ_ONCE(sbq->sb.depth);
+	if (unlikely(hint >= depth))
+		hint = depth ? prandom_u32() % depth : 0;
+
+	*offset = SB_NR_TO_INDEX(&sbq->sb, hint);
+
+	nr = sbitmap_get_batch(&sbq->sb, offset, mask);
+
+	if (nr == -1) {
+		/* If the map is full, a hint won't do us much good. */
+		this_cpu_write(*sbq->alloc_hint, 0);
+		return -1;
+	}
+
+	/*
+	 * Update the hint to the start of the word following the batch. We
+	 * may not have gotten a full word's worth of bits, but advance the
+	 * hint regardless so that the next attempt on this CPU starts in a
+	 * fresh word.
+	 */
+	hint = ((*offset) + 1) << sbq->sb.shift;
+	if (hint >= depth)
+		hint = 0;
+	this_cpu_write(*sbq->alloc_hint, hint);
+	*offset <<= sbq->sb.shift;
+	return 0;
+}
+
 int __sbitmap_queue_get(struct sbitmap_queue *sbq)
 {
 	unsigned int hint, depth;
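
To make the offset and hint arithmetic at the tail of
__sbitmap_queue_get_batch() concrete, a worked example with assumed
parameters (shift = 6, i.e. 64 tags per word, and a per-cpu hint of 70):

#include <assert.h>

int main(void)
{
	unsigned int shift = 6;			/* 64 tags per word */
	unsigned int hint = 70;			/* per-cpu alloc hint */
	unsigned int index = hint >> shift;	/* SB_NR_TO_INDEX(): word 1 */

	assert(index == 1);
	assert((index << shift) == 64);		/* batch covers tags 64..127 */
	assert(((index + 1) << shift) == 128);	/* next hint: start of word 2 */
	return 0;
}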