
[RFC,for-next,v2,2/4] bio: split pcpu cache part of bio_put into a helper

Message ID: cd6df8c5289a2df20c338d0842172950b0dedef2.1666122465.git.asml.silence@gmail.com (mailing list archive)
State: New, archived
Series: enable pcpu bio caching for IRQ I/O

Commit Message

Pavel Begunkov Oct. 18, 2022, 7:50 p.m. UTC
Extract the part of bio_put() that recycles bios into per-cpu caches into a
helper. This is a preparation patch with no functional changes.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---
 block/bio.c | 38 +++++++++++++++++++++++++-------------
 1 file changed, 25 insertions(+), 13 deletions(-)

Comments

Christoph Hellwig Oct. 20, 2022, 8:30 a.m. UTC | #1
> +	if ((bio->bi_opf & REQ_POLLED) && !WARN_ON_ONCE(in_interrupt())) {
> +		bio->bi_next = cache->free_list;
> +		cache->free_list = bio;
> +		cache->nr++;
> +	} else {
> +		put_cpu();
> +		bio_free(bio);
> +		return;
> +	}

This reads a little strange with the return in an else.  Why not:

	if (!(bio->bi_opf & REQ_POLLED) || WARN_ON_ONCE(in_interrupt())) {
		put_cpu();
		bio_free(bio);
		return;
	}

	bio->bi_next = cache->free_list;
	cache->free_list = bio;
	cache->nr++;

but given that the simple free case doesn't care about what CPU we're
on or the per-cpu cache pointer, I think we can simply move the

	cache = per_cpu_ptr(bio->bi_pool->cache, get_cpu());

after the above return as well.
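
[Editor's note: for illustration, a sketch of how the helper could look with
that suggestion applied, keeping the REQ_POLLED check and cache pruning from
the patch below. This only illustrates the review comment and is not
necessarily the code that was ultimately merged.]

	static inline void bio_put_percpu_cache(struct bio *bio)
	{
		struct bio_alloc_cache *cache;

		bio_uninit(bio);

		/* Free directly when the bio can't go into the per-cpu cache */
		if (!(bio->bi_opf & REQ_POLLED) || WARN_ON_ONCE(in_interrupt())) {
			bio_free(bio);
			return;
		}

		/* Only the caching path needs to pin the CPU and look up the cache */
		cache = per_cpu_ptr(bio->bi_pool->cache, get_cpu());
		bio->bi_next = cache->free_list;
		cache->free_list = bio;
		cache->nr++;
		if (cache->nr > ALLOC_CACHE_MAX + ALLOC_CACHE_SLACK)
			bio_alloc_cache_prune(cache, ALLOC_CACHE_SLACK);
		put_cpu();
	}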

Patch

diff --git a/block/bio.c b/block/bio.c
index 5b4594daa259..ac16cc154476 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -725,6 +725,28 @@  static void bio_alloc_cache_destroy(struct bio_set *bs)
 	bs->cache = NULL;
 }
 
+static inline void bio_put_percpu_cache(struct bio *bio)
+{
+	struct bio_alloc_cache *cache;
+
+	cache = per_cpu_ptr(bio->bi_pool->cache, get_cpu());
+	bio_uninit(bio);
+
+	if ((bio->bi_opf & REQ_POLLED) && !WARN_ON_ONCE(in_interrupt())) {
+		bio->bi_next = cache->free_list;
+		cache->free_list = bio;
+		cache->nr++;
+	} else {
+		put_cpu();
+		bio_free(bio);
+		return;
+	}
+
+	if (cache->nr > ALLOC_CACHE_MAX + ALLOC_CACHE_SLACK)
+		bio_alloc_cache_prune(cache, ALLOC_CACHE_SLACK);
+	put_cpu();
+}
+
 /**
  * bio_put - release a reference to a bio
  * @bio:   bio to release reference to
@@ -740,20 +762,10 @@  void bio_put(struct bio *bio)
 		if (!atomic_dec_and_test(&bio->__bi_cnt))
 			return;
 	}
-
-	if ((bio->bi_opf & REQ_ALLOC_CACHE) && !WARN_ON_ONCE(in_interrupt())) {
-		struct bio_alloc_cache *cache;
-
-		bio_uninit(bio);
-		cache = per_cpu_ptr(bio->bi_pool->cache, get_cpu());
-		bio->bi_next = cache->free_list;
-		cache->free_list = bio;
-		if (++cache->nr > ALLOC_CACHE_MAX + ALLOC_CACHE_SLACK)
-			bio_alloc_cache_prune(cache, ALLOC_CACHE_SLACK);
-		put_cpu();
-	} else {
+	if (bio->bi_opf & REQ_ALLOC_CACHE)
+		bio_put_percpu_cache(bio);
+	else
 		bio_free(bio);
-	}
 }
 EXPORT_SYMBOL(bio_put);