
[6/6] block: convert struct blk_plug callback list to hlists

Message ID 20240123173310.1966157-7-axboe@kernel.dk (mailing list archive)
State New, archived
Series Cache issue side time querying

Commit Message

Jens Axboe Jan. 23, 2024, 5:30 p.m. UTC
We currently use a doubly linked list, which means the head takes up
16 bytes. As any iteration goes over the full list by first splicing it
to an on-stack copy, we never need to remove members from the middle of
the list.

Convert it to an hlist instead, saving 8 bytes in the blk_plug structure.
This also helps save 40 bytes of text in the core block code, tested on
arm64.
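
For a rough sense of where the 8 bytes come from: a list_head carries two pointers (next and prev), while an hlist_head carries only a single first pointer; the per-entry hlist_node still carries two. Below is a minimal userspace sketch using simplified stand-ins for the kernel types, not the real include/linux/types.h definitions:

#include <stdio.h>

/* Simplified stand-ins for the kernel's list types. */
struct list_head {		/* doubly linked list head: two pointers */
	struct list_head *next, *prev;
};

struct hlist_node {		/* per-entry node: still two pointers */
	struct hlist_node *next, **pprev;
};

struct hlist_head {		/* hlist head: a single pointer */
	struct hlist_node *first;
};

int main(void)
{
	/* On a 64-bit build this prints 16 and 8: the head embedded in
	 * struct blk_plug shrinks, while each struct blk_plug_cb stays
	 * the same size (list_head and hlist_node are both two pointers). */
	printf("list_head:  %zu bytes\n", sizeof(struct list_head));
	printf("hlist_head: %zu bytes\n", sizeof(struct hlist_head));
	return 0;
}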

This does mean that flush callbacks will be run in reverse order. While this
should not pose a problem, we can always change the list splicing to just
iterate-and-add instead, preserving the ordering. These lists generally hold
just a single entry (or a few entries), so either way this should be fine.
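
To illustrate the ordering point: hlist_add_head() inserts at the front, so a front-to-back walk, as flush_plug_callbacks() does after moving the list to the stack, visits entries in reverse registration order. A minimal userspace sketch with simplified, hypothetical stand-ins for the hlist primitives:

#include <stdio.h>
#include <stddef.h>

struct hnode { struct hnode *next; };	/* simplified hlist_node */
struct hhead { struct hnode *first; };	/* simplified hlist_head */

/* Head insertion, like hlist_add_head(). */
static void hadd_head(struct hnode *n, struct hhead *h)
{
	n->next = h->first;
	h->first = n;
}

struct cb {
	struct hnode node;
	const char *name;
};

int main(void)
{
	struct hhead plug = { NULL };
	struct cb a = { .name = "registered 1st" };
	struct cb b = { .name = "registered 2nd" };
	struct cb c = { .name = "registered 3rd" };
	struct hnode *pos;

	hadd_head(&a.node, &plug);
	hadd_head(&b.node, &plug);
	hadd_head(&c.node, &plug);

	/* Front-to-back walk: prints 3rd, 2nd, 1st, i.e. reverse
	 * registration (LIFO) order. */
	for (pos = plug.first; pos; pos = pos->next) {
		struct cb *e = (struct cb *)((char *)pos - offsetof(struct cb, node));
		printf("%s\n", e->name);
	}
	return 0;
}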

Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 block/blk-core.c       | 26 ++++++++++++++------------
 drivers/md/raid1-10.c  |  2 +-
 include/linux/blkdev.h |  4 ++--
 3 files changed, 17 insertions(+), 15 deletions(-)

Comments

Christoph Hellwig Jan. 24, 2024, 9:30 a.m. UTC | #1
On Tue, Jan 23, 2024 at 10:30:38AM -0700, Jens Axboe wrote:
> We currently use a doubly linked list, which means the head takes up
> 16 bytes. As any iteration goes over the full list by first splicing it
> to an on-stack copy, we never need to remove members from the middle of
> the list.
> 
> Convert it to an hlist instead, saving 8 bytes in the blk_plug structure.
> This also helps save 40 bytes of text in the core block code, tested on
> arm64.
> 
> This does mean that flush callbacks will be run in reverse order. While this
> should not pose a problem, we can always change the list splicing to just
> iterate-and-add instead, preserving the ordering. These lists generally hold
> just a single entry (or a few entries), so either way this should be fine.

That would completely mess up the I/O order for anyone using flush callbacks;
I don't think that's great.

Patch

diff --git a/block/blk-core.c b/block/blk-core.c
index dd593008511c..f28859b4a3ef 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1080,7 +1080,7 @@  void blk_start_plug_nr_ios(struct blk_plug *plug, unsigned char nr_ios)
 	plug->rq_count = 0;
 	plug->multiple_queues = false;
 	plug->has_elevator = false;
-	INIT_LIST_HEAD(&plug->cb_list);
+	INIT_HLIST_HEAD(&plug->cb_list);
 
 	/*
 	 * Store ordering should not be needed here, since a potential
@@ -1120,16 +1120,18 @@  EXPORT_SYMBOL(blk_start_plug);
 
 static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
 {
-	LIST_HEAD(callbacks);
+	HLIST_HEAD(callbacks);
 
-	while (!list_empty(&plug->cb_list)) {
-		list_splice_init(&plug->cb_list, &callbacks);
+	while (!hlist_empty(&plug->cb_list)) {
+		struct hlist_node *entry, *tmp;
 
-		while (!list_empty(&callbacks)) {
-			struct blk_plug_cb *cb = list_first_entry(&callbacks,
-							  struct blk_plug_cb,
-							  list);
-			list_del(&cb->list);
+		hlist_move_list(&plug->cb_list, &callbacks);
+
+		hlist_for_each_safe(entry, tmp, &callbacks) {
+			struct blk_plug_cb *cb;
+
+			cb = hlist_entry(entry, struct blk_plug_cb, list);
+			hlist_del(&cb->list);
 			cb->callback(cb, from_schedule);
 		}
 	}
@@ -1144,7 +1146,7 @@  struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data,
 	if (!plug)
 		return NULL;
 
-	list_for_each_entry(cb, &plug->cb_list, list)
+	hlist_for_each_entry(cb, &plug->cb_list, list)
 		if (cb->callback == unplug && cb->data == data)
 			return cb;
 
@@ -1154,7 +1156,7 @@  struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data,
 	if (cb) {
 		cb->data = data;
 		cb->callback = unplug;
-		list_add(&cb->list, &plug->cb_list);
+		hlist_add_head(&cb->list, &plug->cb_list);
 	}
 	return cb;
 }
@@ -1162,7 +1164,7 @@  EXPORT_SYMBOL(blk_check_plugged);
 
 void __blk_flush_plug(struct blk_plug *plug, bool from_schedule)
 {
-	if (!list_empty(&plug->cb_list))
+	if (!hlist_empty(&plug->cb_list))
 		flush_plug_callbacks(plug, from_schedule);
 	blk_mq_flush_plug_list(plug, from_schedule);
 	/*
diff --git a/drivers/md/raid1-10.c b/drivers/md/raid1-10.c
index 512746551f36..4a1b6f17067f 100644
--- a/drivers/md/raid1-10.c
+++ b/drivers/md/raid1-10.c
@@ -152,7 +152,7 @@  static inline bool raid1_add_bio_to_plug(struct mddev *mddev, struct bio *bio,
 	plug = container_of(cb, struct raid1_plug_cb, cb);
 	bio_list_add(&plug->pending, bio);
 	if (++plug->count / MAX_PLUG_BIO >= copies) {
-		list_del(&cb->list);
+		hlist_del(&cb->list);
 		cb->callback(cb, false);
 	}
 
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index ce6d057de2f0..f3105a519ef4 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -951,13 +951,13 @@  struct blk_plug {
 	bool multiple_queues;
 	bool has_elevator;
 
-	struct list_head cb_list; /* md requires an unplug callback */
+	struct hlist_head cb_list; /* md requires an unplug callback */
 };
 
 struct blk_plug_cb;
 typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool);
 struct blk_plug_cb {
-	struct list_head list;
+	struct hlist_node list;
 	blk_plug_cb_fn callback;
 	void *data;
 };
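
For context on the blkdev.h hunk: the usual consumer pattern (as in drivers/md) embeds struct blk_plug_cb in a driver-private structure, registers it with blk_check_plugged() while a plug is held, and recovers the container in the callback; only the type of the embedded member changes with this patch. A hedged sketch of that pattern, with hypothetical names (my_plug_cb, my_unplug, my_queue_bio):

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/slab.h>

struct my_plug_cb {
	struct blk_plug_cb cb;		/* cb.list is now an hlist_node */
	struct bio_list pending;	/* driver-private state */
};

static void my_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct my_plug_cb *mcb = container_of(cb, struct my_plug_cb, cb);
	struct bio *bio;

	while ((bio = bio_list_pop(&mcb->pending)) != NULL)
		submit_bio_noacct(bio);
	kfree(mcb);			/* the unplug callback owns the allocation */
}

static void my_queue_bio(void *owner, struct bio *bio)
{
	struct blk_plug_cb *cb;

	/* Returns an existing callback for (my_unplug, owner) or allocates
	 * a zeroed one and links it into the current plug's cb_list. */
	cb = blk_check_plugged(my_unplug, owner, sizeof(struct my_plug_cb));
	if (cb) {
		struct my_plug_cb *mcb = container_of(cb, struct my_plug_cb, cb);

		bio_list_add(&mcb->pending, bio);
		return;
	}
	submit_bio_noacct(bio);		/* no plug active: submit directly */
}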