
[RFC] io-wq: cut busy list off io_wqe

Message ID: 20191222144654.5060-1-hdanton@sina.com
State: New, archived
Series: [RFC] io-wq: cut busy list off io_wqe

Commit Message

Hillf Danton Dec. 22, 2019, 2:46 p.m. UTC
Commit e61df66c69b1 ("io-wq: ensure free/busy list browsing see all
items") added an all-workers list (all_list) to io_wqe alongside the
free and busy lists. That not only made walking the workers cleaner,
it also left the busy list purely decorative: workers are still added
to and removed from it, but nothing browses it any more. Remove it.

Signed-off-by: Hillf Danton <hdanton@sina.com>
---
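For reference, a minimal sketch of the all_list walker introduced by that
commit, which is what makes busy_list redundant. It follows the shape of
io_wq_for_each_worker() in fs/io-wq.c, but treat the exact names and
signature here as an approximation rather than a quote of the tree:

static bool io_wq_for_each_worker(struct io_wqe *wqe,
				  bool (*func)(struct io_worker *, void *),
				  void *data)
{
	struct io_worker *worker;
	bool ret = false;

	/*
	 * all_list covers every worker, free or busy, so callers such
	 * as io_wq_cancel_all() no longer need to browse busy_list.
	 */
	list_for_each_entry_rcu(worker, &wqe->all_list, all_list) {
		if (io_worker_get(worker)) {
			ret = func(worker, data);
			io_worker_release(worker);
			if (ret)
				break;
		}
	}
	return ret;
}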

Patch

--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -92,7 +92,6 @@  struct io_wqe {
 	struct io_wqe_acct acct[2];
 
 	struct hlist_nulls_head free_list;
-	struct hlist_nulls_head busy_list;
 	struct list_head all_list;
 
 	struct io_wq *wq;
@@ -327,7 +326,6 @@  static void __io_worker_busy(struct io_w
 	if (worker->flags & IO_WORKER_F_FREE) {
 		worker->flags &= ~IO_WORKER_F_FREE;
 		hlist_nulls_del_init_rcu(&worker->nulls_node);
-		hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->busy_list);
 	}
 
 	/*
@@ -365,7 +363,6 @@  static bool __io_worker_idle(struct io_w
 {
 	if (!(worker->flags & IO_WORKER_F_FREE)) {
 		worker->flags |= IO_WORKER_F_FREE;
-		hlist_nulls_del_init_rcu(&worker->nulls_node);
 		hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
 	}
 
@@ -805,10 +802,6 @@  void io_wq_cancel_all(struct io_wq *wq)
 
 	set_bit(IO_WQ_BIT_CANCEL, &wq->state);
 
-	/*
-	 * Browse both lists, as there's a gap between handing work off
-	 * to a worker and the worker putting itself on the busy_list
-	 */
 	rcu_read_lock();
 	for_each_node(node) {
 		struct io_wqe *wqe = wq->wqes[node];
@@ -1058,7 +1051,6 @@  struct io_wq *io_wq_create(unsigned boun
 		spin_lock_init(&wqe->lock);
 		INIT_WQ_LIST(&wqe->work_list);
 		INIT_HLIST_NULLS_HEAD(&wqe->free_list, 0);
-		INIT_HLIST_NULLS_HEAD(&wqe->busy_list, 1);
 		INIT_LIST_HEAD(&wqe->all_list);
 	}