[for-next,02/12] io_uring: io-wq helper to iterate all work

Message ID 20221031134126.82928-3-dylany@meta.com (mailing list archive)
State New
Series io_uring: retarget rsrc nodes periodically

Commit Message

Dylan Yudaken Oct. 31, 2022, 1:41 p.m. UTC
Add a helper to iterate over all work currently queued on an io-wq, as well as work currently assigned to its workers (cur_work/next_work).

Signed-off-by: Dylan Yudaken <dylany@meta.com>
---
 io_uring/io-wq.c | 49 ++++++++++++++++++++++++++++++++++++++++++++++++
 io_uring/io-wq.h |  3 +++
 2 files changed, 52 insertions(+)
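
[Editor's note: a minimal usage sketch of the new helper. A caller supplies a work_for_each_fn callback plus an opaque data pointer; io_wq_for_each() invokes it for every queued work item and for any item a worker is currently holding. Only io_wq_for_each() and the work_for_each_fn typedef come from this patch; the counting callback and the io_count_queued_work() wrapper below are hypothetical illustrations, not part of the series.]

/*
 * Hypothetical example: count every work item currently known to an
 * io-wq, both queued in the acct lists and in flight on a worker.
 * The callback signature matches the work_for_each_fn typedef added
 * by this patch.
 */
static void io_count_work_cb(struct io_wq_work *work, void *data)
{
	unsigned int *count = data;

	(*count)++;
}

static unsigned int io_count_queued_work(struct io_wq *wq)
{
	unsigned int count = 0;

	io_wq_for_each(wq, io_count_work_cb, &count);
	return count;
}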
Patch

diff --git a/io_uring/io-wq.c b/io_uring/io-wq.c
index 6f1d0e5df23a..47cbe2df05c4 100644
--- a/io_uring/io-wq.c
+++ b/io_uring/io-wq.c
@@ -38,6 +38,11 @@  enum {
 	IO_ACCT_STALLED_BIT	= 0,	/* stalled on hash */
 };
 
+struct io_for_each_work_data {
+	work_for_each_fn	*cb;
+	void			*data;
+};
+
 /*
  * One for each thread in a wqe pool
  */
@@ -856,6 +861,19 @@  static bool io_wq_for_each_worker(struct io_wqe *wqe,
 	return ret;
 }
 
+static bool io_wq_for_each_work_cb(struct io_worker *w, void *data)
+{
+	struct io_for_each_work_data *f = data;
+
+	raw_spin_lock(&w->lock);
+	if (w->cur_work)
+		f->cb(w->cur_work, f->data);
+	if (w->next_work)
+		f->cb(w->next_work, f->data);
+	raw_spin_unlock(&w->lock);
+	return false;
+}
+
 static bool io_wq_worker_wake(struct io_worker *worker, void *data)
 {
 	__set_notify_signal(worker->task);
@@ -1113,6 +1131,37 @@  enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
 	return IO_WQ_CANCEL_NOTFOUND;
 }
 
+void io_wq_for_each(struct io_wq *wq, work_for_each_fn *cb, void *data)
+{
+	int node, i;
+	struct io_for_each_work_data wq_data = {
+		.cb = cb,
+		.data = data
+	};
+
+	for_each_node(node) {
+		struct io_wqe *wqe = wq->wqes[node];
+
+		for (i = 0; i < IO_WQ_ACCT_NR; i++) {
+			struct io_wqe_acct *acct = io_get_acct(wqe, i == 0);
+			struct io_wq_work_node *node, *prev;
+			struct io_wq_work *work;
+
+			raw_spin_lock(&acct->lock);
+			wq_list_for_each(node, prev, &acct->work_list) {
+				work = container_of(node, struct io_wq_work, list);
+				cb(work, data);
+			}
+			raw_spin_unlock(&acct->lock);
+		}
+
+
+		raw_spin_lock(&wqe->lock);
+		io_wq_for_each_worker(wqe, io_wq_for_each_work_cb, &wq_data);
+		raw_spin_unlock(&wqe->lock);
+	}
+}
+
 static int io_wqe_hash_wake(struct wait_queue_entry *wait, unsigned mode,
 			    int sync, void *key)
 {
diff --git a/io_uring/io-wq.h b/io_uring/io-wq.h
index 31228426d192..163cb12259b0 100644
--- a/io_uring/io-wq.h
+++ b/io_uring/io-wq.h
@@ -63,6 +63,9 @@  typedef bool (work_cancel_fn)(struct io_wq_work *, void *);
 enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
 					void *data, bool cancel_all);
 
+typedef void (work_for_each_fn)(struct io_wq_work *, void *);
+void io_wq_for_each(struct io_wq *wq, work_for_each_fn *cb, void *data);
+
 #if defined(CONFIG_IO_WQ)
 extern void io_wq_worker_sleeping(struct task_struct *);
 extern void io_wq_worker_running(struct task_struct *);