
[for-next,03/12] io_uring: support retargeting rsrc on requests in the io-wq

Message ID 20221031134126.82928-4-dylany@meta.com
State New
Series io_uring: retarget rsrc nodes periodically

Commit Message

Dylan Yudaken Oct. 31, 2022, 1:41 p.m. UTC
Requests can be in flight on the io-wq and can be long-lived (for example,
a double read will end up on the io-wq). So make sure to retarget the rsrc
nodes on those requests.

Signed-off-by: Dylan Yudaken <dylany@meta.com>
---
 io_uring/rsrc.c | 46 ++++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 42 insertions(+), 4 deletions(-)
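
io_rsrc_retarget_req() used by this patch is introduced earlier in the
series and is not shown on this page. Purely for orientation, here is a
hypothetical sketch of the per-request operation it is expected to
perform; the helper's internals, the opdef flag name, and the exact
checks are assumptions, not the series' actual code:

/* HYPOTHETICAL sketch, not taken from this series: move a request's
 * rsrc-node reference from the node it was issued against onto the
 * ring's current node, so that a stale node can drop to zero refs and
 * be freed.  Returns the number of cached ctx refs consumed (0 or 1),
 * matching how __io_rsrc_retarget_work() accounts for the result.
 */
static unsigned int io_rsrc_retarget_req(struct io_ring_ctx *ctx,
					 struct io_kiocb *req)
{
	/* already on the live node, or holding no node at all */
	if (!req->rsrc_node || req->rsrc_node == ctx->rsrc_node)
		return 0;
	/* the opcode must be able to re-resolve its fixed file/buffer;
	 * "can_retarget_rsrc" is an assumed opdef flag name
	 */
	if (!io_op_defs[req->opcode].can_retarget_rsrc)
		return 0;

	/* the real code would also have to re-look-up the request's
	 * fixed file/buffer against the current tables; elided here
	 */
	percpu_ref_put(&req->rsrc_node->refs);	/* drop the old node's ref */
	req->rsrc_node = ctx->rsrc_node;	/* adopt the current node */
	return 1;	/* one ref taken out of ctx->rsrc_cached_refs */
}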

Comments

Jens Axboe Oct. 31, 2022, 6:19 p.m. UTC | #1
On 10/31/22 7:41 AM, Dylan Yudaken wrote:
> Requests can be in flight on the io-wq and can be long-lived (for example,
> a double read will end up on the io-wq). So make sure to retarget the rsrc
> nodes on those requests.
> 
> Signed-off-by: Dylan Yudaken <dylany@meta.com>
> ---
>  io_uring/rsrc.c | 46 ++++++++++++++++++++++++++++++++++++++++++----
>  1 file changed, 42 insertions(+), 4 deletions(-)
> 
> diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
> index 106210e0d5d5..8d0d40713a63 100644
> --- a/io_uring/rsrc.c
> +++ b/io_uring/rsrc.c
> @@ -16,6 +16,7 @@
>  #include "openclose.h"
>  #include "rsrc.h"
>  #include "opdef.h"
> +#include "tctx.h"
>  
>  struct io_rsrc_update {
>  	struct file			*file;
> @@ -24,6 +25,11 @@ struct io_rsrc_update {
>  	u32				offset;
>  };
>  
> +struct io_retarget_data {
> +	struct io_ring_ctx		*ctx;
> +	unsigned int			refs;
> +};

Do we really need this struct? As far as I can tell, you only pass in
ctx and pass back refs. It's passed to the callbacks, but they only
care about ctx. If io_rsrc_retarget_wq() returned the refs rather than
using data->refs, then we could just pass in the ctx?

Or you could at least keep it local to io_rsrc_retarget_wq() and
io_retarget_rsrc_wq_cb().

Not a big deal, it's just always nice to keep the scope of a struct as
small as it can be (or to get rid of it entirely).
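
For illustration, a minimal sketch of that suggestion, using the
signatures from the patch below (untested; note the struct definition
still has to be visible to the callback, so "local" here means adjacent
to its only two users rather than at the top of the file):

/* Scoped next to its only two users instead of at file scope */
struct io_retarget_data {
	struct io_ring_ctx		*ctx;
	unsigned int			refs;
};

static void io_retarget_rsrc_wq_cb(struct io_wq_work *work, void *data)
{
	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
	struct io_retarget_data *rd = data;

	if (req->ctx != rd->ctx)
		return;

	rd->refs += io_rsrc_retarget_req(rd->ctx, req);
}

/* Returns the retargeted ref count, so callers only ever pass a ctx */
static unsigned int io_rsrc_retarget_wq(struct io_ring_ctx *ctx)
	__must_hold(&ctx->uring_lock)
{
	struct io_retarget_data data = {
		.ctx = ctx,
		.refs = 0
	};
	struct io_tctx_node *node;

	list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
		struct io_uring_task *tctx = node->task->io_uring;

		if (!tctx->io_wq)
			continue;

		io_wq_for_each(tctx->io_wq, io_retarget_rsrc_wq_cb, &data);
	}

	return data.refs;
}

__io_rsrc_retarget_work() would then do
"ctx->rsrc_cached_refs -= poll_refs + io_rsrc_retarget_wq(ctx);" and
io_retarget_data would no longer leak into the rest of the file.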

Patch

diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
index 106210e0d5d5..8d0d40713a63 100644
--- a/io_uring/rsrc.c
+++ b/io_uring/rsrc.c
@@ -16,6 +16,7 @@ 
 #include "openclose.h"
 #include "rsrc.h"
 #include "opdef.h"
+#include "tctx.h"
 
 struct io_rsrc_update {
 	struct file			*file;
@@ -24,6 +25,11 @@ struct io_rsrc_update {
 	u32				offset;
 };
 
+struct io_retarget_data {
+	struct io_ring_ctx		*ctx;
+	unsigned int			refs;
+};
+
 static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
 				  struct io_mapped_ubuf **pimu,
 				  struct page **last_hpage);
@@ -250,11 +256,42 @@ static void io_rsrc_retarget_schedule(struct io_ring_ctx *ctx)
 	ctx->rsrc_retarget_scheduled = true;
 }
 
+static void io_retarget_rsrc_wq_cb(struct io_wq_work *work, void *data)
+{
+	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
+	struct io_retarget_data *rd = data;
+
+	if (req->ctx != rd->ctx)
+		return;
+
+	rd->refs += io_rsrc_retarget_req(rd->ctx, req);
+}
+
+static void io_rsrc_retarget_wq(struct io_retarget_data *data)
+	__must_hold(&data->ctx->uring_lock)
+{
+	struct io_ring_ctx *ctx = data->ctx;
+	struct io_tctx_node *node;
+
+	list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
+		struct io_uring_task *tctx = node->task->io_uring;
+
+		if (!tctx->io_wq)
+			continue;
+
+		io_wq_for_each(tctx->io_wq, io_retarget_rsrc_wq_cb, data);
+	}
+}
+
 static void __io_rsrc_retarget_work(struct io_ring_ctx *ctx)
 	__must_hold(&ctx->uring_lock)
 {
 	struct io_rsrc_node *node;
-	unsigned int refs;
+	struct io_retarget_data data = {
+		.ctx = ctx,
+		.refs = 0
+	};
+	unsigned int poll_refs;
 	bool any_waiting;
 
 	if (!ctx->rsrc_node)
@@ -273,10 +310,11 @@ static void __io_rsrc_retarget_work(struct io_ring_ctx *ctx)
 	if (!any_waiting)
 		return;
 
-	refs = io_rsrc_retarget_table(ctx, &ctx->cancel_table);
-	refs += io_rsrc_retarget_table(ctx, &ctx->cancel_table_locked);
+	poll_refs = io_rsrc_retarget_table(ctx, &ctx->cancel_table);
+	poll_refs += io_rsrc_retarget_table(ctx, &ctx->cancel_table_locked);
+	io_rsrc_retarget_wq(&data);
 
-	ctx->rsrc_cached_refs -= refs;
+	ctx->rsrc_cached_refs -= (poll_refs + data.refs);
 	while (unlikely(ctx->rsrc_cached_refs < 0))
 		io_rsrc_refs_refill(ctx);
 }