diff mbox series

[1/4] io_uring/uring_cmd: get rid of using req->imu

Message ID 20241022020426.819298-2-axboe@kernel.dk (mailing list archive)
State New
Headers show
Series Get rid of io_kiocb->imu | expand

Commit Message

Jens Axboe Oct. 22, 2024, 2:03 a.m. UTC
It's pretty pointless to use io_kiocb as intermediate storage for this,
so split the validity check and the actual usage. The resource node is
assigned upfront at prep time, to prevent it from going away. The actual
import is never called with the ctx->uring_lock held, so grab it for
the import.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 io_uring/uring_cmd.c | 22 +++++++++++++++++-----
 1 file changed, 17 insertions(+), 5 deletions(-)

Comments

Ming Lei Oct. 22, 2024, 2:43 a.m. UTC | #1
On Mon, Oct 21, 2024 at 08:03:20PM -0600, Jens Axboe wrote:
> It's pretty pointless to use io_kiocb as intermediate storage for this,
> so split the validity check and the actual usage. The resource node is
> assigned upfront at prep time, to prevent it from going away. The actual
> import is never called with the ctx->uring_lock held, so grab it for
> the import.
> 
> Signed-off-by: Jens Axboe <axboe@kernel.dk>
> ---
>  io_uring/uring_cmd.c | 22 +++++++++++++++++-----
>  1 file changed, 17 insertions(+), 5 deletions(-)
> 
> diff --git a/io_uring/uring_cmd.c b/io_uring/uring_cmd.c
> index 39c3c816ec78..313e2a389174 100644
> --- a/io_uring/uring_cmd.c
> +++ b/io_uring/uring_cmd.c
> @@ -211,11 +211,15 @@ int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
>  		struct io_ring_ctx *ctx = req->ctx;
>  		u16 index;
>  
> -		req->buf_index = READ_ONCE(sqe->buf_index);
> -		if (unlikely(req->buf_index >= ctx->nr_user_bufs))
> +		index = READ_ONCE(sqe->buf_index);
> +		if (unlikely(index >= ctx->nr_user_bufs))
>  			return -EFAULT;
> -		index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
> -		req->imu = ctx->user_bufs[index];
> +		req->buf_index = array_index_nospec(index, ctx->nr_user_bufs);
> +		/*
> +		 * Pi node upfront, prior to io_uring_cmd_import_fixed()
> +		 * being called. This prevents destruction of the mapped buffer
> +		 * we'll need at actual import time.
> +		 */
>  		io_req_set_rsrc_node(req, ctx, 0);
>  	}
>  	ioucmd->cmd_op = READ_ONCE(sqe->cmd_op);
> @@ -272,8 +276,16 @@ int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
>  			      struct iov_iter *iter, void *ioucmd)
>  {
>  	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);
> +	struct io_ring_ctx *ctx = req->ctx;
> +	struct io_mapped_ubuf *imu;
> +	int ret;
>  
> -	return io_import_fixed(rw, iter, req->imu, ubuf, len);
> +	mutex_lock(&ctx->uring_lock);
> +	imu = ctx->user_bufs[req->buf_index];
> +	ret = io_import_fixed(rw, iter, imu, ubuf, len);
> +	mutex_unlock(&ctx->uring_lock);

io_uring_cmd_import_fixed is called in nvme ->issue(), and ->uring_lock
may be held already.



thanks,
Ming
Jens Axboe Oct. 22, 2024, 2:59 a.m. UTC | #2
On 10/21/24 8:43 PM, Ming Lei wrote:
> On Mon, Oct 21, 2024 at 08:03:20PM -0600, Jens Axboe wrote:
>> It's pretty pointless to use io_kiocb as intermediate storage for this,
>> so split the validity check and the actual usage. The resource node is
>> assigned upfront at prep time, to prevent it from going away. The actual
>> import is never called with the ctx->uring_lock held, so grab it for
>> the import.
>>
>> Signed-off-by: Jens Axboe <axboe@kernel.dk>
>> ---
>>  io_uring/uring_cmd.c | 22 +++++++++++++++++-----
>>  1 file changed, 17 insertions(+), 5 deletions(-)
>>
>> diff --git a/io_uring/uring_cmd.c b/io_uring/uring_cmd.c
>> index 39c3c816ec78..313e2a389174 100644
>> --- a/io_uring/uring_cmd.c
>> +++ b/io_uring/uring_cmd.c
>> @@ -211,11 +211,15 @@ int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
>>  		struct io_ring_ctx *ctx = req->ctx;
>>  		u16 index;
>>  
>> -		req->buf_index = READ_ONCE(sqe->buf_index);
>> -		if (unlikely(req->buf_index >= ctx->nr_user_bufs))
>> +		index = READ_ONCE(sqe->buf_index);
>> +		if (unlikely(index >= ctx->nr_user_bufs))
>>  			return -EFAULT;
>> -		index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
>> -		req->imu = ctx->user_bufs[index];
>> +		req->buf_index = array_index_nospec(index, ctx->nr_user_bufs);
>> +		/*
>> +		 * Pi node upfront, prior to io_uring_cmd_import_fixed()
>> +		 * being called. This prevents destruction of the mapped buffer
>> +		 * we'll need at actual import time.
>> +		 */
>>  		io_req_set_rsrc_node(req, ctx, 0);
>>  	}
>>  	ioucmd->cmd_op = READ_ONCE(sqe->cmd_op);
>> @@ -272,8 +276,16 @@ int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
>>  			      struct iov_iter *iter, void *ioucmd)
>>  {
>>  	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);
>> +	struct io_ring_ctx *ctx = req->ctx;
>> +	struct io_mapped_ubuf *imu;
>> +	int ret;
>>  
>> -	return io_import_fixed(rw, iter, req->imu, ubuf, len);
>> +	mutex_lock(&ctx->uring_lock);
>> +	imu = ctx->user_bufs[req->buf_index];
>> +	ret = io_import_fixed(rw, iter, imu, ubuf, len);
>> +	mutex_unlock(&ctx->uring_lock);
> 
> io_uring_cmd_import_fixed is called in nvme ->issue(), and ->uring_lock
> may be held already.

Gah indeed, in fact it always should be, unless it's forcefully punted
to io-wq. I'll sort that out, thanks. And looks like we have zero tests
for uring_cmd + fixed buffers :-(
Anuj Gupta Oct. 22, 2024, 8:34 a.m. UTC | #3
>
>Gah indeed, in fact it always should be, unless it's forcefully punted
>to io-wq. I'll sort that out, thanks. And looks like we have zero tests
>for uring_cmd + fixed buffers :-(
>

We do have tests for uring_cmd + fixed-buffers in liburing [*].

[*] https://github.com/axboe/liburing/blob/master/test/io_uring_passthrough.c

>-- 
>Jens Axboe
>
Jens Axboe Oct. 22, 2024, 1:18 p.m. UTC | #4
On 10/22/24 2:34 AM, Anuj Gupta wrote:
>>
>> Gah indeed, in fact it always should be, unless it's forcefully punted
>> to io-wq. I'll sort that out, thanks. And looks like we have zero tests
>> for uring_cmd + fixed buffers :-(
>>
> 
> We do have tests for uring_cmd + fixed-buffers in liburing [*].
> 
> [*] https://github.com/axboe/liburing/blob/master/test/io_uring_passthrough.c

We seem to yes, but it's not actually importing a fixed buffer...
Jens Axboe Oct. 22, 2024, 1:24 p.m. UTC | #5
On 10/22/24 7:18 AM, Jens Axboe wrote:
> On 10/22/24 2:34 AM, Anuj Gupta wrote:
>>>
>>> Gah indeed, in fact it always should be, unless it's forcefully punted
>>> to io-wq. I'll sort that out, thanks. And looks like we have zero tests
>>> for uring_cmd + fixed buffers :-(
>>>
>>
>> We do have tests for uring_cmd + fixed-buffers in liburing [*].
>>
>> [*] https://github.com/axboe/liburing/blob/master/test/io_uring_passthrough.c
> 
> We seem to yes, but its not actually importing a fixed buffer...

The test is buggy, this should fix it:

diff --git a/test/io_uring_passthrough.c b/test/io_uring_passthrough.c
index 345c65b1faa4..f18a1862c524 100644
--- a/test/io_uring_passthrough.c
+++ b/test/io_uring_passthrough.c
@@ -167,6 +167,8 @@ static int __test_io(const char *file, struct io_uring *ring, int tc, int read,
 			}
 		}
 		sqe->opcode = IORING_OP_URING_CMD;
+		if (do_fixed)
+			sqe->uring_cmd_flags |= IORING_URING_CMD_FIXED;
 		sqe->user_data = ((uint64_t)offset << 32) | i;
 		if (sqthread)
 			sqe->flags |= IOSQE_FIXED_FILE;
diff mbox series

Patch

diff --git a/io_uring/uring_cmd.c b/io_uring/uring_cmd.c
index 39c3c816ec78..313e2a389174 100644
--- a/io_uring/uring_cmd.c
+++ b/io_uring/uring_cmd.c
@@ -211,11 +211,15 @@  int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 		struct io_ring_ctx *ctx = req->ctx;
 		u16 index;
 
-		req->buf_index = READ_ONCE(sqe->buf_index);
-		if (unlikely(req->buf_index >= ctx->nr_user_bufs))
+		index = READ_ONCE(sqe->buf_index);
+		if (unlikely(index >= ctx->nr_user_bufs))
 			return -EFAULT;
-		index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
-		req->imu = ctx->user_bufs[index];
+		req->buf_index = array_index_nospec(index, ctx->nr_user_bufs);
+		/*
+		 * Pin the node upfront, prior to io_uring_cmd_import_fixed()
+		 * being called. This prevents destruction of the mapped buffer
+		 * we'll need at actual import time.
+		 */
 		io_req_set_rsrc_node(req, ctx, 0);
 	}
 	ioucmd->cmd_op = READ_ONCE(sqe->cmd_op);
@@ -272,8 +276,16 @@  int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
 			      struct iov_iter *iter, void *ioucmd)
 {
 	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);
+	struct io_ring_ctx *ctx = req->ctx;
+	struct io_mapped_ubuf *imu;
+	int ret;
 
-	return io_import_fixed(rw, iter, req->imu, ubuf, len);
+	mutex_lock(&ctx->uring_lock);
+	imu = ctx->user_bufs[req->buf_index];
+	ret = io_import_fixed(rw, iter, imu, ubuf, len);
+	mutex_unlock(&ctx->uring_lock);
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(io_uring_cmd_import_fixed);