
[for-next,v10,7/7] nvme: wire up fixed buffer support for nvme passthrough

Message ID 20220927173610.7794-8-joshi.k@samsung.com (mailing list archive)
State New, archived
Series [for-next,v10,1/7] io_uring: add io_uring_cmd_import_fixed

Commit Message

Kanchan Joshi Sept. 27, 2022, 5:36 p.m. UTC
If io_uring sends a passthrough command with the IORING_URING_CMD_FIXED flag,
use the pre-registered buffer for I/O (non-vectored variant). Pass the
buffer address/length to io_uring and get a bvec iterator for the range.
Next, pass this bvec iterator to the block layer and obtain a bio/request
for subsequent processing.
While at it, modify nvme_submit_user_cmd to take ubuffer as a plain integer
argument, and do away with the nvme_to_user_ptr conversion in the callers.

Signed-off-by: Kanchan Joshi <joshi.k@samsung.com>
---
 drivers/nvme/host/ioctl.c | 44 +++++++++++++++++++++++++--------------
 1 file changed, 28 insertions(+), 16 deletions(-)
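For context, the sketch below shows how userspace might exercise this path:
register a buffer with the ring, then issue an NVMe passthrough read with
IORING_URING_CMD_FIXED set so the kernel maps the pre-registered pages
instead of pinning the buffer per I/O. This is an illustrative sketch, not
part of the series; it assumes liburing, a kernel with this series applied,
the /dev/ng0n1 passthrough char device, nsid 1, and a 512-byte LBA format,
with error handling trimmed.

/* Illustrative only: NVMe passthrough read over io_uring with a fixed buffer. */
#include <fcntl.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/uio.h>
#include <liburing.h>
#include <linux/nvme_ioctl.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct nvme_uring_cmd *cmd;
	struct iovec iov;
	void *buf;
	int fd = open("/dev/ng0n1", O_RDONLY);	/* assumed device node */

	/* NVMe passthrough needs the big SQE/CQE layout */
	io_uring_queue_init(8, &ring, IORING_SETUP_SQE128 | IORING_SETUP_CQE32);

	posix_memalign(&buf, 4096, 4096);
	iov.iov_base = buf;
	iov.iov_len = 4096;
	io_uring_register_buffers(&ring, &iov, 1);	/* registered as index 0 */

	sqe = io_uring_get_sqe(&ring);
	memset(sqe, 0, 2 * sizeof(*sqe));	/* clear the full 128-byte SQE */
	sqe->opcode = IORING_OP_URING_CMD;
	sqe->fd = fd;
	sqe->cmd_op = NVME_URING_CMD_IO;
	sqe->uring_cmd_flags = IORING_URING_CMD_FIXED;	/* use the fixed buffer */
	sqe->buf_index = 0;				/* slot registered above */

	cmd = (struct nvme_uring_cmd *)sqe->cmd;
	cmd->opcode = 0x02;			/* NVMe Read */
	cmd->nsid = 1;				/* assumed namespace */
	cmd->addr = (uint64_t)(uintptr_t)buf;	/* must lie within the fixed buffer */
	cmd->data_len = 4096;
	cmd->cdw12 = 4096 / 512 - 1;		/* NLB, zero-based (512B LBA assumed) */

	io_uring_submit(&ring);
	io_uring_wait_cqe(&ring, &cqe);
	io_uring_cqe_seen(&ring, cqe);
	return 0;
}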

Comments

Christoph Hellwig Sept. 28, 2022, 5:59 p.m. UTC | #1
> -static int nvme_map_user_request(struct request *req, void __user *ubuffer,
> +static int nvme_map_user_request(struct request *req, u64 ubuffer,

The changes to pass ubuffer as an integer trip me up every time.
They are obviously fine as we do the pointer conversion less often,
but maybe they'd be easier to follow if split into a prep patch.

> +	bool fixedbufs = ioucmd && (ioucmd->flags & IORING_URING_CMD_FIXED);
>  
> -	if (!vec)
> -		ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
> -			GFP_KERNEL);
> -	else {
> +	if (vec) {

If we check IORING_URING_CMD_FIXED first, this becomes a bit simpler,
and also works better with the block helper suggested earlier:

diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index 1a45246f0d7a8..f46142dcb8f1e 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -94,34 +94,33 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer,
 	struct bio *bio = NULL;
 	void *meta = NULL;
 	int ret;
-	bool fixedbufs = ioucmd && (ioucmd->flags & IORING_URING_CMD_FIXED);
 
-	if (vec) {
-		struct iovec fast_iov[UIO_FASTIOV];
-		struct iovec *iov = fast_iov;
+	if (ioucmd && (ioucmd->flags & IORING_URING_CMD_FIXED)) {
 		struct iov_iter iter;
 
 		/* fixedbufs is only for non-vectored io */
-		WARN_ON_ONCE(fixedbufs);
-		ret = import_iovec(rq_data_dir(req), nvme_to_user_ptr(ubuffer),
-				bufflen, UIO_FASTIOV, &iov, &iter);
+		if (WARN_ON_ONCE(vec))
+			return -EINVAL;
+		ret = io_uring_cmd_import_fixed(ubuffer, bufflen,
+				rq_data_dir(req), &iter, ioucmd);
 		if (ret < 0)
 			goto out;
-
 		ret = blk_rq_map_user_iov(q, req, NULL, &iter, GFP_KERNEL);
-		kfree(iov);
-	} else if (fixedbufs) {
+	} else if (vec) {
+		struct iovec fast_iov[UIO_FASTIOV];
+		struct iovec *iov = fast_iov;
 		struct iov_iter iter;
 
-		ret = io_uring_cmd_import_fixed(ubuffer, bufflen,
-				rq_data_dir(req), &iter, ioucmd);
+		ret = import_iovec(rq_data_dir(req), nvme_to_user_ptr(ubuffer),
+				bufflen, UIO_FASTIOV, &iov, &iter);
 		if (ret < 0)
 			goto out;
 		ret = blk_rq_map_user_iov(q, req, NULL, &iter, GFP_KERNEL);
-	} else
+		kfree(iov);
+	} else {
 		ret = blk_rq_map_user(q, req, NULL,
-					nvme_to_user_ptr(ubuffer), bufflen,
-					GFP_KERNEL);
+				nvme_to_user_ptr(ubuffer), bufflen, GFP_KERNEL);
+	}
 	if (ret)
 		goto out;
 	bio = req->bio;
Anuj Gupta Sept. 29, 2022, 11:36 a.m. UTC | #2
On Wed, Sep 28, 2022 at 07:59:04PM +0200, Christoph Hellwig wrote:
> > -static int nvme_map_user_request(struct request *req, void __user *ubuffer,
> > +static int nvme_map_user_request(struct request *req, u64 ubuffer,
> 
> The changes to pass ubuffer as an integer trip me up every time.
> They are obviously fine as we do the pointer conversion less often,
> but maybe they'd be easier to follow if split into a prep patch.

Ok, will move these changes into a separate prep patch.
> 
> > +	bool fixedbufs = ioucmd && (ioucmd->flags & IORING_URING_CMD_FIXED);
> >  
> > -	if (!vec)
> > -		ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
> > -			GFP_KERNEL);
> > -	else {
> > +	if (vec) {
> 
> If we check IORING_URING_CMD_FIXED first, this becomes a bit simpler,
> and also works better with the block helper suggested earlier:

Will create a block helper for this and the SCSI counterparts in the next iteration.
> 
> diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
> index 1a45246f0d7a8..f46142dcb8f1e 100644
> --- a/drivers/nvme/host/ioctl.c
> +++ b/drivers/nvme/host/ioctl.c
> @@ -94,34 +94,33 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer,
>  	struct bio *bio = NULL;
>  	void *meta = NULL;
>  	int ret;
> -	bool fixedbufs = ioucmd && (ioucmd->flags & IORING_URING_CMD_FIXED);
>  
> -	if (vec) {
> -		struct iovec fast_iov[UIO_FASTIOV];
> -		struct iovec *iov = fast_iov;
> +	if (ioucmd && (ioucmd->flags & IORING_URING_CMD_FIXED)) {
>  		struct iov_iter iter;
>  
>  		/* fixedbufs is only for non-vectored io */
> -		WARN_ON_ONCE(fixedbufs);
> -		ret = import_iovec(rq_data_dir(req), nvme_to_user_ptr(ubuffer),
> -				bufflen, UIO_FASTIOV, &iov, &iter);
> +		if (WARN_ON_ONCE(vec))
> +			return -EINVAL;
> +		ret = io_uring_cmd_import_fixed(ubuffer, bufflen,
> +				rq_data_dir(req), &iter, ioucmd);
>  		if (ret < 0)
>  			goto out;
> -
>  		ret = blk_rq_map_user_iov(q, req, NULL, &iter, GFP_KERNEL);
> -		kfree(iov);
> -	} else if (fixedbufs) {
> +	} else if (vec) {
> +		struct iovec fast_iov[UIO_FASTIOV];
> +		struct iovec *iov = fast_iov;
>  		struct iov_iter iter;
>  
> -		ret = io_uring_cmd_import_fixed(ubuffer, bufflen,
> -				rq_data_dir(req), &iter, ioucmd);
> +		ret = import_iovec(rq_data_dir(req), nvme_to_user_ptr(ubuffer),
> +				bufflen, UIO_FASTIOV, &iov, &iter);
>  		if (ret < 0)
>  			goto out;
>  		ret = blk_rq_map_user_iov(q, req, NULL, &iter, GFP_KERNEL);
> -	} else
> +		kfree(iov);
> +	} else {
>  		ret = blk_rq_map_user(q, req, NULL,
> -					nvme_to_user_ptr(ubuffer), bufflen,
> -					GFP_KERNEL);
> +				nvme_to_user_ptr(ubuffer), bufflen, GFP_KERNEL);
> +	}
>  	if (ret)
>  		goto out;
>  	bio = req->bio;
> 

--
Anuj Gupta
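For reference, the consolidated block helper mentioned above might look
roughly like the sketch below, hoisting the vectored/plain user mapping out
of the drivers so the NVMe and SG_IO paths can share it. The name
blk_rq_map_user_io and the exact signature here are assumptions for
illustration, not taken from this series.

/* Hedged sketch of a shared block-layer mapping helper. */
int blk_rq_map_user_io(struct request *req, void __user *ubuf,
		unsigned long buf_len, gfp_t gfp_mask, bool vec, int rw)
{
	int ret = 0;

	if (vec) {
		struct iovec fast_iov[UIO_FASTIOV];
		struct iovec *iov = fast_iov;
		struct iov_iter iter;

		/* vectored path: buf_len is the iovec count here, matching
		 * how the NVMe caller above passes it */
		ret = import_iovec(rw, ubuf, buf_len, UIO_FASTIOV, &iov, &iter);
		if (ret < 0)
			return ret;
		ret = blk_rq_map_user_iov(req->q, req, NULL, &iter, gfp_mask);
		kfree(iov);
	} else if (buf_len) {
		/* plain path: map the flat user buffer directly */
		ret = blk_rq_map_user(req->q, req, NULL, ubuf, buf_len,
				gfp_mask);
	}
	return ret;
}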

Patch

diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index b9f17dc87de9..1a45246f0d7a 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -83,9 +83,10 @@  static struct request *nvme_alloc_user_request(struct request_queue *q,
 	return req;
 }
 
-static int nvme_map_user_request(struct request *req, void __user *ubuffer,
+static int nvme_map_user_request(struct request *req, u64 ubuffer,
 		unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
-		u32 meta_seed, void **metap, bool vec)
+		u32 meta_seed, void **metap, struct io_uring_cmd *ioucmd,
+		bool vec)
 {
 	struct request_queue *q = req->q;
 	struct nvme_ns *ns = q->queuedata;
@@ -93,23 +94,34 @@  static int nvme_map_user_request(struct request *req, void __user *ubuffer,
 	struct bio *bio = NULL;
 	void *meta = NULL;
 	int ret;
+	bool fixedbufs = ioucmd && (ioucmd->flags & IORING_URING_CMD_FIXED);
 
-	if (!vec)
-		ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
-			GFP_KERNEL);
-	else {
+	if (vec) {
 		struct iovec fast_iov[UIO_FASTIOV];
 		struct iovec *iov = fast_iov;
 		struct iov_iter iter;
 
-		ret = import_iovec(rq_data_dir(req), ubuffer, bufflen,
-				UIO_FASTIOV, &iov, &iter);
+		/* fixedbufs is only for non-vectored io */
+		WARN_ON_ONCE(fixedbufs);
+		ret = import_iovec(rq_data_dir(req), nvme_to_user_ptr(ubuffer),
+				bufflen, UIO_FASTIOV, &iov, &iter);
 		if (ret < 0)
 			goto out;
 
 		ret = blk_rq_map_user_iov(q, req, NULL, &iter, GFP_KERNEL);
 		kfree(iov);
-	}
+	} else if (fixedbufs) {
+		struct iov_iter iter;
+
+		ret = io_uring_cmd_import_fixed(ubuffer, bufflen,
+				rq_data_dir(req), &iter, ioucmd);
+		if (ret < 0)
+			goto out;
+		ret = blk_rq_map_user_iov(q, req, NULL, &iter, GFP_KERNEL);
+	} else
+		ret = blk_rq_map_user(q, req, NULL,
+					nvme_to_user_ptr(ubuffer), bufflen,
+					GFP_KERNEL);
 	if (ret)
 		goto out;
 	bio = req->bio;
@@ -136,7 +148,7 @@  static int nvme_map_user_request(struct request *req, void __user *ubuffer,
 }
 
 static int nvme_submit_user_cmd(struct request_queue *q,
-		struct nvme_command *cmd, void __user *ubuffer,
+		struct nvme_command *cmd, u64 ubuffer,
 		unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
 		u32 meta_seed, u64 *result, unsigned timeout, bool vec)
 {
@@ -152,7 +164,7 @@  static int nvme_submit_user_cmd(struct request_queue *q,
 	req->timeout = timeout;
 	if (ubuffer && bufflen) {
 		ret = nvme_map_user_request(req, ubuffer, bufflen, meta_buffer,
-				meta_len, meta_seed, &meta, vec);
+				meta_len, meta_seed, &meta, NULL, vec);
 		if (ret)
 			goto out;
 	}
@@ -231,7 +243,7 @@  static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
 	c.rw.appmask = cpu_to_le16(io.appmask);
 
 	return nvme_submit_user_cmd(ns->queue, &c,
-			nvme_to_user_ptr(io.addr), length,
+			io.addr, length,
 			metadata, meta_len, lower_32_bits(io.slba), NULL, 0,
 			false);
 }
@@ -285,7 +297,7 @@  static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 		timeout = msecs_to_jiffies(cmd.timeout_ms);
 
 	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
-			nvme_to_user_ptr(cmd.addr), cmd.data_len,
+			cmd.addr, cmd.data_len,
 			nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
 			0, &result, timeout, false);
 
@@ -331,7 +343,7 @@  static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 		timeout = msecs_to_jiffies(cmd.timeout_ms);
 
 	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
-			nvme_to_user_ptr(cmd.addr), cmd.data_len,
+			cmd.addr, cmd.data_len,
 			nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
 			0, &cmd.result, timeout, vec);
 
@@ -475,9 +487,9 @@  static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 	req->timeout = d.timeout_ms ? msecs_to_jiffies(d.timeout_ms) : 0;
 
 	if (d.addr && d.data_len) {
-		ret = nvme_map_user_request(req, nvme_to_user_ptr(d.addr),
+		ret = nvme_map_user_request(req, d.addr,
 			d.data_len, nvme_to_user_ptr(d.metadata),
-			d.metadata_len, 0, &meta, vec);
+			d.metadata_len, 0, &meta, ioucmd, vec);
 		if (ret)
 			goto out_err;
 	}
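On the kernel side, the fixed-buffer branch above relies on
io_uring_cmd_import_fixed() from patch 1/7 of this series: it resolves the
(addr, length) pair against the buffer registered via
io_uring_register_buffers() and hands back a bvec-backed iov_iter, so that
blk_rq_map_user_iov() can build the bio from pages pinned once at
registration time rather than per I/O. A rough sketch of that helper's
shape, reconstructed for context (the exact form in v10 may differ):

int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
			      struct iov_iter *iter, void *ioucmd)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	/* resolve against the fixed-buffer slot picked at submission */
	return io_import_fixed(rw, iter, req->imu, ubuf, len);
}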