
[PATCHv2,3/5] nvme: unify nvme request end_io

Message ID 20230407191636.2631046-4-kbusch@meta.com (mailing list archive)
State New, archived
Series nvme io_uring_cmd polling enhancements

Commit Message

Keith Busch April 7, 2023, 7:16 p.m. UTC
From: Keith Busch <kbusch@kernel.org>

We can finish the metadata copy inline with the completion. After that,
there's really nothing else different between the meta and non-meta data
end_io callbacks, so unify them.

Signed-off-by: Keith Busch <kbusch@kernel.org>
---
 drivers/nvme/host/ioctl.c | 57 +++++++--------------------------------
 1 file changed, 9 insertions(+), 48 deletions(-)
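
For reference, the unified completion handler after this patch, reconstructed from the hunks below, looks roughly like this (the iopoll dispatch in the middle is unchanged and elided between hunks; it mirrors the removed meta variant):

static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
						blk_status_t err)
{
	struct io_uring_cmd *ioucmd = req->end_io_data;
	struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
	void *cookie = READ_ONCE(ioucmd->cookie);
	int status = nvme_req(req)->status;

	if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
		status = -EINTR;

	/* The metadata copy now happens inline with the completion. */
	if (pdu->meta_len)
		status = nvme_finish_user_metadata(req, pdu->u.meta_buffer,
					pdu->u.meta, pdu->meta_len, status);

	pdu->nvme_status = status;
	pdu->u.result = le64_to_cpu(nvme_req(req)->result.u64);

	/*
	 * For iopoll, complete it directly.
	 * Otherwise, move the completion to task work.
	 */
	if (cookie != NULL && blk_rq_is_poll(req))
		nvme_uring_task_cb(ioucmd, IO_URING_F_UNLOCKED);
	else
		io_uring_cmd_complete_in_task(ioucmd, nvme_uring_task_cb);

	return RQ_END_IO_FREE;
}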

Comments

Kanchan Joshi April 10, 2023, 11:34 a.m. UTC | #1
On Fri, Apr 07, 2023 at 12:16:34PM -0700, Keith Busch wrote:
>From: Keith Busch <kbusch@kernel.org>
>
>We can finish the metadata copy inline with the completion. After that,
>there's really nothing else different between the meta and non-meta data
>end_io callbacks, so unify them.
>
>Signed-off-by: Keith Busch <kbusch@kernel.org>
>---
> drivers/nvme/host/ioctl.c | 57 +++++++--------------------------------
> 1 file changed, 9 insertions(+), 48 deletions(-)
>
>diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
>index 278c57ee0db91..a1e0a14dadedc 100644
>--- a/drivers/nvme/host/ioctl.c
>+++ b/drivers/nvme/host/ioctl.c
>@@ -465,29 +465,6 @@ static inline struct nvme_uring_cmd_pdu *nvme_uring_cmd_pdu(
> 	return (struct nvme_uring_cmd_pdu *)&ioucmd->pdu;
> }
>
>-static void nvme_uring_task_meta_cb(struct io_uring_cmd *ioucmd,
>-				    unsigned issue_flags)
>-{
>-	struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
>-	struct request *req = pdu->req;
>-	int status;
>-	u64 result;
>-
>-	if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
>-		status = -EINTR;
>-	else
>-		status = nvme_req(req)->status;
>-
>-	result = le64_to_cpu(nvme_req(req)->result.u64);
>-
>-	if (pdu->meta_len)
>-		status = nvme_finish_user_metadata(req, pdu->u.meta_buffer,
>-					pdu->u.meta, pdu->meta_len, status);
>-	blk_mq_free_request(req);
>-
>-	io_uring_cmd_done(ioucmd, status, result, issue_flags);
>-}
>-
> static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd,
> 			       unsigned issue_flags)
> {
>@@ -502,11 +479,16 @@ static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
> 	struct io_uring_cmd *ioucmd = req->end_io_data;
> 	struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
> 	void *cookie = READ_ONCE(ioucmd->cookie);
>+	int status = nvme_req(req)->status;
>
> 	if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
>-		pdu->nvme_status = -EINTR;
>-	else
>-		pdu->nvme_status = nvme_req(req)->status;
>+		status = -EINTR;
>+
>+	if (pdu->meta_len)
>+		status = nvme_finish_user_metadata(req, pdu->u.meta_buffer,
>+					pdu->u.meta, pdu->meta_len, status);

nvme_finish_user_metadata() does a copy_to_user().
Here, too, the intent was to avoid touching that memory in interrupt context.
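
For background on this objection: blk-mq may invoke a request's ->end_io callback directly from interrupt context, and copy_to_user(), which nvme_finish_user_metadata() performs, can fault and sleep, so it is only safe in process context. Below is a minimal sketch of the constraint, assuming the copy is done inline only for polled requests (which complete in the submitting task's context) and otherwise left to the deferred task-work callback; this is an illustration of the concern, not necessarily what a later revision of the series does:

	/*
	 * Sketch only: never do the user copy in interrupt context.
	 * Polled completions run in the submitting task's context, so
	 * copying inline is safe there; interrupt-driven completions
	 * would instead perform the copy in nvme_uring_task_cb(), which
	 * io_uring_cmd_complete_in_task() runs in task context.
	 */
	if (pdu->meta_len && blk_rq_is_poll(req))
		status = nvme_finish_user_metadata(req, pdu->u.meta_buffer,
					pdu->u.meta, pdu->meta_len, status);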

Patch

diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index 278c57ee0db91..a1e0a14dadedc 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -465,29 +465,6 @@  static inline struct nvme_uring_cmd_pdu *nvme_uring_cmd_pdu(
 	return (struct nvme_uring_cmd_pdu *)&ioucmd->pdu;
 }
 
-static void nvme_uring_task_meta_cb(struct io_uring_cmd *ioucmd,
-				    unsigned issue_flags)
-{
-	struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
-	struct request *req = pdu->req;
-	int status;
-	u64 result;
-
-	if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
-		status = -EINTR;
-	else
-		status = nvme_req(req)->status;
-
-	result = le64_to_cpu(nvme_req(req)->result.u64);
-
-	if (pdu->meta_len)
-		status = nvme_finish_user_metadata(req, pdu->u.meta_buffer,
-					pdu->u.meta, pdu->meta_len, status);
-	blk_mq_free_request(req);
-
-	io_uring_cmd_done(ioucmd, status, result, issue_flags);
-}
-
 static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd,
 			       unsigned issue_flags)
 {
@@ -502,11 +479,16 @@  static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
 	struct io_uring_cmd *ioucmd = req->end_io_data;
 	struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
 	void *cookie = READ_ONCE(ioucmd->cookie);
+	int status = nvme_req(req)->status;
 
 	if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
-		pdu->nvme_status = -EINTR;
-	else
-		pdu->nvme_status = nvme_req(req)->status;
+		status = -EINTR;
+
+	if (pdu->meta_len)
+		status = nvme_finish_user_metadata(req, pdu->u.meta_buffer,
+					pdu->u.meta, pdu->meta_len, status);
+
+	pdu->nvme_status = status;
 	pdu->u.result = le64_to_cpu(nvme_req(req)->result.u64);
 
 	/*
@@ -521,25 +503,6 @@  static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
 	return RQ_END_IO_FREE;
 }
 
-static enum rq_end_io_ret nvme_uring_cmd_end_io_meta(struct request *req,
-						     blk_status_t err)
-{
-	struct io_uring_cmd *ioucmd = req->end_io_data;
-	struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
-	void *cookie = READ_ONCE(ioucmd->cookie);
-
-	/*
-	 * For iopoll, complete it directly.
-	 * Otherwise, move the completion to task work.
-	 */
-	if (cookie != NULL && blk_rq_is_poll(req))
-		nvme_uring_task_meta_cb(ioucmd, IO_URING_F_UNLOCKED);
-	else
-		io_uring_cmd_complete_in_task(ioucmd, nvme_uring_task_meta_cb);
-
-	return RQ_END_IO_NONE;
-}
-
 static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 		struct io_uring_cmd *ioucmd, unsigned int issue_flags, bool vec)
 {
@@ -620,12 +583,10 @@  static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 	pdu->req = req;
 	pdu->meta_len = d.metadata_len;
 	req->end_io_data = ioucmd;
+	req->end_io = nvme_uring_cmd_end_io;
 	if (pdu->meta_len) {
 		pdu->u.meta = meta;
 		pdu->u.meta_buffer = nvme_to_user_ptr(d.metadata);
-		req->end_io = nvme_uring_cmd_end_io_meta;
-	} else {
-		req->end_io = nvme_uring_cmd_end_io;
 	}
 	blk_execute_rq_nowait(req, false);
 	return -EIOCBQUEUED;