diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -356,6 +356,14 @@ void nvme_complete_rq(struct request *req)
}
EXPORT_SYMBOL_GPL(nvme_complete_rq);
+void nvme_complete_failed_rq(struct request *req, u16 status)
+{
+ nvme_req(req)->status = status;
+ blk_mq_set_request_complete(req);
+ nvme_complete_rq(req);
+}
+EXPORT_SYMBOL_GPL(nvme_complete_failed_rq);
+
bool nvme_cancel_request(struct request *req, void *data, bool reserved)
{
dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device,
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -575,6 +575,7 @@ static inline bool nvme_is_aen_req(u16 qid, __u16 command_id)
}
void nvme_complete_rq(struct request *req);
+void nvme_complete_failed_rq(struct request *req, u16 status);
bool nvme_cancel_request(struct request *req, void *data, bool reserved);
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
enum nvme_ctrl_state new_state);
With nvme native multipath, if a path related error occurs while queue_rq calls into the HBA driver to send a request, queue_rq needs to complete the request with NVME_SC_HOST_PATH_ERROR so that the request can fail over and be retried on another path if needed. So introduce nvme_complete_failed_rq for queue_rq and nvmf_fail_nonready_command.

Signed-off-by: Chao Leng <lengchao@huawei.com>
---
 drivers/nvme/host/core.c | 8 ++++++++
 drivers/nvme/host/nvme.h | 1 +
 2 files changed, 9 insertions(+)
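
A minimal sketch (not part of this patch) of how a transport driver's
queue_rq error path might use the new helper. example_queue_rq() and
example_send() are made-up names standing in for the transport
specific code; assume example_send() returns -EIO on a path related
error:

#include <linux/blk-mq.h>
#include "nvme.h"

/*
 * Stand-in for the transport-specific submission path (hypothetical);
 * pretend it detects a path related error and returns -EIO.
 */
static int example_send(struct request *req)
{
	return -EIO;
}

static blk_status_t example_queue_rq(struct blk_mq_hw_ctx *hctx,
				     const struct blk_mq_queue_data *bd)
{
	struct request *req = bd->rq;
	int ret;

	blk_mq_start_request(req);

	ret = example_send(req);
	if (ret == -EIO) {
		/*
		 * Path related error: complete the request with
		 * NVME_SC_HOST_PATH_ERROR so nvme native multipath
		 * can fail over and retry on another path.
		 */
		nvme_complete_failed_rq(req, NVME_SC_HOST_PATH_ERROR);
		return BLK_STS_OK;	/* request already completed */
	}

	return ret ? BLK_STS_IOERR : BLK_STS_OK;
}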