| Message ID | 20240228125733.299384-7-aaptel@nvidia.com (mailing list archive) |
|---|---|
| State | Changes Requested |
| Series | nvme-tcp receive offloads |
On 28/02/2024 14:57, Aurelien Aptel wrote:
> From: Boris Pismenny <borisp@nvidia.com>
>
> Introduce the NVMe-TCP DDP data-path offload.
> Using this interface, the NIC hardware will scatter TCP payload directly
> to the BIO pages according to the command_id in the PDU.
> To maintain the correctness of the network stack, the driver is expected
> to construct SKBs that point to the BIO pages.
>
> The data-path interface contains two routines: setup/teardown.
> The setup provides the mapping from command_id to the request buffers,
> while the teardown removes this mapping.
>
> For efficiency, we introduce an asynchronous nvme completion, which is
> split between NVMe-TCP and the NIC driver as follows:
> NVMe-TCP performs the specific completion, while NIC driver performs the
> generic mq_blk completion.
>
> Signed-off-by: Boris Pismenny <borisp@nvidia.com>
> Signed-off-by: Ben Ben-Ishay <benishay@nvidia.com>
> Signed-off-by: Or Gerlitz <ogerlitz@nvidia.com>
> Signed-off-by: Yoray Zack <yorayz@nvidia.com>
> Signed-off-by: Shai Malin <smalin@nvidia.com>
> Signed-off-by: Aurelien Aptel <aaptel@nvidia.com>
> Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
> Reviewed-by: Max Gurtovoy <mgurtovoy@nvidia.com>
> ---
>  drivers/nvme/host/tcp.c | 135 ++++++++++++++++++++++++++++++++++++++--
>  1 file changed, 130 insertions(+), 5 deletions(-)
>
> diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
> index 86a9ad9f679b..b7cfe14661d6 100644
> --- a/drivers/nvme/host/tcp.c
> +++ b/drivers/nvme/host/tcp.c
> @@ -120,6 +120,10 @@ struct nvme_tcp_request {
>          struct llist_node lentry;
>          __le32 ddgst;
>
> +        /* ddp async completion */
> +        __le16 nvme_status;
> +        union nvme_result result;
> +
>          struct bio *curr_bio;
>          struct iov_iter iter;
>
> @@ -127,6 +131,11 @@ struct nvme_tcp_request {
>          size_t offset;
>          size_t data_sent;
>          enum nvme_tcp_send_state state;
> +
> +#ifdef CONFIG_ULP_DDP
> +        bool offloaded;
> +        struct ulp_ddp_io ddp;
> +#endif
>  };
>
>  enum nvme_tcp_queue_flags {
> @@ -333,6 +342,25 @@ static inline size_t nvme_tcp_pdu_last_send(struct nvme_tcp_request *req,
>
>  #ifdef CONFIG_ULP_DDP
>
> +static bool nvme_tcp_is_ddp_offloaded(const struct nvme_tcp_request *req)
> +{
> +        return req->offloaded;
> +}
> +
> +static int nvme_tcp_ddp_alloc_sgl(struct nvme_tcp_request *req, struct request *rq)
> +{
> +        int ret;
> +
> +        req->ddp.sg_table.sgl = req->ddp.first_sgl;
> +        ret = sg_alloc_table_chained(&req->ddp.sg_table,
> +                                     blk_rq_nr_phys_segments(rq),
> +                                     req->ddp.sg_table.sgl, SG_CHUNK_SIZE);
> +        if (ret)
> +                return -ENOMEM;
> +        req->ddp.nents = blk_rq_map_sg(rq->q, rq, req->ddp.sg_table.sgl);
> +        return 0;
> +}
> +
>  static struct net_device *
>  nvme_tcp_get_ddp_netdev_with_limits(struct nvme_tcp_ctrl *ctrl)
>  {
> @@ -366,10 +394,68 @@ nvme_tcp_get_ddp_netdev_with_limits(struct nvme_tcp_ctrl *ctrl)
>  }
>
>  static bool nvme_tcp_resync_request(struct sock *sk, u32 seq, u32 flags);
> +static void nvme_tcp_ddp_teardown_done(void *ddp_ctx);
>  static const struct ulp_ddp_ulp_ops nvme_tcp_ddp_ulp_ops = {
>          .resync_request = nvme_tcp_resync_request,
> +        .ddp_teardown_done = nvme_tcp_ddp_teardown_done,
>  };
>
> +static void nvme_tcp_teardown_ddp(struct nvme_tcp_queue *queue,
> +                                  struct request *rq)
> +{
> +        struct net_device *netdev = queue->ctrl->ddp_netdev;
> +        struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
> +
> +        ulp_ddp_teardown(netdev, queue->sock->sk, &req->ddp, rq);
> +        sg_free_table_chained(&req->ddp.sg_table, SG_CHUNK_SIZE);
> +}
> +
> +static void nvme_tcp_ddp_teardown_done(void *ddp_ctx)
> +{
> +        struct request *rq = ddp_ctx;
> +        struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
> +
> +        if (!nvme_try_complete_req(rq, req->nvme_status, req->result))
> +                nvme_complete_rq(rq);
> +}
> +
> +static void nvme_tcp_setup_ddp(struct nvme_tcp_queue *queue,
> +                               struct request *rq)
> +{
> +        struct net_device *netdev = queue->ctrl->ddp_netdev;
> +        struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
> +        int ret;
> +
> +        if (rq_data_dir(rq) != READ ||
> +            queue->ctrl->ddp_threshold > blk_rq_payload_bytes(rq))
> +                return;
> +
> +        /*
> +         * DDP offload is best-effort, errors are ignored.
> +         */
> +
> +        req->ddp.command_id = nvme_cid(rq);
> +        ret = nvme_tcp_ddp_alloc_sgl(req, rq);
> +        if (ret)
> +                goto err;
> +
> +        ret = ulp_ddp_setup(netdev, queue->sock->sk, &req->ddp);
> +        if (ret) {
> +                sg_free_table_chained(&req->ddp.sg_table, SG_CHUNK_SIZE);
> +                goto err;
> +        }
> +
> +        /* if successful, sg table is freed in nvme_tcp_teardown_ddp() */
> +        req->offloaded = true;
> +
> +        return;
> +err:
> +        WARN_ONCE(ret, "ddp setup failed (queue 0x%x, cid 0x%x, ret=%d)",
> +                  nvme_tcp_queue_id(queue),
> +                  nvme_cid(rq),
> +                  ret);
> +}
> +
>  static int nvme_tcp_offload_socket(struct nvme_tcp_queue *queue)
>  {
>          struct ulp_ddp_config config = {.type = ULP_DDP_NVME};
> @@ -473,6 +559,11 @@ static bool nvme_tcp_resync_request(struct sock *sk, u32 seq, u32 flags)
>
>  #else
>
> +static bool nvme_tcp_is_ddp_offloaded(const struct nvme_tcp_request *req)
> +{
> +        return false;
> +}
> +
>  static struct net_device *
>  nvme_tcp_get_ddp_netdev_with_limits(struct nvme_tcp_ctrl *ctrl)
>  {
> @@ -490,6 +581,14 @@ static int nvme_tcp_offload_socket(struct nvme_tcp_queue *queue)
>  static void nvme_tcp_unoffload_socket(struct nvme_tcp_queue *queue)
>  {}
>
> +static void nvme_tcp_setup_ddp(struct nvme_tcp_queue *queue,
> +                               struct request *rq)
> +{}
> +
> +static void nvme_tcp_teardown_ddp(struct nvme_tcp_queue *queue,
> +                                  struct request *rq)
> +{}
> +
>  static void nvme_tcp_resync_response(struct nvme_tcp_queue *queue,
>                                       struct sk_buff *skb, unsigned int offset)
>  {}
> @@ -765,6 +864,24 @@ static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl)
>          queue_work(nvme_reset_wq, &to_tcp_ctrl(ctrl)->err_work);
>  }
>
> +static void nvme_tcp_complete_request(struct request *rq,
> +                                      __le16 status,
> +                                      union nvme_result result,
> +                                      __u16 command_id)
> +{
> +        struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
> +
> +        if (nvme_tcp_is_ddp_offloaded(req)) {
> +                req->nvme_status = status;

this can just be called req->status I think.

> +                req->result = result;
I think it will be cleaner to always capture req->result and req->status
regardless of ddp offload.
Sagi Grimberg <sagi@grimberg.me> writes:
>> +static void nvme_tcp_complete_request(struct request *rq,
>> +                                      __le16 status,
>> +                                      union nvme_result result,
>> +                                      __u16 command_id)
>> +{
>> +        struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
>> +
>> +        if (nvme_tcp_is_ddp_offloaded(req)) {
>> +                req->nvme_status = status;
>
> this can just be called req->status I think.

Since req->status already exists, we have checked whether it can be
safely used instead of adding nvme_status and it seems to be ok.
We will remove nvme_status.

>> +                req->result = result;
> I think it will be cleaner to always capture req->result and req->status
> regardless of ddp offload.

Sure, we will set status and result in the function before the offload
check:

static void nvme_tcp_complete_request(struct request *rq,
                                      __le16 status,
                                      union nvme_result result,
                                      __u16 command_id)
{
        struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);

        req->status = status;
        req->result = result;

        if (nvme_tcp_is_ddp_offloaded(req)) {
                /* complete when teardown is confirmed to be done */
                nvme_tcp_teardown_ddp(req->queue, rq);
                return;
        }

        if (!nvme_try_complete_req(rq, status, result))
                nvme_complete_rq(rq);
}
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 86a9ad9f679b..b7cfe14661d6 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -120,6 +120,10 @@ struct nvme_tcp_request {
         struct llist_node lentry;
         __le32 ddgst;

+        /* ddp async completion */
+        __le16 nvme_status;
+        union nvme_result result;
+
         struct bio *curr_bio;
         struct iov_iter iter;

@@ -127,6 +131,11 @@ struct nvme_tcp_request {
         size_t offset;
         size_t data_sent;
         enum nvme_tcp_send_state state;
+
+#ifdef CONFIG_ULP_DDP
+        bool offloaded;
+        struct ulp_ddp_io ddp;
+#endif
 };

 enum nvme_tcp_queue_flags {
@@ -333,6 +342,25 @@ static inline size_t nvme_tcp_pdu_last_send(struct nvme_tcp_request *req,

 #ifdef CONFIG_ULP_DDP

+static bool nvme_tcp_is_ddp_offloaded(const struct nvme_tcp_request *req)
+{
+        return req->offloaded;
+}
+
+static int nvme_tcp_ddp_alloc_sgl(struct nvme_tcp_request *req, struct request *rq)
+{
+        int ret;
+
+        req->ddp.sg_table.sgl = req->ddp.first_sgl;
+        ret = sg_alloc_table_chained(&req->ddp.sg_table,
+                                     blk_rq_nr_phys_segments(rq),
+                                     req->ddp.sg_table.sgl, SG_CHUNK_SIZE);
+        if (ret)
+                return -ENOMEM;
+        req->ddp.nents = blk_rq_map_sg(rq->q, rq, req->ddp.sg_table.sgl);
+        return 0;
+}
+
 static struct net_device *
 nvme_tcp_get_ddp_netdev_with_limits(struct nvme_tcp_ctrl *ctrl)
 {
@@ -366,10 +394,68 @@ nvme_tcp_get_ddp_netdev_with_limits(struct nvme_tcp_ctrl *ctrl)
 }

 static bool nvme_tcp_resync_request(struct sock *sk, u32 seq, u32 flags);
+static void nvme_tcp_ddp_teardown_done(void *ddp_ctx);
 static const struct ulp_ddp_ulp_ops nvme_tcp_ddp_ulp_ops = {
         .resync_request = nvme_tcp_resync_request,
+        .ddp_teardown_done = nvme_tcp_ddp_teardown_done,
 };

+static void nvme_tcp_teardown_ddp(struct nvme_tcp_queue *queue,
+                                  struct request *rq)
+{
+        struct net_device *netdev = queue->ctrl->ddp_netdev;
+        struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
+
+        ulp_ddp_teardown(netdev, queue->sock->sk, &req->ddp, rq);
+        sg_free_table_chained(&req->ddp.sg_table, SG_CHUNK_SIZE);
+}
+
+static void nvme_tcp_ddp_teardown_done(void *ddp_ctx)
+{
+        struct request *rq = ddp_ctx;
+        struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
+
+        if (!nvme_try_complete_req(rq, req->nvme_status, req->result))
+                nvme_complete_rq(rq);
+}
+
+static void nvme_tcp_setup_ddp(struct nvme_tcp_queue *queue,
+                               struct request *rq)
+{
+        struct net_device *netdev = queue->ctrl->ddp_netdev;
+        struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
+        int ret;
+
+        if (rq_data_dir(rq) != READ ||
+            queue->ctrl->ddp_threshold > blk_rq_payload_bytes(rq))
+                return;
+
+        /*
+         * DDP offload is best-effort, errors are ignored.
+         */
+
+        req->ddp.command_id = nvme_cid(rq);
+        ret = nvme_tcp_ddp_alloc_sgl(req, rq);
+        if (ret)
+                goto err;
+
+        ret = ulp_ddp_setup(netdev, queue->sock->sk, &req->ddp);
+        if (ret) {
+                sg_free_table_chained(&req->ddp.sg_table, SG_CHUNK_SIZE);
+                goto err;
+        }
+
+        /* if successful, sg table is freed in nvme_tcp_teardown_ddp() */
+        req->offloaded = true;
+
+        return;
+err:
+        WARN_ONCE(ret, "ddp setup failed (queue 0x%x, cid 0x%x, ret=%d)",
+                  nvme_tcp_queue_id(queue),
+                  nvme_cid(rq),
+                  ret);
+}
+
 static int nvme_tcp_offload_socket(struct nvme_tcp_queue *queue)
 {
         struct ulp_ddp_config config = {.type = ULP_DDP_NVME};
@@ -473,6 +559,11 @@ static bool nvme_tcp_resync_request(struct sock *sk, u32 seq, u32 flags)

 #else

+static bool nvme_tcp_is_ddp_offloaded(const struct nvme_tcp_request *req)
+{
+        return false;
+}
+
 static struct net_device *
 nvme_tcp_get_ddp_netdev_with_limits(struct nvme_tcp_ctrl *ctrl)
 {
@@ -490,6 +581,14 @@ static int nvme_tcp_offload_socket(struct nvme_tcp_queue *queue)
 static void nvme_tcp_unoffload_socket(struct nvme_tcp_queue *queue)
 {}

+static void nvme_tcp_setup_ddp(struct nvme_tcp_queue *queue,
+                               struct request *rq)
+{}
+
+static void nvme_tcp_teardown_ddp(struct nvme_tcp_queue *queue,
+                                  struct request *rq)
+{}
+
 static void nvme_tcp_resync_response(struct nvme_tcp_queue *queue,
                                      struct sk_buff *skb, unsigned int offset)
 {}
@@ -765,6 +864,24 @@ static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl)
         queue_work(nvme_reset_wq, &to_tcp_ctrl(ctrl)->err_work);
 }

+static void nvme_tcp_complete_request(struct request *rq,
+                                      __le16 status,
+                                      union nvme_result result,
+                                      __u16 command_id)
+{
+        struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
+
+        if (nvme_tcp_is_ddp_offloaded(req)) {
+                req->nvme_status = status;
+                req->result = result;
+                nvme_tcp_teardown_ddp(req->queue, rq);
+                return;
+        }
+
+        if (!nvme_try_complete_req(rq, status, result))
+                nvme_complete_rq(rq);
+}
+
 static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
                                      struct nvme_completion *cqe)
 {
@@ -784,10 +901,9 @@ static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
         if (req->status == cpu_to_le16(NVME_SC_SUCCESS))
                 req->status = cqe->status;

-        if (!nvme_try_complete_req(rq, req->status, cqe->result))
-                nvme_complete_rq(rq);
+        nvme_tcp_complete_request(rq, req->status, cqe->result,
+                                  cqe->command_id);
         queue->nr_cqe++;
-
         return 0;
 }

@@ -985,10 +1101,13 @@ static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,

 static inline void nvme_tcp_end_request(struct request *rq, u16 status)
 {
+        struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
+        struct nvme_tcp_queue *queue = req->queue;
+        struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
         union nvme_result res = {};

-        if (!nvme_try_complete_req(rq, cpu_to_le16(status << 1), res))
-                nvme_complete_rq(rq);
+        nvme_tcp_complete_request(rq, cpu_to_le16(status << 1), res,
+                                  pdu->command_id);
 }

 static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
@@ -2734,6 +2853,9 @@ static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
         if (ret)
                 return ret;

+#ifdef CONFIG_ULP_DDP
+        req->offloaded = false;
+#endif
         req->state = NVME_TCP_SEND_CMD_PDU;
         req->status = cpu_to_le16(NVME_SC_SUCCESS);
         req->offset = 0;
@@ -2772,6 +2894,9 @@ static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
                 return ret;
         }

+        if (test_bit(NVME_TCP_Q_OFF_DDP, &queue->flags))
+                nvme_tcp_setup_ddp(queue, rq);
+
         return 0;
 }
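For readers following the thread, here is a minimal stand-alone sketch of the completion flow discussed above: status and result are captured unconditionally, and when DDP is offloaded the generic completion is deferred until the NIC reports that teardown finished. All names and types below (fake_request, start_teardown, and so on) are simplified stand-ins, not the kernel or ulp_ddp API; this only models the control flow, assuming a NIC driver that fires a teardown-done callback asynchronously.

/*
 * Hypothetical userspace model of the two-stage completion: the request is
 * finished immediately when DDP was not used, and only from the
 * teardown-done callback when it was.  None of these types are the real
 * kernel structures.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_request {
        unsigned short status;          /* stands in for req->status */
        unsigned long long result;      /* stands in for req->result */
        bool offloaded;                 /* stands in for req->offloaded */
};

/* stands in for nvme_try_complete_req()/nvme_complete_rq() */
static void finish_request(struct fake_request *req)
{
        printf("request completed: status=0x%x result=%llu\n",
               req->status, req->result);
}

/* stands in for the ddp_teardown_done callback invoked by the NIC driver */
static void teardown_done(void *ctx)
{
        finish_request(ctx);
}

/*
 * Stands in for ulp_ddp_teardown(); here the callback runs immediately,
 * while in the real flow it fires later from the NIC driver.
 */
static void start_teardown(struct fake_request *req, void (*done)(void *ctx))
{
        done(req);
}

/* models the revised nvme_tcp_complete_request() from the reply above */
static void complete_request(struct fake_request *req,
                             unsigned short status,
                             unsigned long long result)
{
        /* always capture status/result, as suggested in the review */
        req->status = status;
        req->result = result;

        if (req->offloaded) {
                /* complete only when teardown is confirmed to be done */
                start_teardown(req, teardown_done);
                return;
        }

        finish_request(req);
}

int main(void)
{
        struct fake_request direct = { .offloaded = false };
        struct fake_request ddp = { .offloaded = true };

        complete_request(&direct, 0, 1);        /* immediate completion */
        complete_request(&ddp, 0, 2);           /* deferred via callback */
        return 0;
}

The design point matches the commit message's split between the NVMe-TCP specific completion and the generic blk-mq completion: when the payload was placed directly into the BIO pages, the request is only completed after the teardown confirmation, presumably so those pages stay owned by the request until the NIC has released its mapping.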