Message ID | 20231214132623.119227-7-aaptel@nvidia.com |
---|---|
State | Superseded |
Series | nvme-tcp receive offloads |
On 14/12/2023 15:26, Aurelien Aptel wrote:
> From: Boris Pismenny <borisp@nvidia.com>
>
> Introduce the NVMe-TCP DDP data-path offload.
> Using this interface, the NIC hardware will scatter TCP payload directly
> to the BIO pages according to the command_id in the PDU.
> To maintain the correctness of the network stack, the driver is expected
> to construct SKBs that point to the BIO pages.
>
> The data-path interface contains two routines: setup/teardown.
> The setup provides the mapping from command_id to the request buffers,
> while the teardown removes this mapping.
>
> For efficiency, we introduce an asynchronous nvme completion, which is
> split between NVMe-TCP and the NIC driver as follows:
> NVMe-TCP performs the specific completion, while NIC driver performs the
> generic mq_blk completion.
>
> Signed-off-by: Boris Pismenny <borisp@nvidia.com>
> Signed-off-by: Ben Ben-Ishay <benishay@nvidia.com>
> Signed-off-by: Or Gerlitz <ogerlitz@nvidia.com>
> Signed-off-by: Yoray Zack <yorayz@nvidia.com>
> Signed-off-by: Shai Malin <smalin@nvidia.com>
> Signed-off-by: Aurelien Aptel <aaptel@nvidia.com>
> Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
> ---
>  drivers/nvme/host/tcp.c | 125 ++++++++++++++++++++++++++++++++++++++--
>  1 file changed, 120 insertions(+), 5 deletions(-)
>
> diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
> index 52b129401c78..09ffa8ba7e72 100644
> --- a/drivers/nvme/host/tcp.c
> +++ b/drivers/nvme/host/tcp.c
> @@ -120,6 +120,10 @@ struct nvme_tcp_request {
>          struct llist_node lentry;
>          __le32 ddgst;
>
> +        /* ddp async completion */
> +        __le16 nvme_status;
> +        union nvme_result result;
> +
>          struct bio *curr_bio;
>          struct iov_iter iter;
>
> @@ -127,6 +131,11 @@ struct nvme_tcp_request {
>          size_t offset;
>          size_t data_sent;
>          enum nvme_tcp_send_state state;
> +
> +#ifdef CONFIG_ULP_DDP
> +        bool offloaded;
> +        struct ulp_ddp_io ddp;
> +#endif
>  };
>
>  enum nvme_tcp_queue_flags {
> @@ -332,6 +341,11 @@ static inline size_t nvme_tcp_pdu_last_send(struct nvme_tcp_request *req,
>
>  #ifdef CONFIG_ULP_DDP
>
> +static bool nvme_tcp_is_ddp_offloaded(const struct nvme_tcp_request *req)
> +{
> +        return req->offloaded;
> +}
> +
>  static struct net_device *
>  nvme_tcp_get_ddp_netdev_with_limits(struct nvme_tcp_ctrl *ctrl)
>  {
> @@ -365,10 +379,72 @@ nvme_tcp_get_ddp_netdev_with_limits(struct nvme_tcp_ctrl *ctrl)
>  }
>
>  static bool nvme_tcp_resync_request(struct sock *sk, u32 seq, u32 flags);
> +static void nvme_tcp_ddp_teardown_done(void *ddp_ctx);
>  static const struct ulp_ddp_ulp_ops nvme_tcp_ddp_ulp_ops = {
>          .resync_request = nvme_tcp_resync_request,
> +        .ddp_teardown_done = nvme_tcp_ddp_teardown_done,
>  };
>
> +static void nvme_tcp_teardown_ddp(struct nvme_tcp_queue *queue,
> +                                  struct request *rq)
> +{
> +        struct net_device *netdev = queue->ctrl->ddp_netdev;
> +        struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
> +
> +        ulp_ddp_teardown(netdev, queue->sock->sk, &req->ddp, rq);
> +        sg_free_table_chained(&req->ddp.sg_table, SG_CHUNK_SIZE);
> +}
> +
> +static void nvme_tcp_ddp_teardown_done(void *ddp_ctx)
> +{
> +        struct request *rq = ddp_ctx;
> +        struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
> +
> +        if (!nvme_try_complete_req(rq, req->nvme_status, req->result))
> +                nvme_complete_rq(rq);
> +}
> +
> +static void nvme_tcp_setup_ddp(struct nvme_tcp_queue *queue,
> +                               struct request *rq)
> +{
> +        struct net_device *netdev = queue->ctrl->ddp_netdev;
> +        struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
> +        int ret;
> +
> +        if (rq_data_dir(rq) != READ ||
> +            queue->ctrl->ddp_threshold > blk_rq_payload_bytes(rq))
> +                return;
> +
> +        /*
> +         * DDP offload is best-effort, errors are ignored.
> +         */
> +
> +        req->ddp.command_id = nvme_cid(rq);
> +        req->ddp.sg_table.sgl = req->ddp.first_sgl;
> +        ret = sg_alloc_table_chained(&req->ddp.sg_table,
> +                                     blk_rq_nr_phys_segments(rq),
> +                                     req->ddp.sg_table.sgl, SG_CHUNK_SIZE);
> +        if (ret)
> +                goto err;
> +        req->ddp.nents = blk_rq_map_sg(rq->q, rq, req->ddp.sg_table.sgl);
> +
> +        ret = ulp_ddp_setup(netdev, queue->sock->sk, &req->ddp);
> +        if (ret) {
> +                sg_free_table_chained(&req->ddp.sg_table, SG_CHUNK_SIZE);
> +                goto err;
> +        }
> +
> +        /* if successful, sg table is freed in nvme_tcp_teardown_ddp() */
> +        req->offloaded = true;
> +
> +        return;
> +err:
> +        WARN_ONCE(ret, "ddp setup failed (queue 0x%x, cid 0x%x, ret=%d)",
> +                  nvme_tcp_queue_id(queue),
> +                  nvme_cid(rq),
> +                  ret);
> +}
> +
>  static int nvme_tcp_offload_socket(struct nvme_tcp_queue *queue)
>  {
>          struct ulp_ddp_config config = {.type = ULP_DDP_NVME};
> @@ -472,6 +548,11 @@ static bool nvme_tcp_resync_request(struct sock *sk, u32 seq, u32 flags)
>
>  #else
>
> +static bool nvme_tcp_is_ddp_offloaded(const struct nvme_tcp_request *req)
> +{
> +        return false;
> +}
> +
>  static struct net_device *
>  nvme_tcp_get_ddp_netdev_with_limits(struct nvme_tcp_ctrl *ctrl)
>  {
> @@ -489,6 +570,14 @@ static int nvme_tcp_offload_socket(struct nvme_tcp_queue *queue)
>  static void nvme_tcp_unoffload_socket(struct nvme_tcp_queue *queue)
>  {}
>
> +static void nvme_tcp_setup_ddp(struct nvme_tcp_queue *queue,
> +                               struct request *rq)
> +{}
> +
> +static void nvme_tcp_teardown_ddp(struct nvme_tcp_queue *queue,
> +                                  struct request *rq)
> +{}
> +
>  static void nvme_tcp_resync_response(struct nvme_tcp_queue *queue,
>                                       struct sk_buff *skb, unsigned int offset)
>  {}
> @@ -764,6 +853,24 @@ static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl)
>          queue_work(nvme_reset_wq, &to_tcp_ctrl(ctrl)->err_work);
>  }
>
> +static void nvme_tcp_complete_request(struct request *rq,
> +                                      __le16 status,
> +                                      union nvme_result result,
> +                                      __u16 command_id)
> +{
> +        struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
> +
> +        if (nvme_tcp_is_ddp_offloaded(req)) {

Same optimization here:

if (IS_ENABLED(CONFIG_ULP_DDP) && nvme_tcp_is_ddp_offloaded(req)) {

> +                req->nvme_status = status;
> +                req->result = result;
> +                nvme_tcp_teardown_ddp(req->queue, rq);
> +                return;
> +        }
> +
> +        if (!nvme_try_complete_req(rq, status, result))
> +                nvme_complete_rq(rq);
> +}
> +
>  static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
>                                       struct nvme_completion *cqe)
>  {
> @@ -783,10 +890,9 @@ static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
>          if (req->status == cpu_to_le16(NVME_SC_SUCCESS))
>                  req->status = cqe->status;
>
> -        if (!nvme_try_complete_req(rq, req->status, cqe->result))
> -                nvme_complete_rq(rq);
> +        nvme_tcp_complete_request(rq, req->status, cqe->result,
> +                                  cqe->command_id);
>          queue->nr_cqe++;
> -
>          return 0;
>  }
>
> @@ -984,10 +1090,13 @@ static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
>
>  static inline void nvme_tcp_end_request(struct request *rq, u16 status)
>  {
> +        struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
> +        struct nvme_tcp_queue *queue = req->queue;
> +        struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
>          union nvme_result res = {};
>
> -        if (!nvme_try_complete_req(rq, cpu_to_le16(status << 1), res))
> -                nvme_complete_rq(rq);
> +        nvme_tcp_complete_request(rq, cpu_to_le16(status << 1), res,
> +                                  pdu->command_id);
>  }
>
>  static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
> @@ -2727,6 +2836,9 @@ static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
>          if (ret)
>                  return ret;
>
> +#ifdef CONFIG_ULP_DDP
> +        req->offloaded = false;
> +#endif
>          req->state = NVME_TCP_SEND_CMD_PDU;
>          req->status = cpu_to_le16(NVME_SC_SUCCESS);
>          req->offset = 0;
> @@ -2765,6 +2877,9 @@ static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
>                  return ret;
>          }
>
> +        if (test_bit(NVME_TCP_Q_OFF_DDP, &queue->flags))

and here

if (IS_ENABLED(CONFIG_ULP_DDP) && test_bit(NVME_TCP_Q_OFF_DDP,
                                           &queue->flags))

> +                nvme_tcp_setup_ddp(queue, rq);
> +
>          return 0;
>  }
>
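The commit message above describes the data-path contract in prose: setup registers the command_id-to-buffer mapping with the NIC before the command goes out, and the final blk-mq completion of an offloaded request is deferred until the NIC driver reports that the mapping has been torn down. The self-contained userspace sketch below models only that ordering, under hypothetical `demo_*` names; it is an illustration of the flow, not the kernel or ulp_ddp API.

```c
/*
 * Userspace sketch of the setup/teardown + deferred completion flow.
 * Hypothetical names; build with: gcc -O2 -o ddp_flow ddp_flow.c
 */
#include <stdbool.h>
#include <stdio.h>

struct demo_request {
	unsigned int command_id;
	bool offloaded;			/* set only if setup succeeded */
	unsigned int nvme_status;	/* stashed until teardown completes */
};

/* Stand-in for ulp_ddp_setup(): best-effort mapping of the command_id. */
static int demo_ddp_setup(struct demo_request *req)
{
	printf("setup: map command_id %u to the request buffers\n",
	       req->command_id);
	return 0;	/* on error the request simply stays non-offloaded */
}

/* The "generic" blk-mq-style completion. */
static void demo_generic_completion(struct demo_request *req)
{
	printf("generic completion, status 0x%x\n", req->nvme_status);
}

/* Would be invoked by the NIC driver once the HW context is released. */
static void demo_teardown_done(struct demo_request *req)
{
	demo_generic_completion(req);
}

/* Stand-in for ulp_ddp_teardown(): here the callback runs synchronously,
 * in hardware it arrives asynchronously from the NIC driver. */
static void demo_ddp_teardown(struct demo_request *req)
{
	printf("teardown: unmap command_id %u\n", req->command_id);
	demo_teardown_done(req);
}

/* Mirrors the split: stash the status, defer the generic completion. */
static void demo_complete(struct demo_request *req, unsigned int status)
{
	req->nvme_status = status;
	if (req->offloaded) {
		demo_ddp_teardown(req);
		return;
	}
	demo_generic_completion(req);
}

int main(void)
{
	struct demo_request req = { .command_id = 7 };

	if (!demo_ddp_setup(&req))
		req.offloaded = true;
	demo_complete(&req, 0);		/* the NVMe CQE arrives */
	return 0;
}
```

In the patch itself the teardown-done step arrives asynchronously from the NIC driver through ulp_ddp_ulp_ops->ddp_teardown_done(), which is why nvme_tcp_complete_request() stashes the NVMe status and result in the request before calling nvme_tcp_teardown_ddp().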
Max Gurtovoy <mgurtovoy@nvidia.com> writes:
>> +
>> +	if (nvme_tcp_is_ddp_offloaded(req)) {
>
> Same optimization here:
>
> if (IS_ENABLED(CONFIG_ULP_DDP) && nvme_tcp_is_ddp_offloaded(req)) {
>

nvme_tcp_is_ddp_offloaded() compiles to "return false" when ULP_DDP is
disabled and the compiler already folds the branching completely.

>
> if (IS_ENABLED(CONFIG_ULP_DDP) && test_bit(NVME_TCP_Q_OFF_DDP,
>                                            &queue->flags))
>

Same, I've checked and the test is already optimized out.

Thanks
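A quick way to see the point made here: when CONFIG_ULP_DDP is not set, the !CONFIG_ULP_DDP stub of nvme_tcp_is_ddp_offloaded() is a constant `return false`, so the branch is dead code and the compiler removes it, exactly as an explicit IS_ENABLED(CONFIG_ULP_DDP) check would. The standalone sketch below (hypothetical names, plain userspace C, not the driver code) can be built with and without the define to compare the generated code.

```c
/*
 * Constant-folding demo. Compare:
 *   gcc -O2 -c fold_demo.c && objdump -d fold_demo.o
 *   gcc -O2 -DCONFIG_ULP_DDP -c fold_demo.c && objdump -d fold_demo.o
 */
#include <stdbool.h>
#include <stdio.h>

struct request_ctx {
	bool offloaded;		/* stands in for nvme_tcp_request->offloaded */
};

#ifdef CONFIG_ULP_DDP
static bool is_ddp_offloaded(const struct request_ctx *req)
{
	return req->offloaded;
}
#else
static bool is_ddp_offloaded(const struct request_ctx *req)
{
	return false;		/* constant: the caller's branch is eliminated */
}
#endif

void complete_request(struct request_ctx *req)
{
	if (is_ddp_offloaded(req)) {
		/* offloaded path: compiled out when CONFIG_ULP_DDP is unset */
		puts("async DDP teardown first");
		return;
	}
	puts("regular completion");
}

int main(void)
{
	struct request_ctx req = { .offloaded = false };

	complete_request(&req);
	return 0;
}
```

Built with -O2 and without -DCONFIG_ULP_DDP, the offloaded branch (and its message string) should not appear in the object code, which is the same effect an IS_ENABLED() guard would have.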
On 18/12/2023 20:08, Aurelien Aptel wrote:
> Max Gurtovoy <mgurtovoy@nvidia.com> writes:
>>> +
>>> +	if (nvme_tcp_is_ddp_offloaded(req)) {
>>
>> Same optimization here:
>>
>> if (IS_ENABLED(CONFIG_ULP_DDP) && nvme_tcp_is_ddp_offloaded(req)) {
>>
>
> nvme_tcp_is_ddp_offloaded() compiles to "return false" when ULP_DDP is
> disabled and the compiler already folds the branching completely.
>
>>
>> if (IS_ENABLED(CONFIG_ULP_DDP) && test_bit(NVME_TCP_Q_OFF_DDP,
>>                                            &queue->flags))
>>
>
> Same, I've checked and the test is already optimized out.
>
> Thanks
>

Ok looks good,

Reviewed-by: Max Gurtovoy <mgurtovoy@nvidia.com>