Message ID | 20210114151033.13020-7-borisp@mellanox.com (mailing list archive) |
---|---|
State | Changes Requested |
Delegated to: | Netdev Maintainers |
Series | nvme-tcp receive offloads |
Context | Check | Description |
---|---|---|
netdev/apply | fail | Patch does not apply to net-next |
netdev/tree_selection | success | Clearly marked for net-next |
On 1/14/21 8:10 AM, Boris Pismenny wrote:
> +static
> +int nvme_tcp_offload_socket(struct nvme_tcp_queue *queue)
> +{
> +        struct net_device *netdev = get_netdev_for_sock(queue->sock->sk, true);
> +        struct nvme_tcp_ddp_config config = {};
> +        int ret;
> +
> +        if (!netdev) {
> +                dev_info_ratelimited(queue->ctrl->ctrl.device, "netdev not found\n");
> +                return -ENODEV;
> +        }
> +
> +        if (!(netdev->features & NETIF_F_HW_TCP_DDP)) {
> +                dev_put(netdev);
> +                return -EOPNOTSUPP;
> +        }
> +
> +        config.cfg.type = TCP_DDP_NVME;
> +        config.pfv = NVME_TCP_PFV_1_0;
> +        config.cpda = 0;
> +        config.dgst = queue->hdr_digest ?
> +                NVME_TCP_HDR_DIGEST_ENABLE : 0;
> +        config.dgst |= queue->data_digest ?
> +                NVME_TCP_DATA_DIGEST_ENABLE : 0;
> +        config.queue_size = queue->queue_size;
> +        config.queue_id = nvme_tcp_queue_id(queue);
> +        config.io_cpu = queue->io_cpu;
> +
> +        ret = netdev->tcp_ddp_ops->tcp_ddp_sk_add(netdev,
> +                                                  queue->sock->sk,
> +                                                  (struct tcp_ddp_config *)&config);

typecast is not needed; tcp_ddp_config is an element of nvme_tcp_ddp_config

> +        if (ret) {
> +                dev_put(netdev);
> +                return ret;
> +        }
> +
> +        inet_csk(queue->sock->sk)->icsk_ulp_ddp_ops = &nvme_tcp_ddp_ulp_ops;
> +        if (netdev->features & NETIF_F_HW_TCP_DDP)
> +                set_bit(NVME_TCP_Q_OFF_DDP, &queue->flags);
> +
> +        return ret;
> +}
> +
> +static
> +void nvme_tcp_unoffload_socket(struct nvme_tcp_queue *queue)
> +{
> +        struct net_device *netdev = queue->ctrl->offloading_netdev;
> +
> +        if (!netdev) {
> +                dev_info_ratelimited(queue->ctrl->ctrl.device, "netdev not found\n");
> +                return;
> +        }
> +
> +        netdev->tcp_ddp_ops->tcp_ddp_sk_del(netdev, queue->sock->sk);
> +
> +        inet_csk(queue->sock->sk)->icsk_ulp_ddp_ops = NULL;
> +        dev_put(netdev); /* put the queue_init get_netdev_for_sock() */

have you validated the netdev reference counts? You have a put here, and ...

> +}
> +
> +static
> +int nvme_tcp_offload_limits(struct nvme_tcp_queue *queue)
> +{
> +        struct net_device *netdev = get_netdev_for_sock(queue->sock->sk, true);

... a get here ...

> +        struct tcp_ddp_limits limits;
> +        int ret = 0;
> +
> +        if (!netdev) {
> +                dev_info_ratelimited(queue->ctrl->ctrl.device, "netdev not found\n");
> +                return -ENODEV;
> +        }
> +
> +        if (netdev->features & NETIF_F_HW_TCP_DDP &&
> +            netdev->tcp_ddp_ops &&
> +            netdev->tcp_ddp_ops->tcp_ddp_limits)
> +                ret = netdev->tcp_ddp_ops->tcp_ddp_limits(netdev, &limits);
> +        else
> +                ret = -EOPNOTSUPP;
> +
> +        if (!ret) {
> +                queue->ctrl->offloading_netdev = netdev;

... you have the device here, but then ...

> +                dev_dbg_ratelimited(queue->ctrl->ctrl.device,
> +                                    "netdev %s offload limits: max_ddp_sgl_len %d\n",
> +                                    netdev->name, limits.max_ddp_sgl_len);
> +                queue->ctrl->ctrl.max_segments = limits.max_ddp_sgl_len;
> +                queue->ctrl->ctrl.max_hw_sectors =
> +                        limits.max_ddp_sgl_len << (ilog2(SZ_4K) - 9);
> +        } else {
> +                queue->ctrl->offloading_netdev = NULL;
> +        }
> +
> +        dev_put(netdev);

... put here. And this is the limit checking function, which seems like an odd place to set offloading_netdev vs nvme_tcp_offload_socket, which sets no queue variable yet hangs on to a netdev reference count.

netdev reference count leaks are an absolute PITA to find. Code that takes and puts the counts should be clear and obvious as to when and why. The symmetry of offload and unoffload is clear when the offload saves the address in offloading_netdev. What you have now is dubious.
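The cast David flags is unnecessary because struct tcp_ddp_config is embedded as the first member (cfg) of struct nvme_tcp_ddp_config, so the caller can pass &config.cfg directly. Below is a minimal userspace sketch of that embedding; the struct layouts are abbreviated from the patch, not copied from the real kernel headers.

```c
#include <stdio.h>

/* Abbreviated stand-ins for the structs in the patch. */
struct tcp_ddp_config {
        int type;                       /* TCP_DDP_NVME in the patch */
};

struct nvme_tcp_ddp_config {
        struct tcp_ddp_config cfg;      /* embedded generic config */
        unsigned short pfv;
        unsigned char cpda;
        unsigned char dgst;
};

/* Takes the generic config, as tcp_ddp_sk_add() does in the patch. */
static void sk_add(struct tcp_ddp_config *cfg)
{
        printf("type=%d\n", cfg->type);
}

int main(void)
{
        struct nvme_tcp_ddp_config config = { .cfg.type = 1 };

        /* No cast needed: pass the embedded member directly. */
        sk_add(&config.cfg);
        return 0;
}
```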
On 19/01/2021 5:47, David Ahern wrote:
> On 1/14/21 8:10 AM, Boris Pismenny wrote:
>> +static
>> +int nvme_tcp_offload_socket(struct nvme_tcp_queue *queue)
>> +{
>> +        struct net_device *netdev = get_netdev_for_sock(queue->sock->sk, true);
>> +        struct nvme_tcp_ddp_config config = {};
>> +        int ret;
>> +
>> +        if (!netdev) {
>> +                dev_info_ratelimited(queue->ctrl->ctrl.device, "netdev not found\n");
>> +                return -ENODEV;
>> +        }
>> +
>> +        if (!(netdev->features & NETIF_F_HW_TCP_DDP)) {
>> +                dev_put(netdev);
>> +                return -EOPNOTSUPP;
>> +        }
>> +
>> +        config.cfg.type = TCP_DDP_NVME;
>> +        config.pfv = NVME_TCP_PFV_1_0;
>> +        config.cpda = 0;
>> +        config.dgst = queue->hdr_digest ?
>> +                NVME_TCP_HDR_DIGEST_ENABLE : 0;
>> +        config.dgst |= queue->data_digest ?
>> +                NVME_TCP_DATA_DIGEST_ENABLE : 0;
>> +        config.queue_size = queue->queue_size;
>> +        config.queue_id = nvme_tcp_queue_id(queue);
>> +        config.io_cpu = queue->io_cpu;
>> +
>> +        ret = netdev->tcp_ddp_ops->tcp_ddp_sk_add(netdev,
>> +                                                  queue->sock->sk,
>> +                                                  (struct tcp_ddp_config *)&config);
>
> typecast is not needed; tcp_ddp_config is an element of nvme_tcp_ddp_config
>

True, will fix, thanks!

>> +        if (ret) {
>> +                dev_put(netdev);
>> +                return ret;
>> +        }
>> +
>> +        inet_csk(queue->sock->sk)->icsk_ulp_ddp_ops = &nvme_tcp_ddp_ulp_ops;
>> +        if (netdev->features & NETIF_F_HW_TCP_DDP)
>> +                set_bit(NVME_TCP_Q_OFF_DDP, &queue->flags);
>> +
>> +        return ret;
>> +}
>> +
>> +static
>> +void nvme_tcp_unoffload_socket(struct nvme_tcp_queue *queue)
>> +{
>> +        struct net_device *netdev = queue->ctrl->offloading_netdev;
>> +
>> +        if (!netdev) {
>> +                dev_info_ratelimited(queue->ctrl->ctrl.device, "netdev not found\n");
>> +                return;
>> +        }
>> +
>> +        netdev->tcp_ddp_ops->tcp_ddp_sk_del(netdev, queue->sock->sk);
>> +
>> +        inet_csk(queue->sock->sk)->icsk_ulp_ddp_ops = NULL;
>> +        dev_put(netdev); /* put the queue_init get_netdev_for_sock() */
>
> have you validated the netdev reference counts? You have a put here, and ...
>

Yes, it does work for the cases we've tested: up/down, connect/disconnect, and up/down during traffic. It is unfortunate that it is not trivial to follow. We'll add a comment to make it clearer. Also see below.

>> +}
>> +
>> +static
>> +int nvme_tcp_offload_limits(struct nvme_tcp_queue *queue)
>> +{
>> +        struct net_device *netdev = get_netdev_for_sock(queue->sock->sk, true);
>
> ... a get here ...
>
>> +        struct tcp_ddp_limits limits;
>> +        int ret = 0;
>> +
>> +        if (!netdev) {
>> +                dev_info_ratelimited(queue->ctrl->ctrl.device, "netdev not found\n");
>> +                return -ENODEV;
>> +        }
>> +
>> +        if (netdev->features & NETIF_F_HW_TCP_DDP &&
>> +            netdev->tcp_ddp_ops &&
>> +            netdev->tcp_ddp_ops->tcp_ddp_limits)
>> +                ret = netdev->tcp_ddp_ops->tcp_ddp_limits(netdev, &limits);
>> +        else
>> +                ret = -EOPNOTSUPP;
>> +
>> +        if (!ret) {
>> +                queue->ctrl->offloading_netdev = netdev;
>
> ... you have the device here, but then ...
>
>> +                dev_dbg_ratelimited(queue->ctrl->ctrl.device,
>> +                                    "netdev %s offload limits: max_ddp_sgl_len %d\n",
>> +                                    netdev->name, limits.max_ddp_sgl_len);
>> +                queue->ctrl->ctrl.max_segments = limits.max_ddp_sgl_len;
>> +                queue->ctrl->ctrl.max_hw_sectors =
>> +                        limits.max_ddp_sgl_len << (ilog2(SZ_4K) - 9);
>> +        } else {
>> +                queue->ctrl->offloading_netdev = NULL;
>> +        }
>> +
>> +        dev_put(netdev);
>
> ... put here. And this is the limit checking function, which seems like
> an odd place to set offloading_netdev vs nvme_tcp_offload_socket, which
> sets no queue variable yet hangs on to a netdev reference count.
>
> netdev reference count leaks are an absolute PITA to find. Code that
> takes and puts the counts should be clear and obvious as to when and
> why. The symmetry of offload and unoffload is clear when the offload
> saves the address in offloading_netdev. What you have now is dubious.
>

The idea here is to rely on offload and unoffload to hold the netdev during offload. Get limits is not offloading anything; it only queries device limits that are then applied to the queue by the caller. We hold the device here only to ensure that the function is still there when it is called, and we release it once we are done with it, as no context is established on the NIC and no offload takes place.
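The two reference lifetimes under discussion can be sketched in isolation. In the userspace sketch below, netdev_get() and netdev_put() are hypothetical stand-ins for get_netdev_for_sock() and dev_put(); the point is the pairing Boris describes: the limits query balances its get and put inside one function, while the offload path's get is only balanced by the put in the matching unoffload.

```c
#include <stdatomic.h>
#include <stdio.h>

struct net_device { atomic_int refcnt; };

static struct net_device dev = { 1 };

static struct net_device *netdev_get(void)      /* ~ get_netdev_for_sock() */
{
        atomic_fetch_add(&dev.refcnt, 1);
        return &dev;
}

static void netdev_put(struct net_device *nd)   /* ~ dev_put() */
{
        atomic_fetch_sub(&nd->refcnt, 1);
}

/* Pattern 1: query-only path -- get and put within one function. */
static void query_limits(void)
{
        struct net_device *nd = netdev_get();
        /* ... read limits from nd ... */
        netdev_put(nd);
}

/* Pattern 2: offload path -- the get here outlives the function and is
 * only balanced by the put in the matching unoffload. */
static struct net_device *offloading_netdev;

static void offload(void)
{
        offloading_netdev = netdev_get();
}

static void unoffload(void)
{
        netdev_put(offloading_netdev);  /* puts the get taken in offload() */
        offloading_netdev = NULL;
}

int main(void)
{
        query_limits();
        offload();
        unoffload();
        printf("refcnt=%d\n", atomic_load(&dev.refcnt));        /* back to 1 */
        return 0;
}
```

As an aside on the quoted limits code, the sector conversion works out as follows: ilog2(SZ_4K) - 9 = 12 - 9 = 3, so max_hw_sectors = max_ddp_sgl_len << 3, i.e. eight 512-byte sectors per 4 KiB page.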
```diff
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 1ba659927442..31bf9e3ea236 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -14,6 +14,7 @@
 #include <linux/blk-mq.h>
 #include <crypto/hash.h>
 #include <net/busy_poll.h>
+#include <net/tcp_ddp.h>
 
 #include "nvme.h"
 #include "fabrics.h"
@@ -62,6 +63,7 @@ enum nvme_tcp_queue_flags {
 	NVME_TCP_Q_ALLOCATED	= 0,
 	NVME_TCP_Q_LIVE		= 1,
 	NVME_TCP_Q_POLLING	= 2,
+	NVME_TCP_Q_OFF_DDP	= 3,
 };
 
 enum nvme_tcp_recv_state {
@@ -110,6 +112,8 @@ struct nvme_tcp_queue {
 	void (*state_change)(struct sock *);
 	void (*data_ready)(struct sock *);
 	void (*write_space)(struct sock *);
+
+	atomic64_t resync_req;
 };
 
 struct nvme_tcp_ctrl {
@@ -128,6 +132,8 @@ struct nvme_tcp_ctrl {
 	struct delayed_work connect_work;
 	struct nvme_tcp_request async_req;
 	u32 io_queues[HCTX_MAX_TYPES];
+
+	struct net_device *offloading_netdev;
 };
 
 static LIST_HEAD(nvme_tcp_ctrl_list);
@@ -222,6 +228,182 @@ static inline size_t nvme_tcp_pdu_last_send(struct nvme_tcp_request *req,
 	return nvme_tcp_pdu_data_left(req) <= len;
 }
 
+#ifdef CONFIG_TCP_DDP
+
+static bool nvme_tcp_resync_request(struct sock *sk, u32 seq, u32 flags);
+static const struct tcp_ddp_ulp_ops nvme_tcp_ddp_ulp_ops = {
+	.resync_request		= nvme_tcp_resync_request,
+};
+
+static
+int nvme_tcp_offload_socket(struct nvme_tcp_queue *queue)
+{
+	struct net_device *netdev = get_netdev_for_sock(queue->sock->sk, true);
+	struct nvme_tcp_ddp_config config = {};
+	int ret;
+
+	if (!netdev) {
+		dev_info_ratelimited(queue->ctrl->ctrl.device, "netdev not found\n");
+		return -ENODEV;
+	}
+
+	if (!(netdev->features & NETIF_F_HW_TCP_DDP)) {
+		dev_put(netdev);
+		return -EOPNOTSUPP;
+	}
+
+	config.cfg.type = TCP_DDP_NVME;
+	config.pfv = NVME_TCP_PFV_1_0;
+	config.cpda = 0;
+	config.dgst = queue->hdr_digest ?
+		NVME_TCP_HDR_DIGEST_ENABLE : 0;
+	config.dgst |= queue->data_digest ?
+		NVME_TCP_DATA_DIGEST_ENABLE : 0;
+	config.queue_size = queue->queue_size;
+	config.queue_id = nvme_tcp_queue_id(queue);
+	config.io_cpu = queue->io_cpu;
+
+	ret = netdev->tcp_ddp_ops->tcp_ddp_sk_add(netdev,
+						  queue->sock->sk,
+						  (struct tcp_ddp_config *)&config);
+	if (ret) {
+		dev_put(netdev);
+		return ret;
+	}
+
+	inet_csk(queue->sock->sk)->icsk_ulp_ddp_ops = &nvme_tcp_ddp_ulp_ops;
+	if (netdev->features & NETIF_F_HW_TCP_DDP)
+		set_bit(NVME_TCP_Q_OFF_DDP, &queue->flags);
+
+	return ret;
+}
+
+static
+void nvme_tcp_unoffload_socket(struct nvme_tcp_queue *queue)
+{
+	struct net_device *netdev = queue->ctrl->offloading_netdev;
+
+	if (!netdev) {
+		dev_info_ratelimited(queue->ctrl->ctrl.device, "netdev not found\n");
+		return;
+	}
+
+	netdev->tcp_ddp_ops->tcp_ddp_sk_del(netdev, queue->sock->sk);
+
+	inet_csk(queue->sock->sk)->icsk_ulp_ddp_ops = NULL;
+	dev_put(netdev); /* put the queue_init get_netdev_for_sock() */
+}
+
+static
+int nvme_tcp_offload_limits(struct nvme_tcp_queue *queue)
+{
+	struct net_device *netdev = get_netdev_for_sock(queue->sock->sk, true);
+	struct tcp_ddp_limits limits;
+	int ret = 0;
+
+	if (!netdev) {
+		dev_info_ratelimited(queue->ctrl->ctrl.device, "netdev not found\n");
+		return -ENODEV;
+	}
+
+	if (netdev->features & NETIF_F_HW_TCP_DDP &&
+	    netdev->tcp_ddp_ops &&
+	    netdev->tcp_ddp_ops->tcp_ddp_limits)
+		ret = netdev->tcp_ddp_ops->tcp_ddp_limits(netdev, &limits);
+	else
+		ret = -EOPNOTSUPP;
+
+	if (!ret) {
+		queue->ctrl->offloading_netdev = netdev;
+		dev_dbg_ratelimited(queue->ctrl->ctrl.device,
+				    "netdev %s offload limits: max_ddp_sgl_len %d\n",
+				    netdev->name, limits.max_ddp_sgl_len);
+		queue->ctrl->ctrl.max_segments = limits.max_ddp_sgl_len;
+		queue->ctrl->ctrl.max_hw_sectors =
+			limits.max_ddp_sgl_len << (ilog2(SZ_4K) - 9);
+	} else {
+		queue->ctrl->offloading_netdev = NULL;
+	}
+
+	dev_put(netdev);
+
+	return ret;
+}
+
+static
+void nvme_tcp_resync_response(struct nvme_tcp_queue *queue,
+			      unsigned int pdu_seq)
+{
+	struct net_device *netdev = queue->ctrl->offloading_netdev;
+	u64 resync_val;
+	u32 resync_seq;
+
+	resync_val = atomic64_read(&queue->resync_req);
+	/* Lower 32 bit flags. Check validity of the request */
+	if ((resync_val & TCP_DDP_RESYNC_REQ) == 0)
+		return;
+
+	/* Obtain and check requested sequence number: is this PDU header before the request? */
+	resync_seq = resync_val >> 32;
+	if (before(pdu_seq, resync_seq))
+		return;
+
+	if (unlikely(!netdev)) {
+		pr_info_ratelimited("%s: netdev not found\n", __func__);
+		return;
+	}
+
+	/**
+	 * The atomic operation guarantees that we don't miss any NIC driver
+	 * resync requests submitted after the above checks.
+	 */
+	if (atomic64_cmpxchg(&queue->resync_req, resync_val,
+			     resync_val & ~TCP_DDP_RESYNC_REQ))
+		netdev->tcp_ddp_ops->tcp_ddp_resync(netdev, queue->sock->sk, pdu_seq);
+}
+
+static
+bool nvme_tcp_resync_request(struct sock *sk, u32 seq, u32 flags)
+{
+	struct nvme_tcp_queue *queue = sk->sk_user_data;
+
+	atomic64_set(&queue->resync_req,
+		     (((uint64_t)seq << 32) | flags));
+
+	return true;
+}
+
+#else
+
+static
+int nvme_tcp_offload_socket(struct nvme_tcp_queue *queue)
+{
+	return -EINVAL;
+}
+
+static
+void nvme_tcp_unoffload_socket(struct nvme_tcp_queue *queue)
+{}
+
+static
+int nvme_tcp_offload_limits(struct nvme_tcp_queue *queue)
+{
+	return -EINVAL;
+}
+
+static
+void nvme_tcp_resync_response(struct nvme_tcp_queue *queue,
+			      unsigned int pdu_seq)
+{}
+
+static
+bool nvme_tcp_resync_request(struct sock *sk, u32 seq, u32 flags)
+{
+	return false;
+}
+
+#endif
+
 static void nvme_tcp_init_iter(struct nvme_tcp_request *req,
 		unsigned int dir)
 {
@@ -627,6 +809,11 @@ static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
 	size_t rcv_len = min_t(size_t, *len, queue->pdu_remaining);
 	int ret;
 
+	u64 pdu_seq = TCP_SKB_CB(skb)->seq + *offset - queue->pdu_offset;
+
+	if (test_bit(NVME_TCP_Q_OFF_DDP, &queue->flags))
+		nvme_tcp_resync_response(queue, pdu_seq);
+
 	ret = skb_copy_bits(skb, *offset,
 		&pdu[queue->pdu_offset], rcv_len);
 	if (unlikely(ret))
@@ -1517,6 +1704,9 @@ static void __nvme_tcp_stop_queue(struct nvme_tcp_queue *queue)
 	kernel_sock_shutdown(queue->sock, SHUT_RDWR);
 	nvme_tcp_restore_sock_calls(queue);
 	cancel_work_sync(&queue->io_work);
+
+	if (test_bit(NVME_TCP_Q_OFF_DDP, &queue->flags))
+		nvme_tcp_unoffload_socket(queue);
 }
 
 static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
@@ -1534,10 +1724,13 @@ static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
 	int ret;
 
-	if (idx)
+	if (idx) {
 		ret = nvmf_connect_io_queue(nctrl, idx, false);
-	else
+		nvme_tcp_offload_socket(&ctrl->queues[idx]);
+	} else {
 		ret = nvmf_connect_admin_queue(nctrl);
+		nvme_tcp_offload_limits(&ctrl->queues[idx]);
+	}
 
 	if (!ret) {
 		set_bit(NVME_TCP_Q_LIVE, &ctrl->queues[idx].flags);
@@ -1640,6 +1833,8 @@ static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl *ctrl)
 {
 	int ret;
 
+	to_tcp_ctrl(ctrl)->offloading_netdev = NULL;
+
 	ret = nvme_tcp_alloc_queue(ctrl, 0, NVME_AQ_DEPTH);
 	if (ret)
 		return ret;
```
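For readers following the resync path in the patch above, here is a hedged userspace sketch of the same 64-bit encoding: the driver side packs the TCP sequence number into the upper 32 bits of resync_req and the TCP_DDP_RESYNC_REQ flag into the lower 32 bits, and the PDU-processing side clears the flag with a compare-and-swap so a request posted between the read and the clear is not lost. In this sketch, before() is a local stand-in for the kernel's TCP sequence comparison and the printf stands in for the tcp_ddp_resync() driver call.

```c
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* 1ULL so that ~TCP_DDP_RESYNC_REQ is a full 64-bit mask. */
#define TCP_DDP_RESYNC_REQ	(1ULL << 0)

static _Atomic uint64_t resync_req;

/* Wrap-safe TCP sequence comparison, like the kernel's before(). */
static int before(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) < 0;
}

/* Driver side: post a resync request for sequence 'seq'. */
static void resync_request(uint32_t seq, uint64_t flags)
{
	atomic_store(&resync_req, ((uint64_t)seq << 32) | flags);
}

/* ULP side: on each PDU header, answer a pending request if this
 * header is at or past the requested sequence number. */
static void resync_response(uint32_t pdu_seq)
{
	uint64_t val = atomic_load(&resync_req);
	uint32_t seq = val >> 32;

	if (!(val & TCP_DDP_RESYNC_REQ) || before(pdu_seq, seq))
		return;

	/* Compare-and-swap: a request posted after the read above changes
	 * the value and makes the exchange fail, so it is not lost. */
	uint64_t expected = val;
	if (atomic_compare_exchange_strong(&resync_req, &expected,
					   val & ~TCP_DDP_RESYNC_REQ))
		printf("resync at pdu_seq=%u\n", pdu_seq);	/* ~ tcp_ddp_resync() */
}

int main(void)
{
	resync_request(1000, TCP_DDP_RESYNC_REQ);
	resync_response(500);	/* too early: before the requested seq */
	resync_response(1500);	/* answers the request and clears the flag */
	return 0;
}
```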