Message ID | 20210201100509.27351-10-borisp@mellanox.com (mailing list archive) |
---|---|
State | Changes Requested |
Delegated to | Netdev Maintainers |
Series | nvme-tcp receive offloads |
Context | Check | Description |
---|---|---|
netdev/cover_letter | success | Link |
netdev/fixes_present | success | Link |
netdev/patch_count | fail | Series longer than 15 patches |
netdev/tree_selection | success | Clearly marked for net-next |
netdev/subject_prefix | success | Link |
netdev/cc_maintainers | success | CCed 5 of 5 maintainers |
netdev/source_inline | success | Was 0 now: 0 |
netdev/verify_signedoff | success | Link |
netdev/module_param | success | Was 0 now: 0 |
netdev/build_32bit | success | Errors and warnings before: 2 this patch: 2 |
netdev/kdoc | success | Errors and warnings before: 0 this patch: 0 |
netdev/verify_fixes | success | Link |
netdev/checkpatch | warning | WARNING: line length of 82 exceeds 80 columns |
netdev/build_allmodconfig_warn | success | Errors and warnings before: 2 this patch: 2 |
netdev/header_inline | success | Link |
netdev/stable | success | Stable not CCed |
> @@ -2930,6 +2931,27 @@ static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
>  	return ERR_PTR(ret);
>  }
>
> +static int nvme_tcp_netdev_event(struct notifier_block *this,
> +				 unsigned long event, void *ptr)
> +{
> +	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
> +	struct nvme_tcp_ctrl *ctrl;
> +
> +	switch (event) {
> +	case NETDEV_GOING_DOWN:
> +		mutex_lock(&nvme_tcp_ctrl_mutex);
> +		list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list) {
> +			if (ndev != ctrl->offloading_netdev)
> +				continue;
> +			nvme_tcp_error_recovery(&ctrl->ctrl);
> +		}
> +		mutex_unlock(&nvme_tcp_ctrl_mutex);
> +		flush_workqueue(nvme_reset_wq);
> +		/* we assume that the going down part of error recovery is over */

Maybe phrase it as:
/*
 * The associated controllers teardown has completed, ddp contexts
 * were also torn down so we should be safe to continue...
 */
On Wed, Feb 3, 2021 at 11:17 AM Sagi Grimberg <sagi@grimberg.me> wrote:
> > @@ -2930,6 +2931,27 @@ static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
> >  	return ERR_PTR(ret);
> >  }
> >
> > +static int nvme_tcp_netdev_event(struct notifier_block *this,
> > +				 unsigned long event, void *ptr)
> > +{
> > +	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
> > +	struct nvme_tcp_ctrl *ctrl;
> > +
> > +	switch (event) {
> > +	case NETDEV_GOING_DOWN:
> > +		mutex_lock(&nvme_tcp_ctrl_mutex);
> > +		list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list) {
> > +			if (ndev != ctrl->offloading_netdev)
> > +				continue;
> > +			nvme_tcp_error_recovery(&ctrl->ctrl);
> > +		}
> > +		mutex_unlock(&nvme_tcp_ctrl_mutex);
> > +		flush_workqueue(nvme_reset_wq);
> > +		/* we assume that the going down part of error recovery is over */
>
> Maybe phrase it as:
> /*
>  * The associated controllers teardown has completed, ddp contexts
>  * were also torn down so we should be safe to continue...
>  */

sure
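The comment being reworded documents a real synchronization point: nvme_tcp_error_recovery() only *queues* the teardown work, and it is the subsequent flush_workqueue(nvme_reset_wq) that waits for that work, ddp-context teardown included, to finish before the notifier returns and the netdev continues going down. Below is a minimal sketch of that flush-as-barrier pattern; the workqueue, work item, and module names are made up for illustration and are not part of the patch:

```c
/*
 * Minimal sketch (not from the patch) of the flush_workqueue() barrier
 * the comment relies on: teardown is queued as a work item, and
 * flush_workqueue() returns only after every item queued before the
 * call has finished running.
 */
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *teardown_wq;

static void teardown_fn(struct work_struct *work)
{
	/* stand-in for the controller teardown done by error recovery */
	pr_info("teardown work done\n");
}
static DECLARE_WORK(teardown_work, teardown_fn);

static int __init barrier_demo_init(void)
{
	teardown_wq = alloc_workqueue("teardown_wq", WQ_MEM_RECLAIM, 0);
	if (!teardown_wq)
		return -ENOMEM;

	queue_work(teardown_wq, &teardown_work);
	flush_workqueue(teardown_wq);	/* blocks until teardown_fn has run */
	/* from here on it is safe to let the netdev finish going down */
	return 0;
}

static void __exit barrier_demo_exit(void)
{
	destroy_workqueue(teardown_wq);
}

module_init(barrier_demo_init);
module_exit(barrier_demo_exit);
MODULE_LICENSE("GPL");
```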
```
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index eb47cf6982d7..5f6eed3a9bc5 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -146,6 +146,7 @@ struct nvme_tcp_ctrl {
 
 static LIST_HEAD(nvme_tcp_ctrl_list);
 static DEFINE_MUTEX(nvme_tcp_ctrl_mutex);
+static struct notifier_block nvme_tcp_netdevice_nb;
 static struct workqueue_struct *nvme_tcp_wq;
 static const struct blk_mq_ops nvme_tcp_mq_ops;
 static const struct blk_mq_ops nvme_tcp_admin_mq_ops;
@@ -2930,6 +2931,27 @@ static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
 	return ERR_PTR(ret);
 }
 
+static int nvme_tcp_netdev_event(struct notifier_block *this,
+				 unsigned long event, void *ptr)
+{
+	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
+	struct nvme_tcp_ctrl *ctrl;
+
+	switch (event) {
+	case NETDEV_GOING_DOWN:
+		mutex_lock(&nvme_tcp_ctrl_mutex);
+		list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list) {
+			if (ndev != ctrl->offloading_netdev)
+				continue;
+			nvme_tcp_error_recovery(&ctrl->ctrl);
+		}
+		mutex_unlock(&nvme_tcp_ctrl_mutex);
+		flush_workqueue(nvme_reset_wq);
+		/* we assume that the going down part of error recovery is over */
+	}
+	return NOTIFY_DONE;
+}
+
 static struct nvmf_transport_ops nvme_tcp_transport = {
 	.name		= "tcp",
 	.module		= THIS_MODULE,
@@ -2944,13 +2966,26 @@ static struct nvmf_transport_ops nvme_tcp_transport = {
 
 static int __init nvme_tcp_init_module(void)
 {
+	int ret;
+
 	nvme_tcp_wq = alloc_workqueue("nvme_tcp_wq",
 			WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
 	if (!nvme_tcp_wq)
 		return -ENOMEM;
 
+	nvme_tcp_netdevice_nb.notifier_call = nvme_tcp_netdev_event;
+	ret = register_netdevice_notifier(&nvme_tcp_netdevice_nb);
+	if (ret) {
+		pr_err("failed to register netdev notifier\n");
+		goto out_err_reg_notifier;
+	}
+
 	nvmf_register_transport(&nvme_tcp_transport);
 	return 0;
+
+out_err_reg_notifier:
+	destroy_workqueue(nvme_tcp_wq);
+	return ret;
 }
 
 static void __exit nvme_tcp_cleanup_module(void)
@@ -2958,6 +2993,7 @@ static void __exit nvme_tcp_cleanup_module(void)
 	struct nvme_tcp_ctrl *ctrl;
 
 	nvmf_unregister_transport(&nvme_tcp_transport);
+	unregister_netdevice_notifier(&nvme_tcp_netdevice_nb);
 
 	mutex_lock(&nvme_tcp_ctrl_mutex);
 	list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list)
```
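A note on the event choice: NETDEV_GOING_DOWN is raised from the close path while the interface is still operational, before the device actually stops, which is what gives the handler a window to run error recovery over a working offloading netdev; NETDEV_DOWN would arrive too late. The following is a self-contained skeleton of the same notifier lifecycle, using only the standard netdevice notifier API (module and message names are hypothetical):

```c
/*
 * Self-contained skeleton (not part of the patch) of the same notifier
 * lifecycle: register on module init, react to NETDEV_GOING_DOWN,
 * unregister on exit.
 */
#include <linux/module.h>
#include <linux/netdevice.h>

static int demo_netdev_event(struct notifier_block *this,
			     unsigned long event, void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);

	/* the interface is still operational at this point */
	if (event == NETDEV_GOING_DOWN)
		pr_info("%s is going down\n", netdev_name(ndev));

	return NOTIFY_DONE;
}

static struct notifier_block demo_nb = {
	.notifier_call	= demo_netdev_event,
};

static int __init demo_init(void)
{
	/* also replays registration events for already-existing devices */
	return register_netdevice_notifier(&demo_nb);
}

static void __exit demo_exit(void)
{
	unregister_netdevice_notifier(&demo_nb);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
```

Bringing an interface down with `ip link set <dev> down` fires the handler before the device stops, mirroring the window the patch uses to tear down its controllers and DDP contexts.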