| Message ID | 20230816120608.37135-14-hare@suse.de (mailing list archive) |
|---|---|
| State | Superseded |
| Series | nvme: In-kernel TLS support for TCP |
| Context | Check | Description |
|---|---|---|
| netdev/tree_selection | success | Not a local patch, async |
On 23/08/16 02:06PM, Hannes Reinecke wrote:
>The return value from nvmet_tcp_alloc_queue() is just used to
>figure out if sock_release() needs to be called. So this patch
>moves sock_release() into nvmet_tcp_alloc_queue() and makes it
>a void function.
>
>Signed-off-by: Hannes Reinecke <hare@suse.de>
>Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
>---
> drivers/nvme/target/tcp.c | 20 ++++++++++----------
> 1 file changed, 10 insertions(+), 10 deletions(-)
>
>diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
>index 97d07488072d..d44e9051ddd9 100644
>--- a/drivers/nvme/target/tcp.c
>+++ b/drivers/nvme/target/tcp.c
>@@ -1621,15 +1621,17 @@ static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue)
> 	return ret;
> }
> 
>-static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
>+static void nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
> 		struct socket *newsock)
> {
> 	struct nvmet_tcp_queue *queue;
> 	int ret;
> 
> 	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
>-	if (!queue)
>-		return -ENOMEM;
>+	if (!queue) {
>+		ret = -ENOMEM;
>+		goto out_release;
>+	}
> 
> 	INIT_WORK(&queue->release_work, nvmet_tcp_release_queue_work);
> 	INIT_WORK(&queue->io_work, nvmet_tcp_io_work);
>@@ -1666,7 +1668,7 @@ static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
> 	if (ret)
> 		goto out_destroy_sq;
> 
>-	return 0;
>+	return;
> out_destroy_sq:
> 	mutex_lock(&nvmet_tcp_queue_mutex);
> 	list_del_init(&queue->queue_list);
>@@ -1678,7 +1680,9 @@ static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
> 	ida_free(&nvmet_tcp_queue_ida, queue->idx);
> out_free_queue:
> 	kfree(queue);
>-	return ret;
>+out_release:
>+	pr_err("failed to allocate queue, error %d\n", ret);
>+	sock_release(newsock);
> }
> 
> static void nvmet_tcp_accept_work(struct work_struct *w)
>@@ -1695,11 +1699,7 @@ static void nvmet_tcp_accept_work(struct work_struct *w)
> 				pr_warn("failed to accept err=%d\n", ret);
> 			return;
> 		}
>-		ret = nvmet_tcp_alloc_queue(port, newsock);
>-		if (ret) {
>-			pr_err("failed to allocate queue\n");
>-			sock_release(newsock);
>-		}
>+		nvmet_tcp_alloc_queue(port, newsock);
> 	}
> }
> 
>-- 
>2.35.3
>

Reviewed-by: Nitesh Shetty <nj.shetty@samsung.com>
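To make the ownership change concrete, here is a minimal userspace sketch of the pattern the patch adopts: the allocation routine consumes the socket and releases it itself on every failure path, so it can return void and the accept loop no longer needs an error branch. This is a hypothetical analogue only; plain file descriptors, calloc() and close() stand in for struct socket, kzalloc() and sock_release(), and none of it is the actual nvmet code.

```c
/*
 * Hypothetical userspace analogue (not the nvmet code itself): the setup
 * function takes ownership of the descriptor and closes it on every
 * failure path, so it can return void and the caller needs no cleanup.
 */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

struct queue {
	int fd;	/* stands in for the accepted struct socket */
};

/* Consumes "fd" unconditionally, like nvmet_tcp_alloc_queue() consumes newsock. */
static void alloc_queue(int fd)
{
	struct queue *q = calloc(1, sizeof(*q));
	int ret;

	if (!q) {
		ret = -ENOMEM;
		goto out_release;
	}
	q->fd = fd;
	/* ... further per-queue setup would go here ... */
	free(q);	/* sketch only: tear the queue down again right away */
	close(fd);
	return;

out_release:
	fprintf(stderr, "failed to allocate queue, error %d\n", ret);
	close(fd);	/* the failure path releases the descriptor itself */
}

int main(void)
{
	int fd = open("/dev/null", O_RDONLY);	/* stands in for kernel_accept() */

	if (fd < 0)
		return 1;
	/* No return value to check and no close() in the caller any more. */
	alloc_queue(fd);
	return 0;
}
```

The trade-off is that the socket's lifetime is now hidden inside the allocation routine, which is also why the patch moves the "failed to allocate queue" error message there.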