From patchwork Sun May 29 16:19:59 2022
X-Patchwork-Submitter: Hao Xu
X-Patchwork-Id: 12864210
From: Hao Xu
To: io-uring@vger.kernel.org
Cc: Jens Axboe, Pavel Begunkov
Subject: [PATCH 1/2] io_uring: add an argument for io_poll_disarm()
Date: Mon, 30 May 2022 00:19:59 +0800
Message-Id: <20220529162000.32489-2-haoxu.linux@icloud.com>
In-Reply-To: <20220529162000.32489-1-haoxu.linux@icloud.com>
References: <20220529162000.32489-1-haoxu.linux@icloud.com>

Add an argument to io_poll_disarm() for later use.

Signed-off-by: Hao Xu
---
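Not for the commit message, just background for reviewers: io_poll_find()
already computes the hash bucket index once, so it can hand that index back
through cd->flags, and io_poll_remove() can then pass it on to
io_poll_disarm() instead of rehashing; the next patch uses the same index
to pick the matching per-bucket lock. A rough userspace sketch of the
"lookup returns its bucket" pattern follows; every name in it is invented
for illustration, none of it is kernel code:

  #include <stddef.h>
  #include <stdint.h>

  #define NR_BUCKETS 32	/* power of two, like cancel_hash */

  struct entry {
  	uint64_t key;
  	struct entry *next;
  };

  static struct entry *buckets[NR_BUCKETS];

  /* Lookup reports which bucket the key hashed to, so the caller
   * does not have to recompute it for the later removal. */
  static struct entry *find(uint64_t key, uint32_t *index)
  {
  	uint32_t idx = (uint32_t)(key * 0x9E3779B97F4A7C15ULL) & (NR_BUCKETS - 1);
  	struct entry *e;

  	*index = idx;
  	for (e = buckets[idx]; e; e = e->next)
  		if (e->key == key)
  			return e;
  	return NULL;
  }

  /* Removal reuses the cached index instead of rehashing. */
  static void disarm(struct entry *victim, uint32_t index)
  {
  	struct entry **pp;

  	for (pp = &buckets[index]; *pp; pp = &(*pp)->next) {
  		if (*pp == victim) {
  			*pp = victim->next;
  			return;
  		}
  	}
  }

A cancel path then reads roughly: find(key, &idx) followed by
disarm(e, idx), which is the shape io_poll_remove() takes after this patch,
with cd.flags carrying the index.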
 io_uring/poll.c | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/io_uring/poll.c b/io_uring/poll.c
index 728f6e7b47c5..c8982c5ef0fa 100644
--- a/io_uring/poll.c
+++ b/io_uring/poll.c
@@ -561,8 +561,9 @@ static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, bool poll_only,
 {
 	struct hlist_head *list;
 	struct io_kiocb *req;
+	u32 index = hash_long(cd->data, ctx->cancel_hash_bits);
 
-	list = &ctx->cancel_hash[hash_long(cd->data, ctx->cancel_hash_bits)];
+	list = &ctx->cancel_hash[index];
 	hlist_for_each_entry(req, list, hash_node) {
 		if (cd->data != req->cqe.user_data)
 			continue;
@@ -573,6 +574,7 @@ static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, bool poll_only,
 				continue;
 			req->work.cancel_seq = cd->seq;
 		}
+		cd->flags = index;
 		return req;
 	}
 	return NULL;
 }
@@ -602,7 +604,7 @@ static struct io_kiocb *io_poll_file_find(struct io_ring_ctx *ctx,
 	return NULL;
 }
 
-static bool io_poll_disarm(struct io_kiocb *req)
+static bool io_poll_disarm(struct io_kiocb *req, u32 index)
 	__must_hold(&ctx->completion_lock)
 {
 	if (!io_poll_get_ownership(req))
 		return false;
@@ -724,7 +726,7 @@ int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags)
 	spin_lock(&ctx->completion_lock);
 	preq = io_poll_find(ctx, true, &cd);
-	if (!preq || !io_poll_disarm(preq)) {
+	if (!preq || !io_poll_disarm(preq, cd.flags)) {
 		spin_unlock(&ctx->completion_lock);
 		ret = preq ? -EALREADY : -ENOENT;
 		goto out;

From patchwork Sun May 29 16:20:00 2022
X-Patchwork-Submitter: Hao Xu
X-Patchwork-Id: 12864211
From: Hao Xu
To: io-uring@vger.kernel.org
Cc: Jens Axboe, Pavel Begunkov
Subject: [PATCH 2/2] io_uring: switch cancel_hash to use per list spinlock
Date: Mon, 30 May 2022 00:20:00 +0800
Message-Id: <20220529162000.32489-3-haoxu.linux@icloud.com>
In-Reply-To: <20220529162000.32489-1-haoxu.linux@icloud.com>
References: <20220529162000.32489-1-haoxu.linux@icloud.com>

Use a per-list lock for cancel_hash. This removes some completion_lock
invocations and removes contention between different cancel_hash entries.

Signed-off-by: Hao Xu
---
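Not for the commit message: to see the locking change in isolation, here is
a rough userspace model of the idea, one lock per hash bucket so that work
on different buckets never contends. Every name is invented for
illustration and pthread mutexes stand in for kernel spinlocks; this is not
the kernel code:

  #include <pthread.h>
  #include <stdint.h>

  #define NR_BUCKETS 32	/* power of two, like cancel_hash */

  struct node {
  	uint64_t key;
  	struct node *next;
  };

  static struct node *buckets[NR_BUCKETS];
  /* one lock per bucket replaces a single lock over the whole table */
  static pthread_mutex_t bucket_locks[NR_BUCKETS];

  static uint32_t hash(uint64_t key)
  {
  	return (uint32_t)(key * 0x9E3779B97F4A7C15ULL) & (NR_BUCKETS - 1);
  }

  static void table_init(void)
  {
  	int i;

  	for (i = 0; i < NR_BUCKETS; i++)
  		pthread_mutex_init(&bucket_locks[i], NULL);
  }

  static void insert(struct node *n)
  {
  	uint32_t idx = hash(n->key);

  	/* only this bucket is serialized; inserts into other
  	 * buckets proceed in parallel */
  	pthread_mutex_lock(&bucket_locks[idx]);
  	n->next = buckets[idx];
  	buckets[idx] = n;
  	pthread_mutex_unlock(&bucket_locks[idx]);
  }

  static void remove_one(struct node *victim, uint32_t idx)
  {
  	struct node **pp;

  	pthread_mutex_lock(&bucket_locks[idx]);
  	for (pp = &buckets[idx]; *pp; pp = &(*pp)->next) {
  		if (*pp == victim) {
  			*pp = victim->next;
  			break;
  		}
  	}
  	pthread_mutex_unlock(&bucket_locks[idx]);
  }

Whole-table walks (the analogue of io_poll_remove_all() and
io_poll_file_find()) then take and drop each bucket lock in turn instead of
holding one global lock across the scan, which is where the reduced
contention comes from.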
 io_uring/cancel.c         | 12 ++++++++++--
 io_uring/cancel.h         |  1 +
 io_uring/io_uring.c       |  9 +++++++++
 io_uring/io_uring_types.h |  1 +
 io_uring/poll.c           | 30 ++++++++++++++++--------------
 5 files changed, 37 insertions(+), 16 deletions(-)

diff --git a/io_uring/cancel.c b/io_uring/cancel.c
index 83cceb52d82d..0b1aa3ab7664 100644
--- a/io_uring/cancel.c
+++ b/io_uring/cancel.c
@@ -93,14 +93,14 @@ int io_try_cancel(struct io_kiocb *req, struct io_cancel_data *cd)
 	if (!ret)
 		return 0;
 
-	spin_lock(&ctx->completion_lock);
 	ret = io_poll_cancel(ctx, cd);
 	if (ret != -ENOENT)
 		goto out;
+	spin_lock(&ctx->completion_lock);
 	if (!(cd->flags & IORING_ASYNC_CANCEL_FD))
 		ret = io_timeout_cancel(ctx, cd);
-out:
 	spin_unlock(&ctx->completion_lock);
+out:
 	return ret;
 }
 
@@ -192,3 +192,11 @@ int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
 	io_req_set_res(req, ret, 0);
 	return IOU_OK;
 }
+
+inline void init_cancel_hash_locks(spinlock_t *cancel_hash_locks, unsigned size)
+{
+	int i;
+
+	for (i = 0; i < size; i++)
+		spin_lock_init(&cancel_hash_locks[i]);
+}
diff --git a/io_uring/cancel.h b/io_uring/cancel.h
index 4f35d8696325..fdec2595797e 100644
--- a/io_uring/cancel.h
+++ b/io_uring/cancel.h
@@ -4,3 +4,4 @@
 int io_async_cancel_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
 int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags);
 int io_try_cancel(struct io_kiocb *req, struct io_cancel_data *cd);
+inline void init_cancel_hash_locks(spinlock_t *cancel_hash_locks, unsigned size);
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index f31d3446dcbf..6eaa27aea197 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -706,7 +706,14 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
 			    GFP_KERNEL);
 	if (!ctx->cancel_hash)
 		goto err;
+	ctx->cancel_hash_locks =
+		kmalloc((1U << hash_bits) * sizeof(spinlock_t),
+			GFP_KERNEL);
+	if (!ctx->cancel_hash_locks)
+		goto err;
+
 	__hash_init(ctx->cancel_hash, 1U << hash_bits);
+	init_cancel_hash_locks(ctx->cancel_hash_locks, 1U << hash_bits);
 
 	ctx->dummy_ubuf = kzalloc(sizeof(*ctx->dummy_ubuf), GFP_KERNEL);
 	if (!ctx->dummy_ubuf)
@@ -749,6 +756,7 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
 err:
 	kfree(ctx->dummy_ubuf);
 	kfree(ctx->cancel_hash);
+	kfree(ctx->cancel_hash_locks);
 	kfree(ctx->io_bl);
 	xa_destroy(&ctx->io_bl_xa);
 	kfree(ctx);
@@ -3045,6 +3053,7 @@ static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
 	if (ctx->hash_map)
 		io_wq_put_hash(ctx->hash_map);
 	kfree(ctx->cancel_hash);
+	kfree(ctx->cancel_hash_locks);
 	kfree(ctx->dummy_ubuf);
 	kfree(ctx->io_bl);
 	xa_destroy(&ctx->io_bl_xa);
diff --git a/io_uring/io_uring_types.h b/io_uring/io_uring_types.h
index 7c22cf35a7e2..4619a46f7ecd 100644
--- a/io_uring/io_uring_types.h
+++ b/io_uring/io_uring_types.h
@@ -231,6 +231,7 @@ struct io_ring_ctx {
 		 */
 		struct io_wq_work_list	iopoll_list;
 		struct hlist_head	*cancel_hash;
+		spinlock_t		*cancel_hash_locks;
 		unsigned		cancel_hash_bits;
 		bool			poll_multi_queue;
 
diff --git a/io_uring/poll.c b/io_uring/poll.c
index c8982c5ef0fa..e1b6dd282860 100644
--- a/io_uring/poll.c
+++ b/io_uring/poll.c
@@ -73,10 +73,11 @@ static struct io_poll *io_poll_get_single(struct io_kiocb *req)
 static void io_poll_req_insert(struct io_kiocb *req)
 {
 	struct io_ring_ctx *ctx = req->ctx;
-	struct hlist_head *list;
+	u32 index = hash_long(req->cqe.user_data, ctx->cancel_hash_bits);
 
-	list = &ctx->cancel_hash[hash_long(req->cqe.user_data, ctx->cancel_hash_bits)];
-	hlist_add_head(&req->hash_node, list);
+	spin_lock(&ctx->cancel_hash_locks[index]);
+	hlist_add_head(&req->hash_node, &ctx->cancel_hash[index]);
+	spin_unlock(&ctx->cancel_hash_locks[index]);
 }
 
 static void io_init_poll_iocb(struct io_poll *poll, __poll_t events,
@@ -439,9 +440,7 @@ static int __io_arm_poll_handler(struct io_kiocb *req,
 		return 0;
 	}
 
-	spin_lock(&ctx->completion_lock);
 	io_poll_req_insert(req);
-	spin_unlock(&ctx->completion_lock);
 
 	if (mask && (poll->events & EPOLLET)) {
 		/* can't multishot if failed, just queue the event we've got */
@@ -538,10 +537,10 @@ __cold bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
 	bool found = false;
 	int i;
 
-	spin_lock(&ctx->completion_lock);
 	for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
 		struct hlist_head *list;
 
+		spin_lock(&ctx->cancel_hash_locks[i]);
 		list = &ctx->cancel_hash[i];
 		hlist_for_each_entry_safe(req, tmp, list, hash_node) {
 			if (io_match_task_safe(req, tsk, cancel_all)) {
@@ -550,19 +549,19 @@ __cold bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
 				found = true;
 			}
 		}
+		spin_unlock(&ctx->cancel_hash_locks[i]);
 	}
-	spin_unlock(&ctx->completion_lock);
 	return found;
 }
 
 static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, bool poll_only,
 				     struct io_cancel_data *cd)
-	__must_hold(&ctx->completion_lock)
 {
 	struct hlist_head *list;
 	struct io_kiocb *req;
 	u32 index = hash_long(cd->data, ctx->cancel_hash_bits);
 
+	spin_lock(&ctx->cancel_hash_locks[index]);
 	list = &ctx->cancel_hash[index];
 	hlist_for_each_entry(req, list, hash_node) {
 		if (cd->data != req->cqe.user_data)
@@ -574,15 +573,16 @@ static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, bool poll_only,
 				continue;
 			req->work.cancel_seq = cd->seq;
 		}
+		spin_unlock(&ctx->cancel_hash_locks[index]);
 		cd->flags = index;
 		return req;
 	}
+	spin_unlock(&ctx->cancel_hash_locks[index]);
 	return NULL;
 }
 
 static struct io_kiocb *io_poll_file_find(struct io_ring_ctx *ctx,
 					  struct io_cancel_data *cd)
-	__must_hold(&ctx->completion_lock)
 {
 	struct io_kiocb *req;
 	int i;
@@ -590,6 +590,7 @@ static struct io_kiocb *io_poll_file_find(struct io_ring_ctx *ctx,
 	for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
 		struct hlist_head *list;
 
+		spin_lock(&ctx->cancel_hash_locks[i]);
 		list = &ctx->cancel_hash[i];
 		hlist_for_each_entry(req, list, hash_node) {
 			if (!(cd->flags & IORING_ASYNC_CANCEL_ANY) &&
@@ -598,24 +599,28 @@ static struct io_kiocb *io_poll_file_find(struct io_ring_ctx *ctx,
 			if (cd->seq == req->work.cancel_seq)
 				continue;
 			req->work.cancel_seq = cd->seq;
+			spin_unlock(&ctx->cancel_hash_locks[i]);
 			return req;
 		}
+		spin_unlock(&ctx->cancel_hash_locks[i]);
 	}
 	return NULL;
 }
 
 static bool io_poll_disarm(struct io_kiocb *req, u32 index)
-	__must_hold(&ctx->completion_lock)
 {
+	struct io_ring_ctx *ctx = req->ctx;
+
 	if (!io_poll_get_ownership(req))
 		return false;
 
 	io_poll_remove_entries(req);
+	spin_lock(&ctx->cancel_hash_locks[index]);
 	hash_del(&req->hash_node);
+	spin_unlock(&ctx->cancel_hash_locks[index]);
 	return true;
 }
 
 int io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd)
-	__must_hold(&ctx->completion_lock)
 {
 	struct io_kiocb *req;
@@ -724,14 +729,11 @@ int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags)
 	int ret2, ret = 0;
 	bool locked;
 
-	spin_lock(&ctx->completion_lock);
 	preq = io_poll_find(ctx, true, &cd);
 	if (!preq || !io_poll_disarm(preq, cd.flags)) {
-		spin_unlock(&ctx->completion_lock);
 		ret = preq ? -EALREADY : -ENOENT;
 		goto out;
 	}
-	spin_unlock(&ctx->completion_lock);
 
 	if (poll_update->update_events || poll_update->update_user_data) {
 		/* only mask one event flags, keep behavior flags */