From patchwork Wed Jul 1 15:54:43 2020
X-Patchwork-Id: 11636751
From: Jeff Layton <jlayton@kernel.org>
To: ceph-devel@vger.kernel.org
Cc: idryomov@gmail.com
Subject: [PATCH 1/4] libceph: just have osd_req_op_init return a pointer
Date: Wed, 1 Jul 2020 11:54:43 -0400
Message-Id: <20200701155446.41141-2-jlayton@kernel.org>
In-Reply-To: <20200701155446.41141-1-jlayton@kernel.org>
References: <20200701155446.41141-1-jlayton@kernel.org>

Callers can simply ignore the return value, so there is no need for a
wrapper whose only job is to cast the other function's return to void.
Rename _osd_req_op_init() to osd_req_op_init() and export it directly.
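
For example, a watch op can now be set up directly through the returned
pointer. A minimal sketch, modeled on osd_req_op_watch_init() from this
patch; setup_watch_op() itself is a hypothetical caller:

#include <linux/ceph/osd_client.h>

static void setup_watch_op(struct ceph_osd_request *req, int which,
			   u64 cookie, u8 watch_opcode)
{
	struct ceph_osd_req_op *op;

	/* osd_req_op_init() now hands back the op it just initialized */
	op = osd_req_op_init(req, which, CEPH_OSD_OP_WATCH, 0);
	op->watch.cookie = cookie;
	op->watch.op = watch_opcode;
	op->watch.gen = 0;
}
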
Signed-off-by: Jeff Layton <jlayton@kernel.org>
---
 include/linux/ceph/osd_client.h |  2 +-
 net/ceph/osd_client.c           | 31 ++++++++++++-------------------
 2 files changed, 13 insertions(+), 20 deletions(-)

diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index c60b59e9291b..8d63dc22cb36 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -404,7 +404,7 @@ void ceph_osdc_clear_abort_err(struct ceph_osd_client *osdc);
 	&__oreq->r_ops[__whch].typ.fld;					\
 })
 
-extern void osd_req_op_init(struct ceph_osd_request *osd_req,
+extern struct ceph_osd_req_op *osd_req_op_init(struct ceph_osd_request *osd_req,
 			    unsigned int which, u16 opcode, u32 flags);
 
 extern void osd_req_op_raw_data_in_pages(struct ceph_osd_request *,
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index db6abb5a5511..3cff29d38b9f 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -525,7 +525,7 @@ EXPORT_SYMBOL(ceph_osdc_put_request);
 
 static void request_init(struct ceph_osd_request *req)
 {
-	/* req only, each op is zeroed in _osd_req_op_init() */
+	/* req only, each op is zeroed in osd_req_op_init() */
 	memset(req, 0, sizeof(*req));
 
 	kref_init(&req->r_kref);
@@ -746,8 +746,8 @@ EXPORT_SYMBOL(ceph_osdc_alloc_messages);
  * other information associated with them.  It also serves as a
  * common init routine for all the other init functions, below.
  */
-static struct ceph_osd_req_op *
-_osd_req_op_init(struct ceph_osd_request *osd_req, unsigned int which,
+struct ceph_osd_req_op *
+osd_req_op_init(struct ceph_osd_request *osd_req, unsigned int which,
 		 u16 opcode, u32 flags)
 {
 	struct ceph_osd_req_op *op;
@@ -762,12 +762,6 @@ _osd_req_op_init(struct ceph_osd_request *osd_req, unsigned int which,
 
 	return op;
 }
-
-void osd_req_op_init(struct ceph_osd_request *osd_req,
-		     unsigned int which, u16 opcode, u32 flags)
-{
-	(void)_osd_req_op_init(osd_req, which, opcode, flags);
-}
 EXPORT_SYMBOL(osd_req_op_init);
 
 void osd_req_op_extent_init(struct ceph_osd_request *osd_req,
@@ -775,8 +769,7 @@ void osd_req_op_extent_init(struct ceph_osd_request *osd_req,
 			    u64 offset, u64 length,
 			    u64 truncate_size, u32 truncate_seq)
 {
-	struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
-						      opcode, 0);
+	struct ceph_osd_req_op *op = osd_req_op_init(osd_req, which, opcode, 0);
 	size_t payload_len = 0;
 
 	BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
@@ -822,7 +815,7 @@ void osd_req_op_extent_dup_last(struct ceph_osd_request *osd_req,
 	BUG_ON(which + 1 >= osd_req->r_num_ops);
 
 	prev_op = &osd_req->r_ops[which];
-	op = _osd_req_op_init(osd_req, which + 1, prev_op->op, prev_op->flags);
+	op = osd_req_op_init(osd_req, which + 1, prev_op->op, prev_op->flags);
 	/* dup previous one */
 	op->indata_len = prev_op->indata_len;
 	op->outdata_len = prev_op->outdata_len;
@@ -845,7 +838,7 @@ int osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which,
 	size_t size;
 	int ret;
 
-	op = _osd_req_op_init(osd_req, which, CEPH_OSD_OP_CALL, 0);
+	op = osd_req_op_init(osd_req, which, CEPH_OSD_OP_CALL, 0);
 
 	pagelist = ceph_pagelist_alloc(GFP_NOFS);
 	if (!pagelist)
@@ -883,7 +876,7 @@ int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int which,
 			  u16 opcode, const char *name, const void *value,
 			  size_t size, u8 cmp_op, u8 cmp_mode)
 {
-	struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
+	struct ceph_osd_req_op *op = osd_req_op_init(osd_req, which,
 						      opcode, 0);
 	struct ceph_pagelist *pagelist;
 	size_t payload_len;
@@ -928,7 +921,7 @@ static void osd_req_op_watch_init(struct ceph_osd_request *req, int which,
 {
 	struct ceph_osd_req_op *op;
 
-	op = _osd_req_op_init(req, which, CEPH_OSD_OP_WATCH, 0);
+	op = osd_req_op_init(req, which, CEPH_OSD_OP_WATCH, 0);
 	op->watch.cookie = cookie;
 	op->watch.op = watch_opcode;
 	op->watch.gen = 0;
@@ -943,7 +936,7 @@ void osd_req_op_alloc_hint_init(struct ceph_osd_request *osd_req,
 				u64 expected_write_size,
 				u32 flags)
 {
-	struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
+	struct ceph_osd_req_op *op = osd_req_op_init(osd_req, which,
 						     CEPH_OSD_OP_SETALLOCHINT,
 						     0);
 
@@ -4799,7 +4792,7 @@ static int osd_req_op_notify_ack_init(struct ceph_osd_request *req, int which,
 	struct ceph_pagelist *pl;
 	int ret;
 
-	op = _osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY_ACK, 0);
+	op = osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY_ACK, 0);
 
 	pl = ceph_pagelist_alloc(GFP_NOIO);
 	if (!pl)
@@ -4868,7 +4861,7 @@ static int osd_req_op_notify_init(struct ceph_osd_request *req, int which,
 	struct ceph_pagelist *pl;
 	int ret;
 
-	op = _osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY, 0);
+	op = osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY, 0);
 	op->notify.cookie = cookie;
 
 	pl = ceph_pagelist_alloc(GFP_NOIO);
@@ -5332,7 +5325,7 @@ static int osd_req_op_copy_from_init(struct ceph_osd_request *req,
 	if (IS_ERR(pages))
 		return PTR_ERR(pages);
 
-	op = _osd_req_op_init(req, 0, CEPH_OSD_OP_COPY_FROM2,
+	op = osd_req_op_init(req, 0, CEPH_OSD_OP_COPY_FROM2,
 			      dst_fadvise_flags);
 	op->copy_from.snapid = src_snapid;
 	op->copy_from.src_version = src_version;

From patchwork Wed Jul 1 15:54:44 2020
X-Patchwork-Id: 11636747
From: Jeff Layton <jlayton@kernel.org>
To: ceph-devel@vger.kernel.org
Cc: idryomov@gmail.com
Subject: [PATCH 2/4] libceph: refactor osdc request initialization
Date: Wed, 1 Jul 2020 11:54:44 -0400
Message-Id: <20200701155446.41141-3-jlayton@kernel.org>
In-Reply-To: <20200701155446.41141-1-jlayton@kernel.org>
References: <20200701155446.41141-1-jlayton@kernel.org>

Turn the request_init helper into a more full-featured initialization
routine that we can use to initialize an already-allocated request.
Make it a public and exported function so we can use it from ceph.ko.

Signed-off-by: Jeff Layton <jlayton@kernel.org>
---
 include/linux/ceph/osd_client.h |  4 ++++
 net/ceph/osd_client.c           | 28 +++++++++++++++-------------
 2 files changed, 19 insertions(+), 13 deletions(-)

diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index 8d63dc22cb36..40a08c4e5d8d 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -495,6 +495,10 @@ extern struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *,
 
 extern void ceph_osdc_get_request(struct ceph_osd_request *req);
 extern void ceph_osdc_put_request(struct ceph_osd_request *req);
+void ceph_osdc_init_request(struct ceph_osd_request *req,
+			    struct ceph_osd_client *osdc,
+			    struct ceph_snap_context *snapc,
+			    unsigned int num_ops);
 
 extern int ceph_osdc_start_request(struct ceph_osd_client *osdc,
 				   struct ceph_osd_request *req,
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 3cff29d38b9f..4ddf23120b1a 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -523,7 +523,10 @@ void ceph_osdc_put_request(struct ceph_osd_request *req)
 }
 EXPORT_SYMBOL(ceph_osdc_put_request);
 
-static void request_init(struct ceph_osd_request *req)
+void ceph_osdc_init_request(struct ceph_osd_request *req,
+			    struct ceph_osd_client *osdc,
+			    struct ceph_snap_context *snapc,
+			    unsigned int num_ops)
 {
 	/* req only, each op is zeroed in osd_req_op_init() */
 	memset(req, 0, sizeof(*req));
@@ -535,7 +538,13 @@ static void request_init(struct ceph_osd_request *req)
 	INIT_LIST_HEAD(&req->r_private_item);
 
 	target_init(&req->r_t);
+
+	req->r_osdc = osdc;
+	req->r_num_ops = num_ops;
+	req->r_snapid = CEPH_NOSNAP;
+	req->r_snapc = ceph_get_snap_context(snapc);
 }
+EXPORT_SYMBOL(ceph_osdc_init_request);
 
 /*
  * This is ugly, but it allows us to reuse linger registration and ping
@@ -563,12 +572,9 @@ static void request_reinit(struct ceph_osd_request *req)
 	WARN_ON(kref_read(&reply_msg->kref) != 1);
 	target_destroy(&req->r_t);
 
-	request_init(req);
-	req->r_osdc = osdc;
+	ceph_osdc_init_request(req, osdc, snapc, num_ops);
 	req->r_mempool = mempool;
-	req->r_num_ops = num_ops;
 	req->r_snapid = snapid;
-	req->r_snapc = snapc;
 	req->r_linger = linger;
 	req->r_request = request_msg;
 	req->r_reply = reply_msg;
@@ -591,15 +597,11 @@ struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
 		BUG_ON(num_ops > CEPH_OSD_MAX_OPS);
 		req = kmalloc(struct_size(req, r_ops, num_ops), gfp_flags);
 	}
-	if (unlikely(!req))
-		return NULL;
 
-	request_init(req);
-	req->r_osdc = osdc;
-	req->r_mempool = use_mempool;
-	req->r_num_ops = num_ops;
-	req->r_snapid = CEPH_NOSNAP;
-	req->r_snapc = ceph_get_snap_context(snapc);
+	if (likely(req)) {
+		req->r_mempool = use_mempool;
+		ceph_osdc_init_request(req, osdc, snapc, num_ops);
+	}
 
 	dout("%s req %p\n", __func__, req);
 	return req;
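
With this helper exported, a ceph.ko caller can initialize a request it
allocated itself. A minimal sketch; init_embedded_request() and the
single-op request are hypothetical:

#include <linux/ceph/osd_client.h>

static void init_embedded_request(struct ceph_osd_request *req,
				  struct ceph_osd_client *osdc,
				  struct ceph_snap_context *snapc)
{
	/*
	 * Zeroes *req, sets r_osdc, r_num_ops, and r_snapid, and takes
	 * a reference on snapc, just as ceph_osdc_alloc_request() now
	 * does via the same routine.
	 */
	ceph_osdc_init_request(req, osdc, snapc, 1);
}
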
From patchwork Wed Jul 1 15:54:45 2020
X-Patchwork-Id: 11636749
From: Jeff Layton <jlayton@kernel.org>
To: ceph-devel@vger.kernel.org
Cc: idryomov@gmail.com
Subject: [PATCH 3/4] libceph: rename __ceph_osdc_alloc_messages to ceph_osdc_alloc_num_messages
Date: Wed, 1 Jul 2020 11:54:45 -0400
Message-Id: <20200701155446.41141-4-jlayton@kernel.org>
In-Reply-To: <20200701155446.41141-1-jlayton@kernel.org>
References: <20200701155446.41141-1-jlayton@kernel.org>

...and make it public and export it.

Signed-off-by: Jeff Layton <jlayton@kernel.org>
---
 include/linux/ceph/osd_client.h |  3 +++
 net/ceph/osd_client.c           | 13 +++++++------
 2 files changed, 10 insertions(+), 6 deletions(-)

diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index 40a08c4e5d8d..71b7610c3a3c 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -481,6 +481,9 @@ extern struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *,
 					       unsigned int num_ops,
 					       bool use_mempool,
 					       gfp_t gfp_flags);
+int ceph_osdc_alloc_num_messages(struct ceph_osd_request *req, gfp_t gfp,
+				 int num_request_data_items,
+				 int num_reply_data_items);
 int ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp);
 
 extern struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *,
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 4ddf23120b1a..7be78fa6e2c3 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -613,9 +613,9 @@ static int ceph_oloc_encoding_size(const struct ceph_object_locator *oloc)
 	return 8 + 4 + 4 + 4 + (oloc->pool_ns ? oloc->pool_ns->len : 0);
 }
 
-static int __ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp,
-				      int num_request_data_items,
-				      int num_reply_data_items)
+int ceph_osdc_alloc_num_messages(struct ceph_osd_request *req, gfp_t gfp,
+				 int num_request_data_items,
+				 int num_reply_data_items)
 {
 	struct ceph_osd_client *osdc = req->r_osdc;
 	struct ceph_msg *msg;
@@ -672,6 +672,7 @@ static int __ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp,
 
 	return 0;
 }
+EXPORT_SYMBOL(ceph_osdc_alloc_num_messages);
 
 static bool osd_req_opcode_valid(u16 opcode)
 {
@@ -738,8 +739,8 @@ int ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp)
 	int num_request_data_items, num_reply_data_items;
 
 	get_num_data_items(req, &num_request_data_items, &num_reply_data_items);
-	return __ceph_osdc_alloc_messages(req, gfp, num_request_data_items,
-					  num_reply_data_items);
+	return ceph_osdc_alloc_num_messages(req, gfp, num_request_data_items,
+					    num_reply_data_items);
 }
 EXPORT_SYMBOL(ceph_osdc_alloc_messages);
 
@@ -1129,7 +1130,7 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
 		 * also covers ceph_uninline_data().  If more multi-op request
 		 * use cases emerge, we will need a separate helper.
 		 */
-		r = __ceph_osdc_alloc_messages(req, GFP_NOFS, num_ops, 0);
+		r = ceph_osdc_alloc_num_messages(req, GFP_NOFS, num_ops, 0);
 	else
 		r = ceph_osdc_alloc_messages(req, GFP_NOFS);
 	if (r)
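
A caller that already knows its data-item counts can now skip
get_num_data_items() and call the renamed helper directly. Sketch only;
the wrapper function and the 1/0 item counts are illustrative:

#include <linux/ceph/osd_client.h>

static int alloc_write_messages(struct ceph_osd_request *req)
{
	/* one request data item (the write payload), no reply items */
	return ceph_osdc_alloc_num_messages(req, GFP_NOFS, 1, 0);
}
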
From patchwork Wed Jul 1 15:54:46 2020
X-Patchwork-Id: 11636753
From: Jeff Layton <jlayton@kernel.org>
To: ceph-devel@vger.kernel.org
Cc: idryomov@gmail.com
Subject: [PATCH 4/4] libceph/ceph: move ceph_osdc_new_request into ceph.ko
Date: Wed, 1 Jul 2020 11:54:46 -0400
Message-Id: <20200701155446.41141-5-jlayton@kernel.org>
In-Reply-To: <20200701155446.41141-1-jlayton@kernel.org>
References: <20200701155446.41141-1-jlayton@kernel.org>

ceph_osdc_new_request() is cephfs-specific, so move it, along with its
calc_layout() helper, into ceph.ko. While at it, make calc_layout()
return void, since it cannot fail.

Signed-off-by: Jeff Layton <jlayton@kernel.org>
---
 fs/ceph/Makefile                |   2 +-
 fs/ceph/addr.c                  |   1 +
 fs/ceph/file.c                  |   1 +
 fs/ceph/osdc.c                  | 113 +++++++++++++++++++++++++++++++
 fs/ceph/osdc.h                  |  16 +++++
 include/linux/ceph/osd_client.h |  10 ---
 net/ceph/osd_client.c           | 115 --------------------------------
 7 files changed, 132 insertions(+), 126 deletions(-)
 create mode 100644 fs/ceph/osdc.c
 create mode 100644 fs/ceph/osdc.h

diff --git a/fs/ceph/Makefile b/fs/ceph/Makefile
index 50c635dc7f71..f2ec52fa8d37 100644
--- a/fs/ceph/Makefile
+++ b/fs/ceph/Makefile
@@ -8,7 +8,7 @@ obj-$(CONFIG_CEPH_FS) += ceph.o
 ceph-y := super.o inode.o dir.o file.o locks.o addr.o ioctl.o \
 	export.o caps.o snap.o xattr.o quota.o io.o \
 	mds_client.o mdsmap.o strings.o ceph_frag.o \
-	debugfs.o util.o metric.o
+	debugfs.o util.o metric.o osdc.o
 
 ceph-$(CONFIG_CEPH_FSCACHE) += cache.o
 ceph-$(CONFIG_CEPH_FS_POSIX_ACL) += acl.o
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 01ad09733ac7..1a3cc1875a31 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -19,6 +19,7 @@
 #include "metric.h"
 #include <linux/ceph/osd_client.h>
 #include <linux/ceph/striper.h>
+#include "osdc.h"
 
 /*
  * Ceph address space ops.
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 160644ddaeed..b697a1f3c56e 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -18,6 +18,7 @@
 #include "cache.h"
 #include "io.h"
 #include "metric.h"
+#include "osdc.h"
 
 static __le32 ceph_flags_sys2wire(u32 flags)
 {
diff --git a/fs/ceph/osdc.c b/fs/ceph/osdc.c
new file mode 100644
index 000000000000..303e39fce3d4
--- /dev/null
+++ b/fs/ceph/osdc.c
@@ -0,0 +1,113 @@
+// SPDX-License-Identifier: GPL-2.0
+#include
+#include
+#include
+#include
+#include "osdc.h"
+
+/*
+ * calculate the mapping of a file extent onto an object, and fill out the
+ * request accordingly.  shorten extent as necessary if it crosses an
+ * object boundary.
+ *
+ * fill osd op in request message.
+ */
+static void calc_layout(struct ceph_file_layout *layout, u64 off, u64 *plen,
+			u64 *objnum, u64 *objoff, u64 *objlen)
+{
+	u64 orig_len = *plen;
+	u32 xlen;
+
+	/* object extent? */
+	ceph_calc_file_object_mapping(layout, off, orig_len, objnum,
+				      objoff, &xlen);
+	*objlen = xlen;
+	if (*objlen < orig_len) {
+		*plen = *objlen;
+		dout(" skipping last %llu, final file extent %llu~%llu\n",
+		     orig_len - *plen, off, *plen);
+	}
+
+	dout("calc_layout objnum=%llx %llu~%llu\n", *objnum, *objoff, *objlen);
+}
+
+/*
+ * build new request AND message, calculate layout, and adjust file
+ * extent as needed.
+ *
+ * if the file was recently truncated, we include information about its
+ * old and new size so that the object can be updated appropriately. (we
+ * avoid synchronously deleting truncated objects because it's slow.)
+ */
+struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
+					       struct ceph_file_layout *layout,
+					       struct ceph_vino vino,
+					       u64 off, u64 *plen,
+					       unsigned int which, int num_ops,
+					       int opcode, int flags,
+					       struct ceph_snap_context *snapc,
+					       u32 truncate_seq,
+					       u64 truncate_size,
+					       bool use_mempool)
+{
+	struct ceph_osd_request *req;
+	u64 objnum = 0;
+	u64 objoff = 0;
+	u64 objlen = 0;
+	int r;
+
+	BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
+	       opcode != CEPH_OSD_OP_ZERO && opcode != CEPH_OSD_OP_TRUNCATE &&
+	       opcode != CEPH_OSD_OP_CREATE && opcode != CEPH_OSD_OP_DELETE);
+
+	req = ceph_osdc_alloc_request(osdc, snapc, num_ops, use_mempool,
+				      GFP_NOFS);
+	if (!req)
+		return ERR_PTR(-ENOMEM);
+
+	/* calculate max write size */
+	calc_layout(layout, off, plen, &objnum, &objoff, &objlen);
+
+	if (opcode == CEPH_OSD_OP_CREATE || opcode == CEPH_OSD_OP_DELETE) {
+		osd_req_op_init(req, which, opcode, 0);
+	} else {
+		u32 object_size = layout->object_size;
+		u32 object_base = off - objoff;
+		if (!(truncate_seq == 1 && truncate_size == -1ULL)) {
+			if (truncate_size <= object_base) {
+				truncate_size = 0;
+			} else {
+				truncate_size -= object_base;
+				if (truncate_size > object_size)
+					truncate_size = object_size;
+			}
+		}
+		osd_req_op_extent_init(req, which, opcode, objoff, objlen,
+				       truncate_size, truncate_seq);
+	}
+
+	req->r_base_oloc.pool = layout->pool_id;
+	req->r_base_oloc.pool_ns = ceph_try_get_string(layout->pool_ns);
+	req->r_flags = flags | osdc->client->options->read_from_replica;
+	ceph_oid_printf(&req->r_base_oid, "%llx.%08llx", vino.ino, objnum);
+
+	req->r_snapid = vino.snap;
+	if (flags & CEPH_OSD_FLAG_WRITE)
+		req->r_data_offset = off;
+
+	if (num_ops > 1)
+		/*
+		 * This is a special case for ceph_writepages_start(), but it
+		 * also covers ceph_uninline_data().  If more multi-op request
+		 * use cases emerge, we will need a separate helper.
+		 */
+		r = ceph_osdc_alloc_num_messages(req, GFP_NOFS, num_ops, 0);
+	else
+		r = ceph_osdc_alloc_messages(req, GFP_NOFS);
+	if (r) {
+		ceph_osdc_put_request(req);
+		return ERR_PTR(r);
+	}
+
+	return req;
+}
diff --git a/fs/ceph/osdc.h b/fs/ceph/osdc.h
new file mode 100644
index 000000000000..b03e5f9ef733
--- /dev/null
+++ b/fs/ceph/osdc.h
@@ -0,0 +1,16 @@
+#ifndef _FS_CEPH_OSDC_H
+#define _FS_CEPH_OSDC_H
+
+#include
+
+struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
+					       struct ceph_file_layout *layout,
+					       struct ceph_vino vino,
+					       u64 off, u64 *plen,
+					       unsigned int which, int num_ops,
+					       int opcode, int flags,
+					       struct ceph_snap_context *snapc,
+					       u32 truncate_seq,
+					       u64 truncate_size,
+					       bool use_mempool);
+#endif /* _FS_CEPH_OSDC_H */
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index 71b7610c3a3c..f59eb192c472 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -486,16 +486,6 @@ int ceph_osdc_alloc_num_messages(struct ceph_osd_request *req, gfp_t gfp,
 				 int num_reply_data_items);
 int ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp);
 
-extern struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *,
-				      struct ceph_file_layout *layout,
-				      struct ceph_vino vino,
-				      u64 offset, u64 *len,
-				      unsigned int which, int num_ops,
-				      int opcode, int flags,
-				      struct ceph_snap_context *snapc,
-				      u32 truncate_seq, u64 truncate_size,
-				      bool use_mempool);
-
 extern void ceph_osdc_get_request(struct ceph_osd_request *req);
 extern void ceph_osdc_put_request(struct ceph_osd_request *req);
 void ceph_osdc_init_request(struct ceph_osd_request *req,
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 7be78fa6e2c3..5e54971bb7b2 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -93,33 +93,6 @@ static inline void verify_osd_locked(struct ceph_osd *osd) { }
 static inline void verify_lreq_locked(struct ceph_osd_linger_request *lreq) { }
 #endif
 
-/*
- * calculate the mapping of a file extent onto an object, and fill out the
- * request accordingly.  shorten extent as necessary if it crosses an
- * object boundary.
- *
- * fill osd op in request message.
- */
-static int calc_layout(struct ceph_file_layout *layout, u64 off, u64 *plen,
-		       u64 *objnum, u64 *objoff, u64 *objlen)
-{
-	u64 orig_len = *plen;
-	u32 xlen;
-
-	/* object extent? */
-	ceph_calc_file_object_mapping(layout, off, orig_len, objnum,
-				      objoff, &xlen);
-	*objlen = xlen;
-	if (*objlen < orig_len) {
-		*plen = *objlen;
-		dout(" skipping last %llu, final file extent %llu~%llu\n",
-		     orig_len - *plen, off, *plen);
-	}
-
-	dout("calc_layout objnum=%llx %llu~%llu\n", *objnum, *objoff, *objlen);
-	return 0;
-}
-
 static void ceph_osd_data_init(struct ceph_osd_data *osd_data)
 {
 	memset(osd_data, 0, sizeof (*osd_data));
@@ -1056,94 +1029,6 @@ static u32 osd_req_encode_op(struct ceph_osd_op *dst,
 	return src->indata_len;
 }
 
-/*
- * build new request AND message, calculate layout, and adjust file
- * extent as needed.
- *
- * if the file was recently truncated, we include information about its
- * old and new size so that the object can be updated appropriately. (we
- * avoid synchronously deleting truncated objects because it's slow.)
- */
-struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
-					       struct ceph_file_layout *layout,
-					       struct ceph_vino vino,
-					       u64 off, u64 *plen,
-					       unsigned int which, int num_ops,
-					       int opcode, int flags,
-					       struct ceph_snap_context *snapc,
-					       u32 truncate_seq,
-					       u64 truncate_size,
-					       bool use_mempool)
-{
-	struct ceph_osd_request *req;
-	u64 objnum = 0;
-	u64 objoff = 0;
-	u64 objlen = 0;
-	int r;
-
-	BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
-	       opcode != CEPH_OSD_OP_ZERO && opcode != CEPH_OSD_OP_TRUNCATE &&
-	       opcode != CEPH_OSD_OP_CREATE && opcode != CEPH_OSD_OP_DELETE);
-
-	req = ceph_osdc_alloc_request(osdc, snapc, num_ops, use_mempool,
-				      GFP_NOFS);
-	if (!req) {
-		r = -ENOMEM;
-		goto fail;
-	}
-
-	/* calculate max write size */
-	r = calc_layout(layout, off, plen, &objnum, &objoff, &objlen);
-	if (r)
-		goto fail;
-
-	if (opcode == CEPH_OSD_OP_CREATE || opcode == CEPH_OSD_OP_DELETE) {
-		osd_req_op_init(req, which, opcode, 0);
-	} else {
-		u32 object_size = layout->object_size;
-		u32 object_base = off - objoff;
-		if (!(truncate_seq == 1 && truncate_size == -1ULL)) {
-			if (truncate_size <= object_base) {
-				truncate_size = 0;
-			} else {
-				truncate_size -= object_base;
-				if (truncate_size > object_size)
-					truncate_size = object_size;
-			}
-		}
-		osd_req_op_extent_init(req, which, opcode, objoff, objlen,
-				       truncate_size, truncate_seq);
-	}
-
-	req->r_base_oloc.pool = layout->pool_id;
-	req->r_base_oloc.pool_ns = ceph_try_get_string(layout->pool_ns);
-	ceph_oid_printf(&req->r_base_oid, "%llx.%08llx", vino.ino, objnum);
-	req->r_flags = flags | osdc->client->options->read_from_replica;
-
-	req->r_snapid = vino.snap;
-	if (flags & CEPH_OSD_FLAG_WRITE)
-		req->r_data_offset = off;
-
-	if (num_ops > 1)
-		/*
-		 * This is a special case for ceph_writepages_start(), but it
-		 * also covers ceph_uninline_data().  If more multi-op request
-		 * use cases emerge, we will need a separate helper.
-		 */
-		r = ceph_osdc_alloc_num_messages(req, GFP_NOFS, num_ops, 0);
-	else
-		r = ceph_osdc_alloc_messages(req, GFP_NOFS);
-	if (r)
-		goto fail;
-
-	return req;
-
-fail:
-	ceph_osdc_put_request(req);
-	return ERR_PTR(r);
-}
-EXPORT_SYMBOL(ceph_osdc_new_request);
-
 /*
  * We keep osd requests in an rbtree, sorted by ->r_tid.
  */
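
With the move, cephfs callers pick the function up from the new local
header instead of the libceph export. A rough sketch of such a caller;
read_one_object() is hypothetical, and the osd_client is assumed to be
passed in by the caller:

#include "super.h"
#include "osdc.h"

static struct ceph_osd_request *read_one_object(struct ceph_inode_info *ci,
						struct ceph_osd_client *osdc,
						u64 off, u64 *plen)
{
	/* single-op read of one file extent, honoring truncation state */
	return ceph_osdc_new_request(osdc, &ci->i_layout,
				     ceph_vino(&ci->vfs_inode),
				     off, plen, 0, 1,
				     CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
				     NULL, ci->i_truncate_seq,
				     ci->i_truncate_size, false);
}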