From patchwork Wed Jul 13 19:08:20 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Anna Schumaker X-Patchwork-Id: 12917025 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 0B8F8C43334 for ; Wed, 13 Jul 2022 19:08:32 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S230481AbiGMTIb (ORCPT ); Wed, 13 Jul 2022 15:08:31 -0400 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:33874 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S230414AbiGMTIa (ORCPT ); Wed, 13 Jul 2022 15:08:30 -0400 Received: from dfw.source.kernel.org (dfw.source.kernel.org [139.178.84.217]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 774DD2CDEA for ; Wed, 13 Jul 2022 12:08:29 -0700 (PDT) Received: from smtp.kernel.org (relay.kernel.org [52.25.139.140]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by dfw.source.kernel.org (Postfix) with ESMTPS id 0776961DC6 for ; Wed, 13 Jul 2022 19:08:29 +0000 (UTC) Received: by smtp.kernel.org (Postfix) with ESMTPSA id 1DAE0C3411E; Wed, 13 Jul 2022 19:08:28 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1657739308; bh=OH9h/JTS87YVQ2CrWGISefPug2oRlihUAgfsHSj3hOM=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=mHtqT6gvMMyqsLwxITvxgbOpSoiG/H6QQ1y4dzrPltiP9BFFxdG2lBvFsZaVkNoLh +EfjQYayG69C/74Txu7a6oxkDVcDvnuYXBCWomOrC4VYxbNYer/DaRyn07Zs578+sH zgRLV94f8mFOfOo2zsGIiVlVbjPRdIBgOp3JCeFCzJMJ+lnhUUZiOzpuP68bVZGyWZ orJh9tjVuEVJ1Amsut/9wVnxTssGEpq2lN/a//NoQ0nzl9R5eistlcbF5AKSXGm0nv jme4n+5DKZ5AgtUBfUWYG2upOzUVLYvFlqkGk/T7+S/AxzdzKCV0zSAikZKdjYhStV 3lxWXomEl959g== From: Anna Schumaker To: linux-nfs@vger.kernel.org, 
chuck.lever@oracle.com Cc: anna@kernel.org Subject: [PATCH v2 1/6] SUNRPC: Introduce xdr_stream_move_subsegment() Date: Wed, 13 Jul 2022 15:08:20 -0400 Message-Id: <20220713190825.615678-2-anna@kernel.org> X-Mailer: git-send-email 2.37.0 In-Reply-To: <20220713190825.615678-1-anna@kernel.org> References: <20220713190825.615678-1-anna@kernel.org> MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: linux-nfs@vger.kernel.org From: Anna Schumaker I do this by creating an xdr subsegment for the range we will be operating over. This lets me shift data to the correct place without potentially overwriting anything already there. Signed-off-by: Anna Schumaker --- include/linux/sunrpc/xdr.h | 2 ++ net/sunrpc/xdr.c | 59 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 61 insertions(+) diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h index 5860f32e3958..7dcc6c31fe29 100644 --- a/include/linux/sunrpc/xdr.h +++ b/include/linux/sunrpc/xdr.h @@ -262,6 +262,8 @@ extern unsigned int xdr_align_data(struct xdr_stream *, unsigned int offset, uns extern unsigned int xdr_expand_hole(struct xdr_stream *, unsigned int offset, unsigned int length); extern bool xdr_stream_subsegment(struct xdr_stream *xdr, struct xdr_buf *subbuf, unsigned int len); +extern unsigned int xdr_stream_move_subsegment(struct xdr_stream *xdr, unsigned int offset, + unsigned int target, unsigned int length); /** * xdr_set_scratch_buffer - Attach a scratch buffer for decoding data. 
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c index 5d2b3e6979fb..8ba11a754297 100644 --- a/net/sunrpc/xdr.c +++ b/net/sunrpc/xdr.c @@ -775,6 +775,34 @@ static void xdr_buf_pages_shift_left(const struct xdr_buf *buf, xdr_buf_tail_copy_left(buf, 0, len - buf->page_len, shift); } +static void xdr_buf_head_shift_left(const struct xdr_buf *buf, + unsigned int base, unsigned int len, + unsigned int shift) +{ + const struct kvec *head = buf->head; + unsigned int bytes; + + if (!shift || !len) + return; + + if (shift > base) { + bytes = (shift - base); + if (bytes >= len) + return; + base += bytes; + len -= bytes; + } + + if (base < head->iov_len) { + bytes = min_t(unsigned int, len, head->iov_len - base); + memmove(head->iov_base + (base - shift), + head->iov_base + base, bytes); + base += bytes; + len -= bytes; + } + xdr_buf_pages_shift_left(buf, base - head->iov_len, len, shift); +} + /** * xdr_shrink_bufhead * @buf: xdr_buf @@ -1680,6 +1708,37 @@ bool xdr_stream_subsegment(struct xdr_stream *xdr, struct xdr_buf *subbuf, } EXPORT_SYMBOL_GPL(xdr_stream_subsegment); +/** + * xdr_stream_move_subsegment - Move part of a stream to another position + * @xdr: the source xdr_stream + * @offset: the source offset of the segment + * @target: the target offset of the segment + * @length: the number of bytes to move + * + * Moves @length bytes from @offset to @target in the xdr_stream, overwriting + * anything in its space. Returns the number of bytes in the segment. 
+ */ +unsigned int xdr_stream_move_subsegment(struct xdr_stream *xdr, unsigned int offset, + unsigned int target, unsigned int length) +{ + struct xdr_buf buf; + unsigned int shift; + + if (offset < target) { + shift = target - offset; + if (xdr_buf_subsegment(xdr->buf, &buf, offset, shift + length) < 0) + return 0; + xdr_buf_head_shift_right(&buf, 0, length, shift); + } else if (offset > target) { + shift = offset - target; + if (xdr_buf_subsegment(xdr->buf, &buf, target, shift + length) < 0) + return 0; + xdr_buf_head_shift_left(&buf, shift, length, shift); + } + return length; +} +EXPORT_SYMBOL_GPL(xdr_stream_move_subsegment); + /** * xdr_buf_trim - lop at most "len" bytes off the end of "buf" * @buf: buf to be trimmed From patchwork Wed Jul 13 19:08:21 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Anna Schumaker X-Patchwork-Id: 12917027 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 2887EC433EF for ; Wed, 13 Jul 2022 19:08:38 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S236669AbiGMTIg (ORCPT ); Wed, 13 Jul 2022 15:08:36 -0400 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:34178 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S235383AbiGMTIf (ORCPT ); Wed, 13 Jul 2022 15:08:35 -0400 Received: from ams.source.kernel.org (ams.source.kernel.org [IPv6:2604:1380:4601:e00::1]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 7E2521AD for ; Wed, 13 Jul 2022 12:08:31 -0700 (PDT) Received: from smtp.kernel.org (relay.kernel.org [52.25.139.140]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by ams.source.kernel.org (Postfix) with ESMTPS id 2BBD5B82117 for ; Wed, 
13 Jul 2022 19:08:30 +0000 (UTC) Received: by smtp.kernel.org (Postfix) with ESMTPSA id A0317C341C8; Wed, 13 Jul 2022 19:08:28 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1657739308; bh=Y0mRQoJ9Q41fz09H4UBzVJNl5xtFsMy+j0AyoGrf07A=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=LzPo6Keuu0Sc5TTGH+ZVwV+v/x98DriETA0CbOqWXcwFPasAmMLmEh8D7XoIhccST RzDNZdLVMp2ewx3zBjjA+Q1n9NHpxMEAy4AYNoIsAuCVXxp7Oq9cRvHiwsUqrCRC+h hv9AaqV7UXDseKNDWkoIFqPCDEVvcFENJU9SS9luACoE/Lb8n2OVUcrbRbWf0iS+0J zF4BJMNTtF3ZcyqAQrkpuqg5L+eEgM06MBDm4P/OromuNXDqxJUbcOFmyOo2ksfzs+ kOQMJDPpKvc3TDONY2gxcrzrZjVh7XxO1Kvfz7R5xjAJR/A9TJKhff3edr49oBo+2O RV96cdfvaUbNw== From: Anna Schumaker To: linux-nfs@vger.kernel.org, chuck.lever@oracle.com Cc: anna@kernel.org Subject: [PATCH v2 2/6] SUNRPC: Introduce xdr_encode_double() Date: Wed, 13 Jul 2022 15:08:21 -0400 Message-Id: <20220713190825.615678-3-anna@kernel.org> X-Mailer: git-send-email 2.37.0 In-Reply-To: <20220713190825.615678-1-anna@kernel.org> References: <20220713190825.615678-1-anna@kernel.org> MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: linux-nfs@vger.kernel.org From: Anna Schumaker This is similar to xdr_encode_word(), but instead lets us encode a 64-bit wide value into the xdr_buf at the given offset. 
Signed-off-by: Anna Schumaker --- include/linux/sunrpc/xdr.h | 1 + net/sunrpc/xdr.c | 8 ++++++++ 2 files changed, 9 insertions(+) diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h index 7dcc6c31fe29..e26047d474b2 100644 --- a/include/linux/sunrpc/xdr.h +++ b/include/linux/sunrpc/xdr.h @@ -196,6 +196,7 @@ extern int read_bytes_from_xdr_buf(const struct xdr_buf *, unsigned int, void *, extern int write_bytes_to_xdr_buf(const struct xdr_buf *, unsigned int, void *, unsigned int); extern int xdr_encode_word(const struct xdr_buf *, unsigned int, u32); +extern int xdr_encode_double(const struct xdr_buf *, unsigned int, u64); extern int xdr_decode_word(const struct xdr_buf *, unsigned int, u32 *); struct xdr_array2_desc; diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c index 8ba11a754297..63d9cdc989da 100644 --- a/net/sunrpc/xdr.c +++ b/net/sunrpc/xdr.c @@ -1865,6 +1865,14 @@ int xdr_encode_word(const struct xdr_buf *buf, unsigned int base, u32 obj) } EXPORT_SYMBOL_GPL(xdr_encode_word); +int xdr_encode_double(const struct xdr_buf *buf, unsigned int base, u64 obj) +{ + __be64 raw = cpu_to_be64(obj); + + return write_bytes_to_xdr_buf(buf, base, &raw, sizeof(obj)); +} +EXPORT_SYMBOL_GPL(xdr_encode_double); + /* Returns 0 on success, or else a negative error code. 
*/ static int xdr_xcode_array2(const struct xdr_buf *buf, unsigned int base, struct xdr_array2_desc *desc, int encode) From patchwork Wed Jul 13 19:08:22 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Anna Schumaker X-Patchwork-Id: 12917028 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id CF2B4C43334 for ; Wed, 13 Jul 2022 19:08:38 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S235383AbiGMTIg (ORCPT ); Wed, 13 Jul 2022 15:08:36 -0400 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:34180 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S236502AbiGMTIf (ORCPT ); Wed, 13 Jul 2022 15:08:35 -0400 Received: from ams.source.kernel.org (ams.source.kernel.org [IPv6:2604:1380:4601:e00::1]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id EB4D62BF for ; Wed, 13 Jul 2022 12:08:31 -0700 (PDT) Received: from smtp.kernel.org (relay.kernel.org [52.25.139.140]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by ams.source.kernel.org (Postfix) with ESMTPS id B00BFB82130 for ; Wed, 13 Jul 2022 19:08:30 +0000 (UTC) Received: by smtp.kernel.org (Postfix) with ESMTPSA id 2C304C341C6; Wed, 13 Jul 2022 19:08:29 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1657739309; bh=z6eXA65BD09Jyev3T8fHrTGmJnWKOLPXSDRw9hwPTyU=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=c0JoZKQSo8uhlvcRvMlTS7cvzOqpQYkpvumnvhQPsjkOzUdjSgG0aYmRglXLJmnqr IiMc7XziWtcPBuJl3gqhoOsofQxm4aETwlIT7RA5QnXr1I8ctxSrrPYVr4OnXwFTdu Tncp49BxJDG7T8zyKtp+njv9PZOsY7Xa7xoMfT7Jo2aVvLxIVfJDdjtulI1S6tyc+s +K6NQakJsM4hcIAmGxzWW4SUYRzqke8zE3D0eB28XmVvdjei4DshBOVCfgvBnu1Ohk 
pMR9n8GfKQ9uFCz7Q+THvpI9+qtLhJT6BuPFLdwh/NS0rrqjh8bUX3yfYEp3ObTnEj OONYTBgaZ727Q== From: Anna Schumaker To: linux-nfs@vger.kernel.org, chuck.lever@oracle.com Cc: anna@kernel.org Subject: [PATCH v2 3/6] SUNRPC: Introduce xdr_buf_trim_head() Date: Wed, 13 Jul 2022 15:08:22 -0400 Message-Id: <20220713190825.615678-4-anna@kernel.org> X-Mailer: git-send-email 2.37.0 In-Reply-To: <20220713190825.615678-1-anna@kernel.org> References: <20220713190825.615678-1-anna@kernel.org> MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: linux-nfs@vger.kernel.org From: Anna Schumaker The READ_PLUS operation uses a 32-bit length field for encoding a DATA segment, but 64-bit length field for encoding a HOLE segment. When setting up our reply buffer, we need to reserve enough space to encode a HOLE before reading the file data and use this function if the first segment turns out to be DATA. Signed-off-by: Anna Schumaker Reported-by: kernel test robot --- include/linux/sunrpc/xdr.h | 1 + net/sunrpc/xdr.c | 17 +++++++++++++++++ 2 files changed, 18 insertions(+) diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h index e26047d474b2..bdaf048edde0 100644 --- a/include/linux/sunrpc/xdr.h +++ b/include/linux/sunrpc/xdr.h @@ -191,6 +191,7 @@ xdr_adjust_iovec(struct kvec *iov, __be32 *p) extern void xdr_shift_buf(struct xdr_buf *, size_t); extern void xdr_buf_from_iov(const struct kvec *, struct xdr_buf *); extern int xdr_buf_subsegment(const struct xdr_buf *, struct xdr_buf *, unsigned int, unsigned int); +extern void xdr_buf_trim_head(struct xdr_buf *, unsigned int); extern void xdr_buf_trim(struct xdr_buf *, unsigned int); extern int read_bytes_from_xdr_buf(const struct xdr_buf *, unsigned int, void *, unsigned int); extern int write_bytes_to_xdr_buf(const struct xdr_buf *, unsigned int, void *, unsigned int); diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c index 63d9cdc989da..37956a274f81 100644 --- a/net/sunrpc/xdr.c +++ b/net/sunrpc/xdr.c @@ -1739,6 +1739,23 @@ 
unsigned int xdr_stream_move_subsegment(struct xdr_stream *xdr, unsigned int off } EXPORT_SYMBOL_GPL(xdr_stream_move_subsegment); +/** + * xdr_buf_trim_head - lop at most "len" bytes off the end of "buf"->head + * @buf: buf to be trimmed + * @len: number of bytes to reduce "buf"->head by + * + * Trim an xdr_buf->head by the given number of bytes by fixing up the lengths. + * Note that it's possible that we'll trim less than that amount if the + * xdr_buf->head is too small. + */ +void xdr_buf_trim_head(struct xdr_buf *buf, unsigned int len) +{ + size_t trim = min_t(size_t, buf->head[0].iov_len, len); + buf->head[0].iov_len -= trim; + buf->len -= trim; +} +EXPORT_SYMBOL_GPL(xdr_buf_trim_head); + /** * xdr_buf_trim - lop at most "len" bytes off the end of "buf" * @buf: buf to be trimmed From patchwork Wed Jul 13 19:08:23 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Anna Schumaker X-Patchwork-Id: 12917031 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 92F5BC43334 for ; Wed, 13 Jul 2022 19:08:40 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S236659AbiGMTIj (ORCPT ); Wed, 13 Jul 2022 15:08:39 -0400 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:34182 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S236627AbiGMTIf (ORCPT ); Wed, 13 Jul 2022 15:08:35 -0400 Received: from ams.source.kernel.org (ams.source.kernel.org [IPv6:2604:1380:4601:e00::1]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 9C751334 for ; Wed, 13 Jul 2022 12:08:32 -0700 (PDT) Received: from smtp.kernel.org (relay.kernel.org [52.25.139.140]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by 
ams.source.kernel.org (Postfix) with ESMTPS id 4C997B82131 for ; Wed, 13 Jul 2022 19:08:31 +0000 (UTC) Received: by smtp.kernel.org (Postfix) with ESMTPSA id AD11BC341C0; Wed, 13 Jul 2022 19:08:29 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1657739310; bh=06tbDAIV50KkX1Rev4Q+dwEAZwyYL5nx50/XVjO3+DI=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=jV1WIvTnRf6N8dmGBqVdvItUps/vQB5sNtT44Z9FxIOu4TfTPKS93oQt4/7dleHnP E58emDZYt0MB2FmzbqAvra7IkqiI3EZqQ5vHsbTUDc8+WgYQDMiorzi+EIRhcmajfM f18Q6CjnWwZg/MRDKR5akQsqeqsHsr0ctUgBl8DXEoE7FlLwbCC5hn4gH9HSGa8X/P nUOrFohS4JLOGVGJwx1FmzG6ETuruTHb2j2FUCA3+Vr/hM6ShFD6Akx0VecOhWhGCQ ZtzvGu52G2nDoaCzwrxmzoSIp8GGyO+anAbTPeNEuqoxgIYAzMqaaOibN27/IycJ8N pYQZrdYjwT76g== From: Anna Schumaker To: linux-nfs@vger.kernel.org, chuck.lever@oracle.com Cc: anna@kernel.org Subject: [PATCH v2 4/6] SUNRPC: Introduce xdr_buf_nth_page_address() Date: Wed, 13 Jul 2022 15:08:23 -0400 Message-Id: <20220713190825.615678-5-anna@kernel.org> X-Mailer: git-send-email 2.37.0 In-Reply-To: <20220713190825.615678-1-anna@kernel.org> References: <20220713190825.615678-1-anna@kernel.org> MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: linux-nfs@vger.kernel.org From: Anna Schumaker For getting a pointer to the memory address represented by the nth page, along with the length of the data on that page. 
Signed-off-by: Anna Schumaker --- include/linux/sunrpc/xdr.h | 2 ++ net/sunrpc/xdr.c | 17 +++++++++++++++++ 2 files changed, 19 insertions(+) diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h index bdaf048edde0..79824fea4529 100644 --- a/include/linux/sunrpc/xdr.h +++ b/include/linux/sunrpc/xdr.h @@ -137,6 +137,8 @@ void xdr_inline_pages(struct xdr_buf *, unsigned int, struct page **, unsigned int, unsigned int); void xdr_terminate_string(const struct xdr_buf *, const u32); size_t xdr_buf_pagecount(const struct xdr_buf *buf); +char *xdr_buf_nth_page_address(const struct xdr_buf *buf, unsigned int n, + unsigned int *len); int xdr_alloc_bvec(struct xdr_buf *buf, gfp_t gfp); void xdr_free_bvec(struct xdr_buf *buf); diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c index 37956a274f81..88b28656a05d 100644 --- a/net/sunrpc/xdr.c +++ b/net/sunrpc/xdr.c @@ -140,6 +140,23 @@ size_t xdr_buf_pagecount(const struct xdr_buf *buf) return (buf->page_base + buf->page_len + PAGE_SIZE - 1) >> PAGE_SHIFT; } +char *xdr_buf_nth_page_address(const struct xdr_buf *buf, unsigned int n, + unsigned int *len) +{ + unsigned int pgbase = buf->page_base + (n * PAGE_SIZE); + struct page **pages = buf->pages; + struct page **page; + + if (n >= xdr_buf_pagecount(buf)) + return NULL; + + page = pages + (pgbase >> PAGE_SHIFT); + pgbase &= ~PAGE_MASK; + *len = min_t(size_t, PAGE_SIZE, buf->page_len - (n * PAGE_SIZE)); + return page_address(*page) + pgbase; +} +EXPORT_SYMBOL_GPL(xdr_buf_nth_page_address); + int xdr_alloc_bvec(struct xdr_buf *buf, gfp_t gfp) { From patchwork Wed Jul 13 19:08:24 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Anna Schumaker X-Patchwork-Id: 12917029 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 
D6647CCA47C for ; Wed, 13 Jul 2022 19:08:39 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S236502AbiGMTIi (ORCPT ); Wed, 13 Jul 2022 15:08:38 -0400 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:34186 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S236646AbiGMTIf (ORCPT ); Wed, 13 Jul 2022 15:08:35 -0400 Received: from ams.source.kernel.org (ams.source.kernel.org [145.40.68.75]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 3290FB5B for ; Wed, 13 Jul 2022 12:08:33 -0700 (PDT) Received: from smtp.kernel.org (relay.kernel.org [52.25.139.140]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by ams.source.kernel.org (Postfix) with ESMTPS id D0FBAB82120 for ; Wed, 13 Jul 2022 19:08:31 +0000 (UTC) Received: by smtp.kernel.org (Postfix) with ESMTPSA id 3965FC3411E; Wed, 13 Jul 2022 19:08:30 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1657739310; bh=6+eHd8H9jHMNlhID7997tVEizQaw08h5kwQLixpxpYE=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=QzIsME98G7Xb4it3DslA3TOrsfuSWSJTglaKFFXsoBvDvodtH0Y0yD9kxcFlzVhV9 2K76KWS+nAw8kI0kkeEA2DpqEYQFjn5Ia39LprinHswzySeS7AURLzJFiBe3hmHIAJ 5Vqbgq0KJ4RzJXAZP9qIYhMwijo7Uy1EUjP2UmvLdNvYBqyHp3WKudw7d2hYH2c6Nt WBYX7l4mDw/snPSMa4DXWtJ+ntQY04hltXGGNhYI5g9EtvVj4cge61PD7gl+yZsoYI gfo2Pr4QTrTZq6Ww+BbcrOFotDVgZhq4VQ6cPCqxOSfbvnYilcSKKZnTUcgGL0tSjb l+PKz/ZNAWB9g== From: Anna Schumaker To: linux-nfs@vger.kernel.org, chuck.lever@oracle.com Cc: anna@kernel.org Subject: [PATCH v2 5/6] SUNRPC: Export xdr_buf_pagecount() Date: Wed, 13 Jul 2022 15:08:24 -0400 Message-Id: <20220713190825.615678-6-anna@kernel.org> X-Mailer: git-send-email 2.37.0 In-Reply-To: <20220713190825.615678-1-anna@kernel.org> References: <20220713190825.615678-1-anna@kernel.org> MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: linux-nfs@vger.kernel.org From: 
Anna Schumaker The NFS server will need this for iterating over pages in a READ_PLUS reply Signed-off-by: Anna Schumaker --- net/sunrpc/xdr.c | 1 + 1 file changed, 1 insertion(+) diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c index 88b28656a05d..ea734b14af0f 100644 --- a/net/sunrpc/xdr.c +++ b/net/sunrpc/xdr.c @@ -139,6 +139,7 @@ size_t xdr_buf_pagecount(const struct xdr_buf *buf) return 0; return (buf->page_base + buf->page_len + PAGE_SIZE - 1) >> PAGE_SHIFT; } +EXPORT_SYMBOL_GPL(xdr_buf_pagecount); char *xdr_buf_nth_page_address(const struct xdr_buf *buf, unsigned int n, unsigned int *len) From patchwork Wed Jul 13 19:08:25 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Anna Schumaker X-Patchwork-Id: 12917030 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 1B0FDCCA479 for ; Wed, 13 Jul 2022 19:08:40 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S236646AbiGMTIi (ORCPT ); Wed, 13 Jul 2022 15:08:38 -0400 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:34184 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S236659AbiGMTIf (ORCPT ); Wed, 13 Jul 2022 15:08:35 -0400 Received: from ams.source.kernel.org (ams.source.kernel.org [IPv6:2604:1380:4601:e00::1]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id CB437B1C for ; Wed, 13 Jul 2022 12:08:32 -0700 (PDT) Received: from smtp.kernel.org (relay.kernel.org [52.25.139.140]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by ams.source.kernel.org (Postfix) with ESMTPS id 62BFFB82132 for ; Wed, 13 Jul 2022 19:08:31 +0000 (UTC) Received: by smtp.kernel.org (Postfix) with ESMTPSA id BB61CC341C8; Wed, 13 Jul 2022 19:08:30 +0000 
(UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1657739311; bh=YE3y5f6dXQaHq2j8y9lblwal+x5uZiaZu3WZRmNr22M=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=ttiApPEXduIxjVNcDXOGA/UKJTtgIGDcStihlGWrPhWkVA2n7CEcOSUdPuzTZ6KpQ AtvKlildcXqE7y6/Lif3c6ZPSU/iK6hNcMBXdDZJCIuZHE4iCaaSCx+olCeOFzcgIL 2cY07QvzlkvaukDnrY2nRdOmuymKArVH8qmQFaHfFWIVwKX2AuP5I+H5KlwRoL9BD1 Lu9b5NeAWszFjDEEfboCBve19l7gXyP2vwsfBG4H0lOwt4KW0c1Wm1TEDzYsrkWc3S HBmT9H15cFeIhem1dEklLJluLIik8YffpVMUrpBp49WfNVnQxUXNH4pYXHJWI8aupx 2yCmbCQmb9KbQ== From: Anna Schumaker To: linux-nfs@vger.kernel.org, chuck.lever@oracle.com Cc: anna@kernel.org Subject: [PATCH v2 6/6] NFSD: Repeal and replace the READ_PLUS implementation Date: Wed, 13 Jul 2022 15:08:25 -0400 Message-Id: <20220713190825.615678-7-anna@kernel.org> X-Mailer: git-send-email 2.37.0 In-Reply-To: <20220713190825.615678-1-anna@kernel.org> References: <20220713190825.615678-1-anna@kernel.org> MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: linux-nfs@vger.kernel.org From: Anna Schumaker Rather than relying on the underlying filesystem to tell us where hole and data segments are through vfs_llseek(), let's instead do the hole compression ourselves. This has a few advantages over the old implementation: 1) A single call to the underlying filesystem through nfsd_readv() means the file can't change from underneath us in the middle of encoding. 2) A single call to the underlying filesystem also means that the underlying filesystem only needs to synchronize cached and on-disk data one time instead of potentially many, speeding up the reply. 3) Hole support for filesystems that don't support SEEK_HOLE and SEEK_DATA I also included an optimization where we can cut down on the amount of memory being shifted around by doing the compression as (hole, data) pairs. This patch not only fixes xfstests generic/091 and generic/263 for me but the "-g quick" group tests also finish about a minute faster. 
Signed-off-by: Anna Schumaker --- fs/nfsd/nfs4xdr.c | 202 +++++++++++++++++++++++----------------------- 1 file changed, 102 insertions(+), 100 deletions(-) diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index 61b2aae81abb..0e1e7a37d4e0 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c @@ -4731,81 +4731,121 @@ nfsd4_encode_offload_status(struct nfsd4_compoundres *resp, __be32 nfserr, return nfserr; } +struct read_plus_segment { + enum data_content4 type; + unsigned long offset; + unsigned long length; + unsigned int page_pos; +}; + static __be32 -nfsd4_encode_read_plus_data(struct nfsd4_compoundres *resp, - struct nfsd4_read *read, - unsigned long *maxcount, u32 *eof, - loff_t *pos) +nfsd4_read_plus_readv(struct nfsd4_compoundres *resp, struct nfsd4_read *read, + unsigned long *maxcount, u32 *eof) { struct xdr_stream *xdr = resp->xdr; - struct file *file = read->rd_nf->nf_file; - int starting_len = xdr->buf->len; - loff_t hole_pos; - __be32 nfserr; - __be32 *p, tmp; - __be64 tmp64; - - hole_pos = pos ? 
*pos : vfs_llseek(file, read->rd_offset, SEEK_HOLE); - if (hole_pos > read->rd_offset) - *maxcount = min_t(unsigned long, *maxcount, hole_pos - read->rd_offset); - *maxcount = min_t(unsigned long, *maxcount, (xdr->buf->buflen - xdr->buf->len)); - - /* Content type, offset, byte count */ - p = xdr_reserve_space(xdr, 4 + 8 + 4); - if (!p) - return nfserr_resource; + unsigned int starting_len = xdr->buf->len; + __be32 nfserr, zero = xdr_zero; + int pad; + /* xdr_reserve_space_vec() switches us to the xdr->pages */ read->rd_vlen = xdr_reserve_space_vec(xdr, resp->rqstp->rq_vec, *maxcount); if (read->rd_vlen < 0) return nfserr_resource; - nfserr = nfsd_readv(resp->rqstp, read->rd_fhp, file, read->rd_offset, - resp->rqstp->rq_vec, read->rd_vlen, maxcount, eof); + nfserr = nfsd_readv(resp->rqstp, read->rd_fhp, read->rd_nf->nf_file, + read->rd_offset, resp->rqstp->rq_vec, read->rd_vlen, + maxcount, eof); if (nfserr) return nfserr; - xdr_truncate_encode(xdr, starting_len + 16 + xdr_align_size(*maxcount)); + xdr_truncate_encode(xdr, starting_len + xdr_align_size(*maxcount)); - tmp = htonl(NFS4_CONTENT_DATA); - write_bytes_to_xdr_buf(xdr->buf, starting_len, &tmp, 4); - tmp64 = cpu_to_be64(read->rd_offset); - write_bytes_to_xdr_buf(xdr->buf, starting_len + 4, &tmp64, 8); - tmp = htonl(*maxcount); - write_bytes_to_xdr_buf(xdr->buf, starting_len + 12, &tmp, 4); - - tmp = xdr_zero; - write_bytes_to_xdr_buf(xdr->buf, starting_len + 16 + *maxcount, &tmp, - xdr_pad_size(*maxcount)); + pad = (*maxcount&3) ? 
4 - (*maxcount&3) : 0; + write_bytes_to_xdr_buf(xdr->buf, starting_len + *maxcount, &zero, pad); return nfs_ok; } +static void +nfsd4_encode_read_plus_segment(struct xdr_stream *xdr, + struct read_plus_segment *segment, + unsigned int *bufpos, unsigned int *segments) +{ + struct xdr_buf *buf = xdr->buf; + + xdr_encode_word(buf, *bufpos, segment->type); + xdr_encode_double(buf, *bufpos + 4, segment->offset); + + if (segment->type == NFS4_CONTENT_HOLE) { + xdr_encode_double(buf, *bufpos + 12, segment->length); + *bufpos += 4 + 8 + 8; + } else { + size_t align = xdr_align_size(segment->length); + xdr_encode_word(buf, *bufpos + 12, segment->length); + if (*segments == 0) + xdr_buf_trim_head(buf, 4); + + xdr_stream_move_subsegment(xdr, + buf->head[0].iov_len + segment->page_pos, + *bufpos + 16, align); + *bufpos += 4 + 8 + 4 + align; + } + + *segments += 1; +} + static __be32 -nfsd4_encode_read_plus_hole(struct nfsd4_compoundres *resp, - struct nfsd4_read *read, - unsigned long *maxcount, u32 *eof) +nfsd4_encode_read_plus_segments(struct nfsd4_compoundres *resp, + struct nfsd4_read *read, + unsigned int *segments, u32 *eof) { - struct file *file = read->rd_nf->nf_file; - loff_t data_pos = vfs_llseek(file, read->rd_offset, SEEK_DATA); - loff_t f_size = i_size_read(file_inode(file)); - unsigned long count; - __be32 *p; + enum data_content4 pagetype; + struct read_plus_segment segment; + struct xdr_stream *xdr = resp->xdr; + unsigned long offset = read->rd_offset; + unsigned int bufpos = xdr->buf->len; + unsigned long maxcount; + unsigned int pagelen, i = 0; + char *vpage, *p; + __be32 nfserr; - if (data_pos == -ENXIO) - data_pos = f_size; - else if (data_pos <= read->rd_offset || (data_pos < f_size && data_pos % PAGE_SIZE)) - return nfsd4_encode_read_plus_data(resp, read, maxcount, eof, &f_size); - count = data_pos - read->rd_offset; - - /* Content type, offset, byte count */ - p = xdr_reserve_space(resp->xdr, 4 + 8 + 8); - if (!p) + /* enough space for a HOLE segment 
before we switch to the pages */ + if (!xdr_reserve_space(xdr, 4 + 8 + 8)) return nfserr_resource; + xdr_commit_encode(xdr); - *p++ = htonl(NFS4_CONTENT_HOLE); - p = xdr_encode_hyper(p, read->rd_offset); - p = xdr_encode_hyper(p, count); + maxcount = min_t(unsigned long, read->rd_length, + (xdr->buf->buflen - xdr->buf->len)); - *eof = (read->rd_offset + count) >= f_size; - *maxcount = min_t(unsigned long, count, *maxcount); + nfserr = nfsd4_read_plus_readv(resp, read, &maxcount, eof); + if (nfserr) + return nfserr; + + while (maxcount > 0) { + vpage = xdr_buf_nth_page_address(xdr->buf, i, &pagelen); + pagelen = min_t(unsigned int, pagelen, maxcount); + if (!vpage || pagelen == 0) + break; + p = memchr_inv(vpage, 0, pagelen); + pagetype = (p == NULL) ? NFS4_CONTENT_HOLE : NFS4_CONTENT_DATA; + + if (pagetype != segment.type || i == 0) { + if (likely(i > 0)) { + nfsd4_encode_read_plus_segment(xdr, &segment, + &bufpos, segments); + offset += segment.length; + } + segment.type = pagetype; + segment.offset = offset; + segment.length = pagelen; + segment.page_pos = i * PAGE_SIZE; + } else + segment.length += pagelen; + + maxcount -= pagelen; + i++; + } + + nfsd4_encode_read_plus_segment(xdr, &segment, &bufpos, segments); + xdr_truncate_encode(xdr, bufpos); return nfs_ok; } @@ -4813,69 +4853,31 @@ static __be32 nfsd4_encode_read_plus(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_read *read) { - unsigned long maxcount, count; struct xdr_stream *xdr = resp->xdr; - struct file *file; int starting_len = xdr->buf->len; - int last_segment = xdr->buf->len; - int segments = 0; - __be32 *p, tmp; - bool is_data; - loff_t pos; + unsigned int segments = 0; u32 eof; if (nfserr) return nfserr; - file = read->rd_nf->nf_file; /* eof flag, segment count */ - p = xdr_reserve_space(xdr, 4 + 4); - if (!p) + if (!xdr_reserve_space(xdr, 4 + 4)) return nfserr_resource; xdr_commit_encode(xdr); - maxcount = min_t(unsigned long, read->rd_length, - (xdr->buf->buflen - xdr->buf->len)); 
- count = maxcount; - - eof = read->rd_offset >= i_size_read(file_inode(file)); + eof = read->rd_offset >= i_size_read(file_inode(read->rd_nf->nf_file)); if (eof) goto out; - pos = vfs_llseek(file, read->rd_offset, SEEK_HOLE); - is_data = pos > read->rd_offset; - - while (count > 0 && !eof) { - maxcount = count; - if (is_data) - nfserr = nfsd4_encode_read_plus_data(resp, read, &maxcount, &eof, - segments == 0 ? &pos : NULL); - else - nfserr = nfsd4_encode_read_plus_hole(resp, read, &maxcount, &eof); - if (nfserr) - goto out; - count -= maxcount; - read->rd_offset += maxcount; - is_data = !is_data; - last_segment = xdr->buf->len; - segments++; - } - + nfserr = nfsd4_encode_read_plus_segments(resp, read, &segments, &eof); out: - if (nfserr && segments == 0) + if (nfserr) xdr_truncate_encode(xdr, starting_len); else { - if (nfserr) { - xdr_truncate_encode(xdr, last_segment); - nfserr = nfs_ok; - eof = 0; - } - tmp = htonl(eof); - write_bytes_to_xdr_buf(xdr->buf, starting_len, &tmp, 4); - tmp = htonl(segments); - write_bytes_to_xdr_buf(xdr->buf, starting_len + 4, &tmp, 4); + xdr_encode_word(xdr->buf, starting_len, eof); + xdr_encode_word(xdr->buf, starting_len + 4, segments); } - return nfserr; }