From patchwork Fri Jul 15 18:44:28 2022
X-Patchwork-Submitter: Anna Schumaker
X-Patchwork-Id: 12919573
From: Anna Schumaker
To: linux-nfs@vger.kernel.org, chuck.lever@oracle.com
Cc: anna@kernel.org
Subject: [PATCH v3 1/6] SUNRPC: Introduce xdr_stream_move_subsegment()
Date: Fri, 15 Jul 2022 14:44:28 -0400
Message-Id: <20220715184433.838521-2-anna@kernel.org>
In-Reply-To: <20220715184433.838521-1-anna@kernel.org>
References: <20220715184433.838521-1-anna@kernel.org>

From: Anna Schumaker

Add a helper for moving a range of bytes to a new position within an
xdr stream. This is done by creating an xdr subsegment for the range we
will be operating over, which lets us shift the data into the correct
place without potentially overwriting anything already there.

Signed-off-by: Anna Schumaker
---
 include/linux/sunrpc/xdr.h |  2 ++
 net/sunrpc/xdr.c           | 59 ++++++++++++++++++++++++++++++++++++++
 2 files changed, 61 insertions(+)

diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
index 5860f32e3958..7dcc6c31fe29 100644
--- a/include/linux/sunrpc/xdr.h
+++ b/include/linux/sunrpc/xdr.h
@@ -262,6 +262,8 @@ extern unsigned int xdr_align_data(struct xdr_stream *, unsigned int offset, uns
 extern unsigned int xdr_expand_hole(struct xdr_stream *, unsigned int offset, unsigned int length);
 extern bool xdr_stream_subsegment(struct xdr_stream *xdr, struct xdr_buf *subbuf, unsigned int len);
+extern unsigned int xdr_stream_move_subsegment(struct xdr_stream *xdr, unsigned int offset,
+					       unsigned int target, unsigned int length);
 
 /**
  * xdr_set_scratch_buffer - Attach a scratch buffer for decoding data.
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index 5d2b3e6979fb..8ba11a754297 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -775,6 +775,34 @@ static void xdr_buf_pages_shift_left(const struct xdr_buf *buf,
 	xdr_buf_tail_copy_left(buf, 0, len - buf->page_len, shift);
 }
 
+static void xdr_buf_head_shift_left(const struct xdr_buf *buf,
+				    unsigned int base, unsigned int len,
+				    unsigned int shift)
+{
+	const struct kvec *head = buf->head;
+	unsigned int bytes;
+
+	if (!shift || !len)
+		return;
+
+	if (shift > base) {
+		bytes = (shift - base);
+		if (bytes >= len)
+			return;
+		base += bytes;
+		len -= bytes;
+	}
+
+	if (base < head->iov_len) {
+		bytes = min_t(unsigned int, len, head->iov_len - base);
+		memmove(head->iov_base + (base - shift),
+			head->iov_base + base, bytes);
+		base += bytes;
+		len -= bytes;
+	}
+	xdr_buf_pages_shift_left(buf, base - head->iov_len, len, shift);
+}
+
 /**
  * xdr_shrink_bufhead
  * @buf: xdr_buf
@@ -1680,6 +1708,37 @@ bool xdr_stream_subsegment(struct xdr_stream *xdr, struct xdr_buf *subbuf,
 }
 EXPORT_SYMBOL_GPL(xdr_stream_subsegment);
 
+/**
+ * xdr_stream_move_subsegment - Move part of a stream to another position
+ * @xdr: the source xdr_stream
+ * @offset: the source offset of the segment
+ * @target: the target offset of the segment
+ * @length: the number of bytes to move
+ *
+ * Moves @length bytes from @offset to @target in the xdr_stream, overwriting
+ * anything in its space. Returns the number of bytes in the segment.
+ */
+unsigned int xdr_stream_move_subsegment(struct xdr_stream *xdr, unsigned int offset,
+					unsigned int target, unsigned int length)
+{
+	struct xdr_buf buf;
+	unsigned int shift;
+
+	if (offset < target) {
+		shift = target - offset;
+		if (xdr_buf_subsegment(xdr->buf, &buf, offset, shift + length) < 0)
+			return 0;
+		xdr_buf_head_shift_right(&buf, 0, length, shift);
+	} else if (offset > target) {
+		shift = offset - target;
+		if (xdr_buf_subsegment(xdr->buf, &buf, target, shift + length) < 0)
+			return 0;
+		xdr_buf_head_shift_left(&buf, shift, length, shift);
+	}
+	return length;
+}
+EXPORT_SYMBOL_GPL(xdr_stream_move_subsegment);
+
 /**
  * xdr_buf_trim - lop at most "len" bytes off the end of "buf"
  * @buf: buf to be trimmed
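
As a quick illustration of the intended use, here is a minimal sketch of a caller
that slides an already-encoded payload so it lands directly behind a 4-byte length
word. The function name and the offsets are invented for this example; only
xdr_stream_move_subsegment() (added above) and xdr_encode_word() are real APIs.

/*
 * Hypothetical sketch: move "len" bytes of already-encoded data from
 * "data_pos" so they start right after a 32-bit length word at "hdrpos",
 * then fill in that length word.
 */
static void example_shift_payload(struct xdr_stream *xdr,
				  unsigned int data_pos,
				  unsigned int hdrpos,
				  unsigned int len)
{
	/* Slide the payload into place behind the length word. */
	if (xdr_stream_move_subsegment(xdr, data_pos,
				       hdrpos + XDR_UNIT, len) != len)
		return;	/* nothing moved; a real caller would handle this */

	/* Now the 32-bit length word can be written at hdrpos. */
	xdr_encode_word(xdr->buf, hdrpos, len);
}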
From patchwork Fri Jul 15 18:44:29 2022
X-Patchwork-Submitter: Anna Schumaker
X-Patchwork-Id: 12919578
From: Anna Schumaker
To: linux-nfs@vger.kernel.org, chuck.lever@oracle.com
Cc: anna@kernel.org
Subject: [PATCH v3 2/6] SUNRPC: Introduce xdr_encode_double()
Date: Fri, 15 Jul 2022 14:44:29 -0400
Message-Id: <20220715184433.838521-3-anna@kernel.org>
In-Reply-To: <20220715184433.838521-1-anna@kernel.org>
References: <20220715184433.838521-1-anna@kernel.org>

From: Anna Schumaker

This is similar to xdr_encode_word(), but lets us encode a 64-bit value
into the xdr_buf at the given offset.

Signed-off-by: Anna Schumaker
---
 include/linux/sunrpc/xdr.h | 1 +
 net/sunrpc/xdr.c           | 8 ++++++++
 2 files changed, 9 insertions(+)

diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
index 7dcc6c31fe29..e26047d474b2 100644
--- a/include/linux/sunrpc/xdr.h
+++ b/include/linux/sunrpc/xdr.h
@@ -196,6 +196,7 @@ extern int read_bytes_from_xdr_buf(const struct xdr_buf *, unsigned int, void *,
 extern int write_bytes_to_xdr_buf(const struct xdr_buf *, unsigned int, void *, unsigned int);
 
 extern int xdr_encode_word(const struct xdr_buf *, unsigned int, u32);
+extern int xdr_encode_double(const struct xdr_buf *, unsigned int, u64);
 extern int xdr_decode_word(const struct xdr_buf *, unsigned int, u32 *);
 
 struct xdr_array2_desc;
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index 8ba11a754297..63d9cdc989da 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -1865,6 +1865,14 @@ int xdr_encode_word(const struct xdr_buf *buf, unsigned int base, u32 obj)
 }
 EXPORT_SYMBOL_GPL(xdr_encode_word);
 
+int xdr_encode_double(const struct xdr_buf *buf, unsigned int base, u64 obj)
+{
+	__be64 raw = cpu_to_be64(obj);
+
+	return write_bytes_to_xdr_buf(buf, base, &raw, sizeof(obj));
+}
+EXPORT_SYMBOL_GPL(xdr_encode_double);
+
 /* Returns 0 on success, or else a negative error code. */
 static int xdr_xcode_array2(const struct xdr_buf *buf, unsigned int base,
 			    struct xdr_array2_desc *desc, int encode)
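
For context, a hedged sketch of how xdr_encode_word() and the new
xdr_encode_double() pair up when writing a READ_PLUS hole header (type word,
64-bit offset, 64-bit length) into space that was already reserved. The
function name and "pos" are invented; the encoders and NFS4_CONTENT_HOLE exist.

/*
 * Hypothetical sketch: write a hole segment header at byte offset "pos"
 * of an xdr_buf whose space has already been reserved by the caller.
 */
static int example_encode_hole(const struct xdr_buf *buf, unsigned int pos,
			       u64 offset, u64 length)
{
	int err;

	err = xdr_encode_word(buf, pos, NFS4_CONTENT_HOLE);
	if (!err)
		err = xdr_encode_double(buf, pos + XDR_UNIT, offset);
	if (!err)
		err = xdr_encode_double(buf, pos + 3 * XDR_UNIT, length);
	return err;
}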
From patchwork Fri Jul 15 18:44:30 2022
X-Patchwork-Submitter: Anna Schumaker
X-Patchwork-Id: 12919577
From: Anna Schumaker
To: linux-nfs@vger.kernel.org, chuck.lever@oracle.com
Cc: anna@kernel.org
Subject: [PATCH v3 3/6] SUNRPC: Introduce xdr_buf_trim_head()
Date: Fri, 15 Jul 2022 14:44:30 -0400
Message-Id: <20220715184433.838521-4-anna@kernel.org>
In-Reply-To: <20220715184433.838521-1-anna@kernel.org>
References: <20220715184433.838521-1-anna@kernel.org>

From: Anna Schumaker

The READ_PLUS operation uses a 32-bit length field for encoding a DATA
segment, but a 64-bit length field for encoding a HOLE segment. When
setting up our reply buffer, we need to reserve enough space to encode a
HOLE before reading the file data, and then use this function to trim
away the extra space if the first segment turns out to be DATA.
Signed-off-by: Anna Schumaker
---
 include/linux/sunrpc/xdr.h |  1 +
 net/sunrpc/xdr.c           | 17 +++++++++++++++++
 2 files changed, 18 insertions(+)

diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
index e26047d474b2..bdaf048edde0 100644
--- a/include/linux/sunrpc/xdr.h
+++ b/include/linux/sunrpc/xdr.h
@@ -191,6 +191,7 @@ xdr_adjust_iovec(struct kvec *iov, __be32 *p)
 extern void xdr_shift_buf(struct xdr_buf *, size_t);
 extern void xdr_buf_from_iov(const struct kvec *, struct xdr_buf *);
 extern int xdr_buf_subsegment(const struct xdr_buf *, struct xdr_buf *, unsigned int, unsigned int);
+extern void xdr_buf_trim_head(struct xdr_buf *, unsigned int);
 extern void xdr_buf_trim(struct xdr_buf *, unsigned int);
 extern int read_bytes_from_xdr_buf(const struct xdr_buf *, unsigned int, void *, unsigned int);
 extern int write_bytes_to_xdr_buf(const struct xdr_buf *, unsigned int, void *, unsigned int);
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index 63d9cdc989da..37956a274f81 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -1739,6 +1739,23 @@ unsigned int xdr_stream_move_subsegment(struct xdr_stream *xdr, unsigned int off
 }
 EXPORT_SYMBOL_GPL(xdr_stream_move_subsegment);
 
+/**
+ * xdr_buf_trim_head - lop at most "len" bytes off the end of "buf"->head
+ * @buf: buf to be trimmed
+ * @len: number of bytes to reduce "buf"->head by
+ *
+ * Trim an xdr_buf->head by the given number of bytes by fixing up the lengths.
+ * Note that it's possible that we'll trim less than that amount if the
+ * xdr_buf->head is too small.
+ */
+void xdr_buf_trim_head(struct xdr_buf *buf, unsigned int len)
+{
+	size_t trim = min_t(size_t, buf->head[0].iov_len, len);
+
+	buf->head[0].iov_len -= trim;
+	buf->len -= trim;
+}
+EXPORT_SYMBOL_GPL(xdr_buf_trim_head);
+
 /**
  * xdr_buf_trim - lop at most "len" bytes off the end of "buf"
  * @buf: buf to be trimmed
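
A hedged sketch of the reserve-then-trim pattern the commit message describes:
reserve the worst-case (HOLE) header size up front, then give back one XDR word
if the first segment turns out to be DATA. The two helper names below are
invented for illustration; xdr_reserve_space(), xdr_commit_encode(),
xdr_buf_trim_head(), nfserr_resource, and nfs_ok are existing symbols.

/* Reserve room for the larger HOLE header: type (4) + offset (8) + length (8). */
static __be32 example_reserve_segment_header(struct xdr_stream *xdr)
{
	if (!xdr_reserve_space(xdr, 5 * XDR_UNIT))
		return nfserr_resource;
	xdr_commit_encode(xdr);
	return nfs_ok;
}

/* Later, once the first segment is known to be DATA (32-bit length field): */
static void example_shrink_to_data_header(struct xdr_buf *buf)
{
	/* DATA needs one XDR word less than HOLE, so trim it back off. */
	xdr_buf_trim_head(buf, XDR_UNIT);
}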
From patchwork Fri Jul 15 18:44:31 2022
X-Patchwork-Submitter: Anna Schumaker
X-Patchwork-Id: 12919575
From: Anna Schumaker
To: linux-nfs@vger.kernel.org, chuck.lever@oracle.com
Cc: anna@kernel.org
Subject: [PATCH v3 4/6] SUNRPC: Introduce xdr_buf_nth_page_address()
Date: Fri, 15 Jul 2022 14:44:31 -0400
Message-Id: <20220715184433.838521-5-anna@kernel.org>
In-Reply-To: <20220715184433.838521-1-anna@kernel.org>
References: <20220715184433.838521-1-anna@kernel.org>

From: Anna Schumaker

Add a helper that returns a pointer to the memory backing the nth page
of an xdr_buf, along with the length of the data on that page.

Signed-off-by: Anna Schumaker
---
 include/linux/sunrpc/xdr.h |  2 ++
 net/sunrpc/xdr.c           | 17 +++++++++++++++++
 2 files changed, 19 insertions(+)

diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
index bdaf048edde0..79824fea4529 100644
--- a/include/linux/sunrpc/xdr.h
+++ b/include/linux/sunrpc/xdr.h
@@ -137,6 +137,8 @@ void xdr_inline_pages(struct xdr_buf *, unsigned int,
 		      struct page **, unsigned int, unsigned int);
 void xdr_terminate_string(const struct xdr_buf *, const u32);
 size_t xdr_buf_pagecount(const struct xdr_buf *buf);
+char *xdr_buf_nth_page_address(const struct xdr_buf *buf, unsigned int n,
+			       unsigned int *len);
 int xdr_alloc_bvec(struct xdr_buf *buf, gfp_t gfp);
 void xdr_free_bvec(struct xdr_buf *buf);
 
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index 37956a274f81..88b28656a05d 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -140,6 +140,23 @@ size_t xdr_buf_pagecount(const struct xdr_buf *buf)
 	return (buf->page_base + buf->page_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
 }
 
+char *xdr_buf_nth_page_address(const struct xdr_buf *buf, unsigned int n,
+			       unsigned int *len)
+{
+	unsigned int pgbase = buf->page_base + (n * PAGE_SIZE);
+	struct page **pages = buf->pages;
+	struct page **page;
+
+	if (n >= xdr_buf_pagecount(buf))
+		return NULL;
+
+	page = pages + (pgbase >> PAGE_SHIFT);
+	pgbase &= ~PAGE_MASK;
+	*len = min_t(size_t, PAGE_SIZE, buf->page_len - (n * PAGE_SIZE));
+	return page_address(*page) + pgbase;
+}
+EXPORT_SYMBOL_GPL(xdr_buf_nth_page_address);
+
 int xdr_alloc_bvec(struct xdr_buf *buf, gfp_t gfp)
 {
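
A small hedged sketch of how a caller might use the new helper: look at the
first page of an xdr_buf's page array and report whether it holds any non-zero
bytes. The wrapper function is invented; xdr_buf_nth_page_address() and
memchr_inv() are real.

static bool example_first_page_has_data(const struct xdr_buf *buf)
{
	unsigned int len;
	char *vpage = xdr_buf_nth_page_address(buf, 0, &len);

	if (!vpage || !len)
		return false;
	/* memchr_inv() returns NULL when every byte matches, i.e. all zeroes */
	return memchr_inv(vpage, 0, len) != NULL;
}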
From patchwork Fri Jul 15 18:44:32 2022
X-Patchwork-Submitter: Anna Schumaker
X-Patchwork-Id: 12919576
From: Anna Schumaker
To: linux-nfs@vger.kernel.org, chuck.lever@oracle.com
Cc: anna@kernel.org
Subject: [PATCH v3 5/6] SUNRPC: Export xdr_buf_pagecount()
Date: Fri, 15 Jul 2022 14:44:32 -0400
Message-Id: <20220715184433.838521-6-anna@kernel.org>
In-Reply-To: <20220715184433.838521-1-anna@kernel.org>
References: <20220715184433.838521-1-anna@kernel.org>

From: Anna Schumaker

The NFS server will need this for iterating over the pages in a
READ_PLUS reply.

Signed-off-by: Anna Schumaker
---
 net/sunrpc/xdr.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index 88b28656a05d..ea734b14af0f 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -139,6 +139,7 @@ size_t xdr_buf_pagecount(const struct xdr_buf *buf)
 		return 0;
 	return (buf->page_base + buf->page_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
 }
+EXPORT_SYMBOL_GPL(xdr_buf_pagecount);
 
 char *xdr_buf_nth_page_address(const struct xdr_buf *buf, unsigned int n,
 			       unsigned int *len)
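
A hedged sketch of the iteration mentioned in the commit message, combining
xdr_buf_pagecount() with xdr_buf_nth_page_address() from the previous patch.
The walker function is invented for illustration only.

static void example_walk_reply_pages(const struct xdr_buf *buf)
{
	unsigned int n, len;
	char *vpage;

	for (n = 0; n < xdr_buf_pagecount(buf); n++) {
		vpage = xdr_buf_nth_page_address(buf, n, &len);
		if (!vpage || !len)
			break;
		/* examine up to "len" bytes starting at vpage here */
	}
}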
From patchwork Fri Jul 15 18:44:33 2022
X-Patchwork-Submitter: Anna Schumaker
X-Patchwork-Id: 12919579
From: Anna Schumaker
To: linux-nfs@vger.kernel.org, chuck.lever@oracle.com
Cc: anna@kernel.org
Subject: [PATCH v3 6/6] NFSD: Repeal and replace the READ_PLUS implementation
Date: Fri, 15 Jul 2022 14:44:33 -0400
Message-Id: <20220715184433.838521-7-anna@kernel.org>
In-Reply-To: <20220715184433.838521-1-anna@kernel.org>
References: <20220715184433.838521-1-anna@kernel.org>

From: Anna Schumaker

Rather than relying on the underlying filesystem to tell us where hole
and data segments are through vfs_llseek(), let's instead do the hole
compression ourselves. This has a few advantages over the old
implementation:

1) A single call to the underlying filesystem through nfsd_readv() means
   the file can't change from underneath us in the middle of encoding.
2) A single call to the underlying filesystem also means that the
   underlying filesystem only needs to synchronize cached and on-disk
   data one time instead of potentially many times, speeding up the reply.
3) Hole support for filesystems that don't support SEEK_HOLE and
   SEEK_DATA.

I also included an optimization where we can cut down on the amount of
memory being shifted around by doing the compression as (hole, data)
pairs.

Signed-off-by: Anna Schumaker
---
 fs/nfsd/nfs4xdr.c | 219 +++++++++++++++++++++++++---------------------
 1 file changed, 119 insertions(+), 100 deletions(-)

diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 61b2aae81abb..df8289fce4ef 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -4731,81 +4731,138 @@ nfsd4_encode_offload_status(struct nfsd4_compoundres *resp, __be32 nfserr,
 	return nfserr;
 }
 
+struct read_plus_segment {
+	enum data_content4	rp_type;
+	u64			rp_offset;
+	u64			rp_length;
+	unsigned int		rp_page_pos;
+};
+
 static __be32
-nfsd4_encode_read_plus_data(struct nfsd4_compoundres *resp,
-			    struct nfsd4_read *read,
-			    unsigned long *maxcount, u32 *eof,
-			    loff_t *pos)
+nfsd4_read_plus_readv(struct nfsd4_compoundres *resp, struct nfsd4_read *read,
+		      unsigned long *maxcount, u32 *eof)
 {
 	struct xdr_stream *xdr = resp->xdr;
-	struct file *file = read->rd_nf->nf_file;
-	int starting_len = xdr->buf->len;
-	loff_t hole_pos;
-	__be32 nfserr;
-	__be32 *p, tmp;
-	__be64 tmp64;
-
-	hole_pos = pos ? *pos : vfs_llseek(file, read->rd_offset, SEEK_HOLE);
-	if (hole_pos > read->rd_offset)
-		*maxcount = min_t(unsigned long, *maxcount, hole_pos - read->rd_offset);
-	*maxcount = min_t(unsigned long, *maxcount, (xdr->buf->buflen - xdr->buf->len));
-
-	/* Content type, offset, byte count */
-	p = xdr_reserve_space(xdr, 4 + 8 + 4);
-	if (!p)
-		return nfserr_resource;
+	unsigned int starting_len = xdr->buf->len;
+	__be32 nfserr, zero = xdr_zero;
+	unsigned int pad;
 
+	/*
+	 * Reserve the maximum amount of space needed to craft a READ_PLUS
+	 * reply. The call to xdr_reserve_space_vec() switches us to the
+	 * xdr->pages, which we then read file data into before analyzing
+	 * the individual segments.
+	 */
 	read->rd_vlen = xdr_reserve_space_vec(xdr, resp->rqstp->rq_vec, *maxcount);
 	if (read->rd_vlen < 0)
 		return nfserr_resource;
 
-	nfserr = nfsd_readv(resp->rqstp, read->rd_fhp, file, read->rd_offset,
-			    resp->rqstp->rq_vec, read->rd_vlen, maxcount, eof);
+	nfserr = nfsd_readv(resp->rqstp, read->rd_fhp, read->rd_nf->nf_file,
+			    read->rd_offset, resp->rqstp->rq_vec, read->rd_vlen,
+			    maxcount, eof);
 	if (nfserr)
 		return nfserr;
-	xdr_truncate_encode(xdr, starting_len + 16 + xdr_align_size(*maxcount));
+	xdr_truncate_encode(xdr, starting_len + xdr_align_size(*maxcount));
 
-	tmp = htonl(NFS4_CONTENT_DATA);
-	write_bytes_to_xdr_buf(xdr->buf, starting_len,      &tmp,   4);
-	tmp64 = cpu_to_be64(read->rd_offset);
-	write_bytes_to_xdr_buf(xdr->buf, starting_len + 4,  &tmp64, 8);
-	tmp = htonl(*maxcount);
-	write_bytes_to_xdr_buf(xdr->buf, starting_len + 12, &tmp,   4);
-
-	tmp = xdr_zero;
-	write_bytes_to_xdr_buf(xdr->buf, starting_len + 16 + *maxcount, &tmp,
-			       xdr_pad_size(*maxcount));
+	pad = xdr_pad_size(*maxcount);
+	write_bytes_to_xdr_buf(xdr->buf, starting_len + *maxcount, &zero, pad);
 	return nfs_ok;
 }
 
+/**
+ * nfsd4_encode_read_plus_segment - Encode a single READ_PLUS segment
+ * @xdr: pointer to an xdr_stream
+ * @segment: pointer to a single segment
+ * @bufpos: xdr_stream offset to place the segment
+ * @segments: pointer to the total number of segments seen
+ *
+ * Performs surgery on the xdr_stream to compress out HOLE segments and
+ * to place DATA segments in the proper place.
+ */
+static void
+nfsd4_encode_read_plus_segment(struct xdr_stream *xdr,
+			       struct read_plus_segment *segment,
+			       unsigned int *bufpos, unsigned int *segments)
+{
+	struct xdr_buf *buf = xdr->buf;
+
+	xdr_encode_word(buf, *bufpos, segment->rp_type);
+	xdr_encode_double(buf, *bufpos + XDR_UNIT, segment->rp_offset);
+	*bufpos += 3 * XDR_UNIT;
+
+	if (segment->rp_type == NFS4_CONTENT_HOLE) {
+		xdr_encode_double(buf, *bufpos, segment->rp_length);
+		*bufpos += 2 * XDR_UNIT;
+	} else {
+		size_t align = xdr_align_size(segment->rp_length);
+		xdr_encode_word(buf, *bufpos, segment->rp_length);
+		if (*segments == 0)
+			xdr_buf_trim_head(buf, XDR_UNIT);
+
+		xdr_stream_move_subsegment(xdr,
+				buf->head[0].iov_len + segment->rp_page_pos,
+				*bufpos + XDR_UNIT, align);
+		*bufpos += XDR_UNIT + align;
+	}
+
+	*segments += 1;
+}
+
 static __be32
-nfsd4_encode_read_plus_hole(struct nfsd4_compoundres *resp,
-			    struct nfsd4_read *read,
-			    unsigned long *maxcount, u32 *eof)
+nfsd4_encode_read_plus_segments(struct nfsd4_compoundres *resp,
+				struct nfsd4_read *read,
+				unsigned int *segments, u32 *eof)
 {
-	struct file *file = read->rd_nf->nf_file;
-	loff_t data_pos = vfs_llseek(file, read->rd_offset, SEEK_DATA);
-	loff_t f_size = i_size_read(file_inode(file));
-	unsigned long count;
-	__be32 *p;
+	struct xdr_stream *xdr = resp->xdr;
+	unsigned int bufpos = xdr->buf->len;
+	u64 offset = read->rd_offset;
+	struct read_plus_segment segment;
+	enum data_content4 pagetype;
+	unsigned long maxcount;
+	unsigned int pagenum = 0;
+	unsigned int pagelen;
+	char *vpage, *p;
+	__be32 nfserr;
 
-	if (data_pos == -ENXIO)
-		data_pos = f_size;
-	else if (data_pos <= read->rd_offset || (data_pos < f_size && data_pos % PAGE_SIZE))
-		return nfsd4_encode_read_plus_data(resp, read, maxcount, eof, &f_size);
-	count = data_pos - read->rd_offset;
-
-	/* Content type, offset, byte count */
-	p = xdr_reserve_space(resp->xdr, 4 + 8 + 8);
-	if (!p)
+	/* enough space for a HOLE segment before we switch to the pages */
+	if (!xdr_reserve_space(xdr, 5 * XDR_UNIT))
 		return nfserr_resource;
+	xdr_commit_encode(xdr);
 
-	*p++ = htonl(NFS4_CONTENT_HOLE);
-	p = xdr_encode_hyper(p, read->rd_offset);
-	p = xdr_encode_hyper(p, count);
+	maxcount = min_t(unsigned long, read->rd_length,
+			 (xdr->buf->buflen - xdr->buf->len));
 
-	*eof = (read->rd_offset + count) >= f_size;
-	*maxcount = min_t(unsigned long, count, *maxcount);
+	nfserr = nfsd4_read_plus_readv(resp, read, &maxcount, eof);
+	if (nfserr)
+		return nfserr;
+
+	while (maxcount > 0) {
+		vpage = xdr_buf_nth_page_address(xdr->buf, pagenum, &pagelen);
+		pagelen = min_t(unsigned int, pagelen, maxcount);
+		if (!vpage || pagelen == 0)
+			break;
+		p = memchr_inv(vpage, 0, pagelen);
+		pagetype = (p == NULL) ? NFS4_CONTENT_HOLE : NFS4_CONTENT_DATA;
+
+		if (pagetype != segment.rp_type || pagenum == 0) {
+			if (likely(pagenum > 0)) {
+				nfsd4_encode_read_plus_segment(xdr, &segment,
+							       &bufpos, segments);
+				offset += segment.rp_length;
+			}
+			segment.rp_type = pagetype;
+			segment.rp_offset = offset;
+			segment.rp_length = pagelen;
+			segment.rp_page_pos = pagenum * PAGE_SIZE;
+		} else
+			segment.rp_length += pagelen;
+
+		maxcount -= pagelen;
+		pagenum++;
+	}
+
+	nfsd4_encode_read_plus_segment(xdr, &segment, &bufpos, segments);
+	xdr_truncate_encode(xdr, bufpos);
 	return nfs_ok;
 }
 
@@ -4813,69 +4870,31 @@ static __be32
 nfsd4_encode_read_plus(struct nfsd4_compoundres *resp, __be32 nfserr,
 		       struct nfsd4_read *read)
 {
-	unsigned long maxcount, count;
 	struct xdr_stream *xdr = resp->xdr;
-	struct file *file;
 	int starting_len = xdr->buf->len;
-	int last_segment = xdr->buf->len;
-	int segments = 0;
-	__be32 *p, tmp;
-	bool is_data;
-	loff_t pos;
+	unsigned int segments = 0;
 	u32 eof;
 
 	if (nfserr)
 		return nfserr;
-	file = read->rd_nf->nf_file;
 
 	/* eof flag, segment count */
-	p = xdr_reserve_space(xdr, 4 + 4);
-	if (!p)
+	if (!xdr_reserve_space(xdr, 2 * XDR_UNIT))
 		return nfserr_resource;
 	xdr_commit_encode(xdr);
 
-	maxcount = min_t(unsigned long, read->rd_length,
-			 (xdr->buf->buflen - xdr->buf->len));
-	count = maxcount;
-
-	eof = read->rd_offset >= i_size_read(file_inode(file));
+	eof = read->rd_offset >= i_size_read(file_inode(read->rd_nf->nf_file));
 	if (eof)
 		goto out;
 
-	pos = vfs_llseek(file, read->rd_offset, SEEK_HOLE);
-	is_data = pos > read->rd_offset;
-
-	while (count > 0 && !eof) {
-		maxcount = count;
-		if (is_data)
-			nfserr = nfsd4_encode_read_plus_data(resp, read, &maxcount, &eof,
-						segments == 0 ? &pos : NULL);
-		else
-			nfserr = nfsd4_encode_read_plus_hole(resp, read, &maxcount, &eof);
-		if (nfserr)
-			goto out;
-		count -= maxcount;
-		read->rd_offset += maxcount;
-		is_data = !is_data;
-		last_segment = xdr->buf->len;
-		segments++;
-	}
-
+	nfserr = nfsd4_encode_read_plus_segments(resp, read, &segments, &eof);
 out:
-	if (nfserr && segments == 0)
+	if (nfserr)
 		xdr_truncate_encode(xdr, starting_len);
 	else {
-		if (nfserr) {
-			xdr_truncate_encode(xdr, last_segment);
-			nfserr = nfs_ok;
-			eof = 0;
-		}
-		tmp = htonl(eof);
-		write_bytes_to_xdr_buf(xdr->buf, starting_len,     &tmp, 4);
-		tmp = htonl(segments);
-		write_bytes_to_xdr_buf(xdr->buf, starting_len + 4, &tmp, 4);
+		xdr_encode_word(xdr->buf, starting_len, eof);
+		xdr_encode_word(xdr->buf, starting_len + XDR_UNIT, segments);
 	}
-
 	return nfserr;
 }
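
To make the page-scanning loop above easier to follow, here is a small
stand-alone sketch (plain user-space C, not kernel code; the tiny page size
and sample page contents are invented) of the same idea: classify each page
as HOLE (all zero) or DATA and merge adjacent pages of the same type into
(type, offset, length) segments.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define EX_PAGE_SIZE 8	/* tiny pages keep the example readable */

enum seg_type { SEG_HOLE, SEG_DATA };

static bool page_is_hole(const uint8_t *page, size_t len)
{
	for (size_t i = 0; i < len; i++)
		if (page[i] != 0)
			return false;
	return true;
}

static void print_segment(unsigned int n, enum seg_type t,
			  uint64_t offset, uint64_t length)
{
	printf("segment %u: %s offset=%llu length=%llu\n", n,
	       t == SEG_HOLE ? "HOLE" : "DATA",
	       (unsigned long long)offset, (unsigned long long)length);
}

int main(void)
{
	/* Four example pages: data, data, hole, data */
	uint8_t pages[4][EX_PAGE_SIZE] = {
		{ 1, 2, 3, 4, 5, 6, 7, 8 },
		{ 9, 9, 9, 9, 9, 9, 9, 9 },
		{ 0 },
		{ 0, 0, 0, 0, 1, 0, 0, 0 },
	};
	enum seg_type cur = SEG_HOLE;
	uint64_t offset = 0, length = 0;
	unsigned int segments = 0;

	for (unsigned int n = 0; n < 4; n++) {
		enum seg_type t = page_is_hole(pages[n], EX_PAGE_SIZE) ?
				  SEG_HOLE : SEG_DATA;

		if (n == 0 || t != cur) {
			/* flush the previous run before starting a new one */
			if (n > 0) {
				print_segment(segments++, cur, offset, length);
				offset += length;
			}
			cur = t;
			length = EX_PAGE_SIZE;
		} else {
			length += EX_PAGE_SIZE;
		}
	}
	print_segment(segments, cur, offset, length);
	return 0;
}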