
[net-next,v5,03/16] ceph: Use sendmsg(MSG_SPLICE_PAGES) rather than sendpage

Message ID 20230623225513.2732256-4-dhowells@redhat.com (mailing list archive)
State New, archived

Commit Message

David Howells June 23, 2023, 10:55 p.m. UTC
Use sendmsg() and MSG_SPLICE_PAGES rather than sendpage in ceph when
transmitting data.  For the moment, this can only transmit one page at a
time because of the architecture of net/ceph/, but if
write_partial_message_data() can be given a bvec[] at a time by the
iteration code, this would allow pages to be sent in a batch.

Signed-off-by: David Howells <dhowells@redhat.com>
cc: Ilya Dryomov <idryomov@gmail.com>
cc: Xiubo Li <xiubli@redhat.com>
cc: Jeff Layton <jlayton@kernel.org>
cc: "David S. Miller" <davem@davemloft.net>
cc: Eric Dumazet <edumazet@google.com>
cc: Jakub Kicinski <kuba@kernel.org>
cc: Paolo Abeni <pabeni@redhat.com>
cc: Jens Axboe <axboe@kernel.dk>
cc: Matthew Wilcox <willy@infradead.org>
cc: ceph-devel@vger.kernel.org
cc: netdev@vger.kernel.org
---

Notes:
    ver #5)
     - Switch condition for setting MSG_MORE in write_partial_message_data()

 net/ceph/messenger_v1.c | 60 ++++++++++++++---------------------------
 1 file changed, 20 insertions(+), 40 deletions(-)
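
The core of the conversion is the pattern visible in the diff below: each
page is described by a one-element bio_vec, attached to the msghdr as an
ITER_SOURCE iterator, and pushed with sock_sendmsg() carrying
MSG_SPLICE_PAGES instead of going through ->sendpage().  As a minimal
standalone sketch (splice_one_page() is a hypothetical name, not something
in the patch), that looks roughly like:

	#include <linux/bvec.h>
	#include <linux/net.h>
	#include <linux/socket.h>
	#include <linux/uio.h>

	/*
	 * Hypothetical helper: send one page with sendmsg(MSG_SPLICE_PAGES)
	 * instead of ->sendpage().  @more replaces the old
	 * MSG_MORE | MSG_SENDPAGE_NOTLAST signalling.
	 */
	static int splice_one_page(struct socket *sock, struct page *page,
				   unsigned int offset, size_t len, bool more)
	{
		struct bio_vec bvec;
		struct msghdr msg = {
			.msg_flags = MSG_SPLICE_PAGES,
		};

		if (more)
			msg.msg_flags |= MSG_MORE;

		/* Describe the page as a single bio_vec and point the
		 * message iterator at it as a data source.
		 */
		bvec_set_page(&bvec, page, len, offset);
		iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, len);

		return sock_sendmsg(sock, &msg);
	}

Sending more than one page at a time would only require passing a longer
bvec[] to iov_iter_bvec(), which is what the planned rework of the
net/ceph/ iteration code would enable.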

Comments

Ilya Dryomov June 25, 2023, 12:20 p.m. UTC | #1
On Sat, Jun 24, 2023 at 12:55 AM David Howells <dhowells@redhat.com> wrote:
>
> Use sendmsg() and MSG_SPLICE_PAGES rather than sendpage in ceph when
> transmitting data.  For the moment, this can only transmit one page at a
> time because of the architecture of net/ceph/, but if
> write_partial_message_data() can be given a bvec[] at a time by the
> iteration code, this would allow pages to be sent in a batch.
>
> Signed-off-by: David Howells <dhowells@redhat.com>
> cc: Ilya Dryomov <idryomov@gmail.com>
> cc: Xiubo Li <xiubli@redhat.com>
> cc: Jeff Layton <jlayton@kernel.org>
> cc: "David S. Miller" <davem@davemloft.net>
> cc: Eric Dumazet <edumazet@google.com>
> cc: Jakub Kicinski <kuba@kernel.org>
> cc: Paolo Abeni <pabeni@redhat.com>
> cc: Jens Axboe <axboe@kernel.dk>
> cc: Matthew Wilcox <willy@infradead.org>
> cc: ceph-devel@vger.kernel.org
> cc: netdev@vger.kernel.org
> ---
>
> Notes:
>     ver #5)
>      - Switch condition for setting MSG_MORE in write_partial_message_data()
>
>  net/ceph/messenger_v1.c | 60 ++++++++++++++---------------------------
>  1 file changed, 20 insertions(+), 40 deletions(-)
>
> diff --git a/net/ceph/messenger_v1.c b/net/ceph/messenger_v1.c
> index d664cb1593a7..814579f27f04 100644
> --- a/net/ceph/messenger_v1.c
> +++ b/net/ceph/messenger_v1.c
> @@ -74,37 +74,6 @@ static int ceph_tcp_sendmsg(struct socket *sock, struct kvec *iov,
>         return r;
>  }
>
> -/*
> - * @more: either or both of MSG_MORE and MSG_SENDPAGE_NOTLAST
> - */
> -static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
> -                            int offset, size_t size, int more)
> -{
> -       ssize_t (*sendpage)(struct socket *sock, struct page *page,
> -                           int offset, size_t size, int flags);
> -       int flags = MSG_DONTWAIT | MSG_NOSIGNAL | more;
> -       int ret;
> -
> -       /*
> -        * sendpage cannot properly handle pages with page_count == 0,
> -        * we need to fall back to sendmsg if that's the case.
> -        *
> -        * Same goes for slab pages: skb_can_coalesce() allows
> -        * coalescing neighboring slab objects into a single frag which
> -        * triggers one of hardened usercopy checks.
> -        */
> -       if (sendpage_ok(page))
> -               sendpage = sock->ops->sendpage;
> -       else
> -               sendpage = sock_no_sendpage;
> -
> -       ret = sendpage(sock, page, offset, size, flags);
> -       if (ret == -EAGAIN)
> -               ret = 0;
> -
> -       return ret;
> -}
> -
>  static void con_out_kvec_reset(struct ceph_connection *con)
>  {
>         BUG_ON(con->v1.out_skip);
> @@ -464,7 +433,6 @@ static int write_partial_message_data(struct ceph_connection *con)
>         struct ceph_msg *msg = con->out_msg;
>         struct ceph_msg_data_cursor *cursor = &msg->cursor;
>         bool do_datacrc = !ceph_test_opt(from_msgr(con->msgr), NOCRC);
> -       int more = MSG_MORE | MSG_SENDPAGE_NOTLAST;
>         u32 crc;
>
>         dout("%s %p msg %p\n", __func__, con, msg);
> @@ -482,6 +450,10 @@ static int write_partial_message_data(struct ceph_connection *con)
>          */
>         crc = do_datacrc ? le32_to_cpu(msg->footer.data_crc) : 0;
>         while (cursor->total_resid) {
> +               struct bio_vec bvec;
> +               struct msghdr msghdr = {
> +                       .msg_flags = MSG_SPLICE_PAGES,

Hi David,

This appears to be losing the MSG_DONTWAIT | MSG_NOSIGNAL flags which were
set previously?

> +               };
>                 struct page *page;
>                 size_t page_offset;
>                 size_t length;
> @@ -493,10 +465,13 @@ static int write_partial_message_data(struct ceph_connection *con)
>                 }
>
>                 page = ceph_msg_data_next(cursor, &page_offset, &length);
> -               if (length == cursor->total_resid)
> -                       more = MSG_MORE;
> -               ret = ceph_tcp_sendpage(con->sock, page, page_offset, length,
> -                                       more);
> +               if (length != cursor->total_resid)
> +                       msghdr.msg_flags |= MSG_MORE;
> +
> +               bvec_set_page(&bvec, page, length, page_offset);
> +               iov_iter_bvec(&msghdr.msg_iter, ITER_SOURCE, &bvec, 1, length);
> +
> +               ret = sock_sendmsg(con->sock, &msghdr);
>                 if (ret <= 0) {

And this is losing the -EAGAIN -> 0 munging?

>                         if (do_datacrc)
>                                 msg->footer.data_crc = cpu_to_le32(crc);
> @@ -526,7 +501,10 @@ static int write_partial_message_data(struct ceph_connection *con)
>   */
>  static int write_partial_skip(struct ceph_connection *con)
>  {
> -       int more = MSG_MORE | MSG_SENDPAGE_NOTLAST;
> +       struct bio_vec bvec;
> +       struct msghdr msghdr = {
> +               .msg_flags = MSG_SPLICE_PAGES | MSG_MORE,
> +       };
>         int ret;
>
>         dout("%s %p %d left\n", __func__, con, con->v1.out_skip);
> @@ -534,9 +512,11 @@ static int write_partial_skip(struct ceph_connection *con)
>                 size_t size = min(con->v1.out_skip, (int)PAGE_SIZE);
>
>                 if (size == con->v1.out_skip)
> -                       more = MSG_MORE;
> -               ret = ceph_tcp_sendpage(con->sock, ceph_zero_page, 0, size,
> -                                       more);
> +                       msghdr.msg_flags &= ~MSG_MORE;
> +               bvec_set_page(&bvec, ZERO_PAGE(0), size, 0);
> +               iov_iter_bvec(&msghdr.msg_iter, ITER_SOURCE, &bvec, 1, size);
> +
> +               ret = sock_sendmsg(con->sock, &msghdr);
>                 if (ret <= 0)

Same here...  I would suggest that you keep the ceph_tcp_sendpage() function
and make only minimal modifications to avoid regressions.

Thanks,

                Ilya
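
The two points raised above (the dropped MSG_DONTWAIT | MSG_NOSIGNAL flags
and the dropped -EAGAIN -> 0 mapping) could be addressed along the lines
Ilya suggests by keeping ceph_tcp_sendpage() with its original signature
and error handling and only swapping its internals over to
sendmsg(MSG_SPLICE_PAGES).  A sketch of that (an illustration, not the
committed fix) might be:

	static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
				     int offset, size_t size, int more)
	{
		struct bio_vec bvec;
		struct msghdr msg = {
			/* Keep the original socket flags; @more carries MSG_MORE. */
			.msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL | more,
		};
		int ret;

		/*
		 * Only ask for splicing when the page can safely have a
		 * reference taken on it (sendpage_ok(): not a slab page,
		 * page_count >= 1); otherwise let sendmsg() copy the data,
		 * as the old sock_no_sendpage() fallback did.
		 */
		if (sendpage_ok(page))
			msg.msg_flags |= MSG_SPLICE_PAGES;

		bvec_set_page(&bvec, page, size, offset);
		iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size);

		ret = sock_sendmsg(sock, &msg);
		if (ret == -EAGAIN)	/* preserve the -EAGAIN -> 0 mapping */
			ret = 0;

		return ret;
	}

That keeps the callers' error handling unchanged: they continue to see 0
for a would-block condition and pass MSG_MORE (or 0) through @more.
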
David Howells June 26, 2023, 2 p.m. UTC | #2
Ilya Dryomov <idryomov@gmail.com> wrote:

> Same here...  I would suggest that you keep the ceph_tcp_sendpage() function
> and make only minimal modifications to avoid regressions.

This is now committed to net-next.  I can bring ceph_tcp_sendpage() back into
existence or fix it in place for now if you have a preference.

Note that I'm working on patches to rework the libceph transmission path so
that it isn't dealing with transmitting a single page at a time, but it's not
ready yet.

David
David Howells June 26, 2023, 3:12 p.m. UTC | #3
Ilya Dryomov <idryomov@gmail.com> wrote:

> > -       int flags = MSG_DONTWAIT | MSG_NOSIGNAL | more;

Btw, why are you setting MSG_DONTWAIT?  If you're in the middle of
transmitting a message on a TCP socket, surely you can't just switch to
transmitting a different message on the same socket without doing some sort of
reframing?

David
Ilya Dryomov June 26, 2023, 3:41 p.m. UTC | #4
On Mon, Jun 26, 2023 at 4:00 PM David Howells <dhowells@redhat.com> wrote:
>
> Ilya Dryomov <idryomov@gmail.com> wrote:
>
> > Same here...  I would suggest that you keep the ceph_tcp_sendpage() function
> > and make only minimal modifications to avoid regressions.
>
> This is now committed to net-next.

This needs to be dropped from linux-next because both this and
especially the other (net/ceph/messenger_v2.c) patch introduce
regressions.

> I can bring ceph_tcp_sendpage() back into
> existence or fix it in place for now if you have a preference.

I already mentioned that I would prefer if ceph_tcp_sendpage() was
brought back into existence.

>
> Note that I'm working on patches to rework the libceph transmission path so
> that it isn't dealing with transmitting a single page at a time, but it's not
> ready yet.

That is a worthwhile improvement now that sock_sendmsg() can take
advantage of multiple pages!  It would be pretty invasive, though, so
I think it's better for ceph_tcp_sendpage() to remain in place until then.

Thanks,

                Ilya
Ilya Dryomov June 26, 2023, 3:52 p.m. UTC | #5
On Mon, Jun 26, 2023 at 5:12 PM David Howells <dhowells@redhat.com> wrote:
>
> Ilya Dryomov <idryomov@gmail.com> wrote:
>
> > > -       int flags = MSG_DONTWAIT | MSG_NOSIGNAL | more;
>
> Btw, why are you setting MSG_DONTWAIT?  If you're in the middle of
> transmitting a message on a TCP socket, surely you can't just switch to
> transmitting a different message on the same socket without doing some sort of
> reframing?

We don't want to hog kworker threads.  You are correct that we can't
switch to transmitting a different message on the same socket, but Ceph
is massively parallel and there can be dozens or even hundreds of other
sockets to work on.

Thanks,

                Ilya
David Howells June 26, 2023, 4:44 p.m. UTC | #6
Ilya Dryomov <idryomov@gmail.com> wrote:

> > This is now committed to net-next.
> 
> This needs to be dropped from linux-next because both this and
> especially the other (net/ceph/messenger_v2.c) patch introduce
> regressions.

net-next, not linux-next.  I'm not sure they drop things from there rather
than reverting them.

David

Patch

diff --git a/net/ceph/messenger_v1.c b/net/ceph/messenger_v1.c
index d664cb1593a7..814579f27f04 100644
--- a/net/ceph/messenger_v1.c
+++ b/net/ceph/messenger_v1.c
@@ -74,37 +74,6 @@  static int ceph_tcp_sendmsg(struct socket *sock, struct kvec *iov,
 	return r;
 }
 
-/*
- * @more: either or both of MSG_MORE and MSG_SENDPAGE_NOTLAST
- */
-static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
-			     int offset, size_t size, int more)
-{
-	ssize_t (*sendpage)(struct socket *sock, struct page *page,
-			    int offset, size_t size, int flags);
-	int flags = MSG_DONTWAIT | MSG_NOSIGNAL | more;
-	int ret;
-
-	/*
-	 * sendpage cannot properly handle pages with page_count == 0,
-	 * we need to fall back to sendmsg if that's the case.
-	 *
-	 * Same goes for slab pages: skb_can_coalesce() allows
-	 * coalescing neighboring slab objects into a single frag which
-	 * triggers one of hardened usercopy checks.
-	 */
-	if (sendpage_ok(page))
-		sendpage = sock->ops->sendpage;
-	else
-		sendpage = sock_no_sendpage;
-
-	ret = sendpage(sock, page, offset, size, flags);
-	if (ret == -EAGAIN)
-		ret = 0;
-
-	return ret;
-}
-
 static void con_out_kvec_reset(struct ceph_connection *con)
 {
 	BUG_ON(con->v1.out_skip);
@@ -464,7 +433,6 @@  static int write_partial_message_data(struct ceph_connection *con)
 	struct ceph_msg *msg = con->out_msg;
 	struct ceph_msg_data_cursor *cursor = &msg->cursor;
 	bool do_datacrc = !ceph_test_opt(from_msgr(con->msgr), NOCRC);
-	int more = MSG_MORE | MSG_SENDPAGE_NOTLAST;
 	u32 crc;
 
 	dout("%s %p msg %p\n", __func__, con, msg);
@@ -482,6 +450,10 @@  static int write_partial_message_data(struct ceph_connection *con)
 	 */
 	crc = do_datacrc ? le32_to_cpu(msg->footer.data_crc) : 0;
 	while (cursor->total_resid) {
+		struct bio_vec bvec;
+		struct msghdr msghdr = {
+			.msg_flags = MSG_SPLICE_PAGES,
+		};
 		struct page *page;
 		size_t page_offset;
 		size_t length;
@@ -493,10 +465,13 @@  static int write_partial_message_data(struct ceph_connection *con)
 		}
 
 		page = ceph_msg_data_next(cursor, &page_offset, &length);
-		if (length == cursor->total_resid)
-			more = MSG_MORE;
-		ret = ceph_tcp_sendpage(con->sock, page, page_offset, length,
-					more);
+		if (length != cursor->total_resid)
+			msghdr.msg_flags |= MSG_MORE;
+
+		bvec_set_page(&bvec, page, length, page_offset);
+		iov_iter_bvec(&msghdr.msg_iter, ITER_SOURCE, &bvec, 1, length);
+
+		ret = sock_sendmsg(con->sock, &msghdr);
 		if (ret <= 0) {
 			if (do_datacrc)
 				msg->footer.data_crc = cpu_to_le32(crc);
@@ -526,7 +501,10 @@  static int write_partial_message_data(struct ceph_connection *con)
  */
 static int write_partial_skip(struct ceph_connection *con)
 {
-	int more = MSG_MORE | MSG_SENDPAGE_NOTLAST;
+	struct bio_vec bvec;
+	struct msghdr msghdr = {
+		.msg_flags = MSG_SPLICE_PAGES | MSG_MORE,
+	};
 	int ret;
 
 	dout("%s %p %d left\n", __func__, con, con->v1.out_skip);
@@ -534,9 +512,11 @@  static int write_partial_skip(struct ceph_connection *con)
 		size_t size = min(con->v1.out_skip, (int)PAGE_SIZE);
 
 		if (size == con->v1.out_skip)
-			more = MSG_MORE;
-		ret = ceph_tcp_sendpage(con->sock, ceph_zero_page, 0, size,
-					more);
+			msghdr.msg_flags &= ~MSG_MORE;
+		bvec_set_page(&bvec, ZERO_PAGE(0), size, 0);
+		iov_iter_bvec(&msghdr.msg_iter, ITER_SOURCE, &bvec, 1, size);
+
+		ret = sock_sendmsg(con->sock, &msghdr);
 		if (ret <= 0)
 			goto out;
 		con->v1.out_skip -= ret;