[net-next,v6,12/18] siw: Inline do_tcp_sendpages()

Message ID: 20230411160902.4134381-13-dhowells@redhat.com
State: New
Series: splice, net: Replace sendpage with sendmsg(MSG_SPLICE_PAGES), part 1

Commit Message

David Howells April 11, 2023, 4:08 p.m. UTC
do_tcp_sendpages() is now just a small wrapper around tcp_sendmsg_locked(),
so inline it, allowing do_tcp_sendpages() to be removed.  This is part of
replacing ->sendpage() with a call to sendmsg() with MSG_SPLICE_PAGES set.

Signed-off-by: David Howells <dhowells@redhat.com>
Reviewed-by: Bernard Metzler <bmt@zurich.ibm.com>
cc: Jason Gunthorpe <jgg@ziepe.ca>
cc: Leon Romanovsky <leon@kernel.org>
cc: Tom Talpey <tom@talpey.com>
cc: "David S. Miller" <davem@davemloft.net>
cc: Eric Dumazet <edumazet@google.com>
cc: Jakub Kicinski <kuba@kernel.org>
cc: Paolo Abeni <pabeni@redhat.com>
cc: Jens Axboe <axboe@kernel.dk>
cc: Matthew Wilcox <willy@infradead.org>
cc: linux-rdma@vger.kernel.org
cc: netdev@vger.kernel.org
---

Notes:
    ver #6)
     - Don't clear MSG_SPLICE_PAGES on the last page.

 drivers/infiniband/sw/siw/siw_qp_tx.c | 17 ++++++++++++-----
 1 file changed, 12 insertions(+), 5 deletions(-)
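
By this point in the series, do_tcp_sendpages() had been reduced to roughly
the following wrapper (a sketch paraphrasing the earlier tcp.c conversion in
this series, not code carried by this patch):

ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
			 size_t size, int flags)
{
	struct bio_vec bvec;
	struct msghdr msg = { .msg_flags = flags | MSG_SPLICE_PAGES, };

	/* Describe the single page as a one-element bio_vec iterator. */
	bvec_set_page(&bvec, page, size, offset);
	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size);

	/* Map the old sendpage flag onto sendmsg semantics: NOTLAST
	 * means more data will follow.
	 */
	if (flags & MSG_SENDPAGE_NOTLAST)
		msg.msg_flags |= MSG_MORE;

	/* The caller already holds the socket lock. */
	return tcp_sendmsg_locked(sk, &msg, size);
}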

Comments

Tom Talpey April 11, 2023, 5:22 p.m. UTC | #1
On 4/11/2023 12:08 PM, David Howells wrote:
> do_tcp_sendpages() is now just a small wrapper around tcp_sendmsg_locked(),
> so inline it, allowing do_tcp_sendpages() to be removed.  This is part of
> replacing ->sendpage() with a call to sendmsg() with MSG_SPLICE_PAGES set.
> 
> [...]
> 
> Notes:
>      ver #6)
>       - Don't clear MSG_SPLICE_PAGES on the last page.

v6 looks good, thanks.

Reviewed-by: Tom Talpey <tom@talpey.com>



Patch

diff --git a/drivers/infiniband/sw/siw/siw_qp_tx.c b/drivers/infiniband/sw/siw/siw_qp_tx.c
index 05052b49107f..5552e60bb927 100644
--- a/drivers/infiniband/sw/siw/siw_qp_tx.c
+++ b/drivers/infiniband/sw/siw/siw_qp_tx.c
@@ -313,7 +313,7 @@ static int siw_tx_ctrl(struct siw_iwarp_tx *c_tx, struct socket *s,
 }
 
 /*
- * 0copy TCP transmit interface: Use do_tcp_sendpages.
+ * 0copy TCP transmit interface: Use MSG_SPLICE_PAGES.
  *
  * Using sendpage to push page by page appears to be less efficient
  * than using sendmsg, even if data are copied.
@@ -324,20 +324,27 @@ static int siw_tx_ctrl(struct siw_iwarp_tx *c_tx, struct socket *s,
 static int siw_tcp_sendpages(struct socket *s, struct page **page, int offset,
 			     size_t size)
 {
+	struct bio_vec bvec;
+	struct msghdr msg = {
+		.msg_flags = (MSG_MORE | MSG_DONTWAIT | MSG_SENDPAGE_NOTLAST |
+			      MSG_SPLICE_PAGES),
+	};
 	struct sock *sk = s->sk;
-	int i = 0, rv = 0, sent = 0,
-	    flags = MSG_MORE | MSG_DONTWAIT | MSG_SENDPAGE_NOTLAST;
+	int i = 0, rv = 0, sent = 0;
 
 	while (size) {
 		size_t bytes = min_t(size_t, PAGE_SIZE - offset, size);
 
 		if (size + offset <= PAGE_SIZE)
-			flags = MSG_MORE | MSG_DONTWAIT;
+			msg.msg_flags &= ~MSG_SENDPAGE_NOTLAST;
 
 		tcp_rate_check_app_limited(sk);
+		bvec_set_page(&bvec, page[i], bytes, offset);
+		iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size);
+
 try_page_again:
 		lock_sock(sk);
-		rv = do_tcp_sendpages(sk, page[i], offset, bytes, flags);
+		rv = tcp_sendmsg_locked(sk, &msg, size);
 		release_sock(sk);
 
 		if (rv > 0) {
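
Note on the converted loop: as the v6 change note says, only
MSG_SENDPAGE_NOTLAST is cleared once the remaining data fits within the
current page (size + offset <= PAGE_SIZE); MSG_SPLICE_PAGES and MSG_MORE
remain set for every call, so the final page is still spliced rather than
copied. As a hypothetical walk-through with offset = 0 and
size = PAGE_SIZE + 100: the first tcp_sendmsg_locked() call pushes PAGE_SIZE
bytes with MSG_SENDPAGE_NOTLAST still set; on the next iteration size has
dropped to 100, so the test clears MSG_SENDPAGE_NOTLAST and the remaining
100 bytes still go out via MSG_SPLICE_PAGES.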