@@ -953,7 +953,10 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
int pending;
if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
- MSG_CMSG_COMPAT))
+ MSG_EOR | MSG_CMSG_COMPAT))
return -EOPNOTSUPP;
+
+ if (!eor && (msg->msg_flags & MSG_EOR))
+ return -EINVAL;
ret = mutex_lock_interruptible(&tls_ctx->tx_lock);
@@ -1274,11 +1277,15 @@ static int tls_sw_do_sendpage(struct sock *sk, struct page *page,
int tls_sw_sendpage_locked(struct sock *sk, struct page *page,
int offset, size_t size, int flags)
{
- if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
+ if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL | MSG_EOR |
MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY |
MSG_NO_SHARED_FRAGS))
return -EOPNOTSUPP;
+ if ((flags & (MSG_MORE | MSG_SENDPAGE_NOTLAST)) &&
+ (flags & MSG_EOR))
+ return -EINVAL;
+
return tls_sw_do_sendpage(sk, page, offset, size, flags);
}
@@ -1288,10 +1295,14 @@ int tls_sw_sendpage(struct sock *sk, struct page *page,
struct tls_context *tls_ctx = tls_get_ctx(sk);
int ret;
- if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
+ if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL | MSG_EOR |
MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY))
return -EOPNOTSUPP;
+ if ((flags & (MSG_MORE | MSG_SENDPAGE_NOTLAST)) &&
+ (flags & MSG_EOR))
+ return -EINVAL;
+
ret = mutex_lock_interruptible(&tls_ctx->tx_lock);
if (ret)
return ret;
tls_sw_sendmsg() / tls_sw_do_sendpage() already handle MSG_MORE / MSG_SENDPAGE_NOTLAST, but bail out on MSG_EOR. Since MSG_EOR is essentially the opposite of MSG_MORE / MSG_SENDPAGE_NOTLAST, this patch adds MSG_EOR handling by treating it as the negation of MSG_MORE, and errors out if MSG_EOR is specified together with either MSG_MORE or MSG_SENDPAGE_NOTLAST. Cc: Jakub Kicinski <kuba@kernel.org> Cc: netdev@vger.kernel.org Signed-off-by: Hannes Reinecke <hare@suse.de> --- net/tls/tls_sw.c | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-)