diff mbox series

[2/2] net/packet: send and receive pkt with given vnet_hdr_sz

Message ID 1675946595-103034-3-git-send-email-amy.saq@antgroup.com (mailing list archive)
State Changes Requested
Delegated to: Netdev Maintainers
Headers show
Series net/packet: support of specifying virtio net header size | expand

Checks

Context Check Description
netdev/tree_selection success Guessed tree name to be net-next
netdev/fixes_present success Fixes tag not required for -next series
netdev/subject_prefix warning Target tree name not specified in the subject
netdev/cover_letter success Series has a cover letter
netdev/patch_count success Link
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit fail Errors and warnings before: 27 this patch: 85
netdev/cc_maintainers warning 3 maintainers not CCed: edumazet@google.com pabeni@redhat.com kuba@kernel.org
netdev/build_clang fail Errors and warnings before: 112 this patch: 7
netdev/module_param success Was 0 now: 0
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn fail Errors and warnings before: 45 this patch: 71
netdev/checkpatch warning WARNING: line length of 81 exceeds 80 columns WARNING: line length of 82 exceeds 80 columns WARNING: line length of 94 exceeds 80 columns
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0

Commit Message

沈安琪(凛玥) Feb. 9, 2023, 12:43 p.m. UTC
From: "Jianfeng Tan" <henry.tjf@antgroup.com>

When raw socket is used as the backend for kernel vhost, currently it
will regard the virtio net header as 10-byte, which is not always the
case since some virtio features need virtio net header other than
10-byte, such as mrg_rxbuf and VERSION_1 that both need 12-byte virtio
net header.

Instead of hardcoding virtio net header length to 10 bytes, tpacket_snd,
tpacket_rcv, packet_snd and packet_recvmsg now get the virtio net header
size that is recorded in packet_sock to indicate the exact virtio net
header size that virtio user actually prepares in the packets. By doing
so, it can fix the issue of incorrect mac header parsing when these
virtio features that need virtio net header other than 10-byte are
enabled.

Signed-off-by: Jianfeng Tan <henry.tjf@antgroup.com>
Co-developed-by: Anqi Shen <amy.saq@antgroup.com>
Signed-off-by: Anqi Shen <amy.saq@antgroup.com>
---
 net/packet/af_packet.c | 48 +++++++++++++++++++++++++++++++++---------------
 1 file changed, 33 insertions(+), 15 deletions(-)

Comments

Michael S. Tsirkin Feb. 9, 2023, 1:07 p.m. UTC | #1
On Thu, Feb 09, 2023 at 08:43:15PM +0800, 沈安琪(凛玥) wrote:
> From: "Jianfeng Tan" <henry.tjf@antgroup.com>
> 
> When raw socket is used as the backend for kernel vhost, currently it
> will regard the virtio net header as 10-byte, which is not always the
> case since some virtio features need virtio net header other than
> 10-byte, such as mrg_rxbuf and VERSION_1 that both need 12-byte virtio
> net header.
> 
> Instead of hardcoding virtio net header length to 10 bytes, tpacket_snd,
> tpacket_rcv, packet_snd and packet_recvmsg now get the virtio net header
> size that is recorded in packet_sock to indicate the exact virtio net
> header size that virtio user actually prepares in the packets. By doing
> so, it can fix the issue of incorrect mac header parsing when these
> virtio features that need virtio net header other than 10-byte are
> enable.
> 
> Signed-off-by: Jianfeng Tan <henry.tjf@antgroup.com>
> Co-developed-by: Anqi Shen <amy.saq@antgroup.com>
> Signed-off-by: Anqi Shen <amy.saq@antgroup.com>

Does it handle VERSION_1 though? That one is also LE.
Would it be better to pass a features bitmap instead?


> ---
>  net/packet/af_packet.c | 48 +++++++++++++++++++++++++++++++++---------------
>  1 file changed, 33 insertions(+), 15 deletions(-)
> 
> diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
> index ab37baf..4f49939 100644
> --- a/net/packet/af_packet.c
> +++ b/net/packet/af_packet.c
> @@ -2092,18 +2092,25 @@ static unsigned int run_filter(struct sk_buff *skb,
>  }
>  
>  static int packet_rcv_vnet(struct msghdr *msg, const struct sk_buff *skb,
> -			   size_t *len)
> +			   size_t *len, int vnet_hdr_sz)
>  {
>  	struct virtio_net_hdr vnet_hdr;
> +	int ret;
>  
> -	if (*len < sizeof(vnet_hdr))
> +	if (*len < vnet_hdr_sz)
>  		return -EINVAL;
> -	*len -= sizeof(vnet_hdr);
> +	*len -= vnet_hdr_sz;
>  
>  	if (virtio_net_hdr_from_skb(skb, &vnet_hdr, vio_le(), true, 0))
>  		return -EINVAL;
>  
> -	return memcpy_to_msg(msg, (void *)&vnet_hdr, sizeof(vnet_hdr));
> +	ret = memcpy_to_msg(msg, (void *)&vnet_hdr, sizeof(vnet_hdr));
> +
> +	/* reserve space for extra info in vnet_hdr if needed */
> +	if (ret == 0)
> +		iov_iter_advance(&msg->msg_iter, vnet_hdr_sz - sizeof(vnet_hdr));
> +
> +	return ret;
>  }
>  
>  /*
> @@ -2311,7 +2318,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
>  				       (maclen < 16 ? 16 : maclen)) +
>  				       po->tp_reserve;
>  		if (po->has_vnet_hdr) {
> -			netoff += sizeof(struct virtio_net_hdr);
> +			netoff += po->vnet_hdr_sz;
>  			do_vnet = true;
>  		}
>  		macoff = netoff - maclen;
> @@ -2552,16 +2559,23 @@ static int __packet_snd_vnet_parse(struct virtio_net_hdr *vnet_hdr, size_t len)
>  }
>  
>  static int packet_snd_vnet_parse(struct msghdr *msg, size_t *len,
> -				 struct virtio_net_hdr *vnet_hdr)
> +				 struct virtio_net_hdr *vnet_hdr, int vnet_hdr_sz)
>  {
> -	if (*len < sizeof(*vnet_hdr))
> +	int ret;
> +
> +	if (*len < vnet_hdr_sz)
>  		return -EINVAL;
> -	*len -= sizeof(*vnet_hdr);
> +	*len -= vnet_hdr_sz;
>  
>  	if (!copy_from_iter_full(vnet_hdr, sizeof(*vnet_hdr), &msg->msg_iter))
>  		return -EFAULT;
>  
> -	return __packet_snd_vnet_parse(vnet_hdr, *len);
> +	ret = __packet_snd_vnet_parse(vnet_hdr, *len);
> +
> +	/* move iter to point to the start of mac header */
> +	if (ret == 0)
> +		iov_iter_advance(&msg->msg_iter, vnet_hdr_sz - sizeof(struct virtio_net_hdr));
> +	return ret;
>  }
>  
>  static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
> @@ -2730,6 +2744,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
>  	int status = TP_STATUS_AVAILABLE;
>  	int hlen, tlen, copylen = 0;
>  	long timeo = 0;
> +	int vnet_hdr_sz;
>  
>  	mutex_lock(&po->pg_vec_lock);
>  
> @@ -2811,8 +2826,9 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
>  		tlen = dev->needed_tailroom;
>  		if (po->has_vnet_hdr) {
>  			vnet_hdr = data;
> -			data += sizeof(*vnet_hdr);
> -			tp_len -= sizeof(*vnet_hdr);
> +			vnet_hdr_sz = po->vnet_hdr_sz;
> +			data += vnet_hdr_sz;
> +			tp_len -= vnet_hdr_sz;
>  			if (tp_len < 0 ||
>  			    __packet_snd_vnet_parse(vnet_hdr, tp_len)) {
>  				tp_len = -EINVAL;
> @@ -2947,6 +2963,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
>  	int offset = 0;
>  	struct packet_sock *po = pkt_sk(sk);
>  	bool has_vnet_hdr = false;
> +	int vnet_hdr_sz;
>  	int hlen, tlen, linear;
>  	int extra_len = 0;
>  
> @@ -2991,7 +3008,8 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
>  	if (sock->type == SOCK_RAW)
>  		reserve = dev->hard_header_len;
>  	if (po->has_vnet_hdr) {
> -		err = packet_snd_vnet_parse(msg, &len, &vnet_hdr);
> +		vnet_hdr_sz = po->vnet_hdr_sz;
> +		err = packet_snd_vnet_parse(msg, &len, &vnet_hdr, vnet_hdr_sz);
>  		if (err)
>  			goto out_unlock;
>  		has_vnet_hdr = true;
> @@ -3068,7 +3086,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
>  		err = virtio_net_hdr_to_skb(skb, &vnet_hdr, vio_le());
>  		if (err)
>  			goto out_free;
> -		len += sizeof(vnet_hdr);
> +		len += vnet_hdr_sz;
>  		virtio_net_hdr_set_proto(skb, &vnet_hdr);
>  	}
>  
> @@ -3452,10 +3470,10 @@ static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
>  	packet_rcv_try_clear_pressure(pkt_sk(sk));
>  
>  	if (pkt_sk(sk)->has_vnet_hdr) {
> -		err = packet_rcv_vnet(msg, skb, &len);
> +		vnet_hdr_len = pkt_sk(sk)->vnet_hdr_sz;
> +		err = packet_rcv_vnet(msg, skb, &len, vnet_hdr_len);
>  		if (err)
>  			goto out_free;
> -		vnet_hdr_len = sizeof(struct virtio_net_hdr);
>  	}
>  
>  	/* You lose any data beyond the buffer you gave. If it worries
> -- 
> 1.8.3.1
沈安琪(凛玥) Feb. 10, 2023, 4:01 a.m. UTC | #2
在 2023/2/9 下午9:07, Michael S. Tsirkin 写道:
> On Thu, Feb 09, 2023 at 08:43:15PM +0800, 沈安琪(凛玥) wrote:
>> From: "Jianfeng Tan" <henry.tjf@antgroup.com>
>>
>> When raw socket is used as the backend for kernel vhost, currently it
>> will regard the virtio net header as 10-byte, which is not always the
>> case since some virtio features need virtio net header other than
>> 10-byte, such as mrg_rxbuf and VERSION_1 that both need 12-byte virtio
>> net header.
>>
>> Instead of hardcoding virtio net header length to 10 bytes, tpacket_snd,
>> tpacket_rcv, packet_snd and packet_recvmsg now get the virtio net header
>> size that is recorded in packet_sock to indicate the exact virtio net
>> header size that virtio user actually prepares in the packets. By doing
>> so, it can fix the issue of incorrect mac header parsing when these
>> virtio features that need virtio net header other than 10-byte are
>> enable.
>>
>> Signed-off-by: Jianfeng Tan <henry.tjf@antgroup.com>
>> Co-developed-by: Anqi Shen <amy.saq@antgroup.com>
>> Signed-off-by: Anqi Shen <amy.saq@antgroup.com>
> Does it handle VERSION_1 though? That one is also LE.
> Would it be better to pass a features bitmap instead?


Thanks for quick reply!

I am a little confused about what "LE" represents here?

For passing a features bitmap to af_packet here, our consideration is 
whether it will be too complicated for af_packet to understand the 
virtio features bitmap in order to get the vnet header size. For now, 
all the virtio features stuff is handled by vhost worker and af_packet 
actually does not need to know much about virtio features. Would it be 
better if we keep the virtio feature stuff in user-level and let 
user-level tell af_packet how much space it should reserve?

>
>
>> ---
>>   net/packet/af_packet.c | 48 +++++++++++++++++++++++++++++++++---------------
>>   1 file changed, 33 insertions(+), 15 deletions(-)
>>
>> diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
>> index ab37baf..4f49939 100644
>> --- a/net/packet/af_packet.c
>> +++ b/net/packet/af_packet.c
>> @@ -2092,18 +2092,25 @@ static unsigned int run_filter(struct sk_buff *skb,
>>   }
>>   
>>   static int packet_rcv_vnet(struct msghdr *msg, const struct sk_buff *skb,
>> -			   size_t *len)
>> +			   size_t *len, int vnet_hdr_sz)
>>   {
>>   	struct virtio_net_hdr vnet_hdr;
>> +	int ret;
>>   
>> -	if (*len < sizeof(vnet_hdr))
>> +	if (*len < vnet_hdr_sz)
>>   		return -EINVAL;
>> -	*len -= sizeof(vnet_hdr);
>> +	*len -= vnet_hdr_sz;
>>   
>>   	if (virtio_net_hdr_from_skb(skb, &vnet_hdr, vio_le(), true, 0))
>>   		return -EINVAL;
>>   
>> -	return memcpy_to_msg(msg, (void *)&vnet_hdr, sizeof(vnet_hdr));
>> +	ret = memcpy_to_msg(msg, (void *)&vnet_hdr, sizeof(vnet_hdr));
>> +
>> +	/* reserve space for extra info in vnet_hdr if needed */
>> +	if (ret == 0)
>> +		iov_iter_advance(&msg->msg_iter, vnet_hdr_sz - sizeof(vnet_hdr));
>> +
>> +	return ret;
>>   }
>>   
>>   /*
>> @@ -2311,7 +2318,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
>>   				       (maclen < 16 ? 16 : maclen)) +
>>   				       po->tp_reserve;
>>   		if (po->has_vnet_hdr) {
>> -			netoff += sizeof(struct virtio_net_hdr);
>> +			netoff += po->vnet_hdr_sz;
>>   			do_vnet = true;
>>   		}
>>   		macoff = netoff - maclen;
>> @@ -2552,16 +2559,23 @@ static int __packet_snd_vnet_parse(struct virtio_net_hdr *vnet_hdr, size_t len)
>>   }
>>   
>>   static int packet_snd_vnet_parse(struct msghdr *msg, size_t *len,
>> -				 struct virtio_net_hdr *vnet_hdr)
>> +				 struct virtio_net_hdr *vnet_hdr, int vnet_hdr_sz)
>>   {
>> -	if (*len < sizeof(*vnet_hdr))
>> +	int ret;
>> +
>> +	if (*len < vnet_hdr_sz)
>>   		return -EINVAL;
>> -	*len -= sizeof(*vnet_hdr);
>> +	*len -= vnet_hdr_sz;
>>   
>>   	if (!copy_from_iter_full(vnet_hdr, sizeof(*vnet_hdr), &msg->msg_iter))
>>   		return -EFAULT;
>>   
>> -	return __packet_snd_vnet_parse(vnet_hdr, *len);
>> +	ret = __packet_snd_vnet_parse(vnet_hdr, *len);
>> +
>> +	/* move iter to point to the start of mac header */
>> +	if (ret == 0)
>> +		iov_iter_advance(&msg->msg_iter, vnet_hdr_sz - sizeof(struct virtio_net_hdr));
>> +	return ret;
>>   }
>>   
>>   static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
>> @@ -2730,6 +2744,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
>>   	int status = TP_STATUS_AVAILABLE;
>>   	int hlen, tlen, copylen = 0;
>>   	long timeo = 0;
>> +	int vnet_hdr_sz;
>>   
>>   	mutex_lock(&po->pg_vec_lock);
>>   
>> @@ -2811,8 +2826,9 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
>>   		tlen = dev->needed_tailroom;
>>   		if (po->has_vnet_hdr) {
>>   			vnet_hdr = data;
>> -			data += sizeof(*vnet_hdr);
>> -			tp_len -= sizeof(*vnet_hdr);
>> +			vnet_hdr_sz = po->vnet_hdr_sz;
>> +			data += vnet_hdr_sz;
>> +			tp_len -= vnet_hdr_sz;
>>   			if (tp_len < 0 ||
>>   			    __packet_snd_vnet_parse(vnet_hdr, tp_len)) {
>>   				tp_len = -EINVAL;
>> @@ -2947,6 +2963,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
>>   	int offset = 0;
>>   	struct packet_sock *po = pkt_sk(sk);
>>   	bool has_vnet_hdr = false;
>> +	int vnet_hdr_sz;
>>   	int hlen, tlen, linear;
>>   	int extra_len = 0;
>>   
>> @@ -2991,7 +3008,8 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
>>   	if (sock->type == SOCK_RAW)
>>   		reserve = dev->hard_header_len;
>>   	if (po->has_vnet_hdr) {
>> -		err = packet_snd_vnet_parse(msg, &len, &vnet_hdr);
>> +		vnet_hdr_sz = po->vnet_hdr_sz;
>> +		err = packet_snd_vnet_parse(msg, &len, &vnet_hdr, vnet_hdr_sz);
>>   		if (err)
>>   			goto out_unlock;
>>   		has_vnet_hdr = true;
>> @@ -3068,7 +3086,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
>>   		err = virtio_net_hdr_to_skb(skb, &vnet_hdr, vio_le());
>>   		if (err)
>>   			goto out_free;
>> -		len += sizeof(vnet_hdr);
>> +		len += vnet_hdr_sz;
>>   		virtio_net_hdr_set_proto(skb, &vnet_hdr);
>>   	}
>>   
>> @@ -3452,10 +3470,10 @@ static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
>>   	packet_rcv_try_clear_pressure(pkt_sk(sk));
>>   
>>   	if (pkt_sk(sk)->has_vnet_hdr) {
>> -		err = packet_rcv_vnet(msg, skb, &len);
>> +		vnet_hdr_len = pkt_sk(sk)->vnet_hdr_sz;
>> +		err = packet_rcv_vnet(msg, skb, &len, vnet_hdr_len);
>>   		if (err)
>>   			goto out_free;
>> -		vnet_hdr_len = sizeof(struct virtio_net_hdr);
>>   	}
>>   
>>   	/* You lose any data beyond the buffer you gave. If it worries
>> -- 
>> 1.8.3.1
Michael S. Tsirkin Feb. 10, 2023, 8:10 a.m. UTC | #3
On Fri, Feb 10, 2023 at 12:01:03PM +0800, 沈安琪(凛玥) wrote:
> 
> 在 2023/2/9 下午9:07, Michael S. Tsirkin 写道:
> > On Thu, Feb 09, 2023 at 08:43:15PM +0800, 沈安琪(凛玥) wrote:
> > > From: "Jianfeng Tan" <henry.tjf@antgroup.com>
> > > 
> > > When raw socket is used as the backend for kernel vhost, currently it
> > > will regard the virtio net header as 10-byte, which is not always the
> > > case since some virtio features need virtio net header other than
> > > 10-byte, such as mrg_rxbuf and VERSION_1 that both need 12-byte virtio
> > > net header.
> > > 
> > > Instead of hardcoding virtio net header length to 10 bytes, tpacket_snd,
> > > tpacket_rcv, packet_snd and packet_recvmsg now get the virtio net header
> > > size that is recorded in packet_sock to indicate the exact virtio net
> > > header size that virtio user actually prepares in the packets. By doing
> > > so, it can fix the issue of incorrect mac header parsing when these
> > > virtio features that need virtio net header other than 10-byte are
> > > enable.
> > > 
> > > Signed-off-by: Jianfeng Tan <henry.tjf@antgroup.com>
> > > Co-developed-by: Anqi Shen <amy.saq@antgroup.com>
> > > Signed-off-by: Anqi Shen <amy.saq@antgroup.com>
> > Does it handle VERSION_1 though? That one is also LE.
> > Would it be better to pass a features bitmap instead?
> 
> 
> Thanks for quick reply!
> 
> I am a little confused abot what "LE" presents here?

LE == little_endian.
Little endian format.

> For passing a features bitmap to af_packet here, our consideration is
> whether it will be too complicated for af_packet to understand the virtio
> features bitmap in order to get the vnet header size. For now, all the
> virtio features stuff is handled by vhost worker and af_packet actually does
> not need to know much about virtio features. Would it be better if we keep
> the virtio feature stuff in user-level and let user-level tell af_packet how
> much space it should reserve?

Presumably, we'd add an API in include/linux/virtio_net.h ?

> > 
> > 
> > > ---
> > >   net/packet/af_packet.c | 48 +++++++++++++++++++++++++++++++++---------------
> > >   1 file changed, 33 insertions(+), 15 deletions(-)
> > > 
> > > diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
> > > index ab37baf..4f49939 100644
> > > --- a/net/packet/af_packet.c
> > > +++ b/net/packet/af_packet.c
> > > @@ -2092,18 +2092,25 @@ static unsigned int run_filter(struct sk_buff *skb,
> > >   }
> > >   static int packet_rcv_vnet(struct msghdr *msg, const struct sk_buff *skb,
> > > -			   size_t *len)
> > > +			   size_t *len, int vnet_hdr_sz)
> > >   {
> > >   	struct virtio_net_hdr vnet_hdr;
> > > +	int ret;
> > > -	if (*len < sizeof(vnet_hdr))
> > > +	if (*len < vnet_hdr_sz)
> > >   		return -EINVAL;
> > > -	*len -= sizeof(vnet_hdr);
> > > +	*len -= vnet_hdr_sz;
> > >   	if (virtio_net_hdr_from_skb(skb, &vnet_hdr, vio_le(), true, 0))
> > >   		return -EINVAL;
> > > -	return memcpy_to_msg(msg, (void *)&vnet_hdr, sizeof(vnet_hdr));
> > > +	ret = memcpy_to_msg(msg, (void *)&vnet_hdr, sizeof(vnet_hdr));
> > > +
> > > +	/* reserve space for extra info in vnet_hdr if needed */
> > > +	if (ret == 0)
> > > +		iov_iter_advance(&msg->msg_iter, vnet_hdr_sz - sizeof(vnet_hdr));
> > > +
> > > +	return ret;
> > >   }
> > >   /*
> > > @@ -2311,7 +2318,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
> > >   				       (maclen < 16 ? 16 : maclen)) +
> > >   				       po->tp_reserve;
> > >   		if (po->has_vnet_hdr) {
> > > -			netoff += sizeof(struct virtio_net_hdr);
> > > +			netoff += po->vnet_hdr_sz;
> > >   			do_vnet = true;
> > >   		}
> > >   		macoff = netoff - maclen;
> > > @@ -2552,16 +2559,23 @@ static int __packet_snd_vnet_parse(struct virtio_net_hdr *vnet_hdr, size_t len)
> > >   }
> > >   static int packet_snd_vnet_parse(struct msghdr *msg, size_t *len,
> > > -				 struct virtio_net_hdr *vnet_hdr)
> > > +				 struct virtio_net_hdr *vnet_hdr, int vnet_hdr_sz)
> > >   {
> > > -	if (*len < sizeof(*vnet_hdr))
> > > +	int ret;
> > > +
> > > +	if (*len < vnet_hdr_sz)
> > >   		return -EINVAL;
> > > -	*len -= sizeof(*vnet_hdr);
> > > +	*len -= vnet_hdr_sz;
> > >   	if (!copy_from_iter_full(vnet_hdr, sizeof(*vnet_hdr), &msg->msg_iter))
> > >   		return -EFAULT;
> > > -	return __packet_snd_vnet_parse(vnet_hdr, *len);
> > > +	ret = __packet_snd_vnet_parse(vnet_hdr, *len);
> > > +
> > > +	/* move iter to point to the start of mac header */
> > > +	if (ret == 0)
> > > +		iov_iter_advance(&msg->msg_iter, vnet_hdr_sz - sizeof(struct virtio_net_hdr));
> > > +	return ret;
> > >   }
> > >   static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
> > > @@ -2730,6 +2744,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
> > >   	int status = TP_STATUS_AVAILABLE;
> > >   	int hlen, tlen, copylen = 0;
> > >   	long timeo = 0;
> > > +	int vnet_hdr_sz;
> > >   	mutex_lock(&po->pg_vec_lock);
> > > @@ -2811,8 +2826,9 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
> > >   		tlen = dev->needed_tailroom;
> > >   		if (po->has_vnet_hdr) {
> > >   			vnet_hdr = data;
> > > -			data += sizeof(*vnet_hdr);
> > > -			tp_len -= sizeof(*vnet_hdr);
> > > +			vnet_hdr_sz = po->vnet_hdr_sz;
> > > +			data += vnet_hdr_sz;
> > > +			tp_len -= vnet_hdr_sz;
> > >   			if (tp_len < 0 ||
> > >   			    __packet_snd_vnet_parse(vnet_hdr, tp_len)) {
> > >   				tp_len = -EINVAL;
> > > @@ -2947,6 +2963,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
> > >   	int offset = 0;
> > >   	struct packet_sock *po = pkt_sk(sk);
> > >   	bool has_vnet_hdr = false;
> > > +	int vnet_hdr_sz;
> > >   	int hlen, tlen, linear;
> > >   	int extra_len = 0;
> > > @@ -2991,7 +3008,8 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
> > >   	if (sock->type == SOCK_RAW)
> > >   		reserve = dev->hard_header_len;
> > >   	if (po->has_vnet_hdr) {
> > > -		err = packet_snd_vnet_parse(msg, &len, &vnet_hdr);
> > > +		vnet_hdr_sz = po->vnet_hdr_sz;
> > > +		err = packet_snd_vnet_parse(msg, &len, &vnet_hdr, vnet_hdr_sz);
> > >   		if (err)
> > >   			goto out_unlock;
> > >   		has_vnet_hdr = true;
> > > @@ -3068,7 +3086,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
> > >   		err = virtio_net_hdr_to_skb(skb, &vnet_hdr, vio_le());
> > >   		if (err)
> > >   			goto out_free;
> > > -		len += sizeof(vnet_hdr);
> > > +		len += vnet_hdr_sz;
> > >   		virtio_net_hdr_set_proto(skb, &vnet_hdr);
> > >   	}
> > > @@ -3452,10 +3470,10 @@ static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
> > >   	packet_rcv_try_clear_pressure(pkt_sk(sk));
> > >   	if (pkt_sk(sk)->has_vnet_hdr) {
> > > -		err = packet_rcv_vnet(msg, skb, &len);
> > > +		vnet_hdr_len = pkt_sk(sk)->vnet_hdr_sz;
> > > +		err = packet_rcv_vnet(msg, skb, &len, vnet_hdr_len);
> > >   		if (err)
> > >   			goto out_free;
> > > -		vnet_hdr_len = sizeof(struct virtio_net_hdr);
> > >   	}
> > >   	/* You lose any data beyond the buffer you gave. If it worries
> > > -- 
> > > 1.8.3.1
Willem de Bruijn Feb. 10, 2023, 3:36 p.m. UTC | #4
Michael S. Tsirkin wrote:
> On Fri, Feb 10, 2023 at 12:01:03PM +0800, 沈安琪(凛玥) wrote:
> > 
> > 在 2023/2/9 下午9:07, Michael S. Tsirkin 写道:
> > > On Thu, Feb 09, 2023 at 08:43:15PM +0800, 沈安琪(凛玥) wrote:
> > > > From: "Jianfeng Tan" <henry.tjf@antgroup.com>
> > > > 
> > > > When raw socket is used as the backend for kernel vhost, currently it
> > > > will regard the virtio net header as 10-byte, which is not always the
> > > > case since some virtio features need virtio net header other than
> > > > 10-byte, such as mrg_rxbuf and VERSION_1 that both need 12-byte virtio
> > > > net header.
> > > > 
> > > > Instead of hardcoding virtio net header length to 10 bytes, tpacket_snd,
> > > > tpacket_rcv, packet_snd and packet_recvmsg now get the virtio net header
> > > > size that is recorded in packet_sock to indicate the exact virtio net
> > > > header size that virtio user actually prepares in the packets. By doing
> > > > so, it can fix the issue of incorrect mac header parsing when these
> > > > virtio features that need virtio net header other than 10-byte are
> > > > enable.
> > > > 
> > > > Signed-off-by: Jianfeng Tan <henry.tjf@antgroup.com>
> > > > Co-developed-by: Anqi Shen <amy.saq@antgroup.com>
> > > > Signed-off-by: Anqi Shen <amy.saq@antgroup.com>
> > > Does it handle VERSION_1 though? That one is also LE.
> > > Would it be better to pass a features bitmap instead?
> > 
> > 
> > Thanks for quick reply!
> > 
> > I am a little confused abot what "LE" presents here?
> 
> LE == little_endian.
> Little endian format.
> 
> > For passing a features bitmap to af_packet here, our consideration is
> > whether it will be too complicated for af_packet to understand the virtio
> > features bitmap in order to get the vnet header size. For now, all the
> > virtio features stuff is handled by vhost worker and af_packet actually does
> > not need to know much about virtio features. Would it be better if we keep
> > the virtio feature stuff in user-level and let user-level tell af_packet how
> > much space it should reserve?
> 
> Presumably, we'd add an API in include/linux/virtio_net.h ?

If packet sockets do not act on the contents of these extended fields,
it's probably better to leave them opaque.

This patch series probably should be one patch. The new option in the
first patch modifies the data path. Now there is one SHA1 at which its
behavior would not work.

> 
> > > 
> > > 
> > > > ---
> > > >   net/packet/af_packet.c | 48 +++++++++++++++++++++++++++++++++---------------
> > > >   1 file changed, 33 insertions(+), 15 deletions(-)
> > > > 
> > > > diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
> > > > index ab37baf..4f49939 100644
> > > > --- a/net/packet/af_packet.c
> > > > +++ b/net/packet/af_packet.c
> > > > @@ -2092,18 +2092,25 @@ static unsigned int run_filter(struct sk_buff *skb,
> > > >   }
> > > >   static int packet_rcv_vnet(struct msghdr *msg, const struct sk_buff *skb,
> > > > -			   size_t *len)
> > > > +			   size_t *len, int vnet_hdr_sz)
> > > >   {
> > > >   	struct virtio_net_hdr vnet_hdr;
> > > > +	int ret;
> > > > -	if (*len < sizeof(vnet_hdr))
> > > > +	if (*len < vnet_hdr_sz)
> > > >   		return -EINVAL;
> > > > -	*len -= sizeof(vnet_hdr);
> > > > +	*len -= vnet_hdr_sz;
> > > >   	if (virtio_net_hdr_from_skb(skb, &vnet_hdr, vio_le(), true, 0))
> > > >   		return -EINVAL;
> > > > -	return memcpy_to_msg(msg, (void *)&vnet_hdr, sizeof(vnet_hdr));
> > > > +	ret = memcpy_to_msg(msg, (void *)&vnet_hdr, sizeof(vnet_hdr));
> > > > +
> > > > +	/* reserve space for extra info in vnet_hdr if needed */
> > > > +	if (ret == 0)
> > > > +		iov_iter_advance(&msg->msg_iter, vnet_hdr_sz - sizeof(vnet_hdr));
> > > > +

How about

    struct virtio_net_hdr_mrg_rxbuf vnet_hdr { .num_buffers = 0 };

    ..

    ret = memcpy_to_msg(msg, (void *)&vnet_hdr, vnet_hdr_sz);

To avoid the iov_iter_advance and properly initialize those bytes.

> > > > +	return ret;
> > > >   }
> > > >   /*
> > > > @@ -2311,7 +2318,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
> > > >   				       (maclen < 16 ? 16 : maclen)) +
> > > >   				       po->tp_reserve;
> > > >   		if (po->has_vnet_hdr) {
> > > > -			netoff += sizeof(struct virtio_net_hdr);
> > > > +			netoff += po->vnet_hdr_sz;
> > > >   			do_vnet = true;
> > > >   		}
> > > >   		macoff = netoff - maclen;
> > > > @@ -2552,16 +2559,23 @@ static int __packet_snd_vnet_parse(struct virtio_net_hdr *vnet_hdr, size_t len)
> > > >   }
> > > >   static int packet_snd_vnet_parse(struct msghdr *msg, size_t *len,
> > > > -				 struct virtio_net_hdr *vnet_hdr)
> > > > +				 struct virtio_net_hdr *vnet_hdr, int vnet_hdr_sz)
> > > >   {
> > > > -	if (*len < sizeof(*vnet_hdr))
> > > > +	int ret;
> > > > +
> > > > +	if (*len < vnet_hdr_sz)
> > > >   		return -EINVAL;
> > > > -	*len -= sizeof(*vnet_hdr);
> > > > +	*len -= vnet_hdr_sz;
> > > >   	if (!copy_from_iter_full(vnet_hdr, sizeof(*vnet_hdr), &msg->msg_iter))
> > > >   		return -EFAULT;
> > > > -	return __packet_snd_vnet_parse(vnet_hdr, *len);
> > > > +	ret = __packet_snd_vnet_parse(vnet_hdr, *len);
> > > > +
> > > > +	/* move iter to point to the start of mac header */
> > > > +	if (ret == 0)
> > > > +		iov_iter_advance(&msg->msg_iter, vnet_hdr_sz - sizeof(struct virtio_net_hdr));
> > > > +	return ret;
> > > >   }
> > > >   static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
> > > > @@ -2730,6 +2744,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
> > > >   	int status = TP_STATUS_AVAILABLE;
> > > >   	int hlen, tlen, copylen = 0;
> > > >   	long timeo = 0;
> > > > +	int vnet_hdr_sz;
> > > >   	mutex_lock(&po->pg_vec_lock);
> > > > @@ -2811,8 +2826,9 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
> > > >   		tlen = dev->needed_tailroom;
> > > >   		if (po->has_vnet_hdr) {
> > > >   			vnet_hdr = data;
> > > > -			data += sizeof(*vnet_hdr);
> > > > -			tp_len -= sizeof(*vnet_hdr);
> > > > +			vnet_hdr_sz = po->vnet_hdr_sz;
> > > > +			data += vnet_hdr_sz;
> > > > +			tp_len -= vnet_hdr_sz;
> > > >   			if (tp_len < 0 ||
> > > >   			    __packet_snd_vnet_parse(vnet_hdr, tp_len)) {
> > > >   				tp_len = -EINVAL;
> > > > @@ -2947,6 +2963,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
> > > >   	int offset = 0;
> > > >   	struct packet_sock *po = pkt_sk(sk);
> > > >   	bool has_vnet_hdr = false;
> > > > +	int vnet_hdr_sz;
> > > >   	int hlen, tlen, linear;
> > > >   	int extra_len = 0;
> > > > @@ -2991,7 +3008,8 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
> > > >   	if (sock->type == SOCK_RAW)
> > > >   		reserve = dev->hard_header_len;
> > > >   	if (po->has_vnet_hdr) {
> > > > -		err = packet_snd_vnet_parse(msg, &len, &vnet_hdr);
> > > > +		vnet_hdr_sz = po->vnet_hdr_sz;
> > > > +		err = packet_snd_vnet_parse(msg, &len, &vnet_hdr, vnet_hdr_sz);
> > > >   		if (err)
> > > >   			goto out_unlock;
> > > >   		has_vnet_hdr = true;
> > > > @@ -3068,7 +3086,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
> > > >   		err = virtio_net_hdr_to_skb(skb, &vnet_hdr, vio_le());
> > > >   		if (err)
> > > >   			goto out_free;
> > > > -		len += sizeof(vnet_hdr);
> > > > +		len += vnet_hdr_sz;
> > > >   		virtio_net_hdr_set_proto(skb, &vnet_hdr);
> > > >   	}
> > > > @@ -3452,10 +3470,10 @@ static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
> > > >   	packet_rcv_try_clear_pressure(pkt_sk(sk));
> > > >   	if (pkt_sk(sk)->has_vnet_hdr) {
> > > > -		err = packet_rcv_vnet(msg, skb, &len);
> > > > +		vnet_hdr_len = pkt_sk(sk)->vnet_hdr_sz;
> > > > +		err = packet_rcv_vnet(msg, skb, &len, vnet_hdr_len);
> > > >   		if (err)
> > > >   			goto out_free;
> > > > -		vnet_hdr_len = sizeof(struct virtio_net_hdr);
> > > >   	}
> > > >   	/* You lose any data beyond the buffer you gave. If it worries
> > > > -- 
> > > > 1.8.3.1
>
Willem de Bruijn Feb. 10, 2023, 3:39 p.m. UTC | #5
Michael S. Tsirkin wrote:
> On Fri, Feb 10, 2023 at 12:01:03PM +0800, 沈安琪(凛玥) wrote:
> > 
> > 在 2023/2/9 下午9:07, Michael S. Tsirkin 写道:
> > > On Thu, Feb 09, 2023 at 08:43:15PM +0800, 沈安琪(凛玥) wrote:
> > > > From: "Jianfeng Tan" <henry.tjf@antgroup.com>
> > > > 
> > > > When raw socket is used as the backend for kernel vhost, currently it
> > > > will regard the virtio net header as 10-byte, which is not always the
> > > > case since some virtio features need virtio net header other than
> > > > 10-byte, such as mrg_rxbuf and VERSION_1 that both need 12-byte virtio
> > > > net header.
> > > > 
> > > > Instead of hardcoding virtio net header length to 10 bytes, tpacket_snd,
> > > > tpacket_rcv, packet_snd and packet_recvmsg now get the virtio net header
> > > > size that is recorded in packet_sock to indicate the exact virtio net
> > > > header size that virtio user actually prepares in the packets. By doing
> > > > so, it can fix the issue of incorrect mac header parsing when these
> > > > virtio features that need virtio net header other than 10-byte are
> > > > enable.
> > > > 
> > > > Signed-off-by: Jianfeng Tan <henry.tjf@antgroup.com>
> > > > Co-developed-by: Anqi Shen <amy.saq@antgroup.com>
> > > > Signed-off-by: Anqi Shen <amy.saq@antgroup.com>
> > > Does it handle VERSION_1 though? That one is also LE.
> > > Would it be better to pass a features bitmap instead?
> > 
> > 
> > Thanks for quick reply!
> > 
> > I am a little confused abot what "LE" presents here?
> 
> LE == little_endian.
> Little endian format.
> 
> > For passing a features bitmap to af_packet here, our consideration is
> > whether it will be too complicated for af_packet to understand the virtio
> > features bitmap in order to get the vnet header size. For now, all the
> > virtio features stuff is handled by vhost worker and af_packet actually does
> > not need to know much about virtio features. Would it be better if we keep
> > the virtio feature stuff in user-level and let user-level tell af_packet how
> > much space it should reserve?
> 
> Presumably, we'd add an API in include/linux/virtio_net.h ?

Better leave this opaque to packet sockets if they won't act on this
type info.
 
This patch series probably should be a single patch btw. As else the
socket option introduced in the first is broken at that commit, since
the behavior is only introduced in patch 2.

> > > 
> > > 
> > > > ---
> > > >   net/packet/af_packet.c | 48 +++++++++++++++++++++++++++++++++---------------
> > > >   1 file changed, 33 insertions(+), 15 deletions(-)
> > > > 
> > > > diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
> > > > index ab37baf..4f49939 100644
> > > > --- a/net/packet/af_packet.c
> > > > +++ b/net/packet/af_packet.c
> > > > @@ -2092,18 +2092,25 @@ static unsigned int run_filter(struct sk_buff *skb,
> > > >   }
> > > >   static int packet_rcv_vnet(struct msghdr *msg, const struct sk_buff *skb,
> > > > -			   size_t *len)
> > > > +			   size_t *len, int vnet_hdr_sz)
> > > >   {
> > > >   	struct virtio_net_hdr vnet_hdr;
> > > > +	int ret;
> > > > -	if (*len < sizeof(vnet_hdr))
> > > > +	if (*len < vnet_hdr_sz)
> > > >   		return -EINVAL;
> > > > -	*len -= sizeof(vnet_hdr);
> > > > +	*len -= vnet_hdr_sz;
> > > >   	if (virtio_net_hdr_from_skb(skb, &vnet_hdr, vio_le(), true, 0))
> > > >   		return -EINVAL;
> > > > -	return memcpy_to_msg(msg, (void *)&vnet_hdr, sizeof(vnet_hdr));
> > > > +	ret = memcpy_to_msg(msg, (void *)&vnet_hdr, sizeof(vnet_hdr));
> > > > +
> > > > +	/* reserve space for extra info in vnet_hdr if needed */
> > > > +	if (ret == 0)
> > > > +		iov_iter_advance(&msg->msg_iter, vnet_hdr_sz - sizeof(vnet_hdr));
> > > > +

How about

    struct virtio_net_hdr_mrg_rxbuf vnet_hdr = { .num_buffers = 0 };

    ..

    ret = memcpy_to_msg(msg, (void *)&vnet_hdr, vnet_hdr_sz);

To initialize data correctly and avoid the extra function call.

> > > > +	return ret;
> > > >   }
> > > >   /*
> > > > @@ -2311,7 +2318,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
> > > >   				       (maclen < 16 ? 16 : maclen)) +
> > > >   				       po->tp_reserve;
> > > >   		if (po->has_vnet_hdr) {
> > > > -			netoff += sizeof(struct virtio_net_hdr);
> > > > +			netoff += po->vnet_hdr_sz;
> > > >   			do_vnet = true;
> > > >   		}
> > > >   		macoff = netoff - maclen;
> > > > @@ -2552,16 +2559,23 @@ static int __packet_snd_vnet_parse(struct virtio_net_hdr *vnet_hdr, size_t len)
> > > >   }
> > > >   static int packet_snd_vnet_parse(struct msghdr *msg, size_t *len,
> > > > -				 struct virtio_net_hdr *vnet_hdr)
> > > > +				 struct virtio_net_hdr *vnet_hdr, int vnet_hdr_sz)
> > > >   {
> > > > -	if (*len < sizeof(*vnet_hdr))
> > > > +	int ret;
> > > > +
> > > > +	if (*len < vnet_hdr_sz)
> > > >   		return -EINVAL;
> > > > -	*len -= sizeof(*vnet_hdr);
> > > > +	*len -= vnet_hdr_sz;
> > > >   	if (!copy_from_iter_full(vnet_hdr, sizeof(*vnet_hdr), &msg->msg_iter))
> > > >   		return -EFAULT;
> > > > -	return __packet_snd_vnet_parse(vnet_hdr, *len);
> > > > +	ret = __packet_snd_vnet_parse(vnet_hdr, *len);
> > > > +
> > > > +	/* move iter to point to the start of mac header */
> > > > +	if (ret == 0)
> > > > +		iov_iter_advance(&msg->msg_iter, vnet_hdr_sz - sizeof(struct virtio_net_hdr));
> > > > +	return ret;
> > > >   }
> > > >   static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
> > > > @@ -2730,6 +2744,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
> > > >   	int status = TP_STATUS_AVAILABLE;
> > > >   	int hlen, tlen, copylen = 0;
> > > >   	long timeo = 0;
> > > > +	int vnet_hdr_sz;
> > > >   	mutex_lock(&po->pg_vec_lock);
> > > > @@ -2811,8 +2826,9 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
> > > >   		tlen = dev->needed_tailroom;
> > > >   		if (po->has_vnet_hdr) {
> > > >   			vnet_hdr = data;
> > > > -			data += sizeof(*vnet_hdr);
> > > > -			tp_len -= sizeof(*vnet_hdr);
> > > > +			vnet_hdr_sz = po->vnet_hdr_sz;
> > > > +			data += vnet_hdr_sz;
> > > > +			tp_len -= vnet_hdr_sz;
> > > >   			if (tp_len < 0 ||
> > > >   			    __packet_snd_vnet_parse(vnet_hdr, tp_len)) {
> > > >   				tp_len = -EINVAL;
> > > > @@ -2947,6 +2963,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
> > > >   	int offset = 0;
> > > >   	struct packet_sock *po = pkt_sk(sk);
> > > >   	bool has_vnet_hdr = false;
> > > > +	int vnet_hdr_sz;
> > > >   	int hlen, tlen, linear;
> > > >   	int extra_len = 0;
> > > > @@ -2991,7 +3008,8 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
> > > >   	if (sock->type == SOCK_RAW)
> > > >   		reserve = dev->hard_header_len;
> > > >   	if (po->has_vnet_hdr) {
> > > > -		err = packet_snd_vnet_parse(msg, &len, &vnet_hdr);
> > > > +		vnet_hdr_sz = po->vnet_hdr_sz;
> > > > +		err = packet_snd_vnet_parse(msg, &len, &vnet_hdr, vnet_hdr_sz);
> > > >   		if (err)
> > > >   			goto out_unlock;
> > > >   		has_vnet_hdr = true;
> > > > @@ -3068,7 +3086,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
> > > >   		err = virtio_net_hdr_to_skb(skb, &vnet_hdr, vio_le());
> > > >   		if (err)
> > > >   			goto out_free;
> > > > -		len += sizeof(vnet_hdr);
> > > > +		len += vnet_hdr_sz;
> > > >   		virtio_net_hdr_set_proto(skb, &vnet_hdr);
> > > >   	}
> > > > @@ -3452,10 +3470,10 @@ static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
> > > >   	packet_rcv_try_clear_pressure(pkt_sk(sk));
> > > >   	if (pkt_sk(sk)->has_vnet_hdr) {
> > > > -		err = packet_rcv_vnet(msg, skb, &len);
> > > > +		vnet_hdr_len = pkt_sk(sk)->vnet_hdr_sz;
> > > > +		err = packet_rcv_vnet(msg, skb, &len, vnet_hdr_len);
> > > >   		if (err)
> > > >   			goto out_free;
> > > > -		vnet_hdr_len = sizeof(struct virtio_net_hdr);
> > > >   	}
> > > >   	/* You lose any data beyond the buffer you gave. If it worries
> > > > -- 
> > > > 1.8.3.1
>
Michael S. Tsirkin Feb. 12, 2023, 9:54 a.m. UTC | #6
On Fri, Feb 10, 2023 at 10:36:16AM -0500, Willem de Bruijn wrote:
> Michael S. Tsirkin wrote:
> > On Fri, Feb 10, 2023 at 12:01:03PM +0800, 沈安琪(凛玥) wrote:
> > > 
> > > 在 2023/2/9 下午9:07, Michael S. Tsirkin 写道:
> > > > On Thu, Feb 09, 2023 at 08:43:15PM +0800, 沈安琪(凛玥) wrote:
> > > > > From: "Jianfeng Tan" <henry.tjf@antgroup.com>
> > > > > 
> > > > > When raw socket is used as the backend for kernel vhost, currently it
> > > > > will regard the virtio net header as 10-byte, which is not always the
> > > > > case since some virtio features need virtio net header other than
> > > > > 10-byte, such as mrg_rxbuf and VERSION_1 that both need 12-byte virtio
> > > > > net header.
> > > > > 
> > > > > Instead of hardcoding virtio net header length to 10 bytes, tpacket_snd,
> > > > > tpacket_rcv, packet_snd and packet_recvmsg now get the virtio net header
> > > > > size that is recorded in packet_sock to indicate the exact virtio net
> > > > > header size that virtio user actually prepares in the packets. By doing
> > > > > so, it can fix the issue of incorrect mac header parsing when these
> > > > > virtio features that need virtio net header other than 10-byte are
> > > > > enable.
> > > > > 
> > > > > Signed-off-by: Jianfeng Tan <henry.tjf@antgroup.com>
> > > > > Co-developed-by: Anqi Shen <amy.saq@antgroup.com>
> > > > > Signed-off-by: Anqi Shen <amy.saq@antgroup.com>
> > > > Does it handle VERSION_1 though? That one is also LE.
> > > > Would it be better to pass a features bitmap instead?
> > > 
> > > 
> > > Thanks for quick reply!
> > > 
> > > I am a little confused abot what "LE" presents here?
> > 
> > LE == little_endian.
> > Little endian format.
> > 
> > > For passing a features bitmap to af_packet here, our consideration is
> > > whether it will be too complicated for af_packet to understand the virtio
> > > features bitmap in order to get the vnet header size. For now, all the
> > > virtio features stuff is handled by vhost worker and af_packet actually does
> > > not need to know much about virtio features. Would it be better if we keep
> > > the virtio feature stuff in user-level and let user-level tell af_packet how
> > > much space it should reserve?
> > 
> > Presumably, we'd add an API in include/linux/virtio_net.h ?
> 
> If packet sockets do not act on the contents of these extended fields,
> it's probably better to leave them opaque.

Well. If the point is to support VERSION_1 which the
commit log says it is, the switch to LE format then does affect
all fields, both used and unused by the packet socket.



> This patch series probably should be one patch. The new option in the
> first patch modifies the data path. Now there is one SHA1 at which its
> behavior would not work.
> 
> > 
> > > > 
> > > > 
> > > > > ---
> > > > >   net/packet/af_packet.c | 48 +++++++++++++++++++++++++++++++++---------------
> > > > >   1 file changed, 33 insertions(+), 15 deletions(-)
> > > > > 
> > > > > diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
> > > > > index ab37baf..4f49939 100644
> > > > > --- a/net/packet/af_packet.c
> > > > > +++ b/net/packet/af_packet.c
> > > > > @@ -2092,18 +2092,25 @@ static unsigned int run_filter(struct sk_buff *skb,
> > > > >   }
> > > > >   static int packet_rcv_vnet(struct msghdr *msg, const struct sk_buff *skb,
> > > > > -			   size_t *len)
> > > > > +			   size_t *len, int vnet_hdr_sz)
> > > > >   {
> > > > >   	struct virtio_net_hdr vnet_hdr;
> > > > > +	int ret;
> > > > > -	if (*len < sizeof(vnet_hdr))
> > > > > +	if (*len < vnet_hdr_sz)
> > > > >   		return -EINVAL;
> > > > > -	*len -= sizeof(vnet_hdr);
> > > > > +	*len -= vnet_hdr_sz;
> > > > >   	if (virtio_net_hdr_from_skb(skb, &vnet_hdr, vio_le(), true, 0))
> > > > >   		return -EINVAL;
> > > > > -	return memcpy_to_msg(msg, (void *)&vnet_hdr, sizeof(vnet_hdr));
> > > > > +	ret = memcpy_to_msg(msg, (void *)&vnet_hdr, sizeof(vnet_hdr));
> > > > > +
> > > > > +	/* reserve space for extra info in vnet_hdr if needed */
> > > > > +	if (ret == 0)
> > > > > +		iov_iter_advance(&msg->msg_iter, vnet_hdr_sz - sizeof(vnet_hdr));
> > > > > +
> 
> How about
> 
>     struct virtio_net_hdr_mrg_rxbuf vnet_hdr { .num_buffers = 0 };
> 
>     ..
> 
>     ret = memcpy_to_msg(msg, (void *)&vnet_hdr, vnet_hdr_sz);
> 
> To avoid the iov_iter_advance and properly initialize those bytes.
> 
> > > > > +	return ret;
> > > > >   }
> > > > >   /*
> > > > > @@ -2311,7 +2318,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
> > > > >   				       (maclen < 16 ? 16 : maclen)) +
> > > > >   				       po->tp_reserve;
> > > > >   		if (po->has_vnet_hdr) {
> > > > > -			netoff += sizeof(struct virtio_net_hdr);
> > > > > +			netoff += po->vnet_hdr_sz;
> > > > >   			do_vnet = true;
> > > > >   		}
> > > > >   		macoff = netoff - maclen;
> > > > > @@ -2552,16 +2559,23 @@ static int __packet_snd_vnet_parse(struct virtio_net_hdr *vnet_hdr, size_t len)
> > > > >   }
> > > > >   static int packet_snd_vnet_parse(struct msghdr *msg, size_t *len,
> > > > > -				 struct virtio_net_hdr *vnet_hdr)
> > > > > +				 struct virtio_net_hdr *vnet_hdr, int vnet_hdr_sz)
> > > > >   {
> > > > > -	if (*len < sizeof(*vnet_hdr))
> > > > > +	int ret;
> > > > > +
> > > > > +	if (*len < vnet_hdr_sz)
> > > > >   		return -EINVAL;
> > > > > -	*len -= sizeof(*vnet_hdr);
> > > > > +	*len -= vnet_hdr_sz;
> > > > >   	if (!copy_from_iter_full(vnet_hdr, sizeof(*vnet_hdr), &msg->msg_iter))
> > > > >   		return -EFAULT;
> > > > > -	return __packet_snd_vnet_parse(vnet_hdr, *len);
> > > > > +	ret = __packet_snd_vnet_parse(vnet_hdr, *len);
> > > > > +
> > > > > +	/* move iter to point to the start of mac header */
> > > > > +	if (ret == 0)
> > > > > +		iov_iter_advance(&msg->msg_iter, vnet_hdr_sz - sizeof(struct virtio_net_hdr));
> > > > > +	return ret;
> > > > >   }
> > > > >   static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
> > > > > @@ -2730,6 +2744,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
> > > > >   	int status = TP_STATUS_AVAILABLE;
> > > > >   	int hlen, tlen, copylen = 0;
> > > > >   	long timeo = 0;
> > > > > +	int vnet_hdr_sz;
> > > > >   	mutex_lock(&po->pg_vec_lock);
> > > > > @@ -2811,8 +2826,9 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
> > > > >   		tlen = dev->needed_tailroom;
> > > > >   		if (po->has_vnet_hdr) {
> > > > >   			vnet_hdr = data;
> > > > > -			data += sizeof(*vnet_hdr);
> > > > > -			tp_len -= sizeof(*vnet_hdr);
> > > > > +			vnet_hdr_sz = po->vnet_hdr_sz;
> > > > > +			data += vnet_hdr_sz;
> > > > > +			tp_len -= vnet_hdr_sz;
> > > > >   			if (tp_len < 0 ||
> > > > >   			    __packet_snd_vnet_parse(vnet_hdr, tp_len)) {
> > > > >   				tp_len = -EINVAL;
> > > > > @@ -2947,6 +2963,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
> > > > >   	int offset = 0;
> > > > >   	struct packet_sock *po = pkt_sk(sk);
> > > > >   	bool has_vnet_hdr = false;
> > > > > +	int vnet_hdr_sz;
> > > > >   	int hlen, tlen, linear;
> > > > >   	int extra_len = 0;
> > > > > @@ -2991,7 +3008,8 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
> > > > >   	if (sock->type == SOCK_RAW)
> > > > >   		reserve = dev->hard_header_len;
> > > > >   	if (po->has_vnet_hdr) {
> > > > > -		err = packet_snd_vnet_parse(msg, &len, &vnet_hdr);
> > > > > +		vnet_hdr_sz = po->vnet_hdr_sz;
> > > > > +		err = packet_snd_vnet_parse(msg, &len, &vnet_hdr, vnet_hdr_sz);
> > > > >   		if (err)
> > > > >   			goto out_unlock;
> > > > >   		has_vnet_hdr = true;
> > > > > @@ -3068,7 +3086,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
> > > > >   		err = virtio_net_hdr_to_skb(skb, &vnet_hdr, vio_le());
> > > > >   		if (err)
> > > > >   			goto out_free;
> > > > > -		len += sizeof(vnet_hdr);
> > > > > +		len += vnet_hdr_sz;
> > > > >   		virtio_net_hdr_set_proto(skb, &vnet_hdr);
> > > > >   	}
> > > > > @@ -3452,10 +3470,10 @@ static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
> > > > >   	packet_rcv_try_clear_pressure(pkt_sk(sk));
> > > > >   	if (pkt_sk(sk)->has_vnet_hdr) {
> > > > > -		err = packet_rcv_vnet(msg, skb, &len);
> > > > > +		vnet_hdr_len = pkt_sk(sk)->vnet_hdr_sz;
> > > > > +		err = packet_rcv_vnet(msg, skb, &len, vnet_hdr_len);
> > > > >   		if (err)
> > > > >   			goto out_free;
> > > > > -		vnet_hdr_len = sizeof(struct virtio_net_hdr);
> > > > >   	}
> > > > >   	/* You lose any data beyond the buffer you gave. If it worries
> > > > > -- 
> > > > > 1.8.3.1
> > 
>
沈安琪(凛玥) Feb. 13, 2023, 11:06 a.m. UTC | #7
在 2023/2/10 下午11:39, Willem de Bruijn 写道:
> Michael S. Tsirkin wrote:
>> On Fri, Feb 10, 2023 at 12:01:03PM +0800, 沈安琪(凛玥) wrote:
>>> 在 2023/2/9 下午9:07, Michael S. Tsirkin 写道:
>>>> On Thu, Feb 09, 2023 at 08:43:15PM +0800, 沈安琪(凛玥) wrote:
>>>>> From: "Jianfeng Tan" <henry.tjf@antgroup.com>
>>>>>
>>>>> When raw socket is used as the backend for kernel vhost, currently it
>>>>> will regard the virtio net header as 10-byte, which is not always the
>>>>> case since some virtio features need virtio net header other than
>>>>> 10-byte, such as mrg_rxbuf and VERSION_1 that both need 12-byte virtio
>>>>> net header.
>>>>>
>>>>> Instead of hardcoding virtio net header length to 10 bytes, tpacket_snd,
>>>>> tpacket_rcv, packet_snd and packet_recvmsg now get the virtio net header
>>>>> size that is recorded in packet_sock to indicate the exact virtio net
>>>>> header size that virtio user actually prepares in the packets. By doing
>>>>> so, it can fix the issue of incorrect mac header parsing when these
>>>>> virtio features that need virtio net header other than 10-byte are
>>>>> enable.
>>>>>
>>>>> Signed-off-by: Jianfeng Tan <henry.tjf@antgroup.com>
>>>>> Co-developed-by: Anqi Shen <amy.saq@antgroup.com>
>>>>> Signed-off-by: Anqi Shen <amy.saq@antgroup.com>
>>>> Does it handle VERSION_1 though? That one is also LE.
>>>> Would it be better to pass a features bitmap instead?
>>>
>>> Thanks for quick reply!
>>>
>>> I am a little confused abot what "LE" presents here?
>> LE == little_endian.
>> Little endian format.
>>
>>> For passing a features bitmap to af_packet here, our consideration is
>>> whether it will be too complicated for af_packet to understand the virtio
>>> features bitmap in order to get the vnet header size. For now, all the
>>> virtio features stuff is handled by vhost worker and af_packet actually does
>>> not need to know much about virtio features. Would it be better if we keep
>>> the virtio feature stuff in user-level and let user-level tell af_packet how
>>> much space it should reserve?
>> Presumably, we'd add an API in include/linux/virtio_net.h ?
> Better leave this opaque to packet sockets if they won't act on this
> type info.
>   
> This patch series probably should be a single patch btw. As else the
> socket option introduced in the first is broken at that commit, since
> the behavior is only introduced in patch 2.


Good point, will merge this patch series into one patch.


Thanks for Michael's enlightening advice; we plan to modify the current UAPI 
change of adding an extra socket option from setting only the vnet header 
size to setting a bit-map of virtio features, and implement another 
helper function in include/linux/virtio_net.h to parse the feature 
bit-map. In this case, packet sockets have no need to understand the 
feature bit-map but only pass this bit-map to the virtio_net helper and get 
back the information, such as the vnet header size, that they need.

This change will make the new UAPI more general and avoid further 
modification if there are more virtio features to support in the future.


>
>>>>
>>>>> ---
>>>>>    net/packet/af_packet.c | 48 +++++++++++++++++++++++++++++++++---------------
>>>>>    1 file changed, 33 insertions(+), 15 deletions(-)
>>>>>
>>>>> diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
>>>>> index ab37baf..4f49939 100644
>>>>> --- a/net/packet/af_packet.c
>>>>> +++ b/net/packet/af_packet.c
>>>>> @@ -2092,18 +2092,25 @@ static unsigned int run_filter(struct sk_buff *skb,
>>>>>    }
>>>>>    static int packet_rcv_vnet(struct msghdr *msg, const struct sk_buff *skb,
>>>>> -			   size_t *len)
>>>>> +			   size_t *len, int vnet_hdr_sz)
>>>>>    {
>>>>>    	struct virtio_net_hdr vnet_hdr;
>>>>> +	int ret;
>>>>> -	if (*len < sizeof(vnet_hdr))
>>>>> +	if (*len < vnet_hdr_sz)
>>>>>    		return -EINVAL;
>>>>> -	*len -= sizeof(vnet_hdr);
>>>>> +	*len -= vnet_hdr_sz;
>>>>>    	if (virtio_net_hdr_from_skb(skb, &vnet_hdr, vio_le(), true, 0))
>>>>>    		return -EINVAL;
>>>>> -	return memcpy_to_msg(msg, (void *)&vnet_hdr, sizeof(vnet_hdr));
>>>>> +	ret = memcpy_to_msg(msg, (void *)&vnet_hdr, sizeof(vnet_hdr));
>>>>> +
>>>>> +	/* reserve space for extra info in vnet_hdr if needed */
>>>>> +	if (ret == 0)
>>>>> +		iov_iter_advance(&msg->msg_iter, vnet_hdr_sz - sizeof(vnet_hdr));
>>>>> +
> How about
>
>      struct virtio_net_hdr_mrg_rxbuf vnet_hdr = { .num_buffers = 0 };
>
>      ..
>
>      ret = memcpy_to_msg(msg, (void *)&vnet_hdr, vnet_hdr_sz);
>
> To initialize data correctly and avoid the extra function call.


It makes sense. Thanks for pointing out and we will address it in the 
next version of this patch.


>
>>>>> +	return ret;
>>>>>    }
>>>>>    /*
>>>>> @@ -2311,7 +2318,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
>>>>>    				       (maclen < 16 ? 16 : maclen)) +
>>>>>    				       po->tp_reserve;
>>>>>    		if (po->has_vnet_hdr) {
>>>>> -			netoff += sizeof(struct virtio_net_hdr);
>>>>> +			netoff += po->vnet_hdr_sz;
>>>>>    			do_vnet = true;
>>>>>    		}
>>>>>    		macoff = netoff - maclen;
>>>>> @@ -2552,16 +2559,23 @@ static int __packet_snd_vnet_parse(struct virtio_net_hdr *vnet_hdr, size_t len)
>>>>>    }
>>>>>    static int packet_snd_vnet_parse(struct msghdr *msg, size_t *len,
>>>>> -				 struct virtio_net_hdr *vnet_hdr)
>>>>> +				 struct virtio_net_hdr *vnet_hdr, int vnet_hdr_sz)
>>>>>    {
>>>>> -	if (*len < sizeof(*vnet_hdr))
>>>>> +	int ret;
>>>>> +
>>>>> +	if (*len < vnet_hdr_sz)
>>>>>    		return -EINVAL;
>>>>> -	*len -= sizeof(*vnet_hdr);
>>>>> +	*len -= vnet_hdr_sz;
>>>>>    	if (!copy_from_iter_full(vnet_hdr, sizeof(*vnet_hdr), &msg->msg_iter))
>>>>>    		return -EFAULT;
>>>>> -	return __packet_snd_vnet_parse(vnet_hdr, *len);
>>>>> +	ret = __packet_snd_vnet_parse(vnet_hdr, *len);
>>>>> +
>>>>> +	/* move iter to point to the start of mac header */
>>>>> +	if (ret == 0)
>>>>> +		iov_iter_advance(&msg->msg_iter, vnet_hdr_sz - sizeof(struct virtio_net_hdr));
>>>>> +	return ret;
>>>>>    }
>>>>>    static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
>>>>> @@ -2730,6 +2744,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
>>>>>    	int status = TP_STATUS_AVAILABLE;
>>>>>    	int hlen, tlen, copylen = 0;
>>>>>    	long timeo = 0;
>>>>> +	int vnet_hdr_sz;
>>>>>    	mutex_lock(&po->pg_vec_lock);
>>>>> @@ -2811,8 +2826,9 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
>>>>>    		tlen = dev->needed_tailroom;
>>>>>    		if (po->has_vnet_hdr) {
>>>>>    			vnet_hdr = data;
>>>>> -			data += sizeof(*vnet_hdr);
>>>>> -			tp_len -= sizeof(*vnet_hdr);
>>>>> +			vnet_hdr_sz = po->vnet_hdr_sz;
>>>>> +			data += vnet_hdr_sz;
>>>>> +			tp_len -= vnet_hdr_sz;
>>>>>    			if (tp_len < 0 ||
>>>>>    			    __packet_snd_vnet_parse(vnet_hdr, tp_len)) {
>>>>>    				tp_len = -EINVAL;
>>>>> @@ -2947,6 +2963,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
>>>>>    	int offset = 0;
>>>>>    	struct packet_sock *po = pkt_sk(sk);
>>>>>    	bool has_vnet_hdr = false;
>>>>> +	int vnet_hdr_sz;
>>>>>    	int hlen, tlen, linear;
>>>>>    	int extra_len = 0;
>>>>> @@ -2991,7 +3008,8 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
>>>>>    	if (sock->type == SOCK_RAW)
>>>>>    		reserve = dev->hard_header_len;
>>>>>    	if (po->has_vnet_hdr) {
>>>>> -		err = packet_snd_vnet_parse(msg, &len, &vnet_hdr);
>>>>> +		vnet_hdr_sz = po->vnet_hdr_sz;
>>>>> +		err = packet_snd_vnet_parse(msg, &len, &vnet_hdr, vnet_hdr_sz);
>>>>>    		if (err)
>>>>>    			goto out_unlock;
>>>>>    		has_vnet_hdr = true;
>>>>> @@ -3068,7 +3086,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
>>>>>    		err = virtio_net_hdr_to_skb(skb, &vnet_hdr, vio_le());
>>>>>    		if (err)
>>>>>    			goto out_free;
>>>>> -		len += sizeof(vnet_hdr);
>>>>> +		len += vnet_hdr_sz;
>>>>>    		virtio_net_hdr_set_proto(skb, &vnet_hdr);
>>>>>    	}
>>>>> @@ -3452,10 +3470,10 @@ static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
>>>>>    	packet_rcv_try_clear_pressure(pkt_sk(sk));
>>>>>    	if (pkt_sk(sk)->has_vnet_hdr) {
>>>>> -		err = packet_rcv_vnet(msg, skb, &len);
>>>>> +		vnet_hdr_len = pkt_sk(sk)->vnet_hdr_sz;
>>>>> +		err = packet_rcv_vnet(msg, skb, &len, vnet_hdr_len);
>>>>>    		if (err)
>>>>>    			goto out_free;
>>>>> -		vnet_hdr_len = sizeof(struct virtio_net_hdr);
>>>>>    	}
>>>>>    	/* You lose any data beyond the buffer you gave. If it worries
>>>>> -- 
>>>>> 1.8.3.1
Willem de Bruijn Feb. 14, 2023, 2:28 p.m. UTC | #8
沈安琪(凛玥) wrote:
> 
> 在 2023/2/10 下午11:39, Willem de Bruijn 写道:
> > Michael S. Tsirkin wrote:
> >> On Fri, Feb 10, 2023 at 12:01:03PM +0800, 沈安琪(凛玥) wrote:
> >>> 在 2023/2/9 下午9:07, Michael S. Tsirkin 写道:
> >>>> On Thu, Feb 09, 2023 at 08:43:15PM +0800, 沈安琪(凛玥) wrote:
> >>>>> From: "Jianfeng Tan" <henry.tjf@antgroup.com>
> >>>>>
> >>>>> When raw socket is used as the backend for kernel vhost, currently it
> >>>>> will regard the virtio net header as 10-byte, which is not always the
> >>>>> case since some virtio features need virtio net header other than
> >>>>> 10-byte, such as mrg_rxbuf and VERSION_1 that both need 12-byte virtio
> >>>>> net header.
> >>>>>
> >>>>> Instead of hardcoding virtio net header length to 10 bytes, tpacket_snd,
> >>>>> tpacket_rcv, packet_snd and packet_recvmsg now get the virtio net header
> >>>>> size that is recorded in packet_sock to indicate the exact virtio net
> >>>>> header size that virtio user actually prepares in the packets. By doing
> >>>>> so, it can fix the issue of incorrect mac header parsing when these
> >>>>> virtio features that need virtio net header other than 10-byte are
> >>>>> enable.
> >>>>>
> >>>>> Signed-off-by: Jianfeng Tan <henry.tjf@antgroup.com>
> >>>>> Co-developed-by: Anqi Shen <amy.saq@antgroup.com>
> >>>>> Signed-off-by: Anqi Shen <amy.saq@antgroup.com>
> >>>> Does it handle VERSION_1 though? That one is also LE.
> >>>> Would it be better to pass a features bitmap instead?
> >>>
> >>> Thanks for quick reply!
> >>>
> >>> I am a little confused abot what "LE" presents here?
> >> LE == little_endian.
> >> Little endian format.
> >>
> >>> For passing a features bitmap to af_packet here, our consideration is
> >>> whether it will be too complicated for af_packet to understand the virtio
> >>> features bitmap in order to get the vnet header size. For now, all the
> >>> virtio features stuff is handled by vhost worker and af_packet actually does
> >>> not need to know much about virtio features. Would it be better if we keep
> >>> the virtio feature stuff in user-level and let user-level tell af_packet how
> >>> much space it should reserve?
> >> Presumably, we'd add an API in include/linux/virtio_net.h ?
> > Better leave this opaque to packet sockets if they won't act on this
> > type info.
> >   
> > This patch series probably should be a single patch btw. As else the
> > socket option introduced in the first is broken at that commit, since
> > the behavior is only introduced in patch 2.
> 
> 
> Good point, will merge this patch series into one patch.
> 
> 
> Thanks for Michael's enlightening advice, we plan to modify current UAPI 
> change of adding an extra socketopt from only setting vnet header size 
> only to setting a bit-map of virtio features, and implement another 
> helper function in include/linux/virtio_net.h to parse the feature 
> bit-map. In this case, packet sockets have no need to understand the 
> feature bit-map but only pass this bit-map to virtio_net helper and get 
> back the information, such as vnet header size, it needs.
> 
> This change will make the new UAPI more general and avoid further 
> modification if there are more virtio features to support in the future.
>

Please also comment how these UAPI extension are intended to be used.
As that use is not included in this initial patch series.

If the only intended user is vhost-net, we can consider not exposing
outside the kernel at all. That makes it easier to iterate if
necessary (no stable ABI) and avoids accidentally opening up new
avenues for bugs and exploits (syzkaller has a history with
virtio_net_header options).
沈安琪(凛玥) Feb. 21, 2023, 9:40 a.m. UTC | #9
在 2023/2/14 下午10:28, Willem de Bruijn 写道:
> 沈安琪(凛玥) wrote:
>> 在 2023/2/10 下午11:39, Willem de Bruijn 写道:
>>> Michael S. Tsirkin wrote:
>>>> On Fri, Feb 10, 2023 at 12:01:03PM +0800, 沈安琪(凛玥) wrote:
>>>>> 在 2023/2/9 下午9:07, Michael S. Tsirkin 写道:
>>>>>> On Thu, Feb 09, 2023 at 08:43:15PM +0800, 沈安琪(凛玥) wrote:
>>>>>>> From: "Jianfeng Tan" <henry.tjf@antgroup.com>
>>>>>>>
>>>>>>> When raw socket is used as the backend for kernel vhost, currently it
>>>>>>> will regard the virtio net header as 10-byte, which is not always the
>>>>>>> case since some virtio features need virtio net header other than
>>>>>>> 10-byte, such as mrg_rxbuf and VERSION_1 that both need 12-byte virtio
>>>>>>> net header.
>>>>>>>
>>>>>>> Instead of hardcoding virtio net header length to 10 bytes, tpacket_snd,
>>>>>>> tpacket_rcv, packet_snd and packet_recvmsg now get the virtio net header
>>>>>>> size that is recorded in packet_sock to indicate the exact virtio net
>>>>>>> header size that virtio user actually prepares in the packets. By doing
>>>>>>> so, it can fix the issue of incorrect mac header parsing when these
>>>>>>> virtio features that need virtio net header other than 10-byte are
>>>>>>> enable.
>>>>>>>
>>>>>>> Signed-off-by: Jianfeng Tan <henry.tjf@antgroup.com>
>>>>>>> Co-developed-by: Anqi Shen <amy.saq@antgroup.com>
>>>>>>> Signed-off-by: Anqi Shen <amy.saq@antgroup.com>
>>>>>> Does it handle VERSION_1 though? That one is also LE.
>>>>>> Would it be better to pass a features bitmap instead?
>>>>> Thanks for quick reply!
>>>>>
>>>>> I am a little confused abot what "LE" presents here?
>>>> LE == little_endian.
>>>> Little endian format.
>>>>
>>>>> For passing a features bitmap to af_packet here, our consideration is
>>>>> whether it will be too complicated for af_packet to understand the virtio
>>>>> features bitmap in order to get the vnet header size. For now, all the
>>>>> virtio features stuff is handled by vhost worker and af_packet actually does
>>>>> not need to know much about virtio features. Would it be better if we keep
>>>>> the virtio feature stuff in user-level and let user-level tell af_packet how
>>>>> much space it should reserve?
>>>> Presumably, we'd add an API in include/linux/virtio_net.h ?
>>> Better leave this opaque to packet sockets if they won't act on this
>>> type info.
>>>    
>>> This patch series probably should be a single patch btw. As else the
>>> socket option introduced in the first is broken at that commit, since
>>> the behavior is only introduced in patch 2.
>>
>> Good point, will merge this patch series into one patch.
>>
>>
>> Thanks for Michael's enlightening advice, we plan to modify current UAPI
>> change of adding an extra socketopt from only setting vnet header size
>> only to setting a bit-map of virtio features, and implement another
>> helper function in include/linux/virtio_net.h to parse the feature
>> bit-map. In this case, packet sockets have no need to understand the
>> feature bit-map but only pass this bit-map to virtio_net helper and get
>> back the information, such as vnet header size, it needs.
>>
>> This change will make the new UAPI more general and avoid further
>> modification if there are more virtio features to support in the future.
>>
> Please also comment how these UAPI extension are intended to be used.
> As that use is not included in this initial patch series.
>
> If the only intended user is vhost-net, we can consider not exposing
> outside the kernel at all. That makes it easier to iterate if
> necessary (no stable ABI) and avoids accidentally opening up new
> avenues for bugs and exploits (syzkaller has a history with
> virtio_net_header options).


Our concern is that there seems to be no solution other than a UAPI to let 
packet sockets know the vnet header size they should use.

Receiving packets in vhost driver, implemented in drivers/vhost/net.c: 
1109 handle_rx(), will abstract the backend device it uses and directly 
invoke the corresponding socket ops with no extra information indicating 
it is invoked by vhost worker. Vhost worker actually does not know the 
type of backend device it is using; only virtio-user knows what type of 
backend device it uses. Therefore, it seems impossible to let vhost set 
the vnet header information to the target backend device.

Tap, another kind of backend device vhost may use, lets virtio-user set 
whether it needs vnet header and how long the vnet header is through 
ioctl. (implemented in drivers/net/tap.c:1066)

In this case, we wonder whether we should align with what tap does and 
set vnet hdr size through setsockopt for packet_sockets.

We really appreciate suggestions on any potential approaches to pass 
this vnet header size information from virtio-user to the packet socket.
Willem de Bruijn Feb. 21, 2023, 3:03 p.m. UTC | #10
沈安琪(凛玥) wrote:
> 
> 在 2023/2/14 下午10:28, Willem de Bruijn 写道:
> > 沈安琪(凛玥) wrote:
> >> 在 2023/2/10 下午11:39, Willem de Bruijn 写道:
> >>> Michael S. Tsirkin wrote:
> >>>> On Fri, Feb 10, 2023 at 12:01:03PM +0800, 沈安琪(凛玥) wrote:
> >>>>> 在 2023/2/9 下午9:07, Michael S. Tsirkin 写道:
> >>>>>> On Thu, Feb 09, 2023 at 08:43:15PM +0800, 沈安琪(凛玥) wrote:
> >>>>>>> From: "Jianfeng Tan" <henry.tjf@antgroup.com>
> >>>>>>>
> >>>>>>> When raw socket is used as the backend for kernel vhost, currently it
> >>>>>>> will regard the virtio net header as 10-byte, which is not always the
> >>>>>>> case since some virtio features need virtio net header other than
> >>>>>>> 10-byte, such as mrg_rxbuf and VERSION_1 that both need 12-byte virtio
> >>>>>>> net header.
> >>>>>>>
> >>>>>>> Instead of hardcoding virtio net header length to 10 bytes, tpacket_snd,
> >>>>>>> tpacket_rcv, packet_snd and packet_recvmsg now get the virtio net header
> >>>>>>> size that is recorded in packet_sock to indicate the exact virtio net
> >>>>>>> header size that virtio user actually prepares in the packets. By doing
> >>>>>>> so, it can fix the issue of incorrect mac header parsing when these
> >>>>>>> virtio features that need virtio net header other than 10-byte are
> >>>>>>> enable.
> >>>>>>>
> >>>>>>> Signed-off-by: Jianfeng Tan <henry.tjf@antgroup.com>
> >>>>>>> Co-developed-by: Anqi Shen <amy.saq@antgroup.com>
> >>>>>>> Signed-off-by: Anqi Shen <amy.saq@antgroup.com>
> >>>>>> Does it handle VERSION_1 though? That one is also LE.
> >>>>>> Would it be better to pass a features bitmap instead?
> >>>>> Thanks for quick reply!
> >>>>>
> >>>>> I am a little confused abot what "LE" presents here?
> >>>> LE == little_endian.
> >>>> Little endian format.
> >>>>
> >>>>> For passing a features bitmap to af_packet here, our consideration is
> >>>>> whether it will be too complicated for af_packet to understand the virtio
> >>>>> features bitmap in order to get the vnet header size. For now, all the
> >>>>> virtio features stuff is handled by vhost worker and af_packet actually does
> >>>>> not need to know much about virtio features. Would it be better if we keep
> >>>>> the virtio feature stuff in user-level and let user-level tell af_packet how
> >>>>> much space it should reserve?
> >>>> Presumably, we'd add an API in include/linux/virtio_net.h ?
> >>> Better leave this opaque to packet sockets if they won't act on this
> >>> type info.
> >>>    
> >>> This patch series probably should be a single patch btw. As else the
> >>> socket option introduced in the first is broken at that commit, since
> >>> the behavior is only introduced in patch 2.
> >>
> >> Good point, will merge this patch series into one patch.
> >>
> >>
> >> Thanks for Michael's enlightening advice, we plan to modify current UAPI
> >> change of adding an extra socketopt from only setting vnet header size
> >> only to setting a bit-map of virtio features, and implement another
> >> helper function in include/linux/virtio_net.h to parse the feature
> >> bit-map. In this case, packet sockets have no need to understand the
> >> feature bit-map but only pass this bit-map to virtio_net helper and get
> >> back the information, such as vnet header size, it needs.
> >>
> >> This change will make the new UAPI more general and avoid further
> >> modification if there are more virtio features to support in the future.
> >>
> > Please also comment how these UAPI extension are intended to be used.
> > As that use is not included in this initial patch series.
> >
> > If the only intended user is vhost-net, we can consider not exposing
> > outside the kernel at all. That makes it easier to iterate if
> > necessary (no stable ABI) and avoids accidentally opening up new
> > avenues for bugs and exploits (syzkaller has a history with
> > virtio_net_header options).
> 
> 
> Our concern is, it seems there is no other solution than uapi to let 
> packet sockets know the vnet header size they should use.
> 
> Receiving packets in vhost driver, implemented in drivers/vhost/net.c: 
> 1109 handle_rx(), will abstract the backend device it uses and directly 
> invoke the corresponding socket ops with no extra information indicating 
> it is invoked by vhost worker. Vhost worker actually does not know the 
> type of backend device it is using; only virito-user knows what type of 
> backend device it uses. Therefore, it seems impossible to let vhost set 
> the vnet header information to the target backend device.
> 
> Tap, another kind of backend device vhost may use, lets virtio-user set 
> whether it needs vnet header and how long the vnet header is through 
> ioctl. (implemented in drivers/net/tap.c:1066)
> 
> In this case, we wonder whether we should align with what tap does and 
> set vnet hdr size through setsockopt for packet_sockets.
> 
> We really appreciate suggestions on if any, potential approachs to pass 
> this vnet header size information from virtio-user to packet-socket.

You're right. This is configured from userspace before the FD is passed
to vhost-net, so indeed this will require packet socket UAPI support.
沈安琪(凛玥) Feb. 22, 2023, 8:04 a.m. UTC | #11
在 2023/2/21 下午11:03, Willem de Bruijn 写道:
> 沈安琪(凛玥) wrote:
>> 在 2023/2/14 下午10:28, Willem de Bruijn 写道:
>>> 沈安琪(凛玥) wrote:
>>>> 在 2023/2/10 下午11:39, Willem de Bruijn 写道:
>>>>> Michael S. Tsirkin wrote:
>>>>>> On Fri, Feb 10, 2023 at 12:01:03PM +0800, 沈安琪(凛玥) wrote:
>>>>>>> 在 2023/2/9 下午9:07, Michael S. Tsirkin 写道:
>>>>>>>> On Thu, Feb 09, 2023 at 08:43:15PM +0800, 沈安琪(凛玥) wrote:
>>>>>>>>> From: "Jianfeng Tan" <henry.tjf@antgroup.com>
>>>>>>>>>
>>>>>>>>> When raw socket is used as the backend for kernel vhost, currently it
>>>>>>>>> will regard the virtio net header as 10-byte, which is not always the
>>>>>>>>> case since some virtio features need virtio net header other than
>>>>>>>>> 10-byte, such as mrg_rxbuf and VERSION_1 that both need 12-byte virtio
>>>>>>>>> net header.
>>>>>>>>>
>>>>>>>>> Instead of hardcoding virtio net header length to 10 bytes, tpacket_snd,
>>>>>>>>> tpacket_rcv, packet_snd and packet_recvmsg now get the virtio net header
>>>>>>>>> size that is recorded in packet_sock to indicate the exact virtio net
>>>>>>>>> header size that virtio user actually prepares in the packets. By doing
>>>>>>>>> so, it can fix the issue of incorrect mac header parsing when these
>>>>>>>>> virtio features that need virtio net header other than 10-byte are
>>>>>>>>> enable.
>>>>>>>>>
>>>>>>>>> Signed-off-by: Jianfeng Tan <henry.tjf@antgroup.com>
>>>>>>>>> Co-developed-by: Anqi Shen <amy.saq@antgroup.com>
>>>>>>>>> Signed-off-by: Anqi Shen <amy.saq@antgroup.com>
>>>>>>>> Does it handle VERSION_1 though? That one is also LE.
>>>>>>>> Would it be better to pass a features bitmap instead?
>>>>>>> Thanks for quick reply!
>>>>>>>
>>>>>>> I am a little confused abot what "LE" presents here?
>>>>>> LE == little_endian.
>>>>>> Little endian format.
>>>>>>
>>>>>>> For passing a features bitmap to af_packet here, our consideration is
>>>>>>> whether it will be too complicated for af_packet to understand the virtio
>>>>>>> features bitmap in order to get the vnet header size. For now, all the
>>>>>>> virtio features stuff is handled by vhost worker and af_packet actually does
>>>>>>> not need to know much about virtio features. Would it be better if we keep
>>>>>>> the virtio feature stuff in user-level and let user-level tell af_packet how
>>>>>>> much space it should reserve?
>>>>>> Presumably, we'd add an API in include/linux/virtio_net.h ?
>>>>> Better leave this opaque to packet sockets if they won't act on this
>>>>> type info.
>>>>>     
>>>>> This patch series probably should be a single patch btw. As else the
>>>>> socket option introduced in the first is broken at that commit, since
>>>>> the behavior is only introduced in patch 2.
>>>> Good point, will merge this patch series into one patch.
>>>>
>>>>
>>>> Thanks for Michael's enlightening advice, we plan to modify current UAPI
>>>> change of adding an extra socketopt from only setting vnet header size
>>>> only to setting a bit-map of virtio features, and implement another
>>>> helper function in include/linux/virtio_net.h to parse the feature
>>>> bit-map. In this case, packet sockets have no need to understand the
>>>> feature bit-map but only pass this bit-map to virtio_net helper and get
>>>> back the information, such as vnet header size, it needs.
>>>>
>>>> This change will make the new UAPI more general and avoid further
>>>> modification if there are more virtio features to support in the future.
>>>>
>>> Please also comment how these UAPI extension are intended to be used.
>>> As that use is not included in this initial patch series.
>>>
>>> If the only intended user is vhost-net, we can consider not exposing
>>> outside the kernel at all. That makes it easier to iterate if
>>> necessary (no stable ABI) and avoids accidentally opening up new
>>> avenues for bugs and exploits (syzkaller has a history with
>>> virtio_net_header options).
>>
>> Our concern is, it seems there is no other solution than uapi to let
>> packet sockets know the vnet header size they should use.
>>
>> Receiving packets in vhost driver, implemented in drivers/vhost/net.c:
>> 1109 handle_rx(), will abstract the backend device it uses and directly
>> invoke the corresponding socket ops with no extra information indicating
>> it is invoked by vhost worker. Vhost worker actually does not know the
>> type of backend device it is using; only virito-user knows what type of
>> backend device it uses. Therefore, it seems impossible to let vhost set
>> the vnet header information to the target backend device.
>>
>> Tap, another kind of backend device vhost may use, lets virtio-user set
>> whether it needs vnet header and how long the vnet header is through
>> ioctl. (implemented in drivers/net/tap.c:1066)
>>
>> In this case, we wonder whether we should align with what tap does and
>> set vnet hdr size through setsockopt for packet_sockets.
>>
>> We really appreciate suggestions on if any, potential approachs to pass
>> this vnet header size information from virtio-user to packet-socket.
> You're right. This is configured from userspace before the FD is passed
> to vhost-net, so indeed this will require packet socket UAPI support.


Thanks for quick reply. We will go with adding an extra UAPI here then.


Another discussion for designing this UAPI is, whether it will be better 
to support setting only vnet header size, just like what TAP does in its 
ioctl, or to support setting a virtio feature bit-map.


UAPI setting only vnet header size

Pros:

1. It aligns with how other virito backend devices communicate with 
virtio-user

2. We can use the holes in struct packet_socket 
(net/packet/internal.h:120) to record the extra information since the 
size info only takes 8 bits.

Cons:

1. There may be more information that virtio-user needs to communicate 
with the packet socket in the future, which would require adding more UAPI support here.

To Michael: Is there any other information that the backend device needs 
and will be given by virtio-user?


UAPI setting a virtio feature bit-map

Pros:

1. It is more general and may reduce future UAPI changes.

Cons:

1. A virtio feature bit-map needs 64 bits, which needs to add an extra 
field in packet_sock struct

2. Virtio-user needs to be aware that using a packet socket as the backend 
supports a different approach to negotiating the vnet header size.


We really appreciate any suggestion or discussion on this design choice 
of UAPI.
Michael S. Tsirkin Feb. 22, 2023, 11:37 a.m. UTC | #12
On Wed, Feb 22, 2023 at 04:04:34PM +0800, 沈安琪(凛玥) wrote:
> 
> 在 2023/2/21 下午11:03, Willem de Bruijn 写道:
> > 沈安琪(凛玥) wrote:
> > > 在 2023/2/14 下午10:28, Willem de Bruijn 写道:
> > > > 沈安琪(凛玥) wrote:
> > > > > 在 2023/2/10 下午11:39, Willem de Bruijn 写道:
> > > > > > Michael S. Tsirkin wrote:
> > > > > > > On Fri, Feb 10, 2023 at 12:01:03PM +0800, 沈安琪(凛玥) wrote:
> > > > > > > > 在 2023/2/9 下午9:07, Michael S. Tsirkin 写道:
> > > > > > > > > On Thu, Feb 09, 2023 at 08:43:15PM +0800, 沈安琪(凛玥) wrote:
> > > > > > > > > > From: "Jianfeng Tan" <henry.tjf@antgroup.com>
> > > > > > > > > > 
> > > > > > > > > > When raw socket is used as the backend for kernel vhost, currently it
> > > > > > > > > > will regard the virtio net header as 10-byte, which is not always the
> > > > > > > > > > case since some virtio features need virtio net header other than
> > > > > > > > > > 10-byte, such as mrg_rxbuf and VERSION_1 that both need 12-byte virtio
> > > > > > > > > > net header.
> > > > > > > > > > 
> > > > > > > > > > Instead of hardcoding virtio net header length to 10 bytes, tpacket_snd,
> > > > > > > > > > tpacket_rcv, packet_snd and packet_recvmsg now get the virtio net header
> > > > > > > > > > size that is recorded in packet_sock to indicate the exact virtio net
> > > > > > > > > > header size that virtio user actually prepares in the packets. By doing
> > > > > > > > > > so, it can fix the issue of incorrect mac header parsing when these
> > > > > > > > > > virtio features that need virtio net header other than 10-byte are
> > > > > > > > > > enable.
> > > > > > > > > > 
> > > > > > > > > > Signed-off-by: Jianfeng Tan <henry.tjf@antgroup.com>
> > > > > > > > > > Co-developed-by: Anqi Shen <amy.saq@antgroup.com>
> > > > > > > > > > Signed-off-by: Anqi Shen <amy.saq@antgroup.com>
> > > > > > > > > Does it handle VERSION_1 though? That one is also LE.
> > > > > > > > > Would it be better to pass a features bitmap instead?
> > > > > > > > Thanks for quick reply!
> > > > > > > > 
> > > > > > > > I am a little confused abot what "LE" presents here?
> > > > > > > LE == little_endian.
> > > > > > > Little endian format.
> > > > > > > 
> > > > > > > > For passing a features bitmap to af_packet here, our consideration is
> > > > > > > > whether it will be too complicated for af_packet to understand the virtio
> > > > > > > > features bitmap in order to get the vnet header size. For now, all the
> > > > > > > > virtio features stuff is handled by vhost worker and af_packet actually does
> > > > > > > > not need to know much about virtio features. Would it be better if we keep
> > > > > > > > the virtio feature stuff in user-level and let user-level tell af_packet how
> > > > > > > > much space it should reserve?
> > > > > > > Presumably, we'd add an API in include/linux/virtio_net.h ?
> > > > > > Better leave this opaque to packet sockets if they won't act on this
> > > > > > type info.
> > > > > > This patch series probably should be a single patch btw. As else the
> > > > > > socket option introduced in the first is broken at that commit, since
> > > > > > the behavior is only introduced in patch 2.
> > > > > Good point, will merge this patch series into one patch.
> > > > > 
> > > > > 
> > > > > Thanks for Michael's enlightening advice, we plan to modify current UAPI
> > > > > change of adding an extra socketopt from only setting vnet header size
> > > > > only to setting a bit-map of virtio features, and implement another
> > > > > helper function in include/linux/virtio_net.h to parse the feature
> > > > > bit-map. In this case, packet sockets have no need to understand the
> > > > > feature bit-map but only pass this bit-map to virtio_net helper and get
> > > > > back the information, such as vnet header size, it needs.
> > > > > 
> > > > > This change will make the new UAPI more general and avoid further
> > > > > modification if there are more virtio features to support in the future.
> > > > > 
> > > > Please also comment how these UAPI extension are intended to be used.
> > > > As that use is not included in this initial patch series.
> > > > 
> > > > If the only intended user is vhost-net, we can consider not exposing
> > > > outside the kernel at all. That makes it easier to iterate if
> > > > necessary (no stable ABI) and avoids accidentally opening up new
> > > > avenues for bugs and exploits (syzkaller has a history with
> > > > virtio_net_header options).
> > > 
> > > Our concern is, it seems there is no other solution than uapi to let
> > > packet sockets know the vnet header size they should use.
> > > 
> > > Receiving packets in vhost driver, implemented in drivers/vhost/net.c:
> > > 1109 handle_rx(), will abstract the backend device it uses and directly
> > > invoke the corresponding socket ops with no extra information indicating
> > > it is invoked by vhost worker. Vhost worker actually does not know the
> > > type of backend device it is using; only virito-user knows what type of
> > > backend device it uses. Therefore, it seems impossible to let vhost set
> > > the vnet header information to the target backend device.
> > > 
> > > Tap, another kind of backend device vhost may use, lets virtio-user set
> > > whether it needs vnet header and how long the vnet header is through
> > > ioctl. (implemented in drivers/net/tap.c:1066)
> > > 
> > > In this case, we wonder whether we should align with what tap does and
> > > set vnet hdr size through setsockopt for packet_sockets.
> > > 
> > > We really appreciate suggestions on if any, potential approachs to pass
> > > this vnet header size information from virtio-user to packet-socket.
> > You're right. This is configured from userspace before the FD is passed
> > to vhost-net, so indeed this will require packet socket UAPI support.
> 
> 
> Thanks for quick reply. We will go with adding an extra UAPI here then.
> 
> 
> Another discussion for designing this UAPI is, whether it will be better to
> support setting only vnet header size, just like what TAP does in its ioctl,
> or to support setting a virtio feature bit-map.
> 
> 
> UAPI setting only vnet header size
> 
> Pros:
> 
> 1. It aligns with how other virito backend devices communicate with
> virtio-user
> 
> 2. We can use the holes in struct packet_socket (net/packet/internal.h:120)
> to record the extra information since the size info only takes 8 bits.
> 
> Cons:
> 
> 1. It may have more information that virtio-user needs to communicate with
> packet socket in the future and needs to add more UAPI supports here.
> 
> To Michael: Is there any other information that backend device needs and
> will be given from virtio-user?


Yes e.g. I already mentioned virtio 1.0 wrt LE versus native endian
format.


> 
> UAPI setting a virtio feature bit-map
> 
> Pros:
> 
> 1. It is more general and may reduce future UAPI changes.
> 
> Cons:
> 
> 1. A virtio feature bit-map needs 64 bits, which needs to add an extra field
> in packet_sock struct
> 
> 2. Virtio-user needs to aware that using packet socket as backend supports
> different approach to negotiate the vnet header size.
> 
> 
> We really appreciate any suggestion or discussion on this design choice of
> UAPI.

In the end it's ok with just size too, you just probably shouldn't say
you support VERSION_1 if you are not passing that bit.
沈安琪(凛玥) Feb. 22, 2023, 11:43 a.m. UTC | #13
在 2023/2/22 下午7:37, Michael S. Tsirkin 写道:
> On Wed, Feb 22, 2023 at 04:04:34PM +0800, 沈安琪(凛玥) wrote:
>> 在 2023/2/21 下午11:03, Willem de Bruijn 写道:
>>> 沈安琪(凛玥) wrote:
>>>> 在 2023/2/14 下午10:28, Willem de Bruijn 写道:
>>>>> 沈安琪(凛玥) wrote:
>>>>>> 在 2023/2/10 下午11:39, Willem de Bruijn 写道:
>>>>>>> Michael S. Tsirkin wrote:
>>>>>>>> On Fri, Feb 10, 2023 at 12:01:03PM +0800, 沈安琪(凛玥) wrote:
>>>>>>>>> 在 2023/2/9 下午9:07, Michael S. Tsirkin 写道:
>>>>>>>>>> On Thu, Feb 09, 2023 at 08:43:15PM +0800, 沈安琪(凛玥) wrote:
>>>>>>>>>>> From: "Jianfeng Tan" <henry.tjf@antgroup.com>
>>>>>>>>>>>
>>>>>>>>>>> When raw socket is used as the backend for kernel vhost, currently it
>>>>>>>>>>> will regard the virtio net header as 10-byte, which is not always the
>>>>>>>>>>> case since some virtio features need virtio net header other than
>>>>>>>>>>> 10-byte, such as mrg_rxbuf and VERSION_1 that both need 12-byte virtio
>>>>>>>>>>> net header.
>>>>>>>>>>>
>>>>>>>>>>> Instead of hardcoding virtio net header length to 10 bytes, tpacket_snd,
>>>>>>>>>>> tpacket_rcv, packet_snd and packet_recvmsg now get the virtio net header
>>>>>>>>>>> size that is recorded in packet_sock to indicate the exact virtio net
>>>>>>>>>>> header size that virtio user actually prepares in the packets. By doing
>>>>>>>>>>> so, it can fix the issue of incorrect mac header parsing when these
>>>>>>>>>>> virtio features that need virtio net header other than 10-byte are
>>>>>>>>>>> enable.
>>>>>>>>>>>
>>>>>>>>>>> Signed-off-by: Jianfeng Tan <henry.tjf@antgroup.com>
>>>>>>>>>>> Co-developed-by: Anqi Shen <amy.saq@antgroup.com>
>>>>>>>>>>> Signed-off-by: Anqi Shen <amy.saq@antgroup.com>
>>>>>>>>>> Does it handle VERSION_1 though? That one is also LE.
>>>>>>>>>> Would it be better to pass a features bitmap instead?
>>>>>>>>> Thanks for quick reply!
>>>>>>>>>
>>>>>>>>> I am a little confused abot what "LE" presents here?
>>>>>>>> LE == little_endian.
>>>>>>>> Little endian format.
>>>>>>>>
>>>>>>>>> For passing a features bitmap to af_packet here, our consideration is
>>>>>>>>> whether it will be too complicated for af_packet to understand the virtio
>>>>>>>>> features bitmap in order to get the vnet header size. For now, all the
>>>>>>>>> virtio features stuff is handled by vhost worker and af_packet actually does
>>>>>>>>> not need to know much about virtio features. Would it be better if we keep
>>>>>>>>> the virtio feature stuff in user-level and let user-level tell af_packet how
>>>>>>>>> much space it should reserve?
>>>>>>>> Presumably, we'd add an API in include/linux/virtio_net.h ?
>>>>>>> Better leave this opaque to packet sockets if they won't act on this
>>>>>>> type info.
>>>>>>> This patch series probably should be a single patch btw. As else the
>>>>>>> socket option introduced in the first is broken at that commit, since
>>>>>>> the behavior is only introduced in patch 2.
>>>>>> Good point, will merge this patch series into one patch.
>>>>>>
>>>>>>
>>>>>> Thanks for Michael's enlightening advice, we plan to modify current UAPI
>>>>>> change of adding an extra socketopt from only setting vnet header size
>>>>>> only to setting a bit-map of virtio features, and implement another
>>>>>> helper function in include/linux/virtio_net.h to parse the feature
>>>>>> bit-map. In this case, packet sockets have no need to understand the
>>>>>> feature bit-map but only pass this bit-map to virtio_net helper and get
>>>>>> back the information, such as vnet header size, it needs.
>>>>>>
>>>>>> This change will make the new UAPI more general and avoid further
>>>>>> modification if there are more virtio features to support in the future.
>>>>>>
>>>>> Please also comment how these UAPI extension are intended to be used.
>>>>> As that use is not included in this initial patch series.
>>>>>
>>>>> If the only intended user is vhost-net, we can consider not exposing
>>>>> outside the kernel at all. That makes it easier to iterate if
>>>>> necessary (no stable ABI) and avoids accidentally opening up new
>>>>> avenues for bugs and exploits (syzkaller has a history with
>>>>> virtio_net_header options).
>>>> Our concern is, it seems there is no other solution than uapi to let
>>>> packet sockets know the vnet header size they should use.
>>>>
>>>> Receiving packets in vhost driver, implemented in drivers/vhost/net.c:
>>>> 1109 handle_rx(), will abstract the backend device it uses and directly
>>>> invoke the corresponding socket ops with no extra information indicating
>>>> it is invoked by vhost worker. Vhost worker actually does not know the
>>>> type of backend device it is using; only virito-user knows what type of
>>>> backend device it uses. Therefore, it seems impossible to let vhost set
>>>> the vnet header information to the target backend device.
>>>>
>>>> Tap, another kind of backend device vhost may use, lets virtio-user set
>>>> whether it needs vnet header and how long the vnet header is through
>>>> ioctl. (implemented in drivers/net/tap.c:1066)
>>>>
>>>> In this case, we wonder whether we should align with what tap does and
>>>> set vnet hdr size through setsockopt for packet_sockets.
>>>>
>>>> We really appreciate suggestions on if any, potential approachs to pass
>>>> this vnet header size information from virtio-user to packet-socket.
>>> You're right. This is configured from userspace before the FD is passed
>>> to vhost-net, so indeed this will require packet socket UAPI support.
>>
>> Thanks for quick reply. We will go with adding an extra UAPI here then.
>>
>>
>> Another discussion for designing this UAPI is, whether it will be better to
>> support setting only vnet header size, just like what TAP does in its ioctl,
>> or to support setting a virtio feature bit-map.
>>
>>
>> UAPI setting only vnet header size
>>
>> Pros:
>>
>> 1. It aligns with how other virito backend devices communicate with
>> virtio-user
>>
>> 2. We can use the holes in struct packet_socket (net/packet/internal.h:120)
>> to record the extra information since the size info only takes 8 bits.
>>
>> Cons:
>>
>> 1. It may have more information that virtio-user needs to communicate with
>> packet socket in the future and needs to add more UAPI supports here.
>>
>> To Michael: Is there any other information that backend device needs and
>> will be given from virtio-user?
>
> Yes e.g. I already mentioned virtio 1.0 wrt LE versus native endian
> format.
>
>
>> UAPI setting a virtio feature bit-map
>>
>> Pros:
>>
>> 1. It is more general and may reduce future UAPI changes.
>>
>> Cons:
>>
>> 1. A virtio feature bit-map needs 64 bits, which needs to add an extra field
>> in packet_sock struct
>>
>> 2. Virtio-user needs to aware that using packet socket as backend supports
>> different approach to negotiate the vnet header size.
>>
>>
>> We really appreciate any suggestion or discussion on this design choice of
>> UAPI.
> In the end it's ok with just size too, you just probably shouldn't say
> you support VERSION_1 if you are not passing that bit.
>

Sorry for the confusion caused by mentioning VERSION_1 in the commit 
log. We actually just intended to give an example of which features 
may need a 12-byte vnet header. We will remove it from the commit log in 
patch v2 to avoid confusion here. Thanks a lot for your suggestions.
Willem de Bruijn Feb. 22, 2023, 3:04 p.m. UTC | #14
> >>> You're right. This is configured from userspace before the FD is passed
> >>> to vhost-net, so indeed this will require packet socket UAPI support.
> >>
> >> Thanks for quick reply. We will go with adding an extra UAPI here then.
> >>
> >>
> >> Another discussion for designing this UAPI is, whether it will be better to
> >> support setting only vnet header size, just like what TAP does in its ioctl,
> >> or to support setting a virtio feature bit-map.
> >>
> >>
> >> UAPI setting only vnet header size
> >>
> >> Pros:
> >>
> >> 1. It aligns with how other virito backend devices communicate with
> >> virtio-user
> >>
> >> 2. We can use the holes in struct packet_socket (net/packet/internal.h:120)
> >> to record the extra information since the size info only takes 8 bits.
> >>
> >> Cons:
> >>
> >> 1. It may have more information that virtio-user needs to communicate with
> >> packet socket in the future and needs to add more UAPI supports here.
> >>
> >> To Michael: Is there any other information that backend device needs and
> >> will be given from virtio-user?
> >
> > Yes e.g. I already mentioned virtio 1.0 wrt LE versus native endian
> > format.
> >
> >
> >> UAPI setting a virtio feature bit-map
> >>
> >> Pros:
> >>
> >> 1. It is more general and may reduce future UAPI changes.
> >>
> >> Cons:
> >>
> >> 1. A virtio feature bit-map needs 64 bits, which needs to add an extra field
> >> in packet_sock struct

Accepting a bitmap in the ABI does not have to imply storing a bitmap.

> >>
> >> 2. Virtio-user needs to aware that using packet socket as backend supports
> >> different approach to negotiate the vnet header size.
> >>
> >>
> >> We really appreciate any suggestion or discussion on this design choice of
> >> UAPI.
> > In the end it's ok with just size too, you just probably shouldn't say
> > you support VERSION_1 if you are not passing that bit.
> >
> 
> Sorry for the confusion here that we mentioned VERSION_1 in the commit 
> log. We actually just attended to give an example of what features that 
> may need 12-byte vnet header. We will remove it from the commit log in 
> patch v2 to avoid confusion here. Thanks a lot for your suggestions.

The question hinges on which features are expected to have to be
supported in the future. So far we have

- extra num_buffers field
- little endian (V1)

Given the rate of change in the spec, I don't think this should
be over designed. If V1 is not planned to be supported, just
configure header length. If it is, then perhaps instead a feature
bitmap.
diff mbox series

Patch

diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index ab37baf..4f49939 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2092,18 +2092,25 @@  static unsigned int run_filter(struct sk_buff *skb,
 }
 
 static int packet_rcv_vnet(struct msghdr *msg, const struct sk_buff *skb,
-			   size_t *len)
+			   size_t *len, int vnet_hdr_sz)
 {
 	struct virtio_net_hdr vnet_hdr;
+	int ret;
 
-	if (*len < sizeof(vnet_hdr))
+	if (*len < vnet_hdr_sz)
 		return -EINVAL;
-	*len -= sizeof(vnet_hdr);
+	*len -= vnet_hdr_sz;
 
 	if (virtio_net_hdr_from_skb(skb, &vnet_hdr, vio_le(), true, 0))
 		return -EINVAL;
 
-	return memcpy_to_msg(msg, (void *)&vnet_hdr, sizeof(vnet_hdr));
+	ret = memcpy_to_msg(msg, (void *)&vnet_hdr, sizeof(vnet_hdr));
+
+	/* reserve space for extra info in vnet_hdr if needed */
+	if (ret == 0)
+		iov_iter_advance(&msg->msg_iter, vnet_hdr_sz - sizeof(vnet_hdr));
+
+	return ret;
 }
 
 /*
@@ -2311,7 +2318,7 @@  static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
 				       (maclen < 16 ? 16 : maclen)) +
 				       po->tp_reserve;
 		if (po->has_vnet_hdr) {
-			netoff += sizeof(struct virtio_net_hdr);
+			netoff += po->vnet_hdr_sz;
 			do_vnet = true;
 		}
 		macoff = netoff - maclen;
@@ -2552,16 +2559,23 @@  static int __packet_snd_vnet_parse(struct virtio_net_hdr *vnet_hdr, size_t len)
 }
 
 static int packet_snd_vnet_parse(struct msghdr *msg, size_t *len,
-				 struct virtio_net_hdr *vnet_hdr)
+				 struct virtio_net_hdr *vnet_hdr, int vnet_hdr_sz)
 {
-	if (*len < sizeof(*vnet_hdr))
+	int ret;
+
+	if (*len < vnet_hdr_sz)
 		return -EINVAL;
-	*len -= sizeof(*vnet_hdr);
+	*len -= vnet_hdr_sz;
 
 	if (!copy_from_iter_full(vnet_hdr, sizeof(*vnet_hdr), &msg->msg_iter))
 		return -EFAULT;
 
-	return __packet_snd_vnet_parse(vnet_hdr, *len);
+	ret = __packet_snd_vnet_parse(vnet_hdr, *len);
+
+	/* move iter to point to the start of mac header */
+	if (ret == 0)
+		iov_iter_advance(&msg->msg_iter, vnet_hdr_sz - sizeof(struct virtio_net_hdr));
+	return ret;
 }
 
 static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
@@ -2730,6 +2744,7 @@  static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
 	int status = TP_STATUS_AVAILABLE;
 	int hlen, tlen, copylen = 0;
 	long timeo = 0;
+	int vnet_hdr_sz;
 
 	mutex_lock(&po->pg_vec_lock);
 
@@ -2811,8 +2826,9 @@  static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
 		tlen = dev->needed_tailroom;
 		if (po->has_vnet_hdr) {
 			vnet_hdr = data;
-			data += sizeof(*vnet_hdr);
-			tp_len -= sizeof(*vnet_hdr);
+			vnet_hdr_sz = po->vnet_hdr_sz;
+			data += vnet_hdr_sz;
+			tp_len -= vnet_hdr_sz;
 			if (tp_len < 0 ||
 			    __packet_snd_vnet_parse(vnet_hdr, tp_len)) {
 				tp_len = -EINVAL;
@@ -2947,6 +2963,7 @@  static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
 	int offset = 0;
 	struct packet_sock *po = pkt_sk(sk);
 	bool has_vnet_hdr = false;
+	int vnet_hdr_sz;
 	int hlen, tlen, linear;
 	int extra_len = 0;
 
@@ -2991,7 +3008,8 @@  static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
 	if (sock->type == SOCK_RAW)
 		reserve = dev->hard_header_len;
 	if (po->has_vnet_hdr) {
-		err = packet_snd_vnet_parse(msg, &len, &vnet_hdr);
+		vnet_hdr_sz = po->vnet_hdr_sz;
+		err = packet_snd_vnet_parse(msg, &len, &vnet_hdr, vnet_hdr_sz);
 		if (err)
 			goto out_unlock;
 		has_vnet_hdr = true;
@@ -3068,7 +3086,7 @@  static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
 		err = virtio_net_hdr_to_skb(skb, &vnet_hdr, vio_le());
 		if (err)
 			goto out_free;
-		len += sizeof(vnet_hdr);
+		len += vnet_hdr_sz;
 		virtio_net_hdr_set_proto(skb, &vnet_hdr);
 	}
 
@@ -3452,10 +3470,10 @@  static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
 	packet_rcv_try_clear_pressure(pkt_sk(sk));
 
 	if (pkt_sk(sk)->has_vnet_hdr) {
-		err = packet_rcv_vnet(msg, skb, &len);
+		vnet_hdr_len = pkt_sk(sk)->vnet_hdr_sz;
+		err = packet_rcv_vnet(msg, skb, &len, vnet_hdr_len);
 		if (err)
 			goto out_free;
-		vnet_hdr_len = sizeof(struct virtio_net_hdr);
 	}
 
 	/* You lose any data beyond the buffer you gave. If it worries