diff mbox series

[RFC,5/7] tun: Introduce virtio-net hashing feature

Message ID 20231008052101.144422-6-akihiko.odaki@daynix.com (mailing list archive)
State Superseded
Delegated to: Netdev Maintainers
Headers show
Series tun: Introduce virtio-net hashing feature | expand

Checks

Context Check Description
netdev/series_format success Posting correctly formatted
netdev/tree_selection success Guessed tree name to be net-next, async
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit fail Errors and warnings before: 1365 this patch: 1390
netdev/cc_maintainers warning 1 maintainers not CCed: edumazet@google.com
netdev/build_clang success Errors and warnings before: 1388 this patch: 1388
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn fail Errors and warnings before: 1390 this patch: 1415
netdev/checkpatch warning WARNING: line length of 81 exceeds 80 columns WARNING: line length of 84 exceeds 80 columns WARNING: line length of 89 exceeds 80 columns WARNING: line length of 95 exceeds 80 columns
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0

Commit Message

Akihiko Odaki Oct. 8, 2023, 5:20 a.m. UTC
virtio-net has two usages of hashes: one is RSS and the other is hash
reporting. Conventionally, the hash calculation was done by the VMM.
However, computing the hash after the queue was chosen defeats the
purpose of RSS.

Another approach is to use eBPF steering program. This approach has
another downside: it cannot report the calculated hash due to the
restrictive nature of eBPF.

Introduce the code to compute hashes to the kernel in order to overcome
these challenges. An alternative solution is to extend the eBPF steering
program so that it will be able to report to the userspace, but it makes
little sense to allow implementing different hashing algorithms with
eBPF since the hash value reported by virtio-net is strictly defined by
the specification.

The hash value already stored in sk_buff is not used and computed
independently since it may have been computed in a way not conformant
with the specification.

Signed-off-by: Akihiko Odaki <akihiko.odaki@daynix.com>
---
 drivers/net/tun.c           | 187 ++++++++++++++++++++++++++++++++----
 include/uapi/linux/if_tun.h |  16 +++
 2 files changed, 182 insertions(+), 21 deletions(-)

Comments

Willem de Bruijn Oct. 8, 2023, 7:07 p.m. UTC | #1
On Sun, Oct 8, 2023 at 7:22 AM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
>
> virtio-net have two usage of hashes: one is RSS and another is hash
> reporting. Conventionally the hash calculation was done by the VMM.
> However, computing the hash after the queue was chosen defeats the
> purpose of RSS.
>
> Another approach is to use eBPF steering program. This approach has
> another downside: it cannot report the calculated hash due to the
> restrictive nature of eBPF.
>
> Introduce the code to compute hashes to the kernel in order to overcome
> thse challenges. An alternative solution is to extend the eBPF steering
> program so that it will be able to report to the userspace, but it makes
> little sense to allow to implement different hashing algorithms with
> eBPF since the hash value reported by virtio-net is strictly defined by
> the specification.
>
> The hash value already stored in sk_buff is not used and computed
> independently since it may have been computed in a way not conformant
> with the specification.
>
> Signed-off-by: Akihiko Odaki <akihiko.odaki@daynix.com>
> ---

> +static const struct tun_vnet_hash_cap tun_vnet_hash_cap = {
> +       .max_indirection_table_length =
> +               TUN_VNET_HASH_MAX_INDIRECTION_TABLE_LENGTH,
> +
> +       .types = VIRTIO_NET_SUPPORTED_HASH_TYPES
> +};

No need to have explicit capabilities exchange like this? Tun either
supports all or none.

>         case TUNSETSTEERINGEBPF:
> -               ret = tun_set_ebpf(tun, &tun->steering_prog, argp);
> +               bpf_ret = tun_set_ebpf(tun, &tun->steering_prog, argp);
> +               if (IS_ERR(bpf_ret))
> +                       ret = PTR_ERR(bpf_ret);
> +               else if (bpf_ret)
> +                       tun->vnet_hash.flags &= ~TUN_VNET_HASH_RSS;

Don't make one feature disable another.

TUNSETSTEERINGEBPF and TUNSETVNETHASH are mutually exclusive
functions. If one is enabled the other call should fail, with EBUSY
for instance.

> +       case TUNSETVNETHASH:
> +               len = sizeof(vnet_hash);
> +               if (copy_from_user(&vnet_hash, argp, len)) {
> +                       ret = -EFAULT;
> +                       break;
> +               }
> +
> +               if (((vnet_hash.flags & TUN_VNET_HASH_REPORT) &&
> +                    (tun->vnet_hdr_sz < sizeof(struct virtio_net_hdr_v1_hash) ||
> +                     !tun_is_little_endian(tun))) ||
> +                    vnet_hash.indirection_table_mask >=
> +                    TUN_VNET_HASH_MAX_INDIRECTION_TABLE_LENGTH) {
> +                       ret = -EINVAL;
> +                       break;
> +               }
> +
> +               argp = (u8 __user *)argp + len;
> +               len = (vnet_hash.indirection_table_mask + 1) * 2;
> +               if (copy_from_user(vnet_hash_indirection_table, argp, len)) {
> +                       ret = -EFAULT;
> +                       break;
> +               }
> +
> +               argp = (u8 __user *)argp + len;
> +               len = virtio_net_hash_key_length(vnet_hash.types);
> +
> +               if (copy_from_user(vnet_hash_key, argp, len)) {
> +                       ret = -EFAULT;
> +                       break;
> +               }

Probably easier and less error-prone to define a fixed size control
struct with the max indirection table size.

Btw: please trim the CC: list considerably on future patches.
Akihiko Odaki Oct. 8, 2023, 8:04 p.m. UTC | #2
On 2023/10/09 4:07, Willem de Bruijn wrote:
> On Sun, Oct 8, 2023 at 7:22 AM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
>>
>> virtio-net have two usage of hashes: one is RSS and another is hash
>> reporting. Conventionally the hash calculation was done by the VMM.
>> However, computing the hash after the queue was chosen defeats the
>> purpose of RSS.
>>
>> Another approach is to use eBPF steering program. This approach has
>> another downside: it cannot report the calculated hash due to the
>> restrictive nature of eBPF.
>>
>> Introduce the code to compute hashes to the kernel in order to overcome
>> thse challenges. An alternative solution is to extend the eBPF steering
>> program so that it will be able to report to the userspace, but it makes
>> little sense to allow to implement different hashing algorithms with
>> eBPF since the hash value reported by virtio-net is strictly defined by
>> the specification.
>>
>> The hash value already stored in sk_buff is not used and computed
>> independently since it may have been computed in a way not conformant
>> with the specification.
>>
>> Signed-off-by: Akihiko Odaki <akihiko.odaki@daynix.com>
>> ---
> 
>> +static const struct tun_vnet_hash_cap tun_vnet_hash_cap = {
>> +       .max_indirection_table_length =
>> +               TUN_VNET_HASH_MAX_INDIRECTION_TABLE_LENGTH,
>> +
>> +       .types = VIRTIO_NET_SUPPORTED_HASH_TYPES
>> +};
> 
> No need to have explicit capabilities exchange like this? Tun either
> supports all or none.

tun does not support VIRTIO_NET_RSS_HASH_TYPE_IP_EX, 
VIRTIO_NET_RSS_HASH_TYPE_TCP_EX, and VIRTIO_NET_RSS_HASH_TYPE_UDP_EX.

It is because the flow dissector does not support IPv6 extensions. The 
specification is also vague, and does not tell how many TLVs should be 
consumed at most when interpreting destination option header so I chose 
to avoid adding code for these hash types to the flow dissector. I doubt 
anyone will complain about it since nobody has complained about this for Linux.

I'm also adding this so that we can extend it later. 
max_indirection_table_length may grow for systems with 128+ CPUs, or 
types may have other bits for new protocols in the future.

> 
>>          case TUNSETSTEERINGEBPF:
>> -               ret = tun_set_ebpf(tun, &tun->steering_prog, argp);
>> +               bpf_ret = tun_set_ebpf(tun, &tun->steering_prog, argp);
>> +               if (IS_ERR(bpf_ret))
>> +                       ret = PTR_ERR(bpf_ret);
>> +               else if (bpf_ret)
>> +                       tun->vnet_hash.flags &= ~TUN_VNET_HASH_RSS;
> 
> Don't make one feature disable another.
> 
> TUNSETSTEERINGEBPF and TUNSETVNETHASH are mutually exclusive
> functions. If one is enabled the other call should fail, with EBUSY
> for instance.
> 
>> +       case TUNSETVNETHASH:
>> +               len = sizeof(vnet_hash);
>> +               if (copy_from_user(&vnet_hash, argp, len)) {
>> +                       ret = -EFAULT;
>> +                       break;
>> +               }
>> +
>> +               if (((vnet_hash.flags & TUN_VNET_HASH_REPORT) &&
>> +                    (tun->vnet_hdr_sz < sizeof(struct virtio_net_hdr_v1_hash) ||
>> +                     !tun_is_little_endian(tun))) ||
>> +                    vnet_hash.indirection_table_mask >=
>> +                    TUN_VNET_HASH_MAX_INDIRECTION_TABLE_LENGTH) {
>> +                       ret = -EINVAL;
>> +                       break;
>> +               }
>> +
>> +               argp = (u8 __user *)argp + len;
>> +               len = (vnet_hash.indirection_table_mask + 1) * 2;
>> +               if (copy_from_user(vnet_hash_indirection_table, argp, len)) {
>> +                       ret = -EFAULT;
>> +                       break;
>> +               }
>> +
>> +               argp = (u8 __user *)argp + len;
>> +               len = virtio_net_hash_key_length(vnet_hash.types);
>> +
>> +               if (copy_from_user(vnet_hash_key, argp, len)) {
>> +                       ret = -EFAULT;
>> +                       break;
>> +               }
> 
> Probably easier and less error-prone to define a fixed size control
> struct with the max indirection table size.

I made its size variable because the indirection table and key may grow 
in the future as I wrote above.

> 
> Btw: please trim the CC: list considerably on future patches.

I'll do so in the next version with the TUNSETSTEERINGEBPF change you 
proposed.
Willem de Bruijn Oct. 8, 2023, 8:08 p.m. UTC | #3
On Sun, Oct 8, 2023 at 10:04 PM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
>
> On 2023/10/09 4:07, Willem de Bruijn wrote:
> > On Sun, Oct 8, 2023 at 7:22 AM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
> >>
> >> virtio-net have two usage of hashes: one is RSS and another is hash
> >> reporting. Conventionally the hash calculation was done by the VMM.
> >> However, computing the hash after the queue was chosen defeats the
> >> purpose of RSS.
> >>
> >> Another approach is to use eBPF steering program. This approach has
> >> another downside: it cannot report the calculated hash due to the
> >> restrictive nature of eBPF.
> >>
> >> Introduce the code to compute hashes to the kernel in order to overcome
> >> thse challenges. An alternative solution is to extend the eBPF steering
> >> program so that it will be able to report to the userspace, but it makes
> >> little sense to allow to implement different hashing algorithms with
> >> eBPF since the hash value reported by virtio-net is strictly defined by
> >> the specification.
> >>
> >> The hash value already stored in sk_buff is not used and computed
> >> independently since it may have been computed in a way not conformant
> >> with the specification.
> >>
> >> Signed-off-by: Akihiko Odaki <akihiko.odaki@daynix.com>
> >> ---
> >
> >> +static const struct tun_vnet_hash_cap tun_vnet_hash_cap = {
> >> +       .max_indirection_table_length =
> >> +               TUN_VNET_HASH_MAX_INDIRECTION_TABLE_LENGTH,
> >> +
> >> +       .types = VIRTIO_NET_SUPPORTED_HASH_TYPES
> >> +};
> >
> > No need to have explicit capabilities exchange like this? Tun either
> > supports all or none.
>
> tun does not support VIRTIO_NET_RSS_HASH_TYPE_IP_EX,
> VIRTIO_NET_RSS_HASH_TYPE_TCP_EX, and VIRTIO_NET_RSS_HASH_TYPE_UDP_EX.
>
> It is because the flow dissector does not support IPv6 extensions. The
> specification is also vague, and does not tell how many TLVs should be
> consumed at most when interpreting destination option header so I chose
> to avoid adding code for these hash types to the flow dissector. I doubt
> anyone will complain about it since nobody complains for Linux.
>
> I'm also adding this so that we can extend it later.
> max_indirection_table_length may grow for systems with 128+ CPUs, or
> types may have other bits for new protocols in the future.
>
> >
> >>          case TUNSETSTEERINGEBPF:
> >> -               ret = tun_set_ebpf(tun, &tun->steering_prog, argp);
> >> +               bpf_ret = tun_set_ebpf(tun, &tun->steering_prog, argp);
> >> +               if (IS_ERR(bpf_ret))
> >> +                       ret = PTR_ERR(bpf_ret);
> >> +               else if (bpf_ret)
> >> +                       tun->vnet_hash.flags &= ~TUN_VNET_HASH_RSS;
> >
> > Don't make one feature disable another.
> >
> > TUNSETSTEERINGEBPF and TUNSETVNETHASH are mutually exclusive
> > functions. If one is enabled the other call should fail, with EBUSY
> > for instance.
> >
> >> +       case TUNSETVNETHASH:
> >> +               len = sizeof(vnet_hash);
> >> +               if (copy_from_user(&vnet_hash, argp, len)) {
> >> +                       ret = -EFAULT;
> >> +                       break;
> >> +               }
> >> +
> >> +               if (((vnet_hash.flags & TUN_VNET_HASH_REPORT) &&
> >> +                    (tun->vnet_hdr_sz < sizeof(struct virtio_net_hdr_v1_hash) ||
> >> +                     !tun_is_little_endian(tun))) ||
> >> +                    vnet_hash.indirection_table_mask >=
> >> +                    TUN_VNET_HASH_MAX_INDIRECTION_TABLE_LENGTH) {
> >> +                       ret = -EINVAL;
> >> +                       break;
> >> +               }
> >> +
> >> +               argp = (u8 __user *)argp + len;
> >> +               len = (vnet_hash.indirection_table_mask + 1) * 2;
> >> +               if (copy_from_user(vnet_hash_indirection_table, argp, len)) {
> >> +                       ret = -EFAULT;
> >> +                       break;
> >> +               }
> >> +
> >> +               argp = (u8 __user *)argp + len;
> >> +               len = virtio_net_hash_key_length(vnet_hash.types);
> >> +
> >> +               if (copy_from_user(vnet_hash_key, argp, len)) {
> >> +                       ret = -EFAULT;
> >> +                       break;
> >> +               }
> >
> > Probably easier and less error-prone to define a fixed size control
> > struct with the max indirection table size.
>
> I made its size variable because the indirection table and key may grow
> in the future as I wrote above.
>
> >
> > Btw: please trim the CC: list considerably on future patches.
>
> I'll do so in the next version with the TUNSETSTEERINGEBPF change you
> proposed.

To be clear: please don't just resubmit with that one change.

The skb and cb issues are quite fundamental issues that need to be resolved.

I'd like to understand why adjusting the existing BPF feature for this
exact purpose cannot be amended to return the key it produced.

As you point out, the C flow dissector is insufficient. The BPF flow
dissector does not have this problem. The same argument would go for
the pre-existing BPF steering program.
Akihiko Odaki Oct. 8, 2023, 8:46 p.m. UTC | #4
On 2023/10/09 5:08, Willem de Bruijn wrote:
> On Sun, Oct 8, 2023 at 10:04 PM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
>>
>> On 2023/10/09 4:07, Willem de Bruijn wrote:
>>> On Sun, Oct 8, 2023 at 7:22 AM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
>>>>
>>>> virtio-net have two usage of hashes: one is RSS and another is hash
>>>> reporting. Conventionally the hash calculation was done by the VMM.
>>>> However, computing the hash after the queue was chosen defeats the
>>>> purpose of RSS.
>>>>
>>>> Another approach is to use eBPF steering program. This approach has
>>>> another downside: it cannot report the calculated hash due to the
>>>> restrictive nature of eBPF.
>>>>
>>>> Introduce the code to compute hashes to the kernel in order to overcome
>>>> thse challenges. An alternative solution is to extend the eBPF steering
>>>> program so that it will be able to report to the userspace, but it makes
>>>> little sense to allow to implement different hashing algorithms with
>>>> eBPF since the hash value reported by virtio-net is strictly defined by
>>>> the specification.
>>>>
>>>> The hash value already stored in sk_buff is not used and computed
>>>> independently since it may have been computed in a way not conformant
>>>> with the specification.
>>>>
>>>> Signed-off-by: Akihiko Odaki <akihiko.odaki@daynix.com>
>>>> ---
>>>
>>>> +static const struct tun_vnet_hash_cap tun_vnet_hash_cap = {
>>>> +       .max_indirection_table_length =
>>>> +               TUN_VNET_HASH_MAX_INDIRECTION_TABLE_LENGTH,
>>>> +
>>>> +       .types = VIRTIO_NET_SUPPORTED_HASH_TYPES
>>>> +};
>>>
>>> No need to have explicit capabilities exchange like this? Tun either
>>> supports all or none.
>>
>> tun does not support VIRTIO_NET_RSS_HASH_TYPE_IP_EX,
>> VIRTIO_NET_RSS_HASH_TYPE_TCP_EX, and VIRTIO_NET_RSS_HASH_TYPE_UDP_EX.
>>
>> It is because the flow dissector does not support IPv6 extensions. The
>> specification is also vague, and does not tell how many TLVs should be
>> consumed at most when interpreting destination option header so I chose
>> to avoid adding code for these hash types to the flow dissector. I doubt
>> anyone will complain about it since nobody complains for Linux.
>>
>> I'm also adding this so that we can extend it later.
>> max_indirection_table_length may grow for systems with 128+ CPUs, or
>> types may have other bits for new protocols in the future.
>>
>>>
>>>>           case TUNSETSTEERINGEBPF:
>>>> -               ret = tun_set_ebpf(tun, &tun->steering_prog, argp);
>>>> +               bpf_ret = tun_set_ebpf(tun, &tun->steering_prog, argp);
>>>> +               if (IS_ERR(bpf_ret))
>>>> +                       ret = PTR_ERR(bpf_ret);
>>>> +               else if (bpf_ret)
>>>> +                       tun->vnet_hash.flags &= ~TUN_VNET_HASH_RSS;
>>>
>>> Don't make one feature disable another.
>>>
>>> TUNSETSTEERINGEBPF and TUNSETVNETHASH are mutually exclusive
>>> functions. If one is enabled the other call should fail, with EBUSY
>>> for instance.
>>>
>>>> +       case TUNSETVNETHASH:
>>>> +               len = sizeof(vnet_hash);
>>>> +               if (copy_from_user(&vnet_hash, argp, len)) {
>>>> +                       ret = -EFAULT;
>>>> +                       break;
>>>> +               }
>>>> +
>>>> +               if (((vnet_hash.flags & TUN_VNET_HASH_REPORT) &&
>>>> +                    (tun->vnet_hdr_sz < sizeof(struct virtio_net_hdr_v1_hash) ||
>>>> +                     !tun_is_little_endian(tun))) ||
>>>> +                    vnet_hash.indirection_table_mask >=
>>>> +                    TUN_VNET_HASH_MAX_INDIRECTION_TABLE_LENGTH) {
>>>> +                       ret = -EINVAL;
>>>> +                       break;
>>>> +               }
>>>> +
>>>> +               argp = (u8 __user *)argp + len;
>>>> +               len = (vnet_hash.indirection_table_mask + 1) * 2;
>>>> +               if (copy_from_user(vnet_hash_indirection_table, argp, len)) {
>>>> +                       ret = -EFAULT;
>>>> +                       break;
>>>> +               }
>>>> +
>>>> +               argp = (u8 __user *)argp + len;
>>>> +               len = virtio_net_hash_key_length(vnet_hash.types);
>>>> +
>>>> +               if (copy_from_user(vnet_hash_key, argp, len)) {
>>>> +                       ret = -EFAULT;
>>>> +                       break;
>>>> +               }
>>>
>>> Probably easier and less error-prone to define a fixed size control
>>> struct with the max indirection table size.
>>
>> I made its size variable because the indirection table and key may grow
>> in the future as I wrote above.
>>
>>>
>>> Btw: please trim the CC: list considerably on future patches.
>>
>> I'll do so in the next version with the TUNSETSTEERINGEBPF change you
>> proposed.
> 
> To be clear: please don't just resubmit with that one change.
> 
> The skb and cb issues are quite fundamental issues that need to be resolved.
> 
> I'd like to understand why adjusting the existing BPF feature for this
> exact purpose cannot be amended to return the key it produced.

eBPF steering program is not designed for this particular problem in my 
understanding. It was introduced to derive hash values with an 
understanding of application-specific semantics of packets instead of 
generic IP/TCP/UDP semantics.

This problem is rather different in terms that the hash derivation is 
strictly defined by virtio-net. I don't think it makes sense to 
introduce the complexity of BPF when you always run the same code.

It can utilize the existing flow dissector and also make it easier to 
use for the userspace by implementing this in the kernel.

> 
> As you point out, the C flow dissector is insufficient. The BPF flow
> dissector does not have this problem. The same argument would go for
> the pre-existing BPF steering program.
It is possible to extend the C flow dissector just as it is possible to 
implement a BPF flow dissector. The more serious problem is that 
virtio-net specification (and Microsoft RSS it follows) does not tell 
how to implement IPv6 extension support.
Willem de Bruijn Oct. 9, 2023, 8:04 a.m. UTC | #5
On Sun, Oct 8, 2023 at 3:46 PM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
>
> On 2023/10/09 5:08, Willem de Bruijn wrote:
> > On Sun, Oct 8, 2023 at 10:04 PM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
> >>
> >> On 2023/10/09 4:07, Willem de Bruijn wrote:
> >>> On Sun, Oct 8, 2023 at 7:22 AM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
> >>>>
> >>>> virtio-net have two usage of hashes: one is RSS and another is hash
> >>>> reporting. Conventionally the hash calculation was done by the VMM.
> >>>> However, computing the hash after the queue was chosen defeats the
> >>>> purpose of RSS.
> >>>>
> >>>> Another approach is to use eBPF steering program. This approach has
> >>>> another downside: it cannot report the calculated hash due to the
> >>>> restrictive nature of eBPF.
> >>>>
> >>>> Introduce the code to compute hashes to the kernel in order to overcome
> >>>> thse challenges. An alternative solution is to extend the eBPF steering
> >>>> program so that it will be able to report to the userspace, but it makes
> >>>> little sense to allow to implement different hashing algorithms with
> >>>> eBPF since the hash value reported by virtio-net is strictly defined by
> >>>> the specification.
> >>>>
> >>>> The hash value already stored in sk_buff is not used and computed
> >>>> independently since it may have been computed in a way not conformant
> >>>> with the specification.
> >>>>
> >>>> Signed-off-by: Akihiko Odaki <akihiko.odaki@daynix.com>
> >>>> ---
> >>>
> >>>> +static const struct tun_vnet_hash_cap tun_vnet_hash_cap = {
> >>>> +       .max_indirection_table_length =
> >>>> +               TUN_VNET_HASH_MAX_INDIRECTION_TABLE_LENGTH,
> >>>> +
> >>>> +       .types = VIRTIO_NET_SUPPORTED_HASH_TYPES
> >>>> +};
> >>>
> >>> No need to have explicit capabilities exchange like this? Tun either
> >>> supports all or none.
> >>
> >> tun does not support VIRTIO_NET_RSS_HASH_TYPE_IP_EX,
> >> VIRTIO_NET_RSS_HASH_TYPE_TCP_EX, and VIRTIO_NET_RSS_HASH_TYPE_UDP_EX.
> >>
> >> It is because the flow dissector does not support IPv6 extensions. The
> >> specification is also vague, and does not tell how many TLVs should be
> >> consumed at most when interpreting destination option header so I chose
> >> to avoid adding code for these hash types to the flow dissector. I doubt
> >> anyone will complain about it since nobody complains for Linux.
> >>
> >> I'm also adding this so that we can extend it later.
> >> max_indirection_table_length may grow for systems with 128+ CPUs, or
> >> types may have other bits for new protocols in the future.
> >>
> >>>
> >>>>           case TUNSETSTEERINGEBPF:
> >>>> -               ret = tun_set_ebpf(tun, &tun->steering_prog, argp);
> >>>> +               bpf_ret = tun_set_ebpf(tun, &tun->steering_prog, argp);
> >>>> +               if (IS_ERR(bpf_ret))
> >>>> +                       ret = PTR_ERR(bpf_ret);
> >>>> +               else if (bpf_ret)
> >>>> +                       tun->vnet_hash.flags &= ~TUN_VNET_HASH_RSS;
> >>>
> >>> Don't make one feature disable another.
> >>>
> >>> TUNSETSTEERINGEBPF and TUNSETVNETHASH are mutually exclusive
> >>> functions. If one is enabled the other call should fail, with EBUSY
> >>> for instance.
> >>>
> >>>> +       case TUNSETVNETHASH:
> >>>> +               len = sizeof(vnet_hash);
> >>>> +               if (copy_from_user(&vnet_hash, argp, len)) {
> >>>> +                       ret = -EFAULT;
> >>>> +                       break;
> >>>> +               }
> >>>> +
> >>>> +               if (((vnet_hash.flags & TUN_VNET_HASH_REPORT) &&
> >>>> +                    (tun->vnet_hdr_sz < sizeof(struct virtio_net_hdr_v1_hash) ||
> >>>> +                     !tun_is_little_endian(tun))) ||
> >>>> +                    vnet_hash.indirection_table_mask >=
> >>>> +                    TUN_VNET_HASH_MAX_INDIRECTION_TABLE_LENGTH) {
> >>>> +                       ret = -EINVAL;
> >>>> +                       break;
> >>>> +               }
> >>>> +
> >>>> +               argp = (u8 __user *)argp + len;
> >>>> +               len = (vnet_hash.indirection_table_mask + 1) * 2;
> >>>> +               if (copy_from_user(vnet_hash_indirection_table, argp, len)) {
> >>>> +                       ret = -EFAULT;
> >>>> +                       break;
> >>>> +               }
> >>>> +
> >>>> +               argp = (u8 __user *)argp + len;
> >>>> +               len = virtio_net_hash_key_length(vnet_hash.types);
> >>>> +
> >>>> +               if (copy_from_user(vnet_hash_key, argp, len)) {
> >>>> +                       ret = -EFAULT;
> >>>> +                       break;
> >>>> +               }
> >>>
> >>> Probably easier and less error-prone to define a fixed size control
> >>> struct with the max indirection table size.
> >>
> >> I made its size variable because the indirection table and key may grow
> >> in the future as I wrote above.
> >>
> >>>
> >>> Btw: please trim the CC: list considerably on future patches.
> >>
> >> I'll do so in the next version with the TUNSETSTEERINGEBPF change you
> >> proposed.
> >
> > To be clear: please don't just resubmit with that one change.
> >
> > The skb and cb issues are quite fundamental issues that need to be resolved.
> >
> > I'd like to understand why adjusting the existing BPF feature for this
> > exact purpose cannot be amended to return the key it produced.
>
> eBPF steering program is not designed for this particular problem in my
> understanding. It was introduced to derive hash values with an
> understanding of application-specific semantics of packets instead of
> generic IP/TCP/UDP semantics.
>
> This problem is rather different in terms that the hash derivation is
> strictly defined by virtio-net. I don't think it makes sense to
> introduce the complexity of BPF when you always run the same code.
>
> It can utilize the existing flow dissector and also make it easier to
> use for the userspace by implementing this in the kernel.

Ok. There does appear to be overlap in functionality. But it might be
easier to deploy to just have standard Toeplitz available without
having to compile and load an eBPF program.

As for the sk_buff and cb[] changes. The first is really not needed.
sk_buff simply would not scale if every edge case needs a few bits.

For the control block, generally it is not safe to use that across
layers. In this case, between qdisc enqueue of a given device and
ndo_start_xmit of that device, I suppose it is. Though uncommon. I
wonder if there is any precedent.

The data will have to be stored in the skb somewhere. A simpler option
is just skb->hash? This code would use skb_get_hash, if it would
always produce a Toeplitz hash, anyway.

> >
> > As you point out, the C flow dissector is insufficient. The BPF flow
> > dissector does not have this problem. The same argument would go for
> > the pre-existing BPF steering program.
> It is possible to extend the C flow dissector just as it is possible to
> implement a BPF flow dissector. The more serious problem is that
> virtio-net specification (and Microsoft RSS it follows) does not tell
> how to implement IPv6 extension support.
Willem de Bruijn Oct. 9, 2023, 8:13 a.m. UTC | #6
On Sun, Oct 8, 2023 at 12:22 AM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
>
> virtio-net have two usage of hashes: one is RSS and another is hash
> reporting. Conventionally the hash calculation was done by the VMM.
> However, computing the hash after the queue was chosen defeats the
> purpose of RSS.
>
> Another approach is to use eBPF steering program. This approach has
> another downside: it cannot report the calculated hash due to the
> restrictive nature of eBPF.
>
> Introduce the code to compute hashes to the kernel in order to overcome
> thse challenges. An alternative solution is to extend the eBPF steering
> program so that it will be able to report to the userspace, but it makes
> little sense to allow to implement different hashing algorithms with
> eBPF since the hash value reported by virtio-net is strictly defined by
> the specification.
>
> The hash value already stored in sk_buff is not used and computed
> independently since it may have been computed in a way not conformant
> with the specification.
>
> Signed-off-by: Akihiko Odaki <akihiko.odaki@daynix.com>

> @@ -2116,31 +2172,49 @@ static ssize_t tun_put_user(struct tun_struct *tun,
>         }
>
>         if (vnet_hdr_sz) {
> -               struct virtio_net_hdr gso;
> +               union {
> +                       struct virtio_net_hdr hdr;
> +                       struct virtio_net_hdr_v1_hash v1_hash_hdr;
> +               } hdr;
> +               int ret;
>
>                 if (iov_iter_count(iter) < vnet_hdr_sz)
>                         return -EINVAL;
>
> -               if (virtio_net_hdr_from_skb(skb, &gso,
> -                                           tun_is_little_endian(tun), true,
> -                                           vlan_hlen)) {
> +               if ((READ_ONCE(tun->vnet_hash.flags) & TUN_VNET_HASH_REPORT) &&
> +                   vnet_hdr_sz >= sizeof(hdr.v1_hash_hdr) &&
> +                   skb->tun_vnet_hash) {

Isn't vnet_hdr_sz guaranteed to be >= hdr.v1_hash_hdr, by virtue of
the set hash ioctl failing otherwise?

Such checks should be limited to control path where possible

> +                       vnet_hdr_content_sz = sizeof(hdr.v1_hash_hdr);
> +                       ret = virtio_net_hdr_v1_hash_from_skb(skb,
> +                                                             &hdr.v1_hash_hdr,
> +                                                             true,
> +                                                             vlan_hlen,
> +                                                             &vnet_hash);
> +               } else {
> +                       vnet_hdr_content_sz = sizeof(hdr.hdr);
> +                       ret = virtio_net_hdr_from_skb(skb, &hdr.hdr,
> +                                                     tun_is_little_endian(tun),
> +                                                     true, vlan_hlen);
> +               }
> +
Akihiko Odaki Oct. 9, 2023, 8:44 a.m. UTC | #7
On 2023/10/09 17:13, Willem de Bruijn wrote:
> On Sun, Oct 8, 2023 at 12:22 AM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
>>
>> virtio-net have two usage of hashes: one is RSS and another is hash
>> reporting. Conventionally the hash calculation was done by the VMM.
>> However, computing the hash after the queue was chosen defeats the
>> purpose of RSS.
>>
>> Another approach is to use eBPF steering program. This approach has
>> another downside: it cannot report the calculated hash due to the
>> restrictive nature of eBPF.
>>
>> Introduce the code to compute hashes to the kernel in order to overcome
>> thse challenges. An alternative solution is to extend the eBPF steering
>> program so that it will be able to report to the userspace, but it makes
>> little sense to allow to implement different hashing algorithms with
>> eBPF since the hash value reported by virtio-net is strictly defined by
>> the specification.
>>
>> The hash value already stored in sk_buff is not used and computed
>> independently since it may have been computed in a way not conformant
>> with the specification.
>>
>> Signed-off-by: Akihiko Odaki <akihiko.odaki@daynix.com>
> 
>> @@ -2116,31 +2172,49 @@ static ssize_t tun_put_user(struct tun_struct *tun,
>>          }
>>
>>          if (vnet_hdr_sz) {
>> -               struct virtio_net_hdr gso;
>> +               union {
>> +                       struct virtio_net_hdr hdr;
>> +                       struct virtio_net_hdr_v1_hash v1_hash_hdr;
>> +               } hdr;
>> +               int ret;
>>
>>                  if (iov_iter_count(iter) < vnet_hdr_sz)
>>                          return -EINVAL;
>>
>> -               if (virtio_net_hdr_from_skb(skb, &gso,
>> -                                           tun_is_little_endian(tun), true,
>> -                                           vlan_hlen)) {
>> +               if ((READ_ONCE(tun->vnet_hash.flags) & TUN_VNET_HASH_REPORT) &&
>> +                   vnet_hdr_sz >= sizeof(hdr.v1_hash_hdr) &&
>> +                   skb->tun_vnet_hash) {
> 
> Isn't vnet_hdr_sz guaranteed to be >= hdr.v1_hash_hdr, by virtue of
> the set hash ioctl failing otherwise?
> 
> Such checks should be limited to control path where possible

There is a potential race since tun->vnet_hash.flags and vnet_hdr_sz are 
not read at once.
Akihiko Odaki Oct. 9, 2023, 8:57 a.m. UTC | #8
On 2023/10/09 17:04, Willem de Bruijn wrote:
> On Sun, Oct 8, 2023 at 3:46 PM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
>>
>> On 2023/10/09 5:08, Willem de Bruijn wrote:
>>> On Sun, Oct 8, 2023 at 10:04 PM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
>>>>
>>>> On 2023/10/09 4:07, Willem de Bruijn wrote:
>>>>> On Sun, Oct 8, 2023 at 7:22 AM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
>>>>>>
>>>>>> virtio-net have two usage of hashes: one is RSS and another is hash
>>>>>> reporting. Conventionally the hash calculation was done by the VMM.
>>>>>> However, computing the hash after the queue was chosen defeats the
>>>>>> purpose of RSS.
>>>>>>
>>>>>> Another approach is to use eBPF steering program. This approach has
>>>>>> another downside: it cannot report the calculated hash due to the
>>>>>> restrictive nature of eBPF.
>>>>>>
>>>>>> Introduce the code to compute hashes to the kernel in order to overcome
>>>>>> thse challenges. An alternative solution is to extend the eBPF steering
>>>>>> program so that it will be able to report to the userspace, but it makes
>>>>>> little sense to allow to implement different hashing algorithms with
>>>>>> eBPF since the hash value reported by virtio-net is strictly defined by
>>>>>> the specification.
>>>>>>
>>>>>> The hash value already stored in sk_buff is not used and computed
>>>>>> independently since it may have been computed in a way not conformant
>>>>>> with the specification.
>>>>>>
>>>>>> Signed-off-by: Akihiko Odaki <akihiko.odaki@daynix.com>
>>>>>> ---
>>>>>
>>>>>> +static const struct tun_vnet_hash_cap tun_vnet_hash_cap = {
>>>>>> +       .max_indirection_table_length =
>>>>>> +               TUN_VNET_HASH_MAX_INDIRECTION_TABLE_LENGTH,
>>>>>> +
>>>>>> +       .types = VIRTIO_NET_SUPPORTED_HASH_TYPES
>>>>>> +};
>>>>>
>>>>> No need to have explicit capabilities exchange like this? Tun either
>>>>> supports all or none.
>>>>
>>>> tun does not support VIRTIO_NET_RSS_HASH_TYPE_IP_EX,
>>>> VIRTIO_NET_RSS_HASH_TYPE_TCP_EX, and VIRTIO_NET_RSS_HASH_TYPE_UDP_EX.
>>>>
>>>> It is because the flow dissector does not support IPv6 extensions. The
>>>> specification is also vague, and does not tell how many TLVs should be
>>>> consumed at most when interpreting destination option header so I chose
>>>> to avoid adding code for these hash types to the flow dissector. I doubt
>>>> anyone will complain about it since nobody complains for Linux.
>>>>
>>>> I'm also adding this so that we can extend it later.
>>>> max_indirection_table_length may grow for systems with 128+ CPUs, or
>>>> types may have other bits for new protocols in the future.
>>>>
>>>>>
>>>>>>            case TUNSETSTEERINGEBPF:
>>>>>> -               ret = tun_set_ebpf(tun, &tun->steering_prog, argp);
>>>>>> +               bpf_ret = tun_set_ebpf(tun, &tun->steering_prog, argp);
>>>>>> +               if (IS_ERR(bpf_ret))
>>>>>> +                       ret = PTR_ERR(bpf_ret);
>>>>>> +               else if (bpf_ret)
>>>>>> +                       tun->vnet_hash.flags &= ~TUN_VNET_HASH_RSS;
>>>>>
>>>>> Don't make one feature disable another.
>>>>>
>>>>> TUNSETSTEERINGEBPF and TUNSETVNETHASH are mutually exclusive
>>>>> functions. If one is enabled the other call should fail, with EBUSY
>>>>> for instance.
>>>>>
>>>>>> +       case TUNSETVNETHASH:
>>>>>> +               len = sizeof(vnet_hash);
>>>>>> +               if (copy_from_user(&vnet_hash, argp, len)) {
>>>>>> +                       ret = -EFAULT;
>>>>>> +                       break;
>>>>>> +               }
>>>>>> +
>>>>>> +               if (((vnet_hash.flags & TUN_VNET_HASH_REPORT) &&
>>>>>> +                    (tun->vnet_hdr_sz < sizeof(struct virtio_net_hdr_v1_hash) ||
>>>>>> +                     !tun_is_little_endian(tun))) ||
>>>>>> +                    vnet_hash.indirection_table_mask >=
>>>>>> +                    TUN_VNET_HASH_MAX_INDIRECTION_TABLE_LENGTH) {
>>>>>> +                       ret = -EINVAL;
>>>>>> +                       break;
>>>>>> +               }
>>>>>> +
>>>>>> +               argp = (u8 __user *)argp + len;
>>>>>> +               len = (vnet_hash.indirection_table_mask + 1) * 2;
>>>>>> +               if (copy_from_user(vnet_hash_indirection_table, argp, len)) {
>>>>>> +                       ret = -EFAULT;
>>>>>> +                       break;
>>>>>> +               }
>>>>>> +
>>>>>> +               argp = (u8 __user *)argp + len;
>>>>>> +               len = virtio_net_hash_key_length(vnet_hash.types);
>>>>>> +
>>>>>> +               if (copy_from_user(vnet_hash_key, argp, len)) {
>>>>>> +                       ret = -EFAULT;
>>>>>> +                       break;
>>>>>> +               }
>>>>>
>>>>> Probably easier and less error-prone to define a fixed size control
>>>>> struct with the max indirection table size.
>>>>
>>>> I made its size variable because the indirection table and key may grow
>>>> in the future as I wrote above.
>>>>
>>>>>
>>>>> Btw: please trim the CC: list considerably on future patches.
>>>>
>>>> I'll do so in the next version with the TUNSETSTEERINGEBPF change you
>>>> proposed.
>>>
>>> To be clear: please don't just resubmit with that one change.
>>>
>>> The skb and cb issues are quite fundamental issues that need to be resolved.
>>>
>>> I'd like to understand why adjusting the existing BPF feature for this
>>> exact purpose cannot be amended to return the key it produced.
>>
>> eBPF steering program is not designed for this particular problem in my
>> understanding. It was introduced to derive hash values with an
>> understanding of application-specific semantics of packets instead of
>> generic IP/TCP/UDP semantics.
>>
>> This problem is rather different in terms that the hash derivation is
>> strictly defined by virtio-net. I don't think it makes sense to
>> introduce the complexity of BPF when you always run the same code.
>>
>> It can utilize the existing flow dissector and also make it easier to
>> use for the userspace by implementing this in the kernel.
> 
> Ok. There does appear to be overlap in functionality. But it might be
> easier to deploy to just have standard Toeplitz available without
> having to compile and load an eBPF program.
> 
> As for the sk_buff and cb[] changes. The first is really not needed.
> sk_buff simply would not scale if every edge case needs a few bits.

An alternative is to move the bit to cb[] and clear it for every code 
path that leads to ndo_start_xmit(), but I'm worried that it is error-prone.

I think we can put the bit in sk_buff for now. We can implement the 
alternative when we are short of bits.

> 
> For the control block, generally it is not safe to use that across
> layers. In this case, between qdisc enqueue of a given device and
> ndo_start_xmit of that device, I suppose it is. Though uncommon. I
> wonder if there is any precedent.

That's one of the reasons for modifying qdisc_skb_cb instead of creating 
another struct that embeds it, as bpf_skb_data_end and tc_skb_cb do. This 
clarifies that it is qdisc's responsibility to keep these data intact.

> 
> The data will have to be stored in the skb somewhere. A simpler option
> is just skb->hash? This code would use skb_get_hash, if it would
> always produce a Toeplitz hash, anyway.

We still need tun_vnet_hash_report to identify hash type.

skb_get_hash() uses Siphash instead of Toeplitz, and the configuration 
of Toeplitz relies on tun's internal state. It is still possible to 
store a hash calculated with tun's own logic to skb->hash, but someone 
may later overwrite it with __skb_get_hash() though it's unlikely.
Willem de Bruijn Oct. 9, 2023, 9:54 a.m. UTC | #9
On Mon, Oct 9, 2023 at 3:44 AM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
>
> On 2023/10/09 17:13, Willem de Bruijn wrote:
> > On Sun, Oct 8, 2023 at 12:22 AM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
> >>
> >> virtio-net have two usage of hashes: one is RSS and another is hash
> >> reporting. Conventionally the hash calculation was done by the VMM.
> >> However, computing the hash after the queue was chosen defeats the
> >> purpose of RSS.
> >>
> >> Another approach is to use eBPF steering program. This approach has
> >> another downside: it cannot report the calculated hash due to the
> >> restrictive nature of eBPF.
> >>
> >> Introduce the code to compute hashes to the kernel in order to overcome
> >> thse challenges. An alternative solution is to extend the eBPF steering
> >> program so that it will be able to report to the userspace, but it makes
> >> little sense to allow to implement different hashing algorithms with
> >> eBPF since the hash value reported by virtio-net is strictly defined by
> >> the specification.
> >>
> >> The hash value already stored in sk_buff is not used and computed
> >> independently since it may have been computed in a way not conformant
> >> with the specification.
> >>
> >> Signed-off-by: Akihiko Odaki <akihiko.odaki@daynix.com>
> >
> >> @@ -2116,31 +2172,49 @@ static ssize_t tun_put_user(struct tun_struct *tun,
> >>          }
> >>
> >>          if (vnet_hdr_sz) {
> >> -               struct virtio_net_hdr gso;
> >> +               union {
> >> +                       struct virtio_net_hdr hdr;
> >> +                       struct virtio_net_hdr_v1_hash v1_hash_hdr;
> >> +               } hdr;
> >> +               int ret;
> >>
> >>                  if (iov_iter_count(iter) < vnet_hdr_sz)
> >>                          return -EINVAL;
> >>
> >> -               if (virtio_net_hdr_from_skb(skb, &gso,
> >> -                                           tun_is_little_endian(tun), true,
> >> -                                           vlan_hlen)) {
> >> +               if ((READ_ONCE(tun->vnet_hash.flags) & TUN_VNET_HASH_REPORT) &&
> >> +                   vnet_hdr_sz >= sizeof(hdr.v1_hash_hdr) &&
> >> +                   skb->tun_vnet_hash) {
> >
> > Isn't vnet_hdr_sz guaranteed to be >= hdr.v1_hash_hdr, by virtue of
> > the set hash ioctl failing otherwise?
> >
> > Such checks should be limited to control path where possible
>
> There is a potential race since tun->vnet_hash.flags and vnet_hdr_sz are
> not read at once.

It should not be possible to downgrade the hdr_sz once v1 is selected.
Willem de Bruijn Oct. 9, 2023, 9:57 a.m. UTC | #10
On Mon, Oct 9, 2023 at 3:57 AM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
>
> On 2023/10/09 17:04, Willem de Bruijn wrote:
> > On Sun, Oct 8, 2023 at 3:46 PM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
> >>
> >> On 2023/10/09 5:08, Willem de Bruijn wrote:
> >>> On Sun, Oct 8, 2023 at 10:04 PM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
> >>>>
> >>>> On 2023/10/09 4:07, Willem de Bruijn wrote:
> >>>>> On Sun, Oct 8, 2023 at 7:22 AM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
> >>>>>>
> >>>>>> virtio-net have two usage of hashes: one is RSS and another is hash
> >>>>>> reporting. Conventionally the hash calculation was done by the VMM.
> >>>>>> However, computing the hash after the queue was chosen defeats the
> >>>>>> purpose of RSS.
> >>>>>>
> >>>>>> Another approach is to use eBPF steering program. This approach has
> >>>>>> another downside: it cannot report the calculated hash due to the
> >>>>>> restrictive nature of eBPF.
> >>>>>>
> >>>>>> Introduce the code to compute hashes to the kernel in order to overcome
> >>>>>> thse challenges. An alternative solution is to extend the eBPF steering
> >>>>>> program so that it will be able to report to the userspace, but it makes
> >>>>>> little sense to allow to implement different hashing algorithms with
> >>>>>> eBPF since the hash value reported by virtio-net is strictly defined by
> >>>>>> the specification.
> >>>>>>
> >>>>>> The hash value already stored in sk_buff is not used and computed
> >>>>>> independently since it may have been computed in a way not conformant
> >>>>>> with the specification.
> >>>>>>
> >>>>>> Signed-off-by: Akihiko Odaki <akihiko.odaki@daynix.com>
> >>>>>> ---
> >>>>>
> >>>>>> +static const struct tun_vnet_hash_cap tun_vnet_hash_cap = {
> >>>>>> +       .max_indirection_table_length =
> >>>>>> +               TUN_VNET_HASH_MAX_INDIRECTION_TABLE_LENGTH,
> >>>>>> +
> >>>>>> +       .types = VIRTIO_NET_SUPPORTED_HASH_TYPES
> >>>>>> +};
> >>>>>
> >>>>> No need to have explicit capabilities exchange like this? Tun either
> >>>>> supports all or none.
> >>>>
> >>>> tun does not support VIRTIO_NET_RSS_HASH_TYPE_IP_EX,
> >>>> VIRTIO_NET_RSS_HASH_TYPE_TCP_EX, and VIRTIO_NET_RSS_HASH_TYPE_UDP_EX.
> >>>>
> >>>> It is because the flow dissector does not support IPv6 extensions. The
> >>>> specification is also vague, and does not tell how many TLVs should be
> >>>> consumed at most when interpreting destination option header so I chose
> >>>> to avoid adding code for these hash types to the flow dissector. I doubt
> >>>> anyone will complain about it since nobody complains for Linux.
> >>>>
> >>>> I'm also adding this so that we can extend it later.
> >>>> max_indirection_table_length may grow for systems with 128+ CPUs, or
> >>>> types may have other bits for new protocols in the future.
> >>>>
> >>>>>
> >>>>>>            case TUNSETSTEERINGEBPF:
> >>>>>> -               ret = tun_set_ebpf(tun, &tun->steering_prog, argp);
> >>>>>> +               bpf_ret = tun_set_ebpf(tun, &tun->steering_prog, argp);
> >>>>>> +               if (IS_ERR(bpf_ret))
> >>>>>> +                       ret = PTR_ERR(bpf_ret);
> >>>>>> +               else if (bpf_ret)
> >>>>>> +                       tun->vnet_hash.flags &= ~TUN_VNET_HASH_RSS;
> >>>>>
> >>>>> Don't make one feature disable another.
> >>>>>
> >>>>> TUNSETSTEERINGEBPF and TUNSETVNETHASH are mutually exclusive
> >>>>> functions. If one is enabled the other call should fail, with EBUSY
> >>>>> for instance.
> >>>>>
> >>>>>> +       case TUNSETVNETHASH:
> >>>>>> +               len = sizeof(vnet_hash);
> >>>>>> +               if (copy_from_user(&vnet_hash, argp, len)) {
> >>>>>> +                       ret = -EFAULT;
> >>>>>> +                       break;
> >>>>>> +               }
> >>>>>> +
> >>>>>> +               if (((vnet_hash.flags & TUN_VNET_HASH_REPORT) &&
> >>>>>> +                    (tun->vnet_hdr_sz < sizeof(struct virtio_net_hdr_v1_hash) ||
> >>>>>> +                     !tun_is_little_endian(tun))) ||
> >>>>>> +                    vnet_hash.indirection_table_mask >=
> >>>>>> +                    TUN_VNET_HASH_MAX_INDIRECTION_TABLE_LENGTH) {
> >>>>>> +                       ret = -EINVAL;
> >>>>>> +                       break;
> >>>>>> +               }
> >>>>>> +
> >>>>>> +               argp = (u8 __user *)argp + len;
> >>>>>> +               len = (vnet_hash.indirection_table_mask + 1) * 2;
> >>>>>> +               if (copy_from_user(vnet_hash_indirection_table, argp, len)) {
> >>>>>> +                       ret = -EFAULT;
> >>>>>> +                       break;
> >>>>>> +               }
> >>>>>> +
> >>>>>> +               argp = (u8 __user *)argp + len;
> >>>>>> +               len = virtio_net_hash_key_length(vnet_hash.types);
> >>>>>> +
> >>>>>> +               if (copy_from_user(vnet_hash_key, argp, len)) {
> >>>>>> +                       ret = -EFAULT;
> >>>>>> +                       break;
> >>>>>> +               }
> >>>>>
> >>>>> Probably easier and less error-prone to define a fixed size control
> >>>>> struct with the max indirection table size.
> >>>>
> >>>> I made its size variable because the indirection table and key may grow
> >>>> in the future as I wrote above.
> >>>>
> >>>>>
> >>>>> Btw: please trim the CC: list considerably on future patches.
> >>>>
> >>>> I'll do so in the next version with the TUNSETSTEERINGEBPF change you
> >>>> proposed.
> >>>
> >>> To be clear: please don't just resubmit with that one change.
> >>>
> >>> The skb and cb issues are quite fundamental issues that need to be resolved.
> >>>
> >>> I'd like to understand why adjusting the existing BPF feature for this
> >>> exact purpose cannot be amended to return the key it produced.
> >>
> >> eBPF steering program is not designed for this particular problem in my
> >> understanding. It was introduced to derive hash values with an
> >> understanding of application-specific semantics of packets instead of
> >> generic IP/TCP/UDP semantics.
> >>
> >> This problem is rather different in terms that the hash derivation is
> >> strictly defined by virtio-net. I don't think it makes sense to
> >> introduce the complexity of BPF when you always run the same code.
> >>
> >> It can utilize the existing flow dissector and also make it easier to
> >> use for the userspace by implementing this in the kernel.
> >
> > Ok. There does appear to be overlap in functionality. But it might be
> > easier to deploy to just have standard Toeplitz available without
> > having to compile and load an eBPF program.
> >
> > As for the sk_buff and cb[] changes. The first is really not needed.
> > sk_buff simply would not scale if every edge case needs a few bits.
>
> An alternative is to move the bit to cb[] and clear it for every code
> paths that lead to ndo_start_xmit(), but I'm worried that it is error-prone.
>
> I think we can put the bit in sk_buff for now. We can implement the
> alternative when we are short of bits.

I disagree. sk_buff fields add a cost to every code path. They cannot
be added for every edge case.
>
> >
> > For the control block, generally it is not safe to use that across
> > layers. In this case, between qdisc enqueue of a given device and
> > ndo_start_xmit of that device, I suppose it is. Though uncommon. I
> > wonder if there is any precedent.
>
> That's one of the reasons modifying qdisc_skb_cb instead of creating
> another struct embedding it as bpf_skb_data_end and tc_skb_cb do. This
> clarifies that it is qdisc's responsibility to keep these data intact.
>
> >
> > The data will have to be stored in the skb somewhere. A simpler option
> > is just skb->hash? This code would use skb_get_hash, if it would
> > always produce a Toeplitz hash, anyway.
>
> We still need tun_vnet_hash_report to identify hash type.
>
> skb_get_hash() uses Siphash instead of Toeplitz, and the configuration
> of Toeplitz relies on tun's internal state. It is still possible to
> store a hash calculated with tun's own logic to skb->hash, but someone
> may later overwrite it with __skb_get_hash() though it's unlikely.

That won't happen in this data path.

Fair point on the hash type.
Akihiko Odaki Oct. 9, 2023, 10:01 a.m. UTC | #11
On 2023/10/09 18:57, Willem de Bruijn wrote:
> On Mon, Oct 9, 2023 at 3:57 AM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
>>
>> On 2023/10/09 17:04, Willem de Bruijn wrote:
>>> On Sun, Oct 8, 2023 at 3:46 PM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
>>>>
>>>> On 2023/10/09 5:08, Willem de Bruijn wrote:
>>>>> On Sun, Oct 8, 2023 at 10:04 PM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
>>>>>>
>>>>>> On 2023/10/09 4:07, Willem de Bruijn wrote:
>>>>>>> On Sun, Oct 8, 2023 at 7:22 AM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
>>>>>>>>
>>>>>>>> virtio-net have two usage of hashes: one is RSS and another is hash
>>>>>>>> reporting. Conventionally the hash calculation was done by the VMM.
>>>>>>>> However, computing the hash after the queue was chosen defeats the
>>>>>>>> purpose of RSS.
>>>>>>>>
>>>>>>>> Another approach is to use eBPF steering program. This approach has
>>>>>>>> another downside: it cannot report the calculated hash due to the
>>>>>>>> restrictive nature of eBPF.
>>>>>>>>
>>>>>>>> Introduce the code to compute hashes to the kernel in order to overcome
>>>>>>>> thse challenges. An alternative solution is to extend the eBPF steering
>>>>>>>> program so that it will be able to report to the userspace, but it makes
>>>>>>>> little sense to allow to implement different hashing algorithms with
>>>>>>>> eBPF since the hash value reported by virtio-net is strictly defined by
>>>>>>>> the specification.
>>>>>>>>
>>>>>>>> The hash value already stored in sk_buff is not used and computed
>>>>>>>> independently since it may have been computed in a way not conformant
>>>>>>>> with the specification.
>>>>>>>>
>>>>>>>> Signed-off-by: Akihiko Odaki <akihiko.odaki@daynix.com>
>>>>>>>> ---
>>>>>>>
>>>>>>>> +static const struct tun_vnet_hash_cap tun_vnet_hash_cap = {
>>>>>>>> +       .max_indirection_table_length =
>>>>>>>> +               TUN_VNET_HASH_MAX_INDIRECTION_TABLE_LENGTH,
>>>>>>>> +
>>>>>>>> +       .types = VIRTIO_NET_SUPPORTED_HASH_TYPES
>>>>>>>> +};
>>>>>>>
>>>>>>> No need to have explicit capabilities exchange like this? Tun either
>>>>>>> supports all or none.
>>>>>>
>>>>>> tun does not support VIRTIO_NET_RSS_HASH_TYPE_IP_EX,
>>>>>> VIRTIO_NET_RSS_HASH_TYPE_TCP_EX, and VIRTIO_NET_RSS_HASH_TYPE_UDP_EX.
>>>>>>
>>>>>> It is because the flow dissector does not support IPv6 extensions. The
>>>>>> specification is also vague, and does not tell how many TLVs should be
>>>>>> consumed at most when interpreting destination option header so I chose
>>>>>> to avoid adding code for these hash types to the flow dissector. I doubt
>>>>>> anyone will complain about it since nobody complains for Linux.
>>>>>>
>>>>>> I'm also adding this so that we can extend it later.
>>>>>> max_indirection_table_length may grow for systems with 128+ CPUs, or
>>>>>> types may have other bits for new protocols in the future.
>>>>>>
>>>>>>>
>>>>>>>>             case TUNSETSTEERINGEBPF:
>>>>>>>> -               ret = tun_set_ebpf(tun, &tun->steering_prog, argp);
>>>>>>>> +               bpf_ret = tun_set_ebpf(tun, &tun->steering_prog, argp);
>>>>>>>> +               if (IS_ERR(bpf_ret))
>>>>>>>> +                       ret = PTR_ERR(bpf_ret);
>>>>>>>> +               else if (bpf_ret)
>>>>>>>> +                       tun->vnet_hash.flags &= ~TUN_VNET_HASH_RSS;
>>>>>>>
>>>>>>> Don't make one feature disable another.
>>>>>>>
>>>>>>> TUNSETSTEERINGEBPF and TUNSETVNETHASH are mutually exclusive
>>>>>>> functions. If one is enabled the other call should fail, with EBUSY
>>>>>>> for instance.
>>>>>>>
>>>>>>>> +       case TUNSETVNETHASH:
>>>>>>>> +               len = sizeof(vnet_hash);
>>>>>>>> +               if (copy_from_user(&vnet_hash, argp, len)) {
>>>>>>>> +                       ret = -EFAULT;
>>>>>>>> +                       break;
>>>>>>>> +               }
>>>>>>>> +
>>>>>>>> +               if (((vnet_hash.flags & TUN_VNET_HASH_REPORT) &&
>>>>>>>> +                    (tun->vnet_hdr_sz < sizeof(struct virtio_net_hdr_v1_hash) ||
>>>>>>>> +                     !tun_is_little_endian(tun))) ||
>>>>>>>> +                    vnet_hash.indirection_table_mask >=
>>>>>>>> +                    TUN_VNET_HASH_MAX_INDIRECTION_TABLE_LENGTH) {
>>>>>>>> +                       ret = -EINVAL;
>>>>>>>> +                       break;
>>>>>>>> +               }
>>>>>>>> +
>>>>>>>> +               argp = (u8 __user *)argp + len;
>>>>>>>> +               len = (vnet_hash.indirection_table_mask + 1) * 2;
>>>>>>>> +               if (copy_from_user(vnet_hash_indirection_table, argp, len)) {
>>>>>>>> +                       ret = -EFAULT;
>>>>>>>> +                       break;
>>>>>>>> +               }
>>>>>>>> +
>>>>>>>> +               argp = (u8 __user *)argp + len;
>>>>>>>> +               len = virtio_net_hash_key_length(vnet_hash.types);
>>>>>>>> +
>>>>>>>> +               if (copy_from_user(vnet_hash_key, argp, len)) {
>>>>>>>> +                       ret = -EFAULT;
>>>>>>>> +                       break;
>>>>>>>> +               }
>>>>>>>
>>>>>>> Probably easier and less error-prone to define a fixed size control
>>>>>>> struct with the max indirection table size.
>>>>>>
>>>>>> I made its size variable because the indirection table and key may grow
>>>>>> in the future as I wrote above.
>>>>>>
>>>>>>>
>>>>>>> Btw: please trim the CC: list considerably on future patches.
>>>>>>
>>>>>> I'll do so in the next version with the TUNSETSTEERINGEBPF change you
>>>>>> proposed.
>>>>>
>>>>> To be clear: please don't just resubmit with that one change.
>>>>>
>>>>> The skb and cb issues are quite fundamental issues that need to be resolved.
>>>>>
>>>>> I'd like to understand why adjusting the existing BPF feature for this
>>>>> exact purpose cannot be amended to return the key it produced.
>>>>
>>>> eBPF steering program is not designed for this particular problem in my
>>>> understanding. It was introduced to derive hash values with an
>>>> understanding of application-specific semantics of packets instead of
>>>> generic IP/TCP/UDP semantics.
>>>>
>>>> This problem is rather different in terms that the hash derivation is
>>>> strictly defined by virtio-net. I don't think it makes sense to
>>>> introduce the complexity of BPF when you always run the same code.
>>>>
>>>> It can utilize the existing flow dissector and also make it easier to
>>>> use for the userspace by implementing this in the kernel.
>>>
>>> Ok. There does appear to be overlap in functionality. But it might be
>>> easier to deploy to just have standard Toeplitz available without
>>> having to compile and load an eBPF program.
>>>
>>> As for the sk_buff and cb[] changes. The first is really not needed.
>>> sk_buff simply would not scale if every edge case needs a few bits.
>>
>> An alternative is to move the bit to cb[] and clear it for every code
>> paths that lead to ndo_start_xmit(), but I'm worried that it is error-prone.
>>
>> I think we can put the bit in sk_buff for now. We can implement the
>> alternative when we are short of bits.
> 
> I disagree. sk_buff fields add a cost to every code path. They cannot
> be added for every edge case.

It only takes an unused bit and does not grow the sk_buff size so I 
think it has practically no cost for now.
Akihiko Odaki Oct. 9, 2023, 10:05 a.m. UTC | #12
On 2023/10/09 18:54, Willem de Bruijn wrote:
> On Mon, Oct 9, 2023 at 3:44 AM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
>>
>> On 2023/10/09 17:13, Willem de Bruijn wrote:
>>> On Sun, Oct 8, 2023 at 12:22 AM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
>>>>
>>>> virtio-net have two usage of hashes: one is RSS and another is hash
>>>> reporting. Conventionally the hash calculation was done by the VMM.
>>>> However, computing the hash after the queue was chosen defeats the
>>>> purpose of RSS.
>>>>
>>>> Another approach is to use eBPF steering program. This approach has
>>>> another downside: it cannot report the calculated hash due to the
>>>> restrictive nature of eBPF.
>>>>
>>>> Introduce the code to compute hashes to the kernel in order to overcome
>>>> thse challenges. An alternative solution is to extend the eBPF steering
>>>> program so that it will be able to report to the userspace, but it makes
>>>> little sense to allow to implement different hashing algorithms with
>>>> eBPF since the hash value reported by virtio-net is strictly defined by
>>>> the specification.
>>>>
>>>> The hash value already stored in sk_buff is not used and computed
>>>> independently since it may have been computed in a way not conformant
>>>> with the specification.
>>>>
>>>> Signed-off-by: Akihiko Odaki <akihiko.odaki@daynix.com>
>>>
>>>> @@ -2116,31 +2172,49 @@ static ssize_t tun_put_user(struct tun_struct *tun,
>>>>           }
>>>>
>>>>           if (vnet_hdr_sz) {
>>>> -               struct virtio_net_hdr gso;
>>>> +               union {
>>>> +                       struct virtio_net_hdr hdr;
>>>> +                       struct virtio_net_hdr_v1_hash v1_hash_hdr;
>>>> +               } hdr;
>>>> +               int ret;
>>>>
>>>>                   if (iov_iter_count(iter) < vnet_hdr_sz)
>>>>                           return -EINVAL;
>>>>
>>>> -               if (virtio_net_hdr_from_skb(skb, &gso,
>>>> -                                           tun_is_little_endian(tun), true,
>>>> -                                           vlan_hlen)) {
>>>> +               if ((READ_ONCE(tun->vnet_hash.flags) & TUN_VNET_HASH_REPORT) &&
>>>> +                   vnet_hdr_sz >= sizeof(hdr.v1_hash_hdr) &&
>>>> +                   skb->tun_vnet_hash) {
>>>
>>> Isn't vnet_hdr_sz guaranteed to be >= hdr.v1_hash_hdr, by virtue of
>>> the set hash ioctl failing otherwise?
>>>
>>> Such checks should be limited to control path where possible
>>
>> There is a potential race since tun->vnet_hash.flags and vnet_hdr_sz are
>> not read at once.
> 
> It should not be possible to downgrade the hdr_sz once v1 is selected.

I see nothing that prevents shrinking the header size.

tun->vnet_hash.flags is read after vnet_hdr_sz, so the race can happen 
even in the case where the header size grows, though this can be fixed 
by reordering the two reads.
Willem de Bruijn Oct. 9, 2023, 10:06 a.m. UTC | #13
On Mon, Oct 9, 2023 at 3:02 AM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
>
> On 2023/10/09 18:57, Willem de Bruijn wrote:
> > On Mon, Oct 9, 2023 at 3:57 AM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
> >>
> >> On 2023/10/09 17:04, Willem de Bruijn wrote:
> >>> On Sun, Oct 8, 2023 at 3:46 PM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
> >>>>
> >>>> On 2023/10/09 5:08, Willem de Bruijn wrote:
> >>>>> On Sun, Oct 8, 2023 at 10:04 PM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
> >>>>>>
> >>>>>> On 2023/10/09 4:07, Willem de Bruijn wrote:
> >>>>>>> On Sun, Oct 8, 2023 at 7:22 AM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
> >>>>>>>>
> >>>>>>>> virtio-net have two usage of hashes: one is RSS and another is hash
> >>>>>>>> reporting. Conventionally the hash calculation was done by the VMM.
> >>>>>>>> However, computing the hash after the queue was chosen defeats the
> >>>>>>>> purpose of RSS.
> >>>>>>>>
> >>>>>>>> Another approach is to use eBPF steering program. This approach has
> >>>>>>>> another downside: it cannot report the calculated hash due to the
> >>>>>>>> restrictive nature of eBPF.
> >>>>>>>>
> >>>>>>>> Introduce the code to compute hashes to the kernel in order to overcome
> >>>>>>>> these challenges. An alternative solution is to extend the eBPF steering
> >>>>>>>> program so that it will be able to report to the userspace, but it makes
> >>>>>>>> little sense to allow to implement different hashing algorithms with
> >>>>>>>> eBPF since the hash value reported by virtio-net is strictly defined by
> >>>>>>>> the specification.
> >>>>>>>>
> >>>>>>>> The hash value already stored in sk_buff is not used and computed
> >>>>>>>> independently since it may have been computed in a way not conformant
> >>>>>>>> with the specification.
> >>>>>>>>
> >>>>>>>> Signed-off-by: Akihiko Odaki <akihiko.odaki@daynix.com>
> >>>>>>>> ---
> >>>>>>>
> >>>>>>>> +static const struct tun_vnet_hash_cap tun_vnet_hash_cap = {
> >>>>>>>> +       .max_indirection_table_length =
> >>>>>>>> +               TUN_VNET_HASH_MAX_INDIRECTION_TABLE_LENGTH,
> >>>>>>>> +
> >>>>>>>> +       .types = VIRTIO_NET_SUPPORTED_HASH_TYPES
> >>>>>>>> +};
> >>>>>>>
> >>>>>>> No need to have explicit capabilities exchange like this? Tun either
> >>>>>>> supports all or none.
> >>>>>>
> >>>>>> tun does not support VIRTIO_NET_RSS_HASH_TYPE_IP_EX,
> >>>>>> VIRTIO_NET_RSS_HASH_TYPE_TCP_EX, and VIRTIO_NET_RSS_HASH_TYPE_UDP_EX.
> >>>>>>
> >>>>>> It is because the flow dissector does not support IPv6 extensions. The
> >>>>>> specification is also vague, and does not tell how many TLVs should be
> >>>>>> consumed at most when interpreting destination option header so I chose
> >>>>>> to avoid adding code for these hash types to the flow dissector. I doubt
> >>>>>> anyone will complain about it since nobody complains for Linux.
> >>>>>>
> >>>>>> I'm also adding this so that we can extend it later.
> >>>>>> max_indirection_table_length may grow for systems with 128+ CPUs, or
> >>>>>> types may have other bits for new protocols in the future.
> >>>>>>
> >>>>>>>
> >>>>>>>>             case TUNSETSTEERINGEBPF:
> >>>>>>>> -               ret = tun_set_ebpf(tun, &tun->steering_prog, argp);
> >>>>>>>> +               bpf_ret = tun_set_ebpf(tun, &tun->steering_prog, argp);
> >>>>>>>> +               if (IS_ERR(bpf_ret))
> >>>>>>>> +                       ret = PTR_ERR(bpf_ret);
> >>>>>>>> +               else if (bpf_ret)
> >>>>>>>> +                       tun->vnet_hash.flags &= ~TUN_VNET_HASH_RSS;
> >>>>>>>
> >>>>>>> Don't make one feature disable another.
> >>>>>>>
> >>>>>>> TUNSETSTEERINGEBPF and TUNSETVNETHASH are mutually exclusive
> >>>>>>> functions. If one is enabled the other call should fail, with EBUSY
> >>>>>>> for instance.
> >>>>>>>
> >>>>>>>> +       case TUNSETVNETHASH:
> >>>>>>>> +               len = sizeof(vnet_hash);
> >>>>>>>> +               if (copy_from_user(&vnet_hash, argp, len)) {
> >>>>>>>> +                       ret = -EFAULT;
> >>>>>>>> +                       break;
> >>>>>>>> +               }
> >>>>>>>> +
> >>>>>>>> +               if (((vnet_hash.flags & TUN_VNET_HASH_REPORT) &&
> >>>>>>>> +                    (tun->vnet_hdr_sz < sizeof(struct virtio_net_hdr_v1_hash) ||
> >>>>>>>> +                     !tun_is_little_endian(tun))) ||
> >>>>>>>> +                    vnet_hash.indirection_table_mask >=
> >>>>>>>> +                    TUN_VNET_HASH_MAX_INDIRECTION_TABLE_LENGTH) {
> >>>>>>>> +                       ret = -EINVAL;
> >>>>>>>> +                       break;
> >>>>>>>> +               }
> >>>>>>>> +
> >>>>>>>> +               argp = (u8 __user *)argp + len;
> >>>>>>>> +               len = (vnet_hash.indirection_table_mask + 1) * 2;
> >>>>>>>> +               if (copy_from_user(vnet_hash_indirection_table, argp, len)) {
> >>>>>>>> +                       ret = -EFAULT;
> >>>>>>>> +                       break;
> >>>>>>>> +               }
> >>>>>>>> +
> >>>>>>>> +               argp = (u8 __user *)argp + len;
> >>>>>>>> +               len = virtio_net_hash_key_length(vnet_hash.types);
> >>>>>>>> +
> >>>>>>>> +               if (copy_from_user(vnet_hash_key, argp, len)) {
> >>>>>>>> +                       ret = -EFAULT;
> >>>>>>>> +                       break;
> >>>>>>>> +               }
> >>>>>>>
> >>>>>>> Probably easier and less error-prone to define a fixed size control
> >>>>>>> struct with the max indirection table size.
> >>>>>>
> >>>>>> I made its size variable because the indirection table and key may grow
> >>>>>> in the future as I wrote above.
> >>>>>>
> >>>>>>>
> >>>>>>> Btw: please trim the CC: list considerably on future patches.
> >>>>>>
> >>>>>> I'll do so in the next version with the TUNSETSTEERINGEBPF change you
> >>>>>> proposed.
> >>>>>
> >>>>> To be clear: please don't just resubmit with that one change.
> >>>>>
> >>>>> The skb and cb issues are quite fundamental issues that need to be resolved.
> >>>>>
> >>>>> I'd like to understand why adjusting the existing BPF feature for this
> >>>>> exact purpose cannot be amended to return the key it produced.
> >>>>
> >>>> eBPF steering program is not designed for this particular problem in my
> >>>> understanding. It was introduced to derive hash values with an
> >>>> understanding of application-specific semantics of packets instead of
> >>>> generic IP/TCP/UDP semantics.
> >>>>
> >>>> This problem is rather different in terms that the hash derivation is
> >>>> strictly defined by virtio-net. I don't think it makes sense to
> >>>> introduce the complexity of BPF when you always run the same code.
> >>>>
> >>>> It can utilize the existing flow dissector and also make it easier to
> >>>> use for the userspace by implementing this in the kernel.
> >>>
> >>> Ok. There does appear to be overlap in functionality. But it might be
> >>> easier to deploy to just have standard Toeplitz available without
> >>> having to compile and load an eBPF program.
> >>>
> >>> As for the sk_buff and cb[] changes. The first is really not needed.
> >>> sk_buff simply would not scale if every edge case needs a few bits.
> >>
> >> An alternative is to move the bit to cb[] and clear it for every code
> >> paths that lead to ndo_start_xmit(), but I'm worried that it is error-prone.
> >>
> >> I think we can put the bit in sk_buff for now. We can implement the
> >> alternative when we are short of bits.
> >
> > I disagree. sk_buff fields add a cost to every code path. They cannot
> > be added for every edge case.
>
> It only takes an unused bit and does not grow the sk_buff size so I
> think it has practically no cost for now.

The problem is that that thinking leads to death by a thousand cuts.

"for now" forces the cost of having to think hard how to avoid growing
sk_buff onto the next person. Let's do it right from the start.
Willem de Bruijn Oct. 9, 2023, 10:07 a.m. UTC | #14
On Mon, Oct 9, 2023 at 3:05 AM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
>
>
>
> On 2023/10/09 18:54, Willem de Bruijn wrote:
> > On Mon, Oct 9, 2023 at 3:44 AM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
> >>
> >> On 2023/10/09 17:13, Willem de Bruijn wrote:
> >>> On Sun, Oct 8, 2023 at 12:22 AM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
> >>>>
> >>>> virtio-net have two usage of hashes: one is RSS and another is hash
> >>>> reporting. Conventionally the hash calculation was done by the VMM.
> >>>> However, computing the hash after the queue was chosen defeats the
> >>>> purpose of RSS.
> >>>>
> >>>> Another approach is to use eBPF steering program. This approach has
> >>>> another downside: it cannot report the calculated hash due to the
> >>>> restrictive nature of eBPF.
> >>>>
> >>>> Introduce the code to compute hashes to the kernel in order to overcome
> >>>> these challenges. An alternative solution is to extend the eBPF steering
> >>>> program so that it will be able to report to the userspace, but it makes
> >>>> little sense to allow to implement different hashing algorithms with
> >>>> eBPF since the hash value reported by virtio-net is strictly defined by
> >>>> the specification.
> >>>>
> >>>> The hash value already stored in sk_buff is not used and computed
> >>>> independently since it may have been computed in a way not conformant
> >>>> with the specification.
> >>>>
> >>>> Signed-off-by: Akihiko Odaki <akihiko.odaki@daynix.com>
> >>>
> >>>> @@ -2116,31 +2172,49 @@ static ssize_t tun_put_user(struct tun_struct *tun,
> >>>>           }
> >>>>
> >>>>           if (vnet_hdr_sz) {
> >>>> -               struct virtio_net_hdr gso;
> >>>> +               union {
> >>>> +                       struct virtio_net_hdr hdr;
> >>>> +                       struct virtio_net_hdr_v1_hash v1_hash_hdr;
> >>>> +               } hdr;
> >>>> +               int ret;
> >>>>
> >>>>                   if (iov_iter_count(iter) < vnet_hdr_sz)
> >>>>                           return -EINVAL;
> >>>>
> >>>> -               if (virtio_net_hdr_from_skb(skb, &gso,
> >>>> -                                           tun_is_little_endian(tun), true,
> >>>> -                                           vlan_hlen)) {
> >>>> +               if ((READ_ONCE(tun->vnet_hash.flags) & TUN_VNET_HASH_REPORT) &&
> >>>> +                   vnet_hdr_sz >= sizeof(hdr.v1_hash_hdr) &&
> >>>> +                   skb->tun_vnet_hash) {
> >>>
> >>> Isn't vnet_hdr_sz guaranteed to be >= hdr.v1_hash_hdr, by virtue of
> >>> the set hash ioctl failing otherwise?
> >>>
> >>> Such checks should be limited to control path where possible
> >>
> >> There is a potential race since tun->vnet_hash.flags and vnet_hdr_sz are
> >> not read at once.
> >
> > It should not be possible to downgrade the hdr_sz once v1 is selected.
>
> I see nothing that prevents shrinking the header size.
>
> tun->vnet_hash.flags is read after vnet_hdr_sz so the race can happen
> even for the case the header size grows though this can be fixed by
> reordering the two reads.

One option is to fail any control path that tries to re-negotiate
header size once this hash option is enabled?

There is no practical reason to allow feature re-negotiation at any
arbitrary time.
Akihiko Odaki Oct. 9, 2023, 10:11 a.m. UTC | #15
On 2023/10/09 19:07, Willem de Bruijn wrote:
> On Mon, Oct 9, 2023 at 3:05 AM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
>>
>>
>>
>> On 2023/10/09 18:54, Willem de Bruijn wrote:
>>> On Mon, Oct 9, 2023 at 3:44 AM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
>>>>
>>>> On 2023/10/09 17:13, Willem de Bruijn wrote:
>>>>> On Sun, Oct 8, 2023 at 12:22 AM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
>>>>>>
>>>>>> virtio-net have two usage of hashes: one is RSS and another is hash
>>>>>> reporting. Conventionally the hash calculation was done by the VMM.
>>>>>> However, computing the hash after the queue was chosen defeats the
>>>>>> purpose of RSS.
>>>>>>
>>>>>> Another approach is to use eBPF steering program. This approach has
>>>>>> another downside: it cannot report the calculated hash due to the
>>>>>> restrictive nature of eBPF.
>>>>>>
>>>>>> Introduce the code to compute hashes to the kernel in order to overcome
>>>>>> these challenges. An alternative solution is to extend the eBPF steering
>>>>>> program so that it will be able to report to the userspace, but it makes
>>>>>> little sense to allow to implement different hashing algorithms with
>>>>>> eBPF since the hash value reported by virtio-net is strictly defined by
>>>>>> the specification.
>>>>>>
>>>>>> The hash value already stored in sk_buff is not used and computed
>>>>>> independently since it may have been computed in a way not conformant
>>>>>> with the specification.
>>>>>>
>>>>>> Signed-off-by: Akihiko Odaki <akihiko.odaki@daynix.com>
>>>>>
>>>>>> @@ -2116,31 +2172,49 @@ static ssize_t tun_put_user(struct tun_struct *tun,
>>>>>>            }
>>>>>>
>>>>>>            if (vnet_hdr_sz) {
>>>>>> -               struct virtio_net_hdr gso;
>>>>>> +               union {
>>>>>> +                       struct virtio_net_hdr hdr;
>>>>>> +                       struct virtio_net_hdr_v1_hash v1_hash_hdr;
>>>>>> +               } hdr;
>>>>>> +               int ret;
>>>>>>
>>>>>>                    if (iov_iter_count(iter) < vnet_hdr_sz)
>>>>>>                            return -EINVAL;
>>>>>>
>>>>>> -               if (virtio_net_hdr_from_skb(skb, &gso,
>>>>>> -                                           tun_is_little_endian(tun), true,
>>>>>> -                                           vlan_hlen)) {
>>>>>> +               if ((READ_ONCE(tun->vnet_hash.flags) & TUN_VNET_HASH_REPORT) &&
>>>>>> +                   vnet_hdr_sz >= sizeof(hdr.v1_hash_hdr) &&
>>>>>> +                   skb->tun_vnet_hash) {
>>>>>
>>>>> Isn't vnet_hdr_sz guaranteed to be >= hdr.v1_hash_hdr, by virtue of
>>>>> the set hash ioctl failing otherwise?
>>>>>
>>>>> Such checks should be limited to control path where possible
>>>>
>>>> There is a potential race since tun->vnet_hash.flags and vnet_hdr_sz are
>>>> not read at once.
>>>
>>> It should not be possible to downgrade the hdr_sz once v1 is selected.
>>
>> I see nothing that prevents shrinking the header size.
>>
>> tun->vnet_hash.flags is read after vnet_hdr_sz so the race can happen
>> even for the case the header size grows though this can be fixed by
>> reordering the two reads.
> 
> One option is to fail any control path that tries to re-negotiate
> header size once this hash option is enabled?
> 
> There is no practical reason to allow feature re-negotiation at any
> arbitrary time.

I think it's a bit of an awkward interface design since tun allows 
reconfiguring any of its parameters, but it's certainly possible.
Akihiko Odaki Oct. 9, 2023, 10:12 a.m. UTC | #16
On 2023/10/09 19:06, Willem de Bruijn wrote:
> On Mon, Oct 9, 2023 at 3:02 AM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
>>
>> On 2023/10/09 18:57, Willem de Bruijn wrote:
>>> On Mon, Oct 9, 2023 at 3:57 AM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
>>>>
>>>> On 2023/10/09 17:04, Willem de Bruijn wrote:
>>>>> On Sun, Oct 8, 2023 at 3:46 PM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
>>>>>>
>>>>>> On 2023/10/09 5:08, Willem de Bruijn wrote:
>>>>>>> On Sun, Oct 8, 2023 at 10:04 PM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
>>>>>>>>
>>>>>>>> On 2023/10/09 4:07, Willem de Bruijn wrote:
>>>>>>>>> On Sun, Oct 8, 2023 at 7:22 AM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
>>>>>>>>>>
>>>>>>>>>> virtio-net have two usage of hashes: one is RSS and another is hash
>>>>>>>>>> reporting. Conventionally the hash calculation was done by the VMM.
>>>>>>>>>> However, computing the hash after the queue was chosen defeats the
>>>>>>>>>> purpose of RSS.
>>>>>>>>>>
>>>>>>>>>> Another approach is to use eBPF steering program. This approach has
>>>>>>>>>> another downside: it cannot report the calculated hash due to the
>>>>>>>>>> restrictive nature of eBPF.
>>>>>>>>>>
>>>>>>>>>> Introduce the code to compute hashes to the kernel in order to overcome
> >>>>>>>>>> these challenges. An alternative solution is to extend the eBPF steering
>>>>>>>>>> program so that it will be able to report to the userspace, but it makes
>>>>>>>>>> little sense to allow to implement different hashing algorithms with
>>>>>>>>>> eBPF since the hash value reported by virtio-net is strictly defined by
>>>>>>>>>> the specification.
>>>>>>>>>>
>>>>>>>>>> The hash value already stored in sk_buff is not used and computed
>>>>>>>>>> independently since it may have been computed in a way not conformant
>>>>>>>>>> with the specification.
>>>>>>>>>>
>>>>>>>>>> Signed-off-by: Akihiko Odaki <akihiko.odaki@daynix.com>
>>>>>>>>>> ---
>>>>>>>>>
>>>>>>>>>> +static const struct tun_vnet_hash_cap tun_vnet_hash_cap = {
>>>>>>>>>> +       .max_indirection_table_length =
>>>>>>>>>> +               TUN_VNET_HASH_MAX_INDIRECTION_TABLE_LENGTH,
>>>>>>>>>> +
>>>>>>>>>> +       .types = VIRTIO_NET_SUPPORTED_HASH_TYPES
>>>>>>>>>> +};
>>>>>>>>>
>>>>>>>>> No need to have explicit capabilities exchange like this? Tun either
>>>>>>>>> supports all or none.
>>>>>>>>
>>>>>>>> tun does not support VIRTIO_NET_RSS_HASH_TYPE_IP_EX,
>>>>>>>> VIRTIO_NET_RSS_HASH_TYPE_TCP_EX, and VIRTIO_NET_RSS_HASH_TYPE_UDP_EX.
>>>>>>>>
>>>>>>>> It is because the flow dissector does not support IPv6 extensions. The
>>>>>>>> specification is also vague, and does not tell how many TLVs should be
>>>>>>>> consumed at most when interpreting destination option header so I chose
>>>>>>>> to avoid adding code for these hash types to the flow dissector. I doubt
>>>>>>>> anyone will complain about it since nobody complains for Linux.
>>>>>>>>
>>>>>>>> I'm also adding this so that we can extend it later.
>>>>>>>> max_indirection_table_length may grow for systems with 128+ CPUs, or
>>>>>>>> types may have other bits for new protocols in the future.
>>>>>>>>
>>>>>>>>>
>>>>>>>>>>              case TUNSETSTEERINGEBPF:
>>>>>>>>>> -               ret = tun_set_ebpf(tun, &tun->steering_prog, argp);
>>>>>>>>>> +               bpf_ret = tun_set_ebpf(tun, &tun->steering_prog, argp);
>>>>>>>>>> +               if (IS_ERR(bpf_ret))
>>>>>>>>>> +                       ret = PTR_ERR(bpf_ret);
>>>>>>>>>> +               else if (bpf_ret)
>>>>>>>>>> +                       tun->vnet_hash.flags &= ~TUN_VNET_HASH_RSS;
>>>>>>>>>
>>>>>>>>> Don't make one feature disable another.
>>>>>>>>>
>>>>>>>>> TUNSETSTEERINGEBPF and TUNSETVNETHASH are mutually exclusive
>>>>>>>>> functions. If one is enabled the other call should fail, with EBUSY
>>>>>>>>> for instance.
>>>>>>>>>
>>>>>>>>>> +       case TUNSETVNETHASH:
>>>>>>>>>> +               len = sizeof(vnet_hash);
>>>>>>>>>> +               if (copy_from_user(&vnet_hash, argp, len)) {
>>>>>>>>>> +                       ret = -EFAULT;
>>>>>>>>>> +                       break;
>>>>>>>>>> +               }
>>>>>>>>>> +
>>>>>>>>>> +               if (((vnet_hash.flags & TUN_VNET_HASH_REPORT) &&
>>>>>>>>>> +                    (tun->vnet_hdr_sz < sizeof(struct virtio_net_hdr_v1_hash) ||
>>>>>>>>>> +                     !tun_is_little_endian(tun))) ||
>>>>>>>>>> +                    vnet_hash.indirection_table_mask >=
>>>>>>>>>> +                    TUN_VNET_HASH_MAX_INDIRECTION_TABLE_LENGTH) {
>>>>>>>>>> +                       ret = -EINVAL;
>>>>>>>>>> +                       break;
>>>>>>>>>> +               }
>>>>>>>>>> +
>>>>>>>>>> +               argp = (u8 __user *)argp + len;
>>>>>>>>>> +               len = (vnet_hash.indirection_table_mask + 1) * 2;
>>>>>>>>>> +               if (copy_from_user(vnet_hash_indirection_table, argp, len)) {
>>>>>>>>>> +                       ret = -EFAULT;
>>>>>>>>>> +                       break;
>>>>>>>>>> +               }
>>>>>>>>>> +
>>>>>>>>>> +               argp = (u8 __user *)argp + len;
>>>>>>>>>> +               len = virtio_net_hash_key_length(vnet_hash.types);
>>>>>>>>>> +
>>>>>>>>>> +               if (copy_from_user(vnet_hash_key, argp, len)) {
>>>>>>>>>> +                       ret = -EFAULT;
>>>>>>>>>> +                       break;
>>>>>>>>>> +               }
>>>>>>>>>
>>>>>>>>> Probably easier and less error-prone to define a fixed size control
>>>>>>>>> struct with the max indirection table size.
>>>>>>>>
>>>>>>>> I made its size variable because the indirection table and key may grow
>>>>>>>> in the future as I wrote above.
>>>>>>>>
>>>>>>>>>
>>>>>>>>> Btw: please trim the CC: list considerably on future patches.
>>>>>>>>
>>>>>>>> I'll do so in the next version with the TUNSETSTEERINGEBPF change you
>>>>>>>> proposed.
>>>>>>>
>>>>>>> To be clear: please don't just resubmit with that one change.
>>>>>>>
>>>>>>> The skb and cb issues are quite fundamental issues that need to be resolved.
>>>>>>>
>>>>>>> I'd like to understand why adjusting the existing BPF feature for this
>>>>>>> exact purpose cannot be amended to return the key it produced.
>>>>>>
>>>>>> eBPF steering program is not designed for this particular problem in my
>>>>>> understanding. It was introduced to derive hash values with an
>>>>>> understanding of application-specific semantics of packets instead of
>>>>>> generic IP/TCP/UDP semantics.
>>>>>>
>>>>>> This problem is rather different in terms that the hash derivation is
>>>>>> strictly defined by virtio-net. I don't think it makes sense to
>>>>>> introduce the complexity of BPF when you always run the same code.
>>>>>>
>>>>>> It can utilize the existing flow dissector and also make it easier to
>>>>>> use for the userspace by implementing this in the kernel.
>>>>>
>>>>> Ok. There does appear to be overlap in functionality. But it might be
>>>>> easier to deploy to just have standard Toeplitz available without
>>>>> having to compile and load an eBPF program.
>>>>>
>>>>> As for the sk_buff and cb[] changes. The first is really not needed.
>>>>> sk_buff simply would not scale if every edge case needs a few bits.
>>>>
>>>> An alternative is to move the bit to cb[] and clear it for every code
>>>> paths that lead to ndo_start_xmit(), but I'm worried that it is error-prone.
>>>>
>>>> I think we can put the bit in sk_buff for now. We can implement the
>>>> alternative when we are short of bits.
>>>
>>> I disagree. sk_buff fields add a cost to every code path. They cannot
>>> be added for every edge case.
>>
>> It only takes an unused bit and does not grow the sk_buff size so I
>> think it has practically no cost for now.
> 
> The problem is that that thinking leads to death by a thousand cuts.
> 
> "for now" forces the cost of having to think hard how to avoid growing
> sk_buff onto the next person. Let's do it right from the start.

I see. I described an alternative earlier: move the bit to cb[] and clear it 
in all code paths that lead to ndo_start_xmit(). Does that 
sound good to you?
Willem de Bruijn Oct. 9, 2023, 10:32 a.m. UTC | #17
On Mon, Oct 9, 2023 at 3:11 AM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
>
> On 2023/10/09 19:07, Willem de Bruijn wrote:
> > On Mon, Oct 9, 2023 at 3:05 AM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
> >>
> >>
> >>
> >> On 2023/10/09 18:54, Willem de Bruijn wrote:
> >>> On Mon, Oct 9, 2023 at 3:44 AM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
> >>>>
> >>>> On 2023/10/09 17:13, Willem de Bruijn wrote:
> >>>>> On Sun, Oct 8, 2023 at 12:22 AM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
> >>>>>>
> >>>>>> virtio-net have two usage of hashes: one is RSS and another is hash
> >>>>>> reporting. Conventionally the hash calculation was done by the VMM.
> >>>>>> However, computing the hash after the queue was chosen defeats the
> >>>>>> purpose of RSS.
> >>>>>>
> >>>>>> Another approach is to use eBPF steering program. This approach has
> >>>>>> another downside: it cannot report the calculated hash due to the
> >>>>>> restrictive nature of eBPF.
> >>>>>>
> >>>>>> Introduce the code to compute hashes to the kernel in order to overcome
> >>>>>> these challenges. An alternative solution is to extend the eBPF steering
> >>>>>> program so that it will be able to report to the userspace, but it makes
> >>>>>> little sense to allow to implement different hashing algorithms with
> >>>>>> eBPF since the hash value reported by virtio-net is strictly defined by
> >>>>>> the specification.
> >>>>>>
> >>>>>> The hash value already stored in sk_buff is not used and computed
> >>>>>> independently since it may have been computed in a way not conformant
> >>>>>> with the specification.
> >>>>>>
> >>>>>> Signed-off-by: Akihiko Odaki <akihiko.odaki@daynix.com>
> >>>>>
> >>>>>> @@ -2116,31 +2172,49 @@ static ssize_t tun_put_user(struct tun_struct *tun,
> >>>>>>            }
> >>>>>>
> >>>>>>            if (vnet_hdr_sz) {
> >>>>>> -               struct virtio_net_hdr gso;
> >>>>>> +               union {
> >>>>>> +                       struct virtio_net_hdr hdr;
> >>>>>> +                       struct virtio_net_hdr_v1_hash v1_hash_hdr;
> >>>>>> +               } hdr;
> >>>>>> +               int ret;
> >>>>>>
> >>>>>>                    if (iov_iter_count(iter) < vnet_hdr_sz)
> >>>>>>                            return -EINVAL;
> >>>>>>
> >>>>>> -               if (virtio_net_hdr_from_skb(skb, &gso,
> >>>>>> -                                           tun_is_little_endian(tun), true,
> >>>>>> -                                           vlan_hlen)) {
> >>>>>> +               if ((READ_ONCE(tun->vnet_hash.flags) & TUN_VNET_HASH_REPORT) &&
> >>>>>> +                   vnet_hdr_sz >= sizeof(hdr.v1_hash_hdr) &&
> >>>>>> +                   skb->tun_vnet_hash) {
> >>>>>
> >>>>> Isn't vnet_hdr_sz guaranteed to be >= hdr.v1_hash_hdr, by virtue of
> >>>>> the set hash ioctl failing otherwise?
> >>>>>
> >>>>> Such checks should be limited to control path where possible
> >>>>
> >>>> There is a potential race since tun->vnet_hash.flags and vnet_hdr_sz are
> >>>> not read at once.
> >>>
> >>> It should not be possible to downgrade the hdr_sz once v1 is selected.
> >>
> >> I see nothing that prevents shrinking the header size.
> >>
> >> tun->vnet_hash.flags is read after vnet_hdr_sz so the race can happen
> >> even for the case the header size grows though this can be fixed by
> >> reordering the two reads.
> >
> > One option is to fail any control path that tries to re-negotiate
> > header size once this hash option is enabled?
> >
> > There is no practical reason to allow feature re-negotiation at any
> > arbitrary time.
>
> I think it's a bit awkward interface design since tun allows to
> reconfigure any of its parameters, but it's certainly possible.

If this would be the only exception to that rule, and this is the only
place that needs a datapath check, then it's fine to leave as is.

In general, this runtime configurability serves little purpose but to
help syzbot exercise code paths no real application would attempt. But
I won't ask to diverge from whatever tun already does. We just have to
be more careful about the possible races it brings.
Willem de Bruijn Oct. 9, 2023, 10:44 a.m. UTC | #18
On Mon, Oct 9, 2023 at 3:12 AM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
>
> On 2023/10/09 19:06, Willem de Bruijn wrote:
> > On Mon, Oct 9, 2023 at 3:02 AM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
> >>
> >> On 2023/10/09 18:57, Willem de Bruijn wrote:
> >>> On Mon, Oct 9, 2023 at 3:57 AM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
> >>>>
> >>>> On 2023/10/09 17:04, Willem de Bruijn wrote:
> >>>>> On Sun, Oct 8, 2023 at 3:46 PM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
> >>>>>>
> >>>>>> On 2023/10/09 5:08, Willem de Bruijn wrote:
> >>>>>>> On Sun, Oct 8, 2023 at 10:04 PM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
> >>>>>>>>
> >>>>>>>> On 2023/10/09 4:07, Willem de Bruijn wrote:
> >>>>>>>>> On Sun, Oct 8, 2023 at 7:22 AM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
> >>>>>>>>>>
> >>>>>>>>>> virtio-net have two usage of hashes: one is RSS and another is hash
> >>>>>>>>>> reporting. Conventionally the hash calculation was done by the VMM.
> >>>>>>>>>> However, computing the hash after the queue was chosen defeats the
> >>>>>>>>>> purpose of RSS.
> >>>>>>>>>>
> >>>>>>>>>> Another approach is to use eBPF steering program. This approach has
> >>>>>>>>>> another downside: it cannot report the calculated hash due to the
> >>>>>>>>>> restrictive nature of eBPF.
> >>>>>>>>>>
> >>>>>>>>>> Introduce the code to compute hashes to the kernel in order to overcome
> >>>>>>>>>> these challenges. An alternative solution is to extend the eBPF steering
> >>>>>>>>>> program so that it will be able to report to the userspace, but it makes
> >>>>>>>>>> little sense to allow to implement different hashing algorithms with
> >>>>>>>>>> eBPF since the hash value reported by virtio-net is strictly defined by
> >>>>>>>>>> the specification.
> >>>>>>>>>>
> >>>>>>>>>> The hash value already stored in sk_buff is not used and computed
> >>>>>>>>>> independently since it may have been computed in a way not conformant
> >>>>>>>>>> with the specification.
> >>>>>>>>>>
> >>>>>>>>>> Signed-off-by: Akihiko Odaki <akihiko.odaki@daynix.com>
> >>>>>>>>>> ---
> >>>>>>>>>
> >>>>>>>>>> +static const struct tun_vnet_hash_cap tun_vnet_hash_cap = {
> >>>>>>>>>> +       .max_indirection_table_length =
> >>>>>>>>>> +               TUN_VNET_HASH_MAX_INDIRECTION_TABLE_LENGTH,
> >>>>>>>>>> +
> >>>>>>>>>> +       .types = VIRTIO_NET_SUPPORTED_HASH_TYPES
> >>>>>>>>>> +};
> >>>>>>>>>
> >>>>>>>>> No need to have explicit capabilities exchange like this? Tun either
> >>>>>>>>> supports all or none.
> >>>>>>>>
> >>>>>>>> tun does not support VIRTIO_NET_RSS_HASH_TYPE_IP_EX,
> >>>>>>>> VIRTIO_NET_RSS_HASH_TYPE_TCP_EX, and VIRTIO_NET_RSS_HASH_TYPE_UDP_EX.
> >>>>>>>>
> >>>>>>>> It is because the flow dissector does not support IPv6 extensions. The
> >>>>>>>> specification is also vague, and does not tell how many TLVs should be
> >>>>>>>> consumed at most when interpreting destination option header so I chose
> >>>>>>>> to avoid adding code for these hash types to the flow dissector. I doubt
> >>>>>>>> anyone will complain about it since nobody complains for Linux.
> >>>>>>>>
> >>>>>>>> I'm also adding this so that we can extend it later.
> >>>>>>>> max_indirection_table_length may grow for systems with 128+ CPUs, or
> >>>>>>>> types may have other bits for new protocols in the future.
> >>>>>>>>
> >>>>>>>>>
> >>>>>>>>>>              case TUNSETSTEERINGEBPF:
> >>>>>>>>>> -               ret = tun_set_ebpf(tun, &tun->steering_prog, argp);
> >>>>>>>>>> +               bpf_ret = tun_set_ebpf(tun, &tun->steering_prog, argp);
> >>>>>>>>>> +               if (IS_ERR(bpf_ret))
> >>>>>>>>>> +                       ret = PTR_ERR(bpf_ret);
> >>>>>>>>>> +               else if (bpf_ret)
> >>>>>>>>>> +                       tun->vnet_hash.flags &= ~TUN_VNET_HASH_RSS;
> >>>>>>>>>
> >>>>>>>>> Don't make one feature disable another.
> >>>>>>>>>
> >>>>>>>>> TUNSETSTEERINGEBPF and TUNSETVNETHASH are mutually exclusive
> >>>>>>>>> functions. If one is enabled the other call should fail, with EBUSY
> >>>>>>>>> for instance.
> >>>>>>>>>
> >>>>>>>>>> +       case TUNSETVNETHASH:
> >>>>>>>>>> +               len = sizeof(vnet_hash);
> >>>>>>>>>> +               if (copy_from_user(&vnet_hash, argp, len)) {
> >>>>>>>>>> +                       ret = -EFAULT;
> >>>>>>>>>> +                       break;
> >>>>>>>>>> +               }
> >>>>>>>>>> +
> >>>>>>>>>> +               if (((vnet_hash.flags & TUN_VNET_HASH_REPORT) &&
> >>>>>>>>>> +                    (tun->vnet_hdr_sz < sizeof(struct virtio_net_hdr_v1_hash) ||
> >>>>>>>>>> +                     !tun_is_little_endian(tun))) ||
> >>>>>>>>>> +                    vnet_hash.indirection_table_mask >=
> >>>>>>>>>> +                    TUN_VNET_HASH_MAX_INDIRECTION_TABLE_LENGTH) {
> >>>>>>>>>> +                       ret = -EINVAL;
> >>>>>>>>>> +                       break;
> >>>>>>>>>> +               }
> >>>>>>>>>> +
> >>>>>>>>>> +               argp = (u8 __user *)argp + len;
> >>>>>>>>>> +               len = (vnet_hash.indirection_table_mask + 1) * 2;
> >>>>>>>>>> +               if (copy_from_user(vnet_hash_indirection_table, argp, len)) {
> >>>>>>>>>> +                       ret = -EFAULT;
> >>>>>>>>>> +                       break;
> >>>>>>>>>> +               }
> >>>>>>>>>> +
> >>>>>>>>>> +               argp = (u8 __user *)argp + len;
> >>>>>>>>>> +               len = virtio_net_hash_key_length(vnet_hash.types);
> >>>>>>>>>> +
> >>>>>>>>>> +               if (copy_from_user(vnet_hash_key, argp, len)) {
> >>>>>>>>>> +                       ret = -EFAULT;
> >>>>>>>>>> +                       break;
> >>>>>>>>>> +               }
> >>>>>>>>>
> >>>>>>>>> Probably easier and less error-prone to define a fixed size control
> >>>>>>>>> struct with the max indirection table size.
> >>>>>>>>
> >>>>>>>> I made its size variable because the indirection table and key may grow
> >>>>>>>> in the future as I wrote above.
> >>>>>>>>
> >>>>>>>>>
> >>>>>>>>> Btw: please trim the CC: list considerably on future patches.
> >>>>>>>>
> >>>>>>>> I'll do so in the next version with the TUNSETSTEERINGEBPF change you
> >>>>>>>> proposed.
> >>>>>>>
> >>>>>>> To be clear: please don't just resubmit with that one change.
> >>>>>>>
> >>>>>>> The skb and cb issues are quite fundamental issues that need to be resolved.
> >>>>>>>
> >>>>>>> I'd like to understand why adjusting the existing BPF feature for this
> >>>>>>> exact purpose cannot be amended to return the key it produced.
> >>>>>>
> >>>>>> eBPF steering program is not designed for this particular problem in my
> >>>>>> understanding. It was introduced to derive hash values with an
> >>>>>> understanding of application-specific semantics of packets instead of
> >>>>>> generic IP/TCP/UDP semantics.
> >>>>>>
> >>>>>> This problem is rather different in terms that the hash derivation is
> >>>>>> strictly defined by virtio-net. I don't think it makes sense to
> >>>>>> introduce the complexity of BPF when you always run the same code.
> >>>>>>
> >>>>>> It can utilize the existing flow dissector and also make it easier to
> >>>>>> use for the userspace by implementing this in the kernel.
> >>>>>
> >>>>> Ok. There does appear to be overlap in functionality. But it might be
> >>>>> easier to deploy to just have standard Toeplitz available without
> >>>>> having to compile and load an eBPF program.
> >>>>>
> >>>>> As for the sk_buff and cb[] changes. The first is really not needed.
> >>>>> sk_buff simply would not scale if every edge case needs a few bits.
> >>>>
> >>>> An alternative is to move the bit to cb[] and clear it for every code
> >>>> paths that lead to ndo_start_xmit(), but I'm worried that it is error-prone.
> >>>>
> >>>> I think we can put the bit in sk_buff for now. We can implement the
> >>>> alternative when we are short of bits.
> >>>
> >>> I disagree. sk_buff fields add a cost to every code path. They cannot
> >>> be added for every edge case.
> >>
> >> It only takes an unused bit and does not grow the sk_buff size so I
> >> think it has practically no cost for now.
> >
> > The problem is that that thinking leads to death by a thousand cuts.
> >
> > "for now" forces the cost of having to think hard how to avoid growing
> > sk_buff onto the next person. Let's do it right from the start.
>
> I see. I described an alternative to move the bit to cb[] and clear it
> in all code paths that lead to ndo_start_xmit() earlier. Does that
> sound good to you?

If you use the control block to pass information between
__dev_queue_xmit on the tun device and tun_net_xmit, using gso_skb_cb,
the field can be left undefined in all non-tun paths. tun_select_queue
can initialize.

I would still use skb->hash to encode the hash. That hash type of that
field is not strictly defined. It can be siphash from ___skb_get_hash
or a device hash, which most likely also uses Toeplitz. Then you also
don't run into the problem of growing the struct size.
Michael S. Tsirkin Oct. 9, 2023, 11:38 a.m. UTC | #19
Akihiko Odaki sorry about reposts.
Having an email with two "@" in the CC list:
	 xuanzhuo@linux.alibaba.comshuah@kernel.org
tripped up mutt's reply-all for me and made it send to you only.

I am guessing you meant two addresses: xuanzhuo@linux.alibaba.com
and shuah@kernel.org.


On Sun, Oct 08, 2023 at 02:20:49PM +0900, Akihiko Odaki wrote:
> virtio-net have two usage of hashes: one is RSS and another is hash
> reporting. Conventionally the hash calculation was done by the VMM.
> However, computing the hash after the queue was chosen defeats the
> purpose of RSS.
> 
> Another approach is to use eBPF steering program. This approach has
> another downside: it cannot report the calculated hash due to the
> restrictive nature of eBPF.
> 
> Introduce the code to compute hashes to the kernel in order to overcome
> these challenges. An alternative solution is to extend the eBPF steering
> program so that it will be able to report to the userspace, but it makes
> little sense to allow to implement different hashing algorithms with
> eBPF since the hash value reported by virtio-net is strictly defined by
> the specification.
> 
> The hash value already stored in sk_buff is not used and computed
> independently since it may have been computed in a way not conformant
> with the specification.
> 
> Signed-off-by: Akihiko Odaki <akihiko.odaki@daynix.com>
> ---
>  drivers/net/tun.c           | 187 ++++++++++++++++++++++++++++++++----
>  include/uapi/linux/if_tun.h |  16 +++
>  2 files changed, 182 insertions(+), 21 deletions(-)
> 
> diff --git a/drivers/net/tun.c b/drivers/net/tun.c
> index 89ab9efe522c..561a573cd008 100644
> --- a/drivers/net/tun.c
> +++ b/drivers/net/tun.c
> @@ -171,6 +171,9 @@ struct tun_prog {
>  	struct bpf_prog *prog;
>  };
>  
> +#define TUN_VNET_HASH_MAX_KEY_SIZE 40
> +#define TUN_VNET_HASH_MAX_INDIRECTION_TABLE_LENGTH 128
> +

where do these come from?

>  /* Since the socket were moved to tun_file, to preserve the behavior of persist
>   * device, socket filter, sndbuf and vnet header size were restore when the
>   * file were attached to a persist device.
> @@ -209,6 +212,9 @@ struct tun_struct {
>  	struct tun_prog __rcu *steering_prog;
>  	struct tun_prog __rcu *filter_prog;
>  	struct ethtool_link_ksettings link_ksettings;
> +	struct tun_vnet_hash vnet_hash;
> +	u16 vnet_hash_indirection_table[TUN_VNET_HASH_MAX_INDIRECTION_TABLE_LENGTH];
> +	u32 vnet_hash_key[TUN_VNET_HASH_MAX_KEY_SIZE / 4];

That's quite a lot of data to add in this struct, and will be used
by a small minority of users. Are you pushing any hot data out of cache
with this? Why not allocate these as needed?

>  	/* init args */
>  	struct file *file;
>  	struct ifreq *ifr;
> @@ -219,6 +225,13 @@ struct veth {
>  	__be16 h_vlan_TCI;
>  };
>  
> +static const struct tun_vnet_hash_cap tun_vnet_hash_cap = {
> +	.max_indirection_table_length =
> +		TUN_VNET_HASH_MAX_INDIRECTION_TABLE_LENGTH,
> +
> +	.types = VIRTIO_NET_SUPPORTED_HASH_TYPES
> +};
> +
>  static void tun_flow_init(struct tun_struct *tun);
>  static void tun_flow_uninit(struct tun_struct *tun);
>  
> @@ -320,10 +333,16 @@ static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
>  	if (get_user(be, argp))
>  		return -EFAULT;
>  
> -	if (be)
> +	if (be) {
> +		if (!(tun->flags & TUN_VNET_LE) &&
> +		    (tun->vnet_hash.flags & TUN_VNET_HASH_REPORT)) {
> +			return -EINVAL;
> +		}
> +
>  		tun->flags |= TUN_VNET_BE;
> -	else
> +	} else {
>  		tun->flags &= ~TUN_VNET_BE;
> +	}
>  
>  	return 0;
>  }
> @@ -558,15 +577,47 @@ static u16 tun_ebpf_select_queue(struct tun_struct *tun, struct sk_buff *skb)
>  	return ret % numqueues;
>  }
>  
> +static u16 tun_vnet_select_queue(struct tun_struct *tun, struct sk_buff *skb)
> +{
> +	u32 value = qdisc_skb_cb(skb)->tun_vnet_hash_value;
> +	u32 numqueues;
> +	u32 index;
> +	u16 queue;
> +
> +	numqueues = READ_ONCE(tun->numqueues);
> +	if (!numqueues)
> +		return 0;
> +
> +	index = value & READ_ONCE(tun->vnet_hash.indirection_table_mask);
> +	queue = READ_ONCE(tun->vnet_hash_indirection_table[index]);
> +	if (!queue)
> +		queue = READ_ONCE(tun->vnet_hash.unclassified_queue);

Apparently 0 is an illegal queue value? You are making this part
of UAPI better document things like this.

> +
> +	return queue % numqueues;
> +}
> +
>  static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
>  			    struct net_device *sb_dev)
>  {
>  	struct tun_struct *tun = netdev_priv(dev);
> +	u8 vnet_hash_flags = READ_ONCE(tun->vnet_hash.flags);
> +	struct virtio_net_hash hash;
>  	u16 ret;
>  
> +	if (vnet_hash_flags & (TUN_VNET_HASH_RSS | TUN_VNET_HASH_REPORT)) {
> +		virtio_net_hash(skb, READ_ONCE(tun->vnet_hash.types),
> +				tun->vnet_hash_key, &hash);
> +

What are all these READ_ONCE things doing?
E.g. you seem to be very content to have tun->vnet_hash.types read twice apparently?
This is volatile which basically breaks all compiler's attempts
to optimize code - needs to be used judiciously.



> +		skb->tun_vnet_hash = true;
> +		qdisc_skb_cb(skb)->tun_vnet_hash_value = hash.value;
> +		qdisc_skb_cb(skb)->tun_vnet_hash_report = hash.report;
> +	}
> +
>  	rcu_read_lock();
>  	if (rcu_dereference(tun->steering_prog))
>  		ret = tun_ebpf_select_queue(tun, skb);
> +	else if (vnet_hash_flags & TUN_VNET_HASH_RSS)
> +		ret = tun_vnet_select_queue(tun, skb);
>  	else
>  		ret = tun_automq_select_queue(tun, skb);
>  	rcu_read_unlock();
> @@ -2088,10 +2139,15 @@ static ssize_t tun_put_user(struct tun_struct *tun,
>  			    struct iov_iter *iter)
>  {
>  	struct tun_pi pi = { 0, skb->protocol };
> +	struct virtio_net_hash vnet_hash = {
> +		.value = qdisc_skb_cb(skb)->tun_vnet_hash_value,
> +		.report = qdisc_skb_cb(skb)->tun_vnet_hash_report
> +	};
>  	ssize_t total;
>  	int vlan_offset = 0;
>  	int vlan_hlen = 0;
>  	int vnet_hdr_sz = 0;
> +	size_t vnet_hdr_content_sz;
>  
>  	if (skb_vlan_tag_present(skb))
>  		vlan_hlen = VLAN_HLEN;
> @@ -2116,31 +2172,49 @@ static ssize_t tun_put_user(struct tun_struct *tun,
>  	}
>  
>  	if (vnet_hdr_sz) {
> -		struct virtio_net_hdr gso;
> +		union {
> +			struct virtio_net_hdr hdr;
> +			struct virtio_net_hdr_v1_hash v1_hash_hdr;
> +		} hdr;
> +		int ret;
>  
>  		if (iov_iter_count(iter) < vnet_hdr_sz)
>  			return -EINVAL;
>  
> -		if (virtio_net_hdr_from_skb(skb, &gso,
> -					    tun_is_little_endian(tun), true,
> -					    vlan_hlen)) {
> +		if ((READ_ONCE(tun->vnet_hash.flags) & TUN_VNET_HASH_REPORT) &&
> +		    vnet_hdr_sz >= sizeof(hdr.v1_hash_hdr) &&
> +		    skb->tun_vnet_hash) {
> +			vnet_hdr_content_sz = sizeof(hdr.v1_hash_hdr);
> +			ret = virtio_net_hdr_v1_hash_from_skb(skb,
> +							      &hdr.v1_hash_hdr,
> +							      true,
> +							      vlan_hlen,
> +							      &vnet_hash);
> +		} else {
> +			vnet_hdr_content_sz = sizeof(hdr.hdr);
> +			ret = virtio_net_hdr_from_skb(skb, &hdr.hdr,
> +						      tun_is_little_endian(tun),
> +						      true, vlan_hlen);
> +		}
> +
> +		if (ret) {
>  			struct skb_shared_info *sinfo = skb_shinfo(skb);
>  			pr_err("unexpected GSO type: "
>  			       "0x%x, gso_size %d, hdr_len %d\n",
> -			       sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
> -			       tun16_to_cpu(tun, gso.hdr_len));
> +			       sinfo->gso_type, tun16_to_cpu(tun, hdr.hdr.gso_size),
> +			       tun16_to_cpu(tun, hdr.hdr.hdr_len));
>  			print_hex_dump(KERN_ERR, "tun: ",
>  				       DUMP_PREFIX_NONE,
>  				       16, 1, skb->head,
> -				       min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
> +				       min((int)tun16_to_cpu(tun, hdr.hdr.hdr_len), 64), true);
>  			WARN_ON_ONCE(1);
>  			return -EINVAL;
>  		}
>  
> -		if (copy_to_iter(&gso, sizeof(gso), iter) != sizeof(gso))
> +		if (copy_to_iter(&hdr, vnet_hdr_content_sz, iter) != vnet_hdr_content_sz)
>  			return -EFAULT;
>  
> -		iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso));
> +		iov_iter_advance(iter, vnet_hdr_sz - vnet_hdr_content_sz);
>  	}
>  
>  	if (vlan_hlen) {
> @@ -3007,24 +3081,27 @@ static int tun_set_queue(struct file *file, struct ifreq *ifr)
>  	return ret;
>  }
>  
> -static int tun_set_ebpf(struct tun_struct *tun, struct tun_prog __rcu **prog_p,
> -			void __user *data)
> +static struct bpf_prog *tun_set_ebpf(struct tun_struct *tun,
> +				     struct tun_prog __rcu **prog_p,
> +				     void __user *data)
>  {
>  	struct bpf_prog *prog;
>  	int fd;
> +	int ret;
>  
>  	if (copy_from_user(&fd, data, sizeof(fd)))
> -		return -EFAULT;
> +		return ERR_PTR(-EFAULT);
>  
>  	if (fd == -1) {
>  		prog = NULL;
>  	} else {
>  		prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER);
>  		if (IS_ERR(prog))
> -			return PTR_ERR(prog);
> +			return prog;
>  	}
>  
> -	return __tun_set_ebpf(tun, prog_p, prog);
> +	ret = __tun_set_ebpf(tun, prog_p, prog);
> +	return ret ? ERR_PTR(ret) : prog;
>  }
>  
>  /* Return correct value for tun->dev->addr_len based on tun->dev->type. */
> @@ -3082,6 +3159,11 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
>  	int le;
>  	int ret;
>  	bool do_notify = false;
> +	struct bpf_prog *bpf_ret;
> +	struct tun_vnet_hash vnet_hash;
> +	u16 vnet_hash_indirection_table[TUN_VNET_HASH_MAX_INDIRECTION_TABLE_LENGTH];
> +	u8 vnet_hash_key[TUN_VNET_HASH_MAX_KEY_SIZE];
> +	size_t len;
>  
>  	if (cmd == TUNSETIFF || cmd == TUNSETQUEUE ||
>  	    (_IOC_TYPE(cmd) == SOCK_IOC_TYPE && cmd != SIOCGSKNS)) {
> @@ -3295,7 +3377,10 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
>  			ret = -EFAULT;
>  			break;
>  		}
> -		if (vnet_hdr_sz < (int)sizeof(struct virtio_net_hdr)) {
> +		if (vnet_hdr_sz <
> +		    (int)((tun->vnet_hash.flags & TUN_VNET_HASH_REPORT) ?
> +			  sizeof(struct virtio_net_hdr_v1_hash) :
> +			  sizeof(struct virtio_net_hdr))) {
>  			ret = -EINVAL;
>  			break;
>  		}
> @@ -3314,10 +3399,16 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
>  			ret = -EFAULT;
>  			break;
>  		}
> -		if (le)
> +		if (le) {
>  			tun->flags |= TUN_VNET_LE;
> -		else
> +		} else {
> +			if (!tun_legacy_is_little_endian(tun)) {
> +				ret = -EINVAL;
> +				break;
> +			}
> +
>  			tun->flags &= ~TUN_VNET_LE;
> +		}
>  		break;
>  
>  	case TUNGETVNETBE:
> @@ -3360,11 +3451,17 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
>  		break;
>  
>  	case TUNSETSTEERINGEBPF:
> -		ret = tun_set_ebpf(tun, &tun->steering_prog, argp);
> +		bpf_ret = tun_set_ebpf(tun, &tun->steering_prog, argp);
> +		if (IS_ERR(bpf_ret))
> +			ret = PTR_ERR(bpf_ret);
> +		else if (bpf_ret)
> +			tun->vnet_hash.flags &= ~TUN_VNET_HASH_RSS;

what is this doing?

>  		break;
>  
>  	case TUNSETFILTEREBPF:
> -		ret = tun_set_ebpf(tun, &tun->filter_prog, argp);
> +		bpf_ret = tun_set_ebpf(tun, &tun->filter_prog, argp);
> +		if (IS_ERR(bpf_ret))
> +			ret = PTR_ERR(bpf_ret);
>  		break;
>  
>  	case TUNSETCARRIER:
> @@ -3382,6 +3479,54 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
>  		ret = open_related_ns(&net->ns, get_net_ns);
>  		break;
>  
> +	case TUNGETVNETHASHCAP:
> +		if (copy_to_user(argp, &tun_vnet_hash_cap,
> +				 sizeof(tun_vnet_hash_cap)))
> +			ret = -EFAULT;
> +		break;
> +
> +	case TUNSETVNETHASH:
> +		len = sizeof(vnet_hash);
> +		if (copy_from_user(&vnet_hash, argp, len)) {
> +			ret = -EFAULT;
> +			break;
> +		}


what if flags has some bits set you don't know how to handle?
should these be ignored as now or cause a failure?
these decisions all affect uapi.

> +
> +		if (((vnet_hash.flags & TUN_VNET_HASH_REPORT) &&
> +		     (tun->vnet_hdr_sz < sizeof(struct virtio_net_hdr_v1_hash) ||
> +		      !tun_is_little_endian(tun))) ||
> +		     vnet_hash.indirection_table_mask >=
> +		     TUN_VNET_HASH_MAX_INDIRECTION_TABLE_LENGTH) {
> +			ret = -EINVAL;
> +			break;
> +		}

Given this is later used to index within an array one has to
be very careful about spectre things here, which this code isn't.


> +
> +		argp = (u8 __user *)argp + len;
> +		len = (vnet_hash.indirection_table_mask + 1) * 2;

comment pointer math tricks like this extensively please.

> +		if (copy_from_user(vnet_hash_indirection_table, argp, len)) {
> +			ret = -EFAULT;
> +			break;
> +		}
> +
> +		argp = (u8 __user *)argp + len;
> +		len = virtio_net_hash_key_length(vnet_hash.types);
> +
> +		if (copy_from_user(vnet_hash_key, argp, len)) {
> +			ret = -EFAULT;
> +			break;
> +		}
> +
> +		tun->vnet_hash = vnet_hash;
> +		memcpy(tun->vnet_hash_indirection_table,
> +		       vnet_hash_indirection_table,
> +		       (vnet_hash.indirection_table_mask + 1) * 2);
> +		memcpy(tun->vnet_hash_key, vnet_hash_key, len);
> +
> +		if (vnet_hash.flags & TUN_VNET_HASH_RSS)
> +			__tun_set_ebpf(tun, &tun->steering_prog, NULL);
> +
> +		break;
> +
>  	default:
>  		ret = -EINVAL;
>  		break;
> diff --git a/include/uapi/linux/if_tun.h b/include/uapi/linux/if_tun.h
> index 287cdc81c939..dc591cd897c8 100644
> --- a/include/uapi/linux/if_tun.h
> +++ b/include/uapi/linux/if_tun.h
> @@ -61,6 +61,8 @@
>  #define TUNSETFILTEREBPF _IOR('T', 225, int)
>  #define TUNSETCARRIER _IOW('T', 226, int)
>  #define TUNGETDEVNETNS _IO('T', 227)
> +#define TUNGETVNETHASHCAP _IO('T', 228)
> +#define TUNSETVNETHASH _IOW('T', 229, unsigned int)
>  
>  /* TUNSETIFF ifr flags */
>  #define IFF_TUN		0x0001
> @@ -115,4 +117,18 @@ struct tun_filter {
>  	__u8   addr[][ETH_ALEN];
>  };
>  
> +struct tun_vnet_hash_cap {
> +	__u16 max_indirection_table_length;
> +	__u32 types;
> +};
> +

There's hidden padding in this struct - not good, copy
will leak kernel info out.



> +#define TUN_VNET_HASH_RSS	0x01
> +#define TUN_VNET_HASH_REPORT	0x02


Do you intend to add more flags down the road?
How will userspace know what is supported?

> +struct tun_vnet_hash {
> +	__u8 flags;
> +	__u32 types;
> +	__u16 indirection_table_mask;
> +	__u16 unclassified_queue;
> +};
> +

Padding here too. Best avoided.

In any case, document UAPI please.


>  #endif /* _UAPI__IF_TUN_H */
> -- 
> 2.42.0
Michael S. Tsirkin Oct. 9, 2023, 11:50 a.m. UTC | #20
On Mon, Oct 09, 2023 at 05:44:20PM +0900, Akihiko Odaki wrote:
> On 2023/10/09 17:13, Willem de Bruijn wrote:
> > On Sun, Oct 8, 2023 at 12:22 AM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
> > > 
> > > virtio-net have two usage of hashes: one is RSS and another is hash
> > > reporting. Conventionally the hash calculation was done by the VMM.
> > > However, computing the hash after the queue was chosen defeats the
> > > purpose of RSS.
> > > 
> > > Another approach is to use eBPF steering program. This approach has
> > > another downside: it cannot report the calculated hash due to the
> > > restrictive nature of eBPF.
> > > 
> > > Introduce the code to compute hashes to the kernel in order to overcome
> > > these challenges. An alternative solution is to extend the eBPF steering
> > > program so that it will be able to report to the userspace, but it makes
> > > little sense to allow to implement different hashing algorithms with
> > > eBPF since the hash value reported by virtio-net is strictly defined by
> > > the specification.
> > > 
> > > The hash value already stored in sk_buff is not used and computed
> > > independently since it may have been computed in a way not conformant
> > > with the specification.
> > > 
> > > Signed-off-by: Akihiko Odaki <akihiko.odaki@daynix.com>
> > 
> > > @@ -2116,31 +2172,49 @@ static ssize_t tun_put_user(struct tun_struct *tun,
> > >          }
> > > 
> > >          if (vnet_hdr_sz) {
> > > -               struct virtio_net_hdr gso;
> > > +               union {
> > > +                       struct virtio_net_hdr hdr;
> > > +                       struct virtio_net_hdr_v1_hash v1_hash_hdr;
> > > +               } hdr;
> > > +               int ret;
> > > 
> > >                  if (iov_iter_count(iter) < vnet_hdr_sz)
> > >                          return -EINVAL;
> > > 
> > > -               if (virtio_net_hdr_from_skb(skb, &gso,
> > > -                                           tun_is_little_endian(tun), true,
> > > -                                           vlan_hlen)) {
> > > +               if ((READ_ONCE(tun->vnet_hash.flags) & TUN_VNET_HASH_REPORT) &&
> > > +                   vnet_hdr_sz >= sizeof(hdr.v1_hash_hdr) &&
> > > +                   skb->tun_vnet_hash) {
> > 
> > Isn't vnet_hdr_sz guaranteed to be >= hdr.v1_hash_hdr, by virtue of
> > the set hash ioctl failing otherwise?
> > 
> > Such checks should be limited to control path where possible
> 
> There is a potential race since tun->vnet_hash.flags and vnet_hdr_sz are not
> read at once.

And then it's a complete mess and you get inconsistent
behaviour with packets getting sent all over the place, right?
So maybe keep a pointer to this struct so it can be
changed atomically then. Maybe even something with rcu I donnu.
Akihiko Odaki Oct. 10, 2023, 1:52 a.m. UTC | #21
On 2023/10/09 19:44, Willem de Bruijn wrote:
> On Mon, Oct 9, 2023 at 3:12 AM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
>>
>> On 2023/10/09 19:06, Willem de Bruijn wrote:
>>> On Mon, Oct 9, 2023 at 3:02 AM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
>>>>
>>>> On 2023/10/09 18:57, Willem de Bruijn wrote:
>>>>> On Mon, Oct 9, 2023 at 3:57 AM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
>>>>>>
>>>>>> On 2023/10/09 17:04, Willem de Bruijn wrote:
>>>>>>> On Sun, Oct 8, 2023 at 3:46 PM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
>>>>>>>>
>>>>>>>> On 2023/10/09 5:08, Willem de Bruijn wrote:
>>>>>>>>> On Sun, Oct 8, 2023 at 10:04 PM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
>>>>>>>>>>
>>>>>>>>>> On 2023/10/09 4:07, Willem de Bruijn wrote:
>>>>>>>>>>> On Sun, Oct 8, 2023 at 7:22 AM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
>>>>>>>>>>>>
>>>>>>>>>>>> virtio-net have two usage of hashes: one is RSS and another is hash
>>>>>>>>>>>> reporting. Conventionally the hash calculation was done by the VMM.
>>>>>>>>>>>> However, computing the hash after the queue was chosen defeats the
>>>>>>>>>>>> purpose of RSS.
>>>>>>>>>>>>
>>>>>>>>>>>> Another approach is to use eBPF steering program. This approach has
>>>>>>>>>>>> another downside: it cannot report the calculated hash due to the
>>>>>>>>>>>> restrictive nature of eBPF.
>>>>>>>>>>>>
>>>>>>>>>>>> Introduce the code to compute hashes to the kernel in order to overcome
> >>>>>>>>>>>> these challenges. An alternative solution is to extend the eBPF steering
>>>>>>>>>>>> program so that it will be able to report to the userspace, but it makes
>>>>>>>>>>>> little sense to allow to implement different hashing algorithms with
>>>>>>>>>>>> eBPF since the hash value reported by virtio-net is strictly defined by
>>>>>>>>>>>> the specification.
>>>>>>>>>>>>
>>>>>>>>>>>> The hash value already stored in sk_buff is not used and computed
>>>>>>>>>>>> independently since it may have been computed in a way not conformant
>>>>>>>>>>>> with the specification.
>>>>>>>>>>>>
>>>>>>>>>>>> Signed-off-by: Akihiko Odaki <akihiko.odaki@daynix.com>
>>>>>>>>>>>> ---
>>>>>>>>>>>
>>>>>>>>>>>> +static const struct tun_vnet_hash_cap tun_vnet_hash_cap = {
>>>>>>>>>>>> +       .max_indirection_table_length =
>>>>>>>>>>>> +               TUN_VNET_HASH_MAX_INDIRECTION_TABLE_LENGTH,
>>>>>>>>>>>> +
>>>>>>>>>>>> +       .types = VIRTIO_NET_SUPPORTED_HASH_TYPES
>>>>>>>>>>>> +};
>>>>>>>>>>>
>>>>>>>>>>> No need to have explicit capabilities exchange like this? Tun either
>>>>>>>>>>> supports all or none.
>>>>>>>>>>
>>>>>>>>>> tun does not support VIRTIO_NET_RSS_HASH_TYPE_IP_EX,
>>>>>>>>>> VIRTIO_NET_RSS_HASH_TYPE_TCP_EX, and VIRTIO_NET_RSS_HASH_TYPE_UDP_EX.
>>>>>>>>>>
>>>>>>>>>> It is because the flow dissector does not support IPv6 extensions. The
>>>>>>>>>> specification is also vague, and does not tell how many TLVs should be
>>>>>>>>>> consumed at most when interpreting destination option header so I chose
>>>>>>>>>> to avoid adding code for these hash types to the flow dissector. I doubt
>>>>>>>>>> anyone will complain about it since nobody complains for Linux.
>>>>>>>>>>
>>>>>>>>>> I'm also adding this so that we can extend it later.
>>>>>>>>>> max_indirection_table_length may grow for systems with 128+ CPUs, or
>>>>>>>>>> types may have other bits for new protocols in the future.
>>>>>>>>>>
>>>>>>>>>>>
>>>>>>>>>>>>               case TUNSETSTEERINGEBPF:
>>>>>>>>>>>> -               ret = tun_set_ebpf(tun, &tun->steering_prog, argp);
>>>>>>>>>>>> +               bpf_ret = tun_set_ebpf(tun, &tun->steering_prog, argp);
>>>>>>>>>>>> +               if (IS_ERR(bpf_ret))
>>>>>>>>>>>> +                       ret = PTR_ERR(bpf_ret);
>>>>>>>>>>>> +               else if (bpf_ret)
>>>>>>>>>>>> +                       tun->vnet_hash.flags &= ~TUN_VNET_HASH_RSS;
>>>>>>>>>>>
>>>>>>>>>>> Don't make one feature disable another.
>>>>>>>>>>>
>>>>>>>>>>> TUNSETSTEERINGEBPF and TUNSETVNETHASH are mutually exclusive
>>>>>>>>>>> functions. If one is enabled the other call should fail, with EBUSY
>>>>>>>>>>> for instance.
>>>>>>>>>>>
>>>>>>>>>>>> +       case TUNSETVNETHASH:
>>>>>>>>>>>> +               len = sizeof(vnet_hash);
>>>>>>>>>>>> +               if (copy_from_user(&vnet_hash, argp, len)) {
>>>>>>>>>>>> +                       ret = -EFAULT;
>>>>>>>>>>>> +                       break;
>>>>>>>>>>>> +               }
>>>>>>>>>>>> +
>>>>>>>>>>>> +               if (((vnet_hash.flags & TUN_VNET_HASH_REPORT) &&
>>>>>>>>>>>> +                    (tun->vnet_hdr_sz < sizeof(struct virtio_net_hdr_v1_hash) ||
>>>>>>>>>>>> +                     !tun_is_little_endian(tun))) ||
>>>>>>>>>>>> +                    vnet_hash.indirection_table_mask >=
>>>>>>>>>>>> +                    TUN_VNET_HASH_MAX_INDIRECTION_TABLE_LENGTH) {
>>>>>>>>>>>> +                       ret = -EINVAL;
>>>>>>>>>>>> +                       break;
>>>>>>>>>>>> +               }
>>>>>>>>>>>> +
>>>>>>>>>>>> +               argp = (u8 __user *)argp + len;
>>>>>>>>>>>> +               len = (vnet_hash.indirection_table_mask + 1) * 2;
>>>>>>>>>>>> +               if (copy_from_user(vnet_hash_indirection_table, argp, len)) {
>>>>>>>>>>>> +                       ret = -EFAULT;
>>>>>>>>>>>> +                       break;
>>>>>>>>>>>> +               }
>>>>>>>>>>>> +
>>>>>>>>>>>> +               argp = (u8 __user *)argp + len;
>>>>>>>>>>>> +               len = virtio_net_hash_key_length(vnet_hash.types);
>>>>>>>>>>>> +
>>>>>>>>>>>> +               if (copy_from_user(vnet_hash_key, argp, len)) {
>>>>>>>>>>>> +                       ret = -EFAULT;
>>>>>>>>>>>> +                       break;
>>>>>>>>>>>> +               }
>>>>>>>>>>>
>>>>>>>>>>> Probably easier and less error-prone to define a fixed size control
>>>>>>>>>>> struct with the max indirection table size.
>>>>>>>>>>
>>>>>>>>>> I made its size variable because the indirection table and key may grow
>>>>>>>>>> in the future as I wrote above.
>>>>>>>>>>
>>>>>>>>>>>
>>>>>>>>>>> Btw: please trim the CC: list considerably on future patches.
>>>>>>>>>>
>>>>>>>>>> I'll do so in the next version with the TUNSETSTEERINGEBPF change you
>>>>>>>>>> proposed.
>>>>>>>>>
>>>>>>>>> To be clear: please don't just resubmit with that one change.
>>>>>>>>>
>>>>>>>>> The skb and cb issues are quite fundamental issues that need to be resolved.
>>>>>>>>>
>>>>>>>>> I'd like to understand why adjusting the existing BPF feature for this
>>>>>>>>> exact purpose cannot be amended to return the key it produced.
>>>>>>>>
>>>>>>>> eBPF steering program is not designed for this particular problem in my
>>>>>>>> understanding. It was introduced to derive hash values with an
>>>>>>>> understanding of application-specific semantics of packets instead of
>>>>>>>> generic IP/TCP/UDP semantics.
>>>>>>>>
>>>>>>>> This problem is rather different in terms that the hash derivation is
>>>>>>>> strictly defined by virtio-net. I don't think it makes sense to
>>>>>>>> introduce the complexity of BPF when you always run the same code.
>>>>>>>>
>>>>>>>> It can utilize the existing flow dissector and also make it easier to
>>>>>>>> use for the userspace by implementing this in the kernel.
>>>>>>>
>>>>>>> Ok. There does appear to be overlap in functionality. But it might be
>>>>>>> easier to deploy to just have standard Toeplitz available without
>>>>>>> having to compile and load an eBPF program.
>>>>>>>
>>>>>>> As for the sk_buff and cb[] changes. The first is really not needed.
>>>>>>> sk_buff simply would not scale if every edge case needs a few bits.
>>>>>>
>>>>>> An alternative is to move the bit to cb[] and clear it for every code
>>>>>> paths that lead to ndo_start_xmit(), but I'm worried that it is error-prone.
>>>>>>
>>>>>> I think we can put the bit in sk_buff for now. We can implement the
>>>>>> alternative when we are short of bits.
>>>>>
>>>>> I disagree. sk_buff fields add a cost to every code path. They cannot
>>>>> be added for every edge case.
>>>>
>>>> It only takes an unused bit and does not grow the sk_buff size so I
>>>> think it has practically no cost for now.
>>>
>>> The problem is that that thinking leads to death by a thousand cuts.
>>>
>>> "for now" forces the cost of having to think hard how to avoid growing
>>> sk_buff onto the next person. Let's do it right from the start.
>>
>> I see. I described an alternative to move the bit to cb[] and clear it
>> in all code paths that lead to ndo_start_xmit() earlier. Does that
>> sound good to you?
> 
> If you use the control block to pass information between
> __dev_queue_xmit on the tun device and tun_net_xmit, using gso_skb_cb,
> the field can be left undefined in all non-tun paths. tun_select_queue
> can initialize.

The problem is that tun_select_queue() is not always called. 
netdev_core_pick_tx() ensures dev->real_num_tx_queues != 1 before 
calling it, but this variable may change later and result in a race 
condition. Another case is that XDP with predefined queue.

> 
> I would still use skb->hash to encode the hash. That hash type of that
> field is not strictly defined. It can be siphash from ___skb_get_hash
> or a device hash, which most likely also uses Toeplitz. Then you also
> don't run into the problem of growing the struct size.

I'm concerned exactly because it's not strictly defined. Someone may 
decide to overwrite it later if we are not cautious enough. qdisc_skb_cb 
also has sufficient space to contain both of the hash value and type.
Akihiko Odaki Oct. 10, 2023, 2:32 a.m. UTC | #22
On 2023/10/09 20:38, Michael S. Tsirkin wrote:
> Akihiko Odaki sorry about reposts.
> Having an email with two "@" in the CC list:
> 	 xuanzhuo@linux.alibaba.comshuah@kernel.org
> tripped up mutt's reply-all for me and made it send to you only.
> 
> I am guessing you meant two addresses: xuanzhuo@linux.alibaba.com
> and shuah@kernel.org.

Oops, I'll send the future versions with corrected addresses (and fewer 
CCs).

> 
> 
> On Sun, Oct 08, 2023 at 02:20:49PM +0900, Akihiko Odaki wrote:
>> virtio-net have two usage of hashes: one is RSS and another is hash
>> reporting. Conventionally the hash calculation was done by the VMM.
>> However, computing the hash after the queue was chosen defeats the
>> purpose of RSS.
>>
>> Another approach is to use eBPF steering program. This approach has
>> another downside: it cannot report the calculated hash due to the
>> restrictive nature of eBPF.
>>
>> Introduce the code to compute hashes to the kernel in order to overcome
>> these challenges. An alternative solution is to extend the eBPF steering
>> program so that it will be able to report to the userspace, but it makes
>> little sense to allow to implement different hashing algorithms with
>> eBPF since the hash value reported by virtio-net is strictly defined by
>> the specification.
>>
>> The hash value already stored in sk_buff is not used and computed
>> independently since it may have been computed in a way not conformant
>> with the specification.
>>
>> Signed-off-by: Akihiko Odaki <akihiko.odaki@daynix.com>
>> ---
>>   drivers/net/tun.c           | 187 ++++++++++++++++++++++++++++++++----
>>   include/uapi/linux/if_tun.h |  16 +++
>>   2 files changed, 182 insertions(+), 21 deletions(-)
>>
>> diff --git a/drivers/net/tun.c b/drivers/net/tun.c
>> index 89ab9efe522c..561a573cd008 100644
>> --- a/drivers/net/tun.c
>> +++ b/drivers/net/tun.c
>> @@ -171,6 +171,9 @@ struct tun_prog {
>>   	struct bpf_prog *prog;
>>   };
>>   
>> +#define TUN_VNET_HASH_MAX_KEY_SIZE 40
>> +#define TUN_VNET_HASH_MAX_INDIRECTION_TABLE_LENGTH 128
>> +
> 
> where do these come from?

TUN_VNET_HASH_MAX_KEY_SIZE is the total size of IPv6 addresses and 
TCP/UDP ports.

TUN_VNET_HASH_MAX_INDIRECTION_TABLE_LENGTH is from the following 
statement in VIRTIO Version 1.2 clause 5.1.4.1:
 > The device MUST set rss_max_indirection_table_length to at least 128,
 > if it offers VIRTIO_NET_F_RSS.

> 
>>   /* Since the socket were moved to tun_file, to preserve the behavior of persist
>>    * device, socket filter, sndbuf and vnet header size were restore when the
>>    * file were attached to a persist device.
>> @@ -209,6 +212,9 @@ struct tun_struct {
>>   	struct tun_prog __rcu *steering_prog;
>>   	struct tun_prog __rcu *filter_prog;
>>   	struct ethtool_link_ksettings link_ksettings;
>> +	struct tun_vnet_hash vnet_hash;
>> +	u16 vnet_hash_indirection_table[TUN_VNET_HASH_MAX_INDIRECTION_TABLE_LENGTH];
>> +	u32 vnet_hash_key[TUN_VNET_HASH_MAX_KEY_SIZE / 4];
> 
> That's quite a lot of data to add in this struct, and will be used
> by a small minority of users. Are you pushing any hot data out of cache
> with this? Why not allocate these as needed?

I put this here just because it's convenient. It would be better to have 
a separate structure.

> 
>>   	/* init args */
>>   	struct file *file;
>>   	struct ifreq *ifr;
>> @@ -219,6 +225,13 @@ struct veth {
>>   	__be16 h_vlan_TCI;
>>   };
>>   
>> +static const struct tun_vnet_hash_cap tun_vnet_hash_cap = {
>> +	.max_indirection_table_length =
>> +		TUN_VNET_HASH_MAX_INDIRECTION_TABLE_LENGTH,
>> +
>> +	.types = VIRTIO_NET_SUPPORTED_HASH_TYPES
>> +};
>> +
>>   static void tun_flow_init(struct tun_struct *tun);
>>   static void tun_flow_uninit(struct tun_struct *tun);
>>   
>> @@ -320,10 +333,16 @@ static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
>>   	if (get_user(be, argp))
>>   		return -EFAULT;
>>   
>> -	if (be)
>> +	if (be) {
>> +		if (!(tun->flags & TUN_VNET_LE) &&
>> +		    (tun->vnet_hash.flags & TUN_VNET_HASH_REPORT)) {
>> +			return -EINVAL;
>> +		}
>> +
>>   		tun->flags |= TUN_VNET_BE;
>> -	else
>> +	} else {
>>   		tun->flags &= ~TUN_VNET_BE;
>> +	}
>>   
>>   	return 0;
>>   }
>> @@ -558,15 +577,47 @@ static u16 tun_ebpf_select_queue(struct tun_struct *tun, struct sk_buff *skb)
>>   	return ret % numqueues;
>>   }
>>   
>> +static u16 tun_vnet_select_queue(struct tun_struct *tun, struct sk_buff *skb)
>> +{
>> +	u32 value = qdisc_skb_cb(skb)->tun_vnet_hash_value;
>> +	u32 numqueues;
>> +	u32 index;
>> +	u16 queue;
>> +
>> +	numqueues = READ_ONCE(tun->numqueues);
>> +	if (!numqueues)
>> +		return 0;
>> +
>> +	index = value & READ_ONCE(tun->vnet_hash.indirection_table_mask);
>> +	queue = READ_ONCE(tun->vnet_hash_indirection_table[index]);
>> +	if (!queue)
>> +		queue = READ_ONCE(tun->vnet_hash.unclassified_queue);
> 
> Apparently 0 is an illegal queue value? You are making this part
> of UAPI better document things like this.

Well, I don't think so. I naively copied this from QEMU's eBPF steering 
program but I think this should check tun_vnet_hash_report == 
VIRTIO_NET_HASH_REPORT_NONE. I'll fix it.

> 
>> +
>> +	return queue % numqueues;
>> +}
>> +
>>   static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
>>   			    struct net_device *sb_dev)
>>   {
>>   	struct tun_struct *tun = netdev_priv(dev);
>> +	u8 vnet_hash_flags = READ_ONCE(tun->vnet_hash.flags);
>> +	struct virtio_net_hash hash;
>>   	u16 ret;
>>   
>> +	if (vnet_hash_flags & (TUN_VNET_HASH_RSS | TUN_VNET_HASH_REPORT)) {
>> +		virtio_net_hash(skb, READ_ONCE(tun->vnet_hash.types),
>> +				tun->vnet_hash_key, &hash);
>> +
> 
> What are all these READ_ONCE things doing?
> E.g. you seem to be very content to have tun->vnet_hash.types read twice apparently?
> This is volatile which basically breaks all compiler's attempts
> to optimize code - needs to be used judiciously.

I'll replace them with one RCU dereference once I extract them into a 
structure.

> 
> 
> 
>> +		skb->tun_vnet_hash = true;
>> +		qdisc_skb_cb(skb)->tun_vnet_hash_value = hash.value;
>> +		qdisc_skb_cb(skb)->tun_vnet_hash_report = hash.report;
>> +	}
>> +
>>   	rcu_read_lock();
>>   	if (rcu_dereference(tun->steering_prog))
>>   		ret = tun_ebpf_select_queue(tun, skb);
>> +	else if (vnet_hash_flags & TUN_VNET_HASH_RSS)
>> +		ret = tun_vnet_select_queue(tun, skb);
>>   	else
>>   		ret = tun_automq_select_queue(tun, skb);
>>   	rcu_read_unlock();
>> @@ -2088,10 +2139,15 @@ static ssize_t tun_put_user(struct tun_struct *tun,
>>   			    struct iov_iter *iter)
>>   {
>>   	struct tun_pi pi = { 0, skb->protocol };
>> +	struct virtio_net_hash vnet_hash = {
>> +		.value = qdisc_skb_cb(skb)->tun_vnet_hash_value,
>> +		.report = qdisc_skb_cb(skb)->tun_vnet_hash_report
>> +	};
>>   	ssize_t total;
>>   	int vlan_offset = 0;
>>   	int vlan_hlen = 0;
>>   	int vnet_hdr_sz = 0;
>> +	size_t vnet_hdr_content_sz;
>>   
>>   	if (skb_vlan_tag_present(skb))
>>   		vlan_hlen = VLAN_HLEN;
>> @@ -2116,31 +2172,49 @@ static ssize_t tun_put_user(struct tun_struct *tun,
>>   	}
>>   
>>   	if (vnet_hdr_sz) {
>> -		struct virtio_net_hdr gso;
>> +		union {
>> +			struct virtio_net_hdr hdr;
>> +			struct virtio_net_hdr_v1_hash v1_hash_hdr;
>> +		} hdr;
>> +		int ret;
>>   
>>   		if (iov_iter_count(iter) < vnet_hdr_sz)
>>   			return -EINVAL;
>>   
>> -		if (virtio_net_hdr_from_skb(skb, &gso,
>> -					    tun_is_little_endian(tun), true,
>> -					    vlan_hlen)) {
>> +		if ((READ_ONCE(tun->vnet_hash.flags) & TUN_VNET_HASH_REPORT) &&
>> +		    vnet_hdr_sz >= sizeof(hdr.v1_hash_hdr) &&
>> +		    skb->tun_vnet_hash) {
>> +			vnet_hdr_content_sz = sizeof(hdr.v1_hash_hdr);
>> +			ret = virtio_net_hdr_v1_hash_from_skb(skb,
>> +							      &hdr.v1_hash_hdr,
>> +							      true,
>> +							      vlan_hlen,
>> +							      &vnet_hash);
>> +		} else {
>> +			vnet_hdr_content_sz = sizeof(hdr.hdr);
>> +			ret = virtio_net_hdr_from_skb(skb, &hdr.hdr,
>> +						      tun_is_little_endian(tun),
>> +						      true, vlan_hlen);
>> +		}
>> +
>> +		if (ret) {
>>   			struct skb_shared_info *sinfo = skb_shinfo(skb);
>>   			pr_err("unexpected GSO type: "
>>   			       "0x%x, gso_size %d, hdr_len %d\n",
>> -			       sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
>> -			       tun16_to_cpu(tun, gso.hdr_len));
>> +			       sinfo->gso_type, tun16_to_cpu(tun, hdr.hdr.gso_size),
>> +			       tun16_to_cpu(tun, hdr.hdr.hdr_len));
>>   			print_hex_dump(KERN_ERR, "tun: ",
>>   				       DUMP_PREFIX_NONE,
>>   				       16, 1, skb->head,
>> -				       min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
>> +				       min((int)tun16_to_cpu(tun, hdr.hdr.hdr_len), 64), true);
>>   			WARN_ON_ONCE(1);
>>   			return -EINVAL;
>>   		}
>>   
>> -		if (copy_to_iter(&gso, sizeof(gso), iter) != sizeof(gso))
>> +		if (copy_to_iter(&hdr, vnet_hdr_content_sz, iter) != vnet_hdr_content_sz)
>>   			return -EFAULT;
>>   
>> -		iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso));
>> +		iov_iter_advance(iter, vnet_hdr_sz - vnet_hdr_content_sz);
>>   	}
>>   
>>   	if (vlan_hlen) {
>> @@ -3007,24 +3081,27 @@ static int tun_set_queue(struct file *file, struct ifreq *ifr)
>>   	return ret;
>>   }
>>   
>> -static int tun_set_ebpf(struct tun_struct *tun, struct tun_prog __rcu **prog_p,
>> -			void __user *data)
>> +static struct bpf_prog *tun_set_ebpf(struct tun_struct *tun,
>> +				     struct tun_prog __rcu **prog_p,
>> +				     void __user *data)
>>   {
>>   	struct bpf_prog *prog;
>>   	int fd;
>> +	int ret;
>>   
>>   	if (copy_from_user(&fd, data, sizeof(fd)))
>> -		return -EFAULT;
>> +		return ERR_PTR(-EFAULT);
>>   
>>   	if (fd == -1) {
>>   		prog = NULL;
>>   	} else {
>>   		prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER);
>>   		if (IS_ERR(prog))
>> -			return PTR_ERR(prog);
>> +			return prog;
>>   	}
>>   
>> -	return __tun_set_ebpf(tun, prog_p, prog);
>> +	ret = __tun_set_ebpf(tun, prog_p, prog);
>> +	return ret ? ERR_PTR(ret) : prog;
>>   }
>>   
>>   /* Return correct value for tun->dev->addr_len based on tun->dev->type. */
>> @@ -3082,6 +3159,11 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
>>   	int le;
>>   	int ret;
>>   	bool do_notify = false;
>> +	struct bpf_prog *bpf_ret;
>> +	struct tun_vnet_hash vnet_hash;
>> +	u16 vnet_hash_indirection_table[TUN_VNET_HASH_MAX_INDIRECTION_TABLE_LENGTH];
>> +	u8 vnet_hash_key[TUN_VNET_HASH_MAX_KEY_SIZE];
>> +	size_t len;
>>   
>>   	if (cmd == TUNSETIFF || cmd == TUNSETQUEUE ||
>>   	    (_IOC_TYPE(cmd) == SOCK_IOC_TYPE && cmd != SIOCGSKNS)) {
>> @@ -3295,7 +3377,10 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
>>   			ret = -EFAULT;
>>   			break;
>>   		}
>> -		if (vnet_hdr_sz < (int)sizeof(struct virtio_net_hdr)) {
>> +		if (vnet_hdr_sz <
>> +		    (int)((tun->vnet_hash.flags & TUN_VNET_HASH_REPORT) ?
>> +			  sizeof(struct virtio_net_hdr_v1_hash) :
>> +			  sizeof(struct virtio_net_hdr))) {
>>   			ret = -EINVAL;
>>   			break;
>>   		}
>> @@ -3314,10 +3399,16 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
>>   			ret = -EFAULT;
>>   			break;
>>   		}
>> -		if (le)
>> +		if (le) {
>>   			tun->flags |= TUN_VNET_LE;
>> -		else
>> +		} else {
>> +			if (!tun_legacy_is_little_endian(tun)) {
>> +				ret = -EINVAL;
>> +				break;
>> +			}
>> +
>>   			tun->flags &= ~TUN_VNET_LE;
>> +		}
>>   		break;
>>   
>>   	case TUNGETVNETBE:
>> @@ -3360,11 +3451,17 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
>>   		break;
>>   
>>   	case TUNSETSTEERINGEBPF:
>> -		ret = tun_set_ebpf(tun, &tun->steering_prog, argp);
>> +		bpf_ret = tun_set_ebpf(tun, &tun->steering_prog, argp);
>> +		if (IS_ERR(bpf_ret))
>> +			ret = PTR_ERR(bpf_ret);
>> +		else if (bpf_ret)
>> +			tun->vnet_hash.flags &= ~TUN_VNET_HASH_RSS;
> 
> what is this doing?

It's unsetting RSS because steering eBPF is incompatible with RSS. 
Willem proposed to return EBUSY instead so I'll do so in the future.

> 
>>   		break;
>>   
>>   	case TUNSETFILTEREBPF:
>> -		ret = tun_set_ebpf(tun, &tun->filter_prog, argp);
>> +		bpf_ret = tun_set_ebpf(tun, &tun->filter_prog, argp);
>> +		if (IS_ERR(bpf_ret))
>> +			ret = PTR_ERR(bpf_ret);
>>   		break;
>>   
>>   	case TUNSETCARRIER:
>> @@ -3382,6 +3479,54 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
>>   		ret = open_related_ns(&net->ns, get_net_ns);
>>   		break;
>>   
>> +	case TUNGETVNETHASHCAP:
>> +		if (copy_to_user(argp, &tun_vnet_hash_cap,
>> +				 sizeof(tun_vnet_hash_cap)))
>> +			ret = -EFAULT;
>> +		break;
>> +
>> +	case TUNSETVNETHASH:
>> +		len = sizeof(vnet_hash);
>> +		if (copy_from_user(&vnet_hash, argp, len)) {
>> +			ret = -EFAULT;
>> +			break;
>> +		}
> 
> 
> what if flags has some bits set you don't know how to handle?
> should these be ignored as now or cause a failure?
> these decisions all affect uapi.

Currently they are ignored.

> 
>> +
>> +		if (((vnet_hash.flags & TUN_VNET_HASH_REPORT) &&
>> +		     (tun->vnet_hdr_sz < sizeof(struct virtio_net_hdr_v1_hash) ||
>> +		      !tun_is_little_endian(tun))) ||
>> +		     vnet_hash.indirection_table_mask >=
>> +		     TUN_VNET_HASH_MAX_INDIRECTION_TABLE_LENGTH) {
>> +			ret = -EINVAL;
>> +			break;
>> +		}
> 
> Given this is later used to index within an array one has to
> be very careful about spectre things here, which this code isn't.

I have totally forgotten that. Thanks for pointing this out.

> 
> 
>> +
>> +		argp = (u8 __user *)argp + len;
>> +		len = (vnet_hash.indirection_table_mask + 1) * 2;
> 
> comment pointer math tricks like this extensively please.

Ok, I will.

> 
>> +		if (copy_from_user(vnet_hash_indirection_table, argp, len)) {
>> +			ret = -EFAULT;
>> +			break;
>> +		}
>> +
>> +		argp = (u8 __user *)argp + len;
>> +		len = virtio_net_hash_key_length(vnet_hash.types);
>> +
>> +		if (copy_from_user(vnet_hash_key, argp, len)) {
>> +			ret = -EFAULT;
>> +			break;
>> +		}
>> +
>> +		tun->vnet_hash = vnet_hash;
>> +		memcpy(tun->vnet_hash_indirection_table,
>> +		       vnet_hash_indirection_table,
>> +		       (vnet_hash.indirection_table_mask + 1) * 2);
>> +		memcpy(tun->vnet_hash_key, vnet_hash_key, len);
>> +
>> +		if (vnet_hash.flags & TUN_VNET_HASH_RSS)
>> +			__tun_set_ebpf(tun, &tun->steering_prog, NULL);
>> +
>> +		break;
>> +
>>   	default:
>>   		ret = -EINVAL;
>>   		break;
>> diff --git a/include/uapi/linux/if_tun.h b/include/uapi/linux/if_tun.h
>> index 287cdc81c939..dc591cd897c8 100644
>> --- a/include/uapi/linux/if_tun.h
>> +++ b/include/uapi/linux/if_tun.h
>> @@ -61,6 +61,8 @@
>>   #define TUNSETFILTEREBPF _IOR('T', 225, int)
>>   #define TUNSETCARRIER _IOW('T', 226, int)
>>   #define TUNGETDEVNETNS _IO('T', 227)
>> +#define TUNGETVNETHASHCAP _IO('T', 228)
>> +#define TUNSETVNETHASH _IOW('T', 229, unsigned int)
>>   
>>   /* TUNSETIFF ifr flags */
>>   #define IFF_TUN		0x0001
>> @@ -115,4 +117,18 @@ struct tun_filter {
>>   	__u8   addr[][ETH_ALEN];
>>   };
>>   
>> +struct tun_vnet_hash_cap {
>> +	__u16 max_indirection_table_length;
>> +	__u32 types;
>> +};
>> +
> 
> There's hidden padding in this struct - not good, copy
> will leak kernel info out.

I'll add an explicit padding.

> 
> 
> 
>> +#define TUN_VNET_HASH_RSS	0x01
>> +#define TUN_VNET_HASH_REPORT	0x02
> 
> 
> Do you intend to add more flags down the road?
> How will userspace know what is supported?

I don't intend to do so, but perhaps it may be better to return EINVAL 
when an unknown flag is set.

> 
>> +struct tun_vnet_hash {
>> +	__u8 flags;
>> +	__u32 types;
>> +	__u16 indirection_table_mask;
>> +	__u16 unclassified_queue;
>> +};
>> +
> 
> Padding here too. Best avoided.

Perhaps it may be easier just to grow flags into u32.

> 
> In any case, document UAPI please.

Sure, I will.
Akihiko Odaki Oct. 10, 2023, 2:34 a.m. UTC | #23
On 2023/10/09 20:50, Michael S. Tsirkin wrote:
> On Mon, Oct 09, 2023 at 05:44:20PM +0900, Akihiko Odaki wrote:
>> On 2023/10/09 17:13, Willem de Bruijn wrote:
>>> On Sun, Oct 8, 2023 at 12:22 AM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
>>>>
>>>> virtio-net has two usages of hashes: one is RSS and another is hash
>>>> reporting. Conventionally the hash calculation was done by the VMM.
>>>> However, computing the hash after the queue was chosen defeats the
>>>> purpose of RSS.
>>>>
>>>> Another approach is to use eBPF steering program. This approach has
>>>> another downside: it cannot report the calculated hash due to the
>>>> restrictive nature of eBPF.
>>>>
>>>> Introduce the code to compute hashes to the kernel in order to overcome
>>>> these challenges. An alternative solution is to extend the eBPF steering
>>>> program so that it will be able to report to the userspace, but it makes
>>>> little sense to allow to implement different hashing algorithms with
>>>> eBPF since the hash value reported by virtio-net is strictly defined by
>>>> the specification.
>>>>
>>>> The hash value already stored in sk_buff is not used and computed
>>>> independently since it may have been computed in a way not conformant
>>>> with the specification.
>>>>
>>>> Signed-off-by: Akihiko Odaki <akihiko.odaki@daynix.com>
>>>
>>>> @@ -2116,31 +2172,49 @@ static ssize_t tun_put_user(struct tun_struct *tun,
>>>>           }
>>>>
>>>>           if (vnet_hdr_sz) {
>>>> -               struct virtio_net_hdr gso;
>>>> +               union {
>>>> +                       struct virtio_net_hdr hdr;
>>>> +                       struct virtio_net_hdr_v1_hash v1_hash_hdr;
>>>> +               } hdr;
>>>> +               int ret;
>>>>
>>>>                   if (iov_iter_count(iter) < vnet_hdr_sz)
>>>>                           return -EINVAL;
>>>>
>>>> -               if (virtio_net_hdr_from_skb(skb, &gso,
>>>> -                                           tun_is_little_endian(tun), true,
>>>> -                                           vlan_hlen)) {
>>>> +               if ((READ_ONCE(tun->vnet_hash.flags) & TUN_VNET_HASH_REPORT) &&
>>>> +                   vnet_hdr_sz >= sizeof(hdr.v1_hash_hdr) &&
>>>> +                   skb->tun_vnet_hash) {
>>>
>>> Isn't vnet_hdr_sz guaranteed to be >= hdr.v1_hash_hdr, by virtue of
>>> the set hash ioctl failing otherwise?
>>>
>>> Such checks should be limited to control path where possible
>>
>> There is a potential race since tun->vnet_hash.flags and vnet_hdr_sz are not
>> read at once.
> 
> And then it's a complete mess and you get inconsistent
> behaviour with packets getting sent all over the place, right?
> So maybe keep a pointer to this struct so it can be
> changed atomically then. Maybe even something with rcu I donnu.

I think it's a good idea to use RCU for the vnet_hash members, but 
vnet_hdr_sz is something not specific to vnet_hash so this check will be 
still necessary.
Jason Wang Oct. 10, 2023, 5:45 a.m. UTC | #24
On Tue, Oct 10, 2023 at 9:52 AM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
>
> On 2023/10/09 19:44, Willem de Bruijn wrote:
> > On Mon, Oct 9, 2023 at 3:12 AM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
> >>
> >> On 2023/10/09 19:06, Willem de Bruijn wrote:
> >>> On Mon, Oct 9, 2023 at 3:02 AM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
> >>>>
> >>>> On 2023/10/09 18:57, Willem de Bruijn wrote:
> >>>>> On Mon, Oct 9, 2023 at 3:57 AM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
> >>>>>>
> >>>>>> On 2023/10/09 17:04, Willem de Bruijn wrote:
> >>>>>>> On Sun, Oct 8, 2023 at 3:46 PM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
> >>>>>>>>
> >>>>>>>> On 2023/10/09 5:08, Willem de Bruijn wrote:
> >>>>>>>>> On Sun, Oct 8, 2023 at 10:04 PM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
> >>>>>>>>>>
> >>>>>>>>>> On 2023/10/09 4:07, Willem de Bruijn wrote:
> >>>>>>>>>>> On Sun, Oct 8, 2023 at 7:22 AM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
> >>>>>>>>>>>>
> >>>>>>>>>>>> virtio-net has two usages of hashes: one is RSS and another is hash
> >>>>>>>>>>>> reporting. Conventionally the hash calculation was done by the VMM.
> >>>>>>>>>>>> However, computing the hash after the queue was chosen defeats the
> >>>>>>>>>>>> purpose of RSS.
> >>>>>>>>>>>>
> >>>>>>>>>>>> Another approach is to use eBPF steering program. This approach has
> >>>>>>>>>>>> another downside: it cannot report the calculated hash due to the
> >>>>>>>>>>>> restrictive nature of eBPF.
> >>>>>>>>>>>>
> >>>>>>>>>>>> Introduce the code to compute hashes to the kernel in order to overcome
> >>>>>>>>>>>> these challenges. An alternative solution is to extend the eBPF steering
> >>>>>>>>>>>> program so that it will be able to report to the userspace, but it makes
> >>>>>>>>>>>> little sense to allow to implement different hashing algorithms with
> >>>>>>>>>>>> eBPF since the hash value reported by virtio-net is strictly defined by
> >>>>>>>>>>>> the specification.
> >>>>>>>>>>>>
> >>>>>>>>>>>> The hash value already stored in sk_buff is not used and computed
> >>>>>>>>>>>> independently since it may have been computed in a way not conformant
> >>>>>>>>>>>> with the specification.
> >>>>>>>>>>>>
> >>>>>>>>>>>> Signed-off-by: Akihiko Odaki <akihiko.odaki@daynix.com>
> >>>>>>>>>>>> ---
> >>>>>>>>>>>
> >>>>>>>>>>>> +static const struct tun_vnet_hash_cap tun_vnet_hash_cap = {
> >>>>>>>>>>>> +       .max_indirection_table_length =
> >>>>>>>>>>>> +               TUN_VNET_HASH_MAX_INDIRECTION_TABLE_LENGTH,
> >>>>>>>>>>>> +
> >>>>>>>>>>>> +       .types = VIRTIO_NET_SUPPORTED_HASH_TYPES
> >>>>>>>>>>>> +};
> >>>>>>>>>>>
> >>>>>>>>>>> No need to have explicit capabilities exchange like this? Tun either
> >>>>>>>>>>> supports all or none.
> >>>>>>>>>>
> >>>>>>>>>> tun does not support VIRTIO_NET_RSS_HASH_TYPE_IP_EX,
> >>>>>>>>>> VIRTIO_NET_RSS_HASH_TYPE_TCP_EX, and VIRTIO_NET_RSS_HASH_TYPE_UDP_EX.
> >>>>>>>>>>
> >>>>>>>>>> It is because the flow dissector does not support IPv6 extensions. The
> >>>>>>>>>> specification is also vague, and does not tell how many TLVs should be
> >>>>>>>>>> consumed at most when interpreting destination option header so I chose
> >>>>>>>>>> to avoid adding code for these hash types to the flow dissector. I doubt
> >>>>>>>>>> anyone will complain about it since nobody complains for Linux.
> >>>>>>>>>>
> >>>>>>>>>> I'm also adding this so that we can extend it later.
> >>>>>>>>>> max_indirection_table_length may grow for systems with 128+ CPUs, or
> >>>>>>>>>> types may have other bits for new protocols in the future.
> >>>>>>>>>>
> >>>>>>>>>>>
> >>>>>>>>>>>>               case TUNSETSTEERINGEBPF:
> >>>>>>>>>>>> -               ret = tun_set_ebpf(tun, &tun->steering_prog, argp);
> >>>>>>>>>>>> +               bpf_ret = tun_set_ebpf(tun, &tun->steering_prog, argp);
> >>>>>>>>>>>> +               if (IS_ERR(bpf_ret))
> >>>>>>>>>>>> +                       ret = PTR_ERR(bpf_ret);
> >>>>>>>>>>>> +               else if (bpf_ret)
> >>>>>>>>>>>> +                       tun->vnet_hash.flags &= ~TUN_VNET_HASH_RSS;
> >>>>>>>>>>>
> >>>>>>>>>>> Don't make one feature disable another.
> >>>>>>>>>>>
> >>>>>>>>>>> TUNSETSTEERINGEBPF and TUNSETVNETHASH are mutually exclusive
> >>>>>>>>>>> functions. If one is enabled the other call should fail, with EBUSY
> >>>>>>>>>>> for instance.
> >>>>>>>>>>>
> >>>>>>>>>>>> +       case TUNSETVNETHASH:
> >>>>>>>>>>>> +               len = sizeof(vnet_hash);
> >>>>>>>>>>>> +               if (copy_from_user(&vnet_hash, argp, len)) {
> >>>>>>>>>>>> +                       ret = -EFAULT;
> >>>>>>>>>>>> +                       break;
> >>>>>>>>>>>> +               }
> >>>>>>>>>>>> +
> >>>>>>>>>>>> +               if (((vnet_hash.flags & TUN_VNET_HASH_REPORT) &&
> >>>>>>>>>>>> +                    (tun->vnet_hdr_sz < sizeof(struct virtio_net_hdr_v1_hash) ||
> >>>>>>>>>>>> +                     !tun_is_little_endian(tun))) ||
> >>>>>>>>>>>> +                    vnet_hash.indirection_table_mask >=
> >>>>>>>>>>>> +                    TUN_VNET_HASH_MAX_INDIRECTION_TABLE_LENGTH) {
> >>>>>>>>>>>> +                       ret = -EINVAL;
> >>>>>>>>>>>> +                       break;
> >>>>>>>>>>>> +               }
> >>>>>>>>>>>> +
> >>>>>>>>>>>> +               argp = (u8 __user *)argp + len;
> >>>>>>>>>>>> +               len = (vnet_hash.indirection_table_mask + 1) * 2;
> >>>>>>>>>>>> +               if (copy_from_user(vnet_hash_indirection_table, argp, len)) {
> >>>>>>>>>>>> +                       ret = -EFAULT;
> >>>>>>>>>>>> +                       break;
> >>>>>>>>>>>> +               }
> >>>>>>>>>>>> +
> >>>>>>>>>>>> +               argp = (u8 __user *)argp + len;
> >>>>>>>>>>>> +               len = virtio_net_hash_key_length(vnet_hash.types);
> >>>>>>>>>>>> +
> >>>>>>>>>>>> +               if (copy_from_user(vnet_hash_key, argp, len)) {
> >>>>>>>>>>>> +                       ret = -EFAULT;
> >>>>>>>>>>>> +                       break;
> >>>>>>>>>>>> +               }
> >>>>>>>>>>>
> >>>>>>>>>>> Probably easier and less error-prone to define a fixed size control
> >>>>>>>>>>> struct with the max indirection table size.
> >>>>>>>>>>
> >>>>>>>>>> I made its size variable because the indirection table and key may grow
> >>>>>>>>>> in the future as I wrote above.
> >>>>>>>>>>
> >>>>>>>>>>>
> >>>>>>>>>>> Btw: please trim the CC: list considerably on future patches.
> >>>>>>>>>>
> >>>>>>>>>> I'll do so in the next version with the TUNSETSTEERINGEBPF change you
> >>>>>>>>>> proposed.
> >>>>>>>>>
> >>>>>>>>> To be clear: please don't just resubmit with that one change.
> >>>>>>>>>
> >>>>>>>>> The skb and cb issues are quite fundamental issues that need to be resolved.
> >>>>>>>>>
> >>>>>>>>> I'd like to understand why adjusting the existing BPF feature for this
> >>>>>>>>> exact purpose cannot be amended to return the key it produced.
> >>>>>>>>
> >>>>>>>> eBPF steering program is not designed for this particular problem in my
> >>>>>>>> understanding. It was introduced to derive hash values with an
> >>>>>>>> understanding of application-specific semantics of packets instead of
> >>>>>>>> generic IP/TCP/UDP semantics.
> >>>>>>>>
> >>>>>>>> This problem is rather different in terms that the hash derivation is
> >>>>>>>> strictly defined by virtio-net. I don't think it makes sense to
> >>>>>>>> introduce the complexity of BPF when you always run the same code.
> >>>>>>>>
> >>>>>>>> It can utilize the existing flow dissector and also make it easier to
> >>>>>>>> use for the userspace by implementing this in the kernel.
> >>>>>>>
> >>>>>>> Ok. There does appear to be overlap in functionality. But it might be
> >>>>>>> easier to deploy to just have standard Toeplitz available without
> >>>>>>> having to compile and load an eBPF program.
> >>>>>>>
> >>>>>>> As for the sk_buff and cb[] changes. The first is really not needed.
> >>>>>>> sk_buff simply would not scale if every edge case needs a few bits.
> >>>>>>
> >>>>>> An alternative is to move the bit to cb[] and clear it for every code
> >>>>>> paths that lead to ndo_start_xmit(), but I'm worried that it is error-prone.
> >>>>>>
> >>>>>> I think we can put the bit in sk_buff for now. We can implement the
> >>>>>> alternative when we are short of bits.
> >>>>>
> >>>>> I disagree. sk_buff fields add a cost to every code path. They cannot
> >>>>> be added for every edge case.
> >>>>
> >>>> It only takes an unused bit and does not grow the sk_buff size so I
> >>>> think it has practically no cost for now.
> >>>
> >>> The problem is that that thinking leads to death by a thousand cuts.
> >>>
> >>> "for now" forces the cost of having to think hard how to avoid growing
> >>> sk_buff onto the next person. Let's do it right from the start.
> >>
> >> I see. I described an alternative to move the bit to cb[] and clear it
> >> in all code paths that leads to ndo_start_xmit() earlier. Does that
> >> sound good to you?
> >
> > If you use the control block to pass information between
> > __dev_queue_xmit on the tun device and tun_net_xmit, using gso_skb_cb,
> > the field can be left undefined in all non-tun paths. tun_select_queue
> > can initialize.
>
> The problem is that tun_select_queue() is not always called.
> netdev_core_pick_tx() ensures dev->real_num_tx_queues != 1 before
> calling it, but this variable may change later and result in a race
> condition. Another case is that XDP with predefined queue.
>
> >
> > I would still use skb->hash to encode the hash. That hash type of that
> > field is not strictly defined. It can be siphash from ___skb_get_hash
> > or a device hash, which most likely also uses Toeplitz. Then you also
> > don't run into the problem of growing the struct size.
>
> I'm concerned exactly because it's not strictly defined. Someone may
> decide to overwrite it later if we are not cautious enough. qdisc_skb_cb
> also has sufficient space to contain both of the hash value and type.

How about using skb extensions?

Thanks

>
Akihiko Odaki Oct. 10, 2023, 5:51 a.m. UTC | #25
On 2023/10/10 14:45, Jason Wang wrote:
> On Tue, Oct 10, 2023 at 9:52 AM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
>>
>> On 2023/10/09 19:44, Willem de Bruijn wrote:
>>> On Mon, Oct 9, 2023 at 3:12 AM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
>>>>
>>>> On 2023/10/09 19:06, Willem de Bruijn wrote:
>>>>> On Mon, Oct 9, 2023 at 3:02 AM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
>>>>>>
>>>>>> On 2023/10/09 18:57, Willem de Bruijn wrote:
>>>>>>> On Mon, Oct 9, 2023 at 3:57 AM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
>>>>>>>>
>>>>>>>> On 2023/10/09 17:04, Willem de Bruijn wrote:
>>>>>>>>> On Sun, Oct 8, 2023 at 3:46 PM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
>>>>>>>>>>
>>>>>>>>>> On 2023/10/09 5:08, Willem de Bruijn wrote:
>>>>>>>>>>> On Sun, Oct 8, 2023 at 10:04 PM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
>>>>>>>>>>>>
>>>>>>>>>>>> On 2023/10/09 4:07, Willem de Bruijn wrote:
>>>>>>>>>>>>> On Sun, Oct 8, 2023 at 7:22 AM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
>>>>>>>>>>>>>>
>>>>>>>>>>>>>> virtio-net has two usages of hashes: one is RSS and another is hash
>>>>>>>>>>>>>> reporting. Conventionally the hash calculation was done by the VMM.
>>>>>>>>>>>>>> However, computing the hash after the queue was chosen defeats the
>>>>>>>>>>>>>> purpose of RSS.
>>>>>>>>>>>>>>
>>>>>>>>>>>>>> Another approach is to use eBPF steering program. This approach has
>>>>>>>>>>>>>> another downside: it cannot report the calculated hash due to the
>>>>>>>>>>>>>> restrictive nature of eBPF.
>>>>>>>>>>>>>>
>>>>>>>>>>>>>> Introduce the code to compute hashes to the kernel in order to overcome
>>>>>>>>>>>>>> these challenges. An alternative solution is to extend the eBPF steering
>>>>>>>>>>>>>> program so that it will be able to report to the userspace, but it makes
>>>>>>>>>>>>>> little sense to allow to implement different hashing algorithms with
>>>>>>>>>>>>>> eBPF since the hash value reported by virtio-net is strictly defined by
>>>>>>>>>>>>>> the specification.
>>>>>>>>>>>>>>
>>>>>>>>>>>>>> The hash value already stored in sk_buff is not used and computed
>>>>>>>>>>>>>> independently since it may have been computed in a way not conformant
>>>>>>>>>>>>>> with the specification.
>>>>>>>>>>>>>>
>>>>>>>>>>>>>> Signed-off-by: Akihiko Odaki <akihiko.odaki@daynix.com>
>>>>>>>>>>>>>> ---
>>>>>>>>>>>>>
>>>>>>>>>>>>>> +static const struct tun_vnet_hash_cap tun_vnet_hash_cap = {
>>>>>>>>>>>>>> +       .max_indirection_table_length =
>>>>>>>>>>>>>> +               TUN_VNET_HASH_MAX_INDIRECTION_TABLE_LENGTH,
>>>>>>>>>>>>>> +
>>>>>>>>>>>>>> +       .types = VIRTIO_NET_SUPPORTED_HASH_TYPES
>>>>>>>>>>>>>> +};
>>>>>>>>>>>>>
>>>>>>>>>>>>> No need to have explicit capabilities exchange like this? Tun either
>>>>>>>>>>>>> supports all or none.
>>>>>>>>>>>>
>>>>>>>>>>>> tun does not support VIRTIO_NET_RSS_HASH_TYPE_IP_EX,
>>>>>>>>>>>> VIRTIO_NET_RSS_HASH_TYPE_TCP_EX, and VIRTIO_NET_RSS_HASH_TYPE_UDP_EX.
>>>>>>>>>>>>
>>>>>>>>>>>> It is because the flow dissector does not support IPv6 extensions. The
>>>>>>>>>>>> specification is also vague, and does not tell how many TLVs should be
>>>>>>>>>>>> consumed at most when interpreting destination option header so I chose
>>>>>>>>>>>> to avoid adding code for these hash types to the flow dissector. I doubt
>>>>>>>>>>>> anyone will complain about it since nobody complains for Linux.
>>>>>>>>>>>>
>>>>>>>>>>>> I'm also adding this so that we can extend it later.
>>>>>>>>>>>> max_indirection_table_length may grow for systems with 128+ CPUs, or
>>>>>>>>>>>> types may have other bits for new protocols in the future.
>>>>>>>>>>>>
>>>>>>>>>>>>>
>>>>>>>>>>>>>>                case TUNSETSTEERINGEBPF:
>>>>>>>>>>>>>> -               ret = tun_set_ebpf(tun, &tun->steering_prog, argp);
>>>>>>>>>>>>>> +               bpf_ret = tun_set_ebpf(tun, &tun->steering_prog, argp);
>>>>>>>>>>>>>> +               if (IS_ERR(bpf_ret))
>>>>>>>>>>>>>> +                       ret = PTR_ERR(bpf_ret);
>>>>>>>>>>>>>> +               else if (bpf_ret)
>>>>>>>>>>>>>> +                       tun->vnet_hash.flags &= ~TUN_VNET_HASH_RSS;
>>>>>>>>>>>>>
>>>>>>>>>>>>> Don't make one feature disable another.
>>>>>>>>>>>>>
>>>>>>>>>>>>> TUNSETSTEERINGEBPF and TUNSETVNETHASH are mutually exclusive
>>>>>>>>>>>>> functions. If one is enabled the other call should fail, with EBUSY
>>>>>>>>>>>>> for instance.
>>>>>>>>>>>>>
>>>>>>>>>>>>>> +       case TUNSETVNETHASH:
>>>>>>>>>>>>>> +               len = sizeof(vnet_hash);
>>>>>>>>>>>>>> +               if (copy_from_user(&vnet_hash, argp, len)) {
>>>>>>>>>>>>>> +                       ret = -EFAULT;
>>>>>>>>>>>>>> +                       break;
>>>>>>>>>>>>>> +               }
>>>>>>>>>>>>>> +
>>>>>>>>>>>>>> +               if (((vnet_hash.flags & TUN_VNET_HASH_REPORT) &&
>>>>>>>>>>>>>> +                    (tun->vnet_hdr_sz < sizeof(struct virtio_net_hdr_v1_hash) ||
>>>>>>>>>>>>>> +                     !tun_is_little_endian(tun))) ||
>>>>>>>>>>>>>> +                    vnet_hash.indirection_table_mask >=
>>>>>>>>>>>>>> +                    TUN_VNET_HASH_MAX_INDIRECTION_TABLE_LENGTH) {
>>>>>>>>>>>>>> +                       ret = -EINVAL;
>>>>>>>>>>>>>> +                       break;
>>>>>>>>>>>>>> +               }
>>>>>>>>>>>>>> +
>>>>>>>>>>>>>> +               argp = (u8 __user *)argp + len;
>>>>>>>>>>>>>> +               len = (vnet_hash.indirection_table_mask + 1) * 2;
>>>>>>>>>>>>>> +               if (copy_from_user(vnet_hash_indirection_table, argp, len)) {
>>>>>>>>>>>>>> +                       ret = -EFAULT;
>>>>>>>>>>>>>> +                       break;
>>>>>>>>>>>>>> +               }
>>>>>>>>>>>>>> +
>>>>>>>>>>>>>> +               argp = (u8 __user *)argp + len;
>>>>>>>>>>>>>> +               len = virtio_net_hash_key_length(vnet_hash.types);
>>>>>>>>>>>>>> +
>>>>>>>>>>>>>> +               if (copy_from_user(vnet_hash_key, argp, len)) {
>>>>>>>>>>>>>> +                       ret = -EFAULT;
>>>>>>>>>>>>>> +                       break;
>>>>>>>>>>>>>> +               }
>>>>>>>>>>>>>
>>>>>>>>>>>>> Probably easier and less error-prone to define a fixed size control
>>>>>>>>>>>>> struct with the max indirection table size.
>>>>>>>>>>>>
>>>>>>>>>>>> I made its size variable because the indirection table and key may grow
>>>>>>>>>>>> in the future as I wrote above.
>>>>>>>>>>>>
>>>>>>>>>>>>>
>>>>>>>>>>>>> Btw: please trim the CC: list considerably on future patches.
>>>>>>>>>>>>
>>>>>>>>>>>> I'll do so in the next version with the TUNSETSTEERINGEBPF change you
>>>>>>>>>>>> proposed.
>>>>>>>>>>>
>>>>>>>>>>> To be clear: please don't just resubmit with that one change.
>>>>>>>>>>>
>>>>>>>>>>> The skb and cb issues are quite fundamental issues that need to be resolved.
>>>>>>>>>>>
>>>>>>>>>>> I'd like to understand why adjusting the existing BPF feature for this
>>>>>>>>>>> exact purpose cannot be amended to return the key it produced.
>>>>>>>>>>
>>>>>>>>>> eBPF steering program is not designed for this particular problem in my
>>>>>>>>>> understanding. It was introduced to derive hash values with an
>>>>>>>>>> understanding of application-specific semantics of packets instead of
>>>>>>>>>> generic IP/TCP/UDP semantics.
>>>>>>>>>>
>>>>>>>>>> This problem is rather different in terms that the hash derivation is
>>>>>>>>>> strictly defined by virtio-net. I don't think it makes sense to
>>>>>>>>>> introduce the complexity of BPF when you always run the same code.
>>>>>>>>>>
>>>>>>>>>> It can utilize the existing flow dissector and also make it easier to
>>>>>>>>>> use for the userspace by implementing this in the kernel.
>>>>>>>>>
>>>>>>>>> Ok. There does appear to be overlap in functionality. But it might be
>>>>>>>>> easier to deploy to just have standard Toeplitz available without
>>>>>>>>> having to compile and load an eBPF program.
>>>>>>>>>
>>>>>>>>> As for the sk_buff and cb[] changes. The first is really not needed.
>>>>>>>>> sk_buff simply would not scale if every edge case needs a few bits.
>>>>>>>>
>>>>>>>> An alternative is to move the bit to cb[] and clear it for every code
>>>>>>>> paths that lead to ndo_start_xmit(), but I'm worried that it is error-prone.
>>>>>>>>
>>>>>>>> I think we can put the bit in sk_buff for now. We can implement the
>>>>>>>> alternative when we are short of bits.
>>>>>>>
>>>>>>> I disagree. sk_buff fields add a cost to every code path. They cannot
>>>>>>> be added for every edge case.
>>>>>>
>>>>>> It only takes an unused bit and does not grow the sk_buff size so I
>>>>>> think it has practically no cost for now.
>>>>>
>>>>> The problem is that that thinking leads to death by a thousand cuts.
>>>>>
>>>>> "for now" forces the cost of having to think hard how to avoid growing
>>>>> sk_buff onto the next person. Let's do it right from the start.
>>>>
>>>> I see. I described an alternative to move the bit to cb[] and clear it
>>>> in all code paths that leads to ndo_start_xmit() earlier. Does that
>>>> sound good to you?
>>>
>>> If you use the control block to pass information between
>>> __dev_queue_xmit on the tun device and tun_net_xmit, using gso_skb_cb,
>>> the field can be left undefined in all non-tun paths. tun_select_queue
>>> can initialize.
>>
>> The problem is that tun_select_queue() is not always called.
>> netdev_core_pick_tx() ensures dev->real_num_tx_queues != 1 before
>> calling it, but this variable may change later and result in a race
>> condition. Another case is that XDP with predefined queue.
>>
>>>
>>> I would still use skb->hash to encode the hash. That hash type of that
>>> field is not strictly defined. It can be siphash from ___skb_get_hash
>>> or a device hash, which most likely also uses Toeplitz. Then you also
>>> don't run into the problem of growing the struct size.
>>
>> I'm concerned exactly because it's not strictly defined. Someone may
>> decide to overwrite it later if we are not cautious enough. qdisc_skb_cb
>> also has sufficient space to contain both of the hash value and type.
> 
> How about using skb extensions?

I think it will work. I'll try it in the next version.
Jason Wang Oct. 10, 2023, 6 a.m. UTC | #26
On Tue, Oct 10, 2023 at 1:51 PM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
>
> On 2023/10/10 14:45, Jason Wang wrote:
> > On Tue, Oct 10, 2023 at 9:52 AM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
> >>
> >> On 2023/10/09 19:44, Willem de Bruijn wrote:
> >>> On Mon, Oct 9, 2023 at 3:12 AM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
> >>>>
> >>>> On 2023/10/09 19:06, Willem de Bruijn wrote:
> >>>>> On Mon, Oct 9, 2023 at 3:02 AM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
> >>>>>>
> >>>>>> On 2023/10/09 18:57, Willem de Bruijn wrote:
> >>>>>>> On Mon, Oct 9, 2023 at 3:57 AM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
> >>>>>>>>
> >>>>>>>> On 2023/10/09 17:04, Willem de Bruijn wrote:
> >>>>>>>>> On Sun, Oct 8, 2023 at 3:46 PM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
> >>>>>>>>>>
> >>>>>>>>>> On 2023/10/09 5:08, Willem de Bruijn wrote:
> >>>>>>>>>>> On Sun, Oct 8, 2023 at 10:04 PM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
> >>>>>>>>>>>>
> >>>>>>>>>>>> On 2023/10/09 4:07, Willem de Bruijn wrote:
> >>>>>>>>>>>>> On Sun, Oct 8, 2023 at 7:22 AM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
> >>>>>>>>>>>>>>
> >>>>>>>>>>>>>> virtio-net have two usage of hashes: one is RSS and another is hash
> >>>>>>>>>>>>>> reporting. Conventionally the hash calculation was done by the VMM.
> >>>>>>>>>>>>>> However, computing the hash after the queue was chosen defeats the
> >>>>>>>>>>>>>> purpose of RSS.
> >>>>>>>>>>>>>>
> >>>>>>>>>>>>>> Another approach is to use eBPF steering program. This approach has
> >>>>>>>>>>>>>> another downside: it cannot report the calculated hash due to the
> >>>>>>>>>>>>>> restrictive nature of eBPF.
> >>>>>>>>>>>>>>
> >>>>>>>>>>>>>> Introduce the code to compute hashes to the kernel in order to overcome
> >>>>>>>>>>>>>> these challenges. An alternative solution is to extend the eBPF steering
> >>>>>>>>>>>>>> program so that it will be able to report to the userspace, but it makes
> >>>>>>>>>>>>>> little sense to allow to implement different hashing algorithms with
> >>>>>>>>>>>>>> eBPF since the hash value reported by virtio-net is strictly defined by
> >>>>>>>>>>>>>> the specification.
> >>>>>>>>>>>>>>
> >>>>>>>>>>>>>> The hash value already stored in sk_buff is not used and computed
> >>>>>>>>>>>>>> independently since it may have been computed in a way not conformant
> >>>>>>>>>>>>>> with the specification.
> >>>>>>>>>>>>>>
> >>>>>>>>>>>>>> Signed-off-by: Akihiko Odaki <akihiko.odaki@daynix.com>
> >>>>>>>>>>>>>> ---
> >>>>>>>>>>>>>
> >>>>>>>>>>>>>> +static const struct tun_vnet_hash_cap tun_vnet_hash_cap = {
> >>>>>>>>>>>>>> +       .max_indirection_table_length =
> >>>>>>>>>>>>>> +               TUN_VNET_HASH_MAX_INDIRECTION_TABLE_LENGTH,
> >>>>>>>>>>>>>> +
> >>>>>>>>>>>>>> +       .types = VIRTIO_NET_SUPPORTED_HASH_TYPES
> >>>>>>>>>>>>>> +};
> >>>>>>>>>>>>>
> >>>>>>>>>>>>> No need to have explicit capabilities exchange like this? Tun either
> >>>>>>>>>>>>> supports all or none.
> >>>>>>>>>>>>
> >>>>>>>>>>>> tun does not support VIRTIO_NET_RSS_HASH_TYPE_IP_EX,
> >>>>>>>>>>>> VIRTIO_NET_RSS_HASH_TYPE_TCP_EX, and VIRTIO_NET_RSS_HASH_TYPE_UDP_EX.
> >>>>>>>>>>>>
> >>>>>>>>>>>> It is because the flow dissector does not support IPv6 extensions. The
> >>>>>>>>>>>> specification is also vague, and does not tell how many TLVs should be
> >>>>>>>>>>>> consumed at most when interpreting destination option header so I chose
> >>>>>>>>>>>> to avoid adding code for these hash types to the flow dissector. I doubt
> >>>>>>>>>>>> anyone will complain about it since nobody complains for Linux.
> >>>>>>>>>>>>
> >>>>>>>>>>>> I'm also adding this so that we can extend it later.
> >>>>>>>>>>>> max_indirection_table_length may grow for systems with 128+ CPUs, or
> >>>>>>>>>>>> types may have other bits for new protocols in the future.
> >>>>>>>>>>>>
> >>>>>>>>>>>>>
> >>>>>>>>>>>>>>                case TUNSETSTEERINGEBPF:
> >>>>>>>>>>>>>> -               ret = tun_set_ebpf(tun, &tun->steering_prog, argp);
> >>>>>>>>>>>>>> +               bpf_ret = tun_set_ebpf(tun, &tun->steering_prog, argp);
> >>>>>>>>>>>>>> +               if (IS_ERR(bpf_ret))
> >>>>>>>>>>>>>> +                       ret = PTR_ERR(bpf_ret);
> >>>>>>>>>>>>>> +               else if (bpf_ret)
> >>>>>>>>>>>>>> +                       tun->vnet_hash.flags &= ~TUN_VNET_HASH_RSS;
> >>>>>>>>>>>>>
> >>>>>>>>>>>>> Don't make one feature disable another.
> >>>>>>>>>>>>>
> >>>>>>>>>>>>> TUNSETSTEERINGEBPF and TUNSETVNETHASH are mutually exclusive
> >>>>>>>>>>>>> functions. If one is enabled the other call should fail, with EBUSY
> >>>>>>>>>>>>> for instance.
> >>>>>>>>>>>>>
> >>>>>>>>>>>>>> +       case TUNSETVNETHASH:
> >>>>>>>>>>>>>> +               len = sizeof(vnet_hash);
> >>>>>>>>>>>>>> +               if (copy_from_user(&vnet_hash, argp, len)) {
> >>>>>>>>>>>>>> +                       ret = -EFAULT;
> >>>>>>>>>>>>>> +                       break;
> >>>>>>>>>>>>>> +               }
> >>>>>>>>>>>>>> +
> >>>>>>>>>>>>>> +               if (((vnet_hash.flags & TUN_VNET_HASH_REPORT) &&
> >>>>>>>>>>>>>> +                    (tun->vnet_hdr_sz < sizeof(struct virtio_net_hdr_v1_hash) ||
> >>>>>>>>>>>>>> +                     !tun_is_little_endian(tun))) ||
> >>>>>>>>>>>>>> +                    vnet_hash.indirection_table_mask >=
> >>>>>>>>>>>>>> +                    TUN_VNET_HASH_MAX_INDIRECTION_TABLE_LENGTH) {
> >>>>>>>>>>>>>> +                       ret = -EINVAL;
> >>>>>>>>>>>>>> +                       break;
> >>>>>>>>>>>>>> +               }
> >>>>>>>>>>>>>> +
> >>>>>>>>>>>>>> +               argp = (u8 __user *)argp + len;
> >>>>>>>>>>>>>> +               len = (vnet_hash.indirection_table_mask + 1) * 2;
> >>>>>>>>>>>>>> +               if (copy_from_user(vnet_hash_indirection_table, argp, len)) {
> >>>>>>>>>>>>>> +                       ret = -EFAULT;
> >>>>>>>>>>>>>> +                       break;
> >>>>>>>>>>>>>> +               }
> >>>>>>>>>>>>>> +
> >>>>>>>>>>>>>> +               argp = (u8 __user *)argp + len;
> >>>>>>>>>>>>>> +               len = virtio_net_hash_key_length(vnet_hash.types);
> >>>>>>>>>>>>>> +
> >>>>>>>>>>>>>> +               if (copy_from_user(vnet_hash_key, argp, len)) {
> >>>>>>>>>>>>>> +                       ret = -EFAULT;
> >>>>>>>>>>>>>> +                       break;
> >>>>>>>>>>>>>> +               }
> >>>>>>>>>>>>>
> >>>>>>>>>>>>> Probably easier and less error-prone to define a fixed size control
> >>>>>>>>>>>>> struct with the max indirection table size.
> >>>>>>>>>>>>
> >>>>>>>>>>>> I made its size variable because the indirection table and key may grow
> >>>>>>>>>>>> in the future as I wrote above.
> >>>>>>>>>>>>
> >>>>>>>>>>>>>
> >>>>>>>>>>>>> Btw: please trim the CC: list considerably on future patches.
> >>>>>>>>>>>>
> >>>>>>>>>>>> I'll do so in the next version with the TUNSETSTEERINGEBPF change you
> >>>>>>>>>>>> proposed.
> >>>>>>>>>>>
> >>>>>>>>>>> To be clear: please don't just resubmit with that one change.
> >>>>>>>>>>>
> >>>>>>>>>>> The skb and cb issues are quite fundamental issues that need to be resolved.
> >>>>>>>>>>>
> >>>>>>>>>>> I'd like to understand why adjusting the existing BPF feature for this
> >>>>>>>>>>> exact purpose cannot be amended to return the key it produced.
> >>>>>>>>>>
> >>>>>>>>>> eBPF steering program is not designed for this particular problem in my
> >>>>>>>>>> understanding. It was introduced to derive hash values with an
> >>>>>>>>>> understanding of application-specific semantics of packets instead of
> >>>>>>>>>> generic IP/TCP/UDP semantics.
> >>>>>>>>>>
> >>>>>>>>>> This problem is rather different in terms that the hash derivation is
> >>>>>>>>>> strictly defined by virtio-net. I don't think it makes sense to
> >>>>>>>>>> introduce the complexity of BPF when you always run the same code.
> >>>>>>>>>>
> >>>>>>>>>> It can utilize the existing flow dissector and also make it easier to
> >>>>>>>>>> use for the userspace by implementing this in the kernel.
> >>>>>>>>>
> >>>>>>>>> Ok. There does appear to be overlap in functionality. But it might be
> >>>>>>>>> easier to deploy to just have standard Toeplitz available without
> >>>>>>>>> having to compile and load an eBPF program.
> >>>>>>>>>
> >>>>>>>>> As for the sk_buff and cb[] changes. The first is really not needed.
> >>>>>>>>> sk_buff simply would not scale if every edge case needs a few bits.
> >>>>>>>>
> >>>>>>>> An alternative is to move the bit to cb[] and clear it for every code
> >>>>>>>> paths that lead to ndo_start_xmit(), but I'm worried that it is error-prone.
> >>>>>>>>
> >>>>>>>> I think we can put the bit in sk_buff for now. We can implement the
> >>>>>>>> alternative when we are short of bits.
> >>>>>>>
> >>>>>>> I disagree. sk_buff fields add a cost to every code path. They cannot
> >>>>>>> be added for every edge case.
> >>>>>>
> >>>>>> It only takes an unused bit and does not grow the sk_buff size so I
> >>>>>> think it has practically no cost for now.
> >>>>>
> >>>>> The problem is that that thinking leads to death by a thousand cuts.
> >>>>>
> >>>>> "for now" forces the cost of having to think hard how to avoid growing
> >>>>> sk_buff onto the next person. Let's do it right from the start.
> >>>>
> >>>> I see. I described an alternative to move the bit to cb[] and clear it
> >>>> in all code paths that leads to ndo_start_xmit() earlier. Does that
> >>>> sound good to you?
> >>>
> >>> If you use the control block to pass information between
> >>> __dev_queue_xmit on the tun device and tun_net_xmit, using gso_skb_cb,
> >>> the field can be left undefined in all non-tun paths. tun_select_queue
> >>> can initialize.
> >>
> >> The problem is that tun_select_queue() is not always called.
> >> netdev_core_pick_tx() ensures dev->real_num_tx_queues != 1 before
> >> calling it, but this variable may change later and result in a race
> >> condition. Another case is that XDP with predefined queue.
> >>
> >>>
> >>> I would still use skb->hash to encode the hash. That hash type of that
> >>> field is not strictly defined. It can be siphash from ___skb_get_hash
> >>> or a device hash, which most likely also uses Toeplitz. Then you also
> >>> don't run into the problem of growing the struct size.
> >>
> >> I'm concerned exactly because it's not strictly defined. Someone may
> >> decide to overwrite it later if we are not cautious enough. qdisc_skb_cb
> >> also has sufficient space to contain both of the hash value and type.
> >
> > How about using skb extensions?
>
> I think it will work. I'll try it in the next version.

Btw, I still think using eBPF for hash might be better.

Though the hashing rule is defined in the spec, it may be extended in
the future. For example, several extensions have been proposed:

1) RSS context
2) encapsulated packet hashing

Thanks

>
Akihiko Odaki Oct. 10, 2023, 6:19 a.m. UTC | #27
On 2023/10/10 15:00, Jason Wang wrote:
> On Tue, Oct 10, 2023 at 1:51 PM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
>>
>> On 2023/10/10 14:45, Jason Wang wrote:
>>> On Tue, Oct 10, 2023 at 9:52 AM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
>>>>
>>>> On 2023/10/09 19:44, Willem de Bruijn wrote:
>>>>> On Mon, Oct 9, 2023 at 3:12 AM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
>>>>>>
>>>>>> On 2023/10/09 19:06, Willem de Bruijn wrote:
>>>>>>> On Mon, Oct 9, 2023 at 3:02 AM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
>>>>>>>>
>>>>>>>> On 2023/10/09 18:57, Willem de Bruijn wrote:
>>>>>>>>> On Mon, Oct 9, 2023 at 3:57 AM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
>>>>>>>>>>
>>>>>>>>>> On 2023/10/09 17:04, Willem de Bruijn wrote:
>>>>>>>>>>> On Sun, Oct 8, 2023 at 3:46 PM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
>>>>>>>>>>>>
>>>>>>>>>>>> On 2023/10/09 5:08, Willem de Bruijn wrote:
>>>>>>>>>>>>> On Sun, Oct 8, 2023 at 10:04 PM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
>>>>>>>>>>>>>>
>>>>>>>>>>>>>> On 2023/10/09 4:07, Willem de Bruijn wrote:
>>>>>>>>>>>>>>> On Sun, Oct 8, 2023 at 7:22 AM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
>>>>>>>>>>>>>>>>
>>>>>>>>>>>>>>>> virtio-net have two usage of hashes: one is RSS and another is hash
>>>>>>>>>>>>>>>> reporting. Conventionally the hash calculation was done by the VMM.
>>>>>>>>>>>>>>>> However, computing the hash after the queue was chosen defeats the
>>>>>>>>>>>>>>>> purpose of RSS.
>>>>>>>>>>>>>>>>
>>>>>>>>>>>>>>>> Another approach is to use eBPF steering program. This approach has
>>>>>>>>>>>>>>>> another downside: it cannot report the calculated hash due to the
>>>>>>>>>>>>>>>> restrictive nature of eBPF.
>>>>>>>>>>>>>>>>
>>>>>>>>>>>>>>>> Introduce the code to compute hashes to the kernel in order to overcome
> >>>>>>>>>>>>>>>> these challenges. An alternative solution is to extend the eBPF steering
>>>>>>>>>>>>>>>> program so that it will be able to report to the userspace, but it makes
>>>>>>>>>>>>>>>> little sense to allow to implement different hashing algorithms with
>>>>>>>>>>>>>>>> eBPF since the hash value reported by virtio-net is strictly defined by
>>>>>>>>>>>>>>>> the specification.
>>>>>>>>>>>>>>>>
>>>>>>>>>>>>>>>> The hash value already stored in sk_buff is not used and computed
>>>>>>>>>>>>>>>> independently since it may have been computed in a way not conformant
>>>>>>>>>>>>>>>> with the specification.
>>>>>>>>>>>>>>>>
>>>>>>>>>>>>>>>> Signed-off-by: Akihiko Odaki <akihiko.odaki@daynix.com>
>>>>>>>>>>>>>>>> ---
>>>>>>>>>>>>>>>
>>>>>>>>>>>>>>>> +static const struct tun_vnet_hash_cap tun_vnet_hash_cap = {
>>>>>>>>>>>>>>>> +       .max_indirection_table_length =
>>>>>>>>>>>>>>>> +               TUN_VNET_HASH_MAX_INDIRECTION_TABLE_LENGTH,
>>>>>>>>>>>>>>>> +
>>>>>>>>>>>>>>>> +       .types = VIRTIO_NET_SUPPORTED_HASH_TYPES
>>>>>>>>>>>>>>>> +};
>>>>>>>>>>>>>>>
>>>>>>>>>>>>>>> No need to have explicit capabilities exchange like this? Tun either
>>>>>>>>>>>>>>> supports all or none.
>>>>>>>>>>>>>>
>>>>>>>>>>>>>> tun does not support VIRTIO_NET_RSS_HASH_TYPE_IP_EX,
>>>>>>>>>>>>>> VIRTIO_NET_RSS_HASH_TYPE_TCP_EX, and VIRTIO_NET_RSS_HASH_TYPE_UDP_EX.
>>>>>>>>>>>>>>
>>>>>>>>>>>>>> It is because the flow dissector does not support IPv6 extensions. The
>>>>>>>>>>>>>> specification is also vague, and does not tell how many TLVs should be
>>>>>>>>>>>>>> consumed at most when interpreting destination option header so I chose
>>>>>>>>>>>>>> to avoid adding code for these hash types to the flow dissector. I doubt
>>>>>>>>>>>>>> anyone will complain about it since nobody complains for Linux.
>>>>>>>>>>>>>>
>>>>>>>>>>>>>> I'm also adding this so that we can extend it later.
>>>>>>>>>>>>>> max_indirection_table_length may grow for systems with 128+ CPUs, or
>>>>>>>>>>>>>> types may have other bits for new protocols in the future.
>>>>>>>>>>>>>>
>>>>>>>>>>>>>>>
>>>>>>>>>>>>>>>>                 case TUNSETSTEERINGEBPF:
>>>>>>>>>>>>>>>> -               ret = tun_set_ebpf(tun, &tun->steering_prog, argp);
>>>>>>>>>>>>>>>> +               bpf_ret = tun_set_ebpf(tun, &tun->steering_prog, argp);
>>>>>>>>>>>>>>>> +               if (IS_ERR(bpf_ret))
>>>>>>>>>>>>>>>> +                       ret = PTR_ERR(bpf_ret);
>>>>>>>>>>>>>>>> +               else if (bpf_ret)
>>>>>>>>>>>>>>>> +                       tun->vnet_hash.flags &= ~TUN_VNET_HASH_RSS;
>>>>>>>>>>>>>>>
>>>>>>>>>>>>>>> Don't make one feature disable another.
>>>>>>>>>>>>>>>
>>>>>>>>>>>>>>> TUNSETSTEERINGEBPF and TUNSETVNETHASH are mutually exclusive
>>>>>>>>>>>>>>> functions. If one is enabled the other call should fail, with EBUSY
>>>>>>>>>>>>>>> for instance.
>>>>>>>>>>>>>>>
>>>>>>>>>>>>>>>> +       case TUNSETVNETHASH:
>>>>>>>>>>>>>>>> +               len = sizeof(vnet_hash);
>>>>>>>>>>>>>>>> +               if (copy_from_user(&vnet_hash, argp, len)) {
>>>>>>>>>>>>>>>> +                       ret = -EFAULT;
>>>>>>>>>>>>>>>> +                       break;
>>>>>>>>>>>>>>>> +               }
>>>>>>>>>>>>>>>> +
>>>>>>>>>>>>>>>> +               if (((vnet_hash.flags & TUN_VNET_HASH_REPORT) &&
>>>>>>>>>>>>>>>> +                    (tun->vnet_hdr_sz < sizeof(struct virtio_net_hdr_v1_hash) ||
>>>>>>>>>>>>>>>> +                     !tun_is_little_endian(tun))) ||
>>>>>>>>>>>>>>>> +                    vnet_hash.indirection_table_mask >=
>>>>>>>>>>>>>>>> +                    TUN_VNET_HASH_MAX_INDIRECTION_TABLE_LENGTH) {
>>>>>>>>>>>>>>>> +                       ret = -EINVAL;
>>>>>>>>>>>>>>>> +                       break;
>>>>>>>>>>>>>>>> +               }
>>>>>>>>>>>>>>>> +
>>>>>>>>>>>>>>>> +               argp = (u8 __user *)argp + len;
>>>>>>>>>>>>>>>> +               len = (vnet_hash.indirection_table_mask + 1) * 2;
>>>>>>>>>>>>>>>> +               if (copy_from_user(vnet_hash_indirection_table, argp, len)) {
>>>>>>>>>>>>>>>> +                       ret = -EFAULT;
>>>>>>>>>>>>>>>> +                       break;
>>>>>>>>>>>>>>>> +               }
>>>>>>>>>>>>>>>> +
>>>>>>>>>>>>>>>> +               argp = (u8 __user *)argp + len;
>>>>>>>>>>>>>>>> +               len = virtio_net_hash_key_length(vnet_hash.types);
>>>>>>>>>>>>>>>> +
>>>>>>>>>>>>>>>> +               if (copy_from_user(vnet_hash_key, argp, len)) {
>>>>>>>>>>>>>>>> +                       ret = -EFAULT;
>>>>>>>>>>>>>>>> +                       break;
>>>>>>>>>>>>>>>> +               }
>>>>>>>>>>>>>>>
>>>>>>>>>>>>>>> Probably easier and less error-prone to define a fixed size control
>>>>>>>>>>>>>>> struct with the max indirection table size.
>>>>>>>>>>>>>>
>>>>>>>>>>>>>> I made its size variable because the indirection table and key may grow
>>>>>>>>>>>>>> in the future as I wrote above.
>>>>>>>>>>>>>>
>>>>>>>>>>>>>>>
>>>>>>>>>>>>>>> Btw: please trim the CC: list considerably on future patches.
>>>>>>>>>>>>>>
>>>>>>>>>>>>>> I'll do so in the next version with the TUNSETSTEERINGEBPF change you
>>>>>>>>>>>>>> proposed.
>>>>>>>>>>>>>
>>>>>>>>>>>>> To be clear: please don't just resubmit with that one change.
>>>>>>>>>>>>>
>>>>>>>>>>>>> The skb and cb issues are quite fundamental issues that need to be resolved.
>>>>>>>>>>>>>
>>>>>>>>>>>>> I'd like to understand why adjusting the existing BPF feature for this
>>>>>>>>>>>>> exact purpose cannot be amended to return the key it produced.
>>>>>>>>>>>>
>>>>>>>>>>>> eBPF steering program is not designed for this particular problem in my
>>>>>>>>>>>> understanding. It was introduced to derive hash values with an
>>>>>>>>>>>> understanding of application-specific semantics of packets instead of
>>>>>>>>>>>> generic IP/TCP/UDP semantics.
>>>>>>>>>>>>
>>>>>>>>>>>> This problem is rather different in terms that the hash derivation is
>>>>>>>>>>>> strictly defined by virtio-net. I don't think it makes sense to
>>>>>>>>>>>> introduce the complexity of BPF when you always run the same code.
>>>>>>>>>>>>
>>>>>>>>>>>> It can utilize the existing flow dissector and also make it easier to
>>>>>>>>>>>> use for the userspace by implementing this in the kernel.
>>>>>>>>>>>
>>>>>>>>>>> Ok. There does appear to be overlap in functionality. But it might be
>>>>>>>>>>> easier to deploy to just have standard Toeplitz available without
>>>>>>>>>>> having to compile and load an eBPF program.
>>>>>>>>>>>
>>>>>>>>>>> As for the sk_buff and cb[] changes. The first is really not needed.
>>>>>>>>>>> sk_buff simply would not scale if every edge case needs a few bits.
>>>>>>>>>>
>>>>>>>>>> An alternative is to move the bit to cb[] and clear it for every code
>>>>>>>>>> paths that lead to ndo_start_xmit(), but I'm worried that it is error-prone.
>>>>>>>>>>
>>>>>>>>>> I think we can put the bit in sk_buff for now. We can implement the
>>>>>>>>>> alternative when we are short of bits.
>>>>>>>>>
>>>>>>>>> I disagree. sk_buff fields add a cost to every code path. They cannot
>>>>>>>>> be added for every edge case.
>>>>>>>>
>>>>>>>> It only takes an unused bit and does not grow the sk_buff size so I
>>>>>>>> think it has practically no cost for now.
>>>>>>>
>>>>>>> The problem is that that thinking leads to death by a thousand cuts.
>>>>>>>
>>>>>>> "for now" forces the cost of having to think hard how to avoid growing
>>>>>>> sk_buff onto the next person. Let's do it right from the start.
>>>>>>
>>>>>> I see. I described an alternative to move the bit to cb[] and clear it
>>>>>> in all code paths that leads to ndo_start_xmit() earlier. Does that
>>>>>> sound good to you?
>>>>>
>>>>> If you use the control block to pass information between
>>>>> __dev_queue_xmit on the tun device and tun_net_xmit, using gso_skb_cb,
>>>>> the field can be left undefined in all non-tun paths. tun_select_queue
>>>>> can initialize.
>>>>
>>>> The problem is that tun_select_queue() is not always called.
>>>> netdev_core_pick_tx() ensures dev->real_num_tx_queues != 1 before
>>>> calling it, but this variable may change later and result in a race
>>>> condition. Another case is that XDP with predefined queue.
>>>>
>>>>>
>>>>> I would still use skb->hash to encode the hash. That hash type of that
>>>>> field is not strictly defined. It can be siphash from ___skb_get_hash
>>>>> or a device hash, which most likely also uses Toeplitz. Then you also
>>>>> don't run into the problem of growing the struct size.
>>>>
>>>> I'm concerned exactly because it's not strictly defined. Someone may
>>>> decide to overwrite it later if we are not cautious enough. qdisc_skb_cb
>>>> also has sufficient space to contain both of the hash value and type.
>>>
>>> How about using skb extensions?
>>
>> I think it will work. I'll try it in the next version.
> 
> Btw, I still think using eBPF for hash might be better.
> 
> Though the hashing rule is defined in the spec, it may be extended in
> the future. For example, several extensions have been proposed:
> 
> 1) RSS context
> 2) encapsulated packet hashing

Looking at the proposals, I'm now more inclined to extend the BPF 
steering program.

Yuri, who wrote the RFC patches to extend the BPF steering program, also 
raised a concern that it may become hard to implement virtio-net 
extensions in the future. It is much easier to deploy a new BPF program 
to support extensions since it will be included in QEMU and can be 
deployed at once without involving other kernel stuff.

I was still not sure how likely such an extension would emerge, especially 
when the hardware RSS capability is not evolving for a decade or so. But 
those proposals show that there is more demand for new features for 
virtio-net.
Jason Wang Oct. 11, 2023, 3:18 a.m. UTC | #28
On Tue, Oct 10, 2023 at 2:19 PM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
>
> On 2023/10/10 15:00, Jason Wang wrote:
> > On Tue, Oct 10, 2023 at 1:51 PM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
> >>
> >> On 2023/10/10 14:45, Jason Wang wrote:
> >>> On Tue, Oct 10, 2023 at 9:52 AM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
> >>>>
> >>>> On 2023/10/09 19:44, Willem de Bruijn wrote:
> >>>>> On Mon, Oct 9, 2023 at 3:12 AM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
> >>>>>>
> >>>>>> On 2023/10/09 19:06, Willem de Bruijn wrote:
> >>>>>>> On Mon, Oct 9, 2023 at 3:02 AM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
> >>>>>>>>
> >>>>>>>> On 2023/10/09 18:57, Willem de Bruijn wrote:
> >>>>>>>>> On Mon, Oct 9, 2023 at 3:57 AM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
> >>>>>>>>>>
> >>>>>>>>>> On 2023/10/09 17:04, Willem de Bruijn wrote:
> >>>>>>>>>>> On Sun, Oct 8, 2023 at 3:46 PM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
> >>>>>>>>>>>>
> >>>>>>>>>>>> On 2023/10/09 5:08, Willem de Bruijn wrote:
> >>>>>>>>>>>>> On Sun, Oct 8, 2023 at 10:04 PM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
> >>>>>>>>>>>>>>
> >>>>>>>>>>>>>> On 2023/10/09 4:07, Willem de Bruijn wrote:
> >>>>>>>>>>>>>>> On Sun, Oct 8, 2023 at 7:22 AM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
> >>>>>>>>>>>>>>>>
> >>>>>>>>>>>>>>>> virtio-net have two usage of hashes: one is RSS and another is hash
> >>>>>>>>>>>>>>>> reporting. Conventionally the hash calculation was done by the VMM.
> >>>>>>>>>>>>>>>> However, computing the hash after the queue was chosen defeats the
> >>>>>>>>>>>>>>>> purpose of RSS.
> >>>>>>>>>>>>>>>>
> >>>>>>>>>>>>>>>> Another approach is to use eBPF steering program. This approach has
> >>>>>>>>>>>>>>>> another downside: it cannot report the calculated hash due to the
> >>>>>>>>>>>>>>>> restrictive nature of eBPF.
> >>>>>>>>>>>>>>>>
> >>>>>>>>>>>>>>>> Introduce the code to compute hashes to the kernel in order to overcome
> >>>>>>>>>>>>>>>> these challenges. An alternative solution is to extend the eBPF steering
> >>>>>>>>>>>>>>>> program so that it will be able to report to the userspace, but it makes
> >>>>>>>>>>>>>>>> little sense to allow to implement different hashing algorithms with
> >>>>>>>>>>>>>>>> eBPF since the hash value reported by virtio-net is strictly defined by
> >>>>>>>>>>>>>>>> the specification.
> >>>>>>>>>>>>>>>>
> >>>>>>>>>>>>>>>> The hash value already stored in sk_buff is not used and computed
> >>>>>>>>>>>>>>>> independently since it may have been computed in a way not conformant
> >>>>>>>>>>>>>>>> with the specification.
> >>>>>>>>>>>>>>>>
> >>>>>>>>>>>>>>>> Signed-off-by: Akihiko Odaki <akihiko.odaki@daynix.com>
> >>>>>>>>>>>>>>>> ---
> >>>>>>>>>>>>>>>
> >>>>>>>>>>>>>>>> +static const struct tun_vnet_hash_cap tun_vnet_hash_cap = {
> >>>>>>>>>>>>>>>> +       .max_indirection_table_length =
> >>>>>>>>>>>>>>>> +               TUN_VNET_HASH_MAX_INDIRECTION_TABLE_LENGTH,
> >>>>>>>>>>>>>>>> +
> >>>>>>>>>>>>>>>> +       .types = VIRTIO_NET_SUPPORTED_HASH_TYPES
> >>>>>>>>>>>>>>>> +};
> >>>>>>>>>>>>>>>
> >>>>>>>>>>>>>>> No need to have explicit capabilities exchange like this? Tun either
> >>>>>>>>>>>>>>> supports all or none.
> >>>>>>>>>>>>>>
> >>>>>>>>>>>>>> tun does not support VIRTIO_NET_RSS_HASH_TYPE_IP_EX,
> >>>>>>>>>>>>>> VIRTIO_NET_RSS_HASH_TYPE_TCP_EX, and VIRTIO_NET_RSS_HASH_TYPE_UDP_EX.
> >>>>>>>>>>>>>>
> >>>>>>>>>>>>>> It is because the flow dissector does not support IPv6 extensions. The
> >>>>>>>>>>>>>> specification is also vague, and does not tell how many TLVs should be
> >>>>>>>>>>>>>> consumed at most when interpreting destination option header so I chose
> >>>>>>>>>>>>>> to avoid adding code for these hash types to the flow dissector. I doubt
> >>>>>>>>>>>>>> anyone will complain about it since nobody complains for Linux.
> >>>>>>>>>>>>>>
> >>>>>>>>>>>>>> I'm also adding this so that we can extend it later.
> >>>>>>>>>>>>>> max_indirection_table_length may grow for systems with 128+ CPUs, or
> >>>>>>>>>>>>>> types may have other bits for new protocols in the future.
> >>>>>>>>>>>>>>
> >>>>>>>>>>>>>>>
> >>>>>>>>>>>>>>>>                 case TUNSETSTEERINGEBPF:
> >>>>>>>>>>>>>>>> -               ret = tun_set_ebpf(tun, &tun->steering_prog, argp);
> >>>>>>>>>>>>>>>> +               bpf_ret = tun_set_ebpf(tun, &tun->steering_prog, argp);
> >>>>>>>>>>>>>>>> +               if (IS_ERR(bpf_ret))
> >>>>>>>>>>>>>>>> +                       ret = PTR_ERR(bpf_ret);
> >>>>>>>>>>>>>>>> +               else if (bpf_ret)
> >>>>>>>>>>>>>>>> +                       tun->vnet_hash.flags &= ~TUN_VNET_HASH_RSS;
> >>>>>>>>>>>>>>>
> >>>>>>>>>>>>>>> Don't make one feature disable another.
> >>>>>>>>>>>>>>>
> >>>>>>>>>>>>>>> TUNSETSTEERINGEBPF and TUNSETVNETHASH are mutually exclusive
> >>>>>>>>>>>>>>> functions. If one is enabled the other call should fail, with EBUSY
> >>>>>>>>>>>>>>> for instance.
> >>>>>>>>>>>>>>>
> >>>>>>>>>>>>>>>> +       case TUNSETVNETHASH:
> >>>>>>>>>>>>>>>> +               len = sizeof(vnet_hash);
> >>>>>>>>>>>>>>>> +               if (copy_from_user(&vnet_hash, argp, len)) {
> >>>>>>>>>>>>>>>> +                       ret = -EFAULT;
> >>>>>>>>>>>>>>>> +                       break;
> >>>>>>>>>>>>>>>> +               }
> >>>>>>>>>>>>>>>> +
> >>>>>>>>>>>>>>>> +               if (((vnet_hash.flags & TUN_VNET_HASH_REPORT) &&
> >>>>>>>>>>>>>>>> +                    (tun->vnet_hdr_sz < sizeof(struct virtio_net_hdr_v1_hash) ||
> >>>>>>>>>>>>>>>> +                     !tun_is_little_endian(tun))) ||
> >>>>>>>>>>>>>>>> +                    vnet_hash.indirection_table_mask >=
> >>>>>>>>>>>>>>>> +                    TUN_VNET_HASH_MAX_INDIRECTION_TABLE_LENGTH) {
> >>>>>>>>>>>>>>>> +                       ret = -EINVAL;
> >>>>>>>>>>>>>>>> +                       break;
> >>>>>>>>>>>>>>>> +               }
> >>>>>>>>>>>>>>>> +
> >>>>>>>>>>>>>>>> +               argp = (u8 __user *)argp + len;
> >>>>>>>>>>>>>>>> +               len = (vnet_hash.indirection_table_mask + 1) * 2;
> >>>>>>>>>>>>>>>> +               if (copy_from_user(vnet_hash_indirection_table, argp, len)) {
> >>>>>>>>>>>>>>>> +                       ret = -EFAULT;
> >>>>>>>>>>>>>>>> +                       break;
> >>>>>>>>>>>>>>>> +               }
> >>>>>>>>>>>>>>>> +
> >>>>>>>>>>>>>>>> +               argp = (u8 __user *)argp + len;
> >>>>>>>>>>>>>>>> +               len = virtio_net_hash_key_length(vnet_hash.types);
> >>>>>>>>>>>>>>>> +
> >>>>>>>>>>>>>>>> +               if (copy_from_user(vnet_hash_key, argp, len)) {
> >>>>>>>>>>>>>>>> +                       ret = -EFAULT;
> >>>>>>>>>>>>>>>> +                       break;
> >>>>>>>>>>>>>>>> +               }
> >>>>>>>>>>>>>>>
> >>>>>>>>>>>>>>> Probably easier and less error-prone to define a fixed size control
> >>>>>>>>>>>>>>> struct with the max indirection table size.
> >>>>>>>>>>>>>>
> >>>>>>>>>>>>>> I made its size variable because the indirection table and key may grow
> >>>>>>>>>>>>>> in the future as I wrote above.
> >>>>>>>>>>>>>>
> >>>>>>>>>>>>>>>
> >>>>>>>>>>>>>>> Btw: please trim the CC: list considerably on future patches.
> >>>>>>>>>>>>>>
> >>>>>>>>>>>>>> I'll do so in the next version with the TUNSETSTEERINGEBPF change you
> >>>>>>>>>>>>>> proposed.
> >>>>>>>>>>>>>
> >>>>>>>>>>>>> To be clear: please don't just resubmit with that one change.
> >>>>>>>>>>>>>
> >>>>>>>>>>>>> The skb and cb issues are quite fundamental issues that need to be resolved.
> >>>>>>>>>>>>>
> >>>>>>>>>>>>> I'd like to understand why adjusting the existing BPF feature for this
> >>>>>>>>>>>>> exact purpose cannot be amended to return the key it produced.
> >>>>>>>>>>>>
> >>>>>>>>>>>> eBPF steering program is not designed for this particular problem in my
> >>>>>>>>>>>> understanding. It was introduced to derive hash values with an
> >>>>>>>>>>>> understanding of application-specific semantics of packets instead of
> >>>>>>>>>>>> generic IP/TCP/UDP semantics.
> >>>>>>>>>>>>
> >>>>>>>>>>>> This problem is rather different in terms that the hash derivation is
> >>>>>>>>>>>> strictly defined by virtio-net. I don't think it makes sense to
> >>>>>>>>>>>> introduce the complexity of BPF when you always run the same code.
> >>>>>>>>>>>>
> >>>>>>>>>>>> It can utilize the existing flow dissector and also make it easier to
> >>>>>>>>>>>> use for the userspace by implementing this in the kernel.
> >>>>>>>>>>>
> >>>>>>>>>>> Ok. There does appear to be overlap in functionality. But it might be
> >>>>>>>>>>> easier to deploy to just have standard Toeplitz available without
> >>>>>>>>>>> having to compile and load an eBPF program.
> >>>>>>>>>>>
> >>>>>>>>>>> As for the sk_buff and cb[] changes. The first is really not needed.
> >>>>>>>>>>> sk_buff simply would not scale if every edge case needs a few bits.
> >>>>>>>>>>
> >>>>>>>>>> An alternative is to move the bit to cb[] and clear it for every code
> >>>>>>>>>> paths that lead to ndo_start_xmit(), but I'm worried that it is error-prone.
> >>>>>>>>>>
> >>>>>>>>>> I think we can put the bit in sk_buff for now. We can implement the
> >>>>>>>>>> alternative when we are short of bits.
> >>>>>>>>>
> >>>>>>>>> I disagree. sk_buff fields add a cost to every code path. They cannot
> >>>>>>>>> be added for every edge case.
> >>>>>>>>
> >>>>>>>> It only takes an unused bit and does not grow the sk_buff size so I
> >>>>>>>> think it has practically no cost for now.
> >>>>>>>
> >>>>>>> The problem is that that thinking leads to death by a thousand cuts.
> >>>>>>>
> >>>>>>> "for now" forces the cost of having to think hard how to avoid growing
> >>>>>>> sk_buff onto the next person. Let's do it right from the start.
> >>>>>>
> >>>>>> I see. I described an alternative to move the bit to cb[] and clear it
> >>>>>> in all code paths that leads to ndo_start_xmit() earlier. Does that
> >>>>>> sound good to you?
> >>>>>
> >>>>> If you use the control block to pass information between
> >>>>> __dev_queue_xmit on the tun device and tun_net_xmit, using gso_skb_cb,
> >>>>> the field can be left undefined in all non-tun paths. tun_select_queue
> >>>>> can initialize.
> >>>>
> >>>> The problem is that tun_select_queue() is not always called.
> >>>> netdev_core_pick_tx() ensures dev->real_num_tx_queues != 1 before
> >>>> calling it, but this variable may change later and result in a race
> >>>> condition. Another case is that XDP with predefined queue.
> >>>>
> >>>>>
> >>>>> I would still use skb->hash to encode the hash. That hash type of that
> >>>>> field is not strictly defined. It can be siphash from ___skb_get_hash
> >>>>> or a device hash, which most likely also uses Toeplitz. Then you also
> >>>>> don't run into the problem of growing the struct size.
> >>>>
> >>>> I'm concerned exactly because it's not strictly defined. Someone may
> >>>> decide to overwrite it later if we are not cautious enough. qdisc_skb_cb
> >>>> also has sufficient space to contain both of the hash value and type.
> >>>
> >>> How about using skb extensions?
> >>
> >> I think it will work. I'll try it in the next version.
> >
> > Btw, I still think using eBPF for hash might be better.
> >
> > Though the hashing rule is defined in the spec, it may be extended in
> > the future. For example, several extensions has been proposed:
> >
> > 1) RSS context
> > 2) encapsulated packet hashing
>
> Looking at the proposals, I'm now more inclined to extend the BPF
> steering program.

Just to make sure we are on the same page.

If the eBPF program needs to access skb extensions, it would not be a
steering program anymore (not a filter).

Or do you mean it is a dedicated eBPF program that calculates the hash?

>
> Yuri, who wrote the RFC patches to extend the BPF steering program, also
> raised a concern that it may become hard to implement virtio-net
> extensions in the future. It is much easier to deploy a new BPF program
> to support extensions since it will be included in QEMU and can be
> deployed at once without concerning other kernel stuff.
>
> I was still not sure how likely such an extension will emerge especially
> when the hardware RSS capability is not evolving for a decade or so. But
> those proposals show that there are more demands of new features for
> virtio-net.

It's not only RSS; if you track virtio development, flow directors
are also being proposed.

Thanks

>
Akihiko Odaki Oct. 11, 2023, 3:57 a.m. UTC | #29
On 2023/10/11 12:18, Jason Wang wrote:
> On Tue, Oct 10, 2023 at 2:19 PM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
>>
>> On 2023/10/10 15:00, Jason Wang wrote:
>>> On Tue, Oct 10, 2023 at 1:51 PM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
>>>>
>>>> On 2023/10/10 14:45, Jason Wang wrote:
>>>>> On Tue, Oct 10, 2023 at 9:52 AM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
>>>>>>
>>>>>> On 2023/10/09 19:44, Willem de Bruijn wrote:
>>>>>>> On Mon, Oct 9, 2023 at 3:12 AM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
>>>>>>>>
>>>>>>>> On 2023/10/09 19:06, Willem de Bruijn wrote:
>>>>>>>>> On Mon, Oct 9, 2023 at 3:02 AM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
>>>>>>>>>>
>>>>>>>>>> On 2023/10/09 18:57, Willem de Bruijn wrote:
>>>>>>>>>>> On Mon, Oct 9, 2023 at 3:57 AM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
>>>>>>>>>>>>
>>>>>>>>>>>> On 2023/10/09 17:04, Willem de Bruijn wrote:
>>>>>>>>>>>>> On Sun, Oct 8, 2023 at 3:46 PM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
>>>>>>>>>>>>>>
>>>>>>>>>>>>>> On 2023/10/09 5:08, Willem de Bruijn wrote:
>>>>>>>>>>>>>>> On Sun, Oct 8, 2023 at 10:04 PM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
>>>>>>>>>>>>>>>>
>>>>>>>>>>>>>>>> On 2023/10/09 4:07, Willem de Bruijn wrote:
>>>>>>>>>>>>>>>>> On Sun, Oct 8, 2023 at 7:22 AM Akihiko Odaki <akihiko.odaki@daynix.com> wrote:
>>>>>>>>>>>>>>>>>>
>>>>>>>>>>>>>>>>>> virtio-net have two usage of hashes: one is RSS and another is hash
>>>>>>>>>>>>>>>>>> reporting. Conventionally the hash calculation was done by the VMM.
>>>>>>>>>>>>>>>>>> However, computing the hash after the queue was chosen defeats the
>>>>>>>>>>>>>>>>>> purpose of RSS.
>>>>>>>>>>>>>>>>>>
>>>>>>>>>>>>>>>>>> Another approach is to use eBPF steering program. This approach has
>>>>>>>>>>>>>>>>>> another downside: it cannot report the calculated hash due to the
>>>>>>>>>>>>>>>>>> restrictive nature of eBPF.
>>>>>>>>>>>>>>>>>>
>>>>>>>>>>>>>>>>>> Introduce the code to compute hashes to the kernel in order to overcome
>>>>>>>>>>>>>>>>>> thse challenges. An alternative solution is to extend the eBPF steering
>>>>>>>>>>>>>>>>>> program so that it will be able to report to the userspace, but it makes
>>>>>>>>>>>>>>>>>> little sense to allow to implement different hashing algorithms with
>>>>>>>>>>>>>>>>>> eBPF since the hash value reported by virtio-net is strictly defined by
>>>>>>>>>>>>>>>>>> the specification.
>>>>>>>>>>>>>>>>>>
>>>>>>>>>>>>>>>>>> The hash value already stored in sk_buff is not used and computed
>>>>>>>>>>>>>>>>>> independently since it may have been computed in a way not conformant
>>>>>>>>>>>>>>>>>> with the specification.
>>>>>>>>>>>>>>>>>>
>>>>>>>>>>>>>>>>>> Signed-off-by: Akihiko Odaki <akihiko.odaki@daynix.com>
>>>>>>>>>>>>>>>>>> ---
>>>>>>>>>>>>>>>>>
>>>>>>>>>>>>>>>>>> +static const struct tun_vnet_hash_cap tun_vnet_hash_cap = {
>>>>>>>>>>>>>>>>>> +       .max_indirection_table_length =
>>>>>>>>>>>>>>>>>> +               TUN_VNET_HASH_MAX_INDIRECTION_TABLE_LENGTH,
>>>>>>>>>>>>>>>>>> +
>>>>>>>>>>>>>>>>>> +       .types = VIRTIO_NET_SUPPORTED_HASH_TYPES
>>>>>>>>>>>>>>>>>> +};
>>>>>>>>>>>>>>>>>
>>>>>>>>>>>>>>>>> No need to have explicit capabilities exchange like this? Tun either
>>>>>>>>>>>>>>>>> supports all or none.
>>>>>>>>>>>>>>>>
>>>>>>>>>>>>>>>> tun does not support VIRTIO_NET_RSS_HASH_TYPE_IP_EX,
>>>>>>>>>>>>>>>> VIRTIO_NET_RSS_HASH_TYPE_TCP_EX, and VIRTIO_NET_RSS_HASH_TYPE_UDP_EX.
>>>>>>>>>>>>>>>>
>>>>>>>>>>>>>>>> It is because the flow dissector does not support IPv6 extensions. The
>>>>>>>>>>>>>>>> specification is also vague, and does not tell how many TLVs should be
>>>>>>>>>>>>>>>> consumed at most when interpreting destination option header so I chose
>>>>>>>>>>>>>>>> to avoid adding code for these hash types to the flow dissector. I doubt
>>>>>>>>>>>>>>>> anyone will complain about it since nobody complains for Linux.
>>>>>>>>>>>>>>>>
>>>>>>>>>>>>>>>> I'm also adding this so that we can extend it later.
>>>>>>>>>>>>>>>> max_indirection_table_length may grow for systems with 128+ CPUs, or
>>>>>>>>>>>>>>>> types may have other bits for new protocols in the future.
>>>>>>>>>>>>>>>>
>>>>>>>>>>>>>>>>>
>>>>>>>>>>>>>>>>>>                  case TUNSETSTEERINGEBPF:
>>>>>>>>>>>>>>>>>> -               ret = tun_set_ebpf(tun, &tun->steering_prog, argp);
>>>>>>>>>>>>>>>>>> +               bpf_ret = tun_set_ebpf(tun, &tun->steering_prog, argp);
>>>>>>>>>>>>>>>>>> +               if (IS_ERR(bpf_ret))
>>>>>>>>>>>>>>>>>> +                       ret = PTR_ERR(bpf_ret);
>>>>>>>>>>>>>>>>>> +               else if (bpf_ret)
>>>>>>>>>>>>>>>>>> +                       tun->vnet_hash.flags &= ~TUN_VNET_HASH_RSS;
>>>>>>>>>>>>>>>>>
>>>>>>>>>>>>>>>>> Don't make one feature disable another.
>>>>>>>>>>>>>>>>>
>>>>>>>>>>>>>>>>> TUNSETSTEERINGEBPF and TUNSETVNETHASH are mutually exclusive
>>>>>>>>>>>>>>>>> functions. If one is enabled the other call should fail, with EBUSY
>>>>>>>>>>>>>>>>> for instance.
>>>>>>>>>>>>>>>>>
>>>>>>>>>>>>>>>>>> +       case TUNSETVNETHASH:
>>>>>>>>>>>>>>>>>> +               len = sizeof(vnet_hash);
>>>>>>>>>>>>>>>>>> +               if (copy_from_user(&vnet_hash, argp, len)) {
>>>>>>>>>>>>>>>>>> +                       ret = -EFAULT;
>>>>>>>>>>>>>>>>>> +                       break;
>>>>>>>>>>>>>>>>>> +               }
>>>>>>>>>>>>>>>>>> +
>>>>>>>>>>>>>>>>>> +               if (((vnet_hash.flags & TUN_VNET_HASH_REPORT) &&
>>>>>>>>>>>>>>>>>> +                    (tun->vnet_hdr_sz < sizeof(struct virtio_net_hdr_v1_hash) ||
>>>>>>>>>>>>>>>>>> +                     !tun_is_little_endian(tun))) ||
>>>>>>>>>>>>>>>>>> +                    vnet_hash.indirection_table_mask >=
>>>>>>>>>>>>>>>>>> +                    TUN_VNET_HASH_MAX_INDIRECTION_TABLE_LENGTH) {
>>>>>>>>>>>>>>>>>> +                       ret = -EINVAL;
>>>>>>>>>>>>>>>>>> +                       break;
>>>>>>>>>>>>>>>>>> +               }
>>>>>>>>>>>>>>>>>> +
>>>>>>>>>>>>>>>>>> +               argp = (u8 __user *)argp + len;
>>>>>>>>>>>>>>>>>> +               len = (vnet_hash.indirection_table_mask + 1) * 2;
>>>>>>>>>>>>>>>>>> +               if (copy_from_user(vnet_hash_indirection_table, argp, len)) {
>>>>>>>>>>>>>>>>>> +                       ret = -EFAULT;
>>>>>>>>>>>>>>>>>> +                       break;
>>>>>>>>>>>>>>>>>> +               }
>>>>>>>>>>>>>>>>>> +
>>>>>>>>>>>>>>>>>> +               argp = (u8 __user *)argp + len;
>>>>>>>>>>>>>>>>>> +               len = virtio_net_hash_key_length(vnet_hash.types);
>>>>>>>>>>>>>>>>>> +
>>>>>>>>>>>>>>>>>> +               if (copy_from_user(vnet_hash_key, argp, len)) {
>>>>>>>>>>>>>>>>>> +                       ret = -EFAULT;
>>>>>>>>>>>>>>>>>> +                       break;
>>>>>>>>>>>>>>>>>> +               }
>>>>>>>>>>>>>>>>>
>>>>>>>>>>>>>>>>> Probably easier and less error-prone to define a fixed size control
>>>>>>>>>>>>>>>>> struct with the max indirection table size.
>>>>>>>>>>>>>>>>
>>>>>>>>>>>>>>>> I made its size variable because the indirection table and key may grow
>>>>>>>>>>>>>>>> in the future as I wrote above.
>>>>>>>>>>>>>>>>
>>>>>>>>>>>>>>>>>
>>>>>>>>>>>>>>>>> Btw: please trim the CC: list considerably on future patches.
>>>>>>>>>>>>>>>>
>>>>>>>>>>>>>>>> I'll do so in the next version with the TUNSETSTEERINGEBPF change you
>>>>>>>>>>>>>>>> proposed.
>>>>>>>>>>>>>>>
>>>>>>>>>>>>>>> To be clear: please don't just resubmit with that one change.
>>>>>>>>>>>>>>>
>>>>>>>>>>>>>>> The skb and cb issues are quite fundamental issues that need to be resolved.
>>>>>>>>>>>>>>>
>>>>>>>>>>>>>>> I'd like to understand why adjusting the existing BPF feature for this
>>>>>>>>>>>>>>> exact purpose cannot be amended to return the key it produced.
>>>>>>>>>>>>>>
>>>>>>>>>>>>>> eBPF steering program is not designed for this particular problem in my
>>>>>>>>>>>>>> understanding. It was introduced to derive hash values with an
>>>>>>>>>>>>>> understanding of application-specific semantics of packets instead of
>>>>>>>>>>>>>> generic IP/TCP/UDP semantics.
>>>>>>>>>>>>>>
>>>>>>>>>>>>>> This problem is rather different in terms that the hash derivation is
>>>>>>>>>>>>>> strictly defined by virtio-net. I don't think it makes sense to
>>>>>>>>>>>>>> introduce the complexity of BPF when you always run the same code.
>>>>>>>>>>>>>>
>>>>>>>>>>>>>> It can utilize the existing flow dissector and also make it easier to
>>>>>>>>>>>>>> use for the userspace by implementing this in the kernel.
>>>>>>>>>>>>>
>>>>>>>>>>>>> Ok. There does appear to be overlap in functionality. But it might be
>>>>>>>>>>>>> easier to deploy to just have standard Toeplitz available without
>>>>>>>>>>>>> having to compile and load an eBPF program.
>>>>>>>>>>>>>
>>>>>>>>>>>>> As for the sk_buff and cb[] changes. The first is really not needed.
>>>>>>>>>>>>> sk_buff simply would not scale if every edge case needs a few bits.
>>>>>>>>>>>>
>>>>>>>>>>>> An alternative is to move the bit to cb[] and clear it for every code
>>>>>>>>>>>> paths that lead to ndo_start_xmit(), but I'm worried that it is error-prone.
>>>>>>>>>>>>
>>>>>>>>>>>> I think we can put the bit in sk_buff for now. We can implement the
>>>>>>>>>>>> alternative when we are short of bits.
>>>>>>>>>>>
>>>>>>>>>>> I disagree. sk_buff fields add a cost to every code path. They cannot
>>>>>>>>>>> be added for every edge case.
>>>>>>>>>>
>>>>>>>>>> It only takes an unused bit and does not grow the sk_buff size so I
>>>>>>>>>> think it has practically no cost for now.
>>>>>>>>>
>>>>>>>>> The problem is that that thinking leads to death by a thousand cuts.
>>>>>>>>>
>>>>>>>>> "for now" forces the cost of having to think hard how to avoid growing
>>>>>>>>> sk_buff onto the next person. Let's do it right from the start.
>>>>>>>>
>>>>>>>> I see. I described an alternative to move the bit to cb[] and clear it
>>>>>>>> in all code paths that leads to ndo_start_xmit() earlier. Does that
>>>>>>>> sound good to you?
>>>>>>>
>>>>>>> If you use the control block to pass information between
>>>>>>> __dev_queue_xmit on the tun device and tun_net_xmit, using gso_skb_cb,
>>>>>>> the field can be left undefined in all non-tun paths. tun_select_queue
>>>>>>> can initialize.
>>>>>>
>>>>>> The problem is that tun_select_queue() is not always called.
>>>>>> netdev_core_pick_tx() ensures dev->real_num_tx_queues != 1 before
>>>>>> calling it, but this variable may change later and result in a race
>>>>>> condition. Another case is that XDP with predefined queue.
>>>>>>
>>>>>>>
>>>>>>> I would still use skb->hash to encode the hash. That hash type of that
>>>>>>> field is not strictly defined. It can be siphash from ___skb_get_hash
>>>>>>> or a device hash, which most likely also uses Toeplitz. Then you also
>>>>>>> don't run into the problem of growing the struct size.
>>>>>>
>>>>>> I'm concerned exactly because it's not strictly defined. Someone may
>>>>>> decide to overwrite it later if we are not cautious enough. qdisc_skb_cb
>>>>>> also has sufficient space to contain both of the hash value and type.
>>>>>
>>>>> How about using skb extensions?
>>>>
>>>> I think it will work. I'll try it in the next version.
>>>
>>> Btw, I still think using eBPF for hash might be better.
>>>
>>> Though the hashing rule is defined in the spec, it may be extended in
>>> the future. For example, several extensions has been proposed:
>>>
>>> 1) RSS context
>>> 2) encapsulated packet hashing
>>
>> Looking at the proposals, I'm now more inclined to extend the BPF
>> steering program.
> 
> Just to make sure we are at the same page.
> 
> If the eBPF program needs to access skb extensions, it would not be a
> steering program anymore (not a filter).
> 
> Or do you mean it is a dedicated eBPF program that calculates the hash?

I think the BPF program should be a steering program but extended for 
hash reporting.

Since we need a hash reporting feature that is not present in a socket 
filter, the BPF program should have a dedicated bpf_prog_type (not 
BPF_PROG_TYPE_SOCKET_FILTER). However, as its functionality is the 
superset of the conventional steering program, I'm planning to use the 
existing TUNSETSTEERINGEBPF ioctl to set it.
diff mbox series

Patch

diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 89ab9efe522c..561a573cd008 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -171,6 +171,9 @@  struct tun_prog {
 	struct bpf_prog *prog;
 };
 
+#define TUN_VNET_HASH_MAX_KEY_SIZE 40
+#define TUN_VNET_HASH_MAX_INDIRECTION_TABLE_LENGTH 128
+
 /* Since the socket were moved to tun_file, to preserve the behavior of persist
  * device, socket filter, sndbuf and vnet header size were restore when the
  * file were attached to a persist device.
@@ -209,6 +212,9 @@  struct tun_struct {
 	struct tun_prog __rcu *steering_prog;
 	struct tun_prog __rcu *filter_prog;
 	struct ethtool_link_ksettings link_ksettings;
+	struct tun_vnet_hash vnet_hash;
+	u16 vnet_hash_indirection_table[TUN_VNET_HASH_MAX_INDIRECTION_TABLE_LENGTH];
+	u32 vnet_hash_key[TUN_VNET_HASH_MAX_KEY_SIZE / 4];
 	/* init args */
 	struct file *file;
 	struct ifreq *ifr;
@@ -219,6 +225,13 @@  struct veth {
 	__be16 h_vlan_TCI;
 };
 
+static const struct tun_vnet_hash_cap tun_vnet_hash_cap = {
+	.max_indirection_table_length =
+		TUN_VNET_HASH_MAX_INDIRECTION_TABLE_LENGTH,
+
+	.types = VIRTIO_NET_SUPPORTED_HASH_TYPES
+};
+
 static void tun_flow_init(struct tun_struct *tun);
 static void tun_flow_uninit(struct tun_struct *tun);
 
@@ -320,10 +333,16 @@  static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
 	if (get_user(be, argp))
 		return -EFAULT;
 
-	if (be)
+	if (be) {
+		if (!(tun->flags & TUN_VNET_LE) &&
+		    (tun->vnet_hash.flags & TUN_VNET_HASH_REPORT)) {
+			return -EINVAL;
+		}
+
 		tun->flags |= TUN_VNET_BE;
-	else
+	} else {
 		tun->flags &= ~TUN_VNET_BE;
+	}
 
 	return 0;
 }
@@ -558,15 +577,47 @@  static u16 tun_ebpf_select_queue(struct tun_struct *tun, struct sk_buff *skb)
 	return ret % numqueues;
 }
 
+static u16 tun_vnet_select_queue(struct tun_struct *tun, struct sk_buff *skb)
+{
+	u32 value = qdisc_skb_cb(skb)->tun_vnet_hash_value;
+	u32 numqueues;
+	u32 index;
+	u16 queue;
+
+	numqueues = READ_ONCE(tun->numqueues);
+	if (!numqueues)
+		return 0;
+
+	index = value & READ_ONCE(tun->vnet_hash.indirection_table_mask);
+	queue = READ_ONCE(tun->vnet_hash_indirection_table[index]);
+	if (!queue)
+		queue = READ_ONCE(tun->vnet_hash.unclassified_queue);
+
+	return queue % numqueues;
+}
+
 static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
 			    struct net_device *sb_dev)
 {
 	struct tun_struct *tun = netdev_priv(dev);
+	u8 vnet_hash_flags = READ_ONCE(tun->vnet_hash.flags);
+	struct virtio_net_hash hash;
 	u16 ret;
 
+	if (vnet_hash_flags & (TUN_VNET_HASH_RSS | TUN_VNET_HASH_REPORT)) {
+		virtio_net_hash(skb, READ_ONCE(tun->vnet_hash.types),
+				tun->vnet_hash_key, &hash);
+
+		skb->tun_vnet_hash = true;
+		qdisc_skb_cb(skb)->tun_vnet_hash_value = hash.value;
+		qdisc_skb_cb(skb)->tun_vnet_hash_report = hash.report;
+	}
+
 	rcu_read_lock();
 	if (rcu_dereference(tun->steering_prog))
 		ret = tun_ebpf_select_queue(tun, skb);
+	else if (vnet_hash_flags & TUN_VNET_HASH_RSS)
+		ret = tun_vnet_select_queue(tun, skb);
 	else
 		ret = tun_automq_select_queue(tun, skb);
 	rcu_read_unlock();
@@ -2088,10 +2139,15 @@  static ssize_t tun_put_user(struct tun_struct *tun,
 			    struct iov_iter *iter)
 {
 	struct tun_pi pi = { 0, skb->protocol };
+	struct virtio_net_hash vnet_hash = {
+		.value = qdisc_skb_cb(skb)->tun_vnet_hash_value,
+		.report = qdisc_skb_cb(skb)->tun_vnet_hash_report
+	};
 	ssize_t total;
 	int vlan_offset = 0;
 	int vlan_hlen = 0;
 	int vnet_hdr_sz = 0;
+	size_t vnet_hdr_content_sz;
 
 	if (skb_vlan_tag_present(skb))
 		vlan_hlen = VLAN_HLEN;
@@ -2116,31 +2172,49 @@  static ssize_t tun_put_user(struct tun_struct *tun,
 	}
 
 	if (vnet_hdr_sz) {
-		struct virtio_net_hdr gso;
+		union {
+			struct virtio_net_hdr hdr;
+			struct virtio_net_hdr_v1_hash v1_hash_hdr;
+		} hdr;
+		int ret;
 
 		if (iov_iter_count(iter) < vnet_hdr_sz)
 			return -EINVAL;
 
-		if (virtio_net_hdr_from_skb(skb, &gso,
-					    tun_is_little_endian(tun), true,
-					    vlan_hlen)) {
+		if ((READ_ONCE(tun->vnet_hash.flags) & TUN_VNET_HASH_REPORT) &&
+		    vnet_hdr_sz >= sizeof(hdr.v1_hash_hdr) &&
+		    skb->tun_vnet_hash) {
+			vnet_hdr_content_sz = sizeof(hdr.v1_hash_hdr);
+			ret = virtio_net_hdr_v1_hash_from_skb(skb,
+							      &hdr.v1_hash_hdr,
+							      true,
+							      vlan_hlen,
+							      &vnet_hash);
+		} else {
+			vnet_hdr_content_sz = sizeof(hdr.hdr);
+			ret = virtio_net_hdr_from_skb(skb, &hdr.hdr,
+						      tun_is_little_endian(tun),
+						      true, vlan_hlen);
+		}
+
+		if (ret) {
 			struct skb_shared_info *sinfo = skb_shinfo(skb);
 			pr_err("unexpected GSO type: "
 			       "0x%x, gso_size %d, hdr_len %d\n",
-			       sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
-			       tun16_to_cpu(tun, gso.hdr_len));
+			       sinfo->gso_type, tun16_to_cpu(tun, hdr.hdr.gso_size),
+			       tun16_to_cpu(tun, hdr.hdr.hdr_len));
 			print_hex_dump(KERN_ERR, "tun: ",
 				       DUMP_PREFIX_NONE,
 				       16, 1, skb->head,
-				       min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
+				       min((int)tun16_to_cpu(tun, hdr.hdr.hdr_len), 64), true);
 			WARN_ON_ONCE(1);
 			return -EINVAL;
 		}
 
-		if (copy_to_iter(&gso, sizeof(gso), iter) != sizeof(gso))
+		if (copy_to_iter(&hdr, vnet_hdr_content_sz, iter) != vnet_hdr_content_sz)
 			return -EFAULT;
 
-		iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso));
+		iov_iter_advance(iter, vnet_hdr_sz - vnet_hdr_content_sz);
 	}
 
 	if (vlan_hlen) {
@@ -3007,24 +3081,27 @@  static int tun_set_queue(struct file *file, struct ifreq *ifr)
 	return ret;
 }
 
-static int tun_set_ebpf(struct tun_struct *tun, struct tun_prog __rcu **prog_p,
-			void __user *data)
+static struct bpf_prog *tun_set_ebpf(struct tun_struct *tun,
+				     struct tun_prog __rcu **prog_p,
+				     void __user *data)
 {
 	struct bpf_prog *prog;
 	int fd;
+	int ret;
 
 	if (copy_from_user(&fd, data, sizeof(fd)))
-		return -EFAULT;
+		return ERR_PTR(-EFAULT);
 
 	if (fd == -1) {
 		prog = NULL;
 	} else {
 		prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER);
 		if (IS_ERR(prog))
-			return PTR_ERR(prog);
+			return prog;
 	}
 
-	return __tun_set_ebpf(tun, prog_p, prog);
+	ret = __tun_set_ebpf(tun, prog_p, prog);
+	return ret ? ERR_PTR(ret) : prog;
 }
 
 /* Return correct value for tun->dev->addr_len based on tun->dev->type. */
@@ -3082,6 +3159,11 @@  static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
 	int le;
 	int ret;
 	bool do_notify = false;
+	struct bpf_prog *bpf_ret;
+	struct tun_vnet_hash vnet_hash;
+	u16 vnet_hash_indirection_table[TUN_VNET_HASH_MAX_INDIRECTION_TABLE_LENGTH];
+	u8 vnet_hash_key[TUN_VNET_HASH_MAX_KEY_SIZE];
+	size_t len;
 
 	if (cmd == TUNSETIFF || cmd == TUNSETQUEUE ||
 	    (_IOC_TYPE(cmd) == SOCK_IOC_TYPE && cmd != SIOCGSKNS)) {
@@ -3295,7 +3377,10 @@  static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
 			ret = -EFAULT;
 			break;
 		}
-		if (vnet_hdr_sz < (int)sizeof(struct virtio_net_hdr)) {
+		if (vnet_hdr_sz <
+		    (int)((tun->vnet_hash.flags & TUN_VNET_HASH_REPORT) ?
+			  sizeof(struct virtio_net_hdr_v1_hash) :
+			  sizeof(struct virtio_net_hdr))) {
 			ret = -EINVAL;
 			break;
 		}
@@ -3314,10 +3399,16 @@  static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
 			ret = -EFAULT;
 			break;
 		}
-		if (le)
+		if (le) {
 			tun->flags |= TUN_VNET_LE;
-		else
+		} else {
+			if (!tun_legacy_is_little_endian(tun)) {
+				ret = -EINVAL;
+				break;
+			}
+
 			tun->flags &= ~TUN_VNET_LE;
+		}
 		break;
 
 	case TUNGETVNETBE:
@@ -3360,11 +3451,17 @@  static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
 		break;
 
 	case TUNSETSTEERINGEBPF:
-		ret = tun_set_ebpf(tun, &tun->steering_prog, argp);
+		bpf_ret = tun_set_ebpf(tun, &tun->steering_prog, argp);
+		if (IS_ERR(bpf_ret))
+			ret = PTR_ERR(bpf_ret);
+		else if (bpf_ret)
+			tun->vnet_hash.flags &= ~TUN_VNET_HASH_RSS;
 		break;
 
 	case TUNSETFILTEREBPF:
-		ret = tun_set_ebpf(tun, &tun->filter_prog, argp);
+		bpf_ret = tun_set_ebpf(tun, &tun->filter_prog, argp);
+		if (IS_ERR(bpf_ret))
+			ret = PTR_ERR(bpf_ret);
 		break;
 
 	case TUNSETCARRIER:
@@ -3382,6 +3479,54 @@  static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
 		ret = open_related_ns(&net->ns, get_net_ns);
 		break;
 
+	case TUNGETVNETHASHCAP:
+		if (copy_to_user(argp, &tun_vnet_hash_cap,
+				 sizeof(tun_vnet_hash_cap)))
+			ret = -EFAULT;
+		break;
+
+	case TUNSETVNETHASH:
+		len = sizeof(vnet_hash);
+		if (copy_from_user(&vnet_hash, argp, len)) {
+			ret = -EFAULT;
+			break;
+		}
+
+		if (((vnet_hash.flags & TUN_VNET_HASH_REPORT) &&
+		     (tun->vnet_hdr_sz < sizeof(struct virtio_net_hdr_v1_hash) ||
+		      !tun_is_little_endian(tun))) ||
+		     vnet_hash.indirection_table_mask >=
+		     TUN_VNET_HASH_MAX_INDIRECTION_TABLE_LENGTH) {
+			ret = -EINVAL;
+			break;
+		}
+
+		argp = (u8 __user *)argp + len;
+		len = (vnet_hash.indirection_table_mask + 1) * 2;
+		if (copy_from_user(vnet_hash_indirection_table, argp, len)) {
+			ret = -EFAULT;
+			break;
+		}
+
+		argp = (u8 __user *)argp + len;
+		len = virtio_net_hash_key_length(vnet_hash.types);
+
+		if (copy_from_user(vnet_hash_key, argp, len)) {
+			ret = -EFAULT;
+			break;
+		}
+
+		tun->vnet_hash = vnet_hash;
+		memcpy(tun->vnet_hash_indirection_table,
+		       vnet_hash_indirection_table,
+		       (vnet_hash.indirection_table_mask + 1) * 2);
+		memcpy(tun->vnet_hash_key, vnet_hash_key, len);
+
+		if (vnet_hash.flags & TUN_VNET_HASH_RSS)
+			__tun_set_ebpf(tun, &tun->steering_prog, NULL);
+
+		break;
+
 	default:
 		ret = -EINVAL;
 		break;
diff --git a/include/uapi/linux/if_tun.h b/include/uapi/linux/if_tun.h
index 287cdc81c939..dc591cd897c8 100644
--- a/include/uapi/linux/if_tun.h
+++ b/include/uapi/linux/if_tun.h
@@ -61,6 +61,8 @@ 
 #define TUNSETFILTEREBPF _IOR('T', 225, int)
 #define TUNSETCARRIER _IOW('T', 226, int)
 #define TUNGETDEVNETNS _IO('T', 227)
+#define TUNGETVNETHASHCAP _IO('T', 228)
+#define TUNSETVNETHASH _IOW('T', 229, unsigned int)
 
 /* TUNSETIFF ifr flags */
 #define IFF_TUN		0x0001
@@ -115,4 +117,18 @@  struct tun_filter {
 	__u8   addr[][ETH_ALEN];
 };
 
+struct tun_vnet_hash_cap {
+	__u16 max_indirection_table_length;
+	__u32 types;
+};
+
+#define TUN_VNET_HASH_RSS	0x01
+#define TUN_VNET_HASH_REPORT	0x02
+struct tun_vnet_hash {
+	__u8 flags;
+	__u32 types;
+	__u16 indirection_table_mask;
+	__u16 unclassified_queue;
+};
+
 #endif /* _UAPI__IF_TUN_H */